-rw-r--r--.gitignore1
-rw-r--r--Documentation/00-INDEX4
-rw-r--r--Documentation/ABI/testing/pstore35
-rw-r--r--Documentation/ABI/testing/sysfs-devices-mmc21
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power20
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid10
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-arvo53
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-kone8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus11
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus100
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra9
-rw-r--r--Documentation/ABI/testing/sysfs-fs-ext413
-rw-r--r--Documentation/ABI/testing/sysfs-fs-pstore7
-rw-r--r--Documentation/DocBook/filesystems.tmpl5
-rw-r--r--Documentation/DocBook/media-entities.tmpl8
-rw-r--r--Documentation/DocBook/v4l/common.xml2
-rw-r--r--Documentation/DocBook/v4l/compat.xml11
-rw-r--r--Documentation/DocBook/v4l/dev-capture.xml13
-rw-r--r--Documentation/DocBook/v4l/dev-output.xml13
-rw-r--r--Documentation/DocBook/v4l/func-mmap.xml10
-rw-r--r--Documentation/DocBook/v4l/func-munmap.xml3
-rw-r--r--Documentation/DocBook/v4l/io.xml283
-rw-r--r--Documentation/DocBook/v4l/pixfmt-nv12m.xml154
-rw-r--r--Documentation/DocBook/v4l/pixfmt-yuv420m.xml162
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml118
-rw-r--r--Documentation/DocBook/v4l/planar-apis.xml81
-rw-r--r--Documentation/DocBook/v4l/v4l2.xml22
-rw-r--r--Documentation/DocBook/v4l/vidioc-enum-fmt.xml2
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-fmt.xml15
-rw-r--r--Documentation/DocBook/v4l/vidioc-qbuf.xml24
-rw-r--r--Documentation/DocBook/v4l/vidioc-querybuf.xml14
-rw-r--r--Documentation/DocBook/v4l/vidioc-querycap.xml24
-rw-r--r--Documentation/RCU/whatisRCU.txt31
-rw-r--r--Documentation/acpi/apei/output_format.txt25
-rw-r--r--Documentation/arm/SH-Mobile/Makefile8
-rw-r--r--Documentation/arm/SH-Mobile/vrl4.c169
-rw-r--r--Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt29
-rw-r--r--Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen61
-rw-r--r--Documentation/arm/Sharp-LH/CompactFlash32
-rw-r--r--Documentation/arm/Sharp-LH/IOBarrier45
-rw-r--r--Documentation/arm/Sharp-LH/KEV7A4008
-rw-r--r--Documentation/arm/Sharp-LH/LCDPanels59
-rw-r--r--Documentation/arm/Sharp-LH/LPD7A40015
-rw-r--r--Documentation/arm/Sharp-LH/LPD7A40X16
-rw-r--r--Documentation/arm/Sharp-LH/SDRAM51
-rw-r--r--Documentation/arm/Sharp-LH/VectoredInterruptController80
-rw-r--r--Documentation/cgroups/memory.txt5
-rw-r--r--Documentation/cpu-freq/governors.txt11
-rw-r--r--Documentation/device-mapper/dm-crypt.txt2
-rw-r--r--Documentation/device-mapper/dm-flakey.txt18
-rw-r--r--Documentation/devicetree/00-INDEX8
-rw-r--r--Documentation/devicetree/bindings/i2c/ce4100-i2c.txt93
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-cmos.txt28
-rw-r--r--Documentation/devicetree/bindings/serial/altera_jtaguart.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/altera_uart.txt7
-rw-r--r--Documentation/devicetree/bindings/serio/altera_ps2.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/spi_altera.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/spi_oc_tiny.txt12
-rw-r--r--Documentation/devicetree/bindings/x86/ce4100.txt38
-rw-r--r--Documentation/devicetree/bindings/x86/interrupt.txt29
-rw-r--r--Documentation/devicetree/bindings/x86/timer.txt6
-rw-r--r--Documentation/devicetree/booting-without-of.txt20
-rw-r--r--Documentation/dvb/get_dvb_firmware8
-rw-r--r--Documentation/feature-removal-schedule.txt27
-rw-r--r--Documentation/filesystems/ext4.txt207
-rw-r--r--Documentation/filesystems/romfs.txt3
-rw-r--r--Documentation/filesystems/xfs-delayed-logging-design.txt7
-rw-r--r--Documentation/hwmon/ads101567
-rw-r--r--Documentation/hwmon/lineage-pem77
-rw-r--r--Documentation/hwmon/lm755
-rw-r--r--Documentation/hwmon/lm8512
-rw-r--r--Documentation/hwmon/max663949
-rw-r--r--Documentation/hwmon/pmbus215
-rw-r--r--Documentation/hwmon/twl4030-madc-hwmon45
-rw-r--r--Documentation/hwmon/w83795127
-rw-r--r--Documentation/hwspinlock.txt293
-rw-r--r--Documentation/i2c/busses/i2c-diolan-u2c26
-rw-r--r--Documentation/i2c/instantiating-devices2
-rw-r--r--Documentation/i2c/upgrading-clients18
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kbuild/kbuild.txt2
-rw-r--r--Documentation/kbuild/makefiles.txt3
-rw-r--r--Documentation/kernel-parameters.txt28
-rw-r--r--Documentation/kvm/api.txt96
-rw-r--r--Documentation/laptops/hpfall.c (renamed from Documentation/hwmon/hpfall.c)0
-rw-r--r--Documentation/memory-barriers.txt58
-rw-r--r--Documentation/misc-devices/lis3lv02d (renamed from Documentation/hwmon/lis3lv02d)4
-rw-r--r--Documentation/networking/00-INDEX6
-rw-r--r--Documentation/networking/batman-adv.txt16
-rw-r--r--Documentation/networking/ip-sysctl.txt11
-rw-r--r--Documentation/power/devices.txt94
-rw-r--r--Documentation/power/runtime_pm.txt13
-rw-r--r--Documentation/power/states.txt12
-rw-r--r--Documentation/powerpc/00-INDEX4
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas23
-rw-r--r--Documentation/scsi/hpsa.txt11
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt14
-rw-r--r--Documentation/spinlocks.txt24
-rw-r--r--Documentation/sysctl/kernel.txt2
-rw-r--r--Documentation/trace/ftrace-design.txt7
-rw-r--r--Documentation/trace/ftrace.txt148
-rw-r--r--Documentation/trace/kprobetrace.txt16
-rw-r--r--MAINTAINERS89
-rw-r--r--Makefile11
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/cacheflush.h2
-rw-r--r--arch/alpha/include/asm/errno.h2
-rw-r--r--arch/alpha/include/asm/rwsem.h36
-rw-r--r--arch/alpha/kernel/time.c8
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S5
-rw-r--r--arch/arm/Kconfig133
-rw-r--r--arch/arm/Makefile11
-rw-r--r--arch/arm/boot/Makefile4
-rw-r--r--arch/arm/boot/compressed/.gitignore6
-rw-r--r--arch/arm/boot/compressed/Makefile21
-rw-r--r--arch/arm/boot/compressed/head-shmobile.S30
-rw-r--r--arch/arm/boot/compressed/head-vt8500.S46
-rw-r--r--arch/arm/boot/compressed/head.S251
-rw-r--r--arch/arm/boot/compressed/misc.c2
-rw-r--r--arch/arm/boot/compressed/mmcif-sh7372.c87
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.in3
-rw-r--r--arch/arm/common/Kconfig2
-rw-r--r--arch/arm/common/gic.c25
-rw-r--r--arch/arm/configs/exynos4_defconfig70
-rw-r--r--arch/arm/configs/lpd7a400_defconfig68
-rw-r--r--arch/arm/configs/lpd7a404_defconfig81
-rw-r--r--arch/arm/configs/mx51_defconfig53
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/tegra_defconfig123
-rw-r--r--arch/arm/configs/u8500_defconfig65
-rw-r--r--arch/arm/configs/vexpress_defconfig140
-rw-r--r--arch/arm/include/asm/bitops.h60
-rw-r--r--arch/arm/include/asm/cacheflush.h136
-rw-r--r--arch/arm/include/asm/cpu-multi32.h69
-rw-r--r--arch/arm/include/asm/cpu-single.h44
-rw-r--r--arch/arm/include/asm/cputype.h3
-rw-r--r--arch/arm/include/asm/fncpy.h94
-rw-r--r--arch/arm/include/asm/glue-cache.h146
-rw-r--r--arch/arm/include/asm/glue-df.h110
-rw-r--r--arch/arm/include/asm/glue-pf.h57
-rw-r--r--arch/arm/include/asm/glue-proc.h264
-rw-r--r--arch/arm/include/asm/glue.h138
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h1
-rw-r--r--arch/arm/include/asm/hardware/sp810.h3
-rw-r--r--arch/arm/include/asm/highmem.h29
-rw-r--r--arch/arm/include/asm/localtimer.h8
-rw-r--r--arch/arm/include/asm/mach/arch.h4
-rw-r--r--arch/arm/include/asm/memory.h75
-rw-r--r--arch/arm/include/asm/module.h27
-rw-r--r--arch/arm/include/asm/outercache.h14
-rw-r--r--arch/arm/include/asm/pgalloc.h4
-rw-r--r--arch/arm/include/asm/pgtable.h26
-rw-r--r--arch/arm/include/asm/pmu.h14
-rw-r--r--arch/arm/include/asm/proc-fns.h306
-rw-r--r--arch/arm/include/asm/processor.h12
-rw-r--r--arch/arm/include/asm/ptrace.h2
-rw-r--r--arch/arm/include/asm/setup.h8
-rw-r--r--arch/arm/include/asm/smp_scu.h7
-rw-r--r--arch/arm/include/asm/spinlock.h53
-rw-r--r--arch/arm/include/asm/system.h17
-rw-r--r--arch/arm/include/asm/tlb.h106
-rw-r--r--arch/arm/include/asm/tlbflush.h7
-rw-r--r--arch/arm/include/asm/tls.h11
-rw-r--r--arch/arm/include/asm/traps.h1
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/armksyms.c22
-rw-r--r--arch/arm/kernel/asm-offsets.c11
-rw-r--r--arch/arm/kernel/bios32.c5
-rw-r--r--arch/arm/kernel/debug.S2
-rw-r--r--arch/arm/kernel/entry-armv.S3
-rw-r--r--arch/arm/kernel/entry-header.S14
-rw-r--r--arch/arm/kernel/etm.c4
-rw-r--r--arch/arm/kernel/head-common.S90
-rw-r--r--arch/arm/kernel/head-nommu.S3
-rw-r--r--arch/arm/kernel/head.S144
-rw-r--r--arch/arm/kernel/hw_breakpoint.c26
-rw-r--r--arch/arm/kernel/irq.c50
-rw-r--r--arch/arm/kernel/kprobes-decode.c2
-rw-r--r--arch/arm/kernel/module.c35
-rw-r--r--arch/arm/kernel/perf_event.c17
-rw-r--r--arch/arm/kernel/perf_event_v6.c4
-rw-r--r--arch/arm/kernel/pmu.c22
-rw-r--r--arch/arm/kernel/ptrace.c389
-rw-r--r--arch/arm/kernel/ptrace.h37
-rw-r--r--arch/arm/kernel/return_address.c1
-rw-r--r--arch/arm/kernel/setup.c79
-rw-r--r--arch/arm/kernel/signal.c13
-rw-r--r--arch/arm/kernel/sleep.S134
-rw-r--r--arch/arm/kernel/smp.c7
-rw-r--r--arch/arm/kernel/smp_scu.c23
-rw-r--r--arch/arm/kernel/tcm.c2
-rw-r--r--arch/arm/kernel/time.c4
-rw-r--r--arch/arm/kernel/traps.c10
-rw-r--r--arch/arm/kernel/vmlinux.lds.S17
-rw-r--r--arch/arm/lib/bitops.h50
-rw-r--r--arch/arm/lib/changebit.S10
-rw-r--r--arch/arm/lib/clearbit.S11
-rw-r--r--arch/arm/lib/setbit.S11
-rw-r--r--arch/arm/lib/testchangebit.S9
-rw-r--r--arch/arm/lib/testclearbit.S9
-rw-r--r--arch/arm/lib/testsetbit.S9
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c7
-rw-r--r--arch/arm/mach-aaec2000/Kconfig11
-rw-r--r--arch/arm/mach-aaec2000/Makefile9
-rw-r--r--arch/arm/mach-aaec2000/Makefile.boot1
-rw-r--r--arch/arm/mach-aaec2000/aaed2000.c102
-rw-r--r--arch/arm/mach-aaec2000/core.c298
-rw-r--r--arch/arm/mach-aaec2000/core.h28
-rw-r--r--arch/arm/mach-aaec2000/include/mach/aaec2000.h207
-rw-r--r--arch/arm/mach-aaec2000/include/mach/aaed2000.h40
-rw-r--r--arch/arm/mach-aaec2000/include/mach/debug-macro.S35
-rw-r--r--arch/arm/mach-aaec2000/include/mach/entry-macro.S40
-rw-r--r--arch/arm/mach-aaec2000/include/mach/hardware.h50
-rw-r--r--arch/arm/mach-aaec2000/include/mach/io.h18
-rw-r--r--arch/arm/mach-aaec2000/include/mach/irqs.h46
-rw-r--r--arch/arm/mach-aaec2000/include/mach/memory.h17
-rw-r--r--arch/arm/mach-aaec2000/include/mach/system.h24
-rw-r--r--arch/arm/mach-aaec2000/include/mach/timex.h18
-rw-r--r--arch/arm/mach-aaec2000/include/mach/uncompress.h46
-rw-r--r--arch/arm/mach-aaec2000/include/mach/vmalloc.h16
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c1
-rw-r--r--arch/arm/mach-at91/include/mach/gpio.h11
-rw-r--r--arch/arm/mach-at91/include/mach/memory.h2
-rw-r--r--arch/arm/mach-bcmring/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-bcmring/include/mach/memory.h2
-rw-r--r--arch/arm/mach-clps711x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-clps711x/include/mach/time.h2
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c18
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c83
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c284
-rw-r--r--arch/arm/mach-davinci/da850.c83
-rw-r--r--arch/arm/mach-davinci/dm355.c5
-rw-r--r--arch/arm/mach-davinci/dm365.c5
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h7
-rw-r--r--arch/arm/mach-davinci/include/mach/memory.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/mux.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/spi.h15
-rw-r--r--arch/arm/mach-dove/Kconfig2
-rw-r--r--arch/arm/mach-dove/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ebsa110/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c33
-rw-r--r--arch/arm/mach-ep93xx/gpio.c33
-rw-r--r--arch/arm/mach-ep93xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-ep93xx/include/mach/memory.h10
-rw-r--r--arch/arm/mach-exynos4/Kconfig (renamed from arch/arm/mach-s5pv310/Kconfig)105
-rw-r--r--arch/arm/mach-exynos4/Makefile43
-rw-r--r--arch/arm/mach-exynos4/Makefile.boot (renamed from arch/arm/mach-s5pv310/Makefile.boot)0
-rw-r--r--arch/arm/mach-exynos4/clock.c (renamed from arch/arm/mach-s5pv310/clock.c)190
-rw-r--r--arch/arm/mach-exynos4/cpu.c (renamed from arch/arm/mach-s5pv310/cpu.c)87
-rw-r--r--arch/arm/mach-exynos4/cpufreq.c (renamed from arch/arm/mach-s5pv310/cpufreq.c)100
-rw-r--r--arch/arm/mach-exynos4/dev-audio.c (renamed from arch/arm/mach-s5pv310/dev-audio.c)143
-rw-r--r--arch/arm/mach-exynos4/dev-pd.c (renamed from arch/arm/mach-s5pv310/dev-pd.c)40
-rw-r--r--arch/arm/mach-exynos4/dev-sysmmu.c (renamed from arch/arm/mach-s5pv310/dev-sysmmu.c)78
-rw-r--r--arch/arm/mach-exynos4/dma.c (renamed from arch/arm/mach-s5pv310/dma.c)50
-rw-r--r--arch/arm/mach-exynos4/gpiolib.c (renamed from arch/arm/mach-s5pv310/gpiolib.c)154
-rw-r--r--arch/arm/mach-exynos4/headsmp.S (renamed from arch/arm/mach-s5pv310/headsmp.S)6
-rw-r--r--arch/arm/mach-exynos4/hotplug.c (renamed from arch/arm/mach-s5pv310/hotplug.c)10
-rw-r--r--arch/arm/mach-exynos4/include/mach/debug-macro.S (renamed from arch/arm/mach-s5pv310/include/mach/debug-macro.S)6
-rw-r--r--arch/arm/mach-exynos4/include/mach/dma.h (renamed from arch/arm/mach-s5pv310/include/mach/dma.h)0
-rw-r--r--arch/arm/mach-exynos4/include/mach/entry-macro.S (renamed from arch/arm/mach-s5pv310/include/mach/entry-macro.S)4
-rw-r--r--arch/arm/mach-exynos4/include/mach/gpio.h135
-rw-r--r--arch/arm/mach-exynos4/include/mach/hardware.h (renamed from arch/arm/mach-s5pv310/include/mach/hardware.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/io.h (renamed from arch/arm/mach-s5pv310/include/mach/io.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/irqs.h (renamed from arch/arm/mach-s5pv310/include/mach/irqs.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/map.h144
-rw-r--r--arch/arm/mach-exynos4/include/mach/memory.h (renamed from arch/arm/mach-s5pv310/include/mach/memory.h)10
-rw-r--r--arch/arm/mach-exynos4/include/mach/pwm-clock.h (renamed from arch/arm/mach-s5pv310/include/mach/pwm-clock.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-clock.h (renamed from arch/arm/mach-s5pv310/include/mach/regs-clock.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-gpio.h42
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-irq.h (renamed from arch/arm/mach-s5pv310/include/mach/regs-irq.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-mem.h (renamed from arch/arm/mach-s5pv310/include/mach/regs-mem.h)6
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-pmu.h (renamed from arch/arm/mach-s5pv310/include/mach/regs-pmu.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-sysmmu.h (renamed from arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h)6
-rw-r--r--arch/arm/mach-exynos4/include/mach/smp.h (renamed from arch/arm/mach-s5pv310/include/mach/smp.h)2
-rw-r--r--arch/arm/mach-exynos4/include/mach/sysmmu.h (renamed from arch/arm/mach-s5pv310/include/mach/sysmmu.h)18
-rw-r--r--arch/arm/mach-exynos4/include/mach/system.h (renamed from arch/arm/mach-s5pv310/include/mach/system.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/timex.h (renamed from arch/arm/mach-s5pv310/include/mach/timex.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/uncompress.h (renamed from arch/arm/mach-s5pv310/include/mach/uncompress.h)8
-rw-r--r--arch/arm/mach-exynos4/include/mach/vmalloc.h (renamed from arch/arm/mach-s5pv310/include/mach/vmalloc.h)8
-rw-r--r--arch/arm/mach-exynos4/init.c (renamed from arch/arm/mach-s5pv310/init.c)10
-rw-r--r--arch/arm/mach-exynos4/irq-combiner.c (renamed from arch/arm/mach-s5pv310/irq-combiner.c)4
-rw-r--r--arch/arm/mach-exynos4/irq-eint.c (renamed from arch/arm/mach-s5pv310/irq-eint.c)62
-rw-r--r--arch/arm/mach-exynos4/localtimer.c (renamed from arch/arm/mach-s5pv310/localtimer.c)5
-rw-r--r--arch/arm/mach-exynos4/mach-smdkc210.c (renamed from arch/arm/mach-s5pv310/mach-smdkc210.c)48
-rw-r--r--arch/arm/mach-exynos4/mach-smdkv310.c (renamed from arch/arm/mach-s5pv310/mach-smdkv310.c)48
-rw-r--r--arch/arm/mach-exynos4/mach-universal_c210.c (renamed from arch/arm/mach-s5pv310/mach-universal_c210.c)22
-rw-r--r--arch/arm/mach-exynos4/platsmp.c (renamed from arch/arm/mach-s5pv310/platsmp.c)12
-rw-r--r--arch/arm/mach-exynos4/setup-i2c0.c (renamed from arch/arm/mach-s5pv310/setup-i2c0.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c1.c (renamed from arch/arm/mach-s5pv310/setup-i2c1.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c2.c (renamed from arch/arm/mach-s5pv310/setup-i2c2.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c3.c (renamed from arch/arm/mach-s5pv310/setup-i2c3.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c4.c (renamed from arch/arm/mach-s5pv310/setup-i2c4.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c5.c (renamed from arch/arm/mach-s5pv310/setup-i2c5.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c6.c (renamed from arch/arm/mach-s5pv310/setup-i2c6.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-i2c7.c (renamed from arch/arm/mach-s5pv310/setup-i2c7.c)4
-rw-r--r--arch/arm/mach-exynos4/setup-sdhci-gpio.c (renamed from arch/arm/mach-s5pv310/setup-sdhci-gpio.c)52
-rw-r--r--arch/arm/mach-exynos4/setup-sdhci.c (renamed from arch/arm/mach-s5pv310/setup-sdhci.c)12
-rw-r--r--arch/arm/mach-exynos4/time.c (renamed from arch/arm/mach-s5pv310/time.c)64
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c84
-rw-r--r--arch/arm/mach-footbridge/include/mach/memory.h2
-rw-r--r--arch/arm/mach-footbridge/isa-timer.c129
-rw-r--r--arch/arm/mach-gemini/board-nas4220b.c1
-rw-r--r--arch/arm/mach-gemini/board-rut1xx.c1
-rw-r--r--arch/arm/mach-gemini/board-wbd111.c1
-rw-r--r--arch/arm/mach-gemini/board-wbd222.c1
-rw-r--r--arch/arm/mach-gemini/common.h1
-rw-r--r--arch/arm/mach-gemini/devices.c26
-rw-r--r--arch/arm/mach-gemini/include/mach/memory.h4
-rw-r--r--arch/arm/mach-h720x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-imx/Kconfig12
-rw-r--r--arch/arm/mach-imx/Makefile5
-rw-r--r--arch/arm/mach-imx/clock-imx1.c1
-rw-r--r--arch/arm/mach-imx/clock-imx25.c3
-rw-r--r--arch/arm/mach-imx/devices-imx1.h7
-rw-r--r--arch/arm/mach-imx/dma-v1.c2
-rw-r--r--arch/arm/mach-imx/ehci-imx25.c80
-rw-r--r--arch/arm/mach-imx/ehci-imx27.c82
-rw-r--r--arch/arm/mach-imx/eukrea_mbimx27-baseboard.c7
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c8
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c25
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c27
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c32
-rw-r--r--arch/arm/mach-imx/mach-imx27ipcam.c78
-rw-r--r--arch/arm/mach-imx/mach-imx27lite.c11
-rw-r--r--arch/arm/mach-imx/mach-mx1ads.c22
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c11
-rw-r--r--arch/arm/mach-imx/mach-mx25_3ds.c32
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c37
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c11
-rw-r--r--arch/arm/mach-imx/mach-mxt_td60.c12
-rw-r--r--arch/arm/mach-imx/mach-pca100.c28
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c33
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c13
-rw-r--r--arch/arm/mach-imx/mm-imx1.c7
-rw-r--r--arch/arm/mach-imx/mm-imx21.c7
-rw-r--r--arch/arm/mach-imx/mm-imx25.c7
-rw-r--r--arch/arm/mach-imx/mm-imx27.c7
-rw-r--r--arch/arm/mach-integrator/Kconfig1
-rw-r--r--arch/arm/mach-integrator/common.h1
-rw-r--r--arch/arm/mach-integrator/core.c7
-rw-r--r--arch/arm/mach-integrator/impd1.c5
-rw-r--r--arch/arm/mach-integrator/include/mach/cm.h4
-rw-r--r--arch/arm/mach-integrator/include/mach/memory.h2
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c44
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c239
-rw-r--r--arch/arm/mach-iop13xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-iop32x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-iop33x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ixp2000/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-kirkwood/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ks8695/include/mach/memory.h2
-rw-r--r--arch/arm/mach-lh7a40x/Kconfig74
-rw-r--r--arch/arm/mach-lh7a40x/Makefile17
-rw-r--r--arch/arm/mach-lh7a40x/Makefile.boot4
-rw-r--r--arch/arm/mach-lh7a40x/arch-kev7a400.c118
-rw-r--r--arch/arm/mach-lh7a40x/arch-lpd7a40x.c422
-rw-r--r--arch/arm/mach-lh7a40x/clcd.c241
-rw-r--r--arch/arm/mach-lh7a40x/clocks.c108
-rw-r--r--arch/arm/mach-lh7a40x/common.h17
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/clocks.h18
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/constants.h91
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/debug-macro.S37
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/dma.h86
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/entry-macro.S149
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/hardware.h62
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/io.h20
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/irqs.h200
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/memory.h28
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/registers.h224
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/ssp.h70
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/system.h19
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/timex.h17
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/uncompress.h38
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-lh7a40x/irq-kev7a400.c93
-rw-r--r--arch/arm/mach-lh7a40x/irq-lh7a400.c91
-rw-r--r--arch/arm/mach-lh7a40x/irq-lh7a404.c175
-rw-r--r--arch/arm/mach-lh7a40x/irq-lpd7a40x.c128
-rw-r--r--arch/arm/mach-lh7a40x/lcd-panel.h345
-rw-r--r--arch/arm/mach-lh7a40x/ssp-cpld.c343
-rw-r--r--arch/arm/mach-lh7a40x/time.c71
-rw-r--r--arch/arm/mach-loki/include/mach/memory.h2
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/memory.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/mmp2.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/pxa168.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/pxa910.h2
-rw-r--r--arch/arm/mach-msm/Kconfig35
-rw-r--r--arch/arm/mach-msm/Makefile29
-rw-r--r--arch/arm/mach-msm/board-halibut.c2
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c2
-rw-r--r--arch/arm/mach-msm/board-msm7x27.c16
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c37
-rw-r--r--arch/arm/mach-msm/board-msm8960.c91
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c4
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c90
-rw-r--r--arch/arm/mach-msm/board-sapphire.c4
-rw-r--r--arch/arm/mach-msm/board-trout-gpio.c2
-rw-r--r--arch/arm/mach-msm/board-trout.c3
-rw-r--r--arch/arm/mach-msm/clock-7x30.h61
-rw-r--r--arch/arm/mach-msm/clock-debug.c130
-rw-r--r--arch/arm/mach-msm/clock-dummy.c54
-rw-r--r--arch/arm/mach-msm/clock-pcom.c7
-rw-r--r--arch/arm/mach-msm/clock-pcom.h51
-rw-r--r--arch/arm/mach-msm/clock.c192
-rw-r--r--arch/arm/mach-msm/clock.h55
-rw-r--r--arch/arm/mach-msm/devices-iommu.c (renamed from arch/arm/mach-msm/devices-msm8x60-iommu.c)54
-rw-r--r--arch/arm/mach-msm/devices-msm7x00.c36
-rw-r--r--arch/arm/mach-msm/devices-msm7x30.c11
-rw-r--r--arch/arm/mach-msm/devices-msm8960.c85
-rw-r--r--arch/arm/mach-msm/devices-qsd8x50.c213
-rw-r--r--arch/arm/mach-msm/devices.h11
-rw-r--r--arch/arm/mach-msm/gpiomux-7x30.c38
-rw-r--r--arch/arm/mach-msm/gpiomux-8x50.c23
-rw-r--r--arch/arm/mach-msm/headsmp.S2
-rw-r--r--arch/arm/mach-msm/include/mach/board.h4
-rw-r--r--arch/arm/mach-msm/include/mach/clk.h31
-rw-r--r--arch/arm/mach-msm/include/mach/clkdev.h19
-rw-r--r--arch/arm/mach-msm/include/mach/cpu.h54
-rw-r--r--arch/arm/mach-msm/include/mach/io.h1
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-7x30.h31
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-8960.h277
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-8x50.h31
-rw-r--r--arch/arm/mach-msm/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-msm/include/mach/memory.h12
-rw-r--r--arch/arm/mach-msm/include/mach/mmc.h11
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x00.h16
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x30.h14
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8960.h48
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x50.h22
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x60.h61
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h9
-rw-r--r--arch/arm/mach-msm/include/mach/sirc.h31
-rw-r--r--arch/arm/mach-msm/include/mach/smp.h30
-rw-r--r--arch/arm/mach-msm/io.c41
-rw-r--r--arch/arm/mach-msm/scm-boot.h30
-rw-r--r--arch/arm/mach-msm/scm.h30
-rw-r--r--arch/arm/mach-msm/timer.c49
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/memory.h2
-rw-r--r--arch/arm/mach-mx3/Kconfig25
-rw-r--r--arch/arm/mach-mx3/Makefile6
-rw-r--r--arch/arm/mach-mx3/devices-imx35.h2
-rw-r--r--arch/arm/mach-mx3/ehci-imx31.c83
-rw-r--r--arch/arm/mach-mx3/ehci-imx35.c80
-rw-r--r--arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c4
-rw-r--r--arch/arm/mach-mx3/iomux-imx31.c12
-rw-r--r--arch/arm/mach-mx3/mach-armadillo5x0.c21
-rw-r--r--arch/arm/mach-mx3/mach-bug.c66
-rw-r--r--arch/arm/mach-mx3/mach-cpuimx35.c31
-rw-r--r--arch/arm/mach-mx3/mach-kzm_arm11_01.c19
-rw-r--r--arch/arm/mach-mx3/mach-mx31_3ds.c66
-rw-r--r--arch/arm/mach-mx3/mach-mx31ads.c57
-rw-r--r--arch/arm/mach-mx3/mach-mx31lilly.c33
-rw-r--r--arch/arm/mach-mx3/mach-mx31lite.c25
-rw-r--r--arch/arm/mach-mx3/mach-mx31moboard.c34
-rw-r--r--arch/arm/mach-mx3/mach-mx35_3ds.c37
-rw-r--r--arch/arm/mach-mx3/mach-pcm037.c27
-rw-r--r--arch/arm/mach-mx3/mach-pcm037_eet.c2
-rw-r--r--arch/arm/mach-mx3/mach-pcm043.c46
-rw-r--r--arch/arm/mach-mx3/mach-qong.c22
-rw-r--r--arch/arm/mach-mx3/mach-vpr200.c328
-rw-r--r--arch/arm/mach-mx3/mm.c23
-rw-r--r--arch/arm/mach-mx3/mx31moboard-devboard.c7
-rw-r--r--arch/arm/mach-mx3/mx31moboard-marxbot.c6
-rw-r--r--arch/arm/mach-mx3/mx31moboard-smartbot.c7
-rw-r--r--arch/arm/mach-mx5/Kconfig24
-rw-r--r--arch/arm/mach-mx5/Makefile4
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51.c20
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51sd.c31
-rw-r--r--arch/arm/mach-mx5/board-mx50_rdp.c34
-rw-r--r--arch/arm/mach-mx5/board-mx51_3ds.c43
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c43
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikamx.c211
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikasb.c283
-rw-r--r--arch/arm/mach-mx5/board-mx53_evk.c40
-rw-r--r--arch/arm/mach-mx5/board-mx53_loco.c178
-rw-r--r--arch/arm/mach-mx5/board-mx53_smd.c40
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c148
-rw-r--r--arch/arm/mach-mx5/cpu.c11
-rw-r--r--arch/arm/mach-mx5/crm_regs.h7
-rw-r--r--arch/arm/mach-mx5/devices-imx50.h (renamed from arch/arm/mach-mx5/devices-mx50.h)8
-rw-r--r--arch/arm/mach-mx5/devices-imx53.h4
-rw-r--r--arch/arm/mach-mx5/efika.h10
-rw-r--r--arch/arm/mach-mx5/ehci.c156
-rw-r--r--arch/arm/mach-mx5/mm-mx50.c6
-rw-r--r--arch/arm/mach-mx5/mm.c14
-rw-r--r--arch/arm/mach-mx5/mx51_efika.c636
-rw-r--r--arch/arm/mach-mxc91231/iomux.c14
-rw-r--r--arch/arm/mach-mxc91231/magx-zn5.c11
-rw-r--r--arch/arm/mach-mxc91231/mm.c7
-rw-r--r--arch/arm/mach-mxs/Kconfig21
-rw-r--r--arch/arm/mach-mxs/Makefile5
-rw-r--r--arch/arm/mach-mxs/clock-mx23.c8
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c19
-rw-r--r--arch/arm/mach-mxs/devices-mx23.h11
-rw-r--r--arch/arm/mach-mxs/devices-mx28.h23
-rw-r--r--arch/arm/mach-mxs/devices.c2
-rw-r--r--arch/arm/mach-mxs/devices/Kconfig16
-rw-r--r--arch/arm/mach-mxs/devices/Makefile5
-rw-r--r--arch/arm/mach-mxs/devices/platform-auart.c64
-rw-r--r--arch/arm/mach-mxs/devices/platform-fec.c5
-rw-r--r--arch/arm/mach-mxs/devices/platform-flexcan.c51
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxs-i2c.c51
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxs-pwm.c22
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxsfb.c46
-rw-r--r--arch/arm/mach-mxs/gpio.c75
-rw-r--r--arch/arm/mach-mxs/icoll.c16
-rw-r--r--arch/arm/mach-mxs/include/mach/common.h1
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h35
-rw-r--r--arch/arm/mach-mxs/include/mach/mmc.h18
-rw-r--r--arch/arm/mach-mxs/include/mach/mx23.h12
-rw-r--r--arch/arm/mach-mxs/include/mach/mxs.h9
-rw-r--r--arch/arm/mach-mxs/include/mach/mxsfb.h48
-rw-r--r--arch/arm/mach-mxs/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-mxs/mach-mx23evk.c11
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c56
-rw-r--r--arch/arm/mach-mxs/mach-tx28.c183
-rw-r--r--arch/arm/mach-mxs/module-tx28.c131
-rw-r--r--arch/arm/mach-mxs/module-tx28.h9
-rw-r--r--arch/arm/mach-mxs/ocotp.c90
-rw-r--r--arch/arm/mach-mxs/pm.c43
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx23.h124
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx28.h177
-rw-r--r--arch/arm/mach-mxs/system.c2
-rw-r--r--arch/arm/mach-netx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-nomadik/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ns9xxx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-nuc93x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-omap1/Makefile2
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c16
-rw-r--r--arch/arm/mach-omap1/board-fsample.c4
-rw-r--r--arch/arm/mach-omap1/board-h2.c2
-rw-r--r--arch/arm/mach-omap1/board-h3.c2
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c4
-rw-r--r--arch/arm/mach-omap1/board-innovator.c2
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c6
-rw-r--r--arch/arm/mach-omap1/board-palmte.c13
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c107
-rw-r--r--arch/arm/mach-omap1/include/mach/debug-macro.S9
-rw-r--r--arch/arm/mach-omap1/mcbsp.c327
-rw-r--r--arch/arm/mach-omap1/pm.h6
-rw-r--r--arch/arm/mach-omap1/reset.c25
-rw-r--r--arch/arm/mach-omap1/sleep.S3
-rw-r--r--arch/arm/mach-omap1/sram.S1
-rw-r--r--arch/arm/mach-omap2/Kconfig29
-rw-r--r--arch/arm/mach-omap2/Makefile28
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c29
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c103
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c11
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c140
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c8
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c28
-rw-r--r--arch/arm/mach-omap2/board-apollon.c8
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c30
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c8
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c33
-rw-r--r--arch/arm/mach-omap2/board-flash.c32
-rw-r--r--arch/arm/mach-omap2/board-flash.h4
-rw-r--r--arch/arm/mach-omap2/board-generic.c8
-rw-r--r--arch/arm/mach-omap2/board-h4.c9
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c34
-rw-r--r--arch/arm/mach-omap2/board-igep0030.c12
-rw-r--r--arch/arm/mach-omap2/board-ldp.c10
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c20
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c31
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c245
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c9
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c31
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c33
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c9
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c85
-rw-r--r--arch/arm/mach-omap2/board-overo.c8
-rw-r--r--arch/arm/mach-omap2/board-rm680.c10
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c12
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c15
-rw-r--r--arch/arm/mach-omap2/board-rx51.c8
-rw-r--r--arch/arm/mach-omap2/board-ti8168evm.c62
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c15
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c14
-rw-r--r--arch/arm/mach-omap2/board-zoom.c19
-rw-r--r--arch/arm/mach-omap2/clkt_clksel.c2
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c2
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c8
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c20
-rw-r--r--arch/arm/mach-omap2/clock2xxx.h4
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c31
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c35
-rw-r--r--arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c16
-rw-r--r--arch/arm/mach-omap2/common.c25
-rw-r--r--arch/arm/mach-omap2/control.h6
-rw-r--r--arch/arm/mach-omap2/devices.c611
-rw-r--r--arch/arm/mach-omap2/display.c102
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c7
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c113
-rw-r--r--arch/arm/mach-omap2/gpmc.c56
-rw-r--r--arch/arm/mach-omap2/hsmmc.c421
-rw-r--r--arch/arm/mach-omap2/hwspinlock.c63
-rw-r--r--arch/arm/mach-omap2/id.c64
-rw-r--r--arch/arm/mach-omap2/include/mach/debug-macro.S25
-rw-r--r--arch/arm/mach-omap2/include/mach/entry-macro.S13
-rw-r--r--arch/arm/mach-omap2/io.c36
-rw-r--r--arch/arm/mach-omap2/iommu2.c33
-rw-r--r--arch/arm/mach-omap2/irq.c9
-rw-r--r--arch/arm/mach-omap2/mailbox.c76
-rw-r--r--arch/arm/mach-omap2/mcbsp.c231
-rw-r--r--arch/arm/mach-omap2/mux.c80
-rw-r--r--arch/arm/mach-omap2/mux.h3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c177
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c1303
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c1785
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c2267
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c3164
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c93
-rw-r--r--arch/arm/mach-omap2/opp2xxx.h2
-rw-r--r--arch/arm/mach-omap2/pm-debug.c8
-rw-r--r--arch/arm/mach-omap2/pm.h2
-rw-r--r--arch/arm/mach-omap2/powerdomains2xxx_data.c6
-rw-r--r--arch/arm/mach-omap2/prcm-common.h8
-rw-r--r--arch/arm/mach-omap2/prcm.c5
-rw-r--r--arch/arm/mach-omap2/prcm_mpu44xx.h4
-rw-r--r--arch/arm/mach-omap2/serial.c17
-rw-r--r--arch/arm/mach-omap2/sleep24xx.S2
-rw-r--r--arch/arm/mach-omap2/sleep34xx.S2
-rw-r--r--arch/arm/mach-omap2/smartreflex.c4
-rw-r--r--arch/arm/mach-omap2/sram242x.S3
-rw-r--r--arch/arm/mach-omap2/sram243x.S3
-rw-r--r--arch/arm/mach-omap2/sram34xx.S1
-rw-r--r--arch/arm/mach-omap2/timer-gp.c20
-rw-r--r--arch/arm/mach-omap2/timer-mpu.c7
-rw-r--r--arch/arm/mach-omap2/usb-musb.c219
-rw-r--r--arch/arm/mach-orion5x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-pnx4008/include/mach/memory.h2
-rw-r--r--arch/arm/mach-pxa/balloon3.c5
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-evalboard.c3
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270-income.c3
-rw-r--r--arch/arm/mach-pxa/corgi.c2
-rw-r--r--arch/arm/mach-pxa/csb726.c2
-rw-r--r--arch/arm/mach-pxa/devices.c2
-rw-r--r--arch/arm/mach-pxa/em-x270.c2
-rw-r--r--arch/arm/mach-pxa/ezx.c2
-rw-r--r--arch/arm/mach-pxa/hx4700.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/memory.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pm.h5
-rw-r--r--arch/arm/mach-pxa/littleton.c2
-rw-r--r--arch/arm/mach-pxa/magician.c2
-rw-r--r--arch/arm/mach-pxa/mainstone.c2
-rw-r--r--arch/arm/mach-pxa/mioa701.c2
-rw-r--r--arch/arm/mach-pxa/mxm8x10.c2
-rw-r--r--arch/arm/mach-pxa/palm27x.c3
-rw-r--r--arch/arm/mach-pxa/palmz72.c2
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c2
-rw-r--r--arch/arm/mach-pxa/pm.c5
-rw-r--r--arch/arm/mach-pxa/poodle.c2
-rw-r--r--arch/arm/mach-pxa/pxa25x.c4
-rw-r--r--arch/arm/mach-pxa/pxa27x.c7
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c9
-rw-r--r--arch/arm/mach-pxa/pxa95x.c2
-rw-r--r--arch/arm/mach-pxa/raumfeld.c2
-rw-r--r--arch/arm/mach-pxa/saar.c2
-rw-r--r--arch/arm/mach-pxa/saarb.c3
-rw-r--r--arch/arm/mach-pxa/sleep.S191
-rw-r--r--arch/arm/mach-pxa/spitz.c3
-rw-r--r--arch/arm/mach-pxa/stargate2.c2
-rw-r--r--arch/arm/mach-pxa/tavorevb3.c3
-rw-r--r--arch/arm/mach-pxa/tosa.c2
-rw-r--r--arch/arm/mach-pxa/trizeps4.c2
-rw-r--r--arch/arm/mach-pxa/viper.c2
-rw-r--r--arch/arm/mach-pxa/vpac270.c3
-rw-r--r--arch/arm/mach-pxa/xcep.c3
-rw-r--r--arch/arm/mach-pxa/z2.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c5
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa300.c2
-rw-r--r--arch/arm/mach-realview/Kconfig5
-rw-r--r--arch/arm/mach-realview/Makefile3
-rw-r--r--arch/arm/mach-realview/core.c233
-rw-r--r--arch/arm/mach-realview/core.h2
-rw-r--r--arch/arm/mach-realview/headsmp.S40
-rw-r--r--arch/arm/mach-realview/include/mach/memory.h4
-rw-r--r--arch/arm/mach-realview/localtimer.c26
-rw-r--r--arch/arm/mach-realview/platsmp.c98
-rw-r--r--arch/arm/mach-realview/realview_eb.c24
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c24
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c24
-rw-r--r--arch/arm/mach-realview/realview_pba8.c24
-rw-r--r--arch/arm/mach-realview/realview_pbx.c24
-rw-r--r--arch/arm/mach-rpc/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s3c2400/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s3c2410/h1940-bluetooth.c11
-rw-r--r--arch/arm/mach-s3c2410/include/mach/h1940.h3
-rw-r--r--arch/arm/mach-s3c2410/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s3c2410/mach-h1940.c302
-rw-r--r--arch/arm/mach-s3c2440/Kconfig1
-rw-r--r--arch/arm/mach-s3c2440/include/mach/gta02.h26
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c71
-rw-r--r--arch/arm/mach-s3c2440/mach-rx1950.c57
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s3c64xx/sleep.S63
-rw-r--r--arch/arm/mach-s5p6442/include/mach/map.h69
-rw-r--r--arch/arm/mach-s5p6442/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/gpio.h4
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/map.h83
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s5pc100/include/mach/map.h193
-rw-r--r--arch/arm/mach-s5pc100/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s5pv210/include/mach/map.h168
-rw-r--r--arch/arm/mach-s5pv210/include/mach/memory.h2
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c67
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c126
-rw-r--r--arch/arm/mach-s5pv210/sleep.S105
-rw-r--r--arch/arm/mach-s5pv310/Makefile43
-rw-r--r--arch/arm/mach-s5pv310/include/mach/gpio.h135
-rw-r--r--arch/arm/mach-s5pv310/include/mach/map.h147
-rw-r--r--arch/arm/mach-s5pv310/include/mach/regs-gpio.h42
-rw-r--r--arch/arm/mach-sa1100/include/mach/memory.h2
-rw-r--r--arch/arm/mach-sa1100/pm.c12
-rw-r--r--arch/arm/mach-sa1100/sleep.S72
-rw-r--r--arch/arm/mach-shark/include/mach/memory.h2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c11
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c13
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c13
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c17
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-ap4evb.txt10
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-mackerel.txt10
-rw-r--r--arch/arm/mach-shmobile/include/mach/memory.h2
-rw-r--r--arch/arm/mach-shmobile/include/mach/mmcif-ap4eb.h29
-rw-r--r--arch/arm/mach-shmobile/include/mach/mmcif-mackerel.h39
-rw-r--r--arch/arm/mach-shmobile/include/mach/mmcif.h18
-rw-r--r--arch/arm/mach-shmobile/localtimer.c3
-rw-r--r--arch/arm/mach-spear3xx/clock.c473
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h10
-rw-r--r--arch/arm/mach-spear3xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/misc_regs.h141
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear320.h2
-rw-r--r--arch/arm/mach-spear3xx/spear300.c14
-rw-r--r--arch/arm/mach-spear3xx/spear300_evb.c11
-rw-r--r--arch/arm/mach-spear3xx/spear310.c11
-rw-r--r--arch/arm/mach-spear3xx/spear310_evb.c11
-rw-r--r--arch/arm/mach-spear3xx/spear320.c11
-rw-r--r--arch/arm/mach-spear3xx/spear320_evb.c11
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c41
-rw-r--r--arch/arm/mach-spear6xx/clock.c350
-rw-r--r--arch/arm/mach-spear6xx/include/mach/generic.h3
-rw-r--r--arch/arm/mach-spear6xx/include/mach/hardware.h3
-rw-r--r--arch/arm/mach-spear6xx/include/mach/misc_regs.h141
-rw-r--r--arch/arm/mach-spear6xx/spear600_evb.c2
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c31
-rw-r--r--arch/arm/mach-tcc8k/board-tcc8000-sdk.c2
-rw-r--r--arch/arm/mach-tegra/Kconfig28
-rw-r--r--arch/arm/mach-tegra/Makefile10
-rw-r--r--arch/arm/mach-tegra/board-harmony-pinmux.c19
-rw-r--r--arch/arm/mach-tegra/board-harmony.c62
-rw-r--r--arch/arm/mach-tegra/board-seaboard-pinmux.c179
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c196
-rw-r--r--arch/arm/mach-tegra/board-seaboard.h38
-rw-r--r--arch/arm/mach-tegra/board-trimslice-pinmux.c145
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c103
-rw-r--r--arch/arm/mach-tegra/board-trimslice.h22
-rw-r--r--arch/arm/mach-tegra/board.h4
-rw-r--r--arch/arm/mach-tegra/clock.c532
-rw-r--r--arch/arm/mach-tegra/clock.h129
-rw-r--r--arch/arm/mach-tegra/common.c27
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c100
-rw-r--r--arch/arm/mach-tegra/devices.c505
-rw-r--r--arch/arm/mach-tegra/devices.h46
-rw-r--r--arch/arm/mach-tegra/dma.c243
-rw-r--r--arch/arm/mach-tegra/gpio.c15
-rw-r--r--arch/arm/mach-tegra/include/mach/clk.h6
-rw-r--r--arch/arm/mach-tegra/include/mach/debug-macro.S25
-rw-r--r--arch/arm/mach-tegra/include/mach/gpio.h9
-rw-r--r--arch/arm/mach-tegra/include/mach/harmony_audio.h22
-rw-r--r--arch/arm/mach-tegra/include/mach/iomap.h47
-rw-r--r--arch/arm/mach-tegra/include/mach/irqs.h14
-rw-r--r--arch/arm/mach-tegra/include/mach/kbc.h1
-rw-r--r--arch/arm/mach-tegra/include/mach/legacy_irq.h4
-rw-r--r--arch/arm/mach-tegra/include/mach/memory.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/pinmux-t2.h10
-rw-r--r--arch/arm/mach-tegra/include/mach/powergate.h40
-rw-r--r--arch/arm/mach-tegra/include/mach/suspend.h38
-rw-r--r--arch/arm/mach-tegra/include/mach/system.h10
-rw-r--r--arch/arm/mach-tegra/include/mach/uncompress.h18
-rw-r--r--arch/arm/mach-tegra/irq.c186
-rw-r--r--arch/arm/mach-tegra/legacy_irq.c109
-rw-r--r--arch/arm/mach-tegra/localtimer.c3
-rw-r--r--arch/arm/mach-tegra/pinmux-t2-tables.c26
-rw-r--r--arch/arm/mach-tegra/powergate.c212
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c1120
-rw-r--r--arch/arm/mach-tegra/tegra2_dvfs.c86
-rw-r--r--arch/arm/mach-tegra/tegra2_emc.c178
-rw-r--r--arch/arm/mach-tegra/tegra2_emc.h27
-rw-r--r--arch/arm/mach-tegra/timer.c77
-rw-r--r--arch/arm/mach-u300/core.c179
-rw-r--r--arch/arm/mach-u300/include/mach/memory.h6
-rw-r--r--arch/arm/mach-u300/mmc.c160
-rw-r--r--arch/arm/mach-u300/spi.c21
-rw-r--r--arch/arm/mach-u300/u300.c2
-rw-r--r--arch/arm/mach-ux500/Makefile8
-rw-r--r--arch/arm/mach-ux500/board-mop500-keypads.c229
-rw-r--r--arch/arm/mach-ux500/board-mop500-pins.c241
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c62
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c184
-rw-r--r--arch/arm/mach-ux500/board-mop500-stuib.c205
-rw-r--r--arch/arm/mach-ux500/board-mop500-u8500uib.c111
-rw-r--r--arch/arm/mach-ux500/board-mop500-uib.c135
-rw-r--r--arch/arm/mach-ux500/board-mop500.c328
-rw-r--r--arch/arm/mach-ux500/board-mop500.h25
-rw-r--r--arch/arm/mach-ux500/board-u5500-sdi.c25
-rw-r--r--arch/arm/mach-ux500/board-u5500.c6
-rw-r--r--arch/arm/mach-ux500/clock.c6
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c53
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c76
-rw-r--r--arch/arm/mach-ux500/devices-common.c1
-rw-r--r--arch/arm/mach-ux500/devices-common.h7
-rw-r--r--arch/arm/mach-ux500/devices-db5500.h19
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c67
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h15
-rw-r--r--arch/arm/mach-ux500/dma-db5500.c16
-rw-r--r--arch/arm/mach-ux500/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/uncompress.h6
-rw-r--r--arch/arm/mach-ux500/include/mach/usb.h25
-rw-r--r--arch/arm/mach-ux500/localtimer.c3
-rw-r--r--arch/arm/mach-ux500/mbox-db5500.c2
-rw-r--r--arch/arm/mach-ux500/usb.c160
-rw-r--r--arch/arm/mach-versatile/core.c295
-rw-r--r--arch/arm/mach-versatile/core.h2
-rw-r--r--arch/arm/mach-versatile/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-versatile/include/mach/memory.h2
-rw-r--r--arch/arm/mach-versatile/versatile_ab.c1
-rw-r--r--arch/arm/mach-versatile/versatile_pb.c6
-rw-r--r--arch/arm/mach-vexpress/Kconfig3
-rw-r--r--arch/arm/mach-vexpress/Makefile3
-rw-r--r--arch/arm/mach-vexpress/core.h1
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c70
-rw-r--r--arch/arm/mach-vexpress/include/mach/memory.h2
-rw-r--r--arch/arm/mach-vexpress/platsmp.c94
-rw-r--r--arch/arm/mach-vexpress/v2m.c31
-rw-r--r--arch/arm/mach-vt8500/Kconfig73
-rw-r--r--arch/arm/mach-vt8500/Makefile9
-rw-r--r--arch/arm/mach-vt8500/Makefile.boot3
-rw-r--r--arch/arm/mach-vt8500/bv07.c77
-rw-r--r--arch/arm/mach-vt8500/devices-vt8500.c91
-rw-r--r--arch/arm/mach-vt8500/devices-wm8505.c99
-rw-r--r--arch/arm/mach-vt8500/devices.c270
-rw-r--r--arch/arm/mach-vt8500/devices.h88
-rw-r--r--arch/arm/mach-vt8500/gpio.c240
-rw-r--r--arch/arm/mach-vt8500/include/mach/debug-macro.S31
-rw-r--r--arch/arm/mach-vt8500/include/mach/entry-macro.S32
-rw-r--r--arch/arm/mach-vt8500/include/mach/gpio.h6
-rw-r--r--arch/arm/mach-vt8500/include/mach/hardware.h (renamed from arch/arm/mach-tegra/tegra2_dvfs.h)10
-rw-r--r--arch/arm/mach-vt8500/include/mach/i8042.h18
-rw-r--r--arch/arm/mach-vt8500/include/mach/io.h28
-rw-r--r--arch/arm/mach-vt8500/include/mach/irqs.h22
-rw-r--r--arch/arm/mach-vt8500/include/mach/memory.h28
-rw-r--r--arch/arm/mach-vt8500/include/mach/system.h18
-rw-r--r--arch/arm/mach-vt8500/include/mach/timex.h26
-rw-r--r--arch/arm/mach-vt8500/include/mach/uncompress.h37
-rw-r--r--arch/arm/mach-vt8500/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-vt8500/include/mach/vt8500_irqs.h88
-rw-r--r--arch/arm/mach-vt8500/include/mach/vt8500_regs.h79
-rw-r--r--arch/arm/mach-vt8500/include/mach/vt8500fb.h31
-rw-r--r--arch/arm/mach-vt8500/include/mach/wm8505_irqs.h115
-rw-r--r--arch/arm/mach-vt8500/include/mach/wm8505_regs.h78
-rw-r--r--arch/arm/mach-vt8500/irq.c177
-rw-r--r--arch/arm/mach-vt8500/pwm.c265
-rw-r--r--arch/arm/mach-vt8500/timer.c155
-rw-r--r--arch/arm/mach-vt8500/wm8505_7in.c77
-rw-r--r--arch/arm/mach-w90x900/include/mach/memory.h2
-rw-r--r--arch/arm/mm/Kconfig57
-rw-r--r--arch/arm/mm/Makefile1
-rw-r--r--arch/arm/mm/abort-ev6.S6
-rw-r--r--arch/arm/mm/cache-l2x0.c6
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/fault-armv.c7
-rw-r--r--arch/arm/mm/fault.c39
-rw-r--r--arch/arm/mm/idmap.c35
-rw-r--r--arch/arm/mm/init.c6
-rw-r--r--arch/arm/mm/mm.h2
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm/mm/mmu.c81
-rw-r--r--arch/arm/mm/pgd.c24
-rw-r--r--arch/arm/mm/proc-arm1020.S3
-rw-r--r--arch/arm/mm/proc-arm1020e.S3
-rw-r--r--arch/arm/mm/proc-arm1022.S3
-rw-r--r--arch/arm/mm/proc-arm1026.S3
-rw-r--r--arch/arm/mm/proc-arm6_7.S6
-rw-r--r--arch/arm/mm/proc-arm720.S3
-rw-r--r--arch/arm/mm/proc-arm740.S3
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S3
-rw-r--r--arch/arm/mm/proc-arm920.S37
-rw-r--r--arch/arm/mm/proc-arm922.S3
-rw-r--r--arch/arm/mm/proc-arm925.S3
-rw-r--r--arch/arm/mm/proc-arm926.S37
-rw-r--r--arch/arm/mm/proc-arm940.S3
-rw-r--r--arch/arm/mm/proc-arm946.S3
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S3
-rw-r--r--arch/arm/mm/proc-fa526.S3
-rw-r--r--arch/arm/mm/proc-feroceon.S3
-rw-r--r--arch/arm/mm/proc-mohawk.S3
-rw-r--r--arch/arm/mm/proc-sa110.S3
-rw-r--r--arch/arm/mm/proc-sa1100.S39
-rw-r--r--arch/arm/mm/proc-v6.S50
-rw-r--r--arch/arm/mm/proc-v7.S122
-rw-r--r--arch/arm/mm/proc-xsc3.S48
-rw-r--r--arch/arm/mm/proc-xscale.S45
-rw-r--r--arch/arm/mm/vmregion.c17
-rw-r--r--arch/arm/plat-mxc/Makefile1
-rw-r--r--arch/arm/plat-mxc/devices.c2
-rw-r--r--arch/arm/plat-mxc/devices/platform-fec.c9
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-i2c.c10
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx2-wdt.c9
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c9
-rw-r--r--arch/arm/plat-mxc/ehci.c369
-rw-r--r--arch/arm/plat-mxc/gpio.c1
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h10
-rw-r--r--arch/arm/plat-mxc/include/mach/esdhc.h12
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx3.h8
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx35.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx50.h44
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx51.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx53.h2639
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mxc91231.h8
-rw-r--r--arch/arm/plat-mxc/include/mach/memory.h18
-rw-r--r--arch/arm/plat-mxc/include/mach/mx1.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/mx53.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_ehci.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/uncompress.h7
-rw-r--r--arch/arm/plat-nomadik/gpio.c375
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h3
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h22
-rw-r--r--arch/arm/plat-omap/Kconfig2
-rw-r--r--arch/arm/plat-omap/common.c11
-rw-r--r--arch/arm/plat-omap/counter_32k.c4
-rw-r--r--arch/arm/plat-omap/devices.c10
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/plat-omap/i2c.c2
-rw-r--r--arch/arm/plat-omap/include/plat/board.h4
-rw-r--r--arch/arm/plat-omap/include/plat/clkdev_omap.h1
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h1
-rw-r--r--arch/arm/plat-omap/include/plat/common.h1
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h44
-rw-r--r--arch/arm/plat-omap/include/plat/display.h16
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h11
-rw-r--r--arch/arm/plat-omap/include/plat/fpga.h92
-rw-r--r--arch/arm/plat-omap/include/plat/gpmc.h18
-rw-r--r--arch/arm/plat-omap/include/plat/hardware.h1
-rw-r--r--arch/arm/plat-omap/include/plat/io.h12
-rw-r--r--arch/arm/plat-omap/include/plat/iommu.h16
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h9
-rw-r--r--arch/arm/plat-omap/include/plat/l3_2xxx.h20
-rw-r--r--arch/arm/plat-omap/include/plat/l3_3xxx.h20
-rw-r--r--arch/arm/plat-omap/include/plat/l4_2xxx.h24
-rw-r--r--arch/arm/plat-omap/include/plat/l4_3xxx.h10
-rw-r--r--arch/arm/plat-omap/include/plat/mcbsp.h64
-rw-r--r--arch/arm/plat-omap/include/plat/mcspi.h11
-rw-r--r--arch/arm/plat-omap/include/plat/memory.h4
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h29
-rw-r--r--arch/arm/plat-omap/include/plat/multi.h4
-rw-r--r--arch/arm/plat-omap/include/plat/nand.h11
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h17
-rw-r--r--arch/arm/plat-omap/include/plat/onenand.h10
-rw-r--r--arch/arm/plat-omap/include/plat/prcm.h1
-rw-r--r--arch/arm/plat-omap/include/plat/sdrc.h8
-rw-r--r--arch/arm/plat-omap/include/plat/serial.h11
-rw-r--r--arch/arm/plat-omap/include/plat/sram.h14
-rw-r--r--arch/arm/plat-omap/include/plat/system.h38
-rw-r--r--arch/arm/plat-omap/include/plat/ti816x.h27
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h7
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h4
-rw-r--r--arch/arm/plat-omap/io.c5
-rw-r--r--arch/arm/plat-omap/iommu.c55
-rw-r--r--arch/arm/plat-omap/mailbox.c21
-rw-r--r--arch/arm/plat-omap/mcbsp.c203
-rw-r--r--arch/arm/plat-omap/sram.c34
-rw-r--r--arch/arm/plat-s3c24xx/sleep.S57
-rw-r--r--arch/arm/plat-s5p/Kconfig8
-rw-r--r--arch/arm/plat-s5p/cpu.c25
-rw-r--r--arch/arm/plat-s5p/dev-uart.c12
-rw-r--r--arch/arm/plat-s5p/include/plat/exynos4.h34
-rw-r--r--arch/arm/plat-s5p/include/plat/s5pv310.h34
-rw-r--r--arch/arm/plat-samsung/dev-ts.c1
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h23
-rw-r--r--arch/arm/plat-samsung/include/plat/pd.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h12
-rw-r--r--arch/arm/plat-samsung/include/plat/sdhci.h63
-rw-r--r--arch/arm/plat-samsung/pm.c16
-rw-r--r--arch/arm/plat-spear/Makefile4
-rw-r--r--arch/arm/plat-spear/clock.c844
-rw-r--r--arch/arm/plat-spear/include/plat/clock.h166
-rw-r--r--arch/arm/plat-spear/include/plat/hardware.h23
-rw-r--r--arch/arm/plat-spear/include/plat/memory.h2
-rw-r--r--arch/arm/plat-spear/include/plat/uncompress.h4
-rw-r--r--arch/arm/plat-spear/include/plat/vmalloc.h2
-rw-r--r--arch/arm/plat-spear/time.c21
-rw-r--r--arch/arm/plat-stmp3xxx/include/mach/memory.h2
-rw-r--r--arch/arm/plat-tcc/include/mach/memory.h2
-rw-r--r--arch/arm/plat-versatile/Kconfig17
-rw-r--r--arch/arm/plat-versatile/Makefile13
-rw-r--r--arch/arm/plat-versatile/clcd.c182
-rw-r--r--arch/arm/plat-versatile/fpga-irq.c72
-rw-r--r--arch/arm/plat-versatile/headsmp.S (renamed from arch/arm/mach-vexpress/headsmp.S)8
-rw-r--r--arch/arm/plat-versatile/include/plat/clcd.h9
-rw-r--r--arch/arm/plat-versatile/include/plat/fpga-irq.h12
-rw-r--r--arch/arm/plat-versatile/localtimer.c (renamed from arch/arm/mach-vexpress/localtimer.c)5
-rw-r--r--arch/arm/plat-versatile/platsmp.c104
-rw-r--r--arch/arm/vfp/vfpmodule.c9
-rw-r--r--arch/avr32/Kconfig1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c6
-rw-r--r--arch/avr32/mm/cache.c2
-rw-r--r--arch/blackfin/kernel/time.c6
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S2
-rw-r--r--arch/cris/arch-v10/kernel/time.c4
-rw-r--r--arch/cris/arch-v10/mm/init.c2
-rw-r--r--arch/cris/arch-v32/kernel/smp.c4
-rw-r--r--arch/cris/arch-v32/kernel/time.c6
-rw-r--r--arch/cris/kernel/vmlinux.lds.S7
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/frv/kernel/time.c14
-rw-r--r--arch/frv/kernel/vmlinux.lds.S2
-rw-r--r--arch/h8300/kernel/time.c4
-rw-r--r--arch/h8300/kernel/timer/timer8.c2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/configs/generic_defconfig1
-rw-r--r--arch/ia64/configs/gensparse_defconfig1
-rw-r--r--arch/ia64/include/asm/acpi.h6
-rw-r--r--arch/ia64/include/asm/dma-mapping.h2
-rw-r--r--arch/ia64/include/asm/perfmon.h2
-rw-r--r--arch/ia64/include/asm/rwsem.h37
-rw-r--r--arch/ia64/kernel/acpi.c14
-rw-r--r--arch/ia64/kernel/mca.c5
-rw-r--r--arch/ia64/kernel/time.c19
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S2
-rw-r--r--arch/ia64/kvm/kvm-ia64.c2
-rw-r--r--arch/ia64/sn/kernel/setup.c2
-rw-r--r--arch/ia64/sn/pci/tioca_provider.c2
-rw-r--r--arch/ia64/xen/suspend.c9
-rw-r--r--arch/ia64/xen/time.c13
-rw-r--r--arch/m32r/kernel/time.c5
-rw-r--r--arch/m32r/kernel/vmlinux.lds.S2
-rw-r--r--arch/m68k/Kconfig34
-rw-r--r--arch/m68k/Makefile1
-rw-r--r--arch/m68k/amiga/chipram.c4
-rw-r--r--arch/m68k/bvme6000/config.c4
-rw-r--r--arch/m68k/emu/Makefile9
-rw-r--r--arch/m68k/emu/natfeat.c78
-rw-r--r--arch/m68k/emu/nfblock.c195
-rw-r--r--arch/m68k/emu/nfcon.c162
-rw-r--r--arch/m68k/emu/nfeth.c270
-rw-r--r--arch/m68k/include/asm/natfeat.h22
-rw-r--r--arch/m68k/include/asm/processor.h2
-rw-r--r--arch/m68k/kernel/setup.c5
-rw-r--r--arch/m68k/kernel/signal.c24
-rw-r--r--arch/m68k/kernel/time.c4
-rw-r--r--arch/m68k/kernel/traps.c20
-rw-r--r--arch/m68k/math-emu/Makefile4
-rw-r--r--arch/m68k/mm/fault.c16
-rw-r--r--arch/m68k/mvme147/config.c4
-rw-r--r--arch/m68k/mvme16x/config.c4
-rw-r--r--arch/m68k/sun3/sun3ints.c2
-rw-r--r--arch/m68knommu/Kconfig1
-rw-r--r--arch/m68knommu/kernel/irq.c6
-rw-r--r--arch/m68knommu/kernel/time.c8
-rw-r--r--arch/m68knommu/platform/5249/intc2.c20
-rw-r--r--arch/m68knommu/platform/5272/intc.c33
-rw-r--r--arch/m68knommu/platform/68328/ints.c12
-rw-r--r--arch/m68knommu/platform/68360/ints.c18
-rw-r--r--arch/m68knommu/platform/coldfire/intc-2.c16
-rw-r--r--arch/m68knommu/platform/coldfire/intc-simr.c18
-rw-r--r--arch/m68knommu/platform/coldfire/intc.c20
-rw-r--r--arch/microblaze/include/asm/pci-bridge.h12
-rw-r--r--arch/microblaze/include/asm/prom.h15
-rw-r--r--arch/microblaze/kernel/prom_parse.c77
-rw-r--r--arch/microblaze/pci/pci-common.c1
-rw-r--r--arch/microblaze/pci/pci_32.c1
-rw-r--r--arch/mips/Kconfig3
-rw-r--r--arch/mips/include/asm/errno.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/platform.h1
-rw-r--r--arch/mips/jz4740/platform.c16
-rw-r--r--arch/mips/kernel/vmlinux.lds.S2
-rw-r--r--arch/mips/kernel/vpe.c4
-rw-r--r--arch/mn10300/kernel/time.c6
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/cacheflush.h31
-rw-r--r--arch/parisc/include/asm/errno.h2
-rw-r--r--arch/parisc/include/asm/irq.h13
-rw-r--r--arch/parisc/include/asm/pgtable.h14
-rw-r--r--arch/parisc/kernel/cache.c109
-rw-r--r--arch/parisc/kernel/entry.S217
-rw-r--r--arch/parisc/kernel/irq.c64
-rw-r--r--arch/parisc/kernel/pacache.S245
-rw-r--r--arch/parisc/kernel/time.c7
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts24
-rw-r--r--arch/powerpc/include/asm/cputable.h3
-rw-r--r--arch/powerpc/include/asm/machdep.h6
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h37
-rw-r--r--arch/powerpc/include/asm/pci.h2
-rw-r--r--arch/powerpc/include/asm/prom.h15
-rw-r--r--arch/powerpc/include/asm/rwsem.h51
-rw-r--r--arch/powerpc/kernel/cputable.c22
-rw-r--r--arch/powerpc/kernel/ibmebus.c404
-rw-r--r--arch/powerpc/kernel/machine_kexec.c5
-rw-r--r--arch/powerpc/kernel/of_platform.c9
-rw-r--r--arch/powerpc/kernel/pci-common.c12
-rw-r--r--arch/powerpc/kernel/pci_32.c2
-rw-r--r--arch/powerpc/kernel/pci_64.c6
-rw-r--r--arch/powerpc/kernel/pci_dn.c9
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c4
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/kernel/prom_parse.c84
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/kvm/book3s.c14
-rw-r--r--arch/powerpc/kvm/booke.c14
-rw-r--r--arch/powerpc/mm/tlb_hash64.c6
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S35
-rw-r--r--arch/powerpc/platforms/44x/44x.h4
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/Makefile1
-rw-r--r--arch/powerpc/platforms/44x/canyonlands.c134
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpio.c14
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c10
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c11
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c7
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c14
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c11
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c9
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/sysdev/axonram.c11
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm.c9
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c9
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c13
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c7
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c7
-rw-r--r--arch/powerpc/sysdev/pmi.c9
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c7
-rw-r--r--arch/s390/Kconfig22
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/boot/compressed/misc.c5
-rw-r--r--arch/s390/include/asm/atomic.h26
-rw-r--r--arch/s390/include/asm/cache.h1
-rw-r--r--arch/s390/include/asm/rwsem.h63
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/oprofile/Makefile2
-rw-r--r--arch/s390/oprofile/hwsampler.c1256
-rw-r--r--arch/s390/oprofile/hwsampler.h113
-rw-r--r--arch/s390/oprofile/hwsampler_files.c162
-rw-r--r--arch/s390/oprofile/init.c6
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c6
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c6
-rw-r--r--arch/sh/boot/compressed/Makefile2
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c46
-rw-r--r--arch/sh/include/asm/rwsem.h56
-rw-r--r--arch/sh/include/asm/sections.h2
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c13
-rw-r--r--arch/sh/kernel/vmlinux.lds.S2
-rw-r--r--arch/sh/lib/delay.c10
-rw-r--r--arch/sh/mm/Makefile2
-rw-r--r--arch/sh/mm/cache.c3
-rw-r--r--arch/sparc/Kconfig35
-rw-r--r--arch/sparc/Makefile3
-rw-r--r--arch/sparc/boot/Makefile31
-rw-r--r--arch/sparc/include/asm/errno.h2
-rw-r--r--arch/sparc/include/asm/irq_64.h18
-rw-r--r--arch/sparc/include/asm/leon.h3
-rw-r--r--arch/sparc/include/asm/leon_amba.h6
-rw-r--r--arch/sparc/include/asm/mmu_32.h3
-rw-r--r--arch/sparc/include/asm/parport.h6
-rw-r--r--arch/sparc/include/asm/rwsem.h46
-rw-r--r--arch/sparc/include/asm/smp_32.h6
-rw-r--r--arch/sparc/kernel/Makefile2
-rw-r--r--arch/sparc/kernel/apc.c7
-rw-r--r--arch/sparc/kernel/auxio_64.c7
-rw-r--r--arch/sparc/kernel/central.c14
-rw-r--r--arch/sparc/kernel/chmc.c19
-rw-r--r--arch/sparc/kernel/cpu.c2
-rw-r--r--arch/sparc/kernel/entry.h4
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/ioport.c108
-rw-r--r--arch/sparc/kernel/irq.h42
-rw-r--r--arch/sparc/kernel/irq_32.c260
-rw-r--r--arch/sparc/kernel/irq_64.c312
-rw-r--r--arch/sparc/kernel/kernel.h49
-rw-r--r--arch/sparc/kernel/ldc.c28
-rw-r--r--arch/sparc/kernel/leon_kernel.c11
-rw-r--r--arch/sparc/kernel/leon_pmc.c82
-rw-r--r--arch/sparc/kernel/leon_smp.c36
-rw-r--r--arch/sparc/kernel/of_device_32.c59
-rw-r--r--arch/sparc/kernel/pci.c11
-rw-r--r--arch/sparc/kernel/pci_common.c11
-rw-r--r--arch/sparc/kernel/pci_fire.c17
-rw-r--r--arch/sparc/kernel/pci_impl.h4
-rw-r--r--arch/sparc/kernel/pci_msi.c44
-rw-r--r--arch/sparc/kernel/pci_psycho.c7
-rw-r--r--arch/sparc/kernel/pci_sabre.c9
-rw-r--r--arch/sparc/kernel/pci_schizo.c15
-rw-r--r--arch/sparc/kernel/pci_sun4v.c16
-rw-r--r--arch/sparc/kernel/pcic.c4
-rw-r--r--arch/sparc/kernel/pcr.c2
-rw-r--r--arch/sparc/kernel/pmc.c7
-rw-r--r--arch/sparc/kernel/power.c6
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c16
-rw-r--r--arch/sparc/kernel/ptrace_64.c3
-rw-r--r--arch/sparc/kernel/setup_32.c19
-rw-r--r--arch/sparc/kernel/smp_64.c11
-rw-r--r--arch/sparc/kernel/sun4c_irq.c85
-rw-r--r--arch/sparc/kernel/sun4d_irq.c298
-rw-r--r--arch/sparc/kernel/sun4d_smp.c176
-rw-r--r--arch/sparc/kernel/sun4m_irq.c202
-rw-r--r--arch/sparc/kernel/sun4m_smp.c91
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c21
-rw-r--r--arch/sparc/kernel/tick14.c39
-rw-r--r--arch/sparc/kernel/time_32.c17
-rw-r--r--arch/sparc/kernel/time_64.c22
-rw-r--r--arch/sparc/kernel/traps_64.c3
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S2
-rw-r--r--arch/sparc/lib/atomic32.c2
-rw-r--r--arch/sparc/prom/misc_32.c4
-rw-r--r--arch/tile/Kconfig1
-rw-r--r--arch/tile/kernel/vmlinux.lds.S2
-rw-r--r--arch/um/Kconfig.common1
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/include/asm/common.lds.S2
-rw-r--r--arch/um/kernel/irq.c31
-rw-r--r--arch/unicore32/.gitignore21
-rw-r--r--arch/unicore32/Kconfig275
-rw-r--r--arch/unicore32/Kconfig.debug68
-rw-r--r--arch/unicore32/Makefile95
-rw-r--r--arch/unicore32/boot/Makefile47
-rw-r--r--arch/unicore32/boot/compressed/Makefile68
-rw-r--r--arch/unicore32/boot/compressed/head.S204
-rw-r--r--arch/unicore32/boot/compressed/misc.c126
-rw-r--r--arch/unicore32/boot/compressed/piggy.S.in6
-rw-r--r--arch/unicore32/boot/compressed/vmlinux.lds.in61
-rw-r--r--arch/unicore32/configs/debug_defconfig215
-rw-r--r--arch/unicore32/include/asm/Kbuild2
-rw-r--r--arch/unicore32/include/asm/assembler.h131
-rw-r--r--arch/unicore32/include/asm/bitops.h47
-rw-r--r--arch/unicore32/include/asm/byteorder.h24
-rw-r--r--arch/unicore32/include/asm/cache.h27
-rw-r--r--arch/unicore32/include/asm/cacheflush.h211
-rw-r--r--arch/unicore32/include/asm/checksum.h41
-rw-r--r--arch/unicore32/include/asm/cpu-single.h45
-rw-r--r--arch/unicore32/include/asm/cputype.h33
-rw-r--r--arch/unicore32/include/asm/delay.h52
-rw-r--r--arch/unicore32/include/asm/dma-mapping.h124
-rw-r--r--arch/unicore32/include/asm/dma.h23
-rw-r--r--arch/unicore32/include/asm/elf.h94
-rw-r--r--arch/unicore32/include/asm/fpstate.h26
-rw-r--r--arch/unicore32/include/asm/fpu-ucf64.h53
-rw-r--r--arch/unicore32/include/asm/futex.h143
-rw-r--r--arch/unicore32/include/asm/gpio.h104
-rw-r--r--arch/unicore32/include/asm/hwcap.h32
-rw-r--r--arch/unicore32/include/asm/io.h55
-rw-r--r--arch/unicore32/include/asm/irq.h105
-rw-r--r--arch/unicore32/include/asm/irqflags.h53
-rw-r--r--arch/unicore32/include/asm/linkage.h22
-rw-r--r--arch/unicore32/include/asm/memblock.h46
-rw-r--r--arch/unicore32/include/asm/memory.h123
-rw-r--r--arch/unicore32/include/asm/mmu.h17
-rw-r--r--arch/unicore32/include/asm/mmu_context.h87
-rw-r--r--arch/unicore32/include/asm/mutex.h20
-rw-r--r--arch/unicore32/include/asm/page.h80
-rw-r--r--arch/unicore32/include/asm/pci.h46
-rw-r--r--arch/unicore32/include/asm/pgalloc.h110
-rw-r--r--arch/unicore32/include/asm/pgtable-hwdef.h55
-rw-r--r--arch/unicore32/include/asm/pgtable.h317
-rw-r--r--arch/unicore32/include/asm/processor.h92
-rw-r--r--arch/unicore32/include/asm/ptrace.h133
-rw-r--r--arch/unicore32/include/asm/sigcontext.h29
-rw-r--r--arch/unicore32/include/asm/stacktrace.h31
-rw-r--r--arch/unicore32/include/asm/string.h38
-rw-r--r--arch/unicore32/include/asm/suspend.h30
-rw-r--r--arch/unicore32/include/asm/system.h161
-rw-r--r--arch/unicore32/include/asm/thread_info.h154
-rw-r--r--arch/unicore32/include/asm/timex.h34
-rw-r--r--arch/unicore32/include/asm/tlb.h98
-rw-r--r--arch/unicore32/include/asm/tlbflush.h195
-rw-r--r--arch/unicore32/include/asm/traps.h21
-rw-r--r--arch/unicore32/include/asm/uaccess.h47
-rw-r--r--arch/unicore32/include/asm/unistd.h18
-rw-r--r--arch/unicore32/include/mach/PKUnity.h104
-rw-r--r--arch/unicore32/include/mach/bitfield.h24
-rw-r--r--arch/unicore32/include/mach/dma.h48
-rw-r--r--arch/unicore32/include/mach/hardware.h36
-rw-r--r--arch/unicore32/include/mach/map.h20
-rw-r--r--arch/unicore32/include/mach/memory.h58
-rw-r--r--arch/unicore32/include/mach/ocd.h36
-rw-r--r--arch/unicore32/include/mach/pm.h43
-rw-r--r--arch/unicore32/include/mach/regs-ac97.h32
-rw-r--r--arch/unicore32/include/mach/regs-dmac.h81
-rw-r--r--arch/unicore32/include/mach/regs-gpio.h70
-rw-r--r--arch/unicore32/include/mach/regs-i2c.h63
-rw-r--r--arch/unicore32/include/mach/regs-intc.h28
-rw-r--r--arch/unicore32/include/mach/regs-nand.h79
-rw-r--r--arch/unicore32/include/mach/regs-ost.h92
-rw-r--r--arch/unicore32/include/mach/regs-pci.h94
-rw-r--r--arch/unicore32/include/mach/regs-pm.h126
-rw-r--r--arch/unicore32/include/mach/regs-ps2.h20
-rw-r--r--arch/unicore32/include/mach/regs-resetc.h34
-rw-r--r--arch/unicore32/include/mach/regs-rtc.h37
-rw-r--r--arch/unicore32/include/mach/regs-sdc.h156
-rw-r--r--arch/unicore32/include/mach/regs-spi.h98
-rw-r--r--arch/unicore32/include/mach/regs-uart.h3
-rw-r--r--arch/unicore32/include/mach/regs-umal.h229
-rw-r--r--arch/unicore32/include/mach/regs-unigfx.h200
-rw-r--r--arch/unicore32/include/mach/uncompress.h34
-rw-r--r--arch/unicore32/kernel/Makefile33
-rw-r--r--arch/unicore32/kernel/asm-offsets.c112
-rw-r--r--arch/unicore32/kernel/clock.c390
-rw-r--r--arch/unicore32/kernel/cpu-ucv2.c93
-rw-r--r--arch/unicore32/kernel/debug-macro.S89
-rw-r--r--arch/unicore32/kernel/debug.S85
-rw-r--r--arch/unicore32/kernel/dma.c183
-rw-r--r--arch/unicore32/kernel/early_printk.c59
-rw-r--r--arch/unicore32/kernel/elf.c38
-rw-r--r--arch/unicore32/kernel/entry.S824
-rw-r--r--arch/unicore32/kernel/fpu-ucf64.c126
-rw-r--r--arch/unicore32/kernel/gpio.c122
-rw-r--r--arch/unicore32/kernel/head.S252
-rw-r--r--arch/unicore32/kernel/hibernate.c160
-rw-r--r--arch/unicore32/kernel/hibernate_asm.S117
-rw-r--r--arch/unicore32/kernel/init_task.c44
-rw-r--r--arch/unicore32/kernel/irq.c426
-rw-r--r--arch/unicore32/kernel/ksyms.c99
-rw-r--r--arch/unicore32/kernel/ksyms.h15
-rw-r--r--arch/unicore32/kernel/module.c152
-rw-r--r--arch/unicore32/kernel/pci.c404
-rw-r--r--arch/unicore32/kernel/pm.c123
-rw-r--r--arch/unicore32/kernel/process.c389
-rw-r--r--arch/unicore32/kernel/ptrace.c149
-rw-r--r--arch/unicore32/kernel/puv3-core.c285
-rw-r--r--arch/unicore32/kernel/puv3-nb0916.c145
-rw-r--r--arch/unicore32/kernel/pwm.c263
-rw-r--r--arch/unicore32/kernel/rtc.c380
-rw-r--r--arch/unicore32/kernel/setup.c360
-rw-r--r--arch/unicore32/kernel/setup.h30
-rw-r--r--arch/unicore32/kernel/signal.c494
-rw-r--r--arch/unicore32/kernel/sleep.S202
-rw-r--r--arch/unicore32/kernel/stacktrace.c131
-rw-r--r--arch/unicore32/kernel/sys.c126
-rw-r--r--arch/unicore32/kernel/time.c143
-rw-r--r--arch/unicore32/kernel/traps.c333
-rw-r--r--arch/unicore32/kernel/vmlinux.lds.S61
-rw-r--r--arch/unicore32/lib/Makefile27
-rw-r--r--arch/unicore32/lib/backtrace.S163
-rw-r--r--arch/unicore32/lib/clear_user.S57
-rw-r--r--arch/unicore32/lib/copy_from_user.S108
-rw-r--r--arch/unicore32/lib/copy_page.S39
-rw-r--r--arch/unicore32/lib/copy_template.S214
-rw-r--r--arch/unicore32/lib/copy_to_user.S96
-rw-r--r--arch/unicore32/lib/delay.S51
-rw-r--r--arch/unicore32/lib/findbit.S98
-rw-r--r--arch/unicore32/lib/strncpy_from_user.S45
-rw-r--r--arch/unicore32/lib/strnlen_user.S42
-rw-r--r--arch/unicore32/mm/Kconfig50
-rw-r--r--arch/unicore32/mm/Makefile15
-rw-r--r--arch/unicore32/mm/alignment.c523
-rw-r--r--arch/unicore32/mm/cache-ucv2.S212
-rw-r--r--arch/unicore32/mm/dma-swiotlb.c34
-rw-r--r--arch/unicore32/mm/extable.c24
-rw-r--r--arch/unicore32/mm/fault.c479
-rw-r--r--arch/unicore32/mm/flush.c98
-rw-r--r--arch/unicore32/mm/init.c517
-rw-r--r--arch/unicore32/mm/ioremap.c261
-rw-r--r--arch/unicore32/mm/mm.h39
-rw-r--r--arch/unicore32/mm/mmu.c533
-rw-r--r--arch/unicore32/mm/pgd.c102
-rw-r--r--arch/unicore32/mm/proc-macros.S145
-rw-r--r--arch/unicore32/mm/proc-syms.c23
-rw-r--r--arch/unicore32/mm/proc-ucv2.S134
-rw-r--r--arch/unicore32/mm/tlb-ucv2.S89
-rw-r--r--arch/x86/Kconfig33
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c24
-rw-r--r--arch/x86/ia32/ia32entry.S1
-rw-r--r--arch/x86/include/asm/acpi.h20
-rw-r--r--arch/x86/include/asm/amd_nb.h14
-rw-r--r--arch/x86/include/asm/apic.h38
-rw-r--r--arch/x86/include/asm/apicdef.h12
-rw-r--r--arch/x86/include/asm/bootparam.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/e820.h2
-rw-r--r--arch/x86/include/asm/entry_arch.h5
-rw-r--r--arch/x86/include/asm/hw_irq.h24
-rw-r--r--arch/x86/include/asm/init.h6
-rw-r--r--arch/x86/include/asm/io_apic.h44
-rw-r--r--arch/x86/include/asm/ipi.h8
-rw-r--r--arch/x86/include/asm/irq.h3
-rw-r--r--arch/x86/include/asm/irq_controller.h12
-rw-r--r--arch/x86/include/asm/irq_vectors.h45
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/kvm_emulate.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/mpspec.h3
-rw-r--r--arch/x86/include/asm/msr-index.h6
-rw-r--r--arch/x86/include/asm/nmi.h1
-rw-r--r--arch/x86/include/asm/numa.h52
-rw-r--r--arch/x86/include/asm/numa_32.h7
-rw-r--r--arch/x86/include/asm/numa_64.h23
-rw-r--r--arch/x86/include/asm/olpc_ofw.h14
-rw-r--r--arch/x86/include/asm/page_types.h11
-rw-r--r--arch/x86/include/asm/percpu.h48
-rw-r--r--arch/x86/include/asm/perf_event_p4.h1
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/include/asm/prom.h70
-rw-r--r--arch/x86/include/asm/reboot.h5
-rw-r--r--arch/x86/include/asm/rwsem.h80
-rw-r--r--arch/x86/include/asm/segment.h12
-rw-r--r--arch/x86/include/asm/smp.h10
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h2
-rw-r--r--arch/x86/include/asm/system.h2
-rw-r--r--arch/x86/include/asm/topology.h19
-rw-r--r--arch/x86/include/asm/trampoline.h33
-rw-r--r--arch/x86/include/asm/unistd_32.h3
-rw-r--r--arch/x86/include/asm/unistd_64.h2
-rw-r--r--arch/x86/include/asm/x86_init.h2
-rw-r--r--arch/x86/include/asm/xen/hypercall.h15
-rw-r--r--arch/x86/include/asm/xen/page.h47
-rw-r--r--arch/x86/include/asm/xen/pci.h8
-rw-r--r--arch/x86/kernel/Makefile5
-rw-r--r--arch/x86/kernel/acpi/boot.c22
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.S21
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.h5
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.lds.S28
-rw-r--r--arch/x86/kernel/acpi/sleep.c77
-rw-r--r--arch/x86/kernel/acpi/sleep.h5
-rw-r--r--arch/x86/kernel/acpi/wakeup_rm.S10
-rw-r--r--arch/x86/kernel/amd_nb.c86
-rw-r--r--arch/x86/kernel/apb_timer.c62
-rw-r--r--arch/x86/kernel/aperture_64.c33
-rw-r--r--arch/x86/kernel/apic/apic.c87
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c4
-rw-r--r--arch/x86/kernel/apic/apic_noop.c26
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c34
-rw-r--r--arch/x86/kernel/apic/es7000_32.c35
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c1
-rw-r--r--arch/x86/kernel/apic/io_apic.c267
-rw-r--r--arch/x86/kernel/apic/ipi.c12
-rw-r--r--arch/x86/kernel/apic/numaq_32.c21
-rw-r--r--arch/x86/kernel/apic/probe_32.c10
-rw-r--r--arch/x86/kernel/apic/summit_32.c47
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/apm_32.c17
-rw-r--r--arch/x86/kernel/asm-offsets.c65
-rw-r--r--arch/x86/kernel/asm-offsets_32.c69
-rw-r--r--arch/x86/kernel/asm-offsets_64.c90
-rw-r--r--arch/x86/kernel/cpu/amd.c61
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/p4-clockmod.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c13
-rw-r--r--arch/x86/kernel/cpu/intel.c5
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c80
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c42
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event.c86
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c175
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c19
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c4
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c4
-rw-r--r--arch/x86/kernel/devicetree.c441
-rw-r--r--arch/x86/kernel/dumpstack.c25
-rw-r--r--arch/x86/kernel/e820.c18
-rw-r--r--arch/x86/kernel/early-quirks.c16
-rw-r--r--arch/x86/kernel/entry_32.S2
-rw-r--r--arch/x86/kernel/entry_64.S5
-rw-r--r--arch/x86/kernel/head32.c9
-rw-r--r--arch/x86/kernel/head_32.S10
-rw-r--r--arch/x86/kernel/head_64.S3
-rw-r--r--arch/x86/kernel/ioport.c20
-rw-r--r--arch/x86/kernel/irq.c11
-rw-r--r--arch/x86/kernel/irqinit.c88
-rw-r--r--arch/x86/kernel/kgdb.c13
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/microcode_amd.c188
-rw-r--r--arch/x86/kernel/microcode_core.c6
-rw-r--r--arch/x86/kernel/process.c9
-rw-r--r--arch/x86/kernel/reboot.c128
-rw-r--r--arch/x86/kernel/reboot_32.S135
-rw-r--r--arch/x86/kernel/rtc.c3
-rw-r--r--arch/x86/kernel/setup.c93
-rw-r--r--arch/x86/kernel/setup_percpu.c11
-rw-r--r--arch/x86/kernel/smpboot.c134
-rw-r--r--arch/x86/kernel/syscall_table_32.S1
-rw-r--r--arch/x86/kernel/trampoline.c42
-rw-r--r--arch/x86/kernel/trampoline_32.S15
-rw-r--r--arch/x86/kernel/trampoline_64.S28
-rw-r--r--arch/x86/kernel/vmlinux.lds.S17
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c1
-rw-r--r--arch/x86/kernel/x86_init.c1
-rw-r--r--arch/x86/kvm/emulate.c12
-rw-r--r--arch/x86/kvm/i8259.c25
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/mmu.c16
-rw-r--r--arch/x86/kvm/svm.c24
-rw-r--r--arch/x86/kvm/vmx.c101
-rw-r--r--arch/x86/kvm/x86.c126
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/cmpxchg16b_emu.S59
-rw-r--r--arch/x86/lib/memmove_64.S197
-rw-r--r--arch/x86/lib/memmove_64.c192
-rw-r--r--arch/x86/mm/Makefile1
-rw-r--r--arch/x86/mm/amdtopology_64.c142
-rw-r--r--arch/x86/mm/init.c56
-rw-r--r--arch/x86/mm/init_32.c11
-rw-r--r--arch/x86/mm/init_64.c126
-rw-r--r--arch/x86/mm/numa.c212
-rw-r--r--arch/x86/mm/numa_32.c10
-rw-r--r--arch/x86/mm/numa_64.c967
-rw-r--r--arch/x86/mm/numa_emulation.c475
-rw-r--r--arch/x86/mm/numa_internal.h31
-rw-r--r--arch/x86/mm/srat_32.c6
-rw-r--r--arch/x86/mm/srat_64.c367
-rw-r--r--arch/x86/mm/tlb.c11
-rw-r--r--arch/x86/oprofile/op_model_p4.c2
-rw-r--r--arch/x86/pci/amd_bus.c2
-rw-r--r--arch/x86/pci/ce4100.c2
-rw-r--r--arch/x86/pci/irq.c15
-rw-r--r--arch/x86/pci/xen.c159
-rw-r--r--arch/x86/platform/ce4100/ce4100.c24
-rw-r--r--arch/x86/platform/ce4100/falconfalls.dts428
-rw-r--r--arch/x86/platform/mrst/mrst.c2
-rw-r--r--arch/x86/platform/mrst/vrtc.c16
-rw-r--r--arch/x86/platform/olpc/Makefile4
-rw-r--r--arch/x86/platform/olpc/olpc-xo1.c42
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c3
-rw-r--r--arch/x86/xen/Kconfig10
-rw-r--r--arch/x86/xen/enlighten.c8
-rw-r--r--arch/x86/xen/mmu.c74
-rw-r--r--arch/x86/xen/p2m.c207
-rw-r--r--arch/x86/xen/setup.c67
-rw-r--r--arch/x86/xen/smp.c38
-rw-r--r--arch/x86/xen/suspend.c8
-rw-r--r--arch/x86/xen/time.c4
-rw-r--r--arch/x86/xen/xen-head.S4
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/Kconfig17
-rw-r--r--arch/xtensa/Makefile5
-rw-r--r--arch/xtensa/boot/Makefile1
-rw-r--r--arch/xtensa/configs/s6105_defconfig2
-rw-r--r--arch/xtensa/include/asm/coprocessor.h9
-rw-r--r--arch/xtensa/include/asm/io.h40
-rw-r--r--arch/xtensa/include/asm/irq.h7
-rw-r--r--arch/xtensa/include/asm/rwsem.h37
-rw-r--r--arch/xtensa/include/asm/serial.h12
-rw-r--r--arch/xtensa/kernel/Makefile4
-rw-r--r--arch/xtensa/kernel/time.c8
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S2
-rw-r--r--arch/xtensa/platforms/xtavnet/Makefile10
-rw-r--r--arch/xtensa/platforms/xtavnet/include/platform/hardware.h85
-rw-r--r--arch/xtensa/platforms/xtavnet/include/platform/lcd.h22
-rw-r--r--arch/xtensa/platforms/xtavnet/include/platform/serial.h1
-rw-r--r--arch/xtensa/platforms/xtavnet/lcd.c79
-rw-r--r--arch/xtensa/platforms/xtavnet/setup.c269
-rw-r--r--block/blk-core.c108
-rw-r--r--block/blk-flush.c441
-rw-r--r--block/blk.h12
-rw-r--r--block/cfq-iosched.c18
-rw-r--r--block/elevator.c9
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioctl.c8
-rw-r--r--crypto/ablkcipher.c3
-rw-r--r--crypto/testmgr.c2
-rw-r--r--crypto/testmgr.h30
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/apei/Kconfig10
-rw-r--r--drivers/acpi/apei/cper.c18
-rw-r--r--drivers/acpi/apei/erst-dbg.c24
-rw-r--r--drivers/acpi/apei/erst.c371
-rw-r--r--drivers/acpi/apei/ghes.c186
-rw-r--r--drivers/acpi/bus.c23
-rw-r--r--drivers/acpi/button.c11
-rw-r--r--drivers/acpi/numa.c9
-rw-r--r--drivers/acpi/nvs.c22
-rw-r--r--drivers/acpi/osl.c145
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c33
-rw-r--r--drivers/amba/bus.c348
-rw-r--r--drivers/ata/Kconfig12
-rw-r--r--drivers/ata/ahci.c5
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libata-acpi.c3
-rw-r--r--drivers/ata/libata-core.c54
-rw-r--r--drivers/ata/libata-eh.c60
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_at32.c2
-rw-r--r--drivers/ata/pata_bf54x.c4
-rw-r--r--drivers/ata/pata_hpt3x3.c2
-rw-r--r--drivers/ata/pata_it821x.c4
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c2
-rw-r--r--drivers/ata/pata_macio.c3
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c8
-rw-r--r--drivers/ata/pata_ninja32.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c3
-rw-r--r--drivers/ata/pata_of_platform.c9
-rw-r--r--drivers/ata/pata_palmld.c2
-rw-r--r--drivers/ata/pata_pcmcia.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_pxa.c1
-rw-r--r--drivers/ata/pata_rb532_cf.c1
-rw-r--r--drivers/ata/pata_samsung_cf.c1
-rw-r--r--drivers/ata/pata_scc.c2
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pdc_adma.c4
-rw-r--r--drivers/ata/sata_dwc_460ex.c84
-rw-r--r--drivers/ata/sata_fsl.c12
-rw-r--r--drivers/ata/sata_mv.c3
-rw-r--r--drivers/ata/sata_nv.c14
-rw-r--r--drivers/ata/sata_promise.c4
-rw-r--r--drivers/ata/sata_qstor.c3
-rw-r--r--drivers/ata/sata_sil.c3
-rw-r--r--drivers/ata/sata_sil24.c3
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/ata/sata_svw.c12
-rw-r--r--drivers/ata/sata_sx4.c5
-rw-r--r--drivers/ata/sata_uli.c3
-rw-r--r--drivers/ata/sata_via.c9
-rw-r--r--drivers/ata/sata_vsc.c3
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c17
-rw-r--r--drivers/base/power/Makefile3
-rw-r--r--drivers/base/power/main.c175
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/base/power/power.h21
-rw-r--r--drivers/base/power/runtime.c37
-rw-r--r--drivers/base/power/sysfs.c78
-rw-r--r--drivers/base/power/trace.c6
-rw-r--r--drivers/base/power/wakeup.c109
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/smart1,2.h2
-rw-r--r--drivers/block/virtio_blk.c88
-rw-r--r--drivers/block/xen-blkfront.c87
-rw-r--r--drivers/block/xsysace.c11
-rw-r--r--drivers/bluetooth/ath3k.c290
-rw-r--r--drivers/bluetooth/btusb.c26
-rw-r--r--drivers/bluetooth/hci_ldisc.c1
-rw-r--r--drivers/char/agp/amd64-agp.c9
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/agp/intel-gtt.c56
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/n2-drv.c16
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c14
-rw-r--r--drivers/char/hw_random/pasemi-rng.c9
-rw-r--r--drivers/char/hw_random/picoxcell-rng.c208
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c70
-rw-r--r--drivers/char/mmtimer.c30
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c52
-rw-r--r--drivers/char/random.c13
-rw-r--r--drivers/char/tpm/tpm.c10
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c129
-rw-r--r--drivers/cpufreq/cpufreq.c27
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c24
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c37
-rw-r--r--drivers/cpuidle/sysfs.c2
-rw-r--r--drivers/crypto/Kconfig17
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c9
-rw-r--r--drivers/crypto/n2_core.c20
-rw-r--r--drivers/crypto/omap-aes.c4
-rw-r--r--drivers/crypto/omap-sham.c4
-rw-r--r--drivers/crypto/picoxcell_crypto.c1867
-rw-r--r--drivers/crypto/picoxcell_crypto_regs.h128
-rw-r--r--drivers/crypto/talitos.c9
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/dw_dmac.c36
-rw-r--r--drivers/dma/fsldma.c14
-rw-r--r--drivers/dma/mpc512x_dma.c9
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c11
-rw-r--r--drivers/dma/shdma.c2
-rw-r--r--drivers/dma/ste_dma40.c1402
-rw-r--r--drivers/dma/ste_dma40_ll.c218
-rw-r--r--drivers/dma/ste_dma40_ll.h66
-rw-r--r--drivers/dma/timb_dma.c5
-rw-r--r--drivers/edac/amd64_edac.c1412
-rw-r--r--drivers/edac/amd64_edac.h357
-rw-r--r--drivers/edac/amd64_edac_inj.c8
-rw-r--r--drivers/edac/edac_mc_sysfs.c26
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c23
-rw-r--r--drivers/edac/i82975x_edac.c69
-rw-r--r--drivers/edac/mce_amd.c8
-rw-r--r--drivers/edac/mce_amd.h28
-rw-r--r--drivers/edac/mpc85xx_edac.c27
-rw-r--r--drivers/edac/ppc4xx_edac.c23
-rw-r--r--drivers/firewire/Kconfig3
-rw-r--r--drivers/firewire/core-card.c5
-rw-r--r--drivers/firewire/core-cdev.c54
-rw-r--r--drivers/firewire/core-device.c3
-rw-r--r--drivers/firewire/core-iso.c22
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/ohci.c64
-rw-r--r--drivers/firmware/dcdbas.c4
-rw-r--r--drivers/gpio/janz-ttl.c3
-rw-r--r--drivers/gpio/pca953x.c13
-rw-r--r--drivers/gpio/pl061.c2
-rw-r--r--drivers/gpio/rdc321x-gpio.c3
-rw-r--r--drivers/gpio/sx150x.c54
-rw-r--r--drivers/gpio/timbgpio.c6
-rw-r--r--drivers/gpu/drm/Kconfig47
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_crtc.c33
-rw-r--r--drivers/gpu/drm/drm_drv.c48
-rw-r--r--drivers/gpu/drm/drm_edid.c19
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c5
-rw-r--r--drivers/gpu/drm/drm_gem.c5
-rw-r--r--drivers/gpu/drm/drm_hashtab.c27
-rw-r--r--drivers/gpu/drm/drm_info.c27
-rw-r--r--drivers/gpu/drm/drm_ioctl.c115
-rw-r--r--drivers/gpu/drm/drm_irq.c43
-rw-r--r--drivers/gpu/drm/drm_mm.c570
-rw-r--r--drivers/gpu/drm/drm_modes.c6
-rw-r--r--drivers/gpu/drm/drm_pci.c205
-rw-r--r--drivers/gpu/drm/drm_platform.c75
-rw-r--r--drivers/gpu/drm/drm_sman.c4
-rw-r--r--drivers/gpu/drm/drm_stub.c21
-rw-r--r--drivers/gpu/drm/drm_usb.c117
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c18
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c20
-rw-r--r--drivers/gpu/drm/i830/Makefile8
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c1560
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c107
-rw-r--r--drivers/gpu/drm/i830/i830_drv.h295
-rw-r--r--drivers/gpu/drm/i830/i830_irq.c186
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c23
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c103
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c16
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c95
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c37
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c2
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c260
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c192
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c139
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c369
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h19
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c17
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c164
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c191
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h42
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c290
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c51
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c54
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c62
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c31
-rw-r--r--drivers/gpu/drm/radeon/r100.c14
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r420.c4
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c402
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c1
-rw-r--r--drivers/gpu/drm/radeon/r600d.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon.h90
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h77
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c52
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c96
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/rs400.c4
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/gpu/drm/radeon/rs690.c4
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c4
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c14
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c13
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c34
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c12
-rw-r--r--drivers/gpu/drm/via/via_drv.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c5
-rw-r--r--drivers/hid/Kconfig37
-rw-r--r--drivers/hid/Makefile5
-rw-r--r--drivers/hid/hid-core.c38
-rw-r--r--drivers/hid/hid-gyration.c5
-rw-r--r--drivers/hid/hid-ids.h11
-rw-r--r--drivers/hid/hid-input.c35
-rw-r--r--drivers/hid/hid-keytouch.c66
-rw-r--r--drivers/hid/hid-lcpower.c70
-rw-r--r--drivers/hid/hid-multitouch.c28
-rw-r--r--drivers/hid/hid-roccat-arvo.c450
-rw-r--r--drivers/hid/hid-roccat-arvo.h98
-rw-r--r--drivers/hid/hid-roccat-common.c62
-rw-r--r--drivers/hid/hid-roccat-common.h23
-rw-r--r--drivers/hid/hid-roccat-kone.c156
-rw-r--r--drivers/hid/hid-roccat-koneplus.c165
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c715
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h157
-rw-r--r--drivers/hid/hid-roccat-pyra.c174
-rw-r--r--drivers/hid/hid-roccat.c53
-rw-r--r--drivers/hid/hid-sony.c20
-rw-r--r--drivers/hid/hidraw.c112
-rw-r--r--drivers/hid/usbhid/hid-core.c35
-rw-r--r--drivers/hwmon/Kconfig162
-rw-r--r--drivers/hwmon/Makefile14
-rw-r--r--drivers/hwmon/ad7414.c1
-rw-r--r--drivers/hwmon/ads1015.c282
-rw-r--r--drivers/hwmon/adt7411.c1
-rw-r--r--drivers/hwmon/jz4740-hwmon.c4
-rw-r--r--drivers/hwmon/lineage-pem.c589
-rw-r--r--drivers/hwmon/lm75.c66
-rw-r--r--drivers/hwmon/lm85.c136
-rw-r--r--drivers/hwmon/max16064.c91
-rw-r--r--drivers/hwmon/max34440.c199
-rw-r--r--drivers/hwmon/max6639.c653
-rw-r--r--drivers/hwmon/max8688.c158
-rw-r--r--drivers/hwmon/pmbus.c203
-rw-r--r--drivers/hwmon/pmbus.h313
-rw-r--r--drivers/hwmon/pmbus_core.c1628
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c157
-rw-r--r--drivers/hwmon/ultra45_env.c9
-rw-r--r--drivers/hwspinlock/Kconfig22
-rw-r--r--drivers/hwspinlock/Makefile6
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c548
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h61
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c231
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/busses/Kconfig60
-rw-r--r--drivers/i2c/busses/Makefile5
-rw-r--r--drivers/i2c/busses/i2c-cpm.c9
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c535
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c161
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c9
-rw-r--r--drivers/i2c/busses/i2c-mpc.c22
-rw-r--r--drivers/i2c/busses/i2c-mxs.c412
-rw-r--r--drivers/i2c/busses/i2c-ocores.c17
-rw-r--r--drivers/i2c/busses/i2c-omap.c35
-rw-r--r--drivers/i2c/busses/i2c-puv3.c306
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c176
-rw-r--r--drivers/i2c/busses/i2c-pxa.c116
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c700
-rw-r--r--drivers/i2c/busses/i2c-xiic.c3
-rw-r--r--drivers/i2c/i2c-boardinfo.c2
-rw-r--r--drivers/i2c/i2c-core.c30
-rw-r--r--drivers/i2c/i2c-dev.c60
-rw-r--r--drivers/idle/intel_idle.c46
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c6
-rw-r--r--drivers/infiniband/hw/nes/nes.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c26
-rw-r--r--drivers/input/evdev.c10
-rw-r--r--drivers/input/gameport/gameport.c2
-rw-r--r--drivers/input/input-polldev.c4
-rw-r--r--drivers/input/input.c55
-rw-r--r--drivers/input/keyboard/lm8323.c15
-rw-r--r--drivers/input/keyboard/max7359_keypad.c17
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c57
-rw-r--r--drivers/input/keyboard/omap4-keypad.c74
-rw-r--r--drivers/input/keyboard/tegra-kbc.c62
-rw-r--r--drivers/input/misc/ad714x-i2c.c17
-rw-r--r--drivers/input/misc/ad714x-spi.c17
-rw-r--r--drivers/input/misc/adxl34x-i2c.c16
-rw-r--r--drivers/input/misc/adxl34x-spi.c20
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/sparcspkr.c22
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/misc/uinput.c48
-rw-r--r--drivers/input/mouse/bcm5974.c8
-rw-r--r--drivers/input/mouse/synaptics.h23
-rw-r--r--drivers/input/mouse/synaptics_i2c.c16
-rw-r--r--drivers/input/serio/altera_ps2.c15
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/serio/i8042-sparcio.h8
-rw-r--r--drivers/input/serio/i8042-unicore32io.h73
-rw-r--r--drivers/input/serio/i8042.h2
-rw-r--r--drivers/input/serio/serio.c2
-rw-r--r--drivers/input/serio/xilinx_ps2.c9
-rw-r--r--drivers/input/sparse-keymap.c4
-rw-r--r--drivers/input/touchscreen/Kconfig34
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c19
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c17
-rw-r--r--drivers/input/touchscreen/ads7846.c16
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c1214
-rw-r--r--drivers/input/touchscreen/qt602240_ts.c1406
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c12
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c363
-rw-r--r--drivers/isdn/hardware/eicon/istream.c2
-rw-r--r--drivers/isdn/mISDN/hwchannel.c6
-rw-r--r--drivers/leds/led-triggers.c20
-rw-r--r--drivers/leds/leds-gpio.c214
-rw-r--r--drivers/leds/leds-mc13783.c7
-rw-r--r--drivers/macintosh/smu.c7
-rw-r--r--drivers/macintosh/therm_pm72.c8
-rw-r--r--drivers/macintosh/therm_windtunnel.c9
-rw-r--r--drivers/md/Kconfig6
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/dm-crypt.c19
-rw-r--r--drivers/md/dm-flakey.c212
-rw-r--r--drivers/md/dm-ioctl.c38
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c39
-rw-r--r--drivers/md/dm-snap.c2
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c33
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c1
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c1
-rw-r--r--drivers/media/common/tuners/tuner-types.c21
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig8
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile3
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c47
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c1374
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c111
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c807
-rw-r--r--drivers/media/dvb/frontends/Kconfig8
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/dib0090.c1583
-rw-r--r--drivers/media/dvb/frontends/dib0090.h31
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c1945
-rw-r--r--drivers/media/dvb/frontends/dib7000p.h96
-rw-r--r--drivers/media/dvb/frontends/dib8000.c821
-rw-r--r--drivers/media/dvb/frontends/dib8000.h20
-rw-r--r--drivers/media/dvb/frontends/dib9000.c2350
-rw-r--r--drivers/media/dvb/frontends/dib9000.h131
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.c279
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.h152
-rw-r--r--drivers/media/dvb/frontends/stv090x.c286
-rw-r--r--drivers/media/dvb/frontends/stv090x.h16
-rw-r--r--drivers/media/dvb/frontends/stv090x_reg.h16
-rw-r--r--drivers/media/dvb/ngene/Makefile3
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c179
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c236
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c71
-rw-r--r--drivers/media/dvb/ngene/ngene.h24
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c93
-rw-r--r--drivers/media/rc/rc-main.c4
-rw-r--r--drivers/media/video/Kconfig36
-rw-r--r--drivers/media/video/Makefile7
-rw-r--r--drivers/media/video/adv7343.c167
-rw-r--r--drivers/media/video/adv7343_regs.h8
-rw-r--r--drivers/media/video/bt819.c129
-rw-r--r--drivers/media/video/cs5345.c87
-rw-r--r--drivers/media/video/cx18/cx18-av-audio.c92
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c175
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h12
-rw-r--r--drivers/media/video/cx18/cx18-controls.c285
-rw-r--r--drivers/media/video/cx18/cx18-controls.h7
-rw-r--r--drivers/media/video/cx18/cx18-driver.c30
-rw-r--r--drivers/media/video/cx18/cx18-driver.h2
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c32
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c24
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c5
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.h5
-rw-r--r--drivers/media/video/cx18/cx18-streams.c16
-rw-r--r--drivers/media/video/fsl-viu.c9
-rw-r--r--drivers/media/video/mem2mem_testdev.c227
-rw-r--r--drivers/media/video/noon010pc30.c792
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c550
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c872
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h134
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c199
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h29
-rw-r--r--drivers/media/video/saa7110.c115
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c39
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c35
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c1
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/timblogiw.c3
-rw-r--r--drivers/media/video/tlv320aic23b.c74
-rw-r--r--drivers/media/video/tvp514x.c236
-rw-r--r--drivers/media/video/tvp5150.c157
-rw-r--r--drivers/media/video/tvp7002.c117
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c229
-rw-r--r--drivers/media/video/v4l2-ioctl.c455
-rw-r--r--drivers/media/video/v4l2-mem2mem.c232
-rw-r--r--drivers/media/video/videobuf2-core.c1804
-rw-r--r--drivers/media/video/videobuf2-dma-contig.c185
-rw-r--r--drivers/media/video/videobuf2-dma-sg.c292
-rw-r--r--drivers/media/video/videobuf2-memops.c232
-rw-r--r--drivers/media/video/videobuf2-vmalloc.c132
-rw-r--r--drivers/media/video/vivi.c585
-rw-r--r--drivers/media/video/vpx3220.c137
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h1
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptsas.c7
-rw-r--r--drivers/message/i2o/driver.c3
-rw-r--r--drivers/message/i2o/i2o_config.c3
-rw-r--r--drivers/mfd/Kconfig17
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab3100-core.c10
-rw-r--r--drivers/mfd/ab3550-core.c12
-rw-r--r--drivers/mfd/ab8500-core.c56
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c296
-rw-r--r--drivers/mfd/ab8500-sysctrl.c80
-rw-r--r--drivers/mfd/adp5520.c15
-rw-r--r--drivers/mfd/asic3.c14
-rw-r--r--drivers/mfd/cs5535-mfd.c37
-rw-r--r--drivers/mfd/davinci_voicecodec.c8
-rw-r--r--drivers/mfd/htc-pasic3.c7
-rw-r--r--drivers/mfd/janz-cmodio.c3
-rw-r--r--drivers/mfd/jz4740-adc.c4
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mc13xxx-core.c21
-rw-r--r--drivers/mfd/mfd-core.c135
-rw-r--r--drivers/mfd/pcf50633-core.c24
-rw-r--r--drivers/mfd/rdc321x-southbridge.c4
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c4
-rw-r--r--drivers/mfd/t7l66xb.c13
-rw-r--r--drivers/mfd/tc6387xb.c7
-rw-r--r--drivers/mfd/tc6393xb.c25
-rw-r--r--drivers/mfd/timberdale.c81
-rw-r--r--drivers/mfd/tps6586x.c36
-rw-r--r--drivers/mfd/twl-core.c8
-rw-r--r--drivers/mfd/twl4030-codec.c6
-rw-r--r--drivers/mfd/twl4030-madc.c802
-rw-r--r--drivers/mfd/ucb1x00-ts.c17
-rw-r--r--drivers/mfd/vx855.c1
-rw-r--r--drivers/mfd/wl1273-core.c8
-rw-r--r--drivers/mfd/wm831x-irq.c44
-rw-r--r--drivers/mfd/wm831x-spi.c24
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm8994-core.c23
-rw-r--r--drivers/mfd/wm8994-irq.c8
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h4
-rw-r--r--drivers/misc/iwmc3200top/main.c14
-rw-r--r--drivers/misc/kgdbts.c2
-rw-r--r--drivers/misc/lis3lv02d/Kconfig37
-rw-r--r--drivers/misc/lis3lv02d/Makefile7
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c (renamed from drivers/hwmon/lis3lv02d.c)3
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h (renamed from drivers/hwmon/lis3lv02d.h)0
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c (renamed from drivers/hwmon/lis3lv02d_i2c.c)2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c (renamed from drivers/hwmon/lis3lv02d_spi.c)21
-rw-r--r--drivers/mmc/card/Kconfig3
-rw-r--r--drivers/mmc/card/block.c1
-rw-r--r--drivers/mmc/card/mmc_test.c271
-rw-r--r--drivers/mmc/core/Makefile3
-rw-r--r--drivers/mmc/core/core.c9
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/host.c5
-rw-r--r--drivers/mmc/core/mmc.c86
-rw-r--r--drivers/mmc/core/quirks.c85
-rw-r--r--drivers/mmc/core/sd.c1
-rw-r--r--drivers/mmc/core/sdio.c1
-rw-r--r--drivers/mmc/host/Kconfig13
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c19
-rw-r--r--drivers/mmc/host/cb710-mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c26
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/mmc_spi.c4
-rw-r--r--drivers/mmc/host/mmci.c348
-rw-r--r--drivers/mmc/host/mmci.h15
-rw-r--r--drivers/mmc/host/msm_sdcc.c48
-rw-r--r--drivers/mmc/host/msm_sdcc.h1
-rw-r--r--drivers/mmc/host/mxcmmc.c181
-rw-r--r--drivers/mmc/host/mxs-mmc.c874
-rw-r--r--drivers/mmc/host/omap_hsmmc.c30
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c134
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h1
-rw-r--r--drivers/mmc/host/sdhci-of-core.c15
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c3
-rw-r--r--drivers/mmc/host/sdhci-pci.c12
-rw-r--r--drivers/mmc/host/sdhci-s3c.c3
-rw-r--r--drivers/mmc/host/sdhci-tegra.c6
-rw-r--r--drivers/mmc/host/sh_mmcif.c62
-rw-r--r--drivers/mmc/host/tmio_mmc.c120
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mtd/maps/physmap_of.c15
-rw-r--r--drivers/mtd/maps/sun_uflash.c8
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/fsl_upm.c9
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c9
-rw-r--r--drivers/mtd/nand/mxc_nand.c5
-rw-r--r--drivers/mtd/nand/ndfc.c9
-rw-r--r--drivers/mtd/nand/omap2.c367
-rw-r--r--drivers/mtd/nand/pasemi_nand.c9
-rw-r--r--drivers/mtd/nand/socrates_nand.c9
-rw-r--r--drivers/mtd/nand/tmio_nand.c11
-rw-r--r--drivers/mtd/onenand/Kconfig2
-rw-r--r--drivers/mtd/onenand/omap2.c36
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/io.c43
-rw-r--r--drivers/mtd/ubi/scan.c86
-rw-r--r--drivers/mtd/ubi/scan.h2
-rw-r--r--drivers/net/Kconfig21
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atl1c/atl1c.h4
-rw-r--r--drivers/net/atl1c/atl1c_hw.c15
-rw-r--r--drivers/net/atl1c/atl1c_hw.h43
-rw-r--r--drivers/net/atl1c/atl1c_main.c5
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c12
-rw-r--r--drivers/net/atl1e/atl1e_hw.c34
-rw-r--r--drivers/net/atl1e/atl1e_hw.h111
-rw-r--r--drivers/net/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/atlx/atl1.c77
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/ax88796.c810
-rw-r--r--drivers/net/benet/be.h16
-rw-r--r--drivers/net/benet/be_cmds.c145
-rw-r--r--drivers/net/benet/be_cmds.h61
-rw-r--r--drivers/net/benet/be_ethtool.c77
-rw-r--r--drivers/net/benet/be_hw.h47
-rw-r--r--drivers/net/benet/be_main.c221
-rw-r--r--drivers/net/bna/bnad.c108
-rw-r--r--drivers/net/bna/bnad.h2
-rw-r--r--drivers/net/bnx2.c8
-rw-r--r--drivers/net/bnx2.h4
-rw-r--r--drivers/net/bnx2x/bnx2x.h61
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c135
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h26
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c137
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h5
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c83
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h114
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c2527
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h34
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c623
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c4
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c253
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/bonding/bonding.h27
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/c_can/Kconfig15
-rw-r--r--drivers/net/can/c_can/Makefile8
-rw-r--r--drivers/net/can/c_can/c_can.c1158
-rw-r--r--drivers/net/can/c_can/c_can.h86
-rw-r--r--drivers/net/can/c_can/c_can_platform.c215
-rw-r--r--drivers/net/can/janz-ican3.c3
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c15
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c9
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/cnic.c217
-rw-r--r--drivers/net/cnic.h2
-rw-r--r--drivers/net/cnic_if.h8
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c5
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/dm9000.c16
-rw-r--r--drivers/net/dnet.c3
-rw-r--r--drivers/net/e1000/e1000_osdep.h3
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c71
-rw-r--r--drivers/net/e1000e/ich8lan.c3
-rw-r--r--drivers/net/e1000e/lib.c4
-rw-r--r--drivers/net/e1000e/netdev.c134
-rw-r--r--drivers/net/e1000e/phy.c8
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/enic.h11
-rw-r--r--drivers/net/enic/enic_dev.c221
-rw-r--r--drivers/net/enic/enic_dev.h41
-rw-r--r--drivers/net/enic/enic_main.c326
-rw-r--r--drivers/net/enic/vnic_dev.c19
-rw-r--r--drivers/net/enic/vnic_dev.h8
-rw-r--r--drivers/net/enic/vnic_rq.h5
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/fec.c653
-rw-r--r--drivers/net/fec_mpc52xx.c13
-rw-r--r--drivers/net/fec_mpc52xx.h2
-rw-r--r--drivers/net/fec_mpc52xx_phy.c5
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c16
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c9
-rw-r--r--drivers/net/fs_enet/mii-fec.c15
-rw-r--r--drivers/net/fsl_pq_mdio.c9
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/greth.c8
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/ibm_newemac/core.c9
-rw-r--r--drivers/net/ibm_newemac/mal.c9
-rw-r--r--drivers/net/ibm_newemac/rgmii.c9
-rw-r--r--drivers/net/ibm_newemac/tah.c9
-rw-r--r--drivers/net/ibm_newemac/zmii.c9
-rw-r--r--drivers/net/igb/e1000_82575.c11
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_hw.h1
-rw-r--r--drivers/net/igb/e1000_mbx.c38
-rw-r--r--drivers/net/igb/e1000_regs.h4
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_main.c102
-rw-r--r--drivers/net/igbvf/ethtool.c6
-rw-r--r--drivers/net/igbvf/igbvf.h3
-rw-r--r--drivers/net/igbvf/netdev.c63
-rw-r--r--drivers/net/igbvf/vf.c2
-rw-r--r--drivers/net/ipg.c4
-rw-r--r--drivers/net/ixgbe/ixgbe.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c177
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h10
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c94
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h23
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c115
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h24
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c211
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c34
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c40
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c29
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ixgbevf/defines.h2
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c37
-rw-r--r--drivers/net/jme.c306
-rw-r--r--drivers/net/jme.h87
-rw-r--r--drivers/net/ks8842.c3
-rw-r--r--drivers/net/ll_temac_main.c9
-rw-r--r--drivers/net/loopback.c9
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macvtap.c18
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/myri_sbus.c8
-rw-r--r--drivers/net/niu.c11
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/mdio-gpio.c9
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/ppp_generic.c148
-rw-r--r--drivers/net/qla3xxx.c12
-rw-r--r--drivers/net/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c15
-rw-r--r--drivers/net/r6040.c115
-rw-r--r--drivers/net/r8169.c42
-rw-r--r--drivers/net/sfc/efx.c82
-rw-r--r--drivers/net/sfc/efx.h19
-rw-r--r--drivers/net/sfc/ethtool.c59
-rw-r--r--drivers/net/sfc/falcon.c22
-rw-r--r--drivers/net/sfc/falcon_boards.c2
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/filter.c117
-rw-r--r--drivers/net/sfc/io.h2
-rw-r--r--drivers/net/sfc/mcdi.c23
-rw-r--r--drivers/net/sfc/mcdi.h4
-rw-r--r--drivers/net/sfc/mcdi_mac.c2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h2
-rw-r--r--drivers/net/sfc/mcdi_phy.c2
-rw-r--r--drivers/net/sfc/mdio_10g.c34
-rw-r--r--drivers/net/sfc/mdio_10g.h5
-rw-r--r--drivers/net/sfc/mtd.c2
-rw-r--r--drivers/net/sfc/net_driver.h83
-rw-r--r--drivers/net/sfc/nic.c73
-rw-r--r--drivers/net/sfc/nic.h9
-rw-r--r--drivers/net/sfc/phy.h2
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sfc/regs.h8
-rw-r--r--drivers/net/sfc/rx.c144
-rw-r--r--drivers/net/sfc/selftest.c4
-rw-r--r--drivers/net/sfc/selftest.h2
-rw-r--r--drivers/net/sfc/siena.c24
-rw-r--r--drivers/net/sfc/spi.h2
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c92
-rw-r--r--drivers/net/sfc/txc43128_phy.c4
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c208
-rw-r--r--drivers/net/sis900.c4
-rw-r--r--drivers/net/skge.c3
-rw-r--r--drivers/net/smc91x.c13
-rw-r--r--drivers/net/smc91x.h62
-rw-r--r--drivers/net/sunbmac.c9
-rw-r--r--drivers/net/sungem.c58
-rw-r--r--drivers/net/sungem.h3
-rw-r--r--drivers/net/sunhme.c14
-rw-r--r--drivers/net/sunlance.c8
-rw-r--r--drivers/net/sunqe.c8
-rw-r--r--drivers/net/tg3.c161
-rw-r--r--drivers/net/tg3.h13
-rw-r--r--drivers/net/tlan.c3775
-rw-r--r--drivers/net/tlan.h192
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/typhoon.c3
-rw-r--r--drivers/net/ucc_geth.c8
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/vbus-enet.c1560
-rw-r--r--drivers/net/veth.c12
-rw-r--r--drivers/net/via-velocity.c9
-rw-r--r--drivers/net/via-velocity.h8
-rw-r--r--drivers/net/vxge/vxge-config.c32
-rw-r--r--drivers/net/vxge/vxge-config.h10
-rw-r--r--drivers/net/vxge/vxge-main.c234
-rw-r--r--drivers/net/vxge/vxge-main.h23
-rw-r--r--drivers/net/vxge/vxge-traffic.c116
-rw-r--r--drivers/net/vxge/vxge-traffic.h14
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile5
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.c10
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c8
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig11
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h38
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c48
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h28
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c46
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h15
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c112
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1141
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h164
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c82
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c440
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c169
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h80
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c485
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c776
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c173
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c717
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c308
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c15
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h28
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h25
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h20
-rw-r--r--drivers/net/wireless/ath/key.c5
-rw-r--r--drivers/net/wireless/ath/regd.c7
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c28
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c1106
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h27
-rw-r--r--drivers/net/wireless/b43/xmit.c75
-rw-r--r--drivers/net/wireless/b43/xmit.h6
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c72
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c196
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig116
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile25
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c)11
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h)4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-fh.h)5
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-hw.h)9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.c)31
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.h)2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-rs.c)41
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945.c)267
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945.h)12
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.c967
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-legacy.h)30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-hw.h)26
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c74
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1260
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2870
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)177
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1369
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965.c)815
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-commands.h3405
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2674
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h646
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-csr.h422
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1467
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1426
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c45
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h270
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c561
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h181
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c188
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-prph.h523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c302
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c625
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h92
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c816
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c660
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c (renamed from drivers/net/wireless/iwlwifi/iwl3945-base.c)566
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3632
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig132
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c560
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c170
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c363
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h122
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c103
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c119
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c199
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c662
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c419
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c78
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c7
-rw-r--r--drivers/net/wireless/libertas/cfg.c6
-rw-r--r--drivers/net/wireless/libertas/cmd.c10
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c368
-rw-r--r--drivers/net/wireless/libertas/main.c77
-rw-r--r--drivers/net/wireless/libertas_tf/main.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/net/wireless/mwl8k.c510
-rw-r--r--drivers/net/wireless/orinoco/scan.c5
-rw-r--r--drivers/net/wireless/p54/eeprom.c211
-rw-r--r--drivers/net/wireless/p54/eeprom.h7
-rw-r--r--drivers/net/wireless/p54/fwio.c21
-rw-r--r--drivers/net/wireless/p54/lmac.h3
-rw-r--r--drivers/net/wireless/p54/main.c61
-rw-r--r--drivers/net/wireless/p54/p54.h7
-rw-r--r--drivers/net/wireless/p54/p54pci.c14
-rw-r--r--drivers/net/wireless/p54/p54spi_eeprom.h9
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c19
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c165
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c159
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h139
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c848
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c240
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h41
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c55
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c69
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c238
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c61
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c8
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig24
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile14
-rw-r--r--drivers/net/wireless/rtlwifi/base.c91
-rw-r--r--drivers/net/wireless/rtlwifi/base.h39
-rw-r--r--drivers/net/wireless/rtlwifi/core.c26
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c18
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h3
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c152
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h12
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c1398
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h204
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c (renamed from drivers/net/wireless/rtlwifi/rtl8192ce/fw.c)72
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h (renamed from drivers/net/wireless/rtlwifi/rtl8192ce/fw.h)0
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2042
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h246
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c1364
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c158
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2081
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h73
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c183
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h464
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/Makefile14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c113
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2504
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h116
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.c142
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h180
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c607
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/reg.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c493
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c336
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h53
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c1888
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.h71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c687
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h430
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1035
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h164
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h680
-rw-r--r--drivers/net/wireless/wl1251/acx.c53
-rw-r--r--drivers/net/wireless/wl1251/acx.h72
-rw-r--r--drivers/net/wireless/wl1251/event.c18
-rw-r--r--drivers/net/wireless/wl1251/main.c22
-rw-r--r--drivers/net/wireless/wl1251/ps.c11
-rw-r--r--drivers/net/wireless/wl1251/rx.c51
-rw-r--r--drivers/net/wireless/wl1251/tx.c74
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h7
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c274
-rw-r--r--drivers/net/wireless/wl12xx/acx.h141
-rw-r--r--drivers/net/wireless/wl12xx/boot.c35
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c318
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h161
-rw-r--r--drivers/net/wireless/wl12xx/conf.h125
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c49
-rw-r--r--drivers/net/wireless/wl12xx/event.c21
-rw-r--r--drivers/net/wireless/wl12xx/event.h10
-rw-r--r--drivers/net/wireless/wl12xx/init.c400
-rw-r--r--drivers/net/wireless/wl12xx/init.h2
-rw-r--r--drivers/net/wireless/wl12xx/main.c1314
-rw-r--r--drivers/net/wireless/wl12xx/ps.c84
-rw-r--r--drivers/net/wireless/wl12xx/ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c28
-rw-r--r--drivers/net/wireless/wl12xx/rx.h17
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c1
-rw-r--r--drivers/net/wireless/wl12xx/spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/tx.c324
-rw-r--r--drivers/net/wireless/wl12xx/tx.h15
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h189
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h11
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c169
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c453
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h24
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c597
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h37
-rw-r--r--drivers/net/xilinx_emaclite.c9
-rw-r--r--drivers/nfc/Kconfig2
-rw-r--r--drivers/nfc/pn544.c4
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/device.c34
-rw-r--r--drivers/of/of_pci.c92
-rw-r--r--drivers/of/pdt.c112
-rw-r--r--drivers/of/platform.c461
-rw-r--r--drivers/oprofile/cpu_buffer.c24
-rw-r--r--drivers/oprofile/timer_int.c4
-rw-r--r--drivers/parisc/dino.c22
-rw-r--r--drivers/parisc/eisa.c12
-rw-r--r--drivers/parisc/gsc.c22
-rw-r--r--drivers/parisc/iosapic.c40
-rw-r--r--drivers/parisc/superio.c12
-rw-r--r--drivers/parport/parport_sunbpp.c8
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/intel-iommu.c2
-rw-r--r--drivers/pci/pci-acpi.c16
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c182
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c52
-rw-r--r--drivers/pci/xen-pcifront.c31
-rw-r--r--drivers/pcmcia/electra_cf.c9
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c9
-rw-r--r--drivers/pcmcia/pcmcia_resource.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.h1
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c1
-rw-r--r--drivers/platform/x86/Kconfig18
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c1
-rw-r--r--drivers/platform/x86/hp_accel.c (renamed from drivers/hwmon/hp_accel.c)6
-rw-r--r--drivers/platform/x86/xo15-ebook.c1
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/bq20z75.c278
-rw-r--r--drivers/power/bq27x00_battery.c725
-rw-r--r--drivers/power/ds2782_battery.c1
-rw-r--r--drivers/power/jz4740-battery.c4
-rw-r--r--drivers/power/power_supply_core.c4
-rw-r--r--drivers/power/power_supply_leds.c19
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/power/twl4030_charger.c25
-rw-r--r--drivers/power/z2_battery.c1
-rw-r--r--drivers/pps/kapi.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c12
-rw-r--r--drivers/regulator/ab3100.c3
-rw-r--r--drivers/regulator/core.c5
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c7
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/twl-regulator.c24
-rw-r--r--drivers/regulator/wm831x-dcdc.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-cmos.c45
-rw-r--r--drivers/rtc/rtc-ds3232.c14
-rw-r--r--drivers/rtc/rtc-mpc5121.c9
-rw-r--r--drivers/rtc/rtc-mrst.c13
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c3
-rw-r--r--drivers/s390/block/xpram.c4
-rw-r--r--drivers/s390/char/keyboard.c3
-rw-r--r--drivers/s390/cio/cio.c6
-rw-r--r--drivers/s390/cio/cio.h2
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c52
-rw-r--r--drivers/s390/net/qeth_l2_main.c45
-rw-r--r--drivers/s390/net/qeth_l3_main.c55
-rw-r--r--drivers/s390/scsi/zfcp_aux.c80
-rw-r--r--drivers/s390/scsi/zfcp_def.h15
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_ext.h9
-rw-r--r--drivers/s390/scsi/zfcp_fc.c333
-rw-r--r--drivers/s390/scsi/zfcp_fc.h124
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c27
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c71
-rw-r--r--drivers/sbus/char/bbc_i2c.c9
-rw-r--r--drivers/sbus/char/display7seg.c9
-rw-r--r--drivers/sbus/char/envctrl.c9
-rw-r--r--drivers/sbus/char/flash.c9
-rw-r--r--drivers/sbus/char/uctrl.c9
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/NCR5380.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c18
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c5
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h1080
-rw-r--r--drivers/scsi/bnx2fc/Kconfig11
-rw-r--r--drivers/scsi/bnx2fc/Makefile3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h511
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h206
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h70
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c515
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2535
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c1868
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1833
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c844
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c125
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c29
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c53
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c56
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.h19
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c68
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c112
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c18
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c7
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c25
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c621
-rw-r--r--drivers/scsi/fcoe/fcoe.h50
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c (renamed from drivers/scsi/fcoe/libfcoe.c)40
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c770
-rw-r--r--drivers/scsi/fcoe/libfcoe.h31
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c2
-rw-r--r--drivers/scsi/hpsa.c325
-rw-r--r--drivers/scsi/hpsa.h9
-rw-r--r--drivers/scsi/hpsa_cmd.h4
-rw-r--r--drivers/scsi/ipr.c18
-rw-r--r--drivers/scsi/iscsi_tcp.c134
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c111
-rw-r--r--drivers/scsi/libfc/fc_fcp.c39
-rw-r--r--drivers/scsi/libfc/fc_libfc.c120
-rw-r--r--drivers/scsi/libfc/fc_libfc.h14
-rw-r--r--drivers/scsi/libfc/fc_lport.c69
-rw-r--r--drivers/scsi/libfc/fc_npiv.c10
-rw-r--r--drivers/scsi/libfc/fc_rport.c191
-rw-r--r--drivers/scsi/libiscsi.c44
-rw-r--r--drivers/scsi/libsas/Kconfig8
-rw-r--r--drivers/scsi/libsas/Makefile4
-rw-r--r--drivers/scsi/libsas/sas_ata.c153
-rw-r--r--drivers/scsi/libsas/sas_dump.c4
-rw-r--r--drivers/scsi/libsas/sas_dump.h12
-rw-r--r--drivers/scsi/libsas/sas_expander.c5
-rw-r--r--drivers/scsi/libsas/sas_internal.h6
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c14
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c123
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c962
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c173
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h104
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c134
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c113
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c217
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c149
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c19
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h5
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h6
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt384
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c128
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h40
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c20
-rw-r--r--drivers/scsi/osst.c10
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c37
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c27
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c45
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c191
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c59
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/qlogicpti.c14
-rw-r--r--drivers/scsi/scsi_devinfo.c85
-rw-r--r--drivers/scsi/scsi_error.c24
-rw-r--r--drivers/scsi/scsi_lib.c28
-rw-r--r--drivers/scsi/scsi_priv.h4
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_tgt_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c104
-rw-r--r--drivers/scsi/sun_esp.c8
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/amba-pl022.c75
-rw-r--r--drivers/spi/davinci_spi.c11
-rw-r--r--drivers/spi/mpc512x_psc_spi.c9
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c9
-rw-r--r--drivers/spi/mpc52xx_spi.c9
-rw-r--r--drivers/spi/omap2_mcspi.c251
-rw-r--r--drivers/spi/pxa2xx_spi.c2
-rw-r--r--drivers/spi/pxa2xx_spi_pci.c2
-rw-r--r--drivers/spi/spi_altera.c339
-rw-r--r--drivers/spi/spi_bfin5xx.c104
-rw-r--r--drivers/spi/spi_bitbang.c13
-rw-r--r--drivers/spi/spi_fsl_espi.c11
-rw-r--r--drivers/spi/spi_fsl_lib.c3
-rw-r--r--drivers/spi/spi_fsl_lib.h3
-rw-r--r--drivers/spi/spi_fsl_spi.c11
-rw-r--r--drivers/spi/spi_imx.c12
-rw-r--r--drivers/spi/spi_oc_tiny.c425
-rw-r--r--drivers/spi/spi_ppc4xx.c9
-rw-r--r--drivers/spi/spi_sh.c543
-rw-r--r--drivers/spi/spi_sh_msiof.c127
-rw-r--r--drivers/spi/spidev.c12
-rw-r--r--drivers/spi/xilinx_spi.c9
-rw-r--r--drivers/ssb/main.c44
-rw-r--r--drivers/ssb/pci.c6
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c4
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.c28
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.c5
-rw-r--r--drivers/staging/cxd2099/Kconfig11
-rw-r--r--drivers/staging/cxd2099/Makefile5
-rw-r--r--drivers/staging/cxd2099/TODO12
-rw-r--r--drivers/staging/cxd2099/cxd2099.c574
-rw-r--r--drivers/staging/cxd2099/cxd2099.h41
-rw-r--r--drivers/staging/winbond/wbusb.c7
-rw-r--r--drivers/target/target_core_hba.c1
-rw-r--r--drivers/target/target_core_tmr.c5
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/thermal_sys.c40
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/serial/Kconfig42
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c15
-rw-r--r--drivers/tty/serial/altera_uart.c51
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/apbuart.c11
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c9
-rw-r--r--drivers/tty/serial/kgdboc.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c13
-rw-r--r--drivers/tty/serial/msm_serial.c286
-rw-r--r--drivers/tty/serial/msm_serial.h28
-rw-r--r--drivers/tty/serial/mxs-auart.c798
-rw-r--r--drivers/tty/serial/of_serial.c14
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/tty/serial/serial_cs.c1
-rw-r--r--drivers/tty/serial/serial_lh7a40x.c682
-rw-r--r--drivers/tty/serial/sh-sci.c425
-rw-r--r--drivers/tty/serial/sh-sci.h39
-rw-r--r--drivers/tty/serial/sunhv.c8
-rw-r--r--drivers/tty/serial/sunsab.c8
-rw-r--r--drivers/tty/serial/sunsu.c6
-rw-r--r--drivers/tty/serial/sunzilog.c10
-rw-r--r--drivers/tty/serial/uartlite.c103
-rw-r--r--drivers/tty/serial/ucc_uart.c9
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/core/hub.c30
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/gadget/Kconfig12
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/f_phonet.c15
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c14
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.c2152
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.h259
-rw-r--r--drivers/usb/host/ehci-hcd.c12
-rw-r--r--drivers/usb/host/ehci-mxc.c5
-rw-r--r--drivers/usb/host/ehci-ppc-of.c9
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c7
-rw-r--r--drivers/usb/host/fhci-hcd.c9
-rw-r--r--drivers/usb/host/isp1760-if.c9
-rw-r--r--drivers/usb/host/ohci-hcd.c11
-rw-r--r--drivers/usb/host/ohci-lh7a404.c252
-rw-r--r--drivers/usb/host/ohci-ppc-of.c9
-rw-r--r--drivers/usb/host/ohci-tmio.c8
-rw-r--r--drivers/usb/host/ohci.h10
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/xhci-dbg.c9
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c40
-rw-r--r--drivers/usb/host/xhci.c14
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/musb/musb_core.h23
-rw-r--r--drivers/usb/musb/musbhsdma.h2
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c15
-rw-r--r--drivers/usb/serial/visor.c12
-rw-r--r--drivers/usb/wusbcore/rh.c2
-rw-r--r--drivers/vbus/Kconfig25
-rw-r--r--drivers/vbus/Makefile6
-rw-r--r--drivers/vbus/bus-proxy.c248
-rw-r--r--drivers/vbus/pci-bridge.c1016
-rw-r--r--drivers/video/Kconfig101
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/amba-clcd.c105
-rw-r--r--drivers/video/bw2.c8
-rw-r--r--drivers/video/cg14.c8
-rw-r--r--drivers/video/cg3.c9
-rw-r--r--drivers/video/cg6.c9
-rw-r--r--drivers/video/cyber2000fb.c263
-rw-r--r--drivers/video/cyber2000fb.h16
-rw-r--r--drivers/video/fb-puv3.c846
-rw-r--r--drivers/video/ffb.c9
-rw-r--r--drivers/video/fsl-diu-fb.c9
-rw-r--r--drivers/video/leo.c9
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c9
-rw-r--r--drivers/video/msm/msm_fb.c11
-rw-r--r--drivers/video/mxsfb.c914
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c25
-rw-r--r--drivers/video/omap2/dss/Kconfig6
-rw-r--r--drivers/video/omap2/dss/core.c450
-rw-r--r--drivers/video/omap2/dss/dispc.c160
-rw-r--r--drivers/video/omap2/dss/dpi.c37
-rw-r--r--drivers/video/omap2/dss/dsi.c116
-rw-r--r--drivers/video/omap2/dss/dss.c459
-rw-r--r--drivers/video/omap2/dss/dss.h56
-rw-r--r--drivers/video/omap2/dss/manager.c4
-rw-r--r--drivers/video/omap2/dss/overlay.c4
-rw-r--r--drivers/video/omap2/dss/rfbi.c128
-rw-r--r--drivers/video/omap2/dss/sdi.c26
-rw-r--r--drivers/video/omap2/dss/venc.c122
-rw-r--r--drivers/video/omap2/omapfb/Kconfig6
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c23
-rw-r--r--drivers/video/p9100.c8
-rw-r--r--drivers/video/platinumfb.c9
-rw-r--r--drivers/video/sunxvr1000.c9
-rw-r--r--drivers/video/tcx.c9
-rw-r--r--drivers/video/tmiofb.c28
-rw-r--r--drivers/video/xilinxfb.c11
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/ds1wm.c6
-rw-r--r--drivers/watchdog/Kconfig42
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/alim1535_wdt.c10
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c4
-rw-r--r--drivers/watchdog/bfin_wdt.c4
-rw-r--r--drivers/watchdog/booke_wdt.c19
-rw-r--r--drivers/watchdog/cpwd.c47
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/gef_wdt.c9
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/i6300esb.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c4
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c572
-rw-r--r--drivers/watchdog/intel_scu_watchdog.h66
-rw-r--r--drivers/watchdog/it8712f_wdt.c2
-rw-r--r--drivers/watchdog/it87_wdt.c28
-rw-r--r--drivers/watchdog/jz4740_wdt.c322
-rw-r--r--drivers/watchdog/machzwd.c2
-rw-r--r--drivers/watchdog/max63xx_wdt.c2
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c21
-rw-r--r--drivers/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c14
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/omap_wdt.h2
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c3
-rw-r--r--drivers/watchdog/riowd.c9
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c2
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c365
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c4
-rw-r--r--drivers/watchdog/softdog.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c2
-rw-r--r--drivers/watchdog/w83697ug_wdt.c6
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wdt_pci.c6
-rw-r--r--drivers/watchdog/xen_wdt.c359
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/balloon.c16
-rw-r--r--drivers/xen/events.c380
-rw-r--r--drivers/xen/gntalloc.c545
-rw-r--r--drivers/xen/gntdev.c387
-rw-r--r--drivers/xen/grant-table.c6
-rw-r--r--drivers/xen/manage.c153
-rw-r--r--drivers/xen/platform-pci.c3
-rw-r--r--fs/9p/acl.c28
-rw-r--r--fs/9p/cache.c204
-rw-r--r--fs/9p/cache.h64
-rw-r--r--fs/9p/fid.c114
-rw-r--r--fs/9p/fid.h5
-rw-r--r--fs/9p/v9fs.c108
-rw-r--r--fs/9p/v9fs.h53
-rw-r--r--fs/9p/v9fs_vfs.h26
-rw-r--r--fs/9p/vfs_addr.c194
-rw-r--r--fs/9p/vfs_dentry.c47
-rw-r--r--fs/9p/vfs_dir.c1
-rw-r--r--fs/9p/vfs_file.c316
-rw-r--r--fs/9p/vfs_inode.c307
-rw-r--r--fs/9p/vfs_inode_dotl.c198
-rw-r--r--fs/9p/vfs_super.c65
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile1
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/aio.c56
-rw-r--r--fs/block_dev.c19
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c9
-rw-r--r--fs/btrfs/extent_io.c138
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/inode.c139
-rw-r--r--fs/btrfs/ioctl.c7
-rw-r--r--fs/btrfs/lzo.c21
-rw-r--r--fs/btrfs/relocation.c13
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/btrfs/xattr.c6
-rw-r--r--fs/btrfs/xattr.h3
-rw-r--r--fs/cachefiles/namei.c53
-rw-r--r--fs/cachefiles/xattr.c1
-rw-r--r--fs/ceph/dir.c5
-rw-r--r--fs/ceph/snap.c14
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/cifs/Kconfig34
-rw-r--r--fs/cifs/README28
-rw-r--r--fs/cifs/TODO2
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifs_debug.c215
-rw-r--r--fs/cifs/cifs_debug.h12
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifs_fs_sb.h4
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifs_spnego.h2
-rw-r--r--fs/cifs/cifsacl.c4
-rw-r--r--fs/cifs/cifsacl.h2
-rw-r--r--fs/cifs/cifsencrypt.c20
-rw-r--r--fs/cifs/cifsfs.c25
-rw-r--r--fs/cifs/cifsfs.h8
-rw-r--r--fs/cifs/cifsglob.h125
-rw-r--r--fs/cifs/cifsproto.h169
-rw-r--r--fs/cifs/cifssmb.c395
-rw-r--r--fs/cifs/connect.c132
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/export.c4
-rw-r--r--fs/cifs/file.c38
-rw-r--r--fs/cifs/fscache.c6
-rw-r--r--fs/cifs/fscache.h8
-rw-r--r--fs/cifs/inode.c42
-rw-r--r--fs/cifs/ioctl.c2
-rw-r--r--fs/cifs/link.c12
-rw-r--r--fs/cifs/misc.c35
-rw-r--r--fs/cifs/netmisc.c8
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c69
-rw-r--r--fs/cifs/smb2glob.h258
-rw-r--r--fs/cifs/smb2pdu.h996
-rw-r--r--fs/cifs/transport.c24
-rw-r--r--fs/cifs/xattr.c8
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/direct-io.c6
-rw-r--r--fs/ecryptfs/dentry.c22
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h3
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/ecryptfs/inode.c138
-rw-r--r--fs/eventfd.c12
-rw-r--r--fs/eventpoll.c107
-rw-r--r--fs/exofs/dir.c15
-rw-r--r--fs/exofs/exofs.h2
-rw-r--r--fs/exofs/file.c5
-rw-r--r--fs/exofs/inode.c45
-rw-r--r--fs/exofs/super.c18
-rw-r--r--fs/ext2/ext2.h2
-rw-r--r--fs/ext2/ialloc.c5
-rw-r--r--fs/ext2/namei.c17
-rw-r--r--fs/ext2/xattr.h6
-rw-r--r--fs/ext2/xattr_security.c5
-rw-r--r--fs/ext3/balloc.c10
-rw-r--r--fs/ext3/ialloc.c5
-rw-r--r--fs/ext3/namei.c8
-rw-r--r--fs/ext3/super.c7
-rw-r--r--fs/ext3/xattr.h4
-rw-r--r--fs/ext3/xattr_security.c5
-rw-r--r--fs/ext4/extents.c199
-rw-r--r--fs/ext4/ialloc.c4
-rw-r--r--fs/ext4/inode.c339
-rw-r--r--fs/ext4/ioctl.c6
-rw-r--r--fs/ext4/mballoc.c21
-rw-r--r--fs/ext4/mballoc.h2
-rw-r--r--fs/ext4/migrate.c10
-rw-r--r--fs/ext4/page-io.c13
-rw-r--r--fs/ext4/resize.c4
-rw-r--r--fs/ext4/super.c51
-rw-r--r--fs/ext4/xattr.c2
-rw-r--r--fs/ext4/xattr.h4
-rw-r--r--fs/ext4/xattr_security.c5
-rw-r--r--fs/file_table.c5
-rw-r--r--fs/fuse/cuse.c14
-rw-r--r--fs/fuse/dir.c7
-rw-r--r--fs/fuse/file.c52
-rw-r--r--fs/fuse/fuse_i.h7
-rw-r--r--fs/gfs2/acl.c7
-rw-r--r--fs/gfs2/bmap.c20
-rw-r--r--fs/gfs2/file.c15
-rw-r--r--fs/gfs2/glock.c392
-rw-r--r--fs/gfs2/glock.h39
-rw-r--r--fs/gfs2/glops.c23
-rw-r--r--fs/gfs2/incore.h5
-rw-r--r--fs/gfs2/inode.c7
-rw-r--r--fs/gfs2/lock_dlm.c14
-rw-r--r--fs/gfs2/lops.c3
-rw-r--r--fs/gfs2/main.c15
-rw-r--r--fs/gfs2/ops_fstype.c7
-rw-r--r--fs/gfs2/ops_inode.c10
-rw-r--r--fs/gfs2/quota.c6
-rw-r--r--fs/gfs2/rgrp.c34
-rw-r--r--fs/gfs2/rgrp.h2
-rw-r--r--fs/hfsplus/extents.c9
-rw-r--r--fs/hfsplus/super.c9
-rw-r--r--fs/hfsplus/unicode.c35
-rw-r--r--fs/hfsplus/wrapper.c9
-rw-r--r--fs/inode.c31
-rw-r--r--fs/internal.h2
-rw-r--r--fs/jbd/journal.c2
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jffs2/dir.c9
-rw-r--r--fs/jffs2/nodelist.h2
-rw-r--r--fs/jffs2/security.c5
-rw-r--r--fs/jffs2/write.c18
-rw-r--r--fs/jffs2/xattr.h5
-rw-r--r--fs/jfs/jfs_xattr.h5
-rw-r--r--fs/jfs/namei.c8
-rw-r--r--fs/jfs/xattr.c6
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/nilfs2/alloc.c12
-rw-r--r--fs/nilfs2/bmap.c11
-rw-r--r--fs/nilfs2/bmap.h3
-rw-r--r--fs/nilfs2/btnode.c5
-rw-r--r--fs/nilfs2/btnode.h1
-rw-r--r--fs/nilfs2/btree.c6
-rw-r--r--fs/nilfs2/dir.c5
-rw-r--r--fs/nilfs2/direct.c4
-rw-r--r--fs/nilfs2/file.c2
-rw-r--r--fs/nilfs2/inode.c43
-rw-r--r--fs/nilfs2/ioctl.c92
-rw-r--r--fs/nilfs2/mdt.c4
-rw-r--r--fs/nilfs2/nilfs.h20
-rw-r--r--fs/nilfs2/page.c13
-rw-r--r--fs/nilfs2/page.h1
-rw-r--r--fs/nilfs2/segment.c3
-rw-r--r--fs/nilfs2/super.c2
-rw-r--r--fs/nilfs2/the_nilfs.c11
-rw-r--r--fs/notify/dnotify/dnotify.c15
-rw-r--r--fs/notify/fanotify/fanotify.c24
-rw-r--r--fs/notify/fanotify/fanotify_user.c103
-rw-r--r--fs/notify/group.c1
-rw-r--r--fs/notify/inotify/inotify_user.c2
-rw-r--r--fs/notify/mark.c36
-rw-r--r--fs/ocfs2/dir.c14
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c28
-rw-r--r--fs/ocfs2/dlmglue.c97
-rw-r--r--fs/ocfs2/ioctl.c38
-rw-r--r--fs/ocfs2/journal.h6
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/ocfs2/ocfs2.h23
-rw-r--r--fs/ocfs2/quota.h3
-rw-r--r--fs/ocfs2/quota_global.c27
-rw-r--r--fs/ocfs2/refcounttree.c10
-rw-r--r--fs/ocfs2/super.c35
-rw-r--r--fs/ocfs2/xattr.c10
-rw-r--r--fs/ocfs2/xattr.h4
-rw-r--r--fs/open.c3
-rw-r--r--fs/partitions/ldm.c5
-rw-r--r--fs/proc/proc_devtree.c2
-rw-r--r--fs/proc/proc_sysctl.c1
-rw-r--r--fs/pstore/Kconfig13
-rw-r--r--fs/pstore/Makefile7
-rw-r--r--fs/pstore/inode.c285
-rw-r--r--fs/pstore/internal.h7
-rw-r--r--fs/pstore/platform.c202
-rw-r--r--fs/quota/quota_v2.c2
-rw-r--r--fs/reiserfs/journal.c2
-rw-r--r--fs/reiserfs/namei.c9
-rw-r--r--fs/reiserfs/xattr_security.c3
-rw-r--r--fs/ubifs/commit.c58
-rw-r--r--fs/ubifs/debug.c6
-rw-r--r--fs/ubifs/io.c12
-rw-r--r--fs/ubifs/recovery.c23
-rw-r--r--fs/ubifs/scan.c2
-rw-r--r--fs/ubifs/super.c11
-rw-r--r--fs/ubifs/tnc.c10
-rw-r--r--fs/ubifs/ubifs.h21
-rw-r--r--fs/udf/balloc.c4
-rw-r--r--fs/udf/file.c7
-rw-r--r--fs/udf/inode.c239
-rw-r--r--fs/udf/truncate.c146
-rw-r--r--fs/udf/udfdecl.h12
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c5
-rw-r--r--fs/xfs/linux-2.6/xfs_discard.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c11
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c1
-rw-r--r--fs/xfs/quota/xfs_qm.c7
-rw-r--r--fs/xfs/xfs_bmap.c7
-rw-r--r--fs/xfs/xfs_fsops.c3
-rw-r--r--fs/xfs/xfs_inode.c5
-rw-r--r--fs/xfs/xfs_inode.h23
-rw-r--r--fs/xfs/xfs_mru_cache.c2
-rw-r--r--fs/xfs/xfs_rtalloc.c84
-rw-r--r--fs/xfs/xfs_rw.c18
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_inode.c22
-rw-r--r--fs/xfs/xfs_vnodeops.c61
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/apei.h5
-rw-r--r--include/asm-generic/cputime.h3
-rw-r--r--include/asm-generic/errno.h2
-rw-r--r--include/asm-generic/ftrace.h16
-rw-r--r--include/asm-generic/io.h33
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/asm-generic/sizes.h47
-rw-r--r--include/asm-generic/uaccess.h8
-rw-r--r--include/asm-generic/user.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h35
-rw-r--r--include/drm/Kbuild1
-rw-r--r--include/drm/drm.h4
-rw-r--r--include/drm/drmP.h122
-rw-r--r--include/drm/drm_crtc.h13
-rw-r--r--include/drm/drm_hashtab.h6
-rw-r--r--include/drm/drm_mm.h49
-rw-r--r--include/drm/drm_mode.h29
-rw-r--r--include/drm/drm_usb.h15
-rw-r--r--include/drm/i830_drm.h342
-rw-r--r--include/drm/nouveau_drm.h1
-rw-r--r--include/drm/radeon_drm.h1
-rw-r--r--include/drm/ttm/ttm_bo_driver.h6
-rw-r--r--include/drm/ttm/ttm_page_alloc.h8
-rw-r--r--include/linux/Kbuild4
-rw-r--r--include/linux/acpi_io.h3
-rw-r--r--include/linux/aer.h24
-rw-r--r--include/linux/amba/bus.h8
-rw-r--r--include/linux/amba/clcd.h90
-rw-r--r--include/linux/amba/mmci.h17
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/bitmap.h1
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h23
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/cgroup_subsys.h4
-rw-r--r--include/linux/cper.h2
-rw-r--r--include/linux/cpu_rmap.h73
-rw-r--r--include/linux/dcbnl.h7
-rw-r--r--include/linux/dccp.h2
-rw-r--r--include/linux/device.h9
-rw-r--r--include/linux/dm-ioctl.h12
-rw-r--r--include/linux/dw_dmac.h5
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/ethtool.h91
-rw-r--r--include/linux/ext3_fs.h3
-rw-r--r--include/linux/fanotify.h5
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/firewire.h2
-rw-r--r--include/linux/fs.h27
-rw-r--r--include/linux/fsnotify_backend.h14
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/genalloc.h46
-rw-r--r--include/linux/hid-roccat.h (renamed from drivers/hid/hid-roccat.h)19
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/hidraw.h3
-rw-r--r--include/linux/hwspinlock.h292
-rw-r--r--include/linux/i2c-id.h37
-rw-r--r--include/linux/i2c-tegra.h25
-rw-r--r--include/linux/i2c.h19
-rw-r--r--include/linux/i2c/ads1015.h28
-rw-r--r--include/linux/i2c/atmel_mxt_ts.h (renamed from include/linux/i2c/qt602240_ts.h)34
-rw-r--r--include/linux/i2c/max6639.h14
-rw-r--r--include/linux/i2c/mcs.h1
-rw-r--r--include/linux/i2c/pmbus.h45
-rw-r--r--include/linux/i2c/pxa-i2c.h (renamed from arch/arm/plat-pxa/include/plat/i2c.h)0
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/i2c/twl4030-madc.h141
-rw-r--r--include/linux/if.h9
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/input-polldev.h4
-rw-r--r--include/linux/input.h12
-rw-r--r--include/linux/interrupt.h85
-rw-r--r--include/linux/ioq.h414
-rw-r--r--include/linux/ip_vs.h8
-rw-r--r--include/linux/irq.h366
-rw-r--r--include/linux/irqdesc.h73
-rw-r--r--include/linux/jiffies.h1
-rw-r--r--include/linux/kthread.h2
-rw-r--r--include/linux/kvm_host.h31
-rw-r--r--include/linux/leds.h3
-rw-r--r--include/linux/libata.h7
-rw-r--r--include/linux/llist.h98
-rw-r--r--include/linux/magic.h1
-rw-r--r--include/linux/mfd/ab8500-gpadc.h28
-rw-r--r--include/linux/mfd/ab8500.h4
-rw-r--r--include/linux/mfd/ab8500/sysctrl.h254
-rw-r--r--include/linux/mfd/abx500.h1
-rw-r--r--include/linux/mfd/core.h54
-rw-r--r--include/linux/mfd/mc13xxx.h3
-rw-r--r--include/linux/mfd/wm831x/pdata.h9
-rw-r--r--include/linux/mfd/wm8994/core.h1
-rw-r--r--include/linux/mfd/wm8994/pdata.h12
-rw-r--r--include/linux/mfd/wm8994/registers.h2
-rw-r--r--include/linux/micrel_phy.h16
-rw-r--r--include/linux/mm.h15
-rw-r--r--include/linux/mmc/card.h6
-rw-r--r--include/linux/mmc/dw_mmc.h8
-rw-r--r--include/linux/mmc/mmc.h3
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/module.h30
-rw-r--r--include/linux/moduleparam.h7
-rw-r--r--include/linux/mtd/onenand_regs.h1
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdevice.h212
-rw-r--r--include/linux/netfilter.h27
-rw-r--r--include/linux/netfilter/Kbuild6
-rw-r--r--include/linux/netfilter/ipset/Kbuild4
-rw-r--r--include/linux/netfilter/ipset/ip_set.h452
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h1074
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h31
-rw-r--r--include/linux/netfilter/ipset/ip_set_getport.h21
-rw-r--r--include/linux/netfilter/ipset/ip_set_hash.h26
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h27
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h127
-rw-r--r--include/linux/netfilter/ipset/pfxlen.h35
-rw-r--r--include/linux/netfilter/nf_conntrack_snmp.h9
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h9
-rw-r--r--include/linux/netfilter/x_tables.h3
-rw-r--r--include/linux/netfilter/xt_AUDIT.h30
-rw-r--r--include/linux/netfilter/xt_CT.h12
-rw-r--r--include/linux/netfilter/xt_NFQUEUE.h6
-rw-r--r--include/linux/netfilter/xt_TCPOPTSTRIP.h4
-rw-r--r--include/linux/netfilter/xt_TPROXY.h10
-rw-r--r--include/linux/netfilter/xt_cluster.h10
-rw-r--r--include/linux/netfilter/xt_comment.h2
-rw-r--r--include/linux/netfilter/xt_connlimit.h16
-rw-r--r--include/linux/netfilter/xt_conntrack.h15
-rw-r--r--include/linux/netfilter/xt_devgroup.h21
-rw-r--r--include/linux/netfilter/xt_quota.h8
-rw-r--r--include/linux/netfilter/xt_set.h56
-rw-r--r--include/linux/netfilter/xt_socket.h2
-rw-r--r--include/linux/netfilter/xt_time.h16
-rw-r--r--include/linux/netfilter/xt_u32.h18
-rw-r--r--include/linux/netfilter_bridge/ebt_802_3.h26
-rw-r--r--include/linux/netfilter_bridge/ebt_among.h4
-rw-r--r--include/linux/netfilter_bridge/ebt_arp.h6
-rw-r--r--include/linux/netfilter_bridge/ebt_ip.h14
-rw-r--r--include/linux/netfilter_bridge/ebt_ip6.h25
-rw-r--r--include/linux/netfilter_bridge/ebt_limit.h10
-rw-r--r--include/linux/netfilter_bridge/ebt_log.h8
-rw-r--r--include/linux/netfilter_bridge/ebt_mark_m.h6
-rw-r--r--include/linux/netfilter_bridge/ebt_nflog.h12
-rw-r--r--include/linux/netfilter_bridge/ebt_pkttype.h6
-rw-r--r--include/linux/netfilter_bridge/ebt_stp.h26
-rw-r--r--include/linux/netfilter_bridge/ebt_ulog.h4
-rw-r--r--include/linux/netfilter_bridge/ebt_vlan.h10
-rw-r--r--include/linux/netfilter_ipv4/ipt_CLUSTERIP.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_ECN.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_SAME.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_TTL.h6
-rw-r--r--include/linux/netfilter_ipv4/ipt_addrtype.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_ah.h6
-rw-r--r--include/linux/netfilter_ipv4/ipt_ecn.h10
-rw-r--r--include/linux/netfilter_ipv4/ipt_ttl.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_HL.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_REJECT.h4
-rw-r--r--include/linux/netfilter_ipv6/ip6t_ah.h10
-rw-r--r--include/linux/netfilter_ipv6/ip6t_frag.h10
-rw-r--r--include/linux/netfilter_ipv6/ip6t_hl.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_ipv6header.h8
-rw-r--r--include/linux/netfilter_ipv6/ip6t_mh.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_opts.h12
-rw-r--r--include/linux/netfilter_ipv6/ip6t_rt.h13
-rw-r--r--include/linux/nilfs2_fs.h26
-rw-r--r--include/linux/nl80211.h3
-rw-r--r--include/linux/of.h18
-rw-r--r--include/linux/of_device.h5
-rw-r--r--include/linux/of_pci.h9
-rw-r--r--include/linux/of_platform.h18
-rw-r--r--include/linux/oprofile.h7
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/percpu.h128
-rw-r--r--include/linux/perf_event.h37
-rw-r--r--include/linux/pkt_sched.h107
-rw-r--r--include/linux/pm.h21
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pm_wakeup.h33
-rw-r--r--include/linux/posix-clock.h150
-rw-r--r--include/linux/posix-timers.h44
-rw-r--r--include/linux/power/bq20z75.h39
-rw-r--r--include/linux/power/bq27x00_battery.h19
-rw-r--r--include/linux/power_supply.h47
-rw-r--r--include/linux/pstore.h60
-rw-r--r--include/linux/reiserfs_xattr.h2
-rw-r--r--include/linux/rio_regs.h4
-rw-r--r--include/linux/rwlock_types.h8
-rw-r--r--include/linux/rwsem-spinlock.h31
-rw-r--r--include/linux/rwsem.h53
-rw-r--r--include/linux/sched.h22
-rw-r--r--include/linux/security.h38
-rw-r--r--include/linux/serial_sci.h16
-rw-r--r--include/linux/shm_signal.h189
-rw-r--r--include/linux/skbuff.h11
-rw-r--r--include/linux/slab.h1
-rw-r--r--include/linux/spi/spi_oc_tiny.h20
-rw-r--r--include/linux/spinlock_types.h8
-rw-r--r--include/linux/ssb/ssb_regs.h7
-rw-r--r--include/linux/syscalls.h12
-rw-r--r--include/linux/thermal.h8
-rw-r--r--include/linux/thread_info.h3
-rw-r--r--include/linux/time.h11
-rw-r--r--include/linux/timex.h3
-rw-r--r--include/linux/tipc.h8
-rw-r--r--include/linux/tipc_config.h32
-rw-r--r--include/linux/vbus_driver.h83
-rw-r--r--include/linux/vbus_pci.h145
-rw-r--r--include/linux/venet.h133
-rw-r--r--include/linux/videodev2.h131
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/linux/xattr.h2
-rw-r--r--include/linux/xfrm.h1
-rw-r--r--include/media/noon010pc30.h28
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/s5p_fimc.h (renamed from include/media/s3c_fimc.h)20
-rw-r--r--include/media/tuner.h1
-rw-r--r--include/media/v4l2-chip-ident.h3
-rw-r--r--include/media/v4l2-ioctl.h16
-rw-r--r--include/media/v4l2-mem2mem.h56
-rw-r--r--include/media/videobuf2-core.h380
-rw-r--r--include/media/videobuf2-dma-contig.h29
-rw-r--r--include/media/videobuf2-dma-sg.h32
-rw-r--r--include/media/videobuf2-memops.h45
-rw-r--r--include/media/videobuf2-vmalloc.h20
-rw-r--r--include/net/9p/9p.h12
-rw-r--r--include/net/9p/client.h1
-rw-r--r--include/net/9p/transport.h9
-rw-r--r--include/net/bluetooth/bluetooth.h33
-rw-r--r--include/net/bluetooth/hci.h139
-rw-r--r--include/net/bluetooth/hci_core.h173
-rw-r--r--include/net/bluetooth/l2cap.h53
-rw-r--r--include/net/bluetooth/mgmt.h171
-rw-r--r--include/net/bluetooth/smp.h76
-rw-r--r--include/net/cfg80211.h8
-rw-r--r--include/net/dst.h134
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/flow.h11
-rw-r--r--include/net/icmp.h3
-rw-r--r--include/net/ieee80211_radiotap.h25
-rw-r--r--include/net/inet_sock.h31
-rw-r--r--include/net/inetpeer.h44
-rw-r--r--include/net/ip.h16
-rw-r--r--include/net/ip6_fib.h1
-rw-r--r--include/net/ip_fib.h27
-rw-r--r--include/net/ip_vs.h315
-rw-r--r--include/net/ipv6.h28
-rw-r--r--include/net/mac80211.h99
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h23
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h12
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h10
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h6
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h2
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h65
-rw-r--r--include/net/netfilter/nf_nat.h6
-rw-r--r--include/net/netfilter/nf_nat_core.h4
-rw-r--r--include/net/netfilter/nf_tproxy_core.h12
-rw-r--r--include/net/netlink.h9
-rw-r--r--include/net/netns/conntrack.h4
-rw-r--r--include/net/netns/ip_vs.h143
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/phonet/pep.h22
-rw-r--r--include/net/phonet/phonet.h1
-rw-r--r--include/net/protocol.h4
-rw-r--r--include/net/route.h37
-rw-r--r--include/net/sch_generic.h61
-rw-r--r--include/net/sock.h9
-rw-r--r--include/net/tcp.h16
-rw-r--r--include/net/udp.h13
-rw-r--r--include/net/udplite.h12
-rw-r--r--include/net/xfrm.h137
-rw-r--r--include/pcmcia/ds.h1
-rw-r--r--include/scsi/fc/fc_ns.h11
-rw-r--r--include/scsi/fc_encode.h26
-rw-r--r--include/scsi/libfc.h74
-rw-r--r--include/scsi/libfcoe.h105
-rw-r--r--include/scsi/libiscsi.h8
-rw-r--r--include/scsi/sas_ata.h22
-rw-r--r--include/scsi/scsi.h5
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_transport_iscsi.h6
-rw-r--r--include/sound/ac97_codec.h5
-rw-r--r--include/sound/cs4271.h24
-rw-r--r--include/sound/hdspm.h179
-rw-r--r--include/sound/mixer_oss.h3
-rw-r--r--include/sound/pcm.h91
-rw-r--r--include/sound/sh_fsi.h76
-rw-r--r--include/sound/soc-dapm.h16
-rw-r--r--include/sound/soc.h115
-rw-r--r--include/sound/version.h2
-rw-r--r--include/sound/wm8903.h30
-rw-r--r--include/sound/wm9081.h9
-rw-r--r--include/target/target_core_transport.h2
-rw-r--r--include/trace/events/asoc.h25
-rw-r--r--include/xen/events.h14
-rw-r--r--include/xen/gntalloc.h82
-rw-r--r--include/xen/gntdev.h31
-rw-r--r--include/xen/interface/io/blkif.h37
-rw-r--r--include/xen/interface/sched.h34
-rw-r--r--include/xen/interface/xen.h4
-rw-r--r--include/xen/xen-ops.h6
-rw-r--r--init/Kconfig16
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/cgroup.c54
-rw-r--r--kernel/compat.c136
-rw-r--r--kernel/cred.c2
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/futex.c22
-rw-r--r--kernel/gcov/Kconfig2
-rw-r--r--kernel/hrtimer.c13
-rw-r--r--kernel/irq/Kconfig29
-rw-r--r--kernel/irq/autoprobe.c54
-rw-r--r--kernel/irq/chip.c483
-rw-r--r--kernel/irq/compat.h72
-rw-r--r--kernel/irq/debug.h40
-rw-r--r--kernel/irq/handle.c144
-rw-r--r--kernel/irq/internals.h173
-rw-r--r--kernel/irq/irqdesc.c79
-rw-r--r--kernel/irq/manage.c604
-rw-r--r--kernel/irq/migration.c38
-rw-r--r--kernel/irq/pm.c12
-rw-r--r--kernel/irq/proc.c70
-rw-r--r--kernel/irq/resend.c19
-rw-r--r--kernel/irq/settings.h138
-rw-r--r--kernel/irq/spurious.c163
-rw-r--r--kernel/params.c9
-rw-r--r--kernel/perf_event.c1020
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/pm_qos_params.c24
-rw-r--r--kernel/posix-cpu-timers.c110
-rw-r--r--kernel/posix-timers.c321
-rw-r--r--kernel/power/Kconfig237
-rw-r--r--kernel/power/main.c3
-rw-r--r--kernel/rcupdate.c10
-rw-r--r--kernel/rcutiny_plugin.h2
-rw-r--r--kernel/rcutorture.c1
-rw-r--r--kernel/rtmutex-debug.c1
-rw-r--r--kernel/rtmutex-tester.c40
-rw-r--r--kernel/rtmutex.c318
-rw-r--r--kernel/rtmutex_common.h16
-rw-r--r--kernel/sched.c320
-rw-r--r--kernel/sched_autogroup.c15
-rw-r--r--kernel/sched_autogroup.h5
-rw-r--r--kernel/sched_debug.c2
-rw-r--r--kernel/sched_fair.c389
-rw-r--r--kernel/sched_idletask.c26
-rw-r--r--kernel/sched_rt.c19
-rw-r--r--kernel/sched_stoptask.c7
-rw-r--r--kernel/softirq.c24
-rw-r--r--kernel/sysctl.c16
-rw-r--r--kernel/time.c38
-rw-r--r--kernel/time/Makefile3
-rw-r--r--kernel/time/clockevents.c1
-rw-r--r--kernel/time/jiffies.c20
-rw-r--r--kernel/time/ntp.c13
-rw-r--r--kernel/time/posix-clock.c441
-rw-r--r--kernel/time/tick-broadcast.c11
-rw-r--r--kernel/time/tick-common.c7
-rw-r--r--kernel/time/tick-internal.h12
-rw-r--r--kernel/time/tick-oneshot.c1
-rw-r--r--kernel/time/tick-sched.c1
-rw-r--r--kernel/time/timekeeping.c86
-rw-r--r--kernel/timer.c36
-rw-r--r--kernel/trace/ftrace.c52
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace.c4
-rw-r--r--kernel/trace/trace_kprobe.c111
-rw-r--r--kernel/trace/trace_sched_switch.c48
-rw-r--r--kernel/trace/trace_syscalls.c42
-rw-r--r--kernel/workqueue.c6
-rw-r--r--lib/Kconfig28
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/Makefile6
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/cpu_rmap.c269
-rw-r--r--lib/genalloc.c256
-rw-r--r--lib/ioq.c304
-rw-r--r--lib/llist.c119
-rw-r--r--lib/nlattr.c2
-rw-r--r--lib/rwsem.c10
-rw-r--r--lib/shm_signal.c196
-rw-r--r--lib/swiotlb.c6
-rw-r--r--mm/Makefile8
-rw-r--r--mm/bootmem.c180
-rw-r--r--mm/internal.h5
-rw-r--r--mm/memory-failure.c32
-rw-r--r--mm/memory.c71
-rw-r--r--mm/mempolicy.c4
-rw-r--r--mm/migrate.c6
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/nobootmem.c435
-rw-r--r--mm/page_alloc.c76
-rw-r--r--mm/shmem.c11
-rw-r--r--mm/slab.c16
-rw-r--r--mm/slob.c6
-rw-r--r--mm/slub.c55
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/vmscan.c32
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/9p/Makefile1
-rw-r--r--net/9p/client.c166
-rw-r--r--net/9p/protocol.c44
-rw-r--r--net/9p/trans_common.c97
-rw-r--r--net/9p/trans_common.h32
-rw-r--r--net/9p/trans_fd.c52
-rw-r--r--net/9p/trans_rdma.c1
-rw-r--r--net/9p/trans_virtio.c129
-rw-r--r--net/Kconfig6
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/aggregation.c2
-rw-r--r--net/batman-adv/aggregation.h2
-rw-r--r--net/batman-adv/bat_debugfs.c6
-rw-r--r--net/batman-adv/bat_debugfs.h2
-rw-r--r--net/batman-adv/bat_sysfs.c2
-rw-r--r--net/batman-adv/bat_sysfs.h2
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/bitarray.h2
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c2
-rw-r--r--net/batman-adv/gateway_common.h2
-rw-r--r--net/batman-adv/hard-interface.c13
-rw-r--r--net/batman-adv/hard-interface.h6
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h7
-rw-r--r--net/batman-adv/icmp_socket.c3
-rw-r--r--net/batman-adv/icmp_socket.h4
-rw-r--r--net/batman-adv/main.c3
-rw-r--r--net/batman-adv/main.h17
-rw-r--r--net/batman-adv/originator.c4
-rw-r--r--net/batman-adv/originator.h2
-rw-r--r--net/batman-adv/packet.h3
-rw-r--r--net/batman-adv/ring_buffer.c2
-rw-r--r--net/batman-adv/ring_buffer.h2
-rw-r--r--net/batman-adv/routing.c27
-rw-r--r--net/batman-adv/routing.h7
-rw-r--r--net/batman-adv/send.c7
-rw-r--r--net/batman-adv/send.h4
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/batman-adv/translation-table.c3
-rw-r--r--net/batman-adv/translation-table.h4
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/batman-adv/unicast.c34
-rw-r--r--net/batman-adv/unicast.h25
-rw-r--r--net/batman-adv/vis.c2
-rw-r--r--net/batman-adv/vis.h2
-rw-r--r--net/bluetooth/Kconfig20
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/af_bluetooth.c51
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/bnep/sock.c1
-rw-r--r--net/bluetooth/cmtp/capi.c3
-rw-r--r--net/bluetooth/cmtp/core.c11
-rw-r--r--net/bluetooth/hci_conn.c80
-rw-r--r--net/bluetooth/hci_core.c345
-rw-r--r--net/bluetooth/hci_event.c691
-rw-r--r--net/bluetooth/hci_sock.c8
-rw-r--r--net/bluetooth/hci_sysfs.c58
-rw-r--r--net/bluetooth/hidp/core.c208
-rw-r--r--net/bluetooth/hidp/hidp.h15
-rw-r--r--net/bluetooth/l2cap_core.c (renamed from net/bluetooth/l2cap.c)1521
-rw-r--r--net/bluetooth/l2cap_sock.c1156
-rw-r--r--net/bluetooth/mgmt.c1531
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c24
-rw-r--r--net/bridge/br_device.c17
-rw-r--r--net/bridge/br_if.c15
-rw-r--r--net/bridge/br_multicast.c23
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/bridge/netfilter/ebt_ip6.c46
-rw-r--r--net/bridge/netfilter/ebtables.c3
-rw-r--r--net/caif/cfcnfg.c2
-rw-r--r--net/caif/cfdgml.c1
-rw-r--r--net/caif/cfserl.c1
-rw-r--r--net/caif/cfutill.c2
-rw-r--r--net/caif/cfveil.c2
-rw-r--r--net/ceph/messenger.c62
-rw-r--r--net/core/dev.c451
-rw-r--r--net/core/dev_addr_lists.c6
-rw-r--r--net/core/dst.c43
-rw-r--r--net/core/ethtool.c604
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/flow.c14
-rw-r--r--net/core/neighbour.c13
-rw-r--r--net/core/net-sysfs.c17
-rw-r--r--net/core/netpoll.c13
-rw-r--r--net/core/pktgen.c234
-rw-r--r--net/core/rtnetlink.c86
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/dcb/dcbnl.c2
-rw-r--r--net/dccp/ccids/ccid2.c9
-rw-r--r--net/dccp/input.c7
-rw-r--r--net/dccp/ipv4.c12
-rw-r--r--net/dccp/ipv6.c68
-rw-r--r--net/decnet/dn_route.c27
-rw-r--r--net/decnet/dn_table.c1
-rw-r--r--net/ipv4/Kconfig42
-rw-r--r--net/ipv4/Makefile4
-rw-r--r--net/ipv4/af_inet.c6
-rw-r--r--net/ipv4/ah4.c25
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c78
-rw-r--r--net/ipv4/fib_frontend.c63
-rw-r--r--net/ipv4/fib_hash.c1133
-rw-r--r--net/ipv4/fib_lookup.h2
-rw-r--r--net/ipv4/fib_rules.c12
-rw-r--r--net/ipv4/fib_semantics.c125
-rw-r--r--net/ipv4/fib_trie.c217
-rw-r--r--net/ipv4/icmp.c224
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/inetpeer.c52
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_output.c292
-rw-r--r--net/ipv4/netfilter/Kconfig3
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c7
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c3
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c33
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c9
-rw-r--r--net/ipv4/raw.c5
-rw-r--r--net/ipv4/route.c749
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv4/tcp_ipv4.c13
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/tcp_timer.c3
-rw-r--r--net/ipv4/udp.c108
-rw-r--r--net/ipv4/xfrm4_policy.c13
-rw-r--r--net/ipv4/xfrm4_state.c6
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/af_inet6.c19
-rw-r--r--net/ipv6/datagram.c15
-rw-r--r--net/ipv6/icmp.c133
-rw-r--r--net/ipv6/inet6_connection_sock.c25
-rw-r--r--net/ipv6/inet6_hashtables.c2
-rw-r--r--net/ipv6/ip6_output.c96
-rw-r--r--net/ipv6/ip6mr.c3
-rw-r--r--net/ipv6/mip6.c3
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c3
-rw-r--r--net/ipv6/raw.c29
-rw-r--r--net/ipv6/route.c100
-rw-r--r--net/ipv6/sit.c23
-rw-r--r--net/ipv6/syncookies.c7
-rw-r--r--net/ipv6/tcp_ipv6.c63
-rw-r--r--net/ipv6/udp.c17
-rw-r--r--net/ipv6/xfrm6_policy.c11
-rw-r--r--net/ipv6/xfrm6_state.c6
-rw-r--r--net/key/af_key.c243
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/llc/llc_input.c25
-rw-r--r--net/mac80211/Kconfig4
-rw-r--r--net/mac80211/agg-rx.c7
-rw-r--r--net/mac80211/agg-tx.c23
-rw-r--r--net/mac80211/cfg.c95
-rw-r--r--net/mac80211/debugfs.c6
-rw-r--r--net/mac80211/debugfs_netdev.c122
-rw-r--r--net/mac80211/driver-ops.h41
-rw-r--r--net/mac80211/driver-trace.h222
-rw-r--r--net/mac80211/ht.c5
-rw-r--r--net/mac80211/ibss.c21
-rw-r--r--net/mac80211/ieee80211_i.h17
-rw-r--r--net/mac80211/iface.c10
-rw-r--r--net/mac80211/main.c85
-rw-r--r--net/mac80211/mesh.c4
-rw-r--r--net/mac80211/mlme.c101
-rw-r--r--net/mac80211/offchannel.c68
-rw-r--r--net/mac80211/rx.c130
-rw-r--r--net/mac80211/scan.c96
-rw-r--r--net/mac80211/sta_info.c3
-rw-r--r--net/mac80211/sta_info.h6
-rw-r--r--net/mac80211/status.c10
-rw-r--r--net/mac80211/tx.c196
-rw-r--r--net/mac80211/util.c6
-rw-r--r--net/mac80211/work.c121
-rw-r--r--net/mac80211/wpa.c39
-rw-r--r--net/netfilter/Kconfig66
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/core.c20
-rw-r--r--net/netfilter/ipset/Kconfig122
-rw-r--r--net/netfilter/ipset/Makefile24
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c587
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c652
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c515
-rw-r--r--net/netfilter/ipset/ip_set_core.c1671
-rw-r--r--net/netfilter/ipset/ip_set_getport.c141
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c464
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c544
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c562
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c628
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c458
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c578
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c584
-rw-r--r--net/netfilter/ipset/pfxlen.c291
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c98
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c237
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c400
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c896
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c134
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c61
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c82
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c99
-rw-r--r--net/netfilter/ipvs/ip_vs_lc.c20
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_pe.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c129
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c45
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c153
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c142
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c110
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c25
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c1239
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c22
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c67
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c82
-rw-r--r--net/netfilter/nf_conntrack_core.c57
-rw-r--r--net/netfilter/nf_conntrack_expect.c34
-rw-r--r--net/netfilter/nf_conntrack_extend.c11
-rw-r--r--net/netfilter/nf_conntrack_helper.c20
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c74
-rw-r--r--net/netfilter/nf_conntrack_netlink.c49
-rw-r--r--net/netfilter/nf_conntrack_proto.c24
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c18
-rw-r--r--net/netfilter/nf_conntrack_snmp.c77
-rw-r--r--net/netfilter/nf_conntrack_standalone.c45
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c120
-rw-r--r--net/netfilter/nf_log.c10
-rw-r--r--net/netfilter/nf_queue.c82
-rw-r--r--net/netfilter/nf_tproxy_core.c27
-rw-r--r--net/netfilter/nfnetlink_log.c9
-rw-r--r--net/netfilter/nfnetlink_queue.c22
-rw-r--r--net/netfilter/x_tables.c98
-rw-r--r--net/netfilter/xt_AUDIT.c204
-rw-r--r--net/netfilter/xt_CLASSIFY.c36
-rw-r--r--net/netfilter/xt_IDLETIMER.c2
-rw-r--r--net/netfilter/xt_LED.c2
-rw-r--r--net/netfilter/xt_NFQUEUE.c34
-rw-r--r--net/netfilter/xt_TPROXY.c22
-rw-r--r--net/netfilter/xt_connlimit.c62
-rw-r--r--net/netfilter/xt_conntrack.c80
-rw-r--r--net/netfilter/xt_cpu.c2
-rw-r--r--net/netfilter/xt_devgroup.c82
-rw-r--r--net/netfilter/xt_iprange.c18
-rw-r--r--net/netfilter/xt_ipvs.c2
-rw-r--r--net/netfilter/xt_set.c359
-rw-r--r--net/netfilter/xt_socket.c13
-rw-r--r--net/netlink/af_netlink.c18
-rw-r--r--net/packet/af_packet.c38
-rw-r--r--net/phonet/af_phonet.c19
-rw-r--r--net/phonet/pep.c244
-rw-r--r--net/phonet/socket.c14
-rw-r--r--net/rds/ib.c9
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/ib_rdma.c27
-rw-r--r--net/rds/rds.h1
-rw-r--r--net/rose/af_rose.c7
-rw-r--r--net/rose/rose_route.c28
-rw-r--r--net/rxrpc/ar-key.c8
-rw-r--r--net/sched/Kconfig39
-rw-r--r--net/sched/Makefile4
-rw-r--r--net/sched/act_api.c46
-rw-r--r--net/sched/act_csum.c2
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ipt.c16
-rw-r--r--net/sched/act_mirred.c4
-rw-r--r--net/sched/act_nat.c2
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c9
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/cls_api.c33
-rw-r--r--net/sched/cls_basic.c17
-rw-r--r--net/sched/cls_cgroup.c8
-rw-r--r--net/sched/cls_flow.c6
-rw-r--r--net/sched/cls_fw.c38
-rw-r--r--net/sched/cls_route.c126
-rw-r--r--net/sched/cls_rsvp.h95
-rw-r--r--net/sched/cls_tcindex.c2
-rw-r--r--net/sched/cls_u32.c89
-rw-r--r--net/sched/em_cmp.c47
-rw-r--r--net/sched/em_meta.c46
-rw-r--r--net/sched/em_nbyte.c3
-rw-r--r--net/sched/em_text.c3
-rw-r--r--net/sched/em_u32.c2
-rw-r--r--net/sched/ematch.c37
-rw-r--r--net/sched/sch_api.c173
-rw-r--r--net/sched/sch_atm.c16
-rw-r--r--net/sched/sch_cbq.c362
-rw-r--r--net/sched/sch_choke.c688
-rw-r--r--net/sched/sch_dsmark.c21
-rw-r--r--net/sched/sch_fifo.c22
-rw-r--r--net/sched/sch_generic.c41
-rw-r--r--net/sched/sch_gred.c85
-rw-r--r--net/sched/sch_hfsc.c37
-rw-r--r--net/sched/sch_htb.c106
-rw-r--r--net/sched/sch_mq.c1
-rw-r--r--net/sched/sch_mqprio.c418
-rw-r--r--net/sched/sch_multiq.c8
-rw-r--r--net/sched/sch_netem.c411
-rw-r--r--net/sched/sch_prio.c34
-rw-r--r--net/sched/sch_red.c61
-rw-r--r--net/sched/sch_sfb.c709
-rw-r--r--net/sched/sch_sfq.c67
-rw-r--r--net/sched/sch_tbf.c39
-rw-r--r--net/sched/sch_teql.c36
-rw-r--r--net/sctp/sm_make_chunk.c10
-rw-r--r--net/sctp/socket.c9
-rw-r--r--net/sctp/tsnmap.c2
-rw-r--r--net/socket.c31
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/svcsock.c32
-rw-r--r--net/tipc/bcast.c47
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/bearer.c100
-rw-r--r--net/tipc/bearer.h67
-rw-r--r--net/tipc/core.c3
-rw-r--r--net/tipc/core.h3
-rw-r--r--net/tipc/discover.c37
-rw-r--r--net/tipc/discover.h9
-rw-r--r--net/tipc/link.c82
-rw-r--r--net/tipc/link.h28
-rw-r--r--net/tipc/msg.c7
-rw-r--r--net/tipc/msg.h22
-rw-r--r--net/tipc/node.c4
-rw-r--r--net/tipc/port.c306
-rw-r--r--net/tipc/port.h73
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/tipc/subscr.c13
-rw-r--r--net/unix/af_unix.c68
-rw-r--r--net/wanrouter/wanmain.c2
-rw-r--r--net/wireless/core.c20
-rw-r--r--net/wireless/nl80211.c62
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/util.c47
-rw-r--r--net/wireless/wext-compat.c9
-rw-r--r--net/xfrm/xfrm_algo.c8
-rw-r--r--net/xfrm/xfrm_hash.h32
-rw-r--r--net/xfrm/xfrm_policy.c146
-rw-r--r--net/xfrm/xfrm_state.c60
-rw-r--r--net/xfrm/xfrm_user.c50
-rw-r--r--scripts/basic/fixdep.c12
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/extract-ikconfig9
-rw-r--r--scripts/kconfig/streamline_config.pl2
-rw-r--r--scripts/rt-tester/rt-tester.py2
-rw-r--r--scripts/rt-tester/t2-l1-2rt-sameprio.tst5
-rw-r--r--scripts/rt-tester/t2-l1-pi.tst5
-rw-r--r--scripts/rt-tester/t2-l1-signal.tst5
-rw-r--r--scripts/rt-tester/t2-l2-2rt-deadlock.tst5
-rw-r--r--scripts/rt-tester/t3-l1-pi-1rt.tst5
-rw-r--r--scripts/rt-tester/t3-l1-pi-2rt.tst5
-rw-r--r--scripts/rt-tester/t3-l1-pi-3rt.tst5
-rw-r--r--scripts/rt-tester/t3-l1-pi-signal.tst5
-rw-r--r--scripts/rt-tester/t3-l1-pi-steal.tst5
-rw-r--r--scripts/rt-tester/t3-l2-pi.tst5
-rw-r--r--scripts/rt-tester/t4-l2-pi-deboost.tst5
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst5
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost.tst5
-rwxr-xr-xscripts/setlocalversion14
-rwxr-xr-xscripts/tags.sh9
-rw-r--r--scripts/unifdef.c247
-rw-r--r--security/capability.c11
-rw-r--r--security/commoncap.c2
-rw-r--r--security/integrity/ima/ima.h3
-rw-r--r--security/integrity/ima/ima_api.c13
-rw-r--r--security/integrity/ima/ima_iint.c5
-rw-r--r--security/integrity/ima/ima_main.c136
-rw-r--r--security/security.c19
-rw-r--r--security/selinux/hooks.c237
-rw-r--r--security/selinux/include/classmap.h3
-rw-r--r--security/selinux/include/security.h8
-rw-r--r--security/selinux/include/xfrm.h2
-rw-r--r--security/selinux/ss/avtab.h23
-rw-r--r--security/selinux/ss/ebitmap.h1
-rw-r--r--security/selinux/ss/policydb.c130
-rw-r--r--security/selinux/ss/policydb.h14
-rw-r--r--security/selinux/ss/services.c45
-rw-r--r--security/selinux/xfrm.c4
-rw-r--r--security/smack/smack.h17
-rw-r--r--security/smack/smack_access.c52
-rw-r--r--security/smack/smack_lsm.c287
-rw-r--r--security/smack/smackfs.c370
-rw-r--r--security/tomoyo/file.c5
-rw-r--r--sound/arm/aaci.c285
-rw-r--r--sound/arm/aaci.h9
-rw-r--r--sound/core/device.c7
-rw-r--r--sound/core/jack.c1
-rw-r--r--sound/core/memalloc.c3
-rw-r--r--sound/core/oss/linear.c7
-rw-r--r--sound/core/oss/mixer_oss.c10
-rw-r--r--sound/core/oss/mulaw.c2
-rw-r--r--sound/core/oss/pcm_oss.c51
-rw-r--r--sound/core/oss/pcm_plugin.c40
-rw-r--r--sound/core/oss/pcm_plugin.h11
-rw-r--r--sound/core/oss/route.c6
-rw-r--r--sound/core/pcm.c10
-rw-r--r--sound/core/pcm_misc.c35
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c7
-rw-r--r--sound/core/seq/seq_memory.c6
-rw-r--r--sound/core/seq/seq_memory.h4
-rw-r--r--sound/core/seq/seq_ports.c2
-rw-r--r--sound/core/vmaster.c2
-rw-r--r--sound/pci/Kconfig12
-rw-r--r--sound/pci/ac97/ac97_codec.c90
-rw-r--r--sound/pci/ac97/ac97_patch.c52
-rw-r--r--sound/pci/asihpi/asihpi.c774
-rw-r--r--sound/pci/asihpi/hpi.h1214
-rw-r--r--sound/pci/asihpi/hpi6000.c299
-rw-r--r--sound/pci/asihpi/hpi6205.c603
-rw-r--r--sound/pci/asihpi/hpi6205.h7
-rw-r--r--sound/pci/asihpi/hpi_internal.h1118
-rw-r--r--sound/pci/asihpi/hpicmn.c480
-rw-r--r--sound/pci/asihpi/hpicmn.h24
-rw-r--r--sound/pci/asihpi/hpidebug.c157
-rw-r--r--sound/pci/asihpi/hpidebug.h323
-rw-r--r--sound/pci/asihpi/hpidspcd.c37
-rw-r--r--sound/pci/asihpi/hpidspcd.h2
-rw-r--r--sound/pci/asihpi/hpifunc.c2498
-rw-r--r--sound/pci/asihpi/hpimsginit.c18
-rw-r--r--sound/pci/asihpi/hpimsginit.h12
-rw-r--r--sound/pci/asihpi/hpimsgx.c203
-rw-r--r--sound/pci/asihpi/hpioctl.c85
-rw-r--r--sound/pci/asihpi/hpios.h10
-rw-r--r--sound/pci/au88x0/au88x0.h4
-rw-r--r--sound/pci/au88x0/au88x0_core.c4
-rw-r--r--sound/pci/au88x0/au88x0_eq.c3
-rw-r--r--sound/pci/azt3328.c450
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c2
-rw-r--r--sound/pci/hda/patch_analog.c117
-rw-r--r--sound/pci/hda/patch_conexant.c5
-rw-r--r--sound/pci/hda/patch_hdmi.c22
-rw-r--r--sound/pci/hda/patch_realtek.c24
-rw-r--r--sound/pci/hda/patch_sigmatel.c15
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/pci/rme9652/hdspm.c4464
-rw-r--r--sound/ppc/pmac.c6
-rw-r--r--sound/soc/Kconfig2
-rw-r--r--sound/soc/Makefile2
-rw-r--r--sound/soc/codecs/Kconfig17
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ak4104.c1
-rw-r--r--sound/soc/codecs/ak4642.c24
-rw-r--r--sound/soc/codecs/cq93vc.c3
-rw-r--r--sound/soc/codecs/cs4270.c6
-rw-r--r--sound/soc/codecs/cs4271.c643
-rw-r--r--sound/soc/codecs/cx20442.c2
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c1511
-rw-r--r--sound/soc/codecs/sgtl5000.h400
-rw-r--r--sound/soc/codecs/sn95031.c949
-rw-r--r--sound/soc/codecs/sn95031.h132
-rw-r--r--sound/soc/codecs/twl4030.c6
-rw-r--r--sound/soc/codecs/twl6040.c4
-rw-r--r--sound/soc/codecs/wl1273.c3
-rw-r--r--sound/soc/codecs/wm2000.c14
-rw-r--r--sound/soc/codecs/wm8400.c3
-rw-r--r--sound/soc/codecs/wm8523.c8
-rw-r--r--sound/soc/codecs/wm8741.c13
-rw-r--r--sound/soc/codecs/wm8753.c296
-rw-r--r--sound/soc/codecs/wm8804.c2
-rw-r--r--sound/soc/codecs/wm8900.c2
-rw-r--r--sound/soc/codecs/wm8903.c643
-rw-r--r--sound/soc/codecs/wm8903.h10
-rw-r--r--sound/soc/codecs/wm8904.c43
-rw-r--r--sound/soc/codecs/wm8955.c27
-rw-r--r--sound/soc/codecs/wm8961.c2
-rw-r--r--sound/soc/codecs/wm8962.c36
-rw-r--r--sound/soc/codecs/wm8978.c2
-rw-r--r--sound/soc/codecs/wm8991.c1427
-rw-r--r--sound/soc/codecs/wm8991.h833
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994-tables.c10
-rw-r--r--sound/soc/codecs/wm8994.c366
-rw-r--r--sound/soc/codecs/wm8995.c103
-rw-r--r--sound/soc/codecs/wm9081.c34
-rw-r--r--sound/soc/codecs/wm9090.c45
-rw-r--r--sound/soc/codecs/wm_hubs.c6
-rw-r--r--sound/soc/davinci/davinci-i2s.c28
-rw-r--r--sound/soc/davinci/davinci-mcasp.c29
-rw-r--r--sound/soc/davinci/davinci-vcif.c2
-rw-r--r--sound/soc/ep93xx/Kconfig9
-rw-r--r--sound/soc/ep93xx/Makefile2
-rw-r--r--sound/soc/ep93xx/edb93xx.c142
-rw-r--r--sound/soc/ep93xx/ep93xx-ac97.c1
-rw-r--r--sound/soc/fsl/fsl_dma.c9
-rw-r--r--sound/soc/fsl/fsl_ssi.c9
-rw-r--r--sound/soc/fsl/mpc5200_dma.c24
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c9
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c9
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c6
-rw-r--r--sound/soc/fsl/p1022_ds.c6
-rw-r--r--sound/soc/imx/Kconfig3
-rw-r--r--sound/soc/imx/eukrea-tlv320.c5
-rw-r--r--sound/soc/imx/imx-ssi.c5
-rw-r--r--sound/soc/mid-x86/Kconfig14
-rw-r--r--sound/soc/mid-x86/Makefile5
-rw-r--r--sound/soc/mid-x86/mfld_machine.c452
-rw-r--r--sound/soc/mid-x86/sst_platform.c474
-rw-r--r--sound/soc/mid-x86/sst_platform.h63
-rw-r--r--sound/soc/omap/Kconfig1
-rw-r--r--sound/soc/omap/omap-mcbsp.c126
-rw-r--r--sound/soc/omap/omap-mcbsp.h4
-rw-r--r--sound/soc/omap/rx51.c93
-rw-r--r--sound/soc/pxa/e740_wm9705.c4
-rw-r--r--sound/soc/pxa/e750_wm9705.c4
-rw-r--r--sound/soc/pxa/e800_wm9712.c4
-rw-r--r--sound/soc/pxa/em-x270.c4
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c4
-rw-r--r--sound/soc/pxa/palm27x.c4
-rw-r--r--sound/soc/pxa/raumfeld.c20
-rw-r--r--sound/soc/pxa/tosa.c8
-rw-r--r--sound/soc/pxa/zylonite.c13
-rw-r--r--sound/soc/samsung/Kconfig2
-rw-r--r--sound/soc/samsung/ac97.c8
-rw-r--r--sound/soc/samsung/ac97.h21
-rw-r--r--sound/soc/samsung/dma.c13
-rw-r--r--sound/soc/samsung/dma.h8
-rw-r--r--sound/soc/samsung/goni_wm8994.c10
-rw-r--r--sound/soc/samsung/h1940_uda1380.c9
-rw-r--r--sound/soc/samsung/i2s.c3
-rw-r--r--sound/soc/samsung/jive_wm8750.c11
-rw-r--r--sound/soc/samsung/ln2440sbc_alc650.c7
-rw-r--r--sound/soc/samsung/neo1973_gta02_wm8753.c14
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c1
-rw-r--r--sound/soc/samsung/pcm.c118
-rw-r--r--sound/soc/samsung/pcm.h107
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c11
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c3
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c12
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c14
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c7
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_hermes.c10
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c12
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c9
-rw-r--r--sound/soc/samsung/smartq_wm8987.c6
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c7
-rw-r--r--sound/soc/samsung/smdk_spdif.c5
-rw-r--r--sound/soc/samsung/smdk_wm8580.c7
-rw-r--r--sound/soc/samsung/smdk_wm9713.c5
-rw-r--r--sound/soc/samsung/spdif.c3
-rw-r--r--sound/soc/sh/fsi-ak4642.c24
-rw-r--r--sound/soc/sh/fsi-da7210.c13
-rw-r--r--sound/soc/sh/fsi-hdmi.c77
-rw-r--r--sound/soc/sh/fsi.c203
-rw-r--r--sound/soc/soc-cache.c386
-rw-r--r--sound/soc/soc-core.c378
-rw-r--r--sound/soc/soc-dapm.c223
-rw-r--r--sound/soc/soc-jack.c58
-rw-r--r--sound/soc/soc-utils.c23
-rw-r--r--sound/soc/tegra/Kconfig26
-rw-r--r--sound/soc/tegra/Makefile15
-rw-r--r--sound/soc/tegra/harmony.c393
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.c155
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.h45
-rw-r--r--sound/soc/tegra/tegra_das.c265
-rw-r--r--sound/soc/tegra/tegra_das.h135
-rw-r--r--sound/soc/tegra/tegra_i2s.c503
-rw-r--r--sound/soc/tegra/tegra_i2s.h165
-rw-r--r--sound/soc/tegra/tegra_pcm.c404
-rw-r--r--sound/soc/tegra/tegra_pcm.h55
-rw-r--r--sound/sparc/amd7930.c8
-rw-r--r--sound/sparc/cs4231.c16
-rw-r--r--sound/sparc/dbri.c8
-rw-r--r--sound/usb/6fire/Makefile3
-rw-r--r--sound/usb/6fire/chip.c232
-rw-r--r--sound/usb/6fire/chip.h32
-rw-r--r--sound/usb/6fire/comm.c176
-rw-r--r--sound/usb/6fire/comm.h44
-rw-r--r--sound/usb/6fire/common.h30
-rw-r--r--sound/usb/6fire/control.c275
-rw-r--r--sound/usb/6fire/control.h37
-rw-r--r--sound/usb/6fire/firmware.c426
-rw-r--r--sound/usb/6fire/firmware.h27
-rw-r--r--sound/usb/6fire/midi.c203
-rw-r--r--sound/usb/6fire/midi.h46
-rw-r--r--sound/usb/6fire/pcm.c688
-rw-r--r--sound/usb/6fire/pcm.h76
-rw-r--r--sound/usb/Kconfig17
-rw-r--r--sound/usb/Makefile2
-rw-r--r--sound/usb/caiaq/audio.c1
-rw-r--r--sound/usb/caiaq/device.c6
-rw-r--r--sound/usb/caiaq/device.h1
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/mixer_quirks.c174
-rw-r--r--sound/usb/pcm.c7
-rw-r--r--sound/usb/quirks-table.h14
-rw-r--r--sound/usb/quirks.c56
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Documentation/Makefile19
-rw-r--r--tools/perf/Documentation/perf-list.txt23
-rw-r--r--tools/perf/Documentation/perf-lock.txt12
-rw-r--r--tools/perf/Documentation/perf-probe.txt26
-rw-r--r--tools/perf/Documentation/perf-record.txt11
-rw-r--r--tools/perf/Documentation/perf-stat.txt11
-rw-r--r--tools/perf/Makefile649
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-annotate.c291
-rw-r--r--tools/perf/builtin-diff.c16
-rw-r--r--tools/perf/builtin-inject.c82
-rw-r--r--tools/perf/builtin-kmem.c10
-rw-r--r--tools/perf/builtin-list.c43
-rw-r--r--tools/perf/builtin-lock.c8
-rw-r--r--tools/perf/builtin-probe.c70
-rw-r--r--tools/perf/builtin-record.c439
-rw-r--r--tools/perf/builtin-report.c114
-rw-r--r--tools/perf/builtin-sched.c27
-rw-r--r--tools/perf/builtin-script.c17
-rw-r--r--tools/perf/builtin-stat.c118
-rw-r--r--tools/perf/builtin-test.c184
-rw-r--r--tools/perf/builtin-timechart.c19
-rw-r--r--tools/perf/builtin-top.c1006
-rw-r--r--tools/perf/perf.h26
-rwxr-xr-xtools/perf/python/twatch.py41
-rw-r--r--tools/perf/util/annotate.c605
-rw-r--r--tools/perf/util/annotate.h103
-rw-r--r--tools/perf/util/build-id.c21
-rw-r--r--tools/perf/util/cache.h7
-rw-r--r--tools/perf/util/callchain.c227
-rw-r--r--tools/perf/util/callchain.h76
-rw-r--r--tools/perf/util/cgroup.c178
-rw-r--r--tools/perf/util/cgroup.h17
-rw-r--r--tools/perf/util/cpumap.c5
-rw-r--r--tools/perf/util/cpumap.h2
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/debug.h2
-rw-r--r--tools/perf/util/event.c319
-rw-r--r--tools/perf/util/event.h78
-rw-r--r--tools/perf/util/evlist.c350
-rw-r--r--tools/perf/util/evlist.h64
-rw-r--r--tools/perf/util/evsel.c221
-rw-r--r--tools/perf/util/evsel.h38
-rw-r--r--tools/perf/util/exec_cmd.c19
-rw-r--r--tools/perf/util/header.c100
-rw-r--r--tools/perf/util/header.h55
-rw-r--r--tools/perf/util/hist.c244
-rw-r--r--tools/perf/util/hist.h48
-rw-r--r--tools/perf/util/include/linux/list.h1
-rw-r--r--tools/perf/util/parse-events.c175
-rw-r--r--tools/perf/util/parse-events.h12
-rw-r--r--tools/perf/util/probe-event.c159
-rw-r--r--tools/perf/util/probe-event.h4
-rw-r--r--tools/perf/util/probe-finder.c533
-rw-r--r--tools/perf/util/python.c891
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c3
-rw-r--r--tools/perf/util/session.c219
-rw-r--r--tools/perf/util/session.h27
-rw-r--r--tools/perf/util/setup.py19
-rw-r--r--tools/perf/util/strfilter.c200
-rw-r--r--tools/perf/util/strfilter.h48
-rw-r--r--tools/perf/util/svghelper.c6
-rw-r--r--tools/perf/util/symbol.c4
-rw-r--r--tools/perf/util/thread.c55
-rw-r--r--tools/perf/util/thread.h14
-rw-r--r--tools/perf/util/thread_map.c64
-rw-r--r--tools/perf/util/thread_map.h15
-rw-r--r--tools/perf/util/top.c218
-rw-r--r--tools/perf/util/top.h65
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--tools/perf/util/ui/browser.c7
-rw-r--r--tools/perf/util/ui/browsers/annotate.c178
-rw-r--r--tools/perf/util/ui/browsers/hists.c19
-rw-r--r--tools/perf/util/ui/browsers/map.c2
-rw-r--r--tools/perf/util/ui/browsers/top.c200
-rw-r--r--tools/perf/util/ui/helpline.c5
-rw-r--r--tools/perf/util/ui/libslang.h6
-rw-r--r--tools/perf/util/ui/setup.c8
-rw-r--r--tools/perf/util/ui/ui.h8
-rw-r--r--tools/perf/util/util.h26
-rwxr-xr-xtools/testing/ktest/ktest.pl2
-rw-r--r--virt/kvm/kvm_main.c139
4443 files changed, 284510 insertions, 94075 deletions
diff --git a/.gitignore b/.gitignore
index 8faa6c02b39e..5d56a3fd0de6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ modules.builtin
*.gz
*.bz2
*.lzma
+*.xz
*.lzo
*.patch
*.gcno
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 8dfc6708a257..f607367e642f 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -328,8 +328,6 @@ sysrq.txt
- info on the magic SysRq key.
telephony/
- directory with info on telephony (e.g. voice over IP) support.
-time_interpolators.txt
- - info on time interpolators.
uml/
- directory with information about User Mode Linux.
unicode.txt
@@ -346,8 +344,6 @@ vm/
- directory with info on the Linux vm code.
volatile-considered-harmful.txt
- Why the "volatile" type class should not be used
-voyager.txt
- - guide to running Linux on the Voyager architecture.
w1/
- directory with documents regarding the 1-wire (w1) subsystem.
watchdog/
diff --git a/Documentation/ABI/testing/pstore b/Documentation/ABI/testing/pstore
new file mode 100644
index 000000000000..f1fb2a004264
--- /dev/null
+++ b/Documentation/ABI/testing/pstore
@@ -0,0 +1,35 @@
+Where: /dev/pstore/...
+Date: January 2011
+Kernel Version: 2.6.38
+Contact: tony.luck@intel.com
+Description: Generic interface to platform dependent persistent storage.
+
+ Platforms that provide a mechanism to preserve some data
+ across system reboots can register with this driver to
+ provide a generic interface to show records captured in
+ the dying moments. In the case of a panic the last part
+ of the console log is captured, but other interesting
+ data can also be saved.
+
+ # mount -t pstore - /dev/pstore
+
+ $ ls -l /dev/pstore
+ total 0
+ -r--r--r-- 1 root root 7896 Nov 30 15:38 dmesg-erst-1
+
+ Different users of this interface will result in different
+ filename prefixes. Currently two are defined:
+
+ "dmesg" - saved console log
+ "mce" - architecture dependent data from fatal h/w error
+
+ Once the information in a file has been read, removing
+ the file will signal to the underlying persistent storage
+ device that it can reclaim the space for later re-use.
+
+ $ rm /dev/pstore/dmesg-erst-1
+
+ The expectation is that all files in /dev/pstore
+ will be saved elsewhere and erased from persistent store
+ soon after boot to free up space ready for the next
+ catastrophe.
diff --git a/Documentation/ABI/testing/sysfs-devices-mmc b/Documentation/ABI/testing/sysfs-devices-mmc
new file mode 100644
index 000000000000..5a50ab655843
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-mmc
@@ -0,0 +1,21 @@
+What: /sys/devices/.../mmc_host/mmcX/mmcX:XXXX/enhanced_area_offset
+Date: January 2011
+Contact: Chuanxiao Dong <chuanxiao.dong@intel.com>
+Description:
+ Enhanced area is a new feature defined in eMMC4.4 standard.
+ eMMC4.4 or later card can support such feature. This kind of
+ area can help to improve the card performance. If the feature
+ is enabled, this attribute will indicate the start address of
+ enhanced data area. If not, this attribute will be -EINVAL.
+ Unit Byte. Format decimal.
+
+What: /sys/devices/.../mmc_host/mmcX/mmcX:XXXX/enhanced_area_size
+Date: January 2011
+Contact: Chuanxiao Dong <chuanxiao.dong@intel.com>
+Description:
+ Enhanced area is a new feature defined in eMMC4.4 standard.
+ eMMC4.4 or later card can support such feature. This kind of
+ area can help to improve the card performance. If the feature
+ is enabled, this attribute will indicate the size of enhanced
+ data area. If not, this attribute will be -EINVAL.
+ Unit KByte. Format decimal.
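For orientation only, a minimal shell sketch of reading the two new attributes; the mmc0/mmc0:0001 portion of the path is a placeholder and depends on how the card enumerates on a given system:

	$ cat /sys/devices/.../mmc_host/mmc0/mmc0:0001/enhanced_area_offset
	$ cat /sys/devices/.../mmc_host/mmc0/mmc0:0001/enhanced_area_size

If the enhanced area feature has not been enabled on the card, both attributes report -EINVAL, as described above.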
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 7628cd1bc36a..8ffbc25376a0 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -29,9 +29,8 @@ Description:
"disabled" to it.
For the devices that are not capable of generating system wakeup
- events this file contains "\n". In that cases the user space
- cannot modify the contents of this file and the device cannot be
- enabled to wake up the system.
+ events this file is not present. In that case the device cannot
+ be enabled to wake up the system from sleep states.
What: /sys/devices/.../power/control
Date: January 2009
@@ -85,7 +84,7 @@ Description:
The /sys/devices/.../wakeup_count attribute contains the number
of signaled wakeup events associated with the device. This
attribute is read-only. If the device is not enabled to wake up
- the system from sleep states, this attribute is empty.
+ the system from sleep states, this attribute is not present.
What: /sys/devices/.../power/wakeup_active_count
Date: September 2010
@@ -95,7 +94,7 @@ Description:
number of times the processing of wakeup events associated with
the device was completed (at the kernel level). This attribute
is read-only. If the device is not enabled to wake up the
- system from sleep states, this attribute is empty.
+ system from sleep states, this attribute is not present.
What: /sys/devices/.../power/wakeup_hit_count
Date: September 2010
@@ -105,7 +104,8 @@ Description:
number of times the processing of a wakeup event associated with
the device might prevent the system from entering a sleep state.
This attribute is read-only. If the device is not enabled to
- wake up the system from sleep states, this attribute is empty.
+ wake up the system from sleep states, this attribute is not
+ present.
What: /sys/devices/.../power/wakeup_active
Date: September 2010
@@ -115,7 +115,7 @@ Description:
or 0, depending on whether or not a wakeup event associated with
the device is being processed (1). This attribute is read-only.
If the device is not enabled to wake up the system from sleep
- states, this attribute is empty.
+ states, this attribute is not present.
What: /sys/devices/.../power/wakeup_total_time_ms
Date: September 2010
@@ -125,7 +125,7 @@ Description:
the total time of processing wakeup events associated with the
device, in milliseconds. This attribute is read-only. If the
device is not enabled to wake up the system from sleep states,
- this attribute is empty.
+ this attribute is not present.
What: /sys/devices/.../power/wakeup_max_time_ms
Date: September 2010
@@ -135,7 +135,7 @@ Description:
the maximum time of processing a single wakeup event associated
with the device, in milliseconds. This attribute is read-only.
If the device is not enabled to wake up the system from sleep
- states, this attribute is empty.
+ states, this attribute is not present.
What: /sys/devices/.../power/wakeup_last_time_ms
Date: September 2010
@@ -146,7 +146,7 @@ Description:
signaling the last wakeup event associated with the device, in
milliseconds. This attribute is read-only. If the device is
not enabled to wake up the system from sleep states, this
- attribute is empty.
+ attribute is not present.
What: /sys/devices/.../power/autosuspend_delay_ms
Date: September 2010
diff --git a/Documentation/ABI/testing/sysfs-driver-hid b/Documentation/ABI/testing/sysfs-driver-hid
new file mode 100644
index 000000000000..b6490e14fe83
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid
@@ -0,0 +1,10 @@
+What: For USB devices : /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/report_descriptor
+ For BT devices : /sys/class/bluetooth/hci<addr>/<hid-bus>:<vendor-id>:<product-id>.<num>/report_descriptor
+ Symlink : /sys/class/hidraw/hidraw<num>/device/report_descriptor
+Date: Jan 2011
+KernelVersion: 2.0.39
+Contact: Alan Ott <alan@signal11.us>
+Description: When read, this file returns the device's raw binary HID
+ report descriptor.
+ This file cannot be written.
+Users: HIDAPI library (http://www.signal11.us/oss/hidapi)
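As a usage sketch, the descriptor can be dumped through the hidraw symlink documented above (hidraw0 here is only a placeholder for whichever node the device is assigned):

	$ hexdump -C /sys/class/hidraw/hidraw0/device/report_descriptor

The output is the device's raw binary HID report descriptor; interpreting it is left to userspace, for example the HIDAPI library listed under Users.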
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-arvo b/Documentation/ABI/testing/sysfs-driver-hid-roccat-arvo
new file mode 100644
index 000000000000..55e281b0071a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-arvo
@@ -0,0 +1,53 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/arvo/roccatarvo<minor>/actual_profile
+Date:		January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-5.
+ When read, this attribute returns the number of the actual
+ profile which is also the profile that's active on device startup.
+ When written this attribute activates the selected profile
+ immediately.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/arvo/roccatarvo<minor>/button
+Date:		January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The keyboard can store short macros which consist of 1 button with
+ several modifier keys internally.
+ When written, this file lets one set the sequence for a specific
+ button for a specific profile. Button and profile numbers are
+ included in written data. The data has to be 24 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/arvo/roccatarvo<minor>/info
+Date:		January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns some info about the device like the
+ installed firmware version.
+		The data is 8 bytes in size.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/arvo/roccatarvo<minor>/key_mask
+Date:		January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The keyboard lets the user deactivate 5 specific keys, such as
+		the windows and application keys, to protect the user from the
+		consequences of pressing them accidentally.
+ The integer value of this attribute has bits 0-4 set depending
+ on the state of the corresponding key.
+ When read, this file returns the current state of the buttons.
+ When written, the given buttons are activated/deactivated
+ immediately.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/arvo/roccatarvo<minor>/mode_key
+Date:		January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The keyboard has a condensed layout without a num-lock key.
+ Instead it uses a mode-key which activates a gaming mode where
+ the assignment of the number block changes.
+ The integer value of this attribute ranges from 0 (OFF) to 1 (ON).
+ When read, this file returns the actual state of the key.
+ When written, the key is activated/deactivated immediately.
+Users: http://roccat.sourceforge.net
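A brief usage sketch for the new actual_profile attribute; the leading part of the sysfs path is abbreviated with ... and depends on where the keyboard sits on the bus, and roccatarvo0 likewise stands in for the actual minor number:

	$ cat /sys/bus/usb/devices/.../arvo/roccatarvo0/actual_profile
	$ echo 2 > /sys/bus/usb/devices/.../arvo/roccatarvo0/actual_profile

Reading returns the profile that is active (and will be active on startup); writing a value in the documented 1-5 range switches the keyboard to that profile immediately.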
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
index 698b8081c473..b4c4f158ab9c 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
@@ -16,12 +16,14 @@ Description: It is possible to switch the dpi setting of the mouse with the
6 3200
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/actual_profile
Date: March 2010
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
Description: When read, this file returns the number of the actual profile.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/firmware_version
Date: March 2010
@@ -32,6 +34,7 @@ Description: When read, this file returns the raw integer version number of the
number the decimal point has to be shifted 2 positions to the
left. E.g. a returned value of 138 means 1.38
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/profile[1-5]
Date: March 2010
@@ -47,6 +50,7 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data, whereas the profile number
stored in the profile doesn't need to fit the number of the
store.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/settings
Date: March 2010
@@ -57,6 +61,7 @@ Description: When read, this file returns the settings stored in the mouse.
When written, this file lets write settings back to the mouse.
The data has to be 36 bytes long. The mouse will reject invalid
data.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/startup_profile
Date: March 2010
@@ -66,6 +71,7 @@ Description: The integer value of this attribute ranges from 1 to 5.
that's active when the mouse is powered on.
When written, this file sets the number of the startup profile
and the mouse activates this profile immediately.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/tcu
Date: March 2010
@@ -77,6 +83,7 @@ Description: The mouse has a "Tracking Control Unit" which lets the user
Writing 0 in this file will switch the TCU off.
Writing 1 in this file will start the calibration which takes
around 6 seconds to complete and activates the TCU.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/weight
Date: March 2010
@@ -96,3 +103,4 @@ Description: The mouse can be equipped with one of four supplied weights
4 20g
This file is readonly.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
index 0f9f30eb1742..00efced73969 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
@@ -4,6 +4,7 @@ Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
Description: When read, this file returns the number of the actual profile in
range 0-4.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/firmware_version
Date: October 2010
@@ -14,6 +15,7 @@ Description: When read, this file returns the raw integer version number of the
number the decimal point has to be shifted 2 positions to the
left. E.g. a returned value of 121 means 1.21
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/macro
Date: October 2010
@@ -24,6 +26,7 @@ Description: The mouse can store a macro with max 500 key/button strokes
button for a specific profile. Button and profile numbers are
included in written data. The data has to be 2082 bytes long.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile_buttons
Date: August 2010
@@ -37,6 +40,7 @@ Description: The mouse can store 5 profiles which can be switched by the
Which profile to write is determined by the profile number
contained in the data.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_buttons
Date: August 2010
@@ -47,6 +51,7 @@ Description: The mouse can store 5 profiles which can be switched by the
When read, these files return the respective profile buttons.
The returned data is 77 bytes in size.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile_settings
Date: October 2010
@@ -61,6 +66,7 @@ Description: The mouse can store 5 profiles which can be switched by the
Which profile to write is determined by the profile number
contained in the data.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_settings
Date: August 2010
@@ -72,6 +78,7 @@ Description: The mouse can store 5 profiles which can be switched by the
When read, these files return the respective profile settings.
The returned data is 43 bytes in size.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/sensor
Date: October 2010
@@ -80,6 +87,7 @@ Description: The mouse has a tracking- and a distance-control-unit. These
can be activated/deactivated and the lift-off distance can be
set. The data has to be 6 bytes long.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/startup_profile
Date: October 2010
@@ -89,6 +97,7 @@ Description: The integer value of this attribute ranges from 0-4.
that's active when the mouse is powered on.
When written, this file sets the number of the startup profile
and the mouse activates this profile immediately.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/tcu
Date: October 2010
@@ -97,6 +106,7 @@ Description: When written a calibration process for the tracking control unit
can be initiated/cancelled.
The data has to be 3 bytes long.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/tcu_image
Date: October 2010
@@ -106,3 +116,4 @@ Description: When read the mouse returns a 30x30 pixel image of the
calibration process initiated with tcu.
The returned data is 1028 bytes in size.
This file is readonly.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus
new file mode 100644
index 000000000000..fdfa16f8189b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus
@@ -0,0 +1,100 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_cpi
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-4.
+ When read, this attribute returns the number of the active
+ cpi level.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_profile
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 0-4.
+ When read, this attribute returns the number of the active
+ profile.
+ When written, the mouse activates this profile immediately.
+ The profile that's active when powered down is the same that's
+ active when the mouse is powered on.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_sensitivity_x
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-10.
+ When read, this attribute returns the number of the actual
+ sensitivity in x direction.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_sensitivity_y
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-10.
+ When read, this attribute returns the number of the actual
+ sensitivity in y direction.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/firmware_version
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the raw integer version number of the
+ firmware reported by the mouse. Using the integer value eases
+ further usage in other programs. To receive the real version
+ number the decimal point has to be shifted 2 positions to the
+ left. E.g. a returned value of 121 means 1.21
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile_buttons
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+		profile_buttons holds information about button layout.
+ When written, this file lets one write the respective profile
+ buttons back to the mouse. The data has to be 23 bytes long.
+ The mouse will reject invalid data.
+ Which profile to write is determined by the profile number
+ contained in the data.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile[1-5]_buttons
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+		profile_buttons holds information about button layout.
+ When read, these files return the respective profile buttons.
+ The returned data is 23 bytes in size.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile_settings
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+		profile_settings holds information like resolution, sensitivity
+ and light effects.
+ When written, this file lets one write the respective profile
+ settings back to the mouse. The data has to be 16 bytes long.
+ The mouse will reject invalid data.
+ Which profile to write is determined by the profile number
+ contained in the data.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile[1-5]_settings
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+		profile_settings holds information like resolution, sensitivity
+ and light effects.
+ When read, these files return the respective profile settings.
+ The returned data is 16 bytes in size.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
index 1c37b823f142..5fab71af3c46 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
@@ -13,6 +13,7 @@ Description: It is possible to switch the cpi setting of the mouse with the
4 1600
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_profile
Date: August 2010
@@ -20,6 +21,7 @@ Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
Description: When read, this file returns the number of the actual profile in
range 0-4.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/firmware_version
Date: August 2010
@@ -30,6 +32,7 @@ Description: When read, this file returns the raw integer version number of the
number the decimal point has to be shifted 2 positions to the
left. E.g. a returned value of 138 means 1.38
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile_settings
Date: August 2010
@@ -44,6 +47,7 @@ Description: The mouse can store 5 profiles which can be switched by the
Which profile to write is determined by the profile number
contained in the data.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_settings
Date: August 2010
@@ -55,6 +59,7 @@ Description: The mouse can store 5 profiles which can be switched by the
When read, these files return the respective profile settings.
The returned data is 13 bytes in size.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile_buttons
Date: August 2010
@@ -68,6 +73,7 @@ Description: The mouse can store 5 profiles which can be switched by the
Which profile to write is determined by the profile number
contained in the data.
This file is writeonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_buttons
Date: August 2010
@@ -78,6 +84,7 @@ Description: The mouse can store 5 profiles which can be switched by the
When read, these files return the respective profile buttons.
The returned data is 19 bytes in size.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/startup_profile
Date: August 2010
@@ -86,6 +93,7 @@ Description: The integer value of this attribute ranges from 0-4.
When read, this attribute returns the number of the profile
that's active when the mouse is powered on.
This file is readonly.
+Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/settings
Date: August 2010
@@ -96,3 +104,4 @@ Description: When read, this file returns the settings stored in the mouse.
When written, this file lets write settings back to the mouse.
The data has to be 3 bytes long. The mouse will reject invalid
data.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-fs-ext4 b/Documentation/ABI/testing/sysfs-fs-ext4
index 5fb709997d96..f22ac0872ae8 100644
--- a/Documentation/ABI/testing/sysfs-fs-ext4
+++ b/Documentation/ABI/testing/sysfs-fs-ext4
@@ -48,7 +48,7 @@ Description:
will have its blocks allocated out of its own unique
preallocation pool.
-What: /sys/fs/ext4/<disk>/inode_readahead
+What: /sys/fs/ext4/<disk>/inode_readahead_blks
Date: March 2008
Contact: "Theodore Ts'o" <tytso@mit.edu>
Description:
@@ -85,7 +85,14 @@ Date: June 2008
Contact: "Theodore Ts'o" <tytso@mit.edu>
Description:
Tuning parameter which (if non-zero) controls the goal
- inode used by the inode allocator in p0reference to
- all other allocation hueristics. This is intended for
+ inode used by the inode allocator in preference to
+ all other allocation heuristics. This is intended for
debugging use only, and should be 0 on production
systems.
+
+What: /sys/fs/ext4/<disk>/max_writeback_mb_bump
+Date: September 2009
+Contact: "Theodore Ts'o" <tytso@mit.edu>
+Description:
+ The maximum number of megabytes the writeback code will
+		try to write out before moving on to another inode.
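A hedged tuning sketch for the new knob; sda1 is only an example device name, and the value written is arbitrary rather than a recommendation:

	$ cat /sys/fs/ext4/sda1/max_writeback_mb_bump
	# echo 64 > /sys/fs/ext4/sda1/max_writeback_mb_bump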
diff --git a/Documentation/ABI/testing/sysfs-fs-pstore b/Documentation/ABI/testing/sysfs-fs-pstore
new file mode 100644
index 000000000000..8e659d854805
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-fs-pstore
@@ -0,0 +1,7 @@
+What: /sys/fs/pstore/kmsg_bytes
+Date: January 2011
+Kernel Version: 2.6.38
+Contact: "Tony Luck" <tony.luck@intel.com>
+Description:
+		Controls the amount of console log that will be saved
+ to persistent store on oops/panic.
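An illustrative shell session for kmsg_bytes, assuming the attribute accepts writes as its description implies; 16384 is an arbitrary example value, not a recommended setting:

	$ cat /sys/fs/pstore/kmsg_bytes
	# echo 16384 > /sys/fs/pstore/kmsg_bytes

This bounds how much of the tail of the console log is copied to the persistent store when an oops or panic occurs.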
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5e87ad58c0b5..f51f28531b8d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -82,6 +82,11 @@
</sect1>
</chapter>
+ <chapter id="fs_events">
+ <title>Events based on file descriptors</title>
+!Efs/eventfd.c
+ </chapter>
+
<chapter id="sysfs">
<title>The Filesystem for Exporting Kernel Objects</title>
!Efs/sysfs/file.c
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index be34dcbe0d90..d2f99e5a3a2f 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -129,6 +129,7 @@
<!ENTITY v4l2-audioout "struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link>">
<!ENTITY v4l2-bt-timings "struct&nbsp;<link linkend='v4l2-bt-timings'>v4l2_bt_timings</link>">
<!ENTITY v4l2-buffer "struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link>">
+<!ENTITY v4l2-plane "struct&nbsp;<link linkend='v4l2-plane'>v4l2_plane</link>">
<!ENTITY v4l2-capability "struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link>">
<!ENTITY v4l2-captureparm "struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link>">
<!ENTITY v4l2-clip "struct&nbsp;<link linkend='v4l2-clip'>v4l2_clip</link>">
@@ -167,6 +168,8 @@
<!ENTITY v4l2-output "struct&nbsp;<link linkend='v4l2-output'>v4l2_output</link>">
<!ENTITY v4l2-outputparm "struct&nbsp;<link linkend='v4l2-outputparm'>v4l2_outputparm</link>">
<!ENTITY v4l2-pix-format "struct&nbsp;<link linkend='v4l2-pix-format'>v4l2_pix_format</link>">
+<!ENTITY v4l2-pix-format-mplane "struct&nbsp;<link linkend='v4l2-pix-format-mplane'>v4l2_pix_format_mplane</link>">
+<!ENTITY v4l2-plane-pix-format "struct&nbsp;<link linkend='v4l2-plane-pix-format'>v4l2_plane_pix_format</link>">
<!ENTITY v4l2-queryctrl "struct&nbsp;<link linkend='v4l2-queryctrl'>v4l2_queryctrl</link>">
<!ENTITY v4l2-querymenu "struct&nbsp;<link linkend='v4l2-querymenu'>v4l2_querymenu</link>">
<!ENTITY v4l2-rect "struct&nbsp;<link linkend='v4l2-rect'>v4l2_rect</link>">
@@ -202,6 +205,7 @@
<!-- Subsections -->
<!ENTITY sub-biblio SYSTEM "v4l/biblio.xml">
<!ENTITY sub-common SYSTEM "v4l/common.xml">
+<!ENTITY sub-planar-apis SYSTEM "v4l/planar-apis.xml">
<!ENTITY sub-compat SYSTEM "v4l/compat.xml">
<!ENTITY sub-controls SYSTEM "v4l/controls.xml">
<!ENTITY sub-dev-capture SYSTEM "v4l/dev-capture.xml">
@@ -233,6 +237,7 @@
<!ENTITY sub-io SYSTEM "v4l/io.xml">
<!ENTITY sub-grey SYSTEM "v4l/pixfmt-grey.xml">
<!ENTITY sub-nv12 SYSTEM "v4l/pixfmt-nv12.xml">
+<!ENTITY sub-nv12m SYSTEM "v4l/pixfmt-nv12m.xml">
<!ENTITY sub-nv16 SYSTEM "v4l/pixfmt-nv16.xml">
<!ENTITY sub-packed-rgb SYSTEM "v4l/pixfmt-packed-rgb.xml">
<!ENTITY sub-packed-yuv SYSTEM "v4l/pixfmt-packed-yuv.xml">
@@ -247,6 +252,7 @@
<!ENTITY sub-yuv410 SYSTEM "v4l/pixfmt-yuv410.xml">
<!ENTITY sub-yuv411p SYSTEM "v4l/pixfmt-yuv411p.xml">
<!ENTITY sub-yuv420 SYSTEM "v4l/pixfmt-yuv420.xml">
+<!ENTITY sub-yuv420m SYSTEM "v4l/pixfmt-yuv420m.xml">
<!ENTITY sub-yuv422p SYSTEM "v4l/pixfmt-yuv422p.xml">
<!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
@@ -333,6 +339,7 @@
<!ENTITY write SYSTEM "v4l/func-write.xml">
<!ENTITY grey SYSTEM "v4l/pixfmt-grey.xml">
<!ENTITY nv12 SYSTEM "v4l/pixfmt-nv12.xml">
+<!ENTITY nv12m SYSTEM "v4l/pixfmt-nv12m.xml">
<!ENTITY nv16 SYSTEM "v4l/pixfmt-nv16.xml">
<!ENTITY packed-rgb SYSTEM "v4l/pixfmt-packed-rgb.xml">
<!ENTITY packed-yuv SYSTEM "v4l/pixfmt-packed-yuv.xml">
@@ -347,6 +354,7 @@
<!ENTITY yuv410 SYSTEM "v4l/pixfmt-yuv410.xml">
<!ENTITY yuv411p SYSTEM "v4l/pixfmt-yuv411p.xml">
<!ENTITY yuv420 SYSTEM "v4l/pixfmt-yuv420.xml">
+<!ENTITY yuv420m SYSTEM "v4l/pixfmt-yuv420m.xml">
<!ENTITY yuv422p SYSTEM "v4l/pixfmt-yuv422p.xml">
<!ENTITY yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
diff --git a/Documentation/DocBook/v4l/common.xml b/Documentation/DocBook/v4l/common.xml
index cea23e1c4fc6..dbab79c215c1 100644
--- a/Documentation/DocBook/v4l/common.xml
+++ b/Documentation/DocBook/v4l/common.xml
@@ -846,6 +846,8 @@ conversion routine or library for integration into applications.</para>
</section>
</section>
+ &sub-planar-apis;
+
<section id="crop">
<title>Image Cropping, Insertion and Scaling</title>
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index c9ce61d981f5..223c24c536b7 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -2353,6 +2353,17 @@ that used it. It was originally scheduled for removal in 2.6.35.
</listitem>
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 2.6.38</title>
+ <orderedlist>
+ <listitem>
+ <para>Multi-planar API added. Does not affect the compatibility of
+ current drivers and applications. See
+ <link linkend="planar-apis">multi-planar API</link>
+ for details.</para>
+ </listitem>
+ </orderedlist>
+ </section>
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/v4l/dev-capture.xml b/Documentation/DocBook/v4l/dev-capture.xml
index 32807e43f170..2237c661f26a 100644
--- a/Documentation/DocBook/v4l/dev-capture.xml
+++ b/Documentation/DocBook/v4l/dev-capture.xml
@@ -18,7 +18,8 @@ files are used for video output devices.</para>
<title>Querying Capabilities</title>
<para>Devices supporting the video capture interface set the
-<constant>V4L2_CAP_VIDEO_CAPTURE</constant> flag in the
+<constant>V4L2_CAP_VIDEO_CAPTURE</constant> or
+<constant>V4L2_CAP_VIDEO_CAPTURE_MPLANE</constant> flag in the
<structfield>capabilities</structfield> field of &v4l2-capability;
returned by the &VIDIOC-QUERYCAP; ioctl. As secondary device functions
they may also support the <link linkend="overlay">video overlay</link>
@@ -64,9 +65,11 @@ linkend="crop" />.</para>
<para>To query the current image format applications set the
<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> and call the
+<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> or
+<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant> and call the
&VIDIOC-G-FMT; ioctl with a pointer to this structure. Drivers fill
-the &v4l2-pix-format; <structfield>pix</structfield> member of the
+the &v4l2-pix-format; <structfield>pix</structfield> or the
+&v4l2-pix-format-mplane; <structfield>pix_mp</structfield> member of the
<structfield>fmt</structfield> union.</para>
<para>To request different parameters applications set the
@@ -84,8 +87,8 @@ adjust the parameters and finally return the actual parameters as
without disabling I/O or possibly time consuming hardware
preparations.</para>
- <para>The contents of &v4l2-pix-format; are discussed in <xref
-linkend="pixfmt" />. See also the specification of the
+ <para>The contents of &v4l2-pix-format; and &v4l2-pix-format-mplane;
+are discussed in <xref linkend="pixfmt" />. See also the specification of the
<constant>VIDIOC_G_FMT</constant>, <constant>VIDIOC_S_FMT</constant>
and <constant>VIDIOC_TRY_FMT</constant> ioctls for details. Video
capture devices must implement both the
diff --git a/Documentation/DocBook/v4l/dev-output.xml b/Documentation/DocBook/v4l/dev-output.xml
index 63c3c20e5a72..919e22c53854 100644
--- a/Documentation/DocBook/v4l/dev-output.xml
+++ b/Documentation/DocBook/v4l/dev-output.xml
@@ -17,7 +17,8 @@ files are used for video capture devices.</para>
<title>Querying Capabilities</title>
<para>Devices supporting the video output interface set the
-<constant>V4L2_CAP_VIDEO_OUTPUT</constant> flag in the
+<constant>V4L2_CAP_VIDEO_OUTPUT</constant> or
+<constant>V4L2_CAP_VIDEO_OUTPUT_MPLANE</constant> flag in the
<structfield>capabilities</structfield> field of &v4l2-capability;
returned by the &VIDIOC-QUERYCAP; ioctl. As secondary device functions
they may also support the <link linkend="raw-vbi">raw VBI
@@ -60,9 +61,11 @@ linkend="crop" />.</para>
<para>To query the current image format applications set the
<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> and call the
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> or
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant> and call the
&VIDIOC-G-FMT; ioctl with a pointer to this structure. Drivers fill
-the &v4l2-pix-format; <structfield>pix</structfield> member of the
+the &v4l2-pix-format; <structfield>pix</structfield> or the
+&v4l2-pix-format-mplane; <structfield>pix_mp</structfield> member of the
<structfield>fmt</structfield> union.</para>
<para>To request different parameters applications set the
@@ -80,8 +83,8 @@ adjust the parameters and finally return the actual parameters as
without disabling I/O or possibly time consuming hardware
preparations.</para>
- <para>The contents of &v4l2-pix-format; are discussed in <xref
-linkend="pixfmt" />. See also the specification of the
+ <para>The contents of &v4l2-pix-format; and &v4l2-pix-format-mplane;
+are discussed in <xref linkend="pixfmt" />. See also the specification of the
<constant>VIDIOC_G_FMT</constant>, <constant>VIDIOC_S_FMT</constant>
and <constant>VIDIOC_TRY_FMT</constant> ioctls for details. Video
output devices must implement both the
diff --git a/Documentation/DocBook/v4l/func-mmap.xml b/Documentation/DocBook/v4l/func-mmap.xml
index 2e2fc3933aea..786732b64bbd 100644
--- a/Documentation/DocBook/v4l/func-mmap.xml
+++ b/Documentation/DocBook/v4l/func-mmap.xml
@@ -45,7 +45,10 @@ just specify a <constant>NULL</constant> pointer here.</para>
<listitem>
<para>Length of the memory area to map. This must be the
same value as returned by the driver in the &v4l2-buffer;
-<structfield>length</structfield> field.</para>
+<structfield>length</structfield> field for the
+single-planar API, and the same value as returned by the driver
+in the &v4l2-plane; <structfield>length</structfield> field for the
+multi-planar API.</para>
</listitem>
</varlistentry>
<varlistentry>
@@ -106,7 +109,10 @@ flag.</para>
<listitem>
<para>Offset of the buffer in device memory. This must be the
same value as returned by the driver in the &v4l2-buffer;
-<structfield>m</structfield> union <structfield>offset</structfield> field.</para>
+<structfield>m</structfield> union <structfield>offset</structfield> field for
+the single-planar API, and the same value as returned by the driver
+in the &v4l2-plane; <structfield>m</structfield> union
+<structfield>mem_offset</structfield> field for the multi-planar API.</para>
</listitem>
</varlistentry>
</variablelist>
diff --git a/Documentation/DocBook/v4l/func-munmap.xml b/Documentation/DocBook/v4l/func-munmap.xml
index 502ed49323b0..e2c4190f9bb6 100644
--- a/Documentation/DocBook/v4l/func-munmap.xml
+++ b/Documentation/DocBook/v4l/func-munmap.xml
@@ -37,7 +37,8 @@
<para>Length of the mapped buffer. This must be the same
value as given to <function>mmap()</function> and returned by the
driver in the &v4l2-buffer; <structfield>length</structfield>
-field.</para>
+field for the single-planar API and in the &v4l2-plane;
+<structfield>length</structfield> field for the multi-planar API.</para>
</listitem>
</varlistentry>
</variablelist>
diff --git a/Documentation/DocBook/v4l/io.xml b/Documentation/DocBook/v4l/io.xml
index d424886beda0..227e7ac45a06 100644
--- a/Documentation/DocBook/v4l/io.xml
+++ b/Documentation/DocBook/v4l/io.xml
@@ -121,18 +121,22 @@ mapped.</para>
<para>Before applications can access the buffers they must map
them into their address space with the &func-mmap; function. The
location of the buffers in device memory can be determined with the
-&VIDIOC-QUERYBUF; ioctl. The <structfield>m.offset</structfield> and
-<structfield>length</structfield> returned in a &v4l2-buffer; are
-passed as sixth and second parameter to the
-<function>mmap()</function> function. The offset and length values
-must not be modified. Remember the buffers are allocated in physical
-memory, as opposed to virtual memory which can be swapped out to disk.
-Applications should free the buffers as soon as possible with the
-&func-munmap; function.</para>
+&VIDIOC-QUERYBUF; ioctl. In the single-planar API case, the
+<structfield>m.offset</structfield> and <structfield>length</structfield>
+returned in a &v4l2-buffer; are passed as sixth and second parameter to the
+<function>mmap()</function> function. When using the multi-planar API,
+struct &v4l2-buffer; contains an array of &v4l2-plane; structures, each
+containing its own <structfield>m.mem_offset</structfield> and
+<structfield>length</structfield>. In that case every plane of every buffer
+has to be mapped separately, so the number of calls to &func-mmap; should be
+equal to the number of buffers times the number of planes in each buffer.
+The offset and length values must not be modified.
+Remember, the buffers are allocated in physical memory, as opposed to virtual
+memory, which can be swapped out to disk. Applications should free the buffers
+as soon as possible with the &func-munmap; function.</para>
<example>
- <title>Mapping buffers</title>
-
+ <title>Mapping buffers in the single-planar API</title>
<programlisting>
&v4l2-requestbuffers; reqbuf;
struct {
@@ -141,63 +145,145 @@ struct {
} *buffers;
unsigned int i;
-memset (&amp;reqbuf, 0, sizeof (reqbuf));
+memset(&amp;reqbuf, 0, sizeof(reqbuf));
reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
reqbuf.memory = V4L2_MEMORY_MMAP;
reqbuf.count = 20;
if (-1 == ioctl (fd, &VIDIOC-REQBUFS;, &amp;reqbuf)) {
if (errno == EINVAL)
- printf ("Video capturing or mmap-streaming is not supported\n");
+ printf("Video capturing or mmap-streaming is not supported\n");
else
- perror ("VIDIOC_REQBUFS");
+ perror("VIDIOC_REQBUFS");
- exit (EXIT_FAILURE);
+ exit(EXIT_FAILURE);
}
/* We want at least five buffers. */
if (reqbuf.count &lt; 5) {
/* You may need to free the buffers here. */
- printf ("Not enough buffer memory\n");
- exit (EXIT_FAILURE);
+ printf("Not enough buffer memory\n");
+ exit(EXIT_FAILURE);
}
-buffers = calloc (reqbuf.count, sizeof (*buffers));
-assert (buffers != NULL);
+buffers = calloc(reqbuf.count, sizeof(*buffers));
+assert(buffers != NULL);
for (i = 0; i &lt; reqbuf.count; i++) {
&v4l2-buffer; buffer;
- memset (&amp;buffer, 0, sizeof (buffer));
+ memset(&amp;buffer, 0, sizeof(buffer));
buffer.type = reqbuf.type;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.index = i;
if (-1 == ioctl (fd, &VIDIOC-QUERYBUF;, &amp;buffer)) {
- perror ("VIDIOC_QUERYBUF");
- exit (EXIT_FAILURE);
+ perror("VIDIOC_QUERYBUF");
+ exit(EXIT_FAILURE);
}
buffers[i].length = buffer.length; /* remember for munmap() */
- buffers[i].start = mmap (NULL, buffer.length,
- PROT_READ | PROT_WRITE, /* recommended */
- MAP_SHARED, /* recommended */
- fd, buffer.m.offset);
+ buffers[i].start = mmap(NULL, buffer.length,
+ PROT_READ | PROT_WRITE, /* recommended */
+ MAP_SHARED, /* recommended */
+ fd, buffer.m.offset);
if (MAP_FAILED == buffers[i].start) {
/* If you do not exit here you should unmap() and free()
the buffers mapped so far. */
- perror ("mmap");
- exit (EXIT_FAILURE);
+ perror("mmap");
+ exit(EXIT_FAILURE);
+ }
+}
+
+/* Cleanup. */
+
+for (i = 0; i &lt; reqbuf.count; i++)
+ munmap(buffers[i].start, buffers[i].length);
+ </programlisting>
+ </example>
+
+ <example>
+ <title>Mapping buffers in the multi-planar API</title>
+ <programlisting>
+&v4l2-requestbuffers; reqbuf;
+/* Our current format uses 3 planes per buffer */
+#define FMT_NUM_PLANES 3
+
+struct {
+ void *start[FMT_NUM_PLANES];
+ size_t length[FMT_NUM_PLANES];
+} *buffers;
+unsigned int i, j;
+
+memset(&amp;reqbuf, 0, sizeof(reqbuf));
+reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+reqbuf.memory = V4L2_MEMORY_MMAP;
+reqbuf.count = 20;
+
+if (ioctl(fd, &VIDIOC-REQBUFS;, &amp;reqbuf) &lt; 0) {
+ if (errno == EINVAL)
+ printf("Video capturing or mmap-streaming is not supported\n");
+ else
+ perror("VIDIOC_REQBUFS");
+
+ exit(EXIT_FAILURE);
+}
+
+/* We want at least five buffers. */
+
+if (reqbuf.count &lt; 5) {
+ /* You may need to free the buffers here. */
+ printf("Not enough buffer memory\n");
+ exit(EXIT_FAILURE);
+}
+
+buffers = calloc(reqbuf.count, sizeof(*buffers));
+assert(buffers != NULL);
+
+for (i = 0; i &lt; reqbuf.count; i++) {
+ &v4l2-buffer; buffer;
+ &v4l2-plane; planes[FMT_NUM_PLANES];
+
+ memset(&amp;buffer, 0, sizeof(buffer));
+ buffer.type = reqbuf.type;
+ buffer.memory = V4L2_MEMORY_MMAP;
+ buffer.index = i;
+ /* length in struct v4l2_buffer in multi-planar API stores the size
+ * of planes array. */
+ buffer.length = FMT_NUM_PLANES;
+ buffer.m.planes = planes;
+
+ if (ioctl(fd, &VIDIOC-QUERYBUF;, &amp;buffer) &lt; 0) {
+ perror("VIDIOC_QUERYBUF");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Every plane has to be mapped separately */
+ for (j = 0; j &lt; FMT_NUM_PLANES; j++) {
+ buffers[i].length[j] = buffer.m.planes[j].length; /* remember for munmap() */
+
+ buffers[i].start[j] = mmap(NULL, buffer.m.planes[j].length,
+ PROT_READ | PROT_WRITE, /* recommended */
+ MAP_SHARED, /* recommended */
+				   fd, buffer.m.planes[j].m.mem_offset);
+
+ if (MAP_FAILED == buffers[i].start[j]) {
+ /* If you do not exit here you should unmap() and free()
+ the buffers and planes mapped so far. */
+ perror("mmap");
+ exit(EXIT_FAILURE);
+ }
}
}
/* Cleanup. */
for (i = 0; i &lt; reqbuf.count; i++)
- munmap (buffers[i].start, buffers[i].length);
+ for (j = 0; j &lt; FMT_NUM_PLANES; j++)
+ munmap(buffers[i].start[j], buffers[i].length[j]);
</programlisting>
</example>
@@ -286,13 +372,13 @@ pointer method (not only memory mapping) is supported must be
determined by calling the &VIDIOC-REQBUFS; ioctl.</para>
<para>This I/O method combines advantages of the read/write and
-memory mapping methods. Buffers are allocated by the application
+memory mapping methods. Buffers (planes) are allocated by the application
itself, and can reside for example in virtual or shared memory. Only
pointers to data are exchanged, these pointers and meta-information
-are passed in &v4l2-buffer;. The driver must be switched
-into user pointer I/O mode by calling the &VIDIOC-REQBUFS; with the
-desired buffer type. No buffers are allocated beforehands,
-consequently they are not indexed and cannot be queried like mapped
+are passed in &v4l2-buffer; (or in &v4l2-plane; in the multi-planar API case).
+The driver must be switched into user pointer I/O mode by calling the
+&VIDIOC-REQBUFS; with the desired buffer type. No buffers (planes) are allocated
+beforehand, consequently they are not indexed and cannot be queried like mapped
buffers with the <constant>VIDIOC_QUERYBUF</constant> ioctl.</para>
<example>
@@ -316,7 +402,7 @@ if (ioctl (fd, &VIDIOC-REQBUFS;, &amp;reqbuf) == -1) {
</programlisting>
</example>
- <para>Buffer addresses and sizes are passed on the fly with the
+ <para>Buffer (plane) addresses and sizes are passed on the fly with the
&VIDIOC-QBUF; ioctl. Although buffers are commonly cycled,
applications can pass different addresses and sizes at each
<constant>VIDIOC_QBUF</constant> call. If required by the hardware the
@@ -396,11 +482,18 @@ rest should be evident.</para>
<title>Buffers</title>
<para>A buffer contains data exchanged by application and
-driver using one of the Streaming I/O methods. Only pointers to
-buffers are exchanged, the data itself is not copied. These pointers,
-together with meta-information like timestamps or field parity, are
-stored in a struct <structname>v4l2_buffer</structname>, argument to
-the &VIDIOC-QUERYBUF;, &VIDIOC-QBUF; and &VIDIOC-DQBUF; ioctl.</para>
+driver using one of the Streaming I/O methods. In the multi-planar API, the
+data is held in planes, while the buffer structure acts as a container
+for the planes. Only pointers to buffers (planes) are exchanged, the data
+itself is not copied. These pointers, together with meta-information like
+timestamps or field parity, are stored in a struct
+<structname>v4l2_buffer</structname>, argument to
+the &VIDIOC-QUERYBUF;, &VIDIOC-QBUF; and &VIDIOC-DQBUF; ioctl.
+In the multi-planar API, some plane-specific members of struct
+<structname>v4l2_buffer</structname>, such as pointers and sizes for each
+plane, are stored in struct <structname>v4l2_plane</structname> instead.
+In that case, struct <structname>v4l2_buffer</structname> contains an array of
+plane structures.</para>
<para>Nominally timestamps refer to the first data byte transmitted.
In practice however the wide range of hardware covered by the V4L2 API
@@ -551,26 +644,40 @@ in accordance with the selected I/O method.</entry>
<entry></entry>
<entry>__u32</entry>
<entry><structfield>offset</structfield></entry>
- <entry>When <structfield>memory</structfield> is
-<constant>V4L2_MEMORY_MMAP</constant> this is the offset of the buffer
-from the start of the device memory. The value is returned by the
-driver and apart of serving as parameter to the &func-mmap; function
-not useful for applications. See <xref linkend="mmap" /> for details.</entry>
+ <entry>For the single-planar API and when
+<structfield>memory</structfield> is <constant>V4L2_MEMORY_MMAP</constant> this
+is the offset of the buffer from the start of the device memory. The value is
+returned by the driver and, apart from serving as a parameter to the &func-mmap;
+function, is not useful for applications. See <xref linkend="mmap" /> for details.
+ </entry>
</row>
<row>
<entry></entry>
<entry>unsigned long</entry>
<entry><structfield>userptr</structfield></entry>
- <entry>When <structfield>memory</structfield> is
-<constant>V4L2_MEMORY_USERPTR</constant> this is a pointer to the
-buffer (casted to unsigned long type) in virtual memory, set by the
-application. See <xref linkend="userp" /> for details.</entry>
+ <entry>For the single-planar API and when
+<structfield>memory</structfield> is <constant>V4L2_MEMORY_USERPTR</constant>
+this is a pointer to the buffer (casted to unsigned long type) in virtual
+memory, set by the application. See <xref linkend="userp" /> for details.
+ </entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>struct v4l2_plane</entry>
+ <entry><structfield>*planes</structfield></entry>
+ <entry>When using the multi-planar API, contains a userspace pointer
+ to an array of &v4l2-plane;. The size of the array should be put
+ in the <structfield>length</structfield> field of this
+ <structname>v4l2_buffer</structname> structure.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>length</structfield></entry>
<entry></entry>
- <entry>Size of the buffer (not the payload) in bytes.</entry>
+ <entry>Size of the buffer (not the payload) in bytes for the
+	    single-planar API. For the multi-planar API, it should contain the
+ number of elements in the <structfield>planes</structfield> array.
+ </entry>
</row>
<row>
<entry>__u32</entry>
@@ -596,6 +703,66 @@ should set this to 0.</entry>
</tgroup>
</table>
+ <table frame="none" pgwide="1" id="v4l2-plane">
+ <title>struct <structname>v4l2_plane</structname></title>
+ <tgroup cols="4">
+ &cs-ustr;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>bytesused</structfield></entry>
+ <entry></entry>
+ <entry>The number of bytes occupied by data in the plane
+ (its payload).</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>length</structfield></entry>
+ <entry></entry>
+ <entry>Size in bytes of the plane (not its payload).</entry>
+ </row>
+ <row>
+ <entry>union</entry>
+ <entry><structfield>m</structfield></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>__u32</entry>
+ <entry><structfield>mem_offset</structfield></entry>
+ <entry>When the memory type in the containing &v4l2-buffer; is
+ <constant>V4L2_MEMORY_MMAP</constant>, this is the value that
+ should be passed to &func-mmap;, similar to the
+ <structfield>offset</structfield> field in &v4l2-buffer;.</entry>
+ </row>
+ <row>
+ <entry></entry>
+	    <entry>unsigned long</entry>
+ <entry><structfield>userptr</structfield></entry>
+ <entry>When the memory type in the containing &v4l2-buffer; is
+ <constant>V4L2_MEMORY_USERPTR</constant>, this is a userspace
+ pointer to the memory allocated for this plane by an application.
+ </entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>data_offset</structfield></entry>
+ <entry></entry>
+ <entry>Offset in bytes to video data in the plane, if applicable.
+ </entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved[11]</structfield></entry>
+ <entry></entry>
+ <entry>Reserved for future use. Should be zeroed by an
+ application.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
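For illustration, a minimal user-space sketch of how the fields above might be consulted
for a dequeued multi-planar buffer; it assumes the buffer was queried and its planes
mmap()ed as in the multi-planar mapping example earlier in this chapter, and
handle_plane() is a hypothetical placeholder:

unsigned int p;

for (p = 0; p < buffer.length; p++) {
	struct v4l2_plane *plane = &buffer.m.planes[p];

	/* bytesused is the payload of this plane; data_offset, when set by
	   the driver, tells where the video data begins within the plane. */
	handle_plane((unsigned char *)buffers[i].start[p] + plane->data_offset,
		     plane->bytesused);
}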
<table frame="none" pgwide="1" id="v4l2-buf-type">
<title>enum v4l2_buf_type</title>
<tgroup cols="3">
@@ -604,13 +771,27 @@ should set this to 0.</entry>
<row>
<entry><constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant></entry>
<entry>1</entry>
- <entry>Buffer of a video capture stream, see <xref
+ <entry>Buffer of a single-planar video capture stream, see <xref
+ linkend="capture" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>
+ </entry>
+ <entry>9</entry>
+ <entry>Buffer of a multi-planar video capture stream, see <xref
linkend="capture" />.</entry>
</row>
<row>
<entry><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant></entry>
<entry>2</entry>
- <entry>Buffer of a video output stream, see <xref
+ <entry>Buffer of a single-planar video output stream, see <xref
+ linkend="output" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>
+ </entry>
+ <entry>10</entry>
+ <entry>Buffer of a multi-planar video output stream, see <xref
linkend="output" />.</entry>
</row>
<row>
diff --git a/Documentation/DocBook/v4l/pixfmt-nv12m.xml b/Documentation/DocBook/v4l/pixfmt-nv12m.xml
new file mode 100644
index 000000000000..c9e166d9ded8
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-nv12m.xml
@@ -0,0 +1,154 @@
+ <refentry id="V4L2-PIX-FMT-NV12M">
+ <refmeta>
+    <refentrytitle>V4L2_PIX_FMT_NV12M ('NM12')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname> <constant>V4L2_PIX_FMT_NV12M</constant></refname>
+ <refpurpose>Variation of <constant>V4L2_PIX_FMT_NV12</constant> with planes
+    non-contiguous in memory.</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is a multi-planar, two-plane version of the YUV 4:2:0 format.
+The three components are separated into two sub-images or planes.
+<constant>V4L2_PIX_FMT_NV12M</constant> differs from <constant>V4L2_PIX_FMT_NV12
+</constant> in that the two planes are non-contiguous in memory, i.e. the chroma
+plane does not necessarily immediately follow the luma plane.
+The luminance data occupies the first plane. The Y plane has one byte per pixel.
+In the second plane there is chrominance data with alternating chroma samples.
+The CbCr plane is the same width, in bytes, as the Y plane (and of the image),
+but is half as tall in pixels. Each CbCr pair belongs to four pixels. For example,
+Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
+Y'<subscript>00</subscript>, Y'<subscript>01</subscript>,
+Y'<subscript>10</subscript>, Y'<subscript>11</subscript>. </para>
+
+ <para><constant>V4L2_PIX_FMT_NV12M</constant> is intended to be
+used only in drivers and applications that support the multi-planar API,
+described in <xref linkend="planar-apis"/>. </para>
+
+ <para>If the Y plane has pad bytes after each row, then the
+CbCr plane has as many pad bytes after its rows.</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_NV12M</constant> 4 &times; 4 pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start0&nbsp;+&nbsp;0:</entry>
+ <entry>Y'<subscript>00</subscript></entry>
+ <entry>Y'<subscript>01</subscript></entry>
+ <entry>Y'<subscript>02</subscript></entry>
+ <entry>Y'<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;4:</entry>
+ <entry>Y'<subscript>10</subscript></entry>
+ <entry>Y'<subscript>11</subscript></entry>
+ <entry>Y'<subscript>12</subscript></entry>
+ <entry>Y'<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;8:</entry>
+ <entry>Y'<subscript>20</subscript></entry>
+ <entry>Y'<subscript>21</subscript></entry>
+ <entry>Y'<subscript>22</subscript></entry>
+ <entry>Y'<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;12:</entry>
+ <entry>Y'<subscript>30</subscript></entry>
+ <entry>Y'<subscript>31</subscript></entry>
+ <entry>Y'<subscript>32</subscript></entry>
+ <entry>Y'<subscript>33</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;0:</entry>
+ <entry>Cb<subscript>00</subscript></entry>
+ <entry>Cr<subscript>00</subscript></entry>
+ <entry>Cb<subscript>01</subscript></entry>
+ <entry>Cr<subscript>01</subscript></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;4:</entry>
+ <entry>Cb<subscript>10</subscript></entry>
+ <entry>Cr<subscript>10</subscript></entry>
+ <entry>Cb<subscript>11</subscript></entry>
+ <entry>Cr<subscript>11</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+
+ <formalpara>
+ <title>Color Sample Location.</title>
+ <para>
+ <informaltable frame="none">
+ <tgroup cols="7" align="center">
+ <tbody valign="top">
+ <row>
+ <entry></entry>
+ <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
+ <entry>2</entry><entry></entry><entry>3</entry>
+ </row>
+ <row>
+ <entry>0</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry>1</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>2</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry>3</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
+
+ <!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "pixfmt.sgml"
+indent-tabs-mode: nil
+End:
+ -->
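As a rough illustration of the layout described above, the per-plane sizes of an
NV12M image can be computed as follows; this sketch assumes bytesperline equals
the image width, i.e. no row padding:

#include <stddef.h>

/* Y plane: one byte per pixel. */
static size_t nv12m_y_plane_size(unsigned int width, unsigned int height)
{
	return (size_t)width * height;
}

/* CbCr plane: same width in bytes as the Y plane, half as tall in pixels. */
static size_t nv12m_cbcr_plane_size(unsigned int width, unsigned int height)
{
	return (size_t)width * height / 2;
}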
diff --git a/Documentation/DocBook/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/v4l/pixfmt-yuv420m.xml
new file mode 100644
index 000000000000..f5d8f57495c8
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-yuv420m.xml
@@ -0,0 +1,162 @@
+ <refentry id="V4L2-PIX-FMT-YUV420M">
+ <refmeta>
+    <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname> <constant>V4L2_PIX_FMT_YUV420M</constant></refname>
+ <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant>
+    with planes non-contiguous in memory.</refpurpose>
+ </refnamediv>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is a multi-planar format, as opposed to a packed format.
+The three components are separated into three sub-images or planes.
+
+The Y plane is first. The Y plane has one byte per pixel. The Cb data
+constitutes the second plane which is half the width and half
+the height of the Y plane (and of the image). Each Cb belongs to four
+pixels, a two-by-two square of the image. For example,
+Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
+Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
+Y'<subscript>11</subscript>. The Cr data, just like the Cb plane, is
+in the third plane. </para>
+
+ <para>If the Y plane has pad bytes after each row, then the Cb
+and Cr planes have half as many pad bytes after their rows. In other
+words, two Cx rows (including padding) are exactly as long as one Y row
+(including padding).</para>
+
+  <para><constant>V4L2_PIX_FMT_YUV420M</constant> is intended to be
+used only in drivers and applications that support the multi-planar API,
+described in <xref linkend="planar-apis"/>. </para>
+
+ <example>
+    <title><constant>V4L2_PIX_FMT_YUV420M</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start0&nbsp;+&nbsp;0:</entry>
+ <entry>Y'<subscript>00</subscript></entry>
+ <entry>Y'<subscript>01</subscript></entry>
+ <entry>Y'<subscript>02</subscript></entry>
+ <entry>Y'<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;4:</entry>
+ <entry>Y'<subscript>10</subscript></entry>
+ <entry>Y'<subscript>11</subscript></entry>
+ <entry>Y'<subscript>12</subscript></entry>
+ <entry>Y'<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;8:</entry>
+ <entry>Y'<subscript>20</subscript></entry>
+ <entry>Y'<subscript>21</subscript></entry>
+ <entry>Y'<subscript>22</subscript></entry>
+ <entry>Y'<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;12:</entry>
+ <entry>Y'<subscript>30</subscript></entry>
+ <entry>Y'<subscript>31</subscript></entry>
+ <entry>Y'<subscript>32</subscript></entry>
+ <entry>Y'<subscript>33</subscript></entry>
+ </row>
+ <row><entry></entry></row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;0:</entry>
+ <entry>Cb<subscript>00</subscript></entry>
+ <entry>Cb<subscript>01</subscript></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;2:</entry>
+ <entry>Cb<subscript>10</subscript></entry>
+ <entry>Cb<subscript>11</subscript></entry>
+ </row>
+ <row><entry></entry></row>
+ <row>
+ <entry>start2&nbsp;+&nbsp;0:</entry>
+ <entry>Cr<subscript>00</subscript></entry>
+ <entry>Cr<subscript>01</subscript></entry>
+ </row>
+ <row>
+ <entry>start2&nbsp;+&nbsp;2:</entry>
+ <entry>Cr<subscript>10</subscript></entry>
+ <entry>Cr<subscript>11</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+
+ <formalpara>
+ <title>Color Sample Location.</title>
+ <para>
+ <informaltable frame="none">
+ <tgroup cols="7" align="center">
+ <tbody valign="top">
+ <row>
+ <entry></entry>
+ <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
+ <entry>2</entry><entry></entry><entry>3</entry>
+ </row>
+ <row>
+ <entry>0</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry>1</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>2</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry>3</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
+
+ <!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "pixfmt.sgml"
+indent-tabs-mode: nil
+End:
+ -->
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index cfffc88d7383..f8436dcb7414 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -2,12 +2,16 @@
<para>The V4L2 API was primarily designed for devices exchanging
image data with applications. The
-<structname>v4l2_pix_format</structname> structure defines the format
-and layout of an image in memory. Image formats are negotiated with
-the &VIDIOC-S-FMT; ioctl. (The explanations here focus on video
+<structname>v4l2_pix_format</structname> and <structname>v4l2_pix_format_mplane
+</structname> structures define the format and layout of an image in memory.
+The former is used with the single-planar API, while the latter is used with the
+multi-planar version (see <xref linkend="planar-apis"/>). Image formats are
+negotiated with the &VIDIOC-S-FMT; ioctl. (The explanations here focus on video
capturing and output, for overlay frame buffer formats see also
&VIDIOC-G-FBUF;.)</para>
+<section>
+ <title>Single-planar format structure</title>
<table pgwide="1" frame="none" id="v4l2-pix-format">
<title>struct <structname>v4l2_pix_format</structname></title>
<tgroup cols="3">
@@ -106,6 +110,98 @@ set this field to zero.</entry>
</tbody>
</tgroup>
</table>
+</section>
+
+<section>
+ <title>Multi-planar format structures</title>
+ <para>The <structname>v4l2_plane_pix_format</structname> structures define
+ size and layout for each of the planes in a multi-planar format.
+ The <structname>v4l2_pix_format_mplane</structname> structure contains
+ information common to all planes (such as image width and height) and
+ an array of <structname>v4l2_plane_pix_format</structname> structures,
+ describing all planes of that format.</para>
+ <table pgwide="1" frame="none" id="v4l2-plane-pix-format">
+  <title>struct <structname>v4l2_plane_pix_format</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>sizeimage</structfield></entry>
+ <entry>Maximum size in bytes required for image data in this plane.
+ </entry>
+ </row>
+ <row>
+ <entry>__u16</entry>
+ <entry><structfield>bytesperline</structfield></entry>
+ <entry>Distance in bytes between the leftmost pixels in two adjacent
+ lines.</entry>
+ </row>
+ <row>
+ <entry>__u16</entry>
+ <entry><structfield>reserved[7]</structfield></entry>
+ <entry>Reserved for future extensions. Should be zeroed by the
+ application.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <table pgwide="1" frame="none" id="v4l2-pix-format-mplane">
+ <title>struct <structname>v4l2_pix_format_mplane</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>width</structfield></entry>
+ <entry>Image width in pixels.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>height</structfield></entry>
+ <entry>Image height in pixels.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>pixelformat</structfield></entry>
+ <entry>The pixel format. Both single- and multi-planar four character
+codes can be used.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-field;</entry>
+ <entry><structfield>field</structfield></entry>
+ <entry>See &v4l2-pix-format;.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-colorspace;</entry>
+ <entry><structfield>colorspace</structfield></entry>
+ <entry>See &v4l2-pix-format;.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-plane-pix-format;</entry>
+ <entry><structfield>plane_fmt[VIDEO_MAX_PLANES]</structfield></entry>
+	  <entry>An array of structures describing the format of each plane this
+ pixel format consists of. The number of valid entries in this array
+ has to be put in the <structfield>num_planes</structfield>
+ field.</entry>
+ </row>
+ <row>
+ <entry>__u8</entry>
+ <entry><structfield>num_planes</structfield></entry>
+ <entry>Number of planes (i.e. separate memory buffers) for this format
+ and the number of valid entries in the
+ <structfield>plane_fmt</structfield> array.</entry>
+ </row>
+ <row>
+ <entry>__u8</entry>
+ <entry><structfield>reserved[11]</structfield></entry>
+ <entry>Reserved for future extensions. Should be zeroed by the
+ application.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+</section>
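For illustration, a minimal user-space sketch of negotiating a format with these
structures; it assumes an open device fd and the usual headers, and uses an arbitrary
640x480 NV12M request as an example:

struct v4l2_format fmt;

memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
fmt.fmt.pix_mp.width = 640;
fmt.fmt.pix_mp.height = 480;
fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
fmt.fmt.pix_mp.field = V4L2_FIELD_NONE;

if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
	perror("VIDIOC_S_FMT");
	exit(EXIT_FAILURE);
}

/* On return the driver has filled in num_planes and, for each plane,
   plane_fmt[].sizeimage and plane_fmt[].bytesperline. */
printf("Negotiated %d plane(s)\n", fmt.fmt.pix_mp.num_planes);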
<section>
<title>Standard Image Formats</title>
@@ -142,11 +238,19 @@ leftmost pixel of the second row from the top, and so on. The last row
has just as many pad bytes after it as the other rows.</para>
<para>In V4L2 each format has an identifier which looks like
-<constant>PIX_FMT_XXX</constant>, defined in the <filename>videodev2.h</filename>
-header file. These identifiers
-represent <link linkend="v4l2-fourcc">four character codes</link>
+<constant>PIX_FMT_XXX</constant>, defined in the <link
+linkend="videodev">videodev2.h</link> header file. These identifiers
+represent <link linkend="v4l2-fourcc">four character (FourCC) codes</link>
which are also listed below, however they are not the same as those
used in the Windows world.</para>
+
+ <para>For some formats, data is stored in separate, discontiguous
+memory buffers. Those formats are identified by a separate set of FourCC codes
+and are referred to as "multi-planar formats". For example, a YUV422 frame is
+normally stored in one memory buffer, but it can also be placed in two or three
+separate buffers, with the Y component in one buffer and the CbCr components
+in another in the 2-planar version, or with each component in its own buffer
+in the 3-planar case. Those sub-buffers are referred to as "planes".</para>
</section>
<section id="colorspaces">
@@ -599,10 +703,12 @@ information.</para>
&sub-vyuy;
&sub-y41p;
&sub-yuv420;
+ &sub-yuv420m;
&sub-yuv410;
&sub-yuv422p;
&sub-yuv411p;
&sub-nv12;
+ &sub-nv12m;
&sub-nv16;
</section>
diff --git a/Documentation/DocBook/v4l/planar-apis.xml b/Documentation/DocBook/v4l/planar-apis.xml
new file mode 100644
index 000000000000..7f3f5527057e
--- /dev/null
+++ b/Documentation/DocBook/v4l/planar-apis.xml
@@ -0,0 +1,81 @@
+<section id="planar-apis">
+ <title>Single- and multi-planar APIs</title>
+
+ <para>Some devices require data for each input or output video frame
+ to be placed in discontiguous memory buffers. In such cases one
+ video frame has to be addressed using more than one memory address, i.e. one
+  pointer per "plane". A plane is a sub-buffer of the current frame. For examples
+ of such formats see <xref linkend="pixfmt" />.</para>
+
+  <para>Initially, the V4L2 API did not support multi-planar buffers, so a set of
+  extensions was introduced to handle them. Those extensions constitute
+ what is being referred to as the "multi-planar API".</para>
+
+ <para>Some of the V4L2 API calls and structures are interpreted differently,
+ depending on whether single- or multi-planar API is being used. An application
+ can choose whether to use one or the other by passing a corresponding buffer
+ type to its ioctl calls. Multi-planar versions of buffer types are suffixed with
+ an `_MPLANE' string. For a list of available multi-planar buffer types
+ see &v4l2-buf-type;.
+ </para>
+
+ <section>
+ <title>Multi-planar formats</title>
+    <para>The multi-planar API introduces new multi-planar formats. Those formats
+ use a separate set of FourCC codes. It is important to distinguish between
+ the multi-planar API and a multi-planar format. Multi-planar API calls can
+ handle all single-planar formats as well, while the single-planar API cannot
+ handle multi-planar formats. Applications do not have to switch between APIs
+ when handling both single- and multi-planar devices and should use the
+ multi-planar API version for both single- and multi-planar formats.
+    Drivers that do not support the multi-planar API can still be handled with it,
+ utilizing a compatibility layer built into standard V4L2 ioctl handling.
+ </para>
+ </section>
+
+ <section>
+ <title>Single and multi-planar API compatibility layer</title>
+ <para>In most cases<footnote><para>The compatibility layer does not cover
+    drivers that do not use the video_ioctl2() call.</para></footnote>, applications
+ can use the multi-planar API with older drivers that support
+ only its single-planar version and vice versa. Appropriate conversion is
+ done seamlessly for both applications and drivers in the V4L2 core. The
+ general rule of thumb is: as long as an application uses formats that
+ a driver supports, it can use either API (although use of multi-planar
+ formats is only possible with the multi-planar API). The list of formats
+ supported by a driver can be obtained using the &VIDIOC-ENUM-FMT; call.
+ It is possible, but discouraged, for a driver or an application to support
+ and use both versions of the API.</para>
+ </section>
+
+ <section>
+ <title>Calls that distinguish between single and multi-planar APIs</title>
+ <variablelist>
+ <varlistentry>
+ <term>&VIDIOC-QUERYCAP;</term>
+ <listitem><para>Two additional multi-planar capabilities are added. They can
+ be set together with non-multi-planar ones for devices that handle
+ both single- and multi-planar formats.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>&VIDIOC-G-FMT;, &VIDIOC-S-FMT;, &VIDIOC-TRY-FMT;</term>
+ <listitem><para>New structures for describing multi-planar formats are added:
+ &v4l2-pix-format-mplane; and &v4l2-plane-pix-format;. Drivers may
+ define new multi-planar formats, which have distinct FourCC codes from
+ the existing single-planar ones.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>&VIDIOC-QBUF;, &VIDIOC-DQBUF;, &VIDIOC-QUERYBUF;</term>
+ <listitem><para>A new &v4l2-plane; structure for describing planes is added.
+ Arrays of this structure are passed in the new
+ <structfield>m.planes</structfield> field of &v4l2-buffer;.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>&VIDIOC-REQBUFS;</term>
+ <listitem><para>Will allocate multi-planar buffers as requested.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+</section>
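For illustration, a minimal sketch of how a capture application might choose between
the two APIs from the capability flags returned by VIDIOC_QUERYCAP; an open device fd
and the usual headers are assumed:

struct v4l2_capability cap;
enum v4l2_buf_type type;

memset(&cap, 0, sizeof(cap));
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
	perror("VIDIOC_QUERYCAP");
	exit(EXIT_FAILURE);
}

if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;	/* multi-planar formats */
else if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;		/* single-planar formats only */
else
	exit(EXIT_FAILURE);				/* no video capture interface */

/* 'type' is then used for VIDIOC_S_FMT, VIDIOC_REQBUFS, and so on. */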
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 9288af96de34..c3d699173c17 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -85,6 +85,17 @@ Remote Controller chapter.</contrib>
</address>
</affiliation>
</author>
+
+ <author>
+ <firstname>Pawel</firstname>
+ <surname>Osciak</surname>
+ <contrib>Designed and documented the multi-planar API.</contrib>
+ <affiliation>
+ <address>
+ <email>pawel AT osciak.com</email>
+ </address>
+ </affiliation>
+ </author>
</authorgroup>
<copyright>
@@ -102,7 +113,8 @@ Remote Controller chapter.</contrib>
<year>2010</year>
<year>2011</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
-Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
+Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab,
+ Pawel Osciak</holder>
</copyright>
<legalnotice>
<para>Except when explicitly stated as GPL, programming examples within
@@ -116,6 +128,14 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>2.6.38</revnumber>
+ <date>2011-01-16</date>
+ <authorinitials>po</authorinitials>
+ <revremark>Added the <link linkend="planar-apis">multi-planar API</link>.
+ </revremark>
+ </revision>
+
+ <revision>
<revnumber>2.6.37</revnumber>
<date>2010-08-06</date>
<authorinitials>hv</authorinitials>
diff --git a/Documentation/DocBook/v4l/vidioc-enum-fmt.xml b/Documentation/DocBook/v4l/vidioc-enum-fmt.xml
index 960d44615ca6..71d373b6d36a 100644
--- a/Documentation/DocBook/v4l/vidioc-enum-fmt.xml
+++ b/Documentation/DocBook/v4l/vidioc-enum-fmt.xml
@@ -76,7 +76,9 @@ pixelformat</structfield> field.</entry>
<entry>Type of the data stream, set by the application.
Only these types are valid here:
<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>,
+<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>,
<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>,
<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>, and custom (driver
defined) types with code <constant>V4L2_BUF_TYPE_PRIVATE</constant>
and higher.</entry>
diff --git a/Documentation/DocBook/v4l/vidioc-g-fmt.xml b/Documentation/DocBook/v4l/vidioc-g-fmt.xml
index 7c7d1b72c40d..a4ae59b664eb 100644
--- a/Documentation/DocBook/v4l/vidioc-g-fmt.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-fmt.xml
@@ -60,11 +60,13 @@ application.</para>
<structfield>type</structfield> field of a struct
<structname>v4l2_format</structname> to the respective buffer (stream)
type. For example video capture devices use
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>. When the application
+<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> or
+<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>. When the application
calls the <constant>VIDIOC_G_FMT</constant> ioctl with a pointer to
this structure the driver fills the respective member of the
<structfield>fmt</structfield> union. In case of video capture devices
-that is the &v4l2-pix-format; <structfield>pix</structfield> member.
+that is either the &v4l2-pix-format; <structfield>pix</structfield> or
+the &v4l2-pix-format-mplane; <structfield>pix_mp</structfield> member.
When the requested buffer type is not supported drivers return an
&EINVAL;.</para>
@@ -134,6 +136,15 @@ devices.</entry>
</row>
<row>
<entry></entry>
+ <entry>&v4l2-pix-format-mplane;</entry>
+ <entry><structfield>pix_mp</structfield></entry>
+ <entry>Definition of an image format, see <xref
+ linkend="pixfmt" />, used by video capture and output
+devices that support the <link linkend="planar-apis">multi-planar
+version of the API</link>.</entry>
+ </row>
+ <row>
+ <entry></entry>
<entry>&v4l2-window;</entry>
<entry><structfield>win</structfield></entry>
<entry>Definition of an overlaid image, see <xref
diff --git a/Documentation/DocBook/v4l/vidioc-qbuf.xml b/Documentation/DocBook/v4l/vidioc-qbuf.xml
index ab691ebf3b93..f2b11f8a4031 100644
--- a/Documentation/DocBook/v4l/vidioc-qbuf.xml
+++ b/Documentation/DocBook/v4l/vidioc-qbuf.xml
@@ -64,7 +64,8 @@ zero to the number of buffers allocated with &VIDIOC-REQBUFS;
contents of the struct <structname>v4l2_buffer</structname> returned
by a &VIDIOC-QUERYBUF; ioctl will do as well. When the buffer is
intended for output (<structfield>type</structfield> is
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> or
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>, or
<constant>V4L2_BUF_TYPE_VBI_OUTPUT</constant>) applications must also
initialize the <structfield>bytesused</structfield>,
<structfield>field</structfield> and
@@ -75,7 +76,11 @@ supports capturing from specific video inputs and you want to specify a video
input, then <structfield>flags</structfield> should be set to
<constant>V4L2_BUF_FLAG_INPUT</constant> and the field
<structfield>input</structfield> must be initialized to the desired input.
-The <structfield>reserved</structfield> field must be set to 0.
+The <structfield>reserved</structfield> field must be set to 0. When using
+the <link linkend="planar-apis">multi-planar API</link>, the
+<structfield>m.planes</structfield> field must contain a userspace pointer
+to a filled-in array of &v4l2-plane; and the <structfield>length</structfield>
+field must be set to the number of elements in that array.
</para>
<para>To enqueue a <link linkend="mmap">memory mapped</link>
@@ -93,10 +98,13 @@ structure the driver sets the
buffer applications set the <structfield>memory</structfield>
field to <constant>V4L2_MEMORY_USERPTR</constant>, the
<structfield>m.userptr</structfield> field to the address of the
-buffer and <structfield>length</structfield> to its size.
-When <constant>VIDIOC_QBUF</constant> is called with a pointer to this
-structure the driver sets the <constant>V4L2_BUF_FLAG_QUEUED</constant>
-flag and clears the <constant>V4L2_BUF_FLAG_MAPPED</constant> and
+buffer and <structfield>length</structfield> to its size. When the multi-planar
+API is used, <structfield>m.userptr</structfield> and
+<structfield>length</structfield> members of the passed array of &v4l2-plane;
+have to be used instead. When <constant>VIDIOC_QBUF</constant> is called with
+a pointer to this structure the driver sets the
+<constant>V4L2_BUF_FLAG_QUEUED</constant> flag and clears the
+<constant>V4L2_BUF_FLAG_MAPPED</constant> and
<constant>V4L2_BUF_FLAG_DONE</constant> flags in the
<structfield>flags</structfield> field, or it returns an error code.
This ioctl locks the memory pages of the buffer in physical memory,
@@ -115,7 +123,9 @@ remaining fields or returns an error code. The driver may also set
<constant>V4L2_BUF_FLAG_ERROR</constant> in the <structfield>flags</structfield>
field. It indicates a non-critical (recoverable) streaming error. In such case
the application may continue as normal, but should be aware that data in the
-dequeued buffer might be corrupted.</para>
+dequeued buffer might be corrupted. When using the multi-planar API, the
+<structfield>m.planes</structfield> array must be passed in as well; the driver
+fills in the per-plane information when the buffer is dequeued.</para>
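A minimal sketch of enqueuing a memory-mapped multi-planar buffer as described above;
fd, the buffer index i and FMT_NUM_PLANES are assumed to come from the mapping example
in the streaming I/O chapter:

struct v4l2_buffer buf;
struct v4l2_plane planes[FMT_NUM_PLANES];

memset(&buf, 0, sizeof(buf));
memset(planes, 0, sizeof(planes));
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
buf.m.planes = planes;
buf.length = FMT_NUM_PLANES;	/* number of elements in the planes array */

if (ioctl(fd, VIDIOC_QBUF, &buf) < 0) {
	perror("VIDIOC_QBUF");
	exit(EXIT_FAILURE);
}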
<para>By default <constant>VIDIOC_DQBUF</constant> blocks when no
buffer is in the outgoing queue. When the
diff --git a/Documentation/DocBook/v4l/vidioc-querybuf.xml b/Documentation/DocBook/v4l/vidioc-querybuf.xml
index e649805a4908..5c104d42d31c 100644
--- a/Documentation/DocBook/v4l/vidioc-querybuf.xml
+++ b/Documentation/DocBook/v4l/vidioc-querybuf.xml
@@ -61,6 +61,10 @@ buffer at any time after buffers have been allocated with the
to the number of buffers allocated with &VIDIOC-REQBUFS;
(&v4l2-requestbuffers; <structfield>count</structfield>) minus one.
The <structfield>reserved</structfield> field should to set to 0.
+When using the <link linkend="planar-apis">multi-planar API</link>, the
+<structfield>m.planes</structfield> field must contain a userspace pointer to an
+array of &v4l2-plane; and the <structfield>length</structfield> field has
+to be set to the number of elements in that array.
After calling <constant>VIDIOC_QUERYBUF</constant> with a pointer to
this structure drivers return an error code or fill the rest of
the structure.</para>
@@ -70,11 +74,13 @@ the structure.</para>
<constant>V4L2_BUF_FLAG_QUEUED</constant> and
<constant>V4L2_BUF_FLAG_DONE</constant> flags will be valid. The
<structfield>memory</structfield> field will be set to the current
-I/O method, the <structfield>m.offset</structfield>
+I/O method. For the single-planar API, the <structfield>m.offset</structfield>
contains the offset of the buffer from the start of the device memory,
-the <structfield>length</structfield> field its size. The driver may
-or may not set the remaining fields and flags, they are meaningless in
-this context.</para>
+the <structfield>length</structfield> field its size. For the multi-planar API,
+fields <structfield>m.mem_offset</structfield> and
+<structfield>length</structfield> in the <structfield>m.planes</structfield>
+array elements will be used instead. The driver may or may not set the remaining
+fields and flags, they are meaningless in this context.</para>
<para>The <structname>v4l2_buffer</structname> structure is
specified in <xref linkend="buffer" />.</para>
diff --git a/Documentation/DocBook/v4l/vidioc-querycap.xml b/Documentation/DocBook/v4l/vidioc-querycap.xml
index d499da93a450..93699769cd07 100644
--- a/Documentation/DocBook/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/v4l/vidioc-querycap.xml
@@ -142,14 +142,30 @@ this array to zero.</entry>
<row>
<entry><constant>V4L2_CAP_VIDEO_CAPTURE</constant></entry>
<entry>0x00000001</entry>
- <entry>The device supports the <link
-linkend="capture">Video Capture</link> interface.</entry>
+ <entry>The device supports single-planar formats through the <link
+linkend="capture">Video Capture</link> interface. An application can use either
+<link linkend="planar-apis">the single-planar or the multi-planar API</link>.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CAP_VIDEO_CAPTURE_MPLANE</constant></entry>
+ <entry>0x00001000</entry>
+ <entry>The device supports multi-planar formats through the <link
+linkend="capture">Video Capture</link> interface. An application has to use the
+<link linkend="planar-apis">multi-planar API</link>.</entry>
</row>
<row>
<entry><constant>V4L2_CAP_VIDEO_OUTPUT</constant></entry>
<entry>0x00000002</entry>
- <entry>The device supports the <link
-linkend="output">Video Output</link> interface.</entry>
+ <entry>The device supports single-planar formats through the <link
+linkend="output">Video Output</link> interface. An application can use either
+<link linkend="planar-apis">the single-planar or the multi-planar API</link>.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CAP_VIDEO_OUTPUT_MPLANE</constant></entry>
+ <entry>0x00002000</entry>
+ <entry>The device supports multi-planar formats through the <link
+linkend="output">Video Output</link> interface. An application has to use the
+<link linkend="planar-apis">multi-planar API</link>.</entry>
</row>
<row>
<entry><constant>V4L2_CAP_VIDEO_OVERLAY</constant></entry>
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index cfaac34c4557..6ef692667e2f 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -849,6 +849,37 @@ All: lockdep-checked RCU-protected pointer access
See the comment headers in the source code (or the docbook generated
from them) for more information.
+However, given that there are no fewer than four families of RCU APIs
+in the Linux kernel, how do you choose which one to use? The following
+list can be helpful:
+
+a.	Will readers need to block? If so, you need SRCU.  (A short
+	sketch of SRCU usage follows this list.)
+
+b. What about the -rt patchset? If readers would need to block
+ in an non-rt kernel, you need SRCU. If readers would block
+	in a non-rt kernel, you need SRCU. If readers would block
+ necessary.
+
+c. Do you need to treat NMI handlers, hardirq handlers,
+ and code segments with preemption disabled (whether
+ via preempt_disable(), local_irq_save(), local_bh_disable(),
+ or some other mechanism) as if they were explicit RCU readers?
+ If so, you need RCU-sched.
+
+d. Do you need RCU grace periods to complete even in the face
+ of softirq monopolization of one or more of the CPUs? For
+ example, is your code subject to network-based denial-of-service
+ attacks? If so, you need RCU-bh.
+
+e. Is your workload too update-intensive for normal use of
+ RCU, but inappropriate for other synchronization mechanisms?
+ If so, consider SLAB_DESTROY_BY_RCU. But please be careful!
+
+f. Otherwise, use RCU.
+
+Of course, this all assumes that you have determined that RCU is in fact
+the right tool for your job.
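+
+To make item (a) concrete, here is a minimal SRCU sketch; the data being
+protected and the surrounding locking are left out:
+
+#include <linux/srcu.h>
+
+static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at setup time */
+
+static void reader(void)
+{
+	int idx;
+
+	idx = srcu_read_lock(&my_srcu);
+	/* ... access SRCU-protected data; blocking is permitted here ... */
+	srcu_read_unlock(&my_srcu, idx);
+}
+
+static void updater(void)
+{
+	/* ... unlink the old data ... */
+	synchronize_srcu(&my_srcu);	/* wait for pre-existing SRCU readers */
+	/* ... now it is safe to free the old data ... */
+}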
+
8. ANSWERS TO QUICK QUIZZES
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
index 9146952c612a..0c49c197c47a 100644
--- a/Documentation/acpi/apei/output_format.txt
+++ b/Documentation/acpi/apei/output_format.txt
@@ -92,6 +92,11 @@ vendor_id: <integer>, device_id: <integer>
class_code: <integer>]
[serial number: <integer>, <integer>]
[bridge: secondary_status: <integer>, control: <integer>]
+[aer_status: <integer>, aer_mask: <integer>
+<aer status string>
+[aer_uncor_severity: <integer>]
+aer_layer=<aer layer string>, aer_agent=<aer agent string>
+aer_tlp_header: <integer> <integer> <integer> <integer>]
<pcie port type string>* := PCIe end point | legacy PCI end point | \
unknown | unknown | root port | upstream switch port | \
@@ -99,6 +104,26 @@ downstream switch port | PCIe to PCI/PCI-X bridge | \
PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
root complex event collector
+if section severity is fatal or recoverable
+<aer status string># :=
+unknown | unknown | unknown | unknown | Data Link Protocol | \
+unknown | unknown | unknown | unknown | unknown | unknown | unknown | \
+Poisoned TLP | Flow Control Protocol | Completion Timeout | \
+Completer Abort | Unexpected Completion | Receiver Overflow | \
+Malformed TLP | ECRC | Unsupported Request
+else
+<aer status string># :=
+Receiver Error | unknown | unknown | unknown | unknown | unknown | \
+Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \
+Replay Timer Timeout | Advisory Non-Fatal
+fi
+
+<aer layer string> :=
+Physical Layer | Data Link Layer | Transaction Layer
+
+<aer agent string> :=
+Receiver ID | Requester ID | Completer ID | Transmitter ID
+
Where, [] designate corresponding content is optional
All <field string> description with * has the following format:
diff --git a/Documentation/arm/SH-Mobile/Makefile b/Documentation/arm/SH-Mobile/Makefile
new file mode 100644
index 000000000000..8771d832cf8c
--- /dev/null
+++ b/Documentation/arm/SH-Mobile/Makefile
@@ -0,0 +1,8 @@
+BIN := vrl4
+
+.PHONY: all
+all: $(BIN)
+
+.PHONY: clean
+clean:
+ rm -f *.o $(BIN)
diff --git a/Documentation/arm/SH-Mobile/vrl4.c b/Documentation/arm/SH-Mobile/vrl4.c
new file mode 100644
index 000000000000..e8a191358ad2
--- /dev/null
+++ b/Documentation/arm/SH-Mobile/vrl4.c
@@ -0,0 +1,169 @@
+/*
+ * vrl4 format generator
+ *
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * usage: vrl4 < zImage > out
+ * dd if=out of=/dev/sdx bs=512 seek=1 # Write the image to sector 1
+ *
+ * Reads a zImage from stdin and writes a vrl4 image to stdout.
+ * In practice this means writing a padded vrl4 header to stdout followed
+ * by the zImage.
+ *
+ * The padding places the zImage at ALIGN bytes into the output.
+ * The vrl4 uses ALIGN + START_BASE as the start_address.
+ * This is where the mask ROM will jump to after verifying the header.
+ *
+ * The header sets copy_size to min(sizeof(zImage), MAX_BOOT_PROG_LEN) + ALIGN.
+ * That is, the mask ROM will load the padded header (ALIGN bytes)
+ * and then MAX_BOOT_PROG_LEN bytes of the image, or the entire image,
+ * whichever is smaller.
+ *
+ * The zImage is not modified in any way.
+ */
+
+#define _BSD_SOURCE
+#include <endian.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <errno.h>
+
+struct hdr {
+ uint32_t magic1;
+ uint32_t reserved1;
+ uint32_t magic2;
+ uint32_t reserved2;
+ uint16_t copy_size;
+ uint16_t boot_options;
+ uint32_t reserved3;
+ uint32_t start_address;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ char reserved6[308];
+};
+
+#define DECLARE_HDR(h) \
+ struct hdr (h) = { \
+ .magic1 = htole32(0xea000000), \
+ .reserved1 = htole32(0x56), \
+ .magic2 = htole32(0xe59ff008), \
+ .reserved3 = htole16(0x1) }
+
+/* Align to 512 bytes, the MMCIF sector size */
+#define ALIGN_BITS 9
+#define ALIGN (1 << ALIGN_BITS)
+
+#define START_BASE 0xe55b0000
+
+/*
+ * With an alignment of 512 the header uses the first sector.
+ * There is a 128 sector (64kbyte) limit on the data loaded by the mask ROM.
+ * So there are 127 sectors left for the boot programme. But in practice
+ * only a small portion of a zImage is needed; 16 sectors should be more
+ * than enough.
+ *
+ * Note that this sets how much of the zImage is copied by the mask ROM.
+ * The entire zImage is present after the header and is loaded
+ * by the code in the boot program (which is the first portion of the zImage).
+ */
+#define MAX_BOOT_PROG_LEN (16 * 512)
+
+#define ROUND_UP(x) ((x + ALIGN - 1) & ~(ALIGN - 1))
+
+ssize_t do_read(int fd, void *buf, size_t count)
+{
+ size_t offset = 0;
+ ssize_t l;
+
+ while (offset < count) {
+ l = read(fd, buf + offset, count - offset);
+ if (!l)
+ break;
+ if (l < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ continue;
+ perror("read");
+ return -1;
+ }
+ offset += l;
+ }
+
+ return offset;
+}
+
+ssize_t do_write(int fd, const void *buf, size_t count)
+{
+ size_t offset = 0;
+ ssize_t l;
+
+ while (offset < count) {
+ l = write(fd, buf + offset, count - offset);
+ if (l < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ continue;
+ perror("write");
+ return -1;
+ }
+ offset += l;
+ }
+
+ return offset;
+}
+
+ssize_t write_zero(int fd, size_t len)
+{
+ size_t i = len;
+
+ while (i--) {
+ const char x = 0;
+ if (do_write(fd, &x, 1) < 0)
+ return -1;
+ }
+
+ return len;
+}
+
+int main(void)
+{
+ DECLARE_HDR(hdr);
+ char boot_program[MAX_BOOT_PROG_LEN];
+ size_t aligned_hdr_len, aligned_prog_len;
+ ssize_t prog_len;
+
+ prog_len = do_read(0, boot_program, sizeof(boot_program));
+ if (prog_len <= 0)
+ return -1;
+
+ aligned_hdr_len = ROUND_UP(sizeof(hdr));
+ hdr.start_address = htole32(START_BASE + aligned_hdr_len);
+ aligned_prog_len = ROUND_UP(prog_len);
+ hdr.copy_size = htole16(aligned_hdr_len + aligned_prog_len);
+
+ if (do_write(1, &hdr, sizeof(hdr)) < 0)
+ return -1;
+ if (write_zero(1, aligned_hdr_len - sizeof(hdr)) < 0)
+ return -1;
+
+ if (do_write(1, boot_program, prog_len) < 0)
+ return 1;
+
+ /* Write out the rest of the kernel */
+ while (1) {
+ prog_len = do_read(0, boot_program, sizeof(boot_program));
+ if (prog_len < 0)
+ return 1;
+ if (prog_len == 0)
+ break;
+ if (do_write(1, boot_program, prog_len) < 0)
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt b/Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt
new file mode 100644
index 000000000000..efff8ae2713d
--- /dev/null
+++ b/Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt
@@ -0,0 +1,29 @@
+ROM-able zImage boot from MMC
+-----------------------------
+
+A ROM-able zImage compiled with ZBOOT_ROM_MMCIF may be written to MMC, and
+SuperH Mobile ARM will boot directly from the MMCIF hardware block.
+
+This is achieved by the mask ROM loading the first portion of the image into
+MERAM and then jumping to it. This portion contains loader code which
+copies the entire image to SDRAM and jumps to it. From there the zImage
+boot code proceeds as normal, uncompressing the image into its final
+location and then jumping to it.
+
+This code has been tested on an AP4EB board using the developer 1A eMMC
+boot mode which is configured using the following jumper settings.
+The board used for testing required a patched mask ROM in order for
+this mode to function.
+
+ 8 7 6 5 4 3 2 1
+ x|x|x|x|x| |x|
+S4 -+-+-+-+-+-+-+-
+ | | | | |x| |x on
+
+The zImage must be written to the MMC card at sector 1 (512 bytes) in
+vrl4 format. A utility vrl4 is supplied to accomplish this.
+
+e.g.
+ vrl4 < zImage | dd of=/dev/sdX bs=512 seek=1
+
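+The vrl4 host tool can be built from the Makefile supplied in this
+directory, for example (run with the host compiler from the top of the
+kernel tree):
+
+  make -C Documentation/arm/SH-Mobile
+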
+A dual-voltage MMC 4.0 card was used for testing.
diff --git a/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen b/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
deleted file mode 100644
index dc460f055647..000000000000
--- a/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
+++ /dev/null
@@ -1,61 +0,0 @@
-README on the ADC/Touchscreen Controller
-========================================
-
-The LH79524 and LH7A404 include a built-in Analog to Digital
-controller (ADC) that is used to process input from a touchscreen.
-The driver only implements a four-wire touch panel protocol.
-
-The touchscreen driver is maintenance free except for the pen-down or
-touch threshold. Some resistive displays and board combinations may
-require tuning of this threshold. The driver exposes some of its
-internal state in the sys filesystem. If the kernel is configured
-with it, CONFIG_SYSFS, and sysfs is mounted at /sys, there will be a
-directory
-
- /sys/devices/platform/adc-lh7.0
-
-containing these files.
-
- -r--r--r-- 1 root root 4096 Jan 1 00:00 samples
- -rw-r--r-- 1 root root 4096 Jan 1 00:00 threshold
- -r--r--r-- 1 root root 4096 Jan 1 00:00 threshold_range
-
-The threshold is the current touch threshold. It defaults to 750 on
-most targets.
-
- # cat threshold
- 750
-
-The threshold_range contains the range of valid values for the
-threshold. Values outside of this range will be silently ignored.
-
- # cat threshold_range
- 0 1023
-
-To change the threshold, write a value to the threshold file.
-
- # echo 500 > threshold
- # cat threshold
- 500
-
-The samples file contains the most recently sampled values from the
-ADC. There are 12. Below are typical of the last sampled values when
-the pen has been released. The first two and last two samples are for
-detecting whether or not the pen is down. The third through sixth are
-X coordinate samples. The seventh through tenth are Y coordinate
-samples.
-
- # cat samples
- 1023 1023 0 0 0 0 530 529 530 529 1023 1023
-
-To determine a reasonable threshold, press on the touch panel with an
-appropriate stylus and read the values from samples.
-
- # cat samples
- 1023 676 92 103 101 102 855 919 922 922 1023 679
-
-The first and eleventh samples are discarded. Thus, the important
-values are the second and twelfth which are used to determine if the
-pen is down. When both are below the threshold, the driver registers
-that the pen is down. When either is above the threshold, it
-registers then pen is up.
diff --git a/Documentation/arm/Sharp-LH/CompactFlash b/Documentation/arm/Sharp-LH/CompactFlash
deleted file mode 100644
index 8616d877df9e..000000000000
--- a/Documentation/arm/Sharp-LH/CompactFlash
+++ /dev/null
@@ -1,32 +0,0 @@
-README on the Compact Flash for Card Engines
-============================================
-
-There are three challenges in supporting the CF interface of the Card
-Engines. First, every IO operation must be followed with IO to
-another memory region. Second, the slot is wired for one-to-one
-address mapping *and* it is wired for 16 bit access only. Second, the
-interrupt request line from the CF device isn't wired.
-
-The IOBARRIER issue is covered in README.IOBARRIER. This isn't an
-onerous problem. Enough said here.
-
-The addressing issue is solved in the
-arch/arm/mach-lh7a40x/ide-lpd7a40x.c file with some awkward
-work-arounds. We implement a special SELECT_DRIVE routine that is
-called before the IDE driver performs its own SELECT_DRIVE. Our code
-recognizes that the SELECT register cannot be modified without also
-writing a command. It send an IDLE_IMMEDIATE command on selecting a
-drive. The function also prevents drive select to the slave drive
-since there can be only one. The awkward part is that the IDE driver,
-even though we have a select procedure, also attempts to change the
-drive by writing directly the SELECT register. This attempt is
-explicitly blocked by the OUTB function--not pretty, but effective.
-
-The lack of interrupts is a more serious problem. Even though the CF
-card is fast when compared to a normal IDE device, we don't know that
-the CF is really flash. A user could use one of the very small hard
-drives being shipped with a CF interface. The IDE code includes a
-check for interfaces that lack an IRQ. In these cases, submitting a
-command to the IDE controller is followed by a call to poll for
-completion. If the device isn't immediately ready, it schedules a
-timer to poll again later.
diff --git a/Documentation/arm/Sharp-LH/IOBarrier b/Documentation/arm/Sharp-LH/IOBarrier
deleted file mode 100644
index 2e953e228f4d..000000000000
--- a/Documentation/arm/Sharp-LH/IOBarrier
+++ /dev/null
@@ -1,45 +0,0 @@
-README on the IOBARRIER for CardEngine IO
-=========================================
-
-Due to an unfortunate oversight when the Card Engines were designed,
-the signals that control access to some peripherals, most notably the
-SMC91C9111 ethernet controller, are not properly handled.
-
-The symptom is that some back to back IO with the peripheral returns
-unreliable data. With the SMC chip, you'll see errors about the bank
-register being 'screwed'.
-
-The cause is that the AEN signal to the SMC chip does not transition
-for every memory access. It is driven through the CPLD from the CS7
-line of the CPU's static memory controller which is optimized to
-eliminate unnecessary transitions. Yet, the SMC requires a transition
-for every write access. The Sharp website has more information about
-the effect this power-conserving feature has on peripheral
-interfacing.
-
-The solution is to follow every write access to the SMC chip with an
-access to another memory region that will force the CPU to release the
-chip select line. It is important to guarantee that this access
-forces the CPU off-chip. We map a page of SDRAM as if it were an
-uncacheable IO device and read from it after every SMC IO write
-operation.
-
- SMC IO
- BARRIER IO
-
-Only this sequence is important. It does not matter that there is no
-BARRIER IO before the access to the SMC chip because the AEN latch
-only needs occurs after the SMC IO write cycle. The routines that
-implement this work-around make an additional concession which is to
-disable interrupts during the IO sequence. Other hardware devices
-(the LogicPD CPLD) have registers in the same physical memory
-region as the SMC chip. An interrupt might allow an access to one of
-those registers while SMC IO is being performed.
-
-You might be tempted to think that we have to access another device
-attached to the static memory controller, but the empirical evidence
-indicates that this is not so. Mapping 0x00000000 (flash) and
-0xc0000000 (SDRAM) appear to have the same effect. Using SDRAM seems
-to be faster. Choosing to access an undecoded memory region is not
-desirable as there is no way to know how that chip select will be used
-in the future.
diff --git a/Documentation/arm/Sharp-LH/KEV7A400 b/Documentation/arm/Sharp-LH/KEV7A400
deleted file mode 100644
index be32b14cd535..000000000000
--- a/Documentation/arm/Sharp-LH/KEV7A400
+++ /dev/null
@@ -1,8 +0,0 @@
-README on Implementing Linux for Sharp's KEV7a400
-=================================================
-
-This product has been discontinued by Sharp. For the time being, the
-partially implemented code remains in the kernel. At some point in
-the future, either the code will be finished or it will be removed
-completely. This depends primarily on how many of the development
-boards are in the field.
diff --git a/Documentation/arm/Sharp-LH/LCDPanels b/Documentation/arm/Sharp-LH/LCDPanels
deleted file mode 100644
index fb1b21c2f2f4..000000000000
--- a/Documentation/arm/Sharp-LH/LCDPanels
+++ /dev/null
@@ -1,59 +0,0 @@
-README on the LCD Panels
-========================
-
-Configuration options for several LCD panels, available from Logic PD,
-are included in the kernel source. This README will help you
-understand the configuration data and give you some guidance for
-adding support for other panels if you wish.
-
-
-lcd-panels.h
-------------
-
-There is no way, at present, to detect which panel is attached to the
-system at runtime. Thus the kernel configuration is static. The file
-arch/arm/mach-ld7a40x/lcd-panels.h (or similar) defines all of the
-panel specific parameters.
-
-It should be possible for this data to be shared among several device
-families. The current layout may be insufficiently general, but it is
-amenable to improvement.
-
-
-PIXEL_CLOCK
------------
-
-The panel data sheets will give a range of acceptable pixel clocks.
-The fundamental LCDCLK input frequency is divided down by a PCD
-constant in field '.tim2'. It may happen that it is impossible to set
-the pixel clock within this range. A clock which is too slow will
-tend to flicker. For the highest quality image, set the clock as high
-as possible.
-
-
-MARGINS
--------
-
-These values may be difficult to glean from the panel data sheet. In
-the case of the Sharp panels, the upper margin is explicitly called
-out as a specific number of lines from the top of the frame. The
-other values may not matter as much as the panels tend to
-automatically center the image.
-
-
-Sync Sense
-----------
-
-The sense of the hsync and vsync pulses may be called out in the data
-sheet. On one panel, the sense of these pulses determine the height
-of the visible region on the panel. Most of the Sharp panels use
-negative sense sync pulses set by the TIM2_IHS and TIM2_IVS bits in
-'.tim2'.
-
-
-Pel Layout
-----------
-
-The Sharp color TFT panels are all configured for 16 bit direct color
-modes. The amba-lcd driver sets the pel mode to 565 for 5 bits of
-each red and blue and 6 bits of green.
diff --git a/Documentation/arm/Sharp-LH/LPD7A400 b/Documentation/arm/Sharp-LH/LPD7A400
deleted file mode 100644
index 3275b453bfdf..000000000000
--- a/Documentation/arm/Sharp-LH/LPD7A400
+++ /dev/null
@@ -1,15 +0,0 @@
-README on Implementing Linux for the Logic PD LPD7A400-10
-=========================================================
-
-- CPLD memory mapping
-
- The board designers chose to use high address lines for controlling
- access to the CPLD registers. It turns out to be a big waste
- because we're using an MMU and must map IO space into virtual
- memory. The result is that we have to make a mapping for every
- register.
-
-- Serial Console
-
- It may be OK not to use the serial console option if the user passes
- the console device name to the kernel. This deserves some exploration.
diff --git a/Documentation/arm/Sharp-LH/LPD7A40X b/Documentation/arm/Sharp-LH/LPD7A40X
deleted file mode 100644
index 8c29a27e208f..000000000000
--- a/Documentation/arm/Sharp-LH/LPD7A40X
+++ /dev/null
@@ -1,16 +0,0 @@
-README on Implementing Linux for the Logic PD LPD7A40X-10
-=========================================================
-
-- CPLD memory mapping
-
- The board designers chose to use high address lines for controlling
- access to the CPLD registers. It turns out to be a big waste
- because we're using an MMU and must map IO space into virtual
- memory. The result is that we have to make a mapping for every
- register.
-
-- Serial Console
-
- It may be OK not to use the serial console option if the user passes
- the console device name to the kernel. This deserves some exploration.
-
diff --git a/Documentation/arm/Sharp-LH/SDRAM b/Documentation/arm/Sharp-LH/SDRAM
deleted file mode 100644
index 93ddc23c2faa..000000000000
--- a/Documentation/arm/Sharp-LH/SDRAM
+++ /dev/null
@@ -1,51 +0,0 @@
-README on the SDRAM Controller for the LH7a40X
-==============================================
-
-The standard configuration for the SDRAM controller generates a sparse
-memory array. The precise layout is determined by the SDRAM chips. A
-default kernel configuration assembles the discontiguous memory
-regions into separate memory nodes via the NUMA (Non-Uniform Memory
-Architecture) facilities. In this default configuration, the kernel
-is forgiving about the precise layout. As long as it is given an
-accurate picture of available memory by the bootloader the kernel will
-execute correctly.
-
-The SDRC supports a mode where some of the chip select lines are
-swapped in order to make SDRAM look like a synchronous ROM. Setting
-this bit means that the RAM will present as a contiguous array. Some
-programmers prefer this to the discontiguous layout. Be aware that
-may be a penalty for this feature where some some configurations of
-memory are significantly reduced; i.e. 64MiB of RAM appears as only 32
-MiB.
-
-There are a couple of configuration options to override the default
-behavior. When the SROMLL bit is set and memory appears as a
-contiguous array, there is no reason to support NUMA.
-CONFIG_LH7A40X_CONTIGMEM disables NUMA support. When physical memory
-is discontiguous, the memory tables are organized such that there are
-two banks per nodes with a small gap between them. This layout wastes
-some kernel memory for page tables representing non-existent memory.
-CONFIG_LH7A40X_ONE_BANK_PER_NODE optimizes the node tables such that
-there are no gaps. These options control the low level organization
-of the memory management tables in ways that may prevent the kernel
-from booting or may cause the kernel to allocated excessively large
-page tables. Be warned. Only change these options if you know what
-you are doing. The default behavior is a reasonable compromise that
-will suit all users.
-
---
-
-A typical 32MiB system with the default configuration options will
-find physical memory managed as follows.
-
- node 0: 0xc0000000 4MiB
- 0xc1000000 4MiB
- node 1: 0xc4000000 4MiB
- 0xc5000000 4MiB
- node 2: 0xc8000000 4MiB
- 0xc9000000 4MiB
- node 3: 0xcc000000 4MiB
- 0xcd000000 4MiB
-
-Setting CONFIG_LH7A40X_ONE_BANK_PER_NODE will put each bank into a
-separate node.
diff --git a/Documentation/arm/Sharp-LH/VectoredInterruptController b/Documentation/arm/Sharp-LH/VectoredInterruptController
deleted file mode 100644
index 23047e9861ee..000000000000
--- a/Documentation/arm/Sharp-LH/VectoredInterruptController
+++ /dev/null
@@ -1,80 +0,0 @@
-README on the Vectored Interrupt Controller of the LH7A404
-==========================================================
-
-The 404 revision of the LH7A40X series comes with two vectored
-interrupts controllers. While the kernel does use some of the
-features of these devices, it is far from the purpose for which they
-were designed.
-
-When this README was written, the implementation of the VICs was in
-flux. It is possible that some details, especially with priorities,
-will change.
-
-The VIC support code is inspired by routines written by Sharp.
-
-
-Priority Control
-----------------
-
-The significant reason for using the VIC's vectoring is to control
-interrupt priorities. There are two tables in
-arch/arm/mach-lh7a40x/irq-lh7a404.c that look something like this.
-
- static unsigned char irq_pri_vic1[] = { IRQ_GPIO3INTR, };
- static unsigned char irq_pri_vic2[] = {
- IRQ_T3UI, IRQ_GPIO7INTR,
- IRQ_UART1INTR, IRQ_UART2INTR, IRQ_UART3INTR, };
-
-The initialization code reads these tables and inserts a vector
-address and enable for each indicated IRQ. Vectored interrupts have
-higher priority than non-vectored interrupts. So, on VIC1,
-IRQ_GPIO3INTR will be served before any other non-FIQ interrupt. Due
-to the way that the vectoring works, IRQ_T3UI is the next highest
-priority followed by the other vectored interrupts on VIC2. After
-that, the non-vectored interrupts are scanned in VIC1 then in VIC2.
-
-
-ISR
----
-
-The interrupt service routine macro get_irqnr() in
-arch/arm/kernel/entry-armv.S scans the VICs for the next active
-interrupt. The vectoring makes this code somewhat larger than it was
-before using vectoring (refer to the LH7A400 implementation). In the
-case where an interrupt is vectored, the implementation will tend to
-be faster than the non-vectored version. However, the worst-case path
-is longer.
-
-It is worth noting that at present, there is no need to read
-VIC2_VECTADDR because the register appears to be shared between the
-controllers. The code is written such that if this changes, it ought
-to still work properly.
-
-
-Vector Addresses
-----------------
-
-The proper use of the vectoring hardware would jump to the ISR
-specified by the vectoring address. Linux isn't structured to take
-advantage of this feature, though it might be possible to change
-things to support it.
-
-In this implementation, the vectoring address is used to speed the
-search for the active IRQ. The address is coded such that the lowest
-6 bits store the IRQ number for vectored interrupts. These numbers
-correspond to the bits in the interrupt status registers. IRQ zero is
-the lowest interrupt bit in VIC1. IRQ 32 is the lowest interrupt bit
-in VIC2. Because zero is a valid IRQ number and because we cannot
-detect whether or not there is a valid vectoring address if that
-address is zero, the eigth bit (0x100) is set for vectored interrupts.
-The address for IRQ 0x18 (VIC2) is 0x118. Only the ninth bit is set
-for the default handler on VIC1 and only the tenth bit is set for the
-default handler on VIC2.
-
-In other words.
-
- 0x000 - no active interrupt
- 0x1ii - vectored interrupt 0xii
- 0x2xx - unvectored interrupt on VIC1 (xx is don't care)
- 0x4xx - unvectored interrupt on VIC2 (xx is don't care)
-
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 7781857dc940..b6ed61c95856 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -485,8 +485,9 @@ The feature can be disabled by
# echo 0 > memory.use_hierarchy
-NOTE1: Enabling/disabling will fail if the cgroup already has other
- cgroups created below it.
+NOTE1: Enabling/disabling will fail if either the cgroup already has other
+ cgroups created below it, or if the parent cgroup has use_hierarchy
+ enabled.
NOTE2: When panic_on_oom is set to "2", the whole system will panic in
case of an OOM event in any cgroup.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 737988fca64d..e74d0a2eb1cf 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -158,6 +158,17 @@ intensive calculation on your laptop that you do not care how long it
takes to complete as you can 'nice' it and prevent it from taking part
in the deciding process of whether to increase your CPU frequency.
+sampling_down_factor: this parameter controls the rate at which the
+kernel makes a decision on when to decrease the frequency while running
+at top speed. When set to 1 (the default) decisions to reevaluate load
+are made at the same interval regardless of current clock speed. But
+when set to greater than 1 (e.g. 100) it acts as a multiplier for the
+scheduling interval for reevaluating load when the CPU is at its top
+speed due to high load. This improves performance by reducing the overhead
+of load evaluation and helping the CPU stay at its top speed when truly
+busy, rather than shifting back and forth in speed. This tunable has no
+effect on behavior at lower speeds/lower CPU loads.
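+
+For example, assuming the ondemand tunables are exposed at their usual
+sysfs location (global on some kernel versions, per-CPU on others), the
+following makes load re-evaluation at full speed happen only on every
+100th sampling period:
+
+  # echo 100 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor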
+
2.5 Conservative
----------------
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 59293ac4a5d0..6b5c42dbbe84 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -41,7 +41,7 @@ Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
encryption with dm-crypt using the 'cryptsetup' utility, see
-http://clemens.endorphin.org/cryptography
+http://code.google.com/p/cryptsetup/
[[
#!/bin/sh
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
new file mode 100644
index 000000000000..115cccaa17ef
--- /dev/null
+++ b/Documentation/device-mapper/dm-flakey.txt
@@ -0,0 +1,18 @@
+dm-flakey
+=========
+
+This target is the same as the linear target except that it returns I/O
+errors periodically. It's been found useful in simulating failing
+devices for testing purposes.
+
+Starting from the time the table is loaded, the device is available for
+<up interval> seconds, then returns errors for <down interval> seconds,
+and then this cycle repeats.
+
+Parameters: <dev path> <offset> <up interval> <down interval>
+ <dev path>: Full pathname to the underlying block-device, or a
+ "major:minor" device-number.
+ <offset>: Starting sector within the device.
+ <up interval>: Number of seconds device is available.
+ <down interval>: Number of seconds device returns errors.
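+
+Example (illustrative only - the device, size and intervals below are
+placeholders): create a 200 MiB flakey mapping over /dev/sdc1 that is
+healthy for 30 seconds and then returns errors for 5 seconds:
+
+  # echo "0 409600 flakey /dev/sdc1 0 30 5" | dmsetup create flakey-test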
+
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
new file mode 100644
index 000000000000..1dc765055427
--- /dev/null
+++ b/Documentation/devicetree/00-INDEX
@@ -0,0 +1,8 @@
+Documentation for device trees, a data structure by which bootloaders pass
+hardware layout to Linux in a device-independent manner, simplifying hardware
+probing. This subsystem is maintained by Grant Likely
+<grant.likely@secretlab.ca> and has a mailing list at
+https://lists.ozlabs.org/listinfo/devicetree-discuss
+
+booting-without-of.txt
+ - Booting Linux without Open Firmware, describes history and format of device trees.
diff --git a/Documentation/devicetree/bindings/i2c/ce4100-i2c.txt b/Documentation/devicetree/bindings/i2c/ce4100-i2c.txt
new file mode 100644
index 000000000000..569b16248514
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/ce4100-i2c.txt
@@ -0,0 +1,93 @@
+CE4100 I2C
+----------
+
+The CE4100 has one PCI device which is described as the I2C controller. This
+PCI device has three PCI BARs; each BAR contains a complete I2C
+controller, so there is a total of three independent I2C controllers
+which share only an interrupt line.
+The driver is probed via the PCI ID and gathers the information about
+attached devices from the device tree.
+Grant Likely recommended using the ranges property to map the PCI BAR
+number to its physical address and using this to find the child nodes
+of the specific I2C controller. These were his exact words:
+
+ Here's where the magic happens. Each entry in
+ ranges describes how the parent pci address space
+ (middle group of 3) is translated to the local
+ address space (first group of 2) and the size of
+ each range (last cell). In this particular case,
+ the first cell of the local address is chosen to be
+ 1:1 mapped to the BARs, and the second is the
+ offset from the base of the BAR (which would be
+ non-zero if you had 2 or more devices mapped off
+ the same BAR)
+
+ ranges allows the address mapping to be described
+ in a way that the OS can interpret without
+ requiring custom device driver code.
+
+This is an example which is used on FalconFalls:
+------------------------------------------------
+ i2c-controller@b,2 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "pci8086,2e68.2",
+ "pci8086,2e68",
+ "pciclass,ff0000",
+ "pciclass,ff00";
+
+ reg = <0x15a00 0x0 0x0 0x0 0x0>;
+ interrupts = <16 1>;
+
+ /* as described by Grant, the first number in the group of
+ * three is the bar number followed by the 64bit bar address
+ * followed by the size of the mapping. The bar address
+ * also requires a valid translation in the parent's
+ * ranges property.
+ */
+ ranges = <0 0 0x02000000 0 0xdffe0500 0x100
+ 1 0 0x02000000 0 0xdffe0600 0x100
+ 2 0 0x02000000 0 0xdffe0700 0x100>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+
+ /* The first number in the reg property is the
+ * number of the bar
+ */
+ reg = <0 0 0x100>;
+
+ /* This I2C controller has no devices */
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <1 0 0x100>;
+
+ /* This I2C controller has one gpio controller */
+ gpio@26 {
+ #gpio-cells = <2>;
+ compatible = "ti,pcf8575";
+ reg = <0x26>;
+ gpio-controller;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <2 0 0x100>;
+
+ gpio@26 {
+ #gpio-cells = <2>;
+ compatible = "ti,pcf8575";
+ reg = <0x26>;
+ gpio-controller;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-cmos.txt b/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
new file mode 100644
index 000000000000..7382989b3052
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
@@ -0,0 +1,28 @@
+ Motorola mc146818 compatible RTC
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Required properties:
+ - compatible : "motorola,mc146818"
+ - reg : should contain registers location and length.
+
+Optional properties:
+ - interrupts : should contain interrupt.
+ - interrupt-parent : interrupt source phandle.
+ - ctrl-reg : Contains the initial value of the control register also
+ called "Register B".
+ - freq-reg : Contains the initial value of the frequency register also
+ called "Regsiter A".
+
+"Register A" and "B" are usually initialized by the firmware (BIOS for
+instance). If this is not done, it can be performed by the driver.
+
+ISA Example:
+
+ rtc@70 {
+ compatible = "motorola,mc146818";
+ interrupts = <8 3>;
+ interrupt-parent = <&ioapic1>;
+ ctrl-reg = <2>;
+ freq-reg = <0x26>;
+ reg = <1 0x70 2>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/altera_jtaguart.txt b/Documentation/devicetree/bindings/serial/altera_jtaguart.txt
new file mode 100644
index 000000000000..c152f65f9a28
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/altera_jtaguart.txt
@@ -0,0 +1,4 @@
+Altera JTAG UART
+
+Required properties:
+- compatible : should be "ALTR,juart-1.0"
diff --git a/Documentation/devicetree/bindings/serial/altera_uart.txt b/Documentation/devicetree/bindings/serial/altera_uart.txt
new file mode 100644
index 000000000000..71cae3f70100
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/altera_uart.txt
@@ -0,0 +1,7 @@
+Altera UART
+
+Required properties:
+- compatible : should be "ALTR,uart-1.0"
+
+Optional properties:
+- clock-frequency : frequency of the clock input to the UART
diff --git a/Documentation/devicetree/bindings/serio/altera_ps2.txt b/Documentation/devicetree/bindings/serio/altera_ps2.txt
new file mode 100644
index 000000000000..4d9eecc2ef7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/serio/altera_ps2.txt
@@ -0,0 +1,4 @@
+Altera UP PS/2 controller
+
+Required properties:
+- compatible : should be "ALTR,ps2-1.0".
diff --git a/Documentation/devicetree/bindings/spi/spi_altera.txt b/Documentation/devicetree/bindings/spi/spi_altera.txt
new file mode 100644
index 000000000000..dda375943506
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi_altera.txt
@@ -0,0 +1,4 @@
+Altera SPI
+
+Required properties:
+- compatible : should be "ALTR,spi-1.0".
diff --git a/Documentation/devicetree/bindings/spi/spi_oc_tiny.txt b/Documentation/devicetree/bindings/spi/spi_oc_tiny.txt
new file mode 100644
index 000000000000..d95c0b367a04
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi_oc_tiny.txt
@@ -0,0 +1,12 @@
+OpenCores tiny SPI
+
+Required properties:
+- compatible : should be "opencores,tiny-spi-rtlsvn2".
+- gpios : should specify GPIOs used for chipselect.
+Optional properties:
+- clock-frequency : input clock frequency to the core.
+- baud-width: width, in bits, of the programmable divider used to scale
+ the input clock to SCLK.
+
+The clock-frequency and baud-width properties are needed only if the divider
+is programmable. They are not needed if the divider is fixed.
diff --git a/Documentation/devicetree/bindings/x86/ce4100.txt b/Documentation/devicetree/bindings/x86/ce4100.txt
new file mode 100644
index 000000000000..b49ae593a60b
--- /dev/null
+++ b/Documentation/devicetree/bindings/x86/ce4100.txt
@@ -0,0 +1,38 @@
+CE4100 Device Tree Bindings
+---------------------------
+
+For its in-core peripherals, the CE4100 SoC uses the following compatible
+format: <vendor>,<chip>-<device>.
+Many of the "generic" devices like HPET or IO APIC have the ce4100
+name in their compatible property because they first appeared in this
+SoC.
+
+The CPU node
+------------
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "intel,ce4100";
+ reg = <0>;
+ lapic = <&lapic0>;
+ };
+
+The reg property describes the CPU number. The lapic property points to
+the local APIC timer.
+
+The SoC node
+------------
+
+This node describes the in-core peripherals. Required property:
+ compatible = "intel,ce4100-cp";
+
+The PCI node
+------------
+This node describes the PCI bus on the SoC. Its compatible property should be
+ compatible = "intel,ce4100-pci", "pci";
+
+If the OS is using the IO-APIC for interrupt routing, then the interrupt
+numbers reported for devices are no longer valid. In order to obtain the
+correct interrupt number, the child node which represents the device has
+to contain the interrupt property. Besides the interrupt property, it has
+to contain at least the reg property with the PCI bus address and a
+compatible property according to "PCI Bus Binding Revision 2.1".
diff --git a/Documentation/devicetree/bindings/x86/interrupt.txt b/Documentation/devicetree/bindings/x86/interrupt.txt
new file mode 100644
index 000000000000..8b0efb097e60
--- /dev/null
+++ b/Documentation/devicetree/bindings/x86/interrupt.txt
@@ -0,0 +1,29 @@
+Interrupt chips
+---------------
+
+* Intel I/O Advanced Programmable Interrupt Controller (IO APIC)
+
+ Required properties:
+ --------------------
+ compatible = "intel,ce4100-ioapic";
+ #interrupt-cells = <2>;
+
+ Device's interrupt property:
+
+ interrupts = <P S>;
+
+ The first number (P) represents the interrupt pin which is wired to the
+ IO APIC. The second number (S) represents the sense of interrupt which
+ should be configured and can be one of:
+ 0 - Edge Rising
+ 1 - Level Low
+ 2 - Level High
+ 3 - Edge Falling
+
+* Local APIC
+ Required property:
+
+ compatible = "intel,ce4100-lapic";
+
+ This node is currently unused by Linux because the address of the local
+ APIC is read from an MSR.
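+
+ For reference, the address in question is the IA32_APIC_BASE MSR (0x1b);
+ with the msr-tools package it can, for example, be read from userspace:
+
+   # rdmsr 0x1b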
diff --git a/Documentation/devicetree/bindings/x86/timer.txt b/Documentation/devicetree/bindings/x86/timer.txt
new file mode 100644
index 000000000000..c688af58e3bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/x86/timer.txt
@@ -0,0 +1,6 @@
+Timers
+------
+
+* High Precision Event Timer (HPET)
+ Required property:
+ compatible = "intel,ce4100-hpet";
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 28b1c9d3d351..55fd2623445b 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -13,6 +13,7 @@ Table of Contents
I - Introduction
1) Entry point for arch/powerpc
+ 2) Entry point for arch/x86
II - The DT block format
1) Header
@@ -225,6 +226,25 @@ it with special cases.
cannot support both configurations with Book E and configurations
with classic Powerpc architectures.
+2) Entry point for arch/x86
+-------------------------------
+
+ There is one single 32bit entry point to the kernel at code32_start,
+ the decompressor (the real mode entry point goes to the same 32bit
+ entry point once it has switched into protected mode). That entry point
+ supports one calling convention which is documented in
+ Documentation/x86/boot.txt.
+ The physical pointer to the device-tree block (defined in chapter II)
+ is passed via setup_data which requires at least boot protocol 2.09.
+ The type field is defined as
+
+ #define SETUP_DTB 2
+
+ This device-tree is used as an extension to the "boot page". As such it
+ does not parse / consider data which is already covered by the boot
+ page. This includes memory size, reserved ranges, command line arguments
+ and the initrd address. It simply holds information which cannot be
+ retrieved otherwise, such as interrupt routing or a list of devices
+ behind an I2C bus.
II - The DT block format
========================
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 59690de8ebfe..3348d313fbe0 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -556,6 +556,9 @@ sub ngene {
my $hash1 = "d798d5a757121174f0dbc5f2833c0c85";
my $file2 = "ngene_17.fw";
my $hash2 = "26b687136e127b8ac24b81e0eeafc20b";
+ my $url2 = "http://l4m-daten.de/downloads/firmware/dvb-s2/linux/all/";
+ my $file3 = "ngene_18.fw";
+ my $hash3 = "ebce3ea769a53e3e0b0197c3b3f127e3";
checkstandard();
@@ -565,7 +568,10 @@ sub ngene {
wgetfile($file2, $url . $file2);
verify($file2, $hash2);
- "$file1, $file2";
+ wgetfile($file3, $url2 . $file3);
+ verify($file3, $hash3);
+
+ "$file1, $file2, $file3";
}
sub az6027{
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b3f35e5f9c95..8bb7048c663d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -35,6 +35,17 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
+What: AR9170USB
+When: 2.6.40
+
+Why: This driver is deprecated and the firmware is no longer
+ maintained. The replacement driver "carl9170" has been
+ around for a while, so the devices are still supported.
+
+Who: Christian Lamparter <chunkeey@googlemail.com>
+
+---------------------------
+
What: IRQF_SAMPLE_RANDOM
Check: IRQF_SAMPLE_RANDOM
When: July 2009
@@ -604,6 +615,13 @@ Who: Jean Delvare <khali@linux-fr.org>
----------------------------
+What: xt_connlimit rev 0
+When: 2012
+Who: Jan Engelhardt <jengelh@medozas.de>
+Files: net/netfilter/xt_connlimit.c
+
+----------------------------
+
What: noswapaccount kernel command line parameter
When: 2.6.40
Why: The original implementation of memsw feature enabled by
@@ -619,3 +637,12 @@ Why: The original implementation of memsw feature enabled by
Who: Michal Hocko <mhocko@suse.cz>
----------------------------
+
+What: i2c_driver.attach_adapter
+ i2c_driver.detach_adapter
+When: September 2011
+Why: These legacy callbacks should no longer be used as i2c-core offers
+ a variety of preferable alternative ways to instantiate I2C devices.
+Who: Jean Delvare <khali@linux-fr.org>
+
+----------------------------
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 6ab9442d7eeb..6b050464a90d 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -367,12 +367,47 @@ init_itable=n The lazy itable init code will wait n times the
minimizes the impact on the systme performance
while file system's inode table is being initialized.
-discard Controls whether ext4 should issue discard/TRIM
+discard Controls whether ext4 should issue discard/TRIM
nodiscard(*) commands to the underlying block device when
blocks are freed. This is useful for SSD devices
and sparse/thinly-provisioned LUNs, but it is off
by default until sufficient testing has been done.
+nouid32 Disables 32-bit UIDs and GIDs. This is for
+ interoperability with older kernels which only
+ store and expect 16-bit values.
+
+resize Allows resizing the filesystem to the end of the last
+ existing block group; further resizing has to be done
+ with resize2fs, either online or offline. It can
+ only be used in conjunction with remount.
+
+block_validity This option enables/disables the in-kernel
+noblock_validity facility for tracking filesystem metadata blocks
+ within internal data structures. This allows the
+ multi-block allocator and other routines to quickly
+ locate extents which might overlap with filesystem
+ metadata blocks. This option is intended for
+ debugging purposes and, since it negatively affects
+ performance, it is off by default.
+
+dioread_lock Controls whether or not ext4 should use DIO read
+dioread_nolock locking. If the dioread_nolock option is specified,
+ ext4 will allocate an uninitialized extent before the
+ buffer write and convert the extent to initialized
+ after the IO completes. This approach allows the
+ ext4 code to avoid using the inode mutex, which
+ improves scalability on high speed storage. However,
+ this does not work with the nobh option and the mount
+ will fail. Nor does it work with data journaling;
+ the dioread_nolock option will be ignored with a
+ kernel warning. Note that the dioread_nolock code
+ path is only used for extent-based files. Because of
+ the restrictions this option carries, it is off by
+ default (i.e. dioread_lock).
+
+i_version Enable 64-bit inode version support. This option is
+ off by default.
+
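+An illustrative mount invocation using some of the options above (the
+device and mount point are placeholders):
+
+	# mount -t ext4 -o dioread_nolock,discard /dev/sdb1 /mnt
+	# mount -o remount,resize /dev/sdb1 /mnt
+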
Data Mode
=========
There are 3 different data modes:
@@ -400,6 +435,176 @@ needs to be read from and written to disk at the same time where it
outperforms all others modes. Currently ext4 does not have delayed
allocation support if this data journalling mode is selected.
+/proc entries
+=============
+
+Information about mounted ext4 file systems can be found in
+/proc/fs/ext4. Each mounted filesystem will have a directory in
+/proc/fs/ext4 based on its device name (i.e., /proc/fs/ext4/hdc or
+/proc/fs/ext4/dm-0). The files in each per-device directory are shown
+in table below.
+
+Files in /proc/fs/ext4/<devname>
+..............................................................................
+ File Content
+ mb_groups details of multiblock allocator buddy cache of free blocks
+..............................................................................
+
+/sys entries
+============
+
+Information about mounted ext4 file systems can be found in
+/sys/fs/ext4. Each mounted filesystem will have a directory in
+/sys/fs/ext4 based on its device name (e.g., /sys/fs/ext4/hdc or
+/sys/fs/ext4/dm-0). The files in each per-device directory are shown
+in the table below.
+
+Files in /sys/fs/ext4/<devname>
+(see also Documentation/ABI/testing/sysfs-fs-ext4)
+..............................................................................
+ File Content
+
+ delayed_allocation_blocks This file is read-only and shows the number of
+ blocks that are dirty in the page cache, but
+ which do not have their location in the
+ filesystem allocated yet.
+
+ inode_goal Tuning parameter which (if non-zero) controls
+ the goal inode used by the inode allocator in
+ preference to all other allocation heuristics.
+ This is intended for debugging use only, and
+ should be 0 on production systems.
+
+ inode_readahead_blks Tuning parameter which controls the maximum
+ number of inode table blocks that ext4's inode
+ table readahead algorithm will pre-read into
+ the buffer cache
+
+ lifetime_write_kbytes This file is read-only and shows the number of
+ kilobytes of data that have been written to this
+ filesystem since it was created.
+
+ max_writeback_mb_bump The maximum number of megabytes the writeback
+ code will try to write out before moving on to
+ another inode.
+
+ mb_group_prealloc The multiblock allocator will round up allocation
+ requests to a multiple of this tuning parameter if
+ the stripe size is not set in the ext4 superblock
+
+ mb_max_to_scan The maximum number of extents the multiblock
+ allocator will search to find the best extent
+
+ mb_min_to_scan The minimum number of extents the multiblock
+ allocator will search to find the best extent
+
+ mb_order2_req Tuning parameter which controls the minimum size
+ for requests (as a power of 2) where the buddy
+ cache is used
+
+ mb_stats Controls whether the multiblock allocator should
+ collect statistics, which are shown during the
+ unmount. 1 means to collect statistics, 0 means
+ not to collect statistics
+
+ mb_stream_req Files which have fewer blocks than this tunable
+ parameter will have their blocks allocated out
+ of a block group specific preallocation pool, so
+ that small files are packed closely together.
+ Each large file will have its blocks allocated
+ out of its own unique preallocation pool.
+
+ session_write_kbytes This file is read-only and shows the number of
+ kilobytes of data that have been written to this
+ filesystem since it was mounted.
+..............................................................................
+
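+For example, to read how much data has been written during the current
+mount and to enlarge the inode table readahead window (the device name
+and value are only illustrative):
+
+	# cat /sys/fs/ext4/sda1/session_write_kbytes
+	# echo 32 > /sys/fs/ext4/sda1/inode_readahead_blks
+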
+Ioctls
+======
+
+There is some Ext4 specific functionality which can be accessed by applications
+through the system call interfaces. The list of all Ext4 specific ioctls is
+shown in the table below.
+
+Table of Ext4 specific ioctls
+..............................................................................
+ Ioctl Description
+ EXT4_IOC_GETFLAGS Get additional attributes associated with inode.
+ The ioctl argument is an integer bitfield, with
+ bit values described in ext4.h. This ioctl is an
+ alias for FS_IOC_GETFLAGS.
+
+ EXT4_IOC_SETFLAGS Set additional attributes associated with inode.
+ The ioctl argument is an integer bitfield, with
+ bit values described in ext4.h. This ioctl is an
+ alias for FS_IOC_SETFLAGS.
+
+ EXT4_IOC_GETVERSION
+ EXT4_IOC_GETVERSION_OLD
+ Get the inode i_generation number stored for
+ each inode. The i_generation number is normally
+ changed only when a new inode is created and it is
+ particularly useful for network filesystems. The
+ '_OLD' version of this ioctl is an alias for
+ FS_IOC_GETVERSION.
+
+ EXT4_IOC_SETVERSION
+ EXT4_IOC_SETVERSION_OLD
+ Set the inode i_generation number stored for
+ each inode. The '_OLD' version of this ioctl
+ is an alias for FS_IOC_SETVERSION.
+
+ EXT4_IOC_GROUP_EXTEND This ioctl has the same purpose as the resize
+ mount option. It allows resizing the filesystem
+ to the end of the last existing block group;
+ further resizing has to be done with resize2fs,
+ either online or offline. The argument points
+ to the unsigned long number representing the
+ filesystem's new block count.
+
+ EXT4_IOC_MOVE_EXT Move the block extents from orig_fd (the one
+ this ioctl is pointing to) to the donor_fd (the
+ one specified in move_extent structure passed
+ as an argument to this ioctl). Then, exchange
+ inode metadata between orig_fd and donor_fd.
+ This is especially useful for online
+ defragmentation, because the allocator has the
+ opportunity to allocate moved blocks better,
+ ideally into one contiguous extent.
+
+ EXT4_IOC_GROUP_ADD Add a new group descriptor to an existing or
+ new group descriptor block. The new group
+ descriptor is described by the ext4_new_group_input
+ structure, which is passed as an argument to
+ this ioctl. This is especially useful in
+ conjunction with EXT4_IOC_GROUP_EXTEND,
+ which allows online resize of the filesystem
+ to the end of the last existing block group.
+ Those two ioctls combined are used by the userspace
+ online resize tool (e.g. resize2fs).
+
+ EXT4_IOC_MIGRATE This ioctl operates on the filesystem itself.
+ It converts (migrates) an ext3 indirect block
+ mapped inode to an ext4 extent mapped inode by
+ walking through the indirect block mapping of the
+ original inode and converting contiguous block
+ ranges into ext4 extents of the temporary inode.
+ Then, the inodes are swapped. This ioctl might
+ help when migrating from an ext3 to an ext4
+ filesystem, however the suggestion is to create a
+ fresh ext4 filesystem and copy data from the
+ backup. Note that the filesystem has to support
+ extents for this ioctl to work.
+
+ EXT4_IOC_ALLOC_DA_BLKS Force all of the delay allocated blocks to be
+ allocated to preserve application-expected ext3
+ behaviour. Note that this will also start
+ triggering a write of the data blocks, but this
+ behaviour may change in the future as it is
+ not necessary and has been done this way only
+ for the sake of simplicity.
+..............................................................................
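+
+These ioctls are normally reached through existing userspace tools rather
+than being called directly; for instance (illustrative only):
+
+	lsattr <file>		uses EXT4_IOC_GETFLAGS / FS_IOC_GETFLAGS
+	chattr +a <file>	uses EXT4_IOC_SETFLAGS / FS_IOC_SETFLAGS
+	resize2fs <device>	online resizing uses EXT4_IOC_GROUP_EXTEND
+				and EXT4_IOC_GROUP_ADD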
+
References
==========
diff --git a/Documentation/filesystems/romfs.txt b/Documentation/filesystems/romfs.txt
index 2d2a7b2a16b9..e2b07cc9120a 100644
--- a/Documentation/filesystems/romfs.txt
+++ b/Documentation/filesystems/romfs.txt
@@ -17,8 +17,7 @@ comparison, an actual rescue disk used up 3202 blocks with ext2, while
with romfs, it needed 3079 blocks.
To create such a file system, you'll need a user program named
-genromfs. It is available via anonymous ftp on sunsite.unc.edu and
-its mirrors, in the /pub/Linux/system/recovery/ directory.
+genromfs. It is available from http://romfs.sourceforge.net/
As the name suggests, romfs could be also used (space-efficiently) on
various read-only media, like (E)EPROM disks if someone will have the
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 7445bf335dae..5282e3e51413 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -791,10 +791,3 @@ mount option. Fundamentally, there is no reason why the log manager would not
be able to swap methods automatically and transparently depending on load
characteristics, but this should not be necessary if delayed logging works as
designed.
-
-Roadmap:
-
-2.6.39 Switch default mount option to use delayed logging
- => should be roughly 12 months after initial merge
- => enough time to shake out remaining problems before next round of
- enterprise distro kernel rebases
diff --git a/Documentation/hwmon/ads1015 b/Documentation/hwmon/ads1015
new file mode 100644
index 000000000000..56ee7977b1a8
--- /dev/null
+++ b/Documentation/hwmon/ads1015
@@ -0,0 +1,67 @@
+Kernel driver ads1015
+=====================
+
+Supported chips:
+ * Texas Instruments ADS1015
+ Prefix: 'ads1015'
+ Datasheet: Publicly available at the Texas Instruments website :
+ http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+
+Authors:
+ Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+
+Description
+-----------
+
+This driver implements support for the Texas Instruments ADS1015.
+
+This device is a 12-bit A-D converter with 4 inputs.
+
+The inputs can be used single ended or in certain differential combinations.
+
+The inputs can be exported to 8 sysfs input files in0_input - in7_input:
+in0: Voltage over AIN0 and AIN1.
+in1: Voltage over AIN0 and AIN3.
+in2: Voltage over AIN1 and AIN3.
+in3: Voltage over AIN2 and AIN3.
+in4: Voltage over AIN0 and GND.
+in5: Voltage over AIN1 and GND.
+in6: Voltage over AIN2 and GND.
+in7: Voltage over AIN3 and GND.
+
+Which inputs are exported can be configured using platform data or devicetree.
+
+By default all inputs are exported.
+
+Platform Data
+-------------
+
+In linux/i2c/ads1015.h platform data is defined as:
+
+struct ads1015_platform_data {
+ unsigned int exported_channels;
+};
+
+exported_channels is a bitmask that specifies which inputs should be exported.
+
+Example:
+struct ads1015_platform_data data = {
+ .exported_channels = (1 << 2) | (1 << 4)
+};
+
+In this case only in2_input and in4_input would be created.
+
+Devicetree
+----------
+
+The ads1015 node may have an "exported-channels" property.
+exported-channels is a bitmask that specifies which inputs should be exported.
+
+Example:
+ads1015@49 {
+ compatible = "ti,ads1015";
+ reg = <0x49>;
+ exported-channels = < 0x14 >;
+};
+
+In this case only in2_input and in4_input would be created.
diff --git a/Documentation/hwmon/lineage-pem b/Documentation/hwmon/lineage-pem
new file mode 100644
index 000000000000..2ba5ed126858
--- /dev/null
+++ b/Documentation/hwmon/lineage-pem
@@ -0,0 +1,77 @@
+Kernel driver lineage-pem
+=========================
+
+Supported devices:
+ * Lineage Compact Power Line Power Entry Modules
+ Prefix: 'lineage-pem'
+ Addresses scanned: -
+ Documentation:
+ http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports various Lineage Compact Power Line DC/DC and AC/DC
+converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
+
+Lineage CPL power entry modules are nominally PMBus compliant. However, most
+standard PMBus commands are not supported. Specifically, all hardware monitoring
+and status reporting commands are non-standard. For this reason, a standard
+PMBus driver can not be used.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for Lineage CPL devices, since there is no register
+which can be safely used to identify the chip. You will have to instantiate
+the devices explicitly.
+
+Example: the following will load the driver for a Lineage PEM at address 0x40
+on I2C bus #1:
+$ modprobe lineage-pem
+$ echo lineage-pem 0x40 > /sys/bus/i2c/devices/i2c-1/new_device
+
+All Lineage CPL power entry modules have a built-in I2C bus master selector
+(PCA9541). To ensure device access, this driver should only be used as client
+driver to the pca9541 I2C master selector driver.
+
+
+Sysfs entries
+-------------
+
+All Lineage CPL devices report output voltage and device temperature as well as
+alarms for output voltage, temperature, input voltage, input current, input power,
+and fan status.
+
+Input voltage, input current, input power, and fan speed measurement is only
+supported on newer devices. The driver detects if those attributes are supported,
+and only creates respective sysfs entries if they are.
+
+in1_input Output voltage (mV)
+in1_min_alarm Output undervoltage alarm
+in1_max_alarm Output overvoltage alarm
+in1_crit Output voltage critical alarm
+
+in2_input Input voltage (mV, optional)
+in2_alarm Input voltage alarm
+
+curr1_input Input current (mA, optional)
+curr1_alarm Input overcurrent alarm
+
+power1_input Input power (uW, optional)
+power1_alarm Input power alarm
+
+fan1_input Fan 1 speed (rpm, optional)
+fan2_input Fan 2 speed (rpm, optional)
+fan3_input Fan 3 speed (rpm, optional)
+
+temp1_input
+temp1_max
+temp1_crit
+temp1_alarm
+temp1_crit_alarm
+temp1_fault
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75
index 8e6356fe05d7..a1790401fdde 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75
@@ -7,6 +7,11 @@ Supported chips:
Addresses scanned: I2C 0x48 - 0x4f
Datasheet: Publicly available at the National Semiconductor website
http://www.national.com/
+ * National Semiconductor LM75A
+ Prefix: 'lm75a'
+ Addresses scanned: I2C 0x48 - 0x4f
+ Datasheet: Publicly available at the National Semiconductor website
+ http://www.national.com/
* Dallas Semiconductor DS75
Prefix: 'lm75'
Addresses scanned: I2C 0x48 - 0x4f
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index 239258a63c81..7c49feaa79d2 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -26,6 +26,14 @@ Supported chips:
Prefix: 'emc6d102'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
Datasheet: http://www.smsc.com/main/catalog/emc6d102.html
+ * SMSC EMC6D103
+ Prefix: 'emc6d103'
+ Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+ Datasheet: http://www.smsc.com/main/catalog/emc6d103.html
+ * SMSC EMC6D103S
+ Prefix: 'emc6d103s'
+ Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+ Datasheet: http://www.smsc.com/main/catalog/emc6d103s.html
Authors:
Philip Pokorny <ppokorny@penguincomputing.com>,
@@ -122,9 +130,11 @@ to be register compatible. The EMC6D100 offers all the features of the
EMC6D101 plus additional voltage monitoring and system control features.
Unfortunately it is not possible to distinguish between the package
versions on register level so these additional voltage inputs may read
-zero. The EMC6D102 features addtional ADC bits thus extending precision
+zero. EMC6D102 and EMC6D103 feature additional ADC bits thus extending precision
of voltage and temperature channels.
+SMSC EMC6D103S is similar to EMC6D103, but does not support pwm#_auto_pwm_minctl
+and temp#_auto_temp_off.
Hardware Configurations
-----------------------
diff --git a/Documentation/hwmon/max6639 b/Documentation/hwmon/max6639
new file mode 100644
index 000000000000..dc49f8be7167
--- /dev/null
+++ b/Documentation/hwmon/max6639
@@ -0,0 +1,49 @@
+Kernel driver max6639
+=====================
+
+Supported chips:
+ * Maxim MAX6639
+ Prefix: 'max6639'
+ Addresses scanned: I2C 0x2c, 0x2e, 0x2f
+ Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6639.pdf
+
+Authors:
+ He Changqing <hechangqing@semptian.com>
+ Roland Stigge <stigge@antcom.de>
+
+Description
+-----------
+
+This driver implements support for the Maxim MAX6639. This chip is a 2-channel
+temperature monitor with dual PWM fan speed controller. It can monitor its own
+temperature and one external diode-connected transistor or two external
+diode-connected transistors.
+
+The following device attributes are implemented via sysfs:
+
+Attribute R/W Contents
+----------------------------------------------------------------------------
+temp1_input R Temperature channel 1 input (0..150 C)
+temp2_input R Temperature channel 2 input (0..150 C)
+temp1_fault R Temperature channel 1 diode fault
+temp2_fault R Temperature channel 2 diode fault
+temp1_max RW Set THERM temperature for input 1
+ (in C, see datasheet)
+temp2_max RW Set THERM temperature for input 2
+temp1_crit RW Set ALERT temperature for input 1
+temp2_crit RW Set ALERT temperature for input 2
+temp1_emergency RW Set OT temperature for input 1
+ (in C, see datasheet)
+temp2_emergency RW Set OT temperature for input 2
+pwm1 RW Fan 1 target duty cycle (0..255)
+pwm2 RW Fan 2 target duty cycle (0..255)
+fan1_input R TACH1 fan tachometer input (in RPM)
+fan2_input R TACH2 fan tachometer input (in RPM)
+fan1_fault R Fan 1 fault
+fan2_fault R Fan 2 fault
+temp1_max_alarm R Alarm on THERM temperature on channel 1
+temp2_max_alarm R Alarm on THERM temperature on channel 2
+temp1_crit_alarm R Alarm on ALERT temperature on channel 1
+temp2_crit_alarm R Alarm on ALERT temperature on channel 2
+temp1_emergency_alarm R Alarm on OT temperature on channel 1
+temp2_emergency_alarm R Alarm on OT temperature on channel 2
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
new file mode 100644
index 000000000000..f2d42e8bdf48
--- /dev/null
+++ b/Documentation/hwmon/pmbus
@@ -0,0 +1,215 @@
+Kernel driver pmbus
+====================
+
+Supported chips:
+ * Ericsson BMR45X series
+ DC/DC Converter
+ Prefixes: 'bmr450', 'bmr451', 'bmr453', 'bmr454'
+ Addresses scanned: -
+ Datasheet:
+ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
+ * Linear Technology LTC2978
+ Octal PMBus Power Supply Monitor and Controller
+ Prefix: 'ltc2978'
+ Addresses scanned: -
+ Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+ * Maxim MAX16064
+ Quad Power-Supply Controller
+ Prefix: 'max16064'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
+ * Maxim MAX34440
+ PMBus 6-Channel Power-Supply Manager
+ Prefixes: 'max34440'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+ * Maxim MAX34441
+ PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+ Prefixes: 'max34441'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+ * Maxim MAX8688
+ Digital Power-Supply Controller/Monitor
+ Prefix: 'max8688'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
+ * Generic PMBus devices
+ Prefix: 'pmbus'
+ Addresses scanned: -
+ Datasheet: n.a.
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for various PMBus-compliant devices.
+It supports voltage, current, power, and temperature sensors as supported
+by the device.
+
+Each monitored channel has its own high and low limits, plus a critical
+limit.
+
+Fan support will be added in a later version of this driver.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices, since there is no register
+which can be safely used to identify the chip (the MFG_ID register is not
+supported by all chips), and since there is no well-defined address range for
+PMBus devices. You will have to instantiate the devices explicitly.
+
+Example: the following will load the driver for an LTC2978 at address 0x60
+on I2C bus #1:
+$ modprobe pmbus
+$ echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
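+
+Alternatively (a sketch only, not taken from this document; the bus number and
+address are placeholders), board setup code can declare the device so that it
+is instantiated automatically when the adapter is registered:
+
+static struct i2c_board_info pmbus_board_info[] __initdata = {
+	{
+		I2C_BOARD_INFO("ltc2978", 0x60),
+	},
+};
+
+/* called from the board's early init code, before I2C bus #1 is registered */
+i2c_register_board_info(1, pmbus_board_info, ARRAY_SIZE(pmbus_board_info));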
+
+
+Platform data support
+---------------------
+
+Support for additional PMBus chips can be added by defining chip parameters in
+a new chip specific driver file. For example, (untested) code to add support for
+Emerson DS1200 power modules might look as follows.
+
+static struct pmbus_driver_info ds1200_info = {
+ .pages = 1,
+ /* Note: All other sensors are in linear mode */
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 3,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+};
+
+static int ds1200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &ds1200_info);
+}
+
+static int ds1200_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id ds1200_id[] = {
+ {"ds1200", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ds1200_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ds1200_driver = {
+ .driver = {
+ .name = "ds1200",
+ },
+ .probe = ds1200_probe,
+ .remove = ds1200_remove,
+ .id_table = ds1200_id,
+};
+
+static int __init ds1200_init(void)
+{
+ return i2c_add_driver(&ds1200_driver);
+}
+
+static void __exit ds1200_exit(void)
+{
+ i2c_del_driver(&ds1200_driver);
+}
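+
+For a loadable module, the init and exit routines above would also need to be
+registered with the usual module macros (omitted from the example above):
+
+module_init(ds1200_init);
+module_exit(ds1200_exit);
+
+MODULE_DESCRIPTION("PMBus driver for Emerson DS1200");
+MODULE_LICENSE("GPL");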
+
+
+Sysfs entries
+-------------
+
+When probing the chip, the driver identifies which PMBus registers are
+supported, and determines available sensors from this information.
+Attribute files only exist if the respective sensors are supported by the chip.
+Labels are provided to inform the user about the sensor associated with
+a given sysfs entry.
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+inX_input Measured voltage. From READ_VIN or READ_VOUT register.
+inX_min Minimum voltage.
+ From VIN_UV_WARN_LIMIT or VOUT_UV_WARN_LIMIT register.
+inX_max Maximum voltage.
+ From VIN_OV_WARN_LIMIT or VOUT_OV_WARN_LIMIT register.
+inX_lcrit Critical minimum voltage.
+ From VIN_UV_FAULT_LIMIT or VOUT_UV_FAULT_LIMIT register.
+inX_crit Critical maximum voltage.
+ From VIN_OV_FAULT_LIMIT or VOUT_OV_FAULT_LIMIT register.
+inX_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+inX_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+inX_lcrit_alarm Voltage critical low alarm.
+ From VOLTAGE_UV_FAULT status.
+inX_crit_alarm Voltage critical high alarm.
+ From VOLTAGE_OV_FAULT status.
+inX_label "vin", "vcap", or "voutY"
+
+currX_input Measured current. From READ_IIN or READ_IOUT register.
+currX_max Maximum current.
+ From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT register.
+currX_lcrit Critical minimum output current.
+ From IOUT_UC_FAULT_LIMIT register.
+currX_crit Critical maximum current.
+ From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
+currX_alarm Current high alarm.
+ From IIN_OC_WARNING or IOUT_OC_WARNING status.
+currX_lcrit_alarm Output current critical low alarm.
+ From IOUT_UC_FAULT status.
+currX_crit_alarm Current critical high alarm.
+ From IIN_OC_FAULT or IOUT_OC_FAULT status.
+currX_label "iin" or "ioutY"
+
+powerX_input Measured power. From READ_PIN or READ_POUT register.
+powerX_cap Output power cap. From POUT_MAX register.
+powerX_max Power limit. From PIN_OP_WARN_LIMIT or
+ POUT_OP_WARN_LIMIT register.
+powerX_crit Critical output power limit.
+ From POUT_OP_FAULT_LIMIT register.
+powerX_alarm Power high alarm.
+ From PIN_OP_WARNING or POUT_OP_WARNING status.
+powerX_crit_alarm Output power critical high alarm.
+ From POUT_OP_FAULT status.
+powerX_label "pin" or "poutY"
+
+tempX_input Measured temperature.
+ From READ_TEMPERATURE_X register.
+tempX_min Minimum temperature. From UT_WARN_LIMIT register.
+tempX_max Maximum temperature. From OT_WARN_LIMIT register.
+tempX_lcrit Critical low temperature.
+ From UT_FAULT_LIMIT register.
+tempX_crit Critical high temperature.
+ From OT_FAULT_LIMIT register.
+tempX_min_alarm Chip temperature low alarm. Set by comparing
+ READ_TEMPERATURE_X with UT_WARN_LIMIT if
+ TEMP_UT_WARNING status is set.
+tempX_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_X with OT_WARN_LIMIT if
+ TEMP_OT_WARNING status is set.
+tempX_lcrit_alarm Chip temperature critical low alarm. Set by comparing
+ READ_TEMPERATURE_X with UT_FAULT_LIMIT if
+ TEMP_UT_FAULT status is set.
+tempX_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_X with OT_FAULT_LIMIT if
+ TEMP_OT_FAULT status is set.
diff --git a/Documentation/hwmon/twl4030-madc-hwmon b/Documentation/hwmon/twl4030-madc-hwmon
new file mode 100644
index 000000000000..ef7984317cec
--- /dev/null
+++ b/Documentation/hwmon/twl4030-madc-hwmon
@@ -0,0 +1,45 @@
+Kernel driver twl4030-madc
+==========================
+
+Supported chips:
+ * Texas Instruments TWL4030
+ Prefix: 'twl4030-madc'
+
+
+Authors:
+ J Keerthy <j-keerthy@ti.com>
+
+Description
+-----------
+
+The Texas Instruments TWL4030 is a Power Management and Audio Circuit. Among
+other things it contains a 10-bit A/D converter MADC. The converter has 16
+channels which can be used in different modes.
+
+
+See this table for the meaning of the different channels:
+
+Channel Signal
+------------------------------------------
+0 Battery type (BTYPE)
+1 BCI: Battery temperature (BTEMP)
+2 GP analog input
+3 GP analog input
+4 GP analog input
+5 GP analog input
+6 GP analog input
+7 GP analog input
+8 BCI: VBUS voltage (VBUS)
+9 Backup Battery voltage (VBKP)
+10 BCI: Battery charger current (ICHG)
+11 BCI: Battery charger voltage (VCHG)
+12 BCI: Main battery voltage (VBAT)
+13 Reserved
+14 Reserved
+15 VRUSB Supply/Speaker left/Speaker right polarization level
+
+
+The sysfs nodes report voltages in mV, the temperature channel shows the
+converted temperature in degrees Celsius, and the battery charging current
+channel reports the charging current in mA.
diff --git a/Documentation/hwmon/w83795 b/Documentation/hwmon/w83795
new file mode 100644
index 000000000000..9f160371f463
--- /dev/null
+++ b/Documentation/hwmon/w83795
@@ -0,0 +1,127 @@
+Kernel driver w83795
+====================
+
+Supported chips:
+ * Winbond/Nuvoton W83795G
+ Prefix: 'w83795g'
+ Addresses scanned: I2C 0x2c - 0x2f
+ Datasheet: Available for download on nuvoton.com
+ * Winbond/Nuvoton W83795ADG
+ Prefix: 'w83795adg'
+ Addresses scanned: I2C 0x2c - 0x2f
+ Datasheet: Available for download on nuvoton.com
+
+Authors:
+ Wei Song (Nuvoton)
+ Jean Delvare <khali@linux-fr.org>
+
+
+Pin mapping
+-----------
+
+Here is a summary of the pin mapping for the W83795G and W83795ADG.
+This can be useful to convert data provided by board manufacturers
+into working libsensors configuration statements.
+
+ W83795G |
+ Pin | Name | Register | Sysfs attribute
+------------------------------------------------------------------
+ 13 | VSEN1 (VCORE1) | 10h | in0
+ 14 | VSEN2 (VCORE2) | 11h | in1
+ 15 | VSEN3 (VCORE3) | 12h | in2
+ 16 | VSEN4 | 13h | in3
+ 17 | VSEN5 | 14h | in4
+ 18 | VSEN6 | 15h | in5
+ 19 | VSEN7 | 16h | in6
+ 20 | VSEN8 | 17h | in7
+ 21 | VSEN9 | 18h | in8
+ 22 | VSEN10 | 19h | in9
+ 23 | VSEN11 | 1Ah | in10
+ 28 | VTT | 1Bh | in11
+ 24 | 3VDD | 1Ch | in12
+ 25 | 3VSB | 1Dh | in13
+ 26 | VBAT | 1Eh | in14
+ 3 | VSEN12/TR5 | 1Fh | in15/temp5
+ 4 | VSEN13/TR6 | 20h | in16/temp6
+ 5/ 6 | VDSEN14/TR1/TD1 | 21h | in17/temp1
+ 7/ 8 | VDSEN15/TR2/TD2 | 22h | in18/temp2
+ 9/ 10 | VDSEN16/TR3/TD3 | 23h | in19/temp3
+ 11/ 12 | VDSEN17/TR4/TD4 | 24h | in20/temp4
+ 40 | FANIN1 | 2Eh | fan1
+ 42 | FANIN2 | 2Fh | fan2
+ 44 | FANIN3 | 30h | fan3
+ 46 | FANIN4 | 31h | fan4
+ 48 | FANIN5 | 32h | fan5
+ 50 | FANIN6 | 33h | fan6
+ 52 | FANIN7 | 34h | fan7
+ 54 | FANIN8 | 35h | fan8
+ 57 | FANIN9 | 36h | fan9
+ 58 | FANIN10 | 37h | fan10
+ 59 | FANIN11 | 38h | fan11
+ 60 | FANIN12 | 39h | fan12
+ 31 | FANIN13 | 3Ah | fan13
+ 35 | FANIN14 | 3Bh | fan14
+ 41 | FANCTL1 | 10h (bank 2) | pwm1
+ 43 | FANCTL2 | 11h (bank 2) | pwm2
+ 45 | FANCTL3 | 12h (bank 2) | pwm3
+ 47 | FANCTL4 | 13h (bank 2) | pwm4
+ 49 | FANCTL5 | 14h (bank 2) | pwm5
+ 51 | FANCTL6 | 15h (bank 2) | pwm6
+ 53 | FANCTL7 | 16h (bank 2) | pwm7
+ 55 | FANCTL8 | 17h (bank 2) | pwm8
+ 29/ 30 | PECI/TSI (DTS1) | 26h | temp7
+ 29/ 30 | PECI/TSI (DTS2) | 27h | temp8
+ 29/ 30 | PECI/TSI (DTS3) | 28h | temp9
+ 29/ 30 | PECI/TSI (DTS4) | 29h | temp10
+ 29/ 30 | PECI/TSI (DTS5) | 2Ah | temp11
+ 29/ 30 | PECI/TSI (DTS6) | 2Bh | temp12
+ 29/ 30 | PECI/TSI (DTS7) | 2Ch | temp13
+ 29/ 30 | PECI/TSI (DTS8) | 2Dh | temp14
+ 27 | CASEOPEN# | 46h | intrusion0
+
+ W83795ADG |
+ Pin | Name | Register | Sysfs attribute
+------------------------------------------------------------------
+ 10 | VSEN1 (VCORE1) | 10h | in0
+ 11 | VSEN2 (VCORE2) | 11h | in1
+ 12 | VSEN3 (VCORE3) | 12h | in2
+ 13 | VSEN4 | 13h | in3
+ 14 | VSEN5 | 14h | in4
+ 15 | VSEN6 | 15h | in5
+ 16 | VSEN7 | 16h | in6
+ 17 | VSEN8 | 17h | in7
+ 22 | VTT | 1Bh | in11
+ 18 | 3VDD | 1Ch | in12
+ 19 | 3VSB | 1Dh | in13
+ 20 | VBAT | 1Eh | in14
+ 48 | VSEN12/TR5 | 1Fh | in15/temp5
+ 1 | VSEN13/TR6 | 20h | in16/temp6
+ 2/ 3 | VDSEN14/TR1/TD1 | 21h | in17/temp1
+ 4/ 5 | VDSEN15/TR2/TD2 | 22h | in18/temp2
+ 6/ 7 | VDSEN16/TR3/TD3 | 23h | in19/temp3
+ 8/ 9 | VDSEN17/TR4/TD4 | 24h | in20/temp4
+ 32 | FANIN1 | 2Eh | fan1
+ 34 | FANIN2 | 2Fh | fan2
+ 36 | FANIN3 | 30h | fan3
+ 37 | FANIN4 | 31h | fan4
+ 38 | FANIN5 | 32h | fan5
+ 39 | FANIN6 | 33h | fan6
+ 40 | FANIN7 | 34h | fan7
+ 41 | FANIN8 | 35h | fan8
+ 43 | FANIN9 | 36h | fan9
+ 44 | FANIN10 | 37h | fan10
+ 45 | FANIN11 | 38h | fan11
+ 46 | FANIN12 | 39h | fan12
+ 24 | FANIN13 | 3Ah | fan13
+ 28 | FANIN14 | 3Bh | fan14
+ 33 | FANCTL1 | 10h (bank 2) | pwm1
+ 35 | FANCTL2 | 11h (bank 2) | pwm2
+ 23 | PECI (DTS1) | 26h | temp7
+ 23 | PECI (DTS2) | 27h | temp8
+ 23 | PECI (DTS3) | 28h | temp9
+ 23 | PECI (DTS4) | 29h | temp10
+ 23 | PECI (DTS5) | 2Ah | temp11
+ 23 | PECI (DTS6) | 2Bh | temp12
+ 23 | PECI (DTS7) | 2Ch | temp13
+ 23 | PECI (DTS8) | 2Dh | temp14
+ 21 | CASEOPEN# | 46h | intrusion0
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt
new file mode 100644
index 000000000000..7dcd1a4e726c
--- /dev/null
+++ b/Documentation/hwspinlock.txt
@@ -0,0 +1,293 @@
+Hardware Spinlock Framework
+
+1. Introduction
+
+Hardware spinlock modules provide hardware assistance for synchronization
+and mutual exclusion between heterogeneous processors and those not operating
+under a single, shared operating system.
+
+For example, OMAP4 has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP,
+each of which is running a different Operating System (the master, A9,
+is usually running Linux and the slave processors, the M3 and the DSP,
+are running some flavor of RTOS).
+
+A generic hwspinlock framework allows platform-independent drivers to use
+the hwspinlock device in order to access data structures that are shared
+between remote processors, that otherwise have no alternative mechanism
+to accomplish synchronization and mutual exclusion operations.
+
+This is necessary, for example, for Inter-processor communications:
+on OMAP4, cpu-intensive multimedia tasks are offloaded by the host to the
+remote M3 and/or C64x+ slave processors (by an IPC subsystem called Syslink).
+
+To achieve fast message-based communications, minimal kernel support
+is needed to deliver messages arriving from a remote processor to the
+appropriate user process.
+
+This communication is based on a simple data structure that is shared between
+the remote processors, and access to it is synchronized using the hwspinlock
+module (a remote processor directly places new messages in this shared data
+structure).
+
+A common hwspinlock interface makes it possible to have generic, platform-
+independent drivers.
+
+2. User API
+
+ struct hwspinlock *hwspin_lock_request(void);
+ - dynamically assign an hwspinlock and return its address, or NULL
+ in case an unused hwspinlock isn't available. Users of this
+ API will usually want to communicate the lock's id to the remote core
+ before it can be used to achieve synchronization.
+ Can be called from an atomic context (this function will not sleep) but
+ not from within interrupt context.
+
+ struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
+ - assign a specific hwspinlock id and return its address, or NULL
+ if that hwspinlock is already in use. Usually board code will
+ be calling this function in order to reserve specific hwspinlock
+ ids for predefined purposes.
+ Can be called from an atomic context (this function will not sleep) but
+ not from within interrupt context.
+
+ int hwspin_lock_free(struct hwspinlock *hwlock);
+ - free a previously-assigned hwspinlock; returns 0 on success, or an
+ appropriate error code on failure (e.g. -EINVAL if the hwspinlock
+ is already free).
+ Can be called from an atomic context (this function will not sleep) but
+ not from within interrupt context.
+
+ int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
+ - lock a previously-assigned hwspinlock with a timeout limit (specified in
+ msecs). If the hwspinlock is already taken, the function will busy loop
+ waiting for it to be released, but give up when the timeout elapses.
+ Upon a successful return from this function, preemption is disabled so
+ the caller must not sleep, and is advised to release the hwspinlock as
+ soon as possible, in order to minimize remote cores polling on the
+ hardware interconnect.
+ Returns 0 when successful and an appropriate error code otherwise (most
+ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+ The function will never sleep.
+
+ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int timeout);
+ - lock a previously-assigned hwspinlock with a timeout limit (specified in
+ msecs). If the hwspinlock is already taken, the function will busy loop
+ waiting for it to be released, but give up when the timeout elapses.
+ Upon a successful return from this function, preemption and the local
+ interrupts are disabled, so the caller must not sleep, and is advised to
+ release the hwspinlock as soon as possible.
+ Returns 0 when successful and an appropriate error code otherwise (most
+ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+ The function will never sleep.
+
+ int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to,
+ unsigned long *flags);
+ - lock a previously-assigned hwspinlock with a timeout limit (specified in
+ msecs). If the hwspinlock is already taken, the function will busy loop
+ waiting for it to be released, but give up when the timeout elapses.
+ Upon a successful return from this function, preemption is disabled,
+ local interrupts are disabled and their previous state is saved at the
+ given flags placeholder. The caller must not sleep, and is advised to
+ release the hwspinlock as soon as possible.
+ Returns 0 when successful and an appropriate error code otherwise (most
+ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+ The function will never sleep.
+
+ int hwspin_trylock(struct hwspinlock *hwlock);
+ - attempt to lock a previously-assigned hwspinlock, but immediately fail if
+ it is already taken.
+ Upon a successful return from this function, preemption is disabled so
+ caller must not sleep, and is advised to release the hwspinlock as soon as
+ possible, in order to minimize remote cores polling on the hardware
+ interconnect.
+ Returns 0 on success and an appropriate error code otherwise (most
+ notably -EBUSY if the hwspinlock was already taken).
+ The function will never sleep.
+
+ int hwspin_trylock_irq(struct hwspinlock *hwlock);
+ - attempt to lock a previously-assigned hwspinlock, but immediately fail if
+ it is already taken.
+ Upon a successful return from this function, preemption and the local
+ interrupts are disabled so caller must not sleep, and is advised to
+ release the hwspinlock as soon as possible.
+ Returns 0 on success and an appropriate error code otherwise (most
+ notably -EBUSY if the hwspinlock was already taken).
+ The function will never sleep.
+
+ int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags);
+ - attempt to lock a previously-assigned hwspinlock, but immediately fail if
+ it is already taken.
+ Upon a successful return from this function, preemption is disabled,
+ the local interrupts are disabled and their previous state is saved
+ at the given flags placeholder. The caller must not sleep, and is advised
+ to release the hwspinlock as soon as possible.
+ Returns 0 on success and an appropriate error code otherwise (most
+ notably -EBUSY if the hwspinlock was already taken).
+ The function will never sleep.
+
+ void hwspin_unlock(struct hwspinlock *hwlock);
+ - unlock a previously-locked hwspinlock. Always succeeds, and can be called
+ from any context (the function never sleeps). Note: code should _never_
+ unlock an hwspinlock which is already unlocked (there is no protection
+ against this).
+
+ void hwspin_unlock_irq(struct hwspinlock *hwlock);
+ - unlock a previously-locked hwspinlock and enable local interrupts.
+ The caller should _never_ unlock an hwspinlock which is already unlocked.
+ Doing so is considered a bug (there is no protection against this).
+ Upon a successful return from this function, preemption and local
+ interrupts are enabled. This function will never sleep.
+
+ void
+ hwspin_unlock_irqrestore(struct hwspinlock *hwlock, unsigned long *flags);
+ - unlock a previously-locked hwspinlock.
+ The caller should _never_ unlock an hwspinlock which is already unlocked.
+ Doing so is considered a bug (there is no protection against this).
+ Upon a successful return from this function, preemption is reenabled,
+ and the state of the local interrupts is restored to the state saved at
+ the given flags. This function will never sleep.
+
+ int hwspin_lock_get_id(struct hwspinlock *hwlock);
+ - retrieve id number of a given hwspinlock. This is needed when an
+ hwspinlock is dynamically assigned: before it can be used to achieve
+ mutual exclusion with a remote cpu, the id number should be communicated
+ to the remote task with which we want to synchronize.
+ Returns the hwspinlock id number, or -EINVAL if hwlock is null.
+
+3. Typical usage
+
+#include <linux/hwspinlock.h>
+#include <linux/err.h>
+
+int hwspinlock_example1(void)
+{
+ struct hwspinlock *hwlock;
+ int ret, id;
+
+ /* dynamically assign a hwspinlock */
+ hwlock = hwspin_lock_request();
+ if (!hwlock)
+ ...
+
+ id = hwspin_lock_get_id(hwlock);
+ /* probably need to communicate id to a remote processor now */
+
+ /* take the lock, spin for 1 sec if it's already taken */
+ ret = hwspin_lock_timeout(hwlock, 1000);
+ if (ret)
+ ...
+
+ /*
+ * we took the lock, do our thing now, but do NOT sleep
+ */
+
+ /* release the lock */
+ hwspin_unlock(hwlock);
+
+ /* free the lock */
+ ret = hwspin_lock_free(hwlock);
+ if (ret)
+ ...
+
+ return ret;
+}
+
+int hwspinlock_example2(void)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ /*
+ * assign a specific hwspinlock id - this should be called early
+ * by board init code.
+ */
+ hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
+ if (!hwlock)
+ ...
+
+ /* try to take it, but don't spin on it */
+ ret = hwspin_trylock(hwlock);
+ if (!ret) {
+ pr_info("lock is already taken\n");
+ return -EBUSY;
+ }
+
+ /*
+ * we took the lock, do our thing now, but do NOT sleep
+ */
+
+ /* release the lock */
+ hwspin_unlock(hwlock);
+
+ /* free the lock */
+ ret = hwspin_lock_free(hwlock);
+ if (ret)
+ ...
+
+ return ret;
+}
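+
+As a further sketch (not part of the original examples), a caller that also
+needs local interrupts disabled while holding the lock would use the irqsave
+variants described in section 2:
+
+int hwspinlock_example3(struct hwspinlock *hwlock)
+{
+	unsigned long flags;
+	int ret;
+
+	/* take the lock, spin for up to 100 msecs, disabling local interrupts */
+	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
+	if (ret)
+		return ret;
+
+	/*
+	 * we took the lock, do our thing now, but do NOT sleep
+	 */
+
+	/* release the lock and restore the previous interrupt state */
+	hwspin_unlock_irqrestore(hwlock, &flags);
+
+	return 0;
+}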
+
+
+4. API for implementors
+
+ int hwspin_lock_register(struct hwspinlock *hwlock);
+ - to be called from the underlying platform-specific implementation, in
+ order to register a new hwspinlock instance. Can be called from an atomic
+ context (this function will not sleep) but not from within interrupt
+ context. Returns 0 on success, or appropriate error code on failure.
+
+ struct hwspinlock *hwspin_lock_unregister(unsigned int id);
+ - to be called from the underlying vendor-specific implementation, in order
+ to unregister an existing (and unused) hwspinlock instance.
+ Can be called from an atomic context (will not sleep) but not from
+ within interrupt context.
+ Returns the address of hwspinlock on success, or NULL on error (e.g.
+ if the hwspinlock is still in use).
+
+5. struct hwspinlock
+
+This struct represents an hwspinlock instance. It is registered by the
+underlying hwspinlock implementation using the hwspin_lock_register() API.
+
+/**
+ * struct hwspinlock - vendor-specific hwspinlock implementation
+ *
+ * @dev: underlying device, will be used with runtime PM api
+ * @ops: vendor-specific hwspinlock handlers
+ * @id: a global, unique, system-wide, index of the lock.
+ * @lock: initialized and used by hwspinlock core
+ * @owner: underlying implementation module, used to maintain module ref count
+ */
+struct hwspinlock {
+ struct device *dev;
+ const struct hwspinlock_ops *ops;
+ int id;
+ spinlock_t lock;
+ struct module *owner;
+};
+
+The underlying implementation is responsible for assigning the dev, ops, id
+and owner members. The lock member, on the other hand, is initialized and used
+by the hwspinlock core.
+
+6. Implementation callbacks
+
+There are three possible callbacks defined in 'struct hwspinlock_ops':
+
+struct hwspinlock_ops {
+ int (*trylock)(struct hwspinlock *lock);
+ void (*unlock)(struct hwspinlock *lock);
+ void (*relax)(struct hwspinlock *lock);
+};
+
+The first two callbacks are mandatory:
+
+The ->trylock() callback should make a single attempt to take the lock, and
+return 0 on failure and 1 on success. This callback may _not_ sleep.
+
+The ->unlock() callback releases the lock. It always succeeds, and it, too,
+may _not_ sleep.
+
+The ->relax() callback is optional. It is called by hwspinlock core while
+spinning on a lock, and can be used by the underlying implementation to force
+a delay between two successive invocations of ->trylock(). It may _not_ sleep.
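+
+As an illustration only (the register semantics below are invented, not taken
+from any real hardware), a vendor implementation wrapping each lock's MMIO
+register might look roughly like this:
+
+struct example_hwspinlock {
+	struct hwspinlock lock;		/* the core's hwspinlock instance */
+	void __iomem *addr;		/* MMIO address of this lock's register */
+};
+
+static int example_trylock(struct hwspinlock *lock)
+{
+	struct example_hwspinlock *e =
+			container_of(lock, struct example_hwspinlock, lock);
+
+	/* assumption: reading 0 acquires the lock, reading 1 means it is taken */
+	return readl(e->addr) == 0;
+}
+
+static void example_unlock(struct hwspinlock *lock)
+{
+	struct example_hwspinlock *e =
+			container_of(lock, struct example_hwspinlock, lock);
+
+	/* assumption: writing 0 releases the lock */
+	writel(0, e->addr);
+}
+
+static const struct hwspinlock_ops example_ops = {
+	.trylock	= example_trylock,
+	.unlock		= example_unlock,
+	/* no ->relax(): busy-waiters will simply retry ->trylock() */
+};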
diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c
new file mode 100644
index 000000000000..30fe4bb9a069
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-diolan-u2c
@@ -0,0 +1,26 @@
+Kernel driver i2c-diolan-u2c
+
+Supported adapters:
+ * Diolan U2C-12 I2C-USB adapter
+ Documentation:
+ http://www.diolan.com/i2c/u2c12.html
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+Description
+-----------
+
+This is the driver for the Diolan U2C-12 USB-I2C adapter.
+
+The Diolan U2C-12 I2C-USB Adapter provides a low cost solution to connect
+a computer to I2C slave devices using a USB interface. It also supports
+connectivity to SPI devices.
+
+This driver only supports the I2C interface of U2C-12. The driver does not use
+interrupts.
+
+
+Module parameters
+-----------------
+
+* frequency: I2C bus frequency
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index 87da405a8597..9edb75d8c9b9 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
(...)
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c, NULL);
i2c_put_adapter(i2c_adap);
diff --git a/Documentation/i2c/upgrading-clients b/Documentation/i2c/upgrading-clients
index 9a45f9bb6a25..d6991625c407 100644
--- a/Documentation/i2c/upgrading-clients
+++ b/Documentation/i2c/upgrading-clients
@@ -61,7 +61,7 @@ static int example_attach(struct i2c_adapter *adap, int addr, int kind)
return 0;
}
-static int __devexit example_detach(struct i2c_client *client)
+static int example_detach(struct i2c_client *client)
{
struct example_state *state = i2c_get_clientdata(client);
@@ -81,7 +81,7 @@ static struct i2c_driver example_driver = {
.name = "example",
},
.attach_adapter = example_attach_adapter,
- .detach_client = __devexit_p(example_detach),
+ .detach_client = example_detach,
.suspend = example_suspend,
.resume = example_resume,
};
@@ -93,7 +93,7 @@ Updating the client
The new style binding model will check against a list of supported
devices and their associated address supplied by the code registering
the busses. This means that the driver .attach_adapter and
-.detach_adapter methods can be removed, along with the addr_data,
+.detach_client methods can be removed, along with the addr_data,
as follows:
- static struct i2c_driver example_driver;
@@ -110,14 +110,14 @@ as follows:
static struct i2c_driver example_driver = {
- .attach_adapter = example_attach_adapter,
-- .detach_client = __devexit_p(example_detach),
+- .detach_client = example_detach,
}
Add the probe and remove methods to the i2c_driver, as so:
static struct i2c_driver example_driver = {
+ .probe = example_probe,
-+ .remove = __devexit_p(example_remove),
++ .remove = example_remove,
}
Change the example_attach method to accept the new parameters
@@ -199,8 +199,8 @@ to delete the i2c_detach_client call. It is possible that you
 can also remove the ret variable as it is not needed for
any of the core functions.
-- static int __devexit example_detach(struct i2c_client *client)
-+ static int __devexit example_remove(struct i2c_client *client)
+- static int example_detach(struct i2c_client *client)
++ static int example_remove(struct i2c_client *client)
{
struct example_state *state = i2c_get_clientdata(client);
@@ -253,7 +253,7 @@ static int example_probe(struct i2c_client *client,
return 0;
}
-static int __devexit example_remove(struct i2c_client *client)
+static int example_remove(struct i2c_client *client)
{
struct example_state *state = i2c_get_clientdata(client);
@@ -275,7 +275,7 @@ static struct i2c_driver example_driver = {
},
.id_table = example_idtable,
.probe = example_probe,
- .remove = __devexit_p(example_remove),
+ .remove = example_remove,
.suspend = example_suspend,
.resume = example_resume,
};
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index ac293e955308..e68543f767d5 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -133,6 +133,7 @@ Code Seq#(hex) Include File Comments
'H' C0-DF net/bluetooth/hidp/hidp.h conflict!
'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict!
'H' C0-DF net/bluetooth/bnep/bnep.h conflict!
+'H' F1 linux/hid-roccat.h <mailto:erazor_de@users.sourceforge.net>
'I' all linux/isdn.h conflict!
'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
'I' 40-4F linux/mISDNif.h conflict!
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 4a990317b84a..8f63b224ab09 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -146,7 +146,7 @@ INSTALL_MOD_STRIP
INSTALL_MOD_STRIP, if defined, will cause modules to be
stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
the default option --strip-debug will be used. Otherwise,
-INSTALL_MOD_STRIP will used as the options to the strip command.
+INSTALL_MOD_STRIP value will be used as the options to the strip command.
INSTALL_FW_PATH
--------------------------------------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 86e3cd0d26a0..5d145bb443c0 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1325,7 +1325,8 @@ The top Makefile exports the following variables:
If this variable is specified, will cause modules to be stripped
after they are installed. If INSTALL_MOD_STRIP is '1', then the
default option --strip-debug will be used. Otherwise,
- INSTALL_MOD_STRIP will used as the option(s) to the strip command.
+ INSTALL_MOD_STRIP value will be used as the option(s) to the strip
+ command.
=== 9 Makefile language
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 89835a4766a6..738c6fda3fb0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
and is between 256 and 4096 characters. It is defined in the file
./include/asm/setup.h as COMMAND_LINE_SIZE.
+Finally, the [KMG] suffix is commonly described after a number of kernel
+parameter values. These 'K', 'M', and 'G' letters represent the _binary_
+multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
+bytes respectively. Such letter suffixes can also be entirely omitted.
+
acpi= [HW,ACPI,X86]
Advanced Configuration and Power Interface
@@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
Format:
<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
- crashkernel=nn[KMG]@ss[KMG]
- [KNL] Reserve a chunk of physical memory to
- hold a kernel to switch to with kexec on panic.
+ crashkernel=size[KMG][@offset[KMG]]
+ [KNL] Using kexec, Linux can switch to a 'crash kernel'
+ upon panic. This parameter reserves the physical
+ memory region [offset, offset + size] for that kernel
+ image. If '@offset' is omitted, then a suitable offset
+ is selected automatically. Check
+ Documentation/kdump/kdump.txt for further details.
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
in the running system. The syntax of range is
start-[end] where start and end are both
a memory unit (amount[KMG]). See also
- Documentation/kdump/kdump.txt for a example.
+ Documentation/kdump/kdump.txt for an example.
cs89x0_dma= [HW,NET]
Format: <dma>
@@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
6 (KERN_INFO) informational
7 (KERN_DEBUG) debug-level messages
- log_buf_len=n Sets the size of the printk ring buffer, in bytes.
- Format: { n | nk | nM }
- n must be a power of two. The default size
- is set in the kernel config file.
+ log_buf_len=n[KMG] Sets the size of the printk ring buffer,
+ in bytes. n must be a power of two. The default
+ size is set in the kernel config file.
logo.nologo [FB] Disables display of the built-in Linux logo.
This may be used to provide more screen space for
@@ -2436,6 +2444,10 @@ and is between 256 and 4096 characters. It is defined in the file
<deci-seconds>: poll all this frequency
0: no polling (default)
+ threadirqs [KNL]
+ Force threading of all interrupt handlers except those
+ explicitly marked IRQF_NO_THREAD.
+
topology= [S390]
Format: {off | on}
Specify if the kernel should make use of the cpu
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index ad85797c1cf0..9bef4e4cec50 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -166,7 +166,7 @@ Returns: 0 on success, -1 on error
This ioctl is obsolete and has been removed.
-4.6 KVM_CREATE_VCPU
+4.7 KVM_CREATE_VCPU
Capability: basic
Architectures: all
@@ -177,7 +177,7 @@ Returns: vcpu fd on success, -1 on error
This API adds a vcpu to a virtual machine. The vcpu id is a small integer
in the range [0, max_vcpus).
-4.7 KVM_GET_DIRTY_LOG (vm ioctl)
+4.8 KVM_GET_DIRTY_LOG (vm ioctl)
Capability: basic
Architectures: x86
@@ -200,7 +200,7 @@ since the last call to this ioctl. Bit 0 is the first page in the
memory slot. Ensure the entire structure is cleared to avoid padding
issues.
-4.8 KVM_SET_MEMORY_ALIAS
+4.9 KVM_SET_MEMORY_ALIAS
Capability: basic
Architectures: x86
@@ -210,7 +210,7 @@ Returns: 0 (success), -1 (error)
This ioctl is obsolete and has been removed.
-4.9 KVM_RUN
+4.10 KVM_RUN
Capability: basic
Architectures: all
@@ -226,7 +226,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
KVM_GET_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct
kvm_run' (see below).
-4.10 KVM_GET_REGS
+4.11 KVM_GET_REGS
Capability: basic
Architectures: all
@@ -246,7 +246,7 @@ struct kvm_regs {
__u64 rip, rflags;
};
-4.11 KVM_SET_REGS
+4.12 KVM_SET_REGS
Capability: basic
Architectures: all
@@ -258,7 +258,7 @@ Writes the general purpose registers into the vcpu.
See KVM_GET_REGS for the data structure.
-4.12 KVM_GET_SREGS
+4.13 KVM_GET_SREGS
Capability: basic
Architectures: x86
@@ -283,7 +283,7 @@ interrupt_bitmap is a bitmap of pending external interrupts. At most
one bit may be set. This interrupt has been acknowledged by the APIC
but not yet injected into the cpu core.
-4.13 KVM_SET_SREGS
+4.14 KVM_SET_SREGS
Capability: basic
Architectures: x86
@@ -294,7 +294,7 @@ Returns: 0 on success, -1 on error
Writes special registers into the vcpu. See KVM_GET_SREGS for the
data structures.
-4.14 KVM_TRANSLATE
+4.15 KVM_TRANSLATE
Capability: basic
Architectures: x86
@@ -317,7 +317,7 @@ struct kvm_translation {
__u8 pad[5];
};
-4.15 KVM_INTERRUPT
+4.16 KVM_INTERRUPT
Capability: basic
Architectures: x86, ppc
@@ -365,7 +365,7 @@ c) KVM_INTERRUPT_SET_LEVEL
Note that any value for 'irq' other than the ones stated above is invalid
and incurs unexpected behavior.
-4.16 KVM_DEBUG_GUEST
+4.17 KVM_DEBUG_GUEST
Capability: basic
Architectures: none
@@ -375,7 +375,7 @@ Returns: -1 on error
Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
-4.17 KVM_GET_MSRS
+4.18 KVM_GET_MSRS
Capability: basic
Architectures: x86
@@ -403,7 +403,7 @@ Application code should set the 'nmsrs' member (which indicates the
size of the entries array) and the 'index' member of each array entry.
kvm will fill in the 'data' member.
-4.18 KVM_SET_MSRS
+4.19 KVM_SET_MSRS
Capability: basic
Architectures: x86
@@ -418,7 +418,7 @@ Application code should set the 'nmsrs' member (which indicates the
size of the entries array), and the 'index' and 'data' members of each
array entry.
-4.19 KVM_SET_CPUID
+4.20 KVM_SET_CPUID
Capability: basic
Architectures: x86
@@ -446,7 +446,7 @@ struct kvm_cpuid {
struct kvm_cpuid_entry entries[0];
};
-4.20 KVM_SET_SIGNAL_MASK
+4.21 KVM_SET_SIGNAL_MASK
Capability: basic
Architectures: x86
@@ -468,7 +468,7 @@ struct kvm_signal_mask {
__u8 sigset[0];
};
-4.21 KVM_GET_FPU
+4.22 KVM_GET_FPU
Capability: basic
Architectures: x86
@@ -493,7 +493,7 @@ struct kvm_fpu {
__u32 pad2;
};
-4.22 KVM_SET_FPU
+4.23 KVM_SET_FPU
Capability: basic
Architectures: x86
@@ -518,7 +518,7 @@ struct kvm_fpu {
__u32 pad2;
};
-4.23 KVM_CREATE_IRQCHIP
+4.24 KVM_CREATE_IRQCHIP
Capability: KVM_CAP_IRQCHIP
Architectures: x86, ia64
@@ -531,7 +531,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
only go to the IOAPIC. On ia64, a IOSAPIC is created.
-4.24 KVM_IRQ_LINE
+4.25 KVM_IRQ_LINE
Capability: KVM_CAP_IRQCHIP
Architectures: x86, ia64
@@ -552,7 +552,7 @@ struct kvm_irq_level {
__u32 level; /* 0 or 1 */
};
-4.25 KVM_GET_IRQCHIP
+4.26 KVM_GET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
Architectures: x86, ia64
@@ -573,7 +573,7 @@ struct kvm_irqchip {
} chip;
};
-4.26 KVM_SET_IRQCHIP
+4.27 KVM_SET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
Architectures: x86, ia64
@@ -594,7 +594,7 @@ struct kvm_irqchip {
} chip;
};
-4.27 KVM_XEN_HVM_CONFIG
+4.28 KVM_XEN_HVM_CONFIG
Capability: KVM_CAP_XEN_HVM
Architectures: x86
@@ -618,7 +618,7 @@ struct kvm_xen_hvm_config {
__u8 pad2[30];
};
-4.27 KVM_GET_CLOCK
+4.29 KVM_GET_CLOCK
Capability: KVM_CAP_ADJUST_CLOCK
Architectures: x86
@@ -636,7 +636,7 @@ struct kvm_clock_data {
__u32 pad[9];
};
-4.28 KVM_SET_CLOCK
+4.30 KVM_SET_CLOCK
Capability: KVM_CAP_ADJUST_CLOCK
Architectures: x86
@@ -654,7 +654,7 @@ struct kvm_clock_data {
__u32 pad[9];
};
-4.29 KVM_GET_VCPU_EVENTS
+4.31 KVM_GET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
@@ -693,7 +693,7 @@ struct kvm_vcpu_events {
KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
interrupt.shadow contains a valid state. Otherwise, this field is undefined.
-4.30 KVM_SET_VCPU_EVENTS
+4.32 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
@@ -719,7 +719,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
the flags field to signal that interrupt.shadow contains a valid state and
shall be written into the VCPU.
-4.32 KVM_GET_DEBUGREGS
+4.33 KVM_GET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
Architectures: x86
@@ -737,7 +737,7 @@ struct kvm_debugregs {
__u64 reserved[9];
};
-4.33 KVM_SET_DEBUGREGS
+4.34 KVM_SET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
Architectures: x86
@@ -750,7 +750,7 @@ Writes debug registers into the vcpu.
See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
yet and must be cleared on entry.
-4.34 KVM_SET_USER_MEMORY_REGION
+4.35 KVM_SET_USER_MEMORY_REGION
Capability: KVM_CAP_USER_MEM
Architectures: all
@@ -796,7 +796,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
allocation and is deprecated.
-4.35 KVM_SET_TSS_ADDR
+4.36 KVM_SET_TSS_ADDR
Capability: KVM_CAP_SET_TSS_ADDR
Architectures: x86
@@ -814,7 +814,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
-4.36 KVM_ENABLE_CAP
+4.37 KVM_ENABLE_CAP
Capability: KVM_CAP_ENABLE_CAP
Architectures: ppc
@@ -849,7 +849,7 @@ function properly, this is the place to put them.
__u8 pad[64];
};
-4.37 KVM_GET_MP_STATE
+4.38 KVM_GET_MP_STATE
Capability: KVM_CAP_MP_STATE
Architectures: x86, ia64
@@ -879,7 +879,7 @@ Possible values are:
This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
irqchip, the multiprocessing state must be maintained by userspace.
-4.38 KVM_SET_MP_STATE
+4.39 KVM_SET_MP_STATE
Capability: KVM_CAP_MP_STATE
Architectures: x86, ia64
@@ -893,7 +893,7 @@ arguments.
This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
irqchip, the multiprocessing state must be maintained by userspace.
-4.39 KVM_SET_IDENTITY_MAP_ADDR
+4.40 KVM_SET_IDENTITY_MAP_ADDR
Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
Architectures: x86
@@ -911,7 +911,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
-4.40 KVM_SET_BOOT_CPU_ID
+4.41 KVM_SET_BOOT_CPU_ID
Capability: KVM_CAP_SET_BOOT_CPU_ID
Architectures: x86, ia64
@@ -923,7 +923,7 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
is vcpu 0.
-4.41 KVM_GET_XSAVE
+4.42 KVM_GET_XSAVE
Capability: KVM_CAP_XSAVE
Architectures: x86
@@ -937,7 +937,7 @@ struct kvm_xsave {
This ioctl would copy current vcpu's xsave struct to the userspace.
-4.42 KVM_SET_XSAVE
+4.43 KVM_SET_XSAVE
Capability: KVM_CAP_XSAVE
Architectures: x86
@@ -951,7 +951,7 @@ struct kvm_xsave {
This ioctl would copy userspace's xsave struct to the kernel.
-4.43 KVM_GET_XCRS
+4.44 KVM_GET_XCRS
Capability: KVM_CAP_XCRS
Architectures: x86
@@ -974,7 +974,7 @@ struct kvm_xcrs {
This ioctl would copy current vcpu's xcrs to the userspace.
-4.44 KVM_SET_XCRS
+4.45 KVM_SET_XCRS
Capability: KVM_CAP_XCRS
Architectures: x86
@@ -997,7 +997,7 @@ struct kvm_xcrs {
This ioctl would set vcpu's xcr to the value userspace specified.
-4.45 KVM_GET_SUPPORTED_CPUID
+4.46 KVM_GET_SUPPORTED_CPUID
Capability: KVM_CAP_EXT_CPUID
Architectures: x86
@@ -1062,7 +1062,7 @@ emulate them efficiently. The fields in each entry are defined as follows:
eax, ebx, ecx, edx: the values returned by the cpuid instruction for
this function/index combination
-4.46 KVM_PPC_GET_PVINFO
+4.47 KVM_PPC_GET_PVINFO
Capability: KVM_CAP_PPC_GET_PVINFO
Architectures: ppc
@@ -1085,7 +1085,7 @@ of 4 instructions that make up a hypercall.
If any additional field gets added to this structure later on, a bit for that
additional piece of information will be set in the flags bitmap.
-4.47 KVM_ASSIGN_PCI_DEVICE
+4.48 KVM_ASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_ASSIGNMENT
Architectures: x86 ia64
@@ -1113,7 +1113,7 @@ following flags are specified:
/* Depends on KVM_CAP_IOMMU */
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
-4.48 KVM_DEASSIGN_PCI_DEVICE
+4.49 KVM_DEASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_DEASSIGNMENT
Architectures: x86 ia64
@@ -1126,7 +1126,7 @@ Ends PCI device assignment, releasing all associated resources.
See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
used in kvm_assigned_pci_dev to identify the device.
-4.49 KVM_ASSIGN_DEV_IRQ
+4.50 KVM_ASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
Architectures: x86 ia64
@@ -1164,7 +1164,7 @@ The following flags are defined:
It is not valid to specify multiple types per host or guest IRQ. However, the
IRQ type of host and guest can differ or can even be null.
-4.50 KVM_DEASSIGN_DEV_IRQ
+4.51 KVM_DEASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
Architectures: x86 ia64
@@ -1178,7 +1178,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
by assigned_dev_id, flags must correspond to the IRQ type specified on
KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
-4.51 KVM_SET_GSI_ROUTING
+4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
Architectures: x86 ia64
@@ -1226,7 +1226,7 @@ struct kvm_irq_routing_msi {
__u32 pad;
};
-4.52 KVM_ASSIGN_SET_MSIX_NR
+4.53 KVM_ASSIGN_SET_MSIX_NR
Capability: KVM_CAP_DEVICE_MSIX
Architectures: x86 ia64
@@ -1245,7 +1245,7 @@ struct kvm_assigned_msix_nr {
#define KVM_MAX_MSIX_PER_DEV 256
-4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+4.54 KVM_ASSIGN_SET_MSIX_ENTRY
Capability: KVM_CAP_DEVICE_MSIX
Architectures: x86 ia64
diff --git a/Documentation/hwmon/hpfall.c b/Documentation/laptops/hpfall.c
index a4a8fc5d05d4..a4a8fc5d05d4 100644
--- a/Documentation/hwmon/hpfall.c
+++ b/Documentation/laptops/hpfall.c
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 631ad2f1b229..f0d3a8026a56 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -21,6 +21,7 @@ Contents:
- SMP barrier pairing.
- Examples of memory barrier sequences.
- Read memory barriers vs load speculation.
+ - Transitivity
(*) Explicit kernel barriers.
@@ -959,6 +960,63 @@ the speculation will be cancelled and the value reloaded:
retrieved : : +-------+
+TRANSITIVITY
+------------
+
+Transitivity is a deeply intuitive notion about ordering that is not
+always provided by real computer systems. The following example
+demonstrates transitivity (also called "cumulativity"):
+
+ CPU 1 CPU 2 CPU 3
+ ======================= ======================= =======================
+ { X = 0, Y = 0 }
+ STORE X=1 LOAD X STORE Y=1
+ <general barrier> <general barrier>
+ LOAD Y LOAD X
+
+Suppose that CPU 2's load from X returns 1 and its load from Y returns 0.
+This indicates that CPU 2's load from X in some sense follows CPU 1's
+store to X and that CPU 2's load from Y in some sense preceded CPU 3's
+store to Y. The question is then "Can CPU 3's load from X return 0?"
+
+Because CPU 2's load from X in some sense came after CPU 1's store, it
+is natural to expect that CPU 3's load from X must therefore return 1.
+This expectation is an example of transitivity: if a load executing on
+CPU A follows a load from the same variable executing on CPU B, then
+CPU A's load must either return the same value that CPU B's load did,
+or must return some later value.
+
+In the Linux kernel, use of general memory barriers guarantees
+transitivity. Therefore, in the above example, if CPU 2's load from X
+returns 1 and its load from Y returns 0, then CPU 3's load from X must
+also return 1.
+
+However, transitivity is -not- guaranteed for read or write barriers.
+For example, suppose that CPU 2's general barrier in the above example
+is changed to a read barrier as shown below:
+
+ CPU 1 CPU 2 CPU 3
+ ======================= ======================= =======================
+ { X = 0, Y = 0 }
+ STORE X=1 LOAD X STORE Y=1
+ <read barrier> <general barrier>
+ LOAD Y LOAD X
+
+This substitution destroys transitivity: in this example, it is perfectly
+legal for CPU 2's load from X to return 1, its load from Y to return 0,
+and CPU 3's load from X to return 0.
+
+The key point is that although CPU 2's read barrier orders its pair
+of loads, it does not guarantee to order CPU 1's store. Therefore, if
+this example runs on a system where CPUs 1 and 2 share a store buffer
+or a level of cache, CPU 2 might have early access to CPU 1's writes.
+General barriers are therefore required to ensure that all CPUs agree
+on the combined order of CPU 1's and CPU 2's accesses.
+
+To reiterate, if your code requires transitivity, use general barriers
+throughout.
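+
+Expressed as (purely illustrative) kernel-style C, the first example above
+corresponds to the following, with X and Y shared, r1, r2 and r3 local to
+each CPU, and each function running on its own CPU:
+
+	void cpu1(void) { X = 1; }
+
+	void cpu2(void)
+	{
+		r1 = X;
+		smp_mb();		/* general barrier */
+		r2 = Y;
+	}
+
+	void cpu3(void)
+	{
+		Y = 1;
+		smp_mb();		/* general barrier */
+		r3 = X;
+	}
+
+With general barriers, observing r1 == 1 and r2 == 0 implies r3 == 1;
+replacing CPU 2's smp_mb() with smp_rmb() removes that guarantee.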
+
+
========================
EXPLICIT KERNEL BARRIERS
========================
diff --git a/Documentation/hwmon/lis3lv02d b/Documentation/misc-devices/lis3lv02d
index 06534f25e643..f1a4ec840f86 100644
--- a/Documentation/hwmon/lis3lv02d
+++ b/Documentation/misc-devices/lis3lv02d
@@ -17,8 +17,8 @@ Description
This driver provides support for the accelerometer found in various HP laptops
sporting the feature officially called "HP Mobile Data Protection System 3D" or
"HP 3D DriveGuard". It detects automatically laptops with this sensor. Known
-models (full list can be found in drivers/hwmon/hp_accel.c) will have their
-axis automatically oriented on standard way (eg: you can directly play
+models (full list can be found in drivers/platform/x86/hp_accel.c) will have
+their axis automatically oriented in a standard way (e.g. you can directly play
neverball). The accelerometer data is readable via
/sys/devices/platform/lis3lv02d. Reported values are scaled
to mg values (1/1000th of earth gravity).
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099b8fc8..4edd78dfb362 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@ decnet.txt
- info on using the DECnet networking layer in Linux.
depca.txt
- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
- - the Digi International RightSwitch SE-X Ethernet driver
dmfe.txt
- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
e100.txt
@@ -50,8 +48,6 @@ e1000.txt
- info on Intel's E1000 line of gigabit ethernet boards
eql.txt
- serial IP load balancing
-ethertap.txt
- - the Ethertap user space packet reception and transmission driver
ewrk3.txt
- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
- - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
x25.txt
- general info on X.25 development.
x25-iface.txt
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 77f0cdd5b0dd..18afcd8afd51 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -1,4 +1,4 @@
-[state: 21-11-2010]
+[state: 27-01-2011]
BATMAN-ADV
----------
@@ -67,15 +67,16 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms bonding fragmentation orig_interval
-# vis_mode
+# aggregated_ogms gw_bandwidth hop_penalty
+# bonding gw_mode orig_interval
+# fragmentation gw_sel_class vis_mode
There is a special folder for debugging informations:
# ls /sys/kernel/debug/batman_adv/bat0/
-# originators socket transtable_global transtable_local
-# vis_data
+# gateways socket transtable_global vis_data
+# originators softif_neigh transtable_local
Some of the files contain all sort of status information regard-
@@ -230,9 +231,8 @@ CONTACT
Please send us comments, experiences, questions, anything :)
IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
- (optional subscription at
- https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription
+ at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the Authors:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ac3b4a726a1a..d3d653a5f9b9 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -280,6 +280,17 @@ tcp_max_orphans - INTEGER
more aggressively. Let me to remind again: each orphan eats
up to ~64K of unswappable memory.
+tcp_max_ssthresh - INTEGER
+ Limited Slow-Start for TCP with large congestion windows (cwnd) defined in
+ RFC3742. Limited slow-start is a mechanism to limit growth of the cwnd
+ in the region where cwnd is larger than tcp_max_ssthresh. TCP increases cwnd
+ by at most tcp_max_ssthresh segments, and by at least tcp_max_ssthresh/2
+ segments per RTT when the cwnd is above tcp_max_ssthresh.
+ If a TCP connection increases its cwnd to thousands (or tens of thousands)
+ of segments, and thousands of packets were being dropped during slow-start,
+ you can set tcp_max_ssthresh to improve performance for new TCP connections.
+ Default: 0 (off)
+
tcp_max_syn_backlog - INTEGER
Maximal number of remembered connection requests, which are
still did not receive an acknowledgment from connecting client.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 57080cd74575..f023ba6bba62 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -1,6 +1,6 @@
Device Power Management
-Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+Copyright (c) 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -159,18 +159,18 @@ matter, and the kernel is responsible for keeping track of it. By contrast,
whether or not a wakeup-capable device should issue wakeup events is a policy
decision, and it is managed by user space through a sysfs attribute: the
power/wakeup file. User space can write the strings "enabled" or "disabled" to
-set or clear the should_wakeup flag, respectively. Reads from the file will
-return the corresponding string if can_wakeup is true, but if can_wakeup is
-false then reads will return an empty string, to indicate that the device
-doesn't support wakeup events. (But even though the file appears empty, writes
-will still affect the should_wakeup flag.)
+set or clear the "should_wakeup" flag, respectively. This file is only present
+for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
+and is created (or removed) by device_set_wakeup_capable(). Reads from the
+file will return the corresponding string.
The device_may_wakeup() routine returns true only if both flags are set.
-Drivers should check this routine when putting devices in a low-power state
-during a system sleep transition, to see whether or not to enable the devices'
-wakeup mechanisms. However for runtime power management, wakeup events should
-be enabled whenever the device and driver both support them, regardless of the
-should_wakeup flag.
+This information is used by subsystems, like the PCI bus type code, to see
+whether or not to enable the devices' wakeup mechanisms. If device wakeup
+mechanisms are enabled or disabled directly by drivers, they also should use
+device_may_wakeup() to decide what to do during a system sleep transition.
+However for runtime power management, wakeup events should be enabled whenever
+the device and driver both support them, regardless of the should_wakeup flag.
/sys/devices/.../power/control files
@@ -249,23 +249,18 @@ various phases always run after tasks have been frozen and before they are
unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
been disabled (except for those marked with the IRQ_WAKEUP flag).
-Most phases use bus, type, and class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, and dev->class->pm). The prepare and complete
-phases are exceptions; they use only bus callbacks. When multiple callbacks
-are used in a phase, they are invoked in the order: <class, type, bus> during
-power-down transitions and in the opposite order during power-up transitions.
-For example, during the suspend phase the PM core invokes
-
- dev->class->pm.suspend(dev);
- dev->type->pm.suspend(dev);
- dev->bus->pm.suspend(dev);
-
-before moving on to the next device, whereas during the resume phase the core
-invokes
-
- dev->bus->pm.resume(dev);
- dev->type->pm.resume(dev);
- dev->class->pm.resume(dev);
+All phases use bus, type, or class callbacks (that is, methods defined in
+dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually
+exclusive, so if the device type provides a struct dev_pm_ops object pointed to
+by its pm field (i.e. both dev->type and dev->type->pm are defined), the
+callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise,
+if the class provides a struct dev_pm_ops object pointed to by its pm field
+(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
+callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of
+both the device type and class objects are NULL (or those objects do not exist),
+the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
+will be used (this allows device types to override callbacks provided by bus
+types or classes if necessary).
These callbacks may in turn invoke device- or driver-specific methods stored in
dev->driver->pm, but they don't have to.
@@ -507,6 +502,49 @@ routines. Nevertheless, different callback pointers are used in case there is a
situation where it actually matters.
+Device Power Domains
+--------------------
+Sometimes devices share reference clocks or other power resources. In those
+cases it generally is not possible to put devices into low-power states
+individually. Instead, a set of devices sharing a power resource can be put
+into a low-power state together at the same time by turning off the shared
+power resource. Of course, they also need to be put into the full-power state
+together, by turning the shared power resource on. A set of devices with this
+property is often referred to as a power domain.
+
+Support for power domains is provided through the pwr_domain field of struct
+device. This field is a pointer to an object of type struct dev_power_domain,
+defined in include/linux/pm.h, which provides a set of power management
+callbacks analogous to the subsystem-level and device driver callbacks. These
+callbacks are executed for the given device during all power transitions, in
+addition to the respective subsystem-level callbacks. Specifically, the power
+domain "suspend" callbacks
+(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
+executed after the analogous subsystem-level callbacks, while the power domain
+"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
+etc.) are executed before the analogous subsystem-level callbacks. Error codes
+returned by the "suspend" and "resume" power domain callbacks are ignored.
+
+The power domain ->runtime_idle() callback is executed before the
+subsystem-level ->runtime_idle() callback and its result is not ignored.
+Namely, if it returns an error code, the subsystem-level ->runtime_idle()
+callback will not be called and the helper function rpm_idle() executing it
+will return that error code. This mechanism is intended to help platforms
+where saving device state is a time-consuming operation that should only be
+carried out if all devices in the power domain are idle, before turning off
+the shared power resource(s). Namely, the power domain ->runtime_idle()
+callback may return an error code (-EBUSY is recommended in those cases) until
+the pm_runtime_idle() helper (or its asynchronous version) has been called for
+all devices in the power domain, preventing the subsystem-level
+->runtime_idle() callback from being run prematurely.
+
+The support for device power domains is only relevant to platforms needing to
+use the same subsystem-level (e.g. platform bus type) and device driver power
+management callbacks in many different power domain configurations and wanting
+to avoid incorporating the support for power domains into the subsystem-level
+callbacks. Other platforms need not implement it or take it into account
+in any way.
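As a hedged sketch of how platform code might use this, assume a hypothetical
helper foo_domain_all_idle() that reports whether every device sharing the
domain has gone idle (locking and the actual shared-resource handling are
omitted):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm.h>

/* Assumed platform-specific check; always-idle stub for this sketch. */
static bool foo_domain_all_idle(struct device *dev)
{
    return true;
}

static int foo_domain_runtime_idle(struct device *dev)
{
    /* Keep the subsystem-level ->runtime_idle() from running too early. */
    if (!foo_domain_all_idle(dev))
        return -EBUSY;
    return 0;
}

static struct dev_power_domain foo_pwr_domain = {
    .ops = {
        .runtime_idle = foo_domain_runtime_idle,
    },
};

Platform setup code would then point the pwr_domain field of each device in
the domain at &foo_pwr_domain.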
+
+
System Devices
--------------
System devices (sysdevs) follow a slightly different API, which can be found in
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index ffe55ffa540a..654097b130b4 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -1,6 +1,6 @@
Run-time Power Management Framework for I/O Devices
-(C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+(C) 2009-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
(C) 2010 Alan Stern <stern@rowland.harvard.edu>
1. Introduction
@@ -44,11 +44,12 @@ struct dev_pm_ops {
};
The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are
-executed by the PM core for either the bus type, or device type (if the bus
-type's callback is not defined), or device class (if the bus type's and device
-type's callbacks are not defined) of given device. The bus type, device type
-and device class callbacks are referred to as subsystem-level callbacks in what
-follows.
+executed by the PM core for either the device type, or the class (if the device
+type's struct dev_pm_ops object does not exist), or the bus type (if the
+device type's and class' struct dev_pm_ops objects do not exist) of the given
+device (this allows device types to override callbacks provided by bus types or
+classes if necessary). The bus type, device type and class callbacks are
+referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 34800cc521bf..4416b28630df 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -62,12 +62,12 @@ setup via another operating system for it to use. Despite the
inconvenience, this method requires minimal work by the kernel, since
the firmware will also handle restoring memory contents on resume.
-For suspend-to-disk, a mechanism called swsusp called 'swsusp' (Swap
-Suspend) is used to write memory contents to free swap space.
-swsusp has some restrictive requirements, but should work in most
-cases. Some, albeit outdated, documentation can be found in
-Documentation/power/swsusp.txt. Alternatively, userspace can do most
-of the actual suspend to disk work, see userland-swsusp.txt.
+For suspend-to-disk, a mechanism called 'swsusp' (Swap Suspend) is used
+to write memory contents to free swap space. swsusp has some restrictive
+requirements, but should work in most cases. Some, albeit outdated,
+documentation can be found in Documentation/power/swsusp.txt.
+Alternatively, userspace can do most of the actual suspend to disk work,
+see userland-swsusp.txt.
Once memory state is written to disk, the system may either enter a
low-power state (like ACPI S4), or it may simply power down. Powering
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index e3960b8c8689..5620fb5ac425 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -5,8 +5,6 @@ please mail me.
00-INDEX
- this file
-booting-without-of.txt
- - Booting the Linux/ppc kernel without Open Firmware
cpu_features.txt
- info on how we support a variety of CPUs with minimal compile-time
options.
@@ -16,8 +14,6 @@ hvcs.txt
- IBM "Hypervisor Virtual Console Server" Installation Guide
mpc52xx.txt
- Linux 2.6.x on MPC52xx family
-mpc52xx-device-tree-bindings.txt
- - MPC5200 Device Tree Bindings
sound.txt
- info on sound support under Linux/PPC
zImage_layout.txt
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index b64d10d221ec..4d9ce73ff730 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,26 @@
+Release Date : Thu. Feb 24, 2011 17:00:00 PST 2011 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.05.34-rc1
+Old Version : 00.00.05.29-rc1
+ 1. Fix some failure gotos from megasas_probe_one(), etc.
+ 2. Add missing check_and_restore_queue_depth() call in
+ complete_cmd_fusion().
+ 3. Enable MSI-X before calling megasas_init_fw().
+ 4. Call tasklet_schedule() even if outbound_intr_status == 0 for MFI based
+ boards in MSI-X mode.
+ 5. Fix megasas_probe_one() to clear PCI_MSIX_FLAGS_ENABLE in msi control
+ register in kdump kernel.
+ 6. Fix megasas_get_cmd() to only print "Command pool empty" if
+ megasas_dbg_lvl is set.
+ 7. Fix megasas_build_dcdb_fusion() to not filter by TYPE_DISK.
+ 8. Fix megasas_build_dcdb_fusion() to use io_request->LUN[1] field.
+ 9. Add MR_EVT_CFG_CLEARED to megasas_aen_polling().
+ 10. Fix tasklet_init() in megasas_init_fw() to use instancet->tasklet.
+ 11. Fix fault state handling in megasas_transition_to_ready().
+ 12. Fix max_sectors setting for IEEE SGL's.
+ 13. Fix iMR OCR support to work correctly.
+-------------------------------------------------------------------------------
Release Date : Tues. Dec 14, 2010 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/Documentation/scsi/hpsa.txt b/Documentation/scsi/hpsa.txt
index dca658362cbf..880c085b951b 100644
--- a/Documentation/scsi/hpsa.txt
+++ b/Documentation/scsi/hpsa.txt
@@ -28,6 +28,12 @@ boot parameter "hpsa_allow_any=1" is specified, however these are not tested
nor supported by HP with this driver. For older Smart Arrays, the cciss
driver should still be used.
+The "hpsa_simple_mode=1" boot parameter may be used to prevent the driver from
+putting the controller into "performant" mode. The difference is that with simple
+mode, each command completion requires an interrupt, while with "performant mode"
+(the default, and ordinarily better performing) it is possible to have multiple
+command completions indicated by a single interrupt.
+
HPSA specific entries in /sys
-----------------------------
@@ -39,6 +45,7 @@ HPSA specific entries in /sys
/sys/class/scsi_host/host*/rescan
/sys/class/scsi_host/host*/firmware_revision
+ /sys/class/scsi_host/host*/transport_mode
the host "rescan" attribute is a write only attribute. Writing to this
attribute will cause the driver to scan for new, changed, or removed devices
@@ -55,6 +62,10 @@ HPSA specific entries in /sys
root@host:/sys/class/scsi_host/host4# cat firmware_revision
7.14
+ The transport_mode indicates whether the controller is in "performant"
+ or "simple" mode. This is controlled by the "hpsa_simple_mode" module
+ parameter.
+
HPSA specific disk attributes:
------------------------------
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index df322c103466..5f17d29c59b5 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1343,7 +1343,7 @@ Members of interest:
underruns (overruns should be rare). If possible an LLD
should set 'resid' prior to invoking 'done'. The most
interesting case is data transfers from a SCSI target
- device device (i.e. READs) that underrun.
+ device (e.g. READs) that underrun.
underflow - LLD should place (DID_ERROR << 16) in 'result' if
actual number of bytes transferred is less than this
figure. Not many LLDs implement this check and some that
@@ -1351,6 +1351,18 @@ Members of interest:
report a DID_ERROR. Better for an LLD to implement
'resid'.
+It is recommended that an LLD set 'resid' on data transfers from a SCSI
+target device (e.g. READs). It is especially important that 'resid' is set
+when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
+(and possibly RECOVERED ERROR). In these cases, if an LLD is in doubt about
+how much data has been received, then the safest approach is to indicate that
+no bytes have been received. For example, to indicate that no valid data has
+been received an LLD might use this helper:
+	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate that only three
+512-byte blocks have been received, 'resid' could be set like this:
+	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
+
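Putting the pieces together, a hedged sketch of an LLD completion path (not
taken from any in-tree driver; 'bytes_rxed' is assumed to come from the HBA
hardware):

#include <scsi/scsi_cmnd.h>

static void lld_complete_read(struct scsi_cmnd *SCpnt,
                              unsigned int bytes_rxed, int result)
{
    if (bytes_rxed > scsi_bufflen(SCpnt))
        bytes_rxed = scsi_bufflen(SCpnt);

    /* Report how much of the buffer was NOT filled by the target. */
    scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - bytes_rxed);

    SCpnt->result = result;
    SCpnt->scsi_done(SCpnt);    /* hand the command back to the mid level */
}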
The scsi_cmnd structure is defined in include/scsi/scsi_cmnd.h
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 178c831b907d..2e3c64b1a6a5 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -86,7 +86,7 @@ to change the variables it has to get an exclusive write lock.
The routines look the same as above:
- rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+ rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock);
unsigned long flags;
@@ -196,25 +196,3 @@ appropriate:
For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
-
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated. These interfere
-with lockdep state tracking.
-
-Most of the time, you can simply turn:
- static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-into:
- static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
- struct foo bar {
- .lock = SPIN_LOCK_UNLOCKED;
- };
-
-to:
-
- struct foo bar {
- .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
- };
-
-Declaration of static rw_locks undergo a similar transformation.
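For reference, a short sketch of the recommended static initializers (the
names are hypothetical):

#include <linux/spinlock.h>

/* Stand-alone locks. */
static DEFINE_SPINLOCK(xxx_lock);
static DEFINE_RWLOCK(yyy_lock);

/* A lock embedded in a statically initialized structure. */
struct foo {
    spinlock_t lock;
    int counter;
};

static struct foo bar = {
    .lock = __SPIN_LOCK_UNLOCKED(bar.lock),
    .counter = 0,
};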
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 11d5ceda5bb0..36f007514db3 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -367,7 +367,7 @@ the different loglevels.
- console_loglevel: messages with a higher priority than
this will be printed to the console
-- default_message_level: messages without an explicit priority
+- default_message_loglevel: messages without an explicit priority
will be printed with this priority
- minimum_console_loglevel: minimum (highest) value to which
console_loglevel can be set
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index dc52bd442c92..79fcafc7fd64 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -247,6 +247,13 @@ You need very few things to get the syscalls tracing in an arch.
- Support the TIF_SYSCALL_TRACEPOINT thread flags.
- Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
in the ptrace syscalls tracing path.
+- If the system call table on this arch is more complicated than a simple array
+ of addresses of the system calls, implement arch_syscall_addr() to return
+ the address of a given system call.
+- If the symbol names of the system calls do not match the function names on
+ this arch, define ARCH_HAS_SYSCALL_MATCH_SYM_NAME in asm/ftrace.h and
+ implement arch_syscall_match_sym_name() with the appropriate logic to return
+ true if the function name corresponds with the symbol name (see the sketch
+ after this list).
- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
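A sketch of what such an override might look like, assuming (for illustration
only) an arch whose function symbols carry a leading dot; it would live in that
arch's asm/ftrace.h:

#include <linux/string.h>
#include <linux/types.h>

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
                                               const char *name)
{
    /* Skip the "." the toolchain prepends to function symbol names. */
    return !strcmp(sym + 1, name);
}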
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 557c1edeccaf..67f1cc473257 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -80,11 +80,11 @@ of ftrace. Here is a list of some of the key files:
tracers listed here can be configured by
echoing their name into current_tracer.
- tracing_enabled:
+ tracing_on:
- This sets or displays whether the current_tracer
- is activated and tracing or not. Echo 0 into this
- file to disable the tracer or 1 to enable it.
+	This sets or displays whether writing to the trace
+	ring buffer is enabled. Echo 0 into this file to disable
+	writing to the ring buffer or 1 to enable it.
trace:
@@ -202,10 +202,6 @@ Here is the list of current tracers that may be configured.
to draw a graph of function calls similar to C code
source.
- "sched_switch"
-
- Traces the context switches and wakeups between tasks.
-
"irqsoff"
Traces the areas that disable interrupts and saves
@@ -273,39 +269,6 @@ format, the function name that was traced "path_put" and the
parent function that called this function "path_walk". The
timestamp is the time at which the function was entered.
-The sched_switch tracer also includes tracing of task wakeups
-and context switches.
-
- ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 2916:115:S
- ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 10:115:S
- ksoftirqd/1-7 [01] 1453.070013: 7:115:R ==> 10:115:R
- events/1-10 [01] 1453.070013: 10:115:S ==> 2916:115:R
- kondemand/1-2916 [01] 1453.070013: 2916:115:S ==> 7:115:R
- ksoftirqd/1-7 [01] 1453.070013: 7:115:S ==> 0:140:R
-
-Wake ups are represented by a "+" and the context switches are
-shown as "==>". The format is:
-
- Context switches:
-
- Previous task Next Task
-
- <pid>:<prio>:<state> ==> <pid>:<prio>:<state>
-
- Wake ups:
-
- Current task Task waking up
-
- <pid>:<prio>:<state> + <pid>:<prio>:<state>
-
-The prio is the internal kernel priority, which is the inverse
-of the priority that is usually displayed by user-space tools.
-Zero represents the highest priority (99). Prio 100 starts the
-"nice" priorities with 100 being equal to nice -20 and 139 being
-nice 19. The prio "140" is reserved for the idle task which is
-the lowest priority thread (pid 0).
-
-
Latency trace format
--------------------
@@ -491,79 +454,6 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
latencies, as described in "Latency
trace format".
-sched_switch
-------------
-
-This tracer simply records schedule switches. Here is an example
-of how to use it.
-
- # echo sched_switch > current_tracer
- # echo 1 > tracing_enabled
- # sleep 1
- # echo 0 > tracing_enabled
- # cat trace
-
-# tracer: sched_switch
-#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
- bash-3997 [01] 240.132281: 3997:120:R + 4055:120:R
- bash-3997 [01] 240.132284: 3997:120:R ==> 4055:120:R
- sleep-4055 [01] 240.132371: 4055:120:S ==> 3997:120:R
- bash-3997 [01] 240.132454: 3997:120:R + 4055:120:S
- bash-3997 [01] 240.132457: 3997:120:R ==> 4055:120:R
- sleep-4055 [01] 240.132460: 4055:120:D ==> 3997:120:R
- bash-3997 [01] 240.132463: 3997:120:R + 4055:120:D
- bash-3997 [01] 240.132465: 3997:120:R ==> 4055:120:R
- <idle>-0 [00] 240.132589: 0:140:R + 4:115:S
- <idle>-0 [00] 240.132591: 0:140:R ==> 4:115:R
- ksoftirqd/0-4 [00] 240.132595: 4:115:S ==> 0:140:R
- <idle>-0 [00] 240.132598: 0:140:R + 4:115:S
- <idle>-0 [00] 240.132599: 0:140:R ==> 4:115:R
- ksoftirqd/0-4 [00] 240.132603: 4:115:S ==> 0:140:R
- sleep-4055 [01] 240.133058: 4055:120:S ==> 3997:120:R
- [...]
-
-
-As we have discussed previously about this format, the header
-shows the name of the trace and points to the options. The
-"FUNCTION" is a misnomer since here it represents the wake ups
-and context switches.
-
-The sched_switch file only lists the wake ups (represented with
-'+') and context switches ('==>') with the previous task or
-current task first followed by the next task or task waking up.
-The format for both of these is PID:KERNEL-PRIO:TASK-STATE.
-Remember that the KERNEL-PRIO is the inverse of the actual
-priority with zero (0) being the highest priority and the nice
-values starting at 100 (nice -20). Below is a quick chart to map
-the kernel priority to user land priorities.
-
- Kernel Space User Space
- ===============================================================
- 0(high) to 98(low) user RT priority 99(high) to 1(low)
- with SCHED_RR or SCHED_FIFO
- ---------------------------------------------------------------
- 99 sched_priority is not used in scheduling
- decisions(it must be specified as 0)
- ---------------------------------------------------------------
- 100(high) to 139(low) user nice -20(high) to 19(low)
- ---------------------------------------------------------------
- 140 idle task priority
- ---------------------------------------------------------------
-
-The task states are:
-
- R - running : wants to run, may not actually be running
- S - sleep : process is waiting to be woken up (handles signals)
- D - disk sleep (uninterruptible sleep) : process must be woken up
- (ignores signals)
- T - stopped : process suspended
- t - traced : process is being traced (with something like gdb)
- Z - zombie : process waiting to be cleaned up
- X - unknown
-
-
ftrace_enabled
--------------
@@ -607,10 +497,10 @@ an example:
# echo irqsoff > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# ls -ltr
[...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: irqsoff
#
@@ -715,10 +605,10 @@ is much like the irqsoff tracer.
# echo preemptoff > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# ls -ltr
[...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: preemptoff
#
@@ -863,10 +753,10 @@ tracers.
# echo preemptirqsoff > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# ls -ltr
[...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: preemptirqsoff
#
@@ -1026,9 +916,9 @@ Instead of performing an 'ls', we will run 'sleep 1' under
# echo wakeup > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# chrt -f 5 sleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: wakeup
#
@@ -1140,9 +1030,9 @@ ftrace_enabled is set; otherwise this tracer is a nop.
# sysctl kernel.ftrace_enabled=1
# echo function > current_tracer
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: function
#
@@ -1180,7 +1070,7 @@ int trace_fd;
[...]
int main(int argc, char *argv[]) {
[...]
- trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
+ trace_fd = open(tracing_file("tracing_on"), O_WRONLY);
[...]
if (condition_hit()) {
write(trace_fd, "0", 1);
@@ -1631,9 +1521,9 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
# echo sys_nanosleep hrtimer_interrupt \
> set_ftrace_filter
# echo function > current_tracer
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: ftrace
#
@@ -1879,9 +1769,9 @@ different. The trace is live.
# echo function > current_tracer
# cat trace_pipe > /tmp/trace.out &
[1] 4153
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
# usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
# cat trace
# tracer: function
#
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index 5f77d94598dd..6d27ab8d6e9f 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -42,11 +42,25 @@ Synopsis of kprobe_events
+|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
- (u8/u16/u32/u64/s8/s16/s32/s64) and string are supported.
+ (u8/u16/u32/u64/s8/s16/s32/s64), "string" and bitfield
+ are supported.
(*) only for return probe.
(**) this is useful for fetching a field of data structures.
+Types
+-----
+Several types are supported for fetch-args. The kprobe tracer will access
+memory according to the given type. The prefixes 's' and 'u' mean the type is
+signed or unsigned, respectively. Traced arguments are shown in decimal
+(signed) or hex (unsigned). The "string" type is a special type that fetches a
+"null-terminated" string from kernel space; this means it will fail and store
+NULL if the string container has been paged out. The bitfield type is another
+special type, which takes 3 parameters: bit-width, bit-offset, and
+container-size (usually 32). The syntax is:
+
+ b<bit-width>@<bit-offset>/<container-size>
+
+For example, b4@8/32 fetches a 4-bit field starting at bit offset 8 within a
+32-bit container.
+
Per-Probe Event Filtering
-------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 6f99e1260db8..0bdf6bdab69f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1205,7 +1205,7 @@ ATHEROS AR9170 WIRELESS DRIVER
M: Christian Lamparter <chunkeey@web.de>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ar9170
-S: Maintained
+S: Obsolete
F: drivers/net/wireless/ath/ar9170/
CARL9170 LINUX COMMUNITY WIRELESS DRIVER
@@ -1692,7 +1692,15 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
+CHINESE DOCUMENTATION
+M: Harry Wei <harryxiyou@gmail.com>
+L: xiyoulinuxkernelgroup@googlegroups.com
+L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/zh_CN/
+
CISCO VIC ETHERNET NIC DRIVER
+M: Christian Benvenuti <benve@cisco.com>
M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
M: David Wang <dwang2@cisco.com>
@@ -2026,7 +2034,7 @@ F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
DCCP PROTOCOL
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
L: dccp@vger.kernel.org
W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
S: Maintained
@@ -2095,6 +2103,12 @@ F: Documentation/serial/digiepca.txt
F: drivers/char/epca*
F: drivers/char/digi*
+DIOLAN U2C-12 I2C DRIVER
+M: Guenter Roeck <guenter.roeck@ericsson.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-diolan-u2c.c
+
DIRECTORY NOTIFICATION (DNOTIFY)
M: Eric Paris <eparis@parisplace.org>
S: Maintained
@@ -2341,7 +2355,7 @@ F: include/linux/edac_mce.h
EDAC-I82975X
M: Ranganathan Desikan <ravi@jetztechnologies.com>
-M: "Arvind R." <arvind@jetztechnologies.com>
+M: "Arvind R." <arvino55@gmail.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
@@ -3372,6 +3386,12 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/ioc3_serial.c
+IOQ LIBRARY
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/ioq.h
+F: lib/ioq.c
+
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
@@ -3437,6 +3457,7 @@ F: net/ipx/
IRDA SUBSYSTEM
M: Samuel Ortiz <samuel@sortiz.org>
L: irda-users@lists.sourceforge.net (subscribers-only)
+L: netdev@vger.kernel.org
W: http://irda.sourceforge.net/
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
@@ -3880,6 +3901,12 @@ L: linux-security-module@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
S: Supported
+LIS3LV02D ACCELEROMETER DRIVER
+M: Eric Piel <eric.piel@tremplin-utc.net>
+S: Maintained
+F: Documentation/misc-devices/lis3lv02d
+F: drivers/misc/lis3lv02d/
+
LLC (802.2)
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
S: Maintained
@@ -3887,12 +3914,6 @@ F: include/linux/llc.h
F: include/net/llc*
F: net/llc/
-LIS3LV02D ACCELEROMETER DRIVER
-M: Eric Piel <eric.piel@tremplin-utc.net>
-S: Maintained
-F: Documentation/hwmon/lis3lv02d
-F: drivers/hwmon/lis3lv02d.*
-
LM73 HARDWARE MONITOR DRIVER
M: Guillaume Ligneul <guillaume.ligneul@gmail.com>
L: lm-sensors@lm-sensors.org
@@ -4497,14 +4518,14 @@ S: Maintained
F: sound/soc/omap/
OMAP FRAMEBUFFER SUPPORT
-M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: linux-fbdev@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/video/omap/
OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2)
-M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: linux-omap@vger.kernel.org
L: linux-fbdev@vger.kernel.org
S: Maintained
@@ -4881,6 +4902,15 @@ S: Maintained
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
+PKUNITY SOC DRIVERS
+M: Guan Xuetao <gxt@mprc.pku.edu.cn>
+W: http://mprc.pku.edu.cn/~guanxuetao/linux
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+F: drivers/input/serio/i8042-unicore32io.h
+F: drivers/i2c/busses/i2c-puv3.c
+F: drivers/video/fb-puv3.c
+
PMC SIERRA MaxRAID DRIVER
M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
L: linux-scsi@vger.kernel.org
@@ -5144,6 +5174,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
P: rt2x00 project
M: Ivo van Doorn <IvDoorn@gmail.com>
M: Gertjan van Wingerde <gwingerde@gmail.com>
+M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
L: users@rt2x00.serialmonkey.com (moderated for non-subscribers)
W: http://rt2x00.serialmonkey.com/
@@ -5266,7 +5297,7 @@ S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
-M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M: Herton Ronaldo Krzesinski <herton@canonical.com>
M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
@@ -5322,8 +5353,7 @@ S: Supported
F: drivers/s390/crypto/
S390 ZFCP DRIVER
-M: Christof Schmitt <christof.schmitt@de.ibm.com>
-M: Swen Schillig <swen@vnet.ibm.com>
+M: Steffen Maier <maier@linux.vnet.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -5612,6 +5642,12 @@ F: drivers/tty/serial/serial_lh7a40x.c
F: drivers/usb/gadget/lh7a40*
F: drivers/usb/host/ohci-lh7a40*
+SHM-SIGNAL LIBRARY
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/shm_signal.h
+F: lib/shm_signal.c
+
SIMPLE FIRMWARE INTERFACE (SFI)
M: Len Brown <lenb@kernel.org>
L: sfi-devel@simplefirmware.org
@@ -5640,7 +5676,8 @@ F: arch/arm/mach-s3c2410/bast-ide.c
F: arch/arm/mach-s3c2410/bast-irq.c
TI DAVINCI MACHINE SUPPORT
-M: Kevin Hilman <khilman@deeprootsystems.com>
+M: Sekhar Nori <nsekhar@ti.com>
+M: Kevin Hilman <khilman@ti.com>
L: davinci-linux-open-source@linux.davincidsp.com (subscribers-only)
Q: http://patchwork.kernel.org/project/linux-davinci/list/
S: Supported
@@ -6104,7 +6141,7 @@ S: Maintained
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
-M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M: Herton Ronaldo Krzesinski <herton@canonical.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c
@@ -6244,6 +6281,13 @@ F: drivers/uwb/
F: include/linux/uwb.h
F: include/linux/uwb/
+UNICORE32 ARCHITECTURE
+M: Guan Xuetao <gxt@mprc.pku.edu.cn>
+W: http://mprc.pku.edu.cn/~guanxuetao/linux
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+F: arch/unicore32/
+
UNIFDEF
M: Tony Finch <dot@dotat.at>
W: http://dotat.at/prog/unifdef
@@ -6599,6 +6643,19 @@ S: Maintained
F: Documentation/fb/uvesafb.txt
F: drivers/video/uvesafb.*
+VBUS
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/vbus*
+F: drivers/vbus/*
+
+VBUS ETHERNET DRIVER
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+W: http://developer.novell.com/wiki/index.php/AlacrityVM
+F: include/linux/venet.h
+F: drivers/net/vbus-enet.c
+
VFAT/FAT/MSDOS FILESYSTEM
M: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
S: Maintained
diff --git a/Makefile b/Makefile
index 5e40aa2acbff..c6df5a5fce34 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
@@ -421,7 +421,7 @@ endif
# of make so .config is not included in this case either (for *config).
no-dot-config-targets := clean mrproper distclean \
- cscope TAGS tags help %docs check% coccicheck \
+ cscope gtags TAGS tags help %docs check% coccicheck \
include/linux/version.h headers_% \
kernelversion %src-pkg
@@ -666,7 +666,7 @@ export MODLIB
# INSTALL_MOD_STRIP, if defined, will cause modules to be
# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
# the default option --strip-debug will be used. Otherwise,
-# INSTALL_MOD_STRIP will used as the options to the strip command.
+# INSTALL_MOD_STRIP value will be used as the options to the strip command.
ifdef INSTALL_MOD_STRIP
ifeq ($(INSTALL_MOD_STRIP),1)
@@ -1135,7 +1135,7 @@ CLEAN_FILES += vmlinux System.map \
MRPROPER_DIRS += include/config usr/include include/generated
MRPROPER_FILES += .config .config.old .version .old_version \
include/linux/version.h \
- Module.symvers tags TAGS cscope*
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
# clean - Delete most, but leave enough to build external modules
#
@@ -1222,6 +1222,7 @@ help:
@echo ' modules_prepare - Set up for building external modules'
@echo ' tags/TAGS - Generate tags file for editors'
@echo ' cscope - Generate cscope index'
+ @echo ' gtags - Generate GNU GLOBAL index'
@echo ' kernelrelease - Output the release version string'
@echo ' kernelversion - Output the version stored in Makefile'
@echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
@@ -1380,7 +1381,7 @@ clean: $(clean-dirs)
quiet_cmd_tags = GEN $@
cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
-tags TAGS cscope: FORCE
+tags TAGS cscope gtags: FORCE
$(call cmd,tags)
# Scripts to check various things for consistency
diff --git a/arch/Kconfig b/arch/Kconfig
index f78c2be4242b..e6e00f8d3e0c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,9 @@ config OPROFILE_EVENT_MULTIPLEX
config HAVE_OPROFILE
bool
+config HAVE_HWSAMPLER
+ bool
+
config KPROBES
bool "Kprobes"
depends on MODULES
@@ -178,4 +181,7 @@ config HAVE_ARCH_JUMP_LABEL
config HAVE_ARCH_MUTEX_CPU_RELAX
bool
+config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d480141..23b8a0cdbc19 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -11,6 +11,7 @@ config ALPHA
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 012f1243b1c1..a9cb6aa447aa 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -63,7 +63,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
#endif
-/* This is used only in do_no_page and do_swap_page. */
+/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
flush_icache_user_range((vma), (page), 0, 0)
diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h
index 98099bda9370..e5f29ca28180 100644
--- a/arch/alpha/include/asm/errno.h
+++ b/arch/alpha/include/asm/errno.h
@@ -122,4 +122,6 @@
#define ERFKILL 138 /* Operation not possible due to RF-kill */
+#define EHWPOISON 139 /* Memory page has hardware error */
+
#endif
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 1570c0b54336..a83bbea62c67 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -13,44 +13,13 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
- long count;
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
- struct list_head wait_list;
-};
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
- LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
static inline void __down_read(struct rw_semaphore *sem)
{
@@ -250,10 +219,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
#endif
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index c1f3e7cb82a4..a58e84f1a63b 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -159,7 +159,7 @@ void read_persistent_clock(struct timespec *ts)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
irqreturn_t timer_interrupt(int irq, void *dev)
{
@@ -172,8 +172,6 @@ irqreturn_t timer_interrupt(int irq, void *dev)
profile_tick(CPU_PROFILING);
#endif
- write_seqlock(&xtime_lock);
-
/*
* Calculate how many ticks have passed since the last update,
* including any previous partial leftover. Save any resulting
@@ -187,9 +185,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
nticks = delta >> FIX_SHIFT;
if (nticks)
- do_timer(nticks);
-
- write_sequnlock(&xtime_lock);
+ xtime_update(nticks);
if (test_irq_work_pending()) {
clear_irq_work_pending();
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 003ef4c02585..433be2a24f31 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
+#include <asm/cache.h>
#include <asm/page.h>
OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
__init_begin = ALIGN(PAGE_SIZE);
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
- PERCPU(PAGE_SIZE)
+ PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
needed for the THREAD_SIZE aligned init_task gets freed after init */
. = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
/* Freed after init ends here */
_data = .;
- RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.got : {
*(.got)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 26d45e5b636b..bcc9727b1633 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,7 @@ config ARM
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
- select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
+ select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
@@ -24,7 +24,7 @@ config ARM
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
+ select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
@@ -178,11 +178,6 @@ config FIQ
config ARCH_MTD_XIP
bool
-config ARM_L1_CACHE_SHIFT_6
- bool
- help
- Setting ARM L1 cache line size to 64 Bytes.
-
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -191,6 +186,22 @@ config VECTORS_BASE
help
The base address of exception vectors.
+config ARM_PATCH_PHYS_VIRT
+ bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on !XIP_KERNEL && !THUMB2_KERNEL && MMU
+ depends on !ARCH_REALVIEW || !SPARSEMEM
+ help
+ Patch phys-to-virt translation functions at runtime according to
+ the position of the kernel in system memory.
+
+ This can only be used with non-XIP, non-Thumb2, MMU kernels where
+ the base of physical memory is at a 16MB boundary.
+
+config ARM_PATCH_PHYS_VIRT_16BIT
+ def_bool y
+ depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -212,15 +223,6 @@ choice
prompt "ARM system type"
default ARCH_VERSATILE
-config ARCH_AAEC2000
- bool "Agilent AAEC-2000 based"
- select CPU_ARM920T
- select ARM_AMBA
- select HAVE_CLK
- select ARCH_USES_GETTIMEOFFSET
- help
- This enables support for systems based on the Agilent AAEC-2000
-
config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
@@ -229,6 +231,7 @@ config ARCH_INTEGRATOR
select ICST
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
+ select PLAT_VERSATILE_FPGA_IRQ
help
Support for ARM's Integrator platform.
@@ -236,11 +239,11 @@ config ARCH_REALVIEW
bool "ARM Ltd. RealView family"
select ARM_AMBA
select CLKDEV_LOOKUP
- select HAVE_SCHED_CLOCK
select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLCD
select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB
help
@@ -251,11 +254,12 @@ config ARCH_VERSATILE
select ARM_AMBA
select ARM_VIC
select CLKDEV_LOOKUP
- select HAVE_SCHED_CLOCK
select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLCD
+ select PLAT_VERSATILE_FPGA_IRQ
select ARM_TIMER_SP804
help
This enables support for ARM Ltd Versatile board.
@@ -268,9 +272,10 @@ config ARCH_VEXPRESS
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select HAVE_CLK
- select HAVE_SCHED_CLOCK
+ select HAVE_PATA_PLATFORM
select ICST
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLCD
help
This enables support for the ARM Ltd Versatile Express boards.
@@ -346,7 +351,7 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
- select ARCH_USES_GETTIMEOFFSET
+ select GENERIC_CLOCKEVENTS
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -457,6 +462,7 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
+ select CPU_V6K
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
@@ -619,6 +625,7 @@ config ARCH_MSM
select HAVE_CLK
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
+ select CLKDEV_LOOKUP
help
Support for Qualcomm MSM/QSD based systems. This runs on the
apps processor of the MSM/QSD and depends on a shared memory
@@ -760,8 +767,8 @@ config ARCH_S5PV210
help
Samsung S5PV210/S5PC110 series based systems
-config ARCH_S5PV310
- bool "Samsung S5PV310/S5PC210"
+config ARCH_EXYNOS4
+ bool "Samsung EXYNOS4"
select CPU_V7
select ARCH_SPARSEMEM_ENABLE
select GENERIC_GPIO
@@ -772,7 +779,7 @@ config ARCH_S5PV310
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
- Samsung S5PV310 series based systems
+ Samsung EXYNOS4 series based systems
config ARCH_SHARK
bool "Shark"
@@ -795,17 +802,6 @@ config ARCH_TCC_926
help
Support for Telechips TCC ARM926-based systems.
-config ARCH_LH7A40X
- bool "Sharp LH7A40X"
- select CPU_ARM922T
- select ARCH_SPARSEMEM_ENABLE if !LH7A40X_CONTIGMEM
- select ARCH_USES_GETTIMEOFFSET
- help
- Say Y here for systems based on one of the Sharp LH7A40X
- System on a Chip processors. These CPUs include an ARM922T
- core with a wide array of integrated devices for
- hand-held and low-power applications.
-
config ARCH_U300
bool "ST-Ericsson U300 Series"
depends on MMU
@@ -875,6 +871,16 @@ config PLAT_SPEAR
help
Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
+config ARCH_VT8500
+ bool "VIA/WonderMedia 85xx"
+ select CPU_ARM926T
+ select GENERIC_GPIO
+ select ARCH_HAS_CPUFREQ
+ select GENERIC_CLOCKEVENTS
+ select ARCH_REQUIRE_GPIOLIB
+ select HAVE_PWM
+ help
+ Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
endchoice
#
@@ -882,8 +888,6 @@ endchoice
# Kconfigs may be included either alphabetically (according to the
# plat- suffix) or along side the corresponding mach-* source.
#
-source "arch/arm/mach-aaec2000/Kconfig"
-
source "arch/arm/mach-at91/Kconfig"
source "arch/arm/mach-bcmring/Kconfig"
@@ -922,8 +926,6 @@ source "arch/arm/mach-kirkwood/Kconfig"
source "arch/arm/mach-ks8695/Kconfig"
-source "arch/arm/mach-lh7a40x/Kconfig"
-
source "arch/arm/mach-loki/Kconfig"
source "arch/arm/mach-lpc32xx/Kconfig"
@@ -991,7 +993,7 @@ source "arch/arm/mach-s5pc100/Kconfig"
source "arch/arm/mach-s5pv210/Kconfig"
-source "arch/arm/mach-s5pv310/Kconfig"
+source "arch/arm/mach-exynos4/Kconfig"
source "arch/arm/mach-shmobile/Kconfig"
@@ -1006,6 +1008,9 @@ source "arch/arm/mach-ux500/Kconfig"
source "arch/arm/mach-versatile/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
+source "arch/arm/plat-versatile/Kconfig"
+
+source "arch/arm/mach-vt8500/Kconfig"
source "arch/arm/mach-w90x900/Kconfig"
@@ -1048,7 +1053,7 @@ config XSCALE_PMU
default y
config CPU_HAS_PMU
- depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+ depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
(!ARCH_OMAP3 || OMAP3_EMU)
default y
bool
@@ -1064,7 +1069,7 @@ endif
config ARM_ERRATA_411920
bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
- depends on CPU_V6
+ depends on CPU_V6 || CPU_V6K
help
Invalidation of the Instruction Cache operation can
fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1177,6 +1182,31 @@ config ARM_ERRATA_743622
visible impact on the overall performance or power consumption of the
processor.
+config ARM_ERRATA_751472
+ bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 751472 Cortex-A9 (prior
+ to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
+ completion of a following broadcasted operation if the second
+ operation is received by a CPU before the ICIALLUIS has completed,
+ potentially leading to corrupted entries in the cache or TLB.
+
+config ARM_ERRATA_753970
+ bool "ARM errata: cache sync operation may be faulty"
+ depends on CACHE_PL310
+ help
+ This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+
+ Under some condition the effect of cache sync operation on
+ the store buffer still remains when the operation completes.
+ This means that the store buffer is always asked to drain and
+ this prevents it from merging any further writes. The workaround
+ is to replace the normal offset of cache sync operation (0x730)
+ by another offset targeting an unmapped PL310 register 0x740.
+ This has the same effect as the cache sync operation: store buffer
+ drain and waiting for all buffers empty.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1250,10 +1280,11 @@ source "kernel/time/Kconfig"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ depends on CPU_V6K || CPU_V7
depends on GENERIC_CLOCKEVENTS
depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
- ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
+ ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE
select USE_GENERIC_SMP_HELPERS
select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
@@ -1353,7 +1384,7 @@ source kernel/Kconfig.preempt
config HZ
int
default 200 if ARCH_EBSA110 || ARCH_S3C2410 || ARCH_S5P64X0 || \
- ARCH_S5P6442 || ARCH_S5PV210 || ARCH_S5PV310
+ ARCH_S5P6442 || ARCH_S5PV210 || ARCH_EXYNOS4
default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
default AT91_TIMER_HZ if ARCH_AT91
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
@@ -1361,7 +1392,7 @@ config HZ
config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
- depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
+ depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
help
@@ -1619,6 +1650,18 @@ config ZBOOT_ROM
Say Y here if you intend to execute your compressed kernel image
(zImage) directly from ROM or flash. If unsure, say N.
+config ZBOOT_ROM_MMCIF
+ bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
+ depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+ help
+ Say Y here to include experimental MMCIF loading code in the
+ ROM-able zImage. With this enabled it is possible to write the
+ ROM-able zImage kernel image to an MMC card and boot the
+ kernel straight from the reset vector. At reset the processor
+ Mask ROM will load the first part of the ROM-able zImage
+ which in turn loads the rest of the kernel image to RAM using the
+ MMCIF hardware block.
+
config CMDLINE
string "Default kernel command string"
default ""
@@ -1852,7 +1895,7 @@ config FPE_FASTFPE
config VFP
bool "VFP-format floating point maths"
- depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
+ depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
help
Say Y to include VFP support code in the kernel. This is needed
if your hardware includes a VFP unit.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1adfedd6..d6e6206902f4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
-OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
# Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:
@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
ifeq ($(CONFIG_AEABI),y)
CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork
@@ -126,7 +127,6 @@ endif
# Machine directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
-machine-$(CONFIG_ARCH_AAEC2000) := aaec2000
machine-$(CONFIG_ARCH_AT91) := at91
machine-$(CONFIG_ARCH_BCMRING) := bcmring
machine-$(CONFIG_ARCH_CLPS711X) := clps711x
@@ -146,7 +146,6 @@ machine-$(CONFIG_ARCH_IXP23XX) := ixp23xx
machine-$(CONFIG_ARCH_IXP4XX) := ixp4xx
machine-$(CONFIG_ARCH_KIRKWOOD) := kirkwood
machine-$(CONFIG_ARCH_KS8695) := ks8695
-machine-$(CONFIG_ARCH_LH7A40X) := lh7a40x
machine-$(CONFIG_ARCH_LOKI) := loki
machine-$(CONFIG_ARCH_LPC32XX) := lpc32xx
machine-$(CONFIG_ARCH_MMP) := mmp
@@ -178,7 +177,7 @@ machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0
machine-$(CONFIG_ARCH_S5P6442) := s5p6442
machine-$(CONFIG_ARCH_S5PC100) := s5pc100
machine-$(CONFIG_ARCH_S5PV210) := s5pv210
-machine-$(CONFIG_ARCH_S5PV310) := s5pv310
+machine-$(CONFIG_ARCH_EXYNOS4) := exynos4
machine-$(CONFIG_ARCH_SA1100) := sa1100
machine-$(CONFIG_ARCH_SHARK) := shark
machine-$(CONFIG_ARCH_SHMOBILE) := shmobile
@@ -190,6 +189,7 @@ machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
machine-$(CONFIG_ARCH_VERSATILE) := versatile
machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
+machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_ARCH_NUC93X) := nuc93x
machine-$(CONFIG_FOOTBRIDGE) := footbridge
@@ -280,7 +280,7 @@ bzImage: zImage
zImage Image xipImage bootpImage uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-zinstall install: vmlinux
+zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
# We use MRPROPER_FILES and CLEAN_FILES now
@@ -301,6 +301,7 @@ define archhelp
echo ' (supply initrd image via make variable INITRD=<path>)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
+ echo ' uinstall - Install U-Boot wrapped compressed kernel'
echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 4d26f2c52a75..9128fddf1109 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/zImage System.map "$(INSTALL_PATH)"
+uinstall: $(obj)/uImage
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/uImage System.map "$(INSTALL_PATH)"
+
zi:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/zImage System.map "$(INSTALL_PATH)"
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index ab204db594d3..c6028967d336 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,7 @@
font.c
-piggy.gz
+lib1funcs.S
+piggy.gzip
+piggy.lzo
+piggy.lzma
+vmlinux
vmlinux.lds
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 0a8f748e506a..e4375bcea1ca 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -4,9 +4,20 @@
# create a compressed vmlinuz image from the original vmlinux
#
+OBJS =
+
+# Ensure that mmcif loader code appears early in the image
+# to minimise the number of blocks that have to be read in
+# order to load it.
+ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
+ifeq ($(CONFIG_ARCH_SH7372),y)
+OBJS += mmcif-sh7372.o
+endif
+endif
+
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
HEAD = head.o
-OBJS = misc.o decompress.o
+OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
#
@@ -29,6 +40,10 @@ ifeq ($(CONFIG_ARCH_SA1100),y)
OBJS += head-sa1100.o
endif
+ifeq ($(CONFIG_ARCH_VT8500),y)
+OBJS += head-vt8500.o
+endif
+
ifeq ($(CONFIG_CPU_XSCALE),y)
OBJS += head-xscale.o
endif
@@ -83,9 +98,11 @@ endif
EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS := -Wa,-march=all
+# Provide size of uncompressed kernel to the decompressor via a linker symbol.
+LDFLAGS_vmlinux := --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
-LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
index 30973b76e6ae..c943d2e7da9d 100644
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ b/arch/arm/boot/compressed/head-shmobile.S
@@ -25,6 +25,36 @@
/* load board-specific initialization code */
#include <mach/zboot.h>
+#ifdef CONFIG_ZBOOT_ROM_MMCIF
+ /* Load image from MMC */
+ adr sp, __tmp_stack + 128
+ ldr r0, __image_start
+ ldr r1, __image_end
+ subs r1, r1, r0
+ ldr r0, __load_base
+ bl mmcif_loader
+
+ /* Jump to loaded code */
+ ldr r0, __loaded
+ ldr r1, __image_start
+ sub r0, r0, r1
+ ldr r1, __load_base
+ add pc, r0, r1
+
+__image_start:
+ .long _start
+__image_end:
+ .long _got_end
+__load_base:
+ .long CONFIG_MEMORY_START + 0x02000000 @ Load at 32Mb into SDRAM
+__loaded:
+ .long __continue
+ .align
+__tmp_stack:
+ .space 128
+__continue:
+#endif /* CONFIG_ZBOOT_ROM_MMCIF */
+
b 1f
__atags:@ tag #1
.long 12 @ tag->hdr.size = tag_size(tag_core);
diff --git a/arch/arm/boot/compressed/head-vt8500.S b/arch/arm/boot/compressed/head-vt8500.S
new file mode 100644
index 000000000000..1dc1e21a3be3
--- /dev/null
+++ b/arch/arm/boot/compressed/head-vt8500.S
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/arm/boot/compressed/head-vt8500.S
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * VIA VT8500 specific tweaks. This is merged into head.S by the linker.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/mach-types.h>
+
+ .section ".start", "ax"
+
+__VT8500_start:
+ @ Compare the SCC ID register against a list of known values
+ ldr r1, .SCCID
+ ldr r3, [r1]
+
+ @ VT8500 override
+ ldr r4, .VT8500SCC
+ cmp r3, r4
+ ldreq r7, .ID_BV07
+ beq .Lendvt8500
+
+ @ WM8505 override
+ ldr r4, .WM8505SCC
+ cmp r3, r4
+ ldreq r7, .ID_8505
+ beq .Lendvt8500
+
+ @ Otherwise, leave the bootloader's machine id untouched
+
+.SCCID:
+ .word 0xd8120000
+.VT8500SCC:
+ .word 0x34000102
+.WM8505SCC:
+ .word 0x34260103
+
+.ID_BV07:
+ .word MACH_TYPE_BV07
+.ID_8505:
+ .word MACH_TYPE_WM8505_7IN_NETBOOK
+
+.Lendvt8500:
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7193884ed8b0..84ac4d656310 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -21,7 +21,7 @@
#if defined(CONFIG_DEBUG_ICEDCC)
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro loadsp, rb, tmp
.endm
.macro writeb, ch, rb
@@ -128,14 +128,14 @@ wait: mrc p14, 0, pc, c0, c1, 0
.arm @ Always enter in ARM state
start:
.type start,#function
- THUMB( adr r12, BSYM(1f) )
- THUMB( bx r12 )
- THUMB( .rept 6 )
- ARM( .rept 8 )
+ .rept 7
mov r0, r0
.endr
+ ARM( mov r0, r0 )
+ ARM( b 1f )
+ THUMB( adr r12, BSYM(1f) )
+ THUMB( bx r12 )
- b 1f
.word 0x016f2818 @ Magic numbers to help the loader
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
@@ -174,9 +174,7 @@ not_angel:
*/
.text
- adr r0, LC0
- ldmia r0, {r1, r2, r3, r5, r6, r11, ip}
- ldr sp, [r0, #28]
+
#ifdef CONFIG_AUTO_ZRELADDR
@ determine final kernel image address
mov r4, pc
@@ -185,35 +183,108 @@ not_angel:
#else
ldr r4, =zreladdr
#endif
- subs r0, r0, r1 @ calculate the delta offset
- @ if delta is zero, we are
- beq not_relocated @ running at the address we
- @ were linked at.
+ bl cache_on
+
+restart: adr r0, LC0
+ ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
+ ldr sp, [r0, #32]
+
+ /*
+ * We might be running at a different address. We need
+ * to fix up various pointers.
+ */
+ sub r0, r0, r1 @ calculate the delta offset
+ add r5, r5, r0 @ _start
+ add r6, r6, r0 @ _edata
+#ifndef CONFIG_ZBOOT_ROM
+ /* malloc space is above the relocated stack (64k max) */
+ add sp, sp, r0
+ add r10, sp, #0x10000
+#else
/*
- * We're running at a different address. We need to fix
- * up various pointers:
- * r5 - zImage base address (_start)
- * r6 - size of decompressed image
- * r11 - GOT start
- * ip - GOT end
+ * With ZBOOT_ROM the bss/stack is non relocatable,
+ * but someone could still run this code from RAM,
+ * in which case our reference is _edata.
*/
- add r5, r5, r0
+ mov r10, r6
+#endif
+
+/*
+ * Check to see if we will overwrite ourselves.
+ * r4 = final kernel address
+ * r5 = start of this image
+ * r9 = size of decompressed image
+ * r10 = end of this image, including bss/stack/malloc space if non XIP
+ * We basically want:
+ * r4 >= r10 -> OK
+ * r4 + image length <= r5 -> OK
+ */
+ cmp r4, r10
+ bhs wont_overwrite
+ add r10, r4, r9
+ cmp r10, r5
+ bls wont_overwrite
+
+/*
+ * Relocate ourselves past the end of the decompressed kernel.
+ * r5 = start of this image
+ * r6 = _edata
+ * r10 = end of the decompressed kernel
+ * Because we always copy ahead, we need to do it from the end and go
+ * backward in case the source and destination overlap.
+ */
+ /* Round up to next 256-byte boundary. */
+ add r10, r10, #256
+ bic r10, r10, #255
+
+ sub r9, r6, r5 @ size to copy
+ add r9, r9, #31 @ rounded up to a multiple
+ bic r9, r9, #31 @ ... of 32 bytes
+ add r6, r9, r5
+ add r9, r9, r10
+
+1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
+ cmp r6, r5
+ stmdb r9!, {r0 - r3, r10 - r12, lr}
+ bhi 1b
+
+ /* Preserve offset to relocated code. */
+ sub r6, r9, r6
+
+ bl cache_clean_flush
+
+ adr r0, BSYM(restart)
+ add r0, r0, r6
+ mov pc, r0
+
+wont_overwrite:
+/*
+ * If delta is zero, we are running at the address we were linked at.
+ * r0 = delta
+ * r2 = BSS start
+ * r3 = BSS end
+ * r4 = kernel execution address
+ * r7 = architecture ID
+ * r8 = atags pointer
+ * r11 = GOT start
+ * r12 = GOT end
+ * sp = stack pointer
+ */
+ teq r0, #0
+ beq not_relocated
add r11, r11, r0
- add ip, ip, r0
+ add r12, r12, r0
#ifndef CONFIG_ZBOOT_ROM
/*
* If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
* we need to fix up pointers into the BSS region.
- * r2 - BSS start
- * r3 - BSS end
- * sp - stack pointer
+ * Note that the stack pointer has already been fixed up.
*/
add r2, r2, r0
add r3, r3, r0
- add sp, sp, r0
/*
* Relocate all entries in the GOT table.
@@ -221,7 +292,7 @@ not_angel:
1: ldr r1, [r11, #0] @ relocate entries in the GOT
add r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references.
- cmp r11, ip
+ cmp r11, r12
blo 1b
#else
@@ -234,7 +305,7 @@ not_angel:
cmphs r3, r1 @ _end < entry
addlo r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references.
- cmp r11, ip
+ cmp r11, r12
blo 1b
#endif
@@ -246,76 +317,24 @@ not_relocated: mov r0, #0
cmp r2, r3
blo 1b
- /*
- * The C runtime environment should now be setup
- * sufficiently. Turn the cache on, set up some
- * pointers, and start decompressing.
- */
- bl cache_on
-
- mov r1, sp @ malloc space above stack
- add r2, sp, #0x10000 @ 64k max
-
/*
- * Check to see if we will overwrite ourselves.
- * r4 = final kernel address
- * r5 = start of this image
- * r6 = size of decompressed image
- * r2 = end of malloc space (and therefore this image)
- * We basically want:
- * r4 >= r2 -> OK
- * r4 + image length <= r5 -> OK
+ * The C runtime environment should now be set up sufficiently.
+ * Set up some pointers, and start decompressing.
+ * r4 = kernel execution address
+ * r7 = architecture ID
+ * r8 = atags pointer
*/
- cmp r4, r2
- bhs wont_overwrite
- add r0, r4, r6
- cmp r0, r5
- bls wont_overwrite
-
- mov r5, r2 @ decompress after malloc space
- mov r0, r5
+ mov r0, r4
+ mov r1, sp @ malloc space above stack
+ add r2, sp, #0x10000 @ 64k max
mov r3, r7
bl decompress_kernel
-
- add r0, r0, #127 + 128 @ alignment + stack
- bic r0, r0, #127 @ align the kernel length
-/*
- * r0 = decompressed kernel length
- * r1-r3 = unused
- * r4 = kernel execution address
- * r5 = decompressed kernel start
- * r7 = architecture ID
- * r8 = atags pointer
- * r9-r12,r14 = corrupted
- */
- add r1, r5, r0 @ end of decompressed kernel
- adr r2, reloc_start
- ldr r3, LC1
- add r3, r2, r3
-1: ldmia r2!, {r9 - r12, r14} @ copy relocation code
- stmia r1!, {r9 - r12, r14}
- ldmia r2!, {r9 - r12, r14}
- stmia r1!, {r9 - r12, r14}
- cmp r2, r3
- blo 1b
- mov sp, r1
- add sp, sp, #128 @ relocate the stack
-
bl cache_clean_flush
- ARM( add pc, r5, r0 ) @ call relocation code
- THUMB( add r12, r5, r0 )
- THUMB( mov pc, r12 ) @ call relocation code
-
-/*
- * We're not in danger of overwriting ourselves. Do this the simple way.
- *
- * r4 = kernel execution address
- * r7 = architecture ID
- */
-wont_overwrite: mov r0, r4
- mov r3, r7
- bl decompress_kernel
- b call_kernel
+ bl cache_off
+ mov r0, #0 @ must be zero
+ mov r1, r7 @ restore architecture number
+ mov r2, r8 @ restore atags pointer
+ mov pc, r4 @ call kernel
.align 2
.type LC0, #object
@@ -323,11 +342,11 @@ LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
.word _start @ r5
- .word _image_size @ r6
+ .word _edata @ r6
+ .word _image_size @ r9
.word _got_start @ r11
.word _got_end @ ip
.word user_stack_end @ sp
-LC1: .word reloc_end - reloc_start
.size LC0, . - LC0
#ifdef CONFIG_ARCH_RPC
@@ -353,7 +372,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
* On exit,
* r0, r1, r2, r3, r9, r10, r12 corrupted
* This routine must preserve:
- * r4, r5, r6, r7, r8
+ * r4, r7, r8
*/
.align 5
cache_on: mov r3, #8 @ cache_on function
@@ -551,43 +570,6 @@ __common_mmu_cache_on:
#endif
/*
- * All code following this line is relocatable. It is relocated by
- * the above code to the end of the decompressed kernel image and
- * executed there. During this time, we have no stacks.
- *
- * r0 = decompressed kernel length
- * r1-r3 = unused
- * r4 = kernel execution address
- * r5 = decompressed kernel start
- * r7 = architecture ID
- * r8 = atags pointer
- * r9-r12,r14 = corrupted
- */
- .align 5
-reloc_start: add r9, r5, r0
- sub r9, r9, #128 @ do not copy the stack
- debug_reloc_start
- mov r1, r4
-1:
- .rept 4
- ldmia r5!, {r0, r2, r3, r10 - r12, r14} @ relocate kernel
- stmia r1!, {r0, r2, r3, r10 - r12, r14}
- .endr
-
- cmp r5, r9
- blo 1b
- mov sp, r1
- add sp, sp, #128 @ relocate the stack
- debug_reloc_end
-
-call_kernel: bl cache_clean_flush
- bl cache_off
- mov r0, #0 @ must be zero
- mov r1, r7 @ restore architecture number
- mov r2, r8 @ restore atags pointer
- mov pc, r4 @ call kernel
-
-/*
* Here follow the relocatable cache support functions for the
* various processors. This is a generic hook for locating an
* entry and jumping to an instruction at the specified offset
@@ -791,7 +773,7 @@ proc_types:
* On exit,
* r0, r1, r2, r3, r9, r12 corrupted
* This routine must preserve:
- * r4, r6, r7
+ * r4, r7, r8
*/
.align 5
cache_off: mov r3, #12 @ cache_off function
@@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
* On exit,
* r1, r2, r3, r9, r10, r11, r12 corrupted
* This routine must preserve:
- * r0, r4, r5, r6, r7
+ * r4, r6, r7, r8
*/
.align 5
cache_clean_flush:
@@ -1088,7 +1070,6 @@ memdump: mov r12, r0
#endif
.ltorg
-reloc_end:
.align
.section ".stack", "aw", %nobits
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index e653a6d3c8d9..4657e877bf8f 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -36,7 +36,7 @@ extern void error(char *x);
#ifdef CONFIG_DEBUG_ICEDCC
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
static void icedcc_putc(int ch)
{
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
new file mode 100644
index 000000000000..e6180af241f6
--- /dev/null
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -0,0 +1,87 @@
+/*
+ * sh7372 MMCIF loader
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mmc/sh_mmcif.h>
+#include <mach/mmcif.h>
+
+#define MMCIF_BASE (void __iomem *)0xe6bd0000
+
+#define PORT84CR (void __iomem *)0xe6050054
+#define PORT85CR (void __iomem *)0xe6050055
+#define PORT86CR (void __iomem *)0xe6050056
+#define PORT87CR (void __iomem *)0xe6050057
+#define PORT88CR (void __iomem *)0xe6050058
+#define PORT89CR (void __iomem *)0xe6050059
+#define PORT90CR (void __iomem *)0xe605005a
+#define PORT91CR (void __iomem *)0xe605005b
+#define PORT92CR (void __iomem *)0xe605005c
+#define PORT99CR (void __iomem *)0xe6050063
+
+#define SMSTPCR3 (void __iomem *)0xe615013c
+
+/* SH7372 specific MMCIF loader
+ *
+ * loads the zImage from an MMC card starting from block 1.
+ *
+ * The image must be start with a vrl4 header and
+ * the zImage must start at offset 512 of the image. That is,
+ * at block 2 (=byte 1024) on the media
+ *
+ * Use the following line to write the vrl4 formated zImage
+ * to an MMC card
+ * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
+ */
+asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
+{
+ mmcif_init_progress();
+ mmcif_update_progress(MMCIF_PROGRESS_ENTER);
+
+ /* Initialise MMC
+ * registers: PORT84CR-PORT92CR
+ * (MMCD0_0-MMCD0_7,MMCCMD0 Control)
+ * value: 0x04 - select function 4
+ */
+ __raw_writeb(0x04, PORT84CR);
+ __raw_writeb(0x04, PORT85CR);
+ __raw_writeb(0x04, PORT86CR);
+ __raw_writeb(0x04, PORT87CR);
+ __raw_writeb(0x04, PORT88CR);
+ __raw_writeb(0x04, PORT89CR);
+ __raw_writeb(0x04, PORT90CR);
+ __raw_writeb(0x04, PORT91CR);
+ __raw_writeb(0x04, PORT92CR);
+
+ /* Initialise MMC
+ * registers: PORT99CR (MMCCLK0 Control)
+ * value: 0x10 | 0x04 - enable output | select function 4
+ */
+ __raw_writeb(0x14, PORT99CR);
+
+ /* Enable clock to MMC hardware block */
+ __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3);
+
+ mmcif_update_progress(MMCIF_PROGRESS_INIT);
+
+ /* setup MMCIF hardware */
+ sh_mmcif_boot_init(MMCIF_BASE);
+
+ mmcif_update_progress(MMCIF_PROGRESS_LOAD);
+
+ /* load kernel via MMCIF interface */
+ sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */
+ (len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf);
+
+
+ /* Disable clock to MMC hardware block */
+ __raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
+
+ mmcif_update_progress(MMCIF_PROGRESS_DONE);
+}
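A note on the SMSTPCR3 handling above: per the register usage in this file, clearing bit 12 runs the MMCIF clock and setting it stops the clock again, which is why the final write must OR the stop bit back in rather than AND with it. A standalone sketch of just that bit manipulation (the bit position is taken from the defines above; the register value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MMCIF_STOP_BIT	(1u << 12)	/* module-stop bit for MMCIF, per SMSTPCR3 usage above */

int main(void)
{
	uint32_t smstpcr3 = 0xffffffff;		/* example: all modules stopped */

	smstpcr3 &= ~MMCIF_STOP_BIT;		/* enable: clear the stop bit */
	smstpcr3 |= MMCIF_STOP_BIT;		/* disable: set the stop bit again */

	printf("0x%08x\n", smstpcr3);		/* prints 0xffffffff */
	return 0;
}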
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 366a924019ac..5309909d7282 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -43,9 +43,6 @@ SECTIONS
_etext = .;
- /* Assume size of decompressed image is 4x the compressed image */
- _image_size = (_etext - _text) * 4;
-
_got_start = .;
.got : { *(.got) }
_got_end = .;
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 778655f0257a..ea5ee4d067f3 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -6,6 +6,8 @@ config ARM_VIC
config ARM_VIC_NR
int
+ default 4 if ARCH_S5PV210
+ default 3 if ARCH_S5P6442 || ARCH_S5PC100
default 2
depends on ARM_VIC
help
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 224377211151..e21c1f4218d3 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -142,25 +142,24 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
}
#ifdef CONFIG_SMP
-static int
-gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int shift = (d->irq % 4) * 8;
unsigned int cpu = cpumask_first(mask_val);
- u32 val;
- struct irq_desc *desc;
+ u32 val, mask, bit;
- spin_lock(&irq_controller_lock);
- desc = irq_to_desc(d->irq);
- if (desc == NULL) {
- spin_unlock(&irq_controller_lock);
+ if (cpu >= 8)
return -EINVAL;
- }
+
+ mask = 0xff << shift;
+ bit = 1 << (cpu + shift);
+
+ spin_lock(&irq_controller_lock);
d->node = cpu;
- val = readl(reg) & ~(0xff << shift);
- val |= 1 << (cpu + shift);
- writel(val, reg);
+ val = readl(reg) & ~mask;
+ writel(val | bit, reg);
spin_unlock(&irq_controller_lock);
return 0;
@@ -203,7 +202,7 @@ static struct irq_chip gic_chip = {
.irq_unmask = gic_unmask_irq,
.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
- .irq_set_affinity = gic_set_cpu,
+ .irq_set_affinity = gic_set_affinity,
#endif
};
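The reworked gic_set_affinity() above boils the routing update down to byte-lane arithmetic on the GIC_DIST_TARGET word. A standalone sketch of the same computation (irq, cpu and the register value are made-up example values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int irq = 34, cpu = 1;		/* example values */
	unsigned int shift = (irq % 4) * 8;	/* byte lane within the 32-bit word */
	uint32_t mask = 0xffu << shift;
	uint32_t bit  = 1u << (cpu + shift);
	uint32_t val  = 0x01010101;		/* pretend current GIC_DIST_TARGET word */

	val = (val & ~mask) | bit;		/* route irq 34 to CPU1 only */
	printf("0x%08x\n", val);		/* prints 0x01020101 */
	return 0;
}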
diff --git a/arch/arm/configs/exynos4_defconfig b/arch/arm/configs/exynos4_defconfig
new file mode 100644
index 000000000000..2ffba24d2e2a
--- /dev/null
+++ b/arch/arm/configs/exynos4_defconfig
@@ -0,0 +1,70 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_EXYNOS4=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=1
+CONFIG_MACH_SMDKC210=y
+CONFIG_MACH_SMDKV310=y
+CONFIG_MACH_UNIVERSAL_C210=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_ERRORS=y
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_DEBUG_S3C_UART=1
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/lpd7a400_defconfig b/arch/arm/configs/lpd7a400_defconfig
deleted file mode 100644
index 5a48f171204c..000000000000
--- a/arch/arm/configs/lpd7a400_defconfig
+++ /dev/null
@@ -1,68 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
-# CONFIG_EPOLL is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_LH7A40X=y
-CONFIG_MACH_LPD7A400=y
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_LH7A40X=y
-CONFIG_SERIAL_LH7A40X_CONSOLE=y
-CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm/configs/lpd7a404_defconfig b/arch/arm/configs/lpd7a404_defconfig
deleted file mode 100644
index 22d0631de009..000000000000
--- a/arch/arm/configs/lpd7a404_defconfig
+++ /dev/null
@@ -1,81 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
-# CONFIG_EPOLL is not set
-CONFIG_SLAB=y
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_LH7A40X=y
-CONFIG_MACH_LPD7A404=y
-CONFIG_PREEMPT=y
-CONFIG_DISCONTIGMEM_MANUAL=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_LH7A40X=y
-CONFIG_SERIAL_LH7A40X_CONSOLE=y
-CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ZERO=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index 9cba68cfa51a..11a9904a2c41 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -17,6 +17,12 @@ CONFIG_ARCH_MX5=y
CONFIG_MACH_MX51_BABBAGE=y
CONFIG_MACH_MX51_3DS=y
CONFIG_MACH_EUKREA_CPUIMX51=y
+CONFIG_MACH_EUKREA_CPUIMX51SD=y
+CONFIG_MACH_MX51_EFIKAMX=y
+CONFIG_MACH_MX53_EVK=y
+CONFIG_MACH_MX53_SMD=y
+CONFIG_MACH_MX53_LOCO=y
+CONFIG_MXC_PWM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
@@ -41,13 +47,26 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_CAN=y
+CONFIG_CAN_RAW=y
+CONFIG_CAN_DEV=y
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_MCP251X=y
# CONFIG_WIRELESS is not set
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
+CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
@@ -58,6 +77,7 @@ CONFIG_SCSI_SCAN_ASYNC=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=m
CONFIG_NETDEVICES=y
+CONFIG_MII=m
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_QSEMI_PHY=y
@@ -74,7 +94,6 @@ CONFIG_LSI_ET1011C_PHY=y
CONFIG_MDIO_BITBANG=y
CONFIG_MDIO_GPIO=y
CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
CONFIG_FEC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
@@ -82,10 +101,12 @@ CONFIG_FEC=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
-CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_EVBUG=m
+CONFIG_KEYBOARD_GPIO=y
CONFIG_MOUSE_PS2=m
CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_MC13783=y
CONFIG_SERIO_SERPORT=m
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_DEVKMEM is not set
@@ -100,8 +121,21 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_ALGOBIT=m
CONFIG_I2C_ALGOPCF=m
CONFIG_I2C_ALGOPCA=m
+CONFIG_I2C_IMX=y
+CONFIG_SPI=y
+CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
+CONFIG_MFD_MC13XXX=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_MC13783=y
+CONFIG_REGULATOR_MC13892=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_IMX_SOC=y
# CONFIG_HID_SUPPORT is not set
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
@@ -110,9 +144,12 @@ CONFIG_MMC=y
CONFIG_MMC_BLOCK=m
CONFIG_MMC_SDHCI=m
CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_PWM=y
CONFIG_RTC_CLASS=y
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_MC13XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_IMX_SDMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -126,7 +163,6 @@ CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=m
@@ -153,14 +189,13 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_FTRACE is not set
# CONFIG_ARM_UNWIND is not set
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
CONFIG_SECURITYFS=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index ae890caa17a7..019fb7c67dc3 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -58,6 +58,7 @@ CONFIG_ARM_ERRATA_411920=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
+CONFIG_NR_CPUS=2
# CONFIG_LOCAL_TIMERS is not set
CONFIG_AEABI=y
CONFIG_LEDS=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
new file mode 100644
index 000000000000..7a9267e5da55
--- /dev/null
+++ b/arch/arm/configs/tegra_defconfig
@@ -0,0 +1,123 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_MACH_HARMONY=y
+CONFIG_TEGRA_DEBUG_UARTD=y
+CONFIG_ARM_ERRATA_742230=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_VFP=y
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_MISC_DEVICES=y
+CONFIG_AD525X_DPOT=y
+CONFIG_AD525X_DPOT_I2C=y
+CONFIG_ICS932S401=y
+CONFIG_APDS9802ALS=y
+CONFIG_ISL29003=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_SG=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 52d86c4485bf..e1d602029a4d 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
@@ -13,43 +12,93 @@ CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
CONFIG_MACH_U8500=y
CONFIG_MACH_U5500=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_NET=y
+CONFIG_PHONET=y
+CONFIG_PHONET_PIPECTRLR=y
+# CONFIG_WIRELESS is not set
+CONFIG_CAIF=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
-# CONFIG_MISC_DEVICES is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_AB8500_PWM=y
+CONFIG_SENSORS_BH1780=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_NOMADIK=y
+CONFIG_KEYBOARD_STMPE=y
+CONFIG_KEYBOARD_TC3589X=y
# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_BU21013=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_AB8500_PONKEY=y
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_NOMADIK=y
+CONFIG_I2C=y
+CONFIG_I2C_NOMADIK=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
+CONFIG_GPIO_STMPE=y
+CONFIG_GPIO_TC3589X=y
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_MFD_STMPE=y
+CONFIG_MFD_TC3589X=y
+CONFIG_AB8500_CORE=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_AB8500=y
# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_MUSB_PIO_ONLY=y
+CONFIG_USB_GADGET=y
+CONFIG_AB8500_USB=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_LP5521=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AB8500=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMADEVICES=y
+CONFIG_STE_DMA40=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_INOTIFY=y
+CONFIG_EXT3_FS=y
+CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=m
# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
@@ -58,5 +107,3 @@ CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
-CONFIG_CRC_T10DIF=m
-# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
new file mode 100644
index 000000000000..f2de51f0bd18
--- /dev/null
+++ b/arch/arm/configs/vexpress_defconfig
@@ -0,0 +1,140 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_VEXPRESS_CA9X4=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+CONFIG_VMSPLIT_2G=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_AEABI=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/nfs nfsroot=10.1.69.3:/work/nfsroot ip=dhcp console=ttyAMA0 mem=128M"
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_ARM_INTEGRATOR=y
+CONFIG_MISC_DEVICES=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_ARMAACI=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_MON=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_ERRORS=y
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 7b1bb2bbaf88..af54ed102f5f 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -149,14 +149,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
*/
/*
+ * Native endian assembly bitops. nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
+/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_be(const void * p, unsigned size);
extern int _find_next_zero_bit_be(const void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
/*
* The __* form of bitops are non-atomic and may be reordered.
*/
-#define ATOMIC_BITOP_LE(name,nr,p) \
- (__builtin_constant_p(nr) ? \
- ____atomic_##name(nr, p) : \
- _##name##_le(nr,p))
-
-#define ATOMIC_BITOP_BE(name,nr,p) \
- (__builtin_constant_p(nr) ? \
- ____atomic_##name(nr, p) : \
- _##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p) \
+ (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
#else
-#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p) _##name(nr,p)
#endif
-#define NONATOMIC_BITOP(name,nr,p) \
- (____nonatomic_##name(nr, p))
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)
#ifndef __ARMEB__
/*
* These are the little endian, atomic definitions.
*/
-#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define WORD_BITOFF_TO_LE(x) ((x))
#else
-
/*
* These are the big endian, atomic definitions.
*/
-#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
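With the endian-specific variants folded into one native-endian set above, callers keep using the generic names unchanged on both little- and big-endian builds. A hedged kernel-style usage sketch (the bitmap and bit indices are illustrative only, not part of this patch):

#include <linux/bitops.h>

static unsigned long example_flags[2];		/* 64 illustrative flag bits */

static void example_usage(void)
{
	set_bit(3, example_flags);		/* expands through ATOMIC_BITOP(set_bit, ...) */
	if (test_and_clear_bit(3, example_flags))
		change_bit(4, example_flags);	/* same native-endian helpers on LE and BE */
}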
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3acd8fa25e34..d5d8d5c72682 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -12,7 +12,7 @@
#include <linux/mm.h>
-#include <asm/glue.h>
+#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
@@ -20,123 +20,6 @@
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
- * Cache Model
- * ===========
- */
-#undef _CACHE
-#undef MULTI_CACHE
-
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v4
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
- defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
- defined(CONFIG_CPU_ARM1026)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_FA526)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE fa
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM926T)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE arm926
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM940T)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE arm940
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM946E)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE arm946
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4WB)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v4wb
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSCALE)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE xscale
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSC3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE xsc3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_MOHAWK)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE mohawk
-# endif
-#endif
-
-#if defined(CONFIG_CPU_FEROCEON)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_V6)
-//# ifdef _CACHE
-# define MULTI_CACHE 1
-//# else
-//# define _CACHE v6
-//# endif
-#endif
-
-#if defined(CONFIG_CPU_V7)
-//# ifdef _CACHE
-# define MULTI_CACHE 1
-//# else
-//# define _CACHE v7
-//# endif
-#endif
-
-#if !defined(_CACHE) && !defined(MULTI_CACHE)
-#error Unknown cache maintainence model
-#endif
-
-/*
* This flag is used to indicate that the page pointed to by a pte is clean
* and does not require cleaning before returning it to the user.
*/
@@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache;
* visible to the CPU.
*/
#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
+#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
-#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
-#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
-#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
-#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
-#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
-#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
-
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
@@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
-#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
-
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
@@ -316,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
* Optimized __flush_icache_all for the common cases. Note that UP ARMv7
* will fall through to use __flush_icache_all_generic.
*/
-#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
+#if (defined(CONFIG_CPU_V7) && \
+ (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h
deleted file mode 100644
index e2b5b0b2116a..000000000000
--- a/arch/arm/include/asm/cpu-multi32.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * arch/arm/include/asm/cpu-multi32.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <asm/page.h>
-
-struct mm_struct;
-
-/*
- * Don't change this structure - ASM code
- * relies on it.
- */
-extern struct processor {
- /* MISC
- * get data abort address/flags
- */
- void (*_data_abort)(unsigned long pc);
- /*
- * Retrieve prefetch fault address
- */
- unsigned long (*_prefetch_abort)(unsigned long lr);
- /*
- * Set up any processor specifics
- */
- void (*_proc_init)(void);
- /*
- * Disable any processor specifics
- */
- void (*_proc_fin)(void);
- /*
- * Special stuff for a reset
- */
- void (*reset)(unsigned long addr) __attribute__((noreturn));
- /*
- * Idle the processor
- */
- int (*_do_idle)(void);
- /*
- * Processor architecture specific
- */
- /*
- * clean a virtual address range from the
- * D-cache without flushing the cache.
- */
- void (*dcache_clean_area)(void *addr, int size);
-
- /*
- * Set the page table
- */
- void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
- /*
- * Set a possibly extended PTE. Non-extended PTEs should
- * ignore 'ext'.
- */
- void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
-} processor;
-
-#define cpu_proc_init() processor._proc_init()
-#define cpu_proc_fin() processor._proc_fin()
-#define cpu_reset(addr) processor.reset(addr)
-#define cpu_do_idle() processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h
deleted file mode 100644
index f073a6d2a406..000000000000
--- a/arch/arm/include/asm/cpu-single.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * arch/arm/include/asm/cpu-single.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/*
- * Single CPU
- */
-#ifdef __STDC__
-#define __catify_fn(name,x) name##x
-#else
-#define __catify_fn(name,x) name/**/x
-#endif
-#define __cpu_fn(name,x) __catify_fn(name,x)
-
-/*
- * If we are supporting multiple CPUs, then we must use a table of
- * function pointers for this lot. Otherwise, we can optimise the
- * table away.
- */
-#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init)
-#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
-#define cpu_reset __cpu_fn(CPU_NAME,_reset)
-#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
-#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
-#define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm)
-#define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext)
-
-#include <asm/page.h>
-
-struct mm_struct;
-
-/* declare all the functions as extern */
-extern void cpu_proc_init(void);
-extern void cpu_proc_fin(void);
-extern int cpu_do_idle(void);
-extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 20ae96cc0020..ed5bc9e05a4e 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -23,6 +23,8 @@
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+extern unsigned int processor_id;
+
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
@@ -43,7 +45,6 @@
__val; \
})
#else
-extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#define read_cpuid_ext(reg) 0
#endif
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
new file mode 100644
index 000000000000..de5354746924
--- /dev/null
+++ b/arch/arm/include/asm/fncpy.h
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/include/asm/fncpy.h - helper macros for function body copying
+ *
+ * Copyright (C) 2011 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * These macros are intended for use when there is a need to copy a low-level
+ * function body into special memory.
+ *
+ * For example, when reconfiguring the SDRAM controller, the code doing the
+ * reconfiguration may need to run from SRAM.
+ *
+ * NOTE: the copied function body must be entirely self-contained and
+ * position-independent in order for this to work properly.
+ *
+ * NOTE: in order for embedded literals and data to get referenced correctly,
+ * the alignment of functions must be preserved when copying. To ensure this,
+ * the source and destination addresses for fncpy() must be aligned to a
+ * multiple of 8 bytes: you will get a BUG() if this condition is not met.
+ * You will typically need a ".align 3" directive in the assembler where the
+ * function to be copied is defined, and ensure that your allocator for the
+ * destination buffer returns 8-byte-aligned pointers.
+ *
+ * Typical usage example:
+ *
+ * extern int f(args);
+ * extern uint32_t size_of_f;
+ * int (*copied_f)(args);
+ * void *sram_buffer;
+ *
+ * copied_f = fncpy(sram_buffer, &f, size_of_f);
+ *
+ * ... later, call the function: ...
+ *
+ * copied_f(args);
+ *
+ * The size of the function to be copied can't be determined from C:
+ * this must be determined by other means, such as adding assembler directives
+ * in the file where f is defined.
+ */
+
+#ifndef __ASM_FNCPY_H
+#define __ASM_FNCPY_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Minimum alignment requirement for the source and destination addresses
+ * for function copying.
+ */
+#define FNCPY_ALIGN 8
+
+#define fncpy(dest_buf, funcp, size) ({ \
+ uintptr_t __funcp_address; \
+ typeof(funcp) __result; \
+ \
+ asm("" : "=r" (__funcp_address) : "0" (funcp)); \
+ \
+ /* \
+ * Ensure alignment of source and destination addresses, \
+ * disregarding the function's Thumb bit: \
+ */ \
+ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
+ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+ \
+ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ flush_icache_range((unsigned long)(dest_buf), \
+ (unsigned long)(dest_buf) + (size)); \
+ \
+ asm("" : "=r" (__result) \
+ : "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
+ \
+ __result; \
+})
+
+#endif /* !__ASM_FNCPY_H */
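One detail worth calling out from the macro above: the low (Thumb) bit of the original function pointer is stripped before the memcpy() and then carried over into the returned pointer, so the copy is entered in the same instruction set. A tiny standalone sketch of just that bit manipulation (the addresses are made-up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t funcp = 0xc0101235;			/* example Thumb function address (bit 0 set) */
	uintptr_t dest  = 0xd0020000;			/* example 8-byte-aligned destination buffer */

	uintptr_t copy_src = funcp & ~(uintptr_t)1;	/* what memcpy() above copies from */
	uintptr_t returned = dest | (funcp & 1);	/* what fncpy() hands back to the caller */

	printf("copy from 0x%lx, call through 0x%lx\n",
	       (unsigned long)copy_src, (unsigned long)returned);
	return 0;
}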
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
new file mode 100644
index 000000000000..c7afbc552c7f
--- /dev/null
+++ b/arch/arm/include/asm/glue-cache.h
@@ -0,0 +1,146 @@
+/*
+ * arch/arm/include/asm/glue-cache.h
+ *
+ * Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_CACHE_H
+#define ASM_GLUE_CACHE_H
+
+#include <asm/glue.h>
+
+/*
+ * Cache Model
+ * ===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_CACHE_V3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+ defined(CONFIG_CPU_ARM1026)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_FA526)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE fa
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xscale
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xsc3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_MOHAWK)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE mohawk
+# endif
+#endif
+
+#if defined(CONFIG_CPU_FEROCEON)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+//# ifdef _CACHE
+# define MULTI_CACHE 1
+//# else
+//# define _CACHE v6
+//# endif
+#endif
+
+#if defined(CONFIG_CPU_V7)
+//# ifdef _CACHE
+# define MULTI_CACHE 1
+//# else
+//# define _CACHE v7
+//# endif
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintenance model
+#endif
+
+#ifndef MULTI_CACHE
+#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
+#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
+#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
+
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
+#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#endif
+
+#endif
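For readers unfamiliar with the glue scheme, the single-cache case above relies on __glue() pasting the _CACHE prefix onto the generic names, e.g. __cpuc_flush_kern_all becoming xscale_flush_kern_cache_all on an XScale-only build. A standalone illustration of the token pasting involved (the GLUE/STR macros here are stand-ins for asm/glue.h, which this hunk does not show):

#include <stdio.h>

#define GLUE2(a, b)	a##b
#define GLUE(a, b)	GLUE2(a, b)
#define STR2(x)		#x
#define STR(x)		STR2(x)

#define EXAMPLE_CACHE	xscale	/* as if CONFIG_CPU_XSCALE were the only cache model */

int main(void)
{
	/* prints "xscale_flush_kern_cache_all" */
	printf("%s\n", STR(GLUE(EXAMPLE_CACHE, _flush_kern_cache_all)));
	return 0;
}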
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
new file mode 100644
index 000000000000..354d571e8bcc
--- /dev/null
+++ b/arch/arm/include/asm/glue-df.h
@@ -0,0 +1,110 @@
+/*
+ * arch/arm/include/asm/glue-df.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_DF_H
+#define ASM_GLUE_DF_H
+
+#include <asm/glue.h>
+
+/*
+ * Data Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ * arm6 - ARM6 style
+ * arm7 - ARM7 style
+ * v4_early - ARMv4 without Thumb early abort handler
+ * v4t_late - ARMv4 with Thumb late abort handler
+ * v4t_early - ARMv4 with Thumb early abort handler
+ * v5tej_early - ARMv5 with Thumb and Java early abort handler
+ * xscale - ARMv5 with Thumb and Xscale extensions
+ * v6_early - ARMv6 generic early abort handler
+ * v7_early - ARMv7 generic early abort handler
+ */
+#undef CPU_DABORT_HANDLER
+#undef MULTI_DABORT
+
+#if defined(CONFIG_CPU_ARM610)
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER cpu_arm6_data_abort
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM710)
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER cpu_arm7_data_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_LV4T
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v4t_late_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v4_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4T
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v4t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5TJ
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v5tj_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5T
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v5t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV6
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v6_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV7
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v7_early_abort
+# endif
+#endif
+
+#ifndef CPU_DABORT_HANDLER
+#error Unknown data abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
new file mode 100644
index 000000000000..d385f37c13f0
--- /dev/null
+++ b/arch/arm/include/asm/glue-pf.h
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/include/asm/glue-pf.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_PF_H
+#define ASM_GLUE_PF_H
+
+#include <asm/glue.h>
+
+/*
+ * Prefetch Abort Model
+ * ====================
+ *
+ * We have the following to choose from:
+ * legacy - no IFSR, no IFAR
+ * v6 - ARMv6: IFSR, no IFAR
+ * v7 - ARMv7: IFSR and IFAR
+ */
+
+#undef CPU_PABORT_HANDLER
+#undef MULTI_PABORT
+
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER v6_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V7
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER v7_pabort
+# endif
+#endif
+
+#ifndef CPU_PABORT_HANDLER
+#error Unknown prefetch abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
new file mode 100644
index 000000000000..e2be7f142668
--- /dev/null
+++ b/arch/arm/include/asm/glue-proc.h
@@ -0,0 +1,264 @@
+/*
+ * arch/arm/include/asm/glue-proc.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_PROC_H
+#define ASM_GLUE_PROC_H
+
+#include <asm/glue.h>
+
+/*
+ * Work out if we need multiple CPU support
+ */
+#undef MULTI_CPU
+#undef CPU_NAME
+
+/*
+ * CPU_NAME - the prefix for CPU related functions
+ */
+
+#ifdef CONFIG_CPU_ARM610
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm6
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM7TDMI
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm7tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM710
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm7
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM720T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm720
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm740
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm9tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm920
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM922T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm922
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FA526
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_fa526
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm925
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm926
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm940
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm946
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA110
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_sa110
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_sa1100
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1020
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1020e
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1022
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1026
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_xscale
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_xsc3
+# endif
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_mohawk
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_feroceon
+# endif
+#endif
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v6
+# endif
+#endif
+
+#ifdef CONFIG_CPU_V7
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v7
+# endif
+#endif
+
+#ifndef MULTI_CPU
+#define cpu_proc_init __glue(CPU_NAME,_proc_init)
+#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
+#define cpu_reset __glue(CPU_NAME,_reset)
+#define cpu_do_idle __glue(CPU_NAME,_do_idle)
+#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
+#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
+#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
+#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
+#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
+#define cpu_do_resume __glue(CPU_NAME,_do_resume)
+#endif
+
+#endif
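
When only one CPU type survives the selection above, the cpu_* entry points are resolved purely by token pasting through __glue(), with no function pointers involved. A hedged sketch of that expansion, assuming CONFIG_CPU_V7 is the only processor selected; cpu_v7_do_idle is assumed to be provided elsewhere (in the kernel it comes from proc-v7.S):

/* sketch of the single-CPU expansion; mirrors the header only loosely */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define CPU_NAME	cpu_v7				/* assume CONFIG_CPU_V7 only */
#define cpu_do_idle	__glue(CPU_NAME, _do_idle)	/* -> cpu_v7_do_idle */

extern int cpu_v7_do_idle(void);			/* assumed to exist elsewhere */

int enter_idle(void)
{
	return cpu_do_idle();				/* direct call, no MULTI_CPU indirection */
}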
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index 234a3fc1c78e..0ec35d1698aa 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -15,7 +15,6 @@
*/
#ifdef __KERNEL__
-
#ifdef __STDC__
#define ____glue(name,fn) name##fn
#else
@@ -23,141 +22,4 @@
#endif
#define __glue(name,fn) ____glue(name,fn)
-
-
-/*
- * Data Abort Model
- * ================
- *
- * We have the following to choose from:
- * arm6 - ARM6 style
- * arm7 - ARM7 style
- * v4_early - ARMv4 without Thumb early abort handler
- * v4t_late - ARMv4 with Thumb late abort handler
- * v4t_early - ARMv4 with Thumb early abort handler
- * v5tej_early - ARMv5 with Thumb and Java early abort handler
- * xscale - ARMv5 with Thumb with Xscale extensions
- * v6_early - ARMv6 generic early abort handler
- * v7_early - ARMv7 generic early abort handler
- */
-#undef CPU_DABORT_HANDLER
-#undef MULTI_DABORT
-
-#if defined(CONFIG_CPU_ARM610)
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER cpu_arm6_data_abort
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM710)
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER cpu_arm7_data_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_LV4T
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v4t_late_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV4
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v4_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV4T
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v4t_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV5TJ
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v5tj_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV5T
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v5t_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV6
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v6_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV7
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER v7_early_abort
-# endif
-#endif
-
-#ifndef CPU_DABORT_HANDLER
-#error Unknown data abort handler type
-#endif
-
-/*
- * Prefetch Abort Model
- * ================
- *
- * We have the following to choose from:
- * legacy - no IFSR, no IFAR
- * v6 - ARMv6: IFSR, no IFAR
- * v7 - ARMv7: IFSR and IFAR
- */
-
-#undef CPU_PABORT_HANDLER
-#undef MULTI_PABORT
-
-#ifdef CONFIG_CPU_PABRT_LEGACY
-# ifdef CPU_PABORT_HANDLER
-# define MULTI_PABORT 1
-# else
-# define CPU_PABORT_HANDLER legacy_pabort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_PABRT_V6
-# ifdef CPU_PABORT_HANDLER
-# define MULTI_PABORT 1
-# else
-# define CPU_PABORT_HANDLER v6_pabort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_PABRT_V7
-# ifdef CPU_PABORT_HANDLER
-# define MULTI_PABORT 1
-# else
-# define CPU_PABORT_HANDLER v7_pabort
-# endif
-#endif
-
-#ifndef CPU_PABORT_HANDLER
-#error Unknown prefetch abort handler type
-#endif
-
#endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e1735c..16bd48031583 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@
#define L2X0_RAW_INTR_STAT 0x21C
#define L2X0_INTR_CLEAR 0x220
#define L2X0_CACHE_SYNC 0x730
+#define L2X0_DUMMY_REG 0x740
#define L2X0_INV_LINE_PA 0x770
#define L2X0_INV_WAY 0x77C
#define L2X0_CLEAN_LINE_PA 0x7B0
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index 721847dc68ab..e0d1c0cfa548 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -58,6 +58,9 @@
static inline void sysctl_soft_reset(void __iomem *base)
{
+ /* switch to slow mode */
+ writel(0x2, base + SCCTRL);
+
/* writing any value to SCSYSSTAT reg will reset system */
writel(0, base + SCSYSSTAT);
}
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7080e2c8fa62..a4edd19dd3d6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -19,11 +19,36 @@
extern pte_t *pkmap_page_table;
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context. With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so as not to pay the price of establishing a second mapping if an existing
+ * one can be used. However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
#define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif
-extern void *kmap_high(struct page *page);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
-extern void kunmap_high(struct page *page);
+#else
+static inline void *kmap_high_get(struct page *page)
+{
+ return NULL;
+}
+#endif
/*
* The following functions are already defined by <linux/highmem.h>
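
With this change a caller can always invoke kmap_high_get() and treat a NULL return as "no pinned mapping to reuse". A hedged sketch of the calling pattern; the kmap()/kunmap() fallback shown here is purely illustrative (real ARM callers typically use an atomic mapping instead):

#include <linux/highmem.h>

/* sketch: reuse an already-pinned kmap of a highmem page, else make our own */
static void touch_page(struct page *page)
{
	void *vaddr = kmap_high_get(page);	/* NULL when the "get" variant is unavailable */

	if (vaddr) {
		/* ... operate on vaddr ... */
		kunmap_high(page);		/* balance the reference kmap_high_get() took */
	} else {
		vaddr = kmap(page);		/* illustrative fallback path */
		/* ... operate on vaddr ... */
		kunmap(page);
	}
}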
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 6bc63ab498ce..080d74f8128d 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -44,8 +44,14 @@ int local_timer_ack(void);
/*
* Setup a local timer interrupt for a CPU.
*/
-void local_timer_setup(struct clock_event_device *);
+int local_timer_setup(struct clock_event_device *);
+#else
+
+static inline int local_timer_setup(struct clock_event_device *evt)
+{
+ return -ENXIO;
+}
#endif
#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 3a0893a76a3b..bf13b814c1b8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -15,10 +15,6 @@ struct meminfo;
struct sys_timer;
struct machine_desc {
- /*
- * Note! The first two elements are used
- * by assembler code in head.S, head-common.S
- */
unsigned int nr; /* architecture number */
const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index d0ee74b7cf86..431077c5a867 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
+#include <linux/types.h>
#include <mach/memory.h>
#include <asm/sizes.h>
@@ -133,20 +134,10 @@
#endif
/*
- * Physical vs virtual RAM address space conversion. These are
- * private definitions which should NOT be used outside memory.h
- * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-#ifndef __virt_to_phys
-#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-
-/*
* Convert a physical address to a Page Frame Number and back
*/
-#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
-#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
/*
* Convert a page to/from a physical address
@@ -157,6 +148,62 @@
#ifndef __ASSEMBLY__
/*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/*
+ * Constants used to force the right instruction encodings and shifts
+ * so that all we need to do is modify the 8-bit constant field.
+ */
+#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x00810000
+
+extern unsigned long __pv_phys_offset;
+#define PHYS_OFFSET __pv_phys_offset
+
+#define __pv_stub(from,to,instr,type) \
+ __asm__("@ __pv_stub\n" \
+ "1: " instr " %0, %1, %2\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "=r" (to) \
+ : "r" (from), "I" (type))
+
+static inline unsigned long __virt_to_phys(unsigned long x)
+{
+ unsigned long t;
+ __pv_stub(x, t, "add", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+ __pv_stub(t, t, "add", __PV_BITS_23_16);
+#endif
+ return t;
+}
+
+static inline unsigned long __phys_to_virt(unsigned long x)
+{
+ unsigned long t;
+ __pv_stub(x, t, "sub", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+ __pv_stub(t, t, "sub", __PV_BITS_23_16);
+#endif
+ return t;
+}
+#else
+#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#endif
+
+/*
* The DMA mask corresponding to the maximum bus address allocatable
* using GFP_DMA. The default here places no restriction on DMA
* allocations. This must be the smallest DMA mask in the system,
@@ -188,12 +235,12 @@
* translation for translating DMA addresses. Use the driver
* DMA support - see dma-mapping.h.
*/
-static inline unsigned long virt_to_phys(const volatile void *x)
+static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys((unsigned long)(x));
}
-static inline void *phys_to_virt(unsigned long x)
+static inline void *phys_to_virt(phys_addr_t x)
{
return (void *)(__phys_to_virt((unsigned long)(x)));
}
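
Whether the offset is patched into the instruction stream at boot or computed from constants, the translation itself is a fixed linear shift between the two address spaces. A small userspace-style sketch of the fallback arithmetic, using hypothetical example offsets:

#include <stdio.h>

#define EX_PAGE_OFFSET	0xC0000000UL	/* hypothetical kernel virtual base */
#define EX_PHYS_OFFSET	0x80000000UL	/* hypothetical start of RAM */

/* sketch of the non-patched fallback: subtract one base, add the other */
static unsigned long ex_virt_to_phys(unsigned long va)
{
	return va - EX_PAGE_OFFSET + EX_PHYS_OFFSET;
}

static unsigned long ex_phys_to_virt(unsigned long pa)
{
	return pa - EX_PHYS_OFFSET + EX_PAGE_OFFSET;
}

int main(void)
{
	printf("virt 0x%lx -> phys 0x%lx\n", 0xC0001000UL, ex_virt_to_phys(0xC0001000UL));
	printf("phys 0x%lx -> virt 0x%lx\n", 0x80001000UL, ex_phys_to_virt(0x80001000UL));
	return 0;
}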
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 12c8e680cbff..543b44916d2c 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -25,8 +25,31 @@ struct mod_arch_specific {
};
/*
- * Include the ARM architecture version.
+ * Add the ARM architecture version to the version magic string
*/
-#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#endif
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_ARMVSN \
+ MODULE_ARCH_VERMAGIC_ARMTHUMB \
+ MODULE_ARCH_VERMAGIC_P2V
#endif /* _ASM_ARM_MODULE_H */
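
The three fragments are adjacent string literals, so the preprocessor concatenates them into a single magic string; an ARMv7 Thumb-2 kernel with 8-bit p2v patching would, for example, contribute "ARMv7 thumb2 p2v8 ". A tiny sketch of the same concatenation with hypothetical values:

#include <stdio.h>

#define VSN	"ARMv7 "	/* hypothetical __LINUX_ARM_ARCH__ == 7 */
#define THUMB	"thumb2 "	/* hypothetical CONFIG_THUMB2_KERNEL=y */
#define P2V	"p2v8 "		/* hypothetical 8-bit phys/virt patching */

int main(void)
{
	/* adjacent string literals merge, exactly as the vermagic macros rely on */
	printf("vermagic fragment: \"%s\"\n", VSN THUMB P2V);
	return 0;
}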
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index fc1900925275..88ad89209764 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -21,6 +21,8 @@
#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H
+#include <linux/types.h>
+
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
@@ -37,17 +39,17 @@ struct outer_cache_fns {
extern struct outer_cache_fns outer_cache;
-static inline void outer_inv_range(unsigned long start, unsigned long end)
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
@@ -73,11 +75,11 @@ static inline void outer_disable(void)
#else
-static inline void outer_inv_range(unsigned long start, unsigned long end)
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_flush_all(void) { }
static inline void outer_inv_all(void) { }
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 9763be04f77e..a87d4cf82f6f 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
+#include <linux/pagemap.h>
+
#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
@@ -28,7 +30,7 @@
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
-#define pgd_populate(mm,pmd,pte) BUG()
+#define pud_populate(mm,pmd,pte) BUG()
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ebcb6432f45f..c2663f4d38e6 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,15 +11,16 @@
#define _ASMARM_PGTABLE_H
#include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>
#ifndef CONFIG_MMU
+#include <asm-generic/4level-fixup.h>
#include "pgtable-nommu.h"
#else
+#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
@@ -292,19 +293,22 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
*/
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_present(pgd) (1)
-#define pgd_clear(pgdp) do { } while (0)
-#define set_pgd(pgd,pgdp) do { } while (0)
+#define pud_none(pud) (0)
+#define pud_bad(pud) (0)
+#define pud_present(pud) (1)
+#define pud_clear(pudp) do { } while (0)
+#define set_pud(pud,pudp) do { } while (0)
/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr) ((pmd_t *)(dir))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud;
+}
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
@@ -351,7 +355,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_unmap(pte) __pte_unmap(pte)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn,prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
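
With the pud folded away, the canonical page-table walk still compiles unchanged: the pud accessors are no-ops and pmd_offset() now simply reinterprets the pud entry. A hedged sketch of such a walk (error handling elided; this is not taken from the kernel sources):

#include <linux/mm.h>

/* sketch: walk to the pte for addr; on ARM the pud level folds away at compile time */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);	/* trivial with the folded pud */
	pmd_t *pmd = pmd_offset(pud, addr);	/* now takes a pud_t *, per the change above */

	if (pmd_none(*pmd))
		return NULL;

	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() when done */
}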
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 8ccea012722c..7544ce6b481a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -12,11 +12,25 @@
#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__
+#include <linux/interrupt.h>
+
enum arm_pmu_type {
ARM_PMU_DEVICE_CPU = 0,
ARM_NUM_PMU_DEVICES,
};
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the interrupt and
+ * passed the address of the low level handler, and can be used to implement
+ * any platform specific handling before or after calling it.
+ */
+struct arm_pmu_platdata {
+ irqreturn_t (*handle_irq)(int irq, void *dev,
+ irq_handler_t pmu_handler);
+};
+
#ifdef CONFIG_CPU_HAS_PMU
/**
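
The handle_irq hook lets a platform interpose on the PMU interrupt without the core PMU code knowing about it. A hedged sketch of what a board file might register; all names here are illustrative:

#include <linux/interrupt.h>
#include <asm/pmu.h>

/* sketch: wrap the core PMU handler with hypothetical platform-specific work */
static irqreturn_t board_pmu_handle_irq(int irq, void *dev, irq_handler_t pmu_handler)
{
	irqreturn_t ret;

	/* hypothetical pre-handling (e.g. acking a combined interrupt) would go here */
	ret = pmu_handler(irq, dev);	/* invoke the low-level handler passed in by the core */
	/* hypothetical post-handling would go here */

	return ret;
}

static struct arm_pmu_platdata board_pmu_platdata = {
	.handle_irq = board_pmu_handle_irq,
};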
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8fdae9bc9abb..8ec535e11fd7 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -13,250 +13,86 @@
#ifdef __KERNEL__
+#include <asm/glue-proc.h>
+#include <asm/page.h>
-/*
- * Work out if we need multiple CPU support
- */
-#undef MULTI_CPU
-#undef CPU_NAME
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
/*
- * CPU_NAME - the prefix for CPU related functions
+ * Don't change this structure - ASM code relies on it.
*/
-
-#ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm6
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM7TDMI
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm7tdmi
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm7
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM720T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm720
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM740T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm740
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM9TDMI
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm9tdmi
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM920T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm920
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM922T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm922
-# endif
-#endif
-
-#ifdef CONFIG_CPU_FA526
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_fa526
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM925T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm925
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM926T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm926
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM940T
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm940
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM946E
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm946
-# endif
-#endif
-
-#ifdef CONFIG_CPU_SA110
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_sa110
-# endif
-#endif
-
-#ifdef CONFIG_CPU_SA1100
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_sa1100
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1020
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1020
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1020E
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1020e
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1022
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1022
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1026
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm1026
-# endif
-#endif
-
-#ifdef CONFIG_CPU_XSCALE
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_xscale
-# endif
-#endif
-
-#ifdef CONFIG_CPU_XSC3
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_xsc3
-# endif
-#endif
-
-#ifdef CONFIG_CPU_MOHAWK
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_mohawk
-# endif
-#endif
-
-#ifdef CONFIG_CPU_FEROCEON
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_feroceon
-# endif
-#endif
-
-#ifdef CONFIG_CPU_V6
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_v6
-# endif
-#endif
-
-#ifdef CONFIG_CPU_V7
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_v7
-# endif
-#endif
-
-#ifndef __ASSEMBLY__
+extern struct processor {
+ /* MISC
+ * get data abort address/flags
+ */
+ void (*_data_abort)(unsigned long pc);
+ /*
+ * Retrieve prefetch fault address
+ */
+ unsigned long (*_prefetch_abort)(unsigned long lr);
+ /*
+ * Set up any processor specifics
+ */
+ void (*_proc_init)(void);
+ /*
+ * Disable any processor specifics
+ */
+ void (*_proc_fin)(void);
+ /*
+ * Special stuff for a reset
+ */
+ void (*reset)(unsigned long addr) __attribute__((noreturn));
+ /*
+ * Idle the processor
+ */
+ int (*_do_idle)(void);
+ /*
+ * Processor architecture specific
+ */
+ /*
+ * clean a virtual address range from the
+ * D-cache without flushing the cache.
+ */
+ void (*dcache_clean_area)(void *addr, int size);
+
+ /*
+ * Set the page table
+ */
+ void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+ /*
+ * Set a possibly extended PTE. Non-extended PTEs should
+ * ignore 'ext'.
+ */
+ void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+
+ /* Suspend/resume */
+ unsigned int suspend_size;
+ void (*do_suspend)(void *);
+ void (*do_resume)(void *);
+} processor;
#ifndef MULTI_CPU
-#include <asm/cpu-single.h>
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#else
-#include <asm/cpu-multi32.h>
+#define cpu_proc_init() processor._proc_init()
+#define cpu_proc_fin() processor._proc_fin()
+#define cpu_reset(addr) processor.reset(addr)
+#define cpu_do_idle() processor._do_idle()
+#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
+#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
#endif
+extern void cpu_resume(void);
+
#include <asm/memory.h>
#ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 67357baaeeeb..b439b41aeac1 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -29,19 +29,7 @@
#define STACK_TOP_MAX TASK_SIZE
#endif
-union debug_insn {
- u32 arm;
- u16 thumb;
-};
-
-struct debug_entry {
- u32 address;
- union debug_insn insn;
-};
-
struct debug_info {
- int nsaved;
- struct debug_entry bp[2];
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
#endif
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 783d50f32618..a8ff22b2a391 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -130,8 +130,6 @@ struct pt_regs {
#ifdef __KERNEL__
-#define arch_has_single_step() (1)
-
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index f1e5a9bca249..95176af3df8c 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -192,14 +192,10 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn }
/*
* Memory map description
*/
-#ifdef CONFIG_ARCH_LH7A40X
-# define NR_BANKS 16
-#else
-# define NR_BANKS 8
-#endif
+#define NR_BANKS 8
struct membank {
- unsigned long start;
+ phys_addr_t start;
unsigned long size;
unsigned int highmem;
};
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 2376835015d6..4eb6d005ffaa 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -1,7 +1,14 @@
#ifndef __ASMARM_ARCH_SCU_H
#define __ASMARM_ARCH_SCU_H
+#define SCU_PM_NORMAL 0
+#define SCU_PM_DORMANT 2
+#define SCU_PM_POWEROFF 3
+
+#ifndef __ASSEMBLER__
unsigned int scu_get_core_count(void __iomem *);
void scu_enable(void __iomem *);
+int scu_power_mode(void __iomem *, unsigned int);
+#endif
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707dd..fdd3820edff8 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,52 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
+/*
+ * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 9998b\n" \
+ " " up "\n" \
+ " .popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV ALT_SMP("sev.w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code). By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */
+#define WFE(cond) ALT_SMP( \
+ "it " cond "\n\t" \
+ "wfe" cond ".n", \
+ \
+ "nop.w" \
+)
+#else
+#define SEV ALT_SMP("sev", "nop")
+#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#endif
+
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb\n"
- "sev"
+ SEV
);
-#elif defined(CONFIG_CPU_32v6K)
+#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
- "sev"
+ SEV
: : "r" (0)
);
#endif
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-" wfene\n"
-#endif
+ WFE("ne")
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-" wfene\n"
-#endif
+ WFE("ne")
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-" wfemi\n"
-#endif
+ WFE("mi")
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
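
Translated out of the inline assembly, lock acquisition is still the same loop: load the lock word exclusively, wait for an event while it is held, then attempt the exclusive store. A very loose C rendering of that control flow; __ldrex(), __strex(), __wfe() and __smp_mb() are illustrative stand-ins, not real kernel interfaces:

/* stand-in prototypes, for illustration only */
extern unsigned long __ldrex(volatile unsigned long *addr);
extern int __strex(unsigned long val, volatile unsigned long *addr);
extern void __wfe(void);
extern void __smp_mb(void);

static void sketch_spin_lock(volatile unsigned long *lock)
{
	unsigned long tmp;

	do {
		tmp = __ldrex(lock);		/* exclusive load of the lock word */
		if (tmp != 0)
			__wfe();		/* lock held: sleep until the owner issues sev */
	} while (tmp != 0 || __strex(1, lock));	/* claim it; retry if the store-exclusive fails */

	__smp_mb();				/* acquire barrier once the lock is ours */
}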
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 97f6d60297d5..9a87823642d0 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg.h>
#endif
-#else /* __LINUX_ARM_ARCH__ >= 6 */
+#else /* min ARCH >= ARMv6 */
extern void __bad_cmpxchg(volatile void *ptr, int size);
@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long oldval, res;
switch (size) {
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
: "memory", "cc");
} while (res);
break;
-#endif /* CONFIG_CPU_32v6K */
+#endif
case 4:
do {
asm volatile("@ __cmpxchg4\n"
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long ret;
switch (size) {
-#ifndef CONFIG_CPU_32v6K
+#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
-#endif /* !CONFIG_CPU_32v6K */
+#endif
default:
ret = __cmpxchg(ptr, old, new, size);
}
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
/*
* Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
(unsigned long long)(o), \
(unsigned long long)(n)))
-#else /* !CONFIG_CPU_32v6K */
+#else /* min ARCH = ARMv6 */
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif /* CONFIG_CPU_32v6K */
+#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd12..f9f6ecd43350 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
#define __ASMARM_TLB_H
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#ifndef CONFIG_MMU
#include <linux/pagemap.h>
+
+#define tlb_flush(tlb) ((void) tlb)
+
#include <asm-generic/tlb.h>
#else /* !CONFIG_MMU */
+#include <linux/swap.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb) 0
+#define FREE_PTE_NR 500
+#else
+#define tlb_fast_mode(tlb) 1
+#define FREE_PTE_NR 0
+#endif
/*
* TLB handling. This allows us to remove pages from the page
@@ -36,12 +54,58 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
+ struct vm_area_struct *vma;
unsigned long range_start;
unsigned long range_end;
+ unsigned int nr;
+ struct page *pages[FREE_PTE_NR];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+/*
+ * This is unnecessarily complex. There are three ways the TLB shootdown
+ * code is used:
+ * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
+ * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ * tlb->vma will be non-NULL.
+ * 2. Unmapping all vmas. See exit_mmap().
+ * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ * tlb->vma will be non-NULL. Additionally, page tables will be freed.
+ * 3. Unmapping argument pages. See shift_arg_pages().
+ * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ * tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm || !tlb->vma)
+ flush_tlb_mm(tlb->mm);
+ else if (tlb->range_end > 0) {
+ flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+ tlb->range_start = TASK_SIZE;
+ tlb->range_end = 0;
+ }
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+ if (!tlb->fullmm) {
+ if (addr < tlb->range_start)
+ tlb->range_start = addr;
+ if (addr + PAGE_SIZE > tlb->range_end)
+ tlb->range_end = addr + PAGE_SIZE;
+ }
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush(tlb);
+ if (!tlb_fast_mode(tlb)) {
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ }
+}
+
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
+ tlb->vma = NULL;
+ tlb->nr = 0;
return tlb;
}
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- if (tlb->fullmm)
- flush_tlb_mm(tlb->mm);
+ tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
+ tlb_add_flush(tlb, addr);
}
/*
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
flush_cache_range(vma, vma->vm_start, vma->vm_end);
+ tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
@@ -97,13 +158,32 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
- if (!tlb->fullmm && tlb->range_end > 0)
- flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+ if (!tlb->fullmm)
+ tlb_flush(tlb);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ if (tlb_fast_mode(tlb)) {
+ free_page_and_swap_cache(page);
+ } else {
+ tlb->pages[tlb->nr++] = page;
+ if (tlb->nr >= FREE_PTE_NR)
+ tlb_flush_mmu(tlb);
+ }
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long addr)
+{
+ pgtable_page_dtor(pte);
+ tlb_add_flush(tlb, addr);
+ tlb_remove_page(tlb, pte);
}
-#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
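
The batching added here follows the classic shoot-down pattern: accumulate pages whose translations may still be cached in other CPUs' TLBs, flush, and only then hand the batch back to the allocator. A stand-alone sketch of that shape, with the kernel types and the real flush stubbed out:

#define BATCH_MAX 500				/* mirrors FREE_PTE_NR in spirit */

struct sketch_gather {
	void *pages[BATCH_MAX];
	unsigned int nr;
};

static void sketch_tlb_flush(void) { /* invalidate the stale TLB entries */ }
static void sketch_free_page(void *page) { (void)page; /* return the page to the allocator */ }

static void sketch_flush_mmu(struct sketch_gather *tlb)
{
	unsigned int i;

	sketch_tlb_flush();			/* after this, no CPU can reach the old mappings */
	for (i = 0; i < tlb->nr; i++)
		sketch_free_page(tlb->pages[i]);
	tlb->nr = 0;
}

static void sketch_remove_page(struct sketch_gather *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;		/* defer the free until after the flush */
	if (tlb->nr >= BATCH_MAX)
		sketch_flush_mmu(tlb);
}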
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ce7378ea15a2..d2005de383b8 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
-
-#ifndef CONFIG_MMU
-
-#define tlb_flush(tlb) ((void) tlb)
-
-#else /* CONFIG_MMU */
+#ifdef CONFIG_MMU
#include <asm/glue.h>
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index e71d6ff8d104..60843eb0f61c 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -28,15 +28,14 @@
#define tls_emu 1
#define has_tls_reg 1
#define set_tls set_tls_none
-#elif __LINUX_ARM_ARCH__ >= 7 || \
- (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define tls_emu 0
-#define has_tls_reg 1
-#define set_tls set_tls_v6k
-#elif __LINUX_ARM_ARCH__ == 6
+#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
#define set_tls set_tls_v6
+#elif defined(CONFIG_CPU_32v6K)
+#define tls_emu 0
+#define has_tls_reg 1
+#define set_tls set_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 1b960d5ef6a5..f90756dc16dc 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr)
extern void __init early_trap_init(void);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
extern void *vectors_page;
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 185ee822c935..74554f1742d7 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
+obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index e5e1e5387678..acca35aebe28 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
#endif
/* bitops */
-EXPORT_SYMBOL(_set_bit_le);
-EXPORT_SYMBOL(_test_and_set_bit_le);
-EXPORT_SYMBOL(_clear_bit_le);
-EXPORT_SYMBOL(_test_and_clear_bit_le);
-EXPORT_SYMBOL(_change_bit_le);
-EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);
#ifdef __ARMEB__
-EXPORT_SYMBOL(_set_bit_be);
-EXPORT_SYMBOL(_test_and_set_bit_be);
-EXPORT_SYMBOL(_clear_bit_be);
-EXPORT_SYMBOL(_test_and_clear_bit_be);
-EXPORT_SYMBOL(_change_bit_be);
-EXPORT_SYMBOL(_test_and_change_bit_be);
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
@@ -170,3 +164,7 @@ EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_offset);
+#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 82da66172132..927522cfc12e 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -114,6 +117,14 @@ int main(void)
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
+#ifdef MULTI_CPU
+ DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
+ DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
+ DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
+#endif
+#ifdef MULTI_CACHE
+ DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
+#endif
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index c6273a3bfc25..d86fcd44b220 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
* Assign resources.
*/
pci_bus_assign_resources(bus);
+
+ /*
+ * Enable bridges
+ */
+ pci_enable_bridges(bus);
}
/*
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index a0f07521ca8a..d2d983be096d 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
.macro addruart, rp, rv
.endm
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro senduart, rd, rx
mcr p14, 0, \rd, c0, c5, 0
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2b46fea36c9f..e8d885676807 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -16,7 +16,8 @@
*/
#include <asm/memory.h>
-#include <asm/glue.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index ae9464900168..051166c2a932 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,13 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
msr spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
- clrex @ clear the exclusive monitor
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
ldr r0, [sp]
strex r1, r2, [sp] @ clear the exclusive monitor
ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#else
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#endif
@@ -92,10 +92,10 @@
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
- clrex @ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
strex r1, r2, [sp] @ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 11db62806a1a..052b509e2d5f 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -338,7 +338,7 @@ static struct miscdevice etb_miscdev = {
.fops = &etb_fops,
};
-static int __init etb_probe(struct amba_device *dev, struct amba_id *id)
+static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
@@ -530,7 +530,7 @@ static ssize_t trace_mode_store(struct kobject *kobj,
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
-static int __init etm_probe(struct amba_device *dev, struct amba_id *id)
+static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8f57515bbdb0..c84b57d27d07 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -25,83 +25,6 @@
* machine ID for example).
*/
__HEAD
-__error_a:
-#ifdef CONFIG_DEBUG_LL
- mov r4, r1 @ preserve machine ID
- adr r0, str_a1
- bl printascii
- mov r0, r4
- bl printhex8
- adr r0, str_a2
- bl printascii
- adr r3, __lookup_machine_type_data
- ldmia r3, {r4, r5, r6} @ get machine desc list
- sub r4, r3, r4 @ get offset between virt&phys
- add r5, r5, r4 @ convert virt addresses to
- add r6, r6, r4 @ physical address space
-1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
- bl printhex8
- mov r0, #'\t'
- bl printch
- ldr r0, [r5, #MACHINFO_NAME] @ get machine name
- add r0, r0, r4
- bl printascii
- mov r0, #'\n'
- bl printch
- add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
- cmp r5, r6
- blo 1b
- adr r0, str_a3
- bl printascii
- b __error
-ENDPROC(__error_a)
-
-str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
- .align
-#else
- b __error
-#endif
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space). We have to calculate the offset.
- *
- * r1 = machine architecture number
- * Returns:
- * r3, r4, r6 corrupted
- * r5 = mach_info pointer in physical address space
- */
-__lookup_machine_type:
- adr r3, __lookup_machine_type_data
- ldmia r3, {r4, r5, r6}
- sub r3, r3, r4 @ get offset between virt&phys
- add r5, r5, r3 @ convert virt addresses to
- add r6, r6, r3 @ physical address space
-1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
- teq r3, r1 @ matches loader number?
- beq 2f @ found
- add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
- cmp r5, r6
- blo 1b
- mov r5, #0 @ unknown machine
-2: mov pc, lr
-ENDPROC(__lookup_machine_type)
-
-/*
- * Look in arch/arm/kernel/arch.[ch] for information about the
- * __arch_info structures.
- */
- .align 2
- .type __lookup_machine_type_data, %object
-__lookup_machine_type_data:
- .long .
- .long __arch_info_begin
- .long __arch_info_end
- .size __lookup_machine_type_data, . - __lookup_machine_type_data
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
@@ -109,8 +32,6 @@ __lookup_machine_type_data:
* of this function may be more lenient with the physical address and
* may also be able to move the ATAGS block if necessary.
*
- * r8 = machinfo
- *
* Returns:
* r2 either valid atags pointer, or zero
* r5, r6 corrupted
@@ -185,17 +106,6 @@ __mmap_switched_data:
.size __mmap_switched_data, . - __mmap_switched_data
/*
- * This provides a C-API version of __lookup_machine_type
- */
-ENTRY(lookup_machine_type)
- stmfd sp!, {r4 - r6, lr}
- mov r1, r0
- bl __lookup_machine_type
- mov r0, r5
- ldmfd sp!, {r4 - r6, pc}
-ENDPROC(lookup_machine_type)
-
-/*
* This provides a C-API version of __lookup_processor_type
*/
ENTRY(lookup_processor_type)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 814ce1a73270..6b1e0ad9ec3b 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -44,9 +44,6 @@ ENTRY(stext)
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
beq __error_p @ yes, error 'p'
- bl __lookup_machine_type @ r5=machinfo
- movs r8, r5 @ invalid machine (r5=0)?
- beq __error_a @ yes, error 'a'
adr lr, BSYM(__after_proc_init) @ return (PIC) address
ARM( add pc, r10, #PROCINFO_INITFUNC )
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f06ff9feb0db..8f96ca067eb6 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -26,14 +26,6 @@
#include <mach/debug-macro.S>
#endif
-#if (PHYS_OFFSET & 0x001fffff)
-#error "PHYS_OFFSET must be at an even 2MiB boundary!"
-#endif
-
-#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
-#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)
-
-
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
@@ -41,6 +33,7 @@
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
+#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
@@ -48,8 +41,8 @@
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
- .macro pgtbl, rd
- ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
+ .macro pgtbl, rd, phys
+ add \rd, \phys, #TEXT_OFFSET - 0x4000
.endm
#ifdef CONFIG_XIP_KERNEL
@@ -87,25 +80,33 @@ ENTRY(stext)
movs r10, r5 @ invalid processor (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
- bl __lookup_machine_type @ r5=machinfo
- movs r8, r5 @ invalid machine (r5=0)?
- THUMB( it eq ) @ force fixup-able long branch encoding
- beq __error_a @ yes, error 'a'
+
+#ifndef CONFIG_XIP_KERNEL
+ adr r3, 2f
+ ldmia r3, {r4, r8}
+ sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
+ add r8, r8, r4 @ PHYS_OFFSET
+#else
+ ldr r8, =PLAT_PHYS_OFFSET
+#endif
/*
* r1 = machine no, r2 = atags,
- * r8 = machinfo, r9 = cpuid, r10 = procinfo
+ * r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
bl __fixup_smp
#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+ bl __fixup_pv_table
+#endif
bl __create_page_tables
/*
* The following calls CPU specific code in a position independent
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
- * xxx_proc_info structure selected by __lookup_machine_type
+ * xxx_proc_info structure selected by __lookup_processor_type
* above. On return, the CPU will be ready for the MMU to be
* turned on, and r0 will hold the CPU control register value.
*/
@@ -118,22 +119,24 @@ ENTRY(stext)
1: b __enable_mmu
ENDPROC(stext)
.ltorg
+#ifndef CONFIG_XIP_KERNEL
+2: .long .
+ .long PAGE_OFFSET
+#endif
/*
* Setup the initial page tables. We only setup the barest
* amount which are required to get the kernel running, which
* generally means mapping in the kernel code.
*
- * r8 = machinfo
- * r9 = cpuid
- * r10 = procinfo
+ * r8 = phys_offset, r9 = cpuid, r10 = procinfo
*
* Returns:
* r0, r3, r5-r7 corrupted
* r4 = physical page table address
*/
__create_page_tables:
- pgtbl r4 @ page table address
+ pgtbl r4, r8 @ page table address
/*
* Clear the 16K level 1 swapper page table
@@ -189,10 +192,8 @@ __create_page_tables:
/*
* Map some ram to cover our .data and .bss areas.
*/
- orr r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
- .if (KERNEL_RAM_PADDR & 0x00f00000)
- orr r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
- .endif
+ add r3, r8, #TEXT_OFFSET
+ orr r3, r3, r7
add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
ldr r6, =(_end - 1)
@@ -205,14 +206,17 @@ __create_page_tables:
#endif
/*
- * Then map first 1MB of ram in case it contains our boot params.
+ * Then map boot params address in r2 or
+ * the first 1MB of ram if boot params address is not specified.
*/
- add r0, r4, #PAGE_OFFSET >> 18
- orr r6, r7, #(PHYS_OFFSET & 0xff000000)
- .if (PHYS_OFFSET & 0x00f00000)
- orr r6, r6, #(PHYS_OFFSET & 0x00f00000)
- .endif
- str r6, [r0]
+ mov r0, r2, lsr #20
+ movs r0, r0, lsl #20
+ moveq r0, r8
+ sub r3, r0, r8
+ add r3, r3, #PAGE_OFFSET
+ add r3, r4, r3, lsr #18
+ orr r6, r7, r0
+ str r6, [r3]
#ifdef CONFIG_DEBUG_LL
#ifndef CONFIG_DEBUG_ICEDCC
@@ -457,4 +461,82 @@ ENTRY(fixup_smp)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/* __fixup_pv_table - patch the stub instructions with the delta between
+ * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
+ * can be expressed by an immediate shifter operand. The stub instruction
+ * has a form of '(add|sub) rd, rn, #imm'.
+ */
+ __HEAD
+__fixup_pv_table:
+ adr r0, 1f
+ ldmia r0, {r3-r5, r7}
+ sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
+ add r4, r4, r3 @ adjust table start address
+ add r5, r5, r3 @ adjust table end address
+ str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset
+#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+ mov r6, r3, lsr #24 @ constant for add/sub instructions
+ teq r3, r6, lsl #24 @ must be 16MiB aligned
+#else
+ mov r6, r3, lsr #16 @ constant for add/sub instructions
+ teq r3, r6, lsl #16 @ must be 64kiB aligned
+#endif
+ bne __error
+ str r6, [r7, #4] @ save to __pv_offset
+ b __fixup_a_pv_table
+ENDPROC(__fixup_pv_table)
+
+ .align
+1: .long .
+ .long __pv_table_begin
+ .long __pv_table_end
+2: .long __pv_phys_offset
+
+ .text
+__fixup_a_pv_table:
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+ and r0, r6, #255 @ offset bits 23-16
+ mov r6, r6, lsr #8 @ offset bits 31-24
+#else
+ mov r0, #0 @ just in case...
+#endif
+ b 3f
+2: ldr ip, [r7, r3]
+ bic ip, ip, #0x000000ff
+ tst ip, #0x400 @ rotate shift tells us LS or MS byte
+ orrne ip, ip, r6 @ mask in offset bits 31-24
+ orreq ip, ip, r0 @ mask in offset bits 23-16
+ str ip, [r7, r3]
+3: cmp r4, r5
+ ldrcc r7, [r4], #4 @ use branch for delay slot
+ bcc 2b
+ mov pc, lr
+ENDPROC(__fixup_a_pv_table)
+
+ENTRY(fixup_pv_table)
+ stmfd sp!, {r4 - r7, lr}
+ ldr r2, 2f @ get address of __pv_phys_offset
+ mov r3, #0 @ no offset
+ mov r4, r0 @ r0 = table start
+ add r5, r0, r1 @ r1 = table size
+ ldr r6, [r2, #4] @ get __pv_offset
+ bl __fixup_a_pv_table
+ ldmfd sp!, {r4 - r7, pc}
+ENDPROC(fixup_pv_table)
+
+ .align
+2: .long __pv_phys_offset
+
+ .data
+ .globl __pv_phys_offset
+ .type __pv_phys_offset, %object
+__pv_phys_offset:
+ .long 0
+ .size __pv_phys_offset, . - __pv_phys_offset
+__pv_offset:
+ .long 0
+#endif
+
#include "head-common.S"
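
Stripped of the assembler details, the fixup above is a table walk: every add/sub stub records its own address in the .pv_table section, and the loop rewrites the 8-bit immediate field of each recorded instruction with the delta computed at boot. A hedged C rendering of that loop; the field handling is simplified and this is not a drop-in replacement for the assembly:

#include <stdint.h>
#include <stddef.h>

/* sketch: patch the low 8-bit immediate of each recorded add/sub stub instruction */
static void sketch_fixup_pv_table(uint32_t **table, size_t entries, uint32_t imm8)
{
	size_t i;

	for (i = 0; i < entries; i++) {
		uint32_t *insn = table[i];		/* address of one 'add/sub rd, rn, #imm' */

		*insn = (*insn & ~0xffu) | imm8;	/* splice in the new offset constant */
		/* the real code also honours the rotate field and flushes the caches */
	}
}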
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index d600bd350704..44b84fe6e1b0 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
/*
* One-time initialisation.
*/
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
{
- int i;
+ int i, cpu = smp_processor_id();
+ u32 dbg_power;
+ cpumask_t *cpumask = info;
/*
* v7 debug contains save and restore registers so that debug state
@@ -850,6 +852,17 @@ static void reset_ctrl_regs(void *unused)
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
+ * Ensure sticky power-down is clear (i.e. debug logic is
+ * powered up).
+ */
+ asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+ if ((dbg_power & 0x1) == 0) {
+ pr_warning("CPU %d debug is powered down!\n", cpu);
+ cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+ return;
+ }
+
+ /*
* Unconditionally clear the lock by writing a value
* other than 0xC5ACCE55 to the access register.
*/
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
static int __init arch_hw_breakpoint_init(void)
{
u32 dscr;
+ cpumask_t cpumask = { CPU_BITS_NONE };
debug_arch = get_debug_arch();
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
- on_each_cpu(reset_ctrl_regs, NULL, 1);
+ on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+ if (!cpumask_empty(&cpumask)) {
+ core_num_brps = 0;
+ core_num_reserved_brps = 0;
+ core_num_wrps = 0;
+ return 0;
+ }
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 28536e352deb..3535d3793e65 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
#ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
{
- pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+ unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+ bool ret = false;
- raw_spin_lock_irq(&desc->lock);
- desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
- cpumask_of(cpu), false);
- raw_spin_unlock_irq(&desc->lock);
+ if (cpu >= nr_cpu_ids) {
+ cpu = cpumask_any(cpu_online_mask);
+ ret = true;
+ }
+
+ pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+ d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+ return ret;
}
/*
@@ -198,25 +205,30 @@ void migrate_irqs(void)
{
unsigned int i, cpu = smp_processor_id();
struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
for_each_irq_desc(i, desc) {
struct irq_data *d = &desc->irq_data;
+ bool affinity_broken = false;
- if (d->node == cpu) {
- unsigned int newcpu = cpumask_any_and(d->affinity,
- cpu_online_mask);
- if (newcpu >= nr_cpu_ids) {
- if (printk_ratelimit())
- printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
- i, cpu);
+ raw_spin_lock(&desc->lock);
+ do {
+ if (desc->action == NULL)
+ break;
- cpumask_setall(d->affinity);
- newcpu = cpumask_any_and(d->affinity,
- cpu_online_mask);
- }
+ if (d->node != cpu)
+ break;
- route_irq(desc, i, newcpu);
- }
+ affinity_broken = migrate_one_irq(d);
+ } while (0);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken && printk_ratelimit())
+ pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
}
+
+ local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8f6ed43861f1 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return space_cccc_1100_010x(insn, asi);
- } else if ((insn & 0x0e000000) == 0x0c400000) {
+ } else if ((insn & 0x0e000000) == 0x0c000000) {
return space_cccc_110x(insn, asi);
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6d4105e6872f..fee7c36349eb 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -76,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
unsigned long loc;
Elf32_Sym *sym;
+ const char *symname;
s32 offset;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
@@ -83,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset = ELF32_R_SYM(rel->r_info);
if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
- printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
+ pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
module->name, relindex, i);
return -ENOEXEC;
}
sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+ symname = strtab + sym->st_name;
if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
- printk(KERN_ERR "%s: out of bounds relocation, "
- "section %d reloc %d offset %d size %d\n",
- module->name, relindex, i, rel->r_offset,
- dstsec->sh_size);
+ pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+ module->name, relindex, i, symname,
+ rel->r_offset, dstsec->sh_size);
return -ENOEXEC;
}
@@ -120,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (offset & 3 ||
offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000) {
- printk(KERN_ERR
- "%s: relocation out of range, section "
- "%d reloc %d sym '%s'\n", module->name,
- relindex, i, strtab + sym->st_name);
+ pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
return -ENOEXEC;
}
@@ -196,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (!(offset & 1) ||
offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) {
- printk(KERN_ERR
- "%s: relocation out of range, section "
- "%d reloc %d sym '%s'\n", module->name,
- relindex, i, strtab + sym->st_name);
+ pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
return -ENOEXEC;
}
@@ -282,12 +283,13 @@ static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
return NULL;
}
+extern void fixup_pv_table(const void *, unsigned long);
extern void fixup_smp(const void *, unsigned long);
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
- const Elf_Shdr * __maybe_unused s = NULL;
+ const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
@@ -332,6 +334,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+ s = find_mod_section(hdr, sechdrs, ".pv_table");
+ if (s)
+ fixup_pv_table((void *)s->sh_addr, s->sh_size);
+#endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp())
fixup_smp((void *)s->sh_addr, s->sh_size);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d150ad1ccb5d..22e194eb8536 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -377,9 +377,18 @@ validate_group(struct perf_event *event)
return 0;
}
+static irqreturn_t armpmu_platform_irq(int irq, void *dev)
+{
+ struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
+
+ return plat->handle_irq(irq, dev, armpmu->handle_irq);
+}
+
static int
armpmu_reserve_hardware(void)
{
+ struct arm_pmu_platdata *plat;
+ irq_handler_t handle_irq;
int i, err = -ENODEV, irq;
pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
@@ -390,6 +399,12 @@ armpmu_reserve_hardware(void)
init_pmu(ARM_PMU_DEVICE_CPU);
+ plat = dev_get_platdata(&pmu_device->dev);
+ if (plat && plat->handle_irq)
+ handle_irq = armpmu_platform_irq;
+ else
+ handle_irq = armpmu->handle_irq;
+
if (pmu_device->num_resources < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
@@ -400,7 +415,7 @@ armpmu_reserve_hardware(void)
if (irq < 0)
continue;
- err = request_irq(irq, armpmu->handle_irq,
+ err = request_irq(irq, handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"armpmu", NULL);
if (err) {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index c058bfc8532b..6fc2d228db55 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -30,7 +30,7 @@
* enable the interrupt.
*/
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
ARMV6_PERFCTR_ICACHE_MISS = 0x0,
ARMV6_PERFCTR_IBUF_STALL = 0x1,
@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
-#endif /* CONFIG_CPU_V6 */
+#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e6..2c79eec19262 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
irq, cpu);
return err;
#else
- return 0;
+ return -EINVAL;
#endif
}
static int
init_cpu_pmu(void)
{
- int i, err = 0;
+ int i, irqs, err = 0;
struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
- if (!pdev) {
- err = -ENODEV;
- goto out;
- }
+ if (!pdev)
+ return -ENODEV;
+
+ irqs = pdev->num_resources;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift, assume that
+ * we're running on a uniprocessor machine and continue.
+ */
+ if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+ return 0;
- for (i = 0; i < pdev->num_resources; ++i) {
+ for (i = 0; i < irqs; ++i) {
err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
-out:
return err;
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816db61e..2bf27f364d09 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -26,8 +26,6 @@
#include <asm/system.h>
#include <asm/traps.h>
-#include "ptrace.h"
-
#define REG_PC 15
#define REG_PSR 16
/*
@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
return ret;
}
-static inline int
-read_u32(struct task_struct *task, unsigned long addr, u32 *res)
-{
- int ret;
-
- ret = access_process_vm(task, addr, res, sizeof(*res), 0);
-
- return ret == sizeof(*res) ? 0 : -EIO;
-}
-
-static inline int
-read_instr(struct task_struct *task, unsigned long addr, u32 *res)
-{
- int ret;
-
- if (addr & 1) {
- u16 val;
- ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
- ret = ret == sizeof(val) ? 0 : -EIO;
- *res = val;
- } else {
- u32 val;
- ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
- ret = ret == sizeof(val) ? 0 : -EIO;
- *res = val;
- }
- return ret;
-}
-
-/*
- * Get value of register `rn' (in the instruction)
- */
-static unsigned long
-ptrace_getrn(struct task_struct *child, unsigned long insn)
-{
- unsigned int reg = (insn >> 16) & 15;
- unsigned long val;
-
- val = get_user_reg(child, reg);
- if (reg == 15)
- val += 8;
-
- return val;
-}
-
-/*
- * Get value of operand 2 (in an ALU instruction)
- */
-static unsigned long
-ptrace_getaluop2(struct task_struct *child, unsigned long insn)
-{
- unsigned long val;
- int shift;
- int type;
-
- if (insn & 1 << 25) {
- val = insn & 255;
- shift = (insn >> 8) & 15;
- type = 3;
- } else {
- val = get_user_reg (child, insn & 15);
-
- if (insn & (1 << 4))
- shift = (int)get_user_reg (child, (insn >> 8) & 15);
- else
- shift = (insn >> 7) & 31;
-
- type = (insn >> 5) & 3;
- }
-
- switch (type) {
- case 0: val <<= shift; break;
- case 1: val >>= shift; break;
- case 2:
- val = (((signed long)val) >> shift);
- break;
- case 3:
- val = (val >> shift) | (val << (32 - shift));
- break;
- }
- return val;
-}
-
-/*
- * Get value of operand 2 (in a LDR instruction)
- */
-static unsigned long
-ptrace_getldrop2(struct task_struct *child, unsigned long insn)
-{
- unsigned long val;
- int shift;
- int type;
-
- val = get_user_reg(child, insn & 15);
- shift = (insn >> 7) & 31;
- type = (insn >> 5) & 3;
-
- switch (type) {
- case 0: val <<= shift; break;
- case 1: val >>= shift; break;
- case 2:
- val = (((signed long)val) >> shift);
- break;
- case 3:
- val = (val >> shift) | (val << (32 - shift));
- break;
- }
- return val;
-}
-
-#define OP_MASK 0x01e00000
-#define OP_AND 0x00000000
-#define OP_EOR 0x00200000
-#define OP_SUB 0x00400000
-#define OP_RSB 0x00600000
-#define OP_ADD 0x00800000
-#define OP_ADC 0x00a00000
-#define OP_SBC 0x00c00000
-#define OP_RSC 0x00e00000
-#define OP_ORR 0x01800000
-#define OP_MOV 0x01a00000
-#define OP_BIC 0x01c00000
-#define OP_MVN 0x01e00000
-
-static unsigned long
-get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
-{
- u32 alt = 0;
-
- switch (insn & 0x0e000000) {
- case 0x00000000:
- case 0x02000000: {
- /*
- * data processing
- */
- long aluop1, aluop2, ccbit;
-
- if ((insn & 0x0fffffd0) == 0x012fff10) {
- /*
- * bx or blx
- */
- alt = get_user_reg(child, insn & 15);
- break;
- }
-
-
- if ((insn & 0xf000) != 0xf000)
- break;
-
- aluop1 = ptrace_getrn(child, insn);
- aluop2 = ptrace_getaluop2(child, insn);
- ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
-
- switch (insn & OP_MASK) {
- case OP_AND: alt = aluop1 & aluop2; break;
- case OP_EOR: alt = aluop1 ^ aluop2; break;
- case OP_SUB: alt = aluop1 - aluop2; break;
- case OP_RSB: alt = aluop2 - aluop1; break;
- case OP_ADD: alt = aluop1 + aluop2; break;
- case OP_ADC: alt = aluop1 + aluop2 + ccbit; break;
- case OP_SBC: alt = aluop1 - aluop2 + ccbit; break;
- case OP_RSC: alt = aluop2 - aluop1 + ccbit; break;
- case OP_ORR: alt = aluop1 | aluop2; break;
- case OP_MOV: alt = aluop2; break;
- case OP_BIC: alt = aluop1 & ~aluop2; break;
- case OP_MVN: alt = ~aluop2; break;
- }
- break;
- }
-
- case 0x04000000:
- case 0x06000000:
- /*
- * ldr
- */
- if ((insn & 0x0010f000) == 0x0010f000) {
- unsigned long base;
-
- base = ptrace_getrn(child, insn);
- if (insn & 1 << 24) {
- long aluop2;
-
- if (insn & 0x02000000)
- aluop2 = ptrace_getldrop2(child, insn);
- else
- aluop2 = insn & 0xfff;
-
- if (insn & 1 << 23)
- base += aluop2;
- else
- base -= aluop2;
- }
- read_u32(child, base, &alt);
- }
- break;
-
- case 0x08000000:
- /*
- * ldm
- */
- if ((insn & 0x00108000) == 0x00108000) {
- unsigned long base;
- unsigned int nr_regs;
-
- if (insn & (1 << 23)) {
- nr_regs = hweight16(insn & 65535) << 2;
-
- if (!(insn & (1 << 24)))
- nr_regs -= 4;
- } else {
- if (insn & (1 << 24))
- nr_regs = -4;
- else
- nr_regs = 0;
- }
-
- base = ptrace_getrn(child, insn);
-
- read_u32(child, base + nr_regs, &alt);
- break;
- }
- break;
-
- case 0x0a000000: {
- /*
- * bl or b
- */
- signed long displ;
- /* It's a branch/branch link: instead of trying to
- * figure out whether the branch will be taken or not,
- * we'll put a breakpoint at both locations. This is
- * simpler, more reliable, and probably not a whole lot
- * slower than the alternative approach of emulating the
- * branch.
- */
- displ = (insn & 0x00ffffff) << 8;
- displ = (displ >> 6) + 8;
- if (displ != 0 && displ != 4)
- alt = pc + displ;
- }
- break;
- }
-
- return alt;
-}
-
-static int
-swap_insn(struct task_struct *task, unsigned long addr,
- void *old_insn, void *new_insn, int size)
-{
- int ret;
-
- ret = access_process_vm(task, addr, old_insn, size, 0);
- if (ret == size)
- ret = access_process_vm(task, addr, new_insn, size, 1);
- return ret;
-}
-
-static void
-add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
-{
- int nr = dbg->nsaved;
-
- if (nr < 2) {
- u32 new_insn = BREAKINST_ARM;
- int res;
-
- res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
-
- if (res == 4) {
- dbg->bp[nr].address = addr;
- dbg->nsaved += 1;
- }
- } else
- printk(KERN_ERR "ptrace: too many breakpoints\n");
-}
-
-/*
- * Clear one breakpoint in the user program. We copy what the hardware
- * does and use bit 0 of the address to indicate whether this is a Thumb
- * breakpoint or an ARM breakpoint.
- */
-static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
-{
- unsigned long addr = bp->address;
- union debug_insn old_insn;
- int ret;
-
- if (addr & 1) {
- ret = swap_insn(task, addr & ~1, &old_insn.thumb,
- &bp->insn.thumb, 2);
-
- if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
- printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
- "0x%08lx (0x%04x)\n", task->comm,
- task_pid_nr(task), addr, old_insn.thumb);
- } else {
- ret = swap_insn(task, addr & ~3, &old_insn.arm,
- &bp->insn.arm, 4);
-
- if (ret != 4 || old_insn.arm != BREAKINST_ARM)
- printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
- "0x%08lx (0x%08x)\n", task->comm,
- task_pid_nr(task), addr, old_insn.arm);
- }
-}
-
-void ptrace_set_bpt(struct task_struct *child)
-{
- struct pt_regs *regs;
- unsigned long pc;
- u32 insn;
- int res;
-
- regs = task_pt_regs(child);
- pc = instruction_pointer(regs);
-
- if (thumb_mode(regs)) {
- printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
- return;
- }
-
- res = read_instr(child, pc, &insn);
- if (!res) {
- struct debug_info *dbg = &child->thread.debug;
- unsigned long alt;
-
- dbg->nsaved = 0;
-
- alt = get_branch_address(child, pc, insn);
- if (alt)
- add_breakpoint(child, dbg, alt);
-
- /*
- * Note that we ignore the result of setting the above
- * breakpoint since it may fail. When it does, this is
- * not so much an error, but a forewarning that we may
- * be receiving a prefetch abort shortly.
- *
- * If we don't set this breakpoint here, then we can
- * lose control of the thread during single stepping.
- */
- if (!alt || predicate(insn) != PREDICATE_ALWAYS)
- add_breakpoint(child, dbg, pc + 4);
- }
-}
-
-/*
- * Ensure no single-step breakpoint is pending. Returns non-zero
- * value if child was being single-stepped.
- */
-void ptrace_cancel_bpt(struct task_struct *child)
-{
- int i, nsaved = child->thread.debug.nsaved;
-
- child->thread.debug.nsaved = 0;
-
- if (nsaved > 2) {
- printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
- nsaved = 2;
- }
-
- for (i = 0; i < nsaved; i++)
- clear_breakpoint(child, &child->thread.debug.bp[i]);
-}
-
-void user_disable_single_step(struct task_struct *task)
-{
- task->ptrace &= ~PT_SINGLESTEP;
- ptrace_cancel_bpt(task);
-}
-
-void user_enable_single_step(struct task_struct *task)
-{
- task->ptrace |= PT_SINGLESTEP;
-}
-
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
- user_disable_single_step(child);
+ /* Nothing to do. */
}
/*
@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
siginfo_t info;
- ptrace_cancel_bpt(tsk);
-
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
@@ -996,10 +615,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;
- if (idx & 0x1)
- reg = encode_ctrl_reg(arch_ctrl);
- else
+ if (num & 0x1)
reg = bp->attr.bp_addr;
+ else
+ reg = encode_ctrl_reg(arch_ctrl);
}
put:
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h
deleted file mode 100644
index 3926605b82ea..000000000000
--- a/arch/arm/kernel/ptrace.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * linux/arch/arm/kernel/ptrace.h
- *
- * Copyright (C) 2000-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/ptrace.h>
-
-extern void ptrace_cancel_bpt(struct task_struct *);
-extern void ptrace_set_bpt(struct task_struct *);
-extern void ptrace_break(struct task_struct *, struct pt_regs *);
-
-/*
- * Send SIGTRAP if we're single-stepping
- */
-static inline void single_step_trap(struct task_struct *task)
-{
- if (task->ptrace & PT_SINGLESTEP) {
- ptrace_cancel_bpt(task);
- send_sig(SIGTRAP, task, 1);
- }
-}
-
-static inline void single_step_clear(struct task_struct *task)
-{
- if (task->ptrace & PT_SINGLESTEP)
- ptrace_cancel_bpt(task);
-}
-
-static inline void single_step_set(struct task_struct *task)
-{
- if (task->ptrace & PT_SINGLESTEP)
- ptrace_set_bpt(task);
-}
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index df246da4ceca..0b13a72f855d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -9,6 +9,7 @@
* the Free Software Foundation.
*/
#include <linux/module.h>
+#include <linux/ftrace.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d6..d149539ccd68 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
asm("mrc p15, 0, %0, c0, c1, 4"
: "=r" (mmfr0));
- if ((mmfr0 & 0x0000000f) == 0x00000003 ||
- (mmfr0 & 0x000000f0) == 0x00000030)
+ if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+ (mmfr0 & 0x000000f0) >= 0x00000030)
cpu_arch = CPU_ARCH_ARMv7;
else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
(mmfr0 & 0x000000f0) == 0x00000020)
@@ -308,7 +308,22 @@ static void __init cacheid_init(void)
* already provide the required functionality.
*/
extern struct proc_info_list *lookup_processor_type(unsigned int);
-extern struct machine_desc *lookup_machine_type(unsigned int);
+
+static void __init early_print(const char *str, ...)
+{
+ extern void printascii(const char *);
+ char buf[256];
+ va_list ap;
+
+ va_start(ap, str);
+ vsnprintf(buf, sizeof(buf), str, ap);
+ va_end(ap);
+
+#ifdef CONFIG_DEBUG_LL
+ printascii(buf);
+#endif
+ printk("%s", buf);
+}
static void __init feat_v6_fixup(void)
{
@@ -426,30 +441,38 @@ void cpu_init(void)
static struct machine_desc * __init setup_machine(unsigned int nr)
{
- struct machine_desc *list;
+ extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+ struct machine_desc *p;
/*
* locate machine in the list of supported machines.
*/
- list = lookup_machine_type(nr);
- if (!list) {
- printk("Machine configuration botched (nr %d), unable "
- "to continue.\n", nr);
- while (1);
- }
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+ if (nr == p->nr) {
+ printk("Machine: %s\n", p->name);
+ return p;
+ }
- printk("Machine: %s\n", list->name);
+ early_print("\n"
+ "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
+ "Available machine support:\n\nID (hex)\tNAME\n", nr);
- return list;
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+ early_print("%08x\t%s\n", p->nr, p->name);
+
+ early_print("\nPlease check your kernel config and/or bootloader.\n");
+
+ while (true)
+ /* can't use cpu_relax() here as it may require MMU setup */;
}
-static int __init arm_add_memory(unsigned long start, unsigned long size)
+static int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
- "ignoring memory at %#lx\n", start);
+ "ignoring memory at 0x%08llx\n", (long long)start);
return -EINVAL;
}
@@ -479,7 +502,8 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
- unsigned long size, start;
+ unsigned long size;
+ phys_addr_t start;
char *endp;
/*
@@ -703,7 +727,7 @@ static struct init_tags {
{ tag_size(tag_core), ATAG_CORE },
{ 1, PAGE_SIZE, 0xff },
{ tag_size(tag_mem32), ATAG_MEM },
- { MEM_SIZE, PHYS_OFFSET },
+ { MEM_SIZE },
{ 0, ATAG_NONE }
};
@@ -802,6 +826,8 @@ void __init setup_arch(char **cmdline_p)
struct machine_desc *mdesc;
char *from = default_command_line;
+ init_tags.mem.start = PHYS_OFFSET;
+
unwind_init();
setup_processor();
@@ -814,8 +840,25 @@ void __init setup_arch(char **cmdline_p)
if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
- else if (mdesc->boot_params)
- tags = phys_to_virt(mdesc->boot_params);
+ else if (mdesc->boot_params) {
+#ifdef CONFIG_MMU
+ /*
+ * We still are executing with a minimal MMU mapping created
+ * with the presumption that the machine default for this
+ * is located in the first MB of RAM. Anything else will
+ * fault and silently hang the kernel at this point.
+ */
+ if (mdesc->boot_params < PHYS_OFFSET ||
+ mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
+ printk(KERN_WARNING
+ "Default boot params at physical 0x%08lx out of reach\n",
+ mdesc->boot_params);
+ } else
+#endif
+ {
+ tags = phys_to_virt(mdesc->boot_params);
+ }
+ }
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
/*
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bca..cb8398317644 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -20,7 +20,6 @@
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "ptrace.h"
#include "signal.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (restore_sigframe(regs, frame))
goto badframe;
- single_step_trap(current);
-
return regs->ARM_r0;
badframe:
@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
goto badframe;
- single_step_trap(current);
-
return regs->ARM_r0;
badframe:
@@ -474,7 +469,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
- unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+ unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+ cpsr |= PSR_ENDSTATE;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
@@ -704,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (try_to_freeze())
goto no_signal;
- single_step_clear(current);
-
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
sigset_t *oldset;
@@ -724,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
- single_step_set(current);
return;
}
@@ -770,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
- single_step_set(current);
}
asmlinkage void
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
new file mode 100644
index 000000000000..bfad698a02e7
--- /dev/null
+++ b/arch/arm/kernel/sleep.S
@@ -0,0 +1,134 @@
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+ .text
+
+/*
+ * Save CPU state for a suspend
+ * r1 = v:p offset
+ * r3 = virtual return function
+ * Note: sp is decremented to allocate space for CPU state on stack
+ * r0-r3,r9,r10,lr corrupted
+ */
+ENTRY(cpu_suspend)
+ mov r9, lr
+#ifdef MULTI_CPU
+ ldr r10, =processor
+ mov r2, sp @ current virtual SP
+ ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+ ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+ sub sp, sp, r0 @ allocate CPU state on stack
+ mov r0, sp @ save pointer
+ add ip, ip, r1 @ convert resume fn to phys
+ stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn
+ ldr r3, =sleep_save_sp
+ add r2, sp, r1 @ convert SP to phys
+#ifdef CONFIG_SMP
+ ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+ ALT_UP(mov lr, #0)
+ and lr, lr, #15
+ str r2, [r3, lr, lsl #2] @ save phys SP
+#else
+ str r2, [r3] @ save phys SP
+#endif
+ mov lr, pc
+ ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
+#else
+ mov r2, sp @ current virtual SP
+ ldr r0, =cpu_suspend_size
+ sub sp, sp, r0 @ allocate CPU state on stack
+ mov r0, sp @ save pointer
+ stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
+ ldr r3, =sleep_save_sp
+ add r2, sp, r1 @ convert SP to phys
+#ifdef CONFIG_SMP
+ ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+ ALT_UP(mov lr, #0)
+ and lr, lr, #15
+ str r2, [r3, lr, lsl #2] @ save phys SP
+#else
+ str r2, [r3] @ save phys SP
+#endif
+ bl cpu_do_suspend
+#endif
+
+ @ flush data cache
+#ifdef MULTI_CACHE
+ ldr r10, =cpu_cache
+ mov lr, r9
+ ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+ mov lr, r9
+ b __cpuc_flush_kern_all
+#endif
+ENDPROC(cpu_suspend)
+ .ltorg
+
+/*
+ * r0 = control register value
+ * r1 = v:p offset (preserved by cpu_do_resume)
+ * r2 = phys page table base
+ * r3 = L1 section flags
+ */
+ENTRY(cpu_resume_mmu)
+ adr r4, cpu_resume_turn_mmu_on
+ mov r4, r4, lsr #20
+ orr r3, r3, r4, lsl #20
+ ldr r5, [r2, r4, lsl #2] @ save old mapping
+ str r3, [r2, r4, lsl #2] @ setup 1:1 mapping for mmu code
+ sub r2, r2, r1
+ ldr r3, =cpu_resume_after_mmu
+ bic r1, r0, #CR_C @ ensure D-cache is disabled
+ b cpu_resume_turn_mmu_on
+ENDPROC(cpu_resume_mmu)
+ .ltorg
+ .align 5
+cpu_resume_turn_mmu_on:
+ mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc
+ mrc p15, 0, r1, c0, c0, 0 @ read id reg
+ mov r1, r1
+ mov r1, r1
+ mov pc, r3 @ jump to virtual address
+ENDPROC(cpu_resume_turn_mmu_on)
+cpu_resume_after_mmu:
+ str r5, [r2, r4, lsl #2] @ restore old mapping
+ mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
+ mov pc, lr
+ENDPROC(cpu_resume_after_mmu)
+
+/*
+ * Note: Yes, part of the following code is located into the .data section.
+ * This is to allow sleep_save_sp to be accessed with a relative load
+ * while we can't rely on any MMU translation. We could have put
+ * sleep_save_sp in the .text section as well, but some setups might
+ * insist on it to be truly read-only.
+ */
+ .data
+ .align
+ENTRY(cpu_resume)
+#ifdef CONFIG_SMP
+ adr r0, sleep_save_sp
+ ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+ ALT_UP(mov r1, #0)
+ and r1, r1, #15
+ ldr r0, [r0, r1, lsl #2] @ stack phys addr
+#else
+ ldr r0, sleep_save_sp @ stack phys addr
+#endif
+ msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+#ifdef MULTI_CPU
+ ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
+#else
+ ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
+ b cpu_do_resume
+#endif
+ENDPROC(cpu_resume)
+
+sleep_save_sp:
+ .rept CONFIG_NR_CPUS
+ .long 0 @ preserve stack phys ptr here
+ .endr
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4539ebcb089f..8fe05ad932e4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -474,13 +474,12 @@ static void smp_timer_broadcast(const struct cpumask *mask)
#define smp_timer_broadcast NULL
#endif
-#ifndef CONFIG_LOCAL_TIMERS
static void broadcast_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
}
-static void local_timer_setup(struct clock_event_device *evt)
+static void broadcast_timer_setup(struct clock_event_device *evt)
{
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -492,7 +491,6 @@ static void local_timer_setup(struct clock_event_device *evt)
clockevents_register_device(evt);
}
-#endif
void __cpuinit percpu_timer_setup(void)
{
@@ -502,7 +500,8 @@ void __cpuinit percpu_timer_setup(void)
evt->cpumask = cpumask_of(cpu);
evt->broadcast = smp_timer_broadcast;
- local_timer_setup(evt);
+ if (local_timer_setup(evt))
+ broadcast_timer_setup(evt);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 9ab4149bd983..a1e757c3439b 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
*/
flush_cache_all();
}
+
+/*
+ * Set the executing CPUs power mode as defined. This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed. Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+ unsigned int val;
+ int cpu = smp_processor_id();
+
+ if (mode > 3 || mode == 1 || cpu > 3)
+ return -EINVAL;
+
+ val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+ val |= mode;
+ __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+ return 0;
+}
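
[Editor's note, illustration only -- not part of the patch above.] scu_power_mode() is meant to be called on the CPU that is about to stop, with preemption and interrupts off and caches flushed, as its comment states. A hedged usage sketch follows; the function name, the scu_base argument and the mode value 3 (power-off on the Cortex-A9 SCU) are assumptions for illustration, not code from this commit.

	/* Hedged usage sketch: a platform hotplug/idle path would typically
	 * flush its caches, drop out of coherency via scu_power_mode(), and
	 * then sit in WFI waiting to be powered down. */
	static void platform_enter_lowpower_sketch(void __iomem *scu_base)
	{
		flush_cache_all();		/* caches must be clean first */
		scu_power_mode(scu_base, 3);	/* mark this CPU as powered off */
		for (;;)
			asm volatile("wfi");	/* wait for the power controller */
	}
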
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 26685c2f7a49..f5cf660eefcc 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -15,7 +15,7 @@
#include <linux/string.h> /* memcpy */
#include <asm/cputype.h>
#include <asm/mach/map.h>
-#include <mach/memory.h>
+#include <asm/memory.h>
#include "tcm.h"
static struct gen_pool *tcm_pool;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 3d76bf233734..1ff46cabc7ef 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -107,9 +107,7 @@ void timer_tick(void)
{
profile_tick(CPU_PROFILING);
do_leds();
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index ee57640ba2bb..f0000e188c8c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -23,6 +23,7 @@
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -32,7 +33,6 @@
#include <asm/unwind.h>
#include <asm/tls.h>
-#include "ptrace.h"
#include "signal.h"
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
return ret;
}
-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.
@@ -712,17 +712,17 @@ EXPORT_SYMBOL(__readwrite_bug);
void __pte_error(const char *file, int line, pte_t pte)
{
- printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte));
+ printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}
void __pmd_error(const char *file, int line, pmd_t pmd)
{
- printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd));
+ printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}
void __pgd_error(const char *file, int line, pgd_t pgd)
{
- printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd));
+ printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}
asmlinkage void __div0(void)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f2031..b4348e62ef06 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
#define ARM_CPU_KEEP(x)
#endif
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x) x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -43,6 +49,7 @@ SECTIONS
_sinittext = .;
HEAD_TEXT
INIT_TEXT
+ ARM_EXIT_KEEP(EXIT_TEXT)
_einittext = .;
ARM_CPU_DISCARD(PROC_INFO)
__arch_info_begin = .;
@@ -57,6 +64,10 @@ SECTIONS
__smpalt_end = .;
#endif
+ __pv_table_begin = .;
+ *(.pv_table)
+ __pv_table_end = .;
+
INIT_SETUP(16)
INIT_CALLS
@@ -67,10 +78,11 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
+ ARM_EXIT_KEEP(EXIT_DATA)
#endif
}
- PERCPU(PAGE_SIZE)
+ PERCPU(32, PAGE_SIZE)
#ifndef CONFIG_XIP_KERNEL
. = ALIGN(PAGE_SIZE);
@@ -162,6 +174,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
+ ARM_EXIT_KEEP(EXIT_DATA)
. = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
@@ -247,6 +260,8 @@ SECTIONS
}
#endif
+ NOTES
+
BSS_SECTION(0, 0, 0)
_end = .;
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d42252918bfb..10d868a5a481 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,44 +1,52 @@
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
+#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, instr
+ ands ip, r1, #3
+ strneb r1, [ip] @ assert word-aligned
mov r2, #1
- and r3, r0, #7 @ Get bit offset
- add r1, r1, r0, lsr #3 @ Get byte offset
+ and r3, r0, #31 @ Get bit offset
+ mov r0, r0, lsr #5
+ add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3
-1: ldrexb r2, [r1]
+1: ldrex r2, [r1]
\instr r2, r2, r3
- strexb r0, r2, [r1]
+ strex r0, r2, [r1]
cmp r0, #0
bne 1b
- mov pc, lr
+ bx lr
.endm
.macro testop, instr, store
- and r3, r0, #7 @ Get bit offset
+ ands ip, r1, #3
+ strneb r1, [ip] @ assert word-aligned
mov r2, #1
- add r1, r1, r0, lsr #3 @ Get byte offset
+ and r3, r0, #31 @ Get bit offset
+ mov r0, r0, lsr #5
+ add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
smp_dmb
-1: ldrexb r2, [r1]
+1: ldrex r2, [r1]
ands r0, r2, r3 @ save old value of bit
- \instr r2, r2, r3 @ toggle bit
- strexb ip, r2, [r1]
+ \instr r2, r2, r3 @ toggle bit
+ strex ip, r2, [r1]
cmp ip, #0
bne 1b
smp_dmb
cmp r0, #0
movne r0, #1
-2: mov pc, lr
+2: bx lr
.endm
#else
.macro bitop, instr
- and r2, r0, #7
+ ands ip, r1, #3
+ strneb r1, [ip] @ assert word-aligned
+ and r2, r0, #31
+ mov r0, r0, lsr #5
mov r3, #1
mov r3, r3, lsl r2
save_and_disable_irqs ip
- ldrb r2, [r1, r0, lsr #3]
+ ldr r2, [r1, r0, lsl #2]
\instr r2, r2, r3
- strb r2, [r1, r0, lsr #3]
+ str r2, [r1, r0, lsl #2]
restore_irqs ip
mov pc, lr
.endm
@@ -52,11 +60,13 @@
* to avoid dirtying the data cache.
*/
.macro testop, instr, store
- add r1, r1, r0, lsr #3
- and r3, r0, #7
- mov r0, #1
+ ands ip, r1, #3
+ strneb r1, [ip] @ assert word-aligned
+ and r3, r0, #31
+ mov r0, r0, lsr #5
save_and_disable_irqs ip
- ldrb r2, [r1]
+ ldr r2, [r1, r0, lsl #2]!
+ mov r0, #1
tst r2, r0, lsl r3
\instr r2, r2, r0, lsl r3
\store r2, [r1]
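
[Editor's note, illustration only -- not part of the patch above.] The rewritten bitops macros operate on 32-bit words instead of bytes, so the word pointer must be 4-byte aligned; the new "ands ip, r1, #3 / strneb r1, [ip]" pair deliberately stores to the bogus low address formed by the misaligned bits, faulting immediately on a caller bug. A rough C equivalent of the word-based bit set is sketched below, with assert() standing in for that faulting store.

	/* Illustration only: word-granular bit set with an alignment check,
	 * mirroring the new bitop macro above. */
	#include <stdint.h>
	#include <assert.h>

	static void set_bit_sketch(unsigned int nr, uint32_t *addr)
	{
		assert(((uintptr_t)addr & 3) == 0);	/* word aligned, or caller bug */
		addr[nr >> 5] |= 1u << (nr & 31);	/* word offset + bit offset */
	}
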
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 80f3115cbee2..68ed5b62e839 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,12 +12,6 @@
#include "bitops.h"
.text
-/* Purpose : Function to change a bit
- * Prototype: int change_bit(int bit, void *addr)
- */
-ENTRY(_change_bit_be)
- eor r0, r0, #0x18 @ big endian byte ordering
-ENTRY(_change_bit_le)
+ENTRY(_change_bit)
bitop eor
-ENDPROC(_change_bit_be)
-ENDPROC(_change_bit_le)
+ENDPROC(_change_bit)
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 1a63e43a1df0..4c04c3b51eeb 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,13 +12,6 @@
#include "bitops.h"
.text
-/*
- * Purpose : Function to clear a bit
- * Prototype: int clear_bit(int bit, void *addr)
- */
-ENTRY(_clear_bit_be)
- eor r0, r0, #0x18 @ big endian byte ordering
-ENTRY(_clear_bit_le)
+ENTRY(_clear_bit)
bitop bic
-ENDPROC(_clear_bit_be)
-ENDPROC(_clear_bit_le)
+ENDPROC(_clear_bit)
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index 1dd7176c4b2b..bbee5c66a23e 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,13 +12,6 @@
#include "bitops.h"
.text
-/*
- * Purpose : Function to set a bit
- * Prototype: int set_bit(int bit, void *addr)
- */
-ENTRY(_set_bit_be)
- eor r0, r0, #0x18 @ big endian byte ordering
-ENTRY(_set_bit_le)
+ENTRY(_set_bit)
bitop orr
-ENDPROC(_set_bit_be)
-ENDPROC(_set_bit_le)
+ENDPROC(_set_bit)
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 5c98dc567f0f..15a4d431f229 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,9 +12,6 @@
#include "bitops.h"
.text
-ENTRY(_test_and_change_bit_be)
- eor r0, r0, #0x18 @ big endian byte ordering
-ENTRY(_test_and_change_bit_le)
- testop eor, strb
-ENDPROC(_test_and_change_bit_be)
-ENDPROC(_test_and_change_bit_le)
+ENTRY(_test_and_change_bit)
+ testop eor, str
+ENDPROC(_test_and_change_bit)
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 543d7094d18e..521b66b5b95d 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,9 +12,6 @@
#include "bitops.h"
.text
-ENTRY(_test_and_clear_bit_be)
- eor r0, r0, #0x18 @ big endian byte ordering
-ENTRY(_test_and_clear_bit_le)
- testop bicne, strneb
-ENDPROC(_test_and_clear_bit_be)
-ENDPROC(_test_and_clear_bit_le)
+ENTRY(_test_and_clear_bit)
+ testop bicne, strne
+ENDPROC(_test_and_clear_bit)
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 0b3f390401ce..1c98cc2185bb 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,9 +12,6 @@
#include "bitops.h"
.text
-ENTRY(_test_and_set_bit_be)
- eor r0, r0, #0x18 @ big endian byte ordering
-ENTRY(_test_and_set_bit_le)
- testop orreq, streqb
-ENDPROC(_test_and_set_bit_be)
-ENDPROC(_test_and_set_bit_le)
+ENTRY(_test_and_set_bit)
+ testop orreq, streq
+ENDPROC(_test_and_set_bit)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index e2d2f2cd0c4f..8b9b13649f81 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -27,13 +27,18 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
+ pud_t *pud;
spinlock_t *ptl;
pgd = pgd_offset(current->mm, addr);
if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
return 0;
- pmd = pmd_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
+ if (unlikely(pud_none(*pud) || pud_bad(*pud)))
+ return 0;
+
+ pmd = pmd_offset(pud, addr);
if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
return 0;
diff --git a/arch/arm/mach-aaec2000/Kconfig b/arch/arm/mach-aaec2000/Kconfig
deleted file mode 100644
index 5e4bef93754c..000000000000
--- a/arch/arm/mach-aaec2000/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-if ARCH_AAEC2000
-
-menu "Agilent AAEC-2000 Implementations"
-
-config MACH_AAED2000
- bool "Agilent AAED-2000 Development Platform"
- select CPU_ARM920T
-
-endmenu
-
-endif
diff --git a/arch/arm/mach-aaec2000/Makefile b/arch/arm/mach-aaec2000/Makefile
deleted file mode 100644
index 20ec83896c37..000000000000
--- a/arch/arm/mach-aaec2000/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Common support (must be linked before board specific support)
-obj-y += core.o
-
-# Specific board support
-obj-$(CONFIG_MACH_AAED2000) += aaed2000.o
diff --git a/arch/arm/mach-aaec2000/Makefile.boot b/arch/arm/mach-aaec2000/Makefile.boot
deleted file mode 100644
index 8f5a8b7c53c7..000000000000
--- a/arch/arm/mach-aaec2000/Makefile.boot
+++ /dev/null
@@ -1 +0,0 @@
- zreladdr-y := 0xf0008000
diff --git a/arch/arm/mach-aaec2000/aaed2000.c b/arch/arm/mach-aaec2000/aaed2000.c
deleted file mode 100644
index 0eb3e3e5b2d1..000000000000
--- a/arch/arm/mach-aaec2000/aaed2000.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * linux/arch/arm/mach-aaec2000/aaed2000.c
- *
- * Support for the Agilent AAED-2000 Development Platform.
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/major.h>
-#include <linux/interrupt.h>
-
-#include <asm/setup.h>
-#include <asm/memory.h>
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <mach/aaed2000.h>
-
-#include "core.h"
-
-static void aaed2000_clcd_disable(struct clcd_fb *fb)
-{
- AAED_EXT_GPIO &= ~AAED_EGPIO_LCD_PWR_EN;
-}
-
-static void aaed2000_clcd_enable(struct clcd_fb *fb)
-{
- AAED_EXT_GPIO |= AAED_EGPIO_LCD_PWR_EN;
-}
-
-struct aaec2000_clcd_info clcd_info = {
- .enable = aaed2000_clcd_enable,
- .disable = aaed2000_clcd_disable,
- .panel = {
- .mode = {
- .name = "Sharp",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 20,
- .right_margin = 44,
- .upper_margin = 21,
- .lower_margin = 34,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IVS | TIM2_IHS,
- .cntl = CNTL_LCDTFT,
- .bpp = 16,
- },
-};
-
-static void __init aaed2000_init_irq(void)
-{
- aaec2000_init_irq();
-}
-
-static void __init aaed2000_init(void)
-{
- aaec2000_set_clcd_plat_data(&clcd_info);
-}
-
-static struct map_desc aaed2000_io_desc[] __initdata = {
- {
- .virtual = EXT_GPIO_VBASE,
- .pfn = __phys_to_pfn(EXT_GPIO_PBASE),
- .length = EXT_GPIO_LENGTH,
- .type = MT_DEVICE
- },
-};
-
-static void __init aaed2000_map_io(void)
-{
- aaec2000_map_io();
- iotable_init(aaed2000_io_desc, ARRAY_SIZE(aaed2000_io_desc));
-}
-
-MACHINE_START(AAED2000, "Agilent AAED-2000 Development Platform")
- /* Maintainer: Nicolas Bellido Y Ortega */
- .map_io = aaed2000_map_io,
- .init_irq = aaed2000_init_irq,
- .timer = &aaec2000_timer,
- .init_machine = aaed2000_init,
-MACHINE_END
diff --git a/arch/arm/mach-aaec2000/core.c b/arch/arm/mach-aaec2000/core.c
deleted file mode 100644
index f8465bd17e67..000000000000
--- a/arch/arm/mach-aaec2000/core.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * linux/arch/arm/mach-aaec2000/core.c
- *
- * Code common to all AAEC-2000 machines
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/signal.h>
-#include <linux/clk.h>
-#include <linux/gfp.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-
-#include <asm/mach/flash.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/time.h>
-#include <asm/mach/map.h>
-
-#include "core.h"
-
-/*
- * Common I/O mapping:
- *
- * Static virtual address mappings are as follow:
- *
- * 0xf8000000-0xf8001ffff: Devices connected to APB bus
- * 0xf8002000-0xf8003ffff: Devices connected to AHB bus
- *
- * Below 0xe8000000 is reserved for vm allocation.
- *
- * The machine specific code must provide the extra mapping beside the
- * default mapping provided here.
- */
-static struct map_desc standard_io_desc[] __initdata = {
- {
- .virtual = VIO_APB_BASE,
- .pfn = __phys_to_pfn(PIO_APB_BASE),
- .length = IO_APB_LENGTH,
- .type = MT_DEVICE
- }, {
- .virtual = VIO_AHB_BASE,
- .pfn = __phys_to_pfn(PIO_AHB_BASE),
- .length = IO_AHB_LENGTH,
- .type = MT_DEVICE
- }
-};
-
-void __init aaec2000_map_io(void)
-{
- iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc));
-}
-
-/*
- * Interrupt handling routines
- */
-static void aaec2000_int_ack(struct irq_data *d)
-{
- IRQ_INTSR = 1 << d->irq;
-}
-
-static void aaec2000_int_mask(struct irq_data *d)
-{
- IRQ_INTENC |= (1 << d->irq);
-}
-
-static void aaec2000_int_unmask(struct irq_data *d)
-{
- IRQ_INTENS |= (1 << d->irq);
-}
-
-static struct irq_chip aaec2000_irq_chip = {
- .irq_ack = aaec2000_int_ack,
- .irq_mask = aaec2000_int_mask,
- .irq_unmask = aaec2000_int_unmask,
-};
-
-void __init aaec2000_init_irq(void)
-{
- unsigned int i;
-
- for (i = 0; i < NR_IRQS; i++) {
- set_irq_handler(i, handle_level_irq);
- set_irq_chip(i, &aaec2000_irq_chip);
- set_irq_flags(i, IRQF_VALID);
- }
-
- /* Disable all interrupts */
- IRQ_INTENC = 0xffffffff;
-
- /* Clear any pending interrupts */
- IRQ_INTSR = IRQ_INTSR;
-}
-
-/*
- * Time keeping
- */
-/* IRQs are disabled before entering here from do_gettimeofday() */
-static unsigned long aaec2000_gettimeoffset(void)
-{
- unsigned long ticks_to_match, elapsed, usec;
-
- /* Get ticks before next timer match */
- ticks_to_match = TIMER1_LOAD - TIMER1_VAL;
-
- /* We need elapsed ticks since last match */
- elapsed = LATCH - ticks_to_match;
-
- /* Now, convert them to usec */
- usec = (unsigned long)(elapsed * (tick_nsec / 1000))/LATCH;
-
- return usec;
-}
-
-/* We enter here with IRQs enabled */
-static irqreturn_t
-aaec2000_timer_interrupt(int irq, void *dev_id)
-{
- /* TODO: Check timer accuracy */
- timer_tick();
- TIMER1_CLEAR = 1;
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction aaec2000_timer_irq = {
- .name = "AAEC-2000 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = aaec2000_timer_interrupt,
-};
-
-static void __init aaec2000_timer_init(void)
-{
- /* Disable timer 1 */
- TIMER1_CTRL = 0;
-
- /* We have somehow to generate a 100Hz clock.
- * We then use the 508KHz timer in periodic mode.
- */
- TIMER1_LOAD = LATCH;
- TIMER1_CLEAR = 1; /* Clear interrupt */
-
- setup_irq(INT_TMR1_OFL, &aaec2000_timer_irq);
-
- TIMER1_CTRL = TIMER_CTRL_ENABLE |
- TIMER_CTRL_PERIODIC |
- TIMER_CTRL_CLKSEL_508K;
-}
-
-struct sys_timer aaec2000_timer = {
- .init = aaec2000_timer_init,
- .offset = aaec2000_gettimeoffset,
-};
-
-static struct clcd_panel mach_clcd_panel;
-
-static int aaec2000_clcd_setup(struct clcd_fb *fb)
-{
- dma_addr_t dma;
-
- fb->panel = &mach_clcd_panel;
-
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, SZ_1M,
- &dma, GFP_KERNEL);
-
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = SZ_1M;
-
- return 0;
-}
-
-static int aaec2000_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
-}
-
-static void aaec2000_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
-}
-
-static struct clcd_board clcd_plat_data = {
- .name = "AAEC-2000",
- .check = clcdfb_check,
- .decode = clcdfb_decode,
- .setup = aaec2000_clcd_setup,
- .mmap = aaec2000_clcd_mmap,
- .remove = aaec2000_clcd_remove,
-};
-
-static struct amba_device clcd_device = {
- .dev = {
- .init_name = "mb:16",
- .coherent_dma_mask = ~0,
- .platform_data = &clcd_plat_data,
- },
- .res = {
- .start = AAEC_CLCD_PHYS,
- .end = AAEC_CLCD_PHYS + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- .irq = { INT_LCD, NO_IRQ },
- .periphid = 0x41110,
-};
-
-static struct amba_device *amba_devs[] __initdata = {
- &clcd_device,
-};
-
-void clk_disable(struct clk *clk)
-{
-}
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return 0;
-}
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- return dev && strcmp(dev_name(dev), "mb:16") == 0 ? NULL : ERR_PTR(-ENOENT);
-}
-
-void clk_put(struct clk *clk)
-{
-}
-
-void __init aaec2000_set_clcd_plat_data(struct aaec2000_clcd_info *clcd)
-{
- clcd_plat_data.enable = clcd->enable;
- clcd_plat_data.disable = clcd->disable;
- memcpy(&mach_clcd_panel, &clcd->panel, sizeof(struct clcd_panel));
-}
-
-static struct flash_platform_data aaec2000_flash_data = {
- .map_name = "cfi_probe",
- .width = 4,
-};
-
-static struct resource aaec2000_flash_resource = {
- .start = AAEC_FLASH_BASE,
- .end = AAEC_FLASH_BASE + AAEC_FLASH_SIZE,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device aaec2000_flash_device = {
- .name = "armflash",
- .id = 0,
- .dev = {
- .platform_data = &aaec2000_flash_data,
- },
- .num_resources = 1,
- .resource = &aaec2000_flash_resource,
-};
-
-static int __init aaec2000_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-
- platform_device_register(&aaec2000_flash_device);
-
- return 0;
-};
-arch_initcall(aaec2000_init);
-
diff --git a/arch/arm/mach-aaec2000/core.h b/arch/arm/mach-aaec2000/core.h
deleted file mode 100644
index 59501b573167..000000000000
--- a/arch/arm/mach-aaec2000/core.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * linux/arch/arm/mach-aaec2000/core.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-
-struct sys_timer;
-
-extern struct sys_timer aaec2000_timer;
-extern void __init aaec2000_map_io(void);
-extern void __init aaec2000_init_irq(void);
-
-struct aaec2000_clcd_info {
- struct clcd_panel panel;
- void (*disable)(struct clcd_fb *);
- void (*enable)(struct clcd_fb *);
-};
-
-extern void __init aaec2000_set_clcd_plat_data(struct aaec2000_clcd_info *);
-
diff --git a/arch/arm/mach-aaec2000/include/mach/aaec2000.h b/arch/arm/mach-aaec2000/include/mach/aaec2000.h
deleted file mode 100644
index bc729c42f843..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/aaec2000.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/aaec2000.h
- *
- * AAEC-2000 registers definition
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_AAEC2000_H
-#define __ASM_ARCH_AAEC2000_H
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#error You must include hardware.h not this file
-#endif /* __ASM_ARCH_HARDWARE_H */
-
-/* Chip selects */
-#define AAEC_CS0 0x00000000
-#define AAEC_CS1 0x10000000
-#define AAEC_CS2 0x20000000
-#define AAEC_CS3 0x30000000
-
-/* Flash */
-#define AAEC_FLASH_BASE AAEC_CS0
-#define AAEC_FLASH_SIZE SZ_64M
-
-/* Interrupt controller */
-#define IRQ_BASE __REG(0x80000500)
-#define IRQ_INTSR __REG(0x80000500) /* Int Status Register */
-#define IRQ_INTRSR __REG(0x80000504) /* Int Raw (unmasked) Status */
-#define IRQ_INTENS __REG(0x80000508) /* Int Enable Set */
-#define IRQ_INTENC __REG(0x8000050c) /* Int Enable Clear */
-
-/* UART 1 */
-#define UART1_BASE __REG(0x80000600)
-#define UART1_DR __REG(0x80000600) /* Data/FIFO Register */
-#define UART1_LCR __REG(0x80000604) /* Link Control Register */
-#define UART1_BRCR __REG(0x80000608) /* Baud Rate Control Register */
-#define UART1_CR __REG(0x8000060c) /* Control Register */
-#define UART1_SR __REG(0x80000610) /* Status Register */
-#define UART1_INT __REG(0x80000614) /* Interrupt Status Register */
-#define UART1_INTM __REG(0x80000618) /* Interrupt Mask Register */
-#define UART1_INTRES __REG(0x8000061c) /* Int Result (masked status) Register */
-
-/* UART 2 */
-#define UART2_BASE __REG(0x80000700)
-#define UART2_DR __REG(0x80000700) /* Data/FIFO Register */
-#define UART2_LCR __REG(0x80000704) /* Link Control Register */
-#define UART2_BRCR __REG(0x80000708) /* Baud Rate Control Register */
-#define UART2_CR __REG(0x8000070c) /* Control Register */
-#define UART2_SR __REG(0x80000710) /* Status Register */
-#define UART2_INT __REG(0x80000714) /* Interrupt Status Register */
-#define UART2_INTM __REG(0x80000718) /* Interrupt Mask Register */
-#define UART2_INTRES __REG(0x8000071c) /* Int Result (masked status) Register */
-
-/* UART 3 */
-#define UART3_BASE __REG(0x80000800)
-#define UART3_DR __REG(0x80000800) /* Data/FIFO Register */
-#define UART3_LCR __REG(0x80000804) /* Link Control Register */
-#define UART3_BRCR __REG(0x80000808) /* Baud Rate Control Register */
-#define UART3_CR __REG(0x8000080c) /* Control Register */
-#define UART3_SR __REG(0x80000810) /* Status Register */
-#define UART3_INT __REG(0x80000814) /* Interrupt Status Register */
-#define UART3_INTM __REG(0x80000818) /* Interrupt Mask Register */
-#define UART3_INTRES __REG(0x8000081c) /* Int Result (masked status) Register */
-
-/* These are used in some places */
-#define _UART1_BASE __PREG(UART1_BASE)
-#define _UART2_BASE __PREG(UART2_BASE)
-#define _UART3_BASE __PREG(UART3_BASE)
-
-/* UART Registers Offsets */
-#define UART_DR 0x00
-#define UART_LCR 0x04
-#define UART_BRCR 0x08
-#define UART_CR 0x0c
-#define UART_SR 0x10
-#define UART_INT 0x14
-#define UART_INTM 0x18
-#define UART_INTRES 0x1c
-
-/* UART_LCR Bitmask */
-#define UART_LCR_BRK (1 << 0) /* Send Break */
-#define UART_LCR_PEN (1 << 1) /* Parity Enable */
-#define UART_LCR_EP (1 << 2) /* Even/Odd Parity */
-#define UART_LCR_S2 (1 << 3) /* One/Two Stop bits */
-#define UART_LCR_FIFO (1 << 4) /* FIFO Enable */
-#define UART_LCR_WL5 (0 << 5) /* Word Length - 5 bits */
-#define UART_LCR_WL6 (1 << 5) /* Word Length - 6 bits */
-#define UART_LCR_WL7 (1 << 6) /* Word Length - 7 bits */
-#define UART_LCR_WL8 (1 << 7) /* Word Length - 8 bits */
-
-/* UART_CR Bitmask */
-#define UART_CR_EN (1 << 0) /* UART Enable */
-#define UART_CR_SIR (1 << 1) /* IrDA SIR Enable */
-#define UART_CR_SIRLP (1 << 2) /* Low Power IrDA Enable */
-#define UART_CR_RXP (1 << 3) /* Receive Pin Polarity */
-#define UART_CR_TXP (1 << 4) /* Transmit Pin Polarity */
-#define UART_CR_MXP (1 << 5) /* Modem Pin Polarity */
-#define UART_CR_LOOP (1 << 6) /* Loopback Mode */
-
-/* UART_SR Bitmask */
-#define UART_SR_CTS (1 << 0) /* Clear To Send Status */
-#define UART_SR_DSR (1 << 1) /* Data Set Ready Status */
-#define UART_SR_DCD (1 << 2) /* Data Carrier Detect Status */
-#define UART_SR_TxBSY (1 << 3) /* Transmitter Busy Status */
-#define UART_SR_RxFE (1 << 4) /* Receive FIFO Empty Status */
-#define UART_SR_TxFF (1 << 5) /* Transmit FIFO Full Status */
-#define UART_SR_RxFF (1 << 6) /* Receive FIFO Full Status */
-#define UART_SR_TxFE (1 << 7) /* Transmit FIFO Empty Status */
-
-/* UART_INT Bitmask */
-#define UART_INT_RIS (1 << 0) /* Rx Interrupt */
-#define UART_INT_TIS (1 << 1) /* Tx Interrupt */
-#define UART_INT_MIS (1 << 2) /* Modem Interrupt */
-#define UART_INT_RTIS (1 << 3) /* Receive Timeout Interrupt */
-
-/* Timer 1 */
-#define TIMER1_BASE __REG(0x80000c00)
-#define TIMER1_LOAD __REG(0x80000c00) /* Timer 1 Load Register */
-#define TIMER1_VAL __REG(0x80000c04) /* Timer 1 Value Register */
-#define TIMER1_CTRL __REG(0x80000c08) /* Timer 1 Control Register */
-#define TIMER1_CLEAR __REG(0x80000c0c) /* Timer 1 Clear Register */
-
-/* Timer 2 */
-#define TIMER2_BASE __REG(0x80000d00)
-#define TIMER2_LOAD __REG(0x80000d00) /* Timer 2 Load Register */
-#define TIMER2_VAL __REG(0x80000d04) /* Timer 2 Value Register */
-#define TIMER2_CTRL __REG(0x80000d08) /* Timer 2 Control Register */
-#define TIMER2_CLEAR __REG(0x80000d0c) /* Timer 2 Clear Register */
-
-/* Timer 3 */
-#define TIMER3_BASE __REG(0x80000e00)
-#define TIMER3_LOAD __REG(0x80000e00) /* Timer 3 Load Register */
-#define TIMER3_VAL __REG(0x80000e04) /* Timer 3 Value Register */
-#define TIMER3_CTRL __REG(0x80000e08) /* Timer 3 Control Register */
-#define TIMER3_CLEAR __REG(0x80000e0c) /* Timer 3 Clear Register */
-
-/* Timer Control register bits */
-#define TIMER_CTRL_ENABLE (1 << 7) /* Enable (Start Timer) */
-#define TIMER_CTRL_PERIODIC (1 << 6) /* Periodic Running Mode */
-#define TIMER_CTRL_FREE_RUNNING (0 << 6) /* Normal Running Mode */
-#define TIMER_CTRL_CLKSEL_508K (1 << 3) /* 508KHz Clock select (Timer 1, 2) */
-#define TIMER_CTRL_CLKSEL_2K (0 << 3) /* 2KHz Clock Select (Timer 1, 2) */
-
-/* Power and State Control */
-#define POWER_BASE __REG(0x80000400)
-#define POWER_PWRSR __REG(0x80000400) /* Power Status Register */
-#define POWER_PWRCNT __REG(0x80000404) /* Power/Clock control */
-#define POWER_HALT __REG(0x80000408) /* Power Idle Mode */
-#define POWER_STDBY __REG(0x8000040c) /* Power Standby Mode */
-#define POWER_BLEOI __REG(0x80000410) /* Battery Low End of Interrupt */
-#define POWER_MCEOI __REG(0x80000414) /* Media Changed EoI */
-#define POWER_TEOI __REG(0x80000418) /* Tick EoI */
-#define POWER_STFCLR __REG(0x8000041c) /* NbFlg, RSTFlg, PFFlg, CLDFlg Clear */
-#define POWER_CLKSET __REG(0x80000420) /* Clock Speed Control */
-
-/* GPIO Registers */
-#define AAEC_GPIO_PHYS 0x80000e00
-
-#define AAEC_GPIO_PADR __REG(AAEC_GPIO_PHYS + 0x00)
-#define AAEC_GPIO_PBDR __REG(AAEC_GPIO_PHYS + 0x04)
-#define AAEC_GPIO_PCDR __REG(AAEC_GPIO_PHYS + 0x08)
-#define AAEC_GPIO_PDDR __REG(AAEC_GPIO_PHYS + 0x0c)
-#define AAEC_GPIO_PADDR __REG(AAEC_GPIO_PHYS + 0x10)
-#define AAEC_GPIO_PBDDR __REG(AAEC_GPIO_PHYS + 0x14)
-#define AAEC_GPIO_PCDDR __REG(AAEC_GPIO_PHYS + 0x18)
-#define AAEC_GPIO_PDDDR __REG(AAEC_GPIO_PHYS + 0x1c)
-#define AAEC_GPIO_PEDR __REG(AAEC_GPIO_PHYS + 0x20)
-#define AAEC_GPIO_PEDDR __REG(AAEC_GPIO_PHYS + 0x24)
-#define AAEC_GPIO_KSCAN __REG(AAEC_GPIO_PHYS + 0x28)
-#define AAEC_GPIO_PINMUX __REG(AAEC_GPIO_PHYS + 0x2c)
-#define AAEC_GPIO_PFDR __REG(AAEC_GPIO_PHYS + 0x30)
-#define AAEC_GPIO_PFDDR __REG(AAEC_GPIO_PHYS + 0x34)
-#define AAEC_GPIO_PGDR __REG(AAEC_GPIO_PHYS + 0x38)
-#define AAEC_GPIO_PGDDR __REG(AAEC_GPIO_PHYS + 0x3c)
-#define AAEC_GPIO_PHDR __REG(AAEC_GPIO_PHYS + 0x40)
-#define AAEC_GPIO_PHDDR __REG(AAEC_GPIO_PHYS + 0x44)
-#define AAEC_GPIO_RAZ __REG(AAEC_GPIO_PHYS + 0x48)
-#define AAEC_GPIO_INTTYPE1 __REG(AAEC_GPIO_PHYS + 0x4c)
-#define AAEC_GPIO_INTTYPE2 __REG(AAEC_GPIO_PHYS + 0x50)
-#define AAEC_GPIO_FEOI __REG(AAEC_GPIO_PHYS + 0x54)
-#define AAEC_GPIO_INTEN __REG(AAEC_GPIO_PHYS + 0x58)
-#define AAEC_GPIO_INTSTATUS __REG(AAEC_GPIO_PHYS + 0x5c)
-#define AAEC_GPIO_RAWINTSTATUS __REG(AAEC_GPIO_PHYS + 0x60)
-#define AAEC_GPIO_DB __REG(AAEC_GPIO_PHYS + 0x64)
-#define AAEC_GPIO_PAPINDR __REG(AAEC_GPIO_PHYS + 0x68)
-#define AAEC_GPIO_PBPINDR __REG(AAEC_GPIO_PHYS + 0x6c)
-#define AAEC_GPIO_PCPINDR __REG(AAEC_GPIO_PHYS + 0x70)
-#define AAEC_GPIO_PDPINDR __REG(AAEC_GPIO_PHYS + 0x74)
-#define AAEC_GPIO_PEPINDR __REG(AAEC_GPIO_PHYS + 0x78)
-#define AAEC_GPIO_PFPINDR __REG(AAEC_GPIO_PHYS + 0x7c)
-#define AAEC_GPIO_PGPINDR __REG(AAEC_GPIO_PHYS + 0x80)
-#define AAEC_GPIO_PHPINDR __REG(AAEC_GPIO_PHYS + 0x84)
-
-#define AAEC_GPIO_PINMUX_PE0CON (1 << 0)
-#define AAEC_GPIO_PINMUX_PD0CON (1 << 1)
-#define AAEC_GPIO_PINMUX_CODECON (1 << 2)
-#define AAEC_GPIO_PINMUX_UART3CON (1 << 3)
-
-/* LCD Controller */
-#define AAEC_CLCD_PHYS 0x80003000
-
-#endif /* __ARM_ARCH_AAEC2000_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/aaed2000.h b/arch/arm/mach-aaec2000/include/mach/aaed2000.h
deleted file mode 100644
index f821295ca71b..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/aaed2000.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/aaed2000.h
- *
- * AAED-2000 specific bits definition
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_AAED2000_H
-#define __ASM_ARCH_AAED2000_H
-
-/* External GPIOs. */
-
-#define EXT_GPIO_PBASE AAEC_CS3
-#define EXT_GPIO_VBASE 0xf8100000
-#define EXT_GPIO_LENGTH 0x00001000
-
-#define __ext_gpio_p2v(x) ((x) - EXT_GPIO_PBASE + EXT_GPIO_VBASE)
-#define __ext_gpio_v2p(x) ((x) + EXT_GPIO_PBASE - EXT_GPIO_VBASE)
-
-#define __EXT_GPIO_REG(x) (*((volatile u32 *)__ext_gpio_p2v(x)))
-#define __EXT_GPIO_PREG(x) (__ext_gpio_v2p((u32)&(x)))
-
-#define AAED_EXT_GPIO __EXT_GPIO_REG(EXT_GPIO_PBASE)
-
-#define AAED_EGPIO_KBD_SCAN 0x00003fff /* Keyboard scan data */
-#define AAED_EGPIO_PWR_INT 0x00008fff /* Smart battery charger interrupt */
-#define AAED_EGPIO_SWITCHED 0x000f0000 /* DIP Switches */
-#define AAED_EGPIO_USB_VBUS 0x00400000 /* USB Vbus sense */
-#define AAED_EGPIO_LCD_PWR_EN 0x02000000 /* LCD and backlight PWR enable */
-#define AAED_EGPIO_nLED0 0x20000000 /* LED 0 */
-#define AAED_EGPIO_nLED1 0x20000000 /* LED 1 */
-#define AAED_EGPIO_nLED2 0x20000000 /* LED 2 */
-
-
-#endif /* __ARM_ARCH_AAED2000_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/debug-macro.S b/arch/arm/mach-aaec2000/include/mach/debug-macro.S
deleted file mode 100644
index bc7ad5561c4c..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/debug-macro.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/* arch/arm/mach-aaec2000/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "hardware.h"
- .macro addruart, rp, rv
- mov \rp, #0x00000800
- orr \rv, \rp, #io_p2v(0x80000000) @ virtual
- orr \rp, \rp, #0x80000000 @ physical
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #0x10]
- tst \rd, #(1 << 7)
- beq 1002b
- .endm
-
- .macro waituart,rd,rx
-#if 0
-1001: ldr \rd, [\rx, #0x10]
- tst \rd, #(1 << 5)
- beq 1001b
-#endif
- .endm
diff --git a/arch/arm/mach-aaec2000/include/mach/entry-macro.S b/arch/arm/mach-aaec2000/include/mach/entry-macro.S
deleted file mode 100644
index c8fb34469007..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/entry-macro.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/entry-macro.S
- *
- * Low-level IRQ helper for aaec-2000 based platforms
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <mach/irqs.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- mov r4, #0xf8000000
- add r4, r4, #0x00000500
- mov \base, r4
- ldr \irqstat, [\base, #0]
- cmp \irqstat, #0
- bne 1001f
- ldr \irqnr, =NR_IRQS+1
- b 1003f
-1001: mov \irqnr, #0
-1002: ands \tmp, \irqstat, #1
- mov \irqstat, \irqstat, LSR #1
- add \irqnr, \irqnr, #1
- beq 1002b
- sub \irqnr, \irqnr, #1
-1003:
- .endm
diff --git a/arch/arm/mach-aaec2000/include/mach/hardware.h b/arch/arm/mach-aaec2000/include/mach/hardware.h
deleted file mode 100644
index 965a6f6672d6..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/hardware.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/hardware.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <asm/sizes.h>
-#include <mach/aaec2000.h>
-
-/* The kernel is loaded at physical address 0xf8000000.
- * We map the IO space a bit after
- */
-#define PIO_APB_BASE 0x80000000
-#define VIO_APB_BASE 0xf8000000
-#define IO_APB_LENGTH 0x2000
-#define PIO_AHB_BASE 0x80002000
-#define VIO_AHB_BASE 0xf8002000
-#define IO_AHB_LENGTH 0x2000
-
-#define VIO_BASE VIO_APB_BASE
-#define PIO_BASE PIO_APB_BASE
-
-#define io_p2v(x) ( (x) - PIO_BASE + VIO_BASE )
-#define io_v2p(x) ( (x) + PIO_BASE - VIO_BASE )
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-
-/* FIXME: Is it needed to optimize this a la pxa ?? */
-#define __REG(x) (*((volatile u32 *)io_p2v(x)))
-#define __PREG(x) (io_v2p((u32)&(x)))
-
-#else /* __ASSEMBLY__ */
-
-#define __REG(x) io_p2v(x)
-#define __PREG(x) io_v2p(x)
-
-#endif
-
-#include "aaec2000.h"
-
-#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/io.h b/arch/arm/mach-aaec2000/include/mach/io.h
deleted file mode 100644
index ab4fe5d20eaf..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/io.h
- *
- * Copied from asm/arch/sa1100/io.h
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/mach-aaec2000/include/mach/irqs.h b/arch/arm/mach-aaec2000/include/mach/irqs.h
deleted file mode 100644
index bf45c6d2f294..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/irqs.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/irqs.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H
-
-
-#define INT_GPIOF0_FIQ 0 /* External GPIO Port F O Fast Interrupt Input */
-#define INT_BL_FIQ 1 /* Battery Low Fast Interrupt */
-#define INT_WE_FIQ 2 /* Watchdog Expired Fast Interrupt */
-#define INT_MV_FIQ 3 /* Media Changed Interrupt */
-#define INT_SC 4 /* Sound Codec Interrupt */
-#define INT_GPIO1 5 /* GPIO Port F Configurable Int 1 */
-#define INT_GPIO2 6 /* GPIO Port F Configurable Int 2 */
-#define INT_GPIO3 7 /* GPIO Port F Configurable Int 3 */
-#define INT_TMR1_OFL 8 /* Timer 1 Overflow Interrupt */
-#define INT_TMR2_OFL 9 /* Timer 2 Overflow Interrupt */
-#define INT_RTC_CM 10 /* RTC Compare Match Interrupt */
-#define INT_TICK 11 /* 64Hz Tick Interrupt */
-#define INT_UART1 12 /* UART1 Interrupt */
-#define INT_UART2 13 /* UART2 & Modem State Changed Interrupt */
-#define INT_LCD 14 /* LCD Interrupt */
-#define INT_SSI 15 /* SSI End of Transfer Interrupt */
-#define INT_UART3 16 /* UART3 Interrupt */
-#define INT_SCI 17 /* SCI Interrupt */
-#define INT_AAC 18 /* Advanced Audio Codec Interrupt */
-#define INT_MMC 19 /* MMC Interrupt */
-#define INT_USB 20 /* USB Interrupt */
-#define INT_DMA 21 /* DMA Interrupt */
-#define INT_TMR3_UOFL 22 /* Timer 3 Underflow Interrupt */
-#define INT_GPIO4 23 /* GPIO Port F Configurable Int 4 */
-#define INT_GPIO5 24 /* GPIO Port F Configurable Int 5 */
-#define INT_GPIO6 25 /* GPIO Port F Configurable Int 6 */
-#define INT_GPIO7 26 /* GPIO Port F Configurable Int 7 */
-#define INT_BMI 27 /* BMI Interrupt */
-
-#define NR_IRQS (INT_BMI + 1)
-
-#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/memory.h b/arch/arm/mach-aaec2000/include/mach/memory.h
deleted file mode 100644
index 4f93c567a35a..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/memory.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/memory.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-
-#define PHYS_OFFSET UL(0xf0000000)
-
-#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/system.h b/arch/arm/mach-aaec2000/include/mach/system.h
deleted file mode 100644
index fe08ca1add6f..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/system.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-aaed2000/include/mach/system.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H
-
-static inline void arch_idle(void)
-{
- cpu_do_idle();
-}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- cpu_reset(0);
-}
-
-#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/timex.h b/arch/arm/mach-aaec2000/include/mach/timex.h
deleted file mode 100644
index 6c8edf4a8828..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/timex.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/timex.h
- *
- * AAEC-2000 Architecture timex specification
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-#define CLOCK_TICK_RATE 508000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/uncompress.h b/arch/arm/mach-aaec2000/include/mach/uncompress.h
deleted file mode 100644
index 381ecad1a1bb..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/uncompress.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/uncompress.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_UNCOMPRESS_H
-#define __ASM_ARCH_UNCOMPRESS_H
-
-#include "hardware.h"
-
-#define UART(x) (*(volatile unsigned long *)(serial_port + (x)))
-
-static void putc(int c)
-{
- unsigned long serial_port;
- do {
- serial_port = _UART3_BASE;
- if (UART(UART_CR) & UART_CR_EN) break;
- serial_port = _UART1_BASE;
- if (UART(UART_CR) & UART_CR_EN) break;
- serial_port = _UART2_BASE;
- if (UART(UART_CR) & UART_CR_EN) break;
- return;
- } while (0);
-
- /* wait for space in the UART's transmitter */
- while ((UART(UART_SR) & UART_SR_TxFF))
- barrier();
-
- /* send the character out. */
- UART(UART_DR) = c;
-}
-
-static inline void flush(void)
-{
-}
-
-#define arch_decomp_setup()
-#define arch_decomp_wdog()
-
-#endif /* __ASM_ARCH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-aaec2000/include/mach/vmalloc.h b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
deleted file mode 100644
index a6299e8321bd..000000000000
--- a/arch/arm/mach-aaec2000/include/mach/vmalloc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/mach-aaec2000/include/mach/vmalloc.h
- *
- * Copyright (c) 2005 Nicolas Bellido Y Ortega
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xd0000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 0a99b3cedd7a..17f7d9b32142 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -153,6 +153,7 @@ static struct i2c_board_info __initdata snapper9260_i2c_devices[] = {
{
/* RTC */
I2C_BOARD_INFO("isl1208", 0x6f),
+ .irq = gpio_to_irq(AT91_PIN_PA31),
},
};
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index bfdd8ab26dc8..ddeb64536756 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -220,15 +220,8 @@ extern void at91_gpio_resume(void);
#define gpio_set_value __gpio_set_value
#define gpio_cansleep __gpio_cansleep
-static inline int gpio_to_irq(unsigned gpio)
-{
- return gpio;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- return irq;
-}
+#define gpio_to_irq(gpio) (gpio)
+#define irq_to_gpio(irq) (irq)
#endif /* __ASSEMBLY__ */
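
The snapper9260 hunk above initialises .irq with gpio_to_irq() inside a static i2c_board_info table; a static inline function call is not a constant expression in that context, which is presumably why the helpers are folded into macros here. A minimal stand-alone sketch of the difference (the structure, pin value 42 and names are invented; only the identity mapping is taken from the patch):

/* Identity GPIO-to-IRQ mapping, as in the AT91 header above. */
#define gpio_to_irq(gpio)	(gpio)

struct example_board_info {
	int irq;
};

/* Compiles: the macro expands to the integer constant 42.  An equivalent
 * "static inline int gpio_to_irq(unsigned gpio)" call would be rejected
 * as a non-constant initializer. */
static struct example_board_info example_info = {
	.irq = gpio_to_irq(42),
};
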
diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h
index 14f4ef4b6a9e..c2cfe5040642 100644
--- a/arch/arm/mach-at91/include/mach/memory.h
+++ b/arch/arm/mach-at91/include/mach/memory.h
@@ -23,6 +23,6 @@
#include <mach/hardware.h>
-#define PHYS_OFFSET (AT91_SDRAM_BASE)
+#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE)
#endif
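
This and the following memory.h hunks are one mechanical change: each platform now supplies only its fixed DRAM base as PLAT_PHYS_OFFSET, and the generic ARM headers derive PHYS_OFFSET from it (or compute it at runtime when physical/virtual address patching is in use). Roughly, as an illustration of the relationship rather than the kernel's exact definitions:

/* Platform header: static DRAM base only (example value). */
#define PLAT_PHYS_OFFSET	UL(0x20000000)

/* Generic header, non-patched case: PHYS_OFFSET falls back to the
 * platform constant and the usual linear translation applies. */
#define PHYS_OFFSET		PLAT_PHYS_OFFSET
#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
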
diff --git a/arch/arm/mach-bcmring/include/mach/hardware.h b/arch/arm/mach-bcmring/include/mach/hardware.h
index 447eb340c611..8bf3564fba50 100644
--- a/arch/arm/mach-bcmring/include/mach/hardware.h
+++ b/arch/arm/mach-bcmring/include/mach/hardware.h
@@ -31,7 +31,7 @@
* *_SIZE is the size of the region
* *_BASE is the virtual address
*/
-#define RAM_START PHYS_OFFSET
+#define RAM_START PLAT_PHYS_OFFSET
#define RAM_SIZE (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED)
#define RAM_BASE PAGE_OFFSET
diff --git a/arch/arm/mach-bcmring/include/mach/memory.h b/arch/arm/mach-bcmring/include/mach/memory.h
index 114f942bb4f3..15162e4c75f9 100644
--- a/arch/arm/mach-bcmring/include/mach/memory.h
+++ b/arch/arm/mach-bcmring/include/mach/memory.h
@@ -23,7 +23,7 @@
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
-#define PHYS_OFFSET CFG_GLOBAL_RAM_BASE
+#define PLAT_PHYS_OFFSET CFG_GLOBAL_RAM_BASE
/*
* Maximum DMA memory allowed is 14M
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index f45c8e892cb5..3a032a67725c 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -23,7 +23,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0xc0000000)
+#define PLAT_PHYS_OFFSET UL(0xc0000000)
#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)
diff --git a/arch/arm/mach-clps711x/include/mach/time.h b/arch/arm/mach-clps711x/include/mach/time.h
index 8fe283ccd1f3..61fef9129c6a 100644
--- a/arch/arm/mach-clps711x/include/mach/time.h
+++ b/arch/arm/mach-clps711x/include/mach/time.h
@@ -30,7 +30,7 @@ p720t_timer_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
do_leds();
- do_timer(1);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(regs));
#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/memory.h b/arch/arm/mach-cns3xxx/include/mach/memory.h
index 3b6b769b7a27..dc16c5c5d86b 100644
--- a/arch/arm/mach-cns3xxx/include/mach/memory.h
+++ b/arch/arm/mach-cns3xxx/include/mach/memory.h
@@ -13,7 +13,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#define __phys_to_bus(x) ((x) + PHYS_OFFSET)
#define __bus_to_phys(x) ((x) - PHYS_OFFSET)
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index b01fb2ab944a..11f986bec89d 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -664,6 +664,13 @@ static struct snd_platform_data da850_evm_snd_data = {
.rxnumevt = 1,
};
+static const short da850_evm_mcasp_pins[] __initconst = {
+ DA850_AHCLKX, DA850_ACLKX, DA850_AFSX,
+ DA850_AHCLKR, DA850_ACLKR, DA850_AFSR, DA850_AMUTE,
+ DA850_AXR_11, DA850_AXR_12,
+ -1
+};
+
static int da850_evm_mmc_get_ro(int index)
{
return gpio_get_value(DA850_MMCSD_WP_PIN);
@@ -683,6 +690,13 @@ static struct davinci_mmc_config da850_mmc_config = {
.version = MMC_CTLR_VERSION_2,
};
+static const short da850_evm_mmcsd0_pins[] __initconst = {
+ DA850_MMCSD0_DAT_0, DA850_MMCSD0_DAT_1, DA850_MMCSD0_DAT_2,
+ DA850_MMCSD0_DAT_3, DA850_MMCSD0_CLK, DA850_MMCSD0_CMD,
+ DA850_GPIO4_0, DA850_GPIO4_1,
+ -1
+};
+
static void da850_panel_power_ctrl(int val)
{
/* lcd backlight */
@@ -1070,7 +1084,7 @@ static __init void da850_evm_init(void)
ret);
if (HAS_MMC) {
- ret = davinci_cfg_reg_list(da850_mmcsd0_pins);
+ ret = davinci_cfg_reg_list(da850_evm_mmcsd0_pins);
if (ret)
pr_warning("da850_evm_init: mmcsd0 mux setup failed:"
" %d\n", ret);
@@ -1106,7 +1120,7 @@ static __init void da850_evm_init(void)
__raw_writel(0, IO_ADDRESS(DA8XX_UART1_BASE) + 0x30);
__raw_writel(0, IO_ADDRESS(DA8XX_UART0_BASE) + 0x30);
- ret = davinci_cfg_reg_list(da850_mcasp_pins);
+ ret = davinci_cfg_reg_list(da850_evm_mcasp_pins);
if (ret)
pr_warning("da850_evm_init: mcasp mux setup failed: %d\n",
ret);
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 0bb5f0ce4fdc..0ea59327dc28 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -44,38 +44,109 @@ struct factory_config {
static struct factory_config factory_config;
+struct part_no_info {
+ const char *part_no; /* part number string of interest */
+ int max_freq; /* khz */
+};
+
+static struct part_no_info mityomapl138_pn_info[] = {
+ {
+ .part_no = "L138-C",
+ .max_freq = 300000,
+ },
+ {
+ .part_no = "L138-D",
+ .max_freq = 375000,
+ },
+ {
+ .part_no = "L138-F",
+ .max_freq = 456000,
+ },
+ {
+ .part_no = "1808-C",
+ .max_freq = 300000,
+ },
+ {
+ .part_no = "1808-D",
+ .max_freq = 375000,
+ },
+ {
+ .part_no = "1808-F",
+ .max_freq = 456000,
+ },
+ {
+ .part_no = "1810-D",
+ .max_freq = 375000,
+ },
+};
+
+#ifdef CONFIG_CPU_FREQ
+static void mityomapl138_cpufreq_init(const char *partnum)
+{
+ int i, ret;
+
+ for (i = 0; partnum && i < ARRAY_SIZE(mityomapl138_pn_info); i++) {
+ /*
+ * the part number has additional characters beyond what is
+ * stored in the table. This information is not needed for
+ * determining the speed grade, and would require several
+ * more table entries. Only check the first N characters
+ * for a match.
+ */
+ if (!strncmp(partnum, mityomapl138_pn_info[i].part_no,
+ strlen(mityomapl138_pn_info[i].part_no))) {
+ da850_max_speed = mityomapl138_pn_info[i].max_freq;
+ break;
+ }
+ }
+
+ ret = da850_register_cpufreq("pll0_sysclk3");
+ if (ret)
+ pr_warning("cpufreq registration failed: %d\n", ret);
+}
+#else
+static void mityomapl138_cpufreq_init(const char *partnum) { }
+#endif
+
static void read_factory_config(struct memory_accessor *a, void *context)
{
int ret;
+ const char *partnum = NULL;
struct davinci_soc_info *soc_info = &davinci_soc_info;
ret = a->read(a, (char *)&factory_config, 0, sizeof(factory_config));
if (ret != sizeof(struct factory_config)) {
pr_warning("MityOMAPL138: Read Factory Config Failed: %d\n",
ret);
- return;
+ goto bad_config;
}
if (factory_config.magic != FACTORY_CONFIG_MAGIC) {
pr_warning("MityOMAPL138: Factory Config Magic Wrong (%X)\n",
factory_config.magic);
- return;
+ goto bad_config;
}
if (factory_config.version != FACTORY_CONFIG_VERSION) {
pr_warning("MityOMAPL138: Factory Config Version Wrong (%X)\n",
factory_config.version);
- return;
+ goto bad_config;
}
pr_info("MityOMAPL138: Found MAC = %pM\n", factory_config.mac);
- pr_info("MityOMAPL138: Part Number = %s\n", factory_config.partnum);
if (is_valid_ether_addr(factory_config.mac))
memcpy(soc_info->emac_pdata->mac_addr,
factory_config.mac, ETH_ALEN);
else
pr_warning("MityOMAPL138: Invalid MAC found "
"in factory config block\n");
+
+ partnum = factory_config.partnum;
+ pr_info("MityOMAPL138: Part Number = %s\n", partnum);
+
+bad_config:
+ /* default maximum speed is valid for all platforms */
+ mityomapl138_cpufreq_init(partnum);
}
static struct at24_platform_data mityomapl138_fd_chip = {
@@ -383,10 +454,6 @@ static void __init mityomapl138_init(void)
if (ret)
pr_warning("rtc setup failed: %d\n", ret);
- ret = da850_register_cpufreq("pll0_sysclk3");
- if (ret)
- pr_warning("cpufreq registration failed: %d\n", ret);
-
ret = da8xx_register_cpuidle();
if (ret)
pr_warning("cpuidle registration failed: %d\n", ret);
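
The table lookup added above matches only the leading characters of the factory part number, so a longer string still selects the right speed grade without needing extra table entries. A small stand-alone illustration of the same strncmp() prefix idiom (the full part-number string is invented):

#include <stdio.h>
#include <string.h>

struct part_no_info {
	const char *part_no;	/* prefix of interest */
	int max_freq;		/* kHz */
};

static const struct part_no_info table[] = {
	{ "L138-C", 300000 },
	{ "L138-D", 375000 },
};

int main(void)
{
	const char *partnum = "L138-DM-225";	/* hypothetical full part number */
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strncmp(partnum, table[i].part_no, strlen(table[i].part_no))) {
			printf("max freq %d kHz\n", table[i].max_freq);
			break;
		}
	return 0;
}
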
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 0b8dbdb79fe0..67c38d0ecd10 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -19,6 +19,279 @@
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
+#include <mach/mux.h>
+
+#define HAWKBOARD_PHY_ID "0:07"
+#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12)
+#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13)
+
+#define DA850_USB1_VBUS_PIN GPIO_TO_PIN(2, 4)
+#define DA850_USB1_OC_PIN GPIO_TO_PIN(6, 13)
+
+static short omapl138_hawk_mii_pins[] __initdata = {
+ DA850_MII_TXEN, DA850_MII_TXCLK, DA850_MII_COL, DA850_MII_TXD_3,
+ DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
+ DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
+ DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
+ DA850_MDIO_D,
+ -1
+};
+
+static __init void omapl138_hawk_config_emac(void)
+{
+ void __iomem *cfgchip3 = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
+ int ret;
+ u32 val;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+ val = __raw_readl(cfgchip3);
+ val &= ~BIT(8);
+ ret = davinci_cfg_reg_list(omapl138_hawk_mii_pins);
+ if (ret) {
+ pr_warning("%s: cpgmac/mii mux setup failed: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ /* configure the CFGCHIP3 register for MII */
+ __raw_writel(val, cfgchip3);
+ pr_info("EMAC: MII PHY configured\n");
+
+ soc_info->emac_pdata->phy_id = HAWKBOARD_PHY_ID;
+
+ ret = da8xx_register_emac();
+ if (ret)
+ pr_warning("%s: emac registration failed: %d\n",
+ __func__, ret);
+}
+
+/*
+ * The following EDMA channels/slots are not being used by drivers (for
+ * example: Timer, GPIO, UART events etc) on da850/omap-l138 EVM/Hawkboard,
+ * hence they are being reserved for codecs on the DSP side.
+ */
+static const s16 da850_dma0_rsv_chans[][2] = {
+ /* (offset, number) */
+ { 8, 6},
+ {24, 4},
+ {30, 2},
+ {-1, -1}
+};
+
+static const s16 da850_dma0_rsv_slots[][2] = {
+ /* (offset, number) */
+ { 8, 6},
+ {24, 4},
+ {30, 50},
+ {-1, -1}
+};
+
+static const s16 da850_dma1_rsv_chans[][2] = {
+ /* (offset, number) */
+ { 0, 28},
+ {30, 2},
+ {-1, -1}
+};
+
+static const s16 da850_dma1_rsv_slots[][2] = {
+ /* (offset, number) */
+ { 0, 28},
+ {30, 90},
+ {-1, -1}
+};
+
+static struct edma_rsv_info da850_edma_cc0_rsv = {
+ .rsv_chans = da850_dma0_rsv_chans,
+ .rsv_slots = da850_dma0_rsv_slots,
+};
+
+static struct edma_rsv_info da850_edma_cc1_rsv = {
+ .rsv_chans = da850_dma1_rsv_chans,
+ .rsv_slots = da850_dma1_rsv_slots,
+};
+
+static struct edma_rsv_info *da850_edma_rsv[2] = {
+ &da850_edma_cc0_rsv,
+ &da850_edma_cc1_rsv,
+};
+
+static const short hawk_mmcsd0_pins[] = {
+ DA850_MMCSD0_DAT_0, DA850_MMCSD0_DAT_1, DA850_MMCSD0_DAT_2,
+ DA850_MMCSD0_DAT_3, DA850_MMCSD0_CLK, DA850_MMCSD0_CMD,
+ DA850_GPIO3_12, DA850_GPIO3_13,
+ -1
+};
+
+static int da850_hawk_mmc_get_ro(int index)
+{
+ return gpio_get_value(DA850_HAWK_MMCSD_WP_PIN);
+}
+
+static int da850_hawk_mmc_get_cd(int index)
+{
+ return !gpio_get_value(DA850_HAWK_MMCSD_CD_PIN);
+}
+
+static struct davinci_mmc_config da850_mmc_config = {
+ .get_ro = da850_hawk_mmc_get_ro,
+ .get_cd = da850_hawk_mmc_get_cd,
+ .wires = 4,
+ .max_freq = 50000000,
+ .caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
+ .version = MMC_CTLR_VERSION_2,
+};
+
+static __init void omapl138_hawk_mmc_init(void)
+{
+ int ret;
+
+ ret = davinci_cfg_reg_list(hawk_mmcsd0_pins);
+ if (ret) {
+ pr_warning("%s: MMC/SD0 mux setup failed: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ ret = gpio_request_one(DA850_HAWK_MMCSD_CD_PIN,
+ GPIOF_DIR_IN, "MMC CD");
+ if (ret < 0) {
+ pr_warning("%s: can not open GPIO %d\n",
+ __func__, DA850_HAWK_MMCSD_CD_PIN);
+ return;
+ }
+
+ ret = gpio_request_one(DA850_HAWK_MMCSD_WP_PIN,
+ GPIOF_DIR_IN, "MMC WP");
+ if (ret < 0) {
+ pr_warning("%s: can not open GPIO %d\n",
+ __func__, DA850_HAWK_MMCSD_WP_PIN);
+ goto mmc_setup_wp_fail;
+ }
+
+ ret = da8xx_register_mmcsd0(&da850_mmc_config);
+ if (ret) {
+ pr_warning("%s: MMC/SD0 registration failed: %d\n",
+ __func__, ret);
+ goto mmc_setup_mmcsd_fail;
+ }
+
+ return;
+
+mmc_setup_mmcsd_fail:
+ gpio_free(DA850_HAWK_MMCSD_WP_PIN);
+mmc_setup_wp_fail:
+ gpio_free(DA850_HAWK_MMCSD_CD_PIN);
+}
+
+static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id);
+static da8xx_ocic_handler_t hawk_usb_ocic_handler;
+
+static const short da850_hawk_usb11_pins[] = {
+ DA850_GPIO2_4, DA850_GPIO6_13,
+ -1
+};
+
+static int hawk_usb_set_power(unsigned port, int on)
+{
+ gpio_set_value(DA850_USB1_VBUS_PIN, on);
+ return 0;
+}
+
+static int hawk_usb_get_power(unsigned port)
+{
+ return gpio_get_value(DA850_USB1_VBUS_PIN);
+}
+
+static int hawk_usb_get_oci(unsigned port)
+{
+ return !gpio_get_value(DA850_USB1_OC_PIN);
+}
+
+static int hawk_usb_ocic_notify(da8xx_ocic_handler_t handler)
+{
+ int irq = gpio_to_irq(DA850_USB1_OC_PIN);
+ int error = 0;
+
+ if (handler != NULL) {
+ hawk_usb_ocic_handler = handler;
+
+ error = request_irq(irq, omapl138_hawk_usb_ocic_irq,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "OHCI over-current indicator", NULL);
+ if (error)
+ pr_err("%s: could not request IRQ to watch "
+ "over-current indicator changes\n", __func__);
+ } else {
+ free_irq(irq, NULL);
+ }
+ return error;
+}
+
+static struct da8xx_ohci_root_hub omapl138_hawk_usb11_pdata = {
+ .set_power = hawk_usb_set_power,
+ .get_power = hawk_usb_get_power,
+ .get_oci = hawk_usb_get_oci,
+ .ocic_notify = hawk_usb_ocic_notify,
+ /* TPS2087 switch @ 5V */
+ .potpgt = (3 + 1) / 2, /* 3 ms max */
+};
+
+static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id)
+{
+ hawk_usb_ocic_handler(&omapl138_hawk_usb11_pdata, 1);
+ return IRQ_HANDLED;
+}
+
+static __init void omapl138_hawk_usb_init(void)
+{
+ int ret;
+ u32 cfgchip2;
+
+ ret = davinci_cfg_reg_list(da850_hawk_usb11_pins);
+ if (ret) {
+ pr_warning("%s: USB 1.1 PinMux setup failed: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ /* Setup the Ref. clock frequency for the HAWK at 24 MHz. */
+
+ cfgchip2 = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+ cfgchip2 &= ~CFGCHIP2_REFFREQ;
+ cfgchip2 |= CFGCHIP2_REFFREQ_24MHZ;
+ __raw_writel(cfgchip2, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+ ret = gpio_request_one(DA850_USB1_VBUS_PIN,
+ GPIOF_DIR_OUT, "USB1 VBUS");
+ if (ret < 0) {
+ pr_err("%s: failed to request GPIO for USB 1.1 port "
+ "power control: %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_request_one(DA850_USB1_OC_PIN,
+ GPIOF_DIR_IN, "USB1 OC");
+ if (ret < 0) {
+ pr_err("%s: failed to request GPIO for USB 1.1 port "
+ "over-current indicator: %d\n", __func__, ret);
+ goto usb11_setup_oc_fail;
+ }
+
+ ret = da8xx_register_usb11(&omapl138_hawk_usb11_pdata);
+ if (ret) {
+ pr_warning("%s: USB 1.1 registration failed: %d\n",
+ __func__, ret);
+ goto usb11_setup_fail;
+ }
+
+ return;
+
+usb11_setup_fail:
+ gpio_free(DA850_USB1_OC_PIN);
+usb11_setup_oc_fail:
+ gpio_free(DA850_USB1_VBUS_PIN);
+}
static struct davinci_uart_config omapl138_hawk_uart_config __initdata = {
.enabled_uarts = 0x7,
@@ -30,6 +303,17 @@ static __init void omapl138_hawk_init(void)
davinci_serial_init(&omapl138_hawk_uart_config);
+ omapl138_hawk_config_emac();
+
+ ret = da850_register_edma(da850_edma_rsv);
+ if (ret)
+ pr_warning("%s: EDMA registration failed: %d\n",
+ __func__, ret);
+
+ omapl138_hawk_mmc_init();
+
+ omapl138_hawk_usb_init();
+
ret = da8xx_register_watchdog();
if (ret)
pr_warning("omapl138_hawk_init: "
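
In the EDMA reservation tables earlier in this file each pair is (offset, count): { 8, 6 } keeps channels 8-13 away from the kernel's allocator so the DSP-side codecs can use them, and { -1, -1 } terminates the list. A stand-alone sketch of how such a table expands into a per-channel reservation map (the code below is illustrative, not the EDMA driver's):

#include <stdio.h>

/* Terminated by { -1, -1 }, as in the tables above. */
static const short rsv_chans[][2] = {
	{  8,  6 },
	{ 24,  4 },
	{ 30,  2 },
	{ -1, -1 },
};

int main(void)
{
	unsigned char reserved[64] = { 0 };	/* one flag per DMA channel */
	int i, ch;

	for (i = 0; rsv_chans[i][0] >= 0; i++)
		for (ch = rsv_chans[i][0];
		     ch < rsv_chans[i][0] + rsv_chans[i][1]; ch++)
			reserved[ch] = 1;

	for (ch = 0; ch < 64; ch++)
		if (reserved[ch])
			printf("channel %d reserved for the DSP\n", ch);
	return 0;
}
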
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 78b5ae29ae40..3443d9725265 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -345,6 +345,20 @@ static struct clk aemif_clk = {
.flags = ALWAYS_ENABLED,
};
+static struct clk usb11_clk = {
+ .name = "usb11",
+ .parent = &pll0_sysclk4,
+ .lpsc = DA8XX_LPSC1_USB11,
+ .gpsc = 1,
+};
+
+static struct clk usb20_clk = {
+ .name = "usb20",
+ .parent = &pll0_sysclk2,
+ .lpsc = DA8XX_LPSC1_USB20,
+ .gpsc = 1,
+};
+
static struct clk_lookup da850_clks[] = {
CLK(NULL, "ref", &ref_clk),
CLK(NULL, "pll0", &pll0_clk),
@@ -387,6 +401,8 @@ static struct clk_lookup da850_clks[] = {
CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
CLK(NULL, "aemif", &aemif_clk),
+ CLK(NULL, "usb11", &usb11_clk),
+ CLK(NULL, "usb20", &usb20_clk),
CLK(NULL, NULL, NULL),
};
@@ -543,30 +559,19 @@ static const struct mux_config da850_pins[] = {
MUX_CFG(DA850, EMA_WAIT_1, 6, 24, 15, 1, false)
MUX_CFG(DA850, NEMA_CS_2, 7, 0, 15, 1, false)
/* GPIO function */
+ MUX_CFG(DA850, GPIO2_4, 6, 12, 15, 8, false)
MUX_CFG(DA850, GPIO2_6, 6, 4, 15, 8, false)
MUX_CFG(DA850, GPIO2_8, 5, 28, 15, 8, false)
MUX_CFG(DA850, GPIO2_15, 5, 0, 15, 8, false)
+ MUX_CFG(DA850, GPIO3_12, 7, 12, 15, 8, false)
+ MUX_CFG(DA850, GPIO3_13, 7, 8, 15, 8, false)
MUX_CFG(DA850, GPIO4_0, 10, 28, 15, 8, false)
MUX_CFG(DA850, GPIO4_1, 10, 24, 15, 8, false)
+ MUX_CFG(DA850, GPIO6_13, 13, 8, 15, 8, false)
MUX_CFG(DA850, RTC_ALARM, 0, 28, 15, 2, false)
#endif
};
-const short da850_uart0_pins[] __initdata = {
- DA850_NUART0_CTS, DA850_NUART0_RTS, DA850_UART0_RXD, DA850_UART0_TXD,
- -1
-};
-
-const short da850_uart1_pins[] __initdata = {
- DA850_UART1_RXD, DA850_UART1_TXD,
- -1
-};
-
-const short da850_uart2_pins[] __initdata = {
- DA850_UART2_RXD, DA850_UART2_TXD,
- -1
-};
-
const short da850_i2c0_pins[] __initdata = {
DA850_I2C0_SDA, DA850_I2C0_SCL,
-1
@@ -577,24 +582,6 @@ const short da850_i2c1_pins[] __initdata = {
-1
};
-const short da850_cpgmac_pins[] __initdata = {
- DA850_MII_TXEN, DA850_MII_TXCLK, DA850_MII_COL, DA850_MII_TXD_3,
- DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
- DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
- DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
- DA850_MDIO_D, DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
- DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1, DA850_RMII_RXER,
- DA850_RMII_MHZ_50_CLK,
- -1
-};
-
-const short da850_mcasp_pins[] __initdata = {
- DA850_AHCLKX, DA850_ACLKX, DA850_AFSX,
- DA850_AHCLKR, DA850_ACLKR, DA850_AFSR, DA850_AMUTE,
- DA850_AXR_11, DA850_AXR_12,
- -1
-};
-
const short da850_lcdcntl_pins[] __initdata = {
DA850_LCD_D_0, DA850_LCD_D_1, DA850_LCD_D_2, DA850_LCD_D_3,
DA850_LCD_D_4, DA850_LCD_D_5, DA850_LCD_D_6, DA850_LCD_D_7,
@@ -604,29 +591,6 @@ const short da850_lcdcntl_pins[] __initdata = {
-1
};
-const short da850_mmcsd0_pins[] __initdata = {
- DA850_MMCSD0_DAT_0, DA850_MMCSD0_DAT_1, DA850_MMCSD0_DAT_2,
- DA850_MMCSD0_DAT_3, DA850_MMCSD0_CLK, DA850_MMCSD0_CMD,
- DA850_GPIO4_0, DA850_GPIO4_1,
- -1
-};
-
-const short da850_emif25_pins[] __initdata = {
- DA850_EMA_BA_1, DA850_EMA_CLK, DA850_EMA_WAIT_1, DA850_NEMA_CS_2,
- DA850_NEMA_CS_3, DA850_NEMA_CS_4, DA850_NEMA_WE, DA850_NEMA_OE,
- DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
- DA850_EMA_D_4, DA850_EMA_D_5, DA850_EMA_D_6, DA850_EMA_D_7,
- DA850_EMA_D_8, DA850_EMA_D_9, DA850_EMA_D_10, DA850_EMA_D_11,
- DA850_EMA_D_12, DA850_EMA_D_13, DA850_EMA_D_14, DA850_EMA_D_15,
- DA850_EMA_A_0, DA850_EMA_A_1, DA850_EMA_A_2, DA850_EMA_A_3,
- DA850_EMA_A_4, DA850_EMA_A_5, DA850_EMA_A_6, DA850_EMA_A_7,
- DA850_EMA_A_8, DA850_EMA_A_9, DA850_EMA_A_10, DA850_EMA_A_11,
- DA850_EMA_A_12, DA850_EMA_A_13, DA850_EMA_A_14, DA850_EMA_A_15,
- DA850_EMA_A_16, DA850_EMA_A_17, DA850_EMA_A_18, DA850_EMA_A_19,
- DA850_EMA_A_20, DA850_EMA_A_21, DA850_EMA_A_22, DA850_EMA_A_23,
- -1
-};
-
/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
static u8 da850_default_priorities[DA850_N_CP_INTC_IRQ] = {
[IRQ_DA8XX_COMMTX] = 7,
@@ -764,6 +728,13 @@ static struct davinci_id da850_ids[] = {
.cpu_id = DAVINCI_CPU_ID_DA850,
.name = "da850/omap-l138",
},
+ {
+ .variant = 0x1,
+ .part_no = 0xb7d1,
+ .manufacturer = 0x017, /* 0x02f >> 1 */
+ .cpu_id = DAVINCI_CPU_ID_DA850,
+ .name = "da850/omap-l138/am18x",
+ },
};
static struct davinci_timer_instance da850_timer_instance[4] = {
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index a5f8a80c1f28..76364d1345df 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -403,16 +403,13 @@ static struct resource dm355_spi0_resources[] = {
.start = 16,
.flags = IORESOURCE_DMA,
},
- {
- .start = EVENTQ_1,
- .flags = IORESOURCE_DMA,
- },
};
static struct davinci_spi_platform_data dm355_spi0_pdata = {
.version = SPI_VERSION_1,
.num_chipselect = 2,
.cshold_bug = true,
+ .dma_event_q = EVENTQ_1,
};
static struct platform_device dm355_spi0_device = {
.name = "spi_davinci",
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 02d2cc380df7..4604e72d7d99 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -625,6 +625,7 @@ static u64 dm365_spi0_dma_mask = DMA_BIT_MASK(32);
static struct davinci_spi_platform_data dm365_spi0_pdata = {
.version = SPI_VERSION_1,
.num_chipselect = 2,
+ .dma_event_q = EVENTQ_3,
};
static struct resource dm365_spi0_resources[] = {
@@ -645,10 +646,6 @@ static struct resource dm365_spi0_resources[] = {
.start = 16,
.flags = IORESOURCE_DMA,
},
- {
- .start = EVENTQ_3,
- .flags = IORESOURCE_DMA,
- },
};
static struct platform_device dm365_spi0_device = {
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index e7f952066527..cfcb2232e6b1 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -123,15 +123,8 @@ extern const short da830_ecap2_pins[];
extern const short da830_eqep0_pins[];
extern const short da830_eqep1_pins[];
-extern const short da850_uart0_pins[];
-extern const short da850_uart1_pins[];
-extern const short da850_uart2_pins[];
extern const short da850_i2c0_pins[];
extern const short da850_i2c1_pins[];
-extern const short da850_cpgmac_pins[];
-extern const short da850_mcasp_pins[];
extern const short da850_lcdcntl_pins[];
-extern const short da850_mmcsd0_pins[];
-extern const short da850_emif25_pins[];
#endif /* __ASM_ARCH_DAVINCI_DA8XX_H */
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h
index 22eb97c1c30b..78822723f382 100644
--- a/arch/arm/mach-davinci/include/mach/memory.h
+++ b/arch/arm/mach-davinci/include/mach/memory.h
@@ -26,9 +26,9 @@
#if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx)
#error Cannot enable DaVinci and DA8XX platforms concurrently
#elif defined(CONFIG_ARCH_DAVINCI_DA8XX)
-#define PHYS_OFFSET DA8XX_DDR_BASE
+#define PLAT_PHYS_OFFSET DA8XX_DDR_BASE
#else
-#define PHYS_OFFSET DAVINCI_DDR_BASE
+#define PLAT_PHYS_OFFSET DAVINCI_DDR_BASE
#endif
#define DDR2_SDRCR_OFFSET 0xc
diff --git a/arch/arm/mach-davinci/include/mach/mux.h b/arch/arm/mach-davinci/include/mach/mux.h
index de11aac76a80..5d4e0fed828a 100644
--- a/arch/arm/mach-davinci/include/mach/mux.h
+++ b/arch/arm/mach-davinci/include/mach/mux.h
@@ -908,11 +908,15 @@ enum davinci_da850_index {
DA850_NEMA_CS_2,
/* GPIO function */
+ DA850_GPIO2_4,
DA850_GPIO2_6,
DA850_GPIO2_8,
DA850_GPIO2_15,
+ DA850_GPIO3_12,
+ DA850_GPIO3_13,
DA850_GPIO4_0,
DA850_GPIO4_1,
+ DA850_GPIO6_13,
DA850_RTC_ALARM,
};
diff --git a/arch/arm/mach-davinci/include/mach/spi.h b/arch/arm/mach-davinci/include/mach/spi.h
index 38f4da5ca135..7af305b37868 100644
--- a/arch/arm/mach-davinci/include/mach/spi.h
+++ b/arch/arm/mach-davinci/include/mach/spi.h
@@ -19,6 +19,8 @@
#ifndef __ARCH_ARM_DAVINCI_SPI_H
#define __ARCH_ARM_DAVINCI_SPI_H
+#include <mach/edma.h>
+
#define SPI_INTERN_CS 0xFF
enum {
@@ -39,13 +41,16 @@ enum {
* to populate if all chip-selects are internal.
* @cshold_bug: set this to true if the SPI controller on your chip requires
* a write to CSHOLD bit in between transfers (like in DM355).
+ * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
+ * device on the bus.
*/
struct davinci_spi_platform_data {
- u8 version;
- u8 num_chipselect;
- u8 intr_line;
- u8 *chip_sel;
- bool cshold_bug;
+ u8 version;
+ u8 num_chipselect;
+ u8 intr_line;
+ u8 *chip_sel;
+ bool cshold_bug;
+ enum dma_event_q dma_event_q;
};
/**
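
With the event queue moved out of the IORESOURCE_DMA entries (see the dm355.c and dm365.c hunks above) and into platform data, a SoC file describes its SPI controller along these lines; the field layout is from the patch, the particular values are illustrative:

#include <mach/spi.h>	/* now pulls in <mach/edma.h> for enum dma_event_q */

static struct davinci_spi_platform_data example_spi_pdata = {
	.version	= SPI_VERSION_1,
	.num_chipselect	= 2,
	.cshold_bug	= true,
	.dma_event_q	= EVENTQ_1,	/* used when a device selects SPI_IO_TYPE_DMA */
};
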
diff --git a/arch/arm/mach-dove/Kconfig b/arch/arm/mach-dove/Kconfig
index a4ed3900912a..dd937c526a45 100644
--- a/arch/arm/mach-dove/Kconfig
+++ b/arch/arm/mach-dove/Kconfig
@@ -9,7 +9,7 @@ config MACH_DOVE_DB
Say 'Y' here if you want your kernel to support the
Marvell DB-MV88AP510 Development Board.
- config MACH_CM_A510
+config MACH_CM_A510
bool "CompuLab CM-A510 Board"
help
Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-dove/include/mach/memory.h b/arch/arm/mach-dove/include/mach/memory.h
index d66872074946..bbc93fee6c75 100644
--- a/arch/arm/mach-dove/include/mach/memory.h
+++ b/arch/arm/mach-dove/include/mach/memory.h
@@ -5,6 +5,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h
index 0ca66d080c69..8e49066ad850 100644
--- a/arch/arm/mach-ebsa110/include/mach/memory.h
+++ b/arch/arm/mach-ebsa110/include/mach/memory.h
@@ -19,7 +19,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
/*
* Cache flushing area - SRAM
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 4b0431652131..fad371df40ed 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -32,6 +32,7 @@
#include <linux/i2c-gpio.h>
#include <mach/hardware.h>
+#include <mach/fb.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -111,6 +112,37 @@ static void __init edb93xx_register_pwm(void)
}
+/*************************************************************************
+ * EDB93xx framebuffer
+ *************************************************************************/
+static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
+ .num_modes = EP93XXFB_USE_MODEDB,
+ .bpp = 16,
+ .flags = 0,
+};
+
+static int __init edb93xx_has_fb(void)
+{
+ /* These platforms have an ep93xx with video capability */
+ return machine_is_edb9307() || machine_is_edb9307a() ||
+ machine_is_edb9312() || machine_is_edb9315() ||
+ machine_is_edb9315a();
+}
+
+static void __init edb93xx_register_fb(void)
+{
+ if (!edb93xx_has_fb())
+ return;
+
+ if (machine_is_edb9307a() || machine_is_edb9315a())
+ edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
+ else
+ edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;
+
+ ep93xx_register_fb(&edb93xxfb_info);
+}
+
+
static void __init edb93xx_init_machine(void)
{
ep93xx_init_devices();
@@ -118,6 +150,7 @@ static void __init edb93xx_init_machine(void)
ep93xx_register_eth(&edb93xx_eth_data, 1);
edb93xx_register_i2c();
edb93xx_register_pwm();
+ edb93xx_register_fb();
}
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index bec34b834958..a889fa7c3ba1 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -61,7 +61,7 @@ static inline void ep93xx_gpio_int_mask(unsigned line)
gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
}
-void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
+static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
{
int line = irq_to_gpio(irq);
int port = line >> 3;
@@ -75,7 +75,6 @@ void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
__raw_writeb(gpio_int_debounce[port],
EP93XX_GPIO_REG(int_debounce_register_offset[port]));
}
-EXPORT_SYMBOL(ep93xx_gpio_int_debounce);
static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
{
@@ -335,6 +334,20 @@ static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
local_irq_restore(flags);
}
+static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
+ unsigned offset, unsigned debounce)
+{
+ int gpio = chip->base + offset;
+ int irq = gpio_to_irq(gpio);
+
+ if (irq < 0)
+ return -EINVAL;
+
+ ep93xx_gpio_int_debounce(irq, debounce ? true : false);
+
+ return 0;
+}
+
static void ep93xx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
@@ -434,6 +447,18 @@ void __init ep93xx_gpio_init(void)
EP93XX_SYSCON_DEVCFG_GONIDE |
EP93XX_SYSCON_DEVCFG_HONIDE);
- for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
- gpiochip_add(&ep93xx_gpio_banks[i].chip);
+ for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
+ struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
+
+ /*
+ * Ports A, B, and F support input debouncing when
+ * used as interrupts.
+ */
+ if (!strcmp(chip->label, "A") ||
+ !strcmp(chip->label, "B") ||
+ !strcmp(chip->label, "F"))
+ chip->set_debounce = ep93xx_gpio_set_debounce;
+
+ gpiochip_add(chip);
+ }
}
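
ep93xx_gpio_int_debounce() is made static and its export dropped above, so users are expected to reach the hardware debounce through the generic gpiolib hook the patch wires up for ports A, B and F. A consumer-side sketch (function name, label and the 5 ms value are invented; the ep93xx backend only treats the time as on/off):

#include <linux/gpio.h>

static int example_setup_debounced_input(unsigned gpio)
{
	int err;

	err = gpio_request(gpio, "example");	/* label is illustrative */
	if (err)
		return err;

	err = gpio_direction_input(gpio);
	if (!err)
		/* A non-zero time enables debouncing; the ep93xx hook above
		 * only uses it as an on/off flag. */
		err = gpio_set_debounce(gpio, 5000);	/* 5 ms, illustrative */

	if (err)
		gpio_free(gpio);
	return err;
}
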
diff --git a/arch/arm/mach-ep93xx/include/mach/gpio.h b/arch/arm/mach-ep93xx/include/mach/gpio.h
index c991b149bdf2..c57152c231f1 100644
--- a/arch/arm/mach-ep93xx/include/mach/gpio.h
+++ b/arch/arm/mach-ep93xx/include/mach/gpio.h
@@ -99,8 +99,6 @@
/* maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7)
-extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
-
/* new generic GPIO API - see Documentation/gpio.txt */
#include <asm-generic/gpio.h>
diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h
index 554064e90307..c9400cf0051c 100644
--- a/arch/arm/mach-ep93xx/include/mach/memory.h
+++ b/arch/arm/mach-ep93xx/include/mach/memory.h
@@ -6,15 +6,15 @@
#define __ASM_ARCH_MEMORY_H
#if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
-#define PHYS_OFFSET UL(0xc0000000)
+#define PLAT_PHYS_OFFSET UL(0xc0000000)
#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
-#define PHYS_OFFSET UL(0xd0000000)
+#define PLAT_PHYS_OFFSET UL(0xd0000000)
#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
-#define PHYS_OFFSET UL(0xe0000000)
+#define PLAT_PHYS_OFFSET UL(0xe0000000)
#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
-#define PHYS_OFFSET UL(0xf0000000)
+#define PLAT_PHYS_OFFSET UL(0xf0000000)
#else
#error "Kconfig bug: No EP93xx PHYS_OFFSET set"
#endif
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-exynos4/Kconfig
index b2a9acc5185f..ad038401290e 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -1,83 +1,83 @@
-# arch/arm/mach-s5pv310/Kconfig
+# arch/arm/mach-exynos4/Kconfig
#
-# Copyright (c) 2010 Samsung Electronics Co., Ltd.
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
# http://www.samsung.com/
#
# Licensed under GPLv2
-# Configuration options for the S5PV310
+# Configuration options for the EXYNOS4
-if ARCH_S5PV310
+if ARCH_EXYNOS4
-config CPU_S5PV310
+config CPU_EXYNOS4210
bool
select S3C_PL330_DMA
help
- Enable S5PV310 CPU support
+ Enable EXYNOS4210 CPU support
-config S5PV310_DEV_PD
+config EXYNOS4_DEV_PD
bool
help
Compile in platform device definitions for Power Domain
-config S5PV310_SETUP_I2C1
+config EXYNOS4_DEV_SYSMMU
+ bool
+ help
+ Common setup code for SYSTEM MMU in EXYNOS4
+
+config EXYNOS4_SETUP_I2C1
bool
help
Common setup code for i2c bus 1.
-config S5PV310_SETUP_I2C2
+config EXYNOS4_SETUP_I2C2
bool
help
Common setup code for i2c bus 2.
-config S5PV310_SETUP_I2C3
+config EXYNOS4_SETUP_I2C3
bool
help
Common setup code for i2c bus 3.
-config S5PV310_SETUP_I2C4
+config EXYNOS4_SETUP_I2C4
bool
help
Common setup code for i2c bus 4.
-config S5PV310_SETUP_I2C5
+config EXYNOS4_SETUP_I2C5
bool
help
Common setup code for i2c bus 5.
-config S5PV310_SETUP_I2C6
+config EXYNOS4_SETUP_I2C6
bool
help
Common setup code for i2c bus 6.
-config S5PV310_SETUP_I2C7
+config EXYNOS4_SETUP_I2C7
bool
help
Common setup code for i2c bus 7.
-config S5PV310_SETUP_SDHCI
+config EXYNOS4_SETUP_SDHCI
bool
- select S5PV310_SETUP_SDHCI_GPIO
+ select EXYNOS4_SETUP_SDHCI_GPIO
help
- Internal helper functions for S5PV310 based SDHCI systems.
+ Internal helper functions for EXYNOS4 based SDHCI systems.
-config S5PV310_SETUP_SDHCI_GPIO
+config EXYNOS4_SETUP_SDHCI_GPIO
bool
help
Common setup code for SDHCI gpio.
-config S5PV310_DEV_SYSMMU
- bool
- help
- Common setup code for SYSTEM MMU in S5PV310
-
# machine support
-menu "S5PC210 Machines"
+menu "EXYNOS4 Machines"
config MACH_SMDKC210
bool "SMDKC210"
- select CPU_S5PV310
+ select CPU_EXYNOS4210
select S3C_DEV_RTC
select S3C_DEV_WDT
select S3C_DEV_I2C1
@@ -85,35 +85,16 @@ config MACH_SMDKC210
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
- select S5PV310_DEV_PD
- select S5PV310_SETUP_I2C1
- select S5PV310_SETUP_SDHCI
- select S5PV310_DEV_SYSMMU
+ select EXYNOS4_DEV_PD
+ select EXYNOS4_DEV_SYSMMU
+ select EXYNOS4_SETUP_I2C1
+ select EXYNOS4_SETUP_SDHCI
help
Machine support for Samsung SMDKC210
- S5PC210(MCP) is one of package option of S5PV310
-
-config MACH_UNIVERSAL_C210
- bool "Mobile UNIVERSAL_C210 Board"
- select CPU_S5PV310
- select S5P_DEV_ONENAND
- select S3C_DEV_HSMMC
- select S3C_DEV_HSMMC2
- select S3C_DEV_HSMMC3
- select S5PV310_SETUP_SDHCI
- select S3C_DEV_I2C1
- select S5PV310_SETUP_I2C1
- help
- Machine support for Samsung Mobile Universal S5PC210 Reference
- Board. S5PC210(MCP) is one of package option of S5PV310
-
-endmenu
-
-menu "S5PV310 Machines"
config MACH_SMDKV310
bool "SMDKV310"
- select CPU_S5PV310
+ select CPU_EXYNOS4210
select S3C_DEV_RTC
select S3C_DEV_WDT
select S3C_DEV_I2C1
@@ -121,26 +102,40 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
- select S5PV310_DEV_PD
- select S5PV310_DEV_SYSMMU
- select S5PV310_SETUP_I2C1
- select S5PV310_SETUP_SDHCI
+ select EXYNOS4_DEV_PD
+ select EXYNOS4_DEV_SYSMMU
+ select EXYNOS4_SETUP_I2C1
+ select EXYNOS4_SETUP_SDHCI
help
Machine support for Samsung SMDKV310
+config MACH_UNIVERSAL_C210
+ bool "Mobile UNIVERSAL_C210 Board"
+ select CPU_EXYNOS4210
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC2
+ select S3C_DEV_HSMMC3
+ select S3C_DEV_I2C1
+ select S5P_DEV_ONENAND
+ select EXYNOS4_SETUP_I2C1
+ select EXYNOS4_SETUP_SDHCI
+ help
+ Machine support for Samsung Mobile Universal S5PC210 Reference
+ Board.
+
endmenu
comment "Configuration for HSMMC bus width"
menu "Use 8-bit bus width"
-config S5PV310_SDHCI_CH0_8BIT
+config EXYNOS4_SDHCI_CH0_8BIT
bool "Channel 0 with 8-bit bus"
help
Support HSMMC Channel 0 8-bit bus.
If selected, Channel 1 is disabled.
-config S5PV310_SDHCI_CH2_8BIT
+config EXYNOS4_SDHCI_CH2_8BIT
bool "Channel 2 with 8-bit bus"
help
Support HSMMC Channel 2 8-bit bus.
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
new file mode 100644
index 000000000000..055823533230
--- /dev/null
+++ b/arch/arm/mach-exynos4/Makefile
@@ -0,0 +1,43 @@
+# arch/arm/mach-exynos4/Makefile
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+# Core support for EXYNOS4 system
+
+obj-$(CONFIG_CPU_EXYNOS4210) += cpu.o init.o clock.o irq-combiner.o
+obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+
+# machine support
+
+obj-$(CONFIG_MACH_SMDKC210) += mach-smdkc210.o
+obj-$(CONFIG_MACH_SMDKV310) += mach-smdkv310.o
+obj-$(CONFIG_MACH_UNIVERSAL_C210) += mach-universal_c210.o
+
+# device support
+
+obj-y += dev-audio.o
+obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o
+obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
+
+obj-$(CONFIG_EXYNOS4_SETUP_I2C1) += setup-i2c1.o
+obj-$(CONFIG_EXYNOS4_SETUP_I2C2) += setup-i2c2.o
+obj-$(CONFIG_EXYNOS4_SETUP_I2C3) += setup-i2c3.o
+obj-$(CONFIG_EXYNOS4_SETUP_I2C4) += setup-i2c4.o
+obj-$(CONFIG_EXYNOS4_SETUP_I2C5) += setup-i2c5.o
+obj-$(CONFIG_EXYNOS4_SETUP_I2C6) += setup-i2c6.o
+obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o
+obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5pv310/Makefile.boot b/arch/arm/mach-exynos4/Makefile.boot
index d65956ffb43d..d65956ffb43d 100644
--- a/arch/arm/mach-s5pv310/Makefile.boot
+++ b/arch/arm/mach-exynos4/Makefile.boot
diff --git a/arch/arm/mach-s5pv310/clock.c b/arch/arm/mach-exynos4/clock.c
index fc7c2f8d165e..72d53d5e54af 100644
--- a/arch/arm/mach-s5pv310/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/clock.c
+/* linux/arch/arm/mach-exynos4/clock.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - Clock support
+ * EXYNOS4 - Clock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -46,72 +46,72 @@ static struct clk clk_sclk_usbphy1 = {
.id = -1,
};
-static int s5pv310_clksrc_mask_top_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable);
}
-static int s5pv310_clksrc_mask_cam_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_cam_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_CAM, clk, enable);
}
-static int s5pv310_clksrc_mask_lcd0_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_lcd0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_LCD0, clk, enable);
}
-static int s5pv310_clksrc_mask_lcd1_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_lcd1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_LCD1, clk, enable);
}
-static int s5pv310_clksrc_mask_fsys_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_FSYS, clk, enable);
}
-static int s5pv310_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable);
}
-static int s5pv310_clksrc_mask_peril1_ctrl(struct clk *clk, int enable)
+static int exynos4_clksrc_mask_peril1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL1, clk, enable);
}
-static int s5pv310_clk_ip_cam_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_cam_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_CAM, clk, enable);
}
-static int s5pv310_clk_ip_image_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_IMAGE, clk, enable);
}
-static int s5pv310_clk_ip_lcd0_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_lcd0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_LCD0, clk, enable);
}
-static int s5pv310_clk_ip_lcd1_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_LCD1, clk, enable);
}
-static int s5pv310_clk_ip_fsys_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_FSYS, clk, enable);
}
-static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_peril_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
}
-static int s5pv310_clk_ip_perir_ctrl(struct clk *clk, int enable)
+static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP_PERIR, clk, enable);
}
@@ -358,7 +358,7 @@ static struct clksrc_clk clk_vpllsrc = {
.clk = {
.name = "vpll_src",
.id = -1,
- .enable = s5pv310_clksrc_mask_top_ctrl,
+ .enable = exynos4_clksrc_mask_top_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &clkset_vpllsrc,
@@ -389,205 +389,205 @@ static struct clk init_clocks_off[] = {
.name = "timers",
.id = -1,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1<<24),
}, {
.name = "csis",
.id = 0,
- .enable = s5pv310_clk_ip_cam_ctrl,
+ .enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "csis",
.id = 1,
- .enable = s5pv310_clk_ip_cam_ctrl,
+ .enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "fimc",
.id = 0,
- .enable = s5pv310_clk_ip_cam_ctrl,
+ .enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "fimc",
.id = 1,
- .enable = s5pv310_clk_ip_cam_ctrl,
+ .enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "fimc",
.id = 2,
- .enable = s5pv310_clk_ip_cam_ctrl,
+ .enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "fimc",
.id = 3,
- .enable = s5pv310_clk_ip_cam_ctrl,
+ .enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "fimd",
.id = 0,
- .enable = s5pv310_clk_ip_lcd0_ctrl,
+ .enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "fimd",
.id = 1,
- .enable = s5pv310_clk_ip_lcd1_ctrl,
+ .enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "hsmmc",
.id = 0,
.parent = &clk_aclk_133.clk,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "hsmmc",
.id = 1,
.parent = &clk_aclk_133.clk,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "hsmmc",
.id = 2,
.parent = &clk_aclk_133.clk,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "hsmmc",
.id = 3,
.parent = &clk_aclk_133.clk,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "hsmmc",
.id = 4,
.parent = &clk_aclk_133.clk,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "sata",
.id = -1,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "pdma",
.id = 0,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "pdma",
.id = 1,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "adc",
.id = -1,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "rtc",
.id = -1,
- .enable = s5pv310_clk_ip_perir_ctrl,
+ .enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "watchdog",
.id = -1,
- .enable = s5pv310_clk_ip_perir_ctrl,
+ .enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 14),
}, {
.name = "usbhost",
.id = -1,
- .enable = s5pv310_clk_ip_fsys_ctrl ,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "otg",
.id = -1,
- .enable = s5pv310_clk_ip_fsys_ctrl,
+ .enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 13),
}, {
.name = "spi",
.id = 0,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "spi",
.id = 1,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "spi",
.id = 2,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "iis",
.id = 0,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "iis",
.id = 1,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 20),
}, {
.name = "iis",
.id = 2,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "ac97",
.id = -1,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 27),
}, {
.name = "fimg2d",
.id = -1,
- .enable = s5pv310_clk_ip_image_ctrl,
+ .enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "i2c",
.id = 0,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "i2c",
.id = 1,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "i2c",
.id = 2,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "i2c",
.id = 3,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "i2c",
.id = 4,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "i2c",
.id = 5,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "i2c",
.id = 6,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "i2c",
.id = 7,
.parent = &clk_aclk_100.clk,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 13),
},
};
@@ -596,32 +596,32 @@ static struct clk init_clocks[] = {
{
.name = "uart",
.id = 0,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "uart",
.id = 1,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
.id = 2,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "uart",
.id = 3,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "uart",
.id = 4,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "uart",
.id = 5,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 5),
}
};
@@ -746,7 +746,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 0,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &clkset_group,
@@ -756,7 +756,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 1,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &clkset_group,
@@ -766,7 +766,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 2,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &clkset_group,
@@ -776,7 +776,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 3,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 12),
},
.sources = &clkset_group,
@@ -786,7 +786,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_pwm",
.id = -1,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_group,
@@ -796,7 +796,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_csis",
.id = 0,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_group,
@@ -806,7 +806,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_csis",
.id = 1,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 28),
},
.sources = &clkset_group,
@@ -816,7 +816,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_cam",
.id = 0,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 16),
},
.sources = &clkset_group,
@@ -826,7 +826,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_cam",
.id = 1,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 20),
},
.sources = &clkset_group,
@@ -836,7 +836,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 0,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &clkset_group,
@@ -846,7 +846,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 1,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &clkset_group,
@@ -856,7 +856,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 2,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &clkset_group,
@@ -866,7 +866,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 3,
- .enable = s5pv310_clksrc_mask_cam_ctrl,
+ .enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 12),
},
.sources = &clkset_group,
@@ -876,7 +876,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimd",
.id = 0,
- .enable = s5pv310_clksrc_mask_lcd0_ctrl,
+ .enable = exynos4_clksrc_mask_lcd0_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &clkset_group,
@@ -886,7 +886,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimd",
.id = 1,
- .enable = s5pv310_clksrc_mask_lcd1_ctrl,
+ .enable = exynos4_clksrc_mask_lcd1_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &clkset_group,
@@ -896,7 +896,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_sata",
.id = -1,
- .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_mout_corebus,
@@ -906,7 +906,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_spi",
.id = 0,
- .enable = s5pv310_clksrc_mask_peril1_ctrl,
+ .enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 16),
},
.sources = &clkset_group,
@@ -916,7 +916,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_spi",
.id = 1,
- .enable = s5pv310_clksrc_mask_peril1_ctrl,
+ .enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 20),
},
.sources = &clkset_group,
@@ -926,7 +926,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_spi",
.id = 2,
- .enable = s5pv310_clksrc_mask_peril1_ctrl,
+ .enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_group,
@@ -945,7 +945,7 @@ static struct clksrc_clk clksrcs[] = {
.name = "sclk_mmc",
.id = 0,
.parent = &clk_dout_mmc0.clk,
- .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 0),
},
.reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 8, .size = 8 },
@@ -954,7 +954,7 @@ static struct clksrc_clk clksrcs[] = {
.name = "sclk_mmc",
.id = 1,
.parent = &clk_dout_mmc1.clk,
- .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 4),
},
.reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 24, .size = 8 },
@@ -963,7 +963,7 @@ static struct clksrc_clk clksrcs[] = {
.name = "sclk_mmc",
.id = 2,
.parent = &clk_dout_mmc2.clk,
- .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 8),
},
.reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 8, .size = 8 },
@@ -972,7 +972,7 @@ static struct clksrc_clk clksrcs[] = {
.name = "sclk_mmc",
.id = 3,
.parent = &clk_dout_mmc3.clk,
- .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 12),
},
.reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 24, .size = 8 },
@@ -981,7 +981,7 @@ static struct clksrc_clk clksrcs[] = {
.name = "sclk_mmc",
.id = 4,
.parent = &clk_dout_mmc4.clk,
- .enable = s5pv310_clksrc_mask_fsys_ctrl,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 16),
},
.reg_div = { .reg = S5P_CLKDIV_FSYS3, .shift = 8, .size = 8 },
@@ -1022,16 +1022,16 @@ static struct clksrc_clk *sysclks[] = {
static int xtal_rate;
-static unsigned long s5pv310_fout_apll_get_rate(struct clk *clk)
+static unsigned long exynos4_fout_apll_get_rate(struct clk *clk)
{
return s5p_get_pll45xx(xtal_rate, __raw_readl(S5P_APLL_CON0), pll_4508);
}
-static struct clk_ops s5pv310_fout_apll_ops = {
- .get_rate = s5pv310_fout_apll_get_rate,
+static struct clk_ops exynos4_fout_apll_ops = {
+ .get_rate = exynos4_fout_apll_get_rate,
};
-void __init_or_cpufreq s5pv310_setup_clocks(void)
+void __init_or_cpufreq exynos4_setup_clocks(void)
{
struct clk *xtal_clk;
unsigned long apll;
@@ -1070,12 +1070,12 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
__raw_readl(S5P_VPLL_CON1), pll_4650);
- clk_fout_apll.ops = &s5pv310_fout_apll_ops;
+ clk_fout_apll.ops = &exynos4_fout_apll_ops;
clk_fout_mpll.rate = mpll;
clk_fout_epll.rate = epll;
clk_fout_vpll.rate = vpll;
- printk(KERN_INFO "S5PV310: PLL settings, A=%ld, M=%ld, E=%ld V=%ld",
+ printk(KERN_INFO "EXYNOS4: PLL settings, A=%ld, M=%ld, E=%ld V=%ld",
apll, mpll, epll, vpll);
armclk = clk_get_rate(&clk_armclk.clk);
@@ -1086,7 +1086,7 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
aclk_160 = clk_get_rate(&clk_aclk_160.clk);
aclk_133 = clk_get_rate(&clk_aclk_133.clk);
- printk(KERN_INFO "S5PV310: ARMCLK=%ld, DMC=%ld, ACLK200=%ld\n"
+ printk(KERN_INFO "EXYNOS4: ARMCLK=%ld, DMC=%ld, ACLK200=%ld\n"
"ACLK100=%ld, ACLK160=%ld, ACLK133=%ld\n",
armclk, sclk_dmc, aclk_200,
aclk_100, aclk_160, aclk_133);
@@ -1103,7 +1103,7 @@ static struct clk *clks[] __initdata = {
/* Nothing here yet */
};
-void __init s5pv310_register_clocks(void)
+void __init exynos4_register_clocks(void)
{
int ptr;
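
Side note on the renamed gate callbacks above: each exynos4_clk_ip_*/exynos4_clksrc_mask_* helper is a thin wrapper that passes its CMU register to s5p_gatectrl(). A minimal illustrative sketch of the read-modify-write gating such a helper relies on (assuming <linux/io.h> and the Samsung struct clk with a ctrlbit field; this is not the actual plat-s5p implementation):

/* Illustrative only: set or clear the clock's control bit in a
 * memory-mapped gate register, leaving the other bits untouched. */
static int example_gatectrl(void __iomem *reg, struct clk *clk, int enable)
{
	unsigned int ctrlbit = clk->ctrlbit;
	u32 con = __raw_readl(reg);

	con = enable ? (con | ctrlbit) : (con & ~ctrlbit);
	__raw_writel(con, reg);

	return 0;
}
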
diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 0db0fb65bd70..b0ec6d3d3774 100644
--- a/arch/arm/mach-s5pv310/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/cpu.c
+/* linux/arch/arm/mach-exynos4/cpu.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,7 +19,7 @@
#include <plat/cpu.h>
#include <plat/clock.h>
-#include <plat/s5pv310.h>
+#include <plat/exynos4.h>
#include <plat/sdhci.h>
#include <mach/regs-irq.h>
@@ -29,55 +29,55 @@ extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
extern void combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq);
/* Initial IO mappings */
-static struct map_desc s5pv310_iodesc[] __initdata = {
+static struct map_desc exynos4_iodesc[] __initdata = {
{
.virtual = (unsigned long)S5P_VA_SYSRAM,
- .pfn = __phys_to_pfn(S5PV310_PA_SYSRAM),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_CMU,
- .pfn = __phys_to_pfn(S5PV310_PA_CMU),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
.length = SZ_128K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_PMU,
- .pfn = __phys_to_pfn(S5PV310_PA_PMU),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
.length = SZ_64K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_COMBINER_BASE,
- .pfn = __phys_to_pfn(S5PV310_PA_COMBINER),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_COREPERI_BASE,
- .pfn = __phys_to_pfn(S5PV310_PA_COREPERI),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
.length = SZ_8K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_L2CC,
- .pfn = __phys_to_pfn(S5PV310_PA_L2CC),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_GPIO1,
- .pfn = __phys_to_pfn(S5PV310_PA_GPIO1),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO1),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_GPIO2,
- .pfn = __phys_to_pfn(S5PV310_PA_GPIO2),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO2),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_GPIO3,
- .pfn = __phys_to_pfn(S5PV310_PA_GPIO3),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO3),
.length = SZ_256,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_DMC0,
- .pfn = __phys_to_pfn(S5PV310_PA_DMC0),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
@@ -87,13 +87,13 @@ static struct map_desc s5pv310_iodesc[] __initdata = {
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_SROMC,
- .pfn = __phys_to_pfn(S5PV310_PA_SROMC),
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
.length = SZ_4K,
.type = MT_DEVICE,
},
};
-static void s5pv310_idle(void)
+static void exynos4_idle(void)
{
if (!need_resched())
cpu_do_idle();
@@ -101,32 +101,33 @@ static void s5pv310_idle(void)
local_irq_enable();
}
-/* s5pv310_map_io
+/*
+ * exynos4_map_io
*
* register the standard cpu IO areas
-*/
-void __init s5pv310_map_io(void)
+ */
+void __init exynos4_map_io(void)
{
- iotable_init(s5pv310_iodesc, ARRAY_SIZE(s5pv310_iodesc));
+ iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
/* initialize device information early */
- s5pv310_default_sdhci0();
- s5pv310_default_sdhci1();
- s5pv310_default_sdhci2();
- s5pv310_default_sdhci3();
+ exynos4_default_sdhci0();
+ exynos4_default_sdhci1();
+ exynos4_default_sdhci2();
+ exynos4_default_sdhci3();
}
-void __init s5pv310_init_clocks(int xtal)
+void __init exynos4_init_clocks(int xtal)
{
printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
s3c24xx_register_baseclocks(xtal);
s5p_register_clocks(xtal);
- s5pv310_register_clocks();
- s5pv310_setup_clocks();
+ exynos4_register_clocks();
+ exynos4_setup_clocks();
}
-void __init s5pv310_init_irq(void)
+void __init exynos4_init_irq(void)
{
int irq;
@@ -148,29 +149,29 @@ void __init s5pv310_init_irq(void)
}
/* The parameters of s5p_init_irq() are for VIC init.
- * Theses parameters should be NULL and 0 because S5PV310
+ * These parameters should be NULL and 0 because EXYNOS4

* uses GIC instead of VIC.
*/
s5p_init_irq(NULL, 0);
}
-struct sysdev_class s5pv310_sysclass = {
- .name = "s5pv310-core",
+struct sysdev_class exynos4_sysclass = {
+ .name = "exynos4-core",
};
-static struct sys_device s5pv310_sysdev = {
- .cls = &s5pv310_sysclass,
+static struct sys_device exynos4_sysdev = {
+ .cls = &exynos4_sysclass,
};
-static int __init s5pv310_core_init(void)
+static int __init exynos4_core_init(void)
{
- return sysdev_class_register(&s5pv310_sysclass);
+ return sysdev_class_register(&exynos4_sysclass);
}
-core_initcall(s5pv310_core_init);
+core_initcall(exynos4_core_init);
#ifdef CONFIG_CACHE_L2X0
-static int __init s5pv310_l2x0_cache_init(void)
+static int __init exynos4_l2x0_cache_init(void)
{
/* TAG, Data Latency Control: 2cycle */
__raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
@@ -188,15 +189,15 @@ static int __init s5pv310_l2x0_cache_init(void)
return 0;
}
-early_initcall(s5pv310_l2x0_cache_init);
+early_initcall(exynos4_l2x0_cache_init);
#endif
-int __init s5pv310_init(void)
+int __init exynos4_init(void)
{
- printk(KERN_INFO "S5PV310: Initializing architecture\n");
+ printk(KERN_INFO "EXYNOS4: Initializing architecture\n");
/* set idle function */
- pm_idle = s5pv310_idle;
+ pm_idle = exynos4_idle;
- return sysdev_register(&s5pv310_sysdev);
+ return sysdev_register(&exynos4_sysdev);
}
diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-exynos4/cpufreq.c
index b04cbc731128..174f080b500d 100644
--- a/arch/arm/mach-s5pv310/cpufreq.c
+++ b/arch/arm/mach-exynos4/cpufreq.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/cpufreq.c
+/* linux/arch/arm/mach-exynos4/cpufreq.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - CPU frequency scaling support
+ * EXYNOS4 - CPU frequency scaling support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -39,7 +39,7 @@ static struct regulator *int_regulator;
static struct cpufreq_freqs freqs;
static unsigned int memtype;
-enum s5pv310_memory_type {
+enum exynos4_memory_type {
DDR2 = 4,
LPDDR2,
DDR3,
@@ -49,7 +49,7 @@ enum cpufreq_level_index {
L0, L1, L2, L3, CPUFREQ_LEVEL_END,
};
-static struct cpufreq_frequency_table s5pv310_freq_table[] = {
+static struct cpufreq_frequency_table exynos4_freq_table[] = {
{L0, 1000*1000},
{L1, 800*1000},
{L2, 400*1000},
@@ -160,7 +160,7 @@ struct cpufreq_voltage_table {
unsigned int int_volt;
};
-static struct cpufreq_voltage_table s5pv310_volt_table[CPUFREQ_LEVEL_END] = {
+static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
{
.index = L0,
.arm_volt = 1200000,
@@ -180,7 +180,7 @@ static struct cpufreq_voltage_table s5pv310_volt_table[CPUFREQ_LEVEL_END] = {
},
};
-static unsigned int s5pv310_apll_pms_table[CPUFREQ_LEVEL_END] = {
+static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
/* APLL FOUT L0: 1000MHz */
((250 << 16) | (6 << 8) | 1),
@@ -194,17 +194,17 @@ static unsigned int s5pv310_apll_pms_table[CPUFREQ_LEVEL_END] = {
((200 << 16) | (6 << 8) | 4),
};
-int s5pv310_verify_speed(struct cpufreq_policy *policy)
+int exynos4_verify_speed(struct cpufreq_policy *policy)
{
- return cpufreq_frequency_table_verify(policy, s5pv310_freq_table);
+ return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
}
-unsigned int s5pv310_getspeed(unsigned int cpu)
+unsigned int exynos4_getspeed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
}
-void s5pv310_set_clkdiv(unsigned int div_index)
+void exynos4_set_clkdiv(unsigned int div_index)
{
unsigned int tmp;
@@ -321,7 +321,7 @@ void s5pv310_set_clkdiv(unsigned int div_index)
} while (tmp & 0x11);
}
-static void s5pv310_set_apll(unsigned int index)
+static void exynos4_set_apll(unsigned int index)
{
unsigned int tmp;
@@ -340,7 +340,7 @@ static void s5pv310_set_apll(unsigned int index)
/* 3. Change PLL PMS values */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= s5pv310_apll_pms_table[index];
+ tmp |= exynos4_apll_pms_table[index];
__raw_writel(tmp, S5P_APLL_CON0);
/* 4. wait_lock_time */
@@ -357,77 +357,77 @@ static void s5pv310_set_apll(unsigned int index)
} while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static void s5pv310_set_frequency(unsigned int old_index, unsigned int new_index)
+static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
{
unsigned int tmp;
if (old_index > new_index) {
/* The frequency changing to L0 needs to change apll */
- if (freqs.new == s5pv310_freq_table[L0].frequency) {
+ if (freqs.new == exynos4_freq_table[L0].frequency) {
/* 1. Change the system clock divider values */
- s5pv310_set_clkdiv(new_index);
+ exynos4_set_clkdiv(new_index);
/* 2. Change the apll m,p,s value */
- s5pv310_set_apll(new_index);
+ exynos4_set_apll(new_index);
} else {
/* 1. Change the system clock divider values */
- s5pv310_set_clkdiv(new_index);
+ exynos4_set_clkdiv(new_index);
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (s5pv310_apll_pms_table[new_index] & 0x7);
+ tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0);
}
}
else if (old_index < new_index) {
/* The frequency changing from L0 needs to change apll */
- if (freqs.old == s5pv310_freq_table[L0].frequency) {
+ if (freqs.old == exynos4_freq_table[L0].frequency) {
/* 1. Change the apll m,p,s value */
- s5pv310_set_apll(new_index);
+ exynos4_set_apll(new_index);
/* 2. Change the system clock divider values */
- s5pv310_set_clkdiv(new_index);
+ exynos4_set_clkdiv(new_index);
} else {
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (s5pv310_apll_pms_table[new_index] & 0x7);
+ tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0);
/* 2. Change the system clock divider values */
- s5pv310_set_clkdiv(new_index);
+ exynos4_set_clkdiv(new_index);
}
}
}
-static int s5pv310_target(struct cpufreq_policy *policy,
+static int exynos4_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int index, old_index;
unsigned int arm_volt, int_volt;
- freqs.old = s5pv310_getspeed(policy->cpu);
+ freqs.old = exynos4_getspeed(policy->cpu);
- if (cpufreq_frequency_table_target(policy, s5pv310_freq_table,
+ if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
freqs.old, relation, &old_index))
return -EINVAL;
- if (cpufreq_frequency_table_target(policy, s5pv310_freq_table,
+ if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
target_freq, relation, &index))
return -EINVAL;
- freqs.new = s5pv310_freq_table[index].frequency;
+ freqs.new = exynos4_freq_table[index].frequency;
freqs.cpu = policy->cpu;
if (freqs.new == freqs.old)
return 0;
/* get the voltage value */
- arm_volt = s5pv310_volt_table[index].arm_volt;
- int_volt = s5pv310_volt_table[index].int_volt;
+ arm_volt = exynos4_volt_table[index].arm_volt;
+ int_volt = exynos4_volt_table[index].int_volt;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -441,7 +441,7 @@ static int s5pv310_target(struct cpufreq_policy *policy,
}
/* Clock Configuration Procedure */
- s5pv310_set_frequency(old_index, index);
+ exynos4_set_frequency(old_index, index);
/* control regulator */
if (freqs.new < freqs.old) {
@@ -458,52 +458,52 @@ static int s5pv310_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_PM
-static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy,
+static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy,
pm_message_t pmsg)
{
return 0;
}
-static int s5pv310_cpufreq_resume(struct cpufreq_policy *policy)
+static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
{
return 0;
}
#endif
-static int s5pv310_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cur = policy->min = policy->max = s5pv310_getspeed(policy->cpu);
+ policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
- cpufreq_frequency_table_get_attr(s5pv310_freq_table, policy->cpu);
+ cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
/*
- * S5PV310 multi-core processors has 2 cores
+ * EXYNOS4 multi-core processors have 2 cores
* that the frequency cannot be set independently.
* Each cpu is bound to the same speed.
* So the affected cpu is all of the cpus.
*/
cpumask_setall(policy->cpus);
- return cpufreq_frequency_table_cpuinfo(policy, s5pv310_freq_table);
+ return cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
}
-static struct cpufreq_driver s5pv310_driver = {
+static struct cpufreq_driver exynos4_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s5pv310_verify_speed,
- .target = s5pv310_target,
- .get = s5pv310_getspeed,
- .init = s5pv310_cpufreq_cpu_init,
- .name = "s5pv310_cpufreq",
+ .verify = exynos4_verify_speed,
+ .target = exynos4_target,
+ .get = exynos4_getspeed,
+ .init = exynos4_cpufreq_cpu_init,
+ .name = "exynos4_cpufreq",
#ifdef CONFIG_PM
- .suspend = s5pv310_cpufreq_suspend,
- .resume = s5pv310_cpufreq_resume,
+ .suspend = exynos4_cpufreq_suspend,
+ .resume = exynos4_cpufreq_resume,
#endif
};
-static int __init s5pv310_cpufreq_init(void)
+static int __init exynos4_cpufreq_init(void)
{
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
@@ -550,7 +550,7 @@ static int __init s5pv310_cpufreq_init(void)
printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
}
- return cpufreq_register_driver(&s5pv310_driver);
+ return cpufreq_register_driver(&exynos4_driver);
out:
if (!IS_ERR(cpu_clk))
@@ -577,4 +577,4 @@ out:
return -EINVAL;
}
-late_initcall(s5pv310_cpufreq_init);
+late_initcall(exynos4_cpufreq_init);
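
For orientation, exynos4_target() above follows the usual DVFS ordering: raise the ARM supply before moving to a higher operating point and lower it only after moving down. A hedged sketch of that ordering using the generic regulator and clk APIs (the real driver programs the dividers and APLL directly rather than calling clk_set_rate(); handles and includes — <linux/clk.h>, <linux/regulator/consumer.h> — are assumed):

/* Sketch of the voltage-before/after ordering only; illustrative,
 * not the driver's actual transition path. */
static int example_scale(struct regulator *arm_reg, struct clk *cpu_clk,
			 unsigned long old_rate, unsigned long new_rate,
			 int arm_uV)
{
	int ret = 0;

	if (new_rate > old_rate)		/* scaling up: voltage first */
		ret = regulator_set_voltage(arm_reg, arm_uV, arm_uV);
	if (!ret)
		ret = clk_set_rate(cpu_clk, new_rate);
	if (!ret && new_rate < old_rate)	/* scaling down: voltage last */
		ret = regulator_set_voltage(arm_reg, arm_uV, arm_uV);

	return ret;
}
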
diff --git a/arch/arm/mach-s5pv310/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c
index a1964242f0fa..1eed5f9f7bd3 100644
--- a/arch/arm/mach-s5pv310/dev-audio.c
+++ b/arch/arm/mach-exynos4/dev-audio.c
@@ -1,4 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/dev-audio.c
+/* linux/arch/arm/mach-exynos4/dev-audio.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright (c) 2010 Samsung Electronics Co. Ltd
* Jaswinder Singh <jassi.brar@samsung.com>
@@ -24,18 +27,18 @@ static const char *rclksrc[] = {
[1] = "i2sclk",
};
-static int s5pv310_cfg_i2s(struct platform_device *pdev)
+static int exynos4_cfg_i2s(struct platform_device *pdev)
{
/* configure GPIO for i2s port */
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 7, S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 7, S3C_GPIO_SFN(2));
break;
case 1:
- s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(2));
break;
case 2:
- s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(4));
break;
default:
printk(KERN_ERR "Invalid Device %d\n", pdev->id);
@@ -46,7 +49,7 @@ static int s5pv310_cfg_i2s(struct platform_device *pdev)
}
static struct s3c_audio_pdata i2sv5_pdata = {
- .cfg_gpio = s5pv310_cfg_i2s,
+ .cfg_gpio = exynos4_cfg_i2s,
.type = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
@@ -56,10 +59,10 @@ static struct s3c_audio_pdata i2sv5_pdata = {
},
};
-static struct resource s5pv310_i2s0_resource[] = {
+static struct resource exynos4_i2s0_resource[] = {
[0] = {
- .start = S5PV310_PA_I2S0,
- .end = S5PV310_PA_I2S0 + 0x100 - 1,
+ .start = EXYNOS4_PA_I2S0,
+ .end = EXYNOS4_PA_I2S0 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -79,11 +82,11 @@ static struct resource s5pv310_i2s0_resource[] = {
},
};
-struct platform_device s5pv310_device_i2s0 = {
+struct platform_device exynos4_device_i2s0 = {
.name = "samsung-i2s",
.id = 0,
- .num_resources = ARRAY_SIZE(s5pv310_i2s0_resource),
- .resource = s5pv310_i2s0_resource,
+ .num_resources = ARRAY_SIZE(exynos4_i2s0_resource),
+ .resource = exynos4_i2s0_resource,
.dev = {
.platform_data = &i2sv5_pdata,
},
@@ -95,7 +98,7 @@ static const char *rclksrc_v3[] = {
};
static struct s3c_audio_pdata i2sv3_pdata = {
- .cfg_gpio = s5pv310_cfg_i2s,
+ .cfg_gpio = exynos4_cfg_i2s,
.type = {
.i2s = {
.quirks = QUIRK_NO_MUXPSR,
@@ -104,10 +107,10 @@ static struct s3c_audio_pdata i2sv3_pdata = {
},
};
-static struct resource s5pv310_i2s1_resource[] = {
+static struct resource exynos4_i2s1_resource[] = {
[0] = {
- .start = S5PV310_PA_I2S1,
- .end = S5PV310_PA_I2S1 + 0x100 - 1,
+ .start = EXYNOS4_PA_I2S1,
+ .end = EXYNOS4_PA_I2S1 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -122,20 +125,20 @@ static struct resource s5pv310_i2s1_resource[] = {
},
};
-struct platform_device s5pv310_device_i2s1 = {
+struct platform_device exynos4_device_i2s1 = {
.name = "samsung-i2s",
.id = 1,
- .num_resources = ARRAY_SIZE(s5pv310_i2s1_resource),
- .resource = s5pv310_i2s1_resource,
+ .num_resources = ARRAY_SIZE(exynos4_i2s1_resource),
+ .resource = exynos4_i2s1_resource,
.dev = {
.platform_data = &i2sv3_pdata,
},
};
-static struct resource s5pv310_i2s2_resource[] = {
+static struct resource exynos4_i2s2_resource[] = {
[0] = {
- .start = S5PV310_PA_I2S2,
- .end = S5PV310_PA_I2S2 + 0x100 - 1,
+ .start = EXYNOS4_PA_I2S2,
+ .end = EXYNOS4_PA_I2S2 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -150,11 +153,11 @@ static struct resource s5pv310_i2s2_resource[] = {
},
};
-struct platform_device s5pv310_device_i2s2 = {
+struct platform_device exynos4_device_i2s2 = {
.name = "samsung-i2s",
.id = 2,
- .num_resources = ARRAY_SIZE(s5pv310_i2s2_resource),
- .resource = s5pv310_i2s2_resource,
+ .num_resources = ARRAY_SIZE(exynos4_i2s2_resource),
+ .resource = exynos4_i2s2_resource,
.dev = {
.platform_data = &i2sv3_pdata,
},
@@ -162,17 +165,17 @@ struct platform_device s5pv310_device_i2s2 = {
/* PCM Controller platform_devices */
-static int s5pv310_pcm_cfg_gpio(struct platform_device *pdev)
+static int exynos4_pcm_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
- s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 5, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 5, S3C_GPIO_SFN(3));
break;
case 1:
- s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(3));
break;
case 2:
- s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(3));
break;
default:
printk(KERN_DEBUG "Invalid PCM Controller number!");
@@ -183,13 +186,13 @@ static int s5pv310_pcm_cfg_gpio(struct platform_device *pdev)
}
static struct s3c_audio_pdata s3c_pcm_pdata = {
- .cfg_gpio = s5pv310_pcm_cfg_gpio,
+ .cfg_gpio = exynos4_pcm_cfg_gpio,
};
-static struct resource s5pv310_pcm0_resource[] = {
+static struct resource exynos4_pcm0_resource[] = {
[0] = {
- .start = S5PV310_PA_PCM0,
- .end = S5PV310_PA_PCM0 + 0x100 - 1,
+ .start = EXYNOS4_PA_PCM0,
+ .end = EXYNOS4_PA_PCM0 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -204,20 +207,20 @@ static struct resource s5pv310_pcm0_resource[] = {
},
};
-struct platform_device s5pv310_device_pcm0 = {
+struct platform_device exynos4_device_pcm0 = {
.name = "samsung-pcm",
.id = 0,
- .num_resources = ARRAY_SIZE(s5pv310_pcm0_resource),
- .resource = s5pv310_pcm0_resource,
+ .num_resources = ARRAY_SIZE(exynos4_pcm0_resource),
+ .resource = exynos4_pcm0_resource,
.dev = {
.platform_data = &s3c_pcm_pdata,
},
};
-static struct resource s5pv310_pcm1_resource[] = {
+static struct resource exynos4_pcm1_resource[] = {
[0] = {
- .start = S5PV310_PA_PCM1,
- .end = S5PV310_PA_PCM1 + 0x100 - 1,
+ .start = EXYNOS4_PA_PCM1,
+ .end = EXYNOS4_PA_PCM1 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -232,20 +235,20 @@ static struct resource s5pv310_pcm1_resource[] = {
},
};
-struct platform_device s5pv310_device_pcm1 = {
+struct platform_device exynos4_device_pcm1 = {
.name = "samsung-pcm",
.id = 1,
- .num_resources = ARRAY_SIZE(s5pv310_pcm1_resource),
- .resource = s5pv310_pcm1_resource,
+ .num_resources = ARRAY_SIZE(exynos4_pcm1_resource),
+ .resource = exynos4_pcm1_resource,
.dev = {
.platform_data = &s3c_pcm_pdata,
},
};
-static struct resource s5pv310_pcm2_resource[] = {
+static struct resource exynos4_pcm2_resource[] = {
[0] = {
- .start = S5PV310_PA_PCM2,
- .end = S5PV310_PA_PCM2 + 0x100 - 1,
+ .start = EXYNOS4_PA_PCM2,
+ .end = EXYNOS4_PA_PCM2 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -260,11 +263,11 @@ static struct resource s5pv310_pcm2_resource[] = {
},
};
-struct platform_device s5pv310_device_pcm2 = {
+struct platform_device exynos4_device_pcm2 = {
.name = "samsung-pcm",
.id = 2,
- .num_resources = ARRAY_SIZE(s5pv310_pcm2_resource),
- .resource = s5pv310_pcm2_resource,
+ .num_resources = ARRAY_SIZE(exynos4_pcm2_resource),
+ .resource = exynos4_pcm2_resource,
.dev = {
.platform_data = &s3c_pcm_pdata,
},
@@ -272,15 +275,15 @@ struct platform_device s5pv310_device_pcm2 = {
/* AC97 Controller platform devices */
-static int s5pv310_ac97_cfg_gpio(struct platform_device *pdev)
+static int exynos4_ac97_cfg_gpio(struct platform_device *pdev)
{
- return s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(4));
+ return s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(4));
}
-static struct resource s5pv310_ac97_resource[] = {
+static struct resource exynos4_ac97_resource[] = {
[0] = {
- .start = S5PV310_PA_AC97,
- .end = S5PV310_PA_AC97 + 0x100 - 1,
+ .start = EXYNOS4_PA_AC97,
+ .end = EXYNOS4_PA_AC97 + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -306,36 +309,36 @@ static struct resource s5pv310_ac97_resource[] = {
};
static struct s3c_audio_pdata s3c_ac97_pdata = {
- .cfg_gpio = s5pv310_ac97_cfg_gpio,
+ .cfg_gpio = exynos4_ac97_cfg_gpio,
};
-static u64 s5pv310_ac97_dmamask = DMA_BIT_MASK(32);
+static u64 exynos4_ac97_dmamask = DMA_BIT_MASK(32);
-struct platform_device s5pv310_device_ac97 = {
+struct platform_device exynos4_device_ac97 = {
.name = "samsung-ac97",
.id = -1,
- .num_resources = ARRAY_SIZE(s5pv310_ac97_resource),
- .resource = s5pv310_ac97_resource,
+ .num_resources = ARRAY_SIZE(exynos4_ac97_resource),
+ .resource = exynos4_ac97_resource,
.dev = {
.platform_data = &s3c_ac97_pdata,
- .dma_mask = &s5pv310_ac97_dmamask,
+ .dma_mask = &exynos4_ac97_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/* S/PDIF Controller platform_device */
-static int s5pv310_spdif_cfg_gpio(struct platform_device *pdev)
+static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
{
- s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 2, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3));
return 0;
}
-static struct resource s5pv310_spdif_resource[] = {
+static struct resource exynos4_spdif_resource[] = {
[0] = {
- .start = S5PV310_PA_SPDIF,
- .end = S5PV310_PA_SPDIF + 0x100 - 1,
+ .start = EXYNOS4_PA_SPDIF,
+ .end = EXYNOS4_PA_SPDIF + 0x100 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -346,19 +349,19 @@ static struct resource s5pv310_spdif_resource[] = {
};
static struct s3c_audio_pdata samsung_spdif_pdata = {
- .cfg_gpio = s5pv310_spdif_cfg_gpio,
+ .cfg_gpio = exynos4_spdif_cfg_gpio,
};
-static u64 s5pv310_spdif_dmamask = DMA_BIT_MASK(32);
+static u64 exynos4_spdif_dmamask = DMA_BIT_MASK(32);
-struct platform_device s5pv310_device_spdif = {
+struct platform_device exynos4_device_spdif = {
.name = "samsung-spdif",
.id = -1,
- .num_resources = ARRAY_SIZE(s5pv310_spdif_resource),
- .resource = s5pv310_spdif_resource,
+ .num_resources = ARRAY_SIZE(exynos4_spdif_resource),
+ .resource = exynos4_spdif_resource,
.dev = {
.platform_data = &samsung_spdif_pdata,
- .dma_mask = &s5pv310_spdif_dmamask,
+ .dma_mask = &exynos4_spdif_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
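
The renamed audio platform_devices above are only instantiated when a board file registers them. A hypothetical machine-init snippet showing how a mach-exynos4 board would pick them up (the device selection and function names are made up for illustration; the extern declarations are assumed to come from plat/devs.h):

/* Hypothetical board code: register a subset of the renamed devices. */
static struct platform_device *example_board_devices[] __initdata = {
	&exynos4_device_i2s0,
	&exynos4_device_pcm0,
	&exynos4_device_ac97,
};

static void __init example_board_machine_init(void)
{
	platform_add_devices(example_board_devices,
			     ARRAY_SIZE(example_board_devices));
}
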
diff --git a/arch/arm/mach-s5pv310/dev-pd.c b/arch/arm/mach-exynos4/dev-pd.c
index 58a50c2d0b67..3273f25d6a75 100644
--- a/arch/arm/mach-s5pv310/dev-pd.c
+++ b/arch/arm/mach-exynos4/dev-pd.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/dev-pd.c
+/* linux/arch/arm/mach-exynos4/dev-pd.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - Power Domain support
+ * EXYNOS4 - Power Domain support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,7 +19,7 @@
#include <plat/pd.h>
-static int s5pv310_pd_enable(struct device *dev)
+static int exynos4_pd_enable(struct device *dev)
{
struct samsung_pd_info *pdata = dev->platform_data;
u32 timeout;
@@ -42,7 +42,7 @@ static int s5pv310_pd_enable(struct device *dev)
return 0;
}
-static int s5pv310_pd_disable(struct device *dev)
+static int exynos4_pd_disable(struct device *dev)
{
struct samsung_pd_info *pdata = dev->platform_data;
u32 timeout;
@@ -64,14 +64,14 @@ static int s5pv310_pd_disable(struct device *dev)
return 0;
}
-struct platform_device s5pv310_device_pd[] = {
+struct platform_device exynos4_device_pd[] = {
{
.name = "samsung-pd",
.id = 0,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_MFC_CONF,
},
},
@@ -80,8 +80,8 @@ struct platform_device s5pv310_device_pd[] = {
.id = 1,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_G3D_CONF,
},
},
@@ -90,8 +90,8 @@ struct platform_device s5pv310_device_pd[] = {
.id = 2,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_LCD0_CONF,
},
},
@@ -100,8 +100,8 @@ struct platform_device s5pv310_device_pd[] = {
.id = 3,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_LCD1_CONF,
},
},
@@ -110,8 +110,8 @@ struct platform_device s5pv310_device_pd[] = {
.id = 4,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_TV_CONF,
},
},
@@ -120,8 +120,8 @@ struct platform_device s5pv310_device_pd[] = {
.id = 5,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_CAM_CONF,
},
},
@@ -130,8 +130,8 @@ struct platform_device s5pv310_device_pd[] = {
.id = 6,
.dev = {
.platform_data = &(struct samsung_pd_info) {
- .enable = s5pv310_pd_enable,
- .disable = s5pv310_pd_disable,
+ .enable = exynos4_pd_enable,
+ .disable = exynos4_pd_disable,
.base = S5P_PMU_GPS_CONF,
},
},
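
exynos4_pd_enable()/exynos4_pd_disable() above program the per-domain PMU configuration register named in each samsung_pd_info. A minimal sketch of the enable side, assuming the usual write-then-poll pattern; the 0x4 status offset, timeout handling and includes (<linux/io.h>, <linux/delay.h>, mach/regs-pmu.h) are illustrative assumptions, not a copy of the driver:

static int example_pd_enable(void __iomem *base)
{
	u32 timeout = 1000;

	/* Request the domain on, then wait for the PMU to confirm it. */
	__raw_writel(S5P_INT_LOCAL_PWR_EN, base);

	while ((__raw_readl(base + 0x4) & S5P_INT_LOCAL_PWR_EN)
						!= S5P_INT_LOCAL_PWR_EN) {
		if (timeout-- == 0)
			return -ETIMEDOUT;
		udelay(1);
	}

	return 0;
}
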
diff --git a/arch/arm/mach-s5pv310/dev-sysmmu.c b/arch/arm/mach-exynos4/dev-sysmmu.c
index e1bb200ac0f0..a10790a614ec 100644
--- a/arch/arm/mach-s5pv310/dev-sysmmu.c
+++ b/arch/arm/mach-exynos4/dev-sysmmu.c
@@ -1,8 +1,10 @@
-/* linux/arch/arm/mach-s5pv310/dev-sysmmu.c
+/* linux/arch/arm/mach-exynos4/dev-sysmmu.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
+ * EXYNOS4 - System MMU support
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -14,10 +16,10 @@
#include <mach/map.h>
#include <mach/irqs.h>
-static struct resource s5pv310_sysmmu_resource[] = {
+static struct resource exynos4_sysmmu_resource[] = {
[0] = {
- .start = S5PV310_PA_SYSMMU_MDMA,
- .end = S5PV310_PA_SYSMMU_MDMA + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_MDMA,
+ .end = EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -26,8 +28,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = S5PV310_PA_SYSMMU_SSS,
- .end = S5PV310_PA_SYSMMU_SSS + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_SSS,
+ .end = EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[3] = {
@@ -36,8 +38,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[4] = {
- .start = S5PV310_PA_SYSMMU_FIMC0,
- .end = S5PV310_PA_SYSMMU_FIMC0 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_FIMC0,
+ .end = EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[5] = {
@@ -46,8 +48,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[6] = {
- .start = S5PV310_PA_SYSMMU_FIMC1,
- .end = S5PV310_PA_SYSMMU_FIMC1 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_FIMC1,
+ .end = EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[7] = {
@@ -56,8 +58,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[8] = {
- .start = S5PV310_PA_SYSMMU_FIMC2,
- .end = S5PV310_PA_SYSMMU_FIMC2 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_FIMC2,
+ .end = EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[9] = {
@@ -66,8 +68,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[10] = {
- .start = S5PV310_PA_SYSMMU_FIMC3,
- .end = S5PV310_PA_SYSMMU_FIMC3 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_FIMC3,
+ .end = EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[11] = {
@@ -76,8 +78,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[12] = {
- .start = S5PV310_PA_SYSMMU_JPEG,
- .end = S5PV310_PA_SYSMMU_JPEG + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_JPEG,
+ .end = EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[13] = {
@@ -86,8 +88,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[14] = {
- .start = S5PV310_PA_SYSMMU_FIMD0,
- .end = S5PV310_PA_SYSMMU_FIMD0 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_FIMD0,
+ .end = EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[15] = {
@@ -96,8 +98,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[16] = {
- .start = S5PV310_PA_SYSMMU_FIMD1,
- .end = S5PV310_PA_SYSMMU_FIMD1 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_FIMD1,
+ .end = EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[17] = {
@@ -106,8 +108,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[18] = {
- .start = S5PV310_PA_SYSMMU_PCIe,
- .end = S5PV310_PA_SYSMMU_PCIe + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_PCIe,
+ .end = EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[19] = {
@@ -116,8 +118,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[20] = {
- .start = S5PV310_PA_SYSMMU_G2D,
- .end = S5PV310_PA_SYSMMU_G2D + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_G2D,
+ .end = EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[21] = {
@@ -126,8 +128,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[22] = {
- .start = S5PV310_PA_SYSMMU_ROTATOR,
- .end = S5PV310_PA_SYSMMU_ROTATOR + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_ROTATOR,
+ .end = EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[23] = {
@@ -136,8 +138,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[24] = {
- .start = S5PV310_PA_SYSMMU_MDMA2,
- .end = S5PV310_PA_SYSMMU_MDMA2 + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_MDMA2,
+ .end = EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[25] = {
@@ -146,8 +148,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[26] = {
- .start = S5PV310_PA_SYSMMU_TV,
- .end = S5PV310_PA_SYSMMU_TV + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_TV,
+ .end = EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[27] = {
@@ -156,8 +158,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[28] = {
- .start = S5PV310_PA_SYSMMU_MFC_L,
- .end = S5PV310_PA_SYSMMU_MFC_L + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_MFC_L,
+ .end = EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[29] = {
@@ -166,8 +168,8 @@ static struct resource s5pv310_sysmmu_resource[] = {
.flags = IORESOURCE_IRQ,
},
[30] = {
- .start = S5PV310_PA_SYSMMU_MFC_R,
- .end = S5PV310_PA_SYSMMU_MFC_R + SZ_64K - 1,
+ .start = EXYNOS4_PA_SYSMMU_MFC_R,
+ .end = EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[31] = {
@@ -177,11 +179,11 @@ static struct resource s5pv310_sysmmu_resource[] = {
},
};
-struct platform_device s5pv310_device_sysmmu = {
+struct platform_device exynos4_device_sysmmu = {
.name = "s5p-sysmmu",
.id = 32,
- .num_resources = ARRAY_SIZE(s5pv310_sysmmu_resource),
- .resource = s5pv310_sysmmu_resource,
+ .num_resources = ARRAY_SIZE(exynos4_sysmmu_resource),
+ .resource = exynos4_sysmmu_resource,
};
-EXPORT_SYMBOL(s5pv310_device_sysmmu);
+EXPORT_SYMBOL(exynos4_device_sysmmu);
diff --git a/arch/arm/mach-s5pv310/dma.c b/arch/arm/mach-exynos4/dma.c
index 20066c7c9e56..564bb530f332 100644
--- a/arch/arm/mach-s5pv310/dma.c
+++ b/arch/arm/mach-exynos4/dma.c
@@ -1,4 +1,8 @@
-/*
+/* linux/arch/arm/mach-exynos4/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
* Copyright (C) 2010 Samsung Electronics Co. Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
@@ -30,10 +34,10 @@
static u64 dma_dmamask = DMA_BIT_MASK(32);
-static struct resource s5pv310_pdma0_resource[] = {
+static struct resource exynos4_pdma0_resource[] = {
[0] = {
- .start = S5PV310_PA_PDMA0,
- .end = S5PV310_PA_PDMA0 + SZ_4K,
+ .start = EXYNOS4_PA_PDMA0,
+ .end = EXYNOS4_PA_PDMA0 + SZ_4K,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -43,7 +47,7 @@ static struct resource s5pv310_pdma0_resource[] = {
},
};
-static struct s3c_pl330_platdata s5pv310_pdma0_pdata = {
+static struct s3c_pl330_platdata exynos4_pdma0_pdata = {
.peri = {
[0] = DMACH_PCM0_RX,
[1] = DMACH_PCM0_TX,
@@ -80,22 +84,22 @@ static struct s3c_pl330_platdata s5pv310_pdma0_pdata = {
},
};
-static struct platform_device s5pv310_device_pdma0 = {
+static struct platform_device exynos4_device_pdma0 = {
.name = "s3c-pl330",
.id = 0,
- .num_resources = ARRAY_SIZE(s5pv310_pdma0_resource),
- .resource = s5pv310_pdma0_resource,
+ .num_resources = ARRAY_SIZE(exynos4_pdma0_resource),
+ .resource = exynos4_pdma0_resource,
.dev = {
.dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pv310_pdma0_pdata,
+ .platform_data = &exynos4_pdma0_pdata,
},
};
-static struct resource s5pv310_pdma1_resource[] = {
+static struct resource exynos4_pdma1_resource[] = {
[0] = {
- .start = S5PV310_PA_PDMA1,
- .end = S5PV310_PA_PDMA1 + SZ_4K,
+ .start = EXYNOS4_PA_PDMA1,
+ .end = EXYNOS4_PA_PDMA1 + SZ_4K,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -105,7 +109,7 @@ static struct resource s5pv310_pdma1_resource[] = {
},
};
-static struct s3c_pl330_platdata s5pv310_pdma1_pdata = {
+static struct s3c_pl330_platdata exynos4_pdma1_pdata = {
.peri = {
[0] = DMACH_PCM0_RX,
[1] = DMACH_PCM0_TX,
@@ -142,27 +146,27 @@ static struct s3c_pl330_platdata s5pv310_pdma1_pdata = {
},
};
-static struct platform_device s5pv310_device_pdma1 = {
+static struct platform_device exynos4_device_pdma1 = {
.name = "s3c-pl330",
.id = 1,
- .num_resources = ARRAY_SIZE(s5pv310_pdma1_resource),
- .resource = s5pv310_pdma1_resource,
+ .num_resources = ARRAY_SIZE(exynos4_pdma1_resource),
+ .resource = exynos4_pdma1_resource,
.dev = {
.dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pv310_pdma1_pdata,
+ .platform_data = &exynos4_pdma1_pdata,
},
};
-static struct platform_device *s5pv310_dmacs[] __initdata = {
- &s5pv310_device_pdma0,
- &s5pv310_device_pdma1,
+static struct platform_device *exynos4_dmacs[] __initdata = {
+ &exynos4_device_pdma0,
+ &exynos4_device_pdma1,
};
-static int __init s5pv310_dma_init(void)
+static int __init exynos4_dma_init(void)
{
- platform_add_devices(s5pv310_dmacs, ARRAY_SIZE(s5pv310_dmacs));
+ platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs));
return 0;
}
-arch_initcall(s5pv310_dma_init);
+arch_initcall(exynos4_dma_init);
diff --git a/arch/arm/mach-s5pv310/gpiolib.c b/arch/arm/mach-exynos4/gpiolib.c
index 55217b8923ec..c46fdc57d94c 100644
--- a/arch/arm/mach-s5pv310/gpiolib.c
+++ b/arch/arm/mach-exynos4/gpiolib.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/gpiolib.c
+/* linux/arch/arm/mach-exynos4/gpiolib.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - GPIOlib support
+ * EXYNOS4 - GPIOlib support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -43,159 +43,159 @@ static struct s3c_gpio_cfg gpio_cfg_noint = {
* Note: The initialization of 'base' member of s3c_gpio_chip structure
* uses the above macro and depends on the banks being listed in order here.
*/
-static struct s3c_gpio_chip s5pv310_gpio_part1_4bit[] = {
+static struct s3c_gpio_chip exynos4_gpio_part1_4bit[] = {
{
.chip = {
- .base = S5PV310_GPA0(0),
- .ngpio = S5PV310_GPIO_A0_NR,
+ .base = EXYNOS4_GPA0(0),
+ .ngpio = EXYNOS4_GPIO_A0_NR,
.label = "GPA0",
},
}, {
.chip = {
- .base = S5PV310_GPA1(0),
- .ngpio = S5PV310_GPIO_A1_NR,
+ .base = EXYNOS4_GPA1(0),
+ .ngpio = EXYNOS4_GPIO_A1_NR,
.label = "GPA1",
},
}, {
.chip = {
- .base = S5PV310_GPB(0),
- .ngpio = S5PV310_GPIO_B_NR,
+ .base = EXYNOS4_GPB(0),
+ .ngpio = EXYNOS4_GPIO_B_NR,
.label = "GPB",
},
}, {
.chip = {
- .base = S5PV310_GPC0(0),
- .ngpio = S5PV310_GPIO_C0_NR,
+ .base = EXYNOS4_GPC0(0),
+ .ngpio = EXYNOS4_GPIO_C0_NR,
.label = "GPC0",
},
}, {
.chip = {
- .base = S5PV310_GPC1(0),
- .ngpio = S5PV310_GPIO_C1_NR,
+ .base = EXYNOS4_GPC1(0),
+ .ngpio = EXYNOS4_GPIO_C1_NR,
.label = "GPC1",
},
}, {
.chip = {
- .base = S5PV310_GPD0(0),
- .ngpio = S5PV310_GPIO_D0_NR,
+ .base = EXYNOS4_GPD0(0),
+ .ngpio = EXYNOS4_GPIO_D0_NR,
.label = "GPD0",
},
}, {
.chip = {
- .base = S5PV310_GPD1(0),
- .ngpio = S5PV310_GPIO_D1_NR,
+ .base = EXYNOS4_GPD1(0),
+ .ngpio = EXYNOS4_GPIO_D1_NR,
.label = "GPD1",
},
}, {
.chip = {
- .base = S5PV310_GPE0(0),
- .ngpio = S5PV310_GPIO_E0_NR,
+ .base = EXYNOS4_GPE0(0),
+ .ngpio = EXYNOS4_GPIO_E0_NR,
.label = "GPE0",
},
}, {
.chip = {
- .base = S5PV310_GPE1(0),
- .ngpio = S5PV310_GPIO_E1_NR,
+ .base = EXYNOS4_GPE1(0),
+ .ngpio = EXYNOS4_GPIO_E1_NR,
.label = "GPE1",
},
}, {
.chip = {
- .base = S5PV310_GPE2(0),
- .ngpio = S5PV310_GPIO_E2_NR,
+ .base = EXYNOS4_GPE2(0),
+ .ngpio = EXYNOS4_GPIO_E2_NR,
.label = "GPE2",
},
}, {
.chip = {
- .base = S5PV310_GPE3(0),
- .ngpio = S5PV310_GPIO_E3_NR,
+ .base = EXYNOS4_GPE3(0),
+ .ngpio = EXYNOS4_GPIO_E3_NR,
.label = "GPE3",
},
}, {
.chip = {
- .base = S5PV310_GPE4(0),
- .ngpio = S5PV310_GPIO_E4_NR,
+ .base = EXYNOS4_GPE4(0),
+ .ngpio = EXYNOS4_GPIO_E4_NR,
.label = "GPE4",
},
}, {
.chip = {
- .base = S5PV310_GPF0(0),
- .ngpio = S5PV310_GPIO_F0_NR,
+ .base = EXYNOS4_GPF0(0),
+ .ngpio = EXYNOS4_GPIO_F0_NR,
.label = "GPF0",
},
}, {
.chip = {
- .base = S5PV310_GPF1(0),
- .ngpio = S5PV310_GPIO_F1_NR,
+ .base = EXYNOS4_GPF1(0),
+ .ngpio = EXYNOS4_GPIO_F1_NR,
.label = "GPF1",
},
}, {
.chip = {
- .base = S5PV310_GPF2(0),
- .ngpio = S5PV310_GPIO_F2_NR,
+ .base = EXYNOS4_GPF2(0),
+ .ngpio = EXYNOS4_GPIO_F2_NR,
.label = "GPF2",
},
}, {
.chip = {
- .base = S5PV310_GPF3(0),
- .ngpio = S5PV310_GPIO_F3_NR,
+ .base = EXYNOS4_GPF3(0),
+ .ngpio = EXYNOS4_GPIO_F3_NR,
.label = "GPF3",
},
},
};
-static struct s3c_gpio_chip s5pv310_gpio_part2_4bit[] = {
+static struct s3c_gpio_chip exynos4_gpio_part2_4bit[] = {
{
.chip = {
- .base = S5PV310_GPJ0(0),
- .ngpio = S5PV310_GPIO_J0_NR,
+ .base = EXYNOS4_GPJ0(0),
+ .ngpio = EXYNOS4_GPIO_J0_NR,
.label = "GPJ0",
},
}, {
.chip = {
- .base = S5PV310_GPJ1(0),
- .ngpio = S5PV310_GPIO_J1_NR,
+ .base = EXYNOS4_GPJ1(0),
+ .ngpio = EXYNOS4_GPIO_J1_NR,
.label = "GPJ1",
},
}, {
.chip = {
- .base = S5PV310_GPK0(0),
- .ngpio = S5PV310_GPIO_K0_NR,
+ .base = EXYNOS4_GPK0(0),
+ .ngpio = EXYNOS4_GPIO_K0_NR,
.label = "GPK0",
},
}, {
.chip = {
- .base = S5PV310_GPK1(0),
- .ngpio = S5PV310_GPIO_K1_NR,
+ .base = EXYNOS4_GPK1(0),
+ .ngpio = EXYNOS4_GPIO_K1_NR,
.label = "GPK1",
},
}, {
.chip = {
- .base = S5PV310_GPK2(0),
- .ngpio = S5PV310_GPIO_K2_NR,
+ .base = EXYNOS4_GPK2(0),
+ .ngpio = EXYNOS4_GPIO_K2_NR,
.label = "GPK2",
},
}, {
.chip = {
- .base = S5PV310_GPK3(0),
- .ngpio = S5PV310_GPIO_K3_NR,
+ .base = EXYNOS4_GPK3(0),
+ .ngpio = EXYNOS4_GPIO_K3_NR,
.label = "GPK3",
},
}, {
.chip = {
- .base = S5PV310_GPL0(0),
- .ngpio = S5PV310_GPIO_L0_NR,
+ .base = EXYNOS4_GPL0(0),
+ .ngpio = EXYNOS4_GPIO_L0_NR,
.label = "GPL0",
},
}, {
.chip = {
- .base = S5PV310_GPL1(0),
- .ngpio = S5PV310_GPIO_L1_NR,
+ .base = EXYNOS4_GPL1(0),
+ .ngpio = EXYNOS4_GPIO_L1_NR,
.label = "GPL1",
},
}, {
.chip = {
- .base = S5PV310_GPL2(0),
- .ngpio = S5PV310_GPIO_L2_NR,
+ .base = EXYNOS4_GPL2(0),
+ .ngpio = EXYNOS4_GPIO_L2_NR,
.label = "GPL2",
},
}, {
@@ -203,8 +203,8 @@ static struct s3c_gpio_chip s5pv310_gpio_part2_4bit[] = {
.config = &gpio_cfg_noint,
.irq_base = IRQ_EINT(0),
.chip = {
- .base = S5PV310_GPX0(0),
- .ngpio = S5PV310_GPIO_X0_NR,
+ .base = EXYNOS4_GPX0(0),
+ .ngpio = EXYNOS4_GPIO_X0_NR,
.label = "GPX0",
.to_irq = samsung_gpiolib_to_irq,
},
@@ -213,8 +213,8 @@ static struct s3c_gpio_chip s5pv310_gpio_part2_4bit[] = {
.config = &gpio_cfg_noint,
.irq_base = IRQ_EINT(8),
.chip = {
- .base = S5PV310_GPX1(0),
- .ngpio = S5PV310_GPIO_X1_NR,
+ .base = EXYNOS4_GPX1(0),
+ .ngpio = EXYNOS4_GPIO_X1_NR,
.label = "GPX1",
.to_irq = samsung_gpiolib_to_irq,
},
@@ -223,8 +223,8 @@ static struct s3c_gpio_chip s5pv310_gpio_part2_4bit[] = {
.config = &gpio_cfg_noint,
.irq_base = IRQ_EINT(16),
.chip = {
- .base = S5PV310_GPX2(0),
- .ngpio = S5PV310_GPIO_X2_NR,
+ .base = EXYNOS4_GPX2(0),
+ .ngpio = EXYNOS4_GPIO_X2_NR,
.label = "GPX2",
.to_irq = samsung_gpiolib_to_irq,
},
@@ -233,25 +233,25 @@ static struct s3c_gpio_chip s5pv310_gpio_part2_4bit[] = {
.config = &gpio_cfg_noint,
.irq_base = IRQ_EINT(24),
.chip = {
- .base = S5PV310_GPX3(0),
- .ngpio = S5PV310_GPIO_X3_NR,
+ .base = EXYNOS4_GPX3(0),
+ .ngpio = EXYNOS4_GPIO_X3_NR,
.label = "GPX3",
.to_irq = samsung_gpiolib_to_irq,
},
},
};
-static struct s3c_gpio_chip s5pv310_gpio_part3_4bit[] = {
+static struct s3c_gpio_chip exynos4_gpio_part3_4bit[] = {
{
.chip = {
- .base = S5PV310_GPZ(0),
- .ngpio = S5PV310_GPIO_Z_NR,
+ .base = EXYNOS4_GPZ(0),
+ .ngpio = EXYNOS4_GPIO_Z_NR,
.label = "GPZ",
},
},
};
-static __init int s5pv310_gpiolib_init(void)
+static __init int exynos4_gpiolib_init(void)
{
struct s3c_gpio_chip *chip;
int i;
@@ -259,8 +259,8 @@ static __init int s5pv310_gpiolib_init(void)
/* GPIO part 1 */
- chip = s5pv310_gpio_part1_4bit;
- nr_chips = ARRAY_SIZE(s5pv310_gpio_part1_4bit);
+ chip = exynos4_gpio_part1_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part1_4bit);
for (i = 0; i < nr_chips; i++, chip++) {
if (chip->config == NULL)
@@ -269,12 +269,12 @@ static __init int s5pv310_gpiolib_init(void)
chip->base = S5P_VA_GPIO1 + (i) * 0x20;
}
- samsung_gpiolib_add_4bit_chips(s5pv310_gpio_part1_4bit, nr_chips);
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part1_4bit, nr_chips);
/* GPIO part 2 */
- chip = s5pv310_gpio_part2_4bit;
- nr_chips = ARRAY_SIZE(s5pv310_gpio_part2_4bit);
+ chip = exynos4_gpio_part2_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part2_4bit);
for (i = 0; i < nr_chips; i++, chip++) {
if (chip->config == NULL)
@@ -283,12 +283,12 @@ static __init int s5pv310_gpiolib_init(void)
chip->base = S5P_VA_GPIO2 + (i) * 0x20;
}
- samsung_gpiolib_add_4bit_chips(s5pv310_gpio_part2_4bit, nr_chips);
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part2_4bit, nr_chips);
/* GPIO part 3 */
- chip = s5pv310_gpio_part3_4bit;
- nr_chips = ARRAY_SIZE(s5pv310_gpio_part3_4bit);
+ chip = exynos4_gpio_part3_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part3_4bit);
for (i = 0; i < nr_chips; i++, chip++) {
if (chip->config == NULL)
@@ -297,8 +297,8 @@ static __init int s5pv310_gpiolib_init(void)
chip->base = S5P_VA_GPIO3 + (i) * 0x20;
}
- samsung_gpiolib_add_4bit_chips(s5pv310_gpio_part3_4bit, nr_chips);
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part3_4bit, nr_chips);
return 0;
}
-core_initcall(s5pv310_gpiolib_init);
+core_initcall(exynos4_gpiolib_init);
diff --git a/arch/arm/mach-s5pv310/headsmp.S b/arch/arm/mach-exynos4/headsmp.S
index 164b7b045713..6c6cfc50c46b 100644
--- a/arch/arm/mach-s5pv310/headsmp.S
+++ b/arch/arm/mach-exynos4/headsmp.S
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/headsmp.S
+ * linux/arch/arm/mach-exynos4/headsmp.S
*
* Cloned from linux/arch/arm/mach-realview/headsmp.S
*
@@ -16,11 +16,11 @@
__INIT
/*
- * s5pv310 specific entry point for secondary CPUs. This provides
+ * exynos4 specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
* ready for them to initialise.
*/
-ENTRY(s5pv310_secondary_startup)
+ENTRY(exynos4_secondary_startup)
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
adr r4, 1f
diff --git a/arch/arm/mach-s5pv310/hotplug.c b/arch/arm/mach-exynos4/hotplug.c
index c24235c89eed..2b5909e2ccd3 100644
--- a/arch/arm/mach-s5pv310/hotplug.c
+++ b/arch/arm/mach-exynos4/hotplug.c
@@ -1,4 +1,4 @@
-/* linux arch/arm/mach-s5pv310/hotplug.c
+/* linux arch/arm/mach-exynos4/hotplug.c
*
* Cloned from linux/arch/arm/mach-realview/hotplug.c
*
@@ -30,13 +30,13 @@ static inline void cpu_enter_lowpower(void)
* Turn off coherency
*/
" mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, #0x20\n"
+ " bic %0, %0, %3\n"
" mcr p15, 0, %0, c1, c0, 1\n"
" mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 0\n"
: "=&r" (v)
- : "r" (0), "Ir" (CR_C)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
: "cc");
}
@@ -49,10 +49,10 @@ static inline void cpu_leave_lowpower(void)
" orr %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
" mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, #0x20\n"
+ " orr %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 1\n"
: "=&r" (v)
- : "Ir" (CR_C)
+ : "Ir" (CR_C), "Ir" (0x40)
: "cc");
}
diff --git a/arch/arm/mach-s5pv310/include/mach/debug-macro.S b/arch/arm/mach-exynos4/include/mach/debug-macro.S
index b0d920c474d3..58bbd049a6c4 100644
--- a/arch/arm/mach-s5pv310/include/mach/debug-macro.S
+++ b/arch/arm/mach-exynos4/include/mach/debug-macro.S
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/debug-macro.S
+/* linux/arch/arm/mach-exynos4/include/mach/debug-macro.S
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Based on arch/arm/mach-s3c6400/include/mach/debug-macro.S
*
diff --git a/arch/arm/mach-s5pv310/include/mach/dma.h b/arch/arm/mach-exynos4/include/mach/dma.h
index 81209eb1409b..81209eb1409b 100644
--- a/arch/arm/mach-s5pv310/include/mach/dma.h
+++ b/arch/arm/mach-exynos4/include/mach/dma.h
diff --git a/arch/arm/mach-s5pv310/include/mach/entry-macro.S b/arch/arm/mach-exynos4/include/mach/entry-macro.S
index e600e1d522df..d8f38c2e5654 100644
--- a/arch/arm/mach-s5pv310/include/mach/entry-macro.S
+++ b/arch/arm/mach-exynos4/include/mach/entry-macro.S
@@ -1,8 +1,8 @@
-/* arch/arm/mach-s5pv310/include/mach/entry-macro.S
+/* arch/arm/mach-exynos4/include/mach/entry-macro.S
*
* Cloned from arch/arm/mach-realview/include/mach/entry-macro.S
*
- * Low-level IRQ helper macros for S5PV310 platforms
+ * Low-level IRQ helper macros for EXYNOS4 platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-exynos4/include/mach/gpio.h b/arch/arm/mach-exynos4/include/mach/gpio.h
new file mode 100644
index 000000000000..16082998bcd8
--- /dev/null
+++ b/arch/arm/mach-exynos4/include/mach/gpio.h
@@ -0,0 +1,135 @@
+/* linux/arch/arm/mach-exynos4/include/mach/gpio.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 - GPIO lib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_GPIO_H
+#define __ASM_ARCH_GPIO_H __FILE__
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+/* Practically, GPIO banks up to GPZ are the configurable GPIO banks */
+
+/* GPIO bank sizes */
+#define EXYNOS4_GPIO_A0_NR (8)
+#define EXYNOS4_GPIO_A1_NR (6)
+#define EXYNOS4_GPIO_B_NR (8)
+#define EXYNOS4_GPIO_C0_NR (5)
+#define EXYNOS4_GPIO_C1_NR (5)
+#define EXYNOS4_GPIO_D0_NR (4)
+#define EXYNOS4_GPIO_D1_NR (4)
+#define EXYNOS4_GPIO_E0_NR (5)
+#define EXYNOS4_GPIO_E1_NR (8)
+#define EXYNOS4_GPIO_E2_NR (6)
+#define EXYNOS4_GPIO_E3_NR (8)
+#define EXYNOS4_GPIO_E4_NR (8)
+#define EXYNOS4_GPIO_F0_NR (8)
+#define EXYNOS4_GPIO_F1_NR (8)
+#define EXYNOS4_GPIO_F2_NR (8)
+#define EXYNOS4_GPIO_F3_NR (6)
+#define EXYNOS4_GPIO_J0_NR (8)
+#define EXYNOS4_GPIO_J1_NR (5)
+#define EXYNOS4_GPIO_K0_NR (7)
+#define EXYNOS4_GPIO_K1_NR (7)
+#define EXYNOS4_GPIO_K2_NR (7)
+#define EXYNOS4_GPIO_K3_NR (7)
+#define EXYNOS4_GPIO_L0_NR (8)
+#define EXYNOS4_GPIO_L1_NR (3)
+#define EXYNOS4_GPIO_L2_NR (8)
+#define EXYNOS4_GPIO_X0_NR (8)
+#define EXYNOS4_GPIO_X1_NR (8)
+#define EXYNOS4_GPIO_X2_NR (8)
+#define EXYNOS4_GPIO_X3_NR (8)
+#define EXYNOS4_GPIO_Z_NR (7)
+
+/* GPIO bank numbers */
+
+#define EXYNOS4_GPIO_NEXT(__gpio) \
+ ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
+
+enum s5p_gpio_number {
+ EXYNOS4_GPIO_A0_START = 0,
+ EXYNOS4_GPIO_A1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_A0),
+ EXYNOS4_GPIO_B_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_A1),
+ EXYNOS4_GPIO_C0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_B),
+ EXYNOS4_GPIO_C1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_C0),
+ EXYNOS4_GPIO_D0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_C1),
+ EXYNOS4_GPIO_D1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_D0),
+ EXYNOS4_GPIO_E0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_D1),
+ EXYNOS4_GPIO_E1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E0),
+ EXYNOS4_GPIO_E2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E1),
+ EXYNOS4_GPIO_E3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E2),
+ EXYNOS4_GPIO_E4_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E3),
+ EXYNOS4_GPIO_F0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E4),
+ EXYNOS4_GPIO_F1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F0),
+ EXYNOS4_GPIO_F2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F1),
+ EXYNOS4_GPIO_F3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F2),
+ EXYNOS4_GPIO_J0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F3),
+ EXYNOS4_GPIO_J1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_J0),
+ EXYNOS4_GPIO_K0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_J1),
+ EXYNOS4_GPIO_K1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K0),
+ EXYNOS4_GPIO_K2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K1),
+ EXYNOS4_GPIO_K3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K2),
+ EXYNOS4_GPIO_L0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K3),
+ EXYNOS4_GPIO_L1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_L0),
+ EXYNOS4_GPIO_L2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_L1),
+ EXYNOS4_GPIO_X0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_L2),
+ EXYNOS4_GPIO_X1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X0),
+ EXYNOS4_GPIO_X2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X1),
+ EXYNOS4_GPIO_X3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X2),
+ EXYNOS4_GPIO_Z_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X3),
+};
+
+/* EXYNOS4 GPIO number definitions */
+#define EXYNOS4_GPA0(_nr) (EXYNOS4_GPIO_A0_START + (_nr))
+#define EXYNOS4_GPA1(_nr) (EXYNOS4_GPIO_A1_START + (_nr))
+#define EXYNOS4_GPB(_nr) (EXYNOS4_GPIO_B_START + (_nr))
+#define EXYNOS4_GPC0(_nr) (EXYNOS4_GPIO_C0_START + (_nr))
+#define EXYNOS4_GPC1(_nr) (EXYNOS4_GPIO_C1_START + (_nr))
+#define EXYNOS4_GPD0(_nr) (EXYNOS4_GPIO_D0_START + (_nr))
+#define EXYNOS4_GPD1(_nr) (EXYNOS4_GPIO_D1_START + (_nr))
+#define EXYNOS4_GPE0(_nr) (EXYNOS4_GPIO_E0_START + (_nr))
+#define EXYNOS4_GPE1(_nr) (EXYNOS4_GPIO_E1_START + (_nr))
+#define EXYNOS4_GPE2(_nr) (EXYNOS4_GPIO_E2_START + (_nr))
+#define EXYNOS4_GPE3(_nr) (EXYNOS4_GPIO_E3_START + (_nr))
+#define EXYNOS4_GPE4(_nr) (EXYNOS4_GPIO_E4_START + (_nr))
+#define EXYNOS4_GPF0(_nr) (EXYNOS4_GPIO_F0_START + (_nr))
+#define EXYNOS4_GPF1(_nr) (EXYNOS4_GPIO_F1_START + (_nr))
+#define EXYNOS4_GPF2(_nr) (EXYNOS4_GPIO_F2_START + (_nr))
+#define EXYNOS4_GPF3(_nr) (EXYNOS4_GPIO_F3_START + (_nr))
+#define EXYNOS4_GPJ0(_nr) (EXYNOS4_GPIO_J0_START + (_nr))
+#define EXYNOS4_GPJ1(_nr) (EXYNOS4_GPIO_J1_START + (_nr))
+#define EXYNOS4_GPK0(_nr) (EXYNOS4_GPIO_K0_START + (_nr))
+#define EXYNOS4_GPK1(_nr) (EXYNOS4_GPIO_K1_START + (_nr))
+#define EXYNOS4_GPK2(_nr) (EXYNOS4_GPIO_K2_START + (_nr))
+#define EXYNOS4_GPK3(_nr) (EXYNOS4_GPIO_K3_START + (_nr))
+#define EXYNOS4_GPL0(_nr) (EXYNOS4_GPIO_L0_START + (_nr))
+#define EXYNOS4_GPL1(_nr) (EXYNOS4_GPIO_L1_START + (_nr))
+#define EXYNOS4_GPL2(_nr) (EXYNOS4_GPIO_L2_START + (_nr))
+#define EXYNOS4_GPX0(_nr) (EXYNOS4_GPIO_X0_START + (_nr))
+#define EXYNOS4_GPX1(_nr) (EXYNOS4_GPIO_X1_START + (_nr))
+#define EXYNOS4_GPX2(_nr) (EXYNOS4_GPIO_X2_START + (_nr))
+#define EXYNOS4_GPX3(_nr) (EXYNOS4_GPIO_X3_START + (_nr))
+#define EXYNOS4_GPZ(_nr) (EXYNOS4_GPIO_Z_START + (_nr))
+
+/* the end of the EXYNOS4 specific gpios */
+#define EXYNOS4_GPIO_END (EXYNOS4_GPZ(EXYNOS4_GPIO_Z_NR) + 1)
+#define S3C_GPIO_END EXYNOS4_GPIO_END
+
+/* define the number of gpios we need as one more than the end of the GPZ() range */
+#define ARCH_NR_GPIOS (EXYNOS4_GPZ(EXYNOS4_GPIO_Z_NR) + \
+ CONFIG_SAMSUNG_GPIO_EXTRA + 1)
+
+#include <asm-generic/gpio.h>
+
+#endif /* __ASM_ARCH_GPIO_H */
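Note on the numbering scheme above: EXYNOS4_GPIO_NEXT() chains the bank start numbers together, so each bank begins one slot (plus CONFIG_S3C_GPIO_SPACE) after the previous bank ends. The snippet below is only an illustrative sketch of that arithmetic in plain C, assuming CONFIG_S3C_GPIO_SPACE is 0; it is not part of the patch.

#include <stdio.h>

/* Sketch only: mirrors EXYNOS4_GPIO_NEXT(), assuming CONFIG_S3C_GPIO_SPACE == 0. */
#define GPIO_SPACE	0
#define NEXT(start, nr)	((start) + (nr) + GPIO_SPACE + 1)

int main(void)
{
	int a0 = 0;			/* EXYNOS4_GPIO_A0_START */
	int a1 = NEXT(a0, 8);		/* GPA0 has 8 pins -> GPA1 starts at 9 */
	int b  = NEXT(a1, 6);		/* GPA1 has 6 pins -> GPB starts at 16 */

	/* EXYNOS4_GPB(2) therefore resolves to b + 2 */
	printf("GPA1 start %d, GPB start %d, GPB(2) = %d\n", a1, b, b + 2);
	return 0;
}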
diff --git a/arch/arm/mach-s5pv310/include/mach/hardware.h b/arch/arm/mach-exynos4/include/mach/hardware.h
index 28ff9881f1a6..5109eb232f23 100644
--- a/arch/arm/mach-s5pv310/include/mach/hardware.h
+++ b/arch/arm/mach-exynos4/include/mach/hardware.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/hardware.h
+/* linux/arch/arm/mach-exynos4/include/mach/hardware.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - Hardware support
+ * EXYNOS4 - Hardware support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/io.h b/arch/arm/mach-exynos4/include/mach/io.h
index 8a7f9128391f..d5478d247535 100644
--- a/arch/arm/mach-s5pv310/include/mach/io.h
+++ b/arch/arm/mach-exynos4/include/mach/io.h
@@ -1,13 +1,13 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/io.h
+/* linux/arch/arm/mach-exynos4/include/mach/io.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
*
* Based on arch/arm/mach-s5p6442/include/mach/io.h
*
- * Default IO routines for S5PV310
+ * Default IO routines for EXYNOS4
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-exynos4/include/mach/irqs.h
index 536b0b59fc83..2dc590085a9b 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-exynos4/include/mach/irqs.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/irqs.h
+/* linux/arch/arm/mach-exynos4/include/mach/irqs.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - IRQ definitions
+ * EXYNOS4 - IRQ definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-exynos4/include/mach/map.h b/arch/arm/mach-exynos4/include/mach/map.h
new file mode 100644
index 000000000000..80a41e03cc17
--- /dev/null
+++ b/arch/arm/mach-exynos4/include/mach/map.h
@@ -0,0 +1,144 @@
+/* linux/arch/arm/mach-exynos4/include/mach/map.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS4 - Memory map definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_MAP_H
+#define __ASM_ARCH_MAP_H __FILE__
+
+#include <plat/map-base.h>
+
+/*
+ * EXYNOS4 uses a UART offset of 0x10000, while the older S5P SoCs use 0x400.
+ * It is defined here, before <plat/map-s5p.h>, to avoid a redefinition warning.
+ */
+#define S3C_UART_OFFSET (0x10000)
+
+#include <plat/map-s5p.h>
+
+#define EXYNOS4_PA_SYSRAM 0x02020000
+
+#define EXYNOS4_PA_I2S0 0x03830000
+#define EXYNOS4_PA_I2S1 0xE3100000
+#define EXYNOS4_PA_I2S2 0xE2A00000
+
+#define EXYNOS4_PA_PCM0 0x03840000
+#define EXYNOS4_PA_PCM1 0x13980000
+#define EXYNOS4_PA_PCM2 0x13990000
+
+#define EXYNOS4_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
+
+#define EXYNOS4_PA_ONENAND 0x0C000000
+#define EXYNOS4_PA_ONENAND_DMA 0x0C600000
+
+#define EXYNOS4_PA_CHIPID 0x10000000
+
+#define EXYNOS4_PA_SYSCON 0x10010000
+#define EXYNOS4_PA_PMU 0x10020000
+#define EXYNOS4_PA_CMU 0x10030000
+
+#define EXYNOS4_PA_WATCHDOG 0x10060000
+#define EXYNOS4_PA_RTC 0x10070000
+
+#define EXYNOS4_PA_DMC0 0x10400000
+
+#define EXYNOS4_PA_COMBINER 0x10448000
+
+#define EXYNOS4_PA_COREPERI 0x10500000
+#define EXYNOS4_PA_GIC_CPU 0x10500100
+#define EXYNOS4_PA_TWD 0x10500600
+#define EXYNOS4_PA_GIC_DIST 0x10501000
+#define EXYNOS4_PA_L2CC 0x10502000
+
+#define EXYNOS4_PA_MDMA 0x10810000
+#define EXYNOS4_PA_PDMA0 0x12680000
+#define EXYNOS4_PA_PDMA1 0x12690000
+
+#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
+#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000
+#define EXYNOS4_PA_SYSMMU_FIMC0 0x11A20000
+#define EXYNOS4_PA_SYSMMU_FIMC1 0x11A30000
+#define EXYNOS4_PA_SYSMMU_FIMC2 0x11A40000
+#define EXYNOS4_PA_SYSMMU_FIMC3 0x11A50000
+#define EXYNOS4_PA_SYSMMU_JPEG 0x11A60000
+#define EXYNOS4_PA_SYSMMU_FIMD0 0x11E20000
+#define EXYNOS4_PA_SYSMMU_FIMD1 0x12220000
+#define EXYNOS4_PA_SYSMMU_PCIe 0x12620000
+#define EXYNOS4_PA_SYSMMU_G2D 0x12A20000
+#define EXYNOS4_PA_SYSMMU_ROTATOR 0x12A30000
+#define EXYNOS4_PA_SYSMMU_MDMA2 0x12A40000
+#define EXYNOS4_PA_SYSMMU_TV 0x12E20000
+#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000
+#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000
+
+#define EXYNOS4_PA_GPIO1 0x11400000
+#define EXYNOS4_PA_GPIO2 0x11000000
+#define EXYNOS4_PA_GPIO3 0x03860000
+
+#define EXYNOS4_PA_MIPI_CSIS0 0x11880000
+#define EXYNOS4_PA_MIPI_CSIS1 0x11890000
+
+#define EXYNOS4_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
+
+#define EXYNOS4_PA_SROMC 0x12570000
+
+#define EXYNOS4_PA_UART 0x13800000
+
+#define EXYNOS4_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
+
+#define EXYNOS4_PA_AC97 0x139A0000
+
+#define EXYNOS4_PA_TIMER 0x139D0000
+
+#define EXYNOS4_PA_SDRAM 0x40000000
+
+#define EXYNOS4_PA_SPDIF 0xE1100000
+
+/* Compatibility Defines */
+
+#define S3C_PA_HSMMC0 EXYNOS4_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 EXYNOS4_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 EXYNOS4_PA_HSMMC(2)
+#define S3C_PA_HSMMC3 EXYNOS4_PA_HSMMC(3)
+#define S3C_PA_IIC EXYNOS4_PA_IIC(0)
+#define S3C_PA_IIC1 EXYNOS4_PA_IIC(1)
+#define S3C_PA_IIC2 EXYNOS4_PA_IIC(2)
+#define S3C_PA_IIC3 EXYNOS4_PA_IIC(3)
+#define S3C_PA_IIC4 EXYNOS4_PA_IIC(4)
+#define S3C_PA_IIC5 EXYNOS4_PA_IIC(5)
+#define S3C_PA_IIC6 EXYNOS4_PA_IIC(6)
+#define S3C_PA_IIC7 EXYNOS4_PA_IIC(7)
+#define S3C_PA_RTC EXYNOS4_PA_RTC
+#define S3C_PA_WDT EXYNOS4_PA_WATCHDOG
+
+#define S5P_PA_CHIPID EXYNOS4_PA_CHIPID
+#define S5P_PA_MIPI_CSIS0 EXYNOS4_PA_MIPI_CSIS0
+#define S5P_PA_MIPI_CSIS1 EXYNOS4_PA_MIPI_CSIS1
+#define S5P_PA_ONENAND EXYNOS4_PA_ONENAND
+#define S5P_PA_ONENAND_DMA EXYNOS4_PA_ONENAND_DMA
+#define S5P_PA_SDRAM EXYNOS4_PA_SDRAM
+#define S5P_PA_SROMC EXYNOS4_PA_SROMC
+#define S5P_PA_SYSCON EXYNOS4_PA_SYSCON
+#define S5P_PA_TIMER EXYNOS4_PA_TIMER
+
+/* UART */
+
+#define S3C_PA_UART EXYNOS4_PA_UART
+
+#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0 S5P_PA_UART(0)
+#define S5P_PA_UART1 S5P_PA_UART(1)
+#define S5P_PA_UART2 S5P_PA_UART(2)
+#define S5P_PA_UART3 S5P_PA_UART(3)
+#define S5P_PA_UART4 S5P_PA_UART(4)
+
+#define S5P_SZ_UART SZ_256
+
+#endif /* __ASM_ARCH_MAP_H */
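The S3C_UART_OFFSET note near the top of this header is what makes the S5P_PA_UART(x) compatibility defines work: on EXYNOS4 each UART block sits 64 KiB apart rather than 0x400 apart as on older S5P SoCs. A minimal sketch of the resulting addresses, using only values taken from the header above, for illustration:

#include <stdio.h>

/* Values copied from the map.h hunk above. */
#define EXYNOS4_PA_UART		0x13800000UL
#define S3C_UART_OFFSET		0x10000UL	/* 64 KiB stride on EXYNOS4 */
#define S5P_PA_UART(x)		(EXYNOS4_PA_UART + ((x) * S3C_UART_OFFSET))

int main(void)
{
	/* Physical base of UART0..UART4: 0x13800000, 0x13810000, ... */
	for (int i = 0; i <= 4; i++)
		printf("UART%d: 0x%08lx\n", i, S5P_PA_UART(i));
	return 0;
}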
diff --git a/arch/arm/mach-s5pv310/include/mach/memory.h b/arch/arm/mach-exynos4/include/mach/memory.h
index 1dffb4823245..374ef2cf7152 100644
--- a/arch/arm/mach-s5pv310/include/mach/memory.h
+++ b/arch/arm/mach-exynos4/include/mach/memory.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/memory.h
+/* linux/arch/arm/mach-exynos4/include/mach/memory.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - Memory definitions
+ * EXYNOS4 - Memory definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H __FILE__
-#define PHYS_OFFSET UL(0x40000000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
/* Maximum of 256MiB in one bank */
#define MAX_PHYSMEM_BITS 32
diff --git a/arch/arm/mach-s5pv310/include/mach/pwm-clock.h b/arch/arm/mach-exynos4/include/mach/pwm-clock.h
index 7e6da2701088..8e12090287bb 100644
--- a/arch/arm/mach-s5pv310/include/mach/pwm-clock.h
+++ b/arch/arm/mach-exynos4/include/mach/pwm-clock.h
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/pwm-clock.h
+/* linux/arch/arm/mach-exynos4/include/mach/pwm-clock.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
@@ -10,7 +10,7 @@
*
* Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
- * S5PV310 - pwm clock and timer support
+ * EXYNOS4 - pwm clock and timer support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/arch/arm/mach-exynos4/include/mach/regs-clock.h
index b5c4ada1cff5..ba8f91c04e19 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-clock.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+/* linux/arch/arm/mach-exynos4/include/mach/regs-clock.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - Clock register definitions
+ * EXYNOS4 - Clock register definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-exynos4/include/mach/regs-gpio.h b/arch/arm/mach-exynos4/include/mach/regs-gpio.h
new file mode 100644
index 000000000000..1401b21663a5
--- /dev/null
+++ b/arch/arm/mach-exynos4/include/mach/regs-gpio.h
@@ -0,0 +1,42 @@
+/* linux/arch/arm/mach-exynos4/include/mach/regs-gpio.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 - GPIO (including EINT) register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_GPIO_H
+#define __ASM_ARCH_REGS_GPIO_H __FILE__
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#define EXYNOS4_EINT40CON (S5P_VA_GPIO2 + 0xE00)
+#define S5P_EINT_CON(x) (EXYNOS4_EINT40CON + ((x) * 0x4))
+
+#define EXYNOS4_EINT40FLTCON0 (S5P_VA_GPIO2 + 0xE80)
+#define S5P_EINT_FLTCON(x) (EXYNOS4_EINT40FLTCON0 + ((x) * 0x4))
+
+#define EXYNOS4_EINT40MASK (S5P_VA_GPIO2 + 0xF00)
+#define S5P_EINT_MASK(x) (EXYNOS4_EINT40MASK + ((x) * 0x4))
+
+#define EXYNOS4_EINT40PEND (S5P_VA_GPIO2 + 0xF40)
+#define S5P_EINT_PEND(x) (EXYNOS4_EINT40PEND + ((x) * 0x4))
+
+#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
+
+#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
+
+#define EINT_MODE S3C_GPIO_SFN(0xf)
+
+#define EINT_GPIO_0(x) EXYNOS4_GPX0(x)
+#define EINT_GPIO_1(x) EXYNOS4_GPX1(x)
+#define EINT_GPIO_2(x) EXYNOS4_GPX2(x)
+#define EINT_GPIO_3(x) EXYNOS4_GPX3(x)
+
+#endif /* __ASM_ARCH_REGS_GPIO_H */
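Each group of eight external interrupt lines shares one CON/FLTCON/MASK/PEND register, which is what EINT_REG_NR() and eint_irq_to_bit() encode. A small sketch of that split, assuming EINT_OFFSET() simply returns the line index 0-31 (it comes from irqs.h, which is not shown in this hunk):

#include <stdio.h>

/* Assumption: EINT_OFFSET() yields the external-interrupt line index 0..31. */
static unsigned int eint_reg_nr(unsigned int line)	{ return line >> 3; }
static unsigned int eint_bit(unsigned int line)		{ return 1u << (line & 0x7); }

int main(void)
{
	unsigned int line = 19;		/* e.g. XEINT19 */

	/* Line 19 falls in register index 2 and is bit 3 of that register. */
	printf("EINT%u -> register %u, bit mask 0x%02x\n",
	       line, eint_reg_nr(line), eint_bit(line));
	return 0;
}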
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-irq.h b/arch/arm/mach-exynos4/include/mach/regs-irq.h
index c6e09c7f9161..9c7b4bfd546f 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-irq.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-irq.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-irq.h
+/* linux/arch/arm/mach-exynos4/include/mach/regs-irq.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - IRQ register definitions
+ * EXYNOS4 - IRQ register definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-mem.h b/arch/arm/mach-exynos4/include/mach/regs-mem.h
index 834227140eaa..0368b5a27252 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-mem.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-mem.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-mem.h
+/* linux/arch/arm/mach-exynos4/include/mach/regs-mem.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - SROMC and DMC register definitions
+ * EXYNOS4 - SROMC and DMC register definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-pmu.h b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
index fb333d0f6073..2ddd6175dfa0 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
+/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - Power management unit definition
+ * EXYNOS4 - Power management unit definition
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,7 +18,7 @@
#define S5P_PMUREG(x) (S5P_VA_PMU + (x))
#define S5P_PMU_CAM_CONF S5P_PMUREG(0x3C00)
-#define S5P_PMU_TV_CONF S5P_PMUREG(0x3C20)
+#define S5P_PMU_TV_CONF S5P_PMUREG(0x3C20)
#define S5P_PMU_MFC_CONF S5P_PMUREG(0x3C40)
#define S5P_PMU_G3D_CONF S5P_PMUREG(0x3C60)
#define S5P_PMU_LCD0_CONF S5P_PMUREG(0x3C80)
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h b/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
index 0b28e81a16f7..b6aef863b9d6 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
+/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - System MMU register
+ * EXYNOS4 - System MMU register
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/smp.h b/arch/arm/mach-exynos4/include/mach/smp.h
index 393ccbd52c4a..a463dcebcfd3 100644
--- a/arch/arm/mach-s5pv310/include/mach/smp.h
+++ b/arch/arm/mach-exynos4/include/mach/smp.h
@@ -1,4 +1,4 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/smp.h
+/* linux/arch/arm/mach-exynos4/include/mach/smp.h
*
* Cloned from arch/arm/mach-realview/include/mach/smp.h
*/
diff --git a/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/arch/arm/mach-exynos4/include/mach/sysmmu.h
index 598fc5c9211b..1428adad8379 100644
--- a/arch/arm/mach-s5pv310/include/mach/sysmmu.h
+++ b/arch/arm/mach-exynos4/include/mach/sysmmu.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/sysmmu.h
+/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * Samsung sysmmu driver for S5PV310
+ * Samsung sysmmu driver for EXYNOS4
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -13,10 +13,10 @@
#ifndef __ASM_ARM_ARCH_SYSMMU_H
#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
-#define S5PV310_SYSMMU_TOTAL_IPNUM 16
-#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM
+#define EXYNOS4_SYSMMU_TOTAL_IPNUM 16
+#define S5P_SYSMMU_TOTAL_IPNUM EXYNOS4_SYSMMU_TOTAL_IPNUM
-enum s5pv310_sysmmu_ips {
+enum exynos4_sysmmu_ips {
SYSMMU_MDMA,
SYSMMU_SSS,
SYSMMU_FIMC0,
@@ -35,7 +35,7 @@ enum s5pv310_sysmmu_ips {
SYSMMU_MFC_R,
};
-static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
+static char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
"SYSMMU_MDMA" ,
"SYSMMU_SSS" ,
"SYSMMU_FIMC0" ,
@@ -54,7 +54,7 @@ static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
"SYSMMU_MFC_R" ,
};
-typedef enum s5pv310_sysmmu_ips sysmmu_ips;
+typedef enum exynos4_sysmmu_ips sysmmu_ips;
struct sysmmu_tt_info {
unsigned long *pgd;
diff --git a/arch/arm/mach-s5pv310/include/mach/system.h b/arch/arm/mach-exynos4/include/mach/system.h
index d10c009cf0f1..5e3220c18fc7 100644
--- a/arch/arm/mach-s5pv310/include/mach/system.h
+++ b/arch/arm/mach-exynos4/include/mach/system.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/system.h
+/* linux/arch/arm/mach-exynos4/include/mach/system.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - system support header
+ * EXYNOS4 - system support header
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/timex.h b/arch/arm/mach-exynos4/include/mach/timex.h
index bd2359b952b4..6d138750a708 100644
--- a/arch/arm/mach-s5pv310/include/mach/timex.h
+++ b/arch/arm/mach-exynos4/include/mach/timex.h
@@ -1,14 +1,14 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/timex.h
+/* linux/arch/arm/mach-exynos4/include/mach/timex.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright (c) 2003-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Based on arch/arm/mach-s5p6442/include/mach/timex.h
*
- * S5PV310 - time parameters
+ * EXYNOS4 - time parameters
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/uncompress.h b/arch/arm/mach-exynos4/include/mach/uncompress.h
index 59593c1e2416..21d97bcd9acb 100644
--- a/arch/arm/mach-s5pv310/include/mach/uncompress.h
+++ b/arch/arm/mach-exynos4/include/mach/uncompress.h
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/uncompress.h
+/* linux/arch/arm/mach-exynos4/include/mach/uncompress.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - uncompress code
+ * EXYNOS4 - uncompress code
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv310/include/mach/vmalloc.h b/arch/arm/mach-exynos4/include/mach/vmalloc.h
index 65759fb97581..284330e571d2 100644
--- a/arch/arm/mach-s5pv310/include/mach/vmalloc.h
+++ b/arch/arm/mach-exynos4/include/mach/vmalloc.h
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/vmalloc.h
+/* linux/arch/arm/mach-exynos4/include/mach/vmalloc.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright 2010 Ben Dooks <ben-linux@fluff.org>
*
@@ -11,7 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * S5PV310 vmalloc definition
+ * EXYNOS4 vmalloc definition
*/
#ifndef __ASM_ARCH_VMALLOC_H
diff --git a/arch/arm/mach-s5pv310/init.c b/arch/arm/mach-exynos4/init.c
index 182dcf42cfb4..cf91f50e43ab 100644
--- a/arch/arm/mach-s5pv310/init.c
+++ b/arch/arm/mach-exynos4/init.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/mach-s5pv310/init.c
+/* linux/arch/arm/mach-exynos4/init.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
@@ -14,7 +14,7 @@
#include <plat/devs.h>
#include <plat/regs-serial.h>
-static struct s3c24xx_uart_clksrc s5pv310_serial_clocks[] = {
+static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
[0] = {
.name = "uclk1",
.divisor = 1,
@@ -24,7 +24,7 @@ static struct s3c24xx_uart_clksrc s5pv310_serial_clocks[] = {
};
/* uart registration process */
-void __init s5pv310_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
struct s3c2410_uartcfg *tcfg = cfg;
u32 ucnt;
@@ -32,8 +32,8 @@ void __init s5pv310_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
if (!tcfg->clocks) {
tcfg->has_fracval = 1;
- tcfg->clocks = s5pv310_serial_clocks;
- tcfg->clocks_size = ARRAY_SIZE(s5pv310_serial_clocks);
+ tcfg->clocks = exynos4_serial_clocks;
+ tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
}
}
diff --git a/arch/arm/mach-s5pv310/irq-combiner.c b/arch/arm/mach-exynos4/irq-combiner.c
index 1ea4a9e83bbe..31618d91ce15 100644
--- a/arch/arm/mach-s5pv310/irq-combiner.c
+++ b/arch/arm/mach-exynos4/irq-combiner.c
@@ -1,6 +1,6 @@
-/* linux/arch/arm/mach-s5pv310/irq-combiner.c
+/* linux/arch/arm/mach-exynos4/irq-combiner.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Based on arch/arm/common/gic.c
diff --git a/arch/arm/mach-s5pv310/irq-eint.c b/arch/arm/mach-exynos4/irq-eint.c
index 477bd9e97f0f..4f7ad4a796e4 100644
--- a/arch/arm/mach-s5pv310/irq-eint.c
+++ b/arch/arm/mach-exynos4/irq-eint.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/irq-eint.c
+/* linux/arch/arm/mach-exynos4/irq-eint.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 - IRQ EINT support
+ * EXYNOS4 - IRQ EINT support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,7 +27,7 @@ static DEFINE_SPINLOCK(eint_lock);
static unsigned int eint0_15_data[16];
-static unsigned int s5pv310_get_irq_nr(unsigned int number)
+static unsigned int exynos4_get_irq_nr(unsigned int number)
{
u32 ret = 0;
@@ -48,7 +48,7 @@ static unsigned int s5pv310_get_irq_nr(unsigned int number)
return ret;
}
-static inline void s5pv310_irq_eint_mask(struct irq_data *data)
+static inline void exynos4_irq_eint_mask(struct irq_data *data)
{
u32 mask;
@@ -59,7 +59,7 @@ static inline void s5pv310_irq_eint_mask(struct irq_data *data)
spin_unlock(&eint_lock);
}
-static void s5pv310_irq_eint_unmask(struct irq_data *data)
+static void exynos4_irq_eint_unmask(struct irq_data *data)
{
u32 mask;
@@ -70,19 +70,19 @@ static void s5pv310_irq_eint_unmask(struct irq_data *data)
spin_unlock(&eint_lock);
}
-static inline void s5pv310_irq_eint_ack(struct irq_data *data)
+static inline void exynos4_irq_eint_ack(struct irq_data *data)
{
__raw_writel(eint_irq_to_bit(data->irq),
S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
-static void s5pv310_irq_eint_maskack(struct irq_data *data)
+static void exynos4_irq_eint_maskack(struct irq_data *data)
{
- s5pv310_irq_eint_mask(data);
- s5pv310_irq_eint_ack(data);
+ exynos4_irq_eint_mask(data);
+ exynos4_irq_eint_ack(data);
}
-static int s5pv310_irq_eint_set_type(struct irq_data *data, unsigned int type)
+static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
int offs = EINT_OFFSET(data->irq);
int shift;
@@ -145,19 +145,19 @@ static int s5pv310_irq_eint_set_type(struct irq_data *data, unsigned int type)
return 0;
}
-static struct irq_chip s5pv310_irq_eint = {
- .name = "s5pv310-eint",
- .irq_mask = s5pv310_irq_eint_mask,
- .irq_unmask = s5pv310_irq_eint_unmask,
- .irq_mask_ack = s5pv310_irq_eint_maskack,
- .irq_ack = s5pv310_irq_eint_ack,
- .irq_set_type = s5pv310_irq_eint_set_type,
+static struct irq_chip exynos4_irq_eint = {
+ .name = "exynos4-eint",
+ .irq_mask = exynos4_irq_eint_mask,
+ .irq_unmask = exynos4_irq_eint_unmask,
+ .irq_mask_ack = exynos4_irq_eint_maskack,
+ .irq_ack = exynos4_irq_eint_ack,
+ .irq_set_type = exynos4_irq_eint_set_type,
#ifdef CONFIG_PM
.irq_set_wake = s3c_irqext_wake,
#endif
};
-/* s5pv310_irq_demux_eint
+/* exynos4_irq_demux_eint
*
 * This function demuxes the IRQ from EINTs 16 to 31.
* It is designed to be inlined into the specific handler
@@ -165,7 +165,7 @@ static struct irq_chip s5pv310_irq_eint = {
*
 * Each EINT pend/mask register handles eight of them.
*/
-static inline void s5pv310_irq_demux_eint(unsigned int start)
+static inline void exynos4_irq_demux_eint(unsigned int start)
{
unsigned int irq;
@@ -182,13 +182,13 @@ static inline void s5pv310_irq_demux_eint(unsigned int start)
}
}
-static void s5pv310_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
- s5pv310_irq_demux_eint(IRQ_EINT(16));
- s5pv310_irq_demux_eint(IRQ_EINT(24));
+ exynos4_irq_demux_eint(IRQ_EINT(16));
+ exynos4_irq_demux_eint(IRQ_EINT(24));
}
-static void s5pv310_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
+static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
u32 *irq_data = get_irq_data(irq);
struct irq_chip *chip = get_irq_chip(irq);
@@ -203,27 +203,27 @@ static void s5pv310_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
chip->irq_unmask(&desc->irq_data);
}
-int __init s5pv310_init_irq_eint(void)
+int __init exynos4_init_irq_eint(void)
{
int irq;
for (irq = 0 ; irq <= 31 ; irq++) {
- set_irq_chip(IRQ_EINT(irq), &s5pv310_irq_eint);
+ set_irq_chip(IRQ_EINT(irq), &exynos4_irq_eint);
set_irq_handler(IRQ_EINT(irq), handle_level_irq);
set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
}
- set_irq_chained_handler(IRQ_EINT16_31, s5pv310_irq_demux_eint16_31);
+ set_irq_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
for (irq = 0 ; irq <= 15 ; irq++) {
eint0_15_data[irq] = IRQ_EINT(irq);
- set_irq_data(s5pv310_get_irq_nr(irq), &eint0_15_data[irq]);
- set_irq_chained_handler(s5pv310_get_irq_nr(irq),
- s5pv310_irq_eint0_15);
+ set_irq_data(exynos4_get_irq_nr(irq), &eint0_15_data[irq]);
+ set_irq_chained_handler(exynos4_get_irq_nr(irq),
+ exynos4_irq_eint0_15);
}
return 0;
}
-arch_initcall(s5pv310_init_irq_eint);
+arch_initcall(exynos4_init_irq_eint);
diff --git a/arch/arm/mach-s5pv310/localtimer.c b/arch/arm/mach-exynos4/localtimer.c
index 2784036cd8b1..6bf3d0ab9627 100644
--- a/arch/arm/mach-s5pv310/localtimer.c
+++ b/arch/arm/mach-exynos4/localtimer.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/mach-s5pv310/localtimer.c
+/* linux/arch/arm/mach-exynos4/localtimer.c
*
* Cloned from linux/arch/arm/mach-realview/localtimer.c
*
@@ -18,8 +18,9 @@
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
evt->irq = IRQ_LOCALTIMER;
twd_timer_setup(evt);
+ return 0;
}
diff --git a/arch/arm/mach-s5pv310/mach-smdkc210.c b/arch/arm/mach-exynos4/mach-smdkc210.c
index d9cab02e23ca..25a256818122 100644
--- a/arch/arm/mach-s5pv310/mach-smdkc210.c
+++ b/arch/arm/mach-exynos4/mach-smdkc210.c
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/mach-smdkc210.c
+/* linux/arch/arm/mach-exynos4/mach-smdkc210.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,7 +21,7 @@
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
-#include <plat/s5pv310.h>
+#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/sdhci.h>
@@ -77,10 +77,10 @@ static struct s3c2410_uartcfg smdkc210_uartcfgs[] __initdata = {
static struct s3c_sdhci_platdata smdkc210_hsmmc0_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio = EXYNOS4_GPK0(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
-#ifdef CONFIG_S5PV310_SDHCI_CH0_8BIT
+#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
@@ -88,17 +88,17 @@ static struct s3c_sdhci_platdata smdkc210_hsmmc0_pdata __initdata = {
static struct s3c_sdhci_platdata smdkc210_hsmmc1_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio = EXYNOS4_GPK0(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
static struct s3c_sdhci_platdata smdkc210_hsmmc2_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio = EXYNOS4_GPK2(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
-#ifdef CONFIG_S5PV310_SDHCI_CH2_8BIT
+#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
@@ -106,15 +106,15 @@ static struct s3c_sdhci_platdata smdkc210_hsmmc2_pdata __initdata = {
static struct s3c_sdhci_platdata smdkc210_hsmmc3_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio = EXYNOS4_GPK2(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
static struct resource smdkc210_smsc911x_resources[] = {
[0] = {
- .start = S5PV310_PA_SROM_BANK(1),
- .end = S5PV310_PA_SROM_BANK(1) + SZ_64K - 1,
+ .start = EXYNOS4_PA_SROM_BANK(1),
+ .end = EXYNOS4_PA_SROM_BANK(1) + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -154,16 +154,16 @@ static struct platform_device *smdkc210_devices[] __initdata = {
&s3c_device_i2c1,
&s3c_device_rtc,
&s3c_device_wdt,
- &s5pv310_device_ac97,
- &s5pv310_device_i2s0,
- &s5pv310_device_pd[PD_MFC],
- &s5pv310_device_pd[PD_G3D],
- &s5pv310_device_pd[PD_LCD0],
- &s5pv310_device_pd[PD_LCD1],
- &s5pv310_device_pd[PD_CAM],
- &s5pv310_device_pd[PD_TV],
- &s5pv310_device_pd[PD_GPS],
- &s5pv310_device_sysmmu,
+ &exynos4_device_ac97,
+ &exynos4_device_i2s0,
+ &exynos4_device_pd[PD_MFC],
+ &exynos4_device_pd[PD_G3D],
+ &exynos4_device_pd[PD_LCD0],
+ &exynos4_device_pd[PD_LCD1],
+ &exynos4_device_pd[PD_CAM],
+ &exynos4_device_pd[PD_TV],
+ &exynos4_device_pd[PD_GPS],
+ &exynos4_device_sysmmu,
&samsung_asoc_dma,
&smdkc210_smsc911x,
};
@@ -216,8 +216,8 @@ static void __init smdkc210_machine_init(void)
MACHINE_START(SMDKC210, "SMDKC210")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.boot_params = S5P_PA_SDRAM + 0x100,
- .init_irq = s5pv310_init_irq,
+ .init_irq = exynos4_init_irq,
.map_io = smdkc210_map_io,
.init_machine = smdkc210_machine_init,
- .timer = &s5pv310_timer,
+ .timer = &exynos4_timer,
MACHINE_END
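For context, the smsc911x window in this board file sits in SROM bank 1: EXYNOS4_PA_SROM_BANK(x) from map.h places the banks 16 MiB apart starting at 0x04000000, and the board claims the first 64 KiB of bank 1. A quick, illustrative check of that arithmetic:

#include <stdio.h>

/* Values from map.h and the resource definition above. */
#define EXYNOS4_PA_SROM_BANK(x)	(0x04000000UL + ((x) * 0x01000000UL))
#define SZ_64K			0x00010000UL

int main(void)
{
	unsigned long start = EXYNOS4_PA_SROM_BANK(1);
	unsigned long end   = start + SZ_64K - 1;

	/* Expected window: 0x05000000 - 0x0500ffff */
	printf("smsc911x: 0x%08lx - 0x%08lx\n", start, end);
	return 0;
}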
diff --git a/arch/arm/mach-s5pv310/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index b1cddbf3c616..07860a5b2f5d 100644
--- a/arch/arm/mach-s5pv310/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/mach-smdkv310.c
+/* linux/arch/arm/mach-exynos4/mach-smdkv310.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,7 +21,7 @@
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
-#include <plat/s5pv310.h>
+#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/sdhci.h>
@@ -77,10 +77,10 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio = EXYNOS4_GPK0(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
-#ifdef CONFIG_S5PV310_SDHCI_CH0_8BIT
+#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
@@ -88,17 +88,17 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK0(2),
+ .ext_cd_gpio = EXYNOS4_GPK0(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio = EXYNOS4_GPK2(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
-#ifdef CONFIG_S5PV310_SDHCI_CH2_8BIT
+#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
@@ -106,15 +106,15 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
static struct s3c_sdhci_platdata smdkv310_hsmmc3_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = S5PV310_GPK2(2),
+ .ext_cd_gpio = EXYNOS4_GPK2(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
static struct resource smdkv310_smsc911x_resources[] = {
[0] = {
- .start = S5PV310_PA_SROM_BANK(1),
- .end = S5PV310_PA_SROM_BANK(1) + SZ_64K - 1,
+ .start = EXYNOS4_PA_SROM_BANK(1),
+ .end = EXYNOS4_PA_SROM_BANK(1) + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -154,16 +154,16 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s3c_device_i2c1,
&s3c_device_rtc,
&s3c_device_wdt,
- &s5pv310_device_ac97,
- &s5pv310_device_i2s0,
- &s5pv310_device_pd[PD_MFC],
- &s5pv310_device_pd[PD_G3D],
- &s5pv310_device_pd[PD_LCD0],
- &s5pv310_device_pd[PD_LCD1],
- &s5pv310_device_pd[PD_CAM],
- &s5pv310_device_pd[PD_TV],
- &s5pv310_device_pd[PD_GPS],
- &s5pv310_device_sysmmu,
+ &exynos4_device_ac97,
+ &exynos4_device_i2s0,
+ &exynos4_device_pd[PD_MFC],
+ &exynos4_device_pd[PD_G3D],
+ &exynos4_device_pd[PD_LCD0],
+ &exynos4_device_pd[PD_LCD1],
+ &exynos4_device_pd[PD_CAM],
+ &exynos4_device_pd[PD_TV],
+ &exynos4_device_pd[PD_GPS],
+ &exynos4_device_sysmmu,
&samsung_asoc_dma,
&smdkv310_smsc911x,
};
@@ -217,8 +217,8 @@ MACHINE_START(SMDKV310, "SMDKV310")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
/* Maintainer: Changhwan Youn <chaos.youn@samsung.com> */
.boot_params = S5P_PA_SDRAM + 0x100,
- .init_irq = s5pv310_init_irq,
+ .init_irq = exynos4_init_irq,
.map_io = smdkv310_map_io,
.init_machine = smdkv310_machine_init,
- .timer = &s5pv310_timer,
+ .timer = &exynos4_timer,
MACHINE_END
diff --git a/arch/arm/mach-s5pv310/mach-universal_c210.c b/arch/arm/mach-exynos4/mach-universal_c210.c
index 36bc3cf825e3..b22b6ef2a94b 100644
--- a/arch/arm/mach-s5pv310/mach-universal_c210.c
+++ b/arch/arm/mach-exynos4/mach-universal_c210.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/mach-s5pv310/mach-universal_c210.c
+/* linux/arch/arm/mach-exynos4/mach-universal_c210.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
@@ -21,7 +21,7 @@
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
-#include <plat/s5pv310.h>
+#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/sdhci.h>
@@ -72,35 +72,35 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
static struct gpio_keys_button universal_gpio_keys_tables[] = {
{
.code = KEY_VOLUMEUP,
- .gpio = S5PV310_GPX2(0), /* XEINT16 */
+ .gpio = EXYNOS4_GPX2(0), /* XEINT16 */
.desc = "gpio-keys: KEY_VOLUMEUP",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_VOLUMEDOWN,
- .gpio = S5PV310_GPX2(1), /* XEINT17 */
+ .gpio = EXYNOS4_GPX2(1), /* XEINT17 */
.desc = "gpio-keys: KEY_VOLUMEDOWN",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_CONFIG,
- .gpio = S5PV310_GPX2(2), /* XEINT18 */
+ .gpio = EXYNOS4_GPX2(2), /* XEINT18 */
.desc = "gpio-keys: KEY_CONFIG",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_CAMERA,
- .gpio = S5PV310_GPX2(3), /* XEINT19 */
+ .gpio = EXYNOS4_GPX2(3), /* XEINT19 */
.desc = "gpio-keys: KEY_CAMERA",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_OK,
- .gpio = S5PV310_GPX3(5), /* XEINT29 */
+ .gpio = EXYNOS4_GPX3(5), /* XEINT29 */
.desc = "gpio-keys: KEY_OK",
.type = EV_KEY,
.active_low = 1,
@@ -146,7 +146,7 @@ static struct regulator_init_data mmc0_fixed_voltage_init_data = {
static struct fixed_voltage_config mmc0_fixed_voltage_config = {
.supply_name = "MASSMEMORY_EN",
.microvolts = 2800000,
- .gpio = S5PV310_GPE1(3),
+ .gpio = EXYNOS4_GPE1(3),
.enable_high = true,
.init_data = &mmc0_fixed_voltage_init_data,
};
@@ -165,7 +165,7 @@ static struct s3c_sdhci_platdata universal_hsmmc2_data __initdata = {
.host_caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_DISABLE,
- .ext_cd_gpio = S5PV310_GPX3(4), /* XEINT_28 */
+ .ext_cd_gpio = EXYNOS4_GPX3(4), /* XEINT_28 */
.ext_cd_gpio_invert = 1,
.cd_type = S3C_SDHCI_CD_GPIO,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
@@ -230,8 +230,8 @@ static void __init universal_machine_init(void)
MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
/* Maintainer: Kyungmin Park <kyungmin.park@samsung.com> */
.boot_params = S5P_PA_SDRAM + 0x100,
- .init_irq = s5pv310_init_irq,
+ .init_irq = exynos4_init_irq,
.map_io = universal_map_io,
.init_machine = universal_machine_init,
- .timer = &s5pv310_timer,
+ .timer = &exynos4_timer,
MACHINE_END
diff --git a/arch/arm/mach-s5pv310/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index 34093b069f67..6d35878ec1aa 100644
--- a/arch/arm/mach-s5pv310/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -1,7 +1,7 @@
-/* linux/arch/arm/mach-s5pv310/platsmp.c
+/* linux/arch/arm/mach-exynos4/platsmp.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Cloned from linux/arch/arm/mach-vexpress/platsmp.c
*
@@ -28,7 +28,7 @@
#include <mach/hardware.h>
#include <mach/regs-clock.h>
-extern void s5pv310_secondary_startup(void);
+extern void exynos4_secondary_startup(void);
/*
* control for which core is the next to come out of the secondary
@@ -139,7 +139,7 @@ void __init smp_init_cpus(void)
/* sanity check */
if (ncores > NR_CPUS) {
printk(KERN_WARNING
- "S5PV310: no. of cores (%d) greater than configured "
+ "EXYNOS4: no. of cores (%d) greater than configured "
"maximum of %d - clipping\n",
ncores, NR_CPUS);
ncores = NR_CPUS;
@@ -168,5 +168,5 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
+ __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)), S5P_VA_SYSRAM);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c0.c b/arch/arm/mach-exynos4/setup-i2c0.c
index f47f8f3152ec..d395bd17c38b 100644
--- a/arch/arm/mach-s5pv310/setup-i2c0.c
+++ b/arch/arm/mach-exynos4/setup-i2c0.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c0.c
+ * linux/arch/arm/mach-exynos4/setup-i2c0.c
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
@@ -21,6 +21,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPD1(0), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPD1(0), 2,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c1.c b/arch/arm/mach-exynos4/setup-i2c1.c
index 9d07e4e2f14c..fd7235a43f6e 100644
--- a/arch/arm/mach-s5pv310/setup-i2c1.c
+++ b/arch/arm/mach-exynos4/setup-i2c1.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c1.c
+ * linux/arch/arm/mach-exynos4/setup-i2c1.c
*
* Copyright (C) 2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPD1(2), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPD1(2), 2,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c2.c b/arch/arm/mach-exynos4/setup-i2c2.c
index 4163b1233daf..2694b19e8b37 100644
--- a/arch/arm/mach-s5pv310/setup-i2c2.c
+++ b/arch/arm/mach-exynos4/setup-i2c2.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c2.c
+ * linux/arch/arm/mach-exynos4/setup-i2c2.c
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c2_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPA0(6), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPA0(6), 2,
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c3.c b/arch/arm/mach-exynos4/setup-i2c3.c
index 180f153d2a20..379bd306993f 100644
--- a/arch/arm/mach-s5pv310/setup-i2c3.c
+++ b/arch/arm/mach-exynos4/setup-i2c3.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c3.c
+ * linux/arch/arm/mach-exynos4/setup-i2c3.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c3_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPA1(2), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPA1(2), 2,
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c4.c b/arch/arm/mach-exynos4/setup-i2c4.c
index 909e8dfc5316..9f3c04855b76 100644
--- a/arch/arm/mach-s5pv310/setup-i2c4.c
+++ b/arch/arm/mach-exynos4/setup-i2c4.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c4.c
+ * linux/arch/arm/mach-exynos4/setup-i2c4.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c4_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPB(2), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2,
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c5.c b/arch/arm/mach-exynos4/setup-i2c5.c
index 5d0fa4ac0283..77e1a1e57c76 100644
--- a/arch/arm/mach-s5pv310/setup-i2c5.c
+++ b/arch/arm/mach-exynos4/setup-i2c5.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c5.c
+ * linux/arch/arm/mach-exynos4/setup-i2c5.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c5_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPB(6), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPB(6), 2,
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c6.c b/arch/arm/mach-exynos4/setup-i2c6.c
index 34aafab92ac4..284d12b7af0e 100644
--- a/arch/arm/mach-s5pv310/setup-i2c6.c
+++ b/arch/arm/mach-exynos4/setup-i2c6.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c6.c
+ * linux/arch/arm/mach-exynos4/setup-i2c6.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c6_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPC1(3), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPC1(3), 2,
S3C_GPIO_SFN(4), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-i2c7.c b/arch/arm/mach-exynos4/setup-i2c7.c
index 9b25b8d18920..b7611ee359a2 100644
--- a/arch/arm/mach-s5pv310/setup-i2c7.c
+++ b/arch/arm/mach-exynos4/setup-i2c7.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-s5pv310/setup-i2c7.c
+ * linux/arch/arm/mach-exynos4/setup-i2c7.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
@@ -18,6 +18,6 @@ struct platform_device; /* don't need the contents */
void s3c_i2c7_cfg_gpio(struct platform_device *dev)
{
- s3c_gpio_cfgall_range(S5PV310_GPD0(2), 2,
+ s3c_gpio_cfgall_range(EXYNOS4_GPD0(2), 2,
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
}
diff --git a/arch/arm/mach-s5pv310/setup-sdhci-gpio.c b/arch/arm/mach-exynos4/setup-sdhci-gpio.c
index 86d38cc49135..1b3d3a2de95c 100644
--- a/arch/arm/mach-s5pv310/setup-sdhci-gpio.c
+++ b/arch/arm/mach-exynos4/setup-sdhci-gpio.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/setup-sdhci-gpio.c
+/* linux/arch/arm/mach-exynos4/setup-sdhci-gpio.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ * EXYNOS4 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,13 +23,13 @@
#include <plat/regs-sdhci.h>
#include <plat/sdhci.h>
-void s5pv310_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
+void exynos4_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
unsigned int gpio;
/* Set all the necessary GPK0[0:1] pins to special-function 2 */
- for (gpio = S5PV310_GPK0(0); gpio < S5PV310_GPK0(2); gpio++) {
+ for (gpio = EXYNOS4_GPK0(0); gpio < EXYNOS4_GPK0(2); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
@@ -37,14 +37,14 @@ void s5pv310_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
switch (width) {
case 8:
- for (gpio = S5PV310_GPK1(3); gpio <= S5PV310_GPK1(6); gpio++) {
+ for (gpio = EXYNOS4_GPK1(3); gpio <= EXYNOS4_GPK1(6); gpio++) {
 /* Data pin GPK1[3:6] to special-function 3 */
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
case 4:
- for (gpio = S5PV310_GPK0(3); gpio <= S5PV310_GPK0(6); gpio++) {
+ for (gpio = EXYNOS4_GPK0(3); gpio <= EXYNOS4_GPK0(6); gpio++) {
 /* Data pin GPK0[3:6] to special-function 2 */
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
@@ -55,25 +55,25 @@ void s5pv310_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
}
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
- s3c_gpio_cfgpin(S5PV310_GPK0(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPK0(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(EXYNOS4_GPK0(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(EXYNOS4_GPK0(2), S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
}
-void s5pv310_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
+void exynos4_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
unsigned int gpio;
/* Set all the necessary GPK1[0:1] pins to special-function 2 */
- for (gpio = S5PV310_GPK1(0); gpio < S5PV310_GPK1(2); gpio++) {
+ for (gpio = EXYNOS4_GPK1(0); gpio < EXYNOS4_GPK1(2); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
- for (gpio = S5PV310_GPK1(3); gpio <= S5PV310_GPK1(6); gpio++) {
+ for (gpio = EXYNOS4_GPK1(3); gpio <= EXYNOS4_GPK1(6); gpio++) {
/* Data pin GPK1[3:6] to special-function 2 */
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
@@ -81,19 +81,19 @@ void s5pv310_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
}
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
- s3c_gpio_cfgpin(S5PV310_GPK1(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPK1(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(EXYNOS4_GPK1(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(EXYNOS4_GPK1(2), S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
}
-void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
+void exynos4_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
unsigned int gpio;
/* Set all the necessary GPK2[0:1] pins to special-function 2 */
- for (gpio = S5PV310_GPK2(0); gpio < S5PV310_GPK2(2); gpio++) {
+ for (gpio = EXYNOS4_GPK2(0); gpio < EXYNOS4_GPK2(2); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
@@ -101,14 +101,14 @@ void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
switch (width) {
case 8:
- for (gpio = S5PV310_GPK3(3); gpio <= S5PV310_GPK3(6); gpio++) {
+ for (gpio = EXYNOS4_GPK3(3); gpio <= EXYNOS4_GPK3(6); gpio++) {
/* Data pin GPK3[3:6] to special-function 3 */
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
case 4:
- for (gpio = S5PV310_GPK2(3); gpio <= S5PV310_GPK2(6); gpio++) {
+ for (gpio = EXYNOS4_GPK2(3); gpio <= EXYNOS4_GPK2(6); gpio++) {
/* Data pin GPK2[3:6] to special-function 2 */
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
@@ -119,25 +119,25 @@ void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
}
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
- s3c_gpio_cfgpin(S5PV310_GPK2(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPK2(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(EXYNOS4_GPK2(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(EXYNOS4_GPK2(2), S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
}
-void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *dev, int width)
+void exynos4_setup_sdhci3_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
unsigned int gpio;
/* Set all the necessary GPK3[0:1] pins to special-function 2 */
- for (gpio = S5PV310_GPK3(0); gpio < S5PV310_GPK3(2); gpio++) {
+ for (gpio = EXYNOS4_GPK3(0); gpio < EXYNOS4_GPK3(2); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
- for (gpio = S5PV310_GPK3(3); gpio <= S5PV310_GPK3(6); gpio++) {
+ for (gpio = EXYNOS4_GPK3(3); gpio <= EXYNOS4_GPK3(6); gpio++) {
/* Data pin GPK3[3:6] to special-function 2 */
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
@@ -145,8 +145,8 @@ void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *dev, int width)
}
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
- s3c_gpio_cfgpin(S5PV310_GPK3(2), S3C_GPIO_SFN(2));
- s3c_gpio_setpull(S5PV310_GPK3(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(EXYNOS4_GPK3(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(EXYNOS4_GPK3(2), S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}
}
diff --git a/arch/arm/mach-s5pv310/setup-sdhci.c b/arch/arm/mach-exynos4/setup-sdhci.c
index db8358fc4662..85f9433d4836 100644
--- a/arch/arm/mach-s5pv310/setup-sdhci.c
+++ b/arch/arm/mach-exynos4/setup-sdhci.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/setup-sdhci.c
+/* linux/arch/arm/mach-exynos4/setup-sdhci.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
- * S5PV310 - Helper functions for settign up SDHCI device(s) (HSMMC)
+ * EXYNOS4 - Helper functions for setting up SDHCI device(s) (HSMMC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,14 +23,14 @@
/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
-char *s5pv310_hsmmc_clksrcs[4] = {
+char *exynos4_hsmmc_clksrcs[4] = {
[0] = NULL,
[1] = NULL,
[2] = "sclk_mmc", /* mmc_bus */
[3] = NULL,
};
-void s5pv310_setup_sdhci_cfg_card(struct platform_device *dev, void __iomem *r,
+void exynos4_setup_sdhci_cfg_card(struct platform_device *dev, void __iomem *r,
struct mmc_ios *ios, struct mmc_card *card)
{
u32 ctrl2, ctrl3;
diff --git a/arch/arm/mach-s5pv310/time.c b/arch/arm/mach-exynos4/time.c
index b262d4615331..e30ac7043095 100644
--- a/arch/arm/mach-s5pv310/time.c
+++ b/arch/arm/mach-exynos4/time.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-s5pv310/time.c
+/* linux/arch/arm/mach-exynos4/time.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * S5PV310 (and compatible) HRT support
+ * EXYNOS4 (and compatible) HRT support
* PWM 2/4 is used for this feature
*
* This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@ static struct clk *tdiv2;
static struct clk *tdiv4;
static struct clk *timerclk;
-static void s5pv310_pwm_stop(unsigned int pwm_id)
+static void exynos4_pwm_stop(unsigned int pwm_id)
{
unsigned long tcon;
@@ -52,7 +52,7 @@ static void s5pv310_pwm_stop(unsigned int pwm_id)
__raw_writel(tcon, S3C2410_TCON);
}
-static void s5pv310_pwm_init(unsigned int pwm_id, unsigned long tcnt)
+static void exynos4_pwm_init(unsigned int pwm_id, unsigned long tcnt)
{
unsigned long tcon;
@@ -86,7 +86,7 @@ static void s5pv310_pwm_init(unsigned int pwm_id, unsigned long tcnt)
}
}
-static inline void s5pv310_pwm_start(unsigned int pwm_id, bool periodic)
+static inline void exynos4_pwm_start(unsigned int pwm_id, bool periodic)
{
unsigned long tcon;
@@ -117,23 +117,23 @@ static inline void s5pv310_pwm_start(unsigned int pwm_id, bool periodic)
__raw_writel(tcon, S3C2410_TCON);
}
-static int s5pv310_pwm_set_next_event(unsigned long cycles,
+static int exynos4_pwm_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- s5pv310_pwm_init(2, cycles);
- s5pv310_pwm_start(2, 0);
+ exynos4_pwm_init(2, cycles);
+ exynos4_pwm_start(2, 0);
return 0;
}
-static void s5pv310_pwm_set_mode(enum clock_event_mode mode,
+static void exynos4_pwm_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- s5pv310_pwm_stop(2);
+ exynos4_pwm_stop(2);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- s5pv310_pwm_init(2, clock_count_per_tick);
- s5pv310_pwm_start(2, 1);
+ exynos4_pwm_init(2, clock_count_per_tick);
+ exynos4_pwm_start(2, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
break;
@@ -149,11 +149,11 @@ static struct clock_event_device pwm_event_device = {
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.shift = 32,
- .set_next_event = s5pv310_pwm_set_next_event,
- .set_mode = s5pv310_pwm_set_mode,
+ .set_next_event = exynos4_pwm_set_next_event,
+ .set_mode = exynos4_pwm_set_mode,
};
-irqreturn_t s5pv310_clock_event_isr(int irq, void *dev_id)
+irqreturn_t exynos4_clock_event_isr(int irq, void *dev_id)
{
struct clock_event_device *evt = &pwm_event_device;
@@ -162,13 +162,13 @@ irqreturn_t s5pv310_clock_event_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction s5pv310_clock_event_irq = {
+static struct irqaction exynos4_clock_event_irq = {
.name = "pwm_timer2_irq",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = s5pv310_clock_event_isr,
+ .handler = exynos4_clock_event_isr,
};
-static void __init s5pv310_clockevent_init(void)
+static void __init exynos4_clockevent_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
@@ -198,10 +198,10 @@ static void __init s5pv310_clockevent_init(void)
pwm_event_device.cpumask = cpumask_of(0);
clockevents_register_device(&pwm_event_device);
- setup_irq(IRQ_TIMER2, &s5pv310_clock_event_irq);
+ setup_irq(IRQ_TIMER2, &exynos4_clock_event_irq);
}
-static cycle_t s5pv310_pwm4_read(struct clocksource *cs)
+static cycle_t exynos4_pwm4_read(struct clocksource *cs)
{
return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40));
}
@@ -209,12 +209,12 @@ static cycle_t s5pv310_pwm4_read(struct clocksource *cs)
struct clocksource pwm_clocksource = {
.name = "pwm_timer4",
.rating = 250,
- .read = s5pv310_pwm4_read,
+ .read = exynos4_pwm4_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init s5pv310_clocksource_init(void)
+static void __init exynos4_clocksource_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
@@ -226,14 +226,14 @@ static void __init s5pv310_clocksource_init(void)
clock_rate = clk_get_rate(tin4);
- s5pv310_pwm_init(4, ~0);
- s5pv310_pwm_start(4, 1);
+ exynos4_pwm_init(4, ~0);
+ exynos4_pwm_start(4, 1);
if (clocksource_register_hz(&pwm_clocksource, clock_rate))
panic("%s: can't register clocksource\n", pwm_clocksource.name);
}
-static void __init s5pv310_timer_resources(void)
+static void __init exynos4_timer_resources(void)
{
struct platform_device tmpdev;
@@ -267,17 +267,17 @@ static void __init s5pv310_timer_resources(void)
clk_enable(tin4);
}
-static void __init s5pv310_timer_init(void)
+static void __init exynos4_timer_init(void)
{
#ifdef CONFIG_LOCAL_TIMERS
twd_base = S5P_VA_TWD;
#endif
- s5pv310_timer_resources();
- s5pv310_clockevent_init();
- s5pv310_clocksource_init();
+ exynos4_timer_resources();
+ exynos4_clockevent_init();
+ exynos4_clocksource_init();
}
-struct sys_timer s5pv310_timer = {
- .init = s5pv310_timer_init,
+struct sys_timer exynos4_timer = {
+ .init = exynos4_timer_init,
};
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index bc5e83fb5819..a921fe92b858 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -4,10 +4,11 @@
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*/
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/spinlock.h>
#include <asm/irq.h>
@@ -16,32 +17,76 @@
#include "common.h"
-/*
- * Footbridge timer 1 support.
- */
-static unsigned long timer1_latch;
+static cycle_t cksrc_dc21285_read(struct clocksource *cs)
+{
+ return cs->mask - *CSR_TIMER2_VALUE;
+}
-static unsigned long timer1_gettimeoffset (void)
+static int cksrc_dc21285_enable(struct clocksource *cs)
{
- unsigned long value = timer1_latch - *CSR_TIMER1_VALUE;
+ *CSR_TIMER2_LOAD = cs->mask;
+ *CSR_TIMER2_CLR = 0;
+ *CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
+ return 0;
+}
- return ((tick_nsec / 1000) * value) / timer1_latch;
+static void cksrc_dc21285_disable(struct clocksource *cs)
+{
+ *CSR_TIMER2_CNTL = 0;
}
-static irqreturn_t
-timer1_interrupt(int irq, void *dev_id)
+static struct clocksource cksrc_dc21285 = {
+ .name = "dc21285_timer2",
+ .rating = 200,
+ .read = cksrc_dc21285_read,
+ .enable = cksrc_dc21285_enable,
+ .disable = cksrc_dc21285_disable,
+ .mask = CLOCKSOURCE_MASK(24),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *c)
{
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ *CSR_TIMER1_CLR = 0;
+ *CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
+ *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
+ TIMER_CNTL_DIV16;
+ break;
+
+ default:
+ *CSR_TIMER1_CNTL = 0;
+ break;
+ }
+}
+
+static struct clock_event_device ckevt_dc21285 = {
+ .name = "dc21285_timer1",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 200,
+ .irq = IRQ_TIMER1,
+ .set_mode = ckevt_dc21285_set_mode,
+};
+
+static irqreturn_t timer1_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = dev_id;
+
*CSR_TIMER1_CLR = 0;
- timer_tick();
+ ce->event_handler(ce);
return IRQ_HANDLED;
}
static struct irqaction footbridge_timer_irq = {
- .name = "Timer1 timer tick",
+ .name = "dc21285_timer1",
.handler = timer1_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .dev_id = &ckevt_dc21285,
};
/*
@@ -49,16 +94,19 @@ static struct irqaction footbridge_timer_irq = {
*/
static void __init footbridge_timer_init(void)
{
- timer1_latch = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
+ struct clock_event_device *ce = &ckevt_dc21285;
+
+ clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+
+ setup_irq(ce->irq, &footbridge_timer_irq);
- *CSR_TIMER1_CLR = 0;
- *CSR_TIMER1_LOAD = timer1_latch;
- *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16;
+ clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
+ ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
+ ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
- setup_irq(IRQ_TIMER1, &footbridge_timer_irq);
+ clockevents_register_device(ce);
}
struct sys_timer footbridge_timer = {
.init = footbridge_timer_init,
- .offset = timer1_gettimeoffset,
};
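
Both footbridge conversions in this patch follow the same clockevents/clocksource recipe: the free-running timer becomes a clocksource, the tick timer becomes a periodic clock_event_device whose interrupt handler calls ce->event_handler(ce), and the old .offset hook disappears. A condensed sketch of that recipe, using only calls that appear in the hunk above (the my_* names and input_rate_hz are placeholders; my_clocksource, my_clockevent and my_timer_irqaction are assumed to be defined in the same style as the structures above):

static void __init my_timer_init(void)
{
	struct clock_event_device *ce = &my_clockevent;

	/* free-running counter: register as a clocksource at its input rate */
	clocksource_register_hz(&my_clocksource, input_rate_hz);

	/* tick timer: its ISR now calls ce->event_handler(ce) */
	setup_irq(ce->irq, &my_timer_irqaction);

	clockevents_calc_mult_shift(ce, input_rate_hz, 5);
	ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
	ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
	ce->cpumask = cpumask_of(0);
	clockevents_register_device(ce);
}
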
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index 8d64f4574087..5c6df377f969 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -62,7 +62,7 @@ extern unsigned long __bus_to_pfn(unsigned long);
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#define FLUSH_BASE_PHYS 0x50000000
diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c
index f488fa2082d7..441c6ce0d555 100644
--- a/arch/arm/mach-footbridge/isa-timer.c
+++ b/arch/arm/mach-footbridge/isa-timer.c
@@ -4,10 +4,13 @@
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*/
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/timex.h>
#include <asm/irq.h>
@@ -15,77 +18,115 @@
#include "common.h"
-/*
- * ISA timer tick support
- */
-#define mSEC_10_from_14 ((14318180 + 100) / 200)
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+
+#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
-static unsigned long isa_gettimeoffset(void)
+static cycle_t pit_read(struct clocksource *cs)
{
+ unsigned long flags;
+ static int old_count;
+ static u32 old_jifs;
int count;
+ u32 jifs;
- static int count_p = (mSEC_10_from_14/6); /* for the first call after boot */
- static unsigned long jiffies_p = 0;
+ raw_local_irq_save(flags);
- /*
- * cache volatile jiffies temporarily; we have IRQs turned off.
- */
- unsigned long jiffies_t;
+ jifs = jiffies;
+ outb_p(0x00, PIT_MODE); /* latch the count */
+ count = inb_p(PIT_CH0); /* read the latched count */
+ count |= inb_p(PIT_CH0) << 8;
- /* timer count may underflow right here */
- outb_p(0x00, 0x43); /* latch the count ASAP */
+ if (count > old_count && jifs == old_jifs)
+ count = old_count;
- count = inb_p(0x40); /* read the latched count */
+ old_count = count;
+ old_jifs = jifs;
- /*
- * We do this guaranteed double memory access instead of a _p
- * postfix in the previous port access. Wheee, hackady hack
- */
- jiffies_t = jiffies;
+ raw_local_irq_restore(flags);
- count |= inb_p(0x40) << 8;
+ count = (PIT_LATCH - 1) - count;
- /* Detect timer underflows. If we haven't had a timer tick since
- the last time we were called, and time is apparently going
- backwards, the counter must have wrapped during this routine. */
- if ((jiffies_t == jiffies_p) && (count > count_p))
- count -= (mSEC_10_from_14/6);
- else
- jiffies_p = jiffies_t;
+ return (cycle_t)(jifs * PIT_LATCH) + count;
+}
- count_p = count;
+static struct clocksource pit_cs = {
+ .name = "pit",
+ .rating = 110,
+ .read = pit_read,
+ .mask = CLOCKSOURCE_MASK(32),
+};
- count = (((mSEC_10_from_14/6)-1) - count) * (tick_nsec / 1000);
- count = (count + (mSEC_10_from_14/6)/2) / (mSEC_10_from_14/6);
+static void pit_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ outb_p(0x34, PIT_MODE);
+ outb_p(PIT_LATCH & 0xff, PIT_CH0);
+ outb_p(PIT_LATCH >> 8, PIT_CH0);
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+ local_irq_restore(flags);
+}
- return count;
+static int pit_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return 0;
}
-static irqreturn_t
-isa_timer_interrupt(int irq, void *dev_id)
+static struct clock_event_device pit_ce = {
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = pit_set_mode,
+ .set_next_event = pit_set_next_event,
+ .shift = 32,
+};
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
- timer_tick();
+ struct clock_event_device *ce = dev_id;
+ ce->event_handler(ce);
return IRQ_HANDLED;
}
-static struct irqaction isa_timer_irq = {
- .name = "ISA timer tick",
- .handler = isa_timer_interrupt,
+static struct irqaction pit_timer_irq = {
+ .name = "pit",
+ .handler = pit_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .dev_id = &pit_ce,
};
static void __init isa_timer_init(void)
{
- /* enable PIT timer */
- /* set for periodic (4) and LSB/MSB write (0x30) */
- outb(0x34, 0x43);
- outb((mSEC_10_from_14/6) & 0xFF, 0x40);
- outb((mSEC_10_from_14/6) >> 8, 0x40);
+ pit_ce.cpumask = cpumask_of(smp_processor_id());
+ pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
+ pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce);
+ pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce);
+
+ clocksource_register_hz(&pit_cs, PIT_TICK_RATE);
- setup_irq(IRQ_ISA_TIMER, &isa_timer_irq);
+ setup_irq(pit_ce.irq, &pit_timer_irq);
+ clockevents_register_device(&pit_ce);
}
struct sys_timer isa_timer = {
.init = isa_timer_init,
- .offset = isa_gettimeoffset,
};
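
As a worked example of the PIT_LATCH arithmetic above: PIT_TICK_RATE is the 1193182 Hz i8253 input clock from <linux/timex.h>, so with an assumed HZ of 100 the reload value comes out as follows (HZ is configuration dependent; 100 is only an illustration):

/*
 * PIT_LATCH = (PIT_TICK_RATE + HZ / 2) / HZ
 *           = (1193182 + 50) / 100
 *           = 11932 counts per tick  (~10.0 ms at 1.193182 MHz)
 *
 * pit_read() then folds the down-counting hardware value into a monotonic
 * count: jiffies * PIT_LATCH + ((PIT_LATCH - 1) - latched_count).
 */
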
diff --git a/arch/arm/mach-gemini/board-nas4220b.c b/arch/arm/mach-gemini/board-nas4220b.c
index 2ba096de0034..0cf7a07c3f3f 100644
--- a/arch/arm/mach-gemini/board-nas4220b.c
+++ b/arch/arm/mach-gemini/board-nas4220b.c
@@ -98,6 +98,7 @@ static void __init ib4220b_init(void)
platform_register_pflash(SZ_16M, NULL, 0);
platform_device_register(&ib4220b_led_device);
platform_device_register(&ib4220b_key_device);
+ platform_register_rtc();
}
MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")
diff --git a/arch/arm/mach-gemini/board-rut1xx.c b/arch/arm/mach-gemini/board-rut1xx.c
index a9a0d8b01942..4fa09af99495 100644
--- a/arch/arm/mach-gemini/board-rut1xx.c
+++ b/arch/arm/mach-gemini/board-rut1xx.c
@@ -82,6 +82,7 @@ static void __init rut1xx_init(void)
platform_register_pflash(SZ_8M, NULL, 0);
platform_device_register(&rut1xx_leds);
platform_device_register(&rut1xx_keys_device);
+ platform_register_rtc();
}
MACHINE_START(RUT100, "Teltonika RUT100")
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
index 8b88d50d4337..af7b68a6b258 100644
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ b/arch/arm/mach-gemini/board-wbd111.c
@@ -130,6 +130,7 @@ static void __init wbd111_init(void)
wbd111_num_partitions);
platform_device_register(&wbd111_leds_device);
platform_device_register(&wbd111_keys_device);
+ platform_register_rtc();
}
MACHINE_START(WBD111, "Wiliboard WBD-111")
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
index 1eebcecd1c33..99e5bbecf923 100644
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ b/arch/arm/mach-gemini/board-wbd222.c
@@ -130,6 +130,7 @@ static void __init wbd222_init(void)
wbd222_num_partitions);
platform_device_register(&wbd222_leds_device);
platform_device_register(&wbd222_keys_device);
+ platform_register_rtc();
}
MACHINE_START(WBD222, "Wiliboard WBD-222")
diff --git a/arch/arm/mach-gemini/common.h b/arch/arm/mach-gemini/common.h
index 9392834a214f..7670c39acb2f 100644
--- a/arch/arm/mach-gemini/common.h
+++ b/arch/arm/mach-gemini/common.h
@@ -18,6 +18,7 @@ extern void gemini_map_io(void);
extern void gemini_init_irq(void);
extern void gemini_timer_init(void);
extern void gemini_gpio_init(void);
+extern void platform_register_rtc(void);
/* Common platform devices registration functions */
extern int platform_register_uart(void);
diff --git a/arch/arm/mach-gemini/devices.c b/arch/arm/mach-gemini/devices.c
index 6b525253d027..5cff29818b73 100644
--- a/arch/arm/mach-gemini/devices.c
+++ b/arch/arm/mach-gemini/devices.c
@@ -90,3 +90,29 @@ int platform_register_pflash(unsigned int size, struct mtd_partition *parts,
return platform_device_register(&pflash_device);
}
+
+static struct resource gemini_rtc_resources[] = {
+ [0] = {
+ .start = GEMINI_RTC_BASE,
+ .end = GEMINI_RTC_BASE + 0x24,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RTC,
+ .end = IRQ_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device gemini_rtc_device = {
+ .name = "rtc-gemini",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(gemini_rtc_resources),
+ .resource = gemini_rtc_resources,
+};
+
+int __init platform_register_rtc(void)
+{
+ return platform_device_register(&gemini_rtc_device);
+}
+
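
The new RTC platform device above uses the standard resource pairing: one IORESOURCE_MEM window for the register block and one IORESOURCE_IRQ entry. A minimal sketch of how the matching "rtc-gemini" driver would retrieve them at probe time (the probe function and its body are illustrative, not the actual driver; platform_get_resource() and platform_get_irq() are the standard accessors):

static int gemini_rtc_probe_sketch(struct platform_device *pdev)
{
	struct resource *res;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* GEMINI_RTC_BASE window */
	irq = platform_get_irq(pdev, 0);			/* IRQ_RTC */
	if (!res || irq < 0)
		return -ENODEV;

	/* ioremap the window, request the irq, register the rtc class device */
	return 0;
}
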
diff --git a/arch/arm/mach-gemini/include/mach/memory.h b/arch/arm/mach-gemini/include/mach/memory.h
index 2d14d5bf1f9f..a50915f764d8 100644
--- a/arch/arm/mach-gemini/include/mach/memory.h
+++ b/arch/arm/mach-gemini/include/mach/memory.h
@@ -11,9 +11,9 @@
#define __MACH_MEMORY_H
#ifdef CONFIG_GEMINI_MEM_SWAP
-# define PHYS_OFFSET UL(0x00000000)
+# define PLAT_PHYS_OFFSET UL(0x00000000)
#else
-# define PHYS_OFFSET UL(0x10000000)
+# define PLAT_PHYS_OFFSET UL(0x10000000)
#endif
#endif /* __MACH_MEMORY_H */
diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h
index ef4c1e26f18e..9d3687651462 100644
--- a/arch/arm/mach-h720x/include/mach/memory.h
+++ b/arch/arm/mach-h720x/include/mach/memory.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x40000000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
/*
* This is the maximum DMA address that can be DMAd to.
* There should not be more than (0xd0000000 - 0xc0000000)
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 56684b517070..5eec099e0c72 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -100,6 +100,7 @@ config MACH_MX25_3DS
select IMX_HAVE_PLATFORM_FSL_USB2_UDC
select IMX_HAVE_PLATFORM_IMX2_WDT
select IMX_HAVE_PLATFORM_IMXDI_RTC
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_FB
select IMX_HAVE_PLATFORM_IMX_KEYPAD
select IMX_HAVE_PLATFORM_IMX_UART
@@ -238,6 +239,7 @@ config MACH_MX27_3DS
select SOC_IMX27
select IMX_HAVE_PLATFORM_FSL_USB2_UDC
select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_KEYPAD
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_EHCI
@@ -265,6 +267,7 @@ config MACH_IMX27LITE
bool "LogicPD MX27 LITEKIT platform"
select SOC_IMX27
select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_IMX_SSI
help
Include support for MX27 LITEKIT platform. This includes specific
configurations for the board and its peripherals.
@@ -300,4 +303,13 @@ config MACH_MXT_TD60
Include support for i-MXT (aka td60) platform. This
includes specific configurations for the module and its peripherals.
+config MACH_IMX27IPCAM
+ bool "IMX27 IPCAM platform"
+ select SOC_IMX27
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_UART
+ help
+ Include support for IMX27 IPCAM platform. This includes specific
+ configurations for the board and its peripherals.
+
endif
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 77100bf26153..b85794d27991 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -9,10 +9,10 @@ obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o
obj-$(CONFIG_ARCH_MX1) += clock-imx1.o mm-imx1.o
obj-$(CONFIG_MACH_MX21) += clock-imx21.o mm-imx21.o
-obj-$(CONFIG_ARCH_MX25) += clock-imx25.o mm-imx25.o
+obj-$(CONFIG_ARCH_MX25) += clock-imx25.o mm-imx25.o ehci-imx25.o
obj-$(CONFIG_MACH_MX27) += cpu-imx27.o pm-imx27.o
-obj-$(CONFIG_MACH_MX27) += clock-imx27.o mm-imx27.o
+obj-$(CONFIG_MACH_MX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
# Support for CMOS sensor interface
obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -36,3 +36,4 @@ obj-$(CONFIG_MACH_CPUIMX27) += mach-cpuimx27.o
obj-$(CONFIG_MACH_EUKREA_MBIMX27_BASEBOARD) += eukrea_mbimx27-baseboard.o
obj-$(CONFIG_MACH_PCA100) += mach-pca100.o
obj-$(CONFIG_MACH_MXT_TD60) += mach-mxt_td60.o
+obj-$(CONFIG_MACH_IMX27IPCAM) += mach-imx27ipcam.o
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
index 3938a563b280..dcc41728fe72 100644
--- a/arch/arm/mach-imx/clock-imx1.c
+++ b/arch/arm/mach-imx/clock-imx1.c
@@ -592,6 +592,7 @@ static struct clk_lookup lookups[] __initdata = {
_REGISTER_CLOCK("imx-uart.2", NULL, uart_clk)
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
_REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
+ _REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
_REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
_REGISTER_CLOCK(NULL, "mshc", mshc_clk)
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
index daa0165b6772..a65838fc061c 100644
--- a/arch/arm/mach-imx/clock-imx25.c
+++ b/arch/arm/mach-imx/clock-imx25.c
@@ -228,6 +228,7 @@ DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0, 3, get_rate_esdhc1, NULL,
DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2, NULL, NULL);
DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0, 4, get_rate_esdhc2, NULL,
&esdhc2_ahb_clk);
+DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL, NULL, NULL);
DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL, NULL, NULL);
DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0, 7, NULL, NULL, &lcdc_ahb_clk);
@@ -253,6 +254,7 @@ DEFINE_CLOCK(lcdc_clk, 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
DEFINE_CLOCK(wdt_clk, 0, CCM_CGCR2, 19, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(ssi1_clk, 0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
DEFINE_CLOCK(ssi2_clk, 1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
+DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2, 6, get_rate_ipg, NULL, &sdma_ahb_clk);
DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
&esdhc1_per_clk);
DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
@@ -304,6 +306,7 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK(NULL, "audmux", audmux_clk)
_REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
_REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
+ _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk)
};
int __init mx25_clocks_init(void)
diff --git a/arch/arm/mach-imx/devices-imx1.h b/arch/arm/mach-imx/devices-imx1.h
index 81979486218e..fd267158e0f6 100644
--- a/arch/arm/mach-imx/devices-imx1.h
+++ b/arch/arm/mach-imx/devices-imx1.h
@@ -18,3 +18,10 @@ extern const struct imx_imx_uart_3irq_data imx1_imx_uart_data[] __initconst;
imx_add_imx_uart_3irq(&imx1_imx_uart_data[id], pdata)
#define imx1_add_imx_uart0(pdata) imx1_add_imx_uart(0, pdata)
#define imx1_add_imx_uart1(pdata) imx1_add_imx_uart(1, pdata)
+
+extern const struct imx_spi_imx_data imx1_cspi_data[] __initconst;
+#define imx1_add_cspi(id, pdata) \
+ imx_add_spi_imx(&imx1_cspi_data[id], pdata)
+
+#define imx1_add_spi_imx0(pdata) imx1_add_cspi(0, pdata)
+#define imx1_add_spi_imx1(pdata) imx1_add_cspi(1, pdata)
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
index e9f1769b49f5..236f1495efad 100644
--- a/arch/arm/mach-imx/dma-v1.c
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -699,7 +699,7 @@ int imx_dma_request(int channel, const char *name)
local_irq_restore(flags);
return -EBUSY;
}
- memset(imxdma, 0, sizeof(imxdma));
+ memset(imxdma, 0, sizeof(*imxdma));
imxdma->name = name;
local_irq_restore(flags); /* request_irq() can block */
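
The one-character fix above corrects a classic mistake: sizeof(imxdma) is the size of the pointer variable (4 or 8 bytes), not of the channel structure it points to, so the old code only zeroed the first word or two of the channel state. A self-contained illustration of the bug class (ordinary userspace C, not kernel code):

#include <string.h>

struct chan { int in_use; int watchdog[16]; };

void clear_chan(struct chan *c)
{
	/* wrong: clears sizeof(struct chan *) bytes, i.e. 4 or 8 */
	/* memset(c, 0, sizeof(c)); */

	/* right: clears the whole structure c points to */
	memset(c, 0, sizeof(*c));
}
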
diff --git a/arch/arm/mach-imx/ehci-imx25.c b/arch/arm/mach-imx/ehci-imx25.c
new file mode 100644
index 000000000000..865daf0b09e9
--- /dev/null
+++ b/arch/arm/mach-imx/ehci-imx25.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET 0x600
+
+#define MX25_OTG_SIC_SHIFT 29
+#define MX25_OTG_SIC_MASK (0x3 << MX25_OTG_SIC_SHIFT)
+#define MX25_OTG_PM_BIT (1 << 24)
+
+#define MX25_H1_SIC_SHIFT 21
+#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
+#define MX25_H1_PM_BIT (1 << 8)
+#define MX25_H1_IPPUE_UP_BIT (1 << 7)
+#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
+#define MX25_H1_TLL_BIT (1 << 5)
+#define MX25_H1_USBTE_BIT (1 << 4)
+
+int mx25_initialize_usb_hw(int port, unsigned int flags)
+{
+ unsigned int v;
+
+ v = readl(MX25_IO_ADDRESS(MX25_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ switch (port) {
+ case 0: /* OTG port */
+ v &= ~(MX25_OTG_SIC_MASK | MX25_OTG_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX25_OTG_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX25_OTG_PM_BIT;
+
+ break;
+ case 1: /* H1 port */
+ v &= ~(MX25_H1_SIC_MASK | MX25_H1_PM_BIT | MX25_H1_TLL_BIT |
+ MX25_H1_USBTE_BIT | MX25_H1_IPPUE_DOWN_BIT | MX25_H1_IPPUE_UP_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX25_H1_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX25_H1_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX25_H1_TLL_BIT;
+
+ if (flags & MXC_EHCI_INTERNAL_PHY)
+ v |= MX25_H1_USBTE_BIT;
+
+ if (flags & MXC_EHCI_IPPUE_DOWN)
+ v |= MX25_H1_IPPUE_DOWN_BIT;
+
+ if (flags & MXC_EHCI_IPPUE_UP)
+ v |= MX25_H1_IPPUE_UP_BIT;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(v, MX25_IO_ADDRESS(MX25_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ return 0;
+}
+
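
As with the i.MX27 variant that follows, boards are expected to call this helper from the .init hook of their mxc_usbh_platform_data rather than passing raw interface flags; the eukrea_cpuimx25 and mx25_3ds hunks later in this patch do exactly that. A trimmed sketch of the wiring (the my_board_* names are placeholders, the fields and flags are taken from those hunks):

static int my_board_usbh2_init(struct platform_device *pdev)
{
	return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
}

static const struct mxc_usbh_platform_data my_usbh2_pdata __initconst = {
	.init	= my_board_usbh2_init,
	.portsc	= MXC_EHCI_MODE_SERIAL,
};
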
diff --git a/arch/arm/mach-imx/ehci-imx27.c b/arch/arm/mach-imx/ehci-imx27.c
new file mode 100644
index 000000000000..fa69419eabdd
--- /dev/null
+++ b/arch/arm/mach-imx/ehci-imx27.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET 0x600
+
+#define MX27_OTG_SIC_SHIFT 29
+#define MX27_OTG_SIC_MASK (0x3 << MX27_OTG_SIC_SHIFT)
+#define MX27_OTG_PM_BIT (1 << 24)
+
+#define MX27_H2_SIC_SHIFT 21
+#define MX27_H2_SIC_MASK (0x3 << MX27_H2_SIC_SHIFT)
+#define MX27_H2_PM_BIT (1 << 16)
+#define MX27_H2_DT_BIT (1 << 5)
+
+#define MX27_H1_SIC_SHIFT 13
+#define MX27_H1_SIC_MASK (0x3 << MX27_H1_SIC_SHIFT)
+#define MX27_H1_PM_BIT (1 << 8)
+#define MX27_H1_DT_BIT (1 << 4)
+
+int mx27_initialize_usb_hw(int port, unsigned int flags)
+{
+ unsigned int v;
+
+ v = readl(MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ switch (port) {
+ case 0: /* OTG port */
+ v &= ~(MX27_OTG_SIC_MASK | MX27_OTG_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_OTG_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX27_OTG_PM_BIT;
+ break;
+ case 1: /* H1 port */
+ v &= ~(MX27_H1_SIC_MASK | MX27_H1_PM_BIT | MX27_H1_DT_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_H1_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX27_H1_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX27_H1_DT_BIT;
+
+ break;
+ case 2: /* H2 port */
+ v &= ~(MX27_H2_SIC_MASK | MX27_H2_PM_BIT | MX27_H2_DT_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_H2_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX27_H2_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX27_H2_DT_BIT;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(v, MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index 275c8589d797..fa5288018ba7 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -249,7 +249,7 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
#define ADS7846_PENDOWN (GPIO_PORTD | 25)
-static void ads7846_dev_init(void)
+static void __maybe_unused ads7846_dev_init(void)
{
if (gpio_request(ADS7846_PENDOWN, "ADS7846 pendown") < 0) {
printk(KERN_ERR "can't get ads746 pen down GPIO\n");
@@ -268,7 +268,8 @@ static struct ads7846_platform_data ads7846_config __initdata = {
.keep_vref_on = 1,
};
-static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
+static struct spi_board_info __maybe_unused
+ eukrea_mbimx27_spi_board_info[] __initdata = {
[0] = {
.modalias = "ads7846",
.bus_num = 0,
@@ -357,13 +358,11 @@ void __init eukrea_mbimx27_baseboard_init(void)
ads7846_dev_init();
#endif
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
/* SPI_CS0 init */
mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_OUT);
imx27_add_spi_imx0(&eukrea_mbimx27_spi0_data);
spi_register_board_info(eukrea_mbimx27_spi_board_info,
ARRAY_SIZE(eukrea_mbimx27_spi_board_info));
-#endif
/* Leds configuration */
mxc_gpio_mode(GPIO_PORTF | 16 | GPIO_GPIO | GPIO_OUT);
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
index cb705c28de02..6269053505f7 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
@@ -34,6 +34,7 @@
#include <mach/mx25.h>
#include <mach/imx-uart.h>
#include <mach/audmux.h>
+#include <mach/esdhc.h>
#include "devices-imx25.h"
@@ -242,6 +243,11 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata __initconst = {
.flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE,
};
+static struct esdhc_platform_data sd1_pdata = {
+ .cd_gpio = GPIO_SD1CD,
+ .wp_gpio = -EINVAL,
+};
+
/*
* system init for baseboard usage. Will be called by cpuimx25 init.
*
@@ -275,7 +281,7 @@ void __init eukrea_mbimxsd25_baseboard_init(void)
imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);
imx25_add_flexcan1(NULL);
- imx25_add_sdhci_esdhc_imx(0, NULL);
+ imx25_add_sdhci_esdhc_imx(0, &sd1_pdata);
gpio_request(GPIO_LED1, "LED1");
gpio_direction_output(GPIO_LED1, 1);
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 6cf04da2456a..6b724c2ed0a7 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -210,14 +210,24 @@ static struct platform_device serial_device = {
#endif
#if defined(CONFIG_USB_ULPI)
+static int eukrea_cpuimx27_otg_init(struct platform_device *pdev)
+{
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static struct mxc_usbh_platform_data otg_pdata __initdata = {
+ .init = eukrea_cpuimx27_otg_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
+static int eukrea_cpuimx27_usbh2_init(struct platform_device *pdev)
+{
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
+ .init = eukrea_cpuimx27_usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
#endif
@@ -304,9 +314,10 @@ static struct sys_timer eukrea_cpuimx27_timer = {
};
MACHINE_START(CPUIMX27, "EUKREA CPUIMX27")
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = eukrea_cpuimx27_init,
- .timer = &eukrea_cpuimx27_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &eukrea_cpuimx27_timer,
+ .init_machine = eukrea_cpuimx27_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index eb395aba9237..9da8d18eeb00 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -84,15 +84,25 @@ static struct i2c_board_info eukrea_cpuimx25_i2c_devices[] = {
},
};
+static int eukrea_cpuimx25_otg_init(struct platform_device *pdev)
+{
+ return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static const struct mxc_usbh_platform_data otg_pdata __initconst = {
+ .init = eukrea_cpuimx25_otg_init,
.portsc = MXC_EHCI_MODE_UTMI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
+static int eukrea_cpuimx25_usbh2_init(struct platform_device *pdev)
+{
+ return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
+ MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN);
+}
+
static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
+ .init = eukrea_cpuimx25_usbh2_init,
.portsc = MXC_EHCI_MODE_SERIAL,
- .flags = MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY |
- MXC_EHCI_IPPUE_DOWN,
};
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
@@ -153,9 +163,10 @@ static struct sys_timer eukrea_cpuimx25_timer = {
MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25")
/* Maintainer: Eukrea Electromatique */
- .boot_params = MX25_PHYS_OFFSET + 0x100,
- .map_io = mx25_map_io,
- .init_irq = mx25_init_irq,
- .init_machine = eukrea_cpuimx25_init,
- .timer = &eukrea_cpuimx25_timer,
+ .boot_params = MX25_PHYS_OFFSET + 0x100,
+ .map_io = mx25_map_io,
+ .init_early = imx25_init_early,
+ .init_irq = mx25_init_irq,
+ .timer = &eukrea_cpuimx25_timer,
+ .init_machine = eukrea_cpuimx25_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 40a3666ea632..d7e0d219726a 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -30,6 +30,7 @@
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/gpio.h>
+#include <linux/delay.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
@@ -66,6 +67,11 @@ static const int visstrim_m10_pins[] __initconst = {
PD15_AOUT_FEC_COL,
PD16_AIN_FEC_TX_ER,
PF23_AIN_FEC_TX_EN,
+ /* SSI1 */
+ PC20_PF_SSI1_FS,
+ PC21_PF_SSI1_RXD,
+ PC22_PF_SSI1_TXD,
+ PC23_PF_SSI1_CLK,
/* SDHC1 */
PE18_PF_SD1_D0,
PE19_PF_SD1_D1,
@@ -204,20 +210,30 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
I2C_BOARD_INFO("pca9555", 0x20),
.platform_data = &visstrim_m10_pca9555_pdata,
},
+ {
+ I2C_BOARD_INFO("tlv320aic32x4", 0x18),
+ }
};
/* USB OTG */
static int otg_phy_init(struct platform_device *pdev)
{
gpio_set_value(OTG_PHY_CS_GPIO, 0);
- return 0;
+
+ mdelay(10);
+
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
}
static const struct mxc_usbh_platform_data
visstrim_m10_usbotg_pdata __initconst = {
.init = otg_phy_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
+};
+
+/* SSI */
+static const struct imx_ssi_platform_data visstrim_m10_ssi_pdata __initconst = {
+ .flags = IMX_SSI_DMA | IMX_SSI_SYN,
};
static void __init visstrim_m10_board_init(void)
@@ -229,6 +245,7 @@ static void __init visstrim_m10_board_init(void)
if (ret)
pr_err("Failed to setup pins (%d)\n", ret);
+ imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
imx27_add_imx_uart0(&uart_pdata);
i2c_register_board_info(0, visstrim_m10_i2c_devices,
@@ -251,9 +268,10 @@ static struct sys_timer visstrim_m10_timer = {
};
MACHINE_START(IMX27_VISSTRIM_M10, "Vista Silicon Visstrim_M10")
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = visstrim_m10_board_init,
- .timer = &visstrim_m10_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &visstrim_m10_timer,
+ .init_machine = visstrim_m10_board_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
new file mode 100644
index 000000000000..9be6cd6fbf8c
--- /dev/null
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include <mach/iomux-mx27.h>
+
+#include "devices-imx27.h"
+
+static const int mx27ipcam_pins[] __initconst = {
+ /* UART1 */
+ PE12_PF_UART1_TXD,
+ PE13_PF_UART1_RXD,
+ /* FEC */
+ PD0_AIN_FEC_TXD0,
+ PD1_AIN_FEC_TXD1,
+ PD2_AIN_FEC_TXD2,
+ PD3_AIN_FEC_TXD3,
+ PD4_AOUT_FEC_RX_ER,
+ PD5_AOUT_FEC_RXD1,
+ PD6_AOUT_FEC_RXD2,
+ PD7_AOUT_FEC_RXD3,
+ PD8_AF_FEC_MDIO,
+ PD9_AIN_FEC_MDC,
+ PD10_AOUT_FEC_CRS,
+ PD11_AOUT_FEC_TX_CLK,
+ PD12_AOUT_FEC_RXD0,
+ PD13_AOUT_FEC_RX_DV,
+ PD14_AOUT_FEC_RX_CLK,
+ PD15_AOUT_FEC_COL,
+ PD16_AIN_FEC_TX_ER,
+ PF23_AIN_FEC_TX_EN,
+};
+
+static void __init mx27ipcam_init(void)
+{
+ mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins),
+ "mx27ipcam");
+
+ imx27_add_imx_uart0(NULL);
+ imx27_add_fec(NULL);
+ imx27_add_imx2_wdt(NULL);
+}
+
+static void __init mx27ipcam_timer_init(void)
+{
+ mx27_clocks_init(25000000);
+}
+
+static struct sys_timer mx27ipcam_timer = {
+ .init = mx27ipcam_timer_init,
+};
+
+MACHINE_START(IMX27IPCAM, "Freescale IMX27IPCAM")
+ /* maintainer: Freescale Semiconductor, Inc. */
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &mx27ipcam_timer,
+ .init_machine = mx27ipcam_init,
+MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 3a1202e47212..841140516ede 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -75,9 +75,10 @@ static struct sys_timer mx27lite_timer = {
};
MACHINE_START(IMX27LITE, "LogicPD i.MX27LITE")
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = mx27lite_init,
- .timer = &mx27lite_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &mx27lite_timer,
+ .init_machine = mx27lite_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index 1f446e5eb636..47cf56ac6d5b 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -144,17 +144,19 @@ struct sys_timer mx1ads_timer = {
MACHINE_START(MX1ADS, "Freescale MX1ADS")
/* Maintainer: Sascha Hauer, Pengutronix */
- .boot_params = MX1_PHYS_OFFSET + 0x100,
- .map_io = mx1_map_io,
- .init_irq = mx1_init_irq,
- .timer = &mx1ads_timer,
- .init_machine = mx1ads_init,
+ .boot_params = MX1_PHYS_OFFSET + 0x100,
+ .map_io = mx1_map_io,
+ .init_early = imx1_init_early,
+ .init_irq = mx1_init_irq,
+ .timer = &mx1ads_timer,
+ .init_machine = mx1ads_init,
MACHINE_END
MACHINE_START(MXLADS, "Freescale MXLADS")
- .boot_params = MX1_PHYS_OFFSET + 0x100,
- .map_io = mx1_map_io,
- .init_irq = mx1_init_irq,
- .timer = &mx1ads_timer,
- .init_machine = mx1ads_init,
+ .boot_params = MX1_PHYS_OFFSET + 0x100,
+ .map_io = mx1_map_io,
+ .init_early = imx1_init_early,
+ .init_irq = mx1_init_irq,
+ .timer = &mx1ads_timer,
+ .init_machine = mx1ads_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 0a372577c2ac..fa52a1086eae 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -304,9 +304,10 @@ static struct sys_timer mx21ads_timer = {
MACHINE_START(MX21ADS, "Freescale i.MX21ADS")
/* maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX21_PHYS_OFFSET + 0x100,
- .map_io = mx21ads_map_io,
- .init_irq = mx21_init_irq,
- .init_machine = mx21ads_board_init,
- .timer = &mx21ads_timer,
+ .boot_params = MX21_PHYS_OFFSET + 0x100,
+ .map_io = mx21ads_map_io,
+ .init_early = imx21_init_early,
+ .init_irq = mx21_init_irq,
+ .timer = &mx21ads_timer,
+ .init_machine = mx21ads_board_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index 8382e7902078..06da438282aa 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -103,14 +103,18 @@ static iomux_v3_cfg_t mx25pdk_pads[] = {
MX25_PAD_SD1_DATA1__SD1_DATA1,
MX25_PAD_SD1_DATA2__SD1_DATA2,
MX25_PAD_SD1_DATA3__SD1_DATA3,
+
+ /* I2C1 */
+ MX25_PAD_I2C1_CLK__I2C1_CLK,
+ MX25_PAD_I2C1_DAT__I2C1_DAT,
};
static const struct fec_platform_data mx25_fec_pdata __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
-#define FEC_ENABLE_GPIO 35
-#define FEC_RESET_B_GPIO 104
+#define FEC_ENABLE_GPIO IMX_GPIO_NR(2, 3)
+#define FEC_RESET_B_GPIO IMX_GPIO_NR(4, 8)
static void __init mx25pdk_fec_reset(void)
{
@@ -185,9 +189,14 @@ static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
.keymap_size = ARRAY_SIZE(mx25pdk_keymap),
};
+static int mx25pdk_usbh2_init(struct platform_device *pdev)
+{
+ return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
+}
+
static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
+ .init = mx25pdk_usbh2_init,
.portsc = MXC_EHCI_MODE_SERIAL,
- .flags = MXC_EHCI_INTERNAL_PHY,
};
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
@@ -195,6 +204,10 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_UTMI,
};
+static const struct imxi2c_platform_data mx25_3ds_i2c0_data __initconst = {
+ .bitrate = 100000,
+};
+
static void __init mx25pdk_init(void)
{
mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
@@ -213,6 +226,7 @@ static void __init mx25pdk_init(void)
imx25_add_imx_keypad(&mx25pdk_keymap_data);
imx25_add_sdhci_esdhc_imx(0, NULL);
+ imx25_add_imx_i2c0(&mx25_3ds_i2c0_data);
}
static void __init mx25pdk_timer_init(void)
@@ -226,10 +240,10 @@ static struct sys_timer mx25pdk_timer = {
MACHINE_START(MX25_3DS, "Freescale MX25PDK (3DS)")
/* Maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX25_PHYS_OFFSET + 0x100,
- .map_io = mx25_map_io,
- .init_irq = mx25_init_irq,
- .init_machine = mx25pdk_init,
- .timer = &mx25pdk_timer,
+ .boot_params = MX25_PHYS_OFFSET + 0x100,
+ .map_io = mx25_map_io,
+ .init_early = imx25_init_early,
+ .init_irq = mx25_init_irq,
+ .timer = &mx25pdk_timer,
+ .init_machine = mx25pdk_init,
MACHINE_END
-
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 164331518bdd..73336c574e09 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -98,6 +98,9 @@ static const int mx27pdk_pins[] __initconst = {
PD22_PF_CSPI2_SCLK,
PD23_PF_CSPI2_MISO,
PD24_PF_CSPI2_MOSI,
+ /* I2C1 */
+ PD17_PF_I2C_DATA,
+ PD18_PF_I2C_CLK,
};
static const struct imxuart_platform_data uart_pdata __initconst = {
@@ -160,10 +163,14 @@ static int otg_phy_init(void)
}
#if defined(CONFIG_USB_ULPI)
+static int mx27_3ds_otg_init(struct platform_device *pdev)
+{
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
static struct mxc_usbh_platform_data otg_pdata __initdata = {
+ .init = mx27_3ds_otg_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
#endif
@@ -216,7 +223,7 @@ static struct regulator_init_data vgen_init = {
.consumer_supplies = vgen_consumers,
};
-static struct mc13783_regulator_init_data mx27_3ds_regulators[] = {
+static struct mc13xxx_regulator_init_data mx27_3ds_regulators[] = {
{
.id = MC13783_REG_VMMC1,
.init_data = &vmmc1_init,
@@ -227,10 +234,13 @@ static struct mc13783_regulator_init_data mx27_3ds_regulators[] = {
};
/* MC13783 */
-static struct mc13783_platform_data mc13783_pdata __initdata = {
- .regulators = mx27_3ds_regulators,
- .num_regulators = ARRAY_SIZE(mx27_3ds_regulators),
- .flags = MC13783_USE_REGULATOR,
+static struct mc13xxx_platform_data mc13783_pdata __initdata = {
+ .regulators = {
+ .regulators = mx27_3ds_regulators,
+ .num_regulators = ARRAY_SIZE(mx27_3ds_regulators),
+
+ },
+ .flags = MC13XXX_USE_REGULATOR,
};
/* SPI */
@@ -253,6 +263,9 @@ static struct spi_board_info mx27_3ds_spi_devs[] __initdata = {
},
};
+static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = {
+ .bitrate = 100000,
+};
static void __init mx27pdk_init(void)
{
@@ -282,6 +295,7 @@ static void __init mx27pdk_init(void)
if (mxc_expio_init(MX27_CS5_BASE_ADDR, EXPIO_PARENT_INT))
pr_warn("Init of the debugboard failed, all devices on the debugboard are unusable.\n");
+ imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data);
}
static void __init mx27pdk_timer_init(void)
@@ -295,9 +309,10 @@ static struct sys_timer mx27pdk_timer = {
MACHINE_START(MX27_3DS, "Freescale MX27PDK")
/* maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = mx27pdk_init,
- .timer = &mx27pdk_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &mx27pdk_timer,
+ .init_machine = mx27pdk_init,
MACHINE_END
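
The mc13783 to mc13xxx rename above also restructures the platform data: the regulator array and its count now live in a nested .regulators sub-structure, and the flag constants gain the MC13XXX_ prefix. The same shape is applied to pcm038 further down; abstracted from those two hunks (the board_* names are placeholders, board_regulators[] is assumed to be an mc13xxx_regulator_init_data array as above):

static struct mc13xxx_platform_data board_pmic_pdata __initdata = {
	.regulators = {
		.regulators	= board_regulators,
		.num_regulators	= ARRAY_SIZE(board_regulators),
	},
	.flags = MC13XXX_USE_REGULATOR,
};
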
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index b832f960fec4..367d1e4384c7 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -344,9 +344,10 @@ static void __init mx27ads_map_io(void)
MACHINE_START(MX27ADS, "Freescale i.MX27ADS")
/* maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27ads_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = mx27ads_board_init,
- .timer = &mx27ads_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27ads_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &mx27ads_timer,
+ .init_machine = mx27ads_board_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 4ce71b0401db..69787c30c320 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -266,10 +266,10 @@ static struct sys_timer mxt_td60_timer = {
MACHINE_START(MXT_TD60, "Maxtrack i-MXT TD60")
/* maintainer: Maxtrack Industrial */
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = mxt_td60_board_init,
- .timer = &mxt_td60_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &mxt_td60_timer,
+ .init_machine = mxt_td60_board_init,
MACHINE_END
-
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index cccc0a0a9c72..f754bab040b6 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -187,7 +187,6 @@ static struct i2c_board_info pca100_i2c_devices[] = {
}
};
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
static struct spi_eeprom at25320 = {
.name = "at25320an",
.byte_len = 4096,
@@ -211,7 +210,6 @@ static const struct spi_imx_master pca100_spi0_data __initconst = {
.chipselect = pca100_spi_cs,
.num_chipselect = ARRAY_SIZE(pca100_spi_cs),
};
-#endif
static void pca100_ac97_warm_reset(struct snd_ac97 *ac97)
{
@@ -273,25 +271,29 @@ static const struct imxmmc_platform_data sdhc_pdata __initconst = {
static int otg_phy_init(struct platform_device *pdev)
{
gpio_set_value(OTG_PHY_CS_GPIO, 0);
- return 0;
+
+ mdelay(10);
+
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
}
static struct mxc_usbh_platform_data otg_pdata __initdata = {
.init = otg_phy_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
static int usbh2_phy_init(struct platform_device *pdev)
{
gpio_set_value(USBH2_PHY_CS_GPIO, 0);
- return 0;
+
+ mdelay(10);
+
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
}
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = usbh2_phy_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
#endif
@@ -389,13 +391,11 @@ static void __init pca100_init(void)
imx27_add_imx_i2c(1, &pca100_i2c1_data);
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_IN);
mxc_gpio_mode(GPIO_PORTD | 27 | GPIO_GPIO | GPIO_IN);
spi_register_board_info(pca100_spi_board_info,
ARRAY_SIZE(pca100_spi_board_info));
imx27_add_spi_imx0(&pca100_spi0_data);
-#endif
gpio_request(OTG_PHY_CS_GPIO, "usb-otg-cs");
gpio_direction_output(OTG_PHY_CS_GPIO, 1);
@@ -437,10 +437,10 @@ static struct sys_timer pca100_timer = {
};
MACHINE_START(PCA100, "phyCARD-i.MX27")
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = pca100_init,
- .timer = &pca100_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .init_machine = pca100_init,
+ .timer = &pca100_timer,
MACHINE_END
-
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index 505614803bc6..e502e79d56b9 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -252,7 +252,7 @@ static struct regulator_init_data cam_data = {
.consumer_supplies = cam_consumers,
};
-static struct mc13783_regulator_init_data pcm038_regulators[] = {
+static struct mc13xxx_regulator_init_data pcm038_regulators[] = {
{
.id = MC13783_REG_VCAM,
.init_data = &cam_data,
@@ -262,11 +262,13 @@ static struct mc13783_regulator_init_data pcm038_regulators[] = {
},
};
-static struct mc13783_platform_data pcm038_pmic = {
- .regulators = pcm038_regulators,
- .num_regulators = ARRAY_SIZE(pcm038_regulators),
- .flags = MC13783_USE_ADC | MC13783_USE_REGULATOR |
- MC13783_USE_TOUCHSCREEN,
+static struct mc13xxx_platform_data pcm038_pmic = {
+ .regulators = {
+ .regulators = pcm038_regulators,
+ .num_regulators = ARRAY_SIZE(pcm038_regulators),
+ },
+ .flags = MC13XXX_USE_ADC | MC13XXX_USE_REGULATOR |
+ MC13XXX_USE_TOUCHSCREEN,
};
static struct spi_board_info pcm038_spi_board_info[] __initdata = {
@@ -281,9 +283,15 @@ static struct spi_board_info pcm038_spi_board_info[] __initdata = {
}
};
+static int pcm038_usbh2_init(struct platform_device *pdev)
+{
+ return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
+ .init = pcm038_usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
};
static void __init pcm038_init(void)
@@ -340,9 +348,10 @@ static struct sys_timer pcm038_timer = {
};
MACHINE_START(PCM038, "phyCORE-i.MX27")
- .boot_params = MX27_PHYS_OFFSET + 0x100,
- .map_io = mx27_map_io,
- .init_irq = mx27_init_irq,
- .init_machine = pcm038_init,
- .timer = &pcm038_timer,
+ .boot_params = MX27_PHYS_OFFSET + 0x100,
+ .map_io = mx27_map_io,
+ .init_early = imx27_init_early,
+ .init_irq = mx27_init_irq,
+ .timer = &pcm038_timer,
+ .init_machine = pcm038_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index eae878f306c6..dcaee043628e 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -145,10 +145,11 @@ static struct sys_timer scb9328_timer = {
};
MACHINE_START(SCB9328, "Synertronixx scb9328")
- /* Sascha Hauer */
- .boot_params = 0x08000100,
- .map_io = mx1_map_io,
- .init_irq = mx1_init_irq,
- .timer = &scb9328_timer,
- .init_machine = scb9328_init,
+ /* Sascha Hauer */
+ .boot_params = 0x08000100,
+ .map_io = mx1_map_io,
+ .init_early = imx1_init_early,
+ .init_irq = mx1_init_irq,
+ .timer = &scb9328_timer,
+ .init_machine = scb9328_init,
MACHINE_END
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
index 729ae0915af8..378c61b1e74c 100644
--- a/arch/arm/mach-imx/mm-imx1.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -30,10 +30,13 @@ static struct map_desc imx_io_desc[] __initdata = {
void __init mx1_map_io(void)
{
+ iotable_init(imx_io_desc, ARRAY_SIZE(imx_io_desc));
+}
+
+void __init imx1_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX1);
mxc_arch_reset_init(MX1_IO_ADDRESS(MX1_WDT_BASE_ADDR));
-
- iotable_init(imx_io_desc, ARRAY_SIZE(imx_io_desc));
}
int imx1_register_gpios(void);
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index e728af81d1b1..b6152c6b5b29 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -56,10 +56,13 @@ static struct map_desc imx21_io_desc[] __initdata = {
*/
void __init mx21_map_io(void)
{
+ iotable_init(imx21_io_desc, ARRAY_SIZE(imx21_io_desc));
+}
+
+void __init imx21_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX21);
mxc_arch_reset_init(MX21_IO_ADDRESS(MX21_WDOG_BASE_ADDR));
-
- iotable_init(imx21_io_desc, ARRAY_SIZE(imx21_io_desc));
}
int imx21_register_gpios(void);
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index 2edec6ce8fe7..09dd8d4e15bb 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -45,11 +45,14 @@ static struct map_desc mx25_io_desc[] __initdata = {
*/
void __init mx25_map_io(void)
{
+ iotable_init(mx25_io_desc, ARRAY_SIZE(mx25_io_desc));
+}
+
+void __init imx25_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX25);
mxc_iomux_v3_init(MX25_IO_ADDRESS(MX25_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR));
-
- iotable_init(mx25_io_desc, ARRAY_SIZE(mx25_io_desc));
}
int imx25_register_gpios(void);
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index 374e48b7a412..bcaa3b69c456 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -56,10 +56,13 @@ static struct map_desc imx27_io_desc[] __initdata = {
*/
void __init mx27_map_io(void)
{
+ iotable_init(imx27_io_desc, ARRAY_SIZE(imx27_io_desc));
+}
+
+void __init imx27_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX27);
mxc_arch_reset_init(MX27_IO_ADDRESS(MX27_WDOG_BASE_ADDR));
-
- iotable_init(imx27_io_desc, ARRAY_SIZE(imx27_io_desc));
}
int imx27_register_gpios(void);
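Note: the mm-imx1/21/25/27 hunks above all apply the same split: mx*_map_io() now only installs the static I/O table, while the new imx*_init_early() hooks set the CPU type and watchdog reset base, which need that mapping to already be in place. A board file is then expected to wire both callbacks, as the i.MX27 boards earlier in this patch do. The sketch below only illustrates the shape; MYBOARD, myboard_init and the omitted .timer entry are hypothetical placeholders, and only the mx27/imx27 helpers shown in this patch are real.

/* Sketch only -- MYBOARD and myboard_init are placeholders; a real board
 * record also needs a .timer entry, as in the boards patched above. */
static void __init myboard_init(void)
{
	/* platform device registration, pin setup, ... */
}

MACHINE_START(MYBOARD, "hypothetical i.MX27 board")
	.boot_params	= MX27_PHYS_OFFSET + 0x100,
	.map_io		= mx27_map_io,		/* static iotable only */
	.init_early	= imx27_init_early,	/* CPU type + watchdog reset base */
	.init_irq	= mx27_init_irq,
	.init_machine	= myboard_init,
MACHINE_END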
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index 769b0f10c834..d701d32a07f1 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -13,6 +13,7 @@ config ARCH_INTEGRATOR_CP
bool "Support Integrator/CP platform"
select ARCH_CINTEGRATOR
select ARM_TIMER_SP804
+ select PLAT_VERSATILE_CLCD
help
Include support for the ARM(R) Integrator CP platform.
diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h
index 5f96e1518aa9..a08f9b0299df 100644
--- a/arch/arm/mach-integrator/common.h
+++ b/arch/arm/mach-integrator/common.h
@@ -1 +1,2 @@
+void integrator_init_early(void);
void integrator_reserve(void);
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index b8e884b450da..77315b995681 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -144,12 +144,15 @@ static struct clk_lookup lookups[] = {
}
};
+void __init integrator_init_early(void)
+{
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+}
+
static int __init integrator_init(void)
{
int i;
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
amba_device_register(d, &iomem_resource);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 5db574f8ae3f..8cbb75a96bd4 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -121,6 +121,7 @@ static struct clcd_panel vga = {
.height = -1,
.tim2 = TIM2_BCD | TIM2_IPC,
.cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
.connector = IMPD1_CTRL_DISP_VGA,
.bpp = 16,
.grayscale = 0,
@@ -149,6 +150,7 @@ static struct clcd_panel svga = {
.tim2 = TIM2_BCD,
.cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
.connector = IMPD1_CTRL_DISP_VGA,
+ .caps = CLCD_CAP_5551,
.bpp = 16,
.grayscale = 0,
};
@@ -175,6 +177,7 @@ static struct clcd_panel prospector = {
.height = -1,
.tim2 = TIM2_BCD,
.cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
.fixedtimings = 1,
.connector = IMPD1_CTRL_DISP_LCD,
.bpp = 16,
@@ -206,6 +209,7 @@ static struct clcd_panel ltm10c209 = {
.height = -1,
.tim2 = TIM2_BCD,
.cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
.fixedtimings = 1,
.connector = IMPD1_CTRL_DISP_LCD,
.bpp = 16,
@@ -279,6 +283,7 @@ static void impd1fb_clcd_remove(struct clcd_fb *fb)
static struct clcd_board impd1_clcd_data = {
.name = "IM-PD/1",
+ .caps = CLCD_CAP_5551 | CLCD_CAP_888,
.check = clcdfb_check,
.decode = clcdfb_decode,
.disable = impd1fb_clcd_disable,
diff --git a/arch/arm/mach-integrator/include/mach/cm.h b/arch/arm/mach-integrator/include/mach/cm.h
index 1ab353e23595..445d57adb043 100644
--- a/arch/arm/mach-integrator/include/mach/cm.h
+++ b/arch/arm/mach-integrator/include/mach/cm.h
@@ -24,9 +24,9 @@ void cm_control(u32, u32);
#define CM_CTRL_LCDBIASDN (1 << 10)
#define CM_CTRL_LCDMUXSEL_MASK (7 << 11)
#define CM_CTRL_LCDMUXSEL_GENLCD (1 << 11)
-#define CM_CTRL_LCDMUXSEL_VGA_16BPP (2 << 11)
+#define CM_CTRL_LCDMUXSEL_VGA565_TFT555 (2 << 11)
#define CM_CTRL_LCDMUXSEL_SHARPLCD (3 << 11)
-#define CM_CTRL_LCDMUXSEL_VGA_8421BPP (4 << 11)
+#define CM_CTRL_LCDMUXSEL_VGA555_TFT555 (4 << 11)
#define CM_CTRL_LCDEN0 (1 << 14)
#define CM_CTRL_LCDEN1 (1 << 15)
#define CM_CTRL_STATIC1 (1 << 16)
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index 991f24d2c115..334d5e271889 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -23,7 +23,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#define BUS_OFFSET UL(0x80000000)
#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET)
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index b666443b5cbb..980803ff348c 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -48,6 +48,8 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
+#include <plat/fpga-irq.h>
+
#include "common.h"
/*
@@ -57,10 +59,10 @@
* Setup a VA for the Integrator interrupt controller (for header #0,
* just for now).
*/
-#define VA_IC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
-#define VA_SC_BASE IO_ADDRESS(INTEGRATOR_SC_BASE)
-#define VA_EBI_BASE IO_ADDRESS(INTEGRATOR_EBI_BASE)
-#define VA_CMIC_BASE IO_ADDRESS(INTEGRATOR_HDR_IC)
+#define VA_IC_BASE __io_address(INTEGRATOR_IC_BASE)
+#define VA_SC_BASE __io_address(INTEGRATOR_SC_BASE)
+#define VA_EBI_BASE __io_address(INTEGRATOR_EBI_BASE)
+#define VA_CMIC_BASE __io_address(INTEGRATOR_HDR_IC)
/*
* Logical Physical
@@ -156,27 +158,14 @@ static void __init ap_map_io(void)
#define INTEGRATOR_SC_VALID_INT 0x003fffff
-static void sc_mask_irq(struct irq_data *d)
-{
- writel(1 << d->irq, VA_IC_BASE + IRQ_ENABLE_CLEAR);
-}
-
-static void sc_unmask_irq(struct irq_data *d)
-{
- writel(1 << d->irq, VA_IC_BASE + IRQ_ENABLE_SET);
-}
-
-static struct irq_chip sc_chip = {
- .name = "SC",
- .irq_ack = sc_mask_irq,
- .irq_mask = sc_mask_irq,
- .irq_unmask = sc_unmask_irq,
+static struct fpga_irq_data sc_irq_data = {
+ .base = VA_IC_BASE,
+ .irq_start = 0,
+ .chip.name = "SC",
};
static void __init ap_init_irq(void)
{
- unsigned int i;
-
/* Disable all interrupts initially. */
/* Do the core module ones */
writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR);
@@ -185,13 +174,7 @@ static void __init ap_init_irq(void)
writel(-1, VA_IC_BASE + IRQ_ENABLE_CLEAR);
writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR);
- for (i = 0; i < NR_IRQS; i++) {
- if (((1 << i) & INTEGRATOR_SC_VALID_INT) != 0) {
- set_irq_chip(i, &sc_chip);
- set_irq_handler(i, handle_level_irq);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
- }
+ fpga_irq_init(-1, INTEGRATOR_SC_VALID_INT, &sc_irq_data);
}
#ifdef CONFIG_PM
@@ -282,7 +265,7 @@ static void ap_flash_exit(void)
static void ap_flash_set_vpp(int on)
{
- unsigned long reg = on ? SC_CTRLS : SC_CTRLC;
+ void __iomem *reg = on ? SC_CTRLS : SC_CTRLC;
writel(INTEGRATOR_SC_CTRL_nFLVPPEN, reg);
}
@@ -499,8 +482,9 @@ static struct sys_timer ap_timer = {
MACHINE_START(INTEGRATOR, "ARM-Integrator")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.boot_params = 0x00000100,
- .map_io = ap_map_io,
.reserve = integrator_reserve,
+ .map_io = ap_map_io,
+ .init_early = integrator_init_early,
.init_irq = ap_init_irq,
.timer = &ap_timer,
.init_machine = ap_init,
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index e9327da1382e..9e3ce26023e8 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -42,6 +42,10 @@
#include <asm/hardware/timer-sp.h>
+#include <plat/clcd.h>
+#include <plat/fpga-irq.h>
+#include <plat/sched_clock.h>
+
#include "common.h"
#define INTCP_PA_FLASH_BASE 0x24000000
@@ -49,9 +53,9 @@
#define INTCP_PA_CLCD_BASE 0xc0000000
-#define INTCP_VA_CIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE + 0x40)
-#define INTCP_VA_PIC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
-#define INTCP_VA_SIC_BASE IO_ADDRESS(INTEGRATOR_CP_SIC_BASE)
+#define INTCP_VA_CIC_BASE __io_address(INTEGRATOR_HDR_BASE + 0x40)
+#define INTCP_VA_PIC_BASE __io_address(INTEGRATOR_IC_BASE)
+#define INTCP_VA_SIC_BASE __io_address(INTEGRATOR_CP_SIC_BASE)
#define INTCP_ETH_SIZE 0x10
@@ -139,129 +143,48 @@ static void __init intcp_map_io(void)
iotable_init(intcp_io_desc, ARRAY_SIZE(intcp_io_desc));
}
-#define cic_writel __raw_writel
-#define cic_readl __raw_readl
-#define pic_writel __raw_writel
-#define pic_readl __raw_readl
-#define sic_writel __raw_writel
-#define sic_readl __raw_readl
-
-static void cic_mask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_CIC_START;
- cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
-}
-
-static void cic_unmask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_CIC_START;
- cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_SET);
-}
-
-static struct irq_chip cic_chip = {
- .name = "CIC",
- .irq_ack = cic_mask_irq,
- .irq_mask = cic_mask_irq,
- .irq_unmask = cic_unmask_irq,
+static struct fpga_irq_data cic_irq_data = {
+ .base = INTCP_VA_CIC_BASE,
+ .irq_start = IRQ_CIC_START,
+ .chip.name = "CIC",
};
-static void pic_mask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_PIC_START;
- pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
-}
-
-static void pic_unmask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_PIC_START;
- pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_SET);
-}
-
-static struct irq_chip pic_chip = {
- .name = "PIC",
- .irq_ack = pic_mask_irq,
- .irq_mask = pic_mask_irq,
- .irq_unmask = pic_unmask_irq,
+static struct fpga_irq_data pic_irq_data = {
+ .base = INTCP_VA_PIC_BASE,
+ .irq_start = IRQ_PIC_START,
+ .chip.name = "PIC",
};
-static void sic_mask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_SIC_START;
- sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
-}
-
-static void sic_unmask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_SIC_START;
- sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_SET);
-}
-
-static struct irq_chip sic_chip = {
- .name = "SIC",
- .irq_ack = sic_mask_irq,
- .irq_mask = sic_mask_irq,
- .irq_unmask = sic_unmask_irq,
+static struct fpga_irq_data sic_irq_data = {
+ .base = INTCP_VA_SIC_BASE,
+ .irq_start = IRQ_SIC_START,
+ .chip.name = "SIC",
};
-static void
-sic_handle_irq(unsigned int irq, struct irq_desc *desc)
-{
- unsigned long status = sic_readl(INTCP_VA_SIC_BASE + IRQ_STATUS);
-
- if (status == 0) {
- do_bad_IRQ(irq, desc);
- return;
- }
-
- do {
- irq = ffs(status) - 1;
- status &= ~(1 << irq);
-
- irq += IRQ_SIC_START;
-
- generic_handle_irq(irq);
- } while (status);
-}
-
static void __init intcp_init_irq(void)
{
- unsigned int i;
+ u32 pic_mask, sic_mask;
+
+ pic_mask = ~((~0u) << (11 - IRQ_PIC_START));
+ pic_mask |= (~((~0u) << (29 - 22))) << 22;
+ sic_mask = ~((~0u) << (1 + IRQ_SIC_END - IRQ_SIC_START));
/*
* Disable all interrupt sources
*/
- pic_writel(0xffffffff, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
- pic_writel(0xffffffff, INTCP_VA_PIC_BASE + FIQ_ENABLE_CLEAR);
-
- for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
- if (i == 11)
- i = 22;
- if (i == 29)
- break;
- set_irq_chip(i, &pic_chip);
- set_irq_handler(i, handle_level_irq);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
+ writel(0xffffffff, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
+ writel(0xffffffff, INTCP_VA_PIC_BASE + FIQ_ENABLE_CLEAR);
+ writel(0xffffffff, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
+ writel(0xffffffff, INTCP_VA_CIC_BASE + FIQ_ENABLE_CLEAR);
+ writel(sic_mask, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
+ writel(sic_mask, INTCP_VA_SIC_BASE + FIQ_ENABLE_CLEAR);
- cic_writel(0xffffffff, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
- cic_writel(0xffffffff, INTCP_VA_CIC_BASE + FIQ_ENABLE_CLEAR);
+ fpga_irq_init(-1, pic_mask, &pic_irq_data);
- for (i = IRQ_CIC_START; i <= IRQ_CIC_END; i++) {
- set_irq_chip(i, &cic_chip);
- set_irq_handler(i, handle_level_irq);
- set_irq_flags(i, IRQF_VALID);
- }
-
- sic_writel(0x00000fff, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
- sic_writel(0x00000fff, INTCP_VA_SIC_BASE + FIQ_ENABLE_CLEAR);
-
- for (i = IRQ_SIC_START; i <= IRQ_SIC_END; i++) {
- set_irq_chip(i, &sic_chip);
- set_irq_handler(i, handle_level_irq);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
+ fpga_irq_init(-1, ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START)),
+ &cic_irq_data);
- set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
+ fpga_irq_init(IRQ_CP_CPPLDINT, sic_mask, &sic_irq_data);
}
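Note: the pic_mask/sic_mask expressions above reproduce exactly the IRQ ranges the removed loops covered, and the fpga_irq_init(parent, valid_mask, data) calls replace the open-coded irq_chips: a parent of -1 means no cascade, while the SIC is chained from IRQ_CP_CPPLDINT, matching the removed sic_handle_irq handler. Assuming IRQ_PIC_START is 0 (the old loop's literal cut points at 11, 22 and 29 suggest PIC interrupts are numbered from 0) and a 12-interrupt SIC range (the old code wrote 0x00000fff to the SIC enable-clear registers), the masks evaluate as in this standalone, hypothetical check:

#include <stdio.h>

int main(void)
{
	unsigned int irq_pic_start = 0;	/* assumption, see note above */
	unsigned int sic_irqs = 12;	/* implied by the old 0x00000fff writes */
	unsigned int pic_mask, sic_mask;

	pic_mask = ~((~0u) << (11 - irq_pic_start));	/* IRQs 0..10 */
	pic_mask |= (~((~0u) << (29 - 22))) << 22;	/* IRQs 22..28 */
	sic_mask = ~((~0u) << sic_irqs);		/* IRQs 0..11 */

	printf("pic_mask = 0x%08x\n", pic_mask);	/* prints 0x1fc007ff */
	printf("sic_mask = 0x%08x\n", sic_mask);	/* prints 0x00000fff */
	return 0;
}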
/*
@@ -449,43 +372,21 @@ static struct amba_device aaci_device = {
/*
* CLCD support
*/
-static struct clcd_panel vga = {
- .mode = {
- .name = "VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .bpp = 16,
- .grayscale = 0,
-};
-
/*
* Ensure VGA is selected.
*/
static void cp_clcd_enable(struct clcd_fb *fb)
{
- u32 val;
+ struct fb_var_screeninfo *var = &fb->fb.var;
+ u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
- if (fb->fb.var.bits_per_pixel <= 8)
- val = CM_CTRL_LCDMUXSEL_VGA_8421BPP;
+ if (var->bits_per_pixel <= 8 ||
+ (var->bits_per_pixel == 16 && var->green.length == 5))
+ /* Pseudocolor, RGB555, BGR555 */
+ val |= CM_CTRL_LCDMUXSEL_VGA555_TFT555;
else if (fb->fb.var.bits_per_pixel <= 16)
- val = CM_CTRL_LCDMUXSEL_VGA_16BPP
- | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1
- | CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
+ /* truecolor RGB565 */
+ val |= CM_CTRL_LCDMUXSEL_VGA565_TFT555;
else
val = 0; /* no idea for this, don't trust the docs */
@@ -498,49 +399,24 @@ static void cp_clcd_enable(struct clcd_fb *fb)
CM_CTRL_n24BITEN, val);
}
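Note: with the caps-aware CLCD code, the mux selection above keys off the green bit length of the requested mode rather than depth alone, so 16bpp RGB555/BGR555 takes the 555 path while RGB565 takes the 565 path. The following standalone, hypothetical restatement of that decision uses the CM_CTRL values renamed in mach/cm.h earlier in this patch and deliberately omits the STATIC/enable bits that the real function also sets:

#include <stdio.h>

/* Values taken from the renamed definitions in mach/cm.h above. */
#define CM_CTRL_LCDMUXSEL_VGA565_TFT555	(2 << 11)
#define CM_CTRL_LCDMUXSEL_VGA555_TFT555	(4 << 11)

static unsigned int lcd_mux_for(unsigned int bpp, unsigned int green_len)
{
	if (bpp <= 8 || (bpp == 16 && green_len == 5))
		return CM_CTRL_LCDMUXSEL_VGA555_TFT555;	/* pseudocolor, RGB555, BGR555 */
	else if (bpp <= 16)
		return CM_CTRL_LCDMUXSEL_VGA565_TFT555;	/* truecolor RGB565 */
	return 0;	/* deeper modes: left as 0, as in the code above */
}

int main(void)
{
	printf("16bpp, 5-bit green -> 0x%04x\n", lcd_mux_for(16, 5));	/* 0x2000 */
	printf("16bpp, 6-bit green -> 0x%04x\n", lcd_mux_for(16, 6));	/* 0x1000 */
	return 0;
}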
-static unsigned long framesize = SZ_1M;
-
static int cp_clcd_setup(struct clcd_fb *fb)
{
- dma_addr_t dma;
-
- fb->panel = &vga;
-
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-}
-
-static int cp_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
-}
+ fb->panel = versatile_clcd_get_panel("VGA");
+ if (!fb->panel)
+ return -EINVAL;
-static void cp_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ return versatile_clcd_setup_dma(fb, SZ_1M);
}
static struct clcd_board clcd_data = {
.name = "Integrator/CP",
+ .caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 | CLCD_CAP_888,
.check = clcdfb_check,
.decode = clcdfb_decode,
.enable = cp_clcd_enable,
.setup = cp_clcd_setup,
- .mmap = cp_clcd_mmap,
- .remove = cp_clcd_remove,
+ .mmap = versatile_clcd_mmap_dma,
+ .remove = versatile_clcd_remove_dma,
};
static struct amba_device clcd_device = {
@@ -565,11 +441,23 @@ static struct amba_device *amba_devs[] __initdata = {
&clcd_device,
};
+#define REFCOUNTER (__io_address(INTEGRATOR_HDR_BASE) + 0x28)
+
+static void __init intcp_init_early(void)
+{
+ clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups));
+
+ integrator_init_early();
+
+#ifdef CONFIG_PLAT_VERSATILE_SCHED_CLOCK
+ versatile_sched_clock_init(REFCOUNTER, 24000000);
+#endif
+}
+
static void __init intcp_init(void)
{
int i;
- clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups));
platform_add_devices(intcp_devs, ARRAY_SIZE(intcp_devs));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
@@ -599,8 +487,9 @@ static struct sys_timer cp_timer = {
MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.boot_params = 0x00000100,
- .map_io = intcp_map_io,
.reserve = integrator_reserve,
+ .map_io = intcp_map_io,
+ .init_early = intcp_init_early,
.init_irq = intcp_init_irq,
.timer = &cp_timer,
.init_machine = intcp_init,
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index 3ad455318868..1afa99ef97fa 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -6,7 +6,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#ifndef __ASSEMBLY__
diff --git a/arch/arm/mach-iop32x/include/mach/memory.h b/arch/arm/mach-iop32x/include/mach/memory.h
index c30f6450ad50..169cc239f76c 100644
--- a/arch/arm/mach-iop32x/include/mach/memory.h
+++ b/arch/arm/mach-iop32x/include/mach/memory.h
@@ -8,6 +8,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0xa0000000)
+#define PLAT_PHYS_OFFSET UL(0xa0000000)
#endif
diff --git a/arch/arm/mach-iop33x/include/mach/memory.h b/arch/arm/mach-iop33x/include/mach/memory.h
index a30a96aa6d2d..8e1daf7006b6 100644
--- a/arch/arm/mach-iop33x/include/mach/memory.h
+++ b/arch/arm/mach-iop33x/include/mach/memory.h
@@ -8,6 +8,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h
index 98e3471be15b..5f0c4fd4076a 100644
--- a/arch/arm/mach-ixp2000/include/mach/memory.h
+++ b/arch/arm/mach-ixp2000/include/mach/memory.h
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#include <mach/ixp2000-regs.h>
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index 6ef65d813f16..6cf0704e946a 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -17,7 +17,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET (0x00000000)
+#define PLAT_PHYS_OFFSET (0x00000000)
#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)
diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h
index 0136eaa29224..6d388c9d0e20 100644
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -12,7 +12,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
diff --git a/arch/arm/mach-kirkwood/include/mach/memory.h b/arch/arm/mach-kirkwood/include/mach/memory.h
index 45431e131465..4600b44e3ad3 100644
--- a/arch/arm/mach-kirkwood/include/mach/memory.h
+++ b/arch/arm/mach-kirkwood/include/mach/memory.h
@@ -5,6 +5,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index bace9a681adc..f7e1b9bce345 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -18,7 +18,7 @@
/*
* Physical SRAM offset.
*/
-#define PHYS_OFFSET KS8695_SDRAM_PA
+#define PLAT_PHYS_OFFSET KS8695_SDRAM_PA
#ifndef __ASSEMBLY__
diff --git a/arch/arm/mach-lh7a40x/Kconfig b/arch/arm/mach-lh7a40x/Kconfig
deleted file mode 100644
index 9be7466e346c..000000000000
--- a/arch/arm/mach-lh7a40x/Kconfig
+++ /dev/null
@@ -1,74 +0,0 @@
-if ARCH_LH7A40X
-
-menu "LH7A40X Implementations"
-
-config MACH_KEV7A400
- bool "KEV7A400"
- select ARCH_LH7A400
- help
- Say Y here if you are using the Sharp KEV7A400 development
- board. This hardware is discontinued, so I'd be very
- surprised if you wanted this option.
-
-config MACH_LPD7A400
- bool "LPD7A400 Card Engine"
- select ARCH_LH7A400
-# select IDE_POLL
-# select HAS_TOUCHSCREEN_ADS7843_LH7
- help
- Say Y here if you are using Logic Product Development's
- LPD7A400 CardEngine. For the time being, the LPD7A400 and
- LPD7A404 options are mutually exclusive.
-
-config MACH_LPD7A404
- bool "LPD7A404 Card Engine"
- select ARCH_LH7A404
-# select IDE_POLL
-# select HAS_TOUCHSCREEN_ADC_LH7
- help
- Say Y here if you are using Logic Product Development's
- LPD7A404 CardEngine. For the time being, the LPD7A400 and
- LPD7A404 options are mutually exclusive.
-
-config ARCH_LH7A400
- bool
-
-config ARCH_LH7A404
- bool
-
-config LPD7A40X_CPLD_SSP
- bool
-
-config LH7A40X_CONTIGMEM
- bool "Disable NUMA/SparseMEM Support"
- help
- Say Y here if your bootloader sets the SROMLL bit(s) in
- the SDRAM controller, organizing memory as a contiguous
- array. This option will disable sparse memory support
- and force the kernel to manage all memory in one node.
-
- Setting this option incorrectly may prevent the kernel
- from booting. It is OK to leave it N.
-
- For more information, consult
- <file:Documentation/arm/Sharp-LH/SDRAM>.
-
-config LH7A40X_ONE_BANK_PER_NODE
- bool "Optimize NUMA Node Tables for Size"
- depends on !LH7A40X_CONTIGMEM
- help
- Say Y here to produce compact memory node tables. By
- default pairs of adjacent physical RAM banks are managed
- together in a single node, incurring some wasted overhead
- in the node tables, however also maintaining compatibility
- with systems where physical memory is truly contiguous.
-
- Setting this option incorrectly may prevent the kernel from
- booting. It is OK to leave it N.
-
- For more information, consult
- <file:Documentation/arm/Sharp-LH/SDRAM>.
-
-endmenu
-
-endif
diff --git a/arch/arm/mach-lh7a40x/Makefile b/arch/arm/mach-lh7a40x/Makefile
deleted file mode 100644
index 94b8615fb3c3..000000000000
--- a/arch/arm/mach-lh7a40x/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y := time.o clocks.o
-obj-m :=
-obj-n :=
-obj- :=
-
-obj-$(CONFIG_MACH_KEV7A400) += arch-kev7a400.o irq-lh7a400.o
-obj-$(CONFIG_MACH_LPD7A400) += arch-lpd7a40x.o irq-lh7a400.o
-obj-$(CONFIG_MACH_LPD7A404) += arch-lpd7a40x.o irq-lh7a404.o
-obj-$(CONFIG_LPD7A40X_CPLD_SSP) += ssp-cpld.o
-obj-$(CONFIG_FB_ARMCLCD) += clcd.o
-
diff --git a/arch/arm/mach-lh7a40x/Makefile.boot b/arch/arm/mach-lh7a40x/Makefile.boot
deleted file mode 100644
index af941be076eb..000000000000
--- a/arch/arm/mach-lh7a40x/Makefile.boot
+++ /dev/null
@@ -1,4 +0,0 @@
- zreladdr-y := 0xc0008000
-params_phys-y := 0xc0000100
-initrd_phys-y := 0xc4000000
-
diff --git a/arch/arm/mach-lh7a40x/arch-kev7a400.c b/arch/arm/mach-lh7a40x/arch-kev7a400.c
deleted file mode 100644
index 71129c33c7d2..000000000000
--- a/arch/arm/mach-lh7a40x/arch-kev7a400.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/* arch/arm/mach-lh7a40x/arch-kev7a400.c
- *
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-
-#include <mach/hardware.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-
- /* This function calls the board specific IRQ initialization function. */
-
-static struct map_desc kev7a400_io_desc[] __initdata = {
- {
- .virtual = IO_VIRT,
- .pfn = __phys_to_pfn(IO_PHYS),
- .length = IO_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = CPLD_VIRT,
- .pfn = __phys_to_pfn(CPLD_PHYS),
- .length = CPLD_SIZE,
- .type = MT_DEVICE
- }
-};
-
-void __init kev7a400_map_io(void)
-{
- iotable_init (kev7a400_io_desc, ARRAY_SIZE (kev7a400_io_desc));
-}
-
-static u16 CPLD_IRQ_mask; /* Mask for CPLD IRQs, 1 == unmasked */
-
-static void kev7a400_ack_cpld_irq(struct irq_data *d)
-{
- CPLD_CL_INT = 1 << (d->irq - IRQ_KEV7A400_CPLD);
-}
-
-static void kev7a400_mask_cpld_irq(struct irq_data *d)
-{
- CPLD_IRQ_mask &= ~(1 << (d->irq - IRQ_KEV7A400_CPLD));
- CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
-}
-
-static void kev7a400_unmask_cpld_irq(struct irq_data *d)
-{
- CPLD_IRQ_mask |= 1 << (d->irq - IRQ_KEV7A400_CPLD);
- CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
-}
-
-static struct irq_chip kev7a400_cpld_chip = {
- .name = "CPLD",
- .irq_ack = kev7a400_ack_cpld_irq,
- .irq_mask = kev7a400_mask_cpld_irq,
- .irq_unmask = kev7a400_unmask_cpld_irq,
-};
-
-
-static void kev7a400_cpld_handler (unsigned int irq, struct irq_desc *desc)
-{
- u32 mask = CPLD_LATCHED_INTS;
- irq = IRQ_KEV7A400_CPLD;
- for (; mask; mask >>= 1, ++irq)
- if (mask & 1)
- generic_handle_irq(irq);
-}
-
-void __init lh7a40x_init_board_irq (void)
-{
- int irq;
-
- for (irq = IRQ_KEV7A400_CPLD;
- irq < IRQ_KEV7A400_CPLD + NR_IRQ_BOARD; ++irq) {
- set_irq_chip (irq, &kev7a400_cpld_chip);
- set_irq_handler (irq, handle_edge_irq);
- set_irq_flags (irq, IRQF_VALID);
- }
- set_irq_chained_handler (IRQ_CPLD, kev7a400_cpld_handler);
-
- /* Clear all CPLD interrupts */
- CPLD_CL_INT = 0xff; /* CPLD_INTR_MMC_CD | CPLD_INTR_ETH_INT; */
-
- GPIO_GPIOINTEN = 0; /* Disable all GPIO interrupts */
- barrier();
-
-#if 0
- GPIO_INTTYPE1
- = (GPIO_INTR_PCC1_CD | GPIO_INTR_PCC1_CD); /* Edge trig. */
- GPIO_INTTYPE2 = 0; /* Falling edge & low-level */
- GPIO_GPIOFEOI = 0xff; /* Clear all GPIO interrupts */
- GPIO_GPIOINTEN = 0xff; /* Enable all GPIO interrupts */
-
- init_FIQ();
-#endif
-}
-
-MACHINE_START (KEV7A400, "Sharp KEV7a400")
- /* Maintainer: Marc Singer */
- .boot_params = 0xc0000100,
- .map_io = kev7a400_map_io,
- .init_irq = lh7a400_init_irq,
- .timer = &lh7a40x_timer,
-MACHINE_END
diff --git a/arch/arm/mach-lh7a40x/arch-lpd7a40x.c b/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
deleted file mode 100644
index e735546181ad..000000000000
--- a/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/* arch/arm/mach-lh7a40x/arch-lpd7a40x.c
- *
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <mach/hardware.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-
-#define CPLD_INT_NETHERNET (1<<0)
-#define CPLD_INTMASK_ETHERNET (1<<2)
-#if defined (CONFIG_MACH_LPD7A400)
-# define CPLD_INT_NTOUCH (1<<1)
-# define CPLD_INTMASK_TOUCH (1<<3)
-# define CPLD_INT_PEN (1<<4)
-# define CPLD_INTMASK_PEN (1<<4)
-# define CPLD_INT_PIRQ (1<<4)
-#endif
-#define CPLD_INTMASK_CPLD (1<<7)
-#define CPLD_INT_CPLD (1<<6)
-
-#define CPLD_CONTROL_SWINT (1<<7) /* Disable all CPLD IRQs */
-#define CPLD_CONTROL_OCMSK (1<<6) /* Mask USB1 connect IRQ */
-#define CPLD_CONTROL_PDRV (1<<5) /* PCC_nDRV high */
-#define CPLD_CONTROL_USB1C (1<<4) /* USB1 connect IRQ active */
-#define CPLD_CONTROL_USB1P (1<<3) /* USB1 power disable */
-#define CPLD_CONTROL_AWKP (1<<2) /* Auto-wakeup disabled */
-#define CPLD_CONTROL_LCD_ENABLE (1<<1) /* LCD Vee enable */
-#define CPLD_CONTROL_WRLAN_NENABLE (1<<0) /* SMC91x power disable */
-
-
-static struct resource smc91x_resources[] = {
- [0] = {
- .start = CPLD00_PHYS,
- .end = CPLD00_PHYS + CPLD00_SIZE - 1, /* Only needs 16B */
- .flags = IORESOURCE_MEM,
- },
-
- [1] = {
- .start = IRQ_LPD7A40X_ETH_INT,
- .end = IRQ_LPD7A40X_ETH_INT,
- .flags = IORESOURCE_IRQ,
- },
-
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = 0,
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
-};
-
-static struct resource lh7a40x_usbclient_resources[] = {
- [0] = {
- .start = USB_PHYS,
- .end = (USB_PHYS + PAGE_SIZE),
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_USB,
- .end = IRQ_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 lh7a40x_usbclient_dma_mask = 0xffffffffUL;
-
-static struct platform_device lh7a40x_usbclient_device = {
-// .name = "lh7a40x_udc",
- .name = "lh7-udc",
- .id = 0,
- .dev = {
- .dma_mask = &lh7a40x_usbclient_dma_mask,
- .coherent_dma_mask = 0xffffffffUL,
- },
- .num_resources = ARRAY_SIZE (lh7a40x_usbclient_resources),
- .resource = lh7a40x_usbclient_resources,
-};
-
-#if defined (CONFIG_ARCH_LH7A404)
-
-static struct resource lh7a404_usbhost_resources [] = {
- [0] = {
- .start = USBH_PHYS,
- .end = (USBH_PHYS + 0xFF),
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_USHINTR,
- .end = IRQ_USHINTR,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 lh7a404_usbhost_dma_mask = 0xffffffffUL;
-
-static struct platform_device lh7a404_usbhost_device = {
- .name = "lh7a404-ohci",
- .id = 0,
- .dev = {
- .dma_mask = &lh7a404_usbhost_dma_mask,
- .coherent_dma_mask = 0xffffffffUL,
- },
- .num_resources = ARRAY_SIZE (lh7a404_usbhost_resources),
- .resource = lh7a404_usbhost_resources,
-};
-
-#endif
-
-static struct platform_device* lpd7a40x_devs[] __initdata = {
- &smc91x_device,
- &lh7a40x_usbclient_device,
-#if defined (CONFIG_ARCH_LH7A404)
- &lh7a404_usbhost_device,
-#endif
-};
-
-extern void lpd7a400_map_io (void);
-
-static void __init lpd7a40x_init (void)
-{
-#if defined (CONFIG_MACH_LPD7A400)
- CPLD_CONTROL |= 0
- | CPLD_CONTROL_SWINT /* Disable software interrupt */
- | CPLD_CONTROL_OCMSK; /* Mask USB1 connection IRQ */
- CPLD_CONTROL &= ~(0
- | CPLD_CONTROL_LCD_ENABLE /* Disable LCD */
- | CPLD_CONTROL_WRLAN_NENABLE /* Enable SMC91x */
- );
-#endif
-
-#if defined (CONFIG_MACH_LPD7A404)
- CPLD_CONTROL &= ~(0
- | CPLD_CONTROL_WRLAN_NENABLE /* Enable SMC91x */
- );
-#endif
-
- platform_add_devices (lpd7a40x_devs, ARRAY_SIZE (lpd7a40x_devs));
-#if defined (CONFIG_FB_ARMCLCD)
- lh7a40x_clcd_init ();
-#endif
-}
-
-static void lh7a40x_ack_cpld_irq(struct irq_data *d)
-{
- /* CPLD doesn't have ack capability, but some devices may */
-
-#if defined (CPLD_INTMASK_TOUCH)
- /* The touch control *must* mask the interrupt because the
- * interrupt bit is read by the driver to determine if the pen
- * is still down. */
- if (d->irq == IRQ_TOUCH)
- CPLD_INTERRUPTS |= CPLD_INTMASK_TOUCH;
-#endif
-}
-
-static void lh7a40x_mask_cpld_irq(struct irq_data *d)
-{
- switch (d->irq) {
- case IRQ_LPD7A40X_ETH_INT:
- CPLD_INTERRUPTS |= CPLD_INTMASK_ETHERNET;
- break;
-#if defined (IRQ_TOUCH)
- case IRQ_TOUCH:
- CPLD_INTERRUPTS |= CPLD_INTMASK_TOUCH;
- break;
-#endif
- }
-}
-
-static void lh7a40x_unmask_cpld_irq(struct irq_data *d)
-{
- switch (d->irq) {
- case IRQ_LPD7A40X_ETH_INT:
- CPLD_INTERRUPTS &= ~CPLD_INTMASK_ETHERNET;
- break;
-#if defined (IRQ_TOUCH)
- case IRQ_TOUCH:
- CPLD_INTERRUPTS &= ~CPLD_INTMASK_TOUCH;
- break;
-#endif
- }
-}
-
-static struct irq_chip lpd7a40x_cpld_chip = {
- .name = "CPLD",
- .irq_ack = lh7a40x_ack_cpld_irq,
- .irq_mask = lh7a40x_mask_cpld_irq,
- .irq_unmask = lh7a40x_unmask_cpld_irq,
-};
-
-static void lpd7a40x_cpld_handler (unsigned int irq, struct irq_desc *desc)
-{
- unsigned int mask = CPLD_INTERRUPTS;
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-
- if ((mask & (1<<0)) == 0) /* WLAN */
- generic_handle_irq(IRQ_LPD7A40X_ETH_INT);
-
-#if defined (IRQ_TOUCH)
- if ((mask & (1<<1)) == 0) /* Touch */
- generic_handle_irq(IRQ_TOUCH);
-#endif
-
- /* Level-triggered need this */
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
-}
-
-
-void __init lh7a40x_init_board_irq (void)
-{
- int irq;
-
- /* Rev A (v2.8): PF0, PF1, PF2, and PF3 are available IRQs.
- PF7 supports the CPLD.
- Rev B (v3.4): PF0, PF1, and PF2 are available IRQs.
- PF3 supports the CPLD.
- (Some) LPD7A404 prerelease boards report a version
- number of 0x16, but we force an override since the
- hardware is of the newer variety.
- */
-
- unsigned char cpld_version = CPLD_REVISION;
- int pinCPLD = (cpld_version == 0x28) ? 7 : 3;
-
-#if defined CONFIG_MACH_LPD7A404
- cpld_version = 0x34; /* Coerce LPD7A404 to RevB */
-#endif
-
- /* First, configure user controlled GPIOF interrupts */
-
- GPIO_PFDD &= ~0x0f; /* PF0-3 are inputs */
- GPIO_INTTYPE1 &= ~0x0f; /* PF0-3 are level triggered */
- GPIO_INTTYPE2 &= ~0x0f; /* PF0-3 are active low */
- barrier ();
- GPIO_GPIOFINTEN |= 0x0f; /* Enable PF0, PF1, PF2, and PF3 IRQs */
-
- /* Then, configure CPLD interrupt */
-
- /* Disable all CPLD interrupts */
-#if defined (CONFIG_MACH_LPD7A400)
- CPLD_INTERRUPTS = CPLD_INTMASK_TOUCH | CPLD_INTMASK_PEN
- | CPLD_INTMASK_ETHERNET;
- /* *** FIXME: don't know why we need 7 and 4. 7 is way wrong
- and 4 is uncefined. */
- // (1<<7)|(1<<4)|(1<<3)|(1<<2);
-#endif
-#if defined (CONFIG_MACH_LPD7A404)
- CPLD_INTERRUPTS = CPLD_INTMASK_ETHERNET;
- /* *** FIXME: don't know why we need 6 and 5, neither is defined. */
- // (1<<6)|(1<<5)|(1<<3);
-#endif
- GPIO_PFDD &= ~(1 << pinCPLD); /* Make input */
- GPIO_INTTYPE1 &= ~(1 << pinCPLD); /* Level triggered */
- GPIO_INTTYPE2 &= ~(1 << pinCPLD); /* Active low */
- barrier ();
- GPIO_GPIOFINTEN |= (1 << pinCPLD); /* Enable */
-
- /* Cascade CPLD interrupts */
-
- for (irq = IRQ_BOARD_START;
- irq < IRQ_BOARD_START + NR_IRQ_BOARD; ++irq) {
- set_irq_chip (irq, &lpd7a40x_cpld_chip);
- set_irq_handler (irq, handle_level_irq);
- set_irq_flags (irq, IRQF_VALID);
- }
-
- set_irq_chained_handler ((cpld_version == 0x28)
- ? IRQ_CPLD_V28
- : IRQ_CPLD_V34,
- lpd7a40x_cpld_handler);
-}
-
-static struct map_desc lpd7a40x_io_desc[] __initdata = {
- {
- .virtual = IO_VIRT,
- .pfn = __phys_to_pfn(IO_PHYS),
- .length = IO_SIZE,
- .type = MT_DEVICE
- },
- { /* Mapping added to work around chip select problems */
- .virtual = IOBARRIER_VIRT,
- .pfn = __phys_to_pfn(IOBARRIER_PHYS),
- .length = IOBARRIER_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CF_VIRT,
- .pfn = __phys_to_pfn(CF_PHYS),
- .length = CF_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD02_VIRT,
- .pfn = __phys_to_pfn(CPLD02_PHYS),
- .length = CPLD02_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD06_VIRT,
- .pfn = __phys_to_pfn(CPLD06_PHYS),
- .length = CPLD06_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD08_VIRT,
- .pfn = __phys_to_pfn(CPLD08_PHYS),
- .length = CPLD08_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD08_VIRT,
- .pfn = __phys_to_pfn(CPLD08_PHYS),
- .length = CPLD08_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD0A_VIRT,
- .pfn = __phys_to_pfn(CPLD0A_PHYS),
- .length = CPLD0A_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD0C_VIRT,
- .pfn = __phys_to_pfn(CPLD0C_PHYS),
- .length = CPLD0C_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD0E_VIRT,
- .pfn = __phys_to_pfn(CPLD0E_PHYS),
- .length = CPLD0E_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD10_VIRT,
- .pfn = __phys_to_pfn(CPLD10_PHYS),
- .length = CPLD10_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD12_VIRT,
- .pfn = __phys_to_pfn(CPLD12_PHYS),
- .length = CPLD12_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD14_VIRT,
- .pfn = __phys_to_pfn(CPLD14_PHYS),
- .length = CPLD14_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD16_VIRT,
- .pfn = __phys_to_pfn(CPLD16_PHYS),
- .length = CPLD16_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD18_VIRT,
- .pfn = __phys_to_pfn(CPLD18_PHYS),
- .length = CPLD18_SIZE,
- .type = MT_DEVICE
- },
- {
- .virtual = CPLD1A_VIRT,
- .pfn = __phys_to_pfn(CPLD1A_PHYS),
- .length = CPLD1A_SIZE,
- .type = MT_DEVICE
- },
-};
-
-void __init
-lpd7a40x_map_io(void)
-{
- iotable_init (lpd7a40x_io_desc, ARRAY_SIZE (lpd7a40x_io_desc));
-}
-
-#ifdef CONFIG_MACH_LPD7A400
-
-MACHINE_START (LPD7A400, "Logic Product Development LPD7A400-10")
- /* Maintainer: Marc Singer */
- .boot_params = 0xc0000100,
- .map_io = lpd7a40x_map_io,
- .init_irq = lh7a400_init_irq,
- .timer = &lh7a40x_timer,
- .init_machine = lpd7a40x_init,
-MACHINE_END
-
-#endif
-
-#ifdef CONFIG_MACH_LPD7A404
-
-MACHINE_START (LPD7A404, "Logic Product Development LPD7A404-10")
- /* Maintainer: Marc Singer */
- .boot_params = 0xc0000100,
- .map_io = lpd7a40x_map_io,
- .init_irq = lh7a404_init_irq,
- .timer = &lh7a40x_timer,
- .init_machine = lpd7a40x_init,
-MACHINE_END
-
-#endif
diff --git a/arch/arm/mach-lh7a40x/clcd.c b/arch/arm/mach-lh7a40x/clcd.c
deleted file mode 100644
index 7fe4fd347c82..000000000000
--- a/arch/arm/mach-lh7a40x/clcd.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * arch/arm/mach-lh7a40x/clcd.c
- *
- * Copyright (C) 2004 Marc Singer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/sysdev.h>
-#include <linux/interrupt.h>
-
-//#include <linux/module.h>
-//#include <linux/time.h>
-
-//#include <asm/mach/time.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-
-#include <asm/system.h>
-#include <mach/hardware.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-
-#define HRTFTC_HRSETUP __REG(HRTFTC_PHYS + 0x00)
-#define HRTFTC_HRCON __REG(HRTFTC_PHYS + 0x04)
-#define HRTFTC_HRTIMING1 __REG(HRTFTC_PHYS + 0x08)
-#define HRTFTC_HRTIMING2 __REG(HRTFTC_PHYS + 0x0c)
-
-#define ALI_SETUP __REG(ALI_PHYS + 0x00)
-#define ALI_CONTROL __REG(ALI_PHYS + 0x04)
-#define ALI_TIMING1 __REG(ALI_PHYS + 0x08)
-#define ALI_TIMING2 __REG(ALI_PHYS + 0x0c)
-
-#include "lcd-panel.h"
-
-static void lh7a40x_clcd_disable (struct clcd_fb *fb)
-{
-#if defined (CONFIG_MACH_LPD7A400)
- CPLD_CONTROL &= ~(1<<1); /* Disable LCD Vee */
-#endif
-
-#if defined (CONFIG_MACH_LPD7A404)
- GPIO_PCD &= ~(1<<3); /* Disable LCD Vee */
-#endif
-
-#if defined (CONFIG_ARCH_LH7A400)
- HRTFTC_HRSETUP &= ~(1<<13); /* Disable HRTFT controller */
-#endif
-
-#if defined (CONFIG_ARCH_LH7A404)
- ALI_SETUP &= ~(1<<13); /* Disable ALI */
-#endif
-}
-
-static void lh7a40x_clcd_enable (struct clcd_fb *fb)
-{
- struct clcd_panel_extra* extra
- = (struct clcd_panel_extra*) fb->board_data;
-
-#if defined (CONFIG_MACH_LPD7A400)
- CPLD_CONTROL |= (1<<1); /* Enable LCD Vee */
-#endif
-
-#if defined (CONFIG_MACH_LPD7A404)
- GPIO_PCDD &= ~(1<<3); /* Enable LCD Vee */
- GPIO_PCD |= (1<<3);
-#endif
-
-#if defined (CONFIG_ARCH_LH7A400)
-
- if (extra) {
- HRTFTC_HRSETUP
- = (1 << 13)
- | ((fb->fb.var.xres - 1) << 4)
- | 0xc
- | (extra->hrmode ? 1 : 0);
- HRTFTC_HRCON
- = ((extra->clsen ? 1 : 0) << 1)
- | ((extra->spsen ? 1 : 0) << 0);
- HRTFTC_HRTIMING1
- = (extra->pcdel << 8)
- | (extra->revdel << 4)
- | (extra->lpdel << 0);
- HRTFTC_HRTIMING2
- = (extra->spldel << 9)
- | (extra->pc2del << 0);
- }
- else
- HRTFTC_HRSETUP
- = (1 << 13)
- | 0xc;
-#endif
-
-#if defined (CONFIG_ARCH_LH7A404)
-
- if (extra) {
- ALI_SETUP
- = (1 << 13)
- | ((fb->fb.var.xres - 1) << 4)
- | 0xc
- | (extra->hrmode ? 1 : 0);
- ALI_CONTROL
- = ((extra->clsen ? 1 : 0) << 1)
- | ((extra->spsen ? 1 : 0) << 0);
- ALI_TIMING1
- = (extra->pcdel << 8)
- | (extra->revdel << 4)
- | (extra->lpdel << 0);
- ALI_TIMING2
- = (extra->spldel << 9)
- | (extra->pc2del << 0);
- }
- else
- ALI_SETUP
- = (1 << 13)
- | 0xc;
-#endif
-
-}
-
-#define FRAMESIZE(s) (((s) + PAGE_SIZE - 1)&PAGE_MASK)
-
-static int lh7a40x_clcd_setup (struct clcd_fb *fb)
-{
- dma_addr_t dma;
- u32 len = FRAMESIZE (lcd_panel.mode.xres*lcd_panel.mode.yres
- *(lcd_panel.bpp/8));
-
- fb->panel = &lcd_panel;
-
- /* Enforce the sync polarity defaults */
- if (!(fb->panel->tim2 & TIM2_IHS))
- fb->fb.var.sync |= FB_SYNC_HOR_HIGH_ACT;
- if (!(fb->panel->tim2 & TIM2_IVS))
- fb->fb.var.sync |= FB_SYNC_VERT_HIGH_ACT;
-
-#if defined (HAS_LCD_PANEL_EXTRA)
- fb->board_data = &lcd_panel_extra;
-#endif
-
- fb->fb.screen_base
- = dma_alloc_writecombine (&fb->dev->dev, len,
- &dma, GFP_KERNEL);
- printk ("CLCD: LCD setup fb virt 0x%p phys 0x%p l %x io 0x%p \n",
- fb->fb.screen_base, (void*) dma, len,
- (void*) io_p2v (CLCDC_PHYS));
- printk ("CLCD: pixclock %d\n", lcd_panel.mode.pixclock);
-
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
-#if defined (USE_RGB555)
- fb->fb.var.green.length = 5; /* Panel uses RGB 5:5:5 */
-#endif
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = len;
-
- /* Drive PE4 high to prevent CPLD crash */
- GPIO_PEDD |= (1<<4);
- GPIO_PED |= (1<<4);
-
- GPIO_PINMUX |= (1<<1) | (1<<0); /* LCDVD[15:4] */
-
-// fb->fb.fbops->fb_check_var (&fb->fb.var, &fb->fb);
-// fb->fb.fbops->fb_set_par (&fb->fb);
-
- return 0;
-}
-
-static int lh7a40x_clcd_mmap (struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
-}
-
-static void lh7a40x_clcd_remove (struct clcd_fb *fb)
-{
- dma_free_writecombine (&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
-}
-
-static struct clcd_board clcd_platform_data = {
- .name = "lh7a40x FB",
- .check = clcdfb_check,
- .decode = clcdfb_decode,
- .enable = lh7a40x_clcd_enable,
- .setup = lh7a40x_clcd_setup,
- .mmap = lh7a40x_clcd_mmap,
- .remove = lh7a40x_clcd_remove,
- .disable = lh7a40x_clcd_disable,
-};
-
-#define IRQ_CLCDC (IRQ_LCDINTR)
-
-#define AMBA_DEVICE(name,busid,base,plat,pid) \
-static struct amba_device name##_device = { \
- .dev = { \
- .coherent_dma_mask = ~0, \
- .init_name = busid, \
- .platform_data = plat, \
- }, \
- .res = { \
- .start = base##_PHYS, \
- .end = (base##_PHYS) + (4*1024) - 1, \
- .flags = IORESOURCE_MEM, \
- }, \
- .dma_mask = ~0, \
- .irq = { IRQ_##base, }, \
- /* .dma = base##_DMA,*/ \
- .periphid = pid, \
-}
-
-AMBA_DEVICE(clcd, "cldc-lh7a40x", CLCDC, &clcd_platform_data, 0x41110);
-
-static struct amba_device *amba_devs[] __initdata = {
- &clcd_device,
-};
-
-void __init lh7a40x_clcd_init (void)
-{
- int i;
- int result;
- printk ("CLCD: registering amba devices\n");
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- result = amba_device_register(d, &iomem_resource);
- printk (" %d -> %d\n", i ,result);
- }
-}
diff --git a/arch/arm/mach-lh7a40x/clocks.c b/arch/arm/mach-lh7a40x/clocks.c
deleted file mode 100644
index 0651f96653f9..000000000000
--- a/arch/arm/mach-lh7a40x/clocks.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/* arch/arm/mach-lh7a40x/clocks.c
- *
- * Copyright (C) 2004 Marc Singer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-#include <mach/hardware.h>
-#include <mach/clocks.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/string.h>
-
-struct module;
-
-struct clk {
- struct list_head node;
- unsigned long rate;
- struct module *owner;
- const char *name;
-};
-
-/* ----- */
-
-#define MAINDIV1(c) (((c) >> 7) & 0x0f)
-#define MAINDIV2(c) (((c) >> 11) & 0x1f)
-#define PS(c) (((c) >> 18) & 0x03)
-#define PREDIV(c) (((c) >> 2) & 0x1f)
-#define HCLKDIV(c) (((c) >> 0) & 0x02)
-#define PCLKDIV(c) (((c) >> 16) & 0x03)
-
-unsigned int fclkfreq_get (void)
-{
- unsigned int clkset = CSC_CLKSET;
- unsigned int gclk
- = XTAL_IN
- / (1 << PS(clkset))
- * (MAINDIV1(clkset) + 2)
- / (PREDIV(clkset) + 2)
- * (MAINDIV2(clkset) + 2)
- ;
- return gclk;
-}
-
-unsigned int hclkfreq_get (void)
-{
- unsigned int clkset = CSC_CLKSET;
- unsigned int hclk = fclkfreq_get () / (HCLKDIV(clkset) + 1);
-
- return hclk;
-}
-
-unsigned int pclkfreq_get (void)
-{
- unsigned int clkset = CSC_CLKSET;
- int pclkdiv = PCLKDIV(clkset);
- unsigned int pclk;
- if (pclkdiv == 0x3)
- pclkdiv = 0x2;
- pclk = hclkfreq_get () / (1 << pclkdiv);
-
- return pclk;
-}
-
-/* ----- */
-
-struct clk *clk_get (struct device *dev, const char *id)
-{
- return dev && strcmp(dev_name(dev), "cldc-lh7a40x") == 0
- ? NULL : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put (struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
-int clk_enable (struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable (struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate (struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate (struct clk *clk, unsigned long rate)
-{
- return rate;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate (struct clk *clk, unsigned long rate)
-{
- return -EIO;
-}
-EXPORT_SYMBOL(clk_set_rate);
diff --git a/arch/arm/mach-lh7a40x/common.h b/arch/arm/mach-lh7a40x/common.h
deleted file mode 100644
index 6ed3f6b6db76..000000000000
--- a/arch/arm/mach-lh7a40x/common.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-lh7a40x/common.h
- *
- * Copyright (C) 2004 Marc Singer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-extern struct sys_timer lh7a40x_timer;
-
-extern void lh7a400_init_irq (void);
-extern void lh7a404_init_irq (void);
-extern void lh7a40x_clcd_init (void);
-extern void lh7a40x_init_board_irq (void);
-
diff --git a/arch/arm/mach-lh7a40x/include/mach/clocks.h b/arch/arm/mach-lh7a40x/include/mach/clocks.h
deleted file mode 100644
index fe2e0255c084..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/clocks.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/clocks.h
- *
- * Copyright (C) 2004 Marc Singer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_ARCH_CLOCKS_H
-#define __ASM_ARCH_CLOCKS_H
-
-unsigned int fclkfreq_get (void);
-unsigned int hclkfreq_get (void);
-unsigned int pclkfreq_get (void);
-
-#endif /* _ASM_ARCH_CLOCKS_H */
diff --git a/arch/arm/mach-lh7a40x/include/mach/constants.h b/arch/arm/mach-lh7a40x/include/mach/constants.h
deleted file mode 100644
index 55c6edbc2dfd..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/constants.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/constants.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_ARCH_CONSTANTS_H
-#define __ASM_ARCH_CONSTANTS_H
-
-
-/* Addressing constants */
-
- /* SoC CPU IO addressing */
-#define IO_PHYS (0x80000000)
-#define IO_VIRT (0xf8000000)
-#define IO_SIZE (0x0000B000)
-
-#ifdef CONFIG_MACH_KEV7A400
-# define CPLD_PHYS (0x20000000)
-# define CPLD_VIRT (0xf2000000)
-# define CPLD_SIZE PAGE_SIZE
-#endif
-
-#if defined (CONFIG_MACH_LPD7A400) || defined (CONFIG_MACH_LPD7A404)
-
-# define IOBARRIER_PHYS 0x10000000 /* Second bank, fastest timing */
-# define IOBARRIER_VIRT 0xf0000000
-# define IOBARRIER_SIZE PAGE_SIZE
-
-# define CF_PHYS 0x60200000
-# define CF_VIRT 0xf6020000
-# define CF_SIZE (8*1024)
-
- /* The IO mappings for the LPD CPLD are, unfortunately, sparse. */
-# define CPLDX_PHYS(x) (0x70000000 | ((x) << 20))
-# define CPLDX_VIRT(x) (0xf7000000 | ((x) << 16))
-# define CPLD00_PHYS CPLDX_PHYS (0x00) /* Wired LAN */
-# define CPLD00_VIRT CPLDX_VIRT (0x00)
-# define CPLD00_SIZE PAGE_SIZE
-# define CPLD02_PHYS CPLDX_PHYS (0x02)
-# define CPLD02_VIRT CPLDX_VIRT (0x02)
-# define CPLD02_SIZE PAGE_SIZE
-# define CPLD06_PHYS CPLDX_PHYS (0x06)
-# define CPLD06_VIRT CPLDX_VIRT (0x06)
-# define CPLD06_SIZE PAGE_SIZE
-# define CPLD08_PHYS CPLDX_PHYS (0x08)
-# define CPLD08_VIRT CPLDX_VIRT (0x08)
-# define CPLD08_SIZE PAGE_SIZE
-# define CPLD0A_PHYS CPLDX_PHYS (0x0a)
-# define CPLD0A_VIRT CPLDX_VIRT (0x0a)
-# define CPLD0A_SIZE PAGE_SIZE
-# define CPLD0C_PHYS CPLDX_PHYS (0x0c)
-# define CPLD0C_VIRT CPLDX_VIRT (0x0c)
-# define CPLD0C_SIZE PAGE_SIZE
-# define CPLD0E_PHYS CPLDX_PHYS (0x0e)
-# define CPLD0E_VIRT CPLDX_VIRT (0x0e)
-# define CPLD0E_SIZE PAGE_SIZE
-# define CPLD10_PHYS CPLDX_PHYS (0x10)
-# define CPLD10_VIRT CPLDX_VIRT (0x10)
-# define CPLD10_SIZE PAGE_SIZE
-# define CPLD12_PHYS CPLDX_PHYS (0x12)
-# define CPLD12_VIRT CPLDX_VIRT (0x12)
-# define CPLD12_SIZE PAGE_SIZE
-# define CPLD14_PHYS CPLDX_PHYS (0x14)
-# define CPLD14_VIRT CPLDX_VIRT (0x14)
-# define CPLD14_SIZE PAGE_SIZE
-# define CPLD16_PHYS CPLDX_PHYS (0x16)
-# define CPLD16_VIRT CPLDX_VIRT (0x16)
-# define CPLD16_SIZE PAGE_SIZE
-# define CPLD18_PHYS CPLDX_PHYS (0x18)
-# define CPLD18_VIRT CPLDX_VIRT (0x18)
-# define CPLD18_SIZE PAGE_SIZE
-# define CPLD1A_PHYS CPLDX_PHYS (0x1a)
-# define CPLD1A_VIRT CPLDX_VIRT (0x1a)
-# define CPLD1A_SIZE PAGE_SIZE
-#endif
-
- /* Timing constants */
-
-#define XTAL_IN 14745600 /* 14.7456 MHz crystal */
-#define PLL_CLOCK (XTAL_IN * 21) /* 309 MHz PLL clock */
-#define MAX_HCLK_KHZ 100000 /* HCLK max limit ~100MHz */
-#define HCLK (99993600)
-//#define HCLK (119808000)
-
-#endif /* __ASM_ARCH_CONSTANTS_H */
diff --git a/arch/arm/mach-lh7a40x/include/mach/debug-macro.S b/arch/arm/mach-lh7a40x/include/mach/debug-macro.S
deleted file mode 100644
index cff33625276f..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,37 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
- @ It is not known if this will be appropriate for every 40x
- @ board.
-
- .macro addruart, rp, rv
- mov \rp, #0x00000700 @ offset from base
- orr \rv, \rp, #0xf8000000 @ virtual base
- orr \rp, \rp, #0x80000000 @ physical base
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx] @ DATA
- .endm
-
- .macro busyuart,rd,rx @ spin while busy
-1001: ldr \rd, [\rx, #0x10] @ STATUS
- tst \rd, #1 << 3 @ BUSY (TX FIFO not empty)
- bne 1001b @ yes, spin
- .endm
-
- .macro waituart,rd,rx @ wait for Tx FIFO room
-1001: ldrb \rd, [\rx, #0x10] @ STATUS
- tst \rd, #1 << 5 @ TXFF (TX FIFO full)
- bne 1001b @ yes, spin
- .endm
diff --git a/arch/arm/mach-lh7a40x/include/mach/dma.h b/arch/arm/mach-lh7a40x/include/mach/dma.h
deleted file mode 100644
index baa3f8dbd04b..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/dma.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/dma.h
- *
- * Copyright (C) 2005 Marc Singer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-typedef enum {
- DMA_M2M0 = 0,
- DMA_M2M1 = 1,
- DMA_M2P0 = 2, /* Tx */
- DMA_M2P1 = 3, /* Rx */
- DMA_M2P2 = 4, /* Tx */
- DMA_M2P3 = 5, /* Rx */
- DMA_M2P4 = 6, /* Tx - AC97 */
- DMA_M2P5 = 7, /* Rx - AC97 */
- DMA_M2P6 = 8, /* Tx */
- DMA_M2P7 = 9, /* Rx */
-} dma_device_t;
-
-#define DMA_LENGTH_MAX ((64*1024) - 4) /* bytes */
-
-#define DMAC_GCA __REG(DMAC_PHYS + 0x2b80)
-#define DMAC_GIR __REG(DMAC_PHYS + 0x2bc0)
-
-#define DMAC_GIR_MMI1 (1<<11)
-#define DMAC_GIR_MMI0 (1<<10)
-#define DMAC_GIR_MPI8 (1<<9)
-#define DMAC_GIR_MPI9 (1<<8)
-#define DMAC_GIR_MPI6 (1<<7)
-#define DMAC_GIR_MPI7 (1<<6)
-#define DMAC_GIR_MPI4 (1<<5)
-#define DMAC_GIR_MPI5 (1<<4)
-#define DMAC_GIR_MPI2 (1<<3)
-#define DMAC_GIR_MPI3 (1<<2)
-#define DMAC_GIR_MPI0 (1<<1)
-#define DMAC_GIR_MPI1 (1<<0)
-
-#define DMAC_M2P0 0x0000
-#define DMAC_M2P1 0x0040
-#define DMAC_M2P2 0x0080
-#define DMAC_M2P3 0x00c0
-#define DMAC_M2P4 0x0240
-#define DMAC_M2P5 0x0200
-#define DMAC_M2P6 0x02c0
-#define DMAC_M2P7 0x0280
-#define DMAC_M2P8 0x0340
-#define DMAC_M2P9 0x0300
-#define DMAC_M2M0 0x0100
-#define DMAC_M2M1 0x0140
-
-#define DMAC_P_PCONTROL(c) __REG(DMAC_PHYS + (c) + 0x00)
-#define DMAC_P_PINTERRUPT(c) __REG(DMAC_PHYS + (c) + 0x04)
-#define DMAC_P_PPALLOC(c) __REG(DMAC_PHYS + (c) + 0x08)
-#define DMAC_P_PSTATUS(c) __REG(DMAC_PHYS + (c) + 0x0c)
-#define DMAC_P_REMAIN(c) __REG(DMAC_PHYS + (c) + 0x14)
-#define DMAC_P_MAXCNT0(c) __REG(DMAC_PHYS + (c) + 0x20)
-#define DMAC_P_BASE0(c) __REG(DMAC_PHYS + (c) + 0x24)
-#define DMAC_P_CURRENT0(c) __REG(DMAC_PHYS + (c) + 0x28)
-#define DMAC_P_MAXCNT1(c) __REG(DMAC_PHYS + (c) + 0x30)
-#define DMAC_P_BASE1(c) __REG(DMAC_PHYS + (c) + 0x34)
-#define DMAC_P_CURRENT1(c) __REG(DMAC_PHYS + (c) + 0x38)
-
-#define DMAC_PCONTROL_ENABLE (1<<4)
-
-#define DMAC_PORT_USB 0
-#define DMAC_PORT_SDMMC 1
-#define DMAC_PORT_AC97_1 2
-#define DMAC_PORT_AC97_2 3
-#define DMAC_PORT_AC97_3 4
-#define DMAC_PORT_UART1 6
-#define DMAC_PORT_UART2 7
-#define DMAC_PORT_UART3 8
-
-#define DMAC_PSTATUS_CURRSTATE_SHIFT 4
-#define DMAC_PSTATUS_CURRSTATE_MASK 0x3
-
-#define DMAC_PSTATUS_NEXTBUF (1<<6)
-#define DMAC_PSTATUS_STALLRINT (1<<0)
-
-#define DMAC_INT_CHE (1<<3)
-#define DMAC_INT_NFB (1<<1)
-#define DMAC_INT_STALL (1<<0)
diff --git a/arch/arm/mach-lh7a40x/include/mach/entry-macro.S b/arch/arm/mach-lh7a40x/include/mach/entry-macro.S
deleted file mode 100644
index 069bb4cefff7..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/entry-macro.S
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * arch/arm/mach-lh7a40x/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for LH7A40x platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-/* In order to allow there to be support for both of the processor
- classes at the same time, we make a hack here that isn't very
- pretty. At startup, the link pointed to with the
- branch_irq_lh7a400 symbol is replaced with a NOP when the CPU is
- detected as a lh7a404.
-
- *** FIXME: we should clean this up so that there is only one
- implementation for each CPU's design.
-
-*/
-
-#if defined (CONFIG_ARCH_LH7A400) && defined (CONFIG_ARCH_LH7A404)
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-branch_irq_lh7a400: b 1000f
-
-@ Implementation of the LH7A404 get_irqnr_and_base.
-
- mov \irqnr, #0 @ VIC1 irq base
- mov \base, #io_p2v(0x80000000) @ APB registers
- add \base, \base, #0x8000
- ldr \tmp, [\base, #0x0030] @ VIC1_VECTADDR
- tst \tmp, #VA_VECTORED @ Direct vectored
- bne 1002f
- tst \tmp, #VA_VIC1DEFAULT @ Default vectored VIC1
- ldrne \irqstat, [\base, #0] @ VIC1_IRQSTATUS
- bne 1001f
- add \base, \base, #(0xa000 - 0x8000)
- ldr \tmp, [\base, #0x0030] @ VIC2_VECTADDR
- tst \tmp, #VA_VECTORED @ Direct vectored
- bne 1002f
- ldr \irqstat, [\base, #0] @ VIC2_IRQSTATUS
- mov \irqnr, #32 @ VIC2 irq base
-
-1001: movs \irqstat, \irqstat, lsr #1 @ Shift into carry
- bcs 1008f @ Bit set; irq found
- add \irqnr, \irqnr, #1
- bne 1001b @ Until no bits
- b 1009f @ Nothing? Hmm.
-1002: and \irqnr, \tmp, #0x3f @ Mask for valid bits
-1008: movs \irqstat, #1 @ Force !Z
- str \tmp, [\base, #0x0030] @ Clear vector
- b 1009f
-
-@ Implementation of the LH7A400 get_irqnr_and_base.
-
-1000: mov \irqnr, #0
- mov \base, #io_p2v(0x80000000) @ APB registers
- ldr \irqstat, [\base, #0x500] @ PIC INTSR
-
-1001: movs \irqstat, \irqstat, lsr #1 @ Shift into carry
- bcs 1008f @ Bit set; irq found
- add \irqnr, \irqnr, #1
- bne 1001b @ Until no bits
- b 1009f @ Nothing? Hmm.
-1008: movs \irqstat, #1 @ Force !Z
-
-1009:
- .endm
-
-
-
-#elif defined (CONFIG_ARCH_LH7A400)
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- mov \irqnr, #0
- mov \base, #io_p2v(0x80000000) @ APB registers
- ldr \irqstat, [\base, #0x500] @ PIC INTSR
-
-1001: movs \irqstat, \irqstat, lsr #1 @ Shift into carry
- bcs 1008f @ Bit set; irq found
- add \irqnr, \irqnr, #1
- bne 1001b @ Until no bits
- b 1009f @ Nothing? Hmm.
-1008: movs \irqstat, #1 @ Force !Z
-1009:
- .endm
-
-#elif defined(CONFIG_ARCH_LH7A404)
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- mov \irqnr, #0 @ VIC1 irq base
- mov \base, #io_p2v(0x80000000) @ APB registers
- add \base, \base, #0x8000
- ldr \tmp, [\base, #0x0030] @ VIC1_VECTADDR
- tst \tmp, #VA_VECTORED @ Direct vectored
- bne 1002f
- tst \tmp, #VA_VIC1DEFAULT @ Default vectored VIC1
- ldrne \irqstat, [\base, #0] @ VIC1_IRQSTATUS
- bne 1001f
- add \base, \base, #(0xa000 - 0x8000)
- ldr \tmp, [\base, #0x0030] @ VIC2_VECTADDR
- tst \tmp, #VA_VECTORED @ Direct vectored
- bne 1002f
- ldr \irqstat, [\base, #0] @ VIC2_IRQSTATUS
- mov \irqnr, #32 @ VIC2 irq base
-
-1001: movs \irqstat, \irqstat, lsr #1 @ Shift into carry
- bcs 1008f @ Bit set; irq found
- add \irqnr, \irqnr, #1
- bne 1001b @ Until no bits
- b 1009f @ Nothing? Hmm.
-1002: and \irqnr, \tmp, #0x3f @ Mask for valid bits
-1008: movs \irqstat, #1 @ Force !Z
- str \tmp, [\base, #0x0030] @ Clear vector
-1009:
- .endm
-#endif
-
-
diff --git a/arch/arm/mach-lh7a40x/include/mach/hardware.h b/arch/arm/mach-lh7a40x/include/mach/hardware.h
deleted file mode 100644
index 59d2ace35217..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/hardware.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/hardware.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * [ Substantially cribbed from arch/arm/mach-pxa/include/mach/hardware.h ]
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <asm/sizes.h> /* Added for the sake of amba-clcd driver */
-
-#define io_p2v(x) (0xf0000000 | (((x) & 0xfff00000) >> 4) | ((x) & 0x0000ffff))
-#define io_v2p(x) ( (((x) & 0x0fff0000) << 4) | ((x) & 0x0000ffff))
-
-#ifdef __ASSEMBLY__
-
-# define __REG(x) io_p2v(x)
-# define __PREG(x) io_v2p(x)
-
-#else
-
-# if 0
-# define __REG(x) (*((volatile u32 *)io_p2v(x)))
-# else
-/*
- * This __REG() version gives the same results as the one above, except
- * that we are fooling gcc somehow so it generates far better and smaller
- * assembly code for access to contiguous registers. It's a shame that gcc
- * doesn't guess this by itself.
- */
-#include <asm/types.h>
-typedef struct { volatile u32 offset[4096]; } __regbase;
-# define __REGP(x) ((__regbase *)((x)&~4095))->offset[((x)&4095)>>2]
-# define __REG(x) __REGP(io_p2v(x))
-typedef struct { volatile u16 offset[4096]; } __regbase16;
-# define __REGP16(x) ((__regbase16 *)((x)&~4095))->offset[((x)&4095)>>1]
-# define __REG16(x) __REGP16(io_p2v(x))
-typedef struct { volatile u8 offset[4096]; } __regbase8;
-# define __REGP8(x) ((__regbase8 *)((x)&~4095))->offset[(x)&4095]
-# define __REG8(x) __REGP8(io_p2v(x))
-#endif
-
-/* Let's kick gcc's ass again... */
-# define __REG2(x,y) \
- ( __builtin_constant_p(y) ? (__REG((x) + (y))) \
- : (*(volatile u32 *)((u32)&__REG(x) + (y))) )
-
-# define __PREG(x) (io_v2p((u32)&(x)))
-
-#endif
-
-#define MASK_AND_SET(v,m,s) (v) = ((v)&~(m))|(s)
-
-#include "registers.h"
-
-#endif /* _ASM_ARCH_HARDWARE_H */
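As an aside on the accessor defined above, here is what __REG() boils down to for one sample register; the address is only an illustration (it is GPIO_PINMUX from registers.h below):

/* Sketch: __REG(GPIO_PHYS + 0x2c), i.e. __REG(0x80000e2c), expands to
 *
 *   ((__regbase *)(io_p2v(0x80000e2c) & ~4095))->offset[(io_p2v(0x80000e2c) & 4095) >> 2]
 *
 * io_p2v() relocates the physical address into the 0xf0000000 virtual
 * window, and the struct-member form lets gcc keep one base pointer
 * for a run of neighbouring registers instead of loading a fresh
 * 32-bit constant for each access.
 */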
diff --git a/arch/arm/mach-lh7a40x/include/mach/io.h b/arch/arm/mach-lh7a40x/include/mach/io.h
deleted file mode 100644
index 6ece45911cbc..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/io.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/* No ISA or PCI bus on this machine. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif /* __ASM_ARCH_IO_H */
diff --git a/arch/arm/mach-lh7a40x/include/mach/irqs.h b/arch/arm/mach-lh7a40x/include/mach/irqs.h
deleted file mode 100644
index 0f9b83675935..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/irqs.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/irqs.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-/* It is to be seen whether or not we can build a kernel for more than
- * one board. For the time being, these macros assume that we cannot.
- * Thus, it is OK to ifdef machine/board specific IRQ assignments.
- */
-
-
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H
-
-
-#define FIQ_START 80
-
-#if defined (CONFIG_ARCH_LH7A400)
-
- /* FIQs */
-
-# define IRQ_GPIO0FIQ 0 /* GPIO External FIQ Interrupt on F0 */
-# define IRQ_BLINT 1 /* Battery Low */
-# define IRQ_WEINT 2 /* Watchdog Timer, WDT overflow */
-# define IRQ_MCINT 3 /* Media Change, MEDCHG pin rising */
-
- /* IRQs */
-
-# define IRQ_CSINT 4 /* Audio Codec (ACI) */
-# define IRQ_GPIO1INTR 5 /* GPIO External IRQ Interrupt on F1 */
-# define IRQ_GPIO2INTR 6 /* GPIO External IRQ Interrupt on F2 */
-# define IRQ_GPIO3INTR 7 /* GPIO External IRQ Interrupt on F3 */
-# define IRQ_T1UI 8 /* Timer 1 underflow */
-# define IRQ_T2UI 9 /* Timer 2 underflow */
-# define IRQ_RTCMI 10
-# define IRQ_TINTR 11 /* Clock State Controller 64 Hz tick (CSC) */
-# define IRQ_UART1INTR 12
-# define IRQ_UART2INTR 13
-# define IRQ_LCDINTR 14
-# define IRQ_SSIEOT 15 /* Synchronous Serial Interface (SSI) */
-# define IRQ_UART3INTR 16
-# define IRQ_SCIINTR 17 /* Smart Card Interface (SCI) */
-# define IRQ_AACINTR 18 /* Advanced Audio Codec (AAC) */
-# define IRQ_MMCINTR 19 /* Multimedia Card (MMC) */
-# define IRQ_USBINTR 20
-# define IRQ_DMAINTR 21
-# define IRQ_T3UI 22 /* Timer 3 underflow */
-# define IRQ_GPIO4INTR 23 /* GPIO External IRQ Interrupt on F4 */
-# define IRQ_GPIO5INTR 24 /* GPIO External IRQ Interrupt on F5 */
-# define IRQ_GPIO6INTR 25 /* GPIO External IRQ Interrupt on F6 */
-# define IRQ_GPIO7INTR 26 /* GPIO External IRQ Interrupt on F7 */
-# define IRQ_BMIINTR 27 /* Battery Monitor Interface (BMI) */
-
-# define NR_IRQ_CPU 28 /* IRQs directly recognized by CPU */
-
- /* Given IRQ, return GPIO interrupt number 0-7 */
-# define IRQ_TO_GPIO(i) ((i) \
- - (((i) > IRQ_GPIO3INTR) ? IRQ_GPIO4INTR - IRQ_GPIO3INTR - 1 : 0)\
- - (((i) > IRQ_GPIO0INTR) ? IRQ_GPIO1INTR - IRQ_GPIO0INTR - 1 : 0))
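A worked instance of the macro, using the LH7A400 numbering above (and the IRQ_GPIO0INTR alias to IRQ_GPIO0FIQ defined further down in this file):

/*   IRQ_TO_GPIO(IRQ_GPIO5INTR)
 *     = 24 - (IRQ_GPIO4INTR - IRQ_GPIO3INTR - 1)   ->  24 - (23 - 7 - 1) = 9
 *          - (IRQ_GPIO1INTR - IRQ_GPIO0INTR - 1)   ->   9 - (5  - 0 - 1) = 5
 * so IRQ 24 maps to GPIO interrupt line F5.
 */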
-
-#endif
-
-#if defined (CONFIG_ARCH_LH7A404)
-
-# define IRQ_BROWN 0 /* Brownout */
-# define IRQ_WDTINTR 1 /* Watchdog Timer */
-# define IRQ_COMMRX 2 /* ARM Comm Rx for Debug */
-# define IRQ_COMMTX 3 /* ARM Comm Tx for Debug */
-# define IRQ_T1UI 4 /* Timer 1 underflow */
-# define IRQ_T2UI 5 /* Timer 2 underflow */
-# define IRQ_CSINT 6 /* Codec Interrupt (shared by AAC on 404) */
-# define IRQ_DMAM2P0 7 /* -- DMA Memory to Peripheral */
-# define IRQ_DMAM2P1 8
-# define IRQ_DMAM2P2 9
-# define IRQ_DMAM2P3 10
-# define IRQ_DMAM2P4 11
-# define IRQ_DMAM2P5 12
-# define IRQ_DMAM2P6 13
-# define IRQ_DMAM2P7 14
-# define IRQ_DMAM2P8 15
-# define IRQ_DMAM2P9 16
-# define IRQ_DMAM2M0 17 /* -- DMA Memory to Memory */
-# define IRQ_DMAM2M1 18
-# define IRQ_GPIO0INTR 19 /* -- GPIOF Interrupt */
-# define IRQ_GPIO1INTR 20
-# define IRQ_GPIO2INTR 21
-# define IRQ_GPIO3INTR 22
-# define IRQ_SOFT_V1_23 23 /* -- Unassigned */
-# define IRQ_SOFT_V1_24 24
-# define IRQ_SOFT_V1_25 25
-# define IRQ_SOFT_V1_26 26
-# define IRQ_SOFT_V1_27 27
-# define IRQ_SOFT_V1_28 28
-# define IRQ_SOFT_V1_29 29
-# define IRQ_SOFT_V1_30 30
-# define IRQ_SOFT_V1_31 31
-
-# define IRQ_BLINT 32 /* Battery Low */
-# define IRQ_BMIINTR 33 /* Battery Monitor */
-# define IRQ_MCINTR 34 /* Media Change */
-# define IRQ_TINTR 35 /* 64Hz Tick */
-# define IRQ_WEINT 36 /* Watchdog Expired */
-# define IRQ_RTCMI 37 /* Real-time Clock Match */
-# define IRQ_UART1INTR 38 /* UART1 Interrupt (including error) */
-# define IRQ_UART1ERR 39 /* UART1 Error */
-# define IRQ_UART2INTR 40 /* UART2 Interrupt (including error) */
-# define IRQ_UART2ERR 41 /* UART2 Error */
-# define IRQ_UART3INTR 42 /* UART3 Interrupt (including error) */
-# define IRQ_UART3ERR 43 /* UART3 Error */
-# define IRQ_SCIINTR 44 /* Smart Card */
-# define IRQ_TSCINTR 45 /* Touchscreen */
-# define IRQ_KMIINTR 46 /* Keyboard/Mouse (PS/2) */
-# define IRQ_GPIO4INTR 47 /* -- GPIOF Interrupt */
-# define IRQ_GPIO5INTR 48
-# define IRQ_GPIO6INTR 49
-# define IRQ_GPIO7INTR 50
-# define IRQ_T3UI 51 /* Timer 3 underflow */
-# define IRQ_LCDINTR 52 /* LCD Controller */
-# define IRQ_SSPINTR 53 /* Synchronous Serial Port */
-# define IRQ_SDINTR 54 /* Secure Digital Port (MMC) */
-# define IRQ_USBINTR 55 /* USB Device Port */
-# define IRQ_USHINTR 56 /* USB Host Port */
-# define IRQ_SOFT_V2_25 57 /* -- Unassigned */
-# define IRQ_SOFT_V2_26 58
-# define IRQ_SOFT_V2_27 59
-# define IRQ_SOFT_V2_28 60
-# define IRQ_SOFT_V2_29 61
-# define IRQ_SOFT_V2_30 62
-# define IRQ_SOFT_V2_31 63
-
-# define NR_IRQ_CPU 64 /* IRQs directly recognized by CPU */
-
- /* Given IRQ, return GPIO interrupt number 0-7 */
-# define IRQ_TO_GPIO(i) ((i) \
- - (((i) > IRQ_GPIO3INTR) ? IRQ_GPIO4INTR - IRQ_GPIO3INTR - 1 : 0)\
- - IRQ_GPIO0INTR)
-
- /* Vector Address constants */
-# define VA_VECTORED 0x100 /* Set for vectored interrupt */
-# define VA_VIC1DEFAULT 0x200 /* Set as default VECTADDR for VIC1 */
-# define VA_VIC2DEFAULT 0x400 /* Set as default VECTADDR for VIC2 */
-
-#endif
-
- /* IRQ aliases */
-
-#if !defined (IRQ_GPIO0INTR)
-# define IRQ_GPIO0INTR IRQ_GPIO0FIQ
-#endif
-#define IRQ_TICK IRQ_TINTR
-#define IRQ_PCC1_RDY IRQ_GPIO6INTR /* PCCard 1 ready */
-#define IRQ_PCC2_RDY IRQ_GPIO7INTR /* PCCard 2 ready */
-#define IRQ_USB IRQ_USBINTR /* USB device */
-
-#ifdef CONFIG_MACH_KEV7A400
-# define IRQ_TS IRQ_GPIOFIQ /* Touchscreen */
-# define IRQ_CPLD IRQ_GPIO1INTR /* CPLD cascade */
-# define IRQ_PCC1_CD IRQ_GPIO_F2 /* PCCard 1 card detect */
-# define IRQ_PCC2_CD IRQ_GPIO_F3 /* PCCard 2 card detect */
-#endif
-
-#if defined (CONFIG_MACH_LPD7A400) || defined (CONFIG_MACH_LPD7A404)
-# define IRQ_CPLD_V28 IRQ_GPIO7INTR /* CPLD cascade through GPIO_PF7 */
-# define IRQ_CPLD_V34 IRQ_GPIO3INTR /* CPLD cascade through GPIO_PF3 */
-#endif
-
- /* System specific IRQs */
-
-#define IRQ_BOARD_START NR_IRQ_CPU
-
-#ifdef CONFIG_MACH_KEV7A400
-# define IRQ_KEV7A400_CPLD IRQ_BOARD_START
-# define NR_IRQ_BOARD 5
-# define IRQ_KEV7A400_MMC_CD IRQ_KEV7A400_CPLD + 0 /* MMC Card Detect */
-# define IRQ_KEV7A400_RI2 IRQ_KEV7A400_CPLD + 1 /* Ring Indicator 2 */
-# define IRQ_KEV7A400_IDE_CF IRQ_KEV7A400_CPLD + 2 /* Compact Flash (?) */
-# define IRQ_KEV7A400_ETH_INT IRQ_KEV7A400_CPLD + 3 /* Ethernet chip */
-# define IRQ_KEV7A400_INT IRQ_KEV7A400_CPLD + 4
-#endif
-
-#if defined (CONFIG_MACH_LPD7A400) || defined (CONFIG_MACH_LPD7A404)
-# define IRQ_LPD7A40X_CPLD IRQ_BOARD_START
-# define NR_IRQ_BOARD 2
-# define IRQ_LPD7A40X_ETH_INT IRQ_LPD7A40X_CPLD + 0 /* Ethernet chip */
-# define IRQ_LPD7A400_TS IRQ_LPD7A40X_CPLD + 1 /* Touch screen */
-#endif
-
-#if defined (CONFIG_MACH_LPD7A400)
-# define IRQ_TOUCH IRQ_LPD7A400_TS
-#endif
-
-#define NR_IRQS (NR_IRQ_CPU + NR_IRQ_BOARD)
-
-#endif
diff --git a/arch/arm/mach-lh7a40x/include/mach/memory.h b/arch/arm/mach-lh7a40x/include/mach/memory.h
deleted file mode 100644
index edb8f5faf5d5..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/memory.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/memory.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- *
- * Refer to <file:Documentation/arm/Sharp-LH/SDRAM> for more information.
- *
- */
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-/*
- * Physical DRAM offset.
- */
-#define PHYS_OFFSET UL(0xc0000000)
-
-/*
- * Sparsemem version of the above
- */
-#define MAX_PHYSMEM_BITS 32
-#define SECTION_SIZE_BITS 24
-
-#endif
diff --git a/arch/arm/mach-lh7a40x/include/mach/registers.h b/arch/arm/mach-lh7a40x/include/mach/registers.h
deleted file mode 100644
index ea44396383a7..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/registers.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/registers.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <mach/constants.h>
-
-#ifndef __ASM_ARCH_REGISTERS_H
-#define __ASM_ARCH_REGISTERS_H
-
-
- /* Physical register base addresses */
-
-#define AC97C_PHYS (0x80000000) /* AC97 Controller */
-#define MMC_PHYS (0x80000100) /* Multimedia Card Controller */
-#define USB_PHYS (0x80000200) /* USB Client */
-#define SCI_PHYS (0x80000300) /* Secure Card Interface */
-#define CSC_PHYS (0x80000400) /* Clock/State Controller */
-#define INTC_PHYS (0x80000500) /* Interrupt Controller */
-#define UART1_PHYS (0x80000600) /* UART1 Controller */
-#define SIR_PHYS	(0x80000600)	/* IR Controller, same as UART1 */
-#define UART2_PHYS (0x80000700) /* UART2 Controller */
-#define UART3_PHYS (0x80000800) /* UART3 Controller */
-#define DCDC_PHYS (0x80000900) /* DC to DC Controller */
-#define ACI_PHYS (0x80000a00) /* Audio Codec Interface */
-#define SSP_PHYS	(0x80000b00)	/* Synchronous Serial Port */
-#define TIMER_PHYS (0x80000c00) /* Timer Controller */
-#define RTC_PHYS (0x80000d00) /* Real-time Clock */
-#define GPIO_PHYS (0x80000e00) /* General Purpose IO */
-#define BMI_PHYS (0x80000f00) /* Battery Monitor Interface */
-#define HRTFTC_PHYS (0x80001000) /* High-res TFT Controller (LH7A400) */
-#define ALI_PHYS (0x80001000) /* Advanced LCD Interface (LH7A404) */
-#define WDT_PHYS (0x80001400) /* Watchdog Timer */
-#define SMC_PHYS (0x80002000) /* Static Memory Controller */
-#define SDRC_PHYS (0x80002400) /* SDRAM Controller */
-#define DMAC_PHYS (0x80002800) /* DMA Controller */
-#define CLCDC_PHYS (0x80003000) /* Color LCD Controller */
-
- /* Physical registers of the LH7A404 */
-
-#define ADC_PHYS (0x80001300) /* A/D & Touchscreen Controller */
-#define VIC1_PHYS (0x80008000) /* Vectored Interrupt Controller 1 */
-#define USBH_PHYS (0x80009000) /* USB OHCI host controller */
-#define VIC2_PHYS (0x8000a000) /* Vectored Interrupt Controller 2 */
-
-/*#define KBD_PHYS (0x80000e00) */
-/*#define LCDICP_PHYS (0x80001000) */
-
-
- /* Clock/State Controller register */
-
-#define CSC_PWRSR __REG(CSC_PHYS + 0x00) /* Reset register & ID */
-#define CSC_PWRCNT __REG(CSC_PHYS + 0x04) /* Power control */
-#define CSC_CLKSET __REG(CSC_PHYS + 0x20) /* Clock speed control */
-#define CSC_USBDRESET __REG(CSC_PHYS + 0x4c) /* USB Device resets */
-
-#define CSC_PWRCNT_USBH_EN (1<<28) /* USB Host power enable */
-#define CSC_PWRCNT_DMAC_M2M1_EN (1<<27)
-#define CSC_PWRCNT_DMAC_M2M0_EN (1<<26)
-#define CSC_PWRCNT_DMAC_M2P8_EN (1<<25)
-#define CSC_PWRCNT_DMAC_M2P9_EN (1<<24)
-#define CSC_PWRCNT_DMAC_M2P6_EN (1<<23)
-#define CSC_PWRCNT_DMAC_M2P7_EN (1<<22)
-#define CSC_PWRCNT_DMAC_M2P4_EN (1<<21)
-#define CSC_PWRCNT_DMAC_M2P5_EN (1<<20)
-#define CSC_PWRCNT_DMAC_M2P2_EN (1<<19)
-#define CSC_PWRCNT_DMAC_M2P3_EN (1<<18)
-#define CSC_PWRCNT_DMAC_M2P0_EN (1<<17)
-#define CSC_PWRCNT_DMAC_M2P1_EN (1<<16)
-
-#define CSC_PWRSR_CHIPMAN_SHIFT (24)
-#define CSC_PWRSR_CHIPMAN_MASK (0xff)
-#define CSC_PWRSR_CHIPID_SHIFT (16)
-#define CSC_PWRSR_CHIPID_MASK (0xff)
-
-#define CSC_USBDRESET_APBRESETREG (1<<1)
-#define CSC_USBDRESET_IORESETREG (1<<0)
-
- /* Interrupt Controller registers */
-
-#define INTC_INTSR __REG(INTC_PHYS + 0x00) /* Status */
-#define INTC_INTRSR __REG(INTC_PHYS + 0x04) /* Raw Status */
-#define INTC_INTENS __REG(INTC_PHYS + 0x08) /* Enable Set */
-#define INTC_INTENC __REG(INTC_PHYS + 0x0c) /* Enable Clear */
-
-
-	/* Vectored Interrupt Controller registers */
-
-#define VIC1_IRQSTATUS __REG(VIC1_PHYS + 0x00)
-#define VIC1_FIQSTATUS __REG(VIC1_PHYS + 0x04)
-#define VIC1_RAWINTR __REG(VIC1_PHYS + 0x08)
-#define VIC1_INTSEL __REG(VIC1_PHYS + 0x0c)
-#define VIC1_INTEN __REG(VIC1_PHYS + 0x10)
-#define VIC1_INTENCLR __REG(VIC1_PHYS + 0x14)
-#define VIC1_SOFTINT __REG(VIC1_PHYS + 0x18)
-#define VIC1_SOFTINTCLR __REG(VIC1_PHYS + 0x1c)
-#define VIC1_PROTECT __REG(VIC1_PHYS + 0x20)
-#define VIC1_VECTADDR __REG(VIC1_PHYS + 0x30)
-#define VIC1_NVADDR __REG(VIC1_PHYS + 0x34)
-#define VIC1_VAD0 __REG(VIC1_PHYS + 0x100)
-#define VIC1_VECTCNTL0 __REG(VIC1_PHYS + 0x200)
-#define VIC2_IRQSTATUS __REG(VIC2_PHYS + 0x00)
-#define VIC2_FIQSTATUS __REG(VIC2_PHYS + 0x04)
-#define VIC2_RAWINTR __REG(VIC2_PHYS + 0x08)
-#define VIC2_INTSEL __REG(VIC2_PHYS + 0x0c)
-#define VIC2_INTEN __REG(VIC2_PHYS + 0x10)
-#define VIC2_INTENCLR __REG(VIC2_PHYS + 0x14)
-#define VIC2_SOFTINT __REG(VIC2_PHYS + 0x18)
-#define VIC2_SOFTINTCLR __REG(VIC2_PHYS + 0x1c)
-#define VIC2_PROTECT __REG(VIC2_PHYS + 0x20)
-#define VIC2_VECTADDR __REG(VIC2_PHYS + 0x30)
-#define VIC2_NVADDR __REG(VIC2_PHYS + 0x34)
-#define VIC2_VAD0 __REG(VIC2_PHYS + 0x100)
-#define VIC2_VECTCNTL0 __REG(VIC2_PHYS + 0x200)
-
-#define VIC_CNTL_ENABLE (0x20)
-
- /* USB Host registers (Open HCI compatible) */
-
-#define USBH_CMDSTATUS __REG(USBH_PHYS + 0x08)
-
-
- /* GPIO registers */
-
-#define GPIO_INTTYPE1 __REG(GPIO_PHYS + 0x4c) /* Interrupt Type 1 (Edge) */
-#define GPIO_INTTYPE2 __REG(GPIO_PHYS + 0x50) /* Interrupt Type 2 */
-#define GPIO_GPIOFEOI __REG(GPIO_PHYS + 0x54) /* GPIO End-of-Interrupt */
-#define GPIO_GPIOINTEN __REG(GPIO_PHYS + 0x58) /* GPIO Interrupt Enable */
-#define GPIO_INTSTATUS __REG(GPIO_PHYS + 0x5c) /* GPIO Interrupt Status */
-#define GPIO_PINMUX __REG(GPIO_PHYS + 0x2c)
-#define GPIO_PADD __REG(GPIO_PHYS + 0x10)
-#define GPIO_PAD __REG(GPIO_PHYS + 0x00)
-#define GPIO_PCD __REG(GPIO_PHYS + 0x08)
-#define GPIO_PCDD __REG(GPIO_PHYS + 0x18)
-#define GPIO_PEDD __REG(GPIO_PHYS + 0x24)
-#define GPIO_PED __REG(GPIO_PHYS + 0x20)
-
-
- /* Static Memory Controller registers */
-
-#define SMC_BCR0 __REG(SMC_PHYS + 0x00) /* Bank 0 Configuration */
-#define SMC_BCR1 __REG(SMC_PHYS + 0x04) /* Bank 1 Configuration */
-#define SMC_BCR2 __REG(SMC_PHYS + 0x08) /* Bank 2 Configuration */
-#define SMC_BCR3 __REG(SMC_PHYS + 0x0C) /* Bank 3 Configuration */
-#define SMC_BCR6 __REG(SMC_PHYS + 0x18) /* Bank 6 Configuration */
-#define SMC_BCR7 __REG(SMC_PHYS + 0x1c) /* Bank 7 Configuration */
-
-
-#ifdef CONFIG_MACH_KEV7A400
-# define CPLD_RD_OPT_DIP_SW __REG16(CPLD_PHYS + 0x00) /* Read Option SW */
-# define CPLD_WR_IO_BRD_CTL __REG16(CPLD_PHYS + 0x00) /* Write Control */
-# define CPLD_RD_PB_KEYS __REG16(CPLD_PHYS + 0x02) /* Read Btn Keys */
-# define CPLD_LATCHED_INTS __REG16(CPLD_PHYS + 0x04) /* Read INTR stat. */
-# define CPLD_CL_INT __REG16(CPLD_PHYS + 0x04) /* Clear INTR stat */
-# define CPLD_BOOT_MMC_STATUS __REG16(CPLD_PHYS + 0x06) /* R/O */
-# define CPLD_RD_KPD_ROW_SENSE __REG16(CPLD_PHYS + 0x08)
-# define CPLD_WR_PB_INT_MASK __REG16(CPLD_PHYS + 0x08)
-# define CPLD_RD_BRD_DISP_SW __REG16(CPLD_PHYS + 0x0a)
-# define CPLD_WR_EXT_INT_MASK __REG16(CPLD_PHYS + 0x0a)
-# define CPLD_LCD_PWR_CNTL __REG16(CPLD_PHYS + 0x0c)
-# define CPLD_SEVEN_SEG __REG16(CPLD_PHYS + 0x0e) /* 7 seg. LED mask */
-
-#endif
-
-#if defined (CONFIG_MACH_LPD7A400) || defined (CONFIG_MACH_LPD7A404)
-
-# define CPLD_CONTROL __REG16(CPLD02_PHYS)
-# define CPLD_SPI_DATA __REG16(CPLD06_PHYS)
-# define CPLD_SPI_CONTROL __REG16(CPLD08_PHYS)
-# define CPLD_SPI_EEPROM __REG16(CPLD0A_PHYS)
-# define CPLD_INTERRUPTS __REG16(CPLD0C_PHYS) /* IRQ mask/status */
-# define CPLD_BOOT_MODE __REG16(CPLD0E_PHYS)
-# define CPLD_FLASH __REG16(CPLD10_PHYS)
-# define CPLD_POWER_MGMT __REG16(CPLD12_PHYS)
-# define CPLD_REVISION __REG16(CPLD14_PHYS)
-# define CPLD_GPIO_EXT __REG16(CPLD16_PHYS)
-# define CPLD_GPIO_DATA __REG16(CPLD18_PHYS)
-# define CPLD_GPIO_DIR __REG16(CPLD1A_PHYS)
-
-#endif
-
- /* Timer registers */
-
-#define TIMER_LOAD1 __REG(TIMER_PHYS + 0x00) /* Timer 1 initial value */
-#define TIMER_VALUE1 __REG(TIMER_PHYS + 0x04) /* Timer 1 current value */
-#define TIMER_CONTROL1 __REG(TIMER_PHYS + 0x08) /* Timer 1 control word */
-#define TIMER_EOI1 __REG(TIMER_PHYS + 0x0c) /* Timer 1 interrupt clear */
-
-#define TIMER_LOAD2 __REG(TIMER_PHYS + 0x20) /* Timer 2 initial value */
-#define TIMER_VALUE2 __REG(TIMER_PHYS + 0x24) /* Timer 2 current value */
-#define TIMER_CONTROL2 __REG(TIMER_PHYS + 0x28) /* Timer 2 control word */
-#define TIMER_EOI2 __REG(TIMER_PHYS + 0x2c) /* Timer 2 interrupt clear */
-
-#define TIMER_BUZZCON __REG(TIMER_PHYS + 0x40) /* Buzzer configuration */
-
-#define TIMER_LOAD3 __REG(TIMER_PHYS + 0x80) /* Timer 3 initial value */
-#define TIMER_VALUE3 __REG(TIMER_PHYS + 0x84) /* Timer 3 current value */
-#define TIMER_CONTROL3 __REG(TIMER_PHYS + 0x88) /* Timer 3 control word */
-#define TIMER_EOI3 __REG(TIMER_PHYS + 0x8c) /* Timer 3 interrupt clear */
-
-#define TIMER_C_ENABLE (1<<7)
-#define TIMER_C_PERIODIC (1<<6)
-#define TIMER_C_FREERUNNING (0)
-#define TIMER_C_2KHZ (0x00) /* 1.986 kHz */
-#define TIMER_C_508KHZ (0x08)
-
- /* GPIO registers */
-
-#define GPIO_PFDD __REG(GPIO_PHYS + 0x34) /* PF direction */
-#define GPIO_INTTYPE1 __REG(GPIO_PHYS + 0x4c) /* IRQ edge or lvl */
-#define GPIO_INTTYPE2 __REG(GPIO_PHYS + 0x50) /* IRQ activ hi/lo */
-#define GPIO_GPIOFEOI __REG(GPIO_PHYS + 0x54) /* GPIOF end of IRQ */
-#define GPIO_GPIOFINTEN __REG(GPIO_PHYS + 0x58) /* GPIOF IRQ enable */
-#define GPIO_INTSTATUS __REG(GPIO_PHYS + 0x5c) /* GPIOF IRQ latch */
-#define GPIO_RAWINTSTATUS __REG(GPIO_PHYS + 0x60) /* GPIOF IRQ raw */
-
-
-#endif /* _ASM_ARCH_REGISTERS_H */
diff --git a/arch/arm/mach-lh7a40x/include/mach/ssp.h b/arch/arm/mach-lh7a40x/include/mach/ssp.h
deleted file mode 100644
index 509916182e34..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/ssp.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* ssp.h
-
- written by Marc Singer
- 6 Dec 2004
-
- Copyright (C) 2004 Marc Singer
-
- -----------
- DESCRIPTION
- -----------
-
- This SSP header is available throughout the kernel, for this
- machine/architecture, because drivers that use it may be dispersed.
-
- This file was cloned from the 7952x implementation. It would be
- better to share them, but we're taking an easier approach for the
- time being.
-
-*/
-
-#if !defined (__SSP_H__)
-# define __SSP_H__
-
-/* ----- Includes */
-
-/* ----- Types */
-
-struct ssp_driver {
- int (*init) (void);
- void (*exit) (void);
- void (*acquire) (void);
- void (*release) (void);
- int (*configure) (int device, int mode, int speed,
- int frame_size_write, int frame_size_read);
- void (*chip_select) (int enable);
- void (*set_callbacks) (void* handle,
- irqreturn_t (*callback_tx)(void*),
- irqreturn_t (*callback_rx)(void*));
- void (*enable) (void);
- void (*disable) (void);
-// int (*save_state) (void*);
-// void (*restore_state) (void*);
- int (*read) (void);
- int (*write) (u16 data);
- int (*write_read) (u16 data);
- void (*flush) (void);
- void (*write_async) (void* pv, size_t cb);
- size_t (*write_pos) (void);
-};
-
- /* These modes are only available on the LH79524 */
-#define SSP_MODE_SPI (1)
-#define SSP_MODE_SSI (2)
-#define SSP_MODE_MICROWIRE (3)
-#define SSP_MODE_I2S (4)
-
- /* CPLD SPI devices */
-#define DEVICE_EEPROM 0 /* Configuration eeprom */
-#define DEVICE_MAC 1 /* MAC eeprom (LPD79524) */
-#define DEVICE_CODEC 2 /* Audio codec */
-#define DEVICE_TOUCH 3 /* Touch screen (LPD79520) */
-
-/* ----- Globals */
-
-/* ----- Prototypes */
-
-//extern struct ssp_driver lh79520_i2s_driver;
-extern struct ssp_driver lh7a400_cpld_ssp_driver;
-
-#endif /* __SSP_H__ */
diff --git a/arch/arm/mach-lh7a40x/include/mach/system.h b/arch/arm/mach-lh7a40x/include/mach/system.h
deleted file mode 100644
index 45a56d3b93d7..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/system.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/system.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-static inline void arch_idle(void)
-{
- cpu_do_idle ();
-}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- cpu_reset (0);
-}
diff --git a/arch/arm/mach-lh7a40x/include/mach/timex.h b/arch/arm/mach-lh7a40x/include/mach/timex.h
deleted file mode 100644
index 08028cef1b3b..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/timex.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/timex.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <mach/constants.h>
-
-#define CLOCK_TICK_RATE (PLL_CLOCK/6/16)
-
-/*
-#define CLOCK_TICK_RATE 3686400
-*/
diff --git a/arch/arm/mach-lh7a40x/include/mach/uncompress.h b/arch/arm/mach-lh7a40x/include/mach/uncompress.h
deleted file mode 100644
index 55b80d479eb4..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/uncompress.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/uncompress.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <mach/registers.h>
-
-#ifndef UART_R_DATA
-# define UART_R_DATA (0x00)
-#endif
-#ifndef UART_R_STATUS
-# define UART_R_STATUS (0x10)
-#endif
-#define nTxRdy (0x20) /* Not TxReady (literally Tx FIFO full) */
-
- /* Access UART with physical addresses before MMU is setup */
-#define UART_STATUS (*(volatile unsigned long*) (UART2_PHYS + UART_R_STATUS))
-#define UART_DATA (*(volatile unsigned long*) (UART2_PHYS + UART_R_DATA))
-
-static inline void putc(int ch)
-{
- while (UART_STATUS & nTxRdy)
- barrier();
- UART_DATA = ch;
-}
-
-static inline void flush(void)
-{
-}
-
- /* NULL functions; we don't presently need them */
-#define arch_decomp_setup()
-#define arch_decomp_wdog()
diff --git a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h
deleted file mode 100644
index d62da7358b16..000000000000
--- a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* arch/arm/mach-lh7a40x/include/mach/vmalloc.h
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-#define VMALLOC_END (0xe8000000UL)
diff --git a/arch/arm/mach-lh7a40x/irq-kev7a400.c b/arch/arm/mach-lh7a40x/irq-kev7a400.c
deleted file mode 100644
index c7433b3c5812..000000000000
--- a/arch/arm/mach-lh7a40x/irq-kev7a400.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/* arch/arm/mach-lh7a40x/irq-kev7a400.c
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/init.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
- /* KEV7a400 CPLD IRQ handling */
-
-static u16 CPLD_IRQ_mask; /* Mask for CPLD IRQs, 1 == unmasked */
-
-static void
-lh7a400_ack_cpld_irq (u32 irq)
-{
- CPLD_CL_INT = 1 << (irq - IRQ_KEV7A400_CPLD);
-}
-
-static void
-lh7a400_mask_cpld_irq (u32 irq)
-{
- CPLD_IRQ_mask &= ~(1 << (irq - IRQ_KEV7A400_CPLD));
- CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
-}
-
-static void
-lh7a400_unmask_cpld_irq (u32 irq)
-{
- CPLD_IRQ_mask |= 1 << (irq - IRQ_KEV7A400_CPLD);
- CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
-}
-
-static struct
-irq_chip lh7a400_cpld_chip = {
- .name = "CPLD",
- .ack = lh7a400_ack_cpld_irq,
- .mask = lh7a400_mask_cpld_irq,
- .unmask = lh7a400_unmask_cpld_irq,
-};
-
-static void
-lh7a400_cpld_handler (unsigned int irq, struct irq_desc *desc)
-{
- u32 mask = CPLD_LATCHED_INTS;
-	irq = IRQ_KEV7A400_CPLD;
- for (; mask; mask >>= 1, ++irq) {
- if (mask & 1)
-			generic_handle_irq (irq);
- }
-}
-
- /* IRQ initialization */
-
-void __init
-lh7a400_init_board_irq (void)
-{
- int irq;
-
- for (irq = IRQ_KEV7A400_CPLD;
-	     irq < IRQ_KEV7A400_CPLD + NR_IRQ_BOARD; ++irq) {
- set_irq_chip (irq, &lh7a400_cpld_chip);
- set_irq_handler (irq, handle_edge_irq);
- set_irq_flags (irq, IRQF_VALID);
- }
-	set_irq_chained_handler (IRQ_CPLD, lh7a400_cpld_handler);
-
- /* Clear all CPLD interrupts */
- CPLD_CL_INT = 0xff; /* CPLD_INTR_MMC_CD | CPLD_INTR_ETH_INT; */
-
- /* *** FIXME CF enabled in ide-probe.c */
-
- GPIO_GPIOINTEN = 0; /* Disable all GPIO interrupts */
- barrier();
- GPIO_INTTYPE1
-		= (GPIO_INTR_PCC1_CD | GPIO_INTR_PCC2_CD);	/* Edge trig. */
- GPIO_INTTYPE2 = 0; /* Falling edge & low-level */
- GPIO_GPIOFEOI = 0xff; /* Clear all GPIO interrupts */
- GPIO_GPIOINTEN = 0xff; /* Enable all GPIO interrupts */
-
- init_FIQ();
-}
diff --git a/arch/arm/mach-lh7a40x/irq-lh7a400.c b/arch/arm/mach-lh7a40x/irq-lh7a400.c
deleted file mode 100644
index f2e7e655ca35..000000000000
--- a/arch/arm/mach-lh7a40x/irq-lh7a400.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/* arch/arm/mach-lh7a40x/irq-lh7a400.c
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
- /* CPU IRQ handling */
-
-static void lh7a400_mask_irq(struct irq_data *d)
-{
- INTC_INTENC = (1 << d->irq);
-}
-
-static void lh7a400_unmask_irq(struct irq_data *d)
-{
- INTC_INTENS = (1 << d->irq);
-}
-
-static void lh7a400_ack_gpio_irq(struct irq_data *d)
-{
- GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
- INTC_INTENC = (1 << d->irq);
-}
-
-static struct irq_chip lh7a400_internal_chip = {
- .name = "MPU",
- .irq_ack = lh7a400_mask_irq, /* Level triggering -> mask is ack */
- .irq_mask = lh7a400_mask_irq,
- .irq_unmask = lh7a400_unmask_irq,
-};
-
-static struct irq_chip lh7a400_gpio_chip = {
- .name = "GPIO",
- .irq_ack = lh7a400_ack_gpio_irq,
- .irq_mask = lh7a400_mask_irq,
- .irq_unmask = lh7a400_unmask_irq,
-};
-
-
- /* IRQ initialization */
-
-void __init lh7a400_init_irq (void)
-{
- int irq;
-
- INTC_INTENC = 0xffffffff; /* Disable all interrupts */
- GPIO_GPIOFINTEN = 0x00; /* Disable all GPIOF interrupts */
- barrier ();
-
- for (irq = 0; irq < NR_IRQS; ++irq) {
- switch (irq) {
- case IRQ_GPIO0INTR:
- case IRQ_GPIO1INTR:
- case IRQ_GPIO2INTR:
- case IRQ_GPIO3INTR:
- case IRQ_GPIO4INTR:
- case IRQ_GPIO5INTR:
- case IRQ_GPIO6INTR:
- case IRQ_GPIO7INTR:
- set_irq_chip (irq, &lh7a400_gpio_chip);
- set_irq_handler (irq, handle_level_irq); /* OK default */
- break;
- default:
- set_irq_chip (irq, &lh7a400_internal_chip);
- set_irq_handler (irq, handle_level_irq);
- }
- set_irq_flags (irq, IRQF_VALID);
- }
-
- lh7a40x_init_board_irq ();
-
-/* *** FIXME: the LH7a400 does use FIQ interrupts in some cases. For
- the time being, these are not initialized. */
-
-/* init_FIQ(); */
-}
diff --git a/arch/arm/mach-lh7a40x/irq-lh7a404.c b/arch/arm/mach-lh7a40x/irq-lh7a404.c
deleted file mode 100644
index 14b173389573..000000000000
--- a/arch/arm/mach-lh7a40x/irq-lh7a404.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/* arch/arm/mach-lh7a40x/irq-lh7a404.c
- *
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-#define USE_PRIORITIES
-
-/* See Documentation/arm/Sharp-LH/VectoredInterruptController for more
- * information on using the vectored interrupt controller's
- * prioritizing feature. */
-
-static unsigned char irq_pri_vic1[] = {
-#if defined (USE_PRIORITIES)
- IRQ_GPIO3INTR, /* CPLD */
- IRQ_DMAM2P4, IRQ_DMAM2P5, /* AC97 */
-#endif
-};
-static unsigned char irq_pri_vic2[] = {
-#if defined (USE_PRIORITIES)
- IRQ_T3UI, /* Timer */
- IRQ_GPIO7INTR, /* CPLD */
- IRQ_UART1INTR, IRQ_UART2INTR, IRQ_UART3INTR,
- IRQ_LCDINTR, /* LCD */
- IRQ_TSCINTR, /* ADC/Touchscreen */
-#endif
-};
-
- /* CPU IRQ handling */
-
-static void lh7a404_vic1_mask_irq(struct irq_data *d)
-{
- VIC1_INTENCLR = (1 << d->irq);
-}
-
-static void lh7a404_vic1_unmask_irq(struct irq_data *d)
-{
- VIC1_INTEN = (1 << d->irq);
-}
-
-static void lh7a404_vic2_mask_irq(struct irq_data *d)
-{
- VIC2_INTENCLR = (1 << (d->irq - 32));
-}
-
-static void lh7a404_vic2_unmask_irq(struct irq_data *d)
-{
- VIC2_INTEN = (1 << (d->irq - 32));
-}
-
-static void lh7a404_vic1_ack_gpio_irq(struct irq_data *d)
-{
- GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
- VIC1_INTENCLR = (1 << d->irq);
-}
-
-static void lh7a404_vic2_ack_gpio_irq(struct irq_data *d)
-{
- GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
- VIC2_INTENCLR = (1 << d->irq);
-}
-
-static struct irq_chip lh7a404_vic1_chip = {
- .name = "VIC1",
- .irq_ack = lh7a404_vic1_mask_irq, /* Because level-triggered */
- .irq_mask = lh7a404_vic1_mask_irq,
- .irq_unmask = lh7a404_vic1_unmask_irq,
-};
-
-static struct irq_chip lh7a404_vic2_chip = {
- .name = "VIC2",
- .irq_ack = lh7a404_vic2_mask_irq, /* Because level-triggered */
- .irq_mask = lh7a404_vic2_mask_irq,
- .irq_unmask = lh7a404_vic2_unmask_irq,
-};
-
-static struct irq_chip lh7a404_gpio_vic1_chip = {
- .name = "GPIO-VIC1",
- .irq_ack = lh7a404_vic1_ack_gpio_irq,
- .irq_mask = lh7a404_vic1_mask_irq,
- .irq_unmask = lh7a404_vic1_unmask_irq,
-};
-
-static struct irq_chip lh7a404_gpio_vic2_chip = {
- .name = "GPIO-VIC2",
- .irq_ack = lh7a404_vic2_ack_gpio_irq,
- .irq_mask = lh7a404_vic2_mask_irq,
- .irq_unmask = lh7a404_vic2_unmask_irq,
-};
-
- /* IRQ initialization */
-
-#if defined (CONFIG_ARCH_LH7A400) && defined (CONFIG_ARCH_LH7A404)
-extern void* branch_irq_lh7a400;
-#endif
-
-void __init lh7a404_init_irq (void)
-{
- int irq;
-
-#if defined (CONFIG_ARCH_LH7A400) && defined (CONFIG_ARCH_LH7A404)
-#define NOP 0xe1a00000 /* mov r0, r0 */
- branch_irq_lh7a400 = NOP;
-#endif
-
- VIC1_INTENCLR = 0xffffffff;
- VIC2_INTENCLR = 0xffffffff;
- VIC1_INTSEL = 0; /* All IRQs */
- VIC2_INTSEL = 0; /* All IRQs */
- VIC1_NVADDR = VA_VIC1DEFAULT;
- VIC2_NVADDR = VA_VIC2DEFAULT;
- VIC1_VECTADDR = 0;
- VIC2_VECTADDR = 0;
-
- GPIO_GPIOFINTEN = 0x00; /* Disable all GPIOF interrupts */
- barrier ();
-
- /* Install prioritized interrupts, if there are any. */
-	/* OR-ing in VIC_CNTL_ENABLE (0x20) below enables each vectored slot. */
- for (irq = 0; irq < 16; ++irq) {
- (&VIC1_VAD0)[irq]
- = (irq < ARRAY_SIZE (irq_pri_vic1))
- ? (irq_pri_vic1[irq] | VA_VECTORED) : 0;
- (&VIC1_VECTCNTL0)[irq]
- = (irq < ARRAY_SIZE (irq_pri_vic1))
- ? (irq_pri_vic1[irq] | VIC_CNTL_ENABLE) : 0;
- (&VIC2_VAD0)[irq]
- = (irq < ARRAY_SIZE (irq_pri_vic2))
- ? (irq_pri_vic2[irq] | VA_VECTORED) : 0;
- (&VIC2_VECTCNTL0)[irq]
- = (irq < ARRAY_SIZE (irq_pri_vic2))
- ? (irq_pri_vic2[irq] | VIC_CNTL_ENABLE) : 0;
- }
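	/* For illustration, with the priority tables above the first VIC1
	 * slot is programmed as
	 *   VIC1_VAD0      = IRQ_GPIO3INTR | VA_VECTORED      (22 | 0x100)
	 *   VIC1_VECTCNTL0 = IRQ_GPIO3INTR | VIC_CNTL_ENABLE   (22 | 0x20)
	 * so the GPIO3 (CPLD cascade) interrupt is delivered through the
	 * vectored path ahead of the unprioritized sources. */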
-
- for (irq = 0; irq < NR_IRQS; ++irq) {
- switch (irq) {
- case IRQ_GPIO0INTR:
- case IRQ_GPIO1INTR:
- case IRQ_GPIO2INTR:
- case IRQ_GPIO3INTR:
- case IRQ_GPIO4INTR:
- case IRQ_GPIO5INTR:
- case IRQ_GPIO6INTR:
- case IRQ_GPIO7INTR:
- set_irq_chip (irq, irq < 32
- ? &lh7a404_gpio_vic1_chip
- : &lh7a404_gpio_vic2_chip);
- set_irq_handler (irq, handle_level_irq); /* OK default */
- break;
- default:
- set_irq_chip (irq, irq < 32
- ? &lh7a404_vic1_chip
- : &lh7a404_vic2_chip);
- set_irq_handler (irq, handle_level_irq);
- }
- set_irq_flags (irq, IRQF_VALID);
- }
-
- lh7a40x_init_board_irq ();
-}
diff --git a/arch/arm/mach-lh7a40x/irq-lpd7a40x.c b/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
deleted file mode 100644
index 1bfdcddcb93e..000000000000
--- a/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/* arch/arm/mach-lh7a40x/irq-lpd7a40x.c
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-static void lh7a40x_ack_cpld_irq(struct irq_data *d)
-{
- /* CPLD doesn't have ack capability */
-}
-
-static void lh7a40x_mask_cpld_irq(struct irq_data *d)
-{
- switch (d->irq) {
- case IRQ_LPD7A40X_ETH_INT:
- CPLD_INTERRUPTS = CPLD_INTERRUPTS | 0x4;
- break;
- case IRQ_LPD7A400_TS:
- CPLD_INTERRUPTS = CPLD_INTERRUPTS | 0x8;
- break;
- }
-}
-
-static void lh7a40x_unmask_cpld_irq(struct irq_data *d)
-{
- switch (d->irq) {
- case IRQ_LPD7A40X_ETH_INT:
- CPLD_INTERRUPTS = CPLD_INTERRUPTS & ~ 0x4;
- break;
- case IRQ_LPD7A400_TS:
- CPLD_INTERRUPTS = CPLD_INTERRUPTS & ~ 0x8;
- break;
- }
-}
-
-static struct irq_chip lh7a40x_cpld_chip = {
- .name = "CPLD",
- .irq_ack = lh7a40x_ack_cpld_irq,
- .irq_mask = lh7a40x_mask_cpld_irq,
- .irq_unmask = lh7a40x_unmask_cpld_irq,
-};
-
-static void lh7a40x_cpld_handler (unsigned int irq, struct irq_desc *desc)
-{
- unsigned int mask = CPLD_INTERRUPTS;
-
- desc->irq_data.chip->ack (irq);
-
- if ((mask & 0x1) == 0) /* WLAN */
- generic_handle_irq(IRQ_LPD7A40X_ETH_INT);
-
- if ((mask & 0x2) == 0) /* Touch */
- generic_handle_irq(IRQ_LPD7A400_TS);
-
- desc->irq_data.chip->unmask (irq); /* Level-triggered need this */
-}
-
-
- /* IRQ initialization */
-
-void __init lh7a40x_init_board_irq (void)
-{
- int irq;
-
- /* Rev A (v2.8): PF0, PF1, PF2, and PF3 are available IRQs.
- PF7 supports the CPLD.
- Rev B (v3.4): PF0, PF1, and PF2 are available IRQs.
- PF3 supports the CPLD.
- (Some) LPD7A404 prerelease boards report a version
- number of 0x16, but we force an override since the
- hardware is of the newer variety.
- */
-
- unsigned char cpld_version = CPLD_REVISION;
- int pinCPLD;
-
-#if defined CONFIG_MACH_LPD7A404
- cpld_version = 0x34; /* Override, for now */
-#endif
- pinCPLD = (cpld_version == 0x28) ? 7 : 3;
-
- /* First, configure user controlled GPIOF interrupts */
-
- GPIO_PFDD &= ~0x0f; /* PF0-3 are inputs */
- GPIO_INTTYPE1 &= ~0x0f; /* PF0-3 are level triggered */
- GPIO_INTTYPE2 &= ~0x0f; /* PF0-3 are active low */
- barrier ();
- GPIO_GPIOFINTEN |= 0x0f; /* Enable PF0, PF1, PF2, and PF3 IRQs */
-
- /* Then, configure CPLD interrupt */
-
- CPLD_INTERRUPTS = 0x0c; /* Disable all CPLD interrupts */
- GPIO_PFDD &= ~(1 << pinCPLD); /* Make input */
- GPIO_INTTYPE1 |= (1 << pinCPLD); /* Edge triggered */
- GPIO_INTTYPE2 &= ~(1 << pinCPLD); /* Active low */
- barrier ();
- GPIO_GPIOFINTEN |= (1 << pinCPLD); /* Enable */
-
- /* Cascade CPLD interrupts */
-
- for (irq = IRQ_BOARD_START;
- irq < IRQ_BOARD_START + NR_IRQ_BOARD; ++irq) {
- set_irq_chip (irq, &lh7a40x_cpld_chip);
- set_irq_handler (irq, handle_edge_irq);
- set_irq_flags (irq, IRQF_VALID);
- }
-
- set_irq_chained_handler ((cpld_version == 0x28)
- ? IRQ_CPLD_V28
- : IRQ_CPLD_V34,
- lh7a40x_cpld_handler);
-}
diff --git a/arch/arm/mach-lh7a40x/lcd-panel.h b/arch/arm/mach-lh7a40x/lcd-panel.h
deleted file mode 100644
index a7f5027b2f78..000000000000
--- a/arch/arm/mach-lh7a40x/lcd-panel.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/* lcd-panel.h
-
- written by Marc Singer
- 18 Jul 2005
-
- Copyright (C) 2005 Marc Singer
-
- -----------
- DESCRIPTION
- -----------
-
- Only one panel may be defined at a time.
-
- The pixel clock is calculated to be no greater than the target.
-
- Each timing value is accompanied by a specification comment.
-
- UNITS/MIN/TYP/MAX
-
- Most of the units will be in clocks.
-
- USE_RGB555
-
- Define this macro to configure the AMBA LCD controller to use an
- RGB555 encoding for the pels instead of the normal RGB565.
-
-   LPD79520, LPD79524, LPD7A400, LPD7A404-10, LPD7A404-11
-
- These boards are best approximated by 555 for all panels. Some
- can use an extra low-order bit of blue in bit 16 of the color
- value, but we don't have a way to communicate this non-linear
- mapping to the kernel.
-
-*/
-
-#if !defined (__LCD_PANEL_H__)
-# define __LCD_PANEL_H__
-
-#if defined (MACH_LPD79520)\
- || defined (MACH_LPD79524)\
- || defined (MACH_LPD7A400)\
- || defined (MACH_LPD7A404)
-# define USE_RGB555
-#endif
-
-struct clcd_panel_extra {
- unsigned int hrmode;
- unsigned int clsen;
- unsigned int spsen;
- unsigned int pcdel;
- unsigned int revdel;
- unsigned int lpdel;
- unsigned int spldel;
- unsigned int pc2del;
-};
-
-#define NS_TO_CLOCK(ns,c) ((((ns)*((c)/1000) + (1000000 - 1))/1000000))
-#define CLOCK_TO_DIV(e,c) (((c) + (e) - 1)/(e))
-
-#if defined CONFIG_FB_ARMCLCD_SHARP_LQ035Q7DB02_HRTFT
-
- /* Logic Product Development LCD 3.5" QVGA HRTFT -10 */
- /* Sharp PN LQ035Q7DB02 w/HRTFT controller chip */
-
-#define PIX_CLOCK_TARGET (6800000)
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
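To make the arithmetic concrete, assuming the 99993900 Hz HCLK quoted in the LQ121S1DG41 note further down, the divider for this panel works out as:

/*   PIX_CLOCK_DIVIDER = CLOCK_TO_DIV(6800000, 99993900)
 *                     = (99993900 + 6800000 - 1) / 6800000 = 15
 *   PIX_CLOCK         = 99993900 / 15 = 6666260 Hz
 * so the resulting pixel clock stays at or below the 6.8 MHz target,
 * as promised in the header comment. */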
-
-static struct clcd_panel lcd_panel = {
- .mode = {
- .name = "3.5in QVGA (LQ035Q7DB02)",
- .xres = 240,
- .yres = 320,
- .pixclock = PIX_CLOCK,
- .left_margin = 16,
- .right_margin = 21,
- .upper_margin = 8, // line/8/8/8
- .lower_margin = 5,
- .hsync_len = 61,
- .vsync_len = NS_TO_CLOCK (60, PIX_CLOCK),
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IPC | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#define HAS_LCD_PANEL_EXTRA
-
-static struct clcd_panel_extra lcd_panel_extra = {
- .hrmode = 1,
- .clsen = 1,
- .spsen = 1,
- .pcdel = 8,
- .revdel = 7,
- .lpdel = 13,
- .spldel = 77,
- .pc2del = 208,
-};
-
-#endif
-
-#if defined CONFIG_FB_ARMCLCD_SHARP_LQ057Q3DC02
-
- /* Logic Product Development LCD 5.7" QVGA -10 */
- /* Sharp PN LQ057Q3DC02 */
- /* QVGA mode, V/Q=LOW */
-
-/* From Sharp on 2006.1.3. I believe some of the values are incorrect
- * based on the datasheet.
-
- Timing0 TIMING1 TIMING2 CONTROL
- 0x140A0C4C 0x080504EF 0x013F380D 0x00000829
- HBP= 20 VBP= 8 BCD= 0
- HFP= 10 VFP= 5 CPL=319
- HSW= 12 VSW= 1 IOE= 0
- PPL= 19 LPP=239 IPC= 1
- IHS= 1
- IVS= 1
- ACB= 0
- CSEL= 0
- PCD= 13
-
- */
-
-/* The full horizontal cycle (Th) is clock/360/400/450. */
-/* The full vertical cycle (Tv) is line/251/262/280. */
-
-#define PIX_CLOCK_TARGET (6300000) /* -/6.3/7 MHz */
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
-
-static struct clcd_panel lcd_panel = {
- .mode = {
- .name = "5.7in QVGA (LQ057Q3DC02)",
- .xres = 320,
- .yres = 240,
- .pixclock = PIX_CLOCK,
- .left_margin = 11,
- .right_margin = 400-11-320-2,
- .upper_margin = 7, // line/7/7/7
- .lower_margin = 262-7-240-2,
- .hsync_len = 2, // clk/2/96/200
- .vsync_len = 2, // line/2/-/34
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IHS | TIM2_IVS
- | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#endif
-
-#if defined CONFIG_FB_ARMCLCD_SHARP_LQ64D343
-
- /* Logic Product Development LCD 6.4" VGA -10 */
- /* Sharp PN LQ64D343 */
-
-/* The full horizontal cycle (Th) is clock/750/800/900. */
-/* The full vertical cycle (Tv) is line/515/525/560. */
-
-#define PIX_CLOCK_TARGET (28330000)
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
-
-static struct clcd_panel lcd_panel = {
- .mode = {
-		.name		= "6.4in VGA (LQ64D343)",
- .xres = 640,
- .yres = 480,
- .pixclock = PIX_CLOCK,
- .left_margin = 32,
- .right_margin = 800-32-640-96,
- .upper_margin = 32, // line/34/34/34
- .lower_margin = 540-32-480-2,
- .hsync_len = 96, // clk/2/96/200
- .vsync_len = 2, // line/2/-/34
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IHS | TIM2_IVS
- | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#endif
-
-#if defined CONFIG_FB_ARMCLCD_SHARP_LQ10D368
-
- /* Logic Product Development LCD 10.4" VGA -10 */
- /* Sharp PN LQ10D368 */
-
-#define PIX_CLOCK_TARGET (28330000)
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
-
-static struct clcd_panel lcd_panel = {
- .mode = {
- .name = "10.4in VGA (LQ10D368)",
- .xres = 640,
- .yres = 480,
- .pixclock = PIX_CLOCK,
- .left_margin = 21,
- .right_margin = 15,
- .upper_margin = 34,
- .lower_margin = 5,
- .hsync_len = 96,
- .vsync_len = 16,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IHS | TIM2_IVS
- | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#endif
-
-#if defined CONFIG_FB_ARMCLCD_SHARP_LQ121S1DG41
-
- /* Logic Product Development LCD 12.1" SVGA -10 */
- /* Sharp PN LQ121S1DG41, was LQ121S1DG31 */
-
-/* Note that with a 99993900 Hz HCLK, it is not possible to hit the
- * target clock frequency range of 35MHz to 42MHz. */
-
-/* If the target pixel clock is substantially lower than the panel
- * spec, this is done to prevent the LCD display from glitching when
- * the CPU is under load. A pixel clock higher than 25MHz
- * (empirically determined) will compete with the CPU for bus cycles
- * for the Ethernet chip. However, even a pixel clock of 10MHz
- * competes with Compact Flash interface during some operations
- * (fdisk, e2fsck). And, at that speed the display may have a visible
- * flicker. */
-
-/* The full horizontal cycle (Th) is clock/832/1056/1395. */
-
-#define PIX_CLOCK_TARGET (20000000)
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
-
-static struct clcd_panel lcd_panel = {
- .mode = {
- .name = "12.1in SVGA (LQ121S1DG41)",
- .xres = 800,
- .yres = 600,
- .pixclock = PIX_CLOCK,
- .left_margin = 89, // ns/5/-/(1/PIX_CLOCK)-10
- .right_margin = 1056-800-89-128,
- .upper_margin = 23, // line/23/23/23
- .lower_margin = 44,
- .hsync_len = 128, // clk/2/128/200
- .vsync_len = 4, // line/2/4/6
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IHS | TIM2_IVS
- | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#endif
-
-#if defined CONFIG_FB_ARMCLCD_HITACHI
-
- /* Hitachi*/
- /* Submitted by Michele Da Rold <michele.darold@ecsproject.com> */
-
-#define PIX_CLOCK_TARGET (49000000)
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
-
-static struct clcd_panel lcd_panel = {
- .mode = {
- .name = "Hitachi 800x480",
- .xres = 800,
- .yres = 480,
- .pixclock = PIX_CLOCK,
- .left_margin = 88,
- .right_margin = 40,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 128,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IPC | TIM2_IHS | TIM2_IVS
- | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#endif
-
-
-#if defined CONFIG_FB_ARMCLCD_AUO_A070VW01_WIDE
-
-	/* AU Optronics A070VW01 7.0in wide-screen color display */
- /* Submitted by Michele Da Rold <michele.darold@ecsproject.com> */
-
-#define PIX_CLOCK_TARGET (10000000)
-#define PIX_CLOCK_DIVIDER CLOCK_TO_DIV (PIX_CLOCK_TARGET, HCLK)
-#define PIX_CLOCK (HCLK/PIX_CLOCK_DIVIDER)
-
-static struct clcd_panel lcd_panel = {
- .mode = {
- .name = "7.0in Wide (A070VW01)",
- .xres = 480,
- .yres = 234,
- .pixclock = PIX_CLOCK,
- .left_margin = 30,
- .right_margin = 25,
- .upper_margin = 14,
- .lower_margin = 12,
- .hsync_len = 100,
- .vsync_len = 1,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IPC | TIM2_IHS | TIM2_IVS
- | (PIX_CLOCK_DIVIDER - 2),
- .cntl = CNTL_LCDTFT | CNTL_WATERMARK,
- .bpp = 16,
-};
-
-#endif
-
-#undef NS_TO_CLOCK
-#undef CLOCK_TO_DIV
-
-#endif /* __LCD_PANEL_H__ */
diff --git a/arch/arm/mach-lh7a40x/ssp-cpld.c b/arch/arm/mach-lh7a40x/ssp-cpld.c
deleted file mode 100644
index 2901d49d1484..000000000000
--- a/arch/arm/mach-lh7a40x/ssp-cpld.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/* arch/arm/mach-lh7a40x/ssp-cpld.c
- *
- * Copyright (C) 2004,2005 Marc Singer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * SSP/SPI driver for the CardEngine CPLD.
- *
- */
-
-/* NOTES
- -----
-
- o *** This driver is cribbed from the 7952x implementation.
- Some comments may not apply.
-
- o This driver contains sufficient logic to control either the
- serial EEPROMs or the audio codec. It is included in the kernel
- to support the codec. The EEPROMs are really the responsibility
- of the boot loader and should probably be left alone.
-
- o The code must be augmented to cope with multiple, simultaneous
- clients.
- o The audio codec writes to the codec chip whenever playback
- starts.
- o The touchscreen driver writes to the ads chip every time it
- samples.
- o The audio codec must write 16 bits, but the touch chip writes
- are 8 bits long.
- o We need to be able to keep these configurations separate while
- simultaneously active.
-
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-//#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-//#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <mach/ssp.h>
-
-//#define TALK
-
-#if defined (TALK)
-#define PRINTK(f...) printk (f)
-#else
-#define PRINTK(f...) do {} while (0)
-#endif
-
-#if defined (CONFIG_ARCH_LH7A400)
-# define CPLD_SPID __REGP16(CPLD06_VIRT) /* SPI data */
-# define CPLD_SPIC __REGP16(CPLD08_VIRT) /* SPI control */
-# define CPLD_SPIC_CS_CODEC (1<<0)
-# define CPLD_SPIC_CS_TOUCH (1<<1)
-# define CPLD_SPIC_WRITE (0<<2)
-# define CPLD_SPIC_READ (1<<2)
-# define CPLD_SPIC_DONE (1<<3) /* r/o */
-# define CPLD_SPIC_LOAD (1<<4)
-# define CPLD_SPIC_START (1<<4)
-# define CPLD_SPIC_LOADED (1<<5) /* r/o */
-#endif
-
-#define CPLD_SPI __REGP16(CPLD0A_VIRT) /* SPI operation */
-#define CPLD_SPI_CS_EEPROM (1<<3)
-#define CPLD_SPI_SCLK (1<<2)
-#define CPLD_SPI_TX_SHIFT (1)
-#define CPLD_SPI_TX (1<<CPLD_SPI_TX_SHIFT)
-#define CPLD_SPI_RX_SHIFT (0)
-#define CPLD_SPI_RX (1<<CPLD_SPI_RX_SHIFT)
-
-/* *** FIXME: these timing values are substantially larger than the
- *** chip requires. We may implement an nsleep () function. */
-#define T_SKH 1 /* Clock time high (us) */
-#define T_SKL 1 /* Clock time low (us) */
-#define T_CS 1 /* Minimum chip select low time (us) */
-#define T_CSS 1 /* Minimum chip select setup time (us) */
-#define T_DIS 1 /* Data setup time (us) */
-
- /* EEPROM SPI bits */
-#define P_START (1<<9)
-#define P_WRITE (1<<7)
-#define P_READ (2<<7)
-#define P_ERASE (3<<7)
-#define P_EWDS (0<<7)
-#define P_WRAL (0<<7)
-#define P_ERAL (0<<7)
-#define P_EWEN (0<<7)
-#define P_A_EWDS (0<<5)
-#define P_A_WRAL (1<<5)
-#define P_A_ERAL (2<<5)
-#define P_A_EWEN (3<<5)
-
-struct ssp_configuration {
- int device;
- int mode;
- int speed;
- int frame_size_write;
- int frame_size_read;
-};
-
-static struct ssp_configuration ssp_configuration;
-static spinlock_t ssp_lock;
-
-static void enable_cs (void)
-{
- switch (ssp_configuration.device) {
- case DEVICE_EEPROM:
- CPLD_SPI |= CPLD_SPI_CS_EEPROM;
- break;
- }
- udelay (T_CSS);
-}
-
-static void disable_cs (void)
-{
- switch (ssp_configuration.device) {
- case DEVICE_EEPROM:
- CPLD_SPI &= ~CPLD_SPI_CS_EEPROM;
- break;
- }
- udelay (T_CS);
-}
-
-static void pulse_clock (void)
-{
- CPLD_SPI |= CPLD_SPI_SCLK;
- udelay (T_SKH);
- CPLD_SPI &= ~CPLD_SPI_SCLK;
- udelay (T_SKL);
-}
-
-
-/* execute_spi_command
-
- sends an spi command to a device. It first sends cwrite bits from
- v. If cread is greater than zero it will read cread bits
- (discarding the leading 0 bit) and return them. If cread is less
- than zero it will check for completetion status and return 0 on
-   than zero it will check for completion status and return 0 on
- than sending the command.
-
- On the LPD7A400, we can only read or write multiples of 8 bits on
- the codec and the touch screen device. Here, we round up.
-
-*/
-
-static int execute_spi_command (int v, int cwrite, int cread)
-{
- unsigned long l = 0;
-
-#if defined (CONFIG_MACH_LPD7A400)
- /* The codec and touch devices cannot be bit-banged. Instead,
- * the CPLD provides an eight-bit shift register and a crude
- * interface. */
- if ( ssp_configuration.device == DEVICE_CODEC
- || ssp_configuration.device == DEVICE_TOUCH) {
- int select = 0;
-
- PRINTK ("spi(%d %d.%d) 0x%04x",
- ssp_configuration.device, cwrite, cread,
- v);
-#if defined (TALK)
- if (ssp_configuration.device == DEVICE_CODEC)
- PRINTK (" 0x%03x -> %2d", v & 0x1ff, (v >> 9) & 0x7f);
-#endif
- PRINTK ("\n");
-
- if (ssp_configuration.device == DEVICE_CODEC)
- select = CPLD_SPIC_CS_CODEC;
- if (ssp_configuration.device == DEVICE_TOUCH)
- select = CPLD_SPIC_CS_TOUCH;
- if (cwrite) {
- for (cwrite = (cwrite + 7)/8; cwrite-- > 0; ) {
- CPLD_SPID = (v >> (8*cwrite)) & 0xff;
- CPLD_SPIC = select | CPLD_SPIC_LOAD;
- while (!(CPLD_SPIC & CPLD_SPIC_LOADED))
- ;
- CPLD_SPIC = select;
- while (!(CPLD_SPIC & CPLD_SPIC_DONE))
- ;
- }
- v = 0;
- }
- if (cread) {
- mdelay (2); /* *** FIXME: required by ads7843? */
- v = 0;
- for (cread = (cread + 7)/8; cread-- > 0;) {
- CPLD_SPID = 0;
- CPLD_SPIC = select | CPLD_SPIC_READ
- | CPLD_SPIC_START;
- while (!(CPLD_SPIC & CPLD_SPIC_LOADED))
- ;
- CPLD_SPIC = select | CPLD_SPIC_READ;
- while (!(CPLD_SPIC & CPLD_SPIC_DONE))
- ;
- v = (v << 8) | CPLD_SPID;
- }
- }
- return v;
- }
-#endif
-
- PRINTK ("spi(%d) 0x%04x -> 0x%x\r\n", ssp_configuration.device,
- v & 0x1ff, (v >> 9) & 0x7f);
-
- enable_cs ();
-
- v <<= CPLD_SPI_TX_SHIFT; /* Correction for position of SPI_TX bit */
- while (cwrite--) {
- CPLD_SPI
- = (CPLD_SPI & ~CPLD_SPI_TX)
- | ((v >> cwrite) & CPLD_SPI_TX);
- udelay (T_DIS);
- pulse_clock ();
- }
-
- if (cread < 0) {
- int delay = 10;
- disable_cs ();
- udelay (1);
- enable_cs ();
-
- l = -1;
- do {
- if (CPLD_SPI & CPLD_SPI_RX) {
- l = 0;
- break;
- }
- } while (udelay (1), --delay);
- }
- else
- /* We pulse the clock before the data to skip the leading zero. */
- while (cread-- > 0) {
- pulse_clock ();
- l = (l<<1)
- | (((CPLD_SPI & CPLD_SPI_RX)
- >> CPLD_SPI_RX_SHIFT) & 0x1);
- }
-
- disable_cs ();
- return l;
-}
-
-static int ssp_init (void)
-{
- spin_lock_init (&ssp_lock);
- memset (&ssp_configuration, 0, sizeof (ssp_configuration));
- return 0;
-}
-
-
-/* ssp_chip_select
-
- drops the chip select line for the CPLD shift-register controlled
- devices. It doesn't enable chip
-
-*/
-
-static void ssp_chip_select (int enable)
-{
-#if defined (CONFIG_MACH_LPD7A400)
- int select;
-
- if (ssp_configuration.device == DEVICE_CODEC)
- select = CPLD_SPIC_CS_CODEC;
- else if (ssp_configuration.device == DEVICE_TOUCH)
- select = CPLD_SPIC_CS_TOUCH;
- else
- return;
-
- if (enable)
- CPLD_SPIC = select;
- else
- CPLD_SPIC = 0;
-#endif
-}
-
-static void ssp_acquire (void)
-{
- spin_lock (&ssp_lock);
-}
-
-static void ssp_release (void)
-{
- ssp_chip_select (0); /* just in case */
- spin_unlock (&ssp_lock);
-}
-
-static int ssp_configure (int device, int mode, int speed,
- int frame_size_write, int frame_size_read)
-{
- ssp_configuration.device = device;
- ssp_configuration.mode = mode;
- ssp_configuration.speed = speed;
- ssp_configuration.frame_size_write = frame_size_write;
- ssp_configuration.frame_size_read = frame_size_read;
-
- return 0;
-}
-
-static int ssp_read (void)
-{
- return execute_spi_command (0, 0, ssp_configuration.frame_size_read);
-}
-
-static int ssp_write (u16 data)
-{
- execute_spi_command (data, ssp_configuration.frame_size_write, 0);
- return 0;
-}
-
-static int ssp_write_read (u16 data)
-{
- return execute_spi_command (data, ssp_configuration.frame_size_write,
- ssp_configuration.frame_size_read);
-}
-
-struct ssp_driver lh7a40x_cpld_ssp_driver = {
- .init = ssp_init,
- .acquire = ssp_acquire,
- .release = ssp_release,
- .configure = ssp_configure,
- .chip_select = ssp_chip_select,
- .read = ssp_read,
- .write = ssp_write,
- .write_read = ssp_write_read,
-};
-
-
-MODULE_AUTHOR("Marc Singer");
-MODULE_DESCRIPTION("LPD7A40X CPLD SPI driver");
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-lh7a40x/time.c b/arch/arm/mach-lh7a40x/time.c
deleted file mode 100644
index 4601e425bae3..000000000000
--- a/arch/arm/mach-lh7a40x/time.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * arch/arm/mach-lh7a40x/time.c
- *
- * Copyright (C) 2004 Logic Product Development
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/time.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/leds.h>
-
-#include <asm/mach/time.h>
-#include "common.h"
-
-#if HZ < 100
-# define TIMER_CONTROL TIMER_CONTROL2
-# define TIMER_LOAD TIMER_LOAD2
-# define TIMER_CONSTANT (508469/HZ)
-# define TIMER_MODE (TIMER_C_ENABLE | TIMER_C_PERIODIC | TIMER_C_508KHZ)
-# define TIMER_EOI TIMER_EOI2
-# define TIMER_IRQ IRQ_T2UI
-#else
-# define TIMER_CONTROL TIMER_CONTROL3
-# define TIMER_LOAD TIMER_LOAD3
-# define TIMER_CONSTANT (3686400/HZ)
-# define TIMER_MODE (TIMER_C_ENABLE | TIMER_C_PERIODIC)
-# define TIMER_EOI TIMER_EOI3
-# define TIMER_IRQ IRQ_T3UI
-#endif
-
-static irqreturn_t
-lh7a40x_timer_interrupt(int irq, void *dev_id)
-{
- TIMER_EOI = 0;
- timer_tick();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction lh7a40x_timer_irq = {
- .name = "LHA740x Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = lh7a40x_timer_interrupt,
-};
-
-static void __init lh7a40x_timer_init (void)
-{
- /* Stop/disable all timers */
- TIMER_CONTROL1 = 0;
- TIMER_CONTROL2 = 0;
- TIMER_CONTROL3 = 0;
-
- setup_irq (TIMER_IRQ, &lh7a40x_timer_irq);
-
- TIMER_LOAD = TIMER_CONSTANT;
- TIMER_CONTROL = TIMER_MODE;
-}
-
-struct sys_timer lh7a40x_timer = {
- .init = &lh7a40x_timer_init,
-};
diff --git a/arch/arm/mach-loki/include/mach/memory.h b/arch/arm/mach-loki/include/mach/memory.h
index 2ed7e6e732c2..66366657a875 100644
--- a/arch/arm/mach-loki/include/mach/memory.h
+++ b/arch/arm/mach-loki/include/mach/memory.h
@@ -5,6 +5,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-lpc32xx/include/mach/memory.h b/arch/arm/mach-lpc32xx/include/mach/memory.h
index 044e1acecbe6..a647dd624afa 100644
--- a/arch/arm/mach-lpc32xx/include/mach/memory.h
+++ b/arch/arm/mach-lpc32xx/include/mach/memory.h
@@ -22,6 +22,6 @@
/*
* Physical DRAM offset of bank 0
*/
-#define PHYS_OFFSET UL(0x80000000)
+#define PLAT_PHYS_OFFSET UL(0x80000000)
#endif
diff --git a/arch/arm/mach-mmp/include/mach/memory.h b/arch/arm/mach-mmp/include/mach/memory.h
index bdb21d70714c..d68b50a2d6a0 100644
--- a/arch/arm/mach-mmp/include/mach/memory.h
+++ b/arch/arm/mach-mmp/include/mach/memory.h
@@ -9,6 +9,6 @@
#ifndef __ASM_MACH_MEMORY_H
#define __ASM_MACH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif /* __ASM_MACH_MEMORY_H */
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 4aec493640b4..2cbf6df09b82 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -11,8 +11,8 @@ extern void __init mmp2_init_irq(void);
extern void mmp2_clear_pmic_int(void);
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <mach/devices.h>
-#include <plat/i2c.h>
extern struct pxa_device_desc mmp2_device_uart1;
extern struct pxa_device_desc mmp2_device_uart2;
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 1801e4206232..a52b3d2f325c 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -8,8 +8,8 @@ extern void __init pxa168_init_irq(void);
extern void pxa168_clear_keypad_wakeup(void);
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <mach/devices.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
#include <video/pxa168fb.h>
#include <plat/pxa27x_keypad.h>
diff --git a/arch/arm/mach-mmp/include/mach/pxa910.h b/arch/arm/mach-mmp/include/mach/pxa910.h
index f13c49d6f8dc..91be75591398 100644
--- a/arch/arm/mach-mmp/include/mach/pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/pxa910.h
@@ -7,8 +7,8 @@ extern struct sys_timer pxa910_timer;
extern void __init pxa910_init_irq(void);
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <mach/devices.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
extern struct pxa_device_desc pxa910_device_uart1;
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 5d3d9ade12fb..997c5bda8c18 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -45,7 +45,16 @@ config ARCH_MSM8X60
select CPU_V7
select MSM_V2_TLMM
select MSM_GPIOMUX
- select IOMMU_API
+ select MSM_SCM if SMP
+
+config ARCH_MSM8960
+ bool "MSM8960"
+ select ARCH_MSM_SCORPIONMP
+ select MACH_MSM8960_SIM if (!MACH_MSM8960_RUMI3)
+ select ARM_GIC
+ select CPU_V7
+ select MSM_V2_TLMM
+ select MSM_GPIOMUX
select MSM_SCM if SMP
endchoice
@@ -125,8 +134,32 @@ config MACH_MSM8X60_FFA
help
Support for the Qualcomm MSM8x60 FFA eval board.
+config MACH_MSM8960_SIM
+ depends on ARCH_MSM8960
+ bool "MSM8960 Simulator"
+ help
+ Support for the Qualcomm MSM8960 simulator.
+
+config MACH_MSM8960_RUMI3
+ depends on ARCH_MSM8960
+ bool "MSM8960 RUMI3"
+ help
+ Support for the Qualcomm MSM8960 RUMI3 emulator.
+
endmenu
+config MSM_IOMMU
+ bool "MSM IOMMU Support"
+ depends on ARCH_MSM8X60 || ARCH_MSM8960
+ select IOMMU_API
+ default n
+ help
+ Support for the IOMMUs found on certain Qualcomm SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
config IOMMU_PGTABLES_L2
def_bool y
depends on ARCH_MSM8X60 && MMU && SMP && CPU_DCACHE_DISABLE=n
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 94195c190e13..9519fd28a025 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -1,21 +1,16 @@
obj-y += io.o idle.o timer.o
-ifndef CONFIG_ARCH_MSM8X60
-obj-y += acpuclock-arm11.o
-obj-y += dma.o
-endif
+obj-y += clock.o
+obj-$(CONFIG_DEBUG_FS) += clock-debug.o
-ifdef CONFIG_MSM_VIC
-obj-y += irq-vic.o
-else
-ifndef CONFIG_ARCH_MSM8X60
-obj-y += irq.o
-endif
-endif
+obj-$(CONFIG_MSM_VIC) += irq-vic.o
+obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o
+
+obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o
+obj-$(CONFIG_ARCH_MSM7X30) += dma.o
+obj-$(CONFIG_ARCH_QSD8X50) += dma.o sirc.o
-obj-$(CONFIG_ARCH_MSM8X60) += clock-dummy.o iommu.o iommu_dev.o devices-msm8x60-iommu.o
obj-$(CONFIG_MSM_PROC_COMM) += proc_comm.o clock-pcom.o vreg.o
-obj-$(CONFIG_MSM_PROC_COMM) += clock.o
-obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
+
obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
obj-$(CONFIG_MSM_SMD) += last_radio_log.o
obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
@@ -29,12 +24,16 @@ obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o devices-msm7x00.o
obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o devices-msm7x30.o
obj-$(CONFIG_ARCH_QSD8X50) += board-qsd8x50.o devices-qsd8x50.o
obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60.o
+obj-$(CONFIG_ARCH_MSM8960) += board-msm8960.o devices-msm8960.o
-obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
+obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_MSM8X60) += gpiomux-8x60.o gpiomux-v2.o gpiomux.o
ifdef CONFIG_MSM_V2_TLMM
+ifndef CONFIG_ARCH_MSM8960
+# TODO: TLMM Mapping issues need to be resolved
obj-y += gpio-v2.o
+endif
else
obj-y += gpio.o
endif
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 75dabb16c802..18a3c97bc863 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -93,8 +93,6 @@ static void __init halibut_map_io(void)
}
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
.boot_params = 0x10000100,
.fixup = halibut_fixup,
.map_io = halibut_map_io,
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index ef3ebf2f763b..7a9a03eb189c 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -74,8 +74,6 @@ static void __init mahimahi_map_io(void)
extern struct sys_timer msm_timer;
MACHINE_START(MAHIMAHI, "mahimahi")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
.boot_params = 0x20000100,
.fixup = mahimahi_fixup,
.map_io = mahimahi_map_io,
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
index e7a76eff57d9..c03f269e2e4b 100644
--- a/arch/arm/mach-msm/board-msm7x27.c
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -130,9 +130,7 @@ static void __init msm7x2x_map_io(void)
}
MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
@@ -140,9 +138,7 @@ MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
MACHINE_END
MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
@@ -150,9 +146,7 @@ MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
MACHINE_END
MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
@@ -160,9 +154,7 @@ MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
MACHINE_END
MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 6f3b9735e970..b7a84966b711 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -23,19 +23,21 @@
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/usb/msm_hsusb.h>
+#include <linux/clkdev.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/memory.h>
#include <asm/setup.h>
#include <mach/gpio.h>
#include <mach/board.h>
-#include <mach/memory.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/vreg.h>
#include "devices.h"
+#include "gpiomux.h"
#include "proc_comm.h"
extern struct sys_timer msm_timer;
@@ -52,6 +54,27 @@ static struct msm_otg_platform_data msm_otg_pdata = {
.otg_control = OTG_PHY_CONTROL,
};
+struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ [49] = { /* UART2 RFR */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+ [50] = { /* UART2 CTS */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+ [51] = { /* UART2 RX */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+ [52] = { /* UART2 TX */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+#endif
+};
+
static struct platform_device *devices[] __initdata = {
#if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER)
&msm_device_uart2,
@@ -83,9 +106,7 @@ static void __init msm7x30_map_io(void)
}
MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
@@ -93,9 +114,7 @@ MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
MACHINE_END
MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
@@ -103,9 +122,7 @@ MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
MACHINE_END
MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
new file mode 100644
index 000000000000..1993721d472e
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -0,0 +1,91 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clkdev.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
+
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+
+#include "devices.h"
+
+static void __init msm8960_map_io(void)
+{
+ msm_map_msm8960_io();
+}
+
+static void __init msm8960_init_irq(void)
+{
+ unsigned int i;
+ gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
+ (void *)MSM_QGIC_CPU_BASE);
+
+ /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
+ writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);
+
+ if (machine_is_msm8960_rumi3())
+ writel(0x0000FFFF, MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_SET);
+
+ /* FIXME: Not installing AVS_SVICINT and AVS_SVICINTSWDONE yet
+ * as they are configured as level, which does not play nice with
+ * handle_percpu_irq.
+ */
+ for (i = GIC_PPI_START; i < GIC_SPI_START; i++) {
+ if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE)
+ set_irq_handler(i, handle_percpu_irq);
+ }
+}
+
+static struct platform_device *sim_devices[] __initdata = {
+ &msm8960_device_uart_gsbi2,
+};
+
+static struct platform_device *rumi3_devices[] __initdata = {
+ &msm8960_device_uart_gsbi5,
+};
+
+static void __init msm8960_sim_init(void)
+{
+ platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
+}
+
+static void __init msm8960_rumi3_init(void)
+{
+ platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
+}
+
+MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
+ .map_io = msm8960_map_io,
+ .init_irq = msm8960_init_irq,
+ .timer = &msm_timer,
+ .init_machine = msm8960_sim_init,
+MACHINE_END
+
+MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
+ .map_io = msm8960_map_io,
+ .init_irq = msm8960_init_irq,
+ .timer = &msm_timer,
+ .init_machine = msm8960_rumi3_init,
+MACHINE_END
+
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 9b5eb2b4ae1b..b3c55f138fce 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -28,10 +28,6 @@
#include <mach/board.h>
#include <mach/msm_iomap.h>
-unsigned long clk_get_max_axi_khz(void)
-{
- return 0;
-}
static void __init msm8x60_map_io(void)
{
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 6dde8185205f..7f568611547e 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/usb/msm_hsusb.h>
+#include <linux/err.h>
+#include <linux/clkdev.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -31,6 +33,8 @@
#include <mach/irqs.h>
#include <mach/sirc.h>
#include <mach/gpio.h>
+#include <mach/vreg.h>
+#include <mach/mmc.h>
#include "devices.h"
@@ -95,6 +99,81 @@ static struct platform_device *devices[] __initdata = {
&msm_device_hsusb_host,
};
+static struct msm_mmc_gpio sdc1_gpio_cfg[] = {
+ {51, "sdc1_dat_3"},
+ {52, "sdc1_dat_2"},
+ {53, "sdc1_dat_1"},
+ {54, "sdc1_dat_0"},
+ {55, "sdc1_cmd"},
+ {56, "sdc1_clk"}
+};
+
+static struct vreg *vreg_mmc;
+static unsigned long vreg_sts;
+
+static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+
+ pdev = container_of(dv, struct platform_device, dev);
+
+ if (vdd == 0) {
+ if (!vreg_sts)
+ return 0;
+
+ clear_bit(pdev->id, &vreg_sts);
+
+ if (!vreg_sts) {
+ rc = vreg_disable(vreg_mmc);
+ if (rc)
+ pr_err("vreg_mmc disable failed for slot "
+ "%d: %d\n", pdev->id, rc);
+ }
+ return 0;
+ }
+
+ if (!vreg_sts) {
+ rc = vreg_set_level(vreg_mmc, 2900);
+ if (rc)
+ pr_err("vreg_mmc set level failed for slot %d: %d\n",
+ pdev->id, rc);
+ rc = vreg_enable(vreg_mmc);
+ if (rc)
+ pr_err("vreg_mmc enable failed for slot %d: %d\n",
+ pdev->id, rc);
+ }
+ set_bit(pdev->id, &vreg_sts);
+ return 0;
+}
+
+static struct msm_mmc_gpio_data sdc1_gpio = {
+ .gpio = sdc1_gpio_cfg,
+ .size = ARRAY_SIZE(sdc1_gpio_cfg),
+};
+
+static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
+ .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
+ .translate_vdd = msm_sdcc_setup_power,
+ .gpio_data = &sdc1_gpio,
+};
+
+static void __init qsd8x50_init_mmc(void)
+{
+ if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
+ vreg_mmc = vreg_get(NULL, "gp6");
+ else
+ vreg_mmc = vreg_get(NULL, "gp5");
+
+ if (IS_ERR(vreg_mmc)) {
+ pr_err("vreg get for vreg_mmc failed (%ld)\n",
+ PTR_ERR(vreg_mmc));
+ return;
+ }
+
+ msm_add_sdcc(1, &qsd8x50_sdc1_data, 0, 0);
+}
+
static void __init qsd8x50_map_io(void)
{
msm_map_qsd8x50_io();
@@ -113,12 +192,11 @@ static void __init qsd8x50_init(void)
msm_device_hsusb.dev.parent = &msm_device_otg.dev;
msm_device_hsusb_host.dev.parent = &msm_device_otg.dev;
platform_add_devices(devices, ARRAY_SIZE(devices));
+ qsd8x50_init_mmc();
}
MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = qsd8x50_map_io,
.init_irq = qsd8x50_init_irq,
.init_machine = qsd8x50_init,
@@ -126,9 +204,7 @@ MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
MACHINE_END
MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.map_io = qsd8x50_map_io,
.init_irq = qsd8x50_init_irq,
.init_machine = qsd8x50_init,
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 8919ffb17196..68f930f07d77 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -105,9 +105,7 @@ static void __init sapphire_map_io(void)
MACHINE_START(SAPPHIRE, "sapphire")
/* Maintainer: Brian Swetland <swetland@google.com> */
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
.fixup = sapphire_fixup,
.map_io = sapphire_map_io,
.init_irq = sapphire_init_irq,
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c
index a604ec1e44bf..31117a4499c4 100644
--- a/arch/arm/mach-msm/board-trout-gpio.c
+++ b/arch/arm/mach-msm/board-trout-gpio.c
@@ -74,8 +74,6 @@ static int msm_gpiolib_direction_output(struct gpio_chip *chip,
static int trout_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip);
-
return TROUT_GPIO_TO_INT(offset + chip->base);
}
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 73f146066542..814386772c66 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/clkdev.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -92,8 +93,6 @@ static void __init trout_map_io(void)
}
MACHINE_START(TROUT, "HTC Dream")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
.boot_params = 0x10000100,
.fixup = trout_fixup,
.map_io = trout_map_io,
diff --git a/arch/arm/mach-msm/clock-7x30.h b/arch/arm/mach-msm/clock-7x30.h
index e16f72f32829..14104453688b 100644
--- a/arch/arm/mach-msm/clock-7x30.h
+++ b/arch/arm/mach-msm/clock-7x30.h
@@ -1,30 +1,13 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __ARCH_ARM_MACH_MSM_CLOCK_7X30_H
@@ -147,22 +130,26 @@ void pll_disable(uint32_t pll);
extern int internal_pwr_rail_ctl_auto(unsigned rail_id, bool enable);
#define CLK_7X30(clk_name, clk_id, clk_dev, clk_flags) { \
- .name = clk_name, \
- .id = L_7X30_##clk_id, \
- .remote_id = P_##clk_id, \
- .flags = clk_flags, \
- .dev = clk_dev, \
- .dbg_name = #clk_id, \
+ .con_id = clk_name, \
+ .dev_id = clk_dev, \
+ .clk = &(struct clk){ \
+ .id = L_7X30_##clk_id, \
+ .remote_id = P_##clk_id, \
+ .flags = clk_flags, \
+ .dbg_name = #clk_id, \
+ }, \
}
#define CLK_7X30S(clk_name, l_id, r_id, clk_dev, clk_flags) { \
- .name = clk_name, \
- .id = L_7X30_##l_id, \
- .remote_id = P_##r_id, \
- .flags = clk_flags, \
- .dev = clk_dev, \
- .dbg_name = #l_id, \
+ .con_id = clk_name, \
+ .dev_id = clk_dev, \
+ .clk = &(struct clk){ \
+ .id = L_7X30_##l_id, \
+ .remote_id = P_##r_id, \
+ .flags = clk_flags, \
+ .dbg_name = #l_id, \
+ .ops = &clk_ops_pcom, \
+ }, \
}
#endif
-
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
new file mode 100644
index 000000000000..4886404d42f5
--- /dev/null
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/clk.h>
+#include "clock.h"
+
+static int clock_debug_rate_set(void *data, u64 val)
+{
+ struct clk *clock = data;
+ int ret;
+
+ /* Only increases to max rate will succeed, but that's actually good
+ * for debugging purposes so we don't check for error. */
+ if (clock->flags & CLK_MAX)
+ clk_set_max_rate(clock, val);
+ if (clock->flags & CLK_MIN)
+ ret = clk_set_min_rate(clock, val);
+ else
+ ret = clk_set_rate(clock, val);
+ if (ret != 0)
+ printk(KERN_ERR "clk_set%s_rate failed (%d)\n",
+ (clock->flags & CLK_MIN) ? "_min" : "", ret);
+ return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+ *val = clk_get_rate(clock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+ clock_debug_rate_set, "%llu\n");
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+ struct clk *clock = data;
+ int rc = 0;
+
+ if (val)
+ rc = clock->ops->enable(clock->id);
+ else
+ clock->ops->disable(clock->id);
+
+ return rc;
+}
+
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+
+ *val = clock->ops->is_enabled(clock->id);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+ clock_debug_enable_set, "%llu\n");
+
+static int clock_debug_local_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+
+ *val = clock->ops->is_local(clock->id);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
+ NULL, "%llu\n");
+
+static struct dentry *debugfs_base;
+
+int __init clock_debug_init(void)
+{
+ debugfs_base = debugfs_create_dir("clk", NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+ return 0;
+}
+
+int __init clock_debug_add(struct clk *clock)
+{
+ char temp[50], *ptr;
+ struct dentry *clk_dir;
+
+ if (!debugfs_base)
+ return -ENOMEM;
+
+ strncpy(temp, clock->dbg_name, ARRAY_SIZE(temp)-1);
+ for (ptr = temp; *ptr; ptr++)
+ *ptr = tolower(*ptr);
+
+ clk_dir = debugfs_create_dir(temp, debugfs_base);
+ if (!clk_dir)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
+ clock, &clock_rate_fops))
+ goto error;
+
+ if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
+ clock, &clock_enable_fops))
+ goto error;
+
+ if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
+ &clock_local_fops))
+ goto error;
+ return 0;
+error:
+ debugfs_remove_recursive(clk_dir);
+ return -ENOMEM;
+}
diff --git a/arch/arm/mach-msm/clock-dummy.c b/arch/arm/mach-msm/clock-dummy.c
deleted file mode 100644
index 1250d22082ee..000000000000
--- a/arch/arm/mach-msm/clock-dummy.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-int clk_enable(struct clk *clk)
-{
- return -ENOENT;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return -ENOENT;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
index a3b45627eb4a..63b711311086 100644
--- a/arch/arm/mach-msm/clock-pcom.c
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -20,6 +20,7 @@
#include "proc_comm.h"
#include "clock.h"
+#include "clock-pcom.h"
/*
* glue for the proc_comm interface
@@ -116,6 +117,11 @@ long pc_clk_round_rate(unsigned id, unsigned rate)
return rate;
}
+static bool pc_clk_is_local(unsigned id)
+{
+ return false;
+}
+
struct clk_ops clk_ops_pcom = {
.enable = pc_clk_enable,
.disable = pc_clk_disable,
@@ -128,4 +134,5 @@ struct clk_ops clk_ops_pcom = {
.get_rate = pc_clk_get_rate,
.is_enabled = pc_clk_is_enabled,
.round_rate = pc_clk_round_rate,
+ .is_local = pc_clk_is_local,
};
diff --git a/arch/arm/mach-msm/clock-pcom.h b/arch/arm/mach-msm/clock-pcom.h
index 17d027b23501..974d0032f3a3 100644
--- a/arch/arm/mach-msm/clock-pcom.h
+++ b/arch/arm/mach-msm/clock-pcom.h
@@ -1,30 +1,13 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
@@ -132,8 +115,10 @@
#define P_CSI1_P_CLK 97
#define P_GSBI_CLK 98
#define P_GSBI_P_CLK 99
+#define P_CE_CLK 100 /* Crypto engine */
+#define P_CODEC_SSBI_CLK 101
-#define P_NR_CLKS 100
+#define P_NR_CLKS 102
struct clk_ops;
extern struct clk_ops clk_ops_pcom;
@@ -141,13 +126,15 @@ extern struct clk_ops clk_ops_pcom;
int pc_clk_reset(unsigned id, enum clk_reset_action action);
#define CLK_PCOM(clk_name, clk_id, clk_dev, clk_flags) { \
- .name = clk_name, \
- .id = P_##clk_id, \
- .remote_id = P_##clk_id, \
- .ops = &clk_ops_pcom, \
- .flags = clk_flags, \
- .dev = clk_dev, \
- .dbg_name = #clk_id, \
+ .con_id = clk_name, \
+ .dev_id = clk_dev, \
+ .clk = &(struct clk){ \
+ .id = P_##clk_id, \
+ .remote_id = P_##clk_id, \
+ .ops = &clk_ops_pcom, \
+ .flags = clk_flags, \
+ .dbg_name = #clk_id, \
+ }, \
}
#endif
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 2069bfaa3a26..22a537669624 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -15,74 +15,32 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
#include <linux/pm_qos_params.h>
-#include <mach/clk.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
#include "clock.h"
-#include "proc_comm.h"
-#include "clock-7x30.h"
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(clocks);
-struct clk *msm_clocks;
-unsigned msm_num_clocks;
-
-/*
- * Bitmap of enabled clocks, excluding ACPU which is always
- * enabled
- */
-static DECLARE_BITMAP(clock_map_enabled, NR_CLKS);
-static DEFINE_SPINLOCK(clock_map_lock);
/*
* Standard clock functions defined in include/linux/clk.h
*/
-struct clk *clk_get(struct device *dev, const char *id)
-{
- struct clk *clk;
-
- mutex_lock(&clocks_mutex);
-
- list_for_each_entry(clk, &clocks, list)
- if (!strcmp(id, clk->name) && clk->dev == dev)
- goto found_it;
-
- list_for_each_entry(clk, &clocks, list)
- if (!strcmp(id, clk->name) && clk->dev == NULL)
- goto found_it;
-
- clk = ERR_PTR(-ENOENT);
-found_it:
- mutex_unlock(&clocks_mutex);
- return clk;
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
int clk_enable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
clk->count++;
- if (clk->count == 1) {
+ if (clk->count == 1)
clk->ops->enable(clk->id);
- spin_lock(&clock_map_lock);
- clock_map_enabled[BIT_WORD(clk->id)] |= BIT_MASK(clk->id);
- spin_unlock(&clock_map_lock);
- }
spin_unlock_irqrestore(&clocks_lock, flags);
return 0;
}
@@ -94,20 +52,14 @@ void clk_disable(struct clk *clk)
spin_lock_irqsave(&clocks_lock, flags);
BUG_ON(clk->count == 0);
clk->count--;
- if (clk->count == 0) {
+ if (clk->count == 0)
clk->ops->disable(clk->id);
- spin_lock(&clock_map_lock);
- clock_map_enabled[BIT_WORD(clk->id)] &= ~BIT_MASK(clk->id);
- spin_unlock(&clock_map_lock);
- }
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
int clk_reset(struct clk *clk, enum clk_reset_action action)
{
- if (!clk->ops->reset)
- clk->ops->reset = &pc_clk_reset;
return clk->ops->reset(clk->remote_id, action);
}
EXPORT_SYMBOL(clk_reset);
@@ -184,25 +136,14 @@ EXPORT_SYMBOL(clk_set_flags);
*/
static struct clk *ebi1_clk;
-static void __init set_clock_ops(struct clk *clk)
-{
- if (!clk->ops) {
- clk->ops = &clk_ops_pcom;
- clk->id = clk->remote_id;
- }
-}
-
-void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks)
+void __init msm_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks)
{
unsigned n;
- spin_lock_init(&clocks_lock);
mutex_lock(&clocks_mutex);
- msm_clocks = clock_tbl;
- msm_num_clocks = num_clocks;
- for (n = 0; n < msm_num_clocks; n++) {
- set_clock_ops(&msm_clocks[n]);
- list_add_tail(&msm_clocks[n].list, &clocks);
+ for (n = 0; n < num_clocks; n++) {
+ clkdev_add(&clock_tbl[n]);
+ list_add_tail(&clock_tbl[n].clk->list, &clocks);
}
mutex_unlock(&clocks_mutex);
@@ -211,115 +152,6 @@ void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks)
}
-#if defined(CONFIG_DEBUG_FS)
-static struct clk *msm_clock_get_nth(unsigned index)
-{
- if (index < msm_num_clocks)
- return msm_clocks + index;
- else
- return 0;
-}
-
-static int clock_debug_rate_set(void *data, u64 val)
-{
- struct clk *clock = data;
- int ret;
-
- /* Only increases to max rate will succeed, but that's actually good
- * for debugging purposes. So we don't check for error. */
- if (clock->flags & CLK_MAX)
- clk_set_max_rate(clock, val);
- if (clock->flags & CLK_MIN)
- ret = clk_set_min_rate(clock, val);
- else
- ret = clk_set_rate(clock, val);
- if (ret != 0)
- printk(KERN_ERR "clk_set%s_rate failed (%d)\n",
- (clock->flags & CLK_MIN) ? "_min" : "", ret);
- return ret;
-}
-
-static int clock_debug_rate_get(void *data, u64 *val)
-{
- struct clk *clock = data;
- *val = clk_get_rate(clock);
- return 0;
-}
-
-static int clock_debug_enable_set(void *data, u64 val)
-{
- struct clk *clock = data;
- int rc = 0;
-
- if (val)
- rc = clock->ops->enable(clock->id);
- else
- clock->ops->disable(clock->id);
-
- return rc;
-}
-
-static int clock_debug_enable_get(void *data, u64 *val)
-{
- struct clk *clock = data;
-
- *val = clock->ops->is_enabled(clock->id);
-
- return 0;
-}
-
-static int clock_debug_local_get(void *data, u64 *val)
-{
- struct clk *clock = data;
-
- *val = clock->ops != &clk_ops_pcom;
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
- clock_debug_rate_set, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
- clock_debug_enable_set, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
- NULL, "%llu\n");
-
-static int __init clock_debug_init(void)
-{
- struct dentry *dent_rate, *dent_enable, *dent_local;
- struct clk *clock;
- unsigned n = 0;
- char temp[50], *ptr;
-
- dent_rate = debugfs_create_dir("clk_rate", 0);
- if (IS_ERR(dent_rate))
- return PTR_ERR(dent_rate);
-
- dent_enable = debugfs_create_dir("clk_enable", 0);
- if (IS_ERR(dent_enable))
- return PTR_ERR(dent_enable);
-
- dent_local = debugfs_create_dir("clk_local", NULL);
- if (IS_ERR(dent_local))
- return PTR_ERR(dent_local);
-
- while ((clock = msm_clock_get_nth(n++)) != 0) {
- strncpy(temp, clock->dbg_name, ARRAY_SIZE(temp)-1);
- for (ptr = temp; *ptr; ptr++)
- *ptr = tolower(*ptr);
- debugfs_create_file(temp, 0644, dent_rate,
- clock, &clock_rate_fops);
- debugfs_create_file(temp, 0644, dent_enable,
- clock, &clock_enable_fops);
- debugfs_create_file(temp, S_IRUGO, dent_local,
- clock, &clock_local_fops);
- }
- return 0;
-}
-
-device_initcall(clock_debug_init);
-#endif
-
/* The bootloader and/or AMSS may have left various clocks enabled.
* Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have
* not been explicitly enabled by a clk_enable() call.
@@ -330,8 +162,10 @@ static int __init clock_late_init(void)
struct clk *clk;
unsigned count = 0;
+ clock_debug_init();
mutex_lock(&clocks_mutex);
list_for_each_entry(clk, &clocks, list) {
+ clock_debug_add(clk);
if (clk->flags & CLKFLAG_AUTO_OFF) {
spin_lock_irqsave(&clocks_lock, flags);
if (!clk->count) {
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index c270b552ed13..2c007f606d29 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -17,12 +17,10 @@
#ifndef __ARCH_ARM_MACH_MSM_CLOCK_H
#define __ARCH_ARM_MACH_MSM_CLOCK_H
+#include <linux/init.h>
#include <linux/list.h>
#include <mach/clk.h>
-#include "clock-pcom.h"
-#include "clock-7x30.h"
-
#define CLKFLAG_INVERT 0x00000001
#define CLKFLAG_NOINVERT 0x00000002
#define CLKFLAG_NONEST 0x00000004
@@ -45,6 +43,7 @@ struct clk_ops {
unsigned (*get_rate)(unsigned id);
unsigned (*is_enabled)(unsigned id);
long (*round_rate)(unsigned id, unsigned rate);
+ bool (*is_local)(unsigned id);
};
struct clk {
@@ -52,58 +51,22 @@ struct clk {
uint32_t remote_id;
uint32_t count;
uint32_t flags;
- const char *name;
struct clk_ops *ops;
const char *dbg_name;
struct list_head list;
- struct device *dev;
};
-#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
-#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
-#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
-
-#ifdef CONFIG_DEBUG_FS
-#define CLOCK_DBG_NAME(x) .dbg_name = x,
-#else
-#define CLOCK_DBG_NAME(x)
-#endif
-
-#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \
- .name = clk_name, \
- .id = clk_id, \
- .flags = clk_flags, \
- .dev = clk_dev, \
- CLOCK_DBG_NAME(#clk_id) \
- }
-
#define OFF CLKFLAG_AUTO_OFF
#define CLK_MIN CLKFLAG_MIN
#define CLK_MAX CLKFLAG_MAX
#define CLK_MINMAX (CLK_MIN | CLK_MAX)
-#define NR_CLKS P_NR_CLKS
-
-enum {
- PLL_0 = 0,
- PLL_1,
- PLL_2,
- PLL_3,
- PLL_4,
- PLL_5,
- PLL_6,
- NUM_PLL
-};
-
-enum clkvote_client {
- CLKVOTE_ACPUCLK = 0,
- CLKVOTE_PMQOS,
- CLKVOTE_MAX,
-};
-
-int msm_clock_require_tcxo(unsigned long *reason, int nbits);
-int msm_clock_get_name(uint32_t id, char *name, uint32_t size);
-int ebi1_clk_set_min_rate(enum clkvote_client client, unsigned long rate);
-unsigned long clk_get_max_axi_khz(void);
+#ifdef CONFIG_DEBUG_FS
+int __init clock_debug_init(void);
+int __init clock_debug_add(struct clk *clock);
+#else
+static inline int __init clock_debug_init(void) { return 0; }
+static inline int __init clock_debug_add(struct clk *clock) { return 0; }
#endif
+#endif
diff --git a/arch/arm/mach-msm/devices-msm8x60-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index f9e7bd34ec59..c0206b727502 100644
--- a/arch/arm/mach-msm/devices-msm8x60-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,15 +18,13 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
-
-#include <mach/msm_iomap-8x60.h>
-#include <mach/irqs-8x60.h>
+#include <mach/irqs.h>
#include <mach/iommu.h>
static struct resource msm_iommu_jpegd_resources[] = {
{
- .start = MSM_IOMMU_JPEGD_PHYS,
- .end = MSM_IOMMU_JPEGD_PHYS + MSM_IOMMU_JPEGD_SIZE - 1,
+ .start = 0x07300000,
+ .end = 0x07300000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -46,8 +44,8 @@ static struct resource msm_iommu_jpegd_resources[] = {
static struct resource msm_iommu_vpe_resources[] = {
{
- .start = MSM_IOMMU_VPE_PHYS,
- .end = MSM_IOMMU_VPE_PHYS + MSM_IOMMU_VPE_SIZE - 1,
+ .start = 0x07400000,
+ .end = 0x07400000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -67,8 +65,8 @@ static struct resource msm_iommu_vpe_resources[] = {
static struct resource msm_iommu_mdp0_resources[] = {
{
- .start = MSM_IOMMU_MDP0_PHYS,
- .end = MSM_IOMMU_MDP0_PHYS + MSM_IOMMU_MDP0_SIZE - 1,
+ .start = 0x07500000,
+ .end = 0x07500000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -88,8 +86,8 @@ static struct resource msm_iommu_mdp0_resources[] = {
static struct resource msm_iommu_mdp1_resources[] = {
{
- .start = MSM_IOMMU_MDP1_PHYS,
- .end = MSM_IOMMU_MDP1_PHYS + MSM_IOMMU_MDP1_SIZE - 1,
+ .start = 0x07600000,
+ .end = 0x07600000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -109,8 +107,8 @@ static struct resource msm_iommu_mdp1_resources[] = {
static struct resource msm_iommu_rot_resources[] = {
{
- .start = MSM_IOMMU_ROT_PHYS,
- .end = MSM_IOMMU_ROT_PHYS + MSM_IOMMU_ROT_SIZE - 1,
+ .start = 0x07700000,
+ .end = 0x07700000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -130,8 +128,8 @@ static struct resource msm_iommu_rot_resources[] = {
static struct resource msm_iommu_ijpeg_resources[] = {
{
- .start = MSM_IOMMU_IJPEG_PHYS,
- .end = MSM_IOMMU_IJPEG_PHYS + MSM_IOMMU_IJPEG_SIZE - 1,
+ .start = 0x07800000,
+ .end = 0x07800000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -151,8 +149,8 @@ static struct resource msm_iommu_ijpeg_resources[] = {
static struct resource msm_iommu_vfe_resources[] = {
{
- .start = MSM_IOMMU_VFE_PHYS,
- .end = MSM_IOMMU_VFE_PHYS + MSM_IOMMU_VFE_SIZE - 1,
+ .start = 0x07900000,
+ .end = 0x07900000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -172,8 +170,8 @@ static struct resource msm_iommu_vfe_resources[] = {
static struct resource msm_iommu_vcodec_a_resources[] = {
{
- .start = MSM_IOMMU_VCODEC_A_PHYS,
- .end = MSM_IOMMU_VCODEC_A_PHYS + MSM_IOMMU_VCODEC_A_SIZE - 1,
+ .start = 0x07A00000,
+ .end = 0x07A00000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -193,8 +191,8 @@ static struct resource msm_iommu_vcodec_a_resources[] = {
static struct resource msm_iommu_vcodec_b_resources[] = {
{
- .start = MSM_IOMMU_VCODEC_B_PHYS,
- .end = MSM_IOMMU_VCODEC_B_PHYS + MSM_IOMMU_VCODEC_B_SIZE - 1,
+ .start = 0x07B00000,
+ .end = 0x07B00000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -214,8 +212,8 @@ static struct resource msm_iommu_vcodec_b_resources[] = {
static struct resource msm_iommu_gfx3d_resources[] = {
{
- .start = MSM_IOMMU_GFX3D_PHYS,
- .end = MSM_IOMMU_GFX3D_PHYS + MSM_IOMMU_GFX3D_SIZE - 1,
+ .start = 0x07C00000,
+ .end = 0x07C00000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -235,8 +233,8 @@ static struct resource msm_iommu_gfx3d_resources[] = {
static struct resource msm_iommu_gfx2d0_resources[] = {
{
- .start = MSM_IOMMU_GFX2D0_PHYS,
- .end = MSM_IOMMU_GFX2D0_PHYS + MSM_IOMMU_GFX2D0_SIZE - 1,
+ .start = 0x07D00000,
+ .end = 0x07D00000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
@@ -256,8 +254,8 @@ static struct resource msm_iommu_gfx2d0_resources[] = {
static struct resource msm_iommu_gfx2d1_resources[] = {
{
- .start = MSM_IOMMU_GFX2D1_PHYS,
- .end = MSM_IOMMU_GFX2D1_PHYS + MSM_IOMMU_GFX2D1_SIZE - 1,
+ .start = 0x07E00000,
+ .end = 0x07E00000 + SZ_1M - 1,
.name = "physbase",
.flags = IORESOURCE_MEM,
},
diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c
index fb548a8a21db..c4f5e26feb4d 100644
--- a/arch/arm/mach-msm/devices-msm7x00.c
+++ b/arch/arm/mach-msm/devices-msm7x00.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/clkdev.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
@@ -24,8 +25,8 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
-
#include "clock.h"
+#include "clock-pcom.h"
#include <mach/mmc.h>
static struct resource resources_uart1[] = {
@@ -38,6 +39,7 @@ static struct resource resources_uart1[] = {
.start = MSM_UART1_PHYS,
.end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1,
.flags = IORESOURCE_MEM,
+ .name = "uart_resource"
},
};
@@ -51,6 +53,7 @@ static struct resource resources_uart2[] = {
.start = MSM_UART2_PHYS,
.end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1,
.flags = IORESOURCE_MEM,
+ .name = "uart_resource"
},
};
@@ -64,6 +67,7 @@ static struct resource resources_uart3[] = {
.start = MSM_UART3_PHYS,
.end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
.flags = IORESOURCE_MEM,
+ .name = "uart_resource"
},
};
@@ -414,7 +418,7 @@ struct platform_device msm_device_mdp = {
.resource = resources_mdp,
};
-struct clk msm_clocks_7x01a[] = {
+struct clk_lookup msm_clocks_7x01a[] = {
CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, 0),
@@ -423,7 +427,7 @@ struct clk msm_clocks_7x01a[] = {
CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF),
CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, OFF),
- CLK_PCOM("i2c_clk", I2C_CLK, &msm_device_i2c.dev, 0),
+ CLK_PCOM("i2c_clk", I2C_CLK, "msm_i2c.0", 0),
CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
@@ -433,25 +437,25 @@ struct clk msm_clocks_7x01a[] = {
CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
- CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
- CLK_PCOM("sdc_pclk", SDC1_P_CLK, &msm_device_sdc1.dev, OFF),
- CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
- CLK_PCOM("sdc_pclk", SDC2_P_CLK, &msm_device_sdc2.dev, OFF),
- CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
- CLK_PCOM("sdc_pclk", SDC3_P_CLK, &msm_device_sdc3.dev, OFF),
- CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
- CLK_PCOM("sdc_pclk", SDC4_P_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, "msm_sdcc.1", OFF),
+ CLK_PCOM("sdc_pclk", SDC1_P_CLK, "msm_sdcc.1", OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, "msm_sdcc.2", OFF),
+ CLK_PCOM("sdc_pclk", SDC2_P_CLK, "msm_sdcc.2", OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, "msm_sdcc.3", OFF),
+ CLK_PCOM("sdc_pclk", SDC3_P_CLK, "msm_sdcc.3", OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, "msm_sdcc.4", OFF),
+ CLK_PCOM("sdc_pclk", SDC4_P_CLK, "msm_sdcc.4", OFF),
CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
- CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
- CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("uart_clk", UART1_CLK, "msm_serial.0", OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, "msm_serial.1", 0),
+ CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF),
CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF),
CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0),
- CLK_PCOM("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF),
- CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, &msm_device_hsusb.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, "msm_hsusb", OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, "msm_hsusb", OFF),
CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF ),
CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index 4e9a0ab3e937..09b4f1403824 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/clkdev.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
@@ -28,6 +29,7 @@
#include <asm/mach/flash.h>
#include "clock-pcom.h"
+#include "clock-7x30.h"
#include <mach/mmc.h>
@@ -41,6 +43,7 @@ static struct resource resources_uart2[] = {
.start = MSM_UART2_PHYS,
.end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1,
.flags = IORESOURCE_MEM,
+ .name = "uart_resource"
},
};
@@ -127,11 +130,13 @@ struct platform_device msm_device_hsusb_host = {
},
};
-struct clk msm_clocks_7x30[] = {
+struct clk_lookup msm_clocks_7x30[] = {
CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
CLK_PCOM("cam_m_clk", CAM_M_CLK, NULL, 0),
CLK_PCOM("camif_pad_pclk", CAMIF_PAD_P_CLK, NULL, OFF),
+ CLK_PCOM("ce_clk", CE_CLK, NULL, 0),
+ CLK_PCOM("codec_ssbi_clk", CODEC_SSBI_CLK, NULL, 0),
CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
@@ -177,7 +182,7 @@ struct clk msm_clocks_7x30[] = {
CLK_7X30S("tv_src_clk", TV_CLK, TV_ENC_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART2_CLK, "msm_serial.1", 0),
CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
diff --git a/arch/arm/mach-msm/devices-msm8960.c b/arch/arm/mach-msm/devices-msm8960.c
new file mode 100644
index 000000000000..d9e1f26475de
--- /dev/null
+++ b/arch/arm/mach-msm/devices-msm8960.c
@@ -0,0 +1,85 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/dma-mapping.h>
+#include <mach/irqs-8960.h>
+#include <mach/board.h>
+
+#include "devices.h"
+
+#define MSM_GSBI2_PHYS 0x16100000
+#define MSM_UART2DM_PHYS (MSM_GSBI2_PHYS + 0x40000)
+
+#define MSM_GSBI5_PHYS 0x16400000
+#define MSM_UART5DM_PHYS (MSM_GSBI5_PHYS + 0x40000)
+
+static struct resource resources_uart_gsbi2[] = {
+ {
+ .start = GSBI2_UARTDM_IRQ,
+ .end = GSBI2_UARTDM_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MSM_UART2DM_PHYS,
+ .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1,
+ .name = "uart_resource",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MSM_GSBI2_PHYS,
+ .end = MSM_GSBI2_PHYS + PAGE_SIZE - 1,
+ .name = "gsbi_resource",
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device msm8960_device_uart_gsbi2 = {
+ .name = "msm_serial",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(resources_uart_gsbi2),
+ .resource = resources_uart_gsbi2,
+};
+
+static struct resource resources_uart_gsbi5[] = {
+ {
+ .start = GSBI5_UARTDM_IRQ,
+ .end = GSBI5_UARTDM_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MSM_UART5DM_PHYS,
+ .end = MSM_UART5DM_PHYS + PAGE_SIZE - 1,
+ .name = "uart_resource",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MSM_GSBI5_PHYS,
+ .end = MSM_GSBI5_PHYS + PAGE_SIZE - 1,
+ .name = "gsbi_resource",
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device msm8960_device_uart_gsbi5 = {
+ .name = "msm_serial",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(resources_uart_gsbi5),
+ .resource = resources_uart_gsbi5,
+};
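/*
 * Editor's sketch, not part of the patch above: a board file would register
 * one of the GSBI UART platform devices declared in this new file. The init
 * function name and the choice of GSBI5 as console are illustrative
 * assumptions.
 */
#include <linux/init.h>
#include <linux/platform_device.h>

#include "devices.h"

static void __init msm8960_init_uart(void)
{
	/* Register the GSBI5 UART (assumed console choice for this sketch). */
	platform_device_register(&msm8960_device_uart_gsbi5);
}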
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
index a4b798f20ccb..12d8deb78d9c 100644
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -15,8 +15,9 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-
+#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
+
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
@@ -27,6 +28,7 @@
#include <asm/mach/flash.h>
#include <mach/mmc.h>
+#include "clock-pcom.h"
static struct resource resources_uart3[] = {
{
@@ -38,6 +40,7 @@ static struct resource resources_uart3[] = {
.start = MSM_UART3_PHYS,
.end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
.flags = IORESOURCE_MEM,
+ .name = "uart_resource"
},
};
@@ -124,14 +127,204 @@ struct platform_device msm_device_hsusb_host = {
},
};
-struct clk msm_clocks_8x50[] = {
+static struct resource resources_sdc1[] = {
+ {
+ .start = MSM_SDC1_PHYS,
+ .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SDC1_0,
+ .end = INT_SDC1_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC1_1,
+ .end = INT_SDC1_1,
+ .flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct resource resources_sdc2[] = {
+ {
+ .start = MSM_SDC2_PHYS,
+ .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SDC2_0,
+ .end = INT_SDC2_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC2_1,
+ .end = INT_SDC2_1,
+ .flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct resource resources_sdc3[] = {
+ {
+ .start = MSM_SDC3_PHYS,
+ .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SDC3_0,
+ .end = INT_SDC3_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC3_1,
+ .end = INT_SDC3_1,
+ .flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct resource resources_sdc4[] = {
+ {
+ .start = MSM_SDC4_PHYS,
+ .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SDC4_0,
+ .end = INT_SDC4_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC4_1,
+ .end = INT_SDC4_1,
+ .flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device msm_device_sdc1 = {
+ .name = "msm_sdcc",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(resources_sdc1),
+ .resource = resources_sdc1,
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device msm_device_sdc2 = {
+ .name = "msm_sdcc",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(resources_sdc2),
+ .resource = resources_sdc2,
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device msm_device_sdc3 = {
+ .name = "msm_sdcc",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(resources_sdc3),
+ .resource = resources_sdc3,
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device msm_device_sdc4 = {
+ .name = "msm_sdcc",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(resources_sdc4),
+ .resource = resources_sdc4,
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+static struct platform_device *msm_sdcc_devices[] __initdata = {
+ &msm_device_sdc1,
+ &msm_device_sdc2,
+ &msm_device_sdc3,
+ &msm_device_sdc4,
+};
+
+int __init msm_add_sdcc(unsigned int controller,
+ struct msm_mmc_platform_data *plat,
+ unsigned int stat_irq, unsigned long stat_irq_flags)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+
+ if (controller < 1 || controller > 4)
+ return -EINVAL;
+
+ pdev = msm_sdcc_devices[controller-1];
+ pdev->dev.platform_data = plat;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq");
+ if (!res)
+ return -EINVAL;
+ else if (stat_irq) {
+ res->start = res->end = stat_irq;
+ res->flags &= ~IORESOURCE_DISABLED;
+ res->flags |= stat_irq_flags;
+ }
+
+ return platform_device_register(pdev);
+}
+
+struct clk_lookup msm_clocks_8x50[] = {
CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("ce_clk", CE_CLK, NULL, 0),
CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
+ CLK_PCOM("i2c_clk", I2C_CLK, NULL, 0),
CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
@@ -144,12 +337,24 @@ struct clk msm_clocks_8x50[] = {
CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, "msm_sdcc.1", OFF),
+ CLK_PCOM("sdc_pclk", SDC1_P_CLK, "msm_sdcc.1", OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, "msm_sdcc.2", OFF),
+ CLK_PCOM("sdc_pclk", SDC2_P_CLK, "msm_sdcc.2", OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, "msm_sdcc.3", OFF),
+ CLK_PCOM("sdc_pclk", SDC3_P_CLK, "msm_sdcc.3", OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, "msm_sdcc.4", OFF),
+ CLK_PCOM("sdc_pclk", SDC4_P_CLK, "msm_sdcc.4", OFF),
CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("uart_clk", UART1_CLK, NULL, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF),
+ CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF),
+ CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0),
CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 87c70bfce2bd..9545c196c6e8 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -16,12 +16,17 @@
#ifndef __ARCH_ARM_MACH_MSM_DEVICES_H
#define __ARCH_ARM_MACH_MSM_DEVICES_H
+#include <linux/clkdev.h>
+
#include "clock.h"
extern struct platform_device msm_device_uart1;
extern struct platform_device msm_device_uart2;
extern struct platform_device msm_device_uart3;
+extern struct platform_device msm8960_device_uart_gsbi2;
+extern struct platform_device msm8960_device_uart_gsbi5;
+
extern struct platform_device msm_device_sdc1;
extern struct platform_device msm_device_sdc2;
extern struct platform_device msm_device_sdc3;
@@ -41,13 +46,13 @@ extern struct platform_device msm_device_mddi0;
extern struct platform_device msm_device_mddi1;
extern struct platform_device msm_device_mdp;
-extern struct clk msm_clocks_7x01a[];
+extern struct clk_lookup msm_clocks_7x01a[];
extern unsigned msm_num_clocks_7x01a;
-extern struct clk msm_clocks_7x30[];
+extern struct clk_lookup msm_clocks_7x30[];
extern unsigned msm_num_clocks_7x30;
-extern struct clk msm_clocks_8x50[];
+extern struct clk_lookup msm_clocks_8x50[];
extern unsigned msm_num_clocks_8x50;
#endif
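/*
 * Editor's sketch, not part of the patch: board code keeps calling
 * msm_clock_init() exactly as before; only the table's element type changes
 * from struct clk to struct clk_lookup. Shown here for QSD8x50 as an example.
 */
#include <linux/init.h>

#include <mach/board.h>

#include "devices.h"

static void __init board_qsd8x50_init_clocks(void)
{
	msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50);
}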
diff --git a/arch/arm/mach-msm/gpiomux-7x30.c b/arch/arm/mach-msm/gpiomux-7x30.c
deleted file mode 100644
index 6ce41c5241a5..000000000000
--- a/arch/arm/mach-msm/gpiomux-7x30.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include "gpiomux.h"
-
-struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
-#ifdef CONFIG_SERIAL_MSM_CONSOLE
- [49] = { /* UART2 RFR */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
- [50] = { /* UART2 CTS */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
- [51] = { /* UART2 RX */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
- [52] = { /* UART2 TX */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
-#endif
-};
diff --git a/arch/arm/mach-msm/gpiomux-8x50.c b/arch/arm/mach-msm/gpiomux-8x50.c
index 4406e0f4ae95..f7a4ea593c95 100644
--- a/arch/arm/mach-msm/gpiomux-8x50.c
+++ b/arch/arm/mach-msm/gpiomux-8x50.c
@@ -16,6 +16,19 @@
*/
#include "gpiomux.h"
+#if defined(CONFIG_MMC_MSM) || defined(CONFIG_MMC_MSM_MODULE)
+ #define SDCC_DAT_0_3_CMD_ACTV_CFG (GPIOMUX_VALID | GPIOMUX_PULL_UP\
+ | GPIOMUX_FUNC_1 | GPIOMUX_DRV_8MA)
+ #define SDCC_CLK_ACTV_CFG (GPIOMUX_VALID | GPIOMUX_PULL_NONE\
+ | GPIOMUX_FUNC_1 | GPIOMUX_DRV_8MA)
+#else
+ #define SDCC_DAT_0_3_CMD_ACTV_CFG 0
+ #define SDCC_CLK_ACTV_CFG 0
+#endif
+
+#define SDC1_SUSPEND_CONFIG (GPIOMUX_VALID | GPIOMUX_PULL_DOWN\
+ | GPIOMUX_FUNC_GPIO | GPIOMUX_DRV_2MA)
+
struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
[86] = { /* UART3 RX */
.suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
@@ -25,4 +38,14 @@ struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
.suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
GPIOMUX_FUNC_1 | GPIOMUX_VALID,
},
+ /* SDC1 data[3:0] & CMD */
+ [51 ... 55] = {
+ .active = SDCC_DAT_0_3_CMD_ACTV_CFG,
+ .suspended = SDC1_SUSPEND_CONFIG
+ },
+ /* SDC1 CLK */
+ [56] = {
+ .active = SDCC_CLK_ACTV_CFG,
+ .suspended = SDC1_SUSPEND_CONFIG
+ },
};
diff --git a/arch/arm/mach-msm/headsmp.S b/arch/arm/mach-msm/headsmp.S
index d0c214338df9..0c631a9f8647 100644
--- a/arch/arm/mach-msm/headsmp.S
+++ b/arch/arm/mach-msm/headsmp.S
@@ -11,7 +11,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __INIT
+ __CPUINIT
/*
* MSM specific entry point for secondary CPUs. This provides
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 6abf4a6eadc1..2ce8f1f2fc4d 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -31,7 +31,7 @@ struct msm_acpu_clock_platform_data
unsigned long wait_for_irq_khz;
};
-struct clk;
+struct clk_lookup;
extern struct sys_timer msm_timer;
@@ -41,7 +41,7 @@ void __init msm_add_devices(void);
void __init msm_map_common_io(void);
void __init msm_init_irq(void);
void __init msm_init_gpio(void);
-void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks);
+void __init msm_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks);
void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *);
int __init msm_add_sdcc(unsigned int controller,
struct msm_mmc_platform_data *plat,
diff --git a/arch/arm/mach-msm/include/mach/clk.h b/arch/arm/mach-msm/include/mach/clk.h
index c05ca40478c7..e8d38428d813 100644
--- a/arch/arm/mach-msm/include/mach/clk.h
+++ b/arch/arm/mach-msm/include/mach/clk.h
@@ -1,30 +1,13 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __MACH_CLK_H
#define __MACH_CLK_H
diff --git a/arch/arm/mach-msm/include/mach/clkdev.h b/arch/arm/mach-msm/include/mach/clkdev.h
new file mode 100644
index 000000000000..f87a57b59534
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/clkdev.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ASM_ARCH_MSM_CLKDEV_H
+#define __ASM_ARCH_MSM_CLKDEV_H
+
+struct clk;
+
+static inline int __clk_get(struct clk *clk) { return 1; }
+static inline void __clk_put(struct clk *clk) { }
+#endif
diff --git a/arch/arm/mach-msm/include/mach/cpu.h b/arch/arm/mach-msm/include/mach/cpu.h
new file mode 100644
index 000000000000..a9481b08d5c7
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/cpu.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CPU_H__
+#define __ARCH_ARM_MACH_MSM_CPU_H__
+
+/* TODO: For now, only one CPU can be compiled at a time. */
+
+#define cpu_is_msm7x01() 0
+#define cpu_is_msm7x30() 0
+#define cpu_is_qsd8x50() 0
+#define cpu_is_msm8x60() 0
+#define cpu_is_msm8960() 0
+
+#ifdef CONFIG_ARCH_MSM7X00A
+# undef cpu_is_msm7x01
+# define cpu_is_msm7x01() 1
+#endif
+
+#ifdef CONFIG_ARCH_MSM7X30
+# undef cpu_is_msm7x30
+# define cpu_is_msm7x30() 1
+#endif
+
+#ifdef CONFIG_ARCH_QSD8X50
+# undef cpu_is_qsd8x50
+# define cpu_is_qsd8x50() 1
+#endif
+
+#ifdef CONFIG_ARCH_MSM8X60
+# undef cpu_is_msm8x60
+# define cpu_is_msm8x60() 1
+#endif
+
+#ifdef CONFIG_ARCH_MSM8960
+# undef cpu_is_msm8960
+# define cpu_is_msm8960() 1
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/io.h b/arch/arm/mach-msm/include/mach/io.h
index 7386e732baad..dc1b928745e9 100644
--- a/arch/arm/mach-msm/include/mach/io.h
+++ b/arch/arm/mach-msm/include/mach/io.h
@@ -29,6 +29,7 @@ void __iomem *__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int m
void msm_map_qsd8x50_io(void);
void msm_map_msm7x30_io(void);
void msm_map_msm8x60_io(void);
+void msm_map_msm8960_io(void);
extern unsigned int msm_shared_ram_phys;
diff --git a/arch/arm/mach-msm/include/mach/irqs-7x30.h b/arch/arm/mach-msm/include/mach/irqs-7x30.h
index 67c5396514fe..1f15902655fd 100644
--- a/arch/arm/mach-msm/include/mach/irqs-7x30.h
+++ b/arch/arm/mach-msm/include/mach/irqs-7x30.h
@@ -1,30 +1,13 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __ASM_ARCH_MSM_IRQS_7X30_H
diff --git a/arch/arm/mach-msm/include/mach/irqs-8960.h b/arch/arm/mach-msm/include/mach/irqs-8960.h
new file mode 100644
index 000000000000..81ab2a6792bd
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-8960.h
@@ -0,0 +1,277 @@
+/* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_8960_H
+#define __ASM_ARCH_MSM_IRQS_8960_H
+
+/* MSM ACPU Interrupt Numbers */
+
+/* 0-15:  STI/SGI (software triggered/generated interrupts)
+ * 16-31: PPI (private peripheral interrupts)
+ * 32+:   SPI (shared peripheral interrupts) */
+
+#define GIC_PPI_START 16
+#define GIC_SPI_START 32
+
+#define INT_VGIC (GIC_PPI_START + 0)
+#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 1)
+#define INT_GP_TIMER_EXP (GIC_PPI_START + 2)
+#define INT_GP_TIMER2_EXP (GIC_PPI_START + 3)
+#define WDT0_ACCSCSSNBARK_INT (GIC_PPI_START + 4)
+#define WDT1_ACCSCSSNBARK_INT (GIC_PPI_START + 5)
+#define AVS_SVICINT (GIC_PPI_START + 6)
+#define AVS_SVICINTSWDONE (GIC_PPI_START + 7)
+#define CPU_DBGCPUXCOMMRXFULL (GIC_PPI_START + 8)
+#define CPU_DBGCPUXCOMMTXEMPTY (GIC_PPI_START + 9)
+#define CPU_SICCPUXPERFMONIRPTREQ (GIC_PPI_START + 10)
+#define SC_AVSCPUXDOWN (GIC_PPI_START + 11)
+#define SC_AVSCPUXUP (GIC_PPI_START + 12)
+#define SC_SICCPUXACGIRPTREQ (GIC_PPI_START + 13)
+#define SC_SICCPUXEXTFAULTIRPTREQ (GIC_PPI_START + 14)
+/* PPI 15 is unused */
+
+#define SC_SICMPUIRPTREQ (GIC_SPI_START + 0)
+#define SC_SICL2IRPTREQ (GIC_SPI_START + 1)
+#define SC_SICL2PERFMONIRPTREQ (GIC_SPI_START + 2)
+#define SC_SICAGCIRPTREQ (GIC_SPI_START + 3)
+#define TLMM_APCC_DIR_CONN_IRQ_0 (GIC_SPI_START + 4)
+#define TLMM_APCC_DIR_CONN_IRQ_1 (GIC_SPI_START + 5)
+#define TLMM_APCC_DIR_CONN_IRQ_2 (GIC_SPI_START + 6)
+#define TLMM_APCC_DIR_CONN_IRQ_3 (GIC_SPI_START + 7)
+#define TLMM_APCC_DIR_CONN_IRQ_4 (GIC_SPI_START + 8)
+#define TLMM_APCC_DIR_CONN_IRQ_5 (GIC_SPI_START + 9)
+#define TLMM_APCC_DIR_CONN_IRQ_6 (GIC_SPI_START + 10)
+#define TLMM_APCC_DIR_CONN_IRQ_7 (GIC_SPI_START + 11)
+#define TLMM_APCC_DIR_CONN_IRQ_8 (GIC_SPI_START + 12)
+#define TLMM_APCC_DIR_CONN_IRQ_9 (GIC_SPI_START + 13)
+#define PM8921_SEC_IRQ_103 (GIC_SPI_START + 14)
+#define PM8018_SEC_IRQ_106 (GIC_SPI_START + 15)
+#define TLMM_APCC_SUMMARY_IRQ (GIC_SPI_START + 16)
+#define SPDM_RT_1_IRQ (GIC_SPI_START + 17)
+#define SPDM_DIAG_IRQ (GIC_SPI_START + 18)
+#define RPM_APCC_CPU0_GP_HIGH_IRQ (GIC_SPI_START + 19)
+#define RPM_APCC_CPU0_GP_MEDIUM_IRQ (GIC_SPI_START + 20)
+#define RPM_APCC_CPU0_GP_LOW_IRQ (GIC_SPI_START + 21)
+#define RPM_APCC_CPU0_WAKE_UP_IRQ (GIC_SPI_START + 22)
+#define RPM_APCC_CPU1_GP_HIGH_IRQ (GIC_SPI_START + 23)
+#define RPM_APCC_CPU1_GP_MEDIUM_IRQ (GIC_SPI_START + 24)
+#define RPM_APCC_CPU1_GP_LOW_IRQ (GIC_SPI_START + 25)
+#define RPM_APCC_CPU1_WAKE_UP_IRQ (GIC_SPI_START + 26)
+#define SSBI2_2_SC_CPU0_SECURE_IRQ (GIC_SPI_START + 27)
+#define SSBI2_2_SC_CPU0_NON_SECURE_IRQ (GIC_SPI_START + 28)
+#define SSBI2_1_SC_CPU0_SECURE_IRQ (GIC_SPI_START + 29)
+#define SSBI2_1_SC_CPU0_NON_SECURE_IRQ (GIC_SPI_START + 30)
+#define MSMC_SC_SEC_CE_IRQ (GIC_SPI_START + 31)
+#define MSMC_SC_PRI_CE_IRQ (GIC_SPI_START + 32)
+#define SLIMBUS0_CORE_EE1_IRQ (GIC_SPI_START + 33)
+#define SLIMBUS0_BAM_EE1_IRQ (GIC_SPI_START + 34)
+#define Q6FW_WDOG_EXPIRED_IRQ (GIC_SPI_START + 35)
+#define Q6SW_WDOG_EXPIRED_IRQ (GIC_SPI_START + 36)
+#define MSS_TO_APPS_IRQ_0 (GIC_SPI_START + 37)
+#define MSS_TO_APPS_IRQ_1 (GIC_SPI_START + 38)
+#define MSS_TO_APPS_IRQ_2 (GIC_SPI_START + 39)
+#define MSS_TO_APPS_IRQ_3 (GIC_SPI_START + 40)
+#define MSS_TO_APPS_IRQ_4 (GIC_SPI_START + 41)
+#define MSS_TO_APPS_IRQ_5 (GIC_SPI_START + 42)
+#define MSS_TO_APPS_IRQ_6 (GIC_SPI_START + 43)
+#define MSS_TO_APPS_IRQ_7 (GIC_SPI_START + 44)
+#define MSS_TO_APPS_IRQ_8 (GIC_SPI_START + 45)
+#define MSS_TO_APPS_IRQ_9 (GIC_SPI_START + 46)
+#define VPE_IRQ (GIC_SPI_START + 47)
+#define VFE_IRQ (GIC_SPI_START + 48)
+#define VCODEC_IRQ (GIC_SPI_START + 49)
+#define TV_ENC_IRQ (GIC_SPI_START + 50)
+#define SMMU_VPE_CB_SC_SECURE_IRQ (GIC_SPI_START + 51)
+#define SMMU_VPE_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 52)
+#define SMMU_VFE_CB_SC_SECURE_IRQ (GIC_SPI_START + 53)
+#define SMMU_VFE_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 54)
+#define SMMU_VCODEC_B_CB_SC_SECURE_IRQ (GIC_SPI_START + 55)
+#define SMMU_VCODEC_B_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 56)
+#define SMMU_VCODEC_A_CB_SC_SECURE_IRQ (GIC_SPI_START + 57)
+#define SMMU_VCODEC_A_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 58)
+#define SMMU_ROT_CB_SC_SECURE_IRQ (GIC_SPI_START + 59)
+#define SMMU_ROT_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 60)
+#define SMMU_MDP1_CB_SC_SECURE_IRQ (GIC_SPI_START + 61)
+#define SMMU_MDP1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 62)
+#define SMMU_MDP0_CB_SC_SECURE_IRQ (GIC_SPI_START + 63)
+#define SMMU_MDP0_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 64)
+#define SMMU_JPEGD_CB_SC_SECURE_IRQ (GIC_SPI_START + 65)
+#define SMMU_JPEGD_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 66)
+#define SMMU_IJPEG_CB_SC_SECURE_IRQ (GIC_SPI_START + 67)
+#define SMMU_IJPEG_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 68)
+#define SMMU_GFX3D_CB_SC_SECURE_IRQ (GIC_SPI_START + 69)
+#define SMMU_GFX3D_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 70)
+#define SMMU_GFX2D0_CB_SC_SECURE_IRQ (GIC_SPI_START + 71)
+#define SMMU_GFX2D0_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 72)
+#define ROT_IRQ (GIC_SPI_START + 73)
+#define MMSS_FABRIC_IRQ (GIC_SPI_START + 74)
+#define MDP_IRQ (GIC_SPI_START + 75)
+#define JPEGD_IRQ (GIC_SPI_START + 76)
+#define JPEG_IRQ (GIC_SPI_START + 77)
+#define MMSS_IMEM_IRQ (GIC_SPI_START + 78)
+#define HDMI_IRQ (GIC_SPI_START + 79)
+#define GFX3D_IRQ (GIC_SPI_START + 80)
+#define GFX2D0_IRQ (GIC_SPI_START + 81)
+#define DSI1_IRQ (GIC_SPI_START + 82)
+#define CSI_1_IRQ (GIC_SPI_START + 83)
+#define CSI_0_IRQ (GIC_SPI_START + 84)
+#define LPASS_SCSS_AUDIO_IF_OUT0_IRQ (GIC_SPI_START + 85)
+#define LPASS_SCSS_MIDI_IRQ (GIC_SPI_START + 86)
+#define LPASS_Q6SS_WDOG_EXPIRED (GIC_SPI_START + 87)
+#define LPASS_SCSS_GP_LOW_IRQ (GIC_SPI_START + 88)
+#define LPASS_SCSS_GP_MEDIUM_IRQ (GIC_SPI_START + 89)
+#define LPASS_SCSS_GP_HIGH_IRQ (GIC_SPI_START + 90)
+#define TOP_IMEM_IRQ (GIC_SPI_START + 91)
+#define FABRIC_SYS_IRQ (GIC_SPI_START + 92)
+#define FABRIC_APPS_IRQ (GIC_SPI_START + 93)
+#define USB1_HS_BAM_IRQ (GIC_SPI_START + 94)
+#define SDC4_BAM_IRQ (GIC_SPI_START + 95)
+#define SDC3_BAM_IRQ (GIC_SPI_START + 96)
+#define SDC2_BAM_IRQ (GIC_SPI_START + 97)
+#define SDC1_BAM_IRQ (GIC_SPI_START + 98)
+#define FABRIC_SPS_IRQ (GIC_SPI_START + 99)
+#define USB1_HS_IRQ (GIC_SPI_START + 100)
+#define SDC4_IRQ_0 (GIC_SPI_START + 101)
+#define SDC3_IRQ_0 (GIC_SPI_START + 102)
+#define SDC2_IRQ_0 (GIC_SPI_START + 103)
+#define SDC1_IRQ_0 (GIC_SPI_START + 104)
+#define SPS_BAM_DMA_IRQ (GIC_SPI_START + 105)
+#define SPS_SEC_VIOL_IRQ (GIC_SPI_START + 106)
+#define SPS_MTI_0 (GIC_SPI_START + 107)
+#define SPS_MTI_1 (GIC_SPI_START + 108)
+#define SPS_MTI_2 (GIC_SPI_START + 109)
+#define SPS_MTI_3 (GIC_SPI_START + 110)
+#define SPS_MTI_4 (GIC_SPI_START + 111)
+#define SPS_MTI_5 (GIC_SPI_START + 112)
+#define SPS_MTI_6 (GIC_SPI_START + 113)
+#define SPS_MTI_7 (GIC_SPI_START + 114)
+#define SPS_MTI_8 (GIC_SPI_START + 115)
+#define SPS_MTI_9 (GIC_SPI_START + 116)
+#define SPS_MTI_10 (GIC_SPI_START + 117)
+#define SPS_MTI_11 (GIC_SPI_START + 118)
+#define SPS_MTI_12 (GIC_SPI_START + 119)
+#define SPS_MTI_13 (GIC_SPI_START + 120)
+#define SPS_MTI_14 (GIC_SPI_START + 121)
+#define SPS_MTI_15 (GIC_SPI_START + 122)
+#define SPS_MTI_16 (GIC_SPI_START + 123)
+#define SPS_MTI_17 (GIC_SPI_START + 124)
+#define SPS_MTI_18 (GIC_SPI_START + 125)
+#define SPS_MTI_19 (GIC_SPI_START + 126)
+#define SPS_MTI_20 (GIC_SPI_START + 127)
+#define SPS_MTI_21 (GIC_SPI_START + 128)
+#define SPS_MTI_22 (GIC_SPI_START + 129)
+#define SPS_MTI_23 (GIC_SPI_START + 130)
+#define SPS_MTI_24 (GIC_SPI_START + 131)
+#define SPS_MTI_25 (GIC_SPI_START + 132)
+#define SPS_MTI_26 (GIC_SPI_START + 133)
+#define SPS_MTI_27 (GIC_SPI_START + 134)
+#define SPS_MTI_28 (GIC_SPI_START + 135)
+#define SPS_MTI_29 (GIC_SPI_START + 136)
+#define SPS_MTI_30 (GIC_SPI_START + 137)
+#define SPS_MTI_31 (GIC_SPI_START + 138)
+#define CSIPHY_4LN_IRQ (GIC_SPI_START + 139)
+#define CSIPHY_2LN_IRQ (GIC_SPI_START + 140)
+#define USB2_IRQ (GIC_SPI_START + 141)
+#define USB1_IRQ (GIC_SPI_START + 142)
+#define TSSC_SSBI_IRQ (GIC_SPI_START + 143)
+#define TSSC_SAMPLE_IRQ (GIC_SPI_START + 144)
+#define TSSC_PENUP_IRQ (GIC_SPI_START + 145)
+#define GSBI1_UARTDM_IRQ (GIC_SPI_START + 146)
+#define GSBI1_QUP_IRQ (GIC_SPI_START + 147)
+#define GSBI2_UARTDM_IRQ (GIC_SPI_START + 148)
+#define GSBI2_QUP_IRQ (GIC_SPI_START + 149)
+#define GSBI3_UARTDM_IRQ (GIC_SPI_START + 150)
+#define GSBI3_QUP_IRQ (GIC_SPI_START + 151)
+#define GSBI4_UARTDM_IRQ (GIC_SPI_START + 152)
+#define GSBI4_QUP_IRQ (GIC_SPI_START + 153)
+#define GSBI5_UARTDM_IRQ (GIC_SPI_START + 154)
+#define GSBI5_QUP_IRQ (GIC_SPI_START + 155)
+#define GSBI6_UARTDM_IRQ (GIC_SPI_START + 156)
+#define GSBI6_QUP_IRQ (GIC_SPI_START + 157)
+#define GSBI7_UARTDM_IRQ (GIC_SPI_START + 158)
+#define GSBI7_QUP_IRQ (GIC_SPI_START + 159)
+#define GSBI8_UARTDM_IRQ (GIC_SPI_START + 160)
+#define GSBI8_QUP_IRQ (GIC_SPI_START + 161)
+#define TSIF_TSPP_IRQ (GIC_SPI_START + 162)
+#define TSIF_BAM_IRQ (GIC_SPI_START + 163)
+#define TSIF2_IRQ (GIC_SPI_START + 164)
+#define TSIF1_IRQ (GIC_SPI_START + 165)
+#define DSI2_IRQ (GIC_SPI_START + 166)
+#define ISPIF_IRQ (GIC_SPI_START + 167)
+#define MSMC_SC_SEC_TMR_IRQ (GIC_SPI_START + 168)
+#define MSMC_SC_SEC_WDOG_BARK_IRQ (GIC_SPI_START + 169)
+#define INT_ADM0_SCSS_0_IRQ (GIC_SPI_START + 170)
+#define INT_ADM0_SCSS_1_IRQ (GIC_SPI_START + 171)
+#define INT_ADM0_SCSS_2_IRQ (GIC_SPI_START + 172)
+#define INT_ADM0_SCSS_3_IRQ (GIC_SPI_START + 173)
+#define CC_SCSS_WDT1CPU1BITEEXPIRED (GIC_SPI_START + 174)
+#define CC_SCSS_WDT1CPU0BITEEXPIRED (GIC_SPI_START + 175)
+#define CC_SCSS_WDT0CPU1BITEEXPIRED (GIC_SPI_START + 176)
+#define CC_SCSS_WDT0CPU0BITEEXPIRED (GIC_SPI_START + 177)
+#define TSENS_UPPER_LOWER_INT (GIC_SPI_START + 178)
+#define SSBI2_2_SC_CPU1_SECURE_INT (GIC_SPI_START + 179)
+#define SSBI2_2_SC_CPU1_NON_SECURE_INT (GIC_SPI_START + 180)
+#define SSBI2_1_SC_CPU1_SECURE_INT (GIC_SPI_START + 181)
+#define SSBI2_1_SC_CPU1_NON_SECURE_INT (GIC_SPI_START + 182)
+#define XPU_SUMMARY_IRQ (GIC_SPI_START + 183)
+#define BUS_EXCEPTION_SUMMARY_IRQ (GIC_SPI_START + 184)
+#define HSDDRX_EBI1CH0_IRQ (GIC_SPI_START + 185)
+#define HSDDRX_EBI1CH1_IRQ (GIC_SPI_START + 186)
+#define SDC5_BAM_IRQ (GIC_SPI_START + 187)
+#define SDC5_IRQ_0 (GIC_SPI_START + 188)
+#define GSBI9_UARTDM_IRQ (GIC_SPI_START + 189)
+#define GSBI9_QUP_IRQ (GIC_SPI_START + 190)
+#define GSBI10_UARTDM_IRQ (GIC_SPI_START + 191)
+#define GSBI10_QUP_IRQ (GIC_SPI_START + 192)
+#define GSBI11_UARTDM_IRQ (GIC_SPI_START + 193)
+#define GSBI11_QUP_IRQ (GIC_SPI_START + 194)
+#define GSBI12_UARTDM_IRQ (GIC_SPI_START + 195)
+#define GSBI12_QUP_IRQ (GIC_SPI_START + 196)
+#define RIVA_APSS_LTECOEX_IRQ (GIC_SPI_START + 197)
+#define RIVA_APSS_SPARE_IRQ (GIC_SPI_START + 198)
+#define RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ (GIC_SPI_START + 199)
+#define RIVA_ASS_RESET_DONE_IRQ (GIC_SPI_START + 200)
+#define RIVA_APSS_ASIC_IRQ (GIC_SPI_START + 201)
+#define RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ (GIC_SPI_START + 202)
+#define RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ (GIC_SPI_START + 203)
+#define RIVA_APPS_WLAM_SMSM_IRQ (GIC_SPI_START + 204)
+#define RIVA_APPS_LOG_CTRL_IRQ (GIC_SPI_START + 205)
+#define RIVA_APPS_FM_CTRL_IRQ (GIC_SPI_START + 206)
+#define RIVA_APPS_HCI_IRQ (GIC_SPI_START + 207)
+#define RIVA_APPS_WLAN_CTRL_IRQ (GIC_SPI_START + 208)
+#define A2_BAM_IRQ (GIC_SPI_START + 209)
+#define SMMU_GFX2D1_CB_SC_SECURE_IRQ (GIC_SPI_START + 210)
+#define SMMU_GFX2D1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 211)
+#define GFX2D1_IRQ (GIC_SPI_START + 212)
+#define PPSS_WDOG_TIMER_IRQ (GIC_SPI_START + 213)
+#define SPS_SLIMBUS_CORE_EE0_IRQ (GIC_SPI_START + 214)
+#define SPS_SLIMBUS_BAM_EE0_IRQ (GIC_SPI_START + 215)
+#define QDSS_ETB_IRQ (GIC_SPI_START + 216)
+#define QDSS_CTI2KPSS_CPU1_IRQ (GIC_SPI_START + 217)
+#define QDSS_CTI2KPSS_CPU0_IRQ (GIC_SPI_START + 218)
+#define TLMM_APCC_DIR_CONN_IRQ_16 (GIC_SPI_START + 219)
+#define TLMM_APCC_DIR_CONN_IRQ_17 (GIC_SPI_START + 220)
+#define TLMM_APCC_DIR_CONN_IRQ_18 (GIC_SPI_START + 221)
+#define TLMM_APCC_DIR_CONN_IRQ_19 (GIC_SPI_START + 222)
+#define TLMM_APCC_DIR_CONN_IRQ_20 (GIC_SPI_START + 223)
+#define TLMM_APCC_DIR_CONN_IRQ_21 (GIC_SPI_START + 224)
+#define PM8921_SEC_IRQ_104 (GIC_SPI_START + 225)
+#define PM8018_SEC_IRQ_107 (GIC_SPI_START + 226)
+
+/* For now, use the maximum number of interrupts until a pending GIC issue
+ * is sorted out */
+#define NR_MSM_IRQS 1020
+#define NR_BOARD_IRQS 0
+#define NR_GPIO_IRQS 0
+
+#endif
+
diff --git a/arch/arm/mach-msm/include/mach/irqs-8x50.h b/arch/arm/mach-msm/include/mach/irqs-8x50.h
index de3d8fe24e4e..26adbe0e9406 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8x50.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8x50.h
@@ -1,30 +1,13 @@
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __ASM_ARCH_MSM_IRQS_8XXX_H
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index 8679a4564744..3cd78b165abb 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -26,6 +26,9 @@
#include "sirc.h"
#elif defined(CONFIG_ARCH_MSM8X60)
#include "irqs-8x60.h"
+#elif defined(CONFIG_ARCH_MSM8960)
+/* TODO: Make these not generic. */
+#include "irqs-8960.h"
#elif defined(CONFIG_ARCH_MSM_ARM11)
#include "irqs-7x00.h"
#else
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 070e17d237f1..f2f8d299ba95 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -18,15 +18,17 @@
/* physical offset of RAM */
#if defined(CONFIG_ARCH_QSD8X50) && defined(CONFIG_MSM_SOC_REV_A)
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_ARCH_QSD8X50)
-#define PHYS_OFFSET UL(0x20000000)
+#define PLAT_PHYS_OFFSET UL(0x20000000)
#elif defined(CONFIG_ARCH_MSM7X30)
-#define PHYS_OFFSET UL(0x00200000)
+#define PLAT_PHYS_OFFSET UL(0x00200000)
#elif defined(CONFIG_ARCH_MSM8X60)
-#define PHYS_OFFSET UL(0x40200000)
+#define PLAT_PHYS_OFFSET UL(0x40200000)
+#elif defined(CONFIG_ARCH_MSM8960)
+#define PLAT_PHYS_OFFSET UL(0x40200000)
#else
-#define PHYS_OFFSET UL(0x10000000)
+#define PLAT_PHYS_OFFSET UL(0x10000000)
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/mmc.h b/arch/arm/mach-msm/include/mach/mmc.h
index d54b6b086cff..5631b51cec46 100644
--- a/arch/arm/mach-msm/include/mach/mmc.h
+++ b/arch/arm/mach-msm/include/mach/mmc.h
@@ -15,12 +15,23 @@ struct embedded_sdio_data {
int num_funcs;
};
+struct msm_mmc_gpio {
+ unsigned no;
+ const char *name;
+};
+
+struct msm_mmc_gpio_data {
+ struct msm_mmc_gpio *gpio;
+ u8 size;
+};
+
struct msm_mmc_platform_data {
unsigned int ocr_mask; /* available voltages */
u32 (*translate_vdd)(struct device *, unsigned int);
unsigned int (*status)(struct device *);
struct embedded_sdio_data *embedded_sdio;
int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+ struct msm_mmc_gpio_data *gpio_data;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
index cfff0e74f128..8f99d97615a0 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/msm_iomap.h
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -47,13 +48,8 @@
#define MSM_VIC_PHYS 0xC0000000
#define MSM_VIC_SIZE SZ_4K
-#define MSM_CSR_BASE IOMEM(0xE0001000)
-#define MSM_CSR_PHYS 0xC0100000
-#define MSM_CSR_SIZE SZ_4K
-
-#define MSM_GPT_PHYS MSM_CSR_PHYS
-#define MSM_GPT_BASE MSM_CSR_BASE
-#define MSM_GPT_SIZE SZ_4K
+#define MSM7X00_CSR_PHYS 0xC0100000
+#define MSM7X00_CSR_SIZE SZ_4K
#define MSM_DMOV_BASE IOMEM(0xE0002000)
#define MSM_DMOV_PHYS 0xA9700000
@@ -130,10 +126,4 @@
#define MSM_AD5_SIZE (SZ_1M*13)
-#if defined(CONFIG_ARCH_MSM7X30)
-#define MSM_GCC_BASE IOMEM(0xF8009000)
-#define MSM_GCC_PHYS 0xC0182000
-#define MSM_GCC_SIZE SZ_4K
-#endif
-
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
index 0fd7b68ca114..4d84be15955e 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011 Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -39,16 +39,8 @@
#define MSM_VIC_PHYS 0xC0080000
#define MSM_VIC_SIZE SZ_4K
-#define MSM_CSR_BASE IOMEM(0xE0001000)
-#define MSM_CSR_PHYS 0xC0100000
-#define MSM_CSR_SIZE SZ_4K
-
-#define MSM_TMR_PHYS MSM_CSR_PHYS
-#define MSM_TMR_BASE MSM_CSR_BASE
-#define MSM_TMR_SIZE SZ_4K
-
-#define MSM_GPT_BASE (MSM_TMR_BASE + 0x4)
-#define MSM_DGT_BASE (MSM_TMR_BASE + 0x24)
+#define MSM7X30_CSR_PHYS 0xC0100000
+#define MSM7X30_CSR_SIZE SZ_4K
#define MSM_DMOV_BASE IOMEM(0xE0002000)
#define MSM_DMOV_PHYS 0xAC400000
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
new file mode 100644
index 000000000000..3c9d9602a318
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * The MSM peripherals are spread out across 768MB of physical address
+ * space, which makes a simple IO_ADDRESS macro for sliding them into
+ * the right virtual location impractical. Instead, we provide a
+ * master phys->virt mapping for peripherals here.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_8960_H
+#define __ASM_ARCH_MSM_IOMAP_8960_H
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+
+#define MSM8960_QGIC_DIST_PHYS 0x02000000
+#define MSM8960_QGIC_DIST_SIZE SZ_4K
+
+#define MSM8960_QGIC_CPU_PHYS 0x02002000
+#define MSM8960_QGIC_CPU_SIZE SZ_4K
+
+#define MSM8960_TMR_PHYS 0x0200A000
+#define MSM8960_TMR_SIZE SZ_4K
+
+#define MSM8960_TMR0_PHYS 0x0208A000
+#define MSM8960_TMR0_SIZE SZ_4K
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
index acc819eb76e5..d4143201999f 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011 Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -39,16 +39,8 @@
#define MSM_VIC_PHYS 0xAC000000
#define MSM_VIC_SIZE SZ_4K
-#define MSM_CSR_BASE IOMEM(0xE0001000)
-#define MSM_CSR_PHYS 0xAC100000
-#define MSM_CSR_SIZE SZ_4K
-
-#define MSM_TMR_PHYS MSM_CSR_PHYS
-#define MSM_TMR_BASE MSM_CSR_BASE
-#define MSM_TMR_SIZE SZ_4K
-
-#define MSM_GPT_BASE MSM_TMR_BASE
-#define MSM_DGT_BASE (MSM_TMR_BASE + 0x10)
+#define QSD8X50_CSR_PHYS 0xAC100000
+#define QSD8X50_CSR_SIZE SZ_4K
#define MSM_DMOV_BASE IOMEM(0xE0002000)
#define MSM_DMOV_PHYS 0xA9700000
@@ -132,16 +124,16 @@
#define MSM_UART2DM_PHYS 0xA0900000
-#define MSM_SDC1_PHYS 0xA0400000
+#define MSM_SDC1_PHYS 0xA0300000
#define MSM_SDC1_SIZE SZ_4K
-#define MSM_SDC2_PHYS 0xA0500000
+#define MSM_SDC2_PHYS 0xA0400000
#define MSM_SDC2_SIZE SZ_4K
-#define MSM_SDC3_PHYS 0xA0600000
+#define MSM_SDC3_PHYS 0xA0500000
#define MSM_SDC3_SIZE SZ_4K
-#define MSM_SDC4_PHYS 0xA0700000
+#define MSM_SDC4_PHYS 0xA0600000
#define MSM_SDC4_SIZE SZ_4K
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
index a54e33b0882e..3b19b8f244b8 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -35,13 +35,11 @@
*
*/
-#define MSM_QGIC_DIST_BASE IOMEM(0xF0000000)
-#define MSM_QGIC_DIST_PHYS 0x02080000
-#define MSM_QGIC_DIST_SIZE SZ_4K
+#define MSM8X60_QGIC_DIST_PHYS 0x02080000
+#define MSM8X60_QGIC_DIST_SIZE SZ_4K
-#define MSM_QGIC_CPU_BASE IOMEM(0xF0001000)
-#define MSM_QGIC_CPU_PHYS 0x02081000
-#define MSM_QGIC_CPU_SIZE SZ_4K
+#define MSM8X60_QGIC_CPU_PHYS 0x02081000
+#define MSM8X60_QGIC_CPU_SIZE SZ_4K
#define MSM_ACC_BASE IOMEM(0xF0002000)
#define MSM_ACC_PHYS 0x02001000
@@ -58,51 +56,10 @@
#define MSM_SHARED_RAM_BASE IOMEM(0xF0100000)
#define MSM_SHARED_RAM_SIZE SZ_1M
-#define MSM_TMR_BASE IOMEM(0xF0200000)
-#define MSM_TMR_PHYS 0x02000000
-#define MSM_TMR_SIZE SZ_4K
+#define MSM8X60_TMR_PHYS 0x02000000
+#define MSM8X60_TMR_SIZE SZ_4K
-#define MSM_TMR0_BASE IOMEM(0xF0201000)
-#define MSM_TMR0_PHYS 0x02040000
-#define MSM_TMR0_SIZE SZ_4K
-
-#define MSM_GPT_BASE (MSM_TMR_BASE + 0x4)
-#define MSM_DGT_BASE (MSM_TMR_BASE + 0x24)
-
-#define MSM_IOMMU_JPEGD_PHYS 0x07300000
-#define MSM_IOMMU_JPEGD_SIZE SZ_1M
-
-#define MSM_IOMMU_VPE_PHYS 0x07400000
-#define MSM_IOMMU_VPE_SIZE SZ_1M
-
-#define MSM_IOMMU_MDP0_PHYS 0x07500000
-#define MSM_IOMMU_MDP0_SIZE SZ_1M
-
-#define MSM_IOMMU_MDP1_PHYS 0x07600000
-#define MSM_IOMMU_MDP1_SIZE SZ_1M
-
-#define MSM_IOMMU_ROT_PHYS 0x07700000
-#define MSM_IOMMU_ROT_SIZE SZ_1M
-
-#define MSM_IOMMU_IJPEG_PHYS 0x07800000
-#define MSM_IOMMU_IJPEG_SIZE SZ_1M
-
-#define MSM_IOMMU_VFE_PHYS 0x07900000
-#define MSM_IOMMU_VFE_SIZE SZ_1M
-
-#define MSM_IOMMU_VCODEC_A_PHYS 0x07A00000
-#define MSM_IOMMU_VCODEC_A_SIZE SZ_1M
-
-#define MSM_IOMMU_VCODEC_B_PHYS 0x07B00000
-#define MSM_IOMMU_VCODEC_B_SIZE SZ_1M
-
-#define MSM_IOMMU_GFX3D_PHYS 0x07C00000
-#define MSM_IOMMU_GFX3D_SIZE SZ_1M
-
-#define MSM_IOMMU_GFX2D0_PHYS 0x07D00000
-#define MSM_IOMMU_GFX2D0_SIZE SZ_1M
-
-#define MSM_IOMMU_GFX2D1_PHYS 0x07E00000
-#define MSM_IOMMU_GFX2D1_SIZE SZ_1M
+#define MSM8X60_TMR0_PHYS 0x02040000
+#define MSM8X60_TMR0_SIZE SZ_4K
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 8e24dd812139..c98c7591f3b8 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -53,6 +53,13 @@
#include "msm_iomap-7x00.h"
#endif
+#include "msm_iomap-8960.h"
+/* Virtual addresses shared across all MSM targets. */
+#define MSM_CSR_BASE IOMEM(0xE0001000)
+#define MSM_QGIC_DIST_BASE IOMEM(0xF0000000)
+#define MSM_QGIC_CPU_BASE IOMEM(0xF0001000)
+#define MSM_TMR_BASE IOMEM(0xF0200000)
+#define MSM_TMR0_BASE IOMEM(0xF0201000)
#endif
diff --git a/arch/arm/mach-msm/include/mach/sirc.h b/arch/arm/mach-msm/include/mach/sirc.h
index 7281337ee28d..ef55868a5b8a 100644
--- a/arch/arm/mach-msm/include/mach/sirc.h
+++ b/arch/arm/mach-msm/include/mach/sirc.h
@@ -1,30 +1,13 @@
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __ASM_ARCH_MSM_SIRC_H
diff --git a/arch/arm/mach-msm/include/mach/smp.h b/arch/arm/mach-msm/include/mach/smp.h
index a95f7b9efe31..3c01000ecc80 100644
--- a/arch/arm/mach-msm/include/mach/smp.h
+++ b/arch/arm/mach-msm/include/mach/smp.h
@@ -1,29 +1,13 @@
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Code Aurora nor
- * the names of its contributors may be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __ASM_ARCH_MSM_SMP_H
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 1260007a9dd1..cec6ed1c91d3 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -3,7 +3,7 @@
* MSM7K, QSD io support
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -28,19 +28,20 @@
#include <mach/board.h>
-#define MSM_DEVICE(name) { \
+#define MSM_CHIP_DEVICE(name, chip) { \
.virtual = (unsigned long) MSM_##name##_BASE, \
- .pfn = __phys_to_pfn(MSM_##name##_PHYS), \
- .length = MSM_##name##_SIZE, \
+ .pfn = __phys_to_pfn(chip##_##name##_PHYS), \
+ .length = chip##_##name##_SIZE, \
.type = MT_DEVICE_NONSHARED, \
}
+#define MSM_DEVICE(name) MSM_CHIP_DEVICE(name, MSM)
+
#if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_MSM7X27) \
|| defined(CONFIG_ARCH_MSM7X25)
static struct map_desc msm_io_desc[] __initdata = {
MSM_DEVICE(VIC),
- MSM_DEVICE(CSR),
- MSM_DEVICE(GPT),
+ MSM_CHIP_DEVICE(CSR, MSM7X00),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
@@ -73,8 +74,7 @@ void __init msm_map_common_io(void)
#ifdef CONFIG_ARCH_QSD8X50
static struct map_desc qsd8x50_io_desc[] __initdata = {
MSM_DEVICE(VIC),
- MSM_DEVICE(CSR),
- MSM_DEVICE(TMR),
+ MSM_CHIP_DEVICE(CSR, QSD8X50),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
@@ -102,10 +102,10 @@ void __init msm_map_qsd8x50_io(void)
#ifdef CONFIG_ARCH_MSM8X60
static struct map_desc msm8x60_io_desc[] __initdata = {
- MSM_DEVICE(QGIC_DIST),
- MSM_DEVICE(QGIC_CPU),
- MSM_DEVICE(TMR),
- MSM_DEVICE(TMR0),
+ MSM_CHIP_DEVICE(QGIC_DIST, MSM8X60),
+ MSM_CHIP_DEVICE(QGIC_CPU, MSM8X60),
+ MSM_CHIP_DEVICE(TMR, MSM8X60),
+ MSM_CHIP_DEVICE(TMR0, MSM8X60),
MSM_DEVICE(ACC),
MSM_DEVICE(GCC),
};
@@ -116,11 +116,24 @@ void __init msm_map_msm8x60_io(void)
}
#endif /* CONFIG_ARCH_MSM8X60 */
+#ifdef CONFIG_ARCH_MSM8960
+static struct map_desc msm8960_io_desc[] __initdata = {
+ MSM_CHIP_DEVICE(QGIC_DIST, MSM8960),
+ MSM_CHIP_DEVICE(QGIC_CPU, MSM8960),
+ MSM_CHIP_DEVICE(TMR, MSM8960),
+ MSM_CHIP_DEVICE(TMR0, MSM8960),
+};
+
+void __init msm_map_msm8960_io(void)
+{
+ iotable_init(msm8960_io_desc, ARRAY_SIZE(msm8960_io_desc));
+}
+#endif /* CONFIG_ARCH_MSM8960 */
+
#ifdef CONFIG_ARCH_MSM7X30
static struct map_desc msm7x30_io_desc[] __initdata = {
MSM_DEVICE(VIC),
- MSM_DEVICE(CSR),
- MSM_DEVICE(TMR),
+ MSM_CHIP_DEVICE(CSR, MSM7X30),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
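/*
 * For reference (editor's note, not part of the patch): with the io.c change
 * above, MSM_CHIP_DEVICE(CSR, MSM7X30) expands to a map_desc entry that keeps
 * the shared MSM_CSR_BASE virtual address while taking the physical address
 * and size from the per-chip constants:
 *
 *	{
 *		.virtual = (unsigned long) MSM_CSR_BASE,
 *		.pfn     = __phys_to_pfn(MSM7X30_CSR_PHYS),
 *		.length  = MSM7X30_CSR_SIZE,
 *		.type    = MT_DEVICE_NONSHARED,
 *	}
 */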
diff --git a/arch/arm/mach-msm/scm-boot.h b/arch/arm/mach-msm/scm-boot.h
index 68f9b6153d74..7be32ff5d687 100644
--- a/arch/arm/mach-msm/scm-boot.h
+++ b/arch/arm/mach-msm/scm-boot.h
@@ -1,29 +1,13 @@
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __MACH_SCM_BOOT_H
#define __MACH_SCM_BOOT_H
diff --git a/arch/arm/mach-msm/scm.h b/arch/arm/mach-msm/scm.h
index 261786be11c5..00b31ea58f29 100644
--- a/arch/arm/mach-msm/scm.h
+++ b/arch/arm/mach-msm/scm.h
@@ -1,29 +1,13 @@
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Code Aurora Forum, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __MACH_SCM_H
#define __MACH_SCM_H
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index c105d28b53e3..56f920c55b6a 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -24,10 +24,7 @@
#include <asm/mach/time.h>
#include <mach/msm_iomap.h>
-
-#ifndef MSM_DGT_BASE
-#define MSM_DGT_BASE (MSM_GPT_BASE + 0x10)
-#endif
+#include <mach/cpu.h>
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
@@ -52,18 +49,14 @@ enum timer_location {
GLOBAL_TIMER = 1,
};
-#ifdef MSM_TMR0_BASE
-#define MSM_TMR_GLOBAL (MSM_TMR0_BASE - MSM_TMR_BASE)
-#else
-#define MSM_TMR_GLOBAL 0
-#endif
-
#define MSM_GLOBAL_TIMER MSM_CLOCK_DGT
+/* TODO: Remove these ifdefs */
#if defined(CONFIG_ARCH_QSD8X50)
#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
#define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60)
+#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
+ defined(CONFIG_ARCH_MSM8960)
#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
#define MSM_DGT_SHIFT (0)
#else
@@ -177,11 +170,7 @@ static struct msm_clock msm_clocks[] = {
.dev_id = &msm_clocks[0].clockevent,
.irq = INT_GP_TIMER_EXP
},
- .regbase = MSM_GPT_BASE,
.freq = GPT_HZ,
- .local_counter = MSM_GPT_BASE + TIMER_COUNT_VAL,
- .global_counter = MSM_GPT_BASE + TIMER_COUNT_VAL +
- MSM_TMR_GLOBAL,
},
[MSM_CLOCK_DGT] = {
.clockevent = {
@@ -206,12 +195,8 @@ static struct msm_clock msm_clocks[] = {
.dev_id = &msm_clocks[1].clockevent,
.irq = INT_DEBUG_TIMER_EXP
},
- .regbase = MSM_DGT_BASE,
.freq = DGT_HZ >> MSM_DGT_SHIFT,
.shift = MSM_DGT_SHIFT,
- .local_counter = MSM_DGT_BASE + TIMER_COUNT_VAL,
- .global_counter = MSM_DGT_BASE + TIMER_COUNT_VAL +
- MSM_TMR_GLOBAL,
}
};
@@ -219,6 +204,25 @@ static void __init msm_timer_init(void)
{
int i;
int res;
+ int global_offset = 0;
+
+ if (cpu_is_msm7x01()) {
+ msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
+ msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
+ } else if (cpu_is_msm7x30()) {
+ msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE + 0x04;
+ msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x24;
+ } else if (cpu_is_qsd8x50()) {
+ msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
+ msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
+ } else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
+ msm_clocks[MSM_CLOCK_GPT].regbase = MSM_TMR_BASE + 0x04;
+ msm_clocks[MSM_CLOCK_DGT].regbase = MSM_TMR_BASE + 0x24;
+
+ /* Use CPU0's timer as the global timer. */
+ global_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
+ } else
+ BUG();
#ifdef CONFIG_ARCH_MSM_SCORPIONMP
writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
@@ -228,6 +232,10 @@ static void __init msm_timer_init(void)
struct msm_clock *clock = &msm_clocks[i];
struct clock_event_device *ce = &clock->clockevent;
struct clocksource *cs = &clock->clocksource;
+
+ clock->local_counter = clock->regbase + TIMER_COUNT_VAL;
+ clock->global_counter = clock->local_counter + global_offset;
+
writel(0, clock->regbase + TIMER_ENABLE);
writel(0, clock->regbase + TIMER_CLEAR);
writel(~0, clock->regbase + TIMER_MATCH_VAL);
@@ -255,7 +263,7 @@ static void __init msm_timer_init(void)
}
#ifdef CONFIG_SMP
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
@@ -287,6 +295,7 @@ void __cpuinit local_timer_setup(struct clock_event_device *evt)
gic_enable_ppi(clock->irq.irq);
clockevents_register_device(evt);
+ return 0;
}
inline int local_timer_ack(void)
diff --git a/arch/arm/mach-mv78xx0/include/mach/memory.h b/arch/arm/mach-mv78xx0/include/mach/memory.h
index e663042d307f..a648c51f2e42 100644
--- a/arch/arm/mach-mv78xx0/include/mach/memory.h
+++ b/arch/arm/mach-mv78xx0/include/mach/memory.h
@@ -5,6 +5,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index 0717f887cba0..340809a7d233 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -94,6 +94,7 @@ config MACH_MX31_3DS
select MXC_DEBUG_BOARD
select IMX_HAVE_PLATFORM_FSL_USB2_UDC
select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_KEYPAD
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_EHCI
@@ -183,6 +184,7 @@ config MACH_MX35_3DS
select MXC_DEBUG_BOARD
select IMX_HAVE_PLATFORM_FSL_USB2_UDC
select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_EHCI
select IMX_HAVE_PLATFORM_MXC_NAND
@@ -199,6 +201,15 @@ config MACH_KZM_ARM11_01
Include support for KZM-ARM11-01. This includes specific
configurations for the board and its peripherals.
+config MACH_BUG
+ bool "Support Buglabs BUGBase platform"
+ select SOC_IMX31
+ select IMX_HAVE_PLATFORM_IMX_UART
+ default y
+ help
+ Include support for BUGBase 1.3 platform. This includes specific
+ configurations for the board and its peripherals.
+
config MACH_EUKREA_CPUIMX35
bool "Support Eukrea CPUIMX35 Platform"
select SOC_IMX35
@@ -229,4 +240,18 @@ config MACH_EUKREA_MBIMXSD35_BASEBOARD
endchoice
+config MACH_VPR200
+ bool "Support VPR200 platform"
+ select SOC_IMX35
+ select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_MXC_EHCI
+ select IMX_HAVE_PLATFORM_MXC_NAND
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ help
+ Include support for VPR200 platform. This includes specific
+ configurations for the board and its peripherals.
+
endif
diff --git a/arch/arm/mach-mx3/Makefile b/arch/arm/mach-mx3/Makefile
index 8db13294ad27..a54faf2cf5fa 100644
--- a/arch/arm/mach-mx3/Makefile
+++ b/arch/arm/mach-mx3/Makefile
@@ -5,8 +5,8 @@
# Object file lists.
obj-y := mm.o devices.o cpu.o
-obj-$(CONFIG_SOC_IMX31) += clock-imx31.o iomux-imx31.o
-obj-$(CONFIG_SOC_IMX35) += clock-imx35.o
+obj-$(CONFIG_SOC_IMX31) += clock-imx31.o iomux-imx31.o ehci-imx31.o
+obj-$(CONFIG_SOC_IMX35) += clock-imx35.o ehci-imx35.o
obj-$(CONFIG_MACH_MX31ADS) += mach-mx31ads.o
obj-$(CONFIG_MACH_MX31LILLY) += mach-mx31lilly.o mx31lilly-db.o
obj-$(CONFIG_MACH_MX31LITE) += mach-mx31lite.o mx31lite-db.o
@@ -20,5 +20,7 @@ obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
obj-$(CONFIG_MACH_ARMADILLO5X0) += mach-armadillo5x0.o
obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
obj-$(CONFIG_MACH_KZM_ARM11_01) += mach-kzm_arm11_01.o
+obj-$(CONFIG_MACH_BUG) += mach-bug.o
obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd-baseboard.o
+obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
diff --git a/arch/arm/mach-mx3/devices-imx35.h b/arch/arm/mach-mx3/devices-imx35.h
index 677b18aa7ae6..d545d86cc202 100644
--- a/arch/arm/mach-mx3/devices-imx35.h
+++ b/arch/arm/mach-mx3/devices-imx35.h
@@ -35,7 +35,7 @@ extern const struct imx_imx_i2c_data imx35_imx_i2c_data[] __initconst;
#define imx35_add_imx_i2c2(pdata) imx35_add_imx_i2c(2, pdata)
extern const struct imx_imx_keypad_data imx35_imx_keypad_data __initconst;
-#define imx31_add_imx_keypad(pdata) \
+#define imx35_add_imx_keypad(pdata) \
imx_add_imx_keypad(&imx35_imx_keypad_data, pdata)
extern const struct imx_imx_ssi_data imx35_imx_ssi_data[] __initconst;
diff --git a/arch/arm/mach-mx3/ehci-imx31.c b/arch/arm/mach-mx3/ehci-imx31.c
new file mode 100644
index 000000000000..314a983ac614
--- /dev/null
+++ b/arch/arm/mach-mx3/ehci-imx31.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET 0x600
+
+#define MX31_OTG_SIC_SHIFT 29
+#define MX31_OTG_SIC_MASK (0x3 << MX31_OTG_SIC_SHIFT)
+#define MX31_OTG_PM_BIT (1 << 24)
+
+#define MX31_H2_SIC_SHIFT 21
+#define MX31_H2_SIC_MASK (0x3 << MX31_H2_SIC_SHIFT)
+#define MX31_H2_PM_BIT (1 << 16)
+#define MX31_H2_DT_BIT (1 << 5)
+
+#define MX31_H1_SIC_SHIFT 13
+#define MX31_H1_SIC_MASK (0x3 << MX31_H1_SIC_SHIFT)
+#define MX31_H1_PM_BIT (1 << 8)
+#define MX31_H1_DT_BIT (1 << 4)
+
+int mx31_initialize_usb_hw(int port, unsigned int flags)
+{
+ unsigned int v;
+
+ v = readl(MX31_IO_ADDRESS(MX31_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ switch (port) {
+ case 0: /* OTG port */
+ v &= ~(MX31_OTG_SIC_MASK | MX31_OTG_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX31_OTG_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX31_OTG_PM_BIT;
+
+ break;
+ case 1: /* H1 port */
+ v &= ~(MX31_H1_SIC_MASK | MX31_H1_PM_BIT | MX31_H1_DT_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX31_H1_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX31_H1_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX31_H1_DT_BIT;
+
+ break;
+ case 2: /* H2 port */
+ v &= ~(MX31_H2_SIC_MASK | MX31_H2_PM_BIT | MX31_H2_DT_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX31_H2_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX31_H2_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX31_H2_DT_BIT;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(v, MX31_IO_ADDRESS(MX31_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-mx3/ehci-imx35.c b/arch/arm/mach-mx3/ehci-imx35.c
new file mode 100644
index 000000000000..33983a478c6b
--- /dev/null
+++ b/arch/arm/mach-mx3/ehci-imx35.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET 0x600
+
+#define MX35_OTG_SIC_SHIFT 29
+#define MX35_OTG_SIC_MASK (0x3 << MX35_OTG_SIC_SHIFT)
+#define MX35_OTG_PM_BIT (1 << 24)
+
+#define MX35_H1_SIC_SHIFT 21
+#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
+#define MX35_H1_PM_BIT (1 << 8)
+#define MX35_H1_IPPUE_UP_BIT (1 << 7)
+#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
+#define MX35_H1_TLL_BIT (1 << 5)
+#define MX35_H1_USBTE_BIT (1 << 4)
+
+int mx35_initialize_usb_hw(int port, unsigned int flags)
+{
+ unsigned int v;
+
+ v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ switch (port) {
+ case 0: /* OTG port */
+ v &= ~(MX35_OTG_SIC_MASK | MX35_OTG_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX35_OTG_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX35_OTG_PM_BIT;
+
+ break;
+ case 1: /* H1 port */
+ v &= ~(MX35_H1_SIC_MASK | MX35_H1_PM_BIT | MX35_H1_TLL_BIT |
+ MX35_H1_USBTE_BIT | MX35_H1_IPPUE_DOWN_BIT | MX35_H1_IPPUE_UP_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX35_H1_SIC_SHIFT;
+
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX35_H1_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX35_H1_TLL_BIT;
+
+ if (flags & MXC_EHCI_INTERNAL_PHY)
+ v |= MX35_H1_USBTE_BIT;
+
+ if (flags & MXC_EHCI_IPPUE_DOWN)
+ v |= MX35_H1_IPPUE_DOWN_BIT;
+
+ if (flags & MXC_EHCI_IPPUE_UP)
+ v |= MX35_H1_IPPUE_UP_BIT;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(v, MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index 14a5ffc939ad..80761474c0f8 100644
--- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -165,8 +165,8 @@ static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
};
-#define GPIO_LED1 (2 * 32 + 29)
-#define GPIO_SWITCH1 (2 * 32 + 25)
+#define GPIO_LED1 IMX_GPIO_NR(3, 29)
+#define GPIO_SWITCH1 IMX_GPIO_NR(3, 25)
#define GPIO_LCDPWR (4)
static void eukrea_mbimxsd_lcd_power_set(struct plat_lcd_data *pd,
diff --git a/arch/arm/mach-mx3/iomux-imx31.c b/arch/arm/mach-mx3/iomux-imx31.c
index a1d7fa5123dc..cf8f8099ebd7 100644
--- a/arch/arm/mach-mx3/iomux-imx31.c
+++ b/arch/arm/mach-mx3/iomux-imx31.c
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(mxc_iomux_set_pad);
* - reserves the pin so that it is not claimed by another driver
* - setups the iomux according to the configuration
*/
-int mxc_iomux_alloc_pin(const unsigned int pin, const char *label)
+int mxc_iomux_alloc_pin(unsigned int pin, const char *label)
{
unsigned pad = pin & IOMUX_PADNUM_MASK;
@@ -118,10 +118,10 @@ int mxc_iomux_alloc_pin(const unsigned int pin, const char *label)
}
EXPORT_SYMBOL(mxc_iomux_alloc_pin);
-int mxc_iomux_setup_multiple_pins(unsigned int *pin_list, unsigned count,
+int mxc_iomux_setup_multiple_pins(const unsigned int *pin_list, unsigned count,
const char *label)
{
- unsigned int *p = pin_list;
+ const unsigned int *p = pin_list;
int i;
int ret = -EINVAL;
@@ -139,7 +139,7 @@ setup_error:
}
EXPORT_SYMBOL(mxc_iomux_setup_multiple_pins);
-void mxc_iomux_release_pin(const unsigned int pin)
+void mxc_iomux_release_pin(unsigned int pin)
{
unsigned pad = pin & IOMUX_PADNUM_MASK;
@@ -148,9 +148,9 @@ void mxc_iomux_release_pin(const unsigned int pin)
}
EXPORT_SYMBOL(mxc_iomux_release_pin);
-void mxc_iomux_release_multiple_pins(unsigned int *pin_list, int count)
+void mxc_iomux_release_multiple_pins(const unsigned int *pin_list, int count)
{
- unsigned int *p = pin_list;
+ const unsigned int *p = pin_list;
int i;
for (i = 0; i < count; i++) {
diff --git a/arch/arm/mach-mx3/mach-armadillo5x0.c b/arch/arm/mach-mx3/mach-armadillo5x0.c
index 28b6f414b5d5..34e619e811e6 100644
--- a/arch/arm/mach-mx3/mach-armadillo5x0.c
+++ b/arch/arm/mach-mx3/mach-armadillo5x0.c
@@ -176,8 +176,10 @@ static int usbotg_init(struct platform_device *pdev)
gpio_set_value(OTG_RESET, 0/*LOW*/);
mdelay(5);
gpio_set_value(OTG_RESET, 1/*HIGH*/);
+ mdelay(10);
- return 0;
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_INTERFACE_DIFF_UNI);
otg_free_reset:
gpio_free(OTG_RESET);
@@ -233,8 +235,10 @@ static int usbh2_init(struct platform_device *pdev)
gpio_set_value(USBH2_RESET, 0/*LOW*/);
mdelay(5);
gpio_set_value(USBH2_RESET, 1/*HIGH*/);
+ mdelay(10);
- return 0;
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_INTERFACE_DIFF_UNI);
h2_free_reset:
gpio_free(USBH2_RESET);
@@ -246,13 +250,11 @@ h2_free_cs:
static struct mxc_usbh_platform_data usbotg_pdata __initdata = {
.init = usbotg_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
};
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
};
#endif /* CONFIG_USB_ULPI */
@@ -569,9 +571,10 @@ static struct sys_timer armadillo5x0_timer = {
MACHINE_START(ARMADILLO5X0, "Armadillo-500")
/* Maintainer: Alberto Panizzo */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31_map_io,
- .init_irq = mx31_init_irq,
- .timer = &armadillo5x0_timer,
- .init_machine = armadillo5x0_init,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &armadillo5x0_timer,
+ .init_machine = armadillo5x0_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-bug.c b/arch/arm/mach-mx3/mach-bug.c
new file mode 100644
index 000000000000..d137d7078ee9
--- /dev/null
+++ b/arch/arm/mach-mx3/mach-bug.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright (C) 2002 Shane Nay (shane@minirl.com)
+ * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2011 Denis 'GNUtoo' Carikli <GNUtoo@no-log.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <mach/iomux-mx3.h>
+#include <mach/imx-uart.h>
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include <asm/mach/time.h>
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+#include "devices-imx31.h"
+
+static const struct imxuart_platform_data uart_pdata __initconst = {
+ .flags = IMXUART_HAVE_RTSCTS,
+};
+
+static const unsigned int bug_pins[] __initconst = {
+ MX31_PIN_PC_RST__CTS5,
+ MX31_PIN_PC_VS2__RTS5,
+ MX31_PIN_PC_BVD2__TXD5,
+ MX31_PIN_PC_BVD1__RXD5,
+};
+
+static void __init bug_board_init(void)
+{
+ mxc_iomux_setup_multiple_pins(bug_pins,
+ ARRAY_SIZE(bug_pins), "uart-4");
+ imx31_add_imx_uart4(&uart_pdata);
+}
+
+static void __init bug_timer_init(void)
+{
+ mx31_clocks_init(26000000);
+}
+
+static struct sys_timer bug_timer = {
+ .init = bug_timer_init,
+};
+
+MACHINE_START(BUG, "BugLabs BUGBase")
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &bug_timer,
+ .init_machine = bug_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 26ae90f02582..ec63d998f647 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -60,7 +60,7 @@ static struct tsc2007_platform_data tsc2007_info = {
.x_plate_ohms = 180,
};
-#define TSC2007_IRQGPIO (2 * 32 + 2)
+#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2)
static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
{
I2C_BOARD_INFO("pcf8563", 0x51),
@@ -111,15 +111,25 @@ static const struct mxc_nand_platform_data
.flash_bbt = 1,
};
+static int eukrea_cpuimx35_otg_init(struct platform_device *pdev)
+{
+ return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static const struct mxc_usbh_platform_data otg_pdata __initconst = {
+ .init = eukrea_cpuimx35_otg_init,
.portsc = MXC_EHCI_MODE_UTMI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
+static int eukrea_cpuimx35_usbh1_init(struct platform_device *pdev)
+{
+ return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
+ MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN);
+}
+
static const struct mxc_usbh_platform_data usbh1_pdata __initconst = {
+ .init = eukrea_cpuimx35_usbh1_init,
.portsc = MXC_EHCI_MODE_SERIAL,
- .flags = MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY |
- MXC_EHCI_IPPUE_DOWN,
};
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
@@ -146,7 +156,7 @@ __setup("otg_mode=", eukrea_cpuimx35_otg_mode);
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init eukrea_cpuimx35_init(void)
{
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads,
ARRAY_SIZE(eukrea_cpuimx35_pads));
@@ -184,9 +194,10 @@ struct sys_timer eukrea_cpuimx35_timer = {
MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35")
/* Maintainer: Eukrea Electromatique */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx35_map_io,
- .init_irq = mx35_init_irq,
- .init_machine = mxc_board_init,
- .timer = &eukrea_cpuimx35_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx35_map_io,
+ .init_early = imx35_init_early,
+ .init_irq = mx35_init_irq,
+ .timer = &eukrea_cpuimx35_timer,
+ .init_machine = eukrea_cpuimx35_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-kzm_arm11_01.c b/arch/arm/mach-mx3/mach-kzm_arm11_01.c
index a5f3eb24e4d5..d35621d62b4d 100644
--- a/arch/arm/mach-mx3/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-mx3/mach-kzm_arm11_01.c
@@ -27,6 +27,7 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
+#include <asm/memory.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
@@ -36,7 +37,6 @@
#include <mach/clock.h>
#include <mach/common.h>
#include <mach/iomux-mx3.h>
-#include <mach/memory.h>
#include "devices-imx31.h"
#include "devices.h"
@@ -266,17 +266,14 @@ static void __init kzm_timer_init(void)
}
static struct sys_timer kzm_timer = {
- .init = kzm_timer_init,
+ .init = kzm_timer_init,
};
-/*
- * The following uses standard kernel macros define in arch.h in order to
- * initialize __mach_desc_KZM_ARM11_01 data structure.
- */
MACHINE_START(KZM_ARM11_01, "Kyoto Microcomputer Co., Ltd. KZM-ARM11-01")
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = kzm_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = kzm_board_init,
- .timer = &kzm_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = kzm_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &kzm_timer,
+ .init_machine = kzm_board_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 0d65db885be7..95b7f2bebc26 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -42,10 +42,6 @@
/* CPLD IRQ line for external uart, external ethernet etc */
#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_1)
-/*
- * This file contains the board-specific initialization routines.
- */
-
static int mx31_3ds_pins[] = {
/* UART1 */
MX31_PIN_CTS1__CTS1,
@@ -100,6 +96,9 @@ static int mx31_3ds_pins[] = {
IOMUX_MODE(MX31_PIN_PC_RW_B, IOMUX_CONFIG_ALT1),
/* USB Host2 reset */
IOMUX_MODE(MX31_PIN_USB_BYP, IOMUX_CONFIG_GPIO),
+ /* I2C1 */
+ MX31_PIN_I2C_CLK__I2C1_SCL,
+ MX31_PIN_I2C_DAT__I2C1_SDA,
};
/*
@@ -138,7 +137,7 @@ static struct regulator_init_data gpo_init = {
}
};
-static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
+static struct mc13xxx_regulator_init_data mx31_3ds_regulators[] = {
{
.id = MC13783_REG_PWGT1SPI, /* Power Gate for ARM core. */
.init_data = &pwgtx_init,
@@ -156,10 +155,13 @@ static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
};
/* MC13783 */
-static struct mc13783_platform_data mc13783_pdata __initdata = {
- .regulators = mx31_3ds_regulators,
- .num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
- .flags = MC13783_USE_REGULATOR | MC13783_USE_TOUCHSCREEN,
+static struct mc13xxx_platform_data mc13783_pdata __initdata = {
+ .regulators = {
+ .regulators = mx31_3ds_regulators,
+ .num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
+ },
+ .flags = MC13XXX_USE_REGULATOR | MC13XXX_USE_TOUCHSCREEN,
};
/* SPI */
@@ -245,6 +247,12 @@ usbotg_free_reset:
return err;
}
+#if defined(CONFIG_USB_ULPI)
+static int mx31_3ds_otg_init(struct platform_device *pdev)
+{
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
+}
+
static int mx31_3ds_host2_init(struct platform_device *pdev)
{
int err;
@@ -276,23 +284,24 @@ static int mx31_3ds_host2_init(struct platform_device *pdev)
mdelay(1);
gpio_set_value(USBH2_RST_B, 1);
- return 0;
+
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
usbotg_free_reset:
gpio_free(USBH2_RST_B);
return err;
}
-#if defined(CONFIG_USB_ULPI)
static struct mxc_usbh_platform_data otg_pdata __initdata = {
+ .init = mx31_3ds_otg_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = mx31_3ds_host2_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
#endif
@@ -320,18 +329,11 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-/*
- * Set up static virtual mappings.
- */
-static void __init mx31_3ds_map_io(void)
-{
- mx31_map_io();
-}
+static const struct imxi2c_platform_data mx31_3ds_i2c0_data __initconst = {
+ .bitrate = 100000,
+};
-/*!
- * Board specific initialization.
- */
-static void __init mxc_board_init(void)
+static void __init mx31_3ds_init(void)
{
mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
"mx31_3ds");
@@ -364,6 +366,7 @@ static void __init mxc_board_init(void)
printk(KERN_WARNING "Init of the debug board failed, all "
"devices on the debug board are unusable.\n");
imx31_add_imx2_wdt(NULL);
+ imx31_add_imx_i2c0(&mx31_3ds_i2c0_data);
}
static void __init mx31_3ds_timer_init(void)
@@ -375,15 +378,12 @@ static struct sys_timer mx31_3ds_timer = {
.init = mx31_3ds_timer_init,
};
-/*
- * The following uses standard kernel macros defined in arch.h in order to
- * initialize __mach_desc_MX31_3DS data structure.
- */
MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)")
/* Maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31_3ds_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mx31_3ds_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &mx31_3ds_timer,
+ .init_machine = mx31_3ds_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-mx31ads.c b/arch/arm/mach-mx3/mach-mx31ads.c
index 88b97d62b57e..4e4b780c481d 100644
--- a/arch/arm/mach-mx3/mach-mx31ads.c
+++ b/arch/arm/mach-mx3/mach-mx31ads.c
@@ -69,12 +69,8 @@
#define EXPIO_INT_XUART_INTB (MXC_EXP_IO_BASE + 11)
#define MXC_MAX_EXP_IO_LINES 16
-/*
- * This file contains the board-specific initialization routines.
- */
-#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
-/*!
+/*
* The serial port definition structure.
*/
static struct plat_serial8250_port serial_platform_data[] = {
@@ -110,14 +106,7 @@ static int __init mxc_init_extuart(void)
{
return platform_device_register(&serial_device);
}
-#else
-static inline int mxc_init_extuart(void)
-{
- return 0;
-}
-#endif
-#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -134,11 +123,6 @@ static inline void mxc_init_imx_uart(void)
mxc_iomux_setup_multiple_pins(uart_pins, ARRAY_SIZE(uart_pins), "uart-0");
imx31_add_imx_uart0(&uart_pdata);
}
-#else /* !SERIAL_IMX */
-static inline void mxc_init_imx_uart(void)
-{
-}
-#endif /* !SERIAL_IMX */
static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
{
@@ -160,7 +144,7 @@ static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
/*
* Disable an expio pin's interrupt by setting the bit in the imr.
- * @param irq an expio virtual irq number
+ * @param d an expio virtual irq description
*/
static void expio_mask_irq(struct irq_data *d)
{
@@ -172,7 +156,7 @@ static void expio_mask_irq(struct irq_data *d)
/*
* Acknowledge an expanded io pin's interrupt by clearing the bit in the isr.
- * @param irq an expanded io virtual irq number
+ * @param d an expio virtual irq description
*/
static void expio_ack_irq(struct irq_data *d)
{
@@ -183,7 +167,7 @@ static void expio_ack_irq(struct irq_data *d)
/*
* Enable a expio pin's interrupt by clearing the bit in the imr.
- * @param irq a expio virtual irq number
+ * @param d an expio virtual irq description
*/
static void expio_unmask_irq(struct irq_data *d)
{
@@ -476,7 +460,6 @@ static struct wm8350_platform_data __initdata mx31_wm8350_pdata = {
};
#endif
-#if defined(CONFIG_I2C_IMX) || defined(CONFIG_I2C_IMX_MODULE)
static struct i2c_board_info __initdata mx31ads_i2c1_devices[] = {
#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
{
@@ -497,11 +480,6 @@ static void mxc_init_i2c(void)
imx31_add_imx_i2c1(NULL);
}
-#else
-static void mxc_init_i2c(void)
-{
-}
-#endif
static unsigned int ssi_pins[] = {
MX31_PIN_SFS5__SFS5,
@@ -516,9 +494,7 @@ static void mxc_init_audio(void)
mxc_iomux_setup_multiple_pins(ssi_pins, ARRAY_SIZE(ssi_pins), "ssi");
}
-/*!
- * This structure defines static mappings for the i.MX31ADS board.
- */
+/* static mappings */
static struct map_desc mx31ads_io_desc[] __initdata = {
{
.virtual = MX31_CS4_BASE_ADDR_VIRT,
@@ -528,9 +504,6 @@ static struct map_desc mx31ads_io_desc[] __initdata = {
},
};
-/*!
- * Set up static virtual mappings.
- */
static void __init mx31ads_map_io(void)
{
mx31_map_io();
@@ -543,10 +516,7 @@ static void __init mx31ads_init_irq(void)
mx31ads_init_expio();
}
-/*!
- * Board specific initialization.
- */
-static void __init mxc_board_init(void)
+static void __init mx31ads_init(void)
{
mxc_init_extuart();
mxc_init_imx_uart();
@@ -563,15 +533,12 @@ static struct sys_timer mx31ads_timer = {
.init = mx31ads_timer_init,
};
-/*
- * The following uses standard kernel macros defined in arch.h in order to
- * initialize __mach_desc_MX31ADS data structure.
- */
MACHINE_START(MX31ADS, "Freescale MX31ADS")
/* Maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31ads_map_io,
- .init_irq = mx31ads_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mx31ads_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31ads_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31ads_init_irq,
+ .timer = &mx31ads_timer,
+ .init_machine = mx31ads_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-mx31lilly.c b/arch/arm/mach-mx3/mach-mx31lilly.c
index 2c595483f356..be79a0d6301e 100644
--- a/arch/arm/mach-mx3/mach-mx31lilly.c
+++ b/arch/arm/mach-mx3/mach-mx31lilly.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/smsc911x.h>
@@ -156,7 +157,9 @@ static int usbotg_init(struct platform_device *pdev)
gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE2), "USBH1 CS");
gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE2), 0);
- return 0;
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
}
static int usbh1_init(struct platform_device *pdev)
@@ -183,7 +186,10 @@ static int usbh1_init(struct platform_device *pdev)
mxc_iomux_set_gpr(MUX_PGP_USB_SUSPEND, true);
- return 0;
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_INTERFACE_SINGLE_UNI);
}
static int usbh2_init(struct platform_device *pdev)
@@ -220,25 +226,24 @@ static int usbh2_init(struct platform_device *pdev)
gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), "USBH2 CS");
gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), 0);
- return 0;
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
}
static struct mxc_usbh_platform_data usbotg_pdata = {
.init = usbotg_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
static const struct mxc_usbh_platform_data usbh1_pdata __initconst = {
.init = usbh1_init,
.portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL,
- .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI,
};
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
static void lilly1131_usb_init(void)
@@ -274,8 +279,8 @@ static const struct spi_imx_master spi1_pdata __initconst = {
.num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
};
-static struct mc13783_platform_data mc13783_pdata __initdata = {
- .flags = MC13783_USE_RTC | MC13783_USE_TOUCHSCREEN,
+static struct mc13xxx_platform_data mc13783_pdata __initdata = {
+ .flags = MC13XXX_USE_RTC | MC13XXX_USE_TOUCHSCREEN,
};
static struct spi_board_info mc13783_dev __initdata = {
@@ -347,10 +352,10 @@ static struct sys_timer mx31lilly_timer = {
};
MACHINE_START(LILLY1131, "INCO startec LILLY-1131")
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = mx31lilly_board_init,
- .timer = &mx31lilly_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &mx31lilly_timer,
+ .init_machine = mx31lilly_board_init,
MACHINE_END
-
diff --git a/arch/arm/mach-mx3/mach-mx31lite.c b/arch/arm/mach-mx3/mach-mx31lite.c
index 9e64c66396e0..81021bf8f6b3 100644
--- a/arch/arm/mach-mx3/mach-mx31lite.c
+++ b/arch/arm/mach-mx3/mach-mx31lite.c
@@ -27,6 +27,7 @@
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <linux/mtd/physmap.h>
+#include <linux/delay.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -111,9 +112,9 @@ static const struct spi_imx_master spi1_pdata __initconst = {
.num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
};
-static struct mc13783_platform_data mc13783_pdata __initdata = {
- .flags = MC13783_USE_RTC |
- MC13783_USE_REGULATOR,
+static struct mc13xxx_platform_data mc13783_pdata __initdata = {
+ .flags = MC13XXX_USE_RTC |
+ MC13XXX_USE_REGULATOR,
};
static struct spi_board_info mc13783_spi_dev __initdata = {
@@ -167,13 +168,14 @@ static int usbh2_init(struct platform_device *pdev)
gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), "USBH2 CS");
gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), 0);
- return 0;
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
}
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
#endif
@@ -227,7 +229,7 @@ void __init mx31lite_map_io(void)
static int mx31lite_baseboard;
core_param(mx31lite_baseboard, mx31lite_baseboard, int, 0444);
-static void __init mxc_board_init(void)
+static void __init mx31lite_init(void)
{
int ret;
@@ -281,9 +283,10 @@ struct sys_timer mx31lite_timer = {
MACHINE_START(MX31LITE, "LogicPD i.MX31 SOM")
/* Maintainer: Freescale Semiconductor, Inc. */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31lite_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mx31lite_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31lite_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &mx31lite_timer,
+ .init_machine = mx31lite_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c
index 1aa8d65fccbb..ecedffc8756b 100644
--- a/arch/arm/mach-mx3/mach-mx31moboard.c
+++ b/arch/arm/mach-mx3/mach-mx31moboard.c
@@ -214,7 +214,7 @@ static struct regulator_init_data cam_vreg_data = {
.consumer_supplies = cam_consumers,
};
-static struct mc13783_regulator_init_data moboard_regulators[] = {
+static struct mc13xxx_regulator_init_data moboard_regulators[] = {
{
.id = MC13783_REG_VMMC1,
.init_data = &sdhc_vreg_data,
@@ -267,12 +267,14 @@ static struct mc13783_leds_platform_data moboard_leds = {
.tc2_period = MC13783_LED_PERIOD_10MS,
};
-static struct mc13783_platform_data moboard_pmic = {
- .regulators = moboard_regulators,
- .num_regulators = ARRAY_SIZE(moboard_regulators),
+static struct mc13xxx_platform_data moboard_pmic = {
+ .regulators = {
+ .regulators = moboard_regulators,
+ .num_regulators = ARRAY_SIZE(moboard_regulators),
+ },
.leds = &moboard_leds,
- .flags = MC13783_USE_REGULATOR | MC13783_USE_RTC |
- MC13783_USE_ADC | MC13783_USE_LED,
+ .flags = MC13XXX_USE_REGULATOR | MC13XXX_USE_RTC |
+ MC13XXX_USE_ADC | MC13XXX_USE_LED,
};
static struct spi_board_info moboard_spi_board_info[] __initdata = {
@@ -401,10 +403,14 @@ static void usb_xcvr_reset(void)
}
#if defined(CONFIG_USB_ULPI)
+static int moboard_usbh2_init_hw(struct platform_device *pdev)
+{
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
+}
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
+ .init = moboard_usbh2_init_hw,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
static int __init moboard_usbh2_init(void)
@@ -503,7 +509,7 @@ core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444);
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init mx31moboard_init(void)
{
mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins),
"moboard");
@@ -564,10 +570,10 @@ struct sys_timer mx31moboard_timer = {
MACHINE_START(MX31MOBOARD, "EPFL Mobots mx31moboard")
/* Maintainer: Valentin Longchamp, EPFL Mobots group */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mx31moboard_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &mx31moboard_timer,
+ .init_machine = mx31moboard_init,
MACHINE_END
-
diff --git a/arch/arm/mach-mx3/mach-mx35_3ds.c b/arch/arm/mach-mx3/mach-mx35_3ds.c
index b1963f257c20..5f35d5c88f19 100644
--- a/arch/arm/mach-mx3/mach-mx35_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx35_3ds.c
@@ -118,8 +118,16 @@ static iomux_v3_cfg_t mx35pdk_pads[] = {
MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
+ /* I2C1 */
+ MX35_PAD_I2C1_CLK__I2C1_SCL,
+ MX35_PAD_I2C1_DAT__I2C1_SDA,
};
+static int mx35_3ds_otg_init(struct platform_device *pdev)
+{
+ return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
+}
+
/* OTG config */
static const struct fsl_usb2_platform_data usb_otg_pdata __initconst = {
.operating_mode = FSL_USB2_DR_DEVICE,
@@ -127,15 +135,20 @@ static const struct fsl_usb2_platform_data usb_otg_pdata __initconst = {
};
static struct mxc_usbh_platform_data otg_pdata __initdata = {
+ .init = mx35_3ds_otg_init,
.portsc = MXC_EHCI_MODE_UTMI,
- .flags = MXC_EHCI_INTERNAL_PHY,
};
+static int mx35_3ds_usbh_init(struct platform_device *pdev)
+{
+ return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
+ MXC_EHCI_INTERNAL_PHY);
+}
+
/* USB HOST config */
static const struct mxc_usbh_platform_data usb_host_pdata __initconst = {
+ .init = mx35_3ds_usbh_init,
.portsc = MXC_EHCI_MODE_SERIAL,
- .flags = MXC_EHCI_INTERFACE_SINGLE_UNI |
- MXC_EHCI_INTERNAL_PHY,
};
static int otg_mode_host;
@@ -153,10 +166,14 @@ static int __init mx35_3ds_otg_mode(char *options)
}
__setup("otg_mode=", mx35_3ds_otg_mode);
+static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = {
+ .bitrate = 100000,
+};
+
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init mx35_3ds_init(void)
{
mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
@@ -180,6 +197,7 @@ static void __init mxc_board_init(void)
if (mxc_expio_init(MX35_CS5_BASE_ADDR, EXPIO_PARENT_INT))
pr_warn("Init of the debugboard failed, all "
"devices on the debugboard are unusable.\n");
+ imx35_add_imx_i2c0(&mx35_3ds_i2c0_data);
}
static void __init mx35pdk_timer_init(void)
@@ -193,9 +211,10 @@ struct sys_timer mx35pdk_timer = {
MACHINE_START(MX35_3DS, "Freescale MX35PDK")
/* Maintainer: Freescale Semiconductor, Inc */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx35_map_io,
- .init_irq = mx35_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mx35pdk_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx35_map_io,
+ .init_early = imx35_init_early,
+ .init_irq = mx35_init_irq,
+ .timer = &mx35pdk_timer,
+ .init_machine = mx35_3ds_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index b752f6bc20a2..783d31b8e8a7 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -534,14 +534,24 @@ static struct platform_device pcm970_sja1000 = {
};
#if defined(CONFIG_USB_ULPI)
+static int pcm037_otg_init(struct platform_device *pdev)
+{
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static struct mxc_usbh_platform_data otg_pdata __initdata = {
+ .init = pcm037_otg_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
+static int pcm037_usbh2_init(struct platform_device *pdev)
+{
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
+ .init = pcm037_usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
#endif
@@ -568,7 +578,7 @@ __setup("otg_mode=", pcm037_otg_mode);
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init pcm037_init(void)
{
int ret;
@@ -675,9 +685,10 @@ struct sys_timer pcm037_timer = {
MACHINE_START(PCM037, "Phytec Phycore pcm037")
/* Maintainer: Pengutronix */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = mxc_board_init,
- .timer = &pcm037_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &pcm037_timer,
+ .init_machine = pcm037_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c
index fda56545d2fd..df6fb07d037e 100644
--- a/arch/arm/mach-mx3/mach-pcm037_eet.c
+++ b/arch/arm/mach-mx3/mach-pcm037_eet.c
@@ -180,9 +180,7 @@ static int __init eet_init_devices(void)
/* SPI */
spi_register_board_info(pcm037_spi_dev, ARRAY_SIZE(pcm037_spi_dev));
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
imx31_add_spi_imx0(&pcm037_spi1_pdata);
-#endif
platform_device_register(&pcm037_gpio_keys_device);
diff --git a/arch/arm/mach-mx3/mach-pcm043.c b/arch/arm/mach-mx3/mach-pcm043.c
index bcf83fc7e701..262af17db44c 100644
--- a/arch/arm/mach-mx3/mach-pcm043.c
+++ b/arch/arm/mach-mx3/mach-pcm043.c
@@ -115,7 +115,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE
static const struct imxi2c_platform_data pcm043_i2c0_data __initconst = {
.bitrate = 50000,
};
@@ -134,7 +133,6 @@ static struct i2c_board_info pcm043_i2c_devices[] = {
I2C_BOARD_INFO("pcf8563", 0x51),
}
};
-#endif
static struct platform_device *devices[] __initdata = {
&pcm043_flash,
@@ -221,9 +219,9 @@ static iomux_v3_cfg_t pcm043_pads[] = {
MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
};
-#define AC97_GPIO_TXFS (1 * 32 + 31)
-#define AC97_GPIO_TXD (1 * 32 + 28)
-#define AC97_GPIO_RESET (1 * 32 + 0)
+#define AC97_GPIO_TXFS IMX_GPIO_NR(2, 31)
+#define AC97_GPIO_TXD IMX_GPIO_NR(2, 28)
+#define AC97_GPIO_RESET IMX_GPIO_NR(2, 0)
static void pcm043_ac97_warm_reset(struct snd_ac97 *ac97)
{
@@ -308,17 +306,27 @@ pcm037_nand_board_info __initconst = {
};
#if defined(CONFIG_USB_ULPI)
+static int pcm043_otg_init(struct platform_device *pdev)
+{
+ return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
+}
+
static struct mxc_usbh_platform_data otg_pdata __initdata = {
+ .init = pcm043_otg_init,
.portsc = MXC_EHCI_MODE_UTMI,
- .flags = MXC_EHCI_INTERFACE_DIFF_UNI,
};
+#endif
+
+static int pcm043_usbh1_init(struct platform_device *pdev)
+{
+ return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
+ MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN);
+}
static const struct mxc_usbh_platform_data usbh1_pdata __initconst = {
+ .init = pcm043_usbh1_init,
.portsc = MXC_EHCI_MODE_SERIAL,
- .flags = MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY |
- MXC_EHCI_IPPUE_DOWN,
};
-#endif
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.operating_mode = FSL_USB2_DR_DEVICE,
@@ -343,7 +351,7 @@ __setup("otg_mode=", pcm043_otg_mode);
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init pcm043_init(void)
{
mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads));
@@ -369,12 +377,10 @@ static void __init mxc_board_init(void)
imx35_add_imx_uart1(&uart_pdata);
-#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE
i2c_register_board_info(0, pcm043_i2c_devices,
ARRAY_SIZE(pcm043_i2c_devices));
imx35_add_imx_i2c0(&pcm043_i2c0_data);
-#endif
mxc_register_device(&mx3_ipu, &mx3_ipu_data);
mxc_register_device(&mx3_fb, &mx3fb_pdata);
@@ -386,9 +392,9 @@ static void __init mxc_board_init(void)
imx35_add_mxc_ehci_otg(&otg_pdata);
}
-
- imx35_add_mxc_ehci_hs(&usbh1_pdata);
#endif
+ imx35_add_mxc_ehci_hs(&usbh1_pdata);
+
if (!otg_mode_host)
imx35_add_fsl_usb2_udc(&otg_device_pdata);
@@ -407,10 +413,10 @@ struct sys_timer pcm043_timer = {
MACHINE_START(PCM043, "Phytec Phycore pcm043")
/* Maintainer: Pengutronix */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx35_map_io,
- .init_irq = mx35_init_irq,
- .init_machine = mxc_board_init,
- .timer = &pcm043_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx35_map_io,
+ .init_early = imx35_init_early,
+ .init_irq = mx35_init_irq,
+ .timer = &pcm043_timer,
+ .init_machine = pcm043_init,
MACHINE_END
-
diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c
index fd1050c40964..17f758b77623 100644
--- a/arch/arm/mach-mx3/mach-qong.c
+++ b/arch/arm/mach-mx3/mach-qong.c
@@ -54,10 +54,6 @@
#define QONG_FPGA_IRQ IOMUX_TO_IRQ(MX31_PIN_DTR_DCE1)
-/*
- * This file contains the board-specific initialization routines.
- */
-
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -247,7 +243,7 @@ static void __init qong_init_fpga(void)
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init qong_init(void)
{
mxc_init_imx_uart();
qong_init_nor_mtd();
@@ -263,16 +259,12 @@ static struct sys_timer qong_timer = {
.init = qong_timer_init,
};
-/*
- * The following uses standard kernel macros defined in arch.h in order to
- * initialize __mach_desc_QONG data structure.
- */
-
MACHINE_START(QONG, "Dave/DENX QongEVB-LITE")
/* Maintainer: DENX Software Engineering GmbH */
- .boot_params = MX3x_PHYS_OFFSET + 0x100,
- .map_io = mx31_map_io,
- .init_irq = mx31_init_irq,
- .init_machine = mxc_board_init,
- .timer = &qong_timer,
+ .boot_params = MX3x_PHYS_OFFSET + 0x100,
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .timer = &qong_timer,
+ .init_machine = qong_init,
MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-vpr200.c b/arch/arm/mach-mx3/mach-vpr200.c
new file mode 100644
index 000000000000..2cf390fbd980
--- /dev/null
+++ b/arch/arm/mach-mx3/mach-vpr200.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2009 Marc Kleine-Budde, Pengutronix
+ * Copyright 2010 Creative Product Design
+ *
+ * Derived from mx35 3stack.
+ * Original author: Fabio Estevam <fabio.estevam@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/memory.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include <mach/iomux-mx35.h>
+#include <mach/irqs.h>
+#include <mach/ipu.h>
+#include <mach/mx3fb.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/gpio_keys.h>
+
+#include "devices-imx35.h"
+#include "devices.h"
+
+#define GPIO_LCDPWR IMX_GPIO_NR(1, 2)
+#define GPIO_PMIC_INT IMX_GPIO_NR(2, 0)
+
+#define GPIO_BUTTON1 IMX_GPIO_NR(1, 4)
+#define GPIO_BUTTON2 IMX_GPIO_NR(1, 5)
+#define GPIO_BUTTON3 IMX_GPIO_NR(1, 7)
+#define GPIO_BUTTON4 IMX_GPIO_NR(1, 8)
+#define GPIO_BUTTON5 IMX_GPIO_NR(1, 9)
+#define GPIO_BUTTON6 IMX_GPIO_NR(1, 10)
+#define GPIO_BUTTON7 IMX_GPIO_NR(1, 11)
+#define GPIO_BUTTON8 IMX_GPIO_NR(1, 12)
+
+static const struct fb_videomode fb_modedb[] = {
+ {
+ /* 800x480 @ 60 Hz */
+ .name = "PT0708048",
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(33260),
+ .left_margin = 50,
+ .right_margin = 156,
+ .upper_margin = 10,
+ .lower_margin = 10,
+ .hsync_len = 1, /* note: DE only display */
+ .vsync_len = 1, /* note: DE only display */
+ .sync = FB_SYNC_CLK_IDLE_EN | FB_SYNC_OE_ACT_HIGH,
+ .vmode = FB_VMODE_NONINTERLACED,
+ .flag = 0,
+ }, {
+ /* 800x480 @ 60 Hz */
+ .name = "CTP-CLAA070LC0ACW",
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(27000),
+ .left_margin = 50,
+ .right_margin = 50, /* whole line should have 900 clocks */
+ .upper_margin = 10,
+ .lower_margin = 10, /* whole frame should have 500 lines */
+ .hsync_len = 1, /* note: DE only display */
+ .vsync_len = 1, /* note: DE only display */
+ .sync = FB_SYNC_CLK_IDLE_EN | FB_SYNC_OE_ACT_HIGH,
+ .vmode = FB_VMODE_NONINTERLACED,
+ .flag = 0,
+ }
+};
+
+static struct ipu_platform_data mx3_ipu_data = {
+ .irq_base = MXC_IPU_IRQ_START,
+};
+
+static struct mx3fb_platform_data mx3fb_pdata = {
+ .dma_dev = &mx3_ipu.dev,
+ .name = "PT0708048",
+ .mode = fb_modedb,
+ .num_modes = ARRAY_SIZE(fb_modedb),
+};
+
+static struct physmap_flash_data vpr200_flash_data = {
+ .width = 2,
+};
+
+static struct resource vpr200_flash_resource = {
+ .start = MX35_CS0_BASE_ADDR,
+ .end = MX35_CS0_BASE_ADDR + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device vpr200_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpr200_flash_data,
+ },
+ .resource = &vpr200_flash_resource,
+ .num_resources = 1,
+};
+
+static const struct mxc_nand_platform_data
+ vpr200_nand_board_info __initconst = {
+ .width = 1,
+ .hw_ecc = 1,
+ .flash_bbt = 1,
+};
+
+#define VPR_KEY_DEBOUNCE 500
+static struct gpio_keys_button vpr200_gpio_keys_table[] = {
+ {KEY_F2, GPIO_BUTTON1, 1, "vpr-keys: F2", 0, VPR_KEY_DEBOUNCE},
+ {KEY_F3, GPIO_BUTTON2, 1, "vpr-keys: F3", 0, VPR_KEY_DEBOUNCE},
+ {KEY_F4, GPIO_BUTTON3, 1, "vpr-keys: F4", 0, VPR_KEY_DEBOUNCE},
+ {KEY_F5, GPIO_BUTTON4, 1, "vpr-keys: F5", 0, VPR_KEY_DEBOUNCE},
+ {KEY_F6, GPIO_BUTTON5, 1, "vpr-keys: F6", 0, VPR_KEY_DEBOUNCE},
+ {KEY_F7, GPIO_BUTTON6, 1, "vpr-keys: F7", 0, VPR_KEY_DEBOUNCE},
+ {KEY_F8, GPIO_BUTTON7, 1, "vpr-keys: F8", 1, VPR_KEY_DEBOUNCE},
+ {KEY_F9, GPIO_BUTTON8, 1, "vpr-keys: F9", 1, VPR_KEY_DEBOUNCE},
+};
+
+static struct gpio_keys_platform_data vpr200_gpio_keys_data = {
+ .buttons = vpr200_gpio_keys_table,
+ .nbuttons = ARRAY_SIZE(vpr200_gpio_keys_table),
+};
+
+static struct platform_device vpr200_device_gpiokeys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &vpr200_gpio_keys_data,
+ }
+};
+
+static struct mc13xxx_platform_data vpr200_pmic = {
+ .flags = MC13XXX_USE_ADC | MC13XXX_USE_TOUCHSCREEN,
+};
+
+static const struct imxi2c_platform_data vpr200_i2c0_data __initconst = {
+ .bitrate = 50000,
+};
+
+static struct at24_platform_data vpr200_eeprom = {
+ .byte_len = 2048 / 8,
+ .page_size = 1,
+};
+
+static struct i2c_board_info vpr200_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("at24", 0x50), /* E0=0, E1=0, E2=0 */
+ .platform_data = &vpr200_eeprom,
+ }, {
+ I2C_BOARD_INFO("mc13892", 0x08),
+ .platform_data = &vpr200_pmic,
+ .irq = gpio_to_irq(GPIO_PMIC_INT),
+ }
+};
+
+static iomux_v3_cfg_t vpr200_pads[] = {
+ /* UART1 */
+ MX35_PAD_TXD1__UART1_TXD_MUX,
+ MX35_PAD_RXD1__UART1_RXD_MUX,
+ /* UART3 */
+ MX35_PAD_ATA_DATA10__UART3_RXD_MUX,
+ MX35_PAD_ATA_DATA11__UART3_TXD_MUX,
+ /* FEC */
+ MX35_PAD_FEC_TX_CLK__FEC_TX_CLK,
+ MX35_PAD_FEC_RX_CLK__FEC_RX_CLK,
+ MX35_PAD_FEC_RX_DV__FEC_RX_DV,
+ MX35_PAD_FEC_COL__FEC_COL,
+ MX35_PAD_FEC_RDATA0__FEC_RDATA_0,
+ MX35_PAD_FEC_TDATA0__FEC_TDATA_0,
+ MX35_PAD_FEC_TX_EN__FEC_TX_EN,
+ MX35_PAD_FEC_MDC__FEC_MDC,
+ MX35_PAD_FEC_MDIO__FEC_MDIO,
+ MX35_PAD_FEC_TX_ERR__FEC_TX_ERR,
+ MX35_PAD_FEC_RX_ERR__FEC_RX_ERR,
+ MX35_PAD_FEC_CRS__FEC_CRS,
+ MX35_PAD_FEC_RDATA1__FEC_RDATA_1,
+ MX35_PAD_FEC_TDATA1__FEC_TDATA_1,
+ MX35_PAD_FEC_RDATA2__FEC_RDATA_2,
+ MX35_PAD_FEC_TDATA2__FEC_TDATA_2,
+ MX35_PAD_FEC_RDATA3__FEC_RDATA_3,
+ MX35_PAD_FEC_TDATA3__FEC_TDATA_3,
+ /* Display */
+ MX35_PAD_LD0__IPU_DISPB_DAT_0,
+ MX35_PAD_LD1__IPU_DISPB_DAT_1,
+ MX35_PAD_LD2__IPU_DISPB_DAT_2,
+ MX35_PAD_LD3__IPU_DISPB_DAT_3,
+ MX35_PAD_LD4__IPU_DISPB_DAT_4,
+ MX35_PAD_LD5__IPU_DISPB_DAT_5,
+ MX35_PAD_LD6__IPU_DISPB_DAT_6,
+ MX35_PAD_LD7__IPU_DISPB_DAT_7,
+ MX35_PAD_LD8__IPU_DISPB_DAT_8,
+ MX35_PAD_LD9__IPU_DISPB_DAT_9,
+ MX35_PAD_LD10__IPU_DISPB_DAT_10,
+ MX35_PAD_LD11__IPU_DISPB_DAT_11,
+ MX35_PAD_LD12__IPU_DISPB_DAT_12,
+ MX35_PAD_LD13__IPU_DISPB_DAT_13,
+ MX35_PAD_LD14__IPU_DISPB_DAT_14,
+ MX35_PAD_LD15__IPU_DISPB_DAT_15,
+ MX35_PAD_LD16__IPU_DISPB_DAT_16,
+ MX35_PAD_LD17__IPU_DISPB_DAT_17,
+ MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK,
+ MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY,
+ MX35_PAD_CONTRAST__IPU_DISPB_CONTR,
+ /* LCD Enable */
+ MX35_PAD_D3_VSYNC__GPIO1_2,
+ /* USBOTG */
+ MX35_PAD_USBOTG_PWR__USB_TOP_USBOTG_PWR,
+ MX35_PAD_USBOTG_OC__USB_TOP_USBOTG_OC,
+ /* SDCARD */
+ MX35_PAD_SD1_CMD__ESDHC1_CMD,
+ MX35_PAD_SD1_CLK__ESDHC1_CLK,
+ MX35_PAD_SD1_DATA0__ESDHC1_DAT0,
+ MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
+ MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
+ MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
+ /* PMIC */
+ MX35_PAD_GPIO2_0__GPIO2_0,
+ /* GPIO keys */
+ MX35_PAD_SCKR__GPIO1_4,
+ MX35_PAD_COMPARE__GPIO1_5,
+ MX35_PAD_SCKT__GPIO1_7,
+ MX35_PAD_FST__GPIO1_8,
+ MX35_PAD_HCKT__GPIO1_9,
+ MX35_PAD_TX5_RX0__GPIO1_10,
+ MX35_PAD_TX4_RX1__GPIO1_11,
+ MX35_PAD_TX3_RX2__GPIO1_12,
+};
+
+/* USB Device config */
+static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_UTMI,
+ .workaround = FLS_USB2_WORKAROUND_ENGCM09152,
+};
+
+/* USB HOST config */
+static const struct mxc_usbh_platform_data usb_host_pdata __initconst = {
+ .portsc = MXC_EHCI_MODE_SERIAL,
+ .flags = MXC_EHCI_INTERFACE_SINGLE_UNI |
+ MXC_EHCI_INTERNAL_PHY,
+};
+
+static struct platform_device *devices[] __initdata = {
+ &vpr200_flash,
+ &vpr200_device_gpiokeys,
+};
+
+/*
+ * Board specific initialization.
+ */
+static void __init vpr200_board_init(void)
+{
+ mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads));
+
+ imx35_add_fec(NULL);
+ imx35_add_imx2_wdt(NULL);
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ if (0 != gpio_request(GPIO_LCDPWR, "LCDPWR"))
+ printk(KERN_WARNING "vpr200: Couldn't get LCDPWR gpio\n");
+ else
+ gpio_direction_output(GPIO_LCDPWR, 0);
+
+ if (0 != gpio_request(GPIO_PMIC_INT, "PMIC_INT"))
+ printk(KERN_WARNING "vpr200: Couldn't get PMIC_INT gpio\n");
+ else
+ gpio_direction_input(GPIO_PMIC_INT);
+
+ imx35_add_imx_uart0(NULL);
+ imx35_add_imx_uart2(NULL);
+
+ mxc_register_device(&mx3_ipu, &mx3_ipu_data);
+ mxc_register_device(&mx3_fb, &mx3fb_pdata);
+
+ imx35_add_fsl_usb2_udc(&otg_device_pdata);
+ imx35_add_mxc_ehci_hs(&usb_host_pdata);
+
+ imx35_add_mxc_nand(&vpr200_nand_board_info);
+ imx35_add_sdhci_esdhc_imx(0, NULL);
+
+ i2c_register_board_info(0, vpr200_i2c_devices,
+ ARRAY_SIZE(vpr200_i2c_devices));
+
+ imx35_add_imx_i2c0(&vpr200_i2c0_data);
+}
+
+static void __init vpr200_timer_init(void)
+{
+ mx35_clocks_init();
+}
+
+struct sys_timer vpr200_timer = {
+ .init = vpr200_timer_init,
+};
+
+MACHINE_START(VPR200, "VPR200")
+ /* Maintainer: Creative Product Design */
+ .map_io = mx35_map_io,
+ .init_early = imx35_init_early,
+ .init_irq = mx35_init_irq,
+ .timer = &vpr200_timer,
+ .init_machine = vpr200_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-mx3/mm.c b/arch/arm/mach-mx3/mm.c
index 47118f760244..eefd4cf0a693 100644
--- a/arch/arm/mach-mx3/mm.c
+++ b/arch/arm/mach-mx3/mm.c
@@ -28,14 +28,6 @@
#include <mach/hardware.h>
#include <mach/iomux-v3.h>
-/*!
- * @file mm.c
- *
- * @brief This file creates static virtual to physical mappings, common to all MX3 boards.
- *
- * @ingroup Memory
- */
-
#ifdef CONFIG_SOC_IMX31
static struct map_desc mx31_io_desc[] __initdata = {
imx_map_entry(MX31, X_MEMC, MT_DEVICE),
@@ -52,10 +44,13 @@ static struct map_desc mx31_io_desc[] __initdata = {
*/
void __init mx31_map_io(void)
{
+ iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
+}
+
+void __init imx31_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX31);
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
-
- iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
}
int imx31_register_gpios(void);
@@ -77,11 +72,14 @@ static struct map_desc mx35_io_desc[] __initdata = {
void __init mx35_map_io(void)
{
+ iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
+}
+
+void __init imx35_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX35);
mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
-
- iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
}
int imx35_register_gpios(void);
@@ -129,4 +127,3 @@ static int mxc_init_l2x0(void)
arch_initcall(mxc_init_l2x0);
#endif
-
diff --git a/arch/arm/mach-mx3/mx31moboard-devboard.c b/arch/arm/mach-mx3/mx31moboard-devboard.c
index 94a0b9e4b7f3..6410b9c48a02 100644
--- a/arch/arm/mach-mx3/mx31moboard-devboard.c
+++ b/arch/arm/mach-mx3/mx31moboard-devboard.c
@@ -15,6 +15,7 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -149,7 +150,10 @@ static int devboard_usbh1_hw_init(struct platform_device *pdev)
mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_SFS6, USB_PAD_CFG);
- return 0;
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_INTERFACE_SINGLE_UNI);
}
#define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B)
@@ -187,7 +191,6 @@ static int devboard_isp1105_set_vbus(struct otg_transceiver *otg, bool on)
static struct mxc_usbh_platform_data usbh1_pdata __initdata = {
.init = devboard_usbh1_hw_init,
.portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL,
- .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI,
};
static int __init devboard_usbh1_init(void)
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index f449a97ae1a2..57f7b00cb709 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -265,7 +265,10 @@ static int marxbot_usbh1_hw_init(struct platform_device *pdev)
mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_SFS6, USB_PAD_CFG);
- return 0;
+ mdelay(10);
+
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_INTERFACE_SINGLE_UNI);
}
#define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B)
@@ -303,7 +306,6 @@ static int marxbot_isp1105_set_vbus(struct otg_transceiver *otg, bool on)
static struct mxc_usbh_platform_data usbh1_pdata __initdata = {
.init = marxbot_usbh1_hw_init,
.portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL,
- .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI,
};
static int __init marxbot_usbh1_init(void)
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index bbec3c82264a..87d556f40ecf 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -123,9 +123,14 @@ static const struct fsl_usb2_platform_data usb_pdata __initconst = {
#if defined(CONFIG_USB_ULPI)
+static int smartbot_otg_init(struct platform_device *pdev)
+{
+ return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
+}
+
static struct mxc_usbh_platform_data otg_host_pdata __initdata = {
+ .init = smartbot_otg_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
- .flags = MXC_EHCI_POWER_PINS_ENABLED,
};
static int __init smartbot_otg_host_init(void)
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
index de4fa992fc3e..03ec6e90bb46 100644
--- a/arch/arm/mach-mx5/Kconfig
+++ b/arch/arm/mach-mx5/Kconfig
@@ -50,6 +50,7 @@ config MACH_MX51_BABBAGE
config MACH_MX51_3DS
bool "Support MX51PDK (3DS)"
select SOC_IMX51
+ select IMX_HAVE_PLATFORM_IMX2_WDT
select IMX_HAVE_PLATFORM_IMX_KEYPAD
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
@@ -112,19 +113,32 @@ config MACH_EUKREA_MBIMXSD51_BASEBOARD
endchoice
-config MACH_MX51_EFIKAMX
- bool "Support MX51 Genesi Efika MX nettop"
+config MX51_EFIKA_COMMON
+ bool
select SOC_IMX51
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
select IMX_HAVE_PLATFORM_SPI_IMX
+ select MXC_ULPI if USB_ULPI
+
+config MACH_MX51_EFIKAMX
+ bool "Support MX51 Genesi Efika MX nettop"
+ select MX51_EFIKA_COMMON
help
Include support for Genesi Efika MX nettop. This includes specific
configurations for the board and its peripherals.
+config MACH_MX51_EFIKASB
+ bool "Support MX51 Genesi Efika Smartbook"
+ select MX51_EFIKA_COMMON
+ help
+ Include support for Genesi Efika Smartbook. This includes specific
+ configurations for the board and its peripherals.
+
config MACH_MX53_EVK
bool "Support MX53 EVK platforms"
select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
@@ -136,6 +150,8 @@ config MACH_MX53_EVK
config MACH_MX53_SMD
bool "Support MX53 SMD platforms"
select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
help
Include support for MX53 SMD platform. This includes specific
@@ -144,7 +160,10 @@ config MACH_MX53_SMD
config MACH_MX53_LOCO
bool "Support MX53 LOCO platforms"
select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
help
Include support for MX53 LOCO platform. This includes specific
configurations for the board and its peripherals.
@@ -157,6 +176,7 @@ config MACH_MX50_RDP
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
select IMX_HAVE_PLATFORM_SPI_IMX
+ select IMX_HAVE_PLATFORM_FEC
help
Include support for MX50 reference design platform (RDP) board. This
includes specific configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
index 0d43be98e51c..4f63048be3ca 100644
--- a/arch/arm/mach-mx5/Makefile
+++ b/arch/arm/mach-mx5/Makefile
@@ -3,7 +3,7 @@
#
# Object file lists.
-obj-y := cpu.o mm.o clock-mx51-mx53.o devices.o
+obj-y := cpu.o mm.o clock-mx51-mx53.o devices.o ehci.o
obj-$(CONFIG_SOC_IMX50) += mm-mx50.o
obj-$(CONFIG_CPU_FREQ_IMX) += cpu_op-mx51.o
@@ -16,5 +16,7 @@ obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
+obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
obj-$(CONFIG_MACH_MX51_EFIKAMX) += board-mx51_efikamx.o
+obj-$(CONFIG_MACH_MX51_EFIKASB) += board-mx51_efikasb.o
obj-$(CONFIG_MACH_MX50_RDP) += board-mx50_rdp.o
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index f8652ef25f85..d0296a94c475 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -60,7 +60,6 @@
#define MX51_USB_PLL_DIV_19_2_MHZ 0x01
#define MX51_USB_PLL_DIV_24_MHZ 0x02
-#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x400000),
@@ -105,12 +104,9 @@ static struct platform_device serial_device = {
.platform_data = serial_platform_data,
},
};
-#endif
static struct platform_device *devices[] __initdata = {
-#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
&serial_device,
-#endif
};
static iomux_v3_cfg_t eukrea_cpuimx51_pads[] = {
@@ -188,7 +184,10 @@ static int initialize_otg_port(struct platform_device *pdev)
v |= MX51_USB_PLL_DIV_19_2_MHZ;
__raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
iounmap(usb_base);
- return 0;
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(0, MXC_EHCI_INTERNAL_PHY);
}
static int initialize_usbh1_port(struct platform_device *pdev)
@@ -206,13 +205,16 @@ static int initialize_usbh1_port(struct platform_device *pdev)
v = __raw_readl(usbother_base + MX51_USB_CTRL_1_OFFSET);
__raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN, usbother_base + MX51_USB_CTRL_1_OFFSET);
iounmap(usb_base);
- return 0;
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(1, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_ITC_NO_THRESHOLD);
}
static struct mxc_usbh_platform_data dr_utmi_config = {
.init = initialize_otg_port,
.portsc = MXC_EHCI_UTMI_16BIT,
- .flags = MXC_EHCI_INTERNAL_PHY,
};
static struct fsl_usb2_platform_data usb_pdata = {
@@ -223,7 +225,6 @@ static struct fsl_usb2_platform_data usb_pdata = {
static struct mxc_usbh_platform_data usbh1_config = {
.init = initialize_usbh1_port,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = (MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_ITC_NO_THRESHOLD),
};
static int otg_mode_host;
@@ -298,7 +299,8 @@ MACHINE_START(EUKREA_CPUIMX51, "Eukrea CPUIMX51 Module")
/* Maintainer: Eric Bénard <eric@eukrea.com> */
.boot_params = MX51_PHYS_OFFSET + 0x100,
.map_io = mx51_map_io,
+ .init_early = imx51_init_early,
.init_irq = mx51_init_irq,
- .init_machine = eukrea_cpuimx51_init,
.timer = &mxc_timer,
+ .init_machine = eukrea_cpuimx51_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c
index ad931895d8b6..29b180823bf5 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-mx5/board-cpuimx51sd.c
@@ -42,6 +42,7 @@
#include "devices-imx51.h"
#include "devices.h"
+#include "cpu_op-mx51.h"
#define USBH1_RST IMX_GPIO_NR(2, 28)
#define ETH_RST IMX_GPIO_NR(2, 31)
@@ -109,7 +110,7 @@ static iomux_v3_cfg_t eukrea_cpuimx51sd_pads[] = {
/* Touchscreen */
/* IRQ */
- _MX51_PAD_CSI1_D8__GPIO3_12 | MUX_PAD_CTRL(PAD_CTL_PUS_22K_UP |
+ _MX51_PAD_GPIO_NAND__GPIO_NAND | MUX_PAD_CTRL(PAD_CTL_PUS_22K_UP |
PAD_CTL_PKE | PAD_CTL_SRE_FAST |
PAD_CTL_DSE_HIGH | PAD_CTL_PUE | PAD_CTL_HYS),
};
@@ -118,15 +119,9 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-static int ts_get_pendown_state(void)
-{
- return gpio_get_value(TSC2007_IRQGPIO) ? 0 : 1;
-}
-
static struct tsc2007_platform_data tsc2007_info = {
.model = 2007,
.x_plate_ohms = 180,
- .get_pendown_state = ts_get_pendown_state,
};
static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = {
@@ -167,7 +162,10 @@ static int initialize_otg_port(struct platform_device *pdev)
v |= MX51_USB_PLL_DIV_19_2_MHZ;
__raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
iounmap(usb_base);
- return 0;
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(0, MXC_EHCI_INTERNAL_PHY);
}
static int initialize_usbh1_port(struct platform_device *pdev)
@@ -186,13 +184,16 @@ static int initialize_usbh1_port(struct platform_device *pdev)
__raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN,
usbother_base + MX51_USB_CTRL_1_OFFSET);
iounmap(usb_base);
- return 0;
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(1, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_ITC_NO_THRESHOLD);
}
static struct mxc_usbh_platform_data dr_utmi_config = {
.init = initialize_otg_port,
.portsc = MXC_EHCI_UTMI_16BIT,
- .flags = MXC_EHCI_INTERNAL_PHY,
};
static struct fsl_usb2_platform_data usb_pdata = {
@@ -203,7 +204,6 @@ static struct fsl_usb2_platform_data usb_pdata = {
static struct mxc_usbh_platform_data usbh1_config = {
.init = initialize_usbh1_port,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = (MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_ITC_NO_THRESHOLD),
};
static int otg_mode_host;
@@ -242,7 +242,7 @@ static struct mcp251x_platform_data mcp251x_info = {
static struct spi_board_info cpuimx51sd_spi_device[] = {
{
.modalias = "mcp2515",
- .max_speed_hz = 6500000,
+ .max_speed_hz = 10000000,
.bus_num = 0,
.mode = SPI_MODE_0,
.chip_select = 0,
@@ -269,6 +269,10 @@ static void __init eukrea_cpuimx51sd_init(void)
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads,
ARRAY_SIZE(eukrea_cpuimx51sd_pads));
+#if defined(CONFIG_CPU_FREQ_IMX)
+ get_cpu_op = mx51_get_cpu_op;
+#endif
+
imx51_add_imx_uart(0, &uart_pdata);
imx51_add_mxc_nand(&eukrea_cpuimx51sd_nand_board_info);
@@ -329,7 +333,8 @@ MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD")
/* Maintainer: Eric Bénard <eric@eukrea.com> */
.boot_params = MX51_PHYS_OFFSET + 0x100,
.map_io = mx51_map_io,
+ .init_early = imx51_init_early,
.init_irq = mx51_init_irq,
- .init_machine = eukrea_cpuimx51sd_init,
.timer = &mxc_timer,
+ .init_machine = eukrea_cpuimx51sd_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c
index fd32e4c450e8..dedf7f2d6d0f 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-mx5/board-mx50_rdp.c
@@ -35,7 +35,10 @@
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include "devices-mx50.h"
+#include "devices-imx50.h"
+
+#define FEC_EN IMX_GPIO_NR(6, 23)
+#define FEC_RESET_B IMX_GPIO_NR(4, 12)
static iomux_v3_cfg_t mx50_rdp_pads[] __initdata = {
/* SD1 */
@@ -102,7 +105,7 @@ static iomux_v3_cfg_t mx50_rdp_pads[] __initdata = {
MX50_PAD_I2C3_SCL__USBOTG_OC,
MX50_PAD_SSI_RXC__FEC_MDIO,
- MX50_PAD_SSI_RXC__FEC_MDIO,
+ MX50_PAD_SSI_RXFS__FEC_MDC,
MX50_PAD_DISP_D0__FEC_TXCLK,
MX50_PAD_DISP_D1__FEC_RX_ER,
MX50_PAD_DISP_D2__FEC_RX_DV,
@@ -111,7 +114,6 @@ static iomux_v3_cfg_t mx50_rdp_pads[] __initdata = {
MX50_PAD_DISP_D5__FEC_TX_EN,
MX50_PAD_DISP_D6__FEC_TXD1,
MX50_PAD_DISP_D7__FEC_TXD0,
- MX50_PAD_SSI_RXFS__FEC_MDC,
MX50_PAD_I2C3_SDA__GPIO_6_23,
MX50_PAD_ECSPI1_SCLK__GPIO_4_12,
@@ -168,6 +170,24 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
+static const struct fec_platform_data fec_data __initconst = {
+ .phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static inline void mx50_rdp_fec_reset(void)
+{
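+	/* drive FEC_EN low and hold the active-low FEC reset for ~1 ms before releasing it */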
+ gpio_request(FEC_EN, "fec-en");
+ gpio_direction_output(FEC_EN, 0);
+ gpio_request(FEC_RESET_B, "fec-reset_b");
+ gpio_direction_output(FEC_RESET_B, 0);
+ msleep(1);
+ gpio_set_value(FEC_RESET_B, 1);
+}
+
+static const struct imxi2c_platform_data i2c_data __initconst = {
+ .bitrate = 100000,
+};
+
/*
* Board specific initialization.
*/
@@ -178,6 +198,11 @@ static void __init mx50_rdp_board_init(void)
imx50_add_imx_uart(0, &uart_pdata);
imx50_add_imx_uart(1, &uart_pdata);
+ mx50_rdp_fec_reset();
+ imx50_add_fec(&fec_data);
+ imx50_add_imx_i2c(0, &i2c_data);
+ imx50_add_imx_i2c(1, &i2c_data);
+ imx50_add_imx_i2c(2, &i2c_data);
}
static void __init mx50_rdp_timer_init(void)
@@ -191,7 +216,8 @@ static struct sys_timer mx50_rdp_timer = {
MACHINE_START(MX50_RDP, "Freescale MX50 Reference Design Platform")
.map_io = mx50_map_io,
+ .init_early = imx50_init_early,
.init_irq = mx50_init_irq,
- .init_machine = mx50_rdp_board_init,
.timer = &mx50_rdp_timer,
+ .init_machine = mx50_rdp_board_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 49d644842379..63dfbeafbc1e 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -71,24 +71,10 @@ static iomux_v3_cfg_t mx51_3ds_pads[] = {
};
/* Serial ports */
-#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-static inline void mxc_init_imx_uart(void)
-{
- imx51_add_imx_uart(0, &uart_pdata);
- imx51_add_imx_uart(1, &uart_pdata);
- imx51_add_imx_uart(2, &uart_pdata);
-}
-#else /* !SERIAL_IMX */
-static inline void mxc_init_imx_uart(void)
-{
-}
-#endif /* SERIAL_IMX */
-
-#if defined(CONFIG_KEYBOARD_IMX) || defined(CONFIG_KEYBOARD_IMX_MODULE)
static int mx51_3ds_board_keymap[] = {
KEY(0, 0, KEY_1),
KEY(0, 1, KEY_2),
@@ -124,16 +110,6 @@ static const struct matrix_keymap_data mx51_3ds_map_data __initconst = {
.keymap_size = ARRAY_SIZE(mx51_3ds_board_keymap),
};
-static void mxc_init_keypad(void)
-{
- imx51_add_imx_keypad(&mx51_3ds_map_data);
-}
-#else
-static inline void mxc_init_keypad(void)
-{
-}
-#endif
-
static int mx51_3ds_spi2_cs[] = {
MXC_SPI_CS(0),
MX51_3DS_ECSPI2_CS,
@@ -157,11 +133,14 @@ static struct spi_board_info mx51_3ds_spi_nor_device[] = {
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init mx51_3ds_init(void)
{
mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads,
ARRAY_SIZE(mx51_3ds_pads));
- mxc_init_imx_uart();
+
+ imx51_add_imx_uart(0, &uart_pdata);
+ imx51_add_imx_uart(1, &uart_pdata);
+ imx51_add_imx_uart(2, &uart_pdata);
imx51_add_ecspi(1, &mx51_3ds_ecspi2_pdata);
spi_register_board_info(mx51_3ds_spi_nor_device,
@@ -172,7 +151,8 @@ static void __init mxc_board_init(void)
"devices on the board are unusable.\n");
imx51_add_sdhci_esdhc_imx(0, NULL);
- mxc_init_keypad();
+ imx51_add_imx_keypad(&mx51_3ds_map_data);
+ imx51_add_imx2_wdt(0, NULL);
}
static void __init mx51_3ds_timer_init(void)
@@ -180,15 +160,16 @@ static void __init mx51_3ds_timer_init(void)
mx51_clocks_init(32768, 24000000, 22579200, 0);
}
-static struct sys_timer mxc_timer = {
- .init = mx51_3ds_timer_init,
+static struct sys_timer mx51_3ds_timer = {
+ .init = mx51_3ds_timer_init,
};
MACHINE_START(MX51_3DS, "Freescale MX51 3-Stack Board")
/* Maintainer: Freescale Semiconductor, Inc. */
.boot_params = MX51_PHYS_OFFSET + 0x100,
.map_io = mx51_map_io,
+ .init_early = imx51_init_early,
.init_irq = mx51_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mxc_timer,
+ .timer = &mx51_3ds_timer,
+ .init_machine = mx51_3ds_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 1d231e84107c..b2ecd194e76d 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -161,23 +161,10 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
};
/* Serial ports */
-#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
-static inline void mxc_init_imx_uart(void)
-{
- imx51_add_imx_uart(0, &uart_pdata);
- imx51_add_imx_uart(1, &uart_pdata);
- imx51_add_imx_uart(2, &uart_pdata);
-}
-#else /* !SERIAL_IMX */
-static inline void mxc_init_imx_uart(void)
-{
-}
-#endif /* SERIAL_IMX */
-
static const struct imxi2c_platform_data babbage_i2c_data __initconst = {
.bitrate = 100000,
};
@@ -272,7 +259,10 @@ static int initialize_otg_port(struct platform_device *pdev)
v |= MX51_USB_PLL_DIV_19_2_MHZ;
__raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
iounmap(usb_base);
- return 0;
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(0, MXC_EHCI_INTERNAL_PHY);
}
static int initialize_usbh1_port(struct platform_device *pdev)
@@ -290,13 +280,16 @@ static int initialize_usbh1_port(struct platform_device *pdev)
v = __raw_readl(usbother_base + MX51_USB_CTRL_1_OFFSET);
__raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN, usbother_base + MX51_USB_CTRL_1_OFFSET);
iounmap(usb_base);
- return 0;
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(1, MXC_EHCI_POWER_PINS_ENABLED |
+ MXC_EHCI_ITC_NO_THRESHOLD);
}
static struct mxc_usbh_platform_data dr_utmi_config = {
.init = initialize_otg_port,
.portsc = MXC_EHCI_UTMI_16BIT,
- .flags = MXC_EHCI_INTERNAL_PHY,
};
static struct fsl_usb2_platform_data usb_pdata = {
@@ -307,7 +300,6 @@ static struct fsl_usb2_platform_data usb_pdata = {
static struct mxc_usbh_platform_data usbh1_config = {
.init = initialize_usbh1_port,
.portsc = MXC_EHCI_MODE_ULPI,
- .flags = (MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_ITC_NO_THRESHOLD),
};
static int otg_mode_host;
@@ -349,7 +341,7 @@ static const struct spi_imx_master mx51_babbage_spi_pdata __initconst = {
/*
* Board specific initialization.
*/
-static void __init mxc_board_init(void)
+static void __init mx51_babbage_init(void)
{
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 |
@@ -360,7 +352,11 @@ static void __init mxc_board_init(void)
#endif
mxc_iomux_v3_setup_multiple_pads(mx51babbage_pads,
ARRAY_SIZE(mx51babbage_pads));
- mxc_init_imx_uart();
+
+ imx51_add_imx_uart(0, &uart_pdata);
+ imx51_add_imx_uart(1, &uart_pdata);
+ imx51_add_imx_uart(2, &uart_pdata);
+
babbage_fec_reset();
imx51_add_fec(NULL);
@@ -399,15 +395,16 @@ static void __init mx51_babbage_timer_init(void)
mx51_clocks_init(32768, 24000000, 22579200, 0);
}
-static struct sys_timer mxc_timer = {
- .init = mx51_babbage_timer_init,
+static struct sys_timer mx51_babbage_timer = {
+ .init = mx51_babbage_timer_init,
};
MACHINE_START(MX51_BABBAGE, "Freescale MX51 Babbage Board")
/* Maintainer: Amit Kucheria <amit.kucheria@canonical.com> */
.boot_params = MX51_PHYS_OFFSET + 0x100,
.map_io = mx51_map_io,
+ .init_early = imx51_init_early,
.init_irq = mx51_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mxc_timer,
+ .timer = &mx51_babbage_timer,
+ .init_machine = mx51_babbage_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index b7946f8e8d40..acab1911cb3c 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -25,6 +25,9 @@
#include <linux/fsl_devices.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/consumer.h>
#include <mach/common.h>
#include <mach/hardware.h>
@@ -40,8 +43,7 @@
#include "devices-imx51.h"
#include "devices.h"
-
-#define MX51_USB_PLL_DIV_24_MHZ 0x01
+#include "efika.h"
#define EFIKAMX_PCBID0 IMX_GPIO_NR(3, 16)
#define EFIKAMX_PCBID1 IMX_GPIO_NR(3, 17)
@@ -53,13 +55,14 @@
#define EFIKAMX_POWER_KEY IMX_GPIO_NR(2, 31)
-#define EFIKAMX_SPI_CS0 IMX_GPIO_NR(4, 24)
-#define EFIKAMX_SPI_CS1 IMX_GPIO_NR(4, 25)
-
/* board 1.1 doesn't have same reset gpio */
#define EFIKAMX_RESET1_1 IMX_GPIO_NR(3, 2)
#define EFIKAMX_RESET IMX_GPIO_NR(1, 4)
+#define EFIKAMX_POWEROFF IMX_GPIO_NR(4, 13)
+
+#define EFIKAMX_PMIC IMX_GPIO_NR(1, 6)
+
/* the pcb id pins have pull ups. they're driven low according to board id */
#define MX51_PAD_PCBID0 IOMUX_PAD(0x518, 0x130, 3, 0x0, 0, PAD_CTL_PUS_100K_UP)
#define MX51_PAD_PCBID1 IOMUX_PAD(0x51C, 0x134, 3, 0x0, 0, PAD_CTL_PUS_100K_UP)
@@ -67,38 +70,11 @@
#define MX51_PAD_PWRKEY IOMUX_PAD(0x48c, 0x0f8, 1, 0x0, 0, PAD_CTL_PUS_100K_UP | PAD_CTL_PKE)
static iomux_v3_cfg_t mx51efikamx_pads[] = {
- /* UART1 */
- MX51_PAD_UART1_RXD__UART1_RXD,
- MX51_PAD_UART1_TXD__UART1_TXD,
- MX51_PAD_UART1_RTS__UART1_RTS,
- MX51_PAD_UART1_CTS__UART1_CTS,
/* board id */
MX51_PAD_PCBID0,
MX51_PAD_PCBID1,
MX51_PAD_PCBID2,
- /* SD 1 */
- MX51_PAD_SD1_CMD__SD1_CMD,
- MX51_PAD_SD1_CLK__SD1_CLK,
- MX51_PAD_SD1_DATA0__SD1_DATA0,
- MX51_PAD_SD1_DATA1__SD1_DATA1,
- MX51_PAD_SD1_DATA2__SD1_DATA2,
- MX51_PAD_SD1_DATA3__SD1_DATA3,
-
- /* SD 2 */
- MX51_PAD_SD2_CMD__SD2_CMD,
- MX51_PAD_SD2_CLK__SD2_CLK,
- MX51_PAD_SD2_DATA0__SD2_DATA0,
- MX51_PAD_SD2_DATA1__SD2_DATA1,
- MX51_PAD_SD2_DATA2__SD2_DATA2,
- MX51_PAD_SD2_DATA3__SD2_DATA3,
-
- /* SD/MMC WP/CD */
- MX51_PAD_GPIO1_0__SD1_CD,
- MX51_PAD_GPIO1_1__SD1_WP,
- MX51_PAD_GPIO1_7__SD2_WP,
- MX51_PAD_GPIO1_8__SD2_CD,
-
/* leds */
MX51_PAD_CSI1_D9__GPIO3_13,
MX51_PAD_CSI1_VSYNC__GPIO3_14,
@@ -107,64 +83,12 @@ static iomux_v3_cfg_t mx51efikamx_pads[] = {
/* power key */
MX51_PAD_PWRKEY,
- /* spi */
- MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI,
- MX51_PAD_CSPI1_MISO__ECSPI1_MISO,
- MX51_PAD_CSPI1_SS0__GPIO4_24,
- MX51_PAD_CSPI1_SS1__GPIO4_25,
- MX51_PAD_CSPI1_RDY__ECSPI1_RDY,
- MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
-
/* reset */
MX51_PAD_DI1_PIN13__GPIO3_2,
MX51_PAD_GPIO1_4__GPIO1_4,
-};
-/* Serial ports */
-#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
-static const struct imxuart_platform_data uart_pdata = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static inline void mxc_init_imx_uart(void)
-{
- imx51_add_imx_uart(0, &uart_pdata);
- imx51_add_imx_uart(1, &uart_pdata);
- imx51_add_imx_uart(2, &uart_pdata);
-}
-#else /* !SERIAL_IMX */
-static inline void mxc_init_imx_uart(void)
-{
-}
-#endif /* SERIAL_IMX */
-
-/* This function is board specific as the bit mask for the plldiv will also
- * be different for other Freescale SoCs, thus a common bitmask is not
- * possible and cannot get place in /plat-mxc/ehci.c.
- */
-static int initialize_otg_port(struct platform_device *pdev)
-{
- u32 v;
- void __iomem *usb_base;
- void __iomem *usbother_base;
- usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
- if (!usb_base)
- return -ENOMEM;
- usbother_base = (void __iomem *)(usb_base + MX5_USBOTHER_REGS_OFFSET);
-
- /* Set the PHY clock to 19.2MHz */
- v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
- v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK;
- v |= MX51_USB_PLL_DIV_24_MHZ;
- __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
- iounmap(usb_base);
- return 0;
-}
-
-static struct mxc_usbh_platform_data dr_utmi_config = {
- .init = initialize_otg_port,
- .portsc = MXC_EHCI_UTMI_16BIT,
- .flags = MXC_EHCI_INTERNAL_PHY,
+ /* power off */
+ MX51_PAD_CSI2_VSYNC__GPIO4_13,
};
/* PCBID2 PCBID1 PCBID0 STATE
@@ -265,47 +189,6 @@ static const struct gpio_keys_platform_data mx51_efikamx_powerkey_data __initcon
.nbuttons = ARRAY_SIZE(mx51_efikamx_powerkey),
};
-static struct mtd_partition mx51_efikamx_spi_nor_partitions[] = {
- {
- .name = "u-boot",
- .offset = 0,
- .size = SZ_256K,
- },
- {
- .name = "config",
- .offset = MTDPART_OFS_APPEND,
- .size = SZ_64K,
- },
-};
-
-static struct flash_platform_data mx51_efikamx_spi_flash_data = {
- .name = "spi_flash",
- .parts = mx51_efikamx_spi_nor_partitions,
- .nr_parts = ARRAY_SIZE(mx51_efikamx_spi_nor_partitions),
- .type = "sst25vf032b",
-};
-
-static struct spi_board_info mx51_efikamx_spi_board_info[] __initdata = {
- {
- .modalias = "m25p80",
- .max_speed_hz = 25000000,
- .bus_num = 0,
- .chip_select = 1,
- .platform_data = &mx51_efikamx_spi_flash_data,
- .irq = -1,
- },
-};
-
-static int mx51_efikamx_spi_cs[] = {
- EFIKAMX_SPI_CS0,
- EFIKAMX_SPI_CS1,
-};
-
-static const struct spi_imx_master mx51_efikamx_spi_pdata __initconst = {
- .chipselect = mx51_efikamx_spi_cs,
- .num_chipselect = ARRAY_SIZE(mx51_efikamx_spi_cs),
-};
-
void mx51_efikamx_reset(void)
{
if (system_rev == 0x11)
@@ -314,14 +197,53 @@ void mx51_efikamx_reset(void)
gpio_direction_output(EFIKAMX_RESET, 0);
}
-static void __init mxc_board_init(void)
+static struct regulator *pwgt1, *pwgt2, *coincell;
+
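+/* hooked up as pm_power_off: drop the coincell and power-gate regulators, then assert the poweroff GPIO */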
+static void mx51_efikamx_power_off(void)
+{
+ if (!IS_ERR(coincell))
+ regulator_disable(coincell);
+
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_disable(pwgt2);
+ regulator_disable(pwgt1);
+ }
+ gpio_direction_output(EFIKAMX_POWEROFF, 1);
+}
+
+static int __init mx51_efikamx_power_init(void)
+{
+ if (machine_is_mx51_efikamx()) {
+ pwgt1 = regulator_get(NULL, "pwgt1");
+ pwgt2 = regulator_get(NULL, "pwgt2");
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_enable(pwgt1);
+ regulator_enable(pwgt2);
+ }
+ gpio_request(EFIKAMX_POWEROFF, "poweroff");
+ pm_power_off = mx51_efikamx_power_off;
+
+		/* enable the coincell charger. maybe this needs a small power driver? */
+ coincell = regulator_get(NULL, "coincell");
+ if (!IS_ERR(coincell)) {
+ regulator_set_voltage(coincell, 3000000, 3000000);
+ regulator_enable(coincell);
+ }
+
+ regulator_has_full_constraints();
+ }
+
+ return 0;
+}
+late_initcall(mx51_efikamx_power_init);
+
+static void __init mx51_efikamx_init(void)
{
mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads,
ARRAY_SIZE(mx51efikamx_pads));
+ efika_board_common_init();
+
mx51_efikamx_board_id();
- mxc_register_device(&mxc_usbdr_host_device, &dr_utmi_config);
- mxc_init_imx_uart();
- imx51_add_sdhci_esdhc_imx(0, NULL);
/* on < 1.2 boards both SD controllers are used */
if (system_rev < 0x12) {
@@ -332,10 +254,6 @@ static void __init mxc_board_init(void)
platform_device_register(&mx51_efikamx_leds_device);
imx51_add_gpio_keys(&mx51_efikamx_powerkey_data);
- spi_register_board_info(mx51_efikamx_spi_board_info,
- ARRAY_SIZE(mx51_efikamx_spi_board_info));
- imx51_add_ecspi(0, &mx51_efikamx_spi_pdata);
-
if (system_rev == 0x11) {
gpio_request(EFIKAMX_RESET1_1, "reset");
gpio_direction_output(EFIKAMX_RESET1_1, 1);
@@ -343,6 +261,20 @@ static void __init mxc_board_init(void)
gpio_request(EFIKAMX_RESET, "reset");
gpio_direction_output(EFIKAMX_RESET, 1);
}
+
+ /*
+	 * enable wifi by default only on the mx:
+	 * the sb and mx use the same wlan pins, but the values needed
+	 * to enable them differ
+ */
+ gpio_request(EFIKA_WLAN_EN, "wlan_en");
+ gpio_direction_output(EFIKA_WLAN_EN, 0);
+ msleep(10);
+
+ gpio_request(EFIKA_WLAN_RESET, "wlan_rst");
+ gpio_direction_output(EFIKA_WLAN_RESET, 0);
+ msleep(10);
+ gpio_set_value(EFIKA_WLAN_RESET, 1);
}
static void __init mx51_efikamx_timer_init(void)
@@ -350,15 +282,16 @@ static void __init mx51_efikamx_timer_init(void)
mx51_clocks_init(32768, 24000000, 22579200, 24576000);
}
-static struct sys_timer mxc_timer = {
- .init = mx51_efikamx_timer_init,
+static struct sys_timer mx51_efikamx_timer = {
+ .init = mx51_efikamx_timer_init,
};
MACHINE_START(MX51_EFIKAMX, "Genesi EfikaMX nettop")
/* Maintainer: Amit Kucheria <amit.kucheria@linaro.org> */
.boot_params = MX51_PHYS_OFFSET + 0x100,
.map_io = mx51_map_io,
+ .init_early = imx51_init_early,
.init_irq = mx51_init_irq,
- .init_machine = mxc_board_init,
- .timer = &mxc_timer,
+ .timer = &mx51_efikamx_timer,
+ .init_machine = mx51_efikamx_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
new file mode 100644
index 000000000000..18e29b9b088b
--- /dev/null
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * based on code from the following
+ * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2009-2010 Pegatron Corporation. All Rights Reserved.
+ * Copyright 2009-2010 Genesi USA, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/fsl_devices.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <mach/ulpi.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/iomux-mx51.h>
+#include <mach/i2c.h>
+#include <mach/mxc_ehci.h>
+
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include "devices-imx51.h"
+#include "devices.h"
+#include "efika.h"
+
+#define EFIKASB_USBH2_STP IMX_GPIO_NR(2, 20)
+#define EFIKASB_GREEN_LED IMX_GPIO_NR(1, 3)
+#define EFIKASB_WHITE_LED IMX_GPIO_NR(2, 25)
+#define EFIKASB_PCBID0 IMX_GPIO_NR(2, 28)
+#define EFIKASB_PCBID1 IMX_GPIO_NR(2, 29)
+#define EFIKASB_PWRKEY IMX_GPIO_NR(2, 31)
+#define EFIKASB_LID IMX_GPIO_NR(3, 14)
+#define EFIKASB_POWEROFF IMX_GPIO_NR(4, 13)
+#define EFIKASB_RFKILL IMX_GPIO_NR(3, 1)
+
+#define MX51_PAD_PWRKEY IOMUX_PAD(0x48c, 0x0f8, 1, 0x0, 0, PAD_CTL_PUS_100K_UP | PAD_CTL_PKE)
+
+static iomux_v3_cfg_t mx51efikasb_pads[] = {
+ /* USB HOST2 */
+ MX51_PAD_EIM_D16__USBH2_DATA0,
+ MX51_PAD_EIM_D17__USBH2_DATA1,
+ MX51_PAD_EIM_D18__USBH2_DATA2,
+ MX51_PAD_EIM_D19__USBH2_DATA3,
+ MX51_PAD_EIM_D20__USBH2_DATA4,
+ MX51_PAD_EIM_D21__USBH2_DATA5,
+ MX51_PAD_EIM_D22__USBH2_DATA6,
+ MX51_PAD_EIM_D23__USBH2_DATA7,
+ MX51_PAD_EIM_A24__USBH2_CLK,
+ MX51_PAD_EIM_A25__USBH2_DIR,
+ MX51_PAD_EIM_A26__USBH2_STP,
+ MX51_PAD_EIM_A27__USBH2_NXT,
+
+ /* leds */
+ MX51_PAD_EIM_CS0__GPIO2_25,
+ MX51_PAD_GPIO1_3__GPIO1_3,
+
+ /* pcb id */
+ MX51_PAD_EIM_CS3__GPIO2_28,
+ MX51_PAD_EIM_CS4__GPIO2_29,
+
+ /* lid */
+ MX51_PAD_CSI1_VSYNC__GPIO3_14,
+
+	/* power key */
+ MX51_PAD_PWRKEY,
+
+ /* wifi/bt button */
+ MX51_PAD_DI1_PIN12__GPIO3_1,
+
+ /* power off */
+ MX51_PAD_CSI2_VSYNC__GPIO4_13,
+
+ /* wdog reset */
+ MX51_PAD_GPIO1_4__WDOG1_WDOG_B,
+
+ /* BT */
+ MX51_PAD_EIM_A17__GPIO2_11,
+};
+
+static int initialize_usbh2_port(struct platform_device *pdev)
+{
+ iomux_v3_cfg_t usbh2stp = MX51_PAD_EIM_A26__USBH2_STP;
+ iomux_v3_cfg_t usbh2gpio = MX51_PAD_EIM_A26__GPIO2_20;
+
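+	/* temporarily mux the STP pad as a GPIO, pulse it low then high, then hand it back to USBH2 */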
+ mxc_iomux_v3_setup_pad(usbh2gpio);
+ gpio_request(EFIKASB_USBH2_STP, "usbh2_stp");
+ gpio_direction_output(EFIKASB_USBH2_STP, 0);
+ msleep(1);
+ gpio_set_value(EFIKASB_USBH2_STP, 1);
+ msleep(1);
+
+ gpio_free(EFIKASB_USBH2_STP);
+ mxc_iomux_v3_setup_pad(usbh2stp);
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_ITC_NO_THRESHOLD);
+}
+
+static struct mxc_usbh_platform_data usbh2_config = {
+ .init = initialize_usbh2_port,
+ .portsc = MXC_EHCI_MODE_ULPI,
+};
+
+static void __init mx51_efikasb_usb(void)
+{
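+	/* USBH2 uses an ULPI transceiver with external VBUS drive and indication */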
+ usbh2_config.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT |
+ ULPI_OTG_EXTVBUSIND);
+ mxc_register_device(&mxc_usbh2_device, &usbh2_config);
+}
+
+static struct gpio_led mx51_efikasb_leds[] = {
+ {
+ .name = "efikasb:green",
+ .default_trigger = "default-on",
+ .gpio = EFIKASB_GREEN_LED,
+ .active_low = 1,
+ },
+ {
+ .name = "efikasb:white",
+ .default_trigger = "caps",
+ .gpio = EFIKASB_WHITE_LED,
+ },
+};
+
+static struct gpio_led_platform_data mx51_efikasb_leds_data = {
+ .leds = mx51_efikasb_leds,
+ .num_leds = ARRAY_SIZE(mx51_efikasb_leds),
+};
+
+static struct platform_device mx51_efikasb_leds_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &mx51_efikasb_leds_data,
+ },
+};
+
+static struct gpio_keys_button mx51_efikasb_keys[] = {
+ {
+ .code = KEY_POWER,
+ .gpio = EFIKASB_PWRKEY,
+ .type = EV_PWR,
+ .desc = "Power Button",
+ .wakeup = 1,
+ .debounce_interval = 10, /* ms */
+ },
+ {
+ .code = SW_LID,
+ .gpio = EFIKASB_LID,
+ .type = EV_SW,
+ .desc = "Lid Switch",
+ },
+ {
+ /* SW_RFKILLALL vs KEY_RFKILL ? */
+ .code = SW_RFKILL_ALL,
+ .gpio = EFIKASB_RFKILL,
+ .type = EV_SW,
+ .desc = "rfkill",
+ },
+};
+
+static const struct gpio_keys_platform_data mx51_efikasb_keys_data __initconst = {
+ .buttons = mx51_efikasb_keys,
+ .nbuttons = ARRAY_SIZE(mx51_efikasb_keys),
+};
+
+static struct regulator *pwgt1, *pwgt2;
+
+static void mx51_efikasb_power_off(void)
+{
+ gpio_set_value(EFIKA_USB_PHY_RESET, 0);
+
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_disable(pwgt2);
+ regulator_disable(pwgt1);
+ }
+ gpio_direction_output(EFIKASB_POWEROFF, 1);
+}
+
+static int __init mx51_efikasb_power_init(void)
+{
+ if (machine_is_mx51_efikasb()) {
+ pwgt1 = regulator_get(NULL, "pwgt1");
+ pwgt2 = regulator_get(NULL, "pwgt2");
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_enable(pwgt1);
+ regulator_enable(pwgt2);
+ }
+ gpio_request(EFIKASB_POWEROFF, "poweroff");
+ pm_power_off = mx51_efikasb_power_off;
+
+ regulator_has_full_constraints();
+ }
+
+ return 0;
+}
+late_initcall(mx51_efikasb_power_init);
+
+/* pcb id: 01 = R1.3 board
+           10 = R2.0 board */
+static void __init mx51_efikasb_board_id(void)
+{
+ int id;
+
+ gpio_request(EFIKASB_PCBID0, "pcb id0");
+ gpio_direction_input(EFIKASB_PCBID0);
+ gpio_request(EFIKASB_PCBID1, "pcb id1");
+ gpio_direction_input(EFIKASB_PCBID1);
+
+ id = gpio_get_value(EFIKASB_PCBID0);
+ id |= gpio_get_value(EFIKASB_PCBID1) << 1;
+
+ switch (id) {
+ default:
+ break;
+ case 1:
+ system_rev = 0x13;
+ break;
+ case 2:
+ system_rev = 0x20;
+ break;
+ }
+}
+
+static void __init efikasb_board_init(void)
+{
+ mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads,
+ ARRAY_SIZE(mx51efikasb_pads));
+ efika_board_common_init();
+
+ mx51_efikasb_board_id();
+ mx51_efikasb_usb();
+ imx51_add_sdhci_esdhc_imx(1, NULL);
+
+ platform_device_register(&mx51_efikasb_leds_device);
+ imx51_add_gpio_keys(&mx51_efikasb_keys_data);
+
+}
+
+static void __init mx51_efikasb_timer_init(void)
+{
+ mx51_clocks_init(32768, 24000000, 22579200, 24576000);
+}
+
+static struct sys_timer mx51_efikasb_timer = {
+ .init = mx51_efikasb_timer_init,
+};
+
+MACHINE_START(MX51_EFIKASB, "Genesi Efika Smartbook")
+ .boot_params = MX51_PHYS_OFFSET + 0x100,
+ .map_io = mx51_map_io,
+ .init_early = imx51_init_early,
+ .init_irq = mx51_init_irq,
+ .init_machine = efikasb_board_init,
+ .timer = &mx51_efikasb_timer,
+MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index caee04c08238..652e1e0131b3 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2010 Yong Shen. <Yong.Shen@linaro.org>
*/
@@ -42,28 +42,26 @@
#include "devices-imx53.h"
static iomux_v3_cfg_t mx53_evk_pads[] = {
- MX53_PAD_CSI0_D10__UART1_TXD,
- MX53_PAD_CSI0_D11__UART1_RXD,
- MX53_PAD_ATA_DIOW__UART1_TXD,
- MX53_PAD_ATA_DMACK__UART1_RXD,
+ MX53_PAD_CSI0_DAT10__UART1_TXD_MUX,
+ MX53_PAD_CSI0_DAT11__UART1_RXD_MUX,
- MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
- MX53_PAD_ATA_DMARQ__UART2_TXD,
- MX53_PAD_ATA_DIOR__UART2_RTS,
- MX53_PAD_ATA_INTRQ__UART2_CTS,
+ MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX,
+ MX53_PAD_PATA_DMARQ__UART2_TXD_MUX,
+ MX53_PAD_PATA_DIOR__UART2_RTS,
+ MX53_PAD_PATA_INTRQ__UART2_CTS,
- MX53_PAD_ATA_CS_0__UART3_TXD,
- MX53_PAD_ATA_CS_1__UART3_RXD,
- MX53_PAD_ATA_DA_1__UART3_CTS,
- MX53_PAD_ATA_DA_2__UART3_RTS,
+ MX53_PAD_PATA_CS_0__UART3_TXD_MUX,
+ MX53_PAD_PATA_CS_1__UART3_RXD_MUX,
+ MX53_PAD_PATA_DA_1__UART3_CTS,
+ MX53_PAD_PATA_DA_2__UART3_RTS,
- MX53_PAD_EIM_D16__CSPI1_SCLK,
- MX53_PAD_EIM_D17__CSPI1_MISO,
- MX53_PAD_EIM_D18__CSPI1_MOSI,
+ MX53_PAD_EIM_D16__ECSPI1_SCLK,
+ MX53_PAD_EIM_D17__ECSPI1_MISO,
+ MX53_PAD_EIM_D18__ECSPI1_MOSI,
/* ecspi chip select lines */
- MX53_PAD_EIM_EB2__GPIO_2_30,
- MX53_PAD_EIM_D19__GPIO_3_19,
+ MX53_PAD_EIM_EB2__GPIO2_30,
+ MX53_PAD_EIM_D19__GPIO3_19,
};
static const struct imxuart_platform_data mx53_evk_uart_pdata __initconst = {
@@ -72,7 +70,7 @@ static const struct imxuart_platform_data mx53_evk_uart_pdata __initconst = {
static inline void mx53_evk_init_uart(void)
{
- imx53_add_imx_uart(0, &mx53_evk_uart_pdata);
+ imx53_add_imx_uart(0, NULL);
imx53_add_imx_uart(1, &mx53_evk_uart_pdata);
imx53_add_imx_uart(2, &mx53_evk_uart_pdata);
}
@@ -139,6 +137,7 @@ static void __init mx53_evk_board_init(void)
spi_register_board_info(mx53_evk_spi_board_info,
ARRAY_SIZE(mx53_evk_spi_board_info));
imx53_add_ecspi(0, &mx53_evk_spi_data);
+ imx53_add_imx2_wdt(0, NULL);
}
static void __init mx53_evk_timer_init(void)
@@ -152,7 +151,8 @@ static struct sys_timer mx53_evk_timer = {
MACHINE_START(MX53_EVK, "Freescale MX53 EVK Board")
.map_io = mx53_map_io,
+ .init_early = imx53_init_early,
.init_irq = mx53_init_irq,
- .init_machine = mx53_evk_board_init,
.timer = &mx53_evk_timer,
+ .init_machine = mx53_evk_board_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index d1348e04ace3..0a18f8d23eb0 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -39,33 +39,147 @@
#define LOCO_FEC_PHY_RST IMX_GPIO_NR(7, 6)
static iomux_v3_cfg_t mx53_loco_pads[] = {
- MX53_PAD_CSI0_D10__UART1_TXD,
- MX53_PAD_CSI0_D11__UART1_RXD,
- MX53_PAD_ATA_DIOW__UART1_TXD,
- MX53_PAD_ATA_DMACK__UART1_RXD,
-
- MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
- MX53_PAD_ATA_DMARQ__UART2_TXD,
- MX53_PAD_ATA_DIOR__UART2_RTS,
- MX53_PAD_ATA_INTRQ__UART2_CTS,
-
- MX53_PAD_ATA_CS_0__UART3_TXD,
- MX53_PAD_ATA_CS_1__UART3_RXD,
- MX53_PAD_ATA_DA_1__UART3_CTS,
- MX53_PAD_ATA_DA_2__UART3_RTS,
+ /* FEC */
+ MX53_PAD_FEC_MDC__FEC_MDC,
+ MX53_PAD_FEC_MDIO__FEC_MDIO,
+ MX53_PAD_FEC_REF_CLK__FEC_TX_CLK,
+ MX53_PAD_FEC_RX_ER__FEC_RX_ER,
+ MX53_PAD_FEC_CRS_DV__FEC_RX_DV,
+ MX53_PAD_FEC_RXD1__FEC_RDATA_1,
+ MX53_PAD_FEC_RXD0__FEC_RDATA_0,
+ MX53_PAD_FEC_TX_EN__FEC_TX_EN,
+ MX53_PAD_FEC_TXD1__FEC_TDATA_1,
+ MX53_PAD_FEC_TXD0__FEC_TDATA_0,
+ /* FEC_nRST */
+ MX53_PAD_PATA_DA_0__GPIO7_6,
+ /* FEC_nINT */
+ MX53_PAD_PATA_DATA4__GPIO2_4,
+ /* AUDMUX5 */
+ MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC,
+ MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD,
+ MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS,
+ MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD,
+ /* I2C2 */
+ MX53_PAD_KEY_COL3__I2C2_SCL,
+ MX53_PAD_KEY_ROW3__I2C2_SDA,
+ /* SD1 */
+ MX53_PAD_SD1_CMD__ESDHC1_CMD,
+ MX53_PAD_SD1_CLK__ESDHC1_CLK,
+ MX53_PAD_SD1_DATA0__ESDHC1_DAT0,
+ MX53_PAD_SD1_DATA1__ESDHC1_DAT1,
+ MX53_PAD_SD1_DATA2__ESDHC1_DAT2,
+ MX53_PAD_SD1_DATA3__ESDHC1_DAT3,
+ /* SD3 */
+ MX53_PAD_PATA_DATA8__ESDHC3_DAT0,
+ MX53_PAD_PATA_DATA9__ESDHC3_DAT1,
+ MX53_PAD_PATA_DATA10__ESDHC3_DAT2,
+ MX53_PAD_PATA_DATA11__ESDHC3_DAT3,
+ MX53_PAD_PATA_DATA0__ESDHC3_DAT4,
+ MX53_PAD_PATA_DATA1__ESDHC3_DAT5,
+ MX53_PAD_PATA_DATA2__ESDHC3_DAT6,
+ MX53_PAD_PATA_DATA3__ESDHC3_DAT7,
+ MX53_PAD_PATA_IORDY__ESDHC3_CLK,
+ MX53_PAD_PATA_RESET_B__ESDHC3_CMD,
+ /* SD3_CD */
+ MX53_PAD_EIM_DA11__GPIO3_11,
+ /* SD3_WP */
+ MX53_PAD_EIM_DA12__GPIO3_12,
+ /* VGA */
+ MX53_PAD_EIM_OE__IPU_DI1_PIN7,
+ MX53_PAD_EIM_RW__IPU_DI1_PIN8,
+ /* DISPLB */
+ MX53_PAD_EIM_D20__IPU_SER_DISP0_CS,
+ MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK,
+ MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN,
+ MX53_PAD_EIM_D23__IPU_DI0_D0_CS,
+ /* DISP0_POWER_EN */
+ MX53_PAD_EIM_D24__GPIO3_24,
+ /* DISP0 DET INT */
+ MX53_PAD_EIM_D31__GPIO3_31,
+ /* LVDS */
+ MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3,
+ MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK,
+ MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2,
+ MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1,
+ MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0,
+ MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3,
+ MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2,
+ MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK,
+ MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1,
+ MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0,
+ /* I2C1 */
+ MX53_PAD_CSI0_DAT8__I2C1_SDA,
+ MX53_PAD_CSI0_DAT9__I2C1_SCL,
+ /* UART1 */
+ MX53_PAD_CSI0_DAT10__UART1_TXD_MUX,
+ MX53_PAD_CSI0_DAT11__UART1_RXD_MUX,
+ /* CSI0 */
+ MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12,
+ MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13,
+ MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14,
+ MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15,
+ MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16,
+ MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17,
+ MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18,
+ MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19,
+ MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC,
+ MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC,
+ MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK,
+ /* DISPLAY */
+ MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK,
+ MX53_PAD_DI0_PIN15__IPU_DI0_PIN15,
+ MX53_PAD_DI0_PIN2__IPU_DI0_PIN2,
+ MX53_PAD_DI0_PIN3__IPU_DI0_PIN3,
+ MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0,
+ MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1,
+ MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2,
+ MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3,
+ MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4,
+ MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5,
+ MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6,
+ MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7,
+ MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8,
+ MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9,
+ MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10,
+ MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11,
+ MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12,
+ MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13,
+ MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14,
+ MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15,
+ MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16,
+ MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17,
+ MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18,
+ MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19,
+ MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20,
+ MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21,
+ MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22,
+ MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23,
+	/* Audio CLK */
+ MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK,
+ /* PWM */
+ MX53_PAD_GPIO_1__PWM2_PWMO,
+ /* SPDIF */
+ MX53_PAD_GPIO_7__SPDIF_PLOCK,
+ MX53_PAD_GPIO_17__SPDIF_OUT1,
+ /* GPIO */
+ MX53_PAD_PATA_DA_1__GPIO7_7,
+ MX53_PAD_PATA_DA_2__GPIO7_8,
+ MX53_PAD_PATA_DATA5__GPIO2_5,
+ MX53_PAD_PATA_DATA6__GPIO2_6,
+ MX53_PAD_PATA_DATA14__GPIO2_14,
+ MX53_PAD_PATA_DATA15__GPIO2_15,
+ MX53_PAD_PATA_INTRQ__GPIO7_2,
+ MX53_PAD_EIM_WAIT__GPIO5_0,
+ MX53_PAD_NANDF_WP_B__GPIO6_9,
+ MX53_PAD_NANDF_RB0__GPIO6_10,
+ MX53_PAD_NANDF_CS1__GPIO6_14,
+ MX53_PAD_NANDF_CS2__GPIO6_15,
+ MX53_PAD_NANDF_CS3__GPIO6_16,
+ MX53_PAD_GPIO_5__GPIO1_5,
+ MX53_PAD_GPIO_16__GPIO7_11,
+ MX53_PAD_GPIO_8__GPIO1_8,
};
-static const struct imxuart_platform_data mx53_loco_uart_data __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static inline void mx53_loco_init_uart(void)
-{
- imx53_add_imx_uart(0, &mx53_loco_uart_data);
- imx53_add_imx_uart(1, &mx53_loco_uart_data);
- imx53_add_imx_uart(2, &mx53_loco_uart_data);
-}
-
static inline void mx53_loco_fec_reset(void)
{
int ret;
@@ -85,13 +199,22 @@ static struct fec_platform_data mx53_loco_fec_data = {
.phy = PHY_INTERFACE_MODE_RMII,
};
+static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = {
+ .bitrate = 100000,
+};
+
static void __init mx53_loco_board_init(void)
{
mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
ARRAY_SIZE(mx53_loco_pads));
- mx53_loco_init_uart();
+ imx53_add_imx_uart(0, NULL);
mx53_loco_fec_reset();
imx53_add_fec(&mx53_loco_fec_data);
+ imx53_add_imx2_wdt(0, NULL);
+ imx53_add_imx_i2c(0, &mx53_loco_i2c_data);
+ imx53_add_imx_i2c(1, &mx53_loco_i2c_data);
+ imx53_add_sdhci_esdhc_imx(0, NULL);
+ imx53_add_sdhci_esdhc_imx(2, NULL);
}
static void __init mx53_loco_timer_init(void)
@@ -105,7 +228,8 @@ static struct sys_timer mx53_loco_timer = {
MACHINE_START(MX53_LOCO, "Freescale MX53 LOCO Board")
.map_io = mx53_map_io,
+ .init_early = imx53_init_early,
.init_irq = mx53_init_irq,
- .init_machine = mx53_loco_board_init,
.timer = &mx53_loco_timer,
+ .init_machine = mx53_loco_board_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 7970f7a48588..5eb1638f913e 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -39,20 +39,21 @@
#define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6)
static iomux_v3_cfg_t mx53_smd_pads[] = {
- MX53_PAD_CSI0_D10__UART1_TXD,
- MX53_PAD_CSI0_D11__UART1_RXD,
- MX53_PAD_ATA_DIOW__UART1_TXD,
- MX53_PAD_ATA_DMACK__UART1_RXD,
-
- MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
- MX53_PAD_ATA_DMARQ__UART2_TXD,
- MX53_PAD_ATA_DIOR__UART2_RTS,
- MX53_PAD_ATA_INTRQ__UART2_CTS,
-
- MX53_PAD_ATA_CS_0__UART3_TXD,
- MX53_PAD_ATA_CS_1__UART3_RXD,
- MX53_PAD_ATA_DA_1__UART3_CTS,
- MX53_PAD_ATA_DA_2__UART3_RTS,
+ MX53_PAD_CSI0_DAT10__UART1_TXD_MUX,
+ MX53_PAD_CSI0_DAT11__UART1_RXD_MUX,
+
+ MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX,
+ MX53_PAD_PATA_DMARQ__UART2_TXD_MUX,
+ MX53_PAD_PATA_DIOR__UART2_RTS,
+ MX53_PAD_PATA_INTRQ__UART2_CTS,
+
+ MX53_PAD_PATA_CS_0__UART3_TXD_MUX,
+ MX53_PAD_PATA_CS_1__UART3_RXD_MUX,
+ MX53_PAD_PATA_DA_1__UART3_CTS,
+ MX53_PAD_PATA_DA_2__UART3_RTS,
+ /* I2C1 */
+ MX53_PAD_CSI0_DAT8__I2C1_SDA,
+ MX53_PAD_CSI0_DAT9__I2C1_SCL,
};
static const struct imxuart_platform_data mx53_smd_uart_data __initconst = {
@@ -61,7 +62,7 @@ static const struct imxuart_platform_data mx53_smd_uart_data __initconst = {
static inline void mx53_smd_init_uart(void)
{
- imx53_add_imx_uart(0, &mx53_smd_uart_data);
+ imx53_add_imx_uart(0, NULL);
imx53_add_imx_uart(1, &mx53_smd_uart_data);
imx53_add_imx_uart(2, &mx53_smd_uart_data);
}
@@ -85,6 +86,10 @@ static struct fec_platform_data mx53_smd_fec_data = {
.phy = PHY_INTERFACE_MODE_RMII,
};
+static const struct imxi2c_platform_data mx53_smd_i2c_data __initconst = {
+ .bitrate = 100000,
+};
+
static void __init mx53_smd_board_init(void)
{
mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads,
@@ -92,6 +97,8 @@ static void __init mx53_smd_board_init(void)
mx53_smd_init_uart();
mx53_smd_fec_reset();
imx53_add_fec(&mx53_smd_fec_data);
+ imx53_add_imx2_wdt(0, NULL);
+ imx53_add_imx_i2c(0, &mx53_smd_i2c_data);
}
static void __init mx53_smd_timer_init(void)
@@ -105,7 +112,8 @@ static struct sys_timer mx53_smd_timer = {
MACHINE_START(MX53_SMD, "Freescale MX53 SMD Board")
.map_io = mx53_map_io,
+ .init_early = imx53_init_early,
.init_irq = mx53_init_irq,
- .init_machine = mx53_smd_board_init,
.timer = &mx53_smd_timer,
+ .init_machine = mx53_smd_board_init,
MACHINE_END
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 0a19e7567c0b..652ace413825 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -42,6 +42,9 @@ static struct clk usboh3_clk;
static struct clk emi_fast_clk;
static struct clk ipu_clk;
static struct clk mipi_hsc1_clk;
+static struct clk esdhc1_clk;
+static struct clk esdhc2_clk;
+static struct clk esdhc3_mx53_clk;
#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
@@ -867,10 +870,6 @@ static struct clk gpt_32k_clk = {
.parent = &ckil_clk,
};
-static struct clk kpp_clk = {
- .id = 0,
-};
-
static struct clk dummy_clk = {
.id = 0,
};
@@ -1147,10 +1146,80 @@ CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)
+/* mx51 specific */
CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
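+/* on mx51, esdhc3 and esdhc4 are clocked from either esdhc1 or esdhc2, selected in CSCMR1 */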
+static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
+{
+ u32 reg;
+
+ reg = __raw_readl(MXC_CCM_CSCMR1);
+ if (parent == &esdhc1_clk)
+ reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
+ else if (parent == &esdhc2_clk)
+ reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
+ else
+ return -EINVAL;
+ __raw_writel(reg, MXC_CCM_CSCMR1);
+
+ return 0;
+}
+
+static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
+{
+ u32 reg;
+
+ reg = __raw_readl(MXC_CCM_CSCMR1);
+ if (parent == &esdhc1_clk)
+ reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
+ else if (parent == &esdhc2_clk)
+ reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
+ else
+ return -EINVAL;
+ __raw_writel(reg, MXC_CCM_CSCMR1);
+
+ return 0;
+}
+
+/* mx53 specific */
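+/* on mx53, esdhc2 and esdhc4 are clocked from either esdhc1 or esdhc3 */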
+static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
+{
+ u32 reg;
+
+ reg = __raw_readl(MXC_CCM_CSCMR1);
+ if (parent == &esdhc1_clk)
+ reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
+ else if (parent == &esdhc3_mx53_clk)
+ reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
+ else
+ return -EINVAL;
+ __raw_writel(reg, MXC_CCM_CSCMR1);
+
+ return 0;
+}
+
+CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
+CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
+CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
+
+static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
+{
+ u32 reg;
+
+ reg = __raw_readl(MXC_CCM_CSCMR1);
+ if (parent == &esdhc1_clk)
+ reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
+ else if (parent == &esdhc3_mx53_clk)
+ reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
+ else
+ return -EINVAL;
+ __raw_writel(reg, MXC_CCM_CSCMR1);
+
+ return 0;
+}
+
#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s) \
static struct clk name = { \
.id = i, \
@@ -1255,9 +1324,62 @@ DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
+DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
+ NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
+DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
+ NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
+
+/* mx51 specific */
DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
+static struct clk esdhc3_clk = {
+ .id = 2,
+ .parent = &esdhc1_clk,
+ .set_parent = clk_esdhc3_set_parent,
+ .enable_reg = MXC_CCM_CCGR3,
+ .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
+ .enable = _clk_max_enable,
+ .disable = _clk_max_disable,
+ .secondary = &esdhc3_ipg_clk,
+};
+static struct clk esdhc4_clk = {
+ .id = 3,
+ .parent = &esdhc1_clk,
+ .set_parent = clk_esdhc4_set_parent,
+ .enable_reg = MXC_CCM_CCGR3,
+ .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
+ .enable = _clk_max_enable,
+ .disable = _clk_max_disable,
+ .secondary = &esdhc4_ipg_clk,
+};
+
+/* mx53 specific */
+static struct clk esdhc2_mx53_clk = {
+ .id = 2,
+ .parent = &esdhc1_clk,
+ .set_parent = clk_esdhc2_mx53_set_parent,
+ .enable_reg = MXC_CCM_CCGR3,
+ .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
+ .enable = _clk_max_enable,
+ .disable = _clk_max_disable,
+ .secondary = &esdhc3_ipg_clk,
+};
+
+DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
+ clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);
+
+static struct clk esdhc4_mx53_clk = {
+ .id = 3,
+ .parent = &esdhc1_clk,
+ .set_parent = clk_esdhc4_mx53_set_parent,
+ .enable_reg = MXC_CCM_CCGR3,
+ .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
+ .enable = _clk_max_enable,
+ .disable = _clk_max_disable,
+ .secondary = &esdhc4_ipg_clk,
+};
+
DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
@@ -1302,7 +1424,7 @@ static struct clk_lookup mx51_lookups[] = {
_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
+ _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
@@ -1316,6 +1438,8 @@ static struct clk_lookup mx51_lookups[] = {
_REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
+ _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk)
+ _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_clk)
_REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
@@ -1336,10 +1460,14 @@ static struct clk_lookup mx53_lookups[] = {
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
+ _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk)
+ _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk)
+ _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk)
_REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk)
_REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk)
_REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk)
+ _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
+ _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
};
static void clk_tree_init(void)
@@ -1427,6 +1555,14 @@ int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
mx53_revision();
clk_disable(&iim_clk);
+ /* Set the SDHC parent clocks to PLL2 */
+ clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
+ clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);
+
+ /* Set the SDHC root clocks to 200 MHz */
+ clk_set_rate(&esdhc1_clk, 200000000);
+ clk_set_rate(&esdhc3_mx53_clk, 200000000);
+
/* System timer */
mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
MX53_INT_GPT);
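
For reference, a consumer obtains the clocks registered in the lookup tables above through the standard clk API, keyed by device name with a NULL con_id. The following is a minimal sketch of hypothetical consumer code, not part of this patch; the function name is illustrative only.

    #include <linux/device.h>
    #include <linux/clk.h>
    #include <linux/err.h>

    /* dev would be, e.g., the platform device named "sdhci-esdhc-imx.2" */
    static int example_enable_esdhc_clock(struct device *dev)
    {
            struct clk *clk = clk_get(dev, NULL); /* NULL con_id, as registered above */
            int ret;

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_enable(clk);
            if (ret) {
                    clk_put(clk);
                    return ret;
            }

            /* ... access the controller ... */

            clk_disable(clk);
            clk_put(clk);
            return 0;
    }
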
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index d40671da4372..df46b5e60857 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -78,11 +78,16 @@ static int get_mx53_srev(void)
void __iomem *iim_base = MX51_IO_ADDRESS(MX53_IIM_BASE_ADDR);
u32 rev = readl(iim_base + IIM_SREV) & 0xff;
- if (rev == 0x0)
+ switch (rev) {
+ case 0x0:
return IMX_CHIP_REVISION_1_0;
- else if (rev == 0x10)
+ case 0x2:
return IMX_CHIP_REVISION_2_0;
- return 0;
+ case 0x3:
+ return IMX_CHIP_REVISION_2_1;
+ default:
+ return IMX_CHIP_REVISION_UNKNOWN;
+ }
}
/*
diff --git a/arch/arm/mach-mx5/crm_regs.h b/arch/arm/mach-mx5/crm_regs.h
index b462c22f53d8..87c0c58f27a7 100644
--- a/arch/arm/mach-mx5/crm_regs.h
+++ b/arch/arm/mach-mx5/crm_regs.h
@@ -217,9 +217,12 @@
#define MXC_CCM_CSCMR1_ESDHC1_MSHC1_CLK_SEL_OFFSET (20)
#define MXC_CCM_CSCMR1_ESDHC1_MSHC1_CLK_SEL_MASK (0x3 << 20)
#define MXC_CCM_CSCMR1_ESDHC3_CLK_SEL (0x1 << 19)
+#define MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL (0x1 << 19)
#define MXC_CCM_CSCMR1_ESDHC4_CLK_SEL (0x1 << 18)
#define MXC_CCM_CSCMR1_ESDHC2_MSHC2_CLK_SEL_OFFSET (16)
#define MXC_CCM_CSCMR1_ESDHC2_MSHC2_CLK_SEL_MASK (0x3 << 16)
+#define MXC_CCM_CSCMR1_ESDHC3_MX53_CLK_SEL_OFFSET (16)
+#define MXC_CCM_CSCMR1_ESDHC3_MX53_CLK_SEL_MASK (0x3 << 16)
#define MXC_CCM_CSCMR1_SSI1_CLK_SEL_OFFSET (14)
#define MXC_CCM_CSCMR1_SSI1_CLK_SEL_MASK (0x3 << 14)
#define MXC_CCM_CSCMR1_SSI2_CLK_SEL_OFFSET (12)
@@ -271,6 +274,10 @@
#define MXC_CCM_CSCDR1_ESDHC2_MSHC2_CLK_PRED_MASK (0x7 << 22)
#define MXC_CCM_CSCDR1_ESDHC2_MSHC2_CLK_PODF_OFFSET (19)
#define MXC_CCM_CSCDR1_ESDHC2_MSHC2_CLK_PODF_MASK (0x7 << 19)
+#define MXC_CCM_CSCDR1_ESDHC3_MX53_CLK_PRED_OFFSET (22)
+#define MXC_CCM_CSCDR1_ESDHC3_MX53_CLK_PRED_MASK (0x7 << 22)
+#define MXC_CCM_CSCDR1_ESDHC3_MX53_CLK_PODF_OFFSET (19)
+#define MXC_CCM_CSCDR1_ESDHC3_MX53_CLK_PODF_MASK (0x7 << 19)
#define MXC_CCM_CSCDR1_ESDHC1_MSHC1_CLK_PRED_OFFSET (16)
#define MXC_CCM_CSCDR1_ESDHC1_MSHC1_CLK_PRED_MASK (0x7 << 16)
#define MXC_CCM_CSCDR1_PGC_CLK_PODF_OFFSET (14)
diff --git a/arch/arm/mach-mx5/devices-mx50.h b/arch/arm/mach-mx5/devices-imx50.h
index 98ab07468a0e..c9e42823c7e3 100644
--- a/arch/arm/mach-mx5/devices-mx50.h
+++ b/arch/arm/mach-mx5/devices-imx50.h
@@ -24,3 +24,11 @@
extern const struct imx_imx_uart_1irq_data imx50_imx_uart_data[] __initconst;
#define imx50_add_imx_uart(id, pdata) \
imx_add_imx_uart_1irq(&imx50_imx_uart_data[id], pdata)
+
+extern const struct imx_fec_data imx50_fec_data __initconst;
+#define imx50_add_fec(pdata) \
+ imx_add_fec(&imx50_fec_data, pdata)
+
+extern const struct imx_imx_i2c_data imx50_imx_i2c_data[] __initconst;
+#define imx50_add_imx_i2c(id, pdata) \
+ imx_add_imx_i2c(&imx50_imx_i2c_data[id], pdata)
diff --git a/arch/arm/mach-mx5/devices-imx53.h b/arch/arm/mach-mx5/devices-imx53.h
index 8639735a117b..9251008dad1f 100644
--- a/arch/arm/mach-mx5/devices-imx53.h
+++ b/arch/arm/mach-mx5/devices-imx53.h
@@ -29,3 +29,7 @@ imx53_sdhci_esdhc_imx_data[] __initconst;
extern const struct imx_spi_imx_data imx53_ecspi_data[] __initconst;
#define imx53_add_ecspi(id, pdata) \
imx_add_spi_imx(&imx53_ecspi_data[id], pdata)
+
+extern const struct imx_imx2_wdt_data imx53_imx2_wdt_data[] __initconst;
+#define imx53_add_imx2_wdt(id, pdata) \
+ imx_add_imx2_wdt(&imx53_imx2_wdt_data[id])
diff --git a/arch/arm/mach-mx5/efika.h b/arch/arm/mach-mx5/efika.h
new file mode 100644
index 000000000000..014aa985faae
--- /dev/null
+++ b/arch/arm/mach-mx5/efika.h
@@ -0,0 +1,10 @@
+#ifndef _EFIKA_H
+#define _EFIKA_H
+
+#define EFIKA_WLAN_EN IMX_GPIO_NR(2, 16)
+#define EFIKA_WLAN_RESET IMX_GPIO_NR(2, 10)
+#define EFIKA_USB_PHY_RESET IMX_GPIO_NR(2, 9)
+
+void __init efika_board_common_init(void);
+
+#endif
diff --git a/arch/arm/mach-mx5/ehci.c b/arch/arm/mach-mx5/ehci.c
new file mode 100644
index 000000000000..7ce12c804a32
--- /dev/null
+++ b/arch/arm/mach-mx5/ehci.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define MXC_OTG_OFFSET 0
+#define MXC_H1_OFFSET 0x200
+#define MXC_H2_OFFSET 0x400
+
+/* USB_CTRL */
+#define MXC_OTG_UCTRL_OWIE_BIT (1 << 27) /* OTG wakeup intr enable */
+#define MXC_OTG_UCTRL_OPM_BIT (1 << 24) /* OTG power mask */
+#define MXC_H1_UCTRL_H1UIE_BIT (1 << 12) /* Host1 ULPI interrupt enable */
+#define MXC_H1_UCTRL_H1WIE_BIT (1 << 11) /* HOST1 wakeup intr enable */
+#define MXC_H1_UCTRL_H1PM_BIT (1 << 8) /* HOST1 power mask */
+
+/* USB_PHY_CTRL_FUNC */
+#define MXC_OTG_PHYCTRL_OC_DIS_BIT (1 << 8) /* OTG Disable Overcurrent Event */
+#define MXC_H1_OC_DIS_BIT (1 << 5) /* UH1 Disable Overcurrent Event */
+
+/* USBH2CTRL */
+#define MXC_H2_UCTRL_H2UIE_BIT (1 << 8)
+#define MXC_H2_UCTRL_H2WIE_BIT (1 << 7)
+#define MXC_H2_UCTRL_H2PM_BIT (1 << 4)
+
+#define MXC_USBCMD_OFFSET 0x140
+
+/* USBCMD */
+#define MXC_UCMD_ITC_NO_THRESHOLD_MASK (~(0xff << 16)) /* Interrupt Threshold Control */
+
+int mx51_initialize_usb_hw(int port, unsigned int flags)
+{
+ unsigned int v;
+ void __iomem *usb_base;
+ void __iomem *usbotg_base;
+ void __iomem *usbother_base;
+ int ret = 0;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ if (!usb_base) {
+ printk(KERN_ERR "%s(): ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ switch (port) {
+ case 0: /* OTG port */
+ usbotg_base = usb_base + MXC_OTG_OFFSET;
+ break;
+ case 1: /* Host 1 port */
+ usbotg_base = usb_base + MXC_H1_OFFSET;
+ break;
+ case 2: /* Host 2 port */
+ usbotg_base = usb_base + MXC_H2_OFFSET;
+ break;
+ default:
+ printk(KERN_ERR "%s: no such port %d\n", __func__, port);
+ ret = -ENOENT;
+ goto error;
+ }
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ switch (port) {
+ case 0: /* OTG port */
+ if (flags & MXC_EHCI_INTERNAL_PHY) {
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED) {
+ /* OC/USBPWR is not used */
+ v |= MXC_OTG_PHYCTRL_OC_DIS_BIT;
+ } else {
+ /* OC/USBPWR is used */
+ v &= ~MXC_OTG_PHYCTRL_OC_DIS_BIT;
+ }
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED)
+ v |= MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup enable */
+ else
+ v &= ~MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup disable */
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v |= MXC_OTG_UCTRL_OPM_BIT;
+ else
+ v &= ~MXC_OTG_UCTRL_OPM_BIT;
+ __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
+ }
+ break;
+ case 1: /* Host 1 */
+ /* Host ULPI */
+ v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED) {
+ /* HOST1 wakeup/ULPI intr enable */
+ v |= (MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);
+ } else {
+ /* HOST1 wakeup/ULPI intr disable */
+ v &= ~(MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);
+ }
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used */
+ else
+ v |= MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask not used */
+ __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
+
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H1_OC_DIS_BIT; /* OC is used */
+ else
+ v |= MXC_H1_OC_DIS_BIT; /* OC is not used */
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ v = __raw_readl(usbotg_base + MXC_USBCMD_OFFSET);
+ if (flags & MXC_EHCI_ITC_NO_THRESHOLD)
+ /* Interrupt Threshold Control:Immediate (no threshold) */
+ v &= MXC_UCMD_ITC_NO_THRESHOLD_MASK;
+ __raw_writel(v, usbotg_base + MXC_USBCMD_OFFSET);
+ break;
+ case 2: /* Host 2 ULPI */
+ v = __raw_readl(usbother_base + MXC_USBH2CTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED) {
+ /* HOST2 wakeup/ULPI intr enable */
+ v |= (MXC_H2_UCTRL_H2WIE_BIT | MXC_H2_UCTRL_H2UIE_BIT);
+ } else {
+ /* HOST2 wakeup/ULPI intr disable */
+ v &= ~(MXC_H2_UCTRL_H2WIE_BIT | MXC_H2_UCTRL_H2UIE_BIT);
+ }
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H2_UCTRL_H2PM_BIT; /* HOST2 power mask used */
+ else
+ v |= MXC_H2_UCTRL_H2PM_BIT; /* HOST2 power mask not used */
+ __raw_writel(v, usbother_base + MXC_USBH2CTRL_OFFSET);
+ break;
+ }
+
+error:
+ iounmap(usb_base);
+ return ret;
+}
+
diff --git a/arch/arm/mach-mx5/mm-mx50.c b/arch/arm/mach-mx5/mm-mx50.c
index 8c6540e58390..69b34269dae5 100644
--- a/arch/arm/mach-mx5/mm-mx50.c
+++ b/arch/arm/mach-mx5/mm-mx50.c
@@ -44,10 +44,14 @@ static struct map_desc mx50_io_desc[] __initdata = {
*/
void __init mx50_map_io(void)
{
+ iotable_init(mx50_io_desc, ARRAY_SIZE(mx50_io_desc));
+}
+
+void __init imx50_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX50);
mxc_iomux_v3_init(MX50_IO_ADDRESS(MX50_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR));
- iotable_init(mx50_io_desc, ARRAY_SIZE(mx50_io_desc));
}
int imx50_register_gpios(void);
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index 457f9f95204b..ff557301b42b 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -47,18 +47,26 @@ static struct map_desc mx53_io_desc[] __initdata = {
*/
void __init mx51_map_io(void)
{
+ iotable_init(mx51_io_desc, ARRAY_SIZE(mx51_io_desc));
+}
+
+void __init imx51_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX51);
mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
- iotable_init(mx51_io_desc, ARRAY_SIZE(mx51_io_desc));
}
void __init mx53_map_io(void)
{
+ iotable_init(mx53_io_desc, ARRAY_SIZE(mx53_io_desc));
+}
+
+void __init imx53_init_early(void)
+{
mxc_set_cpu_type(MXC_CPU_MX53);
mxc_iomux_v3_init(MX53_IO_ADDRESS(MX53_IOMUXC_BASE_ADDR));
- mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG_BASE_ADDR));
- iotable_init(mx53_io_desc, ARRAY_SIZE(mx53_io_desc));
+ mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR));
}
int imx51_register_gpios(void);
diff --git a/arch/arm/mach-mx5/mx51_efika.c b/arch/arm/mach-mx5/mx51_efika.c
new file mode 100644
index 000000000000..74991c913bf6
--- /dev/null
+++ b/arch/arm/mach-mx5/mx51_efika.c
@@ -0,0 +1,636 @@
+/*
+ * based on code from the following
+ * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2009-2010 Pegatron Corporation. All Rights Reserved.
+ * Copyright 2009-2010 Genesi USA, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/fsl_devices.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/iomux-mx51.h>
+#include <mach/i2c.h>
+#include <mach/mxc_ehci.h>
+
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <mach/ulpi.h>
+
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <asm/mach-types.h>
+
+#include "devices-imx51.h"
+#include "devices.h"
+#include "efika.h"
+#include "cpu_op-mx51.h"
+
+#define MX51_USB_CTRL_1_OFFSET 0x10
+#define MX51_USB_CTRL_UH1_EXT_CLK_EN (1 << 25)
+#define MX51_USB_PLL_DIV_19_2_MHZ 0x01
+
+#define EFIKAMX_USB_HUB_RESET IMX_GPIO_NR(1, 5)
+#define EFIKAMX_USBH1_STP IMX_GPIO_NR(1, 27)
+
+#define EFIKAMX_SPI_CS0 IMX_GPIO_NR(4, 24)
+#define EFIKAMX_SPI_CS1 IMX_GPIO_NR(4, 25)
+
+#define EFIKAMX_PMIC IMX_GPIO_NR(1, 6)
+
+static iomux_v3_cfg_t mx51efika_pads[] = {
+ /* UART1 */
+ MX51_PAD_UART1_RXD__UART1_RXD,
+ MX51_PAD_UART1_TXD__UART1_TXD,
+ MX51_PAD_UART1_RTS__UART1_RTS,
+ MX51_PAD_UART1_CTS__UART1_CTS,
+
+ /* SD 1 */
+ MX51_PAD_SD1_CMD__SD1_CMD,
+ MX51_PAD_SD1_CLK__SD1_CLK,
+ MX51_PAD_SD1_DATA0__SD1_DATA0,
+ MX51_PAD_SD1_DATA1__SD1_DATA1,
+ MX51_PAD_SD1_DATA2__SD1_DATA2,
+ MX51_PAD_SD1_DATA3__SD1_DATA3,
+
+ /* SD 2 */
+ MX51_PAD_SD2_CMD__SD2_CMD,
+ MX51_PAD_SD2_CLK__SD2_CLK,
+ MX51_PAD_SD2_DATA0__SD2_DATA0,
+ MX51_PAD_SD2_DATA1__SD2_DATA1,
+ MX51_PAD_SD2_DATA2__SD2_DATA2,
+ MX51_PAD_SD2_DATA3__SD2_DATA3,
+
+ /* SD/MMC WP/CD */
+ MX51_PAD_GPIO1_0__SD1_CD,
+ MX51_PAD_GPIO1_1__SD1_WP,
+ MX51_PAD_GPIO1_7__SD2_WP,
+ MX51_PAD_GPIO1_8__SD2_CD,
+
+ /* spi */
+ MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI,
+ MX51_PAD_CSPI1_MISO__ECSPI1_MISO,
+ MX51_PAD_CSPI1_SS0__GPIO4_24,
+ MX51_PAD_CSPI1_SS1__GPIO4_25,
+ MX51_PAD_CSPI1_RDY__ECSPI1_RDY,
+ MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
+ MX51_PAD_GPIO1_6__GPIO1_6,
+
+ /* USB HOST1 */
+ MX51_PAD_USBH1_CLK__USBH1_CLK,
+ MX51_PAD_USBH1_DIR__USBH1_DIR,
+ MX51_PAD_USBH1_NXT__USBH1_NXT,
+ MX51_PAD_USBH1_DATA0__USBH1_DATA0,
+ MX51_PAD_USBH1_DATA1__USBH1_DATA1,
+ MX51_PAD_USBH1_DATA2__USBH1_DATA2,
+ MX51_PAD_USBH1_DATA3__USBH1_DATA3,
+ MX51_PAD_USBH1_DATA4__USBH1_DATA4,
+ MX51_PAD_USBH1_DATA5__USBH1_DATA5,
+ MX51_PAD_USBH1_DATA6__USBH1_DATA6,
+ MX51_PAD_USBH1_DATA7__USBH1_DATA7,
+
+ /* USB HUB RESET */
+ MX51_PAD_GPIO1_5__GPIO1_5,
+
+ /* WLAN */
+ MX51_PAD_EIM_A22__GPIO2_16,
+ MX51_PAD_EIM_A16__GPIO2_10,
+
+ /* USB PHY RESET */
+ MX51_PAD_EIM_D27__GPIO2_9,
+};
+
+/* Serial ports */
+static const struct imxuart_platform_data uart_pdata = {
+ .flags = IMXUART_HAVE_RTSCTS,
+};
+
+/* This function is board specific, as the bit mask for the PLL divider will
+ * also differ on other Freescale SoCs; a common bitmask is therefore not
+ * possible and this cannot be placed in plat-mxc/ehci.c.
+ */
+static int initialize_otg_port(struct platform_device *pdev)
+{
+ u32 v;
+ void __iomem *usb_base;
+ void __iomem *usbother_base;
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ if (!usb_base)
+ return -ENOMEM;
+ usbother_base = (void __iomem *)(usb_base + MX5_USBOTHER_REGS_OFFSET);
+
+ /* Set the PHY clock to 19.2MHz */
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
+ v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK;
+ v |= MX51_USB_PLL_DIV_19_2_MHZ;
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
+ iounmap(usb_base);
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
+}
+
+static struct mxc_usbh_platform_data dr_utmi_config = {
+ .init = initialize_otg_port,
+ .portsc = MXC_EHCI_UTMI_16BIT,
+};
+
+static int initialize_usbh1_port(struct platform_device *pdev)
+{
+ iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
+ iomux_v3_cfg_t usbh1gpio = MX51_PAD_USBH1_STP__GPIO1_27;
+ u32 v;
+ void __iomem *usb_base;
+ void __iomem *socregs_base;
+
+ mxc_iomux_v3_setup_pad(usbh1gpio);
+ gpio_request(EFIKAMX_USBH1_STP, "usbh1_stp");
+ gpio_direction_output(EFIKAMX_USBH1_STP, 0);
+ msleep(1);
+ gpio_set_value(EFIKAMX_USBH1_STP, 1);
+ msleep(1);
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ socregs_base = (void __iomem *)(usb_base + MX5_USBOTHER_REGS_OFFSET);
+
+ /* The clock for the USBH1 ULPI port will come externally */
+ /* from the PHY. */
+ v = __raw_readl(socregs_base + MX51_USB_CTRL_1_OFFSET);
+ __raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN,
+ socregs_base + MX51_USB_CTRL_1_OFFSET);
+
+ iounmap(usb_base);
+
+ gpio_free(EFIKAMX_USBH1_STP);
+ mxc_iomux_v3_setup_pad(usbh1stp);
+
+ mdelay(10);
+
+ return mx51_initialize_usb_hw(0, MXC_EHCI_ITC_NO_THRESHOLD);
+}
+
+static struct mxc_usbh_platform_data usbh1_config = {
+ .init = initialize_usbh1_port,
+ .portsc = MXC_EHCI_MODE_ULPI,
+};
+
+static void mx51_efika_hubreset(void)
+{
+ gpio_request(EFIKAMX_USB_HUB_RESET, "usb_hub_rst");
+ gpio_direction_output(EFIKAMX_USB_HUB_RESET, 1);
+ msleep(1);
+ gpio_set_value(EFIKAMX_USB_HUB_RESET, 0);
+ msleep(1);
+ gpio_set_value(EFIKAMX_USB_HUB_RESET, 1);
+}
+
+static void __init mx51_efika_usb(void)
+{
+ mx51_efika_hubreset();
+
+ /* pulling it low means no USB at all... */
+ gpio_request(EFIKA_USB_PHY_RESET, "usb_phy_reset");
+ gpio_direction_output(EFIKA_USB_PHY_RESET, 0);
+ msleep(1);
+ gpio_set_value(EFIKA_USB_PHY_RESET, 1);
+
+ usbh1_config.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT |
+ ULPI_OTG_EXTVBUSIND);
+
+ mxc_register_device(&mxc_usbdr_host_device, &dr_utmi_config);
+ mxc_register_device(&mxc_usbh1_device, &usbh1_config);
+}
+
+static struct mtd_partition mx51_efika_spi_nor_partitions[] = {
+ {
+ .name = "u-boot",
+ .offset = 0,
+ .size = SZ_256K,
+ },
+ {
+ .name = "config",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_64K,
+ },
+};
+
+static struct flash_platform_data mx51_efika_spi_flash_data = {
+ .name = "spi_flash",
+ .parts = mx51_efika_spi_nor_partitions,
+ .nr_parts = ARRAY_SIZE(mx51_efika_spi_nor_partitions),
+ .type = "sst25vf032b",
+};
+
+static struct regulator_consumer_supply sw1_consumers[] = {
+ {
+ .supply = "cpu_vcc",
+ }
+};
+
+static struct regulator_consumer_supply vdig_consumers[] = {
+ /* sgtl5000 */
+ REGULATOR_SUPPLY("VDDA", "1-000a"),
+ REGULATOR_SUPPLY("VDDD", "1-000a"),
+};
+
+static struct regulator_consumer_supply vvideo_consumers[] = {
+ /* sgtl5000 */
+ REGULATOR_SUPPLY("VDDIO", "1-000a"),
+};
+
+static struct regulator_consumer_supply vsd_consumers[] = {
+ REGULATOR_SUPPLY("vmmc", "sdhci-esdhc-imx.0"),
+ REGULATOR_SUPPLY("vmmc", "sdhci-esdhc-imx.1"),
+};
+
+static struct regulator_consumer_supply pwgt1_consumer[] = {
+ {
+ .supply = "pwgt1",
+ }
+};
+
+static struct regulator_consumer_supply pwgt2_consumer[] = {
+ {
+ .supply = "pwgt2",
+ }
+};
+
+static struct regulator_consumer_supply coincell_consumer[] = {
+ {
+ .supply = "coincell",
+ }
+};
+
+static struct regulator_init_data sw1_init = {
+ .constraints = {
+ .name = "SW1",
+ .min_uV = 600000,
+ .max_uV = 1375000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .valid_modes_mask = 0,
+ .always_on = 1,
+ .boot_on = 1,
+ .state_mem = {
+ .uV = 850000,
+ .mode = REGULATOR_MODE_NORMAL,
+ .enabled = 1,
+ },
+ },
+ .num_consumer_supplies = ARRAY_SIZE(sw1_consumers),
+ .consumer_supplies = sw1_consumers,
+};
+
+static struct regulator_init_data sw2_init = {
+ .constraints = {
+ .name = "SW2",
+ .min_uV = 900000,
+ .max_uV = 1850000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ .boot_on = 1,
+ .state_mem = {
+ .uV = 950000,
+ .mode = REGULATOR_MODE_NORMAL,
+ .enabled = 1,
+ },
+ }
+};
+
+static struct regulator_init_data sw3_init = {
+ .constraints = {
+ .name = "SW3",
+ .min_uV = 1100000,
+ .max_uV = 1850000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ .boot_on = 1,
+ }
+};
+
+static struct regulator_init_data sw4_init = {
+ .constraints = {
+ .name = "SW4",
+ .min_uV = 1100000,
+ .max_uV = 1850000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ .boot_on = 1,
+ }
+};
+
+static struct regulator_init_data viohi_init = {
+ .constraints = {
+ .name = "VIOHI",
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data vusb_init = {
+ .constraints = {
+ .name = "VUSB",
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data swbst_init = {
+ .constraints = {
+ .name = "SWBST",
+ }
+};
+
+static struct regulator_init_data vdig_init = {
+ .constraints = {
+ .name = "VDIG",
+ .min_uV = 1050000,
+ .max_uV = 1800000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vdig_consumers),
+ .consumer_supplies = vdig_consumers,
+};
+
+static struct regulator_init_data vpll_init = {
+ .constraints = {
+ .name = "VPLL",
+ .min_uV = 1050000,
+ .max_uV = 1800000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data vusb2_init = {
+ .constraints = {
+ .name = "VUSB2",
+ .min_uV = 2400000,
+ .max_uV = 2775000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data vvideo_init = {
+ .constraints = {
+ .name = "VVIDEO",
+ .min_uV = 2775000,
+ .max_uV = 2775000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ .apply_uV = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vvideo_consumers),
+ .consumer_supplies = vvideo_consumers,
+};
+
+static struct regulator_init_data vaudio_init = {
+ .constraints = {
+ .name = "VAUDIO",
+ .min_uV = 2300000,
+ .max_uV = 3000000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ }
+};
+
+static struct regulator_init_data vsd_init = {
+ .constraints = {
+ .name = "VSD",
+ .min_uV = 1800000,
+ .max_uV = 3150000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vsd_consumers),
+ .consumer_supplies = vsd_consumers,
+};
+
+static struct regulator_init_data vcam_init = {
+ .constraints = {
+ .name = "VCAM",
+ .min_uV = 2500000,
+ .max_uV = 3000000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
+ .valid_modes_mask = REGULATOR_MODE_FAST | REGULATOR_MODE_NORMAL,
+ .boot_on = 1,
+ }
+};
+
+static struct regulator_init_data vgen1_init = {
+ .constraints = {
+ .name = "VGEN1",
+ .min_uV = 1200000,
+ .max_uV = 3150000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data vgen2_init = {
+ .constraints = {
+ .name = "VGEN2",
+ .min_uV = 1200000,
+ .max_uV = 3150000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data vgen3_init = {
+ .constraints = {
+ .name = "VGEN3",
+ .min_uV = 1800000,
+ .max_uV = 2900000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
+static struct regulator_init_data gpo1_init = {
+ .constraints = {
+ .name = "GPO1",
+ }
+};
+
+static struct regulator_init_data gpo2_init = {
+ .constraints = {
+ .name = "GPO2",
+ }
+};
+
+static struct regulator_init_data gpo3_init = {
+ .constraints = {
+ .name = "GPO3",
+ }
+};
+
+static struct regulator_init_data gpo4_init = {
+ .constraints = {
+ .name = "GPO4",
+ }
+};
+
+static struct regulator_init_data pwgt1_init = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(pwgt1_consumer),
+ .consumer_supplies = pwgt1_consumer,
+};
+
+static struct regulator_init_data pwgt2_init = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(pwgt2_consumer),
+ .consumer_supplies = pwgt2_consumer,
+};
+
+static struct regulator_init_data vcoincell_init = {
+ .constraints = {
+ .name = "COINCELL",
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(coincell_consumer),
+ .consumer_supplies = coincell_consumer,
+};
+
+static struct mc13xxx_regulator_init_data mx51_efika_regulators[] = {
+ { .id = MC13892_SW1, .init_data = &sw1_init },
+ { .id = MC13892_SW2, .init_data = &sw2_init },
+ { .id = MC13892_SW3, .init_data = &sw3_init },
+ { .id = MC13892_SW4, .init_data = &sw4_init },
+ { .id = MC13892_SWBST, .init_data = &swbst_init },
+ { .id = MC13892_VIOHI, .init_data = &viohi_init },
+ { .id = MC13892_VPLL, .init_data = &vpll_init },
+ { .id = MC13892_VDIG, .init_data = &vdig_init },
+ { .id = MC13892_VSD, .init_data = &vsd_init },
+ { .id = MC13892_VUSB2, .init_data = &vusb2_init },
+ { .id = MC13892_VVIDEO, .init_data = &vvideo_init },
+ { .id = MC13892_VAUDIO, .init_data = &vaudio_init },
+ { .id = MC13892_VCAM, .init_data = &vcam_init },
+ { .id = MC13892_VGEN1, .init_data = &vgen1_init },
+ { .id = MC13892_VGEN2, .init_data = &vgen2_init },
+ { .id = MC13892_VGEN3, .init_data = &vgen3_init },
+ { .id = MC13892_VUSB, .init_data = &vusb_init },
+ { .id = MC13892_GPO1, .init_data = &gpo1_init },
+ { .id = MC13892_GPO2, .init_data = &gpo2_init },
+ { .id = MC13892_GPO3, .init_data = &gpo3_init },
+ { .id = MC13892_GPO4, .init_data = &gpo4_init },
+ { .id = MC13892_PWGT1SPI, .init_data = &pwgt1_init },
+ { .id = MC13892_PWGT2SPI, .init_data = &pwgt2_init },
+ { .id = MC13892_VCOINCELL, .init_data = &vcoincell_init },
+};
+
+static struct mc13xxx_platform_data mx51_efika_mc13892_data = {
+ .flags = MC13XXX_USE_RTC | MC13XXX_USE_REGULATOR,
+ .num_regulators = ARRAY_SIZE(mx51_efika_regulators),
+ .regulators = mx51_efika_regulators,
+};
+
+static struct spi_board_info mx51_efika_spi_board_info[] __initdata = {
+ {
+ .modalias = "m25p80",
+ .max_speed_hz = 25000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .platform_data = &mx51_efika_spi_flash_data,
+ .irq = -1,
+ },
+ {
+ .modalias = "mc13892",
+ .max_speed_hz = 1000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .platform_data = &mx51_efika_mc13892_data,
+ .irq = gpio_to_irq(EFIKAMX_PMIC),
+ },
+};
+
+static int mx51_efika_spi_cs[] = {
+ EFIKAMX_SPI_CS0,
+ EFIKAMX_SPI_CS1,
+};
+
+static const struct spi_imx_master mx51_efika_spi_pdata __initconst = {
+ .chipselect = mx51_efika_spi_cs,
+ .num_chipselect = ARRAY_SIZE(mx51_efika_spi_cs),
+};
+
+void __init efika_board_common_init(void)
+{
+ mxc_iomux_v3_setup_multiple_pads(mx51efika_pads,
+ ARRAY_SIZE(mx51efika_pads));
+ imx51_add_imx_uart(0, &uart_pdata);
+ mx51_efika_usb();
+ imx51_add_sdhci_esdhc_imx(0, NULL);
+
+ /* FIXME: comes from original code. check this. */
+ if (mx51_revision() < IMX_CHIP_REVISION_2_0)
+ sw2_init.constraints.state_mem.uV = 1100000;
+ else if (mx51_revision() == IMX_CHIP_REVISION_2_0) {
+ sw2_init.constraints.state_mem.uV = 1250000;
+ sw1_init.constraints.state_mem.uV = 1000000;
+ }
+ if (machine_is_mx51_efikasb())
+ vgen1_init.constraints.max_uV = 1200000;
+
+ gpio_request(EFIKAMX_PMIC, "pmic irq");
+ gpio_direction_input(EFIKAMX_PMIC);
+ spi_register_board_info(mx51_efika_spi_board_info,
+ ARRAY_SIZE(mx51_efika_spi_board_info));
+ imx51_add_ecspi(0, &mx51_efika_spi_pdata);
+
+#if defined(CONFIG_CPU_FREQ_IMX)
+ get_cpu_op = mx51_get_cpu_op;
+#endif
+}
+
diff --git a/arch/arm/mach-mxc91231/iomux.c b/arch/arm/mach-mxc91231/iomux.c
index 405d9b19d891..66fc41cbf2ca 100644
--- a/arch/arm/mach-mxc91231/iomux.c
+++ b/arch/arm/mach-mxc91231/iomux.c
@@ -50,7 +50,7 @@ unsigned long mxc_pin_alloc_map[NB_PORTS * 32 / BITS_PER_LONG];
/*
* set the mode for a IOMUX pin.
*/
-int mxc_iomux_mode(const unsigned int pin_mode)
+int mxc_iomux_mode(unsigned int pin_mode)
{
u32 side, field, l, mode, ret = 0;
void __iomem *reg;
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(mxc_iomux_set_pad);
* - reserves the pin so that it is not claimed by another driver
* - setups the iomux according to the configuration
*/
-int mxc_iomux_alloc_pin(const unsigned int pin_mode, const char *label)
+int mxc_iomux_alloc_pin(unsigned int pin_mode, const char *label)
{
unsigned pad = PIN_GLOBAL_NUM(pin_mode);
if (pad >= (PIN_MAX + 1)) {
@@ -134,10 +134,10 @@ int mxc_iomux_alloc_pin(const unsigned int pin_mode, const char *label)
}
EXPORT_SYMBOL(mxc_iomux_alloc_pin);
-int mxc_iomux_setup_multiple_pins(unsigned int *pin_list, unsigned count,
+int mxc_iomux_setup_multiple_pins(const unsigned int *pin_list, unsigned count,
const char *label)
{
- unsigned int *p = pin_list;
+ const unsigned int *p = pin_list;
int i;
int ret = -EINVAL;
@@ -155,7 +155,7 @@ setup_error:
}
EXPORT_SYMBOL(mxc_iomux_setup_multiple_pins);
-void mxc_iomux_release_pin(const unsigned int pin_mode)
+void mxc_iomux_release_pin(unsigned int pin_mode)
{
unsigned pad = PIN_GLOBAL_NUM(pin_mode);
@@ -164,9 +164,9 @@ void mxc_iomux_release_pin(const unsigned int pin_mode)
}
EXPORT_SYMBOL(mxc_iomux_release_pin);
-void mxc_iomux_release_multiple_pins(unsigned int *pin_list, int count)
+void mxc_iomux_release_multiple_pins(const unsigned int *pin_list, int count)
{
- unsigned int *p = pin_list;
+ const unsigned int *p = pin_list;
int i;
for (i = 0; i < count; i++) {
diff --git a/arch/arm/mach-mxc91231/magx-zn5.c b/arch/arm/mach-mxc91231/magx-zn5.c
index 395d83be8c98..f31a45e5a0b8 100644
--- a/arch/arm/mach-mxc91231/magx-zn5.c
+++ b/arch/arm/mach-mxc91231/magx-zn5.c
@@ -53,9 +53,10 @@ struct sys_timer zn5_timer = {
};
MACHINE_START(MAGX_ZN5, "Motorola Zn5")
- .boot_params = MXC91231_PHYS_OFFSET + 0x100,
- .map_io = mxc91231_map_io,
- .init_irq = mxc91231_init_irq,
- .timer = &zn5_timer,
- .init_machine = zn5_init,
+ .boot_params = MXC91231_PHYS_OFFSET + 0x100,
+ .map_io = mxc91231_map_io,
+ .init_early = mxc91231_init_early,
+ .init_irq = mxc91231_init_irq,
+ .timer = &zn5_timer,
+ .init_machine = zn5_init,
MACHINE_END
diff --git a/arch/arm/mach-mxc91231/mm.c b/arch/arm/mach-mxc91231/mm.c
index 7652c301da88..a77f6daf6a26 100644
--- a/arch/arm/mach-mxc91231/mm.c
+++ b/arch/arm/mach-mxc91231/mm.c
@@ -45,11 +45,14 @@ static struct map_desc mxc91231_io_desc[] __initdata = {
*/
void __init mxc91231_map_io(void)
{
- mxc_set_cpu_type(MXC_CPU_MXC91231);
-
iotable_init(mxc91231_io_desc, ARRAY_SIZE(mxc91231_io_desc));
}
+void __init mxc91231_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MXC91231);
+}
+
int mxc91231_register_gpios(void);
void __init mxc91231_init_irq(void)
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 8bfc8df54617..4f0b67345179 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -2,13 +2,18 @@ if ARCH_MXS
source "arch/arm/mach-mxs/devices/Kconfig"
+config MXS_OCOTP
+ bool
+
config SOC_IMX23
bool
select CPU_ARM926T
+ select HAVE_PWM
config SOC_IMX28
bool
select CPU_ARM926T
+ select HAVE_PWM
comment "MXS platforms:"
@@ -16,6 +21,7 @@ config MACH_MX23EVK
bool "Support MX23EVK Platform"
select SOC_IMX23
select MXS_HAVE_AMBA_DUART
+ select MXS_HAVE_PLATFORM_AUART
default y
help
Include support for MX23EVK platform. This includes specific
@@ -25,10 +31,25 @@ config MACH_MX28EVK
bool "Support MX28EVK Platform"
select SOC_IMX28
select MXS_HAVE_AMBA_DUART
+ select MXS_HAVE_PLATFORM_AUART
select MXS_HAVE_PLATFORM_FEC
+ select MXS_OCOTP
default y
help
Include support for MX28EVK platform. This includes specific
configurations for the board and its peripherals.
+config MODULE_TX28
+ bool
+ select SOC_IMX28
+ select MXS_HAVE_AMBA_DUART
+ select MXS_HAVE_PLATFORM_AUART
+ select MXS_HAVE_PLATFORM_FEC
+ select MXS_HAVE_PLATFORM_MXS_I2C
+ select MXS_HAVE_PLATFORM_MXS_PWM
+
+config MACH_TX28
+ bool "Ka-Ro TX28 module"
+ select MODULE_TX28
+
endif
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 39d3f9c2a841..2f1f6141ca71 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,10 +1,15 @@
# Common support
obj-y := clock.o devices.o gpio.o icoll.o iomux.o system.o timer.o
+obj-$(CONFIG_MXS_OCOTP) += ocotp.o
+obj-$(CONFIG_PM) += pm.o
+
obj-$(CONFIG_SOC_IMX23) += clock-mx23.o mm-mx23.o
obj-$(CONFIG_SOC_IMX28) += clock-mx28.o mm-mx28.o
obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o
obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o
+obj-$(CONFIG_MODULE_TX28) += module-tx28.o
+obj-$(CONFIG_MACH_TX28) += mach-tx28.o
obj-y += devices/
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
index ca72a05ed9c1..2415b33d79c0 100644
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ b/arch/arm/mach-mxs/clock-mx23.c
@@ -442,11 +442,17 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
/* for amba-pl011 driver */
_REGISTER_CLOCK("duart", NULL, uart_clk)
+ _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
_REGISTER_CLOCK("rtc", NULL, rtc_clk)
_REGISTER_CLOCK(NULL, "hclk", hbus_clk)
_REGISTER_CLOCK(NULL, "usb", usb_clk)
_REGISTER_CLOCK(NULL, "audio", audio_clk)
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
+ _REGISTER_CLOCK("imx23-fb", NULL, lcdif_clk)
};
static int clk_misc_init(void)
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index fd1c4c54b8e5..82770568845e 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -609,17 +609,30 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("duart", NULL, uart_clk)
_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
+ _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
+ _REGISTER_CLOCK("mxs-auart.1", NULL, uart_clk)
+ _REGISTER_CLOCK("mxs-auart.2", NULL, uart_clk)
+ _REGISTER_CLOCK("mxs-auart.3", NULL, uart_clk)
+ _REGISTER_CLOCK("mxs-auart.4", NULL, uart_clk)
_REGISTER_CLOCK("rtc", NULL, rtc_clk)
_REGISTER_CLOCK("pll2", NULL, pll2_clk)
_REGISTER_CLOCK(NULL, "hclk", hbus_clk)
_REGISTER_CLOCK(NULL, "xclk", xbus_clk)
- _REGISTER_CLOCK(NULL, "can0", can0_clk)
- _REGISTER_CLOCK(NULL, "can1", can1_clk)
+ _REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
+ _REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
_REGISTER_CLOCK(NULL, "usb0", usb0_clk)
_REGISTER_CLOCK(NULL, "usb1", usb1_clk)
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.5", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.6", NULL, pwm_clk)
+ _REGISTER_CLOCK("mxs-pwm.7", NULL, pwm_clk)
_REGISTER_CLOCK(NULL, "lradc", lradc_clk)
_REGISTER_CLOCK(NULL, "spdif", spdif_clk)
+ _REGISTER_CLOCK("imx28-fb", NULL, lcdif_clk)
};
static int clk_misc_init(void)
diff --git a/arch/arm/mach-mxs/devices-mx23.h b/arch/arm/mach-mxs/devices-mx23.h
index 1256788561d0..c7e14f4e3669 100644
--- a/arch/arm/mach-mxs/devices-mx23.h
+++ b/arch/arm/mach-mxs/devices-mx23.h
@@ -10,7 +10,18 @@
*/
#include <mach/mx23.h>
#include <mach/devices-common.h>
+#include <mach/mxsfb.h>
extern const struct amba_device mx23_duart_device __initconst;
#define mx23_add_duart() \
mxs_add_duart(&mx23_duart_device)
+
+extern const struct mxs_auart_data mx23_auart_data[] __initconst;
+#define mx23_add_auart(id) mxs_add_auart(&mx23_auart_data[id])
+#define mx23_add_auart0() mx23_add_auart(0)
+#define mx23_add_auart1() mx23_add_auart(1)
+
+#define mx23_add_mxs_pwm(id) mxs_add_mxs_pwm(MX23_PWM_BASE_ADDR, id)
+
+struct platform_device *__init mx23_add_mxsfb(
+ const struct mxsfb_platform_data *pdata);
diff --git a/arch/arm/mach-mxs/devices-mx28.h b/arch/arm/mach-mxs/devices-mx28.h
index 33773a6333a2..9d08555c4cf0 100644
--- a/arch/arm/mach-mxs/devices-mx28.h
+++ b/arch/arm/mach-mxs/devices-mx28.h
@@ -10,11 +10,34 @@
*/
#include <mach/mx28.h>
#include <mach/devices-common.h>
+#include <mach/mxsfb.h>
extern const struct amba_device mx28_duart_device __initconst;
#define mx28_add_duart() \
mxs_add_duart(&mx28_duart_device)
+extern const struct mxs_auart_data mx28_auart_data[] __initconst;
+#define mx28_add_auart(id) mxs_add_auart(&mx28_auart_data[id])
+#define mx28_add_auart0() mx28_add_auart(0)
+#define mx28_add_auart1() mx28_add_auart(1)
+#define mx28_add_auart2() mx28_add_auart(2)
+#define mx28_add_auart3() mx28_add_auart(3)
+#define mx28_add_auart4() mx28_add_auart(4)
+
extern const struct mxs_fec_data mx28_fec_data[] __initconst;
#define mx28_add_fec(id, pdata) \
mxs_add_fec(&mx28_fec_data[id], pdata)
+
+extern const struct mxs_flexcan_data mx28_flexcan_data[] __initconst;
+#define mx28_add_flexcan(id, pdata) \
+ mxs_add_flexcan(&mx28_flexcan_data[id], pdata)
+#define mx28_add_flexcan0(pdata) mx28_add_flexcan(0, pdata)
+#define mx28_add_flexcan1(pdata) mx28_add_flexcan(1, pdata)
+
+extern const struct mxs_i2c_data mx28_mxs_i2c_data[] __initconst;
+#define mx28_add_mxs_i2c(id) mxs_add_mxs_i2c(&mx28_mxs_i2c_data[id])
+
+#define mx28_add_mxs_pwm(id) mxs_add_mxs_pwm(MX28_PWM_BASE_ADDR, id)
+
+struct platform_device *__init mx28_add_mxsfb(
+ const struct mxsfb_platform_data *pdata);
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index c20d54740b0b..cfdb6b284702 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -66,6 +66,8 @@ struct platform_device *__init mxs_add_platform_device_dmamask(
ret = platform_device_add(pdev);
if (ret) {
err:
+ if (dmamask)
+ kfree(pdev->dev.dma_mask);
platform_device_put(pdev);
return ERR_PTR(ret);
}
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index cf7dc1ae575b..1451ad060d82 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -2,5 +2,21 @@ config MXS_HAVE_AMBA_DUART
bool
select ARM_AMBA
+config MXS_HAVE_PLATFORM_AUART
+ bool
+
config MXS_HAVE_PLATFORM_FEC
bool
+
+config MXS_HAVE_PLATFORM_FLEXCAN
+ select HAVE_CAN_FLEXCAN if CAN
+ bool
+
+config MXS_HAVE_PLATFORM_MXS_I2C
+ bool
+
+config MXS_HAVE_PLATFORM_MXS_PWM
+ bool
+
+config MXS_HAVE_PLATFORM_MXSFB
+ bool
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index d0a09f6934b8..39bd77348a42 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -1,2 +1,7 @@
obj-$(CONFIG_MXS_HAVE_AMBA_DUART) += amba-duart.o
+obj-$(CONFIG_MXS_HAVE_PLATFORM_AUART) += platform-auart.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o
+obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
+obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o
+obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o
+obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o
diff --git a/arch/arm/mach-mxs/devices/platform-auart.c b/arch/arm/mach-mxs/devices/platform-auart.c
new file mode 100644
index 000000000000..796606cce0ce
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-auart.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2010 Pengutronix
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/sizes.h>
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+#define mxs_auart_data_entry_single(soc, _id, hwid) \
+ { \
+ .id = _id, \
+ .iobase = soc ## _AUART ## hwid ## _BASE_ADDR, \
+ .irq = soc ## _INT_AUART ## hwid, \
+ }
+
+#define mxs_auart_data_entry(soc, _id, hwid) \
+ [_id] = mxs_auart_data_entry_single(soc, _id, hwid)
+
+#ifdef CONFIG_SOC_IMX23
+const struct mxs_auart_data mx23_auart_data[] __initconst = {
+#define mx23_auart_data_entry(_id, hwid) \
+ mxs_auart_data_entry(MX23, _id, hwid)
+ mx23_auart_data_entry(0, 1),
+ mx23_auart_data_entry(1, 2),
+};
+#endif
+
+#ifdef CONFIG_SOC_IMX28
+const struct mxs_auart_data mx28_auart_data[] __initconst = {
+#define mx28_auart_data_entry(_id) \
+ mxs_auart_data_entry(MX28, _id, _id)
+ mx28_auart_data_entry(0),
+ mx28_auart_data_entry(1),
+ mx28_auart_data_entry(2),
+ mx28_auart_data_entry(3),
+ mx28_auart_data_entry(4),
+};
+#endif
+
+struct platform_device *__init mxs_add_auart(
+ const struct mxs_auart_data *data)
+{
+ struct resource res[] = {
+ {
+ .start = data->iobase,
+ .end = data->iobase + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = data->irq,
+ .end = data->irq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return mxs_add_platform_device_dmamask("mxs-auart", data->id,
+ res, ARRAY_SIZE(res), NULL, 0,
+ DMA_BIT_MASK(32));
+}
+
diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c
index c42dff72b46c..9859cf283335 100644
--- a/arch/arm/mach-mxs/devices/platform-fec.c
+++ b/arch/arm/mach-mxs/devices/platform-fec.c
@@ -45,6 +45,7 @@ struct platform_device *__init mxs_add_fec(
},
};
- return mxs_add_platform_device("imx28-fec", data->id,
- res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
+ return mxs_add_platform_device_dmamask("imx28-fec", data->id,
+ res, ARRAY_SIZE(res), pdata, sizeof(*pdata),
+ DMA_BIT_MASK(32));
}
diff --git a/arch/arm/mach-mxs/devices/platform-flexcan.c b/arch/arm/mach-mxs/devices/platform-flexcan.c
new file mode 100644
index 000000000000..43a6b4bae6fe
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-flexcan.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010, 2011 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/sizes.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+#define mxs_flexcan_data_entry_single(soc, _id, _hwid, _size) \
+ { \
+ .id = _id, \
+ .iobase = soc ## _CAN ## _hwid ## _BASE_ADDR, \
+ .iosize = _size, \
+ .irq = soc ## _INT_CAN ## _hwid, \
+ }
+
+#define mxs_flexcan_data_entry(soc, _id, _hwid, _size) \
+ [_id] = mxs_flexcan_data_entry_single(soc, _id, _hwid, _size)
+
+#ifdef CONFIG_SOC_IMX28
+const struct mxs_flexcan_data mx28_flexcan_data[] __initconst = {
+#define mx28_flexcan_data_entry(_id, _hwid) \
+ mxs_flexcan_data_entry_single(MX28, _id, _hwid, SZ_8K)
+ mx28_flexcan_data_entry(0, 0),
+ mx28_flexcan_data_entry(1, 1),
+};
+#endif /* ifdef CONFIG_SOC_IMX28 */
+
+struct platform_device *__init mxs_add_flexcan(
+ const struct mxs_flexcan_data *data,
+ const struct flexcan_platform_data *pdata)
+{
+ struct resource res[] = {
+ {
+ .start = data->iobase,
+ .end = data->iobase + data->iosize - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = data->irq,
+ .end = data->irq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return mxs_add_platform_device("flexcan", data->id,
+ res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
+}
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-i2c.c b/arch/arm/mach-mxs/devices/platform-mxs-i2c.c
new file mode 100644
index 000000000000..eab3a06836d6
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-mxs-i2c.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011 Pengutronix
+ * Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/sizes.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+#define mxs_i2c_data_entry_single(soc, _id) \
+ { \
+ .id = _id, \
+ .iobase = soc ## _I2C ## _id ## _BASE_ADDR, \
+ .errirq = soc ## _INT_I2C ## _id ## _ERROR, \
+ .dmairq = soc ## _INT_I2C ## _id ## _DMA, \
+ }
+
+#define mxs_i2c_data_entry(soc, _id) \
+ [_id] = mxs_i2c_data_entry_single(soc, _id)
+
+#ifdef CONFIG_SOC_IMX28
+const struct mxs_i2c_data mx28_mxs_i2c_data[] __initconst = {
+ mxs_i2c_data_entry(MX28, 0),
+ mxs_i2c_data_entry(MX28, 1),
+};
+#endif
+
+struct platform_device *__init mxs_add_mxs_i2c(const struct mxs_i2c_data *data)
+{
+ struct resource res[] = {
+ {
+ .start = data->iobase,
+ .end = data->iobase + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = data->errirq,
+ .end = data->errirq,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = data->dmairq,
+ .end = data->dmairq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return mxs_add_platform_device("mxs-i2c", data->id, res,
+ ARRAY_SIZE(res), NULL, 0);
+}
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-pwm.c b/arch/arm/mach-mxs/devices/platform-mxs-pwm.c
new file mode 100644
index 000000000000..680f5a902936
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-mxs-pwm.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Pengutronix
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/sizes.h>
+#include <mach/devices-common.h>
+
+struct platform_device *__init mxs_add_mxs_pwm(resource_size_t iobase, int id)
+{
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ };
+
+ res.start = iobase + 0x10 + 0x20 * id;
+ res.end = res.start + 0x1f;
+
+ return mxs_add_platform_device("mxs-pwm", id, &res, 1, NULL, 0);
+}
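
A board file would use the helpers built on top of this function to register individual PWM instances; a hypothetical example (board and channel number are illustrative):

    /* inside a board's init_machine callback on an i.MX28 design */
    static void __init example_board_init(void)
    {
            /* registers platform device "mxs-pwm.2" covering
             * MX28_PWM_BASE_ADDR + 0x10 + 2 * 0x20 .. + 0x1f */
            mx28_add_mxs_pwm(2);
    }
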
diff --git a/arch/arm/mach-mxs/devices/platform-mxsfb.c b/arch/arm/mach-mxs/devices/platform-mxsfb.c
new file mode 100644
index 000000000000..bf72c9b8dbdd
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-mxsfb.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/sizes.h>
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+#include <mach/mxsfb.h>
+
+#ifdef CONFIG_SOC_IMX23
+struct platform_device *__init mx23_add_mxsfb(
+ const struct mxsfb_platform_data *pdata)
+{
+ struct resource res[] = {
+ {
+ .start = MX23_LCDIF_BASE_ADDR,
+ .end = MX23_LCDIF_BASE_ADDR + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ };
+
+ return mxs_add_platform_device_dmamask("imx23-fb", -1,
+ res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32));
+}
+#endif /* ifdef CONFIG_SOC_IMX23 */
+
+#ifdef CONFIG_SOC_IMX28
+struct platform_device *__init mx28_add_mxsfb(
+ const struct mxsfb_platform_data *pdata)
+{
+ struct resource res[] = {
+ {
+ .start = MX28_LCDIF_BASE_ADDR,
+ .end = MX28_LCDIF_BASE_ADDR + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ };
+
+ return mxs_add_platform_device_dmamask("imx28-fb", -1,
+ res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32));
+}
+#endif /* ifdef CONFIG_SOC_IMX28 */
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c
index cb0c0e83a527..d51c31aa6cdf 100644
--- a/arch/arm/mach-mxs/gpio.c
+++ b/arch/arm/mach-mxs/gpio.c
@@ -68,29 +68,29 @@ static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index,
}
}
-static void mxs_gpio_ack_irq(u32 irq)
+static void mxs_gpio_ack_irq(struct irq_data *d)
{
- u32 gpio = irq_to_gpio(irq);
+ u32 gpio = irq_to_gpio(d->irq);
clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f);
}
-static void mxs_gpio_mask_irq(u32 irq)
+static void mxs_gpio_mask_irq(struct irq_data *d)
{
- u32 gpio = irq_to_gpio(irq);
+ u32 gpio = irq_to_gpio(d->irq);
set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0);
}
-static void mxs_gpio_unmask_irq(u32 irq)
+static void mxs_gpio_unmask_irq(struct irq_data *d)
{
- u32 gpio = irq_to_gpio(irq);
+ u32 gpio = irq_to_gpio(d->irq);
set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1);
}
static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset);
-static int mxs_gpio_set_irq_type(u32 irq, u32 type)
+static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
{
- u32 gpio = irq_to_gpio(irq);
+ u32 gpio = irq_to_gpio(d->irq);
u32 pin_mask = 1 << (gpio & 31);
struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
void __iomem *pin_addr;
@@ -160,9 +160,9 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
* @param enable enable as wake-up if equal to non-zero
* @return This function returns 0 on success.
*/
-static int mxs_gpio_set_wake_irq(u32 irq, u32 enable)
+static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
{
- u32 gpio = irq_to_gpio(irq);
+ u32 gpio = irq_to_gpio(d->irq);
u32 gpio_idx = gpio & 0x1f;
struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
@@ -182,11 +182,11 @@ static int mxs_gpio_set_wake_irq(u32 irq, u32 enable)
}
static struct irq_chip gpio_irq_chip = {
- .ack = mxs_gpio_ack_irq,
- .mask = mxs_gpio_mask_irq,
- .unmask = mxs_gpio_unmask_irq,
- .set_type = mxs_gpio_set_irq_type,
- .set_wake = mxs_gpio_set_wake_irq,
+ .irq_ack = mxs_gpio_ack_irq,
+ .irq_mask = mxs_gpio_mask_irq,
+ .irq_unmask = mxs_gpio_unmask_irq,
+ .irq_set_type = mxs_gpio_set_irq_type,
+ .irq_set_wake = mxs_gpio_set_wake_irq,
};
static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset,
@@ -289,39 +289,42 @@ int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt)
return 0;
}
-#define DEFINE_MXS_GPIO_PORT(soc, _id) \
+#define MX23_GPIO_BASE MX23_IO_ADDRESS(MX23_PINCTRL_BASE_ADDR)
+#define MX28_GPIO_BASE MX28_IO_ADDRESS(MX28_PINCTRL_BASE_ADDR)
+
+#define DEFINE_MXS_GPIO_PORT(_base, _irq, _id) \
{ \
.chip.label = "gpio-" #_id, \
.id = _id, \
- .irq = soc ## _INT_GPIO ## _id, \
- .base = soc ## _IO_ADDRESS( \
- soc ## _PINCTRL ## _BASE_ADDR), \
+ .irq = _irq, \
+ .base = _base, \
.virtual_irq_start = MXS_GPIO_IRQ_START + (_id) * 32, \
}
-#define DEFINE_REGISTER_FUNCTION(prefix) \
-int __init prefix ## _register_gpios(void) \
-{ \
- return mxs_gpio_init(prefix ## _gpio_ports, \
- ARRAY_SIZE(prefix ## _gpio_ports)); \
-}
-
#ifdef CONFIG_SOC_IMX23
static struct mxs_gpio_port mx23_gpio_ports[] = {
- DEFINE_MXS_GPIO_PORT(MX23, 0),
- DEFINE_MXS_GPIO_PORT(MX23, 1),
- DEFINE_MXS_GPIO_PORT(MX23, 2),
+ DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO0, 0),
+ DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO1, 1),
+ DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO2, 2),
};
-DEFINE_REGISTER_FUNCTION(mx23)
+
+int __init mx23_register_gpios(void)
+{
+ return mxs_gpio_init(mx23_gpio_ports, ARRAY_SIZE(mx23_gpio_ports));
+}
#endif
#ifdef CONFIG_SOC_IMX28
static struct mxs_gpio_port mx28_gpio_ports[] = {
- DEFINE_MXS_GPIO_PORT(MX28, 0),
- DEFINE_MXS_GPIO_PORT(MX28, 1),
- DEFINE_MXS_GPIO_PORT(MX28, 2),
- DEFINE_MXS_GPIO_PORT(MX28, 3),
- DEFINE_MXS_GPIO_PORT(MX28, 4),
+ DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO0, 0),
+ DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO1, 1),
+ DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO2, 2),
+ DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO3, 3),
+ DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO4, 4),
};
-DEFINE_REGISTER_FUNCTION(mx28)
+
+int __init mx28_register_gpios(void)
+{
+ return mxs_gpio_init(mx28_gpio_ports, ARRAY_SIZE(mx28_gpio_ports));
+}
#endif
diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c
index 5dd43ba70058..0f4c120fc169 100644
--- a/arch/arm/mach-mxs/icoll.c
+++ b/arch/arm/mach-mxs/icoll.c
@@ -34,7 +34,7 @@
static void __iomem *icoll_base = MXS_IO_ADDRESS(MXS_ICOLL_BASE_ADDR);
-static void icoll_ack_irq(unsigned int irq)
+static void icoll_ack_irq(struct irq_data *d)
{
/*
* The Interrupt Collector is able to prioritize irqs.
@@ -45,22 +45,22 @@ static void icoll_ack_irq(unsigned int irq)
icoll_base + HW_ICOLL_LEVELACK);
}
-static void icoll_mask_irq(unsigned int irq)
+static void icoll_mask_irq(struct irq_data *d)
{
__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
- icoll_base + HW_ICOLL_INTERRUPTn_CLR(irq));
+ icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->irq));
}
-static void icoll_unmask_irq(unsigned int irq)
+static void icoll_unmask_irq(struct irq_data *d)
{
__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
- icoll_base + HW_ICOLL_INTERRUPTn_SET(irq));
+ icoll_base + HW_ICOLL_INTERRUPTn_SET(d->irq));
}
static struct irq_chip mxs_icoll_chip = {
- .ack = icoll_ack_irq,
- .mask = icoll_mask_irq,
- .unmask = icoll_unmask_irq,
+ .irq_ack = icoll_ack_irq,
+ .irq_mask = icoll_mask_irq,
+ .irq_unmask = icoll_unmask_irq,
};
void __init icoll_init_irq(void)
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 59133eb3cc96..635bb5d9a20a 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -13,6 +13,7 @@
struct clk;
+extern const u32 *mxs_get_ocotp(void);
extern int mxs_reset_block(void __iomem *);
extern void mxs_timer_init(struct clk *, int);
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 6c3d1a103433..71f24484b044 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -30,6 +30,16 @@ int __init mxs_add_amba_device(const struct amba_device *dev);
/* duart */
int __init mxs_add_duart(const struct amba_device *dev);
+/* auart */
+struct mxs_auart_data {
+ int id;
+ resource_size_t iobase;
+ resource_size_t iosize;
+ resource_size_t irq;
+};
+struct platform_device *__init mxs_add_auart(
+ const struct mxs_auart_data *data);
+
/* fec */
#include <linux/fec.h>
struct mxs_fec_data {
@@ -41,3 +51,28 @@ struct mxs_fec_data {
struct platform_device *__init mxs_add_fec(
const struct mxs_fec_data *data,
const struct fec_platform_data *pdata);
+
+/* flexcan */
+#include <linux/can/platform/flexcan.h>
+struct mxs_flexcan_data {
+ int id;
+ resource_size_t iobase;
+ resource_size_t iosize;
+ resource_size_t irq;
+};
+struct platform_device *__init mxs_add_flexcan(
+ const struct mxs_flexcan_data *data,
+ const struct flexcan_platform_data *pdata);
+
+/* i2c */
+struct mxs_i2c_data {
+ int id;
+ resource_size_t iobase;
+ resource_size_t errirq;
+ resource_size_t dmairq;
+};
+struct platform_device * __init mxs_add_mxs_i2c(const struct mxs_i2c_data *data);
+
+/* pwm */
+struct platform_device *__init mxs_add_mxs_pwm(
+ resource_size_t iobase, int id);
diff --git a/arch/arm/mach-mxs/include/mach/mmc.h b/arch/arm/mach-mxs/include/mach/mmc.h
new file mode 100644
index 000000000000..211547a05564
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/mmc.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_MXS_MMC_H__
+#define __MACH_MXS_MMC_H__
+
+struct mxs_mmc_platform_data {
+ int wp_gpio; /* write protect pin */
+ unsigned int flags;
+#define SLOTF_4_BIT_CAPABLE (1 << 0)
+#define SLOTF_8_BIT_CAPABLE (1 << 1)
+};
+#endif /* __MACH_MXS_MMC_H__ */
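
A board would hand this structure to the MXS MMC host driver as platform data; the values below are a hypothetical illustration (the GPIO number and slot width depend on the board wiring):

    static struct mxs_mmc_platform_data example_mmc_pdata = {
            .wp_gpio = -EINVAL, /* no write-protect line wired on this slot */
            .flags = SLOTF_4_BIT_CAPABLE, /* 4-bit data bus */
    };
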
diff --git a/arch/arm/mach-mxs/include/mach/mx23.h b/arch/arm/mach-mxs/include/mach/mx23.h
index 9edd02ec8e30..4768402b9485 100644
--- a/arch/arm/mach-mxs/include/mach/mx23.h
+++ b/arch/arm/mach-mxs/include/mach/mx23.h
@@ -101,9 +101,9 @@
#define MX23_INT_SSP2_DMA 20
#define MX23_INT_ECC8_IRQ 21
#define MX23_INT_RTC_ALARM 22
-#define MX23_INT_UARTAPP_TX_DMA 23
-#define MX23_INT_UARTAPP_INTERNAL 24
-#define MX23_INT_UARTAPP_RX_DMA 25
+#define MX23_INT_AUART1_TX_DMA 23
+#define MX23_INT_AUART1 24
+#define MX23_INT_AUART1_RX_DMA 25
#define MX23_INT_I2C_DMA 26
#define MX23_INT_I2C_ERROR 27
#define MX23_INT_TIMER0 28
@@ -135,9 +135,9 @@
#define MX23_INT_DCP 54
#define MX23_INT_BCH 56
#define MX23_INT_PXP 57
-#define MX23_INT_UARTAPP2_TX_DMA 58
-#define MX23_INT_UARTAPP2_INTERNAL 59
-#define MX23_INT_UARTAPP2_RX_DMA 60
+#define MX23_INT_AUART2_TX_DMA 58
+#define MX23_INT_AUART2 59
+#define MX23_INT_AUART2_RX_DMA 60
#define MX23_INT_VDAC_DETECT 61
#define MX23_INT_VDD5V_DROOP 64
#define MX23_INT_DCDC4P2_BO 65
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h
index f186c08c2911..35a89dd27242 100644
--- a/arch/arm/mach-mxs/include/mach/mxs.h
+++ b/arch/arm/mach-mxs/include/mach/mxs.h
@@ -28,8 +28,13 @@
/*
* MXS CPU types
*/
-#define cpu_is_mx23() (machine_is_mx23evk())
-#define cpu_is_mx28() (machine_is_mx28evk())
+#define cpu_is_mx23() ( \
+ machine_is_mx23evk() || \
+ 0)
+#define cpu_is_mx28() ( \
+ machine_is_mx28evk() || \
+ machine_is_tx28() || \
+ 0)
/*
* IO addresses common to MXS-based
diff --git a/arch/arm/mach-mxs/include/mach/mxsfb.h b/arch/arm/mach-mxs/include/mach/mxsfb.h
new file mode 100644
index 000000000000..923f397dbdcf
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/mxsfb.h
@@ -0,0 +1,48 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_FB_H
+#define __MACH_FB_H
+
+#include <linux/fb.h>
+
+#define STMLCDIF_8BIT 1 /** pixel data bus to the display is of 8 bit width */
+#define STMLCDIF_16BIT 0 /** pixel data bus to the display is of 16 bit width */
+#define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
+#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
+
+#define FB_SYNC_DATA_ENABLE_HIGH_ACT 64
+
+struct mxsfb_platform_data {
+ struct fb_videomode *mode_list;
+ unsigned mode_count;
+
+ unsigned default_bpp;
+
+ unsigned dotclk_delay; /* see the HW_LCDIF_VDCTRL4 register in the manual */
+ unsigned ld_intf_width; /* see the STMLCDIF_* macros */
+
+ unsigned fb_size; /* Size of the video memory. If zero a
+ * default will be used
+ */
+ unsigned long fb_phys; /* physical address for the video memory. If
+ * zero the framebuffer memory will be dynamically
+ * allocated. If specified, fb_size must also be specified.
+ * fb_phys must be unused by Linux.
+ */
+};
+
+#endif /* __MACH_FB_H */
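A minimal sketch of a board-level mxsfb_platform_data using the definitions above; the video mode name and timings are illustrative only:

static struct fb_videomode example_video_modes[] = {
	{
		.name     = "example-wvga",
		.refresh  = 60,
		.xres     = 800,
		.yres     = 480,
		.pixclock = 29851,	/* picoseconds, ~33.5 MHz dot clock */
		.sync     = FB_SYNC_DATA_ENABLE_HIGH_ACT,
	},
};

static struct mxsfb_platform_data example_fb_pdata = {
	.mode_list     = example_video_modes,
	.mode_count    = ARRAY_SIZE(example_video_modes),
	.default_bpp   = 16,
	.ld_intf_width = STMLCDIF_24BIT,
	.fb_size       = 0,	/* let the driver choose a default size */
};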
diff --git a/arch/arm/mach-mxs/include/mach/uncompress.h b/arch/arm/mach-mxs/include/mach/uncompress.h
index a005e76f34f9..f12a1732d8b8 100644
--- a/arch/arm/mach-mxs/include/mach/uncompress.h
+++ b/arch/arm/mach-mxs/include/mach/uncompress.h
@@ -63,6 +63,7 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
mxs_duart_base = MX23_DUART_BASE_ADDR;
break;
case MACH_TYPE_MX28EVK:
+ case MACH_TYPE_TX28:
mxs_duart_base = MX28_DUART_BASE_ADDR;
break;
default:
diff --git a/arch/arm/mach-mxs/mach-mx23evk.c b/arch/arm/mach-mxs/mach-mx23evk.c
index aa0640052f58..0737ce2e6cfb 100644
--- a/arch/arm/mach-mxs/mach-mx23evk.c
+++ b/arch/arm/mach-mxs/mach-mx23evk.c
@@ -30,6 +30,16 @@ static const iomux_cfg_t mx23evk_pads[] __initconst = {
/* duart */
MX23_PAD_PWM0__DUART_RX | MXS_PAD_4MA,
MX23_PAD_PWM1__DUART_TX | MXS_PAD_4MA,
+
+ /* auart */
+ MX23_PAD_AUART1_RX__AUART1_RX |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX23_PAD_AUART1_TX__AUART1_TX |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX23_PAD_AUART1_CTS__AUART1_CTS |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX23_PAD_AUART1_RTS__AUART1_RTS |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
};
static void __init mx23evk_init(void)
@@ -37,6 +47,7 @@ static void __init mx23evk_init(void)
mxs_iomux_setup_multiple_pads(mx23evk_pads, ARRAY_SIZE(mx23evk_pads));
mx23_add_duart();
+ mx23_add_auart0();
}
static void __init mx23evk_timer_init(void)
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index 8e2c5975001e..1f0b708138fe 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -38,6 +38,25 @@ static const iomux_cfg_t mx28evk_pads[] __initconst = {
MX28_PAD_PWM1__DUART_TX |
(MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* auart0 */
+ MX28_PAD_AUART0_RX__AUART0_RX |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_AUART0_TX__AUART0_TX |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_AUART0_CTS__AUART0_CTS |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_AUART0_RTS__AUART0_RTS |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* auart3 */
+ MX28_PAD_AUART3_RX__AUART3_RX |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_AUART3_TX__AUART3_TX |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_AUART3_CTS__AUART3_CTS |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_AUART3_RTS__AUART3_RTS |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+
/* fec0 */
MX28_PAD_ENET0_MDC__ENET0_MDC |
(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
@@ -119,7 +138,7 @@ static void __init mx28evk_fec_reset(void)
gpio_set_value(MX28EVK_FEC_PHY_RESET, 1);
}
-static struct fec_platform_data mx28_fec_pdata[] = {
+static struct fec_platform_data mx28_fec_pdata[] __initdata = {
{
/* fec0 */
.phy = PHY_INTERFACE_MODE_RMII,
@@ -129,11 +148,46 @@ static struct fec_platform_data mx28_fec_pdata[] = {
},
};
+static int __init mx28evk_fec_get_mac(void)
+{
+ int i;
+ u32 val;
+ const u32 *ocotp = mxs_get_ocotp();
+
+ if (!ocotp)
+ goto error;
+
+ /*
+ * OCOTP only stores the last 4 octets for each mac address,
+ * so hard-code Freescale OUI (00:04:9f) here.
+ */
+ for (i = 0; i < 2; i++) {
+ val = ocotp[i * 4];
+ mx28_fec_pdata[i].mac[0] = 0x00;
+ mx28_fec_pdata[i].mac[1] = 0x04;
+ mx28_fec_pdata[i].mac[2] = 0x9f;
+ mx28_fec_pdata[i].mac[3] = (val >> 16) & 0xff;
+ mx28_fec_pdata[i].mac[4] = (val >> 8) & 0xff;
+ mx28_fec_pdata[i].mac[5] = (val >> 0) & 0xff;
+ }
+
+ return 0;
+
+error:
+ pr_err("%s: timeout when reading fec mac from OCOTP\n", __func__);
+ return -ETIMEDOUT;
+}
+
static void __init mx28evk_init(void)
{
mxs_iomux_setup_multiple_pads(mx28evk_pads, ARRAY_SIZE(mx28evk_pads));
mx28_add_duart();
+ mx28_add_auart0();
+ mx28_add_auart3();
+
+ if (mx28evk_fec_get_mac())
+ pr_warn("%s: failed on fec mac setup\n", __func__);
mx28evk_fec_reset();
mx28_add_fec(0, &mx28_fec_pdata[0]);
diff --git a/arch/arm/mach-mxs/mach-tx28.c b/arch/arm/mach-mxs/mach-tx28.c
new file mode 100644
index 000000000000..b65e3719cbc4
--- /dev/null
+++ b/arch/arm/mach-mxs/mach-tx28.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010 <LW@KARO-electronics.de>
+ *
+ * based on: mach-mx28_evk.c
+ * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation
+ */
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/i2c.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include <mach/common.h>
+#include <mach/iomux-mx28.h>
+
+#include "devices-mx28.h"
+#include "module-tx28.h"
+
+#define TX28_STK5_GPIO_LED MXS_GPIO_NR(4, 10)
+
+static const iomux_cfg_t tx28_stk5v3_pads[] __initconst = {
+ /* LED */
+ MX28_PAD_ENET0_RXD3__GPIO_4_10 |
+ MXS_PAD_3V3 | MXS_PAD_4MA | MXS_PAD_NOPULL,
+
+ /* framebuffer */
+#define LCD_MODE (MXS_PAD_3V3 | MXS_PAD_4MA)
+ MX28_PAD_LCD_D00__LCD_D0 | LCD_MODE,
+ MX28_PAD_LCD_D01__LCD_D1 | LCD_MODE,
+ MX28_PAD_LCD_D02__LCD_D2 | LCD_MODE,
+ MX28_PAD_LCD_D03__LCD_D3 | LCD_MODE,
+ MX28_PAD_LCD_D04__LCD_D4 | LCD_MODE,
+ MX28_PAD_LCD_D05__LCD_D5 | LCD_MODE,
+ MX28_PAD_LCD_D06__LCD_D6 | LCD_MODE,
+ MX28_PAD_LCD_D07__LCD_D7 | LCD_MODE,
+ MX28_PAD_LCD_D08__LCD_D8 | LCD_MODE,
+ MX28_PAD_LCD_D09__LCD_D9 | LCD_MODE,
+ MX28_PAD_LCD_D10__LCD_D10 | LCD_MODE,
+ MX28_PAD_LCD_D11__LCD_D11 | LCD_MODE,
+ MX28_PAD_LCD_D12__LCD_D12 | LCD_MODE,
+ MX28_PAD_LCD_D13__LCD_D13 | LCD_MODE,
+ MX28_PAD_LCD_D14__LCD_D14 | LCD_MODE,
+ MX28_PAD_LCD_D15__LCD_D15 | LCD_MODE,
+ MX28_PAD_LCD_D16__LCD_D16 | LCD_MODE,
+ MX28_PAD_LCD_D17__LCD_D17 | LCD_MODE,
+ MX28_PAD_LCD_D18__LCD_D18 | LCD_MODE,
+ MX28_PAD_LCD_D19__LCD_D19 | LCD_MODE,
+ MX28_PAD_LCD_D20__LCD_D20 | LCD_MODE,
+ MX28_PAD_LCD_D21__LCD_D21 | LCD_MODE,
+ MX28_PAD_LCD_D22__LCD_D22 | LCD_MODE,
+ MX28_PAD_LCD_D23__LCD_D23 | LCD_MODE,
+ MX28_PAD_LCD_RD_E__LCD_VSYNC | LCD_MODE,
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC | LCD_MODE,
+ MX28_PAD_LCD_RS__LCD_DOTCLK | LCD_MODE,
+ MX28_PAD_LCD_CS__LCD_CS | LCD_MODE,
+ MX28_PAD_LCD_VSYNC__LCD_VSYNC | LCD_MODE,
+ MX28_PAD_LCD_HSYNC__LCD_HSYNC | LCD_MODE,
+ MX28_PAD_LCD_DOTCLK__LCD_DOTCLK | LCD_MODE,
+ MX28_PAD_LCD_ENABLE__GPIO_1_31 | LCD_MODE,
+ MX28_PAD_LCD_RESET__GPIO_3_30 | LCD_MODE,
+ MX28_PAD_PWM0__PWM_0 | LCD_MODE,
+
+ /* UART1 */
+ MX28_PAD_AUART0_CTS__DUART_RX,
+ MX28_PAD_AUART0_RTS__DUART_TX,
+ MX28_PAD_AUART0_TX__DUART_RTS,
+ MX28_PAD_AUART0_RX__DUART_CTS,
+
+ /* UART2 */
+ MX28_PAD_AUART1_RX__AUART1_RX,
+ MX28_PAD_AUART1_TX__AUART1_TX,
+ MX28_PAD_AUART1_RTS__AUART1_RTS,
+ MX28_PAD_AUART1_CTS__AUART1_CTS,
+
+ /* CAN */
+ MX28_PAD_GPMI_RDY2__CAN0_TX,
+ MX28_PAD_GPMI_RDY3__CAN0_RX,
+
+ /* I2C */
+ MX28_PAD_I2C0_SCL__I2C0_SCL,
+ MX28_PAD_I2C0_SDA__I2C0_SDA,
+
+ /* TSC2007 */
+ MX28_PAD_SAIF0_MCLK__GPIO_3_20 | MXS_PAD_3V3 | MXS_PAD_4MA | MXS_PAD_PULLUP,
+
+ /* MMC0 */
+ MX28_PAD_SSP0_DATA0__SSP0_D0 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA1__SSP0_D1 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA2__SSP0_D2 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA3__SSP0_D3 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA4__SSP0_D4 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA5__SSP0_D5 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA6__SSP0_D6 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA7__SSP0_D7 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_CMD__SSP0_CMD |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_SSP0_SCK__SSP0_SCK |
+ (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+};
+
+static struct gpio_led tx28_stk5v3_leds[] = {
+ {
+ .name = "GPIO-LED",
+ .default_trigger = "heartbeat",
+ .gpio = TX28_STK5_GPIO_LED,
+ },
+};
+
+static const struct gpio_led_platform_data tx28_stk5v3_led_data __initconst = {
+ .leds = tx28_stk5v3_leds,
+ .num_leds = ARRAY_SIZE(tx28_stk5v3_leds),
+};
+
+static struct spi_board_info tx28_spi_board_info[] = {
+ {
+ .modalias = "spidev",
+ .max_speed_hz = 20000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .controller_data = (void *)SPI_GPIO_NO_CHIPSELECT,
+ .mode = SPI_MODE_0,
+ },
+};
+
+static struct i2c_board_info tx28_stk5v3_i2c_boardinfo[] __initdata = {
+ {
+ I2C_BOARD_INFO("ds1339", 0x68),
+ },
+};
+
+static void __init tx28_stk5v3_init(void)
+{
+ mxs_iomux_setup_multiple_pads(tx28_stk5v3_pads,
+ ARRAY_SIZE(tx28_stk5v3_pads));
+
+ mx28_add_duart(); /* UART1 */
+ mx28_add_auart(1); /* UART2 */
+
+ tx28_add_fec0();
+ /* spi via ssp will be added when available */
+ spi_register_board_info(tx28_spi_board_info,
+ ARRAY_SIZE(tx28_spi_board_info));
+ mxs_add_platform_device("leds-gpio", 0, NULL, 0,
+ &tx28_stk5v3_led_data, sizeof(tx28_stk5v3_led_data));
+ mx28_add_mxs_i2c(0);
+ i2c_register_board_info(0, tx28_stk5v3_i2c_boardinfo,
+ ARRAY_SIZE(tx28_stk5v3_i2c_boardinfo));
+}
+
+static void __init tx28_timer_init(void)
+{
+ mx28_clocks_init();
+}
+
+static struct sys_timer tx28_timer = {
+ .init = tx28_timer_init,
+};
+
+MACHINE_START(TX28, "Ka-Ro electronics TX28 module")
+ .map_io = mx28_map_io,
+ .init_irq = mx28_init_irq,
+ .init_machine = tx28_stk5v3_init,
+ .timer = &tx28_timer,
+MACHINE_END
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c
new file mode 100644
index 000000000000..fa0b154da67b
--- /dev/null
+++ b/arch/arm/mach-mxs/module-tx28.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2010 <LW@KARO-electronics.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/fec.h>
+#include <linux/gpio.h>
+
+#include <mach/iomux-mx28.h>
+#include "../devices-mx28.h"
+
+#include "module-tx28.h"
+
+#define TX28_FEC_PHY_POWER MXS_GPIO_NR(3, 29)
+#define TX28_FEC_PHY_RESET MXS_GPIO_NR(4, 13)
+
+static const iomux_cfg_t tx28_fec_gpio_pads[] __initconst = {
+ /* PHY POWER */
+ MX28_PAD_PWM4__GPIO_3_29 |
+ MXS_PAD_4MA | MXS_PAD_NOPULL | MXS_PAD_3V3,
+ /* PHY RESET */
+ MX28_PAD_ENET0_RX_CLK__GPIO_4_13 |
+ MXS_PAD_4MA | MXS_PAD_NOPULL | MXS_PAD_3V3,
+ /* Mode strap pins 0-2 */
+ MX28_PAD_ENET0_RXD0__GPIO_4_3 |
+ MXS_PAD_8MA | MXS_PAD_PULLUP | MXS_PAD_3V3,
+ MX28_PAD_ENET0_RXD1__GPIO_4_4 |
+ MXS_PAD_8MA | MXS_PAD_PULLUP | MXS_PAD_3V3,
+ MX28_PAD_ENET0_RX_EN__GPIO_4_2 |
+ MXS_PAD_8MA | MXS_PAD_PULLUP | MXS_PAD_3V3,
+ /* nINT */
+ MX28_PAD_ENET0_TX_CLK__GPIO_4_5 |
+ MXS_PAD_4MA | MXS_PAD_NOPULL | MXS_PAD_3V3,
+
+ MX28_PAD_ENET0_MDC__GPIO_4_0,
+ MX28_PAD_ENET0_MDIO__GPIO_4_1,
+ MX28_PAD_ENET0_TX_EN__GPIO_4_6,
+ MX28_PAD_ENET0_TXD0__GPIO_4_7,
+ MX28_PAD_ENET0_TXD1__GPIO_4_8,
+ MX28_PAD_ENET_CLK__GPIO_4_16,
+};
+
+#define FEC_MODE (MXS_PAD_8MA | MXS_PAD_PULLUP | MXS_PAD_3V3)
+static const iomux_cfg_t tx28_fec_pads[] __initconst = {
+ MX28_PAD_ENET0_MDC__ENET0_MDC | FEC_MODE,
+ MX28_PAD_ENET0_MDIO__ENET0_MDIO | FEC_MODE,
+ MX28_PAD_ENET0_RX_EN__ENET0_RX_EN | FEC_MODE,
+ MX28_PAD_ENET0_RXD0__ENET0_RXD0 | FEC_MODE,
+ MX28_PAD_ENET0_RXD1__ENET0_RXD1 | FEC_MODE,
+ MX28_PAD_ENET0_TX_EN__ENET0_TX_EN | FEC_MODE,
+ MX28_PAD_ENET0_TXD0__ENET0_TXD0 | FEC_MODE,
+ MX28_PAD_ENET0_TXD1__ENET0_TXD1 | FEC_MODE,
+ MX28_PAD_ENET_CLK__CLKCTRL_ENET | FEC_MODE,
+};
+
+static const struct fec_platform_data tx28_fec_data __initconst = {
+ .phy = PHY_INTERFACE_MODE_RMII,
+};
+
+int __init tx28_add_fec0(void)
+{
+ int i, ret;
+
+ pr_debug("%s: Switching FEC PHY power off\n", __func__);
+ ret = mxs_iomux_setup_multiple_pads(tx28_fec_gpio_pads,
+ ARRAY_SIZE(tx28_fec_gpio_pads));
+ for (i = 0; i < ARRAY_SIZE(tx28_fec_gpio_pads); i++) {
+ unsigned int gpio = MXS_GPIO_NR(PAD_BANK(tx28_fec_gpio_pads[i]),
+ PAD_PIN(tx28_fec_gpio_pads[i]));
+
+ ret = gpio_request(gpio, "FEC");
+ if (ret) {
+ pr_err("Failed to request GPIO_%d_%d: %d\n",
+ PAD_BANK(tx28_fec_gpio_pads[i]),
+ PAD_PIN(tx28_fec_gpio_pads[i]), ret);
+ goto free_gpios;
+ }
+ ret = gpio_direction_output(gpio, 0);
+ if (ret) {
+ pr_err("Failed to set direction of GPIO_%d_%d to output: %d\n",
+ gpio / 32 + 1, gpio % 32, ret);
+ goto free_gpios;
+ }
+ }
+
+ /* Power up fec phy */
+ pr_debug("%s: Switching FEC PHY power on\n", __func__);
+ ret = gpio_direction_output(TX28_FEC_PHY_POWER, 1);
+ if (ret) {
+ pr_err("Failed to power on PHY: %d\n", ret);
+ goto free_gpios;
+ }
+ mdelay(26); /* 25ms according to data sheet */
+
+ /* nINT */
+ gpio_direction_input(MXS_GPIO_NR(4, 5));
+ /* Mode strap pins */
+ gpio_direction_output(MXS_GPIO_NR(4, 2), 1);
+ gpio_direction_output(MXS_GPIO_NR(4, 3), 1);
+ gpio_direction_output(MXS_GPIO_NR(4, 4), 1);
+
+ udelay(100); /* minimum assertion time for nRST */
+
+ pr_debug("%s: Deasserting FEC PHY RESET\n", __func__);
+ gpio_set_value(TX28_FEC_PHY_RESET, 1);
+
+ ret = mxs_iomux_setup_multiple_pads(tx28_fec_pads,
+ ARRAY_SIZE(tx28_fec_pads));
+ if (ret) {
+ pr_debug("%s: mxs_iomux_setup_multiple_pads() failed with rc: %d\n",
+ __func__, ret);
+ goto free_gpios;
+ }
+ pr_debug("%s: Registering FEC device\n", __func__);
+ mx28_add_fec(0, &tx28_fec_data);
+ return 0;
+
+free_gpios:
+ while (--i >= 0) {
+ unsigned int gpio = MXS_GPIO_NR(PAD_BANK(tx28_fec_gpio_pads[i]),
+ PAD_PIN(tx28_fec_gpio_pads[i]));
+
+ gpio_free(gpio);
+ }
+
+ return ret;
+}
diff --git a/arch/arm/mach-mxs/module-tx28.h b/arch/arm/mach-mxs/module-tx28.h
new file mode 100644
index 000000000000..df9e1b6e81bf
--- /dev/null
+++ b/arch/arm/mach-mxs/module-tx28.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+int __init tx28_add_fec0(void);
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
new file mode 100644
index 000000000000..65157a35dbba
--- /dev/null
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+#include <mach/mxs.h>
+
+#define OCOTP_WORD_OFFSET 0x20
+#define OCOTP_WORD_COUNT 0x20
+
+#define BM_OCOTP_CTRL_BUSY (1 << 8)
+#define BM_OCOTP_CTRL_ERROR (1 << 9)
+#define BM_OCOTP_CTRL_RD_BANK_OPEN (1 << 12)
+
+static DEFINE_MUTEX(ocotp_mutex);
+static u32 ocotp_words[OCOTP_WORD_COUNT];
+
+const u32 *mxs_get_ocotp(void)
+{
+ void __iomem *ocotp_base = MXS_IO_ADDRESS(MXS_OCOTP_BASE_ADDR);
+ int timeout = 0x400;
+ size_t i;
+ static int once = 0;
+
+ if (once)
+ return ocotp_words;
+
+ mutex_lock(&ocotp_mutex);
+
+ /*
+ * clk_enable(hbus_clk) for ocotp can be skipped
+ * as it must be on when system is running.
+ */
+
+ /* try to clear ERROR bit */
+ __mxs_clrl(BM_OCOTP_CTRL_ERROR, ocotp_base);
+
+ /* check both BUSY and ERROR cleared */
+ while ((__raw_readl(ocotp_base) &
+ (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)) && --timeout)
+ cpu_relax();
+
+ if (unlikely(!timeout))
+ goto error_unlock;
+
+ /* open OCOTP banks for read */
+ __mxs_setl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base);
+
+ /* wait approximately 32 hclk cycles */
+ udelay(1);
+
+ /* poll until the BUSY bit is cleared */
+ timeout = 0x400;
+ while ((__raw_readl(ocotp_base) & BM_OCOTP_CTRL_BUSY) && --timeout)
+ cpu_relax();
+
+ if (unlikely(!timeout))
+ goto error_unlock;
+
+ for (i = 0; i < OCOTP_WORD_COUNT; i++)
+ ocotp_words[i] = __raw_readl(ocotp_base + OCOTP_WORD_OFFSET +
+ i * 0x10);
+
+ /* close banks for power saving */
+ __mxs_clrl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base);
+
+ once = 1;
+
+ mutex_unlock(&ocotp_mutex);
+
+ return ocotp_words;
+
+error_unlock:
+ mutex_unlock(&ocotp_mutex);
+ pr_err("%s: timeout in reading OCOTP\n", __func__);
+ return NULL;
+}
diff --git a/arch/arm/mach-mxs/pm.c b/arch/arm/mach-mxs/pm.c
new file mode 100644
index 000000000000..fb042da29bda
--- /dev/null
+++ b/arch/arm/mach-mxs/pm.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/io.h>
+#include <mach/system.h>
+
+static int mxs_suspend_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_MEM:
+ arch_idle();
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct platform_suspend_ops mxs_suspend_ops = {
+ .enter = mxs_suspend_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static int __init mxs_pm_init(void)
+{
+ suspend_set_ops(&mxs_suspend_ops);
+ return 0;
+}
+device_initcall(mxs_pm_init);
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx23.h b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
index dbc04747b691..0ea5c9d0e2b2 100644
--- a/arch/arm/mach-mxs/regs-clkctrl-mx23.h
+++ b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
@@ -33,10 +33,6 @@
#define HW_CLKCTRL_PLLCTRL0_CLR (0x00000008)
#define HW_CLKCTRL_PLLCTRL0_TOG (0x0000000c)
-#define BP_CLKCTRL_PLLCTRL0_RSRVD6 30
-#define BM_CLKCTRL_PLLCTRL0_RSRVD6 0xC0000000
-#define BF_CLKCTRL_PLLCTRL0_RSRVD6(v) \
- (((v) << 30) & BM_CLKCTRL_PLLCTRL0_RSRVD6)
#define BP_CLKCTRL_PLLCTRL0_LFR_SEL 28
#define BM_CLKCTRL_PLLCTRL0_LFR_SEL 0x30000000
#define BF_CLKCTRL_PLLCTRL0_LFR_SEL(v) \
@@ -45,10 +41,6 @@
#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_2 0x1
#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_05 0x2
#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_RSRVD5 26
-#define BM_CLKCTRL_PLLCTRL0_RSRVD5 0x0C000000
-#define BF_CLKCTRL_PLLCTRL0_RSRVD5(v) \
- (((v) << 26) & BM_CLKCTRL_PLLCTRL0_RSRVD5)
#define BP_CLKCTRL_PLLCTRL0_CP_SEL 24
#define BM_CLKCTRL_PLLCTRL0_CP_SEL 0x03000000
#define BF_CLKCTRL_PLLCTRL0_CP_SEL(v) \
@@ -57,10 +49,6 @@
#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_2 0x1
#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_05 0x2
#define BV_CLKCTRL_PLLCTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_RSRVD4 22
-#define BM_CLKCTRL_PLLCTRL0_RSRVD4 0x00C00000
-#define BF_CLKCTRL_PLLCTRL0_RSRVD4(v) \
- (((v) << 22) & BM_CLKCTRL_PLLCTRL0_RSRVD4)
#define BP_CLKCTRL_PLLCTRL0_DIV_SEL 20
#define BM_CLKCTRL_PLLCTRL0_DIV_SEL 0x00300000
#define BF_CLKCTRL_PLLCTRL0_DIV_SEL(v) \
@@ -69,23 +57,13 @@
#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWER 0x1
#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWEST 0x2
#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLLCTRL0_RSRVD3 0x00080000
#define BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLLCTRL0_RSRVD2 0x00020000
#define BM_CLKCTRL_PLLCTRL0_POWER 0x00010000
-#define BP_CLKCTRL_PLLCTRL0_RSRVD1 0
-#define BM_CLKCTRL_PLLCTRL0_RSRVD1 0x0000FFFF
-#define BF_CLKCTRL_PLLCTRL0_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_PLLCTRL0_RSRVD1)
#define HW_CLKCTRL_PLLCTRL1 (0x00000010)
#define BM_CLKCTRL_PLLCTRL1_LOCK 0x80000000
#define BM_CLKCTRL_PLLCTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLLCTRL1_RSRVD1 16
-#define BM_CLKCTRL_PLLCTRL1_RSRVD1 0x3FFF0000
-#define BF_CLKCTRL_PLLCTRL1_RSRVD1(v) \
- (((v) << 16) & BM_CLKCTRL_PLLCTRL1_RSRVD1)
#define BP_CLKCTRL_PLLCTRL1_LOCK_COUNT 0
#define BM_CLKCTRL_PLLCTRL1_LOCK_COUNT 0x0000FFFF
#define BF_CLKCTRL_PLLCTRL1_LOCK_COUNT(v) \
@@ -96,29 +74,15 @@
#define HW_CLKCTRL_CPU_CLR (0x00000028)
#define HW_CLKCTRL_CPU_TOG (0x0000002c)
-#define BP_CLKCTRL_CPU_RSRVD5 30
-#define BM_CLKCTRL_CPU_RSRVD5 0xC0000000
-#define BF_CLKCTRL_CPU_RSRVD5(v) \
- (((v) << 30) & BM_CLKCTRL_CPU_RSRVD5)
#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
-#define BM_CLKCTRL_CPU_RSRVD4 0x08000000
#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
#define BP_CLKCTRL_CPU_DIV_XTAL 16
#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
(((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BP_CLKCTRL_CPU_RSRVD3 13
-#define BM_CLKCTRL_CPU_RSRVD3 0x0000E000
-#define BF_CLKCTRL_CPU_RSRVD3(v) \
- (((v) << 13) & BM_CLKCTRL_CPU_RSRVD3)
#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
-#define BM_CLKCTRL_CPU_RSRVD2 0x00000800
#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_RSRVD1 6
-#define BM_CLKCTRL_CPU_RSRVD1 0x000003C0
-#define BF_CLKCTRL_CPU_RSRVD1(v) \
- (((v) << 6) & BM_CLKCTRL_CPU_RSRVD1)
#define BP_CLKCTRL_CPU_DIV_CPU 0
#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
#define BF_CLKCTRL_CPU_DIV_CPU(v) \
@@ -129,10 +93,6 @@
#define HW_CLKCTRL_HBUS_CLR (0x00000038)
#define HW_CLKCTRL_HBUS_TOG (0x0000003c)
-#define BP_CLKCTRL_HBUS_RSRVD4 30
-#define BM_CLKCTRL_HBUS_RSRVD4 0xC0000000
-#define BF_CLKCTRL_HBUS_RSRVD4(v) \
- (((v) << 30) & BM_CLKCTRL_HBUS_RSRVD4)
#define BM_CLKCTRL_HBUS_BUSY 0x20000000
#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x10000000
#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x08000000
@@ -143,7 +103,6 @@
#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
#define BM_CLKCTRL_HBUS_AUTO_SLOW_MODE 0x00100000
-#define BM_CLKCTRL_HBUS_RSRVD2 0x00080000
#define BP_CLKCTRL_HBUS_SLOW_DIV 16
#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
@@ -154,10 +113,6 @@
#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BP_CLKCTRL_HBUS_RSRVD1 6
-#define BM_CLKCTRL_HBUS_RSRVD1 0x0000FFC0
-#define BF_CLKCTRL_HBUS_RSRVD1(v) \
- (((v) << 6) & BM_CLKCTRL_HBUS_RSRVD1)
#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
#define BP_CLKCTRL_HBUS_DIV 0
#define BM_CLKCTRL_HBUS_DIV 0x0000001F
@@ -167,10 +122,6 @@
#define HW_CLKCTRL_XBUS (0x00000040)
#define BM_CLKCTRL_XBUS_BUSY 0x80000000
-#define BP_CLKCTRL_XBUS_RSRVD1 11
-#define BM_CLKCTRL_XBUS_RSRVD1 0x7FFFF800
-#define BF_CLKCTRL_XBUS_RSRVD1(v) \
- (((v) << 11) & BM_CLKCTRL_XBUS_RSRVD1)
#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
#define BP_CLKCTRL_XBUS_DIV 0
#define BM_CLKCTRL_XBUS_DIV 0x000003FF
@@ -192,10 +143,6 @@
#define BM_CLKCTRL_XTAL_DIGCTRL_CLK1M_GATE 0x08000000
#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
-#define BP_CLKCTRL_XTAL_RSRVD1 2
-#define BM_CLKCTRL_XTAL_RSRVD1 0x03FFFFFC
-#define BF_CLKCTRL_XTAL_RSRVD1(v) \
- (((v) << 2) & BM_CLKCTRL_XTAL_RSRVD1)
#define BP_CLKCTRL_XTAL_DIV_UART 0
#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
#define BF_CLKCTRL_XTAL_DIV_UART(v) \
@@ -205,12 +152,7 @@
#define BP_CLKCTRL_PIX_CLKGATE 31
#define BM_CLKCTRL_PIX_CLKGATE 0x80000000
-#define BM_CLKCTRL_PIX_RSRVD2 0x40000000
#define BM_CLKCTRL_PIX_BUSY 0x20000000
-#define BP_CLKCTRL_PIX_RSRVD1 13
-#define BM_CLKCTRL_PIX_RSRVD1 0x1FFFE000
-#define BF_CLKCTRL_PIX_RSRVD1(v) \
- (((v) << 13) & BM_CLKCTRL_PIX_RSRVD1)
#define BM_CLKCTRL_PIX_DIV_FRAC_EN 0x00001000
#define BP_CLKCTRL_PIX_DIV 0
#define BM_CLKCTRL_PIX_DIV 0x00000FFF
@@ -221,12 +163,7 @@
#define BP_CLKCTRL_SSP_CLKGATE 31
#define BM_CLKCTRL_SSP_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP_RSRVD2 0x40000000
#define BM_CLKCTRL_SSP_BUSY 0x20000000
-#define BP_CLKCTRL_SSP_RSRVD1 10
-#define BM_CLKCTRL_SSP_RSRVD1 0x1FFFFC00
-#define BF_CLKCTRL_SSP_RSRVD1(v) \
- (((v) << 10) & BM_CLKCTRL_SSP_RSRVD1)
#define BM_CLKCTRL_SSP_DIV_FRAC_EN 0x00000200
#define BP_CLKCTRL_SSP_DIV 0
#define BM_CLKCTRL_SSP_DIV 0x000001FF
@@ -237,12 +174,7 @@
#define BP_CLKCTRL_GPMI_CLKGATE 31
#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_GPMI_RSRVD2 0x40000000
#define BM_CLKCTRL_GPMI_BUSY 0x20000000
-#define BP_CLKCTRL_GPMI_RSRVD1 11
-#define BM_CLKCTRL_GPMI_RSRVD1 0x1FFFF800
-#define BF_CLKCTRL_GPMI_RSRVD1(v) \
- (((v) << 11) & BM_CLKCTRL_GPMI_RSRVD1)
#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
#define BP_CLKCTRL_GPMI_DIV 0
#define BM_CLKCTRL_GPMI_DIV 0x000003FF
@@ -252,10 +184,6 @@
#define HW_CLKCTRL_SPDIF (0x00000090)
#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
-#define BP_CLKCTRL_SPDIF_RSRVD 0
-#define BM_CLKCTRL_SPDIF_RSRVD 0x7FFFFFFF
-#define BF_CLKCTRL_SPDIF_RSRVD(v) \
- (((v) << 0) & BM_CLKCTRL_SPDIF_RSRVD)
#define HW_CLKCTRL_EMI (0x000000a0)
@@ -266,24 +194,12 @@
#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
-#define BP_CLKCTRL_EMI_RSRVD3 18
-#define BM_CLKCTRL_EMI_RSRVD3 0x03FC0000
-#define BF_CLKCTRL_EMI_RSRVD3(v) \
- (((v) << 18) & BM_CLKCTRL_EMI_RSRVD3)
#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
-#define BP_CLKCTRL_EMI_RSRVD2 12
-#define BM_CLKCTRL_EMI_RSRVD2 0x0000F000
-#define BF_CLKCTRL_EMI_RSRVD2(v) \
- (((v) << 12) & BM_CLKCTRL_EMI_RSRVD2)
#define BP_CLKCTRL_EMI_DIV_XTAL 8
#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
(((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_RSRVD1 6
-#define BM_CLKCTRL_EMI_RSRVD1 0x000000C0
-#define BF_CLKCTRL_EMI_RSRVD1(v) \
- (((v) << 6) & BM_CLKCTRL_EMI_RSRVD1)
#define BP_CLKCTRL_EMI_DIV_EMI 0
#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
#define BF_CLKCTRL_EMI_DIV_EMI(v) \
@@ -292,22 +208,13 @@
#define HW_CLKCTRL_IR (0x000000b0)
#define BM_CLKCTRL_IR_CLKGATE 0x80000000
-#define BM_CLKCTRL_IR_RSRVD3 0x40000000
#define BM_CLKCTRL_IR_AUTO_DIV 0x20000000
#define BM_CLKCTRL_IR_IR_BUSY 0x10000000
#define BM_CLKCTRL_IR_IROV_BUSY 0x08000000
-#define BP_CLKCTRL_IR_RSRVD2 25
-#define BM_CLKCTRL_IR_RSRVD2 0x06000000
-#define BF_CLKCTRL_IR_RSRVD2(v) \
- (((v) << 25) & BM_CLKCTRL_IR_RSRVD2)
#define BP_CLKCTRL_IR_IROV_DIV 16
#define BM_CLKCTRL_IR_IROV_DIV 0x01FF0000
#define BF_CLKCTRL_IR_IROV_DIV(v) \
(((v) << 16) & BM_CLKCTRL_IR_IROV_DIV)
-#define BP_CLKCTRL_IR_RSRVD1 10
-#define BM_CLKCTRL_IR_RSRVD1 0x0000FC00
-#define BF_CLKCTRL_IR_RSRVD1(v) \
- (((v) << 10) & BM_CLKCTRL_IR_RSRVD1)
#define BP_CLKCTRL_IR_IR_DIV 0
#define BM_CLKCTRL_IR_IR_DIV 0x000003FF
#define BF_CLKCTRL_IR_IR_DIV(v) \
@@ -316,12 +223,7 @@
#define HW_CLKCTRL_SAIF (0x000000c0)
#define BM_CLKCTRL_SAIF_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF_RSRVD2 0x40000000
#define BM_CLKCTRL_SAIF_BUSY 0x20000000
-#define BP_CLKCTRL_SAIF_RSRVD1 17
-#define BM_CLKCTRL_SAIF_RSRVD1 0x1FFE0000
-#define BF_CLKCTRL_SAIF_RSRVD1(v) \
- (((v) << 17) & BM_CLKCTRL_SAIF_RSRVD1)
#define BM_CLKCTRL_SAIF_DIV_FRAC_EN 0x00010000
#define BP_CLKCTRL_SAIF_DIV 0
#define BM_CLKCTRL_SAIF_DIV 0x0000FFFF
@@ -332,20 +234,11 @@
#define BM_CLKCTRL_TV_CLK_TV108M_GATE 0x80000000
#define BM_CLKCTRL_TV_CLK_TV_GATE 0x40000000
-#define BP_CLKCTRL_TV_RSRVD 0
-#define BM_CLKCTRL_TV_RSRVD 0x3FFFFFFF
-#define BF_CLKCTRL_TV_RSRVD(v) \
- (((v) << 0) & BM_CLKCTRL_TV_RSRVD)
#define HW_CLKCTRL_ETM (0x000000e0)
#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_RSRVD2 0x40000000
#define BM_CLKCTRL_ETM_BUSY 0x20000000
-#define BP_CLKCTRL_ETM_RSRVD1 7
-#define BM_CLKCTRL_ETM_RSRVD1 0x1FFFFF80
-#define BF_CLKCTRL_ETM_RSRVD1(v) \
- (((v) << 7) & BM_CLKCTRL_ETM_RSRVD1)
#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000040
#define BP_CLKCTRL_ETM_DIV 0
#define BM_CLKCTRL_ETM_DIV 0x0000003F
@@ -393,36 +286,23 @@
#define BM_CLKCTRL_FRAC1_CLKGATEVID 0x80000000
#define BM_CLKCTRL_FRAC1_VID_STABLE 0x40000000
-#define BP_CLKCTRL_FRAC1_RSRVD1 0
-#define BM_CLKCTRL_FRAC1_RSRVD1 0x3FFFFFFF
-#define BF_CLKCTRL_FRAC1_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC1_RSRVD1)
#define HW_CLKCTRL_CLKSEQ (0x00000110)
#define HW_CLKCTRL_CLKSEQ_SET (0x00000114)
#define HW_CLKCTRL_CLKSEQ_CLR (0x00000118)
#define HW_CLKCTRL_CLKSEQ_TOG (0x0000011c)
-#define BP_CLKCTRL_CLKSEQ_RSRVD1 9
-#define BM_CLKCTRL_CLKSEQ_RSRVD1 0xFFFFFE00
-#define BF_CLKCTRL_CLKSEQ_RSRVD1(v) \
- (((v) << 9) & BM_CLKCTRL_CLKSEQ_RSRVD1)
#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00000080
#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000040
#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP 0x00000020
#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000010
#define BM_CLKCTRL_CLKSEQ_BYPASS_IR 0x00000008
-#define BM_CLKCTRL_CLKSEQ_RSRVD0 0x00000004
#define BM_CLKCTRL_CLKSEQ_BYPASS_PIX 0x00000002
#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF 0x00000001
#define HW_CLKCTRL_RESET (0x00000120)
-#define BP_CLKCTRL_RESET_RSRVD 2
-#define BM_CLKCTRL_RESET_RSRVD 0xFFFFFFFC
-#define BF_CLKCTRL_RESET_RSRVD(v) \
- (((v) << 2) & BM_CLKCTRL_RESET_RSRVD)
#define BM_CLKCTRL_RESET_CHIP 0x00000002
#define BM_CLKCTRL_RESET_DIG 0x00000001
@@ -432,10 +312,6 @@
#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
(((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-#define BP_CLKCTRL_STATUS_RSRVD 0
-#define BM_CLKCTRL_STATUS_RSRVD 0x3FFFFFFF
-#define BF_CLKCTRL_STATUS_RSRVD(v) \
- (((v) << 0) & BM_CLKCTRL_STATUS_RSRVD)
#define HW_CLKCTRL_VERSION (0x00000140)
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx28.h b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
index 661df18755f7..7d1b061d7943 100644
--- a/arch/arm/mach-mxs/regs-clkctrl-mx28.h
+++ b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
@@ -31,10 +31,6 @@
#define HW_CLKCTRL_PLL0CTRL0_CLR (0x00000008)
#define HW_CLKCTRL_PLL0CTRL0_TOG (0x0000000c)
-#define BP_CLKCTRL_PLL0CTRL0_RSRVD6 30
-#define BM_CLKCTRL_PLL0CTRL0_RSRVD6 0xC0000000
-#define BF_CLKCTRL_PLL0CTRL0_RSRVD6(v) \
- (((v) << 30) & BM_CLKCTRL_PLL0CTRL0_RSRVD6)
#define BP_CLKCTRL_PLL0CTRL0_LFR_SEL 28
#define BM_CLKCTRL_PLL0CTRL0_LFR_SEL 0x30000000
#define BF_CLKCTRL_PLL0CTRL0_LFR_SEL(v) \
@@ -43,10 +39,6 @@
#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_2 0x1
#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_05 0x2
#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_RSRVD5 26
-#define BM_CLKCTRL_PLL0CTRL0_RSRVD5 0x0C000000
-#define BF_CLKCTRL_PLL0CTRL0_RSRVD5(v) \
- (((v) << 26) & BM_CLKCTRL_PLL0CTRL0_RSRVD5)
#define BP_CLKCTRL_PLL0CTRL0_CP_SEL 24
#define BM_CLKCTRL_PLL0CTRL0_CP_SEL 0x03000000
#define BF_CLKCTRL_PLL0CTRL0_CP_SEL(v) \
@@ -55,10 +47,6 @@
#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_2 0x1
#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_05 0x2
#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_RSRVD4 22
-#define BM_CLKCTRL_PLL0CTRL0_RSRVD4 0x00C00000
-#define BF_CLKCTRL_PLL0CTRL0_RSRVD4(v) \
- (((v) << 22) & BM_CLKCTRL_PLL0CTRL0_RSRVD4)
#define BP_CLKCTRL_PLL0CTRL0_DIV_SEL 20
#define BM_CLKCTRL_PLL0CTRL0_DIV_SEL 0x00300000
#define BF_CLKCTRL_PLL0CTRL0_DIV_SEL(v) \
@@ -67,22 +55,13 @@
#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWER 0x1
#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWEST 0x2
#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL0CTRL0_RSRVD3 0x00080000
#define BM_CLKCTRL_PLL0CTRL0_EN_USB_CLKS 0x00040000
#define BM_CLKCTRL_PLL0CTRL0_POWER 0x00020000
-#define BP_CLKCTRL_PLL0CTRL0_RSRVD1 0
-#define BM_CLKCTRL_PLL0CTRL0_RSRVD1 0x0001FFFF
-#define BF_CLKCTRL_PLL0CTRL0_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_PLL0CTRL0_RSRVD1)
#define HW_CLKCTRL_PLL0CTRL1 (0x00000010)
#define BM_CLKCTRL_PLL0CTRL1_LOCK 0x80000000
#define BM_CLKCTRL_PLL0CTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLL0CTRL1_RSRVD1 16
-#define BM_CLKCTRL_PLL0CTRL1_RSRVD1 0x3FFF0000
-#define BF_CLKCTRL_PLL0CTRL1_RSRVD1(v) \
- (((v) << 16) & BM_CLKCTRL_PLL0CTRL1_RSRVD1)
#define BP_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0
#define BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0x0000FFFF
#define BF_CLKCTRL_PLL0CTRL1_LOCK_COUNT(v) \
@@ -94,7 +73,6 @@
#define HW_CLKCTRL_PLL1CTRL0_TOG (0x0000002c)
#define BM_CLKCTRL_PLL1CTRL0_CLKGATEEMI 0x80000000
-#define BM_CLKCTRL_PLL1CTRL0_RSRVD6 0x40000000
#define BP_CLKCTRL_PLL1CTRL0_LFR_SEL 28
#define BM_CLKCTRL_PLL1CTRL0_LFR_SEL 0x30000000
#define BF_CLKCTRL_PLL1CTRL0_LFR_SEL(v) \
@@ -103,10 +81,6 @@
#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_2 0x1
#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_05 0x2
#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_RSRVD5 26
-#define BM_CLKCTRL_PLL1CTRL0_RSRVD5 0x0C000000
-#define BF_CLKCTRL_PLL1CTRL0_RSRVD5(v) \
- (((v) << 26) & BM_CLKCTRL_PLL1CTRL0_RSRVD5)
#define BP_CLKCTRL_PLL1CTRL0_CP_SEL 24
#define BM_CLKCTRL_PLL1CTRL0_CP_SEL 0x03000000
#define BF_CLKCTRL_PLL1CTRL0_CP_SEL(v) \
@@ -115,10 +89,6 @@
#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_2 0x1
#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_05 0x2
#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_RSRVD4 22
-#define BM_CLKCTRL_PLL1CTRL0_RSRVD4 0x00C00000
-#define BF_CLKCTRL_PLL1CTRL0_RSRVD4(v) \
- (((v) << 22) & BM_CLKCTRL_PLL1CTRL0_RSRVD4)
#define BP_CLKCTRL_PLL1CTRL0_DIV_SEL 20
#define BM_CLKCTRL_PLL1CTRL0_DIV_SEL 0x00300000
#define BF_CLKCTRL_PLL1CTRL0_DIV_SEL(v) \
@@ -127,22 +97,13 @@
#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWER 0x1
#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWEST 0x2
#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL1CTRL0_RSRVD3 0x00080000
#define BM_CLKCTRL_PLL1CTRL0_EN_USB_CLKS 0x00040000
#define BM_CLKCTRL_PLL1CTRL0_POWER 0x00020000
-#define BP_CLKCTRL_PLL1CTRL0_RSRVD1 0
-#define BM_CLKCTRL_PLL1CTRL0_RSRVD1 0x0001FFFF
-#define BF_CLKCTRL_PLL1CTRL0_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_PLL1CTRL0_RSRVD1)
#define HW_CLKCTRL_PLL1CTRL1 (0x00000030)
#define BM_CLKCTRL_PLL1CTRL1_LOCK 0x80000000
#define BM_CLKCTRL_PLL1CTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLL1CTRL1_RSRVD1 16
-#define BM_CLKCTRL_PLL1CTRL1_RSRVD1 0x3FFF0000
-#define BF_CLKCTRL_PLL1CTRL1_RSRVD1(v) \
- (((v) << 16) & BM_CLKCTRL_PLL1CTRL1_RSRVD1)
#define BP_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0
#define BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0x0000FFFF
#define BF_CLKCTRL_PLL1CTRL1_LOCK_COUNT(v) \
@@ -154,51 +115,31 @@
#define HW_CLKCTRL_PLL2CTRL0_TOG (0x0000004c)
#define BM_CLKCTRL_PLL2CTRL0_CLKGATE 0x80000000
-#define BM_CLKCTRL_PLL2CTRL0_RSRVD3 0x40000000
#define BP_CLKCTRL_PLL2CTRL0_LFR_SEL 28
#define BM_CLKCTRL_PLL2CTRL0_LFR_SEL 0x30000000
#define BF_CLKCTRL_PLL2CTRL0_LFR_SEL(v) \
(((v) << 28) & BM_CLKCTRL_PLL2CTRL0_LFR_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_RSRVD2 0x08000000
#define BM_CLKCTRL_PLL2CTRL0_HOLD_RING_OFF_B 0x04000000
#define BP_CLKCTRL_PLL2CTRL0_CP_SEL 24
#define BM_CLKCTRL_PLL2CTRL0_CP_SEL 0x03000000
#define BF_CLKCTRL_PLL2CTRL0_CP_SEL(v) \
(((v) << 24) & BM_CLKCTRL_PLL2CTRL0_CP_SEL)
#define BM_CLKCTRL_PLL2CTRL0_POWER 0x00800000
-#define BP_CLKCTRL_PLL2CTRL0_RSRVD1 0
-#define BM_CLKCTRL_PLL2CTRL0_RSRVD1 0x007FFFFF
-#define BF_CLKCTRL_PLL2CTRL0_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_PLL2CTRL0_RSRVD1)
#define HW_CLKCTRL_CPU (0x00000050)
#define HW_CLKCTRL_CPU_SET (0x00000054)
#define HW_CLKCTRL_CPU_CLR (0x00000058)
#define HW_CLKCTRL_CPU_TOG (0x0000005c)
-#define BP_CLKCTRL_CPU_RSRVD5 30
-#define BM_CLKCTRL_CPU_RSRVD5 0xC0000000
-#define BF_CLKCTRL_CPU_RSRVD5(v) \
- (((v) << 30) & BM_CLKCTRL_CPU_RSRVD5)
#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
-#define BM_CLKCTRL_CPU_RSRVD4 0x08000000
#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
#define BP_CLKCTRL_CPU_DIV_XTAL 16
#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
(((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BP_CLKCTRL_CPU_RSRVD3 13
-#define BM_CLKCTRL_CPU_RSRVD3 0x0000E000
-#define BF_CLKCTRL_CPU_RSRVD3(v) \
- (((v) << 13) & BM_CLKCTRL_CPU_RSRVD3)
#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
-#define BM_CLKCTRL_CPU_RSRVD2 0x00000800
#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_RSRVD1 6
-#define BM_CLKCTRL_CPU_RSRVD1 0x000003C0
-#define BF_CLKCTRL_CPU_RSRVD1(v) \
- (((v) << 6) & BM_CLKCTRL_CPU_RSRVD1)
#define BP_CLKCTRL_CPU_DIV_CPU 0
#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
#define BF_CLKCTRL_CPU_DIV_CPU(v) \
@@ -212,7 +153,6 @@
#define BM_CLKCTRL_HBUS_ASM_BUSY 0x80000000
#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x40000000
#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x20000000
-#define BM_CLKCTRL_HBUS_RSRVD2 0x10000000
#define BM_CLKCTRL_HBUS_ASM_EMIPORT_AS_ENABLE 0x08000000
#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
@@ -232,10 +172,6 @@
#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BP_CLKCTRL_HBUS_RSRVD1 6
-#define BM_CLKCTRL_HBUS_RSRVD1 0x0000FFC0
-#define BF_CLKCTRL_HBUS_RSRVD1(v) \
- (((v) << 6) & BM_CLKCTRL_HBUS_RSRVD1)
#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
#define BP_CLKCTRL_HBUS_DIV 0
#define BM_CLKCTRL_HBUS_DIV 0x0000001F
@@ -245,10 +181,6 @@
#define HW_CLKCTRL_XBUS (0x00000070)
#define BM_CLKCTRL_XBUS_BUSY 0x80000000
-#define BP_CLKCTRL_XBUS_RSRVD1 12
-#define BM_CLKCTRL_XBUS_RSRVD1 0x7FFFF000
-#define BF_CLKCTRL_XBUS_RSRVD1(v) \
- (((v) << 12) & BM_CLKCTRL_XBUS_RSRVD1)
#define BM_CLKCTRL_XBUS_AUTO_CLEAR_DIV_ENABLE 0x00000800
#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
#define BP_CLKCTRL_XBUS_DIV 0
@@ -263,19 +195,10 @@
#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
-#define BM_CLKCTRL_XTAL_RSRVD3 0x40000000
#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
-#define BP_CLKCTRL_XTAL_RSRVD2 27
-#define BM_CLKCTRL_XTAL_RSRVD2 0x18000000
-#define BF_CLKCTRL_XTAL_RSRVD2(v) \
- (((v) << 27) & BM_CLKCTRL_XTAL_RSRVD2)
#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
-#define BP_CLKCTRL_XTAL_RSRVD1 2
-#define BM_CLKCTRL_XTAL_RSRVD1 0x03FFFFFC
-#define BF_CLKCTRL_XTAL_RSRVD1(v) \
- (((v) << 2) & BM_CLKCTRL_XTAL_RSRVD1)
#define BP_CLKCTRL_XTAL_DIV_UART 0
#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
#define BF_CLKCTRL_XTAL_DIV_UART(v) \
@@ -285,12 +208,7 @@
#define BP_CLKCTRL_SSP0_CLKGATE 31
#define BM_CLKCTRL_SSP0_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP0_RSRVD2 0x40000000
#define BM_CLKCTRL_SSP0_BUSY 0x20000000
-#define BP_CLKCTRL_SSP0_RSRVD1 10
-#define BM_CLKCTRL_SSP0_RSRVD1 0x1FFFFC00
-#define BF_CLKCTRL_SSP0_RSRVD1(v) \
- (((v) << 10) & BM_CLKCTRL_SSP0_RSRVD1)
#define BM_CLKCTRL_SSP0_DIV_FRAC_EN 0x00000200
#define BP_CLKCTRL_SSP0_DIV 0
#define BM_CLKCTRL_SSP0_DIV 0x000001FF
@@ -301,12 +219,7 @@
#define BP_CLKCTRL_SSP1_CLKGATE 31
#define BM_CLKCTRL_SSP1_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP1_RSRVD2 0x40000000
#define BM_CLKCTRL_SSP1_BUSY 0x20000000
-#define BP_CLKCTRL_SSP1_RSRVD1 10
-#define BM_CLKCTRL_SSP1_RSRVD1 0x1FFFFC00
-#define BF_CLKCTRL_SSP1_RSRVD1(v) \
- (((v) << 10) & BM_CLKCTRL_SSP1_RSRVD1)
#define BM_CLKCTRL_SSP1_DIV_FRAC_EN 0x00000200
#define BP_CLKCTRL_SSP1_DIV 0
#define BM_CLKCTRL_SSP1_DIV 0x000001FF
@@ -317,12 +230,7 @@
#define BP_CLKCTRL_SSP2_CLKGATE 31
#define BM_CLKCTRL_SSP2_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP2_RSRVD2 0x40000000
#define BM_CLKCTRL_SSP2_BUSY 0x20000000
-#define BP_CLKCTRL_SSP2_RSRVD1 10
-#define BM_CLKCTRL_SSP2_RSRVD1 0x1FFFFC00
-#define BF_CLKCTRL_SSP2_RSRVD1(v) \
- (((v) << 10) & BM_CLKCTRL_SSP2_RSRVD1)
#define BM_CLKCTRL_SSP2_DIV_FRAC_EN 0x00000200
#define BP_CLKCTRL_SSP2_DIV 0
#define BM_CLKCTRL_SSP2_DIV 0x000001FF
@@ -333,12 +241,7 @@
#define BP_CLKCTRL_SSP3_CLKGATE 31
#define BM_CLKCTRL_SSP3_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP3_RSRVD2 0x40000000
#define BM_CLKCTRL_SSP3_BUSY 0x20000000
-#define BP_CLKCTRL_SSP3_RSRVD1 10
-#define BM_CLKCTRL_SSP3_RSRVD1 0x1FFFFC00
-#define BF_CLKCTRL_SSP3_RSRVD1(v) \
- (((v) << 10) & BM_CLKCTRL_SSP3_RSRVD1)
#define BM_CLKCTRL_SSP3_DIV_FRAC_EN 0x00000200
#define BP_CLKCTRL_SSP3_DIV 0
#define BM_CLKCTRL_SSP3_DIV 0x000001FF
@@ -349,12 +252,7 @@
#define BP_CLKCTRL_GPMI_CLKGATE 31
#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_GPMI_RSRVD2 0x40000000
#define BM_CLKCTRL_GPMI_BUSY 0x20000000
-#define BP_CLKCTRL_GPMI_RSRVD1 11
-#define BM_CLKCTRL_GPMI_RSRVD1 0x1FFFF800
-#define BF_CLKCTRL_GPMI_RSRVD1(v) \
- (((v) << 11) & BM_CLKCTRL_GPMI_RSRVD1)
#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
#define BP_CLKCTRL_GPMI_DIV 0
#define BM_CLKCTRL_GPMI_DIV 0x000003FF
@@ -365,10 +263,6 @@
#define BP_CLKCTRL_SPDIF_CLKGATE 31
#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
-#define BP_CLKCTRL_SPDIF_RSRVD 0
-#define BM_CLKCTRL_SPDIF_RSRVD 0x7FFFFFFF
-#define BF_CLKCTRL_SPDIF_RSRVD(v) \
- (((v) << 0) & BM_CLKCTRL_SPDIF_RSRVD)
#define HW_CLKCTRL_EMI (0x000000f0)
@@ -379,24 +273,12 @@
#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
-#define BP_CLKCTRL_EMI_RSRVD3 18
-#define BM_CLKCTRL_EMI_RSRVD3 0x03FC0000
-#define BF_CLKCTRL_EMI_RSRVD3(v) \
- (((v) << 18) & BM_CLKCTRL_EMI_RSRVD3)
#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
-#define BP_CLKCTRL_EMI_RSRVD2 12
-#define BM_CLKCTRL_EMI_RSRVD2 0x0000F000
-#define BF_CLKCTRL_EMI_RSRVD2(v) \
- (((v) << 12) & BM_CLKCTRL_EMI_RSRVD2)
#define BP_CLKCTRL_EMI_DIV_XTAL 8
#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
(((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_RSRVD1 6
-#define BM_CLKCTRL_EMI_RSRVD1 0x000000C0
-#define BF_CLKCTRL_EMI_RSRVD1(v) \
- (((v) << 6) & BM_CLKCTRL_EMI_RSRVD1)
#define BP_CLKCTRL_EMI_DIV_EMI 0
#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
#define BF_CLKCTRL_EMI_DIV_EMI(v) \
@@ -406,12 +288,7 @@
#define BP_CLKCTRL_SAIF0_CLKGATE 31
#define BM_CLKCTRL_SAIF0_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF0_RSRVD2 0x40000000
#define BM_CLKCTRL_SAIF0_BUSY 0x20000000
-#define BP_CLKCTRL_SAIF0_RSRVD1 17
-#define BM_CLKCTRL_SAIF0_RSRVD1 0x1FFE0000
-#define BF_CLKCTRL_SAIF0_RSRVD1(v) \
- (((v) << 17) & BM_CLKCTRL_SAIF0_RSRVD1)
#define BM_CLKCTRL_SAIF0_DIV_FRAC_EN 0x00010000
#define BP_CLKCTRL_SAIF0_DIV 0
#define BM_CLKCTRL_SAIF0_DIV 0x0000FFFF
@@ -422,12 +299,7 @@
#define BP_CLKCTRL_SAIF1_CLKGATE 31
#define BM_CLKCTRL_SAIF1_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF1_RSRVD2 0x40000000
#define BM_CLKCTRL_SAIF1_BUSY 0x20000000
-#define BP_CLKCTRL_SAIF1_RSRVD1 17
-#define BM_CLKCTRL_SAIF1_RSRVD1 0x1FFE0000
-#define BF_CLKCTRL_SAIF1_RSRVD1(v) \
- (((v) << 17) & BM_CLKCTRL_SAIF1_RSRVD1)
#define BM_CLKCTRL_SAIF1_DIV_FRAC_EN 0x00010000
#define BP_CLKCTRL_SAIF1_DIV 0
#define BM_CLKCTRL_SAIF1_DIV 0x0000FFFF
@@ -438,12 +310,7 @@
#define BP_CLKCTRL_DIS_LCDIF_CLKGATE 31
#define BM_CLKCTRL_DIS_LCDIF_CLKGATE 0x80000000
-#define BM_CLKCTRL_DIS_LCDIF_RSRVD2 0x40000000
#define BM_CLKCTRL_DIS_LCDIF_BUSY 0x20000000
-#define BP_CLKCTRL_DIS_LCDIF_RSRVD1 14
-#define BM_CLKCTRL_DIS_LCDIF_RSRVD1 0x1FFFC000
-#define BF_CLKCTRL_DIS_LCDIF_RSRVD1(v) \
- (((v) << 14) & BM_CLKCTRL_DIS_LCDIF_RSRVD1)
#define BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN 0x00002000
#define BP_CLKCTRL_DIS_LCDIF_DIV 0
#define BM_CLKCTRL_DIS_LCDIF_DIV 0x00001FFF
@@ -453,12 +320,7 @@
#define HW_CLKCTRL_ETM (0x00000130)
#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_RSRVD2 0x40000000
#define BM_CLKCTRL_ETM_BUSY 0x20000000
-#define BP_CLKCTRL_ETM_RSRVD1 8
-#define BM_CLKCTRL_ETM_RSRVD1 0x1FFFFF00
-#define BF_CLKCTRL_ETM_RSRVD1(v) \
- (((v) << 8) & BM_CLKCTRL_ETM_RSRVD1)
#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000080
#define BP_CLKCTRL_ETM_DIV 0
#define BM_CLKCTRL_ETM_DIV 0x0000007F
@@ -471,7 +333,6 @@
#define BP_CLKCTRL_ENET_DISABLE 30
#define BM_CLKCTRL_ENET_DISABLE 0x40000000
#define BM_CLKCTRL_ENET_STATUS 0x20000000
-#define BM_CLKCTRL_ENET_RSRVD1 0x10000000
#define BM_CLKCTRL_ENET_BUSY_TIME 0x08000000
#define BP_CLKCTRL_ENET_DIV_TIME 21
#define BM_CLKCTRL_ENET_DIV_TIME 0x07E00000
@@ -493,37 +354,23 @@
#define BM_CLKCTRL_ENET_CLK_OUT_EN 0x00040000
#define BM_CLKCTRL_ENET_RESET_BY_SW_CHIP 0x00020000
#define BM_CLKCTRL_ENET_RESET_BY_SW 0x00010000
-#define BP_CLKCTRL_ENET_RSRVD0 0
-#define BM_CLKCTRL_ENET_RSRVD0 0x0000FFFF
-#define BF_CLKCTRL_ENET_RSRVD0(v) \
- (((v) << 0) & BM_CLKCTRL_ENET_RSRVD0)
#define HW_CLKCTRL_HSADC (0x00000150)
-#define BM_CLKCTRL_HSADC_RSRVD2 0x80000000
#define BM_CLKCTRL_HSADC_RESETB 0x40000000
#define BP_CLKCTRL_HSADC_FREQDIV 28
#define BM_CLKCTRL_HSADC_FREQDIV 0x30000000
#define BF_CLKCTRL_HSADC_FREQDIV(v) \
(((v) << 28) & BM_CLKCTRL_HSADC_FREQDIV)
-#define BP_CLKCTRL_HSADC_RSRVD1 0
-#define BM_CLKCTRL_HSADC_RSRVD1 0x0FFFFFFF
-#define BF_CLKCTRL_HSADC_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_HSADC_RSRVD1)
#define HW_CLKCTRL_FLEXCAN (0x00000160)
-#define BM_CLKCTRL_FLEXCAN_RSRVD2 0x80000000
#define BP_CLKCTRL_FLEXCAN_STOP_CAN0 30
#define BM_CLKCTRL_FLEXCAN_STOP_CAN0 0x40000000
#define BM_CLKCTRL_FLEXCAN_CAN0_STATUS 0x20000000
#define BP_CLKCTRL_FLEXCAN_STOP_CAN1 28
#define BM_CLKCTRL_FLEXCAN_STOP_CAN1 0x10000000
#define BM_CLKCTRL_FLEXCAN_CAN1_STATUS 0x08000000
-#define BP_CLKCTRL_FLEXCAN_RSRVD1 0
-#define BM_CLKCTRL_FLEXCAN_RSRVD1 0x07FFFFFF
-#define BF_CLKCTRL_FLEXCAN_RSRVD1(v) \
- (((v) << 0) & BM_CLKCTRL_FLEXCAN_RSRVD1)
#define HW_CLKCTRL_FRAC0 (0x000001b0)
#define HW_CLKCTRL_FRAC0_SET (0x000001b4)
@@ -564,10 +411,6 @@
#define HW_CLKCTRL_FRAC1_CLR (0x000001c8)
#define HW_CLKCTRL_FRAC1_TOG (0x000001cc)
-#define BP_CLKCTRL_FRAC1_RSRVD2 24
-#define BM_CLKCTRL_FRAC1_RSRVD2 0xFF000000
-#define BF_CLKCTRL_FRAC1_RSRVD2(v) \
- (((v) << 24) & BM_CLKCTRL_FRAC1_RSRVD2)
#define BP_CLKCTRL_FRAC1_CLKGATEGPMI 23
#define BM_CLKCTRL_FRAC1_CLKGATEGPMI 0x00800000
#define BM_CLKCTRL_FRAC1_GPMI_STABLE 0x00400000
@@ -595,22 +438,10 @@
#define HW_CLKCTRL_CLKSEQ_CLR (0x000001d8)
#define HW_CLKCTRL_CLKSEQ_TOG (0x000001dc)
-#define BP_CLKCTRL_CLKSEQ_RSRVD0 19
-#define BM_CLKCTRL_CLKSEQ_RSRVD0 0xFFF80000
-#define BF_CLKCTRL_CLKSEQ_RSRVD0(v) \
- (((v) << 19) & BM_CLKCTRL_CLKSEQ_RSRVD0)
#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00040000
-#define BP_CLKCTRL_CLKSEQ_RSRVD1 15
-#define BM_CLKCTRL_CLKSEQ_RSRVD1 0x00038000
-#define BF_CLKCTRL_CLKSEQ_RSRVD1(v) \
- (((v) << 15) & BM_CLKCTRL_CLKSEQ_RSRVD1)
#define BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF 0x00004000
#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__BYPASS 0x1
#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__PFD 0x0
-#define BP_CLKCTRL_CLKSEQ_RSRVD2 9
-#define BM_CLKCTRL_CLKSEQ_RSRVD2 0x00003E00
-#define BF_CLKCTRL_CLKSEQ_RSRVD2(v) \
- (((v) << 9) & BM_CLKCTRL_CLKSEQ_RSRVD2)
#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000080
#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP3 0x00000040
@@ -623,10 +454,6 @@
#define HW_CLKCTRL_RESET (0x000001e0)
-#define BP_CLKCTRL_RESET_RSRVD 6
-#define BM_CLKCTRL_RESET_RSRVD 0xFFFFFFC0
-#define BF_CLKCTRL_RESET_RSRVD(v) \
- (((v) << 6) & BM_CLKCTRL_RESET_RSRVD)
#define BM_CLKCTRL_RESET_WDOG_POR_DISABLE 0x00000020
#define BM_CLKCTRL_RESET_EXTERNAL_RESET_ENABLE 0x00000010
#define BM_CLKCTRL_RESET_THERMAL_RESET_ENABLE 0x00000008
@@ -640,10 +467,6 @@
#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
(((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-#define BP_CLKCTRL_STATUS_RSRVD 0
-#define BM_CLKCTRL_STATUS_RSRVD 0x3FFFFFFF
-#define BF_CLKCTRL_STATUS_RSRVD(v) \
- (((v) << 0) & BM_CLKCTRL_STATUS_RSRVD)
#define HW_CLKCTRL_VERSION (0x00000200)
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 9343d7edd4f6..20ec3bddf7cd 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -22,6 +22,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/proc-fns.h>
#include <asm/system.h>
@@ -135,3 +136,4 @@ error:
pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
return -ETIMEDOUT;
}
+EXPORT_SYMBOL(mxs_reset_block);
diff --git a/arch/arm/mach-netx/include/mach/memory.h b/arch/arm/mach-netx/include/mach/memory.h
index 9a363f297f90..59561496c36e 100644
--- a/arch/arm/mach-netx/include/mach/memory.h
+++ b/arch/arm/mach-netx/include/mach/memory.h
@@ -20,7 +20,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x80000000)
+#define PLAT_PHYS_OFFSET UL(0x80000000)
#endif
diff --git a/arch/arm/mach-nomadik/include/mach/memory.h b/arch/arm/mach-nomadik/include/mach/memory.h
index 1e5689d98ecd..d3325211ba6a 100644
--- a/arch/arm/mach-nomadik/include/mach/memory.h
+++ b/arch/arm/mach-nomadik/include/mach/memory.h
@@ -23,6 +23,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-ns9xxx/include/mach/memory.h b/arch/arm/mach-ns9xxx/include/mach/memory.h
index 6107193adbfe..5c65aee6e7a9 100644
--- a/arch/arm/mach-ns9xxx/include/mach/memory.h
+++ b/arch/arm/mach-ns9xxx/include/mach/memory.h
@@ -19,6 +19,6 @@
#define NS9XXX_CS2STAT_LENGTH UL(0x1000)
#define NS9XXX_CS3STAT_LENGTH UL(0x1000)
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-nuc93x/include/mach/memory.h b/arch/arm/mach-nuc93x/include/mach/memory.h
index 323ab0db3f7d..ef9864b002a6 100644
--- a/arch/arm/mach-nuc93x/include/mach/memory.h
+++ b/arch/arm/mach-nuc93x/include/mach/memory.h
@@ -16,6 +16,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index ba6009f27677..af98117043d2 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
-obj-y += clock.o clock_data.o opp_data.o
+obj-y += clock.o clock_data.o opp_data.o reset.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 22cc8c8df6cb..de88c9297b68 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -165,7 +165,7 @@ static struct map_desc ams_delta_io_desc[] __initdata = {
}
};
-static struct omap_lcd_config ams_delta_lcd_config __initdata = {
+static struct omap_lcd_config ams_delta_lcd_config = {
.ctrl_name = "internal",
};
@@ -175,7 +175,7 @@ static struct omap_usb_config ams_delta_usb_config __initdata = {
.pins[0] = 2,
};
-static struct omap_board_config_kernel ams_delta_config[] = {
+static struct omap_board_config_kernel ams_delta_config[] __initdata = {
{ OMAP_TAG_LCD, &ams_delta_lcd_config },
};
@@ -208,14 +208,14 @@ static const struct matrix_keymap_data ams_delta_keymap_data = {
.keymap_size = ARRAY_SIZE(ams_delta_keymap),
};
-static struct omap_kp_platform_data ams_delta_kp_data = {
+static struct omap_kp_platform_data ams_delta_kp_data __initdata = {
.rows = 8,
.cols = 8,
.keymap_data = &ams_delta_keymap_data,
.delay = 9,
};
-static struct platform_device ams_delta_kp_device = {
+static struct platform_device ams_delta_kp_device __initdata = {
.name = "omap-keypad",
.id = -1,
.dev = {
@@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device = {
.resource = ams_delta_kp_resources,
};
-static struct platform_device ams_delta_lcd_device = {
+static struct platform_device ams_delta_lcd_device __initdata = {
.name = "lcd_ams_delta",
.id = -1,
};
-static struct platform_device ams_delta_led_device = {
+static struct platform_device ams_delta_led_device __initdata = {
.name = "ams-delta-led",
.id = -1
};
@@ -259,7 +259,7 @@ static int ams_delta_camera_power(struct device *dev, int power)
#define ams_delta_camera_power NULL
#endif
-static struct soc_camera_link __initdata ams_delta_iclink = {
+static struct soc_camera_link ams_delta_iclink = {
.bus_id = 0, /* OMAP1 SoC camera bus */
.i2c_adapter_id = 1,
.board_info = &ams_delta_camera_board_info[0],
@@ -267,7 +267,7 @@ static struct soc_camera_link __initdata ams_delta_iclink = {
.power = ams_delta_camera_power,
};
-static struct platform_device ams_delta_camera_device = {
+static struct platform_device ams_delta_camera_device __initdata = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 0efb9dbae44c..87f173d93557 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -287,11 +287,11 @@ static struct platform_device *devices[] __initdata = {
&lcd_device,
};
-static struct omap_lcd_config fsample_lcd_config __initdata = {
+static struct omap_lcd_config fsample_lcd_config = {
.ctrl_name = "internal",
};
-static struct omap_board_config_kernel fsample_config[] = {
+static struct omap_board_config_kernel fsample_config[] __initdata = {
{ OMAP_TAG_LCD, &fsample_lcd_config },
};
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 28b84aa9bdba..ba3bd09c4754 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -202,7 +202,7 @@ static int h2_nand_dev_ready(struct mtd_info *mtd)
static const char *h2_part_probes[] = { "cmdlinepart", NULL };
-struct platform_nand_data h2_nand_platdata = {
+static struct platform_nand_data h2_nand_platdata = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index dbc8b8d882ba..ac48677672ee 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -204,7 +204,7 @@ static int nand_dev_ready(struct mtd_info *mtd)
static const char *part_probes[] = { "cmdlinepart", NULL };
-struct platform_nand_data nand_platdata = {
+static struct platform_nand_data nand_platdata = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index f2c5c585bc83..ba05a51f9408 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -331,7 +331,7 @@ static struct resource htcpld_resources[] = {
},
};
-struct htcpld_chip_platform_data htcpld_chips[] = {
+static struct htcpld_chip_platform_data htcpld_chips[] = {
[0] = {
.addr = 0x03,
.reset = 0x04,
@@ -366,7 +366,7 @@ struct htcpld_chip_platform_data htcpld_chips[] = {
},
};
-struct htcpld_core_platform_data htcpld_pfdata = {
+static struct htcpld_core_platform_data htcpld_pfdata = {
.int_reset_gpio_hi = HTCPLD_GPIO_INT_RESET_HI,
.int_reset_gpio_lo = HTCPLD_GPIO_INT_RESET_LO,
.i2c_adapter_id = 1,
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index a36e6742bf9b..2d9b8cbd7a14 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -365,7 +365,7 @@ static struct omap_mmc_platform_data mmc1_data = {
static struct omap_mmc_platform_data *mmc_data[OMAP16XX_NR_MMC];
-void __init innovator_mmc_init(void)
+static void __init innovator_mmc_init(void)
{
mmc_data[0] = &mmc1_data;
omap1_init_mmc(mmc_data, OMAP15XX_NR_MMC);
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index d21f09dc78f4..cfd084926146 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -115,7 +115,7 @@ static struct mipid_platform_data nokia770_mipid_platform_data = {
.shutdown = mipid_shutdown,
};
-static void mipid_dev_init(void)
+static void __init mipid_dev_init(void)
{
const struct omap_lcd_config *conf;
@@ -126,7 +126,7 @@ static void mipid_dev_init(void)
}
}
-static void ads7846_dev_init(void)
+static void __init ads7846_dev_init(void)
{
if (gpio_request(ADS7846_PENDOWN_GPIO, "ADS7846 pendown") < 0)
printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
@@ -170,7 +170,7 @@ static struct hwa742_platform_data nokia770_hwa742_platform_data = {
.te_connected = 1,
};
-static void hwa742_dev_init(void)
+static void __init hwa742_dev_init(void)
{
clk_add_alias("hwa_sys_ck", NULL, "bclk", NULL);
omapfb_set_ctrl_platform_data(&nokia770_hwa742_platform_data);
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index fb51ce6123d8..c9d38f47845f 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -230,19 +230,6 @@ static struct spi_board_info palmte_spi_info[] __initdata = {
},
};
-static void palmte_headphones_detect(void *data, int state)
-{
- if (state) {
- /* Headphones connected, disable speaker */
- gpio_set_value(PALMTE_SPEAKER_GPIO, 0);
- printk(KERN_INFO "PM: speaker off\n");
- } else {
- /* Headphones unplugged, re-enable speaker */
- gpio_set_value(PALMTE_SPEAKER_GPIO, 1);
- printk(KERN_INFO "PM: speaker on\n");
- }
-}
-
static void __init palmte_misc_gpio_setup(void)
{
/* Set TSC2102 PINTDAV pin as input (used by TSC2102 driver) */
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 815a69ce821d..bdc0ac8dc21f 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -26,10 +26,12 @@
#include <linux/smc91x.h>
#include <mach/hardware.h>
+#include <mach/system.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <plat/board-voiceblue.h>
#include <plat/common.h>
#include <mach/gpio.h>
#include <plat/flash.h>
@@ -163,52 +165,6 @@ static void __init voiceblue_init_irq(void)
omap_init_irq();
}
-static void __init voiceblue_init(void)
-{
- /* mux pins for uarts */
- omap_cfg_reg(UART1_TX);
- omap_cfg_reg(UART1_RTS);
- omap_cfg_reg(UART2_TX);
- omap_cfg_reg(UART2_RTS);
- omap_cfg_reg(UART3_TX);
- omap_cfg_reg(UART3_RX);
-
- /* Watchdog */
- gpio_request(0, "Watchdog");
- /* smc91x reset */
- gpio_request(7, "SMC91x reset");
- gpio_direction_output(7, 1);
- udelay(2); /* wait at least 100ns */
- gpio_set_value(7, 0);
- mdelay(50); /* 50ms until PHY ready */
- /* smc91x interrupt pin */
- gpio_request(8, "SMC91x irq");
- /* 16C554 reset*/
- gpio_request(6, "16C554 reset");
- gpio_direction_output(6, 0);
- /* 16C554 interrupt pins */
- gpio_request(12, "16C554 irq");
- gpio_request(13, "16C554 irq");
- gpio_request(14, "16C554 irq");
- gpio_request(15, "16C554 irq");
- set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
-
- platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
- omap_board_config = voiceblue_config;
- omap_board_config_size = ARRAY_SIZE(voiceblue_config);
- omap_serial_init();
- omap1_usb_init(&voiceblue_usb_config);
- omap_register_i2c_bus(1, 100, NULL, 0);
-
- /* There is a good chance board is going up, so enable power LED
- * (it is connected through invertor) */
- omap_writeb(0x00, OMAP_LPG1_LCR);
- omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
-}
-
static void __init voiceblue_map_io(void)
{
omap1_map_common_io();
@@ -275,8 +231,17 @@ void voiceblue_wdt_ping(void)
gpio_set_value(0, wdt_gpio_state);
}
-void voiceblue_reset(void)
+static void voiceblue_reset(char mode, const char *cmd)
{
+ /*
+ * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
+ * "Global Software Reset Affects Traffic Controller Frequency".
+ */
+ if (cpu_is_omap5912()) {
+ omap_writew(omap_readw(DPLL_CTL) & ~(1 << 4), DPLL_CTL);
+ omap_writew(0x8, ARM_RSTCT1);
+ }
+
set_bit(MACHINE_REBOOT, &machine_state);
voiceblue_wdt_enable();
while (1) ;
@@ -286,6 +251,54 @@ EXPORT_SYMBOL(voiceblue_wdt_enable);
EXPORT_SYMBOL(voiceblue_wdt_disable);
EXPORT_SYMBOL(voiceblue_wdt_ping);
+static void __init voiceblue_init(void)
+{
+ /* mux pins for uarts */
+ omap_cfg_reg(UART1_TX);
+ omap_cfg_reg(UART1_RTS);
+ omap_cfg_reg(UART2_TX);
+ omap_cfg_reg(UART2_RTS);
+ omap_cfg_reg(UART3_TX);
+ omap_cfg_reg(UART3_RX);
+
+ /* Watchdog */
+ gpio_request(0, "Watchdog");
+ /* smc91x reset */
+ gpio_request(7, "SMC91x reset");
+ gpio_direction_output(7, 1);
+ udelay(2); /* wait at least 100ns */
+ gpio_set_value(7, 0);
+ mdelay(50); /* 50ms until PHY ready */
+ /* smc91x interrupt pin */
+ gpio_request(8, "SMC91x irq");
+ /* 16C554 reset*/
+ gpio_request(6, "16C554 reset");
+ gpio_direction_output(6, 0);
+ /* 16C554 interrupt pins */
+ gpio_request(12, "16C554 irq");
+ gpio_request(13, "16C554 irq");
+ gpio_request(14, "16C554 irq");
+ gpio_request(15, "16C554 irq");
+ set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
+
+ platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
+ omap_board_config = voiceblue_config;
+ omap_board_config_size = ARRAY_SIZE(voiceblue_config);
+ omap_serial_init();
+ omap1_usb_init(&voiceblue_usb_config);
+ omap_register_i2c_bus(1, 100, NULL, 0);
+
+ /* There is a good chance board is going up, so enable power LED
+ * (it is connected through invertor) */
+ omap_writeb(0x00, OMAP_LPG1_LCR);
+ omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
+
+ arch_reset = voiceblue_reset;
+}
+
MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
/* Maintainer: Ladislav Michl <michl@2n.cz> */
.boot_params = 0x10000100,
diff --git a/arch/arm/mach-omap1/include/mach/debug-macro.S b/arch/arm/mach-omap1/include/mach/debug-macro.S
index 6a0fa0462365..62856044eb63 100644
--- a/arch/arm/mach-omap1/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap1/include/mach/debug-macro.S
@@ -17,6 +17,9 @@
#include <plat/serial.h>
+#define omap_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET)
+#define omap_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET)
+
.pushsection .data
omap_uart_phys: .word 0x0
omap_uart_virt: .word 0x0
@@ -33,7 +36,7 @@ omap_uart_virt: .word 0x0
/* Use omap_uart_phys/virt if already configured */
9: mrc p15, 0, \rp, c1, c0
tst \rp, #1 @ MMU enabled?
- ldreq \rp, =__virt_to_phys(omap_uart_phys) @ MMU not enabled
+ ldreq \rp, =omap_uart_v2p(omap_uart_phys) @ MMU disabled
ldrne \rp, =omap_uart_phys @ MMU enabled
add \rv, \rp, #4 @ omap_uart_virt
ldr \rp, [\rp, #0]
@@ -46,7 +49,7 @@ omap_uart_virt: .word 0x0
mrc p15, 0, \rp, c1, c0
tst \rp, #1 @ MMU enabled?
ldreq \rp, =OMAP_UART_INFO @ MMU not enabled
- ldrne \rp, =__phys_to_virt(OMAP_UART_INFO) @ MMU enabled
+ ldrne \rp, =omap_uart_p2v(OMAP_UART_INFO) @ MMU enabled
ldr \rp, [\rp, #0]
/* Select the UART to use based on the UART1 scratchpad value */
@@ -73,7 +76,7 @@ omap_uart_virt: .word 0x0
98: add \rp, \rp, #0xff000000 @ phys base
mrc p15, 0, \rv, c1, c0
tst \rv, #1 @ MMU enabled?
- ldreq \rv, =__virt_to_phys(omap_uart_phys) @ MMU not enabled
+ ldreq \rv, =omap_uart_v2p(omap_uart_phys) @ MMU disabled
ldrne \rv, =omap_uart_phys @ MMU enabled
str \rp, [\rv, #0]
sub \rp, \rp, #0xff000000 @ phys base
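
The new omap_uart_v2p()/omap_uart_p2v() helpers above are plain linear offset translations between kernel virtual and physical addresses, built from PAGE_OFFSET and PLAT_PHYS_OFFSET. A minimal sketch with example constants follows; the real offsets depend on the kernel configuration and the board's RAM base.

#include <stdio.h>
#include <stdint.h>

/* Example values only, assumed for the demo. */
#define DEMO_PAGE_OFFSET  0xC0000000u	/* typical ARM kernel virtual base */
#define DEMO_PHYS_OFFSET  0x10000000u	/* assumed RAM physical base */

#define demo_v2p(x) ((x) - DEMO_PAGE_OFFSET + DEMO_PHYS_OFFSET)
#define demo_p2v(x) ((x) - DEMO_PHYS_OFFSET + DEMO_PAGE_OFFSET)

int main(void)
{
	uint32_t virt = 0xC0001234u;
	uint32_t phys = demo_v2p(virt);

	printf("virt 0x%08x -> phys 0x%08x -> virt 0x%08x\n",
	       (unsigned)virt, (unsigned)phys, (unsigned)demo_p2v(phys));
	return 0;
}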
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index 820973666f34..e68f6c012fde 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -10,6 +10,7 @@
*
* Multichannel mode not supported.
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
@@ -78,100 +79,288 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
};
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+struct resource omap7xx_mcbsp_res[][6] = {
+ {
+ {
+ .start = OMAP7XX_MCBSP1_BASE,
+ .end = OMAP7XX_MCBSP1_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_7XX_McBSP1RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_7XX_McBSP1TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+ {
+ {
+ .start = OMAP7XX_MCBSP2_BASE,
+ .end = OMAP7XX_MCBSP2_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_7XX_McBSP2RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_7XX_McBSP2TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP3_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP3_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+};
+
static struct omap_mcbsp_platform_data omap7xx_mcbsp_pdata[] = {
{
- .phys_base = OMAP7XX_MCBSP1_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP1_TX,
- .rx_irq = INT_7XX_McBSP1RX,
- .tx_irq = INT_7XX_McBSP1TX,
.ops = &omap1_mcbsp_ops,
},
{
- .phys_base = OMAP7XX_MCBSP2_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP3_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP3_TX,
- .rx_irq = INT_7XX_McBSP2RX,
- .tx_irq = INT_7XX_McBSP2TX,
.ops = &omap1_mcbsp_ops,
},
};
-#define OMAP7XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap7xx_mcbsp_pdata)
-#define OMAP7XX_MCBSP_REG_NUM (OMAP_MCBSP_REG_XCERH / sizeof(u16) + 1)
+#define OMAP7XX_MCBSP_RES_SZ ARRAY_SIZE(omap7xx_mcbsp_res[1])
+#define OMAP7XX_MCBSP_COUNT ARRAY_SIZE(omap7xx_mcbsp_res)
#else
+#define omap7xx_mcbsp_res NULL
#define omap7xx_mcbsp_pdata NULL
-#define OMAP7XX_MCBSP_PDATA_SZ 0
-#define OMAP7XX_MCBSP_REG_NUM 0
+#define OMAP7XX_MCBSP_RES_SZ 0
+#define OMAP7XX_MCBSP_COUNT 0
#endif
#ifdef CONFIG_ARCH_OMAP15XX
+struct resource omap15xx_mcbsp_res[][6] = {
+ {
+ {
+ .start = OMAP1510_MCBSP1_BASE,
+ .end = OMAP1510_MCBSP1_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_McBSP1RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_McBSP1TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+ {
+ {
+ .start = OMAP1510_MCBSP2_BASE,
+ .end = OMAP1510_MCBSP2_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_1510_SPI_RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_1510_SPI_TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+ {
+ {
+ .start = OMAP1510_MCBSP3_BASE,
+ .end = OMAP1510_MCBSP3_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_McBSP3RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_McBSP3TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP3_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP3_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+};
+
static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
{
- .phys_base = OMAP1510_MCBSP1_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP1_TX,
- .rx_irq = INT_McBSP1RX,
- .tx_irq = INT_McBSP1TX,
.ops = &omap1_mcbsp_ops,
},
{
- .phys_base = OMAP1510_MCBSP2_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP2_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP2_TX,
- .rx_irq = INT_1510_SPI_RX,
- .tx_irq = INT_1510_SPI_TX,
.ops = &omap1_mcbsp_ops,
},
{
- .phys_base = OMAP1510_MCBSP3_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP3_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP3_TX,
- .rx_irq = INT_McBSP3RX,
- .tx_irq = INT_McBSP3TX,
.ops = &omap1_mcbsp_ops,
},
};
-#define OMAP15XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap15xx_mcbsp_pdata)
-#define OMAP15XX_MCBSP_REG_NUM (OMAP_MCBSP_REG_XCERH / sizeof(u16) + 1)
+#define OMAP15XX_MCBSP_RES_SZ ARRAY_SIZE(omap15xx_mcbsp_res[1])
+#define OMAP15XX_MCBSP_COUNT ARRAY_SIZE(omap15xx_mcbsp_res)
#else
+#define omap15xx_mcbsp_res NULL
#define omap15xx_mcbsp_pdata NULL
-#define OMAP15XX_MCBSP_PDATA_SZ 0
-#define OMAP15XX_MCBSP_REG_NUM 0
+#define OMAP15XX_MCBSP_RES_SZ 0
+#define OMAP15XX_MCBSP_COUNT 0
#endif
#ifdef CONFIG_ARCH_OMAP16XX
+struct resource omap16xx_mcbsp_res[][6] = {
+ {
+ {
+ .start = OMAP1610_MCBSP1_BASE,
+ .end = OMAP1610_MCBSP1_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_McBSP1RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_McBSP1TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+ {
+ {
+ .start = OMAP1610_MCBSP2_BASE,
+ .end = OMAP1610_MCBSP2_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_1610_McBSP2_RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_1610_McBSP2_TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+ {
+ {
+ .start = OMAP1610_MCBSP3_BASE,
+ .end = OMAP1610_MCBSP3_BASE + SZ_256,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "rx",
+ .start = INT_McBSP3RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "tx",
+ .start = INT_McBSP3TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = OMAP_DMA_MCBSP3_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "tx",
+ .start = OMAP_DMA_MCBSP3_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ },
+};
+
static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
{
- .phys_base = OMAP1610_MCBSP1_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP1_TX,
- .rx_irq = INT_McBSP1RX,
- .tx_irq = INT_McBSP1TX,
.ops = &omap1_mcbsp_ops,
},
{
- .phys_base = OMAP1610_MCBSP2_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP2_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP2_TX,
- .rx_irq = INT_1610_McBSP2_RX,
- .tx_irq = INT_1610_McBSP2_TX,
.ops = &omap1_mcbsp_ops,
},
{
- .phys_base = OMAP1610_MCBSP3_BASE,
- .dma_rx_sync = OMAP_DMA_MCBSP3_RX,
- .dma_tx_sync = OMAP_DMA_MCBSP3_TX,
- .rx_irq = INT_McBSP3RX,
- .tx_irq = INT_McBSP3TX,
.ops = &omap1_mcbsp_ops,
},
};
-#define OMAP16XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap16xx_mcbsp_pdata)
-#define OMAP16XX_MCBSP_REG_NUM (OMAP_MCBSP_REG_XCERH / sizeof(u16) + 1)
+#define OMAP16XX_MCBSP_RES_SZ ARRAY_SIZE(omap16xx_mcbsp_res[1])
+#define OMAP16XX_MCBSP_COUNT ARRAY_SIZE(omap16xx_mcbsp_res)
#else
+#define omap16xx_mcbsp_res NULL
#define omap16xx_mcbsp_pdata NULL
-#define OMAP16XX_MCBSP_PDATA_SZ 0
-#define OMAP16XX_MCBSP_REG_NUM 0
+#define OMAP16XX_MCBSP_RES_SZ 0
+#define OMAP16XX_MCBSP_COUNT 0
#endif
static int __init omap1_mcbsp_init(void)
@@ -179,16 +368,12 @@ static int __init omap1_mcbsp_init(void)
if (!cpu_class_is_omap1())
return -ENODEV;
- if (cpu_is_omap7xx()) {
- omap_mcbsp_count = OMAP7XX_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP7XX_MCBSP_REG_NUM * sizeof(u16);
- } else if (cpu_is_omap15xx()) {
- omap_mcbsp_count = OMAP15XX_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP15XX_MCBSP_REG_NUM * sizeof(u16);
- } else if (cpu_is_omap16xx()) {
- omap_mcbsp_count = OMAP16XX_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP16XX_MCBSP_REG_NUM * sizeof(u16);
- }
+ if (cpu_is_omap7xx())
+ omap_mcbsp_count = OMAP7XX_MCBSP_COUNT;
+ else if (cpu_is_omap15xx())
+ omap_mcbsp_count = OMAP15XX_MCBSP_COUNT;
+ else if (cpu_is_omap16xx())
+ omap_mcbsp_count = OMAP16XX_MCBSP_COUNT;
mcbsp_ptr = kzalloc(omap_mcbsp_count * sizeof(struct omap_mcbsp *),
GFP_KERNEL);
@@ -196,16 +381,22 @@ static int __init omap1_mcbsp_init(void)
return -ENOMEM;
if (cpu_is_omap7xx())
- omap_mcbsp_register_board_cfg(omap7xx_mcbsp_pdata,
- OMAP7XX_MCBSP_PDATA_SZ);
+ omap_mcbsp_register_board_cfg(omap7xx_mcbsp_res[0],
+ OMAP7XX_MCBSP_RES_SZ,
+ omap7xx_mcbsp_pdata,
+ OMAP7XX_MCBSP_COUNT);
if (cpu_is_omap15xx())
- omap_mcbsp_register_board_cfg(omap15xx_mcbsp_pdata,
- OMAP15XX_MCBSP_PDATA_SZ);
+ omap_mcbsp_register_board_cfg(omap15xx_mcbsp_res[0],
+ OMAP15XX_MCBSP_RES_SZ,
+ omap15xx_mcbsp_pdata,
+ OMAP15XX_MCBSP_COUNT);
if (cpu_is_omap16xx())
- omap_mcbsp_register_board_cfg(omap16xx_mcbsp_pdata,
- OMAP16XX_MCBSP_PDATA_SZ);
+ omap_mcbsp_register_board_cfg(omap16xx_mcbsp_res[0],
+ OMAP16XX_MCBSP_RES_SZ,
+ omap16xx_mcbsp_pdata,
+ OMAP16XX_MCBSP_COUNT);
return omap_mcbsp_init();
}
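
The McBSP hunks above replace the per-port phys_base/IRQ/DMA fields in the platform data with arrays of struct resource, and derive both the number of ports and the per-port resource count from the array dimensions. A rough user-space model of that layout and of name-based lookup; the types and the helper are stand-ins, not the kernel's <linux/ioport.h> API.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct demo_resource {
	const char *name;
	unsigned long start, end;
	unsigned int flags;
};

#define DEMO_IORESOURCE_MEM 1
#define DEMO_IORESOURCE_IRQ 2
#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Two ports, each described purely by resources, mirroring the
 * omap7xx_mcbsp_res[][6] layout above (shortened to three entries). */
static struct demo_resource demo_mcbsp_res[][3] = {
	{
		{ NULL, 0xfffb1000, 0xfffb10ff, DEMO_IORESOURCE_MEM },
		{ "rx", 10, 10, DEMO_IORESOURCE_IRQ },
		{ "tx", 11, 11, DEMO_IORESOURCE_IRQ },
	},
	{
		{ NULL, 0xfffb1800, 0xfffb18ff, DEMO_IORESOURCE_MEM },
		{ "rx", 12, 12, DEMO_IORESOURCE_IRQ },
		{ "tx", 13, 13, DEMO_IORESOURCE_IRQ },
	},
};

/* Named lookup in the spirit of looking a resource up by name and type. */
static struct demo_resource *demo_get_byname(struct demo_resource *res,
					     size_t n, unsigned int flags,
					     const char *name)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (res[i].flags == flags && res[i].name &&
		    !strcmp(res[i].name, name))
			return &res[i];
	return NULL;
}

int main(void)
{
	/* The per-port count and the number of ports both fall out of the
	 * array dimensions, which is what the *_MCBSP_RES_SZ and
	 * *_MCBSP_COUNT macros above compute with ARRAY_SIZE(). */
	size_t res_sz = DEMO_ARRAY_SIZE(demo_mcbsp_res[0]);
	size_t count = DEMO_ARRAY_SIZE(demo_mcbsp_res);
	struct demo_resource *rx = demo_get_byname(demo_mcbsp_res[1], res_sz,
						   DEMO_IORESOURCE_IRQ, "rx");

	printf("%zu ports, %zu resources each, port1 rx irq = %lu\n",
	       count, res_sz, rx ? rx->start : 0);
	return 0;
}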
diff --git a/arch/arm/mach-omap1/pm.h b/arch/arm/mach-omap1/pm.h
index 56a647986ae9..cd926dcb5e7f 100644
--- a/arch/arm/mach-omap1/pm.h
+++ b/arch/arm/mach-omap1/pm.h
@@ -123,9 +123,9 @@ extern void allow_idle_sleep(void);
extern void omap1_pm_idle(void);
extern void omap1_pm_suspend(void);
-extern void omap7xx_cpu_suspend(unsigned short, unsigned short);
-extern void omap1510_cpu_suspend(unsigned short, unsigned short);
-extern void omap1610_cpu_suspend(unsigned short, unsigned short);
+extern void omap7xx_cpu_suspend(unsigned long, unsigned long);
+extern void omap1510_cpu_suspend(unsigned long, unsigned long);
+extern void omap1610_cpu_suspend(unsigned long, unsigned long);
extern void omap7xx_idle_loop_suspend(void);
extern void omap1510_idle_loop_suspend(void);
extern void omap1610_idle_loop_suspend(void);
diff --git a/arch/arm/mach-omap1/reset.c b/arch/arm/mach-omap1/reset.c
new file mode 100644
index 000000000000..ad951ee69205
--- /dev/null
+++ b/arch/arm/mach-omap1/reset.c
@@ -0,0 +1,25 @@
+/*
+ * OMAP1 reset support
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/system.h>
+#include <plat/prcm.h>
+
+void omap1_arch_reset(char mode, const char *cmd)
+{
+ /*
+ * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
+ * "Global Software Reset Affects Traffic Controller Frequency".
+ */
+ if (cpu_is_omap5912()) {
+ omap_writew(omap_readw(DPLL_CTL) & ~(1 << 4), DPLL_CTL);
+ omap_writew(0x8, ARM_RSTCT1);
+ }
+
+ omap_writew(1, ARM_RSTCT1);
+}
+
+void (*arch_reset)(char, const char *) = omap1_arch_reset;
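
Both voiceblue_reset() above and the new omap1_arch_reset() follow the same idea: the board supplies a reset handler through a function pointer that generic restart code invokes. A minimal sketch of that hook pattern, with illustrative names only.

#include <stdio.h>

static void (*demo_arch_reset)(char mode, const char *cmd);

static void demo_board_reset(char mode, const char *cmd)
{
	/* A real handler would poke DPLL/watchdog/reset registers here. */
	printf("reset requested, mode=%c cmd=%s\n", mode, cmd ? cmd : "(none)");
}

static void demo_machine_restart(char mode, const char *cmd)
{
	if (demo_arch_reset)
		demo_arch_reset(mode, cmd);
}

int main(void)
{
	demo_arch_reset = demo_board_reset;	/* as voiceblue_init() does */
	demo_machine_restart('h', NULL);
	return 0;
}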
diff --git a/arch/arm/mach-omap1/sleep.S b/arch/arm/mach-omap1/sleep.S
index ef771ce8b030..c875bdc902c5 100644
--- a/arch/arm/mach-omap1/sleep.S
+++ b/arch/arm/mach-omap1/sleep.S
@@ -58,6 +58,7 @@
*/
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ .align 3
ENTRY(omap7xx_cpu_suspend)
@ save registers on stack
@@ -137,6 +138,7 @@ ENTRY(omap7xx_cpu_suspend_sz)
#endif /* CONFIG_ARCH_OMAP730 || CONFIG_ARCH_OMAP850 */
#ifdef CONFIG_ARCH_OMAP15XX
+ .align 3
ENTRY(omap1510_cpu_suspend)
@ save registers on stack
@@ -211,6 +213,7 @@ ENTRY(omap1510_cpu_suspend_sz)
#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
+ .align 3
ENTRY(omap1610_cpu_suspend)
@ save registers on stack
diff --git a/arch/arm/mach-omap1/sram.S b/arch/arm/mach-omap1/sram.S
index 7724e520d07c..692587d07ea5 100644
--- a/arch/arm/mach-omap1/sram.S
+++ b/arch/arm/mach-omap1/sram.S
@@ -18,6 +18,7 @@
/*
* Reprograms ULPD and CKCTL.
*/
+ .align 3
ENTRY(omap1_sram_reprogram_clock)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 1a2cf6226a55..77cf974dd26d 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -44,6 +44,7 @@ config ARCH_OMAP4
depends on ARCH_OMAP2PLUS
select CPU_V7
select ARM_GIC
+ select LOCAL_TIMERS if SMP
select PL310_ERRATA_588369
select ARM_ERRATA_720789
select ARCH_HAS_OPP
@@ -53,25 +54,30 @@ config ARCH_OMAP4
comment "OMAP Core Type"
depends on ARCH_OMAP2
-config ARCH_OMAP2420
+config SOC_OMAP2420
bool "OMAP2420 support"
depends on ARCH_OMAP2
default y
select OMAP_DM_TIMER
select ARCH_OMAP_OTG
-config ARCH_OMAP2430
+config SOC_OMAP2430
bool "OMAP2430 support"
depends on ARCH_OMAP2
default y
select ARCH_OMAP_OTG
-config ARCH_OMAP3430
+config SOC_OMAP3430
bool "OMAP3430 support"
depends on ARCH_OMAP3
default y
select ARCH_OMAP_OTG
+config SOC_OMAPTI816X
+ bool "TI816X support"
+ depends on ARCH_OMAP3
+ default y
+
config OMAP_PACKAGE_ZAF
bool
@@ -106,25 +112,25 @@ config MACH_OMAP_GENERIC
config MACH_OMAP2_TUSB6010
bool
- depends on ARCH_OMAP2 && ARCH_OMAP2420
+ depends on ARCH_OMAP2 && SOC_OMAP2420
default y if MACH_NOKIA_N8X0
config MACH_OMAP_H4
bool "OMAP 2420 H4 board"
- depends on ARCH_OMAP2420
+ depends on SOC_OMAP2420
default y
select OMAP_PACKAGE_ZAF
select OMAP_DEBUG_DEVICES
config MACH_OMAP_APOLLON
bool "OMAP 2420 Apollon board"
- depends on ARCH_OMAP2420
+ depends on SOC_OMAP2420
default y
select OMAP_PACKAGE_ZAC
config MACH_OMAP_2430SDP
bool "OMAP 2430 SDP board"
- depends on ARCH_OMAP2430
+ depends on SOC_OMAP2430
default y
select OMAP_PACKAGE_ZAC
@@ -219,7 +225,7 @@ config MACH_NOKIA_N810_WIMAX
config MACH_NOKIA_N8X0
bool "Nokia N800/N810"
- depends on ARCH_OMAP2420
+ depends on SOC_OMAP2420
default y
select OMAP_PACKAGE_ZAC
select MACH_NOKIA_N800
@@ -294,12 +300,18 @@ config MACH_OMAP_3630SDP
default y
select OMAP_PACKAGE_CBP
+config MACH_TI8168EVM
+ bool "TI8168 Evaluation Module"
+ depends on SOC_OMAPTI816X
+ default y
+
config MACH_OMAP_4430SDP
bool "OMAP 4430 SDP board"
default y
depends on ARCH_OMAP4
select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS
+ select REGULATOR_FIXED_VOLTAGE
config MACH_OMAP4_PANDA
bool "OMAP4 Panda Board"
@@ -307,6 +319,7 @@ config MACH_OMAP4_PANDA
depends on ARCH_OMAP4
select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS
+ select REGULATOR_FIXED_VOLTAGE
config OMAP3_EMU
bool "OMAP3 debugging peripherals"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 1c0c2b02d870..1c3635d7f4cf 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -31,8 +31,8 @@ AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a$(plus_sec)
# Functions loaded to SRAM
-obj-$(CONFIG_ARCH_OMAP2420) += sram242x.o
-obj-$(CONFIG_ARCH_OMAP2430) += sram243x.o
+obj-$(CONFIG_SOC_OMAP2420) += sram242x.o
+obj-$(CONFIG_SOC_OMAP2430) += sram243x.o
obj-$(CONFIG_ARCH_OMAP3) += sram34xx.o
AFLAGS_sram242x.o :=-Wa,-march=armv6
@@ -40,8 +40,8 @@ AFLAGS_sram243x.o :=-Wa,-march=armv6
AFLAGS_sram34xx.o :=-Wa,-march=armv7-a
# Pin multiplexing
-obj-$(CONFIG_ARCH_OMAP2420) += mux2420.o
-obj-$(CONFIG_ARCH_OMAP2430) += mux2430.o
+obj-$(CONFIG_SOC_OMAP2420) += mux2420.o
+obj-$(CONFIG_SOC_OMAP2430) += mux2430.o
obj-$(CONFIG_ARCH_OMAP3) += mux34xx.o
obj-$(CONFIG_ARCH_OMAP4) += mux44xx.o
@@ -113,8 +113,8 @@ obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o \
clkt2xxx_dpllcore.o \
clkt2xxx_virt_prcm_set.o \
clkt2xxx_apll.o clkt2xxx_osc.o
-obj-$(CONFIG_ARCH_OMAP2420) += clock2420_data.o
-obj-$(CONFIG_ARCH_OMAP2430) += clock2430.o clock2430_data.o
+obj-$(CONFIG_SOC_OMAP2420) += clock2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += clock2430.o clock2430_data.o
obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o \
clock34xx.o clkt34xx_dpll3m2.o \
clock3517.o clock36xx.o \
@@ -123,12 +123,12 @@ obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o \
dpll3xxx.o
# OMAP2 clock rate set data (old "OPP" data)
-obj-$(CONFIG_ARCH_OMAP2420) += opp2420_data.o
-obj-$(CONFIG_ARCH_OMAP2430) += opp2430_data.o
+obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += opp2430_data.o
# hwmod data
-obj-$(CONFIG_ARCH_OMAP2420) += omap_hwmod_2420_data.o
-obj-$(CONFIG_ARCH_OMAP2430) += omap_hwmod_2430_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o
obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
@@ -218,12 +218,14 @@ obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o \
hsmmc.o \
omap_phy_internal.o
-obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
+obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o \
+ omap_phy_internal.o \
obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o \
hsmmc.o
+obj-$(CONFIG_MACH_TI8168EVM) += board-ti8168evm.o
# Platform specific device init code
usbfs-$(CONFIG_ARCH_OMAP_OTG) := usb-fs.o
obj-y += $(usbfs-m) $(usbfs-y)
@@ -242,3 +244,7 @@ obj-y += $(smc91x-m) $(smc91x-y)
smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
obj-y += $(smsc911x-m) $(smsc911x-y)
+obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
+
+disp-$(CONFIG_OMAP2_DSS) := display.o
+obj-y += $(disp-m) $(disp-y)
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index e0661777f599..ab0880ba3c48 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -22,6 +22,7 @@
#include <linux/mmc/host.h>
#include <linux/delay.h>
#include <linux/i2c/twl.h>
+#include <linux/regulator/machine.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -139,15 +140,33 @@ static struct omap_board_config_kernel sdp2430_config[] __initdata = {
{OMAP_TAG_LCD, &sdp2430_lcd_config},
};
-static void __init omap_2430sdp_init_irq(void)
+static void __init omap_2430sdp_init_early(void)
{
omap_board_config = sdp2430_config;
omap_board_config_size = ARRAY_SIZE(sdp2430_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
+static struct regulator_consumer_supply sdp2430_vmmc1_supplies[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
+
+/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
+static struct regulator_init_data sdp2430_vmmc1 = {
+ .constraints = {
+ .min_uV = 1850000,
+ .max_uV = 3150000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(sdp2430_vmmc1_supplies),
+ .consumer_supplies = &sdp2430_vmmc1_supplies[0],
+};
+
static struct twl4030_gpio_platform_data sdp2430_gpio_data = {
.gpio_base = OMAP_MAX_GPIO_LINES,
.irq_base = TWL4030_GPIO_IRQ_BASE,
@@ -160,6 +179,7 @@ static struct twl4030_platform_data sdp2430_twldata = {
/* platform_data for children goes here */
.gpio = &sdp2430_gpio_data,
+ .vmmc1 = &sdp2430_vmmc1,
};
static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = {
@@ -253,9 +273,10 @@ static void __init omap_2430sdp_map_io(void)
MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
/* Maintainer: Syed Khasim - Texas Instruments Inc */
.boot_params = 0x80000100,
- .map_io = omap_2430sdp_map_io,
.reserve = omap_reserve,
- .init_irq = omap_2430sdp_init_irq,
+ .map_io = omap_2430sdp_map_io,
+ .init_early = omap_2430sdp_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_2430sdp_init,
.timer = &omap_timer,
MACHINE_END
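
The board hunks in this series repeatedly split the old init_irq callback: board config, clock and SDRC setup move into a new init_early hook, and the generic omap_init_irq (or gic_init_irq on OMAP4) is wired directly as init_irq. A sketch of the callback ordering this relies on; the struct here is illustrative, not arch/arm's real machine descriptor.

#include <stdio.h>

struct demo_machine_desc {
	void (*map_io)(void);
	void (*init_early)(void);
	void (*init_irq)(void);
	void (*init_machine)(void);
};

static void demo_map_io(void)       { puts("map_io: static mappings"); }
static void demo_init_early(void)   { puts("init_early: board config, clocks, SDRC"); }
static void demo_init_irq(void)     { puts("init_irq: generic interrupt setup"); }
static void demo_init_machine(void) { puts("init_machine: register devices"); }

static const struct demo_machine_desc demo_board = {
	.map_io       = demo_map_io,
	.init_early   = demo_init_early,
	.init_irq     = demo_init_irq,
	.init_machine = demo_init_machine,
};

int main(void)
{
	/* Generic boot code runs the hooks in this order, which is why the
	 * early clock setup must not stay inside the init_irq callback. */
	demo_board.map_io();
	demo_board.init_early();
	demo_board.init_irq();
	demo_board.init_machine();
	return 0;
}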
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index d4e41ef86aa5..5aac1ecfad1a 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -307,34 +307,16 @@ static struct omap_dss_board_info sdp3430_dss_data = {
.default_device = &sdp3430_lcd_device,
};
-static struct platform_device sdp3430_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &sdp3430_dss_data,
- },
-};
-
-static struct regulator_consumer_supply sdp3430_vdda_dac_supply = {
- .supply = "vdda_dac",
- .dev = &sdp3430_dss_device.dev,
-};
-
-static struct platform_device *sdp3430_devices[] __initdata = {
- &sdp3430_dss_device,
-};
-
static struct omap_board_config_kernel sdp3430_config[] __initdata = {
};
-static void __init omap_3430sdp_init_irq(void)
+static void __init omap_3430sdp_init_early(void)
{
omap_board_config = sdp3430_config;
omap_board_config_size = ARRAY_SIZE(sdp3430_config);
omap3_pm_init_cpuidle(omap3_cpuidle_params_table);
omap2_init_common_infrastructure();
omap2_init_common_devices(hyb18m512160af6_sdrc_params, NULL);
- omap_init_irq();
}
static int sdp3430_batt_table[] = {
@@ -370,18 +352,6 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct regulator_consumer_supply sdp3430_vmmc1_supply = {
- .supply = "vmmc",
-};
-
-static struct regulator_consumer_supply sdp3430_vsim_supply = {
- .supply = "vmmc_aux",
-};
-
-static struct regulator_consumer_supply sdp3430_vmmc2_supply = {
- .supply = "vmmc",
-};
-
static int sdp3430_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
@@ -392,13 +362,6 @@ static int sdp3430_twl_gpio_setup(struct device *dev,
mmc[1].gpio_cd = gpio + 1;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters ... we "know" the
- * regulators will be set up only *after* we return.
- */
- sdp3430_vmmc1_supply.dev = mmc[0].dev;
- sdp3430_vsim_supply.dev = mmc[0].dev;
- sdp3430_vmmc2_supply.dev = mmc[1].dev;
-
/* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */
gpio_request(gpio + 7, "sub_lcd_en_bkl");
gpio_direction_output(gpio + 7, 0);
@@ -427,6 +390,35 @@ static struct twl4030_madc_platform_data sdp3430_madc_data = {
.irq_line = 1,
};
+/* regulator consumer mappings */
+
+/* ads7846 on SPI */
+static struct regulator_consumer_supply sdp3430_vaux3_supplies[] = {
+ REGULATOR_SUPPLY("vcc", "spi1.0"),
+};
+
+static struct regulator_consumer_supply sdp3430_vdda_dac_supplies[] = {
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc"),
+};
+
+/* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
+};
+
+static struct regulator_consumer_supply sdp3430_vmmc1_supplies[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
+
+static struct regulator_consumer_supply sdp3430_vsim_supplies[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
+};
+
+static struct regulator_consumer_supply sdp3430_vmmc2_supplies[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
+
/*
* Apply all the fixed voltages since most versions of U-Boot
* don't bother with that initialization.
@@ -469,6 +461,8 @@ static struct regulator_init_data sdp3430_vaux3 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(sdp3430_vaux3_supplies),
+ .consumer_supplies = sdp3430_vaux3_supplies,
};
/* VAUX4 for OMAP VDD_CSI2 (camera) */
@@ -495,8 +489,8 @@ static struct regulator_init_data sdp3430_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &sdp3430_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc1_supplies),
+ .consumer_supplies = sdp3430_vmmc1_supplies,
};
/* VMMC2 for MMC2 card */
@@ -510,8 +504,8 @@ static struct regulator_init_data sdp3430_vmmc2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &sdp3430_vmmc2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc2_supplies),
+ .consumer_supplies = sdp3430_vmmc2_supplies,
};
/* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */
@@ -525,8 +519,8 @@ static struct regulator_init_data sdp3430_vsim = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &sdp3430_vsim_supply,
+ .num_consumer_supplies = ARRAY_SIZE(sdp3430_vsim_supplies),
+ .consumer_supplies = sdp3430_vsim_supplies,
};
/* VDAC for DSS driving S-Video */
@@ -540,16 +534,8 @@ static struct regulator_init_data sdp3430_vdac = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &sdp3430_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
- {
- .supply = "vdds_dsi",
- .dev = &sdp3430_dss_device.dev,
- }
+ .num_consumer_supplies = ARRAY_SIZE(sdp3430_vdda_dac_supplies),
+ .consumer_supplies = sdp3430_vdda_dac_supplies,
};
static struct regulator_init_data sdp3430_vpll2 = {
@@ -801,7 +787,7 @@ static void __init omap_3430sdp_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3430_i2c_init();
- platform_add_devices(sdp3430_devices, ARRAY_SIZE(sdp3430_devices));
+ omap_display_init(&sdp3430_dss_data);
if (omap_rev() > OMAP3430_REV_ES1_0)
ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV2;
else
@@ -813,7 +799,7 @@ static void __init omap_3430sdp_init(void)
omap_serial_init();
usb_musb_init(&musb_board_data);
board_smc91x_init();
- board_flash_init(sdp_flash_partitions, chip_sel_3430);
+ board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
sdp3430_display_init();
enable_board_wakeup_source();
usb_ehci_init(&ehci_pdata);
@@ -822,9 +808,10 @@ static void __init omap_3430sdp_init(void)
MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
/* Maintainer: Syed Khasim - Texas Instruments Inc */
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap_3430sdp_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap_3430sdp_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_3430sdp_init,
.timer = &omap_timer,
MACHINE_END
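
The regulator hunks above drop the compile-time .dev pointers into board device structs and instead describe each consumer as a (supply name, device name) pair such as REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), matched by name at runtime. A minimal user-space model of that matching; the types and helper names are assumptions.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct demo_supply {
	const char *supply;
	const char *dev_name;
};

#define DEMO_REGULATOR_SUPPLY(s, d) { .supply = (s), .dev_name = (d) }

static const struct demo_supply demo_vmmc1_supplies[] = {
	DEMO_REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};

static const struct demo_supply *demo_find(const struct demo_supply *tab,
					   size_t n, const char *dev,
					   const char *name)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(tab[i].dev_name, dev) &&
		    !strcmp(tab[i].supply, name))
			return &tab[i];
	return NULL;
}

int main(void)
{
	const struct demo_supply *s;

	/* No pointer to the consumer device is needed at build time; the
	 * string pair is enough to make the match later. */
	s = demo_find(demo_vmmc1_supplies, 1, "omap_hsmmc.0", "vmmc");
	printf("vmmc for omap_hsmmc.0 %s\n", s ? "found" : "not found");
	return 0;
}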
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 62645640f5e4..8d1c4358ecf9 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio.h>
+#include <linux/mtd/nand.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -69,14 +70,13 @@ static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
static struct omap_board_config_kernel sdp_config[] __initdata = {
};
-static void __init omap_sdp_init_irq(void)
+static void __init omap_sdp_init_early(void)
{
omap_board_config = sdp_config;
omap_board_config_size = ARRAY_SIZE(sdp_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(h8mbx00u0mer0em_sdrc_params,
h8mbx00u0mer0em_sdrc_params);
- omap_init_irq();
}
#ifdef CONFIG_OMAP_MUX
@@ -209,16 +209,17 @@ static void __init omap_sdp_init(void)
zoom_peripherals_init();
zoom_display_init();
board_smc91x_init();
- board_flash_init(sdp_flash_partitions, chip_sel_sdp);
+ board_flash_init(sdp_flash_partitions, chip_sel_sdp, NAND_BUSWIDTH_16);
enable_board_wakeup_source();
usb_ehci_init(&ehci_pdata);
}
MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap_sdp_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap_sdp_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_sdp_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 07d1b20b1148..40eff72e55e0 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -35,6 +35,7 @@
#include <plat/common.h>
#include <plat/usb.h>
#include <plat/mmc.h>
+#include <plat/omap4-keypad.h>
#include "mux.h"
#include "hsmmc.h"
@@ -44,10 +45,93 @@
#define ETH_KS8851_IRQ 34
#define ETH_KS8851_POWER_ON 48
#define ETH_KS8851_QUART 138
-#define OMAP4SDP_MDM_PWR_EN_GPIO 157
#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
#define OMAP4_SFH7741_ENABLE_GPIO 188
+static const int sdp4430_keymap[] = {
+ KEY(0, 0, KEY_E),
+ KEY(0, 1, KEY_R),
+ KEY(0, 2, KEY_T),
+ KEY(0, 3, KEY_HOME),
+ KEY(0, 4, KEY_F5),
+ KEY(0, 5, KEY_UNKNOWN),
+ KEY(0, 6, KEY_I),
+ KEY(0, 7, KEY_LEFTSHIFT),
+
+ KEY(1, 0, KEY_D),
+ KEY(1, 1, KEY_F),
+ KEY(1, 2, KEY_G),
+ KEY(1, 3, KEY_SEND),
+ KEY(1, 4, KEY_F6),
+ KEY(1, 5, KEY_UNKNOWN),
+ KEY(1, 6, KEY_K),
+ KEY(1, 7, KEY_ENTER),
+
+ KEY(2, 0, KEY_X),
+ KEY(2, 1, KEY_C),
+ KEY(2, 2, KEY_V),
+ KEY(2, 3, KEY_END),
+ KEY(2, 4, KEY_F7),
+ KEY(2, 5, KEY_UNKNOWN),
+ KEY(2, 6, KEY_DOT),
+ KEY(2, 7, KEY_CAPSLOCK),
+
+ KEY(3, 0, KEY_Z),
+ KEY(3, 1, KEY_KPPLUS),
+ KEY(3, 2, KEY_B),
+ KEY(3, 3, KEY_F1),
+ KEY(3, 4, KEY_F8),
+ KEY(3, 5, KEY_UNKNOWN),
+ KEY(3, 6, KEY_O),
+ KEY(3, 7, KEY_SPACE),
+
+ KEY(4, 0, KEY_W),
+ KEY(4, 1, KEY_Y),
+ KEY(4, 2, KEY_U),
+ KEY(4, 3, KEY_F2),
+ KEY(4, 4, KEY_VOLUMEUP),
+ KEY(4, 5, KEY_UNKNOWN),
+ KEY(4, 6, KEY_L),
+ KEY(4, 7, KEY_LEFT),
+
+ KEY(5, 0, KEY_S),
+ KEY(5, 1, KEY_H),
+ KEY(5, 2, KEY_J),
+ KEY(5, 3, KEY_F3),
+ KEY(5, 4, KEY_F9),
+ KEY(5, 5, KEY_VOLUMEDOWN),
+ KEY(5, 6, KEY_M),
+ KEY(5, 7, KEY_RIGHT),
+
+ KEY(6, 0, KEY_Q),
+ KEY(6, 1, KEY_A),
+ KEY(6, 2, KEY_N),
+ KEY(6, 3, KEY_BACK),
+ KEY(6, 4, KEY_BACKSPACE),
+ KEY(6, 5, KEY_UNKNOWN),
+ KEY(6, 6, KEY_P),
+ KEY(6, 7, KEY_UP),
+
+ KEY(7, 0, KEY_PROG1),
+ KEY(7, 1, KEY_PROG2),
+ KEY(7, 2, KEY_PROG3),
+ KEY(7, 3, KEY_PROG4),
+ KEY(7, 4, KEY_F4),
+ KEY(7, 5, KEY_UNKNOWN),
+ KEY(7, 6, KEY_OK),
+ KEY(7, 7, KEY_DOWN),
+};
+
+static struct matrix_keymap_data sdp4430_keymap_data = {
+ .keymap = sdp4430_keymap,
+ .keymap_size = ARRAY_SIZE(sdp4430_keymap),
+};
+
+static struct omap4_keypad_platform_data sdp4430_keypad_data = {
+ .keymap_data = &sdp4430_keymap_data,
+ .rows = 8,
+ .cols = 8,
+};
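
The sdp4430_keymap[] entries above pack a row, a column and a key code into one integer per matrix position. A small sketch of that packing; the bit layout and the key-code value are assumptions for illustration, not the real matrix-keypad header definitions.

#include <stdio.h>

#define DEMO_KEY(row, col, code) \
	((unsigned int)(row) << 24 | (unsigned int)(col) << 16 | (code))

#define DEMO_KEY_E 18	/* assumed numeric key code, for illustration only */

int main(void)
{
	unsigned int entry = DEMO_KEY(0, 2, DEMO_KEY_E);

	printf("row=%u col=%u code=%u (entry=0x%08x)\n",
	       entry >> 24, (entry >> 16) & 0xff, entry & 0xffff, entry);
	return 0;
}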
static struct gpio_led sdp4430_gpio_leds[] = {
{
.name = "omap4:green:debug0",
@@ -239,7 +323,7 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = {
{ OMAP_TAG_LCD, &sdp4430_lcd_config },
};
-static void __init omap_4430sdp_init_irq(void)
+static void __init omap_4430sdp_init_early(void)
{
omap_board_config = sdp4430_config;
omap_board_config_size = ARRAY_SIZE(sdp4430_config);
@@ -248,19 +332,8 @@ static void __init omap_4430sdp_init_irq(void)
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(1);
#endif
- gic_init_irq();
}
-static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
- .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
- .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
- .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
- .phy_reset = false,
- .reset_gpio_port[0] = -EINVAL,
- .reset_gpio_port[1] = -EINVAL,
- .reset_gpio_port[2] = -EINVAL,
-};
-
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_UTMI,
.mode = MUSB_OTG,
@@ -276,11 +349,6 @@ static struct twl4030_usb_data omap4_usbphy_data = {
static struct omap2_hsmmc_info mmc[] = {
{
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
- .gpio_wp = -EINVAL,
- },
- {
.mmc = 2,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_cd = -EINVAL,
@@ -288,19 +356,24 @@ static struct omap2_hsmmc_info mmc[] = {
.nonremovable = true,
.ocr_mask = MMC_VDD_29_30,
},
+ {
+ .mmc = 1,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ .gpio_wp = -EINVAL,
+ },
{} /* Terminator */
};
static struct regulator_consumer_supply sdp4430_vaux_supply[] = {
{
.supply = "vmmc",
- .dev_name = "mmci-omap-hs.1",
+ .dev_name = "omap_hsmmc.1",
},
};
static struct regulator_consumer_supply sdp4430_vmmc_supply[] = {
{
.supply = "vmmc",
- .dev_name = "mmci-omap-hs.0",
+ .dev_name = "omap_hsmmc.0",
},
};
@@ -434,7 +507,6 @@ static struct regulator_init_data sdp4430_vana = {
.constraints = {
.min_uV = 2100000,
.max_uV = 2100000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -446,7 +518,6 @@ static struct regulator_init_data sdp4430_vcxio = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -458,7 +529,6 @@ static struct regulator_init_data sdp4430_vdac = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -478,6 +548,12 @@ static struct regulator_init_data sdp4430_vusb = {
},
};
+static struct regulator_init_data sdp4430_clk32kg = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
static struct twl4030_platform_data sdp4430_twldata = {
.irq_base = TWL6030_IRQ_BASE,
.irq_end = TWL6030_IRQ_END,
@@ -493,6 +569,7 @@ static struct twl4030_platform_data sdp4430_twldata = {
.vaux1 = &sdp4430_vaux1,
.vaux2 = &sdp4430_vaux2,
.vaux3 = &sdp4430_vaux3,
+ .clk32kg = &sdp4430_clk32kg,
.usb = &omap4_usbphy_data
};
@@ -576,14 +653,6 @@ static void __init omap_4430sdp_init(void)
omap_serial_init();
omap4_twl6030_hsmmc_init(mmc);
- /* Power on the ULPI PHY */
- status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
- if (status)
- pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__);
- else
- gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
-
- usb_ehci_init(&ehci_pdata);
usb_musb_init(&musb_board_data);
status = omap_ethernet_init();
@@ -594,6 +663,10 @@ static void __init omap_4430sdp_init(void)
spi_register_board_info(sdp4430_spi_board_info,
ARRAY_SIZE(sdp4430_spi_board_info));
}
+
+ status = omap4_keyboard_init(&sdp4430_keypad_data);
+ if (status)
+ pr_err("Keypad initialization failed: %d\n", status);
}
static void __init omap_4430sdp_map_io(void)
@@ -605,9 +678,10 @@ static void __init omap_4430sdp_map_io(void)
MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
/* Maintainer: Santosh Shilimkar - Texas Instruments Inc */
.boot_params = 0x80000100,
- .map_io = omap_4430sdp_map_io,
.reserve = omap_reserve,
- .init_irq = omap_4430sdp_init_irq,
+ .map_io = omap_4430sdp_map_io,
+ .init_early = omap_4430sdp_init_early,
+ .init_irq = gic_init_irq,
.init_machine = omap_4430sdp_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 71acb5ab281c..ae3a83d47dab 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -49,14 +49,13 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL
#endif
-static void __init am3517_crane_init_irq(void)
+static void __init am3517_crane_init_early(void)
{
omap_board_config = am3517_crane_config;
omap_board_config_size = ARRAY_SIZE(am3517_crane_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
@@ -108,9 +107,10 @@ static void __init am3517_crane_init(void)
MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = am3517_crane_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = am3517_crane_init_early,
+ .init_irq = omap_init_irq,
.init_machine = am3517_crane_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 10d60b7743cf..634fe65b33c8 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -378,37 +378,28 @@ static struct omap_dss_board_info am3517_evm_dss_data = {
.default_device = &am3517_evm_lcd_device,
};
-static struct platform_device am3517_evm_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &am3517_evm_dss_data,
- },
-};
-
/*
* Board initialization
*/
static struct omap_board_config_kernel am3517_evm_config[] __initdata = {
};
-static struct platform_device *am3517_evm_devices[] __initdata = {
- &am3517_evm_dss_device,
-};
-
-static void __init am3517_evm_init_irq(void)
+static void __init am3517_evm_init_early(void)
{
omap_board_config = am3517_evm_config;
omap_board_config_size = ARRAY_SIZE(am3517_evm_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
.mode = MUSB_OTG,
.power = 500,
+ .set_phy_power = am35x_musb_phy_power,
+ .clear_irq = am35x_musb_clear_irq,
+ .set_mode = am35x_musb_set_mode,
+ .reset = am35x_musb_reset,
};
static __init void am3517_evm_musb_init(void)
@@ -495,9 +486,7 @@ static void __init am3517_evm_init(void)
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
am3517_evm_i2c_init();
- platform_add_devices(am3517_evm_devices,
- ARRAY_SIZE(am3517_evm_devices));
-
+ omap_display_init(&am3517_evm_dss_data);
omap_serial_init();
/* Configure GPIO for EHCI port */
@@ -521,9 +510,10 @@ static void __init am3517_evm_init(void)
MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = am3517_evm_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = am3517_evm_init_early,
+ .init_irq = omap_init_irq,
.init_machine = am3517_evm_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 9f55b68687f7..4ef4aad4e719 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -274,13 +274,12 @@ static struct omap_board_config_kernel apollon_config[] __initdata = {
{ OMAP_TAG_LCD, &apollon_lcd_config },
};
-static void __init omap_apollon_init_irq(void)
+static void __init omap_apollon_init_early(void)
{
omap_board_config = apollon_config;
omap_board_config_size = ARRAY_SIZE(apollon_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
static void __init apollon_led_init(void)
@@ -355,9 +354,10 @@ static void __init omap_apollon_map_io(void)
MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
/* Maintainer: Kyungmin Park <kyungmin.park@samsung.com> */
.boot_params = 0x80000100,
- .map_io = omap_apollon_map_io,
.reserve = omap_reserve,
- .init_irq = omap_apollon_init_irq,
+ .map_io = omap_apollon_map_io,
+ .init_early = omap_apollon_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_apollon_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index dac141610666..862fe5f55656 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -401,14 +401,6 @@ static struct omap_dss_board_info cm_t35_dss_data = {
.default_device = &cm_t35_dvi_device,
};
-static struct platform_device cm_t35_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &cm_t35_dss_data,
- },
-};
-
static struct omap2_mcspi_device_config tdo24m_mcspi_config = {
.turbo_mode = 0,
.single_channel = 1, /* 0: slave, 1: master */
@@ -468,7 +460,7 @@ static void __init cm_t35_init_display(void)
msleep(50);
gpio_set_value(lcd_en_gpio, 1);
- err = platform_device_register(&cm_t35_dss_device);
+ err = omap_display_init(&cm_t35_dss_data);
if (err) {
pr_err("CM-T35: failed to register DSS device\n");
goto err_dev_reg;
@@ -495,15 +487,11 @@ static struct regulator_consumer_supply cm_t35_vsim_supply = {
.supply = "vmmc_aux",
};
-static struct regulator_consumer_supply cm_t35_vdac_supply = {
- .supply = "vdda_dac",
- .dev = &cm_t35_dss_device.dev,
-};
+static struct regulator_consumer_supply cm_t35_vdac_supply =
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
-static struct regulator_consumer_supply cm_t35_vdvi_supply = {
- .supply = "vdvi",
- .dev = &cm_t35_dss_device.dev,
-};
+static struct regulator_consumer_supply cm_t35_vdvi_supply =
+ REGULATOR_SUPPLY("vdvi", "omap_display");
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data cm_t35_vmmc1 = {
@@ -683,7 +671,7 @@ static void __init cm_t35_init_i2c(void)
static struct omap_board_config_kernel cm_t35_config[] __initdata = {
};
-static void __init cm_t35_init_irq(void)
+static void __init cm_t35_init_early(void)
{
omap_board_config = cm_t35_config;
omap_board_config_size = ARRAY_SIZE(cm_t35_config);
@@ -691,7 +679,6 @@ static void __init cm_t35_init_irq(void)
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
- omap_init_irq();
}
static struct omap_board_mux board_mux[] __initdata = {
@@ -815,9 +802,10 @@ static void __init cm_t35_init(void)
MACHINE_START(CM_T35, "Compulab CM-T35")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = cm_t35_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = cm_t35_init_early,
+ .init_irq = omap_init_irq,
.init_machine = cm_t35_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 8f9a64d650ee..38bef6d004c9 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -254,14 +254,13 @@ static inline void cm_t3517_init_nand(void) {}
static struct omap_board_config_kernel cm_t3517_config[] __initdata = {
};
-static void __init cm_t3517_init_irq(void)
+static void __init cm_t3517_init_early(void)
{
omap_board_config = cm_t3517_config;
omap_board_config_size = ARRAY_SIZE(cm_t3517_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
static struct omap_board_mux board_mux[] __initdata = {
@@ -303,9 +302,10 @@ static void __init cm_t3517_init(void)
MACHINE_START(CM_T3517, "Compulab CM-T3517")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = cm_t3517_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = cm_t3517_init_early,
+ .init_irq = omap_init_irq,
.init_machine = cm_t3517_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 9a2a31e011ce..f1af34ff9796 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -140,7 +140,7 @@ static void devkit8000_panel_disable_dvi(struct omap_dss_device *dssdev)
}
static struct regulator_consumer_supply devkit8000_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
/* ads7846 on SPI */
@@ -195,16 +195,8 @@ static struct omap_dss_board_info devkit8000_dss_data = {
.default_device = &devkit8000_lcd_device,
};
-static struct platform_device devkit8000_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &devkit8000_dss_data,
- },
-};
-
static struct regulator_consumer_supply devkit8000_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss");
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
static uint32_t board_keymap[] = {
KEY(0, 0, KEY_1),
@@ -285,8 +277,10 @@ static struct twl4030_gpio_platform_data devkit8000_gpio_data = {
.setup = devkit8000_twl_gpio_setup,
};
-static struct regulator_consumer_supply devkit8000_vpll1_supply =
- REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+static struct regulator_consumer_supply devkit8000_vpll1_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
+};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data devkit8000_vmmc1 = {
@@ -327,8 +321,8 @@ static struct regulator_init_data devkit8000_vpll1 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &devkit8000_vpll1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(devkit8000_vpll1_supplies),
+ .consumer_supplies = devkit8000_vpll1_supplies,
};
/* VAUX4 for ads7846 and nubs */
@@ -456,11 +450,15 @@ static struct platform_device keys_gpio = {
};
-static void __init devkit8000_init_irq(void)
+static void __init devkit8000_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
+}
+
+static void __init devkit8000_init_irq(void)
+{
omap_init_irq();
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(12);
@@ -575,7 +573,6 @@ static void __init omap_dm9000_init(void)
}
static struct platform_device *devkit8000_devices[] __initdata = {
- &devkit8000_dss_device,
&leds_gpio,
&keys_gpio,
&omap_dm9000_dev,
@@ -797,6 +794,7 @@ static void __init devkit8000_init(void)
platform_add_devices(devkit8000_devices,
ARRAY_SIZE(devkit8000_devices));
+ omap_display_init(&devkit8000_dss_data);
spi_register_board_info(devkit8000_spi_board_info,
ARRAY_SIZE(devkit8000_spi_board_info));
@@ -813,8 +811,9 @@ static void __init devkit8000_init(void)
MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = devkit8000_init_early,
.init_irq = devkit8000_init_irq,
.init_machine = devkit8000_init,
.timer = &omap_timer,
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index fd38c05bb47f..c32c06828f08 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -1,5 +1,5 @@
/*
- * board-sdp-flash.c
+ * board-flash.c
* Modified from mach-omap2/board-3430sdp-flash.c
*
* Copyright (C) 2009 Nokia Corporation
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/io.h>
+#include <plat/irqs.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
@@ -73,11 +74,11 @@ __init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
+ FLASH_SIZE_SDPV1 - 1;
}
if (err < 0) {
- printk(KERN_ERR "NOR: Can't request GPMC CS\n");
+ pr_err("NOR: Can't request GPMC CS\n");
return;
}
if (platform_device_register(&board_nor_device) < 0)
- printk(KERN_ERR "Unable to register NOR device\n");
+ pr_err("Unable to register NOR device\n");
}
#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
@@ -139,12 +140,16 @@ static struct omap_nand_platform_data board_nand_data = {
};
void
-__init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs)
+__init board_nand_init(struct mtd_partition *nand_parts,
+ u8 nr_parts, u8 cs, int nand_type)
{
board_nand_data.cs = cs;
board_nand_data.parts = nand_parts;
- board_nand_data.nr_parts = nr_parts;
+ board_nand_data.nr_parts = nr_parts;
+ board_nand_data.devsize = nand_type;
+ board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
+ board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs;
gpmc_nand_init(&board_nand_data);
}
#else
@@ -189,12 +194,12 @@ unmap:
}
/**
- * sdp3430_flash_init - Identify devices connected to GPMC and register.
+ * board_flash_init - Identify devices connected to GPMC and register.
*
* @return - void.
*/
void board_flash_init(struct flash_partitions partition_info[],
- char chip_sel_board[][GPMC_CS_NUM])
+ char chip_sel_board[][GPMC_CS_NUM], int nand_type)
{
u8 cs = 0;
u8 norcs = GPMC_CS_NUM + 1;
@@ -208,7 +213,7 @@ void board_flash_init(struct flash_partitions partition_info[],
*/
idx = get_gpmc0_type();
if (idx >= MAX_SUPPORTED_GPMC_CONFIG) {
- printk(KERN_ERR "%s: Invalid chip select: %d\n", __func__, cs);
+ pr_err("%s: Invalid chip select: %d\n", __func__, cs);
return;
}
config_sel = (unsigned char *)(chip_sel_board[idx]);
@@ -232,23 +237,20 @@ void board_flash_init(struct flash_partitions partition_info[],
}
if (norcs > GPMC_CS_NUM)
- printk(KERN_INFO "NOR: Unable to find configuration "
- "in GPMC\n");
+ pr_err("NOR: Unable to find configuration in GPMC\n");
else
board_nor_init(partition_info[0].parts,
partition_info[0].nr_parts, norcs);
if (onenandcs > GPMC_CS_NUM)
- printk(KERN_INFO "OneNAND: Unable to find configuration "
- "in GPMC\n");
+ pr_err("OneNAND: Unable to find configuration in GPMC\n");
else
board_onenand_init(partition_info[1].parts,
partition_info[1].nr_parts, onenandcs);
if (nandcs > GPMC_CS_NUM)
- printk(KERN_INFO "NAND: Unable to find configuration "
- "in GPMC\n");
+ pr_err("NAND: Unable to find configuration in GPMC\n");
else
board_nand_init(partition_info[2].parts,
- partition_info[2].nr_parts, nandcs);
+ partition_info[2].nr_parts, nandcs, nand_type);
}
diff --git a/arch/arm/mach-omap2/board-flash.h b/arch/arm/mach-omap2/board-flash.h
index 69befe00dd2f..c240a3f8d163 100644
--- a/arch/arm/mach-omap2/board-flash.h
+++ b/arch/arm/mach-omap2/board-flash.h
@@ -25,6 +25,6 @@ struct flash_partitions {
};
extern void board_flash_init(struct flash_partitions [],
- char chip_sel[][GPMC_CS_NUM]);
+ char chip_sel[][GPMC_CS_NUM], int nand_type);
extern void board_nand_init(struct mtd_partition *nand_parts,
- u8 nr_parts, u8 cs);
+ u8 nr_parts, u8 cs, int nand_type);
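
The flash hunks above thread a new nand_type argument (the NAND bus width, NAND_BUSWIDTH_16 on the 3630 SDP and 0 on the 3430 SDP) from board_flash_init() down into board_nand_init(), so the GPMC NAND setup can pick 8- or 16-bit mode per board. A trivial sketch of that call chain with stand-in names and an assumed flag value.

#include <stdio.h>

#define DEMO_NAND_BUSWIDTH_16 0x0002	/* assumed flag value, demo only */

static void demo_board_nand_init(int cs, int nand_type)
{
	printf("NAND on CS%d, %s-bit bus\n", cs,
	       nand_type == DEMO_NAND_BUSWIDTH_16 ? "16" : "8");
}

static void demo_board_flash_init(int nand_cs, int nand_type)
{
	/* board_flash_init() simply passes nand_type through, as the real
	 * hunks above do for the probed NAND chip select. */
	demo_board_nand_init(nand_cs, nand_type);
}

int main(void)
{
	demo_board_flash_init(2, DEMO_NAND_BUSWIDTH_16);	/* 3630 SDP style */
	demo_board_flash_init(2, 0);				/* 3430 SDP style */
	return 0;
}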
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 0e3d81e09f89..682da9251db6 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -33,13 +33,12 @@
static struct omap_board_config_kernel generic_config[] = {
};
-static void __init omap_generic_init_irq(void)
+static void __init omap_generic_init_early(void)
{
omap_board_config = generic_config;
omap_board_config_size = ARRAY_SIZE(generic_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
static void __init omap_generic_init(void)
@@ -68,9 +67,10 @@ static void __init omap_generic_map_io(void)
MACHINE_START(OMAP_GENERIC, "Generic OMAP24xx")
/* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
.boot_params = 0x80000100,
- .map_io = omap_generic_map_io,
.reserve = omap_reserve,
- .init_irq = omap_generic_init_irq,
+ .map_io = omap_generic_map_io,
+ .init_early = omap_generic_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_generic_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 25cc9dad4b02..f6a3872f72fa 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -290,12 +290,16 @@ static struct omap_board_config_kernel h4_config[] __initdata = {
{ OMAP_TAG_LCD, &h4_lcd_config },
};
-static void __init omap_h4_init_irq(void)
+static void __init omap_h4_init_early(void)
{
omap_board_config = h4_config;
omap_board_config_size = ARRAY_SIZE(h4_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
+}
+
+static void __init omap_h4_init_irq(void)
+{
omap_init_irq();
h4_init_flash();
}
@@ -378,8 +382,9 @@ static void __init omap_h4_map_io(void)
MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
/* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
.boot_params = 0x80000100,
- .map_io = omap_h4_map_io,
.reserve = omap_reserve,
+ .map_io = omap_h4_map_io,
+ .init_early = omap_h4_init_early,
.init_irq = omap_h4_init_irq,
.init_machine = omap_h4_init,
.timer = &omap_timer,
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 3be85a1f55f4..155e95ca5550 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -250,7 +250,7 @@ static inline void __init igep2_init_smsc911x(void) { }
#endif
static struct regulator_consumer_supply igep2_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
static struct regulator_init_data igep2_vmmc1 = {
@@ -268,7 +268,7 @@ static struct regulator_init_data igep2_vmmc1 = {
};
static struct regulator_consumer_supply igep2_vio_supply =
- REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
static struct regulator_init_data igep2_vio = {
.constraints = {
@@ -286,7 +286,7 @@ static struct regulator_init_data igep2_vio = {
};
static struct regulator_consumer_supply igep2_vmmc2_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
static struct regulator_init_data igep2_vmmc2 = {
.constraints = {
@@ -485,17 +485,9 @@ static struct omap_dss_board_info igep2_dss_data = {
.default_device = &igep2_dvi_device,
};
-static struct platform_device igep2_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &igep2_dss_data,
- },
-};
-
-static struct regulator_consumer_supply igep2_vpll2_supply = {
- .supply = "vdds_dsi",
- .dev = &igep2_dss_device.dev,
+static struct regulator_consumer_supply igep2_vpll2_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
};
static struct regulator_init_data igep2_vpll2 = {
@@ -509,8 +501,8 @@ static struct regulator_init_data igep2_vpll2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &igep2_vpll2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(igep2_vpll2_supplies),
+ .consumer_supplies = igep2_vpll2_supplies,
};
static void __init igep2_display_init(void)
@@ -521,16 +513,14 @@ static void __init igep2_display_init(void)
}
static struct platform_device *igep2_devices[] __initdata = {
- &igep2_dss_device,
&igep2_vwlan_device,
};
-static void __init igep2_init_irq(void)
+static void __init igep2_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(m65kxxxxam_sdrc_params,
m65kxxxxam_sdrc_params);
- omap_init_irq();
}
static struct twl4030_codec_audio_data igep2_audio_data = {
@@ -697,6 +687,7 @@ static void __init igep2_init(void)
/* Register I2C busses and drivers */
igep2_i2c_init();
platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices));
+ omap_display_init(&igep2_dss_data);
omap_serial_init();
usb_musb_init(&musb_board_data);
usb_ehci_init(&ehci_pdata);
@@ -716,9 +707,10 @@ static void __init igep2_init(void)
MACHINE_START(IGEP0020, "IGEP v2 board")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = igep2_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = igep2_init_early,
+ .init_irq = omap_init_irq,
.init_machine = igep2_init,
.timer = &omap_timer,
MACHINE_END
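Throughout this series the regulator consumer tables move from the old "mmci-omap-hs.N" device names to "omap_hsmmc.N", matching the rename of the HSMMC platform devices. The lookup is purely string based: regulator_get() pairs the requesting device's dev_name() with the supply string, so the board tables and the platform device names have to change together. A hypothetical consumer-side sketch of that matching (not the omap_hsmmc driver's literal probe code):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_mmc_probe(struct platform_device *pdev)
{
	struct regulator *vmmc;

	/* dev_name(&pdev->dev) is e.g. "omap_hsmmc.0", so this matches
	 * REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0") in the board file */
	vmmc = regulator_get(&pdev->dev, "vmmc");
	if (IS_ERR(vmmc))
		return PTR_ERR(vmmc);

	return regulator_enable(vmmc);
}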
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
index 4dc62a9b9cb2..4273d0672ef6 100644
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ b/arch/arm/mach-omap2/board-igep0030.c
@@ -142,7 +142,7 @@ static void __init igep3_flash_init(void) {}
#endif
static struct regulator_consumer_supply igep3_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
static struct regulator_init_data igep3_vmmc1 = {
@@ -160,7 +160,7 @@ static struct regulator_init_data igep3_vmmc1 = {
};
static struct regulator_consumer_supply igep3_vio_supply =
- REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
static struct regulator_init_data igep3_vio = {
.constraints = {
@@ -178,7 +178,7 @@ static struct regulator_init_data igep3_vio = {
};
static struct regulator_consumer_supply igep3_vmmc2_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
static struct regulator_init_data igep3_vmmc2 = {
.constraints = {
@@ -331,12 +331,11 @@ static struct platform_device *igep3_devices[] __initdata = {
&igep3_vwlan_device,
};
-static void __init igep3_init_irq(void)
+static void __init igep3_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(m65kxxxxam_sdrc_params,
m65kxxxxam_sdrc_params);
- omap_init_irq();
}
static struct twl4030_platform_data igep3_twl4030_pdata = {
@@ -452,7 +451,8 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
.boot_params = 0x80000100,
.reserve = omap_reserve,
.map_io = omap3_map_io,
- .init_irq = igep3_init_irq,
+ .init_early = igep3_init_early,
+ .init_irq = omap_init_irq,
.init_machine = igep3_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index e5dc74875f9d..a3fae5697a72 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -288,13 +288,12 @@ static struct omap_board_config_kernel ldp_config[] __initdata = {
{ OMAP_TAG_LCD, &ldp_lcd_config },
};
-static void __init omap_ldp_init_irq(void)
+static void __init omap_ldp_init_early(void)
{
omap_board_config = ldp_config;
omap_board_config_size = ARRAY_SIZE(ldp_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
static struct twl4030_usb_data ldp_usb_data = {
@@ -434,7 +433,7 @@ static void __init omap_ldp_init(void)
omap_serial_init();
usb_musb_init(&musb_board_data);
board_nand_init(ldp_nand_partitions,
- ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS);
+ ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
omap2_hsmmc_init(mmc);
/* link regulators to MMC adapters */
@@ -443,9 +442,10 @@ static void __init omap_ldp_init(void)
MACHINE_START(OMAP_LDP, "OMAP LDP board")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap_ldp_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap_ldp_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_ldp_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f396756872b7..e710cd9e079b 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -536,7 +536,7 @@ static void __init n8x0_mmc_init(void)
}
mmc_data[0] = &mmc1_data;
- omap2_init_mmc(mmc_data, OMAP24XX_NR_MMC);
+ omap242x_init_mmc(mmc_data);
}
#else
@@ -628,11 +628,10 @@ static void __init n8x0_map_io(void)
omap242x_map_common_io();
}
-static void __init n8x0_init_irq(void)
+static void __init n8x0_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
#ifdef CONFIG_OMAP_MUX
@@ -703,27 +702,30 @@ static void __init n8x0_init_machine(void)
MACHINE_START(NOKIA_N800, "Nokia N800")
.boot_params = 0x80000100,
- .map_io = n8x0_map_io,
.reserve = omap_reserve,
- .init_irq = n8x0_init_irq,
+ .map_io = n8x0_map_io,
+ .init_early = n8x0_init_early,
+ .init_irq = omap_init_irq,
.init_machine = n8x0_init_machine,
.timer = &omap_timer,
MACHINE_END
MACHINE_START(NOKIA_N810, "Nokia N810")
.boot_params = 0x80000100,
- .map_io = n8x0_map_io,
.reserve = omap_reserve,
- .init_irq = n8x0_init_irq,
+ .map_io = n8x0_map_io,
+ .init_early = n8x0_init_early,
+ .init_irq = omap_init_irq,
.init_machine = n8x0_init_machine,
.timer = &omap_timer,
MACHINE_END
MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
.boot_params = 0x80000100,
- .map_io = n8x0_map_io,
.reserve = omap_reserve,
- .init_irq = n8x0_init_irq,
+ .map_io = n8x0_map_io,
+ .init_early = n8x0_init_early,
+ .init_irq = omap_init_irq,
.init_machine = n8x0_init_machine,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 46d814ab5656..ee2b763f90de 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -228,19 +228,13 @@ static struct omap_dss_board_info beagle_dss_data = {
.default_device = &beagle_dvi_device,
};
-static struct platform_device beagle_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &beagle_dss_data,
- },
-};
-
static struct regulator_consumer_supply beagle_vdac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss");
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
-static struct regulator_consumer_supply beagle_vdvi_supply =
- REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+static struct regulator_consumer_supply beagle_vdvi_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
+};
static void __init beagle_display_init(void)
{
@@ -427,8 +421,8 @@ static struct regulator_init_data beagle_vpll2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &beagle_vdvi_supply,
+ .num_consumer_supplies = ARRAY_SIZE(beagle_vdvi_supplies),
+ .consumer_supplies = beagle_vdvi_supplies,
};
static struct twl4030_usb_data beagle_usb_data = {
@@ -536,11 +530,15 @@ static struct platform_device keys_gpio = {
},
};
-static void __init omap3_beagle_init_irq(void)
+static void __init omap3_beagle_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
+}
+
+static void __init omap3_beagle_init_irq(void)
+{
omap_init_irq();
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(12);
@@ -550,7 +548,6 @@ static void __init omap3_beagle_init_irq(void)
static struct platform_device *omap3_beagle_devices[] __initdata = {
&leds_gpio,
&keys_gpio,
- &beagle_dss_device,
};
static void __init omap3beagle_flash_init(void)
@@ -617,6 +614,7 @@ static void __init omap3_beagle_init(void)
omap3_beagle_i2c_init();
platform_add_devices(omap3_beagle_devices,
ARRAY_SIZE(omap3_beagle_devices));
+ omap_display_init(&beagle_dss_data);
omap_serial_init();
omap_mux_init_gpio(170, OMAP_PIN_INPUT);
@@ -638,8 +636,9 @@ static void __init omap3_beagle_init(void)
MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
/* Maintainer: Syed Mohammed Khasim - http://beagleboard.org */
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = omap3_beagle_init_early,
.init_irq = omap3_beagle_init_irq,
.init_machine = omap3_beagle_init,
.timer = &omap_timer,
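The DSI supply is now listed twice because, with omap_display_init(), the DSS core and its outputs are separate platform devices and more than one of them may request vdds_dsi. Binding one physical rail to several consumers only takes additional (supply, dev_name) pairs and an ARRAY_SIZE()-sized array; a sketch follows, where the device names "omap_display" and "omap_dsi1" are taken from the hunks above and the constraints are illustrative only:

#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_vdds_dsi_supplies[] = {
	REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
	REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
};

static struct regulator_init_data example_vpll2 = {
	.constraints = {
		.min_uV         = 1800000,
		.max_uV         = 1800000,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = ARRAY_SIZE(example_vdds_dsi_supplies),
	.consumer_supplies     = example_vdds_dsi_supplies,
};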
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 323c3809ce39..730e29778f9d 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -30,6 +30,8 @@
#include <linux/usb/otg.h>
#include <linux/smsc911x.h>
+#include <linux/wl12xx.h>
+#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/mmc/host.h>
@@ -58,6 +60,13 @@
#define OMAP3EVM_ETHR_ID_REV 0x50
#define OMAP3EVM_ETHR_GPIO_IRQ 176
#define OMAP3EVM_SMSC911X_CS 5
+/*
+ * Eth Reset signal
+ * 64 = Generation 1 (<=RevD)
+ * 7 = Generation 2 (>=RevE)
+ */
+#define OMAP3EVM_GEN1_ETHR_GPIO_RST 64
+#define OMAP3EVM_GEN2_ETHR_GPIO_RST 7
static u8 omap3_evm_version;
@@ -124,10 +133,15 @@ static struct platform_device omap3evm_smsc911x_device = {
static inline void __init omap3evm_init_smsc911x(void)
{
- int eth_cs;
+ int eth_cs, eth_rst;
struct clk *l3ck;
unsigned int rate;
+ if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
+ eth_rst = OMAP3EVM_GEN1_ETHR_GPIO_RST;
+ else
+ eth_rst = OMAP3EVM_GEN2_ETHR_GPIO_RST;
+
eth_cs = OMAP3EVM_SMSC911X_CS;
l3ck = clk_get(NULL, "l3_ck");
@@ -136,6 +150,27 @@ static inline void __init omap3evm_init_smsc911x(void)
else
rate = clk_get_rate(l3ck);
+ /* Configure ethernet controller reset gpio */
+ if (cpu_is_omap3430()) {
+ if (gpio_request(eth_rst, "SMSC911x gpio") < 0) {
+ pr_err(KERN_ERR "Failed to request %d for smsc911x\n",
+ eth_rst);
+ return;
+ }
+
+ if (gpio_direction_output(eth_rst, 1) < 0) {
+ pr_err(KERN_ERR "Failed to set direction of %d for" \
+ " smsc911x\n", eth_rst);
+ return;
+ }
+ /* reset pulse to ethernet controller */
+ usleep_range(150, 220);
+ gpio_set_value(eth_rst, 0);
+ usleep_range(150, 220);
+ gpio_set_value(eth_rst, 1);
+ usleep_range(1, 2);
+ }
+
if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) {
printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
OMAP3EVM_ETHR_GPIO_IRQ);
@@ -235,9 +270,9 @@ static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev)
gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 0);
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
- gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
+ gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
else
- gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
+ gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
lcd_enabled = 1;
return 0;
@@ -248,9 +283,9 @@ static void omap3_evm_disable_lcd(struct omap_dss_device *dssdev)
gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 1);
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
- gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
+ gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
else
- gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
+ gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
lcd_enabled = 0;
}
@@ -289,7 +324,7 @@ static int omap3_evm_enable_dvi(struct omap_dss_device *dssdev)
return -EINVAL;
}
- gpio_set_value(OMAP3EVM_DVI_PANEL_EN_GPIO, 1);
+ gpio_set_value_cansleep(OMAP3EVM_DVI_PANEL_EN_GPIO, 1);
dvi_enabled = 1;
return 0;
@@ -297,7 +332,7 @@ static int omap3_evm_enable_dvi(struct omap_dss_device *dssdev)
static void omap3_evm_disable_dvi(struct omap_dss_device *dssdev)
{
- gpio_set_value(OMAP3EVM_DVI_PANEL_EN_GPIO, 0);
+ gpio_set_value_cansleep(OMAP3EVM_DVI_PANEL_EN_GPIO, 0);
dvi_enabled = 0;
}
@@ -328,14 +363,6 @@ static struct omap_dss_board_info omap3_evm_dss_data = {
.default_device = &omap3_evm_lcd_device,
};
-static struct platform_device omap3_evm_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &omap3_evm_dss_data,
- },
-};
-
static struct regulator_consumer_supply omap3evm_vmmc1_supply = {
.supply = "vmmc",
};
@@ -381,6 +408,16 @@ static struct omap2_hsmmc_info mmc[] = {
.gpio_cd = -EINVAL,
.gpio_wp = 63,
},
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ {
+ .name = "wl1271",
+ .mmc = 2,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
+ .gpio_wp = -EINVAL,
+ .gpio_cd = -EINVAL,
+ .nonremovable = true,
+ },
+#endif
{} /* Terminator */
};
@@ -411,6 +448,8 @@ static struct platform_device leds_gpio = {
static int omap3evm_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
+ int r;
+
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
omap_mux_init_gpio(63, OMAP_PIN_INPUT);
mmc[0].gpio_cd = gpio + 0;
@@ -426,8 +465,12 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
*/
/* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
- gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
- gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+ r = gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
+ if (!r)
+ r = gpio_direction_output(gpio + TWL4030_GPIO_MAX,
+ (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) ? 1 : 0);
+ if (r)
+ printk(KERN_ERR "failed to get/set lcd_bkl gpio\n");
/* gpio + 7 == DVI Enable */
gpio_request(gpio + 7, "EN_DVI");
@@ -500,10 +543,8 @@ static struct twl4030_codec_data omap3evm_codec_data = {
.audio = &omap3evm_audio_data,
};
-static struct regulator_consumer_supply omap3_evm_vdda_dac_supply = {
- .supply = "vdda_dac",
- .dev = &omap3_evm_dss_device.dev,
-};
+static struct regulator_consumer_supply omap3_evm_vdda_dac_supply =
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
/* VDAC for DSS driving S-Video */
static struct regulator_init_data omap3_evm_vdac = {
@@ -521,8 +562,10 @@ static struct regulator_init_data omap3_evm_vdac = {
};
/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply omap3_evm_vpll2_supply =
- REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+static struct regulator_consumer_supply omap3_evm_vpll2_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
+};
static struct regulator_init_data omap3_evm_vpll2 = {
.constraints = {
@@ -534,10 +577,70 @@ static struct regulator_init_data omap3_evm_vpll2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(omap3_evm_vpll2_supplies),
+ .consumer_supplies = omap3_evm_vpll2_supplies,
+};
+
+/* ads7846 on SPI */
+static struct regulator_consumer_supply omap3evm_vio_supply =
+ REGULATOR_SUPPLY("vcc", "spi1.0");
+
+/* VIO for ads7846 */
+static struct regulator_init_data omap3evm_vio = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap3evm_vio_supply,
+};
+
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+
+#define OMAP3EVM_WLAN_PMENA_GPIO (150)
+#define OMAP3EVM_WLAN_IRQ_GPIO (149)
+
+static struct regulator_consumer_supply omap3evm_vmmc2_supply =
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+
+/* VMMC2 for driving the WL12xx module */
+static struct regulator_init_data omap3evm_vmmc2 = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
.num_consumer_supplies = 1,
- .consumer_supplies = &omap3_evm_vpll2_supply,
+ .consumer_supplies = &omap3evm_vmmc2_supply,
+};
+
+static struct fixed_voltage_config omap3evm_vwlan = {
+ .supply_name = "vwl1271",
+ .microvolts = 1800000, /* 1.80V */
+ .gpio = OMAP3EVM_WLAN_PMENA_GPIO,
+ .startup_delay = 70000, /* 70ms */
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &omap3evm_vmmc2,
+};
+
+static struct platform_device omap3evm_wlan_regulator = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &omap3evm_vwlan,
+ },
};
+struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
+ .irq = OMAP_GPIO_IRQ(OMAP3EVM_WLAN_IRQ_GPIO),
+ .board_ref_clock = WL12XX_REFCLOCK_38, /* 38.4 MHz */
+};
+#endif
+
static struct twl4030_platform_data omap3evm_twldata = {
.irq_base = TWL4030_IRQ_BASE,
.irq_end = TWL4030_IRQ_END,
@@ -550,6 +653,7 @@ static struct twl4030_platform_data omap3evm_twldata = {
.codec = &omap3evm_codec_data,
.vdac = &omap3_evm_vdac,
.vpll2 = &omap3_evm_vpll2,
+ .vio = &omap3evm_vio,
};
static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = {
@@ -625,19 +729,14 @@ static struct spi_board_info omap3evm_spi_board_info[] = {
static struct omap_board_config_kernel omap3_evm_config[] __initdata = {
};
-static void __init omap3_evm_init_irq(void)
+static void __init omap3_evm_init_early(void)
{
omap_board_config = omap3_evm_config;
omap_board_config_size = ARRAY_SIZE(omap3_evm_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params, NULL);
- omap_init_irq();
}
-static struct platform_device *omap3_evm_devices[] __initdata = {
- &omap3_evm_dss_device,
-};
-
static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
.port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
@@ -652,14 +751,76 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
};
#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
+static struct omap_board_mux omap35x_board_mux[] __initdata = {
OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
OMAP_PIN_OFF_WAKEUPENABLE),
OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW),
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
+ OMAP_PIN_OFF_WAKEUPENABLE),
+ OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_NONE),
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ /* WLAN IRQ - GPIO 149 */
+ OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
+ /* WLAN POWER ENABLE - GPIO 150 */
+ OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ /* MMC2 SDIO pin muxes for WL12xx */
+ OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+#endif
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
+
+static struct omap_board_mux omap36x_board_mux[] __initdata = {
+ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
+ OMAP_PIN_OFF_WAKEUPENABLE),
+ OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
+ OMAP_PIN_OFF_WAKEUPENABLE),
+ /* AM/DM37x EVM: DSS data bus muxed with sys_boot */
+ OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(SYS_BOOT0, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(SYS_BOOT1, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(SYS_BOOT3, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+ OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ /* WLAN IRQ - GPIO 149 */
+ OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
+ /* WLAN POWER ENABLE - GPIO 150 */
+ OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ /* MMC2 SDIO pin muxes for WL12xx */
+ OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+#endif
+
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap35x_board_mux NULL
+#define omap36x_board_mux NULL
#endif
static struct omap_musb_board_data musb_board_data = {
@@ -671,11 +832,15 @@ static struct omap_musb_board_data musb_board_data = {
static void __init omap3_evm_init(void)
{
omap3_evm_get_revision();
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+
+ if (cpu_is_omap3630())
+ omap3_mux_init(omap36x_board_mux, OMAP_PACKAGE_CBB);
+ else
+ omap3_mux_init(omap35x_board_mux, OMAP_PACKAGE_CBB);
omap3_evm_i2c_init();
- platform_add_devices(omap3_evm_devices, ARRAY_SIZE(omap3_evm_devices));
+ omap_display_init(&omap3_evm_dss_data);
spi_register_board_info(omap3evm_spi_board_info,
ARRAY_SIZE(omap3evm_spi_board_info));
@@ -715,14 +880,22 @@ static void __init omap3_evm_init(void)
ads7846_dev_init();
omap3evm_init_smsc911x();
omap3_evm_display_init();
+
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ /* WL12xx WLAN Init */
+ if (wl12xx_set_platform_data(&omap3evm_wlan_data))
+ pr_err("error setting wl12xx data\n");
+ platform_device_register(&omap3evm_wlan_regulator);
+#endif
}
MACHINE_START(OMAP3EVM, "OMAP3 EVM")
/* Maintainer: Syed Mohammed Khasim - Texas Instruments */
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap3_evm_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap3_evm_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap3_evm_init,
.timer = &omap_timer,
MACHINE_END
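The backlight and DVI enable lines touched above sit on the TWL4030 GPIO expander, which is reached over I2C, so writing them may sleep; that is why the plain gpio_set_value() calls become gpio_set_value_cansleep(). A minimal sketch of the rule, assuming gpio refers to such an expander line:

#include <linux/kernel.h>
#include <linux/gpio.h>

static void example_set_backlight(unsigned gpio, int on)
{
	/* Only legal in sleepable context; gpiolib warns if plain
	 * gpio_set_value() is used on a chip that needs to sleep. */
	might_sleep();
	gpio_set_value_cansleep(gpio, on ? 1 : 0);
}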
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 15e4b08e99ba..b726943d7c93 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -195,11 +195,10 @@ static inline void __init board_smsc911x_init(void)
gpmc_smsc911x_init(&board_smsc911x_data);
}
-static void __init omap3logic_init_irq(void)
+static void __init omap3logic_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- omap_init_irq();
}
#ifdef CONFIG_OMAP_MUX
@@ -225,7 +224,8 @@ static void __init omap3logic_init(void)
MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
.boot_params = 0x80000100,
.map_io = omap3_map_io,
- .init_irq = omap3logic_init_irq,
+ .init_early = omap3logic_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap3logic_init,
.timer = &omap_timer,
MACHINE_END
@@ -233,7 +233,8 @@ MACHINE_END
MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
.boot_params = 0x80000100,
.map_io = omap3_map_io,
- .init_irq = omap3logic_init_irq,
+ .init_early = omap3logic_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap3logic_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 0b34beded11f..8e35bc32a491 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -253,14 +253,6 @@ static struct omap_dss_board_info pandora_dss_data = {
.default_device = &pandora_lcd_device,
};
-static struct platform_device pandora_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &pandora_dss_data,
- },
-};
-
static void pandora_wl1251_init_card(struct mmc_card *card)
{
/*
@@ -341,20 +333,21 @@ static struct twl4030_gpio_platform_data omap3pandora_gpio_data = {
};
static struct regulator_consumer_supply pandora_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
static struct regulator_consumer_supply pandora_vmmc2_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
static struct regulator_consumer_supply pandora_vmmc3_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.2");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2");
static struct regulator_consumer_supply pandora_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss");
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
static struct regulator_consumer_supply pandora_vdds_supplies[] = {
- REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
+ REGULATOR_SUPPLY("vdds_sdi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
};
static struct regulator_consumer_supply pandora_vcc_lcd_supply =
@@ -634,12 +627,11 @@ static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
}
};
-static void __init omap3pandora_init_irq(void)
+static void __init omap3pandora_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
- omap_init_irq();
}
static void __init pandora_wl1251_init(void)
@@ -677,7 +669,6 @@ fail:
static struct platform_device *omap3pandora_devices[] __initdata = {
&pandora_leds_gpio,
&pandora_keys_gpio,
- &pandora_dss_device,
&pandora_vwlan_device,
};
@@ -712,6 +703,7 @@ static void __init omap3pandora_init(void)
pandora_wl1251_init();
platform_add_devices(omap3pandora_devices,
ARRAY_SIZE(omap3pandora_devices));
+ omap_display_init(&pandora_dss_data);
omap_serial_init();
spi_register_board_info(omap3pandora_spi_board_info,
ARRAY_SIZE(omap3pandora_spi_board_info));
@@ -727,9 +719,10 @@ static void __init omap3pandora_init(void)
MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap3pandora_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap3pandora_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap3pandora_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 2a2dad447e86..2628aa718547 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -240,14 +240,6 @@ static struct omap_dss_board_info omap3_stalker_dss_data = {
.default_device = &omap3_stalker_dvi_device,
};
-static struct platform_device omap3_stalker_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &omap3_stalker_dss_data,
- },
-};
-
static struct regulator_consumer_supply omap3stalker_vmmc1_supply = {
.supply = "vmmc",
};
@@ -448,10 +440,8 @@ static struct twl4030_codec_data omap3stalker_codec_data = {
.audio = &omap3stalker_audio_data,
};
-static struct regulator_consumer_supply omap3_stalker_vdda_dac_supply = {
- .supply = "vdda_dac",
- .dev = &omap3_stalker_dss_device.dev,
-};
+static struct regulator_consumer_supply omap3_stalker_vdda_dac_supply =
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
/* VDAC for DSS driving S-Video */
static struct regulator_init_data omap3_stalker_vdac = {
@@ -469,9 +459,9 @@ static struct regulator_init_data omap3_stalker_vdac = {
};
/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply omap3_stalker_vpll2_supply = {
- .supply = "vdds_dsi",
- .dev = &omap3_stalker_lcd_device.dev,
+static struct regulator_consumer_supply omap3_stalker_vpll2_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
};
static struct regulator_init_data omap3_stalker_vpll2 = {
@@ -485,8 +475,8 @@ static struct regulator_init_data omap3_stalker_vpll2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3_stalker_vpll2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3_stalker_vpll2_supplies),
+ .consumer_supplies = omap3_stalker_vpll2_supplies,
};
static struct twl4030_platform_data omap3stalker_twldata = {
@@ -591,12 +581,16 @@ static struct spi_board_info omap3stalker_spi_board_info[] = {
static struct omap_board_config_kernel omap3_stalker_config[] __initdata = {
};
-static void __init omap3_stalker_init_irq(void)
+static void __init omap3_stalker_init_early(void)
{
omap_board_config = omap3_stalker_config;
omap_board_config_size = ARRAY_SIZE(omap3_stalker_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params, NULL);
+}
+
+static void __init omap3_stalker_init_irq(void)
+{
omap_init_irq();
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(12);
@@ -604,7 +598,6 @@ static void __init omap3_stalker_init_irq(void)
}
static struct platform_device *omap3_stalker_devices[] __initdata = {
- &omap3_stalker_dss_device,
&keys_gpio,
};
@@ -644,6 +637,7 @@ static void __init omap3_stalker_init(void)
platform_add_devices(omap3_stalker_devices,
ARRAY_SIZE(omap3_stalker_devices));
+ omap_display_init(&omap3_stalker_dss_data);
spi_register_board_info(omap3stalker_spi_board_info,
ARRAY_SIZE(omap3stalker_spi_board_info));
@@ -666,6 +660,7 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
/* Maintainer: Jason Lam -lzg@ema-tech.com */
.boot_params = 0x80000100,
.map_io = omap3_map_io,
+ .init_early = omap3_stalker_init_early,
.init_irq = omap3_stalker_init_irq,
.init_machine = omap3_stalker_init,
.timer = &omap_timer,
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index db1f74fe6c4f..6a60f79dcccb 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -415,7 +415,7 @@ static struct omap_board_mux board_mux[] __initdata = {
};
#endif
-static void __init omap3_touchbook_init_irq(void)
+static void __init omap3_touchbook_init_early(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap_board_config = omap3_touchbook_config;
@@ -423,6 +423,10 @@ static void __init omap3_touchbook_init_irq(void)
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
+}
+
+static void __init omap3_touchbook_init_irq(void)
+{
omap_init_irq();
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(12);
@@ -538,8 +542,9 @@ static void __init omap3_touchbook_init(void)
MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
/* Maintainer: Gregoire Gentil - http://www.alwaysinnovating.com */
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = omap3_touchbook_init_early,
.init_irq = omap3_touchbook_init_irq,
.init_machine = omap3_touchbook_init,
.timer = &omap_timer,
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index e944025d5ef8..a52b6c33cfc5 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -26,6 +26,8 @@
#include <linux/usb/otg.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/wl12xx.h>
#include <mach/hardware.h>
#include <mach/omap4-common.h>
@@ -45,6 +47,8 @@
#define GPIO_HUB_POWER 1
#define GPIO_HUB_NRESET 62
+#define GPIO_WIFI_PMENA 43
+#define GPIO_WIFI_IRQ 53
static struct gpio_led gpio_leds[] = {
{
@@ -76,11 +80,10 @@ static struct platform_device *panda_devices[] __initdata = {
&leds_gpio,
};
-static void __init omap4_panda_init_irq(void)
+static void __init omap4_panda_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
- gic_init_irq();
}
static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
@@ -162,16 +165,62 @@ static struct omap2_hsmmc_info mmc[] = {
.gpio_wp = -EINVAL,
.gpio_cd = -EINVAL,
},
+ {
+ .name = "wl1271",
+ .mmc = 5,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
+ .gpio_wp = -EINVAL,
+ .gpio_cd = -EINVAL,
+ .ocr_mask = MMC_VDD_165_195,
+ .nonremovable = true,
+ },
{} /* Terminator */
};
static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = {
{
.supply = "vmmc",
- .dev_name = "mmci-omap-hs.0",
+ .dev_name = "omap_hsmmc.0",
},
};
+static struct regulator_consumer_supply omap4_panda_vmmc5_supply = {
+ .supply = "vmmc",
+ .dev_name = "omap_hsmmc.4",
+};
+
+static struct regulator_init_data panda_vmmc5 = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap4_panda_vmmc5_supply,
+};
+
+static struct fixed_voltage_config panda_vwlan = {
+ .supply_name = "vwl1271",
+ .microvolts = 1800000, /* 1.8V */
+ .gpio = GPIO_WIFI_PMENA,
+ .startup_delay = 70000, /* 70msec */
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &panda_vmmc5,
+};
+
+static struct platform_device omap_vwlan_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &panda_vwlan,
+ },
+};
+
+struct wl12xx_platform_data omap_panda_wlan_data __initdata = {
+ .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
+ /* PANDA ref clock is 38.4 MHz */
+ .board_ref_clock = 2,
+};
+
static int omap4_twl6030_hsmmc_late_init(struct device *dev)
{
int ret = 0;
@@ -305,7 +354,6 @@ static struct regulator_init_data omap4_panda_vana = {
.constraints = {
.min_uV = 2100000,
.max_uV = 2100000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -317,7 +365,6 @@ static struct regulator_init_data omap4_panda_vcxio = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -329,7 +376,6 @@ static struct regulator_init_data omap4_panda_vdac = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -349,6 +395,12 @@ static struct regulator_init_data omap4_panda_vusb = {
},
};
+static struct regulator_init_data omap4_panda_clk32kg = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
static struct twl4030_platform_data omap4_panda_twldata = {
.irq_base = TWL6030_IRQ_BASE,
.irq_end = TWL6030_IRQ_END,
@@ -364,6 +416,7 @@ static struct twl4030_platform_data omap4_panda_twldata = {
.vaux1 = &omap4_panda_vaux1,
.vaux2 = &omap4_panda_vaux2,
.vaux3 = &omap4_panda_vaux3,
+ .clk32kg = &omap4_panda_clk32kg,
.usb = &omap4_usbphy_data,
};
@@ -391,6 +444,19 @@ static int __init omap4_panda_i2c_init(void)
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
+ /* WLAN IRQ - GPIO 53 */
+ OMAP4_MUX(GPMC_NCS3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ /* WLAN POWER ENABLE - GPIO 43 */
+ OMAP4_MUX(GPMC_A19, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
+ /* WLAN SDIO: MMC5 CMD */
+ OMAP4_MUX(SDMMC5_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ /* WLAN SDIO: MMC5 CLK */
+ OMAP4_MUX(SDMMC5_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ /* WLAN SDIO: MMC5 DAT[0-3] */
+ OMAP4_MUX(SDMMC5_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP4_MUX(SDMMC5_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP4_MUX(SDMMC5_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP4_MUX(SDMMC5_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
@@ -405,8 +471,12 @@ static void __init omap4_panda_init(void)
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, package);
+ if (wl12xx_set_platform_data(&omap_panda_wlan_data))
+ pr_err("error setting wl12xx data\n");
+
omap4_panda_i2c_init();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
+ platform_device_register(&omap_vwlan_device);
omap_serial_init();
omap4_twl6030_hsmmc_init(mmc);
omap4_ehci_init();
@@ -424,7 +494,8 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
.boot_params = 0x80000100,
.reserve = omap_reserve,
.map_io = omap4_panda_map_io,
- .init_irq = omap4_panda_init_irq,
+ .init_early = omap4_panda_init_early,
+ .init_irq = gic_init_irq,
.init_machine = omap4_panda_init,
.timer = &omap_timer,
MACHINE_END
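On both the OMAP3 EVM and Panda the WLAN chip is wired as an SDIO card behind a GPIO-controlled fixed regulator, and wl12xx_set_platform_data() hands the IRQ and reference-clock index to the WLAN driver (Panda passes the raw value 2 for the same 38.4 MHz reference that the EVM hunk expresses as WL12XX_REFCLOCK_38). A rough consumer-side sketch, assuming the driver fetches the data with wl12xx_get_platform_data() as the wl12xx stack of this era does:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/wl12xx.h>

static int example_wlan_probe(void)
{
	struct wl12xx_platform_data *pdata = wl12xx_get_platform_data();

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);	/* no board registered any data */

	pr_info("wl12xx: irq %d, ref clock index %d\n",
		pdata->irq, pdata->board_ref_clock);
	return 0;
}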
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index cb26e5d8268d..a33ec0edec13 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -409,14 +409,13 @@ static struct omap_board_config_kernel overo_config[] __initdata = {
{ OMAP_TAG_LCD, &overo_lcd_config },
};
-static void __init overo_init_irq(void)
+static void __init overo_init_early(void)
{
omap_board_config = overo_config;
omap_board_config_size = ARRAY_SIZE(overo_config);
omap2_init_common_infrastructure();
omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
- omap_init_irq();
}
static struct platform_device *overo_devices[] __initdata = {
@@ -501,9 +500,10 @@ static void __init overo_init(void)
MACHINE_START(OVERO, "Gumstix Overo")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = overo_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = overo_init_early,
+ .init_irq = omap_init_irq,
.init_machine = overo_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 39a71bb8a308..2af8b05e786d 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -33,7 +33,7 @@
#include "sdram-nokia.h"
static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"),
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
};
/* Fixed regulator for internal eMMC */
@@ -138,14 +138,13 @@ static void __init rm680_peripherals_init(void)
omap2_hsmmc_init(mmc);
}
-static void __init rm680_init_irq(void)
+static void __init rm680_init_early(void)
{
struct omap_sdrc_params *sdrc_params;
omap2_init_common_infrastructure();
sdrc_params = nokia_get_sdram_timings();
omap2_init_common_devices(sdrc_params, sdrc_params);
- omap_init_irq();
}
#ifdef CONFIG_OMAP_MUX
@@ -176,9 +175,10 @@ static void __init rm680_map_io(void)
MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
.boot_params = 0x80000100,
- .map_io = rm680_map_io,
.reserve = omap_reserve,
- .init_irq = rm680_init_irq,
+ .map_io = rm680_map_io,
+ .init_early = rm680_init_early,
+ .init_irq = omap_init_irq,
.init_machine = rm680_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index e75e240cad67..e787a931e50d 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -331,13 +331,13 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
};
static struct regulator_consumer_supply rx51_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
static struct regulator_consumer_supply rx51_vaux3_supply =
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
static struct regulator_consumer_supply rx51_vsim_supply =
- REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
/* tlv320aic3x analog supplies */
@@ -348,7 +348,7 @@ static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
/* tpa6130a2 */
REGULATOR_SUPPLY("Vdd", "2-0060"),
/* Keep vmmc as last item. It is not iterated for newer boards */
- REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"),
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
};
static struct regulator_consumer_supply rx51_vio_supplies[] = {
@@ -360,11 +360,11 @@ static struct regulator_consumer_supply rx51_vio_supplies[] = {
};
static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
- REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
+ REGULATOR_SUPPLY("vdds_sdi", "omap_display"),
};
static struct regulator_consumer_supply rx51_vdac_supply[] = {
- REGULATOR_SUPPLY("vdda_dac", "omapdss"),
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc"),
};
static struct regulator_init_data rx51_vaux1 = {
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index acd670054d9a..89a66db8b77d 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -66,18 +66,6 @@ static struct omap_dss_board_info rx51_dss_board_info = {
.default_device = &rx51_lcd_device,
};
-struct platform_device rx51_display_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &rx51_dss_board_info,
- },
-};
-
-static struct platform_device *rx51_video_devices[] __initdata = {
- &rx51_display_device,
-};
-
static int __init rx51_video_init(void)
{
if (!machine_is_nokia_rx51())
@@ -95,8 +83,7 @@ static int __init rx51_video_init(void)
gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
- platform_add_devices(rx51_video_devices,
- ARRAY_SIZE(rx51_video_devices));
+ omap_display_init(&rx51_dss_board_info);
return 0;
}
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index f53fc551c58f..3cf72fe6d75b 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -98,7 +98,7 @@ static struct omap_board_config_kernel rx51_config[] = {
{ OMAP_TAG_LCD, &rx51_lcd_config },
};
-static void __init rx51_init_irq(void)
+static void __init rx51_init_early(void)
{
struct omap_sdrc_params *sdrc_params;
@@ -108,7 +108,6 @@ static void __init rx51_init_irq(void)
omap2_init_common_infrastructure();
sdrc_params = nokia_get_sdram_timings();
omap2_init_common_devices(sdrc_params, sdrc_params);
- omap_init_irq();
}
extern void __init rx51_peripherals_init(void);
@@ -149,9 +148,10 @@ static void __init rx51_map_io(void)
MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
/* Maintainer: Lauri Leukkunen <lauri.leukkunen@nokia.com> */
.boot_params = 0x80000100,
- .map_io = rx51_map_io,
.reserve = omap_reserve,
- .init_irq = rx51_init_irq,
+ .map_io = rx51_map_io,
+ .init_early = rx51_init_early,
+ .init_irq = omap_init_irq,
.init_machine = rx51_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
new file mode 100644
index 000000000000..f2b097190e07
--- /dev/null
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -0,0 +1,62 @@
+/*
+ * Code for TI8168 EVM.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/irqs.h>
+#include <plat/board.h>
+#include <plat/common.h>
+
+static struct omap_board_config_kernel ti8168_evm_config[] __initdata = {
+};
+
+static void __init ti8168_init_early(void)
+{
+ omap_board_config = ti8168_evm_config;
+ omap_board_config_size = ARRAY_SIZE(ti8168_evm_config);
+ omap2_init_common_infrastructure();
+ omap2_init_common_devices(NULL, NULL);
+}
+
+static void __init ti8168_evm_init_irq(void)
+{
+ omap_init_irq();
+}
+
+static void __init ti8168_evm_init(void)
+{
+ omap_serial_init();
+}
+
+static void __init ti8168_evm_map_io(void)
+{
+ omap2_set_globals_ti816x();
+ omapti816x_map_common_io();
+}
+
+MACHINE_START(TI8168EVM, "ti8168evm")
+ /* Maintainer: Texas Instruments */
+ .boot_params = 0x80000100,
+ .map_io = ti8168_evm_map_io,
+ .init_early = ti8168_init_early,
+ .init_irq = ti8168_evm_init_irq,
+ .timer = &omap_timer,
+ .init_machine = ti8168_evm_init,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index 6bcd43657aed..37b84c2b850f 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -130,14 +130,6 @@ static struct omap_dss_board_info zoom_dss_data = {
.default_device = &zoom_lcd_device,
};
-static struct platform_device zoom_dss_device = {
- .name = "omapdss",
- .id = -1,
- .dev = {
- .platform_data = &zoom_dss_data,
- },
-};
-
static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
.turbo_mode = 1,
.single_channel = 1, /* 0: slave, 1: master */
@@ -153,14 +145,9 @@ static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
},
};
-static struct platform_device *zoom_display_devices[] __initdata = {
- &zoom_dss_device,
-};
-
void __init zoom_display_init(void)
{
- platform_add_devices(zoom_display_devices,
- ARRAY_SIZE(zoom_display_devices));
+ omap_display_init(&zoom_dss_data);
spi_register_board_info(nec_8048_spi_board_info,
ARRAY_SIZE(nec_8048_spi_board_info));
zoom_lcd_panel_init();
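Every board in this series drops its hand-rolled "omapdss" platform_device in favour of omap_display_init(). The helper itself lives in mach-omap2/display.c and is not part of this hunk; only as an assumption for orientation, it registers the DSS core device and its per-output children ("omap_dsi1", "omap_venc", ...) with the board data attached, roughly along these lines:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <plat/display.h>

/* Illustrative stand-in only; the real omap_display_init() also creates
 * the per-output child devices and wires up their platform data. */
static struct platform_device example_display_device = {
	.name = "omap_display",
	.id   = -1,
};

static int __init example_display_init(struct omap_dss_board_info *board_data)
{
	example_display_device.dev.platform_data = board_data;
	return platform_device_register(&example_display_device);
}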
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index e0e040f34c68..c387301ac120 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -118,7 +118,7 @@ static struct regulator_consumer_supply zoom_vmmc2_supply = {
static struct regulator_consumer_supply zoom_vmmc3_supply = {
.supply = "vmmc",
- .dev_name = "mmci-omap-hs.2",
+ .dev_name = "omap_hsmmc.2",
};
/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
@@ -226,11 +226,13 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct regulator_consumer_supply zoom_vpll2_supply =
- REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+static struct regulator_consumer_supply zoom_vpll2_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omap_display"),
+ REGULATOR_SUPPLY("vdds_dsi", "omap_dsi1"),
+};
static struct regulator_consumer_supply zoom_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss");
+ REGULATOR_SUPPLY("vdda_dac", "omap_venc");
static struct regulator_init_data zoom_vpll2 = {
.constraints = {
@@ -241,8 +243,8 @@ static struct regulator_init_data zoom_vpll2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &zoom_vpll2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(zoom_vpll2_supplies),
+ .consumer_supplies = zoom_vpll2_supplies,
};
static struct regulator_init_data zoom_vdac = {
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index e26754c24ee8..7e3f1595d77b 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -16,6 +16,7 @@
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/i2c/twl.h>
+#include <linux/mtd/nand.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -33,7 +34,7 @@
#define ZOOM3_EHCI_RESET_GPIO 64
-static void __init omap_zoom_init_irq(void)
+static void __init omap_zoom_init_early(void)
{
omap2_init_common_infrastructure();
if (machine_is_omap_zoom2())
@@ -42,8 +43,6 @@ static void __init omap_zoom_init_irq(void)
else if (machine_is_omap_zoom3())
omap2_init_common_devices(h8mbx00u0mer0em_sdrc_params,
h8mbx00u0mer0em_sdrc_params);
-
- omap_init_irq();
}
#ifdef CONFIG_OMAP_MUX
@@ -126,8 +125,8 @@ static void __init omap_zoom_init(void)
usb_ehci_init(&ehci_pdata);
}
- board_nand_init(zoom_nand_partitions,
- ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS);
+ board_nand_init(zoom_nand_partitions, ARRAY_SIZE(zoom_nand_partitions),
+ ZOOM_NAND_CS, NAND_BUSWIDTH_16);
zoom_debugboard_init();
zoom_peripherals_init();
zoom_display_init();
@@ -135,18 +134,20 @@ static void __init omap_zoom_init(void)
MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap_zoom_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap_zoom_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_zoom_init,
.timer = &omap_timer,
MACHINE_END
MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
.boot_params = 0x80000100,
- .map_io = omap3_map_io,
.reserve = omap_reserve,
- .init_irq = omap_zoom_init_irq,
+ .map_io = omap3_map_io,
+ .init_early = omap_zoom_init_early,
+ .init_irq = omap_init_irq,
.init_machine = omap_zoom_init,
.timer = &omap_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c
index a781cd6795a4..e25364de028a 100644
--- a/arch/arm/mach-omap2/clkt_clksel.c
+++ b/arch/arm/mach-omap2/clkt_clksel.c
@@ -97,7 +97,7 @@ static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk,
u32 *field_val)
{
const struct clksel *clks;
- const struct clksel_rate *clkr, *max_clkr;
+ const struct clksel_rate *clkr, *max_clkr = NULL;
u8 max_div = 0;
clks = _get_clksel_by_parent(clk, src_clk);
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 337392c3f549..acb7ae5b0a25 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
- fint = clk->parent->rate / (n + 1);
+ fint = clk->parent->rate / n;
if (fint < DPLL_FINT_BAND1_MIN) {
pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 0a992bc8d0d8..3c1712b045aa 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1786,10 +1786,10 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_242X),
CLK(NULL, "gfx_ick", &gfx_ick, CK_242X),
/* DSS domain clocks */
- CLK("omapdss", "ick", &dss_ick, CK_242X),
- CLK("omapdss", "dss1_fck", &dss1_fck, CK_242X),
- CLK("omapdss", "dss2_fck", &dss2_fck, CK_242X),
- CLK("omapdss", "tv_fck", &dss_54m_fck, CK_242X),
+ CLK("omap_dss", "ick", &dss_ick, CK_242X),
+ CLK("omap_dss", "fck", &dss1_fck, CK_242X),
+ CLK("omap_dss", "sys_clk", &dss2_fck, CK_242X),
+ CLK("omap_dss", "tv_clk", &dss_54m_fck, CK_242X),
/* L3 domain clocks */
CLK(NULL, "core_l3_ck", &core_l3_ck, CK_242X),
CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index c047dcd007e5..947d7dffc15b 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1890,10 +1890,10 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "mdm_ick", &mdm_ick, CK_243X),
CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X),
/* DSS domain clocks */
- CLK("omapdss", "ick", &dss_ick, CK_243X),
- CLK("omapdss", "dss1_fck", &dss1_fck, CK_243X),
- CLK("omapdss", "dss2_fck", &dss2_fck, CK_243X),
- CLK("omapdss", "tv_fck", &dss_54m_fck, CK_243X),
+ CLK("omap_dss", "ick", &dss_ick, CK_243X),
+ CLK("omap_dss", "fck", &dss1_fck, CK_243X),
+ CLK("omap_dss", "sys_clk", &dss2_fck, CK_243X),
+ CLK("omap_dss", "tv_clk", &dss_54m_fck, CK_243X),
/* L3 domain clocks */
CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X),
CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X),
@@ -1984,15 +1984,15 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "pka_ick", &pka_ick, CK_243X),
CLK(NULL, "usb_fck", &usb_fck, CK_243X),
CLK("musb-omap2430", "ick", &usbhs_ick, CK_243X),
- CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X),
- CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X),
- CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
- CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_243X),
+ CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_243X),
+ CLK("omap_hsmmc.0", "fck", &mmchs1_fck, CK_243X),
+ CLK("omap_hsmmc.1", "ick", &mmchs2_ick, CK_243X),
+ CLK("omap_hsmmc.1", "fck", &mmchs2_fck, CK_243X),
CLK(NULL, "gpio5_ick", &gpio5_ick, CK_243X),
CLK(NULL, "gpio5_fck", &gpio5_fck, CK_243X),
CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
- CLK("mmci-omap-hs.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
- CLK("mmci-omap-hs.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
+ CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
+ CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
};
/*
diff --git a/arch/arm/mach-omap2/clock2xxx.h b/arch/arm/mach-omap2/clock2xxx.h
index 6a658b890c17..cc5c8d422c5b 100644
--- a/arch/arm/mach-omap2/clock2xxx.h
+++ b/arch/arm/mach-omap2/clock2xxx.h
@@ -20,13 +20,13 @@ u32 omap2xxx_get_apll_clkin(void);
u32 omap2xxx_get_sysclkdiv(void);
void omap2xxx_clk_prepare_for_reboot(void);
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
int omap2420_clk_init(void);
#else
#define omap2420_clk_init() 0
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
int omap2430_clk_init(void);
#else
#define omap2430_clk_init() 0
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 403a4a1d3f9c..f43e7ece7b8f 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3290,10 +3290,10 @@ static struct omap_clk omap3xxx_clks[] = {
CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
- CLK("mmci-omap-hs.2", "fck", &mmchs3_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_3XXX),
+ CLK("omap_hsmmc.2", "fck", &mmchs3_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap_hsmmc.1", "fck", &mmchs2_fck, CK_3XXX),
CLK(NULL, "mspro_fck", &mspro_fck, CK_34XX | CK_36XX),
- CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_3XXX),
+ CLK("omap_hsmmc.0", "fck", &mmchs1_fck, CK_3XXX),
CLK("omap_i2c.3", "fck", &i2c3_fck, CK_3XXX),
CLK("omap_i2c.2", "fck", &i2c2_fck, CK_3XXX),
CLK("omap_i2c.1", "fck", &i2c1_fck, CK_3XXX),
@@ -3323,13 +3323,13 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK("ehci-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap_hsmmc.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
CLK("omap-sham", "ick", &sha12_ick, CK_34XX | CK_36XX),
CLK(NULL, "des2_ick", &des2_ick, CK_34XX | CK_36XX),
- CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX),
- CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_3XXX),
+ CLK("omap_hsmmc.1", "ick", &mmchs2_ick, CK_3XXX),
+ CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_3XXX),
CLK(NULL, "mspro_ick", &mspro_ick, CK_34XX | CK_36XX),
CLK("omap_hdq.0", "ick", &hdq_ick, CK_3XXX),
CLK("omap2_mcspi.4", "ick", &mcspi4_ick, CK_3XXX),
@@ -3357,13 +3357,13 @@ static struct omap_clk omap3xxx_clks[] = {
CLK("omap_rng", "ick", &rng_ick, CK_34XX | CK_36XX),
CLK(NULL, "sha11_ick", &sha11_ick, CK_34XX | CK_36XX),
CLK(NULL, "des1_ick", &des1_ick, CK_34XX | CK_36XX),
- CLK("omapdss", "dss1_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
- CLK("omapdss", "dss1_fck", &dss1_alwon_fck_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("omapdss", "tv_fck", &dss_tv_fck, CK_3XXX),
- CLK("omapdss", "video_fck", &dss_96m_fck, CK_3XXX),
- CLK("omapdss", "dss2_fck", &dss2_alwon_fck, CK_3XXX),
- CLK("omapdss", "ick", &dss_ick_3430es1, CK_3430ES1),
- CLK("omapdss", "ick", &dss_ick_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap_dss", "fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
+ CLK("omap_dss", "fck", &dss1_alwon_fck_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap_dss", "tv_clk", &dss_tv_fck, CK_3XXX),
+ CLK("omap_dss", "video_clk", &dss_96m_fck, CK_3XXX),
+ CLK("omap_dss", "sys_clk", &dss2_alwon_fck, CK_3XXX),
+ CLK("omap_dss", "ick", &dss_ick_3430es1, CK_3430ES1),
+ CLK("omap_dss", "ick", &dss_ick_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "cam_mclk", &cam_mclk, CK_34XX | CK_36XX),
CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
@@ -3471,6 +3471,9 @@ int __init omap3xxx_clk_init(void)
} else if (cpu_is_omap3630()) {
cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
cpu_clkflg = CK_36XX;
+ } else if (cpu_is_ti816x()) {
+ cpu_mask = RATE_IN_TI816X;
+ cpu_clkflg = CK_TI816X;
} else if (cpu_is_omap34xx()) {
if (omap_rev() == OMAP3430_REV_ES1_0) {
cpu_mask = RATE_IN_3430ES1;
@@ -3550,7 +3553,7 @@ int __init omap3xxx_clk_init(void)
/*
* Lock DPLL5 and put it in autoidle.
*/
- if (omap_rev() >= OMAP3430_REV_ES2_0)
+ if (!cpu_is_ti816x() && (omap_rev() >= OMAP3430_REV_ES2_0))
omap3_clk_lock_dpll5();
/* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index de9ec8ddd2ae..01153d88085f 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -3106,11 +3106,16 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_443X),
CLK(NULL, "dmic_fck", &dmic_fck, CK_443X),
CLK(NULL, "dsp_fck", &dsp_fck, CK_443X),
- CLK(NULL, "dss_sys_clk", &dss_sys_clk, CK_443X),
- CLK(NULL, "dss_tv_clk", &dss_tv_clk, CK_443X),
- CLK(NULL, "dss_dss_clk", &dss_dss_clk, CK_443X),
- CLK(NULL, "dss_48mhz_clk", &dss_48mhz_clk, CK_443X),
- CLK(NULL, "dss_fck", &dss_fck, CK_443X),
+ CLK("omap_dss", "sys_clk", &dss_sys_clk, CK_443X),
+ CLK("omap_dss", "tv_clk", &dss_tv_clk, CK_443X),
+ CLK("omap_dss", "dss_clk", &dss_dss_clk, CK_443X),
+ CLK("omap_dss", "video_clk", &dss_48mhz_clk, CK_443X),
+ CLK("omap_dss", "fck", &dss_fck, CK_443X),
+ /*
+ * On OMAP4, DSS ick is a dummy clock; this is needed for compatibility
+ * with OMAP2/3.
+ */
+ CLK("omap_dss", "ick", &dummy_ck, CK_443X),
CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X),
CLK(NULL, "emif1_fck", &emif1_fck, CK_443X),
CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),
@@ -3158,11 +3163,11 @@ static struct omap_clk omap44xx_clks[] = {
CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_443X),
CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_443X),
CLK("omap2_mcspi.4", "fck", &mcspi4_fck, CK_443X),
- CLK("mmci-omap-hs.0", "fck", &mmc1_fck, CK_443X),
- CLK("mmci-omap-hs.1", "fck", &mmc2_fck, CK_443X),
- CLK("mmci-omap-hs.2", "fck", &mmc3_fck, CK_443X),
- CLK("mmci-omap-hs.3", "fck", &mmc4_fck, CK_443X),
- CLK("mmci-omap-hs.4", "fck", &mmc5_fck, CK_443X),
+ CLK("omap_hsmmc.0", "fck", &mmc1_fck, CK_443X),
+ CLK("omap_hsmmc.1", "fck", &mmc2_fck, CK_443X),
+ CLK("omap_hsmmc.2", "fck", &mmc3_fck, CK_443X),
+ CLK("omap_hsmmc.3", "fck", &mmc4_fck, CK_443X),
+ CLK("omap_hsmmc.4", "fck", &mmc5_fck, CK_443X),
CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
CLK(NULL, "ocp2scp_usb_phy_ick", &ocp2scp_usb_phy_ick, CK_443X),
CLK(NULL, "ocp_wp_noc_ick", &ocp_wp_noc_ick, CK_443X),
@@ -3245,11 +3250,11 @@ static struct omap_clk omap44xx_clks[] = {
CLK("omap_i2c.2", "ick", &dummy_ck, CK_443X),
CLK("omap_i2c.3", "ick", &dummy_ck, CK_443X),
CLK("omap_i2c.4", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.0", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.1", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.2", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.3", "ick", &dummy_ck, CK_443X),
- CLK("mmci-omap-hs.4", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.4", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X),
CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X),
diff --git a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
index e4a7133ea3b3..e6f0d18d5e8d 100644
--- a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
+++ b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
@@ -171,7 +171,7 @@ static struct clkdm_dep core_24xx_wkdeps[] = {
/* 2430-specific possible wakeup dependencies */
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
/* 2430 PM_WKDEP_MDM: CORE, MPU, WKUP */
static struct clkdm_dep mdm_2430_wkdeps[] = {
@@ -194,7 +194,7 @@ static struct clkdm_dep mdm_2430_wkdeps[] = {
{ NULL },
};
-#endif /* CONFIG_ARCH_OMAP2430 */
+#endif /* CONFIG_SOC_OMAP2430 */
/* OMAP3-specific possible dependencies */
@@ -450,7 +450,7 @@ static struct clockdomain cm_clkdm = {
* 2420-only clockdomains
*/
-#if defined(CONFIG_ARCH_OMAP2420)
+#if defined(CONFIG_SOC_OMAP2420)
static struct clockdomain mpu_2420_clkdm = {
.name = "mpu_clkdm",
@@ -514,14 +514,14 @@ static struct clockdomain dss_2420_clkdm = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
-#endif /* CONFIG_ARCH_OMAP2420 */
+#endif /* CONFIG_SOC_OMAP2420 */
/*
* 2430-only clockdomains
*/
-#if defined(CONFIG_ARCH_OMAP2430)
+#if defined(CONFIG_SOC_OMAP2430)
static struct clockdomain mpu_2430_clkdm = {
.name = "mpu_clkdm",
@@ -600,7 +600,7 @@ static struct clockdomain dss_2430_clkdm = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
-#endif /* CONFIG_ARCH_OMAP2430 */
+#endif /* CONFIG_SOC_OMAP2430 */
/*
@@ -811,7 +811,7 @@ static struct clockdomain *clockdomains_omap2[] __initdata = {
&cm_clkdm,
&prm_clkdm,
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
&mpu_2420_clkdm,
&iva1_2420_clkdm,
&dsp_2420_clkdm,
@@ -821,7 +821,7 @@ static struct clockdomain *clockdomains_omap2[] __initdata = {
&dss_2420_clkdm,
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
&mpu_2430_clkdm,
&mdm_clkdm,
&dsp_2430_clkdm,
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 778929f7e92d..48de4513de49 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -40,7 +40,7 @@ static void __init __omap2_set_globals(struct omap_globals *omap2_globals)
#endif
-#if defined(CONFIG_ARCH_OMAP2420)
+#if defined(CONFIG_SOC_OMAP2420)
static struct omap_globals omap242x_globals = {
.class = OMAP242X_CLASS,
@@ -61,7 +61,7 @@ void __init omap2_set_globals_242x(void)
}
#endif
-#if defined(CONFIG_ARCH_OMAP2430)
+#if defined(CONFIG_SOC_OMAP2430)
static struct omap_globals omap243x_globals = {
.class = OMAP243X_CLASS,
@@ -108,6 +108,27 @@ void __init omap3_map_io(void)
omap2_set_globals_3xxx();
omap34xx_map_common_io();
}
+
+/*
+ * Adjust TAP register base such that omap3_check_revision accesses the correct
+ * TI816X register for checking device ID (it adds 0x204 to tap base while
+ * TI816X DEVICE ID register is at offset 0x600 from control base).
+ */
+#define TI816X_TAP_BASE (TI816X_CTRL_BASE + \
+ TI816X_CONTROL_DEVICE_ID - 0x204)
+
+static struct omap_globals ti816x_globals = {
+ .class = OMAP343X_CLASS,
+ .tap = OMAP2_L4_IO_ADDRESS(TI816X_TAP_BASE),
+ .ctrl = TI816X_CTRL_BASE,
+ .prm = TI816X_PRCM_BASE,
+ .cm = TI816X_PRCM_BASE,
+};
+
+void __init omap2_set_globals_ti816x(void)
+{
+ __omap2_set_globals(&ti816x_globals);
+}
#endif
#if defined(CONFIG_ARCH_OMAP4)
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index f0629ae04102..c2804c1c4efd 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -52,6 +52,9 @@
#define OMAP343X_CONTROL_PADCONFS_WKUP 0xa00
#define OMAP343X_CONTROL_GENERAL_WKUP 0xa60
+/* TI816X specific control submodules */
+#define TI816X_CONTROL_DEVCONF 0x600
+
/* Control register offsets - read/write with omap_ctrl_{read,write}{bwl}() */
#define OMAP2_CONTROL_SYSCONFIG (OMAP2_CONTROL_INTERFACE + 0x10)
@@ -241,6 +244,9 @@
#define OMAP3_PADCONF_SAD2D_MSTANDBY 0x250
#define OMAP3_PADCONF_SAD2D_IDLEACK 0x254
+/* TI816X CONTROL_DEVCONF register offsets */
+#define TI816X_CONTROL_DEVICE_ID (TI816X_CONTROL_DEVCONF + 0x000)
+
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 2c9c912f2c42..2cb720b5b12e 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
@@ -30,6 +31,7 @@
#include <plat/dma.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
+#include <plat/omap4-keypad.h>
#include "mux.h"
#include "control.h"
@@ -141,96 +143,70 @@ static inline void omap_init_camera(void)
}
#endif
-#if defined(CONFIG_OMAP_MBOX_FWK) || defined(CONFIG_OMAP_MBOX_FWK_MODULE)
-
-#define MBOX_REG_SIZE 0x120
-
-#ifdef CONFIG_ARCH_OMAP2
-static struct resource omap2_mbox_resources[] = {
- {
- .start = OMAP24XX_MAILBOX_BASE,
- .end = OMAP24XX_MAILBOX_BASE + MBOX_REG_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_24XX_MAIL_U0_MPU,
- .flags = IORESOURCE_IRQ,
- .name = "dsp",
- },
+struct omap_device_pm_latency omap_keyboard_latency[] = {
{
- .start = INT_24XX_MAIL_U3_MPU,
- .flags = IORESOURCE_IRQ,
- .name = "iva",
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
},
};
-static int omap2_mbox_resources_sz = ARRAY_SIZE(omap2_mbox_resources);
-#else
-#define omap2_mbox_resources NULL
-#define omap2_mbox_resources_sz 0
-#endif
-#ifdef CONFIG_ARCH_OMAP3
-static struct resource omap3_mbox_resources[] = {
- {
- .start = OMAP34XX_MAILBOX_BASE,
- .end = OMAP34XX_MAILBOX_BASE + MBOX_REG_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_24XX_MAIL_U0_MPU,
- .flags = IORESOURCE_IRQ,
- .name = "dsp",
- },
-};
-static int omap3_mbox_resources_sz = ARRAY_SIZE(omap3_mbox_resources);
-#else
-#define omap3_mbox_resources NULL
-#define omap3_mbox_resources_sz 0
-#endif
+int __init omap4_keyboard_init(struct omap4_keypad_platform_data
+ *sdp4430_keypad_data)
+{
+ struct omap_device *od;
+ struct omap_hwmod *oh;
+ struct omap4_keypad_platform_data *keypad_data;
+ unsigned int id = -1;
+ char *oh_name = "kbd";
+ char *name = "omap4-keypad";
-#ifdef CONFIG_ARCH_OMAP4
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ return -ENODEV;
+ }
-#define OMAP4_MBOX_REG_SIZE 0x130
-static struct resource omap4_mbox_resources[] = {
- {
- .start = OMAP44XX_MAILBOX_BASE,
- .end = OMAP44XX_MAILBOX_BASE +
- OMAP4_MBOX_REG_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP44XX_IRQ_MAIL_U0,
- .flags = IORESOURCE_IRQ,
- .name = "mbox",
- },
-};
-static int omap4_mbox_resources_sz = ARRAY_SIZE(omap4_mbox_resources);
-#else
-#define omap4_mbox_resources NULL
-#define omap4_mbox_resources_sz 0
-#endif
+ keypad_data = sdp4430_keypad_data;
-static struct platform_device mbox_device = {
- .name = "omap-mailbox",
- .id = -1,
+ od = omap_device_build(name, id, oh, keypad_data,
+ sizeof(struct omap4_keypad_platform_data),
+ omap_keyboard_latency,
+ ARRAY_SIZE(omap_keyboard_latency), 0);
+
+ if (IS_ERR(od)) {
+ WARN(1, "Can't build omap_device for %s:%s.\n",
+ name, oh->name);
+ return PTR_ERR(od);
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_OMAP_MBOX_FWK) || defined(CONFIG_OMAP_MBOX_FWK_MODULE)
+static struct omap_device_pm_latency mbox_latencies[] = {
+ [0] = {
+ .activate_func = omap_device_enable_hwmods,
+ .deactivate_func = omap_device_idle_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
};
static inline void omap_init_mbox(void)
{
- if (cpu_is_omap24xx()) {
- mbox_device.resource = omap2_mbox_resources;
- mbox_device.num_resources = omap2_mbox_resources_sz;
- } else if (cpu_is_omap34xx()) {
- mbox_device.resource = omap3_mbox_resources;
- mbox_device.num_resources = omap3_mbox_resources_sz;
- } else if (cpu_is_omap44xx()) {
- mbox_device.resource = omap4_mbox_resources;
- mbox_device.num_resources = omap4_mbox_resources_sz;
- } else {
- pr_err("%s: platform not supported\n", __func__);
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+
+ oh = omap_hwmod_lookup("mailbox");
+ if (!oh) {
+ pr_err("%s: unable to find hwmod\n", __func__);
return;
}
- platform_device_register(&mbox_device);
+
+ od = omap_device_build("omap-mailbox", -1, oh, NULL, 0,
+ mbox_latencies, ARRAY_SIZE(mbox_latencies), 0);
+ WARN(IS_ERR(od), "%s: could not build device, err %ld\n",
+ __func__, PTR_ERR(od));
}
#else
static inline void omap_init_mbox(void) { }
@@ -279,163 +255,55 @@ static inline void omap_init_audio(void) {}
#include <plat/mcspi.h>
-#define OMAP2_MCSPI1_BASE 0x48098000
-#define OMAP2_MCSPI2_BASE 0x4809a000
-#define OMAP2_MCSPI3_BASE 0x480b8000
-#define OMAP2_MCSPI4_BASE 0x480ba000
-
-#define OMAP4_MCSPI1_BASE 0x48098100
-#define OMAP4_MCSPI2_BASE 0x4809a100
-#define OMAP4_MCSPI3_BASE 0x480b8100
-#define OMAP4_MCSPI4_BASE 0x480ba100
-
-static struct omap2_mcspi_platform_config omap2_mcspi1_config = {
- .num_cs = 4,
-};
-
-static struct resource omap2_mcspi1_resources[] = {
- {
- .start = OMAP2_MCSPI1_BASE,
- .end = OMAP2_MCSPI1_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap2_mcspi1 = {
- .name = "omap2_mcspi",
- .id = 1,
- .num_resources = ARRAY_SIZE(omap2_mcspi1_resources),
- .resource = omap2_mcspi1_resources,
- .dev = {
- .platform_data = &omap2_mcspi1_config,
- },
-};
-
-static struct omap2_mcspi_platform_config omap2_mcspi2_config = {
- .num_cs = 2,
-};
-
-static struct resource omap2_mcspi2_resources[] = {
- {
- .start = OMAP2_MCSPI2_BASE,
- .end = OMAP2_MCSPI2_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap2_mcspi2 = {
- .name = "omap2_mcspi",
- .id = 2,
- .num_resources = ARRAY_SIZE(omap2_mcspi2_resources),
- .resource = omap2_mcspi2_resources,
- .dev = {
- .platform_data = &omap2_mcspi2_config,
- },
-};
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
- defined(CONFIG_ARCH_OMAP4)
-static struct omap2_mcspi_platform_config omap2_mcspi3_config = {
- .num_cs = 2,
-};
-
-static struct resource omap2_mcspi3_resources[] = {
- {
- .start = OMAP2_MCSPI3_BASE,
- .end = OMAP2_MCSPI3_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap2_mcspi3 = {
- .name = "omap2_mcspi",
- .id = 3,
- .num_resources = ARRAY_SIZE(omap2_mcspi3_resources),
- .resource = omap2_mcspi3_resources,
- .dev = {
- .platform_data = &omap2_mcspi3_config,
- },
-};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static struct omap2_mcspi_platform_config omap2_mcspi4_config = {
- .num_cs = 1,
-};
-
-static struct resource omap2_mcspi4_resources[] = {
- {
- .start = OMAP2_MCSPI4_BASE,
- .end = OMAP2_MCSPI4_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap2_mcspi4 = {
- .name = "omap2_mcspi",
- .id = 4,
- .num_resources = ARRAY_SIZE(omap2_mcspi4_resources),
- .resource = omap2_mcspi4_resources,
- .dev = {
- .platform_data = &omap2_mcspi4_config,
+struct omap_device_pm_latency omap_mcspi_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
},
};
-#endif
-#ifdef CONFIG_ARCH_OMAP4
-static inline void omap4_mcspi_fixup(void)
-{
- omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
- omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff;
- omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
- omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff;
- omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
- omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff;
- omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
- omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff;
-}
-#else
-static inline void omap4_mcspi_fixup(void)
+static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
{
-}
-#endif
+ struct omap_device *od;
+ char *name = "omap2_mcspi";
+ struct omap2_mcspi_platform_config *pdata;
+ static int spi_num;
+ struct omap2_mcspi_dev_attr *mcspi_attrib = oh->dev_attr;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("Memory allocation for McSPI device failed\n");
+ return -ENOMEM;
+ }
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
- defined(CONFIG_ARCH_OMAP4)
-static inline void omap2_mcspi3_init(void)
-{
- platform_device_register(&omap2_mcspi3);
-}
-#else
-static inline void omap2_mcspi3_init(void)
-{
-}
-#endif
+ pdata->num_cs = mcspi_attrib->num_chipselect;
+ switch (oh->class->rev) {
+ case OMAP2_MCSPI_REV:
+ case OMAP3_MCSPI_REV:
+ pdata->regs_offset = 0;
+ break;
+ case OMAP4_MCSPI_REV:
+ pdata->regs_offset = OMAP4_MCSPI_REG_OFFSET;
+ break;
+ default:
+ pr_err("Invalid McSPI Revision value\n");
+ return -EINVAL;
+ }
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static inline void omap2_mcspi4_init(void)
-{
- platform_device_register(&omap2_mcspi4);
-}
-#else
-static inline void omap2_mcspi4_init(void)
-{
+ spi_num++;
+ od = omap_device_build(name, spi_num, oh, pdata,
+ sizeof(*pdata), omap_mcspi_latency,
+ ARRAY_SIZE(omap_mcspi_latency), 0);
+ WARN(IS_ERR(od), "Can't build omap_device for %s:%s\n",
+ name, oh->name);
+ kfree(pdata);
+ return 0;
}
-#endif
static void omap_init_mcspi(void)
{
- if (cpu_is_omap44xx())
- omap4_mcspi_fixup();
-
- platform_device_register(&omap2_mcspi1);
- platform_device_register(&omap2_mcspi2);
-
- if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
- omap2_mcspi3_init();
-
- if (cpu_is_omap343x() || cpu_is_omap44xx())
- omap2_mcspi4_init();
+ omap_hwmod_for_each_by_class("mcspi", omap_mcspi_init, NULL);
}
#else
@@ -610,117 +478,10 @@ static inline void omap_init_aes(void) { }
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-
-#define MMCHS_SYSCONFIG 0x0010
-#define MMCHS_SYSCONFIG_SWRESET (1 << 1)
-#define MMCHS_SYSSTATUS 0x0014
-#define MMCHS_SYSSTATUS_RESETDONE (1 << 0)
-
-static struct platform_device dummy_pdev = {
- .dev = {
- .bus = &platform_bus_type,
- },
-};
-
-/**
- * omap_hsmmc_reset() - Full reset of each HS-MMC controller
- *
- * Ensure that each MMC controller is fully reset. Controllers
- * left in an unknown state (by bootloader) may prevent retention
- * or OFF-mode. This is especially important in cases where the
- * MMC driver is not enabled, _or_ built as a module.
- *
- * In order for reset to work, interface, functional and debounce
- * clocks must be enabled. The debounce clock comes from func_32k_clk
- * and is not under SW control, so we only enable i- and f-clocks.
- **/
-static void __init omap_hsmmc_reset(void)
-{
- u32 i, nr_controllers;
- struct clk *iclk, *fclk;
-
- if (cpu_is_omap242x())
- return;
-
- nr_controllers = cpu_is_omap44xx() ? OMAP44XX_NR_MMC :
- (cpu_is_omap34xx() ? OMAP34XX_NR_MMC : OMAP24XX_NR_MMC);
-
- for (i = 0; i < nr_controllers; i++) {
- u32 v, base = 0;
- struct device *dev = &dummy_pdev.dev;
-
- switch (i) {
- case 0:
- base = OMAP2_MMC1_BASE;
- break;
- case 1:
- base = OMAP2_MMC2_BASE;
- break;
- case 2:
- base = OMAP3_MMC3_BASE;
- break;
- case 3:
- if (!cpu_is_omap44xx())
- return;
- base = OMAP4_MMC4_BASE;
- break;
- case 4:
- if (!cpu_is_omap44xx())
- return;
- base = OMAP4_MMC5_BASE;
- break;
- }
-
- if (cpu_is_omap44xx())
- base += OMAP4_MMC_REG_OFFSET;
-
- dummy_pdev.id = i;
- dev_set_name(&dummy_pdev.dev, "mmci-omap-hs.%d", i);
- iclk = clk_get(dev, "ick");
- if (IS_ERR(iclk))
- goto err1;
- if (clk_enable(iclk))
- goto err2;
-
- fclk = clk_get(dev, "fck");
- if (IS_ERR(fclk))
- goto err3;
- if (clk_enable(fclk))
- goto err4;
-
- omap_writel(MMCHS_SYSCONFIG_SWRESET, base + MMCHS_SYSCONFIG);
- v = omap_readl(base + MMCHS_SYSSTATUS);
- while (!(omap_readl(base + MMCHS_SYSSTATUS) &
- MMCHS_SYSSTATUS_RESETDONE))
- cpu_relax();
-
- clk_disable(fclk);
- clk_put(fclk);
- clk_disable(iclk);
- clk_put(iclk);
- }
- return;
-
-err4:
- clk_put(fclk);
-err3:
- clk_disable(iclk);
-err2:
- clk_put(iclk);
-err1:
- printk(KERN_WARNING "%s: Unable to enable clocks for MMC%d, "
- "cannot reset.\n", __func__, i);
-}
-#else
-static inline void omap_hsmmc_reset(void) {}
-#endif
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
- defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-
-static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
- int controller_nr)
+static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
+ *mmc_controller)
{
if ((mmc_controller->slots[0].switch_pin > 0) && \
(mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
@@ -731,163 +492,44 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
OMAP_PIN_INPUT_PULLUP);
- if (cpu_is_omap2420() && controller_nr == 0) {
- omap_mux_init_signal("sdmmc_cmd", 0);
- omap_mux_init_signal("sdmmc_clki", 0);
- omap_mux_init_signal("sdmmc_clko", 0);
- omap_mux_init_signal("sdmmc_dat0", 0);
- omap_mux_init_signal("sdmmc_dat_dir0", 0);
- omap_mux_init_signal("sdmmc_cmd_dir", 0);
- if (mmc_controller->slots[0].caps & MMC_CAP_4_BIT_DATA) {
- omap_mux_init_signal("sdmmc_dat1", 0);
- omap_mux_init_signal("sdmmc_dat2", 0);
- omap_mux_init_signal("sdmmc_dat3", 0);
- omap_mux_init_signal("sdmmc_dat_dir1", 0);
- omap_mux_init_signal("sdmmc_dat_dir2", 0);
- omap_mux_init_signal("sdmmc_dat_dir3", 0);
- }
-
- /*
- * Use internal loop-back in MMC/SDIO Module Input Clock
- * selection
- */
- if (mmc_controller->slots[0].internal_clock) {
- u32 v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- v |= (1 << 24);
- omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
- }
+ omap_mux_init_signal("sdmmc_cmd", 0);
+ omap_mux_init_signal("sdmmc_clki", 0);
+ omap_mux_init_signal("sdmmc_clko", 0);
+ omap_mux_init_signal("sdmmc_dat0", 0);
+ omap_mux_init_signal("sdmmc_dat_dir0", 0);
+ omap_mux_init_signal("sdmmc_cmd_dir", 0);
+ if (mmc_controller->slots[0].caps & MMC_CAP_4_BIT_DATA) {
+ omap_mux_init_signal("sdmmc_dat1", 0);
+ omap_mux_init_signal("sdmmc_dat2", 0);
+ omap_mux_init_signal("sdmmc_dat3", 0);
+ omap_mux_init_signal("sdmmc_dat_dir1", 0);
+ omap_mux_init_signal("sdmmc_dat_dir2", 0);
+ omap_mux_init_signal("sdmmc_dat_dir3", 0);
}
- if (cpu_is_omap34xx()) {
- if (controller_nr == 0) {
- omap_mux_init_signal("sdmmc1_clk",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_cmd",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_dat0",
- OMAP_PIN_INPUT_PULLUP);
- if (mmc_controller->slots[0].caps &
- (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
- omap_mux_init_signal("sdmmc1_dat1",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_dat2",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_dat3",
- OMAP_PIN_INPUT_PULLUP);
- }
- if (mmc_controller->slots[0].caps &
- MMC_CAP_8_BIT_DATA) {
- omap_mux_init_signal("sdmmc1_dat4",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_dat5",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_dat6",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc1_dat7",
- OMAP_PIN_INPUT_PULLUP);
- }
- }
- if (controller_nr == 1) {
- /* MMC2 */
- omap_mux_init_signal("sdmmc2_clk",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_cmd",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_dat0",
- OMAP_PIN_INPUT_PULLUP);
-
- /*
- * For 8 wire configurations, Lines DAT4, 5, 6 and 7 need to be muxed
- * in the board-*.c files
- */
- if (mmc_controller->slots[0].caps &
- (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
- omap_mux_init_signal("sdmmc2_dat1",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_dat2",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_dat3",
- OMAP_PIN_INPUT_PULLUP);
- }
- if (mmc_controller->slots[0].caps &
- MMC_CAP_8_BIT_DATA) {
- omap_mux_init_signal("sdmmc2_dat4.sdmmc2_dat4",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_dat5.sdmmc2_dat5",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_dat6.sdmmc2_dat6",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("sdmmc2_dat7.sdmmc2_dat7",
- OMAP_PIN_INPUT_PULLUP);
- }
- }
-
- /*
- * For MMC3 the pins need to be muxed in the board-*.c files
- */
+ /*
+ * Use internal loop-back in MMC/SDIO Module Input Clock
+ * selection
+ */
+ if (mmc_controller->slots[0].internal_clock) {
+ u32 v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
+ v |= (1 << 24);
+ omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
}
}
-void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
- int nr_controllers)
+void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
- int i;
- char *name;
-
- for (i = 0; i < nr_controllers; i++) {
- unsigned long base, size;
- unsigned int irq = 0;
+ char *name = "mmci-omap";
- if (!mmc_data[i])
- continue;
-
- omap2_mmc_mux(mmc_data[i], i);
+ if (!mmc_data[0]) {
+ pr_err("%s fails: Incomplete platform data\n", __func__);
+ return;
+ }
- switch (i) {
- case 0:
- base = OMAP2_MMC1_BASE;
- irq = INT_24XX_MMC_IRQ;
- break;
- case 1:
- base = OMAP2_MMC2_BASE;
- irq = INT_24XX_MMC2_IRQ;
- break;
- case 2:
- if (!cpu_is_omap44xx() && !cpu_is_omap34xx())
- return;
- base = OMAP3_MMC3_BASE;
- irq = INT_34XX_MMC3_IRQ;
- break;
- case 3:
- if (!cpu_is_omap44xx())
- return;
- base = OMAP4_MMC4_BASE;
- irq = OMAP44XX_IRQ_MMC4;
- break;
- case 4:
- if (!cpu_is_omap44xx())
- return;
- base = OMAP4_MMC5_BASE;
- irq = OMAP44XX_IRQ_MMC5;
- break;
- default:
- continue;
- }
-
- if (cpu_is_omap2420()) {
- size = OMAP2420_MMC_SIZE;
- name = "mmci-omap";
- } else if (cpu_is_omap44xx()) {
- if (i < 3)
- irq += OMAP44XX_IRQ_GIC_START;
- size = OMAP4_HSMMC_SIZE;
- name = "mmci-omap-hs";
- } else {
- size = OMAP3_HSMMC_SIZE;
- name = "mmci-omap-hs";
- }
- omap_mmc_add(name, i, base, size, irq, mmc_data[i]);
- };
+ omap242x_mmc_mux(mmc_data[0]);
+ omap_mmc_add(name, 0, OMAP2_MMC1_BASE, OMAP2420_MMC_SIZE,
+ INT_24XX_MMC_IRQ, mmc_data[0]);
}
#endif
@@ -895,7 +537,7 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_HDQ_MASTER_OMAP) || defined(CONFIG_HDQ_MASTER_OMAP_MODULE)
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430)
#define OMAP_HDQ_BASE 0x480B2000
#endif
static struct resource omap_hdq_resources[] = {
@@ -961,7 +603,6 @@ static int __init omap2_init_devices(void)
* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
- omap_hsmmc_reset();
omap_init_audio();
omap_init_camera();
omap_init_mbox();
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
new file mode 100644
index 000000000000..5ab6a7469b4b
--- /dev/null
+++ b/arch/arm/mach-omap2/display.c
@@ -0,0 +1,102 @@
+/*
+ * OMAP2plus display device setup / initialization.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Senthilvadivu Guruswamy
+ * Sumit Semwal
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+
+static struct platform_device omap_display_device = {
+ .name = "omap_display",
+ .id = -1,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+
+static struct omap_device_pm_latency omap_dss_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+int __init omap_display_init(struct omap_dss_board_info *board_data)
+{
+ int r = 0;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ int i;
+ struct omap_display_platform_data pdata;
+
+ /*
+ * omap: valid DSS hwmod names
+ * omap2,3,4: dss_core, dss_dispc, dss_rfbi, dss_venc
+ * omap3,4: dss_dsi1
+ * omap4: dss_dsi2, dss_hdmi
+ */
+ char *oh_name[] = {"dss_core", "dss_dispc", "dss_rfbi", "dss_venc", "dss_dsi1",
+ "dss_dsi2", "dss_hdmi"};
+ char *dev_name[] = {"omap_dss", "omap_dispc", "omap_rfbi", "omap_venc", "omap_dsi1",
+ "omap_dsi2", "omap_hdmi"};
+ int oh_count;
+
+ memset(&pdata, 0, sizeof(pdata));
+
+ if (cpu_is_omap24xx())
+ oh_count = ARRAY_SIZE(oh_name) - 3;
+ /* last 3 hwmod dev in oh_name are not available for omap2 */
+ else if (cpu_is_omap44xx())
+ oh_count = ARRAY_SIZE(oh_name);
+ else
+ oh_count = ARRAY_SIZE(oh_name) - 2;
+ /* last 2 hwmod dev in oh_name are not available for omap3 */
+
+
+ pdata.board_data = board_data;
+ pdata.board_data->get_last_off_on_transaction_id = NULL;
+
+ for (i = 0; i < oh_count; i++) {
+ oh = omap_hwmod_lookup(oh_name[i]);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name[i]);
+ return -ENODEV;
+ }
+ od = omap_device_build(dev_name[i], -1, oh, &pdata,
+ sizeof(struct omap_display_platform_data),
+ omap_dss_latency,
+ ARRAY_SIZE(omap_dss_latency), 0);
+
+ if (WARN((IS_ERR(od)), "Could not build omap_device for %s\n",
+ oh_name[i]))
+ return -ENODEV;
+ }
+ omap_display_device.dev.platform_data = board_data;
+
+ r = platform_device_register(&omap_display_device);
+ if (r < 0)
+ printk(KERN_ERR "Unable to register OMAP-Display device\n");
+
+ return r;
+}
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 2bb29c160702..c1791d08ae56 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/mtd/nand.h>
#include <asm/mach/flash.h>
@@ -69,8 +70,10 @@ static int omap2_nand_gpmc_retime(void)
t.wr_cycle = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->wr_cycle);
/* Configure GPMC */
- gpmc_cs_configure(gpmc_nand_data->cs,
- GPMC_CONFIG_DEV_SIZE, gpmc_nand_data->devsize);
+ if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
+ gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 1);
+ else
+ gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 0);
gpmc_cs_configure(gpmc_nand_data->cs,
GPMC_CONFIG_DEV_TYPE, GPMC_DEVICETYPE_NAND);
err = gpmc_cs_set_timings(gpmc_nand_data->cs, &t);
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 3a7d25fb00ef..d776ded9830d 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -94,7 +94,7 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
}
static void set_onenand_cfg(void __iomem *onenand_base, int latency,
- int sync_read, int sync_write, int hf)
+ int sync_read, int sync_write, int hf, int vhf)
{
u32 reg;
@@ -114,12 +114,57 @@ static void set_onenand_cfg(void __iomem *onenand_base, int latency,
reg |= ONENAND_SYS_CFG1_HF;
else
reg &= ~ONENAND_SYS_CFG1_HF;
+ if (vhf)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ else
+ reg &= ~ONENAND_SYS_CFG1_VHF;
writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
}
+static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg,
+ void __iomem *onenand_base, bool *clk_dep)
+{
+ u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID);
+ int freq = 0;
+
+ if (cfg->get_freq) {
+ struct onenand_freq_info fi;
+
+ fi.maf_id = readw(onenand_base + ONENAND_REG_MANUFACTURER_ID);
+ fi.dev_id = readw(onenand_base + ONENAND_REG_DEVICE_ID);
+ fi.ver_id = ver;
+ freq = cfg->get_freq(&fi, clk_dep);
+ if (freq)
+ return freq;
+ }
+
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ freq = 40;
+ break;
+ case 1:
+ freq = 54;
+ break;
+ case 2:
+ freq = 66;
+ break;
+ case 3:
+ freq = 83;
+ break;
+ case 4:
+ freq = 104;
+ break;
+ default:
+ freq = 54;
+ break;
+ }
+
+ return freq;
+}
+
static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
void __iomem *onenand_base,
- int freq)
+ int *freq_ptr)
{
struct gpmc_timings t;
const int t_cer = 15;
@@ -130,10 +175,11 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
const int t_wph = 30;
int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
int tick_ns, div, fclk_offset_ns, fclk_offset, gpmc_clk_ns, latency;
- int first_time = 0, hf = 0, sync_read = 0, sync_write = 0;
+ int first_time = 0, hf = 0, vhf = 0, sync_read = 0, sync_write = 0;
int err, ticks_cez;
- int cs = cfg->cs;
+ int cs = cfg->cs, freq = *freq_ptr;
u32 reg;
+ bool clk_dep = false;
if (cfg->flags & ONENAND_SYNC_READ) {
sync_read = 1;
@@ -148,27 +194,7 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
err = omap2_onenand_set_async_mode(cs, onenand_base);
if (err)
return err;
- reg = readw(onenand_base + ONENAND_REG_VERSION_ID);
- switch ((reg >> 4) & 0xf) {
- case 0:
- freq = 40;
- break;
- case 1:
- freq = 54;
- break;
- case 2:
- freq = 66;
- break;
- case 3:
- freq = 83;
- break;
- case 4:
- freq = 104;
- break;
- default:
- freq = 54;
- break;
- }
+ freq = omap2_onenand_get_freq(cfg, onenand_base, &clk_dep);
first_time = 1;
}
@@ -180,7 +206,7 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
t_avdh = 2;
t_ach = 3;
t_aavdh = 6;
- t_rdyo = 9;
+ t_rdyo = 6;
break;
case 83:
min_gpmc_clk_period = 12000; /* 83 MHz */
@@ -217,16 +243,36 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
gpmc_clk_ns = gpmc_ticks_to_ns(div);
if (gpmc_clk_ns < 15) /* >66Mhz */
hf = 1;
- if (hf)
+ if (gpmc_clk_ns < 12) /* >83Mhz */
+ vhf = 1;
+ if (vhf)
+ latency = 8;
+ else if (hf)
latency = 6;
else if (gpmc_clk_ns >= 25) /* 40 MHz*/
latency = 3;
else
latency = 4;
+ if (clk_dep) {
+ if (gpmc_clk_ns < 12) { /* >83Mhz */
+ t_ces = 3;
+ t_avds = 4;
+ } else if (gpmc_clk_ns < 15) { /* >66Mhz */
+ t_ces = 5;
+ t_avds = 4;
+ } else if (gpmc_clk_ns < 25) { /* >40Mhz */
+ t_ces = 6;
+ t_avds = 5;
+ } else {
+ t_ces = 7;
+ t_avds = 7;
+ }
+ }
+
if (first_time)
set_onenand_cfg(onenand_base, latency,
- sync_read, sync_write, hf);
+ sync_read, sync_write, hf, vhf);
if (div == 1) {
reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2);
@@ -264,6 +310,9 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
/* Read */
t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh));
t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach));
+ /* Force at least 1 clk between AVD High to OE Low */
+ if (t.oe_on <= t.adv_rd_off)
+ t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(1);
t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div);
t.oe_off = t.access + gpmc_round_ns_to_ticks(1);
t.cs_rd_off = t.oe_off;
@@ -317,18 +366,20 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
if (err)
return err;
- set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf);
+ set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf, vhf);
+
+ *freq_ptr = freq;
return 0;
}
-static int gpmc_onenand_setup(void __iomem *onenand_base, int freq)
+static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
{
struct device *dev = &gpmc_onenand_device.dev;
/* Set sync timings in GPMC */
if (omap2_onenand_set_sync_mode(gpmc_onenand_data, onenand_base,
- freq) < 0) {
+ freq_ptr) < 0) {
dev_err(dev, "Unable to set synchronous mode\n");
return -EINVAL;
}
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 1b7b3e7d02f7..674174365f78 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -14,6 +14,7 @@
*/
#undef DEBUG
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -22,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <asm/mach-types.h>
#include <plat/gpmc.h>
@@ -58,7 +60,6 @@
#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
#define GPMC_SECTION_SHIFT 28 /* 128 MB */
-#define PREFETCH_FIFOTHRESHOLD (0x40 << 8)
#define CS_NUM_SHIFT 24
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE 2
@@ -100,6 +101,8 @@ static void __iomem *gpmc_base;
static struct clk *gpmc_l3_clk;
+static irqreturn_t gpmc_handle_irq(int irq, void *dev);
+
static void gpmc_write_reg(int idx, u32 val)
{
__raw_writel(val, gpmc_base + idx);
@@ -497,6 +500,10 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
u32 regval = 0;
switch (cmd) {
+ case GPMC_ENABLE_IRQ:
+ gpmc_write_reg(GPMC_IRQENABLE, wval);
+ break;
+
case GPMC_SET_IRQ_STATUS:
gpmc_write_reg(GPMC_IRQSTATUS, wval);
break;
@@ -598,15 +605,19 @@ EXPORT_SYMBOL(gpmc_nand_write);
/**
* gpmc_prefetch_enable - configures and starts prefetch transfer
* @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
* @dma_mode: dma mode enable (1) or disable (0)
* @u32_count: number of bytes to be transferred
* @is_write: prefetch read(0) or write post(1) mode
*/
-int gpmc_prefetch_enable(int cs, int dma_mode,
+int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
unsigned int u32_count, int is_write)
{
- if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) {
+ pr_err("gpmc: fifo threshold is not supported\n");
+ return -1;
+ } else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
/* Set the amount of bytes to be prefetched */
gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);
@@ -614,7 +625,7 @@ int gpmc_prefetch_enable(int cs, int dma_mode,
* enable the engine. Set which cs has requested it.
*/
gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
- PREFETCH_FIFOTHRESHOLD |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) |
ENABLE_PREFETCH |
(dma_mode << DMA_MPU_MODE) |
(0x1 & is_write)));
@@ -678,9 +689,10 @@ static void __init gpmc_mem_init(void)
}
}
-void __init gpmc_init(void)
+static int __init gpmc_init(void)
{
- u32 l;
+ u32 l, irq;
+ int cs, ret = -EINVAL;
char *ck = NULL;
if (cpu_is_omap24xx()) {
@@ -698,7 +710,7 @@ void __init gpmc_init(void)
}
if (WARN_ON(!ck))
- return;
+ return ret;
gpmc_l3_clk = clk_get(NULL, ck);
if (IS_ERR(gpmc_l3_clk)) {
@@ -723,6 +735,36 @@ void __init gpmc_init(void)
l |= (0x02 << 3) | (1 << 0);
gpmc_write_reg(GPMC_SYSCONFIG, l);
gpmc_mem_init();
+
+ /* initialize the chained IRQs */
+ irq = OMAP_GPMC_IRQ_BASE;
+ for (cs = 0; cs < GPMC_CS_NUM; cs++) {
+ set_irq_handler(irq, handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ irq++;
+ }
+
+ ret = request_irq(INT_34XX_GPMC_IRQ,
+ gpmc_handle_irq, IRQF_SHARED, "gpmc", gpmc_base);
+ if (ret)
+ pr_err("gpmc: irq %d could not be claimed: err %d\n",
+ INT_34XX_GPMC_IRQ, ret);
+ return ret;
+}
+postcore_initcall(gpmc_init);
+
+static irqreturn_t gpmc_handle_irq(int irq, void *dev)
+{
+ u8 cs;
+
+ if (irq != INT_34XX_GPMC_IRQ)
+ return IRQ_HANDLED;
+ /* check cs to invoke the irq */
+ cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7;
+ if (OMAP_GPMC_IRQ_BASE+cs <= OMAP_GPMC_IRQ_END)
+ generic_handle_irq(OMAP_GPMC_IRQ_BASE+cs);
+
+ return IRQ_HANDLED;
}
#ifdef CONFIG_ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 34272e4863fd..137e1a5f3d85 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -16,7 +16,10 @@
#include <mach/hardware.h>
#include <plat/mmc.h>
#include <plat/omap-pm.h>
+#include <plat/mux.h>
+#include <plat/omap_device.h>
+#include "mux.h"
#include "hsmmc.h"
#include "control.h"
@@ -28,10 +31,6 @@ static u16 control_mmc1;
#define HSMMC_NAME_LEN 9
-static struct hsmmc_controller {
- char name[HSMMC_NAME_LEN + 1];
-} hsmmc[OMAP34XX_NR_MMC];
-
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
static int hsmmc_get_context_loss(struct device *dev)
@@ -204,174 +203,312 @@ static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
return 0;
}
-static struct omap_mmc_platform_data *hsmmc_data[OMAP34XX_NR_MMC] __initdata;
-
-void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
+static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
+ int controller_nr)
{
- struct omap2_hsmmc_info *c;
- int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
- int i;
- u32 reg;
-
- if (!cpu_is_omap44xx()) {
- if (cpu_is_omap2430()) {
- control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
- } else {
- control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
- }
- } else {
- control_pbias_offset =
- OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
- control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
- reg = omap4_ctrl_pad_readl(control_mmc1);
- reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
- OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
- reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
- OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
- reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|
- OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
- OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
- omap4_ctrl_pad_writel(reg, control_mmc1);
- }
-
- for (c = controllers; c->mmc; c++) {
- struct hsmmc_controller *hc = hsmmc + c->mmc - 1;
- struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];
-
- if (!c->mmc || c->mmc > nr_hsmmc) {
- pr_debug("MMC%d: no such controller\n", c->mmc);
- continue;
- }
- if (mmc) {
- pr_debug("MMC%d: already configured\n", c->mmc);
- continue;
+ if ((mmc_controller->slots[0].switch_pin > 0) && \
+ (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
+ OMAP_PIN_INPUT_PULLUP);
+ if ((mmc_controller->slots[0].gpio_wp > 0) && \
+ (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
+ OMAP_PIN_INPUT_PULLUP);
+ if (cpu_is_omap34xx()) {
+ if (controller_nr == 0) {
+ omap_mux_init_signal("sdmmc1_clk",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_cmd",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat0",
+ OMAP_PIN_INPUT_PULLUP);
+ if (mmc_controller->slots[0].caps &
+ (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
+ omap_mux_init_signal("sdmmc1_dat1",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat2",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat3",
+ OMAP_PIN_INPUT_PULLUP);
+ }
+ if (mmc_controller->slots[0].caps &
+ MMC_CAP_8_BIT_DATA) {
+ omap_mux_init_signal("sdmmc1_dat4",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat5",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat6",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat7",
+ OMAP_PIN_INPUT_PULLUP);
+ }
}
-
- mmc = kzalloc(sizeof(struct omap_mmc_platform_data),
- GFP_KERNEL);
- if (!mmc) {
- pr_err("Cannot allocate memory for mmc device!\n");
- goto done;
+ if (controller_nr == 1) {
+ /* MMC2 */
+ omap_mux_init_signal("sdmmc2_clk",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_cmd",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat0",
+ OMAP_PIN_INPUT_PULLUP);
+
+ /*
+ * For 8 wire configurations, Lines DAT4, 5, 6 and 7
+ * need to be muxed in the board-*.c files
+ */
+ if (mmc_controller->slots[0].caps &
+ (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
+ omap_mux_init_signal("sdmmc2_dat1",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat2",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat3",
+ OMAP_PIN_INPUT_PULLUP);
+ }
+ if (mmc_controller->slots[0].caps &
+ MMC_CAP_8_BIT_DATA) {
+ omap_mux_init_signal("sdmmc2_dat4.sdmmc2_dat4",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat5.sdmmc2_dat5",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat6.sdmmc2_dat6",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat7.sdmmc2_dat7",
+ OMAP_PIN_INPUT_PULLUP);
+ }
}
- if (c->name)
- strncpy(hc->name, c->name, HSMMC_NAME_LEN);
- else
- snprintf(hc->name, ARRAY_SIZE(hc->name),
- "mmc%islot%i", c->mmc, 1);
- mmc->slots[0].name = hc->name;
- mmc->nr_slots = 1;
- mmc->slots[0].caps = c->caps;
- mmc->slots[0].internal_clock = !c->ext_clock;
- mmc->dma_mask = 0xffffffff;
- if (cpu_is_omap44xx())
- mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
- else
- mmc->reg_offset = 0;
+ /*
+ * For MMC3 the pins need to be muxed in the board-*.c files
+ */
+ }
+}
- mmc->get_context_loss_count = hsmmc_get_context_loss;
+static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ struct omap_mmc_platform_data *mmc)
+{
+ char *hc_name;
- mmc->slots[0].switch_pin = c->gpio_cd;
- mmc->slots[0].gpio_wp = c->gpio_wp;
+ hc_name = kzalloc(sizeof(char) * (HSMMC_NAME_LEN + 1), GFP_KERNEL);
+ if (!hc_name) {
+ pr_err("Cannot allocate memory for controller slot name\n");
+ kfree(hc_name);
+ return -ENOMEM;
+ }
- mmc->slots[0].remux = c->remux;
- mmc->slots[0].init_card = c->init_card;
+ if (c->name)
+ strncpy(hc_name, c->name, HSMMC_NAME_LEN);
+ else
+ snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i",
+ c->mmc, 1);
+ mmc->slots[0].name = hc_name;
+ mmc->nr_slots = 1;
+ mmc->slots[0].caps = c->caps;
+ mmc->slots[0].internal_clock = !c->ext_clock;
+ mmc->dma_mask = 0xffffffff;
+ if (cpu_is_omap44xx())
+ mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
+ else
+ mmc->reg_offset = 0;
- if (c->cover_only)
- mmc->slots[0].cover = 1;
+ mmc->get_context_loss_count = hsmmc_get_context_loss;
- if (c->nonremovable)
- mmc->slots[0].nonremovable = 1;
+ mmc->slots[0].switch_pin = c->gpio_cd;
+ mmc->slots[0].gpio_wp = c->gpio_wp;
- if (c->power_saving)
- mmc->slots[0].power_saving = 1;
+ mmc->slots[0].remux = c->remux;
+ mmc->slots[0].init_card = c->init_card;
- if (c->no_off)
- mmc->slots[0].no_off = 1;
+ if (c->cover_only)
+ mmc->slots[0].cover = 1;
- if (c->vcc_aux_disable_is_sleep)
- mmc->slots[0].vcc_aux_disable_is_sleep = 1;
+ if (c->nonremovable)
+ mmc->slots[0].nonremovable = 1;
- /* NOTE: MMC slots should have a Vcc regulator set up.
- * This may be from a TWL4030-family chip, another
- * controllable regulator, or a fixed supply.
- *
- * temporary HACK: ocr_mask instead of fixed supply
- */
- mmc->slots[0].ocr_mask = c->ocr_mask;
+ if (c->power_saving)
+ mmc->slots[0].power_saving = 1;
- if (cpu_is_omap3517() || cpu_is_omap3505())
- mmc->slots[0].set_power = nop_mmc_set_power;
- else
- mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+ if (c->no_off)
+ mmc->slots[0].no_off = 1;
- if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
- mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;
+ if (c->vcc_aux_disable_is_sleep)
+ mmc->slots[0].vcc_aux_disable_is_sleep = 1;
- switch (c->mmc) {
- case 1:
- if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
- /* on-chip level shifting via PBIAS0/PBIAS1 */
- if (cpu_is_omap44xx()) {
- mmc->slots[0].before_set_reg =
+ /*
+ * NOTE: MMC slots should have a Vcc regulator set up.
+ * This may be from a TWL4030-family chip, another
+ * controllable regulator, or a fixed supply.
+ *
+ * temporary HACK: ocr_mask instead of fixed supply
+ */
+ mmc->slots[0].ocr_mask = c->ocr_mask;
+
+ if (cpu_is_omap3517() || cpu_is_omap3505())
+ mmc->slots[0].set_power = nop_mmc_set_power;
+ else
+ mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+
+ if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
+ mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;
+
+ switch (c->mmc) {
+ case 1:
+ if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+ /* on-chip level shifting via PBIAS0/PBIAS1 */
+ if (cpu_is_omap44xx()) {
+ mmc->slots[0].before_set_reg =
omap4_hsmmc1_before_set_reg;
- mmc->slots[0].after_set_reg =
+ mmc->slots[0].after_set_reg =
omap4_hsmmc1_after_set_reg;
- } else {
- mmc->slots[0].before_set_reg =
+ } else {
+ mmc->slots[0].before_set_reg =
omap_hsmmc1_before_set_reg;
- mmc->slots[0].after_set_reg =
+ mmc->slots[0].after_set_reg =
omap_hsmmc1_after_set_reg;
- }
}
+ }
- /* Omap3630 HSMMC1 supports only 4-bit */
- if (cpu_is_omap3630() &&
- (c->caps & MMC_CAP_8_BIT_DATA)) {
- c->caps &= ~MMC_CAP_8_BIT_DATA;
- c->caps |= MMC_CAP_4_BIT_DATA;
- mmc->slots[0].caps = c->caps;
- }
- break;
- case 2:
- if (c->ext_clock)
- c->transceiver = 1;
- if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
- c->caps &= ~MMC_CAP_8_BIT_DATA;
- c->caps |= MMC_CAP_4_BIT_DATA;
- }
- /* FALLTHROUGH */
- case 3:
- if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
- /* off-chip level shifting, or none */
- mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
- mmc->slots[0].after_set_reg = NULL;
- }
- break;
- default:
- pr_err("MMC%d configuration not supported!\n", c->mmc);
- kfree(mmc);
- continue;
+ /* OMAP3630 HSMMC1 supports only 4-bit */
+ if (cpu_is_omap3630() &&
+ (c->caps & MMC_CAP_8_BIT_DATA)) {
+ c->caps &= ~MMC_CAP_8_BIT_DATA;
+ c->caps |= MMC_CAP_4_BIT_DATA;
+ mmc->slots[0].caps = c->caps;
+ }
+ break;
+ case 2:
+ if (c->ext_clock)
+ c->transceiver = 1;
+ if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
+ c->caps &= ~MMC_CAP_8_BIT_DATA;
+ c->caps |= MMC_CAP_4_BIT_DATA;
}
- hsmmc_data[c->mmc - 1] = mmc;
+ /* FALLTHROUGH */
+ case 3:
+ if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+ /* off-chip level shifting, or none */
+ mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+ mmc->slots[0].after_set_reg = NULL;
+ }
+ break;
+ case 4:
+ case 5:
+ mmc->slots[0].before_set_reg = NULL;
+ mmc->slots[0].after_set_reg = NULL;
+ break;
+ default:
+ pr_err("MMC%d configuration not supported!\n", c->mmc);
+ kfree(hc_name);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct omap_device_pm_latency omap_hsmmc_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+ /*
+ * XXX There should also be an entry here to power off/on the
+ * MMC regulators/PBIAS cells, etc.
+ */
+};
+
+#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
+
+void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
+{
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ struct omap_device_pm_latency *ohl;
+ char oh_name[MAX_OMAP_MMC_HWMOD_NAME_LEN];
+ struct omap_mmc_platform_data *mmc_data;
+ struct omap_mmc_dev_attr *mmc_dev_attr;
+ char *name;
+ int l;
+ int ohl_cnt = 0;
+
+ mmc_data = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
+ if (!mmc_data) {
+ pr_err("Cannot allocate memory for mmc device!\n");
+ goto done;
}
- omap2_init_mmc(hsmmc_data, OMAP34XX_NR_MMC);
+ if (omap_hsmmc_pdata_init(hsmmcinfo, mmc_data) < 0) {
+ pr_err("%s fails!\n", __func__);
+ goto done;
+ }
+ omap_hsmmc_mux(mmc_data, (ctrl_nr - 1));
+
+ name = "omap_hsmmc";
+ ohl = omap_hsmmc_latency;
+ ohl_cnt = ARRAY_SIZE(omap_hsmmc_latency);
+
+ l = snprintf(oh_name, MAX_OMAP_MMC_HWMOD_NAME_LEN,
+ "mmc%d", ctrl_nr);
+ WARN(l >= MAX_OMAP_MMC_HWMOD_NAME_LEN,
+ "String buffer overflow in MMC%d device setup\n", ctrl_nr);
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ kfree(mmc_data->slots[0].name);
+ goto done;
+ }
- /* pass the device nodes back to board setup code */
- for (c = controllers; c->mmc; c++) {
- struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];
+ if (oh->dev_attr != NULL) {
+ mmc_dev_attr = oh->dev_attr;
+ mmc_data->controller_flags = mmc_dev_attr->flags;
+ }
- if (!c->mmc || c->mmc > nr_hsmmc)
- continue;
- c->dev = mmc->dev;
+ od = omap_device_build(name, ctrl_nr - 1, oh, mmc_data,
+ sizeof(struct omap_mmc_platform_data), ohl, ohl_cnt, false);
+ if (IS_ERR(od)) {
+ WARN(1, "Can't build omap_device for %s:%s.\n", name, oh->name);
+ kfree(mmc_data->slots[0].name);
+ goto done;
}
+ /*
+ * return device handle to board setup code
+ * required to populate for regulator framework structure
+ */
+ hsmmcinfo->dev = &od->pdev.dev;
done:
- for (i = 0; i < nr_hsmmc; i++)
- kfree(hsmmc_data[i]);
+ kfree(mmc_data);
+}
+
+void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
+{
+ u32 reg;
+
+ if (!cpu_is_omap44xx()) {
+ if (cpu_is_omap2430()) {
+ control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
+ control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
+ } else {
+ control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
+ control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
+ }
+ } else {
+ control_pbias_offset =
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
+ control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
+ reg = omap4_ctrl_pad_readl(control_mmc1);
+ reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
+ OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
+ reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
+ OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
+ reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|
+ OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
+ OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
+ omap4_ctrl_pad_writel(reg, control_mmc1);
+ }
+
+ for (; controllers->mmc; controllers++)
+ omap_init_hsmmc(controllers, controllers->mmc);
+
}
#endif
diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c
new file mode 100644
index 000000000000..06d4a80660a5
--- /dev/null
+++ b/arch/arm/mach-omap2/hwspinlock.c
@@ -0,0 +1,63 @@
+/*
+ * OMAP hardware spinlock device initialization
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Simon Que <sque@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+
+struct omap_device_pm_latency omap_spinlock_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ }
+};
+
+int __init hwspinlocks_init(void)
+{
+ int retval = 0;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ const char *oh_name = "spinlock";
+ const char *dev_name = "omap_hwspinlock";
+
+ /*
+ * Hwmod lookup will fail in case our platform doesn't support the
+ * hardware spinlock module, so it is safe to run this initcall
+ * on all omaps
+ */
+ oh = omap_hwmod_lookup(oh_name);
+ if (oh == NULL)
+ return -EINVAL;
+
+ od = omap_device_build(dev_name, 0, oh, NULL, 0,
+ omap_spinlock_latency,
+ ARRAY_SIZE(omap_spinlock_latency), false);
+ if (IS_ERR(od)) {
+ pr_err("Can't build omap_device for %s:%s\n", dev_name,
+ oh_name);
+ retval = PTR_ERR(od);
+ }
+
+ return retval;
+}
+/* early board code might need to reserve specific hwspinlock instances */
+postcore_initcall(hwspinlocks_init);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 5f9086c65e48..3168b17bc264 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -6,7 +6,7 @@
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
*
- * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2009-11 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -191,12 +191,19 @@ static void __init omap3_check_features(void)
if (!cpu_is_omap3505() && !cpu_is_omap3517())
omap3_features |= OMAP3_HAS_IO_WAKEUP;
+ omap3_features |= OMAP3_HAS_SDRC;
+
/*
* TODO: Get additional info (where applicable)
* e.g. Size of L2 cache.
*/
}
+static void __init ti816x_check_features(void)
+{
+ omap3_features = OMAP3_HAS_NEON;
+}
+
static void __init omap3_check_revision(void)
{
u32 cpuid, idcode;
@@ -287,6 +294,20 @@ static void __init omap3_check_revision(void)
omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
}
break;
+ case 0xb81e:
+ omap_chip.oc = CHIP_IS_TI816X;
+
+ switch (rev) {
+ case 0:
+ omap_revision = TI8168_REV_ES1_0;
+ break;
+ case 1:
+ omap_revision = TI8168_REV_ES1_1;
+ break;
+ default:
+ omap_revision = TI8168_REV_ES1_1;
+ }
+ break;
default:
/* Unknown default to latest silicon rev as default*/
omap_revision = OMAP3630_REV_ES1_2;
@@ -307,7 +328,7 @@ static void __init omap4_check_revision(void)
*/
idcode = read_tap_reg(OMAP_TAP_IDCODE);
hawkeye = (idcode >> 12) & 0xffff;
- rev = (idcode >> 28) & 0xff;
+ rev = (idcode >> 28) & 0xf;
/*
* Few initial ES2.0 samples IDCODE is same as ES1.0
@@ -326,22 +347,31 @@ static void __init omap4_check_revision(void)
omap_chip.oc |= CHIP_IS_OMAP4430ES1;
break;
case 1:
+ default:
omap_revision = OMAP4430_REV_ES2_0;
omap_chip.oc |= CHIP_IS_OMAP4430ES2;
+ }
+ break;
+ case 0xb95c:
+ switch (rev) {
+ case 3:
+ omap_revision = OMAP4430_REV_ES2_1;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES2_1;
break;
+ case 4:
default:
- omap_revision = OMAP4430_REV_ES2_0;
- omap_chip.oc |= CHIP_IS_OMAP4430ES2;
- }
- break;
+ omap_revision = OMAP4430_REV_ES2_2;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES2_2;
+ }
+ break;
default:
- /* Unknown default to latest silicon rev as default*/
- omap_revision = OMAP4430_REV_ES2_0;
- omap_chip.oc |= CHIP_IS_OMAP4430ES2;
+ /* Unknown; default to latest silicon rev */
+ omap_revision = OMAP4430_REV_ES2_2;
+ omap_chip.oc |= CHIP_IS_OMAP4430ES2_2;
}
- pr_info("OMAP%04x ES%d.0\n",
- omap_rev() >> 16, ((omap_rev() >> 12) & 0xf) + 1);
+ pr_info("OMAP%04x ES%d.%d\n", omap_rev() >> 16,
+ ((omap_rev() >> 12) & 0xf), ((omap_rev() >> 8) & 0xf));
}
#define OMAP3_SHOW_FEATURE(feat) \
@@ -372,6 +402,8 @@ static void __init omap3_cpuinfo(void)
/* Already set in omap3_check_revision() */
strcpy(cpu_name, "AM3505");
}
+ } else if (cpu_is_ti816x()) {
+ strcpy(cpu_name, "TI816X");
} else if (omap3_has_iva() && omap3_has_sgx()) {
/* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
strcpy(cpu_name, "OMAP3430/3530");
@@ -386,7 +418,7 @@ static void __init omap3_cpuinfo(void)
strcpy(cpu_name, "OMAP3503");
}
- if (cpu_is_omap3630()) {
+ if (cpu_is_omap3630() || cpu_is_ti816x()) {
switch (rev) {
case OMAP_REVBITS_00:
strcpy(cpu_rev, "1.0");
@@ -462,7 +494,13 @@ void __init omap2_check_revision(void)
omap24xx_check_revision();
} else if (cpu_is_omap34xx()) {
omap3_check_revision();
- omap3_check_features();
+
+ /* TI816X doesn't have feature register */
+ if (!cpu_is_ti816x())
+ omap3_check_features();
+ else
+ ti816x_check_features();
+
omap3_cpuinfo();
return;
} else if (cpu_is_omap44xx()) {
diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
index 6a4d4136002e..48adfe9fe4f3 100644
--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
@@ -19,6 +19,9 @@
#define UART_OFFSET(addr) ((addr) & 0x00ffffff)
+#define omap_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET)
+#define omap_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET)
+
.pushsection .data
omap_uart_phys: .word 0
omap_uart_virt: .word 0
@@ -36,7 +39,7 @@ omap_uart_lsr: .word 0
/* Use omap_uart_phys/virt if already configured */
10: mrc p15, 0, \rp, c1, c0
tst \rp, #1 @ MMU enabled?
- ldreq \rp, =__virt_to_phys(omap_uart_phys) @ MMU not enabled
+ ldreq \rp, =omap_uart_v2p(omap_uart_phys) @ MMU disabled
ldrne \rp, =omap_uart_phys @ MMU enabled
add \rv, \rp, #4 @ omap_uart_virt
ldr \rp, [\rp, #0]
@@ -49,7 +52,7 @@ omap_uart_lsr: .word 0
mrc p15, 0, \rp, c1, c0
tst \rp, #1 @ MMU enabled?
ldreq \rp, =OMAP_UART_INFO @ MMU not enabled
- ldrne \rp, =__phys_to_virt(OMAP_UART_INFO) @ MMU enabled
+ ldrne \rp, =omap_uart_p2v(OMAP_UART_INFO) @ MMU enabled
ldr \rp, [\rp, #0]
/* Select the UART to use based on the UART1 scratchpad value */
@@ -69,6 +72,12 @@ omap_uart_lsr: .word 0
beq 34f @ configure OMAP3UART4
cmp \rp, #OMAP4UART4 @ only on 44xx
beq 44f @ configure OMAP4UART4
+ cmp \rp, #TI816XUART1 @ ti816x UART offsets different
+ beq 81f @ configure UART1
+ cmp \rp, #TI816XUART2 @ ti816x UART offsets different
+ beq 82f @ configure UART2
+ cmp \rp, #TI816XUART3 @ ti816x UART offsets different
+ beq 83f @ configure UART3
cmp \rp, #ZOOM_UART @ only on zoom2/3
beq 95f @ configure ZOOM_UART
@@ -91,10 +100,16 @@ omap_uart_lsr: .word 0
b 98f
44: mov \rp, #UART_OFFSET(OMAP4_UART4_BASE)
b 98f
+81: mov \rp, #UART_OFFSET(TI816X_UART1_BASE)
+ b 98f
+82: mov \rp, #UART_OFFSET(TI816X_UART2_BASE)
+ b 98f
+83: mov \rp, #UART_OFFSET(TI816X_UART3_BASE)
+ b 98f
95: ldr \rp, =ZOOM_UART_BASE
mrc p15, 0, \rv, c1, c0
tst \rv, #1 @ MMU enabled?
- ldreq \rv, =__virt_to_phys(omap_uart_phys) @ MMU not enabled
+ ldreq \rv, =omap_uart_v2p(omap_uart_phys) @ MMU disabled
ldrne \rv, =omap_uart_phys @ MMU enabled
str \rp, [\rv, #0]
ldr \rp, =ZOOM_UART_VIRT
@@ -109,7 +124,7 @@ omap_uart_lsr: .word 0
98: add \rp, \rp, #0x48000000 @ phys base
mrc p15, 0, \rv, c1, c0
tst \rv, #1 @ MMU enabled?
- ldreq \rv, =__virt_to_phys(omap_uart_phys) @ MMU not enabled
+ ldreq \rv, =omap_uart_v2p(omap_uart_phys) @ MMU disabled
ldrne \rv, =omap_uart_phys @ MMU enabled
str \rp, [\rv, #0]
sub \rp, \rp, #0x48000000 @ phys base
@@ -131,7 +146,7 @@ omap_uart_lsr: .word 0
.macro busyuart,rd,rx
1001: mrc p15, 0, \rd, c1, c0
tst \rd, #1 @ MMU enabled?
- ldreq \rd, =__virt_to_phys(omap_uart_lsr) @ MMU not enabled
+ ldreq \rd, =omap_uart_v2p(omap_uart_lsr) @ MMU disabled
ldrne \rd, =omap_uart_lsr @ MMU enabled
ldr \rd, [\rd, #0]
ldrb \rd, [\rx, \rd]
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index 81985a665cb3..a48690b90990 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -61,6 +61,14 @@
bne 9998f
ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
cmp \irqnr, #0x0
+ bne 9998f
+
+ /*
+ * ti816x has an additional IRQ pending register. Checking this
+ * register on omap2 & omap3 has no effect (it reads as 0).
+ */
+ ldr \irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
+ cmp \irqnr, #0x0
9998:
ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
@@ -133,6 +141,11 @@
bne 9999f
ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
cmp \irqnr, #0x0
+#ifdef CONFIG_SOC_OMAPTI816X
+ bne 9999f
+ ldr \irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
+ cmp \irqnr, #0x0
+#endif
9999:
ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index c2032041d26f..657f3c84687c 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -30,7 +30,6 @@
#include <plat/sram.h>
#include <plat/sdrc.h>
-#include <plat/gpmc.h>
#include <plat/serial.h>
#include "clock2xxx.h"
@@ -66,7 +65,7 @@ static struct map_desc omap24xx_io_desc[] __initdata = {
},
};
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
static struct map_desc omap242x_io_desc[] __initdata = {
{
.virtual = DSP_MEM_2420_VIRT,
@@ -90,7 +89,7 @@ static struct map_desc omap242x_io_desc[] __initdata = {
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
static struct map_desc omap243x_io_desc[] __initdata = {
{
.virtual = L4_WK_243X_VIRT,
@@ -175,6 +174,18 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
#endif
};
#endif
+
+#ifdef CONFIG_SOC_OMAPTI816X
+static struct map_desc omapti816x_io_desc[] __initdata = {
+ {
+ .virtual = L4_34XX_VIRT,
+ .pfn = __phys_to_pfn(L4_34XX_PHYS),
+ .length = L4_34XX_SIZE,
+ .type = MT_DEVICE
+ },
+};
+#endif
+
#ifdef CONFIG_ARCH_OMAP4
static struct map_desc omap44xx_io_desc[] __initdata = {
{
@@ -241,7 +252,7 @@ static void __init _omap2_map_common_io(void)
omap_sram_init();
}
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
void __init omap242x_map_common_io(void)
{
iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
@@ -250,7 +261,7 @@ void __init omap242x_map_common_io(void)
}
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
void __init omap243x_map_common_io(void)
{
iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
@@ -267,6 +278,14 @@ void __init omap34xx_map_common_io(void)
}
#endif
+#ifdef CONFIG_SOC_OMAPTI816X
+void __init omapti816x_map_common_io(void)
+{
+ iotable_init(omapti816x_io_desc, ARRAY_SIZE(omapti816x_io_desc));
+ _omap2_map_common_io();
+}
+#endif
+
#ifdef CONFIG_ARCH_OMAP4
void __init omap44xx_map_common_io(void)
{
@@ -398,15 +417,10 @@ void __init omap2_init_common_infrastructure(void)
void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1)
{
- omap_serial_early_init();
-
- omap_hwmod_late_init();
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ if (cpu_is_omap24xx() || omap3_has_sdrc()) {
omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
_omap2_init_reprogram_sdrc();
}
- gpmc_init();
omap_irq_base_init();
}
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index 14ee686b6492..adb083e41acd 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -145,35 +145,32 @@ static void omap2_iommu_set_twl(struct iommu *obj, bool on)
static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
{
- int i;
u32 stat, da;
- const char *err_msg[] = {
- "tlb miss",
- "translation fault",
- "emulation miss",
- "table walk fault",
- "multi hit fault",
- };
+ u32 errs = 0;
stat = iommu_read_reg(obj, MMU_IRQSTATUS);
stat &= MMU_IRQ_MASK;
- if (!stat)
+ if (!stat) {
+ *ra = 0;
return 0;
+ }
da = iommu_read_reg(obj, MMU_FAULT_AD);
*ra = da;
- dev_err(obj->dev, "%s:\tda:%08x ", __func__, da);
-
- for (i = 0; i < ARRAY_SIZE(err_msg); i++) {
- if (stat & (1 << i))
- printk("%s ", err_msg[i]);
- }
- printk("\n");
-
+ if (stat & MMU_IRQ_TLBMISS)
+ errs |= OMAP_IOMMU_ERR_TLB_MISS;
+ if (stat & MMU_IRQ_TRANSLATIONFAULT)
+ errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
+ if (stat & MMU_IRQ_EMUMISS)
+ errs |= OMAP_IOMMU_ERR_EMU_MISS;
+ if (stat & MMU_IRQ_TABLEWALKFAULT)
+ errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
+ if (stat & MMU_IRQ_MULTIHITFAULT)
+ errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
iommu_write_reg(obj, stat, MMU_IRQSTATUS);
- return stat;
+ return errs;
}
static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 23049c487c47..bc524b94fd59 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -61,8 +61,6 @@ struct omap3_intc_regs {
u32 mir[INTCPS_NR_MIR_REGS];
};
-static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
-
/* INTC bank register get/set */
static void intc_bank_write_reg(u32 val, struct omap_irq_bank *bank, u16 reg)
@@ -110,7 +108,7 @@ static void omap_mask_irq(struct irq_data *d)
unsigned int irq = d->irq;
int offset = irq & (~(IRQ_BITS_PER_REG - 1));
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() && !cpu_is_ti816x()) {
int spurious = 0;
/*
@@ -205,6 +203,9 @@ void __init omap_init_irq(void)
BUG_ON(!base);
+ if (cpu_is_ti816x())
+ bank->nr_irqs = 128;
+
/* Static mapping, never released */
bank->base_reg = ioremap(base, SZ_4K);
if (!bank->base_reg) {
@@ -229,6 +230,8 @@ void __init omap_init_irq(void)
}
#ifdef CONFIG_ARCH_OMAP3
+static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
+
void omap_intc_save_context(void)
{
int ind = 0, i = 0;
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 394413dc7deb..6e15e3d7c65e 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -14,12 +14,11 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <plat/mailbox.h>
#include <mach/irqs.h>
#define MAILBOX_REVISION 0x000
-#define MAILBOX_SYSCONFIG 0x010
-#define MAILBOX_SYSSTATUS 0x014
#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
@@ -33,17 +32,6 @@
#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
-/* SYSCONFIG: register bit definition */
-#define AUTOIDLE (1 << 0)
-#define SOFTRESET (1 << 1)
-#define SMARTIDLE (2 << 3)
-#define OMAP4_SOFTRESET (1 << 0)
-#define OMAP4_NOIDLE (1 << 2)
-#define OMAP4_SMARTIDLE (2 << 2)
-
-/* SYSSTATUS: register bit definition */
-#define RESETDONE (1 << 0)
-
#define MBOX_REG_SIZE 0x120
#define OMAP4_MBOX_REG_SIZE 0x130
@@ -70,8 +58,6 @@ struct omap_mbox2_priv {
unsigned long irqdisable;
};
-static struct clk *mbox_ick_handle;
-
static void omap2_mbox_enable_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq);
@@ -89,53 +75,13 @@ static inline void mbox_write_reg(u32 val, size_t ofs)
static int omap2_mbox_startup(struct omap_mbox *mbox)
{
u32 l;
- unsigned long timeout;
- mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
- if (IS_ERR(mbox_ick_handle)) {
- printk(KERN_ERR "Could not get mailboxes_ick: %ld\n",
- PTR_ERR(mbox_ick_handle));
- return PTR_ERR(mbox_ick_handle);
- }
- clk_enable(mbox_ick_handle);
-
- if (cpu_is_omap44xx()) {
- mbox_write_reg(OMAP4_SOFTRESET, MAILBOX_SYSCONFIG);
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- l = mbox_read_reg(MAILBOX_SYSCONFIG);
- if (!(l & OMAP4_SOFTRESET))
- break;
- } while (!time_after(jiffies, timeout));
-
- if (l & OMAP4_SOFTRESET) {
- pr_err("Can't take mailbox out of reset\n");
- return -ENODEV;
- }
- } else {
- mbox_write_reg(SOFTRESET, MAILBOX_SYSCONFIG);
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- l = mbox_read_reg(MAILBOX_SYSSTATUS);
- if (l & RESETDONE)
- break;
- } while (!time_after(jiffies, timeout));
-
- if (!(l & RESETDONE)) {
- pr_err("Can't take mailbox out of reset\n");
- return -ENODEV;
- }
- }
+ pm_runtime_enable(mbox->dev->parent);
+ pm_runtime_get_sync(mbox->dev->parent);
l = mbox_read_reg(MAILBOX_REVISION);
pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
- if (cpu_is_omap44xx())
- l = OMAP4_SMARTIDLE;
- else
- l = SMARTIDLE | AUTOIDLE;
- mbox_write_reg(l, MAILBOX_SYSCONFIG);
-
omap2_mbox_enable_irq(mbox, IRQ_RX);
return 0;
@@ -143,9 +89,8 @@ static int omap2_mbox_startup(struct omap_mbox *mbox)
static void omap2_mbox_shutdown(struct omap_mbox *mbox)
{
- clk_disable(mbox_ick_handle);
- clk_put(mbox_ick_handle);
- mbox_ick_handle = NULL;
+ pm_runtime_put_sync(mbox->dev->parent);
+ pm_runtime_disable(mbox->dev->parent);
}
/* Mailbox FIFO handle functions */
@@ -310,7 +255,7 @@ struct omap_mbox mbox_dsp_info = {
struct omap_mbox *omap3_mboxes[] = { &mbox_dsp_info, NULL };
#endif
-#if defined(CONFIG_ARCH_OMAP2420)
+#if defined(CONFIG_SOC_OMAP2420)
/* IVA */
static struct omap_mbox2_priv omap2_mbox_iva_priv = {
.tx_fifo = {
@@ -334,7 +279,7 @@ static struct omap_mbox mbox_iva_info = {
.priv = &omap2_mbox_iva_priv,
};
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
#endif
#if defined(CONFIG_ARCH_OMAP4)
@@ -398,14 +343,14 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
else if (cpu_is_omap34xx()) {
list = omap3_mboxes;
- list[0]->irq = platform_get_irq_byname(pdev, "dsp");
+ list[0]->irq = platform_get_irq(pdev, 0);
}
#endif
#if defined(CONFIG_ARCH_OMAP2)
else if (cpu_is_omap2430()) {
list = omap2_mboxes;
- list[0]->irq = platform_get_irq_byname(pdev, "dsp");
+ list[0]->irq = platform_get_irq(pdev, 0);
} else if (cpu_is_omap2420()) {
list = omap2_mboxes;
@@ -417,8 +362,7 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
else if (cpu_is_omap44xx()) {
list = omap4_mboxes;
- list[0]->irq = list[1]->irq =
- platform_get_irq_byname(pdev, "mbox");
+ list[0]->irq = list[1]->irq = platform_get_irq(pdev, 0);
}
#endif
else {
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index f9c9df5b5ff1..565b9064a328 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -22,10 +22,11 @@
#include <plat/dma.h>
#include <plat/cpu.h>
#include <plat/mcbsp.h>
+#include <plat/omap_device.h>
+#include <linux/pm_runtime.h>
#include "control.h"
-
/* McBSP internal signal muxing functions */
void omap2_mcbsp1_mux_clkr_src(u8 mux)
@@ -83,7 +84,7 @@ int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id)
return -EINVAL;
}
- clk_disable(mcbsp->fclk);
+ pm_runtime_put_sync(mcbsp->dev);
r = clk_set_parent(mcbsp->fclk, fck_src);
if (IS_ERR_VALUE(r)) {
@@ -93,7 +94,7 @@ int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id)
return -EINVAL;
}
- clk_enable(mcbsp->fclk);
+ pm_runtime_get_sync(mcbsp->dev);
clk_put(fck_src);
@@ -101,196 +102,70 @@ int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id)
}
EXPORT_SYMBOL(omap2_mcbsp_set_clks_src);
-
-/* Platform data */
-
-#ifdef CONFIG_ARCH_OMAP2420
-static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = {
+struct omap_device_pm_latency omap2_mcbsp_latency[] = {
{
- .phys_base = OMAP24XX_MCBSP1_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
- .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
- },
- {
- .phys_base = OMAP24XX_MCBSP2_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
- .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
},
};
-#define OMAP2420_MCBSP_PDATA_SZ ARRAY_SIZE(omap2420_mcbsp_pdata)
-#define OMAP2420_MCBSP_REG_NUM (OMAP_MCBSP_REG_RCCR / sizeof(u32) + 1)
-#else
-#define omap2420_mcbsp_pdata NULL
-#define OMAP2420_MCBSP_PDATA_SZ 0
-#define OMAP2420_MCBSP_REG_NUM 0
-#endif
-#ifdef CONFIG_ARCH_OMAP2430
-static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
- {
- .phys_base = OMAP24XX_MCBSP1_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
- .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
- },
- {
- .phys_base = OMAP24XX_MCBSP2_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
- .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
- },
- {
- .phys_base = OMAP2430_MCBSP3_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP3_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP3_TX,
- .rx_irq = INT_24XX_MCBSP3_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP3_IRQ_TX,
- },
- {
- .phys_base = OMAP2430_MCBSP4_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP4_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP4_TX,
- .rx_irq = INT_24XX_MCBSP4_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP4_IRQ_TX,
- },
- {
- .phys_base = OMAP2430_MCBSP5_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP5_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP5_TX,
- .rx_irq = INT_24XX_MCBSP5_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP5_IRQ_TX,
- },
-};
-#define OMAP2430_MCBSP_PDATA_SZ ARRAY_SIZE(omap2430_mcbsp_pdata)
-#define OMAP2430_MCBSP_REG_NUM (OMAP_MCBSP_REG_RCCR / sizeof(u32) + 1)
-#else
-#define omap2430_mcbsp_pdata NULL
-#define OMAP2430_MCBSP_PDATA_SZ 0
-#define OMAP2430_MCBSP_REG_NUM 0
-#endif
+static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
+{
+ int id, count = 1;
+ char *name = "omap-mcbsp";
+ struct omap_hwmod *oh_device[2];
+ struct omap_mcbsp_platform_data *pdata = NULL;
+ struct omap_device *od;
-#ifdef CONFIG_ARCH_OMAP3
-static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
- {
- .phys_base = OMAP34XX_MCBSP1_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
- .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
- .buffer_size = 0x80, /* The FIFO has 128 locations */
- },
- {
- .phys_base = OMAP34XX_MCBSP2_BASE,
- .phys_base_st = OMAP34XX_MCBSP2_ST_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
- .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
- .buffer_size = 0x500, /* The FIFO has 1024 + 256 locations */
- },
- {
- .phys_base = OMAP34XX_MCBSP3_BASE,
- .phys_base_st = OMAP34XX_MCBSP3_ST_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP3_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP3_TX,
- .rx_irq = INT_24XX_MCBSP3_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP3_IRQ_TX,
- .buffer_size = 0x80, /* The FIFO has 128 locations */
- },
- {
- .phys_base = OMAP34XX_MCBSP4_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP4_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP4_TX,
- .rx_irq = INT_24XX_MCBSP4_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP4_IRQ_TX,
- .buffer_size = 0x80, /* The FIFO has 128 locations */
- },
- {
- .phys_base = OMAP34XX_MCBSP5_BASE,
- .dma_rx_sync = OMAP24XX_DMA_MCBSP5_RX,
- .dma_tx_sync = OMAP24XX_DMA_MCBSP5_TX,
- .rx_irq = INT_24XX_MCBSP5_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP5_IRQ_TX,
- .buffer_size = 0x80, /* The FIFO has 128 locations */
- },
-};
-#define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata)
-#define OMAP34XX_MCBSP_REG_NUM (OMAP_MCBSP_REG_RCCR / sizeof(u32) + 1)
-#else
-#define omap34xx_mcbsp_pdata NULL
-#define OMAP34XX_MCBSP_PDATA_SZ 0
-#define OMAP34XX_MCBSP_REG_NUM 0
-#endif
+ sscanf(oh->name, "mcbsp%d", &id);
-static struct omap_mcbsp_platform_data omap44xx_mcbsp_pdata[] = {
- {
- .phys_base = OMAP44XX_MCBSP1_BASE,
- .dma_rx_sync = OMAP44XX_DMA_MCBSP1_RX,
- .dma_tx_sync = OMAP44XX_DMA_MCBSP1_TX,
- .tx_irq = OMAP44XX_IRQ_MCBSP1,
- },
- {
- .phys_base = OMAP44XX_MCBSP2_BASE,
- .dma_rx_sync = OMAP44XX_DMA_MCBSP2_RX,
- .dma_tx_sync = OMAP44XX_DMA_MCBSP2_TX,
- .tx_irq = OMAP44XX_IRQ_MCBSP2,
- },
- {
- .phys_base = OMAP44XX_MCBSP3_BASE,
- .dma_rx_sync = OMAP44XX_DMA_MCBSP3_RX,
- .dma_tx_sync = OMAP44XX_DMA_MCBSP3_TX,
- .tx_irq = OMAP44XX_IRQ_MCBSP3,
- },
- {
- .phys_base = OMAP44XX_MCBSP4_BASE,
- .dma_rx_sync = OMAP44XX_DMA_MCBSP4_RX,
- .dma_tx_sync = OMAP44XX_DMA_MCBSP4_TX,
- .tx_irq = OMAP44XX_IRQ_MCBSP4,
- },
-};
-#define OMAP44XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap44xx_mcbsp_pdata)
-#define OMAP44XX_MCBSP_REG_NUM (OMAP_MCBSP_REG_RCCR / sizeof(u32) + 1)
+ pdata = kzalloc(sizeof(struct omap_mcbsp_platform_data), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: No memory for mcbsp\n", __func__);
+ return -ENOMEM;
+ }
+
+ pdata->mcbsp_config_type = oh->class->rev;
+
+ if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
+ if (id == 2)
+ /* The FIFO has 1024 + 256 locations */
+ pdata->buffer_size = 0x500;
+ else
+ /* The FIFO has 128 locations */
+ pdata->buffer_size = 0x80;
+ }
+
+ oh_device[0] = oh;
+
+ if (oh->dev_attr) {
+ oh_device[1] = omap_hwmod_lookup((
+ (struct omap_mcbsp_dev_attr *)(oh->dev_attr))->sidetone);
+ count++;
+ }
+ od = omap_device_build_ss(name, id, oh_device, count, pdata,
+ sizeof(*pdata), omap2_mcbsp_latency,
+ ARRAY_SIZE(omap2_mcbsp_latency), false);
+ kfree(pdata);
+ if (IS_ERR(od)) {
+ pr_err("%s: Cant build omap_device for %s:%s.\n", __func__,
+ name, oh->name);
+ return PTR_ERR(od);
+ }
+ omap_mcbsp_count++;
+ return 0;
+}
static int __init omap2_mcbsp_init(void)
{
- if (cpu_is_omap2420()) {
- omap_mcbsp_count = OMAP2420_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP2420_MCBSP_REG_NUM * sizeof(u16);
- } else if (cpu_is_omap2430()) {
- omap_mcbsp_count = OMAP2430_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP2430_MCBSP_REG_NUM * sizeof(u32);
- } else if (cpu_is_omap34xx()) {
- omap_mcbsp_count = OMAP34XX_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP34XX_MCBSP_REG_NUM * sizeof(u32);
- } else if (cpu_is_omap44xx()) {
- omap_mcbsp_count = OMAP44XX_MCBSP_PDATA_SZ;
- omap_mcbsp_cache_size = OMAP44XX_MCBSP_REG_NUM * sizeof(u32);
- }
+ omap_hwmod_for_each_by_class("mcbsp", omap_init_mcbsp, NULL);
mcbsp_ptr = kzalloc(omap_mcbsp_count * sizeof(struct omap_mcbsp *),
GFP_KERNEL);
if (!mcbsp_ptr)
return -ENOMEM;
- if (cpu_is_omap2420())
- omap_mcbsp_register_board_cfg(omap2420_mcbsp_pdata,
- OMAP2420_MCBSP_PDATA_SZ);
- if (cpu_is_omap2430())
- omap_mcbsp_register_board_cfg(omap2430_mcbsp_pdata,
- OMAP2430_MCBSP_PDATA_SZ);
- if (cpu_is_omap34xx())
- omap_mcbsp_register_board_cfg(omap34xx_mcbsp_pdata,
- OMAP34XX_MCBSP_PDATA_SZ);
- if (cpu_is_omap44xx())
- omap_mcbsp_register_board_cfg(omap44xx_mcbsp_pdata,
- OMAP44XX_MCBSP_PDATA_SZ);
-
return omap_mcbsp_init();
}
arch_initcall(omap2_mcbsp_init);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 98148b6c36e9..b2d097a60313 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -258,7 +258,7 @@ struct omap_hwmod_mux_info * __init
omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
{
struct omap_hwmod_mux_info *hmux;
- int i;
+ int i, nr_pads_dynamic = 0;
if (!bpads || nr_pads < 1)
return NULL;
@@ -302,9 +302,40 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
pad->enable = bpad->enable;
pad->idle = bpad->idle;
pad->off = bpad->off;
+
+ if (pad->flags & OMAP_DEVICE_PAD_REMUX)
+ nr_pads_dynamic++;
+
pr_debug("%s: Initialized %s\n", __func__, pad->name);
}
+ if (!nr_pads_dynamic)
+ return hmux;
+
+ /*
+ * Add pads that need dynamic muxing into a separate list
+ */
+
+ hmux->nr_pads_dynamic = nr_pads_dynamic;
+ hmux->pads_dynamic = kzalloc(sizeof(struct omap_device_pad *) *
+ nr_pads_dynamic, GFP_KERNEL);
+ if (!hmux->pads_dynamic) {
+ pr_err("%s: Could not allocate dynamic pads\n", __func__);
+ return hmux;
+ }
+
+ nr_pads_dynamic = 0;
+ for (i = 0; i < hmux->nr_pads; i++) {
+ struct omap_device_pad *pad = &hmux->pads[i];
+
+ if (pad->flags & OMAP_DEVICE_PAD_REMUX) {
+ pr_debug("%s: pad %s tagged dynamic\n",
+ __func__, pad->name);
+ hmux->pads_dynamic[nr_pads_dynamic] = pad;
+ nr_pads_dynamic++;
+ }
+ }
+
return hmux;
err3:
@@ -322,6 +353,46 @@ void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
{
int i;
+ /* Runtime idling of dynamic pads */
+ if (state == _HWMOD_STATE_IDLE && hmux->enabled) {
+ for (i = 0; i < hmux->nr_pads_dynamic; i++) {
+ struct omap_device_pad *pad = &hmux->pads[i];
+ int val = -EINVAL;
+
+ pad->flags &= ~OMAP_DEVICE_PAD_ENABLED;
+ pad->flags |= OMAP_DEVICE_PAD_IDLE;
+ val = pad->idle;
+ omap_mux_write(pad->partition, val,
+ pad->mux->reg_offset);
+ }
+
+ return;
+ }
+
+ /* Runtime enabling of dynamic pads */
+ if ((state == _HWMOD_STATE_ENABLED) && hmux->pads_dynamic) {
+ int idled = 0;
+
+ for (i = 0; i < hmux->nr_pads_dynamic; i++) {
+ struct omap_device_pad *pad = &hmux->pads[i];
+ int val = -EINVAL;
+
+ if (!(pad->flags & OMAP_DEVICE_PAD_IDLE))
+ continue;
+
+ pad->flags &= ~OMAP_DEVICE_PAD_IDLE;
+ pad->flags |= OMAP_DEVICE_PAD_ENABLED;
+ val = pad->enable;
+ omap_mux_write(pad->partition, val,
+ pad->mux->reg_offset);
+ idled++;
+ }
+
+ if (idled)
+ return;
+ }
+
+ /* Enabling, disabling or idling of all pads */
for (i = 0; i < hmux->nr_pads; i++) {
struct omap_device_pad *pad = &hmux->pads[i];
int flags, val = -EINVAL;
@@ -363,6 +434,11 @@ void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
pad->flags = flags;
}
}
+
+ if (state == _HWMOD_STATE_ENABLED)
+ hmux->enabled = true;
+ else
+ hmux->enabled = false;
}
#ifdef CONFIG_DEBUG_FS
@@ -605,7 +681,7 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
- (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+ (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
m, &omap_mux_dbg_signal_fops);
}
}
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index a4ab17a737a6..8f6e3265ccab 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -159,7 +159,8 @@ struct omap_board_mux {
u16 value;
};
-#define OMAP_DEVICE_PAD_ENABLED BIT(7) /* Not needed for board-*.c */
+#define OMAP_DEVICE_PAD_IDLE BIT(7) /* Not needed for board-*.c */
+#define OMAP_DEVICE_PAD_ENABLED BIT(6) /* Not needed for board-*.c */
#define OMAP_DEVICE_PAD_REMUX BIT(1) /* Dynamically remux a pad,
needs enable, idle and off
values */
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e282e35769fd..028efda07618 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1,7 +1,7 @@
/*
* omap_hwmod implementation for OMAP2/3/4
*
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
*
* Paul Walmsley, Benoît Cousson, Kevin Hilman
*
@@ -162,9 +162,6 @@ static LIST_HEAD(omap_hwmod_list);
/* mpu_oh: used to add/remove MPU initiator from sleepdep list */
static struct omap_hwmod *mpu_oh;
-/* inited: 0 if omap_hwmod_init() has not yet been called; 1 otherwise */
-static u8 inited;
-
/* Private functions */
@@ -904,18 +901,16 @@ static struct omap_hwmod *_lookup(const char *name)
* @oh: struct omap_hwmod *
* @data: not used; pass NULL
*
- * Called by omap_hwmod_late_init() (after omap2_clk_init()).
- * Resolves all clock names embedded in the hwmod. Returns -EINVAL if
- * the omap_hwmod has not yet been registered or if the clocks have
- * already been initialized, 0 on success, or a non-zero error on
- * failure.
+ * Called by omap_hwmod_setup_*() (after omap2_clk_init()).
+ * Resolves all clock names embedded in the hwmod. Returns 0 on
+ * success, or a negative error code on failure.
*/
static int _init_clocks(struct omap_hwmod *oh, void *data)
{
int ret = 0;
- if (!oh || (oh->_state != _HWMOD_STATE_REGISTERED))
- return -EINVAL;
+ if (oh->_state != _HWMOD_STATE_REGISTERED)
+ return 0;
pr_debug("omap_hwmod: %s: looking up clocks\n", oh->name);
@@ -1230,8 +1225,9 @@ static int _enable(struct omap_hwmod *oh)
_deassert_hardreset(oh, oh->rst_lines[0].name);
/* Mux pins for device runtime if populated */
- if (oh->mux)
- omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
+ if (oh->mux && ((oh->_state == _HWMOD_STATE_DISABLED) ||
+ ((oh->_state == _HWMOD_STATE_IDLE) && oh->mux->pads_dynamic)))
+ omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
_add_initiator_dep(oh, mpu_oh);
_enable_clocks(oh);
@@ -1279,7 +1275,7 @@ static int _idle(struct omap_hwmod *oh)
_disable_clocks(oh);
/* Mux pins for device idle if populated */
- if (oh->mux)
+ if (oh->mux && oh->mux->pads_dynamic)
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
oh->_state = _HWMOD_STATE_IDLE;
@@ -1354,14 +1350,16 @@ static int _shutdown(struct omap_hwmod *oh)
* @oh: struct omap_hwmod *
*
* Writes the CLOCKACTIVITY bits @clockact to the hwmod @oh
- * OCP_SYSCONFIG register. Returns -EINVAL if the hwmod is in the
- * wrong state or returns 0.
+ * OCP_SYSCONFIG register. Returns 0.
*/
static int _setup(struct omap_hwmod *oh, void *data)
{
int i, r;
u8 postsetup_state;
+ if (oh->_state != _HWMOD_STATE_CLKS_INITED)
+ return 0;
+
/* Set iclk autoidle mode */
if (oh->slaves_cnt > 0) {
for (i = 0; i < oh->slaves_cnt; i++) {
@@ -1455,7 +1453,7 @@ static int _setup(struct omap_hwmod *oh, void *data)
*/
static int __init _register(struct omap_hwmod *oh)
{
- int ret, ms_id;
+ int ms_id;
if (!oh || !oh->name || !oh->class || !oh->class->name ||
(oh->_state != _HWMOD_STATE_UNKNOWN))
@@ -1467,12 +1465,10 @@ static int __init _register(struct omap_hwmod *oh)
return -EEXIST;
ms_id = _find_mpu_port_index(oh);
- if (!IS_ERR_VALUE(ms_id)) {
+ if (!IS_ERR_VALUE(ms_id))
oh->_mpu_port_index = ms_id;
- oh->_mpu_rt_va = _find_mpu_rt_base(oh, oh->_mpu_port_index);
- } else {
+ else
oh->_int_flags |= _HWMOD_NO_MPU_PORT;
- }
list_add_tail(&oh->node, &omap_hwmod_list);
@@ -1480,9 +1476,14 @@ static int __init _register(struct omap_hwmod *oh)
oh->_state = _HWMOD_STATE_REGISTERED;
- ret = 0;
+ /*
+ * XXX Rather than doing a strcmp(), this should test a flag
+ * set in the hwmod data, inserted by the autogenerator code.
+ */
+ if (!strcmp(oh->name, MPU_INITIATOR_NAME))
+ mpu_oh = oh;
- return ret;
+ return 0;
}
@@ -1585,65 +1586,132 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
return ret;
}
-
/**
- * omap_hwmod_init - init omap_hwmod code and register hwmods
+ * omap_hwmod_register - register an array of hwmods
* @ohs: pointer to an array of omap_hwmods to register
*
* Intended to be called early in boot before the clock framework is
* initialized. If @ohs is not null, will register all omap_hwmods
- * listed in @ohs that are valid for this chip. Returns -EINVAL if
- * omap_hwmod_init() has already been called or 0 otherwise.
+ * listed in @ohs that are valid for this chip. Returns 0.
*/
-int __init omap_hwmod_init(struct omap_hwmod **ohs)
+int __init omap_hwmod_register(struct omap_hwmod **ohs)
+{
+ int r, i;
+
+ if (!ohs)
+ return 0;
+
+ i = 0;
+ do {
+ if (!omap_chip_is(ohs[i]->omap_chip))
+ continue;
+
+ r = _register(ohs[i]);
+ WARN(r, "omap_hwmod: %s: _register returned %d\n", ohs[i]->name,
+ r);
+ } while (ohs[++i]);
+
+ return 0;
+}
+
+/*
+ * _populate_mpu_rt_base - populate the virtual address for a hwmod
+ *
+ * Must be called only from omap_hwmod_setup_*() so ioremap works properly.
+ * Assumes the caller takes care of locking if needed.
+ */
+static int __init _populate_mpu_rt_base(struct omap_hwmod *oh, void *data)
+{
+ if (oh->_state != _HWMOD_STATE_REGISTERED)
+ return 0;
+
+ if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+ return 0;
+
+ oh->_mpu_rt_va = _find_mpu_rt_base(oh, oh->_mpu_port_index);
+ if (!oh->_mpu_rt_va)
+ pr_warning("omap_hwmod: %s found no _mpu_rt_va for %s\n",
+ __func__, oh->name);
+
+ return 0;
+}
+
+/**
+ * omap_hwmod_setup_one - set up a single hwmod
+ * @oh_name: const char * name of the already-registered hwmod to set up
+ *
+ * Must be called after omap2_clk_init(). Resolves the struct clk
+ * names to struct clk pointers for the specified omap_hwmod. Also
+ * calls _setup() on that hwmod. Returns -EINVAL upon error or 0
+ * upon success.
+ */
+int __init omap_hwmod_setup_one(const char *oh_name)
{
struct omap_hwmod *oh;
int r;
- if (inited)
+ pr_debug("omap_hwmod: %s: %s\n", oh_name, __func__);
+
+ if (!mpu_oh) {
+ pr_err("omap_hwmod: %s: cannot setup_one: MPU initiator hwmod %s not yet registered\n",
+ oh_name, MPU_INITIATOR_NAME);
+ return -EINVAL;
+ }
+
+ oh = _lookup(oh_name);
+ if (!oh) {
+ WARN(1, "omap_hwmod: %s: hwmod not yet registered\n", oh_name);
return -EINVAL;
+ }
- inited = 1;
+ if (mpu_oh->_state == _HWMOD_STATE_REGISTERED && oh != mpu_oh)
+ omap_hwmod_setup_one(MPU_INITIATOR_NAME);
- if (!ohs)
- return 0;
+ r = _populate_mpu_rt_base(oh, NULL);
+ if (IS_ERR_VALUE(r)) {
+ WARN(1, "omap_hwmod: %s: couldn't set mpu_rt_base\n", oh_name);
+ return -EINVAL;
+ }
- oh = *ohs;
- while (oh) {
- if (omap_chip_is(oh->omap_chip)) {
- r = _register(oh);
- WARN(r, "omap_hwmod: %s: _register returned "
- "%d\n", oh->name, r);
- }
- oh = *++ohs;
+ r = _init_clocks(oh, NULL);
+ if (IS_ERR_VALUE(r)) {
+ WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh_name);
+ return -EINVAL;
}
+ _setup(oh, NULL);
+
return 0;
}
/**
- * omap_hwmod_late_init - do some post-clock framework initialization
+ * omap_hwmod_setup - do some post-clock framework initialization
*
* Must be called after omap2_clk_init(). Resolves the struct clk names
* to struct clk pointers for each registered omap_hwmod. Also calls
- * _setup() on each hwmod. Returns 0.
+ * _setup() on each hwmod. Returns 0 upon success.
*/
-int omap_hwmod_late_init(void)
+static int __init omap_hwmod_setup_all(void)
{
int r;
- /* XXX check return value */
- r = omap_hwmod_for_each(_init_clocks, NULL);
- WARN(r, "omap_hwmod: omap_hwmod_late_init(): _init_clocks failed\n");
+ if (!mpu_oh) {
+ pr_err("omap_hwmod: %s: MPU initiator hwmod %s not yet registered\n",
+ __func__, MPU_INITIATOR_NAME);
+ return -EINVAL;
+ }
+
+ r = omap_hwmod_for_each(_populate_mpu_rt_base, NULL);
- mpu_oh = omap_hwmod_lookup(MPU_INITIATOR_NAME);
- WARN(!mpu_oh, "omap_hwmod: could not find MPU initiator hwmod %s\n",
- MPU_INITIATOR_NAME);
+ r = omap_hwmod_for_each(_init_clocks, NULL);
+ WARN(IS_ERR_VALUE(r),
+ "omap_hwmod: %s: _init_clocks failed\n", __func__);
omap_hwmod_for_each(_setup, NULL);
return 0;
}
+core_initcall(omap_hwmod_setup_all);
/**
* omap_hwmod_enable - enable an omap_hwmod
@@ -1862,6 +1930,7 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
os = oh->slaves[i];
for (j = 0; j < os->addr_cnt; j++) {
+ (res + r)->name = (os->addr + j)->name;
(res + r)->start = (os->addr + j)->pa_start;
(res + r)->end = (os->addr + j)->pa_end;
(res + r)->flags = IORESOURCE_MEM;
@@ -2162,11 +2231,11 @@ int omap_hwmod_for_each_by_class(const char *classname,
* @oh: struct omap_hwmod *
* @state: state that _setup() should leave the hwmod in
*
- * Sets the hwmod state that @oh will enter at the end of _setup() (called by
- * omap_hwmod_late_init()). Only valid to call between calls to
- * omap_hwmod_init() and omap_hwmod_late_init(). Returns 0 upon success or
- * -EINVAL if there is a problem with the arguments or if the hwmod is
- * in the wrong state.
+ * Sets the hwmod state that @oh will enter at the end of _setup()
+ * (called by omap_hwmod_setup_*()). Only valid to call between
+ * calling omap_hwmod_register() and omap_hwmod_setup_*(). Returns
+ * 0 upon success or -EINVAL if there is a problem with the arguments
+ * or if the hwmod is in the wrong state.
*/
int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state)
{
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index b85c630b64d6..76f2f9d17e62 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -18,6 +18,10 @@
#include <plat/serial.h>
#include <plat/i2c.h>
#include <plat/gpio.h>
+#include <plat/mcspi.h>
+#include <plat/dmtimer.h>
+#include <plat/l3_2xxx.h>
+#include <plat/l4_2xxx.h>
#include "omap_hwmod_common_data.h"
@@ -38,12 +42,18 @@ static struct omap_hwmod omap2420_mpu_hwmod;
static struct omap_hwmod omap2420_iva_hwmod;
static struct omap_hwmod omap2420_l3_main_hwmod;
static struct omap_hwmod omap2420_l4_core_hwmod;
+static struct omap_hwmod omap2420_dss_core_hwmod;
+static struct omap_hwmod omap2420_dss_dispc_hwmod;
+static struct omap_hwmod omap2420_dss_rfbi_hwmod;
+static struct omap_hwmod omap2420_dss_venc_hwmod;
static struct omap_hwmod omap2420_wd_timer2_hwmod;
static struct omap_hwmod omap2420_gpio1_hwmod;
static struct omap_hwmod omap2420_gpio2_hwmod;
static struct omap_hwmod omap2420_gpio3_hwmod;
static struct omap_hwmod omap2420_gpio4_hwmod;
static struct omap_hwmod omap2420_dma_system_hwmod;
+static struct omap_hwmod omap2420_mcspi1_hwmod;
+static struct omap_hwmod omap2420_mcspi2_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = {
@@ -64,6 +74,19 @@ static struct omap_hwmod_ocp_if *omap2420_l3_main_slaves[] = {
&omap2420_mpu__l3_main,
};
+/* DSS -> l3 */
+static struct omap_hwmod_ocp_if omap2420_dss__l3 = {
+ .master = &omap2420_dss_core_hwmod,
+ .slave = &omap2420_l3_main_hwmod,
+ .fw = {
+ .omap2 = {
+ .l3_perm_bit = OMAP2_L3_CORE_FW_CONNID_DSS,
+ .flags = OMAP_FIREWALL_L3,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Master interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap2420_l3_main_masters[] = {
&omap2420_l3_main__l4_core,
@@ -87,6 +110,44 @@ static struct omap_hwmod omap2420_uart2_hwmod;
static struct omap_hwmod omap2420_uart3_hwmod;
static struct omap_hwmod omap2420_i2c1_hwmod;
static struct omap_hwmod omap2420_i2c2_hwmod;
+static struct omap_hwmod omap2420_mcbsp1_hwmod;
+static struct omap_hwmod omap2420_mcbsp2_hwmod;
+
+/* l4 core -> mcspi1 interface */
+static struct omap_hwmod_addr_space omap2420_mcspi1_addr_space[] = {
+ {
+ .pa_start = 0x48098000,
+ .pa_end = 0x480980ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi1 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_mcspi1_hwmod,
+ .clk = "mcspi1_ick",
+ .addr = omap2420_mcspi1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2420_mcspi1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 core -> mcspi2 interface */
+static struct omap_hwmod_addr_space omap2420_mcspi2_addr_space[] = {
+ {
+ .pa_start = 0x4809a000,
+ .pa_end = 0x4809a0ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi2 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_mcspi2_hwmod,
+ .clk = "mcspi2_ick",
+ .addr = omap2420_mcspi2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2420_mcspi2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
@@ -279,6 +340,625 @@ static struct omap_hwmod omap2420_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
+/* Timer Common */
+static struct omap_hwmod_class_sysconfig omap2420_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_timer_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap2420_timer_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_1,
+};
+
+/* timer1 */
+static struct omap_hwmod omap2420_timer1_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer1_mpu_irqs[] = {
+ { .irq = 37, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer1_addrs[] = {
+ {
+ .pa_start = 0x48028000,
+ .pa_end = 0x48028000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> timer1 */
+static struct omap_hwmod_ocp_if omap2420_l4_wkup__timer1 = {
+ .master = &omap2420_l4_wkup_hwmod,
+ .slave = &omap2420_timer1_hwmod,
+ .clk = "gpt1_ick",
+ .addr = omap2420_timer1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer1 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer1_slaves[] = {
+ &omap2420_l4_wkup__timer1,
+};
+
+/* timer1 hwmod */
+static struct omap_hwmod omap2420_timer1_hwmod = {
+ .name = "timer1",
+ .mpu_irqs = omap2420_timer1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer1_mpu_irqs),
+ .main_clk = "gpt1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT1_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT1_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer1_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer2 */
+static struct omap_hwmod omap2420_timer2_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer2_mpu_irqs[] = {
+ { .irq = 38, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer2_addrs[] = {
+ {
+ .pa_start = 0x4802a000,
+ .pa_end = 0x4802a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer2 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer2 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer2_hwmod,
+ .clk = "gpt2_ick",
+ .addr = omap2420_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer2 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer2_slaves[] = {
+ &omap2420_l4_core__timer2,
+};
+
+/* timer2 hwmod */
+static struct omap_hwmod omap2420_timer2_hwmod = {
+ .name = "timer2",
+ .mpu_irqs = omap2420_timer2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer2_mpu_irqs),
+ .main_clk = "gpt2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT2_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT2_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer2_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer3 */
+static struct omap_hwmod omap2420_timer3_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer3_mpu_irqs[] = {
+ { .irq = 39, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer3_addrs[] = {
+ {
+ .pa_start = 0x48078000,
+ .pa_end = 0x48078000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer3 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer3 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer3_hwmod,
+ .clk = "gpt3_ick",
+ .addr = omap2420_timer3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer3 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer3_slaves[] = {
+ &omap2420_l4_core__timer3,
+};
+
+/* timer3 hwmod */
+static struct omap_hwmod omap2420_timer3_hwmod = {
+ .name = "timer3",
+ .mpu_irqs = omap2420_timer3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer3_mpu_irqs),
+ .main_clk = "gpt3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT3_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT3_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer3_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer4 */
+static struct omap_hwmod omap2420_timer4_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer4_mpu_irqs[] = {
+ { .irq = 40, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer4_addrs[] = {
+ {
+ .pa_start = 0x4807a000,
+ .pa_end = 0x4807a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer4 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer4 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer4_hwmod,
+ .clk = "gpt4_ick",
+ .addr = omap2420_timer4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer4 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer4_slaves[] = {
+ &omap2420_l4_core__timer4,
+};
+
+/* timer4 hwmod */
+static struct omap_hwmod omap2420_timer4_hwmod = {
+ .name = "timer4",
+ .mpu_irqs = omap2420_timer4_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer4_mpu_irqs),
+ .main_clk = "gpt4_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT4_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT4_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer4_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer5 */
+static struct omap_hwmod omap2420_timer5_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer5_mpu_irqs[] = {
+ { .irq = 41, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer5_addrs[] = {
+ {
+ .pa_start = 0x4807c000,
+ .pa_end = 0x4807c000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer5 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer5 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer5_hwmod,
+ .clk = "gpt5_ick",
+ .addr = omap2420_timer5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer5_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer5 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer5_slaves[] = {
+ &omap2420_l4_core__timer5,
+};
+
+/* timer5 hwmod */
+static struct omap_hwmod omap2420_timer5_hwmod = {
+ .name = "timer5",
+ .mpu_irqs = omap2420_timer5_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer5_mpu_irqs),
+ .main_clk = "gpt5_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT5_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT5_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer5_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+
+/* timer6 */
+static struct omap_hwmod omap2420_timer6_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer6_mpu_irqs[] = {
+ { .irq = 42, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer6_addrs[] = {
+ {
+ .pa_start = 0x4807e000,
+ .pa_end = 0x4807e000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer6 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer6 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer6_hwmod,
+ .clk = "gpt6_ick",
+ .addr = omap2420_timer6_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer6_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer6 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer6_slaves[] = {
+ &omap2420_l4_core__timer6,
+};
+
+/* timer6 hwmod */
+static struct omap_hwmod omap2420_timer6_hwmod = {
+ .name = "timer6",
+ .mpu_irqs = omap2420_timer6_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer6_mpu_irqs),
+ .main_clk = "gpt6_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT6_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT6_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer6_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer6_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer7 */
+static struct omap_hwmod omap2420_timer7_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer7_mpu_irqs[] = {
+ { .irq = 43, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer7_addrs[] = {
+ {
+ .pa_start = 0x48080000,
+ .pa_end = 0x48080000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer7 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer7 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer7_hwmod,
+ .clk = "gpt7_ick",
+ .addr = omap2420_timer7_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer7_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer7 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer7_slaves[] = {
+ &omap2420_l4_core__timer7,
+};
+
+/* timer7 hwmod */
+static struct omap_hwmod omap2420_timer7_hwmod = {
+ .name = "timer7",
+ .mpu_irqs = omap2420_timer7_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer7_mpu_irqs),
+ .main_clk = "gpt7_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT7_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT7_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer7_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer7_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer8 */
+static struct omap_hwmod omap2420_timer8_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer8_mpu_irqs[] = {
+ { .irq = 44, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer8_addrs[] = {
+ {
+ .pa_start = 0x48082000,
+ .pa_end = 0x48082000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer8 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer8 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer8_hwmod,
+ .clk = "gpt8_ick",
+ .addr = omap2420_timer8_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer8_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer8 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer8_slaves[] = {
+ &omap2420_l4_core__timer8,
+};
+
+/* timer8 hwmod */
+static struct omap_hwmod omap2420_timer8_hwmod = {
+ .name = "timer8",
+ .mpu_irqs = omap2420_timer8_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer8_mpu_irqs),
+ .main_clk = "gpt8_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT8_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT8_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer8_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer8_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer9 */
+static struct omap_hwmod omap2420_timer9_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer9_mpu_irqs[] = {
+ { .irq = 45, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer9_addrs[] = {
+ {
+ .pa_start = 0x48084000,
+ .pa_end = 0x48084000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer9 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer9 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer9_hwmod,
+ .clk = "gpt9_ick",
+ .addr = omap2420_timer9_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer9_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer9 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer9_slaves[] = {
+ &omap2420_l4_core__timer9,
+};
+
+/* timer9 hwmod */
+static struct omap_hwmod omap2420_timer9_hwmod = {
+ .name = "timer9",
+ .mpu_irqs = omap2420_timer9_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer9_mpu_irqs),
+ .main_clk = "gpt9_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT9_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT9_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer9_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer9_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer10 */
+static struct omap_hwmod omap2420_timer10_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer10_mpu_irqs[] = {
+ { .irq = 46, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer10_addrs[] = {
+ {
+ .pa_start = 0x48086000,
+ .pa_end = 0x48086000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer10 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer10 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer10_hwmod,
+ .clk = "gpt10_ick",
+ .addr = omap2420_timer10_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer10_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer10 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer10_slaves[] = {
+ &omap2420_l4_core__timer10,
+};
+
+/* timer10 hwmod */
+static struct omap_hwmod omap2420_timer10_hwmod = {
+ .name = "timer10",
+ .mpu_irqs = omap2420_timer10_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer10_mpu_irqs),
+ .main_clk = "gpt10_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT10_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT10_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer10_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer10_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer11 */
+static struct omap_hwmod omap2420_timer11_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer11_mpu_irqs[] = {
+ { .irq = 47, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer11_addrs[] = {
+ {
+ .pa_start = 0x48088000,
+ .pa_end = 0x48088000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer11 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer11 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer11_hwmod,
+ .clk = "gpt11_ick",
+ .addr = omap2420_timer11_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer11_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer11 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer11_slaves[] = {
+ &omap2420_l4_core__timer11,
+};
+
+/* timer11 hwmod */
+static struct omap_hwmod omap2420_timer11_hwmod = {
+ .name = "timer11",
+ .mpu_irqs = omap2420_timer11_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer11_mpu_irqs),
+ .main_clk = "gpt11_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT11_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT11_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer11_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer11_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
+/* timer12 */
+static struct omap_hwmod omap2420_timer12_hwmod;
+static struct omap_hwmod_irq_info omap2420_timer12_mpu_irqs[] = {
+ { .irq = 48, },
+};
+
+static struct omap_hwmod_addr_space omap2420_timer12_addrs[] = {
+ {
+ .pa_start = 0x4808a000,
+ .pa_end = 0x4808a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer12 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__timer12 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_timer12_hwmod,
+ .clk = "gpt12_ick",
+ .addr = omap2420_timer12_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_timer12_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer12 slave port */
+static struct omap_hwmod_ocp_if *omap2420_timer12_slaves[] = {
+ &omap2420_l4_core__timer12,
+};
+
+/* timer12 hwmod */
+static struct omap_hwmod omap2420_timer12_hwmod = {
+ .name = "timer12",
+ .mpu_irqs = omap2420_timer12_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer12_mpu_irqs),
+ .main_clk = "gpt12_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT12_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT12_SHIFT,
+ },
+ },
+ .slaves = omap2420_timer12_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_timer12_slaves),
+ .class = &omap2420_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
+};
+
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_addr_space omap2420_wd_timer2_addrs[] = {
{
@@ -470,6 +1150,290 @@ static struct omap_hwmod omap2420_uart3_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
+/*
+ * 'dss' class
+ * display sub-system
+ */
+
+static struct omap_hwmod_class_sysconfig omap2420_dss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_dss_hwmod_class = {
+ .name = "dss",
+ .sysc = &omap2420_dss_sysc,
+};
+
+static struct omap_hwmod_dma_info omap2420_dss_sdma_chs[] = {
+ { .name = "dispc", .dma_req = 5 },
+};
+
+/* dss */
+/* dss master ports */
+static struct omap_hwmod_ocp_if *omap2420_dss_masters[] = {
+ &omap2420_dss__l3,
+};
+
+static struct omap_hwmod_addr_space omap2420_dss_addrs[] = {
+ {
+ .pa_start = 0x48050000,
+ .pa_end = 0x480503FF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss */
+static struct omap_hwmod_ocp_if omap2420_l4_core__dss = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_dss_core_hwmod,
+ .clk = "dss_ick",
+ .addr = omap2420_dss_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_dss_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss slave ports */
+static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
+ &omap2420_l4_core__dss,
+};
+
+static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_54m_fck" },
+ { .role = "sys_clk", .clk = "dss2_fck" },
+};
+
+static struct omap_hwmod omap2420_dss_core_hwmod = {
+ .name = "dss_core",
+ .class = &omap2420_dss_hwmod_class,
+ .main_clk = "dss1_fck", /* instead of dss_fck */
+ .sdma_reqs = omap2420_dss_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2420_dss_sdma_chs),
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
+ },
+ },
+ .opt_clks = dss_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
+ .slaves = omap2420_dss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves),
+ .masters = omap2420_dss_masters,
+ .masters_cnt = ARRAY_SIZE(omap2420_dss_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2420_dispc_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_dispc_hwmod_class = {
+ .name = "dispc",
+ .sysc = &omap2420_dispc_sysc,
+};
+
+static struct omap_hwmod_irq_info omap2420_dispc_irqs[] = {
+ { .irq = 25 },
+};
+
+static struct omap_hwmod_addr_space omap2420_dss_dispc_addrs[] = {
+ {
+ .pa_start = 0x48050400,
+ .pa_end = 0x480507FF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_dispc */
+static struct omap_hwmod_ocp_if omap2420_l4_core__dss_dispc = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_dss_dispc_hwmod,
+ .clk = "dss_ick",
+ .addr = omap2420_dss_dispc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_dss_dispc_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP2420_L4_CORE_FW_DSS_DISPC_REGION,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_dispc slave ports */
+static struct omap_hwmod_ocp_if *omap2420_dss_dispc_slaves[] = {
+ &omap2420_l4_core__dss_dispc,
+};
+
+static struct omap_hwmod omap2420_dss_dispc_hwmod = {
+ .name = "dss_dispc",
+ .class = &omap2420_dispc_hwmod_class,
+ .mpu_irqs = omap2420_dispc_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_dispc_irqs),
+ .main_clk = "dss1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
+ },
+ },
+ .slaves = omap2420_dss_dispc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'rfbi' class
+ * remote frame buffer interface
+ */
+
+static struct omap_hwmod_class_sysconfig omap2420_rfbi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_rfbi_hwmod_class = {
+ .name = "rfbi",
+ .sysc = &omap2420_rfbi_sysc,
+};
+
+static struct omap_hwmod_addr_space omap2420_dss_rfbi_addrs[] = {
+ {
+ .pa_start = 0x48050800,
+ .pa_end = 0x48050BFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_rfbi */
+static struct omap_hwmod_ocp_if omap2420_l4_core__dss_rfbi = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_dss_rfbi_hwmod,
+ .clk = "dss_ick",
+ .addr = omap2420_dss_rfbi_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_dss_rfbi_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_rfbi slave ports */
+static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
+ &omap2420_l4_core__dss_rfbi,
+};
+
+static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
+ .name = "dss_rfbi",
+ .class = &omap2420_rfbi_hwmod_class,
+ .main_clk = "dss1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ },
+ },
+ .slaves = omap2420_dss_rfbi_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'venc' class
+ * video encoder
+ */
+
+static struct omap_hwmod_class omap2420_venc_hwmod_class = {
+ .name = "venc",
+};
+
+/* dss_venc */
+static struct omap_hwmod_addr_space omap2420_dss_venc_addrs[] = {
+ {
+ .pa_start = 0x48050C00,
+ .pa_end = 0x48050FFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_venc */
+static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_dss_venc_hwmod,
+ .clk = "dss_54m_fck",
+ .addr = omap2420_dss_venc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_dss_venc_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP2420_L4_CORE_FW_DSS_VENC_REGION,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_venc slave ports */
+static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
+ &omap2420_l4_core__dss_venc,
+};
+
+static struct omap_hwmod omap2420_dss_venc_hwmod = {
+ .name = "dss_venc",
+ .class = &omap2420_venc_hwmod_class,
+ .main_clk = "dss1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ },
+ },
+ .slaves = omap2420_dss_venc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_dss_venc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+ .flags = HWMOD_NO_IDLEST,
+};
+
/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
.rev_offs = 0x00,
@@ -864,16 +1828,342 @@ static struct omap_hwmod omap2420_dma_system_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors
+ * using a queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap2420_mailbox_sysc = {
+ .rev_offs = 0x000,
+ .sysc_offs = 0x010,
+ .syss_offs = 0x014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_mailbox_hwmod_class = {
+ .name = "mailbox",
+ .sysc = &omap2420_mailbox_sysc,
+};
+
+/* mailbox */
+static struct omap_hwmod omap2420_mailbox_hwmod;
+static struct omap_hwmod_irq_info omap2420_mailbox_irqs[] = {
+ { .name = "dsp", .irq = 26 },
+ { .name = "iva", .irq = 34 },
+};
+
+static struct omap_hwmod_addr_space omap2420_mailbox_addrs[] = {
+ {
+ .pa_start = 0x48094000,
+ .pa_end = 0x480941ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+/* l4_core -> mailbox */
+static struct omap_hwmod_ocp_if omap2420_l4_core__mailbox = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_mailbox_hwmod,
+ .addr = omap2420_mailbox_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_mailbox_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mailbox slave ports */
+static struct omap_hwmod_ocp_if *omap2420_mailbox_slaves[] = {
+ &omap2420_l4_core__mailbox,
+};
+
+static struct omap_hwmod omap2420_mailbox_hwmod = {
+ .name = "mailbox",
+ .class = &omap2420_mailbox_hwmod_class,
+ .mpu_irqs = omap2420_mailbox_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mailbox_irqs),
+ .main_clk = "mailboxes_ick",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MAILBOXES_SHIFT,
+ },
+ },
+ .slaves = omap2420_mailbox_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_mailbox_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/*
+ * 'mcspi' class
+ * multichannel serial port interface (mcspi) / master/slave synchronous serial
+ * bus
+ */
+
+static struct omap_hwmod_class_sysconfig omap2420_mcspi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_mcspi_class = {
+ .name = "mcspi",
+ .sysc = &omap2420_mcspi_sysc,
+ .rev = OMAP2_MCSPI_REV,
+};
+
+/* mcspi1 */
+static struct omap_hwmod_irq_info omap2420_mcspi1_mpu_irqs[] = {
+ { .irq = 65 },
+};
+
+static struct omap_hwmod_dma_info omap2420_mcspi1_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
+ { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
+ { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
+ { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
+ { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
+ { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
+ { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
+ { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
+};
+
+static struct omap_hwmod_ocp_if *omap2420_mcspi1_slaves[] = {
+ &omap2420_l4_core__mcspi1,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
+ .num_chipselect = 4,
+};
+
+static struct omap_hwmod omap2420_mcspi1_hwmod = {
+ .name = "mcspi1_hwmod",
+ .mpu_irqs = omap2420_mcspi1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi1_mpu_irqs),
+ .sdma_reqs = omap2420_mcspi1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi1_sdma_reqs),
+ .main_clk = "mcspi1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCSPI1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT,
+ },
+ },
+ .slaves = omap2420_mcspi1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_mcspi1_slaves),
+ .class = &omap2420_mcspi_class,
+ .dev_attr = &omap_mcspi1_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* mcspi2 */
+static struct omap_hwmod_irq_info omap2420_mcspi2_mpu_irqs[] = {
+ { .irq = 66 },
+};
+
+static struct omap_hwmod_dma_info omap2420_mcspi2_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
+ { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
+ { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
+ { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
+};
+
+static struct omap_hwmod_ocp_if *omap2420_mcspi2_slaves[] = {
+ &omap2420_l4_core__mcspi2,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap2420_mcspi2_hwmod = {
+ .name = "mcspi2_hwmod",
+ .mpu_irqs = omap2420_mcspi2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi2_mpu_irqs),
+ .sdma_reqs = omap2420_mcspi2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi2_sdma_reqs),
+ .main_clk = "mcspi2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCSPI2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT,
+ },
+ },
+ .slaves = omap2420_mcspi2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_mcspi2_slaves),
+ .class = &omap2420_mcspi_class,
+ .dev_attr = &omap_mcspi2_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/*
+ * 'mcbsp' class
+ * multi channel buffered serial port controller
+ */
+
+static struct omap_hwmod_class omap2420_mcbsp_hwmod_class = {
+ .name = "mcbsp",
+};
+
+/* mcbsp1 */
+static struct omap_hwmod_irq_info omap2420_mcbsp1_irqs[] = {
+ { .name = "tx", .irq = 59 },
+ { .name = "rx", .irq = 60 },
+};
+
+static struct omap_hwmod_dma_info omap2420_mcbsp1_sdma_chs[] = {
+ { .name = "rx", .dma_req = 32 },
+ { .name = "tx", .dma_req = 31 },
+};
+
+static struct omap_hwmod_addr_space omap2420_mcbsp1_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48074000,
+ .pa_end = 0x480740ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp1 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp1 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_mcbsp1_hwmod,
+ .clk = "mcbsp1_ick",
+ .addr = omap2420_mcbsp1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_mcbsp1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp1 slave ports */
+static struct omap_hwmod_ocp_if *omap2420_mcbsp1_slaves[] = {
+ &omap2420_l4_core__mcbsp1,
+};
+
+static struct omap_hwmod omap2420_mcbsp1_hwmod = {
+ .name = "mcbsp1",
+ .class = &omap2420_mcbsp_hwmod_class,
+ .mpu_irqs = omap2420_mcbsp1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcbsp1_irqs),
+ .sdma_reqs = omap2420_mcbsp1_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcbsp1_sdma_chs),
+ .main_clk = "mcbsp1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCBSP1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCBSP1_SHIFT,
+ },
+ },
+ .slaves = omap2420_mcbsp1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_mcbsp1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* mcbsp2 */
+static struct omap_hwmod_irq_info omap2420_mcbsp2_irqs[] = {
+ { .name = "tx", .irq = 62 },
+ { .name = "rx", .irq = 63 },
+};
+
+static struct omap_hwmod_dma_info omap2420_mcbsp2_sdma_chs[] = {
+ { .name = "rx", .dma_req = 34 },
+ { .name = "tx", .dma_req = 33 },
+};
+
+static struct omap_hwmod_addr_space omap2420_mcbsp2_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48076000,
+ .pa_end = 0x480760ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp2 */
+static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp2 = {
+ .master = &omap2420_l4_core_hwmod,
+ .slave = &omap2420_mcbsp2_hwmod,
+ .clk = "mcbsp2_ick",
+ .addr = omap2420_mcbsp2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2420_mcbsp2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp2 slave ports */
+static struct omap_hwmod_ocp_if *omap2420_mcbsp2_slaves[] = {
+ &omap2420_l4_core__mcbsp2,
+};
+
+static struct omap_hwmod omap2420_mcbsp2_hwmod = {
+ .name = "mcbsp2",
+ .class = &omap2420_mcbsp_hwmod_class,
+ .mpu_irqs = omap2420_mcbsp2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcbsp2_irqs),
+ .sdma_reqs = omap2420_mcbsp2_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcbsp2_sdma_chs),
+ .main_clk = "mcbsp2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCBSP2_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCBSP2_SHIFT,
+ },
+ },
+ .slaves = omap2420_mcbsp2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2420_mcbsp2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
static __initdata struct omap_hwmod *omap2420_hwmods[] = {
&omap2420_l3_main_hwmod,
&omap2420_l4_core_hwmod,
&omap2420_l4_wkup_hwmod,
&omap2420_mpu_hwmod,
&omap2420_iva_hwmod,
+
+ &omap2420_timer1_hwmod,
+ &omap2420_timer2_hwmod,
+ &omap2420_timer3_hwmod,
+ &omap2420_timer4_hwmod,
+ &omap2420_timer5_hwmod,
+ &omap2420_timer6_hwmod,
+ &omap2420_timer7_hwmod,
+ &omap2420_timer8_hwmod,
+ &omap2420_timer9_hwmod,
+ &omap2420_timer10_hwmod,
+ &omap2420_timer11_hwmod,
+ &omap2420_timer12_hwmod,
+
&omap2420_wd_timer2_hwmod,
&omap2420_uart1_hwmod,
&omap2420_uart2_hwmod,
&omap2420_uart3_hwmod,
+ /* dss class */
+ &omap2420_dss_core_hwmod,
+ &omap2420_dss_dispc_hwmod,
+ &omap2420_dss_rfbi_hwmod,
+ &omap2420_dss_venc_hwmod,
+ /* i2c class */
&omap2420_i2c1_hwmod,
&omap2420_i2c2_hwmod,
@@ -885,10 +2175,21 @@ static __initdata struct omap_hwmod *omap2420_hwmods[] = {
 /* dma_system class */
&omap2420_dma_system_hwmod,
+
+ /* mailbox class */
+ &omap2420_mailbox_hwmod,
+
+ /* mcbsp class */
+ &omap2420_mcbsp1_hwmod,
+ &omap2420_mcbsp2_hwmod,
+
+ /* mcspi class */
+ &omap2420_mcspi1_hwmod,
+ &omap2420_mcspi2_hwmod,
NULL,
};
int __init omap2420_hwmod_init(void)
{
- return omap_hwmod_init(omap2420_hwmods);
+ return omap_hwmod_register(omap2420_hwmods);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 8ecfbcde13ba..1c8940486b86 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -18,6 +18,11 @@
#include <plat/serial.h>
#include <plat/i2c.h>
#include <plat/gpio.h>
+#include <plat/mcbsp.h>
+#include <plat/mcspi.h>
+#include <plat/dmtimer.h>
+#include <plat/mmc.h>
+#include <plat/l3_2xxx.h>
#include "omap_hwmod_common_data.h"
@@ -38,6 +43,10 @@ static struct omap_hwmod omap2430_mpu_hwmod;
static struct omap_hwmod omap2430_iva_hwmod;
static struct omap_hwmod omap2430_l3_main_hwmod;
static struct omap_hwmod omap2430_l4_core_hwmod;
+static struct omap_hwmod omap2430_dss_core_hwmod;
+static struct omap_hwmod omap2430_dss_dispc_hwmod;
+static struct omap_hwmod omap2430_dss_rfbi_hwmod;
+static struct omap_hwmod omap2430_dss_venc_hwmod;
static struct omap_hwmod omap2430_wd_timer2_hwmod;
static struct omap_hwmod omap2430_gpio1_hwmod;
static struct omap_hwmod omap2430_gpio2_hwmod;
@@ -45,6 +54,16 @@ static struct omap_hwmod omap2430_gpio3_hwmod;
static struct omap_hwmod omap2430_gpio4_hwmod;
static struct omap_hwmod omap2430_gpio5_hwmod;
static struct omap_hwmod omap2430_dma_system_hwmod;
+static struct omap_hwmod omap2430_mcbsp1_hwmod;
+static struct omap_hwmod omap2430_mcbsp2_hwmod;
+static struct omap_hwmod omap2430_mcbsp3_hwmod;
+static struct omap_hwmod omap2430_mcbsp4_hwmod;
+static struct omap_hwmod omap2430_mcbsp5_hwmod;
+static struct omap_hwmod omap2430_mcspi1_hwmod;
+static struct omap_hwmod omap2430_mcspi2_hwmod;
+static struct omap_hwmod omap2430_mcspi3_hwmod;
+static struct omap_hwmod omap2430_mmc1_hwmod;
+static struct omap_hwmod omap2430_mmc2_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap2430_l3_main__l4_core = {
@@ -65,6 +84,19 @@ static struct omap_hwmod_ocp_if *omap2430_l3_main_slaves[] = {
&omap2430_mpu__l3_main,
};
+/* DSS -> l3 */
+static struct omap_hwmod_ocp_if omap2430_dss__l3 = {
+ .master = &omap2430_dss_core_hwmod,
+ .slave = &omap2430_l3_main_hwmod,
+ .fw = {
+ .omap2 = {
+ .l3_perm_bit = OMAP2_L3_CORE_FW_CONNID_DSS,
+ .flags = OMAP_FIREWALL_L3,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Master interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap2430_l3_main_masters[] = {
&omap2430_l3_main__l4_core,
@@ -89,6 +121,16 @@ static struct omap_hwmod omap2430_uart3_hwmod;
static struct omap_hwmod omap2430_i2c1_hwmod;
static struct omap_hwmod omap2430_i2c2_hwmod;
+static struct omap_hwmod omap2430_usbhsotg_hwmod;
+
+/* l3_core -> usbhsotg interface */
+static struct omap_hwmod_ocp_if omap2430_usbhsotg__l3 = {
+ .master = &omap2430_usbhsotg_hwmod,
+ .slave = &omap2430_l3_main_hwmod,
+ .clk = "core_l3_ck",
+ .user = OCP_USER_MPU,
+};
+
/* I2C IP block address space length (in bytes) */
#define OMAP2_I2C_AS_LEN 128
@@ -189,6 +231,71 @@ static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/*
+ * usbhsotg interface data
+ */
+static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
+ {
+ .pa_start = OMAP243X_HS_BASE,
+ .pa_end = OMAP243X_HS_BASE + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> usbhsotg interface */
+static struct omap_hwmod_ocp_if omap2430_l4_core__usbhsotg = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_usbhsotg_hwmod,
+ .clk = "usb_l4_ick",
+ .addr = omap2430_usbhsotg_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_usbhsotg_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if *omap2430_usbhsotg_masters[] = {
+ &omap2430_usbhsotg__l3,
+};
+
+static struct omap_hwmod_ocp_if *omap2430_usbhsotg_slaves[] = {
+ &omap2430_l4_core__usbhsotg,
+};
+
+/* L4 CORE -> MMC1 interface */
+static struct omap_hwmod_addr_space omap2430_mmc1_addr_space[] = {
+ {
+ .pa_start = 0x4809c000,
+ .pa_end = 0x4809c1ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__mmc1 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mmc1_hwmod,
+ .clk = "mmchs1_ick",
+ .addr = omap2430_mmc1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_mmc1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> MMC2 interface */
+static struct omap_hwmod_addr_space omap2430_mmc2_addr_space[] = {
+ {
+ .pa_start = 0x480b4000,
+ .pa_end = 0x480b41ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__mmc2 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mmc2_hwmod,
+ .clk = "mmchs2_ick",
+ .addr = omap2430_mmc2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_mmc2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2430_l4_core_slaves[] = {
&omap2430_l3_main__l4_core,
@@ -197,6 +304,8 @@ static struct omap_hwmod_ocp_if *omap2430_l4_core_slaves[] = {
/* Master interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2430_l4_core_masters[] = {
&omap2430_l4_core__l4_wkup,
+ &omap2430_l4_core__mmc1,
+ &omap2430_l4_core__mmc2,
};
/* L4 CORE */
@@ -223,6 +332,60 @@ static struct omap_hwmod_ocp_if *omap2430_l4_wkup_slaves[] = {
static struct omap_hwmod_ocp_if *omap2430_l4_wkup_masters[] = {
};
+/* l4 core -> mcspi1 interface */
+static struct omap_hwmod_addr_space omap2430_mcspi1_addr_space[] = {
+ {
+ .pa_start = 0x48098000,
+ .pa_end = 0x480980ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi1 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcspi1_hwmod,
+ .clk = "mcspi1_ick",
+ .addr = omap2430_mcspi1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcspi1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 core -> mcspi2 interface */
+static struct omap_hwmod_addr_space omap2430_mcspi2_addr_space[] = {
+ {
+ .pa_start = 0x4809a000,
+ .pa_end = 0x4809a0ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi2 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcspi2_hwmod,
+ .clk = "mcspi2_ick",
+ .addr = omap2430_mcspi2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcspi2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 core -> mcspi3 interface */
+static struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
+ {
+ .pa_start = 0x480b8000,
+ .pa_end = 0x480b80ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi3 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcspi3_hwmod,
+ .clk = "mcspi3_ick",
+ .addr = omap2430_mcspi3_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcspi3_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* L4 WKUP */
static struct omap_hwmod omap2430_l4_wkup_hwmod = {
.name = "l4_wkup",
@@ -278,6 +441,624 @@ static struct omap_hwmod omap2430_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
+/* Timer Common */
+static struct omap_hwmod_class_sysconfig omap2430_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_timer_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap2430_timer_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_1,
+};
+
+/* timer1 */
+static struct omap_hwmod omap2430_timer1_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer1_mpu_irqs[] = {
+ { .irq = 37, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer1_addrs[] = {
+ {
+ .pa_start = 0x49018000,
+ .pa_end = 0x49018000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> timer1 */
+static struct omap_hwmod_ocp_if omap2430_l4_wkup__timer1 = {
+ .master = &omap2430_l4_wkup_hwmod,
+ .slave = &omap2430_timer1_hwmod,
+ .clk = "gpt1_ick",
+ .addr = omap2430_timer1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer1 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer1_slaves[] = {
+ &omap2430_l4_wkup__timer1,
+};
+
+/* timer1 hwmod */
+static struct omap_hwmod omap2430_timer1_hwmod = {
+ .name = "timer1",
+ .mpu_irqs = omap2430_timer1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer1_mpu_irqs),
+ .main_clk = "gpt1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT1_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT1_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer1_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer2 */
+static struct omap_hwmod omap2430_timer2_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer2_mpu_irqs[] = {
+ { .irq = 38, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer2_addrs[] = {
+ {
+ .pa_start = 0x4802a000,
+ .pa_end = 0x4802a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer2 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer2 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer2_hwmod,
+ .clk = "gpt2_ick",
+ .addr = omap2430_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer2 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer2_slaves[] = {
+ &omap2430_l4_core__timer2,
+};
+
+/* timer2 hwmod */
+static struct omap_hwmod omap2430_timer2_hwmod = {
+ .name = "timer2",
+ .mpu_irqs = omap2430_timer2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer2_mpu_irqs),
+ .main_clk = "gpt2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT2_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT2_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer2_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer3 */
+static struct omap_hwmod omap2430_timer3_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer3_mpu_irqs[] = {
+ { .irq = 39, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer3_addrs[] = {
+ {
+ .pa_start = 0x48078000,
+ .pa_end = 0x48078000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer3 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer3 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer3_hwmod,
+ .clk = "gpt3_ick",
+ .addr = omap2430_timer3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer3 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer3_slaves[] = {
+ &omap2430_l4_core__timer3,
+};
+
+/* timer3 hwmod */
+static struct omap_hwmod omap2430_timer3_hwmod = {
+ .name = "timer3",
+ .mpu_irqs = omap2430_timer3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer3_mpu_irqs),
+ .main_clk = "gpt3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT3_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT3_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer3_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer4 */
+static struct omap_hwmod omap2430_timer4_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer4_mpu_irqs[] = {
+ { .irq = 40, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer4_addrs[] = {
+ {
+ .pa_start = 0x4807a000,
+ .pa_end = 0x4807a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer4 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer4 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer4_hwmod,
+ .clk = "gpt4_ick",
+ .addr = omap2430_timer4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer4 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer4_slaves[] = {
+ &omap2430_l4_core__timer4,
+};
+
+/* timer4 hwmod */
+static struct omap_hwmod omap2430_timer4_hwmod = {
+ .name = "timer4",
+ .mpu_irqs = omap2430_timer4_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer4_mpu_irqs),
+ .main_clk = "gpt4_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT4_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT4_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer4_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer5 */
+static struct omap_hwmod omap2430_timer5_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer5_mpu_irqs[] = {
+ { .irq = 41, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer5_addrs[] = {
+ {
+ .pa_start = 0x4807c000,
+ .pa_end = 0x4807c000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer5 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer5 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer5_hwmod,
+ .clk = "gpt5_ick",
+ .addr = omap2430_timer5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer5_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer5 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer5_slaves[] = {
+ &omap2430_l4_core__timer5,
+};
+
+/* timer5 hwmod */
+static struct omap_hwmod omap2430_timer5_hwmod = {
+ .name = "timer5",
+ .mpu_irqs = omap2430_timer5_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer5_mpu_irqs),
+ .main_clk = "gpt5_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT5_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT5_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer5_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer6 */
+static struct omap_hwmod omap2430_timer6_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer6_mpu_irqs[] = {
+ { .irq = 42, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer6_addrs[] = {
+ {
+ .pa_start = 0x4807e000,
+ .pa_end = 0x4807e000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer6 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer6 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer6_hwmod,
+ .clk = "gpt6_ick",
+ .addr = omap2430_timer6_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer6_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer6 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer6_slaves[] = {
+ &omap2430_l4_core__timer6,
+};
+
+/* timer6 hwmod */
+static struct omap_hwmod omap2430_timer6_hwmod = {
+ .name = "timer6",
+ .mpu_irqs = omap2430_timer6_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer6_mpu_irqs),
+ .main_clk = "gpt6_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT6_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT6_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer6_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer6_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer7 */
+static struct omap_hwmod omap2430_timer7_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer7_mpu_irqs[] = {
+ { .irq = 43, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer7_addrs[] = {
+ {
+ .pa_start = 0x48080000,
+ .pa_end = 0x48080000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer7 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer7 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer7_hwmod,
+ .clk = "gpt7_ick",
+ .addr = omap2430_timer7_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer7_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer7 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer7_slaves[] = {
+ &omap2430_l4_core__timer7,
+};
+
+/* timer7 hwmod */
+static struct omap_hwmod omap2430_timer7_hwmod = {
+ .name = "timer7",
+ .mpu_irqs = omap2430_timer7_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer7_mpu_irqs),
+ .main_clk = "gpt7_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT7_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT7_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer7_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer7_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer8 */
+static struct omap_hwmod omap2430_timer8_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer8_mpu_irqs[] = {
+ { .irq = 44, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer8_addrs[] = {
+ {
+ .pa_start = 0x48082000,
+ .pa_end = 0x48082000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer8 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer8 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer8_hwmod,
+ .clk = "gpt8_ick",
+ .addr = omap2430_timer8_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer8_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer8 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer8_slaves[] = {
+ &omap2430_l4_core__timer8,
+};
+
+/* timer8 hwmod */
+static struct omap_hwmod omap2430_timer8_hwmod = {
+ .name = "timer8",
+ .mpu_irqs = omap2430_timer8_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer8_mpu_irqs),
+ .main_clk = "gpt8_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT8_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT8_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer8_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer8_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer9 */
+static struct omap_hwmod omap2430_timer9_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer9_mpu_irqs[] = {
+ { .irq = 45, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer9_addrs[] = {
+ {
+ .pa_start = 0x48084000,
+ .pa_end = 0x48084000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer9 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer9 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer9_hwmod,
+ .clk = "gpt9_ick",
+ .addr = omap2430_timer9_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer9_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer9 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer9_slaves[] = {
+ &omap2430_l4_core__timer9,
+};
+
+/* timer9 hwmod */
+static struct omap_hwmod omap2430_timer9_hwmod = {
+ .name = "timer9",
+ .mpu_irqs = omap2430_timer9_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer9_mpu_irqs),
+ .main_clk = "gpt9_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT9_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT9_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer9_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer9_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer10 */
+static struct omap_hwmod omap2430_timer10_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer10_mpu_irqs[] = {
+ { .irq = 46, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer10_addrs[] = {
+ {
+ .pa_start = 0x48086000,
+ .pa_end = 0x48086000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer10 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer10 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer10_hwmod,
+ .clk = "gpt10_ick",
+ .addr = omap2430_timer10_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer10_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer10 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer10_slaves[] = {
+ &omap2430_l4_core__timer10,
+};
+
+/* timer10 hwmod */
+static struct omap_hwmod omap2430_timer10_hwmod = {
+ .name = "timer10",
+ .mpu_irqs = omap2430_timer10_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer10_mpu_irqs),
+ .main_clk = "gpt10_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT10_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT10_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer10_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer10_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer11 */
+static struct omap_hwmod omap2430_timer11_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer11_mpu_irqs[] = {
+ { .irq = 47, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer11_addrs[] = {
+ {
+ .pa_start = 0x48088000,
+ .pa_end = 0x48088000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer11 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer11 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer11_hwmod,
+ .clk = "gpt11_ick",
+ .addr = omap2430_timer11_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer11_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer11 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer11_slaves[] = {
+ &omap2430_l4_core__timer11,
+};
+
+/* timer11 hwmod */
+static struct omap_hwmod omap2430_timer11_hwmod = {
+ .name = "timer11",
+ .mpu_irqs = omap2430_timer11_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer11_mpu_irqs),
+ .main_clk = "gpt11_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT11_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT11_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer11_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer11_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/* timer12 */
+static struct omap_hwmod omap2430_timer12_hwmod;
+static struct omap_hwmod_irq_info omap2430_timer12_mpu_irqs[] = {
+ { .irq = 48, },
+};
+
+static struct omap_hwmod_addr_space omap2430_timer12_addrs[] = {
+ {
+ .pa_start = 0x4808a000,
+ .pa_end = 0x4808a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer12 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__timer12 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_timer12_hwmod,
+ .clk = "gpt12_ick",
+ .addr = omap2430_timer12_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_timer12_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer12 slave port */
+static struct omap_hwmod_ocp_if *omap2430_timer12_slaves[] = {
+ &omap2430_l4_core__timer12,
+};
+
+/* timer12 hwmod */
+static struct omap_hwmod omap2430_timer12_hwmod = {
+ .name = "timer12",
+ .mpu_irqs = omap2430_timer12_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer12_mpu_irqs),
+ .main_clk = "gpt12_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_GPT12_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_GPT12_SHIFT,
+ },
+ },
+ .slaves = omap2430_timer12_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_timer12_slaves),
+ .class = &omap2430_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_addr_space omap2430_wd_timer2_addrs[] = {
{
@@ -469,6 +1250,266 @@ static struct omap_hwmod omap2430_uart3_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
+/*
+ * 'dss' class
+ * display sub-system
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_dss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_dss_hwmod_class = {
+ .name = "dss",
+ .sysc = &omap2430_dss_sysc,
+};
+
+static struct omap_hwmod_dma_info omap2430_dss_sdma_chs[] = {
+ { .name = "dispc", .dma_req = 5 },
+};
+
+/* dss */
+/* dss master ports */
+static struct omap_hwmod_ocp_if *omap2430_dss_masters[] = {
+ &omap2430_dss__l3,
+};
+
+static struct omap_hwmod_addr_space omap2430_dss_addrs[] = {
+ {
+ .pa_start = 0x48050000,
+ .pa_end = 0x480503FF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss */
+static struct omap_hwmod_ocp_if omap2430_l4_core__dss = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_dss_core_hwmod,
+ .clk = "dss_ick",
+ .addr = omap2430_dss_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_dss_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss slave ports */
+static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
+ &omap2430_l4_core__dss,
+};
+
+static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_54m_fck" },
+ { .role = "sys_clk", .clk = "dss2_fck" },
+};
+
+static struct omap_hwmod omap2430_dss_core_hwmod = {
+ .name = "dss_core",
+ .class = &omap2430_dss_hwmod_class,
+ .main_clk = "dss1_fck", /* instead of dss_fck */
+ .sdma_reqs = omap2430_dss_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_dss_sdma_chs),
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
+ },
+ },
+ .opt_clks = dss_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
+ .slaves = omap2430_dss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves),
+ .masters = omap2430_dss_masters,
+ .masters_cnt = ARRAY_SIZE(omap2430_dss_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_dispc_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_dispc_hwmod_class = {
+ .name = "dispc",
+ .sysc = &omap2430_dispc_sysc,
+};
+
+static struct omap_hwmod_irq_info omap2430_dispc_irqs[] = {
+ { .irq = 25 },
+};
+
+static struct omap_hwmod_addr_space omap2430_dss_dispc_addrs[] = {
+ {
+ .pa_start = 0x48050400,
+ .pa_end = 0x480507FF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_dispc */
+static struct omap_hwmod_ocp_if omap2430_l4_core__dss_dispc = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_dss_dispc_hwmod,
+ .clk = "dss_ick",
+ .addr = omap2430_dss_dispc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_dss_dispc_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_dispc slave ports */
+static struct omap_hwmod_ocp_if *omap2430_dss_dispc_slaves[] = {
+ &omap2430_l4_core__dss_dispc,
+};
+
+static struct omap_hwmod omap2430_dss_dispc_hwmod = {
+ .name = "dss_dispc",
+ .class = &omap2430_dispc_hwmod_class,
+ .mpu_irqs = omap2430_dispc_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_dispc_irqs),
+ .main_clk = "dss1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
+ },
+ },
+ .slaves = omap2430_dss_dispc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'rfbi' class
+ * remote frame buffer interface
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_rfbi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_rfbi_hwmod_class = {
+ .name = "rfbi",
+ .sysc = &omap2430_rfbi_sysc,
+};
+
+static struct omap_hwmod_addr_space omap2430_dss_rfbi_addrs[] = {
+ {
+ .pa_start = 0x48050800,
+ .pa_end = 0x48050BFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_rfbi */
+static struct omap_hwmod_ocp_if omap2430_l4_core__dss_rfbi = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_dss_rfbi_hwmod,
+ .clk = "dss_ick",
+ .addr = omap2430_dss_rfbi_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_dss_rfbi_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_rfbi slave ports */
+static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
+ &omap2430_l4_core__dss_rfbi,
+};
+
+static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
+ .name = "dss_rfbi",
+ .class = &omap2430_rfbi_hwmod_class,
+ .main_clk = "dss1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ },
+ },
+ .slaves = omap2430_dss_rfbi_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'venc' class
+ * video encoder
+ */
+
+static struct omap_hwmod_class omap2430_venc_hwmod_class = {
+ .name = "venc",
+};
+
+/* dss_venc */
+static struct omap_hwmod_addr_space omap2430_dss_venc_addrs[] = {
+ {
+ .pa_start = 0x48050C00,
+ .pa_end = 0x48050FFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_venc */
+static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_dss_venc_hwmod,
+ .clk = "dss_54m_fck",
+ .addr = omap2430_dss_venc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_dss_venc_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_venc slave ports */
+static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
+ &omap2430_l4_core__dss_venc,
+};
+
+static struct omap_hwmod omap2430_dss_venc_hwmod = {
+ .name = "dss_venc",
+ .class = &omap2430_venc_hwmod_class,
+ .main_clk = "dss1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .module_offs = CORE_MOD,
+ },
+ },
+ .slaves = omap2430_dss_venc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_dss_venc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+ .flags = HWMOD_NO_IDLEST,
+};
+
/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
.rev_offs = 0x00,
@@ -919,18 +1960,741 @@ static struct omap_hwmod omap2430_dma_system_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors
+ * using a queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_mailbox_sysc = {
+ .rev_offs = 0x000,
+ .sysc_offs = 0x010,
+ .syss_offs = 0x014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_mailbox_hwmod_class = {
+ .name = "mailbox",
+ .sysc = &omap2430_mailbox_sysc,
+};
+
+/* mailbox */
+static struct omap_hwmod omap2430_mailbox_hwmod;
+static struct omap_hwmod_irq_info omap2430_mailbox_irqs[] = {
+ { .irq = 26 },
+};
+
+static struct omap_hwmod_addr_space omap2430_mailbox_addrs[] = {
+ {
+ .pa_start = 0x48094000,
+ .pa_end = 0x480941ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+/* l4_core -> mailbox */
+static struct omap_hwmod_ocp_if omap2430_l4_core__mailbox = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mailbox_hwmod,
+ .addr = omap2430_mailbox_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_mailbox_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mailbox slave ports */
+static struct omap_hwmod_ocp_if *omap2430_mailbox_slaves[] = {
+ &omap2430_l4_core__mailbox,
+};
+
+static struct omap_hwmod omap2430_mailbox_hwmod = {
+ .name = "mailbox",
+ .class = &omap2430_mailbox_hwmod_class,
+ .mpu_irqs = omap2430_mailbox_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mailbox_irqs),
+ .main_clk = "mailboxes_ick",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MAILBOXES_SHIFT,
+ },
+ },
+ .slaves = omap2430_mailbox_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mailbox_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/*
+ * 'mcspi' class
+ * multichannel serial port interface (mcspi) / master/slave synchronous serial
+ * bus
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_mcspi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_mcspi_class = {
+ .name = "mcspi",
+ .sysc = &omap2430_mcspi_sysc,
+ .rev = OMAP2_MCSPI_REV,
+};
+
+/* mcspi1 */
+static struct omap_hwmod_irq_info omap2430_mcspi1_mpu_irqs[] = {
+ { .irq = 65 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcspi1_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
+ { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
+ { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
+ { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
+ { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
+ { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
+ { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
+ { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_mcspi1_slaves[] = {
+ &omap2430_l4_core__mcspi1,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
+ .num_chipselect = 4,
+};
+
+static struct omap_hwmod omap2430_mcspi1_hwmod = {
+ .name = "mcspi1_hwmod",
+ .mpu_irqs = omap2430_mcspi1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi1_mpu_irqs),
+ .sdma_reqs = omap2430_mcspi1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi1_sdma_reqs),
+ .main_clk = "mcspi1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCSPI1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcspi1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcspi1_slaves),
+ .class = &omap2430_mcspi_class,
+ .dev_attr = &omap_mcspi1_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* mcspi2 */
+static struct omap_hwmod_irq_info omap2430_mcspi2_mpu_irqs[] = {
+ { .irq = 66 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcspi2_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
+ { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
+ { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
+ { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_mcspi2_slaves[] = {
+ &omap2430_l4_core__mcspi2,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap2430_mcspi2_hwmod = {
+ .name = "mcspi2_hwmod",
+ .mpu_irqs = omap2430_mcspi2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi2_mpu_irqs),
+ .sdma_reqs = omap2430_mcspi2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi2_sdma_reqs),
+ .main_clk = "mcspi2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCSPI2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcspi2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcspi2_slaves),
+ .class = &omap2430_mcspi_class,
+ .dev_attr = &omap_mcspi2_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* mcspi3 */
+static struct omap_hwmod_irq_info omap2430_mcspi3_mpu_irqs[] = {
+ { .irq = 91 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 15 }, /* DMA_SPI3_TX0 */
+ { .name = "rx0", .dma_req = 16 }, /* DMA_SPI3_RX0 */
+ { .name = "tx1", .dma_req = 23 }, /* DMA_SPI3_TX1 */
+ { .name = "rx1", .dma_req = 24 }, /* DMA_SPI3_RX1 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_mcspi3_slaves[] = {
+ &omap2430_l4_core__mcspi3,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap2430_mcspi3_hwmod = {
+ .name = "mcspi3_hwmod",
+ .mpu_irqs = omap2430_mcspi3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi3_mpu_irqs),
+ .sdma_reqs = omap2430_mcspi3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi3_sdma_reqs),
+ .main_clk = "mcspi3_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 2,
+ .module_bit = OMAP2430_EN_MCSPI3_SHIFT,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP2430_ST_MCSPI3_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcspi3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcspi3_slaves),
+ .class = &omap2430_mcspi_class,
+ .dev_attr = &omap_mcspi3_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/*
+ * usbhsotg
+ */
+static struct omap_hwmod_class_sysconfig omap2430_usbhsotg_sysc = {
+ .rev_offs = 0x0400,
+ .sysc_offs = 0x0404,
+ .syss_offs = 0x0408,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class usbotg_class = {
+ .name = "usbotg",
+ .sysc = &omap2430_usbhsotg_sysc,
+};
+
+/* usb_otg_hs */
+static struct omap_hwmod_irq_info omap2430_usbhsotg_mpu_irqs[] = {
+ { .name = "mc", .irq = 92 },
+ { .name = "dma", .irq = 93 },
+};
+
+static struct omap_hwmod omap2430_usbhsotg_hwmod = {
+ .name = "usb_otg_hs",
+ .mpu_irqs = omap2430_usbhsotg_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_usbhsotg_mpu_irqs),
+ .main_clk = "usbhs_ick",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP2430_EN_USBHS_MASK,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP2430_ST_USBHS_SHIFT,
+ },
+ },
+ .masters = omap2430_usbhsotg_masters,
+ .masters_cnt = ARRAY_SIZE(omap2430_usbhsotg_masters),
+ .slaves = omap2430_usbhsotg_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_usbhsotg_slaves),
+ .class = &usbotg_class,
+ /*
+ * Erratum ID: i479: the idle_req / idle_ack mechanism is potentially
+ * broken when autoidle is enabled; the workaround is to disable the
+ * autoidle bit at the module level.
+ */
+ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
+ | HWMOD_SWSUP_MSTANDBY,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+};
+
+/*
+ * 'mcbsp' class
+ * multi channel buffered serial port controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2430_mcbsp_sysc = {
+ .rev_offs = 0x007C,
+ .sysc_offs = 0x008C,
+ .sysc_flags = (SYSC_HAS_SOFTRESET),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_mcbsp_hwmod_class = {
+ .name = "mcbsp",
+ .sysc = &omap2430_mcbsp_sysc,
+ .rev = MCBSP_CONFIG_TYPE2,
+};
+
+/* mcbsp1 */
+static struct omap_hwmod_irq_info omap2430_mcbsp1_irqs[] = {
+ { .name = "tx", .irq = 59 },
+ { .name = "rx", .irq = 60 },
+ { .name = "ovr", .irq = 61 },
+ { .name = "common", .irq = 64 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcbsp1_sdma_chs[] = {
+ { .name = "rx", .dma_req = 32 },
+ { .name = "tx", .dma_req = 31 },
+};
+
+static struct omap_hwmod_addr_space omap2430_mcbsp1_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48074000,
+ .pa_end = 0x480740ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp1 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp1 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcbsp1_hwmod,
+ .clk = "mcbsp1_ick",
+ .addr = omap2430_mcbsp1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcbsp1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp1 slave ports */
+static struct omap_hwmod_ocp_if *omap2430_mcbsp1_slaves[] = {
+ &omap2430_l4_core__mcbsp1,
+};
+
+static struct omap_hwmod omap2430_mcbsp1_hwmod = {
+ .name = "mcbsp1",
+ .class = &omap2430_mcbsp_hwmod_class,
+ .mpu_irqs = omap2430_mcbsp1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp1_irqs),
+ .sdma_reqs = omap2430_mcbsp1_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp1_sdma_chs),
+ .main_clk = "mcbsp1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCBSP1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCBSP1_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcbsp1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcbsp1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* mcbsp2 */
+static struct omap_hwmod_irq_info omap2430_mcbsp2_irqs[] = {
+ { .name = "tx", .irq = 62 },
+ { .name = "rx", .irq = 63 },
+ { .name = "common", .irq = 16 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcbsp2_sdma_chs[] = {
+ { .name = "rx", .dma_req = 34 },
+ { .name = "tx", .dma_req = 33 },
+};
+
+static struct omap_hwmod_addr_space omap2430_mcbsp2_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48076000,
+ .pa_end = 0x480760ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp2 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp2 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcbsp2_hwmod,
+ .clk = "mcbsp2_ick",
+ .addr = omap2430_mcbsp2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcbsp2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp2 slave ports */
+static struct omap_hwmod_ocp_if *omap2430_mcbsp2_slaves[] = {
+ &omap2430_l4_core__mcbsp2,
+};
+
+static struct omap_hwmod omap2430_mcbsp2_hwmod = {
+ .name = "mcbsp2",
+ .class = &omap2430_mcbsp_hwmod_class,
+ .mpu_irqs = omap2430_mcbsp2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp2_irqs),
+ .sdma_reqs = omap2430_mcbsp2_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp2_sdma_chs),
+ .main_clk = "mcbsp2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP24XX_EN_MCBSP2_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP24XX_ST_MCBSP2_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcbsp2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcbsp2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* mcbsp3 */
+static struct omap_hwmod_irq_info omap2430_mcbsp3_irqs[] = {
+ { .name = "tx", .irq = 89 },
+ { .name = "rx", .irq = 90 },
+ { .name = "common", .irq = 17 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcbsp3_sdma_chs[] = {
+ { .name = "rx", .dma_req = 18 },
+ { .name = "tx", .dma_req = 17 },
+};
+
+static struct omap_hwmod_addr_space omap2430_mcbsp3_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x4808C000,
+ .pa_end = 0x4808C0ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp3 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp3 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcbsp3_hwmod,
+ .clk = "mcbsp3_ick",
+ .addr = omap2430_mcbsp3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcbsp3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp3 slave ports */
+static struct omap_hwmod_ocp_if *omap2430_mcbsp3_slaves[] = {
+ &omap2430_l4_core__mcbsp3,
+};
+
+static struct omap_hwmod omap2430_mcbsp3_hwmod = {
+ .name = "mcbsp3",
+ .class = &omap2430_mcbsp_hwmod_class,
+ .mpu_irqs = omap2430_mcbsp3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp3_irqs),
+ .sdma_reqs = omap2430_mcbsp3_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp3_sdma_chs),
+ .main_clk = "mcbsp3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP2430_EN_MCBSP3_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP2430_ST_MCBSP3_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcbsp3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcbsp3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* mcbsp4 */
+static struct omap_hwmod_irq_info omap2430_mcbsp4_irqs[] = {
+ { .name = "tx", .irq = 54 },
+ { .name = "rx", .irq = 55 },
+ { .name = "common", .irq = 18 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcbsp4_sdma_chs[] = {
+ { .name = "rx", .dma_req = 20 },
+ { .name = "tx", .dma_req = 19 },
+};
+
+static struct omap_hwmod_addr_space omap2430_mcbsp4_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x4808E000,
+ .pa_end = 0x4808E0ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp4 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp4 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcbsp4_hwmod,
+ .clk = "mcbsp4_ick",
+ .addr = omap2430_mcbsp4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcbsp4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp4 slave ports */
+static struct omap_hwmod_ocp_if *omap2430_mcbsp4_slaves[] = {
+ &omap2430_l4_core__mcbsp4,
+};
+
+static struct omap_hwmod omap2430_mcbsp4_hwmod = {
+ .name = "mcbsp4",
+ .class = &omap2430_mcbsp_hwmod_class,
+ .mpu_irqs = omap2430_mcbsp4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp4_irqs),
+ .sdma_reqs = omap2430_mcbsp4_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp4_sdma_chs),
+ .main_clk = "mcbsp4_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP2430_EN_MCBSP4_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP2430_ST_MCBSP4_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcbsp4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcbsp4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* mcbsp5 */
+static struct omap_hwmod_irq_info omap2430_mcbsp5_irqs[] = {
+ { .name = "tx", .irq = 81 },
+ { .name = "rx", .irq = 82 },
+ { .name = "common", .irq = 19 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mcbsp5_sdma_chs[] = {
+ { .name = "rx", .dma_req = 22 },
+ { .name = "tx", .dma_req = 21 },
+};
+
+static struct omap_hwmod_addr_space omap2430_mcbsp5_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48096000,
+ .pa_end = 0x480960ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp5 */
+static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp5 = {
+ .master = &omap2430_l4_core_hwmod,
+ .slave = &omap2430_mcbsp5_hwmod,
+ .clk = "mcbsp5_ick",
+ .addr = omap2430_mcbsp5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap2430_mcbsp5_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp5 slave ports */
+static struct omap_hwmod_ocp_if *omap2430_mcbsp5_slaves[] = {
+ &omap2430_l4_core__mcbsp5,
+};
+
+static struct omap_hwmod omap2430_mcbsp5_hwmod = {
+ .name = "mcbsp5",
+ .class = &omap2430_mcbsp_hwmod_class,
+ .mpu_irqs = omap2430_mcbsp5_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp5_irqs),
+ .sdma_reqs = omap2430_mcbsp5_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp5_sdma_chs),
+ .main_clk = "mcbsp5_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP2430_EN_MCBSP5_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP2430_ST_MCBSP5_SHIFT,
+ },
+ },
+ .slaves = omap2430_mcbsp5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mcbsp5_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* MMC/SD/SDIO common */
+
+static struct omap_hwmod_class_sysconfig omap2430_mmc_sysc = {
+ .rev_offs = 0x1fc,
+ .sysc_offs = 0x10,
+ .syss_offs = 0x14,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_mmc_class = {
+ .name = "mmc",
+ .sysc = &omap2430_mmc_sysc,
+};
+
+/* MMC/SD/SDIO1 */
+
+static struct omap_hwmod_irq_info omap2430_mmc1_mpu_irqs[] = {
+ { .irq = 83 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mmc1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 61 }, /* DMA_MMC1_TX */
+ { .name = "rx", .dma_req = 62 }, /* DMA_MMC1_RX */
+};
+
+static struct omap_hwmod_opt_clk omap2430_mmc1_opt_clks[] = {
+ { .role = "dbck", .clk = "mmchsdb1_fck" },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_mmc1_slaves[] = {
+ &omap2430_l4_core__mmc1,
+};
+
+static struct omap_mmc_dev_attr mmc1_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+
+static struct omap_hwmod omap2430_mmc1_hwmod = {
+ .name = "mmc1",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .mpu_irqs = omap2430_mmc1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mmc1_mpu_irqs),
+ .sdma_reqs = omap2430_mmc1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mmc1_sdma_reqs),
+ .opt_clks = omap2430_mmc1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap2430_mmc1_opt_clks),
+ .main_clk = "mmchs1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 2,
+ .module_bit = OMAP2430_EN_MMCHS1_SHIFT,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP2430_ST_MMCHS1_SHIFT,
+ },
+ },
+ .dev_attr = &mmc1_dev_attr,
+ .slaves = omap2430_mmc1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mmc1_slaves),
+ .class = &omap2430_mmc_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* MMC/SD/SDIO2 */
+
+static struct omap_hwmod_irq_info omap2430_mmc2_mpu_irqs[] = {
+ { .irq = 86 },
+};
+
+static struct omap_hwmod_dma_info omap2430_mmc2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 47 }, /* DMA_MMC2_TX */
+ { .name = "rx", .dma_req = 48 }, /* DMA_MMC2_RX */
+};
+
+static struct omap_hwmod_opt_clk omap2430_mmc2_opt_clks[] = {
+ { .role = "dbck", .clk = "mmchsdb2_fck" },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_mmc2_slaves[] = {
+ &omap2430_l4_core__mmc2,
+};
+
+static struct omap_hwmod omap2430_mmc2_hwmod = {
+ .name = "mmc2",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .mpu_irqs = omap2430_mmc2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mmc2_mpu_irqs),
+ .sdma_reqs = omap2430_mmc2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mmc2_sdma_reqs),
+ .opt_clks = omap2430_mmc2_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap2430_mmc2_opt_clks),
+ .main_clk = "mmchs2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 2,
+ .module_bit = OMAP2430_EN_MMCHS2_SHIFT,
+ .idlest_reg_id = 2,
+ .idlest_idle_bit = OMAP2430_ST_MMCHS2_SHIFT,
+ },
+ },
+ .slaves = omap2430_mmc2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap2430_mmc2_slaves),
+ .class = &omap2430_mmc_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
static __initdata struct omap_hwmod *omap2430_hwmods[] = {
&omap2430_l3_main_hwmod,
&omap2430_l4_core_hwmod,
&omap2430_l4_wkup_hwmod,
&omap2430_mpu_hwmod,
&omap2430_iva_hwmod,
+
+ &omap2430_timer1_hwmod,
+ &omap2430_timer2_hwmod,
+ &omap2430_timer3_hwmod,
+ &omap2430_timer4_hwmod,
+ &omap2430_timer5_hwmod,
+ &omap2430_timer6_hwmod,
+ &omap2430_timer7_hwmod,
+ &omap2430_timer8_hwmod,
+ &omap2430_timer9_hwmod,
+ &omap2430_timer10_hwmod,
+ &omap2430_timer11_hwmod,
+ &omap2430_timer12_hwmod,
+
&omap2430_wd_timer2_hwmod,
&omap2430_uart1_hwmod,
&omap2430_uart2_hwmod,
&omap2430_uart3_hwmod,
+ /* dss class */
+ &omap2430_dss_core_hwmod,
+ &omap2430_dss_dispc_hwmod,
+ &omap2430_dss_rfbi_hwmod,
+ &omap2430_dss_venc_hwmod,
+ /* i2c class */
&omap2430_i2c1_hwmod,
&omap2430_i2c2_hwmod,
+ &omap2430_mmc1_hwmod,
+ &omap2430_mmc2_hwmod,
/* gpio class */
&omap2430_gpio1_hwmod,
@@ -941,10 +2705,29 @@ static __initdata struct omap_hwmod *omap2430_hwmods[] = {
 /* dma_system class */
&omap2430_dma_system_hwmod,
+
+ /* mcbsp class */
+ &omap2430_mcbsp1_hwmod,
+ &omap2430_mcbsp2_hwmod,
+ &omap2430_mcbsp3_hwmod,
+ &omap2430_mcbsp4_hwmod,
+ &omap2430_mcbsp5_hwmod,
+
+ /* mailbox class */
+ &omap2430_mailbox_hwmod,
+
+ /* mcspi class */
+ &omap2430_mcspi1_hwmod,
+ &omap2430_mcspi2_hwmod,
+ &omap2430_mcspi3_hwmod,
+
+ /* usbotg class */
+ &omap2430_usbhsotg_hwmod,
+
NULL,
};
int __init omap2430_hwmod_init(void)
{
- return omap_hwmod_init(omap2430_hwmods);
+ return omap_hwmod_register(omap2430_hwmods);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 8d8181334f86..086d7272e8b8 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -18,16 +18,22 @@
#include <plat/cpu.h>
#include <plat/dma.h>
#include <plat/serial.h>
+#include <plat/l3_3xxx.h>
#include <plat/l4_3xxx.h>
#include <plat/i2c.h>
#include <plat/gpio.h>
+#include <plat/mmc.h>
#include <plat/smartreflex.h>
+#include <plat/mcbsp.h>
+#include <plat/mcspi.h>
+#include <plat/dmtimer.h>
#include "omap_hwmod_common_data.h"
#include "prm-regbits-34xx.h"
#include "cm-regbits-34xx.h"
#include "wd_timer.h"
+#include <mach/am35xx.h>
/*
* OMAP3xxx hardware module integration data
@@ -44,6 +50,12 @@ static struct omap_hwmod omap3xxx_l3_main_hwmod;
static struct omap_hwmod omap3xxx_l4_core_hwmod;
static struct omap_hwmod omap3xxx_l4_per_hwmod;
static struct omap_hwmod omap3xxx_wd_timer2_hwmod;
+static struct omap_hwmod omap3430es1_dss_core_hwmod;
+static struct omap_hwmod omap3xxx_dss_core_hwmod;
+static struct omap_hwmod omap3xxx_dss_dispc_hwmod;
+static struct omap_hwmod omap3xxx_dss_dsi1_hwmod;
+static struct omap_hwmod omap3xxx_dss_rfbi_hwmod;
+static struct omap_hwmod omap3xxx_dss_venc_hwmod;
static struct omap_hwmod omap3xxx_i2c1_hwmod;
static struct omap_hwmod omap3xxx_i2c2_hwmod;
static struct omap_hwmod omap3xxx_i2c3_hwmod;
@@ -55,9 +67,25 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod;
static struct omap_hwmod omap3xxx_gpio6_hwmod;
static struct omap_hwmod omap34xx_sr1_hwmod;
static struct omap_hwmod omap34xx_sr2_hwmod;
+static struct omap_hwmod omap34xx_mcspi1;
+static struct omap_hwmod omap34xx_mcspi2;
+static struct omap_hwmod omap34xx_mcspi3;
+static struct omap_hwmod omap34xx_mcspi4;
+static struct omap_hwmod omap3xxx_mmc1_hwmod;
+static struct omap_hwmod omap3xxx_mmc2_hwmod;
+static struct omap_hwmod omap3xxx_mmc3_hwmod;
+static struct omap_hwmod am35xx_usbhsotg_hwmod;
static struct omap_hwmod omap3xxx_dma_system_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp1_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp2_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp3_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp4_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp5_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod;
+static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod;
+
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
.master = &omap3xxx_l3_main_hwmod,
@@ -84,6 +112,19 @@ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_slaves[] = {
&omap3xxx_mpu__l3_main,
};
+/* DSS -> l3 */
+static struct omap_hwmod_ocp_if omap3xxx_dss__l3 = {
+ .master = &omap3xxx_dss_core_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .fw = {
+ .omap2 = {
+ .l3_perm_bit = OMAP3_L3_CORE_FW_INIT_ID_DSS,
+ .flags = OMAP_FIREWALL_L3,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* Master interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l3_main_masters[] = {
&omap3xxx_l3_main__l4_core,
@@ -107,7 +148,23 @@ static struct omap_hwmod omap3xxx_uart1_hwmod;
static struct omap_hwmod omap3xxx_uart2_hwmod;
static struct omap_hwmod omap3xxx_uart3_hwmod;
static struct omap_hwmod omap3xxx_uart4_hwmod;
+static struct omap_hwmod omap3xxx_usbhsotg_hwmod;
+
+/* l3_core -> usbhsotg interface */
+static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = {
+ .master = &omap3xxx_usbhsotg_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "core_l3_ick",
+ .user = OCP_USER_MPU,
+};
+/* l3_core -> am35xx_usbhsotg interface */
+static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = {
+ .master = &am35xx_usbhsotg_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "core_l3_ick",
+ .user = OCP_USER_MPU,
+};
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
.master = &omap3xxx_l4_core_hwmod,
@@ -115,6 +172,63 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* L4 CORE -> MMC1 interface */
+static struct omap_hwmod_addr_space omap3xxx_mmc1_addr_space[] = {
+ {
+ .pa_start = 0x4809c000,
+ .pa_end = 0x4809c1ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc1 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_mmc1_hwmod,
+ .clk = "mmchs1_ick",
+ .addr = omap3xxx_mmc1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mmc1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+ .flags = OMAP_FIREWALL_L4
+};
+
+/* L4 CORE -> MMC2 interface */
+static struct omap_hwmod_addr_space omap3xxx_mmc2_addr_space[] = {
+ {
+ .pa_start = 0x480b4000,
+ .pa_end = 0x480b41ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc2 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_mmc2_hwmod,
+ .clk = "mmchs2_ick",
+ .addr = omap3xxx_mmc2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mmc2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+ .flags = OMAP_FIREWALL_L4
+};
+
+/* L4 CORE -> MMC3 interface */
+static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = {
+ {
+ .pa_start = 0x480ad000,
+ .pa_end = 0x480ad1ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_mmc3_hwmod,
+ .clk = "mmchs3_ick",
+ .addr = omap3xxx_mmc3_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mmc3_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+ .flags = OMAP_FIREWALL_L4
+};
+
/* L4 CORE -> UART1 interface */
static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = {
{
@@ -301,6 +415,61 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
.user = OCP_USER_MPU,
};
+/*
+ * usbhsotg interface data
+ */
+
+static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = {
+ {
+ .pa_start = OMAP34XX_HSUSB_OTG_BASE,
+ .pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> usbhsotg */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_usbhsotg_hwmod,
+ .clk = "l4_ick",
+ .addr = omap3xxx_usbhsotg_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_masters[] = {
+ &omap3xxx_usbhsotg__l3,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_slaves[] = {
+ &omap3xxx_l4_core__usbhsotg,
+};
+
+static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
+ {
+ .pa_start = AM35XX_IPSS_USBOTGSS_BASE,
+ .pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> usbhsotg */
+static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &am35xx_usbhsotg_hwmod,
+ .clk = "l4_ick",
+ .addr = am35xx_usbhsotg_addrs,
+ .addr_cnt = ARRAY_SIZE(am35xx_usbhsotg_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if *am35xx_usbhsotg_masters[] = {
+ &am35xx_usbhsotg__l3,
+};
+
+static struct omap_hwmod_ocp_if *am35xx_usbhsotg_slaves[] = {
+ &am35xx_l4_core__usbhsotg,
+};
/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = {
&omap3xxx_l3_main__l4_core,
@@ -417,6 +586,640 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
};
+/* timer class */
+static struct omap_hwmod_class_sysconfig omap3xxx_timer_1ms_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_timer_1ms_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap3xxx_timer_1ms_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_1,
+};
+
+static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_timer_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap3xxx_timer_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_1,
+};
+
+/* timer1 */
+static struct omap_hwmod omap3xxx_timer1_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer1_mpu_irqs[] = {
+ { .irq = 37, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
+ {
+ .pa_start = 0x48318000,
+ .pa_end = 0x48318000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> timer1 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = {
+ .master = &omap3xxx_l4_wkup_hwmod,
+ .slave = &omap3xxx_timer1_hwmod,
+ .clk = "gpt1_ick",
+ .addr = omap3xxx_timer1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer1 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer1_slaves[] = {
+ &omap3xxx_l4_wkup__timer1,
+};
+
+/* timer1 hwmod */
+static struct omap_hwmod omap3xxx_timer1_hwmod = {
+ .name = "timer1",
+ .mpu_irqs = omap3xxx_timer1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer1_mpu_irqs),
+ .main_clk = "gpt1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT1_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT1_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer1_slaves),
+ .class = &omap3xxx_timer_1ms_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer2 */
+static struct omap_hwmod omap3xxx_timer2_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer2_mpu_irqs[] = {
+ { .irq = 38, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
+ {
+ .pa_start = 0x49032000,
+ .pa_end = 0x49032000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer2 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer2_hwmod,
+ .clk = "gpt2_ick",
+ .addr = omap3xxx_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer2 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer2_slaves[] = {
+ &omap3xxx_l4_per__timer2,
+};
+
+/* timer2 hwmod */
+static struct omap_hwmod omap3xxx_timer2_hwmod = {
+ .name = "timer2",
+ .mpu_irqs = omap3xxx_timer2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer2_mpu_irqs),
+ .main_clk = "gpt2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT2_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT2_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer2_slaves),
+ .class = &omap3xxx_timer_1ms_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer3 */
+static struct omap_hwmod omap3xxx_timer3_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer3_mpu_irqs[] = {
+ { .irq = 39, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
+ {
+ .pa_start = 0x49034000,
+ .pa_end = 0x49034000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer3 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer3_hwmod,
+ .clk = "gpt3_ick",
+ .addr = omap3xxx_timer3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer3 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer3_slaves[] = {
+ &omap3xxx_l4_per__timer3,
+};
+
+/* timer3 hwmod */
+static struct omap_hwmod omap3xxx_timer3_hwmod = {
+ .name = "timer3",
+ .mpu_irqs = omap3xxx_timer3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer3_mpu_irqs),
+ .main_clk = "gpt3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT3_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT3_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer3_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer4 */
+static struct omap_hwmod omap3xxx_timer4_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer4_mpu_irqs[] = {
+ { .irq = 40, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
+ {
+ .pa_start = 0x49036000,
+ .pa_end = 0x49036000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer4 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer4_hwmod,
+ .clk = "gpt4_ick",
+ .addr = omap3xxx_timer4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer4 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer4_slaves[] = {
+ &omap3xxx_l4_per__timer4,
+};
+
+/* timer4 hwmod */
+static struct omap_hwmod omap3xxx_timer4_hwmod = {
+ .name = "timer4",
+ .mpu_irqs = omap3xxx_timer4_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer4_mpu_irqs),
+ .main_clk = "gpt4_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT4_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT4_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer4_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer5 */
+static struct omap_hwmod omap3xxx_timer5_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer5_mpu_irqs[] = {
+ { .irq = 41, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
+ {
+ .pa_start = 0x49038000,
+ .pa_end = 0x49038000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer5 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer5_hwmod,
+ .clk = "gpt5_ick",
+ .addr = omap3xxx_timer5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer5_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer5 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer5_slaves[] = {
+ &omap3xxx_l4_per__timer5,
+};
+
+/* timer5 hwmod */
+static struct omap_hwmod omap3xxx_timer5_hwmod = {
+ .name = "timer5",
+ .mpu_irqs = omap3xxx_timer5_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer5_mpu_irqs),
+ .main_clk = "gpt5_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT5_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT5_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer5_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer6 */
+static struct omap_hwmod omap3xxx_timer6_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer6_mpu_irqs[] = {
+ { .irq = 42, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
+ {
+ .pa_start = 0x4903A000,
+ .pa_end = 0x4903A000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer6 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer6_hwmod,
+ .clk = "gpt6_ick",
+ .addr = omap3xxx_timer6_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer6_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer6 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer6_slaves[] = {
+ &omap3xxx_l4_per__timer6,
+};
+
+/* timer6 hwmod */
+static struct omap_hwmod omap3xxx_timer6_hwmod = {
+ .name = "timer6",
+ .mpu_irqs = omap3xxx_timer6_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer6_mpu_irqs),
+ .main_clk = "gpt6_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT6_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT6_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer6_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer6_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer7 */
+static struct omap_hwmod omap3xxx_timer7_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer7_mpu_irqs[] = {
+ { .irq = 43, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
+ {
+ .pa_start = 0x4903C000,
+ .pa_end = 0x4903C000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer7 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer7_hwmod,
+ .clk = "gpt7_ick",
+ .addr = omap3xxx_timer7_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer7_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer7 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer7_slaves[] = {
+ &omap3xxx_l4_per__timer7,
+};
+
+/* timer7 hwmod */
+static struct omap_hwmod omap3xxx_timer7_hwmod = {
+ .name = "timer7",
+ .mpu_irqs = omap3xxx_timer7_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer7_mpu_irqs),
+ .main_clk = "gpt7_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT7_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT7_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer7_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer7_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer8 */
+static struct omap_hwmod omap3xxx_timer8_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer8_mpu_irqs[] = {
+ { .irq = 44, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
+ {
+ .pa_start = 0x4903E000,
+ .pa_end = 0x4903E000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer8 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer8_hwmod,
+ .clk = "gpt8_ick",
+ .addr = omap3xxx_timer8_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer8_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer8 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer8_slaves[] = {
+ &omap3xxx_l4_per__timer8,
+};
+
+/* timer8 hwmod */
+static struct omap_hwmod omap3xxx_timer8_hwmod = {
+ .name = "timer8",
+ .mpu_irqs = omap3xxx_timer8_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer8_mpu_irqs),
+ .main_clk = "gpt8_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT8_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT8_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer8_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer8_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer9 */
+static struct omap_hwmod omap3xxx_timer9_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer9_mpu_irqs[] = {
+ { .irq = 45, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
+ {
+ .pa_start = 0x49040000,
+ .pa_end = 0x49040000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer9 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_timer9_hwmod,
+ .clk = "gpt9_ick",
+ .addr = omap3xxx_timer9_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer9_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer9 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer9_slaves[] = {
+ &omap3xxx_l4_per__timer9,
+};
+
+/* timer9 hwmod */
+static struct omap_hwmod omap3xxx_timer9_hwmod = {
+ .name = "timer9",
+ .mpu_irqs = omap3xxx_timer9_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer9_mpu_irqs),
+ .main_clk = "gpt9_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT9_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT9_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer9_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer9_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer10 */
+static struct omap_hwmod omap3xxx_timer10_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer10_mpu_irqs[] = {
+ { .irq = 46, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer10_addrs[] = {
+ {
+ .pa_start = 0x48086000,
+ .pa_end = 0x48086000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer10 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_timer10_hwmod,
+ .clk = "gpt10_ick",
+ .addr = omap3xxx_timer10_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer10_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer10 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer10_slaves[] = {
+ &omap3xxx_l4_core__timer10,
+};
+
+/* timer10 hwmod */
+static struct omap_hwmod omap3xxx_timer10_hwmod = {
+ .name = "timer10",
+ .mpu_irqs = omap3xxx_timer10_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer10_mpu_irqs),
+ .main_clk = "gpt10_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT10_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT10_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer10_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer10_slaves),
+ .class = &omap3xxx_timer_1ms_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer11 */
+static struct omap_hwmod omap3xxx_timer11_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer11_mpu_irqs[] = {
+ { .irq = 47, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer11_addrs[] = {
+ {
+ .pa_start = 0x48088000,
+ .pa_end = 0x48088000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer11 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_timer11_hwmod,
+ .clk = "gpt11_ick",
+ .addr = omap3xxx_timer11_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer11_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer11 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer11_slaves[] = {
+ &omap3xxx_l4_core__timer11,
+};
+
+/* timer11 hwmod */
+static struct omap_hwmod omap3xxx_timer11_hwmod = {
+ .name = "timer11",
+ .mpu_irqs = omap3xxx_timer11_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer11_mpu_irqs),
+ .main_clk = "gpt11_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT11_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT11_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer11_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer11_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
+/* timer12 */
+static struct omap_hwmod omap3xxx_timer12_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = {
+ { .irq = 95, },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
+ {
+ .pa_start = 0x48304000,
+ .pa_end = 0x48304000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> timer12 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer12 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_timer12_hwmod,
+ .clk = "gpt12_ick",
+ .addr = omap3xxx_timer12_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_timer12_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer12 slave port */
+static struct omap_hwmod_ocp_if *omap3xxx_timer12_slaves[] = {
+ &omap3xxx_l4_core__timer12,
+};
+
+/* timer12 hwmod */
+static struct omap_hwmod omap3xxx_timer12_hwmod = {
+ .name = "timer12",
+ .mpu_irqs = omap3xxx_timer12_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer12_mpu_irqs),
+ .main_clk = "gpt12_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_GPT12_SHIFT,
+ .module_offs = WKUP_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_GPT12_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_timer12_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_timer12_slaves),
+ .class = &omap3xxx_timer_hwmod_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+};
+
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = {
{
@@ -664,6 +1467,413 @@ static struct omap_hwmod_class i2c_class = {
.sysc = &i2c_sysc,
};
+/*
+ * 'dss' class
+ * display sub-system
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_dss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_dss_hwmod_class = {
+ .name = "dss",
+ .sysc = &omap3xxx_dss_sysc,
+};
+
+static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = {
+ { .name = "dispc", .dma_req = 5 },
+ { .name = "dsi1", .dma_req = 74 },
+};
+
+/* dss */
+/* dss master ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dss_masters[] = {
+ &omap3xxx_dss__l3,
+};
+
+static struct omap_hwmod_addr_space omap3xxx_dss_addrs[] = {
+ {
+ .pa_start = 0x48050000,
+ .pa_end = 0x480503FF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss */
+static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3430es1_dss_core_hwmod,
+ .clk = "dss_ick",
+ .addr = omap3xxx_dss_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_dss_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
+ .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_dss_core_hwmod,
+ .clk = "dss_ick",
+ .addr = omap3xxx_dss_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_dss_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
+ .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss slave ports */
+static struct omap_hwmod_ocp_if *omap3430es1_dss_slaves[] = {
+ &omap3430es1_l4_core__dss,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
+ &omap3xxx_l4_core__dss,
+};
+
+static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_tv_fck" },
+ { .role = "video_clk", .clk = "dss_96m_fck" },
+ { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
+static struct omap_hwmod omap3430es1_dss_core_hwmod = {
+ .name = "dss_core",
+ .class = &omap3xxx_dss_hwmod_class,
+ .main_clk = "dss1_alwon_fck", /* instead of dss_fck */
+ .sdma_reqs = omap3xxx_dss_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_dss_sdma_chs),
+
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_DSS1_SHIFT,
+ .module_offs = OMAP3430_DSS_MOD,
+ .idlest_reg_id = 1,
+ .idlest_stdby_bit = OMAP3430ES1_ST_DSS_SHIFT,
+ },
+ },
+ .opt_clks = dss_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
+ .slaves = omap3430es1_dss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
+ .masters = omap3xxx_dss_masters,
+ .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+static struct omap_hwmod omap3xxx_dss_core_hwmod = {
+ .name = "dss_core",
+ .class = &omap3xxx_dss_hwmod_class,
+ .main_clk = "dss1_alwon_fck", /* instead of dss_fck */
+ .sdma_reqs = omap3xxx_dss_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_dss_sdma_chs),
+
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_DSS1_SHIFT,
+ .module_offs = OMAP3430_DSS_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT,
+ .idlest_stdby_bit = OMAP3430ES2_ST_DSS_STDBY_SHIFT,
+ },
+ },
+ .opt_clks = dss_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
+ .slaves = omap3xxx_dss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_slaves),
+ .masters = omap3xxx_dss_masters,
+ .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2 |
+ CHIP_IS_OMAP3630ES1 | CHIP_GE_OMAP3630ES1_1),
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_dispc_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_MIDLEMODE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_dispc_hwmod_class = {
+ .name = "dispc",
+ .sysc = &omap3xxx_dispc_sysc,
+};
+
+static struct omap_hwmod_irq_info omap3xxx_dispc_irqs[] = {
+ { .irq = 25 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_dss_dispc_addrs[] = {
+ {
+ .pa_start = 0x48050400,
+ .pa_end = 0x480507FF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_dispc */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_dss_dispc_hwmod,
+ .clk = "dss_ick",
+ .addr = omap3xxx_dss_dispc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
+ .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_dispc slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
+ &omap3xxx_l4_core__dss_dispc,
+};
+
+static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
+ .name = "dss_dispc",
+ .class = &omap3xxx_dispc_hwmod_class,
+ .mpu_irqs = omap3xxx_dispc_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_dispc_irqs),
+ .main_clk = "dss1_alwon_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_DSS1_SHIFT,
+ .module_offs = OMAP3430_DSS_MOD,
+ },
+ },
+ .slaves = omap3xxx_dss_dispc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
+ CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
+ CHIP_GE_OMAP3630ES1_1),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'dsi' class
+ * display serial interface controller
+ */
+
+static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
+ .name = "dsi",
+};
+
+static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
+ { .irq = 25 },
+};
+
+/* dss_dsi1 */
+static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
+ {
+ .pa_start = 0x4804FC00,
+ .pa_end = 0x4804FFFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_dsi1 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_dss_dsi1_hwmod,
+ .addr = omap3xxx_dss_dsi1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
+ .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_dsi1 slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
+ &omap3xxx_l4_core__dss_dsi1,
+};
+
+static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
+ .name = "dss_dsi1",
+ .class = &omap3xxx_dsi_hwmod_class,
+ .mpu_irqs = omap3xxx_dsi1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_dsi1_irqs),
+ .main_clk = "dss1_alwon_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_DSS1_SHIFT,
+ .module_offs = OMAP3430_DSS_MOD,
+ },
+ },
+ .slaves = omap3xxx_dss_dsi1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
+ CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
+ CHIP_GE_OMAP3630ES1_1),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'rfbi' class
+ * remote frame buffer interface
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_rfbi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_rfbi_hwmod_class = {
+ .name = "rfbi",
+ .sysc = &omap3xxx_rfbi_sysc,
+};
+
+static struct omap_hwmod_addr_space omap3xxx_dss_rfbi_addrs[] = {
+ {
+ .pa_start = 0x48050800,
+ .pa_end = 0x48050BFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_rfbi */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_dss_rfbi_hwmod,
+ .clk = "dss_ick",
+ .addr = omap3xxx_dss_rfbi_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
+ .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_rfbi slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
+ &omap3xxx_l4_core__dss_rfbi,
+};
+
+static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
+ .name = "dss_rfbi",
+ .class = &omap3xxx_rfbi_hwmod_class,
+ .main_clk = "dss1_alwon_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_DSS1_SHIFT,
+ .module_offs = OMAP3430_DSS_MOD,
+ },
+ },
+ .slaves = omap3xxx_dss_rfbi_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
+ CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
+ CHIP_GE_OMAP3630ES1_1),
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'venc' class
+ * video encoder
+ */
+
+static struct omap_hwmod_class omap3xxx_venc_hwmod_class = {
+ .name = "venc",
+};
+
+/* dss_venc */
+static struct omap_hwmod_addr_space omap3xxx_dss_venc_addrs[] = {
+ {
+ .pa_start = 0x48050C00,
+ .pa_end = 0x48050FFF,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> dss_venc */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_dss_venc_hwmod,
+ .clk = "dss_tv_fck",
+ .addr = omap3xxx_dss_venc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_dss_venc_addrs),
+ .fw = {
+ .omap2 = {
+ .l4_fw_region = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
+ .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
+ .flags = OMAP_FIREWALL_L4,
+ }
+ },
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dss_venc slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
+ &omap3xxx_l4_core__dss_venc,
+};
+
+static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
+ .name = "dss_venc",
+ .class = &omap3xxx_venc_hwmod_class,
+ .main_clk = "dss1_alwon_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_DSS1_SHIFT,
+ .module_offs = OMAP3430_DSS_MOD,
+ },
+ },
+ .slaves = omap3xxx_dss_venc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
+ CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
+ CHIP_GE_OMAP3630ES1_1),
+ .flags = HWMOD_NO_IDLEST,
+};
+
/* I2C1 */
static struct omap_i2c_dev_attr i2c1_dev_attr = {
@@ -1227,6 +2437,437 @@ static struct omap_hwmod omap3xxx_dma_system_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
+/*
+ * 'mcbsp' class
+ * multi channel buffered serial port controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sysc = {
+ .sysc_offs = 0x008c,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+ .clockact = 0x2,
+};
+
+static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = {
+ .name = "mcbsp",
+ .sysc = &omap3xxx_mcbsp_sysc,
+ .rev = MCBSP_CONFIG_TYPE3,
+};
+
+/* mcbsp1 */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
+ { .name = "irq", .irq = 16 },
+ { .name = "tx", .irq = 59 },
+ { .name = "rx", .irq = 60 },
+};
+
+static struct omap_hwmod_dma_info omap3xxx_mcbsp1_sdma_chs[] = {
+ { .name = "rx", .dma_req = 32 },
+ { .name = "tx", .dma_req = 31 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48074000,
+ .pa_end = 0x480740ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp1 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_mcbsp1_hwmod,
+ .clk = "mcbsp1_ick",
+ .addr = omap3xxx_mcbsp1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp1 slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp1_slaves[] = {
+ &omap3xxx_l4_core__mcbsp1,
+};
+
+static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
+ .name = "mcbsp1",
+ .class = &omap3xxx_mcbsp_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_irqs),
+ .sdma_reqs = omap3xxx_mcbsp1_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_sdma_chs),
+ .main_clk = "mcbsp1_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP1_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP1_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcbsp2 */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = {
+ { .name = "irq", .irq = 17 },
+ { .name = "tx", .irq = 62 },
+ { .name = "rx", .irq = 63 },
+};
+
+static struct omap_hwmod_dma_info omap3xxx_mcbsp2_sdma_chs[] = {
+ { .name = "rx", .dma_req = 34 },
+ { .name = "tx", .dma_req = 33 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x49022000,
+ .pa_end = 0x490220ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcbsp2 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_mcbsp2_hwmod,
+ .clk = "mcbsp2_ick",
+ .addr = omap3xxx_mcbsp2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp2 slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_slaves[] = {
+ &omap3xxx_l4_per__mcbsp2,
+};
+
+static struct omap_mcbsp_dev_attr omap34xx_mcbsp2_dev_attr = {
+ .sidetone = "mcbsp2_sidetone",
+};
+
+static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
+ .name = "mcbsp2",
+ .class = &omap3xxx_mcbsp_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_irqs),
+ .sdma_reqs = omap3xxx_mcbsp2_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sdma_chs),
+ .main_clk = "mcbsp2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP2_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_slaves),
+ .dev_attr = &omap34xx_mcbsp2_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcbsp3 */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = {
+ { .name = "irq", .irq = 22 },
+ { .name = "tx", .irq = 89 },
+ { .name = "rx", .irq = 90 },
+};
+
+static struct omap_hwmod_dma_info omap3xxx_mcbsp3_sdma_chs[] = {
+ { .name = "rx", .dma_req = 18 },
+ { .name = "tx", .dma_req = 17 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x49024000,
+ .pa_end = 0x490240ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcbsp3 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_mcbsp3_hwmod,
+ .clk = "mcbsp3_ick",
+ .addr = omap3xxx_mcbsp3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp3 slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_slaves[] = {
+ &omap3xxx_l4_per__mcbsp3,
+};
+
+static struct omap_mcbsp_dev_attr omap34xx_mcbsp3_dev_attr = {
+ .sidetone = "mcbsp3_sidetone",
+};
+
+static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
+ .name = "mcbsp3",
+ .class = &omap3xxx_mcbsp_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_irqs),
+ .sdma_reqs = omap3xxx_mcbsp3_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sdma_chs),
+ .main_clk = "mcbsp3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP3_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_slaves),
+ .dev_attr = &omap34xx_mcbsp3_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcbsp4 */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = {
+ { .name = "irq", .irq = 23 },
+ { .name = "tx", .irq = 54 },
+ { .name = "rx", .irq = 55 },
+};
+
+static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = {
+ { .name = "rx", .dma_req = 20 },
+ { .name = "tx", .dma_req = 19 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x49026000,
+ .pa_end = 0x490260ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcbsp4 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_mcbsp4_hwmod,
+ .clk = "mcbsp4_ick",
+ .addr = omap3xxx_mcbsp4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp4 slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp4_slaves[] = {
+ &omap3xxx_l4_per__mcbsp4,
+};
+
+static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
+ .name = "mcbsp4",
+ .class = &omap3xxx_mcbsp_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_irqs),
+ .sdma_reqs = omap3xxx_mcbsp4_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_sdma_chs),
+ .main_clk = "mcbsp4_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP4_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP4_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcbsp5 */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = {
+ { .name = "irq", .irq = 27 },
+ { .name = "tx", .irq = 81 },
+ { .name = "rx", .irq = 82 },
+};
+
+static struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = {
+ { .name = "rx", .dma_req = 22 },
+ { .name = "tx", .dma_req = 21 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48096000,
+ .pa_end = 0x480960ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_core -> mcbsp5 */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_mcbsp5_hwmod,
+ .clk = "mcbsp5_ick",
+ .addr = omap3xxx_mcbsp5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp5 slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp5_slaves[] = {
+ &omap3xxx_l4_core__mcbsp5,
+};
+
+static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
+ .name = "mcbsp5",
+ .class = &omap3xxx_mcbsp_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp5_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_irqs),
+ .sdma_reqs = omap3xxx_mcbsp5_sdma_chs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_sdma_chs),
+ .main_clk = "mcbsp5_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP5_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP5_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* 'mcbsp sidetone' class */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sidetone_sysc = {
+ .sysc_offs = 0x0010,
+ .sysc_flags = SYSC_HAS_AUTOIDLE,
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = {
+ .name = "mcbsp_sidetone",
+ .sysc = &omap3xxx_mcbsp_sidetone_sysc,
+};
+
+/* mcbsp2_sidetone */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = {
+ { .name = "irq", .irq = 4 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
+ {
+ .name = "sidetone",
+ .pa_start = 0x49028000,
+ .pa_end = 0x490280ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcbsp2_sidetone */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_mcbsp2_sidetone_hwmod,
+ .clk = "mcbsp2_ick",
+ .addr = omap3xxx_mcbsp2_sidetone_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* mcbsp2_sidetone slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_sidetone_slaves[] = {
+ &omap3xxx_l4_per__mcbsp2_sidetone,
+};
+
+static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
+ .name = "mcbsp2_sidetone",
+ .class = &omap3xxx_mcbsp_sidetone_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp2_sidetone_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_irqs),
+ .main_clk = "mcbsp2_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP2_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp2_sidetone_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcbsp3_sidetone */
+static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = {
+ { .name = "irq", .irq = 5 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
+ {
+ .name = "sidetone",
+ .pa_start = 0x4902A000,
+ .pa_end = 0x4902A0ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcbsp3_sidetone */
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
+ .master = &omap3xxx_l4_per_hwmod,
+ .slave = &omap3xxx_mcbsp3_sidetone_hwmod,
+ .clk = "mcbsp3_ick",
+ .addr = omap3xxx_mcbsp3_sidetone_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* mcbsp3_sidetone slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_sidetone_slaves[] = {
+ &omap3xxx_l4_per__mcbsp3_sidetone,
+};
+
+static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
+ .name = "mcbsp3_sidetone",
+ .class = &omap3xxx_mcbsp_sidetone_hwmod_class,
+ .mpu_irqs = omap3xxx_mcbsp3_sidetone_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_irqs),
+ .main_clk = "mcbsp3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCBSP3_SHIFT,
+ .module_offs = OMAP3430_PER_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mcbsp3_sidetone_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
/* SR common */
static struct omap_hwmod_sysc_fields omap34xx_sr_sysc_fields = {
.clkact_shift = 20,
@@ -1356,18 +2997,617 @@ static struct omap_hwmod omap36xx_sr2_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
};
+
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors
+ * using a queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_mailbox_sysc = {
+ .rev_offs = 0x000,
+ .sysc_offs = 0x010,
+ .syss_offs = 0x014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = {
+ .name = "mailbox",
+ .sysc = &omap3xxx_mailbox_sysc,
+};
+
+static struct omap_hwmod omap3xxx_mailbox_hwmod;
+static struct omap_hwmod_irq_info omap3xxx_mailbox_irqs[] = {
+ { .irq = 26 },
+};
+
+static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
+ {
+ .pa_start = 0x48094000,
+ .pa_end = 0x480941ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+/* l4_core -> mailbox */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_mailbox_hwmod,
+ .addr = omap3xxx_mailbox_addrs,
+ .addr_cnt = ARRAY_SIZE(omap3xxx_mailbox_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mailbox slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_mailbox_slaves[] = {
+ &omap3xxx_l4_core__mailbox,
+};
+
+static struct omap_hwmod omap3xxx_mailbox_hwmod = {
+ .name = "mailbox",
+ .class = &omap3xxx_mailbox_hwmod_class,
+ .mpu_irqs = omap3xxx_mailbox_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mailbox_irqs),
+ .main_clk = "mailboxes_ick",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MAILBOXES_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MAILBOXES_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mailbox_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mailbox_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
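+
Illustrative sketch (not part of the diff): the .addr and .mpu_irqs entries above are what the omap_device layer eventually exposes to the mailbox driver as MEM and IRQ platform resources. A minimal, hypothetical driver-side retrieval follows; the probe function name is invented and only standard platform_device helpers are assumed.

#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical probe: the MEM resource would span 0x48094000..0x480941ff
 * and the IRQ resource would be 26, both taken from the hwmod data above. */
static int example_mbox_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	void __iomem *base;

	if (!mem || irq < 0)
		return -ENODEV;

	base = ioremap(mem->start, resource_size(mem));
	if (!base)
		return -ENOMEM;

	/* ... map the mailbox queues and request_irq(irq, ...) here ... */
	return 0;
}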
+
+/* l4 core -> mcspi1 interface */
+static struct omap_hwmod_addr_space omap34xx_mcspi1_addr_space[] = {
+ {
+ .pa_start = 0x48098000,
+ .pa_end = 0x480980ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_mcspi1,
+ .clk = "mcspi1_ick",
+ .addr = omap34xx_mcspi1_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap34xx_mcspi1_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 core -> mcspi2 interface */
+static struct omap_hwmod_addr_space omap34xx_mcspi2_addr_space[] = {
+ {
+ .pa_start = 0x4809a000,
+ .pa_end = 0x4809a0ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_mcspi2,
+ .clk = "mcspi2_ick",
+ .addr = omap34xx_mcspi2_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap34xx_mcspi2_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 core -> mcspi3 interface */
+static struct omap_hwmod_addr_space omap34xx_mcspi3_addr_space[] = {
+ {
+ .pa_start = 0x480b8000,
+ .pa_end = 0x480b80ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_mcspi3,
+ .clk = "mcspi3_ick",
+ .addr = omap34xx_mcspi3_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap34xx_mcspi3_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 core -> mcspi4 interface */
+static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
+ {
+ .pa_start = 0x480ba000,
+ .pa_end = 0x480ba0ff,
+ .flags = ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_mcspi4,
+ .clk = "mcspi4_ick",
+ .addr = omap34xx_mcspi4_addr_space,
+ .addr_cnt = ARRAY_SIZE(omap34xx_mcspi4_addr_space),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
+ * 'mcspi' class
+ * multichannel serial port interface (mcspi) / master/slave synchronous serial
+ * bus
+ */
+
+static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap34xx_mcspi_class = {
+ .name = "mcspi",
+ .sysc = &omap34xx_mcspi_sysc,
+ .rev = OMAP3_MCSPI_REV,
+};
+
+/* mcspi1 */
+static struct omap_hwmod_irq_info omap34xx_mcspi1_mpu_irqs[] = {
+ { .name = "irq", .irq = 65 },
+};
+
+static struct omap_hwmod_dma_info omap34xx_mcspi1_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 35 },
+ { .name = "rx0", .dma_req = 36 },
+ { .name = "tx1", .dma_req = 37 },
+ { .name = "rx1", .dma_req = 38 },
+ { .name = "tx2", .dma_req = 39 },
+ { .name = "rx2", .dma_req = 40 },
+ { .name = "tx3", .dma_req = 41 },
+ { .name = "rx3", .dma_req = 42 },
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = {
+ &omap34xx_l4_core__mcspi1,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
+ .num_chipselect = 4,
+};
+
+static struct omap_hwmod omap34xx_mcspi1 = {
+ .name = "mcspi1",
+ .mpu_irqs = omap34xx_mcspi1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_mpu_irqs),
+ .sdma_reqs = omap34xx_mcspi1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_sdma_reqs),
+ .main_clk = "mcspi1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCSPI1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT,
+ },
+ },
+ .slaves = omap34xx_mcspi1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi1_slaves),
+ .class = &omap34xx_mcspi_class,
+ .dev_attr = &omap_mcspi1_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcspi2 */
+static struct omap_hwmod_irq_info omap34xx_mcspi2_mpu_irqs[] = {
+ { .name = "irq", .irq = 66 },
+};
+
+static struct omap_hwmod_dma_info omap34xx_mcspi2_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 43 },
+ { .name = "rx0", .dma_req = 44 },
+ { .name = "tx1", .dma_req = 45 },
+ { .name = "rx1", .dma_req = 46 },
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = {
+ &omap34xx_l4_core__mcspi2,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap34xx_mcspi2 = {
+ .name = "mcspi2",
+ .mpu_irqs = omap34xx_mcspi2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_mpu_irqs),
+ .sdma_reqs = omap34xx_mcspi2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_sdma_reqs),
+ .main_clk = "mcspi2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCSPI2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT,
+ },
+ },
+ .slaves = omap34xx_mcspi2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi2_slaves),
+ .class = &omap34xx_mcspi_class,
+ .dev_attr = &omap_mcspi2_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcspi3 */
+static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
+ { .name = "irq", .irq = 91 }, /* 91 */
+};
+
+static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 15 },
+ { .name = "rx0", .dma_req = 16 },
+ { .name = "tx1", .dma_req = 23 },
+ { .name = "rx1", .dma_req = 24 },
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = {
+ &omap34xx_l4_core__mcspi3,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap34xx_mcspi3 = {
+ .name = "mcspi3",
+ .mpu_irqs = omap34xx_mcspi3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_mpu_irqs),
+ .sdma_reqs = omap34xx_mcspi3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_sdma_reqs),
+ .main_clk = "mcspi3_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCSPI3_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCSPI3_SHIFT,
+ },
+ },
+ .slaves = omap34xx_mcspi3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi3_slaves),
+ .class = &omap34xx_mcspi_class,
+ .dev_attr = &omap_mcspi3_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* mcspi4 */
+static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
+ { .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */
+};
+
+static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
+ { .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = {
+ &omap34xx_l4_core__mcspi4,
+};
+
+static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
+ .num_chipselect = 1,
+};
+
+static struct omap_hwmod omap34xx_mcspi4 = {
+ .name = "mcspi4",
+ .mpu_irqs = omap34xx_mcspi4_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_mpu_irqs),
+ .sdma_reqs = omap34xx_mcspi4_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_sdma_reqs),
+ .main_clk = "mcspi4_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MCSPI4_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT,
+ },
+ },
+ .slaves = omap34xx_mcspi4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi4_slaves),
+ .class = &omap34xx_mcspi_class,
+ .dev_attr = &omap_mcspi4_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
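Illustrative sketch (not part of the diff): the per-instance .dev_attr above (struct omap2_mcspi_dev_attr) is how the chip-select count reaches the McSPI device setup code. A hypothetical walk over the "mcspi" class is shown below; it assumes omap_hwmod_for_each_by_class() from <plat/omap_hwmod.h>, and the function names are invented.

#include <linux/kernel.h>
#include <linux/init.h>
#include <plat/omap_hwmod.h>
#include <plat/mcspi.h>

/* Callback run once per registered "mcspi" hwmod; it only reads back the
 * chip-select count stored in .dev_attr (4, 2, 2 and 1 for mcspi1..4). */
static int example_show_mcspi_cs(struct omap_hwmod *oh, void *unused)
{
	struct omap2_mcspi_dev_attr *attr = oh->dev_attr;

	pr_info("%s: %d chip-selects\n", oh->name, attr->num_chipselect);
	return 0;
}

static int __init example_list_mcspi(void)
{
	return omap_hwmod_for_each_by_class("mcspi", example_show_mcspi_cs, NULL);
}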
+/*
+ * usbhsotg
+ */
+static struct omap_hwmod_class_sysconfig omap3xxx_usbhsotg_sysc = {
+ .rev_offs = 0x0400,
+ .sysc_offs = 0x0404,
+ .syss_offs = 0x0408,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE|
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class usbotg_class = {
+ .name = "usbotg",
+ .sysc = &omap3xxx_usbhsotg_sysc,
+};
+
+/* usb_otg_hs */
+static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
+ { .name = "mc", .irq = 92 },
+ { .name = "dma", .irq = 93 },
+};
+
+static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
+ .name = "usb_otg_hs",
+ .mpu_irqs = omap3xxx_usbhsotg_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_mpu_irqs),
+ .main_clk = "hsotgusb_ick",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT,
+ .idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT
+ },
+ },
+ .masters = omap3xxx_usbhsotg_masters,
+ .masters_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_masters),
+ .slaves = omap3xxx_usbhsotg_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_slaves),
+ .class = &usbotg_class,
+
+ /*
+ * Erratum ID: i479: the idle_req / idle_ack mechanism is potentially
+ * broken when autoidle is enabled; the workaround is to disable the
+ * autoidle bit at module level.
+ */
+ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
+ | HWMOD_SWSUP_MSTANDBY,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* usb_otg_hs */
+static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
+ { .name = "mc", .irq = 71 },
+};
+
+static struct omap_hwmod_class am35xx_usbotg_class = {
+ .name = "am35xx_usbotg",
+ .sysc = NULL,
+};
+
+static struct omap_hwmod am35xx_usbhsotg_hwmod = {
+ .name = "am35x_otg_hs",
+ .mpu_irqs = am35xx_usbhsotg_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(am35xx_usbhsotg_mpu_irqs),
+ .main_clk = NULL,
+ .prcm = {
+ .omap2 = {
+ },
+ },
+ .masters = am35xx_usbhsotg_masters,
+ .masters_cnt = ARRAY_SIZE(am35xx_usbhsotg_masters),
+ .slaves = am35xx_usbhsotg_slaves,
+ .slaves_cnt = ARRAY_SIZE(am35xx_usbhsotg_slaves),
+ .class = &am35xx_usbotg_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES3_1),
+};
+
+/* MMC/SD/SDIO common */
+
+static struct omap_hwmod_class_sysconfig omap34xx_mmc_sysc = {
+ .rev_offs = 0x1fc,
+ .sysc_offs = 0x10,
+ .syss_offs = 0x14,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap34xx_mmc_class = {
+ .name = "mmc",
+ .sysc = &omap34xx_mmc_sysc,
+};
+
+/* MMC/SD/SDIO1 */
+
+static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = {
+ { .irq = 83, },
+};
+
+static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 61, },
+ { .name = "rx", .dma_req = 62, },
+};
+
+static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
+ { .role = "dbck", .clk = "omap_32k_fck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_mmc1_slaves[] = {
+ &omap3xxx_l4_core__mmc1,
+};
+
+static struct omap_mmc_dev_attr mmc1_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+
+static struct omap_hwmod omap3xxx_mmc1_hwmod = {
+ .name = "mmc1",
+ .mpu_irqs = omap34xx_mmc1_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mmc1_mpu_irqs),
+ .sdma_reqs = omap34xx_mmc1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mmc1_sdma_reqs),
+ .opt_clks = omap34xx_mmc1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
+ .main_clk = "mmchs1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MMC1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
+ },
+ },
+ .dev_attr = &mmc1_dev_attr,
+ .slaves = omap3xxx_mmc1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves),
+ .class = &omap34xx_mmc_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* MMC/SD/SDIO2 */
+
+static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = {
+ { .irq = INT_24XX_MMC2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 47, },
+ { .name = "rx", .dma_req = 48, },
+};
+
+static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
+ { .role = "dbck", .clk = "omap_32k_fck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
+ &omap3xxx_l4_core__mmc2,
+};
+
+static struct omap_hwmod omap3xxx_mmc2_hwmod = {
+ .name = "mmc2",
+ .mpu_irqs = omap34xx_mmc2_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mmc2_mpu_irqs),
+ .sdma_reqs = omap34xx_mmc2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mmc2_sdma_reqs),
+ .opt_clks = omap34xx_mmc2_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
+ .main_clk = "mmchs2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MMC2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mmc2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves),
+ .class = &omap34xx_mmc_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* MMC/SD/SDIO3 */
+
+static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = {
+ { .irq = 94, },
+};
+
+static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 77, },
+ { .name = "rx", .dma_req = 78, },
+};
+
+static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
+ { .role = "dbck", .clk = "omap_32k_fck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_mmc3_slaves[] = {
+ &omap3xxx_l4_core__mmc3,
+};
+
+static struct omap_hwmod omap3xxx_mmc3_hwmod = {
+ .name = "mmc3",
+ .mpu_irqs = omap34xx_mmc3_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mmc3_mpu_irqs),
+ .sdma_reqs = omap34xx_mmc3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mmc3_sdma_reqs),
+ .opt_clks = omap34xx_mmc3_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc3_opt_clks),
+ .main_clk = "mmchs3_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MMC3_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MMC3_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_mmc3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc3_slaves),
+ .class = &omap34xx_mmc_class,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_l3_main_hwmod,
&omap3xxx_l4_core_hwmod,
&omap3xxx_l4_per_hwmod,
&omap3xxx_l4_wkup_hwmod,
+ &omap3xxx_mmc1_hwmod,
+ &omap3xxx_mmc2_hwmod,
+ &omap3xxx_mmc3_hwmod,
&omap3xxx_mpu_hwmod,
&omap3xxx_iva_hwmod,
+
+ &omap3xxx_timer1_hwmod,
+ &omap3xxx_timer2_hwmod,
+ &omap3xxx_timer3_hwmod,
+ &omap3xxx_timer4_hwmod,
+ &omap3xxx_timer5_hwmod,
+ &omap3xxx_timer6_hwmod,
+ &omap3xxx_timer7_hwmod,
+ &omap3xxx_timer8_hwmod,
+ &omap3xxx_timer9_hwmod,
+ &omap3xxx_timer10_hwmod,
+ &omap3xxx_timer11_hwmod,
+ &omap3xxx_timer12_hwmod,
+
&omap3xxx_wd_timer2_hwmod,
&omap3xxx_uart1_hwmod,
&omap3xxx_uart2_hwmod,
&omap3xxx_uart3_hwmod,
&omap3xxx_uart4_hwmod,
+ /* dss class */
+ &omap3430es1_dss_core_hwmod,
+ &omap3xxx_dss_core_hwmod,
+ &omap3xxx_dss_dispc_hwmod,
+ &omap3xxx_dss_dsi1_hwmod,
+ &omap3xxx_dss_rfbi_hwmod,
+ &omap3xxx_dss_venc_hwmod,
+
+ /* i2c class */
&omap3xxx_i2c1_hwmod,
&omap3xxx_i2c2_hwmod,
&omap3xxx_i2c3_hwmod,
@@ -1387,10 +3627,35 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
/* dma_system class*/
&omap3xxx_dma_system_hwmod,
+
+ /* mcbsp class */
+ &omap3xxx_mcbsp1_hwmod,
+ &omap3xxx_mcbsp2_hwmod,
+ &omap3xxx_mcbsp3_hwmod,
+ &omap3xxx_mcbsp4_hwmod,
+ &omap3xxx_mcbsp5_hwmod,
+ &omap3xxx_mcbsp2_sidetone_hwmod,
+ &omap3xxx_mcbsp3_sidetone_hwmod,
+
+ /* mailbox class */
+ &omap3xxx_mailbox_hwmod,
+
+ /* mcspi class */
+ &omap34xx_mcspi1,
+ &omap34xx_mcspi2,
+ &omap34xx_mcspi3,
+ &omap34xx_mcspi4,
+
+ /* usbotg class */
+ &omap3xxx_usbhsotg_hwmod,
+
+ /* usbotg for am35x */
+ &am35xx_usbhsotg_hwmod,
+
NULL,
};
int __init omap3xxx_hwmod_init(void)
{
- return omap_hwmod_init(omap3xxx_hwmods);
+ return omap_hwmod_register(omap3xxx_hwmods);
}
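Illustrative sketch (not part of the diff): once omap_hwmod_register() has run, any of the entries above can be looked up by its .name string and enabled or idled through the hwmod core. A minimal, hypothetical example follows; only omap_hwmod_lookup(), omap_hwmod_enable() and omap_hwmod_idle() from <plat/omap_hwmod.h> are assumed, and the function name is invented.

#include <linux/errno.h>
#include <linux/init.h>
#include <plat/omap_hwmod.h>

/* Hypothetical: briefly enable mmc1, whose clocks, SYSCONFIG layout and
 * PRCM bits are all described by omap3xxx_mmc1_hwmod above. */
static int __init example_touch_mmc1(void)
{
	struct omap_hwmod *oh = omap_hwmod_lookup("mmc1");

	if (!oh)
		return -ENODEV;

	omap_hwmod_enable(oh);	/* main_clk "mmchs1_fck" + sysconfig setup */
	/* ... access the module through its l4_core address space ... */
	omap_hwmod_idle(oh);
	return 0;
}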
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index c2806bd11fbf..7dbcdf751c02 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -1,7 +1,7 @@
/*
* Hardware modules present on the OMAP44xx chips
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley
@@ -24,6 +24,9 @@
#include <plat/cpu.h>
#include <plat/gpio.h>
#include <plat/dma.h>
+#include <plat/mcspi.h>
+#include <plat/mcbsp.h>
+#include <plat/mmc.h>
#include "omap_hwmod_common_data.h"
@@ -40,10 +43,15 @@
#define OMAP44XX_DMA_REQ_START 1
/* Backward references (IPs with Bus Master capability) */
+static struct omap_hwmod omap44xx_aess_hwmod;
static struct omap_hwmod omap44xx_dma_system_hwmod;
static struct omap_hwmod omap44xx_dmm_hwmod;
static struct omap_hwmod omap44xx_dsp_hwmod;
+static struct omap_hwmod omap44xx_dss_hwmod;
static struct omap_hwmod omap44xx_emif_fw_hwmod;
+static struct omap_hwmod omap44xx_hsi_hwmod;
+static struct omap_hwmod omap44xx_ipu_hwmod;
+static struct omap_hwmod omap44xx_iss_hwmod;
static struct omap_hwmod omap44xx_iva_hwmod;
static struct omap_hwmod omap44xx_l3_instr_hwmod;
static struct omap_hwmod omap44xx_l3_main_1_hwmod;
@@ -53,8 +61,11 @@ static struct omap_hwmod omap44xx_l4_abe_hwmod;
static struct omap_hwmod omap44xx_l4_cfg_hwmod;
static struct omap_hwmod omap44xx_l4_per_hwmod;
static struct omap_hwmod omap44xx_l4_wkup_hwmod;
+static struct omap_hwmod omap44xx_mmc1_hwmod;
+static struct omap_hwmod omap44xx_mmc2_hwmod;
static struct omap_hwmod omap44xx_mpu_hwmod;
static struct omap_hwmod omap44xx_mpu_private_hwmod;
+static struct omap_hwmod omap44xx_usb_otg_hs_hwmod;
/*
* Interconnects omap_hwmod structures
@@ -213,6 +224,14 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* dss -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_dss__l3_main_1 = {
+ .master = &omap44xx_dss_hwmod,
+ .slave = &omap44xx_l3_main_1_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* l3_main_2 -> l3_main_1 */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -229,6 +248,22 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* mmc1 -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_mmc1__l3_main_1 = {
+ .master = &omap44xx_mmc1_hwmod,
+ .slave = &omap44xx_l3_main_1_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mmc2 -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_mmc2__l3_main_1 = {
+ .master = &omap44xx_mmc2_hwmod,
+ .slave = &omap44xx_l3_main_1_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* mpu -> l3_main_1 */
static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
.master = &omap44xx_mpu_hwmod,
@@ -240,8 +275,11 @@ static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
/* l3_main_1 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = {
&omap44xx_dsp__l3_main_1,
+ &omap44xx_dss__l3_main_1,
&omap44xx_l3_main_2__l3_main_1,
&omap44xx_l4_cfg__l3_main_1,
+ &omap44xx_mmc1__l3_main_1,
+ &omap44xx_mmc2__l3_main_1,
&omap44xx_mpu__l3_main_1,
};
@@ -262,6 +300,30 @@ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* hsi -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
+ .master = &omap44xx_hsi_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* ipu -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_ipu__l3_main_2 = {
+ .master = &omap44xx_ipu_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* iss -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_iss__l3_main_2 = {
+ .master = &omap44xx_iss_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* iva -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_iva__l3_main_2 = {
.master = &omap44xx_iva_hwmod,
@@ -286,12 +348,24 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* usb_otg_hs -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_usb_otg_hs__l3_main_2 = {
+ .master = &omap44xx_usb_otg_hs_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* l3_main_2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_l3_main_2_slaves[] = {
&omap44xx_dma_system__l3_main_2,
+ &omap44xx_hsi__l3_main_2,
+ &omap44xx_ipu__l3_main_2,
+ &omap44xx_iss__l3_main_2,
&omap44xx_iva__l3_main_2,
&omap44xx_l3_main_1__l3_main_2,
&omap44xx_l4_cfg__l3_main_2,
+ &omap44xx_usb_otg_hs__l3_main_2,
};
static struct omap_hwmod omap44xx_l3_main_2_hwmod = {
@@ -351,6 +425,14 @@ static struct omap_hwmod_class omap44xx_l4_hwmod_class = {
};
/* l4_abe interface data */
+/* aess -> l4_abe */
+static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+ .master = &omap44xx_aess_hwmod,
+ .slave = &omap44xx_l4_abe_hwmod,
+ .clk = "ocp_abe_iclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* dsp -> l4_abe */
static struct omap_hwmod_ocp_if omap44xx_dsp__l4_abe = {
.master = &omap44xx_dsp_hwmod,
@@ -377,6 +459,7 @@ static struct omap_hwmod_ocp_if omap44xx_mpu__l4_abe = {
/* l4_abe slave ports */
static struct omap_hwmod_ocp_if *omap44xx_l4_abe_slaves[] = {
+ &omap44xx_aess__l4_abe,
&omap44xx_dsp__l4_abe,
&omap44xx_l3_main_1__l4_abe,
&omap44xx_mpu__l4_abe,
@@ -494,26 +577,15 @@ static struct omap_hwmod omap44xx_mpu_private_hwmod = {
* - They still need to be validated with the driver
* properly adapted to omap_hwmod / omap_device
*
- * aess
- * bandgap
* c2c
* c2c_target_fw
* cm_core
* cm_core_aon
- * counter_32k
* ctrl_module_core
* ctrl_module_pad_core
* ctrl_module_pad_wkup
* ctrl_module_wkup
* debugss
- * dmic
- * dss
- * dss_dispc
- * dss_dsi1
- * dss_dsi2
- * dss_hdmi
- * dss_rfbi
- * dss_venc
* efuse_ctrl_cust
* efuse_ctrl_std
* elm
@@ -524,58 +596,211 @@ static struct omap_hwmod omap44xx_mpu_private_hwmod = {
* gpu
* hdq1w
* hsi
- * ipu
- * iss
- * kbd
- * mailbox
- * mcasp
- * mcbsp1
- * mcbsp2
- * mcbsp3
- * mcbsp4
- * mcpdm
- * mcspi1
- * mcspi2
- * mcspi3
- * mcspi4
- * mmc1
- * mmc2
- * mmc3
- * mmc4
- * mmc5
- * mpu_c0
- * mpu_c1
* ocmc_ram
* ocp2scp_usb_phy
* ocp_wp_noc
- * prcm
* prcm_mpu
* prm
* scrm
* sl2if
* slimbus1
* slimbus2
- * spinlock
- * timer1
- * timer10
- * timer11
- * timer2
- * timer3
- * timer4
- * timer5
- * timer6
- * timer7
- * timer8
- * timer9
* usb_host_fs
* usb_host_hs
- * usb_otg_hs
* usb_phy_cm
* usb_tll_hs
* usim
*/
/*
+ * 'aess' class
+ * audio engine sub system
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_aess_hwmod_class = {
+ .name = "aess",
+ .sysc = &omap44xx_aess_sysc,
+};
+
+/* aess */
+static struct omap_hwmod_irq_info omap44xx_aess_irqs[] = {
+ { .irq = 99 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = {
+ { .name = "fifo0", .dma_req = 100 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo1", .dma_req = 101 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo2", .dma_req = 102 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo3", .dma_req = 103 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo4", .dma_req = 104 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo5", .dma_req = 105 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo6", .dma_req = 106 + OMAP44XX_DMA_REQ_START },
+ { .name = "fifo7", .dma_req = 107 + OMAP44XX_DMA_REQ_START },
+};
+
+/* aess master ports */
+static struct omap_hwmod_ocp_if *omap44xx_aess_masters[] = {
+ &omap44xx_aess__l4_abe,
+};
+
+static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
+ {
+ .pa_start = 0x401f1000,
+ .pa_end = 0x401f13ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> aess */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_aess_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_aess_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_aess_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
+ {
+ .pa_start = 0x490f1000,
+ .pa_end = 0x490f13ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> aess (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_aess_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_aess_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_aess_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* aess slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_aess_slaves[] = {
+ &omap44xx_l4_abe__aess,
+ &omap44xx_l4_abe__aess_dma,
+};
+
+static struct omap_hwmod omap44xx_aess_hwmod = {
+ .name = "aess",
+ .class = &omap44xx_aess_hwmod_class,
+ .mpu_irqs = omap44xx_aess_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_aess_irqs),
+ .sdma_reqs = omap44xx_aess_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_aess_sdma_reqs),
+ .main_clk = "aess_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_aess_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_aess_slaves),
+ .masters = omap44xx_aess_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_aess_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'bandgap' class
+ * bandgap reference for ldo regulators
+ */
+
+static struct omap_hwmod_class omap44xx_bandgap_hwmod_class = {
+ .name = "bandgap",
+};
+
+/* bandgap */
+static struct omap_hwmod_opt_clk bandgap_opt_clks[] = {
+ { .role = "fclk", .clk = "bandgap_fclk" },
+};
+
+static struct omap_hwmod omap44xx_bandgap_hwmod = {
+ .name = "bandgap",
+ .class = &omap44xx_bandgap_hwmod_class,
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ },
+ },
+ .opt_clks = bandgap_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(bandgap_opt_clks),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'counter' class
+ * 32-bit ordinary counter, clocked by the falling edge of the 32 kHz clock
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0004,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_counter_hwmod_class = {
+ .name = "counter",
+ .sysc = &omap44xx_counter_sysc,
+};
+
+/* counter_32k */
+static struct omap_hwmod omap44xx_counter_32k_hwmod;
+static struct omap_hwmod_addr_space omap44xx_counter_32k_addrs[] = {
+ {
+ .pa_start = 0x4a304000,
+ .pa_end = 0x4a30401f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> counter_32k */
+static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = {
+ .master = &omap44xx_l4_wkup_hwmod,
+ .slave = &omap44xx_counter_32k_hwmod,
+ .clk = "l4_wkup_clk_mux_ck",
+ .addr = omap44xx_counter_32k_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_counter_32k_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* counter_32k slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_counter_32k_slaves[] = {
+ &omap44xx_l4_wkup__counter_32k,
+};
+
+static struct omap_hwmod omap44xx_counter_32k_hwmod = {
+ .name = "counter_32k",
+ .class = &omap44xx_counter_hwmod_class,
+ .flags = HWMOD_SWSUP_SIDLE,
+ .main_clk = "sys_32k_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_counter_32k_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_counter_32k_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'dma' class
* dma controller for data exchange between memory and memory (i.e. internal or
* external memory) and between gp peripherals and memory
@@ -662,6 +887,96 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
};
/*
+ * 'dmic' class
+ * digital microphone controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_dmic_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_dmic_hwmod_class = {
+ .name = "dmic",
+ .sysc = &omap44xx_dmic_sysc,
+};
+
+/* dmic */
+static struct omap_hwmod omap44xx_dmic_hwmod;
+static struct omap_hwmod_irq_info omap44xx_dmic_irqs[] = {
+ { .irq = 114 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
+ { .dma_req = 66 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
+ {
+ .pa_start = 0x4012e000,
+ .pa_end = 0x4012e07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> dmic */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_dmic_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_dmic_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dmic_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
+ {
+ .pa_start = 0x4902e000,
+ .pa_end = 0x4902e07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> dmic (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_dmic_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_dmic_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dmic_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* dmic slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dmic_slaves[] = {
+ &omap44xx_l4_abe__dmic,
+ &omap44xx_l4_abe__dmic_dma,
+};
+
+static struct omap_hwmod omap44xx_dmic_hwmod = {
+ .name = "dmic",
+ .class = &omap44xx_dmic_hwmod_class,
+ .mpu_irqs = omap44xx_dmic_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dmic_irqs),
+ .sdma_reqs = omap44xx_dmic_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dmic_sdma_reqs),
+ .main_clk = "dmic_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dmic_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dmic_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'dsp' class
* dsp sub-system
*/
@@ -747,6 +1062,590 @@ static struct omap_hwmod omap44xx_dsp_hwmod = {
};
/*
+ * 'dss' class
+ * display sub-system
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
+ .rev_offs = 0x0000,
+ .syss_offs = 0x0014,
+ .sysc_flags = SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
+ .name = "dss",
+ .sysc = &omap44xx_dss_sysc,
+};
+
+/* dss */
+/* dss master ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_masters[] = {
+ &omap44xx_dss__l3_main_1,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dma_addrs[] = {
+ {
+ .pa_start = 0x58000000,
+ .pa_end = 0x5800007f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_addrs[] = {
+ {
+ .pa_start = 0x48040000,
+ .pa_end = 0x4804007f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
+ &omap44xx_l3_main_2__dss,
+ &omap44xx_l4_per__dss,
+};
+
+static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ { .role = "sys_clk", .clk = "dss_sys_clk" },
+ { .role = "tv_clk", .clk = "dss_tv_clk" },
+ { .role = "dss_clk", .clk = "dss_dss_clk" },
+ { .role = "video_clk", .clk = "dss_48mhz_clk" },
+};
+
+static struct omap_hwmod omap44xx_dss_hwmod = {
+ .name = "dss_core",
+ .class = &omap44xx_dss_hwmod_class,
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .opt_clks = dss_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
+ .slaves = omap44xx_dss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_slaves),
+ .masters = omap44xx_dss_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_dss_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_dispc_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_dispc_hwmod_class = {
+ .name = "dispc",
+ .sysc = &omap44xx_dispc_sysc,
+};
+
+/* dss_dispc */
+static struct omap_hwmod omap44xx_dss_dispc_hwmod;
+static struct omap_hwmod_irq_info omap44xx_dss_dispc_irqs[] = {
+ { .irq = 25 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_dss_dispc_sdma_reqs[] = {
+ { .dma_req = 5 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = {
+ {
+ .pa_start = 0x58001000,
+ .pa_end = 0x58001fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss_dispc */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dispc = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_dispc_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_dispc_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dispc_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
+ {
+ .pa_start = 0x48041000,
+ .pa_end = 0x48041fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss_dispc */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_dispc_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_dispc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dispc_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss_dispc slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
+ &omap44xx_l3_main_2__dss_dispc,
+ &omap44xx_l4_per__dss_dispc,
+};
+
+static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
+ .name = "dss_dispc",
+ .class = &omap44xx_dispc_hwmod_class,
+ .mpu_irqs = omap44xx_dss_dispc_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dispc_irqs),
+ .sdma_reqs = omap44xx_dss_dispc_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_dispc_sdma_reqs),
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dss_dispc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'dsi' class
+ * display serial interface controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_dsi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_dsi_hwmod_class = {
+ .name = "dsi",
+ .sysc = &omap44xx_dsi_sysc,
+};
+
+/* dss_dsi1 */
+static struct omap_hwmod omap44xx_dss_dsi1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_dss_dsi1_irqs[] = {
+ { .irq = 53 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_dss_dsi1_sdma_reqs[] = {
+ { .dma_req = 74 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = {
+ {
+ .pa_start = 0x58004000,
+ .pa_end = 0x580041ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss_dsi1 */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi1 = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_dsi1_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_dsi1_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dsi1_addrs[] = {
+ {
+ .pa_start = 0x48044000,
+ .pa_end = 0x480441ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss_dsi1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi1 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_dsi1_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_dsi1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss_dsi1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_dsi1_slaves[] = {
+ &omap44xx_l3_main_2__dss_dsi1,
+ &omap44xx_l4_per__dss_dsi1,
+};
+
+static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
+ .name = "dss_dsi1",
+ .class = &omap44xx_dsi_hwmod_class,
+ .mpu_irqs = omap44xx_dss_dsi1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_irqs),
+ .sdma_reqs = omap44xx_dss_dsi1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_sdma_reqs),
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dss_dsi1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* dss_dsi2 */
+static struct omap_hwmod omap44xx_dss_dsi2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_dss_dsi2_irqs[] = {
+ { .irq = 84 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_dss_dsi2_sdma_reqs[] = {
+ { .dma_req = 83 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = {
+ {
+ .pa_start = 0x58005000,
+ .pa_end = 0x580051ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss_dsi2 */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi2 = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_dsi2_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_dsi2_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_dsi2_addrs[] = {
+ {
+ .pa_start = 0x48045000,
+ .pa_end = 0x480451ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss_dsi2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi2 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_dsi2_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_dsi2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss_dsi2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_dsi2_slaves[] = {
+ &omap44xx_l3_main_2__dss_dsi2,
+ &omap44xx_l4_per__dss_dsi2,
+};
+
+static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
+ .name = "dss_dsi2",
+ .class = &omap44xx_dsi_hwmod_class,
+ .mpu_irqs = omap44xx_dss_dsi2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_irqs),
+ .sdma_reqs = omap44xx_dss_dsi2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_sdma_reqs),
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dss_dsi2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'hdmi' class
+ * hdmi controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_hdmi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_hdmi_hwmod_class = {
+ .name = "hdmi",
+ .sysc = &omap44xx_hdmi_sysc,
+};
+
+/* dss_hdmi */
+static struct omap_hwmod omap44xx_dss_hdmi_hwmod;
+static struct omap_hwmod_irq_info omap44xx_dss_hdmi_irqs[] = {
+ { .irq = 101 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_dss_hdmi_sdma_reqs[] = {
+ { .dma_req = 75 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = {
+ {
+ .pa_start = 0x58006000,
+ .pa_end = 0x58006fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss_hdmi */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_hdmi = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_hdmi_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_hdmi_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_hdmi_addrs[] = {
+ {
+ .pa_start = 0x48046000,
+ .pa_end = 0x48046fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss_hdmi */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_hdmi = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_hdmi_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_hdmi_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss_hdmi slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_hdmi_slaves[] = {
+ &omap44xx_l3_main_2__dss_hdmi,
+ &omap44xx_l4_per__dss_hdmi,
+};
+
+static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
+ .name = "dss_hdmi",
+ .class = &omap44xx_hdmi_hwmod_class,
+ .mpu_irqs = omap44xx_dss_hdmi_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_irqs),
+ .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_sdma_reqs),
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dss_hdmi_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'rfbi' class
+ * remote frame buffer interface
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_rfbi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_rfbi_hwmod_class = {
+ .name = "rfbi",
+ .sysc = &omap44xx_rfbi_sysc,
+};
+
+/* dss_rfbi */
+static struct omap_hwmod omap44xx_dss_rfbi_hwmod;
+static struct omap_hwmod_dma_info omap44xx_dss_rfbi_sdma_reqs[] = {
+ { .dma_req = 13 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = {
+ {
+ .pa_start = 0x58002000,
+ .pa_end = 0x580020ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss_rfbi */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_rfbi = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_rfbi_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_rfbi_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_rfbi_addrs[] = {
+ {
+ .pa_start = 0x48042000,
+ .pa_end = 0x480420ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss_rfbi */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_rfbi = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_rfbi_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_rfbi_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss_rfbi slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_rfbi_slaves[] = {
+ &omap44xx_l3_main_2__dss_rfbi,
+ &omap44xx_l4_per__dss_rfbi,
+};
+
+static struct omap_hwmod omap44xx_dss_rfbi_hwmod = {
+ .name = "dss_rfbi",
+ .class = &omap44xx_rfbi_hwmod_class,
+ .sdma_reqs = omap44xx_dss_rfbi_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_sdma_reqs),
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dss_rfbi_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'venc' class
+ * video encoder
+ */
+
+static struct omap_hwmod_class omap44xx_venc_hwmod_class = {
+ .name = "venc",
+};
+
+/* dss_venc */
+static struct omap_hwmod omap44xx_dss_venc_hwmod;
+static struct omap_hwmod_addr_space omap44xx_dss_venc_dma_addrs[] = {
+ {
+ .pa_start = 0x58003000,
+ .pa_end = 0x580030ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> dss_venc */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_venc = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_dss_venc_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_dss_venc_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_venc_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dss_venc_addrs[] = {
+ {
+ .pa_start = 0x48043000,
+ .pa_end = 0x480430ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> dss_venc */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_dss_venc_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_dss_venc_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dss_venc_addrs),
+ .user = OCP_USER_MPU,
+};
+
+/* dss_venc slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dss_venc_slaves[] = {
+ &omap44xx_l3_main_2__dss_venc,
+ &omap44xx_l4_per__dss_venc,
+};
+
+static struct omap_hwmod omap44xx_dss_venc_hwmod = {
+ .name = "dss_venc",
+ .class = &omap44xx_venc_hwmod_class,
+ .main_clk = "dss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_dss_venc_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_dss_venc_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'gpio' class
* general purpose io module
*/
@@ -1093,6 +1992,83 @@ static struct omap_hwmod omap44xx_gpio6_hwmod = {
};
/*
+ * 'hsi' class
+ * mipi high-speed synchronous serial interface (multichannel and full-duplex
+ * serial interface)
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_hsi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_EMUFREE |
+ SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_hsi_hwmod_class = {
+ .name = "hsi",
+ .sysc = &omap44xx_hsi_sysc,
+};
+
+/* hsi */
+static struct omap_hwmod_irq_info omap44xx_hsi_irqs[] = {
+ { .name = "mpu_p1", .irq = 67 + OMAP44XX_IRQ_GIC_START },
+ { .name = "mpu_p2", .irq = 68 + OMAP44XX_IRQ_GIC_START },
+ { .name = "mpu_dma", .irq = 71 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* hsi master ports */
+static struct omap_hwmod_ocp_if *omap44xx_hsi_masters[] = {
+ &omap44xx_hsi__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_hsi_addrs[] = {
+ {
+ .pa_start = 0x4a058000,
+ .pa_end = 0x4a05bfff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> hsi */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_hsi_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_hsi_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_hsi_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* hsi slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_hsi_slaves[] = {
+ &omap44xx_l4_cfg__hsi,
+};
+
+static struct omap_hwmod omap44xx_hsi_hwmod = {
+ .name = "hsi",
+ .class = &omap44xx_hsi_hwmod_class,
+ .mpu_irqs = omap44xx_hsi_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_hsi_irqs),
+ .main_clk = "hsi_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_hsi_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_hsi_slaves),
+ .masters = omap44xx_hsi_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_hsi_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'i2c' class
* multimaster high-speed i2c controller
*/
@@ -1326,6 +2302,188 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = {
};
/*
+ * 'ipu' class
+ * imaging processor unit
+ */
+
+static struct omap_hwmod_class omap44xx_ipu_hwmod_class = {
+ .name = "ipu",
+};
+
+/* ipu */
+static struct omap_hwmod_irq_info omap44xx_ipu_irqs[] = {
+ { .irq = 100 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_rst_info omap44xx_ipu_c0_resets[] = {
+ { .name = "cpu0", .rst_shift = 0 },
+};
+
+static struct omap_hwmod_rst_info omap44xx_ipu_c1_resets[] = {
+ { .name = "cpu1", .rst_shift = 1 },
+};
+
+static struct omap_hwmod_rst_info omap44xx_ipu_resets[] = {
+ { .name = "mmu_cache", .rst_shift = 2 },
+};
+
+/* ipu master ports */
+static struct omap_hwmod_ocp_if *omap44xx_ipu_masters[] = {
+ &omap44xx_ipu__l3_main_2,
+};
+
+/* l3_main_2 -> ipu */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ipu = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_ipu_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* ipu slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_ipu_slaves[] = {
+ &omap44xx_l3_main_2__ipu,
+};
+
+/* Pseudo hwmod for reset control purpose only */
+static struct omap_hwmod omap44xx_ipu_c0_hwmod = {
+ .name = "ipu_c0",
+ .class = &omap44xx_ipu_hwmod_class,
+ .flags = HWMOD_INIT_NO_RESET,
+ .rst_lines = omap44xx_ipu_c0_resets,
+ .rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_c0_resets),
+ .prcm = {
+ .omap4 = {
+ .rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
+ },
+ },
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* Pseudo hwmod for reset control purpose only */
+static struct omap_hwmod omap44xx_ipu_c1_hwmod = {
+ .name = "ipu_c1",
+ .class = &omap44xx_ipu_hwmod_class,
+ .flags = HWMOD_INIT_NO_RESET,
+ .rst_lines = omap44xx_ipu_c1_resets,
+ .rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_c1_resets),
+ .prcm = {
+ .omap4 = {
+ .rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
+ },
+ },
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct omap_hwmod omap44xx_ipu_hwmod = {
+ .name = "ipu",
+ .class = &omap44xx_ipu_hwmod_class,
+ .mpu_irqs = omap44xx_ipu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_ipu_irqs),
+ .rst_lines = omap44xx_ipu_resets,
+ .rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_resets),
+ .main_clk = "ipu_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
+ .rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
+ },
+ },
+ .slaves = omap44xx_ipu_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_ipu_slaves),
+ .masters = omap44xx_ipu_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_ipu_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'iss' class
+ * external image sensor pixel data processor
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_iss_hwmod_class = {
+ .name = "iss",
+ .sysc = &omap44xx_iss_sysc,
+};
+
+/* iss */
+static struct omap_hwmod_irq_info omap44xx_iss_irqs[] = {
+ { .irq = 24 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = {
+ { .name = "1", .dma_req = 8 + OMAP44XX_DMA_REQ_START },
+ { .name = "2", .dma_req = 9 + OMAP44XX_DMA_REQ_START },
+ { .name = "3", .dma_req = 11 + OMAP44XX_DMA_REQ_START },
+ { .name = "4", .dma_req = 12 + OMAP44XX_DMA_REQ_START },
+};
+
+/* iss master ports */
+static struct omap_hwmod_ocp_if *omap44xx_iss_masters[] = {
+ &omap44xx_iss__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_iss_addrs[] = {
+ {
+ .pa_start = 0x52000000,
+ .pa_end = 0x520000ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> iss */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_iss_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_iss_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_iss_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* iss slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_iss_slaves[] = {
+ &omap44xx_l3_main_2__iss,
+};
+
+static struct omap_hwmod_opt_clk iss_opt_clks[] = {
+ { .role = "ctrlclk", .clk = "iss_ctrlclk" },
+};
+
+static struct omap_hwmod omap44xx_iss_hwmod = {
+ .name = "iss",
+ .class = &omap44xx_iss_hwmod_class,
+ .mpu_irqs = omap44xx_iss_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_iss_irqs),
+ .sdma_reqs = omap44xx_iss_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_iss_sdma_reqs),
+ .main_clk = "iss_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
+ },
+ },
+ .opt_clks = iss_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(iss_opt_clks),
+ .slaves = omap44xx_iss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_iss_slaves),
+ .masters = omap44xx_iss_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_iss_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'iva' class
* multi-standard video encoder/decoder hardware accelerator
*/
@@ -1435,6 +2593,1084 @@ static struct omap_hwmod omap44xx_iva_hwmod = {
};
/*
+ * 'kbd' class
+ * keyboard controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_kbd_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_kbd_hwmod_class = {
+ .name = "kbd",
+ .sysc = &omap44xx_kbd_sysc,
+};
+
+/* kbd */
+static struct omap_hwmod omap44xx_kbd_hwmod;
+static struct omap_hwmod_irq_info omap44xx_kbd_irqs[] = {
+ { .irq = 120 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
+ {
+ .pa_start = 0x4a31c000,
+ .pa_end = 0x4a31c07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> kbd */
+static struct omap_hwmod_ocp_if omap44xx_l4_wkup__kbd = {
+ .master = &omap44xx_l4_wkup_hwmod,
+ .slave = &omap44xx_kbd_hwmod,
+ .clk = "l4_wkup_clk_mux_ck",
+ .addr = omap44xx_kbd_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_kbd_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* kbd slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_kbd_slaves[] = {
+ &omap44xx_l4_wkup__kbd,
+};
+
+static struct omap_hwmod omap44xx_kbd_hwmod = {
+ .name = "kbd",
+ .class = &omap44xx_kbd_hwmod_class,
+ .mpu_irqs = omap44xx_kbd_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_kbd_irqs),
+ .main_clk = "kbd_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_kbd_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_kbd_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors using a
+ * queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_mailbox_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_mailbox_hwmod_class = {
+ .name = "mailbox",
+ .sysc = &omap44xx_mailbox_sysc,
+};
+
+/* mailbox */
+static struct omap_hwmod omap44xx_mailbox_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mailbox_irqs[] = {
+ { .irq = 26 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = {
+ {
+ .pa_start = 0x4a0f4000,
+ .pa_end = 0x4a0f41ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> mailbox */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__mailbox = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_mailbox_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mailbox_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mailbox_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mailbox slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mailbox_slaves[] = {
+ &omap44xx_l4_cfg__mailbox,
+};
+
+static struct omap_hwmod omap44xx_mailbox_hwmod = {
+ .name = "mailbox",
+ .class = &omap44xx_mailbox_hwmod_class,
+ .mpu_irqs = omap44xx_mailbox_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mailbox_irqs),
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mailbox_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mailbox_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mcbsp' class
+ * multi channel buffered serial port controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_mcbsp_sysc = {
+ .sysc_offs = 0x008c,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_mcbsp_hwmod_class = {
+ .name = "mcbsp",
+ .sysc = &omap44xx_mcbsp_sysc,
+ .rev = MCBSP_CONFIG_TYPE4,
+};
+
+/* mcbsp1 */
+static struct omap_hwmod omap44xx_mcbsp1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcbsp1_irqs[] = {
+ { .irq = 17 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 32 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 33 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x40122000,
+ .pa_end = 0x401220ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcbsp1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcbsp1_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcbsp1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp1_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp1_dma_addrs[] = {
+ {
+ .name = "dma",
+ .pa_start = 0x49022000,
+ .pa_end = 0x490220ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcbsp1 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcbsp1_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcbsp1_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp1_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* mcbsp1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcbsp1_slaves[] = {
+ &omap44xx_l4_abe__mcbsp1,
+ &omap44xx_l4_abe__mcbsp1_dma,
+};
+
+static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
+ .name = "mcbsp1",
+ .class = &omap44xx_mcbsp_hwmod_class,
+ .mpu_irqs = omap44xx_mcbsp1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp1_irqs),
+ .sdma_reqs = omap44xx_mcbsp1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp1_sdma_reqs),
+ .main_clk = "mcbsp1_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mcbsp1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mcbsp2 */
+static struct omap_hwmod omap44xx_mcbsp2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcbsp2_irqs[] = {
+ { .irq = 22 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 16 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 17 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x40124000,
+ .pa_end = 0x401240ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcbsp2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcbsp2_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcbsp2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp2_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp2_dma_addrs[] = {
+ {
+ .name = "dma",
+ .pa_start = 0x49024000,
+ .pa_end = 0x490240ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcbsp2 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcbsp2_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcbsp2_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp2_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* mcbsp2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcbsp2_slaves[] = {
+ &omap44xx_l4_abe__mcbsp2,
+ &omap44xx_l4_abe__mcbsp2_dma,
+};
+
+static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
+ .name = "mcbsp2",
+ .class = &omap44xx_mcbsp_hwmod_class,
+ .mpu_irqs = omap44xx_mcbsp2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp2_irqs),
+ .sdma_reqs = omap44xx_mcbsp2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp2_sdma_reqs),
+ .main_clk = "mcbsp2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mcbsp2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mcbsp3 */
+static struct omap_hwmod omap44xx_mcbsp3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcbsp3_irqs[] = {
+ { .irq = 23 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 18 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 19 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x40126000,
+ .pa_end = 0x401260ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcbsp3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcbsp3_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcbsp3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp3_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp3_dma_addrs[] = {
+ {
+ .name = "dma",
+ .pa_start = 0x49026000,
+ .pa_end = 0x490260ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcbsp3 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcbsp3_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcbsp3_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp3_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* mcbsp3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcbsp3_slaves[] = {
+ &omap44xx_l4_abe__mcbsp3,
+ &omap44xx_l4_abe__mcbsp3_dma,
+};
+
+static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
+ .name = "mcbsp3",
+ .class = &omap44xx_mcbsp_hwmod_class,
+ .mpu_irqs = omap44xx_mcbsp3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp3_irqs),
+ .sdma_reqs = omap44xx_mcbsp3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp3_sdma_reqs),
+ .main_clk = "mcbsp3_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mcbsp3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mcbsp4 */
+static struct omap_hwmod omap44xx_mcbsp4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcbsp4_irqs[] = {
+ { .irq = 16 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 30 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 31 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = {
+ {
+ .pa_start = 0x48096000,
+ .pa_end = 0x480960ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcbsp4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mcbsp4 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mcbsp4_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mcbsp4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcbsp4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcbsp4_slaves[] = {
+ &omap44xx_l4_per__mcbsp4,
+};
+
+static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
+ .name = "mcbsp4",
+ .class = &omap44xx_mcbsp_hwmod_class,
+ .mpu_irqs = omap44xx_mcbsp4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp4_irqs),
+ .sdma_reqs = omap44xx_mcbsp4_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp4_sdma_reqs),
+ .main_clk = "mcbsp4_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mcbsp4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mcpdm' class
+ * multi channel pdm controller (proprietary interface with phoenix power
+ * ic)
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_mcpdm_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_mcpdm_hwmod_class = {
+ .name = "mcpdm",
+ .sysc = &omap44xx_mcpdm_sysc,
+};
+
+/* mcpdm */
+static struct omap_hwmod omap44xx_mcpdm_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcpdm_irqs[] = {
+ { .irq = 112 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcpdm_sdma_reqs[] = {
+ { .name = "up_link", .dma_req = 64 + OMAP44XX_DMA_REQ_START },
+ { .name = "dn_link", .dma_req = 65 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = {
+ {
+ .pa_start = 0x40132000,
+ .pa_end = 0x4013207f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcpdm */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcpdm_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcpdm_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcpdm_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcpdm_dma_addrs[] = {
+ {
+ .pa_start = 0x49032000,
+ .pa_end = 0x4903207f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcpdm (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcpdm_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcpdm_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcpdm_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* mcpdm slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcpdm_slaves[] = {
+ &omap44xx_l4_abe__mcpdm,
+ &omap44xx_l4_abe__mcpdm_dma,
+};
+
+static struct omap_hwmod omap44xx_mcpdm_hwmod = {
+ .name = "mcpdm",
+ .class = &omap44xx_mcpdm_hwmod_class,
+ .mpu_irqs = omap44xx_mcpdm_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcpdm_irqs),
+ .sdma_reqs = omap44xx_mcpdm_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcpdm_sdma_reqs),
+ .main_clk = "mcpdm_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mcpdm_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcpdm_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mcspi' class
+ * multichannel serial port interface (mcspi) / master/slave synchronous serial
+ * bus
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_mcspi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = {
+ .name = "mcspi",
+ .sysc = &omap44xx_mcspi_sysc,
+ .rev = OMAP4_MCSPI_REV,
+};
+
+/* mcspi1 */
+static struct omap_hwmod omap44xx_mcspi1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcspi1_irqs[] = {
+ { .irq = 65 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 34 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx0", .dma_req = 35 + OMAP44XX_DMA_REQ_START },
+ { .name = "tx1", .dma_req = 36 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx1", .dma_req = 37 + OMAP44XX_DMA_REQ_START },
+ { .name = "tx2", .dma_req = 38 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx2", .dma_req = 39 + OMAP44XX_DMA_REQ_START },
+ { .name = "tx3", .dma_req = 40 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx3", .dma_req = 41 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = {
+ {
+ .pa_start = 0x48098000,
+ .pa_end = 0x480981ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcspi1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi1 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mcspi1_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mcspi1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcspi1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcspi1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcspi1_slaves[] = {
+ &omap44xx_l4_per__mcspi1,
+};
+
+/* mcspi1 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi1_dev_attr = {
+ .num_chipselect = 4,
+};
+
+static struct omap_hwmod omap44xx_mcspi1_hwmod = {
+ .name = "mcspi1",
+ .class = &omap44xx_mcspi_hwmod_class,
+ .mpu_irqs = omap44xx_mcspi1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi1_irqs),
+ .sdma_reqs = omap44xx_mcspi1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi1_sdma_reqs),
+ .main_clk = "mcspi1_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
+ },
+ },
+ .dev_attr = &mcspi1_dev_attr,
+ .slaves = omap44xx_mcspi1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mcspi2 */
+static struct omap_hwmod omap44xx_mcspi2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcspi2_irqs[] = {
+ { .irq = 66 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 42 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx0", .dma_req = 43 + OMAP44XX_DMA_REQ_START },
+ { .name = "tx1", .dma_req = 44 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx1", .dma_req = 45 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = {
+ {
+ .pa_start = 0x4809a000,
+ .pa_end = 0x4809a1ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcspi2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi2 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mcspi2_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mcspi2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcspi2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcspi2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcspi2_slaves[] = {
+ &omap44xx_l4_per__mcspi2,
+};
+
+/* mcspi2 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi2_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap44xx_mcspi2_hwmod = {
+ .name = "mcspi2",
+ .class = &omap44xx_mcspi_hwmod_class,
+ .mpu_irqs = omap44xx_mcspi2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi2_irqs),
+ .sdma_reqs = omap44xx_mcspi2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi2_sdma_reqs),
+ .main_clk = "mcspi2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
+ },
+ },
+ .dev_attr = &mcspi2_dev_attr,
+ .slaves = omap44xx_mcspi2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mcspi3 */
+static struct omap_hwmod omap44xx_mcspi3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcspi3_irqs[] = {
+ { .irq = 91 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 14 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx0", .dma_req = 15 + OMAP44XX_DMA_REQ_START },
+ { .name = "tx1", .dma_req = 22 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx1", .dma_req = 23 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = {
+ {
+ .pa_start = 0x480b8000,
+ .pa_end = 0x480b81ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcspi3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi3 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mcspi3_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mcspi3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcspi3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcspi3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcspi3_slaves[] = {
+ &omap44xx_l4_per__mcspi3,
+};
+
+/* mcspi3 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi3_dev_attr = {
+ .num_chipselect = 2,
+};
+
+static struct omap_hwmod omap44xx_mcspi3_hwmod = {
+ .name = "mcspi3",
+ .class = &omap44xx_mcspi_hwmod_class,
+ .mpu_irqs = omap44xx_mcspi3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi3_irqs),
+ .sdma_reqs = omap44xx_mcspi3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi3_sdma_reqs),
+ .main_clk = "mcspi3_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
+ },
+ },
+ .dev_attr = &mcspi3_dev_attr,
+ .slaves = omap44xx_mcspi3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mcspi4 */
+static struct omap_hwmod omap44xx_mcspi4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcspi4_irqs[] = {
+ { .irq = 48 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcspi4_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 69 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx0", .dma_req = 70 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = {
+ {
+ .pa_start = 0x480ba000,
+ .pa_end = 0x480ba1ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mcspi4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi4 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mcspi4_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mcspi4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcspi4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mcspi4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcspi4_slaves[] = {
+ &omap44xx_l4_per__mcspi4,
+};
+
+/* mcspi4 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi4_dev_attr = {
+ .num_chipselect = 1,
+};
+
+static struct omap_hwmod omap44xx_mcspi4_hwmod = {
+ .name = "mcspi4",
+ .class = &omap44xx_mcspi_hwmod_class,
+ .mpu_irqs = omap44xx_mcspi4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi4_irqs),
+ .sdma_reqs = omap44xx_mcspi4_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi4_sdma_reqs),
+ .main_clk = "mcspi4_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
+ },
+ },
+ .dev_attr = &mcspi4_dev_attr,
+ .slaves = omap44xx_mcspi4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'mmc' class
+ * multimedia card high-speed/sd/sdio (mmc/sd/sdio) host controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_mmc_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_mmc_hwmod_class = {
+ .name = "mmc",
+ .sysc = &omap44xx_mmc_sysc,
+};
+
+/* mmc1 */
+
+static struct omap_hwmod_irq_info omap44xx_mmc1_irqs[] = {
+ { .irq = 83 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mmc1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 60 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 61 + OMAP44XX_DMA_REQ_START },
+};
+
+/* mmc1 master ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc1_masters[] = {
+ &omap44xx_mmc1__l3_main_1,
+};
+
+static struct omap_hwmod_addr_space omap44xx_mmc1_addrs[] = {
+ {
+ .pa_start = 0x4809c000,
+ .pa_end = 0x4809c3ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mmc1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc1 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mmc1_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mmc1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mmc1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mmc1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc1_slaves[] = {
+ &omap44xx_l4_per__mmc1,
+};
+
+/* mmc1 dev_attr */
+static struct omap_mmc_dev_attr mmc1_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+
+static struct omap_hwmod omap44xx_mmc1_hwmod = {
+ .name = "mmc1",
+ .class = &omap44xx_mmc_hwmod_class,
+ .mpu_irqs = omap44xx_mmc1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc1_irqs),
+ .sdma_reqs = omap44xx_mmc1_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc1_sdma_reqs),
+ .main_clk = "mmc1_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
+ },
+ },
+ .dev_attr = &mmc1_dev_attr,
+ .slaves = omap44xx_mmc1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mmc1_slaves),
+ .masters = omap44xx_mmc1_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_mmc1_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mmc2 */
+static struct omap_hwmod_irq_info omap44xx_mmc2_irqs[] = {
+ { .irq = 86 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mmc2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 46 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 47 + OMAP44XX_DMA_REQ_START },
+};
+
+/* mmc2 master ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc2_masters[] = {
+ &omap44xx_mmc2__l3_main_1,
+};
+
+static struct omap_hwmod_addr_space omap44xx_mmc2_addrs[] = {
+ {
+ .pa_start = 0x480b4000,
+ .pa_end = 0x480b43ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mmc2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc2 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mmc2_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mmc2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mmc2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mmc2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc2_slaves[] = {
+ &omap44xx_l4_per__mmc2,
+};
+
+static struct omap_hwmod omap44xx_mmc2_hwmod = {
+ .name = "mmc2",
+ .class = &omap44xx_mmc_hwmod_class,
+ .mpu_irqs = omap44xx_mmc2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc2_irqs),
+ .sdma_reqs = omap44xx_mmc2_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc2_sdma_reqs),
+ .main_clk = "mmc2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mmc2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mmc2_slaves),
+ .masters = omap44xx_mmc2_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_mmc2_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mmc3 */
+static struct omap_hwmod omap44xx_mmc3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mmc3_irqs[] = {
+ { .irq = 94 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mmc3_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 76 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 77 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = {
+ {
+ .pa_start = 0x480ad000,
+ .pa_end = 0x480ad3ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mmc3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc3 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mmc3_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mmc3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mmc3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mmc3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc3_slaves[] = {
+ &omap44xx_l4_per__mmc3,
+};
+
+static struct omap_hwmod omap44xx_mmc3_hwmod = {
+ .name = "mmc3",
+ .class = &omap44xx_mmc_hwmod_class,
+ .mpu_irqs = omap44xx_mmc3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc3_irqs),
+ .sdma_reqs = omap44xx_mmc3_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc3_sdma_reqs),
+ .main_clk = "mmc3_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mmc3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mmc3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mmc4 */
+static struct omap_hwmod omap44xx_mmc4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mmc4_irqs[] = {
+ { .irq = 96 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mmc4_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 56 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 57 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = {
+ {
+ .pa_start = 0x480d1000,
+ .pa_end = 0x480d13ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mmc4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc4 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mmc4_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mmc4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mmc4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mmc4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc4_slaves[] = {
+ &omap44xx_l4_per__mmc4,
+};
+
+static struct omap_hwmod omap44xx_mmc4_hwmod = {
+ .name = "mmc4",
+ .class = &omap44xx_mmc_hwmod_class,
+ .mpu_irqs = omap44xx_mmc4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc4_irqs),
+ .sdma_reqs = omap44xx_mmc4_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc4_sdma_reqs),
+ .main_clk = "mmc4_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mmc4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mmc4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* mmc5 */
+static struct omap_hwmod omap44xx_mmc5_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mmc5_irqs[] = {
+ { .irq = 59 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mmc5_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 58 + OMAP44XX_DMA_REQ_START },
+ { .name = "rx", .dma_req = 59 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = {
+ {
+ .pa_start = 0x480d5000,
+ .pa_end = 0x480d53ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> mmc5 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc5 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_mmc5_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_mmc5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mmc5_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mmc5 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mmc5_slaves[] = {
+ &omap44xx_l4_per__mmc5,
+};
+
+static struct omap_hwmod omap44xx_mmc5_hwmod = {
+ .name = "mmc5",
+ .class = &omap44xx_mmc_hwmod_class,
+ .mpu_irqs = omap44xx_mmc5_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc5_irqs),
+ .sdma_reqs = omap44xx_mmc5_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc5_sdma_reqs),
+ .main_clk = "mmc5_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mmc5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mmc5_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'mpu' class
* mpu sub-system
*/
@@ -1639,6 +3875,677 @@ static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
};
/*
+ * 'spinlock' class
+ * spinlock provides hardware assistance for synchronizing the processes
+ * running on multiple processors
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_spinlock_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_spinlock_hwmod_class = {
+ .name = "spinlock",
+ .sysc = &omap44xx_spinlock_sysc,
+};
+
+/* spinlock */
+static struct omap_hwmod omap44xx_spinlock_hwmod;
+static struct omap_hwmod_addr_space omap44xx_spinlock_addrs[] = {
+ {
+ .pa_start = 0x4a0f6000,
+ .pa_end = 0x4a0f6fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> spinlock */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__spinlock = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_spinlock_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_spinlock_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_spinlock_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* spinlock slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_spinlock_slaves[] = {
+ &omap44xx_l4_cfg__spinlock,
+};
+
+static struct omap_hwmod omap44xx_spinlock_hwmod = {
+ .name = "spinlock",
+ .class = &omap44xx_spinlock_hwmod_class,
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_spinlock_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_spinlock_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'timer' class
+ * general purpose timer module with accurate 1ms tick
+ * This class contains several variants: ['timer_1ms', 'timer']
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_timer_1ms_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_timer_1ms_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap44xx_timer_1ms_sysc,
+};
+
+static struct omap_hwmod_class_sysconfig omap44xx_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_timer_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap44xx_timer_sysc,
+};
+
+/* timer1 */
+static struct omap_hwmod omap44xx_timer1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer1_irqs[] = {
+ { .irq = 37 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = {
+ {
+ .pa_start = 0x4a318000,
+ .pa_end = 0x4a31807f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_wkup -> timer1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
+ .master = &omap44xx_l4_wkup_hwmod,
+ .slave = &omap44xx_timer1_hwmod,
+ .clk = "l4_wkup_clk_mux_ck",
+ .addr = omap44xx_timer1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer1_slaves[] = {
+ &omap44xx_l4_wkup__timer1,
+};
+
+static struct omap_hwmod omap44xx_timer1_hwmod = {
+ .name = "timer1",
+ .class = &omap44xx_timer_1ms_hwmod_class,
+ .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .mpu_irqs = omap44xx_timer1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer1_irqs),
+ .main_clk = "timer1_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer2 */
+static struct omap_hwmod omap44xx_timer2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer2_irqs[] = {
+ { .irq = 38 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = {
+ {
+ .pa_start = 0x48032000,
+ .pa_end = 0x4803207f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__timer2 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_timer2_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_timer2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer2_slaves[] = {
+ &omap44xx_l4_per__timer2,
+};
+
+static struct omap_hwmod omap44xx_timer2_hwmod = {
+ .name = "timer2",
+ .class = &omap44xx_timer_1ms_hwmod_class,
+ .mpu_irqs = omap44xx_timer2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer2_irqs),
+ .main_clk = "timer2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer3 */
+static struct omap_hwmod omap44xx_timer3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer3_irqs[] = {
+ { .irq = 39 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = {
+ {
+ .pa_start = 0x48034000,
+ .pa_end = 0x4803407f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__timer3 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_timer3_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_timer3_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer3_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer3_slaves[] = {
+ &omap44xx_l4_per__timer3,
+};
+
+static struct omap_hwmod omap44xx_timer3_hwmod = {
+ .name = "timer3",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer3_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer3_irqs),
+ .main_clk = "timer3_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer3_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer3_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer4 */
+static struct omap_hwmod omap44xx_timer4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer4_irqs[] = {
+ { .irq = 40 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = {
+ {
+ .pa_start = 0x48036000,
+ .pa_end = 0x4803607f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__timer4 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_timer4_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_timer4_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer4_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer4_slaves[] = {
+ &omap44xx_l4_per__timer4,
+};
+
+static struct omap_hwmod omap44xx_timer4_hwmod = {
+ .name = "timer4",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer4_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer4_irqs),
+ .main_clk = "timer4_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer4_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer4_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer5 */
+static struct omap_hwmod omap44xx_timer5_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer5_irqs[] = {
+ { .irq = 41 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = {
+ {
+ .pa_start = 0x40138000,
+ .pa_end = 0x4013807f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer5 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer5_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer5_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer5_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer5_dma_addrs[] = {
+ {
+ .pa_start = 0x49038000,
+ .pa_end = 0x4903807f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer5 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer5_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer5_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer5_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* timer5 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer5_slaves[] = {
+ &omap44xx_l4_abe__timer5,
+ &omap44xx_l4_abe__timer5_dma,
+};
+
+static struct omap_hwmod omap44xx_timer5_hwmod = {
+ .name = "timer5",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer5_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer5_irqs),
+ .main_clk = "timer5_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer5_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer5_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer6 */
+static struct omap_hwmod omap44xx_timer6_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer6_irqs[] = {
+ { .irq = 42 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = {
+ {
+ .pa_start = 0x4013a000,
+ .pa_end = 0x4013a07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer6 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer6_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer6_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer6_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer6_dma_addrs[] = {
+ {
+ .pa_start = 0x4903a000,
+ .pa_end = 0x4903a07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer6 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer6_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer6_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer6_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* timer6 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer6_slaves[] = {
+ &omap44xx_l4_abe__timer6,
+ &omap44xx_l4_abe__timer6_dma,
+};
+
+static struct omap_hwmod omap44xx_timer6_hwmod = {
+ .name = "timer6",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer6_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer6_irqs),
+ .main_clk = "timer6_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer6_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer6_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer7 */
+static struct omap_hwmod omap44xx_timer7_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer7_irqs[] = {
+ { .irq = 43 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = {
+ {
+ .pa_start = 0x4013c000,
+ .pa_end = 0x4013c07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer7 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer7_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer7_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer7_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer7_dma_addrs[] = {
+ {
+ .pa_start = 0x4903c000,
+ .pa_end = 0x4903c07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer7 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer7_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer7_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer7_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* timer7 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer7_slaves[] = {
+ &omap44xx_l4_abe__timer7,
+ &omap44xx_l4_abe__timer7_dma,
+};
+
+static struct omap_hwmod omap44xx_timer7_hwmod = {
+ .name = "timer7",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer7_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer7_irqs),
+ .main_clk = "timer7_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer7_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer7_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer8 */
+static struct omap_hwmod omap44xx_timer8_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer8_irqs[] = {
+ { .irq = 44 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = {
+ {
+ .pa_start = 0x4013e000,
+ .pa_end = 0x4013e07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer8 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8 = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer8_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer8_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer8_addrs),
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer8_dma_addrs[] = {
+ {
+ .pa_start = 0x4903e000,
+ .pa_end = 0x4903e07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> timer8 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_timer8_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_timer8_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer8_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* timer8 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer8_slaves[] = {
+ &omap44xx_l4_abe__timer8,
+ &omap44xx_l4_abe__timer8_dma,
+};
+
+static struct omap_hwmod omap44xx_timer8_hwmod = {
+ .name = "timer8",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer8_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer8_irqs),
+ .main_clk = "timer8_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer8_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer8_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer9 */
+static struct omap_hwmod omap44xx_timer9_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer9_irqs[] = {
+ { .irq = 45 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = {
+ {
+ .pa_start = 0x4803e000,
+ .pa_end = 0x4803e07f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer9 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__timer9 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_timer9_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_timer9_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer9_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer9 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer9_slaves[] = {
+ &omap44xx_l4_per__timer9,
+};
+
+static struct omap_hwmod omap44xx_timer9_hwmod = {
+ .name = "timer9",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer9_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer9_irqs),
+ .main_clk = "timer9_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer9_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer9_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer10 */
+static struct omap_hwmod omap44xx_timer10_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer10_irqs[] = {
+ { .irq = 46 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = {
+ {
+ .pa_start = 0x48086000,
+ .pa_end = 0x4808607f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer10 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__timer10 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_timer10_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_timer10_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer10_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer10 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer10_slaves[] = {
+ &omap44xx_l4_per__timer10,
+};
+
+static struct omap_hwmod omap44xx_timer10_hwmod = {
+ .name = "timer10",
+ .class = &omap44xx_timer_1ms_hwmod_class,
+ .mpu_irqs = omap44xx_timer10_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer10_irqs),
+ .main_clk = "timer10_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer10_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer10_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* timer11 */
+static struct omap_hwmod omap44xx_timer11_hwmod;
+static struct omap_hwmod_irq_info omap44xx_timer11_irqs[] = {
+ { .irq = 47 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = {
+ {
+ .pa_start = 0x48088000,
+ .pa_end = 0x4808807f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_per -> timer11 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__timer11 = {
+ .master = &omap44xx_l4_per_hwmod,
+ .slave = &omap44xx_timer11_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_timer11_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_timer11_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* timer11 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_timer11_slaves[] = {
+ &omap44xx_l4_per__timer11,
+};
+
+static struct omap_hwmod omap44xx_timer11_hwmod = {
+ .name = "timer11",
+ .class = &omap44xx_timer_hwmod_class,
+ .mpu_irqs = omap44xx_timer11_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer11_irqs),
+ .main_clk = "timer11_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_timer11_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_timer11_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'uart' class
* universal asynchronous receiver/transmitter (uart)
*/
@@ -1870,6 +4777,88 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
};
/*
+ * 'usb_otg_hs' class
+ * high-speed on-the-go universal serial bus (usb_otg_hs) controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_usb_otg_hs_sysc = {
+ .rev_offs = 0x0400,
+ .sysc_offs = 0x0404,
+ .syss_offs = 0x0408,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_usb_otg_hs_hwmod_class = {
+ .name = "usb_otg_hs",
+ .sysc = &omap44xx_usb_otg_hs_sysc,
+};
+
+/* usb_otg_hs */
+static struct omap_hwmod_irq_info omap44xx_usb_otg_hs_irqs[] = {
+ { .name = "mc", .irq = 92 + OMAP44XX_IRQ_GIC_START },
+ { .name = "dma", .irq = 93 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* usb_otg_hs master ports */
+static struct omap_hwmod_ocp_if *omap44xx_usb_otg_hs_masters[] = {
+ &omap44xx_usb_otg_hs__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {
+ {
+ .pa_start = 0x4a0ab000,
+ .pa_end = 0x4a0ab003,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> usb_otg_hs */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_otg_hs = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usb_otg_hs_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usb_otg_hs_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* usb_otg_hs slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_usb_otg_hs_slaves[] = {
+ &omap44xx_l4_cfg__usb_otg_hs,
+};
+
+static struct omap_hwmod_opt_clk usb_otg_hs_opt_clks[] = {
+ { .role = "xclk", .clk = "usb_otg_hs_xclk" },
+};
+
+static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = {
+ .name = "usb_otg_hs",
+ .class = &omap44xx_usb_otg_hs_hwmod_class,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ .mpu_irqs = omap44xx_usb_otg_hs_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_irqs),
+ .main_clk = "usb_otg_hs_ick",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ },
+ },
+ .opt_clks = usb_otg_hs_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(usb_otg_hs_opt_clks),
+ .slaves = omap44xx_usb_otg_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_slaves),
+ .masters = omap44xx_usb_otg_hs_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
* 'wd_timer' class
* 32-bit watchdog upward counter that generates a pulse on the reset pin on
* overflow condition
@@ -2024,13 +5013,34 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
/* mpu_bus class */
&omap44xx_mpu_private_hwmod,
+ /* aess class */
+/* &omap44xx_aess_hwmod, */
+
+ /* bandgap class */
+ &omap44xx_bandgap_hwmod,
+
+ /* counter class */
+/* &omap44xx_counter_32k_hwmod, */
+
/* dma class */
&omap44xx_dma_system_hwmod,
+ /* dmic class */
+ &omap44xx_dmic_hwmod,
+
/* dsp class */
&omap44xx_dsp_hwmod,
&omap44xx_dsp_c0_hwmod,
+ /* dss class */
+ &omap44xx_dss_hwmod,
+ &omap44xx_dss_dispc_hwmod,
+ &omap44xx_dss_dsi1_hwmod,
+ &omap44xx_dss_dsi2_hwmod,
+ &omap44xx_dss_hdmi_hwmod,
+ &omap44xx_dss_rfbi_hwmod,
+ &omap44xx_dss_venc_hwmod,
+
/* gpio class */
&omap44xx_gpio1_hwmod,
&omap44xx_gpio2_hwmod,
@@ -2039,17 +5049,56 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
&omap44xx_gpio5_hwmod,
&omap44xx_gpio6_hwmod,
+ /* hsi class */
+/* &omap44xx_hsi_hwmod, */
+
/* i2c class */
&omap44xx_i2c1_hwmod,
&omap44xx_i2c2_hwmod,
&omap44xx_i2c3_hwmod,
&omap44xx_i2c4_hwmod,
+ /* ipu class */
+ &omap44xx_ipu_hwmod,
+ &omap44xx_ipu_c0_hwmod,
+ &omap44xx_ipu_c1_hwmod,
+
+ /* iss class */
+/* &omap44xx_iss_hwmod, */
+
/* iva class */
&omap44xx_iva_hwmod,
&omap44xx_iva_seq0_hwmod,
&omap44xx_iva_seq1_hwmod,
+ /* kbd class */
+/* &omap44xx_kbd_hwmod, */
+
+ /* mailbox class */
+ &omap44xx_mailbox_hwmod,
+
+ /* mcbsp class */
+ &omap44xx_mcbsp1_hwmod,
+ &omap44xx_mcbsp2_hwmod,
+ &omap44xx_mcbsp3_hwmod,
+ &omap44xx_mcbsp4_hwmod,
+
+ /* mcpdm class */
+/* &omap44xx_mcpdm_hwmod, */
+
+ /* mcspi class */
+ &omap44xx_mcspi1_hwmod,
+ &omap44xx_mcspi2_hwmod,
+ &omap44xx_mcspi3_hwmod,
+ &omap44xx_mcspi4_hwmod,
+
+ /* mmc class */
+ &omap44xx_mmc1_hwmod,
+ &omap44xx_mmc2_hwmod,
+ &omap44xx_mmc3_hwmod,
+ &omap44xx_mmc4_hwmod,
+ &omap44xx_mmc5_hwmod,
+
/* mpu class */
&omap44xx_mpu_hwmod,
@@ -2058,12 +5107,31 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
&omap44xx_smartreflex_iva_hwmod,
&omap44xx_smartreflex_mpu_hwmod,
+ /* spinlock class */
+ &omap44xx_spinlock_hwmod,
+
+ /* timer class */
+ &omap44xx_timer1_hwmod,
+ &omap44xx_timer2_hwmod,
+ &omap44xx_timer3_hwmod,
+ &omap44xx_timer4_hwmod,
+ &omap44xx_timer5_hwmod,
+ &omap44xx_timer6_hwmod,
+ &omap44xx_timer7_hwmod,
+ &omap44xx_timer8_hwmod,
+ &omap44xx_timer9_hwmod,
+ &omap44xx_timer10_hwmod,
+ &omap44xx_timer11_hwmod,
+
/* uart class */
&omap44xx_uart1_hwmod,
&omap44xx_uart2_hwmod,
&omap44xx_uart3_hwmod,
&omap44xx_uart4_hwmod,
+ /* usb_otg_hs class */
+ &omap44xx_usb_otg_hs_hwmod,
+
/* wd_timer class */
&omap44xx_wd_timer2_hwmod,
&omap44xx_wd_timer3_hwmod,
@@ -2073,6 +5141,6 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
int __init omap44xx_hwmod_init(void)
{
- return omap_hwmod_init(omap44xx_hwmods);
+ return omap_hwmod_register(omap44xx_hwmods);
}
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 745252c60e32..f172ec06c06a 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -29,6 +29,7 @@
#include <linux/usb.h>
#include <plat/usb.h>
+#include "control.h"
/* OMAP control module register for UTMI PHY */
#define CONTROL_DEV_CONF 0x300
@@ -147,3 +148,95 @@ int omap4430_phy_exit(struct device *dev)
return 0;
}
+
+void am35x_musb_reset(void)
+{
+ u32 regval;
+
+ /* Reset the musb interface */
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+
+ regval |= AM35XX_USBOTGSS_SW_RST;
+ omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+
+ regval &= ~AM35XX_USBOTGSS_SW_RST;
+ omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+}
+
+void am35x_musb_phy_power(u8 on)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 devconf2;
+
+ if (on) {
+ /*
+ * Start the on-chip PHY and its PLL.
+ */
+ devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+ devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
+ devconf2 |= CONF2_PHY_PLLON;
+
+ omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+
+ pr_info("Waiting for PHY clock good...\n");
+ while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
+ & CONF2_PHYCLKGD)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ pr_err("musb PHY clock good timed out\n");
+ break;
+ }
+ }
+ } else {
+ /*
+ * Power down the on-chip PHY.
+ */
+ devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+ devconf2 &= ~CONF2_PHY_PLLON;
+ devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
+ omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+ }
+}
+
+void am35x_musb_clear_irq(void)
+{
+ u32 regval;
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval |= AM35XX_USBOTGSS_INT_CLR;
+ omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+void am35x_musb_set_mode(u8 musb_mode)
+{
+ u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+ devconf2 &= ~CONF2_OTGMODE;
+ switch (musb_mode) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case MUSB_HOST: /* Force VBUS valid, ID = 0 */
+ devconf2 |= CONF2_FORCE_HOST;
+ break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
+ devconf2 |= CONF2_FORCE_DEVICE;
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_OTG
+ case MUSB_OTG: /* Don't override the VBUS/ID comparators */
+ devconf2 |= CONF2_NO_OVERRIDE;
+ break;
+#endif
+ default:
+ pr_info("Unsupported mode %u\n", musb_mode);
+ }
+
+ omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+}
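The four helpers exported above are meant to be wired up by AM35xx board code rather than by the generic MUSB init path (the old assignments are dropped from usb-musb.c later in this series). A minimal sketch of that wiring, assuming the prototypes are visible through <plat/usb.h>; the structure name and the .power value are illustrative placeholders, while the callback field names match struct omap_musb_board_data as used in the usb-musb.c hunk below:

	/* illustrative sketch only -- not part of the patch */
	#include <plat/usb.h>

	static struct omap_musb_board_data am35x_musb_board_data = {
		.interface_type	= MUSB_INTERFACE_ULPI,
		.mode		= MUSB_OTG,
		.power		= 500,	/* placeholder value */
		.set_phy_power	= am35x_musb_phy_power,
		.clear_irq	= am35x_musb_clear_irq,
		.set_mode	= am35x_musb_set_mode,
		.reset		= am35x_musb_reset,
	};

	static void __init am35x_board_init_usb(void)
	{
		usb_musb_init(&am35x_musb_board_data);
	}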
diff --git a/arch/arm/mach-omap2/opp2xxx.h b/arch/arm/mach-omap2/opp2xxx.h
index 38b730550506..8affc66a92c2 100644
--- a/arch/arm/mach-omap2/opp2xxx.h
+++ b/arch/arm/mach-omap2/opp2xxx.h
@@ -418,7 +418,7 @@ struct prcm_config {
extern const struct prcm_config omap2420_rate_table[];
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
extern const struct prcm_config omap2430_rate_table[];
#else
#define omap2430_rate_table NULL
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 125f56591fb5..a5a83b358ddd 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
}
- (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+ (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
&enable_off_mode, &pm_dbg_option_fops);
- (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+ (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
&sleep_while_idle, &pm_dbg_option_fops);
- (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+ (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
- S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+ S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
pm_dbg_init_done = 1;
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 1c1b0ab5b978..39580e6060e8 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -92,7 +92,7 @@ extern void omap24xx_idle_loop_suspend(void);
extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
void __iomem *sdrc_power);
extern void omap34xx_cpu_suspend(u32 *addr, int save_state);
-extern void save_secure_ram_context(u32 *addr);
+extern int save_secure_ram_context(u32 *addr);
extern void omap3_save_scratchpad_contents(void);
extern unsigned int omap24xx_idle_loop_suspend_sz;
diff --git a/arch/arm/mach-omap2/powerdomains2xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_data.c
index 9b1a33500577..78739e10f5b9 100644
--- a/arch/arm/mach-omap2/powerdomains2xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains2xxx_data.c
@@ -78,7 +78,7 @@ static struct powerdomain core_24xx_pwrdm = {
* 2430-specific powerdomains
*/
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
/* XXX 2430 KILLDOMAINWKUP bit? No current users apparently */
@@ -97,7 +97,7 @@ static struct powerdomain mdm_pwrdm = {
},
};
-#endif /* CONFIG_ARCH_OMAP2430 */
+#endif /* CONFIG_SOC_OMAP2430 */
/* As powerdomains are added or removed above, this list must also be changed */
static struct powerdomain *powerdomains_omap2xxx[] __initdata = {
@@ -111,7 +111,7 @@ static struct powerdomain *powerdomains_omap2xxx[] __initdata = {
&core_24xx_pwrdm,
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
&mdm_pwrdm,
#endif
NULL
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 87486f559784..0363dcb0ef93 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -121,6 +121,10 @@
#define OMAP24XX_ST_MCSPI2_MASK (1 << 18)
#define OMAP24XX_ST_MCSPI1_SHIFT 17
#define OMAP24XX_ST_MCSPI1_MASK (1 << 17)
+#define OMAP24XX_ST_MCBSP2_SHIFT 16
+#define OMAP24XX_ST_MCBSP2_MASK (1 << 16)
+#define OMAP24XX_ST_MCBSP1_SHIFT 15
+#define OMAP24XX_ST_MCBSP1_MASK (1 << 15)
#define OMAP24XX_ST_GPT12_SHIFT 14
#define OMAP24XX_ST_GPT12_MASK (1 << 14)
#define OMAP24XX_ST_GPT11_SHIFT 13
@@ -191,6 +195,8 @@
#define OMAP3430_AUTOIDLE_MASK (1 << 0)
/* CM_FCLKEN1_CORE, CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
+#define OMAP3430_EN_MMC3_MASK (1 << 30)
+#define OMAP3430_EN_MMC3_SHIFT 30
#define OMAP3430_EN_MMC2_MASK (1 << 25)
#define OMAP3430_EN_MMC2_SHIFT 25
#define OMAP3430_EN_MMC1_MASK (1 << 24)
@@ -231,6 +237,8 @@
#define OMAP3430_EN_HSOTGUSB_SHIFT 4
/* PM_WKST1_CORE, CM_IDLEST1_CORE shared bits */
+#define OMAP3430_ST_MMC3_SHIFT 30
+#define OMAP3430_ST_MMC3_MASK (1 << 30)
#define OMAP3430_ST_MMC2_SHIFT 25
#define OMAP3430_ST_MMC2_MASK (1 << 25)
#define OMAP3430_ST_MMC1_SHIFT 24
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 679bcd28576e..6be14389e4f3 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/delay.h>
+#include <mach/system.h>
#include <plat/common.h>
#include <plat/prcm.h>
#include <plat/irqs.h>
@@ -57,7 +58,7 @@ u32 omap_prcm_get_reset_sources(void)
EXPORT_SYMBOL(omap_prcm_get_reset_sources);
/* Resets clock rates and reboots the system. Only called from system.h */
-void omap_prcm_arch_reset(char mode, const char *cmd)
+static void omap_prcm_arch_reset(char mode, const char *cmd)
{
s16 prcm_offs = 0;
@@ -108,6 +109,8 @@ void omap_prcm_arch_reset(char mode, const char *cmd)
omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
}
+void (*arch_reset)(char, const char *) = omap_prcm_arch_reset;
+
/**
* omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
* @reg: physical address of module IDLEST register
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index 729a644ce852..3300ff6e3cfe 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
/* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
/*
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 32e91a9c8b6b..1ac361b7b8cb 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -486,7 +486,7 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
mod_timer(&uart->timer, jiffies + uart->timeout);
omap_uart_smart_idle_enable(uart, 0);
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() && !cpu_is_ti816x()) {
u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD;
u32 wk_mask = 0;
u32 padconf = 0;
@@ -655,7 +655,7 @@ static void serial_out_override(struct uart_port *up, int offset, int value)
}
#endif
-void __init omap_serial_early_init(void)
+static int __init omap_serial_early_init(void)
{
int i = 0;
@@ -672,7 +672,7 @@ void __init omap_serial_early_init(void)
uart = kzalloc(sizeof(struct omap_uart_state), GFP_KERNEL);
if (WARN_ON(!uart))
- return;
+ return -ENODEV;
uart->oh = oh;
uart->num = i++;
@@ -680,7 +680,7 @@ void __init omap_serial_early_init(void)
num_uarts++;
/*
- * NOTE: omap_hwmod_init() has not yet been called,
+ * NOTE: omap_hwmod_setup*() has not yet been called,
* so no hwmod functions will work yet.
*/
@@ -691,7 +691,10 @@ void __init omap_serial_early_init(void)
*/
uart->oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
} while (1);
+
+ return 0;
}
+core_initcall(omap_serial_early_init);
/**
* omap_serial_init_port() - initialize single serial port
@@ -759,13 +762,13 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
p->private_data = uart;
/*
- * omap44xx: Never read empty UART fifo
+ * omap44xx, ti816x: Never read empty UART fifo
* omap3xxx: Never read empty UART fifo on UARTs
* with IP rev >=0x52
*/
uart->regshift = p->regshift;
uart->membase = p->membase;
- if (cpu_is_omap44xx())
+ if (cpu_is_omap44xx() || cpu_is_ti816x())
uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
else if ((serial_read_reg(uart, UART_OMAP_MVER) & 0xFF)
>= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
@@ -847,7 +850,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
}
/* Enable the MDR1 errata for OMAP3 */
- if (cpu_is_omap34xx())
+ if (cpu_is_omap34xx() && !cpu_is_ti816x())
uart->errata |= UART_ERRATA_i202_MDR1_ACCESS;
}
diff --git a/arch/arm/mach-omap2/sleep24xx.S b/arch/arm/mach-omap2/sleep24xx.S
index c7780cc8d919..b5071a47ec39 100644
--- a/arch/arm/mach-omap2/sleep24xx.S
+++ b/arch/arm/mach-omap2/sleep24xx.S
@@ -47,6 +47,7 @@
* Note: This code gets copied to internal SRAM at boot. When the OMAP
* wakes up it continues execution at the point it went to sleep.
*/
+ .align 3
ENTRY(omap24xx_idle_loop_suspend)
stmfd sp!, {r0, lr} @ save registers on stack
mov r0, #0 @ clear for mcr setup
@@ -82,6 +83,7 @@ ENTRY(omap24xx_idle_loop_suspend_sz)
* The DLL load value is not kept in RETENTION or OFF. It needs to be restored
* at wake
*/
+ .align 3
ENTRY(omap24xx_cpu_suspend)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r3, #0x0 @ clear for mcr call
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 98d8232808b8..951a0be66cf7 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -118,6 +118,7 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
.text
/* Function to call rom code to save secure ram context */
+ .align 3
ENTRY(save_secure_ram_context)
stmfd sp!, {r1-r12, lr} @ save registers on stack
adr r3, api_params @ r3 points to parameters
@@ -169,6 +170,7 @@ ENTRY(save_secure_ram_context_sz)
* depending on the low power mode (non-OFF vs OFF modes),
* cf. 'Resume path for xxx mode' comments.
*/
+ .align 3
ENTRY(omap34xx_cpu_suspend)
stmfd sp!, {r0-r12, lr} @ save registers on stack
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index c37e823266d3..95ac336fe3f7 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -900,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
return PTR_ERR(dbg_dir);
}
- (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+ (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
(void *)sr_info, &pm_sr_fops);
(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
&sr_info->err_weight);
@@ -939,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
strcpy(name, "volt_");
sprintf(volt_name, "%d", volt_data[i].volt_nominal);
strcat(name, volt_name);
- (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+ (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue));
}
diff --git a/arch/arm/mach-omap2/sram242x.S b/arch/arm/mach-omap2/sram242x.S
index 055310cc77de..ff9b9dbcb30e 100644
--- a/arch/arm/mach-omap2/sram242x.S
+++ b/arch/arm/mach-omap2/sram242x.S
@@ -39,6 +39,7 @@
.text
+ .align 3
ENTRY(omap242x_sram_ddr_init)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
@@ -143,6 +144,7 @@ ENTRY(omap242x_sram_ddr_init_sz)
* r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
* PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
*/
+ .align 3
ENTRY(omap242x_sram_reprogram_sdrc)
stmfd sp!, {r0 - r10, lr} @ save registers on stack
mov r3, #0x0 @ clear for mrc call
@@ -238,6 +240,7 @@ ENTRY(omap242x_sram_reprogram_sdrc_sz)
/*
* Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
*/
+ .align 3
ENTRY(omap242x_sram_set_prcm)
stmfd sp!, {r0-r12, lr} @ regs to stack
adr r4, pbegin @ addr of preload start
diff --git a/arch/arm/mach-omap2/sram243x.S b/arch/arm/mach-omap2/sram243x.S
index f9007580aea3..76730209fa0e 100644
--- a/arch/arm/mach-omap2/sram243x.S
+++ b/arch/arm/mach-omap2/sram243x.S
@@ -39,6 +39,7 @@
.text
+ .align 3
ENTRY(omap243x_sram_ddr_init)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
@@ -143,6 +144,7 @@ ENTRY(omap243x_sram_ddr_init_sz)
* r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
* PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
*/
+ .align 3
ENTRY(omap243x_sram_reprogram_sdrc)
stmfd sp!, {r0 - r10, lr} @ save registers on stack
mov r3, #0x0 @ clear for mrc call
@@ -238,6 +240,7 @@ ENTRY(omap243x_sram_reprogram_sdrc_sz)
/*
* Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
*/
+ .align 3
ENTRY(omap243x_sram_set_prcm)
stmfd sp!, {r0-r12, lr} @ regs to stack
adr r4, pbegin @ addr of preload start
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index 7f893a29d500..25011ca2145d 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -111,6 +111,7 @@
* since it will cause the ARM MMU to attempt to walk the page tables.
* These crashes may be intermittent.
*/
+ .align 3
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 7b7c2683ae7b..3b9cf85f4bb9 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -39,10 +39,12 @@
#include <asm/mach/time.h>
#include <plat/dmtimer.h>
#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
+#include <plat/common.h>
+#include <plat/omap_hwmod.h>
#include "timer-gp.h"
-#include <plat/common.h>
/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
#define MAX_GPTIMER_ID 12
@@ -132,9 +134,13 @@ static void __init omap2_gp_clockevent_init(void)
{
u32 tick_rate;
int src;
+ char clockevent_hwmod_name[8]; /* 8 = sizeof("timerXX0") */
inited = 1;
+ sprintf(clockevent_hwmod_name, "timer%d", gptimer_id);
+ omap_hwmod_setup_one(clockevent_hwmod_name);
+
gptimer = omap_dm_timer_request_specific(gptimer_id);
BUG_ON(gptimer == NULL);
gptimer_wakeup = gptimer;
@@ -190,6 +196,7 @@ static void __init omap2_gp_clocksource_init(void)
/*
* clocksource
*/
+static DEFINE_CLOCK_DATA(cd);
static struct omap_dm_timer *gpt_clocksource;
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
@@ -204,6 +211,15 @@ static struct clocksource clocksource_gpt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static void notrace dmtimer_update_sched_clock(void)
+{
+ u32 cyc;
+
+ cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+
/* Setup free-running counter for clocksource */
static void __init omap2_gp_clocksource_init(void)
{
@@ -224,6 +240,8 @@ static void __init omap2_gp_clocksource_init(void)
omap_dm_timer_set_load_start(gpt, 1, 0);
+ init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
if (clocksource_register_hz(&clocksource_gpt, tick_rate))
printk(err2, clocksource_gpt.name);
}
diff --git a/arch/arm/mach-omap2/timer-mpu.c b/arch/arm/mach-omap2/timer-mpu.c
index 954682e64399..31c0ac4cd66a 100644
--- a/arch/arm/mach-omap2/timer-mpu.c
+++ b/arch/arm/mach-omap2/timer-mpu.c
@@ -26,9 +26,14 @@
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
+ /* Local timers are not supported on OMAP4430 ES1.0 */
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return -ENXIO;
+
evt->irq = OMAP44XX_IRQ_LOCALTIMER;
twd_timer_setup(evt);
+ return 0;
}
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 5298949d4b11..a9d4d143086d 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -30,118 +30,11 @@
#include <mach/irqs.h>
#include <mach/am35xx.h>
#include <plat/usb.h>
-#include "control.h"
+#include <plat/omap_device.h>
+#include "mux.h"
#if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X)
-static void am35x_musb_reset(void)
-{
- u32 regval;
-
- /* Reset the musb interface */
- regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
-
- regval |= AM35XX_USBOTGSS_SW_RST;
- omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
-
- regval &= ~AM35XX_USBOTGSS_SW_RST;
- omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
-
- regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
-}
-
-static void am35x_musb_phy_power(u8 on)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
- u32 devconf2;
-
- if (on) {
- /*
- * Start the on-chip PHY and its PLL.
- */
- devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
-
- devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
- devconf2 |= CONF2_PHY_PLLON;
-
- omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
-
- pr_info(KERN_INFO "Waiting for PHY clock good...\n");
- while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
- & CONF2_PHYCLKGD)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- pr_err(KERN_ERR "musb PHY clock good timed out\n");
- break;
- }
- }
- } else {
- /*
- * Power down the on-chip PHY.
- */
- devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
-
- devconf2 &= ~CONF2_PHY_PLLON;
- devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
- omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
- }
-}
-
-static void am35x_musb_clear_irq(void)
-{
- u32 regval;
-
- regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
- regval |= AM35XX_USBOTGSS_INT_CLR;
- omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
- regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
-}
-
-static void am35x_musb_set_mode(u8 musb_mode)
-{
- u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
-
- devconf2 &= ~CONF2_OTGMODE;
- switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- case MUSB_HOST: /* Force VBUS valid, ID = 0 */
- devconf2 |= CONF2_FORCE_HOST;
- break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
- devconf2 |= CONF2_FORCE_DEVICE;
- break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
- case MUSB_OTG: /* Don't override the VBUS/ID comparators */
- devconf2 |= CONF2_NO_OVERRIDE;
- break;
-#endif
- default:
- pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
- }
-
- omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
-}
-
-static struct resource musb_resources[] = {
- [0] = { /* start and end set dynamically */
- .flags = IORESOURCE_MEM,
- },
- [1] = { /* general IRQ */
- .start = INT_243X_HS_USB_MC,
- .flags = IORESOURCE_IRQ,
- .name = "mc",
- },
- [2] = { /* DMA IRQ */
- .start = INT_243X_HS_USB_DMA,
- .flags = IORESOURCE_IRQ,
- .name = "dma",
- },
-};
-
static struct musb_hdrc_config musb_config = {
.multipoint = 1,
.dyn_fifo = 1,
@@ -169,38 +62,65 @@ static struct musb_hdrc_platform_data musb_plat = {
static u64 musb_dmamask = DMA_BIT_MASK(32);
-static struct platform_device musb_device = {
- .name = "musb-omap2430",
- .id = -1,
- .dev = {
- .dma_mask = &musb_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &musb_plat,
+static struct omap_device_pm_latency omap_musb_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
},
- .num_resources = ARRAY_SIZE(musb_resources),
- .resource = musb_resources,
};
+static void usb_musb_mux_init(struct omap_musb_board_data *board_data)
+{
+ switch (board_data->interface_type) {
+ case MUSB_INTERFACE_UTMI:
+ omap_mux_init_signal("usba0_otg_dp", OMAP_PIN_INPUT);
+ omap_mux_init_signal("usba0_otg_dm", OMAP_PIN_INPUT);
+ break;
+ case MUSB_INTERFACE_ULPI:
+ omap_mux_init_signal("usba0_ulpiphy_clk",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_stp",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dir",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_nxt",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat1",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat2",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat3",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat4",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat5",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat6",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("usba0_ulpiphy_dat7",
+ OMAP_PIN_INPUT_PULLDOWN);
+ break;
+ default:
+ break;
+ }
+}
+
void __init usb_musb_init(struct omap_musb_board_data *board_data)
{
- if (cpu_is_omap243x()) {
- musb_resources[0].start = OMAP243X_HS_BASE;
- } else if (cpu_is_omap3517() || cpu_is_omap3505()) {
- musb_device.name = "musb-am35x";
- musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE;
- musb_resources[1].start = INT_35XX_USBOTG_IRQ;
- board_data->set_phy_power = am35x_musb_phy_power;
- board_data->clear_irq = am35x_musb_clear_irq;
- board_data->set_mode = am35x_musb_set_mode;
- board_data->reset = am35x_musb_reset;
- } else if (cpu_is_omap34xx()) {
- musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ struct platform_device *pdev;
+ struct device *dev;
+ int bus_id = -1;
+ const char *oh_name, *name;
+
+ if (cpu_is_omap3517() || cpu_is_omap3505()) {
} else if (cpu_is_omap44xx()) {
- musb_resources[0].start = OMAP44XX_HSUSB_OTG_BASE;
- musb_resources[1].start = OMAP44XX_IRQ_HS_USB_MC_N;
- musb_resources[2].start = OMAP44XX_IRQ_HS_USB_DMA_N;
+ usb_musb_mux_init(board_data);
}
- musb_resources[0].end = musb_resources[0].start + SZ_4K - 1;
/*
* REVISIT: This line can be removed once all the platforms using
@@ -212,8 +132,35 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
- if (platform_device_register(&musb_device) < 0)
- printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n");
+ if (cpu_is_omap3517() || cpu_is_omap3505()) {
+ oh_name = "am35x_otg_hs";
+ name = "musb-am35x";
+ } else {
+ oh_name = "usb_otg_hs";
+ name = "musb-omap2430";
+ }
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ return;
+ }
+
+ od = omap_device_build(name, bus_id, oh, &musb_plat,
+ sizeof(musb_plat), omap_musb_latency,
+ ARRAY_SIZE(omap_musb_latency), false);
+ if (IS_ERR(od)) {
+ pr_err("Could not build omap_device for %s %s\n",
+ name, oh_name);
+ return;
+ }
+
+ pdev = &od->pdev;
+ dev = &pdev->dev;
+ get_device(dev);
+ dev->dma_mask = &musb_dmamask;
+ dev->coherent_dma_mask = musb_dmamask;
+ put_device(dev);
}
#else
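For the OMAP2/3/4 path the controller is now created from the "usb_otg_hs" hwmod shown earlier, so its MEM and IRQ resources (the "mc"/"dma" entries and the 0x4a0ab000 window in the hwmod data) travel with the generated platform device. A rough sketch of how a glue driver could pick them up by name, assuming the standard platform helpers (the in-tree omap2430 glue may fetch them differently):

	/* illustrative sketch only -- not part of the patch */
	#include <linux/platform_device.h>
	#include <linux/ioport.h>

	static int __devinit musb_glue_probe(struct platform_device *pdev)
	{
		struct resource *mem, *irq_mc, *irq_dma;

		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		irq_mc = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "mc");
		irq_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "dma");
		if (!mem || !irq_mc || !irq_dma)
			return -ENODEV;

		/* ioremap(mem->start, resource_size(mem)) and request irq_mc->start here */
		return 0;
	}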
diff --git a/arch/arm/mach-orion5x/include/mach/memory.h b/arch/arm/mach-orion5x/include/mach/memory.h
index 52a2955d0f87..6769917882fe 100644
--- a/arch/arm/mach-orion5x/include/mach/memory.h
+++ b/arch/arm/mach-orion5x/include/mach/memory.h
@@ -7,6 +7,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-pnx4008/include/mach/memory.h b/arch/arm/mach-pnx4008/include/mach/memory.h
index 0e8770081058..1275db61cee5 100644
--- a/arch/arm/mach-pnx4008/include/mach/memory.h
+++ b/arch/arm/mach-pnx4008/include/mach/memory.h
@@ -16,6 +16,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x80000000)
+#define PLAT_PHYS_OFFSET UL(0x80000000)
#endif
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index a134a1413e01..d2af73321dae 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -27,6 +27,7 @@
#include <linux/mtd/partitions.h>
#include <linux/types.h>
#include <linux/i2c/pcf857x.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/physmap.h>
#include <linux/regulator/max1586.h>
@@ -51,8 +52,6 @@
#include <mach/irda.h>
#include <mach/ohci.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
@@ -829,5 +828,5 @@ MACHINE_START(BALLOON3, "Balloon3")
.init_irq = balloon3_init_irq,
.timer = &pxa_timer,
.init_machine = balloon3_init,
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
MACHINE_END
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 7984268508b6..bfca7ed2fea3 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/mfd/da903x.h>
#include <linux/regulator/machine.h>
@@ -48,7 +49,6 @@
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <mach/ohci.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
#include <mach/audio.h>
#include <mach/pxa3xx-u2d.h>
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index 28f667e52ef9..81c3c433e2d6 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -20,6 +20,7 @@
#include <mach/hardware.h>
#include <asm/mach/arch.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <mach/pxa27x.h>
#include <mach/colibri.h>
@@ -27,8 +28,6 @@
#include <mach/ohci.h>
#include <mach/pxa27x-udc.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 07b62a096f17..ee797397dc5b 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pwm_backlight.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/sysdev.h>
#include <asm/irq.h>
@@ -33,8 +34,6 @@
#include <mach/pxa27x-udc.h>
#include <mach/pxafb.h>
-#include <plat/i2c.h>
-
#include "devices.h"
#include "generic.h"
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index a5452a3a276d..d4e705caefea 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -24,6 +24,7 @@
#include <linux/gpio.h>
#include <linux/backlight.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -45,7 +46,6 @@
#include <asm/mach/irq.h>
#include <mach/pxa25x.h>
-#include <plat/i2c.h>
#include <mach/irda.h>
#include <mach/mmc.h>
#include <mach/udc.h>
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index a305424a967d..0481c29a70e8 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -17,12 +17,12 @@
#include <linux/mtd/partitions.h>
#include <linux/sm501.h>
#include <linux/smsc911x.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/csb726.h>
#include <mach/mfp-pxa27x.h>
-#include <plat/i2c.h>
#include <mach/mmc.h>
#include <mach/ohci.h>
#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 4c766e3b4af3..c4bf08b3eb61 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -4,6 +4,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/pxa2xx_spi.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/pmu.h>
#include <mach/udc.h>
@@ -16,7 +17,6 @@
#include <mach/camera.h>
#include <mach/audio.h>
#include <mach/hardware.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
#include "devices.h"
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index a78bb3097739..b411d7cbf5a1 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -31,6 +31,7 @@
#include <linux/apm-emulation.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/regulator/userspace-consumer.h>
#include <media/soc_camera.h>
@@ -45,7 +46,6 @@
#include <mach/ohci.h>
#include <mach/mmc.h>
#include <plat/pxa27x_keypad.h>
-#include <plat/i2c.h>
#include <mach/camera.h>
#include "generic.h"
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 87cec0abe5b0..93f05e024313 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -20,6 +20,7 @@
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/leds-lp3944.h>
+#include <linux/i2c/pxa-i2c.h>
#include <media/soc_camera.h>
@@ -30,7 +31,6 @@
#include <mach/pxa27x.h>
#include <mach/pxafb.h>
#include <mach/ohci.h>
-#include <plat/i2c.h>
#include <mach/hardware.h>
#include <plat/pxa27x_keypad.h>
#include <mach/camera.h>
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index a908e0a5f396..6de0ad0eea65 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -35,6 +35,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/i2c/pxa-i2c.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -42,7 +43,6 @@
#include <mach/pxa27x.h>
#include <mach/hx4700.h>
-#include <plat/i2c.h>
#include <mach/irda.h>
#include <video/platform_lcd.h>
diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h
index 92361a66b223..7f68724dcc27 100644
--- a/arch/arm/mach-pxa/include/mach/memory.h
+++ b/arch/arm/mach-pxa/include/mach/memory.h
@@ -15,7 +15,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0xa0000000)
+#define PLAT_PHYS_OFFSET UL(0xa0000000)
#if !defined(__ASSEMBLY__) && defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
void cmx2xx_pci_adjust_zones(unsigned long *size, unsigned long *holes);
diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h
index fd8360c6839d..f15afe012995 100644
--- a/arch/arm/mach-pxa/include/mach/pm.h
+++ b/arch/arm/mach-pxa/include/mach/pm.h
@@ -22,9 +22,8 @@ struct pxa_cpu_pm_fns {
extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
/* sleep.S */
-extern void pxa25x_cpu_suspend(unsigned int);
-extern void pxa27x_cpu_suspend(unsigned int);
-extern void pxa_cpu_resume(void);
+extern void pxa25x_cpu_suspend(unsigned int, long);
+extern void pxa27x_cpu_suspend(unsigned int, long);
extern int pxa_pm_enter(suspend_state_t state);
extern int pxa_pm_prepare(void);
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index ccb7bfad17ca..87c1ed9ccd2f 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -28,6 +28,7 @@
#include <linux/leds.h>
#include <linux/mfd/da903x.h>
#include <linux/i2c/max732x.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -45,7 +46,6 @@
#include <mach/mmc.h>
#include <plat/pxa27x_keypad.h>
#include <mach/littleton.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
#include "generic.h"
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 41198f0dc3ac..5535991c4a3c 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -28,6 +28,7 @@
#include <linux/regulator/bq24022.h>
#include <linux/regulator/machine.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/i2c/pxa-i2c.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -36,7 +37,6 @@
#include <mach/pxa27x.h>
#include <mach/magician.h>
#include <mach/pxafb.h>
-#include <plat/i2c.h>
#include <mach/mmc.h>
#include <mach/irda.h>
#include <mach/ohci.h>
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d4b6f2375f2c..f9542220595a 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -27,6 +27,7 @@
#include <linux/gpio_keys.h>
#include <linux/pwm_backlight.h>
#include <linux/smc91x.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -46,7 +47,6 @@
#include <mach/mainstone.h>
#include <mach/audio.h>
#include <mach/pxafb.h>
-#include <plat/i2c.h>
#include <mach/mmc.h>
#include <mach/irda.h>
#include <mach/ohci.h>
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index faafea3542fb..78d98a8607ec 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -39,6 +39,7 @@
#include <linux/usb/gpio_vbus.h>
#include <linux/regulator/max1586.h>
#include <linux/slab.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -50,7 +51,6 @@
#include <mach/mmc.h>
#include <mach/udc.h>
#include <mach/pxa27x-udc.h>
-#include <plat/i2c.h>
#include <mach/camera.h>
#include <mach/audio.h>
#include <media/soc_camera.h>
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index cdf7f41e2bb3..b5a8fd3fce04 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -22,8 +22,8 @@
#include <linux/serial_8250.h>
#include <linux/dm9000.h>
#include <linux/gpio.h>
+#include <linux/i2c/pxa-i2c.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
#include <mach/pxafb.h>
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 35572c427fa8..72adb3ae2b43 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -22,6 +22,7 @@
#include <linux/power_supply.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/regulator/max1586.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -36,8 +37,6 @@
#include <mach/palmasoc.h>
#include <mach/palm27x.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 7bf4017326e3..3010193b081e 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -212,7 +212,7 @@ static unsigned long store_ptr;
static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg)
{
/* setup the resume_info struct for the original bootloader */
- palmz72_resume_info.resume_addr = (u32) pxa_cpu_resume;
+ palmz72_resume_info.resume_addr = (u32) cpu_resume;
/* Storing memory touched by ROM */
store_ptr = *PALMZ72_SAVE_DWORD;
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 90820faa711a..9dbf3ccd4150 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -23,12 +23,12 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/pwm_backlight.h>
#include <media/soc_camera.h>
#include <asm/gpio.h>
-#include <plat/i2c.h>
#include <mach/camera.h>
#include <asm/mach/map.h>
#include <mach/pxa27x.h>
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 1807c9abdde0..51e1583265b2 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -67,11 +67,6 @@ int pxa_pm_enter(suspend_state_t state)
EXPORT_SYMBOL_GPL(pxa_pm_enter);
-unsigned long sleep_phys_sp(void *sp)
-{
- return virt_to_phys(sp);
-}
-
static int pxa_pm_valid(suspend_state_t state)
{
if (pxa_cpu_pm_fns)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 4f0ff1ab623d..35353af345d5 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -23,6 +23,7 @@
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -44,7 +45,6 @@
#include <mach/irda.h>
#include <mach/poodle.h>
#include <mach/pxafb.h>
-#include <plat/i2c.h>
#include <asm/hardware/scoop.h>
#include <asm/hardware/locomo.h>
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index fbc5b775f895..0727e48a97ff 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -244,7 +244,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
switch (state) {
case PM_SUSPEND_MEM:
- pxa25x_cpu_suspend(PWRMODE_SLEEP);
+ pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
break;
}
}
@@ -252,7 +252,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
static int pxa25x_cpu_pm_prepare(void)
{
/* set resume return address */
- PSPR = virt_to_phys(pxa_cpu_resume);
+ PSPR = virt_to_phys(cpu_resume);
return 0;
}
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 987301ff4c33..1cb5d0f9723f 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -19,6 +19,7 @@
#include <linux/sysdev.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
@@ -32,8 +33,6 @@
#include <mach/dma.h>
#include <mach/smemc.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
#include "clock.h"
@@ -300,7 +299,7 @@ void pxa27x_cpu_pm_enter(suspend_state_t state)
pxa_cpu_standby();
break;
case PM_SUSPEND_MEM:
- pxa27x_cpu_suspend(pwrmode);
+ pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
break;
}
}
@@ -313,7 +312,7 @@ static int pxa27x_cpu_pm_valid(suspend_state_t state)
static int pxa27x_cpu_pm_prepare(void)
{
/* set resume return address */
- PSPR = virt_to_phys(pxa_cpu_resume);
+ PSPR = virt_to_phys(cpu_resume);
return 0;
}
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index a7a19e1cd640..f374247b8466 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sysdev.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
@@ -32,7 +33,6 @@
#include <mach/dma.h>
#include <mach/regs-intc.h>
#include <mach/smemc.h>
-#include <plat/i2c.h>
#include "generic.h"
#include "devices.h"
@@ -142,8 +142,7 @@ static void pxa3xx_cpu_pm_suspend(void)
volatile unsigned long *p = (volatile void *)0xc0000000;
unsigned long saved_data = *p;
- extern void pxa3xx_cpu_suspend(void);
- extern void pxa3xx_cpu_resume(void);
+ extern void pxa3xx_cpu_suspend(long);
/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
@@ -161,9 +160,9 @@ static void pxa3xx_cpu_pm_suspend(void)
PSPR = 0x5c014000;
/* overwrite with the resume address */
- *p = virt_to_phys(pxa3xx_cpu_resume);
+ *p = virt_to_phys(cpu_resume);
- pxa3xx_cpu_suspend();
+ pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
*p = saved_data;
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c
index 437980f72710..23b229bd06e9 100644
--- a/arch/arm/mach-pxa/pxa95x.c
+++ b/arch/arm/mach-pxa/pxa95x.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sysdev.h>
@@ -27,7 +28,6 @@
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/regs-intc.h>
-#include <plat/i2c.h>
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 8361151be054..47094188e029 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -32,6 +32,7 @@
#include <linux/sched.h>
#include <linux/pwm_backlight.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
#include <linux/lis3lv02d.h>
@@ -53,7 +54,6 @@
#include <mach/ohci.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
-#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
#include "generic.h"
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index c1ca8cb467fc..eb83c89428ef 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/smc91x.h>
#include <linux/mfd/da903x.h>
#include <linux/mtd/mtd.h>
@@ -31,7 +32,6 @@
#include <asm/mach/flash.h>
#include <mach/pxa930.h>
-#include <plat/i2c.h>
#include <mach/pxafb.h>
#include "devices.h"
diff --git a/arch/arm/mach-pxa/saarb.c b/arch/arm/mach-pxa/saarb.c
index e497922f761a..9322fe527c7f 100644
--- a/arch/arm/mach-pxa/saarb.c
+++ b/arch/arm/mach-pxa/saarb.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/mfd/88pm860x.h>
#include <asm/mach-types.h>
@@ -24,8 +25,6 @@
#include <mach/mfp-pxa930.h>
#include <mach/gpio.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#define SAARB_NR_IRQS (IRQ_BOARD_START + 40)
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index c551da86baf6..6f5368899d84 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -22,133 +22,26 @@
.text
-pxa_cpu_save_cp:
- @ get coprocessor registers
- mrc p14, 0, r3, c6, c0, 0 @ clock configuration, for turbo mode
- mrc p15, 0, r4, c15, c1, 0 @ CP access reg
- mrc p15, 0, r5, c13, c0, 0 @ PID
- mrc p15, 0, r6, c3, c0, 0 @ domain ID
- mrc p15, 0, r7, c2, c0, 0 @ translation table base addr
- mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg
- mrc p15, 0, r9, c1, c0, 0 @ control reg
-
- bic r3, r3, #2 @ clear frequency change bit
-
- @ store them plus current virtual stack ptr on stack
- mov r10, sp
- stmfd sp!, {r3 - r10}
-
- mov pc, lr
-
-pxa_cpu_save_sp:
- @ preserve phys address of stack
- mov r0, sp
- str lr, [sp, #-4]!
- bl sleep_phys_sp
- ldr r1, =sleep_save_sp
- str r0, [r1]
- ldr pc, [sp], #4
-
#ifdef CONFIG_PXA3xx
/*
* pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
*
- * NOTE: unfortunately, pxa_cpu_save_cp can not be reused here since
- * the auxiliary control register address is different between pxa3xx
- * and pxa{25x,27x}
+ * r0 = v:p offset
*/
-
ENTRY(pxa3xx_cpu_suspend)
#ifndef CONFIG_IWMMXT
mra r2, r3, acc0
#endif
stmfd sp!, {r2 - r12, lr} @ save registers on stack
-
- mrc p14, 0, r3, c6, c0, 0 @ clock configuration, for turbo mode
- mrc p15, 0, r4, c15, c1, 0 @ CP access reg
- mrc p15, 0, r5, c13, c0, 0 @ PID
- mrc p15, 0, r6, c3, c0, 0 @ domain ID
- mrc p15, 0, r7, c2, c0, 0 @ translation table base addr
- mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
- mrc p15, 0, r9, c1, c0, 0 @ control reg
-
- bic r3, r3, #2 @ clear frequency change bit
-
- @ store them plus current virtual stack ptr on stack
- mov r10, sp
- stmfd sp!, {r3 - r10}
-
- @ store physical address of stack pointer
- mov r0, sp
- bl sleep_phys_sp
- ldr r1, =sleep_save_sp
- str r0, [r1]
-
- @ clean data cache
- bl xsc3_flush_kern_cache_all
+ mov r1, r0
+ ldr r3, =pxa_cpu_resume @ resume function
+ bl cpu_suspend
mov r0, #0x06 @ S2D3C4 mode
mcr p14, 0, r0, c7, c0, 0 @ enter sleep
20: b 20b @ waiting for sleep
-
- .data
- .align 5
-/*
- * pxa3xx_cpu_resume
- */
-
-ENTRY(pxa3xx_cpu_resume)
-
- mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
- msr cpsr_c, r0
-
- ldr r0, sleep_save_sp @ stack phys addr
- ldmfd r0, {r3 - r9, sp} @ CP regs + virt stack ptr
-
- mov r1, #0
- mcr p15, 0, r1, c7, c7, 0 @ invalidate I & D caches, BTB
- mcr p15, 0, r1, c7, c10, 4 @ drain write (&fill) buffer
- mcr p15, 0, r1, c7, c5, 4 @ flush prefetch buffer
- mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
-
- mcr p14, 0, r3, c6, c0, 0 @ clock configuration, turbo mode.
- mcr p15, 0, r4, c15, c1, 0 @ CP access reg
- mcr p15, 0, r5, c13, c0, 0 @ PID
- mcr p15, 0, r6, c3, c0, 0 @ domain ID
- mcr p15, 0, r7, c2, c0, 0 @ translation table base addr
- mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
-
- @ temporarily map resume_turn_on_mmu into the page table,
- @ otherwise prefetch abort occurs after MMU is turned on
- mov r1, r7
- bic r1, r1, #0x00ff
- bic r1, r1, #0x3f00
- ldr r2, =0x542e
-
- adr r3, resume_turn_on_mmu
- mov r3, r3, lsr #20
- orr r4, r2, r3, lsl #20
- ldr r5, [r1, r3, lsl #2]
- str r4, [r1, r3, lsl #2]
-
- @ Mapping page table address in the page table
- mov r6, r1, lsr #20
- orr r7, r2, r6, lsl #20
- ldr r8, [r1, r6, lsl #2]
- str r7, [r1, r6, lsl #2]
-
- ldr r2, =pxa3xx_resume_after_mmu @ absolute virtual address
- b resume_turn_on_mmu @ cache align execution
-
- .text
-pxa3xx_resume_after_mmu:
- /* restore the temporary mapping */
- str r5, [r1, r3, lsl #2]
- str r8, [r1, r6, lsl #2]
- b resume_after_mmu
-
#endif /* CONFIG_PXA3xx */
#ifdef CONFIG_PXA27x
@@ -158,28 +51,23 @@ pxa3xx_resume_after_mmu:
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
+ * r1 = v:p offset
*/
-
ENTRY(pxa27x_cpu_suspend)
#ifndef CONFIG_IWMMXT
mra r2, r3, acc0
#endif
stmfd sp!, {r2 - r12, lr} @ save registers on stack
-
- bl pxa_cpu_save_cp
-
- mov r5, r0 @ save sleep mode
- bl pxa_cpu_save_sp
-
- @ clean data cache
- bl xscale_flush_kern_cache_all
+ mov r4, r0 @ save sleep mode
+ ldr r3, =pxa_cpu_resume @ resume function
+ bl cpu_suspend
@ Put the processor to sleep
@ (also workaround for sighting 28071)
@ prepare value for sleep mode
- mov r1, r5 @ sleep mode
+ mov r1, r4 @ sleep mode
@ prepare pointer to physical address 0 (virtual mapping in generic.c)
mov r2, #UNCACHED_PHYS_0
@@ -216,21 +104,16 @@ ENTRY(pxa27x_cpu_suspend)
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
+ * r1 = v:p offset
*/
ENTRY(pxa25x_cpu_suspend)
stmfd sp!, {r2 - r12, lr} @ save registers on stack
-
- bl pxa_cpu_save_cp
-
- mov r5, r0 @ save sleep mode
- bl pxa_cpu_save_sp
-
- @ clean data cache
- bl xscale_flush_kern_cache_all
-
+ mov r4, r0 @ save sleep mode
+ ldr r3, =pxa_cpu_resume @ resume function
+ bl cpu_suspend
@ prepare value for sleep mode
- mov r1, r5 @ sleep mode
+ mov r1, r4 @ sleep mode
@ prepare pointer to physical address 0 (virtual mapping in generic.c)
mov r2, #UNCACHED_PHYS_0
@@ -317,53 +200,9 @@ pxa_cpu_do_suspend:
* pxa_cpu_resume()
*
* entry point from bootloader into kernel during resume
- *
- * Note: Yes, part of the following code is located into the .data section.
- * This is to allow sleep_save_sp to be accessed with a relative load
- * while we can't rely on any MMU translation. We could have put
- * sleep_save_sp in the .text section as well, but some setups might
- * insist on it to be truly read-only.
*/
-
- .data
- .align 5
-ENTRY(pxa_cpu_resume)
- mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
- msr cpsr_c, r0
-
- ldr r0, sleep_save_sp @ stack phys addr
- ldr r2, =resume_after_mmu @ its absolute virtual address
- ldmfd r0, {r3 - r9, sp} @ CP regs + virt stack ptr
-
- mov r1, #0
- mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
- mcr p15, 0, r1, c7, c7, 0 @ invalidate I & D caches, BTB
-
- mcr p14, 0, r3, c6, c0, 0 @ clock configuration, turbo mode.
- mcr p15, 0, r4, c15, c1, 0 @ CP access reg
- mcr p15, 0, r5, c13, c0, 0 @ PID
- mcr p15, 0, r6, c3, c0, 0 @ domain ID
- mcr p15, 0, r7, c2, c0, 0 @ translation table base addr
- mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg
- b resume_turn_on_mmu @ cache align execution
-
.align 5
-resume_turn_on_mmu:
- mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, caches, etc.
-
- @ Let us ensure we jump to resume_after_mmu only when the mcr above
- @ actually took effect. They call it the "cpwait" operation.
- mrc p15, 0, r0, c2, c0, 0 @ queue a dependency on CP15
- sub pc, r2, r0, lsr #32 @ jump to virtual addr
- nop
- nop
- nop
-
-sleep_save_sp:
- .word 0 @ preserve stack phys ptr here
-
- .text
-resume_after_mmu:
+pxa_cpu_resume:
ldmfd sp!, {r2, r3}
#ifndef CONFIG_IWMMXT
mar acc0, r2, r3
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index b49a2c21124c..38e2c0912b9a 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -19,6 +19,7 @@
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -47,8 +48,6 @@
#include <mach/sharpsl_pm.h>
#include <mach/smemc.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 9a14fdb83c82..cb5611daf5fe 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -25,6 +25,7 @@
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/partitions.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pcf857x.h>
#include <linux/i2c/at24.h>
#include <linux/smc91x.h>
@@ -43,7 +44,6 @@
#include <asm/mach/flash.h>
#include <mach/pxa27x.h>
-#include <plat/i2c.h>
#include <mach/mmc.h>
#include <mach/udc.h>
#include <mach/pxa27x-udc.h>
diff --git a/arch/arm/mach-pxa/tavorevb3.c b/arch/arm/mach-pxa/tavorevb3.c
index 70191a9450eb..79f4422f12f4 100644
--- a/arch/arm/mach-pxa/tavorevb3.c
+++ b/arch/arm/mach-pxa/tavorevb3.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/gpio.h>
#include <linux/mfd/88pm860x.h>
@@ -23,8 +24,6 @@
#include <mach/pxa930.h>
-#include <plat/i2c.h>
-
#include "devices.h"
#include "generic.h"
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index af152e70cfcf..2de14927a264 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -34,6 +34,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/input/matrix_keypad.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
@@ -41,7 +42,6 @@
#include <mach/pxa25x.h>
#include <mach/reset.h>
#include <mach/irda.h>
-#include <plat/i2c.h>
#include <mach/mmc.h>
#include <mach/udc.h>
#include <mach/tosa_bt.h>
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 423261d63d07..857bb2e63486 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -26,6 +26,7 @@
#include <linux/dm9000.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -47,7 +48,6 @@
#include <mach/irda.h>
#include <mach/ohci.h>
#include <mach/smemc.h>
-#include <plat/i2c.h>
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 49eeeab23689..12279214c875 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-gpio.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/serial_8250.h>
#include <linux/smc91x.h>
#include <linux/pwm_backlight.h>
@@ -47,7 +48,6 @@
#include <mach/pxa25x.h>
#include <mach/audio.h>
#include <mach/pxafb.h>
-#include <plat/i2c.h>
#include <mach/regs-uart.h>
#include <mach/arcom-pcmcia.h>
#include <mach/viper.h>
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index b9b579715ff6..e709fd459268 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -26,6 +26,7 @@
#include <linux/ucb1400.h>
#include <linux/ata_platform.h>
#include <linux/regulator/max1586.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -40,8 +41,6 @@
#include <mach/udc.h>
#include <mach/pata_pxa.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 51c0281c6e0a..f55f8f2e0db3 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/smc91x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -26,8 +27,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
-#include <plat/i2c.h>
-
#include <mach/hardware.h>
#include <mach/pxa2xx-regs.h>
#include <mach/mfp-pxa25x.h>
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index a323e076129e..aaf883754ef4 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -29,6 +29,7 @@
#include <linux/gpio_keys.h>
#include <linux/delay.h>
#include <linux/regulator/machine.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -40,8 +41,6 @@
#include <mach/mmc.h>
#include <plat/pxa27x_keypad.h>
-#include <plat/i2c.h>
-
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index f4b053b35815..730f51e57c17 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -25,6 +25,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/apm-emulation.h>
#include <linux/can/platform/mcp251x.h>
@@ -33,8 +34,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat/i2c.h>
-
#include <mach/pxa2xx-regs.h>
#include <mach/regs-uart.h>
#include <mach/ohci.h>
@@ -676,7 +675,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = {
static void zeus_power_off(void)
{
local_irq_disable();
- pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP);
+ pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
}
#else
#define zeus_power_off NULL
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 3aa73b3e33f2..93c64d8d7de9 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -17,11 +17,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/gpio.h>
#include <mach/pxa300.h>
-#include <plat/i2c.h>
#include <mach/zylonite.h>
#include "generic.h"
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index 7ca138a943a9..b9a9805e4828 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -19,7 +19,7 @@ config REALVIEW_EB_A9MP
config REALVIEW_EB_ARM11MP
bool "Support ARM11MPCore Tile"
depends on MACH_REALVIEW_EB
- select CPU_V6
+ select CPU_V6K
select ARCH_HAS_BARRIERS if SMP
help
Enable support for the ARM11MPCore tile fitted to the Realview(R)
@@ -36,7 +36,7 @@ config REALVIEW_EB_ARM11MP_REVB
config MACH_REALVIEW_PB11MP
bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
- select CPU_V6
+ select CPU_V6K
select ARM_GIC
select HAVE_PATA_PLATFORM
select ARCH_HAS_BARRIERS if SMP
@@ -45,6 +45,7 @@ config MACH_REALVIEW_PB11MP
the ARM11MPCore. This platform has an on-board ARM11MPCore and has
support for PCI-E and Compact Flash.
+# ARMv6 CPU without K extensions, but does have the new exclusive ops
config MACH_REALVIEW_PB1176
bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
select CPU_V6
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index a01b76b7c956..541fa4c109ef 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -8,6 +8,5 @@ obj-$(CONFIG_MACH_REALVIEW_PB11MP) += realview_pb11mp.o
obj-$(CONFIG_MACH_REALVIEW_PB1176) += realview_pb1176.o
obj-$(CONFIG_MACH_REALVIEW_PBA8) += realview_pba8.o
obj-$(CONFIG_MACH_REALVIEW_PBX) += realview_pbx.o
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 1c6602cf50e4..75dbc8791d05 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -51,6 +51,7 @@
#include <mach/irqs.h>
#include <asm/hardware/timer-sp.h>
+#include <plat/clcd.h>
#include <plat/sched_clock.h>
#include "core.h"
@@ -359,18 +360,19 @@ static struct clk_lookup lookups[] = {
}
};
-static int __init clk_init(void)
+void __init realview_init_early(void)
{
+ void __iomem *sys = __io_address(REALVIEW_SYS_BASE);
+
if (machine_is_realview_pb1176())
- oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC0_OFFSET;
+ oscvco_clk.vcoreg = sys + REALVIEW_SYS_OSC0_OFFSET;
else
- oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC4_OFFSET;
+ oscvco_clk.vcoreg = sys + REALVIEW_SYS_OSC4_OFFSET;
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
- return 0;
+ versatile_sched_clock_init(sys + REALVIEW_SYS_24MHz_OFFSET, 24000000);
}
-core_initcall(clk_init);
/*
* CLCD support.
@@ -385,157 +387,6 @@ core_initcall(clk_init);
#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
#define SYS_CLCD_ID_VGA (0x1f << 8)
-static struct clcd_panel vga = {
- .mode = {
- .name = "VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel xvga = {
- .mode = {
- .name = "XVGA",
- .refresh = 60,
- .xres = 1024,
- .yres = 768,
- .pixclock = 15748,
- .left_margin = 152,
- .right_margin = 48,
- .upper_margin = 23,
- .lower_margin = 3,
- .hsync_len = 104,
- .vsync_len = 4,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel sanyo_3_8_in = {
- .mode = {
- .name = "Sanyo QVGA",
- .refresh = 116,
- .xres = 320,
- .yres = 240,
- .pixclock = 100000,
- .left_margin = 6,
- .right_margin = 6,
- .upper_margin = 5,
- .lower_margin = 5,
- .hsync_len = 6,
- .vsync_len = 6,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel sanyo_2_5_in = {
- .mode = {
- .name = "Sanyo QVGA Portrait",
- .refresh = 116,
- .xres = 240,
- .yres = 320,
- .pixclock = 100000,
- .left_margin = 20,
- .right_margin = 10,
- .upper_margin = 2,
- .lower_margin = 2,
- .hsync_len = 10,
- .vsync_len = 2,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel epson_2_2_in = {
- .mode = {
- .name = "Epson QCIF",
- .refresh = 390,
- .xres = 176,
- .yres = 220,
- .pixclock = 62500,
- .left_margin = 3,
- .right_margin = 2,
- .upper_margin = 1,
- .lower_margin = 0,
- .hsync_len = 3,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-/*
- * Detect which LCD panel is connected, and return the appropriate
- * clcd_panel structure. Note: we do not have any information on
- * the required timings for the 8.4in panel, so we presently assume
- * VGA timings.
- */
-static struct clcd_panel *realview_clcd_panel(void)
-{
- void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET;
- struct clcd_panel *vga_panel;
- struct clcd_panel *panel;
- u32 val;
-
- if (machine_is_realview_eb())
- vga_panel = &vga;
- else
- vga_panel = &xvga;
-
- val = readl(sys_clcd) & SYS_CLCD_ID_MASK;
- if (val == SYS_CLCD_ID_SANYO_3_8)
- panel = &sanyo_3_8_in;
- else if (val == SYS_CLCD_ID_SANYO_2_5)
- panel = &sanyo_2_5_in;
- else if (val == SYS_CLCD_ID_EPSON_2_2)
- panel = &epson_2_2_in;
- else if (val == SYS_CLCD_ID_VGA)
- panel = vga_panel;
- else {
- printk(KERN_ERR "CLCD: unknown LCD panel ID 0x%08x, using VGA\n",
- val);
- panel = vga_panel;
- }
-
- return panel;
-}
-
/*
* Disable all display connectors on the interface module.
*/
@@ -565,56 +416,60 @@ static void realview_clcd_enable(struct clcd_fb *fb)
writel(val, sys_clcd);
}
+/*
+ * Detect which LCD panel is connected, and return the appropriate
+ * clcd_panel structure. Note: we do not have any information on
+ * the required timings for the 8.4in panel, so we presently assume
+ * VGA timings.
+ */
static int realview_clcd_setup(struct clcd_fb *fb)
{
+ void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET;
+ const char *panel_name, *vga_panel_name;
unsigned long framesize;
- dma_addr_t dma;
+ u32 val;
- if (machine_is_realview_eb())
+ if (machine_is_realview_eb()) {
/* VGA, 16bpp */
framesize = 640 * 480 * 2;
- else
+ vga_panel_name = "VGA";
+ } else {
/* XVGA, 16bpp */
framesize = 1024 * 768 * 2;
-
- fb->panel = realview_clcd_panel();
-
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL | GFP_DMA);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
+ vga_panel_name = "XVGA";
}
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-}
+ val = readl(sys_clcd) & SYS_CLCD_ID_MASK;
+ if (val == SYS_CLCD_ID_SANYO_3_8)
+ panel_name = "Sanyo TM38QV67A02A";
+ else if (val == SYS_CLCD_ID_SANYO_2_5)
+ panel_name = "Sanyo QVGA Portrait";
+ else if (val == SYS_CLCD_ID_EPSON_2_2)
+ panel_name = "Epson L2F50113T00";
+ else if (val == SYS_CLCD_ID_VGA)
+ panel_name = vga_panel_name;
+ else {
+ pr_err("CLCD: unknown LCD panel ID 0x%08x, using VGA\n", val);
+ panel_name = vga_panel_name;
+ }
-static int realview_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
-}
+ fb->panel = versatile_clcd_get_panel(panel_name);
+ if (!fb->panel)
+ return -EINVAL;
-static void realview_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ return versatile_clcd_setup_dma(fb, framesize);
}
struct clcd_board clcd_plat_data = {
.name = "RealView",
+ .caps = CLCD_CAP_ALL,
.check = clcdfb_check,
.decode = clcdfb_decode,
.disable = realview_clcd_disable,
.enable = realview_clcd_enable,
.setup = realview_clcd_setup,
- .mmap = realview_clcd_mmap,
- .remove = realview_clcd_remove,
+ .mmap = versatile_clcd_mmap_dma,
+ .remove = versatile_clcd_remove_dma,
};
#ifdef CONFIG_LEDS
@@ -656,12 +511,6 @@ void realview_leds_event(led_event_t ledevt)
#endif /* CONFIG_LEDS */
/*
- * The sched_clock counter
- */
-#define REFCOUNTER (__io_address(REALVIEW_SYS_BASE) + \
- REALVIEW_SYS_24MHz_OFFSET)
-
-/*
* Where is the timer (VA)?
*/
void __iomem *timer0_va_base;
@@ -676,8 +525,6 @@ void __init realview_timer_init(unsigned int timer_irq)
{
u32 val;
- versatile_sched_clock_init(REFCOUNTER, 24000000);
-
/*
* set clock frequency:
* REALVIEW_REFCLK is 32KHz
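
Two related cleanups happen in core.c: clock-table and sched_clock registration move out of a core_initcall into realview_init_early(), which the machine records below hook up via .init_early, and the private CLCD panel tables are dropped in favour of name-based lookups into the shared plat-versatile helpers. A sketch of the resulting panel path, assuming versatile_clcd_get_panel() and versatile_clcd_setup_dma() behave as used in the hunk above:

/* Sketch only: pick a panel by name and let the shared helper allocate
 * the DMA framebuffer; 640x480 at 16bpp used as an example size. */
static int example_clcd_setup(struct clcd_fb *fb)
{
	fb->panel = versatile_clcd_get_panel("VGA");
	if (!fb->panel)
		return -EINVAL;

	return versatile_clcd_setup_dma(fb, 640 * 480 * 2);
}
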
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 693239ddc39e..5c83d1e87a03 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -42,7 +42,6 @@ static struct amba_device name##_device = { \
}, \
.dma_mask = ~0, \
.irq = base##_IRQ, \
- /* .dma = base##_DMA,*/ \
}
struct machine_desc;
@@ -63,6 +62,7 @@ extern void realview_timer_init(unsigned int timer_irq);
extern int realview_flash_register(struct resource *res, u32 num);
extern int realview_eth_register(const char *name, struct resource *res);
extern int realview_usb_register(struct resource *res);
+extern void realview_init_early(void);
extern void realview_fixup(struct machine_desc *mdesc, struct tag *tags,
char **from, struct meminfo *meminfo);
extern void (*realview_reset)(char);
diff --git a/arch/arm/mach-realview/headsmp.S b/arch/arm/mach-realview/headsmp.S
deleted file mode 100644
index b34be4554d40..000000000000
--- a/arch/arm/mach-realview/headsmp.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/headsmp.S
- *
- * Copyright (c) 2003 ARM Limited
- * All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-
- __INIT
-
-/*
- * Realview specific entry point for secondary CPUs. This provides
- * a "holding pen" into which all secondary cores are held until we're
- * ready for them to initialise.
- */
-ENTRY(realview_secondary_startup)
- mrc p15, 0, r0, c0, c0, 5
- and r0, r0, #15
- adr r4, 1f
- ldmia r4, {r5, r6}
- sub r4, r4, r5
- add r6, r6, r4
-pen: ldr r7, [r6]
- cmp r7, r0
- bne pen
-
- /*
- * we've been released from the holding pen: secondary_stack
- * should now contain the SVC stack for this core
- */
- b secondary_startup
-
- .align
-1: .long .
- .long pen_release
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index 5dafc157b276..e05fc2c4c080 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -24,9 +24,9 @@
* Physical DRAM offset.
*/
#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET
-#define PHYS_OFFSET UL(0x70000000)
+#define PLAT_PHYS_OFFSET UL(0x70000000)
#else
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
#if !defined(__ASSEMBLY__) && defined(CONFIG_ZONE_DMA)
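
This rename repeats across every mach-*/include/mach/memory.h touched below: boards now define PLAT_PHYS_OFFSET and the generic ARM headers derive PHYS_OFFSET from it, which is what later allows PHYS_OFFSET to become a runtime value when the kernel patches its phys/virt conversions at boot. A simplified sketch of the relationship, not the actual <asm/memory.h> text:

/* Sketch: without runtime patching, PHYS_OFFSET just falls back to the
 * platform's build-time constant. */
#ifndef PHYS_OFFSET
#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#endif
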
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
deleted file mode 100644
index 60b4e111f459..000000000000
--- a/arch/arm/mach-realview/localtimer.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/localtimer.c
- *
- * Copyright (C) 2002 ARM Ltd.
- * All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/clockchips.h>
-
-#include <asm/irq.h>
-#include <asm/smp_twd.h>
-#include <asm/localtimer.h>
-
-/*
- * Setup the local clock events for a CPU.
- */
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
-{
- evt->irq = IRQ_LOCALTIMER;
- twd_timer_setup(evt);
-}
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 6959d13d908a..23919229e12d 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -10,44 +10,21 @@
*/
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
+#include <asm/smp_scu.h>
#include <asm/unified.h>
#include <mach/board-eb.h>
#include <mach/board-pb11mp.h>
#include <mach/board-pbx.h>
-#include <asm/smp_scu.h>
#include "core.h"
-extern void realview_secondary_startup(void);
-
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int __cpuinitdata pen_release = -1;
-
-/*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
- * or not. This is necessary for the hotplug code to work reliably.
- */
-static void __cpuinit write_pen_release(int val)
-{
- pen_release = val;
- smp_wmb();
- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
-}
+extern void versatile_secondary_startup(void);
static void __iomem *scu_base_addr(void)
{
@@ -62,75 +39,6 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)0;
}
-static DEFINE_SPINLOCK(boot_lock);
-
-void __cpuinit platform_secondary_init(unsigned int cpu)
-{
- /*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
- * let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(-1);
-
- /*
- * Synchronise with the boot thread.
- */
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
-}
-
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- unsigned long timeout;
-
- /*
- * set synchronisation state between this boot processor
- * and the secondary one
- */
- spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
- * the holding pen - release it, then wait for it to flag
- * that it has been released by resetting pen_release.
- *
- * Note that "pen_release" is the hardware CPU ID, whereas
- * "cpu" is Linux's internal ID.
- */
- write_pen_release(cpu);
-
- /*
- * Send the secondary CPU a soft interrupt, thereby causing
- * the boot monitor to read the system wide flags register,
- * and branch to the address found there.
- */
- smp_cross_call(cpumask_of(cpu), 1);
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- smp_rmb();
- if (pen_release == -1)
- break;
-
- udelay(10);
- }
-
- /*
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
-}
-
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
@@ -174,6 +82,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
+ __raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)),
__io_address(REALVIEW_SYS_FLAGSSET));
}
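
platsmp.c drops its private copy of the secondary-CPU holding pen (pen_release, write_pen_release, platform_secondary_init, boot_secondary) in favour of the shared versatile implementation; only the board-specific step of pointing the system flags register at versatile_secondary_startup remains. The handshake the removed code implemented is roughly the following, shown as a simplified sketch rather than the shared code itself:

/* Sketch: the boot CPU publishes the hardware ID of the core it wants
 * to release; each parked secondary spins until it sees its own ID.
 * The real code also flushes this variable to the point of coherency
 * so non-coherent secondaries can observe it. */
static volatile int example_pen_release = -1;

static void example_release_secondary(int hw_cpu_id)
{
	example_pen_release = hw_cpu_id;
	smp_wmb();
}
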
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 6ef5c5e528b2..2ecc1d94284e 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -144,60 +144,39 @@ static struct pl022_ssp_controller ssp0_plat_data = {
* These devices are connected via the core APB bridge
*/
#define GPIO2_IRQ { IRQ_EB_GPIO2, NO_IRQ }
-#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_EB_GPIO3, NO_IRQ }
-#define GPIO3_DMA { 0, 0 }
#define AACI_IRQ { IRQ_EB_AACI, NO_IRQ }
-#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_EB_MMCI0A, IRQ_EB_MMCI0B }
-#define MMCI0_DMA { 0x84, 0 }
#define KMI0_IRQ { IRQ_EB_KMI0, NO_IRQ }
-#define KMI0_DMA { 0, 0 }
#define KMI1_IRQ { IRQ_EB_KMI1, NO_IRQ }
-#define KMI1_DMA { 0, 0 }
/*
* These devices are connected directly to the multi-layer AHB switch
*/
#define EB_SMC_IRQ { NO_IRQ, NO_IRQ }
-#define EB_SMC_DMA { 0, 0 }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
-#define MPMC_DMA { 0, 0 }
#define EB_CLCD_IRQ { IRQ_EB_CLCD, NO_IRQ }
-#define EB_CLCD_DMA { 0, 0 }
#define DMAC_IRQ { IRQ_EB_DMA, NO_IRQ }
-#define DMAC_DMA { 0, 0 }
/*
* These devices are connected via the core APB bridge
*/
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
-#define SCTL_DMA { 0, 0 }
#define EB_WATCHDOG_IRQ { IRQ_EB_WDOG, NO_IRQ }
-#define EB_WATCHDOG_DMA { 0, 0 }
#define EB_GPIO0_IRQ { IRQ_EB_GPIO0, NO_IRQ }
-#define EB_GPIO0_DMA { 0, 0 }
#define GPIO1_IRQ { IRQ_EB_GPIO1, NO_IRQ }
-#define GPIO1_DMA { 0, 0 }
#define EB_RTC_IRQ { IRQ_EB_RTC, NO_IRQ }
-#define EB_RTC_DMA { 0, 0 }
/*
* These devices are connected via the DMA APB bridge
*/
#define SCI_IRQ { IRQ_EB_SCI, NO_IRQ }
-#define SCI_DMA { 7, 6 }
#define EB_UART0_IRQ { IRQ_EB_UART0, NO_IRQ }
-#define EB_UART0_DMA { 15, 14 }
#define EB_UART1_IRQ { IRQ_EB_UART1, NO_IRQ }
-#define EB_UART1_DMA { 13, 12 }
#define EB_UART2_IRQ { IRQ_EB_UART2, NO_IRQ }
-#define EB_UART2_DMA { 11, 10 }
#define EB_UART3_IRQ { IRQ_EB_UART3, NO_IRQ }
-#define EB_UART3_DMA { 0x86, 0x87 }
#define EB_SSP_IRQ { IRQ_EB_SSP, NO_IRQ }
-#define EB_SSP_DMA { 9, 8 }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
@@ -484,9 +463,10 @@ static void __init realview_eb_init(void)
MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.fixup = realview_fixup,
.map_io = realview_eb_map_io,
+ .init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_eb_timer,
.init_machine = realview_eb_init,
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index cbdc97a5685f..eab6070f66d0 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -134,47 +134,26 @@ static struct pl022_ssp_controller ssp0_plat_data = {
* RealView PB1176 AMBA devices
*/
#define GPIO2_IRQ { IRQ_PB1176_GPIO2, NO_IRQ }
-#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_PB1176_GPIO3, NO_IRQ }
-#define GPIO3_DMA { 0, 0 }
#define AACI_IRQ { IRQ_PB1176_AACI, NO_IRQ }
-#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_PB1176_MMCI0A, IRQ_PB1176_MMCI0B }
-#define MMCI0_DMA { 0x84, 0 }
#define KMI0_IRQ { IRQ_PB1176_KMI0, NO_IRQ }
-#define KMI0_DMA { 0, 0 }
#define KMI1_IRQ { IRQ_PB1176_KMI1, NO_IRQ }
-#define KMI1_DMA { 0, 0 }
#define PB1176_SMC_IRQ { NO_IRQ, NO_IRQ }
-#define PB1176_SMC_DMA { 0, 0 }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
-#define MPMC_DMA { 0, 0 }
#define PB1176_CLCD_IRQ { IRQ_DC1176_CLCD, NO_IRQ }
-#define PB1176_CLCD_DMA { 0, 0 }
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
-#define SCTL_DMA { 0, 0 }
#define PB1176_WATCHDOG_IRQ { IRQ_DC1176_WATCHDOG, NO_IRQ }
-#define PB1176_WATCHDOG_DMA { 0, 0 }
#define PB1176_GPIO0_IRQ { IRQ_PB1176_GPIO0, NO_IRQ }
-#define PB1176_GPIO0_DMA { 0, 0 }
#define GPIO1_IRQ { IRQ_PB1176_GPIO1, NO_IRQ }
-#define GPIO1_DMA { 0, 0 }
#define PB1176_RTC_IRQ { IRQ_DC1176_RTC, NO_IRQ }
-#define PB1176_RTC_DMA { 0, 0 }
#define SCI_IRQ { IRQ_PB1176_SCI, NO_IRQ }
-#define SCI_DMA { 7, 6 }
#define PB1176_UART0_IRQ { IRQ_DC1176_UART0, NO_IRQ }
-#define PB1176_UART0_DMA { 15, 14 }
#define PB1176_UART1_IRQ { IRQ_DC1176_UART1, NO_IRQ }
-#define PB1176_UART1_DMA { 13, 12 }
#define PB1176_UART2_IRQ { IRQ_DC1176_UART2, NO_IRQ }
-#define PB1176_UART2_DMA { 11, 10 }
#define PB1176_UART3_IRQ { IRQ_DC1176_UART3, NO_IRQ }
-#define PB1176_UART3_DMA { 0x86, 0x87 }
#define PB1176_UART4_IRQ { IRQ_PB1176_UART4, NO_IRQ }
-#define PB1176_UART4_DMA { 0, 0 }
#define PB1176_SSP_IRQ { IRQ_DC1176_SSP, NO_IRQ }
-#define PB1176_SSP_DMA { 9, 8 }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
@@ -379,9 +358,10 @@ static void __init realview_pb1176_init(void)
MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.fixup = realview_pb1176_fixup,
.map_io = realview_pb1176_map_io,
+ .init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pb1176_timer,
.init_machine = realview_pb1176_init,
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 8e8ab7d29a6a..b2985fc7cd4e 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -136,47 +136,26 @@ static struct pl022_ssp_controller ssp0_plat_data = {
*/
#define GPIO2_IRQ { IRQ_PB11MP_GPIO2, NO_IRQ }
-#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_PB11MP_GPIO3, NO_IRQ }
-#define GPIO3_DMA { 0, 0 }
#define AACI_IRQ { IRQ_TC11MP_AACI, NO_IRQ }
-#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_TC11MP_MMCI0A, IRQ_TC11MP_MMCI0B }
-#define MMCI0_DMA { 0x84, 0 }
#define KMI0_IRQ { IRQ_TC11MP_KMI0, NO_IRQ }
-#define KMI0_DMA { 0, 0 }
#define KMI1_IRQ { IRQ_TC11MP_KMI1, NO_IRQ }
-#define KMI1_DMA { 0, 0 }
#define PB11MP_SMC_IRQ { NO_IRQ, NO_IRQ }
-#define PB11MP_SMC_DMA { 0, 0 }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
-#define MPMC_DMA { 0, 0 }
#define PB11MP_CLCD_IRQ { IRQ_PB11MP_CLCD, NO_IRQ }
-#define PB11MP_CLCD_DMA { 0, 0 }
#define DMAC_IRQ { IRQ_PB11MP_DMAC, NO_IRQ }
-#define DMAC_DMA { 0, 0 }
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
-#define SCTL_DMA { 0, 0 }
#define PB11MP_WATCHDOG_IRQ { IRQ_PB11MP_WATCHDOG, NO_IRQ }
-#define PB11MP_WATCHDOG_DMA { 0, 0 }
#define PB11MP_GPIO0_IRQ { IRQ_PB11MP_GPIO0, NO_IRQ }
-#define PB11MP_GPIO0_DMA { 0, 0 }
#define GPIO1_IRQ { IRQ_PB11MP_GPIO1, NO_IRQ }
-#define GPIO1_DMA { 0, 0 }
#define PB11MP_RTC_IRQ { IRQ_TC11MP_RTC, NO_IRQ }
-#define PB11MP_RTC_DMA { 0, 0 }
#define SCI_IRQ { IRQ_PB11MP_SCI, NO_IRQ }
-#define SCI_DMA { 7, 6 }
#define PB11MP_UART0_IRQ { IRQ_TC11MP_UART0, NO_IRQ }
-#define PB11MP_UART0_DMA { 15, 14 }
#define PB11MP_UART1_IRQ { IRQ_TC11MP_UART1, NO_IRQ }
-#define PB11MP_UART1_DMA { 13, 12 }
#define PB11MP_UART2_IRQ { IRQ_PB11MP_UART2, NO_IRQ }
-#define PB11MP_UART2_DMA { 11, 10 }
#define PB11MP_UART3_IRQ { IRQ_PB11MP_UART3, NO_IRQ }
-#define PB11MP_UART3_DMA { 0x86, 0x87 }
#define PB11MP_SSP_IRQ { IRQ_PB11MP_SSP, NO_IRQ }
-#define PB11MP_SSP_DMA { 9, 8 }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
@@ -381,9 +360,10 @@ static void __init realview_pb11mp_init(void)
MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.fixup = realview_fixup,
.map_io = realview_pb11mp_map_io,
+ .init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pb11mp_timer,
.init_machine = realview_pb11mp_init,
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index 841118e3e118..fb6866558760 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -126,47 +126,26 @@ static struct pl022_ssp_controller ssp0_plat_data = {
*/
#define GPIO2_IRQ { IRQ_PBA8_GPIO2, NO_IRQ }
-#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_PBA8_GPIO3, NO_IRQ }
-#define GPIO3_DMA { 0, 0 }
#define AACI_IRQ { IRQ_PBA8_AACI, NO_IRQ }
-#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_PBA8_MMCI0A, IRQ_PBA8_MMCI0B }
-#define MMCI0_DMA { 0x84, 0 }
#define KMI0_IRQ { IRQ_PBA8_KMI0, NO_IRQ }
-#define KMI0_DMA { 0, 0 }
#define KMI1_IRQ { IRQ_PBA8_KMI1, NO_IRQ }
-#define KMI1_DMA { 0, 0 }
#define PBA8_SMC_IRQ { NO_IRQ, NO_IRQ }
-#define PBA8_SMC_DMA { 0, 0 }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
-#define MPMC_DMA { 0, 0 }
#define PBA8_CLCD_IRQ { IRQ_PBA8_CLCD, NO_IRQ }
-#define PBA8_CLCD_DMA { 0, 0 }
#define DMAC_IRQ { IRQ_PBA8_DMAC, NO_IRQ }
-#define DMAC_DMA { 0, 0 }
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
-#define SCTL_DMA { 0, 0 }
#define PBA8_WATCHDOG_IRQ { IRQ_PBA8_WATCHDOG, NO_IRQ }
-#define PBA8_WATCHDOG_DMA { 0, 0 }
#define PBA8_GPIO0_IRQ { IRQ_PBA8_GPIO0, NO_IRQ }
-#define PBA8_GPIO0_DMA { 0, 0 }
#define GPIO1_IRQ { IRQ_PBA8_GPIO1, NO_IRQ }
-#define GPIO1_DMA { 0, 0 }
#define PBA8_RTC_IRQ { IRQ_PBA8_RTC, NO_IRQ }
-#define PBA8_RTC_DMA { 0, 0 }
#define SCI_IRQ { IRQ_PBA8_SCI, NO_IRQ }
-#define SCI_DMA { 7, 6 }
#define PBA8_UART0_IRQ { IRQ_PBA8_UART0, NO_IRQ }
-#define PBA8_UART0_DMA { 15, 14 }
#define PBA8_UART1_IRQ { IRQ_PBA8_UART1, NO_IRQ }
-#define PBA8_UART1_DMA { 13, 12 }
#define PBA8_UART2_IRQ { IRQ_PBA8_UART2, NO_IRQ }
-#define PBA8_UART2_DMA { 11, 10 }
#define PBA8_UART3_IRQ { IRQ_PBA8_UART3, NO_IRQ }
-#define PBA8_UART3_DMA { 0x86, 0x87 }
#define PBA8_SSP_IRQ { IRQ_PBA8_SSP, NO_IRQ }
-#define PBA8_SSP_DMA { 9, 8 }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
@@ -331,9 +310,10 @@ static void __init realview_pba8_init(void)
MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.fixup = realview_fixup,
.map_io = realview_pba8_map_io,
+ .init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pba8_timer,
.init_machine = realview_pba8_init,
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 02b755b009db..92ace2cf2b2c 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -148,47 +148,26 @@ static struct pl022_ssp_controller ssp0_plat_data = {
*/
#define GPIO2_IRQ { IRQ_PBX_GPIO2, NO_IRQ }
-#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_PBX_GPIO3, NO_IRQ }
-#define GPIO3_DMA { 0, 0 }
#define AACI_IRQ { IRQ_PBX_AACI, NO_IRQ }
-#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_PBX_MMCI0A, IRQ_PBX_MMCI0B }
-#define MMCI0_DMA { 0x84, 0 }
#define KMI0_IRQ { IRQ_PBX_KMI0, NO_IRQ }
-#define KMI0_DMA { 0, 0 }
#define KMI1_IRQ { IRQ_PBX_KMI1, NO_IRQ }
-#define KMI1_DMA { 0, 0 }
#define PBX_SMC_IRQ { NO_IRQ, NO_IRQ }
-#define PBX_SMC_DMA { 0, 0 }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
-#define MPMC_DMA { 0, 0 }
#define PBX_CLCD_IRQ { IRQ_PBX_CLCD, NO_IRQ }
-#define PBX_CLCD_DMA { 0, 0 }
#define DMAC_IRQ { IRQ_PBX_DMAC, NO_IRQ }
-#define DMAC_DMA { 0, 0 }
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
-#define SCTL_DMA { 0, 0 }
#define PBX_WATCHDOG_IRQ { IRQ_PBX_WATCHDOG, NO_IRQ }
-#define PBX_WATCHDOG_DMA { 0, 0 }
#define PBX_GPIO0_IRQ { IRQ_PBX_GPIO0, NO_IRQ }
-#define PBX_GPIO0_DMA { 0, 0 }
#define GPIO1_IRQ { IRQ_PBX_GPIO1, NO_IRQ }
-#define GPIO1_DMA { 0, 0 }
#define PBX_RTC_IRQ { IRQ_PBX_RTC, NO_IRQ }
-#define PBX_RTC_DMA { 0, 0 }
#define SCI_IRQ { IRQ_PBX_SCI, NO_IRQ }
-#define SCI_DMA { 7, 6 }
#define PBX_UART0_IRQ { IRQ_PBX_UART0, NO_IRQ }
-#define PBX_UART0_DMA { 15, 14 }
#define PBX_UART1_IRQ { IRQ_PBX_UART1, NO_IRQ }
-#define PBX_UART1_DMA { 13, 12 }
#define PBX_UART2_IRQ { IRQ_PBX_UART2, NO_IRQ }
-#define PBX_UART2_DMA { 11, 10 }
#define PBX_UART3_IRQ { IRQ_PBX_UART3, NO_IRQ }
-#define PBX_UART3_DMA { 0x86, 0x87 }
#define PBX_SSP_IRQ { IRQ_PBX_SSP, NO_IRQ }
-#define PBX_SSP_DMA { 9, 8 }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
@@ -414,9 +393,10 @@ static void __init realview_pbx_init(void)
MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.fixup = realview_pbx_fixup,
.map_io = realview_pbx_map_io,
+ .init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pbx_timer,
.init_machine = realview_pbx_init,
diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h
index 78191bf25192..18a221093bf5 100644
--- a/arch/arm/mach-rpc/include/mach/memory.h
+++ b/arch/arm/mach-rpc/include/mach/memory.h
@@ -21,7 +21,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x10000000)
+#define PLAT_PHYS_OFFSET UL(0x10000000)
/*
* Cache flushing area - ROM
diff --git a/arch/arm/mach-s3c2400/include/mach/memory.h b/arch/arm/mach-s3c2400/include/mach/memory.h
index cf5901ffd385..3f33670dd012 100644
--- a/arch/arm/mach-s3c2400/include/mach/memory.h
+++ b/arch/arm/mach-s3c2400/include/mach/memory.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x0C000000)
+#define PLAT_PHYS_OFFSET UL(0x0C000000)
#endif
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index 6b86a722a7db..2c126bbca08d 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -18,12 +18,14 @@
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/rfkill.h>
+#include <linux/leds.h>
#include <mach/regs-gpio.h>
#include <mach/hardware.h>
#include <mach/h1940-latch.h>
+#include <mach/h1940.h>
-#define DRV_NAME "h1940-bt"
+#define DRV_NAME "h1940-bt"
/* Bluetooth control */
static void h1940bt_enable(int on)
@@ -37,6 +39,8 @@ static void h1940bt_enable(int on)
gpio_set_value(S3C2410_GPH(1), 1);
mdelay(10);
gpio_set_value(S3C2410_GPH(1), 0);
+
+ h1940_led_blink_set(-EINVAL, GPIO_LED_BLINK, NULL, NULL);
}
else {
gpio_set_value(S3C2410_GPH(1), 1);
@@ -44,6 +48,8 @@ static void h1940bt_enable(int on)
gpio_set_value(S3C2410_GPH(1), 0);
mdelay(10);
gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 0);
+
+ h1940_led_blink_set(-EINVAL, GPIO_LED_NO_BLINK_LOW, NULL, NULL);
}
}
@@ -85,7 +91,6 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
s3c_gpio_setpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
-
rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
&h1940bt_rfkill_ops, NULL);
if (!rfk) {
@@ -93,8 +98,6 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
goto err_rfk_alloc;
}
- rfkill_set_led_trigger_name(rfk, "h1940-bluetooth");
-
ret = rfkill_register(rfk);
if (ret)
goto err_rfkill;
diff --git a/arch/arm/mach-s3c2410/include/mach/h1940.h b/arch/arm/mach-s3c2410/include/mach/h1940.h
index 4559784129c0..2aa683c8d3d6 100644
--- a/arch/arm/mach-s3c2410/include/mach/h1940.h
+++ b/arch/arm/mach-s3c2410/include/mach/h1940.h
@@ -17,5 +17,8 @@
#define H1940_SUSPEND_CHECK (0x30080000)
extern void h1940_pm_return(void);
+extern int h1940_led_blink_set(unsigned gpio, int state,
+ unsigned long *delay_on, unsigned long *delay_off);
+
#endif /* __ASM_ARCH_H1940_H */
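
Exporting h1940_led_blink_set() here is what lets the bluetooth driver above blink the LED latch directly. Passing an invalid GPIO selects the latch-only path in the helper, and the delay pointers may be NULL; the call mirrors the one added in h1940-bluetooth.c:

	h1940_led_blink_set(-EINVAL, GPIO_LED_BLINK, NULL, NULL);
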
diff --git a/arch/arm/mach-s3c2410/include/mach/memory.h b/arch/arm/mach-s3c2410/include/mach/memory.h
index 6f1e5871ae4b..f92b97b89c0c 100644
--- a/arch/arm/mach-s3c2410/include/mach/memory.h
+++ b/arch/arm/mach-s3c2410/include/mach/memory.h
@@ -11,6 +11,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x30000000)
+#define PLAT_PHYS_OFFSET UL(0x30000000)
#endif
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index 1a81fe12ccd7..5531c4cd5477 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -23,8 +23,15 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
#include <linux/pwm_backlight.h>
#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/pda_power.h>
+#include <linux/s3c_adc_battery.h>
+#include <linux/delay.h>
+
#include <video/platform_lcd.h>
#include <linux/mmc/host.h>
@@ -222,20 +229,238 @@ static struct s3c2410fb_mach_info h1940_fb_info __initdata = {
.num_displays = 1,
.default_display = 0,
- .lpcsel= 0x02,
- .gpccon= 0xaa940659,
- .gpccon_mask= 0xffffffff,
- .gpcup= 0x0000ffff,
- .gpcup_mask= 0xffffffff,
- .gpdcon= 0xaa84aaa0,
- .gpdcon_mask= 0xffffffff,
- .gpdup= 0x0000faff,
- .gpdup_mask= 0xffffffff,
+ .lpcsel = 0x02,
+ .gpccon = 0xaa940659,
+ .gpccon_mask = 0xffffc0f0,
+ .gpcup = 0x0000ffff,
+ .gpcup_mask = 0xffffffff,
+ .gpdcon = 0xaa84aaa0,
+ .gpdcon_mask = 0xffffffff,
+ .gpdup = 0x0000faff,
+ .gpdup_mask = 0xffffffff,
};
-static struct platform_device h1940_device_leds = {
- .name = "h1940-leds",
+static int power_supply_init(struct device *dev)
+{
+ return gpio_request(S3C2410_GPF(2), "cable plugged");
+}
+
+static int h1940_is_ac_online(void)
+{
+ return !gpio_get_value(S3C2410_GPF(2));
+}
+
+static void power_supply_exit(struct device *dev)
+{
+ gpio_free(S3C2410_GPF(2));
+}
+
+static char *h1940_supplicants[] = {
+ "main-battery",
+ "backup-battery",
+};
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = h1940_is_ac_online,
+ .exit = power_supply_exit,
+ .supplied_to = h1940_supplicants,
+ .num_supplicants = ARRAY_SIZE(h1940_supplicants),
+};
+
+static struct resource power_supply_resources[] = {
+ [0] = {
+ .name = "ac",
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE |
+ IORESOURCE_IRQ_HIGHEDGE,
+ .start = IRQ_EINT2,
+ .end = IRQ_EINT2,
+ },
+};
+
+static struct platform_device power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data =
+ &power_supply_info,
+ },
+ .resource = power_supply_resources,
+ .num_resources = ARRAY_SIZE(power_supply_resources),
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_noac[] = {
+ { .volt = 4070, .cur = 162, .level = 100},
+ { .volt = 4040, .cur = 165, .level = 95},
+ { .volt = 4016, .cur = 164, .level = 90},
+ { .volt = 3996, .cur = 166, .level = 85},
+ { .volt = 3971, .cur = 168, .level = 80},
+ { .volt = 3951, .cur = 168, .level = 75},
+ { .volt = 3931, .cur = 170, .level = 70},
+ { .volt = 3903, .cur = 172, .level = 65},
+ { .volt = 3886, .cur = 172, .level = 60},
+ { .volt = 3858, .cur = 176, .level = 55},
+ { .volt = 3842, .cur = 176, .level = 50},
+ { .volt = 3818, .cur = 176, .level = 45},
+ { .volt = 3789, .cur = 180, .level = 40},
+ { .volt = 3769, .cur = 180, .level = 35},
+ { .volt = 3749, .cur = 184, .level = 30},
+ { .volt = 3732, .cur = 184, .level = 25},
+ { .volt = 3716, .cur = 184, .level = 20},
+ { .volt = 3708, .cur = 184, .level = 15},
+ { .volt = 3716, .cur = 96, .level = 10},
+ { .volt = 3700, .cur = 96, .level = 5},
+ { .volt = 3684, .cur = 96, .level = 0},
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_acin[] = {
+ { .volt = 4130, .cur = 0, .level = 100},
+ { .volt = 3982, .cur = 0, .level = 50},
+ { .volt = 3854, .cur = 0, .level = 10},
+ { .volt = 3841, .cur = 0, .level = 0},
+};
+
+int h1940_bat_init(void)
+{
+ int ret;
+
+ ret = gpio_request(H1940_LATCH_SM803_ENABLE, "h1940-charger-enable");
+ if (ret)
+ return ret;
+ gpio_direction_output(H1940_LATCH_SM803_ENABLE, 0);
+
+ return 0;
+
+}
+
+void h1940_bat_exit(void)
+{
+ gpio_free(H1940_LATCH_SM803_ENABLE);
+}
+
+void h1940_enable_charger(void)
+{
+ gpio_set_value(H1940_LATCH_SM803_ENABLE, 1);
+}
+
+void h1940_disable_charger(void)
+{
+ gpio_set_value(H1940_LATCH_SM803_ENABLE, 0);
+}
+
+static struct s3c_adc_bat_pdata h1940_bat_cfg = {
+ .init = h1940_bat_init,
+ .exit = h1940_bat_exit,
+ .enable_charger = h1940_enable_charger,
+ .disable_charger = h1940_disable_charger,
+ .gpio_charge_finished = S3C2410_GPF(3),
+ .gpio_inverted = 1,
+ .lut_noac = bat_lut_noac,
+ .lut_noac_cnt = ARRAY_SIZE(bat_lut_noac),
+ .lut_acin = bat_lut_acin,
+ .lut_acin_cnt = ARRAY_SIZE(bat_lut_acin),
+ .volt_channel = 0,
+ .current_channel = 1,
+ .volt_mult = 4056,
+ .current_mult = 1893,
+ .internal_impedance = 200,
+ .backup_volt_channel = 3,
+ /* TODO Check backup volt multiplier */
+ .backup_volt_mult = 4056,
+ .backup_volt_min = 0,
+ .backup_volt_max = 4149288
+};
+
+static struct platform_device h1940_battery = {
+ .name = "s3c-adc-battery",
.id = -1,
+ .dev = {
+ .parent = &s3c_device_adc.dev,
+ .platform_data = &h1940_bat_cfg,
+ },
+};
+
+DEFINE_SPINLOCK(h1940_blink_spin);
+
+int h1940_led_blink_set(unsigned gpio, int state,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ int blink_gpio, check_gpio1, check_gpio2;
+
+ switch (gpio) {
+ case H1940_LATCH_LED_GREEN:
+ blink_gpio = S3C2410_GPA(7);
+ check_gpio1 = S3C2410_GPA(1);
+ check_gpio2 = S3C2410_GPA(3);
+ break;
+ case H1940_LATCH_LED_RED:
+ blink_gpio = S3C2410_GPA(1);
+ check_gpio1 = S3C2410_GPA(7);
+ check_gpio2 = S3C2410_GPA(3);
+ break;
+ default:
+ blink_gpio = S3C2410_GPA(3);
+ check_gpio1 = S3C2410_GPA(1);
+ check_gpio2 = S3C2410_GPA(7);
+ break;
+ }
+
+ if (delay_on && delay_off && !*delay_on && !*delay_off)
+ *delay_on = *delay_off = 500;
+
+ spin_lock(&h1940_blink_spin);
+
+ switch (state) {
+ case GPIO_LED_NO_BLINK_LOW:
+ case GPIO_LED_NO_BLINK_HIGH:
+ if (!gpio_get_value(check_gpio1) &&
+ !gpio_get_value(check_gpio2))
+ gpio_set_value(H1940_LATCH_LED_FLASH, 0);
+ gpio_set_value(blink_gpio, 0);
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, state);
+ break;
+ case GPIO_LED_BLINK:
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, 0);
+ gpio_set_value(H1940_LATCH_LED_FLASH, 1);
+ gpio_set_value(blink_gpio, 1);
+ break;
+ }
+
+ spin_unlock(&h1940_blink_spin);
+
+ return 0;
+}
+EXPORT_SYMBOL(h1940_led_blink_set);
+
+static struct gpio_led h1940_leds_desc[] = {
+ {
+ .name = "Green",
+ .default_trigger = "main-battery-charging-or-full",
+ .gpio = H1940_LATCH_LED_GREEN,
+ .retain_state_suspended = 1,
+ },
+ {
+ .name = "Red",
+ .default_trigger = "main-battery-full",
+ .gpio = H1940_LATCH_LED_RED,
+ .retain_state_suspended = 1,
+ },
+};
+
+static struct gpio_led_platform_data h1940_leds_pdata = {
+ .num_leds = ARRAY_SIZE(h1940_leds_desc),
+ .leds = h1940_leds_desc,
+ .gpio_blink_set = h1940_led_blink_set,
+};
+
+static struct platform_device h1940_device_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &h1940_leds_pdata,
+ },
};
static struct platform_device h1940_device_bluetooth = {
@@ -321,14 +546,14 @@ static struct platform_device h1940_backlight = {
static void h1940_lcd_power_set(struct plat_lcd_data *pd,
unsigned int power)
{
- int value;
+ int value, retries = 100;
if (!power) {
gpio_set_value(S3C2410_GPC(0), 0);
/* wait for 3ac */
do {
value = gpio_get_value(S3C2410_GPC(6));
- } while (value);
+ } while (value && retries--);
gpio_set_value(H1940_LATCH_LCD_P2, 0);
gpio_set_value(H1940_LATCH_LCD_P3, 0);
@@ -346,6 +571,9 @@ static void h1940_lcd_power_set(struct plat_lcd_data *pd,
gpio_set_value(H1940_LATCH_LCD_P0, 1);
gpio_set_value(H1940_LATCH_LCD_P1, 1);
+ gpio_direction_input(S3C2410_GPC(1));
+ gpio_direction_input(S3C2410_GPC(4));
+ mdelay(10);
s3c_gpio_cfgpin(S3C2410_GPC(1), S3C_GPIO_SFN(2));
s3c_gpio_cfgpin(S3C2410_GPC(4), S3C_GPIO_SFN(2));
@@ -381,7 +609,44 @@ static struct i2c_board_info h1940_i2c_devices[] = {
},
};
+#define DECLARE_BUTTON(p, k, n, w) \
+ { \
+ .gpio = p, \
+ .code = k, \
+ .desc = n, \
+ .wakeup = w, \
+ .active_low = 1, \
+ }
+
+static struct gpio_keys_button h1940_buttons[] = {
+ DECLARE_BUTTON(S3C2410_GPF(0), KEY_POWER, "Power", 1),
+ DECLARE_BUTTON(S3C2410_GPF(6), KEY_ENTER, "Select", 1),
+ DECLARE_BUTTON(S3C2410_GPF(7), KEY_RECORD, "Record", 0),
+ DECLARE_BUTTON(S3C2410_GPG(0), KEY_F11, "Calendar", 0),
+ DECLARE_BUTTON(S3C2410_GPG(2), KEY_F12, "Contacts", 0),
+ DECLARE_BUTTON(S3C2410_GPG(3), KEY_MAIL, "Mail", 0),
+ DECLARE_BUTTON(S3C2410_GPG(6), KEY_LEFT, "Left_arrow", 0),
+ DECLARE_BUTTON(S3C2410_GPG(7), KEY_HOMEPAGE, "Home", 0),
+ DECLARE_BUTTON(S3C2410_GPG(8), KEY_RIGHT, "Right_arrow", 0),
+ DECLARE_BUTTON(S3C2410_GPG(9), KEY_UP, "Up_arrow", 0),
+ DECLARE_BUTTON(S3C2410_GPG(10), KEY_DOWN, "Down_arrow", 0),
+};
+
+static struct gpio_keys_platform_data h1940_buttons_data = {
+ .buttons = h1940_buttons,
+ .nbuttons = ARRAY_SIZE(h1940_buttons),
+};
+
+static struct platform_device h1940_dev_buttons = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &h1940_buttons_data,
+ }
+};
+
static struct platform_device *h1940_devices[] __initdata = {
+ &h1940_dev_buttons,
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
@@ -398,6 +663,8 @@ static struct platform_device *h1940_devices[] __initdata = {
&h1940_lcd_powerdev,
&s3c_device_adc,
&s3c_device_ts,
+ &power_supply,
+ &h1940_battery,
};
static void __init h1940_map_io(void)
@@ -483,6 +750,15 @@ static void __init h1940_init(void)
platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));
+ gpio_request(S3C2410_GPA(1), "Red LED blink");
+ gpio_request(S3C2410_GPA(3), "Blue LED blink");
+ gpio_request(S3C2410_GPA(7), "Green LED blink");
+ gpio_request(H1940_LATCH_LED_FLASH, "LED blink");
+ gpio_direction_output(S3C2410_GPA(1), 0);
+ gpio_direction_output(S3C2410_GPA(3), 0);
+ gpio_direction_output(S3C2410_GPA(7), 0);
+ gpio_direction_output(H1940_LATCH_LED_FLASH, 0);
+
i2c_register_board_info(0, h1940_i2c_devices,
ARRAY_SIZE(h1940_i2c_devices));
}
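
The H1940 board file gains gpio-keys buttons, a pda-power AC supply, an s3c-adc-battery with separate charge/discharge lookup tables, and hardware LED blinking through the gpio_blink_set hook of leds-gpio. That hook's contract is the non-obvious part: leds-gpio calls it with the LED's GPIO and a GPIO_LED_* state, and the callback either arranges hardware blinking (returning 0 and optionally filling in the delays) or returns an error so the core falls back to software blinking. A minimal sketch under those assumptions; example_set_blink_hw() is a hypothetical board helper:

static void example_set_blink_hw(int enable);	/* hypothetical latch control */

static int example_blink_set(unsigned gpio, int state,
			     unsigned long *delay_on,
			     unsigned long *delay_off)
{
	if (delay_on && delay_off && !*delay_on && !*delay_off)
		*delay_on = *delay_off = 500;	/* default period, ms */

	/* GPIO_LED_BLINK: hand the LED to the blink hardware;
	 * GPIO_LED_NO_BLINK_*: stop blinking and set the plain level. */
	example_set_blink_hw(state == GPIO_LED_BLINK);
	if (state != GPIO_LED_BLINK && gpio_is_valid(gpio))
		gpio_set_value(gpio, state);

	return 0;
}
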
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index a0cb2581894f..50825a3f91cc 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
select POWER_SUPPLY
select MACH_NEO1973
select S3C2410_PWM
+ select S3C_DEV_USB_HOST
help
Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
diff --git a/arch/arm/mach-s3c2440/include/mach/gta02.h b/arch/arm/mach-s3c2440/include/mach/gta02.h
index 953331d8d56a..3a56a229cac6 100644
--- a/arch/arm/mach-s3c2440/include/mach/gta02.h
+++ b/arch/arm/mach-s3c2440/include/mach/gta02.h
@@ -44,19 +44,19 @@
#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
-#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2
-#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4
-#define GTA02_GPIO_3D_RESET S3C2440_GPJ5
-#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7
-#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8
-#define GTA02_GPIO_KEEPACT S3C2440_GPJ8
-#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10
-#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2)
+#define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4)
+#define GTA02_GPIO_3D_RESET S3C2410_GPJ(5)
+#define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7)
+#define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8)
+#define GTA02_GPIO_KEEPACT S3C2410_GPJ(8)
+#define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10)
+#define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
#define GTA02_IRQ_MODEM IRQ_EINT1
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 9f2c14ec7181..d217ef3cd86a 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -58,6 +58,9 @@
#include <linux/mfd/pcf50633/pmic.h>
#include <linux/mfd/pcf50633/backlight.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -86,6 +89,8 @@
#include <plat/udc.h>
#include <plat/gpio-cfg.h>
#include <plat/iic.h>
+#include <plat/ts.h>
+
static struct pcf50633 *gta02_pcf;
@@ -280,9 +285,6 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.valid_modes_mask = REGULATOR_MODE_NORMAL,
.always_on = 1,
.apply_uV = 1,
- .state_mem = {
- .enabled = 1,
- },
},
},
[PCF50633_REGULATOR_DOWN1] = {
@@ -301,9 +303,6 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.valid_modes_mask = REGULATOR_MODE_NORMAL,
.apply_uV = 1,
.always_on = 1,
- .state_mem = {
- .enabled = 1,
- },
},
},
[PCF50633_REGULATOR_HCLDO] = {
@@ -311,8 +310,8 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.min_uV = 2000000,
.max_uV = 3300000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
- .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
},
},
[PCF50633_REGULATOR_LDO1] = {
@@ -320,10 +319,8 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.min_uV = 3300000,
.max_uV = 3300000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
.apply_uV = 1,
- .state_mem = {
- .enabled = 0,
- },
},
},
[PCF50633_REGULATOR_LDO2] = {
@@ -347,6 +344,7 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.min_uV = 3200000,
.max_uV = 3200000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
.apply_uV = 1,
},
},
@@ -355,10 +353,8 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.min_uV = 3000000,
.max_uV = 3000000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
.apply_uV = 1,
- .state_mem = {
- .enabled = 1,
- },
},
},
[PCF50633_REGULATOR_LDO6] = {
@@ -373,9 +369,6 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.min_uV = 1800000,
.max_uV = 1800000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
- .state_mem = {
- .enabled = 1,
- },
},
},
@@ -489,6 +482,43 @@ static struct s3c2410_hcd_info gta02_usb_info __initdata = {
},
};
+/* Touchscreen */
+static struct s3c2410_ts_mach_info gta02_ts_info = {
+ .delay = 10000,
+ .presc = 0xff, /* slow as we can go */
+ .oversampling_shift = 2,
+};
+
+/* Buttons */
+static struct gpio_keys_button gta02_buttons[] = {
+ {
+ .gpio = GTA02_GPIO_AUX_KEY,
+ .code = KEY_PHONE,
+ .desc = "Aux",
+ .type = EV_KEY,
+ .debounce_interval = 100,
+ },
+ {
+ .gpio = GTA02_GPIO_HOLD_KEY,
+ .code = KEY_PAUSE,
+ .desc = "Hold",
+ .type = EV_KEY,
+ .debounce_interval = 100,
+ },
+};
+
+static struct gpio_keys_platform_data gta02_buttons_pdata = {
+ .buttons = gta02_buttons,
+ .nbuttons = ARRAY_SIZE(gta02_buttons),
+};
+
+static struct platform_device gta02_buttons_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &gta02_buttons_pdata,
+ },
+};
static void __init gta02_map_io(void)
{
@@ -509,7 +539,11 @@ static struct platform_device *gta02_devices[] __initdata = {
&gta02_nor_flash,
&s3c24xx_pwm_device,
&s3c_device_iis,
+ &samsung_asoc_dma,
&s3c_device_i2c0,
+ &gta02_buttons_device,
+ &s3c_device_adc,
+ &s3c_device_ts,
};
/* These guys DO need to be children of PMU. */
@@ -559,6 +593,7 @@ static void __init gta02_machine_init(void)
#endif
s3c24xx_udc_set_platdata(&gta02_udc_cfg);
+ s3c24xx_ts_set_platdata(&gta02_ts_info);
s3c_ohci_set_platdata(&gta02_usb_info);
s3c_nand_set_platdata(&gta02_nand_info);
s3c_i2c0_set_platdata(NULL);
@@ -567,6 +602,8 @@ static void __init gta02_machine_init(void)
platform_add_devices(gta02_devices, ARRAY_SIZE(gta02_devices));
pm_power_off = gta02_poweroff;
+
+ regulator_has_full_constraints();
}
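
Beyond the new buttons and touchscreen platform data, the GTA02 regulator setup is tightened: the always-on state_mem blocks go away, regulators that may legitimately be switched gain REGULATOR_CHANGE_STATUS, and machine init now ends by declaring the constraints complete. That final call is what allows the regulator core to disable supplies no consumer claimed; a sketch of the pattern, assuming it is made only after every consumer has been registered:

/* Sketch: once the board's regulator description is complete, tell the
 * core so it may power down anything left unused late in boot. */
static void __init example_machine_init(void)
{
	/* ... add platform devices and regulator consumers here ... */
	regulator_has_full_constraints();
}
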
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index eab6ae50683c..2d2f4e4d33b5 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -263,27 +263,77 @@ void rx1950_disable_charger(void)
gpio_direction_output(S3C2410_GPJ(3), 0);
}
+DEFINE_SPINLOCK(rx1950_blink_spin);
+
+static int rx1950_led_blink_set(unsigned gpio, int state,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ int blink_gpio, check_gpio;
+
+ switch (gpio) {
+ case S3C2410_GPA(6):
+ blink_gpio = S3C2410_GPA(4);
+ check_gpio = S3C2410_GPA(3);
+ break;
+ case S3C2410_GPA(7):
+ blink_gpio = S3C2410_GPA(3);
+ check_gpio = S3C2410_GPA(4);
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ if (delay_on && delay_off && !*delay_on && !*delay_off)
+ *delay_on = *delay_off = 500;
+
+ spin_lock(&rx1950_blink_spin);
+
+ switch (state) {
+ case GPIO_LED_NO_BLINK_LOW:
+ case GPIO_LED_NO_BLINK_HIGH:
+ if (!gpio_get_value(check_gpio))
+ gpio_set_value(S3C2410_GPJ(6), 0);
+ gpio_set_value(blink_gpio, 0);
+ gpio_set_value(gpio, state);
+ break;
+ case GPIO_LED_BLINK:
+ gpio_set_value(gpio, 0);
+ gpio_set_value(S3C2410_GPJ(6), 1);
+ gpio_set_value(blink_gpio, 1);
+ break;
+ }
+
+ spin_unlock(&rx1950_blink_spin);
+
+ return 0;
+}
+
static struct gpio_led rx1950_leds_desc[] = {
{
.name = "Green",
.default_trigger = "main-battery-charging-or-full",
.gpio = S3C2410_GPA(6),
+ .retain_state_suspended = 1,
},
{
.name = "Red",
.default_trigger = "main-battery-full",
.gpio = S3C2410_GPA(7),
+ .retain_state_suspended = 1,
},
{
.name = "Blue",
.default_trigger = "rx1950-acx-mem",
.gpio = S3C2410_GPA(11),
+ .retain_state_suspended = 1,
},
};
static struct gpio_led_platform_data rx1950_leds_pdata = {
.num_leds = ARRAY_SIZE(rx1950_leds_desc),
.leds = rx1950_leds_desc,
+ .gpio_blink_set = rx1950_led_blink_set,
};
static struct platform_device rx1950_leds = {
@@ -771,6 +821,13 @@ static void __init rx1950_init_machine(void)
WARN_ON(gpio_request(S3C2410_GPB(1), "LCD power"));
+ WARN_ON(gpio_request(S3C2410_GPA(3), "Red blink"));
+ WARN_ON(gpio_request(S3C2410_GPA(4), "Green blink"));
+ WARN_ON(gpio_request(S3C2410_GPJ(6), "LED blink"));
+ gpio_direction_output(S3C2410_GPA(3), 0);
+ gpio_direction_output(S3C2410_GPA(4), 0);
+ gpio_direction_output(S3C2410_GPJ(6), 0);
+
platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices));
i2c_register_board_info(0, rx1950_i2c_devices,
diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h
index 7d74fd5c8d66..7d208a71b172 100644
--- a/arch/arm/mach-s3c24a0/include/mach/memory.h
+++ b/arch/arm/mach-s3c24a0/include/mach/memory.h
@@ -11,7 +11,7 @@
#ifndef __ASM_ARCH_24A0_MEMORY_H
#define __ASM_ARCH_24A0_MEMORY_H __FILE__
-#define PHYS_OFFSET UL(0x10000000)
+#define PLAT_PHYS_OFFSET UL(0x10000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
diff --git a/arch/arm/mach-s3c64xx/include/mach/memory.h b/arch/arm/mach-s3c64xx/include/mach/memory.h
index 42cc54e2ee30..4760cdae1eb6 100644
--- a/arch/arm/mach-s3c64xx/include/mach/memory.h
+++ b/arch/arm/mach-s3c64xx/include/mach/memory.h
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x50000000)
+#define PLAT_PHYS_OFFSET UL(0x50000000)
#define CONSISTENT_DMA_SIZE SZ_8M
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S
index b2ef44317368..afe5a762f46e 100644
--- a/arch/arm/mach-s3c64xx/sleep.S
+++ b/arch/arm/mach-s3c64xx/sleep.S
@@ -32,25 +32,13 @@
* code after resume.
*
* entry:
- * r0 = pointer to the save block
+ * r1 = v:p offset
*/
ENTRY(s3c_cpu_save)
stmfd sp!, { r4 - r12, lr }
-
- mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
- mrc p15, 0, r5, c3, c0, 0 @ Domain ID
- mrc p15, 0, r6, c2, c0, 0 @ Translation Table BASE0
- mrc p15, 0, r7, c2, c0, 1 @ Translation Table BASE1
- mrc p15, 0, r8, c2, c0, 2 @ Translation Table Control
- mrc p15, 0, r9, c1, c0, 0 @ Control register
- mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
- mrc p15, 0, r11, c1, c0, 2 @ Co-processor access controls
-
- stmia r0, { r4 - r13 } @ Save CP registers and SP
-
- @@ save our state to ram
- bl s3c_pm_cb_flushcache
+ ldr r3, =resume_with_mmu
+ bl cpu_suspend
@@ call final suspend code
ldr r0, =pm_cpu_sleep
@@ -61,18 +49,6 @@ ENTRY(s3c_cpu_save)
resume_with_mmu:
ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save
- .data
-
- /* the next bit is code, but it requires easy access to the
- * s3c_sleep_save_phys data before the MMU is switched on, so
- * we store the code that needs this variable in the .data where
- * the value can be written to (the .text segment is RO).
- */
-
- .global s3c_sleep_save_phys
-s3c_sleep_save_phys:
- .word 0
-
/* Sleep magic, the word before the resume entry point so that the
* bootloader can check for a resumeable image. */
@@ -110,35 +86,4 @@ ENTRY(s3c_cpu_resume)
orr r0, r0, #1 << 15 @ GPN15
str r0, [ r3, #S3C64XX_GPNDAT ]
#endif
-
- /* __v6_setup from arch/arm/mm/proc-v6.S, ensure that the caches
- * are thoroughly cleaned just in case the bootloader didn't do it
- * for us. */
- mov r0, #0
- mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
- @@mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
- @@mcr p15, 0, r0, c7, c7, 0 @ Invalidate I + D caches
-
- ldr r0, s3c_sleep_save_phys
- ldmia r0, { r4 - r13 }
-
- mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
- mcr p15, 0, r5, c3, c0, 0 @ Domain ID
- mcr p15, 0, r6, c2, c0, 0 @ Translation Table BASE0
- mcr p15, 0, r7, c2, c0, 1 @ Translation Table BASE1
- mcr p15, 0, r8, c2, c0, 2 @ Translation Table Control
- mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
-
- mov r0, #0 @ restore copro access controls
- mcr p15, 0, r11, c1, c0, 2 @ Co-processor access controls
- mcr p15, 0, r0, c7, c5, 4
-
- ldr r2, =resume_with_mmu
- mcr p15, 0, r9, c1, c0, 0 /* turn mmu back on */
- nop
- mov pc, r2 /* jump back */
-
- .end
+ b cpu_resume
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 203dd5a18bd5..058dab4482a1 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5p6442/include/mach/map.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5P6442 - Memory map definitions
@@ -16,56 +16,61 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
-#define S5P6442_PA_CHIPID (0xE0000000)
-#define S5P_PA_CHIPID S5P6442_PA_CHIPID
+#define S5P6442_PA_SDRAM 0x20000000
-#define S5P6442_PA_SYSCON (0xE0100000)
-#define S5P_PA_SYSCON S5P6442_PA_SYSCON
+#define S5P6442_PA_I2S0 0xC0B00000
+#define S5P6442_PA_I2S1 0xF2200000
-#define S5P6442_PA_GPIO (0xE0200000)
+#define S5P6442_PA_CHIPID 0xE0000000
-#define S5P6442_PA_VIC0 (0xE4000000)
-#define S5P6442_PA_VIC1 (0xE4100000)
-#define S5P6442_PA_VIC2 (0xE4200000)
+#define S5P6442_PA_SYSCON 0xE0100000
-#define S5P6442_PA_SROMC (0xE7000000)
-#define S5P_PA_SROMC S5P6442_PA_SROMC
+#define S5P6442_PA_GPIO 0xE0200000
-#define S5P6442_PA_MDMA 0xE8000000
-#define S5P6442_PA_PDMA 0xE9000000
+#define S5P6442_PA_VIC0 0xE4000000
+#define S5P6442_PA_VIC1 0xE4100000
+#define S5P6442_PA_VIC2 0xE4200000
-#define S5P6442_PA_TIMER (0xEA000000)
-#define S5P_PA_TIMER S5P6442_PA_TIMER
+#define S5P6442_PA_SROMC 0xE7000000
-#define S5P6442_PA_SYSTIMER (0xEA100000)
+#define S5P6442_PA_MDMA 0xE8000000
+#define S5P6442_PA_PDMA 0xE9000000
-#define S5P6442_PA_WATCHDOG (0xEA200000)
+#define S5P6442_PA_TIMER 0xEA000000
-#define S5P6442_PA_UART (0xEC000000)
+#define S5P6442_PA_SYSTIMER 0xEA100000
-#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0)
-#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400)
-#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800)
-#define S5P_SZ_UART SZ_256
+#define S5P6442_PA_WATCHDOG 0xEA200000
-#define S5P6442_PA_IIC0 (0xEC100000)
+#define S5P6442_PA_UART 0xEC000000
-#define S5P6442_PA_SDRAM (0x20000000)
-#define S5P_PA_SDRAM S5P6442_PA_SDRAM
+#define S5P6442_PA_IIC0 0xEC100000
#define S5P6442_PA_SPI 0xEC300000
-/* I2S */
-#define S5P6442_PA_I2S0 0xC0B00000
-#define S5P6442_PA_I2S1 0xF2200000
-
-/* PCM */
#define S5P6442_PA_PCM0 0xF2400000
#define S5P6442_PA_PCM1 0xF2500000
-/* compatibiltiy defines. */
+/* Compatibility Defines */
+
+#define S3C_PA_IIC S5P6442_PA_IIC0
#define S3C_PA_WDT S5P6442_PA_WATCHDOG
+
+#define S5P_PA_CHIPID S5P6442_PA_CHIPID
+#define S5P_PA_SDRAM S5P6442_PA_SDRAM
+#define S5P_PA_SROMC S5P6442_PA_SROMC
+#define S5P_PA_SYSCON S5P6442_PA_SYSCON
+#define S5P_PA_TIMER S5P6442_PA_TIMER
+
+/* UART */
+
#define S3C_PA_UART S5P6442_PA_UART
-#define S3C_PA_IIC S5P6442_PA_IIC0
+
+#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0 S5P_PA_UART(0)
+#define S5P_PA_UART1 S5P_PA_UART(1)
+#define S5P_PA_UART2 S5P_PA_UART(2)
+
+#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */
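
The S5P6442 map now derives its UART bases from one macro instead of three hand-written offsets. Since the removed defines spaced the ports 0x400 apart, the consolidated form works out as below (worked example, assuming S3C_UART_OFFSET is 0x400 on this SoC):

/* S5P_PA_UART(x) = S3C_PA_UART + (x) * S3C_UART_OFFSET
 *   S5P_PA_UART(0) = 0xEC000000
 *   S5P_PA_UART(1) = 0xEC000000 + 0x400 = 0xEC000400
 *   S5P_PA_UART(2) = 0xEC000000 + 0x800 = 0xEC000800
 */
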
diff --git a/arch/arm/mach-s5p6442/include/mach/memory.h b/arch/arm/mach-s5p6442/include/mach/memory.h
index 9ddd877ba2ea..cfe259dded33 100644
--- a/arch/arm/mach-s5p6442/include/mach/memory.h
+++ b/arch/arm/mach-s5p6442/include/mach/memory.h
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x20000000)
+#define PLAT_PHYS_OFFSET UL(0x20000000)
#define CONSISTENT_DMA_SIZE SZ_8M
#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-s5p64x0/include/mach/gpio.h b/arch/arm/mach-s5p64x0/include/mach/gpio.h
index 5486c8f01f1d..adb5f298ead8 100644
--- a/arch/arm/mach-s5p64x0/include/mach/gpio.h
+++ b/arch/arm/mach-s5p64x0/include/mach/gpio.h
@@ -23,7 +23,7 @@
#define S5P6440_GPIO_A_NR (6)
#define S5P6440_GPIO_B_NR (7)
#define S5P6440_GPIO_C_NR (8)
-#define S5P6440_GPIO_F_NR (2)
+#define S5P6440_GPIO_F_NR (16)
#define S5P6440_GPIO_G_NR (7)
#define S5P6440_GPIO_H_NR (10)
#define S5P6440_GPIO_I_NR (16)
@@ -36,7 +36,7 @@
#define S5P6450_GPIO_B_NR (7)
#define S5P6450_GPIO_C_NR (8)
#define S5P6450_GPIO_D_NR (8)
-#define S5P6450_GPIO_F_NR (2)
+#define S5P6450_GPIO_F_NR (16)
#define S5P6450_GPIO_G_NR (14)
#define S5P6450_GPIO_H_NR (10)
#define S5P6450_GPIO_I_NR (16)
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index a9365e5ba614..95c91257c7ca 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5p64x0/include/mach/map.h
*
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P64X0 - Memory map definitions
@@ -16,64 +16,46 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
-#define S5P64X0_PA_SDRAM (0x20000000)
+#define S5P64X0_PA_SDRAM 0x20000000
-#define S5P64X0_PA_CHIPID (0xE0000000)
-#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
-
-#define S5P64X0_PA_SYSCON (0xE0100000)
-#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
-
-#define S5P64X0_PA_GPIO (0xE0308000)
-
-#define S5P64X0_PA_VIC0 (0xE4000000)
-#define S5P64X0_PA_VIC1 (0xE4100000)
+#define S5P64X0_PA_CHIPID 0xE0000000
-#define S5P64X0_PA_SROMC (0xE7000000)
-#define S5P_PA_SROMC S5P64X0_PA_SROMC
-
-#define S5P64X0_PA_PDMA (0xE9000000)
-
-#define S5P64X0_PA_TIMER (0xEA000000)
-#define S5P_PA_TIMER S5P64X0_PA_TIMER
+#define S5P64X0_PA_SYSCON 0xE0100000
-#define S5P64X0_PA_RTC (0xEA100000)
+#define S5P64X0_PA_GPIO 0xE0308000
-#define S5P64X0_PA_WDT (0xEA200000)
+#define S5P64X0_PA_VIC0 0xE4000000
+#define S5P64X0_PA_VIC1 0xE4100000
-#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
-#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
+#define S5P64X0_PA_SROMC 0xE7000000
-#define S5P_PA_UART0 S5P6450_PA_UART(0)
-#define S5P_PA_UART1 S5P6450_PA_UART(1)
-#define S5P_PA_UART2 S5P6450_PA_UART(2)
-#define S5P_PA_UART3 S5P6450_PA_UART(3)
-#define S5P_PA_UART4 S5P6450_PA_UART(4)
-#define S5P_PA_UART5 S5P6450_PA_UART(5)
+#define S5P64X0_PA_PDMA 0xE9000000
-#define S5P_SZ_UART SZ_256
+#define S5P64X0_PA_TIMER 0xEA000000
+#define S5P64X0_PA_RTC 0xEA100000
+#define S5P64X0_PA_WDT 0xEA200000
-#define S5P6440_PA_IIC0 (0xEC104000)
-#define S5P6440_PA_IIC1 (0xEC20F000)
-#define S5P6450_PA_IIC0 (0xEC100000)
-#define S5P6450_PA_IIC1 (0xEC200000)
+#define S5P6440_PA_IIC0 0xEC104000
+#define S5P6440_PA_IIC1 0xEC20F000
+#define S5P6450_PA_IIC0 0xEC100000
+#define S5P6450_PA_IIC1 0xEC200000
-#define S5P64X0_PA_SPI0 (0xEC400000)
-#define S5P64X0_PA_SPI1 (0xEC500000)
+#define S5P64X0_PA_SPI0 0xEC400000
+#define S5P64X0_PA_SPI1 0xEC500000
-#define S5P64X0_PA_HSOTG (0xED100000)
+#define S5P64X0_PA_HSOTG 0xED100000
#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
-#define S5P64X0_PA_I2S (0xF2000000)
+#define S5P64X0_PA_I2S 0xF2000000
#define S5P6450_PA_I2S1 0xF2800000
#define S5P6450_PA_I2S2 0xF2900000
-#define S5P64X0_PA_PCM (0xF2100000)
+#define S5P64X0_PA_PCM 0xF2100000
-#define S5P64X0_PA_ADC (0xF3000000)
+#define S5P64X0_PA_ADC 0xF3000000
-/* compatibiltiy defines. */
+/* Compatibiltiy Defines */
#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1)
@@ -83,6 +65,25 @@
#define S3C_PA_RTC S5P64X0_PA_RTC
#define S3C_PA_WDT S5P64X0_PA_WDT
+#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
+#define S5P_PA_SROMC S5P64X0_PA_SROMC
+#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
+#define S5P_PA_TIMER S5P64X0_PA_TIMER
+
#define SAMSUNG_PA_ADC S5P64X0_PA_ADC
+/* UART */
+
+#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
+#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
+
+#define S5P_PA_UART0 S5P6450_PA_UART(0)
+#define S5P_PA_UART1 S5P6450_PA_UART(1)
+#define S5P_PA_UART2 S5P6450_PA_UART(2)
+#define S5P_PA_UART3 S5P6450_PA_UART(3)
+#define S5P_PA_UART4 S5P6450_PA_UART(4)
+#define S5P_PA_UART5 S5P6450_PA_UART(5)
+
+#define S5P_SZ_UART SZ_256
+
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p64x0/include/mach/memory.h b/arch/arm/mach-s5p64x0/include/mach/memory.h
index 1b036b0a24ce..365a6eb4b88f 100644
--- a/arch/arm/mach-s5p64x0/include/mach/memory.h
+++ b/arch/arm/mach-s5p64x0/include/mach/memory.h
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H __FILE__
-#define PHYS_OFFSET UL(0x20000000)
+#define PLAT_PHYS_OFFSET UL(0x20000000)
#define CONSISTENT_DMA_SIZE SZ_8M
#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 328467b346aa..ccbe6b767f7d 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -1,5 +1,8 @@
/* linux/arch/arm/mach-s5pc100/include/mach/map.h
*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
@@ -16,145 +19,115 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
-/*
- * map-base.h has already defined virtual memory address
- * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s)
- * S3C_VA_SYS S3C_ADDR(0x00100000) system control
- * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used)
- * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block
- * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog
- * S3C_VA_UART S3C_ADDR(0x01000000) UART
- *
- * S5PC100 specific virtual memory address can be defined here
- * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO
- *
- */
+#define S5PC100_PA_SDRAM 0x20000000
+
+#define S5PC100_PA_ONENAND 0xE7100000
+#define S5PC100_PA_ONENAND_BUF 0xB0000000
+
+#define S5PC100_PA_CHIPID 0xE0000000
-#define S5PC100_PA_ONENAND_BUF (0xB0000000)
-#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
+#define S5PC100_PA_SYSCON 0xE0100000
-/* Chip ID */
+#define S5PC100_PA_OTHERS 0xE0200000
-#define S5PC100_PA_CHIPID (0xE0000000)
-#define S5P_PA_CHIPID S5PC100_PA_CHIPID
+#define S5PC100_PA_GPIO 0xE0300000
-#define S5PC100_PA_SYSCON (0xE0100000)
-#define S5P_PA_SYSCON S5PC100_PA_SYSCON
+#define S5PC100_PA_VIC0 0xE4000000
+#define S5PC100_PA_VIC1 0xE4100000
+#define S5PC100_PA_VIC2 0xE4200000
-#define S5PC100_PA_OTHERS (0xE0200000)
-#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
+#define S5PC100_PA_SROMC 0xE7000000
-#define S5PC100_PA_GPIO (0xE0300000)
-#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000)
+#define S5PC100_PA_CFCON 0xE7800000
-/* Interrupt */
-#define S5PC100_PA_VIC0 (0xE4000000)
-#define S5PC100_PA_VIC1 (0xE4100000)
-#define S5PC100_PA_VIC2 (0xE4200000)
-#define S5PC100_VA_VIC S3C_VA_IRQ
-#define S5PC100_VA_VIC_OFFSET 0x10000
-#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
+#define S5PC100_PA_MDMA 0xE8100000
+#define S5PC100_PA_PDMA0 0xE9000000
+#define S5PC100_PA_PDMA1 0xE9200000
-#define S5PC100_PA_SROMC (0xE7000000)
-#define S5P_PA_SROMC S5PC100_PA_SROMC
+#define S5PC100_PA_TIMER 0xEA000000
+#define S5PC100_PA_SYSTIMER 0xEA100000
+#define S5PC100_PA_WATCHDOG 0xEA200000
+#define S5PC100_PA_RTC 0xEA300000
-#define S5PC100_PA_ONENAND (0xE7100000)
+#define S5PC100_PA_UART 0xEC000000
-#define S5PC100_PA_CFCON (0xE7800000)
+#define S5PC100_PA_IIC0 0xEC100000
+#define S5PC100_PA_IIC1 0xEC200000
-/* DMA */
-#define S5PC100_PA_MDMA (0xE8100000)
-#define S5PC100_PA_PDMA0 (0xE9000000)
-#define S5PC100_PA_PDMA1 (0xE9200000)
+#define S5PC100_PA_SPI0 0xEC300000
+#define S5PC100_PA_SPI1 0xEC400000
+#define S5PC100_PA_SPI2 0xEC500000
-/* Timer */
-#define S5PC100_PA_TIMER (0xEA000000)
-#define S5P_PA_TIMER S5PC100_PA_TIMER
+#define S5PC100_PA_USB_HSOTG 0xED200000
+#define S5PC100_PA_USB_HSPHY 0xED300000
-#define S5PC100_PA_SYSTIMER (0xEA100000)
+#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
-#define S5PC100_PA_WATCHDOG (0xEA200000)
-#define S5PC100_PA_RTC (0xEA300000)
+#define S5PC100_PA_FB 0xEE000000
-#define S5PC100_PA_UART (0xEC000000)
+#define S5PC100_PA_FIMC0 0xEE200000
+#define S5PC100_PA_FIMC1 0xEE300000
+#define S5PC100_PA_FIMC2 0xEE400000
-#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0)
-#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400)
-#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800)
-#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00)
-#define S5P_SZ_UART SZ_256
+#define S5PC100_PA_I2S0 0xF2000000
+#define S5PC100_PA_I2S1 0xF2100000
+#define S5PC100_PA_I2S2 0xF2200000
-#define S5PC100_PA_IIC0 (0xEC100000)
-#define S5PC100_PA_IIC1 (0xEC200000)
+#define S5PC100_PA_AC97 0xF2300000
-/* SPI */
-#define S5PC100_PA_SPI0 0xEC300000
-#define S5PC100_PA_SPI1 0xEC400000
-#define S5PC100_PA_SPI2 0xEC500000
+#define S5PC100_PA_PCM0 0xF2400000
+#define S5PC100_PA_PCM1 0xF2500000
-/* USB HS OTG */
-#define S5PC100_PA_USB_HSOTG (0xED200000)
-#define S5PC100_PA_USB_HSPHY (0xED300000)
+#define S5PC100_PA_SPDIF 0xF2600000
-#define S5PC100_PA_FB (0xEE000000)
+#define S5PC100_PA_TSADC 0xF3000000
-#define S5PC100_PA_FIMC0 (0xEE200000)
-#define S5PC100_PA_FIMC1 (0xEE300000)
-#define S5PC100_PA_FIMC2 (0xEE400000)
+#define S5PC100_PA_KEYPAD 0xF3100000
-#define S5PC100_PA_I2S0 (0xF2000000)
-#define S5PC100_PA_I2S1 (0xF2100000)
-#define S5PC100_PA_I2S2 (0xF2200000)
+/* Compatibiltiy Defines */
-#define S5PC100_PA_AC97 0xF2300000
+#define S3C_PA_FB S5PC100_PA_FB
+#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
+#define S3C_PA_IIC S5PC100_PA_IIC0
+#define S3C_PA_IIC1 S5PC100_PA_IIC1
+#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
+#define S3C_PA_ONENAND S5PC100_PA_ONENAND
+#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
+#define S3C_PA_RTC S5PC100_PA_RTC
+#define S3C_PA_TSADC S5PC100_PA_TSADC
+#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
+#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
+#define S3C_PA_WDT S5PC100_PA_WATCHDOG
-/* PCM */
-#define S5PC100_PA_PCM0 0xF2400000
-#define S5PC100_PA_PCM1 0xF2500000
+#define S5P_PA_CHIPID S5PC100_PA_CHIPID
+#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
+#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
+#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
+#define S5P_PA_SDRAM S5PC100_PA_SDRAM
+#define S5P_PA_SROMC S5PC100_PA_SROMC
+#define S5P_PA_SYSCON S5PC100_PA_SYSCON
+#define S5P_PA_TIMER S5PC100_PA_TIMER
-#define S5PC100_PA_SPDIF 0xF2600000
+#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
+#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
+#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
-#define S5PC100_PA_TSADC (0xF3000000)
+#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
-/* KEYPAD */
-#define S5PC100_PA_KEYPAD (0xF3100000)
+#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
-#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
+/* UART */
-#define S5PC100_PA_SDRAM (0x20000000)
-#define S5P_PA_SDRAM S5PC100_PA_SDRAM
+#define S3C_PA_UART S5PC100_PA_UART
-/* compatibiltiy defines. */
-#define S3C_PA_UART S5PC100_PA_UART
-#define S3C_PA_IIC S5PC100_PA_IIC0
-#define S3C_PA_IIC1 S5PC100_PA_IIC1
-#define S3C_PA_FB S5PC100_PA_FB
-#define S3C_PA_G2D S5PC100_PA_G2D
-#define S3C_PA_G3D S5PC100_PA_G3D
-#define S3C_PA_JPEG S5PC100_PA_JPEG
-#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR
-#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0)
-#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1)
-#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2)
-#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
-#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
-#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
-#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
-#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
-#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
-#define S3C_PA_WDT S5PC100_PA_WATCHDOG
-#define S3C_PA_TSADC S5PC100_PA_TSADC
-#define S3C_PA_ONENAND S5PC100_PA_ONENAND
-#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
-#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF
-#define S3C_PA_RTC S5PC100_PA_RTC
-
-#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
-#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
-#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
+#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0 S5P_PA_UART(0)
+#define S5P_PA_UART1 S5P_PA_UART(1)
+#define S5P_PA_UART2 S5P_PA_UART(2)
+#define S5P_PA_UART3 S5P_PA_UART(3)
-#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
-#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
-#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
+#define S5P_SZ_UART SZ_256
-#endif /* __ASM_ARCH_C100_MAP_H */
+#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/memory.h b/arch/arm/mach-s5pc100/include/mach/memory.h
index 4b60d18179f7..bda4e79fd5fc 100644
--- a/arch/arm/mach-s5pc100/include/mach/memory.h
+++ b/arch/arm/mach-s5pc100/include/mach/memory.h
@@ -13,6 +13,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x20000000)
+#define PLAT_PHYS_OFFSET UL(0x20000000)
#endif
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index 3611492ad681..1dd58836fd4f 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5pv210/include/mach/map.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV210 - Memory map definitions
@@ -16,122 +16,120 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
-#define S5PV210_PA_SROM_BANK5 (0xA8000000)
+#define S5PV210_PA_SDRAM 0x20000000
-#define S5PC110_PA_ONENAND (0xB0000000)
-#define S5P_PA_ONENAND S5PC110_PA_ONENAND
+#define S5PV210_PA_SROM_BANK5 0xA8000000
-#define S5PC110_PA_ONENAND_DMA (0xB0600000)
-#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
+#define S5PC110_PA_ONENAND 0xB0000000
+#define S5PC110_PA_ONENAND_DMA 0xB0600000
-#define S5PV210_PA_CHIPID (0xE0000000)
-#define S5P_PA_CHIPID S5PV210_PA_CHIPID
+#define S5PV210_PA_CHIPID 0xE0000000
-#define S5PV210_PA_SYSCON (0xE0100000)
-#define S5P_PA_SYSCON S5PV210_PA_SYSCON
+#define S5PV210_PA_SYSCON 0xE0100000
-#define S5PV210_PA_GPIO (0xE0200000)
+#define S5PV210_PA_GPIO 0xE0200000
-/* SPI */
-#define S5PV210_PA_SPI0 0xE1300000
-#define S5PV210_PA_SPI1 0xE1400000
+#define S5PV210_PA_SPDIF 0xE1100000
-#define S5PV210_PA_KEYPAD (0xE1600000)
+#define S5PV210_PA_SPI0 0xE1300000
+#define S5PV210_PA_SPI1 0xE1400000
-#define S5PV210_PA_IIC0 (0xE1800000)
-#define S5PV210_PA_IIC1 (0xFAB00000)
-#define S5PV210_PA_IIC2 (0xE1A00000)
+#define S5PV210_PA_KEYPAD 0xE1600000
-#define S5PV210_PA_TIMER (0xE2500000)
-#define S5P_PA_TIMER S5PV210_PA_TIMER
+#define S5PV210_PA_ADC 0xE1700000
-#define S5PV210_PA_SYSTIMER (0xE2600000)
+#define S5PV210_PA_IIC0 0xE1800000
+#define S5PV210_PA_IIC1 0xFAB00000
+#define S5PV210_PA_IIC2 0xE1A00000
-#define S5PV210_PA_WATCHDOG (0xE2700000)
+#define S5PV210_PA_AC97 0xE2200000
-#define S5PV210_PA_RTC (0xE2800000)
-#define S5PV210_PA_UART (0xE2900000)
+#define S5PV210_PA_PCM0 0xE2300000
+#define S5PV210_PA_PCM1 0xE1200000
+#define S5PV210_PA_PCM2 0xE2B00000
-#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0)
-#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400)
-#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800)
-#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00)
+#define S5PV210_PA_TIMER 0xE2500000
+#define S5PV210_PA_SYSTIMER 0xE2600000
+#define S5PV210_PA_WATCHDOG 0xE2700000
+#define S5PV210_PA_RTC 0xE2800000
-#define S5P_SZ_UART SZ_256
+#define S5PV210_PA_UART 0xE2900000
-#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+#define S5PV210_PA_SROMC 0xE8000000
-#define S5PV210_PA_SROMC (0xE8000000)
-#define S5P_PA_SROMC S5PV210_PA_SROMC
+#define S5PV210_PA_CFCON 0xE8200000
-#define S5PV210_PA_CFCON (0xE8200000)
+#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
-#define S5PV210_PA_MDMA 0xFA200000
-#define S5PV210_PA_PDMA0 0xE0900000
-#define S5PV210_PA_PDMA1 0xE0A00000
+#define S5PV210_PA_HSOTG 0xEC000000
+#define S5PV210_PA_HSPHY 0xEC100000
-#define S5PV210_PA_FB (0xF8000000)
+#define S5PV210_PA_IIS0 0xEEE30000
+#define S5PV210_PA_IIS1 0xE2100000
+#define S5PV210_PA_IIS2 0xE2A00000
-#define S5PV210_PA_FIMC0 (0xFB200000)
-#define S5PV210_PA_FIMC1 (0xFB300000)
-#define S5PV210_PA_FIMC2 (0xFB400000)
+#define S5PV210_PA_DMC0 0xF0000000
+#define S5PV210_PA_DMC1 0xF1400000
-#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
+#define S5PV210_PA_VIC0 0xF2000000
+#define S5PV210_PA_VIC1 0xF2100000
+#define S5PV210_PA_VIC2 0xF2200000
+#define S5PV210_PA_VIC3 0xF2300000
-#define S5PV210_PA_HSOTG (0xEC000000)
-#define S5PV210_PA_HSPHY (0xEC100000)
+#define S5PV210_PA_FB 0xF8000000
-#define S5PV210_PA_VIC0 (0xF2000000)
-#define S5PV210_PA_VIC1 (0xF2100000)
-#define S5PV210_PA_VIC2 (0xF2200000)
-#define S5PV210_PA_VIC3 (0xF2300000)
+#define S5PV210_PA_MDMA 0xFA200000
+#define S5PV210_PA_PDMA0 0xE0900000
+#define S5PV210_PA_PDMA1 0xE0A00000
-#define S5PV210_PA_SDRAM (0x20000000)
-#define S5P_PA_SDRAM S5PV210_PA_SDRAM
+#define S5PV210_PA_MIPI_CSIS 0xFA600000
-/* S/PDIF */
-#define S5PV210_PA_SPDIF 0xE1100000
+#define S5PV210_PA_FIMC0 0xFB200000
+#define S5PV210_PA_FIMC1 0xFB300000
+#define S5PV210_PA_FIMC2 0xFB400000
-/* I2S */
-#define S5PV210_PA_IIS0 0xEEE30000
-#define S5PV210_PA_IIS1 0xE2100000
-#define S5PV210_PA_IIS2 0xE2A00000
+/* Compatibiltiy Defines */
-/* PCM */
-#define S5PV210_PA_PCM0 0xE2300000
-#define S5PV210_PA_PCM1 0xE1200000
-#define S5PV210_PA_PCM2 0xE2B00000
+#define S3C_PA_FB S5PV210_PA_FB
+#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
+#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
+#define S3C_PA_IIC S5PV210_PA_IIC0
+#define S3C_PA_IIC1 S5PV210_PA_IIC1
+#define S3C_PA_IIC2 S5PV210_PA_IIC2
+#define S3C_PA_RTC S5PV210_PA_RTC
+#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
+#define S3C_PA_WDT S5PV210_PA_WATCHDOG
-/* AC97 */
-#define S5PV210_PA_AC97 0xE2200000
+#define S5P_PA_CHIPID S5PV210_PA_CHIPID
+#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
+#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
+#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
+#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
+#define S5P_PA_ONENAND S5PC110_PA_ONENAND
+#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
+#define S5P_PA_SDRAM S5PV210_PA_SDRAM
+#define S5P_PA_SROMC S5PV210_PA_SROMC
+#define S5P_PA_SYSCON S5PV210_PA_SYSCON
+#define S5P_PA_TIMER S5PV210_PA_TIMER
-#define S5PV210_PA_ADC (0xE1700000)
+#define SAMSUNG_PA_ADC S5PV210_PA_ADC
+#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
+#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
-#define S5PV210_PA_DMC0 (0xF0000000)
-#define S5PV210_PA_DMC1 (0xF1400000)
+/* UART */
-#define S5PV210_PA_MIPI_CSIS 0xFA600000
+#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
-/* compatibiltiy defines. */
-#define S3C_PA_UART S5PV210_PA_UART
-#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
-#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
-#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
-#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
-#define S3C_PA_IIC S5PV210_PA_IIC0
-#define S3C_PA_IIC1 S5PV210_PA_IIC1
-#define S3C_PA_IIC2 S5PV210_PA_IIC2
-#define S3C_PA_FB S5PV210_PA_FB
-#define S3C_PA_RTC S5PV210_PA_RTC
-#define S3C_PA_WDT S5PV210_PA_WATCHDOG
-#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
-#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
-#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
-#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
-#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
+#define S3C_PA_UART S5PV210_PA_UART
-#define SAMSUNG_PA_ADC S5PV210_PA_ADC
-#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
-#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
+#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0 S5P_PA_UART(0)
+#define S5P_PA_UART1 S5P_PA_UART(1)
+#define S5P_PA_UART2 S5P_PA_UART(2)
+#define S5P_PA_UART3 S5P_PA_UART(3)
+
+#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/memory.h b/arch/arm/mach-s5pv210/include/mach/memory.h
index d503e0c4ce4f..7b5fcf0da0c4 100644
--- a/arch/arm/mach-s5pv210/include/mach/memory.h
+++ b/arch/arm/mach-s5pv210/include/mach/memory.h
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x20000000)
+#define PLAT_PHYS_OFFSET UL(0x20000000)
#define CONSISTENT_DMA_SIZE (SZ_8M + SZ_4M + SZ_2M)
/*
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 461aa035afc0..e5acb36a7a51 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = {
static struct regulator_init_data aquila_ldo3_data = {
.constraints = {
- .name = "VUSB/MIPI_1.1V",
+ .name = "VUSB+MIPI_1.1V",
.min_uV = 1100000,
.max_uV = 1100000,
.apply_uV = 1,
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = {
static struct regulator_init_data aquila_ldo8_data = {
.constraints = {
- .name = "VUSB/VADC_3.3V",
+ .name = "VUSB+VADC_3.3V",
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = {
static struct regulator_init_data aquila_ldo9_data = {
.constraints = {
- .name = "VCC/VCAM_2.8V",
+ .name = "VCC+VCAM_2.8V",
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
@@ -296,13 +296,11 @@ static struct regulator_init_data aquila_ldo17_data = {
};
/* BUCK */
-static struct regulator_consumer_supply buck1_consumer[] = {
- { .supply = "vddarm", },
-};
+static struct regulator_consumer_supply buck1_consumer =
+ REGULATOR_SUPPLY("vddarm", NULL);
-static struct regulator_consumer_supply buck2_consumer[] = {
- { .supply = "vddint", },
-};
+static struct regulator_consumer_supply buck2_consumer =
+ REGULATOR_SUPPLY("vddint", NULL);
static struct regulator_init_data aquila_buck1_data = {
.constraints = {
@@ -313,8 +311,8 @@ static struct regulator_init_data aquila_buck1_data = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = ARRAY_SIZE(buck1_consumer),
- .consumer_supplies = buck1_consumer,
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &buck1_consumer,
};
static struct regulator_init_data aquila_buck2_data = {
@@ -326,8 +324,8 @@ static struct regulator_init_data aquila_buck2_data = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = ARRAY_SIZE(buck2_consumer),
- .consumer_supplies = buck2_consumer,
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &buck2_consumer,
};
static struct regulator_init_data aquila_buck3_data = {
@@ -381,33 +379,24 @@ static struct max8998_platform_data aquila_max8998_pdata = {
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
- .buck1_max_voltage1 = 1200000,
- .buck1_max_voltage2 = 1200000,
- .buck2_max_voltage = 1200000,
+ .buck1_voltage1 = 1200000,
+ .buck1_voltage2 = 1200000,
+ .buck1_voltage3 = 1200000,
+ .buck1_voltage4 = 1200000,
+ .buck2_voltage1 = 1200000,
+ .buck2_voltage2 = 1200000,
};
#endif
static struct regulator_consumer_supply wm8994_fixed_voltage0_supplies[] = {
- {
- .dev_name = "5-001a",
- .supply = "DBVDD",
- }, {
- .dev_name = "5-001a",
- .supply = "AVDD2",
- }, {
- .dev_name = "5-001a",
- .supply = "CPVDD",
- },
+ REGULATOR_SUPPLY("DBVDD", "5-001a"),
+ REGULATOR_SUPPLY("AVDD2", "5-001a"),
+ REGULATOR_SUPPLY("CPVDD", "5-001a"),
};
static struct regulator_consumer_supply wm8994_fixed_voltage1_supplies[] = {
- {
- .dev_name = "5-001a",
- .supply = "SPKVDD1",
- }, {
- .dev_name = "5-001a",
- .supply = "SPKVDD2",
- },
+ REGULATOR_SUPPLY("SPKVDD1", "5-001a"),
+ REGULATOR_SUPPLY("SPKVDD2", "5-001a"),
};
static struct regulator_init_data wm8994_fixed_voltage0_init_data = {
@@ -456,15 +445,11 @@ static struct platform_device wm8994_fixed_voltage1 = {
},
};
-static struct regulator_consumer_supply wm8994_avdd1_supply = {
- .dev_name = "5-001a",
- .supply = "AVDD1",
-};
+static struct regulator_consumer_supply wm8994_avdd1_supply =
+ REGULATOR_SUPPLY("AVDD1", "5-001a");
-static struct regulator_consumer_supply wm8994_dcvdd_supply = {
- .dev_name = "5-001a",
- .supply = "DCVDD",
-};
+static struct regulator_consumer_supply wm8994_dcvdd_supply =
+ REGULATOR_SUPPLY("DCVDD", "5-001a");
static struct regulator_init_data wm8994_ldo1_data = {
.constraints = {
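
The regulator changes in this board file replace open-coded consumer-supply initializers with the REGULATOR_SUPPLY() helper from <linux/regulator/machine.h>. A minimal before/after sketch showing the two forms are equivalent designated initializers:

    /* Open-coded form used previously */
    static struct regulator_consumer_supply buck1_consumer_old = {
            .supply   = "vddarm",
            .dev_name = NULL,
    };

    /* Equivalent form using the helper, as the patch now does */
    static struct regulator_consumer_supply buck1_consumer =
            REGULATOR_SUPPLY("vddarm", NULL);
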
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index e22d5112fd44..a097683ae4e0 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -15,7 +15,7 @@
#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
-#include <linux/i2c/qt602240_ts.h>
+#include <linux/i2c/atmel_mxt_ts.h>
#include <linux/mfd/max8998.h>
#include <linux/mfd/wm8994/pdata.h>
#include <linux/regulator/fixed.h>
@@ -25,6 +25,7 @@
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -108,6 +109,8 @@ static struct s3c_fb_pd_win goni_fb_win0 = {
},
.max_bpp = 32,
.default_bpp = 16,
+ .virtual_x = 480,
+ .virtual_y = 2 * 800,
};
static struct s3c_fb_platdata goni_lcd_pdata __initdata = {
@@ -222,7 +225,7 @@ static void __init goni_radio_init(void)
}
/* TSP */
-static struct qt602240_platform_data qt602240_platform_data = {
+static struct mxt_platform_data qt602240_platform_data = {
.x_line = 17,
.y_line = 11,
.x_size = 800,
@@ -230,7 +233,8 @@ static struct qt602240_platform_data qt602240_platform_data = {
.blen = 0x21,
.threshold = 0x28,
.voltage = 2800000, /* 2.8V */
- .orient = QT602240_DIAGONAL,
+ .orient = MXT_DIAGONAL,
+ .irqflags = IRQF_TRIGGER_FALLING,
};
static struct s3c2410_platform_i2c i2c2_data __initdata = {
@@ -269,10 +273,30 @@ static void __init goni_tsp_init(void)
/* MAX8998 regulators */
#if defined(CONFIG_REGULATOR_MAX8998) || defined(CONFIG_REGULATOR_MAX8998_MODULE)
+static struct regulator_consumer_supply goni_ldo3_consumers[] = {
+ REGULATOR_SUPPLY("vusb_a", "s3c-hsotg"),
+};
+
static struct regulator_consumer_supply goni_ldo5_consumers[] = {
REGULATOR_SUPPLY("vmmc", "s3c-sdhci.0"),
};
+static struct regulator_consumer_supply goni_ldo8_consumers[] = {
+ REGULATOR_SUPPLY("vusb_d", "s3c-hsotg"),
+};
+
+static struct regulator_consumer_supply goni_ldo11_consumers[] = {
+ REGULATOR_SUPPLY("vddio", "0-0030"), /* "CAM_IO_2.8V" */
+};
+
+static struct regulator_consumer_supply goni_ldo13_consumers[] = {
+ REGULATOR_SUPPLY("vdda", "0-0030"), /* "CAM_A_2.8V" */
+};
+
+static struct regulator_consumer_supply goni_ldo14_consumers[] = {
+ REGULATOR_SUPPLY("vdd_core", "0-0030"), /* "CAM_CIF_1.8V" */
+};
+
static struct regulator_init_data goni_ldo2_data = {
.constraints = {
.name = "VALIVE_1.1V",
@@ -288,12 +312,14 @@ static struct regulator_init_data goni_ldo2_data = {
static struct regulator_init_data goni_ldo3_data = {
.constraints = {
- .name = "VUSB/MIPI_1.1V",
+ .name = "VUSB+MIPI_1.1V",
.min_uV = 1100000,
.max_uV = 1100000,
.apply_uV = 1,
- .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(goni_ldo3_consumers),
+ .consumer_supplies = goni_ldo3_consumers,
};
static struct regulator_init_data goni_ldo4_data = {
@@ -311,6 +337,7 @@ static struct regulator_init_data goni_ldo5_data = {
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(goni_ldo5_consumers),
.consumer_supplies = goni_ldo5_consumers,
@@ -337,21 +364,22 @@ static struct regulator_init_data goni_ldo7_data = {
static struct regulator_init_data goni_ldo8_data = {
.constraints = {
- .name = "VUSB/VADC_3.3V",
+ .name = "VUSB+VADC_3.3V",
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
- .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(goni_ldo8_consumers),
+ .consumer_supplies = goni_ldo8_consumers,
};
static struct regulator_init_data goni_ldo9_data = {
.constraints = {
- .name = "VCC/VCAM_2.8V",
+ .name = "VCC+VCAM_2.8V",
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
- .always_on = 1,
},
};
@@ -371,8 +399,10 @@ static struct regulator_init_data goni_ldo11_data = {
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
- .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(goni_ldo11_consumers),
+ .consumer_supplies = goni_ldo11_consumers,
};
static struct regulator_init_data goni_ldo12_data = {
@@ -381,7 +411,6 @@ static struct regulator_init_data goni_ldo12_data = {
.min_uV = 1200000,
.max_uV = 1200000,
.apply_uV = 1,
- .always_on = 1,
},
};
@@ -391,8 +420,10 @@ static struct regulator_init_data goni_ldo13_data = {
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
- .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(goni_ldo13_consumers),
+ .consumer_supplies = goni_ldo13_consumers,
};
static struct regulator_init_data goni_ldo14_data = {
@@ -401,8 +432,10 @@ static struct regulator_init_data goni_ldo14_data = {
.min_uV = 1800000,
.max_uV = 1800000,
.apply_uV = 1,
- .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(goni_ldo14_consumers),
+ .consumer_supplies = goni_ldo14_consumers,
};
static struct regulator_init_data goni_ldo15_data = {
@@ -411,7 +444,6 @@ static struct regulator_init_data goni_ldo15_data = {
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
- .always_on = 1,
},
};
@@ -421,7 +453,6 @@ static struct regulator_init_data goni_ldo16_data = {
.min_uV = 1800000,
.max_uV = 1800000,
.apply_uV = 1,
- .always_on = 1,
},
};
@@ -436,13 +467,11 @@ static struct regulator_init_data goni_ldo17_data = {
};
/* BUCK */
-static struct regulator_consumer_supply buck1_consumer[] = {
- { .supply = "vddarm", },
-};
+static struct regulator_consumer_supply buck1_consumer =
+ REGULATOR_SUPPLY("vddarm", NULL);
-static struct regulator_consumer_supply buck2_consumer[] = {
- { .supply = "vddint", },
-};
+static struct regulator_consumer_supply buck2_consumer =
+ REGULATOR_SUPPLY("vddint", NULL);
static struct regulator_init_data goni_buck1_data = {
.constraints = {
@@ -453,8 +482,8 @@ static struct regulator_init_data goni_buck1_data = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = ARRAY_SIZE(buck1_consumer),
- .consumer_supplies = buck1_consumer,
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &buck1_consumer,
};
static struct regulator_init_data goni_buck2_data = {
@@ -466,8 +495,8 @@ static struct regulator_init_data goni_buck2_data = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = ARRAY_SIZE(buck2_consumer),
- .consumer_supplies = buck2_consumer,
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &buck2_consumer,
};
static struct regulator_init_data goni_buck3_data = {
@@ -521,33 +550,24 @@ static struct max8998_platform_data goni_max8998_pdata = {
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
- .buck1_max_voltage1 = 1200000,
- .buck1_max_voltage2 = 1200000,
- .buck2_max_voltage = 1200000,
+ .buck1_voltage1 = 1200000,
+ .buck1_voltage2 = 1200000,
+ .buck1_voltage3 = 1200000,
+ .buck1_voltage4 = 1200000,
+ .buck2_voltage1 = 1200000,
+ .buck2_voltage2 = 1200000,
};
#endif
static struct regulator_consumer_supply wm8994_fixed_voltage0_supplies[] = {
- {
- .dev_name = "5-001a",
- .supply = "DBVDD",
- }, {
- .dev_name = "5-001a",
- .supply = "AVDD2",
- }, {
- .dev_name = "5-001a",
- .supply = "CPVDD",
- },
+ REGULATOR_SUPPLY("DBVDD", "5-001a"),
+ REGULATOR_SUPPLY("AVDD2", "5-001a"),
+ REGULATOR_SUPPLY("CPVDD", "5-001a"),
};
static struct regulator_consumer_supply wm8994_fixed_voltage1_supplies[] = {
- {
- .dev_name = "5-001a",
- .supply = "SPKVDD1",
- }, {
- .dev_name = "5-001a",
- .supply = "SPKVDD2",
- },
+ REGULATOR_SUPPLY("SPKVDD1", "5-001a"),
+ REGULATOR_SUPPLY("SPKVDD2", "5-001a"),
};
static struct regulator_init_data wm8994_fixed_voltage0_init_data = {
@@ -596,15 +616,11 @@ static struct platform_device wm8994_fixed_voltage1 = {
},
};
-static struct regulator_consumer_supply wm8994_avdd1_supply = {
- .dev_name = "5-001a",
- .supply = "AVDD1",
-};
+static struct regulator_consumer_supply wm8994_avdd1_supply =
+ REGULATOR_SUPPLY("AVDD1", "5-001a");
-static struct regulator_consumer_supply wm8994_dcvdd_supply = {
- .dev_name = "5-001a",
- .supply = "DCVDD",
-};
+static struct regulator_consumer_supply wm8994_dcvdd_supply =
+ REGULATOR_SUPPLY("DCVDD", "5-001a");
static struct regulator_init_data wm8994_ldo1_data = {
.constraints = {
@@ -791,6 +807,7 @@ static struct platform_device *goni_devices[] __initdata = {
&goni_i2c_gpio5,
&mmc2_fixed_voltage,
&goni_device_gpiokeys,
+ &s3c_device_i2c0,
&s5p_device_fimc0,
&s5p_device_fimc1,
&s5p_device_fimc2,
@@ -827,6 +844,9 @@ static void __init goni_machine_init(void)
/* Radio: call before I2C 1 registeration */
goni_radio_init();
+ /* I2C0 */
+ s3c_i2c0_set_platdata(NULL);
+
/* I2C1 */
s3c_i2c1_set_platdata(NULL);
i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
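
The new goni LDO consumer entries bind each supply name to a specific device ("s3c-hsotg", the "0-0030" camera sensor), so the consuming driver can request the regulator by name alone. A hedged sketch of that consumer side; whether the real driver uses exactly this supply name is an assumption:

    /* Sketch only: how a consumer tied to the "s3c-hsotg" entry above would
     * pick up its "vusb_d" supply. Error handling trimmed. */
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int enable_vusb_d(struct device *dev)
    {
            struct regulator *vusb_d = regulator_get(dev, "vusb_d");

            if (IS_ERR(vusb_d))
                    return PTR_ERR(vusb_d);
            return regulator_enable(vusb_d);
    }
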
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S
index d4d222b716b4..a3d649466fb1 100644
--- a/arch/arm/mach-s5pv210/sleep.S
+++ b/arch/arm/mach-s5pv210/sleep.S
@@ -35,50 +35,24 @@
/* s3c_cpu_save
*
* entry:
- * r0 = save address (virtual addr of s3c_sleep_save_phys)
+ * r1 = v:p offset
*/
ENTRY(s3c_cpu_save)
stmfd sp!, { r3 - r12, lr }
-
- mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
- mrc p15, 0, r5, c3, c0, 0 @ Domain ID
- mrc p15, 0, r6, c2, c0, 0 @ Translation Table BASE0
- mrc p15, 0, r7, c2, c0, 1 @ Translation Table BASE1
- mrc p15, 0, r8, c2, c0, 2 @ Translation Table Control
- mrc p15, 0, r9, c1, c0, 0 @ Control register
- mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
- mrc p15, 0, r11, c1, c0, 2 @ Co-processor access controls
- mrc p15, 0, r12, c10, c2, 0 @ Read PRRR
- mrc p15, 0, r3, c10, c2, 1 @ READ NMRR
-
- stmia r0, { r3 - r13 }
-
- bl s3c_pm_cb_flushcache
+ ldr r3, =resume_with_mmu
+ bl cpu_suspend
ldr r0, =pm_cpu_sleep
ldr r0, [ r0 ]
mov pc, r0
resume_with_mmu:
- /*
- * After MMU is turned on, restore the previous MMU table.
- */
- ldr r9 , =(PAGE_OFFSET - PHYS_OFFSET)
- add r4, r4, r9
- str r12, [r4]
-
ldmfd sp!, { r3 - r12, pc }
.ltorg
- .data
-
- .global s3c_sleep_save_phys
-s3c_sleep_save_phys:
- .word 0
-
/* sleep magic, to allow the bootloader to check for an valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
@@ -96,75 +70,4 @@ s3c_sleep_save_phys:
*/
ENTRY(s3c_cpu_resume)
- mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
- msr cpsr_c, r0
-
- mov r1, #0
- mcr p15, 0, r1, c8, c7, 0 @ invalidate TLBs
- mcr p15, 0, r1, c7, c5, 0 @ invalidate I Cache
-
- ldr r0, s3c_sleep_save_phys @ address of restore block
- ldmia r0, { r3 - r13 }
-
- mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
- mcr p15, 0, r5, c3, c0, 0 @ Domain ID
-
- mcr p15, 0, r8, c2, c0, 2 @ Translation Table Control
- mcr p15, 0, r7, c2, c0, 1 @ Translation Table BASE1
- mcr p15, 0, r6, c2, c0, 0 @ Translation Table BASE0
-
- mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
-
- mov r0, #0
- mcr p15, 0, r0, c8, c7, 0 @ Invalidate I & D TLB
-
- mov r0, #0 @ restore copro access
- mcr p15, 0, r11, c1, c0, 2 @ Co-processor access
- mcr p15, 0, r0, c7, c5, 4
-
- mcr p15, 0, r12, c10, c2, 0 @ write PRRR
- mcr p15, 0, r3, c10, c2, 1 @ write NMRR
-
- /*
- * In Cortex-A8, when MMU is turned on, the pipeline is flushed.
- * And there are no valid entries in the MMU table at this point.
- * So before turning on the MMU, the MMU entry for the DRAM address
- * range is added. After the MMU is turned on, the other entries
- * in the MMU table will be restored.
- */
-
- /* r6 = Translation Table BASE0 */
- mov r4, r6
- mov r4, r4, LSR #14
- mov r4, r4, LSL #14
-
- /* Load address for adding to MMU table list */
- ldr r11, =0xE010F000 @ INFORM0 reg.
- ldr r10, [r11, #0]
- mov r10, r10, LSR #18
- bic r10, r10, #0x3
- orr r4, r4, r10
-
- /* Calculate MMU table entry */
- mov r10, r10, LSL #18
- ldr r5, =0x40E
- orr r10, r10, r5
-
- /* Back up originally data */
- ldr r12, [r4]
-
- /* Add calculated MMU table entry into MMU table list */
- str r10, [r4]
-
- ldr r2, =resume_with_mmu
- mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc
-
- nop
- nop
- nop
- nop
- nop @ second-to-last before mmu
-
- mov pc, r2 @ go back to virtual address
-
- .ltorg
+ b cpu_resume
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
deleted file mode 100644
index 036fb383b830..000000000000
--- a/arch/arm/mach-s5pv310/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# arch/arm/mach-s5pv310/Makefile
-#
-# Copyright (c) 2010 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n :=
-obj- :=
-
-# Core support for S5PV310 system
-
-obj-$(CONFIG_CPU_S5PV310) += cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_S5PV310) += setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
-
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
-obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
-obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-
-# machine support
-
-obj-$(CONFIG_MACH_SMDKC210) += mach-smdkc210.o
-obj-$(CONFIG_MACH_SMDKV310) += mach-smdkv310.o
-obj-$(CONFIG_MACH_UNIVERSAL_C210) += mach-universal_c210.o
-
-# device support
-
-obj-y += dev-audio.o
-obj-$(CONFIG_S5PV310_DEV_PD) += dev-pd.o
-obj-$(CONFIG_S5PV310_DEV_SYSMMU) += dev-sysmmu.o
-
-obj-$(CONFIG_S5PV310_SETUP_I2C1) += setup-i2c1.o
-obj-$(CONFIG_S5PV310_SETUP_I2C2) += setup-i2c2.o
-obj-$(CONFIG_S5PV310_SETUP_I2C3) += setup-i2c3.o
-obj-$(CONFIG_S5PV310_SETUP_I2C4) += setup-i2c4.o
-obj-$(CONFIG_S5PV310_SETUP_I2C5) += setup-i2c5.o
-obj-$(CONFIG_S5PV310_SETUP_I2C6) += setup-i2c6.o
-obj-$(CONFIG_S5PV310_SETUP_I2C7) += setup-i2c7.o
-obj-$(CONFIG_S5PV310_SETUP_SDHCI) += setup-sdhci.o
-obj-$(CONFIG_S5PV310_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5pv310/include/mach/gpio.h b/arch/arm/mach-s5pv310/include/mach/gpio.h
deleted file mode 100644
index 20cb80c23466..000000000000
--- a/arch/arm/mach-s5pv310/include/mach/gpio.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/gpio.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5PV310 - GPIO lib support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_GPIO_H
-#define __ASM_ARCH_GPIO_H __FILE__
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-/* Practically, GPIO banks upto GPZ are the configurable gpio banks */
-
-/* GPIO bank sizes */
-#define S5PV310_GPIO_A0_NR (8)
-#define S5PV310_GPIO_A1_NR (6)
-#define S5PV310_GPIO_B_NR (8)
-#define S5PV310_GPIO_C0_NR (5)
-#define S5PV310_GPIO_C1_NR (5)
-#define S5PV310_GPIO_D0_NR (4)
-#define S5PV310_GPIO_D1_NR (4)
-#define S5PV310_GPIO_E0_NR (5)
-#define S5PV310_GPIO_E1_NR (8)
-#define S5PV310_GPIO_E2_NR (6)
-#define S5PV310_GPIO_E3_NR (8)
-#define S5PV310_GPIO_E4_NR (8)
-#define S5PV310_GPIO_F0_NR (8)
-#define S5PV310_GPIO_F1_NR (8)
-#define S5PV310_GPIO_F2_NR (8)
-#define S5PV310_GPIO_F3_NR (6)
-#define S5PV310_GPIO_J0_NR (8)
-#define S5PV310_GPIO_J1_NR (5)
-#define S5PV310_GPIO_K0_NR (7)
-#define S5PV310_GPIO_K1_NR (7)
-#define S5PV310_GPIO_K2_NR (7)
-#define S5PV310_GPIO_K3_NR (7)
-#define S5PV310_GPIO_L0_NR (8)
-#define S5PV310_GPIO_L1_NR (3)
-#define S5PV310_GPIO_L2_NR (8)
-#define S5PV310_GPIO_X0_NR (8)
-#define S5PV310_GPIO_X1_NR (8)
-#define S5PV310_GPIO_X2_NR (8)
-#define S5PV310_GPIO_X3_NR (8)
-#define S5PV310_GPIO_Z_NR (7)
-
-/* GPIO bank numbers */
-
-#define S5PV310_GPIO_NEXT(__gpio) \
- ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
-
-enum s5p_gpio_number {
- S5PV310_GPIO_A0_START = 0,
- S5PV310_GPIO_A1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_A0),
- S5PV310_GPIO_B_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_A1),
- S5PV310_GPIO_C0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_B),
- S5PV310_GPIO_C1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_C0),
- S5PV310_GPIO_D0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_C1),
- S5PV310_GPIO_D1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_D0),
- S5PV310_GPIO_E0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_D1),
- S5PV310_GPIO_E1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_E0),
- S5PV310_GPIO_E2_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_E1),
- S5PV310_GPIO_E3_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_E2),
- S5PV310_GPIO_E4_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_E3),
- S5PV310_GPIO_F0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_E4),
- S5PV310_GPIO_F1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_F0),
- S5PV310_GPIO_F2_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_F1),
- S5PV310_GPIO_F3_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_F2),
- S5PV310_GPIO_J0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_F3),
- S5PV310_GPIO_J1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_J0),
- S5PV310_GPIO_K0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_J1),
- S5PV310_GPIO_K1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_K0),
- S5PV310_GPIO_K2_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_K1),
- S5PV310_GPIO_K3_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_K2),
- S5PV310_GPIO_L0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_K3),
- S5PV310_GPIO_L1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_L0),
- S5PV310_GPIO_L2_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_L1),
- S5PV310_GPIO_X0_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_L2),
- S5PV310_GPIO_X1_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_X0),
- S5PV310_GPIO_X2_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_X1),
- S5PV310_GPIO_X3_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_X2),
- S5PV310_GPIO_Z_START = S5PV310_GPIO_NEXT(S5PV310_GPIO_X3),
-};
-
-/* S5PV310 GPIO number definitions */
-#define S5PV310_GPA0(_nr) (S5PV310_GPIO_A0_START + (_nr))
-#define S5PV310_GPA1(_nr) (S5PV310_GPIO_A1_START + (_nr))
-#define S5PV310_GPB(_nr) (S5PV310_GPIO_B_START + (_nr))
-#define S5PV310_GPC0(_nr) (S5PV310_GPIO_C0_START + (_nr))
-#define S5PV310_GPC1(_nr) (S5PV310_GPIO_C1_START + (_nr))
-#define S5PV310_GPD0(_nr) (S5PV310_GPIO_D0_START + (_nr))
-#define S5PV310_GPD1(_nr) (S5PV310_GPIO_D1_START + (_nr))
-#define S5PV310_GPE0(_nr) (S5PV310_GPIO_E0_START + (_nr))
-#define S5PV310_GPE1(_nr) (S5PV310_GPIO_E1_START + (_nr))
-#define S5PV310_GPE2(_nr) (S5PV310_GPIO_E2_START + (_nr))
-#define S5PV310_GPE3(_nr) (S5PV310_GPIO_E3_START + (_nr))
-#define S5PV310_GPE4(_nr) (S5PV310_GPIO_E4_START + (_nr))
-#define S5PV310_GPF0(_nr) (S5PV310_GPIO_F0_START + (_nr))
-#define S5PV310_GPF1(_nr) (S5PV310_GPIO_F1_START + (_nr))
-#define S5PV310_GPF2(_nr) (S5PV310_GPIO_F2_START + (_nr))
-#define S5PV310_GPF3(_nr) (S5PV310_GPIO_F3_START + (_nr))
-#define S5PV310_GPJ0(_nr) (S5PV310_GPIO_J0_START + (_nr))
-#define S5PV310_GPJ1(_nr) (S5PV310_GPIO_J1_START + (_nr))
-#define S5PV310_GPK0(_nr) (S5PV310_GPIO_K0_START + (_nr))
-#define S5PV310_GPK1(_nr) (S5PV310_GPIO_K1_START + (_nr))
-#define S5PV310_GPK2(_nr) (S5PV310_GPIO_K2_START + (_nr))
-#define S5PV310_GPK3(_nr) (S5PV310_GPIO_K3_START + (_nr))
-#define S5PV310_GPL0(_nr) (S5PV310_GPIO_L0_START + (_nr))
-#define S5PV310_GPL1(_nr) (S5PV310_GPIO_L1_START + (_nr))
-#define S5PV310_GPL2(_nr) (S5PV310_GPIO_L2_START + (_nr))
-#define S5PV310_GPX0(_nr) (S5PV310_GPIO_X0_START + (_nr))
-#define S5PV310_GPX1(_nr) (S5PV310_GPIO_X1_START + (_nr))
-#define S5PV310_GPX2(_nr) (S5PV310_GPIO_X2_START + (_nr))
-#define S5PV310_GPX3(_nr) (S5PV310_GPIO_X3_START + (_nr))
-#define S5PV310_GPZ(_nr) (S5PV310_GPIO_Z_START + (_nr))
-
-/* the end of the S5PV310 specific gpios */
-#define S5PV310_GPIO_END (S5PV310_GPZ(S5PV310_GPIO_Z_NR) + 1)
-#define S3C_GPIO_END S5PV310_GPIO_END
-
-/* define the number of gpios we need to the one after the GPZ() range */
-#define ARCH_NR_GPIOS (S5PV310_GPZ(S5PV310_GPIO_Z_NR) + \
- CONFIG_SAMSUNG_GPIO_EXTRA + 1)
-
-#include <asm-generic/gpio.h>
-
-#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
deleted file mode 100644
index 3060f78e12ab..000000000000
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/map.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5PV310 - Memory map definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MAP_H
-#define __ASM_ARCH_MAP_H __FILE__
-
-#include <plat/map-base.h>
-
-/*
- * S5PV310 UART offset is 0x10000 but the older S5P SoCs are 0x400.
- * So need to define it, and here is to avoid redefinition warning.
- */
-#define S3C_UART_OFFSET (0x10000)
-
-#include <plat/map-s5p.h>
-
-#define S5PV310_PA_SYSRAM (0x02025000)
-
-#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
-
-#define S5PC210_PA_ONENAND (0x0C000000)
-#define S5P_PA_ONENAND S5PC210_PA_ONENAND
-
-#define S5PC210_PA_ONENAND_DMA (0x0C600000)
-#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
-
-#define S5PV310_PA_CHIPID (0x10000000)
-#define S5P_PA_CHIPID S5PV310_PA_CHIPID
-
-#define S5PV310_PA_SYSCON (0x10010000)
-#define S5P_PA_SYSCON S5PV310_PA_SYSCON
-
-#define S5PV310_PA_PMU (0x10020000)
-
-#define S5PV310_PA_CMU (0x10030000)
-
-#define S5PV310_PA_WATCHDOG (0x10060000)
-#define S5PV310_PA_RTC (0x10070000)
-
-#define S5PV310_PA_DMC0 (0x10400000)
-
-#define S5PV310_PA_COMBINER (0x10448000)
-
-#define S5PV310_PA_COREPERI (0x10500000)
-#define S5PV310_PA_GIC_CPU (0x10500100)
-#define S5PV310_PA_TWD (0x10500600)
-#define S5PV310_PA_GIC_DIST (0x10501000)
-#define S5PV310_PA_L2CC (0x10502000)
-
-/* DMA */
-#define S5PV310_PA_MDMA 0x10810000
-#define S5PV310_PA_PDMA0 0x12680000
-#define S5PV310_PA_PDMA1 0x12690000
-
-#define S5PV310_PA_GPIO1 (0x11400000)
-#define S5PV310_PA_GPIO2 (0x11000000)
-#define S5PV310_PA_GPIO3 (0x03860000)
-
-#define S5PV310_PA_MIPI_CSIS0 0x11880000
-#define S5PV310_PA_MIPI_CSIS1 0x11890000
-
-#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
-
-#define S5PV310_PA_SROMC (0x12570000)
-#define S5P_PA_SROMC S5PV310_PA_SROMC
-
-/* S/PDIF */
-#define S5PV310_PA_SPDIF 0xE1100000
-
-/* I2S */
-#define S5PV310_PA_I2S0 0x03830000
-#define S5PV310_PA_I2S1 0xE3100000
-#define S5PV310_PA_I2S2 0xE2A00000
-
-/* PCM */
-#define S5PV310_PA_PCM0 0x03840000
-#define S5PV310_PA_PCM1 0x13980000
-#define S5PV310_PA_PCM2 0x13990000
-
-/* AC97 */
-#define S5PV310_PA_AC97 0x139A0000
-
-#define S5PV310_PA_UART (0x13800000)
-
-#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-#define S5P_PA_UART3 S5P_PA_UART(3)
-#define S5P_PA_UART4 S5P_PA_UART(4)
-
-#define S5P_SZ_UART SZ_256
-
-#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
-
-#define S5PV310_PA_TIMER (0x139D0000)
-#define S5P_PA_TIMER S5PV310_PA_TIMER
-
-#define S5PV310_PA_SDRAM (0x40000000)
-#define S5P_PA_SDRAM S5PV310_PA_SDRAM
-
-#define S5PV310_PA_SYSMMU_MDMA 0x10A40000
-#define S5PV310_PA_SYSMMU_SSS 0x10A50000
-#define S5PV310_PA_SYSMMU_FIMC0 0x11A20000
-#define S5PV310_PA_SYSMMU_FIMC1 0x11A30000
-#define S5PV310_PA_SYSMMU_FIMC2 0x11A40000
-#define S5PV310_PA_SYSMMU_FIMC3 0x11A50000
-#define S5PV310_PA_SYSMMU_JPEG 0x11A60000
-#define S5PV310_PA_SYSMMU_FIMD0 0x11E20000
-#define S5PV310_PA_SYSMMU_FIMD1 0x12220000
-#define S5PV310_PA_SYSMMU_PCIe 0x12620000
-#define S5PV310_PA_SYSMMU_G2D 0x12A20000
-#define S5PV310_PA_SYSMMU_ROTATOR 0x12A30000
-#define S5PV310_PA_SYSMMU_MDMA2 0x12A40000
-#define S5PV310_PA_SYSMMU_TV 0x12E20000
-#define S5PV310_PA_SYSMMU_MFC_L 0x13620000
-#define S5PV310_PA_SYSMMU_MFC_R 0x13630000
-
-/* compatibiltiy defines. */
-#define S3C_PA_UART S5PV310_PA_UART
-#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
-#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
-#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
-#define S3C_PA_HSMMC3 S5PV310_PA_HSMMC(3)
-#define S3C_PA_IIC S5PV310_PA_IIC(0)
-#define S3C_PA_IIC1 S5PV310_PA_IIC(1)
-#define S3C_PA_IIC2 S5PV310_PA_IIC(2)
-#define S3C_PA_IIC3 S5PV310_PA_IIC(3)
-#define S3C_PA_IIC4 S5PV310_PA_IIC(4)
-#define S3C_PA_IIC5 S5PV310_PA_IIC(5)
-#define S3C_PA_IIC6 S5PV310_PA_IIC(6)
-#define S3C_PA_IIC7 S5PV310_PA_IIC(7)
-#define S3C_PA_RTC S5PV310_PA_RTC
-#define S3C_PA_WDT S5PV310_PA_WATCHDOG
-#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0
-#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1
-
-#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-gpio.h b/arch/arm/mach-s5pv310/include/mach/regs-gpio.h
deleted file mode 100644
index 82e9e0c9d452..000000000000
--- a/arch/arm/mach-s5pv310/include/mach/regs-gpio.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-gpio.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * S5PV310 - GPIO (including EINT) register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_GPIO_H
-#define __ASM_ARCH_REGS_GPIO_H __FILE__
-
-#include <mach/map.h>
-#include <mach/irqs.h>
-
-#define S5PV310_EINT40CON (S5P_VA_GPIO2 + 0xE00)
-#define S5P_EINT_CON(x) (S5PV310_EINT40CON + ((x) * 0x4))
-
-#define S5PV310_EINT40FLTCON0 (S5P_VA_GPIO2 + 0xE80)
-#define S5P_EINT_FLTCON(x) (S5PV310_EINT40FLTCON0 + ((x) * 0x4))
-
-#define S5PV310_EINT40MASK (S5P_VA_GPIO2 + 0xF00)
-#define S5P_EINT_MASK(x) (S5PV310_EINT40MASK + ((x) * 0x4))
-
-#define S5PV310_EINT40PEND (S5P_VA_GPIO2 + 0xF40)
-#define S5P_EINT_PEND(x) (S5PV310_EINT40PEND + ((x) * 0x4))
-
-#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
-
-#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
-
-#define EINT_MODE S3C_GPIO_SFN(0xf)
-
-#define EINT_GPIO_0(x) S5PV310_GPX0(x)
-#define EINT_GPIO_1(x) S5PV310_GPX1(x)
-#define EINT_GPIO_2(x) S5PV310_GPX2(x)
-#define EINT_GPIO_3(x) S5PV310_GPX3(x)
-
-#endif /* __ASM_ARCH_REGS_GPIO_H */
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index 128a1dfa96b9..a44da6a2916c 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -12,7 +12,7 @@
/*
* Physical DRAM offset is 0xc0000000 on the SA1100
*/
-#define PHYS_OFFSET UL(0xc0000000)
+#define PLAT_PHYS_OFFSET UL(0xc0000000)
#ifndef __ASSEMBLY__
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index ab9fc4470d36..c4661aab22fb 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -32,8 +32,7 @@
#include <asm/system.h>
#include <asm/mach/time.h>
-extern void sa1100_cpu_suspend(void);
-extern void sa1100_cpu_resume(void);
+extern void sa1100_cpu_suspend(long);
#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
@@ -73,10 +72,10 @@ static int sa11x0_pm_enter(suspend_state_t state)
RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR;
/* set resume return address */
- PSPR = virt_to_phys(sa1100_cpu_resume);
+ PSPR = virt_to_phys(cpu_resume);
/* go zzz */
- sa1100_cpu_suspend();
+ sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
cpu_init();
@@ -115,11 +114,6 @@ static int sa11x0_pm_enter(suspend_state_t state)
return 0;
}
-unsigned long sleep_phys_sp(void *sp)
-{
- return virt_to_phys(sp);
-}
-
static const struct platform_suspend_ops sa11x0_pm_ops = {
.enter = sa11x0_pm_enter,
.valid = suspend_valid_only_mem,
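
The pm.c hunk above shows the new contract with the generic ARM suspend helpers: PSPR is pointed at the common cpu_resume entry, and the platform suspend routine receives the virtual-to-physical offset instead of saving CP15 state itself. A condensed restatement of that path, kept only as a sketch (names and error handling trimmed):

    /* Condensed sketch of the converted suspend path, mirroring the hunk above. */
    static int sa11x0_pm_enter_sketch(suspend_state_t state)
    {
            PSPR = virt_to_phys(cpu_resume);        /* generic resume entry point */

            /* the assembly stub moves this offset into r1 and points r3 at its
             * local resume label before branching to the generic cpu_suspend */
            sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);

            cpu_init();                             /* back in virtual space */
            return 0;
    }
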
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 80f31bad707c..04f2a618d4ef 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -20,12 +20,7 @@
#include <asm/assembler.h>
#include <mach/hardware.h>
-
-
.text
-
-
-
/*
* sa1100_cpu_suspend()
*
@@ -34,27 +29,10 @@
*/
ENTRY(sa1100_cpu_suspend)
-
stmfd sp!, {r4 - r12, lr} @ save registers on stack
-
- @ get coprocessor registers
- mrc p15, 0, r4, c3, c0, 0 @ domain ID
- mrc p15, 0, r5, c2, c0, 0 @ translation table base addr
- mrc p15, 0, r6, c13, c0, 0 @ PID
- mrc p15, 0, r7, c1, c0, 0 @ control reg
-
- @ store them plus current virtual stack ptr on stack
- mov r8, sp
- stmfd sp!, {r4 - r8}
-
- @ preserve phys address of stack
- mov r0, sp
- bl sleep_phys_sp
- ldr r1, =sleep_save_sp
- str r0, [r1]
-
- @ clean data cache and invalidate WB
- bl v4wb_flush_kern_cache_all
+ mov r1, r0
+ ldr r3, =sa1100_cpu_resume @ return function
+ bl cpu_suspend
@ disable clock switching
mcr p15, 0, r1, c15, c2, 2
@@ -166,50 +144,8 @@ sa1110_sdram_controller_fix:
* cpu_sa1100_resume()
*
* entry point from bootloader into kernel during resume
- *
- * Note: Yes, part of the following code is located into the .data section.
- * This is to allow sleep_save_sp to be accessed with a relative load
- * while we can't rely on any MMU translation. We could have put
- * sleep_save_sp in the .text section as well, but some setups might
- * insist on it to be truly read-only.
*/
-
- .data
- .align 5
-ENTRY(sa1100_cpu_resume)
- mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
- msr cpsr_c, r0 @ set SVC, irqs off
-
- ldr r0, sleep_save_sp @ stack phys addr
- ldr r2, =resume_after_mmu @ its absolute virtual address
- ldmfd r0, {r4 - r7, sp} @ CP regs + virt stack ptr
-
- mov r1, #0
- mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs
- mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache
- mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
- mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB
-
- mcr p15, 0, r4, c3, c0, 0 @ domain ID
- mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
- mcr p15, 0, r6, c13, c0, 0 @ PID
- b resume_turn_on_mmu @ cache align execution
-
.align 5
-resume_turn_on_mmu:
- mcr p15, 0, r7, c1, c0, 0 @ turn on MMU, caches, etc.
- nop
- mov pc, r2 @ jump to virtual addr
- nop
- nop
- nop
-
-sleep_save_sp:
- .word 0 @ preserve stack phys ptr here
-
- .text
-resume_after_mmu:
+sa1100_cpu_resume:
mcr p15, 0, r1, c15, c1, 2 @ enable clock switching
ldmfd sp!, {r4 - r12, pc} @ return to caller
-
-
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h
index d9c4812f1c31..9afb17000008 100644
--- a/arch/arm/mach-shark/include/mach/memory.h
+++ b/arch/arm/mach-shark/include/mach/memory.h
@@ -15,7 +15,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x08000000)
+#define PLAT_PHYS_OFFSET UL(0x08000000)
#ifndef __ASSEMBLY__
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 2123b96b5638..3e6f0aab460b 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -119,13 +119,6 @@ static struct platform_device keysc_device = {
};
/* FSI A */
-static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(I2S) |
- SH_FSI_IFMT(I2S),
-};
-
static struct resource fsi_resources[] = {
[0] = {
.name = "FSI",
@@ -144,9 +137,6 @@ static struct platform_device fsi_device = {
.id = -1,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
- .dev = {
- .platform_data = &fsi_info,
- },
};
static struct resource sh_mmcif_resources[] = {
@@ -454,6 +444,7 @@ static void __init ag5evm_init(void)
gpio_direction_output(GPIO_PORT217, 0);
mdelay(1);
gpio_set_value(GPIO_PORT217, 1);
+ mdelay(100);
/* LCD backlight controller */
gpio_request(GPIO_PORT235, NULL); /* RESET */
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 3cf0951caa2d..17f528a76a1c 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -673,16 +673,12 @@ static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
}
static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(PCM) |
- SH_FSI_IFMT(PCM),
+ .porta_flags = SH_FSI_BRS_INV,
.portb_flags = SH_FSI_BRS_INV |
SH_FSI_BRM_INV |
SH_FSI_LRS_INV |
- SH_FSI_OFMT(SPDIF),
+ SH_FSI_FMT_SPDIF,
.set_rate = fsi_set_rate,
};
@@ -783,6 +779,10 @@ static struct platform_device hdmi_device = {
},
};
+static struct platform_device fsi_hdmi_device = {
+ .name = "sh_fsi2_b_hdmi",
+};
+
static long ap4evb_clk_optimize(unsigned long target, unsigned long *best_freq,
unsigned long *parent_freq)
{
@@ -936,6 +936,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
&usb1_host_device,
&fsi_device,
&fsi_ak4643_device,
+ &fsi_hdmi_device,
&sh_mmcif_device,
&lcdc1_device,
&lcdc_device,
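The new fsi_hdmi_device (added here and on mackerel below) carries no resources or platform data; the platform bus binds it to a driver purely by name. An illustrative skeleton of the driver side that would claim it, assuming a plain platform_driver (a sketch, not the actual sound/soc glue):

#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit fsi_hdmi_probe(struct platform_device *pdev)
{
	/* route FSI port B audio to the HDMI transmitter here */
	return 0;
}

static struct platform_driver fsi_hdmi_driver = {
	.probe	= fsi_hdmi_probe,
	.driver	= {
		.name	= "sh_fsi2_b_hdmi",	/* must match the device name above */
		.owner	= THIS_MODULE,
	},
	/* registered elsewhere with platform_driver_register() */
};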
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index fb4213a4e15a..2fa1d299d948 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -399,6 +399,10 @@ static struct platform_device hdmi_device = {
},
};
+static struct platform_device fsi_hdmi_device = {
+ .name = "sh_fsi2_b_hdmi",
+};
+
static int __init hdmi_init_pm_clock(void)
{
struct clk *hdmi_ick = clk_get(&hdmi_device.dev, "ick");
@@ -609,16 +613,12 @@ fsi_set_rate_end:
}
static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(PCM) |
- SH_FSI_IFMT(PCM),
+ .porta_flags = SH_FSI_BRS_INV,
.portb_flags = SH_FSI_BRS_INV |
SH_FSI_BRM_INV |
SH_FSI_LRS_INV |
- SH_FSI_OFMT(SPDIF),
+ SH_FSI_FMT_SPDIF,
.set_rate = fsi_set_rate,
};
@@ -921,6 +921,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
&leds_device,
&fsi_device,
&fsi_ak4643_device,
+ &fsi_hdmi_device,
&sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF)
&sdhi1_device,
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index ddd4a1b775f0..7e58904c1c8c 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
};
enum { MSTP001,
- MSTP125, MSTP118, MSTP116, MSTP100,
+ MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
MSTP219,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@@ -275,6 +275,10 @@ enum { MSTP001,
static struct clk mstp_clks[MSTP_NR] = {
[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+ [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
+ [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
+ [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
+ [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("r_clk", &r_clk),
/* DIV6 clocks */
+ CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
+ CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
+ CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = {
/* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
- CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
+ CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
+ CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
+ CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
- CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index efd3687ba190..3029aba38688 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -6,13 +6,10 @@ LIST "RWT Setting"
EW 0xE6020004, 0xA500
EW 0xE6030004, 0xA500
-DD 0x01001000, 0x01001000
-
LIST "GPIO Setting"
EB 0xE6051013, 0xA2
LIST "CPG"
-ED 0xE6150080, 0x00000180
ED 0xE61500C0, 0x00000002
WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
WAIT 1, 0xFE40009C
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
LIST "BSC"
ED 0xFEC10000, 0x00E0001B
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
ED 0xFE40004C, 0x00110209
ED 0xFE400010, 0x00000087
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
ED 0xFE400084, 0x0000003F
EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
WAIT 1, 0xFE40009C
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
LIST "SCIF0 - Serial port for earlyprintk"
EB 0xE6053098, 0x11
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index efd3687ba190..3029aba38688 100644
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -6,13 +6,10 @@ LIST "RWT Setting"
EW 0xE6020004, 0xA500
EW 0xE6030004, 0xA500
-DD 0x01001000, 0x01001000
-
LIST "GPIO Setting"
EB 0xE6051013, 0xA2
LIST "CPG"
-ED 0xE6150080, 0x00000180
ED 0xE61500C0, 0x00000002
WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
WAIT 1, 0xFE40009C
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
LIST "BSC"
ED 0xFEC10000, 0x00E0001B
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
ED 0xFE40004C, 0x00110209
ED 0xFE400010, 0x00000087
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
ED 0xFE400084, 0x0000003F
EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
WAIT 1, 0xFE40009C
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
LIST "SCIF0 - Serial port for earlyprintk"
EB 0xE6053098, 0x11
diff --git a/arch/arm/mach-shmobile/include/mach/memory.h b/arch/arm/mach-shmobile/include/mach/memory.h
index 377584e57e03..ad00c3c258f4 100644
--- a/arch/arm/mach-shmobile/include/mach/memory.h
+++ b/arch/arm/mach-shmobile/include/mach/memory.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_MEMORY_H
#define __ASM_MACH_MEMORY_H
-#define PHYS_OFFSET UL(CONFIG_MEMORY_START)
+#define PLAT_PHYS_OFFSET UL(CONFIG_MEMORY_START)
#define MEM_SIZE UL(CONFIG_MEMORY_SIZE)
/* DMA memory at 0xf6000000 - 0xffdfffff */
diff --git a/arch/arm/mach-shmobile/include/mach/mmcif-ap4eb.h b/arch/arm/mach-shmobile/include/mach/mmcif-ap4eb.h
new file mode 100644
index 000000000000..a8d02be8d2b6
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/mmcif-ap4eb.h
@@ -0,0 +1,29 @@
+#ifndef MMCIF_AP4EB_H
+#define MMCIF_AP4EB_H
+
+#define PORT185CR (void __iomem *)0xe60520b9
+#define PORT186CR (void __iomem *)0xe60520ba
+#define PORT187CR (void __iomem *)0xe60520bb
+#define PORT188CR (void __iomem *)0xe60520bc
+
+#define PORTR191_160DR (void __iomem *)0xe6056014
+
+static inline void mmcif_init_progress(void)
+{
+ /* Initialise LEDS1-4
+ * registers: PORT185CR-PORT188CR (LED1-LED4 Control)
+ * value: 0x10 - enable output
+ */
+ __raw_writeb(0x10, PORT185CR);
+ __raw_writeb(0x10, PORT186CR);
+ __raw_writeb(0x10, PORT187CR);
+ __raw_writeb(0x10, PORT188CR);
+}
+
+static inline void mmcif_update_progress(int n)
+{
+ __raw_writel((__raw_readl(PORTR191_160DR) & ~(0xf << 25)) |
+ (1 << (25 + n)), PORTR191_160DR);
+}
+
+#endif /* MMCIF_AP4EB_H */
diff --git a/arch/arm/mach-shmobile/include/mach/mmcif-mackerel.h b/arch/arm/mach-shmobile/include/mach/mmcif-mackerel.h
new file mode 100644
index 000000000000..4b4f6949a868
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/mmcif-mackerel.h
@@ -0,0 +1,39 @@
+#ifndef MMCIF_MACKEREL_H
+#define MMCIF_MACKEREL_H
+
+#define PORT0CR (void __iomem *)0xe6051000
+#define PORT1CR (void __iomem *)0xe6051001
+#define PORT2CR (void __iomem *)0xe6051002
+#define PORT159CR (void __iomem *)0xe605009f
+
+#define PORTR031_000DR (void __iomem *)0xe6055000
+#define PORTL159_128DR (void __iomem *)0xe6054010
+
+static inline void mmcif_init_progress(void)
+{
+ /* Initialise LEDS0-3
+ * registers: PORT0CR-PORT2CR,PORT159CR (LED0-LED3 Control)
+ * value: 0x10 - enable output
+ */
+ __raw_writeb(0x10, PORT0CR);
+ __raw_writeb(0x10, PORT1CR);
+ __raw_writeb(0x10, PORT2CR);
+ __raw_writeb(0x10, PORT159CR);
+}
+
+static inline void mmcif_update_progress(int n)
+{
+ unsigned a = 0, b = 0;
+
+ if (n < 3)
+ a = 1 << n;
+ else
+ b = 1 << 31;
+
+ __raw_writel((__raw_readl(PORTR031_000DR) & ~0x7) | a,
+ PORTR031_000DR);
+ __raw_writel((__raw_readl(PORTL159_128DR) & ~(1 << 31)) | b,
+ PORTL159_128DR);
+}
+
+#endif /* MMCIF_MACKEREL_H */
diff --git a/arch/arm/mach-shmobile/include/mach/mmcif.h b/arch/arm/mach-shmobile/include/mach/mmcif.h
new file mode 100644
index 000000000000..f4dc3279cf03
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/mmcif.h
@@ -0,0 +1,18 @@
+#ifndef MMCIF_H
+#define MMCIF_H
+
+/**************************************************
+ *
+ * board specific settings
+ *
+ **************************************************/
+
+#ifdef CONFIG_MACH_AP4EVB
+#include "mach/mmcif-ap4eb.h"
+#elif defined(CONFIG_MACH_MACKEREL)
+#include "mach/mmcif-mackerel.h"
+#else
+#error "unsupported board."
+#endif
+
+#endif /* MMCIF_H */
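These headers let the MMCIF boot path blink board LEDs while the kernel image is loaded from eMMC; mmcif.h simply picks the board-specific variant at compile time. A hypothetical caller, using only the helpers defined in the headers above (a sketch, not the actual MMCIF loader code):

#include <mach/mmcif.h>

/* hypothetical boot-stage hook: light one LED per completed stage (0..3) */
static void boot_stage_done(int stage)
{
	mmcif_update_progress(stage);
}

static void boot_start(void)
{
	mmcif_init_progress();	/* configure the LED port pins as outputs */
	boot_stage_done(0);	/* first stage reached */
}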
diff --git a/arch/arm/mach-shmobile/localtimer.c b/arch/arm/mach-shmobile/localtimer.c
index 2111c28b724e..ad9ccc9900c8 100644
--- a/arch/arm/mach-shmobile/localtimer.c
+++ b/arch/arm/mach-shmobile/localtimer.c
@@ -18,8 +18,9 @@
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
evt->irq = 29;
twd_timer_setup(evt);
+ return 0;
}
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
index 18febf92f20a..8ae4ad0326a9 100644
--- a/arch/arm/mach-spear3xx/clock.c
+++ b/arch/arm/mach-spear3xx/clock.c
@@ -39,18 +39,43 @@ static struct clk rtc_clk = {
};
/* clock derived from 24 MHz osc clk */
+/* pll masks structure */
+static struct pll_clk_masks pll1_masks = {
+ .mode_mask = PLL_MODE_MASK,
+ .mode_shift = PLL_MODE_SHIFT,
+ .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
+ .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
+ .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
+ .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
+ .div_p_mask = PLL_DIV_P_MASK,
+ .div_p_shift = PLL_DIV_P_SHIFT,
+ .div_n_mask = PLL_DIV_N_MASK,
+ .div_n_shift = PLL_DIV_N_SHIFT,
+};
+
/* pll1 configuration structure */
static struct pll_clk_config pll1_config = {
.mode_reg = PLL1_CTR,
.cfg_reg = PLL1_FRQ,
+ .masks = &pll1_masks,
+};
+
+/* pll rate configuration table, in ascending order of rates */
+struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
};
/* PLL1 clock */
static struct clk pll1_clk = {
+ .flags = ENABLED_ON_INIT,
.pclk = &osc_24m_clk,
.en_reg = PLL1_CTR,
.en_reg_bit = PLL_ENABLE,
- .recalc = &pll1_clk_recalc,
+ .calc_rate = &pll_calc_rate,
+ .recalc = &pll_clk_recalc,
+ .set_rate = &pll_clk_set_rate,
+ .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
.private_data = &pll1_config,
};
@@ -76,36 +101,83 @@ static struct clk cpu_clk = {
.recalc = &follow_parent,
};
+/* ahb masks structure */
+static struct bus_clk_masks ahb_masks = {
+ .mask = PLL_HCLK_RATIO_MASK,
+ .shift = PLL_HCLK_RATIO_SHIFT,
+};
+
/* ahb configuration structure */
static struct bus_clk_config ahb_config = {
.reg = CORE_CLK_CFG,
- .mask = PLL_HCLK_RATIO_MASK,
- .shift = PLL_HCLK_RATIO_SHIFT,
+ .masks = &ahb_masks,
+};
+
+/* ahb rate configuration table, in ascending order of rates */
+struct bus_rate_tbl bus_rtbl[] = {
+ {.div = 3}, /* == parent divided by 4 */
+ {.div = 2}, /* == parent divided by 3 */
+ {.div = 1}, /* == parent divided by 2 */
+ {.div = 0}, /* == parent divided by 1 */
};
/* ahb clock */
static struct clk ahb_clk = {
.flags = ALWAYS_ENABLED,
.pclk = &pll1_clk,
+ .calc_rate = &bus_calc_rate,
.recalc = &bus_clk_recalc,
+ .set_rate = &bus_clk_set_rate,
+ .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
.private_data = &ahb_config,
};
-/* uart configurations */
-static struct aux_clk_config uart_config = {
+/* auxiliary synthesizers masks */
+static struct aux_clk_masks aux_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = AUX_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = AUX_XSCALE_MASK,
+ .xscale_sel_shift = AUX_XSCALE_SHIFT,
+ .yscale_sel_mask = AUX_YSCALE_MASK,
+ .yscale_sel_shift = AUX_YSCALE_SHIFT,
+};
+
+/* uart synth configurations */
+static struct aux_clk_config uart_synth_config = {
.synth_reg = UART_CLK_SYNT,
+ .masks = &aux_masks,
+};
+
+/* aux rate configuration table, in ascending order of rates */
+struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
+ {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/* uart synth clock */
+static struct clk uart_synth_clk = {
+ .en_reg = UART_CLK_SYNT,
+ .en_reg_bit = AUX_SYNT_ENB,
+ .pclk = &pll1_clk,
+ .calc_rate = &aux_calc_rate,
+ .recalc = &aux_clk_recalc,
+ .set_rate = &aux_clk_set_rate,
+ .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
+ .private_data = &uart_synth_config,
};
/* uart parents */
static struct pclk_info uart_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &uart_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
@@ -123,25 +195,35 @@ static struct clk uart_clk = {
.en_reg_bit = UART_CLK_ENB,
.pclk_sel = &uart_pclk_sel,
.pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &aux_clk_recalc,
- .private_data = &uart_config,
+ .recalc = &follow_parent,
};
/* firda configurations */
-static struct aux_clk_config firda_config = {
+static struct aux_clk_config firda_synth_config = {
.synth_reg = FIRDA_CLK_SYNT,
+ .masks = &aux_masks,
+};
+
+/* firda synth clock */
+static struct clk firda_synth_clk = {
+ .en_reg = FIRDA_CLK_SYNT,
+ .en_reg_bit = AUX_SYNT_ENB,
+ .pclk = &pll1_clk,
+ .calc_rate = &aux_calc_rate,
+ .recalc = &aux_clk_recalc,
+ .set_rate = &aux_clk_set_rate,
+ .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
+ .private_data = &firda_synth_config,
};
/* firda parents */
static struct pclk_info firda_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &firda_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
@@ -159,73 +241,155 @@ static struct clk firda_clk = {
.en_reg_bit = FIRDA_CLK_ENB,
.pclk_sel = &firda_pclk_sel,
.pclk_sel_shift = FIRDA_CLK_SHIFT,
- .recalc = &aux_clk_recalc,
- .private_data = &firda_config,
+ .recalc = &follow_parent,
+};
+
+/* gpt synthesizer masks */
+static struct gpt_clk_masks gpt_masks = {
+ .mscale_sel_mask = GPT_MSCALE_MASK,
+ .mscale_sel_shift = GPT_MSCALE_SHIFT,
+ .nscale_sel_mask = GPT_NSCALE_MASK,
+ .nscale_sel_shift = GPT_NSCALE_SHIFT,
+};
+
+/* gpt rate configuration table, in ascending order of rates */
+struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* gpt0 synth clk config*/
+static struct gpt_clk_config gpt0_synth_config = {
+ .synth_reg = PRSC1_CLK_CFG,
+ .masks = &gpt_masks,
+};
+
+/* gpt synth clock */
+static struct clk gpt0_synth_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .calc_rate = &gpt_calc_rate,
+ .recalc = &gpt_clk_recalc,
+ .set_rate = &gpt_clk_set_rate,
+ .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
+ .private_data = &gpt0_synth_config,
};
/* gpt parents */
-static struct pclk_info gpt_pclk_info[] = {
+static struct pclk_info gpt0_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &gpt0_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
/* gpt parent select structure */
-static struct pclk_sel gpt_pclk_sel = {
- .pclk_info = gpt_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt_pclk_info),
+static struct pclk_sel gpt0_pclk_sel = {
+ .pclk_info = gpt0_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
.pclk_sel_reg = PERIP_CLK_CFG,
.pclk_sel_mask = GPT_CLK_MASK,
};
-/* gpt0 configurations */
-static struct aux_clk_config gpt0_config = {
- .synth_reg = PRSC1_CLK_CFG,
-};
-
/* gpt0 timer clock */
static struct clk gpt0_clk = {
.flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel = &gpt0_pclk_sel,
.pclk_sel_shift = GPT0_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt0_config,
+ .recalc = &follow_parent,
};
-/* gpt1 configurations */
-static struct aux_clk_config gpt1_config = {
+/* gpt1 synth clk configurations */
+static struct gpt_clk_config gpt1_synth_config = {
.synth_reg = PRSC2_CLK_CFG,
+ .masks = &gpt_masks,
+};
+
+/* gpt1 synth clock */
+static struct clk gpt1_synth_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .calc_rate = &gpt_calc_rate,
+ .recalc = &gpt_clk_recalc,
+ .set_rate = &gpt_clk_set_rate,
+ .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
+ .private_data = &gpt1_synth_config,
+};
+
+static struct pclk_info gpt1_pclk_info[] = {
+ {
+ .pclk = &gpt1_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_val = AUX_CLK_PLL3_VAL,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt1_pclk_sel = {
+ .pclk_info = gpt1_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt1_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
};
/* gpt1 timer clock */
static struct clk gpt1_clk = {
.en_reg = PERIP1_CLK_ENB,
.en_reg_bit = GPT1_CLK_ENB,
- .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel = &gpt1_pclk_sel,
.pclk_sel_shift = GPT1_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt1_config,
+ .recalc = &follow_parent,
};
-/* gpt2 configurations */
-static struct aux_clk_config gpt2_config = {
+/* gpt2 synth clk configurations */
+static struct gpt_clk_config gpt2_synth_config = {
.synth_reg = PRSC3_CLK_CFG,
+ .masks = &gpt_masks,
+};
+
+/* gpt2 synth clock */
+static struct clk gpt2_synth_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .calc_rate = &gpt_calc_rate,
+ .recalc = &gpt_clk_recalc,
+ .set_rate = &gpt_clk_set_rate,
+ .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
+ .private_data = &gpt2_synth_config,
+};
+
+static struct pclk_info gpt2_pclk_info[] = {
+ {
+ .pclk = &gpt2_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_val = AUX_CLK_PLL3_VAL,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt2_pclk_sel = {
+ .pclk_info = gpt2_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
};
/* gpt2 timer clock */
static struct clk gpt2_clk = {
.en_reg = PERIP1_CLK_ENB,
.en_reg_bit = GPT2_CLK_ENB,
- .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel = &gpt2_pclk_sel,
.pclk_sel_shift = GPT2_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt2_config,
+ .recalc = &follow_parent,
};
/* clock derived from pll3 clk */
@@ -245,26 +409,27 @@ static struct clk usbd_clk = {
.recalc = &follow_parent,
};
-/* clcd clock */
-static struct clk clcd_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll3_48m_clk,
- .recalc = &follow_parent,
+/* clock derived from ahb clk */
+/* apb masks structure */
+static struct bus_clk_masks apb_masks = {
+ .mask = HCLK_PCLK_RATIO_MASK,
+ .shift = HCLK_PCLK_RATIO_SHIFT,
};
-/* clock derived from ahb clk */
/* apb configuration structure */
static struct bus_clk_config apb_config = {
.reg = CORE_CLK_CFG,
- .mask = HCLK_PCLK_RATIO_MASK,
- .shift = HCLK_PCLK_RATIO_SHIFT,
+ .masks = &apb_masks,
};
/* apb clock */
static struct clk apb_clk = {
.flags = ALWAYS_ENABLED,
.pclk = &ahb_clk,
+ .calc_rate = &bus_calc_rate,
.recalc = &bus_clk_recalc,
+ .set_rate = &bus_clk_set_rate,
+ .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
.private_data = &apb_config,
};
@@ -325,8 +490,17 @@ static struct clk adc_clk = {
.recalc = &follow_parent,
};
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+/* emi clock */
+static struct clk emi_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &follow_parent,
+};
+#endif
+
/* ssp clock */
-static struct clk ssp_clk = {
+static struct clk ssp0_clk = {
.pclk = &apb_clk,
.en_reg = PERIP1_CLK_ENB,
.en_reg_bit = SSP_CLK_ENB,
@@ -343,6 +517,137 @@ static struct clk gpio_clk = {
static struct clk dummy_apb_pclk;
+#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
+ defined(CONFIG_MACH_SPEAR320)
+/* fsmc clock */
+static struct clk fsmc_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &follow_parent,
+};
+#endif
+
+/* common clocks to spear310 and spear320 */
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+/* uart1 clock */
+static struct clk uart1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* uart2 clock */
+static struct clk uart2_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
+
+/* common clocks to spear300 and spear320 */
+#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
+/* clcd clock */
+static struct clk clcd_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll3_48m_clk,
+ .recalc = &follow_parent,
+};
+
+/* sdhci clock */
+static struct clk sdhci_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &follow_parent,
+};
+#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
+
+/* spear300 machine specific clock structures */
+#ifdef CONFIG_MACH_SPEAR300
+/* gpio1 clock */
+static struct clk gpio1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* keyboard clock */
+static struct clk kbd_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+#endif
+
+/* spear310 machine specific clock structures */
+#ifdef CONFIG_MACH_SPEAR310
+/* uart3 clock */
+static struct clk uart3_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* uart4 clock */
+static struct clk uart4_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* uart5 clock */
+static struct clk uart5_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+#endif
+
+/* spear320 machine specific clock structures */
+#ifdef CONFIG_MACH_SPEAR320
+/* can0 clock */
+static struct clk can0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* can1 clock */
+static struct clk can1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* i2c1 clock */
+static struct clk i2c1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &follow_parent,
+};
+
+/* ssp1 clock */
+static struct clk ssp1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* ssp2 clock */
+static struct clk ssp2_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* pwm clock */
+static struct clk pwm_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+#endif
+
/* array of all spear 3xx clock lookups */
static struct clk_lookup spear_clk_lookups[] = {
{ .con_id = "apb_pclk", .clk = &dummy_apb_pclk},
@@ -350,7 +655,7 @@ static struct clk_lookup spear_clk_lookups[] = {
{ .con_id = "osc_32k_clk", .clk = &osc_32k_clk},
{ .con_id = "osc_24m_clk", .clk = &osc_24m_clk},
/* clock derived from 32 KHz osc clk */
- { .dev_id = "rtc", .clk = &rtc_clk},
+ { .dev_id = "rtc-spear", .clk = &rtc_clk},
/* clock derived from 24 MHz osc clk */
{ .con_id = "pll1_clk", .clk = &pll1_clk},
{ .con_id = "pll3_48m_clk", .clk = &pll3_48m_clk},
@@ -358,18 +663,22 @@ static struct clk_lookup spear_clk_lookups[] = {
/* clock derived from pll1 clk */
{ .con_id = "cpu_clk", .clk = &cpu_clk},
{ .con_id = "ahb_clk", .clk = &ahb_clk},
+ { .con_id = "uart_synth_clk", .clk = &uart_synth_clk},
+ { .con_id = "firda_synth_clk", .clk = &firda_synth_clk},
+ { .con_id = "gpt0_synth_clk", .clk = &gpt0_synth_clk},
+ { .con_id = "gpt1_synth_clk", .clk = &gpt1_synth_clk},
+ { .con_id = "gpt2_synth_clk", .clk = &gpt2_synth_clk},
{ .dev_id = "uart", .clk = &uart_clk},
{ .dev_id = "firda", .clk = &firda_clk},
{ .dev_id = "gpt0", .clk = &gpt0_clk},
{ .dev_id = "gpt1", .clk = &gpt1_clk},
{ .dev_id = "gpt2", .clk = &gpt2_clk},
/* clock derived from pll3 clk */
- { .dev_id = "usbh", .clk = &usbh_clk},
+ { .con_id = "usbh_clk", .clk = &usbh_clk},
{ .dev_id = "usbd", .clk = &usbd_clk},
- { .dev_id = "clcd", .clk = &clcd_clk},
/* clock derived from ahb clk */
{ .con_id = "apb_clk", .clk = &apb_clk},
- { .dev_id = "i2c", .clk = &i2c_clk},
+ { .dev_id = "i2c_designware.0", .clk = &i2c_clk},
{ .dev_id = "dma", .clk = &dma_clk},
{ .dev_id = "jpeg", .clk = &jpeg_clk},
{ .dev_id = "gmac", .clk = &gmac_clk},
@@ -377,8 +686,50 @@ static struct clk_lookup spear_clk_lookups[] = {
{ .dev_id = "c3", .clk = &c3_clk},
/* clock derived from apb clk */
{ .dev_id = "adc", .clk = &adc_clk},
- { .dev_id = "ssp", .clk = &ssp_clk},
+ { .dev_id = "ssp-pl022.0", .clk = &ssp0_clk},
{ .dev_id = "gpio", .clk = &gpio_clk},
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+ { .dev_id = "physmap-flash", .clk = &emi_clk},
+#endif
+#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
+ defined(CONFIG_MACH_SPEAR320)
+ { .con_id = "fsmc", .clk = &fsmc_clk},
+#endif
+
+/* common clocks to spear310 and spear320 */
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+ { .dev_id = "uart1", .clk = &uart1_clk},
+ { .dev_id = "uart2", .clk = &uart2_clk},
+#endif
+
+ /* common clock to spear300 and spear320 */
+#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
+ { .dev_id = "clcd", .clk = &clcd_clk},
+ { .dev_id = "sdhci", .clk = &sdhci_clk},
+#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
+
+ /* spear300 machine specific clock structures */
+#ifdef CONFIG_MACH_SPEAR300
+ { .dev_id = "gpio1", .clk = &gpio1_clk},
+ { .dev_id = "keyboard", .clk = &kbd_clk},
+#endif
+
+ /* spear310 machine specific clock structures */
+#ifdef CONFIG_MACH_SPEAR310
+ { .dev_id = "uart3", .clk = &uart3_clk},
+ { .dev_id = "uart4", .clk = &uart4_clk},
+ { .dev_id = "uart5", .clk = &uart5_clk},
+
+#endif
+ /* spear320 machine specific clock structures */
+#ifdef CONFIG_MACH_SPEAR320
+ { .dev_id = "c_can_platform.0", .clk = &can0_clk},
+ { .dev_id = "c_can_platform.1", .clk = &can1_clk},
+ { .dev_id = "i2c_designware.1", .clk = &i2c1_clk},
+ { .dev_id = "ssp-pl022.1", .clk = &ssp1_clk},
+ { .dev_id = "ssp-pl022.2", .clk = &ssp2_clk},
+ { .dev_id = "pwm", .clk = &pwm_clk},
+#endif
};
void __init clk_init(void)
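The lookup table above is what clk_get() matches against: entries with a .dev_id bind to a specific device name (e.g. "ssp-pl022.0"), while con_id-only entries are global connection names. A small consumer sketch, assuming the standard clk/clkdev API of this kernel (illustrative only):

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>

static int example_enable_uart_synth(void)
{
	/* matches the { .con_id = "uart_synth_clk", ... } entry above */
	struct clk *clk = clk_get_sys(NULL, "uart_synth_clk");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret == 0) {
		pr_info("uart synth runs at %lu Hz\n", clk_get_rate(clk));
		clk_disable(clk);
	}
	clk_put(clk);
	return ret;
}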
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index af7e02c909a3..e7d2de84e9a5 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -33,14 +33,14 @@
/* Add spear3xx family device structure declarations here */
extern struct amba_device gpio_device;
extern struct amba_device uart_device;
-extern struct sys_timer spear_sys_timer;
+extern struct sys_timer spear3xx_timer;
/* Add spear3xx family function declarations here */
void __init clk_init(void);
+void __init spear_setup_timer(void);
void __init spear3xx_map_io(void);
void __init spear3xx_init_irq(void);
void __init spear3xx_init(void);
-void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size);
/* pad mux declarations */
#define PMX_FIRDA_MASK (1 << 14)
@@ -133,8 +133,6 @@ extern struct pmx_dev pmx_telecom_sdio_4bit;
extern struct pmx_dev pmx_telecom_sdio_8bit;
extern struct pmx_dev pmx_gpio1;
-void spear300_pmx_init(void);
-
/* Add spear300 machine function declarations here */
void __init spear300_init(void);
@@ -154,8 +152,6 @@ extern struct pmx_dev pmx_fsmc;
extern struct pmx_dev pmx_rs485_0_1;
extern struct pmx_dev pmx_tdm0;
-void spear310_pmx_init(void);
-
/* Add spear310 machine function declarations here */
void __init spear310_init(void);
@@ -195,8 +191,6 @@ extern struct pmx_dev pmx_smii0;
extern struct pmx_dev pmx_smii1;
extern struct pmx_dev pmx_i2c1;
-void spear320_pmx_init(void);
-
/* Add spear320 machine function declarations here */
void __init spear320_init(void);
diff --git a/arch/arm/mach-spear3xx/include/mach/hardware.h b/arch/arm/mach-spear3xx/include/mach/hardware.h
index 4a86e6a3c444..490e86a6b0c7 100644
--- a/arch/arm/mach-spear3xx/include/mach/hardware.h
+++ b/arch/arm/mach-spear3xx/include/mach/hardware.h
@@ -14,6 +14,8 @@
#ifndef __MACH_HARDWARE_H
#define __MACH_HARDWARE_H
+#include <plat/hardware.h>
+
/* Virtual to physical translation of statically mapped space */
#define IO_ADDRESS(x) (x | 0xF0000000)
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index 38d767a1aba0..0b93347c0f11 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -16,14 +16,14 @@
#include <mach/spear.h>
-#define MISC_BASE VA_SPEAR3XX_ICM3_MISC_REG_BASE
+#define MISC_BASE IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE)
-#define SOC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x000))
-#define DIAG_CFG_CTR ((unsigned int *)(MISC_BASE + 0x004))
-#define PLL1_CTR ((unsigned int *)(MISC_BASE + 0x008))
-#define PLL1_FRQ ((unsigned int *)(MISC_BASE + 0x00C))
-#define PLL1_MOD ((unsigned int *)(MISC_BASE + 0x010))
-#define PLL2_CTR ((unsigned int *)(MISC_BASE + 0x014))
+#define SOC_CFG_CTR (MISC_BASE + 0x000)
+#define DIAG_CFG_CTR (MISC_BASE + 0x004)
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL1_MOD (MISC_BASE + 0x010)
+#define PLL2_CTR (MISC_BASE + 0x014)
/* PLL_CTR register masks */
#define PLL_ENABLE 2
#define PLL_MODE_SHIFT 4
@@ -33,7 +33,7 @@
#define PLL_MODE_DITH_DSB 2
#define PLL_MODE_DITH_SSB 3
-#define PLL2_FRQ ((unsigned int *)(MISC_BASE + 0x018))
+#define PLL2_FRQ (MISC_BASE + 0x018)
/* PLL FRQ register masks */
#define PLL_DIV_N_SHIFT 0
#define PLL_DIV_N_MASK 0xFF
@@ -44,16 +44,16 @@
#define PLL_DITH_FDBK_M_SHIFT 16
#define PLL_DITH_FDBK_M_MASK 0xFFFF
-#define PLL2_MOD ((unsigned int *)(MISC_BASE + 0x01C))
-#define PLL_CLK_CFG ((unsigned int *)(MISC_BASE + 0x020))
-#define CORE_CLK_CFG ((unsigned int *)(MISC_BASE + 0x024))
+#define PLL2_MOD (MISC_BASE + 0x01C)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
/* CORE CLK CFG register masks */
#define PLL_HCLK_RATIO_SHIFT 10
#define PLL_HCLK_RATIO_MASK 0x3
#define HCLK_PCLK_RATIO_SHIFT 8
#define HCLK_PCLK_RATIO_MASK 0x3
-#define PERIP_CLK_CFG ((unsigned int *)(MISC_BASE + 0x028))
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
/* PERIP_CLK_CFG register masks */
#define UART_CLK_SHIFT 4
#define UART_CLK_MASK 0x1
@@ -63,10 +63,10 @@
#define GPT1_CLK_SHIFT 11
#define GPT2_CLK_SHIFT 12
#define GPT_CLK_MASK 0x1
-#define AUX_CLK_PLL3_MASK 0
-#define AUX_CLK_PLL1_MASK 1
+#define AUX_CLK_PLL3_VAL 0
+#define AUX_CLK_PLL1_VAL 1
-#define PERIP1_CLK_ENB ((unsigned int *)(MISC_BASE + 0x02C))
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
/* PERIP1_CLK_ENB register masks */
#define UART_CLK_ENB 3
#define SSP_CLK_ENB 5
@@ -85,34 +85,35 @@
#define USBH_CLK_ENB 25
#define C3_CLK_ENB 31
-#define SOC_CORE_ID ((unsigned int *)(MISC_BASE + 0x030))
-#define RAS_CLK_ENB ((unsigned int *)(MISC_BASE + 0x034))
-#define PERIP1_SOF_RST ((unsigned int *)(MISC_BASE + 0x038))
+#define SOC_CORE_ID (MISC_BASE + 0x030)
+#define RAS_CLK_ENB (MISC_BASE + 0x034)
+#define PERIP1_SOF_RST (MISC_BASE + 0x038)
/* PERIP1_SOF_RST register masks */
#define JPEG_SOF_RST 8
-#define SOC_USER_ID ((unsigned int *)(MISC_BASE + 0x03C))
-#define RAS_SOF_RST ((unsigned int *)(MISC_BASE + 0x040))
-#define PRSC1_CLK_CFG ((unsigned int *)(MISC_BASE + 0x044))
-#define PRSC2_CLK_CFG ((unsigned int *)(MISC_BASE + 0x048))
-#define PRSC3_CLK_CFG ((unsigned int *)(MISC_BASE + 0x04C))
+#define SOC_USER_ID (MISC_BASE + 0x03C)
+#define RAS_SOF_RST (MISC_BASE + 0x040)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
/* gpt synthesizer register masks */
#define GPT_MSCALE_SHIFT 0
#define GPT_MSCALE_MASK 0xFFF
#define GPT_NSCALE_SHIFT 12
#define GPT_NSCALE_MASK 0xF
-#define AMEM_CLK_CFG ((unsigned int *)(MISC_BASE + 0x050))
-#define EXPI_CLK_CFG ((unsigned int *)(MISC_BASE + 0x054))
-#define CLCD_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x05C))
-#define FIRDA_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x060))
-#define UART_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x064))
-#define GMAC_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x068))
-#define RAS1_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x06C))
-#define RAS2_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x070))
-#define RAS3_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x074))
-#define RAS4_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x078))
+#define AMEM_CLK_CFG (MISC_BASE + 0x050)
+#define EXPI_CLK_CFG (MISC_BASE + 0x054)
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
+#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
+#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
+#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
+#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
/* aux clk synthesiser register masks for irda to ras4 */
+#define AUX_SYNT_ENB 31
#define AUX_EQ_SEL_SHIFT 30
#define AUX_EQ_SEL_MASK 1
#define AUX_EQ1_SEL 0
@@ -122,42 +123,42 @@
#define AUX_YSCALE_SHIFT 0
#define AUX_YSCALE_MASK 0xFFF
-#define ICM1_ARB_CFG ((unsigned int *)(MISC_BASE + 0x07C))
-#define ICM2_ARB_CFG ((unsigned int *)(MISC_BASE + 0x080))
-#define ICM3_ARB_CFG ((unsigned int *)(MISC_BASE + 0x084))
-#define ICM4_ARB_CFG ((unsigned int *)(MISC_BASE + 0x088))
-#define ICM5_ARB_CFG ((unsigned int *)(MISC_BASE + 0x08C))
-#define ICM6_ARB_CFG ((unsigned int *)(MISC_BASE + 0x090))
-#define ICM7_ARB_CFG ((unsigned int *)(MISC_BASE + 0x094))
-#define ICM8_ARB_CFG ((unsigned int *)(MISC_BASE + 0x098))
-#define ICM9_ARB_CFG ((unsigned int *)(MISC_BASE + 0x09C))
-#define DMA_CHN_CFG ((unsigned int *)(MISC_BASE + 0x0A0))
-#define USB2_PHY_CFG ((unsigned int *)(MISC_BASE + 0x0A4))
-#define GMAC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0A8))
-#define EXPI_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0AC))
-#define PRC1_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C0))
-#define PRC2_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C4))
-#define PRC3_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C8))
-#define PRC4_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0CC))
-#define PRC1_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D0))
-#define PRC2_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D4))
-#define PRC3_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D8))
-#define PRC4_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0DC))
-#define PWRDOWN_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0E0))
-#define COMPSSTL_1V8_CFG ((unsigned int *)(MISC_BASE + 0x0E4))
-#define COMPSSTL_2V5_CFG ((unsigned int *)(MISC_BASE + 0x0E8))
-#define COMPCOR_3V3_CFG ((unsigned int *)(MISC_BASE + 0x0EC))
-#define SSTLPAD_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F0))
-#define BIST1_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F4))
-#define BIST2_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F8))
-#define BIST3_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0FC))
-#define BIST4_CFG_CTR ((unsigned int *)(MISC_BASE + 0x100))
-#define BIST5_CFG_CTR ((unsigned int *)(MISC_BASE + 0x104))
-#define BIST1_STS_RES ((unsigned int *)(MISC_BASE + 0x108))
-#define BIST2_STS_RES ((unsigned int *)(MISC_BASE + 0x10C))
-#define BIST3_STS_RES ((unsigned int *)(MISC_BASE + 0x110))
-#define BIST4_STS_RES ((unsigned int *)(MISC_BASE + 0x114))
-#define BIST5_STS_RES ((unsigned int *)(MISC_BASE + 0x118))
-#define SYSERR_CFG_CTR ((unsigned int *)(MISC_BASE + 0x11C))
+#define ICM1_ARB_CFG (MISC_BASE + 0x07C)
+#define ICM2_ARB_CFG (MISC_BASE + 0x080)
+#define ICM3_ARB_CFG (MISC_BASE + 0x084)
+#define ICM4_ARB_CFG (MISC_BASE + 0x088)
+#define ICM5_ARB_CFG (MISC_BASE + 0x08C)
+#define ICM6_ARB_CFG (MISC_BASE + 0x090)
+#define ICM7_ARB_CFG (MISC_BASE + 0x094)
+#define ICM8_ARB_CFG (MISC_BASE + 0x098)
+#define ICM9_ARB_CFG (MISC_BASE + 0x09C)
+#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
+#define USB2_PHY_CFG (MISC_BASE + 0x0A4)
+#define GMAC_CFG_CTR (MISC_BASE + 0x0A8)
+#define EXPI_CFG_CTR (MISC_BASE + 0x0AC)
+#define PRC1_LOCK_CTR (MISC_BASE + 0x0C0)
+#define PRC2_LOCK_CTR (MISC_BASE + 0x0C4)
+#define PRC3_LOCK_CTR (MISC_BASE + 0x0C8)
+#define PRC4_LOCK_CTR (MISC_BASE + 0x0CC)
+#define PRC1_IRQ_CTR (MISC_BASE + 0x0D0)
+#define PRC2_IRQ_CTR (MISC_BASE + 0x0D4)
+#define PRC3_IRQ_CTR (MISC_BASE + 0x0D8)
+#define PRC4_IRQ_CTR (MISC_BASE + 0x0DC)
+#define PWRDOWN_CFG_CTR (MISC_BASE + 0x0E0)
+#define COMPSSTL_1V8_CFG (MISC_BASE + 0x0E4)
+#define COMPSSTL_2V5_CFG (MISC_BASE + 0x0E8)
+#define COMPCOR_3V3_CFG (MISC_BASE + 0x0EC)
+#define SSTLPAD_CFG_CTR (MISC_BASE + 0x0F0)
+#define BIST1_CFG_CTR (MISC_BASE + 0x0F4)
+#define BIST2_CFG_CTR (MISC_BASE + 0x0F8)
+#define BIST3_CFG_CTR (MISC_BASE + 0x0FC)
+#define BIST4_CFG_CTR (MISC_BASE + 0x100)
+#define BIST5_CFG_CTR (MISC_BASE + 0x104)
+#define BIST1_STS_RES (MISC_BASE + 0x108)
+#define BIST2_STS_RES (MISC_BASE + 0x10C)
+#define BIST3_STS_RES (MISC_BASE + 0x110)
+#define BIST4_STS_RES (MISC_BASE + 0x114)
+#define BIST5_STS_RES (MISC_BASE + 0x118)
+#define SYSERR_CFG_CTR (MISC_BASE + 0x11C)
#endif /* __MACH_MISC_REGS_H */
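With the (unsigned int *) casts replaced by an IOMEM()-based MISC_BASE, the register macros above now have type void __iomem * and can be passed straight to readl()/writel() without further casting (and sparse can type-check them). A minimal usage sketch, assuming ARM's IOMEM() is roughly ((void __force __iomem *)(x)) (illustrative):

#include <linux/io.h>
#include <mach/misc_regs.h>

static void example_set_hclk_ratio(u32 ratio)
{
	u32 cfg = readl(CORE_CLK_CFG);	/* CORE_CLK_CFG is now void __iomem * */

	cfg &= ~(PLL_HCLK_RATIO_MASK << PLL_HCLK_RATIO_SHIFT);
	cfg |= (ratio & PLL_HCLK_RATIO_MASK) << PLL_HCLK_RATIO_SHIFT;
	writel(cfg, CORE_CLK_CFG);
}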
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h
index cacf17a958cd..53677e464d4b 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear320.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -62,7 +62,7 @@
#define SPEAR320_SMII1_BASE 0xAB000000
#define SPEAR320_SMII1_SIZE 0x01000000
-#define SPEAR320_SOC_CONFIG_BASE 0xB4000000
+#define SPEAR320_SOC_CONFIG_BASE 0xB3000000
#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
/* Interrupt registers offsets and masks */
#define INT_STS_MASK_REG 0x04
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 5aa2d54ebfaa..7e01677e2fc2 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -459,10 +459,16 @@ void __init spear300_init(void)
if (ret)
printk(KERN_ERR "Error registering Shared IRQ\n");
}
-}
-void spear300_pmx_init(void)
-{
- spear_pmx_init(&pmx_driver, SPEAR300_SOC_CONFIG_BASE,
+ /* pmx initialization */
+ pmx_driver.base = ioremap(SPEAR300_SOC_CONFIG_BASE,
SPEAR300_SOC_CONFIG_SIZE);
+ if (pmx_driver.base) {
+ ret = pmx_register(&pmx_driver);
+ if (ret)
+ printk(KERN_ERR "padmux: registeration failed. err no"
+ ": %d\n", ret);
+ /* Free Mapping, device selection already done */
+ iounmap(pmx_driver.base);
+ }
}
diff --git a/arch/arm/mach-spear3xx/spear300_evb.c b/arch/arm/mach-spear3xx/spear300_evb.c
index bb21db152a23..cd23c98b2ffd 100644
--- a/arch/arm/mach-spear3xx/spear300_evb.c
+++ b/arch/arm/mach-spear3xx/spear300_evb.c
@@ -51,14 +51,13 @@ static void __init spear300_evb_init(void)
{
unsigned int i;
- /* call spear300 machine init function */
- spear300_init();
-
- /* padmux initialization */
+ /* padmux initialization, must be done before spear300_init */
pmx_driver.mode = &photo_frame_mode;
pmx_driver.devs = pmx_devs;
pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
- spear300_pmx_init();
+
+ /* call spear300 machine init function */
+ spear300_init();
/* Add Platform Devices */
platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
@@ -72,6 +71,6 @@ MACHINE_START(SPEAR300, "ST-SPEAR300-EVB")
.boot_params = 0x00000100,
.map_io = spear3xx_map_io,
.init_irq = spear3xx_init_irq,
- .timer = &spear_sys_timer,
+ .timer = &spear3xx_timer,
.init_machine = spear300_evb_init,
MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 53b41b52d7ee..f38eb2146e2f 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -293,10 +293,11 @@ void __init spear310_init(void)
if (ret)
printk(KERN_ERR "Error registering Shared IRQ 4\n");
}
-}
-void spear310_pmx_init(void)
-{
- spear_pmx_init(&pmx_driver, SPEAR310_SOC_CONFIG_BASE,
- SPEAR310_SOC_CONFIG_SIZE);
+ /* pmx initialization */
+ pmx_driver.base = base;
+ ret = pmx_register(&pmx_driver);
+ if (ret)
+ printk(KERN_ERR "padmux: registeration failed. err no: %d\n",
+ ret);
}
diff --git a/arch/arm/mach-spear3xx/spear310_evb.c b/arch/arm/mach-spear3xx/spear310_evb.c
index 7facf6643199..385543108262 100644
--- a/arch/arm/mach-spear3xx/spear310_evb.c
+++ b/arch/arm/mach-spear3xx/spear310_evb.c
@@ -58,14 +58,13 @@ static void __init spear310_evb_init(void)
{
unsigned int i;
- /* call spear310 machine init function */
- spear310_init();
-
- /* padmux initialization */
+ /* padmux initialization, must be done before spear310_init */
pmx_driver.mode = NULL;
pmx_driver.devs = pmx_devs;
pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
- spear310_pmx_init();
+
+ /* call spear310 machine init function */
+ spear310_init();
/* Add Platform Devices */
platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
@@ -79,6 +78,6 @@ MACHINE_START(SPEAR310, "ST-SPEAR310-EVB")
.boot_params = 0x00000100,
.map_io = spear3xx_map_io,
.init_irq = spear3xx_init_irq,
- .timer = &spear_sys_timer,
+ .timer = &spear3xx_timer,
.init_machine = spear310_evb_init,
MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 88b465284c36..456bb331340f 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -540,10 +540,11 @@ void __init spear320_init(void)
if (ret)
printk(KERN_ERR "Error registering Shared IRQ 4\n");
}
-}
-void spear320_pmx_init(void)
-{
- spear_pmx_init(&pmx_driver, SPEAR320_SOC_CONFIG_BASE,
- SPEAR320_SOC_CONFIG_SIZE);
+ /* pmx initialization */
+ pmx_driver.base = base;
+ ret = pmx_register(&pmx_driver);
+ if (ret)
+ printk(KERN_ERR "padmux: registeration failed. err no: %d\n",
+ ret);
}
diff --git a/arch/arm/mach-spear3xx/spear320_evb.c b/arch/arm/mach-spear3xx/spear320_evb.c
index 62ac685a4135..4a7ce35fdf8c 100644
--- a/arch/arm/mach-spear3xx/spear320_evb.c
+++ b/arch/arm/mach-spear3xx/spear320_evb.c
@@ -55,14 +55,13 @@ static void __init spear320_evb_init(void)
{
unsigned int i;
- /* call spear320 machine init function */
- spear320_init();
-
- /* padmux initialization */
+ /* padmux initialization, must be done before spear320_init */
pmx_driver.mode = &auto_net_mii_mode;
pmx_driver.devs = pmx_devs;
pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
- spear320_pmx_init();
+
+ /* call spear320 machine init function */
+ spear320_init();
/* Add Platform Devices */
platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
@@ -76,6 +75,6 @@ MACHINE_START(SPEAR320, "ST-SPEAR320-EVB")
.boot_params = 0x00000100,
.map_io = spear3xx_map_io,
.init_irq = spear3xx_init_irq,
- .timer = &spear_sys_timer,
+ .timer = &spear3xx_timer,
.init_machine = spear320_evb_init,
MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 52f553c8c46d..e12a06c3b85b 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -523,26 +523,35 @@ struct pmx_dev pmx_plgpio_45_46_49_50 = {
.mode_count = ARRAY_SIZE(pmx_plgpio_45_46_49_50_modes),
.enb_on_reset = 1,
};
+#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
-#endif
-
-/* spear padmux initialization function */
-void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size)
+static void __init spear3xx_timer_init(void)
{
- int ret = 0;
+ char pclk_name[] = "pll3_48m_clk";
+ struct clk *gpt_clk, *pclk;
+
+ /* get the system timer clock */
+ gpt_clk = clk_get_sys("gpt0", NULL);
+ if (IS_ERR(gpt_clk)) {
+ pr_err("%s:couldn't get clk for gpt\n", __func__);
+ BUG();
+ }
- /* pad mux initialization */
- pmx_driver->base = ioremap(base, size);
- if (!pmx_driver->base) {
- ret = -ENOMEM;
- goto pmx_fail;
+ /* get the suitable parent clock for timer*/
+ pclk = clk_get(NULL, pclk_name);
+ if (IS_ERR(pclk)) {
+ pr_err("%s:couldn't get %s as parent for gpt\n",
+ __func__, pclk_name);
+ BUG();
}
- ret = pmx_register(pmx_driver);
- iounmap(pmx_driver->base);
+ clk_set_parent(gpt_clk, pclk);
+ clk_put(gpt_clk);
+ clk_put(pclk);
-pmx_fail:
- if (ret)
- printk(KERN_ERR "padmux: registration failed. err no: %d\n",
- ret);
+ spear_setup_timer();
}
+
+struct sys_timer spear3xx_timer = {
+ .init = spear3xx_timer_init,
+};
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
index 36ff056b7321..91719524766b 100644
--- a/arch/arm/mach-spear6xx/clock.c
+++ b/arch/arm/mach-spear6xx/clock.c
@@ -39,18 +39,43 @@ static struct clk rtc_clk = {
};
/* clock derived from 30 MHz osc clk */
+/* pll masks structure */
+static struct pll_clk_masks pll1_masks = {
+ .mode_mask = PLL_MODE_MASK,
+ .mode_shift = PLL_MODE_SHIFT,
+ .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
+ .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
+ .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
+ .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
+ .div_p_mask = PLL_DIV_P_MASK,
+ .div_p_shift = PLL_DIV_P_SHIFT,
+ .div_n_mask = PLL_DIV_N_MASK,
+ .div_n_shift = PLL_DIV_N_SHIFT,
+};
+
/* pll1 configuration structure */
static struct pll_clk_config pll1_config = {
.mode_reg = PLL1_CTR,
.cfg_reg = PLL1_FRQ,
+ .masks = &pll1_masks,
+};
+
+/* pll rate configuration table, in ascending order of rates */
+struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
};
/* PLL1 clock */
static struct clk pll1_clk = {
+ .flags = ENABLED_ON_INIT,
.pclk = &osc_30m_clk,
.en_reg = PLL1_CTR,
.en_reg_bit = PLL_ENABLE,
- .recalc = &pll1_clk_recalc,
+ .calc_rate = &pll_calc_rate,
+ .recalc = &pll_clk_recalc,
+ .set_rate = &pll_clk_set_rate,
+ .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
.private_data = &pll1_config,
};
@@ -76,31 +101,83 @@ static struct clk cpu_clk = {
.recalc = &follow_parent,
};
+/* ahb masks structure */
+static struct bus_clk_masks ahb_masks = {
+ .mask = PLL_HCLK_RATIO_MASK,
+ .shift = PLL_HCLK_RATIO_SHIFT,
+};
+
/* ahb configuration structure */
static struct bus_clk_config ahb_config = {
.reg = CORE_CLK_CFG,
- .mask = PLL_HCLK_RATIO_MASK,
- .shift = PLL_HCLK_RATIO_SHIFT,
+ .masks = &ahb_masks,
+};
+
+/* ahb rate configuration table, in ascending order of rates */
+struct bus_rate_tbl bus_rtbl[] = {
+ {.div = 3}, /* == parent divided by 4 */
+ {.div = 2}, /* == parent divided by 3 */
+ {.div = 1}, /* == parent divided by 2 */
+ {.div = 0}, /* == parent divided by 1 */
};
/* ahb clock */
static struct clk ahb_clk = {
.flags = ALWAYS_ENABLED,
.pclk = &pll1_clk,
+ .calc_rate = &bus_calc_rate,
.recalc = &bus_clk_recalc,
+ .set_rate = &bus_clk_set_rate,
+ .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
.private_data = &ahb_config,
};
+/* auxiliary synthesizers masks */
+static struct aux_clk_masks aux_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = AUX_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = AUX_XSCALE_MASK,
+ .xscale_sel_shift = AUX_XSCALE_SHIFT,
+ .yscale_sel_mask = AUX_YSCALE_MASK,
+ .yscale_sel_shift = AUX_YSCALE_SHIFT,
+};
+
+/* uart configurations */
+static struct aux_clk_config uart_synth_config = {
+ .synth_reg = UART_CLK_SYNT,
+ .masks = &aux_masks,
+};
+
+/* aux rate configuration table, in ascending order of rates */
+struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
+ {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/* uart synth clock */
+static struct clk uart_synth_clk = {
+ .en_reg = UART_CLK_SYNT,
+ .en_reg_bit = AUX_SYNT_ENB,
+ .pclk = &pll1_clk,
+ .calc_rate = &aux_calc_rate,
+ .recalc = &aux_clk_recalc,
+ .set_rate = &aux_clk_set_rate,
+ .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
+ .private_data = &uart_synth_config,
+};
+
/* uart parents */
static struct pclk_info uart_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &uart_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
@@ -112,19 +189,13 @@ static struct pclk_sel uart_pclk_sel = {
.pclk_sel_mask = UART_CLK_MASK,
};
-/* uart configurations */
-static struct aux_clk_config uart_config = {
- .synth_reg = UART_CLK_SYNT,
-};
-
/* uart0 clock */
static struct clk uart0_clk = {
.en_reg = PERIP1_CLK_ENB,
.en_reg_bit = UART0_CLK_ENB,
.pclk_sel = &uart_pclk_sel,
.pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &aux_clk_recalc,
- .private_data = &uart_config,
+ .recalc = &follow_parent,
};
/* uart1 clock */
@@ -133,25 +204,35 @@ static struct clk uart1_clk = {
.en_reg_bit = UART1_CLK_ENB,
.pclk_sel = &uart_pclk_sel,
.pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &aux_clk_recalc,
- .private_data = &uart_config,
+ .recalc = &follow_parent,
};
/* firda configurations */
-static struct aux_clk_config firda_config = {
+static struct aux_clk_config firda_synth_config = {
.synth_reg = FIRDA_CLK_SYNT,
+ .masks = &aux_masks,
+};
+
+/* firda synth clock */
+static struct clk firda_synth_clk = {
+ .en_reg = FIRDA_CLK_SYNT,
+ .en_reg_bit = AUX_SYNT_ENB,
+ .pclk = &pll1_clk,
+ .calc_rate = &aux_calc_rate,
+ .recalc = &aux_clk_recalc,
+ .set_rate = &aux_clk_set_rate,
+ .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
+ .private_data = &firda_synth_config,
};
/* firda parents */
static struct pclk_info firda_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &firda_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
@@ -169,25 +250,35 @@ static struct clk firda_clk = {
.en_reg_bit = FIRDA_CLK_ENB,
.pclk_sel = &firda_pclk_sel,
.pclk_sel_shift = FIRDA_CLK_SHIFT,
- .recalc = &aux_clk_recalc,
- .private_data = &firda_config,
+ .recalc = &follow_parent,
};
/* clcd configurations */
-static struct aux_clk_config clcd_config = {
+static struct aux_clk_config clcd_synth_config = {
.synth_reg = CLCD_CLK_SYNT,
+ .masks = &aux_masks,
+};
+
+/* clcd synth clock */
+static struct clk clcd_synth_clk = {
+ .en_reg = CLCD_CLK_SYNT,
+ .en_reg_bit = AUX_SYNT_ENB,
+ .pclk = &pll1_clk,
+ .calc_rate = &aux_calc_rate,
+ .recalc = &aux_clk_recalc,
+ .set_rate = &aux_clk_set_rate,
+ .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
+ .private_data = &clcd_synth_config,
};
/* clcd parents */
static struct pclk_info clcd_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &clcd_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
@@ -205,82 +296,173 @@ static struct clk clcd_clk = {
.en_reg_bit = CLCD_CLK_ENB,
.pclk_sel = &clcd_pclk_sel,
.pclk_sel_shift = CLCD_CLK_SHIFT,
- .recalc = &aux_clk_recalc,
- .private_data = &clcd_config,
+ .recalc = &follow_parent,
+};
+
+/* gpt synthesizer masks */
+static struct gpt_clk_masks gpt_masks = {
+ .mscale_sel_mask = GPT_MSCALE_MASK,
+ .mscale_sel_shift = GPT_MSCALE_SHIFT,
+ .nscale_sel_mask = GPT_NSCALE_MASK,
+ .nscale_sel_shift = GPT_NSCALE_SHIFT,
+};
+
+/* gpt rate configuration table, in ascending order of rates */
+struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* gpt0 synth clk config*/
+static struct gpt_clk_config gpt0_synth_config = {
+ .synth_reg = PRSC1_CLK_CFG,
+ .masks = &gpt_masks,
+};
+
+/* gpt synth clock */
+static struct clk gpt0_synth_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .calc_rate = &gpt_calc_rate,
+ .recalc = &gpt_clk_recalc,
+ .set_rate = &gpt_clk_set_rate,
+ .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
+ .private_data = &gpt0_synth_config,
};
/* gpt parents */
-static struct pclk_info gpt_pclk_info[] = {
+static struct pclk_info gpt0_pclk_info[] = {
{
- .pclk = &pll1_clk,
- .pclk_mask = AUX_CLK_PLL1_MASK,
- .scalable = 1,
+ .pclk = &gpt0_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
}, {
.pclk = &pll3_48m_clk,
- .pclk_mask = AUX_CLK_PLL3_MASK,
- .scalable = 0,
+ .pclk_val = AUX_CLK_PLL3_VAL,
},
};
/* gpt parent select structure */
-static struct pclk_sel gpt_pclk_sel = {
- .pclk_info = gpt_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt_pclk_info),
+static struct pclk_sel gpt0_pclk_sel = {
+ .pclk_info = gpt0_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
.pclk_sel_reg = PERIP_CLK_CFG,
.pclk_sel_mask = GPT_CLK_MASK,
};
-/* gpt0_1 configurations */
-static struct aux_clk_config gpt0_1_config = {
- .synth_reg = PRSC1_CLK_CFG,
-};
-
/* gpt0 ARM1 subsystem timer clock */
static struct clk gpt0_clk = {
.flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel = &gpt0_pclk_sel,
.pclk_sel_shift = GPT0_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt0_1_config,
+ .recalc = &follow_parent,
+};
+
+
+/* Note: gpt0 and gpt1 share the same parent clocks */
+/* gpt parent select structure */
+static struct pclk_sel gpt1_pclk_sel = {
+ .pclk_info = gpt0_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
};
/* gpt1 timer clock */
static struct clk gpt1_clk = {
.flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel = &gpt1_pclk_sel,
.pclk_sel_shift = GPT1_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt0_1_config,
+ .recalc = &follow_parent,
};
-/* gpt2 configurations */
-static struct aux_clk_config gpt2_config = {
+/* gpt2 synth clk config*/
+static struct gpt_clk_config gpt2_synth_config = {
.synth_reg = PRSC2_CLK_CFG,
+ .masks = &gpt_masks,
+};
+
+/* gpt synth clock */
+static struct clk gpt2_synth_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .calc_rate = &gpt_calc_rate,
+ .recalc = &gpt_clk_recalc,
+ .set_rate = &gpt_clk_set_rate,
+ .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
+ .private_data = &gpt2_synth_config,
+};
+
+/* gpt parents */
+static struct pclk_info gpt2_pclk_info[] = {
+ {
+ .pclk = &gpt2_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_val = AUX_CLK_PLL3_VAL,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt2_pclk_sel = {
+ .pclk_info = gpt2_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
};
/* gpt2 timer clock */
static struct clk gpt2_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPT2_CLK_ENB,
- .pclk_sel = &gpt_pclk_sel,
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt2_pclk_sel,
.pclk_sel_shift = GPT2_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt2_config,
+ .recalc = &follow_parent,
};
-/* gpt3 configurations */
-static struct aux_clk_config gpt3_config = {
+/* gpt3 synth clk config*/
+static struct gpt_clk_config gpt3_synth_config = {
.synth_reg = PRSC3_CLK_CFG,
+ .masks = &gpt_masks,
+};
+
+/* gpt synth clock */
+static struct clk gpt3_synth_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .calc_rate = &gpt_calc_rate,
+ .recalc = &gpt_clk_recalc,
+ .set_rate = &gpt_clk_set_rate,
+ .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
+ .private_data = &gpt3_synth_config,
+};
+
+/* gpt parents */
+static struct pclk_info gpt3_pclk_info[] = {
+ {
+ .pclk = &gpt3_synth_clk,
+ .pclk_val = AUX_CLK_PLL1_VAL,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_val = AUX_CLK_PLL3_VAL,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt3_pclk_sel = {
+ .pclk_info = gpt3_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt3_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
};
/* gpt3 timer clock */
static struct clk gpt3_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPT3_CLK_ENB,
- .pclk_sel = &gpt_pclk_sel,
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt3_pclk_sel,
.pclk_sel_shift = GPT3_CLK_SHIFT,
- .recalc = &gpt_clk_recalc,
- .private_data = &gpt3_config,
+ .recalc = &follow_parent,
};
/* clock derived from pll3 clk */
@@ -309,18 +491,26 @@ static struct clk usbd_clk = {
};
/* clock derived from ahb clk */
+/* apb masks structure */
+static struct bus_clk_masks apb_masks = {
+ .mask = HCLK_PCLK_RATIO_MASK,
+ .shift = HCLK_PCLK_RATIO_SHIFT,
+};
+
/* apb configuration structure */
static struct bus_clk_config apb_config = {
.reg = CORE_CLK_CFG,
- .mask = HCLK_PCLK_RATIO_MASK,
- .shift = HCLK_PCLK_RATIO_SHIFT,
+ .masks = &apb_masks,
};
/* apb clock */
static struct clk apb_clk = {
.flags = ALWAYS_ENABLED,
.pclk = &ahb_clk,
+ .calc_rate = &bus_calc_rate,
.recalc = &bus_clk_recalc,
+ .set_rate = &bus_clk_set_rate,
+ .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
.private_data = &apb_config,
};
@@ -437,7 +627,7 @@ static struct clk_lookup spear_clk_lookups[] = {
{ .con_id = "osc_32k_clk", .clk = &osc_32k_clk},
{ .con_id = "osc_30m_clk", .clk = &osc_30m_clk},
/* clock derived from 32 KHz os clk */
- { .dev_id = "rtc", .clk = &rtc_clk},
+ { .dev_id = "rtc-spear", .clk = &rtc_clk},
/* clock derived from 30 MHz os clk */
{ .con_id = "pll1_clk", .clk = &pll1_clk},
{ .con_id = "pll3_48m_clk", .clk = &pll3_48m_clk},
@@ -445,6 +635,12 @@ static struct clk_lookup spear_clk_lookups[] = {
/* clock derived from pll1 clk */
{ .con_id = "cpu_clk", .clk = &cpu_clk},
{ .con_id = "ahb_clk", .clk = &ahb_clk},
+ { .con_id = "uart_synth_clk", .clk = &uart_synth_clk},
+ { .con_id = "firda_synth_clk", .clk = &firda_synth_clk},
+ { .con_id = "clcd_synth_clk", .clk = &clcd_synth_clk},
+ { .con_id = "gpt0_synth_clk", .clk = &gpt0_synth_clk},
+ { .con_id = "gpt2_synth_clk", .clk = &gpt2_synth_clk},
+ { .con_id = "gpt3_synth_clk", .clk = &gpt3_synth_clk},
{ .dev_id = "uart0", .clk = &uart0_clk},
{ .dev_id = "uart1", .clk = &uart1_clk},
{ .dev_id = "firda", .clk = &firda_clk},
@@ -454,22 +650,22 @@ static struct clk_lookup spear_clk_lookups[] = {
{ .dev_id = "gpt2", .clk = &gpt2_clk},
{ .dev_id = "gpt3", .clk = &gpt3_clk},
/* clock derived from pll3 clk */
- { .dev_id = "usbh0", .clk = &usbh0_clk},
- { .dev_id = "usbh1", .clk = &usbh1_clk},
+ { .con_id = "usbh.0_clk", .clk = &usbh0_clk},
+ { .con_id = "usbh.1_clk", .clk = &usbh1_clk},
{ .dev_id = "usbd", .clk = &usbd_clk},
/* clock derived from ahb clk */
{ .con_id = "apb_clk", .clk = &apb_clk},
- { .dev_id = "i2c", .clk = &i2c_clk},
+ { .dev_id = "i2c_designware.0", .clk = &i2c_clk},
{ .dev_id = "dma", .clk = &dma_clk},
{ .dev_id = "jpeg", .clk = &jpeg_clk},
{ .dev_id = "gmac", .clk = &gmac_clk},
{ .dev_id = "smi", .clk = &smi_clk},
- { .dev_id = "fsmc", .clk = &fsmc_clk},
+ { .con_id = "fsmc", .clk = &fsmc_clk},
/* clock derived from apb clk */
{ .dev_id = "adc", .clk = &adc_clk},
- { .dev_id = "ssp0", .clk = &ssp0_clk},
- { .dev_id = "ssp1", .clk = &ssp1_clk},
- { .dev_id = "ssp2", .clk = &ssp2_clk},
+ { .dev_id = "ssp-pl022.0", .clk = &ssp0_clk},
+ { .dev_id = "ssp-pl022.1", .clk = &ssp1_clk},
+ { .dev_id = "ssp-pl022.2", .clk = &ssp2_clk},
{ .dev_id = "gpio0", .clk = &gpio0_clk},
{ .dev_id = "gpio1", .clk = &gpio1_clk},
{ .dev_id = "gpio2", .clk = &gpio2_clk},
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 16205a538756..e5967ededdc3 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -31,9 +31,10 @@
/* Add spear6xx family device structure declarations here */
extern struct amba_device gpio_device[];
extern struct amba_device uart_device[];
-extern struct sys_timer spear_sys_timer;
+extern struct sys_timer spear6xx_timer;
/* Add spear6xx family function declarations here */
+void __init spear_setup_timer(void);
void __init spear6xx_map_io(void);
void __init spear6xx_init_irq(void);
void __init spear6xx_init(void);
diff --git a/arch/arm/mach-spear6xx/include/mach/hardware.h b/arch/arm/mach-spear6xx/include/mach/hardware.h
index 7545116deca9..0291476ca6da 100644
--- a/arch/arm/mach-spear6xx/include/mach/hardware.h
+++ b/arch/arm/mach-spear6xx/include/mach/hardware.h
@@ -14,8 +14,9 @@
#ifndef __MACH_HARDWARE_H
#define __MACH_HARDWARE_H
+#include <plat/hardware.h>
+
/* Virtual to physical translation of statically mapped space */
#define IO_ADDRESS(x) (x | 0xF0000000)
#endif /* __MACH_HARDWARE_H */
-
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 03908036b0d0..45571c13227a 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -16,14 +16,14 @@
#include <mach/spear.h>
-#define MISC_BASE VA_SPEAR6XX_ICM3_MISC_REG_BASE
+#define MISC_BASE IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE)
-#define SOC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x000))
-#define DIAG_CFG_CTR ((unsigned int *)(MISC_BASE + 0x004))
-#define PLL1_CTR ((unsigned int *)(MISC_BASE + 0x008))
-#define PLL1_FRQ ((unsigned int *)(MISC_BASE + 0x00C))
-#define PLL1_MOD ((unsigned int *)(MISC_BASE + 0x010))
-#define PLL2_CTR ((unsigned int *)(MISC_BASE + 0x014))
+#define SOC_CFG_CTR (MISC_BASE + 0x000)
+#define DIAG_CFG_CTR (MISC_BASE + 0x004)
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL1_MOD (MISC_BASE + 0x010)
+#define PLL2_CTR (MISC_BASE + 0x014)
/* PLL_CTR register masks */
#define PLL_ENABLE 2
#define PLL_MODE_SHIFT 4
@@ -33,7 +33,7 @@
#define PLL_MODE_DITH_DSB 2
#define PLL_MODE_DITH_SSB 3
-#define PLL2_FRQ ((unsigned int *)(MISC_BASE + 0x018))
+#define PLL2_FRQ (MISC_BASE + 0x018)
/* PLL FRQ register masks */
#define PLL_DIV_N_SHIFT 0
#define PLL_DIV_N_MASK 0xFF
@@ -44,16 +44,16 @@
#define PLL_DITH_FDBK_M_SHIFT 16
#define PLL_DITH_FDBK_M_MASK 0xFFFF
-#define PLL2_MOD ((unsigned int *)(MISC_BASE + 0x01C))
-#define PLL_CLK_CFG ((unsigned int *)(MISC_BASE + 0x020))
-#define CORE_CLK_CFG ((unsigned int *)(MISC_BASE + 0x024))
+#define PLL2_MOD (MISC_BASE + 0x01C)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
/* CORE CLK CFG register masks */
#define PLL_HCLK_RATIO_SHIFT 10
#define PLL_HCLK_RATIO_MASK 0x3
#define HCLK_PCLK_RATIO_SHIFT 8
#define HCLK_PCLK_RATIO_MASK 0x3
-#define PERIP_CLK_CFG ((unsigned int *)(MISC_BASE + 0x028))
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
/* PERIP_CLK_CFG register masks */
#define CLCD_CLK_SHIFT 2
#define CLCD_CLK_MASK 0x3
@@ -66,10 +66,10 @@
#define GPT2_CLK_SHIFT 11
#define GPT3_CLK_SHIFT 12
#define GPT_CLK_MASK 0x1
-#define AUX_CLK_PLL3_MASK 0
-#define AUX_CLK_PLL1_MASK 1
+#define AUX_CLK_PLL3_VAL 0
+#define AUX_CLK_PLL1_VAL 1
-#define PERIP1_CLK_ENB ((unsigned int *)(MISC_BASE + 0x02C))
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
/* PERIP1_CLK_ENB register masks */
#define UART0_CLK_ENB 3
#define UART1_CLK_ENB 4
@@ -95,34 +95,35 @@
#define USBH0_CLK_ENB 25
#define USBH1_CLK_ENB 26
-#define SOC_CORE_ID ((unsigned int *)(MISC_BASE + 0x030))
-#define RAS_CLK_ENB ((unsigned int *)(MISC_BASE + 0x034))
-#define PERIP1_SOF_RST ((unsigned int *)(MISC_BASE + 0x038))
+#define SOC_CORE_ID (MISC_BASE + 0x030)
+#define RAS_CLK_ENB (MISC_BASE + 0x034)
+#define PERIP1_SOF_RST (MISC_BASE + 0x038)
/* PERIP1_SOF_RST register masks */
#define JPEG_SOF_RST 8
-#define SOC_USER_ID ((unsigned int *)(MISC_BASE + 0x03C))
-#define RAS_SOF_RST ((unsigned int *)(MISC_BASE + 0x040))
-#define PRSC1_CLK_CFG ((unsigned int *)(MISC_BASE + 0x044))
-#define PRSC2_CLK_CFG ((unsigned int *)(MISC_BASE + 0x048))
-#define PRSC3_CLK_CFG ((unsigned int *)(MISC_BASE + 0x04C))
+#define SOC_USER_ID (MISC_BASE + 0x03C)
+#define RAS_SOF_RST (MISC_BASE + 0x040)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
/* gpt synthesizer register masks */
#define GPT_MSCALE_SHIFT 0
#define GPT_MSCALE_MASK 0xFFF
#define GPT_NSCALE_SHIFT 12
#define GPT_NSCALE_MASK 0xF
-#define AMEM_CLK_CFG ((unsigned int *)(MISC_BASE + 0x050))
-#define EXPI_CLK_CFG ((unsigned int *)(MISC_BASE + 0x054))
-#define CLCD_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x05C))
-#define FIRDA_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x060))
-#define UART_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x064))
-#define GMAC_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x068))
-#define RAS1_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x06C))
-#define RAS2_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x070))
-#define RAS3_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x074))
-#define RAS4_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x078))
+#define AMEM_CLK_CFG (MISC_BASE + 0x050)
+#define EXPI_CLK_CFG (MISC_BASE + 0x054)
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
+#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
+#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
+#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
+#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
/* aux clk synthesiser register masks for irda to ras4 */
+#define AUX_SYNT_ENB 31
#define AUX_EQ_SEL_SHIFT 30
#define AUX_EQ_SEL_MASK 1
#define AUX_EQ1_SEL 0
@@ -132,42 +133,42 @@
#define AUX_YSCALE_SHIFT 0
#define AUX_YSCALE_MASK 0xFFF
-#define ICM1_ARB_CFG ((unsigned int *)(MISC_BASE + 0x07C))
-#define ICM2_ARB_CFG ((unsigned int *)(MISC_BASE + 0x080))
-#define ICM3_ARB_CFG ((unsigned int *)(MISC_BASE + 0x084))
-#define ICM4_ARB_CFG ((unsigned int *)(MISC_BASE + 0x088))
-#define ICM5_ARB_CFG ((unsigned int *)(MISC_BASE + 0x08C))
-#define ICM6_ARB_CFG ((unsigned int *)(MISC_BASE + 0x090))
-#define ICM7_ARB_CFG ((unsigned int *)(MISC_BASE + 0x094))
-#define ICM8_ARB_CFG ((unsigned int *)(MISC_BASE + 0x098))
-#define ICM9_ARB_CFG ((unsigned int *)(MISC_BASE + 0x09C))
-#define DMA_CHN_CFG ((unsigned int *)(MISC_BASE + 0x0A0))
-#define USB2_PHY_CFG ((unsigned int *)(MISC_BASE + 0x0A4))
-#define GMAC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0A8))
-#define EXPI_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0AC))
-#define PRC1_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C0))
-#define PRC2_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C4))
-#define PRC3_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C8))
-#define PRC4_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0CC))
-#define PRC1_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D0))
-#define PRC2_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D4))
-#define PRC3_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D8))
-#define PRC4_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0DC))
-#define PWRDOWN_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0E0))
-#define COMPSSTL_1V8_CFG ((unsigned int *)(MISC_BASE + 0x0E4))
-#define COMPSSTL_2V5_CFG ((unsigned int *)(MISC_BASE + 0x0E8))
-#define COMPCOR_3V3_CFG ((unsigned int *)(MISC_BASE + 0x0EC))
-#define SSTLPAD_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F0))
-#define BIST1_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F4))
-#define BIST2_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F8))
-#define BIST3_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0FC))
-#define BIST4_CFG_CTR ((unsigned int *)(MISC_BASE + 0x100))
-#define BIST5_CFG_CTR ((unsigned int *)(MISC_BASE + 0x104))
-#define BIST1_STS_RES ((unsigned int *)(MISC_BASE + 0x108))
-#define BIST2_STS_RES ((unsigned int *)(MISC_BASE + 0x10C))
-#define BIST3_STS_RES ((unsigned int *)(MISC_BASE + 0x110))
-#define BIST4_STS_RES ((unsigned int *)(MISC_BASE + 0x114))
-#define BIST5_STS_RES ((unsigned int *)(MISC_BASE + 0x118))
-#define SYSERR_CFG_CTR ((unsigned int *)(MISC_BASE + 0x11C))
+#define ICM1_ARB_CFG (MISC_BASE + 0x07C)
+#define ICM2_ARB_CFG (MISC_BASE + 0x080)
+#define ICM3_ARB_CFG (MISC_BASE + 0x084)
+#define ICM4_ARB_CFG (MISC_BASE + 0x088)
+#define ICM5_ARB_CFG (MISC_BASE + 0x08C)
+#define ICM6_ARB_CFG (MISC_BASE + 0x090)
+#define ICM7_ARB_CFG (MISC_BASE + 0x094)
+#define ICM8_ARB_CFG (MISC_BASE + 0x098)
+#define ICM9_ARB_CFG (MISC_BASE + 0x09C)
+#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
+#define USB2_PHY_CFG (MISC_BASE + 0x0A4)
+#define GMAC_CFG_CTR (MISC_BASE + 0x0A8)
+#define EXPI_CFG_CTR (MISC_BASE + 0x0AC)
+#define PRC1_LOCK_CTR (MISC_BASE + 0x0C0)
+#define PRC2_LOCK_CTR (MISC_BASE + 0x0C4)
+#define PRC3_LOCK_CTR (MISC_BASE + 0x0C8)
+#define PRC4_LOCK_CTR (MISC_BASE + 0x0CC)
+#define PRC1_IRQ_CTR (MISC_BASE + 0x0D0)
+#define PRC2_IRQ_CTR (MISC_BASE + 0x0D4)
+#define PRC3_IRQ_CTR (MISC_BASE + 0x0D8)
+#define PRC4_IRQ_CTR (MISC_BASE + 0x0DC)
+#define PWRDOWN_CFG_CTR (MISC_BASE + 0x0E0)
+#define COMPSSTL_1V8_CFG (MISC_BASE + 0x0E4)
+#define COMPSSTL_2V5_CFG (MISC_BASE + 0x0E8)
+#define COMPCOR_3V3_CFG (MISC_BASE + 0x0EC)
+#define SSTLPAD_CFG_CTR (MISC_BASE + 0x0F0)
+#define BIST1_CFG_CTR (MISC_BASE + 0x0F4)
+#define BIST2_CFG_CTR (MISC_BASE + 0x0F8)
+#define BIST3_CFG_CTR (MISC_BASE + 0x0FC)
+#define BIST4_CFG_CTR (MISC_BASE + 0x100)
+#define BIST5_CFG_CTR (MISC_BASE + 0x104)
+#define BIST1_STS_RES (MISC_BASE + 0x108)
+#define BIST2_STS_RES (MISC_BASE + 0x10C)
+#define BIST3_STS_RES (MISC_BASE + 0x110)
+#define BIST4_STS_RES (MISC_BASE + 0x114)
+#define BIST5_STS_RES (MISC_BASE + 0x118)
+#define SYSERR_CFG_CTR (MISC_BASE + 0x11C)
#endif /* __MACH_MISC_REGS_H */
diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c
index daff8d04f7b6..b0ed0dfe9b01 100644
--- a/arch/arm/mach-spear6xx/spear600_evb.c
+++ b/arch/arm/mach-spear6xx/spear600_evb.c
@@ -46,6 +46,6 @@ MACHINE_START(SPEAR600, "ST-SPEAR600-EVB")
.boot_params = 0x00000100,
.map_io = spear6xx_map_io,
.init_irq = spear6xx_init_irq,
- .timer = &spear_sys_timer,
+ .timer = &spear6xx_timer,
.init_machine = spear600_evb_init,
MACHINE_END
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index f2fe14e8471d..9cd3a688f6dc 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -155,3 +155,34 @@ void __init spear6xx_map_io(void)
/* This will initialize clock framework */
clk_init();
}
+
+static void __init spear6xx_timer_init(void)
+{
+ char pclk_name[] = "pll3_48m_clk";
+ struct clk *gpt_clk, *pclk;
+
+ /* get the system timer clock */
+ gpt_clk = clk_get_sys("gpt0", NULL);
+ if (IS_ERR(gpt_clk)) {
+ pr_err("%s:couldn't get clk for gpt\n", __func__);
+ BUG();
+ }
+
+ /* get the suitable parent clock for timer */
+ pclk = clk_get(NULL, pclk_name);
+ if (IS_ERR(pclk)) {
+ pr_err("%s:couldn't get %s as parent for gpt\n",
+ __func__, pclk_name);
+ BUG();
+ }
+
+ clk_set_parent(gpt_clk, pclk);
+ clk_put(gpt_clk);
+ clk_put(pclk);
+
+ spear_setup_timer();
+}
+
+struct sys_timer spear6xx_timer = {
+ .init = spear6xx_timer_init,
+};
diff --git a/arch/arm/mach-tcc8k/board-tcc8000-sdk.c b/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
index 7991415e666b..fb6426ddeb77 100644
--- a/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
+++ b/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
@@ -54,7 +54,7 @@ static void __init tcc8k_map_io(void)
}
MACHINE_START(TCC8000_SDK, "Telechips TCC8000-SDK Demo Board")
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.map_io = tcc8k_map_io,
.init_irq = tcc8k_init_irq,
.init_machine = tcc8k_init,
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index acd9552f8ada..b94e52df0d8e 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -27,6 +27,31 @@ config MACH_HARMONY
help
Support for nVidia Harmony development platform
+config MACH_KAEN
+ bool "Kaen board"
+ select MACH_SEABOARD
+ help
+ Support for the Kaen version of Seaboard
+
+config MACH_SEABOARD
+ bool "Seaboard board"
+ help
+ Support for the nVidia Seaboard development platform. This option
+ is also selected by derivative boards, such as Kaen and Wario, that
+ closely follow the Seaboard design.
+
+config MACH_TRIMSLICE
+ bool "TrimSlice board"
+ select TEGRA_PCI
+ help
+ Support for CompuLab TrimSlice platform
+
+config MACH_WARIO
+ bool "Wario board"
+ select MACH_SEABOARD
+ help
+ Support for the Wario version of Seaboard
+
choice
prompt "Low-level debug console UART"
default TEGRA_DEBUG_UART_NONE
@@ -58,4 +83,7 @@ config TEGRA_SYSTEM_DMA
Adds system DMA functionality for NVIDIA Tegra SoCs, used by
several Tegra device drivers
+config TEGRA_EMC_SCALING_ENABLE
+ bool "Enable scaling the memory frequency"
+
endif
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index cdbc68e4c0ca..9bf39fa34d85 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -1,14 +1,16 @@
obj-y += common.o
+obj-y += devices.o
obj-y += io.o
obj-y += irq.o legacy_irq.o
obj-y += clock.o
obj-y += timer.o
obj-y += gpio.o
obj-y += pinmux.o
+obj-y += powergate.o
obj-y += fuse.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o
obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
@@ -19,3 +21,9 @@ obj-$(CONFIG_TEGRA_PCI) += pcie.o
obj-${CONFIG_MACH_HARMONY} += board-harmony.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o
+
+obj-${CONFIG_MACH_SEABOARD} += board-seaboard.o
+obj-${CONFIG_MACH_SEABOARD} += board-seaboard-pinmux.o
+
+obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice.o
+obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice-pinmux.o
diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c
index 50b15d500cac..98368d947be3 100644
--- a/arch/arm/mach-tegra/board-harmony-pinmux.c
+++ b/arch/arm/mach-tegra/board-harmony-pinmux.c
@@ -15,8 +15,10 @@
*/
#include <linux/kernel.h>
+#include <linux/gpio.h>
#include <mach/pinmux.h>
+#include "gpio-names.h"
#include "board-harmony.h"
static struct tegra_pingroup_config harmony_pinmux[] = {
@@ -34,10 +36,10 @@ static struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -138,7 +140,18 @@ static struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
};
+static struct tegra_gpio_table gpio_table[] = {
+ { .gpio = TEGRA_GPIO_PI5, .enable = true }, /* mmc2 cd */
+ { .gpio = TEGRA_GPIO_PH1, .enable = true }, /* mmc2 wp */
+ { .gpio = TEGRA_GPIO_PT3, .enable = true }, /* mmc2 pwr */
+ { .gpio = TEGRA_GPIO_PH2, .enable = true }, /* mmc4 cd */
+ { .gpio = TEGRA_GPIO_PH3, .enable = true }, /* mmc4 wp */
+ { .gpio = TEGRA_GPIO_PI6, .enable = true }, /* mmc4 pwr */
+};
+
void harmony_pinmux_init(void)
{
tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
+
+ tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
}
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index b9dbdb1289d0..49224e936eb4 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -30,35 +30,13 @@
#include <mach/iomap.h>
#include <mach/irqs.h>
+#include <mach/sdhci.h>
#include "board.h"
#include "board-harmony.h"
#include "clock.h"
-
-/* NVidia bootloader tags */
-#define ATAG_NVIDIA 0x41000801
-
-#define ATAG_NVIDIA_RM 0x1
-#define ATAG_NVIDIA_DISPLAY 0x2
-#define ATAG_NVIDIA_FRAMEBUFFER 0x3
-#define ATAG_NVIDIA_CHIPSHMOO 0x4
-#define ATAG_NVIDIA_CHIPSHMOOPHYS 0x5
-#define ATAG_NVIDIA_PRESERVED_MEM_0 0x10000
-#define ATAG_NVIDIA_PRESERVED_MEM_N 2
-#define ATAG_NVIDIA_FORCE_32 0x7fffffff
-
-struct tag_tegra {
- __u32 bootarg_key;
- __u32 bootarg_len;
- char bootarg[1];
-};
-
-static int __init parse_tag_nvidia(const struct tag *tag)
-{
-
- return 0;
-}
-__tagtable(ATAG_NVIDIA, parse_tag_nvidia);
+#include "devices.h"
+#include "gpio-names.h"
static struct plat_serial8250_port debug_uart_platform_data[] = {
{
@@ -84,6 +62,9 @@ static struct platform_device debug_uart = {
static struct platform_device *harmony_devices[] __initdata = {
&debug_uart,
+ &tegra_sdhci_device1,
+ &tegra_sdhci_device2,
+ &tegra_sdhci_device4,
};
static void __init tegra_harmony_fixup(struct machine_desc *desc,
@@ -102,22 +83,45 @@ static __initdata struct tegra_clk_init_table harmony_clk_init_table[] = {
{ NULL, NULL, 0, 0},
};
+
+static struct tegra_sdhci_platform_data sdhci_pdata1 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+};
+
+static struct tegra_sdhci_platform_data sdhci_pdata2 = {
+ .cd_gpio = TEGRA_GPIO_PI5,
+ .wp_gpio = TEGRA_GPIO_PH1,
+ .power_gpio = TEGRA_GPIO_PT3,
+};
+
+static struct tegra_sdhci_platform_data sdhci_pdata4 = {
+ .cd_gpio = TEGRA_GPIO_PH2,
+ .wp_gpio = TEGRA_GPIO_PH3,
+ .power_gpio = TEGRA_GPIO_PI6,
+ .is_8bit = 1,
+};
+
static void __init tegra_harmony_init(void)
{
- tegra_common_init();
-
tegra_clk_init_from_table(harmony_clk_init_table);
harmony_pinmux_init();
+ tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1;
+ tegra_sdhci_device2.dev.platform_data = &sdhci_pdata2;
+ tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4;
+
platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices));
}
MACHINE_START(HARMONY, "harmony")
.boot_params = 0x00000100,
.fixup = tegra_harmony_fixup,
- .init_irq = tegra_init_irq,
- .init_machine = tegra_harmony_init,
.map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
.timer = &tegra_timer,
+ .init_machine = tegra_harmony_init,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
new file mode 100644
index 000000000000..2d6ad83ed4b2
--- /dev/null
+++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2010 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+
+#include <mach/pinmux.h>
+#include <mach/pinmux-t2.h>
+
+#include "gpio-names.h"
+#include "board-seaboard.h"
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+static __initdata struct tegra_drive_pingroup_config seaboard_drive_pinmux[] = {
+ DEFAULT_DRIVE(SDIO1),
+};
+
+static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LCSN, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDI, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+
+static struct tegra_gpio_table gpio_table[] = {
+ { .gpio = TEGRA_GPIO_PI5, .enable = true }, /* mmc2 cd */
+ { .gpio = TEGRA_GPIO_PH1, .enable = true }, /* mmc2 wp */
+ { .gpio = TEGRA_GPIO_PI6, .enable = true }, /* mmc2 pwr */
+ { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true }, /* lid switch */
+ { .gpio = TEGRA_GPIO_POWERKEY, .enable = true }, /* power key */
+};
+
+void __init seaboard_pinmux_init(void)
+{
+ tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux));
+
+ tegra_drive_pinmux_config_table(seaboard_drive_pinmux,
+ ARRAY_SIZE(seaboard_drive_pinmux));
+
+ tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+}
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
new file mode 100644
index 000000000000..6ca9e61f6cd0
--- /dev/null
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2010, 2011 NVIDIA Corporation.
+ * Copyright (C) 2010, 2011 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/gpio_keys.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/sdhci.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include "board.h"
+#include "board-seaboard.h"
+#include "clock.h"
+#include "devices.h"
+#include "gpio-names.h"
+
+static struct plat_serial8250_port debug_uart_platform_data[] = {
+ {
+ /* Memory and IRQ filled in before registration */
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = 216000000,
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device debug_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uart_platform_data,
+ },
+};
+
+static __initdata struct tegra_clk_init_table seaboard_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "uartb", "pll_p", 216000000, true},
+ { "uartd", "pll_p", 216000000, true},
+ { NULL, NULL, 0, 0},
+};
+
+static struct gpio_keys_button seaboard_gpio_keys_buttons[] = {
+ {
+ .code = SW_LID,
+ .gpio = TEGRA_GPIO_LIDSWITCH,
+ .active_low = 0,
+ .desc = "Lid",
+ .type = EV_SW,
+ .wakeup = 1,
+ .debounce_interval = 1,
+ },
+ {
+ .code = KEY_POWER,
+ .gpio = TEGRA_GPIO_POWERKEY,
+ .active_low = 1,
+ .desc = "Power",
+ .type = EV_KEY,
+ .wakeup = 1,
+ },
+};
+
+static struct gpio_keys_platform_data seaboard_gpio_keys = {
+ .buttons = seaboard_gpio_keys_buttons,
+ .nbuttons = ARRAY_SIZE(seaboard_gpio_keys_buttons),
+};
+
+static struct platform_device seaboard_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &seaboard_gpio_keys,
+ }
+};
+
+static struct tegra_sdhci_platform_data sdhci_pdata1 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+};
+
+static struct tegra_sdhci_platform_data sdhci_pdata3 = {
+ .cd_gpio = TEGRA_GPIO_PI5,
+ .wp_gpio = TEGRA_GPIO_PH1,
+ .power_gpio = TEGRA_GPIO_PI6,
+};
+
+static struct tegra_sdhci_platform_data sdhci_pdata4 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .is_8bit = 1,
+};
+
+static struct platform_device *seaboard_devices[] __initdata = {
+ &debug_uart,
+ &tegra_pmu_device,
+ &tegra_sdhci_device1,
+ &tegra_sdhci_device3,
+ &tegra_sdhci_device4,
+ &seaboard_gpio_keys_device,
+};
+
+static void __init __tegra_seaboard_init(void)
+{
+ seaboard_pinmux_init();
+
+ tegra_clk_init_from_table(seaboard_clk_init_table);
+
+ tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1;
+ tegra_sdhci_device3.dev.platform_data = &sdhci_pdata3;
+ tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4;
+
+ platform_add_devices(seaboard_devices, ARRAY_SIZE(seaboard_devices));
+}
+
+static void __init tegra_seaboard_init(void)
+{
+ /* Seaboard uses UARTD for the debug port. */
+ debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTD_BASE);
+ debug_uart_platform_data[0].mapbase = TEGRA_UARTD_BASE;
+ debug_uart_platform_data[0].irq = INT_UARTD;
+
+ __tegra_seaboard_init();
+}
+
+static void __init tegra_kaen_init(void)
+{
+ /* Kaen uses UARTB for the debug port. */
+ debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE);
+ debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE;
+ debug_uart_platform_data[0].irq = INT_UARTB;
+
+ __tegra_seaboard_init();
+}
+
+static void __init tegra_wario_init(void)
+{
+ /* Wario uses UARTB for the debug port. */
+ debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE);
+ debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE;
+ debug_uart_platform_data[0].irq = INT_UARTB;
+
+ __tegra_seaboard_init();
+}
+
+
+MACHINE_START(SEABOARD, "seaboard")
+ .boot_params = 0x00000100,
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_seaboard_init,
+MACHINE_END
+
+MACHINE_START(KAEN, "kaen")
+ .boot_params = 0x00000100,
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_kaen_init,
+MACHINE_END
+
+MACHINE_START(WARIO, "wario")
+ .boot_params = 0x00000100,
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_wario_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.h b/arch/arm/mach-tegra/board-seaboard.h
new file mode 100644
index 000000000000..a098e3599731
--- /dev/null
+++ b/arch/arm/mach-tegra/board-seaboard.h
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/mach-tegra/board-seaboard.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_SEABOARD_H
+#define _MACH_TEGRA_BOARD_SEABOARD_H
+
+#define TEGRA_GPIO_LIDSWITCH TEGRA_GPIO_PC7
+#define TEGRA_GPIO_USB1 TEGRA_GPIO_PD0
+#define TEGRA_GPIO_POWERKEY TEGRA_GPIO_PV2
+#define TEGRA_GPIO_BACKLIGHT TEGRA_GPIO_PD4
+#define TEGRA_GPIO_LVDS_SHUTDOWN TEGRA_GPIO_PB2
+#define TEGRA_GPIO_BACKLIGHT_PWM TEGRA_GPIO_PU5
+#define TEGRA_GPIO_BACKLIGHT_VDD TEGRA_GPIO_PW0
+#define TEGRA_GPIO_EN_VDD_PNL TEGRA_GPIO_PC6
+#define TEGRA_GPIO_MAGNETOMETER TEGRA_GPIO_PN5
+#define TEGRA_GPIO_ISL29018_IRQ TEGRA_GPIO_PZ2
+#define TEGRA_GPIO_AC_ONLINE TEGRA_GPIO_PV3
+
+#define TPS_GPIO_BASE TEGRA_NR_GPIOS
+
+#define TPS_GPIO_WWAN_PWR (TPS_GPIO_BASE + 2)
+
+void seaboard_pinmux_init(void);
+
+#endif
diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c
new file mode 100644
index 000000000000..6d4fc9f7f1fb
--- /dev/null
+++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c
@@ -0,0 +1,145 @@
+/*
+ * arch/arm/mach-tegra/board-trimslice-pinmux.c
+ *
+ * Copyright (C) 2011 CompuLab, Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/pinmux.h>
+
+#include "board-trimslice.h"
+
+static __initdata struct tegra_pingroup_config trimslice_pinmux[] = {
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_OSC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GME, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_UARTA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LCSN, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_RSVD3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+void __init trimslice_pinmux_init(void)
+{
+ tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux));
+}
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
new file mode 100644
index 000000000000..0f3081a97126
--- /dev/null
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -0,0 +1,103 @@
+/*
+ * arch/arm/mach-tegra/board-trimslice.c
+ *
+ * Copyright (C) 2011 CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on board-harmony.c
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/setup.h>
+
+#include <mach/iomap.h>
+
+#include "board.h"
+#include "clock.h"
+
+#include "board-trimslice.h"
+
+static struct plat_serial8250_port debug_uart_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTA_BASE),
+ .mapbase = TEGRA_UARTA_BASE,
+ .irq = INT_UARTA,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = 216000000,
+ }, {
+ .flags = 0
+ }
+};
+
+static struct platform_device debug_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uart_platform_data,
+ },
+};
+
+static struct platform_device *trimslice_devices[] __initdata = {
+ &debug_uart,
+};
+
+static void __init tegra_trimslice_fixup(struct machine_desc *desc,
+ struct tag *tags, char **cmdline, struct meminfo *mi)
+{
+ mi->nr_banks = 2;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].size = 448 * SZ_1M;
+ mi->bank[1].start = SZ_512M;
+ mi->bank[1].size = SZ_512M;
+}
+
+static __initdata struct tegra_clk_init_table trimslice_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "uarta", "pll_p", 216000000, true },
+ { NULL, NULL, 0, 0},
+};
+
+static int __init tegra_trimslice_pci_init(void)
+{
+ return tegra_pcie_init(true, true);
+}
+subsys_initcall(tegra_trimslice_pci_init);
+
+static void __init tegra_trimslice_init(void)
+{
+ tegra_clk_init_from_table(trimslice_clk_init_table);
+
+ trimslice_pinmux_init();
+
+ platform_add_devices(trimslice_devices, ARRAY_SIZE(trimslice_devices));
+}
+
+MACHINE_START(TRIMSLICE, "trimslice")
+ .boot_params = 0x00000100,
+ .fixup = tegra_trimslice_fixup,
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_trimslice_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice.h b/arch/arm/mach-tegra/board-trimslice.h
new file mode 100644
index 000000000000..16ec0f0d3bb1
--- /dev/null
+++ b/arch/arm/mach-tegra/board-trimslice.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-tegra/board-trimslice.h
+ *
+ * Copyright (C) 2011 CompuLab, Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_TRIMSLICE_H
+#define _MACH_TEGRA_BOARD_TRIMSLICE_H
+
+void trimslice_pinmux_init(void);
+
+#endif
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 0de565ca37c5..1d14df7eb7de 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -23,7 +23,9 @@
#include <linux/types.h>
-void __init tegra_common_init(void);
+void tegra_assert_system_reset(char mode, const char *cmd);
+
+void __init tegra_init_early(void);
void __init tegra_map_common_io(void);
void __init tegra_init_irq(void);
void __init tegra_init_clock(void);
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 77948e0f4909..e028320ab423 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -18,238 +18,177 @@
#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/regulator/consumer.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+
+#include <mach/clk.h>
-#include "clock.h"
#include "board.h"
-#include "fuse.h"
+#include "clock.h"
+/*
+ * Locking:
+ *
+ * Each struct clk has a spinlock.
+ *
+ * To avoid AB-BA locking problems, locks must always be traversed from child
+ * clock to parent clock. For example, when enabling a clock, the clock's lock
+ * is taken, and then clk_enable is called on the parent, which takes the
+ * parent clock's lock. There is one exception to this ordering: when dumping
+ * the clock tree through debugfs, clk_lock_all is called, which attempts
+ * to iterate through the entire list of clocks and take every
+ * clock lock. If any call to spin_trylock fails, all locked clocks are
+ * unlocked, and the process is retried. When all the locks are held,
+ * the only clock operation that can be called is clk_get_rate_all_locked.
+ *
+ * Within a single clock, no clock operation can call another clock operation
+ * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any
+ * clock operation can call any other clock operation on any of its possible
+ * parents.
+ *
+ * An additional mutex, clock_list_lock, is used to protect the list of all
+ * clocks.
+ *
+ * The clock operations must lock internally to protect against
+ * read-modify-write on registers that are shared by multiple clocks
+ */
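[Editorial illustration, not part of this patch: a minimal sketch of the "trylock everything, back off and retry" pattern the comment above describes for clk_lock_all, modeled in user space with POSIX spinlocks instead of the kernel spinlock API. The sketch_* names are hypothetical.]

#include <pthread.h>
#include <sched.h>
#include <stddef.h>

/* Hypothetical stand-in for struct clk: one spinlock per clock, on a list. */
struct sketch_clk {
	pthread_spinlock_t lock;
	struct sketch_clk *next;
};

/*
 * Try to take every clock lock in list order; if any trylock fails,
 * release everything taken so far, yield, and start over. On return,
 * the caller holds every lock in the list.
 */
static void sketch_lock_all(struct sketch_clk *head)
{
	struct sketch_clk *c, *busy;

retry:
	for (c = head; c; c = c->next) {
		if (pthread_spin_trylock(&c->lock) != 0) {
			busy = c;
			/* Unlock everything acquired so far, then retry. */
			for (c = head; c != busy; c = c->next)
				pthread_spin_unlock(&c->lock);
			sched_yield();
			goto retry;
		}
	}
}

static void sketch_unlock_all(struct sketch_clk *head)
{
	struct sketch_clk *c;

	for (c = head; c; c = c->next)
		pthread_spin_unlock(&c->lock);
}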
+static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);
-static DEFINE_SPINLOCK(clock_lock);
-static DEFINE_MUTEX(dvfs_lock);
-
-static int clk_is_dvfs(struct clk *c)
-{
- return (c->dvfs != NULL);
-};
-
-static int dvfs_set_rate(struct dvfs *d, unsigned long rate)
-{
- struct dvfs_table *t;
-
- if (d->table == NULL)
- return -ENODEV;
-
- for (t = d->table; t->rate != 0; t++) {
- if (rate <= t->rate) {
- if (!d->reg)
- return 0;
-
- return regulator_set_voltage(d->reg,
- t->millivolts * 1000,
- d->max_millivolts * 1000);
- }
- }
-
- return -EINVAL;
-}
-
-static void dvfs_init(struct clk *c)
-{
- int process_id;
- int i;
- struct dvfs_table *table;
-
- process_id = c->dvfs->cpu ? tegra_core_process_id() :
- tegra_cpu_process_id();
-
- for (i = 0; i < c->dvfs->process_id_table_length; i++)
- if (process_id == c->dvfs->process_id_table[i].process_id)
- c->dvfs->table = c->dvfs->process_id_table[i].table;
-
- if (c->dvfs->table == NULL) {
- pr_err("Failed to find dvfs table for clock %s process %d\n",
- c->name, process_id);
- return;
- }
-
- c->dvfs->max_millivolts = 0;
- for (table = c->dvfs->table; table->rate != 0; table++)
- if (c->dvfs->max_millivolts < table->millivolts)
- c->dvfs->max_millivolts = table->millivolts;
-
- c->dvfs->reg = regulator_get(NULL, c->dvfs->reg_id);
-
- if (IS_ERR(c->dvfs->reg)) {
- pr_err("Failed to get regulator %s for clock %s\n",
- c->dvfs->reg_id, c->name);
- c->dvfs->reg = NULL;
- return;
- }
-
- if (c->refcnt > 0)
- dvfs_set_rate(c->dvfs, c->rate);
-}
-
struct clk *tegra_get_clock_by_name(const char *name)
{
struct clk *c;
struct clk *ret = NULL;
- unsigned long flags;
- spin_lock_irqsave(&clock_lock, flags);
+ mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
if (strcmp(c->name, name) == 0) {
ret = c;
break;
}
}
- spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_lock);
return ret;
}
-static void clk_recalculate_rate(struct clk *c)
+/* Must be called with c->spinlock held */
+static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
u64 rate;
- if (!c->parent)
- return;
-
- rate = c->parent->rate;
+ rate = clk_get_rate(p);
if (c->mul != 0 && c->div != 0) {
- rate = rate * c->mul;
+ rate *= c->mul;
+ rate += c->div - 1; /* round up */
do_div(rate, c->div);
}
- if (rate > c->max_rate)
- pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n",
- c->name, rate, c->max_rate);
-
- c->rate = rate;
+ return rate;
}
-int clk_reparent(struct clk *c, struct clk *parent)
+/* Must be called with c->spinlock held */
+unsigned long clk_get_rate_locked(struct clk *c)
{
- pr_debug("%s: %s\n", __func__, c->name);
- c->parent = parent;
- list_del(&c->sibling);
- list_add_tail(&c->sibling, &parent->children);
- return 0;
-}
+ unsigned long rate;
-static void propagate_rate(struct clk *c)
-{
- struct clk *clkp;
- pr_debug("%s: %s\n", __func__, c->name);
- list_for_each_entry(clkp, &c->children, sibling) {
- pr_debug(" %s\n", clkp->name);
- clk_recalculate_rate(clkp);
- propagate_rate(clkp);
- }
+ if (c->parent)
+ rate = clk_predict_rate_from_parent(c, c->parent);
+ else
+ rate = c->rate;
+
+ return rate;
}
-void clk_init(struct clk *c)
+unsigned long clk_get_rate(struct clk *c)
{
unsigned long flags;
+ unsigned long rate;
+
+ spin_lock_irqsave(&c->spinlock, flags);
- pr_debug("%s: %s\n", __func__, c->name);
+ rate = clk_get_rate_locked(c);
- spin_lock_irqsave(&clock_lock, flags);
+ spin_unlock_irqrestore(&c->spinlock, flags);
- INIT_LIST_HEAD(&c->children);
- INIT_LIST_HEAD(&c->sibling);
+ return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_reparent(struct clk *c, struct clk *parent)
+{
+ c->parent = parent;
+ return 0;
+}
+
+void clk_init(struct clk *c)
+{
+ spin_lock_init(&c->spinlock);
if (c->ops && c->ops->init)
c->ops->init(c);
- clk_recalculate_rate(c);
+ if (!c->ops || !c->ops->enable) {
+ c->refcnt++;
+ c->set = true;
+ if (c->parent)
+ c->state = c->parent->state;
+ else
+ c->state = ON;
+ }
+ mutex_lock(&clock_list_lock);
list_add(&c->node, &clocks);
-
- if (c->parent)
- list_add_tail(&c->sibling, &c->parent->children);
-
- spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_lock);
}
-int clk_enable_locked(struct clk *c)
+int clk_enable(struct clk *c)
{
- int ret;
- pr_debug("%s: %s\n", __func__, c->name);
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->spinlock, flags);
+
if (c->refcnt == 0) {
if (c->parent) {
- ret = clk_enable_locked(c->parent);
+ ret = clk_enable(c->parent);
if (ret)
- return ret;
+ goto out;
}
if (c->ops && c->ops->enable) {
ret = c->ops->enable(c);
if (ret) {
if (c->parent)
- clk_disable_locked(c->parent);
- return ret;
+ clk_disable(c->parent);
+ goto out;
}
c->state = ON;
-#ifdef CONFIG_DEBUG_FS
- c->set = 1;
-#endif
+ c->set = true;
}
}
c->refcnt++;
-
- return 0;
-}
-
-int clk_enable_cansleep(struct clk *c)
-{
- int ret;
- unsigned long flags;
-
- mutex_lock(&dvfs_lock);
-
- if (clk_is_dvfs(c) && c->refcnt > 0)
- dvfs_set_rate(c->dvfs, c->rate);
-
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_enable_locked(c);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- mutex_unlock(&dvfs_lock);
-
+out:
+ spin_unlock_irqrestore(&c->spinlock, flags);
return ret;
}
-EXPORT_SYMBOL(clk_enable_cansleep);
+EXPORT_SYMBOL(clk_enable);
-int clk_enable(struct clk *c)
+void clk_disable(struct clk *c)
{
- int ret;
unsigned long flags;
- if (clk_is_dvfs(c))
- BUG();
-
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_enable_locked(c);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_enable);
+ spin_lock_irqsave(&c->spinlock, flags);
-void clk_disable_locked(struct clk *c)
-{
- pr_debug("%s: %s\n", __func__, c->name);
if (c->refcnt == 0) {
WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
+ spin_unlock_irqrestore(&c->spinlock, flags);
return;
}
if (c->refcnt == 1) {
@@ -257,71 +196,39 @@ void clk_disable_locked(struct clk *c)
c->ops->disable(c);
if (c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
c->state = OFF;
}
c->refcnt--;
-}
-
-void clk_disable_cansleep(struct clk *c)
-{
- unsigned long flags;
-
- mutex_lock(&dvfs_lock);
-
- spin_lock_irqsave(&clock_lock, flags);
- clk_disable_locked(c);
- spin_unlock_irqrestore(&clock_lock, flags);
- if (clk_is_dvfs(c) && c->refcnt == 0)
- dvfs_set_rate(c->dvfs, c->rate);
-
- mutex_unlock(&dvfs_lock);
-}
-EXPORT_SYMBOL(clk_disable_cansleep);
-
-void clk_disable(struct clk *c)
-{
- unsigned long flags;
-
- if (clk_is_dvfs(c))
- BUG();
-
- spin_lock_irqsave(&clock_lock, flags);
- clk_disable_locked(c);
- spin_unlock_irqrestore(&clock_lock, flags);
+ spin_unlock_irqrestore(&c->spinlock, flags);
}
EXPORT_SYMBOL(clk_disable);
-int clk_set_parent_locked(struct clk *c, struct clk *parent)
+int clk_set_parent(struct clk *c, struct clk *parent)
{
int ret;
+ unsigned long flags;
+ unsigned long new_rate;
+ unsigned long old_rate;
- pr_debug("%s: %s\n", __func__, c->name);
+ spin_lock_irqsave(&c->spinlock, flags);
- if (!c->ops || !c->ops->set_parent)
- return -ENOSYS;
+ if (!c->ops || !c->ops->set_parent) {
+ ret = -ENOSYS;
+ goto out;
+ }
- ret = c->ops->set_parent(c, parent);
+ new_rate = clk_predict_rate_from_parent(c, parent);
+ old_rate = clk_get_rate_locked(c);
+ ret = c->ops->set_parent(c, parent);
if (ret)
- return ret;
-
- clk_recalculate_rate(c);
-
- propagate_rate(c);
-
- return 0;
-}
+ goto out;
-int clk_set_parent(struct clk *c, struct clk *parent)
-{
- int ret;
- unsigned long flags;
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_set_parent_locked(c, parent);
- spin_unlock_irqrestore(&clock_lock, flags);
+out:
+ spin_unlock_irqrestore(&c->spinlock, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_parent);
@@ -334,100 +241,86 @@ EXPORT_SYMBOL(clk_get_parent);
int clk_set_rate_locked(struct clk *c, unsigned long rate)
{
- int ret;
-
- if (rate > c->max_rate)
- rate = c->max_rate;
+ long new_rate;
if (!c->ops || !c->ops->set_rate)
return -ENOSYS;
- ret = c->ops->set_rate(c, rate);
-
- if (ret)
- return ret;
-
- clk_recalculate_rate(c);
-
- propagate_rate(c);
-
- return 0;
-}
-
-int clk_set_rate_cansleep(struct clk *c, unsigned long rate)
-{
- int ret = 0;
- unsigned long flags;
-
- pr_debug("%s: %s\n", __func__, c->name);
-
- mutex_lock(&dvfs_lock);
-
- if (rate > c->rate)
- ret = dvfs_set_rate(c->dvfs, rate);
- if (ret)
- goto out;
+ if (rate > c->max_rate)
+ rate = c->max_rate;
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk_set_rate_locked(c, rate);
- spin_unlock_irqrestore(&clock_lock, flags);
+ if (c->ops && c->ops->round_rate) {
+ new_rate = c->ops->round_rate(c, rate);
- if (ret)
- goto out;
+ if (new_rate < 0)
+ return new_rate;
- ret = dvfs_set_rate(c->dvfs, rate);
+ rate = new_rate;
+ }
-out:
- mutex_unlock(&dvfs_lock);
- return ret;
+ return c->ops->set_rate(c, rate);
}
-EXPORT_SYMBOL(clk_set_rate_cansleep);
int clk_set_rate(struct clk *c, unsigned long rate)
{
- int ret = 0;
+ int ret;
unsigned long flags;
- pr_debug("%s: %s\n", __func__, c->name);
-
- if (clk_is_dvfs(c))
- BUG();
+ spin_lock_irqsave(&c->spinlock, flags);
- spin_lock_irqsave(&clock_lock, flags);
ret = clk_set_rate_locked(c, rate);
- spin_unlock_irqrestore(&clock_lock, flags);
+
+ spin_unlock_irqrestore(&c->spinlock, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
-unsigned long clk_get_rate(struct clk *c)
-{
- unsigned long flags;
- unsigned long ret;
-
- spin_lock_irqsave(&clock_lock, flags);
- pr_debug("%s: %s\n", __func__, c->name);
+/* Must be called with clock_list_lock and all individual clock locks held */
+unsigned long clk_get_rate_all_locked(struct clk *c)
+{
+ u64 rate;
+ int mul = 1;
+ int div = 1;
+ struct clk *p = c;
+
+ while (p) {
+ c = p;
+ if (c->mul != 0 && c->div != 0) {
+ mul *= c->mul;
+ div *= c->div;
+ }
+ p = c->parent;
+ }
- ret = c->rate;
+ rate = c->rate;
+ rate *= mul;
+ do_div(rate, div);
- spin_unlock_irqrestore(&clock_lock, flags);
- return ret;
+ return rate;
}
-EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *c, unsigned long rate)
{
- pr_debug("%s: %s\n", __func__, c->name);
+ unsigned long flags;
+ long ret;
- if (!c->ops || !c->ops->round_rate)
- return -ENOSYS;
+ spin_lock_irqsave(&c->spinlock, flags);
+
+ if (!c->ops || !c->ops->round_rate) {
+ ret = -ENOSYS;
+ goto out;
+ }
if (rate > c->max_rate)
rate = c->max_rate;
- return c->ops->round_rate(c, rate);
+ ret = c->ops->round_rate(c, rate);
+
+out:
+ spin_unlock_irqrestore(&c->spinlock, flags);
+ return ret;
}
EXPORT_SYMBOL(clk_round_rate);
@@ -509,31 +402,90 @@ void __init tegra_init_clock(void)
tegra2_init_clocks();
}
-int __init tegra_init_dvfs(void)
+/*
+ * The SDMMC controllers have extra bits in the clock source register that
+ * adjust the delay between the clock and data to compensate for delays
+ * on the PCB.
+ */
+void tegra_sdmmc_tap_delay(struct clk *c, int delay)
{
- struct clk *c, *safe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->spinlock, flags);
+ tegra2_sdmmc_tap_delay(c, delay);
+ spin_unlock_irqrestore(&c->spinlock, flags);
+}
- mutex_lock(&dvfs_lock);
+#ifdef CONFIG_DEBUG_FS
- list_for_each_entry_safe(c, safe, &clocks, node)
- if (c->dvfs)
- dvfs_init(c);
+static int __clk_lock_all_spinlocks(void)
+{
+ struct clk *c;
- mutex_unlock(&dvfs_lock);
+ list_for_each_entry(c, &clocks, node)
+ if (!spin_trylock(&c->spinlock))
+ goto unlock_spinlocks;
return 0;
+
+unlock_spinlocks:
+ list_for_each_entry_continue_reverse(c, &clocks, node)
+ spin_unlock(&c->spinlock);
+
+ return -EAGAIN;
}
-late_initcall(tegra_init_dvfs);
+static void __clk_unlock_all_spinlocks(void)
+{
+ struct clk *c;
+
+ list_for_each_entry_reverse(c, &clocks, node)
+ spin_unlock(&c->spinlock);
+}
+
+/*
+ * This function retries until it can take all locks, and may take
+ * an arbitrarily long time to complete.
+ * Must be called with irqs enabled, returns with irqs disabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_lock_all(void)
+{
+ int ret;
+retry:
+ local_irq_disable();
+
+ ret = __clk_lock_all_spinlocks();
+ if (ret)
+ goto failed_spinlocks;
+
+ /* All locks taken successfully, return */
+ return;
+
+failed_spinlocks:
+ local_irq_enable();
+ yield();
+ goto retry;
+}
+
+/*
+ * Unlocks all clocks after a clk_lock_all
+ * Must be called with irqs disabled, returns with irqs enabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_unlock_all(void)
+{
+ __clk_unlock_all_spinlocks();
+
+ local_irq_enable();
+}
-#ifdef CONFIG_DEBUG_FS
static struct dentry *clk_debugfs_root;
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
- struct clk *safe;
const char *state = "uninit";
char div[8] = {0};
@@ -564,8 +516,12 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
c->rate > c->max_rate ? '!' : ' ',
!c->set ? '*' : ' ',
30 - level * 3, c->name,
- state, c->refcnt, div, c->rate);
- list_for_each_entry_safe(child, safe, &c->children, sibling) {
+ state, c->refcnt, div, clk_get_rate_all_locked(c));
+
+ list_for_each_entry(child, &clocks, node) {
+ if (child->parent != c)
+ continue;
+
clock_tree_show_one(s, child, level + 1);
}
}
@@ -573,14 +529,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
- unsigned long flags;
seq_printf(s, " clock state ref div rate\n");
seq_printf(s, "--------------------------------------------------------------\n");
- spin_lock_irqsave(&clock_lock, flags);
+
+ mutex_lock(&clock_list_lock);
+
+ clk_lock_all();
+
list_for_each_entry(c, &clocks, node)
if (c->parent == NULL)
clock_tree_show_one(s, c, 0);
- spin_unlock_irqrestore(&clock_lock, flags);
+
+ clk_unlock_all();
+
+ mutex_unlock(&clock_list_lock);
return 0;
}
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h
index 083a4cfc6cf0..688316abc64e 100644
--- a/arch/arm/mach-tegra/clock.h
+++ b/arch/arm/mach-tegra/clock.h
@@ -20,8 +20,9 @@
#ifndef __MACH_TEGRA_CLOCK_H
#define __MACH_TEGRA_CLOCK_H
-#include <linux/list.h>
#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
#define DIV_BUS (1 << 0)
#define DIV_U71 (1 << 1)
@@ -41,36 +42,13 @@
#define ENABLE_ON_INIT (1 << 28)
struct clk;
-struct regulator;
-
-struct dvfs_table {
- unsigned long rate;
- int millivolts;
-};
-
-struct dvfs_process_id_table {
- int process_id;
- struct dvfs_table *table;
-};
-
-
-struct dvfs {
- struct regulator *reg;
- struct dvfs_table *table;
- int max_millivolts;
-
- int process_id_table_length;
- const char *reg_id;
- bool cpu;
- struct dvfs_process_id_table process_id_table[];
-};
struct clk_mux_sel {
struct clk *input;
u32 value;
};
-struct clk_pll_table {
+struct clk_pll_freq_table {
unsigned long input_rate;
unsigned long output_rate;
u16 n;
@@ -86,6 +64,7 @@ struct clk_ops {
int (*set_parent)(struct clk *, struct clk *);
int (*set_rate)(struct clk *, unsigned long);
long (*round_rate)(struct clk *, unsigned long);
+ void (*reset)(struct clk *, bool);
};
enum clk_state {
@@ -96,55 +75,64 @@ enum clk_state {
struct clk {
/* node for master clocks list */
- struct list_head node;
- struct list_head children; /* list of children */
- struct list_head sibling; /* node for children */
-#ifdef CONFIG_DEBUG_FS
- struct dentry *dent;
- struct dentry *parent_dent;
-#endif
- struct clk_ops *ops;
- struct clk *parent;
- struct clk_lookup lookup;
- unsigned long rate;
- unsigned long max_rate;
- u32 flags;
- u32 refcnt;
- const char *name;
- u32 reg;
- u32 reg_shift;
- unsigned int clk_num;
- enum clk_state state;
+ struct list_head node; /* node for list of all clocks */
+ struct clk_lookup lookup;
+
#ifdef CONFIG_DEBUG_FS
- bool set;
+ struct dentry *dent;
#endif
+ bool set;
+ struct clk_ops *ops;
+ unsigned long rate;
+ unsigned long max_rate;
+ unsigned long min_rate;
+ u32 flags;
+ const char *name;
+
+ u32 refcnt;
+ enum clk_state state;
+ struct clk *parent;
+ u32 div;
+ u32 mul;
- /* PLL */
- unsigned long input_min;
- unsigned long input_max;
- unsigned long cf_min;
- unsigned long cf_max;
- unsigned long vco_min;
- unsigned long vco_max;
- const struct clk_pll_table *pll_table;
-
- /* DIV */
- u32 div;
- u32 mul;
-
- /* MUX */
const struct clk_mux_sel *inputs;
- u32 sel;
- u32 reg_mask;
-
- /* Virtual cpu clock */
- struct clk *main;
- struct clk *backup;
+ u32 reg;
+ u32 reg_shift;
- struct dvfs *dvfs;
+ struct list_head shared_bus_list;
+
+ union {
+ struct {
+ unsigned int clk_num;
+ } periph;
+ struct {
+ unsigned long input_min;
+ unsigned long input_max;
+ unsigned long cf_min;
+ unsigned long cf_max;
+ unsigned long vco_min;
+ unsigned long vco_max;
+ const struct clk_pll_freq_table *freq_table;
+ int lock_delay;
+ } pll;
+ struct {
+ u32 sel;
+ u32 reg_mask;
+ } mux;
+ struct {
+ struct clk *main;
+ struct clk *backup;
+ } cpu;
+ struct {
+ struct list_head node;
+ bool enabled;
+ unsigned long rate;
+ } shared_bus_user;
+ } u;
+
+ spinlock_t spinlock;
};
-
struct clk_duplicate {
const char *name;
struct clk_lookup lookup;
@@ -163,11 +151,10 @@ void tegra2_periph_reset_assert(struct clk *c);
void clk_init(struct clk *clk);
struct clk *tegra_get_clock_by_name(const char *name);
unsigned long clk_measure_input_freq(void);
-void clk_disable_locked(struct clk *c);
-int clk_enable_locked(struct clk *c);
-int clk_set_parent_locked(struct clk *c, struct clk *parent);
-int clk_set_rate_locked(struct clk *c, unsigned long rate);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
+unsigned long clk_get_rate_locked(struct clk *c);
+int clk_set_rate_locked(struct clk *c, unsigned long rate);
+void tegra2_sdmmc_tap_delay(struct clk *c, int delay);
#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 7c91e2b9d643..d5e3f89b05af 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -25,12 +25,25 @@
#include <asm/hardware/cache-l2x0.h>
#include <mach/iomap.h>
-#include <mach/dma.h>
+#include <mach/system.h>
#include "board.h"
#include "clock.h"
#include "fuse.h"
+void (*arch_reset)(char mode, const char *cmd) = tegra_assert_system_reset;
+
+void tegra_assert_system_reset(char mode, const char *cmd)
+{
+ void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04);
+ u32 reg;
+
+ /* use *_relaxed to avoid spinlock since caches are off */
+ reg = readl_relaxed(reset);
+ reg |= 0x04;
+ writel_relaxed(reg, reset);
+}
+
static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
/* name parent rate enabled */
{ "clk_m", NULL, 0, true },
@@ -42,6 +55,9 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
{ "sclk", "pll_p_out4", 108000000, true },
{ "hclk", "sclk", 108000000, true },
{ "pclk", "hclk", 54000000, true },
+ { "csite", NULL, 0, true },
+ { "emc", NULL, 0, true },
+ { "cpu", NULL, 0, true },
{ NULL, NULL, 0, 0},
};
@@ -50,21 +66,18 @@ void __init tegra_init_cache(void)
#ifdef CONFIG_CACHE_L2X0
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
- writel(0x331, p + L2X0_TAG_LATENCY_CTRL);
- writel(0x441, p + L2X0_DATA_LATENCY_CTRL);
+ writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);
l2x0_init(p, 0x6C080001, 0x8200c3fe);
#endif
}
-void __init tegra_common_init(void)
+void __init tegra_init_early(void)
{
tegra_init_fuse();
tegra_init_clock();
tegra_clk_init_from_table(common_clk_init_table);
tegra_init_cache();
-#ifdef CONFIG_TEGRA_SYSTEM_DMA
- tegra_dma_init();
-#endif
}
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index fea5719c7072..0e1016a827ac 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -28,6 +28,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/suspend.h>
#include <asm/system.h>
@@ -36,21 +37,25 @@
/* Frequency table index must be sequential starting at 0 */
static struct cpufreq_frequency_table freq_table[] = {
- { 0, 312000 },
- { 1, 456000 },
- { 2, 608000 },
- { 3, 760000 },
- { 4, 816000 },
- { 5, 912000 },
- { 6, 1000000 },
- { 7, CPUFREQ_TABLE_END },
+ { 0, 216000 },
+ { 1, 312000 },
+ { 2, 456000 },
+ { 3, 608000 },
+ { 4, 760000 },
+ { 5, 816000 },
+ { 6, 912000 },
+ { 7, 1000000 },
+ { 8, CPUFREQ_TABLE_END },
};
#define NUM_CPUS 2
static struct clk *cpu_clk;
+static struct clk *emc_clk;
static unsigned long target_cpu_speed[NUM_CPUS];
+static DEFINE_MUTEX(tegra_cpu_lock);
+static bool is_suspended;
int tegra_verify_speed(struct cpufreq_policy *policy)
{
@@ -68,22 +73,28 @@ unsigned int tegra_getspeed(unsigned int cpu)
return rate;
}
-static int tegra_update_cpu_speed(void)
+static int tegra_update_cpu_speed(unsigned long rate)
{
- int i;
- unsigned long rate = 0;
int ret = 0;
struct cpufreq_freqs freqs;
- for_each_online_cpu(i)
- rate = max(rate, target_cpu_speed[i]);
-
freqs.old = tegra_getspeed(0);
freqs.new = rate;
if (freqs.old == freqs.new)
return ret;
+ /*
+ * Vote on memory bus frequency based on cpu frequency
+ * This sets the minimum frequency, display or avp may request higher
+ */
+ if (rate >= 816000)
+ clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
+ else if (rate >= 456000)
+ clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150 MHz */
+ else
+ clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */
+
for_each_online_cpu(freqs.cpu)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -92,7 +103,7 @@ static int tegra_update_cpu_speed(void)
freqs.old, freqs.new);
#endif
- ret = clk_set_rate_cansleep(cpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(cpu_clk, freqs.new * 1000);
if (ret) {
pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
freqs.new);
@@ -105,12 +116,30 @@ static int tegra_update_cpu_speed(void)
return 0;
}
+static unsigned long tegra_cpu_highest_speed(void)
+{
+ unsigned long rate = 0;
+ int i;
+
+ for_each_online_cpu(i)
+ rate = max(rate, target_cpu_speed[i]);
+ return rate;
+}
+
static int tegra_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int idx;
unsigned int freq;
+ int ret = 0;
+
+ mutex_lock(&tegra_cpu_lock);
+
+ if (is_suspended) {
+ ret = -EBUSY;
+ goto out;
+ }
cpufreq_frequency_table_target(policy, freq_table, target_freq,
relation, &idx);
@@ -119,9 +148,34 @@ static int tegra_target(struct cpufreq_policy *policy,
target_cpu_speed[policy->cpu] = freq;
- return tegra_update_cpu_speed();
+ ret = tegra_update_cpu_speed(tegra_cpu_highest_speed());
+
+out:
+ mutex_unlock(&tegra_cpu_lock);
+ return ret;
}
+static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
+ void *dummy)
+{
+ mutex_lock(&tegra_cpu_lock);
+ if (event == PM_SUSPEND_PREPARE) {
+ is_suspended = true;
+ pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
+ freq_table[0].frequency);
+ tegra_update_cpu_speed(freq_table[0].frequency);
+ } else if (event == PM_POST_SUSPEND) {
+ is_suspended = false;
+ }
+ mutex_unlock(&tegra_cpu_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpu_pm_notifier = {
+ .notifier_call = tegra_pm_notify,
+};
+
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu >= NUM_CPUS)
@@ -131,6 +185,15 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
+ emc_clk = clk_get_sys("cpu", "emc");
+ if (IS_ERR(emc_clk)) {
+ clk_put(cpu_clk);
+ return PTR_ERR(emc_clk);
+ }
+
+ clk_enable(emc_clk);
+ clk_enable(cpu_clk);
+
cpufreq_frequency_table_cpuinfo(policy, freq_table);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
policy->cur = tegra_getspeed(policy->cpu);
@@ -142,12 +205,17 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->related_cpus, cpu_possible_mask);
+ if (policy->cpu == 0)
+ register_pm_notifier(&tegra_cpu_pm_notifier);
+
return 0;
}
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ clk_disable(emc_clk);
+ clk_put(emc_clk);
clk_put(cpu_clk);
return 0;
}
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
new file mode 100644
index 000000000000..682e6d33108c
--- /dev/null
+++ b/arch/arm/mach-tegra/devices.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (C) 2010,2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ * Erik Gilling <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/serial_8250.h>
+#include <asm/pmu.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dma.h>
+
+static struct resource i2c_resource1[] = {
+ [0] = {
+ .start = INT_I2C,
+ .end = INT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C_BASE,
+ .end = TEGRA_I2C_BASE + TEGRA_I2C_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource2[] = {
+ [0] = {
+ .start = INT_I2C2,
+ .end = INT_I2C2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C2_BASE,
+ .end = TEGRA_I2C2_BASE + TEGRA_I2C2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource3[] = {
+ [0] = {
+ .start = INT_I2C3,
+ .end = INT_I2C3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C3_BASE,
+ .end = TEGRA_I2C3_BASE + TEGRA_I2C3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource4[] = {
+ [0] = {
+ .start = INT_DVC,
+ .end = INT_DVC,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_DVC_BASE,
+ .end = TEGRA_DVC_BASE + TEGRA_DVC_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device tegra_i2c_device1 = {
+ .name = "tegra-i2c",
+ .id = 0,
+ .resource = i2c_resource1,
+ .num_resources = ARRAY_SIZE(i2c_resource1),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+struct platform_device tegra_i2c_device2 = {
+ .name = "tegra-i2c",
+ .id = 1,
+ .resource = i2c_resource2,
+ .num_resources = ARRAY_SIZE(i2c_resource2),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+struct platform_device tegra_i2c_device3 = {
+ .name = "tegra-i2c",
+ .id = 2,
+ .resource = i2c_resource3,
+ .num_resources = ARRAY_SIZE(i2c_resource3),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+struct platform_device tegra_i2c_device4 = {
+ .name = "tegra-i2c",
+ .id = 3,
+ .resource = i2c_resource4,
+ .num_resources = ARRAY_SIZE(i2c_resource4),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+static struct resource spi_resource1[] = {
+ [0] = {
+ .start = INT_S_LINK1,
+ .end = INT_S_LINK1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI1_BASE,
+ .end = TEGRA_SPI1_BASE + TEGRA_SPI1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource2[] = {
+ [0] = {
+ .start = INT_SPI_2,
+ .end = INT_SPI_2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI2_BASE,
+ .end = TEGRA_SPI2_BASE + TEGRA_SPI2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource3[] = {
+ [0] = {
+ .start = INT_SPI_3,
+ .end = INT_SPI_3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI3_BASE,
+ .end = TEGRA_SPI3_BASE + TEGRA_SPI3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource4[] = {
+ [0] = {
+ .start = INT_SPI_4,
+ .end = INT_SPI_4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI4_BASE,
+ .end = TEGRA_SPI4_BASE + TEGRA_SPI4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device tegra_spi_device1 = {
+ .name = "spi_tegra",
+ .id = 0,
+ .resource = spi_resource1,
+ .num_resources = ARRAY_SIZE(spi_resource1),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_device2 = {
+ .name = "spi_tegra",
+ .id = 1,
+ .resource = spi_resource2,
+ .num_resources = ARRAY_SIZE(spi_resource2),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_device3 = {
+ .name = "spi_tegra",
+ .id = 2,
+ .resource = spi_resource3,
+ .num_resources = ARRAY_SIZE(spi_resource3),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_device4 = {
+ .name = "spi_tegra",
+ .id = 3,
+ .resource = spi_resource4,
+ .num_resources = ARRAY_SIZE(spi_resource4),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+
+static struct resource sdhci_resource1[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC2,
+ .end = INT_SDMMC2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC2_BASE,
+ .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource4[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+/* board files should fill in platform_data and register the devices themselves.
+ * See board-harmony.c for an example
+ */
+struct platform_device tegra_sdhci_device1 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource1,
+ .num_resources = ARRAY_SIZE(sdhci_resource1),
+};
+
+struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 1,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+};
+
+struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+};
+
+struct platform_device tegra_sdhci_device4 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource4,
+ .num_resources = ARRAY_SIZE(sdhci_resource4),
+};
+
+static struct resource tegra_usb1_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_usb2_resources[] = {
+ [0] = {
+ .start = TEGRA_USB2_BASE,
+ .end = TEGRA_USB2_BASE + TEGRA_USB2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB2,
+ .end = INT_USB2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_usb3_resources[] = {
+ [0] = {
+ .start = TEGRA_USB3_BASE,
+ .end = TEGRA_USB3_BASE + TEGRA_USB3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB3,
+ .end = INT_USB3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 tegra_ehci_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device tegra_ehci1_device = {
+ .name = "tegra-ehci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &tegra_ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = tegra_usb1_resources,
+ .num_resources = ARRAY_SIZE(tegra_usb1_resources),
+};
+
+struct platform_device tegra_ehci2_device = {
+ .name = "tegra-ehci",
+ .id = 1,
+ .dev = {
+ .dma_mask = &tegra_ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = tegra_usb2_resources,
+ .num_resources = ARRAY_SIZE(tegra_usb2_resources),
+};
+
+struct platform_device tegra_ehci3_device = {
+ .name = "tegra-ehci",
+ .id = 2,
+ .dev = {
+ .dma_mask = &tegra_ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = tegra_usb3_resources,
+ .num_resources = ARRAY_SIZE(tegra_usb3_resources),
+};
+
+static struct resource tegra_pmu_resources[] = {
+ [0] = {
+ .start = INT_CPU0_PMU_INTR,
+ .end = INT_CPU0_PMU_INTR,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = INT_CPU1_PMU_INTR,
+ .end = INT_CPU1_PMU_INTR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(tegra_pmu_resources),
+ .resource = tegra_pmu_resources,
+};
+
+static struct resource tegra_uarta_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTA_BASE,
+ .end = TEGRA_UARTA_BASE + TEGRA_UARTA_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTA,
+ .end = INT_UARTA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uartb_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTB_BASE,
+ .end = TEGRA_UARTB_BASE + TEGRA_UARTB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTB,
+ .end = INT_UARTB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uartc_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTC_BASE,
+ .end = TEGRA_UARTC_BASE + TEGRA_UARTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTC,
+ .end = INT_UARTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uartd_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTD_BASE,
+ .end = TEGRA_UARTD_BASE + TEGRA_UARTD_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTD,
+ .end = INT_UARTD,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uarte_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTE_BASE,
+ .end = TEGRA_UARTE_BASE + TEGRA_UARTE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTE,
+ .end = INT_UARTE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_uarta_device = {
+ .name = "tegra_uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(tegra_uarta_resources),
+ .resource = tegra_uarta_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uartb_device = {
+ .name = "tegra_uart",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(tegra_uartb_resources),
+ .resource = tegra_uartb_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uartc_device = {
+ .name = "tegra_uart",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(tegra_uartc_resources),
+ .resource = tegra_uartc_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uartd_device = {
+ .name = "tegra_uart",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(tegra_uartd_resources),
+ .resource = tegra_uartd_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uarte_device = {
+ .name = "tegra_uart",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(tegra_uarte_resources),
+ .resource = tegra_uarte_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
new file mode 100644
index 000000000000..888810c37ee9
--- /dev/null
+++ b/arch/arm/mach-tegra/devices.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010,2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ * Erik Gilling <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_DEVICES_H
+#define __MACH_TEGRA_DEVICES_H
+
+#include <linux/platform_device.h>
+
+extern struct platform_device tegra_sdhci_device1;
+extern struct platform_device tegra_sdhci_device2;
+extern struct platform_device tegra_sdhci_device3;
+extern struct platform_device tegra_sdhci_device4;
+extern struct platform_device tegra_i2c_device1;
+extern struct platform_device tegra_i2c_device2;
+extern struct platform_device tegra_i2c_device3;
+extern struct platform_device tegra_i2c_device4;
+extern struct platform_device tegra_spi_device1;
+extern struct platform_device tegra_spi_device2;
+extern struct platform_device tegra_spi_device3;
+extern struct platform_device tegra_spi_device4;
+extern struct platform_device tegra_ehci1_device;
+extern struct platform_device tegra_ehci2_device;
+extern struct platform_device tegra_ehci3_device;
+extern struct platform_device tegra_uarta_device;
+extern struct platform_device tegra_uartb_device;
+extern struct platform_device tegra_uartc_device;
+extern struct platform_device tegra_uartd_device;
+extern struct platform_device tegra_uarte_device;
+extern struct platform_device tegra_pmu_device;
+
+#endif
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index edda6ec5e925..e945ae28ee77 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -27,9 +27,11 @@
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
+#include <mach/suspend.h>
#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)
@@ -120,17 +122,14 @@ struct tegra_dma_channel {
void __iomem *addr;
int mode;
int irq;
-
- /* Register shadow */
- u32 csr;
- u32 ahb_seq;
- u32 ahb_ptr;
- u32 apb_seq;
- u32 apb_ptr;
+ int req_transfer_count;
};
#define NV_DMA_MAX_CHANNELS 32
+static bool tegra_dma_initialized;
+static DEFINE_MUTEX(tegra_dma_lock);
+
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -138,7 +137,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch)
@@ -150,6 +148,9 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ if (tegra_dma_is_empty(ch))
+ return;
+
req = list_entry(ch->list.next, typeof(*req), node);
tegra_dma_dequeue_req(ch, req);
@@ -158,10 +159,10 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
void tegra_dma_stop(struct tegra_dma_channel *ch)
{
- unsigned int csr;
- unsigned int status;
+ u32 csr;
+ u32 status;
- csr = ch->csr;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
csr &= ~CSR_IE_EOC;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
@@ -175,19 +176,16 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
- unsigned int csr;
+ u32 csr;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
while (!list_empty(&ch->list))
list_del(ch->list.next);
- csr = ch->csr;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
csr &= ~CSR_REQ_SEL_MASK;
csr |= CSR_REQ_SEL_INVALID;
-
- /* Set the enable as that is not shadowed */
- csr |= CSR_ENB;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
tegra_dma_stop(ch);
@@ -229,18 +227,15 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
* - Finally stop or program the DMA to the next buffer in the
* list.
*/
- csr = ch->csr;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
csr &= ~CSR_REQ_SEL_MASK;
csr |= CSR_REQ_SEL_INVALID;
-
- /* Set the enable as that is not shadowed */
- csr |= CSR_ENB;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
/* Get the transfer count */
status = readl(ch->addr + APB_DMA_CHAN_STA);
to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
- req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ req_transfer_count = ch->req_transfer_count;
req_transfer_count += 1;
to_transfer += 1;
@@ -318,6 +313,7 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
{
unsigned long irq_flags;
+ struct tegra_dma_req *_req;
int start_dma = 0;
if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
@@ -328,6 +324,13 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
spin_lock_irqsave(&ch->lock, irq_flags);
+ list_for_each_entry(_req, &ch->list, node) {
+ if (req == _req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return -EEXIST;
+ }
+ }
+
req->bytes_transferred = 0;
req->status = 0;
req->buffer_status = 0;
@@ -348,7 +351,12 @@ EXPORT_SYMBOL(tegra_dma_enqueue_req);
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
int channel;
- struct tegra_dma_channel *ch;
+ struct tegra_dma_channel *ch = NULL;
+
+ if (WARN_ON(!tegra_dma_initialized))
+ return NULL;
+
+ mutex_lock(&tegra_dma_lock);
/* first channel is the shared channel */
if (mode & TEGRA_DMA_SHARED) {
@@ -357,11 +365,14 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
channel = find_first_zero_bit(channel_usage,
ARRAY_SIZE(dma_channels));
if (channel >= ARRAY_SIZE(dma_channels))
- return NULL;
+ goto out;
}
__set_bit(channel, channel_usage);
ch = &dma_channels[channel];
ch->mode = mode;
+
+out:
+ mutex_unlock(&tegra_dma_lock);
return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
@@ -371,22 +382,27 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch)
if (ch->mode & TEGRA_DMA_SHARED)
return;
tegra_dma_cancel(ch);
+ mutex_lock(&tegra_dma_lock);
__clear_bit(ch->id, channel_usage);
+ mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
{
+ u32 apb_ptr;
+ u32 ahb_ptr;
+
if (req->to_memory) {
- ch->apb_ptr = req->source_addr;
- ch->ahb_ptr = req->dest_addr;
+ apb_ptr = req->source_addr;
+ ahb_ptr = req->dest_addr;
} else {
- ch->apb_ptr = req->dest_addr;
- ch->ahb_ptr = req->source_addr;
+ apb_ptr = req->dest_addr;
+ ahb_ptr = req->source_addr;
}
- writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
- writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+ writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
req->status = TEGRA_DMA_REQ_INFLIGHT;
return;
@@ -400,38 +416,39 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
int ahb_bus_width;
int apb_bus_width;
int index;
- unsigned long csr;
+ u32 ahb_seq;
+ u32 apb_seq;
+ u32 ahb_ptr;
+ u32 apb_ptr;
+ u32 csr;
+
+ csr = CSR_IE_EOC | CSR_FLOW;
+ ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+ apb_seq = 0;
- ch->csr |= CSR_FLOW;
- ch->csr &= ~CSR_REQ_SEL_MASK;
- ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
- ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
- ch->ahb_seq |= AHB_SEQ_BURST_1;
+ csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
/* One shot mode is always single buffered,
* continuous mode is always double buffered
* */
if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
- ch->csr |= CSR_ONCE;
- ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
- ch->csr &= ~CSR_WCOUNT_MASK;
- ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
+ csr |= CSR_ONCE;
+ ch->req_transfer_count = (req->size >> 2) - 1;
} else {
- ch->csr &= ~CSR_ONCE;
- ch->ahb_seq |= AHB_SEQ_DBL_BUF;
+ ahb_seq |= AHB_SEQ_DBL_BUF;
/* In double buffered mode, we set the size to half the
* requested size and interrupt when half the buffer
* is full */
- ch->csr &= ~CSR_WCOUNT_MASK;
- ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
+ ch->req_transfer_count = (req->size >> 3) - 1;
}
+ csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+
if (req->to_memory) {
- ch->csr &= ~CSR_DIR;
- ch->apb_ptr = req->source_addr;
- ch->ahb_ptr = req->dest_addr;
+ apb_ptr = req->source_addr;
+ ahb_ptr = req->dest_addr;
apb_addr_wrap = req->source_wrap;
ahb_addr_wrap = req->dest_wrap;
@@ -439,9 +456,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
ahb_bus_width = req->dest_bus_width;
} else {
- ch->csr |= CSR_DIR;
- ch->apb_ptr = req->dest_addr;
- ch->ahb_ptr = req->source_addr;
+ csr |= CSR_DIR;
+ apb_ptr = req->dest_addr;
+ ahb_ptr = req->source_addr;
apb_addr_wrap = req->dest_wrap;
ahb_addr_wrap = req->source_wrap;
@@ -460,8 +477,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
index++;
} while (index < ARRAY_SIZE(apb_addr_wrap_table));
BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
- ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
- ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;
+ apb_seq |= index << APB_SEQ_WRAP_SHIFT;
/* set address wrap for AHB size */
index = 0;
@@ -471,55 +487,42 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
index++;
} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
- ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
- ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
+ ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
if (bus_width_table[index] == ahb_bus_width)
break;
}
BUG_ON(index == ARRAY_SIZE(bus_width_table));
- ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
- ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
+ ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
if (bus_width_table[index] == apb_bus_width)
break;
}
BUG_ON(index == ARRAY_SIZE(bus_width_table));
- ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
- ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
-
- ch->csr |= CSR_IE_EOC;
+ apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
- /* update hw registers with the shadow */
- writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
- writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
- writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
- writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
- writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+ writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
+ writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
+ writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
- csr = ch->csr | CSR_ENB;
+ csr |= CSR_ENB;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
req->status = TEGRA_DMA_REQ_INFLIGHT;
}
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
-{
- /* One shot with an interrupt to CPU after transfer */
- ch->csr = CSR_ONCE | CSR_IE_EOC;
- ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
- ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
-}
-
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ unsigned long irq_flags;
- spin_lock(&ch->lock);
+ spin_lock_irqsave(&ch->lock, irq_flags);
if (list_empty(&ch->list)) {
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -527,8 +530,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
if (req) {
int bytes_transferred;
- bytes_transferred =
- (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ bytes_transferred = ch->req_transfer_count;
bytes_transferred += 1;
bytes_transferred <<= 2;
@@ -536,12 +538,12 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
req->bytes_transferred = bytes_transferred;
req->status = TEGRA_DMA_REQ_SUCCESS;
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
/* Callback should be called without any lock */
pr_debug("%s: transferred %d bytes\n", __func__,
req->bytes_transferred);
req->complete(req);
- spin_lock(&ch->lock);
+ spin_lock_irqsave(&ch->lock, irq_flags);
}
if (!list_empty(&ch->list)) {
@@ -551,22 +553,55 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
if (req->status != TEGRA_DMA_REQ_INFLIGHT)
tegra_dma_update_hw(ch, req);
}
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
}
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ unsigned long irq_flags;
- spin_lock(&ch->lock);
+ spin_lock_irqsave(&ch->lock, irq_flags);
if (list_empty(&ch->list)) {
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
req = list_entry(ch->list.next, typeof(*req), node);
if (req) {
if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+ bool is_dma_ping_complete;
+ is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
+ & STA_PING_PONG) ? true : false;
+ if (req->to_memory)
+ is_dma_ping_complete = !is_dma_ping_complete;
+ /* Out of sync - Release current buffer */
+ if (!is_dma_ping_complete) {
+ int bytes_transferred;
+
+ bytes_transferred = ch->req_transfer_count;
+ bytes_transferred += 1;
+ bytes_transferred <<= 3;
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->bytes_transferred = bytes_transferred;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ tegra_dma_stop(ch);
+
+ if (!list_is_last(&req->node, &ch->list)) {
+ struct tegra_dma_req *next_req;
+
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw(ch, next_req);
+ }
+
+ list_del(&req->node);
+
+ /* DMA lock is NOT held when callback is called */
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ req->complete(req);
+ return;
+ }
/* Load the next request into the hardware, if available
* */
if (!list_is_last(&req->node, &ch->list)) {
@@ -579,7 +614,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
req->status = TEGRA_DMA_REQ_SUCCESS;
/* DMA lock is NOT held when callback is called */
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
if (likely(req->threshold))
req->threshold(req);
return;
@@ -590,8 +625,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
* the second interrupt */
int bytes_transferred;
- bytes_transferred =
- (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ bytes_transferred = ch->req_transfer_count;
bytes_transferred += 1;
bytes_transferred <<= 3;
@@ -601,7 +635,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
list_del(&req->node);
/* DMA lock is NOT held when callbak is called */
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
req->complete(req);
return;
@@ -609,7 +643,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
BUG();
}
}
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
}
static irqreturn_t dma_isr(int irq, void *data)
@@ -646,6 +680,21 @@ int __init tegra_dma_init(void)
int i;
unsigned int irq;
void __iomem *addr;
+ struct clk *c;
+
+ bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
+
+ c = clk_get_sys("tegra-dma", NULL);
+ if (IS_ERR(c)) {
+ pr_err("Unable to get clock for APB DMA\n");
+ ret = PTR_ERR(c);
+ goto fail;
+ }
+ ret = clk_enable(c);
+ if (ret != 0) {
+ pr_err("Unable to enable clock for APB DMA\n");
+ goto fail;
+ }
addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
writel(GEN_ENABLE, addr + APB_DMA_GEN);
@@ -653,18 +702,9 @@ int __init tegra_dma_init(void)
writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
addr + APB_DMA_IRQ_MASK_SET);
- memset(channel_usage, 0, sizeof(channel_usage));
- memset(dma_channels, 0, sizeof(dma_channels));
-
- /* Reserve all the channels we are not supposed to touch */
- for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
- __set_bit(i, channel_usage);
-
for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
struct tegra_dma_channel *ch = &dma_channels[i];
- __clear_bit(i, channel_usage);
-
ch->id = i;
snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
@@ -673,7 +713,6 @@ int __init tegra_dma_init(void)
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->list);
- tegra_dma_init_hw(ch);
irq = INT_APB_DMA_CH0 + i;
ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
@@ -684,14 +723,15 @@ int __init tegra_dma_init(void)
goto fail;
}
ch->irq = irq;
+
+ __clear_bit(i, channel_usage);
}
/* mark the shared channel allocated */
__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
- for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
- __set_bit(i, channel_usage);
+ tegra_dma_initialized = true;
- return ret;
+ return 0;
fail:
writel(0, addr + APB_DMA_GEN);
for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
@@ -701,6 +741,7 @@ fail:
}
return ret;
}
+postcore_initcall(tegra_dma_init);
#ifdef CONFIG_PM
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index ad8048801513..12090a2cf3e0 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <mach/iomap.h>
+#include <mach/suspend.h>
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
@@ -380,6 +381,20 @@ static int __init tegra_gpio_init(void)
postcore_initcall(tegra_gpio_init);
+void __init tegra_gpio_config(struct tegra_gpio_table *table, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ int gpio = table[i].gpio;
+
+ if (table[i].enable)
+ tegra_gpio_enable(gpio);
+ else
+ tegra_gpio_disable(gpio);
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index a217f68ba57c..c8baf8f80d23 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -25,9 +25,7 @@ struct clk;
void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
-int clk_enable_cansleep(struct clk *clk);
-void clk_disable_cansleep(struct clk *clk);
-int clk_set_rate_cansleep(struct clk *clk, unsigned long rate);
-int clk_set_parent_cansleep(struct clk *clk, struct clk *parent);
+unsigned long clk_get_rate_all_locked(struct clk *c);
+void tegra_sdmmc_tap_delay(struct clk *c, int delay);
#endif
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S
index a0e7c12868bd..e0ebe65c1657 100644
--- a/arch/arm/mach-tegra/include/mach/debug-macro.S
+++ b/arch/arm/mach-tegra/include/mach/debug-macro.S
@@ -19,30 +19,15 @@
*/
#include <mach/io.h>
+#include <mach/iomap.h>
.macro addruart, rp, rv
ldr \rp, =IO_APB_PHYS @ physical
ldr \rv, =IO_APB_VIRT @ virtual
-#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
-#error "A debug UART must be selected in the kernel config to use DEBUG_LL"
-#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
- orr \rp, \rp, #0x6000
- orr \rv, \rv, #0x6000
-#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
- orr \rp, \rp, #0x6000
- orr \rp, \rp, #0x40
- orr \rv, \rv, #0x6000
- orr \rv, \rv, #0x40
-#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
- orr \rp, \rp, #0x6200
- orr \rv, \rv, #0x6200
-#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
- orr \rp, \rp, #0x6300
- orr \rv, \rv, #0x6300
-#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
- orr \rp, \rp, #0x6400
- orr \rv, \rv, #0x6400
-#endif
+ orr \rp, \rp, #(TEGRA_DEBUG_UART_BASE & 0xFF)
+ orr \rp, \rp, #(TEGRA_DEBUG_UART_BASE & 0xFF00)
+ orr \rv, \rv, #(TEGRA_DEBUG_UART_BASE & 0xFF)
+ orr \rv, \rv, #(TEGRA_DEBUG_UART_BASE & 0xFF00)
.endm
#define UART_SHIFT 2
diff --git a/arch/arm/mach-tegra/include/mach/gpio.h b/arch/arm/mach-tegra/include/mach/gpio.h
index e31f486d69a2..196f114dc241 100644
--- a/arch/arm/mach-tegra/include/mach/gpio.h
+++ b/arch/arm/mach-tegra/include/mach/gpio.h
@@ -20,6 +20,7 @@
#ifndef __MACH_TEGRA_GPIO_H
#define __MACH_TEGRA_GPIO_H
+#include <linux/init.h>
#include <mach/irqs.h>
#define TEGRA_NR_GPIOS INT_GPIO_NR
@@ -31,7 +32,7 @@
#define gpio_cansleep __gpio_cansleep
#define TEGRA_GPIO_TO_IRQ(gpio) (INT_GPIO_BASE + (gpio))
-#define TEGRA_IRQ_TO_GPIO(irq) ((gpio) - INT_GPIO_BASE)
+#define TEGRA_IRQ_TO_GPIO(irq) ((irq) - INT_GPIO_BASE)
static inline int gpio_to_irq(unsigned int gpio)
{
@@ -47,6 +48,12 @@ static inline int irq_to_gpio(unsigned int irq)
return -EINVAL;
}
+struct tegra_gpio_table {
+ int gpio; /* GPIO number */
+ bool enable; /* Enable for GPIO at init? */
+};
+
+void tegra_gpio_config(struct tegra_gpio_table *table, int num);
void tegra_gpio_enable(int gpio);
void tegra_gpio_disable(int gpio);
diff --git a/arch/arm/mach-tegra/include/mach/harmony_audio.h b/arch/arm/mach-tegra/include/mach/harmony_audio.h
new file mode 100644
index 000000000000..af086500ab7d
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/harmony_audio.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-tegra/include/mach/harmony_audio.h
+ *
+ * Copyright 2011 NVIDIA, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+struct harmony_audio_platform_data {
+ int gpio_spkr_en;
+ int gpio_hp_det;
+ int gpio_int_mic_en;
+ int gpio_ext_mic_en;
+};
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h
index 44a4f4bcf91f..691cdabd69cf 100644
--- a/arch/arm/mach-tegra/include/mach/iomap.h
+++ b/arch/arm/mach-tegra/include/mach/iomap.h
@@ -26,6 +26,9 @@
#define TEGRA_IRAM_BASE 0x40000000
#define TEGRA_IRAM_SIZE SZ_256K
+#define TEGRA_HOST1X_BASE 0x50000000
+#define TEGRA_HOST1X_SIZE 0x24000
+
#define TEGRA_ARM_PERIF_BASE 0x50040000
#define TEGRA_ARM_PERIF_SIZE SZ_8K
@@ -35,12 +38,30 @@
#define TEGRA_ARM_INT_DIST_BASE 0x50041000
#define TEGRA_ARM_INT_DIST_SIZE SZ_4K
+#define TEGRA_MPE_BASE 0x54040000
+#define TEGRA_MPE_SIZE SZ_256K
+
+#define TEGRA_VI_BASE 0x54080000
+#define TEGRA_VI_SIZE SZ_256K
+
+#define TEGRA_ISP_BASE 0x54100000
+#define TEGRA_ISP_SIZE SZ_256K
+
#define TEGRA_DISPLAY_BASE 0x54200000
#define TEGRA_DISPLAY_SIZE SZ_256K
#define TEGRA_DISPLAY2_BASE 0x54240000
#define TEGRA_DISPLAY2_SIZE SZ_256K
+#define TEGRA_HDMI_BASE 0x54280000
+#define TEGRA_HDMI_SIZE SZ_256K
+
+#define TEGRA_GART_BASE 0x58000000
+#define TEGRA_GART_SIZE SZ_32M
+
+#define TEGRA_RES_SEMA_BASE 0x60001000
+#define TEGRA_RES_SEMA_SIZE SZ_4K
+
#define TEGRA_PRIMARY_ICTLR_BASE 0x60004000
#define TEGRA_PRIMARY_ICTLR_SIZE SZ_64
@@ -140,6 +161,18 @@
#define TEGRA_PWFM_BASE 0x7000A000
#define TEGRA_PWFM_SIZE SZ_256
+#define TEGRA_PWFM0_BASE 0x7000A000
+#define TEGRA_PWFM0_SIZE 4
+
+#define TEGRA_PWFM1_BASE 0x7000A010
+#define TEGRA_PWFM1_SIZE 4
+
+#define TEGRA_PWFM2_BASE 0x7000A020
+#define TEGRA_PWFM2_SIZE 4
+
+#define TEGRA_PWFM3_BASE 0x7000A030
+#define TEGRA_PWFM3_SIZE 4
+
#define TEGRA_MIPI_BASE 0x7000B000
#define TEGRA_MIPI_SIZE SZ_256
@@ -221,4 +254,18 @@
#define TEGRA_SDMMC4_BASE 0xC8000600
#define TEGRA_SDMMC4_SIZE SZ_512
+#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
+# define TEGRA_DEBUG_UART_BASE 0
+#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTA_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTB_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTC_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTD_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE
+#endif
+
#endif
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h
index 71bbf3422953..73265af4dda3 100644
--- a/arch/arm/mach-tegra/include/mach/irqs.h
+++ b/arch/arm/mach-tegra/include/mach/irqs.h
@@ -88,7 +88,7 @@
#define INT_SYS_STATS_MON (INT_SEC_BASE + 22)
#define INT_GPIO5 (INT_SEC_BASE + 23)
#define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24)
-#define INT_CPU2_PMU_INTR (INT_SEC_BASE + 25)
+#define INT_CPU1_PMU_INTR (INT_SEC_BASE + 25)
#define INT_SEC_RES_26 (INT_SEC_BASE + 26)
#define INT_S_LINK1 (INT_SEC_BASE + 27)
#define INT_APB_DMA_COP (INT_SEC_BASE + 28)
@@ -166,10 +166,18 @@
#define INT_QUAD_RES_30 (INT_QUAD_BASE + 30)
#define INT_QUAD_RES_31 (INT_QUAD_BASE + 31)
-#define INT_GPIO_BASE (INT_QUAD_BASE + 32)
+#define INT_MAIN_NR (INT_QUAD_BASE + 32 - INT_PRI_BASE)
+
+#define INT_GPIO_BASE (INT_PRI_BASE + INT_MAIN_NR)
+
#define INT_GPIO_NR (28 * 8)
-#define NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
+#define TEGRA_NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
+
+#define INT_BOARD_BASE TEGRA_NR_IRQS
+#define NR_BOARD_IRQS 32
+
+#define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS)
#endif
#endif
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 66ad2760c621..04c779832c78 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data {
const struct matrix_keymap_data *keymap_data;
bool wakeup;
+ bool use_fn_map;
};
#endif
diff --git a/arch/arm/mach-tegra/include/mach/legacy_irq.h b/arch/arm/mach-tegra/include/mach/legacy_irq.h
index db1eb3dd04c8..d898c0e3d905 100644
--- a/arch/arm/mach-tegra/include/mach/legacy_irq.h
+++ b/arch/arm/mach-tegra/include/mach/legacy_irq.h
@@ -27,5 +27,9 @@ int tegra_legacy_force_irq_status(unsigned int irq);
void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
unsigned long tegra_legacy_vfiq(int nr);
unsigned long tegra_legacy_class(int nr);
+int tegra_legacy_irq_set_wake(int irq, int enable);
+void tegra_legacy_irq_set_lp1_wake_mask(void);
+void tegra_legacy_irq_restore_mask(void);
+void tegra_init_legacy_irq(void);
#endif
diff --git a/arch/arm/mach-tegra/include/mach/memory.h b/arch/arm/mach-tegra/include/mach/memory.h
index 6151bab62af2..537db3aa81a7 100644
--- a/arch/arm/mach-tegra/include/mach/memory.h
+++ b/arch/arm/mach-tegra/include/mach/memory.h
@@ -22,7 +22,7 @@
#define __MACH_TEGRA_MEMORY_H
/* physical offset of RAM */
-#define PHYS_OFFSET UL(0)
+#define PLAT_PHYS_OFFSET UL(0)
#endif
diff --git a/arch/arm/mach-tegra/include/mach/pinmux-t2.h b/arch/arm/mach-tegra/include/mach/pinmux-t2.h
index e5b9d740f973..4c2626347263 100644
--- a/arch/arm/mach-tegra/include/mach/pinmux-t2.h
+++ b/arch/arm/mach-tegra/include/mach/pinmux-t2.h
@@ -167,6 +167,16 @@ enum tegra_drive_pingroup {
TEGRA_DRIVE_PINGROUP_XM2D,
TEGRA_DRIVE_PINGROUP_XM2CLK,
TEGRA_DRIVE_PINGROUP_MEMCOMP,
+ TEGRA_DRIVE_PINGROUP_SDIO1,
+ TEGRA_DRIVE_PINGROUP_CRT,
+ TEGRA_DRIVE_PINGROUP_DDC,
+ TEGRA_DRIVE_PINGROUP_GMA,
+ TEGRA_DRIVE_PINGROUP_GMB,
+ TEGRA_DRIVE_PINGROUP_GMC,
+ TEGRA_DRIVE_PINGROUP_GMD,
+ TEGRA_DRIVE_PINGROUP_GME,
+ TEGRA_DRIVE_PINGROUP_OWR,
+ TEGRA_DRIVE_PINGROUP_UAD,
TEGRA_MAX_DRIVE_PINGROUP,
};
diff --git a/arch/arm/mach-tegra/include/mach/powergate.h b/arch/arm/mach-tegra/include/mach/powergate.h
new file mode 100644
index 000000000000..401d1b725291
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/powergate.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm/mach-tegra/include/mach/powergate.h
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_POWERGATE_H_
+#define _MACH_TEGRA_POWERGATE_H_
+
+#define TEGRA_POWERGATE_CPU 0
+#define TEGRA_POWERGATE_3D 1
+#define TEGRA_POWERGATE_VENC 2
+#define TEGRA_POWERGATE_PCIE 3
+#define TEGRA_POWERGATE_VDEC 4
+#define TEGRA_POWERGATE_L2 5
+#define TEGRA_POWERGATE_MPE 6
+#define TEGRA_NUM_POWERGATE 7
+
+int tegra_powergate_power_on(int id);
+int tegra_powergate_power_off(int id);
+bool tegra_powergate_is_powered(int id);
+int tegra_powergate_remove_clamping(int id);
+
+/* Must be called with clk disabled, and returns with clk enabled */
+int tegra_powergate_sequence_power_up(int id, struct clk *clk);
+
+#endif /* _MACH_TEGRA_POWERGATE_H_ */
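
A sketch of the calling convention documented above for tegra_powergate_sequence_power_up(): the partition clock is expected to be disabled on entry and is left running on success. The driver function and clock below are placeholders, not part of this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <mach/powergate.h>

static int example_power_up_3d(struct clk *example_3d_clk)
{
	int err;

	/* example_3d_clk is assumed to be disabled at this point */
	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D,
						example_3d_clk);
	if (err)
		return err;

	/* partition is now powered, unclamped, and its clock is enabled */
	return 0;
}
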
diff --git a/arch/arm/mach-tegra/include/mach/suspend.h b/arch/arm/mach-tegra/include/mach/suspend.h
new file mode 100644
index 000000000000..5af8715d2e1e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/suspend.h
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/mach-tegra/include/mach/suspend.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#ifndef _MACH_TEGRA_SUSPEND_H_
+#define _MACH_TEGRA_SUSPEND_H_
+
+void tegra_pinmux_suspend(void);
+void tegra_irq_suspend(void);
+void tegra_gpio_suspend(void);
+void tegra_clk_suspend(void);
+void tegra_dma_suspend(void);
+void tegra_timer_suspend(void);
+
+void tegra_pinmux_resume(void);
+void tegra_irq_resume(void);
+void tegra_gpio_resume(void);
+void tegra_clk_resume(void);
+void tegra_dma_resume(void);
+void tegra_timer_resume(void);
+
+#endif /* _MACH_TEGRA_SUSPEND_H_ */
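
The header above only declares the per-subsystem hooks; a hypothetical suspend path pairing them might look like the sketch below. The ordering and wrapper name are illustrative, not taken from this series:

#include <mach/suspend.h>

static void example_tegra_suspend_cycle(void)
{
	/* quiesce the SoC blocks before register state is lost */
	tegra_irq_suspend();
	tegra_dma_suspend();
	tegra_gpio_suspend();
	tegra_pinmux_suspend();
	tegra_clk_suspend();
	tegra_timer_suspend();

	/* ... enter the low-power state and wake back up ... */

	tegra_timer_resume();
	tegra_clk_resume();
	tegra_pinmux_resume();
	tegra_gpio_resume();
	tegra_dma_resume();
	tegra_irq_resume();
}
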
diff --git a/arch/arm/mach-tegra/include/mach/system.h b/arch/arm/mach-tegra/include/mach/system.h
index 84d5d46113f7..d0183d876c3b 100644
--- a/arch/arm/mach-tegra/include/mach/system.h
+++ b/arch/arm/mach-tegra/include/mach/system.h
@@ -24,16 +24,10 @@
#include <mach/hardware.h>
#include <mach/iomap.h>
-static inline void arch_idle(void)
-{
-}
+extern void (*arch_reset)(char mode, const char *cmd);
-static inline void arch_reset(char mode, const char *cmd)
+static inline void arch_idle(void)
{
- void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04);
- u32 reg = readl(reset);
- reg |= 0x04;
- writel(reg, reset);
}
#endif
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 6c4dd815abd7..4e8323770c79 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -26,23 +26,9 @@
#include <mach/iomap.h>
-#if defined(CONFIG_TEGRA_DEBUG_UARTA)
-#define DEBUG_UART_BASE TEGRA_UARTA_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
-#define DEBUG_UART_BASE TEGRA_UARTB_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
-#define DEBUG_UART_BASE TEGRA_UARTC_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
-#define DEBUG_UART_BASE TEGRA_UARTD_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
-#define DEBUG_UART_BASE TEGRA_UARTE_BASE
-#else
-#define DEBUG_UART_BASE NULL
-#endif
-
static void putc(int c)
{
- volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE;
+ volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
int shift = 2;
if (uart == NULL)
@@ -59,7 +45,7 @@ static inline void flush(void)
static inline void arch_decomp_setup(void)
{
- volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE;
+ volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
int shift = 2;
if (uart == NULL)
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 17c74d21077c..dfbc219ea492 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -26,73 +27,119 @@
#include <asm/hardware/gic.h>
#include <mach/iomap.h>
+#include <mach/legacy_irq.h>
+#include <mach/suspend.h>
#include "board.h"
-#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
-#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
-#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
+#define PMC_CTRL 0x0
+#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
+#define PMC_WAKE_MASK 0xc
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_DPD_SAMPLE 0x20
-#define APBDMA_IRQ_STA_CPU 0x14
-#define APBDMA_IRQ_MASK_SET 0x20
-#define APBDMA_IRQ_MASK_CLR 0x24
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
-#define ICTLR_CPU_IER 0x20
-#define ICTLR_CPU_IER_SET 0x24
-#define ICTLR_CPU_IER_CLR 0x28
-#define ICTLR_CPU_IEP_CLASS 0x2c
-#define ICTLR_COP_IER 0x30
-#define ICTLR_COP_IER_SET 0x34
-#define ICTLR_COP_IER_CLR 0x38
-#define ICTLR_COP_IEP_CLASS 0x3c
+static u32 tegra_lp0_wake_enb;
+static u32 tegra_lp0_wake_level;
+static u32 tegra_lp0_wake_level_any;
static void (*tegra_gic_mask_irq)(struct irq_data *d);
static void (*tegra_gic_unmask_irq)(struct irq_data *d);
+static void (*tegra_gic_ack_irq)(struct irq_data *d);
-#define irq_to_ictlr(irq) (((irq) - 32) >> 5)
-static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE);
-#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr) * 0x100)
+/* ensures that sufficient time has passed for a register write to
+ * serialize into the 32KHz domain */
+static void pmc_32kwritel(u32 val, unsigned long offs)
+{
+ writel(val, pmc + offs);
+ udelay(130);
+}
+
+int tegra_set_lp1_wake(int irq, int enable)
+{
+ return tegra_legacy_irq_set_wake(irq, enable);
+}
+
+void tegra_set_lp0_wake_pads(u32 wake_enb, u32 wake_level, u32 wake_any)
+{
+ u32 temp;
+ u32 status;
+ u32 lvl;
+
+ wake_level &= wake_enb;
+ wake_any &= wake_enb;
+
+ wake_level |= (tegra_lp0_wake_level & tegra_lp0_wake_enb);
+ wake_any |= (tegra_lp0_wake_level_any & tegra_lp0_wake_enb);
+
+ wake_enb |= tegra_lp0_wake_enb;
+
+ pmc_32kwritel(0, PMC_SW_WAKE_STATUS);
+ temp = readl(pmc + PMC_CTRL);
+ temp |= PMC_CTRL_LATCH_WAKEUPS;
+ pmc_32kwritel(temp, PMC_CTRL);
+ temp &= ~PMC_CTRL_LATCH_WAKEUPS;
+ pmc_32kwritel(temp, PMC_CTRL);
+ status = readl(pmc + PMC_SW_WAKE_STATUS);
+ lvl = readl(pmc + PMC_WAKE_LEVEL);
+
+ /* flip the wakeup trigger for any-edge triggered pads
+ * which are currently asserting as wakeups */
+ lvl ^= status;
+ lvl &= wake_any;
+
+ wake_level |= lvl;
+
+ writel(wake_level, pmc + PMC_WAKE_LEVEL);
+	/* Enable DPD sampling to capture the pad data and the direction
+	 * in which each pad will be driven during LP0 mode */
+ writel(0x1, pmc + PMC_DPD_SAMPLE);
+
+ writel(wake_enb, pmc + PMC_WAKE_MASK);
+}
static void tegra_mask(struct irq_data *d)
{
- void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
tegra_gic_mask_irq(d);
- writel(1 << (d->irq & 31), addr+ICTLR_CPU_IER_CLR);
+ tegra_legacy_mask_irq(d->irq);
}
static void tegra_unmask(struct irq_data *d)
{
- void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
tegra_gic_unmask_irq(d);
- writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_SET);
+ tegra_legacy_unmask_irq(d->irq);
}
-#ifdef CONFIG_PM
+static void tegra_ack(struct irq_data *d)
+{
+ tegra_legacy_force_irq_clr(d->irq);
+ tegra_gic_ack_irq(d);
+}
-static int tegra_set_wake(struct irq_data *d, unsigned int on)
+static int tegra_retrigger(struct irq_data *d)
{
- return 0;
+ tegra_legacy_force_irq_set(d->irq);
+ return 1;
}
-#endif
static struct irq_chip tegra_irq = {
- .name = "PPI",
- .irq_mask = tegra_mask,
- .irq_unmask = tegra_unmask,
-#ifdef CONFIG_PM
- .irq_set_wake = tegra_set_wake,
-#endif
+ .name = "PPI",
+ .irq_ack = tegra_ack,
+ .irq_mask = tegra_mask,
+ .irq_unmask = tegra_unmask,
+ .irq_retrigger = tegra_retrigger,
};
void __init tegra_init_irq(void)
{
struct irq_chip *gic;
unsigned int i;
+ int irq;
- for (i = 0; i < PPI_NR; i++) {
- writel(~0, ictlr_to_virt(i) + ICTLR_CPU_IER_CLR);
- writel(0, ictlr_to_virt(i) + ICTLR_CPU_IEP_CLASS);
- }
+ tegra_init_legacy_irq();
gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
@@ -100,72 +147,15 @@ void __init tegra_init_irq(void)
gic = get_irq_chip(29);
tegra_gic_unmask_irq = gic->irq_unmask;
tegra_gic_mask_irq = gic->irq_mask;
- tegra_irq.irq_ack = gic->irq_ack;
+ tegra_gic_ack_irq = gic->irq_ack;
#ifdef CONFIG_SMP
tegra_irq.irq_set_affinity = gic->irq_set_affinity;
#endif
- for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
- set_irq_chip(i, &tegra_irq);
- set_irq_handler(i, handle_level_irq);
- set_irq_flags(i, IRQF_VALID);
+ for (i = 0; i < INT_MAIN_NR; i++) {
+ irq = INT_PRI_BASE + i;
+ set_irq_chip(irq, &tegra_irq);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
}
}
-
-#ifdef CONFIG_PM
-static u32 cop_ier[PPI_NR];
-static u32 cpu_ier[PPI_NR];
-static u32 cpu_iep[PPI_NR];
-
-void tegra_irq_suspend(void)
-{
- unsigned long flags;
- int i;
-
- for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
- struct irq_desc *desc = irq_to_desc(i);
- if (!desc)
- continue;
- if (desc->status & IRQ_WAKEUP) {
- pr_debug("irq %d is wakeup\n", i);
- continue;
- }
- disable_irq(i);
- }
-
- local_irq_save(flags);
- for (i = 0; i < PPI_NR; i++) {
- void __iomem *ictlr = ictlr_to_virt(i);
- cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
- cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS);
- cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
- writel(~0, ictlr + ICTLR_COP_IER_CLR);
- }
- local_irq_restore(flags);
-}
-
-void tegra_irq_resume(void)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < PPI_NR; i++) {
- void __iomem *ictlr = ictlr_to_virt(i);
- writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
- writel(~0ul, ictlr + ICTLR_CPU_IER_CLR);
- writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
- writel(0, ictlr + ICTLR_COP_IEP_CLASS);
- writel(~0ul, ictlr + ICTLR_COP_IER_CLR);
- writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
- }
- local_irq_restore(flags);
-
- for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
- struct irq_desc *desc = irq_to_desc(i);
- if (!desc || (desc->status & IRQ_WAKEUP))
- continue;
- enable_irq(i);
- }
-}
-#endif
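
One step in tegra_set_lp0_wake_pads() above deserves a note: for any-edge wake pads that are latched as currently asserting, lvl ^= status inverts the programmed wake level so the next (opposite) edge wakes the system, and the mask with wake_any confines that flip to any-edge pads. A stand-alone numeric illustration, with invented bit values:

#include <stdio.h>

int main(void)
{
	unsigned int wake_any = 1u << 3;	/* pad 3 is an any-edge wake source   */
	unsigned int lvl      = 1u << 3;	/* pad 3 currently armed to wake high */
	unsigned int status   = 1u << 3;	/* pad 3 is asserting right now       */

	lvl ^= status;		/* invert the level of pads that already fired */
	lvl &= wake_any;	/* but only touch any-edge pads                */

	printf("level bits contributed = 0x%x\n", lvl);	/* 0x0: wake on low next */
	return 0;
}
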
diff --git a/arch/arm/mach-tegra/legacy_irq.c b/arch/arm/mach-tegra/legacy_irq.c
index 7cc8601c19ff..38eb719a4f53 100644
--- a/arch/arm/mach-tegra/legacy_irq.c
+++ b/arch/arm/mach-tegra/legacy_irq.c
@@ -18,17 +18,30 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <mach/iomap.h>
+#include <mach/irqs.h>
#include <mach/legacy_irq.h>
-#define ICTLR_CPU_IER 0x20
-#define ICTLR_CPU_IER_SET 0x24
-#define ICTLR_CPU_IER_CLR 0x28
-#define ICTLR_CPU_IEP_CLASS 0x2C
+#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
+#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
+#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
+
#define ICTLR_CPU_IEP_VFIQ 0x08
#define ICTLR_CPU_IEP_FIR 0x14
#define ICTLR_CPU_IEP_FIR_SET 0x18
#define ICTLR_CPU_IEP_FIR_CLR 0x1c
+#define ICTLR_CPU_IER 0x20
+#define ICTLR_CPU_IER_SET 0x24
+#define ICTLR_CPU_IER_CLR 0x28
+#define ICTLR_CPU_IEP_CLASS 0x2C
+
+#define ICTLR_COP_IER 0x30
+#define ICTLR_COP_IER_SET 0x34
+#define ICTLR_COP_IER_CLR 0x38
+#define ICTLR_COP_IEP_CLASS 0x3c
+
+#define NUM_ICTLRS 4
+
static void __iomem *ictlr_reg_base[] = {
IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
@@ -36,6 +49,9 @@ static void __iomem *ictlr_reg_base[] = {
IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
};
+static u32 tegra_legacy_wake_mask[4];
+static u32 tegra_legacy_saved_mask[4];
+
/* When going into deep sleep, the CPU is powered down, taking the GIC with it.
In order to wake, the wake interrupts need to be enabled in the legacy
interrupt controller. */
@@ -112,3 +128,88 @@ unsigned long tegra_legacy_class(int nr)
base = ictlr_reg_base[nr];
return readl(base + ICTLR_CPU_IEP_CLASS);
}
+
+int tegra_legacy_irq_set_wake(int irq, int enable)
+{
+ irq -= 32;
+ if (enable)
+ tegra_legacy_wake_mask[irq >> 5] |= 1 << (irq & 31);
+ else
+ tegra_legacy_wake_mask[irq >> 5] &= ~(1 << (irq & 31));
+
+ return 0;
+}
+
+void tegra_legacy_irq_set_lp1_wake_mask(void)
+{
+ void __iomem *base;
+ int i;
+
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ base = ictlr_reg_base[i];
+ tegra_legacy_saved_mask[i] = readl(base + ICTLR_CPU_IER);
+ writel(tegra_legacy_wake_mask[i], base + ICTLR_CPU_IER);
+ }
+}
+
+void tegra_legacy_irq_restore_mask(void)
+{
+ void __iomem *base;
+ int i;
+
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ base = ictlr_reg_base[i];
+ writel(tegra_legacy_saved_mask[i], base + ICTLR_CPU_IER);
+ }
+}
+
+void tegra_init_legacy_irq(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ void __iomem *ictlr = ictlr_reg_base[i];
+ writel(~0, ictlr + ICTLR_CPU_IER_CLR);
+ writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
+ }
+}
+
+#ifdef CONFIG_PM
+static u32 cop_ier[NUM_ICTLRS];
+static u32 cpu_ier[NUM_ICTLRS];
+static u32 cpu_iep[NUM_ICTLRS];
+
+void tegra_irq_suspend(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ void __iomem *ictlr = ictlr_reg_base[i];
+ cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
+ cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS);
+ cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
+ writel(~0, ictlr + ICTLR_COP_IER_CLR);
+ }
+ local_irq_restore(flags);
+}
+
+void tegra_irq_resume(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ void __iomem *ictlr = ictlr_reg_base[i];
+ writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
+ writel(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
+ writel(0, ictlr + ICTLR_COP_IEP_CLASS);
+ writel(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
+ }
+ local_irq_restore(flags);
+}
+#endif
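
tegra_legacy_irq_set_wake() above maps a Linux IRQ number onto one of the four legacy controllers by skipping the first 32 (GIC-internal) interrupts and then splitting into 32-bit words. A tiny stand-alone check of that indexing, with an invented IRQ number:

#include <stdio.h>

int main(void)
{
	int irq = 68;		/* example only */
	int hw  = irq - 32;	/* legacy controllers start after the GIC's first 32 */

	printf("controller %d, bit %d\n", hw >> 5, hw & 31);	/* controller 1, bit 4 */
	return 0;
}
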
diff --git a/arch/arm/mach-tegra/localtimer.c b/arch/arm/mach-tegra/localtimer.c
index f81ca7cbbc1f..e91d681d45a2 100644
--- a/arch/arm/mach-tegra/localtimer.c
+++ b/arch/arm/mach-tegra/localtimer.c
@@ -18,8 +18,9 @@
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
evt->irq = IRQ_LOCALTIMER;
twd_timer_setup(evt);
+ return 0;
}
diff --git a/arch/arm/mach-tegra/pinmux-t2-tables.c b/arch/arm/mach-tegra/pinmux-t2-tables.c
index a6ea34e782dc..a475367befa3 100644
--- a/arch/arm/mach-tegra/pinmux-t2-tables.c
+++ b/arch/arm/mach-tegra/pinmux-t2-tables.c
@@ -29,6 +29,7 @@
#include <mach/iomap.h>
#include <mach/pinmux.h>
+#include <mach/suspend.h>
#define DRIVE_PINGROUP(pg_name, r) \
[TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
@@ -65,6 +66,16 @@ const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE
DRIVE_PINGROUP(XM2D, 0x8cc),
DRIVE_PINGROUP(XM2CLK, 0x8d0),
DRIVE_PINGROUP(MEMCOMP, 0x8d4),
+ DRIVE_PINGROUP(SDIO1, 0x8e0),
+ DRIVE_PINGROUP(CRT, 0x8ec),
+ DRIVE_PINGROUP(DDC, 0x8f0),
+ DRIVE_PINGROUP(GMA, 0x8f4),
+ DRIVE_PINGROUP(GMB, 0x8f8),
+ DRIVE_PINGROUP(GMC, 0x8fc),
+ DRIVE_PINGROUP(GMD, 0x900),
+ DRIVE_PINGROUP(GME, 0x904),
+ DRIVE_PINGROUP(OWR, 0x908),
+ DRIVE_PINGROUP(UAD, 0x90c),
};
#define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \
@@ -216,7 +227,8 @@ const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
#define PULLUPDOWN_REG_NUM 5
static u32 pinmux_reg[TRISTATE_REG_NUM + PIN_MUX_CTL_REG_NUM +
- PULLUPDOWN_REG_NUM];
+ PULLUPDOWN_REG_NUM +
+ ARRAY_SIZE(tegra_soc_drive_pingroups)];
static inline unsigned long pg_readl(unsigned long offset)
{
@@ -233,14 +245,17 @@ void tegra_pinmux_suspend(void)
unsigned int i;
u32 *ctx = pinmux_reg;
- for (i = 0; i < TRISTATE_REG_NUM; i++)
- *ctx++ = pg_readl(TRISTATE_REG_A + i*4);
-
for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++)
*ctx++ = pg_readl(PIN_MUX_CTL_REG_A + i*4);
for (i = 0; i < PULLUPDOWN_REG_NUM; i++)
*ctx++ = pg_readl(PULLUPDOWN_REG_A + i*4);
+
+ for (i = 0; i < TRISTATE_REG_NUM; i++)
+ *ctx++ = pg_readl(TRISTATE_REG_A + i*4);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
+ *ctx++ = pg_readl(tegra_soc_drive_pingroups[i].reg);
}
void tegra_pinmux_resume(void)
@@ -256,5 +271,8 @@ void tegra_pinmux_resume(void)
for (i = 0; i < TRISTATE_REG_NUM; i++)
pg_writel(*ctx++, TRISTATE_REG_A + i*4);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
+ pg_writel(*ctx++, tegra_soc_drive_pingroups[i].reg);
}
#endif
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
new file mode 100644
index 000000000000..3cee9aa1f2c8
--- /dev/null
+++ b/arch/arm/mach-tegra/powergate.c
@@ -0,0 +1,212 @@
+/*
+ * arch/arm/mach-tegra/powergate.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/powergate.h>
+
+#define PWRGATE_TOGGLE 0x30
+#define PWRGATE_TOGGLE_START (1 << 8)
+
+#define REMOVE_CLAMPING 0x34
+
+#define PWRGATE_STATUS 0x38
+
+static DEFINE_SPINLOCK(tegra_powergate_lock);
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+
+static u32 pmc_read(unsigned long reg)
+{
+ return readl(pmc + reg);
+}
+
+static void pmc_write(u32 val, unsigned long reg)
+{
+ writel(val, pmc + reg);
+}
+
+static int tegra_powergate_set(int id, bool new_state)
+{
+ bool status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+ status = pmc_read(PWRGATE_STATUS) & (1 << id);
+
+ if (status == new_state) {
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ return -EINVAL;
+ }
+
+ pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+
+ return 0;
+}
+
+int tegra_powergate_power_on(int id)
+{
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return -EINVAL;
+
+ return tegra_powergate_set(id, true);
+}
+
+int tegra_powergate_power_off(int id)
+{
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return -EINVAL;
+
+ return tegra_powergate_set(id, false);
+}
+
+bool tegra_powergate_is_powered(int id)
+{
+ u32 status;
+
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return -EINVAL;
+
+ status = pmc_read(PWRGATE_STATUS) & (1 << id);
+ return !!status;
+}
+
+int tegra_powergate_remove_clamping(int id)
+{
+ u32 mask;
+
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return -EINVAL;
+
+ /*
+ * Tegra 2 has a bug where PCIE and VDE clamping masks are
+	 * swapped relative to the partition ids
+ */
+ if (id == TEGRA_POWERGATE_VDEC)
+ mask = (1 << TEGRA_POWERGATE_PCIE);
+ else if (id == TEGRA_POWERGATE_PCIE)
+ mask = (1 << TEGRA_POWERGATE_VDEC);
+ else
+ mask = (1 << id);
+
+ pmc_write(mask, REMOVE_CLAMPING);
+
+ return 0;
+}
+
+/* Must be called with clk disabled, and returns with clk enabled */
+int tegra_powergate_sequence_power_up(int id, struct clk *clk)
+{
+ int ret;
+
+ tegra_periph_reset_assert(clk);
+
+ ret = tegra_powergate_power_on(id);
+ if (ret)
+ goto err_power;
+
+ ret = clk_enable(clk);
+ if (ret)
+ goto err_clk;
+
+ udelay(10);
+
+ ret = tegra_powergate_remove_clamping(id);
+ if (ret)
+ goto err_clamp;
+
+ udelay(10);
+ tegra_periph_reset_deassert(clk);
+
+ return 0;
+
+err_clamp:
+ clk_disable(clk);
+err_clk:
+ tegra_powergate_power_off(id);
+err_power:
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char * const powergate_name[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+};
+
+static int powergate_show(struct seq_file *s, void *data)
+{
+ int i;
+
+ seq_printf(s, " powergate powered\n");
+ seq_printf(s, "------------------\n");
+
+ for (i = 0; i < TEGRA_NUM_POWERGATE; i++)
+ seq_printf(s, " %9s %7s\n", powergate_name[i],
+ tegra_powergate_is_powered(i) ? "yes" : "no");
+ return 0;
+}
+
+static int powergate_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, powergate_show, inode->i_private);
+}
+
+static const struct file_operations powergate_fops = {
+ .open = powergate_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init powergate_debugfs_init(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+		&powergate_fops);
+	if (!d)
+		return -ENOMEM;
+
+	return 0;
+}
+
+late_initcall(powergate_debugfs_init);
+
+#endif
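
For readers skimming tegra_powergate_remove_clamping() above, the VDEC/PCIE swap can be restated as a pure function; the numeric ids mirror the TEGRA_POWERGATE_* defines from powergate.h:

unsigned int example_remove_clamping_mask(int id)
{
	if (id == 4)		/* TEGRA_POWERGATE_VDEC */
		return 1u << 3;	/* the PCIE bit clears the VDEC clamp */
	if (id == 3)		/* TEGRA_POWERGATE_PCIE */
		return 1u << 4;	/* the VDEC bit clears the PCIE clamp */
	return 1u << id;
}
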
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index f0dae6d8ba52..6d7c4eea4dcb 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -23,14 +23,15 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/hrtimer.h>
#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <mach/iomap.h>
+#include <mach/suspend.h>
#include "clock.h"
#include "fuse.h"
-#include "tegra2_dvfs.h"
+#include "tegra2_emc.h"
#define RST_DEVICES 0x004
#define RST_DEVICES_SET 0x300
@@ -51,7 +52,7 @@
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
-#define OSC_CTRL_MASK 0x3f2
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
#define OSC_FREQ_DET 0x58
#define OSC_FREQ_DET_TRIG (1<<31)
@@ -73,12 +74,15 @@
#define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF
#define PERIPH_CLK_SOURCE_DIV_SHIFT 0
+#define SDMMC_CLK_INT_FB_SEL (1 << 23)
+#define SDMMC_CLK_INT_FB_DLY_SHIFT 16
+#define SDMMC_CLK_INT_FB_DLY_MASK (0xF << SDMMC_CLK_INT_FB_DLY_SHIFT)
+
#define PLL_BASE 0x0
#define PLL_BASE_BYPASS (1<<31)
#define PLL_BASE_ENABLE (1<<30)
#define PLL_BASE_REF_ENABLE (1<<29)
#define PLL_BASE_OVERRIDE (1<<28)
-#define PLL_BASE_LOCK (1<<27)
#define PLL_BASE_DIVP_MASK (0x7<<20)
#define PLL_BASE_DIVP_SHIFT 20
#define PLL_BASE_DIVN_MASK (0x3FF<<8)
@@ -93,7 +97,6 @@
#define PLL_OUT_RESET_DISABLE (1<<0)
#define PLL_MISC(c) (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc)
-#define PLL_MISC_LOCK_ENABLE(c) (((c)->flags & PLLU) ? (1<<22) : (1<<18))
#define PLL_MISC_DCCON_SHIFT 20
#define PLL_MISC_CPCON_SHIFT 8
@@ -111,9 +114,9 @@
#define PLLE_MISC_READY (1 << 15)
-#define PERIPH_CLK_TO_ENB_REG(c) ((c->clk_num / 32) * 4)
-#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->clk_num / 32) * 8)
-#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->clk_num % 32))
+#define PERIPH_CLK_TO_ENB_REG(c) ((c->u.periph.clk_num / 32) * 4)
+#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->u.periph.clk_num / 32) * 8)
+#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->u.periph.clk_num % 32))
#define SUPER_CLK_MUX 0x00
#define SUPER_STATE_SHIFT 28
@@ -134,12 +137,42 @@
#define BUS_CLK_DISABLE (1<<3)
#define BUS_CLK_DIV_MASK 0x3
+#define PMC_CTRL 0x0
+ #define PMC_CTRL_BLINK_ENB (1 << 7)
+
+#define PMC_DPD_PADS_ORIDE 0x1c
+ #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20)
+
+#define PMC_BLINK_TIMER_DATA_ON_SHIFT 0
+#define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff
+#define PMC_BLINK_TIMER_ENB (1 << 15)
+#define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16
+#define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff
+
static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+
+/*
+ * Some clocks share a register with other clocks. Any clock op that
+ * non-atomically modifies a register used by another clock must lock
+ * clock_register_lock first.
+ */
+static DEFINE_SPINLOCK(clock_register_lock);
+
+/*
+ * Some peripheral clocks share an enable bit, so refcount the enable bits
+ * in registers CLK_ENABLE_L, CLK_ENABLE_H, and CLK_ENABLE_U
+ */
+static int tegra_periph_clk_enable_refcount[3 * 32];
#define clk_writel(value, reg) \
__raw_writel(value, (u32)reg_clk_base + (reg))
#define clk_readl(reg) \
__raw_readl((u32)reg_clk_base + (reg))
+#define pmc_writel(value, reg) \
+ __raw_writel(value, (u32)reg_pmc_base + (reg))
+#define pmc_readl(reg) \
+ __raw_readl((u32)reg_pmc_base + (reg))
unsigned long clk_measure_input_freq(void)
{
@@ -245,6 +278,18 @@ static struct clk_ops tegra_clk_m_ops = {
.disable = tegra2_clk_m_disable,
};
+void tegra2_periph_reset_assert(struct clk *c)
+{
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, true);
+}
+
+void tegra2_periph_reset_deassert(struct clk *c)
+{
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, false);
+}
+
/* super clock functions */
/* "super clocks" on tegra have two-stage muxes and a clock skipping
* super divider. We will ignore the clock skipping divider, since we
@@ -303,12 +348,12 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
val |= sel->value << shift;
if (c->refcnt)
- clk_enable_locked(p);
+ clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -317,11 +362,24 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
return -EINVAL;
}
+/*
+ * Super clocks have "clock skippers" instead of dividers. Dividing using
+ * a clock skipper does not allow the voltage to be scaled down, so instead
+ * adjust the rate of the parent clock. This requires that the parent of a
+ * super clock have no other children, otherwise the rate will change
+ * underneath the other children.
+ */
+static int tegra2_super_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ return clk_set_rate(c->parent, rate);
+}
+
static struct clk_ops tegra_super_ops = {
.init = tegra2_super_clk_init,
.enable = tegra2_super_clk_enable,
.disable = tegra2_super_clk_disable,
.set_parent = tegra2_super_clk_set_parent,
+ .set_rate = tegra2_super_clk_set_rate,
};
/* virtual cpu clock functions */
@@ -351,25 +409,36 @@ static void tegra2_cpu_clk_disable(struct clk *c)
static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate)
{
int ret;
- ret = clk_set_parent_locked(c->parent, c->backup);
+ /*
+ * Take an extra reference to the main pll so it doesn't turn
+ * off when we move the cpu off of it
+ */
+ clk_enable(c->u.cpu.main);
+
+ ret = clk_set_parent(c->parent, c->u.cpu.backup);
if (ret) {
- pr_err("Failed to switch cpu to clock %s\n", c->backup->name);
- return ret;
+ pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name);
+ goto out;
}
- ret = clk_set_rate_locked(c->main, rate);
+ if (rate == clk_get_rate(c->u.cpu.backup))
+ goto out;
+
+ ret = clk_set_rate(c->u.cpu.main, rate);
if (ret) {
pr_err("Failed to change cpu pll to %lu\n", rate);
- return ret;
+ goto out;
}
- ret = clk_set_parent_locked(c->parent, c->main);
+ ret = clk_set_parent(c->parent, c->u.cpu.main);
if (ret) {
- pr_err("Failed to switch cpu to clock %s\n", c->main->name);
- return ret;
+ pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name);
+ goto out;
}
- return 0;
+out:
+ clk_disable(c->u.cpu.main);
+ return ret;
}
static struct clk_ops tegra_cpu_ops = {
@@ -379,6 +448,20 @@ static struct clk_ops tegra_cpu_ops = {
.set_rate = tegra2_cpu_clk_set_rate,
};
+/* virtual cop clock functions. Used to acquire the fake 'cop' clock to
+ * reset the COP block (i.e. AVP) */
+static void tegra2_cop_clk_reset(struct clk *c, bool assert)
+{
+ unsigned long reg = assert ? RST_DEVICES_SET : RST_DEVICES_CLR;
+
+ pr_debug("%s %s\n", __func__, assert ? "assert" : "deassert");
+ clk_writel(1 << 1, reg);
+}
+
+static struct clk_ops tegra_cop_ops = {
+ .reset = tegra2_cop_clk_reset,
+};
+
/* bus clock functions */
static void tegra2_bus_clk_init(struct clk *c)
{
@@ -390,24 +473,45 @@ static void tegra2_bus_clk_init(struct clk *c)
static int tegra2_bus_clk_enable(struct clk *c)
{
- u32 val = clk_readl(c->reg);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_register_lock, flags);
+
+ val = clk_readl(c->reg);
val &= ~(BUS_CLK_DISABLE << c->reg_shift);
clk_writel(val, c->reg);
+
+ spin_unlock_irqrestore(&clock_register_lock, flags);
+
return 0;
}
static void tegra2_bus_clk_disable(struct clk *c)
{
- u32 val = clk_readl(c->reg);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_register_lock, flags);
+
+ val = clk_readl(c->reg);
val |= BUS_CLK_DISABLE << c->reg_shift;
clk_writel(val, c->reg);
+
+ spin_unlock_irqrestore(&clock_register_lock, flags);
}
static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate)
{
- u32 val = clk_readl(c->reg);
- unsigned long parent_rate = c->parent->rate;
+ u32 val;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ unsigned long flags;
+ int ret = -EINVAL;
int i;
+
+ spin_lock_irqsave(&clock_register_lock, flags);
+
+ val = clk_readl(c->reg);
for (i = 1; i <= 4; i++) {
if (rate == parent_rate / i) {
val &= ~(BUS_CLK_DIV_MASK << c->reg_shift);
@@ -415,10 +519,14 @@ static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate)
clk_writel(val, c->reg);
c->div = i;
c->mul = 1;
- return 0;
+ ret = 0;
+ break;
}
}
- return -EINVAL;
+
+ spin_unlock_irqrestore(&clock_register_lock, flags);
+
+ return ret;
}
static struct clk_ops tegra_bus_ops = {
@@ -428,24 +536,96 @@ static struct clk_ops tegra_bus_ops = {
.set_rate = tegra2_bus_clk_set_rate,
};
-/* PLL Functions */
-static int tegra2_pll_clk_wait_for_lock(struct clk *c)
+/* Blink output functions */
+
+static void tegra2_blink_clk_init(struct clk *c)
{
- ktime_t before;
+ u32 val;
- before = ktime_get();
+ val = pmc_readl(PMC_CTRL);
+ c->state = (val & PMC_CTRL_BLINK_ENB) ? ON : OFF;
+ c->mul = 1;
+ val = pmc_readl(c->reg);
+
+ if (val & PMC_BLINK_TIMER_ENB) {
+ unsigned int on_off;
+
+ on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) &
+ PMC_BLINK_TIMER_DATA_ON_MASK;
+ val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT;
+ val &= PMC_BLINK_TIMER_DATA_OFF_MASK;
+ on_off += val;
+ /* each tick in the blink timer is 4 32KHz clocks */
+ c->div = on_off * 4;
+ } else {
+ c->div = 1;
+ }
+}
- while (!(clk_readl(c->reg + PLL_BASE) & PLL_BASE_LOCK)) {
- if (ktime_us_delta(ktime_get(), before) > 5000) {
- pr_err("Timed out waiting for lock bit on pll %s",
- c->name);
- return -1;
- }
+static int tegra2_blink_clk_enable(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_DPD_PADS_ORIDE);
+ pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE);
+
+ val = pmc_readl(PMC_CTRL);
+ pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL);
+
+ return 0;
+}
+
+static void tegra2_blink_clk_disable(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_CTRL);
+ pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL);
+
+ val = pmc_readl(PMC_DPD_PADS_ORIDE);
+ pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE);
+}
+
+static int tegra2_blink_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ if (rate >= parent_rate) {
+ c->div = 1;
+ pmc_writel(0, c->reg);
+ } else {
+ unsigned int on_off;
+ u32 val;
+
+ on_off = DIV_ROUND_UP(parent_rate / 8, rate);
+ c->div = on_off * 8;
+
+ val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) <<
+ PMC_BLINK_TIMER_DATA_ON_SHIFT;
+ on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK;
+ on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT;
+ val |= on_off;
+ val |= PMC_BLINK_TIMER_ENB;
+ pmc_writel(val, c->reg);
}
return 0;
}
+static struct clk_ops tegra_blink_clk_ops = {
+ .init = &tegra2_blink_clk_init,
+ .enable = &tegra2_blink_clk_enable,
+ .disable = &tegra2_blink_clk_disable,
+ .set_rate = &tegra2_blink_clk_set_rate,
+};
+
+/* PLL Functions */
+static int tegra2_pll_clk_wait_for_lock(struct clk *c)
+{
+ udelay(c->u.pll.lock_delay);
+
+ return 0;
+}
+
static void tegra2_pll_clk_init(struct clk *c)
{
u32 val = clk_readl(c->reg + PLL_BASE);
@@ -479,10 +659,6 @@ static int tegra2_pll_clk_enable(struct clk *c)
val |= PLL_BASE_ENABLE;
clk_writel(val, c->reg + PLL_BASE);
- val = clk_readl(c->reg + PLL_MISC(c));
- val |= PLL_MISC_LOCK_ENABLE(c);
- clk_writel(val, c->reg + PLL_MISC(c));
-
tegra2_pll_clk_wait_for_lock(c);
return 0;
@@ -502,13 +678,12 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val;
unsigned long input_rate;
- const struct clk_pll_table *sel;
+ const struct clk_pll_freq_table *sel;
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
- BUG_ON(c->refcnt != 0);
- input_rate = c->parent->rate;
- for (sel = c->pll_table; sel->input_rate != 0; sel++) {
+ input_rate = clk_get_rate(c->parent);
+ for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
if (sel->input_rate == input_rate && sel->output_rate == rate) {
c->mul = sel->n;
c->div = sel->m * sel->p;
@@ -620,9 +795,11 @@ static int tegra2_pll_div_clk_enable(struct clk *c)
{
u32 val;
u32 new_val;
+ unsigned long flags;
pr_debug("%s: %s\n", __func__, c->name);
if (c->flags & DIV_U71) {
+ spin_lock_irqsave(&clock_register_lock, flags);
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
new_val &= 0xFFFF;
@@ -632,12 +809,15 @@ static int tegra2_pll_div_clk_enable(struct clk *c)
val &= ~(0xFFFF << c->reg_shift);
val |= new_val << c->reg_shift;
clk_writel(val, c->reg);
+ spin_unlock_irqrestore(&clock_register_lock, flags);
return 0;
} else if (c->flags & DIV_2) {
BUG_ON(!(c->flags & PLLD));
+ spin_lock_irqsave(&clock_register_lock, flags);
val = clk_readl(c->reg);
val &= ~PLLD_MISC_DIV_RST;
clk_writel(val, c->reg);
+ spin_unlock_irqrestore(&clock_register_lock, flags);
return 0;
}
return -EINVAL;
@@ -647,9 +827,11 @@ static void tegra2_pll_div_clk_disable(struct clk *c)
{
u32 val;
u32 new_val;
+ unsigned long flags;
pr_debug("%s: %s\n", __func__, c->name);
if (c->flags & DIV_U71) {
+ spin_lock_irqsave(&clock_register_lock, flags);
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
new_val &= 0xFFFF;
@@ -659,11 +841,14 @@ static void tegra2_pll_div_clk_disable(struct clk *c)
val &= ~(0xFFFF << c->reg_shift);
val |= new_val << c->reg_shift;
clk_writel(val, c->reg);
+ spin_unlock_irqrestore(&clock_register_lock, flags);
} else if (c->flags & DIV_2) {
BUG_ON(!(c->flags & PLLD));
+ spin_lock_irqsave(&clock_register_lock, flags);
val = clk_readl(c->reg);
val |= PLLD_MISC_DIV_RST;
clk_writel(val, c->reg);
+ spin_unlock_irqrestore(&clock_register_lock, flags);
}
}
@@ -672,10 +857,14 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
u32 val;
u32 new_val;
int divider_u71;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ unsigned long flags;
+
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider_u71 = clk_div71_get_divider(c->parent->rate, rate);
+ divider_u71 = clk_div71_get_divider(parent_rate, rate);
if (divider_u71 >= 0) {
+ spin_lock_irqsave(&clock_register_lock, flags);
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
new_val &= 0xFFFF;
@@ -689,10 +878,11 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
clk_writel(val, c->reg);
c->div = divider_u71 + 2;
c->mul = 2;
+ spin_unlock_irqrestore(&clock_register_lock, flags);
return 0;
}
} else if (c->flags & DIV_2) {
- if (c->parent->rate == rate * 2)
+ if (parent_rate == rate * 2)
return 0;
}
return -EINVAL;
@@ -701,15 +891,16 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
{
int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider = clk_div71_get_divider(c->parent->rate, rate);
+ divider = clk_div71_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
- return c->parent->rate * 2 / (divider + 2);
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
} else if (c->flags & DIV_2) {
- return c->parent->rate / 2;
+ return DIV_ROUND_UP(parent_rate, 2);
}
return -EINVAL;
}
@@ -755,9 +946,14 @@ static void tegra2_periph_clk_init(struct clk *c)
}
c->state = ON;
+
+ if (!c->u.periph.clk_num)
+ return;
+
if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c)))
c->state = OFF;
+
if (!(c->flags & PERIPH_NO_RESET))
if (clk_readl(RST_DEVICES + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c))
@@ -767,8 +963,20 @@ static void tegra2_periph_clk_init(struct clk *c)
static int tegra2_periph_clk_enable(struct clk *c)
{
u32 val;
+ unsigned long flags;
+ int refcount;
pr_debug("%s on clock %s\n", __func__, c->name);
+ if (!c->u.periph.clk_num)
+ return 0;
+
+ spin_lock_irqsave(&clock_register_lock, flags);
+
+ refcount = tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++;
+
+ if (refcount > 1)
+ goto out;
+
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
if (!(c->flags & PERIPH_NO_RESET) && !(c->flags & PERIPH_MANUAL_RESET))
@@ -781,34 +989,48 @@ static int tegra2_periph_clk_enable(struct clk *c)
val |= 0x3 << 24;
clk_writel(val, c->reg);
}
+
+out:
+ spin_unlock_irqrestore(&clock_register_lock, flags);
+
return 0;
}
static void tegra2_periph_clk_disable(struct clk *c)
{
+ unsigned long flags;
+
pr_debug("%s on clock %s\n", __func__, c->name);
- clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
- CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
-}
+ if (!c->u.periph.clk_num)
+ return;
-void tegra2_periph_reset_deassert(struct clk *c)
-{
- pr_debug("%s on clock %s\n", __func__, c->name);
- if (!(c->flags & PERIPH_NO_RESET))
+ spin_lock_irqsave(&clock_register_lock, flags);
+
+ if (c->refcnt)
+ tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--;
+
+ if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0)
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
- RST_DEVICES_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
+ CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
+
+ spin_unlock_irqrestore(&clock_register_lock, flags);
}
-void tegra2_periph_reset_assert(struct clk *c)
+static void tegra2_periph_clk_reset(struct clk *c, bool assert)
{
- pr_debug("%s on clock %s\n", __func__, c->name);
+ unsigned long base = assert ? RST_DEVICES_SET : RST_DEVICES_CLR;
+
+ pr_debug("%s %s on clock %s\n", __func__,
+ assert ? "assert" : "deassert", c->name);
+
+ BUG_ON(!c->u.periph.clk_num);
+
if (!(c->flags & PERIPH_NO_RESET))
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
- RST_DEVICES_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
+ base + PERIPH_CLK_TO_ENB_SET_REG(c));
}
-
static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
{
u32 val;
@@ -821,12 +1043,12 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
if (c->refcnt)
- clk_enable_locked(p);
+ clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -840,9 +1062,10 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val;
int divider;
- pr_debug("%s: %lu\n", __func__, rate);
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
if (c->flags & DIV_U71) {
- divider = clk_div71_get_divider(c->parent->rate, rate);
+ divider = clk_div71_get_divider(parent_rate, rate);
if (divider >= 0) {
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK;
@@ -853,7 +1076,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
return 0;
}
} else if (c->flags & DIV_U16) {
- divider = clk_div16_get_divider(c->parent->rate, rate);
+ divider = clk_div16_get_divider(parent_rate, rate);
if (divider >= 0) {
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
@@ -863,7 +1086,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
c->mul = 1;
return 0;
}
- } else if (c->parent->rate <= rate) {
+ } else if (parent_rate <= rate) {
c->div = 1;
c->mul = 1;
return 0;
@@ -875,19 +1098,20 @@ static long tegra2_periph_clk_round_rate(struct clk *c,
unsigned long rate)
{
int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider = clk_div71_get_divider(c->parent->rate, rate);
+ divider = clk_div71_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
- return c->parent->rate * 2 / (divider + 2);
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
} else if (c->flags & DIV_U16) {
- divider = clk_div16_get_divider(c->parent->rate, rate);
+ divider = clk_div16_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
- return c->parent->rate / (divider + 1);
+ return DIV_ROUND_UP(parent_rate, divider + 1);
}
return -EINVAL;
}
@@ -899,6 +1123,71 @@ static struct clk_ops tegra_periph_clk_ops = {
.set_parent = &tegra2_periph_clk_set_parent,
.set_rate = &tegra2_periph_clk_set_rate,
.round_rate = &tegra2_periph_clk_round_rate,
+ .reset = &tegra2_periph_clk_reset,
+};
+
+/* The SDMMC controllers have extra bits in the clock source register that
+ * adjust the delay between the clock and data to compensate for delays
+ * on the PCB. */
+void tegra2_sdmmc_tap_delay(struct clk *c, int delay)
+{
+ u32 reg;
+
+ delay = clamp(delay, 0, 15);
+ reg = clk_readl(c->reg);
+ reg &= ~SDMMC_CLK_INT_FB_DLY_MASK;
+ reg |= SDMMC_CLK_INT_FB_SEL;
+ reg |= delay << SDMMC_CLK_INT_FB_DLY_SHIFT;
+ clk_writel(reg, c->reg);
+}
+
+/* External memory controller clock ops */
+static void tegra2_emc_clk_init(struct clk *c)
+{
+ tegra2_periph_clk_init(c);
+ c->max_rate = clk_get_rate_locked(c);
+}
+
+static long tegra2_emc_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ long new_rate = rate;
+
+ new_rate = tegra_emc_round_rate(new_rate);
+ if (new_rate < 0)
+ return c->max_rate;
+
+ BUG_ON(new_rate != tegra2_periph_clk_round_rate(c, new_rate));
+
+ return new_rate;
+}
+
+static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+ /*
+ * The Tegra2 memory controller has an interlock with the clock
+	 * block: shadowed memory registers can be updated and then
+	 * transferred to the main registers at the same time as the
+	 * clock update, without glitches.
+ */
+ ret = tegra_emc_set_rate(rate);
+ if (ret < 0)
+ return ret;
+
+ ret = tegra2_periph_clk_set_rate(c, rate);
+ udelay(1);
+
+ return ret;
+}
+
+static struct clk_ops tegra_emc_clk_ops = {
+ .init = &tegra2_emc_clk_init,
+ .enable = &tegra2_periph_clk_enable,
+ .disable = &tegra2_periph_clk_disable,
+ .set_parent = &tegra2_periph_clk_set_parent,
+ .set_rate = &tegra2_emc_clk_set_rate,
+ .round_rate = &tegra2_emc_clk_round_rate,
+ .reset = &tegra2_periph_clk_reset,
};
/* Clock doubler ops */
@@ -907,6 +1196,10 @@ static void tegra2_clk_double_init(struct clk *c)
c->mul = 2;
c->div = 1;
c->state = ON;
+
+ if (!c->u.periph.clk_num)
+ return;
+
if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c)))
c->state = OFF;
@@ -914,7 +1207,7 @@ static void tegra2_clk_double_init(struct clk *c)
static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate)
{
- if (rate != 2 * c->parent->rate)
+ if (rate != 2 * clk_get_rate(c->parent))
return -EINVAL;
c->mul = 2;
c->div = 1;
@@ -928,6 +1221,7 @@ static struct clk_ops tegra_clk_double_ops = {
.set_rate = &tegra2_clk_double_set_rate,
};
+/* Audio sync clock ops */
static void tegra2_audio_sync_clk_init(struct clk *c)
{
int source;
@@ -964,12 +1258,12 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
val |= sel->value;
if (c->refcnt)
- clk_enable_locked(p);
+ clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
- clk_disable_locked(c->parent);
+ clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -979,33 +1273,153 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
return -EINVAL;
}
-static int tegra2_audio_sync_clk_set_rate(struct clk *c, unsigned long rate)
-{
- unsigned long parent_rate;
- if (!c->parent) {
- pr_err("%s: clock has no parent\n", __func__);
- return -EINVAL;
- }
- parent_rate = c->parent->rate;
- if (rate != parent_rate) {
- pr_err("%s: %s/%ld differs from parent %s/%ld\n",
- __func__,
- c->name, rate,
- c->parent->name, parent_rate);
- return -EINVAL;
- }
- c->rate = parent_rate;
- return 0;
-}
-
static struct clk_ops tegra_audio_sync_clk_ops = {
.init = tegra2_audio_sync_clk_init,
.enable = tegra2_audio_sync_clk_enable,
.disable = tegra2_audio_sync_clk_disable,
- .set_rate = tegra2_audio_sync_clk_set_rate,
.set_parent = tegra2_audio_sync_clk_set_parent,
};
+/* cdev1 and cdev2 (dap_mclk1 and dap_mclk2) ops */
+
+static void tegra2_cdev_clk_init(struct clk *c)
+{
+ /* We could un-tristate the cdev1 or cdev2 pingroup here; this is
+ * currently done in the pinmux code. */
+ c->state = ON;
+
+ BUG_ON(!c->u.periph.clk_num);
+
+ if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) &
+ PERIPH_CLK_TO_ENB_BIT(c)))
+ c->state = OFF;
+}
+
+static int tegra2_cdev_clk_enable(struct clk *c)
+{
+ BUG_ON(!c->u.periph.clk_num);
+
+ clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
+ CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
+ return 0;
+}
+
+static void tegra2_cdev_clk_disable(struct clk *c)
+{
+ BUG_ON(!c->u.periph.clk_num);
+
+ clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
+ CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
+}
+
+static struct clk_ops tegra_cdev_clk_ops = {
+ .init = &tegra2_cdev_clk_init,
+ .enable = &tegra2_cdev_clk_enable,
+ .disable = &tegra2_cdev_clk_disable,
+};
+
+/* shared bus ops */
+/*
+ * Some clocks may have multiple downstream users that need to request a
+ * higher clock rate. Shared bus clocks provide a unique shared_bus_user
+ * clock to each user. The frequency of the bus is set to the highest
+ * rate requested by any enabled shared_bus_user clock, with a minimum
+ * value set by the shared bus itself.
+ */
+static int tegra_clk_shared_bus_update(struct clk *bus)
+{
+ struct clk *c;
+ unsigned long rate = bus->min_rate;
+
+ list_for_each_entry(c, &bus->shared_bus_list, u.shared_bus_user.node)
+ if (c->u.shared_bus_user.enabled)
+ rate = max(c->u.shared_bus_user.rate, rate);
+
+ if (rate == clk_get_rate_locked(bus))
+ return 0;
+
+ return clk_set_rate_locked(bus, rate);
+}
+
+static void tegra_clk_shared_bus_init(struct clk *c)
+{
+ unsigned long flags;
+
+ c->max_rate = c->parent->max_rate;
+ c->u.shared_bus_user.rate = c->parent->max_rate;
+ c->state = OFF;
+ c->set = true;
+
+ spin_lock_irqsave(&c->parent->spinlock, flags);
+
+ list_add_tail(&c->u.shared_bus_user.node,
+ &c->parent->shared_bus_list);
+
+ spin_unlock_irqrestore(&c->parent->spinlock, flags);
+}
+
+static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long flags;
+	long new_rate;
+	int ret;
+
+	new_rate = clk_round_rate(c->parent, rate);
+	if (new_rate < 0)
+		return new_rate;
+
+	spin_lock_irqsave(&c->parent->spinlock, flags);
+
+	c->u.shared_bus_user.rate = new_rate;
+ ret = tegra_clk_shared_bus_update(c->parent);
+
+ spin_unlock_irqrestore(&c->parent->spinlock, flags);
+
+ return ret;
+}
+
+static long tegra_clk_shared_bus_round_rate(struct clk *c, unsigned long rate)
+{
+ return clk_round_rate(c->parent, rate);
+}
+
+static int tegra_clk_shared_bus_enable(struct clk *c)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&c->parent->spinlock, flags);
+
+ c->u.shared_bus_user.enabled = true;
+ ret = tegra_clk_shared_bus_update(c->parent);
+
+ spin_unlock_irqrestore(&c->parent->spinlock, flags);
+
+ return ret;
+}
+
+static void tegra_clk_shared_bus_disable(struct clk *c)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&c->parent->spinlock, flags);
+
+ c->u.shared_bus_user.enabled = false;
+ ret = tegra_clk_shared_bus_update(c->parent);
+ WARN_ON_ONCE(ret);
+
+ spin_unlock_irqrestore(&c->parent->spinlock, flags);
+}
+
+static struct clk_ops tegra_clk_shared_bus_ops = {
+ .init = tegra_clk_shared_bus_init,
+ .enable = tegra_clk_shared_bus_enable,
+ .disable = tegra_clk_shared_bus_disable,
+ .set_rate = tegra_clk_shared_bus_set_rate,
+ .round_rate = tegra_clk_shared_bus_round_rate,
+};
+
+
/* Clock definitions */
static struct clk tegra_clk_32k = {
.name = "clk_32k",
@@ -1014,7 +1428,7 @@ static struct clk tegra_clk_32k = {
.max_rate = 32768,
};
-static struct clk_pll_table tegra_pll_s_table[] = {
+static struct clk_pll_freq_table tegra_pll_s_freq_table[] = {
{32768, 12000000, 366, 1, 1, 0},
{32768, 13000000, 397, 1, 1, 0},
{32768, 19200000, 586, 1, 1, 0},
@@ -1026,16 +1440,19 @@ static struct clk tegra_pll_s = {
.name = "pll_s",
.flags = PLL_ALT_MISC_REG,
.ops = &tegra_pll_ops,
- .reg = 0xf0,
- .input_min = 32768,
- .input_max = 32768,
.parent = &tegra_clk_32k,
- .cf_min = 0, /* FIXME */
- .cf_max = 0, /* FIXME */
- .vco_min = 12000000,
- .vco_max = 26000000,
- .pll_table = tegra_pll_s_table,
.max_rate = 26000000,
+ .reg = 0xf0,
+ .u.pll = {
+ .input_min = 32768,
+ .input_max = 32768,
+ .cf_min = 0, /* FIXME */
+ .cf_max = 0, /* FIXME */
+ .vco_min = 12000000,
+ .vco_max = 26000000,
+ .freq_table = tegra_pll_s_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk_mux_sel tegra_clk_m_sel[] = {
@@ -1043,18 +1460,18 @@ static struct clk_mux_sel tegra_clk_m_sel[] = {
{ .input = &tegra_pll_s, .value = 1},
{ 0, 0},
};
+
static struct clk tegra_clk_m = {
.name = "clk_m",
.flags = ENABLE_ON_INIT,
.ops = &tegra_clk_m_ops,
.inputs = tegra_clk_m_sel,
.reg = 0x1fc,
- .reg_mask = (1<<28),
.reg_shift = 28,
.max_rate = 26000000,
};
-static struct clk_pll_table tegra_pll_c_table[] = {
+static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1063,15 +1480,18 @@ static struct clk tegra_pll_c = {
.flags = PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0x80,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1400000000,
- .pll_table = tegra_pll_c_table,
.max_rate = 600000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_c_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_c_out1 = {
@@ -1084,7 +1504,7 @@ static struct clk tegra_pll_c_out1 = {
.max_rate = 600000000,
};
-static struct clk_pll_table tegra_pll_m_table[] = {
+static struct clk_pll_freq_table tegra_pll_m_freq_table[] = {
{ 12000000, 666000000, 666, 12, 1, 8},
{ 13000000, 666000000, 666, 13, 1, 8},
{ 19200000, 666000000, 555, 16, 1, 8},
@@ -1101,15 +1521,18 @@ static struct clk tegra_pll_m = {
.flags = PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0x90,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1200000000,
- .pll_table = tegra_pll_m_table,
.max_rate = 800000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .freq_table = tegra_pll_m_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_m_out1 = {
@@ -1122,7 +1545,7 @@ static struct clk tegra_pll_m_out1 = {
.max_rate = 600000000,
};
-static struct clk_pll_table tegra_pll_p_table[] = {
+static struct clk_pll_freq_table tegra_pll_p_freq_table[] = {
{ 12000000, 216000000, 432, 12, 2, 8},
{ 13000000, 216000000, 432, 13, 2, 8},
{ 19200000, 216000000, 90, 4, 2, 1},
@@ -1139,15 +1562,18 @@ static struct clk tegra_pll_p = {
.flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0xa0,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1400000000,
- .pll_table = tegra_pll_p_table,
.max_rate = 432000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_p_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_p_out1 = {
@@ -1190,11 +1616,9 @@ static struct clk tegra_pll_p_out4 = {
.max_rate = 432000000,
};
-static struct clk_pll_table tegra_pll_a_table[] = {
+static struct clk_pll_freq_table tegra_pll_a_freq_table[] = {
{ 28800000, 56448000, 49, 25, 1, 1},
{ 28800000, 73728000, 64, 25, 1, 1},
- { 28800000, 11289600, 49, 25, 1, 1},
- { 28800000, 12288000, 64, 25, 1, 1},
{ 28800000, 24000000, 5, 6, 1, 1},
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1204,15 +1628,18 @@ static struct clk tegra_pll_a = {
.flags = PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0xb0,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_pll_p_out1,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1400000000,
- .pll_table = tegra_pll_a_table,
- .max_rate = 56448000,
+ .max_rate = 73728000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_a_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_a_out0 = {
@@ -1222,14 +1649,25 @@ static struct clk tegra_pll_a_out0 = {
.parent = &tegra_pll_a,
.reg = 0xb4,
.reg_shift = 0,
- .max_rate = 56448000,
+ .max_rate = 73728000,
};
-static struct clk_pll_table tegra_pll_d_table[] = {
+static struct clk_pll_freq_table tegra_pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 19200000, 216000000, 135, 12, 1, 3},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
{ 12000000, 1000000000, 1000, 12, 1, 12},
{ 13000000, 1000000000, 1000, 13, 1, 12},
{ 19200000, 1000000000, 625, 12, 1, 8},
{ 26000000, 1000000000, 1000, 26, 1, 12},
+
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1238,15 +1676,18 @@ static struct clk tegra_pll_d = {
.flags = PLL_HAS_CPCON | PLLD,
.ops = &tegra_pll_ops,
.reg = 0xd0,
- .input_min = 2000000,
- .input_max = 40000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 40000000,
- .vco_max = 1000000000,
- .pll_table = tegra_pll_d_table,
.max_rate = 1000000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .freq_table = tegra_pll_d_freq_table,
+ .lock_delay = 1000,
+ },
};
static struct clk tegra_pll_d_out0 = {
@@ -1257,7 +1698,7 @@ static struct clk tegra_pll_d_out0 = {
.max_rate = 500000000,
};
-static struct clk_pll_table tegra_pll_u_table[] = {
+static struct clk_pll_freq_table tegra_pll_u_freq_table[] = {
{ 12000000, 480000000, 960, 12, 2, 0},
{ 13000000, 480000000, 960, 13, 2, 0},
{ 19200000, 480000000, 200, 4, 2, 0},
@@ -1270,18 +1711,21 @@ static struct clk tegra_pll_u = {
.flags = PLLU,
.ops = &tegra_pll_ops,
.reg = 0xc0,
- .input_min = 2000000,
- .input_max = 40000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 480000000,
- .vco_max = 960000000,
- .pll_table = tegra_pll_u_table,
.max_rate = 480000000,
-};
-
-static struct clk_pll_table tegra_pll_x_table[] = {
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 480000000,
+ .vco_max = 960000000,
+ .freq_table = tegra_pll_u_freq_table,
+ .lock_delay = 1000,
+ },
+};
+
+static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
/* 1 GHz */
{ 12000000, 1000000000, 1000, 12, 1, 12},
{ 13000000, 1000000000, 1000, 13, 1, 12},
@@ -1307,10 +1751,10 @@ static struct clk_pll_table tegra_pll_x_table[] = {
{ 26000000, 760000000, 760, 26, 1, 12},
/* 608 MHz */
- { 12000000, 608000000, 760, 12, 1, 12},
- { 13000000, 608000000, 760, 13, 1, 12},
+ { 12000000, 608000000, 608, 12, 1, 12},
+ { 13000000, 608000000, 608, 13, 1, 12},
{ 19200000, 608000000, 380, 12, 1, 8},
- { 26000000, 608000000, 760, 26, 1, 12},
+ { 26000000, 608000000, 608, 26, 1, 12},
/* 456 MHz */
{ 12000000, 456000000, 456, 12, 1, 12},
@@ -1332,18 +1776,21 @@ static struct clk tegra_pll_x = {
.flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG,
.ops = &tegra_pllx_ops,
.reg = 0xe0,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1200000000,
- .pll_table = tegra_pll_x_table,
.max_rate = 1000000000,
-};
-
-static struct clk_pll_table tegra_pll_e_table[] = {
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .freq_table = tegra_pll_x_freq_table,
+ .lock_delay = 300,
+ },
+};
+
+static struct clk_pll_freq_table tegra_pll_e_freq_table[] = {
{ 12000000, 100000000, 200, 24, 1, 0 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1352,23 +1799,49 @@ static struct clk tegra_pll_e = {
.name = "pll_e",
.flags = PLL_ALT_MISC_REG,
.ops = &tegra_plle_ops,
- .input_min = 12000000,
- .input_max = 12000000,
- .max_rate = 100000000,
.parent = &tegra_clk_m,
.reg = 0xe8,
- .pll_table = tegra_pll_e_table,
+ .max_rate = 100000000,
+ .u.pll = {
+ .input_min = 12000000,
+ .input_max = 12000000,
+ .freq_table = tegra_pll_e_freq_table,
+ },
};
static struct clk tegra_clk_d = {
.name = "clk_d",
.flags = PERIPH_NO_RESET,
.ops = &tegra_clk_double_ops,
- .clk_num = 90,
.reg = 0x34,
.reg_shift = 12,
.parent = &tegra_clk_m,
.max_rate = 52000000,
+ .u.periph = {
+ .clk_num = 90,
+ },
+};
+
+/* dap_mclk1, belongs to the cdev1 pingroup. */
+static struct clk tegra_clk_cdev1 = {
+ .name = "cdev1",
+ .ops = &tegra_cdev_clk_ops,
+ .rate = 26000000,
+ .max_rate = 26000000,
+ .u.periph = {
+ .clk_num = 94,
+ },
+};
+
+/* dap_mclk2, belongs to the cdev2 pingroup. */
+static struct clk tegra_clk_cdev2 = {
+ .name = "cdev2",
+ .ops = &tegra_cdev_clk_ops,
+ .rate = 26000000,
+ .max_rate = 26000000,
+ .u.periph = {
+ .clk_num = 93,
+ },
};
/* initialized before peripheral clocks */
@@ -1394,7 +1867,7 @@ static struct clk tegra_clk_audio = {
.name = "audio",
.inputs = mux_audio_sync_clk,
.reg = 0x38,
- .max_rate = 24000000,
+ .max_rate = 73728000,
.ops = &tegra_audio_sync_clk_ops
};
@@ -1403,10 +1876,12 @@ static struct clk tegra_clk_audio_2x = {
.flags = PERIPH_NO_RESET,
.max_rate = 48000000,
.ops = &tegra_clk_double_ops,
- .clk_num = 89,
.reg = 0x34,
.reg_shift = 8,
.parent = &tegra_clk_audio,
+ .u.periph = {
+ .clk_num = 89,
+ },
};
struct clk_lookup tegra_audio_clk_lookups[] = {
@@ -1478,17 +1953,26 @@ static struct clk tegra_clk_sclk = {
.inputs = mux_sclk,
.reg = 0x28,
.ops = &tegra_super_ops,
- .max_rate = 600000000,
+ .max_rate = 240000000,
+ .min_rate = 120000000,
};
static struct clk tegra_clk_virtual_cpu = {
.name = "cpu",
.parent = &tegra_clk_cclk,
- .main = &tegra_pll_x,
- .backup = &tegra_clk_m,
.ops = &tegra_cpu_ops,
.max_rate = 1000000000,
- .dvfs = &tegra_dvfs_virtual_cpu_dvfs,
+ .u.cpu = {
+ .main = &tegra_pll_x,
+ .backup = &tegra_pll_p,
+ },
+};
+
+static struct clk tegra_clk_cop = {
+ .name = "cop",
+ .parent = &tegra_clk_sclk,
+ .ops = &tegra_cop_ops,
+ .max_rate = 240000000,
};
static struct clk tegra_clk_hclk = {
@@ -1508,7 +1992,15 @@ static struct clk tegra_clk_pclk = {
.reg = 0x30,
.reg_shift = 0,
.ops = &tegra_bus_ops,
- .max_rate = 108000000,
+ .max_rate = 120000000,
+};
+
+static struct clk tegra_clk_blink = {
+ .name = "blink",
+ .parent = &tegra_clk_32k,
+ .reg = 0x40,
+ .ops = &tegra_blink_clk_ops,
+ .max_rate = 32768,
};
static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = {
@@ -1587,6 +2079,23 @@ static struct clk_mux_sel mux_clk_32k[] = {
{ 0, 0},
};
+static struct clk_mux_sel mux_pclk[] = {
+ { .input = &tegra_clk_pclk, .value = 0},
+ { 0, 0},
+};
+
+static struct clk tegra_clk_emc = {
+ .name = "emc",
+ .ops = &tegra_emc_clk_ops,
+ .reg = 0x19c,
+ .max_rate = 800000000,
+ .inputs = mux_pllm_pllc_pllp_clkm,
+ .flags = MUX | DIV_U71 | PERIPH_EMC_ENB,
+ .u.periph = {
+ .clk_num = 57,
+ },
+};
+
#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \
{ \
.name = _name, \
@@ -1595,19 +2104,32 @@ static struct clk_mux_sel mux_clk_32k[] = {
.con_id = _con, \
}, \
.ops = &tegra_periph_clk_ops, \
- .clk_num = _clk_num, \
.reg = _reg, \
.inputs = _inputs, \
.flags = _flags, \
.max_rate = _max, \
+ .u.periph = { \
+ .clk_num = _clk_num, \
+ }, \
+ }
+
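+/*
+ * SHARED_CLK defines a virtual per-device clock that votes on the rate
+ * of a shared parent bus clock (sclk or emc) through
+ * tegra_clk_shared_bus_ops.
+ */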
+#define SHARED_CLK(_name, _dev, _con, _parent) \
+ { \
+ .name = _name, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ .ops = &tegra_clk_shared_bus_ops, \
+ .parent = _parent, \
}
-struct clk tegra_periph_clks[] = {
+struct clk tegra_list_clks[] = {
+ PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 108000000, mux_pclk, 0),
PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET),
PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
- PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
- /* FIXME: spdif has 2 clocks but 1 enable */
+ PERIPH_CLK("i2s1", "tegra-i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("i2s2", "tegra-i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71),
PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
@@ -1620,13 +2142,15 @@ struct clk tegra_periph_clks[] = {
PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- /* FIXME: vfir shares an enable with uartb */
PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x160, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
/* FIXME: what is la? */
PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
@@ -1641,37 +2165,46 @@ struct clk tegra_periph_clks[] = {
PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 216000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */
PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- /* FIXME: vi and vi_sensor share an enable */
- PERIPH_CLK("vi", "vi", NULL, 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("vi_sensor", "vi_sensor", NULL, 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */
PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- /* FIXME: cve and tvo share an enable */
PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 148500000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("disp1", "tegrafb.0", NULL, 27, 0x138, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("disp2", "tegrafb.1", NULL, 26, 0x13c, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
- PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, 800000000, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB),
PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */
- PERIPH_CLK("csi", "csi", NULL, 52, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("isp", "isp", NULL, 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */
- PERIPH_CLK("csus", "csus", NULL, 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET),
+ PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */
+ PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET),
PERIPH_CLK("pex", NULL, "pex", 70, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
PERIPH_CLK("afi", NULL, "afi", 72, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
+
+ SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sclk),
+ SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc),
+ SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc),
+ SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc),
+ SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc),
+ SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc),
+ SHARED_CLK("host.emc", "tegra_grhost", "emc", &tegra_clk_emc),
+ SHARED_CLK("usbd.emc", "fsl-tegra-udc", "emc", &tegra_clk_emc),
+ SHARED_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc),
+ SHARED_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc),
+ SHARED_CLK("usb3.emc", "tegra-ehci.2", "emc", &tegra_clk_emc),
};
#define CLK_DUPLICATE(_name, _dev, _con) \
@@ -1693,9 +2226,22 @@ struct clk_duplicate tegra_clk_duplicates[] = {
CLK_DUPLICATE("uartc", "tegra_uart.2", NULL),
CLK_DUPLICATE("uartd", "tegra_uart.3", NULL),
CLK_DUPLICATE("uarte", "tegra_uart.4", NULL),
- CLK_DUPLICATE("host1x", "tegrafb.0", "host1x"),
- CLK_DUPLICATE("host1x", "tegrafb.1", "host1x"),
+ CLK_DUPLICATE("usbd", "utmip-pad", NULL),
CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
+ CLK_DUPLICATE("usbd", "tegra-otg", NULL),
+ CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"),
+ CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
+ CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL),
+ CLK_DUPLICATE("host1x", "tegra_grhost", "host1x"),
+ CLK_DUPLICATE("2d", "tegra_grhost", "gr2d"),
+ CLK_DUPLICATE("3d", "tegra_grhost", "gr3d"),
+ CLK_DUPLICATE("epp", "tegra_grhost", "epp"),
+ CLK_DUPLICATE("mpe", "tegra_grhost", "mpe"),
+ CLK_DUPLICATE("cop", "tegra-avp", "cop"),
+ CLK_DUPLICATE("vde", "tegra-aes", "vde"),
};
#define CLK(dev, con, ck) \
@@ -1705,68 +2251,70 @@ struct clk_duplicate tegra_clk_duplicates[] = {
.clk = ck, \
}
-struct clk_lookup tegra_clk_lookups[] = {
- /* external root sources */
- CLK(NULL, "32k_clk", &tegra_clk_32k),
- CLK(NULL, "pll_s", &tegra_pll_s),
- CLK(NULL, "clk_m", &tegra_clk_m),
- CLK(NULL, "pll_m", &tegra_pll_m),
- CLK(NULL, "pll_m_out1", &tegra_pll_m_out1),
- CLK(NULL, "pll_c", &tegra_pll_c),
- CLK(NULL, "pll_c_out1", &tegra_pll_c_out1),
- CLK(NULL, "pll_p", &tegra_pll_p),
- CLK(NULL, "pll_p_out1", &tegra_pll_p_out1),
- CLK(NULL, "pll_p_out2", &tegra_pll_p_out2),
- CLK(NULL, "pll_p_out3", &tegra_pll_p_out3),
- CLK(NULL, "pll_p_out4", &tegra_pll_p_out4),
- CLK(NULL, "pll_a", &tegra_pll_a),
- CLK(NULL, "pll_a_out0", &tegra_pll_a_out0),
- CLK(NULL, "pll_d", &tegra_pll_d),
- CLK(NULL, "pll_d_out0", &tegra_pll_d_out0),
- CLK(NULL, "pll_u", &tegra_pll_u),
- CLK(NULL, "pll_x", &tegra_pll_x),
- CLK(NULL, "pll_e", &tegra_pll_e),
- CLK(NULL, "cclk", &tegra_clk_cclk),
- CLK(NULL, "sclk", &tegra_clk_sclk),
- CLK(NULL, "hclk", &tegra_clk_hclk),
- CLK(NULL, "pclk", &tegra_clk_pclk),
- CLK(NULL, "clk_d", &tegra_clk_d),
- CLK(NULL, "cpu", &tegra_clk_virtual_cpu),
-};
+struct clk *tegra_ptr_clks[] = {
+ &tegra_clk_32k,
+ &tegra_pll_s,
+ &tegra_clk_m,
+ &tegra_pll_m,
+ &tegra_pll_m_out1,
+ &tegra_pll_c,
+ &tegra_pll_c_out1,
+ &tegra_pll_p,
+ &tegra_pll_p_out1,
+ &tegra_pll_p_out2,
+ &tegra_pll_p_out3,
+ &tegra_pll_p_out4,
+ &tegra_pll_a,
+ &tegra_pll_a_out0,
+ &tegra_pll_d,
+ &tegra_pll_d_out0,
+ &tegra_pll_u,
+ &tegra_pll_x,
+ &tegra_pll_e,
+ &tegra_clk_cclk,
+ &tegra_clk_sclk,
+ &tegra_clk_hclk,
+ &tegra_clk_pclk,
+ &tegra_clk_d,
+ &tegra_clk_cdev1,
+ &tegra_clk_cdev2,
+ &tegra_clk_virtual_cpu,
+ &tegra_clk_blink,
+ &tegra_clk_cop,
+ &tegra_clk_emc,
+};
+
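+/*
+ * Register a single clock with clkdev; clocks without an explicit
+ * dev_id/con_id lookup get their own name as con_id, so that
+ * clk_get_sys(NULL, name) can find them.
+ */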
+static void tegra2_init_one_clock(struct clk *c)
+{
+ clk_init(c);
+ INIT_LIST_HEAD(&c->shared_bus_list);
+ if (!c->lookup.dev_id && !c->lookup.con_id)
+ c->lookup.con_id = c->name;
+ c->lookup.clk = c;
+ clkdev_add(&c->lookup);
+}
void __init tegra2_init_clocks(void)
{
int i;
- struct clk_lookup *cl;
struct clk *c;
- struct clk_duplicate *cd;
-
- for (i = 0; i < ARRAY_SIZE(tegra_clk_lookups); i++) {
- cl = &tegra_clk_lookups[i];
- clk_init(cl->clk);
- clkdev_add(cl);
- }
- for (i = 0; i < ARRAY_SIZE(tegra_periph_clks); i++) {
- c = &tegra_periph_clks[i];
- cl = &c->lookup;
- cl->clk = c;
+ for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++)
+ tegra2_init_one_clock(tegra_ptr_clks[i]);
- clk_init(cl->clk);
- clkdev_add(cl);
- }
+ for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++)
+ tegra2_init_one_clock(&tegra_list_clks[i]);
for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) {
- cd = &tegra_clk_duplicates[i];
- c = tegra_get_clock_by_name(cd->name);
- if (c) {
- cl = &cd->lookup;
- cl->clk = c;
- clkdev_add(cl);
- } else {
+ c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name);
+ if (!c) {
pr_err("%s: Unknown duplicate clock %s\n", __func__,
- cd->name);
+ tegra_clk_duplicates[i].name);
+ continue;
}
+
+ tegra_clk_duplicates[i].lookup.clk = c;
+ clkdev_add(&tegra_clk_duplicates[i].lookup);
}
init_audio_sync_clock_mux();
@@ -1774,7 +2322,7 @@ void __init tegra2_init_clocks(void)
#ifdef CONFIG_PM
static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM +
- PERIPH_CLK_SOURCE_NUM + 3];
+ PERIPH_CLK_SOURCE_NUM + 22];
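+/*
+ * The 22 extra slots cover OSC_CTRL, the five PLL base/misc register pairs,
+ * the three PLL out dividers, the cclk/sclk bases and dividers, pclk, the
+ * audio sync clock, MISC_CLK_ENB and CLK_MASK_ARM saved below; the BUG_ON
+ * at the end of tegra_clk_suspend() checks the total.
+ */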
void tegra_clk_suspend(void)
{
@@ -1782,6 +2330,29 @@ void tegra_clk_suspend(void)
u32 *ctx = clk_rst_suspend;
*ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK;
+ *ctx++ = clk_readl(tegra_pll_c.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_c.reg + PLL_MISC(&tegra_pll_c));
+ *ctx++ = clk_readl(tegra_pll_a.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
+ *ctx++ = clk_readl(tegra_pll_s.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_s.reg + PLL_MISC(&tegra_pll_s));
+ *ctx++ = clk_readl(tegra_pll_d.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
+ *ctx++ = clk_readl(tegra_pll_u.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_u.reg + PLL_MISC(&tegra_pll_u));
+
+ *ctx++ = clk_readl(tegra_pll_m_out1.reg);
+ *ctx++ = clk_readl(tegra_pll_a_out0.reg);
+ *ctx++ = clk_readl(tegra_pll_c_out1.reg);
+
+ *ctx++ = clk_readl(tegra_clk_cclk.reg);
+ *ctx++ = clk_readl(tegra_clk_cclk.reg + SUPER_CLK_DIVIDER);
+
+ *ctx++ = clk_readl(tegra_clk_sclk.reg);
+ *ctx++ = clk_readl(tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
+ *ctx++ = clk_readl(tegra_clk_pclk.reg);
+
+ *ctx++ = clk_readl(tegra_clk_audio.reg);
for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
off += 4) {
@@ -1800,6 +2371,8 @@ void tegra_clk_suspend(void)
*ctx++ = clk_readl(MISC_CLK_ENB);
*ctx++ = clk_readl(CLK_MASK_ARM);
+
+ BUG_ON(ctx - clk_rst_suspend != ARRAY_SIZE(clk_rst_suspend));
}
void tegra_clk_resume(void)
@@ -1812,6 +2385,31 @@ void tegra_clk_resume(void)
val |= *ctx++;
clk_writel(val, OSC_CTRL);
+ clk_writel(*ctx++, tegra_pll_c.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_c.reg + PLL_MISC(&tegra_pll_c));
+ clk_writel(*ctx++, tegra_pll_a.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
+ clk_writel(*ctx++, tegra_pll_s.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_s.reg + PLL_MISC(&tegra_pll_s));
+ clk_writel(*ctx++, tegra_pll_d.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
+ clk_writel(*ctx++, tegra_pll_u.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_u.reg + PLL_MISC(&tegra_pll_u));
+ udelay(1000);
+
+ clk_writel(*ctx++, tegra_pll_m_out1.reg);
+ clk_writel(*ctx++, tegra_pll_a_out0.reg);
+ clk_writel(*ctx++, tegra_pll_c_out1.reg);
+
+ clk_writel(*ctx++, tegra_clk_cclk.reg);
+ clk_writel(*ctx++, tegra_clk_cclk.reg + SUPER_CLK_DIVIDER);
+
+ clk_writel(*ctx++, tegra_clk_sclk.reg);
+ clk_writel(*ctx++, tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
+ clk_writel(*ctx++, tegra_clk_pclk.reg);
+
+ clk_writel(*ctx++, tegra_clk_audio.reg);
+
/* enable all clocks before configuring clock sources */
clk_writel(0xbffffff9ul, CLK_OUT_ENB);
clk_writel(0xfefffff7ul, CLK_OUT_ENB + 4);
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c
deleted file mode 100644
index 5529c238dd77..000000000000
--- a/arch/arm/mach-tegra/tegra2_dvfs.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * arch/arm/mach-tegra/tegra2_dvfs.c
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-
-#include "clock.h"
-#include "tegra2_dvfs.h"
-
-static struct dvfs_table virtual_cpu_process_0[] = {
- {314000000, 750},
- {456000000, 825},
- {608000000, 900},
- {760000000, 975},
- {817000000, 1000},
- {912000000, 1050},
- {1000000000, 1100},
- {0, 0},
-};
-
-static struct dvfs_table virtual_cpu_process_1[] = {
- {314000000, 750},
- {456000000, 825},
- {618000000, 900},
- {770000000, 975},
- {827000000, 1000},
- {922000000, 1050},
- {1000000000, 1100},
- {0, 0},
-};
-
-static struct dvfs_table virtual_cpu_process_2[] = {
- {494000000, 750},
- {675000000, 825},
- {817000000, 875},
- {922000000, 925},
- {1000000000, 975},
- {0, 0},
-};
-
-static struct dvfs_table virtual_cpu_process_3[] = {
- {730000000, 750},
- {760000000, 775},
- {845000000, 800},
- {1000000000, 875},
- {0, 0},
-};
-
-struct dvfs tegra_dvfs_virtual_cpu_dvfs = {
- .reg_id = "vdd_cpu",
- .process_id_table = {
- {
- .process_id = 0,
- .table = virtual_cpu_process_0,
- },
- {
- .process_id = 1,
- .table = virtual_cpu_process_1,
- },
- {
- .process_id = 2,
- .table = virtual_cpu_process_2,
- },
- {
- .process_id = 3,
- .table = virtual_cpu_process_3,
- },
- },
- .process_id_table_length = 4,
- .cpu = 1,
-};
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
new file mode 100644
index 000000000000..0f7ae6e90b55
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <mach/iomap.h>
+
+#include "tegra2_emc.h"
+
+#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
+static bool emc_enable = true;
+#else
+static bool emc_enable;
+#endif
+module_param(emc_enable, bool, 0644);
+
+static void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
+static const struct tegra_emc_table *tegra_emc_table;
+static int tegra_emc_table_size;
+
+static inline void emc_writel(u32 val, unsigned long addr)
+{
+ writel(val, emc + addr);
+}
+
+static inline u32 emc_readl(unsigned long addr)
+{
+ return readl(emc + addr);
+}
+
+static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = {
+ 0x2c, /* RC */
+ 0x30, /* RFC */
+ 0x34, /* RAS */
+ 0x38, /* RP */
+ 0x3c, /* R2W */
+ 0x40, /* W2R */
+ 0x44, /* R2P */
+ 0x48, /* W2P */
+ 0x4c, /* RD_RCD */
+ 0x50, /* WR_RCD */
+ 0x54, /* RRD */
+ 0x58, /* REXT */
+ 0x5c, /* WDV */
+ 0x60, /* QUSE */
+ 0x64, /* QRST */
+ 0x68, /* QSAFE */
+ 0x6c, /* RDV */
+ 0x70, /* REFRESH */
+ 0x74, /* BURST_REFRESH_NUM */
+ 0x78, /* PDEX2WR */
+ 0x7c, /* PDEX2RD */
+ 0x80, /* PCHG2PDEN */
+ 0x84, /* ACT2PDEN */
+ 0x88, /* AR2PDEN */
+ 0x8c, /* RW2PDEN */
+ 0x90, /* TXSR */
+ 0x94, /* TCKE */
+ 0x98, /* TFAW */
+ 0x9c, /* TRPAB */
+ 0xa0, /* TCLKSTABLE */
+ 0xa4, /* TCLKSTOP */
+ 0xa8, /* TREFBW */
+ 0xac, /* QUSE_EXTRA */
+ 0x114, /* FBIO_CFG6 */
+ 0xb0, /* ODT_WRITE */
+ 0xb4, /* ODT_READ */
+ 0x104, /* FBIO_CFG5 */
+ 0x2bc, /* CFG_DIG_DLL */
+ 0x2c0, /* DLL_XFORM_DQS */
+ 0x2c4, /* DLL_XFORM_QUSE */
+ 0x2e0, /* ZCAL_REF_CNT */
+ 0x2e4, /* ZCAL_WAIT_CNT */
+ 0x2a8, /* AUTO_CAL_INTERVAL */
+ 0x2d0, /* CFG_CLKTRIM_0 */
+ 0x2d4, /* CFG_CLKTRIM_1 */
+ 0x2d8, /* CFG_CLKTRIM_2 */
+};
+
+/* Select the closest EMC rate that is at or above the requested rate */
+long tegra_emc_round_rate(unsigned long rate)
+{
+ int i;
+ int best = -1;
+ unsigned long distance = ULONG_MAX;
+
+ if (!tegra_emc_table)
+ return -EINVAL;
+
+ if (!emc_enable)
+ return -EINVAL;
+
+ pr_debug("%s: %lu\n", __func__, rate);
+
+ /*
+ * The EMC clock rate is twice the bus rate, and the bus rate is
+ * measured in kHz
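+ * (e.g. a requested 600000000 Hz EMC clock corresponds to a
+ * 300000 kHz table entry)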
+ */
+ rate = rate / 2 / 1000;
+
+ for (i = 0; i < tegra_emc_table_size; i++) {
+ if (tegra_emc_table[i].rate >= rate &&
+ (tegra_emc_table[i].rate - rate) < distance) {
+ distance = tegra_emc_table[i].rate - rate;
+ best = i;
+ }
+ }
+
+ if (best < 0)
+ return -EINVAL;
+
+ pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate);
+
+ return tegra_emc_table[best].rate * 2 * 1000;
+}
+
+/*
+ * The EMC registers have shadow registers. When the EMC clock is updated
+ * in the clock controller, the shadow registers are copied to the active
+ * registers, allowing glitchless memory bus frequency changes.
+ * This function updates the shadow registers for a new clock frequency,
+ * and relies on the clock lock on the EMC clock to avoid races between
+ * multiple frequency changes.
+ */
+int tegra_emc_set_rate(unsigned long rate)
+{
+ int i;
+ int j;
+
+ if (!tegra_emc_table)
+ return -EINVAL;
+
+ /*
+ * The EMC clock rate is twice the bus rate, and the bus rate is
+ * measured in kHz
+ */
+ rate = rate / 2 / 1000;
+
+ for (i = 0; i < tegra_emc_table_size; i++)
+ if (tegra_emc_table[i].rate == rate)
+ break;
+
+ if (i >= tegra_emc_table_size)
+ return -EINVAL;
+
+ pr_debug("%s: setting to %lu\n", __func__, rate);
+
+ for (j = 0; j < TEGRA_EMC_NUM_REGS; j++)
+ emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]);
+
+ emc_readl(tegra_emc_table[i].regs[TEGRA_EMC_NUM_REGS - 1]);
+
+ return 0;
+}
+
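+/*
+ * Board code supplies the EMC timing table; until tegra_init_emc() is
+ * called, tegra_emc_round_rate() and tegra_emc_set_rate() return -EINVAL
+ * and the EMC clock cannot be rescaled.
+ */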
+void tegra_init_emc(const struct tegra_emc_table *table, int table_size)
+{
+ tegra_emc_table = table;
+ tegra_emc_table_size = table_size;
+}
diff --git a/arch/arm/mach-tegra/tegra2_emc.h b/arch/arm/mach-tegra/tegra2_emc.h
new file mode 100644
index 000000000000..19f08cb31603
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_emc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define TEGRA_EMC_NUM_REGS 46
+
+struct tegra_emc_table {
+ unsigned long rate;
+ u32 regs[TEGRA_EMC_NUM_REGS];
+};
+
+int tegra_emc_set_rate(unsigned long rate);
+long tegra_emc_round_rate(unsigned long rate);
+void tegra_init_emc(const struct tegra_emc_table *table, int table_size);
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 7b8ad1f98f44..0fcb1eb4214d 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/err.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
@@ -33,10 +34,15 @@
#include <mach/iomap.h>
#include <mach/irqs.h>
+#include <mach/suspend.h>
#include "board.h"
#include "clock.h"
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
#define TIMERUS_CNTR_1US 0x10
#define TIMERUS_USEC_CFG 0x14
#define TIMERUS_CNTR_FREEZE 0x4c
@@ -49,9 +55,11 @@
#define TIMER_PTV 0x0
#define TIMER_PCR 0x4
-struct tegra_timer;
-
static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
+static void __iomem *rtc_base = IO_ADDRESS(TEGRA_RTC_BASE);
+
+static struct timespec persistent_ts;
+static u64 persistent_ms, last_persistent_ms;
#define timer_writel(value, reg) \
__raw_writel(value, (u32)timer_reg_base + (reg))
@@ -132,6 +140,42 @@ static void notrace tegra_update_sched_clock(void)
update_sched_clock(&cd, cyc, (u32)~0);
}
+/*
+ * tegra_rtc_read_ms - Returns the current time from the Tegra RTC, in ms
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing, to avoid race conditions
+ * on the RTC shadow register.
+ */
+u64 tegra_rtc_read_ms(void)
+{
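+ /*
+ * The milliseconds read latches the seconds value into the shadow
+ * register, so the two reads below form a coherent pair.
+ */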
+ u32 ms = readl(rtc_base + RTC_MILLISECONDS);
+ u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ return (u64)s * MSEC_PER_SEC + ms;
+}
+
+/*
+ * read_persistent_clock - Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32 kHz RTC. Converts the milliseconds elapsed since the last read
+ * into nsecs and adds them to a monotonically increasing timespec.
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing, to avoid race conditions
+ * on the RTC shadow register.
+ */
+void read_persistent_clock(struct timespec *ts)
+{
+ u64 delta;
+ struct timespec *tsp = &persistent_ts;
+
+ last_persistent_ms = persistent_ms;
+ persistent_ms = tegra_rtc_read_ms();
+ delta = persistent_ms - last_persistent_ms;
+
+ timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
+ *ts = *tsp;
+}
+
static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
@@ -150,9 +194,22 @@ static struct irqaction tegra_timer_irq = {
static void __init tegra_init_timer(void)
{
+ struct clk *clk;
unsigned long rate = clk_measure_input_freq();
int ret;
+ clk = clk_get_sys("timer", NULL);
+ BUG_ON(IS_ERR(clk));
+ clk_enable(clk);
+
+ /*
+ * The RTC registers are used by read_persistent_clock(), so keep the
+ * RTC clock enabled.
+ */
+ clk = clk_get_sys("rtc-tegra", NULL);
+ BUG_ON(IS_ERR(clk));
+ clk_enable(clk);
+
#ifdef CONFIG_HAVE_ARM_TWD
twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif
@@ -196,10 +253,22 @@ static void __init tegra_init_timer(void)
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_register_device(&tegra_clockevent);
-
- return;
}
struct sys_timer tegra_timer = {
.init = tegra_init_timer,
};
+
+#ifdef CONFIG_PM
+static u32 usec_config;
+
+void tegra_timer_suspend(void)
+{
+ usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+ timer_writel(usec_config, TIMERUS_USEC_CFG);
+}
+#endif
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index aa53ee22438f..513d6abec1f5 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -3,7 +3,7 @@
* arch/arm/mach-u300/core.c
*
*
- * Copyright (C) 2007-2010 ST-Ericsson AB
+ * Copyright (C) 2007-2010 ST-Ericsson SA
* License terms: GNU General Public License (GPL) version 2
* Core platform support, IRQ handling and device definitions.
* Author: Linus Walleij <linus.walleij@stericsson.com>
@@ -16,7 +16,9 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/termios.h>
+#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
+#include <linux/amba/serial.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/clk.h>
@@ -96,10 +98,20 @@ void __init u300_map_io(void)
* Declaration of devices found on the U300 board and
* their respective memory locations.
*/
+
+static struct amba_pl011_data uart0_plat_data = {
+#ifdef CONFIG_COH901318
+ .dma_filter = coh901318_filter_id,
+ .dma_rx_param = (void *) U300_DMA_UART0_RX,
+ .dma_tx_param = (void *) U300_DMA_UART0_TX,
+#endif
+};
+
static struct amba_device uart0_device = {
.dev = {
+ .coherent_dma_mask = ~0,
.init_name = "uart0", /* Slow device at 0x3000 offset */
- .platform_data = NULL,
+ .platform_data = &uart0_plat_data,
},
.res = {
.start = U300_UART0_BASE,
@@ -111,10 +123,19 @@ static struct amba_device uart0_device = {
/* The U335 have an additional UART1 on the APP CPU */
#ifdef CONFIG_MACH_U300_BS335
+static struct amba_pl011_data uart1_plat_data = {
+#ifdef CONFIG_COH901318
+ .dma_filter = coh901318_filter_id,
+ .dma_rx_param = (void *) U300_DMA_UART1_RX,
+ .dma_tx_param = (void *) U300_DMA_UART1_TX,
+#endif
+};
+
static struct amba_device uart1_device = {
.dev = {
+ .coherent_dma_mask = ~0,
.init_name = "uart1", /* Fast device at 0x7000 offset */
- .platform_data = NULL,
+ .platform_data = &uart1_plat_data,
},
.res = {
.start = U300_UART1_BASE,
@@ -960,42 +981,37 @@ const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x220,
},
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
{
.number = U300_DMA_MMCSD_RX_TX,
.name = "MMCSD RX TX",
.priority_high = 0,
- .dev_addr = U300_MMCSD_BASE + 0x080,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
@@ -1014,15 +1030,76 @@ const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
.name = "MSPRO RX",
.priority_high = 0,
},
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
{
.number = U300_DMA_UART0_TX,
.name = "UART0 TX",
.priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
},
{
.number = U300_DMA_UART0_RX,
.name = "UART0 RX",
.priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
},
{
.number = U300_DMA_APEX_TX,
@@ -1080,7 +1157,7 @@ const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
@@ -1252,15 +1329,77 @@ const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
.name = "XGAM PDI",
.priority_high = 0,
},
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
{
.number = U300_DMA_SPI_TX,
.name = "SPI TX",
.priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
},
{
.number = U300_DMA_SPI_RX,
.name = "SPI RX",
.priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
},
{
.number = U300_DMA_GENERAL_PURPOSE_0,
@@ -1617,7 +1756,7 @@ static void __init u300_init_check_chip(void)
#endif
#ifdef CONFIG_MACH_U300_BS335
if ((val & 0xFF00U) != 0xf000 && (val & 0xFF00U) != 0xf100) {
- printk(KERN_ERR "Platform configured for BS365 " \
+ printk(KERN_ERR "Platform configured for BS335 " \
" with DB3350 but %s detected, expect problems!",
chipname);
}
@@ -1692,12 +1831,12 @@ void __init u300_init_devices(void)
/* Register subdevices on the I2C buses */
u300_i2c_register_board_devices();
- /* Register subdevices on the SPI bus */
- u300_spi_register_board_devices();
-
/* Register the platform devices */
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
+ /* Register subdevices on the SPI bus */
+ u300_spi_register_board_devices();
+
#ifndef CONFIG_MACH_U300_SEMI_IS_SHARED
/*
* Enable SEMI self refresh. Self-refresh of the SDRAM is entered when
diff --git a/arch/arm/mach-u300/include/mach/memory.h b/arch/arm/mach-u300/include/mach/memory.h
index bf134bcc129d..888e2e351ee1 100644
--- a/arch/arm/mach-u300/include/mach/memory.h
+++ b/arch/arm/mach-u300/include/mach/memory.h
@@ -15,17 +15,17 @@
#ifdef CONFIG_MACH_U300_DUAL_RAM
-#define PHYS_OFFSET UL(0x48000000)
+#define PLAT_PHYS_OFFSET UL(0x48000000)
#define BOOT_PARAMS_OFFSET (PHYS_OFFSET + 0x100)
#else
#ifdef CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
-#define PHYS_OFFSET (0x28000000 + \
+#define PLAT_PHYS_OFFSET (0x28000000 + \
(CONFIG_MACH_U300_ACCESS_MEM_SIZE - \
(CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024)
#else
-#define PHYS_OFFSET (0x28000000 + \
+#define PLAT_PHYS_OFFSET (0x28000000 + \
(CONFIG_MACH_U300_ACCESS_MEM_SIZE + \
(CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024)
#endif
diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c
index de1ac9ad2213..677ccef5cd32 100644
--- a/arch/arm/mach-u300/mmc.c
+++ b/arch/arm/mach-u300/mmc.c
@@ -3,159 +3,52 @@
* arch/arm/mach-u300/mmc.c
*
*
- * Copyright (C) 2009 ST-Ericsson AB
+ * Copyright (C) 2009 ST-Ericsson SA
* License terms: GNU General Public License (GPL) version 2
*
* Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Johan Lundin <johan.lundin@stericsson.com>
+ * Author: Johan Lundin
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/mmc/host.h>
-#include <linux/input.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/regulator/consumer.h>
-#include <linux/regulator/machine.h>
#include <linux/gpio.h>
+#include <linux/dmaengine.h>
#include <linux/amba/mmci.h>
#include <linux/slab.h>
+#include <mach/coh901318.h>
+#include <mach/dma_channels.h>
#include "mmc.h"
#include "padmux.h"
-struct mmci_card_event {
- struct input_dev *mmc_input;
- int mmc_inserted;
- struct work_struct workq;
- struct mmci_platform_data mmc0_plat_data;
+static struct mmci_platform_data mmc0_plat_data = {
+ /*
+ * Do not set ocr_mask or a voltage translation function;
+ * we have a regulator we can control instead.
+ */
+ /* Nominally 2.85V on our platform */
+ .f_max = 24000000,
+ .gpio_wp = -1,
+ .gpio_cd = U300_GPIO_PIN_MMC_CD,
+ .cd_invert = true,
+ .capabilities = MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+#ifdef CONFIG_COH901318
+ .dma_filter = coh901318_filter_id,
+ .dma_rx_param = (void *) U300_DMA_MMCSD_RX_TX,
+ /* Don't specify a TX channel; this RX channel is bidirectional */
+#endif
};
-static unsigned int mmc_status(struct device *dev)
-{
- struct mmci_card_event *mmci_card = container_of(
- dev->platform_data,
- struct mmci_card_event, mmc0_plat_data);
-
- return mmci_card->mmc_inserted;
-}
-
-static int mmci_callback(void *data)
-{
- struct mmci_card_event *mmci_card = data;
-
- disable_irq_on_gpio_pin(U300_GPIO_PIN_MMC_CD);
- schedule_work(&mmci_card->workq);
-
- return 0;
-}
-
-
-static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mmci_card_event *mmci_card = container_of(
- dev->platform_data,
- struct mmci_card_event, mmc0_plat_data);
-
-
- return sprintf(buf, "%d\n", !mmci_card->mmc_inserted);
-}
-
-static DEVICE_ATTR(mmc_inserted, S_IRUGO, gpio_show, NULL);
-
-static void _mmci_callback(struct work_struct *ws)
-{
-
- struct mmci_card_event *mmci_card = container_of(
- ws,
- struct mmci_card_event, workq);
-
- mdelay(20);
-
- mmci_card->mmc_inserted = !gpio_get_value(U300_GPIO_PIN_MMC_CD);
-
- input_report_switch(mmci_card->mmc_input, KEY_INSERT,
- mmci_card->mmc_inserted);
- input_sync(mmci_card->mmc_input);
-
- pr_debug("MMC/SD card was %s\n",
- mmci_card->mmc_inserted ? "inserted" : "removed");
-
- enable_irq_on_gpio_pin(U300_GPIO_PIN_MMC_CD, mmci_card->mmc_inserted);
-}
-
int __devinit mmc_init(struct amba_device *adev)
{
- struct mmci_card_event *mmci_card;
struct device *mmcsd_device = &adev->dev;
struct pmx *pmx;
int ret = 0;
- mmci_card = kzalloc(sizeof(struct mmci_card_event), GFP_KERNEL);
- if (!mmci_card)
- return -ENOMEM;
-
- /*
- * Do not set ocr_mask or voltage translation function,
- * we have a regulator we can control instead.
- */
- /* Nominally 2.85V on our platform */
- mmci_card->mmc0_plat_data.f_max = 24000000;
- mmci_card->mmc0_plat_data.status = mmc_status;
- mmci_card->mmc0_plat_data.gpio_wp = -1;
- mmci_card->mmc0_plat_data.gpio_cd = -1;
- mmci_card->mmc0_plat_data.capabilities = MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
-
- mmcsd_device->platform_data = (void *) &mmci_card->mmc0_plat_data;
-
- INIT_WORK(&mmci_card->workq, _mmci_callback);
-
- ret = gpio_request(U300_GPIO_PIN_MMC_CD, "MMC card detection");
- if (ret) {
- printk(KERN_CRIT "Could not allocate MMC card detection " \
- "GPIO pin\n");
- goto out;
- }
-
- ret = gpio_direction_input(U300_GPIO_PIN_MMC_CD);
- if (ret) {
- printk(KERN_CRIT "Invalid GPIO pin requested\n");
- goto out;
- }
-
- ret = sysfs_create_file(&mmcsd_device->kobj,
- &dev_attr_mmc_inserted.attr);
- if (ret)
- goto out;
-
- mmci_card->mmc_input = input_allocate_device();
- if (!mmci_card->mmc_input) {
- printk(KERN_CRIT "Could not allocate MMC input device\n");
- return -ENOMEM;
- }
-
- mmci_card->mmc_input->name = "MMC insert notification";
- mmci_card->mmc_input->id.bustype = BUS_HOST;
- mmci_card->mmc_input->id.vendor = 0;
- mmci_card->mmc_input->id.product = 0;
- mmci_card->mmc_input->id.version = 0x0100;
- mmci_card->mmc_input->dev.parent = mmcsd_device;
- input_set_capability(mmci_card->mmc_input, EV_SW, KEY_INSERT);
-
- /*
- * Since this must always be compiled into the kernel, this input
- * is never unregistered or free:ed.
- */
- ret = input_register_device(mmci_card->mmc_input);
- if (ret) {
- input_free_device(mmci_card->mmc_input);
- goto out;
- }
-
- input_set_drvdata(mmci_card->mmc_input, mmci_card);
+ mmcsd_device->platform_data = &mmc0_plat_data;
/*
* Setup padmuxing for MMC. Since this must always be
@@ -171,12 +64,5 @@ int __devinit mmc_init(struct amba_device *adev)
pr_warning("Could not activate padmuxing\n");
}
- ret = gpio_register_callback(U300_GPIO_PIN_MMC_CD, mmci_callback,
- mmci_card);
-
- schedule_work(&mmci_card->workq);
-
- printk(KERN_INFO "Registered MMC insert/remove notification\n");
-out:
return ret;
}
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
index 00869def5420..5767208f1c1d 100644
--- a/arch/arm/mach-u300/spi.c
+++ b/arch/arm/mach-u300/spi.c
@@ -11,6 +11,9 @@
#include <linux/spi/spi.h>
#include <linux/amba/pl022.h>
#include <linux/err.h>
+#include <mach/coh901318.h>
+#include <mach/dma_channels.h>
+
#include "padmux.h"
/*
@@ -30,11 +33,8 @@ static void select_dummy_chip(u32 chipselect)
}
struct pl022_config_chip dummy_chip_info = {
- /*
- * available POLLING_TRANSFER and INTERRUPT_TRANSFER,
- * DMA_TRANSFER does not work
- */
- .com_mode = INTERRUPT_TRANSFER,
+ /* available POLLING_TRANSFER, INTERRUPT_TRANSFER, DMA_TRANSFER */
+ .com_mode = DMA_TRANSFER,
.iface = SSP_INTERFACE_MOTOROLA_SPI,
/* We can only act as master but SSP_SLAVE is possible in theory */
.hierarchy = SSP_MASTER,
@@ -75,8 +75,6 @@ static struct spi_board_info u300_spi_devices[] = {
static struct pl022_ssp_controller ssp_platform_data = {
/* If you have several SPI buses this varies, we have only bus 0 */
.bus_id = 0,
- /* Set this to 1 when we think we got DMA working */
- .enable_dma = 0,
/*
* On the APP CPU GPIO 4, 5 and 6 are connected as generic
* chip selects for SPI. (Same on U330, U335 and U365.)
@@ -84,6 +82,14 @@ static struct pl022_ssp_controller ssp_platform_data = {
* and do padmuxing accordingly too.
*/
.num_chipselect = 3,
+#ifdef CONFIG_COH901318
+ .enable_dma = 1,
+ .dma_filter = coh901318_filter_id,
+ .dma_rx_param = (void *) U300_DMA_SPI_RX,
+ .dma_tx_param = (void *) U300_DMA_SPI_TX,
+#else
+ .enable_dma = 0,
+#endif
};
@@ -109,6 +115,7 @@ void __init u300_spi_init(struct amba_device *adev)
}
}
+
void __init u300_spi_register_board_devices(void)
{
/* Register any SPI devices */
diff --git a/arch/arm/mach-u300/u300.c b/arch/arm/mach-u300/u300.c
index 07c35a846424..48b3b7f39966 100644
--- a/arch/arm/mach-u300/u300.c
+++ b/arch/arm/mach-u300/u300.c
@@ -19,9 +19,9 @@
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/memory.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/memory.h>
static void __init u300_reserve(void)
{
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 53ebb429e971..b549a8fb4231 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -3,16 +3,18 @@
#
obj-y := clock.o cpu.o devices.o devices-common.o \
- id.o
+ id.o usb.o
obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o
obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o prcmu.o
obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \
- board-mop500-keypads.o
+ board-mop500-regulators.o \
+ board-mop500-uib.o board-mop500-stuib.o \
+ board-mop500-u8500uib.o \
+ board-mop500-pins.o
obj-$(CONFIG_MACH_U5500) += board-u5500.o board-u5500-sdi.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
-obj-$(CONFIG_REGULATOR_AB8500) += board-mop500-regulators.o
obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o
obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/arch/arm/mach-ux500/board-mop500-keypads.c b/arch/arm/mach-ux500/board-mop500-keypads.c
deleted file mode 100644
index 70318c354d32..000000000000
--- a/arch/arm/mach-ux500/board-mop500-keypads.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- *
- * Keypad layouts for various boards
- */
-
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/stmpe.h>
-#include <linux/mfd/tc3589x.h>
-#include <linux/input/matrix_keypad.h>
-
-#include <plat/pincfg.h>
-#include <plat/ske.h>
-
-#include <mach/devices.h>
-#include <mach/hardware.h>
-
-#include "devices-db8500.h"
-#include "board-mop500.h"
-
-/* STMPE/SKE keypad use this key layout */
-static const unsigned int mop500_keymap[] = {
- KEY(2, 5, KEY_END),
- KEY(4, 1, KEY_POWER),
- KEY(3, 5, KEY_VOLUMEDOWN),
- KEY(1, 3, KEY_3),
- KEY(5, 2, KEY_RIGHT),
- KEY(5, 0, KEY_9),
-
- KEY(0, 5, KEY_MENU),
- KEY(7, 6, KEY_ENTER),
- KEY(4, 5, KEY_0),
- KEY(6, 7, KEY_2),
- KEY(3, 4, KEY_UP),
- KEY(3, 3, KEY_DOWN),
-
- KEY(6, 4, KEY_SEND),
- KEY(6, 2, KEY_BACK),
- KEY(4, 2, KEY_VOLUMEUP),
- KEY(5, 5, KEY_1),
- KEY(4, 3, KEY_LEFT),
- KEY(3, 2, KEY_7),
-};
-
-static const struct matrix_keymap_data mop500_keymap_data = {
- .keymap = mop500_keymap,
- .keymap_size = ARRAY_SIZE(mop500_keymap),
-};
-
-/*
- * Nomadik SKE keypad
- */
-#define ROW_PIN_I0 164
-#define ROW_PIN_I1 163
-#define ROW_PIN_I2 162
-#define ROW_PIN_I3 161
-#define ROW_PIN_I4 156
-#define ROW_PIN_I5 155
-#define ROW_PIN_I6 154
-#define ROW_PIN_I7 153
-#define COL_PIN_O0 168
-#define COL_PIN_O1 167
-#define COL_PIN_O2 166
-#define COL_PIN_O3 165
-#define COL_PIN_O4 160
-#define COL_PIN_O5 159
-#define COL_PIN_O6 158
-#define COL_PIN_O7 157
-
-#define SKE_KPD_MAX_ROWS 8
-#define SKE_KPD_MAX_COLS 8
-
-static int ske_kp_rows[] = {
- ROW_PIN_I0, ROW_PIN_I1, ROW_PIN_I2, ROW_PIN_I3,
- ROW_PIN_I4, ROW_PIN_I5, ROW_PIN_I6, ROW_PIN_I7,
-};
-
-/*
- * ske_set_gpio_row: request and set gpio rows
- */
-static int ske_set_gpio_row(int gpio)
-{
- int ret;
-
- ret = gpio_request(gpio, "ske-kp");
- if (ret < 0) {
- pr_err("ske_set_gpio_row: gpio request failed\n");
- return ret;
- }
-
- ret = gpio_direction_output(gpio, 1);
- if (ret < 0) {
- pr_err("ske_set_gpio_row: gpio direction failed\n");
- gpio_free(gpio);
- }
-
- return ret;
-}
-
-/*
- * ske_kp_init - enable the gpio configuration
- */
-static int ske_kp_init(void)
-{
- int ret, i;
-
- for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
- ret = ske_set_gpio_row(ske_kp_rows[i]);
- if (ret < 0) {
- pr_err("ske_kp_init: failed init\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static struct ske_keypad_platform_data ske_keypad_board = {
- .init = ske_kp_init,
- .keymap_data = &mop500_keymap_data,
- .no_autorepeat = true,
- .krow = SKE_KPD_MAX_ROWS, /* 8x8 matrix */
- .kcol = SKE_KPD_MAX_COLS,
- .debounce_ms = 40, /* in millisecs */
-};
-
-/*
- * STMPE1601
- */
-static struct stmpe_keypad_platform_data stmpe1601_keypad_data = {
- .debounce_ms = 64,
- .scan_count = 8,
- .no_autorepeat = true,
- .keymap_data = &mop500_keymap_data,
-};
-
-static struct stmpe_platform_data stmpe1601_data = {
- .id = 1,
- .blocks = STMPE_BLOCK_KEYPAD,
- .irq_trigger = IRQF_TRIGGER_FALLING,
- .irq_base = MOP500_STMPE1601_IRQ(0),
- .keypad = &stmpe1601_keypad_data,
- .autosleep = true,
- .autosleep_timeout = 1024,
-};
-
-static struct i2c_board_info mop500_i2c0_devices_stuib[] = {
- {
- I2C_BOARD_INFO("stmpe1601", 0x40),
- .irq = NOMADIK_GPIO_TO_IRQ(218),
- .platform_data = &stmpe1601_data,
- .flags = I2C_CLIENT_WAKE,
- },
-};
-
-/*
- * TC35893
- */
-
-static const unsigned int uib_keymap[] = {
- KEY(3, 1, KEY_END),
- KEY(4, 1, KEY_POWER),
- KEY(6, 4, KEY_VOLUMEDOWN),
- KEY(4, 2, KEY_EMAIL),
- KEY(3, 3, KEY_RIGHT),
- KEY(2, 5, KEY_BACKSPACE),
-
- KEY(6, 7, KEY_MENU),
- KEY(5, 0, KEY_ENTER),
- KEY(4, 3, KEY_0),
- KEY(3, 4, KEY_DOT),
- KEY(5, 2, KEY_UP),
- KEY(3, 5, KEY_DOWN),
-
- KEY(4, 5, KEY_SEND),
- KEY(0, 5, KEY_BACK),
- KEY(6, 2, KEY_VOLUMEUP),
- KEY(1, 3, KEY_SPACE),
- KEY(7, 6, KEY_LEFT),
- KEY(5, 5, KEY_SEARCH),
-};
-
-static struct matrix_keymap_data uib_keymap_data = {
- .keymap = uib_keymap,
- .keymap_size = ARRAY_SIZE(uib_keymap),
-};
-
-static struct tc3589x_keypad_platform_data tc35893_data = {
- .krow = TC_KPD_ROWS,
- .kcol = TC_KPD_COLUMNS,
- .debounce_period = TC_KPD_DEBOUNCE_PERIOD,
- .settle_time = TC_KPD_SETTLE_TIME,
- .irqtype = IRQF_TRIGGER_FALLING,
- .enable_wakeup = true,
- .keymap_data = &uib_keymap_data,
- .no_autorepeat = true,
-};
-
-static struct tc3589x_platform_data tc3589x_keypad_data = {
- .block = TC3589x_BLOCK_KEYPAD,
- .keypad = &tc35893_data,
- .irq_base = MOP500_EGPIO_IRQ_BASE,
-};
-
-static struct i2c_board_info mop500_i2c0_devices_uib[] = {
- {
- I2C_BOARD_INFO("tc3589x", 0x44),
- .platform_data = &tc3589x_keypad_data,
- .irq = NOMADIK_GPIO_TO_IRQ(218),
- .flags = I2C_CLIENT_WAKE,
- },
-};
-
-void mop500_keypad_init(void)
-{
- db8500_add_ske_keypad(&ske_keypad_board);
-
- i2c_register_board_info(0, mop500_i2c0_devices_stuib,
- ARRAY_SIZE(mop500_i2c0_devices_stuib));
-
- i2c_register_board_info(0, mop500_i2c0_devices_uib,
- ARRAY_SIZE(mop500_i2c0_devices_uib));
-
-}
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
new file mode 100644
index 000000000000..fd4cf1ca5efd
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+
+#include "pins-db8500.h"
+
+static pin_cfg_t mop500_pins_common[] = {
+ /* I2C */
+ GPIO147_I2C0_SCL,
+ GPIO148_I2C0_SDA,
+ GPIO16_I2C1_SCL,
+ GPIO17_I2C1_SDA,
+ GPIO10_I2C2_SDA,
+ GPIO11_I2C2_SCL,
+ GPIO229_I2C3_SDA,
+ GPIO230_I2C3_SCL,
+
+ /* MSP0 */
+ GPIO12_MSP0_TXD,
+ GPIO13_MSP0_TFS,
+ GPIO14_MSP0_TCK,
+ GPIO15_MSP0_RXD,
+
+ /* MSP2: HDMI */
+ GPIO193_MSP2_TXD,
+ GPIO194_MSP2_TCK,
+ GPIO195_MSP2_TFS,
+ GPIO196_MSP2_RXD | PIN_OUTPUT_LOW,
+
+ /* Touch screen INTERFACE */
+ GPIO84_GPIO | PIN_INPUT_PULLUP, /* TOUCH_INT1 */
+
+ /* STMPE1601/tc35893 keypad IRQ */
+ GPIO218_GPIO | PIN_INPUT_PULLUP,
+
+ /* MMC0 (MicroSD card) */
+ GPIO18_MC0_CMDDIR | PIN_OUTPUT_HIGH,
+ GPIO19_MC0_DAT0DIR | PIN_OUTPUT_HIGH,
+ GPIO20_MC0_DAT2DIR | PIN_OUTPUT_HIGH,
+
+ GPIO22_MC0_FBCLK | PIN_INPUT_NOPULL,
+ GPIO23_MC0_CLK | PIN_OUTPUT_LOW,
+ GPIO24_MC0_CMD | PIN_INPUT_PULLUP,
+ GPIO25_MC0_DAT0 | PIN_INPUT_PULLUP,
+ GPIO26_MC0_DAT1 | PIN_INPUT_PULLUP,
+ GPIO27_MC0_DAT2 | PIN_INPUT_PULLUP,
+ GPIO28_MC0_DAT3 | PIN_INPUT_PULLUP,
+
+ /* SDI1 (SDIO) */
+ GPIO208_MC1_CLK | PIN_OUTPUT_LOW,
+ GPIO209_MC1_FBCLK | PIN_INPUT_NOPULL,
+ GPIO210_MC1_CMD | PIN_INPUT_PULLUP,
+ GPIO211_MC1_DAT0 | PIN_INPUT_PULLUP,
+ GPIO212_MC1_DAT1 | PIN_INPUT_PULLUP,
+ GPIO213_MC1_DAT2 | PIN_INPUT_PULLUP,
+ GPIO214_MC1_DAT3 | PIN_INPUT_PULLUP,
+
+ /* MMC2 (On-board DATA INTERFACE eMMC) */
+ GPIO128_MC2_CLK | PIN_OUTPUT_LOW,
+ GPIO129_MC2_CMD | PIN_INPUT_PULLUP,
+ GPIO130_MC2_FBCLK | PIN_INPUT_NOPULL,
+ GPIO131_MC2_DAT0 | PIN_INPUT_PULLUP,
+ GPIO132_MC2_DAT1 | PIN_INPUT_PULLUP,
+ GPIO133_MC2_DAT2 | PIN_INPUT_PULLUP,
+ GPIO134_MC2_DAT3 | PIN_INPUT_PULLUP,
+ GPIO135_MC2_DAT4 | PIN_INPUT_PULLUP,
+ GPIO136_MC2_DAT5 | PIN_INPUT_PULLUP,
+ GPIO137_MC2_DAT6 | PIN_INPUT_PULLUP,
+ GPIO138_MC2_DAT7 | PIN_INPUT_PULLUP,
+
+ /* MMC4 (On-board STORAGE INTERFACE eMMC) */
+ GPIO197_MC4_DAT3 | PIN_INPUT_PULLUP,
+ GPIO198_MC4_DAT2 | PIN_INPUT_PULLUP,
+ GPIO199_MC4_DAT1 | PIN_INPUT_PULLUP,
+ GPIO200_MC4_DAT0 | PIN_INPUT_PULLUP,
+ GPIO201_MC4_CMD | PIN_INPUT_PULLUP,
+ GPIO202_MC4_FBCLK | PIN_INPUT_NOPULL,
+ GPIO203_MC4_CLK | PIN_OUTPUT_LOW,
+ GPIO204_MC4_DAT7 | PIN_INPUT_PULLUP,
+ GPIO205_MC4_DAT6 | PIN_INPUT_PULLUP,
+ GPIO206_MC4_DAT5 | PIN_INPUT_PULLUP,
+ GPIO207_MC4_DAT4 | PIN_INPUT_PULLUP,
+
+ /* SKE keypad */
+ GPIO153_KP_I7,
+ GPIO154_KP_I6,
+ GPIO155_KP_I5,
+ GPIO156_KP_I4,
+ GPIO157_KP_O7,
+ GPIO158_KP_O6,
+ GPIO159_KP_O5,
+ GPIO160_KP_O4,
+ GPIO161_KP_I3,
+ GPIO162_KP_I2,
+ GPIO163_KP_I1,
+ GPIO164_KP_I0,
+ GPIO165_KP_O3,
+ GPIO166_KP_O2,
+ GPIO167_KP_O1,
+ GPIO168_KP_O0,
+
+ /* UART */
+ GPIO0_U0_CTSn | PIN_INPUT_PULLUP,
+ GPIO1_U0_RTSn | PIN_OUTPUT_HIGH,
+ GPIO2_U0_RXD | PIN_INPUT_PULLUP,
+ GPIO3_U0_TXD | PIN_OUTPUT_HIGH,
+
+ GPIO29_U2_RXD | PIN_INPUT_PULLUP,
+ GPIO30_U2_TXD | PIN_OUTPUT_HIGH,
+ GPIO31_U2_CTSn | PIN_INPUT_PULLUP,
+ GPIO32_U2_RTSn | PIN_OUTPUT_HIGH,
+
+ /* Display & HDMI HW sync */
+ GPIO68_LCD_VSI0 | PIN_INPUT_PULLUP,
+ GPIO69_LCD_VSI1 | PIN_INPUT_PULLUP,
+};
+
+static pin_cfg_t mop500_pins_default[] = {
+ /* SSP0 */
+ GPIO143_SSP0_CLK,
+ GPIO144_SSP0_FRM,
+ GPIO145_SSP0_RXD | PIN_PULL_DOWN,
+ GPIO146_SSP0_TXD,
+
+
+ GPIO217_GPIO | PIN_INPUT_PULLUP, /* TC35892 IRQ */
+
+ /* SDI0 (MicroSD card) */
+ GPIO21_MC0_DAT31DIR | PIN_OUTPUT_HIGH,
+
+ /* UART */
+ GPIO4_U1_RXD | PIN_INPUT_PULLUP,
+ GPIO5_U1_TXD | PIN_OUTPUT_HIGH,
+ GPIO6_U1_CTSn | PIN_INPUT_PULLUP,
+ GPIO7_U1_RTSn | PIN_OUTPUT_HIGH,
+};
+
+static pin_cfg_t mop500_pins_hrefv60[] = {
+ /* WLAN */
+ GPIO4_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
+ GPIO85_GPIO | PIN_OUTPUT_LOW,/* WLAN_ENA */
+
+ /* XENON Flashgun INTERFACE */
+ GPIO6_IP_GPIO0 | PIN_INPUT_PULLUP,/* XENON_FLASH_ID */
+ GPIO7_IP_GPIO1 | PIN_INPUT_PULLUP,/* XENON_READY */
+ GPIO170_GPIO | PIN_OUTPUT_LOW, /* XENON_CHARGE */
+
+ /* Assistant LED INTERFACE */
+ GPIO21_GPIO | PIN_OUTPUT_LOW, /* XENON_EN1 */
+ GPIO64_IP_GPIO4 | PIN_OUTPUT_LOW, /* XENON_EN2 */
+
+ /* Magnetometer */
+ GPIO31_GPIO | PIN_INPUT_PULLUP, /* magnetometer_INT */
+ GPIO32_GPIO | PIN_INPUT_PULLDOWN, /* Magnetometer DRDY */
+
+ /* Display Interface */
+ GPIO65_GPIO | PIN_OUTPUT_LOW, /* DISP1 RST */
+ GPIO66_GPIO | PIN_OUTPUT_LOW, /* DISP2 RST */
+
+ /* Touch screen INTERFACE */
+ GPIO143_GPIO | PIN_OUTPUT_LOW,/*TOUCH_RST1 */
+
+ /* Touch screen INTERFACE 2 */
+ GPIO67_GPIO | PIN_INPUT_PULLUP, /* TOUCH_INT2 */
+ GPIO146_GPIO | PIN_OUTPUT_LOW,/*TOUCH_RST2 */
+
+ /* ETM_PTM_TRACE INTERFACE */
+ GPIO70_GPIO | PIN_OUTPUT_LOW,/* ETM_PTM_DATA23 */
+ GPIO71_GPIO | PIN_OUTPUT_LOW,/* ETM_PTM_DATA22 */
+ GPIO72_GPIO | PIN_OUTPUT_LOW,/* ETM_PTM_DATA21 */
+ GPIO73_GPIO | PIN_OUTPUT_LOW,/* ETM_PTM_DATA20 */
+ GPIO74_GPIO | PIN_OUTPUT_LOW,/* ETM_PTM_DATA19 */
+
+ /* NAHJ INTERFACE */
+ GPIO76_GPIO | PIN_OUTPUT_LOW,/* NAHJ_CTRL */
+ GPIO216_GPIO | PIN_OUTPUT_HIGH,/* NAHJ_CTRL_INV */
+
+ /* NFC INTERFACE */
+ GPIO77_GPIO | PIN_OUTPUT_LOW, /* NFC_ENA */
+ GPIO144_GPIO | PIN_INPUT_PULLDOWN, /* NFC_IRQ */
+ GPIO142_GPIO | PIN_OUTPUT_LOW, /* NFC_RESET */
+
+ /* Keyboard MATRIX INTERFACE */
+ GPIO90_MC5_CMD | PIN_OUTPUT_LOW, /* KP_O_1 */
+ GPIO87_MC5_DAT1 | PIN_OUTPUT_LOW, /* KP_O_2 */
+ GPIO86_MC5_DAT0 | PIN_OUTPUT_LOW, /* KP_O_3 */
+ GPIO96_KP_O6 | PIN_OUTPUT_LOW, /* KP_O_6 */
+ GPIO94_KP_O7 | PIN_OUTPUT_LOW, /* KP_O_7 */
+ GPIO93_MC5_DAT4 | PIN_INPUT_PULLUP, /* KP_I_0 */
+ GPIO89_MC5_DAT3 | PIN_INPUT_PULLUP, /* KP_I_2 */
+ GPIO88_MC5_DAT2 | PIN_INPUT_PULLUP, /* KP_I_3 */
+ GPIO91_GPIO | PIN_INPUT_PULLUP, /* FORCE_SENSING_INT */
+ GPIO92_GPIO | PIN_OUTPUT_LOW, /* FORCE_SENSING_RST */
+ GPIO97_GPIO | PIN_OUTPUT_LOW, /* FORCE_SENSING_WU */
+
+ /* DiPro Sensor Interface */
+ GPIO139_GPIO | PIN_INPUT_PULLUP, /* DIPRO_INT */
+
+ /* HAL SWITCH INTERFACE */
+ GPIO145_GPIO | PIN_INPUT_PULLDOWN,/* HAL_SW */
+
+ /* Audio Amplifier Interface */
+ GPIO149_GPIO | PIN_OUTPUT_LOW, /* VAUDIO_HF_EN */
+
+ /* GBF INTERFACE */
+ GPIO171_GPIO | PIN_OUTPUT_LOW, /* GBF_ENA_RESET */
+
+ /* MSP : HDTV INTERFACE */
+ GPIO192_GPIO | PIN_INPUT_PULLDOWN,
+
+ /* ACCELEROMETER_INTERFACE */
+ GPIO82_GPIO | PIN_INPUT_PULLUP, /* ACC_INT1 */
+ GPIO83_GPIO | PIN_INPUT_PULLUP, /* ACC_INT2 */
+
+ /* Proximity Sensor */
+ GPIO217_GPIO | PIN_INPUT_PULLUP,
+
+
+};
+
+void __init mop500_pins_init(void)
+{
+ nmk_config_pins(mop500_pins_common,
+ ARRAY_SIZE(mop500_pins_common));
+ if (machine_is_hrefv60())
+ nmk_config_pins(mop500_pins_hrefv60,
+ ARRAY_SIZE(mop500_pins_hrefv60));
+ else
+ nmk_config_pins(mop500_pins_default,
+ ARRAY_SIZE(mop500_pins_default));
+}
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 533967c2d095..875c91b2f8a4 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -11,6 +11,56 @@
#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
+#include "board-mop500-regulators.h"
+
+static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
+ /* External displays, connector on board, 2v5 power supply */
+ REGULATOR_SUPPLY("vaux12v5", "mcde.0"),
+ /* SFH7741 proximity sensor */
+ REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
+ /* BH1780GLS ambient light sensor */
+ REGULATOR_SUPPLY("vcc", "2-0029"),
+ /* lsm303dlh accelerometer */
+ REGULATOR_SUPPLY("vdd", "3-0018"),
+ /* lsm303dlh magnetometer */
+ REGULATOR_SUPPLY("vdd", "3-001e"),
+ /* Rohm BU21013 Touchscreen devices */
+ REGULATOR_SUPPLY("avdd", "3-005c"),
+ REGULATOR_SUPPLY("avdd", "3-005d"),
+ /* Synaptics RMI4 Touchscreen device */
+ REGULATOR_SUPPLY("vdd", "3-004b"),
+};
+
+static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
+ /* On-board eMMC power */
+ REGULATOR_SUPPLY("vmmc", "sdi4"),
+ /* AB8500 audio codec */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
+ /* External MMC slot power */
+ REGULATOR_SUPPLY("vmmc", "sdi0"),
+};
+
+static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
+ /* TV-out DENC supply */
+ REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
+ /* Internal general-purpose ADC */
+ REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
+ /* SoC core supply, no device */
+ REGULATOR_SUPPLY("v-intcore", NULL),
+ /* USB Transceiver */
+ REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vana_consumers[] = {
+ /* External displays, connector on board, 1v8 power supply */
+ REGULATOR_SUPPLY("vsmps2", "mcde.0"),
+};
/* AB8500 regulators */
struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
@@ -23,6 +73,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
+ .consumer_supplies = ab8500_vaux1_consumers,
},
/* supplies to the on-board eMMC */
[AB8500_LDO_AUX2] = {
@@ -33,6 +85,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
+ .consumer_supplies = ab8500_vaux2_consumers,
},
/* supply for VAUX3, supplies to SDcard slots */
[AB8500_LDO_AUX3] = {
@@ -43,6 +97,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
+ .consumer_supplies = ab8500_vaux3_consumers,
},
/* supply for tvout, gpadc, TVOUT LDO */
[AB8500_LDO_TVOUT] = {
@@ -50,6 +106,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-TVOUT",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vtvout_consumers),
+ .consumer_supplies = ab8500_vtvout_consumers,
},
/* supply for ab8500-vaudio, VAUDIO LDO */
[AB8500_LDO_AUDIO] = {
@@ -85,6 +143,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-INTCORE",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
+ .consumer_supplies = ab8500_vintcore_consumers,
},
/* supply for U8500 CSI/DSI, VANA LDO */
[AB8500_LDO_ANA] = {
@@ -92,5 +152,7 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-CSI/DSI",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
+ .consumer_supplies = ab8500_vana_consumers,
},
};
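
Illustrative sketch (not part of the patch): the consumer tables added above map a supply name plus a device name to an AB8500 LDO, so a driver can request its supply without knowing which regulator backs it. A minimal consumer-side sketch in C, mirroring what mop500_prox_activate() does later in this patch for the "vcc" supply of "gpio-keys.0"; the function names here are made up for the example:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static struct regulator *vaux1;

static int example_prox_power_on(struct device *dev)
{
	/*
	 * "vcc" together with dev_name(dev) == "gpio-keys.0" matches the
	 * REGULATOR_SUPPLY("vcc", "gpio-keys.0") entry in
	 * ab8500_vaux1_consumers[], so this resolves to AB8500 VAUX1.
	 */
	vaux1 = regulator_get(dev, "vcc");
	if (IS_ERR(vaux1))
		return PTR_ERR(vaux1);

	return regulator_enable(vaux1);
}

static void example_prox_power_off(struct device *dev)
{
	regulator_disable(vaux1);
	regulator_put(vaux1);
}
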
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 4b996676594e..bf0b02414e5b 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -12,56 +12,14 @@
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
-#include <plat/pincfg.h>
+#include <asm/mach-types.h>
+#include <plat/ste_dma40.h>
#include <mach/devices.h>
#include <mach/hardware.h>
#include "devices-db8500.h"
-#include "pins-db8500.h"
#include "board-mop500.h"
-
-static pin_cfg_t mop500_sdi_pins[] = {
- /* SDI0 (MicroSD slot) */
- GPIO18_MC0_CMDDIR,
- GPIO19_MC0_DAT0DIR,
- GPIO20_MC0_DAT2DIR,
- GPIO21_MC0_DAT31DIR,
- GPIO22_MC0_FBCLK,
- GPIO23_MC0_CLK,
- GPIO24_MC0_CMD,
- GPIO25_MC0_DAT0,
- GPIO26_MC0_DAT1,
- GPIO27_MC0_DAT2,
- GPIO28_MC0_DAT3,
-
- /* SDI4 (on-board eMMC) */
- GPIO197_MC4_DAT3,
- GPIO198_MC4_DAT2,
- GPIO199_MC4_DAT1,
- GPIO200_MC4_DAT0,
- GPIO201_MC4_CMD,
- GPIO202_MC4_FBCLK,
- GPIO203_MC4_CLK,
- GPIO204_MC4_DAT7,
- GPIO205_MC4_DAT6,
- GPIO206_MC4_DAT5,
- GPIO207_MC4_DAT4,
-};
-
-static pin_cfg_t mop500_sdi2_pins[] = {
- /* SDI2 (POP eMMC) */
- GPIO128_MC2_CLK,
- GPIO129_MC2_CMD,
- GPIO130_MC2_FBCLK,
- GPIO131_MC2_DAT0,
- GPIO132_MC2_DAT1,
- GPIO133_MC2_DAT2,
- GPIO134_MC2_DAT3,
- GPIO135_MC2_DAT4,
- GPIO136_MC2_DAT5,
- GPIO137_MC2_DAT6,
- GPIO138_MC2_DAT7,
-};
+#include "ste-dma40-db8500.h"
/*
* SDI 0 (MicroSD slot)
@@ -86,48 +44,134 @@ static u32 mop500_sdi0_vdd_handler(struct device *dev, unsigned int vdd,
MCI_DATA2DIREN | MCI_DATA31DIREN;
}
+#ifdef CONFIG_STE_DMA40
+struct stedma40_chan_cfg mop500_sdi0_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV29_SD_MM0_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV29_SD_MM0_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+
static struct mmci_platform_data mop500_sdi0_data = {
.vdd_handler = mop500_sdi0_vdd_handler,
.ocr_mask = MMC_VDD_29_30,
.f_max = 100000000,
.capabilities = MMC_CAP_4_BIT_DATA,
- .gpio_cd = GPIO_SDMMC_CD,
.gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &mop500_sdi0_dma_cfg_rx,
+ .dma_tx_param = &mop500_sdi0_dma_cfg_tx,
+#endif
};
-void mop500_sdi_tc35892_init(void)
+/* GPIO pins used by the sdi0 level shifter */
+static int sdi0_en = -1;
+static int sdi0_vsel = -1;
+
+static void sdi0_configure(void)
{
int ret;
- ret = gpio_request(GPIO_SDMMC_EN, "SDMMC_EN");
+ ret = gpio_request(sdi0_en, "level shifter enable");
if (!ret)
- ret = gpio_request(GPIO_SDMMC_1V8_3V_SEL,
- "GPIO_SDMMC_1V8_3V_SEL");
- if (ret)
+ ret = gpio_request(sdi0_vsel,
+ "level shifter 1v8-3v select");
+
+ if (ret) {
+ pr_warning("unable to config sdi0 gpios for level shifter.\n");
return;
+ }
- gpio_direction_output(GPIO_SDMMC_1V8_3V_SEL, 1);
- gpio_direction_output(GPIO_SDMMC_EN, 0);
+ /* Select the default 2.9V and enable level shifter */
+ gpio_direction_output(sdi0_vsel, 0);
+ gpio_direction_output(sdi0_en, 1);
+ /* Add the device */
db8500_add_sdi0(&mop500_sdi0_data);
}
+void mop500_sdi_tc35892_init(void)
+{
+ mop500_sdi0_data.gpio_cd = GPIO_SDMMC_CD;
+ sdi0_en = GPIO_SDMMC_EN;
+ sdi0_vsel = GPIO_SDMMC_1V8_3V_SEL;
+ sdi0_configure();
+}
+
/*
* SDI 2 (POP eMMC, not on DB8500ed)
*/
+#ifdef CONFIG_STE_DMA40
+struct stedma40_chan_cfg mop500_sdi2_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV28_SD_MM2_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg mop500_sdi2_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV28_SD_MM2_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+
static struct mmci_platform_data mop500_sdi2_data = {
.ocr_mask = MMC_VDD_165_195,
.f_max = 100000000,
.capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_cd = -1,
.gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &mop500_sdi2_dma_cfg_rx,
+ .dma_tx_param = &mop500_sdi2_dma_cfg_tx,
+#endif
};
/*
* SDI 4 (on-board eMMC)
*/
+#ifdef CONFIG_STE_DMA40
+struct stedma40_chan_cfg mop500_sdi4_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV42_SD_MM4_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV42_SD_MM4_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+
static struct mmci_platform_data mop500_sdi4_data = {
.ocr_mask = MMC_VDD_29_30,
.f_max = 100000000,
@@ -135,26 +179,32 @@ static struct mmci_platform_data mop500_sdi4_data = {
MMC_CAP_MMC_HIGHSPEED,
.gpio_cd = -1,
.gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &mop500_sdi4_dma_cfg_rx,
+ .dma_tx_param = &mop500_sdi4_dma_cfg_tx,
+#endif
};
void __init mop500_sdi_init(void)
{
- nmk_config_pins(mop500_sdi_pins, ARRAY_SIZE(mop500_sdi_pins));
+ /* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */
+ if (!cpu_is_u8500v10())
+ mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
+ db8500_add_sdi2(&mop500_sdi2_data);
+
+ /* On-board eMMC */
+ db8500_add_sdi4(&mop500_sdi4_data);
+ if (machine_is_hrefv60()) {
+ mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
+ sdi0_en = HREFV60_SDMMC_EN_GPIO;
+ sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO;
+ sdi0_configure();
+ }
/*
- * sdi0 will finally be added when the TC35892 initializes and calls
+ * On boards with the TC35892 GPIO expander, sdi0 will finally
+ * be added when the TC35892 initializes and calls
* mop500_sdi_tc35892_init() above.
*/
-
- /* PoP:ed eMMC */
- if (!cpu_is_u8500ed()) {
- nmk_config_pins(mop500_sdi2_pins, ARRAY_SIZE(mop500_sdi2_pins));
- /* POP eMMC on v1.0 has problems with high speed */
- if (!cpu_is_u8500v10())
- mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
- db8500_add_sdi2(&mop500_sdi2_data);
- }
-
- /* On-board eMMC */
- db8500_add_sdi4(&mop500_sdi4_data);
}
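
Illustrative sketch (not part of the patch): the dma_filter/dma_rx_param/dma_tx_param fields set above are consumed through the generic dmaengine API. The MMC host passes the filter function and the stedma40_chan_cfg straight to dma_request_channel(), and the DMA40 driver hands back a channel that can serve the requested event line. Local names below are made up for clarity:

#include <linux/dmaengine.h>
#include <linux/amba/mmci.h>

static struct dma_chan *example_request_rx_chan(struct mmci_platform_data *plat)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * plat->dma_filter is stedma40_filter and plat->dma_rx_param points
	 * at e.g. mop500_sdi0_dma_cfg_rx, which names the SD_MM0 RX event
	 * line; the filter rejects channels that cannot serve that line.
	 */
	return dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
}
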
diff --git a/arch/arm/mach-ux500/board-mop500-stuib.c b/arch/arm/mach-ux500/board-mop500-stuib.c
new file mode 100644
index 000000000000..8c979770d872
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-stuib.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mfd/stmpe.h>
+#include <linux/input/bu21013.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/input/matrix_keypad.h>
+#include <asm/mach-types.h>
+
+#include "board-mop500.h"
+
+/* STMPE/SKE keypad use this key layout */
+static const unsigned int mop500_keymap[] = {
+ KEY(2, 5, KEY_END),
+ KEY(4, 1, KEY_POWER),
+ KEY(3, 5, KEY_VOLUMEDOWN),
+ KEY(1, 3, KEY_3),
+ KEY(5, 2, KEY_RIGHT),
+ KEY(5, 0, KEY_9),
+
+ KEY(0, 5, KEY_MENU),
+ KEY(7, 6, KEY_ENTER),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_2),
+ KEY(3, 4, KEY_UP),
+ KEY(3, 3, KEY_DOWN),
+
+ KEY(6, 4, KEY_SEND),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_VOLUMEUP),
+ KEY(5, 5, KEY_1),
+ KEY(4, 3, KEY_LEFT),
+ KEY(3, 2, KEY_7),
+};
+
+static const struct matrix_keymap_data mop500_keymap_data = {
+ .keymap = mop500_keymap,
+ .keymap_size = ARRAY_SIZE(mop500_keymap),
+};
+/*
+ * STMPE1601
+ */
+static struct stmpe_keypad_platform_data stmpe1601_keypad_data = {
+ .debounce_ms = 64,
+ .scan_count = 8,
+ .no_autorepeat = true,
+ .keymap_data = &mop500_keymap_data,
+};
+
+static struct stmpe_platform_data stmpe1601_data = {
+ .id = 1,
+ .blocks = STMPE_BLOCK_KEYPAD,
+ .irq_trigger = IRQF_TRIGGER_FALLING,
+ .irq_base = MOP500_STMPE1601_IRQ(0),
+ .keypad = &stmpe1601_keypad_data,
+ .autosleep = true,
+ .autosleep_timeout = 1024,
+};
+
+static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
+ {
+ I2C_BOARD_INFO("stmpe1601", 0x40),
+ .irq = NOMADIK_GPIO_TO_IRQ(218),
+ .platform_data = &stmpe1601_data,
+ .flags = I2C_CLIENT_WAKE,
+ },
+};
+
+/*
+ * BU21013 ROHM touchscreen interface on the STUIBs
+ */
+
+/* tracks number of bu21013 devices being enabled */
+static int bu21013_devices;
+
+#define TOUCH_GPIO_PIN 84
+
+#define TOUCH_XMAX 384
+#define TOUCH_YMAX 704
+
+#define PRCMU_CLOCK_OCR 0x1CC
+#define TSC_EXT_CLOCK_9_6MHZ 0x840000
+
+/**
+ * bu21013_gpio_board_init : configures the touch panel.
+ * @reset_pin: reset pin number
+ * This function can be used to configure
+ * the voltage and reset the touch panel controller.
+ */
+static int bu21013_gpio_board_init(int reset_pin)
+{
+ int retval = 0;
+
+ bu21013_devices++;
+ if (bu21013_devices == 1) {
+ retval = gpio_request(reset_pin, "touchp_reset");
+ if (retval) {
+ printk(KERN_ERR "Unable to request gpio reset_pin");
+ return retval;
+ }
+ retval = gpio_direction_output(reset_pin, 1);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: gpio direction failed\n",
+ __func__);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * bu21013_gpio_board_exit : deconfigures the touch panel controller
+ * @reset_pin: reset pin number
+ * This function can be used to deconfigure the chip select
+ * for the touch panel controller.
+ */
+static int bu21013_gpio_board_exit(int reset_pin)
+{
+ int retval = 0;
+
+ if (bu21013_devices == 1) {
+ retval = gpio_direction_output(reset_pin, 0);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: gpio direction failed\n",
+ __func__);
+ return retval;
+ }
+ gpio_set_value(reset_pin, 0);
+ }
+ bu21013_devices--;
+
+ return retval;
+}
+
+/**
+ * bu21013_read_pin_val : get the interrupt pin value
+ * This function can be used to get the interrupt pin value for the touch panel
+ * controller.
+ */
+static int bu21013_read_pin_val(void)
+{
+ return gpio_get_value(TOUCH_GPIO_PIN);
+}
+
+static struct bu21013_platform_device tsc_plat_device = {
+ .cs_en = bu21013_gpio_board_init,
+ .cs_dis = bu21013_gpio_board_exit,
+ .irq_read_val = bu21013_read_pin_val,
+ .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
+ .touch_x_max = TOUCH_XMAX,
+ .touch_y_max = TOUCH_YMAX,
+ .ext_clk = false,
+ .x_flip = false,
+ .y_flip = true,
+};
+
+static struct bu21013_platform_device tsc_plat2_device = {
+ .cs_en = bu21013_gpio_board_init,
+ .cs_dis = bu21013_gpio_board_exit,
+ .irq_read_val = bu21013_read_pin_val,
+ .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
+ .touch_x_max = TOUCH_XMAX,
+ .touch_y_max = TOUCH_YMAX,
+ .ext_clk = false,
+ .x_flip = false,
+ .y_flip = true,
+};
+
+static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
+ {
+ I2C_BOARD_INFO("bu21013_tp", 0x5C),
+ .platform_data = &tsc_plat_device,
+ },
+ {
+ I2C_BOARD_INFO("bu21013_tp", 0x5D),
+ .platform_data = &tsc_plat2_device,
+ },
+
+};
+
+void __init mop500_stuib_init(void)
+{
+ if (machine_is_hrefv60()) {
+ tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
+ tsc_plat2_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
+ } else {
+ tsc_plat_device.cs_pin = GPIO_BU21013_CS;
+ tsc_plat2_device.cs_pin = GPIO_BU21013_CS;
+
+ }
+
+ mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
+ ARRAY_SIZE(mop500_i2c0_devices_stuib));
+
+ mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib,
+ ARRAY_SIZE(u8500_i2c3_devices_stuib));
+}
diff --git a/arch/arm/mach-ux500/board-mop500-u8500uib.c b/arch/arm/mach-ux500/board-mop500-u8500uib.c
new file mode 100644
index 000000000000..d8a8734a0eba
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-u8500uib.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Board data for the U8500 UIB, also known as the New UIB
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/tc3589x.h>
+#include <linux/input/matrix_keypad.h>
+#include <../drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h>
+
+#include <mach/gpio.h>
+#include <mach/irqs.h>
+
+#include "board-mop500.h"
+
+/*
+ * Synaptics RMI4 touchscreen interface on the U8500 UIB
+ */
+
+/*
+ * Descriptor structure.
+ * Describes the number of i2c devices on the bus that speak RMI.
+ */
+static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = {
+ .irq_number = NOMADIK_GPIO_TO_IRQ(84),
+ .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+ .x_flip = false,
+ .y_flip = true,
+ .regulator_en = false,
+};
+
+static struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
+ .platform_data = &rmi4_i2c_dev_platformdata,
+ },
+};
+
+/*
+ * TC35893
+ */
+static const unsigned int u8500_keymap[] = {
+ KEY(3, 1, KEY_END),
+ KEY(4, 1, KEY_POWER),
+ KEY(6, 4, KEY_VOLUMEDOWN),
+ KEY(4, 2, KEY_EMAIL),
+ KEY(3, 3, KEY_RIGHT),
+ KEY(2, 5, KEY_BACKSPACE),
+
+ KEY(6, 7, KEY_MENU),
+ KEY(5, 0, KEY_ENTER),
+ KEY(4, 3, KEY_0),
+ KEY(3, 4, KEY_DOT),
+ KEY(5, 2, KEY_UP),
+ KEY(3, 5, KEY_DOWN),
+
+ KEY(4, 5, KEY_SEND),
+ KEY(0, 5, KEY_BACK),
+ KEY(6, 2, KEY_VOLUMEUP),
+ KEY(1, 3, KEY_SPACE),
+ KEY(7, 6, KEY_LEFT),
+ KEY(5, 5, KEY_SEARCH),
+};
+
+static struct matrix_keymap_data u8500_keymap_data = {
+ .keymap = u8500_keymap,
+ .keymap_size = ARRAY_SIZE(u8500_keymap),
+};
+
+static struct tc3589x_keypad_platform_data tc35893_data = {
+ .krow = TC_KPD_ROWS,
+ .kcol = TC_KPD_COLUMNS,
+ .debounce_period = TC_KPD_DEBOUNCE_PERIOD,
+ .settle_time = TC_KPD_SETTLE_TIME,
+ .irqtype = IRQF_TRIGGER_FALLING,
+ .enable_wakeup = true,
+ .keymap_data = &u8500_keymap_data,
+ .no_autorepeat = true,
+};
+
+static struct tc3589x_platform_data tc3589x_keypad_data = {
+ .block = TC3589x_BLOCK_KEYPAD,
+ .keypad = &tc35893_data,
+ .irq_base = MOP500_EGPIO_IRQ_BASE,
+};
+
+static struct i2c_board_info __initdata mop500_i2c0_devices_u8500[] = {
+ {
+ I2C_BOARD_INFO("tc3589x", 0x44),
+ .platform_data = &tc3589x_keypad_data,
+ .irq = NOMADIK_GPIO_TO_IRQ(218),
+ .flags = I2C_CLIENT_WAKE,
+ },
+};
+
+
+void __init mop500_u8500uib_init(void)
+{
+ mop500_uib_i2c_add(3, mop500_i2c3_devices_u8500,
+ ARRAY_SIZE(mop500_i2c3_devices_u8500));
+
+ mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
+ ARRAY_SIZE(mop500_i2c0_devices_u8500));
+
+}
diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c
new file mode 100644
index 000000000000..69cce41f602a
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-uib.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#define pr_fmt(fmt) "mop500-uib: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+
+#include <mach/hardware.h>
+#include "board-mop500.h"
+
+enum mop500_uib {
+ STUIB,
+ U8500UIB,
+};
+
+struct uib {
+ const char *name;
+ const char *option;
+ void (*init)(void);
+};
+
+static struct __initdata uib mop500_uibs[] = {
+ [STUIB] = {
+ .name = "ST-UIB",
+ .option = "stuib",
+ .init = mop500_stuib_init,
+ },
+ [U8500UIB] = {
+ .name = "U8500-UIB",
+ .option = "u8500uib",
+ .init = mop500_u8500uib_init,
+ },
+};
+
+static struct uib *mop500_uib;
+
+static int __init mop500_uib_setup(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mop500_uibs); i++) {
+ struct uib *uib = &mop500_uibs[i];
+
+ if (!strcmp(str, uib->option)) {
+ mop500_uib = uib;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(mop500_uibs))
+ pr_err("invalid uib= option (%s)\n", str);
+
+ return 1;
+}
+__setup("uib=", mop500_uib_setup);
+
+/*
+ * The UIBs are detected after the I2C host controllers are registered, so
+ * i2c_register_board_info() can't be used.
+ */
+void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+ unsigned n)
+{
+ struct i2c_adapter *adap;
+ struct i2c_client *client;
+ int i;
+
+ adap = i2c_get_adapter(busnum);
+ if (!adap) {
+ pr_err("failed to get adapter i2c%d\n", busnum);
+ return;
+ }
+
+ for (i = 0; i < n; i++) {
+ client = i2c_new_device(adap, &info[i]);
+ if (!client)
+ pr_err("failed to register %s to i2c%d\n",
+ info[i].type, busnum);
+ }
+
+ i2c_put_adapter(adap);
+}
+
+static void __init __mop500_uib_init(struct uib *uib, const char *why)
+{
+ pr_info("%s (%s)\n", uib->name, why);
+ uib->init();
+}
+
+/*
+ * Detect the UIB attached based on the presence or absence of i2c devices.
+ */
+static int __init mop500_uib_init(void)
+{
+ struct uib *uib = mop500_uib;
+ struct i2c_adapter *i2c0;
+ int ret;
+
+ if (!cpu_is_u8500())
+ return -ENODEV;
+
+ if (uib) {
+ __mop500_uib_init(uib, "from uib= boot argument");
+ return 0;
+ }
+
+ i2c0 = i2c_get_adapter(0);
+ if (!i2c0) {
+ __mop500_uib_init(&mop500_uibs[STUIB],
+ "fallback, could not get i2c0");
+ return -ENODEV;
+ }
+
+ /* U8500-UIB has the TC35893 at 0x44 on I2C0, the ST-UIB doesn't. */
+ ret = i2c_smbus_xfer(i2c0, 0x44, 0, I2C_SMBUS_WRITE, 0,
+ I2C_SMBUS_QUICK, NULL);
+ i2c_put_adapter(i2c0);
+
+ if (ret == 0)
+ uib = &mop500_uibs[U8500UIB];
+ else
+ uib = &mop500_uibs[STUIB];
+
+ __mop500_uib_init(uib, "detected");
+
+ return 0;
+}
+
+module_init(mop500_uib_init);
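
Illustrative sketch (not part of the patch): mop500_uib_i2c_add() exists because the conventional registration path sketched below only works when it runs before the i2c0 adapter is registered, whereas the UIB type is only known once i2c0 is already up and can be probed. Names here are made up for the example:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>

static struct i2c_board_info __initdata example_i2c0_devs[] = {
	{ I2C_BOARD_INFO("tc3589x", 0x44), },
};

static int __init example_board_init(void)
{
	/* Only valid before the i2c0 adapter probes; the UIB code above
	 * must use i2c_get_adapter() + i2c_new_device() instead. */
	return i2c_register_board_info(0, example_i2c0_devs,
				       ARRAY_SIZE(example_i2c0_devs));
}
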
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index a393f57ed2a8..8790d984cac8 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -17,68 +17,30 @@
#include <linux/gpio.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
+#include <linux/amba/serial.h>
#include <linux/spi/spi.h>
#include <linux/mfd/ab8500.h>
#include <linux/mfd/tc3589x.h>
+#include <linux/leds-lp5521.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <plat/pincfg.h>
#include <plat/i2c.h>
+#include <plat/ste_dma40.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
#include <mach/irqs.h>
+#include "ste-dma40-db8500.h"
#include "devices-db8500.h"
-#include "pins-db8500.h"
#include "board-mop500.h"
#include "board-mop500-regulators.h"
-static pin_cfg_t mop500_pins[] = {
- /* SSP0 */
- GPIO143_SSP0_CLK,
- GPIO144_SSP0_FRM,
- GPIO145_SSP0_RXD,
- GPIO146_SSP0_TXD,
-
- /* I2C */
- GPIO147_I2C0_SCL,
- GPIO148_I2C0_SDA,
- GPIO16_I2C1_SCL,
- GPIO17_I2C1_SDA,
- GPIO10_I2C2_SDA,
- GPIO11_I2C2_SCL,
- GPIO229_I2C3_SDA,
- GPIO230_I2C3_SCL,
-
- /* SKE keypad */
- GPIO153_KP_I7,
- GPIO154_KP_I6,
- GPIO155_KP_I5,
- GPIO156_KP_I4,
- GPIO157_KP_O7,
- GPIO158_KP_O6,
- GPIO159_KP_O5,
- GPIO160_KP_O4,
- GPIO161_KP_I3,
- GPIO162_KP_I2,
- GPIO163_KP_I1,
- GPIO164_KP_I0,
- GPIO165_KP_O3,
- GPIO166_KP_O2,
- GPIO167_KP_O1,
- GPIO168_KP_O0,
-
- /* GPIO_EXP_INT */
- GPIO217_GPIO,
-
- /* STMPE1601 IRQ */
- GPIO218_GPIO | PIN_INPUT_PULLUP,
-};
-
static struct ab8500_platform_data ab8500_platdata = {
.irq_base = MOP500_AB8500_IRQ_BASE,
.regulator = ab8500_regulators,
@@ -103,16 +65,6 @@ struct platform_device ab8500_device = {
.resource = ab8500_resources,
};
-static struct pl022_ssp_controller ssp0_platform_data = {
- .bus_id = 0,
- /* pl022 not yet supports dma */
- .enable_dma = 0,
- /* on this platform, gpio 31,142,144,214 &
- * 224 are connected as chip selects
- */
- .num_chipselect = 5,
-};
-
/*
* TC35892
*/
@@ -133,14 +85,81 @@ static struct tc3589x_platform_data mop500_tc35892_data = {
.irq_base = MOP500_EGPIO_IRQ_BASE,
};
+static struct lp5521_led_config lp5521_pri_led[] = {
+ [0] = {
+ .chan_nr = 0,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [1] = {
+ .chan_nr = 1,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [2] = {
+ .chan_nr = 2,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+};
+
+static struct lp5521_platform_data __initdata lp5521_pri_data = {
+ .label = "lp5521_pri",
+ .led_config = &lp5521_pri_led[0],
+ .num_channels = 3,
+ .clock_mode = LP5521_CLOCK_EXT,
+};
+
+static struct lp5521_led_config lp5521_sec_led[] = {
+ [0] = {
+ .chan_nr = 0,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [1] = {
+ .chan_nr = 1,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [2] = {
+ .chan_nr = 2,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+};
+
+static struct lp5521_platform_data __initdata lp5521_sec_data = {
+ .label = "lp5521_sec",
+ .led_config = &lp5521_sec_led[0],
+ .num_channels = 3,
+ .clock_mode = LP5521_CLOCK_EXT,
+};
+
static struct i2c_board_info mop500_i2c0_devices[] = {
{
I2C_BOARD_INFO("tc3589x", 0x42),
- .irq = NOMADIK_GPIO_TO_IRQ(217),
+ .irq = NOMADIK_GPIO_TO_IRQ(217),
.platform_data = &mop500_tc35892_data,
},
};
+static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
+ {
+ /* lp5521 LED driver, 1st device */
+ I2C_BOARD_INFO("lp5521", 0x33),
+ .platform_data = &lp5521_pri_data,
+ },
+ {
+ /* lp5521 LED driver, 2nd device */
+ I2C_BOARD_INFO("lp5521", 0x34),
+ .platform_data = &lp5521_sec_data,
+ },
+ {
+ /* Light sensor Rohm BH1780GLI */
+ I2C_BOARD_INFO("bh1780", 0x29),
+ },
+};
+
#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
static struct nmk_i2c_controller u8500_i2c##id##_data = { \
/* \
@@ -178,8 +197,93 @@ static void __init mop500_i2c_init(void)
db8500_add_i2c3(&u8500_i2c3_data);
}
+static struct gpio_keys_button mop500_gpio_keys[] = {
+ {
+ .desc = "SFH7741 Proximity Sensor",
+ .type = EV_SW,
+ .code = SW_FRONT_PROXIMITY,
+ .active_low = 0,
+ .can_disable = 1,
+ }
+};
+
+static struct regulator *prox_regulator;
+static int mop500_prox_activate(struct device *dev);
+static void mop500_prox_deactivate(struct device *dev);
+
+static struct gpio_keys_platform_data mop500_gpio_keys_data = {
+ .buttons = mop500_gpio_keys,
+ .nbuttons = ARRAY_SIZE(mop500_gpio_keys),
+ .enable = mop500_prox_activate,
+ .disable = mop500_prox_deactivate,
+};
+
+static struct platform_device mop500_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &mop500_gpio_keys_data,
+ },
+};
+
+static int mop500_prox_activate(struct device *dev)
+{
+ prox_regulator = regulator_get(&mop500_gpio_keys_device.dev,
+ "vcc");
+ if (IS_ERR(prox_regulator)) {
+ dev_err(&mop500_gpio_keys_device.dev,
+ "no regulator\n");
+ return PTR_ERR(prox_regulator);
+ }
+ regulator_enable(prox_regulator);
+ return 0;
+}
+
+static void mop500_prox_deactivate(struct device *dev)
+{
+ regulator_disable(prox_regulator);
+ regulator_put(prox_regulator);
+}
+
/* add any platform devices here - TODO */
static struct platform_device *platform_devs[] __initdata = {
+ &mop500_gpio_keys_device,
+};
+
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg ssp0_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV8_SSP0_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+
+static struct stedma40_chan_cfg ssp0_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV8_SSP0_TX,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+#endif
+
+static struct pl022_ssp_controller ssp0_platform_data = {
+ .bus_id = 0,
+#ifdef CONFIG_STE_DMA40
+ .enable_dma = 1,
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &ssp0_dma_cfg_rx,
+ .dma_tx_param = &ssp0_dma_cfg_tx,
+#else
+ .enable_dma = 0,
+#endif
+ /* on this platform, gpio 31,142,144,214 &
+ * 224 are connected as chip selects
+ */
+ .num_chipselect = 5,
};
static void __init mop500_spi_init(void)
@@ -187,18 +291,108 @@ static void __init mop500_spi_init(void)
db8500_add_ssp0(&ssp0_platform_data);
}
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg uart0_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV13_UART0_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+
+static struct stedma40_chan_cfg uart0_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV13_UART0_TX,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+
+static struct stedma40_chan_cfg uart1_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV12_UART1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+
+static struct stedma40_chan_cfg uart1_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV12_UART1_TX,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+
+static struct stedma40_chan_cfg uart2_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV11_UART2_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+
+static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV11_UART2_TX,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+};
+#endif
+
+static struct amba_pl011_data uart0_plat = {
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &uart0_dma_cfg_rx,
+ .dma_tx_param = &uart0_dma_cfg_tx,
+#endif
+};
+
+static struct amba_pl011_data uart1_plat = {
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &uart1_dma_cfg_rx,
+ .dma_tx_param = &uart1_dma_cfg_tx,
+#endif
+};
+
+static struct amba_pl011_data uart2_plat = {
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &uart2_dma_cfg_rx,
+ .dma_tx_param = &uart2_dma_cfg_tx,
+#endif
+};
+
static void __init mop500_uart_init(void)
{
- db8500_add_uart0();
- db8500_add_uart1();
- db8500_add_uart2();
+ db8500_add_uart0(&uart0_plat);
+ db8500_add_uart1(&uart1_plat);
+ db8500_add_uart2(&uart2_plat);
}
-static void __init u8500_init_machine(void)
+static void __init mop500_init_machine(void)
{
+ /*
+ * The HREFv60 board removed a GPIO expander and routed
+ * all these GPIO pins to the internal GPIO controller
+ * instead.
+ */
+ if (machine_is_hrefv60())
+ mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO;
+ else
+ mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR;
+
u8500_init_devices();
- nmk_config_pins(mop500_pins, ARRAY_SIZE(mop500_pins));
+ mop500_pins_init();
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
@@ -207,12 +401,12 @@ static void __init u8500_init_machine(void)
mop500_spi_init();
mop500_uart_init();
- mop500_keypad_init();
-
platform_device_register(&ab8500_device);
i2c_register_board_info(0, mop500_i2c0_devices,
ARRAY_SIZE(mop500_i2c0_devices));
+ i2c_register_board_info(2, mop500_i2c2_devices,
+ ARRAY_SIZE(mop500_i2c2_devices));
}
MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
@@ -222,5 +416,13 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.init_irq = ux500_init_irq,
/* we re-use nomadik timer here */
.timer = &ux500_timer,
- .init_machine = u8500_init_machine,
+ .init_machine = mop500_init_machine,
+MACHINE_END
+
+MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
+ .boot_params = 0x100,
+ .map_io = u8500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
+ .init_machine = mop500_init_machine,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index 3104ae2a02c2..56722f4be71b 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -7,15 +7,36 @@
#ifndef __BOARD_MOP500_H
#define __BOARD_MOP500_H
-#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
+/* HREFv60-specific GPIO assignments; this board has no GPIO expander */
+#define HREFV60_TOUCH_RST_GPIO 143
+#define HREFV60_PROX_SENSE_GPIO 217
+#define HREFV60_HAL_SW_GPIO 145
+#define HREFV60_SDMMC_EN_GPIO 169
+#define HREFV60_SDMMC_1V8_3V_GPIO 5
+#define HREFV60_SDMMC_CD_GPIO 95
+#define HREFV60_ACCEL_INT1_GPIO 82
+#define HREFV60_ACCEL_INT2_GPIO 83
+#define HREFV60_MAGNET_DRDY_GPIO 32
+#define HREFV60_DISP1_RST_GPIO 65
+#define HREFV60_DISP2_RST_GPIO 66
/* GPIOs on the TC35892 expander */
+#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
#define GPIO_SDMMC_CD MOP500_EGPIO(3)
+#define GPIO_PROX_SENSOR MOP500_EGPIO(7)
+#define GPIO_BU21013_CS MOP500_EGPIO(13)
#define GPIO_SDMMC_EN MOP500_EGPIO(17)
#define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18)
+struct i2c_board_info;
+
extern void mop500_sdi_init(void);
extern void mop500_sdi_tc35892_init(void);
-extern void mop500_keypad_init(void);
+void __init mop500_u8500uib_init(void);
+void __init mop500_stuib_init(void);
+void __init mop500_pins_init(void);
+
+void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+ unsigned n);
#endif
diff --git a/arch/arm/mach-ux500/board-u5500-sdi.c b/arch/arm/mach-ux500/board-u5500-sdi.c
index 54712acc0394..739fb4c5b160 100644
--- a/arch/arm/mach-ux500/board-u5500-sdi.c
+++ b/arch/arm/mach-ux500/board-u5500-sdi.c
@@ -31,6 +31,26 @@ static pin_cfg_t u5500_sdi_pins[] = {
GPIO14_MC0_CLK | PIN_DIR_OUTPUT | PIN_VAL_LOW,
};
+#ifdef CONFIG_STE_DMA40
+struct stedma40_chan_cfg u5500_sdi0_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV24_SDMMC0_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg u5500_sdi0_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV24_SDMMC0_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+
static struct mmci_platform_data u5500_sdi0_data = {
.ocr_mask = MMC_VDD_165_195,
.f_max = 50000000,
@@ -39,6 +59,11 @@ static struct mmci_platform_data u5500_sdi0_data = {
MMC_CAP_MMC_HIGHSPEED,
.gpio_cd = -1,
.gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &u5500_sdi0_dma_cfg_rx,
+ .dma_tx_param = &u5500_sdi0_dma_cfg_tx,
+#endif
};
void __init u5500_sdi_init(void)
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
index 39d370c1f3b4..44fd3b5c33ec 100644
--- a/arch/arm/mach-ux500/board-u5500.c
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -22,9 +22,9 @@
static void __init u5500_uart_init(void)
{
- db5500_add_uart0();
- db5500_add_uart1();
- db5500_add_uart2();
+ db5500_add_uart0(NULL);
+ db5500_add_uart1(NULL);
+ db5500_add_uart2(NULL);
}
static void __init u5500_init_machine(void)
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index b2b0a3b9be8f..32ce90840ee1 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -313,7 +313,7 @@ static DEFINE_PRCMU_CLK_RATE(uartclk, 0x0, 5, UARTCLK, 38400000);
static DEFINE_PRCMU_CLK(msp02clk, 0x0, 6, MSP02CLK);
static DEFINE_PRCMU_CLK(msp1clk, 0x0, 7, MSP1CLK); /* v1 */
static DEFINE_PRCMU_CLK_RATE(i2cclk, 0x0, 8, I2CCLK, 48000000);
-static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 50000000);
+static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 100000000);
static DEFINE_PRCMU_CLK(slimclk, 0x0, 10, SLIMCLK);
static DEFINE_PRCMU_CLK(per1clk, 0x0, 11, PER1CLK);
static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
@@ -520,7 +520,7 @@ static struct clk_lookup u8500_ed_clks[] = {
CLK(ssp0_ed, "ssp0", NULL),
/* Peripheral Cluster #5 */
- CLK(usb_ed, "musb_hdrc.0", "usb"),
+ CLK(usb_ed, "musb-ux500.0", "usb"),
/* Peripheral Cluster #6 */
CLK(dmc_ed, "dmc", NULL),
@@ -561,7 +561,7 @@ static struct clk_lookup u8500_v1_clks[] = {
CLK(ssp0_v1, "ssp0", NULL),
/* Peripheral Cluster #5 */
- CLK(usb_v1, "musb_hdrc.0", "usb"),
+ CLK(usb_v1, "musb-ux500.0", "usb"),
/* Peripheral Cluster #6 */
CLK(mtu1_v1, "mtu1", NULL),
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index af04e0891a78..c9dc2eff3cb2 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <asm/mach/map.h>
+#include <asm/pmu.h>
#include <plat/gpio.h>
@@ -18,8 +19,10 @@
#include <mach/devices.h>
#include <mach/setup.h>
#include <mach/irqs.h>
+#include <mach/usb.h>
#include "devices-db5500.h"
+#include "ste-dma40-db5500.h"
static struct map_desc u5500_uart_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_UART0_BASE, SZ_4K),
@@ -43,6 +46,26 @@ static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
};
+static struct resource db5500_pmu_resources[] = {
+ [0] = {
+ .start = IRQ_DB5500_PMU0,
+ .end = IRQ_DB5500_PMU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_DB5500_PMU1,
+ .end = IRQ_DB5500_PMU1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device db5500_pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(db5500_pmu_resources),
+ .resource = db5500_pmu_resources,
+};
+
static struct resource mbox0_resources[] = {
{
.name = "mbox_peer",
@@ -127,7 +150,8 @@ static struct platform_device mbox2_device = {
.num_resources = ARRAY_SIZE(mbox2_resources),
};
-static struct platform_device *u5500_platform_devs[] __initdata = {
+static struct platform_device *db5500_platform_devs[] __initdata = {
+ &db5500_pmu_device,
&mbox0_device,
&mbox1_device,
&mbox2_device,
@@ -166,12 +190,35 @@ void __init u5500_map_io(void)
iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
}
+static int usb_db5500_rx_dma_cfg[] = {
+ DB5500_DMA_DEV4_USB_OTG_IEP_1_9,
+ DB5500_DMA_DEV5_USB_OTG_IEP_2_10,
+ DB5500_DMA_DEV6_USB_OTG_IEP_3_11,
+ DB5500_DMA_DEV20_USB_OTG_IEP_4_12,
+ DB5500_DMA_DEV21_USB_OTG_IEP_5_13,
+ DB5500_DMA_DEV22_USB_OTG_IEP_6_14,
+ DB5500_DMA_DEV23_USB_OTG_IEP_7_15,
+ DB5500_DMA_DEV38_USB_OTG_IEP_8
+};
+
+static int usb_db5500_tx_dma_cfg[] = {
+ DB5500_DMA_DEV4_USB_OTG_OEP_1_9,
+ DB5500_DMA_DEV5_USB_OTG_OEP_2_10,
+ DB5500_DMA_DEV6_USB_OTG_OEP_3_11,
+ DB5500_DMA_DEV20_USB_OTG_OEP_4_12,
+ DB5500_DMA_DEV21_USB_OTG_OEP_5_13,
+ DB5500_DMA_DEV22_USB_OTG_OEP_6_14,
+ DB5500_DMA_DEV23_USB_OTG_OEP_7_15,
+ DB5500_DMA_DEV38_USB_OTG_OEP_8
+};
+
void __init u5500_init_devices(void)
{
db5500_add_gpios();
db5500_dma_init();
db5500_add_rtc();
+ db5500_add_usb(usb_db5500_rx_dma_cfg, usb_db5500_tx_dma_cfg);
- platform_add_devices(u5500_platform_devs,
- ARRAY_SIZE(u5500_platform_devs));
+ platform_add_devices(db5500_platform_devs,
+ ARRAY_SIZE(db5500_platform_devs));
}
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 1748fbc58530..516126cb357d 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -12,21 +12,21 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/mach/map.h>
+#include <asm/pmu.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <mach/usb.h>
#include "devices-db8500.h"
-
-static struct platform_device *platform_devs[] __initdata = {
- &u8500_dma40_device,
-};
+#include "ste-dma40-db8500.h"
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_uart_io_desc[] __initdata = {
@@ -89,6 +89,51 @@ void __init u8500_map_io(void)
iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));
}
+static struct resource db8500_pmu_resources[] = {
+ [0] = {
+ .start = IRQ_DB8500_PMU,
+ .end = IRQ_DB8500_PMU,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/*
+ * The PMU IRQ lines of two cores are wired together into a single interrupt.
+ * Bounce the interrupt to the other core if it's not ours.
+ */
+static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
+{
+ irqreturn_t ret = handler(irq, dev);
+ int other = !smp_processor_id();
+
+ if (ret == IRQ_NONE && cpu_online(other))
+ irq_set_affinity(irq, cpumask_of(other));
+
+ /*
+ * We should be able to get away with the amount of IRQ_NONEs we give,
+ * while still having the spurious IRQ detection code kick in if the
+ * interrupt really starts hitting spuriously.
+ */
+ return ret;
+}
+
+static struct arm_pmu_platdata db8500_pmu_platdata = {
+ .handle_irq = db8500_pmu_handler,
+};
+
+static struct platform_device db8500_pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(db8500_pmu_resources),
+ .resource = db8500_pmu_resources,
+ .dev.platform_data = &db8500_pmu_platdata,
+};
+
+static struct platform_device *platform_devs[] __initdata = {
+ &u8500_dma40_device,
+ &db8500_pmu_device,
+};
+
static resource_size_t __initdata db8500_gpio_base[] = {
U8500_GPIOBANK0_BASE,
U8500_GPIOBANK1_BASE,
@@ -111,6 +156,28 @@ static void __init db8500_add_gpios(void)
IRQ_DB8500_GPIO0, &pdata);
}
+static int usb_db8500_rx_dma_cfg[] = {
+ DB8500_DMA_DEV38_USB_OTG_IEP_1_9,
+ DB8500_DMA_DEV37_USB_OTG_IEP_2_10,
+ DB8500_DMA_DEV36_USB_OTG_IEP_3_11,
+ DB8500_DMA_DEV19_USB_OTG_IEP_4_12,
+ DB8500_DMA_DEV18_USB_OTG_IEP_5_13,
+ DB8500_DMA_DEV17_USB_OTG_IEP_6_14,
+ DB8500_DMA_DEV16_USB_OTG_IEP_7_15,
+ DB8500_DMA_DEV39_USB_OTG_IEP_8
+};
+
+static int usb_db8500_tx_dma_cfg[] = {
+ DB8500_DMA_DEV38_USB_OTG_OEP_1_9,
+ DB8500_DMA_DEV37_USB_OTG_OEP_2_10,
+ DB8500_DMA_DEV36_USB_OTG_OEP_3_11,
+ DB8500_DMA_DEV19_USB_OTG_OEP_4_12,
+ DB8500_DMA_DEV18_USB_OTG_OEP_5_13,
+ DB8500_DMA_DEV17_USB_OTG_OEP_6_14,
+ DB8500_DMA_DEV16_USB_OTG_OEP_7_15,
+ DB8500_DMA_DEV39_USB_OTG_OEP_8
+};
+
/*
* This function is called from the board init
*/
@@ -121,6 +188,7 @@ void __init u8500_init_devices(void)
db8500_add_rtc();
db8500_add_gpios();
+ db8500_add_usb(usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
platform_device_register_simple("cpufreq-u8500", -1, NULL, 0);
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c
index fe69f5fac1bb..13a4ce046ae5 100644
--- a/arch/arm/mach-ux500/devices-common.c
+++ b/arch/arm/mach-ux500/devices-common.c
@@ -139,6 +139,7 @@ void dbx500_add_gpios(resource_size_t *base, int num, int irq,
for (i = 0; i < num; i++, first += 32, irq++) {
pdata->first_gpio = first;
pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first);
+ pdata->num_gpio = 32;
dbx500_add_gpio(i, base[i], irq, pdata);
}
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index cbadc117d2db..c719b5a1d913 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -42,10 +42,13 @@ dbx500_add_sdi(const char *name, resource_size_t base, int irq,
return dbx500_add_amba_device(name, base, irq, pdata, 0);
}
+struct amba_pl011_data;
+
static inline struct amba_device *
-dbx500_add_uart(const char *name, resource_size_t base, int irq)
+dbx500_add_uart(const char *name, resource_size_t base, int irq,
+ struct amba_pl011_data *pdata)
{
- return dbx500_add_amba_device(name, base, irq, NULL, 0);
+ return dbx500_add_amba_device(name, base, irq, pdata, 0);
}
struct nmk_i2c_controller;
diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h
index c8d7901c1f2d..94627f7783b0 100644
--- a/arch/arm/mach-ux500/devices-db5500.h
+++ b/arch/arm/mach-ux500/devices-db5500.h
@@ -34,6 +34,9 @@
#define db5500_add_rtc() \
dbx500_add_rtc(U5500_RTC_BASE, IRQ_DB5500_RTC);
+#define db5500_add_usb(rx_cfg, tx_cfg) \
+ ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg)
+
#define db5500_add_sdi0(pdata) \
dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata)
#define db5500_add_sdi1(pdata) \
@@ -54,13 +57,13 @@
#define db5500_add_spi3(pdata) \
dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata)
-#define db5500_add_uart0() \
- dbx500_add_uart("uart0", U5500_UART0_BASE, IRQ_DB5500_UART0)
-#define db5500_add_uart1() \
- dbx500_add_uart("uart1", U5500_UART1_BASE, IRQ_DB5500_UART1)
-#define db5500_add_uart2() \
- dbx500_add_uart("uart2", U5500_UART2_BASE, IRQ_DB5500_UART2)
-#define db5500_add_uart3() \
- dbx500_add_uart("uart3", U5500_UART3_BASE, IRQ_DB5500_UART3)
+#define db5500_add_uart0(plat) \
+ dbx500_add_uart("uart0", U5500_UART0_BASE, IRQ_DB5500_UART0, plat)
+#define db5500_add_uart1(plat) \
+ dbx500_add_uart("uart1", U5500_UART1_BASE, IRQ_DB5500_UART1, plat)
+#define db5500_add_uart2(plat) \
+ dbx500_add_uart("uart2", U5500_UART2_BASE, IRQ_DB5500_UART2, plat)
+#define db5500_add_uart3(plat) \
+ dbx500_add_uart("uart3", U5500_UART3_BASE, IRQ_DB5500_UART3, plat)
#endif
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 23c695d54977..73b17404b194 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/amba/bus.h>
+#include <linux/amba/pl022.h>
#include <plat/ste_dma40.h>
@@ -67,12 +68,72 @@ struct stedma40_chan_cfg dma40_memcpy_conf_log = {
/*
* Mapping between destination event lines and physical device address.
- * The event line is tied to a device and therefor the address is constant.
+ * The event line is tied to a device and therefore the address is constant.
+ * When the address comes from a primecell it will be configured at runtime
+ * and we set the address to -1 as a placeholder.
*/
-static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV];
+static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
+ /* MUSB - these will be runtime-reconfigured */
+ [DB8500_DMA_DEV39_USB_OTG_OEP_8] = -1,
+ [DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = -1,
+ [DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = -1,
+ [DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = -1,
+ [DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = -1,
+ [DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = -1,
+ [DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = -1,
+ [DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = -1,
+ /* PrimeCells - run-time configured */
+ [DB8500_DMA_DEV0_SPI0_TX] = -1,
+ [DB8500_DMA_DEV1_SD_MMC0_TX] = -1,
+ [DB8500_DMA_DEV2_SD_MMC1_TX] = -1,
+ [DB8500_DMA_DEV3_SD_MMC2_TX] = -1,
+ [DB8500_DMA_DEV8_SSP0_TX] = -1,
+ [DB8500_DMA_DEV9_SSP1_TX] = -1,
+ [DB8500_DMA_DEV11_UART2_TX] = -1,
+ [DB8500_DMA_DEV12_UART1_TX] = -1,
+ [DB8500_DMA_DEV13_UART0_TX] = -1,
+ [DB8500_DMA_DEV28_SD_MM2_TX] = -1,
+ [DB8500_DMA_DEV29_SD_MM0_TX] = -1,
+ [DB8500_DMA_DEV32_SD_MM1_TX] = -1,
+ [DB8500_DMA_DEV33_SPI2_TX] = -1,
+ [DB8500_DMA_DEV35_SPI1_TX] = -1,
+ [DB8500_DMA_DEV40_SPI3_TX] = -1,
+ [DB8500_DMA_DEV41_SD_MM3_TX] = -1,
+ [DB8500_DMA_DEV42_SD_MM4_TX] = -1,
+ [DB8500_DMA_DEV43_SD_MM5_TX] = -1,
+};
/* Mapping between source event lines and physical device address */
-static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV];
+static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
+ /* MUSB - these will be runtime-reconfigured */
+ [DB8500_DMA_DEV39_USB_OTG_IEP_8] = -1,
+ [DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = -1,
+ [DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = -1,
+ [DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = -1,
+ [DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = -1,
+ [DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = -1,
+ [DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = -1,
+ [DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = -1,
+ /* PrimeCells */
+ [DB8500_DMA_DEV0_SPI0_RX] = -1,
+ [DB8500_DMA_DEV1_SD_MMC0_RX] = -1,
+ [DB8500_DMA_DEV2_SD_MMC1_RX] = -1,
+ [DB8500_DMA_DEV3_SD_MMC2_RX] = -1,
+ [DB8500_DMA_DEV8_SSP0_RX] = -1,
+ [DB8500_DMA_DEV9_SSP1_RX] = -1,
+ [DB8500_DMA_DEV11_UART2_RX] = -1,
+ [DB8500_DMA_DEV12_UART1_RX] = -1,
+ [DB8500_DMA_DEV13_UART0_RX] = -1,
+ [DB8500_DMA_DEV28_SD_MM2_RX] = -1,
+ [DB8500_DMA_DEV29_SD_MM0_RX] = -1,
+ [DB8500_DMA_DEV32_SD_MM1_RX] = -1,
+ [DB8500_DMA_DEV33_SPI2_RX] = -1,
+ [DB8500_DMA_DEV35_SPI1_RX] = -1,
+ [DB8500_DMA_DEV40_SPI3_RX] = -1,
+ [DB8500_DMA_DEV41_SD_MM3_RX] = -1,
+ [DB8500_DMA_DEV42_SD_MM4_RX] = -1,
+ [DB8500_DMA_DEV43_SD_MM5_RX] = -1,
+};
/* Reserved event lines for memcpy only */
static int dma40_memcpy_event[] = {
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 3a770c756979..9cc6f8f5d3e6 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -61,6 +61,9 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
#define db8500_add_rtc() \
dbx500_add_rtc(U8500_RTC_BASE, IRQ_DB8500_RTC);
+#define db8500_add_usb(rx_cfg, tx_cfg) \
+ ux500_add_usb(U8500_USBOTG_BASE, IRQ_DB8500_USBOTG, rx_cfg, tx_cfg)
+
#define db8500_add_sdi0(pdata) \
dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata)
#define db8500_add_sdi1(pdata) \
@@ -88,11 +91,11 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
#define db8500_add_spi3(pdata) \
dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata)
-#define db8500_add_uart0() \
- dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0)
-#define db8500_add_uart1() \
- dbx500_add_uart("uart1", U8500_UART1_BASE, IRQ_DB8500_UART1)
-#define db8500_add_uart2() \
- dbx500_add_uart("uart2", U8500_UART2_BASE, IRQ_DB8500_UART2)
+#define db8500_add_uart0(pdata) \
+ dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0, pdata)
+#define db8500_add_uart1(pdata) \
+ dbx500_add_uart("uart1", U8500_UART1_BASE, IRQ_DB8500_UART1, pdata)
+#define db8500_add_uart2(pdata) \
+ dbx500_add_uart("uart2", U8500_UART2_BASE, IRQ_DB8500_UART2, pdata)
#endif
diff --git a/arch/arm/mach-ux500/dma-db5500.c b/arch/arm/mach-ux500/dma-db5500.c
index 32a061f8a95b..1cfab68ae417 100644
--- a/arch/arm/mach-ux500/dma-db5500.c
+++ b/arch/arm/mach-ux500/dma-db5500.c
@@ -73,11 +73,27 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
*/
static const dma_addr_t dma40_rx_map[DB5500_DMA_NR_DEV] = {
[DB5500_DMA_DEV24_SDMMC0_RX] = -1,
+ [DB5500_DMA_DEV38_USB_OTG_IEP_8] = -1,
+ [DB5500_DMA_DEV23_USB_OTG_IEP_7_15] = -1,
+ [DB5500_DMA_DEV22_USB_OTG_IEP_6_14] = -1,
+ [DB5500_DMA_DEV21_USB_OTG_IEP_5_13] = -1,
+ [DB5500_DMA_DEV20_USB_OTG_IEP_4_12] = -1,
+ [DB5500_DMA_DEV6_USB_OTG_IEP_3_11] = -1,
+ [DB5500_DMA_DEV5_USB_OTG_IEP_2_10] = -1,
+ [DB5500_DMA_DEV4_USB_OTG_IEP_1_9] = -1,
};
/* Mapping between destination event lines and physical device address */
static const dma_addr_t dma40_tx_map[DB5500_DMA_NR_DEV] = {
[DB5500_DMA_DEV24_SDMMC0_TX] = -1,
+ [DB5500_DMA_DEV38_USB_OTG_OEP_8] = -1,
+ [DB5500_DMA_DEV23_USB_OTG_OEP_7_15] = -1,
+ [DB5500_DMA_DEV22_USB_OTG_OEP_6_14] = -1,
+ [DB5500_DMA_DEV21_USB_OTG_OEP_5_13] = -1,
+ [DB5500_DMA_DEV20_USB_OTG_OEP_4_12] = -1,
+ [DB5500_DMA_DEV6_USB_OTG_OEP_3_11] = -1,
+ [DB5500_DMA_DEV5_USB_OTG_OEP_2_10] = -1,
+ [DB5500_DMA_DEV4_USB_OTG_OEP_1_9] = -1,
};
static int dma40_memcpy_event[] = {
diff --git a/arch/arm/mach-ux500/include/mach/memory.h b/arch/arm/mach-ux500/include/mach/memory.h
index 510571a59e25..2ef697a67006 100644
--- a/arch/arm/mach-ux500/include/mach/memory.h
+++ b/arch/arm/mach-ux500/include/mach/memory.h
@@ -12,7 +12,7 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#define BUS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-ux500/include/mach/uncompress.h b/arch/arm/mach-ux500/include/mach/uncompress.h
index 9a6614c6808e..ab0fe1432fae 100644
--- a/arch/arm/mach-ux500/include/mach/uncompress.h
+++ b/arch/arm/mach-ux500/include/mach/uncompress.h
@@ -50,7 +50,11 @@ static void flush(void)
static inline void arch_decomp_setup(void)
{
- if (machine_is_u8500())
+ /* Check at run time whether we are running on a U8500 or a U5500 */
+ if (machine_is_u8500() ||
+ machine_is_svp8500v1() ||
+ machine_is_svp8500v2() ||
+ machine_is_hrefv60())
ux500_uart_base = U8500_UART2_BASE;
else if (machine_is_u5500())
ux500_uart_base = U5500_UART0_BASE;
diff --git a/arch/arm/mach-ux500/include/mach/usb.h b/arch/arm/mach-ux500/include/mach/usb.h
new file mode 100644
index 000000000000..d3739d418813
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/usb.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __ASM_ARCH_USB_H
+#define __ASM_ARCH_USB_H
+
+#include <linux/dmaengine.h>
+
+#define UX500_MUSB_DMA_NUM_RX_CHANNELS 8
+#define UX500_MUSB_DMA_NUM_TX_CHANNELS 8
+
+struct ux500_musb_board_data {
+ void **dma_rx_param_array;
+ void **dma_tx_param_array;
+ u32 num_rx_channels;
+ u32 num_tx_channels;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg,
+ int *dma_tx_cfg);
+#endif
diff --git a/arch/arm/mach-ux500/localtimer.c b/arch/arm/mach-ux500/localtimer.c
index 2288f6a7c518..5ba113309a0b 100644
--- a/arch/arm/mach-ux500/localtimer.c
+++ b/arch/arm/mach-ux500/localtimer.c
@@ -21,8 +21,9 @@
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
evt->irq = IRQ_LOCALTIMER;
twd_timer_setup(evt);
+ return 0;
}
diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
index cbf15718fc3c..a4ffb9f4f461 100644
--- a/arch/arm/mach-ux500/mbox-db5500.c
+++ b/arch/arm/mach-ux500/mbox-db5500.c
@@ -498,7 +498,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
#endif
dev_info(&(mbox->pdev->dev),
- "Mailbox driver with index %d initated!\n", mbox_id);
+ "Mailbox driver with index %d initiated!\n", mbox_id);
exit:
return mbox;
diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c
new file mode 100644
index 000000000000..82e535953fd9
--- /dev/null
+++ b/arch/arm/mach-ux500/usb.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/platform_device.h>
+#include <linux/usb/musb.h>
+#include <plat/ste_dma40.h>
+#include <mach/hardware.h>
+#include <mach/usb.h>
+
+#define MUSB_DMA40_RX_CH { \
+ .mode = STEDMA40_MODE_LOGICAL, \
+ .dir = STEDMA40_PERIPH_TO_MEM, \
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
+ .src_info.data_width = STEDMA40_WORD_WIDTH, \
+ .dst_info.data_width = STEDMA40_WORD_WIDTH, \
+ .src_info.psize = STEDMA40_PSIZE_LOG_16, \
+ .dst_info.psize = STEDMA40_PSIZE_LOG_16, \
+ }
+
+#define MUSB_DMA40_TX_CH { \
+ .mode = STEDMA40_MODE_LOGICAL, \
+ .dir = STEDMA40_MEM_TO_PERIPH, \
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
+ .src_info.data_width = STEDMA40_WORD_WIDTH, \
+ .dst_info.data_width = STEDMA40_WORD_WIDTH, \
+ .src_info.psize = STEDMA40_PSIZE_LOG_16, \
+ .dst_info.psize = STEDMA40_PSIZE_LOG_16, \
+ }
+
+static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
+ = {
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH,
+ MUSB_DMA40_RX_CH
+};
+
+static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_TX_CHANNELS]
+ = {
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+ MUSB_DMA40_TX_CH,
+};
+
+static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_CHANNELS] = {
+ &musb_dma_rx_ch[0],
+ &musb_dma_rx_ch[1],
+ &musb_dma_rx_ch[2],
+ &musb_dma_rx_ch[3],
+ &musb_dma_rx_ch[4],
+ &musb_dma_rx_ch[5],
+ &musb_dma_rx_ch[6],
+ &musb_dma_rx_ch[7]
+};
+
+static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_TX_CHANNELS] = {
+ &musb_dma_tx_ch[0],
+ &musb_dma_tx_ch[1],
+ &musb_dma_tx_ch[2],
+ &musb_dma_tx_ch[3],
+ &musb_dma_tx_ch[4],
+ &musb_dma_tx_ch[5],
+ &musb_dma_tx_ch[6],
+ &musb_dma_tx_ch[7]
+};
+
+static struct ux500_musb_board_data musb_board_data = {
+ .dma_rx_param_array = ux500_dma_rx_param_array,
+ .dma_tx_param_array = ux500_dma_tx_param_array,
+ .num_rx_channels = UX500_MUSB_DMA_NUM_RX_CHANNELS,
+ .num_tx_channels = UX500_MUSB_DMA_NUM_TX_CHANNELS,
+ .dma_filter = stedma40_filter,
+};
+
+static u64 ux500_musb_dmamask = DMA_BIT_MASK(32);
+
+static struct musb_hdrc_config musb_hdrc_config = {
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = 16,
+ .ram_bits = 16,
+};
+
+static struct musb_hdrc_platform_data musb_platform_data = {
+#if defined(CONFIG_USB_MUSB_OTG)
+ .mode = MUSB_OTG,
+#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
+ .mode = MUSB_PERIPHERAL,
+#else /* defined(CONFIG_USB_MUSB_HOST) */
+ .mode = MUSB_HOST,
+#endif
+ .config = &musb_hdrc_config,
+ .board_data = &musb_board_data,
+};
+
+static struct resource usb_resources[] = {
+ [0] = {
+ .name = "usb-mem",
+ .flags = IORESOURCE_MEM,
+ },
+
+ [1] = {
+ .name = "mc", /* hard-coded in musb */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ux500_musb_device = {
+ .name = "musb-ux500",
+ .id = 0,
+ .dev = {
+ .platform_data = &musb_platform_data,
+ .dma_mask = &ux500_musb_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_resources),
+ .resource = usb_resources,
+};
+
+static inline void ux500_usb_dma_update_rx_ch_config(int *src_dev_type)
+{
+ u32 idx;
+
+ for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_CHANNELS; idx++)
+ musb_dma_rx_ch[idx].src_dev_type = src_dev_type[idx];
+}
+
+static inline void ux500_usb_dma_update_tx_ch_config(int *dst_dev_type)
+{
+ u32 idx;
+
+ for (idx = 0; idx < UX500_MUSB_DMA_NUM_TX_CHANNELS; idx++)
+ musb_dma_tx_ch[idx].dst_dev_type = dst_dev_type[idx];
+}
+
+void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg,
+ int *dma_tx_cfg)
+{
+ ux500_musb_device.resource[0].start = base;
+ ux500_musb_device.resource[0].end = base + SZ_64K - 1;
+ ux500_musb_device.resource[1].start = irq;
+ ux500_musb_device.resource[1].end = irq;
+
+ ux500_usb_dma_update_rx_ch_config(dma_rx_cfg);
+ ux500_usb_dma_update_tx_ch_config(dma_tx_cfg);
+
+ platform_device_register(&ux500_musb_device);
+}
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 136c32e7ed8e..eb7ffa0ee8b5 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -50,6 +50,8 @@
#include <mach/platform.h>
#include <asm/hardware/timer-sp.h>
+#include <plat/clcd.h>
+#include <plat/fpga-irq.h>
#include <plat/sched_clock.h>
#include "core.h"
@@ -63,47 +65,12 @@
#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE)
#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE)
-static void sic_mask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_SIC_START;
-
- writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
-}
-
-static void sic_unmask_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq - IRQ_SIC_START;
-
- writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_SET);
-}
-
-static struct irq_chip sic_chip = {
- .name = "SIC",
- .irq_ack = sic_mask_irq,
- .irq_mask = sic_mask_irq,
- .irq_unmask = sic_unmask_irq,
+static struct fpga_irq_data sic_irq = {
+ .base = VA_SIC_BASE,
+ .irq_start = IRQ_SIC_START,
+ .chip.name = "SIC",
};
-static void
-sic_handle_irq(unsigned int irq, struct irq_desc *desc)
-{
- unsigned long status = readl(VA_SIC_BASE + SIC_IRQ_STATUS);
-
- if (status == 0) {
- do_bad_IRQ(irq, desc);
- return;
- }
-
- do {
- irq = ffs(status) - 1;
- status &= ~(1 << irq);
-
- irq += IRQ_SIC_START;
-
- generic_handle_irq(irq);
- } while (status);
-}
-
#if 1
#define IRQ_MMCI0A IRQ_VICSOURCE22
#define IRQ_AACI IRQ_VICSOURCE24
@@ -118,22 +85,11 @@ sic_handle_irq(unsigned int irq, struct irq_desc *desc)
void __init versatile_init_irq(void)
{
- unsigned int i;
-
vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0);
- set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
-
- /* Do second interrupt controller */
writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
- for (i = IRQ_SIC_START; i <= IRQ_SIC_END; i++) {
- if ((PIC_MASK & (1 << (i - IRQ_SIC_START))) == 0) {
- set_irq_chip(i, &sic_chip);
- set_irq_handler(i, handle_level_irq);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
- }
+ fpga_irq_init(IRQ_VICSOURCE31, ~PIC_MASK, &sic_irq);
/*
* Interrupts on secondary controller from 0 to 8 are routed to
@@ -476,127 +432,7 @@ static struct clk_lookup lookups[] = {
#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
#define SYS_CLCD_ID_VGA (0x1f << 8)
-static struct clcd_panel vga = {
- .mode = {
- .name = "VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel sanyo_3_8_in = {
- .mode = {
- .name = "Sanyo QVGA",
- .refresh = 116,
- .xres = 320,
- .yres = 240,
- .pixclock = 100000,
- .left_margin = 6,
- .right_margin = 6,
- .upper_margin = 5,
- .lower_margin = 5,
- .hsync_len = 6,
- .vsync_len = 6,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel sanyo_2_5_in = {
- .mode = {
- .name = "Sanyo QVGA Portrait",
- .refresh = 116,
- .xres = 240,
- .yres = 320,
- .pixclock = 100000,
- .left_margin = 20,
- .right_margin = 10,
- .upper_margin = 2,
- .lower_margin = 2,
- .hsync_len = 10,
- .vsync_len = 2,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-static struct clcd_panel epson_2_2_in = {
- .mode = {
- .name = "Epson QCIF",
- .refresh = 390,
- .xres = 176,
- .yres = 220,
- .pixclock = 62500,
- .left_margin = 3,
- .right_margin = 2,
- .upper_margin = 1,
- .lower_margin = 0,
- .hsync_len = 3,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
-/*
- * Detect which LCD panel is connected, and return the appropriate
- * clcd_panel structure. Note: we do not have any information on
- * the required timings for the 8.4in panel, so we presently assume
- * VGA timings.
- */
-static struct clcd_panel *versatile_clcd_panel(void)
-{
- void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET;
- struct clcd_panel *panel = &vga;
- u32 val;
-
- val = readl(sys_clcd) & SYS_CLCD_ID_MASK;
- if (val == SYS_CLCD_ID_SANYO_3_8)
- panel = &sanyo_3_8_in;
- else if (val == SYS_CLCD_ID_SANYO_2_5)
- panel = &sanyo_2_5_in;
- else if (val == SYS_CLCD_ID_EPSON_2_2)
- panel = &epson_2_2_in;
- else if (val == SYS_CLCD_ID_VGA)
- panel = &vga;
- else {
- printk(KERN_ERR "CLCD: unknown LCD panel ID 0x%08x, using VGA\n",
- val);
- panel = &vga;
- }
-
- return panel;
-}
+static bool is_sanyo_2_5_lcd;
/*
* Disable all display connectors on the interface module.
@@ -614,7 +450,7 @@ static void versatile_clcd_disable(struct clcd_fb *fb)
/*
* If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light off
*/
- if (machine_is_versatile_ab() && fb->panel == &sanyo_2_5_in) {
+ if (machine_is_versatile_ab() && is_sanyo_2_5_lcd) {
void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL);
unsigned long ctrl;
@@ -630,18 +466,22 @@ static void versatile_clcd_disable(struct clcd_fb *fb)
*/
static void versatile_clcd_enable(struct clcd_fb *fb)
{
+ struct fb_var_screeninfo *var = &fb->fb.var;
void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET;
u32 val;
val = readl(sys_clcd);
val &= ~SYS_CLCD_MODE_MASK;
- switch (fb->fb.var.green.length) {
+ switch (var->green.length) {
case 5:
val |= SYS_CLCD_MODE_5551;
break;
case 6:
- val |= SYS_CLCD_MODE_565_RLSB;
+ if (var->red.offset == 0)
+ val |= SYS_CLCD_MODE_565_RLSB;
+ else
+ val |= SYS_CLCD_MODE_565_BLSB;
break;
case 8:
val |= SYS_CLCD_MODE_888;
@@ -663,7 +503,7 @@ static void versatile_clcd_enable(struct clcd_fb *fb)
/*
* If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light on
*/
- if (machine_is_versatile_ab() && fb->panel == &sanyo_2_5_in) {
+ if (machine_is_versatile_ab() && is_sanyo_2_5_lcd) {
void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL);
unsigned long ctrl;
@@ -674,50 +514,62 @@ static void versatile_clcd_enable(struct clcd_fb *fb)
#endif
}
-static unsigned long framesize = SZ_1M;
-
+/*
+ * Detect which LCD panel is connected, and return the appropriate
+ * clcd_panel structure. Note: we do not have any information on
+ * the required timings for the 8.4in panel, so we presently assume
+ * VGA timings.
+ */
static int versatile_clcd_setup(struct clcd_fb *fb)
{
- dma_addr_t dma;
+ void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET;
+ const char *panel_name;
+ u32 val;
- fb->panel = versatile_clcd_panel();
+ is_sanyo_2_5_lcd = false;
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
+ val = readl(sys_clcd) & SYS_CLCD_ID_MASK;
+ if (val == SYS_CLCD_ID_SANYO_3_8)
+ panel_name = "Sanyo TM38QV67A02A";
+ else if (val == SYS_CLCD_ID_SANYO_2_5) {
+ panel_name = "Sanyo QVGA Portrait";
+ is_sanyo_2_5_lcd = true;
+ } else if (val == SYS_CLCD_ID_EPSON_2_2)
+ panel_name = "Epson L2F50113T00";
+ else if (val == SYS_CLCD_ID_VGA)
+ panel_name = "VGA";
+ else {
+ printk(KERN_ERR "CLCD: unknown LCD panel ID 0x%08x, using VGA\n",
+ val);
+ panel_name = "VGA";
}
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
+ fb->panel = versatile_clcd_get_panel(panel_name);
+ if (!fb->panel)
+ return -EINVAL;
- return 0;
+ return versatile_clcd_setup_dma(fb, SZ_1M);
}
-static int versatile_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
+static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
-}
+ clcdfb_decode(fb, regs);
-static void versatile_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ /* Always clear BGR for RGB565: we do the routing externally */
+ if (fb->fb.var.green.length == 6)
+ regs->cntl &= ~CNTL_BGR;
}
static struct clcd_board clcd_plat_data = {
.name = "Versatile",
+ .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
.check = clcdfb_check,
- .decode = clcdfb_decode,
+ .decode = versatile_clcd_decode,
.disable = versatile_clcd_disable,
.enable = versatile_clcd_enable,
.setup = versatile_clcd_setup,
- .mmap = versatile_clcd_mmap,
- .remove = versatile_clcd_remove,
+ .mmap = versatile_clcd_mmap_dma,
+ .remove = versatile_clcd_remove_dma,
};
static struct pl061_platform_data gpio0_plat_data = {
@@ -737,53 +589,35 @@ static struct pl022_ssp_controller ssp0_plat_data = {
};
#define AACI_IRQ { IRQ_AACI, NO_IRQ }
-#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_MMCI0A,IRQ_SIC_MMCI0B }
-#define MMCI0_DMA { 0x84, 0 }
#define KMI0_IRQ { IRQ_SIC_KMI0, NO_IRQ }
-#define KMI0_DMA { 0, 0 }
#define KMI1_IRQ { IRQ_SIC_KMI1, NO_IRQ }
-#define KMI1_DMA { 0, 0 }
/*
* These devices are connected directly to the multi-layer AHB switch
*/
#define SMC_IRQ { NO_IRQ, NO_IRQ }
-#define SMC_DMA { 0, 0 }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
-#define MPMC_DMA { 0, 0 }
#define CLCD_IRQ { IRQ_CLCDINT, NO_IRQ }
-#define CLCD_DMA { 0, 0 }
#define DMAC_IRQ { IRQ_DMAINT, NO_IRQ }
-#define DMAC_DMA { 0, 0 }
/*
* These devices are connected via the core APB bridge
*/
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
-#define SCTL_DMA { 0, 0 }
#define WATCHDOG_IRQ { IRQ_WDOGINT, NO_IRQ }
-#define WATCHDOG_DMA { 0, 0 }
#define GPIO0_IRQ { IRQ_GPIOINT0, NO_IRQ }
-#define GPIO0_DMA { 0, 0 }
#define GPIO1_IRQ { IRQ_GPIOINT1, NO_IRQ }
-#define GPIO1_DMA { 0, 0 }
#define RTC_IRQ { IRQ_RTCINT, NO_IRQ }
-#define RTC_DMA { 0, 0 }
/*
* These devices are connected via the DMA APB bridge
*/
#define SCI_IRQ { IRQ_SCIINT, NO_IRQ }
-#define SCI_DMA { 7, 6 }
#define UART0_IRQ { IRQ_UARTINT0, NO_IRQ }
-#define UART0_DMA { 15, 14 }
#define UART1_IRQ { IRQ_UARTINT1, NO_IRQ }
-#define UART1_DMA { 13, 12 }
#define UART2_IRQ { IRQ_UARTINT2, NO_IRQ }
-#define UART2_DMA { 11, 10 }
#define SSP_IRQ { IRQ_SSPINT, NO_IRQ }
-#define SSP_DMA { 9, 8 }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:04", AACI, NULL);
@@ -865,14 +699,21 @@ static void versatile_leds_event(led_event_t ledevt)
}
#endif /* CONFIG_LEDS */
-void __init versatile_init(void)
+/* Early initializations */
+void __init versatile_init_early(void)
{
- int i;
-
- osc4_clk.vcoreg = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSCCLCD_OFFSET;
+ void __iomem *sys = __io_address(VERSATILE_SYS_BASE);
+ osc4_clk.vcoreg = sys + VERSATILE_SYS_OSCCLCD_OFFSET;
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ versatile_sched_clock_init(sys + VERSATILE_SYS_24MHz_OFFSET, 24000000);
+}
+
+void __init versatile_init(void)
+{
+ int i;
+
platform_device_register(&versatile_flash_device);
platform_device_register(&versatile_i2c_device);
platform_device_register(&smc91x_device);
@@ -889,12 +730,6 @@ void __init versatile_init(void)
}
/*
- * The sched_clock counter
- */
-#define REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + \
- VERSATILE_SYS_24MHz_OFFSET)
-
-/*
* Where is the timer (VA)?
*/
#define TIMER0_VA_BASE __io_address(VERSATILE_TIMER0_1_BASE)
@@ -909,8 +744,6 @@ static void __init versatile_timer_init(void)
{
u32 val;
- versatile_sched_clock_init(REFCOUNTER, 24000000);
-
/*
* set clock frequency:
* VERSATILE_REFCLK is 32KHz
diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h
index 9d39886a8351..fd6404e5d788 100644
--- a/arch/arm/mach-versatile/core.h
+++ b/arch/arm/mach-versatile/core.h
@@ -25,6 +25,7 @@
#include <linux/amba/bus.h>
extern void __init versatile_init(void);
+extern void __init versatile_init_early(void);
extern void __init versatile_init_irq(void);
extern void __init versatile_map_io(void);
extern struct sys_timer versatile_timer;
@@ -44,7 +45,6 @@ static struct amba_device name##_device = { \
}, \
.dma_mask = ~0, \
.irq = base##_IRQ, \
- /* .dma = base##_DMA,*/ \
}
#endif
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h
index b5e75bb44965..6911e1f5f156 100644
--- a/arch/arm/mach-versatile/include/mach/hardware.h
+++ b/arch/arm/mach-versatile/include/mach/hardware.h
@@ -39,6 +39,6 @@
/* macro to get at IO space when running virtually */
#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
-#define __io_address(n) __io(IO_ADDRESS(n))
+#define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n))
#endif
diff --git a/arch/arm/mach-versatile/include/mach/memory.h b/arch/arm/mach-versatile/include/mach/memory.h
index 79aeab86b903..dacc9d8e4e6a 100644
--- a/arch/arm/mach-versatile/include/mach/memory.h
+++ b/arch/arm/mach-versatile/include/mach/memory.h
@@ -23,6 +23,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mach-versatile/versatile_ab.c b/arch/arm/mach-versatile/versatile_ab.c
index aa9730fb13bf..f8ae64b3eed0 100644
--- a/arch/arm/mach-versatile/versatile_ab.c
+++ b/arch/arm/mach-versatile/versatile_ab.c
@@ -37,6 +37,7 @@ MACHINE_START(VERSATILE_AB, "ARM-Versatile AB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.boot_params = 0x00000100,
.map_io = versatile_map_io,
+ .init_early = versatile_init_early,
.init_irq = versatile_init_irq,
.timer = &versatile_timer,
.init_machine = versatile_init,
diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
index bf469642a3f8..37c23dfeefb7 100644
--- a/arch/arm/mach-versatile/versatile_pb.c
+++ b/arch/arm/mach-versatile/versatile_pb.c
@@ -59,19 +59,14 @@ static struct pl061_platform_data gpio3_plat_data = {
};
#define UART3_IRQ { IRQ_SIC_UART3, NO_IRQ }
-#define UART3_DMA { 0x86, 0x87 }
#define SCI1_IRQ { IRQ_SIC_SCI3, NO_IRQ }
-#define SCI1_DMA { 0x88, 0x89 }
#define MMCI1_IRQ { IRQ_MMCI1A, IRQ_SIC_MMCI1B }
-#define MMCI1_DMA { 0x85, 0 }
/*
* These devices are connected via the core APB bridge
*/
#define GPIO2_IRQ { IRQ_GPIOINT2, NO_IRQ }
-#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_GPIOINT3, NO_IRQ }
-#define GPIO3_DMA { 0, 0 }
/*
* These devices are connected via the DMA APB bridge
@@ -110,6 +105,7 @@ MACHINE_START(VERSATILE_PB, "ARM-Versatile PB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.boot_params = 0x00000100,
.map_io = versatile_map_io,
+ .init_early = versatile_init_early,
.init_irq = versatile_init_irq,
.timer = &versatile_timer,
.init_machine = versatile_pb_init,
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 3f19b660a165..931148487f0b 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -5,5 +5,8 @@ config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
select CPU_V7
select ARM_GIC
+ select ARM_ERRATA_720789
+ select ARM_ERRATA_751472
+ select ARM_ERRATA_753970
endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 2c0ac7de2814..90551b9780ab 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -4,6 +4,5 @@
obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
index 362780d868de..e0312a1dce3a 100644
--- a/arch/arm/mach-vexpress/core.h
+++ b/arch/arm/mach-vexpress/core.h
@@ -21,4 +21,5 @@ struct amba_device name##_device = { \
struct map_desc;
void v2m_map_io(struct map_desc *tile, size_t num);
+void v2m_init_early(void);
extern struct sys_timer v2m_timer;
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index e628402b754c..30d5a5b0ac21 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -30,6 +30,8 @@
#include <mach/motherboard.h>
+#include <plat/clcd.h>
+
#define V2M_PA_CS7 0x10000000
static struct map_desc ct_ca9x4_io_desc[] __initdata = {
@@ -80,29 +82,6 @@ static struct sys_timer ct_ca9x4_timer = {
};
#endif
-static struct clcd_panel xvga_panel = {
- .mode = {
- .name = "XVGA",
- .refresh = 60,
- .xres = 1024,
- .yres = 768,
- .pixclock = 15384,
- .left_margin = 168,
- .right_margin = 8,
- .upper_margin = 29,
- .lower_margin = 3,
- .hsync_len = 144,
- .vsync_len = 6,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .bpp = 16,
-};
-
static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
{
v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
@@ -112,42 +91,23 @@ static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
{
unsigned long framesize = 1024 * 768 * 2;
- dma_addr_t dma;
-
- fb->panel = &xvga_panel;
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map frame buffer\n");
- return -ENOMEM;
- }
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-}
-
-static int ct_ca9x4_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
+ fb->panel = versatile_clcd_get_panel("XVGA");
+ if (!fb->panel)
+ return -EINVAL;
-static void ct_ca9x4_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ return versatile_clcd_setup_dma(fb, framesize);
}
static struct clcd_board ct_ca9x4_clcd_data = {
.name = "CT-CA9X4",
+ .caps = CLCD_CAP_5551 | CLCD_CAP_565,
.check = clcdfb_check,
.decode = clcdfb_decode,
.enable = ct_ca9x4_clcd_enable,
.setup = ct_ca9x4_clcd_setup,
- .mmap = ct_ca9x4_clcd_mmap,
- .remove = ct_ca9x4_clcd_remove,
+ .mmap = versatile_clcd_mmap_dma,
+ .remove = versatile_clcd_remove_dma,
};
static AMBA_DEVICE(clcd, "ct:clcd", CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
@@ -220,6 +180,13 @@ static struct platform_device pmu_device = {
.resource = pmu_resources,
};
+static void __init ct_ca9x4_init_early(void)
+{
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ v2m_init_early();
+}
+
static void __init ct_ca9x4_init(void)
{
int i;
@@ -234,8 +201,6 @@ static void __init ct_ca9x4_init(void)
l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
@@ -243,9 +208,10 @@ static void __init ct_ca9x4_init(void)
}
MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4")
- .boot_params = PHYS_OFFSET + 0x00000100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.map_io = ct_ca9x4_map_io,
.init_irq = ct_ca9x4_init_irq,
+ .init_early = ct_ca9x4_init_early,
#if 0
.timer = &ct_ca9x4_timer,
#else
diff --git a/arch/arm/mach-vexpress/include/mach/memory.h b/arch/arm/mach-vexpress/include/mach/memory.h
index be28232ae639..5b7fcd439d87 100644
--- a/arch/arm/mach-vexpress/include/mach/memory.h
+++ b/arch/arm/mach-vexpress/include/mach/memory.h
@@ -20,6 +20,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x60000000)
+#define PLAT_PHYS_OFFSET UL(0x60000000)
#endif
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index 634bf1d3a311..18927023c2cc 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -10,13 +10,9 @@
*/
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <asm/unified.h>
@@ -26,99 +22,13 @@
#include "core.h"
-extern void vexpress_secondary_startup(void);
-
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int __cpuinitdata pen_release = -1;
-
-/*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
- * or not. This is necessary for the hotplug code to work reliably.
- */
-static void __cpuinit write_pen_release(int val)
-{
- pen_release = val;
- smp_wmb();
- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
-}
+extern void versatile_secondary_startup(void);
static void __iomem *scu_base_addr(void)
{
return MMIO_P2V(A9_MPCORE_SCU);
}
-static DEFINE_SPINLOCK(boot_lock);
-
-void __cpuinit platform_secondary_init(unsigned int cpu)
-{
- /*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
- * let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(-1);
-
- /*
- * Synchronise with the boot thread.
- */
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
-}
-
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- spin_lock(&boot_lock);
-
- /*
- * This is really belt and braces; we hold unintended secondary
- * CPUs in the holding pen until we're ready for them. However,
- * since we haven't sent them a soft interrupt, they shouldn't
- * be there.
- */
- write_pen_release(cpu);
-
- /*
- * Send the secondary CPU a soft interrupt, thereby causing
- * the boot monitor to read the system wide flags register,
- * and branch to the address found there.
- */
- smp_cross_call(cpumask_of(cpu), 1);
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- smp_rmb();
- if (pen_release == -1)
- break;
-
- udelay(10);
- }
-
- /*
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
-}
-
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
@@ -163,6 +73,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* secondary CPU branches to this address.
*/
writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
- writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
+ writel(BSYM(virt_to_phys(versatile_secondary_startup)),
MMIO_P2V(V2M_SYS_FLAGSSET));
}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 1edae65a0e72..63ef663fb0be 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
@@ -48,13 +49,15 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
iotable_init(tile, num);
}
+void __init v2m_init_early(void)
+{
+ versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
+}
static void __init v2m_timer_init(void)
{
u32 scctrl;
- versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
-
/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
scctrl = readl(MMIO_P2V(V2M_SYSCTL + SCCTRL));
scctrl |= SCCTRL_TIMEREN0SEL_TIMCLK;
@@ -249,6 +252,29 @@ static struct platform_device v2m_flash_device = {
.dev.platform_data = &v2m_flash_data,
};
+static struct pata_platform_info v2m_pata_data = {
+ .ioport_shift = 2,
+};
+
+static struct resource v2m_pata_resources[] = {
+ {
+ .start = V2M_CF,
+ .end = V2M_CF + 0xff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = V2M_CF + 0x100,
+ .end = V2M_CF + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device v2m_cf_device = {
+ .name = "pata_platform",
+ .id = -1,
+ .resource = v2m_pata_resources,
+ .num_resources = ARRAY_SIZE(v2m_pata_resources),
+ .dev.platform_data = &v2m_pata_data,
+};
static unsigned int v2m_mmci_status(struct device *dev)
{
@@ -363,6 +389,7 @@ static int __init v2m_init(void)
platform_device_register(&v2m_pcie_i2c_device);
platform_device_register(&v2m_ddc_i2c_device);
platform_device_register(&v2m_flash_device);
+ platform_device_register(&v2m_cf_device);
platform_device_register(&v2m_eth_device);
platform_device_register(&v2m_usb_device);
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
new file mode 100644
index 000000000000..2c20a341c11a
--- /dev/null
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -0,0 +1,73 @@
+if ARCH_VT8500
+
+config VTWM_VERSION_VT8500
+ bool
+
+config VTWM_VERSION_WM8505
+ bool
+
+config MACH_BV07
+ bool "Benign BV07-8500 Mini Netbook"
+ depends on ARCH_VT8500
+ select VTWM_VERSION_VT8500
+ help
+ Add support for the inexpensive 7-inch netbooks sold by many
+ Chinese distributors under various names. Note that there are
+ many hardware implementations with an identical exterior; make
+ sure that yours is indeed based on a VIA VT8500 chip.
+
+config MACH_WM8505_7IN_NETBOOK
+ bool "WM8505 7-inch generic netbook"
+ depends on ARCH_VT8500
+ select VTWM_VERSION_WM8505
+ help
+ Add support for the inexpensive 7-inch netbooks sold by many
+ Chinese distributors under various names. Note that there are
+ many hardware implementations with an identical exterior; make
+ sure that yours is indeed based on a WonderMedia WM8505 chip.
+
+comment "LCD panel size"
+
+config WMT_PANEL_800X480
+ bool "7-inch with 800x480 resolution"
+ depends on (FB_VT8500 || FB_WM8505)
+ default y
+ help
+ These are found in most of the netbooks in generic cases, as
+ well as in Eken M001 tablets and possibly elsewhere.
+
+ To select this panel at runtime, say y here and append
+ 'panel=800x480' to your kernel command line. Otherwise, the
+ largest one available will be used.
+
+config WMT_PANEL_800X600
+ bool "8-inch with 800x600 resolution"
+ depends on (FB_VT8500 || FB_WM8505)
+ help
+ These are found in Eken M003 tablets and possibly elsewhere.
+
+ To select this panel at runtime, say y here and append
+ 'panel=800x600' to your kernel command line. Otherwise, the
+ largest one available will be used.
+
+config WMT_PANEL_1024X576
+ bool "10-inch with 1024x576 resolution"
+ depends on (FB_VT8500 || FB_WM8505)
+ help
+ These are found in CherryPal netbooks and possibly elsewhere.
+
+ To select this panel at runtime, say y here and append
+ 'panel=1024x576' to your kernel command line. Otherwise, the
+ largest one available will be used.
+
+config WMT_PANEL_1024X600
+ bool "10-inch with 1024x600 resolution"
+ depends on (FB_VT8500 || FB_WM8505)
+ help
+ These are found in Eken M006 tablets and possibly elsewhere.
+
+ To select this panel at runtime, say y here and append
+ 'panel=1024x600' to your kernel command line. Otherwise, the
+ largest one available will be used.
+
+endif
diff --git a/arch/arm/mach-vt8500/Makefile b/arch/arm/mach-vt8500/Makefile
new file mode 100644
index 000000000000..81aedb7c893c
--- /dev/null
+++ b/arch/arm/mach-vt8500/Makefile
@@ -0,0 +1,9 @@
+obj-y += devices.o gpio.o irq.o timer.o
+
+obj-$(CONFIG_VTWM_VERSION_VT8500) += devices-vt8500.o
+obj-$(CONFIG_VTWM_VERSION_WM8505) += devices-wm8505.o
+
+obj-$(CONFIG_MACH_BV07) += bv07.o
+obj-$(CONFIG_MACH_WM8505_7IN_NETBOOK) += wm8505_7in.o
+
+obj-$(CONFIG_HAVE_PWM) += pwm.o
diff --git a/arch/arm/mach-vt8500/Makefile.boot b/arch/arm/mach-vt8500/Makefile.boot
new file mode 100644
index 000000000000..a8acc4e24902
--- /dev/null
+++ b/arch/arm/mach-vt8500/Makefile.boot
@@ -0,0 +1,3 @@
+ zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x01000000
diff --git a/arch/arm/mach-vt8500/bv07.c b/arch/arm/mach-vt8500/bv07.c
new file mode 100644
index 000000000000..94a261d86bf0
--- /dev/null
+++ b/arch/arm/mach-vt8500/bv07.c
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-vt8500/bv07.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/io.h>
+#include <linux/pm.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include "devices.h"
+
+static void __iomem *pmc_hiber;
+
+static struct platform_device *devices[] __initdata = {
+ &vt8500_device_uart0,
+ &vt8500_device_lcdc,
+ &vt8500_device_ehci,
+ &vt8500_device_ge_rops,
+ &vt8500_device_pwm,
+ &vt8500_device_pwmbl,
+ &vt8500_device_rtc,
+};
+
+static void vt8500_power_off(void)
+{
+ local_irq_disable();
+ writew(5, pmc_hiber);
+ asm("mcr%? p15, 0, %0, c7, c0, 4" : : "r" (0));
+}
+
+void __init bv07_init(void)
+{
+#ifdef CONFIG_FB_VT8500
+ void __iomem *gpio_mux_reg = ioremap(wmt_gpio_base + 0x200, 4);
+ if (gpio_mux_reg) {
+ writel(readl(gpio_mux_reg) | 1, gpio_mux_reg);
+ iounmap(gpio_mux_reg);
+ } else {
+ printk(KERN_ERR "Could not remap the GPIO mux register, display may not work properly!\n");
+ }
+#endif
+ pmc_hiber = ioremap(wmt_pmc_base + 0x12, 2);
+ if (pmc_hiber)
+ pm_power_off = &vt8500_power_off;
+ else
+ printk(KERN_ERR "PMC Hibernation register could not be remapped, not enabling power off!\n");
+
+ vt8500_set_resources();
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ vt8500_gpio_init();
+}
+
+MACHINE_START(BV07, "Benign BV07 Mini Netbook")
+ .boot_params = 0x00000100,
+ .reserve = vt8500_reserve_mem,
+ .map_io = vt8500_map_io,
+ .init_irq = vt8500_init_irq,
+ .timer = &vt8500_timer,
+ .init_machine = bv07_init,
+MACHINE_END
diff --git a/arch/arm/mach-vt8500/devices-vt8500.c b/arch/arm/mach-vt8500/devices-vt8500.c
new file mode 100644
index 000000000000..19519aeecf37
--- /dev/null
+++ b/arch/arm/mach-vt8500/devices-vt8500.c
@@ -0,0 +1,91 @@
+/* linux/arch/arm/mach-vt8500/devices-vt8500.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#include <mach/vt8500_regs.h>
+#include <mach/vt8500_irqs.h>
+#include <mach/i8042.h>
+#include "devices.h"
+
+void __init vt8500_set_resources(void)
+{
+ struct resource tmp[3];
+
+ tmp[0] = wmt_mmio_res(VT8500_LCDC_BASE, SZ_1K);
+ tmp[1] = wmt_irq_res(IRQ_LCDC);
+ wmt_res_add(&vt8500_device_lcdc, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(VT8500_UART0_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART0);
+ wmt_res_add(&vt8500_device_uart0, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(VT8500_UART1_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART1);
+ wmt_res_add(&vt8500_device_uart1, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(VT8500_UART2_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART2);
+ wmt_res_add(&vt8500_device_uart2, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(VT8500_UART3_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART3);
+ wmt_res_add(&vt8500_device_uart3, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(VT8500_EHCI_BASE, SZ_512);
+ tmp[1] = wmt_irq_res(IRQ_EHCI);
+ wmt_res_add(&vt8500_device_ehci, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(VT8500_GEGEA_BASE, SZ_256);
+ wmt_res_add(&vt8500_device_ge_rops, tmp, 1);
+
+ tmp[0] = wmt_mmio_res(VT8500_PWM_BASE, 0x44);
+ wmt_res_add(&vt8500_device_pwm, tmp, 1);
+
+ tmp[0] = wmt_mmio_res(VT8500_RTC_BASE, 0x2c);
+ tmp[1] = wmt_irq_res(IRQ_RTC);
+ tmp[2] = wmt_irq_res(IRQ_RTCSM);
+ wmt_res_add(&vt8500_device_rtc, tmp, 3);
+}
+
+static void __init vt8500_set_externs(void)
+{
+ /* Non-resource-aware stuff */
+ wmt_ic_base = VT8500_IC_BASE;
+ wmt_gpio_base = VT8500_GPIO_BASE;
+ wmt_pmc_base = VT8500_PMC_BASE;
+ wmt_i8042_base = VT8500_PS2_BASE;
+
+ wmt_nr_irqs = VT8500_NR_IRQS;
+ wmt_timer_irq = IRQ_PMCOS0;
+ wmt_gpio_ext_irq[0] = IRQ_EXT0;
+ wmt_gpio_ext_irq[1] = IRQ_EXT1;
+ wmt_gpio_ext_irq[2] = IRQ_EXT2;
+ wmt_gpio_ext_irq[3] = IRQ_EXT3;
+ wmt_gpio_ext_irq[4] = IRQ_EXT4;
+ wmt_gpio_ext_irq[5] = IRQ_EXT5;
+ wmt_gpio_ext_irq[6] = IRQ_EXT6;
+ wmt_gpio_ext_irq[7] = IRQ_EXT7;
+ wmt_i8042_kbd_irq = IRQ_PS2KBD;
+ wmt_i8042_aux_irq = IRQ_PS2MOUSE;
+}
+
+void __init vt8500_map_io(void)
+{
+ iotable_init(wmt_io_desc, ARRAY_SIZE(wmt_io_desc));
+
+ /* Should be done before interrupts and timers are initialized */
+ vt8500_set_externs();
+}
diff --git a/arch/arm/mach-vt8500/devices-wm8505.c b/arch/arm/mach-vt8500/devices-wm8505.c
new file mode 100644
index 000000000000..db4594e029f4
--- /dev/null
+++ b/arch/arm/mach-vt8500/devices-wm8505.c
@@ -0,0 +1,99 @@
+/* linux/arch/arm/mach-vt8500/devices-wm8505.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#include <mach/wm8505_regs.h>
+#include <mach/wm8505_irqs.h>
+#include <mach/i8042.h>
+#include "devices.h"
+
+void __init wm8505_set_resources(void)
+{
+ struct resource tmp[3];
+
+ tmp[0] = wmt_mmio_res(WM8505_GOVR_BASE, SZ_512);
+ wmt_res_add(&vt8500_device_wm8505_fb, tmp, 1);
+
+ tmp[0] = wmt_mmio_res(WM8505_UART0_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART0);
+ wmt_res_add(&vt8500_device_uart0, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_UART1_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART1);
+ wmt_res_add(&vt8500_device_uart1, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_UART2_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART2);
+ wmt_res_add(&vt8500_device_uart2, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_UART3_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART3);
+ wmt_res_add(&vt8500_device_uart3, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_UART4_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART4);
+ wmt_res_add(&vt8500_device_uart4, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_UART5_BASE, 0x1040);
+ tmp[1] = wmt_irq_res(IRQ_UART5);
+ wmt_res_add(&vt8500_device_uart5, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_EHCI_BASE, SZ_512);
+ tmp[1] = wmt_irq_res(IRQ_EHCI);
+ wmt_res_add(&vt8500_device_ehci, tmp, 2);
+
+ tmp[0] = wmt_mmio_res(WM8505_GEGEA_BASE, SZ_256);
+ wmt_res_add(&vt8500_device_ge_rops, tmp, 1);
+
+ tmp[0] = wmt_mmio_res(WM8505_PWM_BASE, 0x44);
+ wmt_res_add(&vt8500_device_pwm, tmp, 1);
+
+ tmp[0] = wmt_mmio_res(WM8505_RTC_BASE, 0x2c);
+ tmp[1] = wmt_irq_res(IRQ_RTC);
+ tmp[2] = wmt_irq_res(IRQ_RTCSM);
+ wmt_res_add(&vt8500_device_rtc, tmp, 3);
+}
+
+static void __init wm8505_set_externs(void)
+{
+ /* Non-resource-aware stuff */
+ wmt_ic_base = WM8505_IC_BASE;
+ wmt_sic_base = WM8505_SIC_BASE;
+ wmt_gpio_base = WM8505_GPIO_BASE;
+ wmt_pmc_base = WM8505_PMC_BASE;
+ wmt_i8042_base = WM8505_PS2_BASE;
+
+ wmt_nr_irqs = WM8505_NR_IRQS;
+ wmt_timer_irq = IRQ_PMCOS0;
+ wmt_gpio_ext_irq[0] = IRQ_EXT0;
+ wmt_gpio_ext_irq[1] = IRQ_EXT1;
+ wmt_gpio_ext_irq[2] = IRQ_EXT2;
+ wmt_gpio_ext_irq[3] = IRQ_EXT3;
+ wmt_gpio_ext_irq[4] = IRQ_EXT4;
+ wmt_gpio_ext_irq[5] = IRQ_EXT5;
+ wmt_gpio_ext_irq[6] = IRQ_EXT6;
+ wmt_gpio_ext_irq[7] = IRQ_EXT7;
+ wmt_i8042_kbd_irq = IRQ_PS2KBD;
+ wmt_i8042_aux_irq = IRQ_PS2MOUSE;
+}
+
+void __init wm8505_map_io(void)
+{
+ iotable_init(wmt_io_desc, ARRAY_SIZE(wmt_io_desc));
+
+ /* Should be done before interrupts and timers are initialized */
+ wm8505_set_externs();
+}
diff --git a/arch/arm/mach-vt8500/devices.c b/arch/arm/mach-vt8500/devices.c
new file mode 100644
index 000000000000..1fcdc36b358d
--- /dev/null
+++ b/arch/arm/mach-vt8500/devices.c
@@ -0,0 +1,270 @@
+/* linux/arch/arm/mach-vt8500/devices.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/pwm_backlight.h>
+#include <linux/memblock.h>
+
+#include <asm/mach/arch.h>
+
+#include <mach/vt8500fb.h>
+#include <mach/i8042.h>
+#include "devices.h"
+
+/* These can't use resources currently */
+unsigned long wmt_ic_base __initdata;
+unsigned long wmt_sic_base __initdata;
+unsigned long wmt_gpio_base __initdata;
+unsigned long wmt_pmc_base __initdata;
+unsigned long wmt_i8042_base __initdata;
+
+int wmt_nr_irqs __initdata;
+int wmt_timer_irq __initdata;
+int wmt_gpio_ext_irq[8] __initdata;
+
+/* Should remain accessible after init.
+ * i8042 driver desperately calls for attention...
+ */
+int wmt_i8042_kbd_irq;
+int wmt_i8042_aux_irq;
+
+static u64 fb_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device vt8500_device_lcdc = {
+ .name = "vt8500-lcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = &fb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device vt8500_device_wm8505_fb = {
+ .name = "wm8505-fb",
+ .id = 0,
+};
+
+/* Smallest to largest */
+static struct vt8500fb_platform_data panels[] = {
+#ifdef CONFIG_WMT_PANEL_800X480
+{
+ .xres_virtual = 800,
+ .yres_virtual = 480 * 2,
+ .mode = {
+ .name = "800x480",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 88,
+ .right_margin = 40,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 0,
+ .vsync_len = 1,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+},
+#endif
+#ifdef CONFIG_WMT_PANEL_800X600
+{
+ .xres_virtual = 800,
+ .yres_virtual = 600 * 2,
+ .mode = {
+ .name = "800x600",
+ .xres = 800,
+ .yres = 600,
+ .left_margin = 88,
+ .right_margin = 40,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 0,
+ .vsync_len = 1,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+},
+#endif
+#ifdef CONFIG_WMT_PANEL_1024X576
+{
+ .xres_virtual = 1024,
+ .yres_virtual = 576 * 2,
+ .mode = {
+ .name = "1024x576",
+ .xres = 1024,
+ .yres = 576,
+ .left_margin = 40,
+ .right_margin = 24,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+},
+#endif
+#ifdef CONFIG_WMT_PANEL_1024X600
+{
+ .xres_virtual = 1024,
+ .yres_virtual = 600 * 2,
+ .mode = {
+ .name = "1024x600",
+ .xres = 1024,
+ .yres = 600,
+ .left_margin = 66,
+ .right_margin = 2,
+ .upper_margin = 19,
+ .lower_margin = 1,
+ .hsync_len = 23,
+ .vsync_len = 8,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+},
+#endif
+};
+
+static int current_panel_idx __initdata = ARRAY_SIZE(panels) - 1;
+
+static int __init panel_setup(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(panels); i++) {
+ if (strcmp(panels[i].mode.name, str) == 0) {
+ current_panel_idx = i;
+ break;
+ }
+ }
+ return 0;
+}
+
+early_param("panel", panel_setup);
+
+static inline void preallocate_fb(struct vt8500fb_platform_data *p,
+ unsigned long align) {
+ p->video_mem_len = (p->xres_virtual * p->yres_virtual * 4) >>
+ (p->bpp > 16 ? 0 : (p->bpp > 8 ? 1 :
+ (8 / p->bpp) + 1));
+ p->video_mem_phys = (unsigned long)memblock_alloc(p->video_mem_len,
+ align);
+ p->video_mem_virt = phys_to_virt(p->video_mem_phys);
+}
+
+struct platform_device vt8500_device_uart0 = {
+ .name = "vt8500_serial",
+ .id = 0,
+};
+
+struct platform_device vt8500_device_uart1 = {
+ .name = "vt8500_serial",
+ .id = 1,
+};
+
+struct platform_device vt8500_device_uart2 = {
+ .name = "vt8500_serial",
+ .id = 2,
+};
+
+struct platform_device vt8500_device_uart3 = {
+ .name = "vt8500_serial",
+ .id = 3,
+};
+
+struct platform_device vt8500_device_uart4 = {
+ .name = "vt8500_serial",
+ .id = 4,
+};
+
+struct platform_device vt8500_device_uart5 = {
+ .name = "vt8500_serial",
+ .id = 5,
+};
+
+static u64 ehci_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device vt8500_device_ehci = {
+ .name = "vt8500-ehci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ehci_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device vt8500_device_ge_rops = {
+ .name = "wmt_ge_rops",
+ .id = -1,
+};
+
+struct platform_device vt8500_device_pwm = {
+ .name = "vt8500-pwm",
+ .id = 0,
+};
+
+static struct platform_pwm_backlight_data vt8500_pwmbl_data = {
+ .pwm_id = 0,
+ .max_brightness = 128,
+ .dft_brightness = 70,
+ .pwm_period_ns = 250000, /* revisit when clocks are implemented */
+};
+
+struct platform_device vt8500_device_pwmbl = {
+ .name = "pwm-backlight",
+ .id = 0,
+ .dev = {
+ .platform_data = &vt8500_pwmbl_data,
+ },
+};
+
+struct platform_device vt8500_device_rtc = {
+ .name = "vt8500-rtc",
+ .id = 0,
+};
+
+struct map_desc wmt_io_desc[] __initdata = {
+ /* SoC MMIO registers */
+ [0] = {
+ .virtual = 0xf8000000,
+ .pfn = __phys_to_pfn(0xd8000000),
+ .length = 0x00390000, /* max of all chip variants */
+ .type = MT_DEVICE
+ },
+ /* PCI I/O space, numbers tied to those in <mach/io.h> */
+ [1] = {
+ .virtual = 0xf0000000,
+ .pfn = __phys_to_pfn(0xc0000000),
+ .length = SZ_64K,
+ .type = MT_DEVICE
+ },
+};
+
+void __init vt8500_reserve_mem(void)
+{
+#ifdef CONFIG_FB_VT8500
+ panels[current_panel_idx].bpp = 16; /* Always use RGB565 */
+ preallocate_fb(&panels[current_panel_idx], SZ_4M);
+ vt8500_device_lcdc.dev.platform_data = &panels[current_panel_idx];
+#endif
+}
+
+void __init wm8505_reserve_mem(void)
+{
+#if defined CONFIG_FB_WM8505
+ panels[current_panel_idx].bpp = 32; /* Always use RGB888 */
+ preallocate_fb(&panels[current_panel_idx], 32);
+ vt8500_device_wm8505_fb.dev.platform_data = &panels[current_panel_idx];
+#endif
+}
diff --git a/arch/arm/mach-vt8500/devices.h b/arch/arm/mach-vt8500/devices.h
new file mode 100644
index 000000000000..188d4e17f35c
--- /dev/null
+++ b/arch/arm/mach-vt8500/devices.h
@@ -0,0 +1,88 @@
+/* linux/arch/arm/mach-vt8500/devices.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_VT8500_DEVICES_H
+#define __ARCH_ARM_MACH_VT8500_DEVICES_H
+
+#include <linux/platform_device.h>
+#include <asm/mach/map.h>
+
+void __init vt8500_init_irq(void);
+void __init wm8505_init_irq(void);
+void __init vt8500_map_io(void);
+void __init wm8505_map_io(void);
+void __init vt8500_reserve_mem(void);
+void __init wm8505_reserve_mem(void);
+void __init vt8500_gpio_init(void);
+void __init vt8500_set_resources(void);
+void __init wm8505_set_resources(void);
+
+extern unsigned long wmt_ic_base __initdata;
+extern unsigned long wmt_sic_base __initdata;
+extern unsigned long wmt_gpio_base __initdata;
+extern unsigned long wmt_pmc_base __initdata;
+
+extern int wmt_nr_irqs __initdata;
+extern int wmt_timer_irq __initdata;
+extern int wmt_gpio_ext_irq[8] __initdata;
+
+extern struct map_desc wmt_io_desc[2] __initdata;
+
+static inline struct resource wmt_mmio_res(u32 start, u32 size)
+{
+ struct resource tmp = {
+ .flags = IORESOURCE_MEM,
+ .start = start,
+ .end = start + size - 1,
+ };
+
+ return tmp;
+}
+
+static inline struct resource wmt_irq_res(int irq)
+{
+ struct resource tmp = {
+ .flags = IORESOURCE_IRQ,
+ .start = irq,
+ .end = irq,
+ };
+
+ return tmp;
+}
+
+static inline void wmt_res_add(struct platform_device *pdev,
+ const struct resource *res, unsigned int num)
+{
+ if (unlikely(platform_device_add_resources(pdev, res, num)))
+ pr_err("Failed to assign resources\n");
+}
+
+extern struct sys_timer vt8500_timer;
+
+extern struct platform_device vt8500_device_uart0;
+extern struct platform_device vt8500_device_uart1;
+extern struct platform_device vt8500_device_uart2;
+extern struct platform_device vt8500_device_uart3;
+extern struct platform_device vt8500_device_uart4;
+extern struct platform_device vt8500_device_uart5;
+
+extern struct platform_device vt8500_device_lcdc;
+extern struct platform_device vt8500_device_wm8505_fb;
+extern struct platform_device vt8500_device_ehci;
+extern struct platform_device vt8500_device_ge_rops;
+extern struct platform_device vt8500_device_pwm;
+extern struct platform_device vt8500_device_pwmbl;
+extern struct platform_device vt8500_device_rtc;
+#endif
diff --git a/arch/arm/mach-vt8500/gpio.c b/arch/arm/mach-vt8500/gpio.c
new file mode 100644
index 000000000000..2bcc0ec783df
--- /dev/null
+++ b/arch/arm/mach-vt8500/gpio.c
@@ -0,0 +1,240 @@
+/* linux/arch/arm/mach-vt8500/gpio.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include "devices.h"
+
+#define to_vt8500(__chip) container_of(__chip, struct vt8500_gpio_chip, chip)
+
+#define ENABLE_REGS 0x0
+#define DIRECTION_REGS 0x20
+#define OUTVALUE_REGS 0x40
+#define INVALUE_REGS 0x60
+
+#define EXT_REGOFF 0x1c
+
+static void __iomem *regbase;
+
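+/* A bank of peripheral pins reused as GPIOs: 'regoff' selects the register
+ * group, 'shift' is the first bit of the bank within that register. */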
+struct vt8500_gpio_chip {
+ struct gpio_chip chip;
+ unsigned int shift;
+ unsigned int regoff;
+};
+
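+/* IRQ numbers for the eight dedicated external GPIO lines, copied from
+ * wmt_gpio_ext_irq in vt8500_gpio_init(). */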
+static int gpio_to_irq_map[8];
+
+static int vt8500_muxed_gpio_request(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
+ unsigned val = readl(regbase + ENABLE_REGS + vt8500_chip->regoff);
+
+ val |= (1 << vt8500_chip->shift << offset);
+ writel(val, regbase + ENABLE_REGS + vt8500_chip->regoff);
+
+ return 0;
+}
+
+static void vt8500_muxed_gpio_free(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
+ unsigned val = readl(regbase + ENABLE_REGS + vt8500_chip->regoff);
+
+ val &= ~(1 << vt8500_chip->shift << offset);
+ writel(val, regbase + ENABLE_REGS + vt8500_chip->regoff);
+}
+
+static int vt8500_muxed_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
+ unsigned val = readl(regbase + DIRECTION_REGS + vt8500_chip->regoff);
+
+ val &= ~(1 << vt8500_chip->shift << offset);
+ writel(val, regbase + DIRECTION_REGS + vt8500_chip->regoff);
+
+ return 0;
+}
+
+static int vt8500_muxed_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
+ unsigned val = readl(regbase + DIRECTION_REGS + vt8500_chip->regoff);
+
+ val |= (1 << vt8500_chip->shift << offset);
+ writel(val, regbase + DIRECTION_REGS + vt8500_chip->regoff);
+
+ if (value) {
+ val = readl(regbase + OUTVALUE_REGS + vt8500_chip->regoff);
+ val |= (1 << vt8500_chip->shift << offset);
+ writel(val, regbase + OUTVALUE_REGS + vt8500_chip->regoff);
+ }
+ return 0;
+}
+
+static int vt8500_muxed_gpio_get_value(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
+
+ return (readl(regbase + INVALUE_REGS + vt8500_chip->regoff)
+ >> vt8500_chip->shift >> offset) & 1;
+}
+
+static void vt8500_muxed_gpio_set_value(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
+ unsigned val = readl(regbase + INVALUE_REGS + vt8500_chip->regoff);
+
+ if (value)
+ val |= (1 << vt8500_chip->shift << offset);
+ else
+ val &= ~(1 << vt8500_chip->shift << offset);
+
+ writel(val, regbase + INVALUE_REGS + vt8500_chip->regoff);
+}
+
+#define VT8500_GPIO_BANK(__name, __shift, __off, __base, __num) \
+{ \
+ .chip = { \
+ .label = __name, \
+ .request = vt8500_muxed_gpio_request, \
+ .free = vt8500_muxed_gpio_free, \
+ .direction_input = vt8500_muxed_gpio_direction_input, \
+ .direction_output = vt8500_muxed_gpio_direction_output, \
+ .get = vt8500_muxed_gpio_get_value, \
+ .set = vt8500_muxed_gpio_set_value, \
+ .can_sleep = 0, \
+ .base = __base, \
+ .ngpio = __num, \
+ }, \
+ .shift = __shift, \
+ .regoff = __off, \
+}
+
+static struct vt8500_gpio_chip vt8500_muxed_gpios[] = {
+ VT8500_GPIO_BANK("uart0", 0, 0x0, 8, 4),
+ VT8500_GPIO_BANK("uart1", 4, 0x0, 12, 4),
+ VT8500_GPIO_BANK("spi0", 8, 0x0, 16, 4),
+ VT8500_GPIO_BANK("spi1", 12, 0x0, 20, 4),
+ VT8500_GPIO_BANK("spi2", 16, 0x0, 24, 4),
+ VT8500_GPIO_BANK("pwmout", 24, 0x0, 28, 2),
+
+ VT8500_GPIO_BANK("sdmmc", 0, 0x4, 30, 11),
+ VT8500_GPIO_BANK("ms", 16, 0x4, 41, 7),
+ VT8500_GPIO_BANK("i2c0", 24, 0x4, 48, 2),
+ VT8500_GPIO_BANK("i2c1", 26, 0x4, 50, 2),
+
+ VT8500_GPIO_BANK("mii", 0, 0x8, 52, 20),
+ VT8500_GPIO_BANK("see", 20, 0x8, 72, 4),
+ VT8500_GPIO_BANK("ide", 24, 0x8, 76, 7),
+
+ VT8500_GPIO_BANK("ccir", 0, 0xc, 83, 19),
+
+ VT8500_GPIO_BANK("ts", 8, 0x10, 102, 11),
+
+ VT8500_GPIO_BANK("lcd", 0, 0x14, 113, 23),
+};
+
+static int vt8500_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ unsigned val = readl(regbase + DIRECTION_REGS + EXT_REGOFF);
+
+ val &= ~(1 << offset);
+ writel(val, regbase + DIRECTION_REGS + EXT_REGOFF);
+ return 0;
+}
+
+static int vt8500_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ unsigned val = readl(regbase + DIRECTION_REGS + EXT_REGOFF);
+
+ val |= (1 << offset);
+ writel(val, regbase + DIRECTION_REGS + EXT_REGOFF);
+
+ if (value) {
+ val = readl(regbase + OUTVALUE_REGS + EXT_REGOFF);
+ val |= (1 << offset);
+ writel(val, regbase + OUTVALUE_REGS + EXT_REGOFF);
+ }
+ return 0;
+}
+
+static int vt8500_gpio_get_value(struct gpio_chip *chip,
+ unsigned offset)
+{
+ return (readl(regbase + INVALUE_REGS + EXT_REGOFF) >> offset) & 1;
+}
+
+static void vt8500_gpio_set_value(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ unsigned val = readl(regbase + OUTVALUE_REGS + EXT_REGOFF);
+
+ if (value)
+ val |= (1 << offset);
+ else
+ val &= ~(1 << offset);
+
+ writel(val, regbase + OUTVALUE_REGS + EXT_REGOFF);
+}
+
+static int vt8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset > 7)
+ return -EINVAL;
+
+ return gpio_to_irq_map[offset];
+}
+
+static struct gpio_chip vt8500_external_gpios = {
+ .label = "extgpio",
+ .direction_input = vt8500_gpio_direction_input,
+ .direction_output = vt8500_gpio_direction_output,
+ .get = vt8500_gpio_get_value,
+ .set = vt8500_gpio_set_value,
+ .to_irq = vt8500_gpio_to_irq,
+ .can_sleep = 0,
+ .base = 0,
+ .ngpio = 8,
+};
+
+void __init vt8500_gpio_init(void)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ gpio_to_irq_map[i] = wmt_gpio_ext_irq[i];
+
+ regbase = ioremap(wmt_gpio_base, SZ_64K);
+ if (!regbase) {
+ printk(KERN_ERR "Failed to map MMIO registers for GPIO\n");
+ return;
+ }
+
+ gpiochip_add(&vt8500_external_gpios);
+
+ for (i = 0; i < ARRAY_SIZE(vt8500_muxed_gpios); i++)
+ gpiochip_add(&vt8500_muxed_gpios[i].chip);
+}
diff --git a/arch/arm/mach-vt8500/include/mach/debug-macro.S b/arch/arm/mach-vt8500/include/mach/debug-macro.S
new file mode 100644
index 000000000000..f1191626ad51
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/debug-macro.S
@@ -0,0 +1,31 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/debug-macro.S
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * Debugging macro include header
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+ .macro addruart, rp, rv
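+ @ rp: physical UART0 base (0xd8200000), rv: its virtual mapping (0xf8200000)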
+ mov \rp, #0x00200000
+ orr \rv, \rp, #0xf8000000
+ orr \rp, \rp, #0xd8000000
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx, #0]
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x1c]
+ ands \rd, \rd, #0x2
+ bne 1001b
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
diff --git a/arch/arm/mach-vt8500/include/mach/entry-macro.S b/arch/arm/mach-vt8500/include/mach/entry-macro.S
new file mode 100644
index 000000000000..92684c7eaed3
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/entry-macro.S
@@ -0,0 +1,32 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/entry-macro.S
+ *
+ * Low-level IRQ helper macros for VIA VT8500
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ @ physical 0xd8140000 is virtual 0xf8140000
+ mov \base, #0xf8000000
+ orr \base, \base, #0x00140000
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \irqnr, [\base]
+ cmp \irqnr, #63 @ may be false positive, check interrupt status
+ bne 1001f
+ ldr \irqstat, [\base, #0x84]
+ ands \irqstat, #0x80000000
+ moveq \irqnr, #0
+1001:
+ .endm
+
diff --git a/arch/arm/mach-vt8500/include/mach/gpio.h b/arch/arm/mach-vt8500/include/mach/gpio.h
new file mode 100644
index 000000000000..94ff27678a46
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/gpio.h
@@ -0,0 +1,6 @@
+#include <asm-generic/gpio.h>
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.h b/arch/arm/mach-vt8500/include/mach/hardware.h
index f8c1adba96a6..db4163f72c39 100644
--- a/arch/arm/mach-tegra/tegra2_dvfs.h
+++ b/arch/arm/mach-vt8500/include/mach/hardware.h
@@ -1,10 +1,4 @@
-/*
- * arch/arm/mach-tegra/tegra2_dvfs.h
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@google.com>
+/* arch/arm/mach-vt8500/include/mach/hardware.h
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,5 +10,3 @@
* GNU General Public License for more details.
*
*/
-
-extern struct dvfs tegra_dvfs_virtual_cpu_dvfs;
diff --git a/arch/arm/mach-vt8500/include/mach/i8042.h b/arch/arm/mach-vt8500/include/mach/i8042.h
new file mode 100644
index 000000000000..cd7143cad6f3
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/i8042.h
@@ -0,0 +1,18 @@
+/* arch/arm/mach-vt8500/include/mach/i8042.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern unsigned long wmt_i8042_base __initdata;
+extern int wmt_i8042_kbd_irq;
+extern int wmt_i8042_aux_irq;
diff --git a/arch/arm/mach-vt8500/include/mach/io.h b/arch/arm/mach-vt8500/include/mach/io.h
new file mode 100644
index 000000000000..9077239f78c9
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/io.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/io.h
+ *
+ * Copyright (C) 2010 Alexey Charkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffff
+
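+/* Port I/O goes through the fixed PCI I/O window at virtual 0xf0000000,
+ * matching the 64K entry in wmt_io_desc[]. */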
+#define __io(a) __typesafe_io((a) + 0xf0000000)
+#define __mem_pci(a) (a)
+
+#endif
diff --git a/arch/arm/mach-vt8500/include/mach/irqs.h b/arch/arm/mach-vt8500/include/mach/irqs.h
new file mode 100644
index 000000000000..a129fd1222fb
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/irqs.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/irqs.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* This value is just to make the core happy, never used otherwise */
+#define NR_IRQS 128
diff --git a/arch/arm/mach-vt8500/include/mach/memory.h b/arch/arm/mach-vt8500/include/mach/memory.h
new file mode 100644
index 000000000000..175f914eff93
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/memory.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/memory.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+/*
+ * Physical DRAM offset.
+ */
+#define PHYS_OFFSET UL(0x00000000)
+
+#endif
diff --git a/arch/arm/mach-vt8500/include/mach/system.h b/arch/arm/mach-vt8500/include/mach/system.h
new file mode 100644
index 000000000000..d6c757eaf26b
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/system.h
@@ -0,0 +1,18 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/system.h
+ *
+ */
+#include <asm/io.h>
+
+/* PM Software Reset request register */
+#define VT8500_PMSR_VIRT 0xf8130060
+
+static inline void arch_idle(void)
+{
+ cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+ writel(1, VT8500_PMSR_VIRT);
+}
diff --git a/arch/arm/mach-vt8500/include/mach/timex.h b/arch/arm/mach-vt8500/include/mach/timex.h
new file mode 100644
index 000000000000..8487e4c690b7
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/timex.h
@@ -0,0 +1,26 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/timex.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef MACH_TIMEX_H
+#define MACH_TIMEX_H
+
+#define CLOCK_TICK_RATE (3000000)
+
+#endif /* MACH_TIMEX_H */
diff --git a/arch/arm/mach-vt8500/include/mach/uncompress.h b/arch/arm/mach-vt8500/include/mach/uncompress.h
new file mode 100644
index 000000000000..bb9e2d23fee3
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/uncompress.h
@@ -0,0 +1,37 @@
+/* arch/arm/mach-vt8500/include/mach/uncompress.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * Based on arch/arm/mach-dove/include/mach/uncompress.h
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define UART0_PHYS 0xd8200000
+#include <asm/io.h>
+
+static void putc(const char c)
+{
+ while (readb(UART0_PHYS + 0x1c) & 0x2)
+ /* Tx busy, wait and poll */;
+
+ writeb(c, UART0_PHYS);
+}
+
+static void flush(void)
+{
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
diff --git a/arch/arm/mach-vt8500/include/mach/vmalloc.h b/arch/arm/mach-vt8500/include/mach/vmalloc.h
new file mode 100644
index 000000000000..4642290ce416
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/vmalloc.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/vmalloc.h
+ *
+ * Copyright (C) 2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-vt8500/include/mach/vt8500_irqs.h b/arch/arm/mach-vt8500/include/mach/vt8500_irqs.h
new file mode 100644
index 000000000000..ecfee9124711
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/vt8500_irqs.h
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/vt8500_irqs.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* VT8500 Interrupt Sources */
+
+#define IRQ_JPEGENC 0 /* JPEG Encoder */
+#define IRQ_JPEGDEC 1 /* JPEG Decoder */
+ /* Reserved */
+#define IRQ_PATA 3 /* PATA Controller */
+ /* Reserved */
+#define IRQ_DMA 5 /* DMA Controller */
+#define IRQ_EXT0 6 /* External Interrupt 0 */
+#define IRQ_EXT1 7 /* External Interrupt 1 */
+#define IRQ_GE 8 /* Graphic Engine */
+#define IRQ_GOV 9 /* Graphic Overlay Engine */
+#define IRQ_ETHER 10 /* Ethernet MAC */
+#define IRQ_MPEGTS 11 /* Transport Stream Interface */
+#define IRQ_LCDC 12 /* LCD Controller */
+#define IRQ_EXT2 13 /* External Interrupt 2 */
+#define IRQ_EXT3 14 /* External Interrupt 3 */
+#define IRQ_EXT4 15 /* External Interrupt 4 */
+#define IRQ_CIPHER 16 /* Cipher */
+#define IRQ_VPP 17 /* Video Post-Processor */
+#define IRQ_I2C1 18 /* I2C 1 */
+#define IRQ_I2C0 19 /* I2C 0 */
+#define IRQ_SDMMC 20 /* SD/MMC Controller */
+#define IRQ_SDMMC_DMA 21 /* SD/MMC Controller DMA */
+#define IRQ_PMC_WU 22 /* Power Management Controller Wakeup */
+ /* Reserved */
+#define IRQ_SPI0 24 /* SPI 0 */
+#define IRQ_SPI1 25 /* SPI 1 */
+#define IRQ_SPI2 26 /* SPI 2 */
+#define IRQ_LCDDF 27 /* LCD Data Formatter */
+#define IRQ_NAND 28 /* NAND Flash Controller */
+#define IRQ_NAND_DMA 29 /* NAND Flash Controller DMA */
+#define IRQ_MS 30 /* MemoryStick Controller */
+#define IRQ_MS_DMA 31 /* MemoryStick Controller DMA */
+#define IRQ_UART0 32 /* UART 0 */
+#define IRQ_UART1 33 /* UART 1 */
+#define IRQ_I2S 34 /* I2S */
+#define IRQ_PCM 35 /* PCM */
+#define IRQ_PMCOS0 36 /* PMC OS Timer 0 */
+#define IRQ_PMCOS1 37 /* PMC OS Timer 1 */
+#define IRQ_PMCOS2 38 /* PMC OS Timer 2 */
+#define IRQ_PMCOS3 39 /* PMC OS Timer 3 */
+#define IRQ_VPU 40 /* Video Processing Unit */
+#define IRQ_VID 41 /* Video Digital Input Interface */
+#define IRQ_AC97 42 /* AC97 Interface */
+#define IRQ_EHCI 43 /* USB */
+#define IRQ_NOR 44 /* NOR Flash Controller */
+#define IRQ_PS2MOUSE 45 /* PS/2 Mouse */
+#define IRQ_PS2KBD 46 /* PS/2 Keyboard */
+#define IRQ_UART2 47 /* UART 2 */
+#define IRQ_RTC 48 /* RTC Interrupt */
+#define IRQ_RTCSM 49 /* RTC Second/Minute Update Interrupt */
+#define IRQ_UART3 50 /* UART 3 */
+#define IRQ_ADC 51 /* ADC */
+#define IRQ_EXT5 52 /* External Interrupt 5 */
+#define IRQ_EXT6 53 /* External Interrupt 6 */
+#define IRQ_EXT7 54 /* External Interrupt 7 */
+#define IRQ_CIR 55 /* CIR */
+#define IRQ_DMA0 56 /* DMA Channel 0 */
+#define IRQ_DMA1 57 /* DMA Channel 1 */
+#define IRQ_DMA2 58 /* DMA Channel 2 */
+#define IRQ_DMA3 59 /* DMA Channel 3 */
+#define IRQ_DMA4 60 /* DMA Channel 4 */
+#define IRQ_DMA5 61 /* DMA Channel 5 */
+#define IRQ_DMA6 62 /* DMA Channel 6 */
+#define IRQ_DMA7 63 /* DMA Channel 7 */
+
+#define VT8500_NR_IRQS 64
diff --git a/arch/arm/mach-vt8500/include/mach/vt8500_regs.h b/arch/arm/mach-vt8500/include/mach/vt8500_regs.h
new file mode 100644
index 000000000000..29c63ecb2383
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/vt8500_regs.h
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/vt8500_regs.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_ARCH_VT8500_REGS_H
+#define __ASM_ARM_ARCH_VT8500_REGS_H
+
+/* VT8500 Registers Map */
+
+#define VT8500_REGS_START_PHYS 0xd8000000 /* Start of MMIO registers */
+#define VT8500_REGS_START_VIRT 0xf8000000 /* Virtual mapping start */
+
+#define VT8500_DDR_BASE 0xd8000000 /* 1k DDR/DDR2 Memory
+ Controller */
+#define VT8500_DMA_BASE 0xd8001000 /* 1k DMA Controller */
+#define VT8500_SFLASH_BASE 0xd8002000 /* 1k Serial Flash Memory
+ Controller */
+#define VT8500_ETHER_BASE 0xd8004000 /* 1k Ethernet MAC 0 */
+#define VT8500_CIPHER_BASE 0xd8006000 /* 4k Cipher */
+#define VT8500_USB_BASE 0xd8007800 /* 2k USB OTG */
+# define VT8500_EHCI_BASE 0xd8007900 /* EHCI */
+# define VT8500_UHCI_BASE 0xd8007b01 /* UHCI */
+#define VT8500_PATA_BASE 0xd8008000 /* 512 PATA */
+#define VT8500_PS2_BASE 0xd8008800 /* 1k PS/2 */
+#define VT8500_NAND_BASE 0xd8009000 /* 1k NAND Controller */
+#define VT8500_NOR_BASE 0xd8009400 /* 1k NOR Controller */
+#define VT8500_SDMMC_BASE 0xd800a000 /* 1k SD/MMC Controller */
+#define VT8500_MS_BASE 0xd800b000 /* 1k MS/MSPRO Controller */
+#define VT8500_LCDC_BASE 0xd800e400 /* 1k LCD Controller */
+#define VT8500_VPU_BASE 0xd8050000 /* 256 VPU */
+#define VT8500_GOV_BASE 0xd8050300 /* 256 GOV */
+#define VT8500_GEGEA_BASE 0xd8050400 /* 768 GE/GE Alpha Mixing */
+#define VT8500_LCDF_BASE 0xd8050900 /* 256 LCD Formatter */
+#define VT8500_VID_BASE 0xd8050a00 /* 256 VID */
+#define VT8500_VPP_BASE 0xd8050b00 /* 256 VPP */
+#define VT8500_TSBK_BASE 0xd80f4000 /* 4k TSBK */
+#define VT8500_JPEGDEC_BASE 0xd80fe000 /* 4k JPEG Decoder */
+#define VT8500_JPEGENC_BASE 0xd80ff000 /* 4k JPEG Encoder */
+#define VT8500_RTC_BASE 0xd8100000 /* 64k RTC */
+#define VT8500_GPIO_BASE 0xd8110000 /* 64k GPIO Configuration */
+#define VT8500_SCC_BASE 0xd8120000 /* 64k System Configuration*/
+#define VT8500_PMC_BASE 0xd8130000 /* 64k PMC Configuration */
+#define VT8500_IC_BASE 0xd8140000 /* 64k Interrupt Controller*/
+#define VT8500_UART0_BASE 0xd8200000 /* 64k UART 0 */
+#define VT8500_UART2_BASE 0xd8210000 /* 64k UART 2 */
+#define VT8500_PWM_BASE 0xd8220000 /* 64k PWM Configuration */
+#define VT8500_SPI0_BASE 0xd8240000 /* 64k SPI 0 */
+#define VT8500_SPI1_BASE 0xd8250000 /* 64k SPI 1 */
+#define VT8500_CIR_BASE 0xd8270000 /* 64k CIR */
+#define VT8500_I2C0_BASE 0xd8280000 /* 64k I2C 0 */
+#define VT8500_AC97_BASE 0xd8290000 /* 64k AC97 */
+#define VT8500_SPI2_BASE 0xd82a0000 /* 64k SPI 2 */
+#define VT8500_UART1_BASE 0xd82b0000 /* 64k UART 1 */
+#define VT8500_UART3_BASE 0xd82c0000 /* 64k UART 3 */
+#define VT8500_PCM_BASE 0xd82d0000 /* 64k PCM */
+#define VT8500_I2C1_BASE 0xd8320000 /* 64k I2C 1 */
+#define VT8500_I2S_BASE 0xd8330000 /* 64k I2S */
+#define VT8500_ADC_BASE 0xd8340000 /* 64k ADC */
+
+#define VT8500_REGS_END_PHYS 0xd834ffff /* End of MMIO registers */
+#define VT8500_REGS_LENGTH (VT8500_REGS_END_PHYS \
+ - VT8500_REGS_START_PHYS + 1)
+
+#endif
diff --git a/arch/arm/mach-vt8500/include/mach/vt8500fb.h b/arch/arm/mach-vt8500/include/mach/vt8500fb.h
new file mode 100644
index 000000000000..7f399c370fe0
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/vt8500fb.h
@@ -0,0 +1,31 @@
+/*
+ * VT8500/WM8505 Frame Buffer platform data definitions
+ *
+ * Copyright (C) 2010 Ed Spiridonov <edo.rus@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VT8500FB_H
+#define _VT8500FB_H
+
+#include <linux/fb.h>
+
+struct vt8500fb_platform_data {
+ struct fb_videomode mode;
+ u32 xres_virtual;
+ u32 yres_virtual;
+ u32 bpp;
+ unsigned long video_mem_phys;
+ void *video_mem_virt;
+ unsigned long video_mem_len;
+};
+
+#endif /* _VT8500FB_H */
diff --git a/arch/arm/mach-vt8500/include/mach/wm8505_irqs.h b/arch/arm/mach-vt8500/include/mach/wm8505_irqs.h
new file mode 100644
index 000000000000..6128627ac753
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/wm8505_irqs.h
@@ -0,0 +1,115 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/wm8505_irqs.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* WM8505 Interrupt Sources */
+
+#define IRQ_UHCI 0 /* UHC FS (UHCI?) */
+#define IRQ_EHCI 1 /* UHC HS */
+#define IRQ_UDCDMA 2 /* UDC DMA */
+ /* Reserved */
+#define IRQ_PS2MOUSE 4 /* PS/2 Mouse */
+#define IRQ_UDC 5 /* UDC */
+#define IRQ_EXT0 6 /* External Interrupt 0 */
+#define IRQ_EXT1 7 /* External Interrupt 1 */
+#define IRQ_KEYPAD 8 /* Keypad */
+#define IRQ_DMA 9 /* DMA Controller */
+#define IRQ_ETHER 10 /* Ethernet MAC */
+ /* Reserved */
+ /* Reserved */
+#define IRQ_EXT2 13 /* External Interrupt 2 */
+#define IRQ_EXT3 14 /* External Interrupt 3 */
+#define IRQ_EXT4 15 /* External Interrupt 4 */
+#define IRQ_APB 16 /* APB Bridge */
+#define IRQ_DMA0 17 /* DMA Channel 0 */
+#define IRQ_I2C1 18 /* I2C 1 */
+#define IRQ_I2C0 19 /* I2C 0 */
+#define IRQ_SDMMC 20 /* SD/MMC Controller */
+#define IRQ_SDMMC_DMA 21 /* SD/MMC Controller DMA */
+#define IRQ_PMC_WU 22 /* Power Management Controller Wakeup */
+#define IRQ_PS2KBD 23 /* PS/2 Keyboard */
+#define IRQ_SPI0 24 /* SPI 0 */
+#define IRQ_SPI1 25 /* SPI 1 */
+#define IRQ_SPI2 26 /* SPI 2 */
+#define IRQ_DMA1 27 /* DMA Channel 1 */
+#define IRQ_NAND 28 /* NAND Flash Controller */
+#define IRQ_NAND_DMA 29 /* NAND Flash Controller DMA */
+#define IRQ_UART5 30 /* UART 5 */
+#define IRQ_UART4 31 /* UART 4 */
+#define IRQ_UART0 32 /* UART 0 */
+#define IRQ_UART1 33 /* UART 1 */
+#define IRQ_DMA2 34 /* DMA Channel 2 */
+#define IRQ_I2S 35 /* I2S */
+#define IRQ_PMCOS0 36 /* PMC OS Timer 0 */
+#define IRQ_PMCOS1 37 /* PMC OS Timer 1 */
+#define IRQ_PMCOS2 38 /* PMC OS Timer 2 */
+#define IRQ_PMCOS3 39 /* PMC OS Timer 3 */
+#define IRQ_DMA3 40 /* DMA Channel 3 */
+#define IRQ_DMA4 41 /* DMA Channel 4 */
+#define IRQ_AC97 42 /* AC97 Interface */
+ /* Reserved */
+#define IRQ_NOR 44 /* NOR Flash Controller */
+#define IRQ_DMA5 45 /* DMA Channel 5 */
+#define IRQ_DMA6 46 /* DMA Channel 6 */
+#define IRQ_UART2 47 /* UART 2 */
+#define IRQ_RTC 48 /* RTC Interrupt */
+#define IRQ_RTCSM 49 /* RTC Second/Minute Update Interrupt */
+#define IRQ_UART3 50 /* UART 3 */
+#define IRQ_DMA7 51 /* DMA Channel 7 */
+#define IRQ_EXT5 52 /* External Interrupt 5 */
+#define IRQ_EXT6 53 /* External Interrupt 6 */
+#define IRQ_EXT7 54 /* External Interrupt 7 */
+#define IRQ_CIR 55 /* CIR */
+#define IRQ_SIC0 56 /* SIC IRQ0 */
+#define IRQ_SIC1 57 /* SIC IRQ1 */
+#define IRQ_SIC2 58 /* SIC IRQ2 */
+#define IRQ_SIC3 59 /* SIC IRQ3 */
+#define IRQ_SIC4 60 /* SIC IRQ4 */
+#define IRQ_SIC5 61 /* SIC IRQ5 */
+#define IRQ_SIC6 62 /* SIC IRQ6 */
+#define IRQ_SIC7 63 /* SIC IRQ7 */
+ /* Reserved */
+#define IRQ_JPEGDEC 65 /* JPEG Decoder */
+#define IRQ_SAE 66 /* SAE (?) */
+ /* Reserved */
+#define IRQ_VPU 79 /* Video Processing Unit */
+#define IRQ_VPP 80 /* Video Post-Processor */
+#define IRQ_VID 81 /* Video Digital Input Interface */
+#define IRQ_SPU 82 /* SPU (?) */
+#define IRQ_PIP 83 /* PIP Error */
+#define IRQ_GE 84 /* Graphic Engine */
+#define IRQ_GOV 85 /* Graphic Overlay Engine */
+#define IRQ_DVO 86 /* Digital Video Output */
+ /* Reserved */
+#define IRQ_DMA8 92 /* DMA Channel 8 */
+#define IRQ_DMA9 93 /* DMA Channel 9 */
+#define IRQ_DMA10 94 /* DMA Channel 10 */
+#define IRQ_DMA11 95 /* DMA Channel 11 */
+#define IRQ_DMA12 96 /* DMA Channel 12 */
+#define IRQ_DMA13 97 /* DMA Channel 13 */
+#define IRQ_DMA14 98 /* DMA Channel 14 */
+#define IRQ_DMA15 99 /* DMA Channel 15 */
+ /* Reserved */
+#define IRQ_GOVW 111 /* GOVW (?) */
+#define IRQ_GOVRSDSCD 112 /* GOVR SDSCD (?) */
+#define IRQ_GOVRSDMIF 113 /* GOVR SDMIF (?) */
+#define IRQ_GOVRHDSCD 114 /* GOVR HDSCD (?) */
+#define IRQ_GOVRHDMIF 115 /* GOVR HDMIF (?) */
+
+#define WM8505_NR_IRQS 116
diff --git a/arch/arm/mach-vt8500/include/mach/wm8505_regs.h b/arch/arm/mach-vt8500/include/mach/wm8505_regs.h
new file mode 100644
index 000000000000..df1550941efb
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/wm8505_regs.h
@@ -0,0 +1,78 @@
+/*
+ * arch/arm/mach-vt8500/include/mach/wm8505_regs.h
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_ARCH_WM8505_REGS_H
+#define __ASM_ARM_ARCH_WM8505_REGS_H
+
+/* WM8505 Registers Map */
+
+#define WM8505_REGS_START_PHYS 0xd8000000 /* Start of MMIO registers */
+#define WM8505_REGS_START_VIRT 0xf8000000 /* Virtual mapping start */
+
+#define WM8505_DDR_BASE 0xd8000400 /* 1k DDR/DDR2 Memory
+ Controller */
+#define WM8505_DMA_BASE 0xd8001800 /* 1k DMA Controller */
+#define WM8505_VDMA_BASE 0xd8001c00 /* 1k VDMA */
+#define WM8505_SFLASH_BASE 0xd8002000 /* 1k Serial Flash Memory
+ Controller */
+#define WM8505_ETHER_BASE 0xd8004000 /* 1k Ethernet MAC 0 */
+#define WM8505_CIPHER_BASE 0xd8006000 /* 4k Cipher */
+#define WM8505_USB_BASE 0xd8007000 /* 2k USB 2.0 Host */
+# define WM8505_EHCI_BASE 0xd8007100 /* EHCI */
+# define WM8505_UHCI_BASE 0xd8007301 /* UHCI */
+#define WM8505_PS2_BASE 0xd8008800 /* 1k PS/2 */
+#define WM8505_NAND_BASE 0xd8009000 /* 1k NAND Controller */
+#define WM8505_NOR_BASE 0xd8009400 /* 1k NOR Controller */
+#define WM8505_SDMMC_BASE 0xd800a000 /* 1k SD/MMC Controller */
+#define WM8505_VPU_BASE 0xd8050000 /* 256 VPU */
+#define WM8505_GOV_BASE 0xd8050300 /* 256 GOV */
+#define WM8505_GEGEA_BASE 0xd8050400 /* 768 GE/GE Alpha Mixing */
+#define WM8505_GOVR_BASE 0xd8050800 /* 512 GOVR (framebuffer) */
+#define WM8505_VID_BASE 0xd8050a00 /* 256 VID */
+#define WM8505_SCL_BASE 0xd8050d00 /* 256 SCL */
+#define WM8505_VPP_BASE 0xd8050f00 /* 256 VPP */
+#define WM8505_JPEGDEC_BASE 0xd80fe000 /* 4k JPEG Decoder */
+#define WM8505_RTC_BASE 0xd8100000 /* 64k RTC */
+#define WM8505_GPIO_BASE 0xd8110000 /* 64k GPIO Configuration */
+#define WM8505_SCC_BASE 0xd8120000 /* 64k System Configuration*/
+#define WM8505_PMC_BASE 0xd8130000 /* 64k PMC Configuration */
+#define WM8505_IC_BASE 0xd8140000 /* 64k Interrupt Controller*/
+#define WM8505_SIC_BASE 0xd8150000 /* 64k Secondary IC */
+#define WM8505_UART0_BASE 0xd8200000 /* 64k UART 0 */
+#define WM8505_UART2_BASE 0xd8210000 /* 64k UART 2 */
+#define WM8505_PWM_BASE 0xd8220000 /* 64k PWM Configuration */
+#define WM8505_SPI0_BASE 0xd8240000 /* 64k SPI 0 */
+#define WM8505_SPI1_BASE 0xd8250000 /* 64k SPI 1 */
+#define WM8505_KEYPAD_BASE 0xd8260000 /* 64k Keypad control */
+#define WM8505_CIR_BASE 0xd8270000 /* 64k CIR */
+#define WM8505_I2C0_BASE 0xd8280000 /* 64k I2C 0 */
+#define WM8505_AC97_BASE 0xd8290000 /* 64k AC97 */
+#define WM8505_SPI2_BASE 0xd82a0000 /* 64k SPI 2 */
+#define WM8505_UART1_BASE 0xd82b0000 /* 64k UART 1 */
+#define WM8505_UART3_BASE 0xd82c0000 /* 64k UART 3 */
+#define WM8505_I2C1_BASE 0xd8320000 /* 64k I2C 1 */
+#define WM8505_I2S_BASE 0xd8330000 /* 64k I2S */
+#define WM8505_UART4_BASE 0xd8370000 /* 64k UART 4 */
+#define WM8505_UART5_BASE 0xd8380000 /* 64k UART 5 */
+
+#define WM8505_REGS_END_PHYS 0xd838ffff /* End of MMIO registers */
+#define WM8505_REGS_LENGTH (WM8505_REGS_END_PHYS \
+ - WM8505_REGS_START_PHYS + 1)
+
+#endif
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c
new file mode 100644
index 000000000000..5f4ddde4f02a
--- /dev/null
+++ b/arch/arm/mach-vt8500/irq.c
@@ -0,0 +1,177 @@
+/*
+ * arch/arm/mach-vt8500/irq.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq.h>
+
+#include "devices.h"
+
+#define VT8500_IC_DCTR 0x40 /* Destination control
+ register, 64*u8 */
+#define VT8500_INT_ENABLE (1 << 3)
+#define VT8500_TRIGGER_HIGH (0 << 4)
+#define VT8500_TRIGGER_RISING (1 << 4)
+#define VT8500_TRIGGER_FALLING (2 << 4)
+#define VT8500_EDGE ( VT8500_TRIGGER_RISING \
+ | VT8500_TRIGGER_FALLING)
+#define VT8500_IC_STATUS 0x80 /* Interrupt status, 2*u32 */
+
+static void __iomem *ic_regbase;
+static void __iomem *sic_regbase;
+
+static void vt8500_irq_mask(unsigned int irq)
+{
+ void __iomem *base = ic_regbase;
+ u8 edge;
+
+ if (irq >= 64) {
+ base = sic_regbase;
+ irq -= 64;
+ }
+ edge = readb(base + VT8500_IC_DCTR + irq) & VT8500_EDGE;
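+ /* For edge-triggered sources this also acts as the ack: writing the
+ status bit clears it. Level sources are masked via the enable bit. */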
+ if (edge) {
+ void __iomem *stat_reg = base + VT8500_IC_STATUS
+ + (irq < 32 ? 0 : 4);
+ unsigned status = readl(stat_reg);
+
+ status |= (1 << (irq & 0x1f));
+ writel(status, stat_reg);
+ } else {
+ u8 dctr = readb(base + VT8500_IC_DCTR + irq);
+
+ dctr &= ~VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_IC_DCTR + irq);
+ }
+}
+
+static void vt8500_irq_unmask(unsigned int irq)
+{
+ void __iomem *base = ic_regbase;
+ u8 dctr;
+
+ if (irq >= 64) {
+ base = sic_regbase;
+ irq -= 64;
+ }
+ dctr = readb(base + VT8500_IC_DCTR + irq);
+ dctr |= VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_IC_DCTR + irq);
+}
+
+static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ void __iomem *base = ic_regbase;
+ unsigned int orig_irq = irq;
+ u8 dctr;
+
+ if (irq >= 64) {
+ base = sic_regbase;
+ irq -= 64;
+ }
+
+ dctr = readb(base + VT8500_IC_DCTR + irq);
+ dctr &= ~VT8500_EDGE;
+
+ switch (flow_type) {
+ case IRQF_TRIGGER_LOW:
+ return -EINVAL;
+ case IRQF_TRIGGER_HIGH:
+ dctr |= VT8500_TRIGGER_HIGH;
+ irq_desc[orig_irq].handle_irq = handle_level_irq;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ dctr |= VT8500_TRIGGER_FALLING;
+ irq_desc[orig_irq].handle_irq = handle_edge_irq;
+ break;
+ case IRQF_TRIGGER_RISING:
+ dctr |= VT8500_TRIGGER_RISING;
+ irq_desc[orig_irq].handle_irq = handle_edge_irq;
+ break;
+ }
+ writeb(dctr, base + VT8500_IC_DCTR + irq);
+
+ return 0;
+}
+
+static struct irq_chip vt8500_irq_chip = {
+ .name = "vt8500",
+ .ack = vt8500_irq_mask,
+ .mask = vt8500_irq_mask,
+ .unmask = vt8500_irq_unmask,
+ .set_type = vt8500_irq_set_type,
+};
+
+void __init vt8500_init_irq(void)
+{
+ unsigned int i;
+
+ ic_regbase = ioremap(wmt_ic_base, SZ_64K);
+
+ if (ic_regbase) {
+ /* Enable rotating priority for IRQ */
+ writel((1 << 6), ic_regbase + 0x20);
+ writel(0, ic_regbase + 0x24);
+
+ for (i = 0; i < wmt_nr_irqs; i++) {
+ /* Disable all interrupts and route them to IRQ */
+ writeb(0x00, ic_regbase + VT8500_IC_DCTR + i);
+
+ set_irq_chip(i, &vt8500_irq_chip);
+ set_irq_handler(i, handle_level_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+ } else {
+ printk(KERN_ERR "Unable to remap the Interrupt Controller registers, not enabling IRQs!\n");
+ }
+}
+
+void __init wm8505_init_irq(void)
+{
+ unsigned int i;
+
+ ic_regbase = ioremap(wmt_ic_base, SZ_64K);
+ sic_regbase = ioremap(wmt_sic_base, SZ_64K);
+
+ if (ic_regbase && sic_regbase) {
+ /* Enable rotating priority for IRQ */
+ writel((1 << 6), ic_regbase + 0x20);
+ writel(0, ic_regbase + 0x24);
+ writel((1 << 6), sic_regbase + 0x20);
+ writel(0, sic_regbase + 0x24);
+
+ for (i = 0; i < wmt_nr_irqs; i++) {
+ /* Disable all interrupts and route them to IRQ */
+ if (i < 64)
+ writeb(0x00, ic_regbase + VT8500_IC_DCTR + i);
+ else
+ writeb(0x00, sic_regbase + VT8500_IC_DCTR
+ + i - 64);
+
+ set_irq_chip(i, &vt8500_irq_chip);
+ set_irq_handler(i, handle_level_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+ } else {
+ printk(KERN_ERR "Unable to remap the Interrupt Controller registers, not enabling IRQs!\n");
+ }
+}
diff --git a/arch/arm/mach-vt8500/pwm.c b/arch/arm/mach-vt8500/pwm.c
new file mode 100644
index 000000000000..8ad825e93592
--- /dev/null
+++ b/arch/arm/mach-vt8500/pwm.c
@@ -0,0 +1,265 @@
+/*
+ * arch/arm/mach-vt8500/pwm.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+#include <linux/delay.h>
+
+#include <asm/div64.h>
+
+#define VT8500_NR_PWMS 4
+
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_list);
+
+struct pwm_device {
+ struct list_head node;
+ struct platform_device *pdev;
+
+ const char *label;
+
+ void __iomem *regbase;
+
+ unsigned int use_count;
+ unsigned int pwm_id;
+};
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
+{
+ int loops = msecs_to_loops(10);
+ while ((readb(reg) & bitmask) && --loops)
+ cpu_relax();
+
+ if (unlikely(!loops))
+ pr_warning("Waiting for status bits 0x%x to clear timed out\n",
+ bitmask);
+}
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long long c;
+ unsigned long period_cycles, prescale, pv, dc;
+
+ if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ c = 25000000/2; /* assume a 12.5 MHz PWM clock until clock support is implemented */
+ c = c * period_ns;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ if (period_cycles < 1)
+ period_cycles = 1;
+ prescale = (period_cycles - 1) / 4096;
+ pv = period_cycles / (prescale + 1) - 1;
+ if (pv > 4095)
+ pv = 4095;
+
+ if (prescale > 1023)
+ return -EINVAL;
+
+ c = (unsigned long long)pv * duty_ns;
+ do_div(c, period_ns);
+ dc = c;
+
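+ /* Per-channel registers are spaced 16 bytes apart: +0x0 control,
+ +0x4 prescale, +0x8 period, +0xc duty; each write must wait for the
+ matching busy flag in the status register at 0x40. */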
+ pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 1));
+ writel(prescale, pwm->regbase + 0x4 + (pwm->pwm_id << 4));
+
+ pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 2));
+ writel(pv, pwm->regbase + 0x8 + (pwm->pwm_id << 4));
+
+ pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 3));
+ writel(dc, pwm->regbase + 0xc + (pwm->pwm_id << 4));
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0));
+ writel(5, pwm->regbase + (pwm->pwm_id << 4));
+ return 0;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0));
+ writel(0, pwm->regbase + (pwm->pwm_id << 4));
+}
+EXPORT_SYMBOL(pwm_disable);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int found = 0;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == pwm_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ if (pwm->use_count == 0) {
+ pwm->use_count++;
+ pwm->label = label;
+ } else {
+ pwm = ERR_PTR(-EBUSY);
+ }
+ } else {
+ pwm = ERR_PTR(-ENOENT);
+ }
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+
+ if (pwm->use_count) {
+ pwm->use_count--;
+ pwm->label = NULL;
+ } else {
+ pr_warning("PWM device already freed\n");
+ }
+
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_free);
+
+static inline void __add_pwm(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+ list_add_tail(&pwm->node, &pwm_list);
+ mutex_unlock(&pwm_lock);
+}
+
+static int __devinit pwm_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwms;
+ struct resource *r;
+ int ret = 0;
+ int i;
+
+ pwms = kzalloc(sizeof(struct pwm_device) * VT8500_NR_PWMS, GFP_KERNEL);
+ if (pwms == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < VT8500_NR_PWMS; i++) {
+ pwms[i].use_count = 0;
+ pwms[i].pwm_id = i;
+ pwms[i].pdev = pdev;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ pwms[0].regbase = ioremap(r->start, resource_size(r));
+ if (pwms[0].regbase == NULL) {
+ dev_err(&pdev->dev, "failed to ioremap() registers\n");
+ ret = -ENODEV;
+ goto err_free_mem;
+ }
+
+ for (i = 1; i < VT8500_NR_PWMS; i++)
+ pwms[i].regbase = pwms[0].regbase;
+
+ for (i = 0; i < VT8500_NR_PWMS; i++)
+ __add_pwm(&pwms[i]);
+
+ platform_set_drvdata(pdev, pwms);
+ return 0;
+
+err_free_mem:
+ release_mem_region(r->start, resource_size(r));
+err_free:
+ kfree(pwms);
+ return ret;
+}
+
+static int __devexit pwm_remove(struct platform_device *pdev)
+{
+ struct pwm_device *pwms;
+ struct resource *r;
+ int i;
+
+ pwms = platform_get_drvdata(pdev);
+ if (pwms == NULL)
+ return -ENODEV;
+
+ mutex_lock(&pwm_lock);
+
+ for (i = 0; i < VT8500_NR_PWMS; i++)
+ list_del(&pwms[i].node);
+ mutex_unlock(&pwm_lock);
+
+ iounmap(pwms[0].regbase);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, resource_size(r));
+
+ kfree(pwms);
+ return 0;
+}
+
+static struct platform_driver pwm_driver = {
+ .driver = {
+ .name = "vt8500-pwm",
+ .owner = THIS_MODULE,
+ },
+ .probe = pwm_probe,
+ .remove = __devexit_p(pwm_remove),
+};
+
+static int __init pwm_init(void)
+{
+ return platform_driver_register(&pwm_driver);
+}
+arch_initcall(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ platform_driver_unregister(&pwm_driver);
+}
+module_exit(pwm_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-vt8500/timer.c b/arch/arm/mach-vt8500/timer.c
new file mode 100644
index 000000000000..d5376c592ab6
--- /dev/null
+++ b/arch/arm/mach-vt8500/timer.c
@@ -0,0 +1,155 @@
+/*
+ * arch/arm/mach-vt8500/timer.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+
+#include <asm/mach/time.h>
+
+#include "devices.h"
+
+#define VT8500_TIMER_OFFSET 0x0100
+#define TIMER_MATCH_VAL 0x0000
+#define TIMER_COUNT_VAL 0x0010
+#define TIMER_STATUS_VAL 0x0014
+#define TIMER_IER_VAL 0x001c /* interrupt enable */
+#define TIMER_CTRL_VAL 0x0020
+#define TIMER_AS_VAL 0x0024 /* access status */
+#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */
+#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */
+#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */
+#define VT8500_TIMER_HZ 3000000
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+static void __iomem *regbase;
+
+static cycle_t vt8500_timer_read(struct clocksource *cs)
+{
+ int loops = msecs_to_loops(10);
+ writel(3, regbase + TIMER_CTRL_VAL);
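+ /* The write requests a fresh count capture; poll the access-status
+ register until the read-busy flag clears before sampling it. */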
+ while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
+ && --loops)
+ cpu_relax();
+ return readl(regbase + TIMER_COUNT_VAL);
+}
+
+struct clocksource clocksource = {
+ .name = "vt8500_timer",
+ .rating = 200,
+ .read = vt8500_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int vt8500_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ int loops = msecs_to_loops(10);
+ cycle_t alarm = clocksource.read(&clocksource) + cycles;
+ while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
+ && --loops)
+ cpu_relax();
+ writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
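+ /* If the requested expiry is already at or behind the counter,
+ report -ETIME so the core picks a later event. */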
+ if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+ return -ETIME;
+
+ writel(1, regbase + TIMER_IER_VAL);
+
+ return 0;
+}
+
+static void vt8500_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel(readl(regbase + TIMER_CTRL_VAL) | 1,
+ regbase + TIMER_CTRL_VAL);
+ writel(0, regbase + TIMER_IER_VAL);
+ break;
+ }
+}
+
+struct clock_event_device clockevent = {
+ .name = "vt8500_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = vt8500_timer_set_next_event,
+ .set_mode = vt8500_timer_set_mode,
+};
+
+static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+struct irqaction irq = {
+ .name = "vt8500_timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = vt8500_timer_interrupt,
+ .dev_id = &clockevent,
+};
+
+static void __init vt8500_timer_init(void)
+{
+ regbase = ioremap(wmt_pmc_base + VT8500_TIMER_OFFSET, 0x28);
+ if (!regbase) {
+ printk(KERN_ERR "vt8500_timer_init: failed to map MMIO registers\n");
+ return;
+ }
+
+ writel(1, regbase + TIMER_CTRL_VAL);
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ writel(~0, regbase + TIMER_MATCH_VAL);
+
+ if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ printk(KERN_ERR "vt8500_timer_init: clocksource_register failed for %s\n",
+ clocksource.name);
+
+ clockevents_calc_mult_shift(&clockevent, VT8500_TIMER_HZ, 4);
+
+ /* delta limits carried over from mach-msm; not yet tuned for this SoC */
+ clockevent.max_delta_ns =
+ clockevent_delta2ns(0xf0000000, &clockevent);
+ clockevent.min_delta_ns = clockevent_delta2ns(4, &clockevent);
+ clockevent.cpumask = cpumask_of(0);
+
+ if (setup_irq(wmt_timer_irq, &irq))
+ printk(KERN_ERR "vt8500_timer_init: setup_irq failed for %s\n",
+ clockevent.name);
+ clockevents_register_device(&clockevent);
+}
+
+struct sys_timer vt8500_timer = {
+ .init = vt8500_timer_init
+};
diff --git a/arch/arm/mach-vt8500/wm8505_7in.c b/arch/arm/mach-vt8500/wm8505_7in.c
new file mode 100644
index 000000000000..e73aadbcafd6
--- /dev/null
+++ b/arch/arm/mach-vt8500/wm8505_7in.c
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-vt8500/wm8505_7in.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/io.h>
+#include <linux/pm.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include "devices.h"
+
+static void __iomem *pmc_hiber;
+
+static struct platform_device *devices[] __initdata = {
+ &vt8500_device_uart0,
+ &vt8500_device_ehci,
+ &vt8500_device_wm8505_fb,
+ &vt8500_device_ge_rops,
+ &vt8500_device_pwm,
+ &vt8500_device_pwmbl,
+ &vt8500_device_rtc,
+};
+
+static void vt8500_power_off(void)
+{
+ local_irq_disable();
+ writew(5, pmc_hiber);
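+ /* CP15 c7, c0, 4: wait for interrupt while the PMC powers the system down */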
+ asm("mcr%? p15, 0, %0, c7, c0, 4" : : "r" (0));
+}
+
+void __init wm8505_7in_init(void)
+{
+#ifdef CONFIG_FB_WM8505
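+ /* Set bit 31 of the GPIO pin-sharing (mux) register; the display
+ output depends on this setting, as the error path below notes. */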
+ void __iomem *gpio_mux_reg = ioremap(wmt_gpio_base + 0x200, 4);
+ if (gpio_mux_reg) {
+ writel(readl(gpio_mux_reg) | 0x80000000, gpio_mux_reg);
+ iounmap(gpio_mux_reg);
+ } else {
+ printk(KERN_ERR "Could not remap the GPIO mux register, display may not work properly!\n");
+ }
+#endif
+ pmc_hiber = ioremap(wmt_pmc_base + 0x12, 2);
+ if (pmc_hiber)
+ pm_power_off = &vt8500_power_off;
+ else
+ printk(KERN_ERR "PMC Hibernation register could not be remapped, not enabling power off!\n");
+
+ wm8505_set_resources();
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ vt8500_gpio_init();
+}
+
+MACHINE_START(WM8505_7IN_NETBOOK, "WM8505 7-inch generic netbook")
+ .boot_params = 0x00000100,
+ .reserve = wm8505_reserve_mem,
+ .map_io = wm8505_map_io,
+ .init_irq = wm8505_init_irq,
+ .timer = &vt8500_timer,
+ .init_machine = wm8505_7in_init,
+MACHINE_END
diff --git a/arch/arm/mach-w90x900/include/mach/memory.h b/arch/arm/mach-w90x900/include/mach/memory.h
index 971b80702c27..f02905ba7746 100644
--- a/arch/arm/mach-w90x900/include/mach/memory.h
+++ b/arch/arm/mach-w90x900/include/mach/memory.h
@@ -18,6 +18,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e4509bae8fc4..6cba0b0c915b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -390,7 +390,7 @@ config CPU_PJ4
# ARMv6
config CPU_V6
- bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
+ bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
select CPU_32v6
select CPU_ABRT_EV6
select CPU_PABRT_V6
@@ -402,16 +402,18 @@ config CPU_V6
select CPU_TLB_V6 if MMU
# ARMv6k
-config CPU_32v6K
- bool "Support ARM V6K processor extensions" if !SMP
- depends on CPU_V6 || CPU_V7
- default y if SMP
- help
- Say Y here if your ARMv6 processor supports the 'K' extension.
- This enables the kernel to use some instructions not present
- on previous processors, and as such a kernel build with this
- enabled will not boot on processors with do not support these
- instructions.
+config CPU_V6K
+ bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
+ select CPU_32v6
+ select CPU_32v6K
+ select CPU_ABRT_EV6
+ select CPU_PABRT_V6
+ select CPU_CACHE_V6
+ select CPU_CACHE_VIPT
+ select CPU_CP15_MMU
+ select CPU_HAS_ASID if MMU
+ select CPU_COPY_V6 if MMU
+ select CPU_TLB_V6 if MMU
# ARMv7
config CPU_V7
@@ -433,25 +435,33 @@ config CPU_32v3
bool
select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
config CPU_32v4
bool
select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
config CPU_32v4T
bool
select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
config CPU_32v5
bool
select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+ select CPU_USE_DOMAINS if MMU
config CPU_32v6
bool
select TLS_REG_EMUL if !CPU_32v6K && !MMU
+ select CPU_USE_DOMAINS if CPU_V6 && MMU
+
+config CPU_32v6K
+ bool
config CPU_32v7
bool
@@ -607,8 +617,6 @@ config CPU_CP15_MPU
config CPU_USE_DOMAINS
bool
- depends on MMU
- default y if !CPU_32v6K
help
This option enables or disables the use of domain switching
via the set_fs() function.
@@ -623,7 +631,7 @@ comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
- depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON
+ depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
default y
help
Say Y if you want to include kernel support for running user space
@@ -644,7 +652,7 @@ config ARM_THUMBEE
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
- depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
+ depends on !CPU_USE_DOMAINS && CPU_V7
select HAVE_PROC_CPU if PROC_FS
default y if SMP
help
@@ -681,7 +689,7 @@ config CPU_BIG_ENDIAN
config CPU_ENDIAN_BE8
bool
depends on CPU_BIG_ENDIAN
- default CPU_V6 || CPU_V7
+ default CPU_V6 || CPU_V6K || CPU_V7
help
Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
@@ -747,7 +755,7 @@ config CPU_CACHE_ROUND_ROBIN
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
- depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
+ depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
help
Say Y here to disable branch prediction. If unsure, say N.
@@ -767,7 +775,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
- depends on CPU_V6 && SMP
+ depends on CPU_V6K && SMP
default y
help
The Snoop Control Unit on ARM11MPCore does not detect the
@@ -812,7 +820,7 @@ config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
- ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE
default y
select OUTER_CACHE
@@ -823,7 +831,7 @@ config CACHE_L2X0
config CACHE_PL310
bool
depends on CACHE_L2X0
- default y if CPU_V7 && !CPU_V6
+ default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
help
This option enables optimisations for the PL310 cache
controller.
@@ -845,16 +853,21 @@ config CACHE_XSC3L2
help
This option enables the L2 cache on XScale3.
+config ARM_L1_CACHE_SHIFT_6
+ bool
+ help
+ Setting ARM L1 cache line size to 64 Bytes.
+
config ARM_L1_CACHE_SHIFT
int
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
config ARM_DMA_MEM_BUFFERABLE
- bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
MACH_REALVIEW_PB11MP)
- default y if CPU_V6 || CPU_V7
+ default y if CPU_V6 || CPU_V6K || CPU_V7
help
Historically, the kernel has used strongly ordered mappings to
provide DMA coherent memory. With the advent of ARMv7, mapping
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 00d74a04af3a..bca7e61928c7 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o
obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
+obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
AFLAGS_proc-v6.o :=-Wa,-march=armv6
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index f332df7f0d37..1478aa522144 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,11 +20,11 @@
*/
.align 5
ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_32v6K
- clrex
-#else
+#ifdef CONFIG_CPU_V6
sub r1, sp, #4 @ Get unused stack location
strex r0, r1, [r1] @ Clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+ clrex
#endif
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 170c9bb95866..f2ce38e085d2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
+
+#ifdef CONFIG_ARM_ERRATA_753970
+ /* write to an unmapped register */
+ writel_relaxed(0, base + L2X0_DUMMY_REG);
+#else
writel_relaxed(0, base + L2X0_CACHE_SYNC);
+#endif
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4771dba61448..82a093cee09a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -149,6 +149,7 @@ static int __init consistent_init(void)
{
int ret = 0;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i = 0;
@@ -156,7 +157,15 @@ static int __init consistent_init(void)
do {
pgd = pgd_offset(&init_mm, base);
- pmd = pmd_alloc(&init_mm, pgd, base);
+
+ pud = pud_alloc(&init_mm, pgd, base);
+ if (!pud) {
+ printk(KERN_ERR "%s: no pud tables\n", __func__);
+ ret = -ENOMEM;
+ break;
+ }
+
+ pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
printk(KERN_ERR "%s: no pmd tables\n", __func__);
ret = -ENOMEM;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 01210dba0221..7cab79179421 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -95,6 +95,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
{
spinlock_t *ptl;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret;
@@ -103,7 +104,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
if (pgd_none_or_clear_bad(pgd))
return 0;
- pmd = pmd_offset(pgd, address);
+ pud = pud_offset(pgd, address);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
return 0;
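The recurring change in this and the following memory-management files is the insertion of a pud level between the pgd and pmd lookups, so that the generic folded-pud page-table code compiles even though ARM collapses the pud into the pgd. A minimal sketch of the walk as these hunks now perform it (walk_to_pte() is an illustrative helper name, not part of the patch):

#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);	/* new level; a no-op fold on 2-level ARM */
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);	/* now takes the pud rather than the pgd */
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() when done */
}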
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f10f9bac2206..bc0e1d88fd3b 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -76,9 +76,11 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
printk(KERN_ALERT "pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
- printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+ printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+ addr, (long long)pgd_val(*pgd));
do {
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -90,9 +92,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
}
- pmd = pmd_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
+ if (PTRS_PER_PUD != 1)
+ printk(", *pud=%08lx", pud_val(*pud));
+
+ if (pud_none(*pud))
+ break;
+
+ if (pud_bad(*pud)) {
+ printk("(bad)");
+ break;
+ }
+
+ pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
- printk(", *pmd=%08lx", pmd_val(*pmd));
+ printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
@@ -107,8 +121,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%08lx", pte_val(*pte));
- printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS]));
+ printk(", *pte=%08llx", (long long)pte_val(*pte));
+ printk(", *ppte=%08llx",
+ (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
pte_unmap(pte);
} while(0);
@@ -388,6 +403,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
{
unsigned int index;
pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
if (addr < TASK_SIZE)
@@ -406,12 +422,19 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (pgd_none(*pgd_k))
goto bad_area;
-
if (!pgd_present(*pgd))
set_pgd(pgd, *pgd_k);
- pmd_k = pmd_offset(pgd_k, addr);
- pmd = pmd_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
+ pud_k = pud_offset(pgd_k, addr);
+
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
/*
* On ARM one Linux PGD entry contains two hardware entries (see page
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 57299446f787..2be9139a4ef3 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -4,10 +4,10 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
- pmd_t *pmd = pmd_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
addr = (addr & PMD_MASK) | prot;
pmd[0] = __pmd(addr);
@@ -16,6 +16,18 @@ static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
flush_pmd_entry(pmd);
}
+static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+ unsigned long prot)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ idmap_add_pmd(pud, addr, next, prot);
+ } while (pud++, addr = next, addr != end);
+}
+
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long prot, next;
@@ -27,17 +39,28 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
- idmap_add_pmd(pgd, addr, next, prot);
+ idmap_add_pud(pgd, addr, next, prot);
} while (pgd++, addr = next, addr != end);
}
#ifdef CONFIG_SMP
-static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
- pmd_t *pmd = pmd_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
pmd_clear(pmd);
}
+static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ idmap_del_pmd(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long next;
@@ -45,7 +68,7 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
- idmap_del_pmd(pgd, addr, next);
+ idmap_del_pud(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
#endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cddd684364da..b3b0f0f5053d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -350,7 +350,7 @@ void __init bootmem_init(void)
*/
arm_bootmem_free(min, max_low, max_high);
- high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+ high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
@@ -398,8 +398,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
- pg = PAGE_ALIGN(__pa(start_pg));
- pgend = __pa(end_pg) & PAGE_MASK;
+ pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+ pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
/*
* If there are free pages between these,
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 36960df5fb76..d2384106af9c 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -7,7 +7,7 @@ extern pmd_t *top_pmd;
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
- return pmd_offset(pgd, virt);
+ return pmd_offset(pud_offset(pgd, virt), virt);
}
static inline pmd_t *pmd_off_k(unsigned long virt)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index b0a98305055c..afe209e1e1f8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
unsigned int cache_type;
int do_align = 0, aliasing = 0;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3c67e92f7d59..6cf76b3b68d1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -533,7 +533,7 @@ static void __init *early_alloc(unsigned long sz)
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+ pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
__pmd_populate(pmd, __pa(pte), prot);
}
BUG_ON(pmd_bad(*pmd));
@@ -551,11 +551,11 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+static void __init alloc_init_section(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type)
{
- pmd_t *pmd = pmd_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
/*
* Try a section mapping - end, addr and phys must all be aligned
@@ -584,6 +584,19 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
}
}
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+ unsigned long phys, const struct mem_type *type)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ alloc_init_section(pud, addr, next, phys, type);
+ phys += next - addr;
+ } while (pud++, addr = next, addr != end);
+}
+
static void __init create_36bit_mapping(struct map_desc *md,
const struct mem_type *type)
{
@@ -592,13 +605,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
pgd_t *pgd;
addr = md->virtual;
- phys = (unsigned long)__pfn_to_phys(md->pfn);
+ phys = __pfn_to_phys(md->pfn);
length = PAGE_ALIGN(md->length);
if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
printk(KERN_ERR "MM: CPU does not support supersection "
"mapping for 0x%08llx at 0x%08lx\n",
- __pfn_to_phys((u64)md->pfn), addr);
+ (long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
@@ -611,14 +624,14 @@ static void __init create_36bit_mapping(struct map_desc *md,
if (type->domain) {
printk(KERN_ERR "MM: invalid domain in supersection "
"mapping for 0x%08llx at 0x%08lx\n",
- __pfn_to_phys((u64)md->pfn), addr);
+ (long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
- printk(KERN_ERR "MM: cannot create mapping for "
- "0x%08llx at 0x%08lx invalid alignment\n",
- __pfn_to_phys((u64)md->pfn), addr);
+ printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
+ " at 0x%08lx invalid alignment\n",
+ (long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
@@ -631,7 +644,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
pgd = pgd_offset_k(addr);
end = addr + length;
do {
- pmd_t *pmd = pmd_offset(pgd, addr);
+ pud_t *pud = pud_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
int i;
for (i = 0; i < 16; i++)
@@ -652,22 +666,23 @@ static void __init create_36bit_mapping(struct map_desc *md,
*/
static void __init create_mapping(struct map_desc *md)
{
- unsigned long phys, addr, length, end;
+ unsigned long addr, length, end;
+ phys_addr_t phys;
const struct mem_type *type;
pgd_t *pgd;
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
- printk(KERN_WARNING "BUG: not creating mapping for "
- "0x%08llx at 0x%08lx in user region\n",
- __pfn_to_phys((u64)md->pfn), md->virtual);
+ printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
+ " at 0x%08lx in user region\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
- printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
- "overlaps vmalloc space\n",
- __pfn_to_phys((u64)md->pfn), md->virtual);
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx"
+ " at 0x%08lx overlaps vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
}
type = &mem_types[md->type];
@@ -681,13 +696,13 @@ static void __init create_mapping(struct map_desc *md)
}
addr = md->virtual & PAGE_MASK;
- phys = (unsigned long)__pfn_to_phys(md->pfn);
+ phys = __pfn_to_phys(md->pfn);
length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
- printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
+ printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
"be mapped using pages, ignoring.\n",
- __pfn_to_phys(md->pfn), addr);
+ (long long)__pfn_to_phys(md->pfn), addr);
return;
}
@@ -696,7 +711,7 @@ static void __init create_mapping(struct map_desc *md)
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_section(pgd, addr, next, phys, type);
+ alloc_init_pud(pgd, addr, next, phys, type);
phys += next - addr;
addr = next;
@@ -794,9 +809,10 @@ static void __init sanity_check_meminfo(void)
*/
if (__va(bank->start) >= vmalloc_min ||
__va(bank->start) < (void *)PAGE_OFFSET) {
- printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+ printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
"(vmalloc region overlap).\n",
- bank->start, bank->start + bank->size - 1);
+ (unsigned long long)bank->start,
+ (unsigned long long)bank->start + bank->size - 1);
continue;
}
@@ -807,10 +823,11 @@ static void __init sanity_check_meminfo(void)
if (__va(bank->start + bank->size) > vmalloc_min ||
__va(bank->start + bank->size) < __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
- printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
- "to -%.8lx (vmalloc region overlap).\n",
- bank->start, bank->start + bank->size - 1,
- bank->start + newsize - 1);
+ printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
+ "to -%.8llx (vmalloc region overlap).\n",
+ (unsigned long long)bank->start,
+ (unsigned long long)bank->start + bank->size - 1,
+ (unsigned long long)bank->start + newsize - 1);
bank->size = newsize;
}
#endif
@@ -827,16 +844,6 @@ static void __init sanity_check_meminfo(void)
* rather difficult.
*/
reason = "with VIPT aliasing cache";
- } else if (is_smp() && tlb_ops_need_broadcast()) {
- /*
- * kmap_high needs to occasionally flush TLB entries,
- * however, if the TLB entries need to be broadcast
- * we may deadlock:
- * kmap_high(irqs off)->flush_all_zero_pkmaps->
- * flush_tlb_kernel_range->smp_call_function_many
- * (must not be called with irqs off)
- */
- reason = "without hardware TLB ops broadcasting";
}
if (reason) {
printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 709244c66fa3..b2027c154b2a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -23,6 +23,7 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
+ pud_t *new_pud, *init_pud;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
@@ -46,7 +47,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
* On ARM, first page must always be allocated since it
* contains the machine vectors.
*/
- new_pmd = pmd_alloc(mm, new_pgd, 0);
+ new_pud = pud_alloc(mm, new_pgd, 0);
+ if (!new_pud)
+ goto no_pud;
+
+ new_pmd = pmd_alloc(mm, new_pud, 0);
if (!new_pmd)
goto no_pmd;
@@ -54,7 +59,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (!new_pte)
goto no_pte;
- init_pmd = pmd_offset(init_pgd, 0);
+ init_pud = pud_offset(init_pgd, 0);
+ init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte, *init_pte, 0);
pte_unmap(init_pte);
@@ -66,6 +72,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
no_pte:
pmd_free(mm, new_pmd);
no_pmd:
+ pud_free(mm, new_pud);
+no_pud:
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
@@ -74,6 +82,7 @@ no_pgd:
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pgtable_t pte;
@@ -84,7 +93,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
if (pgd_none_or_clear_bad(pgd))
goto no_pgd;
- pmd = pmd_offset(pgd, 0);
+ pud = pud_offset(pgd, 0);
+ if (pud_none_or_clear_bad(pud))
+ goto no_pud;
+
+ pmd = pmd_offset(pud, 0);
if (pmd_none_or_clear_bad(pmd))
goto no_pmd;
@@ -92,8 +105,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
pmd_clear(pmd);
pte_free(mm, pte);
no_pmd:
- pgd_clear(pgd);
+ pud_clear(pud);
pmd_free(mm, pmd);
+no_pud:
+ pgd_clear(pgd);
+ pud_free(mm, pud);
no_pgd:
free_pages((unsigned long) pgd_base, 2);
}
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index bcf748d9f4e2..226e3d8351c2 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -493,6 +493,9 @@ arm1020_processor_functions:
.word cpu_arm1020_dcache_clean_area
.word cpu_arm1020_switch_mm
.word cpu_arm1020_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm1020_processor_functions, . - arm1020_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index ab7ec26657ea..86d9c2cf0bce 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -474,6 +474,9 @@ arm1020e_processor_functions:
.word cpu_arm1020e_dcache_clean_area
.word cpu_arm1020e_switch_mm
.word cpu_arm1020e_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm1020e_processor_functions, . - arm1020e_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 831c5e54e22f..83d3dd34f846 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -457,6 +457,9 @@ arm1022_processor_functions:
.word cpu_arm1022_dcache_clean_area
.word cpu_arm1022_switch_mm
.word cpu_arm1022_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm1022_processor_functions, . - arm1022_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index e3f7e9a166bf..686043ee7281 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -452,6 +452,9 @@ arm1026_processor_functions:
.word cpu_arm1026_dcache_clean_area
.word cpu_arm1026_switch_mm
.word cpu_arm1026_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm1026_processor_functions, . - arm1026_processor_functions
.section .rodata
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 6a7be1863edd..5f79dc4ce3fb 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -284,6 +284,9 @@ ENTRY(arm6_processor_functions)
.word cpu_arm6_dcache_clean_area
.word cpu_arm6_switch_mm
.word cpu_arm6_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm6_processor_functions, . - arm6_processor_functions
/*
@@ -301,6 +304,9 @@ ENTRY(arm7_processor_functions)
.word cpu_arm7_dcache_clean_area
.word cpu_arm7_switch_mm
.word cpu_arm7_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm7_processor_functions, . - arm7_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index c285395f44b2..665266da143c 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -185,6 +185,9 @@ ENTRY(arm720_processor_functions)
.word cpu_arm720_dcache_clean_area
.word cpu_arm720_switch_mm
.word cpu_arm720_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm720_processor_functions, . - arm720_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 38b27dcba727..6f9d12effee1 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -130,6 +130,9 @@ ENTRY(arm740_processor_functions)
.word cpu_arm740_dcache_clean_area
.word cpu_arm740_switch_mm
.word 0 @ cpu_*_set_pte
+ .word 0
+ .word 0
+ .word 0
.size arm740_processor_functions, . - arm740_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 0c9786de20af..e4c165ca6696 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -70,6 +70,9 @@ ENTRY(arm7tdmi_processor_functions)
.word cpu_arm7tdmi_dcache_clean_area
.word cpu_arm7tdmi_switch_mm
.word 0 @ cpu_*_set_pte
+ .word 0
+ .word 0
+ .word 0
.size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 6109f278a904..219980ec8b6e 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,6 +387,40 @@ ENTRY(cpu_arm920_set_pte_ext)
#endif
mov pc, lr
+/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+.globl cpu_arm920_suspend_size
+.equ cpu_arm920_suspend_size, 4 * 3
+#ifdef CONFIG_PM
+ENTRY(cpu_arm920_do_suspend)
+ stmfd sp!, {r4 - r7, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+ mrc p15, 0, r5, c3, c0, 0 @ Domain ID
+ mrc p15, 0, r6, c2, c0, 0 @ TTB address
+ mrc p15, 0, r7, c1, c0, 0 @ Control register
+ stmia r0, {r4 - r7}
+ ldmfd sp!, {r4 - r7, pc}
+ENDPROC(cpu_arm920_do_suspend)
+
+ENTRY(cpu_arm920_do_resume)
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
+ ldmia r0, {r4 - r7}
+ mcr p15, 0, r4, c13, c0, 0 @ PID
+ mcr p15, 0, r5, c3, c0, 0 @ Domain ID
+ mcr p15, 0, r6, c2, c0, 0 @ TTB address
+ mov r0, r7 @ control register
+ mov r2, r6, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
+ b cpu_resume_mmu
+ENDPROC(cpu_arm920_do_resume)
+#else
+#define cpu_arm920_do_suspend 0
+#define cpu_arm920_do_resume 0
+#endif
+
__CPUINIT
.type __arm920_setup, #function
@@ -432,6 +466,9 @@ arm920_processor_functions:
.word cpu_arm920_dcache_clean_area
.word cpu_arm920_switch_mm
.word cpu_arm920_set_pte_ext
+ .word cpu_arm920_suspend_size
+ .word cpu_arm920_do_suspend
+ .word cpu_arm920_do_resume
.size arm920_processor_functions, . - arm920_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index bb2f0f46a5e6..36154b1e792a 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -436,6 +436,9 @@ arm922_processor_functions:
.word cpu_arm922_dcache_clean_area
.word cpu_arm922_switch_mm
.word cpu_arm922_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm922_processor_functions, . - arm922_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index c13e01accfe2..89c5e0009c4c 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -503,6 +503,9 @@ arm925_processor_functions:
.word cpu_arm925_dcache_clean_area
.word cpu_arm925_switch_mm
.word cpu_arm925_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size arm925_processor_functions, . - arm925_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 42eb4315740b..6a4bdb2c94a7 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -401,6 +401,40 @@ ENTRY(cpu_arm926_set_pte_ext)
#endif
mov pc, lr
+/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+.globl cpu_arm926_suspend_size
+.equ cpu_arm926_suspend_size, 4 * 3
+#ifdef CONFIG_PM
+ENTRY(cpu_arm926_do_suspend)
+ stmfd sp!, {r4 - r7, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+ mrc p15, 0, r5, c3, c0, 0 @ Domain ID
+ mrc p15, 0, r6, c2, c0, 0 @ TTB address
+ mrc p15, 0, r7, c1, c0, 0 @ Control register
+ stmia r0, {r4 - r7}
+ ldmfd sp!, {r4 - r7, pc}
+ENDPROC(cpu_arm926_do_suspend)
+
+ENTRY(cpu_arm926_do_resume)
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
+ ldmia r0, {r4 - r7}
+ mcr p15, 0, r4, c13, c0, 0 @ PID
+ mcr p15, 0, r5, c3, c0, 0 @ Domain ID
+ mcr p15, 0, r6, c2, c0, 0 @ TTB address
+ mov r0, r7 @ control register
+ mov r2, r6, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
+ b cpu_resume_mmu
+ENDPROC(cpu_arm926_do_resume)
+#else
+#define cpu_arm926_do_suspend 0
+#define cpu_arm926_do_resume 0
+#endif
+
__CPUINIT
.type __arm926_setup, #function
@@ -456,6 +490,9 @@ arm926_processor_functions:
.word cpu_arm926_dcache_clean_area
.word cpu_arm926_switch_mm
.word cpu_arm926_set_pte_ext
+ .word cpu_arm926_suspend_size
+ .word cpu_arm926_do_suspend
+ .word cpu_arm926_do_resume
.size arm926_processor_functions, . - arm926_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 7b11cdb9935f..26aea3f71c26 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -363,6 +363,9 @@ ENTRY(arm940_processor_functions)
.word cpu_arm940_dcache_clean_area
.word cpu_arm940_switch_mm
.word 0 @ cpu_*_set_pte
+ .word 0
+ .word 0
+ .word 0
.size arm940_processor_functions, . - arm940_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 1a5bbf080342..8063345406fe 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -419,6 +419,9 @@ ENTRY(arm946_processor_functions)
.word cpu_arm946_dcache_clean_area
.word cpu_arm946_switch_mm
.word 0 @ cpu_*_set_pte
+ .word 0
+ .word 0
+ .word 0
.size arm946_processor_functions, . - arm946_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index db67e3134d7a..7b7ebd4d096d 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -70,6 +70,9 @@ ENTRY(arm9tdmi_processor_functions)
.word cpu_arm9tdmi_dcache_clean_area
.word cpu_arm9tdmi_switch_mm
.word 0 @ cpu_*_set_pte
+ .word 0
+ .word 0
+ .word 0
.size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 7c9ad621f0e6..fc2a4ae15cf4 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -195,6 +195,9 @@ fa526_processor_functions:
.word cpu_fa526_dcache_clean_area
.word cpu_fa526_switch_mm
.word cpu_fa526_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size fa526_processor_functions, . - fa526_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index b4597edbff97..d3883eed7a4a 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -554,6 +554,9 @@ feroceon_processor_functions:
.word cpu_feroceon_dcache_clean_area
.word cpu_feroceon_switch_mm
.word cpu_feroceon_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size feroceon_processor_functions, . - feroceon_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 4458ee6aa713..9d4f2ae63370 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -388,6 +388,9 @@ mohawk_processor_functions:
.word cpu_mohawk_dcache_clean_area
.word cpu_mohawk_switch_mm
.word cpu_mohawk_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size mohawk_processor_functions, . - mohawk_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 5aa8d59c2e85..46f09ed16b98 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -203,6 +203,9 @@ ENTRY(sa110_processor_functions)
.word cpu_sa110_dcache_clean_area
.word cpu_sa110_switch_mm
.word cpu_sa110_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size sa110_processor_functions, . - sa110_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 2ac4e6f10713..74483d1977fe 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -169,6 +169,42 @@ ENTRY(cpu_sa1100_set_pte_ext)
#endif
mov pc, lr
+.globl cpu_sa1100_suspend_size
+.equ cpu_sa1100_suspend_size, 4*4
+#ifdef CONFIG_PM
+ENTRY(cpu_sa1100_do_suspend)
+ stmfd sp!, {r4 - r7, lr}
+ mrc p15, 0, r4, c3, c0, 0 @ domain ID
+ mrc p15, 0, r5, c2, c0, 0 @ translation table base addr
+ mrc p15, 0, r6, c13, c0, 0 @ PID
+ mrc p15, 0, r7, c1, c0, 0 @ control reg
+ stmia r0, {r4 - r7} @ store cp regs
+ ldmfd sp!, {r4 - r7, pc}
+ENDPROC(cpu_sa1100_do_suspend)
+
+ENTRY(cpu_sa1100_do_resume)
+ ldmia r0, {r4 - r7} @ load cp regs
+ mov r1, #0
+ mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs
+ mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache
+ mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
+ mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB
+
+ mcr p15, 0, r4, c3, c0, 0 @ domain ID
+ mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
+ mcr p15, 0, r6, c13, c0, 0 @ PID
+ mov r0, r7 @ control register
+ mov r2, r5, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
+ b cpu_resume_mmu
+ENDPROC(cpu_sa1100_do_resume)
+#else
+#define cpu_sa1100_do_suspend 0
+#define cpu_sa1100_do_resume 0
+#endif
+
__CPUINIT
.type __sa1100_setup, #function
@@ -218,6 +254,9 @@ ENTRY(sa1100_processor_functions)
.word cpu_sa1100_dcache_clean_area
.word cpu_sa1100_switch_mm
.word cpu_sa1100_set_pte_ext
+ .word cpu_sa1100_suspend_size
+ .word cpu_sa1100_do_suspend
+ .word cpu_sa1100_do_resume
.size sa1100_processor_functions, . - sa1100_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 59a7e1ffe7bc..832b6bdc192c 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -121,6 +121,53 @@ ENTRY(cpu_v6_set_pte_ext)
#endif
mov pc, lr
+/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
+.globl cpu_v6_suspend_size
+.equ cpu_v6_suspend_size, 4 * 8
+#ifdef CONFIG_PM
+ENTRY(cpu_v6_do_suspend)
+ stmfd sp!, {r4 - r11, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
+ mrc p15, 0, r5, c13, c0, 1 @ Context ID
+ mrc p15, 0, r6, c3, c0, 0 @ Domain ID
+ mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0
+ mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1
+ mrc p15, 0, r9, c1, c0, 1 @ auxiliary control register
+ mrc p15, 0, r10, c1, c0, 2 @ co-processor access control
+ mrc p15, 0, r11, c1, c0, 0 @ control register
+ stmia r0, {r4 - r11}
+ ldmfd sp!, {r4- r11, pc}
+ENDPROC(cpu_v6_do_suspend)
+
+ENTRY(cpu_v6_do_resume)
+ mov ip, #0
+ mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache
+ mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache
+ mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
+ ldmia r0, {r4 - r11}
+ mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
+ mcr p15, 0, r5, c13, c0, 1 @ Context ID
+ mcr p15, 0, r6, c3, c0, 0 @ Domain ID
+ mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0
+ mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1
+ mcr p15, 0, r9, c1, c0, 1 @ auxiliary control register
+ mcr p15, 0, r10, c1, c0, 2 @ co-processor access control
+ mcr p15, 0, ip, c2, c0, 2 @ TTB control register
+ mcr p15, 0, ip, c7, c5, 4 @ ISB
+ mov r0, r11 @ control register
+ mov r2, r7, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, cpu_resume_l1_flags
+ b cpu_resume_mmu
+ENDPROC(cpu_v6_do_resume)
+cpu_resume_l1_flags:
+ ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
+ ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
+#else
+#define cpu_v6_do_suspend 0
+#define cpu_v6_do_resume 0
+#endif
.type cpu_v6_name, #object
@@ -206,6 +253,9 @@ ENTRY(v6_processor_functions)
.word cpu_v6_dcache_clean_area
.word cpu_v6_switch_mm
.word cpu_v6_set_pte_ext
+ .word cpu_v6_suspend_size
+ .word cpu_v6_do_suspend
+ .word cpu_v6_do_resume
.size v6_processor_functions, . - v6_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0c1172b56b4e..860a8ad79ac8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -171,6 +171,87 @@ cpu_v7_name:
.ascii "ARMv7 Processor"
.align
+ /*
+ * Memory region attributes with SCTLR.TRE=1
+ *
+ * n = TEX[0],C,B
+ * TR = PRRR[2n+1:2n] - memory type
+ * IR = NMRR[2n+1:2n] - inner cacheable property
+ * OR = NMRR[2n+17:2n+16] - outer cacheable property
+ *
+ * n TR IR OR
+ * UNCACHED 000 00
+ * BUFFERABLE 001 10 00 00
+ * WRITETHROUGH 010 10 10 10
+ * WRITEBACK 011 10 11 11
+ * reserved 110
+ * WRITEALLOC 111 10 01 01
+ * DEV_SHARED 100 01
+ * DEV_NONSHARED 100 01
+ * DEV_WC 001 10
+ * DEV_CACHED 011 10
+ *
+ * Other attributes:
+ *
+ * DS0 = PRRR[16] = 0 - device shareable property
+ * DS1 = PRRR[17] = 1 - device shareable property
+ * NS0 = PRRR[18] = 0 - normal shareable property
+ * NS1 = PRRR[19] = 1 - normal shareable property
+ * NOS = PRRR[24+n] = 1 - not outer shareable
+ */
+.equ PRRR, 0xff0a81a8
+.equ NMRR, 0x40e040e0
+
+/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
+.globl cpu_v7_suspend_size
+.equ cpu_v7_suspend_size, 4 * 8
+#ifdef CONFIG_PM
+ENTRY(cpu_v7_do_suspend)
+ stmfd sp!, {r4 - r11, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
+ mrc p15, 0, r5, c13, c0, 1 @ Context ID
+ mrc p15, 0, r6, c3, c0, 0 @ Domain ID
+ mrc p15, 0, r7, c2, c0, 0 @ TTB 0
+ mrc p15, 0, r8, c2, c0, 1 @ TTB 1
+ mrc p15, 0, r9, c1, c0, 0 @ Control register
+ mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
+ mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control
+ stmia r0, {r4 - r11}
+ ldmfd sp!, {r4 - r11, pc}
+ENDPROC(cpu_v7_do_suspend)
+
+ENTRY(cpu_v7_do_resume)
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
+ mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ ldmia r0, {r4 - r11}
+ mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
+ mcr p15, 0, r5, c13, c0, 1 @ Context ID
+ mcr p15, 0, r6, c3, c0, 0 @ Domain ID
+ mcr p15, 0, r7, c2, c0, 0 @ TTB 0
+ mcr p15, 0, r8, c2, c0, 1 @ TTB 1
+ mcr p15, 0, ip, c2, c0, 2 @ TTB control register
+ mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
+ mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
+ ldr r4, =PRRR @ PRRR
+ ldr r5, =NMRR @ NMRR
+ mcr p15, 0, r4, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r5, c10, c2, 1 @ write NMRR
+ isb
+ mov r0, r9 @ control register
+ mov r2, r7, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, cpu_resume_l1_flags
+ b cpu_resume_mmu
+ENDPROC(cpu_v7_do_resume)
+cpu_resume_l1_flags:
+ ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
+ ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
+#else
+#define cpu_v7_do_suspend 0
+#define cpu_v7_do_resume 0
+#endif
+
__CPUINIT
/*
@@ -264,6 +345,12 @@ __v7_setup:
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
+#ifdef CONFIG_ARM_ERRATA_751472
+ cmp r6, #0x30 @ present prior to r3p0
+ mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrlt r10, r10, #1 << 11 @ set bit #11
+ mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
3: mov r10, #0
#ifdef HARVARD_CACHE
@@ -276,36 +363,8 @@ __v7_setup:
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
- /*
- * Memory region attributes with SCTLR.TRE=1
- *
- * n = TEX[0],C,B
- * TR = PRRR[2n+1:2n] - memory type
- * IR = NMRR[2n+1:2n] - inner cacheable property
- * OR = NMRR[2n+17:2n+16] - outer cacheable property
- *
- * n TR IR OR
- * UNCACHED 000 00
- * BUFFERABLE 001 10 00 00
- * WRITETHROUGH 010 10 10 10
- * WRITEBACK 011 10 11 11
- * reserved 110
- * WRITEALLOC 111 10 01 01
- * DEV_SHARED 100 01
- * DEV_NONSHARED 100 01
- * DEV_WC 001 10
- * DEV_CACHED 011 10
- *
- * Other attributes:
- *
- * DS0 = PRRR[16] = 0 - device shareable property
- * DS1 = PRRR[17] = 1 - device shareable property
- * NS0 = PRRR[18] = 0 - normal shareable property
- * NS1 = PRRR[19] = 1 - normal shareable property
- * NOS = PRRR[24+n] = 1 - not outer shareable
- */
- ldr r5, =0xff0a81a8 @ PRRR
- ldr r6, =0x40e040e0 @ NMRR
+ ldr r5, =PRRR @ PRRR
+ ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
@@ -351,6 +410,9 @@ ENTRY(v7_processor_functions)
.word cpu_v7_dcache_clean_area
.word cpu_v7_switch_mm
.word cpu_v7_set_pte_ext
+ .word 0
+ .word 0
+ .word 0
.size v7_processor_functions, . - v7_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index ec26355cb7c2..63d8b2044e84 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,9 +413,52 @@ ENTRY(cpu_xsc3_set_pte_ext)
mov pc, lr
.ltorg
-
.align
+.globl cpu_xsc3_suspend_size
+.equ cpu_xsc3_suspend_size, 4 * 8
+#ifdef CONFIG_PM
+ENTRY(cpu_xsc3_do_suspend)
+ stmfd sp!, {r4 - r10, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+ mrc p15, 0, r5, c15, c1, 0 @ CP access reg
+ mrc p15, 0, r6, c13, c0, 0 @ PID
+ mrc p15, 0, r7, c3, c0, 0 @ domain ID
+ mrc p15, 0, r8, c2, c0, 0 @ translation table base addr
+ mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg
+ mrc p15, 0, r10, c1, c0, 0 @ control reg
+ bic r4, r4, #2 @ clear frequency change bit
+ stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs
+ ldmia sp!, {r4 - r10, pc}
+ENDPROC(cpu_xsc3_do_suspend)
+
+ENTRY(cpu_xsc3_do_resume)
+ ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs
+ mov ip, #0
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
+ mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
+ mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+ mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode.
+ mcr p15, 0, r5, c15, c1, 0 @ CP access reg
+ mcr p15, 0, r6, c13, c0, 0 @ PID
+ mcr p15, 0, r7, c3, c0, 0 @ domain ID
+ mcr p15, 0, r8, c2, c0, 0 @ translation table base addr
+ mcr p15, 0, r9, c1, c0, 1 @ auxiliary control reg
+
+ @ temporarily map resume_turn_on_mmu into the page table,
+ @ otherwise prefetch abort occurs after MMU is turned on
+ mov r0, r10 @ control register
+ mov r2, r8, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, =0x542e @ section flags
+ b cpu_resume_mmu
+ENDPROC(cpu_xsc3_do_resume)
+#else
+#define cpu_xsc3_do_suspend 0
+#define cpu_xsc3_do_resume 0
+#endif
+
__CPUINIT
.type __xsc3_setup, #function
@@ -476,6 +519,9 @@ ENTRY(xsc3_processor_functions)
.word cpu_xsc3_dcache_clean_area
.word cpu_xsc3_switch_mm
.word cpu_xsc3_set_pte_ext
+ .word cpu_xsc3_suspend_size
+ .word cpu_xsc3_do_suspend
+ .word cpu_xsc3_do_resume
.size xsc3_processor_functions, . - xsc3_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 5a37c5e45c41..086038cd86ab 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -513,11 +513,49 @@ ENTRY(cpu_xscale_set_pte_ext)
xscale_set_pte_ext_epilogue
mov pc, lr
-
.ltorg
-
.align
+.globl cpu_xscale_suspend_size
+.equ cpu_xscale_suspend_size, 4 * 7
+#ifdef CONFIG_PM
+ENTRY(cpu_xscale_do_suspend)
+ stmfd sp!, {r4 - r10, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+ mrc p15, 0, r5, c15, c1, 0 @ CP access reg
+ mrc p15, 0, r6, c13, c0, 0 @ PID
+ mrc p15, 0, r7, c3, c0, 0 @ domain ID
+ mrc p15, 0, r8, c2, c0, 0 @ translation table base addr
+ mrc p15, 0, r9, c1, c1, 0 @ auxiliary control reg
+ mrc p15, 0, r10, c1, c0, 0 @ control reg
+ bic r4, r4, #2 @ clear frequency change bit
+ stmia r0, {r4 - r10} @ store cp regs
+ ldmfd sp!, {r4 - r10, pc}
+ENDPROC(cpu_xscale_do_suspend)
+
+ENTRY(cpu_xscale_do_resume)
+ ldmia r0, {r4 - r10} @ load cp regs
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
+ mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode.
+ mcr p15, 0, r5, c15, c1, 0 @ CP access reg
+ mcr p15, 0, r6, c13, c0, 0 @ PID
+ mcr p15, 0, r7, c3, c0, 0 @ domain ID
+ mcr p15, 0, r8, c2, c0, 0 @ translation table base addr
+ mcr p15, 0, r9, c1, c1, 0 @ auxiliary control reg
+ mov r0, r10 @ control register
+ mov r2, r8, lsr #14 @ get TTB0 base
+ mov r2, r2, lsl #14
+ ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
+ b cpu_resume_mmu
+ENDPROC(cpu_xscale_do_resume)
+#else
+#define cpu_xscale_do_suspend 0
+#define cpu_xscale_do_resume 0
+#endif
+
__CPUINIT
.type __xscale_setup, #function
@@ -565,6 +603,9 @@ ENTRY(xscale_processor_functions)
.word cpu_xscale_dcache_clean_area
.word cpu_xscale_switch_mm
.word cpu_xscale_set_pte_ext
+ .word cpu_xscale_suspend_size
+ .word cpu_xscale_do_suspend
+ .word cpu_xscale_do_resume
.size xscale_processor_functions, . - xscale_processor_functions
.section ".rodata"
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 935993e1b1ef..036fdbfdd62f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -38,7 +38,7 @@ struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
size_t size, gfp_t gfp)
{
- unsigned long addr = head->vm_start, end = head->vm_end - size;
+ unsigned long start = head->vm_start, addr = head->vm_end;
unsigned long flags;
struct arm_vmregion *c, *new;
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
spin_lock_irqsave(&head->vm_lock, flags);
- list_for_each_entry(c, &head->vm_list, vm_list) {
- if ((addr + size) < addr)
- goto nospc;
- if ((addr + size) <= c->vm_start)
+ addr = rounddown(addr - size, align);
+ list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
+ if (addr >= c->vm_end)
goto found;
- addr = ALIGN(c->vm_end, align);
- if (addr > end)
+ addr = rounddown(c->vm_start - size, align);
+ if (addr < start)
goto nospc;
}
found:
/*
- * Insert this entry _before_ the one we found.
+ * Insert this entry after the one we found.
*/
- list_add_tail(&new->vm_list, &c->vm_list);
+ list_add(&new->vm_list, &c->vm_list);
new->vm_start = addr;
new->vm_end = addr + size;
new->vm_active = 1;
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 5fd20e96876c..a1387875a491 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_IMX_HAVE_IOMUX_V1) += iomux-v1.o
obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o
obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o
obj-$(CONFIG_MXC_PWM) += pwm.o
-obj-$(CONFIG_USB_EHCI_MXC) += ehci.o
obj-$(CONFIG_MXC_ULPI) += ulpi.o
obj-$(CONFIG_MXC_USE_EPIT) += epit.o
obj-$(CONFIG_ARCH_MXC_AUDMUX_V1) += audmux-v1.o
diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c
index e9bcefe79a43..eee1b6096a08 100644
--- a/arch/arm/plat-mxc/devices.c
+++ b/arch/arm/plat-mxc/devices.c
@@ -81,6 +81,8 @@ struct platform_device *__init imx_add_platform_device_dmamask(
ret = platform_device_add(pdev);
if (ret) {
err:
+ if (dmamask)
+ kfree(pdev->dev.dma_mask);
platform_device_put(pdev);
return ERR_PTR(ret);
}
diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c
index b50c3517d083..6561c9df5f0d 100644
--- a/arch/arm/plat-mxc/devices/platform-fec.c
+++ b/arch/arm/plat-mxc/devices/platform-fec.c
@@ -31,6 +31,11 @@ const struct imx_fec_data imx35_fec_data __initconst =
imx_fec_data_entry_single(MX35);
#endif
+#ifdef CONFIG_SOC_IMX50
+const struct imx_fec_data imx50_fec_data __initconst =
+ imx_fec_data_entry_single(MX50);
+#endif
+
#ifdef CONFIG_SOC_IMX51
const struct imx_fec_data imx51_fec_data __initconst =
imx_fec_data_entry_single(MX51);
@@ -57,7 +62,7 @@ struct platform_device *__init imx_add_fec(
},
};
- return imx_add_platform_device("fec", 0 /* -1? */,
+ return imx_add_platform_device_dmamask("fec", 0,
res, ARRAY_SIZE(res),
- pdata, sizeof(*pdata));
+ pdata, sizeof(*pdata), DMA_BIT_MASK(32));
}
diff --git a/arch/arm/plat-mxc/devices/platform-imx-i2c.c b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
index 7ba94e1bbda3..2ab74f0da9a6 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-i2c.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
@@ -69,6 +69,16 @@ const struct imx_imx_i2c_data imx35_imx_i2c_data[] __initconst = {
};
#endif /* ifdef CONFIG_SOC_IMX35 */
+#ifdef CONFIG_SOC_IMX50
+const struct imx_imx_i2c_data imx50_imx_i2c_data[] __initconst = {
+#define imx50_imx_i2c_data_entry(_id, _hwid) \
+ imx_imx_i2c_data_entry(MX50, _id, _hwid, SZ_4K)
+ imx50_imx_i2c_data_entry(0, 1),
+ imx50_imx_i2c_data_entry(1, 2),
+ imx50_imx_i2c_data_entry(2, 3),
+};
+#endif /* ifdef CONFIG_SOC_IMX50 */
+
#ifdef CONFIG_SOC_IMX51
const struct imx_imx_i2c_data imx51_imx_i2c_data[] __initconst = {
#define imx51_imx_i2c_data_entry(_id, _hwid) \
diff --git a/arch/arm/plat-mxc/devices/platform-imx2-wdt.c b/arch/arm/plat-mxc/devices/platform-imx2-wdt.c
index e0aec61177f4..5e07ef2bf1c4 100644
--- a/arch/arm/plat-mxc/devices/platform-imx2-wdt.c
+++ b/arch/arm/plat-mxc/devices/platform-imx2-wdt.c
@@ -53,6 +53,15 @@ const struct imx_imx2_wdt_data imx51_imx2_wdt_data[] __initconst = {
};
#endif /* ifdef CONFIG_SOC_IMX51 */
+#ifdef CONFIG_SOC_IMX53
+const struct imx_imx2_wdt_data imx53_imx2_wdt_data[] __initconst = {
+#define imx53_imx2_wdt_data_entry(_id, _hwid) \
+ imx_imx2_wdt_data_entry(MX53, _id, _hwid, SZ_16K)
+ imx53_imx2_wdt_data_entry(0, 1),
+ imx53_imx2_wdt_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
struct platform_device *__init imx_add_imx2_wdt(
const struct imx_imx2_wdt_data *data)
{
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index 013c85f20b58..f4a60ab6763b 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -21,6 +21,15 @@
#define imx_spi_imx_data_entry(soc, type, devid, id, hwid, size) \
[id] = imx_spi_imx_data_entry_single(soc, type, devid, id, hwid, size)
+#ifdef CONFIG_SOC_IMX1
+const struct imx_spi_imx_data imx1_cspi_data[] __initconst = {
+#define imx1_cspi_data_entry(_id, _hwid) \
+ imx_spi_imx_data_entry(MX1, CSPI, "imx1-cspi", _id, _hwid, SZ_4K)
+ imx1_cspi_data_entry(0, 1),
+ imx1_cspi_data_entry(1, 2),
+};
+#endif
+
#ifdef CONFIG_SOC_IMX21
const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
#define imx21_cspi_data_entry(_id, _hwid) \
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
deleted file mode 100644
index 8772ce346a58..000000000000
--- a/arch/arm/plat-mxc/ehci.c
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- * Copyright (C) 2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/mxc_ehci.h>
-
-#define USBCTRL_OTGBASE_OFFSET 0x600
-
-#define MX31_OTG_SIC_SHIFT 29
-#define MX31_OTG_SIC_MASK (0x3 << MX31_OTG_SIC_SHIFT)
-#define MX31_OTG_PM_BIT (1 << 24)
-
-#define MX31_H2_SIC_SHIFT 21
-#define MX31_H2_SIC_MASK (0x3 << MX31_H2_SIC_SHIFT)
-#define MX31_H2_PM_BIT (1 << 16)
-#define MX31_H2_DT_BIT (1 << 5)
-
-#define MX31_H1_SIC_SHIFT 13
-#define MX31_H1_SIC_MASK (0x3 << MX31_H1_SIC_SHIFT)
-#define MX31_H1_PM_BIT (1 << 8)
-#define MX31_H1_DT_BIT (1 << 4)
-
-#define MX35_OTG_SIC_SHIFT 29
-#define MX35_OTG_SIC_MASK (0x3 << MX35_OTG_SIC_SHIFT)
-#define MX35_OTG_PM_BIT (1 << 24)
-
-#define MX35_H1_SIC_SHIFT 21
-#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
-#define MX35_H1_PM_BIT (1 << 8)
-#define MX35_H1_IPPUE_UP_BIT (1 << 7)
-#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
-#define MX35_H1_TLL_BIT (1 << 5)
-#define MX35_H1_USBTE_BIT (1 << 4)
-
-#define MXC_OTG_OFFSET 0
-#define MXC_H1_OFFSET 0x200
-#define MXC_H2_OFFSET 0x400
-
-/* USB_CTRL */
-#define MXC_OTG_UCTRL_OWIE_BIT (1 << 27) /* OTG wakeup intr enable */
-#define MXC_OTG_UCTRL_OPM_BIT (1 << 24) /* OTG power mask */
-#define MXC_H1_UCTRL_H1UIE_BIT (1 << 12) /* Host1 ULPI interrupt enable */
-#define MXC_H1_UCTRL_H1WIE_BIT (1 << 11) /* HOST1 wakeup intr enable */
-#define MXC_H1_UCTRL_H1PM_BIT (1 << 8) /* HOST1 power mask */
-
-/* USB_PHY_CTRL_FUNC */
-#define MXC_OTG_PHYCTRL_OC_DIS_BIT (1 << 8) /* OTG Disable Overcurrent Event */
-#define MXC_H1_OC_DIS_BIT (1 << 5) /* UH1 Disable Overcurrent Event */
-
-/* USBH2CTRL */
-#define MXC_H2_UCTRL_H2UIE_BIT (1 << 8)
-#define MXC_H2_UCTRL_H2WIE_BIT (1 << 7)
-#define MXC_H2_UCTRL_H2PM_BIT (1 << 4)
-
-#define MXC_USBCMD_OFFSET 0x140
-
-/* USBCMD */
-#define MXC_UCMD_ITC_NO_THRESHOLD_MASK (~(0xff << 16)) /* Interrupt Threshold Control */
-
-int mxc_initialize_usb_hw(int port, unsigned int flags)
-{
- unsigned int v;
-#if defined(CONFIG_SOC_IMX25)
- if (cpu_is_mx25()) {
- v = readl(MX25_IO_ADDRESS(MX25_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
-
- switch (port) {
- case 0: /* OTG port */
- v &= ~(MX35_OTG_SIC_MASK | MX35_OTG_PM_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX35_OTG_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX35_OTG_PM_BIT;
-
- break;
- case 1: /* H1 port */
- v &= ~(MX35_H1_SIC_MASK | MX35_H1_PM_BIT | MX35_H1_TLL_BIT |
- MX35_H1_USBTE_BIT | MX35_H1_IPPUE_DOWN_BIT | MX35_H1_IPPUE_UP_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX35_H1_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX35_H1_PM_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX35_H1_TLL_BIT;
-
- if (flags & MXC_EHCI_INTERNAL_PHY)
- v |= MX35_H1_USBTE_BIT;
-
- if (flags & MXC_EHCI_IPPUE_DOWN)
- v |= MX35_H1_IPPUE_DOWN_BIT;
-
- if (flags & MXC_EHCI_IPPUE_UP)
- v |= MX35_H1_IPPUE_UP_BIT;
-
- break;
- default:
- return -EINVAL;
- }
-
- writel(v, MX25_IO_ADDRESS(MX25_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
- return 0;
- }
-#endif /* if defined(CONFIG_SOC_IMX25) */
-#if defined(CONFIG_ARCH_MX3)
- if (cpu_is_mx31()) {
- v = readl(MX31_IO_ADDRESS(MX31_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
-
- switch (port) {
- case 0: /* OTG port */
- v &= ~(MX31_OTG_SIC_MASK | MX31_OTG_PM_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX31_OTG_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX31_OTG_PM_BIT;
-
- break;
- case 1: /* H1 port */
- v &= ~(MX31_H1_SIC_MASK | MX31_H1_PM_BIT | MX31_H1_DT_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX31_H1_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX31_H1_PM_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX31_H1_DT_BIT;
-
- break;
- case 2: /* H2 port */
- v &= ~(MX31_H2_SIC_MASK | MX31_H2_PM_BIT | MX31_H2_DT_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX31_H2_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX31_H2_PM_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX31_H2_DT_BIT;
-
- break;
- default:
- return -EINVAL;
- }
-
- writel(v, MX31_IO_ADDRESS(MX31_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
- return 0;
- }
-
- if (cpu_is_mx35()) {
- v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
-
- switch (port) {
- case 0: /* OTG port */
- v &= ~(MX35_OTG_SIC_MASK | MX35_OTG_PM_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX35_OTG_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX35_OTG_PM_BIT;
-
- break;
- case 1: /* H1 port */
- v &= ~(MX35_H1_SIC_MASK | MX35_H1_PM_BIT | MX35_H1_TLL_BIT |
- MX35_H1_USBTE_BIT | MX35_H1_IPPUE_DOWN_BIT | MX35_H1_IPPUE_UP_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX35_H1_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX35_H1_PM_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX35_H1_TLL_BIT;
-
- if (flags & MXC_EHCI_INTERNAL_PHY)
- v |= MX35_H1_USBTE_BIT;
-
- if (flags & MXC_EHCI_IPPUE_DOWN)
- v |= MX35_H1_IPPUE_DOWN_BIT;
-
- if (flags & MXC_EHCI_IPPUE_UP)
- v |= MX35_H1_IPPUE_UP_BIT;
-
- break;
- default:
- return -EINVAL;
- }
-
- writel(v, MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
- return 0;
- }
-#endif /* CONFIG_ARCH_MX3 */
-#ifdef CONFIG_MACH_MX27
- if (cpu_is_mx27()) {
- /* On i.MX27 we can use the i.MX31 USBCTRL bits, they
- * are identical
- */
- v = readl(MX27_IO_ADDRESS(MX27_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
- switch (port) {
- case 0: /* OTG port */
- v &= ~(MX31_OTG_SIC_MASK | MX31_OTG_PM_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX31_OTG_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX31_OTG_PM_BIT;
- break;
- case 1: /* H1 port */
- v &= ~(MX31_H1_SIC_MASK | MX31_H1_PM_BIT | MX31_H1_DT_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX31_H1_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX31_H1_PM_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX31_H1_DT_BIT;
-
- break;
- case 2: /* H2 port */
- v &= ~(MX31_H2_SIC_MASK | MX31_H2_PM_BIT | MX31_H2_DT_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK)
- << MX31_H2_SIC_SHIFT;
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX31_H2_PM_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX31_H2_DT_BIT;
-
- break;
- default:
- return -EINVAL;
- }
- writel(v, MX27_IO_ADDRESS(MX27_USB_BASE_ADDR +
- USBCTRL_OTGBASE_OFFSET));
- return 0;
- }
-#endif /* CONFIG_MACH_MX27 */
-#ifdef CONFIG_SOC_IMX51
- if (cpu_is_mx51()) {
- void __iomem *usb_base;
- void __iomem *usbotg_base;
- void __iomem *usbother_base;
- int ret = 0;
-
- usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
- if (!usb_base) {
- printk(KERN_ERR "%s(): ioremap failed\n", __func__);
- return -ENOMEM;
- }
-
- switch (port) {
- case 0: /* OTG port */
- usbotg_base = usb_base + MXC_OTG_OFFSET;
- break;
- case 1: /* Host 1 port */
- usbotg_base = usb_base + MXC_H1_OFFSET;
- break;
- case 2: /* Host 2 port */
- usbotg_base = usb_base + MXC_H2_OFFSET;
- break;
- default:
- printk(KERN_ERR"%s no such port %d\n", __func__, port);
- ret = -ENOENT;
- goto error;
- }
- usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
-
- switch (port) {
- case 0: /*OTG port */
- if (flags & MXC_EHCI_INTERNAL_PHY) {
- v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
-
- if (flags & MXC_EHCI_POWER_PINS_ENABLED) {
- /* OC/USBPWR is not used */
- v |= MXC_OTG_PHYCTRL_OC_DIS_BIT;
- } else {
- /* OC/USBPWR is used */
- v &= ~MXC_OTG_PHYCTRL_OC_DIS_BIT;
- }
- __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
-
- v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
- if (flags & MXC_EHCI_WAKEUP_ENABLED)
- v |= MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup enable */
- else
- v &= ~MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup disable */
- if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v |= MXC_OTG_UCTRL_OPM_BIT;
- else
- v &= ~MXC_OTG_UCTRL_OPM_BIT;
- __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
- }
- break;
- case 1: /* Host 1 */
- /*Host ULPI */
- v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
- if (flags & MXC_EHCI_WAKEUP_ENABLED) {
- /* HOST1 wakeup/ULPI intr enable */
- v |= (MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);
- } else {
- /* HOST1 wakeup/ULPI intr disable */
- v &= ~(MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);
- }
-
- if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v &= ~MXC_H1_UCTRL_H1PM_BIT; /* clear HOST1 power mask bit */
- else
- v |= MXC_H1_UCTRL_H1PM_BIT; /* set HOST1 power mask bit */
- __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
-
- v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
- if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v &= ~MXC_H1_OC_DIS_BIT; /* OC is used */
- else
- v |= MXC_H1_OC_DIS_BIT; /* OC is not used */
- __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
-
- v = __raw_readl(usbotg_base + MXC_USBCMD_OFFSET);
- if (flags & MXC_EHCI_ITC_NO_THRESHOLD)
- /* Interrupt Threshold Control:Immediate (no threshold) */
- v &= MXC_UCMD_ITC_NO_THRESHOLD_MASK;
- __raw_writel(v, usbotg_base + MXC_USBCMD_OFFSET);
- break;
- case 2: /* Host 2 ULPI */
- v = __raw_readl(usbother_base + MXC_USBH2CTRL_OFFSET);
- if (flags & MXC_EHCI_WAKEUP_ENABLED) {
- /* HOST2 wakeup/ULPI intr enable */
- v |= (MXC_H2_UCTRL_H2WIE_BIT | MXC_H2_UCTRL_H2UIE_BIT);
- } else {
- /* HOST2 wakeup/ULPI intr disable */
- v &= ~(MXC_H2_UCTRL_H2WIE_BIT | MXC_H2_UCTRL_H2UIE_BIT);
- }
-
- if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v &= ~MXC_H2_UCTRL_H2PM_BIT; /* clear HOST2 power mask bit */
- else
- v |= MXC_H2_UCTRL_H2PM_BIT; /* set HOST2 power mask bit */
- __raw_writel(v, usbother_base + MXC_USBH2CTRL_OFFSET);
- break;
- }
-
-error:
- iounmap(usb_base);
- return ret;
- }
-#endif
- printk(KERN_WARNING
- "%s() unable to setup USBCONTROL for this CPU\n", __func__);
- return -EINVAL;
-}
-EXPORT_SYMBOL(mxc_initialize_usb_hw);
-
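For reference, a minimal sketch of how board code typically drove the helper removed above; only mxc_initialize_usb_hw() and the MXC_EHCI_* flags are taken from the code itself, while the callback name, header path and flag combination are illustrative assumptions.

#include <linux/platform_device.h>
#include <mach/mxc_ehci.h>

/* Hypothetical board callback; the platform device id is assumed here to
 * match the port number (0 = OTG, 1 = Host 1, 2 = Host 2). */
static int example_usbotg_init(struct platform_device *pdev)
{
	/* Use the on-chip PHY and enable wakeup interrupts; power and
	 * overcurrent pins stay under software control because
	 * MXC_EHCI_POWER_PINS_ENABLED is not set. */
	return mxc_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY |
					       MXC_EHCI_WAKEUP_ENABLED);
}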
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
index d17b3c996b84..e396df8faef8 100644
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -233,6 +233,7 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
}
static struct irq_chip gpio_irq_chip = {
+ .name = "GPIO",
.irq_ack = gpio_ack_irq,
.irq_mask = gpio_mask_irq,
.irq_unmask = gpio_unmask_irq,
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index aea2cd3b6d15..a22ebe11a602 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -24,6 +24,16 @@ extern void mx50_map_io(void);
extern void mx51_map_io(void);
extern void mx53_map_io(void);
extern void mxc91231_map_io(void);
+extern void imx1_init_early(void);
+extern void imx21_init_early(void);
+extern void imx25_init_early(void);
+extern void imx27_init_early(void);
+extern void imx31_init_early(void);
+extern void imx35_init_early(void);
+extern void imx50_init_early(void);
+extern void imx51_init_early(void);
+extern void imx53_init_early(void);
+extern void mxc91231_init_early(void);
extern void mxc_init_irq(void __iomem *);
extern void tzic_init_irq(void __iomem *);
extern void mx1_init_irq(void);
diff --git a/arch/arm/plat-mxc/include/mach/esdhc.h b/arch/arm/plat-mxc/include/mach/esdhc.h
index a48a9aaa56b1..86003f411755 100644
--- a/arch/arm/plat-mxc/include/mach/esdhc.h
+++ b/arch/arm/plat-mxc/include/mach/esdhc.h
@@ -10,7 +10,17 @@
#ifndef __ASM_ARCH_IMX_ESDHC_H
#define __ASM_ARCH_IMX_ESDHC_H
+/**
+ * struct esdhc_platform_data - optional platform data for esdhc on i.MX
+ *
+ * strongly recommended for i.MX25/35, not needed for other variants
+ *
+ * @wp_gpio: gpio for write_protect (-EINVAL if unused)
+ * @cd_gpio: gpio for card_detect interrupt (-EINVAL if unused)
+ */
+
struct esdhc_platform_data {
- unsigned int wp_gpio; /* write protect pin */
+ unsigned int wp_gpio;
+ unsigned int cd_gpio;
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
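A minimal sketch of how a board file might fill the extended platform data documented above; the GPIO numbers and the variable name are placeholders, only struct esdhc_platform_data and its wp_gpio/cd_gpio fields come from this hunk.

#include <mach/esdhc.h>

/* Hypothetical i.MX35 board wiring; the pin numbers are made up. */
static const struct esdhc_platform_data example_sd1_pdata = {
	.wp_gpio = 57,	/* write-protect switch */
	.cd_gpio = 58,	/* card-detect line, used as an interrupt source */
};

The structure is then handed to the SoC's esdhc device registration helper so the driver can pick up the two GPIOs.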
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx3.h b/arch/arm/plat-mxc/include/mach/iomux-mx3.h
index cbaed295a2bf..c92f0b1f216f 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx3.h
@@ -112,12 +112,12 @@ enum iomux_gp_func {
* - sets up the iomux according to the configuration
* - if the pin is configured as a GPIO, we claim it through kernel gpiolib
*/
-int mxc_iomux_alloc_pin(const unsigned int pin, const char *label);
+int mxc_iomux_alloc_pin(unsigned int pin, const char *label);
/*
* sets up multiple pins
* convenient way to call the above function with tables
*/
-int mxc_iomux_setup_multiple_pins(unsigned int *pin_list, unsigned count,
+int mxc_iomux_setup_multiple_pins(const unsigned int *pin_list, unsigned count,
const char *label);
/*
@@ -126,12 +126,12 @@ int mxc_iomux_setup_multiple_pins(unsigned int *pin_list, unsigned count,
* - frees the GPIO if the pin was configured as GPIO
* - DOES NOT reconfigure the IOMUX in its reset state
*/
-void mxc_iomux_release_pin(const unsigned int pin);
+void mxc_iomux_release_pin(unsigned int pin);
/*
* releases multiple pins
* convenient way to call the above function with tables
*/
-void mxc_iomux_release_multiple_pins(unsigned int *pin_list, int count);
+void mxc_iomux_release_multiple_pins(const unsigned int *pin_list, int count);
/*
* This function enables/disables the general purpose function for a particular
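A short usage sketch for the two multi-pin helpers whose prototypes are constified above, illustrating that the pin tables can now live in rodata; apart from the two helpers themselves, the pad names, label and function names below are placeholders.

#include <linux/kernel.h>
#include <mach/iomux-mx3.h>

/* Hypothetical UART pin table; the assumed MX31_PIN_* definitions stand
 * in for whatever pads a real board routes to UART1. */
static const unsigned int example_uart1_pins[] = {
	MX31_PIN_TXD1__TXD1,
	MX31_PIN_RXD1__RXD1,
	MX31_PIN_CTS1__CTS1,
	MX31_PIN_RTS1__RTS1,
};

static int example_claim_uart1_pins(void)
{
	/* Claims the whole table in one call; returns 0 or a negative errno. */
	return mxc_iomux_setup_multiple_pins(example_uart1_pins,
					     ARRAY_SIZE(example_uart1_pins),
					     "uart1");
}

static void example_release_uart1_pins(void)
{
	mxc_iomux_release_multiple_pins(example_uart1_pins,
					ARRAY_SIZE(example_uart1_pins));
}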
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx35.h b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
index 2a24bae1b878..3117c18bbbd9 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx35.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
@@ -989,13 +989,13 @@
#define MX35_PAD_ATA_DATA2__IPU_DIAGB_9 IOMUX_PAD(0x6e8, 0x284, 6, 0x0, 0, NO_PAD_CTRL)
#define MX35_PAD_ATA_DATA2__ARM11P_TOP_TRACE_28 IOMUX_PAD(0x6e8, 0x284, 7, 0x0, 0, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__ATA_DATA_3 IOMUX_PAD(0x6e8, 0x288, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__ESDHC3_CLK IOMUX_PAD(0x6e8, 0x288, 1, 0x814, 1, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__USB_TOP_USBOTG_DATA_5 IOMUX_PAD(0x6e8, 0x288, 2, 0x9b8, 1, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__CSPI2_SCLK IOMUX_PAD(0x6e8, 0x288, 4, 0x7e0, 2, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__GPIO2_16 IOMUX_PAD(0x6e8, 0x288, 5, 0x884, 1, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__IPU_DIAGB_10 IOMUX_PAD(0x6e8, 0x288, 6, 0x0, 0, NO_PAD_CTRL)
-#define MX35_PAD_ATA_DATA3__ARM11P_TOP_TRACE_29 IOMUX_PAD(0x6e8, 0x288, 7, 0x0, 0, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__ATA_DATA_3 IOMUX_PAD(0x6ec, 0x288, 0, 0x0, 0, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__ESDHC3_CLK IOMUX_PAD(0x6ec, 0x288, 1, 0x814, 1, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__USB_TOP_USBOTG_DATA_5 IOMUX_PAD(0x6ec, 0x288, 2, 0x9b8, 1, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__CSPI2_SCLK IOMUX_PAD(0x6ec, 0x288, 4, 0x7e0, 2, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__GPIO2_16 IOMUX_PAD(0x6ec, 0x288, 5, 0x884, 1, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__IPU_DIAGB_10 IOMUX_PAD(0x6ec, 0x288, 6, 0x0, 0, NO_PAD_CTRL)
+#define MX35_PAD_ATA_DATA3__ARM11P_TOP_TRACE_29 IOMUX_PAD(0x6ec, 0x288, 7, 0x0, 0, NO_PAD_CTRL)
#define MX35_PAD_ATA_DATA4__ATA_DATA_4 IOMUX_PAD(0x6f0, 0x28c, 0, 0x0, 0, NO_PAD_CTRL)
#define MX35_PAD_ATA_DATA4__ESDHC3_CMD IOMUX_PAD(0x6f0, 0x28c, 1, 0x818, 1, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx50.h b/arch/arm/plat-mxc/include/mach/iomux-mx50.h
index 058a922ca147..98e7fd0b9083 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx50.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx50.h
@@ -86,7 +86,7 @@
#define MX50_PAD_I2C1_SCL__I2C1_SCL IOMUX_PAD(0x2EC, 0x40, IOMUX_CONFIG_SION, 0x0, 0, \
MX50_I2C_PAD_CTRL)
#define MX50_PAD_I2C1_SCL__GPIO_6_18 IOMUX_PAD(0x2EC, 0x40, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_I2C1_SCL__UART2_TXD IOMUX_PAD(0x2EC, 0x40, 2, 0x7cc, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_I2C1_SCL__UART2_TXD IOMUX_PAD(0x2EC, 0x40, 2, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_I2C1_SDA__I2C1_SDA IOMUX_PAD(0x2F0, 0x44, IOMUX_CONFIG_SION, 0x0, 0, \
MX50_I2C_PAD_CTRL)
@@ -96,7 +96,7 @@
#define MX50_PAD_I2C2_SCL__I2C2_SCL IOMUX_PAD(0x2F4, 0x48, IOMUX_CONFIG_SION, 0x0, 0, \
MX50_I2C_PAD_CTRL)
#define MX50_PAD_I2C2_SCL__GPIO_6_20 IOMUX_PAD(0x2F4, 0x48, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_I2C2_SCL__UART2_CTS IOMUX_PAD(0x2F4, 0x48, 2, 0x7c8, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_I2C2_SCL__UART2_CTS IOMUX_PAD(0x2F4, 0x48, 2, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_I2C2_SCL__DCDC_OK IOMUX_PAD(0x2F4, 0x48, 7, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_I2C2_SDA__I2C2_SDA IOMUX_PAD(0x2F8, 0x4C, IOMUX_CONFIG_SION, 0x0, 0, \
@@ -172,7 +172,7 @@
#define MX50_PAD_SSI_RXFS__AUD3_RXFS IOMUX_PAD(0x328, 0x7C, 0, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_SSI_RXFS__GPIO_6_4 IOMUX_PAD(0x328, 0x7C, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_SSI_RXFS__UART5_TXD IOMUX_PAD(0x328, 0x7C, 2, 0x7e4, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_SSI_RXFS__UART5_TXD IOMUX_PAD(0x328, 0x7C, 2, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_SSI_RXFS__WEIM_D6 IOMUX_PAD(0x328, 0x7C, 3, 0x804, 0, NO_PAD_CTRL)
#define MX50_PAD_SSI_RXFS__CSPI_SS2 IOMUX_PAD(0x328, 0x7C, 4, 0x6f0, 0, MX50_CSPI_SS_PAD)
#define MX50_PAD_SSI_RXFS__FEC_COL IOMUX_PAD(0x328, 0x7C, 5, 0x770, 0, PAD_CTL_DSE_HIGH)
@@ -186,25 +186,25 @@
#define MX50_PAD_SSI_RXC__FEC_RX_CLK IOMUX_PAD(0x32C, 0x80, 5, 0x780, 0, NO_PAD_CTRL)
#define MX50_PAD_SSI_RXC__FEC_MDIO IOMUX_PAD(0x32C, 0x80, 6, 0x774, 1, MX50_FEC_PAD_CTRL)
-#define MX50_PAD_UART1_TXD__UART1_TXD IOMUX_PAD(0x330, 0x84, 0, 0x7c4, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART1_TXD__UART1_TXD IOMUX_PAD(0x330, 0x84, 0, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART1_TXD__GPIO_6_6 IOMUX_PAD(0x330, 0x84, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_UART1_RXD__UART1_RXD IOMUX_PAD(0x334, 0x88, 0, 0x7c4, 1, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART1_RXD__GPIO_6_7 IOMUX_PAD(0x334, 0x88, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_UART1_CTS__UART1_CTS IOMUX_PAD(0x338, 0x8C, 0, 0x7c0, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART1_CTS__UART1_CTS IOMUX_PAD(0x338, 0x8C, 0, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART1_CTS__GPIO_6_8 IOMUX_PAD(0x338, 0x8C, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_UART1_CTS__UART5_TXD IOMUX_PAD(0x338, 0x8C, 2, 0x7e4, 2, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART1_CTS__UART5_TXD IOMUX_PAD(0x338, 0x8C, 2, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART1_CTS__SD4_D4 IOMUX_PAD(0x338, 0x8C, 4, 0x760, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART1_CTS__SD4_CMD IOMUX_PAD(0x338, 0x8C, 5, 0x74c, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART1_RTS__UART1_RTS IOMUX_PAD(0x33C, 0x90, 0, 0x7c0, 1, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART1_RTS__GPIO_6_9 IOMUX_PAD(0x33C, 0x90, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_UART1_RTS__UART5_RXD IOMUX_PAD(0x33C, 0x90, 2, 0x7e4, 3, MX50_UART_PAD_CTRL)
-#define MX50_PAD_UART1_RTS__SD4_D5 IOMUX_PAD(0x33C, 0x90, 4, 0x0, 1, MX50_SD_PAD_CTRL)
-#define MX50_PAD_UART1_RTS__SD4_CLK IOMUX_PAD(0x33C, 0x90, 5, 0x0, 1, MX50_SD_PAD_CTRL)
+#define MX50_PAD_UART1_RTS__SD4_D5 IOMUX_PAD(0x33C, 0x90, 4, 0x764, 0, MX50_SD_PAD_CTRL)
+#define MX50_PAD_UART1_RTS__SD4_CLK IOMUX_PAD(0x33C, 0x90, 5, 0x748, 0, MX50_SD_PAD_CTRL)
-#define MX50_PAD_UART2_TXD__UART2_TXD IOMUX_PAD(0x340, 0x94, 0, 0x7cc, 2, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART2_TXD__UART2_TXD IOMUX_PAD(0x340, 0x94, 0, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART2_TXD__GPIO_6_10 IOMUX_PAD(0x340, 0x94, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_UART2_TXD__SD4_D6 IOMUX_PAD(0x340, 0x94, 4, 0x768, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART2_TXD__SD4_D4 IOMUX_PAD(0x340, 0x94, 5, 0x760, 1, MX50_SD_PAD_CTRL)
@@ -214,7 +214,7 @@
#define MX50_PAD_UART2_RXD__SD4_D7 IOMUX_PAD(0x344, 0x98, 4, 0x76c, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART2_RXD__SD4_D5 IOMUX_PAD(0x344, 0x98, 5, 0x764, 1, MX50_SD_PAD_CTRL)
-#define MX50_PAD_UART2_CTS__UART2_CTS IOMUX_PAD(0x348, 0x9C, 0, 0x7c8, 2, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART2_CTS__UART2_CTS IOMUX_PAD(0x348, 0x9C, 0, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART2_CTS__GPIO_6_12 IOMUX_PAD(0x348, 0x9C, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_UART2_CTS__SD4_CMD IOMUX_PAD(0x348, 0x9C, 4, 0x74c, 1, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART2_CTS__SD4_D6 IOMUX_PAD(0x348, 0x9C, 5, 0x768, 1, MX50_SD_PAD_CTRL)
@@ -224,7 +224,7 @@
#define MX50_PAD_UART2_RTS__SD4_CLK IOMUX_PAD(0x34C, 0xA0, 4, 0x748, 1, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART2_RTS__SD4_D7 IOMUX_PAD(0x34C, 0xA0, 5, 0x76c, 1, MX50_SD_PAD_CTRL)
-#define MX50_PAD_UART3_TXD__UART3_TXD IOMUX_PAD(0x350, 0xA4, 0, 0x7d4, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART3_TXD__UART3_TXD IOMUX_PAD(0x350, 0xA4, 0, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART3_TXD__GPIO_6_14 IOMUX_PAD(0x350, 0xA4, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_UART3_TXD__SD1_D4 IOMUX_PAD(0x350, 0xA4, 3, 0x0, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART3_TXD__SD4_D0 IOMUX_PAD(0x350, 0xA4, 4, 0x750, 0, MX50_SD_PAD_CTRL)
@@ -238,9 +238,9 @@
#define MX50_PAD_UART3_RXD__SD2_CD IOMUX_PAD(0x354, 0xA8, 5, 0x740, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART3_RXD__WEIM_D13 IOMUX_PAD(0x354, 0xA8, 6, 0x820, 0, NO_PAD_CTRL)
-#define MX50_PAD_UART4_TXD__UART4_TXD IOMUX_PAD(0x358, 0xAC, 0, 0x7dc, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART4_TXD__UART4_TXD IOMUX_PAD(0x358, 0xAC, 0, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART4_TXD__GPIO_6_16 IOMUX_PAD(0x358, 0xAC, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_UART4_TXD__UART3_CTS IOMUX_PAD(0x358, 0xAC, 2, 0x7d0, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_UART4_TXD__UART3_CTS IOMUX_PAD(0x358, 0xAC, 2, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_UART4_TXD__SD1_D6 IOMUX_PAD(0x358, 0xAC, 3, 0x0, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART4_TXD__SD4_D2 IOMUX_PAD(0x358, 0xAC, 4, 0x758, 0, MX50_SD_PAD_CTRL)
#define MX50_PAD_UART4_TXD__SD2_LCTL IOMUX_PAD(0x358, 0xAC, 5, 0x0, 0, MX50_SD_PAD_CTRL)
@@ -278,7 +278,7 @@
#define MX50_PAD_ECSPI1_MOSI__GPIO_4_13 IOMUX_PAD(0x374, 0xC8, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI1_MOSI__CSPI_SS1 IOMUX_PAD(0x374, 0xC8, 2, 0x6ec, 1, MX50_CSPI_SS_PAD)
#define MX50_PAD_ECSPI1_MOSI__ECSPI2_SS1 IOMUX_PAD(0x374, 0xC8, 3, 0x0, 0, MX50_CSPI_SS_PAD)
-#define MX50_PAD_ECSPI1_MOSI__UART3_CTS IOMUX_PAD(0x374, 0xC8, 4, 0x7d0, 3, MX50_UART_PAD_CTRL)
+#define MX50_PAD_ECSPI1_MOSI__UART3_CTS IOMUX_PAD(0x374, 0xC8, 4, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_ECSPI1_MOSI__EPDC_SDCE7 IOMUX_PAD(0x374, 0xC8, 5, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI1_MOSI__WEIM_D9 IOMUX_PAD(0x374, 0xC8, 7, 0x810, 0, NO_PAD_CTRL)
@@ -294,7 +294,7 @@
#define MX50_PAD_ECSPI1_SS0__GPIO_4_15 IOMUX_PAD(0x37C, 0xD0, 1, 0x0, 0, PAD_CTL_PUS_100K_UP)
#define MX50_PAD_ECSPI1_SS0__CSPI_SS3 IOMUX_PAD(0x37C, 0xD0, 2, 0x6f4, 1, MX50_CSPI_SS_PAD)
#define MX50_PAD_ECSPI1_SS0__ECSPI2_SS3 IOMUX_PAD(0x37C, 0xD0, 3, 0x0, 0, MX50_CSPI_SS_PAD)
-#define MX50_PAD_ECSPI1_SS0__UART4_CTS IOMUX_PAD(0x37C, 0xD0, 4, 0x7d8, 1, MX50_UART_PAD_CTRL)
+#define MX50_PAD_ECSPI1_SS0__UART4_CTS IOMUX_PAD(0x37C, 0xD0, 4, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_ECSPI1_SS0__EPDC_SDCE9 IOMUX_PAD(0x37C, 0xD0, 5, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI1_SS0__WEIM_D11 IOMUX_PAD(0x37C, 0xD0, 7, 0x818, 0, NO_PAD_CTRL)
@@ -311,17 +311,17 @@
#define MX50_PAD_ECSPI2_MOSI__GPIO_4_17 IOMUX_PAD(0x384, 0xD8, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MOSI__ELCDIF_RD IOMUX_PAD(0x384, 0xD8, 2, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MOSI__ECSPI1_SS1 IOMUX_PAD(0x384, 0xD8, 3, 0x0, 0, MX50_CSPI_SS_PAD)
-#define MX50_PAD_ECSPI2_MOSI__UART5_CTS IOMUX_PAD(0x384, 0xD8, 4, 0x7e0, 1, MX50_UART_PAD_CTRL)
+#define MX50_PAD_ECSPI2_MOSI__UART5_CTS IOMUX_PAD(0x384, 0xD8, 4, 0x0, 0, MX50_UART_PAD_CTRL)
#define MX50_PAD_ECSPI2_MOSI__ELCDIF_EN IOMUX_PAD(0x384, 0xD8, 5, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MOSI__NANDF_CEN5 IOMUX_PAD(0x384, 0xD8, 6, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MOSI__WEIM_D9 IOMUX_PAD(0x384, 0xD8, 7, 0x810, 1, NO_PAD_CTRL)
-#define MX50_PAD_ECSPI2_MISO__ECSPI2_MISO IOMUX_PAD(0x388, 0xDC, 0, 0x73c, 0, NO_PAD_CTRL)
+#define MX50_PAD_ECSPI2_MISO__ECSPI2_MISO IOMUX_PAD(0x388, 0xDC, 0, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MISO__GPIO_4_18 IOMUX_PAD(0x388, 0xDC, 1, 0x0, 0, PAD_CTL_PUS_100K_UP)
#define MX50_PAD_ECSPI2_MISO__ELCDIF_RS IOMUX_PAD(0x388, 0xDC, 2, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MISO__ECSPI1_SS2 IOMUX_PAD(0x388, 0xDC, 3, 0x0, 0, MX50_CSPI_SS_PAD)
-#define MX50_PAD_ECSPI2_MISO__UART5_TXD IOMUX_PAD(0x388, 0xDC, 4, 0x7e4, 4, MX50_UART_PAD_CTRL)
-#define MX50_PAD_ECSPI2_MISO__ELCDIF_VSYNC IOMUX_PAD(0x388, 0xDC, 5, 0x0, 0, NO_PAD_CTRL)
+#define MX50_PAD_ECSPI2_MISO__UART5_TXD IOMUX_PAD(0x388, 0xDC, 4, 0x0, 0, MX50_UART_PAD_CTRL)
+#define MX50_PAD_ECSPI2_MISO__ELCDIF_VSYNC IOMUX_PAD(0x388, 0xDC, 5, 0x73c, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MISO__NANDF_CEN6 IOMUX_PAD(0x388, 0xDC, 6, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_ECSPI2_MISO__WEIM_D10 IOMUX_PAD(0x388, 0xDC, 7, 0x814, 1, NO_PAD_CTRL)
@@ -503,7 +503,7 @@
#define MX50_PAD_DISP_RD__ELCDIF_EN IOMUX_PAD(0x430, 0x150, 2, 0x0, 0, MX50_ELCDIF_PAD_CTRL)
#define MX50_PAD_DISP_RD__WEIM_A25 IOMUX_PAD(0x430, 0x150, 3, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_DISP_RS__ELCDIF_RS IOMUX_PAD(0x434, 0x154, 0, 0x73c, 1, MX50_ELCDIF_PAD_CTRL)
+#define MX50_PAD_DISP_RS__ELCDIF_RS IOMUX_PAD(0x434, 0x154, 0, 0x0, 0, MX50_ELCDIF_PAD_CTRL)
#define MX50_PAD_DISP_RS__GPIO_2_17 IOMUX_PAD(0x434, 0x154, 1, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_DISP_RS__ELCDIF_VSYNC IOMUX_PAD(0x434, 0x154, 2, 0x73c, 1, MX50_ELCDIF_PAD_CTRL)
#define MX50_PAD_DISP_RS__WEIM_A26 IOMUX_PAD(0x434, 0x154, 3, 0x0, 0, NO_PAD_CTRL)
@@ -691,8 +691,8 @@
#define MX50_PAD_EPDC_D9__EPDC_D9 IOMUX_PAD(0x570, 0x1D4, 0, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_EPDC_D9__GPIO_3_9 IOMUX_PAD(0x570, 0x1D4, 1, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_EPDC_D9__WEIM_D9 IOMUX_PAD(0x570, 0x1D4, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX50_PAD_EPDC_D9__ELCDIF_D25 IOMUX_PAD(0x570, 0x1D4, 3, 0x810, 2, MX50_ELCDIF_PAD_CTRL)
+#define MX50_PAD_EPDC_D9__WEIM_D9 IOMUX_PAD(0x570, 0x1D4, 2, 0x810, 2, NO_PAD_CTRL)
+#define MX50_PAD_EPDC_D9__ELCDIF_D25 IOMUX_PAD(0x570, 0x1D4, 3, 0x0, 0, MX50_ELCDIF_PAD_CTRL)
#define MX50_PAD_EPDC_D10__EPDC_D10 IOMUX_PAD(0x574, 0x1D8, 0, 0x0, 0, NO_PAD_CTRL)
#define MX50_PAD_EPDC_D10__GPIO_3_10 IOMUX_PAD(0x574, 0x1D8, 1, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx51.h b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
index b6767f90ef14..6056cf2cd006 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx51.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
@@ -473,7 +473,7 @@
#define _MX51_PAD_UART2_RXD__UART2_RXD IOMUX_PAD(0x628, 0x238, 0, 0x09ec, 2, 0)
#define _MX51_PAD_UART2_TXD__FIRI_RXD IOMUX_PAD(0x62c, 0x23c, 1, 0x0000, 0, 0)
#define _MX51_PAD_UART2_TXD__GPIO1_21 IOMUX_PAD(0x62c, 0x23c, 3, 0x0000, 0, 0)
-#define _MX51_PAD_UART2_TXD__UART2_TXD IOMUX_PAD(0x62c, 0x23c, 0, 0x09ec, 3, 0)
+#define _MX51_PAD_UART2_TXD__UART2_TXD IOMUX_PAD(0x62c, 0x23c, 0, 0x0000, 0, 0)
#define _MX51_PAD_UART3_RXD__CSI1_D0 IOMUX_PAD(0x630, 0x240, 2, 0x0000, 0, 0)
#define _MX51_PAD_UART3_RXD__GPIO1_22 IOMUX_PAD(0x630, 0x240, 3, 0x0000, 0, 0)
#define _MX51_PAD_UART3_RXD__UART1_DTR IOMUX_PAD(0x630, 0x240, 0, 0x0000, 0, 0)
@@ -528,7 +528,7 @@
#define _MX51_PAD_USBH1_DATA1__UART2_RXD IOMUX_PAD(0x68c, 0x28c, 1, 0x09ec, 4, 0)
#define _MX51_PAD_USBH1_DATA1__USBH1_DATA1 IOMUX_PAD(0x68c, 0x28c, 0, 0x0000, 0, 0)
#define _MX51_PAD_USBH1_DATA2__GPIO1_13 IOMUX_PAD(0x690, 0x290, 2, 0x0000, 0, 0)
-#define _MX51_PAD_USBH1_DATA2__UART2_TXD IOMUX_PAD(0x690, 0x290, 1, 0x09ec, 5, 0)
+#define _MX51_PAD_USBH1_DATA2__UART2_TXD IOMUX_PAD(0x690, 0x290, 1, 0x0000, 0, 0)
#define _MX51_PAD_USBH1_DATA2__USBH1_DATA2 IOMUX_PAD(0x690, 0x290, 0, 0x0000, 0, 0)
#define _MX51_PAD_USBH1_DATA3__GPIO1_14 IOMUX_PAD(0x694, 0x294, 2, 0x0000, 0, 0)
#define _MX51_PAD_USBH1_DATA3__UART2_RTS IOMUX_PAD(0x694, 0x294, 1, 0x09e8, 5, 0)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx53.h b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
index 68e11d7ab79d..e95d9cb8aeb7 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx53.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,305 +21,2358 @@
#include <mach/iomux-v3.h>
-/*
- * various IOMUX alternate output functions (1-7)
- */
-typedef enum iomux_config {
- IOMUX_CONFIG_ALT0,
- IOMUX_CONFIG_ALT1,
- IOMUX_CONFIG_ALT2,
- IOMUX_CONFIG_ALT3,
- IOMUX_CONFIG_ALT4,
- IOMUX_CONFIG_ALT5,
- IOMUX_CONFIG_ALT6,
- IOMUX_CONFIG_ALT7,
- IOMUX_CONFIG_GPIO, /* added to help user use GPIO mode */
-} iomux_pin_cfg_t;
-
/* These 2 defines are for pins that may not have a mux register, but could
* have a pad setting register, and vice-versa. */
-#define NON_MUX_I 0x00
#define NON_PAD_I 0x00
#define MX53_UART_PAD_CTRL (PAD_CTL_PKE | PAD_CTL_PUE | \
PAD_CTL_DSE_HIGH | PAD_CTL_SRE_FAST | PAD_CTL_HYS)
-/* UART1 */
-#define MX53_PAD_CSI0_D10__UART1_TXD IOMUX_PAD(0x414, 0xE8, 2, 0x0, 0, MX53_UART_PAD_CTRL)
-#define MX53_PAD_CSI0_D11__UART1_RXD IOMUX_PAD(0x418, 0xEC, 2, 0x878, 1, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_DIOW__UART1_TXD IOMUX_PAD(0x5F0, 0x270, 3, 0x0, 0, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_DMACK__UART1_RXD IOMUX_PAD(0x5F4, 0x274, 3, 0x880, 3, MX53_UART_PAD_CTRL)
-
-/* UART2 */
-#define MX53_PAD_ATA_BUFFER_EN__UART2_RXD IOMUX_PAD(0x5FC, 0x27C, 3, 0x880, 3, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_DMARQ__UART2_TXD IOMUX_PAD(0x5F8, 0x278, 3, 0x0, 0, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_DIOR__UART2_RTS IOMUX_PAD(0x604, 0x284, 3, 0x87C, 3, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_INTRQ__UART2_CTS IOMUX_PAD(0x600, 0x280, 3, 0x0, 0, MX53_UART_PAD_CTRL)
+#define MX53_SDHC_PAD_CTRL (PAD_CTL_HYS | PAD_CTL_PKE | PAD_CTL_PUE | \
+ PAD_CTL_PUS_47K_UP | PAD_CTL_DSE_HIGH | \
+ PAD_CTL_SRE_FAST)
-/* UART3 */
-#define MX53_PAD_ATA_CS_0__UART3_TXD IOMUX_PAD(0x61C, 0x29C, 4, 0x0, 0, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_CS_1__UART3_RXD IOMUX_PAD(0x620, 0x2A0, 4, 0x888, 3, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_DA_1__UART3_CTS IOMUX_PAD(0x614, 0x294, 4, 0x0, 0, MX53_UART_PAD_CTRL)
-#define MX53_PAD_ATA_DA_2__UART3_RTS IOMUX_PAD(0x618, 0x298, 4, 0x884, 5, MX53_UART_PAD_CTRL)
+#define _MX53_PAD_GPIO_19__KPP_COL_5 IOMUX_PAD(0x348, 0x20, 0, 0x840, 0, 0)
+#define _MX53_PAD_GPIO_19__GPIO4_5 IOMUX_PAD(0x348, 0x20, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_19__CCM_CLKO IOMUX_PAD(0x348, 0x20, 2, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_19__SPDIF_OUT1 IOMUX_PAD(0x348, 0x20, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_19__RTC_CE_RTC_EXT_TRIG2 IOMUX_PAD(0x348, 0x20, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_19__ECSPI1_RDY IOMUX_PAD(0x348, 0x20, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_19__FEC_TDATA_3 IOMUX_PAD(0x348, 0x20, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_19__SRC_INT_BOOT IOMUX_PAD(0x348, 0x20, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL0__KPP_COL_0 IOMUX_PAD(0x34C, 0x24, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL0__GPIO4_6 IOMUX_PAD(0x34C, 0x24, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC IOMUX_PAD(0x34C, 0x24, 2, 0x758, 0, 0)
+#define _MX53_PAD_KEY_COL0__UART4_TXD_MUX IOMUX_PAD(0x34C, 0x24, 4, 0x890, 0, 0)
+#define _MX53_PAD_KEY_COL0__ECSPI1_SCLK IOMUX_PAD(0x34C, 0x24, 5, 0x79C, 0, 0)
+#define _MX53_PAD_KEY_COL0__FEC_RDATA_3 IOMUX_PAD(0x34C, 0x24, 6, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL0__SRC_ANY_PU_RST IOMUX_PAD(0x34C, 0x24, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW0__KPP_ROW_0 IOMUX_PAD(0x350, 0x28, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW0__GPIO4_7 IOMUX_PAD(0x350, 0x28, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD IOMUX_PAD(0x350, 0x28, 2, 0x74C, 0, 0)
+#define _MX53_PAD_KEY_ROW0__UART4_RXD_MUX IOMUX_PAD(0x350, 0x28, 4, 0x890, 1, 0)
+#define _MX53_PAD_KEY_ROW0__ECSPI1_MOSI IOMUX_PAD(0x350, 0x28, 5, 0x7A4, 0, 0)
+#define _MX53_PAD_KEY_ROW0__FEC_TX_ER IOMUX_PAD(0x350, 0x28, 6, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL1__KPP_COL_1 IOMUX_PAD(0x354, 0x2C, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL1__GPIO4_8 IOMUX_PAD(0x354, 0x2C, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS IOMUX_PAD(0x354, 0x2C, 2, 0x75C, 0, 0)
+#define _MX53_PAD_KEY_COL1__UART5_TXD_MUX IOMUX_PAD(0x354, 0x2C, 4, 0x898, 0, 0)
+#define _MX53_PAD_KEY_COL1__ECSPI1_MISO IOMUX_PAD(0x354, 0x2C, 5, 0x7A0, 0, 0)
+#define _MX53_PAD_KEY_COL1__FEC_RX_CLK IOMUX_PAD(0x354, 0x2C, 6, 0x808, 0, 0)
+#define _MX53_PAD_KEY_COL1__USBPHY1_TXREADY IOMUX_PAD(0x354, 0x2C, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW1__KPP_ROW_1 IOMUX_PAD(0x358, 0x30, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW1__GPIO4_9 IOMUX_PAD(0x358, 0x30, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD IOMUX_PAD(0x358, 0x30, 2, 0x748, 0, 0)
+#define _MX53_PAD_KEY_ROW1__UART5_RXD_MUX IOMUX_PAD(0x358, 0x30, 4, 0x898, 1, 0)
+#define _MX53_PAD_KEY_ROW1__ECSPI1_SS0 IOMUX_PAD(0x358, 0x30, 5, 0x7A8, 0, 0)
+#define _MX53_PAD_KEY_ROW1__FEC_COL IOMUX_PAD(0x358, 0x30, 6, 0x800, 0, 0)
+#define _MX53_PAD_KEY_ROW1__USBPHY1_RXVALID IOMUX_PAD(0x358, 0x30, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL2__KPP_COL_2 IOMUX_PAD(0x35C, 0x34, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL2__GPIO4_10 IOMUX_PAD(0x35C, 0x34, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL2__CAN1_TXCAN IOMUX_PAD(0x35C, 0x34, 2, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL2__FEC_MDIO IOMUX_PAD(0x35C, 0x34, 4, 0x804, 0, 0)
+#define _MX53_PAD_KEY_COL2__ECSPI1_SS1 IOMUX_PAD(0x35C, 0x34, 5, 0x7AC, 0, 0)
+#define _MX53_PAD_KEY_COL2__FEC_RDATA_2 IOMUX_PAD(0x35C, 0x34, 6, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL2__USBPHY1_RXACTIVE IOMUX_PAD(0x35C, 0x34, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW2__KPP_ROW_2 IOMUX_PAD(0x360, 0x38, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW2__GPIO4_11 IOMUX_PAD(0x360, 0x38, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW2__CAN1_RXCAN IOMUX_PAD(0x360, 0x38, 2, 0x760, 0, 0)
+#define _MX53_PAD_KEY_ROW2__FEC_MDC IOMUX_PAD(0x360, 0x38, 4, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW2__ECSPI1_SS2 IOMUX_PAD(0x360, 0x38, 5, 0x7B0, 0, 0)
+#define _MX53_PAD_KEY_ROW2__FEC_TDATA_2 IOMUX_PAD(0x360, 0x38, 6, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW2__USBPHY1_RXERROR IOMUX_PAD(0x360, 0x38, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL3__KPP_COL_3 IOMUX_PAD(0x364, 0x3C, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL3__GPIO4_12 IOMUX_PAD(0x364, 0x3C, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL3__USBOH3_H2_DP IOMUX_PAD(0x364, 0x3C, 2, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL3__SPDIF_IN1 IOMUX_PAD(0x364, 0x3C, 3, 0x870, 0, 0)
+#define _MX53_PAD_KEY_COL3__I2C2_SCL IOMUX_PAD(0x364, 0x3C, 4 | IOMUX_CONFIG_SION, 0x81C, 0, 0)
+#define _MX53_PAD_KEY_COL3__ECSPI1_SS3 IOMUX_PAD(0x364, 0x3C, 5, 0x7B4, 0, 0)
+#define _MX53_PAD_KEY_COL3__FEC_CRS IOMUX_PAD(0x364, 0x3C, 6, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK IOMUX_PAD(0x364, 0x3C, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW3__KPP_ROW_3 IOMUX_PAD(0x368, 0x40, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW3__GPIO4_13 IOMUX_PAD(0x368, 0x40, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW3__USBOH3_H2_DM IOMUX_PAD(0x368, 0x40, 2, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK IOMUX_PAD(0x368, 0x40, 3, 0x768, 0, 0)
+#define _MX53_PAD_KEY_ROW3__I2C2_SDA IOMUX_PAD(0x368, 0x40, 4 | IOMUX_CONFIG_SION, 0x820, 0, 0)
+#define _MX53_PAD_KEY_ROW3__OSC32K_32K_OUT IOMUX_PAD(0x368, 0x40, 5, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW3__CCM_PLL4_BYP IOMUX_PAD(0x368, 0x40, 6, 0x77C, 0, 0)
+#define _MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 IOMUX_PAD(0x368, 0x40, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL4__KPP_COL_4 IOMUX_PAD(0x36C, 0x44, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL4__GPIO4_14 IOMUX_PAD(0x36C, 0x44, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL4__CAN2_TXCAN IOMUX_PAD(0x36C, 0x44, 2, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL4__IPU_SISG_4 IOMUX_PAD(0x36C, 0x44, 3, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL4__UART5_RTS IOMUX_PAD(0x36C, 0x44, 4, 0x894, 0, 0)
+#define _MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC IOMUX_PAD(0x36C, 0x44, 5, 0x89C, 0, 0)
+#define _MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 IOMUX_PAD(0x36C, 0x44, 7, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW4__KPP_ROW_4 IOMUX_PAD(0x370, 0x48, 0, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW4__GPIO4_15 IOMUX_PAD(0x370, 0x48, 1, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW4__CAN2_RXCAN IOMUX_PAD(0x370, 0x48, 2, 0x764, 0, 0)
+#define _MX53_PAD_KEY_ROW4__IPU_SISG_5 IOMUX_PAD(0x370, 0x48, 3, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW4__UART5_CTS IOMUX_PAD(0x370, 0x48, 4, 0x894, 1, 0)
+#define _MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR IOMUX_PAD(0x370, 0x48, 5, 0x0, 0, 0)
+#define _MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID IOMUX_PAD(0x370, 0x48, 7, 0x0, 0, 0)
+#define _MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK IOMUX_PAD(0x378, 0x4C, 0, 0x0, 0, 0)
+#define _MX53_PAD_DI0_DISP_CLK__GPIO4_16 IOMUX_PAD(0x378, 0x4C, 1, 0x0, 0, 0)
+#define _MX53_PAD_DI0_DISP_CLK__USBOH3_USBH2_DIR IOMUX_PAD(0x378, 0x4C, 2, 0x0, 0, 0)
+#define _MX53_PAD_DI0_DISP_CLK__SDMA_DEBUG_CORE_STATE_0 IOMUX_PAD(0x378, 0x4C, 5, 0x0, 0, 0)
+#define _MX53_PAD_DI0_DISP_CLK__EMI_EMI_DEBUG_0 IOMUX_PAD(0x378, 0x4C, 6, 0x0, 0, 0)
+#define _MX53_PAD_DI0_DISP_CLK__USBPHY1_AVALID IOMUX_PAD(0x378, 0x4C, 7, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN15__IPU_DI0_PIN15 IOMUX_PAD(0x37C, 0x50, 0, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN15__GPIO4_17 IOMUX_PAD(0x37C, 0x50, 1, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN15__AUDMUX_AUD6_TXC IOMUX_PAD(0x37C, 0x50, 2, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN15__SDMA_DEBUG_CORE_STATE_1 IOMUX_PAD(0x37C, 0x50, 5, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN15__EMI_EMI_DEBUG_1 IOMUX_PAD(0x37C, 0x50, 6, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN15__USBPHY1_BVALID IOMUX_PAD(0x37C, 0x50, 7, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN2__IPU_DI0_PIN2 IOMUX_PAD(0x380, 0x54, 0, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN2__GPIO4_18 IOMUX_PAD(0x380, 0x54, 1, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN2__AUDMUX_AUD6_TXD IOMUX_PAD(0x380, 0x54, 2, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN2__SDMA_DEBUG_CORE_STATE_2 IOMUX_PAD(0x380, 0x54, 5, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN2__EMI_EMI_DEBUG_2 IOMUX_PAD(0x380, 0x54, 6, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN2__USBPHY1_ENDSESSION IOMUX_PAD(0x380, 0x54, 7, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN3__IPU_DI0_PIN3 IOMUX_PAD(0x384, 0x58, 0, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN3__GPIO4_19 IOMUX_PAD(0x384, 0x58, 1, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS IOMUX_PAD(0x384, 0x58, 2, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN3__SDMA_DEBUG_CORE_STATE_3 IOMUX_PAD(0x384, 0x58, 5, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN3__EMI_EMI_DEBUG_3 IOMUX_PAD(0x384, 0x58, 6, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN3__USBPHY1_IDDIG IOMUX_PAD(0x384, 0x58, 7, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN4__IPU_DI0_PIN4 IOMUX_PAD(0x388, 0x5C, 0, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN4__GPIO4_20 IOMUX_PAD(0x388, 0x5C, 1, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN4__AUDMUX_AUD6_RXD IOMUX_PAD(0x388, 0x5C, 2, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN4__ESDHC1_WP IOMUX_PAD(0x388, 0x5C, 3, 0x7FC, 0, 0)
+#define _MX53_PAD_DI0_PIN4__SDMA_DEBUG_YIELD IOMUX_PAD(0x388, 0x5C, 5, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN4__EMI_EMI_DEBUG_4 IOMUX_PAD(0x388, 0x5C, 6, 0x0, 0, 0)
+#define _MX53_PAD_DI0_PIN4__USBPHY1_HOSTDISCONNECT IOMUX_PAD(0x388, 0x5C, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0 IOMUX_PAD(0x38C, 0x60, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__GPIO4_21 IOMUX_PAD(0x38C, 0x60, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__CSPI_SCLK IOMUX_PAD(0x38C, 0x60, 2, 0x780, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__USBOH3_USBH2_DATA_0 IOMUX_PAD(0x38C, 0x60, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__SDMA_DEBUG_CORE_RUN IOMUX_PAD(0x38C, 0x60, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__EMI_EMI_DEBUG_5 IOMUX_PAD(0x38C, 0x60, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT0__USBPHY2_TXREADY IOMUX_PAD(0x38C, 0x60, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1 IOMUX_PAD(0x390, 0x64, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__GPIO4_22 IOMUX_PAD(0x390, 0x64, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__CSPI_MOSI IOMUX_PAD(0x390, 0x64, 2, 0x788, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__USBOH3_USBH2_DATA_1 IOMUX_PAD(0x390, 0x64, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__SDMA_DEBUG_EVENT_CHANNEL_SEL IOMUX_PAD(0x390, 0x64, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__EMI_EMI_DEBUG_6 IOMUX_PAD(0x390, 0x64, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT1__USBPHY2_RXVALID IOMUX_PAD(0x390, 0x64, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2 IOMUX_PAD(0x394, 0x68, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__GPIO4_23 IOMUX_PAD(0x394, 0x68, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__CSPI_MISO IOMUX_PAD(0x394, 0x68, 2, 0x784, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__USBOH3_USBH2_DATA_2 IOMUX_PAD(0x394, 0x68, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__SDMA_DEBUG_MODE IOMUX_PAD(0x394, 0x68, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__EMI_EMI_DEBUG_7 IOMUX_PAD(0x394, 0x68, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT2__USBPHY2_RXACTIVE IOMUX_PAD(0x394, 0x68, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3 IOMUX_PAD(0x398, 0x6C, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__GPIO4_24 IOMUX_PAD(0x398, 0x6C, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__CSPI_SS0 IOMUX_PAD(0x398, 0x6C, 2, 0x78C, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__USBOH3_USBH2_DATA_3 IOMUX_PAD(0x398, 0x6C, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__SDMA_DEBUG_BUS_ERROR IOMUX_PAD(0x398, 0x6C, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__EMI_EMI_DEBUG_8 IOMUX_PAD(0x398, 0x6C, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT3__USBPHY2_RXERROR IOMUX_PAD(0x398, 0x6C, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4 IOMUX_PAD(0x39C, 0x70, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__GPIO4_25 IOMUX_PAD(0x39C, 0x70, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__CSPI_SS1 IOMUX_PAD(0x39C, 0x70, 2, 0x790, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__USBOH3_USBH2_DATA_4 IOMUX_PAD(0x39C, 0x70, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB IOMUX_PAD(0x39C, 0x70, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__EMI_EMI_DEBUG_9 IOMUX_PAD(0x39C, 0x70, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT4__USBPHY2_SIECLOCK IOMUX_PAD(0x39C, 0x70, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5 IOMUX_PAD(0x3A0, 0x74, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__GPIO4_26 IOMUX_PAD(0x3A0, 0x74, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__CSPI_SS2 IOMUX_PAD(0x3A0, 0x74, 2, 0x794, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__USBOH3_USBH2_DATA_5 IOMUX_PAD(0x3A0, 0x74, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__SDMA_DEBUG_MATCHED_DMBUS IOMUX_PAD(0x3A0, 0x74, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__EMI_EMI_DEBUG_10 IOMUX_PAD(0x3A0, 0x74, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT5__USBPHY2_LINESTATE_0 IOMUX_PAD(0x3A0, 0x74, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6 IOMUX_PAD(0x3A4, 0x78, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__GPIO4_27 IOMUX_PAD(0x3A4, 0x78, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__CSPI_SS3 IOMUX_PAD(0x3A4, 0x78, 2, 0x798, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__USBOH3_USBH2_DATA_6 IOMUX_PAD(0x3A4, 0x78, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__SDMA_DEBUG_RTBUFFER_WRITE IOMUX_PAD(0x3A4, 0x78, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__EMI_EMI_DEBUG_11 IOMUX_PAD(0x3A4, 0x78, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT6__USBPHY2_LINESTATE_1 IOMUX_PAD(0x3A4, 0x78, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7 IOMUX_PAD(0x3A8, 0x7C, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__GPIO4_28 IOMUX_PAD(0x3A8, 0x7C, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__CSPI_RDY IOMUX_PAD(0x3A8, 0x7C, 2, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__USBOH3_USBH2_DATA_7 IOMUX_PAD(0x3A8, 0x7C, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__SDMA_DEBUG_EVENT_CHANNEL_0 IOMUX_PAD(0x3A8, 0x7C, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__EMI_EMI_DEBUG_12 IOMUX_PAD(0x3A8, 0x7C, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT7__USBPHY2_VBUSVALID IOMUX_PAD(0x3A8, 0x7C, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8 IOMUX_PAD(0x3AC, 0x80, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__GPIO4_29 IOMUX_PAD(0x3AC, 0x80, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__PWM1_PWMO IOMUX_PAD(0x3AC, 0x80, 2, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__WDOG1_WDOG_B IOMUX_PAD(0x3AC, 0x80, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__SDMA_DEBUG_EVENT_CHANNEL_1 IOMUX_PAD(0x3AC, 0x80, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__EMI_EMI_DEBUG_13 IOMUX_PAD(0x3AC, 0x80, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT8__USBPHY2_AVALID IOMUX_PAD(0x3AC, 0x80, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9 IOMUX_PAD(0x3B0, 0x84, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__GPIO4_30 IOMUX_PAD(0x3B0, 0x84, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__PWM2_PWMO IOMUX_PAD(0x3B0, 0x84, 2, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__WDOG2_WDOG_B IOMUX_PAD(0x3B0, 0x84, 3, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__SDMA_DEBUG_EVENT_CHANNEL_2 IOMUX_PAD(0x3B0, 0x84, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__EMI_EMI_DEBUG_14 IOMUX_PAD(0x3B0, 0x84, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT9__USBPHY2_VSTATUS_0 IOMUX_PAD(0x3B0, 0x84, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10 IOMUX_PAD(0x3B4, 0x88, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT10__GPIO4_31 IOMUX_PAD(0x3B4, 0x88, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT10__USBOH3_USBH2_STP IOMUX_PAD(0x3B4, 0x88, 2, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT10__SDMA_DEBUG_EVENT_CHANNEL_3 IOMUX_PAD(0x3B4, 0x88, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT10__EMI_EMI_DEBUG_15 IOMUX_PAD(0x3B4, 0x88, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT10__USBPHY2_VSTATUS_1 IOMUX_PAD(0x3B4, 0x88, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11 IOMUX_PAD(0x3B8, 0x8C, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT11__GPIO5_5 IOMUX_PAD(0x3B8, 0x8C, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT11__USBOH3_USBH2_NXT IOMUX_PAD(0x3B8, 0x8C, 2, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT11__SDMA_DEBUG_EVENT_CHANNEL_4 IOMUX_PAD(0x3B8, 0x8C, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT11__EMI_EMI_DEBUG_16 IOMUX_PAD(0x3B8, 0x8C, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT11__USBPHY2_VSTATUS_2 IOMUX_PAD(0x3B8, 0x8C, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12 IOMUX_PAD(0x3BC, 0x90, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT12__GPIO5_6 IOMUX_PAD(0x3BC, 0x90, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT12__USBOH3_USBH2_CLK IOMUX_PAD(0x3BC, 0x90, 2, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT12__SDMA_DEBUG_EVENT_CHANNEL_5 IOMUX_PAD(0x3BC, 0x90, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT12__EMI_EMI_DEBUG_17 IOMUX_PAD(0x3BC, 0x90, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT12__USBPHY2_VSTATUS_3 IOMUX_PAD(0x3BC, 0x90, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13 IOMUX_PAD(0x3C0, 0x94, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT13__GPIO5_7 IOMUX_PAD(0x3C0, 0x94, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS IOMUX_PAD(0x3C0, 0x94, 3, 0x754, 0, 0)
+#define _MX53_PAD_DISP0_DAT13__SDMA_DEBUG_EVT_CHN_LINES_0 IOMUX_PAD(0x3C0, 0x94, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT13__EMI_EMI_DEBUG_18 IOMUX_PAD(0x3C0, 0x94, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT13__USBPHY2_VSTATUS_4 IOMUX_PAD(0x3C0, 0x94, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14 IOMUX_PAD(0x3C4, 0x98, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT14__GPIO5_8 IOMUX_PAD(0x3C4, 0x98, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC IOMUX_PAD(0x3C4, 0x98, 3, 0x750, 0, 0)
+#define _MX53_PAD_DISP0_DAT14__SDMA_DEBUG_EVT_CHN_LINES_1 IOMUX_PAD(0x3C4, 0x98, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT14__EMI_EMI_DEBUG_19 IOMUX_PAD(0x3C4, 0x98, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT14__USBPHY2_VSTATUS_5 IOMUX_PAD(0x3C4, 0x98, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15 IOMUX_PAD(0x3C8, 0x9C, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT15__GPIO5_9 IOMUX_PAD(0x3C8, 0x9C, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT15__ECSPI1_SS1 IOMUX_PAD(0x3C8, 0x9C, 2, 0x7AC, 1, 0)
+#define _MX53_PAD_DISP0_DAT15__ECSPI2_SS1 IOMUX_PAD(0x3C8, 0x9C, 3, 0x7C8, 0, 0)
+#define _MX53_PAD_DISP0_DAT15__SDMA_DEBUG_EVT_CHN_LINES_2 IOMUX_PAD(0x3C8, 0x9C, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT15__EMI_EMI_DEBUG_20 IOMUX_PAD(0x3C8, 0x9C, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT15__USBPHY2_VSTATUS_6 IOMUX_PAD(0x3C8, 0x9C, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16 IOMUX_PAD(0x3CC, 0xA0, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__GPIO5_10 IOMUX_PAD(0x3CC, 0xA0, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__ECSPI2_MOSI IOMUX_PAD(0x3CC, 0xA0, 2, 0x7C0, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC IOMUX_PAD(0x3CC, 0xA0, 3, 0x758, 1, 0)
+#define _MX53_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 IOMUX_PAD(0x3CC, 0xA0, 4, 0x868, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__SDMA_DEBUG_EVT_CHN_LINES_3 IOMUX_PAD(0x3CC, 0xA0, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__EMI_EMI_DEBUG_21 IOMUX_PAD(0x3CC, 0xA0, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT16__USBPHY2_VSTATUS_7 IOMUX_PAD(0x3CC, 0xA0, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17 IOMUX_PAD(0x3D0, 0xA4, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT17__GPIO5_11 IOMUX_PAD(0x3D0, 0xA4, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT17__ECSPI2_MISO IOMUX_PAD(0x3D0, 0xA4, 2, 0x7BC, 0, 0)
+#define _MX53_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD IOMUX_PAD(0x3D0, 0xA4, 3, 0x74C, 1, 0)
+#define _MX53_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 IOMUX_PAD(0x3D0, 0xA4, 4, 0x86C, 0, 0)
+#define _MX53_PAD_DISP0_DAT17__SDMA_DEBUG_EVT_CHN_LINES_4 IOMUX_PAD(0x3D0, 0xA4, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT17__EMI_EMI_DEBUG_22 IOMUX_PAD(0x3D0, 0xA4, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18 IOMUX_PAD(0x3D4, 0xA8, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__GPIO5_12 IOMUX_PAD(0x3D4, 0xA8, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__ECSPI2_SS0 IOMUX_PAD(0x3D4, 0xA8, 2, 0x7C4, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS IOMUX_PAD(0x3D4, 0xA8, 3, 0x75C, 1, 0)
+#define _MX53_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS IOMUX_PAD(0x3D4, 0xA8, 4, 0x73C, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__SDMA_DEBUG_EVT_CHN_LINES_5 IOMUX_PAD(0x3D4, 0xA8, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__EMI_EMI_DEBUG_23 IOMUX_PAD(0x3D4, 0xA8, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT18__EMI_WEIM_CS_2 IOMUX_PAD(0x3D4, 0xA8, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19 IOMUX_PAD(0x3D8, 0xAC, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__GPIO5_13 IOMUX_PAD(0x3D8, 0xAC, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__ECSPI2_SCLK IOMUX_PAD(0x3D8, 0xAC, 2, 0x7B8, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD IOMUX_PAD(0x3D8, 0xAC, 3, 0x748, 1, 0)
+#define _MX53_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC IOMUX_PAD(0x3D8, 0xAC, 4, 0x738, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__SDMA_DEBUG_EVT_CHN_LINES_6 IOMUX_PAD(0x3D8, 0xAC, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__EMI_EMI_DEBUG_24 IOMUX_PAD(0x3D8, 0xAC, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT19__EMI_WEIM_CS_3 IOMUX_PAD(0x3D8, 0xAC, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20 IOMUX_PAD(0x3DC, 0xB0, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT20__GPIO5_14 IOMUX_PAD(0x3DC, 0xB0, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT20__ECSPI1_SCLK IOMUX_PAD(0x3DC, 0xB0, 2, 0x79C, 1, 0)
+#define _MX53_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC IOMUX_PAD(0x3DC, 0xB0, 3, 0x740, 0, 0)
+#define _MX53_PAD_DISP0_DAT20__SDMA_DEBUG_EVT_CHN_LINES_7 IOMUX_PAD(0x3DC, 0xB0, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT20__EMI_EMI_DEBUG_25 IOMUX_PAD(0x3DC, 0xB0, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT20__SATA_PHY_TDI IOMUX_PAD(0x3DC, 0xB0, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21 IOMUX_PAD(0x3E0, 0xB4, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT21__GPIO5_15 IOMUX_PAD(0x3E0, 0xB4, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT21__ECSPI1_MOSI IOMUX_PAD(0x3E0, 0xB4, 2, 0x7A4, 1, 0)
+#define _MX53_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD IOMUX_PAD(0x3E0, 0xB4, 3, 0x734, 0, 0)
+#define _MX53_PAD_DISP0_DAT21__SDMA_DEBUG_BUS_DEVICE_0 IOMUX_PAD(0x3E0, 0xB4, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT21__EMI_EMI_DEBUG_26 IOMUX_PAD(0x3E0, 0xB4, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT21__SATA_PHY_TDO IOMUX_PAD(0x3E0, 0xB4, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22 IOMUX_PAD(0x3E4, 0xB8, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT22__GPIO5_16 IOMUX_PAD(0x3E4, 0xB8, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT22__ECSPI1_MISO IOMUX_PAD(0x3E4, 0xB8, 2, 0x7A0, 1, 0)
+#define _MX53_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS IOMUX_PAD(0x3E4, 0xB8, 3, 0x744, 0, 0)
+#define _MX53_PAD_DISP0_DAT22__SDMA_DEBUG_BUS_DEVICE_1 IOMUX_PAD(0x3E4, 0xB8, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT22__EMI_EMI_DEBUG_27 IOMUX_PAD(0x3E4, 0xB8, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT22__SATA_PHY_TCK IOMUX_PAD(0x3E4, 0xB8, 7, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23 IOMUX_PAD(0x3E8, 0xBC, 0, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT23__GPIO5_17 IOMUX_PAD(0x3E8, 0xBC, 1, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT23__ECSPI1_SS0 IOMUX_PAD(0x3E8, 0xBC, 2, 0x7A8, 1, 0)
+#define _MX53_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD IOMUX_PAD(0x3E8, 0xBC, 3, 0x730, 0, 0)
+#define _MX53_PAD_DISP0_DAT23__SDMA_DEBUG_BUS_DEVICE_2 IOMUX_PAD(0x3E8, 0xBC, 5, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT23__EMI_EMI_DEBUG_28 IOMUX_PAD(0x3E8, 0xBC, 6, 0x0, 0, 0)
+#define _MX53_PAD_DISP0_DAT23__SATA_PHY_TMS IOMUX_PAD(0x3E8, 0xBC, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK IOMUX_PAD(0x3EC, 0xC0, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_PIXCLK__GPIO5_18 IOMUX_PAD(0x3EC, 0xC0, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 IOMUX_PAD(0x3EC, 0xC0, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_PIXCLK__EMI_EMI_DEBUG_29 IOMUX_PAD(0x3EC, 0xC0, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC IOMUX_PAD(0x3F0, 0xC4, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_MCLK__GPIO5_19 IOMUX_PAD(0x3F0, 0xC4, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK IOMUX_PAD(0x3F0, 0xC4, 2, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 IOMUX_PAD(0x3F0, 0xC4, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_MCLK__EMI_EMI_DEBUG_30 IOMUX_PAD(0x3F0, 0xC4, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_MCLK__TPIU_TRCTL IOMUX_PAD(0x3F0, 0xC4, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN IOMUX_PAD(0x3F4, 0xC8, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DATA_EN__GPIO5_20 IOMUX_PAD(0x3F4, 0xC8, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 IOMUX_PAD(0x3F4, 0xC8, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DATA_EN__EMI_EMI_DEBUG_31 IOMUX_PAD(0x3F4, 0xC8, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DATA_EN__TPIU_TRCLK IOMUX_PAD(0x3F4, 0xC8, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC IOMUX_PAD(0x3F8, 0xCC, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_VSYNC__GPIO5_21 IOMUX_PAD(0x3F8, 0xCC, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 IOMUX_PAD(0x3F8, 0xCC, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_VSYNC__EMI_EMI_DEBUG_32 IOMUX_PAD(0x3F8, 0xCC, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_VSYNC__TPIU_TRACE_0 IOMUX_PAD(0x3F8, 0xCC, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 IOMUX_PAD(0x3FC, 0xD0, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT4__GPIO5_22 IOMUX_PAD(0x3FC, 0xD0, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT4__KPP_COL_5 IOMUX_PAD(0x3FC, 0xD0, 2, 0x840, 1, 0)
+#define _MX53_PAD_CSI0_DAT4__ECSPI1_SCLK IOMUX_PAD(0x3FC, 0xD0, 3, 0x79C, 2, 0)
+#define _MX53_PAD_CSI0_DAT4__USBOH3_USBH3_STP IOMUX_PAD(0x3FC, 0xD0, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC IOMUX_PAD(0x3FC, 0xD0, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT4__EMI_EMI_DEBUG_33 IOMUX_PAD(0x3FC, 0xD0, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT4__TPIU_TRACE_1 IOMUX_PAD(0x3FC, 0xD0, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 IOMUX_PAD(0x400, 0xD4, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__GPIO5_23 IOMUX_PAD(0x400, 0xD4, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__KPP_ROW_5 IOMUX_PAD(0x400, 0xD4, 2, 0x84C, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__ECSPI1_MOSI IOMUX_PAD(0x400, 0xD4, 3, 0x7A4, 2, 0)
+#define _MX53_PAD_CSI0_DAT5__USBOH3_USBH3_NXT IOMUX_PAD(0x400, 0xD4, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD IOMUX_PAD(0x400, 0xD4, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__EMI_EMI_DEBUG_34 IOMUX_PAD(0x400, 0xD4, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT5__TPIU_TRACE_2 IOMUX_PAD(0x400, 0xD4, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 IOMUX_PAD(0x404, 0xD8, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__GPIO5_24 IOMUX_PAD(0x404, 0xD8, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__KPP_COL_6 IOMUX_PAD(0x404, 0xD8, 2, 0x844, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__ECSPI1_MISO IOMUX_PAD(0x404, 0xD8, 3, 0x7A0, 2, 0)
+#define _MX53_PAD_CSI0_DAT6__USBOH3_USBH3_CLK IOMUX_PAD(0x404, 0xD8, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS IOMUX_PAD(0x404, 0xD8, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__EMI_EMI_DEBUG_35 IOMUX_PAD(0x404, 0xD8, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT6__TPIU_TRACE_3 IOMUX_PAD(0x404, 0xD8, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 IOMUX_PAD(0x408, 0xDC, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__GPIO5_25 IOMUX_PAD(0x408, 0xDC, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__KPP_ROW_6 IOMUX_PAD(0x408, 0xDC, 2, 0x850, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__ECSPI1_SS0 IOMUX_PAD(0x408, 0xDC, 3, 0x7A8, 2, 0)
+#define _MX53_PAD_CSI0_DAT7__USBOH3_USBH3_DIR IOMUX_PAD(0x408, 0xDC, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD IOMUX_PAD(0x408, 0xDC, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__EMI_EMI_DEBUG_36 IOMUX_PAD(0x408, 0xDC, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT7__TPIU_TRACE_4 IOMUX_PAD(0x408, 0xDC, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 IOMUX_PAD(0x40C, 0xE0, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__GPIO5_26 IOMUX_PAD(0x40C, 0xE0, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__KPP_COL_7 IOMUX_PAD(0x40C, 0xE0, 2, 0x848, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__ECSPI2_SCLK IOMUX_PAD(0x40C, 0xE0, 3, 0x7B8, 1, 0)
+#define _MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC IOMUX_PAD(0x40C, 0xE0, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__I2C1_SDA IOMUX_PAD(0x40C, 0xE0, 5 | IOMUX_CONFIG_SION, 0x818, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 IOMUX_PAD(0x40C, 0xE0, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 IOMUX_PAD(0x40C, 0xE0, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 IOMUX_PAD(0x410, 0xE4, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__GPIO5_27 IOMUX_PAD(0x410, 0xE4, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__KPP_ROW_7 IOMUX_PAD(0x410, 0xE4, 2, 0x854, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__ECSPI2_MOSI IOMUX_PAD(0x410, 0xE4, 3, 0x7C0, 1, 0)
+#define _MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR IOMUX_PAD(0x410, 0xE4, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__I2C1_SCL IOMUX_PAD(0x410, 0xE4, 5 | IOMUX_CONFIG_SION, 0x814, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 IOMUX_PAD(0x410, 0xE4, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 IOMUX_PAD(0x410, 0xE4, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 IOMUX_PAD(0x414, 0xE8, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__GPIO5_28 IOMUX_PAD(0x414, 0xE8, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__UART1_TXD_MUX IOMUX_PAD(0x414, 0xE8, 2, 0x878, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__ECSPI2_MISO IOMUX_PAD(0x414, 0xE8, 3, 0x7BC, 1, 0)
+#define _MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC IOMUX_PAD(0x414, 0xE8, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 IOMUX_PAD(0x414, 0xE8, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__EMI_EMI_DEBUG_39 IOMUX_PAD(0x414, 0xE8, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__TPIU_TRACE_7 IOMUX_PAD(0x414, 0xE8, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 IOMUX_PAD(0x418, 0xEC, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT11__GPIO5_29 IOMUX_PAD(0x418, 0xEC, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT11__UART1_RXD_MUX IOMUX_PAD(0x418, 0xEC, 2, 0x878, 1, 0)
+#define _MX53_PAD_CSI0_DAT11__ECSPI2_SS0 IOMUX_PAD(0x418, 0xEC, 3, 0x7C4, 1, 0)
+#define _MX53_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS IOMUX_PAD(0x418, 0xEC, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 IOMUX_PAD(0x418, 0xEC, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT11__EMI_EMI_DEBUG_40 IOMUX_PAD(0x418, 0xEC, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 IOMUX_PAD(0x418, 0xEC, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 IOMUX_PAD(0x41C, 0xF0, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT12__GPIO5_30 IOMUX_PAD(0x41C, 0xF0, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT12__UART4_TXD_MUX IOMUX_PAD(0x41C, 0xF0, 2, 0x890, 2, 0)
+#define _MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 IOMUX_PAD(0x41C, 0xF0, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 IOMUX_PAD(0x41C, 0xF0, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 IOMUX_PAD(0x41C, 0xF0, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 IOMUX_PAD(0x41C, 0xF0, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 IOMUX_PAD(0x420, 0xF4, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT13__GPIO5_31 IOMUX_PAD(0x420, 0xF4, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT13__UART4_RXD_MUX IOMUX_PAD(0x420, 0xF4, 2, 0x890, 3, 0)
+#define _MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 IOMUX_PAD(0x420, 0xF4, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 IOMUX_PAD(0x420, 0xF4, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 IOMUX_PAD(0x420, 0xF4, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 IOMUX_PAD(0x420, 0xF4, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 IOMUX_PAD(0x424, 0xF8, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT14__GPIO6_0 IOMUX_PAD(0x424, 0xF8, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT14__UART5_TXD_MUX IOMUX_PAD(0x424, 0xF8, 2, 0x898, 2, 0)
+#define _MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 IOMUX_PAD(0x424, 0xF8, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 IOMUX_PAD(0x424, 0xF8, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 IOMUX_PAD(0x424, 0xF8, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 IOMUX_PAD(0x424, 0xF8, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 IOMUX_PAD(0x428, 0xFC, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT15__GPIO6_1 IOMUX_PAD(0x428, 0xFC, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT15__UART5_RXD_MUX IOMUX_PAD(0x428, 0xFC, 2, 0x898, 3, 0)
+#define _MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 IOMUX_PAD(0x428, 0xFC, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 IOMUX_PAD(0x428, 0xFC, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 IOMUX_PAD(0x428, 0xFC, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 IOMUX_PAD(0x428, 0xFC, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 IOMUX_PAD(0x42C, 0x100, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__GPIO6_2 IOMUX_PAD(0x42C, 0x100, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__UART4_RTS IOMUX_PAD(0x42C, 0x100, 2, 0x88C, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 IOMUX_PAD(0x42C, 0x100, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 IOMUX_PAD(0x42C, 0x100, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 IOMUX_PAD(0x42C, 0x100, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 IOMUX_PAD(0x42C, 0x100, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 IOMUX_PAD(0x430, 0x104, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT17__GPIO6_3 IOMUX_PAD(0x430, 0x104, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT17__UART4_CTS IOMUX_PAD(0x430, 0x104, 2, 0x88C, 1, 0)
+#define _MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 IOMUX_PAD(0x430, 0x104, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 IOMUX_PAD(0x430, 0x104, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 IOMUX_PAD(0x430, 0x104, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 IOMUX_PAD(0x430, 0x104, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 IOMUX_PAD(0x434, 0x108, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT18__GPIO6_4 IOMUX_PAD(0x434, 0x108, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT18__UART5_RTS IOMUX_PAD(0x434, 0x108, 2, 0x894, 2, 0)
+#define _MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 IOMUX_PAD(0x434, 0x108, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 IOMUX_PAD(0x434, 0x108, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 IOMUX_PAD(0x434, 0x108, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 IOMUX_PAD(0x434, 0x108, 7, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 IOMUX_PAD(0x438, 0x10C, 0, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT19__GPIO6_5 IOMUX_PAD(0x438, 0x10C, 1, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT19__UART5_CTS IOMUX_PAD(0x438, 0x10C, 2, 0x894, 3, 0)
+#define _MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 IOMUX_PAD(0x438, 0x10C, 4, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 IOMUX_PAD(0x438, 0x10C, 5, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 IOMUX_PAD(0x438, 0x10C, 6, 0x0, 0, 0)
+#define _MX53_PAD_CSI0_DAT19__USBPHY2_BISTOK IOMUX_PAD(0x438, 0x10C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A25__EMI_WEIM_A_25 IOMUX_PAD(0x458, 0x110, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A25__GPIO5_2 IOMUX_PAD(0x458, 0x110, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A25__ECSPI2_RDY IOMUX_PAD(0x458, 0x110, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A25__IPU_DI1_PIN12 IOMUX_PAD(0x458, 0x110, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A25__CSPI_SS1 IOMUX_PAD(0x458, 0x110, 4, 0x790, 1, 0)
+#define _MX53_PAD_EIM_A25__IPU_DI0_D1_CS IOMUX_PAD(0x458, 0x110, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A25__USBPHY1_BISTOK IOMUX_PAD(0x458, 0x110, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB2__EMI_WEIM_EB_2 IOMUX_PAD(0x45C, 0x114, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB2__GPIO2_30 IOMUX_PAD(0x45C, 0x114, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK IOMUX_PAD(0x45C, 0x114, 2, 0x76C, 0, 0)
+#define _MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS IOMUX_PAD(0x45C, 0x114, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB2__ECSPI1_SS0 IOMUX_PAD(0x45C, 0x114, 4, 0x7A8, 3, 0)
+#define _MX53_PAD_EIM_EB2__I2C2_SCL IOMUX_PAD(0x45C, 0x114, 5 | IOMUX_CONFIG_SION, 0x81C, 1, 0)
+#define _MX53_PAD_EIM_D16__EMI_WEIM_D_16 IOMUX_PAD(0x460, 0x118, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D16__GPIO3_16 IOMUX_PAD(0x460, 0x118, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D16__IPU_DI0_PIN5 IOMUX_PAD(0x460, 0x118, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK IOMUX_PAD(0x460, 0x118, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D16__ECSPI1_SCLK IOMUX_PAD(0x460, 0x118, 4, 0x79C, 3, 0)
+#define _MX53_PAD_EIM_D16__I2C2_SDA IOMUX_PAD(0x460, 0x118, 5, 0x820, 1, 0)
+#define _MX53_PAD_EIM_D17__EMI_WEIM_D_17 IOMUX_PAD(0x464, 0x11C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D17__GPIO3_17 IOMUX_PAD(0x464, 0x11C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D17__IPU_DI0_PIN6 IOMUX_PAD(0x464, 0x11C, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN IOMUX_PAD(0x464, 0x11C, 3, 0x830, 0, 0)
+#define _MX53_PAD_EIM_D17__ECSPI1_MISO IOMUX_PAD(0x464, 0x11C, 4, 0x7A0, 3, 0)
+#define _MX53_PAD_EIM_D17__I2C3_SCL IOMUX_PAD(0x464, 0x11C, 5, 0x824, 0, 0)
+#define _MX53_PAD_EIM_D18__EMI_WEIM_D_18 IOMUX_PAD(0x468, 0x120, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D18__GPIO3_18 IOMUX_PAD(0x468, 0x120, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D18__IPU_DI0_PIN7 IOMUX_PAD(0x468, 0x120, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO IOMUX_PAD(0x468, 0x120, 3, 0x830, 1, 0)
+#define _MX53_PAD_EIM_D18__ECSPI1_MOSI IOMUX_PAD(0x468, 0x120, 4, 0x7A4, 3, 0)
+#define _MX53_PAD_EIM_D18__I2C3_SDA IOMUX_PAD(0x468, 0x120, 5, 0x828, 0, 0)
+#define _MX53_PAD_EIM_D18__IPU_DI1_D0_CS IOMUX_PAD(0x468, 0x120, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D19__EMI_WEIM_D_19 IOMUX_PAD(0x46C, 0x124, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D19__GPIO3_19 IOMUX_PAD(0x46C, 0x124, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D19__IPU_DI0_PIN8 IOMUX_PAD(0x46C, 0x124, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS IOMUX_PAD(0x46C, 0x124, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D19__ECSPI1_SS1 IOMUX_PAD(0x46C, 0x124, 4, 0x7AC, 2, 0)
+#define _MX53_PAD_EIM_D19__EPIT1_EPITO IOMUX_PAD(0x46C, 0x124, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D19__UART1_CTS IOMUX_PAD(0x46C, 0x124, 6, 0x874, 0, 0)
+#define _MX53_PAD_EIM_D19__USBOH3_USBH2_OC IOMUX_PAD(0x46C, 0x124, 7, 0x8A4, 0, 0)
+#define _MX53_PAD_EIM_D20__EMI_WEIM_D_20 IOMUX_PAD(0x470, 0x128, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D20__GPIO3_20 IOMUX_PAD(0x470, 0x128, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D20__IPU_DI0_PIN16 IOMUX_PAD(0x470, 0x128, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D20__IPU_SER_DISP0_CS IOMUX_PAD(0x470, 0x128, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D20__CSPI_SS0 IOMUX_PAD(0x470, 0x128, 4, 0x78C, 1, 0)
+#define _MX53_PAD_EIM_D20__EPIT2_EPITO IOMUX_PAD(0x470, 0x128, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D20__UART1_RTS IOMUX_PAD(0x470, 0x128, 6, 0x874, 1, 0)
+#define _MX53_PAD_EIM_D20__USBOH3_USBH2_PWR IOMUX_PAD(0x470, 0x128, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D21__EMI_WEIM_D_21 IOMUX_PAD(0x474, 0x12C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D21__GPIO3_21 IOMUX_PAD(0x474, 0x12C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D21__IPU_DI0_PIN17 IOMUX_PAD(0x474, 0x12C, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK IOMUX_PAD(0x474, 0x12C, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D21__CSPI_SCLK IOMUX_PAD(0x474, 0x12C, 4, 0x780, 1, 0)
+#define _MX53_PAD_EIM_D21__I2C1_SCL IOMUX_PAD(0x474, 0x12C, 5, 0x814, 1, 0)
+#define _MX53_PAD_EIM_D21__USBOH3_USBOTG_OC IOMUX_PAD(0x474, 0x12C, 6, 0x89C, 1, 0)
+#define _MX53_PAD_EIM_D22__EMI_WEIM_D_22 IOMUX_PAD(0x478, 0x130, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D22__GPIO3_22 IOMUX_PAD(0x478, 0x130, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D22__IPU_DI0_PIN1 IOMUX_PAD(0x478, 0x130, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN IOMUX_PAD(0x478, 0x130, 3, 0x82C, 0, 0)
+#define _MX53_PAD_EIM_D22__CSPI_MISO IOMUX_PAD(0x478, 0x130, 4, 0x784, 1, 0)
+#define _MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR IOMUX_PAD(0x478, 0x130, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D23__EMI_WEIM_D_23 IOMUX_PAD(0x47C, 0x134, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D23__GPIO3_23 IOMUX_PAD(0x47C, 0x134, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D23__UART3_CTS IOMUX_PAD(0x47C, 0x134, 2, 0x884, 0, 0)
+#define _MX53_PAD_EIM_D23__UART1_DCD IOMUX_PAD(0x47C, 0x134, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D23__IPU_DI0_D0_CS IOMUX_PAD(0x47C, 0x134, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D23__IPU_DI1_PIN2 IOMUX_PAD(0x47C, 0x134, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D23__IPU_CSI1_DATA_EN IOMUX_PAD(0x47C, 0x134, 6, 0x834, 0, 0)
+#define _MX53_PAD_EIM_D23__IPU_DI1_PIN14 IOMUX_PAD(0x47C, 0x134, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 IOMUX_PAD(0x480, 0x138, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB3__GPIO2_31 IOMUX_PAD(0x480, 0x138, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB3__UART3_RTS IOMUX_PAD(0x480, 0x138, 2, 0x884, 1, 0)
+#define _MX53_PAD_EIM_EB3__UART1_RI IOMUX_PAD(0x480, 0x138, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB3__IPU_DI1_PIN3 IOMUX_PAD(0x480, 0x138, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC IOMUX_PAD(0x480, 0x138, 6, 0x838, 0, 0)
+#define _MX53_PAD_EIM_EB3__IPU_DI1_PIN16 IOMUX_PAD(0x480, 0x138, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D24__EMI_WEIM_D_24 IOMUX_PAD(0x484, 0x13C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D24__GPIO3_24 IOMUX_PAD(0x484, 0x13C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D24__UART3_TXD_MUX IOMUX_PAD(0x484, 0x13C, 2, 0x888, 0, 0)
+#define _MX53_PAD_EIM_D24__ECSPI1_SS2 IOMUX_PAD(0x484, 0x13C, 3, 0x7B0, 1, 0)
+#define _MX53_PAD_EIM_D24__CSPI_SS2 IOMUX_PAD(0x484, 0x13C, 4, 0x794, 1, 0)
+#define _MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS IOMUX_PAD(0x484, 0x13C, 5, 0x754, 1, 0)
+#define _MX53_PAD_EIM_D24__ECSPI2_SS2 IOMUX_PAD(0x484, 0x13C, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D24__UART1_DTR IOMUX_PAD(0x484, 0x13C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D25__EMI_WEIM_D_25 IOMUX_PAD(0x488, 0x140, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D25__GPIO3_25 IOMUX_PAD(0x488, 0x140, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D25__UART3_RXD_MUX IOMUX_PAD(0x488, 0x140, 2, 0x888, 1, 0)
+#define _MX53_PAD_EIM_D25__ECSPI1_SS3 IOMUX_PAD(0x488, 0x140, 3, 0x7B4, 1, 0)
+#define _MX53_PAD_EIM_D25__CSPI_SS3 IOMUX_PAD(0x488, 0x140, 4, 0x798, 1, 0)
+#define _MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC IOMUX_PAD(0x488, 0x140, 5, 0x750, 1, 0)
+#define _MX53_PAD_EIM_D25__ECSPI2_SS3 IOMUX_PAD(0x488, 0x140, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D25__UART1_DSR IOMUX_PAD(0x488, 0x140, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D26__EMI_WEIM_D_26 IOMUX_PAD(0x48C, 0x144, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D26__GPIO3_26 IOMUX_PAD(0x48C, 0x144, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D26__UART2_TXD_MUX IOMUX_PAD(0x48C, 0x144, 2, 0x880, 0, 0)
+#define _MX53_PAD_EIM_D26__FIRI_RXD IOMUX_PAD(0x48C, 0x144, 3, 0x80C, 0, 0)
+#define _MX53_PAD_EIM_D26__IPU_CSI0_D_1 IOMUX_PAD(0x48C, 0x144, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D26__IPU_DI1_PIN11 IOMUX_PAD(0x48C, 0x144, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D26__IPU_SISG_2 IOMUX_PAD(0x48C, 0x144, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 IOMUX_PAD(0x48C, 0x144, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__EMI_WEIM_D_27 IOMUX_PAD(0x490, 0x148, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__GPIO3_27 IOMUX_PAD(0x490, 0x148, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__UART2_RXD_MUX IOMUX_PAD(0x490, 0x148, 2, 0x880, 1, 0)
+#define _MX53_PAD_EIM_D27__FIRI_TXD IOMUX_PAD(0x490, 0x148, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__IPU_CSI0_D_0 IOMUX_PAD(0x490, 0x148, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__IPU_DI1_PIN13 IOMUX_PAD(0x490, 0x148, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__IPU_SISG_3 IOMUX_PAD(0x490, 0x148, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 IOMUX_PAD(0x490, 0x148, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D28__EMI_WEIM_D_28 IOMUX_PAD(0x494, 0x14C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D28__GPIO3_28 IOMUX_PAD(0x494, 0x14C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D28__UART2_CTS IOMUX_PAD(0x494, 0x14C, 2, 0x87C, 0, 0)
+#define _MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO IOMUX_PAD(0x494, 0x14C, 3, 0x82C, 1, 0)
+#define _MX53_PAD_EIM_D28__CSPI_MOSI IOMUX_PAD(0x494, 0x14C, 4, 0x788, 1, 0)
+#define _MX53_PAD_EIM_D28__I2C1_SDA IOMUX_PAD(0x494, 0x14C, 5, 0x818, 1, 0)
+#define _MX53_PAD_EIM_D28__IPU_EXT_TRIG IOMUX_PAD(0x494, 0x14C, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D28__IPU_DI0_PIN13 IOMUX_PAD(0x494, 0x14C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D29__EMI_WEIM_D_29 IOMUX_PAD(0x498, 0x150, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D29__GPIO3_29 IOMUX_PAD(0x498, 0x150, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D29__UART2_RTS IOMUX_PAD(0x498, 0x150, 2, 0x87C, 1, 0)
+#define _MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS IOMUX_PAD(0x498, 0x150, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D29__CSPI_SS0 IOMUX_PAD(0x498, 0x150, 4, 0x78C, 2, 0)
+#define _MX53_PAD_EIM_D29__IPU_DI1_PIN15 IOMUX_PAD(0x498, 0x150, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D29__IPU_CSI1_VSYNC IOMUX_PAD(0x498, 0x150, 6, 0x83C, 0, 0)
+#define _MX53_PAD_EIM_D29__IPU_DI0_PIN14 IOMUX_PAD(0x498, 0x150, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D30__EMI_WEIM_D_30 IOMUX_PAD(0x49C, 0x154, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D30__GPIO3_30 IOMUX_PAD(0x49C, 0x154, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D30__UART3_CTS IOMUX_PAD(0x49C, 0x154, 2, 0x884, 2, 0)
+#define _MX53_PAD_EIM_D30__IPU_CSI0_D_3 IOMUX_PAD(0x49C, 0x154, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D30__IPU_DI0_PIN11 IOMUX_PAD(0x49C, 0x154, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 IOMUX_PAD(0x49C, 0x154, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D30__USBOH3_USBH1_OC IOMUX_PAD(0x49C, 0x154, 6, 0x8A0, 0, 0)
+#define _MX53_PAD_EIM_D30__USBOH3_USBH2_OC IOMUX_PAD(0x49C, 0x154, 7, 0x8A4, 1, 0)
+#define _MX53_PAD_EIM_D31__EMI_WEIM_D_31 IOMUX_PAD(0x4A0, 0x158, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D31__GPIO3_31 IOMUX_PAD(0x4A0, 0x158, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D31__UART3_RTS IOMUX_PAD(0x4A0, 0x158, 2, 0x884, 3, 0)
+#define _MX53_PAD_EIM_D31__IPU_CSI0_D_2 IOMUX_PAD(0x4A0, 0x158, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D31__IPU_DI0_PIN12 IOMUX_PAD(0x4A0, 0x158, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 IOMUX_PAD(0x4A0, 0x158, 5, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D31__USBOH3_USBH1_PWR IOMUX_PAD(0x4A0, 0x158, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_D31__USBOH3_USBH2_PWR IOMUX_PAD(0x4A0, 0x158, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A24__EMI_WEIM_A_24 IOMUX_PAD(0x4A8, 0x15C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A24__GPIO5_4 IOMUX_PAD(0x4A8, 0x15C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A24__IPU_DISP1_DAT_19 IOMUX_PAD(0x4A8, 0x15C, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A24__IPU_CSI1_D_19 IOMUX_PAD(0x4A8, 0x15C, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A24__IPU_SISG_2 IOMUX_PAD(0x4A8, 0x15C, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A24__USBPHY2_BVALID IOMUX_PAD(0x4A8, 0x15C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A23__EMI_WEIM_A_23 IOMUX_PAD(0x4AC, 0x160, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A23__GPIO6_6 IOMUX_PAD(0x4AC, 0x160, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A23__IPU_DISP1_DAT_18 IOMUX_PAD(0x4AC, 0x160, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A23__IPU_CSI1_D_18 IOMUX_PAD(0x4AC, 0x160, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A23__IPU_SISG_3 IOMUX_PAD(0x4AC, 0x160, 6, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A23__USBPHY2_ENDSESSION IOMUX_PAD(0x4AC, 0x160, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A22__EMI_WEIM_A_22 IOMUX_PAD(0x4B0, 0x164, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A22__GPIO2_16 IOMUX_PAD(0x4B0, 0x164, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A22__IPU_DISP1_DAT_17 IOMUX_PAD(0x4B0, 0x164, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A22__IPU_CSI1_D_17 IOMUX_PAD(0x4B0, 0x164, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A22__SRC_BT_CFG1_7 IOMUX_PAD(0x4B0, 0x164, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A21__EMI_WEIM_A_21 IOMUX_PAD(0x4B4, 0x168, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A21__GPIO2_17 IOMUX_PAD(0x4B4, 0x168, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A21__IPU_DISP1_DAT_16 IOMUX_PAD(0x4B4, 0x168, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A21__IPU_CSI1_D_16 IOMUX_PAD(0x4B4, 0x168, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A21__SRC_BT_CFG1_6 IOMUX_PAD(0x4B4, 0x168, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A20__EMI_WEIM_A_20 IOMUX_PAD(0x4B8, 0x16C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A20__GPIO2_18 IOMUX_PAD(0x4B8, 0x16C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A20__IPU_DISP1_DAT_15 IOMUX_PAD(0x4B8, 0x16C, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A20__IPU_CSI1_D_15 IOMUX_PAD(0x4B8, 0x16C, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A20__SRC_BT_CFG1_5 IOMUX_PAD(0x4B8, 0x16C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A19__EMI_WEIM_A_19 IOMUX_PAD(0x4BC, 0x170, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A19__GPIO2_19 IOMUX_PAD(0x4BC, 0x170, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A19__IPU_DISP1_DAT_14 IOMUX_PAD(0x4BC, 0x170, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A19__IPU_CSI1_D_14 IOMUX_PAD(0x4BC, 0x170, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A19__SRC_BT_CFG1_4 IOMUX_PAD(0x4BC, 0x170, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A18__EMI_WEIM_A_18 IOMUX_PAD(0x4C0, 0x174, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A18__GPIO2_20 IOMUX_PAD(0x4C0, 0x174, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A18__IPU_DISP1_DAT_13 IOMUX_PAD(0x4C0, 0x174, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A18__IPU_CSI1_D_13 IOMUX_PAD(0x4C0, 0x174, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A18__SRC_BT_CFG1_3 IOMUX_PAD(0x4C0, 0x174, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A17__EMI_WEIM_A_17 IOMUX_PAD(0x4C4, 0x178, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A17__GPIO2_21 IOMUX_PAD(0x4C4, 0x178, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A17__IPU_DISP1_DAT_12 IOMUX_PAD(0x4C4, 0x178, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A17__IPU_CSI1_D_12 IOMUX_PAD(0x4C4, 0x178, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A17__SRC_BT_CFG1_2 IOMUX_PAD(0x4C4, 0x178, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A16__EMI_WEIM_A_16 IOMUX_PAD(0x4C8, 0x17C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A16__GPIO2_22 IOMUX_PAD(0x4C8, 0x17C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A16__IPU_DI1_DISP_CLK IOMUX_PAD(0x4C8, 0x17C, 2, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A16__IPU_CSI1_PIXCLK IOMUX_PAD(0x4C8, 0x17C, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_A16__SRC_BT_CFG1_1 IOMUX_PAD(0x4C8, 0x17C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_CS0__EMI_WEIM_CS_0 IOMUX_PAD(0x4CC, 0x180, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_CS0__GPIO2_23 IOMUX_PAD(0x4CC, 0x180, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_CS0__ECSPI2_SCLK IOMUX_PAD(0x4CC, 0x180, 2, 0x7B8, 2, 0)
+#define _MX53_PAD_EIM_CS0__IPU_DI1_PIN5 IOMUX_PAD(0x4CC, 0x180, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 IOMUX_PAD(0x4D0, 0x184, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_CS1__GPIO2_24 IOMUX_PAD(0x4D0, 0x184, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_CS1__ECSPI2_MOSI IOMUX_PAD(0x4D0, 0x184, 2, 0x7C0, 2, 0)
+#define _MX53_PAD_EIM_CS1__IPU_DI1_PIN6 IOMUX_PAD(0x4D0, 0x184, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_OE__EMI_WEIM_OE IOMUX_PAD(0x4D4, 0x188, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_OE__GPIO2_25 IOMUX_PAD(0x4D4, 0x188, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_OE__ECSPI2_MISO IOMUX_PAD(0x4D4, 0x188, 2, 0x7BC, 2, 0)
+#define _MX53_PAD_EIM_OE__IPU_DI1_PIN7 IOMUX_PAD(0x4D4, 0x188, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_OE__USBPHY2_IDDIG IOMUX_PAD(0x4D4, 0x188, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_RW__EMI_WEIM_RW IOMUX_PAD(0x4D8, 0x18C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_RW__GPIO2_26 IOMUX_PAD(0x4D8, 0x18C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_RW__ECSPI2_SS0 IOMUX_PAD(0x4D8, 0x18C, 2, 0x7C4, 2, 0)
+#define _MX53_PAD_EIM_RW__IPU_DI1_PIN8 IOMUX_PAD(0x4D8, 0x18C, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_RW__USBPHY2_HOSTDISCONNECT IOMUX_PAD(0x4D8, 0x18C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_LBA__EMI_WEIM_LBA IOMUX_PAD(0x4DC, 0x190, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_LBA__GPIO2_27 IOMUX_PAD(0x4DC, 0x190, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_LBA__ECSPI2_SS1 IOMUX_PAD(0x4DC, 0x190, 2, 0x7C8, 1, 0)
+#define _MX53_PAD_EIM_LBA__IPU_DI1_PIN17 IOMUX_PAD(0x4DC, 0x190, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_LBA__SRC_BT_CFG1_0 IOMUX_PAD(0x4DC, 0x190, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB0__EMI_WEIM_EB_0 IOMUX_PAD(0x4E4, 0x194, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB0__GPIO2_28 IOMUX_PAD(0x4E4, 0x194, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB0__IPU_DISP1_DAT_11 IOMUX_PAD(0x4E4, 0x194, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB0__IPU_CSI1_D_11 IOMUX_PAD(0x4E4, 0x194, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB0__GPC_PMIC_RDY IOMUX_PAD(0x4E4, 0x194, 5, 0x810, 0, 0)
+#define _MX53_PAD_EIM_EB0__SRC_BT_CFG2_7 IOMUX_PAD(0x4E4, 0x194, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB1__EMI_WEIM_EB_1 IOMUX_PAD(0x4E8, 0x198, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB1__GPIO2_29 IOMUX_PAD(0x4E8, 0x198, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB1__IPU_DISP1_DAT_10 IOMUX_PAD(0x4E8, 0x198, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB1__IPU_CSI1_D_10 IOMUX_PAD(0x4E8, 0x198, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_EB1__SRC_BT_CFG2_6 IOMUX_PAD(0x4E8, 0x198, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 IOMUX_PAD(0x4EC, 0x19C, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA0__GPIO3_0 IOMUX_PAD(0x4EC, 0x19C, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA0__IPU_DISP1_DAT_9 IOMUX_PAD(0x4EC, 0x19C, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA0__IPU_CSI1_D_9 IOMUX_PAD(0x4EC, 0x19C, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA0__SRC_BT_CFG2_5 IOMUX_PAD(0x4EC, 0x19C, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 IOMUX_PAD(0x4F0, 0x1A0, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA1__GPIO3_1 IOMUX_PAD(0x4F0, 0x1A0, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA1__IPU_DISP1_DAT_8 IOMUX_PAD(0x4F0, 0x1A0, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA1__IPU_CSI1_D_8 IOMUX_PAD(0x4F0, 0x1A0, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA1__SRC_BT_CFG2_4 IOMUX_PAD(0x4F0, 0x1A0, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 IOMUX_PAD(0x4F4, 0x1A4, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA2__GPIO3_2 IOMUX_PAD(0x4F4, 0x1A4, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA2__IPU_DISP1_DAT_7 IOMUX_PAD(0x4F4, 0x1A4, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA2__IPU_CSI1_D_7 IOMUX_PAD(0x4F4, 0x1A4, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA2__SRC_BT_CFG2_3 IOMUX_PAD(0x4F4, 0x1A4, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 IOMUX_PAD(0x4F8, 0x1A8, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA3__GPIO3_3 IOMUX_PAD(0x4F8, 0x1A8, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA3__IPU_DISP1_DAT_6 IOMUX_PAD(0x4F8, 0x1A8, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA3__IPU_CSI1_D_6 IOMUX_PAD(0x4F8, 0x1A8, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA3__SRC_BT_CFG2_2 IOMUX_PAD(0x4F8, 0x1A8, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 IOMUX_PAD(0x4FC, 0x1AC, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA4__GPIO3_4 IOMUX_PAD(0x4FC, 0x1AC, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA4__IPU_DISP1_DAT_5 IOMUX_PAD(0x4FC, 0x1AC, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA4__IPU_CSI1_D_5 IOMUX_PAD(0x4FC, 0x1AC, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA4__SRC_BT_CFG3_7 IOMUX_PAD(0x4FC, 0x1AC, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 IOMUX_PAD(0x500, 0x1B0, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA5__GPIO3_5 IOMUX_PAD(0x500, 0x1B0, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4 IOMUX_PAD(0x500, 0x1B0, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA5__IPU_CSI1_D_4 IOMUX_PAD(0x500, 0x1B0, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 IOMUX_PAD(0x500, 0x1B0, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 IOMUX_PAD(0x504, 0x1B4, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA6__GPIO3_6 IOMUX_PAD(0x504, 0x1B4, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3 IOMUX_PAD(0x504, 0x1B4, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA6__IPU_CSI1_D_3 IOMUX_PAD(0x504, 0x1B4, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA6__SRC_BT_CFG3_5 IOMUX_PAD(0x504, 0x1B4, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA7__EMI_NAND_WEIM_DA_7 IOMUX_PAD(0x508, 0x1B8, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA7__GPIO3_7 IOMUX_PAD(0x508, 0x1B8, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA7__IPU_DISP1_DAT_2 IOMUX_PAD(0x508, 0x1B8, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA7__IPU_CSI1_D_2 IOMUX_PAD(0x508, 0x1B8, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA7__SRC_BT_CFG3_4 IOMUX_PAD(0x508, 0x1B8, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA8__EMI_NAND_WEIM_DA_8 IOMUX_PAD(0x50C, 0x1BC, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA8__GPIO3_8 IOMUX_PAD(0x50C, 0x1BC, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA8__IPU_DISP1_DAT_1 IOMUX_PAD(0x50C, 0x1BC, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA8__IPU_CSI1_D_1 IOMUX_PAD(0x50C, 0x1BC, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA8__SRC_BT_CFG3_3 IOMUX_PAD(0x50C, 0x1BC, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA9__EMI_NAND_WEIM_DA_9 IOMUX_PAD(0x510, 0x1C0, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA9__GPIO3_9 IOMUX_PAD(0x510, 0x1C0, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA9__IPU_DISP1_DAT_0 IOMUX_PAD(0x510, 0x1C0, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA9__IPU_CSI1_D_0 IOMUX_PAD(0x510, 0x1C0, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA9__SRC_BT_CFG3_2 IOMUX_PAD(0x510, 0x1C0, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA10__EMI_NAND_WEIM_DA_10 IOMUX_PAD(0x514, 0x1C4, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA10__GPIO3_10 IOMUX_PAD(0x514, 0x1C4, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA10__IPU_DI1_PIN15 IOMUX_PAD(0x514, 0x1C4, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA10__IPU_CSI1_DATA_EN IOMUX_PAD(0x514, 0x1C4, 4, 0x834, 1, 0)
+#define _MX53_PAD_EIM_DA10__SRC_BT_CFG3_1 IOMUX_PAD(0x514, 0x1C4, 7, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA11__EMI_NAND_WEIM_DA_11 IOMUX_PAD(0x518, 0x1C8, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA11__GPIO3_11 IOMUX_PAD(0x518, 0x1C8, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA11__IPU_DI1_PIN2 IOMUX_PAD(0x518, 0x1C8, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA11__IPU_CSI1_HSYNC IOMUX_PAD(0x518, 0x1C8, 4, 0x838, 1, 0)
+#define _MX53_PAD_EIM_DA12__EMI_NAND_WEIM_DA_12 IOMUX_PAD(0x51C, 0x1CC, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA12__GPIO3_12 IOMUX_PAD(0x51C, 0x1CC, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA12__IPU_DI1_PIN3 IOMUX_PAD(0x51C, 0x1CC, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA12__IPU_CSI1_VSYNC IOMUX_PAD(0x51C, 0x1CC, 4, 0x83C, 1, 0)
+#define _MX53_PAD_EIM_DA13__EMI_NAND_WEIM_DA_13 IOMUX_PAD(0x520, 0x1D0, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA13__GPIO3_13 IOMUX_PAD(0x520, 0x1D0, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA13__IPU_DI1_D0_CS IOMUX_PAD(0x520, 0x1D0, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA13__CCM_DI1_EXT_CLK IOMUX_PAD(0x520, 0x1D0, 4, 0x76C, 1, 0)
+#define _MX53_PAD_EIM_DA14__EMI_NAND_WEIM_DA_14 IOMUX_PAD(0x524, 0x1D4, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA14__GPIO3_14 IOMUX_PAD(0x524, 0x1D4, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA14__IPU_DI1_D1_CS IOMUX_PAD(0x524, 0x1D4, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA14__CCM_DI0_EXT_CLK IOMUX_PAD(0x524, 0x1D4, 4, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA15__EMI_NAND_WEIM_DA_15 IOMUX_PAD(0x528, 0x1D8, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA15__GPIO3_15 IOMUX_PAD(0x528, 0x1D8, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA15__IPU_DI1_PIN1 IOMUX_PAD(0x528, 0x1D8, 3, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA15__IPU_DI1_PIN4 IOMUX_PAD(0x528, 0x1D8, 4, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_WE_B__EMI_NANDF_WE_B IOMUX_PAD(0x52C, 0x1DC, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_WE_B__GPIO6_12 IOMUX_PAD(0x52C, 0x1DC, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_RE_B__EMI_NANDF_RE_B IOMUX_PAD(0x530, 0x1E0, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_RE_B__GPIO6_13 IOMUX_PAD(0x530, 0x1E0, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_WAIT__EMI_WEIM_WAIT IOMUX_PAD(0x534, 0x1E4, 0, 0x0, 0, 0)
+#define _MX53_PAD_EIM_WAIT__GPIO5_0 IOMUX_PAD(0x534, 0x1E4, 1, 0x0, 0, 0)
+#define _MX53_PAD_EIM_WAIT__EMI_WEIM_DTACK_B IOMUX_PAD(0x534, 0x1E4, 2, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX3_P__GPIO6_22 IOMUX_PAD(NON_PAD_I, 0x1EC, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 IOMUX_PAD(NON_PAD_I, 0x1EC, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX2_P__GPIO6_24 IOMUX_PAD(NON_PAD_I, 0x1F0, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 IOMUX_PAD(NON_PAD_I, 0x1F0, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_CLK_P__GPIO6_26 IOMUX_PAD(NON_PAD_I, 0x1F4, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK IOMUX_PAD(NON_PAD_I, 0x1F4, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX1_P__GPIO6_28 IOMUX_PAD(NON_PAD_I, 0x1F8, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 IOMUX_PAD(NON_PAD_I, 0x1F8, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX0_P__GPIO6_30 IOMUX_PAD(NON_PAD_I, 0x1FC, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 IOMUX_PAD(NON_PAD_I, 0x1FC, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX3_P__GPIO7_22 IOMUX_PAD(NON_PAD_I, 0x200, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 IOMUX_PAD(NON_PAD_I, 0x200, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_CLK_P__GPIO7_24 IOMUX_PAD(NON_PAD_I, 0x204, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK IOMUX_PAD(NON_PAD_I, 0x204, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX2_P__GPIO7_26 IOMUX_PAD(NON_PAD_I, 0x208, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 IOMUX_PAD(NON_PAD_I, 0x208, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX1_P__GPIO7_28 IOMUX_PAD(NON_PAD_I, 0x20C, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 IOMUX_PAD(NON_PAD_I, 0x20C, 1, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX0_P__GPIO7_30 IOMUX_PAD(NON_PAD_I, 0x210, 0, 0x0, 0, 0)
+#define _MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 IOMUX_PAD(NON_PAD_I, 0x210, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_10__GPIO4_0 IOMUX_PAD(0x540, 0x214, 0, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_10__OSC32k_32K_OUT IOMUX_PAD(0x540, 0x214, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_11__GPIO4_1 IOMUX_PAD(0x544, 0x218, 0, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_12__GPIO4_2 IOMUX_PAD(0x548, 0x21C, 0, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_13__GPIO4_3 IOMUX_PAD(0x54C, 0x220, 0, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_14__GPIO4_4 IOMUX_PAD(0x550, 0x224, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CLE__EMI_NANDF_CLE IOMUX_PAD(0x5A0, 0x228, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CLE__GPIO6_7 IOMUX_PAD(0x5A0, 0x228, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CLE__USBPHY1_VSTATUS_0 IOMUX_PAD(0x5A0, 0x228, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_ALE__EMI_NANDF_ALE IOMUX_PAD(0x5A4, 0x22C, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_ALE__GPIO6_8 IOMUX_PAD(0x5A4, 0x22C, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_ALE__USBPHY1_VSTATUS_1 IOMUX_PAD(0x5A4, 0x22C, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_WP_B__EMI_NANDF_WP_B IOMUX_PAD(0x5A8, 0x230, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_WP_B__GPIO6_9 IOMUX_PAD(0x5A8, 0x230, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_WP_B__USBPHY1_VSTATUS_2 IOMUX_PAD(0x5A8, 0x230, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_RB0__EMI_NANDF_RB_0 IOMUX_PAD(0x5AC, 0x234, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_RB0__GPIO6_10 IOMUX_PAD(0x5AC, 0x234, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_RB0__USBPHY1_VSTATUS_3 IOMUX_PAD(0x5AC, 0x234, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS0__EMI_NANDF_CS_0 IOMUX_PAD(0x5B0, 0x238, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS0__GPIO6_11 IOMUX_PAD(0x5B0, 0x238, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS0__USBPHY1_VSTATUS_4 IOMUX_PAD(0x5B0, 0x238, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS1__EMI_NANDF_CS_1 IOMUX_PAD(0x5B4, 0x23C, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS1__GPIO6_14 IOMUX_PAD(0x5B4, 0x23C, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS1__MLB_MLBCLK IOMUX_PAD(0x5B4, 0x23C, 6, 0x858, 0, 0)
+#define _MX53_PAD_NANDF_CS1__USBPHY1_VSTATUS_5 IOMUX_PAD(0x5B4, 0x23C, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS2__EMI_NANDF_CS_2 IOMUX_PAD(0x5B8, 0x240, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS2__GPIO6_15 IOMUX_PAD(0x5B8, 0x240, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS2__IPU_SISG_0 IOMUX_PAD(0x5B8, 0x240, 2, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS2__ESAI1_TX0 IOMUX_PAD(0x5B8, 0x240, 3, 0x7E4, 0, 0)
+#define _MX53_PAD_NANDF_CS2__EMI_WEIM_CRE IOMUX_PAD(0x5B8, 0x240, 4, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS2__CCM_CSI0_MCLK IOMUX_PAD(0x5B8, 0x240, 5, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS2__MLB_MLBSIG IOMUX_PAD(0x5B8, 0x240, 6, 0x860, 0, 0)
+#define _MX53_PAD_NANDF_CS2__USBPHY1_VSTATUS_6 IOMUX_PAD(0x5B8, 0x240, 7, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS3__EMI_NANDF_CS_3 IOMUX_PAD(0x5BC, 0x244, 0, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS3__GPIO6_16 IOMUX_PAD(0x5BC, 0x244, 1, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS3__IPU_SISG_1 IOMUX_PAD(0x5BC, 0x244, 2, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS3__ESAI1_TX1 IOMUX_PAD(0x5BC, 0x244, 3, 0x7E8, 0, 0)
+#define _MX53_PAD_NANDF_CS3__EMI_WEIM_A_26 IOMUX_PAD(0x5BC, 0x244, 4, 0x0, 0, 0)
+#define _MX53_PAD_NANDF_CS3__MLB_MLBDAT IOMUX_PAD(0x5BC, 0x244, 6, 0x85C, 0, 0)
+#define _MX53_PAD_NANDF_CS3__USBPHY1_VSTATUS_7 IOMUX_PAD(0x5BC, 0x244, 7, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDIO__FEC_MDIO IOMUX_PAD(0x5C4, 0x248, 0, 0x804, 1, 0)
+#define _MX53_PAD_FEC_MDIO__GPIO1_22 IOMUX_PAD(0x5C4, 0x248, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDIO__ESAI1_SCKR IOMUX_PAD(0x5C4, 0x248, 2, 0x7DC, 0, 0)
+#define _MX53_PAD_FEC_MDIO__FEC_COL IOMUX_PAD(0x5C4, 0x248, 3, 0x800, 1, 0)
+#define _MX53_PAD_FEC_MDIO__RTC_CE_RTC_PS2 IOMUX_PAD(0x5C4, 0x248, 4, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDIO__SDMA_DEBUG_BUS_DEVICE_3 IOMUX_PAD(0x5C4, 0x248, 5, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDIO__EMI_EMI_DEBUG_49 IOMUX_PAD(0x5C4, 0x248, 6, 0x0, 0, 0)
+#define _MX53_PAD_FEC_REF_CLK__FEC_TX_CLK IOMUX_PAD(0x5C8, 0x24C, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_REF_CLK__GPIO1_23 IOMUX_PAD(0x5C8, 0x24C, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_REF_CLK__ESAI1_FSR IOMUX_PAD(0x5C8, 0x24C, 2, 0x7CC, 0, 0)
+#define _MX53_PAD_FEC_REF_CLK__SDMA_DEBUG_BUS_DEVICE_4 IOMUX_PAD(0x5C8, 0x24C, 5, 0x0, 0, 0)
+#define _MX53_PAD_FEC_REF_CLK__EMI_EMI_DEBUG_50 IOMUX_PAD(0x5C8, 0x24C, 6, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RX_ER__FEC_RX_ER IOMUX_PAD(0x5CC, 0x250, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RX_ER__GPIO1_24 IOMUX_PAD(0x5CC, 0x250, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RX_ER__ESAI1_HCKR IOMUX_PAD(0x5CC, 0x250, 2, 0x7D4, 0, 0)
+#define _MX53_PAD_FEC_RX_ER__FEC_RX_CLK IOMUX_PAD(0x5CC, 0x250, 3, 0x808, 1, 0)
+#define _MX53_PAD_FEC_RX_ER__RTC_CE_RTC_PS3 IOMUX_PAD(0x5CC, 0x250, 4, 0x0, 0, 0)
+#define _MX53_PAD_FEC_CRS_DV__FEC_RX_DV IOMUX_PAD(0x5D0, 0x254, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_CRS_DV__GPIO1_25 IOMUX_PAD(0x5D0, 0x254, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_CRS_DV__ESAI1_SCKT IOMUX_PAD(0x5D0, 0x254, 2, 0x7E0, 0, 0)
+#define _MX53_PAD_FEC_RXD1__FEC_RDATA_1 IOMUX_PAD(0x5D4, 0x258, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RXD1__GPIO1_26 IOMUX_PAD(0x5D4, 0x258, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RXD1__ESAI1_FST IOMUX_PAD(0x5D4, 0x258, 2, 0x7D0, 0, 0)
+#define _MX53_PAD_FEC_RXD1__MLB_MLBSIG IOMUX_PAD(0x5D4, 0x258, 3, 0x860, 1, 0)
+#define _MX53_PAD_FEC_RXD1__RTC_CE_RTC_PS1 IOMUX_PAD(0x5D4, 0x258, 4, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RXD0__FEC_RDATA_0 IOMUX_PAD(0x5D8, 0x25C, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RXD0__GPIO1_27 IOMUX_PAD(0x5D8, 0x25C, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_RXD0__ESAI1_HCKT IOMUX_PAD(0x5D8, 0x25C, 2, 0x7D8, 0, 0)
+#define _MX53_PAD_FEC_RXD0__OSC32k_32K_OUT IOMUX_PAD(0x5D8, 0x25C, 3, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TX_EN__FEC_TX_EN IOMUX_PAD(0x5DC, 0x260, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TX_EN__GPIO1_28 IOMUX_PAD(0x5DC, 0x260, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TX_EN__ESAI1_TX3_RX2 IOMUX_PAD(0x5DC, 0x260, 2, 0x7F0, 0, 0)
+#define _MX53_PAD_FEC_TXD1__FEC_TDATA_1 IOMUX_PAD(0x5E0, 0x264, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TXD1__GPIO1_29 IOMUX_PAD(0x5E0, 0x264, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TXD1__ESAI1_TX2_RX3 IOMUX_PAD(0x5E0, 0x264, 2, 0x7EC, 0, 0)
+#define _MX53_PAD_FEC_TXD1__MLB_MLBCLK IOMUX_PAD(0x5E0, 0x264, 3, 0x858, 1, 0)
+#define _MX53_PAD_FEC_TXD1__RTC_CE_RTC_PRSC_CLK IOMUX_PAD(0x5E0, 0x264, 4, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TXD0__FEC_TDATA_0 IOMUX_PAD(0x5E4, 0x268, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TXD0__GPIO1_30 IOMUX_PAD(0x5E4, 0x268, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_TXD0__ESAI1_TX4_RX1 IOMUX_PAD(0x5E4, 0x268, 2, 0x7F4, 0, 0)
+#define _MX53_PAD_FEC_TXD0__USBPHY2_DATAOUT_0 IOMUX_PAD(0x5E4, 0x268, 7, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDC__FEC_MDC IOMUX_PAD(0x5E8, 0x26C, 0, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDC__GPIO1_31 IOMUX_PAD(0x5E8, 0x26C, 1, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDC__ESAI1_TX5_RX0 IOMUX_PAD(0x5E8, 0x26C, 2, 0x7F8, 0, 0)
+#define _MX53_PAD_FEC_MDC__MLB_MLBDAT IOMUX_PAD(0x5E8, 0x26C, 3, 0x85C, 1, 0)
+#define _MX53_PAD_FEC_MDC__RTC_CE_RTC_ALARM1_TRIG IOMUX_PAD(0x5E8, 0x26C, 4, 0x0, 0, 0)
+#define _MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1 IOMUX_PAD(0x5E8, 0x26C, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DIOW__PATA_DIOW IOMUX_PAD(0x5F0, 0x270, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DIOW__GPIO6_17 IOMUX_PAD(0x5F0, 0x270, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DIOW__UART1_TXD_MUX IOMUX_PAD(0x5F0, 0x270, 3, 0x878, 2, 0)
+#define _MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2 IOMUX_PAD(0x5F0, 0x270, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMACK__PATA_DMACK IOMUX_PAD(0x5F4, 0x274, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMACK__GPIO6_18 IOMUX_PAD(0x5F4, 0x274, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMACK__UART1_RXD_MUX IOMUX_PAD(0x5F4, 0x274, 3, 0x878, 3, 0)
+#define _MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3 IOMUX_PAD(0x5F4, 0x274, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMARQ__PATA_DMARQ IOMUX_PAD(0x5F8, 0x278, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMARQ__GPIO7_0 IOMUX_PAD(0x5F8, 0x278, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMARQ__UART2_TXD_MUX IOMUX_PAD(0x5F8, 0x278, 3, 0x880, 2, 0)
+#define _MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0 IOMUX_PAD(0x5F8, 0x278, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4 IOMUX_PAD(0x5F8, 0x278, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN IOMUX_PAD(0x5FC, 0x27C, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_BUFFER_EN__GPIO7_1 IOMUX_PAD(0x5FC, 0x27C, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX IOMUX_PAD(0x5FC, 0x27C, 3, 0x880, 3, 0)
+#define _MX53_PAD_PATA_BUFFER_EN__CCM_CCM_OUT_1 IOMUX_PAD(0x5FC, 0x27C, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5 IOMUX_PAD(0x5FC, 0x27C, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_INTRQ__PATA_INTRQ IOMUX_PAD(0x600, 0x280, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_INTRQ__GPIO7_2 IOMUX_PAD(0x600, 0x280, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_INTRQ__UART2_CTS IOMUX_PAD(0x600, 0x280, 3, 0x87C, 2, 0)
+#define _MX53_PAD_PATA_INTRQ__CAN1_TXCAN IOMUX_PAD(0x600, 0x280, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2 IOMUX_PAD(0x600, 0x280, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6 IOMUX_PAD(0x600, 0x280, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DIOR__PATA_DIOR IOMUX_PAD(0x604, 0x284, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DIOR__GPIO7_3 IOMUX_PAD(0x604, 0x284, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DIOR__UART2_RTS IOMUX_PAD(0x604, 0x284, 3, 0x87C, 3, 0)
+#define _MX53_PAD_PATA_DIOR__CAN1_RXCAN IOMUX_PAD(0x604, 0x284, 4, 0x760, 1, 0)
+#define _MX53_PAD_PATA_DIOR__USBPHY2_DATAOUT_7 IOMUX_PAD(0x604, 0x284, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B IOMUX_PAD(0x608, 0x288, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_RESET_B__GPIO7_4 IOMUX_PAD(0x608, 0x288, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_RESET_B__ESDHC3_CMD IOMUX_PAD(0x608, 0x288, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_RESET_B__UART1_CTS IOMUX_PAD(0x608, 0x288, 3, 0x874, 2, 0)
+#define _MX53_PAD_PATA_RESET_B__CAN2_TXCAN IOMUX_PAD(0x608, 0x288, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 IOMUX_PAD(0x608, 0x288, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_IORDY__PATA_IORDY IOMUX_PAD(0x60C, 0x28C, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_IORDY__GPIO7_5 IOMUX_PAD(0x60C, 0x28C, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_IORDY__ESDHC3_CLK IOMUX_PAD(0x60C, 0x28C, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_IORDY__UART1_RTS IOMUX_PAD(0x60C, 0x28C, 3, 0x874, 3, 0)
+#define _MX53_PAD_PATA_IORDY__CAN2_RXCAN IOMUX_PAD(0x60C, 0x28C, 4, 0x764, 1, 0)
+#define _MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 IOMUX_PAD(0x60C, 0x28C, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_0__PATA_DA_0 IOMUX_PAD(0x610, 0x290, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_0__GPIO7_6 IOMUX_PAD(0x610, 0x290, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_0__ESDHC3_RST IOMUX_PAD(0x610, 0x290, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_0__OWIRE_LINE IOMUX_PAD(0x610, 0x290, 4, 0x864, 0, 0)
+#define _MX53_PAD_PATA_DA_0__USBPHY1_DATAOUT_2 IOMUX_PAD(0x610, 0x290, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_1__PATA_DA_1 IOMUX_PAD(0x614, 0x294, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_1__GPIO7_7 IOMUX_PAD(0x614, 0x294, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_1__ESDHC4_CMD IOMUX_PAD(0x614, 0x294, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_1__UART3_CTS IOMUX_PAD(0x614, 0x294, 4, 0x884, 4, 0)
+#define _MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3 IOMUX_PAD(0x614, 0x294, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_2__PATA_DA_2 IOMUX_PAD(0x618, 0x298, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_2__GPIO7_8 IOMUX_PAD(0x618, 0x298, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_2__ESDHC4_CLK IOMUX_PAD(0x618, 0x298, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DA_2__UART3_RTS IOMUX_PAD(0x618, 0x298, 4, 0x884, 5, 0)
+#define _MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4 IOMUX_PAD(0x618, 0x298, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_CS_0__PATA_CS_0 IOMUX_PAD(0x61C, 0x29C, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_CS_0__GPIO7_9 IOMUX_PAD(0x61C, 0x29C, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_CS_0__UART3_TXD_MUX IOMUX_PAD(0x61C, 0x29C, 4, 0x888, 2, 0)
+#define _MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5 IOMUX_PAD(0x61C, 0x29C, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_CS_1__PATA_CS_1 IOMUX_PAD(0x620, 0x2A0, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_CS_1__GPIO7_10 IOMUX_PAD(0x620, 0x2A0, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_CS_1__UART3_RXD_MUX IOMUX_PAD(0x620, 0x2A0, 4, 0x888, 3, 0)
+#define _MX53_PAD_PATA_CS_1__USBPHY1_DATAOUT_6 IOMUX_PAD(0x620, 0x2A0, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__PATA_DATA_0 IOMUX_PAD(0x628, 0x2A4, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__GPIO2_0 IOMUX_PAD(0x628, 0x2A4, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__EMI_NANDF_D_0 IOMUX_PAD(0x628, 0x2A4, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__ESDHC3_DAT4 IOMUX_PAD(0x628, 0x2A4, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__GPU3d_GPU_DEBUG_OUT_0 IOMUX_PAD(0x628, 0x2A4, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__IPU_DIAG_BUS_0 IOMUX_PAD(0x628, 0x2A4, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA0__USBPHY1_DATAOUT_7 IOMUX_PAD(0x628, 0x2A4, 7, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA1__PATA_DATA_1 IOMUX_PAD(0x62C, 0x2A8, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA1__GPIO2_1 IOMUX_PAD(0x62C, 0x2A8, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA1__EMI_NANDF_D_1 IOMUX_PAD(0x62C, 0x2A8, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA1__ESDHC3_DAT5 IOMUX_PAD(0x62C, 0x2A8, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA1__GPU3d_GPU_DEBUG_OUT_1 IOMUX_PAD(0x62C, 0x2A8, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA1__IPU_DIAG_BUS_1 IOMUX_PAD(0x62C, 0x2A8, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA2__PATA_DATA_2 IOMUX_PAD(0x630, 0x2AC, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA2__GPIO2_2 IOMUX_PAD(0x630, 0x2AC, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA2__EMI_NANDF_D_2 IOMUX_PAD(0x630, 0x2AC, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA2__ESDHC3_DAT6 IOMUX_PAD(0x630, 0x2AC, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA2__GPU3d_GPU_DEBUG_OUT_2 IOMUX_PAD(0x630, 0x2AC, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA2__IPU_DIAG_BUS_2 IOMUX_PAD(0x630, 0x2AC, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA3__PATA_DATA_3 IOMUX_PAD(0x634, 0x2B0, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA3__GPIO2_3 IOMUX_PAD(0x634, 0x2B0, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA3__EMI_NANDF_D_3 IOMUX_PAD(0x634, 0x2B0, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA3__ESDHC3_DAT7 IOMUX_PAD(0x634, 0x2B0, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA3__GPU3d_GPU_DEBUG_OUT_3 IOMUX_PAD(0x634, 0x2B0, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA3__IPU_DIAG_BUS_3 IOMUX_PAD(0x634, 0x2B0, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA4__PATA_DATA_4 IOMUX_PAD(0x638, 0x2B4, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA4__GPIO2_4 IOMUX_PAD(0x638, 0x2B4, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA4__EMI_NANDF_D_4 IOMUX_PAD(0x638, 0x2B4, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA4__ESDHC4_DAT4 IOMUX_PAD(0x638, 0x2B4, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA4__GPU3d_GPU_DEBUG_OUT_4 IOMUX_PAD(0x638, 0x2B4, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA4__IPU_DIAG_BUS_4 IOMUX_PAD(0x638, 0x2B4, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA5__PATA_DATA_5 IOMUX_PAD(0x63C, 0x2B8, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA5__GPIO2_5 IOMUX_PAD(0x63C, 0x2B8, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA5__EMI_NANDF_D_5 IOMUX_PAD(0x63C, 0x2B8, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA5__ESDHC4_DAT5 IOMUX_PAD(0x63C, 0x2B8, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5 IOMUX_PAD(0x63C, 0x2B8, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5 IOMUX_PAD(0x63C, 0x2B8, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__PATA_DATA_6 IOMUX_PAD(0x640, 0x2BC, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__GPIO2_6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 IOMUX_PAD(0x640, 0x2BC, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__ESDHC4_DAT6 IOMUX_PAD(0x640, 0x2BC, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 IOMUX_PAD(0x640, 0x2BC, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 IOMUX_PAD(0x640, 0x2BC, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA7__PATA_DATA_7 IOMUX_PAD(0x644, 0x2C0, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA7__GPIO2_7 IOMUX_PAD(0x644, 0x2C0, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA7__EMI_NANDF_D_7 IOMUX_PAD(0x644, 0x2C0, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA7__ESDHC4_DAT7 IOMUX_PAD(0x644, 0x2C0, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA7__GPU3d_GPU_DEBUG_OUT_7 IOMUX_PAD(0x644, 0x2C0, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA7__IPU_DIAG_BUS_7 IOMUX_PAD(0x644, 0x2C0, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__PATA_DATA_8 IOMUX_PAD(0x648, 0x2C4, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__GPIO2_8 IOMUX_PAD(0x648, 0x2C4, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__ESDHC1_DAT4 IOMUX_PAD(0x648, 0x2C4, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__EMI_NANDF_D_8 IOMUX_PAD(0x648, 0x2C4, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__ESDHC3_DAT0 IOMUX_PAD(0x648, 0x2C4, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__GPU3d_GPU_DEBUG_OUT_8 IOMUX_PAD(0x648, 0x2C4, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA8__IPU_DIAG_BUS_8 IOMUX_PAD(0x648, 0x2C4, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__PATA_DATA_9 IOMUX_PAD(0x64C, 0x2C8, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__GPIO2_9 IOMUX_PAD(0x64C, 0x2C8, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__ESDHC1_DAT5 IOMUX_PAD(0x64C, 0x2C8, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__EMI_NANDF_D_9 IOMUX_PAD(0x64C, 0x2C8, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__ESDHC3_DAT1 IOMUX_PAD(0x64C, 0x2C8, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__GPU3d_GPU_DEBUG_OUT_9 IOMUX_PAD(0x64C, 0x2C8, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA9__IPU_DIAG_BUS_9 IOMUX_PAD(0x64C, 0x2C8, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__PATA_DATA_10 IOMUX_PAD(0x650, 0x2CC, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__GPIO2_10 IOMUX_PAD(0x650, 0x2CC, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__ESDHC1_DAT6 IOMUX_PAD(0x650, 0x2CC, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__EMI_NANDF_D_10 IOMUX_PAD(0x650, 0x2CC, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__ESDHC3_DAT2 IOMUX_PAD(0x650, 0x2CC, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__GPU3d_GPU_DEBUG_OUT_10 IOMUX_PAD(0x650, 0x2CC, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA10__IPU_DIAG_BUS_10 IOMUX_PAD(0x650, 0x2CC, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__PATA_DATA_11 IOMUX_PAD(0x654, 0x2D0, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__GPIO2_11 IOMUX_PAD(0x654, 0x2D0, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__ESDHC1_DAT7 IOMUX_PAD(0x654, 0x2D0, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__EMI_NANDF_D_11 IOMUX_PAD(0x654, 0x2D0, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__ESDHC3_DAT3 IOMUX_PAD(0x654, 0x2D0, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__GPU3d_GPU_DEBUG_OUT_11 IOMUX_PAD(0x654, 0x2D0, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA11__IPU_DIAG_BUS_11 IOMUX_PAD(0x654, 0x2D0, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__PATA_DATA_12 IOMUX_PAD(0x658, 0x2D4, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__GPIO2_12 IOMUX_PAD(0x658, 0x2D4, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__ESDHC2_DAT4 IOMUX_PAD(0x658, 0x2D4, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__EMI_NANDF_D_12 IOMUX_PAD(0x658, 0x2D4, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__ESDHC4_DAT0 IOMUX_PAD(0x658, 0x2D4, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__GPU3d_GPU_DEBUG_OUT_12 IOMUX_PAD(0x658, 0x2D4, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA12__IPU_DIAG_BUS_12 IOMUX_PAD(0x658, 0x2D4, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__PATA_DATA_13 IOMUX_PAD(0x65C, 0x2D8, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__GPIO2_13 IOMUX_PAD(0x65C, 0x2D8, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__ESDHC2_DAT5 IOMUX_PAD(0x65C, 0x2D8, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__EMI_NANDF_D_13 IOMUX_PAD(0x65C, 0x2D8, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__ESDHC4_DAT1 IOMUX_PAD(0x65C, 0x2D8, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__GPU3d_GPU_DEBUG_OUT_13 IOMUX_PAD(0x65C, 0x2D8, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA13__IPU_DIAG_BUS_13 IOMUX_PAD(0x65C, 0x2D8, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__PATA_DATA_14 IOMUX_PAD(0x660, 0x2DC, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__GPIO2_14 IOMUX_PAD(0x660, 0x2DC, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__ESDHC2_DAT6 IOMUX_PAD(0x660, 0x2DC, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__EMI_NANDF_D_14 IOMUX_PAD(0x660, 0x2DC, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__ESDHC4_DAT2 IOMUX_PAD(0x660, 0x2DC, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__GPU3d_GPU_DEBUG_OUT_14 IOMUX_PAD(0x660, 0x2DC, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA14__IPU_DIAG_BUS_14 IOMUX_PAD(0x660, 0x2DC, 6, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__PATA_DATA_15 IOMUX_PAD(0x664, 0x2E0, 0, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__GPIO2_15 IOMUX_PAD(0x664, 0x2E0, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__ESDHC2_DAT7 IOMUX_PAD(0x664, 0x2E0, 2, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__EMI_NANDF_D_15 IOMUX_PAD(0x664, 0x2E0, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__ESDHC4_DAT3 IOMUX_PAD(0x664, 0x2E0, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__GPU3d_GPU_DEBUG_OUT_15 IOMUX_PAD(0x664, 0x2E0, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA15__IPU_DIAG_BUS_15 IOMUX_PAD(0x664, 0x2E0, 6, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA0__ESDHC1_DAT0 IOMUX_PAD(0x66C, 0x2E4, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA0__GPIO1_16 IOMUX_PAD(0x66C, 0x2E4, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA0__GPT_CAPIN1 IOMUX_PAD(0x66C, 0x2E4, 3, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA0__CSPI_MISO IOMUX_PAD(0x66C, 0x2E4, 5, 0x784, 2, 0)
+#define _MX53_PAD_SD1_DATA0__CCM_PLL3_BYP IOMUX_PAD(0x66C, 0x2E4, 7, 0x778, 0, 0)
+#define _MX53_PAD_SD1_DATA1__ESDHC1_DAT1 IOMUX_PAD(0x670, 0x2E8, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA1__GPIO1_17 IOMUX_PAD(0x670, 0x2E8, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA1__GPT_CAPIN2 IOMUX_PAD(0x670, 0x2E8, 3, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA1__CSPI_SS0 IOMUX_PAD(0x670, 0x2E8, 5, 0x78C, 3, 0)
+#define _MX53_PAD_SD1_DATA1__CCM_PLL4_BYP IOMUX_PAD(0x670, 0x2E8, 7, 0x77C, 1, 0)
+#define _MX53_PAD_SD1_CMD__ESDHC1_CMD IOMUX_PAD(0x674, 0x2EC, 0 | IOMUX_CONFIG_SION, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CMD__GPIO1_18 IOMUX_PAD(0x674, 0x2EC, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CMD__GPT_CMPOUT1 IOMUX_PAD(0x674, 0x2EC, 3, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CMD__CSPI_MOSI IOMUX_PAD(0x674, 0x2EC, 5, 0x788, 2, 0)
+#define _MX53_PAD_SD1_CMD__CCM_PLL1_BYP IOMUX_PAD(0x674, 0x2EC, 7, 0x770, 0, 0)
+#define _MX53_PAD_SD1_DATA2__ESDHC1_DAT2 IOMUX_PAD(0x678, 0x2F0, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA2__GPIO1_19 IOMUX_PAD(0x678, 0x2F0, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA2__GPT_CMPOUT2 IOMUX_PAD(0x678, 0x2F0, 2, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA2__PWM2_PWMO IOMUX_PAD(0x678, 0x2F0, 3, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA2__WDOG1_WDOG_B IOMUX_PAD(0x678, 0x2F0, 4, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA2__CSPI_SS1 IOMUX_PAD(0x678, 0x2F0, 5, 0x790, 2, 0)
+#define _MX53_PAD_SD1_DATA2__WDOG1_WDOG_RST_B_DEB IOMUX_PAD(0x678, 0x2F0, 6, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA2__CCM_PLL2_BYP IOMUX_PAD(0x678, 0x2F0, 7, 0x774, 0, 0)
+#define _MX53_PAD_SD1_CLK__ESDHC1_CLK IOMUX_PAD(0x67C, 0x2F4, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CLK__GPIO1_20 IOMUX_PAD(0x67C, 0x2F4, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CLK__OSC32k_32K_OUT IOMUX_PAD(0x67C, 0x2F4, 2, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CLK__GPT_CLKIN IOMUX_PAD(0x67C, 0x2F4, 3, 0x0, 0, 0)
+#define _MX53_PAD_SD1_CLK__CSPI_SCLK IOMUX_PAD(0x67C, 0x2F4, 5, 0x780, 2, 0)
+#define _MX53_PAD_SD1_CLK__SATA_PHY_DTB_0 IOMUX_PAD(0x67C, 0x2F4, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__ESDHC1_DAT3 IOMUX_PAD(0x680, 0x2F8, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__GPIO1_21 IOMUX_PAD(0x680, 0x2F8, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__GPT_CMPOUT3 IOMUX_PAD(0x680, 0x2F8, 2, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__PWM1_PWMO IOMUX_PAD(0x680, 0x2F8, 3, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__WDOG2_WDOG_B IOMUX_PAD(0x680, 0x2F8, 4, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__CSPI_SS2 IOMUX_PAD(0x680, 0x2F8, 5, 0x794, 2, 0)
+#define _MX53_PAD_SD1_DATA3__WDOG2_WDOG_RST_B_DEB IOMUX_PAD(0x680, 0x2F8, 6, 0x0, 0, 0)
+#define _MX53_PAD_SD1_DATA3__SATA_PHY_DTB_1 IOMUX_PAD(0x680, 0x2F8, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD2_CLK__ESDHC2_CLK IOMUX_PAD(0x688, 0x2FC, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD2_CLK__GPIO1_10 IOMUX_PAD(0x688, 0x2FC, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD2_CLK__KPP_COL_5 IOMUX_PAD(0x688, 0x2FC, 2, 0x840, 2, 0)
+#define _MX53_PAD_SD2_CLK__AUDMUX_AUD4_RXFS IOMUX_PAD(0x688, 0x2FC, 3, 0x73C, 1, 0)
+#define _MX53_PAD_SD2_CLK__CSPI_SCLK IOMUX_PAD(0x688, 0x2FC, 5, 0x780, 3, 0)
+#define _MX53_PAD_SD2_CLK__SCC_RANDOM_V IOMUX_PAD(0x688, 0x2FC, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD2_CMD__ESDHC2_CMD IOMUX_PAD(0x68C, 0x300, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD2_CMD__GPIO1_11 IOMUX_PAD(0x68C, 0x300, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD2_CMD__KPP_ROW_5 IOMUX_PAD(0x68C, 0x300, 2, 0x84C, 1, 0)
+#define _MX53_PAD_SD2_CMD__AUDMUX_AUD4_RXC IOMUX_PAD(0x68C, 0x300, 3, 0x738, 1, 0)
+#define _MX53_PAD_SD2_CMD__CSPI_MOSI IOMUX_PAD(0x68C, 0x300, 5, 0x788, 3, 0)
+#define _MX53_PAD_SD2_CMD__SCC_RANDOM IOMUX_PAD(0x68C, 0x300, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA3__ESDHC2_DAT3 IOMUX_PAD(0x690, 0x304, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA3__GPIO1_12 IOMUX_PAD(0x690, 0x304, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA3__KPP_COL_6 IOMUX_PAD(0x690, 0x304, 2, 0x844, 1, 0)
+#define _MX53_PAD_SD2_DATA3__AUDMUX_AUD4_TXC IOMUX_PAD(0x690, 0x304, 3, 0x740, 1, 0)
+#define _MX53_PAD_SD2_DATA3__CSPI_SS2 IOMUX_PAD(0x690, 0x304, 5, 0x794, 3, 0)
+#define _MX53_PAD_SD2_DATA3__SJC_DONE IOMUX_PAD(0x690, 0x304, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA2__ESDHC2_DAT2 IOMUX_PAD(0x694, 0x308, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA2__GPIO1_13 IOMUX_PAD(0x694, 0x308, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA2__KPP_ROW_6 IOMUX_PAD(0x694, 0x308, 2, 0x850, 1, 0)
+#define _MX53_PAD_SD2_DATA2__AUDMUX_AUD4_TXD IOMUX_PAD(0x694, 0x308, 3, 0x734, 1, 0)
+#define _MX53_PAD_SD2_DATA2__CSPI_SS1 IOMUX_PAD(0x694, 0x308, 5, 0x790, 3, 0)
+#define _MX53_PAD_SD2_DATA2__SJC_FAIL IOMUX_PAD(0x694, 0x308, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA1__ESDHC2_DAT1 IOMUX_PAD(0x698, 0x30C, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA1__GPIO1_14 IOMUX_PAD(0x698, 0x30C, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA1__KPP_COL_7 IOMUX_PAD(0x698, 0x30C, 2, 0x848, 1, 0)
+#define _MX53_PAD_SD2_DATA1__AUDMUX_AUD4_TXFS IOMUX_PAD(0x698, 0x30C, 3, 0x744, 0, 0)
+#define _MX53_PAD_SD2_DATA1__CSPI_SS0 IOMUX_PAD(0x698, 0x30C, 5, 0x78C, 4, 0)
+#define _MX53_PAD_SD2_DATA1__RTIC_SEC_VIO IOMUX_PAD(0x698, 0x30C, 7, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA0__ESDHC2_DAT0 IOMUX_PAD(0x69C, 0x310, 0, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA0__GPIO1_15 IOMUX_PAD(0x69C, 0x310, 1, 0x0, 0, 0)
+#define _MX53_PAD_SD2_DATA0__KPP_ROW_7 IOMUX_PAD(0x69C, 0x310, 2, 0x854, 1, 0)
+#define _MX53_PAD_SD2_DATA0__AUDMUX_AUD4_RXD IOMUX_PAD(0x69C, 0x310, 3, 0x730, 1, 0)
+#define _MX53_PAD_SD2_DATA0__CSPI_MISO IOMUX_PAD(0x69C, 0x310, 5, 0x784, 3, 0)
+#define _MX53_PAD_SD2_DATA0__RTIC_DONE_INT IOMUX_PAD(0x69C, 0x310, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__CCM_CLKO IOMUX_PAD(0x6A4, 0x314, 0, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__GPIO1_0 IOMUX_PAD(0x6A4, 0x314, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__KPP_COL_5 IOMUX_PAD(0x6A4, 0x314, 2, 0x840, 3, 0)
+#define _MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK IOMUX_PAD(0x6A4, 0x314, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__EPIT1_EPITO IOMUX_PAD(0x6A4, 0x314, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__SRTC_ALARM_DEB IOMUX_PAD(0x6A4, 0x314, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__USBOH3_USBH1_PWR IOMUX_PAD(0x6A4, 0x314, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_0__CSU_TD IOMUX_PAD(0x6A4, 0x314, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_1__ESAI1_SCKR IOMUX_PAD(0x6A8, 0x318, 0, 0x7DC, 1, 0)
+#define _MX53_PAD_GPIO_1__GPIO1_1 IOMUX_PAD(0x6A8, 0x318, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_1__KPP_ROW_5 IOMUX_PAD(0x6A8, 0x318, 2, 0x84C, 2, 0)
+#define _MX53_PAD_GPIO_1__CCM_SSI_EXT2_CLK IOMUX_PAD(0x6A8, 0x318, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_1__PWM2_PWMO IOMUX_PAD(0x6A8, 0x318, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_1__WDOG2_WDOG_B IOMUX_PAD(0x6A8, 0x318, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_1__ESDHC1_CD IOMUX_PAD(0x6A8, 0x318, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_1__SRC_TESTER_ACK IOMUX_PAD(0x6A8, 0x318, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_9__ESAI1_FSR IOMUX_PAD(0x6AC, 0x31C, 0, 0x7CC, 1, 0)
+#define _MX53_PAD_GPIO_9__GPIO1_9 IOMUX_PAD(0x6AC, 0x31C, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_9__KPP_COL_6 IOMUX_PAD(0x6AC, 0x31C, 2, 0x844, 2, 0)
+#define _MX53_PAD_GPIO_9__CCM_REF_EN_B IOMUX_PAD(0x6AC, 0x31C, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_9__PWM1_PWMO IOMUX_PAD(0x6AC, 0x31C, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_9__WDOG1_WDOG_B IOMUX_PAD(0x6AC, 0x31C, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_9__ESDHC1_WP IOMUX_PAD(0x6AC, 0x31C, 6, 0x7FC, 1, 0)
+#define _MX53_PAD_GPIO_9__SCC_FAIL_STATE IOMUX_PAD(0x6AC, 0x31C, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_3__ESAI1_HCKR IOMUX_PAD(0x6B0, 0x320, 0, 0x7D4, 1, 0)
+#define _MX53_PAD_GPIO_3__GPIO1_3 IOMUX_PAD(0x6B0, 0x320, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_3__I2C3_SCL IOMUX_PAD(0x6B0, 0x320, 2 | IOMUX_CONFIG_SION, 0x824, 1, 0)
+#define _MX53_PAD_GPIO_3__DPLLIP1_TOG_EN IOMUX_PAD(0x6B0, 0x320, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_3__CCM_CLKO2 IOMUX_PAD(0x6B0, 0x320, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 IOMUX_PAD(0x6B0, 0x320, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_3__USBOH3_USBH1_OC IOMUX_PAD(0x6B0, 0x320, 6, 0x8A0, 1, 0)
+#define _MX53_PAD_GPIO_3__MLB_MLBCLK IOMUX_PAD(0x6B0, 0x320, 7, 0x858, 2, 0)
+#define _MX53_PAD_GPIO_6__ESAI1_SCKT IOMUX_PAD(0x6B4, 0x324, 0, 0x7E0, 1, 0)
+#define _MX53_PAD_GPIO_6__GPIO1_6 IOMUX_PAD(0x6B4, 0x324, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_6__I2C3_SDA IOMUX_PAD(0x6B4, 0x324, 2 | IOMUX_CONFIG_SION, 0x828, 1, 0)
+#define _MX53_PAD_GPIO_6__CCM_CCM_OUT_0 IOMUX_PAD(0x6B4, 0x324, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_6__CSU_CSU_INT_DEB IOMUX_PAD(0x6B4, 0x324, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 IOMUX_PAD(0x6B4, 0x324, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_6__ESDHC2_LCTL IOMUX_PAD(0x6B4, 0x324, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_6__MLB_MLBSIG IOMUX_PAD(0x6B4, 0x324, 7, 0x860, 2, 0)
+#define _MX53_PAD_GPIO_2__ESAI1_FST IOMUX_PAD(0x6B8, 0x328, 0, 0x7D0, 1, 0)
+#define _MX53_PAD_GPIO_2__GPIO1_2 IOMUX_PAD(0x6B8, 0x328, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_2__KPP_ROW_6 IOMUX_PAD(0x6B8, 0x328, 2, 0x850, 2, 0)
+#define _MX53_PAD_GPIO_2__CCM_CCM_OUT_1 IOMUX_PAD(0x6B8, 0x328, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 IOMUX_PAD(0x6B8, 0x328, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_2__OBSERVE_MUX_OBSRV_INT_OUT2 IOMUX_PAD(0x6B8, 0x328, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_2__ESDHC2_WP IOMUX_PAD(0x6B8, 0x328, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_2__MLB_MLBDAT IOMUX_PAD(0x6B8, 0x328, 7, 0x85C, 2, 0)
+#define _MX53_PAD_GPIO_4__ESAI1_HCKT IOMUX_PAD(0x6BC, 0x32C, 0, 0x7D8, 1, 0)
+#define _MX53_PAD_GPIO_4__GPIO1_4 IOMUX_PAD(0x6BC, 0x32C, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_4__KPP_COL_7 IOMUX_PAD(0x6BC, 0x32C, 2, 0x848, 2, 0)
+#define _MX53_PAD_GPIO_4__CCM_CCM_OUT_2 IOMUX_PAD(0x6BC, 0x32C, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 IOMUX_PAD(0x6BC, 0x32C, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_4__OBSERVE_MUX_OBSRV_INT_OUT3 IOMUX_PAD(0x6BC, 0x32C, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_4__ESDHC2_CD IOMUX_PAD(0x6BC, 0x32C, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_4__SCC_SEC_STATE IOMUX_PAD(0x6BC, 0x32C, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_5__ESAI1_TX2_RX3 IOMUX_PAD(0x6C0, 0x330, 0, 0x7EC, 1, 0)
+#define _MX53_PAD_GPIO_5__GPIO1_5 IOMUX_PAD(0x6C0, 0x330, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_5__KPP_ROW_7 IOMUX_PAD(0x6C0, 0x330, 2, 0x854, 2, 0)
+#define _MX53_PAD_GPIO_5__CCM_CLKO IOMUX_PAD(0x6C0, 0x330, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 IOMUX_PAD(0x6C0, 0x330, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 IOMUX_PAD(0x6C0, 0x330, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_5__I2C3_SCL IOMUX_PAD(0x6C0, 0x330, 6, 0x824, 2, 0)
+#define _MX53_PAD_GPIO_5__CCM_PLL1_BYP IOMUX_PAD(0x6C0, 0x330, 7, 0x770, 1, 0)
+#define _MX53_PAD_GPIO_7__ESAI1_TX4_RX1 IOMUX_PAD(0x6C4, 0x334, 0, 0x7F4, 1, 0)
+#define _MX53_PAD_GPIO_7__GPIO1_7 IOMUX_PAD(0x6C4, 0x334, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_7__EPIT1_EPITO IOMUX_PAD(0x6C4, 0x334, 2, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_7__CAN1_TXCAN IOMUX_PAD(0x6C4, 0x334, 3, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_7__UART2_TXD_MUX IOMUX_PAD(0x6C4, 0x334, 4, 0x880, 4, 0)
+#define _MX53_PAD_GPIO_7__FIRI_RXD IOMUX_PAD(0x6C4, 0x334, 5, 0x80C, 1, 0)
+#define _MX53_PAD_GPIO_7__SPDIF_PLOCK IOMUX_PAD(0x6C4, 0x334, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_7__CCM_PLL2_BYP IOMUX_PAD(0x6C4, 0x334, 7, 0x774, 1, 0)
+#define _MX53_PAD_GPIO_8__ESAI1_TX5_RX0 IOMUX_PAD(0x6C8, 0x338, 0, 0x7F8, 1, 0)
+#define _MX53_PAD_GPIO_8__GPIO1_8 IOMUX_PAD(0x6C8, 0x338, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_8__EPIT2_EPITO IOMUX_PAD(0x6C8, 0x338, 2, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_8__CAN1_RXCAN IOMUX_PAD(0x6C8, 0x338, 3, 0x760, 3, 0)
+#define _MX53_PAD_GPIO_8__UART2_RXD_MUX IOMUX_PAD(0x6C8, 0x338, 4, 0x880, 5, 0)
+#define _MX53_PAD_GPIO_8__FIRI_TXD IOMUX_PAD(0x6C8, 0x338, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_8__SPDIF_SRCLK IOMUX_PAD(0x6C8, 0x338, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_8__CCM_PLL3_BYP IOMUX_PAD(0x6C8, 0x338, 7, 0x778, 1, 0)
+#define _MX53_PAD_GPIO_16__ESAI1_TX3_RX2 IOMUX_PAD(0x6CC, 0x33C, 0, 0x7F0, 1, 0)
+#define _MX53_PAD_GPIO_16__GPIO7_11 IOMUX_PAD(0x6CC, 0x33C, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT IOMUX_PAD(0x6CC, 0x33C, 2, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 IOMUX_PAD(0x6CC, 0x33C, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_16__SPDIF_IN1 IOMUX_PAD(0x6CC, 0x33C, 5, 0x870, 1, 0)
+#define _MX53_PAD_GPIO_16__I2C3_SDA IOMUX_PAD(0x6CC, 0x33C, 6 | IOMUX_CONFIG_SION, 0x828, 2, 0)
+#define _MX53_PAD_GPIO_16__SJC_DE_B IOMUX_PAD(0x6CC, 0x33C, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_17__ESAI1_TX0 IOMUX_PAD(0x6D0, 0x340, 0, 0x7E4, 1, 0)
+#define _MX53_PAD_GPIO_17__GPIO7_12 IOMUX_PAD(0x6D0, 0x340, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_17__SDMA_EXT_EVENT_0 IOMUX_PAD(0x6D0, 0x340, 2, 0x868, 1, 0)
+#define _MX53_PAD_GPIO_17__GPC_PMIC_RDY IOMUX_PAD(0x6D0, 0x340, 3, 0x810, 1, 0)
+#define _MX53_PAD_GPIO_17__RTC_CE_RTC_FSV_TRIG IOMUX_PAD(0x6D0, 0x340, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_17__SPDIF_OUT1 IOMUX_PAD(0x6D0, 0x340, 5, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_17__IPU_SNOOP2 IOMUX_PAD(0x6D0, 0x340, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_17__SJC_JTAG_ACT IOMUX_PAD(0x6D0, 0x340, 7, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_18__ESAI1_TX1 IOMUX_PAD(0x6D4, 0x344, 0, 0x7E8, 1, 0)
+#define _MX53_PAD_GPIO_18__GPIO7_13 IOMUX_PAD(0x6D4, 0x344, 1, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_18__SDMA_EXT_EVENT_1 IOMUX_PAD(0x6D4, 0x344, 2, 0x86C, 1, 0)
+#define _MX53_PAD_GPIO_18__OWIRE_LINE IOMUX_PAD(0x6D4, 0x344, 3, 0x864, 1, 0)
+#define _MX53_PAD_GPIO_18__RTC_CE_RTC_ALARM2_TRIG IOMUX_PAD(0x6D4, 0x344, 4, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_18__CCM_ASRC_EXT_CLK IOMUX_PAD(0x6D4, 0x344, 5, 0x768, 1, 0)
+#define _MX53_PAD_GPIO_18__ESDHC1_LCTL IOMUX_PAD(0x6D4, 0x344, 6, 0x0, 0, 0)
+#define _MX53_PAD_GPIO_18__SRC_SYSTEM_RST IOMUX_PAD(0x6D4, 0x344, 7, 0x0, 0, 0)
-#define MX53_PAD_GPIO_19__GPIO_4_5 IOMUX_PAD(0x348, 0x20,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_COL0__GPIO_4_6 IOMUX_PAD(0x34C, 0x24,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_ROW0__GPIO_4_7 IOMUX_PAD(0x350, 0x28,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_COL1__GPIO_4_8 IOMUX_PAD(0x354, 0x2C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_ROW1__GPIO_4_9 IOMUX_PAD(0x358, 0x30,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_COL2__GPIO_4_10 IOMUX_PAD(0x35C, 0x34,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_ROW2__GPIO_4_11 IOMUX_PAD(0x360, 0x38,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_COL3__GPIO_4_12 IOMUX_PAD(0x364, 0x3C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_ROW3__GPIO_4_13 IOMUX_PAD(0x368, 0x40,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_COL4__GPIO_4_14 IOMUX_PAD(0x36C, 0x44,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_KEY_ROW4__GPIO_4_15 IOMUX_PAD(0x370, 0x48,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_KEYPAD__NVCC_KEYPAD IOMUX_PAD(0x374, NON_MUX_I,IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DI0_DISP_CLK__GPIO_4_16 IOMUX_PAD(0x378, 0x4C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DI0_PIN15__GPIO_4_17 IOMUX_PAD(0x37C, 0x50,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DI0_PIN2__GPIO_4_18 IOMUX_PAD(0x380, 0x54,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DI0_PIN3__GPIO_4_19 IOMUX_PAD(0x384, 0x58,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DI0_PIN4__GPIO_4_20 IOMUX_PAD(0x388, 0x5C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT0__GPIO_4_21 IOMUX_PAD(0x38C, 0x60,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT1__GPIO_4_22 IOMUX_PAD(0x390, 0x64,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT2__GPIO_4_23 IOMUX_PAD(0x394, 0x68,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT3__GPIO_4_24 IOMUX_PAD(0x398, 0x6C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT4__GPIO_4_25 IOMUX_PAD(0x39C, 0x70,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT5__GPIO_4_26 IOMUX_PAD(0x3A0, 0x74,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT6__GPIO_4_27 IOMUX_PAD(0x3A4, 0x78,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT7__GPIO_4_28 IOMUX_PAD(0x3A8, 0x7C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT8__GPIO_4_29 IOMUX_PAD(0x3AC, 0x80,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT9__GPIO_4_30 IOMUX_PAD(0x3B0, 0x84,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT10__GPIO_4_31 IOMUX_PAD(0x3B4, 0x88,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT11__GPIO_5_5 IOMUX_PAD(0x3B8, 0x8C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT12__GPIO_5_6 IOMUX_PAD(0x3BC, 0x90,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT13__GPIO_5_7 IOMUX_PAD(0x3C0, 0x94,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT14__GPIO_5_8 IOMUX_PAD(0x3C4, 0x98,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT15__GPIO_5_9 IOMUX_PAD(0x3C8, 0x9C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT16__GPIO_5_10 IOMUX_PAD(0x3CC, 0xA0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT17__GPIO_5_11 IOMUX_PAD(0x3D0, 0xA4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT18__GPIO_5_12 IOMUX_PAD(0x3D4, 0xA8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT19__GPIO_5_13 IOMUX_PAD(0x3D8, 0xAC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT20__GPIO_5_14 IOMUX_PAD(0x3DC, 0xB0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT21__GPIO_5_15 IOMUX_PAD(0x3E0, 0xB4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT22__GPIO_5_16 IOMUX_PAD(0x3E4, 0xB8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DISP0_DAT23__GPIO_5_17 IOMUX_PAD(0x3E8, 0xBC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_PIXCLK__GPIO_5_18 IOMUX_PAD(0x3EC, 0xC0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_MCLK__GPIO_5_19 IOMUX_PAD(0x3F0, 0xC4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_DATA_EN__GPIO_5_20 IOMUX_PAD(0x3F4, 0xC8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_VSYNC__GPIO_5_21 IOMUX_PAD(0x3F8, 0xCC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D4__GPIO_5_22 IOMUX_PAD(0x3FC, 0xD0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D5__GPIO_5_23 IOMUX_PAD(0x400, 0xD4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D6__GPIO_5_24 IOMUX_PAD(0x404, 0xD8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D7__GPIO_5_25 IOMUX_PAD(0x408, 0xDC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D8__GPIO_5_26 IOMUX_PAD(0x40C, 0xE0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D9__GPIO_5_27 IOMUX_PAD(0x410, 0xE4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D10__GPIO_5_28 IOMUX_PAD(0x414, 0xE8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D11__GPIO_5_29 IOMUX_PAD(0x418, 0xEC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D12__GPIO_5_30 IOMUX_PAD(0x41C, 0xF0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D13__GPIO_5_31 IOMUX_PAD(0x420, 0xF4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D14__GPIO_6_0 IOMUX_PAD(0x424, 0xF8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D15__GPIO_6_1 IOMUX_PAD(0x428, 0xFC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D16__GPIO_6_2 IOMUX_PAD(0x42C, 0x100,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D17__GPIO_6_3 IOMUX_PAD(0x430, 0x104,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D18__GPIO_6_4 IOMUX_PAD(0x434, 0x108,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_CSI0_D19__GPIO_6_5 IOMUX_PAD(0x438, 0x10C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_CSI0__NVCC_CSI0 IOMUX_PAD(0x43C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_JTAG_TMS__JTAG_TMS IOMUX_PAD(0x440, NON_MUX_I,IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_JTAG_MOD__JTAG_MOD IOMUX_PAD(0x444, NON_MUX_I,IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_JTAG_TRSTB__JTAG_TRSTB IOMUX_PAD(0x448, NON_MUX_I,IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_JTAG_TDI__JTAG_TDI IOMUX_PAD(0x44C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_JTAG_TCK__JTAG_TCK IOMUX_PAD(0x450, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_JTAG_TDO__JTAG_TDO IOMUX_PAD(0x454, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A25__GPIO_5_2 IOMUX_PAD(0x458, 0x110,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_EB2__GPIO_2_30 IOMUX_PAD(0x45C, 0x114,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D16__GPIO_3_16 IOMUX_PAD(0x460, 0x118,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D17__GPIO_3_17 IOMUX_PAD(0x464, 0x11C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D18__GPIO_3_18 IOMUX_PAD(0x468, 0x120,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D16__CSPI1_SCLK IOMUX_PAD(0x460, 0x118,IOMUX_CONFIG_ALT4, 0x79c, 3, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D17__CSPI1_MISO IOMUX_PAD(0x464, 0x11C,IOMUX_CONFIG_ALT4, 0x7a0, 3, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D18__CSPI1_MOSI IOMUX_PAD(0x468, 0x120,IOMUX_CONFIG_ALT4, 0x7a4, 3, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D19__GPIO_3_19 IOMUX_PAD(0x46C, 0x124,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D20__GPIO_3_20 IOMUX_PAD(0x470, 0x128,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D21__GPIO_3_21 IOMUX_PAD(0x474, 0x12C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D22__GPIO_3_22 IOMUX_PAD(0x478, 0x130,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D23__GPIO_3_23 IOMUX_PAD(0x47C, 0x134,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_EB3__GPIO_2_31 IOMUX_PAD(0x480, 0x138,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D24__GPIO_3_24 IOMUX_PAD(0x484, 0x13C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D25__GPIO_3_25 IOMUX_PAD(0x488, 0x140,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D26__GPIO_3_26 IOMUX_PAD(0x48C, 0x144,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D27__GPIO_3_27 IOMUX_PAD(0x490, 0x148,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D28__GPIO_3_28 IOMUX_PAD(0x494, 0x14C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D29__GPIO_3_29 IOMUX_PAD(0x498, 0x150,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D30__GPIO_3_30 IOMUX_PAD(0x49C, 0x154,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_D31__GPIO_3_31 IOMUX_PAD(0x4A0, 0x158,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_EIM1__NVCC_EIM1 IOMUX_PAD(0x4A4, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A24__GPIO_5_4 IOMUX_PAD(0x4A8, 0x15C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A23__GPIO_6_6 IOMUX_PAD(0x4AC, 0x160,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A22__GPIO_2_16 IOMUX_PAD(0x4B0, 0x164,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A21__GPIO_2_17 IOMUX_PAD(0x4B4, 0x168,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A20__GPIO_2_18 IOMUX_PAD(0x4B8, 0x16C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A19__GPIO_2_19 IOMUX_PAD(0x4BC, 0x170,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A18__GPIO_2_20 IOMUX_PAD(0x4C0, 0x174,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A17__GPIO_2_21 IOMUX_PAD(0x4C4, 0x178,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_A16__GPIO_2_22 IOMUX_PAD(0x4C8, 0x17C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_CS0__GPIO_2_23 IOMUX_PAD(0x4CC, 0x180,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_CS1__GPIO_2_24 IOMUX_PAD(0x4D0, 0x184,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_OE__GPIO_2_25 IOMUX_PAD(0x4D4, 0x188,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_RW__GPIO_2_26 IOMUX_PAD(0x4D8, 0x18C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_LBA__GPIO_2_27 IOMUX_PAD(0x4DC, 0x190,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_EIM4__NVCC_EIM4 IOMUX_PAD(0x4E0, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_EB0__GPIO_2_28 IOMUX_PAD(0x4E4, 0x194,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_EB1__GPIO_2_29 IOMUX_PAD(0x4E8, 0x198,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA0__GPIO_3_0 IOMUX_PAD(0x4EC, 0x19C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA1__GPIO_3_1 IOMUX_PAD(0x4F0, 0x1A0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA2__GPIO_3_2 IOMUX_PAD(0x4F4, 0x1A4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA3__GPIO_3_3 IOMUX_PAD(0x4F8, 0x1A8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA4__GPIO_3_4 IOMUX_PAD(0x4FC, 0x1AC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA5__GPIO_3_5 IOMUX_PAD(0x500, 0x1B0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA6__GPIO_3_6 IOMUX_PAD(0x504, 0x1B4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA7__GPIO_3_7 IOMUX_PAD(0x508, 0x1B8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA8__GPIO_3_8 IOMUX_PAD(0x50C, 0x1BC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA9__GPIO_3_9 IOMUX_PAD(0x510, 0x1C0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA10__GPIO_3_10 IOMUX_PAD(0x514, 0x1C4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA11__GPIO_3_11 IOMUX_PAD(0x518, 0x1C8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA12__GPIO_3_12 IOMUX_PAD(0x51C, 0x1CC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA13__GPIO_3_13 IOMUX_PAD(0x520, 0x1D0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA14__GPIO_3_14 IOMUX_PAD(0x524, 0x1D4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_DA15__GPIO_3_15 IOMUX_PAD(0x528, 0x1D8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_WE_B__GPIO_6_12 IOMUX_PAD(0x52C, 0x1DC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_RE_B__GPIO_6_13 IOMUX_PAD(0x530, 0x1E0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_WAIT__GPIO_5_0 IOMUX_PAD(0x534, 0x1E4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_EIM_BCLK__EIM_BCLK IOMUX_PAD(0x538, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_EIM7__NVCC_EIM7 IOMUX_PAD(0x53C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS1_TX3_P__GPIO_6_22 IOMUX_PAD(NON_PAD_I, 0x1EC, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS1_TX2_P__GPIO_6_24 IOMUX_PAD(NON_PAD_I, 0x1F0, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS1_CLK_P__GPIO_6_26 IOMUX_PAD(NON_PAD_I, 0x1F4, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS1_TX1_P__GPIO_6_28 IOMUX_PAD(NON_PAD_I, 0x1F8, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS1_TX0_P__GPIO_6_30 IOMUX_PAD(NON_PAD_I, 0x1FC, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS0_TX3_P__GPIO_7_22 IOMUX_PAD(NON_PAD_I, 0x200, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS0_CLK_P__GPIO_7_24 IOMUX_PAD(NON_PAD_I, 0x204, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS0_TX2_P__GPIO_7_26 IOMUX_PAD(NON_PAD_I, 0x208, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS0_TX1_P__GPIO_7_28 IOMUX_PAD(NON_PAD_I, 0x20C, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_LVDS0_TX0_P__GPIO_7_30 IOMUX_PAD(NON_PAD_I, 0x210, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_10__GPIO_4_0 IOMUX_PAD(0x540, 0x214, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_11__GPIO_4_1 IOMUX_PAD(0x544, 0x218, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_12__GPIO_4_2 IOMUX_PAD(0x548, 0x21C, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_13__GPIO_4_3 IOMUX_PAD(0x54C, 0x220, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_14__GPIO_4_4 IOMUX_PAD(0x550, 0x224, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_DQM3__DRAM_DQM3 IOMUX_PAD(0x554, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDQS3__DRAM_SDQS3 IOMUX_PAD(0x558, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDCKE1__DRAM_SDCKE1 IOMUX_PAD(0x55C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_DQM2__DRAM_DQM2 IOMUX_PAD(0x560, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDODT1__DRAM_SDODT1 IOMUX_PAD(0x564, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDQS2__DRAM_SDQS2 IOMUX_PAD(0x568, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_RESET__DRAM_RESET IOMUX_PAD(0x56C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDCLK1__DRAM_SDCLK1 IOMUX_PAD(0x570, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_CAS__DRAM_CAS IOMUX_PAD(0x574, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDCLK0__DRAM_SDCLK0 IOMUX_PAD(0x578, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDQS0__DRAM_SDQS0 IOMUX_PAD(0x57C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDODT0__DRAM_SDODT0 IOMUX_PAD(0x580, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_DQM0__DRAM_DQM0 IOMUX_PAD(0x584, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_RAS__DRAM_RAS IOMUX_PAD(0x588, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDCKE0__DRAM_SDCKE0 IOMUX_PAD(0x58C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_SDQS1__DRAM_SDQS1 IOMUX_PAD(0x590, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_DRAM_DQM1__DRAM_DQM1 IOMUX_PAD(0x594, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_PMIC_ON_REQ__PMIC_ON_REQ IOMUX_PAD(0x598, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_PMIC_STBY_REQ__PMIC_STBY_REQ IOMUX_PAD(0x59C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_CLE__GPIO_6_7 IOMUX_PAD(0x5A0, 0x228,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_ALE__GPIO_6_8 IOMUX_PAD(0x5A4, 0x22C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_WP_B__GPIO_6_9 IOMUX_PAD(0x5A8, 0x230,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_RB0__GPIO_6_10 IOMUX_PAD(0x5AC, 0x234,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_CS0__GPIO_6_11 IOMUX_PAD(0x5B0, 0x238,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_CS1__GPIO_6_14 IOMUX_PAD(0x5B4, 0x23C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_CS2__GPIO_6_15 IOMUX_PAD(0x5B8, 0x240,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NANDF_CS3__GPIO_6_16 IOMUX_PAD(0x5BC, 0x244,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_NANDF__NVCC_NANDF IOMUX_PAD(0x5C0, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_MDIO__GPIO_1_22 IOMUX_PAD(0x5C4, 0x248,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_REF_CLK__GPIO_1_23 IOMUX_PAD(0x5C8, 0x24C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_RX_ER__GPIO_1_24 IOMUX_PAD(0x5CC, 0x250,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_CRS_DV__GPIO_1_25 IOMUX_PAD(0x5D0, 0x254,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_RXD1__GPIO_1_26 IOMUX_PAD(0x5D4, 0x258,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_RXD0__GPIO_1_27 IOMUX_PAD(0x5D8, 0x25C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_TX_EN__GPIO_1_28 IOMUX_PAD(0x5DC, 0x260,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_TXD1__GPIO_1_29 IOMUX_PAD(0x5E0, 0x264,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_TXD0__GPIO_1_30 IOMUX_PAD(0x5E4, 0x268,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_FEC_MDC__GPIO_1_31 IOMUX_PAD(0x5E8, 0x26C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_FEC__NVCC_FEC IOMUX_PAD(0x5EC, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DIOW__GPIO_6_17 IOMUX_PAD(0x5F0, 0x270,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DMACK__GPIO_6_18 IOMUX_PAD(0x5F4, 0x274,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DMARQ__GPIO_7_0 IOMUX_PAD(0x5F8, 0x278,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_BUFFER_EN__GPIO_7_1 IOMUX_PAD(0x5FC, 0x27C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_INTRQ__GPIO_7_2 IOMUX_PAD(0x600, 0x280,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DIOR__GPIO_7_3 IOMUX_PAD(0x604, 0x284,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_RESET_B__GPIO_7_4 IOMUX_PAD(0x608, 0x288,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_IORDY__GPIO_7_5 IOMUX_PAD(0x60C, 0x28C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DA_0__GPIO_7_6 IOMUX_PAD(0x610, 0x290,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DA_1__GPIO_7_7 IOMUX_PAD(0x614, 0x294,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DA_2__GPIO_7_8 IOMUX_PAD(0x618, 0x298,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_CS_0__GPIO_7_9 IOMUX_PAD(0x61C, 0x29C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_CS_1__GPIO_7_10 IOMUX_PAD(0x620, 0x2A0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_ATA2__NVCC_ATA2 IOMUX_PAD(0x624, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA0__GPIO_2_0 IOMUX_PAD(0x628, 0x2A4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA1__GPIO_2_1 IOMUX_PAD(0x62C, 0x2A8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA2__GPIO_2_2 IOMUX_PAD(0x630, 0x2AC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA3__GPIO_2_3 IOMUX_PAD(0x634, 0x2B0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA4__GPIO_2_4 IOMUX_PAD(0x638, 0x2B4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA5__GPIO_2_5 IOMUX_PAD(0x63C, 0x2B8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA6__GPIO_2_6 IOMUX_PAD(0x640, 0x2BC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA7__GPIO_2_7 IOMUX_PAD(0x644, 0x2C0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA8__GPIO_2_8 IOMUX_PAD(0x648, 0x2C4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA9__GPIO_2_9 IOMUX_PAD(0x64C, 0x2C8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA10__GPIO_2_10 IOMUX_PAD(0x650, 0x2CC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA11__GPIO_2_11 IOMUX_PAD(0x654, 0x2D0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA12__GPIO_2_12 IOMUX_PAD(0x658, 0x2D4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA13__GPIO_2_13 IOMUX_PAD(0x65C, 0x2D8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA14__GPIO_2_14 IOMUX_PAD(0x660, 0x2DC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_ATA_DATA15__GPIO_2_15 IOMUX_PAD(0x664, 0x2E0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_ATA0__NVCC_ATA0 IOMUX_PAD(0x668, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD1_DATA0__GPIO_1_16 IOMUX_PAD(0x66C, 0x2E4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD1_DATA1__GPIO_1_17 IOMUX_PAD(0x670, 0x2E8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD1_CMD__GPIO_1_18 IOMUX_PAD(0x674, 0x2EC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD1_DATA2__GPIO_1_19 IOMUX_PAD(0x678, 0x2F0,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD1_CLK__GPIO_1_20 IOMUX_PAD(0x67C, 0x2F4,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD1_DATA3__GPIO_1_21 IOMUX_PAD(0x680, 0x2F8,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_SD1__NVCC_SD1 IOMUX_PAD(0x684, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD2_CLK__GPIO_1_10 IOMUX_PAD(0x688, 0x2FC,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD2_CMD__GPIO_1_11 IOMUX_PAD(0x68C, 0x300,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD2_DATA3__GPIO_1_12 IOMUX_PAD(0x690, 0x304,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD2_DATA2__GPIO_1_13 IOMUX_PAD(0x694, 0x308,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD2_DATA1__GPIO_1_14 IOMUX_PAD(0x698, 0x30C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_SD2_DATA0__GPIO_1_15 IOMUX_PAD(0x69C, 0x310,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_SD2__NVCC_SD2 IOMUX_PAD(0x6A0, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_0__GPIO_1_0 IOMUX_PAD(0x6A4, 0x314,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_1__GPIO_1_1 IOMUX_PAD(0x6A8, 0x318,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_9__GPIO_1_9 IOMUX_PAD(0x6AC, 0x31C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_3__GPIO_1_3 IOMUX_PAD(0x6B0, 0x320,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_6__GPIO_1_6 IOMUX_PAD(0x6B4, 0x324,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_2__GPIO_1_2 IOMUX_PAD(0x6B8, 0x328,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_4__GPIO_1_4 IOMUX_PAD(0x6BC, 0x32C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_5__GPIO_1_5 IOMUX_PAD(0x6C0, 0x330,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_7__GPIO_1_7 IOMUX_PAD(0x6C4, 0x334,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_8__GPIO_1_8 IOMUX_PAD(0x6C8, 0x338,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_16__GPIO_7_11 IOMUX_PAD(0x6CC, 0x33C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_17__GPIO_7_12 IOMUX_PAD(0x6D0, 0x340,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GPIO_18__GPIO_7_13 IOMUX_PAD(0x6D4, 0x344,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_NVCC_GPIO__NVCC_GPIO IOMUX_PAD(0x6D8, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_POR_B__POR_B IOMUX_PAD(0x6DC, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_BOOT_MODE1__BOOT_MODE1 IOMUX_PAD(0x6E0, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_RESET_IN_B__RESET_IN_B IOMUX_PAD(0x6E4, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_BOOT_MODE0__BOOT_MODE0 IOMUX_PAD(0x6E8, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_TEST_MODE__TEST_MODE IOMUX_PAD(0x6EC, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_ADDDS__GRP_ADDDS IOMUX_PAD(0x6F0, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_DDRMODE_CTL__GRP_DDRMODE_CTL IOMUX_PAD(0x6F4, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_DDRPKE__GRP_DDRPKE IOMUX_PAD(0x6FC, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_DDRPK__GRP_DDRPK IOMUX_PAD(0x708, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_TERM_CTL3__GRP_TERM_CTL3 IOMUX_PAD(0x70C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_DDRHYS__GRP_DDRHYS IOMUX_PAD(0x710, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_DDRMODE__GRP_DDRMODE IOMUX_PAD(0x714, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_B0DS__GRP_B0DS IOMUX_PAD(0x718, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_B1DS__GRP_B1DS IOMUX_PAD(0x71C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_CTLDS__GRP_CTLDS IOMUX_PAD(0x720, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_DDR_TYPE__GRP_DDR_TYPE IOMUX_PAD(0x724, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_B2DS__GRP_B2DS IOMUX_PAD(0x728, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
-#define MX53_PAD_GRP_B3DS__GRP_B3DS IOMUX_PAD(0x72C, NON_MUX_I, IOMUX_CONFIG_ALT0, 0x0, 0, NO_PAD_CTRL)
+#define MX53_PAD_GPIO_19__KPP_COL_5 (_MX53_PAD_GPIO_19__KPP_COL_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__GPIO4_5 (_MX53_PAD_GPIO_19__GPIO4_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__CCM_CLKO (_MX53_PAD_GPIO_19__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__SPDIF_OUT1 (_MX53_PAD_GPIO_19__SPDIF_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__RTC_CE_RTC_EXT_TRIG2 (_MX53_PAD_GPIO_19__RTC_CE_RTC_EXT_TRIG2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__ECSPI1_RDY (_MX53_PAD_GPIO_19__ECSPI1_RDY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__FEC_TDATA_3 (_MX53_PAD_GPIO_19__FEC_TDATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_19__SRC_INT_BOOT (_MX53_PAD_GPIO_19__SRC_INT_BOOT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__KPP_COL_0 (_MX53_PAD_KEY_COL0__KPP_COL_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__GPIO4_6 (_MX53_PAD_KEY_COL0__GPIO4_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC (_MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__UART4_TXD_MUX (_MX53_PAD_KEY_COL0__UART4_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__ECSPI1_SCLK (_MX53_PAD_KEY_COL0__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__FEC_RDATA_3 (_MX53_PAD_KEY_COL0__FEC_RDATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__SRC_ANY_PU_RST (_MX53_PAD_KEY_COL0__SRC_ANY_PU_RST | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__KPP_ROW_0 (_MX53_PAD_KEY_ROW0__KPP_ROW_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__GPIO4_7 (_MX53_PAD_KEY_ROW0__GPIO4_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD (_MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__UART4_RXD_MUX (_MX53_PAD_KEY_ROW0__UART4_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__ECSPI1_MOSI (_MX53_PAD_KEY_ROW0__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__FEC_TX_ER (_MX53_PAD_KEY_ROW0__FEC_TX_ER | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__KPP_COL_1 (_MX53_PAD_KEY_COL1__KPP_COL_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__GPIO4_8 (_MX53_PAD_KEY_COL1__GPIO4_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS (_MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__UART5_TXD_MUX (_MX53_PAD_KEY_COL1__UART5_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__ECSPI1_MISO (_MX53_PAD_KEY_COL1__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__FEC_RX_CLK (_MX53_PAD_KEY_COL1__FEC_RX_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__USBPHY1_TXREADY (_MX53_PAD_KEY_COL1__USBPHY1_TXREADY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__KPP_ROW_1 (_MX53_PAD_KEY_ROW1__KPP_ROW_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__GPIO4_9 (_MX53_PAD_KEY_ROW1__GPIO4_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD (_MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__UART5_RXD_MUX (_MX53_PAD_KEY_ROW1__UART5_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__ECSPI1_SS0 (_MX53_PAD_KEY_ROW1__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__FEC_COL (_MX53_PAD_KEY_ROW1__FEC_COL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__USBPHY1_RXVALID (_MX53_PAD_KEY_ROW1__USBPHY1_RXVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__KPP_COL_2 (_MX53_PAD_KEY_COL2__KPP_COL_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__GPIO4_10 (_MX53_PAD_KEY_COL2__GPIO4_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__CAN1_TXCAN (_MX53_PAD_KEY_COL2__CAN1_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__FEC_MDIO (_MX53_PAD_KEY_COL2__FEC_MDIO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__ECSPI1_SS1 (_MX53_PAD_KEY_COL2__ECSPI1_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__FEC_RDATA_2 (_MX53_PAD_KEY_COL2__FEC_RDATA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL2__USBPHY1_RXACTIVE (_MX53_PAD_KEY_COL2__USBPHY1_RXACTIVE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__KPP_ROW_2 (_MX53_PAD_KEY_ROW2__KPP_ROW_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__GPIO4_11 (_MX53_PAD_KEY_ROW2__GPIO4_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__CAN1_RXCAN (_MX53_PAD_KEY_ROW2__CAN1_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__FEC_MDC (_MX53_PAD_KEY_ROW2__FEC_MDC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__ECSPI1_SS2 (_MX53_PAD_KEY_ROW2__ECSPI1_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__FEC_TDATA_2 (_MX53_PAD_KEY_ROW2__FEC_TDATA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW2__USBPHY1_RXERROR (_MX53_PAD_KEY_ROW2__USBPHY1_RXERROR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__KPP_COL_3 (_MX53_PAD_KEY_COL3__KPP_COL_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__GPIO4_12 (_MX53_PAD_KEY_COL3__GPIO4_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__USBOH3_H2_DP (_MX53_PAD_KEY_COL3__USBOH3_H2_DP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__SPDIF_IN1 (_MX53_PAD_KEY_COL3__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__ECSPI1_SS3 (_MX53_PAD_KEY_COL3__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__FEC_CRS (_MX53_PAD_KEY_COL3__FEC_CRS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK (_MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__KPP_ROW_3 (_MX53_PAD_KEY_ROW3__KPP_ROW_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__GPIO4_13 (_MX53_PAD_KEY_ROW3__GPIO4_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__USBOH3_H2_DM (_MX53_PAD_KEY_ROW3__USBOH3_H2_DM | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK (_MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__OSC32K_32K_OUT (_MX53_PAD_KEY_ROW3__OSC32K_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__CCM_PLL4_BYP (_MX53_PAD_KEY_ROW3__CCM_PLL4_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 (_MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__KPP_COL_4 (_MX53_PAD_KEY_COL4__KPP_COL_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__GPIO4_14 (_MX53_PAD_KEY_COL4__GPIO4_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__CAN2_TXCAN (_MX53_PAD_KEY_COL4__CAN2_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__IPU_SISG_4 (_MX53_PAD_KEY_COL4__IPU_SISG_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__UART5_RTS (_MX53_PAD_KEY_COL4__UART5_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC (_MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 (_MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__KPP_ROW_4 (_MX53_PAD_KEY_ROW4__KPP_ROW_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__GPIO4_15 (_MX53_PAD_KEY_ROW4__GPIO4_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__CAN2_RXCAN (_MX53_PAD_KEY_ROW4__CAN2_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__IPU_SISG_5 (_MX53_PAD_KEY_ROW4__IPU_SISG_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__UART5_CTS (_MX53_PAD_KEY_ROW4__UART5_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR (_MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID (_MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK (_MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_DISP_CLK__GPIO4_16 (_MX53_PAD_DI0_DISP_CLK__GPIO4_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_DISP_CLK__USBOH3_USBH2_DIR (_MX53_PAD_DI0_DISP_CLK__USBOH3_USBH2_DIR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_DISP_CLK__SDMA_DEBUG_CORE_STATE_0 (_MX53_PAD_DI0_DISP_CLK__SDMA_DEBUG_CORE_STATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_DISP_CLK__EMI_EMI_DEBUG_0 (_MX53_PAD_DI0_DISP_CLK__EMI_EMI_DEBUG_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_DISP_CLK__USBPHY1_AVALID (_MX53_PAD_DI0_DISP_CLK__USBPHY1_AVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN15__IPU_DI0_PIN15 (_MX53_PAD_DI0_PIN15__IPU_DI0_PIN15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN15__GPIO4_17 (_MX53_PAD_DI0_PIN15__GPIO4_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN15__AUDMUX_AUD6_TXC (_MX53_PAD_DI0_PIN15__AUDMUX_AUD6_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN15__SDMA_DEBUG_CORE_STATE_1 (_MX53_PAD_DI0_PIN15__SDMA_DEBUG_CORE_STATE_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN15__EMI_EMI_DEBUG_1 (_MX53_PAD_DI0_PIN15__EMI_EMI_DEBUG_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN15__USBPHY1_BVALID (_MX53_PAD_DI0_PIN15__USBPHY1_BVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN2__IPU_DI0_PIN2 (_MX53_PAD_DI0_PIN2__IPU_DI0_PIN2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN2__GPIO4_18 (_MX53_PAD_DI0_PIN2__GPIO4_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN2__AUDMUX_AUD6_TXD (_MX53_PAD_DI0_PIN2__AUDMUX_AUD6_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN2__SDMA_DEBUG_CORE_STATE_2 (_MX53_PAD_DI0_PIN2__SDMA_DEBUG_CORE_STATE_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN2__EMI_EMI_DEBUG_2 (_MX53_PAD_DI0_PIN2__EMI_EMI_DEBUG_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN2__USBPHY1_ENDSESSION (_MX53_PAD_DI0_PIN2__USBPHY1_ENDSESSION | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN3__IPU_DI0_PIN3 (_MX53_PAD_DI0_PIN3__IPU_DI0_PIN3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN3__GPIO4_19 (_MX53_PAD_DI0_PIN3__GPIO4_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS (_MX53_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN3__SDMA_DEBUG_CORE_STATE_3 (_MX53_PAD_DI0_PIN3__SDMA_DEBUG_CORE_STATE_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN3__EMI_EMI_DEBUG_3 (_MX53_PAD_DI0_PIN3__EMI_EMI_DEBUG_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN3__USBPHY1_IDDIG (_MX53_PAD_DI0_PIN3__USBPHY1_IDDIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__IPU_DI0_PIN4 (_MX53_PAD_DI0_PIN4__IPU_DI0_PIN4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__GPIO4_20 (_MX53_PAD_DI0_PIN4__GPIO4_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__AUDMUX_AUD6_RXD (_MX53_PAD_DI0_PIN4__AUDMUX_AUD6_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__ESDHC1_WP (_MX53_PAD_DI0_PIN4__ESDHC1_WP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__SDMA_DEBUG_YIELD (_MX53_PAD_DI0_PIN4__SDMA_DEBUG_YIELD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__EMI_EMI_DEBUG_4 (_MX53_PAD_DI0_PIN4__EMI_EMI_DEBUG_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DI0_PIN4__USBPHY1_HOSTDISCONNECT (_MX53_PAD_DI0_PIN4__USBPHY1_HOSTDISCONNECT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0 (_MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__GPIO4_21 (_MX53_PAD_DISP0_DAT0__GPIO4_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__CSPI_SCLK (_MX53_PAD_DISP0_DAT0__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__USBOH3_USBH2_DATA_0 (_MX53_PAD_DISP0_DAT0__USBOH3_USBH2_DATA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__SDMA_DEBUG_CORE_RUN (_MX53_PAD_DISP0_DAT0__SDMA_DEBUG_CORE_RUN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__EMI_EMI_DEBUG_5 (_MX53_PAD_DISP0_DAT0__EMI_EMI_DEBUG_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT0__USBPHY2_TXREADY (_MX53_PAD_DISP0_DAT0__USBPHY2_TXREADY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1 (_MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__GPIO4_22 (_MX53_PAD_DISP0_DAT1__GPIO4_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__CSPI_MOSI (_MX53_PAD_DISP0_DAT1__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__USBOH3_USBH2_DATA_1 (_MX53_PAD_DISP0_DAT1__USBOH3_USBH2_DATA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__SDMA_DEBUG_EVENT_CHANNEL_SEL (_MX53_PAD_DISP0_DAT1__SDMA_DEBUG_EVENT_CHANNEL_SEL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__EMI_EMI_DEBUG_6 (_MX53_PAD_DISP0_DAT1__EMI_EMI_DEBUG_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT1__USBPHY2_RXVALID (_MX53_PAD_DISP0_DAT1__USBPHY2_RXVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2 (_MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__GPIO4_23 (_MX53_PAD_DISP0_DAT2__GPIO4_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__CSPI_MISO (_MX53_PAD_DISP0_DAT2__CSPI_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__USBOH3_USBH2_DATA_2 (_MX53_PAD_DISP0_DAT2__USBOH3_USBH2_DATA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__SDMA_DEBUG_MODE (_MX53_PAD_DISP0_DAT2__SDMA_DEBUG_MODE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__EMI_EMI_DEBUG_7 (_MX53_PAD_DISP0_DAT2__EMI_EMI_DEBUG_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT2__USBPHY2_RXACTIVE (_MX53_PAD_DISP0_DAT2__USBPHY2_RXACTIVE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3 (_MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__GPIO4_24 (_MX53_PAD_DISP0_DAT3__GPIO4_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__CSPI_SS0 (_MX53_PAD_DISP0_DAT3__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__USBOH3_USBH2_DATA_3 (_MX53_PAD_DISP0_DAT3__USBOH3_USBH2_DATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__SDMA_DEBUG_BUS_ERROR (_MX53_PAD_DISP0_DAT3__SDMA_DEBUG_BUS_ERROR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__EMI_EMI_DEBUG_8 (_MX53_PAD_DISP0_DAT3__EMI_EMI_DEBUG_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT3__USBPHY2_RXERROR (_MX53_PAD_DISP0_DAT3__USBPHY2_RXERROR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4 (_MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__GPIO4_25 (_MX53_PAD_DISP0_DAT4__GPIO4_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__CSPI_SS1 (_MX53_PAD_DISP0_DAT4__CSPI_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__USBOH3_USBH2_DATA_4 (_MX53_PAD_DISP0_DAT4__USBOH3_USBH2_DATA_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB (_MX53_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__EMI_EMI_DEBUG_9 (_MX53_PAD_DISP0_DAT4__EMI_EMI_DEBUG_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT4__USBPHY2_SIECLOCK (_MX53_PAD_DISP0_DAT4__USBPHY2_SIECLOCK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5 (_MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__GPIO4_26 (_MX53_PAD_DISP0_DAT5__GPIO4_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__CSPI_SS2 (_MX53_PAD_DISP0_DAT5__CSPI_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__USBOH3_USBH2_DATA_5 (_MX53_PAD_DISP0_DAT5__USBOH3_USBH2_DATA_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__SDMA_DEBUG_MATCHED_DMBUS (_MX53_PAD_DISP0_DAT5__SDMA_DEBUG_MATCHED_DMBUS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__EMI_EMI_DEBUG_10 (_MX53_PAD_DISP0_DAT5__EMI_EMI_DEBUG_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT5__USBPHY2_LINESTATE_0 (_MX53_PAD_DISP0_DAT5__USBPHY2_LINESTATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6 (_MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__GPIO4_27 (_MX53_PAD_DISP0_DAT6__GPIO4_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__CSPI_SS3 (_MX53_PAD_DISP0_DAT6__CSPI_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__USBOH3_USBH2_DATA_6 (_MX53_PAD_DISP0_DAT6__USBOH3_USBH2_DATA_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__SDMA_DEBUG_RTBUFFER_WRITE (_MX53_PAD_DISP0_DAT6__SDMA_DEBUG_RTBUFFER_WRITE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__EMI_EMI_DEBUG_11 (_MX53_PAD_DISP0_DAT6__EMI_EMI_DEBUG_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT6__USBPHY2_LINESTATE_1 (_MX53_PAD_DISP0_DAT6__USBPHY2_LINESTATE_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7 (_MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__GPIO4_28 (_MX53_PAD_DISP0_DAT7__GPIO4_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__CSPI_RDY (_MX53_PAD_DISP0_DAT7__CSPI_RDY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__USBOH3_USBH2_DATA_7 (_MX53_PAD_DISP0_DAT7__USBOH3_USBH2_DATA_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__SDMA_DEBUG_EVENT_CHANNEL_0 (_MX53_PAD_DISP0_DAT7__SDMA_DEBUG_EVENT_CHANNEL_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__EMI_EMI_DEBUG_12 (_MX53_PAD_DISP0_DAT7__EMI_EMI_DEBUG_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT7__USBPHY2_VBUSVALID (_MX53_PAD_DISP0_DAT7__USBPHY2_VBUSVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8 (_MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__GPIO4_29 (_MX53_PAD_DISP0_DAT8__GPIO4_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__PWM1_PWMO (_MX53_PAD_DISP0_DAT8__PWM1_PWMO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__WDOG1_WDOG_B (_MX53_PAD_DISP0_DAT8__WDOG1_WDOG_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__SDMA_DEBUG_EVENT_CHANNEL_1 (_MX53_PAD_DISP0_DAT8__SDMA_DEBUG_EVENT_CHANNEL_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__EMI_EMI_DEBUG_13 (_MX53_PAD_DISP0_DAT8__EMI_EMI_DEBUG_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT8__USBPHY2_AVALID (_MX53_PAD_DISP0_DAT8__USBPHY2_AVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9 (_MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__GPIO4_30 (_MX53_PAD_DISP0_DAT9__GPIO4_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__PWM2_PWMO (_MX53_PAD_DISP0_DAT9__PWM2_PWMO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__WDOG2_WDOG_B (_MX53_PAD_DISP0_DAT9__WDOG2_WDOG_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__SDMA_DEBUG_EVENT_CHANNEL_2 (_MX53_PAD_DISP0_DAT9__SDMA_DEBUG_EVENT_CHANNEL_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__EMI_EMI_DEBUG_14 (_MX53_PAD_DISP0_DAT9__EMI_EMI_DEBUG_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT9__USBPHY2_VSTATUS_0 (_MX53_PAD_DISP0_DAT9__USBPHY2_VSTATUS_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10 (_MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT10__GPIO4_31 (_MX53_PAD_DISP0_DAT10__GPIO4_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT10__USBOH3_USBH2_STP (_MX53_PAD_DISP0_DAT10__USBOH3_USBH2_STP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT10__SDMA_DEBUG_EVENT_CHANNEL_3 (_MX53_PAD_DISP0_DAT10__SDMA_DEBUG_EVENT_CHANNEL_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT10__EMI_EMI_DEBUG_15 (_MX53_PAD_DISP0_DAT10__EMI_EMI_DEBUG_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT10__USBPHY2_VSTATUS_1 (_MX53_PAD_DISP0_DAT10__USBPHY2_VSTATUS_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11 (_MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT11__GPIO5_5 (_MX53_PAD_DISP0_DAT11__GPIO5_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT11__USBOH3_USBH2_NXT (_MX53_PAD_DISP0_DAT11__USBOH3_USBH2_NXT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT11__SDMA_DEBUG_EVENT_CHANNEL_4 (_MX53_PAD_DISP0_DAT11__SDMA_DEBUG_EVENT_CHANNEL_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT11__EMI_EMI_DEBUG_16 (_MX53_PAD_DISP0_DAT11__EMI_EMI_DEBUG_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT11__USBPHY2_VSTATUS_2 (_MX53_PAD_DISP0_DAT11__USBPHY2_VSTATUS_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12 (_MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT12__GPIO5_6 (_MX53_PAD_DISP0_DAT12__GPIO5_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT12__USBOH3_USBH2_CLK (_MX53_PAD_DISP0_DAT12__USBOH3_USBH2_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT12__SDMA_DEBUG_EVENT_CHANNEL_5 (_MX53_PAD_DISP0_DAT12__SDMA_DEBUG_EVENT_CHANNEL_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT12__EMI_EMI_DEBUG_17 (_MX53_PAD_DISP0_DAT12__EMI_EMI_DEBUG_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT12__USBPHY2_VSTATUS_3 (_MX53_PAD_DISP0_DAT12__USBPHY2_VSTATUS_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13 (_MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT13__GPIO5_7 (_MX53_PAD_DISP0_DAT13__GPIO5_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS (_MX53_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT13__SDMA_DEBUG_EVT_CHN_LINES_0 (_MX53_PAD_DISP0_DAT13__SDMA_DEBUG_EVT_CHN_LINES_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT13__EMI_EMI_DEBUG_18 (_MX53_PAD_DISP0_DAT13__EMI_EMI_DEBUG_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT13__USBPHY2_VSTATUS_4 (_MX53_PAD_DISP0_DAT13__USBPHY2_VSTATUS_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14 (_MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT14__GPIO5_8 (_MX53_PAD_DISP0_DAT14__GPIO5_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC (_MX53_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT14__SDMA_DEBUG_EVT_CHN_LINES_1 (_MX53_PAD_DISP0_DAT14__SDMA_DEBUG_EVT_CHN_LINES_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT14__EMI_EMI_DEBUG_19 (_MX53_PAD_DISP0_DAT14__EMI_EMI_DEBUG_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT14__USBPHY2_VSTATUS_5 (_MX53_PAD_DISP0_DAT14__USBPHY2_VSTATUS_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15 (_MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__GPIO5_9 (_MX53_PAD_DISP0_DAT15__GPIO5_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__ECSPI1_SS1 (_MX53_PAD_DISP0_DAT15__ECSPI1_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__ECSPI2_SS1 (_MX53_PAD_DISP0_DAT15__ECSPI2_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__SDMA_DEBUG_EVT_CHN_LINES_2 (_MX53_PAD_DISP0_DAT15__SDMA_DEBUG_EVT_CHN_LINES_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__EMI_EMI_DEBUG_20 (_MX53_PAD_DISP0_DAT15__EMI_EMI_DEBUG_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT15__USBPHY2_VSTATUS_6 (_MX53_PAD_DISP0_DAT15__USBPHY2_VSTATUS_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16 (_MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__GPIO5_10 (_MX53_PAD_DISP0_DAT16__GPIO5_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__ECSPI2_MOSI (_MX53_PAD_DISP0_DAT16__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC (_MX53_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 (_MX53_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__SDMA_DEBUG_EVT_CHN_LINES_3 (_MX53_PAD_DISP0_DAT16__SDMA_DEBUG_EVT_CHN_LINES_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__EMI_EMI_DEBUG_21 (_MX53_PAD_DISP0_DAT16__EMI_EMI_DEBUG_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT16__USBPHY2_VSTATUS_7 (_MX53_PAD_DISP0_DAT16__USBPHY2_VSTATUS_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17 (_MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__GPIO5_11 (_MX53_PAD_DISP0_DAT17__GPIO5_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__ECSPI2_MISO (_MX53_PAD_DISP0_DAT17__ECSPI2_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD (_MX53_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 (_MX53_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__SDMA_DEBUG_EVT_CHN_LINES_4 (_MX53_PAD_DISP0_DAT17__SDMA_DEBUG_EVT_CHN_LINES_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT17__EMI_EMI_DEBUG_22 (_MX53_PAD_DISP0_DAT17__EMI_EMI_DEBUG_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18 (_MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__GPIO5_12 (_MX53_PAD_DISP0_DAT18__GPIO5_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__ECSPI2_SS0 (_MX53_PAD_DISP0_DAT18__ECSPI2_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS (_MX53_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS (_MX53_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__SDMA_DEBUG_EVT_CHN_LINES_5 (_MX53_PAD_DISP0_DAT18__SDMA_DEBUG_EVT_CHN_LINES_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__EMI_EMI_DEBUG_23 (_MX53_PAD_DISP0_DAT18__EMI_EMI_DEBUG_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT18__EMI_WEIM_CS_2 (_MX53_PAD_DISP0_DAT18__EMI_WEIM_CS_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19 (_MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__GPIO5_13 (_MX53_PAD_DISP0_DAT19__GPIO5_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__ECSPI2_SCLK (_MX53_PAD_DISP0_DAT19__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD (_MX53_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC (_MX53_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__SDMA_DEBUG_EVT_CHN_LINES_6 (_MX53_PAD_DISP0_DAT19__SDMA_DEBUG_EVT_CHN_LINES_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__EMI_EMI_DEBUG_24 (_MX53_PAD_DISP0_DAT19__EMI_EMI_DEBUG_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT19__EMI_WEIM_CS_3 (_MX53_PAD_DISP0_DAT19__EMI_WEIM_CS_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20 (_MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__GPIO5_14 (_MX53_PAD_DISP0_DAT20__GPIO5_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__ECSPI1_SCLK (_MX53_PAD_DISP0_DAT20__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC (_MX53_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__SDMA_DEBUG_EVT_CHN_LINES_7 (_MX53_PAD_DISP0_DAT20__SDMA_DEBUG_EVT_CHN_LINES_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__EMI_EMI_DEBUG_25 (_MX53_PAD_DISP0_DAT20__EMI_EMI_DEBUG_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT20__SATA_PHY_TDI (_MX53_PAD_DISP0_DAT20__SATA_PHY_TDI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21 (_MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__GPIO5_15 (_MX53_PAD_DISP0_DAT21__GPIO5_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__ECSPI1_MOSI (_MX53_PAD_DISP0_DAT21__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD (_MX53_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__SDMA_DEBUG_BUS_DEVICE_0 (_MX53_PAD_DISP0_DAT21__SDMA_DEBUG_BUS_DEVICE_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__EMI_EMI_DEBUG_26 (_MX53_PAD_DISP0_DAT21__EMI_EMI_DEBUG_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT21__SATA_PHY_TDO (_MX53_PAD_DISP0_DAT21__SATA_PHY_TDO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22 (_MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__GPIO5_16 (_MX53_PAD_DISP0_DAT22__GPIO5_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__ECSPI1_MISO (_MX53_PAD_DISP0_DAT22__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS (_MX53_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__SDMA_DEBUG_BUS_DEVICE_1 (_MX53_PAD_DISP0_DAT22__SDMA_DEBUG_BUS_DEVICE_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__EMI_EMI_DEBUG_27 (_MX53_PAD_DISP0_DAT22__EMI_EMI_DEBUG_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT22__SATA_PHY_TCK (_MX53_PAD_DISP0_DAT22__SATA_PHY_TCK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23 (_MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__GPIO5_17 (_MX53_PAD_DISP0_DAT23__GPIO5_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__ECSPI1_SS0 (_MX53_PAD_DISP0_DAT23__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD (_MX53_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__SDMA_DEBUG_BUS_DEVICE_2 (_MX53_PAD_DISP0_DAT23__SDMA_DEBUG_BUS_DEVICE_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__EMI_EMI_DEBUG_28 (_MX53_PAD_DISP0_DAT23__EMI_EMI_DEBUG_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_DISP0_DAT23__SATA_PHY_TMS (_MX53_PAD_DISP0_DAT23__SATA_PHY_TMS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK (_MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_PIXCLK__GPIO5_18 (_MX53_PAD_CSI0_PIXCLK__GPIO5_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 (_MX53_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_PIXCLK__EMI_EMI_DEBUG_29 (_MX53_PAD_CSI0_PIXCLK__EMI_EMI_DEBUG_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC (_MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_MCLK__GPIO5_19 (_MX53_PAD_CSI0_MCLK__GPIO5_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK (_MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 (_MX53_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_MCLK__EMI_EMI_DEBUG_30 (_MX53_PAD_CSI0_MCLK__EMI_EMI_DEBUG_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_MCLK__TPIU_TRCTL (_MX53_PAD_CSI0_MCLK__TPIU_TRCTL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN (_MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DATA_EN__GPIO5_20 (_MX53_PAD_CSI0_DATA_EN__GPIO5_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 (_MX53_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DATA_EN__EMI_EMI_DEBUG_31 (_MX53_PAD_CSI0_DATA_EN__EMI_EMI_DEBUG_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DATA_EN__TPIU_TRCLK (_MX53_PAD_CSI0_DATA_EN__TPIU_TRCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC (_MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_VSYNC__GPIO5_21 (_MX53_PAD_CSI0_VSYNC__GPIO5_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 (_MX53_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_VSYNC__EMI_EMI_DEBUG_32 (_MX53_PAD_CSI0_VSYNC__EMI_EMI_DEBUG_32 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_VSYNC__TPIU_TRACE_0 (_MX53_PAD_CSI0_VSYNC__TPIU_TRACE_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 (_MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__GPIO5_22 (_MX53_PAD_CSI0_DAT4__GPIO5_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__KPP_COL_5 (_MX53_PAD_CSI0_DAT4__KPP_COL_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__ECSPI1_SCLK (_MX53_PAD_CSI0_DAT4__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__USBOH3_USBH3_STP (_MX53_PAD_CSI0_DAT4__USBOH3_USBH3_STP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC (_MX53_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__EMI_EMI_DEBUG_33 (_MX53_PAD_CSI0_DAT4__EMI_EMI_DEBUG_33 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT4__TPIU_TRACE_1 (_MX53_PAD_CSI0_DAT4__TPIU_TRACE_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 (_MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__GPIO5_23 (_MX53_PAD_CSI0_DAT5__GPIO5_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__KPP_ROW_5 (_MX53_PAD_CSI0_DAT5__KPP_ROW_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__ECSPI1_MOSI (_MX53_PAD_CSI0_DAT5__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__USBOH3_USBH3_NXT (_MX53_PAD_CSI0_DAT5__USBOH3_USBH3_NXT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD (_MX53_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__EMI_EMI_DEBUG_34 (_MX53_PAD_CSI0_DAT5__EMI_EMI_DEBUG_34 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT5__TPIU_TRACE_2 (_MX53_PAD_CSI0_DAT5__TPIU_TRACE_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 (_MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__GPIO5_24 (_MX53_PAD_CSI0_DAT6__GPIO5_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__KPP_COL_6 (_MX53_PAD_CSI0_DAT6__KPP_COL_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__ECSPI1_MISO (_MX53_PAD_CSI0_DAT6__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__USBOH3_USBH3_CLK (_MX53_PAD_CSI0_DAT6__USBOH3_USBH3_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS (_MX53_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__EMI_EMI_DEBUG_35 (_MX53_PAD_CSI0_DAT6__EMI_EMI_DEBUG_35 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT6__TPIU_TRACE_3 (_MX53_PAD_CSI0_DAT6__TPIU_TRACE_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 (_MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__GPIO5_25 (_MX53_PAD_CSI0_DAT7__GPIO5_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__KPP_ROW_6 (_MX53_PAD_CSI0_DAT7__KPP_ROW_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__ECSPI1_SS0 (_MX53_PAD_CSI0_DAT7__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__USBOH3_USBH3_DIR (_MX53_PAD_CSI0_DAT7__USBOH3_USBH3_DIR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD (_MX53_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__EMI_EMI_DEBUG_36 (_MX53_PAD_CSI0_DAT7__EMI_EMI_DEBUG_36 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT7__TPIU_TRACE_4 (_MX53_PAD_CSI0_DAT7__TPIU_TRACE_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 (_MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__GPIO5_26 (_MX53_PAD_CSI0_DAT8__GPIO5_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__KPP_COL_7 (_MX53_PAD_CSI0_DAT8__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__ECSPI2_SCLK (_MX53_PAD_CSI0_DAT8__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC (_MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 (_MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 (_MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 (_MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__GPIO5_27 (_MX53_PAD_CSI0_DAT9__GPIO5_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__KPP_ROW_7 (_MX53_PAD_CSI0_DAT9__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__ECSPI2_MOSI (_MX53_PAD_CSI0_DAT9__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR (_MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 (_MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 (_MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 (_MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__GPIO5_28 (_MX53_PAD_CSI0_DAT10__GPIO5_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__UART1_TXD_MUX (_MX53_PAD_CSI0_DAT10__UART1_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__ECSPI2_MISO (_MX53_PAD_CSI0_DAT10__ECSPI2_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC (_MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 (_MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__EMI_EMI_DEBUG_39 (_MX53_PAD_CSI0_DAT10__EMI_EMI_DEBUG_39 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT10__TPIU_TRACE_7 (_MX53_PAD_CSI0_DAT10__TPIU_TRACE_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 (_MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__GPIO5_29 (_MX53_PAD_CSI0_DAT11__GPIO5_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__UART1_RXD_MUX (_MX53_PAD_CSI0_DAT11__UART1_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__ECSPI2_SS0 (_MX53_PAD_CSI0_DAT11__ECSPI2_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS (_MX53_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 (_MX53_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__EMI_EMI_DEBUG_40 (_MX53_PAD_CSI0_DAT11__EMI_EMI_DEBUG_40 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 (_MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 (_MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__GPIO5_30 (_MX53_PAD_CSI0_DAT12__GPIO5_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__UART4_TXD_MUX (_MX53_PAD_CSI0_DAT12__UART4_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 (_MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 (_MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 (_MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 (_MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 (_MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__GPIO5_31 (_MX53_PAD_CSI0_DAT13__GPIO5_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__UART4_RXD_MUX (_MX53_PAD_CSI0_DAT13__UART4_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 (_MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 (_MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 (_MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 (_MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 (_MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__GPIO6_0 (_MX53_PAD_CSI0_DAT14__GPIO6_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__UART5_TXD_MUX (_MX53_PAD_CSI0_DAT14__UART5_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 (_MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 (_MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 (_MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 (_MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 (_MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__GPIO6_1 (_MX53_PAD_CSI0_DAT15__GPIO6_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__UART5_RXD_MUX (_MX53_PAD_CSI0_DAT15__UART5_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 (_MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 (_MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 (_MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 (_MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 (_MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__GPIO6_2 (_MX53_PAD_CSI0_DAT16__GPIO6_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__UART4_RTS (_MX53_PAD_CSI0_DAT16__UART4_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 (_MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 (_MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 (_MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 (_MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 (_MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__GPIO6_3 (_MX53_PAD_CSI0_DAT17__GPIO6_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__UART4_CTS (_MX53_PAD_CSI0_DAT17__UART4_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 (_MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 (_MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 (_MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 (_MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 (_MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__GPIO6_4 (_MX53_PAD_CSI0_DAT18__GPIO6_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__UART5_RTS (_MX53_PAD_CSI0_DAT18__UART5_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 (_MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 (_MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 (_MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 (_MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 (_MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__GPIO6_5 (_MX53_PAD_CSI0_DAT19__GPIO6_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__UART5_CTS (_MX53_PAD_CSI0_DAT19__UART5_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 (_MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 (_MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 (_MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__USBPHY2_BISTOK (_MX53_PAD_CSI0_DAT19__USBPHY2_BISTOK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__EMI_WEIM_A_25 (_MX53_PAD_EIM_A25__EMI_WEIM_A_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__GPIO5_2 (_MX53_PAD_EIM_A25__GPIO5_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__ECSPI2_RDY (_MX53_PAD_EIM_A25__ECSPI2_RDY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__IPU_DI1_PIN12 (_MX53_PAD_EIM_A25__IPU_DI1_PIN12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__CSPI_SS1 (_MX53_PAD_EIM_A25__CSPI_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__IPU_DI0_D1_CS (_MX53_PAD_EIM_A25__IPU_DI0_D1_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A25__USBPHY1_BISTOK (_MX53_PAD_EIM_A25__USBPHY1_BISTOK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB2__EMI_WEIM_EB_2 (_MX53_PAD_EIM_EB2__EMI_WEIM_EB_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB2__GPIO2_30 (_MX53_PAD_EIM_EB2__GPIO2_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK (_MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS (_MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB2__ECSPI1_SS0 (_MX53_PAD_EIM_EB2__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D16__EMI_WEIM_D_16 (_MX53_PAD_EIM_D16__EMI_WEIM_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D16__GPIO3_16 (_MX53_PAD_EIM_D16__GPIO3_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D16__IPU_DI0_PIN5 (_MX53_PAD_EIM_D16__IPU_DI0_PIN5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK (_MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D16__ECSPI1_SCLK (_MX53_PAD_EIM_D16__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D17__EMI_WEIM_D_17 (_MX53_PAD_EIM_D17__EMI_WEIM_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D17__GPIO3_17 (_MX53_PAD_EIM_D17__GPIO3_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D17__IPU_DI0_PIN6 (_MX53_PAD_EIM_D17__IPU_DI0_PIN6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN (_MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D17__ECSPI1_MISO (_MX53_PAD_EIM_D17__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__EMI_WEIM_D_18 (_MX53_PAD_EIM_D18__EMI_WEIM_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__GPIO3_18 (_MX53_PAD_EIM_D18__GPIO3_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__IPU_DI0_PIN7 (_MX53_PAD_EIM_D18__IPU_DI0_PIN7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO (_MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__ECSPI1_MOSI (_MX53_PAD_EIM_D18__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D18__IPU_DI1_D0_CS (_MX53_PAD_EIM_D18__IPU_DI1_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__EMI_WEIM_D_19 (_MX53_PAD_EIM_D19__EMI_WEIM_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__GPIO3_19 (_MX53_PAD_EIM_D19__GPIO3_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__IPU_DI0_PIN8 (_MX53_PAD_EIM_D19__IPU_DI0_PIN8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS (_MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__ECSPI1_SS1 (_MX53_PAD_EIM_D19__ECSPI1_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__EPIT1_EPITO (_MX53_PAD_EIM_D19__EPIT1_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__UART1_CTS (_MX53_PAD_EIM_D19__UART1_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__USBOH3_USBH2_OC (_MX53_PAD_EIM_D19__USBOH3_USBH2_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__EMI_WEIM_D_20 (_MX53_PAD_EIM_D20__EMI_WEIM_D_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__GPIO3_20 (_MX53_PAD_EIM_D20__GPIO3_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__IPU_DI0_PIN16 (_MX53_PAD_EIM_D20__IPU_DI0_PIN16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__IPU_SER_DISP0_CS (_MX53_PAD_EIM_D20__IPU_SER_DISP0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__CSPI_SS0 (_MX53_PAD_EIM_D20__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__EPIT2_EPITO (_MX53_PAD_EIM_D20__EPIT2_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__UART1_RTS (_MX53_PAD_EIM_D20__UART1_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__USBOH3_USBH2_PWR (_MX53_PAD_EIM_D20__USBOH3_USBH2_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__EMI_WEIM_D_21 (_MX53_PAD_EIM_D21__EMI_WEIM_D_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__GPIO3_21 (_MX53_PAD_EIM_D21__GPIO3_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__IPU_DI0_PIN17 (_MX53_PAD_EIM_D21__IPU_DI0_PIN17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK (_MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__CSPI_SCLK (_MX53_PAD_EIM_D21__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D21__USBOH3_USBOTG_OC (_MX53_PAD_EIM_D21__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D22__EMI_WEIM_D_22 (_MX53_PAD_EIM_D22__EMI_WEIM_D_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D22__GPIO3_22 (_MX53_PAD_EIM_D22__GPIO3_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D22__IPU_DI0_PIN1 (_MX53_PAD_EIM_D22__IPU_DI0_PIN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN (_MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D22__CSPI_MISO (_MX53_PAD_EIM_D22__CSPI_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR (_MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__EMI_WEIM_D_23 (_MX53_PAD_EIM_D23__EMI_WEIM_D_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__GPIO3_23 (_MX53_PAD_EIM_D23__GPIO3_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__UART3_CTS (_MX53_PAD_EIM_D23__UART3_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__UART1_DCD (_MX53_PAD_EIM_D23__UART1_DCD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__IPU_DI0_D0_CS (_MX53_PAD_EIM_D23__IPU_DI0_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__IPU_DI1_PIN2 (_MX53_PAD_EIM_D23__IPU_DI1_PIN2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__IPU_CSI1_DATA_EN (_MX53_PAD_EIM_D23__IPU_CSI1_DATA_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__IPU_DI1_PIN14 (_MX53_PAD_EIM_D23__IPU_DI1_PIN14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 (_MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__GPIO2_31 (_MX53_PAD_EIM_EB3__GPIO2_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__UART3_RTS (_MX53_PAD_EIM_EB3__UART3_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__UART1_RI (_MX53_PAD_EIM_EB3__UART1_RI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__IPU_DI1_PIN3 (_MX53_PAD_EIM_EB3__IPU_DI1_PIN3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC (_MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__IPU_DI1_PIN16 (_MX53_PAD_EIM_EB3__IPU_DI1_PIN16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__EMI_WEIM_D_24 (_MX53_PAD_EIM_D24__EMI_WEIM_D_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__GPIO3_24 (_MX53_PAD_EIM_D24__GPIO3_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__UART3_TXD_MUX (_MX53_PAD_EIM_D24__UART3_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__ECSPI1_SS2 (_MX53_PAD_EIM_D24__ECSPI1_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__CSPI_SS2 (_MX53_PAD_EIM_D24__CSPI_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS (_MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__ECSPI2_SS2 (_MX53_PAD_EIM_D24__ECSPI2_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__UART1_DTR (_MX53_PAD_EIM_D24__UART1_DTR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__EMI_WEIM_D_25 (_MX53_PAD_EIM_D25__EMI_WEIM_D_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__GPIO3_25 (_MX53_PAD_EIM_D25__GPIO3_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__UART3_RXD_MUX (_MX53_PAD_EIM_D25__UART3_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__ECSPI1_SS3 (_MX53_PAD_EIM_D25__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__CSPI_SS3 (_MX53_PAD_EIM_D25__CSPI_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC (_MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__ECSPI2_SS3 (_MX53_PAD_EIM_D25__ECSPI2_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__UART1_DSR (_MX53_PAD_EIM_D25__UART1_DSR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__EMI_WEIM_D_26 (_MX53_PAD_EIM_D26__EMI_WEIM_D_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__GPIO3_26 (_MX53_PAD_EIM_D26__GPIO3_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__UART2_TXD_MUX (_MX53_PAD_EIM_D26__UART2_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__FIRI_RXD (_MX53_PAD_EIM_D26__FIRI_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__IPU_CSI0_D_1 (_MX53_PAD_EIM_D26__IPU_CSI0_D_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__IPU_DI1_PIN11 (_MX53_PAD_EIM_D26__IPU_DI1_PIN11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__IPU_SISG_2 (_MX53_PAD_EIM_D26__IPU_SISG_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 (_MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__EMI_WEIM_D_27 (_MX53_PAD_EIM_D27__EMI_WEIM_D_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__GPIO3_27 (_MX53_PAD_EIM_D27__GPIO3_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__UART2_RXD_MUX (_MX53_PAD_EIM_D27__UART2_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__FIRI_TXD (_MX53_PAD_EIM_D27__FIRI_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__IPU_CSI0_D_0 (_MX53_PAD_EIM_D27__IPU_CSI0_D_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__IPU_DI1_PIN13 (_MX53_PAD_EIM_D27__IPU_DI1_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__IPU_SISG_3 (_MX53_PAD_EIM_D27__IPU_SISG_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 (_MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__EMI_WEIM_D_28 (_MX53_PAD_EIM_D28__EMI_WEIM_D_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__GPIO3_28 (_MX53_PAD_EIM_D28__GPIO3_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO (_MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__CSPI_MOSI (_MX53_PAD_EIM_D28__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__IPU_EXT_TRIG (_MX53_PAD_EIM_D28__IPU_EXT_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__IPU_DI0_PIN13 (_MX53_PAD_EIM_D28__IPU_DI0_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__EMI_WEIM_D_29 (_MX53_PAD_EIM_D29__EMI_WEIM_D_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__GPIO3_29 (_MX53_PAD_EIM_D29__GPIO3_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__UART2_RTS (_MX53_PAD_EIM_D29__UART2_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS (_MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__CSPI_SS0 (_MX53_PAD_EIM_D29__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__IPU_DI1_PIN15 (_MX53_PAD_EIM_D29__IPU_DI1_PIN15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__IPU_CSI1_VSYNC (_MX53_PAD_EIM_D29__IPU_CSI1_VSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__IPU_DI0_PIN14 (_MX53_PAD_EIM_D29__IPU_DI0_PIN14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__EMI_WEIM_D_30 (_MX53_PAD_EIM_D30__EMI_WEIM_D_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__GPIO3_30 (_MX53_PAD_EIM_D30__GPIO3_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__UART3_CTS (_MX53_PAD_EIM_D30__UART3_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__IPU_CSI0_D_3 (_MX53_PAD_EIM_D30__IPU_CSI0_D_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__IPU_DI0_PIN11 (_MX53_PAD_EIM_D30__IPU_DI0_PIN11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 (_MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__USBOH3_USBH1_OC (_MX53_PAD_EIM_D30__USBOH3_USBH1_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__USBOH3_USBH2_OC (_MX53_PAD_EIM_D30__USBOH3_USBH2_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__EMI_WEIM_D_31 (_MX53_PAD_EIM_D31__EMI_WEIM_D_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__GPIO3_31 (_MX53_PAD_EIM_D31__GPIO3_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__UART3_RTS (_MX53_PAD_EIM_D31__UART3_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__IPU_CSI0_D_2 (_MX53_PAD_EIM_D31__IPU_CSI0_D_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__IPU_DI0_PIN12 (_MX53_PAD_EIM_D31__IPU_DI0_PIN12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 (_MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__USBOH3_USBH1_PWR (_MX53_PAD_EIM_D31__USBOH3_USBH1_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__USBOH3_USBH2_PWR (_MX53_PAD_EIM_D31__USBOH3_USBH2_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A24__EMI_WEIM_A_24 (_MX53_PAD_EIM_A24__EMI_WEIM_A_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A24__GPIO5_4 (_MX53_PAD_EIM_A24__GPIO5_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A24__IPU_DISP1_DAT_19 (_MX53_PAD_EIM_A24__IPU_DISP1_DAT_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A24__IPU_CSI1_D_19 (_MX53_PAD_EIM_A24__IPU_CSI1_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A24__IPU_SISG_2 (_MX53_PAD_EIM_A24__IPU_SISG_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A24__USBPHY2_BVALID (_MX53_PAD_EIM_A24__USBPHY2_BVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A23__EMI_WEIM_A_23 (_MX53_PAD_EIM_A23__EMI_WEIM_A_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A23__GPIO6_6 (_MX53_PAD_EIM_A23__GPIO6_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A23__IPU_DISP1_DAT_18 (_MX53_PAD_EIM_A23__IPU_DISP1_DAT_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A23__IPU_CSI1_D_18 (_MX53_PAD_EIM_A23__IPU_CSI1_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A23__IPU_SISG_3 (_MX53_PAD_EIM_A23__IPU_SISG_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A23__USBPHY2_ENDSESSION (_MX53_PAD_EIM_A23__USBPHY2_ENDSESSION | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A22__EMI_WEIM_A_22 (_MX53_PAD_EIM_A22__EMI_WEIM_A_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A22__GPIO2_16 (_MX53_PAD_EIM_A22__GPIO2_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A22__IPU_DISP1_DAT_17 (_MX53_PAD_EIM_A22__IPU_DISP1_DAT_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A22__IPU_CSI1_D_17 (_MX53_PAD_EIM_A22__IPU_CSI1_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A22__SRC_BT_CFG1_7 (_MX53_PAD_EIM_A22__SRC_BT_CFG1_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A21__EMI_WEIM_A_21 (_MX53_PAD_EIM_A21__EMI_WEIM_A_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A21__GPIO2_17 (_MX53_PAD_EIM_A21__GPIO2_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A21__IPU_DISP1_DAT_16 (_MX53_PAD_EIM_A21__IPU_DISP1_DAT_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A21__IPU_CSI1_D_16 (_MX53_PAD_EIM_A21__IPU_CSI1_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A21__SRC_BT_CFG1_6 (_MX53_PAD_EIM_A21__SRC_BT_CFG1_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A20__EMI_WEIM_A_20 (_MX53_PAD_EIM_A20__EMI_WEIM_A_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A20__GPIO2_18 (_MX53_PAD_EIM_A20__GPIO2_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A20__IPU_DISP1_DAT_15 (_MX53_PAD_EIM_A20__IPU_DISP1_DAT_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A20__IPU_CSI1_D_15 (_MX53_PAD_EIM_A20__IPU_CSI1_D_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A20__SRC_BT_CFG1_5 (_MX53_PAD_EIM_A20__SRC_BT_CFG1_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A19__EMI_WEIM_A_19 (_MX53_PAD_EIM_A19__EMI_WEIM_A_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A19__GPIO2_19 (_MX53_PAD_EIM_A19__GPIO2_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A19__IPU_DISP1_DAT_14 (_MX53_PAD_EIM_A19__IPU_DISP1_DAT_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A19__IPU_CSI1_D_14 (_MX53_PAD_EIM_A19__IPU_CSI1_D_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A19__SRC_BT_CFG1_4 (_MX53_PAD_EIM_A19__SRC_BT_CFG1_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A18__EMI_WEIM_A_18 (_MX53_PAD_EIM_A18__EMI_WEIM_A_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A18__GPIO2_20 (_MX53_PAD_EIM_A18__GPIO2_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A18__IPU_DISP1_DAT_13 (_MX53_PAD_EIM_A18__IPU_DISP1_DAT_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A18__IPU_CSI1_D_13 (_MX53_PAD_EIM_A18__IPU_CSI1_D_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A18__SRC_BT_CFG1_3 (_MX53_PAD_EIM_A18__SRC_BT_CFG1_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A17__EMI_WEIM_A_17 (_MX53_PAD_EIM_A17__EMI_WEIM_A_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A17__GPIO2_21 (_MX53_PAD_EIM_A17__GPIO2_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A17__IPU_DISP1_DAT_12 (_MX53_PAD_EIM_A17__IPU_DISP1_DAT_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A17__IPU_CSI1_D_12 (_MX53_PAD_EIM_A17__IPU_CSI1_D_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A17__SRC_BT_CFG1_2 (_MX53_PAD_EIM_A17__SRC_BT_CFG1_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A16__EMI_WEIM_A_16 (_MX53_PAD_EIM_A16__EMI_WEIM_A_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A16__GPIO2_22 (_MX53_PAD_EIM_A16__GPIO2_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A16__IPU_DI1_DISP_CLK (_MX53_PAD_EIM_A16__IPU_DI1_DISP_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A16__IPU_CSI1_PIXCLK (_MX53_PAD_EIM_A16__IPU_CSI1_PIXCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_A16__SRC_BT_CFG1_1 (_MX53_PAD_EIM_A16__SRC_BT_CFG1_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS0__EMI_WEIM_CS_0 (_MX53_PAD_EIM_CS0__EMI_WEIM_CS_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS0__GPIO2_23 (_MX53_PAD_EIM_CS0__GPIO2_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS0__ECSPI2_SCLK (_MX53_PAD_EIM_CS0__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS0__IPU_DI1_PIN5 (_MX53_PAD_EIM_CS0__IPU_DI1_PIN5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 (_MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS1__GPIO2_24 (_MX53_PAD_EIM_CS1__GPIO2_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS1__ECSPI2_MOSI (_MX53_PAD_EIM_CS1__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_CS1__IPU_DI1_PIN6 (_MX53_PAD_EIM_CS1__IPU_DI1_PIN6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_OE__EMI_WEIM_OE (_MX53_PAD_EIM_OE__EMI_WEIM_OE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_OE__GPIO2_25 (_MX53_PAD_EIM_OE__GPIO2_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_OE__ECSPI2_MISO (_MX53_PAD_EIM_OE__ECSPI2_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_OE__IPU_DI1_PIN7 (_MX53_PAD_EIM_OE__IPU_DI1_PIN7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_OE__USBPHY2_IDDIG (_MX53_PAD_EIM_OE__USBPHY2_IDDIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_RW__EMI_WEIM_RW (_MX53_PAD_EIM_RW__EMI_WEIM_RW | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_RW__GPIO2_26 (_MX53_PAD_EIM_RW__GPIO2_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_RW__ECSPI2_SS0 (_MX53_PAD_EIM_RW__ECSPI2_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_RW__IPU_DI1_PIN8 (_MX53_PAD_EIM_RW__IPU_DI1_PIN8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_RW__USBPHY2_HOSTDISCONNECT (_MX53_PAD_EIM_RW__USBPHY2_HOSTDISCONNECT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_LBA__EMI_WEIM_LBA (_MX53_PAD_EIM_LBA__EMI_WEIM_LBA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_LBA__GPIO2_27 (_MX53_PAD_EIM_LBA__GPIO2_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_LBA__ECSPI2_SS1 (_MX53_PAD_EIM_LBA__ECSPI2_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_LBA__IPU_DI1_PIN17 (_MX53_PAD_EIM_LBA__IPU_DI1_PIN17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_LBA__SRC_BT_CFG1_0 (_MX53_PAD_EIM_LBA__SRC_BT_CFG1_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB0__EMI_WEIM_EB_0 (_MX53_PAD_EIM_EB0__EMI_WEIM_EB_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB0__GPIO2_28 (_MX53_PAD_EIM_EB0__GPIO2_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB0__IPU_DISP1_DAT_11 (_MX53_PAD_EIM_EB0__IPU_DISP1_DAT_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB0__IPU_CSI1_D_11 (_MX53_PAD_EIM_EB0__IPU_CSI1_D_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB0__GPC_PMIC_RDY (_MX53_PAD_EIM_EB0__GPC_PMIC_RDY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB0__SRC_BT_CFG2_7 (_MX53_PAD_EIM_EB0__SRC_BT_CFG2_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB1__EMI_WEIM_EB_1 (_MX53_PAD_EIM_EB1__EMI_WEIM_EB_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB1__GPIO2_29 (_MX53_PAD_EIM_EB1__GPIO2_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB1__IPU_DISP1_DAT_10 (_MX53_PAD_EIM_EB1__IPU_DISP1_DAT_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB1__IPU_CSI1_D_10 (_MX53_PAD_EIM_EB1__IPU_CSI1_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB1__SRC_BT_CFG2_6 (_MX53_PAD_EIM_EB1__SRC_BT_CFG2_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 (_MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA0__GPIO3_0 (_MX53_PAD_EIM_DA0__GPIO3_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA0__IPU_DISP1_DAT_9 (_MX53_PAD_EIM_DA0__IPU_DISP1_DAT_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA0__IPU_CSI1_D_9 (_MX53_PAD_EIM_DA0__IPU_CSI1_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA0__SRC_BT_CFG2_5 (_MX53_PAD_EIM_DA0__SRC_BT_CFG2_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 (_MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA1__GPIO3_1 (_MX53_PAD_EIM_DA1__GPIO3_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA1__IPU_DISP1_DAT_8 (_MX53_PAD_EIM_DA1__IPU_DISP1_DAT_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA1__IPU_CSI1_D_8 (_MX53_PAD_EIM_DA1__IPU_CSI1_D_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA1__SRC_BT_CFG2_4 (_MX53_PAD_EIM_DA1__SRC_BT_CFG2_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 (_MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA2__GPIO3_2 (_MX53_PAD_EIM_DA2__GPIO3_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA2__IPU_DISP1_DAT_7 (_MX53_PAD_EIM_DA2__IPU_DISP1_DAT_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA2__IPU_CSI1_D_7 (_MX53_PAD_EIM_DA2__IPU_CSI1_D_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA2__SRC_BT_CFG2_3 (_MX53_PAD_EIM_DA2__SRC_BT_CFG2_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 (_MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA3__GPIO3_3 (_MX53_PAD_EIM_DA3__GPIO3_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA3__IPU_DISP1_DAT_6 (_MX53_PAD_EIM_DA3__IPU_DISP1_DAT_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA3__IPU_CSI1_D_6 (_MX53_PAD_EIM_DA3__IPU_CSI1_D_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA3__SRC_BT_CFG2_2 (_MX53_PAD_EIM_DA3__SRC_BT_CFG2_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 (_MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA4__GPIO3_4 (_MX53_PAD_EIM_DA4__GPIO3_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA4__IPU_DISP1_DAT_5 (_MX53_PAD_EIM_DA4__IPU_DISP1_DAT_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA4__IPU_CSI1_D_5 (_MX53_PAD_EIM_DA4__IPU_CSI1_D_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA4__SRC_BT_CFG3_7 (_MX53_PAD_EIM_DA4__SRC_BT_CFG3_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 (_MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA5__GPIO3_5 (_MX53_PAD_EIM_DA5__GPIO3_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4 (_MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA5__IPU_CSI1_D_4 (_MX53_PAD_EIM_DA5__IPU_CSI1_D_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 (_MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 (_MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA6__GPIO3_6 (_MX53_PAD_EIM_DA6__GPIO3_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3 (_MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA6__IPU_CSI1_D_3 (_MX53_PAD_EIM_DA6__IPU_CSI1_D_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA6__SRC_BT_CFG3_5 (_MX53_PAD_EIM_DA6__SRC_BT_CFG3_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA7__EMI_NAND_WEIM_DA_7 (_MX53_PAD_EIM_DA7__EMI_NAND_WEIM_DA_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA7__GPIO3_7 (_MX53_PAD_EIM_DA7__GPIO3_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA7__IPU_DISP1_DAT_2 (_MX53_PAD_EIM_DA7__IPU_DISP1_DAT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA7__IPU_CSI1_D_2 (_MX53_PAD_EIM_DA7__IPU_CSI1_D_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA7__SRC_BT_CFG3_4 (_MX53_PAD_EIM_DA7__SRC_BT_CFG3_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA8__EMI_NAND_WEIM_DA_8 (_MX53_PAD_EIM_DA8__EMI_NAND_WEIM_DA_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA8__GPIO3_8 (_MX53_PAD_EIM_DA8__GPIO3_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA8__IPU_DISP1_DAT_1 (_MX53_PAD_EIM_DA8__IPU_DISP1_DAT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA8__IPU_CSI1_D_1 (_MX53_PAD_EIM_DA8__IPU_CSI1_D_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA8__SRC_BT_CFG3_3 (_MX53_PAD_EIM_DA8__SRC_BT_CFG3_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA9__EMI_NAND_WEIM_DA_9 (_MX53_PAD_EIM_DA9__EMI_NAND_WEIM_DA_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA9__GPIO3_9 (_MX53_PAD_EIM_DA9__GPIO3_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA9__IPU_DISP1_DAT_0 (_MX53_PAD_EIM_DA9__IPU_DISP1_DAT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA9__IPU_CSI1_D_0 (_MX53_PAD_EIM_DA9__IPU_CSI1_D_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA9__SRC_BT_CFG3_2 (_MX53_PAD_EIM_DA9__SRC_BT_CFG3_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA10__EMI_NAND_WEIM_DA_10 (_MX53_PAD_EIM_DA10__EMI_NAND_WEIM_DA_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA10__GPIO3_10 (_MX53_PAD_EIM_DA10__GPIO3_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA10__IPU_DI1_PIN15 (_MX53_PAD_EIM_DA10__IPU_DI1_PIN15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA10__IPU_CSI1_DATA_EN (_MX53_PAD_EIM_DA10__IPU_CSI1_DATA_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA10__SRC_BT_CFG3_1 (_MX53_PAD_EIM_DA10__SRC_BT_CFG3_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA11__EMI_NAND_WEIM_DA_11 (_MX53_PAD_EIM_DA11__EMI_NAND_WEIM_DA_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA11__GPIO3_11 (_MX53_PAD_EIM_DA11__GPIO3_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA11__IPU_DI1_PIN2 (_MX53_PAD_EIM_DA11__IPU_DI1_PIN2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA11__IPU_CSI1_HSYNC (_MX53_PAD_EIM_DA11__IPU_CSI1_HSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA12__EMI_NAND_WEIM_DA_12 (_MX53_PAD_EIM_DA12__EMI_NAND_WEIM_DA_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA12__GPIO3_12 (_MX53_PAD_EIM_DA12__GPIO3_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA12__IPU_DI1_PIN3 (_MX53_PAD_EIM_DA12__IPU_DI1_PIN3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA12__IPU_CSI1_VSYNC (_MX53_PAD_EIM_DA12__IPU_CSI1_VSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA13__EMI_NAND_WEIM_DA_13 (_MX53_PAD_EIM_DA13__EMI_NAND_WEIM_DA_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA13__GPIO3_13 (_MX53_PAD_EIM_DA13__GPIO3_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA13__IPU_DI1_D0_CS (_MX53_PAD_EIM_DA13__IPU_DI1_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA13__CCM_DI1_EXT_CLK (_MX53_PAD_EIM_DA13__CCM_DI1_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA14__EMI_NAND_WEIM_DA_14 (_MX53_PAD_EIM_DA14__EMI_NAND_WEIM_DA_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA14__GPIO3_14 (_MX53_PAD_EIM_DA14__GPIO3_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA14__IPU_DI1_D1_CS (_MX53_PAD_EIM_DA14__IPU_DI1_D1_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA14__CCM_DI0_EXT_CLK (_MX53_PAD_EIM_DA14__CCM_DI0_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA15__EMI_NAND_WEIM_DA_15 (_MX53_PAD_EIM_DA15__EMI_NAND_WEIM_DA_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA15__GPIO3_15 (_MX53_PAD_EIM_DA15__GPIO3_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA15__IPU_DI1_PIN1 (_MX53_PAD_EIM_DA15__IPU_DI1_PIN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_DA15__IPU_DI1_PIN4 (_MX53_PAD_EIM_DA15__IPU_DI1_PIN4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_WE_B__EMI_NANDF_WE_B (_MX53_PAD_NANDF_WE_B__EMI_NANDF_WE_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_WE_B__GPIO6_12 (_MX53_PAD_NANDF_WE_B__GPIO6_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_RE_B__EMI_NANDF_RE_B (_MX53_PAD_NANDF_RE_B__EMI_NANDF_RE_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_RE_B__GPIO6_13 (_MX53_PAD_NANDF_RE_B__GPIO6_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_WAIT__EMI_WEIM_WAIT (_MX53_PAD_EIM_WAIT__EMI_WEIM_WAIT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_WAIT__GPIO5_0 (_MX53_PAD_EIM_WAIT__GPIO5_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_WAIT__EMI_WEIM_DTACK_B (_MX53_PAD_EIM_WAIT__EMI_WEIM_DTACK_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX3_P__GPIO6_22 (_MX53_PAD_LVDS1_TX3_P__GPIO6_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 (_MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX2_P__GPIO6_24 (_MX53_PAD_LVDS1_TX2_P__GPIO6_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 (_MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_CLK_P__GPIO6_26 (_MX53_PAD_LVDS1_CLK_P__GPIO6_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK (_MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX1_P__GPIO6_28 (_MX53_PAD_LVDS1_TX1_P__GPIO6_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 (_MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX0_P__GPIO6_30 (_MX53_PAD_LVDS1_TX0_P__GPIO6_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 (_MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX3_P__GPIO7_22 (_MX53_PAD_LVDS0_TX3_P__GPIO7_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 (_MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_CLK_P__GPIO7_24 (_MX53_PAD_LVDS0_CLK_P__GPIO7_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK (_MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX2_P__GPIO7_26 (_MX53_PAD_LVDS0_TX2_P__GPIO7_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 (_MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX1_P__GPIO7_28 (_MX53_PAD_LVDS0_TX1_P__GPIO7_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 (_MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX0_P__GPIO7_30 (_MX53_PAD_LVDS0_TX0_P__GPIO7_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 (_MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_10__GPIO4_0 (_MX53_PAD_GPIO_10__GPIO4_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_10__OSC32k_32K_OUT (_MX53_PAD_GPIO_10__OSC32k_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_11__GPIO4_1 (_MX53_PAD_GPIO_11__GPIO4_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_12__GPIO4_2 (_MX53_PAD_GPIO_12__GPIO4_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_13__GPIO4_3 (_MX53_PAD_GPIO_13__GPIO4_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_14__GPIO4_4 (_MX53_PAD_GPIO_14__GPIO4_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CLE__EMI_NANDF_CLE (_MX53_PAD_NANDF_CLE__EMI_NANDF_CLE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CLE__GPIO6_7 (_MX53_PAD_NANDF_CLE__GPIO6_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CLE__USBPHY1_VSTATUS_0 (_MX53_PAD_NANDF_CLE__USBPHY1_VSTATUS_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_ALE__EMI_NANDF_ALE (_MX53_PAD_NANDF_ALE__EMI_NANDF_ALE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_ALE__GPIO6_8 (_MX53_PAD_NANDF_ALE__GPIO6_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_ALE__USBPHY1_VSTATUS_1 (_MX53_PAD_NANDF_ALE__USBPHY1_VSTATUS_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_WP_B__EMI_NANDF_WP_B (_MX53_PAD_NANDF_WP_B__EMI_NANDF_WP_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_WP_B__GPIO6_9 (_MX53_PAD_NANDF_WP_B__GPIO6_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_WP_B__USBPHY1_VSTATUS_2 (_MX53_PAD_NANDF_WP_B__USBPHY1_VSTATUS_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_RB0__EMI_NANDF_RB_0 (_MX53_PAD_NANDF_RB0__EMI_NANDF_RB_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_RB0__GPIO6_10 (_MX53_PAD_NANDF_RB0__GPIO6_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_RB0__USBPHY1_VSTATUS_3 (_MX53_PAD_NANDF_RB0__USBPHY1_VSTATUS_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS0__EMI_NANDF_CS_0 (_MX53_PAD_NANDF_CS0__EMI_NANDF_CS_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS0__GPIO6_11 (_MX53_PAD_NANDF_CS0__GPIO6_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS0__USBPHY1_VSTATUS_4 (_MX53_PAD_NANDF_CS0__USBPHY1_VSTATUS_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS1__EMI_NANDF_CS_1 (_MX53_PAD_NANDF_CS1__EMI_NANDF_CS_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS1__GPIO6_14 (_MX53_PAD_NANDF_CS1__GPIO6_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS1__MLB_MLBCLK (_MX53_PAD_NANDF_CS1__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS1__USBPHY1_VSTATUS_5 (_MX53_PAD_NANDF_CS1__USBPHY1_VSTATUS_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__EMI_NANDF_CS_2 (_MX53_PAD_NANDF_CS2__EMI_NANDF_CS_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__GPIO6_15 (_MX53_PAD_NANDF_CS2__GPIO6_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__IPU_SISG_0 (_MX53_PAD_NANDF_CS2__IPU_SISG_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__ESAI1_TX0 (_MX53_PAD_NANDF_CS2__ESAI1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__EMI_WEIM_CRE (_MX53_PAD_NANDF_CS2__EMI_WEIM_CRE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__CCM_CSI0_MCLK (_MX53_PAD_NANDF_CS2__CCM_CSI0_MCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__MLB_MLBSIG (_MX53_PAD_NANDF_CS2__MLB_MLBSIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS2__USBPHY1_VSTATUS_6 (_MX53_PAD_NANDF_CS2__USBPHY1_VSTATUS_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__EMI_NANDF_CS_3 (_MX53_PAD_NANDF_CS3__EMI_NANDF_CS_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__GPIO6_16 (_MX53_PAD_NANDF_CS3__GPIO6_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__IPU_SISG_1 (_MX53_PAD_NANDF_CS3__IPU_SISG_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__ESAI1_TX1 (_MX53_PAD_NANDF_CS3__ESAI1_TX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__EMI_WEIM_A_26 (_MX53_PAD_NANDF_CS3__EMI_WEIM_A_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__MLB_MLBDAT (_MX53_PAD_NANDF_CS3__MLB_MLBDAT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_NANDF_CS3__USBPHY1_VSTATUS_7 (_MX53_PAD_NANDF_CS3__USBPHY1_VSTATUS_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__FEC_MDIO (_MX53_PAD_FEC_MDIO__FEC_MDIO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__GPIO1_22 (_MX53_PAD_FEC_MDIO__GPIO1_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__ESAI1_SCKR (_MX53_PAD_FEC_MDIO__ESAI1_SCKR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__FEC_COL (_MX53_PAD_FEC_MDIO__FEC_COL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__RTC_CE_RTC_PS2 (_MX53_PAD_FEC_MDIO__RTC_CE_RTC_PS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__SDMA_DEBUG_BUS_DEVICE_3 (_MX53_PAD_FEC_MDIO__SDMA_DEBUG_BUS_DEVICE_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDIO__EMI_EMI_DEBUG_49 (_MX53_PAD_FEC_MDIO__EMI_EMI_DEBUG_49 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_REF_CLK__FEC_TX_CLK (_MX53_PAD_FEC_REF_CLK__FEC_TX_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_REF_CLK__GPIO1_23 (_MX53_PAD_FEC_REF_CLK__GPIO1_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_REF_CLK__ESAI1_FSR (_MX53_PAD_FEC_REF_CLK__ESAI1_FSR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_REF_CLK__SDMA_DEBUG_BUS_DEVICE_4 (_MX53_PAD_FEC_REF_CLK__SDMA_DEBUG_BUS_DEVICE_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_REF_CLK__EMI_EMI_DEBUG_50 (_MX53_PAD_FEC_REF_CLK__EMI_EMI_DEBUG_50 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RX_ER__FEC_RX_ER (_MX53_PAD_FEC_RX_ER__FEC_RX_ER | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RX_ER__GPIO1_24 (_MX53_PAD_FEC_RX_ER__GPIO1_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RX_ER__ESAI1_HCKR (_MX53_PAD_FEC_RX_ER__ESAI1_HCKR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RX_ER__FEC_RX_CLK (_MX53_PAD_FEC_RX_ER__FEC_RX_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RX_ER__RTC_CE_RTC_PS3 (_MX53_PAD_FEC_RX_ER__RTC_CE_RTC_PS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_CRS_DV__FEC_RX_DV (_MX53_PAD_FEC_CRS_DV__FEC_RX_DV | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_CRS_DV__GPIO1_25 (_MX53_PAD_FEC_CRS_DV__GPIO1_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_CRS_DV__ESAI1_SCKT (_MX53_PAD_FEC_CRS_DV__ESAI1_SCKT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD1__FEC_RDATA_1 (_MX53_PAD_FEC_RXD1__FEC_RDATA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD1__GPIO1_26 (_MX53_PAD_FEC_RXD1__GPIO1_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD1__ESAI1_FST (_MX53_PAD_FEC_RXD1__ESAI1_FST | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD1__MLB_MLBSIG (_MX53_PAD_FEC_RXD1__MLB_MLBSIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD1__RTC_CE_RTC_PS1 (_MX53_PAD_FEC_RXD1__RTC_CE_RTC_PS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD0__FEC_RDATA_0 (_MX53_PAD_FEC_RXD0__FEC_RDATA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD0__GPIO1_27 (_MX53_PAD_FEC_RXD0__GPIO1_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD0__ESAI1_HCKT (_MX53_PAD_FEC_RXD0__ESAI1_HCKT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_RXD0__OSC32k_32K_OUT (_MX53_PAD_FEC_RXD0__OSC32k_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TX_EN__FEC_TX_EN (_MX53_PAD_FEC_TX_EN__FEC_TX_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TX_EN__GPIO1_28 (_MX53_PAD_FEC_TX_EN__GPIO1_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TX_EN__ESAI1_TX3_RX2 (_MX53_PAD_FEC_TX_EN__ESAI1_TX3_RX2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD1__FEC_TDATA_1 (_MX53_PAD_FEC_TXD1__FEC_TDATA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD1__GPIO1_29 (_MX53_PAD_FEC_TXD1__GPIO1_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD1__ESAI1_TX2_RX3 (_MX53_PAD_FEC_TXD1__ESAI1_TX2_RX3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD1__MLB_MLBCLK (_MX53_PAD_FEC_TXD1__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD1__RTC_CE_RTC_PRSC_CLK (_MX53_PAD_FEC_TXD1__RTC_CE_RTC_PRSC_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD0__FEC_TDATA_0 (_MX53_PAD_FEC_TXD0__FEC_TDATA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD0__GPIO1_30 (_MX53_PAD_FEC_TXD0__GPIO1_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD0__ESAI1_TX4_RX1 (_MX53_PAD_FEC_TXD0__ESAI1_TX4_RX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_TXD0__USBPHY2_DATAOUT_0 (_MX53_PAD_FEC_TXD0__USBPHY2_DATAOUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDC__FEC_MDC (_MX53_PAD_FEC_MDC__FEC_MDC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDC__GPIO1_31 (_MX53_PAD_FEC_MDC__GPIO1_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDC__ESAI1_TX5_RX0 (_MX53_PAD_FEC_MDC__ESAI1_TX5_RX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDC__MLB_MLBDAT (_MX53_PAD_FEC_MDC__MLB_MLBDAT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDC__RTC_CE_RTC_ALARM1_TRIG (_MX53_PAD_FEC_MDC__RTC_CE_RTC_ALARM1_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1 (_MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOW__PATA_DIOW (_MX53_PAD_PATA_DIOW__PATA_DIOW | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOW__GPIO6_17 (_MX53_PAD_PATA_DIOW__GPIO6_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOW__UART1_TXD_MUX (_MX53_PAD_PATA_DIOW__UART1_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2 (_MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMACK__PATA_DMACK (_MX53_PAD_PATA_DMACK__PATA_DMACK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMACK__GPIO6_18 (_MX53_PAD_PATA_DMACK__GPIO6_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMACK__UART1_RXD_MUX (_MX53_PAD_PATA_DMACK__UART1_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3 (_MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMARQ__PATA_DMARQ (_MX53_PAD_PATA_DMARQ__PATA_DMARQ | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMARQ__GPIO7_0 (_MX53_PAD_PATA_DMARQ__GPIO7_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMARQ__UART2_TXD_MUX (_MX53_PAD_PATA_DMARQ__UART2_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0 (_MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4 (_MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN (_MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_BUFFER_EN__GPIO7_1 (_MX53_PAD_PATA_BUFFER_EN__GPIO7_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX (_MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_BUFFER_EN__CCM_CCM_OUT_1 (_MX53_PAD_PATA_BUFFER_EN__CCM_CCM_OUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5 (_MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_INTRQ__PATA_INTRQ (_MX53_PAD_PATA_INTRQ__PATA_INTRQ | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_INTRQ__GPIO7_2 (_MX53_PAD_PATA_INTRQ__GPIO7_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_INTRQ__UART2_CTS (_MX53_PAD_PATA_INTRQ__UART2_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_INTRQ__CAN1_TXCAN (_MX53_PAD_PATA_INTRQ__CAN1_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2 (_MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6 (_MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOR__PATA_DIOR (_MX53_PAD_PATA_DIOR__PATA_DIOR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOR__GPIO7_3 (_MX53_PAD_PATA_DIOR__GPIO7_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOR__UART2_RTS (_MX53_PAD_PATA_DIOR__UART2_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_DIOR__CAN1_RXCAN (_MX53_PAD_PATA_DIOR__CAN1_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DIOR__USBPHY2_DATAOUT_7 (_MX53_PAD_PATA_DIOR__USBPHY2_DATAOUT_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B (_MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__GPIO7_4 (_MX53_PAD_PATA_RESET_B__GPIO7_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__ESDHC3_CMD (_MX53_PAD_PATA_RESET_B__ESDHC3_CMD | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__UART1_CTS (_MX53_PAD_PATA_RESET_B__UART1_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__CAN2_TXCAN (_MX53_PAD_PATA_RESET_B__CAN2_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 (_MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__PATA_IORDY (_MX53_PAD_PATA_IORDY__PATA_IORDY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__GPIO7_5 (_MX53_PAD_PATA_IORDY__GPIO7_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__ESDHC3_CLK (_MX53_PAD_PATA_IORDY__ESDHC3_CLK | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__UART1_RTS (_MX53_PAD_PATA_IORDY__UART1_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__CAN2_RXCAN (_MX53_PAD_PATA_IORDY__CAN2_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 (_MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_0__PATA_DA_0 (_MX53_PAD_PATA_DA_0__PATA_DA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_0__GPIO7_6 (_MX53_PAD_PATA_DA_0__GPIO7_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_0__ESDHC3_RST (_MX53_PAD_PATA_DA_0__ESDHC3_RST | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_0__OWIRE_LINE (_MX53_PAD_PATA_DA_0__OWIRE_LINE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_0__USBPHY1_DATAOUT_2 (_MX53_PAD_PATA_DA_0__USBPHY1_DATAOUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_1__PATA_DA_1 (_MX53_PAD_PATA_DA_1__PATA_DA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_1__GPIO7_7 (_MX53_PAD_PATA_DA_1__GPIO7_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_1__ESDHC4_CMD (_MX53_PAD_PATA_DA_1__ESDHC4_CMD | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DA_1__UART3_CTS (_MX53_PAD_PATA_DA_1__UART3_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3 (_MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_2__PATA_DA_2 (_MX53_PAD_PATA_DA_2__PATA_DA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_2__GPIO7_8 (_MX53_PAD_PATA_DA_2__GPIO7_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DA_2__ESDHC4_CLK (_MX53_PAD_PATA_DA_2__ESDHC4_CLK | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DA_2__UART3_RTS (_MX53_PAD_PATA_DA_2__UART3_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4 (_MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_CS_0__PATA_CS_0 (_MX53_PAD_PATA_CS_0__PATA_CS_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_CS_0__GPIO7_9 (_MX53_PAD_PATA_CS_0__GPIO7_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_CS_0__UART3_TXD_MUX (_MX53_PAD_PATA_CS_0__UART3_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5 (_MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_CS_1__PATA_CS_1 (_MX53_PAD_PATA_CS_1__PATA_CS_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_CS_1__GPIO7_10 (_MX53_PAD_PATA_CS_1__GPIO7_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_CS_1__UART3_RXD_MUX (_MX53_PAD_PATA_CS_1__UART3_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
+#define MX53_PAD_PATA_CS_1__USBPHY1_DATAOUT_6 (_MX53_PAD_PATA_CS_1__USBPHY1_DATAOUT_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__PATA_DATA_0 (_MX53_PAD_PATA_DATA0__PATA_DATA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__GPIO2_0 (_MX53_PAD_PATA_DATA0__GPIO2_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__EMI_NANDF_D_0 (_MX53_PAD_PATA_DATA0__EMI_NANDF_D_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__ESDHC3_DAT4 (_MX53_PAD_PATA_DATA0__ESDHC3_DAT4 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__GPU3d_GPU_DEBUG_OUT_0 (_MX53_PAD_PATA_DATA0__GPU3d_GPU_DEBUG_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__IPU_DIAG_BUS_0 (_MX53_PAD_PATA_DATA0__IPU_DIAG_BUS_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA0__USBPHY1_DATAOUT_7 (_MX53_PAD_PATA_DATA0__USBPHY1_DATAOUT_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA1__PATA_DATA_1 (_MX53_PAD_PATA_DATA1__PATA_DATA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA1__GPIO2_1 (_MX53_PAD_PATA_DATA1__GPIO2_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA1__EMI_NANDF_D_1 (_MX53_PAD_PATA_DATA1__EMI_NANDF_D_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA1__ESDHC3_DAT5 (_MX53_PAD_PATA_DATA1__ESDHC3_DAT5 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA1__GPU3d_GPU_DEBUG_OUT_1 (_MX53_PAD_PATA_DATA1__GPU3d_GPU_DEBUG_OUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA1__IPU_DIAG_BUS_1 (_MX53_PAD_PATA_DATA1__IPU_DIAG_BUS_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA2__PATA_DATA_2 (_MX53_PAD_PATA_DATA2__PATA_DATA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA2__GPIO2_2 (_MX53_PAD_PATA_DATA2__GPIO2_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA2__EMI_NANDF_D_2 (_MX53_PAD_PATA_DATA2__EMI_NANDF_D_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA2__ESDHC3_DAT6 (_MX53_PAD_PATA_DATA2__ESDHC3_DAT6 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA2__GPU3d_GPU_DEBUG_OUT_2 (_MX53_PAD_PATA_DATA2__GPU3d_GPU_DEBUG_OUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA2__IPU_DIAG_BUS_2 (_MX53_PAD_PATA_DATA2__IPU_DIAG_BUS_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA3__PATA_DATA_3 (_MX53_PAD_PATA_DATA3__PATA_DATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA3__GPIO2_3 (_MX53_PAD_PATA_DATA3__GPIO2_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA3__EMI_NANDF_D_3 (_MX53_PAD_PATA_DATA3__EMI_NANDF_D_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA3__ESDHC3_DAT7 (_MX53_PAD_PATA_DATA3__ESDHC3_DAT7 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA3__GPU3d_GPU_DEBUG_OUT_3 (_MX53_PAD_PATA_DATA3__GPU3d_GPU_DEBUG_OUT_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA3__IPU_DIAG_BUS_3 (_MX53_PAD_PATA_DATA3__IPU_DIAG_BUS_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA4__PATA_DATA_4 (_MX53_PAD_PATA_DATA4__PATA_DATA_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA4__GPIO2_4 (_MX53_PAD_PATA_DATA4__GPIO2_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA4__EMI_NANDF_D_4 (_MX53_PAD_PATA_DATA4__EMI_NANDF_D_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA4__ESDHC4_DAT4 (_MX53_PAD_PATA_DATA4__ESDHC4_DAT4 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA4__GPU3d_GPU_DEBUG_OUT_4 (_MX53_PAD_PATA_DATA4__GPU3d_GPU_DEBUG_OUT_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA4__IPU_DIAG_BUS_4 (_MX53_PAD_PATA_DATA4__IPU_DIAG_BUS_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA5__PATA_DATA_5 (_MX53_PAD_PATA_DATA5__PATA_DATA_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA5__GPIO2_5 (_MX53_PAD_PATA_DATA5__GPIO2_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA5__EMI_NANDF_D_5 (_MX53_PAD_PATA_DATA5__EMI_NANDF_D_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA5__ESDHC4_DAT5 (_MX53_PAD_PATA_DATA5__ESDHC4_DAT5 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5 (_MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5 (_MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA6__PATA_DATA_6 (_MX53_PAD_PATA_DATA6__PATA_DATA_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA6__GPIO2_6 (_MX53_PAD_PATA_DATA6__GPIO2_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 (_MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA6__ESDHC4_DAT6 (_MX53_PAD_PATA_DATA6__ESDHC4_DAT6 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 (_MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 (_MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA7__PATA_DATA_7 (_MX53_PAD_PATA_DATA7__PATA_DATA_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA7__GPIO2_7 (_MX53_PAD_PATA_DATA7__GPIO2_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA7__EMI_NANDF_D_7 (_MX53_PAD_PATA_DATA7__EMI_NANDF_D_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA7__ESDHC4_DAT7 (_MX53_PAD_PATA_DATA7__ESDHC4_DAT7 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA7__GPU3d_GPU_DEBUG_OUT_7 (_MX53_PAD_PATA_DATA7__GPU3d_GPU_DEBUG_OUT_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA7__IPU_DIAG_BUS_7 (_MX53_PAD_PATA_DATA7__IPU_DIAG_BUS_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__PATA_DATA_8 (_MX53_PAD_PATA_DATA8__PATA_DATA_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__GPIO2_8 (_MX53_PAD_PATA_DATA8__GPIO2_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__ESDHC1_DAT4 (_MX53_PAD_PATA_DATA8__ESDHC1_DAT4 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__EMI_NANDF_D_8 (_MX53_PAD_PATA_DATA8__EMI_NANDF_D_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__ESDHC3_DAT0 (_MX53_PAD_PATA_DATA8__ESDHC3_DAT0 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__GPU3d_GPU_DEBUG_OUT_8 (_MX53_PAD_PATA_DATA8__GPU3d_GPU_DEBUG_OUT_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA8__IPU_DIAG_BUS_8 (_MX53_PAD_PATA_DATA8__IPU_DIAG_BUS_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__PATA_DATA_9 (_MX53_PAD_PATA_DATA9__PATA_DATA_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__GPIO2_9 (_MX53_PAD_PATA_DATA9__GPIO2_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__ESDHC1_DAT5 (_MX53_PAD_PATA_DATA9__ESDHC1_DAT5 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__EMI_NANDF_D_9 (_MX53_PAD_PATA_DATA9__EMI_NANDF_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__ESDHC3_DAT1 (_MX53_PAD_PATA_DATA9__ESDHC3_DAT1 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__GPU3d_GPU_DEBUG_OUT_9 (_MX53_PAD_PATA_DATA9__GPU3d_GPU_DEBUG_OUT_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA9__IPU_DIAG_BUS_9 (_MX53_PAD_PATA_DATA9__IPU_DIAG_BUS_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__PATA_DATA_10 (_MX53_PAD_PATA_DATA10__PATA_DATA_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__GPIO2_10 (_MX53_PAD_PATA_DATA10__GPIO2_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__ESDHC1_DAT6 (_MX53_PAD_PATA_DATA10__ESDHC1_DAT6 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__EMI_NANDF_D_10 (_MX53_PAD_PATA_DATA10__EMI_NANDF_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__ESDHC3_DAT2 (_MX53_PAD_PATA_DATA10__ESDHC3_DAT2 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__GPU3d_GPU_DEBUG_OUT_10 (_MX53_PAD_PATA_DATA10__GPU3d_GPU_DEBUG_OUT_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA10__IPU_DIAG_BUS_10 (_MX53_PAD_PATA_DATA10__IPU_DIAG_BUS_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__PATA_DATA_11 (_MX53_PAD_PATA_DATA11__PATA_DATA_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__GPIO2_11 (_MX53_PAD_PATA_DATA11__GPIO2_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__ESDHC1_DAT7 (_MX53_PAD_PATA_DATA11__ESDHC1_DAT7 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__EMI_NANDF_D_11 (_MX53_PAD_PATA_DATA11__EMI_NANDF_D_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__ESDHC3_DAT3 (_MX53_PAD_PATA_DATA11__ESDHC3_DAT3 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__GPU3d_GPU_DEBUG_OUT_11 (_MX53_PAD_PATA_DATA11__GPU3d_GPU_DEBUG_OUT_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA11__IPU_DIAG_BUS_11 (_MX53_PAD_PATA_DATA11__IPU_DIAG_BUS_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__PATA_DATA_12 (_MX53_PAD_PATA_DATA12__PATA_DATA_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__GPIO2_12 (_MX53_PAD_PATA_DATA12__GPIO2_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__ESDHC2_DAT4 (_MX53_PAD_PATA_DATA12__ESDHC2_DAT4 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__EMI_NANDF_D_12 (_MX53_PAD_PATA_DATA12__EMI_NANDF_D_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__ESDHC4_DAT0 (_MX53_PAD_PATA_DATA12__ESDHC4_DAT0 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__GPU3d_GPU_DEBUG_OUT_12 (_MX53_PAD_PATA_DATA12__GPU3d_GPU_DEBUG_OUT_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA12__IPU_DIAG_BUS_12 (_MX53_PAD_PATA_DATA12__IPU_DIAG_BUS_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__PATA_DATA_13 (_MX53_PAD_PATA_DATA13__PATA_DATA_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__GPIO2_13 (_MX53_PAD_PATA_DATA13__GPIO2_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__ESDHC2_DAT5 (_MX53_PAD_PATA_DATA13__ESDHC2_DAT5 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__EMI_NANDF_D_13 (_MX53_PAD_PATA_DATA13__EMI_NANDF_D_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__ESDHC4_DAT1 (_MX53_PAD_PATA_DATA13__ESDHC4_DAT1 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__GPU3d_GPU_DEBUG_OUT_13 (_MX53_PAD_PATA_DATA13__GPU3d_GPU_DEBUG_OUT_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA13__IPU_DIAG_BUS_13 (_MX53_PAD_PATA_DATA13__IPU_DIAG_BUS_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__PATA_DATA_14 (_MX53_PAD_PATA_DATA14__PATA_DATA_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__GPIO2_14 (_MX53_PAD_PATA_DATA14__GPIO2_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__ESDHC2_DAT6 (_MX53_PAD_PATA_DATA14__ESDHC2_DAT6 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__EMI_NANDF_D_14 (_MX53_PAD_PATA_DATA14__EMI_NANDF_D_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__ESDHC4_DAT2 (_MX53_PAD_PATA_DATA14__ESDHC4_DAT2 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__GPU3d_GPU_DEBUG_OUT_14 (_MX53_PAD_PATA_DATA14__GPU3d_GPU_DEBUG_OUT_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA14__IPU_DIAG_BUS_14 (_MX53_PAD_PATA_DATA14__IPU_DIAG_BUS_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__PATA_DATA_15 (_MX53_PAD_PATA_DATA15__PATA_DATA_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__GPIO2_15 (_MX53_PAD_PATA_DATA15__GPIO2_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__ESDHC2_DAT7 (_MX53_PAD_PATA_DATA15__ESDHC2_DAT7 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__EMI_NANDF_D_15 (_MX53_PAD_PATA_DATA15__EMI_NANDF_D_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__ESDHC4_DAT3 (_MX53_PAD_PATA_DATA15__ESDHC4_DAT3 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__GPU3d_GPU_DEBUG_OUT_15 (_MX53_PAD_PATA_DATA15__GPU3d_GPU_DEBUG_OUT_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_DATA15__IPU_DIAG_BUS_15 (_MX53_PAD_PATA_DATA15__IPU_DIAG_BUS_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA0__ESDHC1_DAT0 (_MX53_PAD_SD1_DATA0__ESDHC1_DAT0 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD1_DATA0__GPIO1_16 (_MX53_PAD_SD1_DATA0__GPIO1_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA0__GPT_CAPIN1 (_MX53_PAD_SD1_DATA0__GPT_CAPIN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA0__CSPI_MISO (_MX53_PAD_SD1_DATA0__CSPI_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA0__CCM_PLL3_BYP (_MX53_PAD_SD1_DATA0__CCM_PLL3_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA1__ESDHC1_DAT1 (_MX53_PAD_SD1_DATA1__ESDHC1_DAT1 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD1_DATA1__GPIO1_17 (_MX53_PAD_SD1_DATA1__GPIO1_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA1__GPT_CAPIN2 (_MX53_PAD_SD1_DATA1__GPT_CAPIN2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA1__CSPI_SS0 (_MX53_PAD_SD1_DATA1__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA1__CCM_PLL4_BYP (_MX53_PAD_SD1_DATA1__CCM_PLL4_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CMD__ESDHC1_CMD (_MX53_PAD_SD1_CMD__ESDHC1_CMD | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD1_CMD__GPIO1_18 (_MX53_PAD_SD1_CMD__GPIO1_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CMD__GPT_CMPOUT1 (_MX53_PAD_SD1_CMD__GPT_CMPOUT1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CMD__CSPI_MOSI (_MX53_PAD_SD1_CMD__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CMD__CCM_PLL1_BYP (_MX53_PAD_SD1_CMD__CCM_PLL1_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__ESDHC1_DAT2 (_MX53_PAD_SD1_DATA2__ESDHC1_DAT2 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__GPIO1_19 (_MX53_PAD_SD1_DATA2__GPIO1_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__GPT_CMPOUT2 (_MX53_PAD_SD1_DATA2__GPT_CMPOUT2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__PWM2_PWMO (_MX53_PAD_SD1_DATA2__PWM2_PWMO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__WDOG1_WDOG_B (_MX53_PAD_SD1_DATA2__WDOG1_WDOG_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__CSPI_SS1 (_MX53_PAD_SD1_DATA2__CSPI_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__WDOG1_WDOG_RST_B_DEB (_MX53_PAD_SD1_DATA2__WDOG1_WDOG_RST_B_DEB | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA2__CCM_PLL2_BYP (_MX53_PAD_SD1_DATA2__CCM_PLL2_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CLK__ESDHC1_CLK (_MX53_PAD_SD1_CLK__ESDHC1_CLK | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD1_CLK__GPIO1_20 (_MX53_PAD_SD1_CLK__GPIO1_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CLK__OSC32k_32K_OUT (_MX53_PAD_SD1_CLK__OSC32k_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CLK__GPT_CLKIN (_MX53_PAD_SD1_CLK__GPT_CLKIN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CLK__CSPI_SCLK (_MX53_PAD_SD1_CLK__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_CLK__SATA_PHY_DTB_0 (_MX53_PAD_SD1_CLK__SATA_PHY_DTB_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__ESDHC1_DAT3 (_MX53_PAD_SD1_DATA3__ESDHC1_DAT3 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__GPIO1_21 (_MX53_PAD_SD1_DATA3__GPIO1_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__GPT_CMPOUT3 (_MX53_PAD_SD1_DATA3__GPT_CMPOUT3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__PWM1_PWMO (_MX53_PAD_SD1_DATA3__PWM1_PWMO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__WDOG2_WDOG_B (_MX53_PAD_SD1_DATA3__WDOG2_WDOG_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__CSPI_SS2 (_MX53_PAD_SD1_DATA3__CSPI_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__WDOG2_WDOG_RST_B_DEB (_MX53_PAD_SD1_DATA3__WDOG2_WDOG_RST_B_DEB | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD1_DATA3__SATA_PHY_DTB_1 (_MX53_PAD_SD1_DATA3__SATA_PHY_DTB_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CLK__ESDHC2_CLK (_MX53_PAD_SD2_CLK__ESDHC2_CLK | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD2_CLK__GPIO1_10 (_MX53_PAD_SD2_CLK__GPIO1_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CLK__KPP_COL_5 (_MX53_PAD_SD2_CLK__KPP_COL_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CLK__AUDMUX_AUD4_RXFS (_MX53_PAD_SD2_CLK__AUDMUX_AUD4_RXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CLK__CSPI_SCLK (_MX53_PAD_SD2_CLK__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CLK__SCC_RANDOM_V (_MX53_PAD_SD2_CLK__SCC_RANDOM_V | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CMD__ESDHC2_CMD (_MX53_PAD_SD2_CMD__ESDHC2_CMD | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD2_CMD__GPIO1_11 (_MX53_PAD_SD2_CMD__GPIO1_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CMD__KPP_ROW_5 (_MX53_PAD_SD2_CMD__KPP_ROW_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CMD__AUDMUX_AUD4_RXC (_MX53_PAD_SD2_CMD__AUDMUX_AUD4_RXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CMD__CSPI_MOSI (_MX53_PAD_SD2_CMD__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_CMD__SCC_RANDOM (_MX53_PAD_SD2_CMD__SCC_RANDOM | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA3__ESDHC2_DAT3 (_MX53_PAD_SD2_DATA3__ESDHC2_DAT3 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD2_DATA3__GPIO1_12 (_MX53_PAD_SD2_DATA3__GPIO1_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA3__KPP_COL_6 (_MX53_PAD_SD2_DATA3__KPP_COL_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA3__AUDMUX_AUD4_TXC (_MX53_PAD_SD2_DATA3__AUDMUX_AUD4_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA3__CSPI_SS2 (_MX53_PAD_SD2_DATA3__CSPI_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA3__SJC_DONE (_MX53_PAD_SD2_DATA3__SJC_DONE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA2__ESDHC2_DAT2 (_MX53_PAD_SD2_DATA2__ESDHC2_DAT2 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD2_DATA2__GPIO1_13 (_MX53_PAD_SD2_DATA2__GPIO1_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA2__KPP_ROW_6 (_MX53_PAD_SD2_DATA2__KPP_ROW_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA2__AUDMUX_AUD4_TXD (_MX53_PAD_SD2_DATA2__AUDMUX_AUD4_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA2__CSPI_SS1 (_MX53_PAD_SD2_DATA2__CSPI_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA2__SJC_FAIL (_MX53_PAD_SD2_DATA2__SJC_FAIL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA1__ESDHC2_DAT1 (_MX53_PAD_SD2_DATA1__ESDHC2_DAT1 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD2_DATA1__GPIO1_14 (_MX53_PAD_SD2_DATA1__GPIO1_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA1__KPP_COL_7 (_MX53_PAD_SD2_DATA1__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA1__AUDMUX_AUD4_TXFS (_MX53_PAD_SD2_DATA1__AUDMUX_AUD4_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA1__CSPI_SS0 (_MX53_PAD_SD2_DATA1__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA1__RTIC_SEC_VIO (_MX53_PAD_SD2_DATA1__RTIC_SEC_VIO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA0__ESDHC2_DAT0 (_MX53_PAD_SD2_DATA0__ESDHC2_DAT0 | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
+#define MX53_PAD_SD2_DATA0__GPIO1_15 (_MX53_PAD_SD2_DATA0__GPIO1_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA0__KPP_ROW_7 (_MX53_PAD_SD2_DATA0__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA0__AUDMUX_AUD4_RXD (_MX53_PAD_SD2_DATA0__AUDMUX_AUD4_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA0__CSPI_MISO (_MX53_PAD_SD2_DATA0__CSPI_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_SD2_DATA0__RTIC_DONE_INT (_MX53_PAD_SD2_DATA0__RTIC_DONE_INT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__CCM_CLKO (_MX53_PAD_GPIO_0__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__GPIO1_0 (_MX53_PAD_GPIO_0__GPIO1_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__KPP_COL_5 (_MX53_PAD_GPIO_0__KPP_COL_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK (_MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__EPIT1_EPITO (_MX53_PAD_GPIO_0__EPIT1_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__SRTC_ALARM_DEB (_MX53_PAD_GPIO_0__SRTC_ALARM_DEB | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__USBOH3_USBH1_PWR (_MX53_PAD_GPIO_0__USBOH3_USBH1_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_0__CSU_TD (_MX53_PAD_GPIO_0__CSU_TD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__ESAI1_SCKR (_MX53_PAD_GPIO_1__ESAI1_SCKR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__GPIO1_1 (_MX53_PAD_GPIO_1__GPIO1_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__KPP_ROW_5 (_MX53_PAD_GPIO_1__KPP_ROW_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__CCM_SSI_EXT2_CLK (_MX53_PAD_GPIO_1__CCM_SSI_EXT2_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__PWM2_PWMO (_MX53_PAD_GPIO_1__PWM2_PWMO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__WDOG2_WDOG_B (_MX53_PAD_GPIO_1__WDOG2_WDOG_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__ESDHC1_CD (_MX53_PAD_GPIO_1__ESDHC1_CD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_1__SRC_TESTER_ACK (_MX53_PAD_GPIO_1__SRC_TESTER_ACK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__ESAI1_FSR (_MX53_PAD_GPIO_9__ESAI1_FSR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__GPIO1_9 (_MX53_PAD_GPIO_9__GPIO1_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__KPP_COL_6 (_MX53_PAD_GPIO_9__KPP_COL_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__CCM_REF_EN_B (_MX53_PAD_GPIO_9__CCM_REF_EN_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__PWM1_PWMO (_MX53_PAD_GPIO_9__PWM1_PWMO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__WDOG1_WDOG_B (_MX53_PAD_GPIO_9__WDOG1_WDOG_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__ESDHC1_WP (_MX53_PAD_GPIO_9__ESDHC1_WP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_9__SCC_FAIL_STATE (_MX53_PAD_GPIO_9__SCC_FAIL_STATE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__ESAI1_HCKR (_MX53_PAD_GPIO_3__ESAI1_HCKR | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__GPIO1_3 (_MX53_PAD_GPIO_3__GPIO1_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__DPLLIP1_TOG_EN (_MX53_PAD_GPIO_3__DPLLIP1_TOG_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__CCM_CLKO2 (_MX53_PAD_GPIO_3__CCM_CLKO2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 (_MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__USBOH3_USBH1_OC (_MX53_PAD_GPIO_3__USBOH3_USBH1_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_3__MLB_MLBCLK (_MX53_PAD_GPIO_3__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__ESAI1_SCKT (_MX53_PAD_GPIO_6__ESAI1_SCKT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__GPIO1_6 (_MX53_PAD_GPIO_6__GPIO1_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__CCM_CCM_OUT_0 (_MX53_PAD_GPIO_6__CCM_CCM_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__CSU_CSU_INT_DEB (_MX53_PAD_GPIO_6__CSU_CSU_INT_DEB | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 (_MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__ESDHC2_LCTL (_MX53_PAD_GPIO_6__ESDHC2_LCTL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_6__MLB_MLBSIG (_MX53_PAD_GPIO_6__MLB_MLBSIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__ESAI1_FST (_MX53_PAD_GPIO_2__ESAI1_FST | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__GPIO1_2 (_MX53_PAD_GPIO_2__GPIO1_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__KPP_ROW_6 (_MX53_PAD_GPIO_2__KPP_ROW_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__CCM_CCM_OUT_1 (_MX53_PAD_GPIO_2__CCM_CCM_OUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 (_MX53_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__OBSERVE_MUX_OBSRV_INT_OUT2 (_MX53_PAD_GPIO_2__OBSERVE_MUX_OBSRV_INT_OUT2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__ESDHC2_WP (_MX53_PAD_GPIO_2__ESDHC2_WP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_2__MLB_MLBDAT (_MX53_PAD_GPIO_2__MLB_MLBDAT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__ESAI1_HCKT (_MX53_PAD_GPIO_4__ESAI1_HCKT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__GPIO1_4 (_MX53_PAD_GPIO_4__GPIO1_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__KPP_COL_7 (_MX53_PAD_GPIO_4__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__CCM_CCM_OUT_2 (_MX53_PAD_GPIO_4__CCM_CCM_OUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 (_MX53_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__OBSERVE_MUX_OBSRV_INT_OUT3 (_MX53_PAD_GPIO_4__OBSERVE_MUX_OBSRV_INT_OUT3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__ESDHC2_CD (_MX53_PAD_GPIO_4__ESDHC2_CD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_4__SCC_SEC_STATE (_MX53_PAD_GPIO_4__SCC_SEC_STATE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__ESAI1_TX2_RX3 (_MX53_PAD_GPIO_5__ESAI1_TX2_RX3 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__GPIO1_5 (_MX53_PAD_GPIO_5__GPIO1_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__KPP_ROW_7 (_MX53_PAD_GPIO_5__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__CCM_CLKO (_MX53_PAD_GPIO_5__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 (_MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 (_MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_5__CCM_PLL1_BYP (_MX53_PAD_GPIO_5__CCM_PLL1_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__ESAI1_TX4_RX1 (_MX53_PAD_GPIO_7__ESAI1_TX4_RX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__GPIO1_7 (_MX53_PAD_GPIO_7__GPIO1_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__EPIT1_EPITO (_MX53_PAD_GPIO_7__EPIT1_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__CAN1_TXCAN (_MX53_PAD_GPIO_7__CAN1_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__UART2_TXD_MUX (_MX53_PAD_GPIO_7__UART2_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__FIRI_RXD (_MX53_PAD_GPIO_7__FIRI_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__SPDIF_PLOCK (_MX53_PAD_GPIO_7__SPDIF_PLOCK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__CCM_PLL2_BYP (_MX53_PAD_GPIO_7__CCM_PLL2_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__ESAI1_TX5_RX0 (_MX53_PAD_GPIO_8__ESAI1_TX5_RX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__GPIO1_8 (_MX53_PAD_GPIO_8__GPIO1_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__EPIT2_EPITO (_MX53_PAD_GPIO_8__EPIT2_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__CAN1_RXCAN (_MX53_PAD_GPIO_8__CAN1_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__UART2_RXD_MUX (_MX53_PAD_GPIO_8__UART2_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__FIRI_TXD (_MX53_PAD_GPIO_8__FIRI_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__SPDIF_SRCLK (_MX53_PAD_GPIO_8__SPDIF_SRCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__CCM_PLL3_BYP (_MX53_PAD_GPIO_8__CCM_PLL3_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__ESAI1_TX3_RX2 (_MX53_PAD_GPIO_16__ESAI1_TX3_RX2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__GPIO7_11 (_MX53_PAD_GPIO_16__GPIO7_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT (_MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 (_MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__SPDIF_IN1 (_MX53_PAD_GPIO_16__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_16__SJC_DE_B (_MX53_PAD_GPIO_16__SJC_DE_B | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__ESAI1_TX0 (_MX53_PAD_GPIO_17__ESAI1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__GPIO7_12 (_MX53_PAD_GPIO_17__GPIO7_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__SDMA_EXT_EVENT_0 (_MX53_PAD_GPIO_17__SDMA_EXT_EVENT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__GPC_PMIC_RDY (_MX53_PAD_GPIO_17__GPC_PMIC_RDY | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__RTC_CE_RTC_FSV_TRIG (_MX53_PAD_GPIO_17__RTC_CE_RTC_FSV_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__SPDIF_OUT1 (_MX53_PAD_GPIO_17__SPDIF_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__IPU_SNOOP2 (_MX53_PAD_GPIO_17__IPU_SNOOP2 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_17__SJC_JTAG_ACT (_MX53_PAD_GPIO_17__SJC_JTAG_ACT | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__ESAI1_TX1 (_MX53_PAD_GPIO_18__ESAI1_TX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__GPIO7_13 (_MX53_PAD_GPIO_18__GPIO7_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__SDMA_EXT_EVENT_1 (_MX53_PAD_GPIO_18__SDMA_EXT_EVENT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__OWIRE_LINE (_MX53_PAD_GPIO_18__OWIRE_LINE | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__RTC_CE_RTC_ALARM2_TRIG (_MX53_PAD_GPIO_18__RTC_CE_RTC_ALARM2_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__CCM_ASRC_EXT_CLK (_MX53_PAD_GPIO_18__CCM_ASRC_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__ESDHC1_LCTL (_MX53_PAD_GPIO_18__ESDHC1_LCTL | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_18__SRC_SYSTEM_RST (_MX53_PAD_GPIO_18__SRC_SYSTEM_RST | MUX_PAD_CTRL(NO_PAD_CTRL))
#endif /* __MACH_IOMUX_MX53_H__ */
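The pad definitions added above are meant to be consumed by board files rather than used directly. A minimal sketch of that usage follows, assuming the usual plat-mxc iomux-v3 pattern (the mxc_iomux_v3_setup_multiple_pads() helper and the particular pad choices are illustrative, not part of this patch):

#include <linux/init.h>
#include <mach/iomux-mx53.h>

/* Example pad table: UART1 console pins plus the eSDHC1 CMD/CLK lines,
 * all taken from the definitions added above. */
static iomux_v3_cfg_t mx53_example_pads[] = {
	MX53_PAD_PATA_DIOW__UART1_TXD_MUX,
	MX53_PAD_PATA_DMACK__UART1_RXD_MUX,
	MX53_PAD_SD1_CMD__ESDHC1_CMD,
	MX53_PAD_SD1_CLK__ESDHC1_CLK,
};

static void __init mx53_example_init_pads(void)
{
	/* Apply the whole table in one call; each entry already carries
	 * its pad-control bits via MUX_PAD_CTRL(). */
	mxc_iomux_v3_setup_multiple_pads(mx53_example_pads,
					 ARRAY_SIZE(mx53_example_pads));
}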
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h b/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h
index 15d59510f597..bf28df0d58b7 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h
@@ -46,12 +46,12 @@
 * - sets up the iomux according to the configuration
* - if the pin is configured as a GPIO, we claim it through kernel gpiolib
*/
-int mxc_iomux_alloc_pin(const unsigned int pin_mode, const char *label);
+int mxc_iomux_alloc_pin(unsigned int pin_mode, const char *label);
/*
 * sets up multiple pins
* convenient way to call the above function with tables
*/
-int mxc_iomux_setup_multiple_pins(unsigned int *pin_list, unsigned count,
+int mxc_iomux_setup_multiple_pins(const unsigned int *pin_list, unsigned count,
const char *label);
/*
@@ -60,12 +60,12 @@ int mxc_iomux_setup_multiple_pins(unsigned int *pin_list, unsigned count,
* - frees the GPIO if the pin was configured as GPIO
* - DOES NOT reconfigure the IOMUX in its reset state
*/
-void mxc_iomux_release_pin(const unsigned int pin_mode);
+void mxc_iomux_release_pin(unsigned int pin_mode);
/*
* releases multiple pins
 * convenient way to call the above function with tables
*/
-void mxc_iomux_release_multiple_pins(unsigned int *pin_list, int count);
+void mxc_iomux_release_multiple_pins(const unsigned int *pin_list, int count);
#define MUX_SIDE_AP (0)
#define MUX_SIDE_SP (1)
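The constification of the prototypes above mainly affects callers: a board's pin table can now be declared const. A hedged sketch under that assumption (the EXAMPLE_PIN_* names are placeholders, not defines from this header):

static const unsigned int example_pins[] = {
	/* Placeholder pin modes; a real board would list the
	 * MXC91231 iomux defines it needs here. */
	EXAMPLE_PIN_UART_TX,
	EXAMPLE_PIN_UART_RX,
};

static int __init example_setup_pins(void)
{
	/* The const table matches the constified prototype, so the
	 * pin list can live in read-only data. */
	return mxc_iomux_setup_multiple_pins(example_pins,
					     ARRAY_SIZE(example_pins),
					     "example-board");
}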
diff --git a/arch/arm/plat-mxc/include/mach/memory.h b/arch/arm/plat-mxc/include/mach/memory.h
index 83861408133f..5d51cbb98893 100644
--- a/arch/arm/plat-mxc/include/mach/memory.h
+++ b/arch/arm/plat-mxc/include/mach/memory.h
@@ -23,23 +23,23 @@
#if !defined(CONFIG_RUNTIME_PHYS_OFFSET)
# if defined CONFIG_ARCH_MX1
-# define PHYS_OFFSET MX1_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX1_PHYS_OFFSET
# elif defined CONFIG_MACH_MX21
-# define PHYS_OFFSET MX21_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX21_PHYS_OFFSET
# elif defined CONFIG_ARCH_MX25
-# define PHYS_OFFSET MX25_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX25_PHYS_OFFSET
# elif defined CONFIG_MACH_MX27
-# define PHYS_OFFSET MX27_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX27_PHYS_OFFSET
# elif defined CONFIG_ARCH_MX3
-# define PHYS_OFFSET MX3x_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX3x_PHYS_OFFSET
# elif defined CONFIG_ARCH_MXC91231
-# define PHYS_OFFSET MXC91231_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MXC91231_PHYS_OFFSET
# elif defined CONFIG_ARCH_MX50
-# define PHYS_OFFSET MX50_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX50_PHYS_OFFSET
# elif defined CONFIG_ARCH_MX51
-# define PHYS_OFFSET MX51_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX51_PHYS_OFFSET
# elif defined CONFIG_ARCH_MX53
-# define PHYS_OFFSET MX53_PHYS_OFFSET
+# define PLAT_PHYS_OFFSET MX53_PHYS_OFFSET
# endif
#endif
diff --git a/arch/arm/plat-mxc/include/mach/mx1.h b/arch/arm/plat-mxc/include/mach/mx1.h
index 75d96214b831..7c871b87e97a 100644
--- a/arch/arm/plat-mxc/include/mach/mx1.h
+++ b/arch/arm/plat-mxc/include/mach/mx1.h
@@ -54,13 +54,13 @@
#define MX1_AIPI2_BASE_ADDR (0x10000 + MX1_IO_BASE_ADDR)
#define MX1_SIM_BASE_ADDR (0x11000 + MX1_IO_BASE_ADDR)
#define MX1_USBD_BASE_ADDR (0x12000 + MX1_IO_BASE_ADDR)
-#define MX1_SPI1_BASE_ADDR (0x13000 + MX1_IO_BASE_ADDR)
+#define MX1_CSPI1_BASE_ADDR (0x13000 + MX1_IO_BASE_ADDR)
#define MX1_MMC_BASE_ADDR (0x14000 + MX1_IO_BASE_ADDR)
#define MX1_ASP_BASE_ADDR (0x15000 + MX1_IO_BASE_ADDR)
#define MX1_BTA_BASE_ADDR (0x16000 + MX1_IO_BASE_ADDR)
#define MX1_I2C_BASE_ADDR (0x17000 + MX1_IO_BASE_ADDR)
#define MX1_SSI_BASE_ADDR (0x18000 + MX1_IO_BASE_ADDR)
-#define MX1_SPI2_BASE_ADDR (0x19000 + MX1_IO_BASE_ADDR)
+#define MX1_CSPI2_BASE_ADDR (0x19000 + MX1_IO_BASE_ADDR)
#define MX1_MSHC_BASE_ADDR (0x1A000 + MX1_IO_BASE_ADDR)
#define MX1_CCM_BASE_ADDR (0x1B000 + MX1_IO_BASE_ADDR)
#define MX1_SCM_BASE_ADDR (0x1B804 + MX1_IO_BASE_ADDR)
@@ -112,7 +112,8 @@
#define MX1_PWM_INT 34
#define MX1_SDHC_INT 35
#define MX1_INT_I2C 39
-#define MX1_CSPI_INT 41
+#define MX1_INT_CSPI2 40
+#define MX1_INT_CSPI1 41
#define MX1_SSI_TX_INT 42
#define MX1_SSI_TX_ERR_INT 43
#define MX1_SSI_RX_INT 44
diff --git a/arch/arm/plat-mxc/include/mach/mx53.h b/arch/arm/plat-mxc/include/mach/mx53.h
index d7a8e52181ea..ace17864575e 100644
--- a/arch/arm/plat-mxc/include/mach/mx53.h
+++ b/arch/arm/plat-mxc/include/mach/mx53.h
@@ -79,7 +79,7 @@
#define MX53_GPIO3_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x0008C000)
#define MX53_GPIO4_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x00090000)
#define MX53_KPP_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x00094000)
-#define MX53_WDOG_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x00098000)
+#define MX53_WDOG1_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x00098000)
#define MX53_WDOG2_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x0009C000)
#define MX53_GPT1_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x000A0000)
#define MX53_SRTC_BASE_ADDR (MX53_AIPS1_BASE_ADDR + 0x000A4000)
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
index a523a4079299..2c159dc2398b 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_ehci.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -44,11 +44,14 @@ struct mxc_usbh_platform_data {
int (*exit)(struct platform_device *pdev);
unsigned int portsc;
- unsigned int flags;
struct otg_transceiver *otg;
};
-int mxc_initialize_usb_hw(int port, unsigned int flags);
+int mx51_initialize_usb_hw(int port, unsigned int flags);
+int mx25_initialize_usb_hw(int port, unsigned int flags);
+int mx31_initialize_usb_hw(int port, unsigned int flags);
+int mx35_initialize_usb_hw(int port, unsigned int flags);
+int mx27_initialize_usb_hw(int port, unsigned int flags);
#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
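With the flags field dropped from mxc_usbh_platform_data and mxc_initialize_usb_hw() split into per-SoC helpers, board code passes the flags straight to the helper from its host-init callback. A minimal sketch, assuming the platform data also carries an .init callback as elsewhere in plat-mxc and using flag values from this header family for illustration only:

#include <linux/platform_device.h>
#include <mach/mxc_ehci.h>

static int example_usbh1_init(struct platform_device *pdev)
{
	/* Pick the SoC-specific helper and pass the flags directly,
	 * instead of stashing them in the platform data. */
	return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
}

static struct mxc_usbh_platform_data example_usbh1_pdata = {
	.init	= example_usbh1_init,
	.portsc	= MXC_EHCI_MODE_ULPI,
};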
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index ff469c4f1d76..4864b0afd440 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -62,6 +62,7 @@ static inline void flush(void)
#define MX2X_UART1_BASE_ADDR 0x1000a000
#define MX3X_UART1_BASE_ADDR 0x43F90000
#define MX3X_UART2_BASE_ADDR 0x43F94000
+#define MX3X_UART5_BASE_ADDR 0x43FB4000
#define MX51_UART1_BASE_ADDR 0x73fbc000
#define MX50_UART1_BASE_ADDR 0x53fbc000
#define MX53_UART1_BASE_ADDR 0x53fbc000
@@ -83,6 +84,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
case MACH_TYPE_MX21ADS:
case MACH_TYPE_PCA100:
case MACH_TYPE_MXT_TD60:
+ case MACH_TYPE_IMX27IPCAM:
uart_base = MX2X_UART1_BASE_ADDR;
break;
case MACH_TYPE_MX31LITE:
@@ -101,6 +103,9 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
case MACH_TYPE_MAGX_ZN5:
uart_base = MX3X_UART2_BASE_ADDR;
break;
+ case MACH_TYPE_BUG:
+ uart_base = MX3X_UART5_BASE_ADDR;
+ break;
case MACH_TYPE_MX51_BABBAGE:
case MACH_TYPE_EUKREA_CPUIMX51SD:
case MACH_TYPE_MX51_3DS:
@@ -110,6 +115,8 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
uart_base = MX50_UART1_BASE_ADDR;
break;
case MACH_TYPE_MX53_EVK:
+ case MACH_TYPE_MX53_LOCO:
+ case MACH_TYPE_MX53_SMD:
uart_base = MX53_UART1_BASE_ADDR;
break;
default:
diff --git a/arch/arm/plat-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index 1e88ecb846d1..45b1cf95e378 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -30,23 +30,35 @@
/*
* The GPIO module in the Nomadik family of Systems-on-Chip is an
* AMBA device, managing 32 pins and alternate functions. The logic block
- * is currently only used in the Nomadik.
+ * is currently used in the Nomadik and ux500.
*
* Symbols in this file are called "nmk_gpio" for "nomadik gpio"
*/
-#define NMK_GPIO_PER_CHIP 32
+#define NMK_GPIO_PER_CHIP 32
+
struct nmk_gpio_chip {
struct gpio_chip chip;
void __iomem *addr;
struct clk *clk;
+ unsigned int bank;
unsigned int parent_irq;
+ int secondary_parent_irq;
+ u32 (*get_secondary_status)(unsigned int bank);
+ void (*set_ioforce)(bool enable);
spinlock_t lock;
/* Keep track of configured edges */
u32 edge_rising;
u32 edge_falling;
};
+static struct nmk_gpio_chip *
+nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+
+static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
+
+#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)
+
static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int gpio_mode)
{
@@ -118,8 +130,35 @@ static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
__nmk_gpio_set_output(nmk_chip, offset, val);
}
+static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int gpio_mode,
+ bool glitch)
+{
+ u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
+ u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+
+ if (glitch && nmk_chip->set_ioforce) {
+ u32 bit = BIT(offset);
+
+ /* Prevent spurious wakeups */
+ writel(rwimsc & ~bit, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ writel(fwimsc & ~bit, nmk_chip->addr + NMK_GPIO_FWIMSC);
+
+ nmk_chip->set_ioforce(true);
+ }
+
+ __nmk_gpio_set_mode(nmk_chip, offset, gpio_mode);
+
+ if (glitch && nmk_chip->set_ioforce) {
+ nmk_chip->set_ioforce(false);
+
+ writel(rwimsc, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ writel(fwimsc, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ }
+}
+
static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
- pin_cfg_t cfg, bool sleep)
+ pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
{
static const char *afnames[] = {
[NMK_GPIO_ALT_GPIO] = "GPIO",
@@ -144,6 +183,7 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
int slpm = PIN_SLPM(cfg);
int output = PIN_DIR(cfg);
int val = PIN_VAL(cfg);
+ bool glitch = af == NMK_GPIO_ALT_C;
dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: af %s, pull %s, slpm %s (%s%s)\n",
pin, cfg, afnames[af], pullnames[pull], slpmnames[slpm],
@@ -155,6 +195,8 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
int slpm_output = PIN_SLPM_DIR(cfg);
int slpm_val = PIN_SLPM_VAL(cfg);
+ af = NMK_GPIO_ALT_GPIO;
+
/*
* The SLPM_* values are normal values + 1 to allow zero to
* mean "same as normal".
@@ -180,8 +222,116 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
__nmk_gpio_set_pull(nmk_chip, offset, pull);
}
- __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
- __nmk_gpio_set_mode(nmk_chip, offset, af);
+ /*
+ * If we've backed up the SLPM registers (glitch workaround), modify
+ * the backups since they will be restored.
+ */
+ if (slpmregs) {
+ if (slpm == NMK_GPIO_SLPM_NOCHANGE)
+ slpmregs[nmk_chip->bank] |= BIT(offset);
+ else
+ slpmregs[nmk_chip->bank] &= ~BIT(offset);
+ } else
+ __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
+
+ __nmk_gpio_set_mode_safe(nmk_chip, offset, af, glitch);
+}
+
+/*
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers
+ * - Set SLPM=0 for the IOs you want to switch and others to 1
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers
+ * - Any spurious wake-up events during the switch sequence are ignored
+ *   and cleared
+ */
+static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+ unsigned int temp = slpm[i];
+
+ if (!chip)
+ break;
+
+ slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
+ writel(temp, chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ writel(slpm[i], chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
+{
+ static unsigned int slpm[NUM_BANKS];
+ unsigned long flags;
+ bool glitch = false;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C) {
+ glitch = true;
+ break;
+ }
+ }
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+
+ if (glitch) {
+ memset(slpm, 0xff, sizeof(slpm));
+
+ for (i = 0; i < num; i++) {
+ int pin = PIN_NUM(cfgs[i]);
+ int offset = pin % NMK_GPIO_PER_CHIP;
+
+ if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C)
+ slpm[pin / NMK_GPIO_PER_CHIP] &= ~BIT(offset);
+ }
+
+ nmk_gpio_glitch_slpm_init(slpm);
+ }
+
+ for (i = 0; i < num; i++) {
+ struct nmk_gpio_chip *nmk_chip;
+ int pin = PIN_NUM(cfgs[i]);
+
+ nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
+ if (!nmk_chip) {
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_lock(&nmk_chip->lock);
+ __nmk_config_pin(nmk_chip, pin - nmk_chip->chip.base,
+ cfgs[i], sleep, glitch ? slpm : NULL);
+ spin_unlock(&nmk_chip->lock);
+ }
+
+ if (glitch)
+ nmk_gpio_glitch_slpm_restore(slpm);
+
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return ret;
}
/**
@@ -200,19 +350,7 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
*/
int nmk_config_pin(pin_cfg_t cfg, bool sleep)
{
- struct nmk_gpio_chip *nmk_chip;
- int gpio = PIN_NUM(cfg);
- unsigned long flags;
-
- nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
- if (!nmk_chip)
- return -EINVAL;
-
- spin_lock_irqsave(&nmk_chip->lock, flags);
- __nmk_config_pin(nmk_chip, gpio - nmk_chip->chip.base, cfg, sleep);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
-
- return 0;
+ return __nmk_config_pins(&cfg, 1, sleep);
}
EXPORT_SYMBOL(nmk_config_pin);
@@ -226,31 +364,13 @@ EXPORT_SYMBOL(nmk_config_pin);
*/
int nmk_config_pins(pin_cfg_t *cfgs, int num)
{
- int ret = 0;
- int i;
-
- for (i = 0; i < num; i++) {
- ret = nmk_config_pin(cfgs[i], false);
- if (ret)
- break;
- }
-
- return ret;
+ return __nmk_config_pins(cfgs, num, false);
}
EXPORT_SYMBOL(nmk_config_pins);
int nmk_config_pins_sleep(pin_cfg_t *cfgs, int num)
{
- int ret = 0;
- int i;
-
- for (i = 0; i < num; i++) {
- ret = nmk_config_pin(cfgs[i], true);
- if (ret)
- break;
- }
-
- return ret;
+ return __nmk_config_pins(cfgs, num, true);
}
EXPORT_SYMBOL(nmk_config_pins_sleep);
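For illustration only: a board file would normally hand a whole group of Alternate-C pins to nmk_config_pins() in a single call, so the glitch-safe IOFORCE sequence above runs once for the group under nmk_gpio_slpm_lock. The PIN_CFG() constructor and the pin numbers below are assumptions, not part of this patch.

/* Hypothetical sketch: request Alt-C on two pins in one call. */
static pin_cfg_t board_altc_pins[] = {
	PIN_CFG(68, ALT_C),	/* PIN_CFG() assumed from <plat/pincfg.h> */
	PIN_CFG(69, ALT_C),
};

static int __init board_mux_init(void)
{
	return nmk_config_pins(board_altc_pins, ARRAY_SIZE(board_altc_pins));
}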
@@ -277,9 +397,13 @@ int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
if (!nmk_chip)
return -EINVAL;
- spin_lock_irqsave(&nmk_chip->lock, flags);
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
__nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base, mode);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
return 0;
}
@@ -314,6 +438,15 @@ int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull)
}
/* Mode functions */
+/**
+ * nmk_gpio_set_mode() - set the mux mode of a gpio pin
+ * @gpio: pin number
+ * @gpio_mode: one of NMK_GPIO_ALT_GPIO, NMK_GPIO_ALT_A,
+ * NMK_GPIO_ALT_B, and NMK_GPIO_ALT_C
+ *
+ * Sets the mode of the specified pin to one of the alternate functions or
+ * plain GPIO.
+ */
int nmk_gpio_set_mode(int gpio, int gpio_mode)
{
struct nmk_gpio_chip *nmk_chip;
@@ -443,16 +576,20 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (!nmk_chip)
return -EINVAL;
- spin_lock_irqsave(&nmk_chip->lock, flags);
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
#ifdef CONFIG_ARCH_U8500
if (cpu_is_u8500v2()) {
- __nmk_gpio_set_slpm(nmk_chip, gpio,
+ __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
: NMK_GPIO_SLPM_WAKEUP_DISABLE);
}
#endif
__nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
return 0;
}
@@ -514,12 +651,11 @@ static struct irq_chip nmk_gpio_irq_chip = {
.irq_set_wake = nmk_gpio_irq_set_wake,
};
-static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
+ u32 status)
{
struct nmk_gpio_chip *nmk_chip;
struct irq_chip *host_chip = get_irq_chip(irq);
- unsigned int gpio_irq;
- u32 pending;
unsigned int first_irq;
if (host_chip->irq_mask_ack)
@@ -532,29 +668,56 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
nmk_chip = get_irq_data(irq);
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
- while ( (pending = readl(nmk_chip->addr + NMK_GPIO_IS)) ) {
- gpio_irq = first_irq + __ffs(pending);
- generic_handle_irq(gpio_irq);
+ while (status) {
+ int bit = __ffs(status);
+
+ generic_handle_irq(first_irq + bit);
+ status &= ~BIT(bit);
}
host_chip->irq_unmask(&desc->irq_data);
}
+static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip = get_irq_data(irq);
+ u32 status = readl(nmk_chip->addr + NMK_GPIO_IS);
+
+ __nmk_gpio_irq_handler(irq, desc, status);
+}
+
+static void nmk_gpio_secondary_irq_handler(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip = get_irq_data(irq);
+ u32 status = nmk_chip->get_secondary_status(nmk_chip->bank);
+
+ __nmk_gpio_irq_handler(irq, desc, status);
+}
+
static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
{
unsigned int first_irq;
int i;
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
- for (i = first_irq; i < first_irq + NMK_GPIO_PER_CHIP; i++) {
+ for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) {
set_irq_chip(i, &nmk_gpio_irq_chip);
set_irq_handler(i, handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
set_irq_chip_data(i, nmk_chip);
set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
}
+
set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
set_irq_data(nmk_chip->parent_irq, nmk_chip);
+
+ if (nmk_chip->secondary_parent_irq >= 0) {
+ set_irq_chained_handler(nmk_chip->secondary_parent_irq,
+ nmk_gpio_secondary_irq_handler);
+ set_irq_data(nmk_chip->secondary_parent_irq, nmk_chip);
+ }
+
return 0;
}
@@ -605,6 +768,97 @@ static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base) + offset;
}
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ int mode;
+ unsigned i;
+ unsigned gpio = chip->base;
+ int is_out;
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ const char *modes[] = {
+ [NMK_GPIO_ALT_GPIO] = "gpio",
+ [NMK_GPIO_ALT_A] = "altA",
+ [NMK_GPIO_ALT_B] = "altB",
+ [NMK_GPIO_ALT_C] = "altC",
+ };
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ const char *label = gpiochip_is_requested(chip, i);
+ bool pull;
+ u32 bit = 1 << i;
+
+ if (!label)
+ continue;
+
+ is_out = readl(nmk_chip->addr + NMK_GPIO_DIR) & bit;
+ pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
+ mode = nmk_gpio_get_mode(gpio);
+ seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
+ gpio, label,
+ is_out ? "out" : "in ",
+ chip->get
+ ? (chip->get(chip, i) ? "hi" : "lo")
+ : "? ",
+ (mode < 0) ? "unknown" : modes[mode],
+ pull ? "pull" : "none");
+
+ if (!is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ *
+ * More significantly, trigger type flags aren't
+ * currently maintained by genirq.
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+
+ switch (desc->status & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_NONE:
+ trigger = "(default)";
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ trigger = "edge-falling";
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ trigger = "edge-rising";
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ trigger = "edge-both";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ trigger = "level-high";
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ trigger = "level-low";
+ break;
+ default:
+ trigger = "?trigger?";
+ break;
+ }
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ (desc->status & IRQ_WAKEUP)
+ ? " wakeup" : "");
+ }
+ }
+
+ seq_printf(s, "\n");
+ }
+}
+
+#else
+#define nmk_gpio_dbg_show NULL
+#endif
+
/* This structure is replicated for each GPIO block allocated at probe time */
static struct gpio_chip nmk_gpio_template = {
.direction_input = nmk_gpio_make_input,
@@ -612,7 +866,7 @@ static struct gpio_chip nmk_gpio_template = {
.direction_output = nmk_gpio_make_output,
.set = nmk_gpio_set_output,
.to_irq = nmk_gpio_to_irq,
- .ngpio = NMK_GPIO_PER_CHIP,
+ .dbg_show = nmk_gpio_dbg_show,
.can_sleep = 0,
};
@@ -623,6 +877,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
struct gpio_chip *chip;
struct resource *res;
struct clk *clk;
+ int secondary_irq;
int irq;
int ret;
@@ -641,6 +896,12 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
goto out;
}
+ secondary_irq = platform_get_irq(dev, 1);
+ if (secondary_irq >= 0 && !pdata->get_secondary_status) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (request_mem_region(res->start, resource_size(res),
dev_name(&dev->dev)) == NULL) {
ret = -EBUSY;
@@ -664,14 +925,19 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
+ nmk_chip->bank = dev->id;
nmk_chip->clk = clk;
nmk_chip->addr = io_p2v(res->start);
nmk_chip->chip = nmk_gpio_template;
nmk_chip->parent_irq = irq;
+ nmk_chip->secondary_parent_irq = secondary_irq;
+ nmk_chip->get_secondary_status = pdata->get_secondary_status;
+ nmk_chip->set_ioforce = pdata->set_ioforce;
spin_lock_init(&nmk_chip->lock);
chip = &nmk_chip->chip;
chip->base = pdata->first_gpio;
+ chip->ngpio = pdata->num_gpio;
chip->label = pdata->name ?: dev_name(&dev->dev);
chip->dev = &dev->dev;
chip->owner = THIS_MODULE;
@@ -680,6 +946,9 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
if (ret)
goto out_free;
+ BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+
+ nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
platform_set_drvdata(dev, nmk_chip);
nmk_gpio_init_irq(nmk_chip);
@@ -705,10 +974,8 @@ static struct platform_driver nmk_gpio_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "gpio",
- },
+ },
.probe = nmk_gpio_probe,
- .suspend = NULL, /* to be done */
- .resume = NULL,
};
static int __init nmk_gpio_init(void)
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index 67b113d639d8..e3a4837e86f4 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -82,6 +82,9 @@ struct nmk_gpio_platform_data {
char *name;
int first_gpio;
int first_irq;
+ int num_gpio;
+ u32 (*get_secondary_status)(unsigned int bank);
+ void (*set_ioforce)(bool enable);
};
#endif /* __ASM_PLAT_GPIO_H */
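As a sketch of how a machine might wire up the new platform-data hooks (the helper names and the 32-pin bank geometry below are illustrative assumptions, not taken from this patch):

static u32 board_gpio_secondary_status(unsigned int bank)
{
	/* assumed SoC helper returning the latched secondary/wake status */
	return board_read_gpio_wake_status(bank);	/* hypothetical */
}

static void board_gpio_set_ioforce(bool enable)
{
	/* assumed SoC helper toggling the IOFORCE control bit */
	board_write_ioforce(enable);			/* hypothetical */
}

static struct nmk_gpio_platform_data gpio_bank0_pdata = {
	.name			= "GPIO-0-31",
	.first_gpio		= 0,
	.first_irq		= NOMADIK_GPIO_TO_IRQ(0),
	.num_gpio		= 32,
	.get_secondary_status	= board_gpio_secondary_status,
	.set_ioforce		= board_gpio_set_ioforce,
};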
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 4d6dd4c39b75..c44886062f8e 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -104,6 +104,8 @@ struct stedma40_half_channel_info {
*
* @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
* @high_priority: true if high-priority
+ * @realtime: true if realtime mode is to be enabled. Only available on DMA40
+ * version 3+, i.e. DB8500v2+
* @mode: channel mode: physical, logical, or operation
* @mode_opt: options for the chosen channel mode
* @src_dev_type: Src device type
@@ -119,6 +121,7 @@ struct stedma40_half_channel_info {
struct stedma40_chan_cfg {
enum stedma40_xfer_dir dir;
bool high_priority;
+ bool realtime;
enum stedma40_mode mode;
enum stedma40_mode_opt mode_opt;
int src_dev_type;
@@ -169,25 +172,6 @@ struct stedma40_platform_data {
bool stedma40_filter(struct dma_chan *chan, void *data);
/**
- * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
- * scattergatter lists.
- *
- * @chan: dmaengine handle
- * @sgl_dst: Destination scatter list
- * @sgl_src: Source scatter list
- * @sgl_len: The length of each scatterlist. Both lists must be of equal length
- * and each element must match the corresponding element in the other scatter
- * list.
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
- struct scatterlist *sgl_dst,
- struct scatterlist *sgl_src,
- unsigned int sgl_len,
- unsigned long flags);
-
-/**
* stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
* (=device)
*
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index b6333ae3f92a..cd5f993612fd 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -54,7 +54,7 @@ config OMAP_SMARTREFLEX
user must write 1 to
/debug/voltage/vdd_<X>/smartreflex/autocomp,
where X is mpu or core for OMAP3.
- Optionallly autocompensation can be enabled in the kernel
+ Optionally autocompensation can be enabled in the kernel
by default during system init via the enable_on_init flag
which can be passed as platform data to the smartreflex driver.
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index f04731820301..d9f10a31e604 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -24,10 +24,11 @@
#define NO_LENGTH_CHECK 0xffffffff
-struct omap_board_config_kernel *omap_board_config;
+struct omap_board_config_kernel *omap_board_config __initdata;
int omap_board_config_size;
-static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out)
+static const void *__init get_config(u16 tag, size_t len,
+ int skip, size_t *len_out)
{
struct omap_board_config_kernel *kinfo = NULL;
int i;
@@ -49,17 +50,15 @@ static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out)
return kinfo->data;
}
-const void *__omap_get_config(u16 tag, size_t len, int nr)
+const void *__init __omap_get_config(u16 tag, size_t len, int nr)
{
return get_config(tag, len, nr, NULL);
}
-EXPORT_SYMBOL(__omap_get_config);
-const void *omap_get_var_config(u16 tag, size_t *len)
+const void *__init omap_get_var_config(u16 tag, size_t *len)
{
return get_config(tag, NO_LENGTH_CHECK, 0, len);
}
-EXPORT_SYMBOL(omap_get_var_config);
void __init omap_reserve(void)
{
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 862dda95d61d..f7fed6080190 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -54,7 +54,7 @@ static cycle_t notrace omap16xx_32k_read(struct clocksource *cs)
#define omap16xx_32k_read NULL
#endif
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
static cycle_t notrace omap2420_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
@@ -63,7 +63,7 @@ static cycle_t notrace omap2420_32k_read(struct clocksource *cs)
#define omap2420_32k_read NULL
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
static cycle_t notrace omap2430_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 10245b837c10..7d9f815cedec 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -35,8 +35,8 @@
static struct platform_device **omap_mcbsp_devices;
-void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
- int size)
+void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
+ struct omap_mcbsp_platform_data *config, int size)
{
int i;
@@ -54,6 +54,8 @@ void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
new_mcbsp = platform_device_alloc("omap-mcbsp", i + 1);
if (!new_mcbsp)
continue;
+ platform_device_add_resources(new_mcbsp, &res[i * res_count],
+ res_count);
new_mcbsp->dev.platform_data = &config[i];
ret = platform_device_add(new_mcbsp);
if (ret) {
@@ -65,8 +67,8 @@ void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
}
#else
-void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
- int size)
+void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
+ struct omap_mcbsp_platform_data *config, int size)
{ }
#endif
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 85363084cc1a..2ec3b5d9f214 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -134,7 +134,7 @@ static inline void omap_enable_channel_irq(int lch);
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
-int omap_dma_in_1510_mode(void)
+static int omap_dma_in_1510_mode(void)
{
return enable_1510_mode;
}
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index a4f8003de664..3341ca4703e9 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -112,6 +112,7 @@ static inline int omap1_i2c_add_bus(int bus_id)
}
+#ifdef CONFIG_ARCH_OMAP2PLUS
/*
* XXX This function is a temporary compatibility wrapper - only
* needed until the I2C driver can be converted to call
@@ -130,7 +131,6 @@ static struct omap_device_pm_latency omap_i2c_latency[] = {
},
};
-#ifdef CONFIG_ARCH_OMAP2PLUS
static inline int omap2_i2c_add_bus(int bus_id)
{
int l;
diff --git a/arch/arm/plat-omap/include/plat/board.h b/arch/arm/plat-omap/include/plat/board.h
index 3cf4fa25ab3d..97126dfd2888 100644
--- a/arch/arm/plat-omap/include/plat/board.h
+++ b/arch/arm/plat-omap/include/plat/board.h
@@ -151,14 +151,14 @@ struct omap_board_config_kernel {
const void *data;
};
-extern const void *__omap_get_config(u16 tag, size_t len, int nr);
+extern const void *__init __omap_get_config(u16 tag, size_t len, int nr);
#define omap_get_config(tag, type) \
((const type *) __omap_get_config((tag), sizeof(type), 0))
#define omap_get_nr_config(tag, type, nr) \
((const type *) __omap_get_config((tag), sizeof(type), (nr)))
-extern const void *omap_get_var_config(u16 tag, size_t *len);
+extern const void *__init omap_get_var_config(u16 tag, size_t *len);
extern struct omap_board_config_kernel *omap_board_config;
extern int omap_board_config_size;
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index 256ab3f1ec8f..f1899a3e4174 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -38,6 +38,7 @@ struct omap_clk {
#define CK_3517 (1 << 9)
#define CK_36XX (1 << 10) /* 36xx/37xx-specific clocks */
#define CK_443X (1 << 11)
+#define CK_TI816X (1 << 12)
#define CK_34XX (CK_3430ES1 | CK_3430ES2PLUS)
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 8eb0adab19ea..d43e6234dbbb 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -53,6 +53,7 @@ struct clkops {
#define RATE_IN_3430ES2PLUS (1 << 3) /* 3430 ES >= 2 rates only */
#define RATE_IN_36XX (1 << 4)
#define RATE_IN_4430 (1 << 5)
+#define RATE_IN_TI816X (1 << 6)
#define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
#define RATE_IN_34XX (RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 29b2afb4288f..1dd97e7461c9 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -66,6 +66,7 @@ void omap2_set_globals_242x(void);
void omap2_set_globals_243x(void);
void omap2_set_globals_3xxx(void);
void omap2_set_globals_443x(void);
+void omap2_set_globals_ti816x(void);
/* These get called from omap2_set_globals_xxxx(), do not call these */
void omap2_set_globals_tap(struct omap_globals *);
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 3fd8b4055727..8198bb6cdb5e 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2004, 2008 Nokia Corporation
*
- * Copyright (C) 2009 Texas Instruments.
+ * Copyright (C) 2009-11 Texas Instruments.
*
* Written by Tony Lindgren <tony.lindgren@nokia.com>
*
@@ -105,6 +105,12 @@ static inline int is_omap ##subclass (void) \
return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
}
+#define IS_TI_SUBCLASS(subclass, id) \
+static inline int is_ti ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
IS_OMAP_CLASS(7xx, 0x07)
IS_OMAP_CLASS(15xx, 0x15)
IS_OMAP_CLASS(16xx, 0x16)
@@ -118,6 +124,8 @@ IS_OMAP_SUBCLASS(343x, 0x343)
IS_OMAP_SUBCLASS(363x, 0x363)
IS_OMAP_SUBCLASS(443x, 0x443)
+IS_TI_SUBCLASS(816x, 0x816)
+
#define cpu_is_omap7xx() 0
#define cpu_is_omap15xx() 0
#define cpu_is_omap16xx() 0
@@ -126,6 +134,7 @@ IS_OMAP_SUBCLASS(443x, 0x443)
#define cpu_is_omap243x() 0
#define cpu_is_omap34xx() 0
#define cpu_is_omap343x() 0
+#define cpu_is_ti816x() 0
#define cpu_is_omap44xx() 0
#define cpu_is_omap443x() 0
@@ -170,11 +179,11 @@ IS_OMAP_SUBCLASS(443x, 0x443)
# undef cpu_is_omap24xx
# define cpu_is_omap24xx() is_omap24xx()
# endif
-# if defined (CONFIG_ARCH_OMAP2420)
+# if defined (CONFIG_SOC_OMAP2420)
# undef cpu_is_omap242x
# define cpu_is_omap242x() is_omap242x()
# endif
-# if defined (CONFIG_ARCH_OMAP2430)
+# if defined (CONFIG_SOC_OMAP2430)
# undef cpu_is_omap243x
# define cpu_is_omap243x() is_omap243x()
# endif
@@ -189,11 +198,11 @@ IS_OMAP_SUBCLASS(443x, 0x443)
# undef cpu_is_omap24xx
# define cpu_is_omap24xx() 1
# endif
-# if defined(CONFIG_ARCH_OMAP2420)
+# if defined(CONFIG_SOC_OMAP2420)
# undef cpu_is_omap242x
# define cpu_is_omap242x() 1
# endif
-# if defined(CONFIG_ARCH_OMAP2430)
+# if defined(CONFIG_SOC_OMAP2430)
# undef cpu_is_omap243x
# define cpu_is_omap243x() 1
# endif
@@ -201,7 +210,7 @@ IS_OMAP_SUBCLASS(443x, 0x443)
# undef cpu_is_omap34xx
# define cpu_is_omap34xx() 1
# endif
-# if defined(CONFIG_ARCH_OMAP3430)
+# if defined(CONFIG_SOC_OMAP3430)
# undef cpu_is_omap343x
# define cpu_is_omap343x() 1
# endif
@@ -330,6 +339,7 @@ IS_OMAP_TYPE(3517, 0x3517)
# undef cpu_is_omap3530
# undef cpu_is_omap3505
# undef cpu_is_omap3517
+# undef cpu_is_ti816x
# define cpu_is_omap3430() is_omap3430()
# define cpu_is_omap3503() (cpu_is_omap3430() && \
(!omap3_has_iva()) && \
@@ -345,6 +355,7 @@ IS_OMAP_TYPE(3517, 0x3517)
# define cpu_is_omap3517() is_omap3517()
# undef cpu_is_omap3630
# define cpu_is_omap3630() is_omap363x()
+# define cpu_is_ti816x() is_ti816x()
#endif
# if defined(CONFIG_ARCH_OMAP4)
@@ -389,9 +400,15 @@ IS_OMAP_TYPE(3517, 0x3517)
#define OMAP3505_REV(v) (OMAP35XX_CLASS | (0x3505 << 16) | (v << 8))
#define OMAP3517_REV(v) (OMAP35XX_CLASS | (0x3517 << 16) | (v << 8))
+#define TI816X_CLASS 0x81600034
+#define TI8168_REV_ES1_0 TI816X_CLASS
+#define TI8168_REV_ES1_1 (TI816X_CLASS | (OMAP_REVBITS_01 << 8))
+
#define OMAP443X_CLASS 0x44300044
-#define OMAP4430_REV_ES1_0 OMAP443X_CLASS
-#define OMAP4430_REV_ES2_0 0x44301044
+#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
+#define OMAP4430_REV_ES2_0 (OMAP443X_CLASS | (0x20 << 8))
+#define OMAP4430_REV_ES2_1 (OMAP443X_CLASS | (0x21 << 8))
+#define OMAP4430_REV_ES2_2 (OMAP443X_CLASS | (0x22 << 8))
/*
* omap_chip bits
@@ -419,11 +436,16 @@ IS_OMAP_TYPE(3517, 0x3517)
#define CHIP_IS_OMAP3630ES1_1 (1 << 9)
#define CHIP_IS_OMAP3630ES1_2 (1 << 10)
#define CHIP_IS_OMAP4430ES2 (1 << 11)
+#define CHIP_IS_OMAP4430ES2_1 (1 << 12)
+#define CHIP_IS_OMAP4430ES2_2 (1 << 13)
+#define CHIP_IS_TI816X (1 << 14)
#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430)
-#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1 | \
- CHIP_IS_OMAP4430ES2)
+#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1 | \
+ CHIP_IS_OMAP4430ES2 | \
+ CHIP_IS_OMAP4430ES2_1 | \
+ CHIP_IS_OMAP4430ES2_2)
/*
* "GE" here represents "greater than or equal to" in terms of ES
@@ -455,6 +477,7 @@ extern u32 omap3_features;
#define OMAP3_HAS_ISP BIT(4)
#define OMAP3_HAS_192MHZ_CLK BIT(5)
#define OMAP3_HAS_IO_WAKEUP BIT(6)
+#define OMAP3_HAS_SDRC BIT(7)
#define OMAP3_HAS_FEATURE(feat,flag) \
static inline unsigned int omap3_has_ ##feat(void) \
@@ -469,5 +492,6 @@ OMAP3_HAS_FEATURE(neon, NEON)
OMAP3_HAS_FEATURE(isp, ISP)
OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK)
OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP)
+OMAP3_HAS_FEATURE(sdrc, SDRC)
#endif
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
index 537f4e449f50..2fb057e1cb98 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
#include <asm/atomic.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
@@ -226,6 +227,21 @@ struct omap_dss_board_info {
struct omap_dss_device *default_device;
};
+#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
+/* Init with the board info */
+extern int omap_display_init(struct omap_dss_board_info *board_data);
+#else
+static inline int omap_display_init(struct omap_dss_board_info *board_data)
+{
+ return 0;
+}
+#endif
+
+struct omap_display_platform_data {
+ struct omap_dss_board_info *board_data;
+ /* TODO: Additional members to be added when PM is considered */
+};
+
struct omap_video_timings {
/* Unit: pixels */
u16 x_res;
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index dfa3aff9761b..d6c70d2f4030 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -3,6 +3,12 @@
*
* OMAP Dual-Mode Timers
*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Platform device conversion and hwmod support.
+ *
* Copyright (C) 2005 Nokia Corporation
* Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
* PWM and clock framework support by Timo Teras.
@@ -44,6 +50,11 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
+/*
+ * IP revision identifier so that Highlander IP
+ * in OMAP4 can be distinguished.
+ */
+#define OMAP_TIMER_IP_VERSION_1 0x1
struct omap_dm_timer;
extern struct omap_dm_timer *gptimer_wakeup;
extern struct sys_timer omap_timer;
diff --git a/arch/arm/plat-omap/include/plat/fpga.h b/arch/arm/plat-omap/include/plat/fpga.h
index ae39bcb3f5ba..bd3c6324ae1f 100644
--- a/arch/arm/plat-omap/include/plat/fpga.h
+++ b/arch/arm/plat-omap/include/plat/fpga.h
@@ -30,18 +30,18 @@ extern void omap1510_fpga_init_irq(void);
* ---------------------------------------------------------------------------
*/
/* maps in the FPGA registers and the ETHR registers */
-#define H2P2_DBG_FPGA_BASE IOMEM(0xE8000000) /* VA */
+#define H2P2_DBG_FPGA_BASE 0xE8000000 /* VA */
#define H2P2_DBG_FPGA_SIZE SZ_4K /* SIZE */
#define H2P2_DBG_FPGA_START 0x04000000 /* PA */
#define H2P2_DBG_FPGA_ETHR_START (H2P2_DBG_FPGA_START + 0x300)
-#define H2P2_DBG_FPGA_FPGA_REV (H2P2_DBG_FPGA_BASE + 0x10) /* FPGA Revision */
-#define H2P2_DBG_FPGA_BOARD_REV (H2P2_DBG_FPGA_BASE + 0x12) /* Board Revision */
-#define H2P2_DBG_FPGA_GPIO (H2P2_DBG_FPGA_BASE + 0x14) /* GPIO outputs */
-#define H2P2_DBG_FPGA_LEDS (H2P2_DBG_FPGA_BASE + 0x16) /* LEDs outputs */
-#define H2P2_DBG_FPGA_MISC_INPUTS (H2P2_DBG_FPGA_BASE + 0x18) /* Misc inputs */
-#define H2P2_DBG_FPGA_LAN_STATUS (H2P2_DBG_FPGA_BASE + 0x1A) /* LAN Status line */
-#define H2P2_DBG_FPGA_LAN_RESET (H2P2_DBG_FPGA_BASE + 0x1C) /* LAN Reset line */
+#define H2P2_DBG_FPGA_FPGA_REV IOMEM(H2P2_DBG_FPGA_BASE + 0x10) /* FPGA Revision */
+#define H2P2_DBG_FPGA_BOARD_REV IOMEM(H2P2_DBG_FPGA_BASE + 0x12) /* Board Revision */
+#define H2P2_DBG_FPGA_GPIO IOMEM(H2P2_DBG_FPGA_BASE + 0x14) /* GPIO outputs */
+#define H2P2_DBG_FPGA_LEDS IOMEM(H2P2_DBG_FPGA_BASE + 0x16) /* LEDs outputs */
+#define H2P2_DBG_FPGA_MISC_INPUTS IOMEM(H2P2_DBG_FPGA_BASE + 0x18) /* Misc inputs */
+#define H2P2_DBG_FPGA_LAN_STATUS IOMEM(H2P2_DBG_FPGA_BASE + 0x1A) /* LAN Status line */
+#define H2P2_DBG_FPGA_LAN_RESET IOMEM(H2P2_DBG_FPGA_BASE + 0x1C) /* LAN Reset line */
/* NOTE: most boards don't have a static mapping for the FPGA ... */
struct h2p2_dbg_fpga {
@@ -81,55 +81,55 @@ struct h2p2_dbg_fpga {
* OMAP-1510 FPGA
* ---------------------------------------------------------------------------
*/
-#define OMAP1510_FPGA_BASE IOMEM(0xE8000000) /* VA */
+#define OMAP1510_FPGA_BASE 0xE8000000 /* VA */
#define OMAP1510_FPGA_SIZE SZ_4K
#define OMAP1510_FPGA_START 0x08000000 /* PA */
/* Revision */
-#define OMAP1510_FPGA_REV_LOW (OMAP1510_FPGA_BASE + 0x0)
-#define OMAP1510_FPGA_REV_HIGH (OMAP1510_FPGA_BASE + 0x1)
+#define OMAP1510_FPGA_REV_LOW IOMEM(OMAP1510_FPGA_BASE + 0x0)
+#define OMAP1510_FPGA_REV_HIGH IOMEM(OMAP1510_FPGA_BASE + 0x1)
-#define OMAP1510_FPGA_LCD_PANEL_CONTROL (OMAP1510_FPGA_BASE + 0x2)
-#define OMAP1510_FPGA_LED_DIGIT (OMAP1510_FPGA_BASE + 0x3)
-#define INNOVATOR_FPGA_HID_SPI (OMAP1510_FPGA_BASE + 0x4)
-#define OMAP1510_FPGA_POWER (OMAP1510_FPGA_BASE + 0x5)
+#define OMAP1510_FPGA_LCD_PANEL_CONTROL IOMEM(OMAP1510_FPGA_BASE + 0x2)
+#define OMAP1510_FPGA_LED_DIGIT IOMEM(OMAP1510_FPGA_BASE + 0x3)
+#define INNOVATOR_FPGA_HID_SPI IOMEM(OMAP1510_FPGA_BASE + 0x4)
+#define OMAP1510_FPGA_POWER IOMEM(OMAP1510_FPGA_BASE + 0x5)
/* Interrupt status */
-#define OMAP1510_FPGA_ISR_LO (OMAP1510_FPGA_BASE + 0x6)
-#define OMAP1510_FPGA_ISR_HI (OMAP1510_FPGA_BASE + 0x7)
+#define OMAP1510_FPGA_ISR_LO IOMEM(OMAP1510_FPGA_BASE + 0x6)
+#define OMAP1510_FPGA_ISR_HI IOMEM(OMAP1510_FPGA_BASE + 0x7)
/* Interrupt mask */
-#define OMAP1510_FPGA_IMR_LO (OMAP1510_FPGA_BASE + 0x8)
-#define OMAP1510_FPGA_IMR_HI (OMAP1510_FPGA_BASE + 0x9)
+#define OMAP1510_FPGA_IMR_LO IOMEM(OMAP1510_FPGA_BASE + 0x8)
+#define OMAP1510_FPGA_IMR_HI IOMEM(OMAP1510_FPGA_BASE + 0x9)
/* Reset registers */
-#define OMAP1510_FPGA_HOST_RESET (OMAP1510_FPGA_BASE + 0xa)
-#define OMAP1510_FPGA_RST (OMAP1510_FPGA_BASE + 0xb)
-
-#define OMAP1510_FPGA_AUDIO (OMAP1510_FPGA_BASE + 0xc)
-#define OMAP1510_FPGA_DIP (OMAP1510_FPGA_BASE + 0xe)
-#define OMAP1510_FPGA_FPGA_IO (OMAP1510_FPGA_BASE + 0xf)
-#define OMAP1510_FPGA_UART1 (OMAP1510_FPGA_BASE + 0x14)
-#define OMAP1510_FPGA_UART2 (OMAP1510_FPGA_BASE + 0x15)
-#define OMAP1510_FPGA_OMAP1510_STATUS (OMAP1510_FPGA_BASE + 0x16)
-#define OMAP1510_FPGA_BOARD_REV (OMAP1510_FPGA_BASE + 0x18)
-#define OMAP1510P1_PPT_DATA (OMAP1510_FPGA_BASE + 0x100)
-#define OMAP1510P1_PPT_STATUS (OMAP1510_FPGA_BASE + 0x101)
-#define OMAP1510P1_PPT_CONTROL (OMAP1510_FPGA_BASE + 0x102)
-
-#define OMAP1510_FPGA_TOUCHSCREEN (OMAP1510_FPGA_BASE + 0x204)
-
-#define INNOVATOR_FPGA_INFO (OMAP1510_FPGA_BASE + 0x205)
-#define INNOVATOR_FPGA_LCD_BRIGHT_LO (OMAP1510_FPGA_BASE + 0x206)
-#define INNOVATOR_FPGA_LCD_BRIGHT_HI (OMAP1510_FPGA_BASE + 0x207)
-#define INNOVATOR_FPGA_LED_GRN_LO (OMAP1510_FPGA_BASE + 0x208)
-#define INNOVATOR_FPGA_LED_GRN_HI (OMAP1510_FPGA_BASE + 0x209)
-#define INNOVATOR_FPGA_LED_RED_LO (OMAP1510_FPGA_BASE + 0x20a)
-#define INNOVATOR_FPGA_LED_RED_HI (OMAP1510_FPGA_BASE + 0x20b)
-#define INNOVATOR_FPGA_CAM_USB_CONTROL (OMAP1510_FPGA_BASE + 0x20c)
-#define INNOVATOR_FPGA_EXP_CONTROL (OMAP1510_FPGA_BASE + 0x20d)
-#define INNOVATOR_FPGA_ISR2 (OMAP1510_FPGA_BASE + 0x20e)
-#define INNOVATOR_FPGA_IMR2 (OMAP1510_FPGA_BASE + 0x210)
+#define OMAP1510_FPGA_HOST_RESET IOMEM(OMAP1510_FPGA_BASE + 0xa)
+#define OMAP1510_FPGA_RST IOMEM(OMAP1510_FPGA_BASE + 0xb)
+
+#define OMAP1510_FPGA_AUDIO IOMEM(OMAP1510_FPGA_BASE + 0xc)
+#define OMAP1510_FPGA_DIP IOMEM(OMAP1510_FPGA_BASE + 0xe)
+#define OMAP1510_FPGA_FPGA_IO IOMEM(OMAP1510_FPGA_BASE + 0xf)
+#define OMAP1510_FPGA_UART1 IOMEM(OMAP1510_FPGA_BASE + 0x14)
+#define OMAP1510_FPGA_UART2 IOMEM(OMAP1510_FPGA_BASE + 0x15)
+#define OMAP1510_FPGA_OMAP1510_STATUS IOMEM(OMAP1510_FPGA_BASE + 0x16)
+#define OMAP1510_FPGA_BOARD_REV IOMEM(OMAP1510_FPGA_BASE + 0x18)
+#define OMAP1510P1_PPT_DATA IOMEM(OMAP1510_FPGA_BASE + 0x100)
+#define OMAP1510P1_PPT_STATUS IOMEM(OMAP1510_FPGA_BASE + 0x101)
+#define OMAP1510P1_PPT_CONTROL IOMEM(OMAP1510_FPGA_BASE + 0x102)
+
+#define OMAP1510_FPGA_TOUCHSCREEN IOMEM(OMAP1510_FPGA_BASE + 0x204)
+
+#define INNOVATOR_FPGA_INFO IOMEM(OMAP1510_FPGA_BASE + 0x205)
+#define INNOVATOR_FPGA_LCD_BRIGHT_LO IOMEM(OMAP1510_FPGA_BASE + 0x206)
+#define INNOVATOR_FPGA_LCD_BRIGHT_HI IOMEM(OMAP1510_FPGA_BASE + 0x207)
+#define INNOVATOR_FPGA_LED_GRN_LO IOMEM(OMAP1510_FPGA_BASE + 0x208)
+#define INNOVATOR_FPGA_LED_GRN_HI IOMEM(OMAP1510_FPGA_BASE + 0x209)
+#define INNOVATOR_FPGA_LED_RED_LO IOMEM(OMAP1510_FPGA_BASE + 0x20a)
+#define INNOVATOR_FPGA_LED_RED_HI IOMEM(OMAP1510_FPGA_BASE + 0x20b)
+#define INNOVATOR_FPGA_CAM_USB_CONTROL IOMEM(OMAP1510_FPGA_BASE + 0x20c)
+#define INNOVATOR_FPGA_EXP_CONTROL IOMEM(OMAP1510_FPGA_BASE + 0x20d)
+#define INNOVATOR_FPGA_ISR2 IOMEM(OMAP1510_FPGA_BASE + 0x20e)
+#define INNOVATOR_FPGA_IMR2 IOMEM(OMAP1510_FPGA_BASE + 0x210)
#define OMAP1510_FPGA_ETHR_START (OMAP1510_FPGA_START + 0x300)
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 85ded598853e..12b316165037 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -41,6 +41,8 @@
#define GPMC_NAND_ADDRESS 0x0000000b
#define GPMC_NAND_DATA 0x0000000c
+#define GPMC_ENABLE_IRQ 0x0000000d
+
/* ECC commands */
#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
@@ -78,6 +80,19 @@
#define WR_RD_PIN_MONITORING 0x00600000
#define GPMC_PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define GPMC_PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define GPMC_IRQ_FIFOEVENTENABLE 0x01
+#define GPMC_IRQ_COUNT_EVENT 0x02
+
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+
+enum omap_ecc {
+ /* 1-bit ecc: stored at end of spare area */
+ OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */
+ OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
+ /* 1-bit ecc: stored at beginning of spare area as romcode */
+ OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
+};
/*
* Note that all values in this struct are in nanoseconds except sync_clk
@@ -130,12 +145,11 @@ extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
extern void gpmc_cs_free(int cs);
extern int gpmc_cs_set_reserved(int cs, int reserved);
extern int gpmc_cs_reserved(int cs);
-extern int gpmc_prefetch_enable(int cs, int dma_mode,
+extern int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
unsigned int u32_count, int is_write);
extern int gpmc_prefetch_reset(int cs);
extern void omap3_gpmc_save_context(void);
extern void omap3_gpmc_restore_context(void);
-extern void gpmc_init(void);
extern int gpmc_read_status(int cmd);
extern int gpmc_cs_configure(int cs, int cmd, int wval);
extern int gpmc_nand_read(int cs, int cmd);
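For illustration, a caller of the updated prefetch API might pass the new FIFO-threshold argument like this; the chip-select, transfer length and polled (non-DMA) mode are placeholders:

/* Sketch only: 512-word polled read on CS0 with the maximum FIFO threshold. */
static int board_prefetch_read(void)
{
	int ret;

	ret = gpmc_prefetch_enable(0, PREFETCH_FIFOTHRESHOLD_MAX, 0, 512, 0);
	if (ret < 0)
		return ret;

	/* ... drain the prefetch FIFO here ... */

	return gpmc_prefetch_reset(0);
}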
diff --git a/arch/arm/plat-omap/include/plat/hardware.h b/arch/arm/plat-omap/include/plat/hardware.h
index d5b26adfb890..e87efe1499b8 100644
--- a/arch/arm/plat-omap/include/plat/hardware.h
+++ b/arch/arm/plat-omap/include/plat/hardware.h
@@ -286,5 +286,6 @@
#include <plat/omap24xx.h>
#include <plat/omap34xx.h>
#include <plat/omap44xx.h>
+#include <plat/ti816x.h>
#endif /* __ASM_ARCH_OMAP_HARDWARE_H */
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index ef4106c13183..d72ec85c97e6 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -259,7 +259,7 @@ struct omap_sdrc_params;
extern void omap1_map_common_io(void);
extern void omap1_init_common_hw(void);
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
extern void omap242x_map_common_io(void);
#else
static inline void omap242x_map_common_io(void)
@@ -267,7 +267,7 @@ static inline void omap242x_map_common_io(void)
}
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
extern void omap243x_map_common_io(void);
#else
static inline void omap243x_map_common_io(void)
@@ -283,6 +283,14 @@ static inline void omap34xx_map_common_io(void)
}
#endif
+#ifdef CONFIG_SOC_OMAPTI816X
+extern void omapti816x_map_common_io(void);
+#else
+static inline void omapti816x_map_common_io(void)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_OMAP4
extern void omap44xx_map_common_io(void);
#else
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 69230d685538..174f1b9c8c03 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -31,6 +31,7 @@ struct iommu {
struct clk *clk;
void __iomem *regbase;
struct device *dev;
+ void *isr_priv;
unsigned int refcount;
struct mutex iommu_lock; /* global for this whole object */
@@ -47,7 +48,7 @@ struct iommu {
struct list_head mmap;
struct mutex mmap_lock; /* protect mmap */
- int (*isr)(struct iommu *obj);
+ int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv);
void *ctx; /* iommu context: registers saved area */
u32 da_start;
@@ -109,6 +110,13 @@ struct iommu_platform_data {
u32 da_end;
};
+/* IOMMU errors */
+#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
+#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
+#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
+#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
+#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
+
#if defined(CONFIG_ARCH_OMAP1)
#error "iommu for this processor not implemented yet"
#else
@@ -154,11 +162,17 @@ extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
extern void flush_iotlb_all(struct iommu *obj);
extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
+extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
+ u32 **ppte);
extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
extern struct iommu *iommu_get(const char *name);
extern void iommu_put(struct iommu *obj);
+extern int iommu_set_isr(const char *name,
+ int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+ void *priv),
+ void *isr_priv);
extern void iommu_save_ctx(struct iommu *obj);
extern void iommu_restore_ctx(struct iommu *obj);
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 2910de921c52..1b911681e911 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -318,6 +318,7 @@
#define INT_34XX_PRCM_MPU_IRQ 11
#define INT_34XX_MCBSP1_IRQ 16
#define INT_34XX_MCBSP2_IRQ 17
+#define INT_34XX_GPMC_IRQ 20
#define INT_34XX_MCBSP3_IRQ 22
#define INT_34XX_MCBSP4_IRQ 23
#define INT_34XX_CAM_IRQ 24
@@ -411,7 +412,13 @@
#define TWL_IRQ_END TWL6030_IRQ_END
#endif
-#define NR_IRQS TWL_IRQ_END
+/* GPMC related */
+#define OMAP_GPMC_IRQ_BASE (TWL_IRQ_END)
+#define OMAP_GPMC_NR_IRQS 7
+#define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS)
+
+
+#define NR_IRQS OMAP_GPMC_IRQ_END
#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
diff --git a/arch/arm/plat-omap/include/plat/l3_2xxx.h b/arch/arm/plat-omap/include/plat/l3_2xxx.h
new file mode 100644
index 000000000000..b8b5641379b0
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/l3_2xxx.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/plat-omap/include/plat/l3_2xxx.h - L3 firewall definitions
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Sumit Semwal
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_L3_2XXX_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_L3_2XXX_H
+
+/* L3 CONNIDs */
+/* Display Sub system (DSS) */
+#define OMAP2_L3_CORE_FW_CONNID_DSS 8
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/l3_3xxx.h b/arch/arm/plat-omap/include/plat/l3_3xxx.h
new file mode 100644
index 000000000000..cde1938c5f82
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/l3_3xxx.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/plat-omap/include/plat/l3_3xxx.h - L3 firewall definitions
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Sumit Semwal
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_L3_3XXX_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_L3_3XXX_H
+
+/* L3 Initiator IDs */
+/* Display Sub system (DSS) */
+#define OMAP3_L3_CORE_FW_INIT_ID_DSS 29
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/l4_2xxx.h b/arch/arm/plat-omap/include/plat/l4_2xxx.h
new file mode 100644
index 000000000000..3f39cf8a35c6
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/l4_2xxx.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/plat-omap/include/plat/l4_2xxx.h - L4 firewall definitions
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Sumit Semwal
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_L4_2XXX_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_L4_2XXX_H
+
+/* L4 CORE */
+/* Display Sub system (DSS) */
+#define OMAP2420_L4_CORE_FW_DSS_CORE_REGION 28
+#define OMAP2420_L4_CORE_FW_DSS_DISPC_REGION 29
+#define OMAP2420_L4_CORE_FW_DSS_RFBI_REGION 30
+#define OMAP2420_L4_CORE_FW_DSS_VENC_REGION 31
+#define OMAP2420_L4_CORE_FW_DSS_TA_REGION 32
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/l4_3xxx.h b/arch/arm/plat-omap/include/plat/l4_3xxx.h
index 5e1949375422..881a858b1ffc 100644
--- a/arch/arm/plat-omap/include/plat/l4_3xxx.h
+++ b/arch/arm/plat-omap/include/plat/l4_3xxx.h
@@ -21,4 +21,14 @@
#define OMAP3_L4_CORE_FW_I2C3_REGION 73
#define OMAP3_L4_CORE_FW_I2C3_TA_REGION 74
+/* Display Sub system (DSS) */
+#define OMAP3_L4_CORE_FW_DSS_PROT_GROUP 2
+
+#define OMAP3_L4_CORE_FW_DSS_DSI_REGION 104
+#define OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION 3
+#define OMAP3_L4_CORE_FW_DSS_CORE_REGION 4
+#define OMAP3_L4_CORE_FW_DSS_DISPC_REGION 4
+#define OMAP3_L4_CORE_FW_DSS_RFBI_REGION 5
+#define OMAP3_L4_CORE_FW_DSS_VENC_REGION 6
+#define OMAP3_L4_CORE_FW_DSS_TA_REGION 7
#endif
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index b87d83ccd545..f8f690ab2997 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -37,6 +37,10 @@ static struct platform_device omap_mcbsp##port_nr = { \
.id = OMAP_MCBSP##port_nr, \
}
+#define MCBSP_CONFIG_TYPE2 0x2
+#define MCBSP_CONFIG_TYPE3 0x3
+#define MCBSP_CONFIG_TYPE4 0x4
+
#define OMAP7XX_MCBSP1_BASE 0xfffb1000
#define OMAP7XX_MCBSP2_BASE 0xfffb1800
@@ -48,32 +52,14 @@ static struct platform_device omap_mcbsp##port_nr = { \
#define OMAP1610_MCBSP2_BASE 0xfffb1000
#define OMAP1610_MCBSP3_BASE 0xe1017000
-#define OMAP24XX_MCBSP1_BASE 0x48074000
-#define OMAP24XX_MCBSP2_BASE 0x48076000
-#define OMAP2430_MCBSP3_BASE 0x4808c000
-#define OMAP2430_MCBSP4_BASE 0x4808e000
-#define OMAP2430_MCBSP5_BASE 0x48096000
-
-#define OMAP34XX_MCBSP1_BASE 0x48074000
-#define OMAP34XX_MCBSP2_BASE 0x49022000
-#define OMAP34XX_MCBSP2_ST_BASE 0x49028000
-#define OMAP34XX_MCBSP3_BASE 0x49024000
-#define OMAP34XX_MCBSP3_ST_BASE 0x4902A000
-#define OMAP34XX_MCBSP3_BASE 0x49024000
-#define OMAP34XX_MCBSP4_BASE 0x49026000
-#define OMAP34XX_MCBSP5_BASE 0x48096000
-
-#define OMAP44XX_MCBSP1_BASE 0x49022000
-#define OMAP44XX_MCBSP2_BASE 0x49024000
-#define OMAP44XX_MCBSP3_BASE 0x49026000
-#define OMAP44XX_MCBSP4_BASE 0x48096000
-
-#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+#ifdef CONFIG_ARCH_OMAP1
#define OMAP_MCBSP_REG_DRR2 0x00
#define OMAP_MCBSP_REG_DRR1 0x02
#define OMAP_MCBSP_REG_DXR2 0x04
#define OMAP_MCBSP_REG_DXR1 0x06
+#define OMAP_MCBSP_REG_DRR 0x02
+#define OMAP_MCBSP_REG_DXR 0x06
#define OMAP_MCBSP_REG_SPCR2 0x08
#define OMAP_MCBSP_REG_SPCR1 0x0a
#define OMAP_MCBSP_REG_RCR2 0x0c
@@ -106,13 +92,6 @@ static struct platform_device omap_mcbsp##port_nr = { \
#define OMAP_MCBSP_REG_XCCR 0x00
#define OMAP_MCBSP_REG_RCCR 0x00
-#define AUDIO_MCBSP_DATAWRITE (OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1)
-#define AUDIO_MCBSP_DATAREAD (OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1)
-
-#define AUDIO_MCBSP OMAP_MCBSP1
-#define AUDIO_DMA_TX OMAP_DMA_MCBSP1_TX
-#define AUDIO_DMA_RX OMAP_DMA_MCBSP1_RX
-
#else
#define OMAP_MCBSP_REG_DRR2 0x00
@@ -168,13 +147,6 @@ static struct platform_device omap_mcbsp##port_nr = { \
#define OMAP_ST_REG_SFIRCR 0x28
#define OMAP_ST_REG_SSELCR 0x2C
-#define AUDIO_MCBSP_DATAWRITE (OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1)
-#define AUDIO_MCBSP_DATAREAD (OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1)
-
-#define AUDIO_MCBSP OMAP_MCBSP2
-#define AUDIO_DMA_TX OMAP24XX_DMA_MCBSP2_TX
-#define AUDIO_DMA_RX OMAP24XX_DMA_MCBSP2_RX
-
#endif
/************************** McBSP SPCR1 bit definitions ***********************/
@@ -428,8 +400,9 @@ struct omap_mcbsp_platform_data {
#ifdef CONFIG_ARCH_OMAP3
/* Sidetone block for McBSP 2 and 3 */
unsigned long phys_base_st;
- u16 buffer_size;
#endif
+ u16 buffer_size;
+ unsigned int mcbsp_config_type;
};
struct omap_mcbsp_st_data {
@@ -445,6 +418,7 @@ struct omap_mcbsp_st_data {
struct omap_mcbsp {
struct device *dev;
unsigned long phys_base;
+ unsigned long phys_dma_base;
void __iomem *io_base;
u8 id;
u8 free;
@@ -471,7 +445,6 @@ struct omap_mcbsp {
/* Protect the field .free, while checking if the mcbsp is in use */
spinlock_t lock;
struct omap_mcbsp_platform_data *pdata;
- struct clk *iclk;
struct clk *fclk;
#ifdef CONFIG_ARCH_OMAP3
struct omap_mcbsp_st_data *st_data;
@@ -480,7 +453,17 @@ struct omap_mcbsp {
u16 max_rx_thres;
#endif
void *reg_cache;
+ unsigned int mcbsp_config_type;
};
+
+/**
+ * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod
+ * @sidetone: name of the sidetone device
+ */
+struct omap_mcbsp_dev_attr {
+ const char *sidetone;
+};
+
extern struct omap_mcbsp **mcbsp_ptr;
extern int omap_mcbsp_count, omap_mcbsp_cache_size;
@@ -488,8 +471,8 @@ extern int omap_mcbsp_count, omap_mcbsp_cache_size;
#define id_to_mcbsp_ptr(id) mcbsp_ptr[id];
int omap_mcbsp_init(void);
-void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
- int size);
+void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
+ struct omap_mcbsp_platform_data *config, int size);
void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
#ifdef CONFIG_ARCH_OMAP3
void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
@@ -539,6 +522,9 @@ int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type);
void omap2_mcbsp1_mux_clkr_src(u8 mux);
void omap2_mcbsp1_mux_fsr_src(u8 mux);
+int omap_mcbsp_dma_ch_params(unsigned int id, unsigned int stream);
+int omap_mcbsp_dma_reg_params(unsigned int id, unsigned int stream);
+
#ifdef CONFIG_ARCH_OMAP3
/* Sidetone specific API */
int omap_st_set_chgain(unsigned int id, int channel, s16 chgain);
diff --git a/arch/arm/plat-omap/include/plat/mcspi.h b/arch/arm/plat-omap/include/plat/mcspi.h
index 1254e4945b6f..3d51b18131cc 100644
--- a/arch/arm/plat-omap/include/plat/mcspi.h
+++ b/arch/arm/plat-omap/include/plat/mcspi.h
@@ -1,8 +1,19 @@
#ifndef _OMAP2_MCSPI_H
#define _OMAP2_MCSPI_H
+#define OMAP2_MCSPI_REV 0
+#define OMAP3_MCSPI_REV 1
+#define OMAP4_MCSPI_REV 2
+
+#define OMAP4_MCSPI_REG_OFFSET 0x100
+
struct omap2_mcspi_platform_config {
unsigned short num_cs;
+ unsigned int regs_offset;
+};
+
+struct omap2_mcspi_dev_attr {
+ unsigned short num_chipselect;
};
struct omap2_mcspi_device_config {
diff --git a/arch/arm/plat-omap/include/plat/memory.h b/arch/arm/plat-omap/include/plat/memory.h
index f8d922fb5584..e6720aa2d553 100644
--- a/arch/arm/plat-omap/include/plat/memory.h
+++ b/arch/arm/plat-omap/include/plat/memory.h
@@ -37,9 +37,9 @@
* Physical DRAM offset.
*/
#if defined(CONFIG_ARCH_OMAP1)
-#define PHYS_OFFSET UL(0x10000000)
+#define PLAT_PHYS_OFFSET UL(0x10000000)
#else
-#define PHYS_OFFSET UL(0x80000000)
+#define PLAT_PHYS_OFFSET UL(0x80000000)
#endif
/*
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index f57f36abb07e..f38fef9f1310 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -24,25 +24,19 @@
#define OMAP1_MMC2_BASE 0xfffb7c00 /* omap16xx only */
#define OMAP24XX_NR_MMC 2
-#define OMAP34XX_NR_MMC 3
-#define OMAP44XX_NR_MMC 5
#define OMAP2420_MMC_SIZE OMAP1_MMC_SIZE
-#define OMAP3_HSMMC_SIZE 0x200
-#define OMAP4_HSMMC_SIZE 0x1000
#define OMAP2_MMC1_BASE 0x4809c000
-#define OMAP2_MMC2_BASE 0x480b4000
-#define OMAP3_MMC3_BASE 0x480ad000
-#define OMAP4_MMC4_BASE 0x480d1000
-#define OMAP4_MMC5_BASE 0x480d5000
+
#define OMAP4_MMC_REG_OFFSET 0x100
-#define HSMMC5 (1 << 4)
-#define HSMMC4 (1 << 3)
-#define HSMMC3 (1 << 2)
-#define HSMMC2 (1 << 1)
-#define HSMMC1 (1 << 0)
#define OMAP_MMC_MAX_SLOTS 2
+#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(1)
+
+struct omap_mmc_dev_attr {
+ u8 flags;
+};
+
struct omap_mmc_platform_data {
/* back-link to device */
struct device *dev;
@@ -71,6 +65,9 @@ struct omap_mmc_platform_data {
u64 dma_mask;
+ /* Integrating attributes from the omap_hwmod layer */
+ u8 controller_flags;
+
/* Register offset deviation */
u16 reg_offset;
@@ -159,8 +156,7 @@ extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers);
-void omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
- int nr_controllers);
+void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
int omap_mmc_add(const char *name, int id, unsigned long base,
unsigned long size, unsigned int irq,
struct omap_mmc_platform_data *data);
@@ -169,8 +165,7 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers)
{
}
-static inline void omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
- int nr_controllers)
+static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
}
static inline int omap_mmc_add(const char *name, int id, unsigned long base,
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index ffd909fa5287..999ffba2690c 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -66,7 +66,7 @@
# error "OMAP1 and OMAP2PLUS can't be selected at the same time"
# endif
#endif
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
# ifdef OMAP_NAME
# undef MULTI_OMAP2
# define MULTI_OMAP2
@@ -74,7 +74,7 @@
# define OMAP_NAME omap2420
# endif
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
# ifdef OMAP_NAME
# undef MULTI_OMAP2
# define MULTI_OMAP2
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index 6562cd082bb1..d86d1ecf0068 100644
--- a/arch/arm/plat-omap/include/plat/nand.h
+++ b/arch/arm/plat-omap/include/plat/nand.h
@@ -8,8 +8,16 @@
* published by the Free Software Foundation.
*/
+#include <plat/gpmc.h>
#include <linux/mtd/partitions.h>
+enum nand_io {
+ NAND_OMAP_PREFETCH_POLLED = 0, /* prefetch polled mode, default */
+ NAND_OMAP_POLLED, /* polled mode, without prefetch */
+ NAND_OMAP_PREFETCH_DMA, /* prefetch enabled sDMA mode */
+ NAND_OMAP_PREFETCH_IRQ /* prefetch enabled irq mode */
+};
+
struct omap_nand_platform_data {
unsigned int options;
int cs;
@@ -20,8 +28,11 @@ struct omap_nand_platform_data {
int (*nand_setup)(void);
int (*dev_ready)(struct omap_nand_platform_data *);
int dma_channel;
+ int gpmc_irq;
+ enum nand_io xfer_type;
unsigned long phys_base;
int devsize;
+ enum omap_ecc ecc_opt;
};
/* minimum size for IO mapping */
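A hedged sketch of how a board file might fill the new NAND fields; the chip-select, the choice of polled prefetch and the GPMC IRQ value are placeholders, not taken from this patch:

static struct omap_nand_platform_data board_nand_data = {
	.cs		= 0,				/* placeholder chip-select */
	.dma_channel	= -1,				/* no sDMA channel reserved */
	.gpmc_irq	= OMAP_GPMC_IRQ_BASE,		/* first of the new GPMC IRQs */
	.xfer_type	= NAND_OMAP_PREFETCH_POLLED,
	.ecc_opt	= OMAP_ECC_HAMMING_CODE_DEFAULT,
};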
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 1eee85a8abb3..709fb672e1e9 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -1,7 +1,7 @@
/*
* omap_hwmod macros, structures
*
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
* Paul Walmsley
*
* Created in collaboration with (alphabetical order): Benoît Cousson,
@@ -30,6 +30,7 @@
#define __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_OMAP_HWMOD_H
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
@@ -90,6 +91,9 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
struct omap_hwmod_mux_info {
int nr_pads;
struct omap_device_pad *pads;
+ int nr_pads_dynamic;
+ struct omap_device_pad **pads_dynamic;
+ bool enabled;
};
/**
@@ -178,7 +182,8 @@ struct omap_hwmod_omap2_firewall {
#define ADDR_TYPE_RT (1 << 1)
/**
- * struct omap_hwmod_addr_space - MPU address space handled by the hwmod
+ * struct omap_hwmod_addr_space - address space handled by the hwmod
+ * @name: name of the address space
* @pa_start: starting physical address
* @pa_end: ending physical address
* @flags: (see omap_hwmod_addr_space.flags macros above)
@@ -187,6 +192,7 @@ struct omap_hwmod_omap2_firewall {
* structure. GPMC is one example.
*/
struct omap_hwmod_addr_space {
+ const char *name;
u32 pa_start;
u32 pa_end;
u8 flags;
@@ -370,8 +376,10 @@ struct omap_hwmod_omap4_prcm {
* of standby, rather than relying on module smart-standby
* HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
* SDRAM controller, etc. XXX probably belongs outside the main hwmod file
+ * XXX Should be HWMOD_SETUP_NO_RESET
* HWMOD_INIT_NO_IDLE: don't idle this module at boot - important for SDRAM
* controller, etc. XXX probably belongs outside the main hwmod file
+ * XXX Should be HWMOD_SETUP_NO_IDLE
* HWMOD_NO_AUTOIDLE: disable module autoidle (OCP_SYSCONFIG.AUTOIDLE)
* when module is enabled, rather than the default, which is to
* enable autoidle
@@ -535,11 +543,12 @@ struct omap_hwmod {
const struct omap_chip_id omap_chip;
};
-int omap_hwmod_init(struct omap_hwmod **ohs);
+int omap_hwmod_register(struct omap_hwmod **ohs);
struct omap_hwmod *omap_hwmod_lookup(const char *name);
int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
void *data);
-int omap_hwmod_late_init(void);
+
+int __init omap_hwmod_setup_one(const char *name);
int omap_hwmod_enable(struct omap_hwmod *oh);
int _omap_hwmod_enable(struct omap_hwmod *oh);
diff --git a/arch/arm/plat-omap/include/plat/onenand.h b/arch/arm/plat-omap/include/plat/onenand.h
index affe87e9ece7..cbe897ca7f9e 100644
--- a/arch/arm/plat-omap/include/plat/onenand.h
+++ b/arch/arm/plat-omap/include/plat/onenand.h
@@ -15,12 +15,20 @@
#define ONENAND_SYNC_READ (1 << 0)
#define ONENAND_SYNC_READWRITE (1 << 1)
+struct onenand_freq_info {
+ u16 maf_id;
+ u16 dev_id;
+ u16 ver_id;
+};
+
struct omap_onenand_platform_data {
int cs;
int gpio_irq;
struct mtd_partition *parts;
int nr_parts;
- int (*onenand_setup)(void __iomem *, int freq);
+ int (*onenand_setup)(void __iomem *, int *freq_ptr);
+ int (*get_freq)(const struct onenand_freq_info *freq_info,
+ bool *clk_dep);
int dma_channel;
u8 flags;
u8 regulator_can_sleep;
diff --git a/arch/arm/plat-omap/include/plat/prcm.h b/arch/arm/plat-omap/include/plat/prcm.h
index 2fdf8c80d390..267f43bb2a4e 100644
--- a/arch/arm/plat-omap/include/plat/prcm.h
+++ b/arch/arm/plat-omap/include/plat/prcm.h
@@ -28,7 +28,6 @@
#define __ASM_ARM_ARCH_OMAP_PRCM_H
u32 omap_prcm_get_reset_sources(void);
-void omap_prcm_arch_reset(char mode, const char *cmd);
int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest,
const char *name);
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index efd87c8dda69..925b12b500dc 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -124,8 +124,14 @@ struct omap_sdrc_params {
u32 mr;
};
-void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+void omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
+#else
+static inline void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
+ struct omap_sdrc_params *sdrc_cs1) {};
+#endif
+
int omap2_sdrc_get_params(unsigned long r,
struct omap_sdrc_params **sdrc_cs0,
struct omap_sdrc_params **sdrc_cs1);
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index cec5d56db2eb..2723f9166ea2 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -27,7 +27,7 @@
* 2. We assume printascii is called at least once before paging_init,
* and addruart has a chance to read OMAP_UART_INFO
*/
-#define OMAP_UART_INFO (PHYS_OFFSET + 0x3ffc)
+#define OMAP_UART_INFO (PLAT_PHYS_OFFSET + 0x3ffc)
/* OMAP1 serial ports */
#define OMAP1_UART1_BASE 0xfffb0000
@@ -51,6 +51,11 @@
#define OMAP4_UART3_BASE 0x48020000
#define OMAP4_UART4_BASE 0x4806e000
+/* TI816X serial ports */
+#define TI816X_UART1_BASE 0x48020000
+#define TI816X_UART2_BASE 0x48022000
+#define TI816X_UART3_BASE 0x48024000
+
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
#define ZOOM_UART_VIRT 0xfa400000
@@ -81,6 +86,9 @@
#define OMAP4UART2 OMAP2UART2
#define OMAP4UART3 43
#define OMAP4UART4 44
+#define TI816XUART1 81
+#define TI816XUART2 82
+#define TI816XUART3 83
#define ZOOM_UART 95 /* Only on zoom2/3 */
/* This is only used by 8250.c for omap1510 */
@@ -96,7 +104,6 @@
struct omap_board_data;
-extern void __init omap_serial_early_init(void);
extern void omap_serial_init(void);
extern void omap_serial_init_port(struct omap_board_data *bdata);
extern int omap_uart_can_sleep(void);
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index 9967d5e855c7..f500fc34d065 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -12,7 +12,19 @@
#define __ARCH_ARM_OMAP_SRAM_H
#ifndef __ASSEMBLY__
-extern void * omap_sram_push(void * start, unsigned long size);
+#include <asm/fncpy.h>
+
+extern void *omap_sram_push_address(unsigned long size);
+
+/* Macro to push a function to the internal SRAM, using the fncpy API */
+#define omap_sram_push(funcp, size) ({ \
+ typeof(&(funcp)) _res = NULL; \
+ void *_sram_address = omap_sram_push_address(size); \
+ if (_sram_address) \
+ _res = fncpy(_sram_address, &(funcp), size); \
+ _res; \
+})
+
extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
extern void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
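For context, a minimal sketch of how a caller would use the new fncpy-based omap_sram_push(); the helper name, its size symbol and the argument values are assumptions for illustration, not part of this patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <plat/sram.h>

/* assumed SRAM-safe helper and its size symbol (illustrative names) */
extern void my_sram_reprogram(u32 dpllctl, u32 ckctl);
extern unsigned long my_sram_reprogram_sz;

static void (*sram_reprogram)(u32 dpllctl, u32 ckctl);

static int __init my_sram_setup(void)
{
	/* fncpy() copies the code into SRAM and returns a correctly typed pointer */
	sram_reprogram = omap_sram_push(my_sram_reprogram, my_sram_reprogram_sz);
	if (!sram_reprogram)
		return -ENOMEM;

	sram_reprogram(0x2002, 0x0005);	/* now executes from internal SRAM */
	return 0;
}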
diff --git a/arch/arm/plat-omap/include/plat/system.h b/arch/arm/plat-omap/include/plat/system.h
index d0a119f735b4..c5fa9e929009 100644
--- a/arch/arm/plat-omap/include/plat/system.h
+++ b/arch/arm/plat-omap/include/plat/system.h
@@ -4,48 +4,14 @@
*/
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/clk.h>
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-
-#include <plat/prcm.h>
-
-#ifndef CONFIG_MACH_VOICEBLUE
-#define voiceblue_reset() do {} while (0)
-#else
-extern void voiceblue_reset(void);
-#endif
+#include <asm/proc-fns.h>
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void omap1_arch_reset(char mode, const char *cmd)
-{
- /*
- * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
- * "Global Software Reset Affects Traffic Controller Frequency".
- */
- if (cpu_is_omap5912()) {
- omap_writew(omap_readw(DPLL_CTL) & ~(1 << 4),
- DPLL_CTL);
- omap_writew(0x8, ARM_RSTCT1);
- }
-
- if (machine_is_voiceblue())
- voiceblue_reset();
- else
- omap_writew(1, ARM_RSTCT1);
-}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- if (!cpu_class_is_omap2())
- omap1_arch_reset(mode, cmd);
- else
- omap_prcm_arch_reset(mode, cmd);
-}
+extern void (*arch_reset)(char, const char *);
#endif
diff --git a/arch/arm/plat-omap/include/plat/ti816x.h b/arch/arm/plat-omap/include/plat/ti816x.h
new file mode 100644
index 000000000000..50510f5dda1e
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/ti816x.h
@@ -0,0 +1,27 @@
+/*
+ * This file contains the address data for various TI816X modules.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_TI816X_H
+#define __ASM_ARCH_TI816X_H
+
+#define L4_SLOW_TI816X_BASE 0x48000000
+
+#define TI816X_SCM_BASE 0x48140000
+#define TI816X_CTRL_BASE TI816X_SCM_BASE
+#define TI816X_PRCM_BASE 0x48180000
+
+#define TI816X_ARM_INTC_BASE 0x48200000
+
+#endif /* __ASM_ARCH_TI816X_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index ad98b85cae21..30b891c4a93f 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -93,6 +93,10 @@ static inline void flush(void)
#define DEBUG_LL_ZOOM(mach) \
_DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
+#define DEBUG_LL_TI816X(p, mach) \
+ _DEBUG_LL_ENTRY(mach, TI816X_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ TI816XUART##p)
+
static inline void __arch_decomp_setup(unsigned long arch_id)
{
int port = 0;
@@ -166,6 +170,9 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
DEBUG_LL_ZOOM(omap_zoom2);
DEBUG_LL_ZOOM(omap_zoom3);
+ /* TI8168 base boards using UART3 */
+ DEBUG_LL_TI816X(3, ti8168evm);
+
} while (0);
}
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 450a332f1009..077192759afc 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -91,6 +91,10 @@ extern int omap4430_phy_exit(struct device *dev);
#endif
+extern void am35x_musb_reset(void);
+extern void am35x_musb_phy_power(u8 on);
+extern void am35x_musb_clear_irq(void);
+extern void am35x_musb_set_mode(u8 musb_mode);
/*
* FIXME correct answer depends on hmc_mode,
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
index f1295fafcd31..f1ecfa9fc61d 100644
--- a/arch/arm/plat-omap/io.c
+++ b/arch/arm/plat-omap/io.c
@@ -85,7 +85,10 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
}
#endif
#ifdef CONFIG_ARCH_OMAP3
- if (cpu_is_omap34xx()) {
+ if (cpu_is_ti816x()) {
+ if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
+ return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
+ } else if (cpu_is_omap34xx()) {
if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index b1107c08da56..e3eb0380090a 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -104,6 +104,9 @@ static int iommu_enable(struct iommu *obj)
if (!obj)
return -EINVAL;
+ if (!arch_iommu)
+ return -ENODEV;
+
clk_enable(obj->clk);
err = arch_iommu->enable(obj);
@@ -780,25 +783,19 @@ static void iopgtable_clear_entry_all(struct iommu *obj)
*/
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
- u32 stat, da;
+ u32 da, errs;
u32 *iopgd, *iopte;
- int err = -EIO;
struct iommu *obj = data;
if (!obj->refcount)
return IRQ_NONE;
- /* Dynamic loading TLB or PTE */
- if (obj->isr)
- err = obj->isr(obj);
-
- if (!err)
- return IRQ_HANDLED;
-
clk_enable(obj->clk);
- stat = iommu_report_fault(obj, &da);
+ errs = iommu_report_fault(obj, &da);
clk_disable(obj->clk);
- if (!stat)
+
+ /* Fault callback or TLB/PTE Dynamic loading */
+ if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
return IRQ_HANDLED;
iommu_disable(obj);
@@ -806,15 +803,16 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
iopgd = iopgd_offset(obj, da);
if (!iopgd_is_table(*iopgd)) {
- dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
- da, iopgd, *iopgd);
+ dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
+ "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
return IRQ_NONE;
}
iopte = iopte_offset(iopgd, da);
- dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
- __func__, da, iopgd, *iopgd, iopte, *iopte);
+ dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
+ "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
+ iopte, *iopte);
return IRQ_NONE;
}
@@ -917,6 +915,33 @@ void iommu_put(struct iommu *obj)
}
EXPORT_SYMBOL_GPL(iommu_put);
+int iommu_set_isr(const char *name,
+ int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+ void *priv),
+ void *isr_priv)
+{
+ struct device *dev;
+ struct iommu *obj;
+
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+ device_match_by_alias);
+ if (!dev)
+ return -ENODEV;
+
+ obj = to_iommu(dev);
+ mutex_lock(&obj->iommu_lock);
+ if (obj->refcount != 0) {
+ mutex_unlock(&obj->iommu_lock);
+ return -EBUSY;
+ }
+ obj->isr = isr;
+ obj->isr_priv = isr_priv;
+ mutex_unlock(&obj->iommu_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_set_isr);
+
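A usage sketch for the new iommu_set_isr() hook (not part of this patch); the "isp" alias and the callback are illustrative. Returning zero from the callback marks the fault as handled, anything else falls through to the default reporting and iommu_disable() above:

static int my_iommu_isr(struct iommu *obj, u32 da, u32 iommu_errs, void *priv)
{
	pr_err("%s: fault at da 0x%08x, errs 0x%08x\n", obj->name, da, iommu_errs);
	return -EIO;	/* non-zero: let the default fault handling run */
}

static int __init my_client_init(void)
{
	/* must be called before iommu_get(), i.e. while the refcount is zero */
	return iommu_set_isr("isp", my_iommu_isr, NULL);
}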
/*
* OMAP Device MMU(IOMMU) detection
*/
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 459b319a9fad..69ddc9f76c13 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -32,7 +32,6 @@
#include <plat/mailbox.h>
-static struct workqueue_struct *mboxd;
static struct omap_mbox **mboxes;
static int mbox_configured;
@@ -197,7 +196,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
/* no more messages in the fifo. clear IRQ source. */
ack_mbox_irq(mbox, IRQ_RX);
nomem:
- queue_work(mboxd, &mbox->rxq->work);
+ schedule_work(&mbox->rxq->work);
}
static irqreturn_t mbox_interrupt(int irq, void *p)
@@ -307,7 +306,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
if (!--mbox->use_count) {
free_irq(mbox->irq, mbox);
tasklet_kill(&mbox->txq->tasklet);
- flush_work(&mbox->rxq->work);
+ flush_work_sync(&mbox->rxq->work);
mbox_queue_free(mbox->txq);
mbox_queue_free(mbox->rxq);
}
@@ -322,15 +321,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
{
- struct omap_mbox *mbox;
- int ret;
+ struct omap_mbox *_mbox, *mbox = NULL;
+ int i, ret;
if (!mboxes)
return ERR_PTR(-EINVAL);
- for (mbox = *mboxes; mbox; mbox++)
- if (!strcmp(mbox->name, name))
+ for (i = 0; (_mbox = mboxes[i]); i++) {
+ if (!strcmp(_mbox->name, name)) {
+ mbox = _mbox;
break;
+ }
+ }
if (!mbox)
return ERR_PTR(-ENOENT);
@@ -406,10 +408,6 @@ static int __init omap_mbox_init(void)
if (err)
return err;
- mboxd = create_workqueue("mboxd");
- if (!mboxd)
- return -ENOMEM;
-
/* kfifo size sanity check: alignment and minimal size */
mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
@@ -421,7 +419,6 @@ subsys_initcall(omap_mbox_init);
static void __exit omap_mbox_exit(void)
{
- destroy_workqueue(mboxd);
class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index b5a6e178a7f9..d598d9fd65ac 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -27,6 +27,8 @@
#include <plat/dma.h>
#include <plat/mcbsp.h>
+#include <plat/omap_device.h>
+#include <linux/pm_runtime.h>
/* XXX These "sideways" includes are a sign that something is wrong */
#include "../mach-omap2/cm2xxx_3xxx.h"
@@ -227,10 +229,83 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
}
EXPORT_SYMBOL(omap_mcbsp_config);
+/**
+ * omap_mcbsp_dma_ch_params - returns the dma channel number
+ * @id: mcbsp id
+ * @stream: indicates the direction of data flow (rx or tx)
+ *
+ * Returns the dma channel number for the rx channel or tx channel
+ * based on the value of @stream for the requested mcbsp given by @id
+ */
+int omap_mcbsp_dma_ch_params(unsigned int id, unsigned int stream)
+{
+ struct omap_mcbsp *mcbsp;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ if (stream)
+ return mcbsp->dma_rx_sync;
+ else
+ return mcbsp->dma_tx_sync;
+}
+EXPORT_SYMBOL(omap_mcbsp_dma_ch_params);
+
+/**
+ * omap_mcbsp_dma_reg_params - returns the address of mcbsp data register
+ * @id - mcbsp id
+ * @stream - indicates the direction of data flow (rx or tx)
+ *
+ * Returns the address of mcbsp data transmit register or data receive register
+ * to be used by DMA for transferring/receiving data based on the value of
+ * @stream for the requested mcbsp given by @id
+ */
+int omap_mcbsp_dma_reg_params(unsigned int id, unsigned int stream)
+{
+ struct omap_mcbsp *mcbsp;
+ int data_reg;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ data_reg = mcbsp->phys_dma_base;
+
+ if (mcbsp->mcbsp_config_type < MCBSP_CONFIG_TYPE2) {
+ if (stream)
+ data_reg += OMAP_MCBSP_REG_DRR1;
+ else
+ data_reg += OMAP_MCBSP_REG_DXR1;
+ } else {
+ if (stream)
+ data_reg += OMAP_MCBSP_REG_DRR;
+ else
+ data_reg += OMAP_MCBSP_REG_DXR;
+ }
+
+ return data_reg;
+}
+EXPORT_SYMBOL(omap_mcbsp_dma_reg_params);
+
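A short, hypothetical caller for the two new helpers (not part of this patch); only the relevant omap_dma_channel_params fields are shown and the function name is made up:

static int my_mcbsp_tx_dma_setup(unsigned int id,
				 struct omap_dma_channel_params *params)
{
	int sync, reg;

	sync = omap_mcbsp_dma_ch_params(id, 0);		/* stream 0 == tx */
	reg = omap_mcbsp_dma_reg_params(id, 0);		/* DXR address for tx */
	if (sync < 0 || reg < 0)
		return -ENODEV;

	params->trigger = sync;		/* DMA request line */
	params->dst_start = reg;	/* write samples to the McBSP DXR */
	return 0;
}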
#ifdef CONFIG_ARCH_OMAP3
+static struct omap_device *find_omap_device_by_dev(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ return container_of(pdev, struct omap_device, pdev);
+}
+
static void omap_st_on(struct omap_mcbsp *mcbsp)
{
unsigned int w;
+ struct omap_device *od;
+
+ od = find_omap_device_by_dev(mcbsp->dev);
/*
* Sidetone uses McBSP ICLK - which must not idle when sidetones
@@ -244,9 +319,6 @@ static void omap_st_on(struct omap_mcbsp *mcbsp)
w = MCBSP_READ(mcbsp, SSELCR);
MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
- w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
- MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
-
/* Enable Sidetone from Sidetone Core */
w = MCBSP_ST_READ(mcbsp, SSELCR);
MCBSP_ST_WRITE(mcbsp, SSELCR, w | ST_SIDETONEEN);
@@ -255,13 +327,13 @@ static void omap_st_on(struct omap_mcbsp *mcbsp)
static void omap_st_off(struct omap_mcbsp *mcbsp)
{
unsigned int w;
+ struct omap_device *od;
+
+ od = find_omap_device_by_dev(mcbsp->dev);
w = MCBSP_ST_READ(mcbsp, SSELCR);
MCBSP_ST_WRITE(mcbsp, SSELCR, w & ~(ST_SIDETONEEN));
- w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
- MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
-
w = MCBSP_READ(mcbsp, SSELCR);
MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
@@ -273,9 +345,9 @@ static void omap_st_off(struct omap_mcbsp *mcbsp)
static void omap_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir)
{
u16 val, i;
+ struct omap_device *od;
- val = MCBSP_ST_READ(mcbsp, SYSCONFIG);
- MCBSP_ST_WRITE(mcbsp, SYSCONFIG, val & ~(ST_AUTOIDLE));
+ od = find_omap_device_by_dev(mcbsp->dev);
val = MCBSP_ST_READ(mcbsp, SSELCR);
@@ -303,9 +375,9 @@ static void omap_st_chgain(struct omap_mcbsp *mcbsp)
{
u16 w;
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+ struct omap_device *od;
- w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
- MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
+ od = find_omap_device_by_dev(mcbsp->dev);
w = MCBSP_ST_READ(mcbsp, SSELCR);
@@ -648,48 +720,33 @@ EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode);
static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
{
+ struct omap_device *od;
+
+ od = find_omap_device_by_dev(mcbsp->dev);
/*
 * Enable wakeup behavior, smart idle and all wakeups
* REVISIT: some wakeups may be unnecessary
*/
if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
- u16 syscon;
-
- syscon = MCBSP_READ(mcbsp, SYSCON);
- syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
-
- if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
- syscon |= (ENAWAKEUP | SIDLEMODE(0x02) |
- CLOCKACTIVITY(0x02));
- MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
- } else {
- syscon |= SIDLEMODE(0x01);
- }
-
- MCBSP_WRITE(mcbsp, SYSCON, syscon);
+ MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
}
}
static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
{
+ struct omap_device *od;
+
+ od = find_omap_device_by_dev(mcbsp->dev);
+
/*
 * Disable wakeup behavior, smart idle and all wakeups
*/
if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
- u16 syscon;
-
- syscon = MCBSP_READ(mcbsp, SYSCON);
- syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
/*
* HW bug workaround - If no_idle mode is taken, we need to
* go to smart_idle before going to always_idle, or the
* device will not hit retention anymore.
*/
- syscon |= SIDLEMODE(0x02);
- MCBSP_WRITE(mcbsp, SYSCON, syscon);
-
- syscon &= ~(SIDLEMODE(0x03));
- MCBSP_WRITE(mcbsp, SYSCON, syscon);
MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
}
@@ -764,8 +821,7 @@ int omap_mcbsp_request(unsigned int id)
if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->request)
mcbsp->pdata->ops->request(id);
- clk_enable(mcbsp->iclk);
- clk_enable(mcbsp->fclk);
+ pm_runtime_get_sync(mcbsp->dev);
/* Do procedure specific to omap34xx arch, if applicable */
omap34xx_mcbsp_request(mcbsp);
@@ -813,8 +869,7 @@ err_clk_disable:
/* Do procedure specific to omap34xx arch, if applicable */
omap34xx_mcbsp_free(mcbsp);
- clk_disable(mcbsp->fclk);
- clk_disable(mcbsp->iclk);
+ pm_runtime_put_sync(mcbsp->dev);
spin_lock(&mcbsp->lock);
mcbsp->free = true;
@@ -844,8 +899,7 @@ void omap_mcbsp_free(unsigned int id)
/* Do procedure specific to omap34xx arch, if applicable */
omap34xx_mcbsp_free(mcbsp);
- clk_disable(mcbsp->fclk);
- clk_disable(mcbsp->iclk);
+ pm_runtime_put_sync(mcbsp->dev);
if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) {
/* Free IRQs */
@@ -1649,7 +1703,8 @@ static const struct attribute_group sidetone_attr_group = {
static int __devinit omap_st_add(struct omap_mcbsp *mcbsp)
{
- struct omap_mcbsp_platform_data *pdata = mcbsp->pdata;
+ struct platform_device *pdev;
+ struct resource *res;
struct omap_mcbsp_st_data *st_data;
int err;
@@ -1659,7 +1714,10 @@ static int __devinit omap_st_add(struct omap_mcbsp *mcbsp)
goto err1;
}
- st_data->io_base_st = ioremap(pdata->phys_base_st, SZ_4K);
+ pdev = container_of(mcbsp->dev, struct platform_device, dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone");
+ st_data->io_base_st = ioremap(res->start, resource_size(res));
if (!st_data->io_base_st) {
err = -ENOMEM;
goto err2;
@@ -1748,6 +1806,7 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
struct omap_mcbsp_platform_data *pdata = pdev->dev.platform_data;
struct omap_mcbsp *mcbsp;
int id = pdev->id - 1;
+ struct resource *res;
int ret = 0;
if (!pdata) {
@@ -1777,47 +1836,78 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
mcbsp->dma_tx_lch = -1;
mcbsp->dma_rx_lch = -1;
- mcbsp->phys_base = pdata->phys_base;
- mcbsp->io_base = ioremap(pdata->phys_base, SZ_4K);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "%s:mcbsp%d has invalid memory"
+ "resource\n", __func__, pdev->id);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+ mcbsp->phys_base = res->start;
+ omap_mcbsp_cache_size = resource_size(res);
+ mcbsp->io_base = ioremap(res->start, resource_size(res));
if (!mcbsp->io_base) {
ret = -ENOMEM;
goto err_ioremap;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (!res)
+ mcbsp->phys_dma_base = mcbsp->phys_base;
+ else
+ mcbsp->phys_dma_base = res->start;
+
/* Default I/O is IRQ based */
mcbsp->io_type = OMAP_MCBSP_IRQ_IO;
- mcbsp->tx_irq = pdata->tx_irq;
- mcbsp->rx_irq = pdata->rx_irq;
- mcbsp->dma_rx_sync = pdata->dma_rx_sync;
- mcbsp->dma_tx_sync = pdata->dma_tx_sync;
- mcbsp->iclk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(mcbsp->iclk)) {
- ret = PTR_ERR(mcbsp->iclk);
- dev_err(&pdev->dev, "unable to get ick: %d\n", ret);
- goto err_iclk;
+ mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
+ mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+
+ /* From OMAP4 there will be a single irq line */
+ if (mcbsp->tx_irq == -ENXIO)
+ mcbsp->tx_irq = platform_get_irq(pdev, 0);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!res) {
+ dev_err(&pdev->dev, "%s:mcbsp%d has invalid rx DMA channel\n",
+ __func__, pdev->id);
+ ret = -ENODEV;
+ goto err_res;
+ }
+ mcbsp->dma_rx_sync = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!res) {
+ dev_err(&pdev->dev, "%s:mcbsp%d has invalid tx DMA channel\n",
+ __func__, pdev->id);
+ ret = -ENODEV;
+ goto err_res;
}
+ mcbsp->dma_tx_sync = res->start;
mcbsp->fclk = clk_get(&pdev->dev, "fck");
if (IS_ERR(mcbsp->fclk)) {
ret = PTR_ERR(mcbsp->fclk);
dev_err(&pdev->dev, "unable to get fck: %d\n", ret);
- goto err_fclk;
+ goto err_res;
}
mcbsp->pdata = pdata;
mcbsp->dev = &pdev->dev;
mcbsp_ptr[id] = mcbsp;
+ mcbsp->mcbsp_config_type = pdata->mcbsp_config_type;
platform_set_drvdata(pdev, mcbsp);
+ pm_runtime_enable(mcbsp->dev);
/* Initialize mcbsp properties for OMAP34XX if needed / applicable */
omap34xx_device_init(mcbsp);
return 0;
-err_fclk:
- clk_put(mcbsp->iclk);
-err_iclk:
+err_res:
iounmap(mcbsp->io_base);
err_ioremap:
kfree(mcbsp);
@@ -1839,7 +1929,6 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev)
omap34xx_device_exit(mcbsp);
clk_put(mcbsp->fclk);
- clk_put(mcbsp->iclk);
iounmap(mcbsp->io_base);
kfree(mcbsp);
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index e26e50487d60..a3f50b34a90d 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -242,7 +242,14 @@ static void __init omap_map_sram(void)
omap_sram_size - SRAM_BOOTLOADER_SZ);
}
-void * omap_sram_push(void * start, unsigned long size)
+/*
+ * Memory allocator for SRAM: calculates the new ceiling address
+ * for pushing a function using the fncpy API.
+ *
+ * Note that fncpy requires the returned address to be aligned
+ * to an 8-byte boundary.
+ */
+void *omap_sram_push_address(unsigned long size)
{
if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
printk(KERN_ERR "Not enough space in SRAM\n");
@@ -250,10 +257,7 @@ void * omap_sram_push(void * start, unsigned long size)
}
omap_sram_ceil -= size;
- omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *));
- memcpy((void *)omap_sram_ceil, start, size);
- flush_icache_range((unsigned long)omap_sram_ceil,
- (unsigned long)(omap_sram_ceil + size));
+ omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, FNCPY_ALIGN);
return (void *)omap_sram_ceil;
}
@@ -312,7 +316,7 @@ u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass)
}
#endif
-#ifdef CONFIG_ARCH_OMAP2420
+#ifdef CONFIG_SOC_OMAP2420
static int __init omap242x_sram_init(void)
{
_omap2_sram_ddr_init = omap_sram_push(omap242x_sram_ddr_init,
@@ -333,7 +337,7 @@ static inline int omap242x_sram_init(void)
}
#endif
-#ifdef CONFIG_ARCH_OMAP2430
+#ifdef CONFIG_SOC_OMAP2430
static int __init omap243x_sram_init(void)
{
_omap2_sram_ddr_init = omap_sram_push(omap243x_sram_ddr_init,
@@ -405,20 +409,6 @@ static inline int omap34xx_sram_init(void)
}
#endif
-#ifdef CONFIG_ARCH_OMAP4
-static int __init omap44xx_sram_init(void)
-{
- printk(KERN_ERR "FIXME: %s not implemented\n", __func__);
-
- return -ENODEV;
-}
-#else
-static inline int omap44xx_sram_init(void)
-{
- return 0;
-}
-#endif
-
int __init omap_sram_init(void)
{
omap_detect_sram();
@@ -432,8 +422,6 @@ int __init omap_sram_init(void)
omap243x_sram_init();
else if (cpu_is_omap34xx())
omap34xx_sram_init();
- else if (cpu_is_omap44xx())
- omap44xx_sram_init();
return 0;
}
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S
index e73e3b6e88d2..fd7032f84ae7 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/plat-s3c24xx/sleep.S
@@ -44,23 +44,13 @@
/* s3c_cpu_save
*
* entry:
- * r0 = save address (virtual addr of s3c_sleep_save_phys)
+ * r1 = v:p offset
*/
ENTRY(s3c_cpu_save)
stmfd sp!, { r4 - r12, lr }
-
- @@ store co-processor registers
-
- mrc p15, 0, r4, c13, c0, 0 @ PID
- mrc p15, 0, r5, c3, c0, 0 @ Domain ID
- mrc p15, 0, r6, c2, c0, 0 @ translation table base address
- mrc p15, 0, r7, c1, c0, 0 @ control register
-
- stmia r0, { r4 - r13 }
-
- @@ write our state back to RAM
- bl s3c_pm_cb_flushcache
+ ldr r3, =resume_with_mmu
+ bl cpu_suspend
@@ jump to final code to send system to sleep
ldr r0, =pm_cpu_sleep
@@ -76,20 +66,6 @@ resume_with_mmu:
.ltorg
- @@ the next bits sit in the .data segment, even though they
- @@ happen to be code... the s3c_sleep_save_phys needs to be
- @@ accessed by the resume code before it can restore the MMU.
- @@ This means that the variable has to be close enough for the
- @@ code to read it... since the .text segment needs to be RO,
- @@ the data segment can be the only place to put this code.
-
- .data
-
- .global s3c_sleep_save_phys
-s3c_sleep_save_phys:
- .word 0
-
-
/* sleep magic, to allow the bootloader to check for a valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
@@ -100,10 +76,6 @@ s3c_sleep_save_phys:
/* s3c_cpu_resume
*
* resume code entry for bootloader to call
- *
- * we must put this code here in the data segment as we have no
- * other way of restoring the stack pointer after sleep, and we
- * must not write to the code segment (code is read-only)
*/
ENTRY(s3c_cpu_resume)
@@ -134,25 +106,4 @@ ENTRY(s3c_cpu_resume)
beq 1001b
#endif /* CONFIG_DEBUG_RESUME */
- mov r1, #0
- mcr p15, 0, r1, c8, c7, 0 @@ invalidate I & D TLBs
- mcr p15, 0, r1, c7, c7, 0 @@ invalidate I & D caches
-
- ldr r0, s3c_sleep_save_phys @ address of restore block
- ldmia r0, { r4 - r13 }
-
- mcr p15, 0, r4, c13, c0, 0 @ PID
- mcr p15, 0, r5, c3, c0, 0 @ Domain ID
- mcr p15, 0, r6, c2, c0, 0 @ translation table base
-
-#ifdef CONFIG_DEBUG_RESUME
- mov r3, #'R'
- strb r3, [ r2, #S3C2410_UTXH ]
-#endif
-
- ldr r2, =resume_with_mmu
- mcr p15, 0, r7, c1, c0, 0 @ turn on MMU, etc
- nop @ second-to-last before mmu
- mov pc, r2 @ go back to virtual address
-
- .ltorg
+ b cpu_resume
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 557f8c507f6d..6390ac728b35 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -7,10 +7,10 @@
config PLAT_S5P
bool
- depends on (ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310)
+ depends on (ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4)
default y
- select ARM_VIC if !ARCH_S5PV310
- select ARM_GIC if ARCH_S5PV310
+ select ARM_VIC if !ARCH_EXYNOS4
+ select ARM_GIC if ARCH_EXYNOS4
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select S3C_GPIO_TRACK
@@ -41,7 +41,7 @@ comment "System MMU"
config S5P_SYSTEM_MMU
bool "S5P SYSTEM MMU"
- depends on ARCH_S5PV310
+ depends on ARCH_EXYNOS4
help
Say Y here if you want to enable System MMU
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index 047d31c1bbd8..c3bfe9b13acf 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -1,7 +1,7 @@
/* linux/arch/arm/plat-s5p/cpu.c
*
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* S5P CPU Support
*
@@ -12,17 +12,20 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <mach/map.h>
+
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+
+#include <mach/map.h>
#include <mach/regs-clock.h>
+
#include <plat/cpu.h>
#include <plat/s5p6440.h>
#include <plat/s5p6442.h>
#include <plat/s5p6450.h>
#include <plat/s5pc100.h>
#include <plat/s5pv210.h>
-#include <plat/s5pv310.h>
+#include <plat/exynos4.h>
/* table of supported CPUs */
@@ -31,7 +34,7 @@ static const char name_s5p6442[] = "S5P6442";
static const char name_s5p6450[] = "S5P6450";
static const char name_s5pc100[] = "S5PC100";
static const char name_s5pv210[] = "S5PV210/S5PC110";
-static const char name_s5pv310[] = "S5PV310";
+static const char name_exynos4210[] = "EXYNOS4210";
static struct cpu_table cpu_ids[] __initdata = {
{
@@ -75,13 +78,13 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s5pv210_init,
.name = name_s5pv210,
}, {
- .idcode = 0x43200000,
+ .idcode = 0x43210000,
.idmask = 0xfffff000,
- .map_io = s5pv310_map_io,
- .init_clocks = s5pv310_init_clocks,
- .init_uarts = s5pv310_init_uarts,
- .init = s5pv310_init,
- .name = name_s5pv310,
+ .map_io = exynos4_map_io,
+ .init_clocks = exynos4_init_clocks,
+ .init_uarts = exynos4_init_uarts,
+ .init = exynos4_init,
+ .name = name_exynos4210,
},
};
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
index 6a7342886171..afaf87fdb93e 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-s5p/dev-uart.c
@@ -28,7 +28,7 @@
static struct resource s5p_uart0_resource[] = {
[0] = {
.start = S5P_PA_UART0,
- .end = S5P_PA_UART0 + S5P_SZ_UART,
+ .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = {
static struct resource s5p_uart1_resource[] = {
[0] = {
.start = S5P_PA_UART1,
- .end = S5P_PA_UART1 + S5P_SZ_UART,
+ .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = {
static struct resource s5p_uart2_resource[] = {
[0] = {
.start = S5P_PA_UART2,
- .end = S5P_PA_UART2 + S5P_SZ_UART,
+ .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
[0] = {
.start = S5P_PA_UART3,
- .end = S5P_PA_UART3 + S5P_SZ_UART,
+ .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
[0] = {
.start = S5P_PA_UART4,
- .end = S5P_PA_UART4 + S5P_SZ_UART,
+ .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
[0] = {
.start = S5P_PA_UART5,
- .end = S5P_PA_UART5 + S5P_SZ_UART,
+ .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
diff --git a/arch/arm/plat-s5p/include/plat/exynos4.h b/arch/arm/plat-s5p/include/plat/exynos4.h
new file mode 100644
index 000000000000..907caab53dcf
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/exynos4.h
@@ -0,0 +1,34 @@
+/* linux/arch/arm/plat-s5p/include/plat/exynos4.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header file for exynos4 cpu support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Common init code for EXYNOS4 related SoCs */
+
+extern void exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+extern void exynos4_register_clocks(void);
+extern void exynos4_setup_clocks(void);
+
+#ifdef CONFIG_CPU_EXYNOS4210
+
+extern int exynos4_init(void);
+extern void exynos4_init_irq(void);
+extern void exynos4_map_io(void);
+extern void exynos4_init_clocks(int xtal);
+extern struct sys_timer exynos4_timer;
+
+#define exynos4_init_uarts exynos4_common_init_uarts
+
+#else
+#define exynos4_init_clocks NULL
+#define exynos4_init_uarts NULL
+#define exynos4_map_io NULL
+#define exynos4_init NULL
+#endif
diff --git a/arch/arm/plat-s5p/include/plat/s5pv310.h b/arch/arm/plat-s5p/include/plat/s5pv310.h
deleted file mode 100644
index 769c991ceb37..000000000000
--- a/arch/arm/plat-s5p/include/plat/s5pv310.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* linux/arch/arm/plat-s5p/include/plat/s5pv310.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Header file for s5pv310 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5PV310 related SoCs */
-
-extern void s5pv310_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5pv310_register_clocks(void);
-extern void s5pv310_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5PV310
-
-extern int s5pv310_init(void);
-extern void s5pv310_init_irq(void);
-extern void s5pv310_map_io(void);
-extern void s5pv310_init_clocks(int xtal);
-extern struct sys_timer s5pv310_timer;
-
-#define s5pv310_init_uarts s5pv310_common_init_uarts
-
-#else
-#define s5pv310_init_clocks NULL
-#define s5pv310_init_uarts NULL
-#define s5pv310_map_io NULL
-#define s5pv310_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c
index 236ef8427d7d..3e4bd8147bf4 100644
--- a/arch/arm/plat-samsung/dev-ts.c
+++ b/arch/arm/plat-samsung/dev-ts.c
@@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
s3c_device_ts.dev.platform_data = npd;
}
-EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index b4d208b42957..e2b3ab997cff 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -1,5 +1,8 @@
/* arch/arm/plat-samsung/include/plat/devs.h
*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
@@ -96,15 +99,15 @@ extern struct platform_device s5pv210_device_iis1;
extern struct platform_device s5pv210_device_iis2;
extern struct platform_device s5pv210_device_spdif;
-extern struct platform_device s5pv310_device_ac97;
-extern struct platform_device s5pv310_device_pcm0;
-extern struct platform_device s5pv310_device_pcm1;
-extern struct platform_device s5pv310_device_pcm2;
-extern struct platform_device s5pv310_device_i2s0;
-extern struct platform_device s5pv310_device_i2s1;
-extern struct platform_device s5pv310_device_i2s2;
-extern struct platform_device s5pv310_device_spdif;
-extern struct platform_device s5pv310_device_pd[];
+extern struct platform_device exynos4_device_ac97;
+extern struct platform_device exynos4_device_pcm0;
+extern struct platform_device exynos4_device_pcm1;
+extern struct platform_device exynos4_device_pcm2;
+extern struct platform_device exynos4_device_i2s0;
+extern struct platform_device exynos4_device_i2s1;
+extern struct platform_device exynos4_device_i2s2;
+extern struct platform_device exynos4_device_spdif;
+extern struct platform_device exynos4_device_pd[];
extern struct platform_device s5p6442_device_pcm0;
extern struct platform_device s5p6442_device_pcm1;
@@ -137,7 +140,7 @@ extern struct platform_device s5p_device_fimc2;
extern struct platform_device s5p_device_mipi_csis0;
extern struct platform_device s5p_device_mipi_csis1;
-extern struct platform_device s5pv310_device_sysmmu;
+extern struct platform_device exynos4_device_sysmmu;
/* s3c2440 specific devices */
diff --git a/arch/arm/plat-samsung/include/plat/pd.h b/arch/arm/plat-samsung/include/plat/pd.h
index 5f0ad85783db..abb4bc32716a 100644
--- a/arch/arm/plat-samsung/include/plat/pd.h
+++ b/arch/arm/plat-samsung/include/plat/pd.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/plat-samsung/include/plat/pd.h
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@ struct samsung_pd_info {
void __iomem *base;
};
-enum s5pv310_pd_block {
+enum exynos4_pd_block {
PD_MFC,
PD_G3D,
PD_LCD0,
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 30518cc9a67c..937cc2ace517 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -52,13 +52,11 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
/* from sleep.S */
-extern int s3c_cpu_save(unsigned long *saveblk);
+extern int s3c_cpu_save(unsigned long *saveblk, long);
extern void s3c_cpu_resume(void);
extern void s3c2410_cpu_suspend(void);
-extern unsigned long s3c_sleep_save_phys;
-
/* sleep save info */
/**
@@ -181,13 +179,5 @@ extern void s3c_pm_restore_gpios(void);
*/
extern void s3c_pm_save_gpios(void);
-/**
- * s3c_pm_cb_flushcache - callback for assembly code
- *
- * Callback to issue flush_cache_all() as this call is
- * not a directly callable object.
- */
-extern void s3c_pm_cb_flushcache(void);
-
extern void s3c_pm_save_core(void);
extern void s3c_pm_restore_core(void);
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 5a41a0b69eec..b0bdf16549d5 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -1,4 +1,7 @@
-/* linux/arch/arm/plat-s3c/include/plat/sdhci.h
+/* linux/arch/arm/plat-samsung/include/plat/sdhci.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
@@ -119,10 +122,10 @@ extern void s5pv210_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
extern void s5pv210_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
extern void s5pv210_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
extern void s5pv210_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
-extern void s5pv310_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
-extern void s5pv310_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
-extern void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
-extern void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
+extern void exynos4_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
+extern void exynos4_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
+extern void exynos4_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
+extern void exynos4_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
/* S3C2416 SDHCI setup */
@@ -334,57 +337,57 @@ static inline void s5pv210_default_sdhci3(void) { }
#endif /* CONFIG_S5PV210_SETUP_SDHCI */
-/* S5PV310 SDHCI setup */
-#ifdef CONFIG_S5PV310_SETUP_SDHCI
-extern char *s5pv310_hsmmc_clksrcs[4];
+/* EXYNOS4 SDHCI setup */
+#ifdef CONFIG_EXYNOS4_SETUP_SDHCI
+extern char *exynos4_hsmmc_clksrcs[4];
-extern void s5pv310_setup_sdhci_cfg_card(struct platform_device *dev,
+extern void exynos4_setup_sdhci_cfg_card(struct platform_device *dev,
void __iomem *r,
struct mmc_ios *ios,
struct mmc_card *card);
-static inline void s5pv310_default_sdhci0(void)
+static inline void exynos4_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
- s3c_hsmmc0_def_platdata.cfg_gpio = s5pv310_setup_sdhci0_cfg_gpio;
- s3c_hsmmc0_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+ s3c_hsmmc0_def_platdata.clocks = exynos4_hsmmc_clksrcs;
+ s3c_hsmmc0_def_platdata.cfg_gpio = exynos4_setup_sdhci0_cfg_gpio;
+ s3c_hsmmc0_def_platdata.cfg_card = exynos4_setup_sdhci_cfg_card;
#endif
}
-static inline void s5pv310_default_sdhci1(void)
+static inline void exynos4_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
- s3c_hsmmc1_def_platdata.cfg_gpio = s5pv310_setup_sdhci1_cfg_gpio;
- s3c_hsmmc1_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+ s3c_hsmmc1_def_platdata.clocks = exynos4_hsmmc_clksrcs;
+ s3c_hsmmc1_def_platdata.cfg_gpio = exynos4_setup_sdhci1_cfg_gpio;
+ s3c_hsmmc1_def_platdata.cfg_card = exynos4_setup_sdhci_cfg_card;
#endif
}
-static inline void s5pv310_default_sdhci2(void)
+static inline void exynos4_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
- s3c_hsmmc2_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
- s3c_hsmmc2_def_platdata.cfg_gpio = s5pv310_setup_sdhci2_cfg_gpio;
- s3c_hsmmc2_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+ s3c_hsmmc2_def_platdata.clocks = exynos4_hsmmc_clksrcs;
+ s3c_hsmmc2_def_platdata.cfg_gpio = exynos4_setup_sdhci2_cfg_gpio;
+ s3c_hsmmc2_def_platdata.cfg_card = exynos4_setup_sdhci_cfg_card;
#endif
}
-static inline void s5pv310_default_sdhci3(void)
+static inline void exynos4_default_sdhci3(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC3
- s3c_hsmmc3_def_platdata.clocks = s5pv310_hsmmc_clksrcs;
- s3c_hsmmc3_def_platdata.cfg_gpio = s5pv310_setup_sdhci3_cfg_gpio;
- s3c_hsmmc3_def_platdata.cfg_card = s5pv310_setup_sdhci_cfg_card;
+ s3c_hsmmc3_def_platdata.clocks = exynos4_hsmmc_clksrcs;
+ s3c_hsmmc3_def_platdata.cfg_gpio = exynos4_setup_sdhci3_cfg_gpio;
+ s3c_hsmmc3_def_platdata.cfg_card = exynos4_setup_sdhci_cfg_card;
#endif
}
#else
-static inline void s5pv310_default_sdhci0(void) { }
-static inline void s5pv310_default_sdhci1(void) { }
-static inline void s5pv310_default_sdhci2(void) { }
-static inline void s5pv310_default_sdhci3(void) { }
+static inline void exynos4_default_sdhci0(void) { }
+static inline void exynos4_default_sdhci1(void) { }
+static inline void exynos4_default_sdhci2(void) { }
+static inline void exynos4_default_sdhci3(void) { }
-#endif /* CONFIG_S5PV310_SETUP_SDHCI */
+#endif /* CONFIG_EXYNOS4_SETUP_SDHCI */
#endif /* __PLAT_S3C_SDHCI_H */
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 02d531fb3f81..d5b58d31903c 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -241,8 +241,6 @@ void (*pm_cpu_sleep)(void);
static int s3c_pm_enter(suspend_state_t state)
{
- static unsigned long regs_save[16];
-
/* ensure the debug is initialised (if enabled) */
s3c_pm_debug_init();
@@ -266,12 +264,6 @@ static int s3c_pm_enter(suspend_state_t state)
return -EINVAL;
}
- /* store the physical address of the register recovery block */
-
- s3c_sleep_save_phys = virt_to_phys(regs_save);
-
- S3C_PMDBG("s3c_sleep_save_phys=0x%08lx\n", s3c_sleep_save_phys);
-
/* save all necessary core registers not covered by the drivers */
s3c_pm_save_gpios();
@@ -305,7 +297,7 @@ static int s3c_pm_enter(suspend_state_t state)
* we resume as it saves its own register state and restores it
* during the resume. */
- s3c_cpu_save(regs_save);
+ s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET);
/* restore the cpu state using the kernel's cpu init code. */
@@ -336,12 +328,6 @@ static int s3c_pm_enter(suspend_state_t state)
return 0;
}
-/* callback from assembly code */
-void s3c_pm_cb_flushcache(void)
-{
- flush_cache_all();
-}
-
static int s3c_pm_prepare(void)
{
/* prepare check area if configured */
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index eb89540aeda9..b4f340b8f1f1 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,6 @@
#
# Common support
-obj-y := clock.o padmux.o time.o
+obj-y := clock.o time.o
-obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o padmux.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
index ee4f90e534d8..bdbd7ec9cb6b 100644
--- a/arch/arm/plat-spear/clock.c
+++ b/arch/arm/plat-spear/clock.c
@@ -12,18 +12,25 @@
*/
#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <mach/misc_regs.h>
#include <plat/clock.h>
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(root_clks);
+#ifdef CONFIG_DEBUG_FS
+static LIST_HEAD(clocks);
+#endif
-static void propagate_rate(struct list_head *);
+static void propagate_rate(struct clk *, int on_init);
+#ifdef CONFIG_DEBUG_FS
+static int clk_debugfs_reparent(struct clk *);
+#endif
static int generic_clk_enable(struct clk *clk)
{
@@ -65,6 +72,104 @@ static struct clkops generic_clkops = {
.disable = generic_clk_disable,
};
+/* returns pclk_info of the currently programmed parent clock */
+static struct pclk_info *pclk_info_get(struct clk *clk)
+{
+ unsigned int val, i;
+ struct pclk_info *info = NULL;
+
+ val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
+ & clk->pclk_sel->pclk_sel_mask;
+
+ for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
+ if (clk->pclk_sel->pclk_info[i].pclk_val == val)
+ info = &clk->pclk_sel->pclk_info[i];
+ }
+
+ return info;
+}
+
+/*
+ * Update the pclk of clk and move its sibling node to the new
+ * parent's children list
+ */
+static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ list_del(&clk->sibling);
+ list_add(&clk->sibling, &pclk_info->pclk->children);
+
+ clk->pclk = pclk_info->pclk;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+#ifdef CONFIG_DEBUG_FS
+ clk_debugfs_reparent(clk);
+#endif
+}
+
+static void do_clk_disable(struct clk *clk)
+{
+ if (!clk)
+ return;
+
+ if (!clk->usage_count) {
+ WARN_ON(1);
+ return;
+ }
+
+ clk->usage_count--;
+
+ if (clk->usage_count == 0) {
+ /*
+ * Surely, there are no active children or direct users
+ * of this clock
+ */
+ if (clk->pclk)
+ do_clk_disable(clk->pclk);
+
+ if (clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+ }
+}
+
+static int do_clk_enable(struct clk *clk)
+{
+ int ret = 0;
+
+ if (!clk)
+ return -EFAULT;
+
+ if (clk->usage_count == 0) {
+ if (clk->pclk) {
+ ret = do_clk_enable(clk->pclk);
+ if (ret)
+ goto err;
+ }
+ if (clk->ops && clk->ops->enable) {
+ ret = clk->ops->enable(clk);
+ if (ret) {
+ if (clk->pclk)
+ do_clk_disable(clk->pclk);
+ goto err;
+ }
+ }
+ /*
+ * Since the clock is going to be used for the first
+ * time, recalculate its rate
+ */
+ if (clk->recalc) {
+ ret = clk->recalc(clk);
+ if (ret)
+ goto err;
+ }
+ }
+ clk->usage_count++;
+err:
+ return ret;
+}
+
/*
* clk_enable - inform the system when the clock source should be running.
* @clk: clock source
@@ -78,17 +183,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret = 0;
- if (!clk || IS_ERR(clk))
- return -EFAULT;
-
spin_lock_irqsave(&clocks_lock, flags);
- if (clk->usage_count == 0) {
- if (clk->ops && clk->ops->enable)
- ret = clk->ops->enable(clk);
- }
- clk->usage_count++;
+ ret = do_clk_enable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
-
return ret;
}
EXPORT_SYMBOL(clk_enable);
@@ -109,17 +206,8 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
- if (!clk || IS_ERR(clk))
- return;
-
- WARN_ON(clk->usage_count == 0);
-
spin_lock_irqsave(&clocks_lock, flags);
- clk->usage_count--;
- if (clk->usage_count == 0) {
- if (clk->ops && clk->ops->disable)
- clk->ops->disable(clk);
- }
+ do_clk_disable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
@@ -153,15 +241,14 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
int i, found = 0, val = 0;
unsigned long flags;
- if (!clk || IS_ERR(clk) || !parent || IS_ERR(parent))
+ if (!clk || !parent)
return -EFAULT;
- if (clk->usage_count)
- return -EBUSY;
- if (!clk->pclk_sel)
- return -EPERM;
if (clk->pclk == parent)
return 0;
+ if (!clk->pclk_sel)
+ return -EPERM;
+ /* check if requested parent is in clk parent list */
for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
if (clk->pclk_sel->pclk_info[i].pclk == parent) {
found = 1;
@@ -176,25 +263,58 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
/* reflect parent change in hardware */
val = readl(clk->pclk_sel->pclk_sel_reg);
val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
- val |= clk->pclk_sel->pclk_info[i].pclk_mask << clk->pclk_sel_shift;
+ val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift;
writel(val, clk->pclk_sel->pclk_sel_reg);
spin_unlock_irqrestore(&clocks_lock, flags);
/* reflect parent change in software */
- clk->recalc(clk);
- propagate_rate(&clk->children);
+ clk_reparent(clk, &clk->pclk_sel->pclk_info[i]);
+
+ propagate_rate(clk, 0);
return 0;
}
EXPORT_SYMBOL(clk_set_parent);
+/**
+ * clk_set_rate - set the clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!clk || !rate)
+ return -EFAULT;
+
+ if (clk->set_rate) {
+ spin_lock_irqsave(&clocks_lock, flags);
+ ret = clk->set_rate(clk, rate);
+ if (!ret)
+ /* if successful -> propagate */
+ propagate_rate(clk, 0);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+ } else if (clk->pclk) {
+ u32 mult = clk->div_factor ? clk->div_factor : 1;
+ ret = clk_set_rate(clk->pclk, mult * rate);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
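As a usage sketch (assumed caller, not part of this patch): a driver just calls clk_set_rate(); when the clock has no set_rate of its own the request is forwarded to its parent scaled by div_factor, as implemented above:

static int my_set_device_rate(struct device *dev, unsigned long hz)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, hz);	/* propagates to children on success */
	clk_put(clk);
	return ret;
}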
/* registers clock in platform clock framework */
void clk_register(struct clk_lookup *cl)
{
- struct clk *clk = cl->clk;
+ struct clk *clk;
unsigned long flags;
- if (!clk || IS_ERR(clk))
+ if (!cl || !cl->clk)
return;
+ clk = cl->clk;
spin_lock_irqsave(&clocks_lock, flags);
@@ -207,71 +327,173 @@ void clk_register(struct clk_lookup *cl)
/* root clock don't have any parents */
if (!clk->pclk && !clk->pclk_sel) {
list_add(&clk->sibling, &root_clks);
- /* add clocks with only one parent to parent's children list */
} else if (clk->pclk && !clk->pclk_sel) {
+ /* add clocks with only one parent to parent's children list */
list_add(&clk->sibling, &clk->pclk->children);
} else {
- /* add clocks with > 1 parent to 1st parent's children list */
- list_add(&clk->sibling,
- &clk->pclk_sel->pclk_info[0].pclk->children);
+ /* clocks with more than one parent */
+ struct pclk_info *pclk_info;
+
+ pclk_info = pclk_info_get(clk);
+ if (!pclk_info) {
+ pr_err("CLKDEV: invalid pclk info of clk with"
+ " %s dev_id and %s con_id\n",
+ cl->dev_id, cl->con_id);
+ } else {
+ clk->pclk = pclk_info->pclk;
+ list_add(&clk->sibling, &pclk_info->pclk->children);
+ }
}
+
spin_unlock_irqrestore(&clocks_lock, flags);
+ /* debugfs specific */
+#ifdef CONFIG_DEBUG_FS
+ list_add(&clk->node, &clocks);
+ clk->cl = cl;
+#endif
+
/* add clock to arm clockdev framework */
clkdev_add(cl);
}
/**
- * propagate_rate - recalculate and propagate all clocks in list head
+ * propagate_rate - recalculate and propagate all clocks to children
+ * @pclk: parent clock required to be propagated
+ * @on_init: flag for enabling clocks which are ENABLED_ON_INIT.
*
- * Recalculates all root clocks in list head, which if the clock's .recalc is
- * set correctly, should also propagate their rates.
+ * Recalculates all child clocks
*/
-static void propagate_rate(struct list_head *lhead)
+void propagate_rate(struct clk *pclk, int on_init)
{
- struct clk *clkp, *_temp;
+ struct clk *clk, *_temp;
+ int ret = 0;
- list_for_each_entry_safe(clkp, _temp, lhead, sibling) {
- if (clkp->recalc)
- clkp->recalc(clkp);
- propagate_rate(&clkp->children);
+ list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) {
+ if (clk->recalc) {
+ ret = clk->recalc(clk);
+ /*
+ * recalc returns an error if the clk output is not yet programmed;
+ * in that case configure the default rate.
+ */
+ if (ret && clk->set_rate)
+ clk->set_rate(clk, 0);
+ }
+ propagate_rate(clk, on_init);
+
+ if (!on_init)
+ continue;
+
+ /* Enable clks enabled on init, in software view */
+ if (clk->flags & ENABLED_ON_INIT)
+ do_clk_enable(clk);
}
}
-/* returns current programmed clocks clock info structure */
-static struct pclk_info *pclk_info_get(struct clk *clk)
+/**
+ * round_rate_index - return closest programmable rate index in rate_config tbl
+ * @clk: ptr to clock structure
+ * @drate: desired rate
+ * @rate: final rate will be returned in this variable only.
+ *
+ * Finds index in rate_config for highest clk rate which is less than
+ * requested rate. If there is no clk rate lesser than requested rate then
+ * -EINVAL is returned. This routine assumes that rate_config is written
+ * in incrementing order of clk rates.
+ * If drate passed is zero then default rate is programmed.
+ */
+static int
+round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate)
{
- unsigned int mask, i;
- unsigned long flags;
- struct pclk_info *info = NULL;
+ unsigned long tmp = 0, prev_rate = 0;
+ int index;
- spin_lock_irqsave(&clocks_lock, flags);
- mask = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
- & clk->pclk_sel->pclk_sel_mask;
+ if (!clk->calc_rate)
+ return -EFAULT;
- for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
- if (clk->pclk_sel->pclk_info[i].pclk_mask == mask)
- info = &clk->pclk_sel->pclk_info[i];
+ if (!drate)
+ return -EINVAL;
+
+ /*
+ * This loop ends on two conditions:
+ * - as soon as clk is found with rate greater than requested rate.
+ * - if all clks in rate_config are smaller than requested rate.
+ */
+ for (index = 0; index < clk->rate_config.count; index++) {
+ prev_rate = tmp;
+ tmp = clk->calc_rate(clk, index);
+ if (drate < tmp) {
+ index--;
+ break;
+ }
}
- spin_unlock_irqrestore(&clocks_lock, flags);
+ /* return if can't find suitable clock */
+ if (index < 0) {
+ index = -EINVAL;
+ *rate = 0;
+ } else if (index == clk->rate_config.count) {
+ /* program with highest clk rate possible */
+ index = clk->rate_config.count - 1;
+ *rate = tmp;
+ } else
+ *rate = prev_rate;
- return info;
+ return index;
}
-/*
- * Set pclk as cclk's parent and add clock sibling node to current parents
- * children list
+/**
+ * clk_round_rate - adjust a rate to the exact rate a clock can provide
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * Returns rounded clock rate in Hz, or negative errno.
*/
-static void change_parent(struct clk *cclk, struct clk *pclk)
+long clk_round_rate(struct clk *clk, unsigned long drate)
{
- unsigned long flags;
+ long rate = 0;
+ int index;
+
+ /*
+ * propagate call to a parent that supports calc_rate. A similar approach is
+ * used in clk_set_rate.
+ */
+ if (!clk->calc_rate) {
+ u32 mult;
+ if (!clk->pclk)
+ return clk->rate;
+
+ mult = clk->div_factor ? clk->div_factor : 1;
+ return clk_round_rate(clk->pclk, mult * drate) / mult;
+ }
- spin_lock_irqsave(&clocks_lock, flags);
- list_del(&cclk->sibling);
- list_add(&cclk->sibling, &pclk->children);
+ index = round_rate_index(clk, drate, &rate);
+ if (index >= 0)
+ return rate;
+ else
+ return index;
+}
+EXPORT_SYMBOL(clk_round_rate);
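A minimal sketch (assumed caller) of the intended pairing of clk_round_rate() with clk_set_rate(): probe the closest achievable rate first, then commit to it:

static int my_pick_and_set_rate(struct clk *clk, unsigned long wanted)
{
	long rounded = clk_round_rate(clk, wanted);

	/* negative return: no table entry at or below the requested rate */
	if (rounded < 0)
		return rounded;

	return clk_set_rate(clk, rounded);
}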
- cclk->pclk = pclk;
- spin_unlock_irqrestore(&clocks_lock, flags);
+/* All functions below are called with the lock held */
+
+/*
+ * Calculates pll clk rate for specific value of mode, m, n and p
+ *
+ * In normal mode
+ * rate = (2 * M[15:8] * Fin)/(N * 2^P)
+ *
+ * In Dithered mode
+ * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
+ */
+unsigned long pll_calc_rate(struct clk *clk, int index)
+{
+ unsigned long rate = clk->pclk->rate;
+ struct pll_rate_tbl *tbls = clk->rate_config.tbls;
+ unsigned int mode;
+
+ mode = tbls[index].mode ? 256 : 1;
+ return (((2 * rate / 10000) * tbls[index].m) /
+ (mode * tbls[index].n * (1 << tbls[index].p))) * 10000;
}
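As a worked example of the normal-mode equation (values assumed for illustration, not taken from this patch): with Fin = 24 MHz, M = 133, N = 12 and P = 1, rate = (2 * 133 * 24000000) / (12 * 2^1) = 266 MHz, which is also what pll_calc_rate() returns after its /10000 and *10000 scaling.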
/*
@@ -283,47 +505,146 @@ static void change_parent(struct clk *cclk, struct clk *pclk)
* In Dithered mode
* rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
*/
-void pll1_clk_recalc(struct clk *clk)
+int pll_clk_recalc(struct clk *clk)
{
struct pll_clk_config *config = clk->private_data;
unsigned int num = 2, den = 0, val, mode = 0;
- unsigned long flags;
- spin_lock_irqsave(&clocks_lock, flags);
- mode = (readl(config->mode_reg) >> PLL_MODE_SHIFT) &
- PLL_MODE_MASK;
+ mode = (readl(config->mode_reg) >> config->masks->mode_shift) &
+ config->masks->mode_mask;
val = readl(config->cfg_reg);
/* calculate denominator */
- den = (val >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+ den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask;
den = 1 << den;
- den *= (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+ den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask;
/* calculate numerator & denominator */
if (!mode) {
/* Normal mode */
- num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+ num *= (val >> config->masks->norm_fdbk_m_shift) &
+ config->masks->norm_fdbk_m_mask;
} else {
/* Dithered mode */
- num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+ num *= (val >> config->masks->dith_fdbk_m_shift) &
+ config->masks->dith_fdbk_m_mask;
den *= 256;
}
+ if (!den)
+ return -EINVAL;
+
clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
- spin_unlock_irqrestore(&clocks_lock, flags);
+ return 0;
+}
+
+/*
+ * Configures new clock rate of pll
+ */
+int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate)
+{
+ struct pll_rate_tbl *tbls = clk->rate_config.tbls;
+ struct pll_clk_config *config = clk->private_data;
+ unsigned long val, rate;
+ int i;
+
+ i = round_rate_index(clk, desired_rate, &rate);
+ if (i < 0)
+ return i;
+
+ val = readl(config->mode_reg) &
+ ~(config->masks->mode_mask << config->masks->mode_shift);
+ val |= (tbls[i].mode & config->masks->mode_mask) <<
+ config->masks->mode_shift;
+ writel(val, config->mode_reg);
+
+ val = readl(config->cfg_reg) &
+ ~(config->masks->div_p_mask << config->masks->div_p_shift);
+ val |= (tbls[i].p & config->masks->div_p_mask) <<
+ config->masks->div_p_shift;
+ val &= ~(config->masks->div_n_mask << config->masks->div_n_shift);
+ val |= (tbls[i].n & config->masks->div_n_mask) <<
+ config->masks->div_n_shift;
+ val &= ~(config->masks->dith_fdbk_m_mask <<
+ config->masks->dith_fdbk_m_shift);
+ if (tbls[i].mode)
+ val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) <<
+ config->masks->dith_fdbk_m_shift;
+ else
+ val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) <<
+ config->masks->norm_fdbk_m_shift;
+
+ writel(val, config->cfg_reg);
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+/*
+ * Calculates the ahb/apb bus clk rate for a specific value of div
+ */
+unsigned long bus_calc_rate(struct clk *clk, int index)
+{
+ unsigned long rate = clk->pclk->rate;
+ struct bus_rate_tbl *tbls = clk->rate_config.tbls;
+
+ return rate / (tbls[index].div + 1);
}
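+/*
+ * For example (assumed values): with a parent running at 332 MHz and a table
+ * entry with div = 1, bus_calc_rate() returns 332 MHz / (1 + 1) = 166 MHz.
+ */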
/* calculates current programmed rate of ahb or apb bus */
-void bus_clk_recalc(struct clk *clk)
+int bus_clk_recalc(struct clk *clk)
{
struct bus_clk_config *config = clk->private_data;
unsigned int div;
- unsigned long flags;
- spin_lock_irqsave(&clocks_lock, flags);
- div = ((readl(config->reg) >> config->shift) & config->mask) + 1;
+ div = ((readl(config->reg) >> config->masks->shift) &
+ config->masks->mask) + 1;
+
+ if (!div)
+ return -EINVAL;
+
clk->rate = (unsigned long)clk->pclk->rate / div;
- spin_unlock_irqrestore(&clocks_lock, flags);
+ return 0;
+}
+
+/* Configures new clock rate of AHB OR APB bus */
+int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate)
+{
+ struct bus_rate_tbl *tbls = clk->rate_config.tbls;
+ struct bus_clk_config *config = clk->private_data;
+ unsigned long val, rate;
+ int i;
+
+ i = round_rate_index(clk, desired_rate, &rate);
+ if (i < 0)
+ return i;
+
+ val = readl(config->reg) &
+ ~(config->masks->mask << config->masks->shift);
+ val |= (tbls[i].div & config->masks->mask) << config->masks->shift;
+ writel(val, config->reg);
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+/*
+ * Calculates the aux clk rate for given values of eq, x and y
+ *
+ * Fout from synthesizer can be given from two equations:
+ * Fout1 = (Fin * X/Y)/2 EQ1
+ * Fout2 = Fin * X/Y EQ2
+ */
+unsigned long aux_calc_rate(struct clk *clk, int index)
+{
+ unsigned long rate = clk->pclk->rate;
+ struct aux_rate_tbl *tbls = clk->rate_config.tbls;
+ u8 eq = tbls[index].eq ? 1 : 2;
+
+ return (((rate/10000) * tbls[index].xscale) /
+ (tbls[index].yscale * eq)) * 10000;
}
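+/*
+ * Worked example with assumed values: for Fin = 48 MHz, X = 1, Y = 2,
+ * EQ1 gives (48 MHz * 1/2) / 2 = 12 MHz while EQ2 gives 48 MHz * 1/2 = 24 MHz.
+ */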
/*
@@ -336,44 +657,76 @@ void bus_clk_recalc(struct clk *clk)
*
* Selection of eqn 1 or 2 is programmed in register
*/
-void aux_clk_recalc(struct clk *clk)
+int aux_clk_recalc(struct clk *clk)
{
struct aux_clk_config *config = clk->private_data;
- struct pclk_info *pclk_info = NULL;
unsigned int num = 1, den = 1, val, eqn;
- unsigned long flags;
- /* get current programmed parent */
- pclk_info = pclk_info_get(clk);
- if (!pclk_info) {
- spin_lock_irqsave(&clocks_lock, flags);
- clk->pclk = NULL;
- clk->rate = 0;
- spin_unlock_irqrestore(&clocks_lock, flags);
- return;
- }
+ val = readl(config->synth_reg);
- change_parent(clk, pclk_info->pclk);
+ eqn = (val >> config->masks->eq_sel_shift) &
+ config->masks->eq_sel_mask;
+ if (eqn == config->masks->eq1_mask)
+ den *= 2;
- spin_lock_irqsave(&clocks_lock, flags);
- if (pclk_info->scalable) {
- val = readl(config->synth_reg);
+ /* calculate numerator */
+ num = (val >> config->masks->xscale_sel_shift) &
+ config->masks->xscale_sel_mask;
- eqn = (val >> AUX_EQ_SEL_SHIFT) & AUX_EQ_SEL_MASK;
- if (eqn == AUX_EQ1_SEL)
- den *= 2;
+ /* calculate denominator */
+ den *= (val >> config->masks->yscale_sel_shift) &
+ config->masks->yscale_sel_mask;
- /* calculate numerator */
- num = (val >> AUX_XSCALE_SHIFT) & AUX_XSCALE_MASK;
+ if (!den)
+ return -EINVAL;
- /* calculate denominator */
- den *= (val >> AUX_YSCALE_SHIFT) & AUX_YSCALE_MASK;
- val = (((clk->pclk->rate/10000) * num) / den) * 10000;
- } else
- val = clk->pclk->rate;
+ clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
+ return 0;
+}
- clk->rate = val;
- spin_unlock_irqrestore(&clocks_lock, flags);
+/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA */
+int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate)
+{
+ struct aux_rate_tbl *tbls = clk->rate_config.tbls;
+ struct aux_clk_config *config = clk->private_data;
+ unsigned long val, rate;
+ int i;
+
+ i = round_rate_index(clk, desired_rate, &rate);
+ if (i < 0)
+ return i;
+
+ val = readl(config->synth_reg) &
+ ~(config->masks->eq_sel_mask << config->masks->eq_sel_shift);
+ val |= (tbls[i].eq & config->masks->eq_sel_mask) <<
+ config->masks->eq_sel_shift;
+ val &= ~(config->masks->xscale_sel_mask <<
+ config->masks->xscale_sel_shift);
+ val |= (tbls[i].xscale & config->masks->xscale_sel_mask) <<
+ config->masks->xscale_sel_shift;
+ val &= ~(config->masks->yscale_sel_mask <<
+ config->masks->yscale_sel_shift);
+ val |= (tbls[i].yscale & config->masks->yscale_sel_mask) <<
+ config->masks->yscale_sel_shift;
+ writel(val, config->synth_reg);
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+/*
+ * Calculates gpt clk rate for different values of mscale and nscale
+ *
+ * Fout= Fin/((2 ^ (N+1)) * (M+1))
+ */
+unsigned long gpt_calc_rate(struct clk *clk, int index)
+{
+ unsigned long rate = clk->pclk->rate;
+ struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
+
+ return rate / ((1 << (tbls[index].nscale + 1)) *
+ (tbls[index].mscale + 1));
}
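+/*
+ * Worked example with assumed values: for Fin = 48 MHz, N = 1 and M = 2,
+ * Fout = 48 MHz / ((2 ^ (1 + 1)) * (2 + 1)) = 48 MHz / 12 = 4 MHz.
+ */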
/*
@@ -381,46 +734,142 @@ void aux_clk_recalc(struct clk *clk)
* Fout from synthesizer can be given from below equations:
* Fout= Fin/((2 ^ (N+1)) * (M+1))
*/
-void gpt_clk_recalc(struct clk *clk)
+int gpt_clk_recalc(struct clk *clk)
{
- struct aux_clk_config *config = clk->private_data;
- struct pclk_info *pclk_info = NULL;
+ struct gpt_clk_config *config = clk->private_data;
unsigned int div = 1, val;
- unsigned long flags;
- pclk_info = pclk_info_get(clk);
- if (!pclk_info) {
- spin_lock_irqsave(&clocks_lock, flags);
- clk->pclk = NULL;
- clk->rate = 0;
- spin_unlock_irqrestore(&clocks_lock, flags);
- return;
- }
-
- change_parent(clk, pclk_info->pclk);
+ val = readl(config->synth_reg);
+ div += (val >> config->masks->mscale_sel_shift) &
+ config->masks->mscale_sel_mask;
+ div *= 1 << (((val >> config->masks->nscale_sel_shift) &
+ config->masks->nscale_sel_mask) + 1);
- spin_lock_irqsave(&clocks_lock, flags);
- if (pclk_info->scalable) {
- val = readl(config->synth_reg);
- div += (val >> GPT_MSCALE_SHIFT) & GPT_MSCALE_MASK;
- div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
- }
+ if (!div)
+ return -EINVAL;
clk->rate = (unsigned long)clk->pclk->rate / div;
- spin_unlock_irqrestore(&clocks_lock, flags);
+ return 0;
+}
+
+/* Configures new clock rate of the general purpose timer (gpt) synthesizer */
+int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate)
+{
+ struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
+ struct gpt_clk_config *config = clk->private_data;
+ unsigned long val, rate;
+ int i;
+
+ i = round_rate_index(clk, desired_rate, &rate);
+ if (i < 0)
+ return i;
+
+ val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask <<
+ config->masks->mscale_sel_shift);
+ val |= (tbls[i].mscale & config->masks->mscale_sel_mask) <<
+ config->masks->mscale_sel_shift;
+ val &= ~(config->masks->nscale_sel_mask <<
+ config->masks->nscale_sel_shift);
+ val |= (tbls[i].nscale & config->masks->nscale_sel_mask) <<
+ config->masks->nscale_sel_shift;
+ writel(val, config->synth_reg);
+
+ clk->rate = rate;
+
+ return 0;
}
/*
- * Used for clocks that always have same value as the parent clock divided by a
+ * Calculates clcd clk rate for different values of div
+ *
+ * Fout from synthesizer can be given from below equation:
+ * Fout = Fin / (2 * div), where div is the division factor
+ * div is 17 bits:-
+ * 0-13 (fractional part)
+ * 14-16 (integer part)
+ * To calculate Fout we left shift val by 14 bits and divide Fin by
+ * complete div (including fractional part) and then right shift the
+ * result by 14 places.
+ */
+unsigned long clcd_calc_rate(struct clk *clk, int index)
+{
+ unsigned long rate = clk->pclk->rate;
+ struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
+
+ rate /= 1000;
+ rate <<= 12;
+ rate /= (2 * tbls[index].div);
+ rate >>= 12;
+ rate *= 1000;
+
+ return rate;
+}
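+/*
+ * At the formula level, with an assumed Fin of 48 MHz and an effective
+ * division factor of 4.0, Fout = 48 MHz / (2 * 4.0) = 6 MHz; the fixed-point
+ * encoding of the factor is handled by the shifts in clcd_calc_rate().
+ */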
+
+/*
+ * calculates current programmed rate of clcd synthesizer
+ * Fout from synthesizer can be given from below equation:
+ * Fout = Fin / (2 * div), where div is the division factor
+ * div is 17 bits:-
+ * 0-13 (fractional part)
+ * 14-16 (integer part)
+ * To calculate Fout we left shift val by 14 bits and divide Fin by
+ * complete div (including fractional part) and then right shift the
+ * result by 14 places.
+ */
+int clcd_clk_recalc(struct clk *clk)
+{
+ struct clcd_clk_config *config = clk->private_data;
+ unsigned int div = 1;
+ unsigned long prate;
+ unsigned int val;
+
+ val = readl(config->synth_reg);
+ div = (val >> config->masks->div_factor_shift) &
+ config->masks->div_factor_mask;
+
+ if (!div)
+ return -EINVAL;
+
+ prate = clk->pclk->rate / 1000; /* first level division, make it KHz */
+
+ clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12;
+ clk->rate *= 1000;
+ return 0;
+}
+
+/* Configures new clock rate of the clcd synthesizer */
+int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate)
+{
+ struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
+ struct clcd_clk_config *config = clk->private_data;
+ unsigned long val, rate;
+ int i;
+
+ i = round_rate_index(clk, desired_rate, &rate);
+ if (i < 0)
+ return i;
+
+ val = readl(config->synth_reg) & ~(config->masks->div_factor_mask <<
+ config->masks->div_factor_shift);
+ val |= (tbls[i].div & config->masks->div_factor_mask) <<
+ config->masks->div_factor_shift;
+ writel(val, config->synth_reg);
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+/*
+ * Used for clocks whose rate is always the parent clock rate divided by a
* fixed divisor
*/
-void follow_parent(struct clk *clk)
+int follow_parent(struct clk *clk)
{
- unsigned long flags;
+ unsigned int div_factor = (clk->div_factor < 1) ? 1 : clk->div_factor;
- spin_lock_irqsave(&clocks_lock, flags);
- clk->rate = clk->pclk->rate;
- spin_unlock_irqrestore(&clocks_lock, flags);
+ clk->rate = clk->pclk->rate/div_factor;
+ return 0;
}
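+/*
+ * For example (assumed values): a child with div_factor = 4 whose parent runs
+ * at 332 MHz is recalculated to 332 MHz / 4 = 83 MHz.
+ */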
/**
@@ -431,5 +880,124 @@ void follow_parent(struct clk *clk)
*/
void recalc_root_clocks(void)
{
- propagate_rate(&root_clks);
+ struct clk *pclk;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ list_for_each_entry(pclk, &root_clks, sibling) {
+ if (pclk->recalc) {
+ ret = pclk->recalc(pclk);
+ /*
+ * recalc will return an error if the clock output is not yet programmed.
+ * In that case, configure the default rate.
+ */
+ if (ret && pclk->set_rate)
+ pclk->set_rate(pclk, 0);
+ }
+ propagate_rate(pclk, 1);
+ /* Enable clks enabled on init, in software view */
+ if (pclk->flags & ENABLED_ON_INIT)
+ do_clk_enable(pclk);
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * debugfs support to trace clock tree hierarchy and attributes
+ */
+static struct dentry *clk_debugfs_root;
+static int clk_debugfs_register_one(struct clk *c)
+{
+ int err;
+ struct dentry *d, *child;
+ struct clk *pa = c->pclk;
+ char s[255];
+ char *p = s;
+
+ if (c) {
+ if (c->cl->con_id)
+ p += sprintf(p, "%s", c->cl->con_id);
+ if (c->cl->dev_id)
+ p += sprintf(p, "%s", c->cl->dev_id);
+ }
+ d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
+ if (!d)
+ return -ENOMEM;
+ c->dent = d;
+
+ d = debugfs_create_u32("usage_count", S_IRUGO, c->dent,
+ (u32 *)&c->usage_count);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ d = c->dent;
+ list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
+ debugfs_remove(child);
+ debugfs_remove(c->dent);
+ return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+ int err;
+ struct clk *pa = c->pclk;
+
+ if (pa && !pa->dent) {
+ err = clk_debugfs_register(pa);
+ if (err)
+ return err;
+ }
+
+ if (!c->dent) {
+ err = clk_debugfs_register_one(c);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+ struct clk *c;
+ struct dentry *d;
+ int err;
+
+ d = debugfs_create_dir("clock", NULL);
+ if (!d)
+ return -ENOMEM;
+ clk_debugfs_root = d;
+
+ list_for_each_entry(c, &clocks, node) {
+ err = clk_debugfs_register(c);
+ if (err)
+ goto err_out;
+ }
+ return 0;
+err_out:
+ debugfs_remove_recursive(clk_debugfs_root);
+ return err;
+}
+late_initcall(clk_debugfs_init);
+
+static int clk_debugfs_reparent(struct clk *c)
+{
+ debugfs_remove(c->dent);
+ return clk_debugfs_register_one(c);
}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
index 2572260f990f..2ae6606930a6 100644
--- a/arch/arm/plat-spear/include/plat/clock.h
+++ b/arch/arm/plat-spear/include/plat/clock.h
@@ -21,6 +21,7 @@
/* clk structure flags */
#define ALWAYS_ENABLED (1 << 0) /* clock always enabled */
#define RESET_TO_ENABLE (1 << 1) /* reset register bit to enable clk */
+#define ENABLED_ON_INIT (1 << 2) /* clocks enabled at init */
/**
* struct clkops - clock operations
@@ -35,13 +36,11 @@ struct clkops {
/**
* struct pclk_info - parents info
* @pclk: pointer to parent clk
- * @pclk_mask: value to be written for selecting this parent
- * @scalable: Is parent scalable (1 - YES, 0 - NO)
+ * @pclk_val: value to be written for selecting this parent
*/
struct pclk_info {
struct clk *pclk;
- u8 pclk_mask;
- u8 scalable;
+ u8 pclk_val;
};
/**
@@ -54,11 +53,23 @@ struct pclk_info {
struct pclk_sel {
struct pclk_info *pclk_info;
u8 pclk_count;
- unsigned int *pclk_sel_reg;
+ void __iomem *pclk_sel_reg;
unsigned int pclk_sel_mask;
};
/**
+ * struct rate_config - clk rate configurations
+ * @tbls: array of device specific clk rate tables, in ascending order of rates
+ * @count: size of tbls array
+ * @default_index: default setting when originally disabled
+ */
+struct rate_config {
+ void *tbls;
+ u8 count;
+ u8 default_index;
+};
+
+/**
* struct clk - clock structure
* @usage_count: num of users who enabled this clock
* @flags: flags for clock properties
@@ -67,21 +78,32 @@ struct pclk_sel {
* @en_reg_bit: clk enable/disable bit
* @ops: clk enable/disable ops - generic_clkops selected if NULL
* @recalc: pointer to clock rate recalculate function
+ * @set_rate: pointer to clock set rate function
+ * @calc_rate: pointer to function returning the clock rate for a table index
+ * @rate_config: rate configuration information, used by set_rate
+ * @div_factor: division factor to parent clock.
* @pclk: current parent clk
* @pclk_sel: pointer to parent selection structure
* @pclk_sel_shift: register shift for selecting parent of this clock
* @children: list of children of this clock
* @sibling: node for list of clocks having same parents
* @private_data: clock specific private data
+ * @node: list to maintain clocks linearly
+ * @cl: clock lookup associated with this clock
+ * @dent: object for debugfs
*/
struct clk {
unsigned int usage_count;
unsigned int flags;
unsigned long rate;
- unsigned int *en_reg;
+ void __iomem *en_reg;
u8 en_reg_bit;
const struct clkops *ops;
- void (*recalc) (struct clk *);
+ int (*recalc) (struct clk *);
+ int (*set_rate) (struct clk *, unsigned long rate);
+ unsigned long (*calc_rate)(struct clk *, int index);
+ struct rate_config rate_config;
+ unsigned int div_factor;
struct clk *pclk;
struct pclk_sel *pclk_sel;
@@ -90,37 +112,137 @@ struct clk {
struct list_head children;
struct list_head sibling;
void *private_data;
+#ifdef CONFIG_DEBUG_FS
+ struct list_head node;
+ struct clk_lookup *cl;
+ struct dentry *dent;
+#endif
};
/* pll configuration structure */
+struct pll_clk_masks {
+ u32 mode_mask;
+ u32 mode_shift;
+
+ u32 norm_fdbk_m_mask;
+ u32 norm_fdbk_m_shift;
+ u32 dith_fdbk_m_mask;
+ u32 dith_fdbk_m_shift;
+ u32 div_p_mask;
+ u32 div_p_shift;
+ u32 div_n_mask;
+ u32 div_n_shift;
+};
+
struct pll_clk_config {
- unsigned int *mode_reg;
- unsigned int *cfg_reg;
+ void __iomem *mode_reg;
+ void __iomem *cfg_reg;
+ struct pll_clk_masks *masks;
+};
+
+/* pll clk rate config structure */
+struct pll_rate_tbl {
+ u8 mode;
+ u16 m;
+ u8 n;
+ u8 p;
};
/* ahb and apb bus configuration structure */
+struct bus_clk_masks {
+ u32 mask;
+ u32 shift;
+};
+
struct bus_clk_config {
- unsigned int *reg;
- unsigned int mask;
- unsigned int shift;
+ void __iomem *reg;
+ struct bus_clk_masks *masks;
+};
+
+/* ahb and apb clk bus rate config structure */
+struct bus_rate_tbl {
+ u8 div;
+};
+
+/* Aux clk configuration structure: applicable to UART and FIRDA */
+struct aux_clk_masks {
+ u32 eq_sel_mask;
+ u32 eq_sel_shift;
+ u32 eq1_mask;
+ u32 eq2_mask;
+ u32 xscale_sel_mask;
+ u32 xscale_sel_shift;
+ u32 yscale_sel_mask;
+ u32 yscale_sel_shift;
};
-/*
- * Aux clk configuration structure: applicable to GPT, UART and FIRDA
- */
struct aux_clk_config {
- unsigned int *synth_reg;
+ void __iomem *synth_reg;
+ struct aux_clk_masks *masks;
+};
+
+/* aux clk rate config structure */
+struct aux_rate_tbl {
+ u16 xscale;
+ u16 yscale;
+ u8 eq;
+};
+
+/* GPT clk configuration structure */
+struct gpt_clk_masks {
+ u32 mscale_sel_mask;
+ u32 mscale_sel_shift;
+ u32 nscale_sel_mask;
+ u32 nscale_sel_shift;
+};
+
+struct gpt_clk_config {
+ void __iomem *synth_reg;
+ struct gpt_clk_masks *masks;
+};
+
+/* gpt clk rate config structure */
+struct gpt_rate_tbl {
+ u16 mscale;
+ u16 nscale;
+};
+
+/* clcd clk configuration structure */
+struct clcd_synth_masks {
+ u32 div_factor_mask;
+ u32 div_factor_shift;
+};
+
+struct clcd_clk_config {
+ void __iomem *synth_reg;
+ struct clcd_synth_masks *masks;
+};
+
+/* clcd clk rate config structure */
+struct clcd_rate_tbl {
+ u16 div;
};
/* platform specific clock functions */
void clk_register(struct clk_lookup *cl);
void recalc_root_clocks(void);
-/* clock recalc functions */
-void follow_parent(struct clk *clk);
-void pll1_clk_recalc(struct clk *clk);
-void bus_clk_recalc(struct clk *clk);
-void gpt_clk_recalc(struct clk *clk);
-void aux_clk_recalc(struct clk *clk);
+/* clock recalc & set rate functions */
+int follow_parent(struct clk *clk);
+unsigned long pll_calc_rate(struct clk *clk, int index);
+int pll_clk_recalc(struct clk *clk);
+int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate);
+unsigned long bus_calc_rate(struct clk *clk, int index);
+int bus_clk_recalc(struct clk *clk);
+int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate);
+unsigned long gpt_calc_rate(struct clk *clk, int index);
+int gpt_clk_recalc(struct clk *clk);
+int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate);
+unsigned long aux_calc_rate(struct clk *clk, int index);
+int aux_clk_recalc(struct clk *clk);
+int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate);
+unsigned long clcd_calc_rate(struct clk *clk, int index);
+int clcd_clk_recalc(struct clk *clk);
+int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate);
#endif /* __PLAT_CLOCK_H */
diff --git a/arch/arm/plat-spear/include/plat/hardware.h b/arch/arm/plat-spear/include/plat/hardware.h
new file mode 100644
index 000000000000..66d677225d15
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/hardware.h
@@ -0,0 +1,23 @@
+/*
+ * arch/arm/plat-spear/include/plat/hardware.h
+ *
+ * Hardware definitions for SPEAr
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_HARDWARE_H
+#define __PLAT_HARDWARE_H
+
+#ifndef __ASSEMBLY__
+#define IOMEM(x) ((void __iomem __force *)(x))
+#else
+#define IOMEM(x) (x)
+#endif
+
+#endif /* __PLAT_HARDWARE_H */
diff --git a/arch/arm/plat-spear/include/plat/memory.h b/arch/arm/plat-spear/include/plat/memory.h
index 27a4aba77343..7e3599e1104e 100644
--- a/arch/arm/plat-spear/include/plat/memory.h
+++ b/arch/arm/plat-spear/include/plat/memory.h
@@ -15,6 +15,6 @@
#define __PLAT_MEMORY_H
/* Physical DRAM offset */
-#define PHYS_OFFSET UL(0x00000000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif /* __PLAT_MEMORY_H */
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 99ba6789cc97..6dd455bafdfd 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -24,10 +24,10 @@ static inline void putc(int c)
{
void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
- while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
+ while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
barrier();
- writel(c, base + UART01x_DR);
+ writel_relaxed(c, base + UART01x_DR);
}
static inline void flush(void)
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
index 09e9372aea21..8c8b24d07046 100644
--- a/arch/arm/plat-spear/include/plat/vmalloc.h
+++ b/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -14,6 +14,6 @@
#ifndef __PLAT_VMALLOC_H
#define __PLAT_VMALLOC_H
-#define VMALLOC_END 0xF0000000
+#define VMALLOC_END 0xF0000000UL
#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index 839c88df9994..100672fa48e1 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -1,7 +1,7 @@
/*
* arch/arm/plat-spear/time.c
*
- * Copyright (C) 2009 ST Microelectronics
+ * Copyright (C) 2010 ST Microelectronics
* Shiraz Hashim<shiraz.hashim@st.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -211,7 +211,7 @@ static void __init spear_clockevent_init(void)
void __init spear_setup_timer(void)
{
- struct clk *pll3_clk;
+ int ret;
if (!request_mem_region(SPEAR_GPT0_BASE, SZ_1K, "gpt0")) {
pr_err("%s:cannot get IO addr\n", __func__);
@@ -230,26 +230,21 @@ void __init spear_setup_timer(void)
goto err_iomap;
}
- pll3_clk = clk_get(NULL, "pll3_48m_clk");
- if (!pll3_clk) {
- pr_err("%s:couldn't get PLL3 as parent for gpt\n", __func__);
- goto err_iomap;
+ ret = clk_enable(gpt_clk);
+ if (ret < 0) {
+ pr_err("%s:couldn't enable gpt clock\n", __func__);
+ goto err_clk;
}
- clk_set_parent(gpt_clk, pll3_clk);
-
spear_clockevent_init();
spear_clocksource_init();
return;
+err_clk:
+ clk_put(gpt_clk);
err_iomap:
iounmap(gpt_base);
-
err_mem:
release_mem_region(SPEAR_GPT0_BASE, SZ_1K);
}
-
-struct sys_timer spear_sys_timer = {
- .init = spear_setup_timer,
-};
diff --git a/arch/arm/plat-stmp3xxx/include/mach/memory.h b/arch/arm/plat-stmp3xxx/include/mach/memory.h
index 7b875a07a1a7..61fa54882e12 100644
--- a/arch/arm/plat-stmp3xxx/include/mach/memory.h
+++ b/arch/arm/plat-stmp3xxx/include/mach/memory.h
@@ -17,6 +17,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x40000000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
#endif
diff --git a/arch/arm/plat-tcc/include/mach/memory.h b/arch/arm/plat-tcc/include/mach/memory.h
index cd91ba8a670b..28a6e0cd13b3 100644
--- a/arch/arm/plat-tcc/include/mach/memory.h
+++ b/arch/arm/plat-tcc/include/mach/memory.h
@@ -13,6 +13,6 @@
/*
* Physical DRAM offset.
*/
-#define PHYS_OFFSET UL(0x20000000)
+#define PLAT_PHYS_OFFSET UL(0x20000000)
#endif
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig
new file mode 100644
index 000000000000..52353beb369d
--- /dev/null
+++ b/arch/arm/plat-versatile/Kconfig
@@ -0,0 +1,17 @@
+if PLAT_VERSATILE
+
+config PLAT_VERSATILE_CLCD
+ bool
+
+config PLAT_VERSATILE_FPGA_IRQ
+ bool
+
+config PLAT_VERSATILE_LEDS
+ def_bool y if LEDS_CLASS
+ depends on ARCH_REALVIEW || ARCH_VERSATILE
+
+config PLAT_VERSATILE_SCHED_CLOCK
+ def_bool y if !ARCH_INTEGRATOR_AP
+ select HAVE_SCHED_CLOCK
+
+endif
diff --git a/arch/arm/plat-versatile/Makefile b/arch/arm/plat-versatile/Makefile
index 16dde0819934..69714db47c33 100644
--- a/arch/arm/plat-versatile/Makefile
+++ b/arch/arm/plat-versatile/Makefile
@@ -1,8 +1,7 @@
obj-y := clock.o
-ifneq ($(CONFIG_ARCH_INTEGRATOR),y)
-obj-y += sched-clock.o
-endif
-ifeq ($(CONFIG_LEDS_CLASS),y)
-obj-$(CONFIG_ARCH_REALVIEW) += leds.o
-obj-$(CONFIG_ARCH_VERSATILE) += leds.o
-endif
+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
+obj-$(CONFIG_PLAT_VERSATILE_CLCD) += clcd.o
+obj-$(CONFIG_PLAT_VERSATILE_FPGA_IRQ) += fpga-irq.o
+obj-$(CONFIG_PLAT_VERSATILE_LEDS) += leds.o
+obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
diff --git a/arch/arm/plat-versatile/clcd.c b/arch/arm/plat-versatile/clcd.c
new file mode 100644
index 000000000000..6628cc27efc5
--- /dev/null
+++ b/arch/arm/plat-versatile/clcd.c
@@ -0,0 +1,182 @@
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+#include <plat/clcd.h>
+
+static struct clcd_panel vga = {
+ .mode = {
+ .name = "VGA",
+ .refresh = 60,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 39721,
+ .left_margin = 40,
+ .right_margin = 24,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
+ .bpp = 16,
+};
+
+static struct clcd_panel xvga = {
+ .mode = {
+ .name = "XVGA",
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15748,
+ .left_margin = 152,
+ .right_margin = 48,
+ .upper_margin = 23,
+ .lower_margin = 3,
+ .hsync_len = 104,
+ .vsync_len = 4,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
+ .bpp = 16,
+};
+
+/* Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT */
+static struct clcd_panel sanyo_tm38qv67a02a = {
+ .mode = {
+ .name = "Sanyo TM38QV67A02A",
+ .refresh = 116,
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 100000,
+ .left_margin = 6,
+ .right_margin = 6,
+ .upper_margin = 5,
+ .lower_margin = 5,
+ .hsync_len = 6,
+ .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
+ .bpp = 16,
+};
+
+static struct clcd_panel sanyo_2_5_in = {
+ .mode = {
+ .name = "Sanyo QVGA Portrait",
+ .refresh = 116,
+ .xres = 240,
+ .yres = 320,
+ .pixclock = 100000,
+ .left_margin = 20,
+ .right_margin = 10,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 10,
+ .vsync_len = 2,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
+ .bpp = 16,
+};
+
+/* Epson L2F50113T00 - 2.2 inch 176x220 Color TFT */
+static struct clcd_panel epson_l2f50113t00 = {
+ .mode = {
+ .name = "Epson L2F50113T00",
+ .refresh = 390,
+ .xres = 176,
+ .yres = 220,
+ .pixclock = 62500,
+ .left_margin = 3,
+ .right_margin = 2,
+ .upper_margin = 1,
+ .lower_margin = 0,
+ .hsync_len = 3,
+ .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
+ .bpp = 16,
+};
+
+static struct clcd_panel *panels[] = {
+ &vga,
+ &xvga,
+ &sanyo_tm38qv67a02a,
+ &sanyo_2_5_in,
+ &epson_l2f50113t00,
+};
+
+struct clcd_panel *versatile_clcd_get_panel(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(panels); i++)
+ if (strcmp(panels[i]->mode.name, name) == 0)
+ break;
+
+ if (i < ARRAY_SIZE(panels))
+ return panels[i];
+
+ pr_err("CLCD: couldn't get parameters for panel %s\n", name);
+
+ return NULL;
+}
+
+int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
+{
+ dma_addr_t dma;
+
+ fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
+ &dma, GFP_KERNEL);
+ if (!fb->fb.screen_base) {
+ pr_err("CLCD: unable to map framebuffer\n");
+ return -ENOMEM;
+ }
+
+ fb->fb.fix.smem_start = dma;
+ fb->fb.fix.smem_len = framesize;
+
+ return 0;
+}
+
+int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(&fb->dev->dev, vma,
+ fb->fb.screen_base,
+ fb->fb.fix.smem_start,
+ fb->fb.fix.smem_len);
+}
+
+void versatile_clcd_remove_dma(struct clcd_fb *fb)
+{
+ dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
+ fb->fb.screen_base, fb->fb.fix.smem_start);
+}
diff --git a/arch/arm/plat-versatile/fpga-irq.c b/arch/arm/plat-versatile/fpga-irq.c
new file mode 100644
index 000000000000..31d945d37e4f
--- /dev/null
+++ b/arch/arm/plat-versatile/fpga-irq.c
@@ -0,0 +1,72 @@
+/*
+ * Support for Versatile FPGA-based IRQ controllers
+ */
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/mach/irq.h>
+#include <plat/fpga-irq.h>
+
+#define IRQ_STATUS 0x00
+#define IRQ_RAW_STATUS 0x04
+#define IRQ_ENABLE_SET 0x08
+#define IRQ_ENABLE_CLEAR 0x0c
+
+static void fpga_irq_mask(struct irq_data *d)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << (d->irq - f->irq_start);
+
+ writel(mask, f->base + IRQ_ENABLE_CLEAR);
+}
+
+static void fpga_irq_unmask(struct irq_data *d)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << (d->irq - f->irq_start);
+
+ writel(mask, f->base + IRQ_ENABLE_SET);
+}
+
+static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
+{
+ struct fpga_irq_data *f = get_irq_desc_data(desc);
+ u32 status = readl(f->base + IRQ_STATUS);
+
+ if (status == 0) {
+ do_bad_IRQ(irq, desc);
+ return;
+ }
+
+ do {
+ irq = ffs(status) - 1;
+ status &= ~(1 << irq);
+
+ generic_handle_irq(irq + f->irq_start);
+ } while (status);
+}
+
+void __init fpga_irq_init(int parent_irq, u32 valid, struct fpga_irq_data *f)
+{
+ unsigned int i;
+
+ f->chip.irq_ack = fpga_irq_mask;
+ f->chip.irq_mask = fpga_irq_mask;
+ f->chip.irq_unmask = fpga_irq_unmask;
+
+ if (parent_irq != -1) {
+ set_irq_data(parent_irq, f);
+ set_irq_chained_handler(parent_irq, fpga_irq_handle);
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (valid & (1 << i)) {
+ unsigned int irq = f->irq_start + i;
+
+ set_irq_chip_data(irq, f);
+ set_irq_chip(irq, &f->chip);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ }
+}
diff --git a/arch/arm/mach-vexpress/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index 7a3f0632947c..d397a1fb2f54 100644
--- a/arch/arm/mach-vexpress/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-vexpress/headsmp.S
+ * linux/arch/arm/plat-versatile/headsmp.S
*
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
@@ -14,11 +14,11 @@
__INIT
/*
- * Versatile Express specific entry point for secondary CPUs. This
- * provides a "holding pen" into which all secondary cores are held
+ * Realview/Versatile Express specific entry point for secondary CPUs.
+ * This provides a "holding pen" into which all secondary cores are held
* until we're ready for them to initialise.
*/
-ENTRY(vexpress_secondary_startup)
+ENTRY(versatile_secondary_startup)
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
adr r4, 1f
diff --git a/arch/arm/plat-versatile/include/plat/clcd.h b/arch/arm/plat-versatile/include/plat/clcd.h
new file mode 100644
index 000000000000..6bb6a1d2019b
--- /dev/null
+++ b/arch/arm/plat-versatile/include/plat/clcd.h
@@ -0,0 +1,9 @@
+#ifndef PLAT_CLCD_H
+#define PLAT_CLCD_H
+
+struct clcd_panel *versatile_clcd_get_panel(const char *);
+int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long);
+int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *);
+void versatile_clcd_remove_dma(struct clcd_fb *);
+
+#endif
diff --git a/arch/arm/plat-versatile/include/plat/fpga-irq.h b/arch/arm/plat-versatile/include/plat/fpga-irq.h
new file mode 100644
index 000000000000..627fafd1e595
--- /dev/null
+++ b/arch/arm/plat-versatile/include/plat/fpga-irq.h
@@ -0,0 +1,12 @@
+#ifndef PLAT_FPGA_IRQ_H
+#define PLAT_FPGA_IRQ_H
+
+struct fpga_irq_data {
+ void __iomem *base;
+ unsigned int irq_start;
+ struct irq_chip chip;
+};
+
+void fpga_irq_init(int, u32, struct fpga_irq_data *);
+
+#endif
diff --git a/arch/arm/mach-vexpress/localtimer.c b/arch/arm/plat-versatile/localtimer.c
index c0e3a59a0bfc..0fb3961999b5 100644
--- a/arch/arm/mach-vexpress/localtimer.c
+++ b/arch/arm/plat-versatile/localtimer.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-vexpress/localtimer.c
+ * linux/arch/arm/plat-versatile/localtimer.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
@@ -19,8 +19,9 @@
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
evt->irq = IRQ_LOCALTIMER;
twd_timer_setup(evt);
+ return 0;
}
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
new file mode 100644
index 000000000000..ba3d471d4bcf
--- /dev/null
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -0,0 +1,104 @@
+/*
+ * linux/arch/arm/plat-versatile/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void __cpuinit write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_secondary_init(0);
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+ * CPUs in the holding pen until we're ready for them. However,
+ * since we haven't sent them a soft interrupt, they shouldn't
+ * be there.
+ */
+ write_pen_release(cpu);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
+ */
+ smp_cross_call(cpumask_of(cpu), 1);
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 0797cb528b46..bbf3da012afd 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -153,7 +153,7 @@ static struct notifier_block vfp_notifier_block = {
* Raise a SIGFPE for the current process.
* sicode describes the signal being raised.
*/
-void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
siginfo_t info;
@@ -489,8 +489,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
/*
* VFP hardware can lose all context when a CPU goes offline.
- * Safely clear our held state when a CPU has been killed, and
- * re-enable access to VFP when the CPU comes back online.
+ * As we will be running in SMP mode with CPU hotplug, we will save the
+ * hardware state at every thread switch. We clear our held state when
+ * a CPU has been killed, indicating that the VFP hardware doesn't contain
+ * a thread's VFP state. When a CPU starts up, we re-enable access to the
+ * VFP hardware.
*
* Both CPU_DYING and CPU_STARTING are called on the CPU which
* is being offlined/onlined.
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index cd2062fe0f61..7385a3ac2277 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -6,6 +6,7 @@ config AVR32
select HAVE_CLK
select HAVE_OPROFILE
select HAVE_KPROBES
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index e67c99945428..2747cde8c9a7 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -2048,6 +2048,8 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+ rx_dws->src_master = 0;
+ rx_dws->dst_master = 1;
}
/* Check if DMA slave interface for playback should be configured. */
@@ -2056,6 +2058,8 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+ rx_dws->src_master = 0;
+ rx_dws->dst_master = 1;
}
if (platform_device_add_data(pdev, data,
@@ -2128,6 +2132,8 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
dws->cfg_hi = DWC_CFGH_DST_PER(2);
dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+ dws->src_master = 0;
+ dws->dst_master = 1;
if (platform_device_add_data(pdev, data,
sizeof(struct atmel_abdac_pdata)))
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
index 24a74d1ca7d9..6a46ecd56cfd 100644
--- a/arch/avr32/mm/cache.c
+++ b/arch/avr32/mm/cache.c
@@ -113,7 +113,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
/*
- * This one is called from do_no_page(), do_swap_page() and install_page().
+ * This one is called from __do_fault() and do_swap_page().
*/
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index c9113619029f..8d73724c0092 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dummy)
{
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifdef CONFIG_IPIPE
update_root_process_times(get_irq_regs());
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c0..c40d07f708e8 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
. = ALIGN(16);
INIT_DATA_SECTION(16)
- PERCPU(4)
+ PERCPU(32, 4)
.exit.data :
{
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 00eb36f8debf..20c85b5dc7d0 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -140,7 +140,7 @@ stop_watchdog(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
//static unsigned short myjiff; /* used by our debug routine print_timestamp */
@@ -176,7 +176,7 @@ timer_interrupt(int irq, void *dev_id)
/* call the real timer interrupt handler */
- do_timer(1);
+ xtime_update(1);
cris_do_profile(regs); /* Save profiling information */
return IRQ_HANDLED;
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index baa746ce4e74..e7f8066105aa 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -241,7 +241,7 @@ flush_etrax_cacherange(void *startadr, int length)
}
/* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
- * will occationally corrupt certain CPU writes if the DMA buffers
+ * will occasionally corrupt certain CPU writes if the DMA buffers
* happen to be hot in the cache.
*
* As a workaround, we have to flush the relevant parts of the cache
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 84fed3b4b079..4c9e3e1ba5d1 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -26,7 +26,9 @@
#define FLUSH_ALL (void*)0xffffffff
/* Vector of locks used for various atomic operations */
-spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
+spinlock_t cris_atomic_locks[] = {
+ [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
+};
/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index a545211e999d..bb978ede8985 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -183,7 +183,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick.
+ * as well as call the "xtime_update()" routine every clocktick.
*/
extern void cris_do_profile(struct pt_regs *regs);
@@ -216,9 +216,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
/* Call the real timer interrupt handler */
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
return IRQ_HANDLED;
}
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 442218980db0..728bbd9e7d4c 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -72,11 +72,6 @@ SECTIONS
INIT_TEXT_SECTION(PAGE_SIZE)
.init.data : { INIT_DATA }
.init.setup : { INIT_SETUP(16) }
-#ifdef CONFIG_ETRAX_ARCH_V32
- __start___param = .;
- __param : { *(__param) }
- __stop___param = .;
-#endif
.initcall.init : {
INIT_CALLS
}
@@ -107,7 +102,7 @@ SECTIONS
#endif
__vmlinux_end = .; /* Last address of the physical file. */
#ifdef CONFIG_ETRAX_ARCH_V32
- PERCPU(PAGE_SIZE)
+ PERCPU(32, PAGE_SIZE)
.init.ramfs : {
INIT_RAM_FS
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 747499a1b31e..ccaf73189744 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,6 +6,7 @@ config FRV
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config ZONE_DMA
bool
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index 0ddbbae83cb2..b457de496b70 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -50,21 +50,13 @@ static struct irqaction timer_irq = {
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
profile_tick(CPU_PROFILING);
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don't need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_seqlock(&xtime_lock);
- do_timer(1);
+ xtime_update(1);
#ifdef CONFIG_HEARTBEAT
static unsigned short n;
@@ -72,8 +64,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */
- write_sequnlock(&xtime_lock);
-
update_process_times(user_mode(get_irq_regs()));
return IRQ_HANDLED;
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 8b973f3cc90e..0daae8af5787 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@ SECTIONS
_einittext = .;
INIT_DATA_SECTION(8)
- PERCPU(4096)
+ PERCPU(L1_CACHE_BYTES, 4096)
. = ALIGN(PAGE_SIZE);
__init_end = .;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 165005aff9df..32263a138aa6 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -35,9 +35,7 @@ void h8300_timer_tick(void)
{
if (current->pid)
profile_tick(CPU_PROFILING);
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
update_process_times(user_mode(get_irq_regs()));
}
diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c
index 3946c0fa8374..7a1533fad47d 100644
--- a/arch/h8300/kernel/timer/timer8.c
+++ b/arch/h8300/kernel/timer/timer8.c
@@ -61,7 +61,7 @@
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dev_id)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcf3b437a2d9..bf9a93fc3ee9 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,6 +26,7 @@ config IA64
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select IRQ_PER_CPU
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 3ded8fe62759..1d7bca0a396d 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -233,3 +233,4 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_T10DIF=y
+CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 3a98b2dd58ac..b11fa880e4b6 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -208,3 +208,4 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_MD5=y
+CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 837dc82a013e..a06dfb13d518 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -128,9 +128,9 @@ static inline const char *acpi_get_sysname (void)
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
+
extern unsigned long acpi_wakeup_address;
/*
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index a2e7368a0150..4336d080b241 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,6 +12,8 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#define DMA_ERROR_CODE 0
+
extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
index 7f3333dd00e4..d551183fee90 100644
--- a/arch/ia64/include/asm/perfmon.h
+++ b/arch/ia64/include/asm/perfmon.h
@@ -7,7 +7,7 @@
#define _ASM_IA64_PERFMON_H
/*
- * perfmon comamnds supported on all CPU models
+ * perfmon commands supported on all CPU models
*/
#define PFM_WRITE_PMCS 0x01
#define PFM_WRITE_PMDS 0x02
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 215d5454c7d3..3027e7516d85 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -25,20 +25,8 @@
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
#include <asm/intrinsics.h>
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
- signed long count;
- spinlock_t wait_lock;
- struct list_head wait_list;
-};
-
#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS (1L)
#define RWSEM_ACTIVE_MASK (0xffffffffL)
@@ -46,26 +34,6 @@ struct rw_semaphore {
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void
-init_rwsem (struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
-
/*
* lock for reading
*/
@@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem)
#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 90ebceb899a0..a54d054ed4b0 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -1034,18 +1034,8 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
EXPORT_SYMBOL(acpi_unregister_ioapic);
/*
- * acpi_save_state_mem() - save kernel state
+ * acpi_suspend_lowlevel() - save kernel state and suspend.
*
* TBD when IA64 starts to support suspend...
*/
-int acpi_save_state_mem(void) { return 0; }
-
-/*
- * acpi_restore_state()
- */
-void acpi_restore_state_mem(void) {}
-
-/*
- * do_suspend_lowlevel()
- */
-void do_suspend_lowlevel(void) {}
+int acpi_suspend_lowlevel(void) { return 0; }
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1753f6a30d55..80d50b83d419 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -582,6 +582,8 @@ out:
/* Get the CPE error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+ local_irq_disable();
+
return IRQ_HANDLED;
}
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data)
data = mca_bootmem();
first_time = 0;
} else
- data = __get_free_pages(GFP_KERNEL, get_order(sz));
+ data = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
cpu);
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 9702fa92489e..156ad803d5b7 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
new_itm += local_cpu_data->itm_delta;
- if (smp_processor_id() == time_keeper_id) {
- /*
- * Here we are in the timer irq handler. We have irqs locally
- * disabled, but we don't know if the timer_bh is running on
- * another CPU. We need to avoid to SMP race by acquiring the
- * xtime_lock.
- */
- write_seqlock(&xtime_lock);
- do_timer(1);
- local_cpu_data->itm_next = new_itm;
- write_sequnlock(&xtime_lock);
- } else
- local_cpu_data->itm_next = new_itm;
+ if (smp_processor_id() == time_keeper_id)
+ xtime_update(1);
+
+ local_cpu_data->itm_next = new_itm;
if (time_after(new_itm, ia64_get_itc()))
break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
* comfort, we increase the safety margin by
* intentionally dropping the next tick(s). We do NOT
* update itm.next because that would force us to call
- * do_timer() which in turn would let our clock run
+ * xtime_update() which in turn would let our clock run
* too fast (with the potentially devastating effect
* of losing monotony of time).
*/
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..787de4a77d82 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
- PERCPU_VADDR(PERCPU_ADDR, :percpu)
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
__phys_per_cpu_start = __per_cpu_load;
/*
* ensure percpu data fits
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 70d224d4264c..8213efe1998c 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -662,6 +662,7 @@ again:
goto vcpu_run_fail;
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ vcpu->mode = IN_GUEST_MODE;
kvm_guest_enter();
/*
@@ -683,6 +684,7 @@ again:
*/
barrier();
kvm_guest_exit();
+ vcpu->mode = OUTSIDE_GUEST_MODE;
preempt_enable();
idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index dbc4cbecb5ed..77db0b514fa4 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void)
/*
* Don't check status. The SAL call is not supported on all PROMs
* but a failure is harmless.
- * Architechtuallly, cpu_init is always called twice on cpu 0. We
+ * Architecturally, cpu_init is always called twice on cpu 0. We
* should set cpu_number on cpu 0 once.
*/
if (cpuid == 0) {
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 4d4536e3b6f3..9c271be9919a 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* use the GART mapped mode.
*/
static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
index fd66b048c6fa..419c8620945a 100644
--- a/arch/ia64/xen/suspend.c
+++ b/arch/ia64/xen/suspend.c
@@ -37,19 +37,14 @@ xen_mm_unpin_all(void)
/* nothing */
}
-void xen_pre_device_suspend(void)
-{
- /* nothing */
-}
-
void
-xen_pre_suspend()
+xen_arch_pre_suspend()
{
/* nothing */
}
void
-xen_post_suspend(int suspend_cancelled)
+xen_arch_post_suspend(int suspend_cancelled)
{
if (suspend_cancelled)
return;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index c1c544513e8d..1f8244a78bee 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
run_posix_cpu_timers(p);
delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
- if (cpu == time_keeper_id) {
- write_seqlock(&xtime_lock);
- do_timer(stolen + blocked);
- local_cpu_data->itm_next = delta_itm + new_itm;
- write_sequnlock(&xtime_lock);
- } else {
- local_cpu_data->itm_next = delta_itm + new_itm;
- }
+ if (cpu == time_keeper_id)
+ xtime_update(stolen + blocked);
+
+ local_cpu_data->itm_next = delta_itm + new_itm;
+
per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index bda86820bffd..84dd04048db9 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -107,15 +107,14 @@ u32 arch_gettimeoffset(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
- /* XXX FIXME. Uh, the xtime_lock should be held here, no? */
- do_timer(1);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 7da94eaa082b..c194d64cdbb9 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -53,7 +53,7 @@ SECTIONS
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
- PERCPU(PAGE_SIZE)
+ PERCPU(32, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index bc9271b85759..4d6acf5d3e49 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -4,6 +4,7 @@ config M68K
select HAVE_AOUT
select HAVE_IDE
select GENERIC_ATOMIC64
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
config MMU
bool
@@ -18,11 +19,9 @@ config RWSEM_XCHGADD_ALGORITHM
config ARCH_HAS_ILOG2_U32
bool
- default n
config ARCH_HAS_ILOG2_U64
bool
- default n
config GENERIC_HWEIGHT
bool
@@ -242,6 +241,37 @@ config SUN3
If you don't want to compile a kernel exclusively for a Sun 3, say N.
+config NATFEAT
+ bool "ARAnyM emulator support"
+ depends on ATARI
+ help
+ This option enables support for ARAnyM native features, such as
+ access to a disk image as /dev/hda.
+
+config NFBLOCK
+ tristate "NatFeat block device support"
+ depends on BLOCK && NATFEAT
+ help
+ Say Y to include support for the ARAnyM NatFeat block device
+ which allows direct access to the hard drives without using
+ the hardware emulation.
+
+config NFCON
+ tristate "NatFeat console driver"
+ depends on NATFEAT
+ help
+ Say Y to include support for the ARAnyM NatFeat console driver
+ which allows the console output to be redirected to the stderr
+ output of ARAnyM.
+
+config NFETH
+ tristate "NatFeat Ethernet support"
+ depends on NET_ETHERNET && NATFEAT
+ help
+ Say Y to include support for the ARAnyM NatFeat network device
+ which will emulate a regular ethernet device while presenting an
+ ethertap device to the host system.
+
comment "Processor type"
config M68020
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index b06a7e3cbcd6..b793163abc61 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -76,6 +76,7 @@ core-$(CONFIG_MVME16x) += arch/m68k/mvme16x/
core-$(CONFIG_BVME6000) += arch/m68k/bvme6000/
core-$(CONFIG_SUN3X) += arch/m68k/sun3x/ arch/m68k/sun3/
core-$(CONFIG_SUN3) += arch/m68k/sun3/ arch/m68k/sun3/prom/
+core-$(CONFIG_NATFEAT) += arch/m68k/emu/
core-$(CONFIG_M68040) += arch/m68k/fpsp040/
core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index 61df1d33c050..dd0447db1c90 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -33,10 +33,6 @@ void __init amiga_chip_init(void)
if (!AMIGAHW_PRESENT(CHIP_RAM))
return;
- /*
- * Remove the first 4 pages where PPC exception handlers will be located
- */
- amiga_chip_size -= 0x4000;
chipram_res.end = amiga_chip_size-1;
request_resource(&iomem_resource, &chipram_res);
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 9fe6fefb5e14..1edd95095cb4 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -45,8 +45,8 @@ extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via bvme6000_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via bvme6000_process_int() */
static irq_handler_t tick_handler;
diff --git a/arch/m68k/emu/Makefile b/arch/m68k/emu/Makefile
new file mode 100644
index 000000000000..7dc201080308
--- /dev/null
+++ b/arch/m68k/emu/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for Linux arch/m68k/emu source directory
+#
+
+obj-y += natfeat.o
+
+obj-$(CONFIG_NFBLOCK) += nfblock.o
+obj-$(CONFIG_NFCON) += nfcon.o
+obj-$(CONFIG_NFETH) += nfeth.o
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
new file mode 100644
index 000000000000..2291a7d69d49
--- /dev/null
+++ b/arch/m68k/emu/natfeat.c
@@ -0,0 +1,78 @@
+/*
+ * natfeat.c - ARAnyM hardware support via Native Features (natfeats)
+ *
+ * Copyright (c) 2005 Petr Stehlik of ARAnyM dev team
+ *
+ * Reworked for Linux by Roman Zippel <zippel@linux-m68k.org>
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ */
+
+#include <linux/types.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/natfeat.h>
+
+asm("\n"
+" .global nf_get_id,nf_call\n"
+"nf_get_id:\n"
+" .short 0x7300\n"
+" rts\n"
+"nf_call:\n"
+" .short 0x7301\n"
+" rts\n"
+"1: moveq.l #0,%d0\n"
+" rts\n"
+" .section __ex_table,\"a\"\n"
+" .long nf_get_id,1b\n"
+" .long nf_call,1b\n"
+" .previous");
+EXPORT_SYMBOL_GPL(nf_get_id);
+EXPORT_SYMBOL_GPL(nf_call);
+
+void nfprint(const char *fmt, ...)
+{
+ static char buf[256];
+ va_list ap;
+ int n;
+
+ va_start(ap, fmt);
+ n = vsnprintf(buf, 256, fmt, ap);
+ nf_call(nf_get_id("NF_STDERR"), buf);
+ va_end(ap);
+}
+
+static void nf_poweroff(void)
+{
+ long id = nf_get_id("NF_SHUTDOWN");
+
+ if (id)
+ nf_call(id);
+}
+
+void nf_init(void)
+{
+ unsigned long id, version;
+ char buf[256];
+
+ id = nf_get_id("NF_VERSION");
+ if (!id)
+ return;
+ version = nf_call(id);
+
+ id = nf_get_id("NF_NAME");
+ if (!id)
+ return;
+ nf_call(id, buf, 256);
+ buf[255] = 0;
+
+ pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16,
+ version & 0xffff);
+
+ mach_power_off = nf_poweroff;
+}
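
A minimal usage sketch of the interface natfeat.c exports, mirroring nfprint() above (the function name natfeat_hello is made up for illustration; everything else appears in this diff):

#include <asm/natfeat.h>

static void natfeat_hello(void)
{
        long id = nf_get_id("NF_STDERR");       /* returns 0 when not running under ARAnyM */

        if (id)
                nf_call(id, "hello from the guest\n");
}

On real hardware the 0x7300/0x7301 opcodes fault, and the __ex_table entries above route both routines to the stub that returns 0, so callers only need to check for a zero id.
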
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
new file mode 100644
index 000000000000..48e50f8c1c7e
--- /dev/null
+++ b/arch/m68k/emu/nfblock.c
@@ -0,0 +1,195 @@
+/*
+ * ARAnyM block device driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/slab.h>
+
+#include <asm/natfeat.h>
+
+static long nfhd_id;
+
+enum {
+ /* emulation entry points */
+ NFHD_READ_WRITE = 10,
+ NFHD_GET_CAPACITY = 14,
+
+ /* skip ACSI devices */
+ NFHD_DEV_OFFSET = 8,
+};
+
+static inline s32 nfhd_read_write(u32 major, u32 minor, u32 rwflag, u32 recno,
+ u32 count, u32 buf)
+{
+ return nf_call(nfhd_id + NFHD_READ_WRITE, major, minor, rwflag, recno,
+ count, buf);
+}
+
+static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,
+ u32 *blocksize)
+{
+ return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor, blocks,
+ blocksize);
+}
+
+static LIST_HEAD(nfhd_list);
+
+static int major_num;
+module_param(major_num, int, 0);
+
+struct nfhd_device {
+ struct list_head list;
+ int id;
+ u32 blocks, bsize;
+ int bshift;
+ struct request_queue *queue;
+ struct gendisk *disk;
+};
+
+static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+{
+ struct nfhd_device *dev = queue->queuedata;
+ struct bio_vec *bvec;
+ int i, dir, len, shift;
+ sector_t sec = bio->bi_sector;
+
+ dir = bio_data_dir(bio);
+ shift = dev->bshift;
+ bio_for_each_segment(bvec, bio, i) {
+ len = bvec->bv_len;
+ len >>= 9;
+ nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
+ bvec_to_phys(bvec));
+ sec += len;
+ }
+ bio_endio(bio, 0);
+ return 0;
+}
+
+static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct nfhd_device *dev = bdev->bd_disk->private_data;
+
+ geo->cylinders = dev->blocks >> (6 - dev->bshift);
+ geo->heads = 4;
+ geo->sectors = 16;
+
+ return 0;
+}
+
+static const struct block_device_operations nfhd_ops = {
+ .owner = THIS_MODULE,
+ .getgeo = nfhd_getgeo,
+};
+
+static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
+{
+ struct nfhd_device *dev;
+ int dev_id = id - NFHD_DEV_OFFSET;
+
+ pr_info("nfhd%u: found device with %u blocks (%u bytes)\n", dev_id,
+ blocks, bsize);
+
+ if (bsize < 512 || (bsize & (bsize - 1))) {
+ pr_warn("nfhd%u: invalid block size\n", dev_id);
+ return -EINVAL;
+ }
+
+ dev = kmalloc(sizeof(struct nfhd_device), GFP_KERNEL);
+ if (!dev)
+ goto out;
+
+ dev->id = id;
+ dev->blocks = blocks;
+ dev->bsize = bsize;
+ dev->bshift = ffs(bsize) - 10;
+
+ dev->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dev->queue == NULL)
+ goto free_dev;
+
+ dev->queue->queuedata = dev;
+ blk_queue_make_request(dev->queue, nfhd_make_request);
+ blk_queue_logical_block_size(dev->queue, bsize);
+
+ dev->disk = alloc_disk(16);
+ if (!dev->disk)
+ goto free_queue;
+
+ dev->disk->major = major_num;
+ dev->disk->first_minor = dev_id * 16;
+ dev->disk->fops = &nfhd_ops;
+ dev->disk->private_data = dev;
+ sprintf(dev->disk->disk_name, "nfhd%u", dev_id);
+ set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));
+ dev->disk->queue = dev->queue;
+
+ add_disk(dev->disk);
+
+ list_add_tail(&dev->list, &nfhd_list);
+
+ return 0;
+
+free_queue:
+ blk_cleanup_queue(dev->queue);
+free_dev:
+ kfree(dev);
+out:
+ return -ENOMEM;
+}
+
+static int __init nfhd_init(void)
+{
+ u32 blocks, bsize;
+ int i;
+
+ nfhd_id = nf_get_id("XHDI");
+ if (!nfhd_id)
+ return -ENODEV;
+
+ major_num = register_blkdev(major_num, "nfhd");
+ if (major_num <= 0) {
+ pr_warn("nfhd: unable to get major number\n");
+ return major_num;
+ }
+
+ for (i = NFHD_DEV_OFFSET; i < 24; i++) {
+ if (nfhd_get_capacity(i, 0, &blocks, &bsize))
+ continue;
+ nfhd_init_one(i, blocks, bsize);
+ }
+
+ return 0;
+}
+
+static void __exit nfhd_exit(void)
+{
+ struct nfhd_device *dev, *next;
+
+ list_for_each_entry_safe(dev, next, &nfhd_list, list) {
+ list_del(&dev->list);
+ del_gendisk(dev->disk);
+ put_disk(dev->disk);
+ blk_cleanup_queue(dev->queue);
+ kfree(dev);
+ }
+ unregister_blkdev(major_num, "nfhd");
+}
+
+module_init(nfhd_init);
+module_exit(nfhd_exit);
+
+MODULE_LICENSE("GPL");
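
A worked example of the block-size arithmetic above, assuming the host reports a 1 KiB block size (bsize = 1024):

/* bshift = ffs(1024) - 10 = 11 - 10 = 1, so one block covers two 512-byte
 * sectors.  A 4096-byte bio segment starting at sector 100 becomes
 * nfhd_read_write(id, 0, dir, 100 >> 1, 8 >> 1, phys), i.e. blocks 50..53.
 * With 4 heads * 16 sectors = 64 sectors per cylinder, nfhd_getgeo() reports
 * geo->cylinders = blocks >> (6 - 1), i.e. blocks / 32. */
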
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
new file mode 100644
index 000000000000..ab20dc0ff63b
--- /dev/null
+++ b/arch/m68k/emu/nfcon.c
@@ -0,0 +1,162 @@
+/*
+ * ARAnyM console driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+
+#include <asm/natfeat.h>
+
+static int stderr_id;
+static struct tty_driver *nfcon_tty_driver;
+
+static void nfputs(const char *str, unsigned int count)
+{
+ char buf[68];
+
+ buf[64] = 0;
+ while (count > 64) {
+ memcpy(buf, str, 64);
+ nf_call(stderr_id, buf);
+ str += 64;
+ count -= 64;
+ }
+ memcpy(buf, str, count);
+ buf[count] = 0;
+ nf_call(stderr_id, buf);
+}
+
+static void nfcon_write(struct console *con, const char *str,
+ unsigned int count)
+{
+ nfputs(str, count);
+}
+
+static struct tty_driver *nfcon_device(struct console *con, int *index)
+{
+ *index = 0;
+ return (con->flags & CON_ENABLED) ? nfcon_tty_driver : NULL;
+}
+
+static struct console nf_console = {
+ .name = "nfcon",
+ .write = nfcon_write,
+ .device = nfcon_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+
+static int nfcon_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ return 0;
+}
+
+static void nfcon_tty_close(struct tty_struct *tty, struct file *filp)
+{
+}
+
+static int nfcon_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ nfputs(buf, count);
+ return count;
+}
+
+static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ char temp[2] = { ch, 0 };
+
+ nf_call(stderr_id, temp);
+ return 1;
+}
+
+static int nfcon_tty_write_room(struct tty_struct *tty)
+{
+ return 64;
+}
+
+static const struct tty_operations nfcon_tty_ops = {
+ .open = nfcon_tty_open,
+ .close = nfcon_tty_close,
+ .write = nfcon_tty_write,
+ .put_char = nfcon_tty_put_char,
+ .write_room = nfcon_tty_write_room,
+};
+
+#ifndef MODULE
+
+static int __init nf_debug_setup(char *arg)
+{
+ if (strcmp(arg, "nfcon"))
+ return 0;
+
+ stderr_id = nf_get_id("NF_STDERR");
+ if (stderr_id) {
+ nf_console.flags |= CON_ENABLED;
+ register_console(&nf_console);
+ }
+
+ return 0;
+}
+
+early_param("debug", nf_debug_setup);
+
+#endif /* !MODULE */
+
+static int __init nfcon_init(void)
+{
+ int res;
+
+ stderr_id = nf_get_id("NF_STDERR");
+ if (!stderr_id)
+ return -ENODEV;
+
+ nfcon_tty_driver = alloc_tty_driver(1);
+ if (!nfcon_tty_driver)
+ return -ENOMEM;
+
+ nfcon_tty_driver->owner = THIS_MODULE;
+ nfcon_tty_driver->driver_name = "nfcon";
+ nfcon_tty_driver->name = "nfcon";
+ nfcon_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ nfcon_tty_driver->subtype = SYSTEM_TYPE_TTY;
+ nfcon_tty_driver->init_termios = tty_std_termios;
+ nfcon_tty_driver->flags = TTY_DRIVER_REAL_RAW;
+
+ tty_set_operations(nfcon_tty_driver, &nfcon_tty_ops);
+ res = tty_register_driver(nfcon_tty_driver);
+ if (res) {
+ pr_err("failed to register nfcon tty driver\n");
+ put_tty_driver(nfcon_tty_driver);
+ return res;
+ }
+
+ if (!(nf_console.flags & CON_ENABLED))
+ register_console(&nf_console);
+
+ return 0;
+}
+
+static void __exit nfcon_exit(void)
+{
+ unregister_console(&nf_console);
+ tty_unregister_driver(nfcon_tty_driver);
+ put_tty_driver(nfcon_tty_driver);
+}
+
+module_init(nfcon_init);
+module_exit(nfcon_exit);
+
+MODULE_LICENSE("GPL");
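
The write path above chops output into NUL-terminated chunks because NF_STDERR takes a C string; a worked example of nfputs() (comment only):

/* nfputs(str, 150):
 *   1st loop pass: nf_call(stderr_id, buf) with bytes   0..63  (count -> 86)
 *   2nd loop pass: nf_call(stderr_id, buf) with bytes  64..127 (count -> 22)
 *   tail copy:     nf_call(stderr_id, buf) with bytes 128..149
 * Three host calls in total, each at most 64 characters plus the NUL. */
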
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
new file mode 100644
index 000000000000..8b6e201b2c20
--- /dev/null
+++ b/arch/m68k/emu/nfeth.c
@@ -0,0 +1,270 @@
+/*
+ * atari_nfeth.c - ARAnyM ethernet card driver for GNU/Linux
+ *
+ * Copyright (c) 2005 Milan Jurik, Petr Stehlik of ARAnyM dev team
+ *
+ * Based on ARAnyM driver for FreeMiNT written by Standa Opichal
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ */
+
+#define DRV_VERSION "0.3"
+#define DRV_RELDATE "10/12/2005"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <asm/natfeat.h>
+#include <asm/virtconvert.h>
+
+enum {
+ GET_VERSION = 0,/* no parameters, return NFAPI_VERSION in d0 */
+ XIF_INTLEVEL, /* no parameters, return Interrupt Level in d0 */
+ XIF_IRQ, /* acknowledge interrupt from host */
+ XIF_START, /* (ethX), called on 'ifup', start receiver thread */
+ XIF_STOP, /* (ethX), called on 'ifdown', stop the thread */
+ XIF_READLENGTH, /* (ethX), return size of network data block to read */
+ XIF_READBLOCK, /* (ethX, buffer, size), read block of network data */
+ XIF_WRITEBLOCK, /* (ethX, buffer, size), write block of network data */
+ XIF_GET_MAC, /* (ethX, buffer, size), return MAC HW addr in buffer */
+ XIF_GET_IPHOST, /* (ethX, buffer, size), return IP address of host */
+ XIF_GET_IPATARI,/* (ethX, buffer, size), return IP address of atari */
+ XIF_GET_NETMASK /* (ethX, buffer, size), return IP netmask */
+};
+
+#define MAX_UNIT 8
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitdata =
+ KERN_INFO KBUILD_MODNAME ".c:v" DRV_VERSION " " DRV_RELDATE
+ " S.Opichal, M.Jurik, P.Stehlik\n"
+ KERN_INFO " http://aranym.org/\n";
+
+MODULE_AUTHOR("Milan Jurik");
+MODULE_DESCRIPTION("Atari NFeth driver");
+MODULE_LICENSE("GPL");
+/*
+MODULE_PARM(nfeth_debug, "i");
+MODULE_PARM_DESC(nfeth_debug, "nfeth_debug level (1-2)");
+*/
+
+
+static long nfEtherID;
+static int nfEtherIRQ;
+
+struct nfeth_private {
+ int ethX;
+};
+
+static struct net_device *nfeth_dev[MAX_UNIT];
+
+static int nfeth_open(struct net_device *dev)
+{
+ struct nfeth_private *priv = netdev_priv(dev);
+ int res;
+
+ res = nf_call(nfEtherID + XIF_START, priv->ethX);
+ netdev_dbg(dev, "%s: %d\n", __func__, res);
+
+ /* Ready for data */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int nfeth_stop(struct net_device *dev)
+{
+ struct nfeth_private *priv = netdev_priv(dev);
+
+ /* No more data */
+ netif_stop_queue(dev);
+
+ nf_call(nfEtherID + XIF_STOP, priv->ethX);
+
+ return 0;
+}
+
+/*
+ * Read a packet out of the adapter and pass it to the upper layers
+ */
+static inline void recv_packet(struct net_device *dev)
+{
+ struct nfeth_private *priv = netdev_priv(dev);
+ unsigned short pktlen;
+ struct sk_buff *skb;
+
+ /* read packet length (excluding 32 bit crc) */
+ pktlen = nf_call(nfEtherID + XIF_READLENGTH, priv->ethX);
+
+ netdev_dbg(dev, "%s: %u\n", __func__, pktlen);
+
+ if (!pktlen) {
+ netdev_dbg(dev, "%s: pktlen == 0\n", __func__);
+ dev->stats.rx_errors++;
+ return;
+ }
+
+ skb = dev_alloc_skb(pktlen + 2);
+ if (!skb) {
+ netdev_dbg(dev, "%s: out of mem (buf_alloc failed)\n",
+ __func__);
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 Byte align */
+ skb_put(skb, pktlen); /* make room */
+ nf_call(nfEtherID + XIF_READBLOCK, priv->ethX, virt_to_phys(skb->data),
+ pktlen);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pktlen;
+
+ /* and enqueue packet */
+ return;
+}
+
+static irqreturn_t nfeth_interrupt(int irq, void *dev_id)
+{
+ int i, m, mask;
+
+ mask = nf_call(nfEtherID + XIF_IRQ, 0);
+ for (i = 0, m = 1; i < MAX_UNIT; m <<= 1, i++) {
+ if (mask & m && nfeth_dev[i]) {
+ recv_packet(nfeth_dev[i]);
+ nf_call(nfEtherID + XIF_IRQ, m);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int nfeth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int len;
+ char *data, shortpkt[ETH_ZLEN];
+ struct nfeth_private *priv = netdev_priv(dev);
+
+ data = skb->data;
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ memset(shortpkt, 0, ETH_ZLEN);
+ memcpy(shortpkt, data, len);
+ data = shortpkt;
+ len = ETH_ZLEN;
+ }
+
+ netdev_dbg(dev, "%s: send %u bytes\n", __func__, len);
+ nf_call(nfEtherID + XIF_WRITEBLOCK, priv->ethX, virt_to_phys(data),
+ len);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void nfeth_tx_timeout(struct net_device *dev)
+{
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static const struct net_device_ops nfeth_netdev_ops = {
+ .ndo_open = nfeth_open,
+ .ndo_stop = nfeth_stop,
+ .ndo_start_xmit = nfeth_xmit,
+ .ndo_tx_timeout = nfeth_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static struct net_device * __init nfeth_probe(int unit)
+{
+ struct net_device *dev;
+ struct nfeth_private *priv;
+ char mac[ETH_ALEN], host_ip[32], local_ip[32];
+ int err;
+
+ if (!nf_call(nfEtherID + XIF_GET_MAC, unit, mac, ETH_ALEN))
+ return NULL;
+
+ dev = alloc_etherdev(sizeof(struct nfeth_private));
+ if (!dev)
+ return NULL;
+
+ dev->irq = nfEtherIRQ;
+ dev->netdev_ops = &nfeth_netdev_ops;
+
+ dev->features |= NETIF_F_NO_CSUM;
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
+
+ priv = netdev_priv(dev);
+ priv->ethX = unit;
+
+ err = register_netdev(dev);
+ if (err) {
+ free_netdev(dev);
+ return NULL;
+ }
+
+ nf_call(nfEtherID + XIF_GET_IPHOST, unit,
+ host_ip, sizeof(host_ip));
+ nf_call(nfEtherID + XIF_GET_IPATARI, unit,
+ local_ip, sizeof(local_ip));
+
+ netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip,
+ local_ip, mac);
+
+ return dev;
+}
+
+static int __init nfeth_init(void)
+{
+ long ver;
+ int error, i;
+
+ nfEtherID = nf_get_id("ETHERNET");
+ if (!nfEtherID)
+ return -ENODEV;
+
+ ver = nf_call(nfEtherID + GET_VERSION);
+ pr_info("API %lu\n", ver);
+
+ nfEtherIRQ = nf_call(nfEtherID + XIF_INTLEVEL);
+ error = request_irq(nfEtherIRQ, nfeth_interrupt, IRQF_SHARED,
+ "eth emu", nfeth_interrupt);
+ if (error) {
+ pr_err("request for irq %d failed %d", nfEtherIRQ, error);
+ return error;
+ }
+
+ for (i = 0; i < MAX_UNIT; i++)
+ nfeth_dev[i] = nfeth_probe(i);
+
+ return 0;
+}
+
+static void __exit nfeth_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_UNIT; i++) {
+ if (nfeth_dev[i]) {
+ unregister_netdev(nfeth_dev[i]);
+ free_netdev(nfeth_dev[i]);
+ }
+ }
+ free_irq(nfEtherIRQ, nfeth_interrupt);
+}
+
+module_init(nfeth_init);
+module_exit(nfeth_cleanup);
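
For orientation, the interrupt path above works on a per-unit bitmask; a worked example (comment only, no new interface is implied):

/* nfeth_interrupt(): XIF_IRQ called with argument 0 returns the pending mask.
 * If it returns 0x5, units 0 and 2 have data: recv_packet(nfeth_dev[0]) is
 * followed by an ack via nf_call(nfEtherID + XIF_IRQ, 0x1), then
 * recv_packet(nfeth_dev[2]) and an ack with 0x4. */
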
diff --git a/arch/m68k/include/asm/natfeat.h b/arch/m68k/include/asm/natfeat.h
new file mode 100644
index 000000000000..a3521b80c3b9
--- /dev/null
+++ b/arch/m68k/include/asm/natfeat.h
@@ -0,0 +1,22 @@
+/*
+ * ARAnyM hardware support via Native Features (natfeats)
+ *
+ * Copyright (c) 2005 Petr Stehlik of ARAnyM dev team
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ */
+
+#ifndef _NATFEAT_H
+#define _NATFEAT_H
+
+long nf_get_id(const char *feature_name);
+long nf_call(long id, ...);
+
+void nf_init(void);
+void nf_shutdown(void);
+
+void nfprint(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+# endif /* _NATFEAT_H */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 278c69bad57a..f111b02b704f 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -113,6 +113,8 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
wrusp(usp);
}
+extern int handle_kernel_fault(struct pt_regs *regs);
+
#else
/*
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index b3963ab3d149..334d83640376 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -42,6 +42,7 @@
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
+#include <asm/natfeat.h>
#if !FPSTATESIZE || !NR_IRQS
#warning No CPU/platform type selected, your kernel will not work!
@@ -324,6 +325,10 @@ void __init setup_arch(char **cmdline_p)
panic("No configuration setup");
}
+#ifdef CONFIG_NATFEAT
+ nf_init();
+#endif
+
paging_init();
#ifndef CONFIG_SUN3
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index d12c3b0d9e4f..a0afc239304e 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -42,6 +42,7 @@
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
+#include <linux/module.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
@@ -51,7 +52,7 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-const int frame_extra_sizes[16] = {
+static const int frame_extra_sizes[16] = {
[1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
[2] = sizeof(((struct frame *)0)->un.fmt2),
[3] = sizeof(((struct frame *)0)->un.fmt3),
@@ -69,6 +70,27 @@ const int frame_extra_sizes[16] = {
[15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};
+int handle_kernel_fault(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+ struct pt_regs *tregs;
+
+ /* Are we prepared to handle this kernel fault? */
+ fixup = search_exception_tables(regs->pc);
+ if (!fixup)
+ return 0;
+
+ /* Create a new four word stack frame, discarding the old one. */
+ regs->stkadj = frame_extra_sizes[regs->format];
+ tregs = (struct pt_regs *)((long)regs + regs->stkadj);
+ tregs->vector = regs->vector;
+ tregs->format = 0;
+ tregs->pc = fixup->fixup;
+ tregs->sr = regs->sr;
+
+ return 1;
+}
+
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
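
handle_kernel_fault() above is the generic consumer of __ex_table fixups on m68k (the natfeat.c stub earlier in this series registers exactly such entries); a compressed view of the fixup path (comment only):

/* Kernel-mode fault at regs->pc:
 *   search_exception_tables(regs->pc) finds the __ex_table entry, the saved
 *   exception frame is shrunk to a plain four-word (format 0) frame via
 *   regs->stkadj, and tregs->pc is pointed at fixup->fixup so execution
 *   resumes in the fixup code instead of oopsing. */
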
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 06438dac08ff..18b34ee5db3b 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -37,11 +37,11 @@ static inline int set_rtc_mmss(unsigned long nowtime)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
- do_timer(1);
+ xtime_update(1);
update_process_times(user_mode(get_irq_regs()));
profile_tick(CPU_PROFILING);
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index ada4f4cca811..4022bbc28878 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -48,10 +48,7 @@ asmlinkage void nmihandler(void);
asmlinkage void fpu_emu(void);
#endif
-e_vector vectors[256] = {
- [VEC_BUSERR] = buserr,
- [VEC_SYS] = system_call,
-};
+e_vector vectors[256];
/* nmi handler for the Amiga */
asm(".text\n"
@@ -61,10 +58,11 @@ asm(".text\n"
/*
* this must be called very early as the kernel might
* use some instructions that are emulated on the 060
+ * and so we're prepared for early probe attempts (e.g. nf_init).
*/
void __init base_trap_init(void)
{
- if(MACH_IS_SUN3X) {
+ if (MACH_IS_SUN3X) {
extern e_vector *sun3x_prom_vbr;
__asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
@@ -79,6 +77,10 @@ void __init base_trap_init(void)
vectors[VEC_UNIMPII] = unimp_vec;
}
+
+ vectors[VEC_BUSERR] = buserr;
+ vectors[VEC_ILLEGAL] = trap;
+ vectors[VEC_SYS] = system_call;
}
void __init trap_init (void)
@@ -1055,9 +1057,11 @@ asmlinkage void trap_c(struct frame *fp)
siginfo_t info;
if (fp->ptregs.sr & PS_S) {
- if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
- /* traced a trapping instruction */
- } else
+ if (fp->ptregs.vector == VEC_TRACE << 2) {
+ /* traced a trapping instruction on a 68020/30,
+ * real exception will be executed afterwards.
+ */
+ } else if (!handle_kernel_fault(&fp->ptregs))
bad_super_trap(fp);
return;
}
diff --git a/arch/m68k/math-emu/Makefile b/arch/m68k/math-emu/Makefile
index a0935bf98362..547c23c6e40e 100644
--- a/arch/m68k/math-emu/Makefile
+++ b/arch/m68k/math-emu/Makefile
@@ -2,8 +2,8 @@
# Makefile for the linux kernel.
#
-#EXTRA_AFLAGS += -DFPU_EMU_DEBUG
-#EXTRA_CFLAGS += -DFPU_EMU_DEBUG
+#asflags-y := -DFPU_EMU_DEBUG
+#ccflags-y := -DFPU_EMU_DEBUG
obj-y := fp_entry.o fp_scan.o fp_util.o fp_move.o fp_movem.o \
fp_cond.o fp_arith.o fp_log.o fp_trig.o
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index a96394a0333d..2db6099784ba 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -18,7 +18,6 @@
#include <asm/pgalloc.h>
extern void die_if_kernel(char *, struct pt_regs *, long);
-extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */
int send_fault_sig(struct pt_regs *regs)
{
@@ -35,21 +34,8 @@ int send_fault_sig(struct pt_regs *regs)
force_sig_info(siginfo.si_signo,
&siginfo, current);
} else {
- const struct exception_table_entry *fixup;
-
- /* Are we prepared to handle this kernel fault? */
- if ((fixup = search_exception_tables(regs->pc))) {
- struct pt_regs *tregs;
- /* Create a new four word stack frame, discarding the old
- one. */
- regs->stkadj = frame_extra_sizes[regs->format];
- tregs = (struct pt_regs *)((ulong)regs + regs->stkadj);
- tregs->vector = regs->vector;
- tregs->format = 0;
- tregs->pc = fixup->fixup;
- tregs->sr = regs->sr;
+ if (handle_kernel_fault(regs))
return -1;
- }
//if (siginfo.si_signo == SIGBUS)
// force_sig_info(siginfo.si_signo,
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 100baaa692a1..6cb9c3a9b6c9 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -46,8 +46,8 @@ extern void mvme147_reset (void);
static int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via mvme147_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via mvme147_process_int() */
irq_handler_t tick_handler;
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 11edf61cc2c4..0b28e2621653 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -51,8 +51,8 @@ extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via mvme16x_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via mvme16x_process_int() */
static irq_handler_t tick_handler;
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 2d9e21bd313a..6464ad3ae3e6 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -66,7 +66,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
#ifdef CONFIG_SUN3
intersil_clear();
#endif
- do_timer(1);
+ xtime_update(1);
update_process_times(user_mode(get_irq_regs()));
if (!(kstat_cpu(0).irqs[irq] % 20))
sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 8b9dacaa0f6e..04c7d348fbe7 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -3,6 +3,7 @@ config M68K
default y
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
config MMU
bool
diff --git a/arch/m68knommu/kernel/irq.c b/arch/m68knommu/kernel/irq.c
index c9cac36d4422..c7dd48f37bee 100644
--- a/arch/m68knommu/kernel/irq.c
+++ b/arch/m68knommu/kernel/irq.c
@@ -38,11 +38,13 @@ int show_interrupts(struct seq_file *p, void *v)
seq_puts(p, " CPU0\n");
if (irq < NR_IRQS) {
- ap = irq_desc[irq].action;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ ap = desc->action;
if (ap) {
seq_printf(p, "%3d: ", irq);
seq_printf(p, "%10u ", kstat_irqs(irq));
- seq_printf(p, "%14s ", irq_desc[irq].chip->name);
+ seq_printf(p, "%14s ", get_irq_desc_chip(desc)->name);
seq_printf(p, "%s", ap->name);
for (ap = ap->next; ap; ap = ap->next)
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index d6ac2a43453c..6623909f70e6 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -36,7 +36,7 @@ static inline int set_rtc_mmss(unsigned long nowtime)
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
irqreturn_t arch_timer_interrupt(int irq, void *dummy)
{
@@ -44,11 +44,7 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
if (current->pid)
profile_tick(CPU_PROFILING);
- write_seqlock(&xtime_lock);
-
- do_timer(1);
-
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
update_process_times(user_mode(get_irq_regs()));
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c
index c5151f846591..8f4b63e17366 100644
--- a/arch/m68knommu/platform/5249/intc2.c
+++ b/arch/m68knommu/platform/5249/intc2.c
@@ -17,32 +17,32 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-static void intc2_irq_gpio_mask(unsigned int irq)
+static void intc2_irq_gpio_mask(struct irq_data *d)
{
u32 imr;
imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
- imr &= ~(0x1 << (irq - MCFINTC2_GPIOIRQ0));
+ imr &= ~(0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
}
-static void intc2_irq_gpio_unmask(unsigned int irq)
+static void intc2_irq_gpio_unmask(struct irq_data *d)
{
u32 imr;
imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
- imr |= (0x1 << (irq - MCFINTC2_GPIOIRQ0));
+ imr |= (0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
}
-static void intc2_irq_gpio_ack(unsigned int irq)
+static void intc2_irq_gpio_ack(struct irq_data *d)
{
- writel(0x1 << (irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR);
+ writel(0x1 << (d->irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR);
}
static struct irq_chip intc2_irq_gpio_chip = {
.name = "CF-INTC2",
- .mask = intc2_irq_gpio_mask,
- .unmask = intc2_irq_gpio_unmask,
- .ack = intc2_irq_gpio_ack,
+ .irq_mask = intc2_irq_gpio_mask,
+ .irq_unmask = intc2_irq_gpio_unmask,
+ .irq_ack = intc2_irq_gpio_ack,
};
static int __init mcf_intc2_init(void)
@@ -51,7 +51,7 @@ static int __init mcf_intc2_init(void)
/* GPIO interrupt sources */
for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
- irq_desc[irq].chip = &intc2_irq_gpio_chip;
+ set_irq_chip(irq, &intc2_irq_gpio_chip);
set_irq_handler(irq, handle_edge_irq);
}
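
The interrupt-controller conversions here and in the m68knommu files that follow all move to the irq_data-based irq_chip callbacks (the non-deprecated genirq interface selected via GENERIC_HARDIRQS_NO_DEPRECATED above); the common shape, as a sketch with hypothetical names:

static void example_irq_mask(struct irq_data *d)
{
        unsigned int irq = d->irq;      /* the irq number now arrives via irq_data */

        /* ... mask 'irq' in the controller's enable register ... */
}

static struct irq_chip example_irq_chip = {
        .name           = "EXAMPLE",
        .irq_mask       = example_irq_mask,     /* was .mask = f(unsigned int irq) */
};
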
diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c
index 3cf681c177aa..969ff0a467c6 100644
--- a/arch/m68knommu/platform/5272/intc.c
+++ b/arch/m68knommu/platform/5272/intc.c
@@ -78,8 +78,10 @@ static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = {
* an interrupt on this irq (for the external irqs). So this mask function
* is also an ack_mask function.
*/
-static void intc_irq_mask(unsigned int irq)
+static void intc_irq_mask(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
u32 v;
irq -= MCFINT_VECBASE;
@@ -88,8 +90,10 @@ static void intc_irq_mask(unsigned int irq)
}
}
-static void intc_irq_unmask(unsigned int irq)
+static void intc_irq_unmask(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
u32 v;
irq -= MCFINT_VECBASE;
@@ -98,8 +102,10 @@ static void intc_irq_unmask(unsigned int irq)
}
}
-static void intc_irq_ack(unsigned int irq)
+static void intc_irq_ack(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
/* Only external interrupts are acked */
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
irq -= MCFINT_VECBASE;
@@ -113,8 +119,10 @@ static void intc_irq_ack(unsigned int irq)
}
}
-static int intc_irq_set_type(unsigned int irq, unsigned int type)
+static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
+ unsigned int irq = d->irq;
+
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
irq -= MCFINT_VECBASE;
if (intc_irqmap[irq].ack) {
@@ -137,20 +145,17 @@ static int intc_irq_set_type(unsigned int irq, unsigned int type)
*/
static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
{
- kstat_incr_irqs_this_cpu(irq, desc);
- desc->status |= IRQ_INPROGRESS;
- desc->chip->ack(irq);
- handle_IRQ_event(irq, desc->action);
- desc->status &= ~IRQ_INPROGRESS;
+ get_irq_desc_chip(desc)->irq_ack(&desc->irq_data);
+ handle_simple_irq(irq, desc);
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
- .mask = intc_irq_mask,
- .unmask = intc_irq_unmask,
- .mask_ack = intc_irq_mask,
- .ack = intc_irq_ack,
- .set_type = intc_irq_set_type,
+ .irq_mask = intc_irq_mask,
+ .irq_unmask = intc_irq_unmask,
+ .irq_mask_ack = intc_irq_mask,
+ .irq_ack = intc_irq_ack,
+ .irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index 2a3af193ccd3..e5631831a200 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -135,20 +135,20 @@ void process_int(int vec, struct pt_regs *fp)
}
}
-static void intc_irq_unmask(unsigned int irq)
+static void intc_irq_unmask(struct irq_data *d)
{
- IMR &= ~(1<<irq);
+ IMR &= ~(1 << d->irq);
}
-static void intc_irq_mask(unsigned int irq)
+static void intc_irq_mask(struct irq_data *d)
{
- IMR |= (1<<irq);
+ IMR |= (1 << d->irq);
}
static struct irq_chip intc_irq_chip = {
.name = "M68K-INTC",
- .mask = intc_irq_mask,
- .unmask = intc_irq_unmask,
+ .irq_mask = intc_irq_mask,
+ .irq_unmask = intc_irq_unmask,
};
/*
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index a29041c1a8a0..8de3feb568c6 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -37,26 +37,26 @@ extern void *_ramvec[];
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
-static void intc_irq_unmask(unsigned int irq)
+static void intc_irq_unmask(struct irq_data *d)
{
- pquicc->intr_cimr |= (1 << irq);
+ pquicc->intr_cimr |= (1 << d->irq);
}
-static void intc_irq_mask(unsigned int irq)
+static void intc_irq_mask(struct irq_data *d)
{
- pquicc->intr_cimr &= ~(1 << irq);
+ pquicc->intr_cimr &= ~(1 << d->irq);
}
-static void intc_irq_ack(unsigned int irq)
+static void intc_irq_ack(struct irq_data *d)
{
- pquicc->intr_cisr = (1 << irq);
+ pquicc->intr_cisr = (1 << d->irq);
}
static struct irq_chip intc_irq_chip = {
.name = "M68K-INTC",
- .mask = intc_irq_mask,
- .unmask = intc_irq_unmask,
- .ack = intc_irq_ack,
+ .irq_mask = intc_irq_mask,
+ .irq_unmask = intc_irq_unmask,
+ .irq_ack = intc_irq_ack,
};
/*
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c
index 85daa2b3001a..71e07fb69302 100644
--- a/arch/m68knommu/platform/coldfire/intc-2.c
+++ b/arch/m68knommu/platform/coldfire/intc-2.c
@@ -43,8 +43,10 @@ static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6);
#define NR_VECS 64
#endif
-static void intc_irq_mask(unsigned int irq)
+static void intc_irq_mask(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) {
unsigned long imraddr;
u32 val, imrbit;
@@ -64,8 +66,10 @@ static void intc_irq_mask(unsigned int irq)
}
}
-static void intc_irq_unmask(unsigned int irq)
+static void intc_irq_unmask(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) {
unsigned long intaddr, imraddr, icraddr;
u32 val, imrbit;
@@ -93,16 +97,16 @@ static void intc_irq_unmask(unsigned int irq)
}
}
-static int intc_irq_set_type(unsigned int irq, unsigned int type)
+static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
return 0;
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
- .mask = intc_irq_mask,
- .unmask = intc_irq_unmask,
- .set_type = intc_irq_set_type,
+ .irq_mask = intc_irq_mask,
+ .irq_unmask = intc_irq_unmask,
+ .irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c
index bb7048636140..69a3b679730b 100644
--- a/arch/m68knommu/platform/coldfire/intc-simr.c
+++ b/arch/m68knommu/platform/coldfire/intc-simr.c
@@ -20,8 +20,10 @@
#include <asm/mcfsim.h>
#include <asm/traps.h>
-static void intc_irq_mask(unsigned int irq)
+static void intc_irq_mask(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if (irq >= MCFINT_VECBASE) {
if (irq < MCFINT_VECBASE + 64)
__raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_SIMR);
@@ -30,8 +32,10 @@ static void intc_irq_mask(unsigned int irq)
}
}
-static void intc_irq_unmask(unsigned int irq)
+static void intc_irq_unmask(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if (irq >= MCFINT_VECBASE) {
if (irq < MCFINT_VECBASE + 64)
__raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_CIMR);
@@ -40,8 +44,10 @@ static void intc_irq_unmask(unsigned int irq)
}
}
-static int intc_irq_set_type(unsigned int irq, unsigned int type)
+static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
+ unsigned int irq = d->irq;
+
if (irq >= MCFINT_VECBASE) {
if (irq < MCFINT_VECBASE + 64)
__raw_writeb(5, MCFINTC0_ICR0 + irq - MCFINT_VECBASE);
@@ -53,9 +59,9 @@ static int intc_irq_set_type(unsigned int irq, unsigned int type)
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
- .mask = intc_irq_mask,
- .unmask = intc_irq_unmask,
- .set_type = intc_irq_set_type,
+ .irq_mask = intc_irq_mask,
+ .irq_unmask = intc_irq_unmask,
+ .irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c
index 60d2fcbe182b..d648081a63f6 100644
--- a/arch/m68knommu/platform/coldfire/intc.c
+++ b/arch/m68knommu/platform/coldfire/intc.c
@@ -111,28 +111,28 @@ void mcf_autovector(int irq)
#endif
}
-static void intc_irq_mask(unsigned int irq)
+static void intc_irq_mask(struct irq_data *d)
{
- if (mcf_irq2imr[irq])
- mcf_setimr(mcf_irq2imr[irq]);
+ if (mcf_irq2imr[d->irq])
+ mcf_setimr(mcf_irq2imr[d->irq]);
}
-static void intc_irq_unmask(unsigned int irq)
+static void intc_irq_unmask(struct irq_data *d)
{
- if (mcf_irq2imr[irq])
- mcf_clrimr(mcf_irq2imr[irq]);
+ if (mcf_irq2imr[d->irq])
+ mcf_clrimr(mcf_irq2imr[d->irq]);
}
-static int intc_irq_set_type(unsigned int irq, unsigned int type)
+static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
return 0;
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
- .mask = intc_irq_mask,
- .unmask = intc_irq_unmask,
- .set_type = intc_irq_set_type,
+ .irq_mask = intc_irq_mask,
+ .irq_unmask = intc_irq_unmask,
+ .irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 0c68764ab547..10717669e0c2 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -104,11 +104,22 @@ struct pci_controller {
int global_number; /* PCI domain number */
};
+#ifdef CONFIG_PCI
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
return bus->sysdata;
}
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ struct pci_controller *host;
+
+ if (bus->self)
+ return pci_device_to_OF_node(bus->self);
+ host = pci_bus_to_host(bus);
+ return host ? host->dn : NULL;
+}
+
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
/* No specific ISA handling on ppc32 at this stage, it
@@ -116,6 +127,7 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
*/
return 0;
}
+#endif /* CONFIG_PCI */
/* These are used for config access before all the PCI probing
has been done. */
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 2e72af078b05..d0890d36ef61 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -64,21 +64,6 @@ extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-struct of_irq;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index 9ae24f4b882b..47187cc2cf00 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -2,88 +2,11 @@
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/pci_regs.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-
-#ifdef CONFIG_PCI
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
- struct device_node *dn, *ppnode;
- struct pci_dev *ppdev;
- u32 lspec;
- u32 laddr[3];
- u8 pin;
- int rc;
-
- /* Check if we have a device node, if yes, fallback to standard OF
- * parsing
- */
- dn = pci_device_to_OF_node(pdev);
- if (dn)
- return of_irq_map_one(dn, 0, out_irq);
-
- /* Ok, we don't, time to have fun. Let's start by building up an
- * interrupt spec. we assume #interrupt-cells is 1, which is standard
- * for PCI. If you do different, then don't use that routine.
- */
- rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
- if (rc != 0)
- return rc;
- /* No pin, exit */
- if (pin == 0)
- return -ENODEV;
-
- /* Now we walk up the PCI tree */
- lspec = pin;
- for (;;) {
- /* Get the pci_dev of our parent */
- ppdev = pdev->bus->self;
-
- /* Ouch, it's a host bridge... */
- if (ppdev == NULL) {
- struct pci_controller *host;
- host = pci_bus_to_host(pdev->bus);
- ppnode = host ? host->dn : NULL;
- /* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
- } else
- /* We found a P2P bridge, check if it has a node */
- ppnode = pci_device_to_OF_node(ppdev);
-
- /* Ok, we have found a parent with a device-node, hand over to
- * the OF parsing code.
- * We build a unit address from the linux device to be used for
- * resolution. Note that we use the linux bus number which may
- * not match your firmware bus numbering.
- * Fortunately, in most cases, interrupt-map-mask doesn't
- * include the bus number as part of the matching.
- * You should still be careful about that though if you intend
- * to rely on this function (you ship a firmware that doesn't
- * create device nodes for all PCI devices).
- */
- if (ppnode)
- break;
-
- /* We can only get here if we hit a P2P bridge with no node,
- * let's do standard swizzling and try again
- */
- lspec = pci_swizzle_interrupt_pin(pdev, lspec);
- pdev = ppdev;
- }
-
- laddr[0] = (pdev->bus->number << 16)
- | (pdev->devfn << 8);
- laddr[1] = laddr[2] = 0;
- return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
unsigned long *busno, unsigned long *phys, unsigned long *size)
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index e363615d6798..1e01a1253631 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_pci.h>
#include <asm/processor.h>
#include <asm/io.h>
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
index 3c3d808d7ce0..92728a6cfd80 100644
--- a/arch/microblaze/pci/pci_32.c
+++ b/arch/microblaze/pci/pci_32.c
@@ -332,6 +332,7 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose)
hose->global_number);
return;
}
+ bus->dev.of_node = of_node_get(node);
bus->secondary = hose->first_busno;
hose->bus = bus;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5ecc0566bc2..6ee7f7286bf0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -854,6 +854,9 @@ config GPIO_TXX9
config CFE
bool
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT
+
config DMA_COHERENT
bool
diff --git a/arch/mips/include/asm/errno.h b/arch/mips/include/asm/errno.h
index a0efc73819e4..6dcd3583ed04 100644
--- a/arch/mips/include/asm/errno.h
+++ b/arch/mips/include/asm/errno.h
@@ -121,6 +121,8 @@
#define ERFKILL 167 /* Operation not possible due to RF-kill */
+#define EHWPOISON 168 /* Memory page has hardware error */
+
#define EDQUOT 1133 /* Quota exceeded */
#ifdef __KERNEL__
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 8987a76e9676..564ab81d6cdc 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -30,6 +30,7 @@ extern struct platform_device jz4740_i2s_device;
extern struct platform_device jz4740_pcm_device;
extern struct platform_device jz4740_codec_device;
extern struct platform_device jz4740_adc_device;
+extern struct platform_device jz4740_wdt_device;
void jz4740_serial_device_register(void);
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 1cc9e544d16b..10929e2bc6d8 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -289,3 +289,19 @@ void jz4740_serial_device_register(void)
platform_device_register(&jz4740_uart_device);
}
+
+/* Watchdog */
+static struct resource jz4740_wdt_resources[] = {
+ {
+ .start = JZ4740_WDT_BASE_ADDR,
+ .end = JZ4740_WDT_BASE_ADDR + 0x10 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device jz4740_wdt_device = {
+ .name = "jz4740-wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(jz4740_wdt_resources),
+ .resource = jz4740_wdt_resources,
+};
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 570607b376b5..832afbb87588 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -115,7 +115,7 @@ SECTIONS
EXIT_DATA
}
- PERCPU(PAGE_SIZE)
+ PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fde..ab52b7cf3b6b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
spinlock_t tc_list_lock;
struct list_head tc_list; /* Thread contexts */
} vpecontrol = {
- .vpe_list_lock = SPIN_LOCK_UNLOCKED,
+ .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
- .tc_list_lock = SPIN_LOCK_UNLOCKED,
+ .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 75da468090b9..5b955000626d 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -104,8 +104,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
unsigned tsc, elapse;
irqreturn_t ret;
- write_seqlock(&xtime_lock);
-
while (tsc = get_cycles(),
elapse = tsc - mn10300_last_tsc, /* time elapsed since last
* tick */
@@ -114,11 +112,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
mn10300_last_tsc += MN10300_TSC_PER_HZ;
/* advance the kernel's time tracking system */
- do_timer(1);
+ xtime_update(1);
}
- write_sequnlock(&xtime_lock);
-
ret = local_timer_interrupt();
#ifdef CONFIG_SMP
send_IPI_allbutself(LOCAL_TIMER_IPI);
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index febbeee7f2f5..968bcd2cb022 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
.exit.text : { EXIT_TEXT; }
.exit.data : { EXIT_DATA; }
- PERCPU(PAGE_SIZE)
+ PERCPU(32, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index fed2946f7335..2e3d62b2b150 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -15,6 +15,8 @@ config PARISC
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU
+ select GENERIC_HARDIRQS_NO_DEPRECATED
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f388a85bba11..d18328b3f938 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,8 +26,6 @@ void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
-void flush_user_dcache_page(unsigned long);
-void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);
@@ -37,6 +35,13 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+ flush_kernel_dcache_page_addr(page_address(page));
+}
+
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
/* vmap range flushes and invalidates. Architecturally, we don't need
@@ -50,6 +55,16 @@ static inline void flush_kernel_vmap_range(void *vaddr, int size)
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
+ unsigned long start = (unsigned long)vaddr;
+ void *cursor = vaddr;
+
+ for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+ struct page *page = vmalloc_to_page(cursor);
+
+ if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ flush_kernel_dcache_page(page);
+ }
+ flush_kernel_dcache_range_asm(start, start + size);
}
#define flush_cache_vmap(start, end) flush_cache_all()
@@ -90,19 +105,15 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (PageAnon(page))
- flush_user_dcache_page(vmaddr);
-}
-
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-void flush_kernel_dcache_page_addr(void *addr);
-static inline void flush_kernel_dcache_page(struct page *page)
-{
- flush_kernel_dcache_page_addr(page_address(page));
+ flush_dcache_page_asm(page_to_phys(page), vmaddr);
}
#ifdef CONFIG_DEBUG_RODATA
diff --git a/arch/parisc/include/asm/errno.h b/arch/parisc/include/asm/errno.h
index 9992abdd782d..135ad6047e51 100644
--- a/arch/parisc/include/asm/errno.h
+++ b/arch/parisc/include/asm/errno.h
@@ -122,4 +122,6 @@
#define ERFKILL 256 /* Operation not possible due to RF-kill */
+#define EHWPOISON 257 /* Memory page has hardware error */
+
#endif
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
index c67dccf2e31f..1073599a7be9 100644
--- a/arch/parisc/include/asm/irq.h
+++ b/arch/parisc/include/asm/irq.h
@@ -32,15 +32,10 @@ static __inline__ int irq_canonicalize(int irq)
}
struct irq_chip;
+struct irq_data;
-/*
- * Some useful "we don't have to do anything here" handlers. Should
- * probably be provided by the generic code.
- */
-void no_ack_irq(unsigned int irq);
-void no_end_irq(unsigned int irq);
-void cpu_ack_irq(unsigned int irq);
-void cpu_eoi_irq(unsigned int irq);
+void cpu_ack_irq(struct irq_data *d);
+void cpu_eoi_irq(struct irq_data *d);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
@@ -49,7 +44,7 @@ extern unsigned long txn_alloc_addr(unsigned int);
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
-extern int cpu_check_affinity(unsigned int irq, const struct cpumask *dest);
+extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 6f1f65d3c0ef..5d7b8ce9fdf3 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -138,8 +138,7 @@ struct vm_area_struct;
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
-#define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */
- /* for cache flushing only */
+/* bit 21 was formerly the FLUSH bit but is now unused */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -173,7 +172,6 @@ struct vm_area_struct;
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
-#define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))
@@ -213,7 +211,6 @@ struct vm_area_struct;
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
-#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)
/*
@@ -261,7 +258,7 @@ extern unsigned long *empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
+#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
@@ -444,13 +441,10 @@ struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
- pte_t pte;
spin_lock(&pa_dbit_lock);
- pte = old_pte = *ptep;
- pte_val(pte) &= ~_PAGE_PRESENT;
- pte_val(pte) |= _PAGE_FLUSH;
- set_pte_at(mm,addr,ptep,pte);
+ old_pte = *ptep;
+ pte_clear(mm,addr,ptep);
spin_unlock(&pa_dbit_lock);
return old_pte;
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index d054f3da3ff5..3f11331c2775 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -27,12 +27,17 @@
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
+#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
@@ -259,81 +264,13 @@ void disable_sr_hashing(void)
panic("SpaceID hashing is still on!\n");
}
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
- pmd_t *pmd;
- pte_t pte;
-
- if(pgd_none(*pgd))
- return 0;
-
- pmd = pmd_offset(pgd, addr);
- if(pmd_none(*pmd) || pmd_bad(*pmd))
- return 0;
-
- /* We cannot take the pte lock here: flush_cache_page is usually
- * called with pte lock already held. Whereas flush_dcache_page
- * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
- * the vma itself is secure, but the pte might come or go racily.
- */
- pte = *pte_offset_map(pmd, addr);
- /* But pte_unmap() does nothing on this architecture */
-
- /* Filter out coincidental file entries and swap entries */
- if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
- return 0;
-
- return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process. cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current. We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
- unsigned long vmaddr)
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long physaddr)
{
- /* save the current process space and pgd */
- unsigned long space = mfsp(3), pgd = mfctl(25);
-
- /* we don't mind taking interrupts since they may not
- * do anything with user space, but we can't
- * be preempted here */
- preempt_disable();
-
- /* make us current */
- mtctl(__pa(vma->vm_mm->pgd), 25);
- mtsp(vma->vm_mm->context, 3);
-
- flush_user_dcache_page(vmaddr);
- if(vma->vm_flags & VM_EXEC)
- flush_user_icache_page(vmaddr);
-
- /* put the old current process back */
- mtsp(space, 3);
- mtctl(pgd, 25);
- preempt_enable();
-}
-
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
- if (likely(vma->vm_mm->context == mfsp(3))) {
- flush_user_dcache_page(vmaddr);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_page(vmaddr);
- } else {
- flush_user_cache_page_non_current(vma, vmaddr);
- }
+ flush_dcache_page_asm(physaddr, vmaddr);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_page_asm(physaddr, vmaddr);
}
void flush_dcache_page(struct page *page)
@@ -342,10 +279,8 @@ void flush_dcache_page(struct page *page)
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
unsigned long offset;
- unsigned long addr;
+ unsigned long addr, old_addr = 0;
pgoff_t pgoff;
- unsigned long pfn = page_to_pfn(page);
-
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
@@ -369,20 +304,11 @@ void flush_dcache_page(struct page *page)
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
- /* Flush instructions produce non access tlb misses.
- * On PA, we nullify these instructions rather than
- * taking a page fault if the pte doesn't exist.
- * This is just for speed. If the page translation
- * isn't there, there's no point exciting the
- * nadtlb handler into a nullification frenzy.
- *
- * Make sure we really have this page: the private
- * mappings may cover this area but have COW'd this
- * particular page.
- */
- if (translation_exists(mpnt, addr, pfn)) {
- __flush_cache_page(mpnt, addr);
- break;
+ if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+ __flush_cache_page(mpnt, addr, page_to_phys(page));
+ if (old_addr)
+ printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+ old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
@@ -573,7 +499,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- if (likely(translation_exists(vma, vmaddr, pfn)))
- __flush_cache_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 6337adef30f6..e5477092a5d4 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -225,22 +225,13 @@
#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
*/
.macro naitlb_11 code
mfctl %isr,spc
- b itlb_miss_11
+ b naitlb_miss_11
mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
.align 32
.endm
@@ -248,26 +239,17 @@
/*
* naitlb miss interruption handler (parisc 2.0)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
*/
.macro naitlb_20 code
mfctl %isr,spc
#ifdef CONFIG_64BIT
- b itlb_miss_20w
+ b naitlb_miss_20w
#else
- b itlb_miss_20
+ b naitlb_miss_20
#endif
mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
.align 32
.endm
@@ -581,7 +563,24 @@
copy \va,\tmp1
depi 0,31,23,\tmp1
cmpb,COND(<>),n \tmp,\tmp1,\fault
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
+ mfctl %cr19,\tmp /* iir */
+ /* get the opcode (first six bits) into \tmp */
+ extrw,u \tmp,5,6,\tmp
+ /*
+ * Only setting the T bit prevents data cache movein
+ * Setting access rights to zero prevents instruction cache movein
+ *
+ * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
+ * to type field and _PAGE_READ goes to top bit of PL1
+ */
+ ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
+ /*
+ * so if the opcode is one (i.e. this is a memory management
+ * instruction) nullify the next load so \prot is only T.
+ * Otherwise this is a normal data operation
+ */
+ cmpiclr,= 0x01,\tmp,%r0
+ ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
depd,z \prot,8,7,\prot
/*
* OK, it is in the temp alias region, check whether "from" or "to".
@@ -631,11 +630,7 @@ ENTRY(fault_vector_20)
def 13
def 14
dtlb_20 15
-#if 0
naitlb_20 16
-#else
- def 16
-#endif
nadtlb_20 17
def 18
def 19
@@ -678,11 +673,7 @@ ENTRY(fault_vector_11)
def 13
def 14
dtlb_11 15
-#if 0
naitlb_11 16
-#else
- def 16
-#endif
nadtlb_11 17
def 18
def 19
@@ -1203,7 +1194,7 @@ nadtlb_miss_20w:
get_pgd spc,ptp
space_check spc,t0,nadtlb_fault
- L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
+ L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
update_ptep ptp,pte,t0,t1
@@ -1214,16 +1205,8 @@ nadtlb_miss_20w:
rfir
nop
-nadtlb_check_flush_20w:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
-
- /* Drop prot bits from pte and convert to page addr for idtlbt */
- convert_for_tlb_insert20 pte
+nadtlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
idtlbt pte,prot
@@ -1255,25 +1238,7 @@ dtlb_miss_11:
nop
dtlb_check_alias_11:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depwi 0,31,23,t1
- cmpb,<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depw,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrw,u,= va,9,1,r0
- or,tr %r23,%r0,pte /* If "from" use "from" page */
- or %r26,%r0,pte /* else "to", use "to" page */
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
idtlba pte,(va)
idtlbp prot,(va)
@@ -1286,7 +1251,7 @@ nadtlb_miss_11:
space_check spc,t0,nadtlb_fault
- L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
update_ptep ptp,pte,t0,t1
@@ -1304,26 +1269,11 @@ nadtlb_miss_11:
rfir
nop
-nadtlb_check_flush_11:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- zdepi 7,7,3,prot
- depi 1,10,1,prot
+nadtlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,ASM_PFN_PTE_SHIFT,pte
- SHRREG pte,(ASM_PFN_PTE_SHIFT-(31-26)),pte
-
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
-
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
-
- mtsp t0, %sr1 /* Restore sr1 */
+ idtlba pte,(va)
+ idtlbp prot,(va)
rfir
nop
@@ -1359,7 +1309,7 @@ nadtlb_miss_20:
space_check spc,t0,nadtlb_fault
- L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
update_ptep ptp,pte,t0,t1
@@ -1372,21 +1322,14 @@ nadtlb_miss_20:
rfir
nop
-nadtlb_check_flush_20:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
-
- /* Drop prot bits from pte and convert to page addr for idtlbt */
- convert_for_tlb_insert20 pte
+nadtlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
idtlbt pte,prot
rfir
nop
+
#endif
nadtlb_emulate:
@@ -1484,6 +1427,36 @@ itlb_miss_20w:
rfir
nop
+naitlb_miss_20w:
+
+ /*
+ * I miss is a little different, since we allow users to fault
+ * on the gateway page which is in the kernel address space.
+ */
+
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,naitlb_fault
+
+ L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+naitlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
#else
itlb_miss_11:
@@ -1508,6 +1481,38 @@ itlb_miss_11:
rfir
nop
+naitlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,naitlb_fault
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+naitlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,itlb_fault
+
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
+ rfir
+ nop
+
+
itlb_miss_20:
get_pgd spc,ptp
@@ -1526,6 +1531,32 @@ itlb_miss_20:
rfir
nop
+naitlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,naitlb_fault
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+naitlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
#endif
#ifdef CONFIG_64BIT
@@ -1662,6 +1693,10 @@ nadtlb_fault:
b intr_save
ldi 17,%r8
+naitlb_fault:
+ b intr_save
+ ldi 16,%r8
+
dtlb_fault:
b intr_save
ldi 15,%r8
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index d7d94b845dc2..cb450e1e79b3 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0;
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
-static void cpu_mask_irq(unsigned int irq)
+static void cpu_mask_irq(struct irq_data *d)
{
- unsigned long eirr_bit = EIEM_MASK(irq);
+ unsigned long eirr_bit = EIEM_MASK(d->irq);
cpu_eiem &= ~eirr_bit;
/* Do nothing on the other CPUs. If they get this interrupt,
@@ -63,7 +63,7 @@ static void cpu_mask_irq(unsigned int irq)
* then gets disabled */
}
-static void cpu_unmask_irq(unsigned int irq)
+static void __cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -75,9 +75,14 @@ static void cpu_unmask_irq(unsigned int irq)
smp_send_all_nop();
}
-void cpu_ack_irq(unsigned int irq)
+static void cpu_unmask_irq(struct irq_data *d)
+{
+ __cpu_unmask_irq(d->irq);
+}
+
+void cpu_ack_irq(struct irq_data *d)
{
- unsigned long mask = EIEM_MASK(irq);
+ unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* Clear in EIEM so we can no longer process */
@@ -90,9 +95,9 @@ void cpu_ack_irq(unsigned int irq)
mtctl(mask, 23);
}
-void cpu_eoi_irq(unsigned int irq)
+void cpu_eoi_irq(struct irq_data *d)
{
- unsigned long mask = EIEM_MASK(irq);
+ unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* set it in the eiems---it's no longer in process */
@@ -103,15 +108,16 @@ void cpu_eoi_irq(unsigned int irq)
}
#ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
- if (CHECK_IRQ_PER_CPU(irq)) {
+ if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) {
/* Bad linux design decision. The mask has already
- * been set; we must reset it */
- cpumask_setall(irq_desc[irq].affinity);
+ * been set; we must reset it. Will fix - tglx
+ */
+ cpumask_setall(d->affinity);
return -EINVAL;
}
@@ -121,33 +127,34 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
return cpu_dest;
}
-static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
{
int cpu_dest;
- cpu_dest = cpu_check_affinity(irq, dest);
+ cpu_dest = cpu_check_affinity(d, dest);
if (cpu_dest < 0)
return -1;
- cpumask_copy(irq_desc[irq].affinity, dest);
+ cpumask_copy(d->affinity, dest);
return 0;
}
#endif
static struct irq_chip cpu_interrupt_type = {
- .name = "CPU",
- .mask = cpu_mask_irq,
- .unmask = cpu_unmask_irq,
- .ack = cpu_ack_irq,
- .eoi = cpu_eoi_irq,
+ .name = "CPU",
+ .irq_mask = cpu_mask_irq,
+ .irq_unmask = cpu_unmask_irq,
+ .irq_ack = cpu_ack_irq,
+ .irq_eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
- .set_affinity = cpu_set_affinity_irq,
+ .irq_set_affinity = cpu_set_affinity_irq,
#endif
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
- .retrigger = NULL,
+ .irq_retrigger = NULL,
};
int show_interrupts(struct seq_file *p, void *v)
@@ -181,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
@@ -233,14 +240,14 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
if (irq_desc[irq].action)
return -EBUSY;
- if (irq_desc[irq].chip != &cpu_interrupt_type)
+ if (get_irq_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
/* for iosapic interrupts */
if (type) {
set_irq_chip_and_handler(irq, type, handle_percpu_irq);
set_irq_chip_data(irq, data);
- cpu_unmask_irq(irq);
+ __cpu_unmask_irq(irq);
}
return 0;
}
@@ -289,7 +296,8 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
- cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+ struct irq_data *d = irq_get_irq_data(irq);
+ cpumask_copy(d->affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
@@ -333,6 +341,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
+ struct irq_desc *desc;
cpumask_t dest;
#endif
@@ -346,8 +355,9 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
- cpumask_copy(&dest, irq_desc[irq].affinity);
- if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+ desc = irq_to_desc(irq);
+ cpumask_copy(&dest, desc->irq_data.affinity);
+ if (CHECK_IRQ_PER_CPU(desc->status) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 09b77b2553c6..a85823668cba 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -608,93 +608,131 @@ ENTRY(__clear_user_page_asm)
.procend
ENDPROC(__clear_user_page_asm)
-ENTRY(flush_kernel_dcache_page_asm)
+ENTRY(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+	 /* FIXME: page size dependent */
+#endif
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+ pdtlb 0(%r28)
+
ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), %r23
+ ldw R%dcache_stride(%r1), %r1
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
- add %r26, %r25, %r25
- sub %r25, %r23, %r25
-
-
-1: fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- cmpb,COND(<<) %r26, %r25,1b
- fdc,m %r23(%r26)
+ add %r28, %r25, %r25
+ sub %r25, %r1, %r25
+
+
+1: fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+ fdc,m %r1(%r28)
sync
bv %r0(%r2)
- nop
+ pdtlb (%r25)
.exit
.procend
-ENDPROC(flush_kernel_dcache_page_asm)
-
-ENTRY(flush_user_dcache_page)
+ENDPROC(flush_dcache_page_asm)
+
+ENTRY(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
- ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), %r23
-
+ ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
- depdi,z 1,63-PAGE_SHIFT,1, %r25
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+	 /* FIXME: page size dependent */
+#endif
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
#else
- depwi,z 1,31-PAGE_SHIFT,1, %r25
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
#endif
- add %r26, %r25, %r25
- sub %r25, %r23, %r25
+ /* Purge any old translation */
-1: fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- cmpb,COND(<<) %r26, %r25,1b
- fdc,m %r23(%sr3, %r26)
+ pitlb (%sr0,%r28)
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r1
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r28, %r25, %r25
+ sub %r25, %r1, %r25
+
+
+1: fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ fic,m %r1(%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+ fic,m %r1(%r28)
sync
bv %r0(%r2)
- nop
+ pitlb (%sr0,%r25)
.exit
.procend
-ENDPROC(flush_user_dcache_page)
+ENDPROC(flush_icache_page_asm)
-ENTRY(flush_user_icache_page)
+ENTRY(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -711,23 +749,23 @@ ENTRY(flush_user_icache_page)
sub %r25, %r23, %r25
-1: fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
+1: fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25,1b
- fic,m %r23(%sr3, %r26)
+ fdc,m %r23(%r26)
sync
bv %r0(%r2)
@@ -735,8 +773,7 @@ ENTRY(flush_user_icache_page)
.exit
.procend
-ENDPROC(flush_user_icache_page)
-
+ENDPROC(flush_kernel_dcache_page_asm)
ENTRY(purge_kernel_dcache_page)
.proc
@@ -780,69 +817,6 @@ ENTRY(purge_kernel_dcache_page)
.procend
ENDPROC(purge_kernel_dcache_page)
-#if 0
- /* Currently not used, but it still is a possible alternate
- * solution.
- */
-
-ENTRY(flush_alias_page)
- .proc
- .callinfo NO_CALLS
- .entry
-
- tophys_r1 %r26
-
- ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
- extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
- depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,12, %r28 /* Clear any offset bits */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
-#endif
-
- /* Purge any old translation */
-
- pdtlb 0(%r28)
-
- ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), %r23
-
-#ifdef CONFIG_64BIT
- depdi,z 1, 63-PAGE_SHIFT,1, %r29
-#else
- depwi,z 1, 31-PAGE_SHIFT,1, %r29
-#endif
- add %r28, %r29, %r29
- sub %r29, %r23, %r29
-
-1: fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- cmpb,COND(<<) %r28, %r29, 1b
- fdc,m %r23(%r28)
-
- sync
- bv %r0(%r2)
- nop
- .exit
-
- .procend
-#endif
.export flush_user_dcache_range_asm
@@ -865,7 +839,6 @@ flush_user_dcache_range_asm:
.exit
.procend
-ENDPROC(flush_alias_page)
ENTRY(flush_kernel_dcache_range_asm)
.proc
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 05511ccb61d2..45b7389d77aa 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -162,11 +162,8 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
}
- if (cpu == 0) {
- write_seqlock(&xtime_lock);
- do_timer(ticks_elapsed);
- write_sequnlock(&xtime_lock);
- }
+ if (cpu == 0)
+ xtime_update(ticks_elapsed);
return IRQ_HANDLED;
}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index d64a6bbec2aa..8f1e4efd143e 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
EXIT_DATA
}
- PERCPU(PAGE_SIZE)
+ PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7d69e9bf5e64..15fb3e79ae28 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,6 +134,7 @@ config PPC
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select IRQ_PER_CPU
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 5b27a4b74b79..2779f08313a5 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -172,6 +172,19 @@
interrupts = <0x1e 4>;
};
+ USBOTG0: usbotg@bff80000 {
+ compatible = "amcc,dwc-otg";
+ reg = <0x4 0xbff80000 0x10000>;
+ interrupt-parent = <&USBOTG0>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x1 0x2>;
+ interrupt-map = </* USB-OTG */ 0x0 &UIC2 0x1c 0x4
+ /* HIGH-POWER */ 0x1 &UIC1 0x1a 0x8
+ /* DMA */ 0x2 &UIC0 0xc 0x4>;
+ };
+
SATA0: sata@bffd1000 {
compatible = "amcc,sata-460ex";
reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
@@ -233,6 +246,11 @@
};
};
+ cpld@2,0 {
+ compatible = "amcc,ppc460ex-bcsr";
+ reg = <2 0x0 0x9>;
+ };
+
ndfc@3,0 {
compatible = "ibm,ndfc";
reg = <0x00000003 0x00000000 0x00002000>;
@@ -307,6 +325,12 @@
interrupts = <0x3 0x4>;
};
+ GPIO0: gpio@ef600b00 {
+ compatible = "ibm,ppc4xx-gpio";
+ reg = <0xef600b00 0x00000048>;
+ gpio-controller;
+ };
+
ZMII0: emac-zmii@ef600d00 {
compatible = "ibm,zmii-460ex", "ibm,zmii";
reg = <0xef600d00 0x0000000c>;
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f0a211d96923..be3cdf9134ce 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -154,6 +154,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
+#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
@@ -465,7 +466,7 @@ enum {
CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
#ifdef CONFIG_PPC_47x
- CPU_FTRS_47X |
+ CPU_FTRS_47X | CPU_FTR_476_DD2 |
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
* claims to support kexec.
*/
int (*machine_kexec_prepare)(struct kimage *image);
+
+ /* Called to perform the _real_ kexec.
+ * Do NOT allocate memory or fail here. We are past the point of
+ * no return.
+ */
+ void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 51e9e6f90d12..5e156e034fe2 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -164,13 +164,23 @@ extern void setup_indirect_pci(struct pci_controller* hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags);
-#ifndef CONFIG_PPC64
-
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
return bus->sysdata;
}
+#ifndef CONFIG_PPC64
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ struct pci_controller *host;
+
+ if (bus->self)
+ return pci_device_to_OF_node(bus->self);
+ host = pci_bus_to_host(bus);
+ return host ? host->dn : NULL;
+}
+
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
/* No specific ISA handling on ppc32 at this stage, it
@@ -218,19 +228,10 @@ extern void * update_dn_pci_info(struct device_node *dn, void *data);
/* Get a device_node from a pci_dev. This code must be fast except
* in the case where the sysdata is incorrect and needs to be fixed
- * up (this will only happen once).
- * In this case the sysdata will have been inherited from a PCI host
- * bridge or a PCI-PCI bridge further up the tree, so it will point
- * to a valid struct pci_dn, just not the one we want.
- */
+ * up (this will only happen once). */
static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
{
- struct device_node *dn = dev->sysdata;
- struct pci_dn *pdn = dn->data;
-
- if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number)
- return dn; /* fast path. sysdata is good */
- return fetch_dev_dn(dev);
+ return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev);
}
static inline int pci_device_from_OF_node(struct device_node *np,
@@ -248,7 +249,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
if (bus->self)
return pci_device_to_OF_node(bus->self);
else
- return bus->sysdata; /* Must be root bus (PHB) */
+ return bus->dev.of_node; /* Must be root bus (PHB) */
}
/** Find the bus corresponding to the indicated device node */
@@ -260,14 +261,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);
/** Discover new pci devices under this bus, and add them */
extern void pcibios_add_pci_devices(struct pci_bus *bus);
-static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
-{
- struct device_node *busdn = bus->sysdata;
-
- BUG_ON(busdn == NULL);
- return PCI_DN(busdn)->phb;
-}
-
extern void isa_bridge_find_early(struct pci_controller *hose);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index a20a9ad2258b..7d7790954e02 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -201,7 +201,7 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
-extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata);
+extern void pcibios_scan_phb(struct pci_controller *hose);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index d72757585595..c189aa5fe1f4 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -70,21 +70,6 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; }
#endif
#define of_node_to_nid of_node_to_nid
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-struct of_irq;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
extern void of_instantiate_rtc(void);
/* These includes are put at the bottom because they may contain things
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 8447d89fbe72..bb1e2cdeb9bf 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -13,11 +13,6 @@
* by Paul Mackerras <paulus@samba.org>.
*/
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
/*
* the semaphore definition
*/
@@ -33,47 +28,6 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-struct rw_semaphore {
- long count;
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ \
- RWSEM_UNLOCKED_VALUE, \
- __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) \
-}
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
- } while (0)
-
/*
* lock for reading
*/
@@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return sem->count != 0;
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e8e915ce3d8d..c9b68d07ac4f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1811,11 +1811,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
- { /* 476 core */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x11a50000,
+ { /* 476 DD2 core */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x11a52080,
.cpu_name = "476",
- .cpu_features = CPU_FTRS_47X,
+ .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
.mmu_features = MMU_FTR_TYPE_47x |
@@ -1839,6 +1839,20 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_47x,
.platform = "ppc470",
},
+ { /* 476 others */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x11a50000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index f62efdfd1769..c00d4ca1ee15 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -201,13 +201,14 @@ int ibmebus_register_driver(struct of_platform_driver *drv)
/* If the driver uses devices that ibmebus doesn't know, add them */
ibmebus_create_devices(drv->driver.of_match_table);
- return of_register_driver(drv, &ibmebus_bus_type);
+ drv->driver.bus = &ibmebus_bus_type;
+ return driver_register(&drv->driver);
}
EXPORT_SYMBOL(ibmebus_register_driver);
void ibmebus_unregister_driver(struct of_platform_driver *drv)
{
- of_unregister_driver(drv);
+ driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(ibmebus_unregister_driver);
@@ -308,15 +309,410 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
}
}
+
static struct bus_attribute ibmebus_bus_attrs[] = {
__ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe),
__ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove),
__ATTR_NULL
};
+static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
+{
+ const struct of_device_id *matches = drv->of_match_table;
+
+ if (!matches)
+ return 0;
+
+ return of_match_device(matches, dev) != NULL;
+}
+
+static int ibmebus_bus_device_probe(struct device *dev)
+{
+ int error = -ENODEV;
+ struct of_platform_driver *drv;
+ struct platform_device *of_dev;
+ const struct of_device_id *match;
+
+ drv = to_of_platform_driver(dev->driver);
+ of_dev = to_platform_device(dev);
+
+ if (!drv->probe)
+ return error;
+
+ of_dev_get(of_dev);
+
+ match = of_match_device(drv->driver.of_match_table, dev);
+ if (match)
+ error = drv->probe(of_dev, match);
+ if (error)
+ of_dev_put(of_dev);
+
+ return error;
+}
+
+static int ibmebus_bus_device_remove(struct device *dev)
+{
+ struct platform_device *of_dev = to_platform_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->remove)
+ drv->remove(of_dev);
+ return 0;
+}
+
+static void ibmebus_bus_device_shutdown(struct device *dev)
+{
+ struct platform_device *of_dev = to_platform_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(of_dev);
+}
+
+/*
+ * ibmebus_bus_device_attrs
+ */
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *ofdev;
+
+ ofdev = to_platform_device(dev);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *ofdev;
+
+ ofdev = to_platform_device(dev);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
+ buf[len] = '\n';
+ buf[len+1] = 0;
+ return len+1;
+}
+
+struct device_attribute ibmebus_bus_device_attrs[] = {
+ __ATTR_RO(devspec),
+ __ATTR_RO(name),
+ __ATTR_RO(modalias),
+ __ATTR_NULL
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct platform_device *of_dev = to_platform_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+ int ret = 0;
+
+ if (dev->driver && drv->suspend)
+ ret = drv->suspend(of_dev, mesg);
+ return ret;
+}
+
+static int ibmebus_bus_legacy_resume(struct device *dev)
+{
+ struct platform_device *of_dev = to_platform_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+ int ret = 0;
+
+ if (dev->driver && drv->resume)
+ ret = drv->resume(of_dev);
+ return ret;
+}
+
+static int ibmebus_bus_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void ibmebus_bus_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#ifdef CONFIG_SUSPEND
+
+static int ibmebus_bus_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = ibmebus_bus_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define ibmebus_bus_pm_suspend NULL
+#define ibmebus_bus_pm_resume NULL
+#define ibmebus_bus_pm_suspend_noirq NULL
+#define ibmebus_bus_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int ibmebus_bus_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = ibmebus_bus_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = ibmebus_bus_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int ibmebus_bus_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define ibmebus_bus_pm_freeze NULL
+#define ibmebus_bus_pm_thaw NULL
+#define ibmebus_bus_pm_poweroff NULL
+#define ibmebus_bus_pm_restore NULL
+#define ibmebus_bus_pm_freeze_noirq NULL
+#define ibmebus_bus_pm_thaw_noirq NULL
+#define ibmebus_bus_pm_poweroff_noirq NULL
+#define ibmebus_bus_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
+ .prepare = ibmebus_bus_pm_prepare,
+ .complete = ibmebus_bus_pm_complete,
+ .suspend = ibmebus_bus_pm_suspend,
+ .resume = ibmebus_bus_pm_resume,
+ .freeze = ibmebus_bus_pm_freeze,
+ .thaw = ibmebus_bus_pm_thaw,
+ .poweroff = ibmebus_bus_pm_poweroff,
+ .restore = ibmebus_bus_pm_restore,
+ .suspend_noirq = ibmebus_bus_pm_suspend_noirq,
+ .resume_noirq = ibmebus_bus_pm_resume_noirq,
+ .freeze_noirq = ibmebus_bus_pm_freeze_noirq,
+ .thaw_noirq = ibmebus_bus_pm_thaw_noirq,
+ .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq,
+ .restore_noirq = ibmebus_bus_pm_restore_noirq,
+};
+
+#define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define IBMEBUS_BUS_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
struct bus_type ibmebus_bus_type = {
+ .name = "ibmebus",
.uevent = of_device_uevent,
- .bus_attrs = ibmebus_bus_attrs
+ .bus_attrs = ibmebus_bus_attrs,
+ .match = ibmebus_bus_bus_match,
+ .probe = ibmebus_bus_device_probe,
+ .remove = ibmebus_bus_device_remove,
+ .shutdown = ibmebus_bus_device_shutdown,
+ .dev_attrs = ibmebus_bus_device_attrs,
+ .pm = IBMEBUS_BUS_PM_OPS_PTR,
};
EXPORT_SYMBOL(ibmebus_bus_type);
@@ -326,7 +722,7 @@ static int __init ibmebus_bus_init(void)
printk(KERN_INFO "IBM eBus Device Driver\n");
- err = of_bus_type_init(&ibmebus_bus_type, "ibmebus");
+ err = bus_register(&ibmebus_bus_type);
if (err) {
printk(KERN_ERR "%s: failed to register IBM eBus.\n",
__func__);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
save_ftrace_enabled = __ftrace_enabled_save();
- default_machine_kexec(image);
+ if (ppc_md.machine_kexec)
+ ppc_md.machine_kexec(image);
+ else
+ default_machine_kexec(image);
__ftrace_enabled_restore(save_ftrace_enabled);
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b2c363ef38ad..24582181b6ec 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -36,8 +36,7 @@
* lacking some bits needed here.
*/
-static int __devinit of_pci_phb_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit of_pci_phb_probe(struct platform_device *dev)
{
struct pci_controller *phb;
@@ -74,7 +73,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev,
#endif /* CONFIG_EEH */
/* Scan the bus */
- pcibios_scan_phb(phb, dev->dev.of_node);
+ pcibios_scan_phb(phb);
if (phb->bus == NULL)
return -ENXIO;
@@ -104,7 +103,7 @@ static struct of_device_id of_pci_phb_ids[] = {
{}
};
-static struct of_platform_driver of_pci_phb_driver = {
+static struct platform_driver of_pci_phb_driver = {
.probe = of_pci_phb_probe,
.driver = {
.name = "of-pci",
@@ -115,7 +114,7 @@ static struct of_platform_driver of_pci_phb_driver = {
static __init int of_pci_phb_init(void)
{
- return of_register_platform_driver(&of_pci_phb_driver);
+ return platform_driver_register(&of_pci_phb_driver);
}
device_initcall(of_pci_phb_init);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 10a44e68ef11..3cd85faa8ac6 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/of_address.h>
+#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
@@ -1687,13 +1688,8 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
/**
* pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
* @hose: Pointer to the PCI host controller instance structure
- * @sysdata: value to use for sysdata pointer. ppc32 and ppc64 differ here
- *
- * Note: the 'data' pointer is a temporary measure. As 32 and 64 bit
- * pci code gets merged, this parameter should become unnecessary because
- * both will use the same value.
*/
-void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata)
+void __devinit pcibios_scan_phb(struct pci_controller *hose)
{
struct pci_bus *bus;
struct device_node *node = hose->dn;
@@ -1703,13 +1699,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata)
node ? node->full_name : "<NO NAME>");
/* Create an empty bus for the toplevel */
- bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops,
- sysdata);
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
if (bus == NULL) {
pr_err("Failed to create bus for PCI domain %04x\n",
hose->global_number);
return;
}
+ bus->dev.of_node = of_node_get(node);
bus->secondary = hose->first_busno;
hose->bus = bus;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index e7db5b48004a..bedb370459f2 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -381,7 +381,7 @@ static int __init pcibios_init(void)
if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
- pcibios_scan_phb(hose, hose);
+ pcibios_scan_phb(hose);
pci_bus_add_devices(hose->bus);
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 851577608a78..fc6452b6be9f 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -64,7 +64,7 @@ static int __init pcibios_init(void)
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- pcibios_scan_phb(hose, hose->dn);
+ pcibios_scan_phb(hose);
pci_bus_add_devices(hose->bus);
}
@@ -242,10 +242,10 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
break;
bus = NULL;
}
- if (bus == NULL || bus->sysdata == NULL)
+ if (bus == NULL || bus->dev.of_node == NULL)
return -ENODEV;
- hose_node = (struct device_node *)bus->sysdata;
+ hose_node = bus->dev.of_node;
hose = PCI_DN(hose_node)->phb;
switch (which) {
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d56b35ee7f74..29852688ceaa 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -161,7 +161,7 @@ static void *is_devfn_node(struct device_node *dn, void *data)
/*
* This is the "slow" path for looking up a device_node from a
* pci_dev. It will hunt for the device under its parent's
- * phb and then update sysdata for a future fastpath.
+ * phb and then update the of_node pointer.
*
* It may also do fixups on the actual device since this happens
* on the first read/write.
@@ -170,16 +170,19 @@ static void *is_devfn_node(struct device_node *dn, void *data)
* In this case it may probe for real hardware ("just in case")
* and add a device_node to the device tree if necessary.
*
+ * Is this function necessary anymore now that dev->dev.of_node is
+ * used to store the node pointer?
+ *
*/
struct device_node *fetch_dev_dn(struct pci_dev *dev)
{
- struct device_node *orig_dn = dev->sysdata;
+ struct device_node *orig_dn = dev->dev.of_node;
struct device_node *dn;
unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
if (dn)
- dev->sysdata = dn;
+ dev->dev.of_node = dn;
return dn;
}
EXPORT_SYMBOL(fetch_dev_dn);
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index e751506323b4..1e89a72fd030 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -135,7 +135,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
dev->bus = bus;
- dev->sysdata = node;
+ dev->dev.of_node = of_node_get(node);
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
dev->devfn = devfn;
@@ -238,7 +238,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
bus->primary = dev->bus->number;
bus->subordinate = busrange[1];
bus->bridge_ctl = 0;
- bus->sysdata = node;
+ bus->dev.of_node = of_node_get(node);
/* parse ranges property */
/* PCI #address-cells == 3 and #size-cells == 2 always */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
prime_debug_regs(new_thread);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
set_dabr(0);
}
}
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
{
discard_lazy_cpu_state();
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index c2b7a07cc3d3..47187cc2cf00 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -2,95 +2,11 @@
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/pci_regs.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-
-#ifdef CONFIG_PCI
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
- struct device_node *dn, *ppnode;
- struct pci_dev *ppdev;
- u32 lspec;
- u32 laddr[3];
- u8 pin;
- int rc;
-
- /* Check if we have a device node, if yes, fallback to standard OF
- * parsing
- */
- dn = pci_device_to_OF_node(pdev);
- if (dn) {
- rc = of_irq_map_one(dn, 0, out_irq);
- if (!rc)
- return rc;
- }
-
- /* Ok, we don't, time to have fun. Let's start by building up an
- * interrupt spec. we assume #interrupt-cells is 1, which is standard
- * for PCI. If you do different, then don't use that routine.
- */
- rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
- if (rc != 0)
- return rc;
- /* No pin, exit */
- if (pin == 0)
- return -ENODEV;
-
- /* Now we walk up the PCI tree */
- lspec = pin;
- for (;;) {
- /* Get the pci_dev of our parent */
- ppdev = pdev->bus->self;
-
- /* Ouch, it's a host bridge... */
- if (ppdev == NULL) {
-#ifdef CONFIG_PPC64
- ppnode = pci_bus_to_OF_node(pdev->bus);
-#else
- struct pci_controller *host;
- host = pci_bus_to_host(pdev->bus);
- ppnode = host ? host->dn : NULL;
-#endif
- /* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
- } else
- /* We found a P2P bridge, check if it has a node */
- ppnode = pci_device_to_OF_node(ppdev);
-
- /* Ok, we have found a parent with a device-node, hand over to
- * the OF parsing code.
- * We build a unit address from the linux device to be used for
- * resolution. Note that we use the linux bus number which may
- * not match your firmware bus numbering.
- * Fortunately, in most cases, interrupt-map-mask doesn't include
- * the bus number as part of the matching.
- * You should still be careful about that though if you intend
- * to rely on this function (you ship a firmware that doesn't
- * create device nodes for all PCI devices).
- */
- if (ppnode)
- break;
-
- /* We can only get here if we hit a P2P bridge with no node,
- * let's do standard swizzling and try again
- */
- lspec = pci_swizzle_interrupt_pin(pdev, lspec);
- pdev = ppdev;
- }
-
- laddr[0] = (pdev->bus->number << 16)
- | (pdev->devfn << 8);
- laddr[1] = laddr[2] = 0;
- return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
unsigned long *busno, unsigned long *phys, unsigned long *size)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8a0deefac08d..b9150f07d266 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
INIT_RAM_FS
}
- PERCPU(PAGE_SIZE)
+ PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(8);
.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index badc983031b3..c961de40c676 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1141,9 +1141,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg5 = vcpu->arch.sprg4;
- regs->sprg6 = vcpu->arch.sprg5;
- regs->sprg7 = vcpu->arch.sprg6;
+ regs->sprg4 = vcpu->arch.sprg4;
+ regs->sprg5 = vcpu->arch.sprg5;
+ regs->sprg6 = vcpu->arch.sprg6;
+ regs->sprg7 = vcpu->arch.sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -1167,9 +1168,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg5 = regs->sprg4;
- vcpu->arch.sprg6 = regs->sprg5;
- vcpu->arch.sprg7 = regs->sprg6;
+ vcpu->arch.sprg4 = regs->sprg4;
+ vcpu->arch.sprg5 = regs->sprg5;
+ vcpu->arch.sprg6 = regs->sprg6;
+ vcpu->arch.sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 77575d08c818..ef76acb455c3 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -546,9 +546,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg5 = vcpu->arch.sprg4;
- regs->sprg6 = vcpu->arch.sprg5;
- regs->sprg7 = vcpu->arch.sprg6;
+ regs->sprg4 = vcpu->arch.sprg4;
+ regs->sprg5 = vcpu->arch.sprg5;
+ regs->sprg6 = vcpu->arch.sprg6;
+ regs->sprg7 = vcpu->arch.sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -572,9 +573,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg5 = regs->sprg4;
- vcpu->arch.sprg6 = regs->sprg5;
- vcpu->arch.sprg7 = regs->sprg6;
+ vcpu->arch.sprg4 = regs->sprg4;
+ vcpu->arch.sprg5 = regs->sprg5;
+ vcpu->arch.sprg6 = regs->sprg6;
+ vcpu->arch.sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 * needs to be flushed. This function will either perform the flush
* immediately or will batch it up if the current CPU has an active
* batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
*/
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge)
{
- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr;
unsigned int psize;
int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0);
+ put_cpu_var(ppc64_tlb_batch);
return;
}
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch);
+ put_cpu_var(ppc64_tlb_batch);
}
/*
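
The hunk above switches ppc64_tlb_batch access from __get_cpu_var() to the get_cpu_var()/put_cpu_var() pair, so preemption stays disabled for as long as the per-CPU batch is in use (which is why the "must be called from within a non-preempt region" comment is dropped and why both exit paths gain a put_cpu_var()). A minimal sketch of that pattern, using a hypothetical per-CPU counter rather than the real batch structure:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

/* Hypothetical helper: get_cpu_var() disables preemption and returns this
 * CPU's instance; every return path must balance it with put_cpu_var(),
 * mirroring the two calls added to hpte_need_flush() above. */
static void demo_bump(int skip)
{
	unsigned long *ctr = &get_cpu_var(demo_counter);	/* preempt off */

	if (skip) {
		put_cpu_var(demo_counter);	/* early-return path */
		return;
	}
	(*ctr)++;
	put_cpu_var(demo_counter);		/* normal path */
}
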
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index af405eefe48d..7c63c0ed4f1b 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -189,6 +189,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
blr
#ifdef CONFIG_PPC_47x
+
+/*
+ * 47x variant of icbt
+ */
+# define ICBT(CT,RA,RB) \
+ .long 0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11)
+
/*
* _tlbivax_bcast is only on 47x. We don't bother doing a runtime
* check though, it will blow up soon enough if we mistakenly try
@@ -206,7 +213,35 @@ _GLOBAL(_tlbivax_bcast)
isync
eieio
tlbsync
+BEGIN_FTR_SECTION
+ b 1f
+END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
+ sync
+ wrtee r10
+ blr
+/*
+ * DD2 HW could hang if an instruction fetch happens before msync completes.
+ * Touch enough instruction cache lines to ensure cache hits
+ */
+1: mflr r9
+ bl 2f
+2: mflr r6
+ li r7,32
+ ICBT(0,r6,r7) /* touch next cache line */
+ add r6,r6,r7
+ ICBT(0,r6,r7) /* touch next cache line */
+ add r6,r6,r7
+ ICBT(0,r6,r7) /* touch next cache line */
sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ mtlr r9
wrtee r10
blr
#endif /* CONFIG_PPC_47x */
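
A quick hand-check of the ICBT() macro introduced above, assuming the usual register-number defines from ppc_asm.h (r6 expands to 6, r7 to 7), so the first "touch next cache line" in the sequence encodes as:

#include <assert.h>

int main(void)
{
	/* ICBT(CT,RA,RB) emits .long 0x7c00002c | CT<<21 | RA<<16 | RB<<11;
	 * for ICBT(0, r6, r7) that works out to 0x7c06382c. */
	unsigned int ct = 0, ra = 6, rb = 7;
	unsigned int insn = 0x7c00002cU | (ct << 21) | (ra << 16) | (rb << 11);

	assert(insn == 0x7c06382cU);
	return 0;
}
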
diff --git a/arch/powerpc/platforms/44x/44x.h b/arch/powerpc/platforms/44x/44x.h
index dbc4d2b4301a..63f703ecd23c 100644
--- a/arch/powerpc/platforms/44x/44x.h
+++ b/arch/powerpc/platforms/44x/44x.h
@@ -4,4 +4,8 @@
extern u8 as1_readb(volatile u8 __iomem *addr);
extern void as1_writeb(u8 data, volatile u8 __iomem *addr);
+#define GPIO0_OSRH 0xC
+#define GPIO0_TSRH 0x14
+#define GPIO0_ISR1H 0x34
+
#endif /* __POWERPC_PLATFORMS_44X_44X_H */
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0f979c5c756b..f485fc5f6d5e 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -115,7 +115,6 @@ config CANYONLANDS
bool "Canyonlands"
depends on 44x
default n
- select PPC44x_SIMPLE
select 460EX
select PCI
select PPC4xx_PCI_EXPRESS
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index c04d16df8488..553db6007217 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_ISS4xx) += iss4xx.o
+obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
new file mode 100644
index 000000000000..afc5e8ea3775
--- /dev/null
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -0,0 +1,134 @@
+/*
+ * This contains platform-specific code for the APM PPC460EX based Canyonlands
+ * board.
+ *
+ * Copyright (c) 2010, Applied Micro Circuits Corporation
+ * Author: Rupjyoti Sarmah <rsarmah@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc4xx.h>
+#include <asm/udbg.h>
+#include <asm/uic.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+#include "44x.h"
+
+#define BCSR_USB_EN 0x11
+
+static __initdata struct of_device_id ppc460ex_of_bus[] = {
+ { .compatible = "ibm,plb4", },
+ { .compatible = "ibm,opb", },
+ { .compatible = "ibm,ebc", },
+ { .compatible = "simple-bus", },
+ {},
+};
+
+static int __init ppc460ex_device_probe(void)
+{
+ of_platform_bus_probe(NULL, ppc460ex_of_bus, NULL);
+
+ return 0;
+}
+machine_device_initcall(canyonlands, ppc460ex_device_probe);
+
+/* Using this code only for the Canyonlands board. */
+
+static int __init ppc460ex_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+ if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) {
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ return 1;
+ }
+ return 0;
+}
+
+/* USB PHY fixup code on Canyonlands kit. */
+
+static int __init ppc460ex_canyonlands_fixup(void)
+{
+ u8 __iomem *bcsr;
+ void __iomem *vaddr;
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-bcsr");
+ if (!np) {
+ printk(KERN_ERR "failed did not find amcc, ppc460ex bcsr node\n");
+ return -ENODEV;
+ }
+
+ bcsr = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!bcsr) {
+ printk(KERN_CRIT "Could not remap bcsr\n");
+ ret = -ENODEV;
+ goto err_bcsr;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,ppc4xx-gpio");
+ if (!np) {
+ printk(KERN_ERR "failed did not find ibm,ppc4xx-gpio node\n");
+ return -ENODEV;
+ }
+
+ vaddr = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!vaddr) {
+ printk(KERN_CRIT "Could not get gpio node address\n");
+ ret = -ENODEV;
+ goto err_gpio;
+ }
+ /* Disable USB, through the BCSR7 bits */
+ setbits8(&bcsr[7], BCSR_USB_EN);
+
+ /* Wait for a while after reset */
+ msleep(100);
+
+ /* Enable USB here */
+ clrbits8(&bcsr[7], BCSR_USB_EN);
+
+ /*
+ * Configure multiplexed gpio16 and gpio19 as alternate1 output
+ * source after USB reset. In this configuration gpio16 will be
+ * USB2HStop and gpio19 will be USB2DStop. For more details refer to
+ * table 34-7 of PPC460EX user manual.
+ */
+ setbits32((vaddr + GPIO0_OSRH), 0x42000000);
+ setbits32((vaddr + GPIO0_TSRH), 0x42000000);
+err_gpio:
+ iounmap(vaddr);
+err_bcsr:
+ iounmap(bcsr);
+ return ret;
+}
+machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup);
+define_machine(canyonlands) {
+ .name = "Canyonlands",
+ .probe = ppc460ex_probe,
+ .progress = udbg_progress,
+ .init_IRQ = uic_init_tree,
+ .get_irq = uic_get_irq,
+ .restart = ppc4xx_reset_system,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 7ddcba3b9397..c81c19c0b3d4 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -53,7 +53,6 @@ static char *board[] __initdata = {
"amcc,arches",
"amcc,bamboo",
"amcc,bluestone",
- "amcc,canyonlands",
"amcc,glacier",
"ibm,ebony",
"amcc,eiger",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
index 0dad9a935eb5..1757d1db4b51 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
@@ -147,8 +147,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
@@ -191,7 +190,7 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
{}
};
-static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = {
+static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.driver = {
.name = "gpio_wkup",
.owner = THIS_MODULE,
@@ -310,8 +309,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
@@ -349,7 +347,7 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
{}
};
-static struct of_platform_driver mpc52xx_simple_gpiochip_driver = {
+static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.driver = {
.name = "gpio",
.owner = THIS_MODULE,
@@ -361,10 +359,10 @@ static struct of_platform_driver mpc52xx_simple_gpiochip_driver = {
static int __init mpc52xx_gpio_init(void)
{
- if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver))
+ if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver))
printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
- if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver))
+ if (platform_driver_register(&mpc52xx_simple_gpiochip_driver))
printk(KERN_ERR "Unable to register simple GPIO driver\n");
return 0;
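
Every remaining of_platform_driver hunk in this series applies the same mechanical conversion shown here, so a condensed sketch of the pattern (a hypothetical "foo" driver; the dev.of_match lookup mirrors what the mpc83xx suspend and fsl_msi hunks further down do for drivers that still need their match data):

/* Before: OF-specific bus driver, probe() receives the matched table entry. */
static int foo_probe(struct platform_device *ofdev,
		     const struct of_device_id *match)
{
	const struct foo_pdata *pdata = match->data;
	/* ... */
	return 0;
}
/* ...registered with of_register_platform_driver(&foo_of_driver); */

/* After: plain platform driver; probe() loses the match argument, and the
 * table entry, when still needed, is fetched from the struct device. */
static int foo_probe(struct platform_device *ofdev)
{
	const struct foo_pdata *pdata;

	if (!ofdev->dev.of_match)
		return -EINVAL;
	pdata = ofdev->dev.of_match->data;
	/* ... */
	return 0;
}
/* ...registered with platform_driver_register(&foo_driver); */
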
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index e0d703c7fdf7..859abf1c6d4b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -721,8 +721,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
/* ---------------------------------------------------------------------
* of_platform bus binding code
*/
-static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev)
{
struct mpc52xx_gpt_priv *gpt;
@@ -781,7 +780,7 @@ static const struct of_device_id mpc52xx_gpt_match[] = {
{}
};
-static struct of_platform_driver mpc52xx_gpt_driver = {
+static struct platform_driver mpc52xx_gpt_driver = {
.driver = {
.name = "mpc52xx-gpt",
.owner = THIS_MODULE,
@@ -793,10 +792,7 @@ static struct of_platform_driver mpc52xx_gpt_driver = {
static int __init mpc52xx_gpt_init(void)
{
- if (of_register_platform_driver(&mpc52xx_gpt_driver))
- pr_err("error registering MPC52xx GPT driver\n");
-
- return 0;
+ return platform_driver_register(&mpc52xx_gpt_driver);
}
/* Make sure GPIOs and IRQs get set up before anyone tries to use them */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index f4ac213c89c0..6385d883cb8d 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -436,8 +436,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
-static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op)
{
struct resource res;
int rc = -ENOMEM;
@@ -536,7 +535,7 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
{},
};
-static struct of_platform_driver mpc52xx_lpbfifo_driver = {
+static struct platform_driver mpc52xx_lpbfifo_driver = {
.driver = {
.name = "mpc52xx-lpbfifo",
.owner = THIS_MODULE,
@@ -551,14 +550,12 @@ static struct of_platform_driver mpc52xx_lpbfifo_driver = {
*/
static int __init mpc52xx_lpbfifo_init(void)
{
- pr_debug("Registering LocalPlus bus FIFO driver\n");
- return of_register_platform_driver(&mpc52xx_lpbfifo_driver);
+ return platform_driver_register(&mpc52xx_lpbfifo_driver);
}
module_init(mpc52xx_lpbfifo_init);
static void __exit mpc52xx_lpbfifo_exit(void)
{
- pr_debug("Unregistering LocalPlus bus FIFO driver\n");
- of_unregister_platform_driver(&mpc52xx_lpbfifo_driver);
+ platform_driver_unregister(&mpc52xx_lpbfifo_driver);
}
module_exit(mpc52xx_lpbfifo_exit);
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 1565e0446dc8..10ff526cd046 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -111,8 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = {
.ops = &ep8248e_mdio_ops,
};
-static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev)
{
struct mii_bus *bus;
struct resource res;
@@ -167,7 +166,7 @@ static const struct of_device_id ep8248e_mdio_match[] = {
{},
};
-static struct of_platform_driver ep8248e_mdio_driver = {
+static struct platform_driver ep8248e_mdio_driver = {
.driver = {
.name = "ep8248e-mdio-bitbang",
.owner = THIS_MODULE,
@@ -308,7 +307,7 @@ static __initdata struct of_device_id of_bus_ids[] = {
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
- of_register_platform_driver(&ep8248e_mdio_driver);
+ platform_driver_register(&ep8248e_mdio_driver);
return 0;
}
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index fd4f2f2f19e6..188272934cfb 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -318,14 +318,18 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
.end = mpc83xx_suspend_end,
};
-static int pmc_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int pmc_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct resource res;
- struct pmc_type *type = match->data;
+ struct pmc_type *type;
int ret = 0;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+
+ type = ofdev->dev.of_match->data;
+
if (!of_device_is_available(np))
return -ENODEV;
@@ -422,7 +426,7 @@ static struct of_device_id pmc_match[] = {
{}
};
-static struct of_platform_driver pmc_driver = {
+static struct platform_driver pmc_driver = {
.driver = {
.name = "mpc83xx-pmc",
.owner = THIS_MODULE,
@@ -434,7 +438,7 @@ static struct of_platform_driver pmc_driver = {
static int pmc_init(void)
{
- return of_register_platform_driver(&pmc_driver);
+ return platform_driver_register(&pmc_driver);
}
module_init(pmc_init);
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index e3e379c6caa7..c35099af340e 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -328,7 +328,7 @@ static struct irq_host_ops msic_host_ops = {
.map = msic_host_map,
};
-static int axon_msi_shutdown(struct platform_device *device)
+static void axon_msi_shutdown(struct platform_device *device)
{
struct axon_msic *msic = dev_get_drvdata(&device->dev);
u32 tmp;
@@ -338,12 +338,9 @@ static int axon_msi_shutdown(struct platform_device *device)
tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
-
- return 0;
}
-static int axon_msi_probe(struct platform_device *device,
- const struct of_device_id *device_id)
+static int axon_msi_probe(struct platform_device *device)
{
struct device_node *dn = device->dev.of_node;
struct axon_msic *msic;
@@ -446,7 +443,7 @@ static const struct of_device_id axon_msi_device_id[] = {
{}
};
-static struct of_platform_driver axon_msi_driver = {
+static struct platform_driver axon_msi_driver = {
.probe = axon_msi_probe,
.shutdown = axon_msi_shutdown,
.driver = {
@@ -458,7 +455,7 @@ static struct of_platform_driver axon_msi_driver = {
static int __init axon_msi_init(void)
{
- return of_register_platform_driver(&axon_msi_driver);
+ return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index a5d907b5a4c2..9886296e08da 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -216,8 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus)
}
-static int __devinit gpio_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit gpio_mdio_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -299,7 +298,7 @@ static struct of_device_id gpio_mdio_match[] =
};
MODULE_DEVICE_TABLE(of, gpio_mdio_match);
-static struct of_platform_driver gpio_mdio_driver =
+static struct platform_driver gpio_mdio_driver =
{
.probe = gpio_mdio_probe,
.remove = gpio_mdio_remove,
@@ -326,13 +325,13 @@ int gpio_mdio_init(void)
if (!gpio_regs)
return -ENODEV;
- return of_register_platform_driver(&gpio_mdio_driver);
+ return platform_driver_register(&gpio_mdio_driver);
}
module_init(gpio_mdio_init);
void gpio_mdio_exit(void)
{
- of_unregister_platform_driver(&gpio_mdio_driver);
+ platform_driver_unregister(&gpio_mdio_driver);
if (gpio_regs)
iounmap(gpio_regs);
}
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 5fcc92a12d3e..3bf4488aaec6 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -149,7 +149,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
if (dn->child)
eeh_add_device_tree_early(dn);
- pcibios_scan_phb(phb, dn);
+ pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
return phb;
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 2659a60bd7b8..27402c7d309d 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -172,10 +172,9 @@ static const struct block_device_operations axon_ram_devops = {
/**
* axon_ram_probe - probe() method for platform driver
- * @device, @device_id: see of_platform_driver method
+ * @device: see platform_driver method
*/
-static int axon_ram_probe(struct platform_device *device,
- const struct of_device_id *device_id)
+static int axon_ram_probe(struct platform_device *device)
{
static int axon_ram_bank_id = -1;
struct axon_ram_bank *bank;
@@ -326,7 +325,7 @@ static struct of_device_id axon_ram_device_id[] = {
{}
};
-static struct of_platform_driver axon_ram_driver = {
+static struct platform_driver axon_ram_driver = {
.probe = axon_ram_probe,
.remove = axon_ram_remove,
.driver = {
@@ -350,7 +349,7 @@ axon_ram_init(void)
}
azfs_minor = 0;
- return of_register_platform_driver(&axon_ram_driver);
+ return platform_driver_register(&axon_ram_driver);
}
/**
@@ -359,7 +358,7 @@ axon_ram_init(void)
static void __exit
axon_ram_exit(void)
{
- of_unregister_platform_driver(&axon_ram_driver);
+ platform_driver_unregister(&axon_ram_driver);
unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME);
}
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 650256115064..b3fbb271be87 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -365,8 +365,7 @@ bcom_engine_cleanup(void)
/* OF platform driver */
/* ======================================================================== */
-static int __devinit mpc52xx_bcom_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc52xx_bcom_probe(struct platform_device *op)
{
struct device_node *ofn_sram;
struct resource res_bcom;
@@ -492,7 +491,7 @@ static struct of_device_id mpc52xx_bcom_of_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
-static struct of_platform_driver mpc52xx_bcom_of_platform_driver = {
+static struct platform_driver mpc52xx_bcom_of_platform_driver = {
.probe = mpc52xx_bcom_probe,
.remove = mpc52xx_bcom_remove,
.driver = {
@@ -510,13 +509,13 @@ static struct of_platform_driver mpc52xx_bcom_of_platform_driver = {
static int __init
mpc52xx_bcom_init(void)
{
- return of_register_platform_driver(&mpc52xx_bcom_of_platform_driver);
+ return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
}
static void __exit
mpc52xx_bcom_exit(void)
{
- of_unregister_platform_driver(&mpc52xx_bcom_of_platform_driver);
+ platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
}
/* If we're not a module, we must make sure everything is setup before */
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index cc8d6556d799..2b9f0c925326 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -71,8 +71,7 @@ static int __init get_offset_from_cmdline(char *str)
__setup("cache-sram-size=", get_size_from_cmdline);
__setup("cache-sram-offset=", get_offset_from_cmdline);
-static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
{
long rval;
unsigned int rem;
@@ -204,7 +203,7 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = {
{},
};
-static struct of_platform_driver mpc85xx_l2ctlr_of_platform_driver = {
+static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = {
.driver = {
.name = "fsl-l2ctlr",
.owner = THIS_MODULE,
@@ -216,12 +215,12 @@ static struct of_platform_driver mpc85xx_l2ctlr_of_platform_driver = {
static __init int mpc85xx_l2ctlr_of_init(void)
{
- return of_register_platform_driver(&mpc85xx_l2ctlr_of_platform_driver);
+ return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver);
}
static void __exit mpc85xx_l2ctlr_of_exit(void)
{
- of_unregister_platform_driver(&mpc85xx_l2ctlr_of_platform_driver);
+ platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver);
}
subsys_initcall(mpc85xx_l2ctlr_of_init);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 108d76fa8f1c..ee6a8a52ac71 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -273,8 +273,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
return 0;
}
-static int __devinit fsl_of_msi_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit fsl_of_msi_probe(struct platform_device *dev)
{
struct fsl_msi *msi;
struct resource res;
@@ -282,11 +281,15 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev,
int rc;
int virt_msir;
const u32 *p;
- struct fsl_msi_feature *features = match->data;
+ struct fsl_msi_feature *features;
struct fsl_msi_cascade_data *cascade_data = NULL;
int len;
u32 offset;
+ if (!dev->dev.of_match)
+ return -EINVAL;
+ features = dev->dev.of_match->data;
+
printk(KERN_DEBUG "Setting up Freescale MSI support\n");
msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
@@ -411,7 +414,7 @@ static const struct of_device_id fsl_of_msi_ids[] = {
{}
};
-static struct of_platform_driver fsl_of_msi_driver = {
+static struct platform_driver fsl_of_msi_driver = {
.driver = {
.name = "fsl-msi",
.owner = THIS_MODULE,
@@ -423,7 +426,7 @@ static struct of_platform_driver fsl_of_msi_driver = {
static __init int fsl_of_msi_init(void)
{
- return of_register_platform_driver(&fsl_of_msi_driver);
+ return platform_driver_register(&fsl_of_msi_driver);
}
subsys_initcall(fsl_of_msi_init);
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index e9381bfefb21..f122e8961d32 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -58,8 +58,7 @@ static const struct platform_suspend_ops pmc_suspend_ops = {
.enter = pmc_suspend_enter,
};
-static int pmc_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int pmc_probe(struct platform_device *ofdev)
{
pmc_regs = of_iomap(ofdev->dev.of_node, 0);
if (!pmc_regs)
@@ -76,7 +75,7 @@ static const struct of_device_id pmc_ids[] = {
{ },
};
-static struct of_platform_driver pmc_driver = {
+static struct platform_driver pmc_driver = {
.driver = {
.name = "fsl-pmc",
.owner = THIS_MODULE,
@@ -87,6 +86,6 @@ static struct of_platform_driver pmc_driver = {
static int __init pmc_init(void)
{
- return of_register_platform_driver(&pmc_driver);
+ return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_init);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 8c6cab013278..3eff2c3a9ad5 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1570,8 +1570,7 @@ err_ops:
/* The probe function for RapidIO peer-to-peer network.
*/
-static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev)
{
int rc;
printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
@@ -1594,7 +1593,7 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = {
{},
};
-static struct of_platform_driver fsl_of_rio_rpn_driver = {
+static struct platform_driver fsl_of_rio_rpn_driver = {
.driver = {
.name = "fsl-of-rio",
.owner = THIS_MODULE,
@@ -1605,7 +1604,7 @@ static struct of_platform_driver fsl_of_rio_rpn_driver = {
static __init int fsl_of_rio_rpn_init(void)
{
- return of_register_platform_driver(&fsl_of_rio_rpn_driver);
+ return platform_driver_register(&fsl_of_rio_rpn_driver);
}
subsys_initcall(fsl_of_rio_rpn_init);
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 4260f368db52..8ce4fc3d9828 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -121,8 +121,7 @@ static void pmi_notify_handlers(struct work_struct *work)
spin_unlock(&data->handler_spinlock);
}
-static int pmi_of_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int pmi_of_probe(struct platform_device *dev)
{
struct device_node *np = dev->dev.of_node;
int rc;
@@ -205,7 +204,7 @@ static int pmi_of_remove(struct platform_device *dev)
return 0;
}
-static struct of_platform_driver pmi_of_platform_driver = {
+static struct platform_driver pmi_of_platform_driver = {
.probe = pmi_of_probe,
.remove = pmi_of_remove,
.driver = {
@@ -217,13 +216,13 @@ static struct of_platform_driver pmi_of_platform_driver = {
static int __init pmi_module_init(void)
{
- return of_register_platform_driver(&pmi_of_platform_driver);
+ return platform_driver_register(&pmi_of_platform_driver);
}
module_init(pmi_module_init);
static void __exit pmi_module_exit(void)
{
- of_unregister_platform_driver(&pmi_of_platform_driver);
+ platform_driver_unregister(&pmi_of_platform_driver);
}
module_exit(pmi_module_exit);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 90020de4dcf2..904c6cbaf45b 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -659,8 +659,7 @@ static int qe_resume(struct platform_device *ofdev)
return 0;
}
-static int qe_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int qe_probe(struct platform_device *ofdev)
{
return 0;
}
@@ -670,7 +669,7 @@ static const struct of_device_id qe_ids[] = {
{ },
};
-static struct of_platform_driver qe_driver = {
+static struct platform_driver qe_driver = {
.driver = {
.name = "fsl-qe",
.owner = THIS_MODULE,
@@ -682,7 +681,7 @@ static struct of_platform_driver qe_driver = {
static int __init qe_drv_init(void)
{
- return of_register_platform_driver(&qe_driver);
+ return platform_driver_register(&qe_driver);
}
device_initcall(qe_drv_init);
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 636bcb81d068..63ed4c97f774 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -81,6 +81,7 @@ config S390
select INIT_ALL_POSSIBLE
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
@@ -115,6 +116,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ select HAVE_HWSAMPLER
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -341,26 +343,16 @@ config STACK_GUARD
The minimum size for the stack guard should be 256 for 31 bit and
512 for 64 bit.
-config WARN_STACK
+config WARN_DYNAMIC_STACK
def_bool n
- prompt "Emit compiler warnings for function with broken stack usage"
+ prompt "Emit compiler warnings for function with dynamic stack usage"
help
- This option enables the compiler options -mwarn-framesize and
- -mwarn-dynamicstack. If the compiler supports these options it
- will generate warnings for function which either use alloca or
- create a stack frame bigger than CONFIG_WARN_STACK_SIZE.
+ This option enables the compiler option -mwarn-dynamicstack. If the
+ compiler supports this option, it generates warnings for functions
+ that dynamically allocate stack space using alloca.
Say N if you are unsure.
-config WARN_STACK_SIZE
- int "Maximum frame size considered safe (128-2048)"
- range 128 2048
- depends on WARN_STACK
- default "2048"
- help
- This allows you to specify the maximum frame size a function may
- have without the compiler complaining about it.
-
config ARCH_POPULATES_NODE_MAP
def_bool y
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index d5b8a6ade525..27a0b5df5ead 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -80,8 +80,7 @@ endif
endif
ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
-cflags-$(CONFIG_WARN_STACK) += -mwarn-framesize=$(CONFIG_WARN_STACK_SIZE)
+cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 0851eb1e919e..2751b3a8a66f 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void)
unsigned long output_addr;
unsigned char *output;
- check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
+ output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+ check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
+ output = (unsigned char *) output_addr;
#ifdef CONFIG_BLK_DEV_INITRD
/*
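
The new output_addr line rounds the end of the decompressor heap up to the next 4 KiB page before it is handed to check_ipl_parmblock(); a small worked example of the (x + 4095UL) & -4096UL align-up idiom (the addresses are illustrative, not the real _end/HEAP_SIZE values):

#include <assert.h>

int main(void)
{
	unsigned long end  = 0x812345UL;	/* illustrative &_end */
	unsigned long heap = 0x10000UL;		/* illustrative HEAP_SIZE */

	/* Adding 4095 and masking with -4096UL (== ~4095UL) clears the low
	 * 12 bits after the bump, i.e. rounds up to a 4 KiB boundary... */
	assert(((end + heap + 4095UL) & -4096UL) == 0x823000UL);
	/* ...and leaves an already-aligned value unchanged. */
	assert(((0x400000UL + 4095UL) & -4096UL) == 0x400000UL);
	return 0;
}
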
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea117181..5c5ba10384c2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,14 +36,19 @@
static inline int atomic_read(const atomic_t *v)
{
- barrier();
- return v->counter;
+ int c;
+
+ asm volatile(
+ " l %0,%1\n"
+ : "=d" (c) : "Q" (v->counter));
+ return c;
}
static inline void atomic_set(atomic_t *v, int i)
{
- v->counter = i;
- barrier();
+ asm volatile(
+ " st %1,%0\n"
+ : "=Q" (v->counter) : "d" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
static inline long long atomic64_read(const atomic64_t *v)
{
- barrier();
- return v->counter;
+ long long c;
+
+ asm volatile(
+ " lg %0,%1\n"
+ : "=d" (c) : "Q" (v->counter));
+ return c;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
- v->counter = i;
- barrier();
+ asm volatile(
+ " stg %1,%0\n"
+ : "=Q" (v->counter) : "d" (i));
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b643..2a30d5ac0667 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
+#define NET_SKB_PAD 32
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 423fdda2322d..d0eb4653cebd 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -43,29 +43,6 @@
#ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
- signed long count;
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
@@ -81,41 +58,6 @@ struct rw_semaphore {
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
- * initialisation
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
-} while (0)
-
-
-/*
* lock for reading
*/
static inline void __down_read(struct rw_semaphore *sem)
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return new;
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a68ac10213b2..1bc18cdb525b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
- PERCPU(PAGE_SIZE)
+ PERCPU(0x100, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 537b2d840e69..00d8fc8e4429 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o hwsampler.o hwsampler_files.o
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
new file mode 100644
index 000000000000..3d48f4db246d
--- /dev/null
+++ b/arch/s390/oprofile/hwsampler.c
@@ -0,0 +1,1256 @@
+/**
+ * arch/s390/oprofile/hwsampler.c
+ *
+ * Copyright IBM Corp. 2010
+ * Author: Heinz Graalfs <graalfs@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/semaphore.h>
+#include <linux/oom.h>
+#include <linux/oprofile.h>
+
+#include <asm/lowcore.h>
+#include <asm/s390_ext.h>
+
+#include "hwsampler.h"
+
+#define MAX_NUM_SDB 511
+#define MIN_NUM_SDB 1
+
+#define ALERT_REQ_MASK 0x4000000000000000ul
+#define BUFFER_FULL_MASK 0x8000000000000000ul
+
+#define EI_IEA (1 << 31) /* invalid entry address */
+#define EI_ISE (1 << 30) /* incorrect SDBT entry */
+#define EI_PRA (1 << 29) /* program request alert */
+#define EI_SACA (1 << 23) /* sampler authorization change alert */
+#define EI_LSDA (1 << 22) /* loss of sample data alert */
+
+DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
+
+struct hws_execute_parms {
+ void *buffer;
+ signed int rc;
+};
+
+DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
+EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
+
+static DEFINE_MUTEX(hws_sem);
+static DEFINE_MUTEX(hws_sem_oom);
+
+static unsigned char hws_flush_all;
+static unsigned int hws_oom;
+static struct workqueue_struct *hws_wq;
+
+static unsigned int hws_state;
+enum {
+ HWS_INIT = 1,
+ HWS_DEALLOCATED,
+ HWS_STOPPED,
+ HWS_STARTED,
+ HWS_STOPPING };
+
+/* set to 1 if called by kernel during memory allocation */
+static unsigned char oom_killer_was_active;
+/* size of SDBT and SDB as of allocate API */
+static unsigned long num_sdbt = 100;
+static unsigned long num_sdb = 511;
+/* sampling interval (machine cycles) */
+static unsigned long interval;
+
+static unsigned long min_sampler_rate;
+static unsigned long max_sampler_rate;
+
+static int ssctl(void *buffer)
+{
+ int cc;
+
+ /* set in order to detect a program check */
+ cc = 1;
+
+ asm volatile(
+ "0: .insn s,0xB2870000,0(%1)\n"
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (cc), "+a" (buffer)
+ : "m" (*((struct hws_ssctl_request_block *)buffer))
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0 ;
+}
+
+static int qsi(void *buffer)
+{
+ int cc;
+ cc = 1;
+
+ asm volatile(
+ "0: .insn s,0xB2860000,0(%1)\n"
+ "1: lhi %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (cc), "+a" (buffer)
+ : "m" (*((struct hws_qsi_info_block *)buffer))
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+static void execute_qsi(void *parms)
+{
+ struct hws_execute_parms *ep = parms;
+
+ ep->rc = qsi(ep->buffer);
+}
+
+static void execute_ssctl(void *parms)
+{
+ struct hws_execute_parms *ep = parms;
+
+ ep->rc = ssctl(ep->buffer);
+}
+
+static int smp_ctl_ssctl_stop(int cpu)
+{
+ int rc;
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ cb->ssctl.es = 0;
+ cb->ssctl.cs = 0;
+
+ ep.buffer = &cb->ssctl;
+ smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+ rc = ep.rc;
+ if (rc) {
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+ dump_stack();
+ }
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+ if (cb->qsi.es || cb->qsi.cs) {
+ printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
+ dump_stack();
+ }
+
+ return rc;
+}
+
+static int smp_ctl_ssctl_deactivate(int cpu)
+{
+ int rc;
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ cb->ssctl.es = 1;
+ cb->ssctl.cs = 0;
+
+ ep.buffer = &cb->ssctl;
+ smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+ rc = ep.rc;
+ if (rc)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+ if (cb->qsi.cs)
+ printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
+
+ return rc;
+}
+
+static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
+{
+ int rc;
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ cb->ssctl.h = 1;
+ cb->ssctl.tear = cb->first_sdbt;
+ cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
+ cb->ssctl.interval = interval;
+ cb->ssctl.es = 1;
+ cb->ssctl.cs = 1;
+
+ ep.buffer = &cb->ssctl;
+ smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+ rc = ep.rc;
+ if (rc)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+ if (ep.rc)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
+
+ return rc;
+}
+
+static int smp_ctl_qsi(int cpu)
+{
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+ return ep.rc;
+}
+
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+ void *ret;
+
+ ret = (void *)v;
+ ret += PAGE_SIZE;
+ ret -= sizeof(struct hws_trailer_entry);
+
+ return (unsigned long *) ret;
+}
+
+/* prototypes for external interrupt handler and worker */
+static void hws_ext_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64);
+
+static void worker(struct work_struct *work);
+
+static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
+ unsigned long *dear);
+
+static void init_all_cpu_buffers(void)
+{
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ memset(cb, 0, sizeof(struct hws_cpu_buffer));
+ }
+}
+
+static int is_link_entry(unsigned long *s)
+{
+ return *s & 0x1ul ? 1 : 0;
+}
+
+static unsigned long *get_next_sdbt(unsigned long *s)
+{
+ return (unsigned long *) (*s & ~0x1ul);
+}
+
+static int prepare_cpu_buffers(void)
+{
+ int cpu;
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ atomic_set(&cb->ext_params, 0);
+ cb->worker_entry = 0;
+ cb->sample_overflow = 0;
+ cb->req_alert = 0;
+ cb->incorrect_sdbt_entry = 0;
+ cb->invalid_entry_address = 0;
+ cb->loss_of_sample_data = 0;
+ cb->sample_auth_change_alert = 0;
+ cb->finish = 0;
+ cb->oom = 0;
+ cb->stop_mode = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * allocate_sdbt() - allocate sampler memory
+ * @cpu: the cpu for which sampler memory is allocated
+ *
+ * A 4K page is allocated for each requested SDBT.
+ * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
+ * Set the ALERT_REQ mask in each SDB's trailer.
+ * Returns zero if successful, <0 otherwise.
+ */
+static int allocate_sdbt(int cpu)
+{
+ int j, k, rc;
+ unsigned long *sdbt;
+ unsigned long sdb;
+ unsigned long *tail;
+ unsigned long *trailer;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (cb->first_sdbt)
+ return -EINVAL;
+
+ sdbt = NULL;
+ tail = sdbt;
+
+ for (j = 0; j < num_sdbt; j++) {
+ sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+
+ mutex_lock(&hws_sem_oom);
+ /* OOM killer might have been activated */
+ barrier();
+ if (oom_killer_was_active || !sdbt) {
+ if (sdbt)
+ free_page((unsigned long)sdbt);
+
+ goto allocate_sdbt_error;
+ }
+ if (cb->first_sdbt == 0)
+ cb->first_sdbt = (unsigned long)sdbt;
+
+ /* link current page to tail of chain */
+ if (tail)
+ *tail = (unsigned long)(void *)sdbt + 1;
+
+ mutex_unlock(&hws_sem_oom);
+
+ for (k = 0; k < num_sdb; k++) {
+ /* get and set SDB page */
+ sdb = get_zeroed_page(GFP_KERNEL);
+
+ mutex_lock(&hws_sem_oom);
+ /* OOM killer might have been activated */
+ barrier();
+ if (oom_killer_was_active || !sdb) {
+ if (sdb)
+ free_page(sdb);
+
+ goto allocate_sdbt_error;
+ }
+ *sdbt = sdb;
+ trailer = trailer_entry_ptr(*sdbt);
+ *trailer = ALERT_REQ_MASK;
+ sdbt++;
+ mutex_unlock(&hws_sem_oom);
+ }
+ tail = sdbt;
+ }
+ mutex_lock(&hws_sem_oom);
+ if (oom_killer_was_active)
+ goto allocate_sdbt_error;
+
+ rc = 0;
+ if (tail)
+ *tail = (unsigned long)
+ ((void *)cb->first_sdbt) + 1;
+
+allocate_sdbt_exit:
+ mutex_unlock(&hws_sem_oom);
+ return rc;
+
+allocate_sdbt_error:
+ rc = -ENOMEM;
+ goto allocate_sdbt_exit;
+}
+
+/*
+ * deallocate_sdbt() - deallocate all sampler memory
+ *
+ * For each online CPU all SDBT trees are deallocated.
+ * Returns the number of freed pages.
+ */
+static int deallocate_sdbt(void)
+{
+ int cpu;
+ int counter;
+
+ counter = 0;
+
+ for_each_online_cpu(cpu) {
+ unsigned long start;
+ unsigned long sdbt;
+ unsigned long *curr;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (!cb->first_sdbt)
+ continue;
+
+ sdbt = cb->first_sdbt;
+ curr = (unsigned long *) sdbt;
+ start = sdbt;
+
+ /* we'll free the SDBT after all SDBs are processed... */
+ while (1) {
+ if (!*curr || !sdbt)
+ break;
+
+ /* watch for link entry reset if found */
+ if (is_link_entry(curr)) {
+ curr = get_next_sdbt(curr);
+ if (sdbt)
+ free_page(sdbt);
+
+ /* we are done if we reach the start */
+ if ((unsigned long) curr == start)
+ break;
+ else
+ sdbt = (unsigned long) curr;
+ } else {
+ /* process SDB pointer */
+ if (*curr) {
+ free_page(*curr);
+ curr++;
+ }
+ }
+ counter++;
+ }
+ cb->first_sdbt = 0;
+ }
+ return counter;
+}
+
+static int start_sampling(int cpu)
+{
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ rc = smp_ctl_ssctl_enable_activate(cpu, interval);
+ if (rc) {
+ printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
+ goto start_exit;
+ }
+
+ rc = -EINVAL;
+ if (!cb->qsi.es) {
+ printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
+ goto start_exit;
+ }
+
+ if (!cb->qsi.cs) {
+ printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
+ goto start_exit;
+ }
+
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
+ cpu, interval);
+
+ rc = 0;
+
+start_exit:
+ return rc;
+}
+
+static int stop_sampling(int cpu)
+{
+ unsigned long v;
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ if (!rc && !cb->qsi.es)
+ printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
+
+ rc = smp_ctl_ssctl_stop(cpu);
+ if (rc) {
+ printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
+ cpu, rc);
+ goto stop_exit;
+ }
+
+ printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
+
+stop_exit:
+ v = cb->req_alert;
+ if (v)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->loss_of_sample_data;
+ if (v)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->invalid_entry_address;
+ if (v)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->incorrect_sdbt_entry;
+ if (v)
+ printk(KERN_ERR
+ "hwsampler: CPU %d CPUMF Incorrect SDBT address,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->sample_auth_change_alert;
+ if (v)
+ printk(KERN_ERR
+ "hwsampler: CPU %d CPUMF Sample authorization change,"
+ " count=%lu.\n", cpu, v);
+
+ return rc;
+}
+
+static int check_hardware_prerequisites(void)
+{
+ unsigned long long facility_bits[2];
+
+ memcpy(facility_bits, S390_lowcore.stfle_fac_list, sizeof(facility_bits));
+ if (!(facility_bits[1] & (1ULL << 59)))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+/*
+ * hws_oom_callback() - the OOM callback function
+ *
+ * In case the callback is invoked during memory allocation for the
+ * hw sampler, all obtained memory is deallocated and a flag is set
+ * so main sampler memory allocation can exit with a failure code.
+ * In case the callback is invoked during sampling the hw sampler
+ * is deactivated for all CPUs.
+ */
+static int hws_oom_callback(struct notifier_block *nfb,
+ unsigned long dummy, void *parm)
+{
+ unsigned long *freed;
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ freed = parm;
+
+ mutex_lock(&hws_sem_oom);
+
+ if (hws_state == HWS_DEALLOCATED) {
+ /* during memory allocation */
+ if (oom_killer_was_active == 0) {
+ oom_killer_was_active = 1;
+ *freed += deallocate_sdbt();
+ }
+ } else {
+ int i;
+ cpu = get_cpu();
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (!cb->oom) {
+ for_each_online_cpu(i) {
+ smp_ctl_ssctl_deactivate(i);
+ cb->oom = 1;
+ }
+ cb->finish = 1;
+
+ printk(KERN_INFO
+ "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
+ cpu);
+ }
+ }
+
+ mutex_unlock(&hws_sem_oom);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hws_oom_notifier = {
+ .notifier_call = hws_oom_callback
+};
+
+static int hws_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ /* We do not have sampler space available for all possible CPUs.
+ All CPUs should be online when hw sampling is activated. */
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block hws_cpu_notifier = {
+ .notifier_call = hws_cpu_callback
+};
+
+/**
+ * hwsampler_deactivate() - set hardware sampling temporarily inactive
+ * @cpu: specifies the CPU to be set inactive.
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_deactivate(unsigned int cpu)
+{
+ /*
+ * Deactivate hw sampling temporarily and flush the buffer
+ * by pushing all the pending samples to oprofile buffer.
+ *
+ * This function can be called under one of the following conditions:
+ * Memory unmap, task is exiting.
+ */
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ mutex_lock(&hws_sem);
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ if (hws_state == HWS_STARTED) {
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (cb->qsi.cs) {
+ rc = smp_ctl_ssctl_deactivate(cpu);
+ if (rc) {
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
+ cb->finish = 1;
+ hws_state = HWS_STOPPING;
+ } else {
+ hws_flush_all = 1;
+ /* Add work to queue to read pending samples.*/
+ queue_work_on(cpu, hws_wq, &cb->worker);
+ }
+ }
+ }
+ mutex_unlock(&hws_sem);
+
+ if (hws_wq)
+ flush_workqueue(hws_wq);
+
+ return rc;
+}
+
+/**
+ * hwsampler_activate() - activate/resume hardware sampling which was deactivated
+ * @cpu: specifies the CPU to be set active.
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_activate(unsigned int cpu)
+{
+ /*
+ * Re-activate hw sampling. This should be called in pair with
+ * hwsampler_deactivate().
+ */
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ mutex_lock(&hws_sem);
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ if (hws_state == HWS_STARTED) {
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (!cb->qsi.cs) {
+ hws_flush_all = 0;
+ rc = smp_ctl_ssctl_enable_activate(cpu, interval);
+ if (rc) {
+ printk(KERN_ERR
+ "CPU %d, CPUMF activate sampling failed.\n",
+ cpu);
+ }
+ }
+ }
+
+ mutex_unlock(&hws_sem);
+
+ return rc;
+}
+
+static void hws_ext_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
+{
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ cpu = smp_processor_id();
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ atomic_xchg(
+ &cb->ext_params,
+ atomic_read(&cb->ext_params)
+ | S390_lowcore.ext_params);
+
+ if (hws_wq)
+ queue_work(hws_wq, &cb->worker);
+}
+
+static int check_qsi_on_setup(void)
+{
+ int rc;
+ unsigned int cpu;
+ struct hws_cpu_buffer *cb;
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ if (!cb->qsi.as) {
+ printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
+ return -EINVAL;
+ }
+
+ if (cb->qsi.es) {
+ printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
+ rc = smp_ctl_ssctl_stop(cpu);
+ if (rc)
+ return -EINVAL;
+
+ printk(KERN_INFO
+ "CPU %d, CPUMF Sampling stopped now.\n", cpu);
+ }
+ }
+ return 0;
+}
+
+static int check_qsi_on_start(void)
+{
+ unsigned int cpu;
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+
+ if (!cb->qsi.as)
+ return -EINVAL;
+
+ if (cb->qsi.es)
+ return -EINVAL;
+
+ if (cb->qsi.cs)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void worker_on_start(unsigned int cpu)
+{
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ cb->worker_entry = cb->first_sdbt;
+}
+
+static int worker_check_error(unsigned int cpu, int ext_params)
+{
+ int rc;
+ unsigned long *sdbt;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ sdbt = (unsigned long *) cb->worker_entry;
+
+ if (!sdbt || !*sdbt)
+ return -EINVAL;
+
+ if (ext_params & EI_IEA)
+ cb->req_alert++;
+
+ if (ext_params & EI_LSDA)
+ cb->loss_of_sample_data++;
+
+ if (ext_params & EI_IEA) {
+ cb->invalid_entry_address++;
+ rc = -EINVAL;
+ }
+
+ if (ext_params & EI_ISE) {
+ cb->incorrect_sdbt_entry++;
+ rc = -EINVAL;
+ }
+
+ if (ext_params & EI_SACA) {
+ cb->sample_auth_change_alert++;
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void worker_on_finish(unsigned int cpu)
+{
+ int rc, i;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (cb->finish) {
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (cb->qsi.es) {
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
+ cpu);
+ rc = smp_ctl_ssctl_stop(cpu);
+ if (rc)
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Deactivation failed.\n",
+ cpu);
+
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (!cb->finish) {
+ cb->finish = 1;
+ queue_work_on(i, hws_wq,
+ &cb->worker);
+ }
+ }
+ }
+ }
+}
+
+static void worker_on_interrupt(unsigned int cpu)
+{
+ unsigned long *sdbt;
+ unsigned char done;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ sdbt = (unsigned long *) cb->worker_entry;
+
+ done = 0;
+ /* do not proceed if stop was entered,
+ * forget the buffers not yet processed */
+ while (!done && !cb->stop_mode) {
+ unsigned long *trailer;
+ struct hws_trailer_entry *te;
+ unsigned long *dear = 0;
+
+ trailer = trailer_entry_ptr(*sdbt);
+ /* leave loop if no more work to do */
+ if (!(*trailer & BUFFER_FULL_MASK)) {
+ done = 1;
+ if (!hws_flush_all)
+ continue;
+ }
+
+ te = (struct hws_trailer_entry *)trailer;
+ cb->sample_overflow += te->overflow;
+
+ add_samples_to_oprofile(cpu, sdbt, dear);
+
+ /* reset trailer */
+ xchg((unsigned char *) te, 0x40);
+
+ /* advance to next sdb slot in current sdbt */
+ sdbt++;
+ /* in case link bit is set use address w/o link bit */
+ if (is_link_entry(sdbt))
+ sdbt = get_next_sdbt(sdbt);
+
+ cb->worker_entry = (unsigned long)sdbt;
+ }
+}
+
+static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
+ unsigned long *dear)
+{
+ struct hws_data_entry *sample_data_ptr;
+ unsigned long *trailer;
+
+ trailer = trailer_entry_ptr(*sdbt);
+ if (dear) {
+ if (dear > trailer)
+ return;
+ trailer = dear;
+ }
+
+ sample_data_ptr = (struct hws_data_entry *)(*sdbt);
+
+ while ((unsigned long *)sample_data_ptr < trailer) {
+ struct pt_regs *regs = NULL;
+ struct task_struct *tsk = NULL;
+
+ /*
+ * Check sampling mode, 1 indicates basic (=customer) sampling
+ * mode.
+ */
+ if (sample_data_ptr->def != 1) {
+ /* sample slot is not yet written */
+ break;
+ } else {
+ /* make sure we don't use it twice,
+ * the next time the sampler will set it again */
+ sample_data_ptr->def = 0;
+ }
+
+ /* Get pt_regs. */
+ if (sample_data_ptr->P == 1) {
+ /* userspace sample */
+ unsigned int pid = sample_data_ptr->prim_asn;
+ rcu_read_lock();
+ tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
+ if (tsk)
+ regs = task_pt_regs(tsk);
+ rcu_read_unlock();
+ } else {
+ /* kernelspace sample */
+ regs = task_pt_regs(current);
+ }
+
+ mutex_lock(&hws_sem);
+ oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
+ !sample_data_ptr->P, tsk);
+ mutex_unlock(&hws_sem);
+
+ sample_data_ptr++;
+ }
+}
+
+static void worker(struct work_struct *work)
+{
+ unsigned int cpu;
+ int ext_params;
+ struct hws_cpu_buffer *cb;
+
+ cb = container_of(work, struct hws_cpu_buffer, worker);
+ cpu = smp_processor_id();
+ ext_params = atomic_xchg(&cb->ext_params, 0);
+
+ if (!cb->worker_entry)
+ worker_on_start(cpu);
+
+ if (worker_check_error(cpu, ext_params))
+ return;
+
+ if (!cb->finish)
+ worker_on_interrupt(cpu);
+
+ if (cb->finish)
+ worker_on_finish(cpu);
+}
+
+/**
+ * hwsampler_allocate() - allocate memory for the hardware sampler
+ * @sdbt: number of SDBTs per online CPU (must be > 0)
+ * @sdb: number of SDBs per SDBT (minimum 1, maximum 511)
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
+{
+ int cpu, rc;
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state != HWS_DEALLOCATED)
+ goto allocate_exit;
+
+ if (sdbt < 1)
+ goto allocate_exit;
+
+ if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
+ goto allocate_exit;
+
+ num_sdbt = sdbt;
+ num_sdb = sdb;
+
+ oom_killer_was_active = 0;
+ register_oom_notifier(&hws_oom_notifier);
+
+ for_each_online_cpu(cpu) {
+ if (allocate_sdbt(cpu)) {
+ unregister_oom_notifier(&hws_oom_notifier);
+ goto allocate_error;
+ }
+ }
+ unregister_oom_notifier(&hws_oom_notifier);
+ if (oom_killer_was_active)
+ goto allocate_error;
+
+ hws_state = HWS_STOPPED;
+ rc = 0;
+
+allocate_exit:
+ mutex_unlock(&hws_sem);
+ return rc;
+
+allocate_error:
+ rc = -ENOMEM;
+ printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
+ goto allocate_exit;
+}
+
+/**
+ * hwsampler_deallocate() - deallocate hardware sampler memory
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_deallocate()
+{
+ int rc;
+
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state != HWS_STOPPED)
+ goto deallocate_exit;
+
+ smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
+ deallocate_sdbt();
+
+ hws_state = HWS_DEALLOCATED;
+ rc = 0;
+
+deallocate_exit:
+ mutex_unlock(&hws_sem);
+
+ return rc;
+}
+
+long hwsampler_query_min_interval(void)
+{
+ if (min_sampler_rate)
+ return min_sampler_rate;
+ else
+ return -EINVAL;
+}
+
+long hwsampler_query_max_interval(void)
+{
+ if (max_sampler_rate)
+ return max_sampler_rate;
+ else
+ return -EINVAL;
+}
+
+unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
+{
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ return cb->sample_overflow;
+}
+
+int hwsampler_setup()
+{
+ int rc;
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state)
+ goto setup_exit;
+
+ hws_state = HWS_INIT;
+
+ init_all_cpu_buffers();
+
+ rc = check_hardware_prerequisites();
+ if (rc)
+ goto setup_exit;
+
+ rc = check_qsi_on_setup();
+ if (rc)
+ goto setup_exit;
+
+ rc = -EINVAL;
+ hws_wq = create_workqueue("hwsampler");
+ if (!hws_wq)
+ goto setup_exit;
+
+ register_cpu_notifier(&hws_cpu_notifier);
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ INIT_WORK(&cb->worker, worker);
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (min_sampler_rate != cb->qsi.min_sampl_rate) {
+ if (min_sampler_rate) {
+ printk(KERN_WARNING
+ "hwsampler: different min sampler rate values.\n");
+ if (min_sampler_rate < cb->qsi.min_sampl_rate)
+ min_sampler_rate =
+ cb->qsi.min_sampl_rate;
+ } else
+ min_sampler_rate = cb->qsi.min_sampl_rate;
+ }
+ if (max_sampler_rate != cb->qsi.max_sampl_rate) {
+ if (max_sampler_rate) {
+ printk(KERN_WARNING
+ "hwsampler: different max sampler rate values.\n");
+ if (max_sampler_rate > cb->qsi.max_sampl_rate)
+ max_sampler_rate =
+ cb->qsi.max_sampl_rate;
+ } else
+ max_sampler_rate = cb->qsi.max_sampl_rate;
+ }
+ }
+ register_external_interrupt(0x1407, hws_ext_handler);
+
+ hws_state = HWS_DEALLOCATED;
+ rc = 0;
+
+setup_exit:
+ mutex_unlock(&hws_sem);
+ return rc;
+}
+
+int hwsampler_shutdown()
+{
+ int rc;
+
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
+ mutex_unlock(&hws_sem);
+
+ if (hws_wq)
+ flush_workqueue(hws_wq);
+
+ mutex_lock(&hws_sem);
+
+ if (hws_state == HWS_STOPPED) {
+ smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
+ deallocate_sdbt();
+ }
+ if (hws_wq) {
+ destroy_workqueue(hws_wq);
+ hws_wq = NULL;
+ }
+
+ unregister_external_interrupt(0x1407, hws_ext_handler);
+ hws_state = HWS_INIT;
+ rc = 0;
+ }
+ mutex_unlock(&hws_sem);
+
+ unregister_cpu_notifier(&hws_cpu_notifier);
+
+ return rc;
+}
+
+/**
+ * hwsampler_start_all() - start hardware sampling on all online CPUs
+ * @rate: specifies the used interval when samples are taken
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_start_all(unsigned long rate)
+{
+ int rc, cpu;
+
+ mutex_lock(&hws_sem);
+
+ hws_oom = 0;
+
+ rc = -EINVAL;
+ if (hws_state != HWS_STOPPED)
+ goto start_all_exit;
+
+ interval = rate;
+
+ /* fail if rate is not valid */
+ if (interval < min_sampler_rate || interval > max_sampler_rate)
+ goto start_all_exit;
+
+ rc = check_qsi_on_start();
+ if (rc)
+ goto start_all_exit;
+
+ rc = prepare_cpu_buffers();
+ if (rc)
+ goto start_all_exit;
+
+ for_each_online_cpu(cpu) {
+ rc = start_sampling(cpu);
+ if (rc)
+ break;
+ }
+ if (rc) {
+ for_each_online_cpu(cpu) {
+ stop_sampling(cpu);
+ }
+ goto start_all_exit;
+ }
+ hws_state = HWS_STARTED;
+ rc = 0;
+
+start_all_exit:
+ mutex_unlock(&hws_sem);
+
+ if (rc)
+ return rc;
+
+ register_oom_notifier(&hws_oom_notifier);
+ hws_oom = 1;
+ hws_flush_all = 0;
+ /* now let them in, 1407 CPUMF external interrupts */
+ smp_ctl_set_bit(0, 5); /* set CR0 bit 58 */
+
+ return 0;
+}
+
+/**
+ * hwsampler_stop_all() - stop hardware sampling on all online CPUs
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_stop_all()
+{
+ int tmp_rc, rc, cpu;
+ struct hws_cpu_buffer *cb;
+
+ mutex_lock(&hws_sem);
+
+ rc = 0;
+ if (hws_state == HWS_INIT) {
+ mutex_unlock(&hws_sem);
+ return rc;
+ }
+ hws_state = HWS_STOPPING;
+ mutex_unlock(&hws_sem);
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ cb->stop_mode = 1;
+ tmp_rc = stop_sampling(cpu);
+ if (tmp_rc)
+ rc = tmp_rc;
+ }
+
+ if (hws_wq)
+ flush_workqueue(hws_wq);
+
+ mutex_lock(&hws_sem);
+ if (hws_oom) {
+ unregister_oom_notifier(&hws_oom_notifier);
+ hws_oom = 0;
+ }
+ hws_state = HWS_STOPPED;
+ mutex_unlock(&hws_sem);
+
+ return rc;
+}
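
The new hwsampler.c ends here. Taken together, the entry points above form a small state machine (HWS_INIT -> HWS_DEALLOCATED -> HWS_STOPPED -> HWS_STARTED and back). As a rough sketch of how a client is expected to drive it (the wrapper function below and the 1 SDBT / 511 SDB sizes are illustrative, matching the defaults hwsampler_files.c uses further down, and are not part of the patch):

    #include "hwsampler.h"

    static int example_profile_run(unsigned long rate)
    {
            long min, max;
            int rc;

            rc = hwsampler_setup();                 /* HWS_INIT -> HWS_DEALLOCATED */
            if (rc)
                    return rc;

            min = hwsampler_query_min_interval();
            max = hwsampler_query_max_interval();
            if (min < 0 || max < 0 ||
                rate < (unsigned long)min || rate > (unsigned long)max) {
                    hwsampler_shutdown();
                    return -EINVAL;
            }

            rc = hwsampler_allocate(1, 511);        /* HWS_DEALLOCATED -> HWS_STOPPED */
            if (rc)
                    goto out_shutdown;

            rc = hwsampler_start_all(rate);         /* HWS_STOPPED -> HWS_STARTED */
            if (rc)
                    goto out_deallocate;

            /* ... sampling runs; the worker feeds samples to oprofile ... */

            hwsampler_stop_all();                   /* HWS_STARTED -> HWS_STOPPED */
    out_deallocate:
            hwsampler_deallocate();                 /* HWS_STOPPED -> HWS_DEALLOCATED */
    out_shutdown:
            hwsampler_shutdown();                   /* back to HWS_INIT */
            return rc;
    }
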
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
new file mode 100644
index 000000000000..8c72b59316b5
--- /dev/null
+++ b/arch/s390/oprofile/hwsampler.h
@@ -0,0 +1,113 @@
+/*
+ * CPUMF HW sampler functions and internal structures
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Heinz Graalfs <graalfs@de.ibm.com>
+ */
+
+#ifndef HWSAMPLER_H_
+#define HWSAMPLER_H_
+
+#include <linux/workqueue.h>
+
+struct hws_qsi_info_block /* QUERY SAMPLING information block */
+{ /* Bit(s) */
+ unsigned int b0_13:14; /* 0-13: zeros */
+ unsigned int as:1; /* 14: sampling authorisation control*/
+ unsigned int b15_21:7; /* 15-21: zeros */
+ unsigned int es:1; /* 22: sampling enable control */
+ unsigned int b23_29:7; /* 23-29: zeros */
+ unsigned int cs:1; /* 30: sampling activation control */
+ unsigned int:1; /* 31: reserved */
+ unsigned int bsdes:16; /* 4-5: size of sampling entry */
+ unsigned int:16; /* 6-7: reserved */
+ unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
+ unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
+ unsigned long tear; /* 24-31: TEAR contents */
+ unsigned long dear; /* 32-39: DEAR contents */
+ unsigned int rsvrd0; /* 40-43: reserved */
+ unsigned int cpu_speed; /* 44-47: CPU speed */
+ unsigned long long rsvrd1; /* 48-55: reserved */
+ unsigned long long rsvrd2; /* 56-63: reserved */
+};
+
+struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */
+{ /* bytes 0 - 7 Bit(s) */
+ unsigned int s:1; /* 0: maximum buffer indicator */
+ unsigned int h:1; /* 1: part. level reserved for VM use*/
+ unsigned long b2_53:52; /* 2-53: zeros */
+ unsigned int es:1; /* 54: sampling enable control */
+ unsigned int b55_61:7; /* 55-61: - zeros */
+ unsigned int cs:1; /* 62: sampling activation control */
+ unsigned int b63:1; /* 63: zero */
+ unsigned long interval; /* 8-15: sampling interval */
+ unsigned long tear; /* 16-23: TEAR contents */
+ unsigned long dear; /* 24-31: DEAR contents */
+ /* 32-63: */
+ unsigned long rsvrd1; /* reserved */
+ unsigned long rsvrd2; /* reserved */
+ unsigned long rsvrd3; /* reserved */
+ unsigned long rsvrd4; /* reserved */
+};
+
+struct hws_cpu_buffer {
+ unsigned long first_sdbt; /* @ of 1st SDB-Table for this CP*/
+ unsigned long worker_entry;
+ unsigned long sample_overflow; /* taken from SDB ... */
+ struct hws_qsi_info_block qsi;
+ struct hws_ssctl_request_block ssctl;
+ struct work_struct worker;
+ atomic_t ext_params;
+ unsigned long req_alert;
+ unsigned long loss_of_sample_data;
+ unsigned long invalid_entry_address;
+ unsigned long incorrect_sdbt_entry;
+ unsigned long sample_auth_change_alert;
+ unsigned int finish:1;
+ unsigned int oom:1;
+ unsigned int stop_mode:1;
+};
+
+struct hws_data_entry {
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int R:4; /* 16-19 reserved */
+ unsigned int U:4; /* 20-23 Number of unique instruct. */
+ unsigned int z:2; /* zeros */
+ unsigned int T:1; /* 26 PSW DAT mode */
+ unsigned int W:1; /* 27 PSW wait state */
+ unsigned int P:1; /* 28 PSW Problem state */
+ unsigned int AS:2; /* 29-30 PSW address-space control */
+ unsigned int I:1; /* 31 entry valid or invalid */
+ unsigned int:16;
+ unsigned int prim_asn:16; /* primary ASN */
+ unsigned long long ia; /* Instruction Address */
+ unsigned long long lpp; /* Logical-Partition Program Param. */
+ unsigned long long vpp; /* Virtual-Machine Program Param. */
+};
+
+struct hws_trailer_entry {
+ unsigned int f:1; /* 0 - Block Full Indicator */
+ unsigned int a:1; /* 1 - Alert request control */
+ unsigned long:62; /* 2 - 63: Reserved */
+ unsigned long overflow; /* 64 - sample Overflow count */
+ unsigned long timestamp; /* 16 - time-stamp */
+ unsigned long timestamp1; /* */
+ unsigned long reserved1; /* 32 -Reserved */
+ unsigned long reserved2; /* */
+ unsigned long progusage1; /* 48 - reserved for programming use */
+ unsigned long progusage2; /* */
+};
+
+int hwsampler_setup(void);
+int hwsampler_shutdown(void);
+int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
+int hwsampler_deallocate(void);
+long hwsampler_query_min_interval(void);
+long hwsampler_query_max_interval(void);
+int hwsampler_start_all(unsigned long interval);
+int hwsampler_stop_all(void);
+int hwsampler_deactivate(unsigned int cpu);
+int hwsampler_activate(unsigned int cpu);
+unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu);
+
+#endif /*HWSAMPLER_H_*/
diff --git a/arch/s390/oprofile/hwsampler_files.c b/arch/s390/oprofile/hwsampler_files.c
new file mode 100644
index 000000000000..2e1da2449ba9
--- /dev/null
+++ b/arch/s390/oprofile/hwsampler_files.c
@@ -0,0 +1,162 @@
+/**
+ * arch/s390/oprofile/hwsampler_files.c
+ *
+ * Copyright IBM Corp. 2010
+ * Author: Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
+ */
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+
+#include "../../../drivers/oprofile/oprof.h"
+#include "hwsampler.h"
+
+#define DEFAULT_INTERVAL 4096
+
+#define DEFAULT_SDBT_BLOCKS 1
+#define DEFAULT_SDB_BLOCKS 511
+
+static unsigned long oprofile_hw_interval = DEFAULT_INTERVAL;
+static unsigned long oprofile_min_interval;
+static unsigned long oprofile_max_interval;
+
+static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
+static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
+
+static int hwsampler_file;
+static int hwsampler_running; /* start_mutex must be held to change */
+
+static struct oprofile_operations timer_ops;
+
+static int oprofile_hwsampler_start(void)
+{
+ int retval;
+
+ hwsampler_running = hwsampler_file;
+
+ if (!hwsampler_running)
+ return timer_ops.start();
+
+ retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+ if (retval)
+ return retval;
+
+ retval = hwsampler_start_all(oprofile_hw_interval);
+ if (retval)
+ hwsampler_deallocate();
+
+ return retval;
+}
+
+static void oprofile_hwsampler_stop(void)
+{
+ if (!hwsampler_running) {
+ timer_ops.stop();
+ return;
+ }
+
+ hwsampler_stop_all();
+ hwsampler_deallocate();
+ return;
+}
+
+static ssize_t hwsampler_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset);
+}
+
+static ssize_t hwsampler_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (oprofile_started)
+ /*
+ * safe to do without locking as we set
+ * hwsampler_running in start() when start_mutex is
+ * held
+ */
+ return -EBUSY;
+
+ hwsampler_file = val;
+
+ return count;
+}
+
+static const struct file_operations hwsampler_fops = {
+ .read = hwsampler_read,
+ .write = hwsampler_write,
+};
+
+static int oprofile_create_hwsampling_files(struct super_block *sb,
+ struct dentry *root)
+{
+ struct dentry *hw_dir;
+
+ /* reinitialize default values */
+ hwsampler_file = 1;
+
+ hw_dir = oprofilefs_mkdir(sb, root, "hwsampling");
+ if (!hw_dir)
+ return -EINVAL;
+
+ oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops);
+ oprofilefs_create_ulong(sb, hw_dir, "hw_interval",
+ &oprofile_hw_interval);
+ oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval",
+ &oprofile_min_interval);
+ oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval",
+ &oprofile_max_interval);
+ oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks",
+ &oprofile_sdbt_blocks);
+
+ return 0;
+}
+
+int oprofile_hwsampler_init(struct oprofile_operations* ops)
+{
+ if (hwsampler_setup())
+ return -ENODEV;
+
+ /*
+ * create hwsampler files only if hwsampler_setup() succeeds.
+ */
+ oprofile_min_interval = hwsampler_query_min_interval();
+ if (oprofile_min_interval < 0) {
+ oprofile_min_interval = 0;
+ return -ENODEV;
+ }
+ oprofile_max_interval = hwsampler_query_max_interval();
+ if (oprofile_max_interval < 0) {
+ oprofile_max_interval = 0;
+ return -ENODEV;
+ }
+
+ if (oprofile_timer_init(ops))
+ return -ENODEV;
+
+ printk(KERN_INFO "oprofile: using hardware sampling\n");
+
+ memcpy(&timer_ops, ops, sizeof(timer_ops));
+
+ ops->start = oprofile_hwsampler_start;
+ ops->stop = oprofile_hwsampler_stop;
+ ops->create_files = oprofile_create_hwsampling_files;
+
+ return 0;
+}
+
+void oprofile_hwsampler_exit(void)
+{
+ oprofile_timer_exit();
+ hwsampler_shutdown();
+}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 7a995113b918..059b44b9f171 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -12,15 +12,19 @@
#include <linux/init.h>
#include <linux/errno.h>
+extern int oprofile_hwsampler_init(struct oprofile_operations* ops);
+extern void oprofile_hwsampler_exit(void);
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
int __init oprofile_arch_init(struct oprofile_operations* ops)
{
ops->backtrace = s390_backtrace;
- return -ENODEV;
+
+ return oprofile_hwsampler_init(ops);
}
void oprofile_arch_exit(void)
{
+ oprofile_hwsampler_exit();
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8a9011dced14..28e392473ca2 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,6 +11,7 @@ config SUPERH
select HAVE_DMA_ATTRS
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 701667acfd89..7bdb572ddf93 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -723,11 +723,7 @@ static struct platform_device camera_devices[] = {
/* FSI */
static struct sh_fsi_platform_info fsi_info = {
- .portb_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(I2S) |
- SH_FSI_IFMT(I2S),
+ .portb_flags = SH_FSI_BRS_INV,
};
static struct resource fsi_resources[] = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527679394a25..c8bcf6a19b55 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -286,11 +286,7 @@ static struct platform_device ceu1_device = {
/* FSI */
/* change J20, J21, J22 pin to 1-2 connection to use slave mode */
static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(PCM) |
- SH_FSI_IFMT(PCM),
+ .porta_flags = SH_FSI_BRS_INV,
};
static struct resource fsi_resources[] = {
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index e0b0293bae63..780e083e4d17 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -11,6 +11,8 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz \
OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
+GCOV_PROFILE := n
+
#
# IMAGE_OFFSET is the load offset of the compression loader
#
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index 96e9b058aa1d..4418f9070ed1 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -1,16 +1,19 @@
/*
* Low-Level PCI Express Support for the SH7786
*
- * Copyright (C) 2009 - 2010 Paul Mundt
+ * Copyright (C) 2009 - 2011 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define pr_fmt(fmt) "PCI: " fmt
+
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/async.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -31,7 +34,7 @@ static unsigned int nr_ports;
static struct sh7786_pcie_hwops {
int (*core_init)(void);
- int (*port_init_hw)(struct sh7786_pcie_port *port);
+ async_func_ptr *port_init_hw;
} *sh7786_pcie_hwops;
static struct resource sh7786_pci0_resources[] = {
@@ -474,8 +477,9 @@ static int __init sh7786_pcie_core_init(void)
return test_mode_pin(MODE_PIN12) ? 3 : 2;
}
-static int __init sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
+static void __init sh7786_pcie_init_hw(void *data, async_cookie_t cookie)
{
+ struct sh7786_pcie_port *port = data;
int ret;
/*
@@ -488,18 +492,30 @@ static int __init sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
* Setup clocks, needed both for PHY and PCIe registers.
*/
ret = pcie_clk_init(port);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(ret < 0)) {
+ pr_err("clock initialization failed for port#%d\n",
+ port->index);
+ return;
+ }
ret = phy_init(port);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(ret < 0)) {
+ pr_err("phy initialization failed for port#%d\n",
+ port->index);
+ return;
+ }
ret = pcie_init(port);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(ret < 0)) {
+ pr_err("core initialization failed for port#%d\n",
+ port->index);
+ return;
+ }
- return register_pci_controller(port->hose);
+ /* In the interest of preserving device ordering, synchronize */
+ async_synchronize_cookie(cookie);
+
+ register_pci_controller(port->hose);
}
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
@@ -510,7 +526,7 @@ static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
static int __init sh7786_pcie_init(void)
{
struct clk *platclk;
- int ret = 0, i;
+ int i;
printk(KERN_NOTICE "PCI: Starting initialization.\n");
@@ -552,14 +568,10 @@ static int __init sh7786_pcie_init(void)
port->hose = sh7786_pci_channels + i;
port->hose->io_map_base = port->hose->resources[0].start;
- ret |= sh7786_pcie_hwops->port_init_hw(port);
+ async_schedule(sh7786_pcie_hwops->port_init_hw, port);
}
- if (unlikely(ret)) {
- clk_disable(platclk);
- clk_put(platclk);
- return ret;
- }
+ async_synchronize_full();
return 0;
}
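
For orientation, the conversion above follows the generic pattern from <linux/async.h>: each port's bring-up is scheduled as an async function, synchronizes on its own cookie just before registering the controller (so registration order matches scheduling order), and the top-level init waits for all of them with async_synchronize_full(). A minimal sketch of that pattern, with invented example_* names (only the linux/async.h calls are real):

    #include <linux/kernel.h>
    #include <linux/async.h>

    struct example_port { int index; };

    static void example_port_init(void *data, async_cookie_t cookie)
    {
            struct example_port *port = data;

            /* expensive per-port hardware bring-up may run concurrently */

            /* wait for everything scheduled before us, then register in order */
            async_synchronize_cookie(cookie);
            pr_info("port %d registered\n", port->index);
    }

    static int example_init_all(struct example_port *ports, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    async_schedule(example_port_init, &ports[i]);

            /* do not return before every port has finished initializing */
            async_synchronize_full();
            return 0;
    }
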
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h
index 06e2251a5e48..edab57265293 100644
--- a/arch/sh/include/asm/rwsem.h
+++ b/arch/sh/include/asm/rwsem.h
@@ -11,64 +11,13 @@
#endif
#ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
- long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
-} while (0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
/*
* lock for reading
@@ -179,10 +128,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index a78701da775b..4a5350037c8f 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
#include <asm-generic/sections.h>
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
extern char _ebss[];
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 672944f5b19c..e53b4b38bd11 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,7 +14,7 @@
#include <linux/io.h>
#include <linux/sh_timer.h>
#include <linux/serial_sci.h>
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
+ struct platform_device *dev[1];
+
if (mach_is_rts7751r2d()) {
scif_platform_data.scscr |= SCSCR_CKE1;
- early_platform_add_devices(&scif_device, 1);
+ dev[0] = &scif_device;
+ early_platform_add_devices(dev, 1);
} else {
- early_platform_add_devices(&sci_device, 1);
- early_platform_add_devices(&scif_device, 1);
+ dev[0] = &sci_device;
+ early_platform_add_devices(dev, 1);
+ dev[0] = &scif_device;
+ early_platform_add_devices(dev, 1);
}
early_platform_add_devices(sh7750_early_devices,
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 7f8a709c3ada..af4d46187a79 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -66,7 +66,7 @@ SECTIONS
__machvec_end = .;
}
- PERCPU(PAGE_SIZE)
+ PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
/*
* .exit.text is discarded at runtime, not link time, to deal with
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index faa8f86c0db4..0901b2f14e15 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -10,6 +10,16 @@
void __delay(unsigned long loops)
{
__asm__ __volatile__(
+ /*
+ * ST40-300 appears to have an issue with this code,
+ * normally taking two cycles each loop, as with all
+ * other SH variants. If however the branch and the
+ * delay slot straddle an 8 byte boundary, this increases
+ * to 3 cycles.
+ * This align directive ensures this doesn't occur.
+ */
+ ".balign 8\n\t"
+
"tst %0, %0\n\t"
"1:\t"
"bf/s 1b\n\t"
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 150aa326afff..2228c8cee4d6 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -42,6 +42,8 @@ obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
+GCOV_PROFILE_pmb.o := n
+
# Special flags for fault_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
# This is required because the code is called from a context in entry.S where
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 88d3dc3d30d5..5a580ea04429 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
kunmap_atomic(vfrom, KM_USER0);
}
- if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+ if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+ (vma->vm_flags & VM_EXEC))
__flush_purge_region(vto, PAGE_SIZE);
kunmap_atomic(vto, KM_USER1);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 95695e97703e..8b30435ad1ab 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -51,6 +51,8 @@ config SPARC64
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config ARCH_DEFCONFIG
string
@@ -460,6 +462,39 @@ config SPARC_LEON
from www.gaisler.com. You can download a sparc-linux cross-compilation
toolchain at www.gaisler.com.
+if SPARC_LEON
+menu "U-Boot options"
+
+config UBOOT_LOAD_ADDR
+ hex "uImage Load Address"
+ default 0x40004000
+ ---help---
+ U-Boot kernel load address, the address in physical address space
+ where u-boot will place the Linux kernel before booting it.
+ This address is normally the base address of main memory + 0x4000.
+
+config UBOOT_FLASH_ADDR
+ hex "uImage.o Load Address"
+ default 0x00080000
+ ---help---
+ Optional setting only affecting the uImage.o ELF-image used to
+ download the uImage file to the target using an ELF-loader other than
+ U-Boot. It may, for example, be used to download a uImage to FLASH with
+ the GRMON utility before even starting u-boot.
+
+config UBOOT_ENTRY_ADDR
+ hex "uImage Entry Address"
+ default 0xf0004000
+ ---help---
+ Do not change this unless you know what you're doing. This is
+ hardcoded by the SPARC32 and LEON port.
+
+ This is the virtual address u-boot jumps to when booting the Linux
+ Kernel.
+
+endmenu
+endif
+
endmenu
menu "Bus options (PCI etc.)"
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 113225b241e0..ad1fb5d969f3 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -88,7 +88,7 @@ boot := arch/sparc/boot
# Default target
all: zImage
-image zImage tftpboot.img vmlinux.aout: vmlinux
+image zImage uImage tftpboot.img vmlinux.aout: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
@@ -102,6 +102,7 @@ ifeq ($(ARCH),sparc)
define archhelp
echo '* image - kernel image ($(boot)/image)'
echo '* zImage - stripped kernel image ($(boot)/zImage)'
+ echo ' uImage - U-Boot SPARC32 Image (only for LEON)'
echo ' tftpboot.img - image prepared for tftp'
endef
else
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index a2c5898c1ab1..9205416b1e67 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -5,6 +5,7 @@
ROOT_IMG := /usr/src/root.img
ELFTOAOUT := elftoaout
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
hostprogs-y := piggyback btfixupprep
targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
@@ -77,6 +78,36 @@ $(obj)/zImage: $(obj)/image
$(obj)/vmlinux.aout: vmlinux FORCE
$(call if_changed,elftoaout)
@echo ' kernel: $@ is ready'
+else
+
+# The following lines make a readable image for U-Boot.
+# uImage - Binary file read by U-boot
+# uImage.o - object file of uImage for loading with a
+# flash programmer understanding ELF.
+
+OBJCOPYFLAGS_image.bin := -S -O binary -R .note -R .comment
+$(obj)/image.bin: $(obj)/image FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/image.gz: $(obj)/image.bin
+ $(call if_changed,gzip)
+
+quiet_cmd_uimage = UIMAGE $@
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sparc -O linux -T kernel \
+ -C gzip -a $(CONFIG_UBOOT_LOAD_ADDR) \
+ -e $(CONFIG_UBOOT_ENTRY_ADDR) -n 'Linux-$(KERNELRELEASE)' \
+ -d $< $@
+
+quiet_cmd_uimage.o = UIMAGE.O $@
+ cmd_uimage.o = $(LD) -Tdata $(CONFIG_UBOOT_FLASH_ADDR) \
+ -r -b binary $@ -o $@.o
+
+targets += uImage
+$(obj)/uImage: $(obj)/image.gz
+ $(call if_changed,uimage)
+ $(call if_changed,uimage.o)
+ @echo ' Image $@ is ready'
+
endif
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h
index 4e2bc490d714..c351aba997b7 100644
--- a/arch/sparc/include/asm/errno.h
+++ b/arch/sparc/include/asm/errno.h
@@ -112,4 +112,6 @@
#define ERFKILL 134 /* Operation not possible due to RF-kill */
+#define EHWPOISON 135 /* Memory page has hardware error */
+
#endif
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index a0b443cb3c1f..4f09666f0798 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -33,34 +33,34 @@
/* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
- * the type of ino_bucket->virt_irq as appropriate.
+ * the type of ino_bucket->irq as appropriate.
*
- * ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
+ * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
*/
#define NR_IRQS 255
-extern void irq_install_pre_handler(int virt_irq,
+extern void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq)
extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
+extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end);
-extern void sun4v_destroy_msi(unsigned int virt_irq);
-extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
+extern void sun4v_destroy_msi(unsigned int irq);
+extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end,
unsigned long imap_base,
unsigned long iclr_base);
-extern void sun4u_destroy_msi(unsigned int virt_irq);
+extern void sun4u_destroy_msi(unsigned int irq);
-extern unsigned char virt_irq_alloc(unsigned int dev_handle,
+extern unsigned char irq_alloc(unsigned int dev_handle,
unsigned int dev_ino);
#ifdef CONFIG_PCI_MSI
-extern void virt_irq_free(unsigned int virt_irq);
+extern void irq_free(unsigned int irq);
#endif
extern void __init init_IRQ(void);
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 8580d1764f90..c04f96fb753c 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -375,9 +375,6 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
extern unsigned int real_irq_entry[], smpleon_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
extern unsigned int linux_trap_ipi15_sun4m[];
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index 263c719e96f5..e50f326e71bd 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -180,6 +180,7 @@ struct amba_ahb_device {
struct device_node;
void _amba_init(struct device_node *dp, struct device_node ***nextp);
+extern unsigned long amba_system_id;
extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
extern struct amba_apb_device leon_percpu_timer_dev[16];
@@ -254,6 +255,11 @@ extern unsigned int sparc_leon_eirq;
#define GAISLER_L2C 0xffe /* internal device: leon2compat */
#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */
+/* Chip IDs */
+#define AEROFLEX_UT699 0x0699
+#define LEON4_NEXTREME1 0x0102
+#define GAISLER_GR712RC 0x0712
+
#define amba_vendor(x) (((x) >> 24) & 0xff)
#define amba_device(x) (((x) >> 12) & 0xfff)
diff --git a/arch/sparc/include/asm/mmu_32.h b/arch/sparc/include/asm/mmu_32.h
index ccd36d26615a..6f056e535cf8 100644
--- a/arch/sparc/include/asm/mmu_32.h
+++ b/arch/sparc/include/asm/mmu_32.h
@@ -4,4 +4,7 @@
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
+/* mm/srmmu.c */
+extern ctxd_t *srmmu_ctx_table_phys;
+
#endif
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index aa4c82648d88..cb33608cc68f 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -103,7 +103,7 @@ static inline unsigned int get_dma_residue(unsigned int dmanr)
return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
}
-static int __devinit ecpp_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ecpp_probe(struct platform_device *op)
{
unsigned long base = op->resource[0].start;
unsigned long config = op->resource[1].start;
@@ -235,7 +235,7 @@ static const struct of_device_id ecpp_match[] = {
{},
};
-static struct of_platform_driver ecpp_driver = {
+static struct platform_driver ecpp_driver = {
.driver = {
.name = "ecpp",
.owner = THIS_MODULE,
@@ -247,7 +247,7 @@ static struct of_platform_driver ecpp_driver = {
static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
- return of_register_platform_driver(&ecpp_driver);
+ return platform_driver_register(&ecpp_driver);
}
#endif /* !(_ASM_SPARC64_PARPORT_H */
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
index a2b4302869bc..069bf4d663a1 100644
--- a/arch/sparc/include/asm/rwsem.h
+++ b/arch/sparc/include/asm/rwsem.h
@@ -13,53 +13,12 @@
#ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-struct rw_semaphore {
- signed long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000L
#define RWSEM_ACTIVE_BIAS 0x00000001L
#define RWSEM_ACTIVE_MASK 0xffffffffL
#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
-} while (0)
/*
* lock for reading
@@ -160,11 +119,6 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 841905c10215..d82d7f4c0a79 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -29,10 +29,16 @@
*/
extern unsigned char boot_cpu_id;
+extern volatile unsigned long cpu_callin_map[NR_CPUS];
+extern cpumask_t smp_commenced_mask;
+extern struct linux_prom_registers smp_penguin_ctable;
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
+void cpu_panic(void);
+extern void smp4m_irq_rotate(int cpu);
+
/*
* General functions that each host system must provide.
*/
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 599398fbbc7c..99aa4db6e9c2 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC32) += devices.o
obj-$(CONFIG_SPARC32) += tadpole.o
-obj-$(CONFIG_SPARC32) += tick14.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
@@ -54,6 +53,7 @@ obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
+obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 52de4a9424e8..f679c57644d5 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -137,8 +137,7 @@ static const struct file_operations apc_fops = {
static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
-static int __devinit apc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit apc_probe(struct platform_device *op)
{
int err;
@@ -174,7 +173,7 @@ static struct of_device_id __initdata apc_match[] = {
};
MODULE_DEVICE_TABLE(of, apc_match);
-static struct of_platform_driver apc_driver = {
+static struct platform_driver apc_driver = {
.driver = {
.name = "apc",
.owner = THIS_MODULE,
@@ -185,7 +184,7 @@ static struct of_platform_driver apc_driver = {
static int __init apc_init(void)
{
- return of_register_platform_driver(&apc_driver);
+ return platform_driver_register(&apc_driver);
}
/* This driver is not critical to the boot process
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 3efd3c5af6a9..2abace076c7d 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -102,8 +102,7 @@ static struct of_device_id __initdata auxio_match[] = {
MODULE_DEVICE_TABLE(of, auxio_match);
-static int __devinit auxio_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit auxio_probe(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
unsigned long size;
@@ -132,7 +131,7 @@ static int __devinit auxio_probe(struct platform_device *dev,
return 0;
}
-static struct of_platform_driver auxio_driver = {
+static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
@@ -143,7 +142,7 @@ static struct of_platform_driver auxio_driver = {
static int __init auxio_init(void)
{
- return of_register_platform_driver(&auxio_driver);
+ return platform_driver_register(&auxio_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index cfa2624c5332..136d3718a74a 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -59,8 +59,7 @@ static int __devinit clock_board_calc_nslots(struct clock_board *p)
}
}
-static int __devinit clock_board_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit clock_board_probe(struct platform_device *op)
{
struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -148,7 +147,7 @@ static struct of_device_id __initdata clock_board_match[] = {
{},
};
-static struct of_platform_driver clock_board_driver = {
+static struct platform_driver clock_board_driver = {
.probe = clock_board_probe,
.driver = {
.name = "clock_board",
@@ -157,8 +156,7 @@ static struct of_platform_driver clock_board_driver = {
},
};
-static int __devinit fhc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fhc_probe(struct platform_device *op)
{
struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -254,7 +252,7 @@ static struct of_device_id __initdata fhc_match[] = {
{},
};
-static struct of_platform_driver fhc_driver = {
+static struct platform_driver fhc_driver = {
.probe = fhc_probe,
.driver = {
.name = "fhc",
@@ -265,8 +263,8 @@ static struct of_platform_driver fhc_driver = {
static int __init sunfire_init(void)
{
- (void) of_register_platform_driver(&fhc_driver);
- (void) of_register_platform_driver(&clock_board_driver);
+ (void) platform_driver_register(&fhc_driver);
+ (void) platform_driver_register(&clock_board_driver);
return 0;
}
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 08c466ebb32b..668c7be5d365 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -392,8 +392,7 @@ static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p,
}
}
-static int __devinit jbusmc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit jbusmc_probe(struct platform_device *op)
{
const struct linux_prom64_registers *mem_regs;
struct device_node *mem_node;
@@ -690,8 +689,7 @@ static void chmc_fetch_decode_regs(struct chmc *p)
chmc_read_mcreg(p, CHMCTRL_DECODE4));
}
-static int __devinit chmc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit chmc_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
unsigned long ver;
@@ -765,13 +763,12 @@ out_free:
goto out;
}
-static int __devinit us3mc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit us3mc_probe(struct platform_device *op)
{
if (mc_type == MC_TYPE_SAFARI)
- return chmc_probe(op, match);
+ return chmc_probe(op);
else if (mc_type == MC_TYPE_JBUS)
- return jbusmc_probe(op, match);
+ return jbusmc_probe(op);
return -ENODEV;
}
@@ -810,7 +807,7 @@ static const struct of_device_id us3mc_match[] = {
};
MODULE_DEVICE_TABLE(of, us3mc_match);
-static struct of_platform_driver us3mc_driver = {
+static struct platform_driver us3mc_driver = {
.driver = {
.name = "us3mc",
.owner = THIS_MODULE,
@@ -848,7 +845,7 @@ static int __init us3mc_init(void)
ret = register_dimm_printer(us3mc_dimm_printer);
if (!ret) {
- ret = of_register_platform_driver(&us3mc_driver);
+ ret = platform_driver_register(&us3mc_driver);
if (ret)
unregister_dimm_printer(us3mc_dimm_printer);
}
@@ -859,7 +856,7 @@ static void __exit us3mc_cleanup(void)
{
if (us3mc_platform()) {
unregister_dimm_printer(us3mc_dimm_printer);
- of_unregister_platform_driver(&us3mc_driver);
+ platform_driver_unregister(&us3mc_driver);
}
}
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 0dc714fa23d8..7925c54f4133 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -324,7 +324,7 @@ void __cpuinit cpu_probe(void)
psr = get_psr();
put_psr(psr | PSR_EF);
#ifdef CONFIG_SPARC_LEON
- fpu_vers = 7;
+ fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
#else
fpu_vers = ((get_fsr() >> 17) & 0x7);
#endif
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index c011b932bb17..d1f1361c4167 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -213,8 +213,8 @@ extern struct cheetah_err_info *cheetah_error_log;
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;
- /* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
+ /* Interrupt number assigned to this INO. */
+/*0x08*/unsigned int __irq;
/*0x0c*/unsigned int __pad;
};
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 72509d0e34be..6f01e8c83197 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -333,13 +333,10 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma)
{
struct iommu *iommu;
- iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- iopte = iommu->page_table +
- ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e0f72a..c6ce9a6a4790 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,14 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
+#ifndef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
+#else
+static inline void mmu_inval_dma_area(void *va, unsigned long len)
+{
+ if (!sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
+}
#endif
static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +258,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp)
{
struct platform_device *op = to_platform_device(dev);
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+ unsigned long len_total = PAGE_ALIGN(len);
unsigned long va;
struct resource *res;
int order;
@@ -280,7 +284,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
- mmu_inval_dma_area(va, len_total);
+ mmu_inval_dma_area((void *)va, len_total);
+
// XXX The mmu_map_dma_area does this for us below, see comments.
// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
@@ -297,9 +302,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
err_noiommu:
release_resource(res);
err_nova:
- free_pages(va, order);
-err_nomem:
kfree(res);
+err_nomem:
+ free_pages(va, order);
err_nopages:
return NULL;
}
@@ -321,7 +326,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
+ n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
@@ -408,9 +413,6 @@ struct dma_map_ops sbus_dma_ops = {
.sync_sg_for_device = sbus_sync_sg_for_device,
};
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
@@ -422,7 +424,9 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
-#ifdef CONFIG_PCI
+
+/* LEON reuses PCI DMA ops */
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,8 +434,8 @@ arch_initcall(sparc_register_ioport);
static void *pci32_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *pba, gfp_t gfp)
{
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
- unsigned long va;
+ unsigned long len_total = PAGE_ALIGN(len);
+ void *va;
struct resource *res;
int order;
@@ -443,34 +447,34 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
- va = __get_free_pages(GFP_KERNEL, order);
- if (va == 0) {
+ va = (void *) __get_free_pages(GFP_KERNEL, order);
+ if (va == NULL) {
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
- return NULL;
+ goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- free_pages(va, order);
printk("pci_alloc_consistent: no core\n");
- return NULL;
+ goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
- free_pages(va, order);
- kfree(res);
- return NULL;
+ goto err_nova;
}
mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
- (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
+
+err_nova:
+ kfree(res);
+err_nomem:
+ free_pages((unsigned long)va, order);
+err_nopages:
+ return NULL;
}
/* Free and unmap a consistent DMA buffer.
@@ -485,7 +489,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
dma_addr_t ba)
{
struct resource *res;
- unsigned long pgp;
+ void *pgp;
if ((res = _sparc_find_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
@@ -498,21 +502,21 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
+ n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
(long)((res->end-res->start)+1), (long)n);
return;
}
- pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
+ pgp = phys_to_virt(ba); /* bus_to_virt actually */
mmu_inval_dma_area(pgp, n);
sparc_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);
- free_pages(pgp, get_order(n));
+ free_pages((unsigned long)pgp, get_order(n));
}
/*
@@ -527,6 +531,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
return page_to_phys(page) + offset;
}
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ if (dir != PCI_DMA_TODEVICE)
+ mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
+}
+
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
@@ -572,9 +583,8 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(page_address(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
}
}
}
@@ -593,8 +603,8 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(phys_to_virt(ba),
+ PAGE_ALIGN(size));
}
}
@@ -602,8 +612,8 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(phys_to_virt(ba),
+ PAGE_ALIGN(size));
}
}
@@ -622,9 +632,8 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(page_address(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
}
}
}
@@ -638,9 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(page_address(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
}
}
}
@@ -649,6 +657,7 @@ struct dma_map_ops pci32_dma_ops = {
.alloc_coherent = pci32_alloc_coherent,
.free_coherent = pci32_free_coherent,
.map_page = pci32_map_page,
+ .unmap_page = pci32_unmap_page,
.map_sg = pci32_map_sg,
.unmap_sg = pci32_unmap_sg,
.sync_single_for_cpu = pci32_sync_single_for_cpu,
@@ -658,7 +667,16 @@ struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &pci32_dma_ops;
+#elif defined(CONFIG_SBUS)
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+EXPORT_SYMBOL(dma_ops);
+
/*
* Return whether the given PCI device DMA address mask can be
@@ -717,7 +735,7 @@ static const struct file_operations sparc_io_proc_fops = {
static struct resource *_sparc_find_resource(struct resource *root,
unsigned long hit)
{
- struct resource *tmp;
+ struct resource *tmp;
for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
if (tmp->start <= hit && tmp->end >= hit)
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index db7513881530..008453b798ec 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -1,5 +1,41 @@
+#include <linux/platform_device.h>
+
#include <asm/btfixup.h>
+/* sun4m specific type definitions */
+
+/* This maps direct to CPU specific interrupt registers */
+struct sun4m_irq_percpu {
+ u32 pending;
+ u32 clear;
+ u32 set;
+};
+
+/* This maps direct to global interrupt registers */
+struct sun4m_irq_global {
+ u32 pending;
+ u32 mask;
+ u32 mask_clear;
+ u32 mask_set;
+ u32 interrupt_target;
+};
+
+extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
+extern struct sun4m_irq_global __iomem *sun4m_irq_global;
+
+/*
+ * Platform specific irq configuration
+ * The individual platforms assign their platform
+ * specifics in their init functions.
+ */
+struct sparc_irq_config {
+ void (*init_timers)(irq_handler_t);
+ unsigned int (*build_device_irq)(struct platform_device *op,
+ unsigned int real_irq);
+};
+extern struct sparc_irq_config sparc_irq_config;
+
+
/* Dave Redman (djhr@tadpole.co.uk)
* changed these to function pointers.. it saves cycles and will allow
* the irq dependencies to be split into different files at a later date
@@ -45,12 +81,6 @@ static inline void load_profile_irq(int cpu, int limit)
BTFIXUP_CALL(load_profile_irq)(cpu, limit);
}
-extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
-
-extern void claim_ticker14(irq_handler_t irq_handler,
- int irq,
- unsigned int timeout);
-
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
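
The sparc_irq_config hook added to irq.h above is meant to be filled in by each platform's init code, as the comment there notes. A hypothetical sketch of such an assignment, where the example_* names are placeholders and not part of this patch:

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/platform_device.h>
    #include "irq.h"

    static void example_init_timers(irq_handler_t counter_fn)
    {
            /* program the platform timer hardware and install counter_fn
             * as the tick/profiling interrupt handler */
    }

    static unsigned int example_build_device_irq(struct platform_device *op,
                                                 unsigned int real_irq)
    {
            /* translate the raw PROM interrupt number into a Linux irq
             * number for this device; identity mapping shown for brevity */
            return real_irq;
    }

    void __init example_platform_init_IRQ(void)
    {
            sparc_irq_config.init_timers = example_init_timers;
            sparc_irq_config.build_device_irq = example_build_device_irq;
    }
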
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 5ad6e5c5dbb3..7c93df4099cb 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -1,8 +1,8 @@
/*
- * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
- * Sparc the IRQs are basically 'cast in stone'
- * and you are supposed to probe the prom's device
- * node trees to find out who's got which IRQ.
+ * Interrupt request handling routines. On the
+ * Sparc the IRQs are basically 'cast in stone'
+ * and you are supposed to probe the prom's device
+ * node trees to find out who's got which IRQ.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
@@ -11,40 +11,11 @@
* Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
#include <linux/seq_file.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/smp.h>
-#include <asm/vaddrs.h>
-#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/pcic.h>
#include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
+#include <asm/pcic.h>
#include <asm/leon.h>
#include "kernel.h"
@@ -57,6 +28,10 @@
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
+
+/* platform specific irq setup */
+struct sparc_irq_config sparc_irq_config;
+
unsigned long arch_local_irq_save(void)
{
unsigned long retval;
@@ -128,15 +103,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
*
*/
-static void irq_panic(void)
-{
- extern char *cputypval;
- prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
- prom_halt();
-}
-void (*sparc_init_timers)(irq_handler_t ) =
- (void (*)(irq_handler_t )) irq_panic;
/*
* Dave Redman (djhr@tadpole.co.uk)
@@ -145,7 +112,7 @@ void (*sparc_init_timers)(irq_handler_t ) =
* instead, because some of the devices attach very early, I do something
* equally sucky but at least we'll never try to free statically allocated
* space or call kmalloc before kmalloc_init :(.
- *
+ *
* In fact it's the timer10 that attaches first.. then timer14
* then kmalloc_init is called.. then the tty interrupts attach.
* hmmm....
@@ -166,22 +133,20 @@ DEFINE_SPINLOCK(irq_action_lock);
int show_interrupts(struct seq_file *p, void *v)
{
- int i = *(loff_t *) v;
- struct irqaction * action;
+ int i = *(loff_t *)v;
+ struct irqaction *action;
unsigned long flags;
#ifdef CONFIG_SMP
int j;
#endif
- if (sparc_cpu_model == sun4d) {
- extern int show_sun4d_interrupts(struct seq_file *, void *);
-
+ if (sparc_cpu_model == sun4d)
return show_sun4d_interrupts(p, v);
- }
+
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
action = sparc_irq[i].action;
- if (!action)
+ if (!action)
goto out_unlock;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
@@ -195,7 +160,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, " %c %s",
(action->flags & IRQF_DISABLED) ? '+' : ' ',
action->name);
- for (action=action->next; action; action = action->next) {
+ for (action = action->next; action; action = action->next) {
seq_printf(p, ",%s %s",
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
@@ -209,22 +174,20 @@ out_unlock:
void free_irq(unsigned int irq, void *dev_id)
{
- struct irqaction * action;
+ struct irqaction *action;
struct irqaction **actionp;
- unsigned long flags;
+ unsigned long flags;
unsigned int cpu_irq;
-
+
if (sparc_cpu_model == sun4d) {
- extern void sun4d_free_irq(unsigned int, void *);
-
sun4d_free_irq(irq, dev_id);
return;
}
cpu_irq = irq & (NR_IRQS - 1);
- if (cpu_irq > 14) { /* 14 irq levels on the sparc */
- printk("Trying to free bogus IRQ %d\n", irq);
- return;
- }
+ if (cpu_irq > 14) { /* 14 irq levels on the sparc */
+ printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq);
+ return;
+ }
spin_lock_irqsave(&irq_action_lock, flags);
@@ -232,7 +195,7 @@ void free_irq(unsigned int irq, void *dev_id)
action = *actionp;
if (!action->handler) {
- printk("Trying to free free IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
goto out_unlock;
}
if (dev_id) {
@@ -242,19 +205,21 @@ void free_irq(unsigned int irq, void *dev_id)
actionp = &action->next;
}
if (!action) {
- printk("Trying to free free shared IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free shared IRQ%d\n",
+ irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
- printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
+ printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
+ irq);
goto out_unlock;
}
- if (action->flags & SA_STATIC_ALLOC)
- {
- /* This interrupt is marked as specially allocated
+ if (action->flags & SA_STATIC_ALLOC) {
+ /*
+ * This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
- printk("Attempt to free statically allocated IRQ%d (%s)\n",
+ printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
@@ -275,7 +240,6 @@ void free_irq(unsigned int irq, void *dev_id)
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
-
EXPORT_SYMBOL(free_irq);
/*
@@ -297,64 +261,62 @@ void synchronize_irq(unsigned int irq)
EXPORT_SYMBOL(synchronize_irq);
#endif /* SMP */
-void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
+void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
{
- int i;
- struct irqaction * action;
+ int i;
+ struct irqaction *action;
unsigned int cpu_irq;
-
+
cpu_irq = irq & (NR_IRQS - 1);
action = sparc_irq[cpu_irq].action;
- printk("IO device interrupt, irq = %d\n", irq);
- printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
+ printk(KERN_ERR "IO device interrupt, irq = %d\n", irq);
+ printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
regs->npc, regs->u_regs[14]);
if (action) {
- printk("Expecting: ");
- for (i = 0; i < 16; i++)
- if (action->handler)
- printk("[%s:%d:0x%x] ", action->name,
- (int) i, (unsigned int) action->handler);
+ printk(KERN_ERR "Expecting: ");
+ for (i = 0; i < 16; i++)
+ if (action->handler)
+ printk(KERN_CONT "[%s:%d:0x%x] ", action->name,
+ i, (unsigned int)action->handler);
}
- printk("AIEEE\n");
+ printk(KERN_ERR "AIEEE\n");
panic("bogus interrupt received");
}
-void handler_irq(int irq, struct pt_regs * regs)
+void handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- struct irqaction * action;
+ struct irqaction *action;
int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- extern void smp4m_irq_rotate(int cpu);
-#endif
old_regs = set_irq_regs(regs);
irq_enter();
- disable_pil_irq(irq);
+ disable_pil_irq(pil);
#ifdef CONFIG_SMP
/* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
- if((sparc_cpu_model==sun4m) && (irq < 10))
+ if ((sparc_cpu_model==sun4m) && (pil < 10))
smp4m_irq_rotate(cpu);
#endif
- action = sparc_irq[irq].action;
- sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
- kstat_cpu(cpu).irqs[irq]++;
+ action = sparc_irq[pil].action;
+ sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS;
+ kstat_cpu(cpu).irqs[pil]++;
do {
if (!action || !action->handler)
- unexpected_irq(irq, NULL, regs);
- action->handler(irq, action->dev_id);
+ unexpected_irq(pil, NULL, regs);
+ action->handler(pil, action->dev_id);
action = action->next;
} while (action);
- sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
- enable_pil_irq(irq);
+ sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS;
+ enable_pil_irq(pil);
irq_exit();
set_irq_regs(old_regs);
}
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
-/* Fast IRQs on the Sparc can only have one routine attached to them,
+/*
+ * Fast IRQs on the Sparc can only have one routine attached to them,
* thus no sharing possible.
*/
static int request_fast_irq(unsigned int irq,
@@ -367,15 +329,13 @@ static int request_fast_irq(unsigned int irq,
int ret;
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
- extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
-
cpu_irq = irq & (NR_IRQS - 1);
- if(cpu_irq > 14) {
+ if (cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
- if(!handler) {
+ if (!handler) {
ret = -EINVAL;
goto out;
}
@@ -383,34 +343,33 @@ static int request_fast_irq(unsigned int irq,
spin_lock_irqsave(&irq_action_lock, flags);
action = sparc_irq[cpu_irq].action;
- if(action) {
- if(action->flags & IRQF_SHARED)
+ if (action) {
+ if (action->flags & IRQF_SHARED)
panic("Trying to register fast irq when already shared.\n");
- if(irqflags & IRQF_SHARED)
+ if (irqflags & IRQF_SHARED)
panic("Trying to register fast irq as shared.\n");
/* Anyway, someone already owns it so cannot be made fast. */
- printk("request_fast_irq: Trying to register yet already owned.\n");
+ printk(KERN_ERR "request_fast_irq: Trying to register yet already owned.\n");
ret = -EBUSY;
goto out_unlock;
}
- /* If this is flagged as statically allocated then we use our
+ /*
+ * If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
if (irqflags & SA_STATIC_ALLOC) {
- if (static_irq_count < MAX_STATIC_ALLOC)
- action = &static_irqaction[static_irq_count++];
- else
- printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
- irq, devname);
+ if (static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
}
-
+
if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -426,9 +385,12 @@ static int request_fast_irq(unsigned int irq,
INSTANTIATE(sparc_ttable)
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu1;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu2;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu3;
+ INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
/*
@@ -454,7 +416,8 @@ out:
return ret;
}
-/* These variables are used to access state from the assembler
+/*
+ * These variables are used to access state from the assembler
* interrupt handler, floppy_hardint, so we cannot put these in
* the floppy driver image because that would not work in the
* modular case.
@@ -477,8 +440,6 @@ EXPORT_SYMBOL(pdma_base);
unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);
-extern void floppy_hardint(void);
-
static irq_handler_t floppy_irq_handler;
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
@@ -494,9 +455,11 @@ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
irq_exit();
enable_pil_irq(irq);
set_irq_regs(old_regs);
- // XXX Eek, it's totally changed with preempt_count() and such
- // if (softirq_pending(cpu))
- // do_softirq();
+ /*
+ * XXX Eek, it's totally changed with preempt_count() and such
+ * if (softirq_pending(cpu))
+ * do_softirq();
+ */
}
int sparc_floppy_request_irq(int irq, unsigned long flags,
@@ -511,21 +474,18 @@ EXPORT_SYMBOL(sparc_floppy_request_irq);
int request_irq(unsigned int irq,
irq_handler_t handler,
- unsigned long irqflags, const char * devname, void *dev_id)
+ unsigned long irqflags, const char *devname, void *dev_id)
{
- struct irqaction * action, **actionp;
+ struct irqaction *action, **actionp;
unsigned long flags;
unsigned int cpu_irq;
int ret;
-
- if (sparc_cpu_model == sun4d) {
- extern int sun4d_request_irq(unsigned int,
- irq_handler_t ,
- unsigned long, const char *, void *);
+
+ if (sparc_cpu_model == sun4d)
return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
- }
+
cpu_irq = irq & (NR_IRQS - 1);
- if(cpu_irq > 14) {
+ if (cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
@@ -533,7 +493,7 @@ int request_irq(unsigned int irq,
ret = -EINVAL;
goto out;
}
-
+
spin_lock_irqsave(&irq_action_lock, flags);
actionp = &sparc_irq[cpu_irq].action;
@@ -544,7 +504,8 @@ int request_irq(unsigned int irq,
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
- printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
+ printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
+ irq);
ret = -EBUSY;
goto out_unlock;
}
@@ -559,14 +520,12 @@ int request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
- printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
+ printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
}
-
if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -587,7 +546,6 @@ out_unlock:
out:
return ret;
}
-
EXPORT_SYMBOL(request_irq);
void disable_irq_nosync(unsigned int irq)
@@ -606,26 +564,30 @@ void enable_irq(unsigned int irq)
{
__enable_irq(irq);
}
-
EXPORT_SYMBOL(enable_irq);
-/* We really don't need these at all on the Sparc. We only have
+/*
+ * We really don't need these at all on the Sparc. We only have
* stubs here because they are exported to modules.
*/
unsigned long probe_irq_on(void)
{
return 0;
}
-
EXPORT_SYMBOL(probe_irq_on);
int probe_irq_off(unsigned long mask)
{
return 0;
}
-
EXPORT_SYMBOL(probe_irq_off);
+static unsigned int build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ return real_irq;
+}
+
/* djhr
* This could probably be made indirect too and assigned in the CPU
* bits of the code. That would be much nicer I think and would also
@@ -636,11 +598,9 @@ EXPORT_SYMBOL(probe_irq_off);
void __init init_IRQ(void)
{
- extern void sun4c_init_IRQ( void );
- extern void sun4m_init_IRQ( void );
- extern void sun4d_init_IRQ( void );
+ sparc_irq_config.build_device_irq = build_device_irq;
- switch(sparc_cpu_model) {
+ switch (sparc_cpu_model) {
case sun4c:
case sun4:
sun4c_init_IRQ();
@@ -656,7 +616,7 @@ void __init init_IRQ(void)
#endif
sun4m_init_IRQ();
break;
-
+
case sun4d:
sun4d_init_IRQ();
break;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 830d70a3e20b..eb16e3b8a2dd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -82,7 +82,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa)
"i" (ASI_PHYS_USE_EC));
}
-static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
unsigned int ret;
@@ -90,21 +90,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
- __virt_irq)),
+ __irq)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
-static void bucket_set_virt_irq(unsigned long bucket_pa,
- unsigned int virt_irq)
+static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
__asm__ __volatile__("stwa %0, [%1] %2"
: /* no outputs */
- : "r" (virt_irq),
+ : "r" (irq),
"r" (bucket_pa +
offsetof(struct ino_bucket,
- __virt_irq)),
+ __irq)),
"i" (ASI_PHYS_USE_EC));
}
@@ -114,50 +113,49 @@ static struct {
unsigned int dev_handle;
unsigned int dev_ino;
unsigned int in_use;
-} virt_irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(virt_irq_alloc_lock);
+} irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(irq_alloc_lock);
-unsigned char virt_irq_alloc(unsigned int dev_handle,
- unsigned int dev_ino)
+unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
unsigned long flags;
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
- spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+ spin_lock_irqsave(&irq_alloc_lock, flags);
for (ent = 1; ent < NR_IRQS; ent++) {
- if (!virt_irq_table[ent].in_use)
+ if (!irq_table[ent].in_use)
break;
}
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
ent = 0;
} else {
- virt_irq_table[ent].dev_handle = dev_handle;
- virt_irq_table[ent].dev_ino = dev_ino;
- virt_irq_table[ent].in_use = 1;
+ irq_table[ent].dev_handle = dev_handle;
+ irq_table[ent].dev_ino = dev_ino;
+ irq_table[ent].in_use = 1;
}
- spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+ spin_unlock_irqrestore(&irq_alloc_lock, flags);
return ent;
}
#ifdef CONFIG_PCI_MSI
-void virt_irq_free(unsigned int virt_irq)
+void irq_free(unsigned int irq)
{
unsigned long flags;
- if (virt_irq >= NR_IRQS)
+ if (irq >= NR_IRQS)
return;
- spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+ spin_lock_irqsave(&irq_alloc_lock, flags);
- virt_irq_table[virt_irq].in_use = 0;
+ irq_table[irq].in_use = 0;
- spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+ spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
@@ -190,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %9s", irq_desc[i].chip->name);
+ seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -253,39 +251,38 @@ struct irq_handler_data {
};
#ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
+static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, affinity);
if (cpus_equal(mask, cpu_online_map)) {
- cpuid = map_to_cpu(virt_irq);
+ cpuid = map_to_cpu(irq);
} else {
cpumask_t tmp;
cpus_and(tmp, cpu_online_map, mask);
- cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
+ cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
}
return cpuid;
}
#else
-#define irq_choose_cpu(virt_irq, affinity) \
+#define irq_choose_cpu(irq, affinity) \
real_hard_smp_processor_id()
#endif
-static void sun4u_irq_enable(unsigned int virt_irq)
+static void sun4u_irq_enable(struct irq_data *data)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_handler_data *handler_data = data->handler_data;
- if (likely(data)) {
+ if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(virt_irq,
- irq_desc[virt_irq].affinity);
- imap = data->imap;
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
+ imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -294,21 +291,21 @@ static void sun4u_irq_enable(unsigned int virt_irq)
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
- upa_writeq(ICLR_IDLE, data->iclr);
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
}
-static int sun4u_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4u_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_handler_data *handler_data = data->handler_data;
- if (likely(data)) {
+ if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(virt_irq, mask);
- imap = data->imap;
+ cpuid = irq_choose_cpu(data->irq, mask);
+ imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -317,7 +314,7 @@ static int sun4u_set_affinity(unsigned int virt_irq,
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
- upa_writeq(ICLR_IDLE, data->iclr);
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
return 0;
@@ -340,27 +337,26 @@ static int sun4u_set_affinity(unsigned int virt_irq,
* sees that, it also hooks up a default ->shutdown method which
* invokes ->mask() which we do not want. See irq_chip_set_defaults().
*/
-static void sun4u_irq_disable(unsigned int virt_irq)
+static void sun4u_irq_disable(struct irq_data *data)
{
}
-static void sun4u_irq_eoi(unsigned int virt_irq)
+static void sun4u_irq_eoi(struct irq_data *data)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_desc *desc = irq_desc + data->irq;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
- if (likely(data))
- upa_writeq(ICLR_IDLE, data->iclr);
+ if (likely(handler_data))
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
-static void sun4v_irq_enable(unsigned int virt_irq)
+static void sun4v_irq_enable(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(virt_irq,
- irq_desc[virt_irq].affinity);
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -377,11 +373,11 @@ static void sun4v_irq_enable(unsigned int virt_irq)
ino, err);
}
-static int sun4v_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4v_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, mask);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -392,9 +388,9 @@ static int sun4v_set_affinity(unsigned int virt_irq,
return 0;
}
-static void sun4v_irq_disable(unsigned int virt_irq)
+static void sun4v_irq_disable(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned int ino = irq_table[data->irq].dev_ino;
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -403,10 +399,10 @@ static void sun4v_irq_disable(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
-static void sun4v_irq_eoi(unsigned int virt_irq)
+static void sun4v_irq_eoi(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- struct irq_desc *desc = irq_desc + virt_irq;
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ struct irq_desc *desc = irq_desc + data->irq;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -418,15 +414,15 @@ static void sun4v_irq_eoi(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
-static void sun4v_virq_enable(unsigned int virt_irq)
+static void sun4v_virq_enable(struct irq_data *data)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
- cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -447,16 +443,16 @@ static void sun4v_virq_enable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static int sun4v_virt_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4v_virt_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
- cpuid = irq_choose_cpu(virt_irq, mask);
+ cpuid = irq_choose_cpu(data->irq, mask);
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -467,13 +463,13 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
return 0;
}
-static void sun4v_virq_disable(unsigned int virt_irq)
+static void sun4v_virq_disable(struct irq_data *data)
{
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
@@ -483,17 +479,17 @@ static void sun4v_virq_disable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static void sun4v_virq_eoi(unsigned int virt_irq)
+static void sun4v_virq_eoi(struct irq_data *data)
{
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_desc *desc = irq_desc + data->irq;
unsigned long dev_handle, dev_ino;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
@@ -504,50 +500,49 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
}
static struct irq_chip sun4u_irq = {
- .name = "sun4u",
- .enable = sun4u_irq_enable,
- .disable = sun4u_irq_disable,
- .eoi = sun4u_irq_eoi,
- .set_affinity = sun4u_set_affinity,
+ .name = "sun4u",
+ .irq_enable = sun4u_irq_enable,
+ .irq_disable = sun4u_irq_disable,
+ .irq_eoi = sun4u_irq_eoi,
+ .irq_set_affinity = sun4u_set_affinity,
};
static struct irq_chip sun4v_irq = {
- .name = "sun4v",
- .enable = sun4v_irq_enable,
- .disable = sun4v_irq_disable,
- .eoi = sun4v_irq_eoi,
- .set_affinity = sun4v_set_affinity,
+ .name = "sun4v",
+ .irq_enable = sun4v_irq_enable,
+ .irq_disable = sun4v_irq_disable,
+ .irq_eoi = sun4v_irq_eoi,
+ .irq_set_affinity = sun4v_set_affinity,
};
static struct irq_chip sun4v_virq = {
- .name = "vsun4v",
- .enable = sun4v_virq_enable,
- .disable = sun4v_virq_disable,
- .eoi = sun4v_virq_eoi,
- .set_affinity = sun4v_virt_set_affinity,
+ .name = "vsun4v",
+ .irq_enable = sun4v_virq_enable,
+ .irq_disable = sun4v_virq_disable,
+ .irq_eoi = sun4v_virq_eoi,
+ .irq_set_affinity = sun4v_virt_set_affinity,
};
-static void pre_flow_handler(unsigned int virt_irq,
- struct irq_desc *desc)
+static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ struct irq_handler_data *handler_data = get_irq_data(irq);
+ unsigned int ino = irq_table[irq].dev_ino;
- data->pre_handler(ino, data->arg1, data->arg2);
+ handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
- handle_fasteoi_irq(virt_irq, desc);
+ handle_fasteoi_irq(irq, desc);
}
-void irq_install_pre_handler(int virt_irq,
+void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_handler_data *handler_data = get_irq_data(irq);
+ struct irq_desc *desc = irq_desc + irq;
- data->pre_handler = func;
- data->arg1 = arg1;
- data->arg2 = arg2;
+ handler_data->pre_handler = func;
+ handler_data->arg1 = arg1;
+ handler_data->arg2 = arg2;
desc->handle_irq = pre_flow_handler;
}
@@ -555,81 +550,81 @@ void irq_install_pre_handler(int virt_irq,
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
- struct irq_handler_data *data;
- unsigned int virt_irq;
+ struct irq_handler_data *handler_data;
+ unsigned int irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
- virt_irq = bucket_get_virt_irq(__pa(bucket));
- if (!virt_irq) {
- virt_irq = virt_irq_alloc(0, ino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
- set_irq_chip_and_handler_name(virt_irq,
+ irq = bucket_get_irq(__pa(bucket));
+ if (!irq) {
+ irq = irq_alloc(0, ino);
+ bucket_set_irq(__pa(bucket), irq);
+ set_irq_chip_and_handler_name(irq,
&sun4u_irq,
handle_fasteoi_irq,
"IVEC");
}
- data = get_irq_chip_data(virt_irq);
- if (unlikely(data))
+ handler_data = get_irq_data(irq);
+ if (unlikely(handler_data))
goto out;
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(virt_irq, data);
+ set_irq_data(irq, handler_data);
- data->imap = imap;
- data->iclr = iclr;
+ handler_data->imap = imap;
+ handler_data->iclr = iclr;
out:
- return virt_irq;
+ return irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
struct irq_chip *chip)
{
struct ino_bucket *bucket;
- struct irq_handler_data *data;
- unsigned int virt_irq;
+ struct irq_handler_data *handler_data;
+ unsigned int irq;
BUG_ON(tlb_type != hypervisor);
bucket = &ivector_table[sysino];
- virt_irq = bucket_get_virt_irq(__pa(bucket));
- if (!virt_irq) {
- virt_irq = virt_irq_alloc(0, sysino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
- set_irq_chip_and_handler_name(virt_irq, chip,
+ irq = bucket_get_irq(__pa(bucket));
+ if (!irq) {
+ irq = irq_alloc(0, sysino);
+ bucket_set_irq(__pa(bucket), irq);
+ set_irq_chip_and_handler_name(irq, chip,
handle_fasteoi_irq,
"IVEC");
}
- data = get_irq_chip_data(virt_irq);
- if (unlikely(data))
+ handler_data = get_irq_data(irq);
+ if (unlikely(handler_data))
goto out;
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(virt_irq, data);
+ set_irq_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
- data->imap = ~0UL;
- data->iclr = ~0UL;
+ handler_data->imap = ~0UL;
+ handler_data->iclr = ~0UL;
out:
- return virt_irq;
+ return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -641,11 +636,11 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
- struct irq_handler_data *data;
+ struct irq_handler_data *handler_data;
unsigned long hv_err, cookie;
struct ino_bucket *bucket;
struct irq_desc *desc;
- unsigned int virt_irq;
+ unsigned int irq;
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
@@ -662,32 +657,32 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
((unsigned long) bucket +
sizeof(struct ino_bucket)));
- virt_irq = virt_irq_alloc(devhandle, devino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
+ irq = irq_alloc(devhandle, devino);
+ bucket_set_irq(__pa(bucket), irq);
- set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
+ set_irq_chip_and_handler_name(irq, &sun4v_virq,
handle_fasteoi_irq,
"IVEC");
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data))
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data))
return 0;
/* In order to make the LDC channel startup sequence easier,
* especially wrt. locking, we do not let request_irq() enable
* the interrupt.
*/
- desc = irq_desc + virt_irq;
+ desc = irq_desc + irq;
desc->status |= IRQ_NOAUTOEN;
- set_irq_chip_data(virt_irq, data);
+ set_irq_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
- data->imap = ~0UL;
- data->iclr = ~0UL;
+ handler_data->imap = ~0UL;
+ handler_data->iclr = ~0UL;
cookie = ~__pa(bucket);
hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
@@ -697,30 +692,30 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
prom_halt();
}
- return virt_irq;
+ return irq;
}
-void ack_bad_irq(unsigned int virt_irq)
+void ack_bad_irq(unsigned int irq)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned int ino = irq_table[irq].dev_ino;
if (!ino)
ino = 0xdeadbeef;
- printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
- ino, virt_irq);
+ printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+ ino, irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-void __irq_entry handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
void *orig_sp;
- clear_softint(1 << irq);
+ clear_softint(1 << pil);
old_regs = set_irq_regs(regs);
irq_enter();
@@ -741,16 +736,16 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs)
while (bucket_pa) {
struct irq_desc *desc;
unsigned long next_pa;
- unsigned int virt_irq;
+ unsigned int irq;
next_pa = bucket_get_chain_pa(bucket_pa);
- virt_irq = bucket_get_virt_irq(bucket_pa);
+ irq = bucket_get_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
- desc = irq_desc + virt_irq;
+ desc = irq_desc + irq;
if (!(desc->status & IRQ_DISABLED))
- desc->handle_irq(virt_irq, desc);
+ desc->handle_irq(irq, desc);
bucket_pa = next_pa;
}
@@ -798,9 +793,12 @@ void fixup_irqs(void)
raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
- if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq,
- irq_desc[irq].affinity);
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ if (data->chip->irq_set_affinity)
+ data->chip->irq_set_affinity(data,
+ data->affinity,
+ false);
}
raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
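The sun4u/sun4v chip conversions above all follow the same genirq pattern: the callbacks now receive a struct irq_data instead of a bare IRQ number, and per-interrupt state moves from get_irq_chip_data() to data->handler_data. A generic sketch of that pattern, with placeholder my_* names that are not part of this patch:

static void my_irq_enable(struct irq_data *data)
{
	/* The irq number, affinity mask and handler data all arrive with
	 * irq_data, so the callback no longer indexes irq_desc[] itself. */
	struct irq_handler_data *hd = data->handler_data;
	unsigned int irq = data->irq;

	/* program the hardware for 'irq' using 'hd' ... */
}

static struct irq_chip my_chip = {
	.name		= "my-chip",
	.irq_enable	= my_irq_enable,
	/* .irq_disable, .irq_eoi and .irq_set_affinity take the same
	 * struct irq_data argument; .irq_set_affinity additionally gets
	 * the cpumask and a 'force' flag, as in sun4u_set_affinity(). */
};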
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 15d8a3f645c9..24ad449886be 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -3,6 +3,8 @@
#include <linux/interrupt.h>
+#include <asm/traps.h>
+
/* cpu.c */
extern const char *sparc_cpu_type;
extern const char *sparc_pmu_type;
@@ -26,6 +28,53 @@ extern int static_irq_count;
extern spinlock_t irq_action_lock;
extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
+extern void init_IRQ(void);
+
+/* sun4c_irq.c */
+extern void sun4c_init_IRQ(void);
+
+/* sun4m_irq.c */
+extern unsigned int lvl14_resolution;
+
+extern void sun4m_init_IRQ(void);
+extern void sun4m_clear_profile_irq(int cpu);
+
+/* sun4d_irq.c */
+extern spinlock_t sun4d_imsk_lock;
+
+extern void sun4d_init_IRQ(void);
+extern int sun4d_request_irq(unsigned int irq,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id);
+extern int show_sun4d_interrupts(struct seq_file *, void *);
+extern void sun4d_distribute_irqs(void);
+extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+
+/* head_32.S */
+extern unsigned int t_nmi[];
+extern unsigned int linux_trap_ipi15_sun4d[];
+extern unsigned int linux_trap_ipi15_sun4m[];
+
+extern struct tt_entry trapbase_cpu1;
+extern struct tt_entry trapbase_cpu2;
+extern struct tt_entry trapbase_cpu3;
+
+extern char cputypval[];
+
+/* entry.S */
+extern unsigned long lvl14_save[4];
+extern unsigned int real_irq_entry[];
+extern unsigned int smp4d_ticker[];
+extern unsigned int patchme_maybe_smp_msg[];
+
+extern void floppy_hardint(void);
+
+/* trampoline_32.S */
+extern int __smp4m_processor_id(void);
+extern int __smp4d_processor_id(void);
+extern unsigned long sun4m_cpu_startup;
+extern unsigned long sun4d_cpu_startup;
#else /* CONFIG_SPARC32 */
#endif /* CONFIG_SPARC32 */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index df39a0f0d27a..732b0bce6001 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -790,16 +790,20 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask)
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long orig_state, hv_err, flags;
+ unsigned long orig_state, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_rx_get_state(lp->id,
- &lp->rx_head,
- &lp->rx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_rx_get_state(lp->id,
+ &lp->rx_head,
+ &lp->rx_tail,
+ &lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
@@ -904,16 +908,20 @@ out:
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long flags, hv_err, orig_state;
+ unsigned long flags, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_tx_get_state(lp->id,
- &lp->tx_head,
- &lp->tx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_tx_get_state(lp->id,
+ &lp->tx_head,
+ &lp->tx_tail,
+ &lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
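The ldc_rx()/ldc_tx() changes drop the unused hv_err local and leave hypervisor error handling as a comment-level TODO. If that check were added back, it might look roughly like the following sketch; ldc_reset() is an invented placeholder for whatever channel-reset path would be appropriate, not something this patch provides:

	unsigned long hv_err;	/* re-introduce the local this patch removes */

	hv_err = sun4v_ldc_rx_get_state(lp->id, &lp->rx_head,
					&lp->rx_tail, &lp->chan_state);
	if (hv_err != HV_EOK) {
		/* Hypothetical recovery path suggested by the new comment:
		 * tear the channel down and let the peer reconnect. */
		ldc_reset(lp);		/* invented helper, for illustration only */
		spin_unlock_irqrestore(&lp->lock, flags);
		return IRQ_HANDLED;
	}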
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index fdab7f854f80..2969f777fa11 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -30,6 +30,7 @@ struct amba_apb_device leon_percpu_timer_dev[16];
int leondebug_irq_disable;
int leon_debug_irqout;
static int dummy_master_l10_counter;
+unsigned long amba_system_id;
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
@@ -117,10 +118,16 @@ void __init leon_init_timers(irq_handler_t counter_fn)
master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
dummy_master_l10_counter = 0;
- /*Find IRQMP IRQ Controller Registers base address otherwise bail out.*/
rootnp = of_find_node_by_path("/ambapp0");
if (!rootnp)
goto bad;
+
+ /* Find System ID: GRLIB build ID and optional CHIP ID */
+ pp = of_find_property(rootnp, "systemid", &len);
+ if (pp)
+ amba_system_id = *(unsigned long *)pp->value;
+
+ /* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
if (!np) {
np = of_find_node_by_name(rootnp, "01_00d");
@@ -340,7 +347,7 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
void __init leon_init_IRQ(void)
{
- sparc_init_timers = leon_init_timers;
+ sparc_irq_config.init_timers = leon_init_timers;
BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
new file mode 100644
index 000000000000..519ca923f59f
--- /dev/null
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -0,0 +1,82 @@
+/* leon_pmc.c: LEON Power-down cpu_idle() handler
+ *
+ * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ */
+
+#include <linux/init.h>
+#include <linux/pm.h>
+
+#include <asm/leon_amba.h>
+#include <asm/leon.h>
+
+/* List of systems that need fixup instructions around the power-down instruction */
+unsigned int pmc_leon_fixup_ids[] = {
+ AEROFLEX_UT699,
+ GAISLER_GR712RC,
+ LEON4_NEXTREME1,
+ 0
+};
+
+int pmc_leon_need_fixup(void)
+{
+ unsigned int systemid = amba_system_id >> 16;
+ unsigned int *id;
+
+ id = &pmc_leon_fixup_ids[0];
+ while (*id != 0) {
+ if (*id == systemid)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+/*
+ * CPU idle callback function for systems that need some extra handling
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle_fixup(void)
+{
+ /* Prepare an address to a non-cachable region. APB is always
+ * non-cachable. One instruction is executed after the Sleep
+ * instruction, so we make sure to read the bus and throw away the
+ * value by accessing a non-cachable area, and we also make sure the
+ * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
+ */
+ register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+ __asm__ __volatile__ (
+ "mov %%g0, %%asr19\n"
+ "lda [%0] %1, %%g0\n"
+ :
+ : "r"(address), "i"(ASI_LEON_BYPASS));
+}
+
+/*
+ * CPU idle callback function
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle(void)
+{
+ /* For systems without power-down, this will be a no-op */
+ __asm__ __volatile__ ("mov %g0, %asr19\n\t");
+}
+
+/* Install LEON Power Down function */
+static int __init leon_pmc_install(void)
+{
+ /* Assign power management IDLE handler */
+ if (pmc_leon_need_fixup())
+ pm_idle = pmc_leon_idle_fixup;
+ else
+ pm_idle = pmc_leon_idle;
+
+ printk(KERN_INFO "leon: power management initialized\n");
+
+ return 0;
+}
+
+/* This driver is not critical to the boot process, so we don't care
+ * if it is initialized late.
+ */
+late_initcall(leon_pmc_install);
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 16582d85368a..8f5de4aa3c0a 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -41,6 +41,8 @@
#include <asm/leon.h>
#include <asm/leon_amba.h>
+#include "kernel.h"
+
#ifdef CONFIG_SPARC_LEON
#include "irq.h"
@@ -261,23 +263,23 @@ void __init leon_smp_done(void)
/* Free unneeded trap tables */
if (!cpu_isset(1, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu1));
- init_page_count(virt_to_page(trapbase_cpu1));
- free_page((unsigned long)trapbase_cpu1);
+ ClearPageReserved(virt_to_page(&trapbase_cpu1));
+ init_page_count(virt_to_page(&trapbase_cpu1));
+ free_page((unsigned long)&trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(2, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu2));
- init_page_count(virt_to_page(trapbase_cpu2));
- free_page((unsigned long)trapbase_cpu2);
+ ClearPageReserved(virt_to_page(&trapbase_cpu2));
+ init_page_count(virt_to_page(&trapbase_cpu2));
+ free_page((unsigned long)&trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(3, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu3));
- init_page_count(virt_to_page(trapbase_cpu3));
- free_page((unsigned long)trapbase_cpu3);
+ ClearPageReserved(virt_to_page(&trapbase_cpu3));
+ init_page_count(virt_to_page(&trapbase_cpu3));
+ free_page((unsigned long)&trapbase_cpu3);
totalram_pages++;
num_physpages++;
}
@@ -437,15 +439,6 @@ void __init leon_blackbox_current(unsigned *addr)
}
-/*
- * CPU idle callback function
- * See .../arch/sparc/kernel/process.c
- */
-void pmc_leon_idle(void)
-{
- __asm__ volatile ("mov %g0, %asr19");
-}
-
void __init leon_init_smp(void)
{
/* Patch ipi15 trap table */
@@ -456,13 +449,6 @@ void __init leon_init_smp(void)
BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
BTFIXUPCALL_NORM);
-
-#ifndef PMC_NO_IDLE
- /* Assign power management IDLE handler */
- pm_idle = pmc_leon_idle;
- printk(KERN_INFO "leon: power management initialized\n");
-#endif
-
}
#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 2d055a1e9cc2..a312af40ea84 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -13,6 +13,7 @@
#include <asm/leon_amba.h>
#include "of_device_common.h"
+#include "irq.h"
/*
* PCI bus specific translator
@@ -355,7 +356,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (intr) {
op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->archdata.num_irqs; i++)
- op->archdata.irqs[i] = intr[i].pri;
+ op->archdata.irqs[i] =
+ sparc_irq_config.build_device_irq(op, intr[i].pri);
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
@@ -363,64 +365,13 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (irq) {
op->archdata.num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->archdata.num_irqs; i++)
- op->archdata.irqs[i] = irq[i];
+ op->archdata.irqs[i] =
+ sparc_irq_config.build_device_irq(op, irq[i]);
} else {
op->archdata.num_irqs = 0;
}
}
- if (sparc_cpu_model == sun4d) {
- static int pil_to_sbus[] = {
- 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
- };
- struct device_node *io_unit, *sbi = dp->parent;
- const struct linux_prom_registers *regs;
- int board, slot;
-
- while (sbi) {
- if (!strcmp(sbi->name, "sbi"))
- break;
-
- sbi = sbi->parent;
- }
- if (!sbi)
- goto build_resources;
-
- regs = of_get_property(dp, "reg", NULL);
- if (!regs)
- goto build_resources;
-
- slot = regs->which_io;
-
- /* If SBI's parent is not io-unit or the io-unit lacks
- * a "board#" property, something is very wrong.
- */
- if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
- printk("%s: Error, parent is not io-unit.\n",
- sbi->full_name);
- goto build_resources;
- }
- io_unit = sbi->parent;
- board = of_getintprop_default(io_unit, "board#", -1);
- if (board == -1) {
- printk("%s: Error, lacks board# property.\n",
- io_unit->full_name);
- goto build_resources;
- }
-
- for (i = 0; i < op->archdata.num_irqs; i++) {
- int this_irq = op->archdata.irqs[i];
- int sbusl = pil_to_sbus[this_irq];
-
- if (sbusl)
- this_irq = (((board + 1) << 5) +
- (sbusl << 2) +
- slot);
-
- op->archdata.irqs[i] = this_irq;
- }
- }
-build_resources:
build_device_resources(op, parent);
op->dev.parent = parent;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 4137579d9adc..44f41e312f73 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -675,6 +675,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
* humanoid.
*/
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
+ (void) err;
}
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
@@ -1001,22 +1002,22 @@ EXPORT_SYMBOL(pci_domain_nr);
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned int virt_irq;
+ unsigned int irq;
if (!pbm->setup_msi_irq)
return -EINVAL;
- return pbm->setup_msi_irq(&virt_irq, pdev, desc);
+ return pbm->setup_msi_irq(&irq, pdev, desc);
}
-void arch_teardown_msi_irq(unsigned int virt_irq)
+void arch_teardown_msi_irq(unsigned int irq)
{
- struct msi_desc *entry = get_irq_msi(virt_irq);
+ struct msi_desc *entry = get_irq_msi(irq);
struct pci_dev *pdev = entry->dev;
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
- pbm->teardown_msi_irq(virt_irq, pdev);
+ pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6c7a33af3ba6..6e3874b64488 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -295,14 +295,17 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
- unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
/* Do nothing. */
} else {
- ret = pci_sun4v_config_put(devhandle,
- HV_PCI_DEVICE_BUILD(bus, device, func),
- where, size, value);
+ /* We don't check for hypervisor errors here, but perhaps
+ * we should and influence our return value depending upon
+ * what kind of error is thrown.
+ */
+ pci_sun4v_config_put(devhandle,
+ HV_PCI_DEVICE_BUILD(bus, device, func),
+ where, size, value);
}
return PCIBIOS_SUCCESSFUL;
}
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index efb896d68754..3d70f8326efd 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -214,11 +214,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
- unsigned long msiqid;
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
- msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
@@ -277,7 +275,7 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
- unsigned int virt_irq;
+ unsigned int irq;
int fixup;
u64 val;
@@ -293,14 +291,14 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
- virt_irq = build_irq(fixup, iclr_reg, imap_reg);
- if (!virt_irq)
+ irq = build_irq(fixup, iclr_reg, imap_reg);
+ if (!irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
- return virt_irq;
+ return irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
@@ -455,8 +453,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm,
return 0;
}
-static int __devinit fire_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fire_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
@@ -507,7 +504,7 @@ static struct of_device_id __initdata fire_match[] = {
{},
};
-static struct of_platform_driver fire_driver = {
+static struct platform_driver fire_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -518,7 +515,7 @@ static struct of_platform_driver fire_driver = {
static int __init fire_init(void)
{
- return of_register_platform_driver(&fire_driver);
+ return platform_driver_register(&fire_driver);
}
subsys_initcall(fire_init);
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index e20ed5f06e9c..6beb60df31d0 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -131,9 +131,9 @@ struct pci_pbm_info {
void *msi_queues;
unsigned long *msi_bitmap;
unsigned int *msi_irq_table;
- int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
+ int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
- void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
+ void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev);
const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index b210416ace7b..550e937720e7 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -31,12 +31,12 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
struct irq_desc *desc;
- unsigned int virt_irq;
+ unsigned int irq;
- virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
- desc = irq_desc + virt_irq;
+ irq = pbm->msi_irq_table[msi - pbm->msi_first];
+ desc = irq_desc + irq;
- desc->handle_irq(virt_irq, desc);
+ desc->handle_irq(irq, desc);
}
if (unlikely(err < 0))
@@ -121,7 +121,7 @@ static struct irq_chip msi_irq = {
/* XXX affinity XXX */
};
-static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
+static int sparc64_setup_msi_irq(unsigned int *irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
@@ -131,17 +131,17 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
int msi, err;
u32 msiqid;
- *virt_irq_p = virt_irq_alloc(0, 0);
+ *irq_p = irq_alloc(0, 0);
err = -ENOMEM;
- if (!*virt_irq_p)
+ if (!*irq_p)
goto out_err;
- set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
+ set_irq_chip_and_handler_name(*irq_p, &msi_irq,
handle_simple_irq, "MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
- goto out_virt_irq_free;
+ goto out_irq_free;
msi = err;
@@ -152,7 +152,7 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
if (err)
goto out_msi_free;
- pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
+ pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
@@ -163,24 +163,24 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
}
msg.data = msi;
- set_irq_msi(*virt_irq_p, entry);
- write_msi_msg(*virt_irq_p, &msg);
+ set_irq_msi(*irq_p, entry);
+ write_msi_msg(*irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
-out_virt_irq_free:
- set_irq_chip(*virt_irq_p, NULL);
- virt_irq_free(*virt_irq_p);
- *virt_irq_p = 0;
+out_irq_free:
+ set_irq_chip(*irq_p, NULL);
+ irq_free(*irq_p);
+ *irq_p = 0;
out_err:
return err;
}
-static void sparc64_teardown_msi_irq(unsigned int virt_irq,
+static void sparc64_teardown_msi_irq(unsigned int irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
@@ -189,12 +189,12 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
- if (pbm->msi_irq_table[i] == virt_irq)
+ if (pbm->msi_irq_table[i] == irq)
break;
}
if (i >= pbm->msi_num) {
printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
- pbm->name, virt_irq);
+ pbm->name, irq);
return;
}
@@ -205,14 +205,14 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
if (err) {
printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n",
- pbm->name, msi_num, virt_irq, err);
+ pbm->name, msi_num, irq, err);
return;
}
free_msi(pbm, msi_num);
- set_irq_chip(virt_irq, NULL);
- virt_irq_free(virt_irq);
+ set_irq_chip(irq, NULL);
+ irq_free(irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index 22eab7cf3b11..56ee745064de 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -503,8 +503,7 @@ static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid)
#define PSYCHO_CONFIGSPACE 0x001000000UL
-static int __devinit psycho_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit psycho_probe(struct platform_device *op)
{
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
@@ -601,7 +600,7 @@ static struct of_device_id __initdata psycho_match[] = {
{},
};
-static struct of_platform_driver psycho_driver = {
+static struct platform_driver psycho_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -612,7 +611,7 @@ static struct of_platform_driver psycho_driver = {
static int __init psycho_init(void)
{
- return of_register_platform_driver(&psycho_driver);
+ return platform_driver_register(&psycho_driver);
}
subsys_initcall(psycho_init);
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 5c3f5ec4cabc..2857073342d2 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -452,8 +452,7 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
sabre_scan_bus(pbm, &op->dev);
}
-static int __devinit sabre_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit sabre_probe(struct platform_device *op)
{
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
@@ -464,7 +463,7 @@ static int __devinit sabre_probe(struct platform_device *op,
const u32 *vdma;
u64 clear_irq;
- hummingbird_p = (match->data != NULL);
+ hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL);
if (!hummingbird_p) {
struct device_node *cpu_dp;
@@ -595,7 +594,7 @@ static struct of_device_id __initdata sabre_match[] = {
{},
};
-static struct of_platform_driver sabre_driver = {
+static struct platform_driver sabre_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -606,7 +605,7 @@ static struct of_platform_driver sabre_driver = {
static int __init sabre_init(void)
{
- return of_register_platform_driver(&sabre_driver);
+ return platform_driver_register(&sabre_driver);
}
subsys_initcall(sabre_init);
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 445a47a2fb3d..1d41af73a92f 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1313,7 +1313,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
- int is_pbm_a, err;
+ int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
@@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
*/
regs = of_get_property(dp, "reg", NULL);
- is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
-
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
@@ -1460,10 +1458,11 @@ out_err:
return err;
}
-static int __devinit schizo_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit schizo_probe(struct platform_device *op)
{
- return __schizo_init(op, (unsigned long) match->data);
+ if (!op->dev.of_match)
+ return -EINVAL;
+ return __schizo_init(op, (unsigned long) op->dev.of_match->data);
}
/* The ordering of this table is very important. Some Tomatillo
@@ -1490,7 +1489,7 @@ static struct of_device_id __initdata schizo_match[] = {
{},
};
-static struct of_platform_driver schizo_driver = {
+static struct platform_driver schizo_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1501,7 +1500,7 @@ static struct of_platform_driver schizo_driver = {
static int __init schizo_init(void)
{
- return of_register_platform_driver(&schizo_driver);
+ return platform_driver_register(&schizo_driver);
}
subsys_initcall(schizo_init);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 743344aa6d8a..6cf534681788 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -580,7 +580,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
- unsigned long num_tsb_entries, sz, tsbsize;
+ unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
@@ -596,7 +596,6 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
- tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0];
@@ -844,9 +843,9 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
- unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
+ unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
- if (!virt_irq)
+ if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
@@ -854,7 +853,7 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
- return virt_irq;
+ return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
@@ -918,8 +917,7 @@ static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
return 0;
}
-static int __devinit pci_sun4v_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit pci_sun4v_probe(struct platform_device *op)
{
const struct linux_prom64_registers *regs;
static int hvapi_negotiated = 0;
@@ -1008,7 +1006,7 @@ static struct of_device_id __initdata pci_sun4v_match[] = {
{},
};
-static struct of_platform_driver pci_sun4v_driver = {
+static struct platform_driver pci_sun4v_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1019,7 +1017,7 @@ static struct of_platform_driver pci_sun4v_driver = {
static int __init pci_sun4v_init(void)
{
- return of_register_platform_driver(&pci_sun4v_driver);
+ return platform_driver_register(&pci_sun4v_driver);
}
subsys_initcall(pci_sun4v_init);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index aeaa09a3c655..2cdc131b50ac 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -700,10 +700,8 @@ static void pcic_clear_clock_irq(void)
static irqreturn_t pcic_timer_handler (int irq, void *h)
{
- write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
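The timer-tick hunks in this series (pcic.c above, time_32.c further down) replace the open-coded xtime_lock / do_timer(1) sequence with a single xtime_update(1) call, leaving the arch handler to do nothing but clear the chip-level interrupt. As a reference point, the generic helper in this kernel generation is roughly the following (a sketch of the timekeeping side, not part of this patch):

/* kernel/time/timekeeping.c, approximate shape in this era */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);	/* serialize against xtime readers */
	do_timer(ticks);		/* advance jiffies and wall time */
	write_sequnlock(&xtime_lock);
}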
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7c2ced612b8f..8ac23e660080 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -81,7 +81,7 @@ static void n2_pcr_write(u64 val)
unsigned long ret;
ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
- if (val != HV_EOK)
+ if (ret != HV_EOK)
write_pcr(val);
}
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 94536a85f161..93d7b4465f8d 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -51,8 +51,7 @@ static void pmc_swift_idle(void)
#endif
}
-static int __devinit pmc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit pmc_probe(struct platform_device *op)
{
regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]), PMC_OBPNAME);
@@ -78,7 +77,7 @@ static struct of_device_id __initdata pmc_match[] = {
};
MODULE_DEVICE_TABLE(of, pmc_match);
-static struct of_platform_driver pmc_driver = {
+static struct platform_driver pmc_driver = {
.driver = {
.name = "pmc",
.owner = THIS_MODULE,
@@ -89,7 +88,7 @@ static struct of_platform_driver pmc_driver = {
static int __init pmc_init(void)
{
- return of_register_platform_driver(&pmc_driver);
+ return platform_driver_register(&pmc_driver);
}
/* This driver is not critical to the boot process
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 2c59f4d387dd..cd725fe238b2 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -33,7 +33,7 @@ static int __devinit has_button_interrupt(unsigned int irq, struct device_node *
return 1;
}
-static int __devinit power_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit power_probe(struct platform_device *op)
{
struct resource *res = &op->resource[0];
unsigned int irq = op->archdata.irqs[0];
@@ -59,7 +59,7 @@ static struct of_device_id __initdata power_match[] = {
{},
};
-static struct of_platform_driver power_driver = {
+static struct platform_driver power_driver = {
.probe = power_probe,
.driver = {
.name = "power",
@@ -70,7 +70,7 @@ static struct of_platform_driver power_driver = {
static int __init power_init(void)
{
- return of_register_platform_driver(&power_driver);
+ return platform_driver_register(&power_driver);
}
device_initcall(power_init);
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index ce651147fabc..570b98f6e897 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -227,7 +227,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
- int virt_irq;
+ int irq;
ino &= 0x3f;
if (ino < SABRE_ONBOARD_IRQ_BASE) {
@@ -247,7 +247,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
- virt_irq = build_irq(inofixup, iclr, imap);
+ irq = build_irq(inofixup, iclr, imap);
/* If the parent device is a PCI<->PCI bridge other than
* APB, we have to install a pre-handler to ensure that
@@ -256,13 +256,13 @@ static unsigned int sabre_irq_build(struct device_node *dp,
*/
regs = of_get_property(dp, "reg", NULL);
if (regs && sabre_device_needs_wsync(dp)) {
- irq_install_pre_handler(virt_irq,
+ irq_install_pre_handler(irq,
sabre_wsync_handler,
(void *) (long) regs->phys_hi,
(void *) irq_data);
}
- return virt_irq;
+ return irq;
}
static void __init sabre_irq_trans_init(struct device_node *dp)
@@ -382,7 +382,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
int ign_fixup;
- int virt_irq;
+ int irq;
int is_tomatillo;
ino &= 0x3f;
@@ -409,17 +409,17 @@ static unsigned int schizo_irq_build(struct device_node *dp,
ign_fixup = (1 << 6);
}
- virt_irq = build_irq(ign_fixup, iclr, imap);
+ irq = build_irq(ign_fixup, iclr, imap);
if (is_tomatillo) {
- irq_install_pre_handler(virt_irq,
+ irq_install_pre_handler(irq,
tomatillo_wsync_handler,
((irq_data->chip_version <= 4) ?
(void *) 1 : (void *) 0),
(void *) irq_data->sync_reg);
}
- return virt_irq;
+ return irq;
}
static void __init __schizo_irq_trans_init(struct device_node *dp,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ccc812bc09e..96ee50a80661 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -1086,6 +1086,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
+#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context)) {
unsigned long tstate = regs->tstate;
int result = AUDITSC_SUCCESS;
@@ -1095,7 +1096,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
-
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 648f2161b851..7b8b76c9557f 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -184,7 +184,6 @@ static void __init boot_flags_init(char *commands)
*/
extern void sun4c_probe_vac(void);
-extern char cputypval;
extern unsigned short root_flags;
extern unsigned short root_dev;
@@ -218,21 +217,21 @@ void __init setup_arch(char **cmdline_p)
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
- if (!strcmp(&cputypval,"sun4 "))
+ if (!strcmp(&cputypval[0], "sun4 "))
sparc_cpu_model = sun4;
- if (!strcmp(&cputypval,"sun4c"))
+ if (!strcmp(&cputypval[0], "sun4c"))
sparc_cpu_model = sun4c;
- if (!strcmp(&cputypval,"sun4m"))
+ if (!strcmp(&cputypval[0], "sun4m"))
sparc_cpu_model = sun4m;
- if (!strcmp(&cputypval,"sun4s"))
+ if (!strcmp(&cputypval[0], "sun4s"))
sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
- if (!strcmp(&cputypval,"sun4d"))
+ if (!strcmp(&cputypval[0], "sun4d"))
sparc_cpu_model = sun4d;
- if (!strcmp(&cputypval,"sun4e"))
+ if (!strcmp(&cputypval[0], "sun4e"))
sparc_cpu_model = sun4e;
- if (!strcmp(&cputypval,"sun4u"))
+ if (!strcmp(&cputypval[0], "sun4u"))
sparc_cpu_model = sun4u;
- if (!strncmp(&cputypval, "leon" , 4))
+ if (!strncmp(&cputypval[0], "leon" , 4))
sparc_cpu_model = sparc_leon;
printk("ARCH: ");
@@ -335,7 +334,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
prom_rev,
romvec->pv_printrev >> 16,
romvec->pv_printrev & 0xffff,
- &cputypval,
+ &cputypval[0],
ncpus_probed,
num_online_cpus()
#ifndef CONFIG_SMP
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 555a76d1f4a1..3e94a8c23238 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -189,7 +189,7 @@ static inline long get_delta (long *rt, long *master)
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
- unsigned long flags, rt, master_time_stamp, bound;
+ unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
@@ -208,10 +208,8 @@ void smp_synchronize_tick_client(void)
{
for (i = 0; i < NUM_ROUNDS; i++) {
delta = get_delta(&rt, &master_time_stamp);
- if (delta == 0) {
+ if (delta == 0)
done = 1; /* let's lock on to this... */
- bound = rt;
- }
if (!done) {
if (i > 0) {
@@ -933,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
void *pg_addr;
- int this_cpu;
u64 data0;
if (tlb_type == hypervisor)
return;
- this_cpu = get_cpu();
+ preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
@@ -964,7 +961,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
}
__local_flush_dcache_page(page);
- put_cpu();
+ preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index 892fb884910a..90eea38ad66f 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -1,5 +1,5 @@
-/* sun4c_irq.c
- * arch/sparc/kernel/sun4c_irq.c:
+/*
+ * sun4c irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
@@ -9,31 +9,41 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include "irq.h"
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/vaddrs.h>
-#include <asm/timer.h>
-#include <asm/openprom.h>
#include <asm/oplib.h>
-#include <asm/traps.h>
+#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/idprom.h>
-#include <asm/machines.h>
+
+#include "irq.h"
+
+/* Sun4c interrupts are typically laid out as follows:
+ *
+ * 1 - Software interrupt, SBUS level 1
+ * 2 - SBUS level 2
+ * 3 - ESP SCSI, SBUS level 3
+ * 4 - Software interrupt
+ * 5 - Lance ethernet, SBUS level 4
+ * 6 - Software interrupt
+ * 7 - Graphics card, SBUS level 5
+ * 8 - SBUS level 6
+ * 9 - SBUS level 7
+ * 10 - Counter timer
+ * 11 - Floppy
+ * 12 - Zilog uart
+ * 13 - CS4231 audio
+ * 14 - Profiling timer
+ * 15 - NMI
+ *
+ * The interrupt enable bits in the interrupt mask register are
+ * really only used to enable/disable the timer interrupts, and
+ * for signalling software interrupts. There is also a master
+ * interrupt enable bit in this register.
+ *
+ * Interrupts are enabled by setting the SUN4C_INT_* bits, they
+ * are disabled by clearing those bits.
+ */
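As that comment notes, enabling a source is just setting its SUN4C_INT_* bit in the single enable byte, and disabling is clearing it; the per-IRQ switch statements below reduce to this read-modify-write (a sketch with hypothetical helper names, using the same sbus accessors but omitting the locking):

static void sun4c_int_set(unsigned char __iomem *reg, unsigned char bit)
{
	unsigned char mask = sbus_readb(reg);

	sbus_writeb(mask | bit, reg);	/* bit set: source enabled */
}

static void sun4c_int_clear(unsigned char __iomem *reg, unsigned char bit)
{
	unsigned char mask = sbus_readb(reg);

	sbus_writeb(mask & ~bit, reg);	/* bit clear: source disabled */
}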
/*
* Bit field defines for the interrupt registers on various
@@ -49,26 +59,21 @@
#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
-/* Pointer to the interrupt enable byte
- *
- * Dave Redman (djhr@tadpole.co.uk)
- * What you may not be aware of is that entry.S requires this variable.
- *
- * --- linux_trap_nmi_sun4c --
- *
- * so don't go making it static, like I tried. sigh.
+/*
+ * Pointer to the interrupt enable byte
+ * Used by entry.S
*/
-unsigned char __iomem *interrupt_enable = NULL;
+unsigned char __iomem *interrupt_enable;
static void sun4c_disable_irq(unsigned int irq_nr)
{
unsigned long flags;
unsigned char current_mask, new_mask;
-
+
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = sbus_readb(interrupt_enable);
- switch(irq_nr) {
+ switch (irq_nr) {
case 1:
new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
break;
@@ -93,11 +98,11 @@ static void sun4c_enable_irq(unsigned int irq_nr)
{
unsigned long flags;
unsigned char current_mask, new_mask;
-
+
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = sbus_readb(interrupt_enable);
- switch(irq_nr) {
+ switch (irq_nr) {
case 1:
new_mask = ((current_mask) | SUN4C_INT_E1);
break;
@@ -180,12 +185,14 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
prom_halt();
}
-
+
sun4c_disable_irq(irq[1].pri);
}
#ifdef CONFIG_SMP
-static void sun4c_nop(void) {}
+static void sun4c_nop(void)
+{
+}
#endif
void __init sun4c_init_IRQ(void)
@@ -214,7 +221,9 @@ void __init sun4c_init_IRQ(void)
BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
- sparc_init_timers = sun4c_init_timers;
+
+ sparc_irq_config.init_timers = sun4c_init_timers;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index e11b4612dabb..77b4a8992710 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -1,50 +1,42 @@
/*
- * arch/sparc/kernel/sun4d_irq.c:
- * SS1000/SC2000 interrupt handling.
+ * SS1000/SC2000 interrupt handling.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Heavily based on arch/sparc/kernel/irq.c.
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
#include <linux/seq_file.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/smp.h>
-#include <asm/vaddrs.h>
+
#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
-/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
-/* #define DISTRIBUTE_IRQS */
+/* Sun4d interrupts fall roughly into two categories. SBUS and
+ * cpu local. CPU local interrupts cover the timer interrupts
+ * and whatnot, and we encode those as normal PILs between
+ * 0 and 15.
+ *
+ * SBUS interrupts are encoded integers including the board number
+ * (plus one), the SBUS level, and the SBUS slot number. Sun4D
+ * IRQ dispatch is done by:
+ *
+ * 1) Reading the BW local interrupt table in order to get the bus
+ * interrupt mask.
+ *
+ * This table is indexed by SBUS interrupt level which can be
+ * derived from the PIL we got interrupted on.
+ *
+ * 2) For each bus showing interrupt pending from #1, read the
+ * SBI interrupt state register. This will indicate which slots
+ * have interrupts pending for that SBUS interrupt level.
+ */
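The encoding described above (board number plus one, SBUS level, slot) is what sun4d_build_device_irq computes later in this file; stripped of the OF property lookups, the pack/unpack is roughly (a standalone sketch, hypothetical helper names):

/* SBUS irq layout used by sun4d: ((board + 1) << 5) | (sbusl << 2) | slot */
static unsigned int sun4d_encode_irq(int board, int sbusl, int slot)
{
	return ((board + 1) << 5) + (sbusl << 2) + slot;
}

static void sun4d_decode_irq(unsigned int irq, int *board, int *sbusl, int *slot)
{
	*board = (irq >> 5) - 1;
	*sbusl = (irq >> 2) & 0x7;
	*slot  = irq & 0x3;
}

For example, board 1, SBUS level 3, slot 2 encodes to (2 << 5) + (3 << 2) + 2 = 78, and the (irq >> 5) - 1 step is the same board recovery that sun4d_free_irq and the sbus_tid[] indexing below perform.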
struct sun4d_timer_regs {
u32 l10_timer_limit;
@@ -59,11 +51,9 @@ static struct sun4d_timer_regs __iomem *sun4d_timers;
#define TIMER_IRQ 10
#define MAX_STATIC_ALLOC 4
-extern int static_irq_count;
static unsigned char sbus_tid[32];
static struct irqaction *irq_action[NR_IRQS];
-extern spinlock_t irq_action_lock;
static struct sbus_action {
struct irqaction *action;
@@ -71,11 +61,33 @@ static struct sbus_action {
} *sbus_actions;
static int pil_to_sbus[] = {
- 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
+ 0,
+ 0,
+ 1,
+ 2,
+ 0,
+ 3,
+ 0,
+ 4,
+ 0,
+ 5,
+ 0,
+ 6,
+ 0,
+ 7,
+ 0,
+ 0,
};
static int sbus_to_pil[] = {
- 0, 2, 3, 5, 7, 9, 11, 13,
+ 0,
+ 2,
+ 3,
+ 5,
+ 7,
+ 9,
+ 11,
+ 13,
};
static int nsbi;
@@ -86,7 +98,7 @@ DEFINE_SPINLOCK(sun4d_imsk_lock);
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j = 0, k = 0, sbusl;
- struct irqaction * action;
+ struct irqaction *action;
unsigned long flags;
#ifdef CONFIG_SMP
int x;
@@ -96,13 +108,14 @@ int show_sun4d_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
sbusl = pil_to_sbus[i];
if (!sbusl) {
- action = *(i + irq_action);
- if (!action)
- goto out_unlock;
+ action = *(i + irq_action);
+ if (!action)
+ goto out_unlock;
} else {
for (j = 0; j < nsbi; j++) {
for (k = 0; k < 4; k++)
- if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
+ action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
+ if (action)
goto found_it;
}
goto out_unlock;
@@ -125,15 +138,17 @@ found_it: seq_printf(p, "%3d: ", i);
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
}
- if (!sbusl) break;
+ if (!sbusl)
+ break;
k++;
- if (k < 4)
- action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
- else {
+ if (k < 4) {
+ action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
+ } else {
j++;
- if (j == nsbi) break;
+ if (j == nsbi)
+ break;
k = 0;
- action = sbus_actions [(j << 5) + (sbusl << 2)].action;
+ action = sbus_actions[(j << 5) + (sbusl << 2)].action;
}
}
seq_putc(p, '\n');
@@ -147,7 +162,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
{
struct irqaction *action, **actionp;
struct irqaction *tmp = NULL;
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&irq_action_lock, flags);
if (irq < 15)
@@ -156,7 +171,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
actionp = &(sbus_actions[irq - (1 << 5)].action);
action = *actionp;
if (!action) {
- printk("Trying to free free IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
goto out_unlock;
}
if (dev_id) {
@@ -166,23 +181,25 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
tmp = action;
}
if (!action) {
- printk("Trying to free free shared IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free shared IRQ%d\n",
+ irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
- printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
+ printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
+ irq);
goto out_unlock;
}
- if (action->flags & SA_STATIC_ALLOC)
- {
- /* This interrupt is marked as specially allocated
+ if (action->flags & SA_STATIC_ALLOC) {
+ /*
+ * This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
- printk("Attempt to free statically allocated IRQ%d (%s)\n",
+ printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
-
+
if (tmp)
tmp->next = action->next;
else
@@ -203,30 +220,28 @@ out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
-extern void unexpected_irq(int, void *, struct pt_regs *);
-
-void sun4d_handler_irq(int irq, struct pt_regs * regs)
+void sun4d_handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- struct irqaction * action;
+ struct irqaction *action;
int cpu = smp_processor_id();
/* SBUS IRQ level (1 - 7) */
- int sbusl = pil_to_sbus[irq];
-
+ int sbusl = pil_to_sbus[pil];
+
/* FIXME: Is this necessary?? */
cc_get_ipen();
-
- cc_set_iclr(1 << irq);
-
+
+ cc_set_iclr(1 << pil);
+
old_regs = set_irq_regs(regs);
irq_enter();
- kstat_cpu(cpu).irqs[irq]++;
+ kstat_cpu(cpu).irqs[pil]++;
if (!sbusl) {
- action = *(irq + irq_action);
+ action = *(pil + irq_action);
if (!action)
- unexpected_irq(irq, NULL, regs);
+ unexpected_irq(pil, NULL, regs);
do {
- action->handler(irq, action->dev_id);
+ action->handler(pil, action->dev_id);
action = action->next;
} while (action);
} else {
@@ -235,9 +250,9 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
struct sbus_action *actionp;
unsigned mask, slot;
int sbil = (sbusl << 2);
-
+
bw_clear_intr_mask(sbusl, bus_mask);
-
+
/* Loop for each pending SBI */
for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
if (bus_mask & 1) {
@@ -249,11 +264,11 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
if (mask & slot) {
mask &= ~slot;
action = actionp->action;
-
+
if (!action)
- unexpected_irq(irq, NULL, regs);
+ unexpected_irq(pil, NULL, regs);
do {
- action->handler(irq, action->dev_id);
+ action->handler(pil, action->dev_id);
action = action->next;
} while (action);
release_sbi(SBI2DEVID(sbino), slot);
@@ -266,13 +281,13 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
int sun4d_request_irq(unsigned int irq,
irq_handler_t handler,
- unsigned long irqflags, const char * devname, void *dev_id)
+ unsigned long irqflags, const char *devname, void *dev_id)
{
struct irqaction *action, *tmp = NULL, **actionp;
unsigned long flags;
int ret;
-
- if(irq > 14 && irq < (1 << 5)) {
+
+ if (irq > 14 && irq < (1 << 5)) {
ret = -EINVAL;
goto out;
}
@@ -289,16 +304,18 @@ int sun4d_request_irq(unsigned int irq,
else
actionp = irq + irq_action;
action = *actionp;
-
+
if (action) {
if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
- for (tmp = action; tmp->next; tmp = tmp->next);
+ for (tmp = action; tmp->next; tmp = tmp->next)
+ /* find last entry - tmp used below */;
} else {
ret = -EBUSY;
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
- printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
+ printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
+ irq);
ret = -EBUSY;
goto out_unlock;
}
@@ -312,14 +329,14 @@ int sun4d_request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
- printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
+ printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
}
-
+
if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+
+ if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -334,7 +351,7 @@ int sun4d_request_irq(unsigned int irq,
tmp->next = action;
else
*actionp = action;
-
+
__enable_irq(irq);
ret = 0;
@@ -348,7 +365,7 @@ static void sun4d_disable_irq(unsigned int irq)
{
int tid = sbus_tid[(irq >> 5) - 1];
unsigned long flags;
-
+
if (irq < NR_IRQS)
return;
@@ -361,7 +378,7 @@ static void sun4d_enable_irq(unsigned int irq)
{
int tid = sbus_tid[(irq >> 5) - 1];
unsigned long flags;
-
+
if (irq < NR_IRQS)
return;
@@ -389,44 +406,6 @@ void __init sun4d_distribute_irqs(void)
{
struct device_node *dp;
-#ifdef DISTRIBUTE_IRQS
- cpumask_t sbus_serving_map;
-
- sbus_serving_map = cpu_present_map;
- for_each_node_by_name(dp, "sbi") {
- int board = of_getintprop_default(dp, "board#", 0);
-
- if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
- sbus_tid[board] = (board * 2 + 1);
- else if (cpu_isset(board * 2, cpu_present_map))
- sbus_tid[board] = (board * 2);
- else if (cpu_isset(board * 2 + 1, cpu_present_map))
- sbus_tid[board] = (board * 2 + 1);
- else
- sbus_tid[board] = 0xff;
- if (sbus_tid[board] != 0xff)
- cpu_clear(sbus_tid[board], sbus_serving_map);
- }
- for_each_node_by_name(dp, "sbi") {
- int board = of_getintprop_default(dp, "board#", 0);
- if (sbus_tid[board] == 0xff) {
- int i = 31;
-
- if (cpus_empty(sbus_serving_map))
- sbus_serving_map = cpu_present_map;
- while (cpu_isset(i, sbus_serving_map))
- i--;
- sbus_tid[board] = i;
- cpu_clear(i, sbus_serving_map);
- }
- }
- for_each_node_by_name(dp, "sbi") {
- int devid = of_getintprop_default(dp, "device-id", 0);
- int board = of_getintprop_default(dp, "board#", 0);
- printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
- set_sbi_tid(devid, sbus_tid[board] << 3);
- }
-#else
int cpuid = cpu_logical_map(1);
if (cpuid == -1)
@@ -437,11 +416,10 @@ void __init sun4d_distribute_irqs(void)
sbus_tid[board] = cpuid;
set_sbi_tid(devid, cpuid << 3);
}
- printk("All sbus IRQs directed to CPU%d\n", cpuid);
-#endif
+ printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
}
#endif
-
+
static void sun4d_clear_clock_irq(void)
{
sbus_readl(&sun4d_timers->l10_timer_limit);
@@ -462,14 +440,61 @@ static void __init sun4d_load_profile_irqs(void)
}
}
+unsigned int sun4d_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ static int pil_to_sbus[] = {
+ 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
+ };
+ struct device_node *dp = op->dev.of_node;
+ struct device_node *io_unit, *sbi = dp->parent;
+ const struct linux_prom_registers *regs;
+ int board, slot;
+ int sbusl;
+
+ while (sbi) {
+ if (!strcmp(sbi->name, "sbi"))
+ break;
+
+ sbi = sbi->parent;
+ }
+ if (!sbi)
+ goto err_out;
+
+ regs = of_get_property(dp, "reg", NULL);
+ if (!regs)
+ goto err_out;
+
+ slot = regs->which_io;
+
+ /*
+ * If SBI's parent is not io-unit or the io-unit lacks
+ * a "board#" property, something is very wrong.
+ */
+ if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
+ printk("%s: Error, parent is not io-unit.\n", sbi->full_name);
+ goto err_out;
+ }
+ io_unit = sbi->parent;
+ board = of_getintprop_default(io_unit, "board#", -1);
+ if (board == -1) {
+ printk("%s: Error, lacks board# property.\n", io_unit->full_name);
+ goto err_out;
+ }
+
+ sbusl = pil_to_sbus[real_irq];
+ if (sbusl)
+ return (((board + 1) << 5) + (sbusl << 2) + slot);
+
+err_out:
+ return real_irq;
+}
+
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
unsigned long flags;
- extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
- extern unsigned int real_irq_entry[], smp4d_ticker[];
- extern unsigned int patchme_maybe_smp_msg[];
/* Adjust so that we jump directly to smp4d_ticker */
lvl14_save[2] += smp4d_ticker - real_irq_entry;
@@ -531,7 +556,8 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
(IRQF_DISABLED | SA_STATIC_ALLOC),
"timer", NULL);
if (err) {
- prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err);
+ prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
+ err);
prom_halt();
}
sun4d_load_profile_irqs();
@@ -550,7 +576,7 @@ void __init sun4d_init_sbi_irq(void)
nsbi = 0;
for_each_node_by_name(dp, "sbi")
nsbi++;
- sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
+ sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
if (!sbus_actions) {
prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
prom_halt();
@@ -566,7 +592,8 @@ void __init sun4d_init_sbi_irq(void)
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(devid, 0xffffffff);
if (mask) {
- printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board);
+ printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n",
+ mask, board);
release_sbi(devid, mask);
}
}
@@ -580,7 +607,10 @@ void __init sun4d_init_IRQ(void)
BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
- sparc_init_timers = sun4d_init_timers;
+
+ sparc_irq_config.init_timers = sun4d_init_timers;
+ sparc_irq_config.build_device_irq = sun4d_build_device_irq;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 482f2ab92692..475d50b96cd0 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -1,4 +1,4 @@
-/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
+/* Sparc SS1000/SC2000 SMP support.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
@@ -6,59 +6,23 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/head.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-#include <asm/irq_regs.h>
-
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
#include <asm/sbi.h>
+#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-#include <asm/cpudata.h>
+#include "kernel.h"
#include "irq.h"
-#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
+#define IRQ_CROSS_CALL 15
-static volatile int smp_processors_ready = 0;
+static volatile int smp_processors_ready;
static int smp_highest_cpu;
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern cpuinfo_sparc cpu_data[NR_CPUS];
-extern unsigned char boot_cpu_id;
-extern volatile int smp_process_available;
-
-extern cpumask_t smp_commenced_mask;
-
-extern int __smp4d_processor_id(void);
-
-/* #define SMP_DEBUG */
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
@@ -69,8 +33,6 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
}
static void smp_setup_percpu_timer(void);
-extern void cpu_probe(void);
-extern void sun4d_distribute_irqs(void);
static unsigned char cpu_leds[32];
@@ -86,9 +48,8 @@ static inline void show_leds(int cpuid)
void __cpuinit smp4d_callin(void)
{
int cpuid = hard_smp4d_processor_id();
- extern spinlock_t sun4d_imsk_lock;
unsigned long flags;
-
+
/* Show we are alive */
cpu_leds[cpuid] = 0x6;
show_leds(cpuid);
@@ -118,15 +79,15 @@ void __cpuinit smp4d_callin(void)
sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_flush_cache_all();
local_flush_tlb_all();
-
+
cpu_probe();
- while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
+ while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
-
- while(current_set[cpuid]->cpu != cpuid)
+
+ while (current_set[cpuid]->cpu != cpuid)
barrier();
-
+
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
: : "r" (&current_set[cpuid])
@@ -134,16 +95,16 @@ void __cpuinit smp4d_callin(void)
cpu_leds[cpuid] = 0x9;
show_leds(cpuid);
-
+
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
local_flush_cache_all();
local_flush_tlb_all();
-
+
local_irq_enable(); /* We don't allow PIL 14 yet */
-
+
while (!cpu_isset(cpuid, smp_commenced_mask))
barrier();
@@ -154,15 +115,9 @@ void __cpuinit smp4d_callin(void)
}
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-
/*
* Cycle through the processors asking the PROM to start each one.
*/
-
-extern struct linux_prom_registers smp_penguin_ctable;
-
void __init smp4d_boot_cpus(void)
{
if (boot_cpu_id)
@@ -173,43 +128,42 @@ void __init smp4d_boot_cpus(void)
int __cpuinit smp4d_boot_one_cpu(int i)
{
- extern unsigned long sun4d_cpu_startup;
- unsigned long *entry = &sun4d_cpu_startup;
- struct task_struct *p;
- int timeout;
- int cpu_node;
+ unsigned long *entry = &sun4d_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+ int cpu_node;
- cpu_find_by_instance(i, &cpu_node,NULL);
- /* Cook up an idler for this guy. */
- p = fork_idle(i);
- current_set[i] = task_thread_info(p);
+ cpu_find_by_instance(i, &cpu_node, NULL);
+ /* Cook up an idler for this guy. */
+ p = fork_idle(i);
+ current_set[i] = task_thread_info(p);
+
+ /*
+ * Initialize the contexts table
+ * Since the call to prom_startcpu() trashes the structure,
+ * we need to re-initialize it for each cpu
+ */
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
+ local_flush_cache_all();
+ prom_startcpu(cpu_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ printk(KERN_INFO "prom_startcpu returned :)\n");
+
+ /* wheee... it's going... */
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
+ break;
+ udelay(200);
+ }
- /*
- * Initialize the contexts table
- * Since the call to prom_startcpu() trashes the structure,
- * we need to re-initialize it for each cpu
- */
- smp_penguin_ctable.which_io = 0;
- smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
- smp_penguin_ctable.reg_size = 0;
-
- /* whirrr, whirrr, whirrrrrrrrr... */
- SMP_PRINTK(("Starting CPU %d at %p\n", i, entry));
- local_flush_cache_all();
- prom_startcpu(cpu_node,
- &smp_penguin_ctable, 0, (char *)entry);
-
- SMP_PRINTK(("prom_startcpu returned :)\n"));
-
- /* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
- break;
- udelay(200);
- }
-
if (!(cpu_callin_map[i])) {
- printk("Processor %d is stuck.\n", i);
+ printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
@@ -255,14 +209,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
- if(smp_processors_ready) {
+ if (smp_processors_ready) {
register int high = smp_highest_cpu;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
{
- /* If you make changes here, make sure gcc generates proper code... */
+ /*
+ * If you make changes here, make sure
+ * gcc generates proper code...
+ */
register smpfunc_t f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
@@ -284,7 +241,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpu_clear(smp_processor_id(), mask);
cpus_and(mask, cpu_online_map, mask);
- for(i = 0; i <= high; i++) {
+ for (i = 0; i <= high; i++) {
if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
@@ -300,17 +257,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_in[i])
+ while (!ccall_info.processors_in[i])
barrier();
- } while(++i <= high);
+ } while (++i <= high);
i = 0;
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_out[i])
+ while (!ccall_info.processors_out[i])
barrier();
- } while(++i <= high);
+ } while (++i <= high);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
@@ -336,7 +293,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
old_regs = set_irq_regs(regs);
- bw_get_prof_limit(cpu);
+ bw_get_prof_limit(cpu);
bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
cpu_tick[cpu]++;
@@ -349,7 +306,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
- if(!--prof_counter(cpu)) {
+ if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
@@ -361,8 +318,6 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-extern unsigned int lvl14_resolution;
-
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = hard_smp4d_processor_id();
@@ -374,16 +329,16 @@ static void __cpuinit smp_setup_percpu_timer(void)
void __init smp4d_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
-
+
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
- addr[1] = 0x01000000; /* nop */
- addr[2] = 0x01000000; /* nop */
+ addr[1] = 0x01000000; /* nop */
+ addr[2] = 0x01000000; /* nop */
}
void __init smp4d_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
-
+
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
addr[4] = 0x01000000; /* nop */
@@ -392,17 +347,16 @@ void __init smp4d_blackbox_current(unsigned *addr)
void __init sun4d_init_smp(void)
{
int i;
- extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
-
+
/* And set btfixup... */
BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
-
+
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 7f3b97ff62c1..69df6257a32e 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -1,5 +1,5 @@
-/* sun4m_irq.c
- * arch/sparc/kernel/sun4m_irq.c:
+/*
+ * sun4m irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
@@ -9,101 +9,44 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/vaddrs.h>
#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "irq.h"
+#include "kernel.h"
-struct sun4m_irq_percpu {
- u32 pending;
- u32 clear;
- u32 set;
-};
-
-struct sun4m_irq_global {
- u32 pending;
- u32 mask;
- u32 mask_clear;
- u32 mask_set;
- u32 interrupt_target;
-};
-
-/* Code in entry.S needs to get at these register mappings. */
-struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
-struct sun4m_irq_global __iomem *sun4m_irq_global;
-
-/* Dave Redman (djhr@tadpole.co.uk)
- * The sun4m interrupt registers.
- */
-#define SUN4M_INT_ENABLE 0x80000000
-#define SUN4M_INT_E14 0x00000080
-#define SUN4M_INT_E10 0x00080000
-
-#define SUN4M_HARD_INT(x) (0x000000001 << (x))
-#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
-
-#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
-#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
-#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
-#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
-#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
-#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
-#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
-#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
-#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
-#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
-#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
-#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
-#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
-#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
-#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
-#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
-
-#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
- SUN4M_INT_M2S_WRITE_ERR | \
- SUN4M_INT_ECC_ERR | \
- SUN4M_INT_VME_ERR)
-
-#define SUN4M_INT_SBUS(x) (1 << (x+7))
-#define SUN4M_INT_VME(x) (1 << (x))
-
-/* Interrupt levels used by OBP */
-#define OBP_INT_LEVEL_SOFT 0x10
-#define OBP_INT_LEVEL_ONBOARD 0x20
-#define OBP_INT_LEVEL_SBUS 0x30
-#define OBP_INT_LEVEL_VME 0x40
-
-/* Interrupt level assignment on sun4m:
+/* Sample sun4m IRQ layout:
+ *
+ * 0x22 - Power
+ * 0x24 - ESP SCSI
+ * 0x26 - Lance ethernet
+ * 0x2b - Floppy
+ * 0x2c - Zilog uart
+ * 0x32 - SBUS level 0
+ * 0x33 - Parallel port, SBUS level 1
+ * 0x35 - SBUS level 2
+ * 0x37 - SBUS level 3
+ * 0x39 - Audio, Graphics card, SBUS level 4
+ * 0x3b - SBUS level 5
+ * 0x3d - SBUS level 6
+ *
+ * Each interrupt source has a mask bit in the interrupt registers.
+ * When the mask bit is set, this blocks interrupt deliver. So you
+ * clear the bit to enable the interrupt.
+ *
+ * Interrupts numbered less than 0x10 are software triggered interrupts
+ * and unused by Linux.
+ *
+ * Interrupt level assignment on sun4m:
*
* level source
* ------------------------------------------------------------
- * 1 softint-1
+ * 1 softint-1
* 2 softint-2, VME/SBUS level 1
* 3 softint-3, VME/SBUS level 2
* 4 softint-4, onboard SCSI
@@ -138,10 +81,10 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and
* Tadpole S3 GX systems.
*
- * esp: 0x24 onboard ESP SCSI
- * le: 0x26 onboard Lance ETHERNET
+ * esp: 0x24 onboard ESP SCSI
+ * le: 0x26 onboard Lance ETHERNET
* p9100: 0x32 SBUS level 1 P9100 video
- * bpp: 0x33 SBUS level 2 BPP parallel port device
+ * bpp: 0x33 SBUS level 2 BPP parallel port device
* DBRI: 0x39 SBUS level 5 DBRI ISDN audio
* SUNW,leo: 0x39 SBUS level 5 LEO video
* pcmcia: 0x3b SBUS level 6 PCMCIA controller
@@ -152,8 +95,57 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* power: 0x22 onboard power device (XXX unknown mask bit XXX)
*/
+
+/* Code in entry.S needs to get at these register mappings. */
+struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
+struct sun4m_irq_global __iomem *sun4m_irq_global;
+
+/* Dave Redman (djhr@tadpole.co.uk)
+ * The sun4m interrupt registers.
+ */
+#define SUN4M_INT_ENABLE 0x80000000
+#define SUN4M_INT_E14 0x00000080
+#define SUN4M_INT_E10 0x00080000
+
+#define SUN4M_HARD_INT(x) (0x000000001 << (x))
+#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
+
+#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
+#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
+#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
+#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
+#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
+#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
+#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
+#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
+#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
+#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
+#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
+#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
+#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
+#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
+#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
+#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
+
+#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
+ SUN4M_INT_M2S_WRITE_ERR | \
+ SUN4M_INT_ECC_ERR | \
+ SUN4M_INT_VME_ERR)
+
+#define SUN4M_INT_SBUS(x) (1 << (x+7))
+#define SUN4M_INT_VME(x) (1 << (x))
+
+/* Interrupt levels used by OBP */
+#define OBP_INT_LEVEL_SOFT 0x10
+#define OBP_INT_LEVEL_ONBOARD 0x20
+#define OBP_INT_LEVEL_SBUS 0x30
+#define OBP_INT_LEVEL_VME 0x40
+
+#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
+#define SUM4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14)
+
static unsigned long irq_mask[0x50] = {
- /* SMP */
+ /* 0x00 - SMP */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -162,7 +154,7 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
- /* soft */
+ /* 0x10 - soft */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -171,19 +163,19 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
- /* onboard */
+ /* 0x20 - onboard */
0, 0, 0, 0,
SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0,
SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
(SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR,
- /* sbus */
+ /* 0x30 - sbus */
0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5),
0, SUN4M_INT_SBUS(6), 0, 0,
- /* vme */
+ /* 0x40 - vme */
0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1),
0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3),
0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5),
@@ -193,7 +185,7 @@ static unsigned long irq_mask[0x50] = {
static unsigned long sun4m_get_irqmask(unsigned int irq)
{
unsigned long mask;
-
+
if (irq < 0x50)
mask = irq_mask[irq];
else
@@ -217,7 +209,7 @@ static void sun4m_disable_irq(unsigned int irq_nr)
sbus_writel(mask, &sun4m_irq_global->mask_set);
else
sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
- local_irq_restore(flags);
+ local_irq_restore(flags);
}
static void sun4m_enable_irq(unsigned int irq_nr)
@@ -226,17 +218,17 @@ static void sun4m_enable_irq(unsigned int irq_nr)
int cpu = smp_processor_id();
/* Dreadful floppy hack. When we use 0x2b instead of
- * 0x0b the system blows (it starts to whistle!).
- * So we continue to use 0x0b. Fixme ASAP. --P3
- */
- if (irq_nr != 0x0b) {
+ * 0x0b the system blows (it starts to whistle!).
+ * So we continue to use 0x0b. Fixme ASAP. --P3
+ */
+ if (irq_nr != 0x0b) {
mask = sun4m_get_irqmask(irq_nr);
local_irq_save(flags);
if (irq_nr > 15)
sbus_writel(mask, &sun4m_irq_global->mask_clear);
else
sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
- local_irq_restore(flags);
+ local_irq_restore(flags);
} else {
local_irq_save(flags);
sbus_writel(SUN4M_INT_FLOPPY, &sun4m_irq_global->mask_clear);
@@ -260,7 +252,7 @@ static unsigned long cpu_pil_to_imask[16] = {
/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO,
/*14*/ SUN4M_INT_E14,
-/*15*/ SUN4M_INT_ERROR
+/*15*/ SUN4M_INT_ERROR,
};
/* We assume the caller has disabled local interrupts when these are called,
@@ -280,12 +272,14 @@ static void sun4m_enable_pil_irq(unsigned int pil)
static void sun4m_send_ipi(int cpu, int level)
{
unsigned long mask = sun4m_get_irqmask(level);
+
sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
}
static void sun4m_clear_ipi(int cpu, int level)
{
unsigned long mask = sun4m_get_irqmask(level);
+
sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
}
@@ -314,7 +308,6 @@ struct sun4m_timer_global {
static struct sun4m_timer_global __iomem *timers_global;
-#define TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
@@ -391,7 +384,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
master_l10_counter = &timers_global->l10_count;
- err = request_irq(TIMER_IRQ, counter_fn,
+ err = request_irq(SUN4M_TIMER_IRQ, counter_fn,
(IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
if (err) {
printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
@@ -407,7 +400,6 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
#ifdef CONFIG_SMP
{
unsigned long flags;
- extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
/* For SMP we use the level 14 ticker, however the bootup code
@@ -466,7 +458,9 @@ void __init sun4m_init_IRQ(void)
BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
- sparc_init_timers = sun4m_init_timers;
+
+ sparc_irq_config.init_timers = sun4m_init_timers;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
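Several hunks in this series stop assigning the global sparc_init_timers hook and instead fill in members of sparc_irq_config (init_timers here and on sun4c, plus build_device_irq on sun4d), which time_32.c then calls through. The structure this implies is roughly the following; the type and field names are taken from the assignments visible in this patch, while the actual definition lives in the series' irq.h changes, which are not shown here:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Per-platform IRQ setup hooks, filled in by sun4c/sun4d/sun4m init code. */
struct sparc_irq_config {
	void (*init_timers)(irq_handler_t counter_fn);
	unsigned int (*build_device_irq)(struct platform_device *op,
					 unsigned int real_irq);
};
extern struct sparc_irq_config sparc_irq_config;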
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 762d6eedd944..5cc7dc51de3d 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -1,59 +1,22 @@
-/* sun4m_smp.c: Sparc SUN4M SMP support.
+/*
+ * sun4m SMP support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/head.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/irq_regs.h>
-
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/cpudata.h>
#include "irq.h"
+#include "kernel.h"
#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
-
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned char boot_cpu_id;
-
-extern cpumask_t smp_commenced_mask;
-
-extern int __smp4m_processor_id(void);
-
-/*#define SMP_DEBUG*/
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
-
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
@@ -64,7 +27,6 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
}
static void smp_setup_percpu_timer(void);
-extern void cpu_probe(void);
void __cpuinit smp4m_callin(void)
{
@@ -96,7 +58,7 @@ void __cpuinit smp4m_callin(void)
/* XXX: What's up with all the flushes? */
local_flush_cache_all();
local_flush_tlb_all();
-
+
cpu_probe();
/* Fix idle thread fields. */
@@ -119,9 +81,6 @@ void __cpuinit smp4m_callin(void)
/*
* Cycle through the processors asking the PROM to start each one.
*/
-
-extern struct linux_prom_registers smp_penguin_ctable;
-
void __init smp4m_boot_cpus(void)
{
smp_setup_percpu_timer();
@@ -130,7 +89,6 @@ void __init smp4m_boot_cpus(void)
int __cpuinit smp4m_boot_one_cpu(int i)
{
- extern unsigned long sun4m_cpu_startup;
unsigned long *entry = &sun4m_cpu_startup;
struct task_struct *p;
int timeout;
@@ -142,7 +100,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
p = fork_idle(i);
current_set[i] = task_thread_info(p);
/* See trampoline.S for details... */
- entry += ((i-1) * 3);
+ entry += ((i - 1) * 3);
/*
* Initialize the contexts table
@@ -154,20 +112,19 @@ int __cpuinit smp4m_boot_one_cpu(int i)
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
- printk("Starting CPU %d at %p\n", i, entry);
+ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
- prom_startcpu(cpu_node,
- &smp_penguin_ctable, 0, (char *)entry);
+ prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
- printk("Processor %d is stuck.\n", i);
+ printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
@@ -202,6 +159,7 @@ void __init smp4m_smp_done(void)
void smp4m_irq_rotate(int cpu)
{
int next = cpu_data(cpu).next;
+
if (next != cpu)
set_irq_udt(next);
}
@@ -243,7 +201,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpu_clear(smp_processor_id(), mask);
cpus_and(mask, cpu_online_map, mask);
- for(i = 0; i < ncpus; i++) {
+ for (i = 0; i < ncpus; i++) {
if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
@@ -262,19 +220,18 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_in[i])
+ while (!ccall_info.processors_in[i])
barrier();
- } while(++i < ncpus);
+ } while (++i < ncpus);
i = 0;
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_out[i])
+ while (!ccall_info.processors_out[i])
barrier();
- } while(++i < ncpus);
+ } while (++i < ncpus);
}
-
spin_unlock_irqrestore(&cross_call_lock, flags);
}
@@ -289,8 +246,6 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
-extern void sun4m_clear_profile_irq(int cpu);
-
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
@@ -302,7 +257,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
- if(!--prof_counter(cpu)) {
+ if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
@@ -314,8 +269,6 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-extern unsigned int lvl14_resolution;
-
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
@@ -323,7 +276,7 @@ static void __cpuinit smp_setup_percpu_timer(void)
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
- if(cpu == boot_cpu_id)
+ if (cpu == boot_cpu_id)
enable_pil_irq(14);
}
@@ -331,9 +284,9 @@ static void __init smp4m_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
-
+
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
- addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
+ addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
}
@@ -341,9 +294,9 @@ static void __init smp4m_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
-
+
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
- addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
+ addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index f836f4e93afe..96082d30def0 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -360,20 +360,25 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
}
EXPORT_SYMBOL(get_fb_unmapped_area);
-/* Essentially the same as PowerPC... */
-void arch_pick_mmap_layout(struct mm_struct *mm)
+/* Essentially the same as PowerPC. */
+static unsigned long mmap_rnd(void)
{
- unsigned long random_factor = 0UL;
- unsigned long gap;
+ unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
+ unsigned long val = get_random_int();
if (test_thread_flag(TIF_32BIT))
- random_factor &= ((1 * 1024 * 1024) - 1);
+ rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
else
- random_factor = ((random_factor << PAGE_SHIFT) &
- 0xffffffffUL);
+ rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
}
+ return (rnd << PAGE_SHIFT) * 2;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = mmap_rnd();
+ unsigned long gap;
/*
* Fall back to the standard layout if the personality
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
deleted file mode 100644
index 138bbf5f8724..000000000000
--- a/arch/sparc/kernel/tick14.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* tick14.c
- *
- * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
- *
- * This file handles the Sparc specific level14 ticker
- * This is really useful for profiling OBP uses it for keyboard
- * aborts and other stuff.
- */
-#include <linux/kernel.h>
-
-extern unsigned long lvl14_save[5];
-static unsigned long *linux_lvl14 = NULL;
-static unsigned long obp_lvl14[4];
-
-/*
- * Call with timer IRQ closed.
- * First time we do it with disable_irq, later prom code uses spin_lock_irq().
- */
-void install_linux_ticker(void)
-{
-
- if (!linux_lvl14)
- return;
- linux_lvl14[0] = lvl14_save[0];
- linux_lvl14[1] = lvl14_save[1];
- linux_lvl14[2] = lvl14_save[2];
- linux_lvl14[3] = lvl14_save[3];
-}
-
-void install_obp_ticker(void)
-{
-
- if (!linux_lvl14)
- return;
- linux_lvl14[0] = obp_lvl14[0];
- linux_lvl14[1] = obp_lvl14[1];
- linux_lvl14[2] = obp_lvl14[2];
- linux_lvl14[3] = obp_lvl14[3];
-}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 9c743b1886ff..8237dd4dfeb4 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
#define TICK_SIZE (tick_nsec / 1000)
@@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
profile_tick(CPU_PROFILING);
#endif
- /* Protect counter clear so that do_gettimeoffset works */
- write_seqlock(&xtime_lock);
-
clear_clock_irq();
- do_timer(1);
-
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
@@ -142,7 +137,7 @@ static struct platform_device m48t59_rtc = {
},
};
-static int __devinit clock_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit clock_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
@@ -176,7 +171,7 @@ static struct of_device_id __initdata clock_match[] = {
{},
};
-static struct of_platform_driver clock_driver = {
+static struct platform_driver clock_driver = {
.probe = clock_probe,
.driver = {
.name = "rtc",
@@ -189,7 +184,7 @@ static struct of_platform_driver clock_driver = {
/* Probe for the mostek real time clock chip. */
static int __init clock_init(void)
{
- return of_register_platform_driver(&clock_driver);
+ return platform_driver_register(&clock_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the RTC driver
@@ -224,7 +219,7 @@ static void __init sbus_time_init(void)
btfixup();
- sparc_init_timers(timer_interrupt);
+ sparc_irq_config.init_timers(timer_interrupt);
}
void __init time_init(void)
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3bc9c9979b92..95ec25faba39 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -419,7 +419,7 @@ static struct platform_device rtc_cmos_device = {
.num_resources = 1,
};
-static int __devinit rtc_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit rtc_probe(struct platform_device *op)
{
struct resource *r;
@@ -462,7 +462,7 @@ static struct of_device_id __initdata rtc_match[] = {
{},
};
-static struct of_platform_driver rtc_driver = {
+static struct platform_driver rtc_driver = {
.probe = rtc_probe,
.driver = {
.name = "rtc",
@@ -477,7 +477,7 @@ static struct platform_device rtc_bq4802_device = {
.num_resources = 1,
};
-static int __devinit bq4802_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit bq4802_probe(struct platform_device *op)
{
printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
@@ -495,7 +495,7 @@ static struct of_device_id __initdata bq4802_match[] = {
{},
};
-static struct of_platform_driver bq4802_driver = {
+static struct platform_driver bq4802_driver = {
.probe = bq4802_probe,
.driver = {
.name = "bq4802",
@@ -534,7 +534,7 @@ static struct platform_device m48t59_rtc = {
},
};
-static int __devinit mostek_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit mostek_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -559,7 +559,7 @@ static struct of_device_id __initdata mostek_match[] = {
{},
};
-static struct of_platform_driver mostek_driver = {
+static struct platform_driver mostek_driver = {
.probe = mostek_probe,
.driver = {
.name = "mostek",
@@ -586,9 +586,9 @@ static int __init clock_init(void)
if (tlb_type == hypervisor)
return platform_device_register(&rtc_sun4v_device);
- (void) of_register_platform_driver(&rtc_driver);
- (void) of_register_platform_driver(&mostek_driver);
- (void) of_register_platform_driver(&bq4802_driver);
+ (void) platform_driver_register(&rtc_driver);
+ (void) platform_driver_register(&mostek_driver);
+ (void) platform_driver_register(&bq4802_driver);
return 0;
}
@@ -816,14 +816,12 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
- clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
+ clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
- clocksource_register(&clocksource_tick);
-
sparc64_clockevent.name = tick_ops->name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
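The time_64.c hunk drops the manual mult/shift computation and the separate clocksource_register() call in favour of clocksource_register_hz(), which derives mult and shift from the tick frequency inside the clocksource core. A hedged sketch of that registration pattern for a hypothetical driver (names are illustrative only):

    #include <linux/clocksource.h>

    /* hypothetical clocksource; only the fields used here are shown */
    static cycle_t example_read(struct clocksource *cs)
    {
        return 0; /* read the free-running counter here */
    }

    static struct clocksource example_cs = {
        .name   = "example",
        .rating = 300,
        .read   = example_read,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init example_cs_init(unsigned long freq_hz)
    {
        /* mult/shift are computed from freq_hz inside the core */
        return clocksource_register_hz(&example_cs, freq_hz);
    }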
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 1e9770936c3b..1ed547bd850f 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2152,7 +2152,7 @@ static void user_instruction_dump(unsigned int __user *pc)
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
- unsigned long fp, thread_base, ksp;
+ unsigned long fp, ksp;
struct thread_info *tp;
int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -2173,7 +2173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
flushw_all();
fp = ksp + STACK_BIAS;
- thread_base = (unsigned long) tp;
printk("Call Trace:\n");
do {
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0c1e6783657f..92b557afe535 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
__sun4v_2insn_patch_end = .;
}
- PERCPU(PAGE_SIZE)
+ PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index cbddeb38ffda..d3c7a12ad879 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -16,7 +16,7 @@
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};
#else /* SMP */
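SPIN_LOCK_UNLOCKED cannot carry per-lock lockdep class information, which is why the initializer is switched to __SPIN_LOCK_UNLOCKED(name). A minimal sketch of the same pattern, with a hypothetical lock array and a single lock declared via DEFINE_SPINLOCK():

    #include <linux/spinlock.h>

    #define MY_HASH_SIZE 4  /* hypothetical size, for illustration only */

    /* every element statically initialized to the unlocked state */
    static spinlock_t my_hash_locks[MY_HASH_SIZE] = {
        [0 ... (MY_HASH_SIZE - 1)] = __SPIN_LOCK_UNLOCKED(my_hash_locks)
    };

    /* a single lock is simpler with the helper macro */
    static DEFINE_SPINLOCK(my_single_lock);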
diff --git a/arch/sparc/prom/misc_32.c b/arch/sparc/prom/misc_32.c
index 8c278c311ba4..677b6a10fbde 100644
--- a/arch/sparc/prom/misc_32.c
+++ b/arch/sparc/prom/misc_32.c
@@ -54,15 +54,11 @@ EXPORT_SYMBOL(prom_feval);
void
prom_cmdline(void)
{
- extern void install_obp_ticker(void);
- extern void install_linux_ticker(void);
unsigned long flags;
spin_lock_irqsave(&prom_lock, flags);
- install_obp_ticker();
(*(romvec->pv_abort))();
restore_current();
- install_linux_ticker();
spin_unlock_irqrestore(&prom_lock, flags);
set_auxio(AUXIO_LED, 0);
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 08948e4e1503..c152e1be9773 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -11,6 +11,7 @@ config TILE
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 25fdc0c1839a..c6ce378e0678 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -63,7 +63,7 @@ SECTIONS
*(.init.page)
} :data =0
INIT_DATA_SECTION(16)
- PERCPU(PAGE_SIZE)
+ PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_einitdata) = .;
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index e351e14b4339..1e78940218c0 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -7,6 +7,7 @@ config UML
bool
default y
select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
config MMU
bool
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index ba4a98ba39c0..620f5b70957d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -185,7 +185,7 @@ struct ubd {
.no_cow = 0, \
.shared = 0, \
.cow = DEFAULT_COW, \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index ac55b9efa1ce..34bede8aad4a 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -42,7 +42,7 @@
INIT_SETUP(0)
}
- PERCPU(32)
+ PERCPU(32, 32)
.initcall.init : {
INIT_CALLS
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 3f0ac9e0c966..64cfea80cfe2 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -35,8 +35,10 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
+ struct irq_desc *desc = irq_to_desc(i);
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ",i);
@@ -46,7 +48,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -54,7 +56,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS)
seq_putc(p, '\n');
@@ -360,10 +362,10 @@ EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
/*
- * irq_chip must define (startup || enable) &&
- * (shutdown || disable) && end
+ * irq_chip must define at least enable/disable and ack when
+ * the edge handler is used.
*/
-static void dummy(unsigned int irq)
+static void dummy(struct irq_data *d)
{
}
@@ -371,20 +373,17 @@ static void dummy(unsigned int irq)
static struct irq_chip normal_irq_type = {
.name = "SIGIO",
.release = free_irq_by_irq_and_dev,
- .disable = dummy,
- .enable = dummy,
- .ack = dummy,
- .end = dummy
+ .irq_disable = dummy,
+ .irq_enable = dummy,
+ .irq_ack = dummy,
};
static struct irq_chip SIGVTALRM_irq_type = {
.name = "SIGVTALRM",
.release = free_irq_by_irq_and_dev,
- .shutdown = dummy, /* never called */
- .disable = dummy,
- .enable = dummy,
- .ack = dummy,
- .end = dummy
+ .irq_disable = dummy,
+ .irq_enable = dummy,
+ .irq_ack = dummy,
};
void __init init_IRQ(void)
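The UML chips are converted to the irq_data-based callbacks (.irq_enable, .irq_disable, .irq_ack) that replace the deprecated .enable/.disable/.ack/.end hooks. A small sketch of a do-nothing chip in the new style (names are illustrative only):

    #include <linux/irq.h>

    /* callbacks now receive struct irq_data instead of a bare irq number */
    static void example_noop(struct irq_data *d)
    {
        /* d->irq still yields the Linux interrupt number when needed */
    }

    static struct irq_chip example_chip = {
        .name        = "example",
        .irq_enable  = example_noop,
        .irq_disable = example_noop,
        .irq_ack     = example_noop,
    };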
diff --git a/arch/unicore32/.gitignore b/arch/unicore32/.gitignore
new file mode 100644
index 000000000000..947e99c2a957
--- /dev/null
+++ b/arch/unicore32/.gitignore
@@ -0,0 +1,21 @@
+#
+# Generated include files
+#
+include/generated
+#
+# Generated ld script file
+#
+kernel/vmlinux.lds
+#
+# Generated images in boot
+#
+boot/Image
+boot/zImage
+boot/uImage
+#
+# Generated files in boot/compressed
+#
+boot/compressed/piggy.S
+boot/compressed/piggy.gzip
+boot/compressed/vmlinux
+boot/compressed/vmlinux.lds
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
new file mode 100644
index 000000000000..4a36db45fb3d
--- /dev/null
+++ b/arch/unicore32/Kconfig
@@ -0,0 +1,275 @@
+config UNICORE32
+ def_bool y
+ select HAVE_MEMBLOCK
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_DMA_ATTRS
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_LZMA
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_IRQ_PROBE
+ select GENERIC_HARDIRQS_NO_DEPRECATED
+ select ARCH_WANT_FRAME_POINTERS
+ help
+	  UniCore-32 is a 32-bit Instruction Set Architecture,
+ including a series of low-power-consumption RISC chip
+ designs licensed by PKUnity Ltd.
+ Please see web page at <http://www.pkunity.com/>.
+
+config HAVE_PWM
+ bool
+
+config GENERIC_GPIO
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ bool
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_IOMAP
+ def_bool y
+
+config NO_IOPORT
+ bool
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+config ARCH_HAS_ILOG2_U32
+ bool
+
+config ARCH_HAS_ILOG2_U64
+ bool
+
+config ARCH_HAS_CPUFREQ
+ bool
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+config NEED_DMA_MAP_STATE
+ def_bool y
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+menu "System Type"
+
+config MMU
+ def_bool y
+
+config ARCH_FPGA
+ bool
+
+config ARCH_PUV3
+ def_bool y
+ select CPU_UCV2
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ select ARCH_REQUIRE_GPIOLIB
+ select ARCH_HAS_CPUFREQ
+
+# CONFIGs for ARCH_PUV3
+
+if ARCH_PUV3
+
+choice
+ prompt "Board Selection"
+ default PUV3_DB0913
+
+config PUV3_FPGA_DLX200
+ select ARCH_FPGA
+ bool "FPGA board"
+
+config PUV3_DB0913
+ bool "DEBUG board (0913)"
+
+config PUV3_NB0916
+ bool "NetBook board (0916)"
+ select HAVE_PWM
+
+config PUV3_SMW0919
+ bool "Security Mini-Workstation board (0919)"
+
+endchoice
+
+config PUV3_PM
+ def_bool y if !ARCH_FPGA
+
+endif
+
+source "arch/unicore32/mm/Kconfig"
+
+comment "Floating poing support"
+
+config UNICORE_FPU_F64
+ def_bool y if !ARCH_FPGA
+
+endmenu
+
+menu "Bus support"
+
+config PCI
+ bool "PCI Support"
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+source "drivers/pci/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+endmenu
+
+menu "Kernel Features"
+
+source "kernel/time/Kconfig"
+
+source "kernel/Kconfig.preempt"
+
+source "kernel/Kconfig.hz"
+
+source "mm/Kconfig"
+
+config LEDS
+ def_bool y
+ depends on GENERIC_GPIO
+
+config ALIGNMENT_TRAP
+ def_bool y
+ help
+	  Unicore processors cannot fetch/store information which is not
+ naturally aligned on the bus, i.e., a 4 byte fetch must start at an
+ address divisible by 4. On 32-bit Unicore processors, these non-aligned
+	  fetch/store instructions will be emulated in software if you say Y
+ here, which has a severe performance impact. This is necessary for
+ correct operation of some network protocols. With an IP-only
+ configuration it is safe to say N, otherwise say Y.
+
+endmenu
+
+menu "Boot options"
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+
+config CMDLINE_FORCE
+ bool "Always use the default kernel command string"
+ depends on CMDLINE != ""
+ help
+ Always use the default kernel command string, even if the boot
+ loader passes other arguments to the kernel.
+ This is useful if you cannot or don't want to change the
+ command-line options your boot loader passes to the kernel.
+
+ If unsure, say N.
+
+endmenu
+
+menu "Userspace binary formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+if ARCH_HAS_CPUFREQ
+source "drivers/cpufreq/Kconfig"
+endif
+
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y if !ARCH_FPGA
+
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y if !ARCH_FPGA
+
+endmenu
+
+source "net/Kconfig"
+
+if ARCH_PUV3
+
+config PUV3_GPIO
+ bool
+ depends on !ARCH_FPGA
+ select GENERIC_GPIO
+ select GPIO_SYSFS if EXPERIMENTAL
+ default y
+
+config PUV3_PWM
+ tristate
+ default BACKLIGHT_PWM
+ help
+ Enable support for NB0916 PWM controllers
+
+config PUV3_RTC
+ tristate "PKUnity v3 RTC Support"
+ depends on !ARCH_FPGA
+
+if PUV3_NB0916
+
+menu "PKUnity NetBook-0916 Features"
+
+config I2C_BATTERY_BQ27200
+ tristate "I2C Battery BQ27200 Support"
+ select PUV3_I2C
+ select POWER_SUPPLY
+ select BATTERY_BQ27x00
+
+config I2C_EEPROM_AT24
+ tristate "I2C EEPROMs AT24 support"
+ select PUV3_I2C
+ select MISC_DEVICES
+ select EEPROM_AT24
+
+config LCD_BACKLIGHT
+ tristate "LCD Backlight support"
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_PWM
+
+endmenu
+
+endif
+
+endif
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/unicore32/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
new file mode 100644
index 000000000000..3140151ede45
--- /dev/null
+++ b/arch/unicore32/Kconfig.debug
@@ -0,0 +1,68 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU
+ ---help---
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to memory mapped peripherals.
+
+ If in doubt, say Y.
+
+config EARLY_PRINTK
+ def_bool DEBUG_OCD
+ help
+	  Write kernel log output directly to the OCD or to a serial port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+	  with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config DEBUG_STACK_USAGE
+ bool "Enable stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T output.
+
+# These options are only for real kernel hackers who want to get their hands dirty.
+config DEBUG_LL
+ bool "Kernel low-level debugging functions"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to include definitions of printascii, printch, printhex
+ in the kernel. This is helpful if you are debugging code that
+ executes before the console is initialized.
+
+config DEBUG_OCD
+ bool "Kernel low-level debugging via On-Chip-Debugger"
+ depends on DEBUG_LL
+ default y
+ help
+ Say Y here if you want the debug print routines to direct their
+ output to the UniCore On-Chip-Debugger channel using CP #1.
+
+config DEBUG_OCD_BREAKPOINT
+ bool "Breakpoint support via On-Chip-Debugger"
+ depends on DEBUG_OCD
+
+config DEBUG_UART
+ int "Kernel low-level debugging messages via serial port"
+ depends on DEBUG_LL
+ range 0 1
+ default "0"
+ help
+	  Choice of UART for kernel low-level debugging using the PKUnity
+	  UARTs; the value must be either zero or one. The port must have
+	  been initialised by the boot-loader before use.
+
+endmenu
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
new file mode 100644
index 000000000000..e08d6d370a8a
--- /dev/null
+++ b/arch/unicore32/Makefile
@@ -0,0 +1,95 @@
+#
+# arch/unicore32/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002~2010 by Guan Xue-tao
+#
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, unicore32-linux-)
+ endif
+endif
+
+LDFLAGS_vmlinux := -p --no-undefined -X
+
+OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+# Never generate .eh_frame
+KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
+# Never use hard float in kernel
+KBUILD_CFLAGS += -msoft-float
+
+ifeq ($(CONFIG_FRAME_POINTER),y)
+KBUILD_CFLAGS += -mno-sched-prolog
+endif
+
+CHECKFLAGS += -D__unicore32__
+
+head-y := arch/unicore32/kernel/head.o
+head-y += arch/unicore32/kernel/init_task.o
+
+core-y += arch/unicore32/kernel/
+core-y += arch/unicore32/mm/
+
+libs-y += arch/unicore32/lib/
+
+ASM_GENERATED_DIR := $(srctree)/arch/unicore32/include/generated
+LINUXINCLUDE += -I$(ASM_GENERATED_DIR)
+
+ASM_GENERIC_HEADERS := atomic.h auxvec.h
+ASM_GENERIC_HEADERS += bitsperlong.h bug.h bugs.h
+ASM_GENERIC_HEADERS += cputime.h current.h
+ASM_GENERIC_HEADERS += device.h div64.h
+ASM_GENERIC_HEADERS += emergency-restart.h errno.h
+ASM_GENERIC_HEADERS += fb.h fcntl.h ftrace.h
+ASM_GENERIC_HEADERS += hardirq.h hw_irq.h
+ASM_GENERIC_HEADERS += ioctl.h ioctls.h ipcbuf.h irq_regs.h
+ASM_GENERIC_HEADERS += kdebug.h kmap_types.h
+ASM_GENERIC_HEADERS += local.h
+ASM_GENERIC_HEADERS += mman.h module.h msgbuf.h
+ASM_GENERIC_HEADERS += param.h parport.h percpu.h poll.h posix_types.h
+ASM_GENERIC_HEADERS += resource.h
+ASM_GENERIC_HEADERS += scatterlist.h sections.h segment.h sembuf.h serial.h
+ASM_GENERIC_HEADERS += setup.h shmbuf.h shmparam.h
+ASM_GENERIC_HEADERS += siginfo.h signal.h sizes.h
+ASM_GENERIC_HEADERS += socket.h sockios.h stat.h statfs.h swab.h syscalls.h
+ASM_GENERIC_HEADERS += termbits.h termios.h topology.h types.h
+ASM_GENERIC_HEADERS += ucontext.h unaligned.h user.h
+ASM_GENERIC_HEADERS += vga.h
+ASM_GENERIC_HEADERS += xor.h
+
+archprepare:
+ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR)))
+ $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm
+ $(Q)$(foreach a, $(ASM_GENERIC_HEADERS), \
+ echo '#include <asm-generic/$a>' \
+ > $(ASM_GENERATED_DIR)/asm/$a; )
+endif
+
+boot := arch/unicore32/boot
+
+# Default target when executing plain make
+KBUILD_IMAGE := zImage
+
+all: $(KBUILD_IMAGE)
+
+zImage Image uImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+MRPROPER_DIRS += $(ASM_GENERATED_DIR)
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+ echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
+ echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
+ echo ' uImage - U-Boot wrapped zImage'
+endef
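The archprepare rule above generates one-line wrapper headers under include/generated/asm so that a plain #include <asm/foo.h> falls through to the asm-generic version. For example, the generated asm/errno.h would contain nothing but the redirecting include:

    /* arch/unicore32/include/generated/asm/errno.h (generated; shown for illustration) */
    #include <asm-generic/errno.h>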
diff --git a/arch/unicore32/boot/Makefile b/arch/unicore32/boot/Makefile
new file mode 100644
index 000000000000..79e5f88845d9
--- /dev/null
+++ b/arch/unicore32/boot/Makefile
@@ -0,0 +1,47 @@
+#
+# arch/unicore32/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2001~2010 GUAN Xue-tao
+#
+
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+targets := Image zImage uImage
+
+$(obj)/Image: vmlinux FORCE
+ $(call if_changed,objcopy)
+ @echo ' Kernel: $@ is ready'
+
+$(obj)/compressed/vmlinux: $(obj)/Image FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/compressed $@
+
+$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
+ $(call if_changed,objcopy)
+ @echo ' Kernel: $@ is ready'
+
+quiet_cmd_uimage = UIMAGE $@
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A unicore -O linux -T kernel \
+ -C none -a $(LOADADDR) -e $(STARTADDR) \
+ -n 'Linux-$(KERNELRELEASE)' -d $< $@
+
+$(obj)/uImage: LOADADDR=0x0
+
+$(obj)/uImage: STARTADDR=$(LOADADDR)
+
+$(obj)/uImage: $(obj)/zImage FORCE
+ $(call if_changed,uimage)
+ @echo ' Image $@ is ready'
+
+PHONY += initrd FORCE
+initrd:
+ @test "$(INITRD)" != "" || \
+ (echo You must specify INITRD; exit -1)
+
+subdir- := compressed
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
new file mode 100644
index 000000000000..95373428cb3d
--- /dev/null
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -0,0 +1,68 @@
+#
+# linux/arch/unicore32/boot/compressed/Makefile
+#
+# create a compressed vmlinuz image from the original vmlinux
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2001~2010 GUAN Xue-tao
+#
+
+EXTRA_CFLAGS := -fpic -fno-builtin
+EXTRA_AFLAGS := -Wa,-march=all
+
+OBJS := misc.o
+
+# font.c and font.o
+CFLAGS_font.o := -Dstatic=
+$(obj)/font.c: $(srctree)/drivers/video/console/font_8x8.c
+ $(call cmd,shipped)
+
+# piggy.S and piggy.o
+suffix_$(CONFIG_KERNEL_GZIP) := gzip
+suffix_$(CONFIG_KERNEL_BZIP2) := bz2
+suffix_$(CONFIG_KERNEL_LZO) := lzo
+suffix_$(CONFIG_KERNEL_LZMA) := lzma
+
+$(obj)/piggy.$(suffix_y): $(obj)/../Image FORCE
+ $(call if_changed,$(suffix_y))
+
+SEDFLAGS_piggy = s/DECOMP_SUFFIX/$(suffix_y)/
+$(obj)/piggy.S: $(obj)/piggy.S.in
+ @sed "$(SEDFLAGS_piggy)" < $< > $@
+
+$(obj)/piggy.o: $(obj)/piggy.$(suffix_y) $(obj)/piggy.S FORCE
+
+targets := vmlinux vmlinux.lds font.o font.c head.o misc.o \
+ piggy.$(suffix_y) piggy.o piggy.S \
+
+# Make sure files are removed during clean
+extra-y += piggy.gzip piggy.bz2 piggy.lzo piggy.lzma
+
+# ?
+LDFLAGS_vmlinux += -p
+# Report unresolved symbol references
+LDFLAGS_vmlinux += --no-undefined
+# Delete all temporary local symbols
+LDFLAGS_vmlinux += -X
+# Next argument is a linker script
+LDFLAGS_vmlinux += -T
+
+# For uidivmod
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
+ $(obj)/misc.o FORCE
+ $(call if_changed,ld)
+ @:
+
+# We now have a PIC decompressor implementation. Decompressors running
+# from RAM should not define ZTEXTADDR. Decompressors running directly
+# from ROM or Flash must define ZTEXTADDR (preferably via the config)
+ZTEXTADDR := 0
+ZBSSADDR := ALIGN(4)
+
+SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/unicore32/boot/Makefile $(KCONFIG_CONFIG)
+ @sed "$(SEDFLAGS_lds)" < $< > $@
+
diff --git a/arch/unicore32/boot/compressed/head.S b/arch/unicore32/boot/compressed/head.S
new file mode 100644
index 000000000000..fbd1e374c685
--- /dev/null
+++ b/arch/unicore32/boot/compressed/head.S
@@ -0,0 +1,204 @@
+/*
+ * linux/arch/unicore32/boot/compressed/head.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <mach/memory.h>
+
+#define csub cmpsub
+#define cand cmpand
+#define nop8 nop; nop; nop; nop; nop; nop; nop; nop
+
+ .section ".start", #alloc, #execinstr
+ .text
+start:
+ .type start,#function
+
+ /* Initialize ASR, PRIV mode and INTR off */
+ mov r0, #0xD3
+ mov.a asr, r0
+
+ adr r0, LC0
+ ldm (r1, r2, r3, r5, r6, r7, r8), [r0]+
+ ldw sp, [r0+], #28
+ sub.a r0, r0, r1 @ calculate the delta offset
+
+ /*
+ * if delta is zero, we are running at the address
+ * we were linked at.
+ */
+ beq not_relocated
+
+ /*
+ * We're running at a different address. We need to fix
+ * up various pointers:
+ * r5 - zImage base address (_start)
+ * r7 - GOT start
+ * r8 - GOT end
+ */
+ add r5, r5, r0
+ add r7, r7, r0
+ add r8, r8, r0
+
+ /*
+ * we need to fix up pointers into the BSS region.
+ * r2 - BSS start
+ * r3 - BSS end
+ * sp - stack pointer
+ */
+ add r2, r2, r0
+ add r3, r3, r0
+ add sp, sp, r0
+
+ /*
+ * Relocate all entries in the GOT table.
+ * This fixes up the C references.
+ * r7 - GOT start
+ * r8 - GOT end
+ */
+1001: ldw r1, [r7+], #0
+ add r1, r1, r0
+ stw.w r1, [r7]+, #4
+ csub.a r7, r8
+ bub 1001b
+
+not_relocated:
+ /*
+ * Clear BSS region.
+ * r2 - BSS start
+ * r3 - BSS end
+ */
+ mov r0, #0
+1002: stw.w r0, [r2]+, #4
+ csub.a r2, r3
+ bub 1002b
+
+ /*
+ * Turn on the cache.
+ */
+ mov r0, #0
+ movc p0.c5, r0, #28 @ cache invalidate all
+ nop8
+ movc p0.c6, r0, #6 @ tlb invalidate all
+ nop8
+
+ mov r0, #0x1c @ en icache and wb dcache
+ movc p0.c1, r0, #0
+ nop8
+
+ /*
+ * Set up some pointers, for starting decompressing.
+ */
+
+ mov r1, sp @ malloc space above stack
+ add r2, sp, #0x10000 @ 64k max
+
+ /*
+ * Check to see if we will overwrite ourselves.
+ * r4 = final kernel address
+ * r5 = start of this image
+ * r6 = size of decompressed image
+ * r2 = end of malloc space (and therefore this image)
+ * We basically want:
+ * r4 >= r2 -> OK
+ * r4 + image length <= r5 -> OK
+ */
+ ldw r4, =KERNEL_IMAGE_START
+ csub.a r4, r2
+ bea wont_overwrite
+ add r0, r4, r6
+ csub.a r0, r5
+ beb wont_overwrite
+
+ /*
+ * If overwrite, just print error message
+ */
+ b __error_overwrite
+
+ /*
+ * We're not in danger of overwriting ourselves.
+ * Do this the simple way.
+ */
+wont_overwrite:
+ /*
+ * decompress_kernel:
+ * r0: output_start
+ * r1: free_mem_ptr_p
+ * r2: free_mem_ptr_end_p
+ */
+ mov r0, r4
+ b.l decompress_kernel @ C functions
+
+ /*
+ * Clean and flush the cache to maintain consistency.
+ */
+ mov r0, #0
+ movc p0.c5, r0, #14 @ flush dcache
+ nop8
+ movc p0.c5, r0, #20 @ icache invalidate all
+ nop8
+
+ /*
+ * Turn off the Cache and MMU.
+ */
+ mov r0, #0 @ disable i/d cache and MMU
+ movc p0.c1, r0, #0
+ nop8
+
+ mov r0, #0 @ must be zero
+ ldw r4, =KERNEL_IMAGE_START
+ mov pc, r4 @ call kernel
+
+
+ .align 2
+ .type LC0, #object
+LC0: .word LC0 @ r1
+ .word __bss_start @ r2
+ .word _end @ r3
+ .word _start @ r5
+ .word _image_size @ r6
+ .word _got_start @ r7
+ .word _got_end @ r8
+ .word decompress_stack_end @ sp
+ .size LC0, . - LC0
+
+print_string:
+#ifdef CONFIG_DEBUG_OCD
+2001: ldb.w r1, [r0]+, #1
+ csub.a r1, #0
+ bne 2002f
+ mov pc, lr
+2002:
+ movc r2, p1.c0, #0
+ cand.a r2, #2
+ bne 2002b
+ movc p1.c1, r1, #1
+ csub.a r1, #'\n'
+ cmoveq r1, #'\r'
+ beq 2002b
+ b 2001b
+#else
+ mov pc, lr
+#endif
+
+__error_overwrite:
+ adr r0, str_error
+ b.l print_string
+2001: nop8
+ b 2001b
+str_error: .asciz "\nError: Kernel address OVERWRITE\n"
+ .align
+
+ .ltorg
+
+ .align 4
+ .section ".stack", "aw", %nobits
+decompress_stack: .space 4096
+decompress_stack_end:
diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
new file mode 100644
index 000000000000..176d5bda3559
--- /dev/null
+++ b/arch/unicore32/boot/compressed/misc.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/unicore32/boot/compressed/misc.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/unaligned.h>
+#include <mach/uncompress.h>
+
+/*
+ * gzip declarations
+ */
+unsigned char *output_data;
+unsigned long output_ptr;
+
+unsigned int free_mem_ptr;
+unsigned int free_mem_end_ptr;
+
+#define STATIC static
+#define STATIC_RW_DATA /* non-static please */
+
+/*
+ * arch-dependent implementations
+ */
+#ifndef ARCH_HAVE_DECOMP_ERROR
+#define arch_decomp_error(x)
+#endif
+
+#ifndef ARCH_HAVE_DECOMP_SETUP
+#define arch_decomp_setup()
+#endif
+
+#ifndef ARCH_HAVE_DECOMP_PUTS
+#define arch_decomp_puts(p)
+#endif
+
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ int i = 0;
+ unsigned char *d = (unsigned char *)dest, *s = (unsigned char *)src;
+
+ for (i = n >> 3; i > 0; i--) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (n & 1 << 2) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (n & 1 << 1) {
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (n & 1)
+ *d++ = *s++;
+
+ return dest;
+}
+
+void error(char *x)
+{
+ arch_decomp_puts("\n\n");
+ arch_decomp_puts(x);
+ arch_decomp_puts("\n\n -- System halted");
+
+ arch_decomp_error(x);
+
+ for (;;)
+ ; /* Halt */
+}
+
+/* Heap size should be adjusted for different decompress methods */
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+unsigned long decompress_kernel(unsigned long output_start,
+ unsigned long free_mem_ptr_p,
+ unsigned long free_mem_ptr_end_p)
+{
+ unsigned char *tmp;
+
+ output_data = (unsigned char *)output_start;
+ free_mem_ptr = free_mem_ptr_p;
+ free_mem_end_ptr = free_mem_ptr_end_p;
+
+ arch_decomp_setup();
+
+ tmp = (unsigned char *) (((unsigned long)input_data_end) - 4);
+ output_ptr = get_unaligned_le32(tmp);
+
+ arch_decomp_puts("Uncompressing Linux...");
+ decompress(input_data, input_data_end - input_data, NULL, NULL,
+ output_data, NULL, error);
+ arch_decomp_puts(" done, booting the kernel.\n");
+ return output_ptr;
+}
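decompress_kernel() learns the decompressed size by reading the last four little-endian bytes of the piggybacked payload: gzip stores the uncompressed length (ISIZE) in its trailer, and the kernel build normally appends the same value for the other compressors. A small standalone sketch of that trailer read, equivalent to the get_unaligned_le32() call above:

    #include <stdint.h>

    /* read a 32-bit little-endian value, as get_unaligned_le32() does */
    static uint32_t read_le32(const unsigned char *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* given one-past-the-end of the payload, recover the decompressed size */
    static uint32_t payload_output_size(const unsigned char *payload_end)
    {
        return read_le32(payload_end - 4);
    }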
diff --git a/arch/unicore32/boot/compressed/piggy.S.in b/arch/unicore32/boot/compressed/piggy.S.in
new file mode 100644
index 000000000000..b79704d58026
--- /dev/null
+++ b/arch/unicore32/boot/compressed/piggy.S.in
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/unicore32/boot/compressed/piggy.DECOMP_SUFFIX"
+ .globl input_data_end
+input_data_end:
diff --git a/arch/unicore32/boot/compressed/vmlinux.lds.in b/arch/unicore32/boot/compressed/vmlinux.lds.in
new file mode 100644
index 000000000000..d5a3ce296239
--- /dev/null
+++ b/arch/unicore32/boot/compressed/vmlinux.lds.in
@@ -0,0 +1,61 @@
+/*
+ * linux/arch/unicore/boot/compressed/vmlinux.lds.in
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+OUTPUT_ARCH(unicore32)
+ENTRY(_start)
+SECTIONS
+{
+ /DISCARD/ : {
+ /*
+ * Discard any r/w data - this produces a link error if we have any,
+ * which is required for PIC decompression. Local data generates
+ * GOTOFF relocations, which prevents it being relocated independently
+ * of the text/got segments.
+ */
+ *(.data)
+ }
+
+ . = TEXT_START;
+ _text = .;
+
+ .text : {
+ _start = .;
+ *(.start)
+ *(.text)
+ *(.text.*)
+ *(.fixup)
+ *(.gnu.warning)
+ *(.rodata)
+ *(.rodata.*)
+ *(.piggydata)
+ . = ALIGN(4);
+ }
+
+ _etext = .;
+
+ /* Assume size of decompressed image is 4x the compressed image */
+ _image_size = (_etext - _text) * 4;
+
+ _got_start = .;
+ .got : { *(.got) }
+ _got_end = .;
+ .got.plt : { *(.got.plt) }
+ _edata = .;
+
+ . = BSS_START;
+ __bss_start = .;
+ .bss : { *(.bss) }
+ _end = .;
+
+ .stack : { *(.stack) }
+ .comment 0 : { *(.comment) }
+}
+
diff --git a/arch/unicore32/configs/debug_defconfig b/arch/unicore32/configs/debug_defconfig
new file mode 100644
index 000000000000..b5fbde9f1cb2
--- /dev/null
+++ b/arch/unicore32/configs/debug_defconfig
@@ -0,0 +1,215 @@
+### General setup
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCALVERSION="-debug"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HOTPLUG=y
+# Initial RAM filesystem and RAM disk (initramfs/initrd) support
+#CONFIG_BLK_DEV_INITRD=y
+#CONFIG_INITRAMFS_SOURCE="arch/unicore/ramfs/ramfs_config"
+
+### Enable loadable module support
+CONFIG_MODULES=n
+CONFIG_MODULE_UNLOAD=y
+
+### System Type
+CONFIG_ARCH_PUV3=y
+# Board Selection
+CONFIG_PUV3_NB0916=y
+# Processor Features
+CONFIG_CPU_DCACHE_LINE_DISABLE=y
+CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE=n
+
+### Bus support
+CONFIG_PCI=y
+CONFIG_PCI_LEGACY=n
+
+### Boot options
+# for debug, adding: earlyprintk=ocd,keep initcall_debug
+# others support: test_suspend=mem root=/dev/sda
+# hibernate support: resume=/dev/sda3
+CONFIG_CMDLINE="earlyprintk=ocd,keep ignore_loglevel"
+# TODO: mem=512M video=unifb:1024x600-16@75
+# for nfs: root=/dev/nfs rw nfsroot=192.168.10.88:/home/udb/nfs/,rsize=1024,wsize=1024
+# ip=192.168.10.83:192.168.10.88:192.168.10.1:255.255.255.0::eth0:off
+CONFIG_CMDLINE_FORCE=y
+
+### Power management options
+CONFIG_PM=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/sda3"
+CONFIG_CPU_FREQ=n
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+
+### Networking support
+CONFIG_NET=y
+# Networking options
+CONFIG_PACKET=m
+CONFIG_UNIX=m
+# TCP/IP networking
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IPV6=n
+# Wireless
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_MAC80211=m
+
+### PKUnity SoC Features
+CONFIG_USB_WLAN_HED_AQ3=n
+CONFIG_USB_CMMB_INNOFIDEI=n
+CONFIG_I2C_BATTERY_BQ27200=n
+CONFIG_I2C_EEPROM_AT24=n
+CONFIG_LCD_BACKLIGHT=n
+
+CONFIG_PUV3_RTC=y
+CONFIG_PUV3_UMAL=y
+CONFIG_PUV3_MUSB=n
+CONFIG_PUV3_AC97=n
+CONFIG_PUV3_NAND=n
+CONFIG_PUV3_MMC=n
+CONFIG_PUV3_UART=n
+
+### Device Drivers
+# Memory Technology Device (MTD) support
+CONFIG_MTD=m
+CONFIG_MTD_UBI=m
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=m
+# RAM/ROM/Flash chip drivers
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_AMDSTD=m
+# Mapping drivers for chip access
+CONFIG_MTD_PHYSMAP=m
+
+# Block devices
+CONFIG_BLK_DEV_LOOP=m
+
+# SCSI device support
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+CONFIG_ATA=y
+CONFIG_SATA_VIA=y
+
+# Network device support
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_NETDEV_1000=y
+# Wireless LAN
+CONFIG_WLAN_80211=n
+CONFIG_RT2X00=n
+CONFIG_RT73USB=n
+
+# Input device support
+CONFIG_INPUT_EVDEV=m
+# Keyboards
+CONFIG_KEYBOARD_GPIO=m
+
+# I2C support
+CONFIG_I2C=y
+CONFIG_I2C_PUV3=y
+
+# Hardware Monitoring support
+#CONFIG_SENSORS_LM75=m
+# Generic Thermal sysfs driver
+#CONFIG_THERMAL=m
+#CONFIG_THERMAL_HWMON=y
+
+# Multimedia support
+CONFIG_MEDIA_SUPPORT=n
+CONFIG_VIDEO_DEV=n
+CONFIG_USB_VIDEO_CLASS=n
+
+# Graphics support
+CONFIG_FB=y
+CONFIG_FB_PUV3_UNIGFX=y
+# Console display driver support
+CONFIG_VGA_CONSOLE=n
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# Bootup logo
+CONFIG_LOGO=n
+
+# Sound card support
+CONFIG_SOUND=m
+# Advanced Linux Sound Architecture
+CONFIG_SND=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+
+# USB support
+CONFIG_USB_ARCH_HAS_HCD=n
+CONFIG_USB=n
+CONFIG_USB_DEVICEFS=n
+CONFIG_USB_PRINTER=n
+CONFIG_USB_STORAGE=n
+# Inventra Highspeed Dual Role Controller
+CONFIG_USB_MUSB_HDRC=n
+
+# LED Support
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+# LED Triggers
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+
+# Real Time Clock
+CONFIG_RTC_LIB=m
+CONFIG_RTC_CLASS=m
+
+### File systems
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FUSE_FS=m
+# CD-ROM/DVD Filesystems
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+# DOS/FAT/NT Filesystems
+CONFIG_VFAT_FS=m
+# Pseudo filesystems
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# Miscellaneous filesystems
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_JFFS2_FS=m
+CONFIG_UBIFS_FS=m
+# Network File Systems
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+# Partition Types
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MSDOS_PARTITION=y
+# Native language support
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_UTF8=m
+
+### Kernel hacking
+CONFIG_FRAME_WARN=8096
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_FRAME_POINTER=y
+CONFIG_DEBUG_LL=y
+
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
new file mode 100644
index 000000000000..b200fdaca44d
--- /dev/null
+++ b/arch/unicore32/include/asm/Kbuild
@@ -0,0 +1,2 @@
+include include/asm-generic/Kbuild.asm
+
diff --git a/arch/unicore32/include/asm/assembler.h b/arch/unicore32/include/asm/assembler.h
new file mode 100644
index 000000000000..8e87ed7faeba
--- /dev/null
+++ b/arch/unicore32/include/asm/assembler.h
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/unicore32/include/asm/assembler.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Do not include any C declarations in this file - it is included by
+ * assembler source.
+ */
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#include <asm/ptrace.h>
+
+/*
+ * Little Endian independent macros for shifting bytes within registers.
+ */
+#define pull >>
+#define push <<
+#define get_byte_0 << #0
+#define get_byte_1 >> #8
+#define get_byte_2 >> #16
+#define get_byte_3 >> #24
+#define put_byte_0 << #0
+#define put_byte_1 << #8
+#define put_byte_2 << #16
+#define put_byte_3 << #24
+
+#define cadd cmpadd
+#define cand cmpand
+#define csub cmpsub
+#define cxor cmpxor
+
+/*
+ * Enable and disable interrupts
+ */
+ .macro disable_irq, temp
+ mov \temp, asr
+ andn \temp, \temp, #0xFF
+ or \temp, \temp, #PSR_I_BIT | PRIV_MODE
+ mov.a asr, \temp
+ .endm
+
+ .macro enable_irq, temp
+ mov \temp, asr
+ andn \temp, \temp, #0xFF
+ or \temp, \temp, #PRIV_MODE
+ mov.a asr, \temp
+ .endm
+
+#define USER(x...) \
+9999: x; \
+ .pushsection __ex_table, "a"; \
+ .align 3; \
+ .long 9999b, 9001f; \
+ .popsection
+
+ .macro notcond, cond, nexti = .+8
+ .ifc \cond, eq
+ bne \nexti
+ .else; .ifc \cond, ne
+ beq \nexti
+ .else; .ifc \cond, ea
+ bub \nexti
+ .else; .ifc \cond, ub
+ bea \nexti
+ .else; .ifc \cond, fs
+ bns \nexti
+ .else; .ifc \cond, ns
+ bfs \nexti
+ .else; .ifc \cond, fv
+ bnv \nexti
+ .else; .ifc \cond, nv
+ bfv \nexti
+ .else; .ifc \cond, ua
+ beb \nexti
+ .else; .ifc \cond, eb
+ bua \nexti
+ .else; .ifc \cond, eg
+ bsl \nexti
+ .else; .ifc \cond, sl
+ beg \nexti
+ .else; .ifc \cond, sg
+ bel \nexti
+ .else; .ifc \cond, el
+ bsg \nexti
+ .else; .ifnc \cond, al
+ .error "Unknown cond in notcond macro argument"
+ .endif; .endif; .endif; .endif; .endif; .endif; .endif
+ .endif; .endif; .endif; .endif; .endif; .endif; .endif
+ .endif
+ .endm
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ .rept \rept
+ notcond \cond, .+8
+9999 :
+ .if \inc == 1
+ \instr\()b.u \reg, [\ptr], #\inc
+ .elseif \inc == 4
+ \instr\()w.u \reg, [\ptr], #\inc
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 9999b, \abort
+ .popsection
+ .endr
+ .endm
+
+ .macro strusr, reg, ptr, inc, cond = al, rept = 1, abort = 9001f
+ usracc st, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro ldrusr, reg, ptr, inc, cond = al, rept = 1, abort = 9001f
+ usracc ld, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro nop8
+ .rept 8
+ nop
+ .endr
+ .endm
diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h
new file mode 100644
index 000000000000..1628a6328994
--- /dev/null
+++ b/arch/unicore32/include/asm/bitops.h
@@ -0,0 +1,47 @@
+/*
+ * linux/arch/unicore32/include/asm/bitops.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_BITOPS_H__
+#define __UNICORE_BITOPS_H__
+
+#define find_next_bit __uc32_find_next_bit
+#define find_next_zero_bit __uc32_find_next_zero_bit
+
+#define find_first_bit __uc32_find_first_bit
+#define find_first_zero_bit __uc32_find_first_zero_bit
+
+#define _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+/*
+ * On UNICORE, those functions can be implemented around
+ * the cntlz instruction for much better code efficiency.
+ */
+
+static inline int fls(int x)
+{
+ int ret;
+
+ asm("cntlz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
+ ret = 32 - ret;
+
+ return ret;
+}
+
+#define __fls(x) (fls(x) - 1)
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+#define __ffs(x) (ffs(x) - 1)
+
+#include <asm-generic/bitops.h>
+
+#endif /* __UNICORE_BITOPS_H__ */
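fls() maps directly onto the cntlz (count leading zeros) instruction, and the remaining primitives are pure identities on top of it: __fls(x) = fls(x) - 1, ffs(x) isolates the lowest set bit with x & -x before reusing fls(), and __ffs(x) = ffs(x) - 1. A portable check of those identities, with __builtin_clz() standing in for cntlz:

    #include <assert.h>

    /* portable stand-in for the cntlz-based fls(): 1-based position of the
     * highest set bit, 0 when no bit is set */
    static int fls_sketch(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int x = 0x0050;                 /* bits 4 and 6 set */

        assert(fls_sketch(x) == 7);              /* fls(): highest bit is bit 6 */
        assert(fls_sketch(x) - 1 == 6);          /* __fls() */
        assert(fls_sketch(x & -x) == 5);         /* ffs(): lowest bit is bit 4 */
        assert(fls_sketch(x & -x) - 1 == 4);     /* __ffs() */
        return 0;
    }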
diff --git a/arch/unicore32/include/asm/byteorder.h b/arch/unicore32/include/asm/byteorder.h
new file mode 100644
index 000000000000..ebe1b3fef3e3
--- /dev/null
+++ b/arch/unicore32/include/asm/byteorder.h
@@ -0,0 +1,24 @@
+/*
+ * linux/arch/unicore32/include/asm/byteorder.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UniCore ONLY supports Little Endian mode; the data bus is connected such
+ * that byte accesses appear as:
+ * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31
+ * and word accesses (data or instruction) appear as:
+ * d0...d31
+ */
+#ifndef __UNICORE_BYTEORDER_H__
+#define __UNICORE_BYTEORDER_H__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif
+
diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
new file mode 100644
index 000000000000..ad8f795d86ca
--- /dev/null
+++ b/arch/unicore32/include/asm/cache.h
@@ -0,0 +1,27 @@
+/*
+ * linux/arch/unicore32/include/asm/cache.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CACHE_H__
+#define __UNICORE_CACHE_H__
+
+#define L1_CACHE_SHIFT (5)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif
diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h
new file mode 100644
index 000000000000..c0301e6c8b81
--- /dev/null
+++ b/arch/unicore32/include/asm/cacheflush.h
@@ -0,0 +1,211 @@
+/*
+ * linux/arch/unicore32/include/asm/cacheflush.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CACHEFLUSH_H__
+#define __UNICORE_CACHEFLUSH_H__
+
+#include <linux/mm.h>
+
+#include <asm/shmparam.h>
+
+#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
+/*
+ * MM Cache Management
+ * ===================
+ *
+ * The arch/unicore32/mm/cache.S files implement these methods.
+ *
+ * Start addresses are inclusive and end addresses are exclusive;
+ * start addresses should be rounded down, end addresses up.
+ *
+ * See Documentation/cachetlb.txt for more information.
+ * Please note that the implementation of these, and the required
+ * effects are cache-type (VIVT/VIPT/PIPT) specific.
+ *
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ * Currently only needed for cache-v6.S and cache-v7.S, see
+ * __flush_icache_all for the generic implementation.
+ *
+ * flush_kern_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
+ *
+ * flush_user_all()
+ *
+ * Clean and invalidate all user space cache entries
+ * before a change of page tables.
+ *
+ * flush_user_range(start, end, flags)
+ *
+ * Clean and invalidate a range of cache entries in the
+ * specified address space before a change of page tables.
+ * - start - user start address (inclusive, page aligned)
+ * - end - user end address (exclusive, page aligned)
+ * - flags - vma->vm_flags field
+ *
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * coherent_user_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * flush_kern_dcache_area(kaddr, size)
+ *
+ * Ensure that the data held in page is written back.
+ * - kaddr - page address
+ * - size - region size
+ *
+ * DMA Cache Coherency
+ * ===================
+ *
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+
+extern void __cpuc_flush_icache_all(void);
+extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_user_all(void);
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern void __cpuc_flush_dcache_area(void *, size_t);
+extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
+extern void __cpuc_dma_flush_range(unsigned long, unsigned long);
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space. Really, we want to allow our "user
+ * space" model to handle this.
+ */
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ unsigned long, void *, const void *, unsigned long);
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ } while (0)
+
+/*
+ * Convert calls to our calling convention.
+ */
+/* Invalidate I-cache */
+static inline void __flush_icache_all(void)
+{
+ asm("movc p0.c5, %0, #20;\n"
+ "nop; nop; nop; nop; nop; nop; nop; nop\n"
+ :
+ : "r" (0));
+}
+
+#define flush_cache_all() __cpuc_flush_kern_all()
+
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn);
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/*
+ * flush_cache_user_range is used when we want to ensure that the
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the UniCore private sys_cacheflush system call.
+ */
+#define flush_cache_user_range(vma, start, end) \
+ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+
+/*
+ * Perform necessary cache operations to ensure that data previously
+ * stored within this range of addresses can be executed by the CPU.
+ */
+#define flush_icache_range(s, e) __cpuc_coherent_kern_range(s, e)
+
+/*
+ * Perform necessary cache operations to ensure that the TLB will
+ * see data written in the specified area.
+ */
+#define clean_dcache_area(start, size) cpu_dcache_clean_area(start, size)
+
+/*
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space. This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
+ */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *);
+
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
+#define flush_icache_user_range(vma, page, addr, len) \
+ flush_dcache_page(page)
+
+/*
+ * We don't appear to need to do anything here. In fact, if we did, we'd
+ * duplicate cache flushing elsewhere performed by flush_dcache_page().
+ */
+#define flush_icache_page(vma, page) do { } while (0)
+
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+
+#endif
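The comment block above separates D-cache maintenance from I-cache invalidation on this Harvard-cache CPU; the usual consumer-side pattern is to write instructions through the D-cache and then run flush_icache_range() over the written span before executing it. A minimal sketch, assuming a hypothetical helper that installs freshly generated code:

    #include <linux/string.h>
    #include <asm/cacheflush.h>

    /* hypothetical helper: copy generated instructions into place and make
     * them visible to the instruction fetcher */
    static void install_code(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
    }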
diff --git a/arch/unicore32/include/asm/checksum.h b/arch/unicore32/include/asm/checksum.h
new file mode 100644
index 000000000000..f55c3f937c3e
--- /dev/null
+++ b/arch/unicore32/include/asm/checksum.h
@@ -0,0 +1,41 @@
+/*
+ * linux/arch/unicore32/include/asm/checksum.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * IP checksum routines
+ */
+#ifndef __UNICORE_CHECKSUM_H__
+#define __UNICORE_CHECKSUM_H__
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__(
+ "add.a %0, %1, %2\n"
+ "addc.a %0, %0, %3\n"
+ "addc.a %0, %0, %4 << #8\n"
+ "addc.a %0, %0, %5\n"
+ "addc %0, %0, #0\n"
+ : "=&r"(sum)
+ : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
+ : "cc");
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif
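csum_tcpudp_nofold() folds the TCP/UDP pseudo-header (source and destination address, protocol and length) into the running 32-bit one's-complement sum without the final 16-bit fold; the inline assembly simply chains the additions through the carry flag. A portable sketch of the same accumulation using a 64-bit accumulator, following the byte placement used by the generic little-endian implementation in lib/checksum.c:

    #include <stdint.h>

    /* fold a 64-bit accumulator back into 32 bits of one's-complement sum */
    static uint32_t from64to32(uint64_t x)
    {
        x = (x & 0xffffffffULL) + (x >> 32);
        x = (x & 0xffffffffULL) + (x >> 32);
        return (uint32_t)x;
    }

    /* pseudo-header accumulation for a little-endian host (unfolded result) */
    static uint32_t csum_tcpudp_nofold_sketch(uint32_t saddr, uint32_t daddr,
                                              uint16_t len, uint16_t proto,
                                              uint32_t sum)
    {
        uint64_t s = sum;

        s += saddr;
        s += daddr;
        s += (uint32_t)(proto + len) << 8;  /* proto/len placed as on little endian */
        return from64to32(s);
    }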
diff --git a/arch/unicore32/include/asm/cpu-single.h b/arch/unicore32/include/asm/cpu-single.h
new file mode 100644
index 000000000000..0f55d1823439
--- /dev/null
+++ b/arch/unicore32/include/asm/cpu-single.h
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/unicore32/include/asm/cpu-single.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CPU_SINGLE_H__
+#define __UNICORE_CPU_SINGLE_H__
+
+#include <asm/page.h>
+#include <asm/memory.h>
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#define cpu_switch_mm(pgd, mm) cpu_do_switch_mm(virt_to_phys(pgd), mm)
+
+#define cpu_get_pgd() \
+ ({ \
+ unsigned long pg; \
+ __asm__("movc %0, p0.c2, #0" \
+ : "=r" (pg) : : "cc"); \
+ pg &= ~0x0fff; \
+ (pgd_t *)phys_to_virt(pg); \
+ })
+
+struct mm_struct;
+
+/* declare all the functions as extern */
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte(pte_t *ptep, pte_t pte);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* __UNICORE_CPU_SINGLE_H__ */
diff --git a/arch/unicore32/include/asm/cputype.h b/arch/unicore32/include/asm/cputype.h
new file mode 100644
index 000000000000..ec1a30f98077
--- /dev/null
+++ b/arch/unicore32/include/asm/cputype.h
@@ -0,0 +1,33 @@
+/*
+ * linux/arch/unicore32/include/asm/cputype.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CPUTYPE_H__
+#define __UNICORE_CPUTYPE_H__
+
+#include <linux/stringify.h>
+
+#define CPUID_CPUID 0
+#define CPUID_CACHETYPE 1
+
+#define read_cpuid(reg) \
+ ({ \
+ unsigned int __val; \
+ asm("movc %0, p0.c0, #" __stringify(reg) \
+ : "=r" (__val) \
+ : \
+ : "cc"); \
+ __val; \
+ })
+
+#define uc32_cpuid read_cpuid(CPUID_CPUID)
+#define uc32_cachetype read_cpuid(CPUID_CACHETYPE)
+
+#endif
diff --git a/arch/unicore32/include/asm/delay.h b/arch/unicore32/include/asm/delay.h
new file mode 100644
index 000000000000..164ae61cd6f7
--- /dev/null
+++ b/arch/unicore32/include/asm/delay.h
@@ -0,0 +1,52 @@
+/*
+ * linux/arch/unicore32/include/asm/delay.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+#ifndef __UNICORE_DELAY_H__
+#define __UNICORE_DELAY_H__
+
+#include <asm/param.h> /* HZ */
+
+extern void __delay(int loops);
+
+/*
+ * This function intentionally does not exist; if you see references to
+ * it, it means that you're calling udelay() with an out of range value.
+ *
+ * With currently imposed limits, this means that we support a max delay
+ * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ */
+extern void __bad_udelay(void);
+
+/*
+ * division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long);
+
+#define MAX_UDELAY_MS 2
+
+#define udelay(n) \
+ (__builtin_constant_p(n) ? \
+ ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \
+ __const_udelay((n) * ((2199023U*HZ)>>11))) : \
+ __udelay(n))
+
+#endif /* __UNICORE_DELAY_H__ */
+
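The udelay() macro above turns an out-of-range compile-time constant into a link error: __bad_udelay() is declared but never defined, so the reference only survives (and fails the link) when the constant check selects it; non-constant arguments fall through to __udelay() at run time. A minimal user-space sketch of the same pattern follows; the names bad_delay, real_delay and MAX_US are illustrative, not from the patch.

    /* Sketch of the link-time range check used by udelay() above.
     * bad_delay() is intentionally left undefined, so a too-large
     * compile-time constant argument produces an unresolved symbol
     * instead of a silently wrong delay.
     */
    extern void bad_delay(void);            /* no definition anywhere */
    extern void real_delay(unsigned long);  /* the real implementation */

    #define MAX_US 2000
    #define delay_us(n)                                     \
            (__builtin_constant_p(n) && (n) > MAX_US ?      \
                    bad_delay() : real_delay(n))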
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..9258e592f414
--- /dev/null
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/unicore32/include/asm/dma-mapping.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_DMA_MAPPING_H__
+#define __UNICORE_DMA_MAPPING_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
+
+#include <asm-generic/dma-coherent.h>
+
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+
+extern struct dma_map_ops swiotlb_dma_map_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &swiotlb_dma_map_ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return 0;
+
+ return dma_ops->dma_supported(dev, mask);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+
+ return 0;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (dev && dev->dma_mask)
+ return addr + size - 1 <= *dev->dma_mask;
+
+ return 1;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr,
+ size_t size, enum dma_data_direction direction)
+{
+ unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
+
+ switch (direction) {
+ case DMA_NONE:
+ BUG();
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ __cpuc_dma_flush_range(start, end);
+ break;
+ case DMA_TO_DEVICE: /* writeback only */
+ __cpuc_dma_clean_range(start, end);
+ break;
+ }
+}
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/unicore32/include/asm/dma.h b/arch/unicore32/include/asm/dma.h
new file mode 100644
index 000000000000..38dfff9df32f
--- /dev/null
+++ b/arch/unicore32/include/asm/dma.h
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/unicore32/include/asm/dma.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_DMA_H__
+#define __UNICORE_DMA_H__
+
+#include <asm/memory.h>
+#include <asm-generic/dma.h>
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#endif
+
+#endif /* __UNICORE_DMA_H__ */
diff --git a/arch/unicore32/include/asm/elf.h b/arch/unicore32/include/asm/elf.h
new file mode 100644
index 000000000000..829042d07722
--- /dev/null
+++ b/arch/unicore32/include/asm/elf.h
@@ -0,0 +1,94 @@
+/*
+ * linux/arch/unicore32/include/asm/elf.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_ELF_H__
+#define __UNICORE_ELF_H__
+
+#include <asm/hwcap.h>
+
+/*
+ * ELF register definitions..
+ */
+#include <asm/ptrace.h>
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_freg_t[3];
+
+#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct fp_state elf_fpregset_t;
+
+#define EM_UNICORE 110
+
+#define R_UNICORE_NONE 0
+#define R_UNICORE_PC24 1
+#define R_UNICORE_ABS32 2
+#define R_UNICORE_CALL 28
+#define R_UNICORE_JUMP24 29
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_UNICORE
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ *
+ */
+#define ELF_PLATFORM_SIZE 8
+#define ELF_PLATFORM (elf_platform)
+
+extern char elf_platform[];
+
+struct elf32_hdr;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+struct task_struct;
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ have no such handler. */
+#define ELF_PLAT_INIT(_r, load_addr) {(_r)->UCreg_00 = 0; }
+
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
+extern int vectors_user_mapping(void);
+#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+
+#endif
diff --git a/arch/unicore32/include/asm/fpstate.h b/arch/unicore32/include/asm/fpstate.h
new file mode 100644
index 000000000000..ba97fac6220d
--- /dev/null
+++ b/arch/unicore32/include/asm/fpstate.h
@@ -0,0 +1,26 @@
+/*
+ * linux/arch/unicore32/include/asm/fpstate.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_FPSTATE_H__
+#define __UNICORE_FPSTATE_H__
+
+#ifndef __ASSEMBLY__
+
+#define FP_REGS_NUMBER 33
+
+struct fp_state {
+ unsigned int regs[FP_REGS_NUMBER];
+} __attribute__((aligned(8)));
+
+#endif
+
+#endif
diff --git a/arch/unicore32/include/asm/fpu-ucf64.h b/arch/unicore32/include/asm/fpu-ucf64.h
new file mode 100644
index 000000000000..16c1457882ee
--- /dev/null
+++ b/arch/unicore32/include/asm/fpu-ucf64.h
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/unicore32/include/asm/fpu-ucf64.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define FPSCR s31
+
+/* FPSCR bits */
+#define FPSCR_DEFAULT_NAN (1<<25)
+
+#define FPSCR_CMPINSTR_BIT (1<<31)
+
+#define FPSCR_CON (1<<29)
+#define FPSCR_TRAP (1<<27)
+
+/* RND mode */
+#define FPSCR_ROUND_NEAREST (0<<0)
+#define FPSCR_ROUND_PLUSINF (2<<0)
+#define FPSCR_ROUND_MINUSINF (3<<0)
+#define FPSCR_ROUND_TOZERO (1<<0)
+#define FPSCR_RMODE_BIT (0)
+#define FPSCR_RMODE_MASK (7 << FPSCR_RMODE_BIT)
+
+/* trap enable */
+#define FPSCR_IOE (1<<16)
+#define FPSCR_OFE (1<<14)
+#define FPSCR_UFE (1<<13)
+#define FPSCR_IXE (1<<12)
+#define FPSCR_HIE (1<<11)
+#define FPSCR_NDE (1<<10) /* non-denormal */
+
+/* flags */
+#define FPSCR_IDC (1<<24)
+#define FPSCR_HIC (1<<23)
+#define FPSCR_IXC (1<<22)
+#define FPSCR_OFC (1<<21)
+#define FPSCR_UFC (1<<20)
+#define FPSCR_IOC (1<<19)
+
+/* sticky bits */
+#define FPSCR_IOS (1<<9)
+#define FPSCR_OFS (1<<7)
+#define FPSCR_UFS (1<<6)
+#define FPSCR_IXS (1<<5)
+#define FPSCR_HIS (1<<4)
+#define FPSCR_NDS (1<<3) /* non-denormal */
diff --git a/arch/unicore32/include/asm/futex.h b/arch/unicore32/include/asm/futex.h
new file mode 100644
index 000000000000..07dea6170558
--- /dev/null
+++ b/arch/unicore32/include/asm/futex.h
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/unicore32/include/asm/futex.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_FUTEX_H__
+#define __UNICORE_FUTEX_H__
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile__( \
+ "1: ldw.u %1, [%2]\n" \
+ " " insn "\n" \
+ "2: stw.u %0, [%2]\n" \
+ " mov %0, #0\n" \
+ "3:\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f, 2b, 4f\n" \
+ " .popsection\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ "4: mov %0, %4\n" \
+ " b 3b\n" \
+ " .popsection" \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %0, %1, %3",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ int val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldw.u %0, [%3]\n"
+ " cmpxor.a %0, %1\n"
+ " bne 3f\n"
+ "2: stw.u %2, [%3]\n"
+ "3:\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .long 1b, 4f, 2b, 4f\n"
+ " .popsection\n"
+ " .pushsection .fixup,\"ax\"\n"
+ "4: mov %0, %4\n"
+ " b 3b\n"
+ " .popsection"
+ : "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ return val;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __UNICORE_FUTEX_H__ */
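futex_atomic_op_inuser() above unpacks encoded_op into the op and cmp codes in the top two nibbles and two signed 12-bit arguments (oparg, cmparg) below them; the paired left/right shifts in the kernel code are a sign-extension trick. A small stand-alone sketch of the same decoding, using an explicit sign-extend helper instead of the shift pair:

    #include <stdio.h>

    /* Sign-extend a 12-bit field: subtract 2^12 when its sign bit is set.
     * Equivalent to the kernel's "(encoded_op << N) >> 20" shift pairs.
     */
    static int sign_extend12(unsigned int v)
    {
            return (int)v - ((v & 0x800) ? 0x1000 : 0);
    }

    int main(void)
    {
            unsigned int encoded_op = (1u << 28) | (2u << 24) |
                                      (0xffeu << 12) | 0x005u;
            int op     = (encoded_op >> 28) & 7;
            int cmp    = (encoded_op >> 24) & 15;
            int oparg  = sign_extend12((encoded_op >> 12) & 0xfff);
            int cmparg = sign_extend12(encoded_op & 0xfff);

            /* prints: op=1 cmp=2 oparg=-2 cmparg=5 */
            printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
                   op, cmp, oparg, cmparg);
            return 0;
    }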
diff --git a/arch/unicore32/include/asm/gpio.h b/arch/unicore32/include/asm/gpio.h
new file mode 100644
index 000000000000..2716f14e3ff6
--- /dev/null
+++ b/arch/unicore32/include/asm/gpio.h
@@ -0,0 +1,104 @@
+/*
+ * linux/arch/unicore32/include/asm/gpio.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_GPIO_H__
+#define __UNICORE_GPIO_H__
+
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <asm-generic/gpio.h>
+
+#define GPI_OTP_INT 0
+#define GPI_PCI_INTA 1
+#define GPI_PCI_INTB 2
+#define GPI_PCI_INTC 3
+#define GPI_PCI_INTD 4
+#define GPI_BAT_DET 5
+#define GPI_SD_CD 6
+#define GPI_SOFF_REQ 7
+#define GPI_SD_WP 8
+#define GPI_LCD_CASE_OFF 9
+#define GPO_WIFI_EN 10
+#define GPO_HDD_LED 11
+#define GPO_VGA_EN 12
+#define GPO_LCD_EN 13
+#define GPO_LED_DATA 14
+#define GPO_LED_CLK 15
+#define GPO_CAM_PWR_EN 16
+#define GPO_LCD_VCC_EN 17
+#define GPO_SOFT_OFF 18
+#define GPO_BT_EN 19
+#define GPO_FAN_ON 20
+#define GPO_SPKR 21
+#define GPO_SET_V1 23
+#define GPO_SET_V2 24
+#define GPO_CPU_HEALTH 25
+#define GPO_LAN_SEL 26
+
+#ifdef CONFIG_PUV3_NB0916
+#define GPI_BTN_TOUCH 14
+#define GPIO_IN 0x000043ff /* 1 for input */
+#define GPIO_OUT 0x0fffbc00 /* 1 for output */
+#endif /* CONFIG_PUV3_NB0916 */
+
+#ifdef CONFIG_PUV3_SMW0919
+#define GPIO_IN 0x000003ff /* 1 for input */
+#define GPIO_OUT 0x0ffffc00 /* 1 for output */
+#endif /* CONFIG_PUV3_SMW0919 */
+
+#ifdef CONFIG_PUV3_DB0913
+#define GPIO_IN 0x000001df /* 1 for input */
+#define GPIO_OUT 0x03fee800 /* 1 for output */
+#endif /* CONFIG_PUV3_DB0913 */
+
+#define GPIO_DIR (~((GPIO_IN) | 0xf0000000))
+ /* 0 input, 1 output */
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
+ return readl(GPIO_GPLR) & GPIO_GPIO(gpio);
+ else
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
+ if (value)
+ writel(GPIO_GPIO(gpio), GPIO_GPSR);
+ else
+ writel(GPIO_GPIO(gpio), GPIO_GPCR);
+ else
+ __gpio_set_value(gpio, value);
+}
+
+#define gpio_cansleep __gpio_cansleep
+
+static inline unsigned gpio_to_irq(unsigned gpio)
+{
+ if ((gpio < IRQ_GPIOHIGH) && (FIELD(1, 1, gpio) & readl(GPIO_GPIR)))
+ return IRQ_GPIOLOW0 + gpio;
+ else
+ return IRQ_GPIO0 + gpio;
+}
+
+static inline unsigned irq_to_gpio(unsigned irq)
+{
+ if (irq < IRQ_GPIOHIGH)
+ return irq - IRQ_GPIOLOW0;
+ else
+ return irq - IRQ_GPIO0;
+}
+
+#endif /* __UNICORE_GPIO_H__ */
diff --git a/arch/unicore32/include/asm/hwcap.h b/arch/unicore32/include/asm/hwcap.h
new file mode 100644
index 000000000000..97bd40fdd4ac
--- /dev/null
+++ b/arch/unicore32/include/asm/hwcap.h
@@ -0,0 +1,32 @@
+/*
+ * linux/arch/unicore32/include/asm/hwcap.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_HWCAP_H__
+#define __UNICORE_HWCAP_H__
+
+/*
+ * HWCAP flags
+ */
+#define HWCAP_MSP 1
+#define HWCAP_UNICORE16 2
+#define HWCAP_CMOV 4
+#define HWCAP_UNICORE_F64 8
+#define HWCAP_TLS 0x80
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (HWCAP_CMOV | HWCAP_UNICORE_F64)
+#endif
+
+#endif
diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h
new file mode 100644
index 000000000000..2483fcbadbe4
--- /dev/null
+++ b/arch/unicore32/include/asm/io.h
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/unicore32/include/asm/io.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_IO_H__
+#define __UNICORE_IO_H__
+
+#ifdef __KERNEL__
+
+#include <asm/byteorder.h>
+#include <asm/memory.h>
+#include <asm/system.h>
+
+#define PCI_IOBASE io_p2v(PKUNITY_PCILIO_BASE)
+#include <asm-generic/io.h>
+
+/*
+ * __uc32_ioremap and __uc32_ioremap_cached take a CPU physical address.
+ */
+extern void __iomem *__uc32_ioremap(unsigned long, size_t);
+extern void __iomem *__uc32_ioremap_cached(unsigned long, size_t);
+extern void __uc32_iounmap(volatile void __iomem *addr);
+
+/*
+ * ioremap and friends.
+ *
+ * ioremap takes a PCI memory address, as specified in
+ * Documentation/IO-mapping.txt.
+ *
+ */
+#define ioremap(cookie, size) __uc32_ioremap(cookie, size)
+#define ioremap_cached(cookie, size) __uc32_ioremap_cached(cookie, size)
+#define iounmap(cookie) __uc32_iounmap(cookie)
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#undef xlate_dev_mem_ptr
+#define xlate_dev_mem_ptr(p) __va(p)
+
+#define HAVE_ARCH_PIO_SIZE
+#define PIO_OFFSET (unsigned int)(PCI_IOBASE)
+#define PIO_MASK (unsigned int)(IO_SPACE_LIMIT)
+#define PIO_RESERVED (PIO_OFFSET + PIO_MASK + 1)
+
+#endif /* __KERNEL__ */
+#endif /* __UNICORE_IO_H__ */
diff --git a/arch/unicore32/include/asm/irq.h b/arch/unicore32/include/asm/irq.h
new file mode 100644
index 000000000000..baea93e2a6e6
--- /dev/null
+++ b/arch/unicore32/include/asm/irq.h
@@ -0,0 +1,105 @@
+/*
+ * linux/arch/unicore32/include/asm/irq.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_IRQ_H__
+#define __UNICORE_IRQ_H__
+
+#include <asm-generic/irq.h>
+
+#define IRQ_GPIOLOW0 0x00
+#define IRQ_GPIOLOW1 0x01
+#define IRQ_GPIOLOW2 0x02
+#define IRQ_GPIOLOW3 0x03
+#define IRQ_GPIOLOW4 0x04
+#define IRQ_GPIOLOW5 0x05
+#define IRQ_GPIOLOW6 0x06
+#define IRQ_GPIOLOW7 0x07
+#define IRQ_GPIOHIGH 0x08
+#define IRQ_USB 0x09
+#define IRQ_SDC 0x0a
+#define IRQ_AC97 0x0b
+#define IRQ_SATA 0x0c
+#define IRQ_MME 0x0d
+#define IRQ_PCI_BRIDGE 0x0e
+#define IRQ_DDR 0x0f
+#define IRQ_SPI 0x10
+#define IRQ_UNIGFX 0x11
+#define IRQ_I2C 0x11
+#define IRQ_UART1 0x12
+#define IRQ_UART0 0x13
+#define IRQ_UMAL 0x14
+#define IRQ_NAND 0x15
+#define IRQ_PS2_KBD 0x16
+#define IRQ_PS2_AUX 0x17
+#define IRQ_DMA 0x18
+#define IRQ_DMAERR 0x19
+#define IRQ_TIMER0 0x1a
+#define IRQ_TIMER1 0x1b
+#define IRQ_TIMER2 0x1c
+#define IRQ_TIMER3 0x1d
+#define IRQ_RTC 0x1e
+#define IRQ_RTCAlarm 0x1f
+
+#define IRQ_GPIO0 0x20
+#define IRQ_GPIO1 0x21
+#define IRQ_GPIO2 0x22
+#define IRQ_GPIO3 0x23
+#define IRQ_GPIO4 0x24
+#define IRQ_GPIO5 0x25
+#define IRQ_GPIO6 0x26
+#define IRQ_GPIO7 0x27
+#define IRQ_GPIO8 0x28
+#define IRQ_GPIO9 0x29
+#define IRQ_GPIO10 0x2a
+#define IRQ_GPIO11 0x2b
+#define IRQ_GPIO12 0x2c
+#define IRQ_GPIO13 0x2d
+#define IRQ_GPIO14 0x2e
+#define IRQ_GPIO15 0x2f
+#define IRQ_GPIO16 0x30
+#define IRQ_GPIO17 0x31
+#define IRQ_GPIO18 0x32
+#define IRQ_GPIO19 0x33
+#define IRQ_GPIO20 0x34
+#define IRQ_GPIO21 0x35
+#define IRQ_GPIO22 0x36
+#define IRQ_GPIO23 0x37
+#define IRQ_GPIO24 0x38
+#define IRQ_GPIO25 0x39
+#define IRQ_GPIO26 0x3a
+#define IRQ_GPIO27 0x3b
+
+#ifdef CONFIG_ARCH_FPGA
+#define IRQ_PCIINTA IRQ_GPIOLOW2
+#define IRQ_PCIINTB IRQ_GPIOLOW1
+#define IRQ_PCIINTC IRQ_GPIOLOW0
+#define IRQ_PCIINTD IRQ_GPIOLOW6
+#endif
+
+#if defined(CONFIG_PUV3_DB0913) || defined(CONFIG_PUV3_NB0916) \
+ || defined(CONFIG_PUV3_SMW0919)
+#define IRQ_PCIINTA IRQ_GPIOLOW1
+#define IRQ_PCIINTB IRQ_GPIOLOW2
+#define IRQ_PCIINTC IRQ_GPIOLOW3
+#define IRQ_PCIINTD IRQ_GPIOLOW4
+#endif
+
+#define IRQ_SD_CD IRQ_GPIO6 /* falling or rising trigger */
+
+#ifndef __ASSEMBLY__
+struct pt_regs;
+
+extern void asm_do_IRQ(unsigned int, struct pt_regs *);
+
+#endif
+
+#endif
+
diff --git a/arch/unicore32/include/asm/irqflags.h b/arch/unicore32/include/asm/irqflags.h
new file mode 100644
index 000000000000..6d8a28dfdbae
--- /dev/null
+++ b/arch/unicore32/include/asm/irqflags.h
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/unicore32/include/asm/irqflags.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_IRQFLAGS_H__
+#define __UNICORE_IRQFLAGS_H__
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+#define ARCH_IRQ_DISABLED (PRIV_MODE | PSR_I_BIT)
+#define ARCH_IRQ_ENABLED (PRIV_MODE)
+
+/*
+ * Save the current interrupt enable state.
+ */
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ asm volatile("mov %0, asr" : "=r" (temp) : : "memory", "cc");
+
+ return temp & PSR_c;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long temp;
+
+ asm volatile(
+ "mov %0, asr\n"
+ "mov.a asr, %1\n"
+ "mov.f asr, %0"
+ : "=&r" (temp)
+ : "r" (flags)
+ : "memory", "cc");
+}
+
+#include <asm-generic/irqflags.h>
+
+#endif
+#endif
diff --git a/arch/unicore32/include/asm/linkage.h b/arch/unicore32/include/asm/linkage.h
new file mode 100644
index 000000000000..d1618bd35b67
--- /dev/null
+++ b/arch/unicore32/include/asm/linkage.h
@@ -0,0 +1,22 @@
+/*
+ * linux/arch/unicore32/include/asm/linkage.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_LINKAGE_H__
+#define __UNICORE_LINKAGE_H__
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#define ENDPROC(name) \
+ .type name, %function; \
+ END(name)
+
+#endif
diff --git a/arch/unicore32/include/asm/memblock.h b/arch/unicore32/include/asm/memblock.h
new file mode 100644
index 000000000000..a8a5d8d0a26e
--- /dev/null
+++ b/arch/unicore32/include/asm/memblock.h
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/unicore32/include/asm/memblock.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_MEMBLOCK_H__
+#define __UNICORE_MEMBLOCK_H__
+
+/*
+ * Memory map description
+ */
+# define NR_BANKS 8
+
+struct membank {
+ unsigned long start;
+ unsigned long size;
+ unsigned int highmem;
+};
+
+struct meminfo {
+ int nr_banks;
+ struct membank bank[NR_BANKS];
+};
+
+extern struct meminfo meminfo;
+
+#define for_each_bank(iter, mi) \
+ for (iter = 0; iter < (mi)->nr_banks; iter++)
+
+#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank) ((bank)->start)
+#define bank_phys_end(bank) ((bank)->start + (bank)->size)
+#define bank_phys_size(bank) ((bank)->size)
+
+extern void uc32_memblock_init(struct meminfo *);
+
+#endif
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h
new file mode 100644
index 000000000000..5eddb997defe
--- /dev/null
+++ b/arch/unicore32/include/asm/memory.h
@@ -0,0 +1,123 @@
+/*
+ * linux/arch/unicore32/include/asm/memory.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note: this file should not be included by non-asm/.h files
+ */
+#ifndef __UNICORE_MEMORY_H__
+#define __UNICORE_MEMORY_H__
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <asm/sizes.h>
+#include <mach/memory.h>
+
+/*
+ * Allow for constants defined here to be used from assembly code
+ * by prepending the UL suffix only with actual C code compilation.
+ */
+#define UL(x) _AC(x, UL)
+
+/*
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
+ */
+#define PAGE_OFFSET UL(0xC0000000)
+#define TASK_SIZE (PAGE_OFFSET - UL(0x41000000))
+#define TASK_UNMAPPED_BASE (PAGE_OFFSET / 3)
+
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
+#if TASK_SIZE > MODULES_VADDR
+#error Top of user space clashes with start of module space
+#endif
+
+#define MODULES_END (PAGE_OFFSET)
+
+/*
+ * Allow 16MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER 24
+
+/*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
+#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
+#ifndef __ASSEMBLY__
+
+#ifndef arch_adjust_zones
+#define arch_adjust_zones(size, holes) do { } while (0)
+#endif
+
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * Drivers should NOT use these either.
+ */
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE(). It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ * page_to_pfn(page) convert a struct page * to a PFN number
+ * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
+ *
+ * virt_to_page(k) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k) indicates whether a virtual address is valid
+ */
+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && \
+ (unsigned long)(kaddr) < (unsigned long)high_memory)
+
+#endif
+
+#include <asm-generic/memory_model.h>
+
+#endif
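The __virt_to_phys()/__phys_to_virt() pair above is a fixed linear offset between the kernel direct map at PAGE_OFFSET and physical RAM at PHYS_OFFSET, which is supplied by <mach/memory.h> and is not part of this hunk. A small worked sketch, with a purely hypothetical PHYS_OFFSET value:

    #include <assert.h>

    #define PAGE_OFFSET 0xC0000000UL
    #define PHYS_OFFSET 0x40000000UL   /* hypothetical, for illustration only */

    #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
    #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)

    int main(void)
    {
            unsigned long kva = 0xC0100000UL;            /* kernel virtual */

            assert(__virt_to_phys(kva) == 0x40100000UL); /* physical */
            assert(__phys_to_virt(__virt_to_phys(kva)) == kva);
            return 0;
    }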
diff --git a/arch/unicore32/include/asm/mmu.h b/arch/unicore32/include/asm/mmu.h
new file mode 100644
index 000000000000..66fa341dc2c6
--- /dev/null
+++ b/arch/unicore32/include/asm/mmu.h
@@ -0,0 +1,17 @@
+/*
+ * linux/arch/unicore32/include/asm/mmu.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_MMU_H__
+#define __UNICORE_MMU_H__
+
+typedef unsigned long mm_context_t;
+
+#endif
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
new file mode 100644
index 000000000000..fb5e4c658f7a
--- /dev/null
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -0,0 +1,87 @@
+/*
+ * linux/arch/unicore32/include/asm/mmu_context.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_MMU_CONTEXT_H__
+#define __UNICORE_MMU_CONTEXT_H__
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu-single.h>
+
+#define init_new_context(tsk, mm) 0
+
+#define destroy_context(mm) do { } while (0)
+
+/*
+ * This is called when "tsk" is about to enter lazy TLB mode.
+ *
+ * mm: describes the currently active mm context
+ * tsk: task which is entering lazy tlb
+ * cpu: cpu number which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned. No registers are touched. We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
+ cpu_switch_mm(next->pgd, next);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+#define activate_mm(prev, next) switch_mm(prev, next, NULL)
+
+/*
+ * We are inserting a "fake" vma for the user-accessible vector page so
+ * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
+ * But we also want to remove it before the generic code gets to see it
+ * during process exit or the unmapping of it would cause total havoc.
+ * (the macro is used as remove_vma() is static to mm/mmap.c)
+ */
+#define arch_exit_mmap(mm) \
+do { \
+ struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
+ if (high_vma) { \
+ BUG_ON(high_vma->vm_next); /* it should be last */ \
+ if (high_vma->vm_prev) \
+ high_vma->vm_prev->vm_next = NULL; \
+ else \
+ mm->mmap = NULL; \
+ rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
+ mm->mmap_cache = NULL; \
+ mm->map_count--; \
+ remove_vma(high_vma); \
+ } \
+} while (0)
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+}
+
+#endif
diff --git a/arch/unicore32/include/asm/mutex.h b/arch/unicore32/include/asm/mutex.h
new file mode 100644
index 000000000000..fab7d0e8adf6
--- /dev/null
+++ b/arch/unicore32/include/asm/mutex.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/unicore32/include/asm/mutex.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UniCore optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef __UNICORE_MUTEX_H__
+#define __UNICORE_MUTEX_H__
+
+# include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/unicore32/include/asm/page.h b/arch/unicore32/include/asm/page.h
new file mode 100644
index 000000000000..594b3226250e
--- /dev/null
+++ b/arch/unicore32/include/asm/page.h
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/unicore32/include/asm/page.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PAGE_H__
+#define __UNICORE_PAGE_H__
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct page;
+struct vm_area_struct;
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+typedef struct page *pgtable_t;
+
+extern int pfn_valid(unsigned long);
+
+#include <asm/memory.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/getorder.h>
+
+#endif
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h
new file mode 100644
index 000000000000..c5b28b459535
--- /dev/null
+++ b/arch/unicore32/include/asm/pci.h
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/unicore32/include/asm/pci.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PCI_H__
+#define __UNICORE_PCI_H__
+
+#ifdef __KERNEL__
+#include <asm-generic/pci-dma-compat.h>
+#include <asm-generic/pci.h>
+#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
+
+static inline void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
new file mode 100644
index 000000000000..0213e373a895
--- /dev/null
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -0,0 +1,110 @@
+/*
+ * linux/arch/unicore32/include/asm/pgalloc.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PGALLOC_H__
+#define __UNICORE_PGALLOC_H__
+
+#include <asm/pgtable-hwdef.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#define check_pgt_cache() do { } while (0)
+
+#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
+#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
+
+extern pgd_t *get_pgd_slow(struct mm_struct *mm);
+extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
+
+#define pgd_alloc(mm) get_pgd_slow(mm)
+#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
+
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
+/*
+ * Allocate one PTE table.
+ */
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)__get_free_page(PGALLOC_GFP);
+ if (pte)
+ clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *pte;
+
+ pte = alloc_pages(PGALLOC_GFP, 0);
+ if (pte) {
+ if (!PageHighMem(pte)) {
+ void *page = page_address(pte);
+ clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
+ }
+ pgtable_page_ctor(pte);
+ }
+
+ return pte;
+}
+
+/*
+ * Free one PTE table.
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ if (pte)
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
+{
+ set_pmd(pmdp, __pmd(pmdval));
+ flush_pmd_entry(pmdp);
+}
+
+/*
+ * Populate the pmdp entry with a pointer to the pte. This pmd is part
+ * of the mm address space.
+ */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+ unsigned long pte_ptr = (unsigned long)ptep;
+
+ /*
+ * The pmd must be loaded with the physical
+ * address of the PTE table
+ */
+ __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+{
+ __pmd_populate(pmdp,
+ page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#endif
diff --git a/arch/unicore32/include/asm/pgtable-hwdef.h b/arch/unicore32/include/asm/pgtable-hwdef.h
new file mode 100644
index 000000000000..7314e859cca0
--- /dev/null
+++ b/arch/unicore32/include/asm/pgtable-hwdef.h
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/unicore32/include/asm/pgtable-hwdef.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PGTABLE_HWDEF_H__
+#define __UNICORE_PGTABLE_HWDEF_H__
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PMD)
+ * - common
+ */
+#define PMD_TYPE_MASK (3 << 0)
+#define PMD_TYPE_TABLE (0 << 0)
+/*#define PMD_TYPE_LARGE (1 << 0) */
+#define PMD_TYPE_INVALID (2 << 0)
+#define PMD_TYPE_SECT (3 << 0)
+
+#define PMD_PRESENT (1 << 2)
+#define PMD_YOUNG (1 << 3)
+
+/*#define PMD_SECT_DIRTY (1 << 4) */
+#define PMD_SECT_CACHEABLE (1 << 5)
+#define PMD_SECT_EXEC (1 << 6)
+#define PMD_SECT_WRITE (1 << 7)
+#define PMD_SECT_READ (1 << 8)
+
+/*
+ * + Level 2 descriptor (PTE)
+ * - common
+ */
+#define PTE_TYPE_MASK (3 << 0)
+#define PTE_TYPE_SMALL (0 << 0)
+#define PTE_TYPE_MIDDLE (1 << 0)
+#define PTE_TYPE_LARGE (2 << 0)
+#define PTE_TYPE_INVALID (3 << 0)
+
+#define PTE_PRESENT (1 << 2)
+#define PTE_FILE (1 << 3) /* only when !PRESENT */
+#define PTE_YOUNG (1 << 3)
+#define PTE_DIRTY (1 << 4)
+#define PTE_CACHEABLE (1 << 5)
+#define PTE_EXEC (1 << 6)
+#define PTE_WRITE (1 << 7)
+#define PTE_READ (1 << 8)
+
+#endif
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
new file mode 100644
index 000000000000..68b2f297ac97
--- /dev/null
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -0,0 +1,317 @@
+/*
+ * linux/arch/unicore32/include/asm/pgtable.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PGTABLE_H__
+#define __UNICORE_PGTABLE_H__
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <asm/cpu-single.h>
+
+#include <asm/memory.h>
+#include <asm/pgtable-hwdef.h>
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+#ifndef VMALLOC_START
+#define VMALLOC_OFFSET SZ_8M
+#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) \
+ & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END (0xff000000UL)
+#endif
+
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PGD 1024
+
+/*
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PGDIR_SHIFT 22
+
+#ifndef __ASSEMBLY__
+extern void __pte_error(const char *file, int line, unsigned long val);
+extern void __pgd_error(const char *file, int line, unsigned long val);
+
+#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#endif /* !__ASSEMBLY__ */
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+#define FIRST_USER_ADDRESS PAGE_SIZE
+
+#define FIRST_USER_PGD_NR 1
+#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 22
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cacheable bits based on memory policy, as well as any
+ * architecture dependent bits.
+ */
+#define _PTE_DEFAULT (PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)
+
+extern pgprot_t pgprot_user;
+extern pgprot_t pgprot_kernel;
+
+#define PAGE_NONE pgprot_user
+#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | PTE_READ \
+ | PTE_WRITE)
+#define PAGE_SHARED_EXEC __pgprot(pgprot_val(pgprot_user) | PTE_READ \
+ | PTE_WRITE \
+ | PTE_EXEC)
+#define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | PTE_READ)
+#define PAGE_COPY_EXEC __pgprot(pgprot_val(pgprot_user) | PTE_READ \
+ | PTE_EXEC)
+#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | PTE_READ)
+#define PAGE_READONLY_EXEC __pgprot(pgprot_val(pgprot_user) | PTE_READ \
+ | PTE_EXEC)
+#define PAGE_KERNEL pgprot_kernel
+#define PAGE_KERNEL_EXEC __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))
+
+#define __PAGE_NONE __pgprot(_PTE_DEFAULT)
+#define __PAGE_SHARED __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_WRITE)
+#define __PAGE_SHARED_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_WRITE \
+ | PTE_EXEC)
+#define __PAGE_COPY __pgprot(_PTE_DEFAULT | PTE_READ)
+#define __PAGE_COPY_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_EXEC)
+#define __PAGE_READONLY __pgprot(_PTE_DEFAULT | PTE_READ)
+#define __PAGE_READONLY_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_EXEC)
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version. These get translated into the best that the
+ * architecture can perform. Note that on UniCore hardware:
+ * 1) We cannot do execute protection
+ * 2) If we could do execute protection, then read is implied
+ * 3) write implies read permissions
+ */
+#define __P000 __PAGE_NONE
+#define __P001 __PAGE_READONLY
+#define __P010 __PAGE_COPY
+#define __P011 __PAGE_COPY
+#define __P100 __PAGE_READONLY_EXEC
+#define __P101 __PAGE_READONLY_EXEC
+#define __P110 __PAGE_COPY_EXEC
+#define __P111 __PAGE_COPY_EXEC
+
+#define __S000 __PAGE_NONE
+#define __S001 __PAGE_READONLY
+#define __S010 __PAGE_SHARED
+#define __S011 __PAGE_SHARED
+#define __S100 __PAGE_READONLY_EXEC
+#define __S101 __PAGE_READONLY_EXEC
+#define __S110 __PAGE_SHARED_EXEC
+#define __S111 __PAGE_SHARED_EXEC
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) \
+ | pgprot_val(prot)))
+
+#define pte_none(pte) (!pte_val(pte))
+#define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0))
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+#define pte_offset_kernel(dir, addr) (pmd_page_vaddr(*(dir)) \
+ + __pte_index(addr))
+
+#define pte_offset_map(dir, addr) (pmd_page_vaddr(*(dir)) \
+ + __pte_index(addr))
+#define pte_unmap(pte) do { } while (0)
+
+#define set_pte(ptep, pte) cpu_set_pte(ptep, pte)
+
+#define set_pte_at(mm, addr, ptep, pteval) \
+ do { \
+ set_pte(ptep, pteval); \
+ } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define pte_present(pte) (pte_val(pte) & PTE_PRESENT)
+#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & PTE_YOUNG)
+#define pte_exec(pte) (pte_val(pte) & PTE_EXEC)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
+PTE_BIT_FUNC(mkwrite, |= PTE_WRITE);
+PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
+PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
+PTE_BIT_FUNC(mkold, &= ~PTE_YOUNG);
+PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG);
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+/*
+ * Mark the prot value as uncacheable.
+ */
+#define pgprot_noncached(prot) \
+ __pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
+#define pgprot_writecombine(prot) \
+ __pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
+#define pgprot_dmacoherent(prot) \
+ __pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PMD_PRESENT)
+#define pmd_bad(pmd) (((pmd_val(pmd) & \
+ (PMD_PRESENT | PMD_TYPE_MASK)) \
+ != (PMD_PRESENT | PMD_TYPE_TABLE)))
+
+#define set_pmd(pmdpd, pmdval) \
+ do { \
+ *(pmdpd) = pmdval; \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ set_pmd(pmdp, __pmd(0));\
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+#define pmd_page_vaddr(pmd) ((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+/* Find an entry in the third-level page table.. */
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * Encode and decode a swap entry. Swap entries are stored in the Linux
+ * page tables as follows:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <--------------- offset --------------> <--- type --> 0 0 0 0 0
+ *
+ * This gives us up to 127 swap files and 4GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+#define __SWP_TYPE_SHIFT 5
+#define __SWP_TYPE_BITS 7
+#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) \
+ & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+
+/*
+ * It is an error for the kernel to have more swap files than we can
+ * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
+ * is increased beyond what we presently support.
+ */
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+/*
+ * Encode and decode a file entry. File entries are stored in the Linux
+ * page tables as follows:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <----------------------- offset ----------------------> 1 0 0 0
+ */
+#define pte_file(pte) (pte_val(pte) & PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 4)
+#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
+
+#define PTE_FILE_MAX_BITS 28
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+/* FIXME: this is not correct */
+#define kern_addr_valid(addr) (1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __UNICORE_PGTABLE_H__ */
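The swap-entry comment in pgtable.h above packs a 7-bit type at bit 5 and the page offset from bit 12 upward, leaving the low five PTE bits clear. A quick stand-alone check of that packing arithmetic:

    #include <assert.h>

    #define __SWP_TYPE_SHIFT   5
    #define __SWP_TYPE_BITS    7
    #define __SWP_TYPE_MASK    ((1 << __SWP_TYPE_BITS) - 1)
    #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

    int main(void)
    {
            unsigned long type = 3, offset = 0x1234;
            unsigned long val = (type << __SWP_TYPE_SHIFT) |
                                (offset << __SWP_OFFSET_SHIFT);

            /* decode recovers exactly what was packed */
            assert(((val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) == type);
            assert((val >> __SWP_OFFSET_SHIFT) == offset);
            return 0;
    }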
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
new file mode 100644
index 000000000000..e11cb0786578
--- /dev/null
+++ b/arch/unicore32/include/asm/processor.h
@@ -0,0 +1,92 @@
+/*
+ * linux/arch/unicore32/include/asm/processor.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_PROCESSOR_H__
+#define __UNICORE_PROCESSOR_H__
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+#endif
+
+struct debug_entry {
+ u32 address;
+ u32 insn;
+};
+
+struct debug_info {
+ int nsaved;
+ struct debug_entry bp[2];
+};
+
+struct thread_struct {
+ /* fault info */
+ unsigned long address;
+ unsigned long trap_no;
+ unsigned long error_code;
+ /* debugging */
+ struct debug_info debug;
+};
+
+#define INIT_THREAD { }
+
+#define start_thread(regs, pc, sp) \
+({ \
+ unsigned long *stack = (unsigned long *)sp; \
+ set_fs(USER_DS); \
+ memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ regs->UCreg_asr = USER_MODE; \
+ regs->UCreg_pc = pc & ~1; /* pc */ \
+ regs->UCreg_sp = sp; /* sp */ \
+ regs->UCreg_02 = stack[2]; /* r2 (envp) */ \
+ regs->UCreg_01 = stack[1]; /* r1 (argv) */ \
+ regs->UCreg_00 = stack[0]; /* r0 (argc) */ \
+})
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define cpu_relax() barrier()
+
+/*
+ * Create a new kernel thread
+ */
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->UCreg_pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->UCreg_sp)
+
+#endif
+
+#endif /* __UNICORE_PROCESSOR_H__ */
diff --git a/arch/unicore32/include/asm/ptrace.h b/arch/unicore32/include/asm/ptrace.h
new file mode 100644
index 000000000000..b9caf9b0997b
--- /dev/null
+++ b/arch/unicore32/include/asm/ptrace.h
@@ -0,0 +1,133 @@
+/*
+ * linux/arch/unicore32/include/asm/ptrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PTRACE_H__
+#define __UNICORE_PTRACE_H__
+
+#define PTRACE_GET_THREAD_AREA 22
+
+/*
+ * PSR bits
+ */
+#define USER_MODE 0x00000010
+#define REAL_MODE 0x00000011
+#define INTR_MODE 0x00000012
+#define PRIV_MODE 0x00000013
+#define ABRT_MODE 0x00000017
+#define EXTN_MODE 0x0000001b
+#define SUSR_MODE 0x0000001f
+#define MODE_MASK 0x0000001f
+#define PSR_R_BIT 0x00000040
+#define PSR_I_BIT 0x00000080
+#define PSR_V_BIT 0x10000000
+#define PSR_C_BIT 0x20000000
+#define PSR_Z_BIT 0x40000000
+#define PSR_S_BIT 0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f 0xff000000 /* Flags */
+#define PSR_c 0x000000ff /* Control */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call. Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+ unsigned long uregs[34];
+};
+
+#define UCreg_asr uregs[32]
+#define UCreg_pc uregs[31]
+#define UCreg_lr uregs[30]
+#define UCreg_sp uregs[29]
+#define UCreg_ip uregs[28]
+#define UCreg_fp uregs[27]
+#define UCreg_26 uregs[26]
+#define UCreg_25 uregs[25]
+#define UCreg_24 uregs[24]
+#define UCreg_23 uregs[23]
+#define UCreg_22 uregs[22]
+#define UCreg_21 uregs[21]
+#define UCreg_20 uregs[20]
+#define UCreg_19 uregs[19]
+#define UCreg_18 uregs[18]
+#define UCreg_17 uregs[17]
+#define UCreg_16 uregs[16]
+#define UCreg_15 uregs[15]
+#define UCreg_14 uregs[14]
+#define UCreg_13 uregs[13]
+#define UCreg_12 uregs[12]
+#define UCreg_11 uregs[11]
+#define UCreg_10 uregs[10]
+#define UCreg_09 uregs[9]
+#define UCreg_08 uregs[8]
+#define UCreg_07 uregs[7]
+#define UCreg_06 uregs[6]
+#define UCreg_05 uregs[5]
+#define UCreg_04 uregs[4]
+#define UCreg_03 uregs[3]
+#define UCreg_02 uregs[2]
+#define UCreg_01 uregs[1]
+#define UCreg_00 uregs[0]
+#define UCreg_ORIG_00 uregs[33]
+
+#ifdef __KERNEL__
+
+#define user_mode(regs) \
+ (processor_mode(regs) == USER_MODE)
+
+#define processor_mode(regs) \
+ ((regs)->UCreg_asr & MODE_MASK)
+
+#define interrupts_enabled(regs) \
+ (!((regs)->UCreg_asr & PSR_I_BIT))
+
+#define fast_interrupts_enabled(regs) \
+ (!((regs)->UCreg_asr & PSR_R_BIT))
+
+/* Are the current registers suitable for user mode?
+ * (used to maintain security in signal handlers)
+ */
+static inline int valid_user_regs(struct pt_regs *regs)
+{
+ unsigned long mode = regs->UCreg_asr & MODE_MASK;
+
+ /*
+ * Always clear the R (REAL) bits
+ */
+ regs->UCreg_asr &= ~(PSR_R_BIT);
+
+ if ((regs->UCreg_asr & PSR_I_BIT) == 0) {
+ if (mode == USER_MODE)
+ return 1;
+ }
+
+ /*
+ * Force ASR to something logical...
+ */
+ regs->UCreg_asr &= PSR_f | USER_MODE;
+
+ return 0;
+}
+
+#define instruction_pointer(regs) ((regs)->UCreg_pc)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
+
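[Editor's illustration, not part of the patch] The PSR constants and the processor_mode()/user_mode()/interrupts_enabled() helpers above all reduce to simple mask tests on the ASR value. A minimal standalone sketch of that decoding, reusing the same constants:

#include <stdio.h>

#define USER_MODE	0x00000010
#define MODE_MASK	0x0000001f
#define PSR_I_BIT	0x00000080

int main(void)
{
	/* C flag set, interrupts enabled, user mode */
	unsigned long asr = 0x20000010;

	printf("mode=%#lx user=%d irqs_on=%d\n",
	       asr & MODE_MASK,
	       (asr & MODE_MASK) == USER_MODE,
	       !(asr & PSR_I_BIT));
	return 0;
}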
diff --git a/arch/unicore32/include/asm/sigcontext.h b/arch/unicore32/include/asm/sigcontext.h
new file mode 100644
index 000000000000..6a2d7671c052
--- /dev/null
+++ b/arch/unicore32/include/asm/sigcontext.h
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/unicore32/include/asm/sigcontext.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_SIGCONTEXT_H__
+#define __UNICORE_SIGCONTEXT_H__
+
+#include <asm/ptrace.h>
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long fault_address;
+ struct pt_regs regs;
+};
+
+#endif
diff --git a/arch/unicore32/include/asm/stacktrace.h b/arch/unicore32/include/asm/stacktrace.h
new file mode 100644
index 000000000000..76edc65a5871
--- /dev/null
+++ b/arch/unicore32/include/asm/stacktrace.h
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/unicore32/include/asm/stacktrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_STACKTRACE_H__
+#define __UNICORE_STACKTRACE_H__
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long sp;
+ unsigned long lr;
+ unsigned long pc;
+};
+
+#ifdef CONFIG_FRAME_POINTER
+extern int unwind_frame(struct stackframe *frame);
+#else
+#define unwind_frame(f) (-EINVAL)
+#endif
+extern void walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data);
+
+#endif /* __UNICORE_STACKTRACE_H__ */
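[Editor's illustration, not part of the patch] A hypothetical caller of the stacktrace interface above, assuming only the prototypes declared in this header and assuming (as in the analogous arch code) that a non-zero callback return value stops the walk:

/* Sketch only: count stack frames via walk_stackframe(). */
struct frame_count {
	unsigned int depth;
	unsigned int limit;
};

static int count_frame(struct stackframe *frame, void *data)
{
	struct frame_count *fc = data;

	fc->depth++;
	return fc->depth >= fc->limit;	/* assumed: non-zero stops the walk */
}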
diff --git a/arch/unicore32/include/asm/string.h b/arch/unicore32/include/asm/string.h
new file mode 100644
index 000000000000..55264c84369a
--- /dev/null
+++ b/arch/unicore32/include/asm/string.h
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/unicore32/include/asm/string.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_STRING_H__
+#define __UNICORE_STRING_H__
+
+/*
+ * We don't do inline string functions, since the
+ * optimised inline asm versions are not small.
+ */
+
+#define __HAVE_ARCH_STRRCHR
+extern char *strrchr(const char *s, int c);
+
+#define __HAVE_ARCH_STRCHR
+extern char *strchr(const char *s, int c);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
+#endif
diff --git a/arch/unicore32/include/asm/suspend.h b/arch/unicore32/include/asm/suspend.h
new file mode 100644
index 000000000000..88a9c0f32b21
--- /dev/null
+++ b/arch/unicore32/include/asm/suspend.h
@@ -0,0 +1,30 @@
+/*
+ * linux/arch/unicore32/include/asm/suspend.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_SUSPEND_H__
+#define __UNICORE_SUSPEND_H__
+
+#ifndef __ASSEMBLY__
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#include <asm/ptrace.h>
+
+struct swsusp_arch_regs {
+ struct cpu_context_save cpu_context; /* cpu context */
+#ifdef CONFIG_UNICORE_FPU_F64
+ struct fp_state fpstate __attribute__((aligned(8)));
+#endif
+};
+#endif
+
+#endif /* __UNICORE_SUSPEND_H__ */
+
diff --git a/arch/unicore32/include/asm/system.h b/arch/unicore32/include/asm/system.h
new file mode 100644
index 000000000000..246b71c17fda
--- /dev/null
+++ b/arch/unicore32/include/asm/system.h
@@ -0,0 +1,161 @@
+/*
+ * linux/arch/unicore32/include/asm/system.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_SYSTEM_H__
+#define __UNICORE_SYSTEM_H__
+
+#ifdef __KERNEL__
+
+/*
+ * CR1 bits (CP#0 CR1)
+ */
+#define CR_M (1 << 0) /* MMU enable */
+#define CR_A (1 << 1) /* Alignment abort enable */
+#define CR_D (1 << 2) /* Dcache enable */
+#define CR_I (1 << 3) /* Icache enable */
+#define CR_B (1 << 4) /* Dcache write mechanism: write back */
+#define CR_T (1 << 5) /* Burst enable */
+#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+
+struct thread_info;
+struct task_struct;
+
+struct pt_regs;
+
+void die(const char *msg, struct pt_regs *regs, int err);
+
+struct siginfo;
+void uc32_notify_die(const char *str, struct pt_regs *regs,
+ struct siginfo *info, unsigned long err, unsigned long trap);
+
+void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *),
+ int sig, int code, const char *name);
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);
+
+extern int cpu_architecture(void);
+extern void cpu_init(void);
+
+#define vectors_high() (cr_alignment & CR_V)
+
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
+extern unsigned long cr_no_alignment; /* defined in entry-unicore.S */
+extern unsigned long cr_alignment; /* defined in entry-unicore.S */
+
+static inline unsigned int get_cr(void)
+{
+ unsigned int val;
+ asm("movc %0, p0.c1, #0" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+ asm volatile("movc p0.c1, %0, #0 @set CR"
+ : : "r" (val) : "cc");
+ isb();
+}
+
+extern void adjust_cr(unsigned long mask, unsigned long set);
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next';
+ * `prev' will never be the same as `next'. schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct thread_info *, struct thread_info *);
+extern void panic(const char *fmt, ...);
+
+#define switch_to(prev, next, last) \
+do { \
+ last = __switch_to(prev, \
+ task_thread_info(prev), task_thread_info(next)); \
+} while (0)
+
+static inline unsigned long
+__xchg(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ asm volatile("@ __xchg1\n"
+ " swapb %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ case 4:
+ asm volatile("@ __xchg4\n"
+ " swapw %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ default:
+ panic("xchg: bad data size: ptr 0x%p, size %d\n",
+ ptr, size);
+ }
+
+ return ret;
+}
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
+ (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) \
+ __cmpxchg64_local_generic((ptr), (o), (n))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif /* __ASSEMBLY__ */
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif
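[Editor's illustration, not part of the patch] The xchg() macro above atomically stores the new value and returns the previous one, dispatching on operand size to the swapb/swapw instructions. A standalone sketch of that contract; __atomic_exchange_n is only a stand-in for the hardware swap used by __xchg():

#include <stdio.h>

int main(void)
{
	unsigned long lock = 0;

	/* Stand-in for xchg(&lock, 1): returns the old value. */
	unsigned long old = __atomic_exchange_n(&lock, 1UL, __ATOMIC_SEQ_CST);

	printf("old=%lu new=%lu\n", old, lock);	/* old=0 new=1 */
	return 0;
}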
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
new file mode 100644
index 000000000000..c270e9e04861
--- /dev/null
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/unicore32/include/asm/thread_info.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_THREAD_INFO_H__
+#define __UNICORE_THREAD_INFO_H__
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/fpstate.h>
+
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE 8192
+#define THREAD_START_SP (THREAD_SIZE - 8)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct exec_domain;
+
+#include <asm/types.h>
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct cpu_context_save {
+ __u32 r4;
+ __u32 r5;
+ __u32 r6;
+ __u32 r7;
+ __u32 r8;
+ __u32 r9;
+ __u32 r10;
+ __u32 r11;
+ __u32 r12;
+ __u32 r13;
+ __u32 r14;
+ __u32 r15;
+ __u32 r16;
+ __u32 r17;
+ __u32 r18;
+ __u32 r19;
+ __u32 r20;
+ __u32 r21;
+ __u32 r22;
+ __u32 r23;
+ __u32 r24;
+ __u32 r25;
+ __u32 r26;
+ __u32 fp;
+ __u32 sp;
+ __u32 pc;
+};
+
+/*
+ * low level task data that entry.S needs immediate access to.
+ * __switch_to() assumes the position of cpu_context within this structure.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable */
+ /* <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 cpu; /* cpu */
+ struct cpu_context_save cpu_context; /* cpu context */
+ __u32 syscall; /* syscall number */
+ __u8 used_cp[16]; /* thread used copro */
+#ifdef CONFIG_UNICORE_FPU_F64
+ struct fp_state fpstate __attribute__((aligned(8)));
+#endif
+ struct restart_block restart_block;
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/*
+ * how to get the thread information struct from C
+ */
+static inline struct thread_info *current_thread_info(void) __attribute_const__;
+
+static inline struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm ("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#define thread_saved_pc(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
+#define thread_saved_sp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+
+#endif
+
+/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring. See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE 0x40000000
+
+/*
+ * thread information flags:
+ * TIF_SYSCALL_TRACE - syscall trace active
+ * TIF_SIGPENDING - signal pending
+ * TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
+ */
+#define TIF_SIGPENDING 0
+#define TIF_NEED_RESCHED 1
+#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_SYSCALL_TRACE 8
+#define TIF_MEMDIE 18
+#define TIF_FREEZE 19
+#define TIF_RESTORE_SIGMASK 20
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
+/*
+ * Change these and you break ASM code in entry-common.S
+ */
+#define _TIF_WORK_MASK 0x000000ff
+
+#endif /* __KERNEL__ */
+#endif /* __UNICORE_THREAD_INFO_H__ */
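[Editor's illustration, not part of the patch] current_thread_info() above relies on the kernel stack being THREAD_SIZE-aligned, so masking the stack pointer yields the base of the thread_info block. A standalone sketch of that arithmetic:

#include <stdio.h>

#define THREAD_SIZE	8192UL

int main(void)
{
	/* Pretend this is a stack pointer somewhere inside an 8KB stack. */
	unsigned long sp = 0xc1234f60;
	unsigned long base = sp & ~(THREAD_SIZE - 1);

	printf("sp=%#lx thread_info=%#lx\n", sp, base);	/* base=0xc1234000 */
	return 0;
}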
diff --git a/arch/unicore32/include/asm/timex.h b/arch/unicore32/include/asm/timex.h
new file mode 100644
index 000000000000..faf16ba46544
--- /dev/null
+++ b/arch/unicore32/include/asm/timex.h
@@ -0,0 +1,34 @@
+/*
+ * linux/arch/unicore32/include/asm/timex.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_TIMEX_H__
+#define __UNICORE_TIMEX_H__
+
+#ifdef CONFIG_ARCH_FPGA
+
+/* In the FPGA, the APB clock is 33MHz and the OST clock is 32KHz, */
+/* so CLOCK_TICK_RATE is set to the 32KHz OST clock. */
+#define CLOCK_TICK_RATE (32*1024)
+
+#endif
+
+#if defined(CONFIG_PUV3_DB0913) \
+ || defined(CONFIG_PUV3_NB0916) \
+ || defined(CONFIG_PUV3_SMW0919)
+
+#define CLOCK_TICK_RATE (14318000)
+
+#endif
+
+#include <asm-generic/timex.h>
+
+#endif
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
new file mode 100644
index 000000000000..02ee40e47a0d
--- /dev/null
+++ b/arch/unicore32/include/asm/tlb.h
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/unicore32/include/asm/tlb.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TLB_H__
+#define __UNICORE_TLB_H__
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+/*
+ * TLB handling. This allows us to remove pages from the page
+ * tables and handle the required TLB flushing efficiently.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int fullmm;
+ unsigned long range_start;
+ unsigned long range_end;
+};
+
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+ tlb->mm = mm;
+ tlb->fullmm = full_mm_flush;
+
+ return tlb;
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ if (tlb->fullmm)
+ flush_tlb_mm(tlb->mm);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ put_cpu_var(mmu_gathers);
+}
+
+/*
+ * Memorize the range for the TLB flush.
+ */
+static inline void
+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
+{
+ if (!tlb->fullmm) {
+ if (addr < tlb->range_start)
+ tlb->range_start = addr;
+ if (addr + PAGE_SIZE > tlb->range_end)
+ tlb->range_end = addr + PAGE_SIZE;
+ }
+}
+
+/*
+ * For TLB vma handling, these hooks can be optimised away when doing a
+ * full MM flush. When doing a munmap, the vmas are adjusted to cover
+ * only the region being torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (!tlb->fullmm) {
+ flush_cache_range(vma, vma->vm_start, vma->vm_end);
+ tlb->range_start = TASK_SIZE;
+ tlb->range_end = 0;
+ }
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (!tlb->fullmm && tlb->range_end > 0)
+ flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+}
+
+#define tlb_remove_page(tlb, page) free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, x, addr) do { } while (0)
+
+#define tlb_migrate_finish(mm) do { } while (0)
+
+#endif
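[Editor's illustration, not part of the patch] tlb_remove_tlb_entry() above grows a [range_start, range_end) window one page at a time, and tlb_end_vma() then flushes that whole window in one call. A standalone sketch of the accumulation, with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE	0x80000000UL

int main(void)
{
	unsigned long start = TASK_SIZE, end = 0;	/* as set by tlb_start_vma() */
	unsigned long addrs[] = { 0x4000a000, 0x40008000, 0x4000b000 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (addrs[i] < start)
			start = addrs[i];
		if (addrs[i] + PAGE_SIZE > end)
			end = addrs[i] + PAGE_SIZE;
	}
	/* One flush_tlb_range(vma, start, end) now covers all three pages. */
	printf("flush [%#lx, %#lx)\n", start, end);
	return 0;
}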
diff --git a/arch/unicore32/include/asm/tlbflush.h b/arch/unicore32/include/asm/tlbflush.h
new file mode 100644
index 000000000000..e446ac8bb9e5
--- /dev/null
+++ b/arch/unicore32/include/asm/tlbflush.h
@@ -0,0 +1,195 @@
+/*
+ * linux/arch/unicore32/include/asm/tlbflush.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TLBFLUSH_H__
+#define __UNICORE_TLBFLUSH_H__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
+ struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+
+/*
+ * TLB Management
+ * ==============
+ *
+ * The arch/unicore32/mm/tlb-*.S files implement these methods.
+ *
+ * The TLB specific code is expected to perform whatever tests it
+ * needs to determine if it should invalidate the TLB for each
+ * call. Start addresses are inclusive and end addresses are
+ * exclusive; it is safe to round these addresses down.
+ *
+ * flush_tlb_all()
+ *
+ * Invalidate the entire TLB.
+ *
+ * flush_tlb_mm(mm)
+ *
+ * Invalidate all TLB entries in a particular address
+ * space.
+ * - mm - mm_struct describing address space
+ *
+ * flush_tlb_range(mm,start,end)
+ *
+ * Invalidate a range of TLB entries in the specified
+ * address space.
+ * - mm - mm_struct describing address space
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ *
+ * flush_tlb_page(vaddr,vma)
+ *
+ * Invalidate the specified page in the specified address range.
+ * - vaddr - virtual address (may not be aligned)
+ * - vma - vma_struct describing address range
+ *
+ * flush_tlb_kernel_page(kaddr)
+ *
+ * Invalidate the TLB entry for the specified page. The address
+ * will be in the kernel's virtual memory space. Current uses
+ * only require the D-TLB to be invalidated.
+ * - kaddr - Kernel virtual memory address
+ */
+
+static inline void local_flush_tlb_all(void)
+{
+ const int zero = 0;
+
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (zero) : "cc");
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int zero = 0;
+
+ if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (zero) : "cc");
+ }
+ put_cpu();
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ /* iTLB invalidate page */
+ asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (uaddr & PAGE_MASK) : "cc");
+ /* dTLB invalidate page */
+ asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (uaddr & PAGE_MASK) : "cc");
+#else
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (uaddr & PAGE_MASK) : "cc");
+#endif
+ }
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ /* iTLB invalidate page */
+ asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (kaddr & PAGE_MASK) : "cc");
+ /* dTLB invalidate page */
+ asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (kaddr & PAGE_MASK) : "cc");
+#else
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (kaddr & PAGE_MASK) : "cc");
+#endif
+}
+
+/*
+ * flush_pmd_entry
+ *
+ * Flush a PMD entry (word aligned, or double-word aligned) to
+ * RAM if the TLB for the CPU we are running on requires this.
+ * This is typically used when we are creating PMD entries.
+ *
+ * clean_pmd_entry
+ *
+ * Clean (but don't drain the write buffer) if the CPU requires
+ * these operations. This is typically used when we are removing
+ * PMD entries.
+ */
+static inline void flush_pmd_entry(pmd_t *pmd)
+{
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ /* flush dcache line, see dcacheline_flush in proc-macros.S */
+ asm("mov r1, %0 << #20\n"
+ "ldw r2, =_stext\n"
+ "add r2, r2, r1 >> #20\n"
+ "ldw r1, [r2+], #0x0000\n"
+ "ldw r1, [r2+], #0x1000\n"
+ "ldw r1, [r2+], #0x2000\n"
+ "ldw r1, [r2+], #0x3000\n"
+ : : "r" (pmd) : "r1", "r2");
+#else
+ /* flush dcache all */
+ asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (pmd) : "cc");
+#endif
+}
+
+static inline void clean_pmd_entry(pmd_t *pmd)
+{
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ /* clean dcache line */
+ asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc");
+#else
+ /* clean dcache all */
+ asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (pmd) : "cc");
+#endif
+}
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define local_flush_tlb_range(vma, start, end) \
+ __cpu_flush_user_tlb_range(start, end, vma)
+#define local_flush_tlb_kernel_range(s, e) \
+ __cpu_flush_kern_tlb_range(s, e)
+
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_page local_flush_tlb_page
+#define flush_tlb_kernel_page local_flush_tlb_kernel_page
+#define flush_tlb_range local_flush_tlb_range
+#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+
+/*
+ * if PG_dcache_clean is not set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page.
+ */
+extern void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+extern void do_bad_area(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs);
+
+#endif
+
+#endif
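[Editor's illustration, not part of the patch] The flush helpers above mask user and kernel addresses with PAGE_MASK before issuing the invalidate, matching the note that it is safe to round addresses down. A standalone sketch of that rounding:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long uaddr = 0x40123abc;	/* possibly unaligned */

	/* Same rounding as "uaddr & PAGE_MASK" in local_flush_tlb_page(). */
	printf("page=%#lx\n", uaddr & PAGE_MASK);	/* 0x40123000 */
	return 0;
}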
diff --git a/arch/unicore32/include/asm/traps.h b/arch/unicore32/include/asm/traps.h
new file mode 100644
index 000000000000..66e17a724bfe
--- /dev/null
+++ b/arch/unicore32/include/asm/traps.h
@@ -0,0 +1,21 @@
+/*
+ * linux/arch/unicore32/include/asm/traps.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TRAP_H__
+#define __UNICORE_TRAP_H__
+
+extern void __init early_trap_init(void);
+extern void dump_backtrace_entry(unsigned long where,
+ unsigned long from, unsigned long frame);
+
+extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs);
+#endif
diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h
new file mode 100644
index 000000000000..2acda503a6d9
--- /dev/null
+++ b/arch/unicore32/include/asm/uaccess.h
@@ -0,0 +1,47 @@
+/*
+ * linux/arch/unicore32/include/asm/uaccess.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_UACCESS_H__
+#define __UNICORE_UACCESS_H__
+
+#include <linux/thread_info.h>
+#include <linux/errno.h>
+
+#include <asm/memory.h>
+#include <asm/system.h>
+
+#define __copy_from_user __copy_from_user
+#define __copy_to_user __copy_to_user
+#define __strncpy_from_user __strncpy_from_user
+#define __strnlen_user __strnlen_user
+#define __clear_user __clear_user
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __user_ok(addr, size) (((size) <= TASK_SIZE) \
+ && ((addr) <= TASK_SIZE - (size)))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
+
+extern unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__strncpy_from_user(char *to, const char __user *from, unsigned long count);
+extern unsigned long
+__strnlen_user(const char __user *s, long n);
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif /* __UNICORE_UACCESS_H__ */
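[Editor's illustration, not part of the patch] The __user_ok()/__access_ok() checks above avoid address-wrap problems by comparing the size against TASK_SIZE before comparing the address. A standalone sketch of that check; the TASK_SIZE value here is assumed purely for illustration:

#include <stdio.h>

#define TASK_SIZE	0x80000000UL	/* assumed value for illustration */

#define __user_ok(addr, size)	(((size) <= TASK_SIZE) && \
				 ((addr) <= TASK_SIZE - (size)))

int main(void)
{
	printf("%d\n", __user_ok(0x7ffff000UL, 0x1000UL));	/* 1: fits exactly */
	printf("%d\n", __user_ok(0x7ffff000UL, 0x2000UL));	/* 0: crosses TASK_SIZE */
	printf("%d\n", __user_ok(0x10UL, (unsigned long)-1));	/* 0: size too large */
	return 0;
}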
diff --git a/arch/unicore32/include/asm/unistd.h b/arch/unicore32/include/asm/unistd.h
new file mode 100644
index 000000000000..9b2428019961
--- /dev/null
+++ b/arch/unicore32/include/asm/unistd.h
@@ -0,0 +1,18 @@
+/*
+ * linux/arch/unicore32/include/asm/unistd.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#if !defined(__UNICORE_UNISTD_H__) || defined(__SYSCALL)
+#define __UNICORE_UNISTD_H__
+
+/* Use the standard ABI for syscalls. */
+#include <asm-generic/unistd.h>
+
+#endif /* __UNICORE_UNISTD_H__ */
diff --git a/arch/unicore32/include/mach/PKUnity.h b/arch/unicore32/include/mach/PKUnity.h
new file mode 100644
index 000000000000..940e9ed0941c
--- /dev/null
+++ b/arch/unicore32/include/mach/PKUnity.h
@@ -0,0 +1,104 @@
+/*
+ * linux/arch/unicore32/include/mach/PKUnity.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Make sure the virtual mapping is defined correctly */
+#ifndef __MACH_PUV3_HARDWARE_H__
+#error You must include hardware.h not PKUnity.h
+#endif
+
+#include "bitfield.h"
+
+/*
+ * Memory Definitions
+ */
+#define PKUNITY_SDRAM_BASE 0x00000000 /* 0x00000000 - 0x7FFFFFFF 2GB */
+#define PKUNITY_MMIO_BASE 0x80000000 /* 0x80000000 - 0xFFFFFFFF 2GB */
+#define PKUNITY_PCI_BASE 0x80000000 /* 0x80000000 - 0xBFFFFFFF 1GB */
+#include "regs-pci.h"
+#define PKUNITY_BOOT_ROM2_BASE 0xF4000000 /* 0xF4000000 - 0xF7FFFFFF 64MB */
+#define PKUNITY_BOOT_SRAM2_BASE 0xF8000000 /* 0xF8000000 - 0xFBFFFFFF 64MB */
+#define PKUNITY_BOOT_FLASH_BASE 0xFC000000 /* 0xFC000000 - 0xFFFFFFFF 64MB */
+
+/*
+ * PKUNITY Memory Map Addresses: 0x0D000000 - 0x0EFFFFFF (32MB)
+ */
+#define PKUNITY_UVC_MMAP_BASE 0x0D000000 /* 0x0D000000 - 0x0DFFFFFF 16MB */
+#define PKUNITY_UVC_MMAP_SIZE 0x01000000 /* 16MB */
+#define PKUNITY_UNIGFX_MMAP_BASE 0x0E000000 /* 0x0E000000 - 0x0EFFFFFF 16MB */
+#define PKUNITY_UNIGFX_MMAP_SIZE 0x01000000 /* 16MB */
+
+/*
+ * PKUNITY System Bus Addresses (PCI): 0x80000000 - 0xBFFFFFFF (1GB)
+ */
+/* PCI Configuration regs */
+#define PKUNITY_PCICFG_BASE 0x80000000 /* 0x80000000 - 0x8000000B 12B */
+/* PCI Bridge Base */
+#define PKUNITY_PCIBRI_BASE 0x80010000 /* 0x80010000 - 0x80010250 592B */
+/* PCI Legacy IO */
+#define PKUNITY_PCILIO_BASE 0x80030000 /* 0x80030000 - 0x8003FFFF 64KB */
+/* PCI AHB-PCI MEM-mapping */
+#define PKUNITY_PCIMEM_BASE 0x90000000 /* 0x90000000 - 0x97FFFFFF 128MB */
+/* PCI PCI-AHB MEM-mapping */
+#define PKUNITY_PCIAHB_BASE 0x98000000 /* 0x98000000 - 0x9FFFFFFF 128MB */
+
+/*
+ * PKUNITY System Bus Addresses (AHB): 0xC0000000 - 0xEDFFFFFF (736MB)
+ */
+/* AHB-0 is DDR2 SDRAM */
+/* AHB-1 is PCI Space */
+#define PKUNITY_ARBITER_BASE 0xC0000000 /* AHB-2 */
+#define PKUNITY_DDR2CTRL_BASE 0xC0100000 /* AHB-3 */
+#define PKUNITY_DMAC_BASE 0xC0200000 /* AHB-4 */
+#include "regs-dmac.h"
+#define PKUNITY_UMAL_BASE 0xC0300000 /* AHB-5 */
+#include "regs-umal.h"
+#define PKUNITY_USB_BASE 0xC0400000 /* AHB-6 */
+#define PKUNITY_SATA_BASE 0xC0500000 /* AHB-7 */
+#define PKUNITY_SMC_BASE 0xC0600000 /* AHB-8 */
+/* AHB-9 is for APB bridge */
+#define PKUNITY_MME_BASE 0xC0700000 /* AHB-10 */
+#define PKUNITY_UNIGFX_BASE 0xC0800000 /* AHB-11 */
+#include "regs-unigfx.h"
+#define PKUNITY_NAND_BASE 0xC0900000 /* AHB-12 */
+#include "regs-nand.h"
+#define PKUNITY_H264D_BASE 0xC0A00000 /* AHB-13 */
+#define PKUNITY_H264E_BASE 0xC0B00000 /* AHB-14 */
+
+/*
+ * PKUNITY Peripheral Bus Addresses (APB): 0xEE000000 - 0xEFFFFFFF (32MB)
+ */
+#define PKUNITY_UART0_BASE 0xEE000000 /* APB-0 */
+#define PKUNITY_UART1_BASE 0xEE100000 /* APB-1 */
+#include "regs-uart.h"
+#define PKUNITY_I2C_BASE 0xEE200000 /* APB-2 */
+#include "regs-i2c.h"
+#define PKUNITY_SPI_BASE 0xEE300000 /* APB-3 */
+#include "regs-spi.h"
+#define PKUNITY_AC97_BASE 0xEE400000 /* APB-4 */
+#include "regs-ac97.h"
+#define PKUNITY_GPIO_BASE 0xEE500000 /* APB-5 */
+#include "regs-gpio.h"
+#define PKUNITY_INTC_BASE 0xEE600000 /* APB-6 */
+#include "regs-intc.h"
+#define PKUNITY_RTC_BASE 0xEE700000 /* APB-7 */
+#include "regs-rtc.h"
+#define PKUNITY_OST_BASE 0xEE800000 /* APB-8 */
+#include "regs-ost.h"
+#define PKUNITY_RESETC_BASE 0xEE900000 /* APB-9 */
+#include "regs-resetc.h"
+#define PKUNITY_PM_BASE 0xEEA00000 /* APB-10 */
+#include "regs-pm.h"
+#define PKUNITY_PS2_BASE 0xEEB00000 /* APB-11 */
+#include "regs-ps2.h"
+#define PKUNITY_SDC_BASE 0xEEC00000 /* APB-12 */
+#include "regs-sdc.h"
+
diff --git a/arch/unicore32/include/mach/bitfield.h b/arch/unicore32/include/mach/bitfield.h
new file mode 100644
index 000000000000..128a70281efc
--- /dev/null
+++ b/arch/unicore32/include/mach/bitfield.h
@@ -0,0 +1,24 @@
+/*
+ * linux/arch/unicore32/include/mach/bitfield.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_BITFIELD_H__
+#define __MACH_PUV3_BITFIELD_H__
+
+#ifndef __ASSEMBLY__
+#define UData(Data) ((unsigned long) (Data))
+#else
+#define UData(Data) (Data)
+#endif
+
+#define FIELD(val, vmask, vshift) (((val) & ((UData(1) << (vmask)) - 1)) << (vshift))
+#define FMASK(vmask, vshift) (((UData(1) << (vmask)) - 1) << (vshift))
+
+#endif /* __MACH_PUV3_BITFIELD_H__ */
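[Editor's illustration, not part of the patch] A worked example of the FIELD()/FMASK() helpers above: placing a 3-bit value at bit 9 and building the matching mask, as the regs-*.h headers do throughout.

#include <stdio.h>

#define UData(Data)	((unsigned long) (Data))
#define FIELD(val, vmask, vshift) \
	(((val) & ((UData(1) << (vmask)) - 1)) << (vshift))
#define FMASK(vmask, vshift)	(((UData(1) << (vmask)) - 1) << (vshift))

int main(void)
{
	/* 3-bit field at bit 9: value 5 -> 0xa00, mask -> 0xe00 */
	printf("field=%#lx mask=%#lx\n", FIELD(5, 3, 9), FMASK(3, 9));
	return 0;
}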
diff --git a/arch/unicore32/include/mach/dma.h b/arch/unicore32/include/mach/dma.h
new file mode 100644
index 000000000000..d655c1b6e083
--- /dev/null
+++ b/arch/unicore32/include/mach/dma.h
@@ -0,0 +1,48 @@
+/*
+ * linux/arch/unicore32/include/mach/dma.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_DMA_H__
+#define __MACH_PUV3_DMA_H__
+
+/*
+ * The PKUnity has six internal DMA channels.
+ */
+#define MAX_DMA_CHANNELS 6
+
+typedef enum {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+} puv3_dma_prio;
+
+/*
+ * DMA registration
+ */
+
+extern int puv3_request_dma(char *name,
+ puv3_dma_prio prio,
+ void (*irq_handler)(int, void *),
+ void (*err_handler)(int, void *),
+ void *data);
+
+extern void puv3_free_dma(int dma_ch);
+
+static inline void puv3_stop_dma(int ch)
+{
+ writel(readl(DMAC_CONFIG(ch)) & ~DMAC_CONFIG_EN, DMAC_CONFIG(ch));
+}
+
+static inline void puv3_resume_dma(int ch)
+{
+ writel(readl(DMAC_CONFIG(ch)) | DMAC_CONFIG_EN, DMAC_CONFIG(ch));
+}
+
+#endif /* __MACH_PUV3_DMA_H__ */
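[Editor's illustration, not part of the patch] puv3_stop_dma()/puv3_resume_dma() above are read-modify-write operations on the enable bit of the channel's CONFIG register. A standalone sketch of that bit manipulation on a plain variable; DMAC_CONFIG_EN is bit 0 per regs-dmac.h, and the initial register value is made up:

#include <stdio.h>

#define DMAC_CONFIG_EN	(1UL << 0)	/* FIELD(1, 1, 0) in regs-dmac.h */

int main(void)
{
	unsigned long config = 0x00000801UL;	/* some running channel, EN set */

	config &= ~DMAC_CONFIG_EN;		/* what puv3_stop_dma() does */
	printf("stopped: %#lx\n", config);

	config |= DMAC_CONFIG_EN;		/* what puv3_resume_dma() does */
	printf("resumed: %#lx\n", config);
	return 0;
}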
diff --git a/arch/unicore32/include/mach/hardware.h b/arch/unicore32/include/mach/hardware.h
new file mode 100644
index 000000000000..b197b0bfc53b
--- /dev/null
+++ b/arch/unicore32/include/mach/hardware.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/unicore32/include/mach/hardware.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the hardware definitions for PKUnity architecture
+ */
+
+#ifndef __MACH_PUV3_HARDWARE_H__
+#define __MACH_PUV3_HARDWARE_H__
+
+#include "PKUnity.h"
+
+#ifndef __ASSEMBLY__
+#define io_p2v(x) (void __iomem *)((x) - PKUNITY_MMIO_BASE)
+#else
+#define io_p2v(x) ((x) - PKUNITY_MMIO_BASE)
+#endif
+
+#define PCIBIOS_MIN_IO 0x4000 /* should be lower than 64KB */
+#define PCIBIOS_MIN_MEM PKUNITY_PCIMEM_BASE
+
+/*
+ * We override the standard dma-mask routines for bouncing.
+ */
+#define HAVE_ARCH_PCI_SET_DMA_MASK
+
+#define pcibios_assign_all_busses() 1
+
+#endif /* __MACH_PUV3_HARDWARE_H__ */
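[Editor's illustration, not part of the patch] io_p2v() above maps an MMIO physical address to its virtual address by subtracting PKUNITY_MMIO_BASE. A standalone arithmetic sketch using the UART0 base from PKUnity.h (addresses copied from that header):

#include <stdio.h>

#define PKUNITY_MMIO_BASE	0x80000000UL
#define PKUNITY_UART0_BASE	0xEE000000UL

int main(void)
{
	/* Same arithmetic as the assembly form of io_p2v(PKUNITY_UART0_BASE). */
	printf("uart0 virt=%#lx\n", PKUNITY_UART0_BASE - PKUNITY_MMIO_BASE);
	return 0;
}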
diff --git a/arch/unicore32/include/mach/map.h b/arch/unicore32/include/mach/map.h
new file mode 100644
index 000000000000..55c936573741
--- /dev/null
+++ b/arch/unicore32/include/mach/map.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/unicore32/include/mach/map.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Page table mapping constructs and function prototypes
+ */
+#define MT_DEVICE 0
+#define MT_DEVICE_CACHED 2
+#define MT_KUSER 7
+#define MT_HIGH_VECTORS 8
+#define MT_MEMORY 9
+#define MT_ROM 10
+
diff --git a/arch/unicore32/include/mach/memory.h b/arch/unicore32/include/mach/memory.h
new file mode 100644
index 000000000000..b774eff3ea65
--- /dev/null
+++ b/arch/unicore32/include/mach/memory.h
@@ -0,0 +1,58 @@
+/*
+ * linux/arch/unicore32/include/mach/memory.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_MEMORY_H__
+#define __MACH_PUV3_MEMORY_H__
+
+#include <mach/hardware.h>
+
+/* Physical DRAM offset. */
+#define PHYS_OFFSET UL(0x00000000)
+/* The base address of exception vectors. */
+#define VECTORS_BASE UL(0xffff0000)
+/* The base address of kuser area. */
+#define KUSER_BASE UL(0x80000000)
+
+#ifdef __ASSEMBLY__
+/* The byte offset of the kernel image from the start of RAM. */
+#define KERNEL_IMAGE_START 0x00408000
+#endif
+
+#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
+
+void puv3_pci_adjust_zones(unsigned long *size, unsigned long *holes);
+
+#define arch_adjust_zones(size, holes) \
+ puv3_pci_adjust_zones(size, holes)
+
+#endif
+
+/*
+ * The PCI controller in PKUnity-3 masks the highest 5 bits of the upstream
+ * channel, so DMA allocations must stay within the first 128MB of physical
+ * memory to support PCI devices.
+ */
+#define PCI_DMA_THRESHOLD (PHYS_OFFSET + SZ_128M - 1)
+
+#define is_pcibus_device(dev) (dev && \
+ (strncmp(dev->bus->name, "pci", 3) == 0))
+
+#define __virt_to_pcibus(x) (__virt_to_phys(x) + PKUNITY_PCIAHB_BASE)
+#define __pcibus_to_virt(x) __phys_to_virt((x) - PKUNITY_PCIAHB_BASE)
+
+/* kuser area */
+#define KUSER_VECPAGE_BASE (KUSER_BASE + UL(0x3fff0000))
+#define KUSER_UNIGFX_BASE (PAGE_OFFSET + PKUNITY_UNIGFX_MMAP_BASE)
+/* kuser_vecpage (0xbfff0000) is ro, and vectors page (0xffff0000) is rw */
+#define kuser_vecpage_to_vectors(x) ((x) - (KUSER_VECPAGE_BASE) \
+ + (VECTORS_BASE))
+
+#endif
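[Editor's illustration, not part of the patch] Given the 128MB upstream window described above, a physical address is usable for PCI DMA only if it falls at or below PCI_DMA_THRESHOLD. A standalone sketch of that check, with the SZ_128M constant spelled out:

#include <stdio.h>

#define PHYS_OFFSET		0x00000000UL
#define SZ_128M			0x08000000UL
#define PCI_DMA_THRESHOLD	(PHYS_OFFSET + SZ_128M - 1)

int main(void)
{
	printf("dma ok: %d\n", 0x07fff000UL <= PCI_DMA_THRESHOLD);	/* 1 */
	printf("dma ok: %d\n", 0x08000000UL <= PCI_DMA_THRESHOLD);	/* 0 */
	return 0;
}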
diff --git a/arch/unicore32/include/mach/ocd.h b/arch/unicore32/include/mach/ocd.h
new file mode 100644
index 000000000000..189fd71bfa34
--- /dev/null
+++ b/arch/unicore32/include/mach/ocd.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/unicore32/include/mach/ocd.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_PUV3_OCD_H__
+#define __MACH_PUV3_OCD_H__
+
+#if defined(CONFIG_DEBUG_OCD)
+static inline void ocd_putc(unsigned int c)
+{
+ int status, i = 0x2000000;
+
+ do {
+ if (--i < 0)
+ return;
+
+ asm volatile ("movc %0, p1.c0, #0" : "=r" (status));
+ } while (status & 2);
+
+ asm("movc p1.c1, %0, #1" : : "r" (c));
+}
+
+#define putc(ch) ocd_putc(ch)
+#else
+#define putc(ch)
+#endif
+
+#endif
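[Editor's illustration, not part of the patch] ocd_putc() above polls the OCD status register with a bounded budget so a missing debugger cannot hang the kernel. A standalone sketch of that bounded-poll pattern; fake_status() and bounded_putc() are made-up stand-ins for the movc register accesses:

#include <stdio.h>

/* Made-up stand-in for reading the OCD status register. */
static int fake_status(void)
{
	static int busy = 3;
	return busy-- > 0 ? 2 : 0;	/* bit 1 set while "busy" */
}

static void bounded_putc(char c)
{
	int status, i = 0x2000000;

	do {
		if (--i < 0)
			return;		/* give up instead of spinning forever */
		status = fake_status();
	} while (status & 2);

	putchar(c);			/* stand-in for the movc data write */
}

int main(void)
{
	bounded_putc('A');
	bounded_putc('\n');
	return 0;
}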
diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
new file mode 100644
index 000000000000..4dcd34ae194c
--- /dev/null
+++ b/arch/unicore32/include/mach/pm.h
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/unicore32/include/mach/pm.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __PUV3_PM_H__
+#define __PUV3_PM_H__
+
+#include <linux/suspend.h>
+
+struct puv3_cpu_pm_fns {
+ int save_count;
+ void (*save)(unsigned long *);
+ void (*restore)(unsigned long *);
+ int (*valid)(suspend_state_t state);
+ void (*enter)(suspend_state_t state);
+ int (*prepare)(void);
+ void (*finish)(void);
+};
+
+extern struct puv3_cpu_pm_fns *puv3_cpu_pm_fns;
+
+/* sleep.S */
+extern void puv3_cpu_suspend(unsigned int);
+
+extern void puv3_cpu_resume(void);
+
+extern int puv3_pm_enter(suspend_state_t state);
+
+/* Defined in hibernate_asm.S */
+extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
+
+/* References to section boundaries */
+extern const void __nosave_begin, __nosave_end;
+
+extern struct pbe *restore_pblist;
+#endif
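[Editor's illustration, not part of the patch] A hypothetical sketch of how a board file might populate the puv3_cpu_pm_fns ops declared above; the callback names, bodies, and save_count value are placeholders, not part of this patch:

/* Sketch only: placeholder callbacks for the pm ops declared above. */
static void board_pm_save(unsigned long *sleep_save)		{ }
static void board_pm_restore(unsigned long *sleep_save)	{ }
static int  board_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
static void board_pm_enter(suspend_state_t state)		{ }

static struct puv3_cpu_pm_fns board_pm_fns = {
	.save_count	= 4,		/* assumed number of saved words */
	.save		= board_pm_save,
	.restore	= board_pm_restore,
	.valid		= board_pm_valid,
	.enter		= board_pm_enter,
};

/* e.g. in board init code: puv3_cpu_pm_fns = &board_pm_fns; */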
diff --git a/arch/unicore32/include/mach/regs-ac97.h b/arch/unicore32/include/mach/regs-ac97.h
new file mode 100644
index 000000000000..2d76cc38f413
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-ac97.h
@@ -0,0 +1,32 @@
+/*
+ * PKUnity AC97 Registers
+ */
+
+#define PKUNITY_AC97_CONR io_p2v(PKUNITY_AC97_BASE + 0x0000)
+#define PKUNITY_AC97_OCR io_p2v(PKUNITY_AC97_BASE + 0x0004)
+#define PKUNITY_AC97_ICR io_p2v(PKUNITY_AC97_BASE + 0x0008)
+#define PKUNITY_AC97_CRAC io_p2v(PKUNITY_AC97_BASE + 0x000C)
+#define PKUNITY_AC97_INTR io_p2v(PKUNITY_AC97_BASE + 0x0010)
+#define PKUNITY_AC97_INTRSTAT io_p2v(PKUNITY_AC97_BASE + 0x0014)
+#define PKUNITY_AC97_INTRCLEAR io_p2v(PKUNITY_AC97_BASE + 0x0018)
+#define PKUNITY_AC97_ENABLE io_p2v(PKUNITY_AC97_BASE + 0x001C)
+#define PKUNITY_AC97_OUT_FIFO io_p2v(PKUNITY_AC97_BASE + 0x0020)
+#define PKUNITY_AC97_IN_FIFO io_p2v(PKUNITY_AC97_BASE + 0x0030)
+
+#define AC97_CODEC_REG(v) FIELD((v), 7, 16)
+#define AC97_CODEC_VAL(v) FIELD((v), 16, 0)
+#define AC97_CODEC_WRITECOMPLETE FIELD(1, 1, 2)
+
+/*
+ * VAR PLAY SAMPLE RATE
+ */
+#define AC97_CMD_VPSAMPLE (FIELD(3, 2, 16) | FIELD(3, 2, 0))
+
+/*
+ * FIX CAPTURE SAMPLE RATE
+ */
+#define AC97_CMD_FCSAMPLE FIELD(7, 3, 0)
+
+#define AC97_CMD_RESET FIELD(1, 1, 0)
+#define AC97_CMD_ENABLE FIELD(1, 1, 0)
+#define AC97_CMD_DISABLE FIELD(0, 1, 0)
diff --git a/arch/unicore32/include/mach/regs-dmac.h b/arch/unicore32/include/mach/regs-dmac.h
new file mode 100644
index 000000000000..3553243e8738
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-dmac.h
@@ -0,0 +1,81 @@
+/*
+ * PKUnity Direct Memory Access Controller (DMAC)
+ */
+
+/*
+ * Interrupt Status Reg DMAC_ISR.
+ */
+#define DMAC_ISR io_p2v(PKUNITY_DMAC_BASE + 0x0020)
+/*
+ * Interrupt Transfer Complete Status Reg DMAC_ITCSR.
+ */
+#define DMAC_ITCSR io_p2v(PKUNITY_DMAC_BASE + 0x0050)
+/*
+ * Interrupt Transfer Complete Clear Reg DMAC_ITCCR.
+ */
+#define DMAC_ITCCR io_p2v(PKUNITY_DMAC_BASE + 0x0060)
+/*
+ * Interrupt Error Status Reg DMAC_IESR.
+ */
+#define DMAC_IESR io_p2v(PKUNITY_DMAC_BASE + 0x0080)
+/*
+ * Interrupt Error Clear Reg DMAC_IECR.
+ */
+#define DMAC_IECR io_p2v(PKUNITY_DMAC_BASE + 0x0090)
+/*
+ * Enable Channels Reg DMAC_ENCH.
+ */
+#define DMAC_ENCH io_p2v(PKUNITY_DMAC_BASE + 0x00B0)
+
+/*
+ * Per-channel DMA register spacing, in bytes
+ */
+#define DMASp 0x00000100
+
+/*
+ * Source Addr DMAC_SRCADDR(ch).
+ */
+#define DMAC_SRCADDR(ch) io_p2v(PKUNITY_DMAC_BASE + (ch)*DMASp + 0x00)
+/*
+ * Destination Addr DMAC_DESTADDR(ch).
+ */
+#define DMAC_DESTADDR(ch) io_p2v(PKUNITY_DMAC_BASE + (ch)*DMASp + 0x04)
+/*
+ * Control Reg DMAC_CONTROL(ch).
+ */
+#define DMAC_CONTROL(ch) io_p2v(PKUNITY_DMAC_BASE + (ch)*DMASp + 0x0C)
+/*
+ * Configuration Reg DMAC_CONFIG(ch).
+ */
+#define DMAC_CONFIG(ch) io_p2v(PKUNITY_DMAC_BASE + (ch)*DMASp + 0x10)
+
+#define DMAC_IR_MASK FMASK(6, 0)
+/*
+ * select channel (ch)
+ */
+#define DMAC_CHANNEL(ch) FIELD(1, 1, (ch))
+
+#define DMAC_CONTROL_SIZE_BYTE(v) (FIELD((v), 12, 14) | \
+ FIELD(0, 3, 9) | FIELD(0, 3, 6))
+#define DMAC_CONTROL_SIZE_HWORD(v) (FIELD((v) >> 1, 12, 14) | \
+ FIELD(1, 3, 9) | FIELD(1, 3, 6))
+#define DMAC_CONTROL_SIZE_WORD(v) (FIELD((v) >> 2, 12, 14) | \
+ FIELD(2, 3, 9) | FIELD(2, 3, 6))
+#define DMAC_CONTROL_DI FIELD(1, 1, 13)
+#define DMAC_CONTROL_SI FIELD(1, 1, 12)
+#define DMAC_CONTROL_BURST_1BYTE (FIELD(0, 3, 3) | FIELD(0, 3, 0))
+#define DMAC_CONTROL_BURST_4BYTE (FIELD(3, 3, 3) | FIELD(3, 3, 0))
+#define DMAC_CONTROL_BURST_8BYTE (FIELD(5, 3, 3) | FIELD(5, 3, 0))
+#define DMAC_CONTROL_BURST_16BYTE (FIELD(7, 3, 3) | FIELD(7, 3, 0))
+
+#define DMAC_CONFIG_UART0_WR (FIELD(2, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_UART0_RD (FIELD(2, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_UART1_WR (FIELD(3, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_UART1RD (FIELD(3, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_AC97WR (FIELD(4, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_AC97RD (FIELD(4, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_MMCWR (FIELD(7, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_MMCRD (FIELD(7, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_MASKITC FIELD(1, 1, 4)
+#define DMAC_CONFIG_MASKIE FIELD(1, 1, 3)
+#define DMAC_CONFIG_EN FIELD(1, 1, 0)
diff --git a/arch/unicore32/include/mach/regs-gpio.h b/arch/unicore32/include/mach/regs-gpio.h
new file mode 100644
index 000000000000..241a4731e67b
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-gpio.h
@@ -0,0 +1,70 @@
+/*
+ * PKUnity General-Purpose Input/Output (GPIO) Registers
+ */
+
+/*
+ * Voltage Status Reg GPIO_GPLR.
+ */
+#define GPIO_GPLR io_p2v(PKUNITY_GPIO_BASE + 0x0000)
+/*
+ * Pin Direction Reg GPIO_GPDR.
+ */
+#define GPIO_GPDR io_p2v(PKUNITY_GPIO_BASE + 0x0004)
+/*
+ * Output Pin Set Reg GPIO_GPSR.
+ */
+#define GPIO_GPSR io_p2v(PKUNITY_GPIO_BASE + 0x0008)
+/*
+ * Output Pin Clear Reg GPIO_GPCR.
+ */
+#define GPIO_GPCR io_p2v(PKUNITY_GPIO_BASE + 0x000C)
+/*
+ * Rising Edge Detect Reg GPIO_GRER.
+ */
+#define GPIO_GRER io_p2v(PKUNITY_GPIO_BASE + 0x0010)
+/*
+ * Falling Edge Detect Reg GPIO_GFER.
+ */
+#define GPIO_GFER io_p2v(PKUNITY_GPIO_BASE + 0x0014)
+/*
+ * Edge Status Reg GPIO_GEDR.
+ */
+#define GPIO_GEDR io_p2v(PKUNITY_GPIO_BASE + 0x0018)
+/*
+ * Special Voltage Detect Reg GPIO_GPIR.
+ */
+#define GPIO_GPIR io_p2v(PKUNITY_GPIO_BASE + 0x0020)
+
+#define GPIO_MIN (0)
+#define GPIO_MAX (27)
+
+#define GPIO_GPIO(Nb) (0x00000001 << (Nb)) /* GPIO [0..27] */
+#define GPIO_GPIO0 GPIO_GPIO(0) /* GPIO [0] */
+#define GPIO_GPIO1 GPIO_GPIO(1) /* GPIO [1] */
+#define GPIO_GPIO2 GPIO_GPIO(2) /* GPIO [2] */
+#define GPIO_GPIO3 GPIO_GPIO(3) /* GPIO [3] */
+#define GPIO_GPIO4 GPIO_GPIO(4) /* GPIO [4] */
+#define GPIO_GPIO5 GPIO_GPIO(5) /* GPIO [5] */
+#define GPIO_GPIO6 GPIO_GPIO(6) /* GPIO [6] */
+#define GPIO_GPIO7 GPIO_GPIO(7) /* GPIO [7] */
+#define GPIO_GPIO8 GPIO_GPIO(8) /* GPIO [8] */
+#define GPIO_GPIO9 GPIO_GPIO(9) /* GPIO [9] */
+#define GPIO_GPIO10 GPIO_GPIO(10) /* GPIO [10] */
+#define GPIO_GPIO11 GPIO_GPIO(11) /* GPIO [11] */
+#define GPIO_GPIO12 GPIO_GPIO(12) /* GPIO [12] */
+#define GPIO_GPIO13 GPIO_GPIO(13) /* GPIO [13] */
+#define GPIO_GPIO14 GPIO_GPIO(14) /* GPIO [14] */
+#define GPIO_GPIO15 GPIO_GPIO(15) /* GPIO [15] */
+#define GPIO_GPIO16 GPIO_GPIO(16) /* GPIO [16] */
+#define GPIO_GPIO17 GPIO_GPIO(17) /* GPIO [17] */
+#define GPIO_GPIO18 GPIO_GPIO(18) /* GPIO [18] */
+#define GPIO_GPIO19 GPIO_GPIO(19) /* GPIO [19] */
+#define GPIO_GPIO20 GPIO_GPIO(20) /* GPIO [20] */
+#define GPIO_GPIO21 GPIO_GPIO(21) /* GPIO [21] */
+#define GPIO_GPIO22 GPIO_GPIO(22) /* GPIO [22] */
+#define GPIO_GPIO23 GPIO_GPIO(23) /* GPIO [23] */
+#define GPIO_GPIO24 GPIO_GPIO(24) /* GPIO [24] */
+#define GPIO_GPIO25 GPIO_GPIO(25) /* GPIO [25] */
+#define GPIO_GPIO26 GPIO_GPIO(26) /* GPIO [26] */
+#define GPIO_GPIO27 GPIO_GPIO(27) /* GPIO [27] */
+
diff --git a/arch/unicore32/include/mach/regs-i2c.h b/arch/unicore32/include/mach/regs-i2c.h
new file mode 100644
index 000000000000..a1fe882f7e0f
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-i2c.h
@@ -0,0 +1,63 @@
+/*
+ * PKUnity Inter-integrated Circuit (I2C) Registers
+ */
+
+/*
+ * Control Reg I2C_CON.
+ */
+#define I2C_CON io_p2v(PKUNITY_I2C_BASE + 0x0000)
+/*
+ * Target Address Reg I2C_TAR.
+ */
+#define I2C_TAR io_p2v(PKUNITY_I2C_BASE + 0x0004)
+/*
+ * Data buffer and command Reg I2C_DATACMD.
+ */
+#define I2C_DATACMD io_p2v(PKUNITY_I2C_BASE + 0x0010)
+/*
+ * Enable Reg I2C_ENABLE.
+ */
+#define I2C_ENABLE io_p2v(PKUNITY_I2C_BASE + 0x006C)
+/*
+ * Status Reg I2C_STATUS.
+ */
+#define I2C_STATUS io_p2v(PKUNITY_I2C_BASE + 0x0070)
+/*
+ * Tx FIFO Length Reg I2C_TXFLR.
+ */
+#define I2C_TXFLR io_p2v(PKUNITY_I2C_BASE + 0x0074)
+/*
+ * Rx FIFO Length Reg I2C_RXFLR.
+ */
+#define I2C_RXFLR io_p2v(PKUNITY_I2C_BASE + 0x0078)
+/*
+ * Enable Status Reg I2C_ENSTATUS.
+ */
+#define I2C_ENSTATUS io_p2v(PKUNITY_I2C_BASE + 0x009C)
+
+#define I2C_CON_MASTER FIELD(1, 1, 0)
+#define I2C_CON_SPEED_STD FIELD(1, 2, 1)
+#define I2C_CON_SPEED_FAST FIELD(2, 2, 1)
+#define I2C_CON_RESTART FIELD(1, 1, 5)
+#define I2C_CON_SLAVEDISABLE FIELD(1, 1, 6)
+
+#define I2C_DATACMD_READ FIELD(1, 1, 8)
+#define I2C_DATACMD_WRITE FIELD(0, 1, 8)
+#define I2C_DATACMD_DAT_MASK FMASK(8, 0)
+#define I2C_DATACMD_DAT(v) FIELD((v), 8, 0)
+
+#define I2C_ENABLE_ENABLE FIELD(1, 1, 0)
+#define I2C_ENABLE_DISABLE FIELD(0, 1, 0)
+
+#define I2C_STATUS_RFF FIELD(1, 1, 4)
+#define I2C_STATUS_RFNE FIELD(1, 1, 3)
+#define I2C_STATUS_TFE FIELD(1, 1, 2)
+#define I2C_STATUS_TFNF FIELD(1, 1, 1)
+#define I2C_STATUS_ACTIVITY FIELD(1, 1, 0)
+
+#define I2C_ENSTATUS_ENABLE FIELD(1, 1, 0)
+
+#define I2C_TAR_THERMAL 0x4f
+#define I2C_TAR_SPD 0x50
+#define I2C_TAR_PWIC 0x55
+#define I2C_TAR_EEPROM 0x57
diff --git a/arch/unicore32/include/mach/regs-intc.h b/arch/unicore32/include/mach/regs-intc.h
new file mode 100644
index 000000000000..de828d000cf9
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-intc.h
@@ -0,0 +1,28 @@
+/*
+ * PKUNITY Interrupt Controller (INTC) Registers
+ */
+/*
+ * INTC Level Reg INTC_ICLR.
+ */
+#define INTC_ICLR io_p2v(PKUNITY_INTC_BASE + 0x0000)
+/*
+ * INTC Mask Reg INTC_ICMR.
+ */
+#define INTC_ICMR io_p2v(PKUNITY_INTC_BASE + 0x0004)
+/*
+ * INTC Pending Reg INTC_ICPR.
+ */
+#define INTC_ICPR io_p2v(PKUNITY_INTC_BASE + 0x0008)
+/*
+ * INTC IRQ Pending Reg INTC_ICIP.
+ */
+#define INTC_ICIP io_p2v(PKUNITY_INTC_BASE + 0x000C)
+/*
+ * INTC REAL Pending Reg INTC_ICFP.
+ */
+#define INTC_ICFP io_p2v(PKUNITY_INTC_BASE + 0x0010)
+/*
+ * INTC Control Reg INTC_ICCR.
+ */
+#define INTC_ICCR io_p2v(PKUNITY_INTC_BASE + 0x0014)
+
diff --git a/arch/unicore32/include/mach/regs-nand.h b/arch/unicore32/include/mach/regs-nand.h
new file mode 100644
index 000000000000..92578df0fc7c
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-nand.h
@@ -0,0 +1,79 @@
+/*
+ * PKUnity NAND Controller Registers
+ */
+/*
+ * ID Reg. 0 NAND_IDR0
+ */
+#define NAND_IDR0 io_p2v(PKUNITY_NAND_BASE + 0x0000)
+/*
+ * ID Reg. 1 NAND_IDR1
+ */
+#define NAND_IDR1 io_p2v(PKUNITY_NAND_BASE + 0x0004)
+/*
+ * ID Reg. 2 NAND_IDR2
+ */
+#define NAND_IDR2 io_p2v(PKUNITY_NAND_BASE + 0x0008)
+/*
+ * ID Reg. 3 NAND_IDR3
+ */
+#define NAND_IDR3 io_p2v(PKUNITY_NAND_BASE + 0x000C)
+/*
+ * Page Address Reg 0 NAND_PAR0
+ */
+#define NAND_PAR0 io_p2v(PKUNITY_NAND_BASE + 0x0010)
+/*
+ * Page Address Reg 1 NAND_PAR1
+ */
+#define NAND_PAR1 io_p2v(PKUNITY_NAND_BASE + 0x0014)
+/*
+ * Page Address Reg 2 NAND_PAR2
+ */
+#define NAND_PAR2 io_p2v(PKUNITY_NAND_BASE + 0x0018)
+/*
+ * ECC Enable Reg NAND_ECCEN
+ */
+#define NAND_ECCEN io_p2v(PKUNITY_NAND_BASE + 0x001C)
+/*
+ * Buffer Reg NAND_BUF
+ */
+#define NAND_BUF io_p2v(PKUNITY_NAND_BASE + 0x0020)
+/*
+ * ECC Status Reg NAND_ECCSR
+ */
+#define NAND_ECCSR io_p2v(PKUNITY_NAND_BASE + 0x0024)
+/*
+ * Command Reg NAND_CMD
+ */
+#define NAND_CMD io_p2v(PKUNITY_NAND_BASE + 0x0028)
+/*
+ * DMA Configure Reg NAND_DMACR
+ */
+#define NAND_DMACR io_p2v(PKUNITY_NAND_BASE + 0x002C)
+/*
+ * Interrupt Reg NAND_IR
+ */
+#define NAND_IR io_p2v(PKUNITY_NAND_BASE + 0x0030)
+/*
+ * Interrupt Mask Reg NAND_IMR
+ */
+#define NAND_IMR io_p2v(PKUNITY_NAND_BASE + 0x0034)
+/*
+ * Chip Enable Reg NAND_CHIPEN
+ */
+#define NAND_CHIPEN io_p2v(PKUNITY_NAND_BASE + 0x0038)
+/*
+ * Address Reg NAND_ADDR
+ */
+#define NAND_ADDR io_p2v(PKUNITY_NAND_BASE + 0x003C)
+
+/*
+ * Command bits NAND_CMD_CMD_MASK
+ */
+#define NAND_CMD_CMD_MASK FMASK(4, 4)
+#define NAND_CMD_CMD_READPAGE FIELD(0x0, 4, 4)
+#define NAND_CMD_CMD_ERASEBLOCK FIELD(0x6, 4, 4)
+#define NAND_CMD_CMD_READSTATUS FIELD(0x7, 4, 4)
+#define NAND_CMD_CMD_WRITEPAGE FIELD(0x8, 4, 4)
+#define NAND_CMD_CMD_READID FIELD(0x9, 4, 4)
+#define NAND_CMD_CMD_RESET FIELD(0xf, 4, 4)
+
diff --git a/arch/unicore32/include/mach/regs-ost.h b/arch/unicore32/include/mach/regs-ost.h
new file mode 100644
index 000000000000..72b586ae86d8
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-ost.h
@@ -0,0 +1,92 @@
+/*
+ * PKUnity Operating System Timer (OST) Registers
+ */
+/*
+ * Match Reg 0 OST_OSMR0
+ */
+#define OST_OSMR0 io_p2v(PKUNITY_OST_BASE + 0x0000)
+/*
+ * Match Reg 1 OST_OSMR1
+ */
+#define OST_OSMR1 io_p2v(PKUNITY_OST_BASE + 0x0004)
+/*
+ * Match Reg 2 OST_OSMR2
+ */
+#define OST_OSMR2 io_p2v(PKUNITY_OST_BASE + 0x0008)
+/*
+ * Match Reg 3 OST_OSMR3
+ */
+#define OST_OSMR3 io_p2v(PKUNITY_OST_BASE + 0x000C)
+/*
+ * Counter Reg OST_OSCR
+ */
+#define OST_OSCR io_p2v(PKUNITY_OST_BASE + 0x0010)
+/*
+ * Status Reg OST_OSSR
+ */
+#define OST_OSSR io_p2v(PKUNITY_OST_BASE + 0x0014)
+/*
+ * Watchdog Enable Reg OST_OWER
+ */
+#define OST_OWER io_p2v(PKUNITY_OST_BASE + 0x0018)
+/*
+ * Interrupt Enable Reg OST_OIER
+ */
+#define OST_OIER io_p2v(PKUNITY_OST_BASE + 0x001C)
+/*
+ * PWM Pulse Width Control Reg OST_PWMPWCR
+ */
+#define OST_PWMPWCR io_p2v(PKUNITY_OST_BASE + 0x0080)
+/*
+ * PWM Duty Cycle Control Reg OST_PWMDCCR
+ */
+#define OST_PWMDCCR io_p2v(PKUNITY_OST_BASE + 0x0084)
+/*
+ * PWM Period Control Reg OST_PWMPCR
+ */
+#define OST_PWMPCR io_p2v(PKUNITY_OST_BASE + 0x0088)
+
+/*
+ * Match detected 0 OST_OSSR_M0
+ */
+#define OST_OSSR_M0 FIELD(1, 1, 0)
+/*
+ * Match detected 1 OST_OSSR_M1
+ */
+#define OST_OSSR_M1 FIELD(1, 1, 1)
+/*
+ * Match detected 2 OST_OSSR_M2
+ */
+#define OST_OSSR_M2 FIELD(1, 1, 2)
+/*
+ * Match detected 3 OST_OSSR_M3
+ */
+#define OST_OSSR_M3 FIELD(1, 1, 3)
+
+/*
+ * Interrupt enable 0 OST_OIER_E0
+ */
+#define OST_OIER_E0 FIELD(1, 1, 0)
+/*
+ * Interrupt enable 1 OST_OIER_E1
+ */
+#define OST_OIER_E1 FIELD(1, 1, 1)
+/*
+ * Interrupt enable 2 OST_OIER_E2
+ */
+#define OST_OIER_E2 FIELD(1, 1, 2)
+/*
+ * Interrupt enable 3 OST_OIER_E3
+ */
+#define OST_OIER_E3 FIELD(1, 1, 3)
+
+/*
+ * Watchdog Match Enable OST_OWER_WME
+ */
+#define OST_OWER_WME FIELD(1, 1, 0)
+
+/*
+ * PWM Full Duty Cycle OST_PWMDCCR_FDCYCLE
+ */
+#define OST_PWMDCCR_FDCYCLE FIELD(1, 1, 10)
+
diff --git a/arch/unicore32/include/mach/regs-pci.h b/arch/unicore32/include/mach/regs-pci.h
new file mode 100644
index 000000000000..97db16885626
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-pci.h
@@ -0,0 +1,94 @@
+/*
+ * PKUnity AHB-PCI Bridge Registers
+ */
+
+/*
+ * AHB/PCI fixed physical address for PCI address configuration
+ */
+/*
+ * PCICFG Bridge Base Reg.
+ */
+#define PCICFG_BRIBASE io_p2v(PKUNITY_PCICFG_BASE + 0x0000)
+/*
+ * PCICFG Address Reg.
+ */
+#define PCICFG_ADDR io_p2v(PKUNITY_PCICFG_BASE + 0x0004)
+/*
+ * PCICFG Data Reg.
+ */
+#define PCICFG_DATA io_p2v(PKUNITY_PCICFG_BASE + 0x0008)
+
+/*
+ * PCI Bridge configuration space
+ */
+#define PCIBRI_ID io_p2v(PKUNITY_PCIBRI_BASE + 0x0000)
+#define PCIBRI_CMD io_p2v(PKUNITY_PCIBRI_BASE + 0x0004)
+#define PCIBRI_CLASS io_p2v(PKUNITY_PCIBRI_BASE + 0x0008)
+#define PCIBRI_LTR io_p2v(PKUNITY_PCIBRI_BASE + 0x000C)
+#define PCIBRI_BAR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0010)
+#define PCIBRI_BAR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0014)
+#define PCIBRI_BAR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x0018)
+#define PCIBRI_BAR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x001C)
+#define PCIBRI_BAR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x0020)
+#define PCIBRI_BAR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x0024)
+
+#define PCIBRI_PCICTL0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0100)
+#define PCIBRI_PCIBAR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0104)
+#define PCIBRI_PCIAMR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0108)
+#define PCIBRI_PCITAR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x010C)
+#define PCIBRI_PCICTL1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0110)
+#define PCIBRI_PCIBAR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0114)
+#define PCIBRI_PCIAMR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0118)
+#define PCIBRI_PCITAR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x011C)
+#define PCIBRI_PCICTL2 io_p2v(PKUNITY_PCIBRI_BASE + 0x0120)
+#define PCIBRI_PCIBAR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x0124)
+#define PCIBRI_PCIAMR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x0128)
+#define PCIBRI_PCITAR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x012C)
+#define PCIBRI_PCICTL3 io_p2v(PKUNITY_PCIBRI_BASE + 0x0130)
+#define PCIBRI_PCIBAR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x0134)
+#define PCIBRI_PCIAMR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x0138)
+#define PCIBRI_PCITAR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x013C)
+#define PCIBRI_PCICTL4 io_p2v(PKUNITY_PCIBRI_BASE + 0x0140)
+#define PCIBRI_PCIBAR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x0144)
+#define PCIBRI_PCIAMR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x0148)
+#define PCIBRI_PCITAR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x014C)
+#define PCIBRI_PCICTL5 io_p2v(PKUNITY_PCIBRI_BASE + 0x0150)
+#define PCIBRI_PCIBAR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x0154)
+#define PCIBRI_PCIAMR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x0158)
+#define PCIBRI_PCITAR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x015C)
+
+#define PCIBRI_AHBCTL0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0180)
+#define PCIBRI_AHBBAR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0184)
+#define PCIBRI_AHBAMR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x0188)
+#define PCIBRI_AHBTAR0 io_p2v(PKUNITY_PCIBRI_BASE + 0x018C)
+#define PCIBRI_AHBCTL1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0190)
+#define PCIBRI_AHBBAR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0194)
+#define PCIBRI_AHBAMR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x0198)
+#define PCIBRI_AHBTAR1 io_p2v(PKUNITY_PCIBRI_BASE + 0x019C)
+#define PCIBRI_AHBCTL2 io_p2v(PKUNITY_PCIBRI_BASE + 0x01A0)
+#define PCIBRI_AHBBAR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x01A4)
+#define PCIBRI_AHBAMR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x01A8)
+#define PCIBRI_AHBTAR2 io_p2v(PKUNITY_PCIBRI_BASE + 0x01AC)
+#define PCIBRI_AHBCTL3 io_p2v(PKUNITY_PCIBRI_BASE + 0x01B0)
+#define PCIBRI_AHBBAR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x01B4)
+#define PCIBRI_AHBAMR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x01B8)
+#define PCIBRI_AHBTAR3 io_p2v(PKUNITY_PCIBRI_BASE + 0x01BC)
+#define PCIBRI_AHBCTL4 io_p2v(PKUNITY_PCIBRI_BASE + 0x01C0)
+#define PCIBRI_AHBBAR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x01C4)
+#define PCIBRI_AHBAMR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x01C8)
+#define PCIBRI_AHBTAR4 io_p2v(PKUNITY_PCIBRI_BASE + 0x01CC)
+#define PCIBRI_AHBCTL5 io_p2v(PKUNITY_PCIBRI_BASE + 0x01D0)
+#define PCIBRI_AHBBAR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x01D4)
+#define PCIBRI_AHBAMR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x01D8)
+#define PCIBRI_AHBTAR5 io_p2v(PKUNITY_PCIBRI_BASE + 0x01DC)
+
+#define PCIBRI_CTLx_AT FIELD(1, 1, 2)
+#define PCIBRI_CTLx_PREF FIELD(1, 1, 1)
+#define PCIBRI_CTLx_MRL FIELD(1, 1, 0)
+
+#define PCIBRI_BARx_ADDR FIELD(0xFFFFFFFC, 30, 2)
+#define PCIBRI_BARx_IO FIELD(1, 1, 0)
+#define PCIBRI_BARx_MEM FIELD(0, 1, 0)
+
+#define PCIBRI_CMD_IO FIELD(1, 1, 0)
+#define PCIBRI_CMD_MEM FIELD(1, 1, 1)
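The PCICFG_BRIBASE/PCICFG_ADDR/PCICFG_DATA trio implements the usual indirect access to PCI configuration space: the bridge code writes an encoded bus/device/function/register number to PCICFG_ADDR, then reads or writes the dword through PCICFG_DATA. A minimal sketch follows; the address encoding shown is an assumption for illustration, the authoritative layout lives in the bridge hardware and the arch's pci.c:

    /* hypothetical encoding: bus in [23:16], devfn in [15:8], dword-aligned offset in [7:2] */
    static u32 pkunity_conf_read(unsigned int busnr, unsigned int devfn, int where)
    {
            writel((busnr << 16) | (devfn << 8) | (where & ~3), PCICFG_ADDR);
            return readl(PCICFG_DATA);
    }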
diff --git a/arch/unicore32/include/mach/regs-pm.h b/arch/unicore32/include/mach/regs-pm.h
new file mode 100644
index 000000000000..8274b87e3b12
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-pm.h
@@ -0,0 +1,126 @@
+/*
+ * PKUnity Power Manager (PM) Registers
+ */
+/*
+ * PM Control Reg PM_PMCR
+ */
+#define PM_PMCR io_p2v(PKUNITY_PM_BASE + 0x0000)
+/*
+ * PM General Conf. Reg PM_PGCR
+ */
+#define PM_PGCR io_p2v(PKUNITY_PM_BASE + 0x0004)
+/*
+ * PM PLL Conf. Reg PM_PPCR
+ */
+#define PM_PPCR io_p2v(PKUNITY_PM_BASE + 0x0008)
+/*
+ * PM Wakeup Enable Reg PM_PWER
+ */
+#define PM_PWER io_p2v(PKUNITY_PM_BASE + 0x000C)
+/*
+ * PM GPIO Sleep Status Reg PM_PGSR
+ */
+#define PM_PGSR io_p2v(PKUNITY_PM_BASE + 0x0010)
+/*
+ * PM Clock Gate Reg PM_PCGR
+ */
+#define PM_PCGR io_p2v(PKUNITY_PM_BASE + 0x0014)
+/*
+ * PM SYS PLL Conf. Reg PM_PLLSYSCFG
+ */
+#define PM_PLLSYSCFG io_p2v(PKUNITY_PM_BASE + 0x0018)
+/*
+ * PM DDR PLL Conf. Reg PM_PLLDDRCFG
+ */
+#define PM_PLLDDRCFG io_p2v(PKUNITY_PM_BASE + 0x001C)
+/*
+ * PM VGA PLL Conf. Reg PM_PLLVGACFG
+ */
+#define PM_PLLVGACFG io_p2v(PKUNITY_PM_BASE + 0x0020)
+/*
+ * PM Div Conf. Reg PM_DIVCFG
+ */
+#define PM_DIVCFG io_p2v(PKUNITY_PM_BASE + 0x0024)
+/*
+ * PM SYS PLL Status Reg PM_PLLSYSSTATUS
+ */
+#define PM_PLLSYSSTATUS io_p2v(PKUNITY_PM_BASE + 0x0028)
+/*
+ * PM DDR PLL Status Reg PM_PLLDDRSTATUS
+ */
+#define PM_PLLDDRSTATUS io_p2v(PKUNITY_PM_BASE + 0x002C)
+/*
+ * PM VGA PLL Status Reg PM_PLLVGASTATUS
+ */
+#define PM_PLLVGASTATUS io_p2v(PKUNITY_PM_BASE + 0x0030)
+/*
+ * PM Div Status Reg PM_DIVSTATUS
+ */
+#define PM_DIVSTATUS io_p2v(PKUNITY_PM_BASE + 0x0034)
+/*
+ * PM Software Reset Reg PM_SWRESET
+ */
+#define PM_SWRESET io_p2v(PKUNITY_PM_BASE + 0x0038)
+/*
+ * PM DDR2 PAD Start Reg PM_DDR2START
+ */
+#define PM_DDR2START io_p2v(PKUNITY_PM_BASE + 0x003C)
+/*
+ * PM DDR2 PAD Status Reg PM_DDR2CAL0
+ */
+#define PM_DDR2CAL0 io_p2v(PKUNITY_PM_BASE + 0x0040)
+/*
+ * PM PLL DFC Done Reg PM_PLLDFCDONE
+ */
+#define PM_PLLDFCDONE io_p2v(PKUNITY_PM_BASE + 0x0044)
+
+#define PM_PMCR_SFB FIELD(1, 1, 0)
+#define PM_PMCR_IFB FIELD(1, 1, 1)
+#define PM_PMCR_CFBSYS FIELD(1, 1, 2)
+#define PM_PMCR_CFBDDR FIELD(1, 1, 3)
+#define PM_PMCR_CFBVGA FIELD(1, 1, 4)
+#define PM_PMCR_CFBDIVBCLK FIELD(1, 1, 5)
+
+/*
+ * GPIO 8~27 wake-up enable PM_PWER_GPIOHIGH
+ */
+#define PM_PWER_GPIOHIGH FIELD(1, 1, 8)
+/*
+ * RTC alarm wake-up enable PM_PWER_RTC
+ */
+#define PM_PWER_RTC FIELD(1, 1, 31)
+
+#define PM_PCGR_BCLK64DDR FIELD(1, 1, 0)
+#define PM_PCGR_BCLK64VGA FIELD(1, 1, 1)
+#define PM_PCGR_BCLKDDR FIELD(1, 1, 2)
+#define PM_PCGR_BCLKPCI FIELD(1, 1, 4)
+#define PM_PCGR_BCLKDMAC FIELD(1, 1, 5)
+#define PM_PCGR_BCLKUMAL FIELD(1, 1, 6)
+#define PM_PCGR_BCLKUSB FIELD(1, 1, 7)
+#define PM_PCGR_BCLKMME FIELD(1, 1, 10)
+#define PM_PCGR_BCLKNAND FIELD(1, 1, 11)
+#define PM_PCGR_BCLKH264E FIELD(1, 1, 12)
+#define PM_PCGR_BCLKVGA FIELD(1, 1, 13)
+#define PM_PCGR_BCLKH264D FIELD(1, 1, 14)
+#define PM_PCGR_VECLK FIELD(1, 1, 15)
+#define PM_PCGR_HECLK FIELD(1, 1, 16)
+#define PM_PCGR_HDCLK FIELD(1, 1, 17)
+#define PM_PCGR_NANDCLK FIELD(1, 1, 18)
+#define PM_PCGR_GECLK FIELD(1, 1, 19)
+#define PM_PCGR_VGACLK FIELD(1, 1, 20)
+#define PM_PCGR_PCICLK FIELD(1, 1, 21)
+#define PM_PCGR_SATACLK FIELD(1, 1, 25)
+
+/*
+ * [23:20] PM_DIVCFG_VGACLK(v)
+ */
+#define PM_DIVCFG_VGACLK_MASK FMASK(4, 20)
+#define PM_DIVCFG_VGACLK(v) FIELD((v), 4, 20)
+
+#define PM_SWRESET_USB FIELD(1, 1, 6)
+#define PM_SWRESET_VGADIV FIELD(1, 1, 26)
+#define PM_SWRESET_GEDIV FIELD(1, 1, 27)
+
+#define PM_PLLDFCDONE_SYSDFC FIELD(1, 1, 0)
+#define PM_PLLDFCDONE_DDRDFC FIELD(1, 1, 1)
+#define PM_PLLDFCDONE_VGADFC FIELD(1, 1, 2)
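The single-bit PM_PCGR_* fields act as per-peripheral bus-clock gates, and drivers toggle them with a read-modify-write of PM_PCGR, as clock.c does later in this patch. A one-line sketch, using the USB gate purely as an example:

    writel(readl(PM_PCGR) | PM_PCGR_BCLKUSB, PM_PCGR);  /* ungate the USB bus clock */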
diff --git a/arch/unicore32/include/mach/regs-ps2.h b/arch/unicore32/include/mach/regs-ps2.h
new file mode 100644
index 000000000000..725841ec0ce1
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-ps2.h
@@ -0,0 +1,20 @@
+/*
+ * PKUnity PS2 Controller Registers
+ */
+/*
+ * the same as I8042_DATA_REG PS2_DATA
+ */
+#define PS2_DATA io_p2v(PKUNITY_PS2_BASE + 0x0060)
+/*
+ * the same as I8042_COMMAND_REG PS2_COMMAND
+ */
+#define PS2_COMMAND io_p2v(PKUNITY_PS2_BASE + 0x0064)
+/*
+ * the same as I8042_STATUS_REG PS2_STATUS
+ */
+#define PS2_STATUS io_p2v(PKUNITY_PS2_BASE + 0x0064)
+/*
+ * counter reg PS2_CNT
+ */
+#define PS2_CNT io_p2v(PKUNITY_PS2_BASE + 0x0068)
+
diff --git a/arch/unicore32/include/mach/regs-resetc.h b/arch/unicore32/include/mach/regs-resetc.h
new file mode 100644
index 000000000000..8b0629c088d1
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-resetc.h
@@ -0,0 +1,34 @@
+/*
+ * PKUnity Reset Controller (RC) Registers
+ */
+/*
+ * Software Reset Register
+ */
+#define RESETC_SWRR io_p2v(PKUNITY_RESETC_BASE + 0x0000)
+/*
+ * Reset Status Register
+ */
+#define RESETC_RSSR io_p2v(PKUNITY_RESETC_BASE + 0x0004)
+
+/*
+ * Software Reset Bit
+ */
+#define RESETC_SWRR_SRB FIELD(1, 1, 0)
+
+/*
+ * Hardware Reset
+ */
+#define RESETC_RSSR_HWR FIELD(1, 1, 0)
+/*
+ * Software Reset
+ */
+#define RESETC_RSSR_SWR FIELD(1, 1, 1)
+/*
+ * Watchdog Reset
+ */
+#define RESETC_RSSR_WDR FIELD(1, 1, 2)
+/*
+ * Sleep Mode Reset
+ */
+#define RESETC_RSSR_SMR FIELD(1, 1, 3)
+
diff --git a/arch/unicore32/include/mach/regs-rtc.h b/arch/unicore32/include/mach/regs-rtc.h
new file mode 100644
index 000000000000..b70bc164e01c
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-rtc.h
@@ -0,0 +1,37 @@
+/*
+ * PKUnity Real-Time Clock (RTC) control registers
+ */
+/*
+ * RTC Alarm Reg RTC_RTAR
+ */
+#define RTC_RTAR io_p2v(PKUNITY_RTC_BASE + 0x0000)
+/*
+ * RTC Count Reg RTC_RCNR
+ */
+#define RTC_RCNR io_p2v(PKUNITY_RTC_BASE + 0x0004)
+/*
+ * RTC Trim Reg RTC_RTTR
+ */
+#define RTC_RTTR io_p2v(PKUNITY_RTC_BASE + 0x0008)
+/*
+ * RTC Status Reg RTC_RTSR
+ */
+#define RTC_RTSR io_p2v(PKUNITY_RTC_BASE + 0x0010)
+
+/*
+ * Alarm detected RTC_RTSR_AL
+ */
+#define RTC_RTSR_AL FIELD(1, 1, 0)
+/*
+ * 1 Hz clock detected RTC_RTSR_HZ
+ */
+#define RTC_RTSR_HZ FIELD(1, 1, 1)
+/*
+ * Alarm interrupt Enable RTC_RTSR_ALE
+ */
+#define RTC_RTSR_ALE FIELD(1, 1, 2)
+/*
+ * 1 Hz clock interrupt Enable RTC_RTSR_HZE
+ */
+#define RTC_RTSR_HZE FIELD(1, 1, 3)
+
diff --git a/arch/unicore32/include/mach/regs-sdc.h b/arch/unicore32/include/mach/regs-sdc.h
new file mode 100644
index 000000000000..47f56e5865f3
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-sdc.h
@@ -0,0 +1,156 @@
+/*
+ * PKUnity Multi-Media Card and Secure Digital Card (MMC/SD) Registers
+ */
+/*
+ * Clock Control Reg SDC_CCR
+ */
+#define SDC_CCR io_p2v(PKUNITY_SDC_BASE + 0x0000)
+/*
+ * Software Reset Reg SDC_SRR
+ */
+#define SDC_SRR io_p2v(PKUNITY_SDC_BASE + 0x0004)
+/*
+ * Argument Reg SDC_ARGUMENT
+ */
+#define SDC_ARGUMENT io_p2v(PKUNITY_SDC_BASE + 0x0008)
+/*
+ * Command Reg SDC_COMMAND
+ */
+#define SDC_COMMAND io_p2v(PKUNITY_SDC_BASE + 0x000C)
+/*
+ * Block Size Reg SDC_BLOCKSIZE
+ */
+#define SDC_BLOCKSIZE io_p2v(PKUNITY_SDC_BASE + 0x0010)
+/*
+ * Block Count Reg SDC_BLOCKCOUNT
+ */
+#define SDC_BLOCKCOUNT io_p2v(PKUNITY_SDC_BASE + 0x0014)
+/*
+ * Transfer Mode Reg SDC_TMR
+ */
+#define SDC_TMR io_p2v(PKUNITY_SDC_BASE + 0x0018)
+/*
+ * Response Reg. 0 SDC_RES0
+ */
+#define SDC_RES0 io_p2v(PKUNITY_SDC_BASE + 0x001C)
+/*
+ * Response Reg. 1 SDC_RES1
+ */
+#define SDC_RES1 io_p2v(PKUNITY_SDC_BASE + 0x0020)
+/*
+ * Response Reg. 2 SDC_RES2
+ */
+#define SDC_RES2 io_p2v(PKUNITY_SDC_BASE + 0x0024)
+/*
+ * Response Reg. 3 SDC_RES3
+ */
+#define SDC_RES3 io_p2v(PKUNITY_SDC_BASE + 0x0028)
+/*
+ * Read Timeout Control Reg SDC_RTCR
+ */
+#define SDC_RTCR io_p2v(PKUNITY_SDC_BASE + 0x002C)
+/*
+ * Interrupt Status Reg SDC_ISR
+ */
+#define SDC_ISR io_p2v(PKUNITY_SDC_BASE + 0x0030)
+/*
+ * Interrupt Status Mask Reg SDC_ISMR
+ */
+#define SDC_ISMR io_p2v(PKUNITY_SDC_BASE + 0x0034)
+/*
+ * RX FIFO SDC_RXFIFO
+ */
+#define SDC_RXFIFO io_p2v(PKUNITY_SDC_BASE + 0x0038)
+/*
+ * TX FIFO SDC_TXFIFO
+ */
+#define SDC_TXFIFO io_p2v(PKUNITY_SDC_BASE + 0x003C)
+
+/*
+ * SD Clock Enable SDC_CCR_CLKEN
+ */
+#define SDC_CCR_CLKEN FIELD(1, 1, 2)
+/*
+ * [15:8] SDC_CCR_PDIV(v)
+ */
+#define SDC_CCR_PDIV(v) FIELD((v), 8, 8)
+
+/*
+ * Software reset enable SDC_SRR_ENABLE
+ */
+#define SDC_SRR_ENABLE FIELD(0, 1, 0)
+/*
+ * Software reset disable SDC_SRR_DISABLE
+ */
+#define SDC_SRR_DISABLE FIELD(1, 1, 0)
+
+/*
+ * Response type SDC_COMMAND_RESTYPE_MASK
+ */
+#define SDC_COMMAND_RESTYPE_MASK FMASK(2, 0)
+/*
+ * No response SDC_COMMAND_RESTYPE_NONE
+ */
+#define SDC_COMMAND_RESTYPE_NONE FIELD(0, 2, 0)
+/*
+ * 136-bit long response SDC_COMMAND_RESTYPE_LONG
+ */
+#define SDC_COMMAND_RESTYPE_LONG FIELD(1, 2, 0)
+/*
+ * 48-bit short response SDC_COMMAND_RESTYPE_SHORT
+ */
+#define SDC_COMMAND_RESTYPE_SHORT FIELD(2, 2, 0)
+/*
+ * 48-bit short response with busy check SDC_COMMAND_RESTYPE_SHORTBUSY
+ */
+#define SDC_COMMAND_RESTYPE_SHORTBUSY FIELD(3, 2, 0)
+/*
+ * data ready SDC_COMMAND_DATAREADY
+ */
+#define SDC_COMMAND_DATAREADY FIELD(1, 1, 2)
+#define SDC_COMMAND_CMDEN FIELD(1, 1, 3)
+/*
+ * [10:5] SDC_COMMAND_CMDINDEX(v)
+ */
+#define SDC_COMMAND_CMDINDEX(v) FIELD((v), 6, 5)
+
+/*
+ * [10:0] SDC_BLOCKSIZE_BSMASK(v)
+ */
+#define SDC_BLOCKSIZE_BSMASK(v) FIELD((v), 11, 0)
+/*
+ * [11:0] SDC_BLOCKCOUNT_BCMASK(v)
+ */
+#define SDC_BLOCKCOUNT_BCMASK(v) FIELD((v), 12, 0)
+
+/*
+ * Data Width 1bit SDC_TMR_WTH_1BIT
+ */
+#define SDC_TMR_WTH_1BIT FIELD(0, 1, 0)
+/*
+ * Data Width 4bit SDC_TMR_WTH_4BIT
+ */
+#define SDC_TMR_WTH_4BIT FIELD(1, 1, 0)
+/*
+ * Read SDC_TMR_DIR_READ
+ */
+#define SDC_TMR_DIR_READ FIELD(0, 1, 1)
+/*
+ * Write SDC_TMR_DIR_WRITE
+ */
+#define SDC_TMR_DIR_WRITE FIELD(1, 1, 1)
+
+#define SDC_IR_MASK FMASK(13, 0)
+#define SDC_IR_RESTIMEOUT FIELD(1, 1, 0)
+#define SDC_IR_WRITECRC FIELD(1, 1, 1)
+#define SDC_IR_READCRC FIELD(1, 1, 2)
+#define SDC_IR_TXFIFOREAD FIELD(1, 1, 3)
+#define SDC_IR_RXFIFOWRITE FIELD(1, 1, 4)
+#define SDC_IR_READTIMEOUT FIELD(1, 1, 5)
+#define SDC_IR_DATACOMPLETE FIELD(1, 1, 6)
+#define SDC_IR_CMDCOMPLETE FIELD(1, 1, 7)
+#define SDC_IR_RXFIFOFULL FIELD(1, 1, 8)
+#define SDC_IR_RXFIFOEMPTY FIELD(1, 1, 9)
+#define SDC_IR_TXFIFOFULL FIELD(1, 1, 10)
+#define SDC_IR_TXFIFOEMPTY FIELD(1, 1, 11)
+#define SDC_IR_ENDCMDWITHRES FIELD(1, 1, 12)
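Issuing an SD/MMC command means loading SDC_ARGUMENT and then composing the command index, response type and enable bits into SDC_COMMAND. A minimal sketch, assuming a single-block read (CMD17) and omitting the block-size and FIFO setup (block_addr is hypothetical):

    writel(block_addr, SDC_ARGUMENT);
    writel(SDC_COMMAND_CMDINDEX(17) | SDC_COMMAND_RESTYPE_SHORT |
           SDC_COMMAND_DATAREADY | SDC_COMMAND_CMDEN, SDC_COMMAND);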
diff --git a/arch/unicore32/include/mach/regs-spi.h b/arch/unicore32/include/mach/regs-spi.h
new file mode 100644
index 000000000000..6afe263c1e46
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-spi.h
@@ -0,0 +1,98 @@
+/*
+ * PKUnity Serial Peripheral Interface (SPI) Registers
+ */
+/*
+ * Control reg. 0 SPI_CR0
+ */
+#define SPI_CR0 io_p2v(PKUNITY_SPI_BASE + 0x0000)
+/*
+ * Control reg. 1 SPI_CR1
+ */
+#define SPI_CR1 io_p2v(PKUNITY_SPI_BASE + 0x0004)
+/*
+ * Enable reg SPI_SSIENR
+ */
+#define SPI_SSIENR io_p2v(PKUNITY_SPI_BASE + 0x0008)
+/*
+ * Status reg SPI_SR
+ */
+#define SPI_SR io_p2v(PKUNITY_SPI_BASE + 0x0028)
+/*
+ * Interrupt Mask reg SPI_IMR
+ */
+#define SPI_IMR io_p2v(PKUNITY_SPI_BASE + 0x002C)
+/*
+ * Interrupt Status reg SPI_ISR
+ */
+#define SPI_ISR io_p2v(PKUNITY_SPI_BASE + 0x0030)
+
+/*
+ * Enable SPI Controller SPI_SSIENR_EN
+ */
+#define SPI_SSIENR_EN FIELD(1, 1, 0)
+
+/*
+ * SPI Busy SPI_SR_BUSY
+ */
+#define SPI_SR_BUSY FIELD(1, 1, 0)
+/*
+ * Transmit FIFO Not Full SPI_SR_TFNF
+ */
+#define SPI_SR_TFNF FIELD(1, 1, 1)
+/*
+ * Transmit FIFO Empty SPI_SR_TFE
+ */
+#define SPI_SR_TFE FIELD(1, 1, 2)
+/*
+ * Receive FIFO Not Empty SPI_SR_RFNE
+ */
+#define SPI_SR_RFNE FIELD(1, 1, 3)
+/*
+ * Receive FIFO Full SPI_SR_RFF
+ */
+#define SPI_SR_RFF FIELD(1, 1, 4)
+
+/*
+ * Trans. FIFO Empty Interrupt Status SPI_ISR_TXEIS
+ */
+#define SPI_ISR_TXEIS FIELD(1, 1, 0)
+/*
+ * Trans. FIFO Overflow Interrupt Status SPI_ISR_TXOIS
+ */
+#define SPI_ISR_TXOIS FIELD(1, 1, 1)
+/*
+ * Receiv. FIFO Underflow Interrupt Status SPI_ISR_RXUIS
+ */
+#define SPI_ISR_RXUIS FIELD(1, 1, 2)
+/*
+ * Receiv. FIFO Overflow Interrupt Status SPI_ISR_RXOIS
+ */
+#define SPI_ISR_RXOIS FIELD(1, 1, 3)
+/*
+ * Receiv. FIFO Full Interrupt Status SPI_ISR_RXFIS
+ */
+#define SPI_ISR_RXFIS FIELD(1, 1, 4)
+#define SPI_ISR_MSTIS FIELD(1, 1, 5)
+
+/*
+ * Trans. FIFO Empty Interrupt Mask SPI_IMR_TXEIM
+ */
+#define SPI_IMR_TXEIM FIELD(1, 1, 0)
+/*
+ * Trans. FIFO Overflow Interrupt Mask SPI_IMR_TXOIM
+ */
+#define SPI_IMR_TXOIM FIELD(1, 1, 1)
+/*
+ * Receiv. FIFO Underflow Interrupt Mask SPI_IMR_RXUIM
+ */
+#define SPI_IMR_RXUIM FIELD(1, 1, 2)
+/*
+ * Receiv. FIFO Overflow Interrupt Mask SPI_IMR_RXOIM
+ */
+#define SPI_IMR_RXOIM FIELD(1, 1, 3)
+/*
+ * Receiv. FIFO Full Interrupt Mask SPI_IMR_RXFIM
+ */
+#define SPI_IMR_RXFIM FIELD(1, 1, 4)
+#define SPI_IMR_MSTIM FIELD(1, 1, 5)
+
diff --git a/arch/unicore32/include/mach/regs-uart.h b/arch/unicore32/include/mach/regs-uart.h
new file mode 100644
index 000000000000..9fa6b1938b77
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-uart.h
@@ -0,0 +1,3 @@
+/*
+ * PKUnity Universal Asynchronous Receiver/Transmitter (UART) Registers
+ */
diff --git a/arch/unicore32/include/mach/regs-umal.h b/arch/unicore32/include/mach/regs-umal.h
new file mode 100644
index 000000000000..35880d62b1c9
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-umal.h
@@ -0,0 +1,229 @@
+/*
+ * PKUnity Ultra Media Access Layer (UMAL) Ethernet MAC Registers
+ */
+
+/* MAC module of UMAL */
+/* UMAL's MAC module includes the G/MII interface, several additional PHY
+ * interfaces, and the MAC control sub-layer, which supports control
+ * frames (e.g. PAUSE frames).
+ */
+/*
+ * TX/RX reset and control UMAL_CFG1
+ */
+#define UMAL_CFG1 io_p2v(PKUNITY_UMAL_BASE + 0x0000)
+/*
+ * MAC interface mode control UMAL_CFG2
+ */
+#define UMAL_CFG2 io_p2v(PKUNITY_UMAL_BASE + 0x0004)
+/*
+ * Inter Packet/Frame Gap UMAL_IPGIFG
+ */
+#define UMAL_IPGIFG io_p2v(PKUNITY_UMAL_BASE + 0x0008)
+/*
+ * Collision retry or backoff UMAL_HALFDUPLEX
+ */
+#define UMAL_HALFDUPLEX io_p2v(PKUNITY_UMAL_BASE + 0x000c)
+/*
+ * Maximum Frame Length UMAL_MAXFRAME
+ */
+#define UMAL_MAXFRAME io_p2v(PKUNITY_UMAL_BASE + 0x0010)
+/*
+ * Test Register UMAL_TESTREG
+ */
+#define UMAL_TESTREG io_p2v(PKUNITY_UMAL_BASE + 0x001c)
+/*
+ * MII Management Configure UMAL_MIICFG
+ */
+#define UMAL_MIICFG io_p2v(PKUNITY_UMAL_BASE + 0x0020)
+/*
+ * MII Management Command UMAL_MIICMD
+ */
+#define UMAL_MIICMD io_p2v(PKUNITY_UMAL_BASE + 0x0024)
+/*
+ * MII Management Address UMAL_MIIADDR
+ */
+#define UMAL_MIIADDR io_p2v(PKUNITY_UMAL_BASE + 0x0028)
+/*
+ * MII Management Control UMAL_MIICTRL
+ */
+#define UMAL_MIICTRL io_p2v(PKUNITY_UMAL_BASE + 0x002c)
+/*
+ * MII Management Status UMAL_MIISTATUS
+ */
+#define UMAL_MIISTATUS io_p2v(PKUNITY_UMAL_BASE + 0x0030)
+/*
+ * MII Management Indicator UMAL_MIIIDCT
+ */
+#define UMAL_MIIIDCT io_p2v(PKUNITY_UMAL_BASE + 0x0034)
+/*
+ * Interface Control UMAL_IFCTRL
+ */
+#define UMAL_IFCTRL io_p2v(PKUNITY_UMAL_BASE + 0x0038)
+/*
+ * Interface Status UMAL_IFSTATUS
+ */
+#define UMAL_IFSTATUS io_p2v(PKUNITY_UMAL_BASE + 0x003c)
+/*
+ * MAC address (high 4 bytes) UMAL_STADDR1
+ */
+#define UMAL_STADDR1 io_p2v(PKUNITY_UMAL_BASE + 0x0040)
+/*
+ * MAC address (low 2 bytes) UMAL_STADDR2
+ */
+#define UMAL_STADDR2 io_p2v(PKUNITY_UMAL_BASE + 0x0044)
+
+/* FIFO MODULE OF UMAL */
+/* UMAL's FIFO module provides data queuing for increased system level
+ * throughput
+ */
+#define UMAL_FIFOCFG0 io_p2v(PKUNITY_UMAL_BASE + 0x0048)
+#define UMAL_FIFOCFG1 io_p2v(PKUNITY_UMAL_BASE + 0x004c)
+#define UMAL_FIFOCFG2 io_p2v(PKUNITY_UMAL_BASE + 0x0050)
+#define UMAL_FIFOCFG3 io_p2v(PKUNITY_UMAL_BASE + 0x0054)
+#define UMAL_FIFOCFG4 io_p2v(PKUNITY_UMAL_BASE + 0x0058)
+#define UMAL_FIFOCFG5 io_p2v(PKUNITY_UMAL_BASE + 0x005c)
+#define UMAL_FIFORAM0 io_p2v(PKUNITY_UMAL_BASE + 0x0060)
+#define UMAL_FIFORAM1 io_p2v(PKUNITY_UMAL_BASE + 0x0064)
+#define UMAL_FIFORAM2 io_p2v(PKUNITY_UMAL_BASE + 0x0068)
+#define UMAL_FIFORAM3 io_p2v(PKUNITY_UMAL_BASE + 0x006c)
+#define UMAL_FIFORAM4 io_p2v(PKUNITY_UMAL_BASE + 0x0070)
+#define UMAL_FIFORAM5 io_p2v(PKUNITY_UMAL_BASE + 0x0074)
+#define UMAL_FIFORAM6 io_p2v(PKUNITY_UMAL_BASE + 0x0078)
+#define UMAL_FIFORAM7 io_p2v(PKUNITY_UMAL_BASE + 0x007c)
+
+/* MAHBE MODULE OF UMAL */
+/* UMAL's MAHBE module interfaces to the host system through 32-bit AHB Master
+ * and Slave ports. Registers within the MAHBE provide control and status
+ * information concerning these transfers.
+ */
+/*
+ * Transmit Control UMAL_DMATxCtrl
+ */
+#define UMAL_DMATxCtrl io_p2v(PKUNITY_UMAL_BASE + 0x0180)
+/*
+ * Pointer to TX Descriptor UMAL_DMATxDescriptor
+ */
+#define UMAL_DMATxDescriptor io_p2v(PKUNITY_UMAL_BASE + 0x0184)
+/*
+ * Status of Tx Packet Transfers UMAL_DMATxStatus
+ */
+#define UMAL_DMATxStatus io_p2v(PKUNITY_UMAL_BASE + 0x0188)
+/*
+ * Receive Control UMAL_DMARxCtrl
+ */
+#define UMAL_DMARxCtrl io_p2v(PKUNITY_UMAL_BASE + 0x018c)
+/*
+ * Pointer to Rx Descriptor UMAL_DMARxDescriptor
+ */
+#define UMAL_DMARxDescriptor io_p2v(PKUNITY_UMAL_BASE + 0x0190)
+/*
+ * Status of Rx Packet Transfers UMAL_DMARxStatus
+ */
+#define UMAL_DMARxStatus io_p2v(PKUNITY_UMAL_BASE + 0x0194)
+/*
+ * Interrupt Mask UMAL_DMAIntrMask
+ */
+#define UMAL_DMAIntrMask io_p2v(PKUNITY_UMAL_BASE + 0x0198)
+/*
+ * Interrupts, read only UMAL_DMAInterrupt
+ */
+#define UMAL_DMAInterrupt io_p2v(PKUNITY_UMAL_BASE + 0x019c)
+
+/*
+ * Commands for UMAL_CFG1 register
+ */
+#define UMAL_CFG1_TXENABLE FIELD(1, 1, 0)
+#define UMAL_CFG1_RXENABLE FIELD(1, 1, 2)
+#define UMAL_CFG1_TXFLOWCTL FIELD(1, 1, 4)
+#define UMAL_CFG1_RXFLOWCTL FIELD(1, 1, 5)
+#define UMAL_CFG1_CONFLPBK FIELD(1, 1, 8)
+#define UMAL_CFG1_RESET FIELD(1, 1, 31)
+#define UMAL_CFG1_CONFFLCTL (MAC_TX_FLOW_CTL | MAC_RX_FLOW_CTL)
+
+/*
+ * Commands for UMAL_CFG2 register
+ */
+#define UMAL_CFG2_FULLDUPLEX FIELD(1, 1, 0)
+#define UMAL_CFG2_CRCENABLE FIELD(1, 1, 1)
+#define UMAL_CFG2_PADCRC FIELD(1, 1, 2)
+#define UMAL_CFG2_LENGTHCHECK FIELD(1, 1, 4)
+#define UMAL_CFG2_MODEMASK FMASK(2, 8)
+#define UMAL_CFG2_NIBBLEMODE FIELD(1, 2, 8)
+#define UMAL_CFG2_BYTEMODE FIELD(2, 2, 8)
+#define UMAL_CFG2_PREAMBLENMASK FMASK(4, 12)
+#define UMAL_CFG2_DEFPREAMBLEN FIELD(7, 4, 12)
+#define UMAL_CFG2_FD100 (UMAL_CFG2_DEFPREAMBLEN | UMAL_CFG2_NIBBLEMODE \
+ | UMAL_CFG2_LENGTHCHECK | UMAL_CFG2_PADCRC \
+ | UMAL_CFG2_CRCENABLE | UMAL_CFG2_FULLDUPLEX)
+#define UMAL_CFG2_FD1000 (UMAL_CFG2_DEFPREAMBLEN | UMAL_CFG2_BYTEMODE \
+ | UMAL_CFG2_LENGTHCHECK | UMAL_CFG2_PADCRC \
+ | UMAL_CFG2_CRCENABLE | UMAL_CFG2_FULLDUPLEX)
+#define UMAL_CFG2_HD100 (UMAL_CFG2_DEFPREAMBLEN | UMAL_CFG2_NIBBLEMODE \
+ | UMAL_CFG2_LENGTHCHECK | UMAL_CFG2_PADCRC \
+ | UMAL_CFG2_CRCENABLE)
+
+/*
+ * Command for UMAL_IFCTRL register
+ */
+#define UMAL_IFCTRL_RESET FIELD(1, 1, 31)
+
+/*
+ * Command for UMAL_MIICFG register
+ */
+#define UMAL_MIICFG_RESET FIELD(1, 1, 31)
+
+/*
+ * Command for UMAL_MIICMD register
+ */
+#define UMAL_MIICMD_READ FIELD(1, 1, 0)
+
+/*
+ * Command for UMAL_MIIIDCT register
+ */
+#define UMAL_MIIIDCT_BUSY FIELD(1, 1, 0)
+#define UMAL_MIIIDCT_NOTVALID FIELD(1, 1, 2)
+
+/*
+ * Commands for the DMATxCtrl register
+ */
+#define UMAL_DMA_Enable FIELD(1, 1, 0)
+
+/*
+ * Command for the DMAIntrMask register
+ */
+#define UMAL_DMAIntrMask_ENABLEHALFWORD FIELD(1, 1, 16)
+
+/*
+ * Command for DMARxStatus
+ */
+#define CLR_RX_BUS_ERR FIELD(1, 1, 3)
+#define CLR_RX_OVERFLOW FIELD(1, 1, 2)
+#define CLR_RX_PKT FIELD(1, 1, 0)
+
+/*
+ * Command for DMATxStatus
+ */
+#define CLR_TX_BUS_ERR FIELD(1, 1, 3)
+#define CLR_TX_UNDERRUN FIELD(1, 1, 1)
+#define CLR_TX_PKT FIELD(1, 1, 0)
+
+/*
+ * Commands for DMAIntrMask and DMAInterrupt register
+ */
+#define INT_RX_MASK FIELD(0xd, 4, 4)
+#define INT_TX_MASK FIELD(0xb, 4, 0)
+
+#define INT_RX_BUS_ERR FIELD(1, 1, 7)
+#define INT_RX_OVERFLOW FIELD(1, 1, 6)
+#define INT_RX_PKT FIELD(1, 1, 4)
+#define INT_TX_BUS_ERR FIELD(1, 1, 3)
+#define INT_TX_UNDERRUN FIELD(1, 1, 1)
+#define INT_TX_PKT FIELD(1, 1, 0)
+
+/*
+ * Macros for UMAL's descriptors
+ */
+#define UMAL_DESC_PACKETSIZE_EMPTY FIELD(1, 1, 31)
+#define UMAL_DESC_PACKETSIZE_NONEMPTY FIELD(0, 1, 31)
+#define UMAL_DESC_PACKETSIZE_SIZEMASK FMASK(12, 0)
+
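Tying the blocks above together: the MAC-control comments describe PAUSE-frame flow control, which is switched on through UMAL_CFG1, and the MAHBE registers are what a driver programs to start descriptor-based DMA. A hedged sketch, with tx_ring_dma standing in as a hypothetical DMA address of a prepared descriptor ring:

    /* enable the MAC with TX/RX flow control (PAUSE frames) */
    writel(UMAL_CFG1_TXENABLE | UMAL_CFG1_RXENABLE |
           UMAL_CFG1_TXFLOWCTL | UMAL_CFG1_RXFLOWCTL, UMAL_CFG1);

    /* hand the ring to the MAHBE and kick transmit DMA */
    writel(tx_ring_dma, UMAL_DMATxDescriptor);
    writel(UMAL_DMA_Enable, UMAL_DMATxCtrl);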
diff --git a/arch/unicore32/include/mach/regs-unigfx.h b/arch/unicore32/include/mach/regs-unigfx.h
new file mode 100644
index 000000000000..a2f1dec2bfab
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-unigfx.h
@@ -0,0 +1,200 @@
+/*
+ * PKUnity UNIGFX Registers
+ */
+
+#define UDE_BASE (PKUNITY_UNIGFX_BASE + 0x1400)
+#define UGE_BASE (PKUNITY_UNIGFX_BASE + 0x0000)
+
+/*
+ * command reg for UNIGFX DE
+ */
+/*
+ * control reg UDE_CFG
+ */
+#define UDE_CFG io_p2v(UDE_BASE + 0x0000)
+/*
+ * framebuffer start address reg UDE_FSA
+ */
+#define UDE_FSA io_p2v(UDE_BASE + 0x0004)
+/*
+ * line size reg UDE_LS
+ */
+#define UDE_LS io_p2v(UDE_BASE + 0x0008)
+/*
+ * pitch size reg UDE_PS
+ */
+#define UDE_PS io_p2v(UDE_BASE + 0x000C)
+/*
+ * horizontal active time reg UDE_HAT
+ */
+#define UDE_HAT io_p2v(UDE_BASE + 0x0010)
+/*
+ * horizontal blank time reg UDE_HBT
+ */
+#define UDE_HBT io_p2v(UDE_BASE + 0x0014)
+/*
+ * horizontal sync time reg UDE_HST
+ */
+#define UDE_HST io_p2v(UDE_BASE + 0x0018)
+/*
+ * vertical active time reg UDE_VAT
+ */
+#define UDE_VAT io_p2v(UDE_BASE + 0x001C)
+/*
+ * vertical blank time reg UDE_VBT
+ */
+#define UDE_VBT io_p2v(UDE_BASE + 0x0020)
+/*
+ * vertical sync time reg UDE_VST
+ */
+#define UDE_VST io_p2v(UDE_BASE + 0x0024)
+/*
+ * cursor position UDE_CXY
+ */
+#define UDE_CXY io_p2v(UDE_BASE + 0x0028)
+/*
+ * cursor foreground color UDE_CC0
+ */
+#define UDE_CC0 io_p2v(UDE_BASE + 0x002C)
+/*
+ * cursor background color UDE_CC1
+ */
+#define UDE_CC1 io_p2v(UDE_BASE + 0x0030)
+/*
+ * video position UDE_VXY
+ */
+#define UDE_VXY io_p2v(UDE_BASE + 0x0034)
+/*
+ * video start address reg UDE_VSA
+ */
+#define UDE_VSA io_p2v(UDE_BASE + 0x0040)
+/*
+ * video size reg UDE_VS
+ */
+#define UDE_VS io_p2v(UDE_BASE + 0x004C)
+
+/*
+ * command reg for UNIGFX GE
+ */
+/*
+ * src xy reg UGE_SRCXY
+ */
+#define UGE_SRCXY io_p2v(UGE_BASE + 0x0000)
+/*
+ * dst xy reg UGE_DSTXY
+ */
+#define UGE_DSTXY io_p2v(UGE_BASE + 0x0004)
+/*
+ * pitch reg UGE_PITCH
+ */
+#define UGE_PITCH io_p2v(UGE_BASE + 0x0008)
+/*
+ * src start reg UGE_SRCSTART
+ */
+#define UGE_SRCSTART io_p2v(UGE_BASE + 0x000C)
+/*
+ * dst start reg UGE_DSTSTART
+ */
+#define UGE_DSTSTART io_p2v(UGE_BASE + 0x0010)
+/*
+ * width height reg UGE_WIDHEIGHT
+ */
+#define UGE_WIDHEIGHT io_p2v(UGE_BASE + 0x0014)
+/*
+ * rop alpha reg UGE_ROPALPHA
+ */
+#define UGE_ROPALPHA io_p2v(UGE_BASE + 0x0018)
+/*
+ * foreground color UGE_FCOLOR
+ */
+#define UGE_FCOLOR io_p2v(UGE_BASE + 0x001C)
+/*
+ * background color UGE_BCOLOR
+ */
+#define UGE_BCOLOR io_p2v(UGE_BASE + 0x0020)
+/*
+ * src color key for high value UGE_SCH
+ */
+#define UGE_SCH io_p2v(UGE_BASE + 0x0024)
+/*
+ * dst color key for high value UGE_DCH
+ */
+#define UGE_DCH io_p2v(UGE_BASE + 0x0028)
+/*
+ * src color key for low value UGE_SCL
+ */
+#define UGE_SCL io_p2v(UGE_BASE + 0x002C)
+/*
+ * dst color key for low value UGE_DCL
+ */
+#define UGE_DCL io_p2v(UGE_BASE + 0x0030)
+/*
+ * clip 0 reg UGE_CLIP0
+ */
+#define UGE_CLIP0 io_p2v(UGE_BASE + 0x0034)
+/*
+ * clip 1 reg UGE_CLIP1
+ */
+#define UGE_CLIP1 io_p2v(UGE_BASE + 0x0038)
+/*
+ * command reg UGE_COMMAND
+ */
+#define UGE_COMMAND io_p2v(UGE_BASE + 0x003C)
+/*
+ * pattern 0 UGE_P0
+ */
+#define UGE_P0 io_p2v(UGE_BASE + 0x0040)
+#define UGE_P1 io_p2v(UGE_BASE + 0x0044)
+#define UGE_P2 io_p2v(UGE_BASE + 0x0048)
+#define UGE_P3 io_p2v(UGE_BASE + 0x004C)
+#define UGE_P4 io_p2v(UGE_BASE + 0x0050)
+#define UGE_P5 io_p2v(UGE_BASE + 0x0054)
+#define UGE_P6 io_p2v(UGE_BASE + 0x0058)
+#define UGE_P7 io_p2v(UGE_BASE + 0x005C)
+#define UGE_P8 io_p2v(UGE_BASE + 0x0060)
+#define UGE_P9 io_p2v(UGE_BASE + 0x0064)
+#define UGE_P10 io_p2v(UGE_BASE + 0x0068)
+#define UGE_P11 io_p2v(UGE_BASE + 0x006C)
+#define UGE_P12 io_p2v(UGE_BASE + 0x0070)
+#define UGE_P13 io_p2v(UGE_BASE + 0x0074)
+#define UGE_P14 io_p2v(UGE_BASE + 0x0078)
+#define UGE_P15 io_p2v(UGE_BASE + 0x007C)
+#define UGE_P16 io_p2v(UGE_BASE + 0x0080)
+#define UGE_P17 io_p2v(UGE_BASE + 0x0084)
+#define UGE_P18 io_p2v(UGE_BASE + 0x0088)
+#define UGE_P19 io_p2v(UGE_BASE + 0x008C)
+#define UGE_P20 io_p2v(UGE_BASE + 0x0090)
+#define UGE_P21 io_p2v(UGE_BASE + 0x0094)
+#define UGE_P22 io_p2v(UGE_BASE + 0x0098)
+#define UGE_P23 io_p2v(UGE_BASE + 0x009C)
+#define UGE_P24 io_p2v(UGE_BASE + 0x00A0)
+#define UGE_P25 io_p2v(UGE_BASE + 0x00A4)
+#define UGE_P26 io_p2v(UGE_BASE + 0x00A8)
+#define UGE_P27 io_p2v(UGE_BASE + 0x00AC)
+#define UGE_P28 io_p2v(UGE_BASE + 0x00B0)
+#define UGE_P29 io_p2v(UGE_BASE + 0x00B4)
+#define UGE_P30 io_p2v(UGE_BASE + 0x00B8)
+#define UGE_P31 io_p2v(UGE_BASE + 0x00BC)
+
+#define UDE_CFG_DST_MASK FMASK(2, 8)
+#define UDE_CFG_DST8 FIELD(0x0, 2, 8)
+#define UDE_CFG_DST16 FIELD(0x1, 2, 8)
+#define UDE_CFG_DST24 FIELD(0x2, 2, 8)
+#define UDE_CFG_DST32 FIELD(0x3, 2, 8)
+
+/*
+ * GDEN enable UDE_CFG_GDEN_ENABLE
+ */
+#define UDE_CFG_GDEN_ENABLE FIELD(1, 1, 3)
+/*
+ * VDEN enable UDE_CFG_VDEN_ENABLE
+ */
+#define UDE_CFG_VDEN_ENABLE FIELD(1, 1, 4)
+/*
+ * CDEN enable UDE_CFG_CDEN_ENABLE
+ */
+#define UDE_CFG_CDEN_ENABLE FIELD(1, 1, 5)
+/*
+ * TIMEUP enable UDE_CFG_TIMEUP_ENABLE
+ */
+#define UDE_CFG_TIMEUP_ENABLE FIELD(1, 1, 6)
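For the display engine, UDE_CFG_DST* selects the framebuffer pixel depth and the *_ENABLE bits turn the individual planes on. A sketch of pointing UNIGFX at a 16-bpp framebuffer (fb_dma is a hypothetical DMA address; the timing registers UDE_HAT..UDE_VST would also need mode-specific values):

    writel(fb_dma, UDE_FSA);
    writel((readl(UDE_CFG) & ~UDE_CFG_DST_MASK) |
           UDE_CFG_DST16 | UDE_CFG_GDEN_ENABLE, UDE_CFG);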
diff --git a/arch/unicore32/include/mach/uncompress.h b/arch/unicore32/include/mach/uncompress.h
new file mode 100644
index 000000000000..142d3e7958a9
--- /dev/null
+++ b/arch/unicore32/include/mach/uncompress.h
@@ -0,0 +1,34 @@
+/*
+ * linux/arch/unicore32/include/mach/uncompress.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_PUV3_UNCOMPRESS_H__
+#define __MACH_PUV3_UNCOMPRESS_H__
+
+#include "hardware.h"
+#include "ocd.h"
+
+extern char input_data[];
+extern char input_data_end[];
+
+static void arch_decomp_puts(const char *ptr)
+{
+ char c;
+
+ while ((c = *ptr++) != '\0') {
+ if (c == '\n')
+ putc('\r');
+ putc(c);
+ }
+}
+#define ARCH_HAVE_DECOMP_PUTS
+
+#endif /* __MACH_PUV3_UNCOMPRESS_H__ */
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
new file mode 100644
index 000000000000..ec23a2fb2f50
--- /dev/null
+++ b/arch/unicore32/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for the linux kernel.
+#
+
+# Object file lists.
+obj-y := dma.o elf.o entry.o process.o ptrace.o
+obj-y += setup.o signal.o sys.o stacktrace.o traps.o
+
+obj-$(CONFIG_MODULES) += ksyms.o module.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_CPU_FREQ) += cpu-ucv2.o
+obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
+
+# obj-y for architecture PKUnity v3
+obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o
+
+obj-$(CONFIG_PUV3_GPIO) += gpio.o
+obj-$(CONFIG_PUV3_RTC) += rtc.o
+obj-$(CONFIG_PUV3_PWM) += pwm.o
+obj-$(CONFIG_PUV3_PM) += pm.o sleep.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
+
+obj-$(CONFIG_PCI) += pci.o
+
+# obj-y for specific machines
+obj-$(CONFIG_ARCH_PUV3) += puv3-core.o
+obj-$(CONFIG_PUV3_NB0916) += puv3-nb0916.o
+
+head-y := head.o
+obj-$(CONFIG_DEBUG_LL) += debug.o
+
+extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/arch/unicore32/kernel/asm-offsets.c b/arch/unicore32/kernel/asm-offsets.c
new file mode 100644
index 000000000000..ffcbe7536ca7
--- /dev/null
+++ b/arch/unicore32/kernel/asm-offsets.c
@@ -0,0 +1,112 @@
+/*
+ * linux/arch/unicore32/kernel/asm-offsets.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <linux/thread_info.h>
+#include <asm/memory.h>
+#include <asm/suspend.h>
+
+/*
+ * GCC 3.0, 3.1: general bad code generation.
+ * GCC 3.2.0: incorrect function argument offset calculation.
+ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
+ * (http://gcc.gnu.org/PR8896) and incorrect structure
+ * initialisation in fs/jffs2/erase.c
+ */
+#if (__GNUC__ < 4)
+#error Please upgrade your compiler to GCC 4.x (uc4)
+#error Known good compiler: GCC 4.2.2
+#endif
+
+int main(void)
+{
+ DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
+#ifdef CONFIG_UNICORE_FPU_F64
+ DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
+#endif
+ BLANK();
+ DEFINE(S_R0, offsetof(struct pt_regs, UCreg_00));
+ DEFINE(S_R1, offsetof(struct pt_regs, UCreg_01));
+ DEFINE(S_R2, offsetof(struct pt_regs, UCreg_02));
+ DEFINE(S_R3, offsetof(struct pt_regs, UCreg_03));
+ DEFINE(S_R4, offsetof(struct pt_regs, UCreg_04));
+ DEFINE(S_R5, offsetof(struct pt_regs, UCreg_05));
+ DEFINE(S_R6, offsetof(struct pt_regs, UCreg_06));
+ DEFINE(S_R7, offsetof(struct pt_regs, UCreg_07));
+ DEFINE(S_R8, offsetof(struct pt_regs, UCreg_08));
+ DEFINE(S_R9, offsetof(struct pt_regs, UCreg_09));
+ DEFINE(S_R10, offsetof(struct pt_regs, UCreg_10));
+ DEFINE(S_R11, offsetof(struct pt_regs, UCreg_11));
+ DEFINE(S_R12, offsetof(struct pt_regs, UCreg_12));
+ DEFINE(S_R13, offsetof(struct pt_regs, UCreg_13));
+ DEFINE(S_R14, offsetof(struct pt_regs, UCreg_14));
+ DEFINE(S_R15, offsetof(struct pt_regs, UCreg_15));
+ DEFINE(S_R16, offsetof(struct pt_regs, UCreg_16));
+ DEFINE(S_R17, offsetof(struct pt_regs, UCreg_17));
+ DEFINE(S_R18, offsetof(struct pt_regs, UCreg_18));
+ DEFINE(S_R19, offsetof(struct pt_regs, UCreg_19));
+ DEFINE(S_R20, offsetof(struct pt_regs, UCreg_20));
+ DEFINE(S_R21, offsetof(struct pt_regs, UCreg_21));
+ DEFINE(S_R22, offsetof(struct pt_regs, UCreg_22));
+ DEFINE(S_R23, offsetof(struct pt_regs, UCreg_23));
+ DEFINE(S_R24, offsetof(struct pt_regs, UCreg_24));
+ DEFINE(S_R25, offsetof(struct pt_regs, UCreg_25));
+ DEFINE(S_R26, offsetof(struct pt_regs, UCreg_26));
+ DEFINE(S_FP, offsetof(struct pt_regs, UCreg_fp));
+ DEFINE(S_IP, offsetof(struct pt_regs, UCreg_ip));
+ DEFINE(S_SP, offsetof(struct pt_regs, UCreg_sp));
+ DEFINE(S_LR, offsetof(struct pt_regs, UCreg_lr));
+ DEFINE(S_PC, offsetof(struct pt_regs, UCreg_pc));
+ DEFINE(S_PSR, offsetof(struct pt_regs, UCreg_asr));
+ DEFINE(S_OLD_R0, offsetof(struct pt_regs, UCreg_ORIG_00));
+ DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ BLANK();
+ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+ DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
+ BLANK();
+ DEFINE(VM_EXEC, VM_EXEC);
+ BLANK();
+ DEFINE(PAGE_SZ, PAGE_SIZE);
+ BLANK();
+ DEFINE(SYS_ERROR0, 0x9f0000);
+ BLANK();
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIN_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(SWSUSP_CPU, offsetof(struct swsusp_arch_regs, \
+ cpu_context));
+#ifdef CONFIG_UNICORE_FPU_F64
+ DEFINE(SWSUSP_FPSTATE, offsetof(struct swsusp_arch_regs, \
+ fpstate));
+#endif
+ BLANK();
+ DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
+ DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
+ DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+ return 0;
+}
diff --git a/arch/unicore32/kernel/clock.c b/arch/unicore32/kernel/clock.c
new file mode 100644
index 000000000000..18d4563e6fa5
--- /dev/null
+++ b/arch/unicore32/kernel/clock.c
@@ -0,0 +1,390 @@
+/*
+ * linux/arch/unicore32/kernel/clock.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+/*
+ * Very simple clock implementation
+ */
+struct clk {
+ struct list_head node;
+ unsigned long rate;
+ const char *name;
+};
+
+static struct clk clk_ost_clk = {
+ .name = "OST_CLK",
+ .rate = CLOCK_TICK_RATE,
+};
+
+static struct clk clk_mclk_clk = {
+ .name = "MAIN_CLK",
+};
+
+static struct clk clk_bclk32_clk = {
+ .name = "BUS32_CLK",
+};
+
+static struct clk clk_ddr_clk = {
+ .name = "DDR_CLK",
+};
+
+static struct clk clk_vga_clk = {
+ .name = "VGA_CLK",
+};
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ struct clk *p, *clk = ERR_PTR(-ENOENT);
+
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(p, &clocks, node) {
+ if (strcmp(id, p->name) == 0) {
+ clk = p;
+ break;
+ }
+ }
+ mutex_unlock(&clocks_mutex);
+
+ return clk;
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+struct {
+ unsigned long rate;
+ unsigned long cfg;
+ unsigned long div;
+} vga_clk_table[] = {
+ {.rate = 25175000, .cfg = 0x00002001, .div = 0x9},
+ {.rate = 31500000, .cfg = 0x00002001, .div = 0x7},
+ {.rate = 40000000, .cfg = 0x00003801, .div = 0x9},
+ {.rate = 49500000, .cfg = 0x00003801, .div = 0x7},
+ {.rate = 65000000, .cfg = 0x00002c01, .div = 0x4},
+ {.rate = 78750000, .cfg = 0x00002400, .div = 0x7},
+ {.rate = 108000000, .cfg = 0x00002c01, .div = 0x2},
+ {.rate = 106500000, .cfg = 0x00003c01, .div = 0x3},
+ {.rate = 50650000, .cfg = 0x00106400, .div = 0x9},
+ {.rate = 61500000, .cfg = 0x00106400, .div = 0xa},
+ {.rate = 85500000, .cfg = 0x00002800, .div = 0x6},
+};
+
+struct {
+ unsigned long mrate;
+ unsigned long prate;
+} mclk_clk_table[] = {
+ {.mrate = 500000000, .prate = 0x00109801},
+ {.mrate = 525000000, .prate = 0x00104C00},
+ {.mrate = 550000000, .prate = 0x00105000},
+ {.mrate = 575000000, .prate = 0x00105400},
+ {.mrate = 600000000, .prate = 0x00105800},
+ {.mrate = 625000000, .prate = 0x00105C00},
+ {.mrate = 650000000, .prate = 0x00106000},
+ {.mrate = 675000000, .prate = 0x00106400},
+ {.mrate = 700000000, .prate = 0x00106800},
+ {.mrate = 725000000, .prate = 0x00106C00},
+ {.mrate = 750000000, .prate = 0x00107000},
+ {.mrate = 775000000, .prate = 0x00107400},
+ {.mrate = 800000000, .prate = 0x00107800},
+};
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (clk == &clk_vga_clk) {
+ unsigned long pll_vgacfg, pll_vgadiv;
+ int ret, i;
+
+ /* lookup vga_clk_table */
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(vga_clk_table); i++) {
+ if (rate == vga_clk_table[i].rate) {
+ pll_vgacfg = vga_clk_table[i].cfg;
+ pll_vgadiv = vga_clk_table[i].div;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (readl(PM_PLLVGACFG) == pll_vgacfg)
+ return 0;
+
+ /* set pll vga cfg reg. */
+ writel(pll_vgacfg, PM_PLLVGACFG);
+
+ writel(PM_PMCR_CFBVGA, PM_PMCR);
+ while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_VGADFC)
+ != PM_PLLDFCDONE_VGADFC)
+ udelay(100); /* about 1ms */
+
+ /* set div cfg reg. */
+ writel(readl(PM_PCGR) | PM_PCGR_VGACLK, PM_PCGR);
+
+ writel((readl(PM_DIVCFG) & ~PM_DIVCFG_VGACLK_MASK)
+ | PM_DIVCFG_VGACLK(pll_vgadiv), PM_DIVCFG);
+
+ writel(readl(PM_SWRESET) | PM_SWRESET_VGADIV, PM_SWRESET);
+ while ((readl(PM_SWRESET) & PM_SWRESET_VGADIV)
+ == PM_SWRESET_VGADIV)
+ udelay(100); /* 65536 bclk32, about 320us */
+
+ writel(readl(PM_PCGR) & ~PM_PCGR_VGACLK, PM_PCGR);
+ }
+#ifdef CONFIG_CPU_FREQ
+ if (clk == &clk_mclk_clk) {
+ u32 pll_rate, divstatus = readl(PM_DIVSTATUS);
+ int ret, i;
+
+ /* lookup mclk_clk_table */
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(mclk_clk_table); i++) {
+ if (rate == mclk_clk_table[i].mrate) {
+ pll_rate = mclk_clk_table[i].prate;
+ clk_mclk_clk.rate = mclk_clk_table[i].mrate;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (clk_mclk_clk.rate)
+ clk_bclk32_clk.rate = clk_mclk_clk.rate
+ / (((divstatus & 0x0000f000) >> 12) + 1);
+
+ /* set pll sys cfg reg. */
+ writel(pll_rate, PM_PLLSYSCFG);
+
+ writel(PM_PMCR_CFBSYS, PM_PMCR);
+ while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_SYSDFC)
+ != PM_PLLDFCDONE_SYSDFC)
+ udelay(100); /* about 1ms */
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_register(struct clk *clk)
+{
+ mutex_lock(&clocks_mutex);
+ list_add(&clk->node, &clocks);
+ mutex_unlock(&clocks_mutex);
+ printk(KERN_DEFAULT "PKUnity PM: %s %lu.%02luM\n", clk->name,
+ (clk->rate)/1000000, (clk->rate)/10000 % 100);
+ return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+ mutex_lock(&clocks_mutex);
+ list_del(&clk->node);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+struct {
+ unsigned long prate;
+ unsigned long rate;
+} pllrate_table[] = {
+ {.prate = 0x00002001, .rate = 250000000},
+ {.prate = 0x00104801, .rate = 250000000},
+ {.prate = 0x00104C01, .rate = 262500000},
+ {.prate = 0x00002401, .rate = 275000000},
+ {.prate = 0x00105001, .rate = 275000000},
+ {.prate = 0x00105401, .rate = 287500000},
+ {.prate = 0x00002801, .rate = 300000000},
+ {.prate = 0x00105801, .rate = 300000000},
+ {.prate = 0x00105C01, .rate = 312500000},
+ {.prate = 0x00002C01, .rate = 325000000},
+ {.prate = 0x00106001, .rate = 325000000},
+ {.prate = 0x00106401, .rate = 337500000},
+ {.prate = 0x00003001, .rate = 350000000},
+ {.prate = 0x00106801, .rate = 350000000},
+ {.prate = 0x00106C01, .rate = 362500000},
+ {.prate = 0x00003401, .rate = 375000000},
+ {.prate = 0x00107001, .rate = 375000000},
+ {.prate = 0x00107401, .rate = 387500000},
+ {.prate = 0x00003801, .rate = 400000000},
+ {.prate = 0x00107801, .rate = 400000000},
+ {.prate = 0x00107C01, .rate = 412500000},
+ {.prate = 0x00003C01, .rate = 425000000},
+ {.prate = 0x00108001, .rate = 425000000},
+ {.prate = 0x00108401, .rate = 437500000},
+ {.prate = 0x00004001, .rate = 450000000},
+ {.prate = 0x00108801, .rate = 450000000},
+ {.prate = 0x00108C01, .rate = 462500000},
+ {.prate = 0x00004401, .rate = 475000000},
+ {.prate = 0x00109001, .rate = 475000000},
+ {.prate = 0x00109401, .rate = 487500000},
+ {.prate = 0x00004801, .rate = 500000000},
+ {.prate = 0x00109801, .rate = 500000000},
+ {.prate = 0x00104C00, .rate = 525000000},
+ {.prate = 0x00002400, .rate = 550000000},
+ {.prate = 0x00105000, .rate = 550000000},
+ {.prate = 0x00105400, .rate = 575000000},
+ {.prate = 0x00002800, .rate = 600000000},
+ {.prate = 0x00105800, .rate = 600000000},
+ {.prate = 0x00105C00, .rate = 625000000},
+ {.prate = 0x00002C00, .rate = 650000000},
+ {.prate = 0x00106000, .rate = 650000000},
+ {.prate = 0x00106400, .rate = 675000000},
+ {.prate = 0x00003000, .rate = 700000000},
+ {.prate = 0x00106800, .rate = 700000000},
+ {.prate = 0x00106C00, .rate = 725000000},
+ {.prate = 0x00003400, .rate = 750000000},
+ {.prate = 0x00107000, .rate = 750000000},
+ {.prate = 0x00107400, .rate = 775000000},
+ {.prate = 0x00003800, .rate = 800000000},
+ {.prate = 0x00107800, .rate = 800000000},
+ {.prate = 0x00107C00, .rate = 825000000},
+ {.prate = 0x00003C00, .rate = 850000000},
+ {.prate = 0x00108000, .rate = 850000000},
+ {.prate = 0x00108400, .rate = 875000000},
+ {.prate = 0x00004000, .rate = 900000000},
+ {.prate = 0x00108800, .rate = 900000000},
+ {.prate = 0x00108C00, .rate = 925000000},
+ {.prate = 0x00004400, .rate = 950000000},
+ {.prate = 0x00109000, .rate = 950000000},
+ {.prate = 0x00109400, .rate = 975000000},
+ {.prate = 0x00004800, .rate = 1000000000},
+ {.prate = 0x00109800, .rate = 1000000000},
+};
+
+struct {
+ unsigned long prate;
+ unsigned long drate;
+} pddr_table[] = {
+ {.prate = 0x00100800, .drate = 44236800},
+ {.prate = 0x00100C00, .drate = 66355200},
+ {.prate = 0x00101000, .drate = 88473600},
+ {.prate = 0x00101400, .drate = 110592000},
+ {.prate = 0x00101800, .drate = 132710400},
+ {.prate = 0x00101C01, .drate = 154828800},
+ {.prate = 0x00102001, .drate = 176947200},
+ {.prate = 0x00102401, .drate = 199065600},
+ {.prate = 0x00102801, .drate = 221184000},
+ {.prate = 0x00102C01, .drate = 243302400},
+ {.prate = 0x00103001, .drate = 265420800},
+ {.prate = 0x00103401, .drate = 287539200},
+ {.prate = 0x00103801, .drate = 309657600},
+ {.prate = 0x00103C01, .drate = 331776000},
+ {.prate = 0x00104001, .drate = 353894400},
+};
+
+static int __init clk_init(void)
+{
+#ifdef CONFIG_PUV3_PM
+ u32 pllrate, divstatus = readl(PM_DIVSTATUS);
+ u32 pcgr_val = readl(PM_PCGR);
+ int i;
+
+ pcgr_val |= PM_PCGR_BCLKMME | PM_PCGR_BCLKH264E | PM_PCGR_BCLKH264D
+ | PM_PCGR_HECLK | PM_PCGR_HDCLK;
+ writel(pcgr_val, PM_PCGR);
+
+ pllrate = readl(PM_PLLSYSSTATUS);
+
+ /* look up the system PLL rate in pllrate_table */
+ clk_mclk_clk.rate = 0;
+ for (i = 0; i < ARRAY_SIZE(pllrate_table); i++) {
+ if (pllrate == pllrate_table[i].prate) {
+ clk_mclk_clk.rate = pllrate_table[i].rate;
+ break;
+ }
+ }
+
+ if (clk_mclk_clk.rate)
+ clk_bclk32_clk.rate = clk_mclk_clk.rate /
+ (((divstatus & 0x0000f000) >> 12) + 1);
+
+ pllrate = readl(PM_PLLDDRSTATUS);
+
+ /* lookup pddr_table */
+ clk_ddr_clk.rate = 0;
+ for (i = 0; i < ARRAY_SIZE(pddr_table); i++) {
+ if (pllrate == pddr_table[i].prate) {
+ clk_ddr_clk.rate = pddr_table[i].drate;
+ break;
+ }
+ }
+
+ pllrate = readl(PM_PLLVGASTATUS);
+
+ /* look up the VGA PLL rate in pllrate_table */
+ clk_vga_clk.rate = 0;
+ for (i = 0; i < ARRAY_SIZE(pllrate_table); i++) {
+ if (pllrate == pllrate_table[i].prate) {
+ clk_vga_clk.rate = pllrate_table[i].rate;
+ break;
+ }
+ }
+
+ if (clk_vga_clk.rate)
+ clk_vga_clk.rate = clk_vga_clk.rate /
+ (((divstatus & 0x00f00000) >> 20) + 1);
+
+ clk_register(&clk_vga_clk);
+#endif
+#ifdef CONFIG_ARCH_FPGA
+ clk_ddr_clk.rate = 33000000;
+ clk_mclk_clk.rate = 33000000;
+ clk_bclk32_clk.rate = 33000000;
+#endif
+ clk_register(&clk_ddr_clk);
+ clk_register(&clk_mclk_clk);
+ clk_register(&clk_bclk32_clk);
+ clk_register(&clk_ost_clk);
+ return 0;
+}
+core_initcall(clk_init);
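As a worked example of the divider math in clk_init(): if PM_PLLSYSSTATUS matches the 800 MHz entry of pllrate_table and DIVSTATUS bits [15:12] hold 3, then BUS32_CLK = 800 MHz / (3 + 1) = 200 MHz, and clk_register() reports it as "PKUnity PM: BUS32_CLK 200.00M".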
diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/arch/unicore32/kernel/cpu-ucv2.c
new file mode 100644
index 000000000000..4a99f62584c7
--- /dev/null
+++ b/arch/unicore32/kernel/cpu-ucv2.c
@@ -0,0 +1,93 @@
+/*
+ * linux/arch/unicore32/kernel/cpu-ucv2.c: clock scaling for the UniCore-II
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <mach/hardware.h>
+
+static struct cpufreq_driver ucv2_driver;
+
+/* make sure that only the "userspace" governor is run
+ * -- anything else wouldn't make sense on this platform, anyway.
+ */
+int ucv2_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+ return 0;
+}
+
+static unsigned int ucv2_getspeed(unsigned int cpu)
+{
+ struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+
+ if (cpu)
+ return 0;
+ return clk_get_rate(mclk)/1000;
+}
+
+static int ucv2_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cur = ucv2_getspeed(0);
+ struct cpufreq_freqs freqs;
+ struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+
+ freqs.old = cur;
+ freqs.new = target_freq;
+ freqs.cpu = 0;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ if (clk_set_rate(mclk, target_freq * 1000))
+ freqs.new = cur; /* rate change failed; report the old rate */
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+ policy->cur = ucv2_getspeed(0);
+ policy->min = policy->cpuinfo.min_freq = 250000;
+ policy->max = policy->cpuinfo.max_freq = 1000000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ return 0;
+}
+
+static struct cpufreq_driver ucv2_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = ucv2_verify_speed,
+ .target = ucv2_target,
+ .get = ucv2_getspeed,
+ .init = ucv2_cpu_init,
+ .name = "UniCore-II",
+};
+
+static int __init ucv2_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&ucv2_driver);
+}
+
+arch_initcall(ucv2_cpufreq_init);
diff --git a/arch/unicore32/kernel/debug-macro.S b/arch/unicore32/kernel/debug-macro.S
new file mode 100644
index 000000000000..2711d6d87d8e
--- /dev/null
+++ b/arch/unicore32/kernel/debug-macro.S
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/unicore32/kernel/debug-macro.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Debugging macro include header
+ */
+#include <generated/asm-offsets.h>
+#include <mach/hardware.h>
+
+ .macro put_word_ocd, rd, rx=r16
+1001: movc \rx, p1.c0, #0
+ cand.a \rx, #2
+ bne 1001b
+ movc p1.c1, \rd, #1
+ .endm
+
+#ifdef CONFIG_DEBUG_OCD
+ /* debug using UniCore On-Chip-Debugger */
+ .macro addruart, rx
+ .endm
+
+ .macro senduart, rd, rx
+ put_word_ocd \rd, \rx
+ .endm
+
+ .macro busyuart, rd, rx
+ .endm
+
+ .macro waituart, rd, rx
+ .endm
+#else
+#define UART_CLK_DEFAULT (3686400 * 20)
+ /* UART clock = MCLK / 2; the MCLK on this board is 3686400 * 40 */
+#define BAUD_RATE_DEFAULT 115200
+ /* The baud rate of the serial port */
+
+#define UART_DIVISOR_DEFAULT (UART_CLK_DEFAULT \
+ / (16 * BAUD_RATE_DEFAULT) - 1)
+
+ .macro addruart,rx
+ mrc p0, #0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0xee000000 @ physical base address
+ movne \rx, #0x6e000000 @ virtual address
+
+ @ We probe for the active serial port here
+ @ However, now we assume UART0 is active: epip4d
+ @ We assume r1 and r2 can be clobbered.
+
+ movl r2, #UART_DIVISOR_DEFAULT
+ mov r1, #0x80
+ str r1, [\rx, #UART_LCR_OFFSET]
+ and r1, r2, #0xff00
+ mov r1, r1, lsr #8
+ str r1, [\rx, #UART_DLH_OFFSET]
+ and r1, r2, #0xff
+ str r1, [\rx, #UART_DLL_OFFSET]
+ mov r1, #0x7
+ str r1, [\rx, #UART_FCR_OFFSET]
+ mov r1, #0x3
+ str r1, [\rx, #UART_LCR_OFFSET]
+ mov r1, #0x0
+ str r1, [\rx, #UART_IER_OFFSET]
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #UART_THR_OFFSET]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #UART_LSR_OFFSET]
+ tst \rd, #UART_LSR_THRE
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #UART_LSR_OFFSET]
+ tst \rd, #UART_LSR_TEMT
+ bne 1001b
+ .endm
+#endif
+
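For the defaults above, the divisor works out as UART_CLK_DEFAULT / (16 * BAUD_RATE_DEFAULT) - 1 = 73728000 / (16 * 115200) - 1 = 40 - 1 = 39, i.e. addruart programs DLH = 0x00 and DLL = 0x27 for 115200 baud.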
diff --git a/arch/unicore32/kernel/debug.S b/arch/unicore32/kernel/debug.S
new file mode 100644
index 000000000000..029fd12f6ab0
--- /dev/null
+++ b/arch/unicore32/kernel/debug.S
@@ -0,0 +1,85 @@
+/*
+ * linux/arch/unicore32/kernel/debug.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 32-bit debugging code
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+/*
+ * Some debugging routines (useful if you've got MM problems and
+ * printk isn't working). For DEBUGGING ONLY!!! Do not leave
+ * references to these in a production kernel!
+ */
+#include "debug-macro.S"
+
+/*
+ * Useful debugging routines
+ */
+ENTRY(printhex8)
+ mov r1, #8
+ b printhex
+ENDPROC(printhex8)
+
+ENTRY(printhex4)
+ mov r1, #4
+ b printhex
+ENDPROC(printhex4)
+
+ENTRY(printhex2)
+ mov r1, #2
+printhex: adr r2, hexbuf
+ add r3, r2, r1
+ mov r1, #0
+ stb r1, [r3]
+1: and r1, r0, #15
+ mov r0, r0 >> #4
+ csub.a r1, #10
+ beg 2f
+ add r1, r1, #'0' - 'a' + 10
+2: add r1, r1, #'a' - 10
+ stb.w r1, [r3+], #-1
+ cxor.a r3, r2
+ bne 1b
+ mov r0, r2
+ b printascii
+ENDPROC(printhex2)
+
+ .ltorg
+
+ENTRY(printascii)
+ addruart r3
+ b 2f
+1: waituart r2, r3
+ senduart r1, r3
+ busyuart r2, r3
+ cxor.a r1, #'\n'
+ cmoveq r1, #'\r'
+ beq 1b
+2: cxor.a r0, #0
+ beq 3f
+ ldb.w r1, [r0]+, #1
+ cxor.a r1, #0
+ bne 1b
+3: mov pc, lr
+ENDPROC(printascii)
+
+ENTRY(printch)
+ addruart r3
+ mov r1, r0
+ mov r0, #0
+ b 1b
+ENDPROC(printch)
+
+hexbuf: .space 16
+
diff --git a/arch/unicore32/kernel/dma.c b/arch/unicore32/kernel/dma.c
new file mode 100644
index 000000000000..ae441bc3122c
--- /dev/null
+++ b/arch/unicore32/kernel/dma.c
@@ -0,0 +1,183 @@
+/*
+ * linux/arch/unicore32/kernel/dma.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <mach/dma.h>
+
+struct dma_channel {
+ char *name;
+ puv3_dma_prio prio;
+ void (*irq_handler)(int, void *);
+ void (*err_handler)(int, void *);
+ void *data;
+};
+
+static struct dma_channel dma_channels[MAX_DMA_CHANNELS];
+
+int puv3_request_dma(char *name, puv3_dma_prio prio,
+ void (*irq_handler)(int, void *),
+ void (*err_handler)(int, void *),
+ void *data)
+{
+ unsigned long flags;
+ int i, found = 0;
+
+ /* basic sanity checks */
+ if (!name)
+ return -EINVAL;
+
+ local_irq_save(flags);
+
+ do {
+ /* try grabbing a DMA channel with the requested priority */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ if ((dma_channels[i].prio == prio) &&
+ !dma_channels[i].name) {
+ found = 1;
+ break;
+ }
+ }
+ /* if the requested prio group is full, try a higher priority */
+ } while (!found && prio--);
+
+ if (found) {
+ dma_channels[i].name = name;
+ dma_channels[i].irq_handler = irq_handler;
+ dma_channels[i].err_handler = err_handler;
+ dma_channels[i].data = data;
+ } else {
+ printk(KERN_WARNING "No more available DMA channels for %s\n",
+ name);
+ i = -ENODEV;
+ }
+
+ local_irq_restore(flags);
+ return i;
+}
+EXPORT_SYMBOL(puv3_request_dma);
+
+void puv3_free_dma(int dma_ch)
+{
+ unsigned long flags;
+
+ if (!dma_channels[dma_ch].name) {
+ printk(KERN_CRIT
+ "%s: trying to free channel %d which is already freed\n",
+ __func__, dma_ch);
+ return;
+ }
+
+ local_irq_save(flags);
+ dma_channels[dma_ch].name = NULL;
+ dma_channels[dma_ch].err_handler = NULL;
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(puv3_free_dma);
+
+static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+{
+ int i, dint;
+
+ dint = readl(DMAC_ITCSR);
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ if (dint & DMAC_CHANNEL(i)) {
+ struct dma_channel *channel = &dma_channels[i];
+
+ /* Clear TC interrupt of channel i */
+ writel(DMAC_CHANNEL(i), DMAC_ITCCR);
+ writel(0, DMAC_ITCCR);
+
+ if (channel->name && channel->irq_handler) {
+ channel->irq_handler(i, channel->data);
+ } else {
+ /*
+ * IRQ for an unregistered DMA channel:
+ * let's clear the interrupts and disable it.
+ */
+ printk(KERN_WARNING "spurious IRQ for"
+ " DMA channel %d\n", i);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dma_err_handler(int irq, void *dev_id)
+{
+ int i, dint;
+
+ dint = readl(DMAC_IESR);
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ if (dint & DMAC_CHANNEL(i)) {
+ struct dma_channel *channel = &dma_channels[i];
+
+ /* Clear Err interrupt of channel i */
+ writel(DMAC_CHANNEL(i), DMAC_IECR);
+ writel(0, DMAC_IECR);
+
+ if (channel->name && channel->err_handler) {
+ channel->err_handler(i, channel->data);
+ } else {
+ /*
+ * IRQ for an unregistered DMA channel:
+ * let's clear the interrupts and disable it.
+ */
+ printk(KERN_WARNING "spurious IRQ for"
+ " DMA channel %d\n", i);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+int __init puv3_init_dma(void)
+{
+ int i, ret;
+
+ /* dma channel priorities on v8 processors:
+ * ch 0 - 1 <--> (0) DMA_PRIO_HIGH
+ * ch 2 - 3 <--> (1) DMA_PRIO_MEDIUM
+ * ch 4 - 5 <--> (2) DMA_PRIO_LOW
+ */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ puv3_stop_dma(i);
+ dma_channels[i].name = NULL;
+ dma_channels[i].prio = min((i & 0x7) >> 1, DMA_PRIO_LOW);
+ }
+
+ ret = request_irq(IRQ_DMA, dma_irq_handler, 0, "DMA", NULL);
+ if (ret) {
+ printk(KERN_CRIT "Can't register IRQ for DMA\n");
+ return ret;
+ }
+
+ ret = request_irq(IRQ_DMAERR, dma_err_handler, 0, "DMAERR", NULL);
+ if (ret) {
+ printk(KERN_CRIT "Can't register IRQ for DMAERR\n");
+ free_irq(IRQ_DMA, "DMA");
+ return ret;
+ }
+
+ return 0;
+}
+
+postcore_initcall(puv3_init_dma);
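A consumer of this API requests a channel with completion and error callbacks, then frees it when done. A minimal sketch (handler names are illustrative, and the priority constants come from mach/dma.h, which is not shown here):

    static void my_dma_done(int ch, void *data) { /* transfer-complete work */ }
    static void my_dma_err(int ch, void *data)  { /* bus-error recovery */ }

    static int my_driver_attach(void *dev_data)
    {
            int ch = puv3_request_dma("my-device", DMA_PRIO_MEDIUM,
                                      my_dma_done, my_dma_err, dev_data);
            if (ch < 0)
                    return ch;
            /* ... program the channel registers, and when finished ... */
            puv3_free_dma(ch);
            return 0;
    }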
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
new file mode 100644
index 000000000000..3922255f1fa8
--- /dev/null
+++ b/arch/unicore32/kernel/early_printk.c
@@ -0,0 +1,59 @@
+/*
+ * linux/arch/unicore32/kernel/early_printk.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <mach/ocd.h>
+
+/* On-Chip-Debugger functions */
+
+static void early_ocd_write(struct console *con, const char *s, unsigned n)
+{
+ while (*s && n-- > 0) {
+ if (*s == '\n')
+ ocd_putc((int)'\r');
+ ocd_putc((int)*s);
+ s++;
+ }
+}
+
+static struct console early_ocd_console = {
+ .name = "earlyocd",
+ .write = early_ocd_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/* Direct interface for emergencies */
+static struct console *early_console = &early_ocd_console;
+
+static int __initdata keep_early;
+
+static int __init setup_early_printk(char *buf)
+{
+ if (!buf)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "ocd", 3))
+ early_console = &early_ocd_console;
+
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ return 0;
+}
+early_param("earlyprintk", setup_early_printk);
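In practice this means booting with earlyprintk=ocd on the kernel command line routes early messages through the On-Chip Debugger, while earlyprintk=ocd,keep clears CON_BOOT so the early console is not unregistered once the regular console comes up.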
diff --git a/arch/unicore32/kernel/elf.c b/arch/unicore32/kernel/elf.c
new file mode 100644
index 000000000000..0a176734fefa
--- /dev/null
+++ b/arch/unicore32/kernel/elf.c
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/unicore32/kernel/elf.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ /* Make sure it's a UniCore executable */
+ if (x->e_machine != EM_UNICORE)
+ return 0;
+
+ /* Make sure the entry address is reasonable */
+ if (x->e_entry & 3)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
+
+void elf_set_personality(const struct elf32_hdr *x)
+{
+ unsigned int personality = PER_LINUX;
+
+ set_personality(personality);
+}
+EXPORT_SYMBOL(elf_set_personality);
diff --git a/arch/unicore32/kernel/entry.S b/arch/unicore32/kernel/entry.S
new file mode 100644
index 000000000000..83698b7c8f5b
--- /dev/null
+++ b/arch/unicore32/kernel/entry.S
@@ -0,0 +1,824 @@
+/*
+ * linux/arch/unicore32/kernel/entry.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/unistd.h>
+#include <generated/asm-offsets.h>
+#include "debug-macro.S"
+
+@
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
+@
+#define S_OFF 8
+
+/*
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+ .macro zero_fp
+#ifdef CONFIG_FRAME_POINTER
+ mov fp, #0
+#endif
+ .endm
+
+ .macro alignment_trap, rtemp
+#ifdef CONFIG_ALIGNMENT_TRAP
+ ldw \rtemp, .LCcralign
+ ldw \rtemp, [\rtemp]
+ movc p0.c1, \rtemp, #0
+#endif
+ .endm
+
+ .macro load_user_sp_lr, rd, rtemp, offset = 0
+ mov \rtemp, asr
+ xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
+ mov.a asr, \rtemp @ switch to the SUSR mode
+
+ ldw sp, [\rd+], #\offset @ load sp_user
+ ldw lr, [\rd+], #\offset + 4 @ load lr_user
+
+ xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
+ mov.a asr, \rtemp @ switch back to the PRIV mode
+ .endm
+
+ .macro priv_exit, rpsr
+ mov.a bsr, \rpsr
+ ldm.w (r0 - r15), [sp]+
+ ldm.b (r16 - pc), [sp]+ @ load r0 - pc, asr
+ .endm
+
+ .macro restore_user_regs, fast = 0, offset = 0
+ ldw r1, [sp+], #\offset + S_PSR @ get calling asr
+ ldw lr, [sp+], #\offset + S_PC @ get pc
+ mov.a bsr, r1 @ save in bsr_priv
+ .if \fast
+ add sp, sp, #\offset + S_R1 @ r0 is syscall return value
+ ldm.w (r1 - r15), [sp]+ @ get calling r1 - r15
+ ldur (r16 - lr), [sp]+ @ get calling r16 - lr
+ .else
+ ldm.w (r0 - r15), [sp]+ @ get calling r0 - r15
+ ldur (r16 - lr), [sp]+ @ get calling r16 - lr
+ .endif
+ nop
+ add sp, sp, #S_FRAME_SIZE - S_R16
+ mov.a pc, lr @ return
+ @ and move bsr_priv into asr
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp >> #13
+ mov \rd, \rd << #13
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldw \base, =(io_p2v(PKUNITY_INTC_BASE))
+ ldw \irqstat, [\base+], #0xC @ INTC_ICIP
+ ldw \tmp, [\base+], #0x4 @ INTC_ICMR
+ and.a \irqstat, \irqstat, \tmp
+ beq 1001f
+ cntlz \irqnr, \irqstat
+ rsub \irqnr, \irqnr, #31
+1001: /* EQ will be set if no irqs pending */
+ .endm
+
+#ifdef CONFIG_DEBUG_LL
+ .macro printreg, reg, temp
+ adr \temp, 901f
+ stm (r0-r3), [\temp]+
+ stw lr, [\temp+], #0x10
+ mov r0, \reg
+ b.l printhex8
+ mov r0, #':'
+ b.l printch
+ mov r0, pc
+ b.l printhex8
+ adr r0, 902f
+ b.l printascii
+ adr \temp, 901f
+ ldm (r0-r3), [\temp]+
+ ldw lr, [\temp+], #0x10
+ b 903f
+901: .word 0, 0, 0, 0, 0 @ r0-r3, lr
+902: .asciz ": epip4d\n"
+ .align
+903:
+ .endm
+#endif
+
+/*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - r0 to r6.
+ *
+ * Note that tbl == why is intentional.
+ *
+ * We must set at least "tsk" and "why" when calling ret_with_reschedule.
+ */
+scno .req r21 @ syscall number
+tbl .req r22 @ syscall table pointer
+why .req r22 @ Linux syscall (!= 0)
+tsk .req r23 @ current thread_info
+
+/*
+ * Interrupt handling. Preserves r17, r18, r19
+ */
+ .macro intr_handler
+1: get_irqnr_and_base r0, r6, r5, lr
+ beq 2f
+ mov r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adr lr, 1b
+ b asm_do_IRQ
+2:
+ .endm
+
+/*
+ * PRIV mode handlers
+ */
+ .macro priv_entry
+ sub sp, sp, #(S_FRAME_SIZE - 4)
+ stm (r1 - r15), [sp]+
+ add r5, sp, #S_R15
+ stm (r16 - r28), [r5]+
+
+ ldm (r1 - r3), [r0]+
+ add r5, sp, #S_SP - 4 @ here for interlock avoidance
+ mov r4, #-1 @ "" "" "" ""
+ add r0, sp, #(S_FRAME_SIZE - 4)
+ stw.w r1, [sp+], #-4 @ save the "real" r0 copied
+ @ from the exception stack
+
+ mov r1, lr
+
+ @
+ @ We are now ready to fill in the remaining blanks on the stack:
+ @
+ @ r0 - sp_priv
+ @ r1 - lr_priv
+ @ r2 - lr_<exception>, already fixed up for correct return/restart
+ @ r3 - bsr_<exception>
+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @
+ stm (r0 - r4), [r5]+
+ .endm
+
+/*
+ * User mode handlers
+ *
+ */
+ .macro user_entry
+ sub sp, sp, #S_FRAME_SIZE
+ stm (r1 - r15), [sp+]
+ add r4, sp, #S_R16
+ stm (r16 - r28), [r4]+
+
+ ldm (r1 - r3), [r0]+
+ add r0, sp, #S_PC @ here for interlock avoidance
+ mov r4, #-1 @ "" "" "" ""
+
+ stw r1, [sp] @ save the "real" r0 copied
+ @ from the exception stack
+
+ @
+ @ We are now ready to fill in the remaining blanks on the stack:
+ @
+ @ r2 - lr_<exception>, already fixed up for correct return/restart
+ @ r3 - bsr_<exception>
+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @
+ @ Also, separately save sp_user and lr_user
+ @
+ stm (r2 - r4), [r0]+
+ stur (sp, lr), [r0-]
+
+ @
+ @ Enable the alignment trap while in kernel mode
+ @
+ alignment_trap r0
+
+ @
+ @ Clear FP to mark the first stack frame
+ @
+ zero_fp
+ .endm
+
+ .text
+
+@
+@ __invalid - generic code for failed exception
+@ (re-entrant version of handlers)
+@
+__invalid:
+ sub sp, sp, #S_FRAME_SIZE
+ stm (r1 - r15), [sp+]
+ add r1, sp, #S_R16
+ stm (r16 - r28, sp, lr), [r1]+
+
+ zero_fp
+
+ ldm (r4 - r6), [r0]+
+ add r0, sp, #S_PC @ here for interlock avoidance
+ mov r7, #-1 @ "" "" "" ""
+ stw r4, [sp] @ save preserved r0
+ stm (r5 - r7), [r0]+ @ lr_<exception>,
+ @ asr_<exception>, "old_r0"
+
+ mov r0, sp
+ mov r1, asr
+ b bad_mode
+ENDPROC(__invalid)
+
+ .align 5
+__dabt_priv:
+ priv_entry
+
+ @
+ @ get ready to re-enable interrupts if appropriate
+ @
+ mov r17, asr
+ cand.a r3, #PSR_I_BIT
+ bne 1f
+ andn r17, r17, #PSR_I_BIT
+1:
+
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - aborted context pc
+ @ r3 - aborted context asr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1.
+ @
+ movc r1, p0.c3, #0 @ get FSR
+ movc r0, p0.c4, #0 @ get FAR
+
+ @
+ @ set desired INTR state, then call main handler
+ @
+ mov.a asr, r17
+ mov r2, sp
+ b.l do_DataAbort
+
+ @
+ @ INTRs off again before pulling preserved data off the stack
+ @
+ disable_irq r0
+
+ @
+ @ restore BSR and restart the instruction
+ @
+ ldw r2, [sp+], #S_PSR
+ priv_exit r2 @ return from exception
+ENDPROC(__dabt_priv)
+
+ .align 5
+__intr_priv:
+ priv_entry
+
+ intr_handler
+
+ mov r0, #0 @ epip4d
+ movc p0.c5, r0, #14
+ nop; nop; nop; nop; nop; nop; nop; nop
+
+ ldw r4, [sp+], #S_PSR @ irqs are already disabled
+
+ priv_exit r4 @ return from exception
+ENDPROC(__intr_priv)
+
+ .ltorg
+
+ .align 5
+__extn_priv:
+ priv_entry
+
+ mov r0, sp @ struct pt_regs *regs
+ mov r1, asr
+ b bad_mode @ not supported
+ENDPROC(__extn_priv)
+
+ .align 5
+__pabt_priv:
+ priv_entry
+
+ @
+ @ re-enable interrupts if appropriate
+ @
+ mov r17, asr
+ cand.a r3, #PSR_I_BIT
+ bne 1f
+ andn r17, r17, #PSR_I_BIT
+1:
+
+ @
+ @ set args, then call main handler
+ @
+ @ r0 - address of faulting instruction
+ @ r1 - pointer to registers on stack
+ @
+ mov r0, r2 @ pass address of aborted instruction
+ mov r1, #5
+ mov.a asr, r17
+ mov r2, sp @ regs
+ b.l do_PrefetchAbort @ call abort handler
+
+ @
+ @ INTRs off again before pulling preserved data off the stack
+ @
+ disable_irq r0
+
+ @
+ @ restore BSR and restart the instruction
+ @
+ ldw r2, [sp+], #S_PSR
+ priv_exit r2 @ return from exception
+ENDPROC(__pabt_priv)
+
+ .align 5
+.LCcralign:
+ .word cr_alignment
+
+ .align 5
+__dabt_user:
+ user_entry
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ cff ip, s31
+ cand.a ip, #0x08000000 @ FPU exception traps?
+ beq 209f
+
+ ldw ip, [sp+], #S_PC
+ add ip, ip, #4
+ stw ip, [sp+], #S_PC
+ @
+ @ fall through to the emulation code, which returns using r19 if
+ @ it has emulated the instruction, or the more conventional lr
+ @ if we are to treat this as a real extended instruction
+ @
+ @ r0 - instruction
+ @
+1: ldw.u r0, [r2]
+ adr r19, ret_from_exception
+ adr lr, 209f
+ @
+ @ fallthrough to call do_uc_f64
+ @
+/*
+ * Check whether the instruction is a co-processor instruction.
+ * If yes, we need to call the relevant co-processor handler.
+ *
+ * Note that we don't do a full check here for the co-processor
+ * instructions; all instructions with bit 27 set are well
+ * defined. The only instructions that should fault are the
+ * co-processor instructions.
+ *
+ * Emulators may wish to make use of the following registers:
+ * r0 = instruction opcode.
+ * r2 = PC
+ * r19 = normal "successful" return address
+ * r20 = this thread's thread_info structure.
+ * lr = unrecognised instruction return address
+ */
+ get_thread_info r20 @ get current thread
+ and r8, r0, #0x00003c00 @ mask out CP number
+ mov r7, #1
+ stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[]
+
+ @ F64 hardware support entry point.
+ @ r0 = faulted instruction
+ @ r19 = return address
+ @ r20 = fp_state
+ enable_irq r4
+ add r20, r20, #TI_FPSTATE @ r20 = workspace
+ cff r1, s31 @ get fpu FPSCR
+ andn r2, r1, #0x08000000
+ ctf r2, s31 @ clear 27 bit
+ mov r2, sp @ nothing stacked - regdump is at TOS
+ mov lr, r19 @ setup for a return to the user code
+
+ @ Now call the C code to package up the bounce to the support code
+ @ r0 holds the trigger instruction
+ @ r1 holds the FPSCR value
+ @ r2 pointer to register dump
+ b ucf64_exchandler
+209:
+#endif
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - aborted context pc
+ @ r3 - aborted context asr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1.
+ @
+ movc r1, p0.c3, #0 @ get FSR
+ movc r0, p0.c4, #0 @ get FAR
+
+ @
+ @ INTRs on, then call the main handler
+ @
+ enable_irq r2
+ mov r2, sp
+ adr lr, ret_from_exception
+ b do_DataAbort
+ENDPROC(__dabt_user)
+
+ .align 5
+__intr_user:
+ user_entry
+
+ get_thread_info tsk
+
+ intr_handler
+
+ mov why, #0
+ b ret_to_user
+ENDPROC(__intr_user)
+
+ .ltorg
+
+ .align 5
+__extn_user:
+ user_entry
+
+ mov r0, sp
+ mov r1, asr
+ b bad_mode
+ENDPROC(__extn_user)
+
+ .align 5
+__pabt_user:
+ user_entry
+
+ mov r0, r2 @ pass address of aborted instruction.
+ mov r1, #5
+ enable_irq r1 @ Enable interrupts
+ mov r2, sp @ regs
+ b.l do_PrefetchAbort @ call abort handler
+ /* fall through */
+/*
+ * This is the return code to user mode for abort handlers
+ */
+ENTRY(ret_from_exception)
+ get_thread_info tsk
+ mov why, #0
+ b ret_to_user
+ENDPROC(__pabt_user)
+ENDPROC(ret_from_exception)
+
+/*
+ * Register switch for UniCore V2 processors
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+ add ip, r1, #TI_CPU_SAVE
+ stm.w (r4 - r15), [ip]+
+ stm.w (r16 - r27, sp, lr), [ip]+
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ add ip, r1, #TI_FPSTATE
+ sfm.w (f0 - f7 ), [ip]+
+ sfm.w (f8 - f15), [ip]+
+ sfm.w (f16 - f23), [ip]+
+ sfm.w (f24 - f31), [ip]+
+ cff r4, s31
+ stw r4, [ip]
+
+ add ip, r2, #TI_FPSTATE
+ lfm.w (f0 - f7 ), [ip]+
+ lfm.w (f8 - f15), [ip]+
+ lfm.w (f16 - f23), [ip]+
+ lfm.w (f24 - f31), [ip]+
+ ldw r4, [ip]
+ ctf r4, s31
+#endif
+ add ip, r2, #TI_CPU_SAVE
+ ldm.w (r4 - r15), [ip]+
+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously
+ENDPROC(__switch_to)
+
+ .align 5
+/*
+ * This is the fast syscall return path. We do as little as
+ * possible here, and this includes saving r0 back into the PRIV
+ * stack.
+ */
+ret_fast_syscall:
+ disable_irq r1 @ disable interrupts
+ ldw r1, [tsk+], #TI_FLAGS
+ cand.a r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+
+ @ fast_restore_user_regs
+ restore_user_regs fast = 1, offset = S_OFF
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+fast_work_pending:
+ stw.w r0, [sp+], #S_R0+S_OFF @ returned r0
+work_pending:
+ cand.a r1, #_TIF_NEED_RESCHED
+ bne work_resched
+ cand.a r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
+ beq no_work_pending
+ mov r0, sp @ 'regs'
+ mov r2, why @ 'syscall'
+ cand.a r1, #_TIF_SIGPENDING @ delivering a signal?
+ cmovne why, #0 @ prevent further restarts
+ b.l do_notify_resume
+ b ret_slow_syscall @ Check work again
+
+work_resched:
+ b.l schedule
+/*
+ * "slow" syscall return path. "why" tells us if this was a real syscall.
+ */
+ENTRY(ret_to_user)
+ret_slow_syscall:
+ disable_irq r1 @ disable interrupts
+ get_thread_info tsk @ epip4d, one path error?!
+ ldw r1, [tsk+], #TI_FLAGS
+ cand.a r1, #_TIF_WORK_MASK
+ bne work_pending
+no_work_pending:
+ @ slow_restore_user_regs
+ restore_user_regs fast = 0, offset = 0
+ENDPROC(ret_to_user)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+ b.l schedule_tail
+ get_thread_info tsk
+ ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing
+ mov why, #1
+ cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ beq ret_slow_syscall
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+ b.l syscall_trace
+ b ret_slow_syscall
+ENDPROC(ret_from_fork)
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ */
+ .align 5
+ENTRY(vector_swi)
+ sub sp, sp, #S_FRAME_SIZE
+ stm (r0 - r15), [sp]+ @ Calling r0 - r15
+ add r8, sp, #S_R16
+ stm (r16 - r28), [r8]+ @ Calling r16 - r28
+ add r8, sp, #S_PC
+ stur (sp, lr), [r8-] @ Calling sp, lr
+ mov r8, bsr @ called from non-REAL mode
+ stw lr, [sp+], #S_PC @ Save calling PC
+ stw r8, [sp+], #S_PSR @ Save ASR
+ stw r0, [sp+], #S_OLD_R0 @ Save OLD_R0
+ zero_fp
+
+ /*
+ * Get the system call number.
+ */
+ sub ip, lr, #4
+ ldw.u scno, [ip] @ get SWI instruction
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+ ldw ip, __cr_alignment
+ ldw ip, [ip]
+ movc p0.c1, ip, #0 @ update control register
+#endif
+ enable_irq ip
+
+ get_thread_info tsk
+ ldw tbl, =sys_call_table @ load syscall table pointer
+
+ andn scno, scno, #0xff000000 @ mask off SWI op-code
+ andn scno, scno, #0x00ff0000 @ mask off SWI op-code
+
+ stm.w (r4, r5), [sp-] @ push fifth and sixth args
+ ldw ip, [tsk+], #TI_FLAGS @ check for syscall tracing
+ cand.a ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ bne __sys_trace
+
+ csub.a scno, #__NR_syscalls @ check upper syscall limit
+ adr lr, ret_fast_syscall @ return address
+ bea 1f
+ ldw pc, [tbl+], scno << #2 @ call sys_* routine
+1:
+ add r1, sp, #S_OFF
+2: mov why, #0 @ no longer a real syscall
+ b sys_ni_syscall @ not private func
+
+ /*
+ * This is the really slow path. We're going to be doing
+ * context switches, and waiting for our parent to respond.
+ */
+__sys_trace:
+ mov r2, scno
+ add r1, sp, #S_OFF
+ mov r0, #0 @ trace entry [IP = 0]
+ b.l syscall_trace
+
+ adr lr, __sys_trace_return @ return address
+ mov scno, r0 @ syscall number (possibly new)
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ csub.a scno, #__NR_syscalls @ check upper syscall limit
+ bea 2b
+ ldm (r0 - r3), [r1]+ @ have to reload r0 - r3
+ ldw pc, [tbl+], scno << #2 @ call sys_* routine
+
+__sys_trace_return:
+ stw.w r0, [sp+], #S_R0 + S_OFF @ save returned r0
+ mov r2, scno
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+ b.l syscall_trace
+ b ret_slow_syscall
+
+ .align 5
+#ifdef CONFIG_ALIGNMENT_TRAP
+ .type __cr_alignment, #object
+__cr_alignment:
+ .word cr_alignment
+#endif
+ .ltorg
+
+ENTRY(sys_execve)
+ add r3, sp, #S_OFF
+ b __sys_execve
+ENDPROC(sys_execve)
+
+ENTRY(sys_clone)
+ add ip, sp, #S_OFF
+ stw ip, [sp+], #4
+ b __sys_clone
+ENDPROC(sys_clone)
+
+ENTRY(sys_rt_sigreturn)
+ add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
+ b __sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn)
+
+ENTRY(sys_sigaltstack)
+ ldw r2, [sp+], #S_OFF + S_SP
+ b do_sigaltstack
+ENDPROC(sys_sigaltstack)
+
+ __INIT
+
+/*
+ * Vector stubs.
+ *
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldw's. Note that this code must not
+ * exceed 0x300 bytes.
+ *
+ * Common stub entry macro:
+ * Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
+ */
+ .macro vector_stub, name, mode
+ .align 5
+
+vector_\name:
+ @
+ @ Save r0, lr_<exception> (parent PC) and bsr_<exception>
+ @ (parent ASR)
+ @
+ stw r0, [sp]
+ stw lr, [sp+], #4 @ save r0, lr
+ mov lr, bsr
+ stw lr, [sp+], #8 @ save bsr
+
+ @
+ @ Prepare for PRIV mode. INTRs remain disabled.
+ @
+ mov r0, asr
+ xor r0, r0, #(\mode ^ PRIV_MODE)
+ mov.a bsr, r0
+
+ @
+ @ the branch table must immediately follow this code
+ @
+ and lr, lr, #0x03
+ add lr, lr, #1
+ mov r0, sp
+ ldw lr, [pc+], lr << #2
+ mov.a pc, lr @ branch to handler in PRIV mode
+ENDPROC(vector_\name)
+ .align 2
+ @ handler addresses follow this label
+ .endm
+
+ .globl __stubs_start
+__stubs_start:
+/*
+ * Interrupt dispatcher
+ */
+ vector_stub intr, INTR_MODE
+
+ .long __intr_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2
+ .long __intr_priv @ 3 (PRIV)
+
+/*
+ * Data abort dispatcher
+ * Enter in ABT mode, bsr = USER ASR, lr = USER PC
+ */
+ vector_stub dabt, ABRT_MODE
+
+ .long __dabt_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2 (INTR)
+ .long __dabt_priv @ 3 (PRIV)
+
+/*
+ * Prefetch abort dispatcher
+ * Enter in ABT mode, bsr = USER ASR, lr = USER PC
+ */
+ vector_stub pabt, ABRT_MODE
+
+ .long __pabt_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2 (INTR)
+ .long __pabt_priv @ 3 (PRIV)
+
+/*
+ * Undef instr entry dispatcher
+ * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
+ */
+ vector_stub extn, EXTN_MODE
+
+ .long __extn_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2 (INTR)
+ .long __extn_priv @ 3 (PRIV)
+
+/*
+ * We group all the following data together to optimise
+ * for CPUs with separate I & D caches.
+ */
+ .align 5
+
+.LCvswi:
+ .word vector_swi
+
+ .globl __stubs_end
+__stubs_end:
+
+ .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+
+ .globl __vectors_start
+__vectors_start:
+ jepriv SYS_ERROR0
+ b vector_extn + stubs_offset
+ ldw pc, .LCvswi + stubs_offset
+ b vector_pabt + stubs_offset
+ b vector_dabt + stubs_offset
+ jepriv SYS_ERROR0
+ b vector_intr + stubs_offset
+ jepriv SYS_ERROR0
+
+ .globl __vectors_end
+__vectors_end:
+
+ .data
+
+ .globl cr_alignment
+ .globl cr_no_alignment
+cr_alignment:
+ .space 4
+cr_no_alignment:
+ .space 4
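The syscall-number decode in vector_swi above reloads the SWI instruction word from lr - 4 and masks off the opcode bits, leaving the call number in the low bits. A minimal stand-alone sketch of that masking, with an invented instruction word:

/* Illustration of the two "andn" masks used in vector_swi above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t swi_insn = 0xff00000b;		/* hypothetical SWI encoding */
	uint32_t scno = swi_insn;

	scno &= ~0xff000000u;			/* andn scno, scno, #0xff000000 */
	scno &= ~0x00ff0000u;			/* andn scno, scno, #0x00ff0000 */

	printf("syscall number = %u\n", scno);	/* 11 for this example */
	return 0;
}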
diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c
new file mode 100644
index 000000000000..282a60ac82ba
--- /dev/null
+++ b/arch/unicore32/kernel/fpu-ucf64.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/unicore32/kernel/fpu-ucf64.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/fpu-ucf64.h>
+
+/*
+ * A special flag to tell the normalisation code not to normalise.
+ */
+#define F64_NAN_FLAG 0x100
+
+/*
+ * A bit pattern used to indicate the initial (unset) value of the
+ * exception mask, in case nothing handles an instruction. This
+ * doesn't include the NAN flag, which get masked out before
+ * we check for an error.
+ */
+#define F64_EXCEPTION_ERROR ((u32)-1 & ~F64_NAN_FLAG)
+
+/*
+ * Since we aren't building with -mfpu=f64, we need to code
+ * these instructions using their MRC/MCR equivalents.
+ */
+#define f64reg(_f64_) #_f64_
+
+#define cff(_f64_) ({ \
+ u32 __v; \
+ asm("cff %0, " f64reg(_f64_) "@ fmrx %0, " #_f64_ \
+ : "=r" (__v) : : "cc"); \
+ __v; \
+ })
+
+#define ctf(_f64_, _var_) \
+ asm("ctf %0, " f64reg(_f64_) "@ fmxr " #_f64_ ", %0" \
+ : : "r" (_var_) : "cc")
+
+/*
+ * Raise a SIGFPE for the current process.
+ * sicode describes the signal being raised.
+ */
+void ucf64_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ memset(&info, 0, sizeof(info));
+
+ info.si_signo = SIGFPE;
+ info.si_code = sicode;
+ info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
+
+ /*
+ * This is the same as NWFPE, because it's not clear what
+ * this is used for
+ */
+ current->thread.error_code = 0;
+ current->thread.trap_no = 6;
+
+ send_sig_info(SIGFPE, &info, current);
+}
+
+/*
+ * Handle exceptions of UniCore-F64.
+ */
+void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs)
+{
+ u32 tmp = fpexc;
+ u32 exc = F64_EXCEPTION_ERROR & fpexc;
+
+ pr_debug("UniCore-F64: instruction %08x fpscr %08x\n",
+ inst, fpexc);
+
+ if (exc & FPSCR_CMPINSTR_BIT) {
+ if (exc & FPSCR_CON)
+ tmp |= FPSCR_CON;
+ else
+ tmp &= ~(FPSCR_CON);
+ exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON);
+ } else {
+ pr_debug("UniCore-F64 Error: unhandled exceptions\n");
+ pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
+ cff(FPSCR), inst);
+
+ ucf64_raise_sigfpe(0, regs);
+ return;
+ }
+
+ /*
+ * Update the FPSCR with the additional exception flags.
+ * Comparison instructions always return at least one of
+ * these flags set.
+ */
+ tmp &= ~(FPSCR_TRAP | FPSCR_IOS | FPSCR_OFS | FPSCR_UFS |
+ FPSCR_IXS | FPSCR_HIS | FPSCR_IOC | FPSCR_OFC |
+ FPSCR_UFC | FPSCR_IXC | FPSCR_HIC);
+
+ tmp |= exc;
+ ctf(FPSCR, tmp);
+}
+
+/*
+ * F64 support code initialisation.
+ */
+static int __init ucf64_init(void)
+{
+ ctf(FPSCR, 0x0); /* FPSCR_UFE | FPSCR_NDE perhaps better */
+
+ printk(KERN_INFO "Enable UniCore-F64 support.\n");
+
+ return 0;
+}
+
+late_initcall(ucf64_init);
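The tail of ucf64_exchandler() follows a common status-register pattern: clear the stale sticky flags, then OR in the exceptions raised by the current instruction. A small stand-alone sketch of that merge, using placeholder bit positions rather than the real FPSCR layout:

/* Sketch of the FPSCR update pattern above; bit values are made up. */
#include <stdio.h>
#include <stdint.h>

#define FPSCR_STICKY_MASK 0x0000009fu	/* hypothetical status-flag bits */

int main(void)
{
	uint32_t fpscr = 0x00000013u;	/* old value with stale flags set */
	uint32_t exc   = 0x00000008u;	/* flags raised by this instruction */

	uint32_t tmp = fpscr;
	tmp &= ~FPSCR_STICKY_MASK;	/* drop the stale status flags */
	tmp |= exc;			/* record the new exceptions */

	printf("FPSCR: %#010x -> %#010x\n", fpscr, tmp);
	return 0;
}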
diff --git a/arch/unicore32/kernel/gpio.c b/arch/unicore32/kernel/gpio.c
new file mode 100644
index 000000000000..cb12ec39552c
--- /dev/null
+++ b/arch/unicore32/kernel/gpio.c
@@ -0,0 +1,122 @@
+/*
+ * linux/arch/unicore32/kernel/gpio.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* in FPGA, no GPIO support */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <mach/hardware.h>
+
+#ifdef CONFIG_LEDS
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+
+static const struct gpio_led puv3_gpio_leds[] = {
+ { .name = "cpuhealth", .gpio = GPO_CPU_HEALTH, .active_low = 0,
+ .default_trigger = "heartbeat", },
+ { .name = "hdd_led", .gpio = GPO_HDD_LED, .active_low = 1,
+ .default_trigger = "ide-disk", },
+};
+
+static const struct gpio_led_platform_data puv3_gpio_led_data = {
+ .num_leds = ARRAY_SIZE(puv3_gpio_leds),
+ .leds = (void *) puv3_gpio_leds,
+};
+
+static struct platform_device puv3_gpio_gpio_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = (void *) &puv3_gpio_led_data,
+ }
+};
+
+static int __init puv3_gpio_leds_init(void)
+{
+ platform_device_register(&puv3_gpio_gpio_leds);
+ return 0;
+}
+
+device_initcall(puv3_gpio_leds_init);
+#endif
+
+static int puv3_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return readl(GPIO_GPLR) & GPIO_GPIO(offset);
+}
+
+static void puv3_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (value)
+ writel(GPIO_GPIO(offset), GPIO_GPSR);
+ else
+ writel(GPIO_GPIO(offset), GPIO_GPCR);
+}
+
+static int puv3_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ writel(readl(GPIO_GPDR) & ~GPIO_GPIO(offset), GPIO_GPDR);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int puv3_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ puv3_gpio_set(chip, offset, value);
+ writel(readl(GPIO_GPDR) | GPIO_GPIO(offset), GPIO_GPDR);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static struct gpio_chip puv3_gpio_chip = {
+ .label = "gpio",
+ .direction_input = puv3_direction_input,
+ .direction_output = puv3_direction_output,
+ .set = puv3_gpio_set,
+ .get = puv3_gpio_get,
+ .base = 0,
+ .ngpio = GPIO_MAX + 1,
+};
+
+void __init puv3_init_gpio(void)
+{
+ writel(GPIO_DIR, GPIO_GPDR);
+#if defined(CONFIG_PUV3_NB0916) || defined(CONFIG_PUV3_SMW0919) \
+ || defined(CONFIG_PUV3_DB0913)
+ gpio_set_value(GPO_WIFI_EN, 1);
+ gpio_set_value(GPO_HDD_LED, 1);
+ gpio_set_value(GPO_VGA_EN, 1);
+ gpio_set_value(GPO_LCD_EN, 1);
+ gpio_set_value(GPO_CAM_PWR_EN, 0);
+ gpio_set_value(GPO_LCD_VCC_EN, 1);
+ gpio_set_value(GPO_SOFT_OFF, 1);
+ gpio_set_value(GPO_BT_EN, 1);
+ gpio_set_value(GPO_FAN_ON, 0);
+ gpio_set_value(GPO_SPKR, 0);
+ gpio_set_value(GPO_CPU_HEALTH, 1);
+ gpio_set_value(GPO_LAN_SEL, 1);
+/*
+ * DO NOT modify the GPO_SET_V1 and GPO_SET_V2 in kernel
+ * gpio_set_value(GPO_SET_V1, 1);
+ * gpio_set_value(GPO_SET_V2, 1);
+ */
+#endif
+ gpiochip_add(&puv3_gpio_chip);
+}
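puv3_gpio_set() and puv3_gpio_get() above rely on the set/clear-register idiom: a mask written to one register sets the pin, the same mask written to another clears it, and a level register reflects the result. A stand-alone sketch of that idiom, with plain variables standing in for GPSR/GPCR/GPLR:

/* Simulated set/clear-register GPIO; registers are just variables. */
#include <stdio.h>
#include <stdint.h>

static uint32_t gplr;				/* simulated level register */

static void gpio_set(unsigned offset, int value)
{
	uint32_t mask = 1u << offset;

	if (value)
		gplr |= mask;			/* what a GPSR write achieves */
	else
		gplr &= ~mask;			/* what a GPCR write achieves */
}

static int gpio_get(unsigned offset)
{
	return !!(gplr & (1u << offset));	/* read back from GPLR */
}

int main(void)
{
	gpio_set(3, 1);
	gpio_set(5, 1);
	gpio_set(3, 0);
	printf("pin3=%d pin5=%d gplr=%#x\n", gpio_get(3), gpio_get(5), gplr);
	return 0;
}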
diff --git a/arch/unicore32/kernel/head.S b/arch/unicore32/kernel/head.S
new file mode 100644
index 000000000000..92255f3ab6a7
--- /dev/null
+++ b/arch/unicore32/kernel/head.S
@@ -0,0 +1,252 @@
+/*
+ * linux/arch/unicore32/kernel/head.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/ptrace.h>
+#include <generated/asm-offsets.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+#include <asm/system.h>
+#include <asm/pgtable-hwdef.h>
+
+#if (PHYS_OFFSET & 0x003fffff)
+#error "PHYS_OFFSET must be at an even 4MiB boundary!"
+#endif
+
+#define KERNEL_RAM_VADDR (PAGE_OFFSET + KERNEL_IMAGE_START)
+#define KERNEL_RAM_PADDR (PHYS_OFFSET + KERNEL_IMAGE_START)
+
+#define KERNEL_PGD_PADDR (KERNEL_RAM_PADDR - 0x1000)
+#define KERNEL_PGD_VADDR (KERNEL_RAM_VADDR - 0x1000)
+
+#define KERNEL_START KERNEL_RAM_VADDR
+#define KERNEL_END _end
+
+/*
+ * swapper_pg_dir is the virtual address of the initial page table.
+ * We place the page tables 4K below KERNEL_RAM_VADDR. Therefore, we must
+ * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
+ * the least significant 16 bits to be 0x8000, but we could probably
+ * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.
+ */
+#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
+#error KERNEL_RAM_VADDR must start at 0xXXXX8000
+#endif
+
+ .globl swapper_pg_dir
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code. The requirements
+ * are: MMU = off, D-cache = off, I-cache = don't care
+ *
+ * This code is mostly position independent, so if you link the kernel at
+ * 0xc0008000, you call this at __pa(0xc0008000).
+ */
+ __HEAD
+ENTRY(stext)
+ @ set asr
+ mov r0, #PRIV_MODE @ ensure priv mode
+ or r0, #PSR_R_BIT | PSR_I_BIT @ disable irqs
+ mov.a asr, r0
+
+ @ process identify
+ movc r0, p0.c0, #0 @ cpuid
+ movl r1, 0xff00ffff @ mask
+ movl r2, 0x4d000863 @ value
+ and r0, r1, r0
+ cxor.a r0, r2
+ bne __error_p @ invalid processor id
+
+ /*
+ * Clear the 4K level 1 swapper page table
+ */
+ movl r0, #KERNEL_PGD_PADDR @ page table address
+ mov r1, #0
+ add r2, r0, #0x1000
+101: stw.w r1, [r0]+, #4
+ stw.w r1, [r0]+, #4
+ stw.w r1, [r0]+, #4
+ stw.w r1, [r0]+, #4
+ cxor.a r0, r2
+ bne 101b
+
+ movl r4, #KERNEL_PGD_PADDR @ page table address
+ mov r7, #PMD_TYPE_SECT | PMD_PRESENT @ page size: section
+ or r7, r7, #PMD_SECT_CACHEABLE @ cacheable
+ or r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC
+
+ /*
+ * Create identity mapping for first 4MB of kernel to
+ * cater for the MMU enable. This identity mapping
+ * will be removed by paging_init(). We use our current program
+ * counter to determine corresponding section base address.
+ */
+ mov r6, pc
+ mov r6, r6 >> #22 @ start of kernel section
+ or r1, r7, r6 << #22 @ flags + kernel base
+ stw r1, [r4+], r6 << #2 @ identity mapping
+
+ /*
+ * Now setup the pagetables for our kernel direct
+ * mapped region.
+ */
+ add r0, r4, #(KERNEL_START & 0xff000000) >> 20
+ stw.w r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20
+ movl r6, #(KERNEL_END - 1)
+ add r0, r0, #4
+ add r6, r4, r6 >> #20
+102: csub.a r0, r6
+ add r1, r1, #1 << 22
+ bua 103f
+ stw.w r1, [r0]+, #4
+ b 102b
+103:
+ /*
+ * Then map first 4MB of ram in case it contains our boot params.
+ */
+ add r0, r4, #PAGE_OFFSET >> 20
+ or r6, r7, #(PHYS_OFFSET & 0xffc00000)
+ stw r6, [r0]
+
+ ldw r15, __switch_data @ address to jump to after
+
+ /*
+ * Initialise TLB, Caches, and MMU state ready to switch the MMU
+ * on.
+ */
+ mov r0, #0
+ movc p0.c5, r0, #28 @ cache invalidate all
+ nop8
+ movc p0.c6, r0, #6 @ TLB invalidate all
+ nop8
+
+ /*
+ * ..V. .... ..TB IDAM
+ * ..1. .... ..01 1111
+ */
+ movl r0, #0x201f @ control register setting
+
+ /*
+ * Setup common bits before finally enabling the MMU. Essentially
+ * this is just loading the page table pointer and domain access
+ * registers.
+ */
+ #ifndef CONFIG_ALIGNMENT_TRAP
+ andn r0, r0, #CR_A
+ #endif
+ #ifdef CONFIG_CPU_DCACHE_DISABLE
+ andn r0, r0, #CR_D
+ #endif
+ #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ andn r0, r0, #CR_B
+ #endif
+ #ifdef CONFIG_CPU_ICACHE_DISABLE
+ andn r0, r0, #CR_I
+ #endif
+
+ movc p0.c2, r4, #0 @ set pgd
+ b __turn_mmu_on
+ENDPROC(stext)
+
+/*
+ * Enable the MMU. This completely changes the structure of the visible
+ * memory space. You will not be able to trace execution through this.
+ *
+ * r0 = cp#0 control register
+ * r15 = *virtual* address to jump to upon completion
+ */
+ .align 5
+__turn_mmu_on:
+ mov r0, r0
+ movc p0.c1, r0, #0 @ write control reg
+ nop @ fetch inst by phys addr
+ mov pc, r15
+ nop8 @ fetch inst by phys addr
+ENDPROC(__turn_mmu_on)
+
+/*
+ * Setup the initial page tables. We only setup the barest
+ * amount which are required to get the kernel running, which
+ * generally means mapping in the kernel code.
+ *
+ * r9 = cpuid
+ * r10 = procinfo
+ *
+ * Returns:
+ * r0, r3, r6, r7 corrupted
+ * r4 = physical page table address
+ */
+ .ltorg
+
+ .align 2
+ .type __switch_data, %object
+__switch_data:
+ .long __mmap_switched
+ .long __bss_start @ r6
+ .long _end @ r7
+ .long cr_alignment @ r8
+ .long init_thread_union + THREAD_START_SP @ sp
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ *
+ * r0 = cp#0 control register
+ */
+__mmap_switched:
+ adr r3, __switch_data + 4
+
+ ldm.w (r6, r7, r8), [r3]+
+ ldw sp, [r3]
+
+ mov fp, #0 @ Clear BSS (and zero fp)
+203: csub.a r6, r7
+ bea 204f
+ stw.w fp, [r6]+,#4
+ b 203b
+204:
+ andn r1, r0, #CR_A @ Clear 'A' bit
+ stm (r0, r1), [r8]+ @ Save control register values
+ b start_kernel
+ENDPROC(__mmap_switched)
+
+/*
+ * Exception handling. Something went wrong and we can't proceed. We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if bootloader fails to pass a proper
+ * machine ID for example).
+ */
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_p1
+ b.l printascii
+ mov r0, r9
+ b.l printhex8
+ adr r0, str_p2
+ b.l printascii
+901: nop8
+ b 901b
+str_p1: .asciz "\nError: unrecognized processor variant (0x"
+str_p2: .asciz ").\n"
+ .align
+#endif
+ENDPROC(__error_p)
+
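The identity-mapping setup in stext above turns the current PC into a 4MiB section index, ORs the section base with the PMD flags, and stores the descriptor at pgd + index * 4. A stand-alone sketch of that arithmetic, with invented addresses and flag bits:

/* Section-mapping arithmetic from stext; values are made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pgd[1024] = { 0 };		/* one 4KiB level-1 table */
	uint32_t pc    = 0x01008000;		/* hypothetical physical PC */
	uint32_t flags = 0x00000c0e;		/* hypothetical section flags */

	uint32_t index = pc >> 22;		/* which 4MiB section */
	uint32_t entry = flags | (index << 22);	/* flags + section base */

	pgd[index] = entry;			/* stw r1, [r4+], r6 << 2 */

	printf("section %u -> descriptor %#010x\n", index, pgd[index]);
	return 0;
}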
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
new file mode 100644
index 000000000000..7d0f0b7983a0
--- /dev/null
+++ b/arch/unicore32/kernel/hibernate.c
@@ -0,0 +1,160 @@
+/*
+ * linux/arch/unicore32/kernel/hibernate.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/suspend.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/suspend.h>
+
+#include "mach/pm.h"
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
+
+struct swsusp_arch_regs swsusp_arch_regs_cpu0;
+
+/*
+ * Return the middle page table for the given global directory entry. With
+ * the folded pud/pmd levels used here there is nothing to allocate, so this
+ * simply resolves to the corresponding pmd entry.
+ */
+static pmd_t *resume_one_md_table_init(pgd_t *pgd)
+{
+ pud_t *pud;
+ pmd_t *pmd_table;
+
+ pud = pud_offset(pgd, 0);
+ pmd_table = pmd_offset(pud, 0);
+
+ return pmd_table;
+}
+
+/*
+ * Create a page table on a resume-safe page and place a pointer to it in
+ * a middle page directory entry.
+ */
+static pte_t *resume_one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
+ if (!page_table)
+ return NULL;
+
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));
+
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+
+ return page_table;
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
+ */
+static int resume_physical_mapping_init(pgd_t *pgd_base)
+{
+ unsigned long pfn;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int pgd_idx, pmd_idx;
+
+ pgd_idx = pgd_index(PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+ pfn = 0;
+
+ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+ pmd = resume_one_md_table_init(pgd);
+ if (!pmd)
+ return -ENOMEM;
+
+ if (pfn >= max_low_pfn)
+ continue;
+
+ for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+ pte_t *max_pte;
+
+ if (pfn >= max_low_pfn)
+ break;
+
+ /* Map with normal page tables.
+ * NOTE: We can mark everything as executable here
+ */
+ pte = resume_one_page_table_init(pmd);
+ if (!pte)
+ return -ENOMEM;
+
+ max_pte = pte + PTRS_PER_PTE;
+ for (; pte < max_pte; pte++, pfn++) {
+ if (pfn >= max_low_pfn)
+ break;
+
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ }
+ }
+ }
+
+ return 0;
+}
+
+static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
+{
+}
+
+int swsusp_arch_resume(void)
+{
+ int error;
+
+ resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!resume_pg_dir)
+ return -ENOMEM;
+
+ resume_init_first_level_page_table(resume_pg_dir);
+ error = resume_physical_mapping_init(resume_pg_dir);
+ if (error)
+ return error;
+
+ /* We have got enough memory and from now on we cannot recover */
+ restore_image(resume_pg_dir, restore_pblist);
+ return 0;
+}
+
+/*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= begin_pfn) && (pfn < end_pfn);
+}
+
+void save_processor_state(void)
+{
+}
+
+void restore_processor_state(void)
+{
+ local_flush_tlb_all();
+}
diff --git a/arch/unicore32/kernel/hibernate_asm.S b/arch/unicore32/kernel/hibernate_asm.S
new file mode 100644
index 000000000000..cc3c65253c8c
--- /dev/null
+++ b/arch/unicore32/kernel/hibernate_asm.S
@@ -0,0 +1,117 @@
+/*
+ * linux/arch/unicore32/kernel/hibernate_asm.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <generated/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/assembler.h>
+
+@ restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist)
+@ r0: resume_pg_dir
+@ r1: restore_pblist
+@ copy restore_pblist pages
+@ restore registers from swsusp_arch_regs_cpu0
+@
+ENTRY(restore_image)
+ sub r0, r0, #PAGE_OFFSET
+ mov r5, #0
+ movc p0.c6, r5, #6 @invalidate ITLB & DTLB
+ movc p0.c2, r0, #0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .p2align 4,,7
+101:
+ csub.a r1, #0
+ beq 109f
+
+ ldw r6, [r1+], #PBE_ADDRESS
+ ldw r7, [r1+], #PBE_ORIN_ADDRESS
+
+ movl ip, #128
+102: ldm.w (r8 - r15), [r6]+
+ stm.w (r8 - r15), [r7]+
+ sub.a ip, ip, #1
+ bne 102b
+
+ ldw r1, [r1+], #PBE_NEXT
+ b 101b
+
+ .p2align 4,,7
+109:
+ /* go back to the original page tables */
+ ldw r0, =swapper_pg_dir
+ sub r0, r0, #PAGE_OFFSET
+ mov r5, #0
+ movc p0.c6, r5, #6
+ movc p0.c2, r0, #0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_FPSTATE
+ lfm.w (f0 - f7 ), [ip]+
+ lfm.w (f8 - f15), [ip]+
+ lfm.w (f16 - f23), [ip]+
+ lfm.w (f24 - f31), [ip]+
+ ldw r4, [ip]
+ ctf r4, s31
+#endif
+ mov r0, #0x0
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_CPU
+ ldm.w (r4 - r15), [ip]+
+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously
+
+ .align 2
+1: .long swsusp_arch_regs_cpu0
+
+
+@ swsusp_arch_suspend()
+@ - prepare pc for resume, return from function without swsusp_save on resume
+@ - save registers in swsusp_arch_regs_cpu0
+@ - call swsusp_save write suspend image
+
+ENTRY(swsusp_arch_suspend)
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_CPU
+ stm.w (r4 - r15), [ip]+
+ stm.w (r16 - r27, sp, lr), [ip]+
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_FPSTATE
+ sfm.w (f0 - f7 ), [ip]+
+ sfm.w (f8 - f15), [ip]+
+ sfm.w (f16 - f23), [ip]+
+ sfm.w (f24 - f31), [ip]+
+ cff r4, s31
+ stw r4, [ip]
+#endif
+ b swsusp_save @ no return
+
+1: .long swsusp_arch_regs_cpu0
diff --git a/arch/unicore32/kernel/init_task.c b/arch/unicore32/kernel/init_task.c
new file mode 100644
index 000000000000..a35a1e50e4f4
--- /dev/null
+++ b/arch/unicore32/kernel/init_task.c
@@ -0,0 +1,44 @@
+/*
+ * linux/arch/unicore32/kernel/init_task.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by making sure
+ * the linker maps this in the .text segment right after head.S,
+ * and making head.S ensure the proper alignment.
+ *
+ * The things we do for performance..
+ */
+union thread_union init_thread_union __init_task_data = {
+ INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c
new file mode 100644
index 000000000000..e1dbfcb61873
--- /dev/null
+++ b/arch/unicore32/kernel/irq.c
@@ -0,0 +1,426 @@
+/*
+ * linux/arch/unicore32/kernel/irq.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/kallsyms.h>
+#include <linux/proc_fs.h>
+#include <linux/sysdev.h>
+#include <linux/gpio.h>
+
+#include <asm/system.h>
+#include <mach/hardware.h>
+
+#include "setup.h"
+
+/*
+ * PKUnity GPIO edge detection for IRQs:
+ * IRQs are generated on Falling-Edge, Rising-Edge, or both.
+ * Use this instead of directly setting GRER/GFER.
+ */
+static int GPIO_IRQ_rising_edge;
+static int GPIO_IRQ_falling_edge;
+static int GPIO_IRQ_mask = 0;
+
+#define GPIO_MASK(irq) (1 << (irq - IRQ_GPIO0))
+
+static int puv3_gpio_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int mask;
+
+ if (d->irq < IRQ_GPIOHIGH)
+ mask = 1 << d->irq;
+ else
+ mask = GPIO_MASK(d->irq);
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
+ return 0;
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ GPIO_IRQ_rising_edge |= mask;
+ else
+ GPIO_IRQ_rising_edge &= ~mask;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ GPIO_IRQ_falling_edge |= mask;
+ else
+ GPIO_IRQ_falling_edge &= ~mask;
+
+ writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
+ writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
+
+ return 0;
+}
+
+/*
+ * GPIO IRQs must be acknowledged. This is for IRQs from 0 to 7.
+ */
+static void puv3_low_gpio_ack(struct irq_data *d)
+{
+ writel((1 << d->irq), GPIO_GEDR);
+}
+
+static void puv3_low_gpio_mask(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) & ~(1 << d->irq), INTC_ICMR);
+}
+
+static void puv3_low_gpio_unmask(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) | (1 << d->irq), INTC_ICMR);
+}
+
+static int puv3_low_gpio_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ writel(readl(PM_PWER) | (1 << d->irq), PM_PWER);
+ else
+ writel(readl(PM_PWER) & ~(1 << d->irq), PM_PWER);
+ return 0;
+}
+
+static struct irq_chip puv3_low_gpio_chip = {
+ .name = "GPIO-low",
+ .irq_ack = puv3_low_gpio_ack,
+ .irq_mask = puv3_low_gpio_mask,
+ .irq_unmask = puv3_low_gpio_unmask,
+ .irq_set_type = puv3_gpio_type,
+ .irq_set_wake = puv3_low_gpio_wake,
+};
+
+/*
+ * IRQ8 (GPIO0 through 27) handler. We enter here with the
+ * irq_controller_lock held, and IRQs disabled. Decode the IRQ
+ * and call the handler.
+ */
+static void
+puv3_gpio_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int mask;
+
+ mask = readl(GPIO_GEDR);
+ do {
+ /*
+ * clear down all currently active IRQ sources.
+ * We will be processing them all.
+ */
+ writel(mask, GPIO_GEDR);
+
+ irq = IRQ_GPIO0;
+ do {
+ if (mask & 1)
+ generic_handle_irq(irq);
+ mask >>= 1;
+ irq++;
+ } while (mask);
+ mask = readl(GPIO_GEDR);
+ } while (mask);
+}
+
+/*
+ * GPIO0-27 edge IRQs need to be handled specially.
+ * In addition, the IRQs are all collected up into one bit in the
+ * interrupt controller registers.
+ */
+static void puv3_high_gpio_ack(struct irq_data *d)
+{
+ unsigned int mask = GPIO_MASK(d->irq);
+
+ writel(mask, GPIO_GEDR);
+}
+
+static void puv3_high_gpio_mask(struct irq_data *d)
+{
+ unsigned int mask = GPIO_MASK(d->irq);
+
+ GPIO_IRQ_mask &= ~mask;
+
+ writel(readl(GPIO_GRER) & ~mask, GPIO_GRER);
+ writel(readl(GPIO_GFER) & ~mask, GPIO_GFER);
+}
+
+static void puv3_high_gpio_unmask(struct irq_data *d)
+{
+ unsigned int mask = GPIO_MASK(d->irq);
+
+ GPIO_IRQ_mask |= mask;
+
+ writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
+ writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
+}
+
+static int puv3_high_gpio_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ writel(readl(PM_PWER) | PM_PWER_GPIOHIGH, PM_PWER);
+ else
+ writel(readl(PM_PWER) & ~PM_PWER_GPIOHIGH, PM_PWER);
+ return 0;
+}
+
+static struct irq_chip puv3_high_gpio_chip = {
+ .name = "GPIO-high",
+ .irq_ack = puv3_high_gpio_ack,
+ .irq_mask = puv3_high_gpio_mask,
+ .irq_unmask = puv3_high_gpio_unmask,
+ .irq_set_type = puv3_gpio_type,
+ .irq_set_wake = puv3_high_gpio_wake,
+};
+
+/*
+ * We don't need to ACK IRQs on the PKUnity unless they're GPIOs;
+ * this is for internal IRQs, i.e. from 8 to 31.
+ */
+static void puv3_mask_irq(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) & ~(1 << d->irq), INTC_ICMR);
+}
+
+static void puv3_unmask_irq(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) | (1 << d->irq), INTC_ICMR);
+}
+
+/*
+ * Apart from GPIOs, only the RTC alarm can be a wakeup event.
+ */
+static int puv3_set_wake(struct irq_data *d, unsigned int on)
+{
+ if (d->irq == IRQ_RTCAlarm) {
+ if (on)
+ writel(readl(PM_PWER) | PM_PWER_RTC, PM_PWER);
+ else
+ writel(readl(PM_PWER) & ~PM_PWER_RTC, PM_PWER);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct irq_chip puv3_normal_chip = {
+ .name = "PKUnity-v3",
+ .irq_ack = puv3_mask_irq,
+ .irq_mask = puv3_mask_irq,
+ .irq_unmask = puv3_unmask_irq,
+ .irq_set_wake = puv3_set_wake,
+};
+
+static struct resource irq_resource = {
+ .name = "irqs",
+ .start = PKUNITY_INTC_BASE,
+ .end = PKUNITY_INTC_BASE + 0xFFFFF,
+};
+
+static struct puv3_irq_state {
+ unsigned int saved;
+ unsigned int icmr;
+ unsigned int iclr;
+ unsigned int iccr;
+} puv3_irq_state;
+
+static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct puv3_irq_state *st = &puv3_irq_state;
+
+ st->saved = 1;
+ st->icmr = readl(INTC_ICMR);
+ st->iclr = readl(INTC_ICLR);
+ st->iccr = readl(INTC_ICCR);
+
+ /*
+ * Disable all GPIO-based interrupts.
+ */
+ writel(readl(INTC_ICMR) & ~(0x1ff), INTC_ICMR);
+
+ /*
+ * Set the appropriate edges for wakeup.
+ */
+ writel(readl(PM_PWER) & GPIO_IRQ_rising_edge, GPIO_GRER);
+ writel(readl(PM_PWER) & GPIO_IRQ_falling_edge, GPIO_GFER);
+
+ /*
+ * Clear any pending GPIO interrupts.
+ */
+ writel(readl(GPIO_GEDR), GPIO_GEDR);
+
+ return 0;
+}
+
+static int puv3_irq_resume(struct sys_device *dev)
+{
+ struct puv3_irq_state *st = &puv3_irq_state;
+
+ if (st->saved) {
+ writel(st->iccr, INTC_ICCR);
+ writel(st->iclr, INTC_ICLR);
+
+ writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
+ writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
+
+ writel(st->icmr, INTC_ICMR);
+ }
+ return 0;
+}
+
+static struct sysdev_class puv3_irq_sysclass = {
+ .name = "pkunity-irq",
+ .suspend = puv3_irq_suspend,
+ .resume = puv3_irq_resume,
+};
+
+static struct sys_device puv3_irq_device = {
+ .id = 0,
+ .cls = &puv3_irq_sysclass,
+};
+
+static int __init puv3_irq_init_devicefs(void)
+{
+ sysdev_class_register(&puv3_irq_sysclass);
+ return sysdev_register(&puv3_irq_device);
+}
+
+device_initcall(puv3_irq_init_devicefs);
+
+void __init init_IRQ(void)
+{
+ unsigned int irq;
+
+ request_resource(&iomem_resource, &irq_resource);
+
+ /* disable all IRQs */
+ writel(0, INTC_ICMR);
+
+ /* all IRQs are IRQ, not REAL */
+ writel(0, INTC_ICLR);
+
+ /* clear all GPIO edge detects */
+ writel(FMASK(8, 0) & ~FIELD(1, 1, GPI_SOFF_REQ), GPIO_GPIR);
+ writel(0, GPIO_GFER);
+ writel(0, GPIO_GRER);
+ writel(0x0FFFFFFF, GPIO_GEDR);
+
+ writel(1, INTC_ICCR);
+
+ for (irq = 0; irq < IRQ_GPIOHIGH; irq++) {
+ set_irq_chip(irq, &puv3_low_gpio_chip);
+ set_irq_handler(irq, handle_edge_irq);
+ irq_modify_status(irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
+ 0);
+ }
+
+ for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) {
+ set_irq_chip(irq, &puv3_normal_chip);
+ set_irq_handler(irq, handle_level_irq);
+ irq_modify_status(irq,
+ IRQ_NOREQUEST | IRQ_NOAUTOEN,
+ IRQ_NOPROBE);
+ }
+
+ for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) {
+ set_irq_chip(irq, &puv3_high_gpio_chip);
+ set_irq_handler(irq, handle_edge_irq);
+ irq_modify_status(irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
+ 0);
+ }
+
+ /*
+ * Install handler for GPIO 0-27 edge detect interrupts
+ */
+ set_irq_chip(IRQ_GPIOHIGH, &puv3_normal_chip);
+ set_irq_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler);
+
+#ifdef CONFIG_PUV3_GPIO
+ puv3_init_gpio();
+#endif
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, cpu;
+ struct irq_desc *desc;
+ struct irqaction *action;
+ unsigned long flags;
+
+ if (i == 0) {
+ char cpuname[12];
+
+ seq_printf(p, " ");
+ for_each_present_cpu(cpu) {
+ sprintf(cpuname, "CPU%d", cpu);
+ seq_printf(p, " %10s", cpuname);
+ }
+ seq_putc(p, '\n');
+ }
+
+ if (i < nr_irqs) {
+ desc = irq_to_desc(i);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
+ if (!action)
+ goto unlock;
+
+ seq_printf(p, "%3d: ", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
+ seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
+ seq_printf(p, " %s", action->name);
+ for (action = action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+unlock:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ } else if (i == nr_irqs) {
+ seq_printf(p, "Error in interrupt!\n");
+ }
+ return 0;
+}
+
+/*
+ * asm_do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'.
+ */
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (unlikely(irq >= nr_irqs)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "Bad IRQ%u\n", irq);
+ ack_bad_irq(irq);
+ } else {
+ generic_handle_irq(irq);
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
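puv3_gpio_type() above keeps software copies of the rising- and falling-edge masks and only writes the currently unmasked bits to GRER/GFER. A stand-alone sketch of that bookkeeping, with plain variables in place of the registers:

/* Edge-type bookkeeping as in puv3_gpio_type(); registers simulated. */
#include <stdio.h>
#include <stdint.h>

#define EDGE_RISING  1
#define EDGE_FALLING 2

static uint32_t rising, falling, irq_mask = 0xffffffffu;
static uint32_t grer, gfer;			/* simulated GRER/GFER */

static void gpio_set_type(unsigned bit, unsigned type)
{
	uint32_t mask = 1u << bit;

	if (type & EDGE_RISING)
		rising |= mask;
	else
		rising &= ~mask;
	if (type & EDGE_FALLING)
		falling |= mask;
	else
		falling &= ~mask;

	grer = rising & irq_mask;		/* writel(..., GPIO_GRER) */
	gfer = falling & irq_mask;		/* writel(..., GPIO_GFER) */
}

int main(void)
{
	gpio_set_type(4, EDGE_RISING | EDGE_FALLING);
	gpio_set_type(7, EDGE_FALLING);
	printf("GRER=%#x GFER=%#x\n", grer, gfer);
	return 0;
}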
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
new file mode 100644
index 000000000000..a8970809428a
--- /dev/null
+++ b/arch/unicore32/kernel/ksyms.c
@@ -0,0 +1,99 @@
+/*
+ * linux/arch/unicore32/kernel/ksyms.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+#include <asm/system.h>
+
+#include "ksyms.h"
+
+EXPORT_SYMBOL(__uc32_find_next_zero_bit);
+EXPORT_SYMBOL(__uc32_find_next_bit);
+
+EXPORT_SYMBOL(__backtrace);
+
+ /* platform dependent support */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__const_udelay);
+
+ /* networking */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_ipv6_magic);
+
+ /* io */
+#ifndef __raw_readsb
+EXPORT_SYMBOL(__raw_readsb);
+#endif
+#ifndef __raw_readsw
+EXPORT_SYMBOL(__raw_readsw);
+#endif
+#ifndef __raw_readsl
+EXPORT_SYMBOL(__raw_readsl);
+#endif
+#ifndef __raw_writesb
+EXPORT_SYMBOL(__raw_writesb);
+#endif
+#ifndef __raw_writesw
+EXPORT_SYMBOL(__raw_writesw);
+#endif
+#ifndef __raw_writesl
+EXPORT_SYMBOL(__raw_writesl);
+#endif
+
+ /* string / mem functions */
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+
+ /* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__bswapsi2);
+
diff --git a/arch/unicore32/kernel/ksyms.h b/arch/unicore32/kernel/ksyms.h
new file mode 100644
index 000000000000..185cdc712d03
--- /dev/null
+++ b/arch/unicore32/kernel/ksyms.h
@@ -0,0 +1,15 @@
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __bswapsi2(void);
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
new file mode 100644
index 000000000000..3e5a38d71a1e
--- /dev/null
+++ b/arch/unicore32/kernel/module.c
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/unicore32/kernel/module.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+void *module_alloc(unsigned long size)
+{
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size)
+ return NULL;
+
+ area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
+ if (!area)
+ return NULL;
+
+ return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+}
+
+void module_free(struct module *module, void *region)
+{
+ vfree(region);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+int
+apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+ unsigned int relindex, struct module *module)
+{
+ Elf32_Shdr *symsec = sechdrs + symindex;
+ Elf32_Shdr *relsec = sechdrs + relindex;
+ Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+ Elf32_Rel *rel = (void *)relsec->sh_addr;
+ unsigned int i;
+
+ for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
+ unsigned long loc;
+ Elf32_Sym *sym;
+ s32 offset;
+
+ offset = ELF32_R_SYM(rel->r_info);
+ if (offset < 0 || offset >
+ (symsec->sh_size / sizeof(Elf32_Sym))) {
+ printk(KERN_ERR "%s: bad relocation, "
+ "section %d reloc %d\n",
+ module->name, relindex, i);
+ return -ENOEXEC;
+ }
+
+ sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+
+ if (rel->r_offset < 0 || rel->r_offset >
+ dstsec->sh_size - sizeof(u32)) {
+ printk(KERN_ERR "%s: out of bounds relocation, "
+ "section %d reloc %d offset %d size %d\n",
+ module->name, relindex, i, rel->r_offset,
+ dstsec->sh_size);
+ return -ENOEXEC;
+ }
+
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_UNICORE_NONE:
+ /* ignore */
+ break;
+
+ case R_UNICORE_ABS32:
+ *(u32 *)loc += sym->st_value;
+ break;
+
+ case R_UNICORE_PC24:
+ case R_UNICORE_CALL:
+ case R_UNICORE_JUMP24:
+ offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ if (offset & 0x02000000)
+ offset -= 0x04000000;
+
+ offset += sym->st_value - loc;
+ if (offset & 3 ||
+ offset <= (s32)0xfe000000 ||
+ offset >= (s32)0x02000000) {
+ printk(KERN_ERR
+ "%s: relocation out of range, section "
+ "%d reloc %d sym '%s'\n", module->name,
+ relindex, i, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+
+ offset >>= 2;
+
+ *(u32 *)loc &= 0xff000000;
+ *(u32 *)loc |= offset & 0x00ffffff;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int
+apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *module)
+{
+ printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
+ module->name);
+ return -ENOEXEC;
+}
+
+int
+module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *module)
+{
+ return 0;
+}
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
new file mode 100644
index 000000000000..65c265ee8e94
--- /dev/null
+++ b/arch/unicore32/kernel/pci.c
@@ -0,0 +1,404 @@
+/*
+ * linux/arch/unicore32/kernel/pci.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+static int debug_pci;
+static int use_firmware;
+
+#define CONFIG_CMD(bus, devfn, where) \
+ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
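+/*
+ * For example, bus 0, devfn 0x08, register 0x04 yields 0x80000804:
+ * bit 31 enables the cycle, bits 23-16 carry the bus number,
+ * bits 15-8 the devfn and bits 7-2 the dword-aligned register.
+ */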
+
+static int
+puv3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ writel(CONFIG_CMD(bus, devfn, where), PCICFG_ADDR);
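+ /* config data is read as a full dword; narrower accesses
+ * extract the addressed byte or halfword from it */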
+ switch (size) {
+ case 1:
+ *value = (readl(PCICFG_DATA) >> ((where & 3) * 8)) & 0xFF;
+ break;
+ case 2:
+ *value = (readl(PCICFG_DATA) >> ((where & 2) * 8)) & 0xFFFF;
+ break;
+ case 4:
+ *value = readl(PCICFG_DATA);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+puv3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ writel(CONFIG_CMD(bus, devfn, where), PCICFG_ADDR);
+ switch (size) {
+ case 1:
+ writel((readl(PCICFG_DATA) & ~FMASK(8, (where&3)*8))
+ | FIELD(value, 8, (where&3)*8), PCICFG_DATA);
+ break;
+ case 2:
+ writel((readl(PCICFG_DATA) & ~FMASK(16, (where&2)*8))
+ | FIELD(value, 16, (where&2)*8), PCICFG_DATA);
+ break;
+ case 4:
+ writel(value, PCICFG_DATA);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops pci_puv3_ops = {
+ .read = puv3_read_config,
+ .write = puv3_write_config,
+};
+
+void pci_puv3_preinit(void)
+{
+ printk(KERN_DEBUG "PCI: PKUnity PCI Controller Initializing ...\n");
+ /* config PCI bridge base */
+ writel(PKUNITY_PCIBRI_BASE, PCICFG_BRIBASE);
+
+ writel(0, PCIBRI_AHBCTL0);
+ writel(PKUNITY_PCIBRI_BASE | PCIBRI_BARx_MEM, PCIBRI_AHBBAR0);
+ writel(0xFFFF0000, PCIBRI_AHBAMR0);
+ writel(0, PCIBRI_AHBTAR0);
+
+ writel(PCIBRI_CTLx_AT, PCIBRI_AHBCTL1);
+ writel(PKUNITY_PCILIO_BASE | PCIBRI_BARx_IO, PCIBRI_AHBBAR1);
+ writel(0xFFFF0000, PCIBRI_AHBAMR1);
+ writel(0x00000000, PCIBRI_AHBTAR1);
+
+ writel(PCIBRI_CTLx_PREF, PCIBRI_AHBCTL2);
+ writel(PKUNITY_PCIMEM_BASE | PCIBRI_BARx_MEM, PCIBRI_AHBBAR2);
+ writel(0xF8000000, PCIBRI_AHBAMR2);
+ writel(0, PCIBRI_AHBTAR2);
+
+ writel(PKUNITY_PCIAHB_BASE | PCIBRI_BARx_MEM, PCIBRI_BAR1);
+
+ writel(PCIBRI_CTLx_AT | PCIBRI_CTLx_PREF, PCIBRI_PCICTL0);
+ writel(PKUNITY_PCIAHB_BASE | PCIBRI_BARx_MEM, PCIBRI_PCIBAR0);
+ writel(0xF8000000, PCIBRI_PCIAMR0);
+ writel(PKUNITY_SDRAM_BASE, PCIBRI_PCITAR0);
+
+ writel(readl(PCIBRI_CMD) | PCIBRI_CMD_IO | PCIBRI_CMD_MEM, PCIBRI_CMD);
+}
+
+static int __init pci_puv3_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (dev->bus->number == 0) {
+#ifdef CONFIG_ARCH_FPGA /* 4 pci slots */
+ if (dev->devfn == 0x00)
+ return IRQ_PCIINTA;
+ else if (dev->devfn == 0x08)
+ return IRQ_PCIINTB;
+ else if (dev->devfn == 0x10)
+ return IRQ_PCIINTC;
+ else if (dev->devfn == 0x18)
+ return IRQ_PCIINTD;
+#endif
+#ifdef CONFIG_PUV3_DB0913 /* 3 pci slots */
+ if (dev->devfn == 0x30)
+ return IRQ_PCIINTB;
+ else if (dev->devfn == 0x60)
+ return IRQ_PCIINTC;
+ else if (dev->devfn == 0x58)
+ return IRQ_PCIINTD;
+#endif
+#if defined(CONFIG_PUV3_NB0916) || defined(CONFIG_PUV3_SMW0919)
+ /* only support 2 pci devices */
+ if (dev->devfn == 0x00)
+ return IRQ_PCIINTC; /* sata */
+#endif
+ }
+ return -1;
+}
+
+/*
+ * Only the first 128MB of memory can be accessed via PCI.
+ * We use GFP_DMA to allocate safe buffers to do map/unmap.
+ * This is really ugly and we need a better way of specifying
+ * DMA-capable regions of memory.
+ */
+void __init puv3_pci_adjust_zones(unsigned long *zone_size,
+ unsigned long *zhole_size)
+{
+ unsigned int sz = SZ_128M >> PAGE_SHIFT;
+
+ /*
+ * Only adjust if > 128M on current system
+ */
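+ /* e.g. with 256MB of RAM, zone 0 keeps the DMA-capable first
+ * 128MB and zone 1 takes the remaining 128MB */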
+ if (zone_size[0] <= sz)
+ return;
+
+ zone_size[1] = zone_size[0] - sz;
+ zone_size[0] = sz;
+ zhole_size[1] = zhole_size[0];
+ zhole_size[0] = 0;
+}
+
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ if (debug_pci)
+ printk(KERN_DEBUG "PCI: Assigning IRQ %02d to %s\n",
+ irq, pci_name(dev));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * If the bus contains any of these devices, then we must not turn on
+ * parity checking of any kind.
+ */
+static inline int pdev_bad_for_parity(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/*
+ * pcibios_fixup_bus - Called after each bus is probed,
+ * but before its children are examined.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ u16 features = PCI_COMMAND_SERR
+ | PCI_COMMAND_PARITY
+ | PCI_COMMAND_FAST_BACK;
+
+ bus->resource[0] = &ioport_resource;
+ bus->resource[1] = &iomem_resource;
+
+ /*
+ * Walk the devices on this bus, working out what we can
+ * and can't support.
+ */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 status;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+
+ /*
+ * If any device on this bus does not support fast back
+ * to back transfers, then the bus as a whole is not able
+ * to support them. Having fast back to back transfers
+ * on saves us one PCI cycle per transaction.
+ */
+ if (!(status & PCI_STATUS_FAST_BACK))
+ features &= ~PCI_COMMAND_FAST_BACK;
+
+ if (pdev_bad_for_parity(dev))
+ features &= ~(PCI_COMMAND_SERR
+ | PCI_COMMAND_PARITY);
+
+ switch (dev->class >> 8) {
+ case PCI_CLASS_BRIDGE_PCI:
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
+ status |= PCI_BRIDGE_CTL_PARITY
+ | PCI_BRIDGE_CTL_MASTER_ABORT;
+ status &= ~(PCI_BRIDGE_CTL_BUS_RESET
+ | PCI_BRIDGE_CTL_FAST_BACK);
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
+ break;
+
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL,
+ &status);
+ status |= PCI_CB_BRIDGE_CTL_PARITY
+ | PCI_CB_BRIDGE_CTL_MASTER_ABORT;
+ pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL,
+ status);
+ break;
+ }
+ }
+
+ /*
+ * Now walk the devices again, this time setting them up.
+ */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd |= features;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+ L1_CACHE_BYTES >> 2);
+ }
+
+ /*
+ * Propagate the flags to the PCI bridge.
+ */
+ if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ if (features & PCI_COMMAND_FAST_BACK)
+ bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
+ if (features & PCI_COMMAND_PARITY)
+ bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
+ }
+
+ /*
+ * Report what we did for this bus
+ */
+ printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
+ bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
+}
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_fixup_bus);
+#endif
+
+static int __init pci_common_init(void)
+{
+ struct pci_bus *puv3_bus;
+
+ pci_puv3_preinit();
+
+ puv3_bus = pci_scan_bus(0, &pci_puv3_ops, NULL);
+
+ if (!puv3_bus)
+ panic("PCI: unable to scan bus!");
+
+ pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq);
+
+ if (!use_firmware) {
+ /*
+ * Size the bridge windows.
+ */
+ pci_bus_size_bridges(puv3_bus);
+
+ /*
+ * Assign resources.
+ */
+ pci_bus_assign_resources(puv3_bus);
+ }
+
+ /*
+ * Tell drivers about devices found.
+ */
+ pci_bus_add_devices(puv3_bus);
+
+ return 0;
+}
+subsys_initcall(pci_common_init);
+
+char * __devinit pcibios_setup(char *str)
+{
+ if (!strcmp(str, "debug")) {
+ debug_pci = 1;
+ return NULL;
+ } else if (!strcmp(str, "firmware")) {
+ use_firmware = 1;
+ return NULL;
+ }
+ return str;
+}
+
+/*
+ * From arch/i386/kernel/pci-i386.c:
+ *
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ resource_size_t start = res->start;
+
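+ /* push I/O allocations that land in the 0x100-0x3ff window
+ * (modulo 0x400) up to the next 1KB boundary, then apply the
+ * caller's alignment */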
+ if (res->flags & IORESOURCE_IO && start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+
+ start = (start + align - 1) & ~(align - 1);
+
+ return start;
+}
+
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx = 0; idx < 6; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1 << idx)))
+ continue;
+
+ r = dev->resource + idx;
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available because"
+ " of resource collisions\n", pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ /*
+ * Bridges (e.g. CardBus bridges) need to be fully enabled
+ */
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+ if (cmd != old_cmd) {
+ printk("PCI: enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ unsigned long phys;
+
+ if (mmap_state == pci_mmap_io)
+ return -EINVAL;
+
+ phys = vma->vm_pgoff;
+
+ /*
+ * Mark this as IO
+ */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, phys,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
diff --git a/arch/unicore32/kernel/pm.c b/arch/unicore32/kernel/pm.c
new file mode 100644
index 000000000000..784bc2db3b28
--- /dev/null
+++ b/arch/unicore32/kernel/pm.c
@@ -0,0 +1,123 @@
+/*
+ * linux/arch/unicore32/kernel/pm.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+#include "setup.h"
+
+struct puv3_cpu_pm_fns *puv3_cpu_pm_fns;
+static unsigned long *sleep_save;
+
+int puv3_pm_enter(suspend_state_t state)
+{
+ unsigned long sleep_save_checksum = 0, checksum = 0;
+ int i;
+
+ /* skip registers saving for standby */
+ if (state != PM_SUSPEND_STANDBY) {
+ puv3_cpu_pm_fns->save(sleep_save);
+ /* before sleeping, calculate and save a checksum */
+ for (i = 0; i < puv3_cpu_pm_fns->save_count - 1; i++)
+ sleep_save_checksum += sleep_save[i];
+ }
+
+ /* *** go zzz *** */
+ puv3_cpu_pm_fns->enter(state);
+ cpu_init();
+#ifdef CONFIG_INPUT_KEYBOARD
+ puv3_ps2_init();
+#endif
+#ifdef CONFIG_PCI
+ pci_puv3_preinit();
+#endif
+ if (state != PM_SUSPEND_STANDBY) {
+ /* after sleeping, validate the checksum */
+ for (i = 0; i < puv3_cpu_pm_fns->save_count - 1; i++)
+ checksum += sleep_save[i];
+
+ /* if invalid, display message and wait for a hardware reset */
+ if (checksum != sleep_save_checksum) {
+ while (1)
+ puv3_cpu_pm_fns->enter(state);
+ }
+ puv3_cpu_pm_fns->restore(sleep_save);
+ }
+
+ pr_debug("*** made it back from resume\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(puv3_pm_enter);
+
+unsigned long sleep_phys_sp(void *sp)
+{
+ return virt_to_phys(sp);
+}
+
+static int puv3_pm_valid(suspend_state_t state)
+{
+ if (puv3_cpu_pm_fns)
+ return puv3_cpu_pm_fns->valid(state);
+
+ return -EINVAL;
+}
+
+static int puv3_pm_prepare(void)
+{
+ int ret = 0;
+
+ if (puv3_cpu_pm_fns && puv3_cpu_pm_fns->prepare)
+ ret = puv3_cpu_pm_fns->prepare();
+
+ return ret;
+}
+
+static void puv3_pm_finish(void)
+{
+ if (puv3_cpu_pm_fns && puv3_cpu_pm_fns->finish)
+ puv3_cpu_pm_fns->finish();
+}
+
+static struct platform_suspend_ops puv3_pm_ops = {
+ .valid = puv3_pm_valid,
+ .enter = puv3_pm_enter,
+ .prepare = puv3_pm_prepare,
+ .finish = puv3_pm_finish,
+};
+
+static int __init puv3_pm_init(void)
+{
+ if (!puv3_cpu_pm_fns) {
+ printk(KERN_ERR "no valid puv3_cpu_pm_fns defined\n");
+ return -EINVAL;
+ }
+
+ sleep_save = kmalloc(puv3_cpu_pm_fns->save_count
+ * sizeof(unsigned long), GFP_KERNEL);
+ if (!sleep_save) {
+ printk(KERN_ERR "failed to alloc memory for pm save\n");
+ return -ENOMEM;
+ }
+
+ suspend_set_ops(&puv3_pm_ops);
+ return 0;
+}
+
+device_initcall(puv3_pm_init);
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
new file mode 100644
index 000000000000..ba401df971ed
--- /dev/null
+++ b/arch/unicore32/kernel/process.c
@@ -0,0 +1,389 @@
+/*
+ * linux/arch/unicore32/kernel/process.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <stdarg.h>
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/elfcore.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/utsname.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/gpio.h>
+#include <linux/stacktrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/stacktrace.h>
+
+#include "setup.h"
+
+static const char * const processor_modes[] = {
+ "UK00", "UK01", "UK02", "UK03", "UK04", "UK05", "UK06", "UK07",
+ "UK08", "UK09", "UK0A", "UK0B", "UK0C", "UK0D", "UK0E", "UK0F",
+ "USER", "REAL", "INTR", "PRIV", "UK14", "UK15", "UK16", "ABRT",
+ "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
+};
+
+/*
+ * The idle thread has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_stop_sched_tick(1);
+ while (!need_resched()) {
+ local_irq_disable();
+ stop_critical_timings();
+ cpu_do_idle();
+ local_irq_enable();
+ start_critical_timings();
+ }
+ tick_nohz_restart_sched_tick();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
+static char reboot_mode = 'h';
+
+int __init reboot_setup(char *str)
+{
+ reboot_mode = str[0];
+ return 1;
+}
+
+__setup("reboot=", reboot_setup);
+
+void machine_halt(void)
+{
+ gpio_set_value(GPO_SOFT_OFF, 0);
+}
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*pm_power_off)(void) = NULL;
+
+void machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+ machine_halt();
+}
+
+void machine_restart(char *cmd)
+{
+ /* Disable interrupts first */
+ local_irq_disable();
+
+ /*
+ * Tell the mm system that we are going to reboot -
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+ setup_mm_for_reboot(reboot_mode);
+
+ /* Clean and invalidate caches */
+ flush_cache_all();
+
+ /* Turn off caching */
+ cpu_proc_fin();
+
+ /* Push out any further dirty data, and ensure cache is empty */
+ flush_cache_all();
+
+ /*
+ * Now handle reboot code.
+ */
+ if (reboot_mode == 's') {
+ /* Jump into ROM at address 0xffff0000 */
+ cpu_reset(VECTORS_BASE);
+ } else {
+ writel(0x00002001, PM_PLLSYSCFG); /* cpu clk = 250M */
+ writel(0x00100800, PM_PLLDDRCFG); /* ddr clk = 44M */
+ writel(0x00002001, PM_PLLVGACFG); /* vga clk = 250M */
+
+ /* Use on-chip reset capability */
+ /* following instructions must be in one icache line */
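+ /* sequence: request the clock reconfiguration, poll PM_PMCR
+ * until it reads back zero, then write the software reset
+ * request to RESETC_SWRR */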
+ __asm__ __volatile__(
+ " .align 5\n\t"
+ " stw %1, [%0]\n\t"
+ "201: ldw r0, [%0]\n\t"
+ " cmpsub.a r0, #0\n\t"
+ " bne 201b\n\t"
+ " stw %3, [%2]\n\t"
+ " nop; nop; nop\n\t"
+ /* prefetch 3 instructions at most */
+ :
+ : "r" (PM_PMCR),
+ "r" (PM_PMCR_CFBSYS | PM_PMCR_CFBDDR
+ | PM_PMCR_CFBVGA),
+ "r" (RESETC_SWRR),
+ "r" (RESETC_SWRR_SRB)
+ : "r0", "memory");
+ }
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ * Tell the user!
+ */
+ mdelay(1000);
+ printk(KERN_EMERG "Reboot failed -- System halted\n");
+ do { } while (1);
+}
+
+void __show_regs(struct pt_regs *regs)
+{
+ unsigned long flags;
+ char buf[64];
+
+ printk(KERN_DEFAULT "CPU: %d %s (%s %.*s)\n",
+ raw_smp_processor_id(), print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ print_symbol("PC is at %s\n", instruction_pointer(regs));
+ print_symbol("LR is at %s\n", regs->UCreg_lr);
+ printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->UCreg_pc, regs->UCreg_lr, regs->UCreg_asr,
+ regs->UCreg_sp, regs->UCreg_ip, regs->UCreg_fp);
+ printk(KERN_DEFAULT "r26: %08lx r25: %08lx r24: %08lx\n",
+ regs->UCreg_26, regs->UCreg_25,
+ regs->UCreg_24);
+ printk(KERN_DEFAULT "r23: %08lx r22: %08lx r21: %08lx r20: %08lx\n",
+ regs->UCreg_23, regs->UCreg_22,
+ regs->UCreg_21, regs->UCreg_20);
+ printk(KERN_DEFAULT "r19: %08lx r18: %08lx r17: %08lx r16: %08lx\n",
+ regs->UCreg_19, regs->UCreg_18,
+ regs->UCreg_17, regs->UCreg_16);
+ printk(KERN_DEFAULT "r15: %08lx r14: %08lx r13: %08lx r12: %08lx\n",
+ regs->UCreg_15, regs->UCreg_14,
+ regs->UCreg_13, regs->UCreg_12);
+ printk(KERN_DEFAULT "r11: %08lx r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs->UCreg_11, regs->UCreg_10,
+ regs->UCreg_09, regs->UCreg_08);
+ printk(KERN_DEFAULT "r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs->UCreg_07, regs->UCreg_06,
+ regs->UCreg_05, regs->UCreg_04);
+ printk(KERN_DEFAULT "r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs->UCreg_03, regs->UCreg_02,
+ regs->UCreg_01, regs->UCreg_00);
+
+ flags = regs->UCreg_asr;
+ buf[0] = flags & PSR_S_BIT ? 'S' : 's';
+ buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
+ buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
+ buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
+ buf[4] = '\0';
+
+ printk(KERN_DEFAULT "Flags: %s INTR o%s REAL o%s Mode %s Segment %s\n",
+ buf, interrupts_enabled(regs) ? "n" : "ff",
+ fast_interrupts_enabled(regs) ? "n" : "ff",
+ processor_modes[processor_mode(regs)],
+ segment_eq(get_fs(), get_ds()) ? "kernel" : "user");
+ {
+ unsigned int ctrl;
+
+ buf[0] = '\0';
+ {
+ unsigned int transbase;
+ asm("movc %0, p0.c2, #0\n"
+ : "=r" (transbase));
+ snprintf(buf, sizeof(buf), " Table: %08x", transbase);
+ }
+ asm("movc %0, p0.c1, #0\n" : "=r" (ctrl));
+
+ printk(KERN_DEFAULT "Control: %08x%s\n", ctrl, buf);
+ }
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printk(KERN_DEFAULT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %20s\n",
+ task_pid_nr(current), current->comm);
+ __show_regs(regs);
+ __backtrace();
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+
+ memset(thread->used_cp, 0, sizeof(thread->used_cp));
+ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+#ifdef CONFIG_UNICORE_FPU_F64
+ memset(&thread->fpstate, 0, sizeof(struct fp_state));
+#endif
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
+{
+ struct thread_info *thread = task_thread_info(p);
+ struct pt_regs *childregs = task_pt_regs(p);
+
+ *childregs = *regs;
+ childregs->UCreg_00 = 0;
+ childregs->UCreg_sp = stack_start;
+
+ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+ thread->cpu_context.sp = (unsigned long)childregs;
+ thread->cpu_context.pc = (unsigned long)ret_from_fork;
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->UCreg_16 = regs->UCreg_03;
+
+ return 0;
+}
+
+/*
+ * Fill in the task's elfregs structure for a core dump.
+ */
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
+{
+ elf_core_copy_regs(elfregs, task_pt_regs(t));
+ return 1;
+}
+
+/*
+ * Fill in the fpe structure for a core dump.
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp)
+{
+ struct thread_info *thread = current_thread_info();
+ int used_math = thread->used_cp[1] | thread->used_cp[2];
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ if (used_math)
+ memcpy(fp, &thread->fpstate, sizeof(*fp));
+#endif
+ return used_math != 0;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * Shuffle the argument into the correct register before calling the
+ * thread function. r4 is the thread argument, r5 is the pointer to
+ * the thread function, and r6 points to the exit function.
+ */
+asm(".pushsection .text\n"
+" .align\n"
+" .type kernel_thread_helper, #function\n"
+"kernel_thread_helper:\n"
+" mov.a asr, r7\n"
+" mov r0, r4\n"
+" mov lr, r6\n"
+" mov pc, r5\n"
+" .size kernel_thread_helper, . - kernel_thread_helper\n"
+" .popsection");
+
+/*
+ * Create a kernel thread.
+ */
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.UCreg_04 = (unsigned long)arg;
+ regs.UCreg_05 = (unsigned long)fn;
+ regs.UCreg_06 = (unsigned long)do_exit;
+ regs.UCreg_07 = PRIV_MODE;
+ regs.UCreg_pc = (unsigned long)kernel_thread_helper;
+ regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
+
+ return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ struct stackframe frame;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ frame.fp = thread_saved_fp(p);
+ frame.sp = thread_saved_sp(p);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(p);
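+ /* unwind at most 16 frames; report the first PC that is not
+ * inside the scheduler */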
+ do {
+ int ret = unwind_frame(&frame);
+ if (ret < 0)
+ return 0;
+ if (!in_sched_functions(frame.pc))
+ return frame.pc;
+ } while ((count++) < 16);
+ return 0;
+}
+
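+/* randomize the heap start within 32MB (0x02000000) above the ELF brk */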
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ unsigned long range_end = mm->brk + 0x02000000;
+ return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+}
+
+/*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Let's declare a mapping
+ * for it so it is visible through ptrace and /proc/<pid>/mem.
+ */
+
+int vectors_user_mapping(void)
+{
+ struct mm_struct *mm = current->mm;
+ return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC |
+ VM_ALWAYSDUMP | VM_RESERVED,
+ NULL);
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+}
diff --git a/arch/unicore32/kernel/ptrace.c b/arch/unicore32/kernel/ptrace.c
new file mode 100644
index 000000000000..9f07c08da050
--- /dev/null
+++ b/arch/unicore32/kernel/ptrace.c
@@ -0,0 +1,149 @@
+/*
+ * linux/arch/unicore32/kernel/ptrace.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * By Ross Biro 1/23/92
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+/*
+ * This routine gets a word off the process's privileged stack.
+ * The offset is how far from the base address stored in the THREAD.
+ * This routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long get_user_reg(struct task_struct *task, int offset)
+{
+ return task_pt_regs(task)->uregs[offset];
+}
+
+/*
+ * This routine puts a word on the process's privileged stack.
+ * The offset is how far from the base address stored in the THREAD.
+ * This routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int
+put_user_reg(struct task_struct *task, int offset, long data)
+{
+ struct pt_regs newregs, *regs = task_pt_regs(task);
+ int ret = -EINVAL;
+
+ newregs = *regs;
+ newregs.uregs[offset] = data;
+
+ if (valid_user_regs(&newregs)) {
+ regs->uregs[offset] = data;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ */
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+/*
+ * We actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
+ unsigned long __user *ret)
+{
+ unsigned long tmp;
+
+ tmp = 0;
+ if (off < sizeof(struct pt_regs))
+ tmp = get_user_reg(tsk, off >> 2);
+
+ return put_user(tmp, ret);
+}
+
+/*
+ * We actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
+ unsigned long val)
+{
+ if (off >= sizeof(struct pt_regs))
+ return 0;
+
+ return put_user_reg(tsk, off >> 2, val);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = ptrace_read_user(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = ptrace_write_user(child, addr, data);
+ break;
+
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_pt_regs(child)->UCreg_16,
+ datap);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+{
+ unsigned long ip;
+
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return scno;
+ if (!(current->ptrace & PT_PTRACED))
+ return scno;
+
+ /*
+ * Save IP. IP is used to denote syscall entry/exit:
+ * IP = 0 -> entry, = 1 -> exit
+ */
+ ip = regs->UCreg_ip;
+ regs->UCreg_ip = why;
+
+ current_thread_info()->syscall = scno;
+
+ /* the 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+ regs->UCreg_ip = ip;
+
+ return current_thread_info()->syscall;
+}
diff --git a/arch/unicore32/kernel/puv3-core.c b/arch/unicore32/kernel/puv3-core.c
new file mode 100644
index 000000000000..7d10e7b07c20
--- /dev/null
+++ b/arch/unicore32/kernel/puv3-core.c
@@ -0,0 +1,285 @@
+/*
+ * linux/arch/unicore32/kernel/puv3-core.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/cnt32_to_63.h>
+#include <linux/usb/musb.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+/*
+ * This is the PKUnity sched_clock implementation. This has
+ * a resolution of 271ns, and a maximum value of 32025597s (370 days).
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 582 seconds between successive
+ * calls to this function.
+ *
+ * ( * 1E9 / CLOCK_TICK_RATE ) -> about 2235/32
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long v = cnt32_to_63(readl(OST_OSCR));
+
+ /* original conservative method, but it overflows frequently
+ * v *= NSEC_PER_SEC >> 12;
+ * do_div(v, CLOCK_TICK_RATE >> 12);
+ */
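+ /* fixed-point form of ticks * 10^9 / CLOCK_TICK_RATE, using the
+ * 2235/32 approximation noted above */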
+ v = ((v & 0x7fffffffffffffffULL) * 2235) >> 5;
+
+ return v;
+}
+
+static struct resource puv3_usb_resources[] = {
+ /* order is significant! */
+ {
+ .start = PKUNITY_USB_BASE,
+ .end = PKUNITY_USB_BASE + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_USB,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = IRQ_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct musb_hdrc_config puv3_usb_config[] = {
+ {
+ .num_eps = 16,
+ .multipoint = 1,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma = 1,
+ .dma_channels = 8,
+#endif
+ },
+};
+
+static struct musb_hdrc_platform_data puv3_usb_plat = {
+ .mode = MUSB_HOST,
+ .min_power = 100,
+ .clock = 0,
+ .config = puv3_usb_config,
+};
+
+static struct resource puv3_mmc_resources[] = {
+ [0] = {
+ .start = PKUNITY_SDC_BASE,
+ .end = PKUNITY_SDC_BASE + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_SDC,
+ .end = IRQ_SDC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource puv3_unigfx_resources[] = {
+ [0] = {
+ .start = PKUNITY_UNIGFX_BASE,
+ .end = PKUNITY_UNIGFX_BASE + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PKUNITY_UNIGFX_MMAP_BASE,
+ .end = PKUNITY_UNIGFX_MMAP_BASE + PKUNITY_UNIGFX_MMAP_SIZE,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource puv3_rtc_resources[] = {
+ [0] = {
+ .start = PKUNITY_RTC_BASE,
+ .end = PKUNITY_RTC_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RTCAlarm,
+ .end = IRQ_RTCAlarm,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_RTC,
+ .end = IRQ_RTC,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource puv3_pwm_resources[] = {
+ [0] = {
+ .start = PKUNITY_OST_BASE + 0x80,
+ .end = PKUNITY_OST_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource puv3_uart0_resources[] = {
+ [0] = {
+ .start = PKUNITY_UART0_BASE,
+ .end = PKUNITY_UART0_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_UART0,
+ .end = IRQ_UART0,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource puv3_uart1_resources[] = {
+ [0] = {
+ .start = PKUNITY_UART1_BASE,
+ .end = PKUNITY_UART1_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_UART1,
+ .end = IRQ_UART1,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource puv3_umal_resources[] = {
+ [0] = {
+ .start = PKUNITY_UMAL_BASE,
+ .end = PKUNITY_UMAL_BASE + 0x1fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_UMAL,
+ .end = IRQ_UMAL,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+#ifdef CONFIG_PUV3_PM
+
+#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
+#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
+
+/*
+ * List of global PKUnity peripheral registers to preserve.
+ * Others, such as CP and general-purpose register values, are preserved
+ * with the stack pointer in sleep.S.
+ */
+enum {
+ SLEEP_SAVE_PM_PLLDDRCFG,
+ SLEEP_SAVE_COUNT
+};
+
+
+static void puv3_cpu_pm_save(unsigned long *sleep_save)
+{
+/* SAVE(PM_PLLDDRCFG); */
+}
+
+static void puv3_cpu_pm_restore(unsigned long *sleep_save)
+{
+/* RESTORE(PM_PLLDDRCFG); */
+}
+
+static int puv3_cpu_pm_prepare(void)
+{
+ /* set resume return address */
+ writel(virt_to_phys(puv3_cpu_resume), PM_DIVCFG);
+ return 0;
+}
+
+static void puv3_cpu_pm_enter(suspend_state_t state)
+{
+ /* Clear reset status */
+ writel(RESETC_RSSR_HWR | RESETC_RSSR_WDR
+ | RESETC_RSSR_SMR | RESETC_RSSR_SWR, RESETC_RSSR);
+
+ switch (state) {
+/* case PM_SUSPEND_ON:
+ puv3_cpu_idle();
+ break; */
+ case PM_SUSPEND_MEM:
+ puv3_cpu_pm_prepare();
+ puv3_cpu_suspend(PM_PMCR_SFB);
+ break;
+ }
+}
+
+static int puv3_cpu_pm_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM;
+}
+
+static void puv3_cpu_pm_finish(void)
+{
+ /* ensure not to come back here if it wasn't intended */
+ /* PSPR = 0; */
+}
+
+static struct puv3_cpu_pm_fns puv3_cpu_pm_fnss = {
+ .save_count = SLEEP_SAVE_COUNT,
+ .valid = puv3_cpu_pm_valid,
+ .save = puv3_cpu_pm_save,
+ .restore = puv3_cpu_pm_restore,
+ .enter = puv3_cpu_pm_enter,
+ .prepare = puv3_cpu_pm_prepare,
+ .finish = puv3_cpu_pm_finish,
+};
+
+static void __init puv3_init_pm(void)
+{
+ puv3_cpu_pm_fns = &puv3_cpu_pm_fnss;
+}
+#else
+static inline void puv3_init_pm(void) {}
+#endif
+
+void puv3_ps2_init(void)
+{
+ struct clk *bclk32;
+
+ bclk32 = clk_get(NULL, "BUS32_CLK");
+ writel(clk_get_rate(bclk32) / 200000, PS2_CNT); /* should be > 5us */
+}
+
+void __init puv3_core_init(void)
+{
+ puv3_init_pm();
+ puv3_ps2_init();
+
+ platform_device_register_simple("PKUnity-v3-RTC", -1,
+ puv3_rtc_resources, ARRAY_SIZE(puv3_rtc_resources));
+ platform_device_register_simple("PKUnity-v3-UMAL", -1,
+ puv3_umal_resources, ARRAY_SIZE(puv3_umal_resources));
+ platform_device_register_simple("PKUnity-v3-MMC", -1,
+ puv3_mmc_resources, ARRAY_SIZE(puv3_mmc_resources));
+ platform_device_register_simple("PKUnity-v3-UNIGFX", -1,
+ puv3_unigfx_resources, ARRAY_SIZE(puv3_unigfx_resources));
+ platform_device_register_simple("PKUnity-v3-PWM", -1,
+ puv3_pwm_resources, ARRAY_SIZE(puv3_pwm_resources));
+ platform_device_register_simple("PKUnity-v3-UART", 0,
+ puv3_uart0_resources, ARRAY_SIZE(puv3_uart0_resources));
+ platform_device_register_simple("PKUnity-v3-UART", 1,
+ puv3_uart1_resources, ARRAY_SIZE(puv3_uart1_resources));
+ platform_device_register_simple("PKUnity-v3-AC97", -1, NULL, 0);
+ platform_device_register_resndata(&platform_bus, "musb_hdrc", -1,
+ puv3_usb_resources, ARRAY_SIZE(puv3_usb_resources),
+ &puv3_usb_plat, sizeof(puv3_usb_plat));
+}
+
diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c
new file mode 100644
index 000000000000..a78e60420653
--- /dev/null
+++ b/arch/unicore32/kernel/puv3-nb0916.c
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/unicore32/kernel/puv3-nb0916.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+
+#include <mach/hardware.h>
+
+static struct physmap_flash_data physmap_flash_data = {
+ .width = 1,
+};
+
+static struct resource physmap_flash_resource = {
+ .start = 0xFFF80000,
+ .end = 0xFFFFFFFF,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource puv3_i2c_resources[] = {
+ [0] = {
+ .start = PKUNITY_I2C_BASE,
+ .end = PKUNITY_I2C_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_I2C,
+ .end = IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_pwm_backlight_data nb0916_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = 100,
+ .dft_brightness = 100,
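+ /* 70 * 1024 ns is roughly a 71.7us period, i.e. a ~14kHz PWM */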
+ .pwm_period_ns = 70 * 1024,
+};
+
+static struct gpio_keys_button nb0916_gpio_keys[] = {
+ {
+ .type = EV_KEY,
+ .code = KEY_POWER,
+ .gpio = GPI_SOFF_REQ,
+ .desc = "Power Button",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+ {
+ .type = EV_KEY,
+ .code = BTN_TOUCH,
+ .gpio = GPI_BTN_TOUCH,
+ .desc = "Touchpad Button",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_platform_data nb0916_gpio_button_data = {
+ .buttons = nb0916_gpio_keys,
+ .nbuttons = ARRAY_SIZE(nb0916_gpio_keys),
+};
+
+static irqreturn_t nb0916_lcdcaseoff_handler(int irq, void *dev_id)
+{
+ if (gpio_get_value(GPI_LCD_CASE_OFF))
+ gpio_set_value(GPO_LCD_EN, 1);
+ else
+ gpio_set_value(GPO_LCD_EN, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nb0916_overheat_handler(int irq, void *dev_id)
+{
+ machine_halt();
+ /* SYSTEM HALT, NO RETURN */
+ return IRQ_HANDLED;
+}
+
+static struct i2c_board_info __initdata puv3_i2c_devices[] = {
+ { I2C_BOARD_INFO("lm75", I2C_TAR_THERMAL), },
+ { I2C_BOARD_INFO("bq27200", I2C_TAR_PWIC), },
+ { I2C_BOARD_INFO("24c02", I2C_TAR_EEPROM), },
+};
+
+int __init mach_nb0916_init(void)
+{
+ i2c_register_board_info(0, puv3_i2c_devices,
+ ARRAY_SIZE(puv3_i2c_devices));
+
+ platform_device_register_simple("PKUnity-v3-I2C", -1,
+ puv3_i2c_resources, ARRAY_SIZE(puv3_i2c_resources));
+
+ platform_device_register_data(&platform_bus, "pwm-backlight", -1,
+ &nb0916_backlight_data, sizeof(nb0916_backlight_data));
+
+ platform_device_register_data(&platform_bus, "gpio-keys", -1,
+ &nb0916_gpio_button_data, sizeof(nb0916_gpio_button_data));
+
+ platform_device_register_resndata(&platform_bus, "physmap-flash", -1,
+ &physmap_flash_resource, 1,
+ &physmap_flash_data, sizeof(physmap_flash_data));
+
+ if (request_irq(gpio_to_irq(GPI_LCD_CASE_OFF),
+ &nb0916_lcdcaseoff_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "NB0916 lcd case off", NULL) < 0) {
+
+ printk(KERN_DEBUG "LCD-Case-OFF IRQ %d not available\n",
+ gpio_to_irq(GPI_LCD_CASE_OFF));
+ }
+
+ if (request_irq(gpio_to_irq(GPI_OTP_INT), &nb0916_overheat_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "NB0916 overheating protection", NULL) < 0) {
+
+ printk(KERN_DEBUG "Overheating Protection IRQ %d not available\n",
+ gpio_to_irq(GPI_OTP_INT));
+ }
+
+ return 0;
+}
+
+subsys_initcall_sync(mach_nb0916_init);
diff --git a/arch/unicore32/kernel/pwm.c b/arch/unicore32/kernel/pwm.c
new file mode 100644
index 000000000000..4615d51e3ba6
--- /dev/null
+++ b/arch/unicore32/kernel/pwm.c
@@ -0,0 +1,263 @@
+/*
+ * linux/arch/unicore32/kernel/pwm.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+
+#include <asm/div64.h>
+#include <mach/hardware.h>
+
+struct pwm_device {
+ struct list_head node;
+ struct platform_device *pdev;
+
+ const char *label;
+ struct clk *clk;
+ int clk_enabled;
+
+ unsigned int use_count;
+ unsigned int pwm_id;
+};
+
+/*
+ * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long long c;
+ unsigned long period_cycles, prescale, pv, dc;
+
+ if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ c = clk_get_rate(pwm->clk);
+ c = c * period_ns;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ if (period_cycles < 1)
+ period_cycles = 1;
+ prescale = (period_cycles - 1) / 1024;
+ pv = period_cycles / (prescale + 1) - 1;
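+ /* split period_cycles into (prescale + 1) * (pv + 1); the
+ * prescaler must fit in 6 bits, hence the 63 limit below */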
+
+ if (prescale > 63)
+ return -EINVAL;
+
+ if (duty_ns == period_ns)
+ dc = OST_PWMDCCR_FDCYCLE;
+ else
+ dc = (pv + 1) * duty_ns / period_ns;
+
+ /* NOTE: the clock to the PWM block has to be enabled
+ * before writing to the registers
+ */
+ clk_enable(pwm->clk);
+ OST_PWMPWCR = prescale;
+ OST_PWMDCCR = pv - dc;
+ OST_PWMPCR = pv;
+ clk_disable(pwm->clk);
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ int rc = 0;
+
+ if (!pwm->clk_enabled) {
+ rc = clk_enable(pwm->clk);
+ if (!rc)
+ pwm->clk_enabled = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ if (pwm->clk_enabled) {
+ clk_disable(pwm->clk);
+ pwm->clk_enabled = 0;
+ }
+}
+EXPORT_SYMBOL(pwm_disable);
+
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_list);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int found = 0;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == pwm_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ if (pwm->use_count == 0) {
+ pwm->use_count++;
+ pwm->label = label;
+ } else
+ pwm = ERR_PTR(-EBUSY);
+ } else
+ pwm = ERR_PTR(-ENOENT);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+
+ if (pwm->use_count) {
+ pwm->use_count--;
+ pwm->label = NULL;
+ } else
+ pr_warning("PWM device already freed\n");
+
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_free);
+
+static inline void __add_pwm(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+ list_add_tail(&pwm->node, &pwm_list);
+ mutex_unlock(&pwm_lock);
+}
+
+static struct pwm_device *pwm_probe(struct platform_device *pdev,
+ unsigned int pwm_id, struct pwm_device *parent_pwm)
+{
+ struct pwm_device *pwm;
+ struct resource *r;
+ int ret = 0;
+
+ pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ if (pwm == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pwm->clk = clk_get(NULL, "OST_CLK");
+ if (IS_ERR(pwm->clk)) {
+ ret = PTR_ERR(pwm->clk);
+ goto err_free;
+ }
+ pwm->clk_enabled = 0;
+
+ pwm->use_count = 0;
+ pwm->pwm_id = pwm_id;
+ pwm->pdev = pdev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ ret = -ENODEV;
+ goto err_free_clk;
+ }
+
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ ret = -EBUSY;
+ goto err_free_clk;
+ }
+
+ __add_pwm(pwm);
+ platform_set_drvdata(pdev, pwm);
+ return pwm;
+
+err_free_clk:
+ clk_put(pwm->clk);
+err_free:
+ kfree(pwm);
+ return ERR_PTR(ret);
+}
+
+static int __devinit puv3_pwm_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm = pwm_probe(pdev, pdev->id, NULL);
+
+ if (IS_ERR(pwm))
+ return PTR_ERR(pwm);
+
+ return 0;
+}
+
+static int __devexit pwm_remove(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct resource *r;
+
+ pwm = platform_get_drvdata(pdev);
+ if (pwm == NULL)
+ return -ENODEV;
+
+ mutex_lock(&pwm_lock);
+ list_del(&pwm->node);
+ mutex_unlock(&pwm_lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, resource_size(r));
+
+ clk_put(pwm->clk);
+ kfree(pwm);
+ return 0;
+}
+
+static struct platform_driver puv3_pwm_driver = {
+ .driver = {
+ .name = "PKUnity-v3-PWM",
+ },
+ .probe = puv3_pwm_probe,
+ .remove = __devexit_p(pwm_remove),
+};
+
+static int __init pwm_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&puv3_pwm_driver);
+ if (ret) {
+ printk(KERN_ERR "failed to register puv3_pwm_driver\n");
+ return ret;
+ }
+
+ return ret;
+}
+arch_initcall(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ platform_driver_unregister(&puv3_pwm_driver);
+}
+module_exit(pwm_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/unicore32/kernel/rtc.c b/arch/unicore32/kernel/rtc.c
new file mode 100644
index 000000000000..c5f068295b51
--- /dev/null
+++ b/arch/unicore32/kernel/rtc.c
@@ -0,0 +1,380 @@
+/*
+ * linux/arch/unicore32/kernel/rtc.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+static struct resource *puv3_rtc_mem;
+
+static int puv3_rtc_alarmno = IRQ_RTCAlarm;
+static int puv3_rtc_tickno = IRQ_RTC;
+
+static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
+
+/* IRQ Handlers */
+
+static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
+{
+ struct rtc_device *rdev = id;
+
+ writel(readl(RTC_RTSR) | RTC_RTSR_AL, RTC_RTSR);
+ rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t puv3_rtc_tickirq(int irq, void *id)
+{
+ struct rtc_device *rdev = id;
+
+ writel(readl(RTC_RTSR) | RTC_RTSR_HZ, RTC_RTSR);
+ rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+/* Update control registers */
+static void puv3_rtc_setaie(int to)
+{
+ unsigned int tmp;
+
+ pr_debug("%s: aie=%d\n", __func__, to);
+
+ tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE;
+
+ if (to)
+ tmp |= RTC_RTSR_ALE;
+
+ writel(tmp, RTC_RTSR);
+}
+
+static int puv3_rtc_setpie(struct device *dev, int enabled)
+{
+ unsigned int tmp;
+
+ pr_debug("%s: pie=%d\n", __func__, enabled);
+
+ spin_lock_irq(&puv3_rtc_pie_lock);
+ tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
+
+ if (enabled)
+ tmp |= RTC_RTSR_HZE;
+
+ writel(tmp, RTC_RTSR);
+ spin_unlock_irq(&puv3_rtc_pie_lock);
+
+ return 0;
+}
+
+static int puv3_rtc_setfreq(struct device *dev, int freq)
+{
+ return 0;
+}
+
+/* Time read/write */
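+/* RCNR and RTAR hold plain second counts, treated as seconds since the epoch */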
+
+static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
+
+ pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
+ rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
+ return 0;
+}
+
+static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long rtc_count = 0;
+
+ pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ rtc_tm_to_time(tm, &rtc_count);
+ writel(rtc_count, RTC_RCNR);
+
+ return 0;
+}
+
+static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *alm_tm = &alrm->time;
+
+ rtc_time_to_tm(readl(RTC_RTAR), alm_tm);
+
+ alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
+
+ pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+ alrm->enabled,
+ alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+
+ return 0;
+}
+
+static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *tm = &alrm->time;
+ unsigned long rtcalarm_count = 0;
+
+ pr_debug("puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+ alrm->enabled,
+ tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
+ tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+
+ rtc_tm_to_time(tm, &rtcalarm_count);
+ writel(rtcalarm_count, RTC_RTAR);
+
+ puv3_rtc_setaie(alrm->enabled);
+
+ if (alrm->enabled)
+ enable_irq_wake(puv3_rtc_alarmno);
+ else
+ disable_irq_wake(puv3_rtc_alarmno);
+
+ return 0;
+}
+
+static int puv3_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ seq_printf(seq, "periodic_IRQ\t: %s\n",
+ (readl(RTC_RTSR) & RTC_RTSR_HZE) ? "yes" : "no");
+ return 0;
+}
+
+static int puv3_rtc_open(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
+ IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev);
+
+ if (ret) {
+ dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
+ return ret;
+ }
+
+ ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
+ IRQF_DISABLED, "pkunity-rtc tick", rtc_dev);
+
+ if (ret) {
+ dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
+ goto tick_err;
+ }
+
+ return ret;
+
+ tick_err:
+ free_irq(puv3_rtc_alarmno, rtc_dev);
+ return ret;
+}
+
+static void puv3_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+
+ /* do not clear AIE here, it may be needed for wake */
+
+ puv3_rtc_setpie(dev, 0);
+ free_irq(puv3_rtc_alarmno, rtc_dev);
+ free_irq(puv3_rtc_tickno, rtc_dev);
+}
+
+static const struct rtc_class_ops puv3_rtcops = {
+ .open = puv3_rtc_open,
+ .release = puv3_rtc_release,
+ .read_time = puv3_rtc_gettime,
+ .set_time = puv3_rtc_settime,
+ .read_alarm = puv3_rtc_getalarm,
+ .set_alarm = puv3_rtc_setalarm,
+ .irq_set_freq = puv3_rtc_setfreq,
+ .irq_set_state = puv3_rtc_setpie,
+ .proc = puv3_rtc_proc,
+};
+
+static void puv3_rtc_enable(struct platform_device *pdev, int en)
+{
+ if (!en) {
+ writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
+ } else {
+ /* re-enable the device, and check it is ok */
+
+ if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
+ dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
+ writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
+ }
+ }
+}
+
+static int puv3_rtc_remove(struct platform_device *dev)
+{
+ struct rtc_device *rtc = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+ rtc_device_unregister(rtc);
+
+ puv3_rtc_setpie(&dev->dev, 0);
+ puv3_rtc_setaie(0);
+
+ release_resource(puv3_rtc_mem);
+ kfree(puv3_rtc_mem);
+
+ return 0;
+}
+
+static int puv3_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ int ret;
+
+ pr_debug("%s: probe=%p\n", __func__, pdev);
+
+ /* find the IRQs */
+
+ puv3_rtc_tickno = platform_get_irq(pdev, 1);
+ if (puv3_rtc_tickno < 0) {
+ dev_err(&pdev->dev, "no irq for rtc tick\n");
+ return -ENOENT;
+ }
+
+ puv3_rtc_alarmno = platform_get_irq(pdev, 0);
+ if (puv3_rtc_alarmno < 0) {
+ dev_err(&pdev->dev, "no irq for alarm\n");
+ return -ENOENT;
+ }
+
+ pr_debug("PKUnity_rtc: tick irq %d, alarm irq %d\n",
+ puv3_rtc_tickno, puv3_rtc_alarmno);
+
+ /* get the memory region */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ return -ENOENT;
+ }
+
+ puv3_rtc_mem = request_mem_region(res->start,
+ res->end-res->start+1,
+ pdev->name);
+
+ if (puv3_rtc_mem == NULL) {
+ dev_err(&pdev->dev, "failed to reserve memory region\n");
+ ret = -ENOENT;
+ goto err_nores;
+ }
+
+ puv3_rtc_enable(pdev, 1);
+
+ puv3_rtc_setfreq(&pdev->dev, 1);
+
+ /* register RTC and exit */
+
+ rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
+ THIS_MODULE);
+
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "cannot attach rtc\n");
+ ret = PTR_ERR(rtc);
+ goto err_nortc;
+ }
+
+ /* platform setup code should have handled this; sigh */
+ if (!device_can_wakeup(&pdev->dev))
+ device_init_wakeup(&pdev->dev, 1);
+
+ platform_set_drvdata(pdev, rtc);
+ return 0;
+
+ err_nortc:
+ puv3_rtc_enable(pdev, 0);
+ release_resource(puv3_rtc_mem);
+
+ err_nores:
+ return ret;
+}
+
+#ifdef CONFIG_PM
+
+/* RTC Power management control */
+
+static int ticnt_save;
+
+static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /* save RTAR for anyone using periodic interrupts */
+ ticnt_save = readl(RTC_RTAR);
+ puv3_rtc_enable(pdev, 0);
+ return 0;
+}
+
+static int puv3_rtc_resume(struct platform_device *pdev)
+{
+ puv3_rtc_enable(pdev, 1);
+ writel(ticnt_save, RTC_RTAR);
+ return 0;
+}
+#else
+#define puv3_rtc_suspend NULL
+#define puv3_rtc_resume NULL
+#endif
+
+static struct platform_driver puv3_rtcdrv = {
+ .probe = puv3_rtc_probe,
+ .remove = __devexit_p(puv3_rtc_remove),
+ .suspend = puv3_rtc_suspend,
+ .resume = puv3_rtc_resume,
+ .driver = {
+ .name = "PKUnity-v3-RTC",
+ .owner = THIS_MODULE,
+ }
+};
+
+static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
+
+static int __init puv3_rtc_init(void)
+{
+ printk(banner);
+ return platform_driver_register(&puv3_rtcdrv);
+}
+
+static void __exit puv3_rtc_exit(void)
+{
+ platform_driver_unregister(&puv3_rtcdrv);
+}
+
+module_init(puv3_rtc_init);
+module_exit(puv3_rtc_exit);
+
+MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
+MODULE_AUTHOR("Hu Dongliang");
+MODULE_LICENSE("GPL v2");
+
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
new file mode 100644
index 000000000000..1e175a82844d
--- /dev/null
+++ b/arch/unicore32/kernel/setup.c
@@ -0,0 +1,360 @@
+/*
+ * linux/arch/unicore32/kernel/setup.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/utsname.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/screen_info.h>
+#include <linux/init.h>
+#include <linux/root_dev.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/memblock.h>
+#include <linux/elf.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#include "setup.h"
+
+#ifndef MEM_SIZE
+#define MEM_SIZE (16*1024*1024)
+#endif
+
+struct stack {
+ u32 irq[3];
+ u32 abt[3];
+ u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
+char elf_platform[ELF_PLATFORM_SIZE];
+EXPORT_SYMBOL(elf_platform);
+
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
+
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+
+/*
+ * Standard memory resources
+ */
+static struct resource mem_res[] = {
+ {
+ .name = "Video RAM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel text",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+#define video_ram mem_res[0]
+#define kernel_code mem_res[1]
+#define kernel_data mem_res[2]
+
+/*
+ * These functions re-use the assembly code in head.S, which
+ * already provides the required functionality.
+ */
+static void __init setup_processor(void)
+{
+ printk(KERN_DEFAULT "CPU: UniCore-II [%08x] revision %d, cr=%08lx\n",
+ uc32_cpuid, (int)(uc32_cpuid >> 16) & 15, cr_alignment);
+
+ sprintf(init_utsname()->machine, "puv3");
+ sprintf(elf_platform, "ucv2");
+}
+
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct stack *stk = &stacks[cpu];
+
+ /*
+ * setup stacks for re-entrant exception handlers
+ */
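+ /* enter INTR, ABRT and EXTN modes in turn, point each mode's sp
+ * at its slot in the per-cpu stack, then return to PRIV mode */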
+ __asm__ (
+ "mov.a asr, %1\n\t"
+ "add sp, %0, %2\n\t"
+ "mov.a asr, %3\n\t"
+ "add sp, %0, %4\n\t"
+ "mov.a asr, %5\n\t"
+ "add sp, %0, %6\n\t"
+ "mov.a asr, %7"
+ :
+ : "r" (stk),
+ "r" (PSR_R_BIT | PSR_I_BIT | INTR_MODE),
+ "I" (offsetof(struct stack, irq[0])),
+ "r" (PSR_R_BIT | PSR_I_BIT | ABRT_MODE),
+ "I" (offsetof(struct stack, abt[0])),
+ "r" (PSR_R_BIT | PSR_I_BIT | EXTN_MODE),
+ "I" (offsetof(struct stack, und[0])),
+ "r" (PSR_R_BIT | PSR_I_BIT | PRIV_MODE)
+ : "r30", "cc");
+}
+
+static int __init uc32_add_memory(unsigned long start, unsigned long size)
+{
+ struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+
+ if (meminfo.nr_banks >= NR_BANKS) {
+ printk(KERN_CRIT "NR_BANKS too low, "
+ "ignoring memory at %#lx\n", start);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure that start/size are aligned to a page boundary.
+ * Size is appropriately rounded down, start is rounded up.
+ */
+ size -= start & ~PAGE_MASK;
+
+ bank->start = PAGE_ALIGN(start);
+ bank->size = size & PAGE_MASK;
+
+ /*
+ * Check whether this memory region has non-zero size.
+ */
+ if (bank->size == 0)
+ return -EINVAL;
+
+ meminfo.nr_banks++;
+ return 0;
+}
+
+/*
+ * Pick out the memory size. We look for mem=size@start,
+ * where size and start are given as "size[KkMm]".
+ */
+static int __init early_mem(char *p)
+{
+ static int usermem __initdata = 1;
+ unsigned long size, start;
+ char *endp;
+
+ /*
+ * If the user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem) {
+ usermem = 0;
+ meminfo.nr_banks = 0;
+ }
+
+ start = PHYS_OFFSET;
+ size = memparse(p, &endp);
+ if (*endp == '@')
+ start = memparse(endp + 1, NULL);
+
+ uc32_add_memory(start, size);
+
+ return 0;
+}
+early_param("mem", early_mem);
+
+static void __init
+request_standard_resources(struct meminfo *mi)
+{
+ struct resource *res;
+ int i;
+
+ kernel_code.start = virt_to_phys(_stext);
+ kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_data.start = virt_to_phys(_sdata);
+ kernel_data.end = virt_to_phys(_end - 1);
+
+ for (i = 0; i < mi->nr_banks; i++) {
+ if (mi->bank[i].size == 0)
+ continue;
+
+ res = alloc_bootmem_low(sizeof(*res));
+ res->name = "System RAM";
+ res->start = mi->bank[i].start;
+ res->end = mi->bank[i].start + mi->bank[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ request_resource(&iomem_resource, res);
+
+ if (kernel_code.start >= res->start &&
+ kernel_code.end <= res->end)
+ request_resource(res, &kernel_code);
+ if (kernel_data.start >= res->start &&
+ kernel_data.end <= res->end)
+ request_resource(res, &kernel_data);
+ }
+
+ video_ram.start = PKUNITY_UNIGFX_MMAP_BASE;
+ video_ram.end = PKUNITY_UNIGFX_MMAP_BASE + PKUNITY_UNIGFX_MMAP_SIZE;
+ request_resource(&iomem_resource, &video_ram);
+}
+
+static void (*init_machine)(void) __initdata;
+
+static int __init customize_machine(void)
+{
+ /* customizes platform devices, or adds new ones */
+ if (init_machine)
+ init_machine();
+ return 0;
+}
+arch_initcall(customize_machine);
+
+void __init setup_arch(char **cmdline_p)
+{
+ char *from = default_command_line;
+
+ setup_processor();
+
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+ /* populate cmd_line too for later use, preserving boot_command_line */
+ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = cmd_line;
+
+ parse_early_param();
+
+ uc32_memblock_init(&meminfo);
+
+ paging_init();
+ request_standard_resources(&meminfo);
+
+ cpu_init();
+
+ /*
+ * Set up various architecture-specific pointers
+ */
+ init_machine = puv3_core_init;
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ early_trap_init();
+}
+
+static struct cpu cpuinfo_unicore;
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ register_cpu(&cpuinfo_unicore, i);
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+ struct proc_dir_entry *res;
+
+ res = proc_mkdir("cpu", NULL);
+ if (!res)
+ return -ENOMEM;
+ return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
+static int c_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "Processor\t: UniCore-II rev %d (%s)\n",
+ (int)(uc32_cpuid >> 16) & 15, elf_platform);
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100);
+
+ /* dump out the processor features */
+ seq_puts(m, "Features\t: CMOV UC-F64");
+
+ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", uc32_cpuid >> 24);
+ seq_printf(m, "CPU architecture: 2\n");
+ seq_printf(m, "CPU revision\t: %d\n", (uc32_cpuid >> 16) & 15);
+
+ seq_printf(m, "Cache type\t: write-back\n"
+ "Cache clean\t: cp0 c5 ops\n"
+ "Cache lockdown\t: not support\n"
+ "Cache format\t: Harvard\n");
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Hardware\t: PKUnity v3\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h
new file mode 100644
index 000000000000..dcd1306eb5c6
--- /dev/null
+++ b/arch/unicore32/kernel/setup.h
@@ -0,0 +1,30 @@
+/*
+ * linux/arch/unicore32/kernel/setup.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_KERNEL_SETUP_H__
+#define __UNICORE_KERNEL_SETUP_H__
+
+extern void paging_init(void);
+extern void puv3_core_init(void);
+
+extern void puv3_ps2_init(void);
+extern void pci_puv3_preinit(void);
+extern void __init puv3_init_gpio(void);
+
+extern void setup_mm_for_reboot(char mode);
+
+extern char __stubs_start[], __stubs_end[];
+extern char __vectors_start[], __vectors_end[];
+
+extern void kernel_thread_helper(void);
+
+extern void __init early_signal_init(void);
+#endif
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
new file mode 100644
index 000000000000..b163fca56789
--- /dev/null
+++ b/arch/unicore32/kernel/signal.c
@@ -0,0 +1,494 @@
+/*
+ * linux/arch/unicore32/kernel/signal.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/unistd.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * For UniCore syscalls, we encode the syscall number into the instruction.
+ */
+#define SWI_SYS_SIGRETURN (0xff000000) /* error number for new abi */
+#define SWI_SYS_RT_SIGRETURN (0xff000000 | (__NR_rt_sigreturn))
+#define SWI_SYS_RESTART (0xff000000 | (__NR_restart_syscall))
+
+#define KERN_SIGRETURN_CODE (KUSER_VECPAGE_BASE + 0x00000500)
+#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
+
+const unsigned long sigreturn_codes[3] = {
+ SWI_SYS_SIGRETURN, SWI_SYS_RT_SIGRETURN,
+};
+
+const unsigned long syscall_restart_code[2] = {
+ SWI_SYS_RESTART, /* swi __NR_restart_syscall */
+ 0x69efc004, /* ldr pc, [sp], #4 */
+};
+
+/*
+ * Do a signal return; undo the signal stack. These are aligned to 64-bit.
+ */
+struct sigframe {
+ struct ucontext uc;
+ unsigned long retcode[2];
+};
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct sigframe sig;
+};
+
+static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0) {
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
+ err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
+ err |= __get_user(regs->UCreg_02, &sf->uc.uc_mcontext.regs.UCreg_02);
+ err |= __get_user(regs->UCreg_03, &sf->uc.uc_mcontext.regs.UCreg_03);
+ err |= __get_user(regs->UCreg_04, &sf->uc.uc_mcontext.regs.UCreg_04);
+ err |= __get_user(regs->UCreg_05, &sf->uc.uc_mcontext.regs.UCreg_05);
+ err |= __get_user(regs->UCreg_06, &sf->uc.uc_mcontext.regs.UCreg_06);
+ err |= __get_user(regs->UCreg_07, &sf->uc.uc_mcontext.regs.UCreg_07);
+ err |= __get_user(regs->UCreg_08, &sf->uc.uc_mcontext.regs.UCreg_08);
+ err |= __get_user(regs->UCreg_09, &sf->uc.uc_mcontext.regs.UCreg_09);
+ err |= __get_user(regs->UCreg_10, &sf->uc.uc_mcontext.regs.UCreg_10);
+ err |= __get_user(regs->UCreg_11, &sf->uc.uc_mcontext.regs.UCreg_11);
+ err |= __get_user(regs->UCreg_12, &sf->uc.uc_mcontext.regs.UCreg_12);
+ err |= __get_user(regs->UCreg_13, &sf->uc.uc_mcontext.regs.UCreg_13);
+ err |= __get_user(regs->UCreg_14, &sf->uc.uc_mcontext.regs.UCreg_14);
+ err |= __get_user(regs->UCreg_15, &sf->uc.uc_mcontext.regs.UCreg_15);
+ err |= __get_user(regs->UCreg_16, &sf->uc.uc_mcontext.regs.UCreg_16);
+ err |= __get_user(regs->UCreg_17, &sf->uc.uc_mcontext.regs.UCreg_17);
+ err |= __get_user(regs->UCreg_18, &sf->uc.uc_mcontext.regs.UCreg_18);
+ err |= __get_user(regs->UCreg_19, &sf->uc.uc_mcontext.regs.UCreg_19);
+ err |= __get_user(regs->UCreg_20, &sf->uc.uc_mcontext.regs.UCreg_20);
+ err |= __get_user(regs->UCreg_21, &sf->uc.uc_mcontext.regs.UCreg_21);
+ err |= __get_user(regs->UCreg_22, &sf->uc.uc_mcontext.regs.UCreg_22);
+ err |= __get_user(regs->UCreg_23, &sf->uc.uc_mcontext.regs.UCreg_23);
+ err |= __get_user(regs->UCreg_24, &sf->uc.uc_mcontext.regs.UCreg_24);
+ err |= __get_user(regs->UCreg_25, &sf->uc.uc_mcontext.regs.UCreg_25);
+ err |= __get_user(regs->UCreg_26, &sf->uc.uc_mcontext.regs.UCreg_26);
+ err |= __get_user(regs->UCreg_fp, &sf->uc.uc_mcontext.regs.UCreg_fp);
+ err |= __get_user(regs->UCreg_ip, &sf->uc.uc_mcontext.regs.UCreg_ip);
+ err |= __get_user(regs->UCreg_sp, &sf->uc.uc_mcontext.regs.UCreg_sp);
+ err |= __get_user(regs->UCreg_lr, &sf->uc.uc_mcontext.regs.UCreg_lr);
+ err |= __get_user(regs->UCreg_pc, &sf->uc.uc_mcontext.regs.UCreg_pc);
+ err |= __get_user(regs->UCreg_asr, &sf->uc.uc_mcontext.regs.UCreg_asr);
+
+ err |= !valid_user_regs(regs);
+
+ return err;
+}
+
+asmlinkage int __sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+ * 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->UCreg_sp & 7)
+ goto badframe;
+
+ frame = (struct rt_sigframe __user *)regs->UCreg_sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (restore_sigframe(regs, &frame->sig))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->UCreg_sp)
+ == -EFAULT)
+ goto badframe;
+
+ return regs->UCreg_00;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err = 0;
+
+ err |= __put_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
+ err |= __put_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
+ err |= __put_user(regs->UCreg_02, &sf->uc.uc_mcontext.regs.UCreg_02);
+ err |= __put_user(regs->UCreg_03, &sf->uc.uc_mcontext.regs.UCreg_03);
+ err |= __put_user(regs->UCreg_04, &sf->uc.uc_mcontext.regs.UCreg_04);
+ err |= __put_user(regs->UCreg_05, &sf->uc.uc_mcontext.regs.UCreg_05);
+ err |= __put_user(regs->UCreg_06, &sf->uc.uc_mcontext.regs.UCreg_06);
+ err |= __put_user(regs->UCreg_07, &sf->uc.uc_mcontext.regs.UCreg_07);
+ err |= __put_user(regs->UCreg_08, &sf->uc.uc_mcontext.regs.UCreg_08);
+ err |= __put_user(regs->UCreg_09, &sf->uc.uc_mcontext.regs.UCreg_09);
+ err |= __put_user(regs->UCreg_10, &sf->uc.uc_mcontext.regs.UCreg_10);
+ err |= __put_user(regs->UCreg_11, &sf->uc.uc_mcontext.regs.UCreg_11);
+ err |= __put_user(regs->UCreg_12, &sf->uc.uc_mcontext.regs.UCreg_12);
+ err |= __put_user(regs->UCreg_13, &sf->uc.uc_mcontext.regs.UCreg_13);
+ err |= __put_user(regs->UCreg_14, &sf->uc.uc_mcontext.regs.UCreg_14);
+ err |= __put_user(regs->UCreg_15, &sf->uc.uc_mcontext.regs.UCreg_15);
+ err |= __put_user(regs->UCreg_16, &sf->uc.uc_mcontext.regs.UCreg_16);
+ err |= __put_user(regs->UCreg_17, &sf->uc.uc_mcontext.regs.UCreg_17);
+ err |= __put_user(regs->UCreg_18, &sf->uc.uc_mcontext.regs.UCreg_18);
+ err |= __put_user(regs->UCreg_19, &sf->uc.uc_mcontext.regs.UCreg_19);
+ err |= __put_user(regs->UCreg_20, &sf->uc.uc_mcontext.regs.UCreg_20);
+ err |= __put_user(regs->UCreg_21, &sf->uc.uc_mcontext.regs.UCreg_21);
+ err |= __put_user(regs->UCreg_22, &sf->uc.uc_mcontext.regs.UCreg_22);
+ err |= __put_user(regs->UCreg_23, &sf->uc.uc_mcontext.regs.UCreg_23);
+ err |= __put_user(regs->UCreg_24, &sf->uc.uc_mcontext.regs.UCreg_24);
+ err |= __put_user(regs->UCreg_25, &sf->uc.uc_mcontext.regs.UCreg_25);
+ err |= __put_user(regs->UCreg_26, &sf->uc.uc_mcontext.regs.UCreg_26);
+ err |= __put_user(regs->UCreg_fp, &sf->uc.uc_mcontext.regs.UCreg_fp);
+ err |= __put_user(regs->UCreg_ip, &sf->uc.uc_mcontext.regs.UCreg_ip);
+ err |= __put_user(regs->UCreg_sp, &sf->uc.uc_mcontext.regs.UCreg_sp);
+ err |= __put_user(regs->UCreg_lr, &sf->uc.uc_mcontext.regs.UCreg_lr);
+ err |= __put_user(regs->UCreg_pc, &sf->uc.uc_mcontext.regs.UCreg_pc);
+ err |= __put_user(regs->UCreg_asr, &sf->uc.uc_mcontext.regs.UCreg_asr);
+
+ err |= __put_user(current->thread.trap_no,
+ &sf->uc.uc_mcontext.trap_no);
+ err |= __put_user(current->thread.error_code,
+ &sf->uc.uc_mcontext.error_code);
+ err |= __put_user(current->thread.address,
+ &sf->uc.uc_mcontext.fault_address);
+ err |= __put_user(set->sig[0], &sf->uc.uc_mcontext.oldmask);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+ return err;
+}
+
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs, int framesize)
+{
+ unsigned long sp = regs->UCreg_sp;
+ void __user *frame;
+
+ /*
+ * This is the X/Open sanctioned signal stack switching.
+ */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /*
+ * ATPCS B01 mandates 8-byte alignment
+ */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /*
+ * Check that we can actually write to the signal frame.
+ */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
+
+static int setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+ unsigned long __user *rc, void __user *frame, int usig)
+{
+ unsigned long handler = (unsigned long)ka->sa.sa_handler;
+ unsigned long retcode;
+ unsigned long asr = regs->UCreg_asr & ~PSR_f;
+
+ unsigned int idx = 0;
+
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ idx += 1;
+
+ if (__put_user(sigreturn_codes[idx], rc) ||
+ __put_user(sigreturn_codes[idx+1], rc+1))
+ return 1;
+
+ retcode = KERN_SIGRETURN_CODE + (idx << 2);
+
+ regs->UCreg_00 = usig;
+ regs->UCreg_sp = (unsigned long)frame;
+ regs->UCreg_lr = retcode;
+ regs->UCreg_pc = handler;
+ regs->UCreg_asr = asr;
+
+ return 0;
+}
+
+static int setup_frame(int usig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
+ int err = 0;
+
+ if (!frame)
+ return 1;
+
+ /*
+ * Set uc.uc_flags to a value which sc.trap_no would never have.
+ */
+ err |= __put_user(0x5ac3c35a, &frame->uc.uc_flags);
+
+ err |= setup_sigframe(frame, regs, set);
+ if (err == 0)
+ err |= setup_return(regs, ka, frame->retcode, frame, usig);
+
+ return err;
+}
+
+static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame =
+ get_sigframe(ka, regs, sizeof(*frame));
+ stack_t stack;
+ int err = 0;
+
+ if (!frame)
+ return 1;
+
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ err |= __put_user(0, &frame->sig.uc.uc_flags);
+ err |= __put_user(NULL, &frame->sig.uc.uc_link);
+
+ memset(&stack, 0, sizeof(stack));
+ stack.ss_sp = (void __user *)current->sas_ss_sp;
+ stack.ss_flags = sas_ss_flags(regs->UCreg_sp);
+ stack.ss_size = current->sas_ss_size;
+ err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
+
+ err |= setup_sigframe(&frame->sig, regs, set);
+ if (err == 0)
+ err |= setup_return(regs, ka, frame->sig.retcode, frame, usig);
+
+ if (err == 0) {
+ /*
+ * For realtime signals we must also set the second and third
+ * arguments for the signal handler.
+ */
+ regs->UCreg_01 = (unsigned long)&frame->info;
+ regs->UCreg_02 = (unsigned long)&frame->sig.uc;
+ }
+
+ return err;
+}
+
+static inline void setup_syscall_restart(struct pt_regs *regs)
+{
+ regs->UCreg_00 = regs->UCreg_ORIG_00;
+ regs->UCreg_pc -= 4;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs *regs, int syscall)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+ int usig = sig;
+ int ret;
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (syscall) {
+ switch (regs->UCreg_00) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->UCreg_00 = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->UCreg_00 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ setup_syscall_restart(regs);
+ }
+ }
+
+ /*
+ * translate the signal
+ */
+ if (usig < 32 && thread->exec_domain
+ && thread->exec_domain->signal_invmap)
+ usig = thread->exec_domain->signal_invmap[usig];
+
+ /*
+ * Set up the stack frame
+ */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ret = setup_rt_frame(usig, ka, info, oldset, regs);
+ else
+ ret = setup_frame(usig, ka, oldset, regs);
+
+ /*
+ * Check that the resulting registers are actually sane.
+ */
+ ret |= !valid_user_regs(regs);
+
+ if (ret != 0) {
+ force_sigsegv(sig, tsk);
+ return ret;
+ }
+
+ /*
+ * Block the signal if we were successful.
+ */
+ spin_lock_irq(&tsk->sighand->siglock);
+ sigorsets(&tsk->blocked, &tsk->blocked,
+ &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&tsk->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ return 0;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs, int syscall)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return;
+
+ if (try_to_freeze())
+ goto no_signal;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ sigset_t *oldset;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+ if (handle_signal(signr, &ka, &info, oldset, regs, syscall)
+ == 0) {
+ /*
+ * A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
+ return;
+ }
+
+ no_signal:
+ /*
+ * No signal to deliver to the process - restart the syscall.
+ */
+ if (syscall) {
+ if (regs->UCreg_00 == -ERESTART_RESTARTBLOCK) {
+ u32 __user *usp;
+
+ regs->UCreg_sp -= 4;
+ usp = (u32 __user *)regs->UCreg_sp;
+
+ if (put_user(regs->UCreg_pc, usp) == 0) {
+ regs->UCreg_pc = KERN_RESTART_CODE;
+ } else {
+ regs->UCreg_sp += 4;
+ force_sigsegv(0, current);
+ }
+ }
+ if (regs->UCreg_00 == -ERESTARTNOHAND ||
+ regs->UCreg_00 == -ERESTARTSYS ||
+ regs->UCreg_00 == -ERESTARTNOINTR) {
+ setup_syscall_restart(regs);
+ }
+
+ /* If there's no signal to deliver, we just put the saved
+ * sigmask back.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+ }
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned int thread_flags, int syscall)
+{
+ if (thread_flags & _TIF_SIGPENDING)
+ do_signal(regs, syscall);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
+
+/*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+void __init early_signal_init(void)
+{
+ memcpy((void *)kuser_vecpage_to_vectors(KERN_SIGRETURN_CODE),
+ sigreturn_codes, sizeof(sigreturn_codes));
+ memcpy((void *)kuser_vecpage_to_vectors(KERN_RESTART_CODE),
+ syscall_restart_code, sizeof(syscall_restart_code));
+ /* No need to flush the icache, since early_trap_init will do it last. */
+}
diff --git a/arch/unicore32/kernel/sleep.S b/arch/unicore32/kernel/sleep.S
new file mode 100644
index 000000000000..f7c3fc87f7fe
--- /dev/null
+++ b/arch/unicore32/kernel/sleep.S
@@ -0,0 +1,202 @@
+/*
+ * linux/arch/unicore32/kernel/sleep.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/hardware.h>
+
+ .text
+
+pkunity_cpu_save_cp:
+
+ @ get coprocessor registers
+
+ movc r3, p0.c7, #0 @ PID
+ movc r4, p0.c2, #0 @ translation table base addr
+ movc r5, p0.c1, #0 @ control reg
+
+
+ @ store them plus current virtual stack ptr on stack
+ mov r6, sp
+ stm.w (r3 - r6), [sp-]
+
+ mov pc, lr
+
+pkunity_cpu_save_sp:
+ @ preserve phys address of stack
+ mov r0, sp
+ stw.w lr, [sp+], #-4
+ b.l sleep_phys_sp
+ ldw r1, =sleep_save_sp
+ stw r0, [r1]
+ ldw.w pc, [sp]+, #4
+
+/*
+ * puv3_cpu_suspend()
+ *
+ * Forces CPU into sleep state.
+ *
+ * r0 = value for PWRMODE M field for desired sleep state
+ */
+
+ENTRY(puv3_cpu_suspend)
+ stm.w (r16 - r27, lr), [sp-] @ save registers on stack
+ stm.w (r4 - r15), [sp-] @ save registers on stack
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ sfm.w (f0 - f7 ), [sp-]
+ sfm.w (f8 - f15), [sp-]
+ sfm.w (f16 - f23), [sp-]
+ sfm.w (f24 - f31), [sp-]
+ cff r4, s31
+ stm.w (r4), [sp-]
+#endif
+ b.l pkunity_cpu_save_cp
+
+ b.l pkunity_cpu_save_sp
+
+ @ clean data cache
+ mov r1, #0
+ movc p0.c5, r1, #14
+ nop
+ nop
+ nop
+ nop
+
+
+
+ @ DDR2 BaseAddr
+ ldw r0, =io_p2v(PKUNITY_DDR2CTRL_BASE)
+
+ @ PM BaseAddr
+ ldw r1, =io_p2v(PKUNITY_PM_BASE)
+
+ @ set PLL_SYS_CFG reg, 275
+ movl r6, #0x00002401
+ stw r6, [r1+], #0x18
+ @ set PLL_DDR_CFG reg, 66MHz
+ movl r6, #0x00100c00
+ stw r6, [r1+], #0x1c
+
+ @ set wake up source
+ movl r8, #0x800001ff @ epip4d
+ stw r8, [r1+], #0xc
+
+ @ set PGSR
+ movl r5, #0x40000
+ stw r5, [r1+], #0x10
+
+ @ prepare DDR2 refresh settings
+ ldw r5, [r0+], #0x24
+ or r5, r5, #0x00000001
+
+ @ prepare PMCR for PLL changing
+ movl r6, #0xc
+
+ @ prepare for closing PLL
+ movl r7, #0x1
+
+ @ prepare sleep mode
+ mov r8, #0x1
+
+@ movl r0, 0x11111111
+@ put_word_ocd r0
+ b pkunity_cpu_do_suspend
+
+ .ltorg
+ .align 5
+pkunity_cpu_do_suspend:
+ b 101f
+ @ put DDR2 into self-refresh
+100: stw r5, [r0+], #0x24
+ @ change PLL
+ stw r6, [r1]
+ b 1f
+
+ .ltorg
+ .align 5
+101: b 102f
+ @ wait for PLL changing complete
+1: ldw r6, [r1+], #0x44
+ csub.a r6, #0x1
+ bne 1b
+ b 2f
+
+ .ltorg
+ .align 5
+102: b 100b
+ @ close PLL
+2: stw r7, [r1+], #0x4
+ @ enter sleep mode
+ stw r8, [r1]
+3: b 3b
+
+
+
+
+/*
+ * puv3_cpu_resume()
+ *
+ * entry point from bootloader into kernel during resume
+ *
+ * Note: Yes, part of the following code is located in the .data section.
+ * This is to allow sleep_save_sp to be accessed with a relative load
+ * while we can't rely on any MMU translation. We could have put
+ * sleep_save_sp in the .text section as well, but some setups might
+ * insist on it to be truly read-only.
+ */
+
+ .data
+ .align 5
+ENTRY(puv3_cpu_resume)
+@ movl r0, 0x20202020
+@ put_word_ocd r0
+
+ ldw r0, sleep_save_sp @ stack phys addr
+ ldw r2, =resume_after_mmu @ its absolute virtual address
+ ldm (r3 - r6), [r0]+ @ CP regs + virt stack ptr
+ mov sp, r6 @ CP regs + virt stack ptr
+
+ mov r1, #0
+ movc p0.c6, r1, #6 @ invalidate I & D TLBs
+ movc p0.c5, r1, #28 @ invalidate I & D caches, BTB
+
+ movc p0.c7, r3, #0 @ PID
+ movc p0.c2, r4, #0 @ translation table base addr
+ movc p0.c1, r5, #0 @ control reg, turn on mmu
+ nop
+ jump r2
+ nop
+ nop
+ nop
+ nop
+ nop
+
+sleep_save_sp:
+ .word 0 @ preserve stack phys ptr here
+
+ .text
+resume_after_mmu:
+@ movl r0, 0x30303030
+@ put_word_ocd r0
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ lfm.w (f0 - f7 ), [sp]+
+ lfm.w (f8 - f15), [sp]+
+ lfm.w (f16 - f23), [sp]+
+ lfm.w (f24 - f31), [sp]+
+ ldm.w (r4), [sp]+
+ ctf r4, s31
+#endif
+ ldm.w (r4 - r15), [sp]+ @ restore registers from stack
+ ldm.w (r16 - r27, pc), [sp]+ @ return to caller
diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c
new file mode 100644
index 000000000000..b34030bdabe3
--- /dev/null
+++ b/arch/unicore32/kernel/stacktrace.c
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/unicore32/kernel/stacktrace.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+
+#if defined(CONFIG_FRAME_POINTER)
+/*
+ * Unwind the current stack frame and store the new register values in the
+ * structure passed as argument. Unwinding is equivalent to a function return,
+ * hence the new PC value rather than LR should be used for backtrace.
+ *
+ * With framepointer enabled, a simple function prologue looks like this:
+ * mov ip, sp
+ * stmdb sp!, {fp, ip, lr, pc}
+ * sub fp, ip, #4
+ *
+ * A simple function epilogue looks like this:
+ * ldm sp, {fp, sp, pc}
+ *
+ * Note that with framepointer enabled, even the leaf functions have the same
+ * prologue and epilogue, therefore we can ignore the LR value in this case.
+ */
+int notrace unwind_frame(struct stackframe *frame)
+{
+ unsigned long high, low;
+ unsigned long fp = frame->fp;
+
+ /* only go to a higher address on the stack */
+ low = frame->sp;
+ high = ALIGN(low, THREAD_SIZE);
+
+ /* check current frame pointer is within bounds */
+ if (fp < (low + 12) || fp + 4 >= high)
+ return -EINVAL;
+
+ /* restore the registers from the stack frame */
+ frame->fp = *(unsigned long *)(fp - 12);
+ frame->sp = *(unsigned long *)(fp - 8);
+ frame->pc = *(unsigned long *)(fp - 4);
+
+ return 0;
+}
+#endif
+
+void notrace walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data)
+{
+ while (1) {
+ int ret;
+
+ if (fn(frame, data))
+ break;
+ ret = unwind_frame(frame);
+ if (ret < 0)
+ break;
+ }
+}
+EXPORT_SYMBOL(walk_stackframe);
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned int no_sched_functions;
+ unsigned int skip;
+};
+
+static int save_trace(struct stackframe *frame, void *d)
+{
+ struct stack_trace_data *data = d;
+ struct stack_trace *trace = data->trace;
+ unsigned long addr = frame->pc;
+
+ if (data->no_sched_functions && in_sched_functions(addr))
+ return 0;
+ if (data->skip) {
+ data->skip--;
+ return 0;
+ }
+
+ trace->entries[trace->nr_entries++] = addr;
+
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ struct stack_trace_data data;
+ struct stackframe frame;
+
+ data.trace = trace;
+ data.skip = trace->skip;
+
+ if (tsk != current) {
+ data.no_sched_functions = 1;
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(tsk);
+ } else {
+ register unsigned long current_sp asm("sp");
+
+ data.no_sched_functions = 0;
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+ frame.pc = (unsigned long)save_stack_trace_tsk;
+ }
+
+ walk_stackframe(&frame, save_trace, &data);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+#endif
diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c
new file mode 100644
index 000000000000..3afe60a39ac9
--- /dev/null
+++ b/arch/unicore32/kernel/sys.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/unicore32/kernel/sys.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+#include <linux/uaccess.h>
+
+#include <asm/syscalls.h>
+#include <asm/cacheflush.h>
+
+/* Clone a task - this clones the calling program thread.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid,
+ struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->UCreg_sp;
+
+ return do_fork(clone_flags, newsp, regs, 0,
+ parent_tid, child_tid);
+}
+
+/* sys_execve() executes a new program.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage long __sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp,
+ struct pt_regs *regs)
+{
+ int error;
+ char *fn;
+
+ fn = getname(filename);
+ error = PTR_ERR(fn);
+ if (IS_ERR(fn))
+ goto out;
+ error = do_execve(fn, argv, envp, regs);
+ putname(fn);
+out:
+ return error;
+}
+
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ struct pt_regs regs;
+ int ret;
+
+ memset(&regs, 0, sizeof(struct pt_regs));
+ ret = do_execve(filename,
+ (const char __user *const __user *)argv,
+ (const char __user *const __user *)envp, &regs);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Save argc to the register structure for userspace.
+ */
+ regs.UCreg_00 = ret;
+
+ /*
+ * We were successful. We won't be returning to our caller, but
+ * instead to user space by manipulating the kernel stack.
+ */
+ asm("add r0, %0, %1\n\t"
+ "mov r1, %2\n\t"
+ "mov r2, %3\n\t"
+ "mov r22, #0\n\t" /* not a syscall */
+ "mov r23, %0\n\t" /* thread structure */
+ "b.l memmove\n\t" /* copy regs to top of stack */
+ "mov sp, r0\n\t" /* reposition stack pointer */
+ "b ret_to_user"
+ :
+ : "r" (current_thread_info()),
+ "Ir" (THREAD_START_SP - sizeof(regs)),
+ "r" (&regs),
+ "Ir" (sizeof(regs))
+ : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(kernel_execve);
+
+/* Note: off_4k gives the file offset in units of 4096 bytes. */
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off_4k)
+{
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ off_4k);
+}
+
+/* Provide the actual syscall number to call mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+/* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
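The table construction above combines two C idioms: a GNU C range designated initializer defaults every slot to sys_ni_syscall, and the redefined __SYSCALL macro turns the #include of <asm/unistd.h> inside the initializer into a list of per-syscall overrides; later designated initializers win. A minimal stand-alone sketch of the same default-then-override pattern, using hypothetical handler names:

/* Illustrative sketch of the dispatch-table pattern used above (GNU C). */
#include <stdio.h>

typedef long (*syscall_fn)(void);

static long op_ni(void)    { return -38; }	/* stands in for sys_ni_syscall */
static long op_read(void)  { return 0; }
static long op_write(void) { return 1; }

/* plays the role of <asm/unistd.h>: one __SYSCALL() entry per implemented call */
#define SYSCALL_LIST \
	__SYSCALL(3, op_read) \
	__SYSCALL(4, op_write)

#define NR_CALLS 16

#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

static syscall_fn table[NR_CALLS] = {
	[0 ... NR_CALLS - 1] = op_ni,	/* default every slot first */
	SYSCALL_LIST			/* then override the implemented ones */
};

int main(void)
{
	printf("%ld %ld %ld\n", table[3](), table[4](), table[7]());
	return 0;
}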
diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c
new file mode 100644
index 000000000000..080710c09241
--- /dev/null
+++ b/arch/unicore32/kernel/time.c
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/unicore32/kernel/time.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/timex.h>
+#include <linux/clockchips.h>
+
+#include <mach/hardware.h>
+
+#define MIN_OSCR_DELTA 2
+
+static irqreturn_t puv3_ost0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *c = dev_id;
+
+ /* Disarm the compare/match, signal the event. */
+ writel(readl(OST_OIER) & ~OST_OIER_E0, OST_OIER);
+ writel(readl(OST_OSSR) & ~OST_OSSR_M0, OST_OSSR);
+ c->event_handler(c);
+
+ return IRQ_HANDLED;
+}
+
+static int
+puv3_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c)
+{
+ unsigned long next, oscr;
+
+ writel(readl(OST_OIER) | OST_OIER_E0, OST_OIER);
+ next = readl(OST_OSCR) + delta;
+ writel(next, OST_OSMR0);
+ oscr = readl(OST_OSCR);
+
+ return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
+}
+
+static void
+puv3_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel(readl(OST_OIER) & ~OST_OIER_E0, OST_OIER);
+ writel(readl(OST_OSSR) & ~OST_OSSR_M0, OST_OSSR);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ }
+}
+
+static struct clock_event_device ckevt_puv3_osmr0 = {
+ .name = "osmr0",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = puv3_osmr0_set_next_event,
+ .set_mode = puv3_osmr0_set_mode,
+};
+
+static cycle_t puv3_read_oscr(struct clocksource *cs)
+{
+ return readl(OST_OSCR);
+}
+
+static struct clocksource cksrc_puv3_oscr = {
+ .name = "oscr",
+ .rating = 200,
+ .read = puv3_read_oscr,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct irqaction puv3_timer_irq = {
+ .name = "ost0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = puv3_ost0_interrupt,
+ .dev_id = &ckevt_puv3_osmr0,
+};
+
+void __init time_init(void)
+{
+ writel(0, OST_OIER); /* disable any timer interrupts */
+ writel(0, OST_OSSR); /* clear status on all timers */
+
+ clockevents_calc_mult_shift(&ckevt_puv3_osmr0, CLOCK_TICK_RATE, 5);
+
+ ckevt_puv3_osmr0.max_delta_ns =
+ clockevent_delta2ns(0x7fffffff, &ckevt_puv3_osmr0);
+ ckevt_puv3_osmr0.min_delta_ns =
+ clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_puv3_osmr0) + 1;
+ ckevt_puv3_osmr0.cpumask = cpumask_of(0);
+
+ setup_irq(IRQ_TIMER0, &puv3_timer_irq);
+
+ clocksource_register_hz(&cksrc_puv3_oscr, CLOCK_TICK_RATE);
+ clockevents_register_device(&ckevt_puv3_osmr0);
+}
+
+#ifdef CONFIG_PM
+unsigned long osmr[4], oier;
+
+void puv3_timer_suspend(void)
+{
+ osmr[0] = readl(OST_OSMR0);
+ osmr[1] = readl(OST_OSMR1);
+ osmr[2] = readl(OST_OSMR2);
+ osmr[3] = readl(OST_OSMR3);
+ oier = readl(OST_OIER);
+}
+
+void puv3_timer_resume(void)
+{
+ writel(0, OST_OSSR);
+ writel(osmr[0], OST_OSMR0);
+ writel(osmr[1], OST_OSMR1);
+ writel(osmr[2], OST_OSMR2);
+ writel(osmr[3], OST_OSMR3);
+ writel(oier, OST_OIER);
+
+ /*
+ * OSMR0 is the system timer: make sure OSCR is sufficiently behind
+ */
+ writel(readl(OST_OSMR0) - LATCH, OST_OSCR);
+}
+#else
+void puv3_timer_suspend(void) { };
+void puv3_timer_resume(void) { };
+#endif
+
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
new file mode 100644
index 000000000000..25abbb101729
--- /dev/null
+++ b/arch/unicore32/kernel/traps.c
@@ -0,0 +1,333 @@
+/*
+ * linux/arch/unicore32/kernel/traps.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 'traps.c' handles hardware exceptions after we have saved some state.
+ * Mostly a debugging aid, but will probably kill the offending process.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/spinlock.h>
+#include <linux/personality.h>
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/atomic.h>
+#include <linux/unistd.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+
+#include "setup.h"
+
+static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+
+void dump_backtrace_entry(unsigned long where,
+ unsigned long from, unsigned long frame)
+{
+#ifdef CONFIG_KALLSYMS
+ printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",
+ where, (void *)where, from, (void *)from);
+#else
+ printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n",
+ where, from);
+#endif
+}
+
+/*
+ * Stack pointers should always be within the kernel's view of
+ * physical memory. If it is not there, then we can't dump
+ * out any information relating to the stack.
+ */
+static int verify_stack(unsigned long sp)
+{
+ if (sp < PAGE_OFFSET ||
+ (sp > (unsigned long)high_memory && high_memory != NULL))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Dump out the contents of some memory nicely...
+ */
+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top)
+{
+ unsigned long first;
+ mm_segment_t fs;
+ int i;
+
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n",
+ lvl, str, bottom, top);
+
+ for (first = bottom & ~31; first < top; first += 32) {
+ unsigned long p;
+ char str[sizeof(" 12345678") * 8 + 1];
+
+ memset(str, ' ', sizeof(str));
+ str[sizeof(str) - 1] = '\0';
+
+ for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+ if (p >= bottom && p < top) {
+ unsigned long val;
+ if (__get_user(val, (unsigned long *)p) == 0)
+ sprintf(str + i * 9, " %08lx", val);
+ else
+ sprintf(str + i * 9, " ????????");
+ }
+ }
+ printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str);
+ }
+
+ set_fs(fs);
+}
+
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ const int width = 8;
+ mm_segment_t fs;
+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ int i;
+
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+ bad = __get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+ width, val);
+ else {
+ p += sprintf(p, "bad PC value");
+ break;
+ }
+ }
+ printk(KERN_DEFAULT "%sCode: %s\n", lvl, str);
+
+ set_fs(fs);
+}
+
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+ unsigned int fp, mode;
+ int ok = 1;
+
+ printk(KERN_DEFAULT "Backtrace: ");
+
+ if (!tsk)
+ tsk = current;
+
+ if (regs) {
+ fp = regs->UCreg_fp;
+ mode = processor_mode(regs);
+ } else if (tsk != current) {
+ fp = thread_saved_fp(tsk);
+ mode = 0x10;
+ } else {
+ asm("mov %0, fp" : "=r" (fp) : : "cc");
+ mode = 0x10;
+ }
+
+ if (!fp) {
+ printk("no frame pointer");
+ ok = 0;
+ } else if (verify_stack(fp)) {
+ printk("invalid frame pointer 0x%08x", fp);
+ ok = 0;
+ } else if (fp < (unsigned long)end_of_stack(tsk))
+ printk("frame pointer underflow");
+ printk("\n");
+
+ if (ok)
+ c_backtrace(fp, mode);
+}
+
+void dump_stack(void)
+{
+ dump_backtrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ dump_backtrace(NULL, tsk);
+ barrier();
+}
+
+static int __die(const char *str, int err, struct thread_info *thread,
+ struct pt_regs *regs)
+{
+ struct task_struct *tsk = thread->task;
+ static int die_counter;
+ int ret;
+
+ printk(KERN_EMERG "Internal error: %s: %x [#%d]\n",
+ str, err, ++die_counter);
+ sysfs_printk_last_file();
+
+ /* trap and error numbers are mostly meaningless on UniCore */
+ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \
+ SIGSEGV);
+ if (ret == NOTIFY_STOP)
+ return ret;
+
+ print_modules();
+ __show_regs(regs);
+ printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+
+ if (!user_mode(regs) || in_interrupt()) {
+ dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
+ THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ dump_backtrace(regs, tsk);
+ dump_instr(KERN_EMERG, regs);
+ }
+
+ return ret;
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+ struct thread_info *thread = current_thread_info();
+ int ret;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+ ret = __die(str, err, thread, regs);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+}
+
+void uc32_notify_die(const char *str, struct pt_regs *regs,
+ struct siginfo *info, unsigned long err, unsigned long trap)
+{
+ if (user_mode(regs)) {
+ current->thread.error_code = err;
+ current->thread.trap_no = trap;
+
+ force_sig_info(info->si_signo, info, current);
+ } else
+ die(str, regs, err);
+}
+
+/*
+ * bad_mode handles the impossible case in the vectors. If you see one of
+ * these, then it's extremely serious, and could mean you have buggy hardware.
+ * It never returns, and never tries to sync. We hope that we can at least
+ * dump out some state information...
+ */
+asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason)
+{
+ console_verbose();
+
+ printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason);
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+void __pte_error(const char *file, int line, unsigned long val)
+{
+ printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val);
+}
+
+void __pmd_error(const char *file, int line, unsigned long val)
+{
+ printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val);
+}
+
+void __pgd_error(const char *file, int line, unsigned long val)
+{
+ printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val);
+}
+
+asmlinkage void __div0(void)
+{
+ printk(KERN_DEFAULT "Division by zero in kernel.\n");
+ dump_stack();
+}
+EXPORT_SYMBOL(__div0);
+
+void abort(void)
+{
+ BUG();
+
+ /* if that doesn't kill us, halt */
+ panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
+
+void __init trap_init(void)
+{
+ return;
+}
+
+void __init early_trap_init(void)
+{
+ unsigned long vectors = VECTORS_BASE;
+
+ /*
+ * Copy the vectors, stubs (in entry-unicore.S)
+ * into the vector page, mapped at 0xffff0000, and ensure these
+ * are visible to the instruction stream.
+ */
+ memcpy((void *)vectors,
+ __vectors_start,
+ __vectors_end - __vectors_start);
+ memcpy((void *)vectors + 0x200,
+ __stubs_start,
+ __stubs_end - __stubs_start);
+
+ early_signal_init();
+
+ flush_icache_range(vectors, vectors + PAGE_SIZE);
+}
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..0b4eb89729e7
--- /dev/null
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -0,0 +1,61 @@
+/*
+ * linux/arch/unicore32/kernel/vmlinux.lds.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+OUTPUT_ARCH(unicore32)
+ENTRY(stext)
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ . = PAGE_OFFSET + KERNEL_IMAGE_START;
+
+ _text = .;
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+ PERCPU(PAGE_SIZE)
+ __init_end = .;
+
+ _stext = .;
+ .text : { /* Real text segment */
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ EXCEPTION_TABLE(32)
+ NOTES
+
+ BSS_SECTION(0, 0, 0)
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS /* Exit code and data */
+}
diff --git a/arch/unicore32/lib/Makefile b/arch/unicore32/lib/Makefile
new file mode 100644
index 000000000000..87229a558b36
--- /dev/null
+++ b/arch/unicore32/lib/Makefile
@@ -0,0 +1,27 @@
+#
+# linux/arch/unicore32/lib/Makefile
+#
+# Copyright (C) 2001-2010 GUAN Xue-tao
+#
+
+lib-y := backtrace.o delay.o findbit.o
+lib-y += strncpy_from_user.o strnlen_user.o
+lib-y += clear_user.o copy_page.o
+lib-y += copy_from_user.o copy_to_user.o
+
+GNU_LIBC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libc.a)
+GNU_LIBC_A_OBJS := memchr.o memcpy.o memmove.o memset.o
+GNU_LIBC_A_OBJS += strchr.o strrchr.o
+GNU_LIBC_A_OBJS += rawmemchr.o # needed by strrchr.o
+
+GNU_LIBGCC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
+GNU_LIBGCC_A_OBJS := _ashldi3.o _ashrdi3.o _lshrdi3.o
+GNU_LIBGCC_A_OBJS += _divsi3.o _modsi3.o _ucmpdi2.o _umodsi3.o _udivsi3.o
+
+lib-y += $(GNU_LIBC_A_OBJS) $(GNU_LIBGCC_A_OBJS)
+
+$(addprefix $(obj)/, $(GNU_LIBC_A_OBJS)):
+ $(Q)$(AR) p $(GNU_LIBC_A) $(notdir $@) > $@
+
+$(addprefix $(obj)/, $(GNU_LIBGCC_A_OBJS)):
+ $(Q)$(AR) p $(GNU_LIBGCC_A) $(notdir $@) > $@
diff --git a/arch/unicore32/lib/backtrace.S b/arch/unicore32/lib/backtrace.S
new file mode 100644
index 000000000000..ef01d77f2f65
--- /dev/null
+++ b/arch/unicore32/lib/backtrace.S
@@ -0,0 +1,163 @@
+/*
+ * linux/arch/unicore32/lib/backtrace.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ fp is 0 or stack frame
+
+#define frame v4
+#define sv_fp v5
+#define sv_pc v6
+#define offset v8
+
+ENTRY(__backtrace)
+ mov r0, fp
+
+ENTRY(c_backtrace)
+
+#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
+ mov pc, lr
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
+#else
+ stm.w (v4 - v8, lr), [sp-] @ Save an extra register
+ @ so we have a location...
+ mov.a frame, r0 @ if frame pointer is zero
+ beq no_frame @ we have no stack frames
+
+1: stm.w (pc), [sp-] @ calculate offset of PC stored
+ ldw.w r0, [sp]+, #4 @ by stmfd for this CPU
+ adr r1, 1b
+ sub offset, r0, r1
+
+/*
+ * Stack frame layout:
+ * optionally saved caller registers (r4 - r10)
+ * saved fp
+ * saved sp
+ * saved lr
+ * frame => saved pc
+ * optionally saved arguments (r0 - r3)
+ * saved sp => <next word>
+ *
+ * Functions start with the following code sequence:
+ * mov ip, sp
+ * stm.w (r0 - r3), [sp-] (optional)
+ * corrected pc => stm.w sp, (..., fp, ip, lr, pc)
+ */
+for_each_frame:
+
+1001: ldw sv_pc, [frame+], #0 @ get saved pc
+1002: ldw sv_fp, [frame+], #-12 @ get saved fp
+
+ sub sv_pc, sv_pc, offset @ Correct PC for prefetching
+
+1003: ldw r2, [sv_pc+], #-4 @ if stmfd sp, {args} exists,
+ ldw r3, .Ldsi+4 @ adjust saved 'pc' back one
+ cxor.a r3, r2 >> #14 @ instruction
+ beq 201f
+ sub r0, sv_pc, #4 @ allow for mov
+ b 202f
+201:
+ sub r0, sv_pc, #8 @ allow for mov + stmia
+202:
+ ldw r1, [frame+], #-4 @ get saved lr
+ mov r2, frame
+ b.l dump_backtrace_entry
+
+ ldw r1, [sv_pc+], #-4 @ if stmfd sp, {args} exists,
+ ldw r3, .Ldsi+4
+ cxor.a r3, r1 >> #14
+ bne 1004f
+ ldw r0, [frame+], #-8 @ get sp
+ sub r0, r0, #4 @ point at the last arg
+ b.l .Ldumpstm @ dump saved registers
+
+1004: ldw r1, [sv_pc+], #0 @ if stmfd {, fp, ip, lr, pc}
+ ldw r3, .Ldsi @ instruction exists,
+ cxor.a r3, r1 >> #14
+ bne 201f
+ sub r0, frame, #16
+ b.l .Ldumpstm @ dump saved registers
+201:
+ cxor.a sv_fp, #0 @ zero saved fp means
+ beq no_frame @ no further frames
+
+ csub.a sv_fp, frame @ next frame must be
+ mov frame, sv_fp @ above the current frame
+ bua for_each_frame
+
+1006: adr r0, .Lbad
+ mov r1, frame
+ b.l printk
+no_frame: ldm.w (v4 - v8, pc), [sp]+
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
+
+ .pushsection __ex_table,"a"
+ .align 3
+ .long 1001b, 1006b
+ .long 1002b, 1006b
+ .long 1003b, 1006b
+ .long 1004b, 1006b
+ .popsection
+
+#define instr v4
+#define reg v5
+#define stack v6
+
+.Ldumpstm: stm.w (instr, reg, stack, v7, lr), [sp-]
+ mov stack, r0
+ mov instr, r1
+ mov reg, #14
+ mov v7, #0
+1: mov r3, #1
+ csub.a reg, #8
+ bne 201f
+ sub reg, reg, #3
+201:
+ cand.a instr, r3 << reg
+ beq 2f
+ add v7, v7, #1
+ cxor.a v7, #6
+ cmoveq v7, #1
+ cmoveq r1, #'\n'
+ cmovne r1, #' '
+ ldw.w r3, [stack]+, #-4
+ mov r2, reg
+ csub.a r2, #8
+ bsl 201f
+ sub r2, r2, #3
+201:
+ cand.a instr, #0x40 @ if H is 1, high 16 regs
+ beq 201f
+ add r2, r2, #0x10 @ so r2 need add 16
+201:
+ adr r0, .Lfp
+ b.l printk
+2: sub.a reg, reg, #1
+ bns 1b
+ cxor.a v7, #0
+ beq 201f
+ adr r0, .Lcr
+ b.l printk
+201: ldm.w (instr, reg, stack, v7, pc), [sp]+
+
+.Lfp: .asciz "%cr%d:%08x"
+.Lcr: .asciz "\n"
+.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+ .align
+.Ldsi: .word 0x92eec000 >> 14 @ stm.w sp, (... fp, ip, lr, pc)
+ .word 0x92e10000 >> 14 @ stm.w sp, ()
+
+#endif
diff --git a/arch/unicore32/lib/clear_user.S b/arch/unicore32/lib/clear_user.S
new file mode 100644
index 000000000000..20047f7224fd
--- /dev/null
+++ b/arch/unicore32/lib/clear_user.S
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/unicore32/lib/clear_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+/* Prototype: int __clear_user(void *addr, size_t sz)
+ * Purpose : clear some user memory
+ * Params : addr - user memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+WEAK(__clear_user)
+ stm.w (lr), [sp-]
+ stm.w (r1), [sp-]
+ mov r2, #0
+ csub.a r1, #4
+ bsl 2f
+ and.a ip, r0, #3
+ beq 1f
+ csub.a ip, #2
+ strusr r2, r0, 1
+ strusr r2, r0, 1, el
+ strusr r2, r0, 1, sl
+ rsub ip, ip, #4
+ sub r1, r1, ip @ 7 6 5 4 3 2 1
+1: sub.a r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
+ strusr r2, r0, 4, ns, rept=2
+ bns 1b
+ add.a r1, r1, #4 @ 3 2 1 0 -1 -2 -3
+ strusr r2, r0, 4, ns
+2: cand.a r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
+ strusr r2, r0, 1, ne, rept=2
+ cand.a r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+ beq 3f
+USER( stb.u r2, [r0])
+3: mov r0, #0
+ ldm.w (r1), [sp]+
+ ldm.w (pc), [sp]+
+ENDPROC(__clear_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: ldm.w (r0), [sp]+
+ ldm.w (pc), [sp]+
+ .popsection
+
diff --git a/arch/unicore32/lib/copy_from_user.S b/arch/unicore32/lib/copy_from_user.S
new file mode 100644
index 000000000000..ab0767ea5dbd
--- /dev/null
+++ b/arch/unicore32/lib/copy_from_user.S
@@ -0,0 +1,108 @@
+/*
+ * linux/arch/unicore32/lib/copy_from_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Prototype:
+ *
+ * size_t __copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+ * copy a block to kernel memory from user memory
+ *
+ * Params:
+ *
+ * to = kernel memory
+ * from = user memory
+ * n = number of bytes to copy
+ *
+ * Return value:
+ *
+ * Number of bytes NOT copied.
+ */
+
+ .macro ldr1w ptr reg abort
+ ldrusr \reg, \ptr, 4, abort=\abort
+ .endm
+
+ .macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+100: ldm.w (\reg1, \reg2, \reg3, \reg4), [\ptr]+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 100b, \abort
+ .popsection
+ .endm
+
+ .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+100: ldm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 100b, \abort
+ .popsection
+ .endm
+
+ .macro ldr1b ptr reg cond=al abort
+ ldrusr \reg, \ptr, 1, \cond, abort=\abort
+ .endm
+
+ .macro str1w ptr reg abort
+ stw.w \reg, [\ptr]+, #4
+ .endm
+
+ .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ stm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+ .endm
+
+ .macro str1b ptr reg cond=al abort
+ .ifnc \cond, al
+ b\cond 201f
+ b 202f
+ .endif
+201: stb.w \reg, [\ptr]+, #1
+202:
+ .endm
+
+ .macro enter
+ mov r3, #0
+ stm.w (r0, r2, r3), [sp-]
+ .endm
+
+ .macro exit
+ add sp, sp, #8
+ ldm.w (r0), [sp]+
+ mov pc, lr
+ .endm
+
+ .text
+
+ENTRY(__copy_from_user)
+
+#include "copy_template.S"
+
+ENDPROC(__copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ copy_abort_preamble
+ ldm.w (r1, r2), [sp]+
+ sub r3, r0, r1
+ rsub r2, r3, r2
+ stw r2, [sp]
+ mov r1, #0
+ b.l memset
+ ldw.w r0, [sp]+, #4
+ copy_abort_end
+ .popsection
+
diff --git a/arch/unicore32/lib/copy_page.S b/arch/unicore32/lib/copy_page.S
new file mode 100644
index 000000000000..3a448d755ade
--- /dev/null
+++ b/arch/unicore32/lib/copy_page.S
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/unicore32/lib/copy_page.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ASM optimised string functions
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <generated/asm-offsets.h>
+#include <asm/cache.h>
+
+#define COPY_COUNT (PAGE_SZ/256)
+
+ .text
+ .align 5
+/*
+ * UniCore optimised copy_page routine
+ */
+ENTRY(copy_page)
+ stm.w (r17 - r19, lr), [sp-]
+ mov r17, r0
+ mov r18, r1
+ mov r19, #COPY_COUNT
+1:
+ .rept 4
+ ldm.w (r0 - r15), [r18]+
+ stm.w (r0 - r15), [r17]+
+ .endr
+ sub.a r19, r19, #1
+ bne 1b
+ ldm.w (r17 - r19, pc), [sp]+
+ENDPROC(copy_page)
diff --git a/arch/unicore32/lib/copy_template.S b/arch/unicore32/lib/copy_template.S
new file mode 100644
index 000000000000..524287fc0120
--- /dev/null
+++ b/arch/unicore32/lib/copy_template.S
@@ -0,0 +1,214 @@
+/*
+ * linux/arch/unicore32/lib/copy_template.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Theory of operation
+ * -------------------
+ *
+ * This file provides the core code for a forward memory copy used in
+ * the implementation of memcpy(), copy_to_user() and copy_from_user().
+ *
+ * The including file must define the following accessor macros
+ * according to the need of the given function:
+ *
+ * ldr1w ptr reg abort
+ *
+ * This loads one word from 'ptr', stores it in 'reg' and increments
+ * 'ptr' to the next word. The 'abort' argument is used for fixup tables.
+ *
+ * ldr4w ptr reg1 reg2 reg3 reg4 abort
+ * ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ *
+ * This loads four or eight words starting from 'ptr', stores them
+ * in the provided registers and increments 'ptr' past those words.
+ * The 'abort' argument is used for fixup tables.
+ *
+ * ldr1b ptr reg cond abort
+ *
+ * Similar to ldr1w, but it loads a byte and increments 'ptr' by one byte.
+ * It must also apply the condition code if one is provided; otherwise
+ * the "al" condition is assumed by default.
+ *
+ * str1w ptr reg abort
+ * str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ * str1b ptr reg cond abort
+ *
+ * Same as their ldr* counterparts, but data is stored to 'ptr' location
+ * rather than being loaded.
+ *
+ * enter
+ *
+ * Preserve the provided registers on the stack plus any additional
+ * data as needed by the implementation that includes this code. Called
+ * upon code entry.
+ *
+ * exit
+ *
+ * Restore registers with the values previously saved with the
+ * 'enter' macro. Called upon code termination.
+ */
+
+
+ enter
+
+ sub.a r2, r2, #4
+ bsl 8f
+ and.a ip, r0, #3
+ bne 9f
+ and.a ip, r1, #3
+ bne 10f
+
+1: sub.a r2, r2, #(28)
+ stm.w (r5 - r8), [sp-]
+ bsl 5f
+
+3:
+4: ldr8w r1, r3, r4, r5, r6, r7, r8, r10, r11, abort=20f
+ sub.a r2, r2, #32
+ str8w r0, r3, r4, r5, r6, r7, r8, r10, r11, abort=20f
+ beg 3b
+
+5: and.a ip, r2, #28
+ rsub ip, ip, #32
+ beq 7f
+ add pc, pc, ip @ C is always clear here
+ nop
+
+ ldr1w r1, r3, abort=20f
+ ldr1w r1, r4, abort=20f
+ ldr1w r1, r5, abort=20f
+ ldr1w r1, r6, abort=20f
+ ldr1w r1, r7, abort=20f
+ ldr1w r1, r8, abort=20f
+ ldr1w r1, r11, abort=20f
+
+ add pc, pc, ip
+ nop
+
+ str1w r0, r3, abort=20f
+ str1w r0, r4, abort=20f
+ str1w r0, r5, abort=20f
+ str1w r0, r6, abort=20f
+ str1w r0, r7, abort=20f
+ str1w r0, r8, abort=20f
+ str1w r0, r11, abort=20f
+
+7: ldm.w (r5 - r8), [sp]+
+
+8: mov.a r2, r2 << #31
+ ldr1b r1, r3, ne, abort=21f
+ ldr1b r1, r4, ea, abort=21f
+ ldr1b r1, r10, ea, abort=21f
+ str1b r0, r3, ne, abort=21f
+ str1b r0, r4, ea, abort=21f
+ str1b r0, r10, ea, abort=21f
+
+ exit
+
+9: rsub ip, ip, #4
+ csub.a ip, #2
+ ldr1b r1, r3, sg, abort=21f
+ ldr1b r1, r4, eg, abort=21f
+ ldr1b r1, r11, abort=21f
+ str1b r0, r3, sg, abort=21f
+ str1b r0, r4, eg, abort=21f
+ sub.a r2, r2, ip
+ str1b r0, r11, abort=21f
+ bsl 8b
+ and.a ip, r1, #3
+ beq 1b
+
+10: andn r1, r1, #3
+ csub.a ip, #2
+ ldr1w r1, r11, abort=21f
+ beq 17f
+ bsg 18f
+
+
+ .macro forward_copy_shift a b
+
+ sub.a r2, r2, #28
+ bsl 14f
+
+11: stm.w (r5 - r9), [sp-]
+
+12:
+ ldr4w r1, r4, r5, r6, r7, abort=19f
+ mov r3, r11 pull #\a
+ sub.a r2, r2, #32
+ ldr4w r1, r8, r9, r10, r11, abort=19f
+ or r3, r3, r4 push #\b
+ mov r4, r4 pull #\a
+ or r4, r4, r5 push #\b
+ mov r5, r5 pull #\a
+ or r5, r5, r6 push #\b
+ mov r6, r6 pull #\a
+ or r6, r6, r7 push #\b
+ mov r7, r7 pull #\a
+ or r7, r7, r8 push #\b
+ mov r8, r8 pull #\a
+ or r8, r8, r9 push #\b
+ mov r9, r9 pull #\a
+ or r9, r9, r10 push #\b
+ mov r10, r10 pull #\a
+ or r10, r10, r11 push #\b
+ str8w r0, r3, r4, r5, r6, r7, r8, r9, r10, , abort=19f
+ beg 12b
+
+ ldm.w (r5 - r9), [sp]+
+
+14: and.a ip, r2, #28
+ beq 16f
+
+15: mov r3, r11 pull #\a
+ ldr1w r1, r11, abort=21f
+ sub.a ip, ip, #4
+ or r3, r3, r11 push #\b
+ str1w r0, r3, abort=21f
+ bsg 15b
+
+16: sub r1, r1, #(\b / 8)
+ b 8b
+
+ .endm
+
+
+ forward_copy_shift a=8 b=24
+
+17: forward_copy_shift a=16 b=16
+
+18: forward_copy_shift a=24 b=8
+
+
+/*
+ * Abort preamble and completion macros.
+ * If a fixup handler is required then those macros must surround it.
+ * It is assumed that the fixup code will handle the private part of
+ * the exit macro.
+ */
+
+ .macro copy_abort_preamble
+19: ldm.w (r5 - r9), [sp]+
+ b 21f
+299: .word 0 @ store lr
+ @ to avoid function call in fixup
+20: ldm.w (r5 - r8), [sp]+
+21:
+ adr r1, 299b
+ stw lr, [r1]
+ .endm
+
+ .macro copy_abort_end
+ adr lr, 299b
+ ldw pc, [lr]
+ .endm
+
diff --git a/arch/unicore32/lib/copy_to_user.S b/arch/unicore32/lib/copy_to_user.S
new file mode 100644
index 000000000000..6e22151c840d
--- /dev/null
+++ b/arch/unicore32/lib/copy_to_user.S
@@ -0,0 +1,96 @@
+/*
+ * linux/arch/unicore32/lib/copy_to_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Prototype:
+ *
+ * size_t __copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+ * copy a block to user memory from kernel memory
+ *
+ * Params:
+ *
+ * to = user memory
+ * from = kernel memory
+ * n = number of bytes to copy
+ *
+ * Return value:
+ *
+ * Number of bytes NOT copied.
+ */
+
+ .macro ldr1w ptr reg abort
+ ldw.w \reg, [\ptr]+, #4
+ .endm
+
+ .macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+ ldm.w (\reg1, \reg2, \reg3, \reg4), [\ptr]+
+ .endm
+
+ .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ ldm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+ .endm
+
+ .macro ldr1b ptr reg cond=al abort
+ notcond \cond, .+8
+ ldb.w \reg, [\ptr]+, #1
+ .endm
+
+ .macro str1w ptr reg abort
+ strusr \reg, \ptr, 4, abort=\abort
+ .endm
+
+ .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+100: stm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+
+ .pushsection __ex_table, "a"
+ .long 100b, \abort
+ .popsection
+ .endm
+
+ .macro str1b ptr reg cond=al abort
+ strusr \reg, \ptr, 1, \cond, abort=\abort
+ .endm
+
+ .macro enter
+ mov r3, #0
+ stm.w (r0, r2, r3), [sp-]
+ .endm
+
+ .macro exit
+ add sp, sp, #8
+ ldm.w (r0), [sp]+
+ mov pc, lr
+ .endm
+
+ .text
+
+WEAK(__copy_to_user)
+
+#include "copy_template.S"
+
+ENDPROC(__copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ copy_abort_preamble
+ ldm.w (r1, r2, r3), [sp]+
+ sub r0, r0, r1
+ rsub r0, r0, r2
+ copy_abort_end
+ .popsection
+
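For illustration only (not part of the patch): with r1 holding the saved original destination and r2 the saved byte count popped by the fixup above, "sub r0, r0, r1" followed by "rsub r0, r0, r2" works out, roughly, to

	bytes_not_copied = n - (dst_at_fault - to)

which is the "Number of bytes NOT copied" return value documented in the header comment.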
diff --git a/arch/unicore32/lib/delay.S b/arch/unicore32/lib/delay.S
new file mode 100644
index 000000000000..24664c009e78
--- /dev/null
+++ b/arch/unicore32/lib/delay.S
@@ -0,0 +1,51 @@
+/*
+ * linux/arch/unicore32/lib/delay.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/param.h>
+ .text
+
+.LC0: .word loops_per_jiffy
+.LC1: .word (2199023*HZ)>>11
+
+/*
+ * r0 <= 2000
+ * lpj <= 0x01ffffff (max. 3355 bogomips)
+ * HZ <= 1000
+ */
+
+ENTRY(__udelay)
+ ldw r2, .LC1
+ mul r0, r2, r0
+ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
+ ldw r2, .LC0
+ ldw r2, [r2] @ max = 0x01ffffff
+ mov r0, r0 >> #14 @ max = 0x0001ffff
+ mov r2, r2 >> #10 @ max = 0x00007fff
+ mul r0, r2, r0 @ max = 2^32-1
+ mov.a r0, r0 >> #6
+ cmoveq pc, lr
+
+/*
+ * loops = r0 * HZ * loops_per_jiffy / 1000000
+ *
+ * Oh, if only we had a cycle counter...
+ */
+
+@ Delay routine
+ENTRY(__delay)
+ sub.a r0, r0, #2
+ bua __delay
+ mov pc, lr
+ENDPROC(__udelay)
+ENDPROC(__const_udelay)
+ENDPROC(__delay)
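For illustration only (not part of the patch), a rough derivation of the .LC1 constant, assuming the usual kernel meaning of loops_per_jiffy (delay-loop iterations per timer tick):

	target:  loops = usecs * HZ * loops_per_jiffy / 10^6
	__udelay computes  r0 = usecs * .LC1,  with .LC1 = (2199023 * HZ) >> 11,
	and 2199023 / 2^11 ~= 2^30 / 10^6, so r0 ~= usecs * HZ * 2^30 / 10^6.
	__const_udelay then applies (>> 14), (loops_per_jiffy >> 10) and (>> 6),
	a combined division by 2^30, giving loops ~= usecs * HZ * loops_per_jiffy / 10^6.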
diff --git a/arch/unicore32/lib/findbit.S b/arch/unicore32/lib/findbit.S
new file mode 100644
index 000000000000..c360ce905d8b
--- /dev/null
+++ b/arch/unicore32/lib/findbit.S
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/unicore32/lib/findbit.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+/*
+ * Purpose : Find a 'zero' bit
+ * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
+ */
+__uc32_find_first_zero_bit:
+ cxor.a r1, #0
+ beq 3f
+ mov r2, #0
+1: ldb r3, [r0+], r2 >> #3
+ xor.a r3, r3, #0xff @ invert bits
+ bne .L_found @ any now set - found zero bit
+ add r2, r2, #8 @ next bit pointer
+2: csub.a r2, r1 @ any more?
+ bub 1b
+3: mov r0, r1 @ no free bits
+ mov pc, lr
+
+/*
+ * Purpose : Find next 'zero' bit
+ * Prototype: int find_next_zero_bit
+ * (void *addr, unsigned int maxbit, int offset)
+ */
+ENTRY(__uc32_find_next_zero_bit)
+ cxor.a r1, #0
+ beq 3b
+ and.a ip, r2, #7
+ beq 1b @ If new byte, goto old routine
+ ldb r3, [r0+], r2 >> #3
+ xor r3, r3, #0xff @ now looking for a 1 bit
+ mov.a r3, r3 >> ip @ shift off unused bits
+ bne .L_found
+ or r2, r2, #7 @ if zero, then no bits here
+ add r2, r2, #1 @ align bit pointer
+ b 2b @ loop for next bit
+ENDPROC(__uc32_find_next_zero_bit)
+
+/*
+ * Purpose : Find a 'one' bit
+ * Prototype: int find_first_bit
+ * (const unsigned long *addr, unsigned int maxbit);
+ */
+__uc32_find_first_bit:
+ cxor.a r1, #0
+ beq 3f
+ mov r2, #0
+1: ldb r3, [r0+], r2 >> #3
+ mov.a r3, r3
+ bne .L_found @ any now set - found a set bit
+ add r2, r2, #8 @ next bit pointer
+2: csub.a r2, r1 @ any more?
+ bub 1b
+3: mov r0, r1 @ no free bits
+ mov pc, lr
+
+/*
+ * Purpose : Find next 'one' bit
+ * Prototype: int find_next_bit
+ * (void *addr, unsigned int maxbit, int offset)
+ */
+ENTRY(__uc32_find_next_bit)
+ cxor.a r1, #0
+ beq 3b
+ and.a ip, r2, #7
+ beq 1b @ If new byte, goto old routine
+ ldb r3, [r0+], r2 >> #3
+ mov.a r3, r3 >> ip @ shift off unused bits
+ bne .L_found
+ or r2, r2, #7 @ if zero, then no bits here
+ add r2, r2, #1 @ align bit pointer
+ b 2b @ loop for next bit
+ENDPROC(__uc32_find_next_bit)
+
+/*
+ * One or more bits in the LSB of r3 are assumed to be set.
+ */
+.L_found:
+ rsub r1, r3, #0
+ and r3, r3, r1
+ cntlz r3, r3
+ rsub r3, r3, #31
+ add r0, r2, r3
+ mov pc, lr
+
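For illustration only (not part of the patch), the .L_found computation expressed in C; __builtin_clz stands in for the cntlz instruction:

	/* Bit index of the lowest set bit: isolate it with (x & -x), then
	 * convert the single-bit value into an index via count-leading-zeros. */
	static unsigned int lowest_set_bit_index(unsigned int x)	/* x != 0 assumed */
	{
		unsigned int lsb = x & -x;
		return 31 - __builtin_clz(lsb);
	}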
diff --git a/arch/unicore32/lib/strncpy_from_user.S b/arch/unicore32/lib/strncpy_from_user.S
new file mode 100644
index 000000000000..ff6c304d5c7e
--- /dev/null
+++ b/arch/unicore32/lib/strncpy_from_user.S
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/unicore32/lib/strncpy_from_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+ .align 5
+
+/*
+ * Copy a string from user space to kernel space.
+ * r0 = dst, r1 = src, r2 = byte length
+ * returns the number of characters copied (strlen of copied string),
+ * -EFAULT on exception, or "len" if we fill the whole buffer
+ */
+ENTRY(__strncpy_from_user)
+ mov ip, r1
+1: sub.a r2, r2, #1
+ ldrusr r3, r1, 1, ns
+ bfs 2f
+ stb.w r3, [r0]+, #1
+ cxor.a r3, #0
+ bne 1b
+ sub r1, r1, #1 @ take NUL character out of count
+2: sub r0, r1, ip
+ mov pc, lr
+ENDPROC(__strncpy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: mov r3, #0
+ stb r3, [r0+], #0 @ null terminate
+ mov r0, #-EFAULT
+ mov pc, lr
+ .popsection
+
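A hypothetical caller sketch (not part of the patch) that relies only on the return convention documented above; the name example_get_name and the -ENAMETOOLONG policy are illustrative assumptions:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static long example_get_name(char *buf, const char __user *uname, long len)
	{
		long n = __strncpy_from_user(buf, uname, len);

		if (n < 0)
			return n;		/* -EFAULT from the fixup above  */
		if (n >= len)
			return -ENAMETOOLONG;	/* no NUL seen within len bytes  */
		return n;			/* characters actually copied    */
	}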
diff --git a/arch/unicore32/lib/strnlen_user.S b/arch/unicore32/lib/strnlen_user.S
new file mode 100644
index 000000000000..75863030f21d
--- /dev/null
+++ b/arch/unicore32/lib/strnlen_user.S
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/unicore32/lib/strnlen_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+ .align 5
+
+/* Prototype: unsigned long __strnlen_user(const char *str, long n)
+ * Purpose : get length of a string in user memory
+ * Params : str - address of string in user memory
+ * Returns : length of string *including terminator*
+ * or zero on exception, or n + 1 if too long
+ */
+ENTRY(__strnlen_user)
+ mov r2, r0
+1:
+ ldrusr r3, r0, 1
+ cxor.a r3, #0
+ beq 2f
+ sub.a r1, r1, #1
+ bne 1b
+ add r0, r0, #1
+2: sub r0, r0, r2
+ mov pc, lr
+ENDPROC(__strnlen_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: mov r0, #0
+ mov pc, lr
+ .popsection
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig
new file mode 100644
index 000000000000..5f77fb3c63be
--- /dev/null
+++ b/arch/unicore32/mm/Kconfig
@@ -0,0 +1,50 @@
+comment "Processor Type"
+
+# Select CPU types depending on the architecture selected. This selects
+# which CPUs we support in the kernel image, and the compiler instruction
+# optimiser behaviour.
+
+config CPU_UCV2
+ def_bool y
+
+comment "Processor Features"
+
+config CPU_ICACHE_DISABLE
+ bool "Disable I-Cache (I-bit)"
+ help
+ Say Y here to disable the processor instruction cache. Unless
+ you have a specific reason to do so, or are unsure, say N.
+
+config CPU_DCACHE_DISABLE
+ bool "Disable D-Cache (D-bit)"
+ help
+ Say Y here to disable the processor data cache. Unless
+ you have a specific reason to do so, or are unsure, say N.
+
+config CPU_DCACHE_WRITETHROUGH
+ bool "Force write through D-cache"
+ help
+ Say Y here to use the data cache in writethrough mode. Unless you
+ specifically require this or are unsure, say N.
+
+config CPU_DCACHE_LINE_DISABLE
+ bool "Disable D-cache line ops"
+ default y
+ help
+ Say Y here to disable the data cache line operations.
+
+config CPU_TLB_SINGLE_ENTRY_DISABLE
+ bool "Disable TLB single entry ops"
+ default y
+ help
+ Say Y here to disable the TLB single entry operations.
+
+config SWIOTLB
+ def_bool y
+
+config IOMMU_HELPER
+ def_bool SWIOTLB
+
+config NEED_SG_DMA_LENGTH
+ def_bool SWIOTLB
+
diff --git a/arch/unicore32/mm/Makefile b/arch/unicore32/mm/Makefile
new file mode 100644
index 000000000000..46c166699319
--- /dev/null
+++ b/arch/unicore32/mm/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the linux unicore-specific parts of the memory manager.
+#
+
+obj-y := extable.o fault.o init.o pgd.o mmu.o
+obj-y += flush.o ioremap.o
+
+obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
+
+obj-$(CONFIG_MODULES) += proc-syms.o
+
+obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
+
+obj-$(CONFIG_CPU_UCV2) += cache-ucv2.o tlb-ucv2.o proc-ucv2.o
+
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
new file mode 100644
index 000000000000..28f576d733ee
--- /dev/null
+++ b/arch/unicore32/mm/alignment.c
@@ -0,0 +1,523 @@
+/*
+ * linux/arch/unicore32/mm/alignment.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/*
+ * TODO:
+ * FPU ldm/stm not yet handled
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include <asm/tlbflush.h>
+#include <asm/unaligned.h>
+
+#define CODING_BITS(i) (i & 0xe0000120)
+
+#define LDST_P_BIT(i) (i & (1 << 28)) /* Preindex */
+#define LDST_U_BIT(i) (i & (1 << 27)) /* Add offset */
+#define LDST_W_BIT(i) (i & (1 << 25)) /* Writeback */
+#define LDST_L_BIT(i) (i & (1 << 24)) /* Load */
+
+#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)
+
+#define LDSTH_I_BIT(i) (i & (1 << 26)) /* half-word immed */
+#define LDM_S_BIT(i) (i & (1 << 26)) /* write ASR from BSR */
+#define LDM_H_BIT(i) (i & (1 << 6)) /* select r0-r15 or r16-r31 */
+
+#define RN_BITS(i) ((i >> 19) & 31) /* Rn */
+#define RD_BITS(i) ((i >> 14) & 31) /* Rd */
+#define RM_BITS(i) (i & 31) /* Rm */
+
+#define REGMASK_BITS(i) (((i & 0x7fe00) >> 3) | (i & 0x3f))
+#define OFFSET_BITS(i) (i & 0x03fff)
+
+#define SHIFT_BITS(i) ((i >> 9) & 0x1f)
+#define SHIFT_TYPE(i) (i & 0xc0)
+#define SHIFT_LSL 0x00
+#define SHIFT_LSR 0x40
+#define SHIFT_ASR 0x80
+#define SHIFT_RORRRX 0xc0
+
+union offset_union {
+ unsigned long un;
+ signed long sn;
+};
+
+#define TYPE_ERROR 0
+#define TYPE_FAULT 1
+#define TYPE_LDST 2
+#define TYPE_DONE 3
+#define TYPE_SWAP 4
+#define TYPE_COLS 5 /* Coprocessor load/store */
+
+#define get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.u %1, [%2], #1\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, #1\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get8t_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.u %1, [%2], #1\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, #1\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ get8_unaligned_check(val, a, err); \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr; \
+ __asm__( \
+ "1: stb.u %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "2: stb.u %1, [%2]\n" \
+ "3:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %0, #1\n" \
+ " b 3b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define __put32_unaligned_check(ins, val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr; \
+ __asm__( \
+ "1: "ins" %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "2: "ins" %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "3: "ins" %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "4: "ins" %1, [%2]\n" \
+ "5:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "6: mov %0, #1\n" \
+ " b 5b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ get8_unaligned_check(val, a, err); \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ __put32_unaligned_check("stb.u", val, addr)
+
+#define get32t_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ get8t_unaligned_check(val, a, err); \
+ get8t_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ get8t_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ get8t_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32t_unaligned_check(val, addr) \
+ __put32_unaligned_check("stb.u", val, addr)
+
+static void
+do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs, union offset_union offset)
+{
+ if (!LDST_U_BIT(instr))
+ offset.un = -offset.un;
+
+ if (!LDST_P_BIT(instr))
+ addr += offset.un;
+
+ if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
+ regs->uregs[RN_BITS(instr)] = addr;
+}
+
+static int
+do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs)
+{
+ unsigned int rd = RD_BITS(instr);
+
+ /* the old value 0x40002120 could not identify the swap instr correctly */
+ if ((instr & 0x4b003fe0) == 0x40000120)
+ goto swp;
+
+ if (LDST_L_BIT(instr)) {
+ unsigned long val;
+ get16_unaligned_check(val, addr);
+
+ /* signed half-word? */
+ if (instr & 0x80)
+ val = (signed long)((signed short)val);
+
+ regs->uregs[rd] = val;
+ } else
+ put16_unaligned_check(regs->uregs[rd], addr);
+
+ return TYPE_LDST;
+
+swp:
+ /* only handle swap word;
+ * a swap byte access should not trigger this alignment exception */
+ get32_unaligned_check(regs->uregs[RD_BITS(instr)], addr);
+ put32_unaligned_check(regs->uregs[RM_BITS(instr)], addr);
+ return TYPE_SWAP;
+
+fault:
+ return TYPE_FAULT;
+}
+
+static int
+do_alignment_ldrstr(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs)
+{
+ unsigned int rd = RD_BITS(instr);
+
+ if (!LDST_P_BIT(instr) && LDST_W_BIT(instr))
+ goto trans;
+
+ if (LDST_L_BIT(instr))
+ get32_unaligned_check(regs->uregs[rd], addr);
+ else
+ put32_unaligned_check(regs->uregs[rd], addr);
+ return TYPE_LDST;
+
+trans:
+ if (LDST_L_BIT(instr))
+ get32t_unaligned_check(regs->uregs[rd], addr);
+ else
+ put32t_unaligned_check(regs->uregs[rd], addr);
+ return TYPE_LDST;
+
+fault:
+ return TYPE_FAULT;
+}
+
+/*
+ * LDM/STM alignment handler.
+ *
+ * There are 4 variants of this instruction:
+ *
+ * B = rn pointer before instruction, A = rn pointer after instruction
+ * ------ increasing address ----->
+ * | | r0 | r1 | ... | rx | |
+ * PU = 01 B A
+ * PU = 11 B A
+ * PU = 00 A B
+ * PU = 10 A B
+ */
+static int
+do_alignment_ldmstm(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs)
+{
+ unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
+ unsigned long eaddr, newaddr;
+
+ if (LDM_S_BIT(instr))
+ goto bad;
+
+ pc_correction = 4; /* processor implementation defined */
+
+ /* count the number of registers in the mask to be transferred */
+ nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
+
+ rn = RN_BITS(instr);
+ newaddr = eaddr = regs->uregs[rn];
+
+ if (!LDST_U_BIT(instr))
+ nr_regs = -nr_regs;
+ newaddr += nr_regs;
+ if (!LDST_U_BIT(instr))
+ eaddr = newaddr;
+
+ if (LDST_P_EQ_U(instr)) /* U = P */
+ eaddr += 4;
+
+ /*
+ * This is a "hint" - we already have eaddr worked out by the
+ * processor for us.
+ */
+ if (addr != eaddr) {
+ printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
+ "addr = %08lx, eaddr = %08lx\n",
+ instruction_pointer(regs), instr, addr, eaddr);
+ show_regs(regs);
+ }
+
+ if (LDM_H_BIT(instr))
+ reg_correction = 0x10;
+ else
+ reg_correction = 0x00;
+
+ for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
+ regbits >>= 1, rd += 1)
+ if (regbits & 1) {
+ if (LDST_L_BIT(instr))
+ get32_unaligned_check(regs->
+ uregs[rd + reg_correction], eaddr);
+ else
+ put32_unaligned_check(regs->
+ uregs[rd + reg_correction], eaddr);
+ eaddr += 4;
+ }
+
+ if (LDST_W_BIT(instr))
+ regs->uregs[rn] = newaddr;
+ return TYPE_DONE;
+
+fault:
+ regs->UCreg_pc -= pc_correction;
+ return TYPE_FAULT;
+
+bad:
+ printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
+ return TYPE_ERROR;
+}
+
+static int
+do_alignment(unsigned long addr, unsigned int error_code, struct pt_regs *regs)
+{
+ union offset_union offset;
+ unsigned long instr, instrptr;
+ int (*handler) (unsigned long addr, unsigned long instr,
+ struct pt_regs *regs);
+ unsigned int type;
+
+ instrptr = instruction_pointer(regs);
+ if (instrptr >= PAGE_OFFSET)
+ instr = *(unsigned long *)instrptr;
+ else {
+ __asm__ __volatile__(
+ "ldw.u %0, [%1]\n"
+ : "=&r"(instr)
+ : "r"(instrptr));
+ }
+
+ regs->UCreg_pc += 4;
+
+ switch (CODING_BITS(instr)) {
+ case 0x40000120: /* ldrh or strh */
+ if (LDSTH_I_BIT(instr))
+ offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
+ else
+ offset.un = regs->uregs[RM_BITS(instr)];
+ handler = do_alignment_ldrhstrh;
+ break;
+
+ case 0x60000000: /* ldr or str immediate */
+ case 0x60000100: /* ldr or str immediate */
+ case 0x60000020: /* ldr or str immediate */
+ case 0x60000120: /* ldr or str immediate */
+ offset.un = OFFSET_BITS(instr);
+ handler = do_alignment_ldrstr;
+ break;
+
+ case 0x40000000: /* ldr or str register */
+ offset.un = regs->uregs[RM_BITS(instr)];
+ {
+ unsigned int shiftval = SHIFT_BITS(instr);
+
+ switch (SHIFT_TYPE(instr)) {
+ case SHIFT_LSL:
+ offset.un <<= shiftval;
+ break;
+
+ case SHIFT_LSR:
+ offset.un >>= shiftval;
+ break;
+
+ case SHIFT_ASR:
+ offset.sn >>= shiftval;
+ break;
+
+ case SHIFT_RORRRX:
+ if (shiftval == 0) {
+ offset.un >>= 1;
+ if (regs->UCreg_asr & PSR_C_BIT)
+ offset.un |= 1 << 31;
+ } else
+ offset.un = offset.un >> shiftval |
+ offset.un << (32 - shiftval);
+ break;
+ }
+ }
+ handler = do_alignment_ldrstr;
+ break;
+
+ case 0x80000000: /* ldm or stm */
+ case 0x80000020: /* ldm or stm */
+ handler = do_alignment_ldmstm;
+ break;
+
+ default:
+ goto bad;
+ }
+
+ type = handler(addr, instr, regs);
+
+ if (type == TYPE_ERROR || type == TYPE_FAULT)
+ goto bad_or_fault;
+
+ if (type == TYPE_LDST)
+ do_alignment_finish_ldst(addr, instr, regs, offset);
+
+ return 0;
+
+bad_or_fault:
+ if (type == TYPE_ERROR)
+ goto bad;
+ regs->UCreg_pc -= 4;
+ /*
+ * We got a fault - fix it up, or die.
+ */
+ do_bad_area(addr, error_code, regs);
+ return 0;
+
+bad:
+ /*
+ * Oops, we didn't handle the instruction.
+ * However, we must handle FPU instructions first.
+ */
+#ifdef CONFIG_UNICORE_FPU_F64
+ /* handle co.load/store */
+#define CODING_COLS 0xc0000000
+#define COLS_OFFSET_BITS(i) (i & 0x1FF)
+#define COLS_L_BITS(i) (i & (1<<24))
+#define COLS_FN_BITS(i) ((i>>14) & 31)
+ if ((instr & 0xe0000000) == CODING_COLS) {
+ unsigned int fn = COLS_FN_BITS(instr);
+ unsigned long val = 0;
+ if (COLS_L_BITS(instr)) {
+ get32t_unaligned_check(val, addr);
+ switch (fn) {
+#define ASM_MTF(n) case n: \
+ __asm__ __volatile__("MTF %0, F" __stringify(n) \
+ : : "r"(val)); \
+ break;
+ ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
+ ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
+ ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
+ ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
+ ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
+ ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
+ ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
+ ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
+#undef ASM_MTF
+ }
+ } else {
+ switch (fn) {
+#define ASM_MFF(n) case n: \
+ __asm__ __volatile__("MFF %0, F" __stringify(n) \
+ : : "r"(val)); \
+ break;
+ ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
+ ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
+ ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
+ ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
+ ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
+ ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
+ ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
+ ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
+#undef ASM_MFF
+ }
+ put32t_unaligned_check(val, addr);
+ }
+ return TYPE_COLS;
+ }
+fault:
+ return TYPE_FAULT;
+#endif
+ printk(KERN_ERR "Alignment trap: not handling instruction "
+ "%08lx at [<%08lx>]\n", instr, instrptr);
+ return 1;
+}
+
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ */
+static int __init alignment_init(void)
+{
+ hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+ "alignment exception");
+
+ return 0;
+}
+
+fs_initcall(alignment_init);
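For illustration only (not part of the patch), what get16_unaligned_check() above assembles from its two byte loads, assuming little-endian byte order as used throughout this handler:

	static inline unsigned int get16_example(const unsigned char *p)
	{
		return p[0] | (p[1] << 8);	/* low byte first, then high byte */
	}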
diff --git a/arch/unicore32/mm/cache-ucv2.S b/arch/unicore32/mm/cache-ucv2.S
new file mode 100644
index 000000000000..ecaa1727f906
--- /dev/null
+++ b/arch/unicore32/mm/cache-ucv2.S
@@ -0,0 +1,212 @@
+/*
+ * linux/arch/unicore32/mm/cache-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the "shell" of the UniCore-v2 processor support.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+
+#include "proc-macros.S"
+
+/*
+ * __cpuc_flush_icache_all()
+ * __cpuc_flush_kern_all()
+ * __cpuc_flush_user_all()
+ *
+ * Flush the entire cache.
+ */
+ENTRY(__cpuc_flush_icache_all)
+ /*FALLTHROUGH*/
+ENTRY(__cpuc_flush_kern_all)
+ /*FALLTHROUGH*/
+ENTRY(__cpuc_flush_user_all)
+ mov r0, #0
+ movc p0.c5, r0, #14 @ Dcache flush all
+ nop8
+
+ mov r0, #0
+ movc p0.c5, r0, #20 @ Icache invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_flush_user_range(start, end, flags)
+ *
+ * Flush a range of cache entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - flags - vm_area_struct flags describing address space
+ */
+ENTRY(__cpuc_flush_user_range)
+ cxor.a r2, #0
+ beq __cpuc_dma_flush_range
+
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
+ sub r1, r1, r0
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+101: dcacheline_flush r0, r11, r12
+
+ add r0, r0, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 101b
+ b 3f
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #14 @ Dcache flush all
+ nop8
+
+3: mov ip, #0
+ movc p0.c5, ip, #20 @ Icache invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_coherent_kern_range(start,end)
+ * __cpuc_coherent_user_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within the specified
+ * region. This is typically used when code has been written to
+ * a memory region, and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__cpuc_coherent_kern_range)
+ /* FALLTHROUGH */
+ENTRY(__cpuc_coherent_user_range)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
+ sub r1, r1, r0
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+ @ r0 va2pa r10
+ mov r9, #PAGE_SZ
+ sub r9, r9, #1 @ PAGE_MASK
+101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
+ b 103f
+102: cand.a r0, r9
+ beq 101b
+
+103: movc p0.c5, r10, #11 @ Dcache clean line of R10
+ nop8
+
+ add r0, r0, #CACHE_LINESIZE
+ add r10, r10, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 102b
+ b 3f
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+
+3: mov ip, #0
+ movc p0.c5, ip, #20 @ Icache invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_flush_kern_dcache_area(void *addr, size_t size)
+ *
+ * - addr - kernel address
+ * - size - region size
+ */
+ENTRY(__cpuc_flush_kern_dcache_area)
+ mov ip, #0
+ movc p0.c5, ip, #14 @ Dcache flush all
+ nop8
+ mov pc, lr
+
+/*
+ * __cpuc_dma_clean_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__cpuc_dma_clean_range)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1
+ sub r1, r1, r0
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ @ r0 va2pa r10
+ mov r9, #PAGE_SZ
+ sub r9, r9, #1 @ PAGE_MASK
+101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
+ b 1f
+102: cand.a r0, r9
+ beq 101b
+
+1: movc p0.c5, r10, #11 @ Dcache clean line of R10
+ nop8
+ add r0, r0, #CACHE_LINESIZE
+ add r10, r10, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 102b
+ mov pc, lr
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_dma_inv_range(start,end)
+ * __cpuc_dma_flush_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__cpuc_dma_inv_range:
+ /* FALLTHROUGH */
+ENTRY(__cpuc_dma_flush_range)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1
+ sub r1, r1, r0
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ @ r0 va2pa r10
+101: dcacheline_flush r0, r11, r12
+
+ add r0, r0, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 101b
+ mov pc, lr
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #14 @ Dcache flush all
+ nop8
+
+ mov pc, lr
+
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
new file mode 100644
index 000000000000..bfa9fbb2bbb1
--- /dev/null
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -0,0 +1,34 @@
+/*
+ * Contains routines needed to support swiotlb for UniCore32.
+ *
+ * Copyright (C) 2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
+
+#include <asm/dma.h>
+
+struct dma_map_ops swiotlb_dma_map_ops = {
+ .alloc_coherent = swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(swiotlb_dma_map_ops);
diff --git a/arch/unicore32/mm/extable.c b/arch/unicore32/mm/extable.c
new file mode 100644
index 000000000000..6564180eb285
--- /dev/null
+++ b/arch/unicore32/mm/extable.c
@@ -0,0 +1,24 @@
+/*
+ * linux/arch/unicore32/mm/extable.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup)
+ regs->UCreg_pc = fixup->fixup;
+
+ return fixup != NULL;
+}
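For context (not part of the patch): the ".long 100b, \abort" pairs that the assembly files above push into __ex_table are what search_exception_tables() walks here. Assuming the generic two-word entry layout of this kernel, each entry looks roughly like:

	struct exception_table_entry_example {
		unsigned long insn;	/* address of the instruction that may fault   */
		unsigned long fixup;	/* address fixup_exception() redirects pc to   */
	};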
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
new file mode 100644
index 000000000000..283aa4b50b7a
--- /dev/null
+++ b/arch/unicore32/mm/fault.c
@@ -0,0 +1,479 @@
+/*
+ * linux/arch/unicore32/mm/fault.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF (1 << 31)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+ /* fault status bits xyabcde decode to abcde + xy */
+ return (fsr & 31) + ((fsr & (3 << 5)) >> 5);
+}
+
+/*
+ * This is useful to dump out the page tables associated with
+ * 'addr' in mm 'mm'.
+ */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+
+ if (!mm)
+ mm = &init_mm;
+
+ printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+ pgd = pgd_offset(mm, addr);
+ printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+
+ do {
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (pgd_none(*pgd))
+ break;
+
+ if (pgd_bad(*pgd)) {
+ printk("(bad)");
+ break;
+ }
+
+ pmd = pmd_offset((pud_t *) pgd, addr);
+ if (PTRS_PER_PMD != 1)
+ printk(", *pmd=%08lx", pmd_val(*pmd));
+
+ if (pmd_none(*pmd))
+ break;
+
+ if (pmd_bad(*pmd)) {
+ printk("(bad)");
+ break;
+ }
+
+ /* We must not map this if we have highmem enabled */
+ if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+ break;
+
+ pte = pte_offset_map(pmd, addr);
+ printk(", *pte=%08lx", pte_val(*pte));
+ pte_unmap(pte);
+ } while (0);
+
+ printk("\n");
+}
+
+/*
+ * Oops. The kernel tried to access some page that wasn't present.
+ */
+static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ /*
+ * Are we prepared to handle this kernel fault?
+ */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ printk(KERN_ALERT
+ "Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+
+ show_pte(mm, addr);
+ die("Oops", regs, fsr);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+}
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * User mode accesses just cause a SIGSEGV
+ */
+static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ unsigned int fsr, unsigned int sig, int code,
+ struct pt_regs *regs)
+{
+ struct siginfo si;
+
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+ si.si_signo = sig;
+ si.si_errno = 0;
+ si.si_code = code;
+ si.si_addr = (void __user *)addr;
+ force_sig_info(sig, &si, tsk);
+}
+
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->active_mm;
+
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (user_mode(regs))
+ __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
+ else
+ __do_kernel_fault(mm, addr, fsr, regs);
+}
+
+#define VM_FAULT_BADMAP 0x010000
+#define VM_FAULT_BADACCESS 0x020000
+
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (!(fsr ^ 0x12)) /* write? */
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ struct task_struct *tsk)
+{
+ struct vm_area_struct *vma;
+ int fault;
+
+ vma = find_vma(mm, addr);
+ fault = VM_FAULT_BADMAP;
+ if (unlikely(!vma))
+ goto out;
+ if (unlikely(vma->vm_start > addr))
+ goto check_stack;
+
+ /*
+ * Ok, we have a good vm_area for this
+ * memory access, so we can handle it.
+ */
+good_area:
+ if (access_error(fsr, vma)) {
+ fault = VM_FAULT_BADACCESS;
+ goto out;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the fault.
+ */
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
+ (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR))
+ return fault;
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ return fault;
+
+check_stack:
+ if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+ goto good_area;
+out:
+ return fault;
+}
+
+static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ int fault, sig, code;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+ /*
+ * As per x86, we may deadlock here. However, since the kernel only
+ * validly references user space from well defined areas of the code,
+ * we can bug out early if this fault came from code which shouldn't
+ * be accessing user space.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!user_mode(regs)
+ && !search_exception_tables(regs->UCreg_pc))
+ goto no_context;
+ down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded, in
+ * which case we'll have missed the might_sleep() from
+ * down_read().
+ */
+ might_sleep();
+#ifdef CONFIG_DEBUG_VM
+ if (!user_mode(regs) &&
+ !search_exception_tables(regs->UCreg_pc))
+ goto no_context;
+#endif
+ }
+
+ fault = __do_pf(mm, addr, fsr, tsk);
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+ */
+ if (likely(!(fault &
+ (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+ return 0;
+
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we
+ * got oom-killed)
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_SIGBUS) {
+ /*
+ * We had some memory, but were unable to
+ * successfully fix up this page fault.
+ */
+ sig = SIGBUS;
+ code = BUS_ADRERR;
+ } else {
+ /*
+ * Something tried to access memory that
+ * isn't in our memory map..
+ */
+ sig = SIGSEGV;
+ code = fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR;
+ }
+
+ __do_user_fault(tsk, addr, fsr, sig, code, regs);
+ return 0;
+
+no_context:
+ __do_kernel_fault(mm, addr, fsr, regs);
+ return 0;
+}
+
+/*
+ * First Level Translation Fault Handler
+ *
+ * We enter here because the first level page table doesn't contain
+ * a valid entry for the address.
+ *
+ * If the address is in kernel space (>= TASK_SIZE), then we are
+ * probably faulting in the vmalloc() area.
+ *
+ * If the init_task's first level page tables contain the relevant
+ * entry, we copy it to this task. If not, we send the process
+ * a signal, fixup the exception, or oops the kernel.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may be in an
+ * interrupt or a critical region, and should only copy the information
+ * from the master page table, nothing more.
+ */
+static int do_ifault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned int index;
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+
+ if (addr < TASK_SIZE)
+ return do_pf(addr, fsr, regs);
+
+ if (user_mode(regs))
+ goto bad_area;
+
+ index = pgd_index(addr);
+
+ pgd = cpu_get_pgd() + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (pgd_none(*pgd_k))
+ goto bad_area;
+
+ pmd_k = pmd_offset((pud_t *) pgd_k, addr);
+ pmd = pmd_offset((pud_t *) pgd, addr);
+
+ if (pmd_none(*pmd_k))
+ goto bad_area;
+
+ set_pmd(pmd, *pmd_k);
+ flush_pmd_entry(pmd);
+ return 0;
+
+bad_area:
+ do_bad_area(addr, fsr, regs);
+ return 0;
+}
+
+/*
+ * This abort handler always returns "fault".
+ */
+static int do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ return 1;
+}
+
+static int do_good(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned int res1, res2;
+
+ printk("dabt exception but no error!\n");
+
+ __asm__ __volatile__(
+ "mff %0,f0\n"
+ "mff %1,f1\n"
+ : "=r"(res1), "=r"(res2)
+ :
+ : "memory");
+
+ printk(KERN_EMERG "r0 :%08x r1 :%08x\n", res1, res2);
+ panic("shut up\n");
+ return 0;
+}
+
+static struct fsr_info {
+ int (*fn) (unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+} fsr_info[] = {
+ /*
+ * The following are the standard UniCore-I and UniCore-II aborts.
+ */
+ { do_good, SIGBUS, 0, "no error" },
+ { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
+ { do_bad, SIGBUS, BUS_OBJERR, "external exception" },
+ { do_bad, SIGBUS, 0, "burst operation" },
+ { do_bad, SIGBUS, 0, "unknown 00100" },
+ { do_ifault, SIGSEGV, SEGV_MAPERR, "2nd level pt non-exist"},
+ { do_bad, SIGBUS, 0, "2nd lvl large pt non-exist" },
+ { do_bad, SIGBUS, 0, "invalid pte" },
+ { do_pf, SIGSEGV, SEGV_MAPERR, "page miss" },
+ { do_bad, SIGBUS, 0, "middle page miss" },
+ { do_bad, SIGBUS, 0, "large page miss" },
+ { do_pf, SIGSEGV, SEGV_MAPERR, "super page (section) miss" },
+ { do_bad, SIGBUS, 0, "unknown 01100" },
+ { do_bad, SIGBUS, 0, "unknown 01101" },
+ { do_bad, SIGBUS, 0, "unknown 01110" },
+ { do_bad, SIGBUS, 0, "unknown 01111" },
+ { do_bad, SIGBUS, 0, "addr: up 3G or IO" },
+ { do_pf, SIGSEGV, SEGV_ACCERR, "read unreadable addr" },
+ { do_pf, SIGSEGV, SEGV_ACCERR, "write unwriteable addr"},
+ { do_pf, SIGSEGV, SEGV_ACCERR, "exec unexecutable addr"},
+ { do_bad, SIGBUS, 0, "unknown 10100" },
+ { do_bad, SIGBUS, 0, "unknown 10101" },
+ { do_bad, SIGBUS, 0, "unknown 10110" },
+ { do_bad, SIGBUS, 0, "unknown 10111" },
+ { do_bad, SIGBUS, 0, "unknown 11000" },
+ { do_bad, SIGBUS, 0, "unknown 11001" },
+ { do_bad, SIGBUS, 0, "unknown 11010" },
+ { do_bad, SIGBUS, 0, "unknown 11011" },
+ { do_bad, SIGBUS, 0, "unknown 11100" },
+ { do_bad, SIGBUS, 0, "unknown 11101" },
+ { do_bad, SIGBUS, 0, "unknown 11110" },
+ { do_bad, SIGBUS, 0, "unknown 11111" }
+};
+
+void __init hook_fault_code(int nr,
+ int (*fn) (unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
+ BUG();
+
+ fsr_info[nr].fn = fn;
+ fsr_info[nr].sig = sig;
+ fsr_info[nr].code = code;
+ fsr_info[nr].name = name;
+}
+
+/*
+ * Dispatch a data abort to the relevant handler.
+ */
+asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ inf->name, fsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ uc32_notify_die("", regs, &info, fsr, 0);
+}
+
+asmlinkage void do_PrefetchAbort(unsigned long addr,
+ unsigned int ifsr, struct pt_regs *regs)
+{
+ const struct fsr_info *inf = fsr_info + fsr_fs(ifsr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ uc32_notify_die("", regs, &info, ifsr, 0);
+}
diff --git a/arch/unicore32/mm/flush.c b/arch/unicore32/mm/flush.c
new file mode 100644
index 000000000000..93478cc8b26d
--- /dev/null
+++ b/arch/unicore32/mm/flush.c
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/unicore32/mm/flush.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system.h>
+#include <asm/tlbflush.h>
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn)
+{
+}
+
+static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *kaddr, unsigned long len)
+{
+ /* VIPT non-aliasing D-cache */
+ if (vma->vm_flags & VM_EXEC) {
+ unsigned long addr = (unsigned long)kaddr;
+
+ __cpuc_coherent_kern_range(addr, addr + len);
+ }
+}
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * process's address space. Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ flush_ptrace_access(vma, page, uaddr, dst, len);
+}
+
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
+{
+ /*
+ * Write back any data associated with the kernel mapping of this
+ * page. This ensures that data in the physical page is mutually
+ * coherent with the kernel's mapping.
+ */
+ __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
+}
+
+/*
+ * Ensure cache coherency between kernel mapping and userspace mapping
+ * of this page.
+ */
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
+
+ if (mapping && !mapping_mapped(mapping))
+ clear_bit(PG_dcache_clean, &page->flags);
+ else {
+ __flush_dcache_page(mapping, page);
+ if (mapping)
+ __flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
+ }
+}
+EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
new file mode 100644
index 000000000000..3dbe3709b69d
--- /dev/null
+++ b/arch/unicore32/mm/init.c
@@ -0,0 +1,517 @@
+/*
+ * linux/arch/unicore32/mm/init.c
+ *
+ * Copyright (C) 2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+#include <mach/map.h>
+
+#include "mm.h"
+
+static unsigned long phys_initrd_start __initdata = 0x01000000;
+static unsigned long phys_initrd_size __initdata = SZ_8M;
+
+static int __init early_initrd(char *p)
+{
+ unsigned long start, size;
+ char *endp;
+
+ start = memparse(p, &endp);
+ if (*endp == ',') {
+ size = memparse(endp + 1, NULL);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+ return 0;
+}
+early_param("initrd", early_initrd);
+
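An illustrative boot parameter matching the parsing above (assuming the bootloader passes a kernel command line): for example, initrd=0x02000000,8M would override the 0x01000000 / SZ_8M defaults set earlier.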
+/*
+ * This keeps memory configuration data used by a couple of memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map. It is populated by uc32_add_memory().
+ */
+struct meminfo meminfo;
+
+void show_mem(void)
+{
+ int free = 0, total = 0, reserved = 0;
+ int shared = 0, cached = 0, slab = 0, i;
+ struct meminfo *mi = &meminfo;
+
+ printk(KERN_DEFAULT "Mem-info:\n");
+ show_free_areas();
+
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (PageSlab(page))
+ slab++;
+ else if (!page_count(page))
+ free++;
+ else
+ shared += page_count(page) - 1;
+ page++;
+ } while (page < end);
+ }
+
+ printk(KERN_DEFAULT "%d pages of RAM\n", total);
+ printk(KERN_DEFAULT "%d free pages\n", free);
+ printk(KERN_DEFAULT "%d reserved pages\n", reserved);
+ printk(KERN_DEFAULT "%d slab pages\n", slab);
+ printk(KERN_DEFAULT "%d pages shared\n", shared);
+ printk(KERN_DEFAULT "%d pages swap cached\n", cached);
+}
+
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ *min = -1UL;
+ *max_low = *max_high = 0;
+
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned long start, end;
+
+ start = bank_pfn_start(bank);
+ end = bank_pfn_end(bank);
+
+ if (*min > start)
+ *min = start;
+ if (*max_high < end)
+ *max_high = end;
+ if (bank->highmem)
+ continue;
+ if (*max_low < end)
+ *max_low = end;
+ }
+}
+
+static void __init uc32_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct memblock_region *reg;
+ unsigned int boot_pages;
+ phys_addr_t bitmap;
+ pg_data_t *pgdat;
+
+ /*
+ * Allocate the bootmem bitmap page. This must be in a region
+ * of memory which has already been mapped.
+ */
+ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+ bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+ __pfn_to_phys(end_pfn));
+
+ /*
+ * Initialise the bootmem allocator, handing the
+ * memory banks over to bootmem.
+ */
+ node_set_online(0);
+ pgdat = NODE_DATA(0);
+ init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
+
+ /* Free the lowmem regions from memblock into bootmem. */
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
+ }
+
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
+ for_each_memblock(reserved, reg) {
+ unsigned long start = memblock_region_reserved_base_pfn(reg);
+ unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ reserve_bootmem(__pfn_to_phys(start),
+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
+ }
+}
+
+static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
+{
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+ struct memblock_region *reg;
+
+ /*
+ * initialise the zones.
+ */
+ memset(zone_size, 0, sizeof(zone_size));
+
+ /*
+ * The memory size has already been determined. If we need
+ * to do anything fancy with the allocation of this memory
+ * to the zones, now is the time to do it.
+ */
+ zone_size[0] = max_low - min;
+
+ /*
+ * Calculate the size of the holes.
+ * holes = node_size - sum(bank_sizes)
+ */
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+ zhole_size[0] -= low_end - start;
+ }
+ }
+
+ /*
+ * Adjust the sizes according to any special requirements for
+ * this machine type.
+ */
+ arch_adjust_zones(zone_size, zhole_size);
+
+ free_area_init_node(0, zone_size, min, zhole_size);
+}
+
+int pfn_valid(unsigned long pfn)
+{
+ return memblock_is_memory(pfn << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(pfn_valid);
+
+static void uc32_memory_present(void)
+{
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
+void __init uc32_memblock_init(struct meminfo *mi)
+{
+ int i;
+
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
+ meminfo_cmp, NULL);
+
+ memblock_init();
+ for (i = 0; i < mi->nr_banks; i++)
+ memblock_add(mi->bank[i].start, mi->bank[i].size);
+
+ /* Register the kernel text, kernel data and initrd with memblock. */
+ memblock_reserve(__pa(_text), _end - _text);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+ /* Now convert initrd to virtual addresses */
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
+#endif
+
+ uc32_mm_memblock_reserve();
+
+ memblock_analyze();
+ memblock_dump_all();
+}
+
+void __init bootmem_init(void)
+{
+ unsigned long min, max_low, max_high;
+
+ max_low = max_high = 0;
+
+ find_limits(&min, &max_low, &max_high);
+
+ uc32_bootmem_init(min, max_low);
+
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
+#endif
+ /*
+ * Sparsemem tries to allocate bootmem in memory_present(),
+ * so must be done after the fixed reservations
+ */
+ uc32_memory_present();
+
+ /*
+ * sparse_init() needs the bootmem allocator up and running.
+ */
+ sparse_init();
+
+ /*
+ * Now free the memory - free_area_init_node needs
+ * the sparse mem_map arrays initialized by sparse_init()
+ * for memmap_init_zone(), otherwise all PFNs are invalid.
+ */
+ uc32_bootmem_free(min, max_low, max_high);
+
+ high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+
+ /*
+ * This doesn't seem to be used by the Linux memory manager any
+ * more, but is used by ll_rw_block. If we can get rid of it, we
+ * also get rid of some of the stuff above as well.
+ *
+ * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+ * the system, not the maximum PFN.
+ */
+ max_low_pfn = max_low - PHYS_PFN_OFFSET;
+ max_pfn = max_high - PHYS_PFN_OFFSET;
+}
+
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
+{
+ unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
+
+ for (; pfn < end; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ pages++;
+ }
+
+ if (size && s)
+ printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+
+ return pages;
+}
+
+static inline void
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+ struct page *start_pg, *end_pg;
+ unsigned long pg, pgend;
+
+ /*
+ * Convert start_pfn/end_pfn to a struct page pointer.
+ */
+ start_pg = pfn_to_page(start_pfn - 1) + 1;
+ end_pg = pfn_to_page(end_pfn);
+
+ /*
+ * Convert to physical addresses, and
+ * round start upwards and end downwards.
+ */
+ pg = PAGE_ALIGN(__pa(start_pg));
+ pgend = __pa(end_pg) & PAGE_MASK;
+
+ /*
+ * If there are free pages between these,
+ * free the section of the memmap array.
+ */
+ if (pg < pgend)
+ free_bootmem(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(struct meminfo *mi)
+{
+ unsigned long bank_start, prev_bank_end = 0;
+ unsigned int i;
+
+ /*
+ * This relies on each bank being in address order.
+ * The banks are sorted previously in bootmem_init().
+ */
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+
+ bank_start = bank_pfn_start(bank);
+
+ /*
+ * If we had a previous bank, and there is a space
+ * between the current bank and the previous, free it.
+ */
+ if (prev_bank_end && prev_bank_end < bank_start)
+ free_memmap(prev_bank_end, bank_start);
+
+ /*
+ * Align up here since the VM subsystem insists that the
+ * memmap entries are valid from the bank end aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+ }
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
+ int i;
+
+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+ /* this will put all unused low memory onto the freelists */
+ free_unused_memmap(&meminfo);
+
+ totalram_pages += free_all_bootmem();
+
+ reserved_pages = free_pages = 0;
+
+ for_each_bank(i, &meminfo) {
+ struct membank *bank = &meminfo.bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ if (PageReserved(page))
+ reserved_pages++;
+ else if (!page_count(page))
+ free_pages++;
+ page++;
+ } while (page < end);
+ }
+
+ /*
+ * Since our memory may not be contiguous, calculate the
+ * real number of pages we have in this system
+ */
+ printk(KERN_INFO "Memory:");
+ num_physpages = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
+ }
+ printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+ printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ free_pages << (PAGE_SHIFT-10),
+ reserved_pages << (PAGE_SHIFT-10),
+ totalhigh_pages << (PAGE_SHIFT-10));
+
+ printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n",
+
+ VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
+ DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
+ VMALLOC_START, VMALLOC_END,
+ DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
+ PAGE_OFFSET, (unsigned long)high_memory,
+ DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
+ MODULES_VADDR, MODULES_END,
+ DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
+
+ __init_begin, __init_end,
+ DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
+ _stext, _etext,
+ DIV_ROUND_UP((_etext - _stext), SZ_1K),
+ _sdata, _edata,
+ DIV_ROUND_UP((_edata - _sdata), SZ_1K));
+
+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+ BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+ if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+ /*
+ * On a machine this small we won't get
+ * anywhere without overcommit, so turn
+ * it on by default.
+ */
+ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
+ }
+}
+
+void free_initmem(void)
+{
+ totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+ __phys_to_pfn(__pa(__init_end)),
+ "init");
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int keep_initrd;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (!keep_initrd)
+ totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+ __phys_to_pfn(__pa(end)),
+ "initrd");
+}
+
+static int __init keepinitrd_setup(char *__unused)
+{
+ keep_initrd = 1;
+ return 1;
+}
+
+__setup("keepinitrd", keepinitrd_setup);
+#endif
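The keepinitrd handling above means the initrd pages are handed back to the page allocator unless the parameter appears on the kernel command line. As a rough illustration only, a boot command line along the following lines would keep the initrd image resident after init has run; everything except "keepinitrd" is a placeholder:

    console=ttyS0,115200 root=/dev/mtdblock2 keepinitrd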
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
new file mode 100644
index 000000000000..b7a605597b08
--- /dev/null
+++ b/arch/unicore32/mm/ioremap.c
@@ -0,0 +1,261 @@
+/*
+ * linux/arch/unicore32/mm/ioremap.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * This allows a driver to remap an arbitrary region of bus memory into
+ * virtual space. One should *only* use readl, writel, memcpy_toio and
+ * so on with such remapped areas.
+ *
+ * Because UniCore only has a 32-bit address space we can't address the
+ * whole of the (physical) PCI space at once. PCI huge-mode addressing
+ * allows us to circumvent this restriction by splitting PCI space into
+ * two 2GB chunks and mapping only one at a time into processor memory.
+ * We use MMU protection domains to trap any attempt to access the bank
+ * that is not currently mapped. (This isn't fully implemented yet.)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+#include <mach/map.h>
+#include "mm.h"
+
+/*
+ * Used by ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in vm_struct->flags field.
+ */
+#define VM_UNICORE_SECTION_MAPPING 0x80000000
+
+int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype)
+{
+ return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+ __pgprot(mtype->prot_pte));
+}
+EXPORT_SYMBOL(ioremap_page);
+
+/*
+ * Section support is unsafe on SMP - If you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 4MB aligned or we will overflow in the loop below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+ unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
+ pgd_t *pgd;
+
+ flush_cache_vunmap(addr, end);
+ pgd = pgd_offset_k(addr);
+ do {
+ pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);
+
+ pmd = *pmdp;
+ if (!pmd_none(pmd)) {
+ /*
+ * Clear the PMD from the page table, and
+ * increment the kvm sequence so others
+ * notice this change.
+ *
+ * Note: this is still racy on SMP machines.
+ */
+ pmd_clear(pmdp);
+
+ /*
+ * Free the page table, if there was one.
+ */
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
+ }
+
+ addr += PGDIR_SIZE;
+ pgd++;
+ } while (addr < end);
+
+ flush_tlb_kernel_range(virt, end);
+}
+
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+ size_t size, const struct mem_type *type)
+{
+ unsigned long addr = virt, end = virt + size;
+ pgd_t *pgd;
+
+ /*
+ * Remove and free any PTE-based mapping, and
+ * sync the current kernel mapping.
+ */
+ unmap_area_sections(virt, size);
+
+ pgd = pgd_offset_k(addr);
+ do {
+ pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
+
+ set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
+ pfn += SZ_4M >> PAGE_SHIFT;
+ flush_pmd_entry(pmd);
+
+ addr += PGDIR_SIZE;
+ pgd++;
+ } while (addr < end);
+
+ return 0;
+}
+
+void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
+{
+ const struct mem_type *type;
+ int err;
+ unsigned long addr;
+ struct vm_struct *area;
+
+ /*
+ * High mappings must be section aligned
+ */
+ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
+ return NULL;
+
+ /*
+ * Don't allow RAM to be mapped
+ */
+ if (pfn_valid(pfn)) {
+ printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
+ "system memory. This leads to architecturally\n"
+ "unpredictable behaviour, and ioremap() will fail in\n"
+ "the next kernel release. Please fix your driver.\n");
+ WARN_ON(1);
+ }
+
+ type = get_mem_type(mtype);
+ if (!type)
+ return NULL;
+
+ /*
+ * Page align the mapping size, taking account of any offset.
+ */
+ size = PAGE_ALIGN(offset + size);
+
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+
+ if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+ area->flags |= VM_UNICORE_SECTION_MAPPING;
+ err = remap_area_sections(addr, pfn, size, type);
+ } else
+ err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+ __pgprot(type->prot_pte));
+
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ flush_cache_vmap(addr, addr + size);
+ return (void __iomem *) (offset + addr);
+}
+
+void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ unsigned long last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys_addr);
+
+ /*
+ * Don't allow wraparound or zero size
+ */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap_pfn);
+
+void __iomem *
+__uc32_ioremap(unsigned long phys_addr, size_t size)
+{
+ return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap);
+
+void __iomem *
+__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
+{
+ return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap_cached);
+
+void __uc32_iounmap(volatile void __iomem *io_addr)
+{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+ struct vm_struct **p, *tmp;
+
+ /*
+ * If this is a section based mapping we need to handle it
+ * specially as the VM subsystem does not know how to handle
+ * such a beast. We need the lock here because we need to clear
+ * all the mappings before the area can be reclaimed
+ * by someone else.
+ */
+ write_lock(&vmlist_lock);
+ for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+ if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+ if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
+ unmap_area_sections((unsigned long)tmp->addr,
+ tmp->size);
+ }
+ break;
+ }
+ }
+ write_unlock(&vmlist_lock);
+
+ vunmap(addr);
+}
+EXPORT_SYMBOL(__uc32_iounmap);
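As the file header notes, remapped regions should only be touched through readl/writel/memcpy_toio and friends. A minimal, purely illustrative driver-side sketch follows; the physical base, size and register offsets are made-up placeholders, and it assumes the generic ioremap()/iounmap() entry points resolve to the __uc32_* helpers above:

    #include <linux/errno.h>
    #include <linux/io.h>

    /* All addresses and offsets here are hypothetical placeholders. */
    #define EXAMPLE_DEV_PHYS	0xee000000UL
    #define EXAMPLE_DEV_SIZE	0x1000
    #define EXAMPLE_REG_CTRL	0x00
    #define EXAMPLE_REG_STATUS	0x04

    static void __iomem *example_regs;

    static int example_dev_init(void)
    {
    	/* Device-type (strongly ordered) mapping, as for MT_DEVICE. */
    	example_regs = ioremap(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);
    	if (!example_regs)
    		return -ENOMEM;

    	writel(1, example_regs + EXAMPLE_REG_CTRL);	/* MMIO accessors only */
    	return readl(example_regs + EXAMPLE_REG_STATUS) ? 0 : -EIO;
    }

    static void example_dev_exit(void)
    {
    	iounmap(example_regs);	/* also tears down any section mapping */
    }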
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
new file mode 100644
index 000000000000..3296bca0f1f7
--- /dev/null
+++ b/arch/unicore32/mm/mm.h
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/unicore32/mm/mm.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* the upper-most page table pointer */
+extern pmd_t *top_pmd;
+extern int sysctl_overcommit_memory;
+
+#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
+
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+ return pmd_offset((pud_t *)pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+ return pmd_off(pgd_offset_k(virt), virt);
+}
+
+struct mem_type {
+ unsigned int prot_pte;
+ unsigned int prot_l1;
+ unsigned int prot_sect;
+};
+
+const struct mem_type *get_mem_type(unsigned int type);
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+void __init bootmem_init(void);
+void uc32_mm_memblock_reserve(void);
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
new file mode 100644
index 000000000000..7bf3d588631f
--- /dev/null
+++ b/arch/unicore32/mm/mmu.c
@@ -0,0 +1,533 @@
+/*
+ * linux/arch/unicore32/mm/mmu.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/memblock.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+
+#include <mach/map.h>
+
+#include "mm.h"
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * The pmd table for the upper-most set of pages.
+ */
+pmd_t *top_pmd;
+
+pgprot_t pgprot_user;
+EXPORT_SYMBOL(pgprot_user);
+
+pgprot_t pgprot_kernel;
+EXPORT_SYMBOL(pgprot_kernel);
+
+static int __init noalign_setup(char *__unused)
+{
+ cr_alignment &= ~CR_A;
+ cr_no_alignment &= ~CR_A;
+ set_cr(cr_alignment);
+ return 1;
+}
+__setup("noalign", noalign_setup);
+
+void adjust_cr(unsigned long mask, unsigned long set)
+{
+ unsigned long flags;
+
+ mask &= ~CR_A;
+
+ set &= mask;
+
+ local_irq_save(flags);
+
+ cr_no_alignment = (cr_no_alignment & ~mask) | set;
+ cr_alignment = (cr_alignment & ~mask) | set;
+
+ set_cr((get_cr() & ~mask) | set);
+
+ local_irq_restore(flags);
+}
+
+struct map_desc {
+ unsigned long virtual;
+ unsigned long pfn;
+ unsigned long length;
+ unsigned int type;
+};
+
+#define PROT_PTE_DEVICE (PTE_PRESENT | PTE_YOUNG | \
+ PTE_DIRTY | PTE_READ | PTE_WRITE)
+#define PROT_SECT_DEVICE (PMD_TYPE_SECT | PMD_PRESENT | \
+ PMD_SECT_READ | PMD_SECT_WRITE)
+
+static struct mem_type mem_types[] = {
+ [MT_DEVICE] = { /* Strongly ordered */
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ .prot_sect = PROT_SECT_DEVICE,
+ },
+ /*
+ * MT_KUSER: pte for vecpage -- cacheable,
+ * and sect for unigfx mmap -- noncacheable
+ */
+ [MT_KUSER] = {
+ .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
+ PTE_CACHEABLE | PTE_READ | PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ .prot_sect = PROT_SECT_DEVICE,
+ },
+ [MT_HIGH_VECTORS] = {
+ .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
+ PTE_CACHEABLE | PTE_READ | PTE_WRITE |
+ PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ },
+ [MT_MEMORY] = {
+ .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
+ PTE_WRITE | PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
+ PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC,
+ },
+ [MT_ROM] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
+ PMD_SECT_READ,
+ },
+};
+
+const struct mem_type *get_mem_type(unsigned int type)
+{
+ return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
+}
+EXPORT_SYMBOL(get_mem_type);
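For orientation, the small sketch below (illustrative only, mirroring what __uc32_ioremap_pfn_caller in ioremap.c above already does) shows how one of these entries is looked up and turned into page protections; the function name and its vaddr/pfn/size parameters are assumptions, not part of the patch:

    /* Illustrative only: how a mem_type feeds a page-based device mapping. */
    static int example_map_device_pages(unsigned long vaddr, unsigned long pfn,
    				    size_t size)
    {
    	const struct mem_type *type = get_mem_type(MT_DEVICE);

    	if (!type)
    		return -EINVAL;

    	/* prot_pte drives PTE mappings; prot_sect would drive set_pmd(). */
    	return ioremap_page_range(vaddr, vaddr + size,
    				  __pfn_to_phys(pfn),
    				  __pgprot(type->prot_pte));
    }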
+
+/*
+ * Adjust the PMD section entries according to the CPU in use.
+ */
+static void __init build_mem_type_table(void)
+{
+ pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE);
+ pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG |
+ PTE_DIRTY | PTE_READ | PTE_WRITE |
+ PTE_EXEC | PTE_CACHEABLE);
+}
+
+#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
+
+static void __init *early_alloc(unsigned long sz)
+{
+ void *ptr = __va(memblock_alloc(sz, sz));
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
+ unsigned long prot)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ __pmd_populate(pmd, __pa(pte) | prot);
+ }
+ BUG_ON(pmd_bad(*pmd));
+ return pte_offset_kernel(pmd, addr);
+}
+
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn,
+ const struct mem_type *type)
+{
+ pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+ do {
+ set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
+
+ /*
+ * Try a section mapping - end, addr and phys must all be aligned
+ * to a section boundary.
+ */
+ if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+ pmd_t *p = pmd;
+
+ do {
+ set_pmd(pmd, __pmd(phys | type->prot_sect));
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
+
+ flush_pmd_entry(p);
+ } else {
+ /*
+ * No need to loop; pte's aren't interested in the
+ * individual L1 entries.
+ */
+ alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
+ }
+}
+
+/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'. We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+ unsigned long phys, addr, length, end;
+ const struct mem_type *type;
+ pgd_t *pgd;
+
+ if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+ printk(KERN_WARNING "BUG: not creating mapping for "
+ "0x%08llx at 0x%08lx in user region\n",
+ __pfn_to_phys((u64)md->pfn), md->virtual);
+ return;
+ }
+
+ if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
+ "overlaps vmalloc space\n",
+ __pfn_to_phys((u64)md->pfn), md->virtual);
+ }
+
+ type = &mem_types[md->type];
+
+ addr = md->virtual & PAGE_MASK;
+ phys = (unsigned long)__pfn_to_phys(md->pfn);
+ length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+
+ if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
+ printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
+ "be mapped using pages, ignoring.\n",
+ __pfn_to_phys(md->pfn), addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ end = addr + length;
+ do {
+ unsigned long next = pgd_addr_end(addr, end);
+
+ alloc_init_section(pgd, addr, next, phys, type);
+
+ phys += next - addr;
+ addr = next;
+ } while (pgd++, addr != end);
+}
+
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static int __init early_vmalloc(char *arg)
+{
+ unsigned long vmalloc_reserve = memparse(arg, NULL);
+
+ if (vmalloc_reserve < SZ_16M) {
+ vmalloc_reserve = SZ_16M;
+ printk(KERN_WARNING
+ "vmalloc area too small, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+
+ if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+ vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+ printk(KERN_WARNING
+ "vmalloc area is too big, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+
+ vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
+ return 0;
+}
+early_param("vmalloc", early_vmalloc);
+
+static phys_addr_t lowmem_limit __initdata = SZ_1G;
+
+static void __init sanity_check_meminfo(void)
+{
+ int i, j;
+
+ lowmem_limit = __pa(vmalloc_min - 1) + 1;
+ memblock_set_current_limit(lowmem_limit);
+
+ for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+ struct membank *bank = &meminfo.bank[j];
+ *bank = meminfo.bank[i];
+ j++;
+ }
+ meminfo.nr_banks = j;
+}
+
+static inline void prepare_page_table(void)
+{
+ unsigned long addr;
+ phys_addr_t end;
+
+ /*
+ * Clear out all the mappings below the kernel image.
+ */
+ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ /*
+ * Find the end of the first block of lowmem.
+ */
+ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+ if (end >= lowmem_limit)
+ end = lowmem_limit;
+
+ /*
+ * Clear out all the kernel space mappings, except for the first
+ * memory bank, up to the end of the vmalloc region.
+ */
+ for (addr = __phys_to_virt(end);
+ addr < VMALLOC_END; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+}
+
+/*
+ * Reserve the special regions of memory
+ */
+void __init uc32_mm_memblock_reserve(void)
+{
+ /*
+ * Reserve the page tables. These are already in use,
+ * and can only be in node 0.
+ */
+ memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+
+#ifdef CONFIG_PUV3_UNIGFX
+ /*
+ * These should likewise go elsewhere. They pre-reserve the
+ * screen/video memory region in the 48M-64M range of main system memory.
+ */
+ memblock_reserve(PKUNITY_UNIGFX_MMAP_BASE, PKUNITY_UNIGFX_MMAP_SIZE);
+ memblock_reserve(PKUNITY_UVC_MMAP_BASE, PKUNITY_UVC_MMAP_SIZE);
+#endif
+}
+
+/*
+ * Set up the device mappings. Since we clear out the page tables for all
+ * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function: you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
+ */
+static void __init devicemaps_init(void)
+{
+ struct map_desc map;
+ unsigned long addr;
+ void *vectors;
+
+ /*
+ * Allocate the vector page early.
+ */
+ vectors = early_alloc(PAGE_SIZE);
+
+ for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ /*
+ * Create a mapping for UniGFX VRAM
+ */
+#ifdef CONFIG_PUV3_UNIGFX
+ map.pfn = __phys_to_pfn(PKUNITY_UNIGFX_MMAP_BASE);
+ map.virtual = KUSER_UNIGFX_BASE;
+ map.length = PKUNITY_UNIGFX_MMAP_SIZE;
+ map.type = MT_KUSER;
+ create_mapping(&map);
+#endif
+
+ /*
+ * Create a mapping for the machine vectors at the high-vectors
+ * location (0xffff0000). If we aren't using high-vectors, also
+ * create a mapping at the low-vectors virtual address.
+ */
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.virtual = VECTORS_BASE;
+ map.length = PAGE_SIZE;
+ map.type = MT_HIGH_VECTORS;
+ create_mapping(&map);
+
+ /*
+ * Create a mapping for the kuser page at the special
+ * location (0xbfff0000) to the same vectors location.
+ */
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.virtual = KUSER_VECPAGE_BASE;
+ map.length = PAGE_SIZE;
+ map.type = MT_KUSER;
+ create_mapping(&map);
+
+ /*
+ * Finally flush the caches and tlb to ensure that we're in a
+ * consistent state wrt the writebuffer. This also ensures that
+ * any write-allocated cache lines in the vector page are written
+ * back. After this point, we can start to touch devices again.
+ */
+ local_flush_tlb_all();
+ flush_cache_all();
+}
+
+static void __init map_lowmem(void)
+{
+ struct memblock_region *reg;
+
+ /* Map all the lowmem memory banks. */
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
+
+ if (end > lowmem_limit)
+ end = lowmem_limit;
+ if (start >= end)
+ break;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+ }
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(void)
+{
+ void *zero_page;
+
+ build_mem_type_table();
+ sanity_check_meminfo();
+ prepare_page_table();
+ map_lowmem();
+ devicemaps_init();
+
+ top_pmd = pmd_off_k(0xffff0000);
+
+ /* allocate the zero page. */
+ zero_page = early_alloc(PAGE_SIZE);
+
+ bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+ __flush_dcache_page(NULL, empty_zero_page);
+}
+
+/*
+ * In order to soft-boot, we need to insert a 1:1 mapping in place of
+ * the user-mode pages. This will then ensure that we have predictable
+ * results when turning the MMU off.
+ */
+void setup_mm_for_reboot(char mode)
+{
+ unsigned long base_pmdval;
+ pgd_t *pgd;
+ int i;
+
+ /*
+ * We need access to user-mode page tables here. For kernel threads
+ * we don't have any user-mode mappings so we use the context that we
+ * "borrowed".
+ */
+ pgd = current->active_mm->pgd;
+
+ base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT;
+
+ for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
+ unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
+ pmd_t *pmd;
+
+ pmd = pmd_off(pgd, i << PGDIR_SHIFT);
+ set_pmd(pmd, __pmd(pmdval));
+ flush_pmd_entry(pmd);
+ }
+
+ local_flush_tlb_all();
+}
+
+/*
+ * Take care of architecture specific things when placing a new PTE into
+ * a page table, or changing an existing PTE. Basically, there are two
+ * things that we need to take care of:
+ *
+ * 1. If PG_dcache_clean is not set for the page, we need to ensure
+ * that any cache entries for the kernel's virtual memory
+ * range are written back to the page.
+ * 2. If we have multiple shared mappings of the same space in
+ * an object, we need to deal with the cache aliasing issues.
+ *
+ * Note that the pte lock will be held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ unsigned long pfn = pte_pfn(*ptep);
+ struct address_space *mapping;
+ struct page *page;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ page = pfn_to_page(pfn);
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ __flush_dcache_page(mapping, page);
+ if (mapping)
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
+}
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c
new file mode 100644
index 000000000000..08b8d4295e70
--- /dev/null
+++ b/arch/unicore32/mm/pgd.c
@@ -0,0 +1,102 @@
+/*
+ * linux/arch/unicore32/mm/pgd.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
+
+/*
+ * need to get a 4k page for level 1
+ */
+pgd_t *get_pgd_slow(struct mm_struct *mm)
+{
+ pgd_t *new_pgd, *init_pgd;
+ pmd_t *new_pmd, *init_pmd;
+ pte_t *new_pte, *init_pte;
+
+ new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
+ if (!new_pgd)
+ goto no_pgd;
+
+ memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+
+ /*
+ * Copy over the kernel and IO PGD entries
+ */
+ init_pgd = pgd_offset_k(0);
+ memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+ (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+ clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+
+ if (!vectors_high()) {
+ /*
+ * On UniCore, the first page must always be allocated since it
+ * contains the machine vectors.
+ */
+ new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
+ if (!new_pmd)
+ goto no_pmd;
+
+ new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
+ if (!new_pte)
+ goto no_pte;
+
+ init_pmd = pmd_offset((pud_t *)init_pgd, 0);
+ init_pte = pte_offset_map(init_pmd, 0);
+ set_pte(new_pte, *init_pte);
+ pte_unmap(init_pte);
+ pte_unmap(new_pte);
+ }
+
+ return new_pgd;
+
+no_pte:
+ pmd_free(mm, new_pmd);
+no_pmd:
+ free_pages((unsigned long)new_pgd, 0);
+no_pgd:
+ return NULL;
+}
+
+void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
+{
+ pmd_t *pmd;
+ pgtable_t pte;
+
+ if (!pgd)
+ return;
+
+ /* pgd is always present and good */
+ pmd = pmd_off(pgd, 0);
+ if (pmd_none(*pmd))
+ goto free;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ goto free;
+ }
+
+ pte = pmd_pgtable(*pmd);
+ pmd_clear(pmd);
+ pte_free(mm, pte);
+ pmd_free(mm, pmd);
+free:
+ free_pages((unsigned long) pgd, 0);
+}
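These two routines are normally what the generic pgd_alloc()/pgd_free() hooks resolve to; the wiring would presumably live in the architecture's asm/pgalloc.h as something along the lines of the sketch below (shown for illustration only, not taken from this patch):

    /* Presumed wiring in asm/pgalloc.h; not part of this diff. */
    #define pgd_alloc(mm)		get_pgd_slow(mm)
    #define pgd_free(mm, pgd)	free_pgd_slow(mm, pgd)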
diff --git a/arch/unicore32/mm/proc-macros.S b/arch/unicore32/mm/proc-macros.S
new file mode 100644
index 000000000000..51560d68c894
--- /dev/null
+++ b/arch/unicore32/mm/proc-macros.S
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/unicore32/mm/proc-macros.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * We need constants.h for:
+ * VMA_VM_MM
+ * VMA_VM_FLAGS
+ * VM_EXEC
+ */
+#include <generated/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+
+/*
+ * the cache line sizes of the I and D cache are the same
+ */
+#define CACHE_LINESIZE 32
+
+/*
+ * This is the maximum size of an area which will be invalidated
+ * using the single invalidate entry instructions. Anything larger
+ * than this, and we go for the whole cache.
+ *
+ * This value should be chosen such that we use the cheapest
+ * alternative.
+ */
+#ifdef CONFIG_CPU_UCV2
+#define MAX_AREA_SIZE 0x800 /* 64 cache lines */
+#endif
+
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+ .macro vma_vm_mm, rd, rn
+ ldw \rd, [\rn+], #VMA_VM_MM
+ .endm
+
+/*
+ * vma_vm_flags - get vma->vm_flags
+ */
+ .macro vma_vm_flags, rd, rn
+ ldw \rd, [\rn+], #VMA_VM_FLAGS
+ .endm
+
+ .macro tsk_mm, rd, rn
+ ldw \rd, [\rn+], #TI_TASK
+ ldw \rd, [\rd+], #TSK_ACTIVE_MM
+ .endm
+
+/*
+ * act_mm - get current->active_mm
+ */
+ .macro act_mm, rd
+ andn \rd, sp, #8128
+ andn \rd, \rd, #63
+ ldw \rd, [\rd+], #TI_TASK
+ ldw \rd, [\rd+], #TSK_ACTIVE_MM
+ .endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+ .macro mmid, rd, rn
+ ldw \rd, [\rn+], #MM_CONTEXT_ID
+ .endm
+
+/*
+ * mask_asid - mask the ASID from the context ID
+ */
+ .macro asid, rd, rn
+ and \rd, \rn, #255
+ .endm
+
+ .macro crval, clear, mmuset, ucset
+ .word \clear
+ .word \mmuset
+ .endm
+
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+/*
+ * va2pa va, pa, tbl, msk, off, err
+ * This macro is used to translate a virtual address to its physical address.
+ *
+ * va: virtual address
+ * pa: physical address, result is stored in this register
+ * tbl, msk, off: temp registers, will be destroyed
+ * err: jump to the error label if the physical address does not exist
+ * NOTE: all regs must be different
+ */
+ .macro va2pa, va, pa, tbl, msk, off, err=990f
+ movc \pa, p0.c2, #0
+ mov \off, \va >> #22 @ off <- index of 1st page table
+ adr \tbl, 910f @ tbl <- table of 1st page table
+900: @ ---- handle 1, 2 page table
+ add \pa, \pa, #PAGE_OFFSET @ pa <- virt addr of page table
+ ldw \pa, [\pa+], \off << #2 @ pa <- the content of pt
+ cand.a \pa, #4 @ test exist bit
+ beq \err @ if not exist
+ and \off, \pa, #3 @ off <- the last 2 bits
+ add \tbl, \tbl, \off << #3 @ cmove table pointer
+ ldw \msk, [\tbl+], #0 @ get the mask
+ ldw pc, [\tbl+], #4
+930: @ ---- handle 2nd page table
+ and \pa, \pa, \msk @ pa <- phys addr of 2nd pt
+ mov \off, \va << #10
+ cntlo \tbl, \msk @ use tbl as temp reg
+ mov \off, \off >> \tbl
+ mov \off, \off >> #2 @ off <- index of 2nd pt
+ adr \tbl, 920f @ tbl <- table of 2nd pt
+ b 900b
+910: @ 1st level page table
+ .word 0xfffff000, 930b @ second level page table
+ .word 0xfffffc00, 930b @ second level large page table
+ .word 0x00000000, \err @ invalid
+ .word 0xffc00000, 980f @ super page
+
+920: @ 2nd level page table
+ .word 0xfffff000, 980f @ page
+ .word 0xffffc000, 980f @ middle page
+ .word 0xffff0000, 980f @ large page
+ .word 0x00000000, \err @ invalid
+980:
+ andn \tbl, \va, \msk
+ and \pa, \pa, \msk
+ or \pa, \pa, \tbl
+990:
+ .endm
+#endif
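As a reading aid for the assembly above, a C-level sketch of the walk that va2pa performs for the common second-level 4 KB page case might look as follows; this is illustrative only (it assumes the exist bit is bit 2 and uses the 0xfffff000 masks from the 910/920 tables), and the function name is hypothetical:

    /*
     * Illustrative only: the translation va2pa performs when the entry is a
     * second-level 4 KB page. 'ttb' is the physical base of the first-level
     * table (p0.c2 in the macro). Returns 0 where the macro would branch to
     * its error label.
     */
    static unsigned long example_va2pa(unsigned long va, unsigned long ttb)
    {
    	unsigned long *tbl = (unsigned long *)(ttb + PAGE_OFFSET);
    	unsigned long ent = tbl[va >> 22];		/* first-level entry */

    	if (!(ent & 4))					/* exist bit clear */
    		return 0;

    	tbl = (unsigned long *)((ent & 0xfffff000) + PAGE_OFFSET);
    	ent = tbl[(va >> 12) & 0x3ff];			/* second-level entry */
    	if (!(ent & 4))
    		return 0;

    	return (ent & 0xfffff000) | (va & 0x00000fff);	/* 4 KB page case */
    }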
+
+ .macro dcacheline_flush, addr, t1, t2
+ mov \t1, \addr << #20
+ ldw \t2, =_stext @ _stext must ALIGN(4096)
+ add \t2, \t2, \t1 >> #20
+ ldw \t1, [\t2+], #0x0000
+ ldw \t1, [\t2+], #0x1000
+ ldw \t1, [\t2+], #0x2000
+ ldw \t1, [\t2+], #0x3000
+ .endm
diff --git a/arch/unicore32/mm/proc-syms.c b/arch/unicore32/mm/proc-syms.c
new file mode 100644
index 000000000000..f30071e3665d
--- /dev/null
+++ b/arch/unicore32/mm/proc-syms.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/unicore32/mm/proc-syms.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+
+EXPORT_SYMBOL(cpu_dcache_clean_area);
+EXPORT_SYMBOL(cpu_set_pte);
+
+EXPORT_SYMBOL(__cpuc_dma_flush_range);
+EXPORT_SYMBOL(__cpuc_dma_clean_range);
diff --git a/arch/unicore32/mm/proc-ucv2.S b/arch/unicore32/mm/proc-ucv2.S
new file mode 100644
index 000000000000..9d296092e362
--- /dev/null
+++ b/arch/unicore32/mm/proc-ucv2.S
@@ -0,0 +1,134 @@
+/*
+ * linux/arch/unicore32/mm/proc-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/hwcap.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+ENTRY(cpu_proc_fin)
+ stm.w (lr), [sp-]
+ mov ip, #PSR_R_BIT | PSR_I_BIT | PRIV_MODE
+ mov.a asr, ip
+ b.l __cpuc_flush_kern_all
+ ldm.w (pc), [sp]+
+
+/*
+ * cpu_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the
+ * same state as it would be if it had been reset, and branch
+ * to what would be the reset vector.
+ *
+ * - loc - location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_reset)
+ mov ip, #0
+ movc p0.c5, ip, #28 @ Cache invalidate all
+ nop8
+
+ movc p0.c6, ip, #6 @ TLB invalidate all
+ nop8
+
+ movc ip, p0.c1, #0 @ ctrl register
+ or ip, ip, #0x2000 @ vector base address
+ andn ip, ip, #0x000f @ ............idam
+ movc p0.c1, ip, #0 @ disable caches and mmu
+ nop
+ mov pc, r0 @ jump to loc
+ nop8
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (eg, wait for interrupt).
+ *
+ * IRQs are already disabled.
+ */
+ENTRY(cpu_do_idle)
+ mov r0, #0 @ PCI address
+ .rept 8
+ ldw r1, [r0]
+ .endr
+ mov pc, lr
+
+ENTRY(cpu_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 101f
+ mov r9, #PAGE_SZ
+ sub r9, r9, #1 @ PAGE_MASK
+1: va2pa r0, r10, r11, r12, r13 @ r10 is PA
+ b 3f
+2: cand.a r0, r9
+ beq 1b
+3: movc p0.c5, r10, #11 @ clean D entry
+ nop8
+ add r0, r0, #CACHE_LINESIZE
+ add r10, r10, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bua 2b
+ mov pc, lr
+#endif
+101: mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+
+ mov pc, lr
+
+/*
+ * cpu_do_switch_mm(pgd_phys)
+ *
+ * Set the translation table base pointer to be pgd_phys
+ *
+ * - pgd_phys - physical address of new pgd
+ *
+ * It is assumed that:
+ * - we are not using split page tables
+ */
+ .align 5
+ENTRY(cpu_do_switch_mm)
+ movc p0.c2, r0, #0 @ update page table ptr
+ nop8
+
+ movc p0.c6, ip, #6 @ TLB invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * cpu_set_pte(ptep, pte)
+ *
+ * Set a level 2 translation table entry.
+ *
+ * - ptep - pointer to level 2 translation table entry
+ * - pte - PTE value to store
+ */
+ .align 5
+ENTRY(cpu_set_pte)
+ stw r1, [r0]
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ sub r2, r0, #PAGE_OFFSET
+ movc p0.c5, r2, #11 @ Dcache clean line
+ nop8
+#else
+ mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+ @dcacheline_flush r0, r2, ip
+#endif
+ mov pc, lr
+
diff --git a/arch/unicore32/mm/tlb-ucv2.S b/arch/unicore32/mm/tlb-ucv2.S
new file mode 100644
index 000000000000..061d455f9a15
--- /dev/null
+++ b/arch/unicore32/mm/tlb-ucv2.S
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/unicore32/mm/tlb-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include "proc-macros.S"
+
+/*
+ * __cpu_flush_user_tlb_range(start, end, vma)
+ *
+ * Invalidate a range of TLB entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - vma - vma_struct describing address range
+ */
+ENTRY(__cpu_flush_user_tlb_range)
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ mov r0, r0 >> #PAGE_SHIFT @ align address
+ mov r0, r0 << #PAGE_SHIFT
+ vma_vm_flags r2, r2 @ get vma->vm_flags
+1:
+ movc p0.c6, r0, #3
+ nop8
+
+ cand.a r2, #VM_EXEC @ Executable area ?
+ beq 2f
+
+ movc p0.c6, r0, #5
+ nop8
+2:
+ add r0, r0, #PAGE_SZ
+ csub.a r0, r1
+ beb 1b
+#else
+ movc p0.c6, r0, #2
+ nop8
+
+ cand.a r2, #VM_EXEC @ Executable area ?
+ beq 2f
+
+ movc p0.c6, r0, #4
+ nop8
+2:
+#endif
+ mov pc, lr
+
+/*
+ * __cpu_flush_kern_tlb_range(start,end)
+ *
+ * Invalidate a range of kernel TLB entries
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ */
+ENTRY(__cpu_flush_kern_tlb_range)
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ mov r0, r0 >> #PAGE_SHIFT @ align address
+ mov r0, r0 << #PAGE_SHIFT
+1:
+ movc p0.c6, r0, #3
+ nop8
+
+ movc p0.c6, r0, #5
+ nop8
+
+ add r0, r0, #PAGE_SZ
+ csub.a r0, r1
+ beb 1b
+#else
+ movc p0.c6, r0, #2
+ nop8
+
+ movc p0.c6, r0, #4
+ nop8
+#endif
+ mov pc, lr
+
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d5ed94d30aad..0215fcb2684f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -67,6 +67,7 @@ config X86
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select USE_GENERIC_SMP_HELPERS if SMP
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
@@ -217,10 +218,6 @@ config X86_HT
def_bool y
depends on SMP
-config X86_TRAMPOLINE
- def_bool y
- depends on SMP || (64BIT && ACPI_SLEEP)
-
config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
@@ -382,6 +379,8 @@ config X86_INTEL_CE
depends on X86_32
depends on X86_EXTENDED_PLATFORM
select X86_REBOOTFIXUPS
+ select OF
+ select OF_EARLY_FLATTREE
---help---
Select for the Intel CE media processor (CE4100) SOC.
This option compiles in support for the CE4100 SOC for settop
@@ -811,7 +810,7 @@ config X86_LOCAL_APIC
config X86_IO_APIC
def_bool y
- depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+ depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
config X86_VISWS_APIC
def_bool y
@@ -1705,7 +1704,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
depends on NUMA
config USE_PERCPU_NUMA_NODE_ID
- def_bool X86_64
+ def_bool y
depends on NUMA
menu "Power management and ACPI options"
@@ -2066,9 +2065,10 @@ config SCx200HR_TIMER
config OLPC
bool "One Laptop Per Child support"
+ depends on !X86_PAE
select GPIOLIB
- select OLPC_OPENFIRMWARE
- depends on !X86_64 && !X86_PAE
+ select OF
+ select OF_PROMTREE if PROC_DEVICETREE
---help---
Add support for detecting the unique features of the OLPC
XO hardware.
@@ -2079,21 +2079,6 @@ config OLPC_XO1
---help---
Add support for non-essential features of the OLPC XO-1 laptop.
-config OLPC_OPENFIRMWARE
- bool "Support for OLPC's Open Firmware"
- depends on !X86_64 && !X86_PAE
- default n
- select OF
- help
- This option adds support for the implementation of Open Firmware
- that is used on the OLPC XO-1 Children's Machine.
- If unsure, say N here.
-
-config OLPC_OPENFIRMWARE_DT
- bool
- default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE
- select OF_PROMTREE
-
endif # X86_32
config AMD_NB
@@ -2104,6 +2089,8 @@ source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
+source "drivers/vbus/Kconfig"
+
endmenu
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index e1e60c7d5813..e0e6340c8dad 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -873,22 +873,18 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
- if (ret) {
- crypto_free_ablkcipher(ctr_tfm);
- return ret;
- }
+ if (ret)
+ goto out_free_ablkcipher;
+ ret = -ENOMEM;
req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ablkcipher(ctr_tfm);
- return -EINVAL;
- }
+ if (!req)
+ goto out_free_ablkcipher;
req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
- crypto_free_ablkcipher(ctr_tfm);
- return -ENOMEM;
- }
+ if (!req_data)
+ goto out_free_request;
+
memset(req_data->iv, 0, sizeof(req_data->iv));
/* Clear the data in the hash sub key container to zero.*/
@@ -913,8 +909,10 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
if (!ret)
ret = req_data->result.err;
}
- ablkcipher_request_free(req);
kfree(req_data);
+out_free_request:
+ ablkcipher_request_free(req);
+out_free_ablkcipher:
crypto_free_ablkcipher(ctr_tfm);
return ret;
}
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 518bb99c3394..0ed78961b60f 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -851,4 +851,5 @@ ia32_sys_call_table:
.quad sys_fanotify_init
.quad sys32_fanotify_mark
.quad sys_prlimit64 /* 340 */
+ .quad compat_sys_clock_adjtime
ia32_syscall_end:
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 211ca3f7fd16..12e0e7dd869c 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,6 +29,7 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
+#include <asm/trampoline.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -88,6 +89,7 @@ extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
@@ -112,11 +114,11 @@ static inline void acpi_disable_pci(void)
acpi_noirq_set();
}
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
-extern unsigned long acpi_wakeup_address;
+extern const unsigned char acpi_wakeup_code[];
+#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
/* early initialization routine */
extern void acpi_reserve_wakeup_memory(void);
@@ -185,15 +187,7 @@ struct bootnode;
#ifdef CONFIG_ACPI_NUMA
extern int acpi_numa;
-extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
- unsigned long end);
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-#ifdef CONFIG_NUMA_EMU
-extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
- int num_nodes);
-#endif
+extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
#define acpi_unlazy_tlb(x) leave_mm(x)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 64dc82ee19f0..e264ae5a1443 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -9,23 +9,20 @@ struct amd_nb_bus_dev_range {
u8 dev_limit;
};
-extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct pci_device_id amd_nb_misc_ids[];
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
struct bootnode;
extern int early_is_amd_nb(u32 value);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
-extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int amd_scan_nodes(void);
-
-#ifdef CONFIG_NUMA_EMU
-extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
-extern void amd_get_nodes(struct bootnode *nodes);
-#endif
+extern int amd_numa_init(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, int);
struct amd_northbridge {
struct pci_dev *misc;
+ struct pci_dev *link;
};
struct amd_northbridge_info {
@@ -37,6 +34,7 @@ extern struct amd_northbridge_info amd_northbridges;
#define AMD_NB_GART 0x1
#define AMD_NB_L3_INDEX_DISABLE 0x2
+#define AMD_NB_L3_PARTITIONING 0x4
#ifdef CONFIG_AMD_NB
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3c896946f4cc..dbd558cbd2b1 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -240,7 +240,7 @@ extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern void enable_NMI_through_LVT0(void);
-extern int apic_force_enable(void);
+extern int apic_force_enable(unsigned long addr);
/*
* On 32bit this is mach-xxx local
@@ -307,8 +307,6 @@ struct apic {
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
- int (*apicid_to_node)(int logical_apicid);
- int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
void (*setup_portio_remap)(void);
@@ -356,6 +354,23 @@ struct apic {
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Called very early during boot from get_smp_config(). It should
+ * return the logical apicid. x86_[bios]_cpu_to_apicid is
+ * initialized before this function is called.
+ *
+ * If logical apicid can't be determined that early, the function
+ * may return BAD_APICID. Logical apicid will be configured after
+ * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
+ * won't be applied properly during early boot in this case.
+ */
+ int (*x86_32_early_logical_apicid)(int cpu);
+
+ /* determine CPU -> NUMA node mapping */
+ int (*x86_32_numa_cpu_node)(int cpu);
+#endif
};
/*
@@ -503,6 +518,11 @@ extern struct apic apic_noop;
extern struct apic apic_default;
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+ return BAD_APICID;
+}
+
/*
* Set up the logical destination ID.
*
@@ -522,7 +542,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}
-extern int default_apicid_to_node(int logical_apicid);
+extern int default_x86_32_numa_cpu_node(int cpu);
#endif
@@ -558,12 +578,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma
*retmap = *phys_map;
}
-/* Mapping from cpu number to logical apicid */
-static inline int default_cpu_to_logical_apicid(int cpu)
-{
- return 1 << cpu;
-}
-
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -596,8 +610,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);
#endif /* CONFIG_X86_LOCAL_APIC */
-#ifdef CONFIG_X86_32
-extern u8 cpu_2_logical_apicid[NR_CPUS];
-#endif
-
#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 47a30ff8e517..d87988bacf3e 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -426,4 +426,16 @@ struct local_apic {
#else
#define BAD_APICID 0xFFFFu
#endif
+
+enum ioapic_irq_destination_types {
+ dest_Fixed = 0,
+ dest_LowestPrio = 1,
+ dest_SMI = 2,
+ dest__reserved_1 = 3,
+ dest_NMI = 4,
+ dest_INIT = 5,
+ dest__reserved_2 = 6,
+ dest_ExtINT = 7
+};
+
#endif /* _ASM_X86_APICDEF_H */
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index c8bfe63a06de..e020d88ec02d 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -12,6 +12,7 @@
/* setup data types */
#define SETUP_NONE 0
#define SETUP_E820_EXT 1
+#define SETUP_DTB 2
/* extensible setup data list node */
struct setup_data {
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 220e2ea08e80..91f3e087cf21 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -160,6 +160,7 @@
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index e99d55d74df5..908b96957d88 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -96,7 +96,7 @@ extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
-extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
+extern void parse_e820_ext(struct setup_data *data);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 57650ab4a5f5..1cd6d26a0a8d 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
-.irpc idx, "01234567"
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
BUILD_INTERRUPT3(invalidate_interrupt\idx,
(INVALIDATE_TLB_VECTOR_START)+\idx,
smp_invalidate_interrupt)
+.endif
.endr
#endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 0274ec5a7e62..bb9efe8706e2 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void);
extern void invalidate_interrupt5(void);
extern void invalidate_interrupt6(void);
extern void invalidate_interrupt7(void);
+extern void invalidate_interrupt8(void);
+extern void invalidate_interrupt9(void);
+extern void invalidate_interrupt10(void);
+extern void invalidate_interrupt11(void);
+extern void invalidate_interrupt12(void);
+extern void invalidate_interrupt13(void);
+extern void invalidate_interrupt14(void);
+extern void invalidate_interrupt15(void);
+extern void invalidate_interrupt16(void);
+extern void invalidate_interrupt17(void);
+extern void invalidate_interrupt18(void);
+extern void invalidate_interrupt19(void);
+extern void invalidate_interrupt20(void);
+extern void invalidate_interrupt21(void);
+extern void invalidate_interrupt22(void);
+extern void invalidate_interrupt23(void);
+extern void invalidate_interrupt24(void);
+extern void invalidate_interrupt25(void);
+extern void invalidate_interrupt26(void);
+extern void invalidate_interrupt27(void);
+extern void invalidate_interrupt28(void);
+extern void invalidate_interrupt29(void);
+extern void invalidate_interrupt30(void);
+extern void invalidate_interrupt31(void);
extern void irq_move_cleanup_interrupt(void);
extern void reboot_interrupt(void);
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 36fb1a6a5109..8dbe353e41e1 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
unsigned long page_size_mask);
-extern unsigned long __initdata e820_table_start;
-extern unsigned long __meminitdata e820_table_end;
-extern unsigned long __meminitdata e820_table_top;
+extern unsigned long __initdata pgt_buf_start;
+extern unsigned long __meminitdata pgt_buf_end;
+extern unsigned long __meminitdata pgt_buf_top;
#endif /* _ASM_X86_INIT_32_H */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index f327d386d6cc..c4bd267dfc50 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -63,17 +63,6 @@ union IO_APIC_reg_03 {
} __attribute__ ((packed)) bits;
};
-enum ioapic_irq_destination_types {
- dest_Fixed = 0,
- dest_LowestPrio = 1,
- dest_SMI = 2,
- dest__reserved_1 = 3,
- dest_NMI = 4,
- dest_INIT = 5,
- dest__reserved_2 = 6,
- dest_ExtINT = 7
-};
-
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
@@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry {
index : 15;
} __attribute__ ((packed));
+#define IOAPIC_AUTO -1
+#define IOAPIC_EDGE 0
+#define IOAPIC_LEVEL 1
+
#ifdef CONFIG_X86_IO_APIC
/*
@@ -150,11 +143,6 @@ extern int timer_through_8259;
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
-extern u8 io_apic_unique_id(u8 id);
-extern int io_apic_get_unique_id(int ioapic, int apic_id);
-extern int io_apic_get_version(int ioapic);
-extern int io_apic_get_redir_entries(int ioapic);
-
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
@@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi);
extern void ioapic_and_gsi_init(void);
extern void ioapic_insert_resources(void);
+int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr);
+
extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
@@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void);
extern void mp_save_irq(struct mpc_intsrc *m);
+extern void disable_ioapic_support(void);
+
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
@@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr) { return 0; }
+
+static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void)
+{
+ return NULL;
+}
+
+static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { }
+static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent)
+{
+ return -ENOMEM;
+}
+
+static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { }
+static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent)
+{
+ return -ENOMEM;
+}
+
+static inline void mp_save_irq(struct mpc_intsrc *m) { };
+static inline void disable_ioapic_support(void) { }
#endif
#endif /* _ASM_X86_IO_APIC_H */
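
The new inline stubs above let generic APIC code call the save/mask/restore helpers unconditionally and simply bail out on the error return when IO-APIC support is compiled out. A small standalone sketch of that caller pattern, with the stubs copied from the hunk (the caller itself is illustrative, not a kernel function):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct IO_APIC_route_entry { unsigned int raw; };

/* stubs as added for !CONFIG_X86_IO_APIC */
static struct IO_APIC_route_entry **alloc_ioapic_entries(void) { return NULL; }
static int save_IO_APIC_setup(struct IO_APIC_route_entry **ent) { return -ENOMEM; }
static void free_ioapic_entries(struct IO_APIC_route_entry **ent) { (void)ent; }

int main(void)
{
	struct IO_APIC_route_entry **ent = alloc_ioapic_entries();

	if (!ent || save_IO_APIC_setup(ent) < 0) {
		fprintf(stderr, "no IO-APIC state to save, skipping\n");
		free_ioapic_entries(ent);
		return 0;
	}

	/* ... a suspend path would mask and later restore the entries ... */
	free_ioapic_entries(ent);
	return 0;
}
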
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 0b7228268a63..615fa9061b57 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector);
-extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
- int vector);
-extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
- int vector);
/* Avoid include hell */
#define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
}
#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+ int vector);
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_allbutself(int vector);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index c704b38c57a2..ba870bb6dd8e 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -10,9 +10,6 @@
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
-/* Even though we don't support this, supply it to appease OF */
-static inline void irq_dispose_mapping(unsigned int virq) { }
-
static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h
new file mode 100644
index 000000000000..423bbbddf36d
--- /dev/null
+++ b/arch/x86/include/asm/irq_controller.h
@@ -0,0 +1,12 @@
+#ifndef __IRQ_CONTROLLER__
+#define __IRQ_CONTROLLER__
+
+struct irq_domain {
+ int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
+ u32 *out_hwirq, u32 *out_type);
+ void *priv;
+ struct device_node *controller;
+ struct list_head l;
+};
+
+#endif
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6af0894dafb4..4980f48bbbb7 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_IRQ_VECTORS_H
#define _ASM_X86_IRQ_VECTORS_H
+#include <linux/threads.h>
/*
* Linux IRQ vector layout.
*
@@ -16,8 +17,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
- * Vectors 129 ... 237 : device interrupts
- * Vectors 238 ... 255 : special interrupts
+ * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
@@ -96,37 +97,43 @@
#define THRESHOLD_APIC_VECTOR 0xf9
#define REBOOT_VECTOR 0xf8
-/* f0-f7 used for spreading out TLB flushes: */
-#define INVALIDATE_TLB_VECTOR_END 0xf7
-#define INVALIDATE_TLB_VECTOR_START 0xf0
-#define NUM_INVALIDATE_TLB_VECTORS 8
-
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR 0xef
-
/*
* Generic system vector for platform specific use
*/
-#define X86_PLATFORM_IPI_VECTOR 0xed
+#define X86_PLATFORM_IPI_VECTOR 0xf7
/*
* IRQ work vector:
*/
-#define IRQ_WORK_VECTOR 0xec
+#define IRQ_WORK_VECTOR 0xf6
-#define UV_BAU_MESSAGE 0xea
+#define UV_BAU_MESSAGE 0xf5
/*
* Self IPI vector for machine checks
*/
-#define MCE_SELF_VECTOR 0xeb
+#define MCE_SELF_VECTOR 0xf4
/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK 0xe9
+#define XEN_HVM_EVTCHN_CALLBACK 0xf3
+
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR 0xef
+
+/* up to 32 vectors used for spreading out TLB flushes: */
+#if NR_CPUS <= 32
+# define NUM_INVALIDATE_TLB_VECTORS NR_CPUS
+#else
+# define NUM_INVALIDATE_TLB_VECTORS 32
+#endif
+
+#define INVALIDATE_TLB_VECTOR_END 0xee
+#define INVALIDATE_TLB_VECTOR_START \
+ (INVALIDATE_TLB_VECTOR_END - NUM_INVALIDATE_TLB_VECTORS + 1)
#define NR_VECTORS 256
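
The irq_vectors.h hunk above (together with the entry_arch.h and hw_irq.h changes earlier) grows the TLB-shootdown vector pool from a fixed 8 to up to 32, anchored at 0xee and growing downward, and renumbers the special vectors above it. A minimal userspace sketch of the resulting arithmetic (constants copied from the #defines above; nothing here is kernel API):

#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_END 0xee

static int num_invalidate_tlb_vectors(int nr_cpus)
{
	return nr_cpus <= 32 ? nr_cpus : 32;
}

int main(void)
{
	const int configs[] = { 2, 8, 32, 256 };

	for (unsigned int i = 0; i < sizeof(configs) / sizeof(configs[0]); i++) {
		int n = num_invalidate_tlb_vectors(configs[i]);
		int start = INVALIDATE_TLB_VECTOR_END - n + 1;

		printf("NR_CPUS=%-3d -> %2d vectors, 0x%02x..0x%02x\n",
		       configs[i], n, start, INVALIDATE_TLB_VECTOR_END);
	}
	return 0;
}
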
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index ca242d35e873..518bbbb9ee59 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -13,7 +13,6 @@ enum die_val {
DIE_PANIC,
DIE_NMI,
DIE_DIE,
- DIE_NMIWATCHDOG,
DIE_KERNELDEBUG,
DIE_TRAP,
DIE_GPF,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 8e37deb1eb38..50ebc327a368 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -239,6 +239,7 @@ struct x86_emulate_ctxt {
int interruptibility;
bool perm_ok; /* do not check permissions if true */
+ bool only_vendor_specific_insn;
bool have_exception;
struct x86_exception exception;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ffd7f8d29187..37bd730ff852 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,7 +85,7 @@
#define ASYNC_PF_PER_VCPU 64
-extern spinlock_t kvm_lock;
+extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;
struct kvm_vcpu;
@@ -448,7 +448,7 @@ struct kvm_arch {
unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
- spinlock_t tsc_write_lock;
+ raw_spinlock_t tsc_write_lock;
u64 last_tsc_nsec;
u64 last_tsc_offset;
u64 last_tsc_write;
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 0c90dd9f0505..9c7d95f6174b 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -25,7 +25,6 @@ extern int pic_mode;
#define MAX_IRQ_SOURCES 256
extern unsigned int def_to_bigsmp;
-extern u8 apicid_2_node[];
#ifdef CONFIG_X86_NUMAQ
extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif
-#define MAX_APICID 256
-
#else /* CONFIG_X86_64: */
#define MAX_MP_BUSSES 256
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0d998e..d22611121ac7 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,8 +36,14 @@
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
+#define NHM_C3_AUTO_DEMOTE (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE (1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
+
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
+#define MSR_IA32_BBL_CR_CTL3 0x0000011e
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
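
The new MSR_NHM_SNB_PKG_CST_CFG_CTL definitions above expose the C-state auto-demotion control bits. A userspace sketch of the bit manipulation a consumer would apply to the register value (the sample contents are made up; real code would go through rdmsr/wrmsr rather than printf):

#include <stdint.h>
#include <stdio.h>

#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)

int main(void)
{
	uint64_t msr = 0x0000000006008008ULL;	/* made-up sample contents */
	uint64_t cleared = msr & ~(uint64_t)(NHM_C3_AUTO_DEMOTE | NHM_C1_AUTO_DEMOTE);

	printf("before: %#018llx\n", (unsigned long long)msr);
	printf("after:  %#018llx\n", (unsigned long long)cleared);
	return 0;
}
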
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c76f5b92b840..07f46016d3ff 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -7,7 +7,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 27da400d3138..3d4dab43c994 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -1,5 +1,57 @@
+#ifndef _ASM_X86_NUMA_H
+#define _ASM_X86_NUMA_H
+
+#include <asm/topology.h>
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+ __apicid_to_node[apicid] = node;
+}
+#else /* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+#endif /* CONFIG_NUMA */
+
#ifdef CONFIG_X86_32
# include "numa_32.h"
#else
# include "numa_64.h"
#endif
+
+#ifdef CONFIG_NUMA
+extern void __cpuinit numa_set_node(int cpu, int node);
+extern void __cpuinit numa_clear_node(int cpu);
+extern void __init numa_init_array(void);
+extern void __init init_cpu_to_node(void);
+extern void __cpuinit numa_add_cpu(int cpu);
+extern void __cpuinit numa_remove_cpu(int cpu);
+#else /* CONFIG_NUMA */
+static inline void numa_set_node(int cpu, int node) { }
+static inline void numa_clear_node(int cpu) { }
+static inline void numa_init_array(void) { }
+static inline void init_cpu_to_node(void) { }
+static inline void numa_add_cpu(int cpu) { }
+static inline void numa_remove_cpu(int cpu) { }
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+#endif
+
+#endif /* _ASM_X86_NUMA_H */
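
The new __apicid_to_node[] table above replaces the per-bitness apicid_to_node/apicid_2_node arrays and is only touched through accessors, so 32-bit APIC drivers can override the lookup via apic->numa_cpu_node(). A minimal sketch of the accessor pattern (table size and NUMA_NO_NODE below are illustrative stand-ins, not the kernel definitions):

#include <stdio.h>

#define MAX_LOCAL_APIC 256	/* illustrative size */
#define NUMA_NO_NODE   (-1)

static short apicid_to_node_tbl[MAX_LOCAL_APIC];

static void set_apicid_to_node(int apicid, short node)
{
	apicid_to_node_tbl[apicid] = node;
}

static short lookup_node(int apicid)	/* stand-in for numa_cpu_node() */
{
	return apicid_to_node_tbl[apicid];
}

int main(void)
{
	for (int i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	set_apicid_to_node(0x10, 1);	/* e.g. a mapping parsed from SRAT */
	printf("apicid 0x10 -> node %d\n", lookup_node(0x10));
	return 0;
}
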
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index b0ef2b449a9d..c6beed1ef103 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -4,7 +4,12 @@
extern int numa_off;
extern int pxm_to_nid(int pxm);
-extern void numa_remove_cpu(int cpu);
+
+#ifdef CONFIG_NUMA
+extern int __cpuinit numa_cpu_node(int cpu);
+#else /* CONFIG_NUMA */
+static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
+#endif /* CONFIG_NUMA */
#ifdef CONFIG_HIGHMEM
extern void set_highmem_pages_init(void);
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 0493be39607c..344eb1790b46 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -2,23 +2,16 @@
#define _ASM_X86_NUMA_64_H
#include <linux/nodemask.h>
-#include <asm/apicdef.h>
struct bootnode {
u64 start;
u64 end;
};
-extern int compute_hash_shift(struct bootnode *nodes, int numblks,
- int *nodeids);
-
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
-extern void numa_init_array(void);
extern int numa_off;
-extern s16 apicid_to_node[MAX_LOCAL_APIC];
-
extern unsigned long numa_free_all_bootmem(void);
extern void setup_node_bootmem(int nodeid, unsigned long start,
unsigned long end);
@@ -31,11 +24,11 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
*/
#define NODE_MIN_SIZE (4*1024*1024)
-extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_set_node(int cpu, int node);
-extern void __cpuinit numa_clear_node(int cpu);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern int __cpuinit numa_cpu_node(int cpu);
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+extern void __init numa_set_distance(int from, int to, int distance);
#ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
@@ -43,11 +36,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
void numa_emu_cmdline(char *);
#endif /* CONFIG_NUMA_EMU */
#else
-static inline void init_cpu_to_node(void) { }
-static inline void numa_set_node(int cpu, int node) { }
-static inline void numa_clear_node(int cpu) { }
-static inline void numa_add_cpu(int cpu, int node) { }
-static inline void numa_remove_cpu(int cpu) { }
+static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
#endif
#endif /* _ASM_X86_NUMA_64_H */
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h
index 641988efe063..c5d3a5abbb9f 100644
--- a/arch/x86/include/asm/olpc_ofw.h
+++ b/arch/x86/include/asm/olpc_ofw.h
@@ -6,7 +6,7 @@
#define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */
-#ifdef CONFIG_OLPC_OPENFIRMWARE
+#ifdef CONFIG_OLPC
extern bool olpc_ofw_is_installed(void);
@@ -26,19 +26,15 @@ extern void setup_olpc_ofw_pgd(void);
/* check if OFW was detected during boot */
extern bool olpc_ofw_present(void);
-#else /* !CONFIG_OLPC_OPENFIRMWARE */
-
-static inline bool olpc_ofw_is_installed(void) { return false; }
+#else /* !CONFIG_OLPC */
static inline void olpc_ofw_detect(void) { }
static inline void setup_olpc_ofw_pgd(void) { }
-static inline bool olpc_ofw_present(void) { return false; }
-
-#endif /* !CONFIG_OLPC_OPENFIRMWARE */
+#endif /* !CONFIG_OLPC */
-#ifdef CONFIG_OLPC_OPENFIRMWARE_DT
+#ifdef CONFIG_OF_PROMTREE
extern void olpc_dt_build_devicetree(void);
#else
static inline void olpc_dt_build_devicetree(void) { }
-#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */
+#endif
#endif /* _ASM_X86_OLPC_OFW_H */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 1df66211fd1b..97e6007e4edd 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -2,6 +2,7 @@
#define _ASM_X86_PAGE_DEFS_H
#include <linux/const.h>
+#include <linux/types.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
@@ -45,11 +46,17 @@ extern int devmem_is_allowed(unsigned long pagenr);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
+static inline phys_addr_t get_max_mapped(void)
+{
+ return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
- int acpi, int k8);
+void init_memory_mapping_high(void);
+
+extern void initmem_init(void);
extern void free_initmem(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7e172955ee57..a09e1f052d84 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -451,6 +451,26 @@ do { \
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ typeof(o1) __o1 = o1; \
+ typeof(o1) __n1 = n1; \
+ typeof(o2) __o2 = o2; \
+ typeof(o2) __n2 = n2; \
+ typeof(o2) __dummy = n2; \
+ asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
+ : "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \
+ : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \
+ __ret; \
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
/*
* Per cpu atomic 64 bit operations are only available under 64 bit.
* 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do { \
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction. The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software. The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ typeof(o1) __o1 = o1; \
+ typeof(o1) __n1 = n1; \
+ typeof(o2) __o2 = o2; \
+ typeof(o2) __n2 = n2; \
+ typeof(o2) __dummy; \
+ alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \
+ "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t", \
+ X86_FEATURE_CX16, \
+ ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \
+ "S" (&pcp1), "b"(__n1), "c"(__n2), \
+ "a"(__o1), "d"(__o2)); \
+ __ret; \
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
#endif
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
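
The cmpxchg8b/cmpxchg16b double macros above compare-and-swap two adjacent per-cpu words in one shot; the 16-byte variant additionally requires 16-byte alignment and falls back to an emulation routine on CPUs without CX16. The plain-C sketch below only illustrates the success condition the macros report via setz — it is not atomic and is not how a kernel caller would use this_cpu_cmpxchg_double():

#include <stdbool.h>
#include <stdio.h>

struct pair { unsigned long a, b; };	/* stands in for two adjacent per-cpu words */

static bool cmpxchg_double_sketch(struct pair *p,
				  unsigned long o1, unsigned long o2,
				  unsigned long n1, unsigned long n2)
{
	if (p->a != o1 || p->b != o2)
		return false;		/* either word mismatched: no store */
	p->a = n1;
	p->b = n2;
	return true;
}

int main(void)
{
	struct pair p = { 1, 2 };

	printf("first attempt:  %d\n", cmpxchg_double_sketch(&p, 1, 2, 10, 20));
	printf("second attempt: %d\n", cmpxchg_double_sketch(&p, 1, 2, 30, 40));
	return 0;
}
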
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index e2f6a99f14ab..cc29086e30cd 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -22,6 +22,7 @@
#define ARCH_P4_CNTRVAL_BITS (40)
#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
#define P4_ESCR_EVENT_MASK 0x7e000000U
#define P4_ESCR_EVENT_SHIFT 25
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 45636cefa186..4c25ab48257b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
int x86_cache_alignment; /* In bytes */
int x86_power;
unsigned long loops_per_jiffy;
-#ifdef CONFIG_SMP
- /* cpus sharing the last level cache: */
- cpumask_var_t llc_shared_map;
-#endif
/* cpuid returned max cores value: */
u16 x86_max_cores;
u16 apicid;
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index b4ec95f07518..971e0b46446e 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -1 +1,69 @@
-/* dummy prom.h; here to make linux/of.h's #includes happy */
+/*
+ * Definitions for Device tree / OpenFirmware handling on X86
+ *
+ * based on arch/powerpc/include/asm/prom.h which is
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_X86_PROM_H
+#define _ASM_X86_PROM_H
+#ifndef __ASSEMBLY__
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/setup.h>
+#include <asm/irq_controller.h>
+
+#ifdef CONFIG_OF
+extern int of_ioapic;
+extern u64 initial_dtb;
+extern void add_dtb(u64 data);
+extern void x86_add_irq_domains(void);
+void __cpuinit x86_of_pci_init(void);
+void x86_dtb_init(void);
+
+static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+ return pdev ? pdev->dev.of_node : NULL;
+}
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ return pci_device_to_OF_node(bus->self);
+}
+
+#else
+static inline void add_dtb(u64 data) { }
+static inline void x86_add_irq_domains(void) { }
+static inline void x86_of_pci_init(void) { }
+static inline void x86_dtb_init(void) { }
+#define of_ioapic 0
+#endif
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+#define pci_address_to_pio pci_address_to_pio
+unsigned long pci_address_to_pio(phys_addr_t addr);
+
+/**
+ * irq_dispose_mapping - Unmap an interrupt
+ * @virq: linux virq number of the interrupt to unmap
+ *
+ * FIXME: We really should implement proper virq handling like power,
+ * but that's going to be major surgery.
+ */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 562d4fd31ba8..3250e3d605d9 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -18,7 +18,10 @@ extern struct machine_ops machine_ops;
void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
-void machine_real_restart(const unsigned char *code, int length);
+void machine_real_restart(unsigned int type);
+/* These must match dispatch_table in reboot_32.S */
+#define MRR_BIOS 0
+#define MRR_APM 1
typedef void (*nmi_shootdown_cb)(int, struct die_args*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index d1e41b0f9b60..df4cd32b4cc6 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -37,26 +37,9 @@
#endif
#ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
#include <asm/asm.h>
-struct rwsem_waiter;
-
-extern asmregparm struct rw_semaphore *
- rwsem_down_read_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_down_write_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_wake(struct rw_semaphore *);
-extern asmregparm struct rw_semaphore *
- rwsem_downgrade_wake(struct rw_semaphore *sem);
-
/*
- * the semaphore definition
- *
* The bias values and the counter type limits the number of
* potential readers/writers to 32767 for 32 bits and 2147483647
* for 64 bits.
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore *
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-typedef signed long rwsem_count_t;
-
-struct rw_semaphore {
- rwsem_count_t count;
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-
-#define __RWSEM_INITIALIZER(name) \
-{ \
- RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
-}
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
-} while (0)
-
/*
* lock for reading
*/
@@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem)
*/
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
- rwsem_count_t result, tmp;
+ long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
" mov %0,%1\n\t"
"1:\n\t"
@@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
- rwsem_count_t tmp;
+ long tmp;
asm volatile("# beginning down_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* adds 0xffff0001, returns the old value */
@@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem)
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- rwsem_count_t ret = cmpxchg(&sem->count,
- RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
+ long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
return 0;
@@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
*/
static inline void __up_read(struct rw_semaphore *sem)
{
- rwsem_count_t tmp;
+ long tmp;
asm volatile("# beginning __up_read\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 1, returns the old value */
@@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- rwsem_count_t tmp;
+ long tmp;
asm volatile("# beginning __up_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 0xffff0001, returns the old value */
@@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(rwsem_count_t delta,
- struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
: "+m" (sem->count)
@@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
/*
* implement exchange and add functionality
*/
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
- struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
- rwsem_count_t tmp = delta;
+ long tmp = delta;
asm volatile(LOCK_PREFIX "xadd %0,%1"
: "+r" (tmp), "+m" (sem->count)
@@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
return tmp + delta;
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 231f1c1d6607..cd84f7208f76 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -1,14 +1,16 @@
#ifndef _ASM_X86_SEGMENT_H
#define _ASM_X86_SEGMENT_H
+#include <linux/const.h>
+
/* Constructor for a conventional segment GDT (or LDT) entry */
/* This is a macro so it can be used in initializers */
#define GDT_ENTRY(flags, base, limit) \
- ((((base) & 0xff000000ULL) << (56-24)) | \
- (((flags) & 0x0000f0ffULL) << 40) | \
- (((limit) & 0x000f0000ULL) << (48-16)) | \
- (((base) & 0x00ffffffULL) << 16) | \
- (((limit) & 0x0000ffffULL)))
+ ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
+ (((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
+ (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
+ (((base) & _AC(0x00ffffff,ULL)) << 16) | \
+ (((limit) & _AC(0x0000ffff,ULL))))
/* Simple and small GDT entries for booting only */
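
The GDT_ENTRY() rewrite above only switches the constants to _AC() so the macro can also be used from assembly; the packing itself is unchanged. A runnable sketch of that packing, using the big real-mode code segment that the ACPI wakeup code later in this series builds with GDT_ENTRY(0x809b, ..., 0xfffff) (the base address below is a sample value):

#include <stdint.h>
#include <stdio.h>

static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
	return ((base  & 0xff000000ULL) << (56 - 24)) |
	       ((flags & 0x0000f0ffULL) << 40)        |
	       ((limit & 0x000f0000ULL) << (48 - 16)) |
	       ((base  & 0x00ffffffULL) << 16)        |
	        (limit & 0x0000ffffULL);
}

int main(void)
{
	/* flags 0x809b, 1 MiB limit, sample low-memory base */
	printf("descriptor: %#018llx\n",
	       (unsigned long long)gdt_entry(0x809b, 0x9f000, 0xfffff));
	return 0;
}
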
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 1f4695136776..0747de6227dc 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -23,6 +23,8 @@ extern unsigned int num_processors;
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+/* cpus sharing the last level cache: */
+DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);
@@ -36,8 +38,16 @@ static inline struct cpumask *cpu_core_mask(int cpu)
return per_cpu(cpu_core_map, cpu);
}
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+ return per_cpu(cpu_llc_shared_map, cpu);
+}
+
DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+#endif
/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 6c22bf353f26..725b77831993 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
- *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+ *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 33ecc3ea8782..12569e691ce3 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -98,8 +98,6 @@ do { \
*/
#define HAVE_DISABLE_HLT
#else
-#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 21899cc31e52..910a7084f7f2 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -47,21 +47,6 @@
#include <asm/mpspec.h>
-#ifdef CONFIG_X86_32
-
-/* Mappings between logical cpu number and node number */
-extern int cpu_to_node_map[];
-
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int __cpu_to_node(int cpu)
-{
- return cpu_to_node_map[cpu];
-}
-#define early_cpu_to_node __cpu_to_node
-#define cpu_to_node __cpu_to_node
-
-#else /* CONFIG_X86_64 */
-
/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
-#endif /* CONFIG_X86_64 */
-
/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
@@ -155,7 +138,7 @@ extern unsigned long node_remap_size[];
.balance_interval = 1, \
}
-#ifdef CONFIG_X86_64_ACPI_NUMA
+#ifdef CONFIG_X86_64
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
#endif
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index f4500fb3b485..feca3118a73b 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -3,25 +3,36 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_X86_TRAMPOLINE
+#include <linux/types.h>
+#include <asm/io.h>
+
/*
- * Trampoline 80x86 program as an array.
+ * Trampoline 80x86 program as an array. These are in the init rodata
+ * segment, but that's okay, because we only care about the relative
+ * addresses of the symbols.
*/
-extern const unsigned char trampoline_data [];
-extern const unsigned char trampoline_end [];
-extern unsigned char *trampoline_base;
+extern const unsigned char x86_trampoline_start [];
+extern const unsigned char x86_trampoline_end [];
+extern unsigned char *x86_trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
extern unsigned long initial_gs;
-#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
+extern void __init setup_trampolines(void);
+
+extern const unsigned char trampoline_data[];
+extern const unsigned char trampoline_status[];
+
+#define TRAMPOLINE_SYM(x) \
+ ((void *)(x86_trampoline_base + \
+ ((const unsigned char *)(x) - x86_trampoline_start)))
-extern unsigned long setup_trampoline(void);
-extern void __init reserve_trampoline_memory(void);
-#else
-static inline void reserve_trampoline_memory(void) {}
-#endif /* CONFIG_X86_TRAMPOLINE */
+/* Address of the SMP trampoline */
+static inline unsigned long trampoline_address(void)
+{
+ return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
+}
#endif /* __ASSEMBLY__ */
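
TRAMPOLINE_SYM() above relocates a symbol from the trampoline image linked into init rodata to its copy in low memory by plain offset arithmetic. A small sketch of that arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up addresses for illustration only */
	uintptr_t x86_trampoline_start = 0x1000000;	/* linked image */
	uintptr_t trampoline_data      = 0x1000040;	/* symbol inside the image */
	uintptr_t x86_trampoline_base  = 0x98000;	/* low-memory copy */

	uintptr_t relocated = x86_trampoline_base +
			      (trampoline_data - x86_trampoline_start);

	printf("trampoline_data will run at %#lx\n", (unsigned long)relocated);
	return 0;
}
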
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index b766a5e8ba0e..b6f73f1bc7ac 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -346,10 +346,11 @@
#define __NR_fanotify_init 338
#define __NR_fanotify_mark 339
#define __NR_prlimit64 340
+#define __NR_clock_adjtime 341
#ifdef __KERNEL__
-#define NR_syscalls 341
+#define NR_syscalls 342
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 363e9b8a715b..5ee30858f5d6 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -669,6 +669,8 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_prlimit64 302
__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_clock_adjtime 303
+__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
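
The two unistd hunks above wire up the new clock_adjtime syscall (341 on 32-bit, 303 on 64-bit). A userspace sketch invoking it by raw syscall number on x86-64 rather than through a libc wrapper; with modes left at zero the call only reads the current adjustment, so it is safe to run:

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timex tx;

	memset(&tx, 0, sizeof(tx));	/* modes == 0: query only, no adjustment */

	long state = syscall(303 /* __NR_clock_adjtime on x86-64 per this patch */,
			     CLOCK_REALTIME, &tx);
	if (state < 0) {
		perror("clock_adjtime");
		return 1;
	}
	printf("clock state %ld, freq offset %ld\n", state, tx.freq);
	return 0;
}
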
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 64642ad019fb..643ebf2e2ad8 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -83,11 +83,13 @@ struct x86_init_paging {
* boot cpu
* @tsc_pre_init: platform function called before TSC init
* @timer_init: initialize the platform timer (default PIT/HPET)
+ * @wallclock_init: init the wallclock device
*/
struct x86_init_timers {
void (*setup_percpu_clockev)(void);
void (*tsc_pre_init)(void);
void (*timer_init)(void);
+ void (*wallclock_init)(void);
};
/**
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index a3c28ae4025b..8508bfe52296 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set)
static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
- return _hypercall2(int, sched_op_new, cmd, arg);
+ return _hypercall2(int, sched_op, cmd, arg);
}
static inline long
@@ -422,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value)
#endif
static inline int
-HYPERVISOR_suspend(unsigned long srec)
+HYPERVISOR_suspend(unsigned long start_info_mfn)
{
- return _hypercall3(int, sched_op, SCHEDOP_shutdown,
- SHUTDOWN_suspend, srec);
+ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+ /*
+ * For a PV guest the tools require that the start_info mfn be
+ * present in rdx/edx when the hypercall is made. Per the
+ * hypercall calling convention this is the third hypercall
+ * argument, which is start_info_mfn here.
+ */
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}
static inline int
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index f25bdf238a33..c61934fbf22a 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -29,8 +29,10 @@ typedef struct xpaddr {
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
-#define FOREIGN_FRAME_BIT (1UL<<31)
+#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1))
+#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
+#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT)
/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES \
@@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern unsigned long set_phys_range_identity(unsigned long pfn_s,
+ unsigned long pfn_e);
extern int m2p_add_override(unsigned long mfn, struct page *page);
extern int m2p_remove_override(struct page *page);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+#ifdef CONFIG_XEN_DEBUG_FS
+extern int p2m_dump_show(struct seq_file *m, void *v);
+#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
@@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
mfn = get_phys_to_machine(pfn);
if (mfn != INVALID_P2M_ENTRY)
- mfn &= ~FOREIGN_FRAME_BIT;
+ mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
return mfn;
}
@@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
unsigned long pfn;
+ int ret = 0;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
+ if (unlikely((mfn >> machine_to_phys_order) != 0)) {
+ pfn = ~0;
+ goto try_override;
+ }
pfn = 0;
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
- __get_user(pfn, &machine_to_phys_mapping[mfn]);
-
- /*
- * If this appears to be a foreign mfn (because the pfn
- * doesn't map back to the mfn), then check the local override
- * table to see if there's a better pfn to use.
+ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+try_override:
+ /* ret might be < 0 if there are no entries in the m2p for mfn */
+ if (ret < 0)
+ pfn = ~0;
+ else if (get_phys_to_machine(pfn) != mfn)
+ /*
+ * If this appears to be a foreign mfn (because the pfn
+ * doesn't map back to the mfn), then check the local override
+ * table to see if there's a better pfn to use.
+ *
+ * m2p_find_override_pfn returns ~0 if it doesn't find anything.
+ */
+ pfn = m2p_find_override_pfn(mfn, ~0);
+
+ /*
+ * pfn is ~0 if there are no entries in the m2p for mfn or if the
+ * entry doesn't map back to the mfn and m2p_override doesn't have a
+ * valid entry for it.
*/
- if (get_phys_to_machine(pfn) != mfn)
- pfn = m2p_find_override_pfn(mfn, pfn);
+ if (pfn == ~0 &&
+ get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+ pfn = mfn;
return pfn;
}
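
The rewritten mfn_to_pfn() above adds three fallbacks: an out-of-range mfn or a failed m2p read yields ~0, a mismatching p2m entry is sent through the m2p override table, and finally an mfn inside one of the new 1:1 identity ranges maps to itself. The standalone sketch below mirrors only that decision flow; the lookup helpers are stubs, not the real m2p/p2m accessors:

#include <stdio.h>

#define PFN_INVALID	(~0UL)
#define IDENTITY_BIT	(1UL << (sizeof(unsigned long) * 8 - 2))

/* stub lookups standing in for the m2p table, the p2m table and the
 * m2p override list; tweak their return values to exercise each branch */
static unsigned long m2p_lookup(unsigned long mfn, int *err) { *err = 0; return 5; }
static unsigned long p2m_lookup(unsigned long pfn) { (void)pfn; return 7; }	/* mismatch */
static unsigned long m2p_override_lookup(unsigned long mfn) { (void)mfn; return PFN_INVALID; }
static unsigned long p2m_of_mfn(unsigned long mfn) { return mfn | IDENTITY_BIT; }

static unsigned long mfn_to_pfn_sketch(unsigned long mfn, unsigned long max_mfn)
{
	unsigned long pfn;
	int err;

	if (mfn >= max_mfn) {			/* beyond the m2p table */
		pfn = PFN_INVALID;
	} else {
		pfn = m2p_lookup(mfn, &err);
		if (err < 0)
			pfn = PFN_INVALID;
		else if (p2m_lookup(pfn) != mfn)	/* looks foreign */
			pfn = m2p_override_lookup(mfn);
	}

	/* last resort: an identity-mapped (1:1) machine frame maps to itself */
	if (pfn == PFN_INVALID && p2m_of_mfn(mfn) == (mfn | IDENTITY_BIT))
		pfn = mfn;

	return pfn;
}

int main(void)
{
	printf("pfn = %#lx\n", mfn_to_pfn_sketch(0x1234, 1UL << 20));
	return 0;
}
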
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 2329b3eaf8d3..aa8620989162 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
* its own functions.
*/
struct xen_pci_frontend_ops {
- int (*enable_msi)(struct pci_dev *dev, int **vectors);
+ int (*enable_msi)(struct pci_dev *dev, int vectors[]);
void (*disable_msi)(struct pci_dev *dev);
- int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+ int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
void (*disable_msix)(struct pci_dev *dev);
};
extern struct xen_pci_frontend_ops *xen_pci_frontend;
static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
- int **vectors)
+ int vectors[])
{
if (xen_pci_frontend && xen_pci_frontend->enable_msi)
return xen_pci_frontend->enable_msi(dev, vectors);
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
xen_pci_frontend->disable_msi(dev);
}
static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
- int **vectors, int nvec)
+ int vectors[], int nvec)
{
if (xen_pci_frontend && xen_pci_frontend->enable_msix)
return xen_pci_frontend->enable_msix(dev, vectors, nvec);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 34244b2cd880..4ad450948f36 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -47,7 +47,7 @@ obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+obj-y += trampoline.o trampoline_$(BITS).o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
obj-y += reboot.o
+obj-$(CONFIG_X86_32) += reboot_32.o
obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
@@ -69,7 +70,6 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
@@ -109,6 +109,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
+obj-$(CONFIG_OF) += devicetree.o
###
# 64 bit specific files
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a71137983a..9a966c579af5 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
- if (acpi_skip_timer_override &&
- intsrc->source_irq == 0 && intsrc->global_irq == 2) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
- return 0;
+ if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ if (acpi_skip_timer_override) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
+ }
+ if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ }
}
mp_override_legacy_irq(intsrc->source_irq,
@@ -589,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
nid = acpi_get_node(handle);
if (nid == -1 || !node_online(nid))
return;
-#ifdef CONFIG_X86_64
- apicid_to_node[physid] = nid;
+ set_apicid_to_node(physid, nid);
numa_set_node(cpu, nid);
-#else /* CONFIG_X86_32 */
- apicid_2_node[physid] = nid;
- cpu_to_node_map[cpu] = nid;
-#endif
-
#endif
}
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 28595d6df47c..ead21b663117 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -6,11 +6,17 @@
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/processor-flags.h>
+#include "wakeup.h"
.code16
- .section ".header", "a"
+ .section ".jump", "ax"
+ .globl _start
+_start:
+ cli
+ jmp wakeup_code
/* This should match the structure in wakeup.h */
+ .section ".header", "a"
.globl wakeup_header
wakeup_header:
video_mode: .short 0 /* Video mode number */
@@ -30,14 +36,11 @@ wakeup_jmp: .byte 0xea /* ljmpw */
wakeup_jmp_off: .word 3f
wakeup_jmp_seg: .word 0
wakeup_gdt: .quad 0, 0, 0
-signature: .long 0x51ee1111
+signature: .long WAKEUP_HEADER_SIGNATURE
.text
- .globl _start
.code16
wakeup_code:
-_start:
- cli
cld
/* Apparently some dimwit BIOS programmers don't know how to
@@ -77,12 +80,12 @@ _start:
/* Check header signature... */
movl signature, %eax
- cmpl $0x51ee1111, %eax
+ cmpl $WAKEUP_HEADER_SIGNATURE, %eax
jne bogus_real_magic
/* Check we really have everything... */
movl end_signature, %eax
- cmpl $0x65a22c82, %eax
+ cmpl $WAKEUP_END_SIGNATURE, %eax
jne bogus_real_magic
/* Call the C code */
@@ -147,3 +150,7 @@ wakeup_heap:
wakeup_stack:
.space 2048
wakeup_stack_end:
+
+ .section ".signature","a"
+end_signature:
+ .long WAKEUP_END_SIGNATURE
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index 69d38d0b2b64..e1828c07e79c 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -35,7 +35,8 @@ struct wakeup_header {
extern struct wakeup_header wakeup_header;
#endif
-#define HEADER_OFFSET 0x3f00
-#define WAKEUP_SIZE 0x4000
+#define WAKEUP_HEADER_OFFSET 8
+#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
+#define WAKEUP_END_SIGNATURE 0x65a22c82
#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
index 060fff8f5c5b..d4f8010a5b1b 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
@@ -13,9 +13,19 @@ ENTRY(_start)
SECTIONS
{
. = 0;
+ .jump : {
+ *(.jump)
+ } = 0x90909090
+
+ . = WAKEUP_HEADER_OFFSET;
+ .header : {
+ *(.header)
+ }
+
+ . = ALIGN(16);
.text : {
*(.text*)
- }
+ } = 0x90909090
. = ALIGN(16);
.rodata : {
@@ -33,11 +43,6 @@ SECTIONS
*(.data*)
}
- .signature : {
- end_signature = .;
- LONG(0x65a22c82)
- }
-
. = ALIGN(16);
.bss : {
__bss_start = .;
@@ -45,20 +50,13 @@ SECTIONS
__bss_end = .;
}
- . = HEADER_OFFSET;
- .header : {
- *(.header)
+ .signature : {
+ *(.signature)
}
- . = ALIGN(16);
_end = .;
/DISCARD/ : {
*(.note*)
}
-
- /*
- * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
- */
- . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
}
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 68d1537b8c81..ff93bc1b09c3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -18,37 +18,28 @@
#include "realmode/wakeup.h"
#include "sleep.h"
-unsigned long acpi_wakeup_address;
unsigned long acpi_realmode_flags;
-/* address in low memory of the wakeup routine. */
-static unsigned long acpi_realmode;
-
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
static char temp_stack[4096];
#endif
/**
- * acpi_save_state_mem - save kernel state
+ * acpi_suspend_lowlevel - save kernel state
*
* Create an identity mapped page table and copy the wakeup routine to
* low memory.
- *
- * Note that this is too late to change acpi_wakeup_address.
*/
-int acpi_save_state_mem(void)
+int acpi_suspend_lowlevel(void)
{
struct wakeup_header *header;
+ /* address in low memory of the wakeup routine. */
+ char *acpi_realmode;
- if (!acpi_realmode) {
- printk(KERN_ERR "Could not allocate memory during boot, "
- "S3 disabled\n");
- return -ENOMEM;
- }
- memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE);
+ acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
- header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET);
- if (header->signature != 0x51ee1111) {
+ header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
+ if (header->signature != WAKEUP_HEADER_SIGNATURE) {
printk(KERN_ERR "wakeup header does not match\n");
return -EINVAL;
}
@@ -68,9 +59,7 @@ int acpi_save_state_mem(void)
/* GDT[0]: GDT self-pointer */
header->wakeup_gdt[0] =
(u64)(sizeof(header->wakeup_gdt) - 1) +
- ((u64)(acpi_wakeup_address +
- ((char *)&header->wakeup_gdt - (char *)acpi_realmode))
- << 16);
+ ((u64)__pa(&header->wakeup_gdt) << 16);
/* GDT[1]: big real mode-like code segment */
header->wakeup_gdt[1] =
GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
@@ -96,7 +85,7 @@ int acpi_save_state_mem(void)
header->pmode_cr3 = (u32)__pa(&initial_page_table);
saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
- header->trampoline_segment = setup_trampoline() >> 4;
+ header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
@@ -107,56 +96,10 @@ int acpi_save_state_mem(void)
saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */
+ do_suspend_lowlevel();
return 0;
}
-/*
- * acpi_restore_state - undo effects of acpi_save_state_mem
- */
-void acpi_restore_state_mem(void)
-{
-}
-
-
-/**
- * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation
- *
- * We allocate a page from the first 1MB of memory for the wakeup
- * routine for when we come back from a sleep state. The
- * runtime allocator allows specification of <16MB pages, but not
- * <1MB pages.
- */
-void __init acpi_reserve_wakeup_memory(void)
-{
- phys_addr_t mem;
-
- if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
- printk(KERN_ERR
- "ACPI: Wakeup code way too big, S3 disabled.\n");
- return;
- }
-
- mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);
-
- if (mem == MEMBLOCK_ERROR) {
- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
- return;
- }
- acpi_realmode = (unsigned long) phys_to_virt(mem);
- acpi_wakeup_address = mem;
- memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
-}
-
-int __init acpi_configure_wakeup_memory(void)
-{
- if (acpi_realmode)
- set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
-
- return 0;
-}
-arch_initcall(acpi_configure_wakeup_memory);
-
-
static int __init acpi_sleep_setup(char *str)
{
while ((str != NULL) && (*str != '\0')) {
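
In the sleep.c hunk above, wakeup_gdt[0] packs a GDT self-pointer into one quadword: the table limit (size - 1) in the low 16 bits and the physical address of the table shifted into the bits above. A small sketch of that packing with a sample physical address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wakeup_gdt[3] = { 0 };		/* three descriptors, as above */
	uint64_t gdt_phys = 0x9f108;		/* sample __pa(&wakeup_gdt) */

	uint64_t self_ptr = (uint64_t)(sizeof(wakeup_gdt) - 1) + (gdt_phys << 16);

	printf("wakeup_gdt[0] = %#018llx\n", (unsigned long long)self_ptr);
	return 0;
}
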
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index adbcbaa6f1df..416d4be13fef 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -4,13 +4,12 @@
#include <asm/trampoline.h>
-extern char wakeup_code_start, wakeup_code_end;
-
extern unsigned long saved_video_mode;
extern long saved_magic;
extern int wakeup_pmode_return;
-extern char swsusp_pg_dir[PAGE_SIZE];
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
+
+extern void do_suspend_lowlevel(void);
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
index 6ff3b5730575..6ce81ee9cb03 100644
--- a/arch/x86/kernel/acpi/wakeup_rm.S
+++ b/arch/x86/kernel/acpi/wakeup_rm.S
@@ -2,9 +2,11 @@
* Wrapper script for the realmode binary as a transport object
* before copying to low memory.
*/
- .section ".rodata","a"
- .globl wakeup_code_start, wakeup_code_end
-wakeup_code_start:
+#include <asm/page_types.h>
+
+ .section ".x86_trampoline","a"
+ .balign PAGE_SIZE
+ .globl acpi_wakeup_code
+acpi_wakeup_code:
.incbin "arch/x86/kernel/acpi/realmode/wakeup.bin"
-wakeup_code_end:
.size wakeup_code_start, .-wakeup_code_start
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 0a99f7198bc3..5e4c75b3d18b 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,14 +12,19 @@
static u32 *flush_words;
-struct pci_device_id amd_nb_misc_ids[] = {
+const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
+static struct pci_device_id amd_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
+ {}
+};
+
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
{ 0x00, 0x18, 0x20 },
{ 0xff, 0x00, 0x20 },
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
static struct pci_dev *next_northbridge(struct pci_dev *dev,
- struct pci_device_id *ids)
+ const struct pci_device_id *ids)
{
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
@@ -45,7 +50,7 @@ int amd_cache_northbridges(void)
{
int i = 0;
struct amd_northbridge *nb;
- struct pci_dev *misc;
+ struct pci_dev *misc, *link;
if (amd_nb_num())
return 0;
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void)
amd_northbridges.nb = nb;
amd_northbridges.num = i;
- misc = NULL;
+ link = misc = NULL;
for (i = 0; i != amd_nb_num(); i++) {
node_to_amd_nb(i)->misc = misc =
next_northbridge(misc, amd_nb_misc_ids);
+ node_to_amd_nb(i)->link = link =
+ next_northbridge(link, amd_nb_link_ids);
}
/* some CPU families (e.g. family 0x11) do not support GART */
@@ -85,6 +92,13 @@ int amd_cache_northbridges(void)
boot_cpu_data.x86_mask >= 0x1))
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+ if (boot_cpu_data.x86 == 0x15)
+ amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+
+ /* L3 cache partitioning is supported on family 0x15 */
+ if (boot_cpu_data.x86 == 0x15)
+ amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
+
return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
@@ -93,8 +107,9 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges);
they're useless anyways */
int __init early_is_amd_nb(u32 device)
{
- struct pci_device_id *id;
+ const struct pci_device_id *id;
u32 vendor = device & 0xffff;
+
device >>= 16;
for (id = amd_nb_misc_ids; id->vendor; id++)
if (vendor == id->vendor && device == id->device)
@@ -102,6 +117,65 @@ int __init early_is_amd_nb(u32 device)
return 0;
}
+int amd_get_subcaches(int cpu)
+{
+ struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+ unsigned int mask;
+ int cuid = 0;
+
+ if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return 0;
+
+ pci_read_config_dword(link, 0x1d4, &mask);
+
+#ifdef CONFIG_SMP
+ cuid = cpu_data(cpu).compute_unit_id;
+#endif
+ return (mask >> (4 * cuid)) & 0xf;
+}
+
+int amd_set_subcaches(int cpu, int mask)
+{
+ static unsigned int reset, ban;
+ struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ unsigned int reg;
+ int cuid = 0;
+
+ if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
+ return -EINVAL;
+
+ /* if necessary, collect reset state of L3 partitioning and BAN mode */
+ if (reset == 0) {
+ pci_read_config_dword(nb->link, 0x1d4, &reset);
+ pci_read_config_dword(nb->misc, 0x1b8, &ban);
+ ban &= 0x180000;
+ }
+
+ /* deactivate BAN mode if any subcaches are to be disabled */
+ if (mask != 0xf) {
+ pci_read_config_dword(nb->misc, 0x1b8, &reg);
+ pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
+ }
+
+#ifdef CONFIG_SMP
+ cuid = cpu_data(cpu).compute_unit_id;
+#endif
+ mask <<= 4 * cuid;
+ mask |= (0xf ^ (1 << cuid)) << 26;
+
+ pci_write_config_dword(nb->link, 0x1d4, mask);
+
+ /* reset BAN mode if L3 partitioning returned to reset state */
+ pci_read_config_dword(nb->link, 0x1d4, &reg);
+ if (reg == reset) {
+ pci_read_config_dword(nb->misc, 0x1b8, &reg);
+ reg &= ~0x180000;
+ pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
+ }
+
+ return 0;
+}
+
int amd_cache_gart(void)
{
int i;
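
amd_get_subcaches()/amd_set_subcaches() above treat register 0x1d4 of the node's link device as an array of 4-bit subcache masks, one field per compute unit. A standalone sketch of the field extraction and of the write value composed in amd_set_subcaches() (the register contents are a made-up sample; real code reads and writes them with pci_read_config_dword()/pci_write_config_dword()):

#include <stdint.h>
#include <stdio.h>

static unsigned int get_subcaches(uint32_t reg_1d4, int cuid)
{
	return (reg_1d4 >> (4 * cuid)) & 0xf;	/* this compute unit's 4-bit mask */
}

static uint32_t set_subcaches_value(int cuid, unsigned int mask)
{
	uint32_t v = (mask & 0xf) << (4 * cuid);

	/* upper bits composed exactly as in amd_set_subcaches() above */
	v |= (0xfu ^ (1u << cuid)) << 26;
	return v;
}

int main(void)
{
	uint32_t reg = 0x0000ffff;		/* made-up sample register contents */

	printf("cu1 mask:    %#x\n", get_subcaches(reg, 1));
	printf("write value: %#010x\n", set_subcaches_value(1, 0x7));
	return 0;
}
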
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51ef31a89be9..1293c709ee85 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
- apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+ adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
global_clock_event = &adev->evt;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
@@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta,
return 0;
}
-/*
- * APB timer clock is not in sync with pclk on Langwell, which translates to
- * unreliable read value caused by sampling error. the error does not add up
- * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
- * would go backwards. the following code is trying to prevent time traveling
- * backwards. little bit paranoid.
- */
static cycle_t apbt_read_clocksource(struct clocksource *cs)
{
- unsigned long t0, t1, t2;
- static unsigned long last_read;
-
-bad_count:
- t1 = apbt_readl(phy_cs_timer_id,
- APBTMR_N_CURRENT_VALUE);
- t2 = apbt_readl(phy_cs_timer_id,
- APBTMR_N_CURRENT_VALUE);
- if (unlikely(t1 < t2)) {
- pr_debug("APBT: read current count error %lx:%lx:%lx\n",
- t1, t2, t2 - t1);
- goto bad_count;
- }
- /*
- * check against cached last read, makes sure time does not go back.
- * it could be a normal rollover but we will do tripple check anyway
- */
- if (unlikely(t2 > last_read)) {
- /* check if we have a normal rollover */
- unsigned long raw_intr_status =
- apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
- /*
- * cs timer interrupt is masked but raw intr bit is set if
- * rollover occurs. then we read EOI reg to clear it.
- */
- if (raw_intr_status & (1 << phy_cs_timer_id)) {
- apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
- goto out;
- }
- pr_debug("APB CS going back %lx:%lx:%lx ",
- t2, last_read, t2 - last_read);
-bad_count_x3:
- pr_debug("triple check enforced\n");
- t0 = apbt_readl(phy_cs_timer_id,
- APBTMR_N_CURRENT_VALUE);
- udelay(1);
- t1 = apbt_readl(phy_cs_timer_id,
- APBTMR_N_CURRENT_VALUE);
- udelay(1);
- t2 = apbt_readl(phy_cs_timer_id,
- APBTMR_N_CURRENT_VALUE);
- if ((t2 > t1) || (t1 > t0)) {
- printk(KERN_ERR "Error: APB CS tripple check failed\n");
- goto bad_count_x3;
- }
- }
-out:
- last_read = t2;
- return (cycle_t)~t2;
+ unsigned long current_count;
+
+ current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+ return (cycle_t)~current_count;
}
static int apbt_clocksource_register(void)
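The simplified read above relies on the APB timer being a down-counter: returning the bitwise complement of the current count gives a value that increases as the hardware counts down, which is all the clocksource core needs between wraps. A standalone sketch of that conversion (illustration only):

/* Illustration only: turn a down-counting hardware value into an
 * increasing cycle count by complementing it. */
static u32 down_counter_to_cycles(u32 current_count)
{
	return ~current_count;
}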
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 5955a7800a96..7b1e8e10b89c 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
static u32 __init allocate_aperture(void)
{
u32 aper_size;
- void *p;
+ unsigned long addr;
/* aper_size should <= 1G */
if (fallback_aper_order > 5)
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void)
* so don't use 512M below as gart iommu, leave the space for kernel
* code for safe
*/
- p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
+ addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
+ if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
+ printk(KERN_ERR
+ "Cannot allocate aperture memory hole (%lx,%uK)\n",
+ addr, aper_size>>10);
+ return 0;
+ }
+ memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
/*
* Kmemleak should not scan this block as it may not be mapped via the
* kernel direct mapping.
*/
- kmemleak_ignore(p);
- if (!p || __pa(p)+aper_size > 0xffffffff) {
- printk(KERN_ERR
- "Cannot allocate aperture memory hole (%p,%uK)\n",
- p, aper_size>>10);
- if (p)
- free_bootmem(__pa(p), aper_size);
- return 0;
- }
+ kmemleak_ignore(phys_to_virt(addr));
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
- aper_size >> 10, __pa(p));
- insert_aperture_resource((u32)__pa(p), aper_size);
- register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
- (u32)__pa(p+aper_size) >> PAGE_SHIFT);
+ aper_size >> 10, addr);
+ insert_aperture_resource((u32)addr, aper_size);
+ register_nosave_region(addr >> PAGE_SHIFT,
+ (addr+aper_size) >> PAGE_SHIFT);
- return (u32)__pa(p);
+ return (u32)addr;
}
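Whichever allocator is used, the aperture hole must still end below the 4 GB boundary so the 32-bit aperture registers can address it; that is the explicit check kept in the memblock version above. A sketch of that constraint (illustration only, hypothetical helper name):

/* Illustration only: the aperture must fit entirely below 4 GB. */
static int aperture_fits_below_4g(unsigned long addr, u32 aper_size)
{
	return addr + aper_size <= 0xffffffffUL;
}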
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 76b96d74978a..6c464a315d4b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -43,6 +43,7 @@
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/idle.h>
@@ -78,6 +79,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
#ifdef CONFIG_X86_32
+
+/*
+ * On x86_32, the mapping between cpu and logical apicid may vary
+ * depending on apic in use. The following early percpu variable is
+ * used for the mapping. This is where the behaviors of x86_64 and 32
+ * actually diverge. Let's keep it ugly for now.
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
+
/*
* Knob to control our willingness to enable the local APIC.
*
@@ -1209,7 +1219,7 @@ void __cpuinit setup_local_APIC(void)
rdtscll(tsc);
if (disable_apic) {
- arch_disable_smp_support();
+ disable_ioapic_support();
return;
}
@@ -1237,6 +1247,19 @@ void __cpuinit setup_local_APIC(void)
*/
apic->init_apic_ldr();
+#ifdef CONFIG_X86_32
+ /*
+ * APIC LDR is initialized. If logical_apicid mapping was
+ * initialized during get_smp_config(), make sure it matches the
+ * actual value.
+ */
+ i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+ WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
+ /* always use the value from LDR */
+ early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+ logical_smp_processor_id();
+#endif
+
/*
* Set Task Priority to 'accept all'. We never change this
* later on.
@@ -1448,7 +1471,7 @@ int __init enable_IR(void)
void __init enable_IR_x2apic(void)
{
unsigned long flags;
- struct IO_APIC_route_entry **ioapic_entries = NULL;
+ struct IO_APIC_route_entry **ioapic_entries;
int ret, x2apic_enabled = 0;
int dmar_table_init_ret;
@@ -1562,7 +1585,7 @@ static int apic_verify(void)
return 0;
}
-int apic_force_enable(void)
+int apic_force_enable(unsigned long addr)
{
u32 h, l;
@@ -1578,7 +1601,7 @@ int apic_force_enable(void)
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
pr_info("Local APIC disabled by BIOS -- reenabling.\n");
l &= ~MSR_IA32_APICBASE_BASE;
- l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+ l |= MSR_IA32_APICBASE_ENABLE | addr;
wrmsr(MSR_IA32_APICBASE, l, h);
enabled_via_apicbase = 1;
}
@@ -1619,7 +1642,7 @@ static int __init detect_init_APIC(void)
"you can enable it with \"lapic\"\n");
return -1;
}
- if (apic_force_enable())
+ if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
return -1;
} else {
if (apic_verify())
@@ -1930,17 +1953,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
{
int cpu;
- /*
- * Validate version
- */
- if (version == 0x0) {
- pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
- "fixing up to 0x10. (tell your hw vendor)\n",
- version);
- version = 0x10;
- }
- apic_version[apicid] = version;
-
if (num_processors >= nr_cpu_ids) {
int max = nr_cpu_ids;
int thiscpu = max + disabled_cpus;
@@ -1954,22 +1966,34 @@ void __cpuinit generic_processor_info(int apicid, int version)
}
num_processors++;
- cpu = cpumask_next_zero(-1, cpu_present_mask);
-
- if (version != apic_version[boot_cpu_physical_apicid])
- WARN_ONCE(1,
- "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
- apic_version[boot_cpu_physical_apicid], cpu, version);
-
- physid_set(apicid, phys_cpu_present_map);
if (apicid == boot_cpu_physical_apicid) {
/*
* x86_bios_cpu_apicid is required to have processors listed
* in same order as logical cpu numbers. Hence the first
* entry is BSP, and so on.
+ * boot_cpu_init() already holds bit 0 in cpu_present_mask
+ * for the BSP.
*/
cpu = 0;
+ } else
+ cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+ /*
+ * Validate version
+ */
+ if (version == 0x0) {
+ pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
+ cpu, apicid);
+ version = 0x10;
+ }
+ apic_version[apicid] = version;
+
+ if (version != apic_version[boot_cpu_physical_apicid]) {
+ pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
+ apic_version[boot_cpu_physical_apicid], cpu, version);
}
+
+ physid_set(apicid, phys_cpu_present_map);
if (apicid > max_physical_apicid)
max_physical_apicid = apicid;
@@ -1977,7 +2001,10 @@ void __cpuinit generic_processor_info(int apicid, int version)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
-
+#ifdef CONFIG_X86_32
+ early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+ apic->x86_32_early_logical_apicid(cpu);
+#endif
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
}
@@ -1998,10 +2025,14 @@ void default_init_apic_ldr(void)
}
#ifdef CONFIG_X86_32
-int default_apicid_to_node(int logical_apicid)
+int default_x86_32_numa_cpu_node(int cpu)
{
-#ifdef CONFIG_SMP
- return apicid_2_node[hard_smp_processor_id()];
+#ifdef CONFIG_NUMA
+ int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+
+ if (apicid != BAD_APICID)
+ return __apicid_to_node[apicid];
+ return NUMA_NO_NODE;
#else
return 0;
#endif
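Two behaviours in the apic.c changes above are easy to miss: a BIOS-reported APIC version of 0 is normalized to 0x10 before being stored, and on 32-bit the logical APIC id guessed at enumeration time is later overwritten in setup_local_APIC() with the value read back from the LDR. A standalone sketch of the version fixup (illustration only, hypothetical helper name):

/* Illustration only: version 0 is a known BIOS bug, fixed up to 0x10. */
static int sanitize_apic_version(int version)
{
	return version ? version : 0x10;
}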
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 09d3b17ce0c2..5652d31fe108 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -185,8 +185,6 @@ struct apic apic_flat = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
- .apicid_to_node = NULL,
- .cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
@@ -337,8 +335,6 @@ struct apic apic_physflat = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
- .apicid_to_node = NULL,
- .cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index e31b9ffe25f5..f1baa2dc087a 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void)
return 0;
}
-static int noop_cpu_to_logical_apicid(int cpu)
-{
- return 0;
-}
-
static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
{
return 0;
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
cpumask_set_cpu(cpu, retmask);
}
-int noop_apicid_to_node(int logical_apicid)
-{
- /* we're always on node 0 */
- return 0;
-}
-
static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE((cpu_has_apic && !disable_apic));
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v)
WARN_ON_ONCE(cpu_has_apic && !disable_apic);
}
+#ifdef CONFIG_X86_32
+static int noop_x86_32_numa_cpu_node(int cpu)
+{
+ /* we're always on node 0 */
+ return 0;
+}
+#endif
+
struct apic apic_noop = {
.name = "noop",
.probe = noop_probe,
@@ -153,9 +150,7 @@ struct apic apic_noop = {
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
- .apicid_to_node = noop_apicid_to_node,
- .cpu_to_logical_apicid = noop_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
@@ -197,4 +192,9 @@ struct apic apic_noop = {
.icr_write = noop_apic_icr_write,
.wait_icr_idle = noop_apic_wait_icr_idle,
.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,
+
+#ifdef CONFIG_X86_32
+ .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
+ .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node,
+#endif
};
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index cb804c5091b9..541a2e431659 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit)
return 1;
}
+static int bigsmp_early_logical_apicid(int cpu)
+{
+ /* on bigsmp, logical apicid is the same as physical */
+ return early_per_cpu(x86_cpu_to_apicid, cpu);
+}
+
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void)
nr_ioapics);
}
-static int bigsmp_apicid_to_node(int logical_apicid)
-{
- return apicid_2_node[hard_smp_processor_id()];
-}
-
static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
return BAD_APICID;
}
-/* Mapping from cpu number to logical apicid */
-static inline int bigsmp_cpu_to_logical_apicid(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return cpu_physical_id(cpu);
-}
-
static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
+ int cpu = cpumask_first(cpumask);
+
+ if (cpu < nr_cpu_ids)
+ return cpu_physical_id(cpu);
+ return BAD_APICID;
}
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
*/
for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
+ return cpu_physical_id(cpu);
}
- return bigsmp_cpu_to_logical_apicid(cpu);
+ return BAD_APICID;
}
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = {
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
.setup_apic_routing = bigsmp_setup_apic_routing,
.multi_timer_check = NULL,
- .apicid_to_node = bigsmp_apicid_to_node,
- .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+ .x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
+ .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 8593582d8022..3e9de4854c5b 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit)
return physid_isset(bit, phys_cpu_present_map);
}
+static int es7000_early_logical_apicid(int cpu)
+{
+ /* on es7000, logical apicid is the same as physical */
+ return early_per_cpu(x86_bios_cpu_apicid, cpu);
+}
+
static unsigned long calculate_ldr(int cpu)
{
unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void)
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
}
-static int es7000_apicid_to_node(int logical_apicid)
+static int es7000_numa_cpu_node(int cpu)
{
return 0;
}
-
static int es7000_cpu_present_to_apicid(int mps_cpu)
{
if (!mps_cpu)
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
++cpu_id;
}
-/* Mapping from cpu number to logical apicid */
-static int es7000_cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return cpu_2_logical_apicid[cpu];
-#else
- return logical_smp_processor_id();
-#endif
-}
-
static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
* The cpus in the mask must all be on the apic cluster.
*/
for_each_cpu(cpu, cpumask) {
- int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+ int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
WARN(1, "Not a valid mask!");
@@ -578,7 +571,7 @@ static unsigned int
es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
const struct cpumask *andmask)
{
- int apicid = es7000_cpu_to_logical_apicid(0);
+ int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
cpumask_var_t cpumask;
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = {
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
.setup_apic_routing = es7000_setup_apic_routing,
.multi_timer_check = NULL,
- .apicid_to_node = es7000_apicid_to_node,
- .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
.setup_portio_remap = NULL,
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+ .x86_32_early_logical_apicid = es7000_early_logical_apicid,
+ .x86_32_numa_cpu_node = es7000_numa_cpu_node,
};
struct apic __refdata apic_es7000 = {
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = {
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
.setup_apic_routing = es7000_setup_apic_routing,
.multi_timer_check = NULL,
- .apicid_to_node = es7000_apicid_to_node,
- .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
.setup_portio_remap = NULL,
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+ .x86_32_early_logical_apicid = es7000_early_logical_apicid,
+ .x86_32_numa_cpu_node = es7000_numa_cpu_node,
};
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 79fd43ca6f96..c4e557a1ebb6 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
arch_spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
show_regs(regs);
- dump_stack();
arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return NOTIFY_STOP;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ca9e2a3545a9..a96c87cfbabc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int skip_ioapic_setup;
-void arch_disable_smp_support(void)
+/**
+ * disable_ioapic_support() - disables ioapic support at runtime
+ */
+void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
noioapicquirk = 1;
@@ -120,11 +123,14 @@ void arch_disable_smp_support(void)
static int __init parse_noapic(char *str)
{
/* disable IO-APIC */
- arch_disable_smp_support();
+ disable_ioapic_support();
return 0;
}
early_param("noapic", parse_noapic);
+static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
+ struct io_apic_irq_attr *attr);
+
/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
@@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq)
#define default_MCA_trigger(idx) (1)
#define default_MCA_polarity(idx) default_ISA_polarity(idx)
-static int MPBIOS_polarity(int idx)
+static int irq_polarity(int idx)
{
int bus = mp_irqs[idx].srcbus;
int polarity;
@@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx)
return polarity;
}
-static int MPBIOS_trigger(int idx)
+static int irq_trigger(int idx)
{
int bus = mp_irqs[idx].srcbus;
int trigger;
@@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx)
return trigger;
}
-static inline int irq_polarity(int idx)
-{
- return MPBIOS_polarity(idx);
-}
-
-static inline int irq_trigger(int idx)
-{
- return MPBIOS_trigger(idx);
-}
-
static int pin_2_irq(int idx, int apic, int pin)
{
int irq;
@@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu)
static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;
-#define IOAPIC_AUTO -1
-#define IOAPIC_EDGE 0
-#define IOAPIC_LEVEL 1
-
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
@@ -1385,33 +1377,26 @@ static struct {
DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];
-static void __init setup_IO_APIC_irqs(void)
+static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin)
{
- int apic_id, pin, idx, irq, notcon = 0;
- int node = cpu_to_node(0);
- struct irq_cfg *cfg;
+ if (idx != -1)
+ return false;
- apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+ apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
+ mp_ioapics[apic_id].apicid, pin);
+ return true;
+}
+
+static void __init __io_apic_setup_irqs(unsigned int apic_id)
+{
+ int idx, node = cpu_to_node(0);
+ struct io_apic_irq_attr attr;
+ unsigned int pin, irq;
- for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
idx = find_irq_entry(apic_id, pin, mp_INT);
- if (idx == -1) {
- if (!notcon) {
- notcon = 1;
- apic_printk(APIC_VERBOSE,
- KERN_DEBUG " %d-%d",
- mp_ioapics[apic_id].apicid, pin);
- } else
- apic_printk(APIC_VERBOSE, " %d-%d",
- mp_ioapics[apic_id].apicid, pin);
+ if (io_apic_pin_not_connected(idx, apic_id, pin))
continue;
- }
- if (notcon) {
- apic_printk(APIC_VERBOSE,
- " (apicid-pin) not connected\n");
- notcon = 0;
- }
irq = pin_2_irq(idx, apic_id, pin);
@@ -1423,25 +1408,24 @@ static void __init setup_IO_APIC_irqs(void)
* installed and if it returns 1:
*/
if (apic->multi_timer_check &&
- apic->multi_timer_check(apic_id, irq))
+ apic->multi_timer_check(apic_id, irq))
continue;
- cfg = alloc_irq_and_cfg_at(irq, node);
- if (!cfg)
- continue;
+ set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
+ irq_polarity(idx));
- add_pin_to_irq_node(cfg, node, apic_id, pin);
- /*
- * don't mark it in pin_programmed, so later acpi could
- * set it correctly when irq < 16
- */
- setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
- irq_polarity(idx));
+ io_apic_setup_irq_pin(irq, node, &attr);
}
+}
- if (notcon)
- apic_printk(APIC_VERBOSE,
- " (apicid-pin) not connected\n");
+static void __init setup_IO_APIC_irqs(void)
+{
+ unsigned int apic_id;
+
+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+
+ for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
+ __io_apic_setup_irqs(apic_id);
}
/*
@@ -1452,7 +1436,7 @@ static void __init setup_IO_APIC_irqs(void)
void setup_IO_APIC_irq_extra(u32 gsi)
{
int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
- struct irq_cfg *cfg;
+ struct io_apic_irq_attr attr;
/*
* Convert 'gsi' to 'ioapic.pin'.
@@ -1472,21 +1456,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
if (apic_id == 0 || irq < NR_IRQS_LEGACY)
return;
- cfg = alloc_irq_and_cfg_at(irq, node);
- if (!cfg)
- return;
-
- add_pin_to_irq_node(cfg, node, apic_id, pin);
-
- if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
- pr_debug("Pin %d-%d already programmed\n",
- mp_ioapics[apic_id].apicid, pin);
- return;
- }
- set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+ set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
+ irq_polarity(idx));
- setup_ioapic_irq(apic_id, pin, irq, cfg,
- irq_trigger(idx), irq_polarity(idx));
+ io_apic_setup_irq_pin_once(irq, node, &attr);
}
/*
@@ -3605,7 +3578,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
}
#endif /* CONFIG_HT_IRQ */
-int __init io_apic_get_redir_entries (int ioapic)
+int
+io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
+{
+ struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
+ int ret;
+
+ if (!cfg)
+ return -EINVAL;
+ ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
+ if (!ret)
+ setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg,
+ attr->trigger, attr->polarity);
+ return ret;
+}
+
+static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
+ struct io_apic_irq_attr *attr)
+{
+ unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
+ int ret;
+
+ /* Avoid redundant programming */
+ if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) {
+ pr_debug("Pin %d-%d already programmed\n",
+ mp_ioapics[id].apicid, pin);
+ return 0;
+ }
+ ret = io_apic_setup_irq_pin(irq, node, attr);
+ if (!ret)
+ set_bit(pin, mp_ioapic_routing[id].pin_programmed);
+ return ret;
+}
+
+static int __init io_apic_get_redir_entries(int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -3659,96 +3665,24 @@ int __init arch_probe_nr_irqs(void)
}
#endif
-static int __io_apic_set_pci_routing(struct device *dev, int irq,
- struct io_apic_irq_attr *irq_attr)
+int io_apic_set_pci_routing(struct device *dev, int irq,
+ struct io_apic_irq_attr *irq_attr)
{
- struct irq_cfg *cfg;
int node;
- int ioapic, pin;
- int trigger, polarity;
- ioapic = irq_attr->ioapic;
if (!IO_APIC_IRQ(irq)) {
apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
- ioapic);
+ irq_attr->ioapic);
return -EINVAL;
}
- if (dev)
- node = dev_to_node(dev);
- else
- node = cpu_to_node(0);
-
- cfg = alloc_irq_and_cfg_at(irq, node);
- if (!cfg)
- return 0;
-
- pin = irq_attr->ioapic_pin;
- trigger = irq_attr->trigger;
- polarity = irq_attr->polarity;
-
- /*
- * IRQs < 16 are already in the irq_2_pin[] map
- */
- if (irq >= legacy_pic->nr_legacy_irqs) {
- if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
- printk(KERN_INFO "can not add pin %d for irq %d\n",
- pin, irq);
- return 0;
- }
- }
-
- setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);
+ node = dev ? dev_to_node(dev) : cpu_to_node(0);
- return 0;
+ return io_apic_setup_irq_pin_once(irq, node, irq_attr);
}
-int io_apic_set_pci_routing(struct device *dev, int irq,
- struct io_apic_irq_attr *irq_attr)
-{
- int ioapic, pin;
- /*
- * Avoid pin reprogramming. PRTs typically include entries
- * with redundant pin->gsi mappings (but unique PCI devices);
- * we only program the IOAPIC on the first.
- */
- ioapic = irq_attr->ioapic;
- pin = irq_attr->ioapic_pin;
- if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
- pr_debug("Pin %d-%d already programmed\n",
- mp_ioapics[ioapic].apicid, pin);
- return 0;
- }
- set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);
-
- return __io_apic_set_pci_routing(dev, irq, irq_attr);
-}
-
-u8 __init io_apic_unique_id(u8 id)
-{
#ifdef CONFIG_X86_32
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
- !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
- return io_apic_get_unique_id(nr_ioapics, id);
- else
- return id;
-#else
- int i;
- DECLARE_BITMAP(used, 256);
-
- bitmap_zero(used, 256);
- for (i = 0; i < nr_ioapics; i++) {
- struct mpc_ioapic *ia = &mp_ioapics[i];
- __set_bit(ia->apicid, used);
- }
- if (!test_bit(id, used))
- return id;
- return find_first_zero_bit(used, 256);
-#endif
-}
-
-#ifdef CONFIG_X86_32
-int __init io_apic_get_unique_id(int ioapic, int apic_id)
+static int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
union IO_APIC_reg_00 reg_00;
static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -3821,9 +3755,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
return apic_id;
}
+
+static u8 __init io_apic_unique_id(u8 id)
+{
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+ !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+ return io_apic_get_unique_id(nr_ioapics, id);
+ else
+ return id;
+}
+#else
+static u8 __init io_apic_unique_id(u8 id)
+{
+ int i;
+ DECLARE_BITMAP(used, 256);
+
+ bitmap_zero(used, 256);
+ for (i = 0; i < nr_ioapics; i++) {
+ struct mpc_ioapic *ia = &mp_ioapics[i];
+ __set_bit(ia->apicid, used);
+ }
+ if (!test_bit(id, used))
+ return id;
+ return find_first_zero_bit(used, 256);
+}
#endif
-int __init io_apic_get_version(int ioapic)
+static int __init io_apic_get_version(int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -4026,10 +3984,10 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
return gsi - mp_gsi_routing[ioapic].gsi_base;
}
-static int bad_ioapic(unsigned long address)
+static __init int bad_ioapic(unsigned long address)
{
if (nr_ioapics >= MAX_IO_APICS) {
- printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
+ printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
"(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
return 1;
}
@@ -4086,20 +4044,15 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
- struct irq_cfg *cfg;
+ struct io_apic_irq_attr attr = { 0, 0, 0, 0 };
printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
physid_set_mask_of_physid(boot_cpu_physical_apicid,
&phys_cpu_present_map);
#endif
- /* Make sure the irq descriptor is set up */
- cfg = alloc_irq_and_cfg_at(0, 0);
-
setup_local_APIC();
- add_pin_to_irq_node(cfg, 0, 0, 0);
+ io_apic_setup_irq_pin(0, 0, &attr);
set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
-
- setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
}
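The io_apic.c refactoring above funnels every pin-programming path through io_apic_setup_irq_pin(), with io_apic_setup_irq_pin_once() adding the "already programmed" bookkeeping. A minimal caller sketch following that pattern; the apic, pin, trigger and polarity values here are made up for illustration and are not taken from the patch:

/* Illustration only: hypothetical values - IOAPIC 0, pin 4, edge, active high. */
static int __init example_route_pin(void)
{
	struct io_apic_irq_attr attr;

	set_io_apic_irq_attr(&attr, 0, 4, 0, 0);
	return io_apic_setup_irq_pin_once(4, cpu_to_node(0), &attr);
}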
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 08385e090a6f..cce91bf26676 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
local_irq_restore(flags);
}
+#ifdef CONFIG_X86_32
+
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector)
{
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
local_irq_save(flags);
for_each_cpu(query_cpu, mask)
__default_send_IPI_dest_field(
- apic->cpu_to_logical_apicid(query_cpu), vector,
- apic->dest_logical);
+ early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+ vector, apic->dest_logical);
local_irq_restore(flags);
}
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
if (query_cpu == this_cpu)
continue;
__default_send_IPI_dest_field(
- apic->cpu_to_logical_apicid(query_cpu), vector,
- apic->dest_logical);
+ early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+ vector, apic->dest_logical);
}
local_irq_restore(flags);
}
-#ifdef CONFIG_X86_32
-
/*
* This is only used on smaller machines.
*/
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 960f26ab5c9f..6273eee5134b 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask
return physids_promote(0xFUL, retmap);
}
-static inline int numaq_cpu_to_logical_apicid(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return cpu_2_logical_apicid[cpu];
-}
-
/*
* Supporting over 60 cpus on NUMA-Q requires a locality-dependent
* cpu to APIC ID relation to properly interact with the intelligent
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid)
return logical_apicid >> 4;
}
+static int numaq_numa_cpu_node(int cpu)
+{
+ int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+ if (logical_apicid != BAD_APICID)
+ return numaq_apicid_to_node(logical_apicid);
+ return NUMA_NO_NODE;
+}
+
static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
{
int node = numaq_apicid_to_node(logical_apicid);
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = {
.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
.setup_apic_routing = numaq_setup_apic_routing,
.multi_timer_check = numaq_multi_timer_check,
- .apicid_to_node = numaq_apicid_to_node,
- .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
.cpu_present_to_apicid = numaq_cpu_present_to_apicid,
.apicid_to_cpu_present = numaq_apicid_to_cpu_present,
.setup_portio_remap = numaq_setup_portio_remap,
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+ .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
+ .x86_32_numa_cpu_node = numaq_numa_cpu_node,
};
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 99d2fe016084..fc84c7b61108 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void)
apic->setup_apic_routing();
}
+static int default_x86_32_early_logical_apicid(int cpu)
+{
+ return 1 << cpu;
+}
+
static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
@@ -130,8 +135,6 @@ struct apic apic_default = {
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = setup_apic_flat_routing,
.multi_timer_check = NULL,
- .apicid_to_node = default_apicid_to_node,
- .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
@@ -167,6 +170,9 @@ struct apic apic_default = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+ .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid,
+ .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
extern struct apic apic_numaq;
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 9b419263d90d..e4b8059b414a 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit)
return 1;
}
-static void summit_init_apic_ldr(void)
+static int summit_early_logical_apicid(int cpu)
{
- unsigned long val, id;
int count = 0;
- u8 my_id = (u8)hard_smp_processor_id();
+ u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
u8 my_cluster = APIC_CLUSTER(my_id);
#ifdef CONFIG_SMP
u8 lid;
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void)
/* Create logical APIC IDs by counting CPUs already in cluster. */
for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
- lid = cpu_2_logical_apicid[i];
+ lid = early_per_cpu(x86_cpu_to_logical_apicid, i);
if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
++count;
}
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void)
/* We only have a 4 wide bitmap in cluster mode. If a deranged
* BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
- id = my_cluster | (1UL << count);
+ return my_cluster | (1UL << count);
+}
+
+static void summit_init_apic_ldr(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+ unsigned long val;
+
apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(id);
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void)
nr_ioapics);
}
-static int summit_apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
- return apicid_2_node[hard_smp_processor_id()];
-#else
- return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int summit_cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return cpu_2_logical_apicid[cpu];
-#else
- return logical_smp_processor_id();
-#endif
-}
-
static int summit_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
* The cpus in the mask must all be on the apic cluster.
*/
for_each_cpu(cpu, cpumask) {
- int new_apicid = summit_cpu_to_logical_apicid(cpu);
+ int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
printk("%s: Not a valid mask!\n", __func__);
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
const struct cpumask *andmask)
{
- int apicid = summit_cpu_to_logical_apicid(0);
+ int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
cpumask_var_t cpumask;
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -528,8 +514,6 @@ struct apic apic_summit = {
.ioapic_phys_id_map = summit_ioapic_phys_id_map,
.setup_apic_routing = summit_setup_apic_routing,
.multi_timer_check = NULL,
- .apicid_to_node = summit_apicid_to_node,
- .cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
.cpu_present_to_apicid = summit_cpu_present_to_apicid,
.apicid_to_cpu_present = summit_apicid_to_cpu_present,
.setup_portio_remap = NULL,
@@ -565,4 +549,7 @@ struct apic apic_summit = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+ .x86_32_early_logical_apicid = summit_early_logical_apicid,
+ .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
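The summit split above moves the cluster-relative id computation into summit_early_logical_apicid(): the new CPU's logical id is its cluster bits plus one bit per CPU already counted in that cluster, and the 4-wide bitmap is why more than four CPUs per cluster trips the BUG_ON. A worked example under those assumptions (illustration only):

/* Illustration only: the third CPU joining cluster 0x10 (two already
 * present) gets logical id 0x10 | (1 << 2) == 0x14. */
static int summit_logical_id_example(u8 cluster_bits, int cpus_in_cluster)
{
	return cluster_bits | (1 << cpus_in_cluster);
}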
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index cf69c59f4910..90949bbd566d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
- .apicid_to_node = NULL,
- .cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8972f38c5ced..c7e6d6645bf4 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
- .apicid_to_node = NULL,
- .cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index bd16b58b8850..3c289281394c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
- .apicid_to_node = NULL,
- .cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 0e4f24c2a746..9079926a5b18 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -227,6 +227,7 @@
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
+#include <linux/acpi.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -975,20 +976,10 @@ recalc:
static void apm_power_off(void)
{
- unsigned char po_bios_call[] = {
- 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
- 0x8e, 0xd0, /* movw ax,ss */
- 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
- 0xb8, 0x07, 0x53, /* movw $0x5307,ax */
- 0xbb, 0x01, 0x00, /* movw $0x0001,bx */
- 0xb9, 0x03, 0x00, /* movw $0x0003,cx */
- 0xcd, 0x15 /* int $0x15 */
- };
-
/* Some bioses don't like being called from CPU != 0 */
if (apm_info.realmode_power_off) {
set_cpus_allowed_ptr(current, cpumask_of(0));
- machine_real_restart(po_bios_call, sizeof(po_bios_call));
+ machine_real_restart(MRR_APM);
} else {
(void)set_system_power_state(APM_STATE_OFF);
}
@@ -2331,12 +2322,11 @@ static int __init apm_init(void)
apm_info.disabled = 1;
return -ENODEV;
}
- if (pm_flags & PM_ACPI) {
+ if (!acpi_disabled) {
printk(KERN_NOTICE "apm: overridden by ACPI.\n");
apm_info.disabled = 1;
return -ENODEV;
}
- pm_flags |= PM_APM;
/*
* Set up the long jump entry point to the APM BIOS, which is called
@@ -2428,7 +2418,6 @@ static void __exit apm_exit(void)
kthread_stop(kapmd_task);
kapmd_task = NULL;
}
- pm_flags &= ~PM_APM;
}
module_init(apm_init);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index cfa82c899f47..4f13fafc5264 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -1,5 +1,70 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+#define COMPILE_OFFSETS
+
+#include <linux/crypto.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/hardirq.h>
+#include <linux/suspend.h>
+#include <linux/kbuild.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/sigframe.h>
+#include <asm/bootparam.h>
+#include <asm/suspend.h>
+
+#ifdef CONFIG_XEN
+#include <xen/interface/xen.h>
+#endif
+
#ifdef CONFIG_X86_32
# include "asm-offsets_32.c"
#else
# include "asm-offsets_64.c"
#endif
+
+void common(void) {
+ BLANK();
+ OFFSET(TI_flags, thread_info, flags);
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+
+ BLANK();
+ OFFSET(pbe_address, pbe, address);
+ OFFSET(pbe_orig_address, pbe, orig_address);
+ OFFSET(pbe_next, pbe, next);
+
+#ifdef CONFIG_PARAVIRT
+ BLANK();
+ OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
+ OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
+ OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
+ OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
+ OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
+ OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+#endif
+
+#ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+ OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
+#endif
+
+ BLANK();
+ OFFSET(BP_scratch, boot_params, scratch);
+ OFFSET(BP_loadflags, boot_params, hdr.loadflags);
+ OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
+ OFFSET(BP_version, boot_params, hdr.version);
+ OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
+}
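As the new header comment says, this file is compiled only to produce assembler output that the build post-processes into asm-offsets.h. A sketch of the mechanism, assuming the DEFINE()/OFFSET() helpers from include/linux/kbuild.h of that era (illustration only, macro renamed to mark it as an example):

/* Illustration only: the helper emits a marker line into the compiler's
 * asm output, which the build later turns into "#define sym val". */
#define EXAMPLE_DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))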
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 1a4088dda37a..c29d631af6fc 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -1,26 +1,4 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <linux/crypto.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/kbuild.h>
#include <asm/ucontext.h>
-#include <asm/sigframe.h>
-#include <asm/pgtable.h>
-#include <asm/fixmap.h>
-#include <asm/processor.h>
-#include <asm/thread_info.h>
-#include <asm/bootparam.h>
-#include <asm/elf.h>
-#include <asm/suspend.h>
-
-#include <xen/interface/xen.h>
#include <linux/lguest.h>
#include "../../../drivers/lguest/lg.h"
@@ -51,21 +29,10 @@ void foo(void)
OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
BLANK();
- OFFSET(TI_task, thread_info, task);
- OFFSET(TI_exec_domain, thread_info, exec_domain);
- OFFSET(TI_flags, thread_info, flags);
- OFFSET(TI_status, thread_info, status);
- OFFSET(TI_preempt_count, thread_info, preempt_count);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
- OFFSET(TI_restart_block, thread_info, restart_block);
OFFSET(TI_sysenter_return, thread_info, sysenter_return);
OFFSET(TI_cpu, thread_info, cpu);
BLANK();
- OFFSET(GDS_size, desc_ptr, size);
- OFFSET(GDS_address, desc_ptr, address);
- BLANK();
-
OFFSET(PT_EBX, pt_regs, bx);
OFFSET(PT_ECX, pt_regs, cx);
OFFSET(PT_EDX, pt_regs, dx);
@@ -85,42 +52,13 @@ void foo(void)
OFFSET(PT_OLDSS, pt_regs, ss);
BLANK();
- OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
BLANK();
- OFFSET(pbe_address, pbe, address);
- OFFSET(pbe_orig_address, pbe, orig_address);
- OFFSET(pbe_next, pbe, next);
-
/* Offset from the sysenter stack to tss.sp0 */
DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
sizeof(struct tss_struct));
- DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
- DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
- DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
-
- OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-
-#ifdef CONFIG_PARAVIRT
- BLANK();
- OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
- OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
- OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
- OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
- OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
- OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
- OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
- OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
-#endif
-
-#ifdef CONFIG_XEN
- BLANK();
- OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
- OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
-#endif
-
#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
BLANK();
OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
@@ -139,11 +77,4 @@ void foo(void)
OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
#endif
-
- BLANK();
- OFFSET(BP_scratch, boot_params, scratch);
- OFFSET(BP_loadflags, boot_params, hdr.loadflags);
- OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
- OFFSET(BP_version, boot_params, hdr.version);
- OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
}
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 4a6aeedcd965..e72a1194af22 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,27 +1,4 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed to extract
- * and format the required data.
- */
-#define COMPILE_OFFSETS
-
-#include <linux/crypto.h>
-#include <linux/sched.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/suspend.h>
-#include <linux/kbuild.h>
-#include <asm/processor.h>
-#include <asm/segment.h>
-#include <asm/thread_info.h>
#include <asm/ia32.h>
-#include <asm/bootparam.h>
-#include <asm/suspend.h>
-
-#include <xen/interface/xen.h>
-
-#include <asm/sigframe.h>
#define __NO_STUBS 1
#undef __SYSCALL
@@ -33,41 +10,19 @@ static char syscalls[] = {
int main(void)
{
-#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
- ENTRY(state);
- ENTRY(flags);
- ENTRY(pid);
- BLANK();
-#undef ENTRY
-#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
- ENTRY(flags);
- ENTRY(addr_limit);
- ENTRY(preempt_count);
- ENTRY(status);
-#ifdef CONFIG_IA32_EMULATION
- ENTRY(sysenter_return);
-#endif
- BLANK();
-#undef ENTRY
#ifdef CONFIG_PARAVIRT
- BLANK();
- OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
- OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
- OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
- OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
- OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
- OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
- OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
- OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+ BLANK();
#endif
-
#ifdef CONFIG_IA32_EMULATION
-#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
+ OFFSET(TI_sysenter_return, thread_info, sysenter_return);
+ BLANK();
+
+#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
ENTRY(ax);
ENTRY(bx);
ENTRY(cx);
@@ -79,15 +34,12 @@ int main(void)
ENTRY(ip);
BLANK();
#undef ENTRY
- DEFINE(IA32_RT_SIGFRAME_sigcontext,
- offsetof (struct rt_sigframe_ia32, uc.uc_mcontext));
+
+ OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
BLANK();
#endif
- DEFINE(pbe_address, offsetof(struct pbe, address));
- DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
- DEFINE(pbe_next, offsetof(struct pbe, next));
- BLANK();
-#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
+
+#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
ENTRY(bx);
ENTRY(bx);
ENTRY(cx);
@@ -107,7 +59,8 @@ int main(void)
ENTRY(flags);
BLANK();
#undef ENTRY
-#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
+
+#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
ENTRY(cr0);
ENTRY(cr2);
ENTRY(cr3);
@@ -115,26 +68,11 @@ int main(void)
ENTRY(cr8);
BLANK();
#undef ENTRY
- DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
- BLANK();
- DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
- BLANK();
- DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+ OFFSET(TSS_ist, tss_struct, x86_tss.ist);
BLANK();
- OFFSET(BP_scratch, boot_params, scratch);
- OFFSET(BP_loadflags, boot_params, hdr.loadflags);
- OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
- OFFSET(BP_version, boot_params, hdr.version);
- OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
- BLANK();
- DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-#ifdef CONFIG_XEN
- BLANK();
- OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
- OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
-#undef ENTRY
-#endif
+ DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c7bedb83c5a..f771ab6b49e9 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
}
#endif
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
+/*
+ * To work around broken NUMA configs. Read the comment in
+ * srat_detect_node().
+ */
static int __cpuinit nearby_node(int apicid)
{
int i, node;
for (i = apicid - 1; i >= 0; i--) {
- node = apicid_to_node[i];
+ node = __apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
- node = apicid_to_node[i];
+ node = __apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
@@ -261,7 +265,7 @@ static int __cpuinit nearby_node(int apicid)
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
- u32 nodes;
+ u32 nodes, cores_per_cu = 1;
u8 node_id;
int cpu = smp_processor_id();
@@ -276,6 +280,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
/* get compute unit information */
smp_num_siblings = ((ebx >> 8) & 3) + 1;
c->compute_unit_id = ebx & 0xff;
+ cores_per_cu += ((ebx >> 8) & 3);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
@@ -288,15 +293,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
/* fixup multi-node processor information */
if (nodes > 1) {
u32 cores_per_node;
+ u32 cus_per_node;
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
cores_per_node = c->x86_max_cores / nodes;
+ cus_per_node = cores_per_node / cores_per_cu;
/* store NodeID, use llc_shared_map to store sibling info */
per_cpu(cpu_llc_id, cpu) = node_id;
- /* core id to be in range from 0 to (cores_per_node - 1) */
- c->cpu_core_id = c->cpu_core_id % cores_per_node;
+ /* core id has to be in the [0 .. cores_per_node - 1] range */
+ c->cpu_core_id %= cores_per_node;
+ c->compute_unit_id %= cus_per_node;
}
}
#endif
@@ -334,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id);
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
int cpu = smp_processor_id();
int node;
unsigned apicid = c->apicid;
- node = per_cpu(cpu_llc_id, cpu);
+ node = numa_cpu_node(cpu);
+ if (node == NUMA_NO_NODE)
+ node = per_cpu(cpu_llc_id, cpu);
- if (apicid_to_node[apicid] != NUMA_NO_NODE)
- node = apicid_to_node[apicid];
if (!node_online(node)) {
- /* Two possibilities here:
- - The CPU is missing memory and no node was created.
- In that case try picking one from a nearby CPU
- - The APIC IDs differ from the HyperTransport node IDs
- which the K8 northbridge parsing fills in.
- Assume they are all increased by a constant offset,
- but in the same order as the HT nodeids.
- If that doesn't result in a usable node fall back to the
- path for the previous case. */
-
+ /*
+ * Two possibilities here:
+ *
+ * - The CPU is missing memory and no node was created. In
+ * that case try picking one from a nearby CPU.
+ *
+ * - The APIC IDs differ from the HyperTransport node IDs
+ * which the K8 northbridge parsing fills in. Assume
+ * they are all increased by a constant offset, but in
+ * the same order as the HT nodeids. If that doesn't
+ * result in a usable node fall back to the path for the
+ * previous case.
+ *
+ * This workaround operates directly on the mapping between
+ * APIC ID and NUMA node, assuming certain relationship
+ * between APIC ID, HT node ID and NUMA topology. As going
+ * through CPU mapping may alter the outcome, directly
+ * access __apicid_to_node[].
+ */
int ht_nodeid = c->initial_apicid;
if (ht_nodeid >= 0 &&
- apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
- node = apicid_to_node[ht_nodeid];
+ __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+ node = __apicid_to_node[ht_nodeid];
/* Pick a nearby node */
if (!node_online(node))
node = nearby_node(apicid);
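A concrete case for the compute-unit bookkeeping added above: on a hypothetical two-node part with 8 cores and 2 cores per compute unit, cores_per_node is 4 and cus_per_node is 2, so cpu_core_id wraps into 0..3 and compute_unit_id into 0..1 on each node. Expressed as a small sketch (illustration only, made-up numbers):

/* Illustration only: normalize per-node ids the way amd_get_topology()
 * does, with hypothetical counts. */
static void normalize_ids_example(void)
{
	u32 x86_max_cores = 8, nodes = 2, cores_per_cu = 2;
	u32 cores_per_node = x86_max_cores / nodes;		/* 4 */
	u32 cus_per_node = cores_per_node / cores_per_cu;	/* 2 */
	u32 cpu_core_id = 5 % cores_per_node;			/* -> 1 */
	u32 compute_unit_id = 3 % cus_per_node;			/* -> 1 */

	(void)cpu_core_id;
	(void)compute_unit_id;
}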
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1d59834396bd..a2559c3ed500 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
select_idle_routine(c);
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
}
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac747f67..52c93648e492 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
- printk(KERN_WARNING PFX "Warning: EST-capable CPU "
- "detected. The acpi-cpufreq module offers "
- "voltage scaling in addition of frequency "
+ printk_once(KERN_WARNING PFX "Warning: EST-capable "
+ "CPU detected. The acpi-cpufreq module offers "
+ "voltage scaling in addition to frequency "
"scaling. You should use that instead of "
"p4-clockmod, if possible.\n");
switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65e59be..c567dec854f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0, cpu;
+ int rv;
for_each_online_cpu(i) {
int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
cpb_capable = true;
- register_cpu_notifier(&cpb_nb);
-
msrs = msrs_alloc();
if (!msrs) {
printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
return -ENOMEM;
}
+ register_cpu_notifier(&cpb_nb);
+
rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
(cpb_enabled ? "on" : "off"));
}
- return cpufreq_register_driver(&cpufreq_amd64_driver);
+ rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+ unregister_cpu_notifier(&cpb_nb);
+ msrs_free(msrs);
+ msrs = NULL;
+ }
+ return rv;
}
/* driver entry point for term */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d16c2c53d6bf..df86bc8c859d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
unsigned node;
int cpu = smp_processor_id();
- int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
- node = apicid_to_node[apicid];
+ node = numa_cpu_node(cpu);
if (node == NUMA_NO_NODE || !node_online(node)) {
/* reuse the value from init_cpu_to_node() */
node = cpu_to_node(cpu);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ec2c19a7b8ef..1ce1af2899df 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
struct _cache_attr {
struct attribute attr;
- ssize_t (*show)(struct _cpuid4_info *, char *);
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+ ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
+ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
+ unsigned int);
};
#ifdef CONFIG_AMD_NB
@@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
#define SHOW_CACHE_DISABLE(slot) \
static ssize_t \
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
+ unsigned int cpu) \
{ \
return show_cache_disable(this_leaf, buf, slot); \
}
@@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
#define STORE_CACHE_DISABLE(slot) \
static ssize_t \
store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
- const char *buf, size_t count) \
+ const char *buf, size_t count, \
+ unsigned int cpu) \
{ \
return store_cache_disable(this_leaf, buf, count, slot); \
}
@@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
show_cache_disable_1, store_cache_disable_1);
+static ssize_t
+show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+{
+ if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return -EINVAL;
+
+ return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+}
+
+static ssize_t
+store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
+ unsigned int cpu)
+{
+ unsigned long val;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 16, &val) < 0)
+ return -EINVAL;
+
+ if (amd_set_subcaches(cpu, val))
+ return -EINVAL;
+
+ return count;
+}
+
+static struct _cache_attr subcaches =
+ __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
+
#else /* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */
@@ -532,9 +568,9 @@ static int
__cpuinit cpuid4_cache_lookup_regs(int index,
struct _cpuid4_info_regs *this_leaf)
{
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
unsigned edx;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
@@ -732,11 +768,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
struct cpuinfo_x86 *c = &cpu_data(cpu);
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
- for_each_cpu(i, c->llc_shared_map) {
+ for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
if (!per_cpu(ici_cpuid4_info, i))
continue;
this_leaf = CPUID4_INFO_IDX(i, index);
- for_each_cpu(sibling, c->llc_shared_map) {
+ for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
if (!cpu_online(sibling))
continue;
set_bit(sibling, this_leaf->shared_cpu_map);
@@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
#define show_one_plus(file_name, object, val) \
-static ssize_t show_##file_name \
- (struct _cpuid4_info *this_leaf, char *buf) \
+static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
+ unsigned int cpu) \
{ \
return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
@@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int cpu)
{
return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
@@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
return n;
}
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
+ unsigned int cpu)
{
return show_shared_cpu_map_func(leaf, 0, buf);
}
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
+ unsigned int cpu)
{
return show_shared_cpu_map_func(leaf, 1, buf);
}
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int cpu)
{
switch (this_leaf->eax.split.type) {
case CACHE_TYPE_DATA:
@@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
n += 2;
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ n += 1;
+
attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
if (attrs == NULL)
return attrs = default_attrs;
@@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
attrs[n++] = &cache_disable_1.attr;
}
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ attrs[n++] = &subcaches.attr;
+
return attrs;
}
#endif
@@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ret = fattr->show ?
fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
- buf) :
+ buf, this_leaf->cpu) :
0;
return ret;
}
@@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
ret = fattr->store ?
fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
- buf, count) :
+ buf, count, this_leaf->cpu) :
0;
return ret;
}
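
The widened callback signature threads the owning CPU through the generic show()/store() dispatchers, which is what lets per-L3 attributes such as subcaches call amd_get_subcaches()/amd_set_subcaches() for the right CPU. A small userspace model of that dispatch pattern (the struct names and the size attribute are illustrative only):

#include <stdio.h>

struct leaf { unsigned int cpu; unsigned long size; };

/* attribute callbacks now receive the cpu explicitly */
struct cache_attr {
	const char *name;
	int (*show)(struct leaf *l, char *buf, unsigned int cpu);
};

static int show_size(struct leaf *l, char *buf, unsigned int cpu)
{
	return sprintf(buf, "cpu%u: %luK\n", cpu, l->size / 1024);
}

static struct cache_attr size_attr = { "size", show_size };

/* generic dispatcher, analogous to show() in intel_cacheinfo.c */
static int dispatch_show(struct cache_attr *a, struct leaf *l, char *buf)
{
	return a->show ? a->show(l, buf, l->cpu) : 0;
}

int main(void)
{
	struct leaf l = { .cpu = 3, .size = 6 * 1024 * 1024 };
	char buf[64];

	dispatch_show(&size_attr, &l, buf);
	fputs(buf, stdout);
	return 0;
}
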
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 8209472b27a5..83930deec3c6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -106,24 +106,34 @@ int apei_write_mce(struct mce *m)
ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
struct cper_mce_record rcd;
- ssize_t len;
-
- len = erst_read_next(&rcd.hdr, sizeof(rcd));
- if (len <= 0)
- return len;
- /* Can not skip other records in storage via ERST unless clear them */
- else if (len != sizeof(rcd) ||
- uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
- if (printk_ratelimit())
- pr_warning(
- "MCE-APEI: Can not skip the unknown record in ERST");
- return -EIO;
- }
-
+ int rc, pos;
+
+ rc = erst_get_record_id_begin(&pos);
+ if (rc)
+ return rc;
+retry:
+ rc = erst_get_record_id_next(&pos, record_id);
+ if (rc)
+ goto out;
+ /* no more records */
+ if (*record_id == APEI_ERST_INVALID_RECORD_ID)
+ goto out;
+ rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd));
+ /* someone else has cleared the record, try next one */
+ if (rc == -ENOENT)
+ goto retry;
+ else if (rc < 0)
+ goto out;
+ /* try to skip records of other types in storage */
+ else if (rc != sizeof(rcd) ||
+ uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE))
+ goto retry;
memcpy(m, &rcd.mce, sizeof(*m));
- *record_id = rcd.hdr.record_id;
+ rc = sizeof(*m);
+out:
+ erst_get_record_id_end();
- return sizeof(*m);
+ return rc;
}
/* Check whether there is a record in ERST */
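
The rewritten apei_read_mce() walks ERST by record ID — begin, next, read, end — and simply retries when a record has been cleared underneath it or was written by another creator. A compact userspace model of that retry loop, with the ERST calls replaced by stubs over a fixed record list:

#include <stdio.h>

#define INVALID_ID  (~0ull)
#define ENOENT_SIM  2

struct rec { unsigned long long id; int cleared; int foreign; };

static struct rec store[] = {
	{ 1, 1, 0 },	/* already cleared -> retry */
	{ 2, 0, 1 },	/* foreign creator  -> retry */
	{ 3, 0, 0 },	/* valid MCE record */
};
static unsigned int pos;

static void get_next_id(unsigned long long *id)
{
	*id = (pos < 3) ? store[pos].id : INVALID_ID;
	if (pos < 3)
		pos++;
}

static int read_record(unsigned long long id, int *foreign)
{
	struct rec *r = &store[id - 1];

	if (r->cleared)
		return -ENOENT_SIM;
	*foreign = r->foreign;
	return 0;
}

int main(void)
{
	unsigned long long id;
	int rc, foreign;

retry:
	get_next_id(&id);
	if (id == INVALID_ID)
		return 1;		/* no more records */
	rc = read_record(id, &foreign);
	if (rc == -ENOENT_SIM || foreign)
		goto retry;		/* skip cleared/foreign records */
	printf("consumed MCE record %llu\n", id);
	return 0;
}
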
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5bf2fac52aca..167f97b5596e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
int i, err = 0;
struct threshold_bank *b = NULL;
char name[32];
-#ifdef CONFIG_SMP
- struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
- i = cpumask_first(c->llc_shared_map);
+ i = cpumask_first(cpu_llc_shared_mask(cpu));
/* first core not up yet */
if (cpu_data(i).cpu_core_id)
@@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
- cpumask_copy(b->cpus, c->llc_shared_map);
+ cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d977a2ea693..10bfe2472d16 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -298,7 +298,7 @@ x86_perf_event_update(struct perf_event *event)
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
- rdmsrl(hwc->event_base + idx, new_raw_count);
+ rdmsrl(hwc->event_base, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -321,6 +321,24 @@ again:
return new_raw_count;
}
+/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
+static inline int x86_pmu_addr_offset(int index)
+{
+ if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ return index << 1;
+ return index;
+}
+
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+ return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+ return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+}
+
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
@@ -331,12 +349,12 @@ static bool reserve_pmc_hardware(void)
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
- if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+ if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}
for (i = 0; i < x86_pmu.num_counters; i++) {
- if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+ if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}
@@ -344,13 +362,13 @@ static bool reserve_pmc_hardware(void)
eventsel_fail:
for (i--; i >= 0; i--)
- release_evntsel_nmi(x86_pmu.eventsel + i);
+ release_evntsel_nmi(x86_pmu_config_addr(i));
i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
- release_perfctr_nmi(x86_pmu.perfctr + i);
+ release_perfctr_nmi(x86_pmu_event_addr(i));
return false;
}
@@ -360,8 +378,8 @@ static void release_pmc_hardware(void)
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
- release_perfctr_nmi(x86_pmu.perfctr + i);
- release_evntsel_nmi(x86_pmu.eventsel + i);
+ release_perfctr_nmi(x86_pmu_event_addr(i));
+ release_evntsel_nmi(x86_pmu_config_addr(i));
}
}
@@ -382,7 +400,7 @@ static bool check_hw_exists(void)
* complain and bail.
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
- reg = x86_pmu.eventsel + i;
+ reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
@@ -407,8 +425,8 @@ static bool check_hw_exists(void)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
- ret = checking_wrmsrl(x86_pmu.perfctr, val);
- ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+ ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+ ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
if (ret || val != val_new)
goto msr_fail;
@@ -617,11 +635,11 @@ static void x86_pmu_disable_all(void)
if (!test_bit(idx, cpuc->active_mask))
continue;
- rdmsrl(x86_pmu.eventsel + idx, val);
+ rdmsrl(x86_pmu_config_addr(idx), val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(x86_pmu.eventsel + idx, val);
+ wrmsrl(x86_pmu_config_addr(idx), val);
}
}
@@ -642,21 +660,24 @@ static void x86_pmu_disable(struct pmu *pmu)
x86_pmu.disable_all();
}
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+ u64 enable_mask)
+{
+ wrmsrl(hwc->config_base, hwc->config | enable_mask);
+}
+
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct perf_event *event = cpuc->events[idx];
- u64 val;
+ struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
continue;
- val = event->hw.config;
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(x86_pmu.eventsel + idx, val);
+ __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
@@ -821,15 +842,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- /*
- * We set it so that event_base + idx in wrmsr/rdmsr maps to
- * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
- */
- hwc->event_base =
- MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+ hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
} else {
- hwc->config_base = x86_pmu.eventsel;
- hwc->event_base = x86_pmu.perfctr;
+ hwc->config_base = x86_pmu_config_addr(hwc->idx);
+ hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
}
@@ -915,17 +931,11 @@ static void x86_pmu_enable(struct pmu *pmu)
x86_pmu.enable_all(added);
}
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
- u64 enable_mask)
-{
- wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+ wrmsrl(hwc->config_base, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -978,7 +988,7 @@ x86_perf_event_set_period(struct perf_event *event)
*/
local64_set(&hwc->prev_count, (u64)-left);
- wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+ wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
/*
 * Due to erratum on certain cpus we need
@@ -986,7 +996,7 @@ x86_perf_event_set_period(struct perf_event *event)
* is updated properly
*/
if (x86_pmu.perfctr_second_write) {
- wrmsrl(hwc->event_base + idx,
+ wrmsrl(hwc->event_base,
(u64)(-left) & x86_pmu.cntval_mask);
}
@@ -1113,8 +1123,8 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
- rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+ rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+ rdmsrl(x86_pmu_event_addr(idx), pmc_count);
prev_left = per_cpu(pmc_prev_left[idx], cpu);
@@ -1389,7 +1399,7 @@ static void __init pmu_check_apic(void)
pr_info("no hardware sampling interrupt available.\n");
}
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
{
struct event_constraint *c;
int err;
@@ -1608,7 +1618,7 @@ out:
return ret;
}
-int x86_pmu_event_init(struct perf_event *event)
+static int x86_pmu_event_init(struct perf_event *event)
{
struct pmu *tmp;
int err;
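
The new x86_pmu_config_addr()/x86_pmu_event_addr() helpers hide the family-15h MSR layout, where each counter's control/count pair is interleaved and therefore spaced two MSRs apart, while older AMD cores keep a dense index. A standalone sketch of the address computation; the MSR base numbers below are the usual architectural values and are included only for illustration:

#include <stdio.h>
#include <stdbool.h>

#define MSR_K7_EVNTSEL0    0xc0010000u
#define MSR_K7_PERFCTR0    0xc0010004u
#define MSR_F15H_PERF_CTL  0xc0010200u
#define MSR_F15H_PERF_CTR  0xc0010201u

/* On family 15h the CTL/CTR pairs are interleaved, so index i lives 2*i
 * MSRs above the base; on older parts the counters are densely packed. */
static unsigned int addr_offset(int index, bool perfctr_core)
{
	return perfctr_core ? (unsigned int)index << 1 : (unsigned int)index;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		printf("legacy : sel %#x ctr %#x\n",
		       MSR_K7_EVNTSEL0 + addr_offset(i, false),
		       MSR_K7_PERFCTR0 + addr_offset(i, false));
		printf("fam15h : sel %#x ctr %#x\n",
		       MSR_F15H_PERF_CTL + addr_offset(i, true),
		       MSR_F15H_PERF_CTR + addr_offset(i, true));
	}
	return 0;
}
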
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 67e2202a6039..461f62bbd774 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -127,6 +127,11 @@ static int amd_pmu_hw_config(struct perf_event *event)
/*
* AMD64 events are detected based on their event codes.
*/
+static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
+{
+ return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+}
+
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
return (hwc->config & 0xe0) == 0xe0;
@@ -385,13 +390,181 @@ static __initconst const struct x86_pmu amd_pmu = {
.cpu_dead = amd_pmu_cpu_dead,
};
+/* AMD Family 15h */
+
+#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
+
+#define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
+#define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
+#define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
+#define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
+#define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
+#define AMD_EVENT_EX_LS 0x000000C0ULL
+#define AMD_EVENT_DE 0x000000D0ULL
+#define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
+
+/*
+ * AMD family 15h event code/PMC mappings:
+ *
+ * type = event_code & 0x0F0:
+ *
+ * 0x000 FP PERF_CTL[5:3]
+ * 0x010 FP PERF_CTL[5:3]
+ * 0x020 LS PERF_CTL[5:0]
+ * 0x030 LS PERF_CTL[5:0]
+ * 0x040 DC PERF_CTL[5:0]
+ * 0x050 DC PERF_CTL[5:0]
+ * 0x060 CU PERF_CTL[2:0]
+ * 0x070 CU PERF_CTL[2:0]
+ * 0x080 IC/DE PERF_CTL[2:0]
+ * 0x090 IC/DE PERF_CTL[2:0]
+ * 0x0A0 ---
+ * 0x0B0 ---
+ * 0x0C0 EX/LS PERF_CTL[5:0]
+ * 0x0D0 DE PERF_CTL[2:0]
+ * 0x0E0 NB NB_PERF_CTL[3:0]
+ * 0x0F0 NB NB_PERF_CTL[3:0]
+ *
+ * Exceptions:
+ *
+ * 0x003 FP PERF_CTL[3]
+ * 0x00B FP PERF_CTL[3]
+ * 0x00D FP PERF_CTL[3]
+ * 0x023 DE PERF_CTL[2:0]
+ * 0x02D LS PERF_CTL[3]
+ * 0x02E LS PERF_CTL[3,0]
+ * 0x043 CU PERF_CTL[2:0]
+ * 0x045 CU PERF_CTL[2:0]
+ * 0x046 CU PERF_CTL[2:0]
+ * 0x054 CU PERF_CTL[2:0]
+ * 0x055 CU PERF_CTL[2:0]
+ * 0x08F IC PERF_CTL[0]
+ * 0x187 DE PERF_CTL[0]
+ * 0x188 DE PERF_CTL[0]
+ * 0x0DB EX PERF_CTL[5:0]
+ * 0x0DC LS PERF_CTL[5:0]
+ * 0x0DD LS PERF_CTL[5:0]
+ * 0x0DE LS PERF_CTL[5:0]
+ * 0x0DF LS PERF_CTL[5:0]
+ * 0x1D6 EX PERF_CTL[5:0]
+ * 0x1D8 EX PERF_CTL[5:0]
+ */
+
+static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
+static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
+static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
+static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
+static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
+static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
+
+static struct event_constraint *
+amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ unsigned int event_code = amd_get_event_code(&event->hw);
+
+ switch (event_code & AMD_EVENT_TYPE_MASK) {
+ case AMD_EVENT_FP:
+ switch (event_code) {
+ case 0x003:
+ case 0x00B:
+ case 0x00D:
+ return &amd_f15_PMC3;
+ default:
+ return &amd_f15_PMC53;
+ }
+ case AMD_EVENT_LS:
+ case AMD_EVENT_DC:
+ case AMD_EVENT_EX_LS:
+ switch (event_code) {
+ case 0x023:
+ case 0x043:
+ case 0x045:
+ case 0x046:
+ case 0x054:
+ case 0x055:
+ return &amd_f15_PMC20;
+ case 0x02D:
+ return &amd_f15_PMC3;
+ case 0x02E:
+ return &amd_f15_PMC30;
+ default:
+ return &amd_f15_PMC50;
+ }
+ case AMD_EVENT_CU:
+ case AMD_EVENT_IC_DE:
+ case AMD_EVENT_DE:
+ switch (event_code) {
+ case 0x08F:
+ case 0x187:
+ case 0x188:
+ return &amd_f15_PMC0;
+ case 0x0DB ... 0x0DF:
+ case 0x1D6:
+ case 0x1D8:
+ return &amd_f15_PMC50;
+ default:
+ return &amd_f15_PMC20;
+ }
+ case AMD_EVENT_NB:
+ /* not yet implemented */
+ return &emptyconstraint;
+ default:
+ return &emptyconstraint;
+ }
+}
+
+static __initconst const struct x86_pmu amd_pmu_f15h = {
+ .name = "AMD Family 15h",
+ .handle_irq = x86_pmu_handle_irq,
+ .disable_all = x86_pmu_disable_all,
+ .enable_all = x86_pmu_enable_all,
+ .enable = x86_pmu_enable_event,
+ .disable = x86_pmu_disable_event,
+ .hw_config = amd_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
+ .eventsel = MSR_F15H_PERF_CTL,
+ .perfctr = MSR_F15H_PERF_CTR,
+ .event_map = amd_pmu_event_map,
+ .max_events = ARRAY_SIZE(amd_perfmon_event_map),
+ .num_counters = 6,
+ .cntval_bits = 48,
+ .cntval_mask = (1ULL << 48) - 1,
+ .apic = 1,
+ /* use highest bit to detect overflow */
+ .max_period = (1ULL << 47) - 1,
+ .get_event_constraints = amd_get_event_constraints_f15h,
+ /* northbridge counters not yet implemented: */
+#if 0
+ .put_event_constraints = amd_put_event_constraints,
+
+ .cpu_prepare = amd_pmu_cpu_prepare,
+ .cpu_starting = amd_pmu_cpu_starting,
+ .cpu_dead = amd_pmu_cpu_dead,
+#endif
+};
+
static __init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)
return -ENODEV;
- x86_pmu = amd_pmu;
+ /*
+ * If core performance counter extensions exist, it must be
+ * family 15h; otherwise fail. See x86_pmu_addr_offset().
+ */
+ switch (boot_cpu_data.x86) {
+ case 0x15:
+ if (!cpu_has_perfctr_core)
+ return -ENODEV;
+ x86_pmu = amd_pmu_f15h;
+ break;
+ default:
+ if (cpu_has_perfctr_core)
+ return -ENODEV;
+ x86_pmu = amd_pmu;
+ break;
+ }
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
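
The constraint table follows from the event-code layout in the comment block above: bits [7:0] of the config plus the extended event-select bits [35:32] form the event code, and its 0x0F0 type nibble selects which of PERF_CTL[5:0] may host the event. A simplified model of that lookup — it keeps only a few of the per-event exceptions listed above, so it is a sketch rather than a drop-in replica:

#include <stdio.h>
#include <stdint.h>

/* counter masks: which of PERF_CTL[5:0] an event may use */
#define PMC0   0x01u
#define PMC20  0x07u
#define PMC3   0x08u
#define PMC30  0x09u
#define PMC50  0x3fu
#define PMC53  0x38u

static unsigned int event_code(uint64_t config)
{
	/* low 8 bits plus the extended event-select bits 35:32 */
	return (unsigned int)(((config >> 24) & 0x0f00) | (config & 0x00ff));
}

static unsigned int f15h_counter_mask(uint64_t config)
{
	unsigned int code = event_code(config);

	switch (code & 0x0f0) {
	case 0x000: case 0x010:			/* FP */
		return (code == 0x003 || code == 0x00b || code == 0x00d)
			? PMC3 : PMC53;
	case 0x020: case 0x030:			/* LS */
	case 0x040: case 0x050:			/* DC */
	case 0x0c0:				/* EX/LS */
		if (code == 0x02d)
			return PMC3;
		if (code == 0x02e)
			return PMC30;
		return PMC50;
	case 0x060: case 0x070:			/* CU */
	case 0x080: case 0x090:			/* IC/DE */
	case 0x0d0:				/* DE */
		return PMC20;
	default:				/* NB and reserved: no counters */
		return 0;
	}
}

int main(void)
{
	printf("event 0x02e -> mask %#x\n", f15h_counter_mask(0x02e));
	printf("event 0x076 -> mask %#x\n", f15h_counter_mask(0x076));
	return 0;
}
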
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 008835c1d79c..084b38362db7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -691,8 +691,8 @@ static void intel_pmu_reset(void)
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
- checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
+ checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
+ checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index f7a0993c1e7c..3769ac822f96 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -764,15 +764,20 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
u64 v;
/* an official way for overflow indication */
- rdmsrl(hwc->config_base + hwc->idx, v);
+ rdmsrl(hwc->config_base, v);
if (v & P4_CCCR_OVF) {
- wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+ wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
return 1;
}
- /* it might be unflagged overflow */
- rdmsrl(hwc->event_base + hwc->idx, v);
- if (!(v & ARCH_P4_CNTRVAL_MASK))
+ /*
+ * In some circumstances the overflow might issue an NMI without
+ * setting the P4_CCCR_OVF bit. Because a counter holds a negative
+ * value, we simply check whether the high bit is set; if it's cleared,
+ * the counter has reached zero and continued counting before the
+ * real NMI signal was received:
+ */
+ if (!(v & ARCH_P4_UNFLAGGED_BIT))
return 1;
return 0;
@@ -810,7 +815,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* state we need to clear P4_CCCR_OVF, otherwise interrupt get
* asserted again and again
*/
- (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (void)checking_wrmsrl(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
@@ -880,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
p4_pmu_enable_pebs(hwc->config);
(void)checking_wrmsrl(escr_addr, escr_conf);
- (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (void)checking_wrmsrl(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
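
The replacement check relies on the counter being programmed with a negative value: while it is still counting towards the overflow, the top bit of the counter value stays set, so a cleared top bit together with a clear P4_CCCR_OVF means the counter already wrapped before the NMI was delivered. A tiny model of that test, assuming the usual 40-bit P4 counter width:

#include <stdio.h>
#include <stdint.h>

#define CNTRVAL_BITS	40
#define CNTRVAL_MASK	((1ULL << CNTRVAL_BITS) - 1)
#define UNFLAGGED_BIT	(1ULL << (CNTRVAL_BITS - 1))

/* 1 = treat as overflow even though CCCR.OVF was not set */
static int unflagged_overflow(uint64_t counter)
{
	return !(counter & UNFLAGGED_BIT);
}

int main(void)
{
	uint64_t armed   = (uint64_t)(-5000LL) & CNTRVAL_MASK;	/* still counting up */
	uint64_t wrapped = 123;					/* passed zero already */

	printf("armed:   %d\n", unflagged_overflow(armed));
	printf("wrapped: %d\n", unflagged_overflow(wrapped));
	return 0;
}
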
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 34ba07be2cda..20c097e33860 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
+ (void)checking_wrmsrl(hwc->config_base, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
+ (void)checking_wrmsrl(hwc->config_base, val);
}
static __initconst const struct x86_pmu p6_pmu = {
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index d5a236615501..966512b2cacf 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -46,6 +46,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
/* returns the bit offset of the performance counter register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
+ if (msr >= MSR_F15H_PERF_CTR)
+ return (msr - MSR_F15H_PERF_CTR) >> 1;
return msr - MSR_K7_PERFCTR0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
@@ -70,6 +72,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* returns the bit offset of the event selection register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
+ if (msr >= MSR_F15H_PERF_CTL)
+ return (msr - MSR_F15H_PERF_CTL) >> 1;
return msr - MSR_K7_EVNTSEL0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
new file mode 100644
index 000000000000..7a8cebc9ff29
--- /dev/null
+++ b/arch/x86/kernel/devicetree.c
@@ -0,0 +1,441 @@
+/*
+ * Architecture specific OF callbacks.
+ */
+#include <linux/bootmem.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/of_pci.h>
+
+#include <asm/hpet.h>
+#include <asm/irq_controller.h>
+#include <asm/apic.h>
+#include <asm/pci_x86.h>
+
+__initdata u64 initial_dtb;
+char __initdata cmd_line[COMMAND_LINE_SIZE];
+static LIST_HEAD(irq_domains);
+static DEFINE_RAW_SPINLOCK(big_irq_lock);
+
+int __initdata of_ioapic;
+
+#ifdef CONFIG_X86_IO_APIC
+static void add_interrupt_host(struct irq_domain *ih)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&big_irq_lock, flags);
+ list_add(&ih->l, &irq_domains);
+ raw_spin_unlock_irqrestore(&big_irq_lock, flags);
+}
+#endif
+
+static struct irq_domain *get_ih_from_node(struct device_node *controller)
+{
+ struct irq_domain *ih, *found = NULL;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&big_irq_lock, flags);
+ list_for_each_entry(ih, &irq_domains, l) {
+ if (ih->controller == controller) {
+ found = ih;
+ break;
+ }
+ }
+ raw_spin_unlock_irqrestore(&big_irq_lock, flags);
+ return found;
+}
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ struct irq_domain *ih;
+ u32 virq, type;
+ int ret;
+
+ ih = get_ih_from_node(controller);
+ if (!ih)
+ return 0;
+ ret = ih->xlate(ih, intspec, intsize, &virq, &type);
+ if (ret)
+ return ret;
+ if (type == IRQ_TYPE_NONE)
+ return virq;
+ /* set the mask if it is different from current */
+ if (type == (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
+ set_irq_type(virq, type);
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ /*
+ * The ioport address can be directly used by inX / outX
+ */
+ BUG_ON(address >= (1 << 16));
+ return (unsigned long)address;
+}
+EXPORT_SYMBOL_GPL(pci_address_to_pio);
+
+void __init early_init_dt_scan_chosen_arch(unsigned long node)
+{
+ BUG();
+}
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ BUG();
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
+}
+
+void __init add_dtb(u64 data)
+{
+ initial_dtb = data + offsetof(struct setup_data, data);
+}
+
+/*
+ * CE4100 ids. Will be moved to machine_device_initcall() once we have it.
+ */
+static struct of_device_id __initdata ce4100_ids[] = {
+ { .compatible = "intel,ce4100-cp", },
+ { .compatible = "isa", },
+ { .compatible = "pci", },
+ {},
+};
+
+static int __init add_bus_probe(void)
+{
+ if (!of_have_populated_dt())
+ return 0;
+
+ return of_platform_bus_probe(NULL, ce4100_ids, NULL);
+}
+module_init(add_bus_probe);
+
+#ifdef CONFIG_PCI
+static int x86_of_pci_irq_enable(struct pci_dev *dev)
+{
+ struct of_irq oirq;
+ u32 virq;
+ int ret;
+ u8 pin;
+
+ ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (ret)
+ return ret;
+ if (!pin)
+ return 0;
+
+ ret = of_irq_map_pci(dev, &oirq);
+ if (ret)
+ return ret;
+
+ virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+ if (virq == 0)
+ return -EINVAL;
+ dev->irq = virq;
+ return 0;
+}
+
+static void x86_of_pci_irq_disable(struct pci_dev *dev)
+{
+}
+
+void __cpuinit x86_of_pci_init(void)
+{
+ struct device_node *np;
+
+ pcibios_enable_irq = x86_of_pci_irq_enable;
+ pcibios_disable_irq = x86_of_pci_irq_disable;
+
+ for_each_node_by_type(np, "pci") {
+ const void *prop;
+ struct pci_bus *bus;
+ unsigned int bus_min;
+ struct device_node *child;
+
+ prop = of_get_property(np, "bus-range", NULL);
+ if (!prop)
+ continue;
+ bus_min = be32_to_cpup(prop);
+
+ bus = pci_find_bus(0, bus_min);
+ if (!bus) {
+ printk(KERN_ERR "Can't find a node for bus %s.\n",
+ np->full_name);
+ continue;
+ }
+
+ if (bus->self)
+ bus->self->dev.of_node = np;
+ else
+ bus->dev.of_node = np;
+
+ for_each_child_of_node(np, child) {
+ struct pci_dev *dev;
+ u32 devfn;
+
+ prop = of_get_property(child, "reg", NULL);
+ if (!prop)
+ continue;
+
+ devfn = (be32_to_cpup(prop) >> 8) & 0xff;
+ dev = pci_get_slot(bus, devfn);
+ if (!dev)
+ continue;
+ dev->dev.of_node = child;
+ pci_dev_put(dev);
+ }
+ }
+}
+#endif
+
+static void __init dtb_setup_hpet(void)
+{
+#ifdef CONFIG_HPET_TIMER
+ struct device_node *dn;
+ struct resource r;
+ int ret;
+
+ dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet");
+ if (!dn)
+ return;
+ ret = of_address_to_resource(dn, 0, &r);
+ if (ret) {
+ WARN_ON(1);
+ return;
+ }
+ hpet_address = r.start;
+#endif
+}
+
+static void __init dtb_lapic_setup(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ struct device_node *dn;
+ struct resource r;
+ int ret;
+
+ dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic");
+ if (!dn)
+ return;
+
+ ret = of_address_to_resource(dn, 0, &r);
+ if (WARN_ON(ret))
+ return;
+
+ /* Did the boot loader set up the local APIC? */
+ if (!cpu_has_apic) {
+ if (apic_force_enable(r.start))
+ return;
+ }
+ smp_found_config = 1;
+ pic_mode = 1;
+ register_lapic_address(r.start);
+ generic_processor_info(boot_cpu_physical_apicid,
+ GET_APIC_VERSION(apic_read(APIC_LVR)));
+#endif
+}
+
+#ifdef CONFIG_X86_IO_APIC
+static unsigned int ioapic_id;
+
+static void __init dtb_add_ioapic(struct device_node *dn)
+{
+ struct resource r;
+ int ret;
+
+ ret = of_address_to_resource(dn, 0, &r);
+ if (ret) {
+ printk(KERN_ERR "Can't obtain address from node %s.\n",
+ dn->full_name);
+ return;
+ }
+ mp_register_ioapic(++ioapic_id, r.start, gsi_top);
+}
+
+static void __init dtb_ioapic_setup(void)
+{
+ struct device_node *dn;
+
+ for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic")
+ dtb_add_ioapic(dn);
+
+ if (nr_ioapics) {
+ of_ioapic = 1;
+ return;
+ }
+ printk(KERN_ERR "Error: No information about IO-APIC in OF.\n");
+}
+#else
+static void __init dtb_ioapic_setup(void) {}
+#endif
+
+static void __init dtb_apic_setup(void)
+{
+ dtb_lapic_setup();
+ dtb_ioapic_setup();
+}
+
+#ifdef CONFIG_OF_FLATTREE
+static void __init x86_flattree_get_config(void)
+{
+ u32 size, map_len;
+ void *new_dtb;
+
+ if (!initial_dtb)
+ return;
+
+ map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK),
+ (u64)sizeof(struct boot_param_header));
+
+ initial_boot_params = early_memremap(initial_dtb, map_len);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+ if (map_len < size) {
+ early_iounmap(initial_boot_params, map_len);
+ initial_boot_params = early_memremap(initial_dtb, size);
+ map_len = size;
+ }
+
+ new_dtb = alloc_bootmem(size);
+ memcpy(new_dtb, initial_boot_params, size);
+ early_iounmap(initial_boot_params, map_len);
+
+ initial_boot_params = new_dtb;
+
+ /* root level address cells */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ unflatten_device_tree();
+}
+#else
+static inline void x86_flattree_get_config(void) { }
+#endif
+
+void __init x86_dtb_init(void)
+{
+ x86_flattree_get_config();
+
+ if (!of_have_populated_dt())
+ return;
+
+ dtb_setup_hpet();
+ dtb_apic_setup();
+}
+
+#ifdef CONFIG_X86_IO_APIC
+
+struct of_ioapic_type {
+ u32 out_type;
+ u32 trigger;
+ u32 polarity;
+};
+
+static struct of_ioapic_type of_ioapic_type[] =
+{
+ {
+ .out_type = IRQ_TYPE_EDGE_RISING,
+ .trigger = IOAPIC_EDGE,
+ .polarity = 1,
+ },
+ {
+ .out_type = IRQ_TYPE_LEVEL_LOW,
+ .trigger = IOAPIC_LEVEL,
+ .polarity = 0,
+ },
+ {
+ .out_type = IRQ_TYPE_LEVEL_HIGH,
+ .trigger = IOAPIC_LEVEL,
+ .polarity = 1,
+ },
+ {
+ .out_type = IRQ_TYPE_EDGE_FALLING,
+ .trigger = IOAPIC_EDGE,
+ .polarity = 0,
+ },
+};
+
+static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize,
+ u32 *out_hwirq, u32 *out_type)
+{
+ struct io_apic_irq_attr attr;
+ struct of_ioapic_type *it;
+ u32 line, idx, type;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ line = *intspec;
+ idx = (u32) id->priv;
+ *out_hwirq = line + mp_gsi_routing[idx].gsi_base;
+
+ intspec++;
+ type = *intspec;
+
+ if (type >= ARRAY_SIZE(of_ioapic_type))
+ return -EINVAL;
+
+ it = of_ioapic_type + type;
+ *out_type = it->out_type;
+
+ set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity);
+
+ return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr);
+}
+
+static void __init ioapic_add_ofnode(struct device_node *np)
+{
+ struct resource r;
+ int i, ret;
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret) {
+ printk(KERN_ERR "Failed to obtain address for %s\n",
+ np->full_name);
+ return;
+ }
+
+ for (i = 0; i < nr_ioapics; i++) {
+ if (r.start == mp_ioapics[i].apicaddr) {
+ struct irq_domain *id;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ BUG_ON(!id);
+ id->controller = np;
+ id->xlate = ioapic_xlate;
+ id->priv = (void *)i;
+ add_interrupt_host(id);
+ return;
+ }
+ }
+ printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name);
+}
+
+void __init x86_add_irq_domains(void)
+{
+ struct device_node *dp;
+
+ if (!of_have_populated_dt())
+ return;
+
+ for_each_node_with_property(dp, "interrupt-controller") {
+ if (of_device_is_compatible(dp, "intel,ce4100-ioapic"))
+ ioapic_add_ofnode(dp);
+ }
+}
+#else
+void __init x86_add_irq_domains(void) { }
+#endif
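
ioapic_xlate() consumes a two-cell interrupt specifier: the first cell is the pin on the IO-APIC and the second indexes the trigger/polarity table. A standalone model of that decoding; the table mirrors of_ioapic_type[] and the GSI base is a made-up value for illustration:

#include <stdio.h>

struct ioapic_type { const char *name; int level; int polarity; };

/* the second specifier cell selects trigger/polarity, mirroring
 * the of_ioapic_type[] table in devicetree.c */
static const struct ioapic_type types[] = {
	{ "edge-rising",  0, 1 },
	{ "level-low",    1, 0 },
	{ "level-high",   1, 1 },
	{ "edge-falling", 0, 0 },
};

/* hypothetical GSI base of the IO-APIC being targeted */
#define GSI_BASE 24

static int xlate(const unsigned int *intspec, unsigned int intsize,
		 unsigned int *out_hwirq)
{
	unsigned int line, type;

	if (intsize < 2)
		return -22;			/* -EINVAL */
	line = intspec[0];
	type = intspec[1];
	if (type >= sizeof(types) / sizeof(types[0]))
		return -22;

	*out_hwirq = GSI_BASE + line;
	printf("pin %u -> gsi %u, %s\n", line, *out_hwirq, types[type].name);
	return 0;
}

int main(void)
{
	unsigned int spec[2] = { 3, 2 };	/* pin 3, level-high */
	unsigned int hwirq;

	return xlate(spec, 2, &hwirq);
}
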
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index df20723a6a1b..220a1c11cfde 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -320,31 +320,6 @@ void die(const char *str, struct pt_regs *regs, long err)
oops_end(flags, regs, sig);
}
-void notrace __kprobes
-die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
- unsigned long flags;
-
- if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
- return;
-
- /*
- * We are in trouble anyway, lets at least try
- * to get a message out.
- */
- flags = oops_begin();
- printk(KERN_EMERG "%s", str);
- printk(" on CPU%d, ip %08lx, registers:\n",
- smp_processor_id(), regs->ip);
- show_registers(regs);
- oops_end(flags, regs, 0);
- if (do_panic || panic_on_oops)
- panic("Non maskable interrupt");
- nmi_exit();
- local_irq_enable();
- do_exit(SIGBUS);
-}
-
static int __init oops_setup(char *s)
{
if (!s)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 294f26da0c0c..cdf5bfd9d4d5 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -667,21 +667,15 @@ __init void e820_setup_gap(void)
* boot_params.e820_map, others are passed via SETUP_E820_EXT node of
* linked list of struct setup_data, which is parsed here.
*/
-void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
+void __init parse_e820_ext(struct setup_data *sdata)
{
- u32 map_len;
int entries;
struct e820entry *extmap;
entries = sdata->len / sizeof(struct e820entry);
- map_len = sdata->len + sizeof(struct setup_data);
- if (map_len > PAGE_SIZE)
- sdata = early_ioremap(pa_data, map_len);
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- if (map_len > PAGE_SIZE)
- early_iounmap(sdata, map_len);
printk(KERN_INFO "extended physical RAM map:\n");
e820_print_map("extended");
}
@@ -847,15 +841,21 @@ static int __init parse_memopt(char *p)
if (!p)
return -EINVAL;
-#ifdef CONFIG_X86_32
if (!strcmp(p, "nopentium")) {
+#ifdef CONFIG_X86_32
setup_clear_cpu_cap(X86_FEATURE_PSE);
return 0;
- }
+#else
+ printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
+ return -EINVAL;
#endif
+ }
userdef = 1;
mem_size = memparse(p, &p);
+ /* don't remove all of memory when handling "mem={invalid}" param */
+ if (mem_size == 0)
+ return -EINVAL;
e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
return 0;
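
The parse_memopt() change rejects a mem= argument that parses to zero instead of trimming away all of RAM. A small model of that guard, with a memparse()-style helper for the K/M/G suffixes (the helper is illustrative, not the kernel's implementation):

#include <stdio.h>
#include <stdlib.h>

/* minimal memparse()-style helper: number with optional K/M/G suffix */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; break;
	}
	return v;
}

static int handle_mem_opt(const char *arg)
{
	unsigned long long mem_size = parse_size(arg);

	/* "mem=bogus" parses to 0; refuse it instead of removing all RAM */
	if (mem_size == 0)
		return -22;	/* -EINVAL */

	printf("would clamp RAM above %llu bytes\n", mem_size);
	return 0;
}

int main(void)
{
	printf("%d\n", handle_mem_opt("512M"));
	printf("%d\n", handle_mem_opt("junk"));
	return 0;
}
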
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd953dee..9efbdcc56425 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
- u32 old, d;
+ u32 d;
- d = read_pci_config(num, slot, func, 0x70);
- old = d;
- d &= ~(1<<8);
- write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
- write_pci_config(num, slot, func, 0x70, old);
return d;
}
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
{
u32 d, rev;
- if (acpi_use_timer_override)
- return;
-
rev = ati_sbx00_rev(num, slot, func);
+ if (rev >= 0x40)
+ acpi_fix_pin2_polarity = 1;
+
if (rev > 0x13)
return;
+ if (acpi_use_timer_override)
+ return;
+
/* check for IRQ0 interrupt swap */
d = read_pci_config(num, slot, func, 0x64);
if (!(d & (1<<14)))
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c8b4efad7ebb..49bdedd2dbcf 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -395,7 +395,7 @@ sysenter_past_esp:
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words
* pushed above; +8 corresponds to copy_thread's esp0 setting.
*/
- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
+ pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl_cfi %eax
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index aed1ffbeb0c9..891268c645ea 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -975,9 +975,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \
x86_platform_ipi smp_x86_platform_ipi
#ifdef CONFIG_SMP
-.irpc idx, "01234567"
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
invalidate_interrupt\idx smp_invalidate_interrupt
+.endif
.endr
#endif
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 7f138b3c3c52..d6d6bb361931 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -34,15 +34,6 @@ void __init i386_start_kernel(void)
{
memblock_init();
-#ifdef CONFIG_X86_TRAMPOLINE
- /*
- * But first pinch a few for the stack/trampoline stuff
- * FIXME: Don't need the extra page at 4K, but need to fix
- * trampoline before removing it. (see the GDT stuff)
- */
- memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
-#endif
-
memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 767d6c43de37..ce0be7cd085e 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -73,7 +73,7 @@ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
*/
KERNEL_PAGES = LOWMEM_PAGES
-INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
+INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
/*
@@ -137,7 +137,7 @@ ENTRY(startup_32)
movsl
1:
-#ifdef CONFIG_OLPC_OPENFIRMWARE
+#ifdef CONFIG_OLPC
/* save OFW's pgdir table for later use when calling into OFW */
movl %cr3, %eax
movl %eax, pa(olpc_ofw_pgd)
@@ -623,7 +623,7 @@ ENTRY(initial_code)
* BSS section
*/
__PAGE_ALIGNED_BSS
- .align PAGE_SIZE_asm
+ .align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
.fill 1024*KPMDS,4,0
@@ -644,7 +644,7 @@ ENTRY(swapper_pg_dir)
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE_asm
+ .align PAGE_SIZE
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
@@ -662,7 +662,7 @@ ENTRY(initial_page_table)
# else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
- .align PAGE_SIZE_asm /* needs to be page-sized too */
+ .align PAGE_SIZE /* needs to be page-sized too */
#endif
.data
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 239046bd447f..e11e39478a49 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -136,10 +136,9 @@ ident_complete:
/* Fixup phys_base */
addq %rbp, phys_base(%rip)
-#ifdef CONFIG_X86_TRAMPOLINE
+ /* Fixup trampoline */
addq %rbp, trampoline_level4_pgt + 0(%rip)
addq %rbp, trampoline_level4_pgt + (511*8)(%rip)
-#endif
/* Due to ENTRY(), sometimes the empty space gets filled with
* zeros. Better take a jmp than relying on empty space being
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 8eec0ec59af2..8c968974253d 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -14,22 +14,9 @@
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/syscalls.h>
+#include <linux/bitmap.h>
#include <asm/syscalls.h>
-/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-static void set_bitmap(unsigned long *bitmap, unsigned int base,
- unsigned int extent, int new_value)
-{
- unsigned int i;
-
- for (i = base; i < base + extent; i++) {
- if (new_value)
- __set_bit(i, bitmap);
- else
- __clear_bit(i, bitmap);
- }
-}
-
/*
* this changes the io permissions bitmap in the current task.
*/
@@ -69,7 +56,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
*/
tss = &per_cpu(init_tss, get_cpu());
- set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+ if (turn_on)
+ bitmap_clear(t->io_bitmap_ptr, from, num);
+ else
+ bitmap_set(t->io_bitmap_ptr, from, num);
/*
* Search for a (possibly new) maximum. This is simple and stupid,
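
The sys_ioperm() hunk replaces the open-coded set_bitmap() loop with the generic bitmap_set()/bitmap_clear() range helpers; a set bit in the TSS I/O bitmap still means the port is denied, hence the inversion of turn_on. A userspace model of the same operation, with the two helpers re-implemented locally rather than taken from the kernel:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define IO_BITMAP_BITS	1024

static unsigned long io_bitmap[IO_BITMAP_BITS / (8 * sizeof(unsigned long))];

/* minimal re-implementations of the kernel's range helpers */
static void bitmap_set(unsigned long *map, unsigned int start, unsigned int len)
{
	for (unsigned int i = start; i < start + len; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bitmap_clear(unsigned long *map, unsigned int start, unsigned int len)
{
	for (unsigned int i = start; i < start + len; i++)
		map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

/* a set bit means "port denied", hence the inversion of turn_on */
static void ioperm_range(unsigned int from, unsigned int num, int turn_on)
{
	if (turn_on)
		bitmap_clear(io_bitmap, from, num);
	else
		bitmap_set(io_bitmap, from, num);
}

int main(void)
{
	ioperm_range(0x3f8, 8, 1);	/* grant the 16550 port range */
	ioperm_range(0x3f8, 8, 0);	/* and revoke it again */
	printf("word = %#lx\n", io_bitmap[0x3f8 / BITS_PER_LONG]);
	return 0;
}
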
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 387b6a0c9e81..5478df47d6dd 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -276,15 +276,6 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
-#ifdef CONFIG_OF
-unsigned int irq_create_of_mapping(struct device_node *controller,
- const u32 *intspec, unsigned int intsize)
-{
- return intspec[0];
-}
-EXPORT_SYMBOL_GPL(irq_create_of_mapping);
-#endif
-
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
@@ -310,7 +301,7 @@ void fixup_irqs(void)
data = &desc->irq_data;
affinity = data->affinity;
if (!irq_has_action(irq) ||
- cpumask_equal(affinity, cpu_online_mask)) {
+ cpumask_subset(affinity, cpu_online_mask)) {
raw_spin_unlock(&desc->lock);
continue;
}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index c752e973958d..34f765a2d22e 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -25,6 +25,7 @@
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>
+#include <asm/prom.h>
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
@@ -118,6 +119,12 @@ void __init init_IRQ(void)
int i;
/*
+ * We probably need a better place for this, but it works for
+ * now ...
+ */
+ x86_add_irq_domains();
+
+ /*
* On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
* If these IRQ's are handled by legacy interrupt-controllers like PIC,
* then this configuration will likely be static after the boot. If
@@ -164,14 +171,77 @@ static void __init smp_intr_init(void)
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPIs for invalidation */
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+#define ALLOC_INVTLB_VEC(NR) \
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
+ invalidate_interrupt##NR)
+
+ switch (NUM_INVALIDATE_TLB_VECTORS) {
+ default:
+ ALLOC_INVTLB_VEC(31);
+ case 31:
+ ALLOC_INVTLB_VEC(30);
+ case 30:
+ ALLOC_INVTLB_VEC(29);
+ case 29:
+ ALLOC_INVTLB_VEC(28);
+ case 28:
+ ALLOC_INVTLB_VEC(27);
+ case 27:
+ ALLOC_INVTLB_VEC(26);
+ case 26:
+ ALLOC_INVTLB_VEC(25);
+ case 25:
+ ALLOC_INVTLB_VEC(24);
+ case 24:
+ ALLOC_INVTLB_VEC(23);
+ case 23:
+ ALLOC_INVTLB_VEC(22);
+ case 22:
+ ALLOC_INVTLB_VEC(21);
+ case 21:
+ ALLOC_INVTLB_VEC(20);
+ case 20:
+ ALLOC_INVTLB_VEC(19);
+ case 19:
+ ALLOC_INVTLB_VEC(18);
+ case 18:
+ ALLOC_INVTLB_VEC(17);
+ case 17:
+ ALLOC_INVTLB_VEC(16);
+ case 16:
+ ALLOC_INVTLB_VEC(15);
+ case 15:
+ ALLOC_INVTLB_VEC(14);
+ case 14:
+ ALLOC_INVTLB_VEC(13);
+ case 13:
+ ALLOC_INVTLB_VEC(12);
+ case 12:
+ ALLOC_INVTLB_VEC(11);
+ case 11:
+ ALLOC_INVTLB_VEC(10);
+ case 10:
+ ALLOC_INVTLB_VEC(9);
+ case 9:
+ ALLOC_INVTLB_VEC(8);
+ case 8:
+ ALLOC_INVTLB_VEC(7);
+ case 7:
+ ALLOC_INVTLB_VEC(6);
+ case 6:
+ ALLOC_INVTLB_VEC(5);
+ case 5:
+ ALLOC_INVTLB_VEC(4);
+ case 4:
+ ALLOC_INVTLB_VEC(3);
+ case 3:
+ ALLOC_INVTLB_VEC(2);
+ case 2:
+ ALLOC_INVTLB_VEC(1);
+ case 1:
+ ALLOC_INVTLB_VEC(0);
+ break;
+ }
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
@@ -243,7 +313,7 @@ void __init native_init_IRQ(void)
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
}
- if (!acpi_ioapic)
+ if (!acpi_ioapic && !of_ioapic)
setup_irq(2, &irq2);
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index a4130005028a..d4a28afe5db5 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -121,8 +121,8 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
- switch (regno) {
#ifdef CONFIG_X86_32
+ switch (regno) {
case GDB_SS:
if (!user_mode_vm(regs))
*(unsigned long *)mem = __KERNEL_DS;
@@ -135,8 +135,8 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
case GDB_FS:
*(unsigned long *)mem = 0xFFFF;
break;
-#endif
}
+#endif
return dbg_reg_def[regno].name;
}
@@ -533,15 +533,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
}
return NOTIFY_DONE;
- case DIE_NMIWATCHDOG:
- if (atomic_read(&kgdb_active) != -1) {
- /* KGDB CPU roundup: */
- kgdb_nmicallback(raw_smp_processor_id(), regs);
- return NOTIFY_STOP;
- }
- /* Enter debugger: */
- break;
-
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8dc44662394b..33c07b0b122e 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -493,7 +493,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
native_smp_prepare_boot_cpu();
}
-static void kvm_guest_cpu_online(void *dummy)
+static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
kvm_guest_cpu_init();
}
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 0fe6d1a66c38..c5610384ab16 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -66,7 +66,6 @@ struct microcode_amd {
unsigned int mpb[0];
};
-#define UCODE_MAX_SIZE 2048
#define UCODE_CONTAINER_SECTION_HDR 8
#define UCODE_CONTAINER_HEADER_SIZE 12
@@ -77,20 +76,20 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 dummy;
- memset(csig, 0, sizeof(*csig));
if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
- pr_warning("microcode: CPU%d: AMD CPU family 0x%x not "
- "supported\n", cpu, c->x86);
+ pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
return -1;
}
+
rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy);
- pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev);
+ pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
+
return 0;
}
-static int get_matching_microcode(int cpu, void *mc, int rev)
+static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr,
+ int rev)
{
- struct microcode_header_amd *mc_header = mc;
unsigned int current_cpu_id;
u16 equiv_cpu_id = 0;
unsigned int i = 0;
@@ -109,17 +108,17 @@ static int get_matching_microcode(int cpu, void *mc, int rev)
if (!equiv_cpu_id)
return 0;
- if (mc_header->processor_rev_id != equiv_cpu_id)
+ if (mc_hdr->processor_rev_id != equiv_cpu_id)
return 0;
/* ucode might be chipset specific -- currently we don't support this */
- if (mc_header->nb_dev_id || mc_header->sb_dev_id) {
- pr_err("CPU%d: loading of chipset specific code not yet supported\n",
+ if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
+ pr_err("CPU%d: chipset specific code not yet supported\n",
cpu);
return 0;
}
- if (mc_header->patch_id <= rev)
+ if (mc_hdr->patch_id <= rev)
return 0;
return 1;
@@ -144,71 +143,93 @@ static int apply_microcode_amd(int cpu)
/* check current patch id and patch's id for match */
if (rev != mc_amd->hdr.patch_id) {
- pr_err("CPU%d: update failed (for patch_level=0x%x)\n",
+ pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return -1;
}
- pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev);
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
uci->cpu_sig.rev = rev;
return 0;
}
-static void *
-get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
+static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
{
- unsigned int total_size;
- u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
- void *mc;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int max_size, actual_size;
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+#define F15H_MPB_MAX_SIZE 4096
+
+ switch (c->x86) {
+ case 0x14:
+ max_size = F14H_MPB_MAX_SIZE;
+ break;
+ case 0x15:
+ max_size = F15H_MPB_MAX_SIZE;
+ break;
+ default:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+ }
- get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR);
+ actual_size = buf[4] + (buf[5] << 8);
- if (section_hdr[0] != UCODE_UCODE_TYPE) {
- pr_err("error: invalid type field in container file section header\n");
- return NULL;
+ if (actual_size > size || actual_size > max_size) {
+ pr_err("section size mismatch\n");
+ return 0;
}
- total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
+ return actual_size;
+}
- if (total_size > size || total_size > UCODE_MAX_SIZE) {
- pr_err("error: size mismatch\n");
- return NULL;
+static struct microcode_header_amd *
+get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
+{
+ struct microcode_header_amd *mc = NULL;
+ unsigned int actual_size = 0;
+
+ if (buf[0] != UCODE_UCODE_TYPE) {
+ pr_err("invalid type field in container file section header\n");
+ goto out;
}
- mc = vzalloc(UCODE_MAX_SIZE);
+ actual_size = verify_ucode_size(cpu, buf, size);
+ if (!actual_size)
+ goto out;
+
+ mc = vzalloc(actual_size);
if (!mc)
- return NULL;
+ goto out;
- get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size);
- *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
+ get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size);
+ *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR;
+out:
return mc;
}
static int install_equiv_cpu_table(const u8 *buf)
{
- u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE];
- unsigned int *buf_pos = (unsigned int *)container_hdr;
- unsigned long size;
-
- get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE);
-
- size = buf_pos[2];
-
- if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
- pr_err("error: invalid type field in container file section header\n");
- return 0;
+ unsigned int *ibuf = (unsigned int *)buf;
+ unsigned int type = ibuf[1];
+ unsigned int size = ibuf[2];
+
+ if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
+ pr_err("empty section/"
+ "invalid type field in container file section header\n");
+ return -EINVAL;
}
equiv_cpu_table = vmalloc(size);
if (!equiv_cpu_table) {
pr_err("failed to allocate equivalent CPU table\n");
- return 0;
+ return -ENOMEM;
}
- buf += UCODE_CONTAINER_HEADER_SIZE;
- get_ucode_data(equiv_cpu_table, buf, size);
+ get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size);
return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
}
@@ -223,16 +244,16 @@ static enum ucode_state
generic_load_microcode(int cpu, const u8 *data, size_t size)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ struct microcode_header_amd *mc_hdr = NULL;
+ unsigned int mc_size, leftover;
+ int offset;
const u8 *ucode_ptr = data;
void *new_mc = NULL;
- void *mc;
- int new_rev = uci->cpu_sig.rev;
- unsigned int leftover;
- unsigned long offset;
+ unsigned int new_rev = uci->cpu_sig.rev;
enum ucode_state state = UCODE_OK;
offset = install_equiv_cpu_table(ucode_ptr);
- if (!offset) {
+ if (offset < 0) {
pr_err("failed to create equivalent cpu table\n");
return UCODE_ERROR;
}
@@ -241,64 +262,65 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
leftover = size - offset;
while (leftover) {
- unsigned int uninitialized_var(mc_size);
- struct microcode_header_amd *mc_header;
-
- mc = get_next_ucode(ucode_ptr, leftover, &mc_size);
- if (!mc)
+ mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size);
+ if (!mc_hdr)
break;
- mc_header = (struct microcode_header_amd *)mc;
- if (get_matching_microcode(cpu, mc, new_rev)) {
+ if (get_matching_microcode(cpu, mc_hdr, new_rev)) {
vfree(new_mc);
- new_rev = mc_header->patch_id;
- new_mc = mc;
+ new_rev = mc_hdr->patch_id;
+ new_mc = mc_hdr;
} else
- vfree(mc);
+ vfree(mc_hdr);
ucode_ptr += mc_size;
leftover -= mc_size;
}
- if (new_mc) {
- if (!leftover) {
- vfree(uci->mc);
- uci->mc = new_mc;
- pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
- cpu, new_rev, uci->cpu_sig.rev);
- } else {
- vfree(new_mc);
- state = UCODE_ERROR;
- }
- } else
+ if (!new_mc) {
state = UCODE_NFOUND;
+ goto free_table;
+ }
+ if (!leftover) {
+ vfree(uci->mc);
+ uci->mc = new_mc;
+ pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
+ cpu, uci->cpu_sig.rev, new_rev);
+ } else {
+ vfree(new_mc);
+ state = UCODE_ERROR;
+ }
+
+free_table:
free_equiv_cpu_table();
return state;
}
-static enum ucode_state request_microcode_fw(int cpu, struct device *device)
+static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
const char *fw_name = "amd-ucode/microcode_amd.bin";
- const struct firmware *firmware;
- enum ucode_state ret;
+ const struct firmware *fw;
+ enum ucode_state ret = UCODE_NFOUND;
- if (request_firmware(&firmware, fw_name, device)) {
- printk(KERN_ERR "microcode: failed to load file %s\n", fw_name);
- return UCODE_NFOUND;
+ if (request_firmware(&fw, fw_name, device)) {
+ pr_err("failed to load file %s\n", fw_name);
+ goto out;
}
- if (*(u32 *)firmware->data != UCODE_MAGIC) {
- pr_err("invalid UCODE_MAGIC (0x%08x)\n",
- *(u32 *)firmware->data);
- return UCODE_ERROR;
+ ret = UCODE_ERROR;
+ if (*(u32 *)fw->data != UCODE_MAGIC) {
+ pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
+ goto fw_release;
}
- ret = generic_load_microcode(cpu, firmware->data, firmware->size);
+ ret = generic_load_microcode(cpu, fw->data, fw->size);
- release_firmware(firmware);
+fw_release:
+ release_firmware(fw);
+out:
return ret;
}
@@ -319,7 +341,7 @@ static void microcode_fini_cpu_amd(int cpu)
static struct microcode_ops microcode_amd_ops = {
.request_microcode_user = request_microcode_user,
- .request_microcode_fw = request_microcode_fw,
+ .request_microcode_fw = request_microcode_amd,
.collect_cpu_info = collect_cpu_info_amd,
.apply_microcode = apply_microcode_amd,
.microcode_fini_cpu = microcode_fini_cpu_amd,
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 1cca374a2bac..87af68e0e1e1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -417,8 +417,10 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
if (err)
return err;
- if (microcode_init_cpu(cpu) == UCODE_ERROR)
- err = -EINVAL;
+ if (microcode_init_cpu(cpu) == UCODE_ERROR) {
+ sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+ return -EINVAL;
+ }
return err;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ff4554198981..99fa3adf0141 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -110,12 +110,9 @@ void show_regs_common(void)
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
- printk(KERN_CONT " ");
- printk(KERN_CONT "%s %s", vendor, product);
- if (board) {
- printk(KERN_CONT "/");
- printk(KERN_CONT "%s", board);
- }
+ printk(KERN_CONT " %s %s", vendor, product);
+ if (board)
+ printk(KERN_CONT "/%s", board);
printk(KERN_CONT "\n");
}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fc7aae1e2bc7..d3ce37edb54d 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
+ { /* Handle problems with rebooting on VersaLogic Menlow boards */
+ .callback = set_bios_reboot,
+ .ident = "VersaLogic Menlow based board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+ },
+ },
{ }
};
@@ -295,68 +303,16 @@ static int __init reboot_init(void)
}
core_initcall(reboot_init);
-/* The following code and data reboots the machine by switching to real
- mode and jumping to the BIOS reset entry point, as if the CPU has
- really been reset. The previous version asked the keyboard
- controller to pulse the CPU reset line, which is more thorough, but
- doesn't work with at least one type of 486 motherboard. It is easy
- to stop this code working; hence the copious comments. */
-static const unsigned long long
-real_mode_gdt_entries [3] =
-{
- 0x0000000000000000ULL, /* Null descriptor */
- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
-};
+extern const unsigned char machine_real_restart_asm[];
+extern const u64 machine_real_restart_gdt[3];
-static const struct desc_ptr
-real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
-real_mode_idt = { 0x3ff, 0 };
-
-/* This is 16-bit protected mode code to disable paging and the cache,
- switch to real mode and jump to the BIOS reset code.
-
- The instruction that switches to real mode by writing to CR0 must be
- followed immediately by a far jump instruction, which set CS to a
- valid value for real mode, and flushes the prefetch queue to avoid
- running instructions that have already been decoded in protected
- mode.
-
- Clears all the flags except ET, especially PG (paging), PE
- (protected-mode enable) and TS (task switch for coprocessor state
- save). Flushes the TLB after paging has been disabled. Sets CD and
- NW, to disable the cache on a 486, and invalidates the cache. This
- is more like the state of a 486 after reset. I don't know if
- something else should be done for other chips.
-
- More could be done here to set up the registers as if a CPU reset had
- occurred; hopefully real BIOSs don't assume much. */
-static const unsigned char real_mode_switch [] =
-{
- 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */
- 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */
- 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */
- 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */
- 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */
- 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */
- 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */
- 0x74, 0x02, /* jz f */
- 0x0f, 0x09, /* wbinvd */
- 0x24, 0x10, /* f: andb $0x10,al */
- 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */
-};
-static const unsigned char jump_to_bios [] =
+void machine_real_restart(unsigned int type)
{
- 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */
-};
+ void *restart_va;
+ unsigned long restart_pa;
+ void (*restart_lowmem)(unsigned int);
+ u64 *lowmem_gdt;
-/*
- * Switch to real mode and then execute the code
- * specified by the code and length parameters.
- * We assume that length will aways be less that 100!
- */
-void machine_real_restart(const unsigned char *code, int length)
-{
local_irq_disable();
/* Write zero to CMOS register number 0x0f, which the BIOS POST
@@ -384,41 +340,23 @@ void machine_real_restart(const unsigned char *code, int length)
too. */
*((unsigned short *)0x472) = reboot_mode;
- /* For the switch to real mode, copy some code to low memory. It has
- to be in the first 64k because it is running in 16-bit mode, and it
- has to have the same physical and virtual address, because it turns
- off paging. Copy it near the end of the first page, out of the way
- of BIOS variables. */
- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
- real_mode_switch, sizeof (real_mode_switch));
- memcpy((void *)(0x1000 - 100), code, length);
-
- /* Set up the IDT for real mode. */
- load_idt(&real_mode_idt);
-
- /* Set up a GDT from which we can load segment descriptors for real
- mode. The GDT is not used in real mode; it is just needed here to
- prepare the descriptors. */
- load_gdt(&real_mode_gdt);
-
- /* Load the data segment registers, and thus the descriptors ready for
- real mode. The base address of each segment is 0x100, 16 times the
- selector value being loaded here. This is so that the segment
- registers don't have to be reloaded after switching to real mode:
- the values are consistent for real mode operation already. */
- __asm__ __volatile__ ("movl $0x0010,%%eax\n"
- "\tmovl %%eax,%%ds\n"
- "\tmovl %%eax,%%es\n"
- "\tmovl %%eax,%%fs\n"
- "\tmovl %%eax,%%gs\n"
- "\tmovl %%eax,%%ss" : : : "eax");
-
- /* Jump to the 16-bit code that we copied earlier. It disables paging
- and the cache, switches to real mode, and jumps to the BIOS reset
- entry point. */
- __asm__ __volatile__ ("ljmp $0x0008,%0"
- :
- : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
+ /* Patch the GDT in the low memory trampoline */
+ lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
+
+ restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
+ restart_pa = virt_to_phys(restart_va);
+ restart_lowmem = (void (*)(unsigned int))restart_pa;
+
+ /* GDT[0]: GDT self-pointer */
+ lowmem_gdt[0] =
+ (u64)(sizeof(machine_real_restart_gdt) - 1) +
+ ((u64)virt_to_phys(lowmem_gdt) << 16);
+ /* GDT[1]: 64K real mode code segment */
+ lowmem_gdt[1] =
+ GDT_ENTRY(0x009b, restart_pa, 0xffff);
+
+ /* Jump to the identity-mapped low memory code */
+ restart_lowmem(type);
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
@@ -573,7 +511,7 @@ static void native_machine_emergency_restart(void)
#ifdef CONFIG_X86_32
case BOOT_BIOS:
- machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
+ machine_real_restart(MRR_BIOS);
reboot_type = BOOT_KBD;
break;
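
The rewritten machine_real_restart() above builds the trampoline's 16-bit code segment with GDT_ENTRY(0x009b, restart_pa, 0xffff). As a hedged illustration of what that packing produces (this follows the architectural descriptor layout, not the exact kernel macro):

#include <stdint.h>

/*
 * Sketch: pack an x86 segment descriptor from an access/flags word,
 * a 32-bit base and a 20-bit limit.
 */
static uint64_t gdt_entry(uint16_t flags, uint32_t base, uint32_t limit)
{
	return  (uint64_t)(limit & 0x0000ffffu)         |   /* limit 15:0   */
		((uint64_t)(base  & 0x00ffffffu) << 16) |   /* base  23:0   */
		((uint64_t)(flags & 0x0000f0ffu) << 40) |   /* access+flags */
		((uint64_t)(limit & 0x000f0000u) << 32) |   /* limit 19:16  */
		((uint64_t)(base  & 0xff000000u) << 32);    /* base  31:24  */
}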
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/kernel/reboot_32.S
new file mode 100644
index 000000000000..29092b38d816
--- /dev/null
+++ b/arch/x86/kernel/reboot_32.S
@@ -0,0 +1,135 @@
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+
+/*
+ * The following code and data reboots the machine by switching to real
+ * mode and jumping to the BIOS reset entry point, as if the CPU has
+ * really been reset. The previous version asked the keyboard
+ * controller to pulse the CPU reset line, which is more thorough, but
+ * doesn't work with at least one type of 486 motherboard. It is easy
+ * to stop this code working; hence the copious comments.
+ *
+ * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
+ */
+ .section ".x86_trampoline","a"
+ .balign 16
+ .code32
+ENTRY(machine_real_restart_asm)
+r_base = .
+ /* Get our own relocated address */
+ call 1f
+1: popl %ebx
+ subl $1b, %ebx
+
+ /* Compute the equivalent real-mode segment */
+ movl %ebx, %ecx
+ shrl $4, %ecx
+
+ /* Patch post-real-mode segment jump */
+ movw dispatch_table(%ebx,%eax,2),%ax
+ movw %ax, 101f(%ebx)
+ movw %cx, 102f(%ebx)
+
+ /* Set up the IDT for real mode. */
+ lidtl machine_real_restart_idt(%ebx)
+
+ /*
+ * Set up a GDT from which we can load segment descriptors for real
+ * mode. The GDT is not used in real mode; it is just needed here to
+ * prepare the descriptors.
+ */
+ lgdtl machine_real_restart_gdt(%ebx)
+
+ /*
+ * Load the data segment registers with 16-bit compatible values
+ */
+ movl $16, %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %ecx, %fs
+ movl %ecx, %gs
+ movl %ecx, %ss
+ ljmpl $8, $1f - r_base
+
+/*
+ * This is 16-bit protected mode code to disable paging and the cache,
+ * switch to real mode and jump to the BIOS reset code.
+ *
+ * The instruction that switches to real mode by writing to CR0 must be
+ * followed immediately by a far jump instruction, which sets CS to a
+ * valid value for real mode, and flushes the prefetch queue to avoid
+ * running instructions that have already been decoded in protected
+ * mode.
+ *
+ * Clears all the flags except ET, especially PG (paging), PE
+ * (protected-mode enable) and TS (task switch for coprocessor state
+ * save). Flushes the TLB after paging has been disabled. Sets CD and
+ * NW, to disable the cache on a 486, and invalidates the cache. This
+ * is more like the state of a 486 after reset. I don't know if
+ * something else should be done for other chips.
+ *
+ * More could be done here to set up the registers as if a CPU reset had
+ * occurred; hopefully real BIOSs don't assume much. This is not the
+ * actual BIOS entry point, anyway (that is at 0xfffffff0).
+ *
+ * Most of this work is probably excessive, but it is what is tested.
+ */
+ .code16
+1:
+ xorl %ecx, %ecx
+ movl %cr0, %eax
+ andl $0x00000011, %eax
+ orl $0x60000000, %eax
+ movl %eax, %cr0
+ movl %ecx, %cr3
+ movl %cr0, %edx
+ andl $0x60000000, %edx /* If no cache bits -> no wbinvd */
+ jz 2f
+ wbinvd
+2:
+ andb $0x10, %al
+ movl %eax, %cr0
+ .byte 0xea /* ljmpw */
+101: .word 0 /* Offset */
+102: .word 0 /* Segment */
+
+bios:
+ ljmpw $0xf000, $0xfff0
+
+apm:
+ movw $0x1000, %ax
+ movw %ax, %ss
+ movw $0xf000, %sp
+ movw $0x5307, %ax
+ movw $0x0001, %bx
+ movw $0x0003, %cx
+ int $0x15
+
+END(machine_real_restart_asm)
+
+ .balign 16
+ /* These must match <asm/reboot.h> */
+dispatch_table:
+ .word bios - r_base
+ .word apm - r_base
+END(dispatch_table)
+
+ .balign 16
+machine_real_restart_idt:
+ .word 0xffff /* Length - real mode default value */
+ .long 0 /* Base - real mode default value */
+END(machine_real_restart_idt)
+
+ .balign 16
+ENTRY(machine_real_restart_gdt)
+ .quad 0 /* Self-pointer, filled in by PM code */
+ .quad 0 /* 16-bit code segment, filled in by PM code */
+ /*
+ * 16-bit data segment with the selector value 16 = 0x10 and
+ * base value 0x100; since this is consistent with real mode
+ * semantics we don't have to reload the segments once CR0.PE = 0.
+ */
+ .quad GDT_ENTRY(0x0093, 0x100, 0xffff)
+END(machine_real_restart_gdt)
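
The trampoline above derives its real-mode segment from its relocated address with "shrl $4, %ecx". A hedged C sketch of the same arithmetic (illustrative helper, not kernel code) for addresses below 1 MiB:

/*
 * Sketch: a low physical address splits into a real-mode segment (the
 * 16-byte paragraph number) and a remainder, so segment*16 + offset
 * recovers the original address.
 */
static void phys_to_realmode(unsigned long pa,
			     unsigned short *seg, unsigned short *off)
{
	*seg = (unsigned short)(pa >> 4);   /* 16-byte paragraphs */
	*off = (unsigned short)(pa & 0xf);  /* remainder inside the paragraph */
}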
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 6f39cab052d5..3f2ad2640d85 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bcd.h>
#include <linux/pnp.h>
+#include <linux/of.h>
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
@@ -236,6 +237,8 @@ static __init int add_rtc_cmos(void)
}
}
#endif
+ if (of_have_populated_dt())
+ return 0;
platform_device_register(&rtc_device);
dev_info(&rtc_device.dev,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d3cfe26c0252..a47fe0099c27 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -113,6 +113,7 @@
#endif
#include <asm/mce.h>
#include <asm/alternative.h>
+#include <asm/prom.h>
/*
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -293,10 +294,32 @@ static void __init init_gbpages(void)
else
direct_gbpages = 0;
}
+
+static void __init cleanup_highmap_brk_end(void)
+{
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mmu_cr4_features = read_cr4();
+
+ /*
+ * _brk_end cannot change anymore, but it and _end may be
+ * located on different 2M pages. cleanup_highmap(), however,
+ * can only consider _end when it runs, so destroy any
+ * mappings beyond _brk_end here.
+ */
+ pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+ pmd = pmd_offset(pud, _brk_end - 1);
+ while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+ pmd_clear(pmd);
+}
#else
static inline void init_gbpages(void)
{
}
+static inline void cleanup_highmap_brk_end(void)
+{
+}
#endif
static void __init reserve_brk(void)
@@ -307,6 +330,8 @@ static void __init reserve_brk(void)
/* Mark brk area as locked down and no longer taking any
new allocations */
_brk_start = 0;
+
+ cleanup_highmap_brk_end();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -429,16 +454,30 @@ static void __init parse_setup_data(void)
return;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- data = early_memremap(pa_data, PAGE_SIZE);
+ u32 data_len, map_len;
+
+ map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
+ (u64)sizeof(struct setup_data));
+ data = early_memremap(pa_data, map_len);
+ data_len = data->len + sizeof(struct setup_data);
+ if (data_len > map_len) {
+ early_iounmap(data, map_len);
+ data = early_memremap(pa_data, data_len);
+ map_len = data_len;
+ }
+
switch (data->type) {
case SETUP_E820_EXT:
- parse_e820_ext(data, pa_data);
+ parse_e820_ext(data);
+ break;
+ case SETUP_DTB:
+ add_dtb(pa_data);
break;
default:
break;
}
pa_data = data->next;
- early_iounmap(data, PAGE_SIZE);
+ early_iounmap(data, map_len);
}
}
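
The parse_setup_data() change above maps only enough for the setup_data header, reads the real length, and remaps if the record spills past the first window. A hedged sketch of that map-then-grow pattern; map_window()/unmap_window() are illustrative stand-ins, not kernel APIs:

struct record_hdr {
	unsigned int len;	/* payload bytes following the header */
};

extern void *map_window(unsigned long phys, unsigned long len);
extern void unmap_window(void *virt, unsigned long len);

/* Map a record whose total size is only known after reading its header. */
static void *map_record(unsigned long phys, unsigned long *mapped_len)
{
	unsigned long len = sizeof(struct record_hdr);
	struct record_hdr *hdr = map_window(phys, len);
	unsigned long need = sizeof(*hdr) + hdr->len;

	if (need > len) {			/* header window was too small */
		unmap_window(hdr, len);
		hdr = map_window(phys, need);
		len = need;
	}
	*mapped_len = len;
	return hdr;
}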
@@ -680,15 +719,6 @@ static int __init parse_reservelow(char *p)
early_param("reservelow", parse_reservelow);
-static u64 __init get_max_mapped(void)
-{
- u64 end = max_pfn_mapped;
-
- end <<= PAGE_SHIFT;
-
- return end;
-}
-
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -704,8 +734,6 @@ static u64 __init get_max_mapped(void)
void __init setup_arch(char **cmdline_p)
{
- int acpi = 0;
- int amd = 0;
unsigned long flags;
#ifdef CONFIG_X86_32
@@ -935,29 +963,14 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
max_pfn_mapped<<PAGE_SHIFT);
- reserve_trampoline_memory();
+ setup_trampolines();
-#ifdef CONFIG_ACPI_SLEEP
- /*
- * Reserve low memory region for sleep support.
- * even before init_memory_mapping
- */
- acpi_reserve_wakeup_memory();
-#endif
init_gbpages();
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
-#ifdef CONFIG_X86_64
- if (max_pfn > max_low_pfn) {
- max_pfn_mapped = init_memory_mapping(1UL<<32,
- max_pfn<<PAGE_SHIFT);
- /* can we preseve max_low_pfn ?*/
- max_low_pfn = max_pfn;
- }
-#endif
memblock.current_limit = get_max_mapped();
/*
@@ -984,19 +997,7 @@ void __init setup_arch(char **cmdline_p)
early_acpi_boot_init();
-#ifdef CONFIG_ACPI_NUMA
- /*
- * Parse SRAT to discover nodes.
- */
- acpi = acpi_numa_init();
-#endif
-
-#ifdef CONFIG_AMD_NUMA
- if (!acpi)
- amd = !amd_numa_init(0, max_pfn);
-#endif
-
- initmem_init(0, max_pfn, acpi, amd);
+ initmem_init();
memblock_find_dma_reserve();
dma32_reserve_bootmem();
@@ -1029,8 +1030,8 @@ void __init setup_arch(char **cmdline_p)
* Read APIC and some other early information from ACPI tables.
*/
acpi_boot_init();
-
sfi_init();
+ x86_dtb_init();
/*
* get boot-time SMP configuration:
@@ -1040,9 +1041,7 @@ void __init setup_arch(char **cmdline_p)
prefill_possible_map();
-#ifdef CONFIG_X86_64
init_cpu_to_node();
-#endif
init_apic_mappings();
ioapic_and_gsi_init();
@@ -1066,6 +1065,8 @@ void __init setup_arch(char **cmdline_p)
#endif
x86_init.oem.banner();
+ x86_init.timers.wallclock_init();
+
mcheck_init();
local_irq_save(flags);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 002b79685f73..71f4727da373 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void)
per_cpu(x86_bios_cpu_apicid, cpu) =
early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
+#ifdef CONFIG_X86_32
+ per_cpu(x86_cpu_to_logical_apicid, cpu) =
+ early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
+#endif
#ifdef CONFIG_X86_64
per_cpu(irq_stack_ptr, cpu) =
per_cpu(irq_stack_union.irq_stack, cpu) +
IRQ_STACK_SIZE - 64;
+#endif
#ifdef CONFIG_NUMA
per_cpu(x86_cpu_to_node_map, cpu) =
early_per_cpu_map(x86_cpu_to_node_map, cpu);
@@ -242,7 +247,6 @@ void __init setup_per_cpu_areas(void)
*/
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
-#endif
/*
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void)
early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
-#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+#ifdef CONFIG_X86_32
+ early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
+#endif
+#ifdef CONFIG_NUMA
early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 08776a953487..c2871d3c71b6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -64,6 +64,7 @@
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
@@ -71,10 +72,6 @@
#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
-#ifdef CONFIG_X86_32
-u8 apicid_2_node[MAX_APICID];
-#endif
-
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
@@ -130,68 +127,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+
/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
atomic_t init_deasserted;
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-/* which node each logical CPU is on */
-int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_to_node_map);
-
-/* set up a mapping between cpu and node. */
-static void map_cpu_to_node(int cpu, int node)
-{
- printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
- cpu_to_node_map[cpu] = node;
-}
-
-/* undo a mapping between cpu and node. */
-static void unmap_cpu_to_node(int cpu)
-{
- int node;
-
- printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
- for (node = 0; node < MAX_NUMNODES; node++)
- cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
- cpu_to_node_map[cpu] = 0;
-}
-#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
-#define map_cpu_to_node(cpu, node) ({})
-#define unmap_cpu_to_node(cpu) ({})
-#endif
-
-#ifdef CONFIG_X86_32
-static int boot_cpu_logical_apicid;
-
-u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
- { [0 ... NR_CPUS-1] = BAD_APICID };
-
-static void map_cpu_to_logical_apicid(void)
-{
- int cpu = smp_processor_id();
- int apicid = logical_smp_processor_id();
- int node = apic->apicid_to_node(apicid);
-
- if (!node_online(node))
- node = first_online_node;
-
- cpu_2_logical_apicid[cpu] = apicid;
- map_cpu_to_node(cpu, node);
-}
-
-void numa_remove_cpu(int cpu)
-{
- cpu_2_logical_apicid[cpu] = BAD_APICID;
- unmap_cpu_to_node(cpu);
-}
-#else
-#define map_cpu_to_logical_apicid() do {} while (0)
-#endif
-
/*
* Report back to the Boot Processor.
* Running on AP.
@@ -259,7 +202,6 @@ static void __cpuinit smp_callin(void)
apic->smp_callin_clear_local_apic();
setup_local_APIC();
end_local_APIC_setup();
- map_cpu_to_logical_apicid();
/*
* Need to setup vector mappings before we enable interrupts.
@@ -355,23 +297,6 @@ notrace static void __cpuinit start_secondary(void *unused)
cpu_idle();
}
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* In this case, llc_shared_map is a pointer to a cpumask. */
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
- const struct cpuinfo_x86 *src)
-{
- struct cpumask *llc = dst->llc_shared_map;
- *dst = *src;
- dst->llc_shared_map = llc;
-}
-#else
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
- const struct cpuinfo_x86 *src)
-{
- *dst = *src;
-}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
-
/*
* The bootstrap kernel entry code has set these up. Save them for
* a given CPU
@@ -381,7 +306,7 @@ void __cpuinit smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = &cpu_data(id);
- copy_cpuinfo_x86(c, &boot_cpu_data);
+ *c = boot_cpu_data;
c->cpu_index = id;
if (id != 0)
identify_secondary_cpu(c);
@@ -389,15 +314,12 @@ void __cpuinit smp_store_cpu_info(int id)
static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
{
- struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
- struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
-
cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
- cpumask_set_cpu(cpu1, c2->llc_shared_map);
- cpumask_set_cpu(cpu2, c1->llc_shared_map);
+ cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
+ cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
}
@@ -414,6 +336,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
if (c->phys_proc_id == o->phys_proc_id &&
+ per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
c->compute_unit_id == o->compute_unit_id)
link_thread_siblings(cpu, i);
} else if (c->phys_proc_id == o->phys_proc_id &&
@@ -425,7 +348,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
}
- cpumask_set_cpu(cpu, c->llc_shared_map);
+ cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -436,8 +359,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
- cpumask_set_cpu(i, c->llc_shared_map);
- cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
+ cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
+ cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
}
if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -476,7 +399,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
!(cpu_has(c, X86_FEATURE_AMD_DCM)))
return cpu_core_mask(cpu);
else
- return c->llc_shared_map;
+ return cpu_llc_shared_mask(cpu);
}
static void impress_friends(void)
@@ -788,7 +711,7 @@ do_rest:
stack_start = c_idle.idle->thread.sp;
/* start_ip had better be page-aligned! */
- start_ip = setup_trampoline();
+ start_ip = trampoline_address();
/* So we see what's up */
announce_cpu(cpu, apicid);
@@ -798,6 +721,8 @@ do_rest:
* the targeted processor.
*/
+ printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip);
+
atomic_set(&init_deasserted, 0);
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
@@ -851,8 +776,8 @@ do_rest:
pr_debug("CPU%d: has booted.\n", cpu);
else {
boot_error = 1;
- if (*((volatile unsigned char *)trampoline_base)
- == 0xA5)
+ if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
+ == 0xA5A5A5A5)
/* trampoline started but...? */
pr_err("CPU%d: Stuck ??\n", cpu);
else
@@ -878,7 +803,7 @@ do_rest:
}
/* mark "stuck" area as not stuck */
- *((volatile unsigned long *)trampoline_base) = 0;
+ *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
/*
@@ -945,6 +870,14 @@ int __cpuinit native_cpu_up(unsigned int cpu)
return 0;
}
+/**
+ * arch_disable_smp_support() - disables SMP support for x86 at runtime
+ */
+void arch_disable_smp_support(void)
+{
+ disable_ioapic_support();
+}
+
/*
* Fall back to non SMP mode after errors.
*
@@ -960,7 +893,6 @@ static __init void disable_smp(void)
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
else
physid_set_mask_of_physid(0, &phys_cpu_present_map);
- map_cpu_to_logical_apicid();
cpumask_set_cpu(0, cpu_sibling_mask(0));
cpumask_set_cpu(0, cpu_core_mask(0));
}
@@ -1045,7 +977,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
"(tell your hw vendor)\n");
}
smpboot_clear_io_apic();
- arch_disable_smp_support();
+ disable_ioapic_support();
return -1;
}
@@ -1089,21 +1021,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
preempt_disable();
smp_cpu_index_default();
- memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
- cpumask_copy(cpu_callin_mask, cpumask_of(0));
- mb();
+
/*
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
-#ifdef CONFIG_X86_32
- boot_cpu_logical_apicid = logical_smp_processor_id();
-#endif
+ cpumask_copy(cpu_callin_mask, cpumask_of(0));
+ mb();
+
current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
}
set_cpu_sibling_map(0);
@@ -1139,8 +1069,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
bsp_end_local_APIC_setup();
- map_cpu_to_logical_apicid();
-
if (apic->setup_portio_remap)
apic->setup_portio_remap();
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index b35786dc9b8f..68c7b9aa5001 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -340,3 +340,4 @@ ENTRY(sys_call_table)
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64 /* 340 */
+ .long sys_clock_adjtime
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a375616d77f7..a91ae7709b49 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -2,39 +2,41 @@
#include <linux/memblock.h>
#include <asm/trampoline.h>
+#include <asm/cacheflush.h>
#include <asm/pgtable.h>
-#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
-#define __trampinit
-#define __trampinitdata
-#else
-#define __trampinit __cpuinit
-#define __trampinitdata __cpuinitdata
-#endif
+unsigned char *x86_trampoline_base;
-/* ready for x86_64 and x86 */
-unsigned char *__trampinitdata trampoline_base;
-
-void __init reserve_trampoline_memory(void)
+void __init setup_trampolines(void)
{
phys_addr_t mem;
+ size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
/* Has to be in very low memory so we can execute real-mode AP code. */
- mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+ mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
if (mem == MEMBLOCK_ERROR)
panic("Cannot allocate trampoline\n");
- trampoline_base = __va(mem);
- memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
+ x86_trampoline_base = __va(mem);
+ memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+
+ printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+ x86_trampoline_base, (unsigned long long)mem, size);
+
+ memcpy(x86_trampoline_base, x86_trampoline_start, size);
}
/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
+ * setup_trampolines() gets called very early, to guarantee the
+ * availability of low memory. This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions in that
+ * function. Thus, we use an arch_initcall instead.
*/
-unsigned long __trampinit setup_trampoline(void)
+static int __init configure_trampolines(void)
{
- memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
- return virt_to_phys(trampoline_base);
+ size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
+
+ set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT);
+ return 0;
}
+arch_initcall(configure_trampolines);
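
setup_trampolines() above sizes the copy from the linker-provided symbols x86_trampoline_start and x86_trampoline_end (emitted by the .x86_trampoline output section added to vmlinux.lds.S later in this diff). A hedged sketch of how such linker symbols are usually consumed from C, where only their addresses are meaningful:

extern const unsigned char x86_trampoline_start[];
extern const unsigned char x86_trampoline_end[];

/* Size of the trampoline image as laid out by the linker script. */
static unsigned long trampoline_image_size(void)
{
	return (unsigned long)(x86_trampoline_end - x86_trampoline_start);
}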
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index 8508237e8e43..451c0a7ef7fd 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -32,9 +32,11 @@
#include <asm/segment.h>
#include <asm/page_types.h>
-/* We can free up trampoline after bootup if cpu hotplug is not supported. */
-__CPUINITRODATA
-.code16
+#ifdef CONFIG_SMP
+
+ .section ".x86_trampoline","a"
+ .balign PAGE_SIZE
+ .code16
ENTRY(trampoline_data)
r_base = .
@@ -44,7 +46,7 @@ r_base = .
cli # We should be safe anyway
- movl $0xA5A5A5A5, trampoline_data - r_base
+ movl $0xA5A5A5A5, trampoline_status - r_base
# write marker for master knows we're running
/* GDT tables in non default location kernel can be beyond 16MB and
@@ -72,5 +74,10 @@ boot_idt_descr:
.word 0 # idt limit = 0
.long 0 # idt base = 0L
+ENTRY(trampoline_status)
+ .long 0
+
.globl trampoline_end
trampoline_end:
+
+#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 075d130efcf9..09ff51799e96 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -32,13 +32,9 @@
#include <asm/segment.h>
#include <asm/processor-flags.h>
-#ifdef CONFIG_ACPI_SLEEP
-.section .rodata, "a", @progbits
-#else
-/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
-__CPUINITRODATA
-#endif
-.code16
+ .section ".x86_trampoline","a"
+ .balign PAGE_SIZE
+ .code16
ENTRY(trampoline_data)
r_base = .
@@ -50,7 +46,7 @@ r_base = .
mov %ax, %ss
- movl $0xA5A5A5A5, trampoline_data - r_base
+ movl $0xA5A5A5A5, trampoline_status - r_base
# write marker for master knows we're running
# Setup stack
@@ -64,10 +60,13 @@ r_base = .
movzx %ax, %esi # Find the 32bit trampoline location
shll $4, %esi
- # Fixup the vectors
- addl %esi, startup_32_vector - r_base
- addl %esi, startup_64_vector - r_base
- addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer
+ # Fixup the absolute vectors
+ leal (startup_32 - r_base)(%esi), %eax
+ movl %eax, startup_32_vector - r_base
+ leal (startup_64 - r_base)(%esi), %eax
+ movl %eax, startup_64_vector - r_base
+ leal (tgdt - r_base)(%esi), %eax
+ movl %eax, (tgdt + 2 - r_base)
/*
* GDT tables in non default location kernel can be beyond 16MB and
@@ -129,6 +128,7 @@ no_longmode:
jmp no_longmode
#include "verify_cpu.S"
+ .balign 4
# Careful these need to be in the same 64K segment as the above;
tidt:
.word 0 # idt limit = 0
@@ -156,6 +156,10 @@ startup_64_vector:
.long startup_64 - r_base
.word __KERNEL_CS, 0
+ .balign 4
+ENTRY(trampoline_status)
+ .long 0
+
trampoline_stack:
.org 0x1000
trampoline_stack_end:
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf4700755184..a950112b4b1d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -230,7 +230,7 @@ SECTIONS
* output PHDR, so the next output section - .init.text - should
* start another segment - init.
*/
- PERCPU_VADDR(0, :percpu)
+ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
#endif
INIT_TEXT_SECTION(PAGE_SIZE)
@@ -240,6 +240,18 @@ SECTIONS
INIT_DATA_SECTION(16)
+ /*
+ * Code and data for a variety of low-level trampolines, to be
+ * copied into base memory (< 1 MiB) during initialization.
+ * Since it is copied early, the main copy can be discarded
+ * afterwards.
+ */
+ .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
+ x86_trampoline_start = .;
+ *(.x86_trampoline)
+ x86_trampoline_end = .;
+ }
+
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
*(.x86_cpu_dev.init)
@@ -291,6 +303,7 @@ SECTIONS
*(.iommu_table)
__iommu_table_end = .;
}
+
. = ALIGN(8);
/*
* .exit.text is discard at runtime, not link time, to deal with
@@ -305,7 +318,7 @@ SECTIONS
}
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
- PERCPU(THREAD_SIZE)
+ PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
#endif
. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 1b950d151e58..9796c2f3d074 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -52,6 +52,7 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index ceb2911aa439..c11514e9128b 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -70,6 +70,7 @@ struct x86_init_ops x86_init __initdata = {
.setup_percpu_clockev = setup_boot_APIC_clock,
.tsc_pre_init = x86_init_noop,
.timer_init = hpet_time_init,
+ .wallclock_init = x86_init_noop,
},
.iommu = {
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index caf966781d25..a90d7e033304 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -76,6 +76,7 @@
#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
/* Misc flags */
+#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
@@ -2365,7 +2366,8 @@ static struct group_dual group7 = { {
D(SrcMem16 | ModRM | Mov | Priv),
D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
}, {
- D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
+ D(SrcNone | ModRM | Priv | VendorSpecific), N,
+ N, D(SrcNone | ModRM | Priv | VendorSpecific),
D(SrcNone | ModRM | DstMem | Mov), N,
D(SrcMem16 | ModRM | Mov | Priv), N,
} };
@@ -2489,7 +2491,7 @@ static struct opcode opcode_table[256] = {
static struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
N, GD(0, &group7), N, N,
- N, D(ImplicitOps), D(ImplicitOps | Priv), N,
+ N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
N, D(ImplicitOps | ModRM), N, N,
/* 0x10 - 0x1F */
@@ -2502,7 +2504,8 @@ static struct opcode twobyte_table[256] = {
/* 0x30 - 0x3F */
D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
D(ImplicitOps | Priv), N,
- D(ImplicitOps), D(ImplicitOps | Priv), N, N,
+ D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
+ N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
X16(D(DstReg | SrcMem | ModRM | Mov)),
@@ -2741,6 +2744,9 @@ done_prefixes:
if (c->d == 0 || (c->d & Undefined))
return -1;
+ if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+ return -1;
+
if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
c->op_bytes = 8;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 3cece05e4ac4..19fe855e7953 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -62,9 +62,6 @@ static void pic_unlock(struct kvm_pic *s)
}
if (!found)
- found = s->kvm->bsp_vcpu;
-
- if (!found)
return;
kvm_make_request(KVM_REQ_EVENT, found);
@@ -75,7 +72,6 @@ static void pic_unlock(struct kvm_pic *s)
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
s->isr &= ~(1 << irq);
- s->isr_ack |= (1 << irq);
if (s != &s->pics_state->pics[0])
irq += 8;
/*
@@ -89,16 +85,6 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
pic_lock(s->pics_state);
}
-void kvm_pic_clear_isr_ack(struct kvm *kvm)
-{
- struct kvm_pic *s = pic_irqchip(kvm);
-
- pic_lock(s);
- s->pics[0].isr_ack = 0xff;
- s->pics[1].isr_ack = 0xff;
- pic_unlock(s);
-}
-
/*
* set irq level. If an edge is detected, then the IRR is set to 1
*/
@@ -281,7 +267,6 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
s->irr = 0;
s->imr = 0;
s->isr = 0;
- s->isr_ack = 0xff;
s->priority_add = 0;
s->irq_base = 0;
s->read_reg_select = 0;
@@ -545,15 +530,11 @@ static int picdev_read(struct kvm_io_device *this,
*/
static void pic_irq_request(struct kvm *kvm, int level)
{
- struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
struct kvm_pic *s = pic_irqchip(kvm);
- int irq = pic_get_irq(&s->pics[0]);
- s->output = level;
- if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
- s->pics[0].isr_ack &= ~(1 << irq);
+ if (!s->output)
s->wakeup_needed = true;
- }
+ s->output = level;
}
static const struct kvm_io_device_ops picdev_ops = {
@@ -575,8 +556,6 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
s->pics[1].elcr_mask = 0xde;
s->pics[0].pics_state = s;
s->pics[1].pics_state = s;
- s->pics[0].isr_ack = 0xff;
- s->pics[1].isr_ack = 0xff;
/*
* Initialize PIO device
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 93cf9d0d3653..0171e66e9994 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -417,10 +417,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_INIT:
if (level) {
result = 1;
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
- printk(KERN_DEBUG
- "INIT on a runnable vcpu %d\n",
- vcpu->vcpu_id);
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f02b8edc3d44..b6a9963400a7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3538,14 +3538,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
if (!test_bit(slot, sp->slot_bitmap))
continue;
- if (sp->role.level != PT_PAGE_TABLE_LEVEL)
- continue;
-
pt = sp->spt;
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+ if (sp->role.level != PT_PAGE_TABLE_LEVEL
+ && is_large_pte(pt[i])) {
+ drop_spte(kvm, &pt[i],
+ shadow_trap_nonpresent_pte);
+ --kvm->stat.lpages;
+ }
/* avoid RMW */
if (is_writable_pte(pt[i]))
update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
+ }
}
kvm_flush_remote_tlbs(kvm);
}
@@ -3583,7 +3587,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
if (nr_to_scan == 0)
goto out;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
int idx, freed_pages;
@@ -3606,7 +3610,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
if (kvm_freed)
list_move_tail(&kvm_freed->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
out:
return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 54ce246a383e..8d61df4a02c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -135,6 +135,8 @@ struct vcpu_svm {
u32 *msrpm;
+ ulong nmi_iret_rip;
+
struct nested_state nested;
bool nmi_singlestep;
@@ -2653,6 +2655,7 @@ static int iret_interception(struct vcpu_svm *svm)
++svm->vcpu.stat.nmi_window_exits;
clr_intercept(svm, INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
+ svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
return 1;
}
@@ -2777,6 +2780,8 @@ static int dr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, reg, val);
}
+ skip_emulated_instruction(&svm->vcpu);
+
return 1;
}
@@ -3472,7 +3477,12 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
svm->int3_injected = 0;
- if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
+ /*
+ * If we've made progress since setting HF_IRET_MASK, we've
+ * executed an IRET and can allow NMI injection.
+ */
+ if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
+ && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}
@@ -3645,13 +3655,21 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
local_irq_disable();
- stgi();
-
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
+ kvm_before_handle_nmi(&svm->vcpu);
+
+ stgi();
+
+ /* Any pending NMI will happen here */
+
+ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
+ kvm_after_handle_nmi(&svm->vcpu);
+
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bf89ec2cfb82..e2b8c6b21ff2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -93,14 +93,14 @@ module_param(yield_on_hlt, bool, S_IRUGO);
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
* executions of PAUSE in a loop. Also indicate if ple enabled.
- * According to test, this time is usually small than 41 cycles.
+ * According to tests, this time is usually smaller than 128 cycles.
* ple_window: upper bound on the amount of time a guest is allowed to execute
* in a PAUSE loop. Tests indicate that most spinlocks are held for
* less than 2^12 cycles
* Time is measured based on a counter that runs at the same rate as the TSC,
* refer SDM volume 3b section 21.6.13 & 22.1.3.
*/
-#define KVM_VMX_DEFAULT_PLE_GAP 41
+#define KVM_VMX_DEFAULT_PLE_GAP 128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);
@@ -176,7 +176,6 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
-static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
@@ -1333,19 +1332,25 @@ static __init int vmx_disabled_by_bios(void)
rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
if (msr & FEATURE_CONTROL_LOCKED) {
+ /* launched w/ TXT and VMX disabled */
if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
&& tboot_enabled())
return 1;
+ /* launched w/o TXT and VMX only enabled w/ TXT */
if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
&& !tboot_enabled()) {
printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
- " activate TXT before enabling KVM\n");
+ "activate TXT before enabling KVM\n");
return 1;
}
+ /* launched w/o TXT and VMX disabled */
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && !tboot_enabled())
+ return 1;
}
return 0;
- /* locked but not enabled */
}
static void kvm_cpu_vmxon(u64 addr)
@@ -1683,6 +1688,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
vmx->emulation_required = 1;
vmx->rmode.vm86_active = 0;
+ vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
@@ -1756,6 +1762,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmx->emulation_required = 1;
vmx->rmode.vm86_active = 1;
+ vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
@@ -1794,7 +1801,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
continue_rmode:
kvm_mmu_reset_context(vcpu);
- init_rmode(vcpu->kvm);
}
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -2030,23 +2036,40 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vmcs_writel(GUEST_CR4, hw_cr4);
}
-static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
-
- return vmcs_readl(sf->base);
-}
-
static void vmx_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ struct kvm_save_segment *save;
u32 ar;
+ if (vmx->rmode.vm86_active
+ && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
+ || seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
+ || seg == VCPU_SREG_GS)
+ && !emulate_invalid_guest_state) {
+ switch (seg) {
+ case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
+ case VCPU_SREG_ES: save = &vmx->rmode.es; break;
+ case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
+ case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
+ case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
+ default: BUG();
+ }
+ var->selector = save->selector;
+ var->base = save->base;
+ var->limit = save->limit;
+ ar = save->ar;
+ if (seg == VCPU_SREG_TR
+ || var->selector == vmcs_read16(sf->selector))
+ goto use_saved_rmode_seg;
+ }
var->base = vmcs_readl(sf->base);
var->limit = vmcs_read32(sf->limit);
var->selector = vmcs_read16(sf->selector);
ar = vmcs_read32(sf->ar_bytes);
+use_saved_rmode_seg:
if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
ar = 0;
var->type = ar & 15;
@@ -2060,6 +2083,18 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->unusable = (ar >> 16) & 1;
}
+static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ struct kvm_segment s;
+
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ vmx_get_segment(vcpu, &s, seg);
+ return s.base;
+ }
+ return vmcs_readl(sf->base);
+}
+
static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
if (!is_protmode(vcpu))
@@ -2101,6 +2136,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
u32 ar;
if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
+ vmcs_write16(sf->selector, var->selector);
vmx->rmode.tr.selector = var->selector;
vmx->rmode.tr.base = var->base;
vmx->rmode.tr.limit = var->limit;
@@ -2699,22 +2735,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
return 0;
}
-static int init_rmode(struct kvm *kvm)
-{
- int idx, ret = 0;
-
- idx = srcu_read_lock(&kvm->srcu);
- if (!init_rmode_tss(kvm))
- goto exit;
- if (!init_rmode_identity_map(kvm))
- goto exit;
-
- ret = 1;
-exit:
- srcu_read_unlock(&kvm->srcu, idx);
- return ret;
-}
-
static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2722,10 +2742,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
int ret;
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
- if (!init_rmode(vmx->vcpu.kvm)) {
- ret = -ENOMEM;
- goto out;
- }
vmx->rmode.vm86_active = 0;
@@ -2971,6 +2987,9 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
if (ret)
return ret;
kvm->arch.tss_addr = addr;
+ if (!init_rmode_tss(kvm))
+ return -ENOMEM;
+
return 0;
}
@@ -3962,7 +3981,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
#define Q "l"
#endif
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3991,6 +4010,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
asm(
/* Store host registers */
"push %%"R"dx; push %%"R"bp;"
+ "push %%"R"cx \n\t" /* placeholder for guest rcx */
"push %%"R"cx \n\t"
"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
"je 1f \n\t"
@@ -4032,10 +4052,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */
- "xchg %0, (%%"R"sp) \n\t"
+ "mov %0, %c[wordsize](%%"R"sp) \n\t"
+ "pop %0 \n\t"
"mov %%"R"ax, %c[rax](%0) \n\t"
"mov %%"R"bx, %c[rbx](%0) \n\t"
- "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
+ "pop"Q" %c[rcx](%0) \n\t"
"mov %%"R"dx, %c[rdx](%0) \n\t"
"mov %%"R"si, %c[rsi](%0) \n\t"
"mov %%"R"di, %c[rdi](%0) \n\t"
@@ -4053,7 +4074,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%cr2, %%"R"ax \n\t"
"mov %%"R"ax, %c[cr2](%0) \n\t"
- "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t"
+ "pop %%"R"bp; pop %%"R"dx \n\t"
"setbe %c[fail](%0) \n\t"
: : "c"(vmx), "d"((unsigned long)HOST_RSP),
[launched]"i"(offsetof(struct vcpu_vmx, launched)),
@@ -4076,7 +4097,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
- [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+ [wordsize]"i"(sizeof(ulong))
: "cc", "memory"
, R"ax", R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
@@ -4183,8 +4205,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!kvm->arch.ept_identity_map_addr)
kvm->arch.ept_identity_map_addr =
VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+ err = -ENOMEM;
if (alloc_identity_pagetable(kvm) != 0)
goto free_vmcs;
+ if (!init_rmode_identity_map(kvm))
+ goto free_vmcs;
}
return &vmx->vcpu;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bcc0efce85bf..785ae0c300f7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -81,9 +81,10 @@
* - enable LME and LMA per default on 64 bit KVM
*/
#ifdef CONFIG_X86_64
-static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
+static
+u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
-static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
+static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
@@ -360,8 +361,8 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
+ kvm_make_request(KVM_REQ_NMI, vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -525,8 +526,10 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
kvm_x86_ops->set_cr0(vcpu, cr0);
- if ((cr0 ^ old_cr0) & X86_CR0_PG)
+ if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
+ kvm_async_pf_hash_reset(vcpu);
+ }
if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu);
@@ -1017,7 +1020,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
unsigned long flags;
s64 sdiff;
- spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+ raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = data - native_read_tsc();
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
@@ -1050,7 +1053,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_offset = offset;
kvm_x86_ops->write_tsc_offset(vcpu, offset);
- spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+ raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
/* Reset of TSC must disable overshoot protection below */
vcpu->arch.hv_clock.tsc_timestamp = 0;
@@ -1453,6 +1456,14 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
+static void kvmclock_reset(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.time_page) {
+ kvm_release_page_dirty(vcpu->arch.time_page);
+ vcpu->arch.time_page = NULL;
+ }
+}
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
@@ -1510,10 +1521,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -1592,6 +1600,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
} else
return set_msr_hyperv(vcpu, msr, data);
break;
+ case MSR_IA32_BBL_CR_CTL3:
+ /* Drop writes to this legacy MSR -- see rdmsr
+ * counterpart for further detail.
+ */
+ pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+ break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
@@ -1846,6 +1860,19 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
} else
return get_msr_hyperv(vcpu, msr, pdata);
break;
+ case MSR_IA32_BBL_CR_CTL3:
+ /* This legacy MSR exists but isn't fully documented in current
+ * silicon. It is however accessed by winxp in very narrow
+ * scenarios where it sets bit #19, itself documented as
+ * a "reserved" bit. Best effort attempt to source coherent
+ * read data here should the balance of the register be
+ * interpreted by the guest:
+ *
+ * L2 cache control register 3: 64GB range, 256KB size,
+ * enabled, latency 0x1, configured
+ */
+ data = 0xbe702111;
+ break;
default:
if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -2575,9 +2602,6 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
!kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
- printk(KERN_DEBUG "kvm: set_mce: "
- "injects mce exception while "
- "previous one is in progress!\n");
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return 0;
}
@@ -2648,8 +2672,6 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.pending = events->interrupt.injected;
vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu->arch.interrupt.soft = events->interrupt.soft;
- if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
- kvm_pic_clear_isr_ack(vcpu->kvm);
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
kvm_x86_ops->set_interrupt_shadow(vcpu,
events->interrupt.shadow);
@@ -4390,41 +4412,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
vcpu->arch.emulate_ctxt.have_exception = false;
vcpu->arch.emulate_ctxt.perm_ok = false;
+ vcpu->arch.emulate_ctxt.only_vendor_specific_insn
+ = emulation_type & EMULTYPE_TRAP_UD;
+
r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
- if (r == X86EMUL_PROPAGATE_FAULT)
- goto done;
trace_kvm_emulate_insn_start(vcpu);
-
- /* Only allow emulation of specific instructions on #UD
- * (namely VMMCALL, sysenter, sysexit, syscall)*/
- if (emulation_type & EMULTYPE_TRAP_UD) {
- if (!c->twobyte)
- return EMULATE_FAIL;
- switch (c->b) {
- case 0x01: /* VMMCALL */
- if (c->modrm_mod != 3 || c->modrm_rm != 1)
- return EMULATE_FAIL;
- break;
- case 0x34: /* sysenter */
- case 0x35: /* sysexit */
- if (c->modrm_mod != 0 || c->modrm_rm != 0)
- return EMULATE_FAIL;
- break;
- case 0x05: /* syscall */
- if (c->modrm_mod != 0 || c->modrm_rm != 0)
- return EMULATE_FAIL;
- break;
- default:
- return EMULATE_FAIL;
- }
-
- if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
- return EMULATE_FAIL;
- }
-
++vcpu->stat.insn_emulation;
if (r) {
+ if (emulation_type & EMULTYPE_TRAP_UD)
+ return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
@@ -4452,7 +4449,6 @@ restart:
return handle_emulation_failure(vcpu);
}
-done:
if (vcpu->arch.emulate_ctxt.have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
@@ -4562,7 +4558,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
@@ -4572,7 +4568,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
send_ipi = 1;
}
}
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
@@ -5185,6 +5181,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 1;
goto out;
}
+ if (kvm_check_request(KVM_REQ_NMI, vcpu))
+ vcpu->arch.nmi_pending = true;
}
r = kvm_mmu_reload(vcpu);
@@ -5213,14 +5211,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
- atomic_set(&vcpu->guest_mode, 1);
- smp_wmb();
+ vcpu->mode = IN_GUEST_MODE;
+
+ /* We should set ->mode before checking ->requests;
+ * see the comment in make_all_cpus_request.
+ */
+ smp_mb();
local_irq_disable();
- if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
+ if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
- atomic_set(&vcpu->guest_mode, 0);
+ vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
@@ -5256,7 +5258,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
- atomic_set(&vcpu->guest_mode, 0);
+ vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
@@ -5574,7 +5576,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int mmu_reset_needed = 0;
- int pending_vec, max_bits;
+ int pending_vec, max_bits, idx;
struct desc_ptr dt;
dt.size = sregs->idt.limit;
@@ -5603,10 +5605,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (sregs->cr4 & X86_CR4_OSXSAVE)
update_cpuid(vcpu);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (mmu_reset_needed)
kvm_mmu_reset_context(vcpu);
@@ -5617,8 +5622,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
- if (irqchip_in_kernel(vcpu->kvm))
- kvm_pic_clear_isr_ack(vcpu->kvm);
}
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
@@ -5814,10 +5817,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ kvmclock_reset(vcpu);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
@@ -5878,6 +5878,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
+ kvmclock_reset(vcpu);
+
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
@@ -6005,7 +6007,7 @@ int kvm_arch_init_vm(struct kvm *kvm)
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
- spin_lock_init(&kvm->arch.tsc_write_lock);
+ raw_spin_lock_init(&kvm->arch.tsc_write_lock);
return 0;
}
@@ -6157,7 +6159,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
me = get_cpu();
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- if (atomic_xchg(&vcpu->guest_mode, 0))
+ if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
smp_send_reschedule(cpu);
put_cpu();
}
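
The vcpu->mode rework above depends on a publish-then-check ordering: the vcpu thread stores IN_GUEST_MODE before re-reading requests, while the kicker posts a request before reading the mode, so at least one side observes the other. A hedged C11 sketch of that protocol (field and function names are illustrative, and the kernel uses its own barriers rather than C11 atomics):

#include <stdatomic.h>
#include <stdbool.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, EXITING_GUEST_MODE };

struct vcpu {
	_Atomic int mode;
	atomic_ulong requests;
};

/* vcpu side: publish IN_GUEST_MODE, then re-check pending requests. */
static bool try_enter_guest(struct vcpu *v)
{
	atomic_store(&v->mode, IN_GUEST_MODE);
	atomic_thread_fence(memory_order_seq_cst);   /* pairs with the kicker */
	if (atomic_load(&v->requests) != 0) {
		atomic_store(&v->mode, OUTSIDE_GUEST_MODE);
		return false;                        /* service the request first */
	}
	return true;                                 /* safe to enter the guest */
}

/* kicker side: post the request, then decide whether an IPI is needed. */
static bool kick_needs_ipi(struct vcpu *v, unsigned int req)
{
	atomic_fetch_or(&v->requests, 1UL << req);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load(&v->mode) == IN_GUEST_MODE;
}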
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index e10cf070ede0..f2479f19ddde 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -42,4 +42,5 @@ else
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+ lib-y += cmpxchg16b_emu.o
endif
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
new file mode 100644
index 000000000000..3e8b08a6de2b
--- /dev/null
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -0,0 +1,59 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+.text
+
+/*
+ * Inputs:
+ * %rsi : memory location to compare
+ * %rax : low 64 bits of old value
+ * %rdx : high 64 bits of old value
+ * %rbx : low 64 bits of new value
+ * %rcx : high 64 bits of new value
+ * %al : Operation successful
+ */
+ENTRY(this_cpu_cmpxchg16b_emu)
+CFI_STARTPROC
+
+#
+# Emulate 'cmpxchg16b %gs:(%rsi)', except that we return the result in %al
+# rather than via the ZF. The caller reads %al to get the result.
+#
+# Note that this is only useful for a cpuops operation: we do *not* get a
+# fully atomic operation, just an operation that is *atomic* with respect
+# to a single cpu (as provided by the this_cpu_xx class of macros).
+#
+this_cpu_cmpxchg16b_emu:
+ pushf
+ cli
+
+ cmpq %gs:(%rsi), %rax
+ jne not_same
+ cmpq %gs:8(%rsi), %rdx
+ jne not_same
+
+ movq %rbx, %gs:(%rsi)
+ movq %rcx, %gs:8(%rsi)
+
+ popf
+ mov $1, %al
+ ret
+
+ not_same:
+ popf
+ xor %al,%al
+ ret
+
+CFI_ENDPROC
+
+ENDPROC(this_cpu_cmpxchg16b_emu)
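
For readers who do not want to trace the assembly, here is a hedged C model of what the routine above implements: a 16-byte compare-and-exchange on a per-cpu slot that is atomic only against code running on the same CPU (the real thing disables interrupts; the model simply relies on being single-threaded). The struct and function names are made up for this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t lo, hi; };

/* Compare both 64-bit halves of *slot against the expected value and, only
 * if both match, install the new value.  The return value plays the role of
 * %al in the assembly above. */
static bool this_cpu_cmpxchg16b_model(struct u128 *slot,
				      struct u128 old, struct u128 new_val)
{
	if (slot->lo != old.lo || slot->hi != old.hi)
		return false;		/* mismatch: slot left unchanged */
	slot->lo = new_val.lo;
	slot->hi = new_val.hi;
	return true;			/* success */
}

int main(void)
{
	struct u128 slot = { 1, 2 };

	printf("%d\n", this_cpu_cmpxchg16b_model(&slot, (struct u128){1, 2},
						 (struct u128){3, 4}));	/* 1 */
	printf("%d\n", this_cpu_cmpxchg16b_model(&slot, (struct u128){1, 2},
						 (struct u128){5, 6}));	/* 0 */
	return 0;
}
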
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
new file mode 100644
index 000000000000..0ecb8433e5a8
--- /dev/null
+++ b/arch/x86/lib/memmove_64.S
@@ -0,0 +1,197 @@
+/*
+ * Normally compiler builtins are used, but sometimes the compiler calls
+ * out-of-line code. Based on asm-i386/string.h.
+ *
+ * This assembly file is re-written from memmove_64.c file.
+ * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
+ */
+#define _STRING_C
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+
+#undef memmove
+
+/*
+ * Implement memmove(). This can handle overlap between src and dst.
+ *
+ * Input:
+ * rdi: dest
+ * rsi: src
+ * rdx: count
+ *
+ * Output:
+ * rax: dest
+ */
+ENTRY(memmove)
+ CFI_STARTPROC
+ /* Handle copies of 32 bytes or more with the unrolled loop */
+ mov %rdi, %rax
+ cmp $0x20, %rdx
+ jb 1f
+
+ /* Decide forward/backward copy mode */
+ cmp %rdi, %rsi
+ jb 2f
+
+ /*
+ * The movsq instruction has a high startup latency,
+ * so we handle small sizes with general registers.
+ */
+ cmp $680, %rdx
+ jb 3f
+ /*
+ * The movsq instruction is only good for the aligned case.
+ */
+
+ cmpb %dil, %sil
+ je 4f
+3:
+ sub $0x20, %rdx
+ /*
+ * We gobble 32 bytes forward in each loop iteration.
+ */
+5:
+ sub $0x20, %rdx
+ movq 0*8(%rsi), %r11
+ movq 1*8(%rsi), %r10
+ movq 2*8(%rsi), %r9
+ movq 3*8(%rsi), %r8
+ leaq 4*8(%rsi), %rsi
+
+ movq %r11, 0*8(%rdi)
+ movq %r10, 1*8(%rdi)
+ movq %r9, 2*8(%rdi)
+ movq %r8, 3*8(%rdi)
+ leaq 4*8(%rdi), %rdi
+ jae 5b
+ addq $0x20, %rdx
+ jmp 1f
+ /*
+ * Handle data forward by movsq.
+ */
+ .p2align 4
+4:
+ movq %rdx, %rcx
+ movq -8(%rsi, %rdx), %r11
+ lea -8(%rdi, %rdx), %r10
+ shrq $3, %rcx
+ rep movsq
+ movq %r11, (%r10)
+ jmp 13f
+ /*
+ * Handle data backward by movsq.
+ */
+ .p2align 4
+7:
+ movq %rdx, %rcx
+ movq (%rsi), %r11
+ movq %rdi, %r10
+ leaq -8(%rsi, %rdx), %rsi
+ leaq -8(%rdi, %rdx), %rdi
+ shrq $3, %rcx
+ std
+ rep movsq
+ cld
+ movq %r11, (%r10)
+ jmp 13f
+
+ /*
+ * Start to prepare for backward copy.
+ */
+ .p2align 4
+2:
+ cmp $680, %rdx
+ jb 6f
+ cmp %dil, %sil
+ je 7b
+6:
+ /*
+ * Calculate copy position to tail.
+ */
+ addq %rdx, %rsi
+ addq %rdx, %rdi
+ subq $0x20, %rdx
+ /*
+ * We gobble 32 bytes backward in each loop iteration.
+ */
+8:
+ subq $0x20, %rdx
+ movq -1*8(%rsi), %r11
+ movq -2*8(%rsi), %r10
+ movq -3*8(%rsi), %r9
+ movq -4*8(%rsi), %r8
+ leaq -4*8(%rsi), %rsi
+
+ movq %r11, -1*8(%rdi)
+ movq %r10, -2*8(%rdi)
+ movq %r9, -3*8(%rdi)
+ movq %r8, -4*8(%rdi)
+ leaq -4*8(%rdi), %rdi
+ jae 8b
+ /*
+ * Calculate copy position to head.
+ */
+ addq $0x20, %rdx
+ subq %rdx, %rsi
+ subq %rdx, %rdi
+1:
+ cmpq $16, %rdx
+ jb 9f
+ /*
+ * Move data from 16 bytes to 31 bytes.
+ */
+ movq 0*8(%rsi), %r11
+ movq 1*8(%rsi), %r10
+ movq -2*8(%rsi, %rdx), %r9
+ movq -1*8(%rsi, %rdx), %r8
+ movq %r11, 0*8(%rdi)
+ movq %r10, 1*8(%rdi)
+ movq %r9, -2*8(%rdi, %rdx)
+ movq %r8, -1*8(%rdi, %rdx)
+ jmp 13f
+ .p2align 4
+9:
+ cmpq $8, %rdx
+ jb 10f
+ /*
+ * Move data from 8 bytes to 15 bytes.
+ */
+ movq 0*8(%rsi), %r11
+ movq -1*8(%rsi, %rdx), %r10
+ movq %r11, 0*8(%rdi)
+ movq %r10, -1*8(%rdi, %rdx)
+ jmp 13f
+10:
+ cmpq $4, %rdx
+ jb 11f
+ /*
+ * Move data from 4 bytes to 7 bytes.
+ */
+ movl (%rsi), %r11d
+ movl -4(%rsi, %rdx), %r10d
+ movl %r11d, (%rdi)
+ movl %r10d, -4(%rdi, %rdx)
+ jmp 13f
+11:
+ cmp $2, %rdx
+ jb 12f
+ /*
+ * Move data from 2 bytes to 3 bytes.
+ */
+ movw (%rsi), %r11w
+ movw -2(%rsi, %rdx), %r10w
+ movw %r11w, (%rdi)
+ movw %r10w, -2(%rdi, %rdx)
+ jmp 13f
+12:
+ cmp $1, %rdx
+ jb 13f
+ /*
+ * Move data for 1 byte.
+ */
+ movb (%rsi), %r11b
+ movb %r11b, (%rdi)
+13:
+ retq
+ CFI_ENDPROC
+ENDPROC(memmove)
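
The new assembly chooses a forward copy when the source lies at or above the destination and a backward (tail-first) copy otherwise, which is what makes overlapping ranges safe. A byte-at-a-time C sketch of just that direction choice, without the 32-byte unrolling or the movsq fast paths, might look like the following; my_memmove is a hypothetical stand-in, not the kernel routine.

#include <stddef.h>
#include <stdio.h>

static void *my_memmove(void *dest, const void *src, size_t count)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	/* Pointers are assumed to come from the same buffer, as in the demo. */
	if (s >= d) {				/* forward copy is safe */
		while (count--)
			*d++ = *s++;
	} else {				/* overlap: copy from the tail */
		d += count;
		s += count;
		while (count--)
			*--d = *--s;
	}
	return dest;
}

int main(void)
{
	char buf[16] = "abcdefgh";

	my_memmove(buf + 2, buf, 6);		/* overlapping ranges */
	printf("%s\n", buf);			/* prints "ababcdef" */
	return 0;
}
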
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c
deleted file mode 100644
index 6d0f0ec41b34..000000000000
--- a/arch/x86/lib/memmove_64.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/* Normally compiler builtins are used, but sometimes the compiler calls out
- of line code. Based on asm-i386/string.h.
- */
-#define _STRING_C
-#include <linux/string.h>
-#include <linux/module.h>
-
-#undef memmove
-void *memmove(void *dest, const void *src, size_t count)
-{
- unsigned long d0,d1,d2,d3,d4,d5,d6,d7;
- char *ret;
-
- __asm__ __volatile__(
- /* Handle more 32bytes in loop */
- "mov %2, %3\n\t"
- "cmp $0x20, %0\n\t"
- "jb 1f\n\t"
-
- /* Decide forward/backward copy mode */
- "cmp %2, %1\n\t"
- "jb 2f\n\t"
-
- /*
- * movsq instruction have many startup latency
- * so we handle small size by general register.
- */
- "cmp $680, %0\n\t"
- "jb 3f\n\t"
- /*
- * movsq instruction is only good for aligned case.
- */
- "cmpb %%dil, %%sil\n\t"
- "je 4f\n\t"
- "3:\n\t"
- "sub $0x20, %0\n\t"
- /*
- * We gobble 32byts forward in each loop.
- */
- "5:\n\t"
- "sub $0x20, %0\n\t"
- "movq 0*8(%1), %4\n\t"
- "movq 1*8(%1), %5\n\t"
- "movq 2*8(%1), %6\n\t"
- "movq 3*8(%1), %7\n\t"
- "leaq 4*8(%1), %1\n\t"
-
- "movq %4, 0*8(%2)\n\t"
- "movq %5, 1*8(%2)\n\t"
- "movq %6, 2*8(%2)\n\t"
- "movq %7, 3*8(%2)\n\t"
- "leaq 4*8(%2), %2\n\t"
- "jae 5b\n\t"
- "addq $0x20, %0\n\t"
- "jmp 1f\n\t"
- /*
- * Handle data forward by movsq.
- */
- ".p2align 4\n\t"
- "4:\n\t"
- "movq %0, %8\n\t"
- "movq -8(%1, %0), %4\n\t"
- "lea -8(%2, %0), %5\n\t"
- "shrq $3, %8\n\t"
- "rep movsq\n\t"
- "movq %4, (%5)\n\t"
- "jmp 13f\n\t"
- /*
- * Handle data backward by movsq.
- */
- ".p2align 4\n\t"
- "7:\n\t"
- "movq %0, %8\n\t"
- "movq (%1), %4\n\t"
- "movq %2, %5\n\t"
- "leaq -8(%1, %0), %1\n\t"
- "leaq -8(%2, %0), %2\n\t"
- "shrq $3, %8\n\t"
- "std\n\t"
- "rep movsq\n\t"
- "cld\n\t"
- "movq %4, (%5)\n\t"
- "jmp 13f\n\t"
-
- /*
- * Start to prepare for backward copy.
- */
- ".p2align 4\n\t"
- "2:\n\t"
- "cmp $680, %0\n\t"
- "jb 6f \n\t"
- "cmp %%dil, %%sil\n\t"
- "je 7b \n\t"
- "6:\n\t"
- /*
- * Calculate copy position to tail.
- */
- "addq %0, %1\n\t"
- "addq %0, %2\n\t"
- "subq $0x20, %0\n\t"
- /*
- * We gobble 32byts backward in each loop.
- */
- "8:\n\t"
- "subq $0x20, %0\n\t"
- "movq -1*8(%1), %4\n\t"
- "movq -2*8(%1), %5\n\t"
- "movq -3*8(%1), %6\n\t"
- "movq -4*8(%1), %7\n\t"
- "leaq -4*8(%1), %1\n\t"
-
- "movq %4, -1*8(%2)\n\t"
- "movq %5, -2*8(%2)\n\t"
- "movq %6, -3*8(%2)\n\t"
- "movq %7, -4*8(%2)\n\t"
- "leaq -4*8(%2), %2\n\t"
- "jae 8b\n\t"
- /*
- * Calculate copy position to head.
- */
- "addq $0x20, %0\n\t"
- "subq %0, %1\n\t"
- "subq %0, %2\n\t"
- "1:\n\t"
- "cmpq $16, %0\n\t"
- "jb 9f\n\t"
- /*
- * Move data from 16 bytes to 31 bytes.
- */
- "movq 0*8(%1), %4\n\t"
- "movq 1*8(%1), %5\n\t"
- "movq -2*8(%1, %0), %6\n\t"
- "movq -1*8(%1, %0), %7\n\t"
- "movq %4, 0*8(%2)\n\t"
- "movq %5, 1*8(%2)\n\t"
- "movq %6, -2*8(%2, %0)\n\t"
- "movq %7, -1*8(%2, %0)\n\t"
- "jmp 13f\n\t"
- ".p2align 4\n\t"
- "9:\n\t"
- "cmpq $8, %0\n\t"
- "jb 10f\n\t"
- /*
- * Move data from 8 bytes to 15 bytes.
- */
- "movq 0*8(%1), %4\n\t"
- "movq -1*8(%1, %0), %5\n\t"
- "movq %4, 0*8(%2)\n\t"
- "movq %5, -1*8(%2, %0)\n\t"
- "jmp 13f\n\t"
- "10:\n\t"
- "cmpq $4, %0\n\t"
- "jb 11f\n\t"
- /*
- * Move data from 4 bytes to 7 bytes.
- */
- "movl (%1), %4d\n\t"
- "movl -4(%1, %0), %5d\n\t"
- "movl %4d, (%2)\n\t"
- "movl %5d, -4(%2, %0)\n\t"
- "jmp 13f\n\t"
- "11:\n\t"
- "cmp $2, %0\n\t"
- "jb 12f\n\t"
- /*
- * Move data from 2 bytes to 3 bytes.
- */
- "movw (%1), %4w\n\t"
- "movw -2(%1, %0), %5w\n\t"
- "movw %4w, (%2)\n\t"
- "movw %5w, -2(%2, %0)\n\t"
- "jmp 13f\n\t"
- "12:\n\t"
- "cmp $1, %0\n\t"
- "jb 13f\n\t"
- /*
- * Move data for 1 byte.
- */
- "movb (%1), %4b\n\t"
- "movb %4b, (%2)\n\t"
- "13:\n\t"
- : "=&d" (d0), "=&S" (d1), "=&D" (d2), "=&a" (ret) ,
- "=r"(d3), "=r"(d4), "=r"(d5), "=r"(d6), "=&c" (d7)
- :"0" (count),
- "1" (src),
- "2" (dest)
- :"memory");
-
- return ret;
-
-}
-EXPORT_SYMBOL(memmove);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 09df2f9a3d69..3e608edf9958 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o
obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o
+obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index f21962c435ed..0919c26820d4 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -26,9 +26,7 @@
#include <asm/apic.h>
#include <asm/amd_nb.h>
-static struct bootnode __initdata nodes[8];
static unsigned char __initdata nodeids[8];
-static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
static __init int find_northbridge(void)
{
@@ -51,7 +49,7 @@ static __init int find_northbridge(void)
return num;
}
- return -1;
+ return -ENOENT;
}
static __init void early_get_boot_cpu_id(void)
@@ -69,17 +67,18 @@ static __init void early_get_boot_cpu_id(void)
#endif
}
-int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
+int __init amd_numa_init(void)
{
- unsigned long start = PFN_PHYS(start_pfn);
- unsigned long end = PFN_PHYS(end_pfn);
+ unsigned long start = PFN_PHYS(0);
+ unsigned long end = PFN_PHYS(max_pfn);
unsigned numnodes;
unsigned long prevbase;
- int i, nb, found = 0;
+ int i, j, nb;
u32 nodeid, reg;
+ unsigned int bits, cores, apicid_base;
if (!early_pci_allowed())
- return -1;
+ return -EINVAL;
nb = find_northbridge();
if (nb < 0)
@@ -90,7 +89,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
reg = read_pci_config(0, nb, 0, 0x60);
numnodes = ((reg >> 4) & 0xF) + 1;
if (numnodes <= 1)
- return -1;
+ return -ENOENT;
pr_info("Number of physical nodes %d\n", numnodes);
@@ -121,9 +120,9 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
if ((base >> 8) & 3 || (limit >> 8) & 3) {
pr_err("Node %d using interleaving mode %lx/%lx\n",
nodeid, (base >> 8) & 3, (limit >> 8) & 3);
- return -1;
+ return -EINVAL;
}
- if (node_isset(nodeid, nodes_parsed)) {
+ if (node_isset(nodeid, numa_nodes_parsed)) {
pr_info("Node %d already present, skipping\n",
nodeid);
continue;
@@ -160,117 +159,28 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
if (prevbase > base) {
pr_err("Node map not sorted %lx,%lx\n",
prevbase, base);
- return -1;
+ return -EINVAL;
}
pr_info("Node %d MemBase %016lx Limit %016lx\n",
nodeid, base, limit);
- found++;
-
- nodes[nodeid].start = base;
- nodes[nodeid].end = limit;
-
prevbase = base;
-
- node_set(nodeid, nodes_parsed);
- }
-
- if (!found)
- return -1;
- return 0;
-}
-
-#ifdef CONFIG_NUMA_EMU
-static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
- [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
-
-void __init amd_get_nodes(struct bootnode *physnodes)
-{
- int i;
-
- for_each_node_mask(i, nodes_parsed) {
- physnodes[i].start = nodes[i].start;
- physnodes[i].end = nodes[i].end;
+ numa_add_memblk(nodeid, base, limit);
+ node_set(nodeid, numa_nodes_parsed);
}
-}
-
-static int __init find_node_by_addr(unsigned long addr)
-{
- int ret = NUMA_NO_NODE;
- int i;
-
- for (i = 0; i < 8; i++)
- if (addr >= nodes[i].start && addr < nodes[i].end) {
- ret = i;
- break;
- }
- return ret;
-}
-/*
- * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
- * setup to represent the physical topology but reflect the emulated
- * environment. For each emulated node, the real node which it appears on is
- * found and a fake pxm to nid mapping is created which mirrors the actual
- * locality. node_distance() then represents the correct distances between
- * emulated nodes by using the fake acpi mappings to pxms.
- */
-void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
-{
- unsigned int bits;
- unsigned int cores;
- unsigned int apicid_base = 0;
- int i;
+ if (!nodes_weight(numa_nodes_parsed))
+ return -ENOENT;
+ /*
+ * We seem to have a valid NUMA configuration. Map apicids to nodes
+ * using the coreid bits from early_identify_cpu.
+ */
bits = boot_cpu_data.x86_coreid_bits;
cores = 1 << bits;
- early_get_boot_cpu_id();
- if (boot_cpu_physical_apicid > 0)
- apicid_base = boot_cpu_physical_apicid;
-
- for (i = 0; i < nr_nodes; i++) {
- int index;
- int nid;
- int j;
-
- nid = find_node_by_addr(nodes[i].start);
- if (nid == NUMA_NO_NODE)
- continue;
-
- index = nodeids[nid] << bits;
- if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
- for (j = apicid_base; j < cores + apicid_base; j++)
- fake_apicid_to_node[index + j] = i;
-#ifdef CONFIG_ACPI_NUMA
- __acpi_map_pxm_to_node(nid, i);
-#endif
- }
- memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
-}
-#endif /* CONFIG_NUMA_EMU */
-
-int __init amd_scan_nodes(void)
-{
- unsigned int bits;
- unsigned int cores;
- unsigned int apicid_base;
- int i;
-
- BUG_ON(nodes_empty(nodes_parsed));
- node_possible_map = nodes_parsed;
- memnode_shift = compute_hash_shift(nodes, 8, NULL);
- if (memnode_shift < 0) {
- pr_err("No NUMA node hash function found. Contact maintainer\n");
- return -1;
- }
- pr_info("Using node hash shift of %d\n", memnode_shift);
-
- /* use the coreid bits from early_identify_cpu */
- bits = boot_cpu_data.x86_coreid_bits;
- cores = (1<<bits);
apicid_base = 0;
+
/* get the APIC ID of the BSP early for systems with apicid lifting */
early_get_boot_cpu_id();
if (boot_cpu_physical_apicid > 0) {
@@ -278,17 +188,9 @@ int __init amd_scan_nodes(void)
apicid_base = boot_cpu_physical_apicid;
}
- for_each_node_mask(i, node_possible_map) {
- int j;
-
- memblock_x86_register_active_regions(i,
- nodes[i].start >> PAGE_SHIFT,
- nodes[i].end >> PAGE_SHIFT);
+ for_each_node_mask(i, numa_nodes_parsed)
for (j = apicid_base; j < cores + apicid_base; j++)
- apicid_to_node[(i << bits) + j] = i;
- setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- }
+ set_apicid_to_node((i << bits) + j, i);
- numa_init_array();
return 0;
}
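
The rewritten tail of amd_numa_init() maps APIC IDs to nodes purely from the coreid bits: each parsed node owns 1 << bits consecutive APIC IDs starting at (node << bits) + apicid_base. A small stand-alone sketch of that loop, with invented array sizes and a plain bitmask standing in for numa_nodes_parsed:

#include <stdio.h>

#define MAX_NODES 8
#define MAX_APICS 64

static int apicid_to_node[MAX_APICS];

static void map_apicids(unsigned int coreid_bits, unsigned int apicid_base,
			unsigned long parsed_mask)
{
	unsigned int cores = 1u << coreid_bits;

	for (int nid = 0; nid < MAX_NODES; nid++) {
		if (!(parsed_mask & (1ul << nid)))
			continue;	/* node was not parsed from the northbridge */
		for (unsigned int j = apicid_base; j < cores + apicid_base; j++)
			apicid_to_node[(nid << coreid_bits) + j] = nid;
	}
}

int main(void)
{
	map_apicids(2, 0, 0x3);		/* two coreid bits, nodes 0 and 1 parsed */
	for (int apic = 0; apic < 8; apic++)
		printf("apicid %d -> node %d\n", apic, apicid_to_node[apic]);
	return 0;
}
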
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 947f42abe820..286d289b039b 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -18,9 +18,9 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-unsigned long __initdata e820_table_start;
-unsigned long __meminitdata e820_table_end;
-unsigned long __meminitdata e820_table_top;
+unsigned long __initdata pgt_buf_start;
+unsigned long __meminitdata pgt_buf_end;
+unsigned long __meminitdata pgt_buf_top;
int after_bootmem;
@@ -33,7 +33,7 @@ int direct_gbpages
static void __init find_early_table_space(unsigned long end, int use_pse,
int use_gbpages)
{
- unsigned long puds, pmds, ptes, tables, start;
+ unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
phys_addr_t base;
puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -65,29 +65,20 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
#ifdef CONFIG_X86_32
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
- /*
- * RED-PEN putting page tables only on node 0 could
- * cause a hotspot and fill up ZONE_DMA. The page tables
- * need roughly 0.5KB per GB.
- */
-#ifdef CONFIG_X86_32
- start = 0x7000;
-#else
- start = 0x8000;
+ good_end = max_pfn_mapped << PAGE_SHIFT;
#endif
- base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
- tables, PAGE_SIZE);
+
+ base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
if (base == MEMBLOCK_ERROR)
panic("Cannot find space for the kernel page tables");
- e820_table_start = base >> PAGE_SHIFT;
- e820_table_end = e820_table_start;
- e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
+ pgt_buf_start = base >> PAGE_SHIFT;
+ pgt_buf_end = pgt_buf_start;
+ pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
- end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
+ end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
}
struct map_range {
@@ -279,30 +270,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
load_cr3(swapper_pg_dir);
#endif
-#ifdef CONFIG_X86_64
- if (!after_bootmem && !start) {
- pud_t *pud;
- pmd_t *pmd;
-
- mmu_cr4_features = read_cr4();
-
- /*
- * _brk_end cannot change anymore, but it and _end may be
- * located on different 2M pages. cleanup_highmap(), however,
- * can only consider _end when it runs, so destroy any
- * mappings beyond _brk_end here.
- */
- pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
- pmd = pmd_offset(pud, _brk_end - 1);
- while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
- pmd_clear(pmd);
- }
-#endif
__flush_tlb_all();
- if (!after_bootmem && e820_table_end > e820_table_start)
- memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT,
- e820_table_end << PAGE_SHIFT, "PGTABLE");
+ if (!after_bootmem && pgt_buf_end > pgt_buf_start)
+ memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
+ pgt_buf_end << PAGE_SHIFT, "PGTABLE");
if (!after_bootmem)
early_memtest(start, end);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c821074b7f0b..73ad7ebd6e9c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -62,10 +62,10 @@ bool __read_mostly __vmalloc_start_set = false;
static __init void *alloc_low_page(void)
{
- unsigned long pfn = e820_table_end++;
+ unsigned long pfn = pgt_buf_end++;
void *adr;
- if (pfn >= e820_table_top)
+ if (pfn >= pgt_buf_top)
panic("alloc_low_page: ran out of memory");
adr = __va(pfn * PAGE_SIZE);
@@ -163,8 +163,8 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
if (pmd_idx_kmap_begin != pmd_idx_kmap_end
&& (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
&& (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
- && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
- || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
+ && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
+ || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
pte_t *newpte;
int i;
@@ -644,8 +644,7 @@ void __init find_low_pfn_range(void)
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
- int acpi, int k8)
+void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
highstart_pfn = highend_pfn = max_pfn;
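
The e820_table_* to pgt_buf_* rename above does not change the allocator's shape: alloc_low_page() is a bump allocator that hands out one page frame per call by advancing pgt_buf_end and gives up at pgt_buf_top. A toy user-space model, malloc-backed and returning NULL where the kernel would panic; all names besides the pgt_buf_* globals are invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned long pgt_buf_start, pgt_buf_end, pgt_buf_top;
static unsigned char *buf_base;		/* stands in for __va(pfn * PAGE_SIZE) */

static void *alloc_low_page(void)
{
	unsigned long pfn = pgt_buf_end++;	/* bump to the next free frame */

	if (pfn >= pgt_buf_top)
		return NULL;			/* the kernel would panic() here */
	return buf_base + (pfn - pgt_buf_start) * PAGE_SIZE;
}

int main(void)
{
	pgt_buf_start = pgt_buf_end = 0;
	pgt_buf_top = 4;			/* room for four page-table pages */
	buf_base = malloc(4 * PAGE_SIZE);

	for (int i = 0; i < 5; i++) {
		void *p = alloc_low_page();
		printf("page %d -> %s\n", i, p ? "ok" : "out of space");
		if (p)
			memset(p, 0, PAGE_SIZE);
	}
	free(buf_base);
	return 0;
}
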
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a59296af80..470cc4704a9a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -314,7 +314,7 @@ void __init cleanup_highmap(void)
static __ref void *alloc_low_page(unsigned long *phys)
{
- unsigned long pfn = e820_table_end++;
+ unsigned long pfn = pgt_buf_end++;
void *adr;
if (after_bootmem) {
@@ -324,7 +324,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
return adr;
}
- if (pfn >= e820_table_top)
+ if (pfn >= pgt_buf_top)
panic("alloc_low_page: ran out of memory");
adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
@@ -333,12 +333,28 @@ static __ref void *alloc_low_page(unsigned long *phys)
return adr;
}
+static __ref void *map_low_page(void *virt)
+{
+ void *adr;
+ unsigned long phys, left;
+
+ if (after_bootmem)
+ return virt;
+
+ phys = __pa(virt);
+ left = phys & (PAGE_SIZE - 1);
+ adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
+ adr = (void *)(((unsigned long)adr) | left);
+
+ return adr;
+}
+
static __ref void unmap_low_page(void *adr)
{
if (after_bootmem)
return;
- early_iounmap(adr, PAGE_SIZE);
+ early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
}
static unsigned long __meminit
@@ -386,15 +402,6 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
}
static unsigned long __meminit
-phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
- pgprot_t prot)
-{
- pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
-
- return phys_pte_init(pte, address, end, prot);
-}
-
-static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
unsigned long page_size_mask, pgprot_t prot)
{
@@ -420,8 +427,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
if (pmd_val(*pmd)) {
if (!pmd_large(*pmd)) {
spin_lock(&init_mm.page_table_lock);
- last_map_addr = phys_pte_update(pmd, address,
+ pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
+ last_map_addr = phys_pte_init(pte, address,
end, prot);
+ unmap_low_page(pte);
spin_unlock(&init_mm.page_table_lock);
continue;
}
@@ -468,18 +477,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
}
static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
- unsigned long page_size_mask, pgprot_t prot)
-{
- pmd_t *pmd = pmd_offset(pud, 0);
- unsigned long last_map_addr;
-
- last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
- __flush_tlb_all();
- return last_map_addr;
-}
-
-static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
unsigned long page_size_mask)
{
@@ -504,8 +501,11 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
if (pud_val(*pud)) {
if (!pud_large(*pud)) {
- last_map_addr = phys_pmd_update(pud, addr, end,
+ pmd = map_low_page(pmd_offset(pud, 0));
+ last_map_addr = phys_pmd_init(pmd, addr, end,
page_size_mask, prot);
+ unmap_low_page(pmd);
+ __flush_tlb_all();
continue;
}
/*
@@ -553,17 +553,6 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
return last_map_addr;
}
-static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
- unsigned long page_size_mask)
-{
- pud_t *pud;
-
- pud = (pud_t *)pgd_page_vaddr(*pgd);
-
- return phys_pud_init(pud, addr, end, page_size_mask);
-}
-
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
unsigned long end,
@@ -587,8 +576,10 @@ kernel_physical_mapping_init(unsigned long start,
next = end;
if (pgd_val(*pgd)) {
- last_map_addr = phys_pud_update(pgd, __pa(start),
+ pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
+ last_map_addr = phys_pud_init(pud, __pa(start),
__pa(end), page_size_mask);
+ unmap_low_page(pud);
continue;
}
@@ -612,13 +603,66 @@ kernel_physical_mapping_init(unsigned long start,
}
#ifndef CONFIG_NUMA
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
- int acpi, int k8)
+void __init initmem_init(void)
{
- memblock_x86_register_active_regions(0, start_pfn, end_pfn);
+ memblock_x86_register_active_regions(0, 0, max_pfn);
+ init_memory_mapping_high();
}
#endif
+struct mapping_work_data {
+ unsigned long start;
+ unsigned long end;
+ unsigned long pfn_mapped;
+};
+
+static int __init_refok
+mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax)
+{
+ struct mapping_work_data *data = datax;
+ unsigned long pfn_mapped;
+ unsigned long final_start, final_end;
+
+ final_start = max_t(unsigned long, start_pfn<<PAGE_SHIFT, data->start);
+ final_end = min_t(unsigned long, end_pfn<<PAGE_SHIFT, data->end);
+
+ if (final_end <= final_start)
+ return 0;
+
+ pfn_mapped = init_memory_mapping(final_start, final_end);
+
+ if (pfn_mapped > data->pfn_mapped)
+ data->pfn_mapped = pfn_mapped;
+
+ return 0;
+}
+
+static unsigned long __init_refok
+init_memory_mapping_active_regions(unsigned long start, unsigned long end)
+{
+ struct mapping_work_data data;
+
+ data.start = start;
+ data.end = end;
+ data.pfn_mapped = 0;
+
+ work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data);
+
+ return data.pfn_mapped;
+}
+
+void __init_refok init_memory_mapping_high(void)
+{
+ if (max_pfn > max_low_pfn) {
+ max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32,
+ max_pfn<<PAGE_SHIFT);
+ /* can we preserve max_low_pfn ? */
+ max_low_pfn = max_pfn;
+
+ memblock.current_limit = get_max_mapped();
+ }
+}
+
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
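
init_memory_mapping_high(), added above, walks the registered active memory regions and maps only the part of each region that intersects the requested range, remembering the highest pfn mapped. The sketch below reproduces that clamping with an invented region array and a map_range() stub standing in for init_memory_mapping().

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, end; };	/* byte addresses */

static uint64_t map_range(uint64_t s, uint64_t e)
{
	printf("mapping %#llx-%#llx\n", (unsigned long long)s, (unsigned long long)e);
	return e >> 12;				/* pretend: last pfn mapped */
}

static uint64_t map_active_regions(const struct region *r, int n,
				   uint64_t start, uint64_t end)
{
	uint64_t pfn_mapped = 0;

	for (int i = 0; i < n; i++) {
		uint64_t s = r[i].start > start ? r[i].start : start;
		uint64_t e = r[i].end   < end   ? r[i].end   : end;

		if (e <= s)
			continue;		/* no overlap with [start, end) */
		uint64_t pfn = map_range(s, e);
		if (pfn > pfn_mapped)
			pfn_mapped = pfn;
	}
	return pfn_mapped;
}

int main(void)
{
	struct region regions[] = {
		{ 0x00000000ULL,  0xc0000000ULL },	/* below 4G: skipped */
		{ 0x100000000ULL, 0x180000000ULL },	/* 4G-6G */
	};
	uint64_t top = map_active_regions(regions, 2, 1ULL << 32, 0x200000000ULL);

	printf("highest pfn mapped: %#llx\n", (unsigned long long)top);
	return 0;
}
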
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ebf6d7887a38..9559d360fde7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -26,12 +26,50 @@ static __init int numa_setup(char *opt)
early_param("numa", numa_setup);
/*
- * Which logical CPUs are on which nodes
+ * apicid, cpu, node mappings
*/
+s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
+ [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+void __cpuinit numa_set_node(int cpu, int node)
+{
+ int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+ /* early setting, no percpu area yet */
+ if (cpu_to_node_map) {
+ cpu_to_node_map[cpu] = node;
+ return;
+ }
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+ printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+ dump_stack();
+ return;
+ }
+#endif
+ per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+ if (node != NUMA_NO_NODE)
+ set_cpu_numa_node(cpu, node);
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+ numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+/*
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid.
*
@@ -57,7 +95,174 @@ void __init setup_node_to_cpumask_map(void)
pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * There are unfortunately some poorly designed mainboards around that
+ * only connect memory to a single CPU. This breaks the 1:1 cpu->node
+ * mapping. To avoid this, fill in the mapping for all possible CPUs,
+ * as the number of CPUs is not known yet. We round-robin the existing
+ * nodes.
+ */
+void __init numa_init_array(void)
+{
+ int rr, i;
+
+ rr = first_node(node_online_map);
+ for (i = 0; i < nr_cpu_ids; i++) {
+ if (early_cpu_to_node(i) != NUMA_NO_NODE)
+ continue;
+ numa_set_node(i, rr);
+ rr = next_node(rr, node_online_map);
+ if (rr == MAX_NUMNODES)
+ rr = first_node(node_online_map);
+ }
+}
+
+static __init int find_near_online_node(int node)
+{
+ int n, val;
+ int min_val = INT_MAX;
+ int best_node = -1;
+
+ for_each_online_node(n) {
+ val = node_distance(node, n);
+
+ if (val < min_val) {
+ min_val = val;
+ best_node = n;
+ }
+ }
+
+ return best_node;
+}
+
+/*
+ * Setup early cpu_to_node.
+ *
+ * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
+ * and apicid_to_node[] tables have valid entries for a CPU.
+ * This means we skip cpu_to_node[] initialisation for NUMA
+ * emulation and faking node case (when running a kernel compiled
+ * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
+ * is already initialized in a round robin manner at numa_init_array,
+ * prior to this call, and this initialization is good enough
+ * for the fake NUMA cases.
+ *
+ * Called before the per_cpu areas are setup.
+ */
+void __init init_cpu_to_node(void)
+{
+ int cpu;
+ u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
+
+ BUG_ON(cpu_to_apicid == NULL);
+
+ for_each_possible_cpu(cpu) {
+ int node = numa_cpu_node(cpu);
+
+ if (node == NUMA_NO_NODE)
+ continue;
+ if (!node_online(node))
+ node = find_near_online_node(node);
+ numa_set_node(cpu, node);
+ }
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+# ifndef CONFIG_NUMA_EMU
+void __cpuinit numa_add_cpu(int cpu)
+{
+ cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+# endif /* !CONFIG_NUMA_EMU */
+
+#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+int __cpu_to_node(int cpu)
+{
+ if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+ printk(KERN_WARNING
+ "cpu_to_node(%d): usage too early!\n", cpu);
+ dump_stack();
+ return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+ }
+ return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(__cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+ if (early_per_cpu_ptr(x86_cpu_to_node_map))
+ return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+ if (!cpu_possible(cpu)) {
+ printk(KERN_WARNING
+ "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+ dump_stack();
+ return NUMA_NO_NODE;
+ }
+ return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+{
+ int node = early_cpu_to_node(cpu);
+ struct cpumask *mask;
+ char buf[64];
+
+ if (node == NUMA_NO_NODE) {
+ /* early_cpu_to_node() already emits a warning and trace */
+ return NULL;
+ }
+ mask = node_to_cpumask_map[node];
+ if (!mask) {
+ pr_err("node_to_cpumask_map[%i] NULL\n", node);
+ dump_stack();
+ return NULL;
+ }
+
+ cpulist_scnprintf(buf, sizeof(buf), mask);
+ printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu",
+ cpu, node, buf);
+ return mask;
+}
+
+# ifndef CONFIG_NUMA_EMU
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+ struct cpumask *mask;
+
+ mask = debug_cpumask_set_cpu(cpu, enable);
+ if (!mask)
+ return;
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+ numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+ numa_set_cpumask(cpu, 0);
+}
+# endif /* !CONFIG_NUMA_EMU */
+
/*
* Returns a pointer to the bitmask of CPUs on Node 'node'.
*/
@@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node)
return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
-#endif
+
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
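
numa_init_array(), moved into this file above, spreads any CPU that firmware did not assign to a node round-robin over the online nodes. A self-contained sketch with fixed-size arrays in place of the kernel's cpu and node masks; the sizes and the preassigned entries are invented.

#include <stdio.h>

#define NR_CPUS      8
#define NR_NODES     3
#define NUMA_NO_NODE (-1)

static int cpu_to_node[NR_CPUS] = { 0, NUMA_NO_NODE, NUMA_NO_NODE, 1,
				    NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE, 2 };

static void numa_init_array(void)
{
	int rr = 0;				/* first online node */

	for (int i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;		/* firmware already told us */
		cpu_to_node[i] = rr;
		rr = (rr + 1) % NR_NODES;	/* wrap like next_node()/first_node() */
	}
}

int main(void)
{
	numa_init_array();
	for (int i = 0; i < NR_CPUS; i++)
		printf("cpu %d -> node %d\n", i, cpu_to_node[i]);
	return 0;
}
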
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 84a3e4c9f277..bde3906420df 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
static unsigned long kva_start_pfn;
static unsigned long kva_pages;
+
+int __cpuinit numa_cpu_node(int cpu)
+{
+ return apic->x86_32_numa_cpu_node(cpu);
+}
+
/*
* FLAT - support for basic PC memory model with discontig enabled, essentially
* a single node with all available processors in it with a flat
@@ -346,8 +352,7 @@ static void init_remap_allocator(int nid)
(ulong) node_remap_end_vaddr[nid]);
}
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
- int acpi, int k8)
+void __init initmem_init(void)
{
int nid;
long kva_target_pfn;
@@ -361,6 +366,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
*/
get_memcfg_numa();
+ numa_init_array();
kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 95ea1551eebc..7757d2214fab 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -13,31 +13,30 @@
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
+#include <linux/acpi.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
-#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>
+#include "numa_internal.h"
+
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
-struct memnode memnode;
+nodemask_t numa_nodes_parsed __initdata;
-s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
- [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
+struct memnode memnode;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;
-/*
- * Map cpu index to node index
- */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+static struct numa_meminfo numa_meminfo __initdata;
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
/*
* Given a shift value, try to populate memnodemap[]
@@ -46,16 +45,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
* 0 if memnodemap[] too small (or shift too small)
* -1 if node overlap or lost ram (shift too big)
*/
-static int __init populate_memnodemap(const struct bootnode *nodes,
- int numnodes, int shift, int *nodeids)
+static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
{
unsigned long addr, end;
int i, res = -1;
memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
- for (i = 0; i < numnodes; i++) {
- addr = nodes[i].start;
- end = nodes[i].end;
+ for (i = 0; i < mi->nr_blks; i++) {
+ addr = mi->blk[i].start;
+ end = mi->blk[i].end;
if (addr >= end)
continue;
if ((end >> shift) >= memnodemapsize)
@@ -63,12 +61,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
do {
if (memnodemap[addr >> shift] != NUMA_NO_NODE)
return -1;
-
- if (!nodeids)
- memnodemap[addr >> shift] = i;
- else
- memnodemap[addr >> shift] = nodeids[i];
-
+ memnodemap[addr >> shift] = mi->blk[i].nid;
addr += (1UL << shift);
} while (addr < end);
res = 1;
@@ -86,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
addr = 0x8000;
nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
- nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
+ nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
nodemap_size, L1_CACHE_BYTES);
if (nodemap_addr == MEMBLOCK_ERROR) {
printk(KERN_ERR
@@ -106,16 +99,15 @@ static int __init allocate_cachealigned_memnodemap(void)
* The LSB of all start and end addresses in the node map is the value of the
* maximum possible shift.
*/
-static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
- int numnodes)
+static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
{
int i, nodes_used = 0;
unsigned long start, end;
unsigned long bitfield = 0, memtop = 0;
- for (i = 0; i < numnodes; i++) {
- start = nodes[i].start;
- end = nodes[i].end;
+ for (i = 0; i < mi->nr_blks; i++) {
+ start = mi->blk[i].start;
+ end = mi->blk[i].end;
if (start >= end)
continue;
bitfield |= start;
@@ -131,18 +123,17 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
return i;
}
-int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
- int *nodeids)
+static int __init compute_hash_shift(const struct numa_meminfo *mi)
{
int shift;
- shift = extract_lsb_from_nodes(nodes, numnodes);
+ shift = extract_lsb_from_nodes(mi);
if (allocate_cachealigned_memnodemap())
return -1;
printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
shift);
- if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
+ if (populate_memnodemap(mi, shift) != 1) {
printk(KERN_INFO "Your memory is not aligned you need to "
"rebuild your kernel with a bigger NODEMAPSIZE "
"shift=%d\n", shift);
@@ -188,6 +179,63 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
return NULL;
}
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+ struct numa_meminfo *mi)
+{
+ /* ignore zero length blks */
+ if (start == end)
+ return 0;
+
+ /* whine about and ignore invalid blks */
+ if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+ pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
+ nid, start, end);
+ return 0;
+ }
+
+ if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: too many memblk ranges\n");
+ return -EINVAL;
+ }
+
+ mi->blk[mi->nr_blks].start = start;
+ mi->blk[mi->nr_blks].end = end;
+ mi->blk[mi->nr_blks].nid = nid;
+ mi->nr_blks++;
+ return 0;
+}
+
+/**
+ * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
+ * @idx: Index of memblk to remove
+ * @mi: numa_meminfo to remove memblk from
+ *
+ * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
+ * decrementing @mi->nr_blks.
+ */
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
+{
+ mi->nr_blks--;
+ memmove(&mi->blk[idx], &mi->blk[idx + 1],
+ (mi->nr_blks - idx) * sizeof(mi->blk[0]));
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
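
numa_meminfo introduced above is a bounded array of {start, end, nid} blocks plus add and remove-by-index helpers. The following stand-alone model mirrors numa_add_memblk_to()'s validation and numa_remove_memblk_from()'s memmove-based compaction; the array size and demo addresses are invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_BLKS 16

struct memblk { uint64_t start, end; int nid; };
struct meminfo { int nr_blks; struct memblk blk[NR_BLKS]; };

static int add_memblk(struct meminfo *mi, int nid, uint64_t start, uint64_t end)
{
	if (start == end)
		return 0;			/* ignore zero-length blocks */
	if (start > end || nid < 0 || mi->nr_blks >= NR_BLKS)
		return -1;			/* invalid block or table full */
	mi->blk[mi->nr_blks++] = (struct memblk){ start, end, nid };
	return 0;
}

static void remove_memblk(struct meminfo *mi, int idx)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

int main(void)
{
	struct meminfo mi = { 0 };

	add_memblk(&mi, 0, 0x0,       0x1000000);
	add_memblk(&mi, 1, 0x1000000, 0x2000000);
	remove_memblk(&mi, 0);
	printf("blocks left: %d, first nid %d\n", mi.nr_blks, mi.blk[0].nid);
	return 0;
}
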
/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
@@ -234,696 +282,365 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
node_set_online(nodeid);
}
-/*
- * There are unfortunately some poorly designed mainboards around that
- * only connect memory to a single CPU. This breaks the 1:1 cpu->node
- * mapping. To avoid this fill in the mapping for all possible CPUs,
- * as the number of CPUs is not known yet. We round robin the existing
- * nodes.
+/**
+ * numa_cleanup_meminfo - Cleanup a numa_meminfo
+ * @mi: numa_meminfo to clean up
+ *
+ * Sanitize @mi by merging and removing unnecessary memblks. Also check for
+ * conflicts and clear unused memblks.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
*/
-void __init numa_init_array(void)
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
- int rr, i;
-
- rr = first_node(node_online_map);
- for (i = 0; i < nr_cpu_ids; i++) {
- if (early_cpu_to_node(i) != NUMA_NO_NODE)
- continue;
- numa_set_node(i, rr);
- rr = next_node(rr, node_online_map);
- if (rr == MAX_NUMNODES)
- rr = first_node(node_online_map);
- }
-}
-
-#ifdef CONFIG_NUMA_EMU
-/* Numa emulation */
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
-static char *cmdline __initdata;
+ const u64 low = 0;
+ const u64 high = (u64)max_pfn << PAGE_SHIFT;
+ int i, j, k;
-void __init numa_emu_cmdline(char *str)
-{
- cmdline = str;
-}
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
-static int __init setup_physnodes(unsigned long start, unsigned long end,
- int acpi, int amd)
-{
- int ret = 0;
- int i;
+ /* make sure all blocks are inside the limits */
+ bi->start = max(bi->start, low);
+ bi->end = min(bi->end, high);
- memset(physnodes, 0, sizeof(physnodes));
-#ifdef CONFIG_ACPI_NUMA
- if (acpi)
- acpi_get_nodes(physnodes, start, end);
-#endif
-#ifdef CONFIG_AMD_NUMA
- if (amd)
- amd_get_nodes(physnodes);
-#endif
- /*
- * Basic sanity checking on the physical node map: there may be errors
- * if the SRAT or AMD code incorrectly reported the topology or the mem=
- * kernel parameter is used.
- */
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (physnodes[i].start == physnodes[i].end)
- continue;
- if (physnodes[i].start > end) {
- physnodes[i].end = physnodes[i].start;
- continue;
- }
- if (physnodes[i].end < start) {
- physnodes[i].start = physnodes[i].end;
+ /* and there's no empty block */
+ if (bi->start == bi->end) {
+ numa_remove_memblk_from(i--, mi);
continue;
}
- if (physnodes[i].start < start)
- physnodes[i].start = start;
- if (physnodes[i].end > end)
- physnodes[i].end = end;
- ret++;
- }
-
- /*
- * If no physical topology was detected, a single node is faked to cover
- * the entire address space.
- */
- if (!ret) {
- physnodes[ret].start = start;
- physnodes[ret].end = end;
- ret = 1;
- }
- return ret;
-}
-
-static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
-{
- int i;
-
- BUG_ON(acpi && amd);
-#ifdef CONFIG_ACPI_NUMA
- if (acpi)
- acpi_fake_nodes(nodes, nr_nodes);
-#endif
-#ifdef CONFIG_AMD_NUMA
- if (amd)
- amd_fake_nodes(nodes, nr_nodes);
-#endif
- if (!acpi && !amd)
- for (i = 0; i < nr_cpu_ids; i++)
- numa_set_node(i, 0);
-}
-
-/*
- * Setups up nid to range from addr to addr + size. If the end
- * boundary is greater than max_addr, then max_addr is used instead.
- * The return value is 0 if there is additional memory left for
- * allocation past addr and -1 otherwise. addr is adjusted to be at
- * the end of the node.
- */
-static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
-{
- int ret = 0;
- nodes[nid].start = *addr;
- *addr += size;
- if (*addr >= max_addr) {
- *addr = max_addr;
- ret = -1;
- }
- nodes[nid].end = *addr;
- node_set(nid, node_possible_map);
- printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
- nodes[nid].start, nodes[nid].end,
- (nodes[nid].end - nodes[nid].start) >> 20);
- return ret;
-}
-
-/*
- * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
- * to max_addr. The return value is the number of nodes allocated.
- */
-static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
-{
- nodemask_t physnode_mask = NODE_MASK_NONE;
- u64 size;
- int big;
- int ret = 0;
- int i;
-
- if (nr_nodes <= 0)
- return -1;
- if (nr_nodes > MAX_NUMNODES) {
- pr_info("numa=fake=%d too large, reducing to %d\n",
- nr_nodes, MAX_NUMNODES);
- nr_nodes = MAX_NUMNODES;
- }
- size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
- /*
- * Calculate the number of big nodes that can be allocated as a result
- * of consolidating the remainder.
- */
- big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
- FAKE_NODE_MIN_SIZE;
-
- size &= FAKE_NODE_MIN_HASH_MASK;
- if (!size) {
- pr_err("Not enough memory for each node. "
- "NUMA emulation disabled.\n");
- return -1;
- }
-
- for (i = 0; i < MAX_NUMNODES; i++)
- if (physnodes[i].start != physnodes[i].end)
- node_set(i, physnode_mask);
-
- /*
- * Continue to fill physical nodes with fake nodes until there is no
- * memory left on any of them.
- */
- while (nodes_weight(physnode_mask)) {
- for_each_node_mask(i, physnode_mask) {
- u64 end = physnodes[i].start + size;
- u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
-
- if (ret < big)
- end += FAKE_NODE_MIN_SIZE;
+ for (j = i + 1; j < mi->nr_blks; j++) {
+ struct numa_memblk *bj = &mi->blk[j];
+ unsigned long start, end;
/*
- * Continue to add memory to this fake node if its
- * non-reserved memory is less than the per-node size.
+ * See whether there are overlapping blocks. Whine
+ * about but allow overlaps of the same nid. They
+ * will be merged below.
*/
- while (end - physnodes[i].start -
- memblock_x86_hole_size(physnodes[i].start, end) < size) {
- end += FAKE_NODE_MIN_SIZE;
- if (end > physnodes[i].end) {
- end = physnodes[i].end;
- break;
+ if (bi->end > bj->start && bi->start < bj->end) {
+ if (bi->nid != bj->nid) {
+ pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
+ bi->nid, bi->start, bi->end,
+ bj->nid, bj->start, bj->end);
+ return -EINVAL;
}
+ pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
+ bi->nid, bi->start, bi->end,
+ bj->start, bj->end);
}
/*
- * If there won't be at least FAKE_NODE_MIN_SIZE of
- * non-reserved memory in ZONE_DMA32 for the next node,
- * this one must extend to the boundary.
- */
- if (end < dma32_end && dma32_end - end -
- memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
- end = dma32_end;
-
- /*
- * If there won't be enough non-reserved memory for the
- * next node, this one must extend to the end of the
- * physical node.
- */
- if (physnodes[i].end - end -
- memblock_x86_hole_size(end, physnodes[i].end) < size)
- end = physnodes[i].end;
-
- /*
- * Avoid allocating more nodes than requested, which can
- * happen as a result of rounding down each node's size
- * to FAKE_NODE_MIN_SIZE.
+ * Join together blocks on the same node, holes
+ * between which don't overlap with memory on other
+ * nodes.
*/
- if (nodes_weight(physnode_mask) + ret >= nr_nodes)
- end = physnodes[i].end;
-
- if (setup_node_range(ret++, &physnodes[i].start,
- end - physnodes[i].start,
- physnodes[i].end) < 0)
- node_clear(i, physnode_mask);
+ if (bi->nid != bj->nid)
+ continue;
+ start = max(min(bi->start, bj->start), low);
+ end = min(max(bi->end, bj->end), high);
+ for (k = 0; k < mi->nr_blks; k++) {
+ struct numa_memblk *bk = &mi->blk[k];
+
+ if (bi->nid == bk->nid)
+ continue;
+ if (start < bk->end && end > bk->start)
+ break;
+ }
+ if (k < mi->nr_blks)
+ continue;
+ printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
+ bi->nid, bi->start, bi->end, bj->start, bj->end,
+ start, end);
+ bi->start = start;
+ bi->end = end;
+ numa_remove_memblk_from(j--, mi);
}
}
- return ret;
-}
-
-/*
- * Returns the end address of a node so that there is at least `size' amount of
- * non-reserved memory or `max_addr' is reached.
- */
-static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
-{
- u64 end = start + size;
- while (end - start - memblock_x86_hole_size(start, end) < size) {
- end += FAKE_NODE_MIN_SIZE;
- if (end > max_addr) {
- end = max_addr;
- break;
- }
+ for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
+ mi->blk[i].start = mi->blk[i].end = 0;
+ mi->blk[i].nid = NUMA_NO_NODE;
}
- return end;
+
+ return 0;
}
/*
- * Sets up fake nodes of `size' interleaved over physical nodes ranging from
- * `addr' to `max_addr'. The return value is the number of nodes allocated.
+ * Set nodes, which have memory in @mi, in *@nodemask.
*/
-static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
+static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
+ const struct numa_meminfo *mi)
{
- nodemask_t physnode_mask = NODE_MASK_NONE;
- u64 min_size;
- int ret = 0;
int i;
- if (!size)
- return -1;
- /*
- * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
- * increased accordingly if the requested size is too small. This
- * creates a uniform distribution of node sizes across the entire
- * machine (but not necessarily over physical nodes).
- */
- min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
- MAX_NUMNODES;
- min_size = max(min_size, FAKE_NODE_MIN_SIZE);
- if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
- min_size = (min_size + FAKE_NODE_MIN_SIZE) &
- FAKE_NODE_MIN_HASH_MASK;
- if (size < min_size) {
- pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
- size >> 20, min_size >> 20);
- size = min_size;
- }
- size &= FAKE_NODE_MIN_HASH_MASK;
-
- for (i = 0; i < MAX_NUMNODES; i++)
- if (physnodes[i].start != physnodes[i].end)
- node_set(i, physnode_mask);
- /*
- * Fill physical nodes with fake nodes of size until there is no memory
- * left on any of them.
- */
- while (nodes_weight(physnode_mask)) {
- for_each_node_mask(i, physnode_mask) {
- u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
- u64 end;
-
- end = find_end_of_node(physnodes[i].start,
- physnodes[i].end, size);
- /*
- * If there won't be at least FAKE_NODE_MIN_SIZE of
- * non-reserved memory in ZONE_DMA32 for the next node,
- * this one must extend to the boundary.
- */
- if (end < dma32_end && dma32_end - end -
- memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
- end = dma32_end;
+ for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
+ if (mi->blk[i].start != mi->blk[i].end &&
+ mi->blk[i].nid != NUMA_NO_NODE)
+ node_set(mi->blk[i].nid, *nodemask);
+}
- /*
- * If there won't be enough non-reserved memory for the
- * next node, this one must extend to the end of the
- * physical node.
- */
- if (physnodes[i].end - end -
- memblock_x86_hole_size(end, physnodes[i].end) < size)
- end = physnodes[i].end;
+/**
+ * numa_reset_distance - Reset NUMA distance table
+ *
+ * The current table is freed. The next numa_set_distance() call will
+ * create a new one.
+ */
+void __init numa_reset_distance(void)
+{
+ size_t size;
- /*
- * Setup the fake node that will be allocated as bootmem
- * later. If setup_node_range() returns non-zero, there
- * is no more memory available on this physical node.
- */
- if (setup_node_range(ret++, &physnodes[i].start,
- end - physnodes[i].start,
- physnodes[i].end) < 0)
- node_clear(i, physnode_mask);
- }
+ if (numa_distance_cnt) {
+ size = numa_distance_cnt * sizeof(numa_distance[0]);
+ memblock_x86_free_range(__pa(numa_distance),
+ __pa(numa_distance) + size);
+ numa_distance_cnt = 0;
}
- return ret;
+ numa_distance = NULL;
}
-/*
- * Sets up the system RAM area from start_pfn to last_pfn according to the
- * numa=fake command-line option.
- */
-static int __init numa_emulation(unsigned long start_pfn,
- unsigned long last_pfn, int acpi, int amd)
+static int __init numa_alloc_distance(void)
{
- u64 addr = start_pfn << PAGE_SHIFT;
- u64 max_addr = last_pfn << PAGE_SHIFT;
- int num_nodes;
- int i;
+ nodemask_t nodes_parsed;
+ size_t size;
+ int i, j, cnt = 0;
+ u64 phys;
- /*
- * If the numa=fake command-line contains a 'M' or 'G', it represents
- * the fixed node size. Otherwise, if it is just a single number N,
- * split the system RAM into N fake nodes.
- */
- if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
- u64 size;
+ /* size the new table and allocate it */
+ nodes_parsed = numa_nodes_parsed;
+ numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
- size = memparse(cmdline, &cmdline);
- num_nodes = split_nodes_size_interleave(addr, max_addr, size);
- } else {
- unsigned long n;
+ for_each_node_mask(i, nodes_parsed)
+ cnt = i;
+ cnt++;
+ size = cnt * cnt * sizeof(numa_distance[0]);
- n = simple_strtoul(cmdline, NULL, 0);
- num_nodes = split_nodes_interleave(addr, max_addr, n);
+ phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT,
+ size, PAGE_SIZE);
+ if (phys == MEMBLOCK_ERROR) {
+ pr_warning("NUMA: Warning: can't allocate distance table!\n");
+ /* don't retry until explicitly reset */
+ numa_distance = (void *)1LU;
+ return -ENOMEM;
}
+ memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
- if (num_nodes < 0)
- return num_nodes;
- memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
- if (memnode_shift < 0) {
- memnode_shift = 0;
- printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
- "disabled.\n");
- return -1;
- }
+ numa_distance = __va(phys);
+ numa_distance_cnt = cnt;
+
+ /* fill with the default distances */
+ for (i = 0; i < cnt; i++)
+ for (j = 0; j < cnt; j++)
+ numa_distance[i * cnt + j] = i == j ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+ printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
- /*
- * We need to vacate all active ranges that may have been registered for
- * the e820 memory map.
- */
- remove_all_active_ranges();
- for_each_node_mask(i, node_possible_map) {
- memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
- nodes[i].end >> PAGE_SHIFT);
- setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- }
- setup_physnodes(addr, max_addr, acpi, amd);
- fake_physnodes(acpi, amd, num_nodes);
- numa_init_array();
return 0;
}
-#endif /* CONFIG_NUMA_EMU */
-void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
- int acpi, int amd)
+/**
+ * numa_set_distance - Set the NUMA distance from one NUMA node to another
+ * @from: the 'from' node to set distance
+ * @to: the 'to' node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance. If the distance
+ * table doesn't exist, one large enough to accommodate all the currently
+ * known nodes will be created.
+ */
+void __init numa_set_distance(int from, int to, int distance)
{
- int i;
-
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
-
-#ifdef CONFIG_NUMA_EMU
- setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
- acpi, amd);
- if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
+ if (!numa_distance && numa_alloc_distance() < 0)
return;
- setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
- acpi, amd);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
-#endif
-#ifdef CONFIG_ACPI_NUMA
- if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
- last_pfn << PAGE_SHIFT))
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
+ printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
+ from, to, distance);
return;
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
-#endif
+ }
-#ifdef CONFIG_AMD_NUMA
- if (!numa_off && amd && !amd_scan_nodes())
+ if ((u8)distance != distance ||
+ (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
return;
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
-#endif
- printk(KERN_INFO "%s\n",
- numa_off ? "NUMA turned off" : "No NUMA configuration found");
+ }
- printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
- start_pfn << PAGE_SHIFT,
- last_pfn << PAGE_SHIFT);
- /* setup dummy node covering all memory */
- memnode_shift = 63;
- memnodemap = memnode.embedded_map;
- memnodemap[0] = 0;
- node_set_online(0);
- node_set(0, node_possible_map);
- for (i = 0; i < nr_cpu_ids; i++)
- numa_set_node(i, 0);
- memblock_x86_register_active_regions(0, start_pfn, last_pfn);
- setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
+ numa_distance[from * numa_distance_cnt + to] = distance;
}
-unsigned long __init numa_free_all_bootmem(void)
+int __node_distance(int from, int to)
{
- unsigned long pages = 0;
- int i;
-
- for_each_online_node(i)
- pages += free_all_bootmem_node(NODE_DATA(i));
-
- pages += free_all_memory_core_early(MAX_NUMNODES);
-
- return pages;
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+ return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+ return numa_distance[from * numa_distance_cnt + to];
}
+EXPORT_SYMBOL(__node_distance);
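
The distance table added above is a flat cnt x cnt byte matrix, preloaded with LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE elsewhere, and __node_distance() falls back to those defaults for out-of-range nodes. A compact model follows; the 10/20 values follow the usual SLIT convention, everything else is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20
#define MAX_NODES       4

static uint8_t dist[MAX_NODES * MAX_NODES];
static int dist_cnt;

static void alloc_distance(int cnt)
{
	dist_cnt = cnt;
	for (int i = 0; i < cnt; i++)
		for (int j = 0; j < cnt; j++)
			dist[i * cnt + j] = i == j ? LOCAL_DISTANCE
						   : REMOTE_DISTANCE;
}

static int node_distance(int from, int to)
{
	if (from >= dist_cnt || to >= dist_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return dist[from * dist_cnt + to];
}

int main(void)
{
	alloc_distance(2);
	dist[0 * dist_cnt + 1] = 16;	/* like numa_set_distance(0, 1, 16) */
	printf("%d %d %d\n", node_distance(0, 0), node_distance(0, 1),
	       node_distance(3, 3));	/* prints "10 16 10" */
	return 0;
}
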
-#ifdef CONFIG_NUMA
-
-static __init int find_near_online_node(int node)
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common). Make sure the nodes cover all memory.
+ */
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
- int n, val;
- int min_val = INT_MAX;
- int best_node = -1;
-
- for_each_online_node(n) {
- val = node_distance(node, n);
+ unsigned long numaram, e820ram;
+ int i;
- if (val < min_val) {
- min_val = val;
- best_node = n;
- }
+ numaram = 0;
+ for (i = 0; i < mi->nr_blks; i++) {
+ unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
+ unsigned long e = mi->blk[i].end >> PAGE_SHIFT;
+ numaram += e - s;
+ numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+ if ((long)numaram < 0)
+ numaram = 0;
}
- return best_node;
+ e820ram = max_pfn - (memblock_x86_hole_size(0,
+ max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
+ /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
+ if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
+ printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
+ (numaram << PAGE_SHIFT) >> 20,
+ (e820ram << PAGE_SHIFT) >> 20);
+ return false;
+ }
+ return true;
}
-/*
- * Setup early cpu_to_node.
- *
- * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
- * and apicid_to_node[] tables have valid entries for a CPU.
- * This means we skip cpu_to_node[] initialisation for NUMA
- * emulation and faking node case (when running a kernel compiled
- * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
- * is already initialized in a round robin manner at numa_init_array,
- * prior to this call, and this initialization is good enough
- * for the fake NUMA cases.
- *
- * Called before the per_cpu areas are setup.
- */
-void __init init_cpu_to_node(void)
+static int __init numa_register_memblks(struct numa_meminfo *mi)
{
- int cpu;
- u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
+ int i, nid;
- BUG_ON(cpu_to_apicid == NULL);
+ /* Account for nodes with cpus and no memory */
+ node_possible_map = numa_nodes_parsed;
+ numa_nodemask_from_meminfo(&node_possible_map, mi);
+ if (WARN_ON(nodes_empty(node_possible_map)))
+ return -EINVAL;
- for_each_possible_cpu(cpu) {
- int node;
- u16 apicid = cpu_to_apicid[cpu];
-
- if (apicid == BAD_APICID)
- continue;
- node = apicid_to_node[apicid];
- if (node == NUMA_NO_NODE)
- continue;
- if (!node_online(node))
- node = find_near_online_node(node);
- numa_set_node(cpu, node);
+ memnode_shift = compute_hash_shift(mi);
+ if (memnode_shift < 0) {
+ printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
+ return -EINVAL;
}
-}
-#endif
+ for (i = 0; i < mi->nr_blks; i++)
+ memblock_x86_register_active_regions(mi->blk[i].nid,
+ mi->blk[i].start >> PAGE_SHIFT,
+ mi->blk[i].end >> PAGE_SHIFT);
-void __cpuinit numa_set_node(int cpu, int node)
-{
- int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+ /* for out of order entries */
+ sort_node_map();
+ if (!numa_meminfo_cover_memory(mi))
+ return -EINVAL;
- /* early setting, no percpu area yet */
- if (cpu_to_node_map) {
- cpu_to_node_map[cpu] = node;
- return;
- }
+ init_memory_mapping_high();
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
- printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
- dump_stack();
- return;
+ /* Finally register nodes. */
+ for_each_node_mask(nid, node_possible_map) {
+ u64 start = (u64)max_pfn << PAGE_SHIFT;
+ u64 end = 0;
+
+ for (i = 0; i < mi->nr_blks; i++) {
+ if (nid != mi->blk[i].nid)
+ continue;
+ start = min(mi->blk[i].start, start);
+ end = max(mi->blk[i].end, end);
+ }
+
+ if (start < end)
+ setup_node_bootmem(nid, start, end);
}
-#endif
- per_cpu(x86_cpu_to_node_map, cpu) = node;
- if (node != NUMA_NO_NODE)
- set_cpu_numa_node(cpu, node);
+ return 0;
}
-void __cpuinit numa_clear_node(int cpu)
+static int __init dummy_numa_init(void)
{
- numa_set_node(cpu, NUMA_NO_NODE);
-}
+ printk(KERN_INFO "%s\n",
+ numa_off ? "NUMA turned off" : "No NUMA configuration found");
+ printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
+ 0LU, max_pfn << PAGE_SHIFT);
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+ node_set(0, numa_nodes_parsed);
+ numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
-#ifndef CONFIG_NUMA_EMU
-void __cpuinit numa_add_cpu(int cpu)
-{
- cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+ return 0;
}
-void __cpuinit numa_remove_cpu(int cpu)
-{
- cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-#else
-void __cpuinit numa_add_cpu(int cpu)
+void __init initmem_init(void)
{
- unsigned long addr;
- u16 apicid;
- int physnid;
- int nid = NUMA_NO_NODE;
+ int (*numa_init[])(void) = { [2] = dummy_numa_init };
+ int i, j;
- apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
- if (apicid != BAD_APICID)
- nid = apicid_to_node[apicid];
- if (nid == NUMA_NO_NODE)
- nid = early_cpu_to_node(cpu);
- BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
-
- /*
- * Use the starting address of the emulated node to find which physical
- * node it is allocated on.
- */
- addr = node_start_pfn(nid) << PAGE_SHIFT;
- for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
- if (addr >= physnodes[physnid].start &&
- addr < physnodes[physnid].end)
- break;
-
- /*
- * Map the cpu to each emulated node that is allocated on the physical
- * node of the cpu's apic id.
- */
- for_each_online_node(nid) {
- addr = node_start_pfn(nid) << PAGE_SHIFT;
- if (addr >= physnodes[physnid].start &&
- addr < physnodes[physnid].end)
- cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+ if (!numa_off) {
+#ifdef CONFIG_ACPI_NUMA
+ numa_init[0] = x86_acpi_numa_init;
+#endif
+#ifdef CONFIG_AMD_NUMA
+ numa_init[1] = amd_numa_init;
+#endif
}
-}
-void __cpuinit numa_remove_cpu(int cpu)
-{
- int i;
+ for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
+ if (!numa_init[i])
+ continue;
- for_each_online_node(i)
- cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
-}
-#endif /* !CONFIG_NUMA_EMU */
+ for (j = 0; j < MAX_LOCAL_APIC; j++)
+ set_apicid_to_node(j, NUMA_NO_NODE);
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
-{
- int node = early_cpu_to_node(cpu);
- struct cpumask *mask;
- char buf[64];
-
- mask = node_to_cpumask_map[node];
- if (!mask) {
- pr_err("node_to_cpumask_map[%i] NULL\n", node);
- dump_stack();
- return NULL;
- }
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+ remove_all_active_ranges();
+ numa_reset_distance();
- cpulist_scnprintf(buf, sizeof(buf), mask);
- printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
- enable ? "numa_add_cpu" : "numa_remove_cpu",
- cpu, node, buf);
- return mask;
-}
+ if (numa_init[i]() < 0)
+ continue;
-/*
- * --------- debug versions of the numa functions ---------
- */
-#ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
- struct cpumask *mask;
+ if (numa_cleanup_meminfo(&numa_meminfo) < 0)
+ continue;
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
+ numa_emulation(&numa_meminfo, numa_distance_cnt);
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
-}
-#else
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
- int node = early_cpu_to_node(cpu);
- struct cpumask *mask;
- int i;
+ if (numa_register_memblks(&numa_meminfo) < 0)
+ continue;
- for_each_online_node(i) {
- unsigned long addr;
+ for (j = 0; j < nr_cpu_ids; j++) {
+ int nid = early_cpu_to_node(j);
- addr = node_start_pfn(i) << PAGE_SHIFT;
- if (addr < physnodes[node].start ||
- addr >= physnodes[node].end)
- continue;
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ if (nid == NUMA_NO_NODE)
+ continue;
+ if (!node_online(nid))
+ numa_clear_node(j);
+ }
+ numa_init_array();
+ return;
}
+ BUG();
}
-#endif /* CONFIG_NUMA_EMU */
-void __cpuinit numa_add_cpu(int cpu)
+unsigned long __init numa_free_all_bootmem(void)
{
- numa_set_cpumask(cpu, 1);
-}
+ unsigned long pages = 0;
+ int i;
-void __cpuinit numa_remove_cpu(int cpu)
-{
- numa_set_cpumask(cpu, 0);
-}
+ for_each_online_node(i)
+ pages += free_all_bootmem_node(NODE_DATA(i));
-int __cpu_to_node(int cpu)
-{
- if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
- printk(KERN_WARNING
- "cpu_to_node(%d): usage too early!\n", cpu);
- dump_stack();
- return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
- }
- return per_cpu(x86_cpu_to_node_map, cpu);
-}
-EXPORT_SYMBOL(__cpu_to_node);
+ pages += free_all_memory_core_early(MAX_NUMNODES);
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
-{
- if (early_per_cpu_ptr(x86_cpu_to_node_map))
- return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
- if (!cpu_possible(cpu)) {
- printk(KERN_WARNING
- "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
- dump_stack();
- return NUMA_NO_NODE;
- }
- return per_cpu(x86_cpu_to_node_map, cpu);
+ return pages;
}
-/*
- * --------- end of debug versions of the numa functions ---------
- */
+int __cpuinit numa_cpu_node(int cpu)
+{
+ int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+ if (apicid != BAD_APICID)
+ return __apicid_to_node[apicid];
+ return NUMA_NO_NODE;
+}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
new file mode 100644
index 000000000000..607a2e8bc87a
--- /dev/null
+++ b/arch/x86/mm/numa_emulation.c
@@ -0,0 +1,475 @@
+/*
+ * NUMA emulation
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/topology.h>
+#include <linux/memblock.h>
+#include <asm/dma.h>
+
+#include "numa_internal.h"
+
+static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
+static char *emu_cmdline __initdata;
+
+void __init numa_emu_cmdline(char *str)
+{
+ emu_cmdline = str;
+}
+
+static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
+{
+ int i;
+
+ for (i = 0; i < mi->nr_blks; i++)
+ if (mi->blk[i].nid == nid)
+ return i;
+ return -ENOENT;
+}
+
+/*
+ * Sets up node @nid with @size bytes taken from physical block @phys_blk of
+ * @pi. The return value is -errno if something went wrong, 0 otherwise.
+ */
+static int __init emu_setup_memblk(struct numa_meminfo *ei,
+ struct numa_meminfo *pi,
+ int nid, int phys_blk, u64 size)
+{
+ struct numa_memblk *eb = &ei->blk[ei->nr_blks];
+ struct numa_memblk *pb = &pi->blk[phys_blk];
+
+ if (ei->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: Too many emulated memblks, failing emulation\n");
+ return -EINVAL;
+ }
+
+ ei->nr_blks++;
+ eb->start = pb->start;
+ eb->end = pb->start + size;
+ eb->nid = nid;
+
+ if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
+ emu_nid_to_phys[nid] = pb->nid;
+
+ pb->start += size;
+ if (pb->start >= pb->end) {
+ WARN_ON_ONCE(pb->start > pb->end);
+ numa_remove_memblk_from(phys_blk, pi);
+ }
+
+ printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
+ eb->start, eb->end, (eb->end - eb->start) >> 20);
+ return 0;
+}
+
+/*
+ * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
+ * addr to max_addr. Returns 0 on success or a negative value on failure.
+ */
+static int __init split_nodes_interleave(struct numa_meminfo *ei,
+ struct numa_meminfo *pi,
+ u64 addr, u64 max_addr, int nr_nodes)
+{
+ nodemask_t physnode_mask = NODE_MASK_NONE;
+ u64 size;
+ int big;
+ int nid = 0;
+ int i, ret;
+
+ if (nr_nodes <= 0)
+ return -1;
+ if (nr_nodes > MAX_NUMNODES) {
+ pr_info("numa=fake=%d too large, reducing to %d\n",
+ nr_nodes, MAX_NUMNODES);
+ nr_nodes = MAX_NUMNODES;
+ }
+
+ size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
+ /*
+ * Calculate the number of big nodes that can be allocated as a result
+ * of consolidating the remainder.
+ */
+ big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
+ FAKE_NODE_MIN_SIZE;
+
+ size &= FAKE_NODE_MIN_HASH_MASK;
+ if (!size) {
+ pr_err("Not enough memory for each node. "
+ "NUMA emulation disabled.\n");
+ return -1;
+ }
+
+ for (i = 0; i < pi->nr_blks; i++)
+ node_set(pi->blk[i].nid, physnode_mask);
+
+ /*
+ * Continue to fill physical nodes with fake nodes until there is no
+ * memory left on any of them.
+ */
+ while (nodes_weight(physnode_mask)) {
+ for_each_node_mask(i, physnode_mask) {
+ u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+ u64 start, limit, end;
+ int phys_blk;
+
+ phys_blk = emu_find_memblk_by_nid(i, pi);
+ if (phys_blk < 0) {
+ node_clear(i, physnode_mask);
+ continue;
+ }
+ start = pi->blk[phys_blk].start;
+ limit = pi->blk[phys_blk].end;
+ end = start + size;
+
+ if (nid < big)
+ end += FAKE_NODE_MIN_SIZE;
+
+ /*
+ * Continue to add memory to this fake node if its
+ * non-reserved memory is less than the per-node size.
+ */
+ while (end - start -
+ memblock_x86_hole_size(start, end) < size) {
+ end += FAKE_NODE_MIN_SIZE;
+ if (end > limit) {
+ end = limit;
+ break;
+ }
+ }
+
+ /*
+ * If there won't be at least FAKE_NODE_MIN_SIZE of
+ * non-reserved memory in ZONE_DMA32 for the next node,
+ * this one must extend to the boundary.
+ */
+ if (end < dma32_end && dma32_end - end -
+ memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ end = dma32_end;
+
+ /*
+ * If there won't be enough non-reserved memory for the
+ * next node, this one must extend to the end of the
+ * physical node.
+ */
+ if (limit - end -
+ memblock_x86_hole_size(end, limit) < size)
+ end = limit;
+
+ ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
+ phys_blk,
+ min(end, limit) - start);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Returns the end address of a node so that there is at least `size' amount of
+ * non-reserved memory or `max_addr' is reached.
+ */
+static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
+{
+ u64 end = start + size;
+
+ while (end - start - memblock_x86_hole_size(start, end) < size) {
+ end += FAKE_NODE_MIN_SIZE;
+ if (end > max_addr) {
+ end = max_addr;
+ break;
+ }
+ }
+ return end;
+}
+
+/*
+ * Sets up fake nodes of `size' interleaved over physical nodes ranging from
+ * `addr' to `max_addr'. Returns 0 on success or a negative value on failure.
+ */
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+ struct numa_meminfo *pi,
+ u64 addr, u64 max_addr, u64 size)
+{
+ nodemask_t physnode_mask = NODE_MASK_NONE;
+ u64 min_size;
+ int nid = 0;
+ int i, ret;
+
+ if (!size)
+ return -1;
+ /*
+ * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
+ * increased accordingly if the requested size is too small. This
+ * creates a uniform distribution of node sizes across the entire
+ * machine (but not necessarily over physical nodes).
+ */
+ min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
+ MAX_NUMNODES;
+ min_size = max(min_size, FAKE_NODE_MIN_SIZE);
+ if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
+ min_size = (min_size + FAKE_NODE_MIN_SIZE) &
+ FAKE_NODE_MIN_HASH_MASK;
+ if (size < min_size) {
+ pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
+ size >> 20, min_size >> 20);
+ size = min_size;
+ }
+ size &= FAKE_NODE_MIN_HASH_MASK;
+
+ for (i = 0; i < pi->nr_blks; i++)
+ node_set(pi->blk[i].nid, physnode_mask);
+
+ /*
+ * Fill physical nodes with fake nodes of size until there is no memory
+ * left on any of them.
+ */
+ while (nodes_weight(physnode_mask)) {
+ for_each_node_mask(i, physnode_mask) {
+ u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
+ u64 start, limit, end;
+ int phys_blk;
+
+ phys_blk = emu_find_memblk_by_nid(i, pi);
+ if (phys_blk < 0) {
+ node_clear(i, physnode_mask);
+ continue;
+ }
+ start = pi->blk[phys_blk].start;
+ limit = pi->blk[phys_blk].end;
+
+ end = find_end_of_node(start, limit, size);
+ /*
+ * If there won't be at least FAKE_NODE_MIN_SIZE of
+ * non-reserved memory in ZONE_DMA32 for the next node,
+ * this one must extend to the boundary.
+ */
+ if (end < dma32_end && dma32_end - end -
+ memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ end = dma32_end;
+
+ /*
+ * If there won't be enough non-reserved memory for the
+ * next node, this one must extend to the end of the
+ * physical node.
+ */
+ if (limit - end -
+ memblock_x86_hole_size(end, limit) < size)
+ end = limit;
+
+ ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
+ phys_blk,
+ min(end, limit) - start);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * numa_emulation - Emulate NUMA nodes
+ * @numa_meminfo: NUMA configuration to massage
+ * @numa_dist_cnt: The size of the physical NUMA distance table
+ *
+ * Emulate NUMA nodes according to the numa=fake kernel parameter.
+ * @numa_meminfo contains the physical memory configuration and is modified
+ * to reflect the emulated configuration on success. @numa_dist_cnt is
+ * used to determine the size of the physical distance table.
+ *
+ * On success, the following modifications are made.
+ *
+ * - @numa_meminfo is updated to reflect the emulated nodes.
+ *
+ * - __apicid_to_node[] is updated such that APIC IDs are mapped to the
+ * emulated nodes.
+ *
+ * - NUMA distance table is rebuilt to represent distances between emulated
+ * nodes. The distances are determined considering how emulated nodes
+ * are mapped to physical nodes and match the actual distances.
+ *
+ * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical
+ * nodes. This is used by numa_add_cpu() and numa_remove_cpu().
+ *
+ * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with
+ * identity mapping and no other modification is made.
+ */
+void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
+{
+ static struct numa_meminfo ei __initdata;
+ static struct numa_meminfo pi __initdata;
+ const u64 max_addr = max_pfn << PAGE_SHIFT;
+ u8 *phys_dist = NULL;
+ int i, j, ret;
+
+ if (!emu_cmdline)
+ goto no_emu;
+
+ memset(&ei, 0, sizeof(ei));
+ pi = *numa_meminfo;
+
+ for (i = 0; i < MAX_NUMNODES; i++)
+ emu_nid_to_phys[i] = NUMA_NO_NODE;
+
+ /*
+ * If the numa=fake command-line contains a 'M' or 'G', it represents
+ * the fixed node size. Otherwise, if it is just a single number N,
+ * split the system RAM into N fake nodes.
+ */
+ if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+ u64 size;
+
+ size = memparse(emu_cmdline, &emu_cmdline);
+ ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
+ } else {
+ unsigned long n;
+
+ n = simple_strtoul(emu_cmdline, NULL, 0);
+ ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
+ }
+
+ if (ret < 0)
+ goto no_emu;
+
+ if (numa_cleanup_meminfo(&ei) < 0) {
+ pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
+ goto no_emu;
+ }
+
+ /*
+ * Copy the original distance table. It's temporary so no need to
+ * reserve it.
+ */
+ if (numa_dist_cnt) {
+ size_t size = numa_dist_cnt * sizeof(phys_dist[0]);
+ u64 phys;
+
+ phys = memblock_find_in_range(0,
+ (u64)max_pfn_mapped << PAGE_SHIFT,
+ size, PAGE_SIZE);
+ if (phys == MEMBLOCK_ERROR) {
+ pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
+ goto no_emu;
+ }
+ phys_dist = __va(phys);
+
+ for (i = 0; i < numa_dist_cnt; i++)
+ for (j = 0; j < numa_dist_cnt; j++)
+ phys_dist[i * numa_dist_cnt + j] =
+ node_distance(i, j);
+ }
+
+ /* commit */
+ *numa_meminfo = ei;
+
+ /*
+ * Transform __apicid_to_node table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
+ */
+ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
+ if (__apicid_to_node[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
+ if (__apicid_to_node[i] == emu_nid_to_phys[j])
+ break;
+ __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
+ }
+
+ /* make sure all emulated nodes are mapped to a physical node */
+ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+ if (emu_nid_to_phys[i] == NUMA_NO_NODE)
+ emu_nid_to_phys[i] = 0;
+
+ /* transform distance table */
+ numa_reset_distance();
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ for (j = 0; j < MAX_NUMNODES; j++) {
+ int physi = emu_nid_to_phys[i];
+ int physj = emu_nid_to_phys[j];
+ int dist;
+
+ if (physi >= numa_dist_cnt || physj >= numa_dist_cnt)
+ dist = physi == physj ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+ else
+ dist = phys_dist[physi * numa_dist_cnt + physj];
+
+ numa_set_distance(i, j, dist);
+ }
+ }
+ return;
+
+no_emu:
+ /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */
+ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+ emu_nid_to_phys[i] = i;
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+void __cpuinit numa_add_cpu(int cpu)
+{
+ int physnid, nid;
+
+ nid = numa_cpu_node(cpu);
+ if (nid == NUMA_NO_NODE)
+ nid = early_cpu_to_node(cpu);
+ BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
+
+ physnid = emu_nid_to_phys[nid];
+
+ /*
+ * Map the cpu to each emulated node that is allocated on the physical
+ * node of the cpu's apic id.
+ */
+ for_each_online_node(nid)
+ if (emu_nid_to_phys[nid] == physnid)
+ cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+ int i;
+
+ for_each_online_node(i)
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
+}
+#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+ struct cpumask *mask;
+ int nid, physnid, i;
+
+ nid = early_cpu_to_node(cpu);
+ if (nid == NUMA_NO_NODE) {
+ /* early_cpu_to_node() already emits a warning and trace */
+ return;
+ }
+
+ physnid = emu_nid_to_phys[nid];
+
+ for_each_online_node(i) {
+ if (emu_nid_to_phys[i] != physnid)
+ continue;
+
+ mask = debug_cpumask_set_cpu(cpu, enable);
+ if (!mask)
+ return;
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+ }
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+ numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+ numa_set_cpumask(cpu, 0);
+}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
new file mode 100644
index 000000000000..ef2d97377d7c
--- /dev/null
+++ b/arch/x86/mm/numa_internal.h
@@ -0,0 +1,31 @@
+#ifndef __X86_MM_NUMA_INTERNAL_H
+#define __X86_MM_NUMA_INTERNAL_H
+
+#include <linux/types.h>
+#include <asm/numa.h>
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+void __init numa_reset_distance(void);
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt)
+{ }
+#endif
+
+#endif /* __X86_MM_NUMA_INTERNAL_H */
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index ae96e7b8051d..48651c6f657d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -57,7 +57,7 @@ struct node_memory_chunk_s {
static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
static int __initdata num_memory_chunks; /* total number of memory chunks */
-static u8 __initdata apicid_to_pxm[MAX_APICID];
+static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC];
int acpi_numa __initdata;
@@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void)
printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
num_memory_chunks);
- for (i = 0; i < MAX_APICID; i++)
- apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);
+ for (i = 0; i < MAX_LOCAL_APIC; i++)
+ set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));
for (j = 0; j < num_memory_chunks; j++){
struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 603d285d1daa..8e9d3394f6d4 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -26,88 +26,34 @@
int acpi_numa __initdata;
-static struct acpi_table_slit *acpi_slit;
-
-static nodemask_t nodes_parsed __initdata;
-static nodemask_t cpu_nodes_parsed __initdata;
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];
-static int num_node_memblks __initdata;
-static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
-static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;
-
static __init int setup_node(int pxm)
{
return acpi_map_pxm_to_node(pxm);
}
-static __init int conflicting_memblks(unsigned long start, unsigned long end)
-{
- int i;
- for (i = 0; i < num_node_memblks; i++) {
- struct bootnode *nd = &node_memblk_range[i];
- if (nd->start == nd->end)
- continue;
- if (nd->end > start && nd->start < end)
- return memblk_nodeid[i];
- if (nd->end == end && nd->start == start)
- return memblk_nodeid[i];
- }
- return -1;
-}
-
-static __init void cutoff_node(int i, unsigned long start, unsigned long end)
-{
- struct bootnode *nd = &nodes[i];
-
- if (nd->start < start) {
- nd->start = start;
- if (nd->end < nd->start)
- nd->start = nd->end;
- }
- if (nd->end > end) {
- nd->end = end;
- if (nd->start > nd->end)
- nd->start = nd->end;
- }
-}
-
static __init void bad_srat(void)
{
- int i;
printk(KERN_ERR "SRAT: SRAT not used.\n");
acpi_numa = -1;
- for (i = 0; i < MAX_LOCAL_APIC; i++)
- apicid_to_node[i] = NUMA_NO_NODE;
- for (i = 0; i < MAX_NUMNODES; i++) {
- nodes[i].start = nodes[i].end = 0;
- nodes_add[i].start = nodes_add[i].end = 0;
- }
- remove_all_active_ranges();
+ memset(nodes_add, 0, sizeof(nodes_add));
}
static __init inline int srat_disabled(void)
{
- return numa_off || acpi_numa < 0;
+ return acpi_numa < 0;
}
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
- unsigned length;
- unsigned long phys;
-
- length = slit->header.length;
- phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length,
- PAGE_SIZE);
-
- if (phys == MEMBLOCK_ERROR)
- panic(" Can not save slit!\n");
+ int i, j;
- acpi_slit = __va(phys);
- memcpy(acpi_slit, slit, length);
- memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
+ for (i = 0; i < slit->locality_count; i++)
+ for (j = 0; j < slit->locality_count; j++)
+ numa_set_distance(pxm_to_node(i), pxm_to_node(j),
+ slit->entry[slit->locality_count * i + j]);
}
/* Callback for Proximity Domain -> x2APIC mapping */
@@ -138,8 +84,8 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
return;
}
- apicid_to_node[apic_id] = node;
- node_set(node, cpu_nodes_parsed);
+ set_apicid_to_node(apic_id, node);
+ node_set(node, numa_nodes_parsed);
acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
pxm, apic_id, node);
@@ -178,8 +124,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
return;
}
- apicid_to_node[apic_id] = node;
- node_set(node, cpu_nodes_parsed);
+ set_apicid_to_node(apic_id, node);
+ node_set(node, numa_nodes_parsed);
acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
pxm, apic_id, node);
@@ -241,7 +187,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
}
if (changed) {
- node_set(node, cpu_nodes_parsed);
+ node_set(node, numa_nodes_parsed);
printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
nd->start, nd->end);
}
@@ -251,10 +197,8 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
- struct bootnode *nd, oldnode;
unsigned long start, end;
int node, pxm;
- int i;
if (srat_disabled())
return;
@@ -276,300 +220,31 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
bad_srat();
return;
}
- i = conflicting_memblks(start, end);
- if (i == node) {
- printk(KERN_WARNING
- "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
- pxm, start, end, nodes[i].start, nodes[i].end);
- } else if (i >= 0) {
- printk(KERN_ERR
- "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
- pxm, start, end, node_to_pxm(i),
- nodes[i].start, nodes[i].end);
+
+ if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
return;
}
- nd = &nodes[node];
- oldnode = *nd;
- if (!node_test_and_set(node, nodes_parsed)) {
- nd->start = start;
- nd->end = end;
- } else {
- if (start < nd->start)
- nd->start = start;
- if (nd->end < end)
- nd->end = end;
- }
printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
start, end);
- if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
+ if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)
update_nodes_add(node, start, end);
- /* restore nodes[node] */
- *nd = oldnode;
- if ((nd->start | nd->end) == 0)
- node_clear(node, nodes_parsed);
- }
-
- node_memblk_range[num_node_memblks].start = start;
- node_memblk_range[num_node_memblks].end = end;
- memblk_nodeid[num_node_memblks] = node;
- num_node_memblks++;
-}
-
-/* Sanity check to catch more bad SRATs (they are amazingly common).
- Make sure the PXMs cover all memory. */
-static int __init nodes_cover_memory(const struct bootnode *nodes)
-{
- int i;
- unsigned long pxmram, e820ram;
-
- pxmram = 0;
- for_each_node_mask(i, nodes_parsed) {
- unsigned long s = nodes[i].start >> PAGE_SHIFT;
- unsigned long e = nodes[i].end >> PAGE_SHIFT;
- pxmram += e - s;
- pxmram -= __absent_pages_in_range(i, s, e);
- if ((long)pxmram < 0)
- pxmram = 0;
- }
-
- e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
- /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
- if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
- printk(KERN_ERR
- "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
- (pxmram << PAGE_SHIFT) >> 20,
- (e820ram << PAGE_SHIFT) >> 20);
- return 0;
- }
- return 1;
}
void __init acpi_numa_arch_fixup(void) {}
-#ifdef CONFIG_NUMA_EMU
-void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
- unsigned long end)
-{
- int i;
-
- for_each_node_mask(i, nodes_parsed) {
- cutoff_node(i, start, end);
- physnodes[i].start = nodes[i].start;
- physnodes[i].end = nodes[i].end;
- }
-}
-#endif /* CONFIG_NUMA_EMU */
-
-/* Use the information discovered above to actually set up the nodes. */
-int __init acpi_scan_nodes(unsigned long start, unsigned long end)
+int __init x86_acpi_numa_init(void)
{
- int i;
-
- if (acpi_numa <= 0)
- return -1;
-
- /* First clean up the node list */
- for (i = 0; i < MAX_NUMNODES; i++)
- cutoff_node(i, start, end);
-
- /*
- * Join together blocks on the same node, holes between
- * which don't overlap with memory on other nodes.
- */
- for (i = 0; i < num_node_memblks; ++i) {
- int j, k;
-
- for (j = i + 1; j < num_node_memblks; ++j) {
- unsigned long start, end;
-
- if (memblk_nodeid[i] != memblk_nodeid[j])
- continue;
- start = min(node_memblk_range[i].end,
- node_memblk_range[j].end);
- end = max(node_memblk_range[i].start,
- node_memblk_range[j].start);
- for (k = 0; k < num_node_memblks; ++k) {
- if (memblk_nodeid[i] == memblk_nodeid[k])
- continue;
- if (start < node_memblk_range[k].end &&
- end > node_memblk_range[k].start)
- break;
- }
- if (k < num_node_memblks)
- continue;
- start = min(node_memblk_range[i].start,
- node_memblk_range[j].start);
- end = max(node_memblk_range[i].end,
- node_memblk_range[j].end);
- printk(KERN_INFO "SRAT: Node %d "
- "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
- memblk_nodeid[i],
- node_memblk_range[i].start,
- node_memblk_range[i].end,
- node_memblk_range[j].start,
- node_memblk_range[j].end,
- start, end);
- node_memblk_range[i].start = start;
- node_memblk_range[i].end = end;
- k = --num_node_memblks - j;
- memmove(memblk_nodeid + j, memblk_nodeid + j+1,
- k * sizeof(*memblk_nodeid));
- memmove(node_memblk_range + j, node_memblk_range + j+1,
- k * sizeof(*node_memblk_range));
- --j;
- }
- }
-
- memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
- memblk_nodeid);
- if (memnode_shift < 0) {
- printk(KERN_ERR
- "SRAT: No NUMA node hash function found. Contact maintainer\n");
- bad_srat();
- return -1;
- }
-
- for (i = 0; i < num_node_memblks; i++)
- memblock_x86_register_active_regions(memblk_nodeid[i],
- node_memblk_range[i].start >> PAGE_SHIFT,
- node_memblk_range[i].end >> PAGE_SHIFT);
-
- /* for out of order entries in SRAT */
- sort_node_map();
- if (!nodes_cover_memory(nodes)) {
- bad_srat();
- return -1;
- }
+ int ret;
- /* Account for nodes with cpus and no memory */
- nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
-
- /* Finally register nodes */
- for_each_node_mask(i, node_possible_map)
- setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- /* Try again in case setup_node_bootmem missed one due
- to missing bootmem */
- for_each_node_mask(i, node_possible_map)
- if (!node_online(i))
- setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-
- for (i = 0; i < nr_cpu_ids; i++) {
- int node = early_cpu_to_node(i);
-
- if (node == NUMA_NO_NODE)
- continue;
- if (!node_online(node))
- numa_clear_node(i);
- }
- numa_init_array();
- return 0;
-}
-
-#ifdef CONFIG_NUMA_EMU
-static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
- [0 ... MAX_NUMNODES-1] = PXM_INVAL
-};
-static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
- [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
-static int __init find_node_by_addr(unsigned long addr)
-{
- int ret = NUMA_NO_NODE;
- int i;
-
- for_each_node_mask(i, nodes_parsed) {
- /*
- * Find the real node that this emulated node appears on. For
- * the sake of simplicity, we only use a real node's starting
- * address to determine which emulated node it appears on.
- */
- if (addr >= nodes[i].start && addr < nodes[i].end) {
- ret = i;
- break;
- }
- }
- return ret;
+ ret = acpi_numa_init();
+ if (ret < 0)
+ return ret;
+ return srat_disabled() ? -EINVAL : 0;
}
-/*
- * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
- * mappings that respect the real ACPI topology but reflect our emulated
- * environment. For each emulated node, we find which real node it appears on
- * and create PXM to NID mappings for those fake nodes which mirror that
- * locality. SLIT will now represent the correct distances between emulated
- * nodes as a result of the real topology.
- */
-void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
-{
- int i, j;
-
- for (i = 0; i < num_nodes; i++) {
- int nid, pxm;
-
- nid = find_node_by_addr(fake_nodes[i].start);
- if (nid == NUMA_NO_NODE)
- continue;
- pxm = node_to_pxm(nid);
- if (pxm == PXM_INVAL)
- continue;
- fake_node_to_pxm_map[i] = pxm;
- /*
- * For each apicid_to_node mapping that exists for this real
- * node, it must now point to the fake node ID.
- */
- for (j = 0; j < MAX_LOCAL_APIC; j++)
- if (apicid_to_node[j] == nid &&
- fake_apicid_to_node[j] == NUMA_NO_NODE)
- fake_apicid_to_node[j] = i;
- }
-
- /*
- * If there are apicid-to-node mappings for physical nodes that do not
- * have a corresponding emulated node, it should default to a guaranteed
- * value.
- */
- for (i = 0; i < MAX_LOCAL_APIC; i++)
- if (apicid_to_node[i] != NUMA_NO_NODE &&
- fake_apicid_to_node[i] == NUMA_NO_NODE)
- fake_apicid_to_node[i] = 0;
-
- for (i = 0; i < num_nodes; i++)
- __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
- memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
-
- nodes_clear(nodes_parsed);
- for (i = 0; i < num_nodes; i++)
- if (fake_nodes[i].start != fake_nodes[i].end)
- node_set(i, nodes_parsed);
-}
-
-static int null_slit_node_compare(int a, int b)
-{
- return node_to_pxm(a) == node_to_pxm(b);
-}
-#else
-static int null_slit_node_compare(int a, int b)
-{
- return a == b;
-}
-#endif /* CONFIG_NUMA_EMU */
-
-int __node_distance(int a, int b)
-{
- int index;
-
- if (!acpi_slit)
- return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
- REMOTE_DISTANCE;
- index = acpi_slit->locality_count * node_to_pxm(a);
- return acpi_slit->entry[index + node_to_pxm(b)];
-}
-
-EXPORT_SYMBOL(__node_distance);
-
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
int memory_add_physaddr_to_nid(u64 start)
{
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6acc724d5d8f..55272d7c3b0b 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -179,12 +179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
sender = this_cpu_read(tlb_vector_offset);
f = &flush_state[sender];
- /*
- * Could avoid this lock when
- * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
- * probably not worth checking this for a cache-hot lock.
- */
- raw_spin_lock(&f->tlbstate_lock);
+ if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+ raw_spin_lock(&f->tlbstate_lock);
f->flush_mm = mm;
f->flush_va = va;
@@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
f->flush_mm = NULL;
f->flush_va = 0;
- raw_spin_unlock(&f->tlbstate_lock);
+ if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+ raw_spin_unlock(&f->tlbstate_lock);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 9fadec074142..98ab13058f89 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
#endif
}
-static int inline addr_increment(void)
+static inline int addr_increment(void)
{
#ifdef CONFIG_SMP
return smp_num_siblings == 2 ? 2 : 1;
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index e27dffbbb1a7..026e4931d162 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -350,7 +350,7 @@ static int __init early_fill_mp_bus_info(void)
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
-static void enable_pci_io_ecs(void *unused)
+static void __cpuinit enable_pci_io_ecs(void *unused)
{
u64 reg;
rdmsrl(MSR_AMD64_NB_CFG, reg);
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 85b68ef5e809..c63c6d3dd8e8 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -254,7 +254,7 @@ int bridge_read(unsigned int devfn, int reg, int len, u32 *value)
static int ce4100_conf_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
- int i, retval = 1;
+ int i;
if (bus == 1) {
for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 87e6c8323117..8201165bae28 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -597,21 +597,18 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN &&
+ device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)
+ || (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN &&
+ device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)
+ || (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN &&
+ device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
- r->name = "PIIX/ICH";
- r->get = pirq_piix_get;
- r->set = pirq_piix_set;
- return 1;
- }
return 0;
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 25cd4a07d09f..8c4085a95ef1 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -20,7 +20,8 @@
#include <asm/xen/pci.h>
#ifdef CONFIG_ACPI
-static int xen_hvm_register_pirq(u32 gsi, int triggering)
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ int trigger, int polarity)
{
int rc, irq;
struct physdev_map_pirq map_irq;
@@ -41,7 +42,7 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering)
return -1;
}
- if (triggering == ACPI_EDGE_SENSITIVE) {
+ if (trigger == ACPI_EDGE_SENSITIVE) {
shareable = 0;
name = "ioapic-edge";
} else {
@@ -55,12 +56,6 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering)
return irq;
}
-
-static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
- int trigger, int polarity)
-{
- return xen_hvm_register_pirq(gsi, trigger);
-}
#endif
#if defined(CONFIG_PCI_MSI)
@@ -91,7 +86,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- int irq, pirq, ret = 0;
+ int irq, pirq;
struct msi_desc *msidesc;
struct msi_msg msg;
@@ -99,39 +94,32 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
__read_msi_msg(msidesc, &msg);
pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) {
- xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
- "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ);
- if (irq < 0)
+ if (msg.data != XEN_PIRQ_MSI_DATA ||
+ xen_irq_from_pirq(pirq) < 0) {
+ pirq = xen_allocate_pirq_msi(dev, msidesc);
+ if (pirq < 0)
goto error;
- ret = set_irq_msi(irq, msidesc);
- if (ret < 0)
- goto error_while;
- printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d"
- " pirq=%d\n", irq, pirq);
- return 0;
+ xen_msi_compose_msg(dev, pirq, &msg);
+ __write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+ } else {
+ dev_dbg(&dev->dev,
+ "xen: msi already bound to pirq=%d\n", pirq);
}
- xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
- "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ));
- if (irq < 0 || pirq < 0)
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
+ (type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi");
+ if (irq < 0)
goto error;
- printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
- xen_msi_compose_msg(dev, pirq, &msg);
- ret = set_irq_msi(irq, msidesc);
- if (ret < 0)
- goto error_while;
- write_msi_msg(irq, &msg);
+ dev_dbg(&dev->dev,
+ "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
}
return 0;
-error_while:
- unbind_from_irqhandler(irq, NULL);
error:
- if (ret == -ENODEV)
- dev_err(&dev->dev, "Xen PCI frontend has not registered" \
- " MSI/MSI-X support!\n");
-
- return ret;
+ dev_err(&dev->dev,
+ "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ return -ENODEV;
}
/*
@@ -150,35 +138,26 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return -ENOMEM;
if (type == PCI_CAP_ID_MSIX)
- ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+ ret = xen_pci_frontend_enable_msix(dev, v, nvec);
else
- ret = xen_pci_frontend_enable_msi(dev, &v);
+ ret = xen_pci_frontend_enable_msi(dev, v);
if (ret)
goto error;
i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_allocate_pirq(v[i], 0, /* not sharable */
- (type == PCI_CAP_ID_MSIX) ?
- "pcifront-msi-x" : "pcifront-msi");
- if (irq < 0) {
- ret = -1;
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ (type == PCI_CAP_ID_MSIX) ?
+ "pcifront-msi-x" :
+ "pcifront-msi");
+ if (irq < 0)
goto free;
- }
-
- ret = set_irq_msi(irq, msidesc);
- if (ret)
- goto error_while;
i++;
}
kfree(v);
return 0;
-error_while:
- unbind_from_irqhandler(irq, NULL);
error:
- if (ret == -ENODEV)
- dev_err(&dev->dev, "Xen PCI frontend has not registered" \
- " MSI/MSI-X support!\n");
+ dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
free:
kfree(v);
return ret;
@@ -193,6 +172,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
xen_pci_frontend_disable_msix(dev);
else
xen_pci_frontend_disable_msi(dev);
+
+ /* Free the IRQs and the msidesc using the generic code. */
+ default_teardown_msi_irqs(dev);
}
static void xen_teardown_msi_irq(unsigned int irq)
@@ -200,47 +182,82 @@ static void xen_teardown_msi_irq(unsigned int irq)
xen_destroy_irq(irq);
}
+#ifdef CONFIG_XEN_DOM0
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- int irq, ret;
+ int ret = 0;
struct msi_desc *msidesc;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_create_msi_irq(dev, msidesc, type);
- if (irq < 0)
- return -1;
+ struct physdev_map_pirq map_irq;
- ret = set_irq_msi(irq, msidesc);
- if (ret)
- goto error;
- }
- return 0;
+ memset(&map_irq, 0, sizeof(map_irq));
+ map_irq.domid = DOMID_SELF;
+ map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.index = -1;
+ map_irq.pirq = -1;
+ map_irq.bus = dev->bus->number;
+ map_irq.devfn = dev->devfn;
-error:
- xen_destroy_irq(irq);
+ if (type == PCI_CAP_ID_MSIX) {
+ int pos;
+ u32 table_offset, bir;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+
+ pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+
+ map_irq.table_base = pci_resource_start(dev, bir);
+ map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
+ }
+
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (ret) {
+ dev_warn(&dev->dev, "xen map irq failed %d\n", ret);
+ goto out;
+ }
+
+ ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
+ map_irq.pirq, map_irq.index,
+ (type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi");
+ if (ret < 0)
+ goto out;
+ }
+ ret = 0;
+out:
return ret;
}
#endif
+#endif
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
int rc;
int share = 1;
+ u8 gsi;
- dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
-
- if (dev->irq < 0)
- return -EINVAL;
+ rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+ rc);
+ return rc;
+ }
- if (dev->irq < NR_IRQS_LEGACY)
+ if (gsi < NR_IRQS_LEGACY)
share = 0;
- rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+ rc = xen_allocate_pirq(gsi, share, "pcifront");
if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
- dev->irq, rc);
+ dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
+ gsi, rc);
return rc;
}
+
+ dev->irq = rc;
+ dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
return 0;
}
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index d2c0d51a7178..68c0dbcc95be 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -15,21 +15,19 @@
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
+#include <asm/prom.h>
#include <asm/setup.h>
+#include <asm/i8259.h>
#include <asm/io.h>
+#include <asm/io_apic.h>
static int ce4100_i8042_detect(void)
{
return 0;
}
-static void __init sdv_find_smp_config(void)
-{
-}
-
#ifdef CONFIG_SERIAL_8250
-
static unsigned int mem_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
@@ -118,6 +116,15 @@ static void __init sdv_arch_setup(void)
sdv_serial_fixup();
}
+#ifdef CONFIG_X86_IO_APIC
+static void __cpuinit sdv_pci_init(void)
+{
+ x86_of_pci_init();
+ /* We can't set this earlier, because we need to calibrate the timer */
+ legacy_pic = &null_legacy_pic;
+}
+#endif
+
/*
* CE4100 specific x86_init function overrides and early setup
* calls.
@@ -128,5 +135,10 @@ void __init x86_ce4100_early_setup(void)
x86_platform.i8042_detect = ce4100_i8042_detect;
x86_init.resources.probe_roms = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
- x86_init.mpparse.find_smp_config = sdv_find_smp_config;
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+
+#ifdef CONFIG_X86_IO_APIC
+ x86_init.pci.init_irq = sdv_pci_init;
+ x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
+#endif
}
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
new file mode 100644
index 000000000000..dc701ea58546
--- /dev/null
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -0,0 +1,428 @@
+/*
+ * CE4100 on Falcon Falls
+ *
+ * (c) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+/dts-v1/;
+/ {
+ model = "intel,falconfalls";
+ compatible = "intel,falconfalls";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "intel,ce4100";
+ reg = <0>;
+ lapic = <&lapic0>;
+ };
+ };
+
+ soc@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "intel,ce4100-cp";
+ ranges;
+
+ ioapic1: interrupt-controller@fec00000 {
+ #interrupt-cells = <2>;
+ compatible = "intel,ce4100-ioapic";
+ interrupt-controller;
+ reg = <0xfec00000 0x1000>;
+ };
+
+ timer@fed00000 {
+ compatible = "intel,ce4100-hpet";
+ reg = <0xfed00000 0x200>;
+ };
+
+ lapic0: interrupt-controller@fee00000 {
+ compatible = "intel,ce4100-lapic";
+ reg = <0xfee00000 0x1000>;
+ };
+
+ pci@3fc {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ compatible = "intel,ce4100-pci", "pci";
+ device_type = "pci";
+ bus-range = <0 0>;
+ ranges = <0x2000000 0 0xbffff000 0xbffff000 0 0x1000
+ 0x2000000 0 0xdffe0000 0xdffe0000 0 0x1000
+ 0x0000000 0 0x0 0x0 0 0x100>;
+
+ /* Secondary IO-APIC */
+ ioapic2: interrupt-controller@0,1 {
+ #interrupt-cells = <2>;
+ compatible = "intel,ce4100-ioapic";
+ interrupt-controller;
+ reg = <0x100 0x0 0x0 0x0 0x0>;
+ assigned-addresses = <0x02000000 0x0 0xbffff000 0x0 0x1000>;
+ };
+
+ pci@1,0 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ compatible = "intel,ce4100-pci", "pci";
+ device_type = "pci";
+ bus-range = <1 1>;
+ ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
+
+ interrupt-parent = <&ioapic2>;
+
+ display@2,0 {
+ compatible = "pci8086,2e5b.2",
+ "pci8086,2e5b",
+ "pciclass038000",
+ "pciclass0380";
+
+ reg = <0x11000 0x0 0x0 0x0 0x0>;
+ interrupts = <0 1>;
+ };
+
+ multimedia@3,0 {
+ compatible = "pci8086,2e5c.2",
+ "pci8086,2e5c",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x11800 0x0 0x0 0x0 0x0>;
+ interrupts = <2 1>;
+ };
+
+ multimedia@4,0 {
+ compatible = "pci8086,2e5d.2",
+ "pci8086,2e5d",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x12000 0x0 0x0 0x0 0x0>;
+ interrupts = <4 1>;
+ };
+
+ multimedia@4,1 {
+ compatible = "pci8086,2e5e.2",
+ "pci8086,2e5e",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x12100 0x0 0x0 0x0 0x0>;
+ interrupts = <5 1>;
+ };
+
+ sound@6,0 {
+ compatible = "pci8086,2e5f.2",
+ "pci8086,2e5f",
+ "pciclass040100",
+ "pciclass0401";
+
+ reg = <0x13000 0x0 0x0 0x0 0x0>;
+ interrupts = <6 1>;
+ };
+
+ sound@6,1 {
+ compatible = "pci8086,2e5f.2",
+ "pci8086,2e5f",
+ "pciclass040100",
+ "pciclass0401";
+
+ reg = <0x13100 0x0 0x0 0x0 0x0>;
+ interrupts = <7 1>;
+ };
+
+ sound@6,2 {
+ compatible = "pci8086,2e60.2",
+ "pci8086,2e60",
+ "pciclass040100",
+ "pciclass0401";
+
+ reg = <0x13200 0x0 0x0 0x0 0x0>;
+ interrupts = <8 1>;
+ };
+
+ display@8,0 {
+ compatible = "pci8086,2e61.2",
+ "pci8086,2e61",
+ "pciclass038000",
+ "pciclass0380";
+
+ reg = <0x14000 0x0 0x0 0x0 0x0>;
+ interrupts = <9 1>;
+ };
+
+ display@8,1 {
+ compatible = "pci8086,2e62.2",
+ "pci8086,2e62",
+ "pciclass038000",
+ "pciclass0380";
+
+ reg = <0x14100 0x0 0x0 0x0 0x0>;
+ interrupts = <10 1>;
+ };
+
+ multimedia@8,2 {
+ compatible = "pci8086,2e63.2",
+ "pci8086,2e63",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x14200 0x0 0x0 0x0 0x0>;
+ interrupts = <11 1>;
+ };
+
+ entertainment-encryption@9,0 {
+ compatible = "pci8086,2e64.2",
+ "pci8086,2e64",
+ "pciclass101000",
+ "pciclass1010";
+
+ reg = <0x14800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 1>;
+ };
+
+ localbus@a,0 {
+ compatible = "pci8086,2e65.2",
+ "pci8086,2e65",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x15000 0x0 0x0 0x0 0x0>;
+ };
+
+ serial@b,0 {
+ compatible = "pci8086,2e66.2",
+ "pci8086,2e66",
+ "pciclass070003",
+ "pciclass0700";
+
+ reg = <0x15800 0x0 0x0 0x0 0x0>;
+ interrupts = <14 1>;
+ };
+
+ gpio@b,1 {
+ compatible = "pci8086,2e67.2",
+ "pci8086,2e67",
+ "pciclassff0000",
+ "pciclassff00";
+
+ #gpio-cells = <2>;
+ reg = <0x15900 0x0 0x0 0x0 0x0>;
+ interrupts = <15 1>;
+ gpio-controller;
+ };
+
+ i2c-controller@b,2 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "pci8086,2e68.2",
+ "pci8086,2e68",
+ "pciclass,ff0000",
+ "pciclass,ff00";
+
+ reg = <0x15a00 0x0 0x0 0x0 0x0>;
+ interrupts = <16 1>;
+ ranges = <0 0 0x02000000 0 0xdffe0500 0x100
+ 1 0 0x02000000 0 0xdffe0600 0x100
+ 2 0 0x02000000 0 0xdffe0700 0x100>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <0 0 0x100>;
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <1 0 0x100>;
+
+ gpio@26 {
+ #gpio-cells = <2>;
+ compatible = "ti,pcf8575";
+ reg = <0x26>;
+ gpio-controller;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <2 0 0x100>;
+
+ gpio@26 {
+ #gpio-cells = <2>;
+ compatible = "ti,pcf8575";
+ reg = <0x26>;
+ gpio-controller;
+ };
+ };
+ };
+
+ smart-card@b,3 {
+ compatible = "pci8086,2e69.2",
+ "pci8086,2e69",
+ "pciclass070500",
+ "pciclass0705";
+
+ reg = <0x15b00 0x0 0x0 0x0 0x0>;
+ interrupts = <15 1>;
+ };
+
+ spi-controller@b,4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible =
+ "pci8086,2e6a.2",
+ "pci8086,2e6a",
+ "pciclass,ff0000",
+ "pciclass,ff00";
+
+ reg = <0x15c00 0x0 0x0 0x0 0x0>;
+ interrupts = <15 1>;
+
+ dac@0 {
+ compatible = "ti,pcm1755";
+ reg = <0>;
+ spi-max-frequency = <115200>;
+ };
+
+ dac@1 {
+ compatible = "ti,pcm1609a";
+ reg = <1>;
+ spi-max-frequency = <115200>;
+ };
+
+ eeprom@2 {
+ compatible = "atmel,at93c46";
+ reg = <2>;
+ spi-max-frequency = <115200>;
+ };
+ };
+
+ multimedia@b,7 {
+ compatible = "pci8086,2e6d.2",
+ "pci8086,2e6d",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x15f00 0x0 0x0 0x0 0x0>;
+ };
+
+ ethernet@c,0 {
+ compatible = "pci8086,2e6e.2",
+ "pci8086,2e6e",
+ "pciclass020000",
+ "pciclass0200";
+
+ reg = <0x16000 0x0 0x0 0x0 0x0>;
+ interrupts = <21 1>;
+ };
+
+ clock@c,1 {
+ compatible = "pci8086,2e6f.2",
+ "pci8086,2e6f",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x16100 0x0 0x0 0x0 0x0>;
+ interrupts = <3 1>;
+ };
+
+ usb@d,0 {
+ compatible = "pci8086,2e70.2",
+ "pci8086,2e70",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x16800 0x0 0x0 0x0 0x0>;
+ interrupts = <22 3>;
+ };
+
+ usb@d,1 {
+ compatible = "pci8086,2e70.2",
+ "pci8086,2e70",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x16900 0x0 0x0 0x0 0x0>;
+ interrupts = <22 3>;
+ };
+
+ sata@e,0 {
+ compatible = "pci8086,2e71.0",
+ "pci8086,2e71",
+ "pciclass010601",
+ "pciclass0106";
+
+ reg = <0x17000 0x0 0x0 0x0 0x0>;
+ interrupts = <23 3>;
+ };
+
+ flash@f,0 {
+ compatible = "pci8086,701.1",
+ "pci8086,701",
+ "pciclass050100",
+ "pciclass0501";
+
+ reg = <0x17800 0x0 0x0 0x0 0x0>;
+ interrupts = <13 1>;
+ };
+
+ entertainment-encryption@10,0 {
+ compatible = "pci8086,702.1",
+ "pci8086,702",
+ "pciclass101000",
+ "pciclass1010";
+
+ reg = <0x18000 0x0 0x0 0x0 0x0>;
+ };
+
+ co-processor@11,0 {
+ compatible = "pci8086,703.1",
+ "pci8086,703",
+ "pciclass0b4000",
+ "pciclass0b40";
+
+ reg = <0x18800 0x0 0x0 0x0 0x0>;
+ interrupts = <1 1>;
+ };
+
+ multimedia@12,0 {
+ compatible = "pci8086,704.0",
+ "pci8086,704",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x19000 0x0 0x0 0x0 0x0>;
+ };
+ };
+
+ isa@1f,0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "isa";
+ ranges = <1 0 0 0 0 0x100>;
+
+ rtc@70 {
+ compatible = "intel,ce4100-rtc", "motorola,mc146818";
+ interrupts = <8 3>;
+ interrupt-parent = <&ioapic1>;
+ ctrl-reg = <2>;
+ freq-reg = <0x26>;
+ reg = <1 0x70 2>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index ea6529e93c6f..5c0207bf959b 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -31,6 +31,7 @@
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/mrst.h>
+#include <asm/mrst-vrtc.h>
#include <asm/io.h>
#include <asm/i8259.h>
#include <asm/intel_scu_ipc.h>
@@ -268,6 +269,7 @@ void __init x86_mrst_early_setup(void)
x86_platform.calibrate_tsc = mrst_calibrate_tsc;
x86_platform.i8042_detect = mrst_i8042_detect;
+ x86_init.timers.wallclock_init = mrst_rtc_init;
x86_init.pci.init = pci_mrst_init;
x86_init.pci.fixup_irqs = x86_init_noop;
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 32cd7edd71a0..04cf645feb92 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -100,22 +100,14 @@ int vrtc_set_mmss(unsigned long nowtime)
void __init mrst_rtc_init(void)
{
- unsigned long rtc_paddr;
- void __iomem *virt_base;
+ unsigned long vrtc_paddr = sfi_mrtc_array[0].phys_addr;
sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
- if (!sfi_mrtc_num)
+ if (!sfi_mrtc_num || !vrtc_paddr)
return;
- rtc_paddr = sfi_mrtc_array[0].phys_addr;
-
- /* vRTC's register address may not be page aligned */
- set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr);
-
- virt_base = (void __iomem *)__fix_to_virt(FIX_LNW_VRTC);
- virt_base += rtc_paddr & ~PAGE_MASK;
- vrtc_virt_base = virt_base;
-
+ vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
+ vrtc_paddr);
x86_platform.get_wallclock = vrtc_get_time;
x86_platform.set_wallclock = vrtc_set_mmss;
}
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
index e797428b163b..c2a8cab65e5d 100644
--- a/arch/x86/platform/olpc/Makefile
+++ b/arch/x86/platform/olpc/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_OLPC) += olpc.o
obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o
-obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o
-obj-$(CONFIG_OLPC_OPENFIRMWARE_DT) += olpc_dt.o
+obj-$(CONFIG_OLPC) += olpc_ofw.o
+obj-$(CONFIG_OF_PROMTREE) += olpc_dt.o
diff --git a/arch/x86/platform/olpc/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c
index 127775696d6c..f4155354a7b0 100644
--- a/arch/x86/platform/olpc/olpc-xo1.c
+++ b/arch/x86/platform/olpc/olpc-xo1.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/mfd/core.h>
#include <asm/io.h>
#include <asm/olpc.h>
@@ -56,25 +57,24 @@ static void xo1_power_off(void)
static int __devinit olpc_xo1_probe(struct platform_device *pdev)
{
struct resource *res;
+ int err;
/* don't run on non-XOs */
if (!machine_is_olpc())
return -ENODEV;
+ err = mfd_shared_cell_enable(pdev);
+ if (err)
+ return err;
+
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
return -EIO;
}
-
- if (!request_region(res->start, resource_size(res), DRV_NAME)) {
- dev_err(&pdev->dev, "can't request region\n");
- return -EIO;
- }
-
- if (strcmp(pdev->name, "cs5535-pms") == 0)
+ if (strcmp(pdev->name, "olpc-xo1-pms") == 0)
pms_base = res->start;
- else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+ else if (strcmp(pdev->name, "olpc-xo1-ac-acpi") == 0)
acpi_base = res->start;
/* If we have both addresses, we can override the poweroff hook */
@@ -88,14 +88,11 @@ static int __devinit olpc_xo1_probe(struct platform_device *pdev)
static int __devexit olpc_xo1_remove(struct platform_device *pdev)
{
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(r->start, resource_size(r));
+ mfd_shared_cell_disable(pdev);
- if (strcmp(pdev->name, "cs5535-pms") == 0)
+ if (strcmp(pdev->name, "olpc-xo1-pms") == 0)
pms_base = 0;
- else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+ else if (strcmp(pdev->name, "olpc-xo1-acpi") == 0)
acpi_base = 0;
pm_power_off = NULL;
@@ -104,7 +101,7 @@ static int __devexit olpc_xo1_remove(struct platform_device *pdev)
static struct platform_driver cs5535_pms_drv = {
.driver = {
- .name = "cs5535-pms",
+ .name = "olpc-xo1-pms",
.owner = THIS_MODULE,
},
.probe = olpc_xo1_probe,
@@ -113,7 +110,7 @@ static struct platform_driver cs5535_pms_drv = {
static struct platform_driver cs5535_acpi_drv = {
.driver = {
- .name = "cs5535-acpi",
+ .name = "olpc-xo1-acpi",
.owner = THIS_MODULE,
},
.probe = olpc_xo1_probe,
@@ -124,26 +121,27 @@ static int __init olpc_xo1_init(void)
{
int r;
- r = platform_driver_register(&cs5535_pms_drv);
+ r = mfd_shared_platform_driver_register(&cs5535_pms_drv, "cs5535-pms");
if (r)
return r;
- r = platform_driver_register(&cs5535_acpi_drv);
+ r = mfd_shared_platform_driver_register(&cs5535_acpi_drv,
+ "cs5535-acpi");
if (r)
- platform_driver_unregister(&cs5535_pms_drv);
+ mfd_shared_platform_driver_unregister(&cs5535_pms_drv);
return r;
}
static void __exit olpc_xo1_exit(void)
{
- platform_driver_unregister(&cs5535_acpi_drv);
- platform_driver_unregister(&cs5535_pms_drv);
+ mfd_shared_platform_driver_unregister(&cs5535_acpi_drv);
+ mfd_shared_platform_driver_unregister(&cs5535_pms_drv);
}
MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:olpc-xo1");
+MODULE_ALIAS("platform:cs5535-pms");
module_init(olpc_xo1_init);
module_exit(olpc_xo1_exit);
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index dab874647530..044bda5b3174 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
* wasted bootmem) and hand off chunks of it to callers.
*/
res = alloc_bootmem(chunk_size);
- if (!res)
- return NULL;
+ BUG_ON(!res);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
free_mem = chunk_size;
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 5b54892e4bc3..1c7121ba18ff 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -38,7 +38,7 @@ config XEN_MAX_DOMAIN_MEMORY
config XEN_SAVE_RESTORE
bool
- depends on XEN && PM
+ depends on XEN
default y
config XEN_DEBUG_FS
@@ -48,3 +48,11 @@ config XEN_DEBUG_FS
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
+
+config XEN_DEBUG
+ bool "Enable Xen debug checks"
+ depends on XEN
+ default n
+ help
+ Enable various WARN_ON checks in the Xen MMU code.
+ Enabling this option WILL incur a significant performance overhead.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 50542efe45fb..49dbd78ec3cb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1284,15 +1284,14 @@ static int init_hvm_pv_info(int *major, int *minor)
xen_setup_features();
- pv_info = xen_info;
- pv_info.kernel_rpl = 0;
+ pv_info.name = "Xen HVM";
xen_domain_type = XEN_HVM_DOMAIN;
return 0;
}
-void xen_hvm_init_shared_info(void)
+void __ref xen_hvm_init_shared_info(void)
{
int cpu;
struct xen_add_to_physmap xatp;
@@ -1331,6 +1330,8 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ if (xen_have_vector_callback)
+ xen_init_lock_cpu(cpu);
break;
default:
break;
@@ -1355,6 +1356,7 @@ static void __init xen_hvm_guest_init(void)
if (xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
+ xen_hvm_smp_init();
register_cpu_notifier(&xen_hvm_cpu_notifier);
xen_unplug_emulated_devices();
have_vcpu_info_placement = 0;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5e92b61ad574..2bcecdd03037 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -46,6 +46,7 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/seq_file.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -416,8 +417,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
- unsigned long mfn = pfn_to_mfn(pfn);
+ unsigned long mfn;
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ mfn = get_phys_to_machine(pfn);
+ else
+ mfn = pfn;
/*
* If there's no mfn for the pfn, then just create an
* empty non-present pte. Unfortunately this loses
@@ -427,8 +432,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (unlikely(mfn == INVALID_P2M_ENTRY)) {
mfn = 0;
flags = 0;
+ } else {
+ /*
+ * It is paramount to do this test _after_ the
+ * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
+ * IDENTITY_FRAME_BIT resolves to true.
+ */
+ mfn &= ~FOREIGN_FRAME_BIT;
+ if (mfn & IDENTITY_FRAME_BIT) {
+ mfn &= ~IDENTITY_FRAME_BIT;
+ flags |= _PAGE_IOMAP;
+ }
}
-
val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
@@ -532,6 +547,41 @@ pte_t xen_make_pte(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
+#ifdef CONFIG_XEN_DEBUG
+pte_t xen_make_pte_debug(pteval_t pte)
+{
+ phys_addr_t addr = (pte & PTE_PFN_MASK);
+ phys_addr_t other_addr;
+ bool io_page = false;
+ pte_t _pte;
+
+ if (pte & _PAGE_IOMAP)
+ io_page = true;
+
+ _pte = xen_make_pte(pte);
+
+ if (!addr)
+ return _pte;
+
+ if (io_page &&
+ (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
+ other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
+ WARN(addr != other_addr,
+ "0x%lx is using VM_IO, but it is 0x%lx!\n",
+ (unsigned long)addr, (unsigned long)other_addr);
+ } else {
+ pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
+ other_addr = (_pte.pte & PTE_PFN_MASK);
+ WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+ "0x%lx is missing VM_IO (and wasn't fixed)!\n",
+ (unsigned long)addr);
+ }
+
+ return _pte;
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
+#endif
+
pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
@@ -1443,7 +1493,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
* early_ioremap fixmap slot, make sure it is RO.
*/
if (!is_early_ioremap_ptep(ptep) &&
- pfn >= e820_table_start && pfn < e820_table_end)
+ pfn >= pgt_buf_start && pfn < pgt_buf_end)
pte = pte_wrprotect(pte);
return pte;
@@ -1942,6 +1992,9 @@ __init void xen_ident_map_ISA(void)
static __init void xen_post_allocator_init(void)
{
+#ifdef CONFIG_XEN_DEBUG
+ pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
+#endif
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2074,7 +2127,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
in_frames[i] = virt_to_mfn(vaddr);
MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
- set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+ __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
if (out_frames)
out_frames[i] = virt_to_pfn(vaddr);
@@ -2353,6 +2406,18 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
#ifdef CONFIG_XEN_DEBUG_FS
+static int p2m_dump_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, p2m_dump_show, NULL);
+}
+
+static const struct file_operations p2m_dump_fops = {
+ .open = p2m_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static struct dentry *d_mmu_debug;
static int __init xen_mmu_debugfs(void)
@@ -2408,6 +2473,7 @@ static int __init xen_mmu_debugfs(void)
debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
&mmu_stats.prot_commit_batched);
+ debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
return 0;
}
fs_initcall(xen_mmu_debugfs);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index fd12d7ce7ff9..c1ebcda60cc6 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -30,6 +30,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <asm/cache.h>
#include <asm/setup.h>
@@ -59,9 +60,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
+
RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+/* We might hit two boundary violations at the start and end; at most, each
+ * boundary violation will require three middle nodes. */
+RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
+
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
@@ -136,7 +143,7 @@ static void p2m_init(unsigned long *p2m)
* - After resume we're called from within stop_machine, but the mfn
 * tree should already be completely allocated.
*/
-void xen_build_mfn_list_list(void)
+void __ref xen_build_mfn_list_list(void)
{
unsigned long pfn;
@@ -221,6 +228,9 @@ void __init xen_build_dynamic_phys_to_machine(void)
p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_top_init(p2m_top);
+ p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_init(p2m_identity);
+
/*
* The domain builder gives us a pre-constructed p2m array in
* mfn_list for all the pages initially given to us, so we just
@@ -266,6 +276,14 @@ unsigned long get_phys_to_machine(unsigned long pfn)
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
+ /*
+ * The INVALID_P2M_ENTRY is filled in both p2m_*identity
+ * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
+ * would be wrong.
+ */
+ if (p2m_top[topidx][mididx] == p2m_identity)
+ return IDENTITY_FRAME(pfn);
+
return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
@@ -335,9 +353,11 @@ static bool alloc_p2m(unsigned long pfn)
p2m_top_mfn_p[topidx] = mid_mfn;
}
- if (p2m_top[topidx][mididx] == p2m_missing) {
+ if (p2m_top[topidx][mididx] == p2m_identity ||
+ p2m_top[topidx][mididx] == p2m_missing) {
/* p2m leaf page is missing */
unsigned long *p2m;
+ unsigned long *p2m_orig = p2m_top[topidx][mididx];
p2m = alloc_p2m_page();
if (!p2m)
@@ -345,7 +365,7 @@ static bool alloc_p2m(unsigned long pfn)
p2m_init(p2m);
- if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+ if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
free_p2m_page(p2m);
else
mid_mfn[mididx] = virt_to_mfn(p2m);
@@ -354,11 +374,91 @@ static bool alloc_p2m(unsigned long pfn)
return true;
}
+bool __early_alloc_p2m(unsigned long pfn)
+{
+ unsigned topidx, mididx, idx;
+
+ topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
+ idx = p2m_index(pfn);
+
+ /* Pfff.. No boundary cross-over, let's get out. */
+ if (!idx)
+ return false;
+
+ WARN(p2m_top[topidx][mididx] == p2m_identity,
+ "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
+ topidx, mididx);
+
+ /*
+ * Could be done by xen_build_dynamic_phys_to_machine..
+ */
+ if (p2m_top[topidx][mididx] != p2m_missing)
+ return false;
+
+ /* Boundary cross-over for the edges: */
+ if (idx) {
+ unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+ p2m_init(p2m);
+
+ p2m_top[topidx][mididx] = p2m;
+
+ }
+ return idx != 0;
+}
+unsigned long set_phys_range_identity(unsigned long pfn_s,
+ unsigned long pfn_e)
+{
+ unsigned long pfn;
+
+ if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
+ return 0;
+
+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
+ return pfn_e - pfn_s;
+
+ if (pfn_s > pfn_e)
+ return 0;
+
+ for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
+ pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
+ pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
+ {
+ unsigned topidx = p2m_top_index(pfn);
+ if (p2m_top[topidx] == p2m_mid_missing) {
+ unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+ p2m_mid_init(mid);
+
+ p2m_top[topidx] = mid;
+ }
+ }
+
+ __early_alloc_p2m(pfn_s);
+ __early_alloc_p2m(pfn_e);
+
+ for (pfn = pfn_s; pfn < pfn_e; pfn++)
+ if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
+ break;
+
+ if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
+ "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
+ (pfn_e - pfn_s) - (pfn - pfn_s)))
+ printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);
+
+ return pfn - pfn_s;
+}
+
/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
unsigned topidx, mididx, idx;
+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ return true;
+ }
if (unlikely(pfn >= MAX_P2M_PFN)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
@@ -368,6 +468,21 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
+ /* For sparse holes, where the p2m leaf has a real PFN along with
+ * PCI holes, stick in the PFN as the MFN value.
+ */
+ if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
+ if (p2m_top[topidx][mididx] == p2m_identity)
+ return true;
+
+ /* Swap over from MISSING to IDENTITY if needed. */
+ if (p2m_top[topidx][mididx] == p2m_missing) {
+ WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
+ p2m_identity) != p2m_missing);
+ return true;
+ }
+ }
+
if (p2m_top[topidx][mididx] == p2m_missing)
return mfn == INVALID_P2M_ENTRY;
@@ -378,11 +493,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return true;
- }
-
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
if (!alloc_p2m(pfn))
return false;
@@ -421,7 +531,7 @@ int m2p_add_override(unsigned long mfn, struct page *page)
{
unsigned long flags;
unsigned long pfn;
- unsigned long address;
+ unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
@@ -455,7 +565,7 @@ int m2p_remove_override(struct page *page)
unsigned long flags;
unsigned long mfn;
unsigned long pfn;
- unsigned long address;
+ unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
@@ -520,3 +630,80 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
return ret;
}
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+int p2m_dump_show(struct seq_file *m, void *v)
+{
+ static const char * const level_name[] = { "top", "middle",
+ "entry", "abnormal" };
+ static const char * const type_name[] = { "identity", "missing",
+ "pfn", "abnormal"};
+#define TYPE_IDENTITY 0
+#define TYPE_MISSING 1
+#define TYPE_PFN 2
+#define TYPE_UNKNOWN 3
+ unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
+ unsigned int uninitialized_var(prev_level);
+ unsigned int uninitialized_var(prev_type);
+
+ if (!p2m_top)
+ return 0;
+
+ for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned mididx = p2m_mid_index(pfn);
+ unsigned idx = p2m_index(pfn);
+ unsigned lvl, type;
+
+ lvl = 4;
+ type = TYPE_UNKNOWN;
+ if (p2m_top[topidx] == p2m_mid_missing) {
+ lvl = 0; type = TYPE_MISSING;
+ } else if (p2m_top[topidx] == NULL) {
+ lvl = 0; type = TYPE_UNKNOWN;
+ } else if (p2m_top[topidx][mididx] == NULL) {
+ lvl = 1; type = TYPE_UNKNOWN;
+ } else if (p2m_top[topidx][mididx] == p2m_identity) {
+ lvl = 1; type = TYPE_IDENTITY;
+ } else if (p2m_top[topidx][mididx] == p2m_missing) {
+ lvl = 1; type = TYPE_MISSING;
+ } else if (p2m_top[topidx][mididx][idx] == 0) {
+ lvl = 2; type = TYPE_UNKNOWN;
+ } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
+ lvl = 2; type = TYPE_IDENTITY;
+ } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
+ lvl = 2; type = TYPE_MISSING;
+ } else if (p2m_top[topidx][mididx][idx] == pfn) {
+ lvl = 2; type = TYPE_PFN;
+ } else if (p2m_top[topidx][mididx][idx] != pfn) {
+ lvl = 2; type = TYPE_PFN;
+ }
+ if (pfn == 0) {
+ prev_level = lvl;
+ prev_type = type;
+ }
+ if (pfn == MAX_DOMAIN_PAGES-1) {
+ lvl = 3;
+ type = TYPE_UNKNOWN;
+ }
+ if (prev_type != type) {
+ seq_printf(m, " [0x%lx->0x%lx] %s\n",
+ prev_pfn_type, pfn, type_name[prev_type]);
+ prev_pfn_type = pfn;
+ prev_type = type;
+ }
+ if (prev_level != lvl) {
+ seq_printf(m, " [0x%lx->0x%lx] level %s\n",
+ prev_pfn_level, pfn, level_name[prev_level]);
+ prev_pfn_level = pfn;
+ prev_level = lvl;
+ }
+ }
+ return 0;
+#undef TYPE_IDENTITY
+#undef TYPE_MISSING
+#undef TYPE_PFN
+#undef TYPE_UNKNOWN
+}
+#endif
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index a8a66a50d446..edeaff2a2983 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
static __init void xen_add_extra_mem(unsigned long pages)
{
+ unsigned long pfn;
+
u64 size = (u64)pages * PAGE_SIZE;
u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
@@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages)
xen_extra_mem_size += size;
xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+
+ for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
@@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
start, end, ret);
if (ret == 1) {
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
len++;
}
}
@@ -138,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
return released;
}
+static unsigned long __init xen_set_identity(const struct e820entry *list,
+ ssize_t map_size)
+{
+ phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
+ phys_addr_t start_pci = last;
+ const struct e820entry *entry;
+ unsigned long identity = 0;
+ int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ phys_addr_t start = entry->addr;
+ phys_addr_t end = start + entry->size;
+
+ if (start < last)
+ start = last;
+
+ if (end <= start)
+ continue;
+
+ /* Skip over the 1MB region. */
+ if (last > end)
+ continue;
+
+ if (entry->type == E820_RAM) {
+ if (start > start_pci)
+ identity += set_phys_range_identity(
+ PFN_UP(start_pci), PFN_DOWN(start));
+
+ /* Without saving 'last' we would also gobble up RAM
+ * at the end of the loop. */
+ last = end;
+ start_pci = end;
+ continue;
+ }
+ start_pci = min(start, start_pci);
+ last = end;
+ }
+ if (last > start_pci)
+ identity += set_phys_range_identity(
+ PFN_UP(start_pci), PFN_DOWN(last));
+ return identity;
+}
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
+ static struct e820entry map_raw[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end;
@@ -151,6 +199,7 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long extra_pages = 0;
unsigned long extra_limit;
+ unsigned long identity_pages = 0;
int i;
int op;
@@ -176,6 +225,7 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
+ memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
xen_extra_mem_start = mem_end;
for (i = 0; i < memmap.nr_entries; i++) {
@@ -194,6 +244,14 @@ char * __init xen_memory_setup(void)
end -= delta;
extra_pages += PFN_DOWN(delta);
+ /*
+ * Mark RAM below 4GB that is not for us as unusable.
+ * This prevents "System RAM" address space from being
+ * used as a potential resource for I/O addresses (which happens
+ * when 'allocate_resource' is called).
+ */
+ if (delta && end < 0x100000000UL)
+ e820_add_region(end, delta, E820_UNUSABLE);
}
if (map[i].size > 0 && end > xen_extra_mem_start)
@@ -251,6 +309,13 @@ char * __init xen_memory_setup(void)
xen_add_extra_mem(extra_pages);
+ /*
+ * Set P2M for all non-RAM pages and E820 gaps to be identity
+ * type PFNs. We supply it with the non-sanitized version
+ * of the E820.
+ */
+ identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
return "Xen";
}
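A rough stand-alone illustration (not part of the patch) of the walk xen_set_identity() performs above: the toy e820-style entries and byte-granular output below are assumptions made only for demonstration, and the below-1MB handling is reduced to a fixed starting point as for a non-initial domain.

/*
 * User-space sketch of the identity-range walk in xen_set_identity().
 * Not part of the patch; the memory map below is a made-up example.
 */
#include <stdio.h>

struct toy_e820 { unsigned long long addr, size; int is_ram; };

int main(void)
{
	struct toy_e820 map[] = {
		{ 0x00000000ULL, 0x0009f000ULL, 1 },	/* low RAM */
		{ 0x000a0000ULL, 0x00060000ULL, 0 },	/* legacy/ISA hole */
		{ 0x00100000ULL, 0x3ff00000ULL, 1 },	/* main RAM */
		{ 0x40000000ULL, 0x10000000ULL, 0 },	/* MMIO/PCI hole */
	};
	unsigned long long last = 0x100000;	/* skip below 1MB, like a domU */
	unsigned long long start_pci = last;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		unsigned long long start = map[i].addr;
		unsigned long long end = start + map[i].size;

		if (start < last)
			start = last;
		if (end <= start || last > end)
			continue;

		if (map[i].is_ram) {
			/* Everything between the previous RAM region and this
			 * one is non-RAM and becomes a 1-1 (identity) range. */
			if (start > start_pci)
				printf("identity: %#llx-%#llx\n", start_pci, start);
			last = end;
			start_pci = end;
			continue;
		}
		start_pci = start < start_pci ? start : start_pci;
		last = end;
	}
	if (last > start_pci)
		printf("identity: %#llx-%#llx\n", start_pci, last);
	return 0;	/* prints "identity: 0x40000000-0x50000000" for this map */
}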
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 72a4c7959045..30612441ed99 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -509,3 +509,41 @@ void __init xen_smp_init(void)
xen_fill_possible_map();
xen_init_spinlocks();
}
+
+static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+{
+ native_smp_prepare_cpus(max_cpus);
+ WARN_ON(xen_smp_intr_init(0));
+
+ if (!xen_have_vector_callback)
+ return;
+ xen_init_lock_cpu(0);
+ xen_init_spinlocks();
+}
+
+static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
+{
+ int rc;
+ rc = native_cpu_up(cpu);
+ WARN_ON (xen_smp_intr_init(cpu));
+ return rc;
+}
+
+static void xen_hvm_cpu_die(unsigned int cpu)
+{
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+ native_cpu_die(cpu);
+}
+
+void __init xen_hvm_smp_init(void)
+{
+ smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+ smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+ smp_ops.cpu_up = xen_hvm_cpu_up;
+ smp_ops.cpu_die = xen_hvm_cpu_die;
+ smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
+ smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+}
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 9bbd63a129b5..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -12,7 +12,7 @@
#include "xen-ops.h"
#include "mmu.h"
-void xen_pre_suspend(void)
+void xen_arch_pre_suspend(void)
{
xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
xen_start_info->console.domU.mfn =
@@ -26,8 +26,9 @@ void xen_pre_suspend(void)
BUG();
}
-void xen_hvm_post_suspend(int suspend_cancelled)
+void xen_arch_hvm_post_suspend(int suspend_cancelled)
{
+#ifdef CONFIG_XEN_PVHVM
int cpu;
xen_hvm_init_shared_info();
xen_callback_vector();
@@ -37,9 +38,10 @@ void xen_hvm_post_suspend(int suspend_cancelled)
xen_setup_runstate_info(cpu);
}
}
+#endif
}
-void xen_post_suspend(int suspend_cancelled)
+void xen_arch_post_suspend(int suspend_cancelled)
{
xen_build_mfn_list_list();
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 067759e3d6a5..2e2d370a47b1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu)
name = "<timer kasprintf failed>";
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+ IRQF_DISABLED|IRQF_PERCPU|
+ IRQF_NOBALANCING|IRQF_TIMER|
+ IRQF_FORCE_RESUME,
name, NULL);
evt = &per_cpu(xen_clock_events, cpu);
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 1a5ff24e29c0..aaa7291c9259 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -28,9 +28,9 @@ ENTRY(startup_xen)
__FINIT
.pushsection .text
- .align PAGE_SIZE_asm
+ .align PAGE_SIZE
ENTRY(hypercall_page)
- .skip PAGE_SIZE_asm
+ .skip PAGE_SIZE
.popsection
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9d41bf985757..3112f55638c4 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -64,10 +64,12 @@ void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
void xen_smp_init(void);
+void __init xen_hvm_smp_init(void);
extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
+static inline void xen_hvm_smp_init(void) {}
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index d373d159e75e..f2d948a77ac0 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -76,10 +76,10 @@ config XTENSA_VARIANT_S6000
endchoice
config XTENSA_UNALIGNED_USER
- bool "Unaligned memory access in use space"
+ bool "Unaligned memory access in user space"
help
- The Xtensa architecture currently does not handle unaligned
- memory accesses in hardware but through an exception handler.
+ Xtensa processors are often not configured to handle unaligned
+ memory accesses in hardware, but rather through an exception handler.
Per default, unaligned memory accesses are disabled in user space.
Say Y here to enable unaligned memory access in user space.
@@ -149,8 +149,15 @@ config XTENSA_PLATFORM_ISS
config XTENSA_PLATFORM_XT2000
bool "XT2000"
help
- XT2000 is the name of Tensilica's feature-rich emulation platform.
- This hardware is capable of running a full Linux distribution.
+ XT2000 is the name of Tensilica's older emulation platform.
+
+config XTENSA_PLATFORM_XTAVNET
+ bool "XTAVNET"
+ select XTENSA_CALIBRATE_CCOUNT
+ select ETHOC
+ help
+ Selects support for the Tensilica-configured Avnet emulation boards.
+ These include the LX60 (XT-AV60), LX200 (XT-AV200), and LX110 (XT-AV110).
config XTENSA_PLATFORM_S6105
bool "S6105"
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7608559de93a..34d8427622eb 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -24,6 +24,7 @@ export VARIANT
# Platform configuration
platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
+platform-$(CONFIG_XTENSA_PLATFORM_XTAVNET) := xtavnet
platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
@@ -61,7 +62,9 @@ ifneq ($(VARIANT),)
ifneq ($(COMPILE_ARCH), xtensa)
ifndef CROSS_COMPILE
- CROSS_COMPILE = xtensa_$(VARIANT)-
+ CROSS_COMPILE := $(call cc-cross-prefix, xtensa_$(VARIANT)- \
+ xtensa-linux-uclibc- xtensa_$(VARIANT)-linux-uclibc- \
+ xtensa-linux-gnu- xtensa_$(VARIANT)-linux-gnu-)
endif
endif
endif
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 40aa55b485be..8657c62fc78b 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -23,6 +23,7 @@ subdir-y := lib
bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf
+bootdir-$(CONFIG_XTENSA_PLATFORM_XTAVNET) += boot-redboot boot-elf
zImage zImage.initrd Image Image.initrd: $(bootdir-y)
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 095cd8084164..42b7feba71b7 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -598,7 +598,7 @@ CONFIG_DEBUG_NOMMU_REGIONS=y
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 75c94a1658b0..42da613d1623 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -1,11 +1,11 @@
/*
- * include/asm-xtensa/coprocessor.h
+ * arch/xtensa/include/asm/coprocessor.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003 - 2007 Tensilica Inc.
+ * Copyright (C) 2003-2010 Tensilica Inc.
*/
@@ -15,9 +15,10 @@
#include <linux/stringify.h>
#include <variant/core.h>
#include <variant/tie.h>
+#include <variant/core.h>
#include <asm/types.h>
-#ifdef __ASSEMBLY__
+#if defined(__ASSEMBLY__) && !defined(LINKER_SCRIPT)
# include <variant/tie-asm.h>
.macro xchal_sa_start a b
@@ -70,7 +71,7 @@
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLY__ && !LINKER_SCRIPT */
/*
* XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index d04cd3a625fa..145db5cc6191 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -1,11 +1,11 @@
/*
- * include/asm-xtensa/io.h
+ * arch/xtensa/include/asm/io.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001-2010 Tensilica Inc.
*/
#ifndef _XTENSA_IO_H
@@ -63,40 +63,33 @@ static inline void * phys_to_virt(unsigned long address)
#define bus_to_virt(x) phys_to_virt(x)
/*
- * Return the virtual (cached) address for the specified bus memory.
+ * Return the virtual (uncached) address for the specified bus memory
+ * (which is, for now, simply a physical address).
* Note that we currently don't support any address outside the KIO segment.
+ * See also arch/mips/include/asm/io.h for nice comments.
*/
-static inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *__ioremap(unsigned long offset, unsigned long size)
{
#ifdef CONFIG_MMU
if (offset >= XCHAL_KIO_PADDR
- && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
- return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
+ && offset <= XCHAL_KIO_PADDR + XCHAL_KIO_SIZE - 1)
+ return (void __iomem *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_BYPASS_VADDR);
else
BUG();
#else
- return (void *)offset;
+ return (void __iomem *)offset;
#endif
}
-static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
-{
-#ifdef CONFIG_MMU
- if (offset >= XCHAL_KIO_PADDR
- && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
- return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
- else
- BUG();
-#else
- return (void *)offset;
-#endif
-}
+#define ioremap(offset, size) __ioremap(offset, size)
+#define ioremap_nocache(offset, size) __ioremap(offset, size)
-static inline void iounmap(void *addr)
+static inline void iounmap(void __iomem *addr)
{
}
+
/*
* Generic I/O
*/
@@ -191,6 +184,13 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#endif
+#ifndef CONFIG_GENERIC_IOMAP
+/* Partial Simple MMIO */
+#define ioread32(a) __raw_readl(a)
+#define iowrite32(v,a) __raw_writel((v),(a))
+#endif
+
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem access
*/
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index 4c0ccc9c4f4c..6af436f4429d 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -1,11 +1,11 @@
/*
- * include/asm-xtensa/irq.h
+ * arch/xtensa/include/asm/irq.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001-2010 Tensilica Inc.
*/
#ifndef _XTENSA_IRQ_H
@@ -22,6 +22,9 @@ static inline void variant_irq_enable(unsigned int irq) { }
static inline void variant_irq_disable(unsigned int irq) { }
#endif
+/* This number is used when no interrupt has been assigned. */
+#define NO_IRQ (-1)
+
#ifndef VARIANT_NR_IRQS
# define VARIANT_NR_IRQS 0
#endif
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h
index e39edf5c86f2..249619e7e7f2 100644
--- a/arch/xtensa/include/asm/rwsem.h
+++ b/arch/xtensa/include/asm/rwsem.h
@@ -17,44 +17,12 @@
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
- signed long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
- struct list_head wait_list;
-};
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
- LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
/*
* lock for reading
@@ -160,9 +128,4 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* _XTENSA_RWSEM_H */
diff --git a/arch/xtensa/include/asm/serial.h b/arch/xtensa/include/asm/serial.h
index a8a2493260f6..c55a0e2b47ca 100644
--- a/arch/xtensa/include/asm/serial.h
+++ b/arch/xtensa/include/asm/serial.h
@@ -1,5 +1,5 @@
/*
- * include/asm-xtensa/serial.h
+ * arch/xtensa/include/asm/serial.h
*
* Configuration details for 8250, 16450, 16550, etc. serial ports
*
@@ -7,12 +7,20 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001-2010 Tensilica Inc.
*/
#ifndef _XTENSA_SERIAL_H
#define _XTENSA_SERIAL_H
+#include <asm/irq.h>
#include <platform/serial.h>
+/* The 8250 driver treats IRQ 0 as absent. For the Xtensa architecture,
+ interrupt 0 is valid, so we must compare against NO_IRQ instead. */
+#ifdef is_real_interrupt
+#undef is_real_interrupt
+#define is_real_interrupt(irq) ((irq) != NO_IRQ)
+#endif
+
#endif /* _XTENSA_SERIAL_H */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 2d2728b3e862..e426a4ec7f22 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -27,8 +27,8 @@ sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
quiet_cmd__cpp_lds_S = LDS $@
- cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
- | sed $(sed-y) >$@
+ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ \
+ -DLINKER_SCRIPT $< | sed $(sed-y) >$@
$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 19df764f6399..db18807bb9e7 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -31,7 +31,7 @@ unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif
-static cycle_t ccount_read(void)
+static cycle_t ccount_read(struct clocksource *cs)
{
return (cycle_t)get_ccount();
}
@@ -96,16 +96,12 @@ again:
update_process_times(user_mode(get_irq_regs()));
#endif
- write_seqlock(&xtime_lock);
-
- do_timer(1); /* Linux handler in kernel/timer.c */
+ xtime_update(1); /* Linux handler in kernel/time/timekeeping */
/* Note that writing CCOMPARE clears the interrupt. */
next += CCOUNT_PER_JIFFY;
set_linux_timer(next);
-
- write_sequnlock(&xtime_lock);
}
/* Allow platform to do something useful (Wdog). */
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 9b526154c9ba..a2820065927e 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -155,7 +155,7 @@ SECTIONS
INIT_RAM_FS
}
- PERCPU(PAGE_SIZE)
+ PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
/* We need this dummy segment here */
diff --git a/arch/xtensa/platforms/xtavnet/Makefile b/arch/xtensa/platforms/xtavnet/Makefile
new file mode 100644
index 000000000000..06b120b40015
--- /dev/null
+++ b/arch/xtensa/platforms/xtavnet/Makefile
@@ -0,0 +1,10 @@
+# Makefile for the Tensilica Avnet-based Emulation Boards
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are in the main makefile...
+
+obj-y = setup.o lcd.o
+
diff --git a/arch/xtensa/platforms/xtavnet/include/platform/hardware.h b/arch/xtensa/platforms/xtavnet/include/platform/hardware.h
new file mode 100644
index 000000000000..5301be745113
--- /dev/null
+++ b/arch/xtensa/platforms/xtavnet/include/platform/hardware.h
@@ -0,0 +1,85 @@
+/*
+ * arch/xtensa/platforms/xtavnet/include/platform/hardware.h
+ *
+ * This file contains the hardware configuration of Tensilica Avnet boards
+ * (XT-AV60, XT-AV110, XT-AV200, derived from Avnet LX60, LX110, LX200
+ * boards respectively).
+ *
+ * Copyright (C) 2006-2010 Tensilica Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __XTAVNET_HARDWARE_H
+#define __XTAVNET_HARDWARE_H
+
+#include <variant/core.h>
+
+/* Memory configuration. */
+
+#define PLATFORM_DEFAULT_MEM_START 0x00000000
+#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
+
+/* Interrupt configuration. */
+
+#define PLATFORM_NR_IRQS 2
+
+/*
+ * Default assignment of XTAVnet devices to external interrupts.
+ *
+ * CONFIG_ARCH_HAS_SMP means the hardware supports SMP, ie. is Xtensa MX.
+ *
+ * Systems with SMP support (MX) have an External Interrupt Distributor
+ * between external interrupts and the core's interrupts. The first three
+ * core interrupts are used for IPI (interprocessor interrupts), so
+ * external (board device) interrupts end up shifted up by 3.
+ */
+
+/* UART interrupt: */
+#ifdef CONFIG_ARCH_HAS_SMP
+#define UART_INTNUM XCHAL_EXTINT3_NUM
+#else
+#define UART_INTNUM XCHAL_EXTINT0_NUM
+#endif
+
+/* Ethernet interrupt: */
+#ifdef CONFIG_ARCH_HAS_SMP
+#define OETH_IRQ XCHAL_EXTINT4_NUM
+#else
+#define OETH_IRQ XCHAL_EXTINT1_NUM
+#endif
+
+/*
+ * Device addresses and parameters.
+ */
+
+/* UART */
+#define UART_PADDR 0xFD050020
+
+/* LCD instruction and data virt. addresses. */
+#define LCD_INSTR_ADDR (char*)(0xFD040000)
+#define LCD_DATA_ADDR (char*)(0xFD040004)
+
+/* Misc. */
+#define XTAVNET_FPGAREGS_VADDR 0xFD020000
+/* Clock frequency in Hz (read-only): */
+#define XTAVNET_CLKFRQ_VADDR (XTAVNET_FPGAREGS_VADDR + 0x04)
+/* Setting of 8 DIP switches: */
+#define DIP_SWITCHES_VADDR (XTAVNET_FPGAREGS_VADDR + 0x0C)
+/* Software reset (write 0xdead): */
+#define XTAVNET_SWRST_VADDR (XTAVNET_FPGAREGS_VADDR + 0x10)
+
+/* OpenCores Ethernet controller: */
+#define OETH_REGS_PADDR IOADDR(0xD030000) /* regs + RX/TX descriptors */
+#define OETH_REGS_VADDR 0xFD030000
+#define OETH_REGS_SIZE 0x1000
+#define OETH_SRAMBUFF_PADDR 0xFD800000
+#define OETH_SRAMBUFF_SIZE (5*0x600 + 5*0x600) /* 5*rx buffs + 5*tx buffs */
+/*#define OETH_SRAMBUFF_SIZE 0x3C00*/ /* probably 0x4000 ? */
+/* The MAC address for these boards is 00:50:c2:13:6f:xx.
+ The last byte (here as zero) is read from the DIP switches on the board. */
+#define OETH_MACADDR 0x00, 0x50, 0xc2, 0x13, 0x6f, 0
+
+#endif /* __XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtavnet/include/platform/lcd.h b/arch/xtensa/platforms/xtavnet/include/platform/lcd.h
new file mode 100644
index 000000000000..67de96acb2f9
--- /dev/null
+++ b/arch/xtensa/platforms/xtavnet/include/platform/lcd.h
@@ -0,0 +1,22 @@
+/*
+ * arch/xtensa/platforms/xtavnet/include/platform/lcd.h
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __XTAVNET_LCD_H
+#define __XTAVNET_LCD_H
+
+/* Display string STR at position POS on the LCD. */
+void lcd_disp_at_pos(char *str, unsigned char pos);
+
+/* Shift the contents of the LCD display left or right. */
+void lcd_shiftleft(void);
+void lcd_shiftright(void);
+
+#endif
+
diff --git a/arch/xtensa/platforms/xtavnet/include/platform/serial.h b/arch/xtensa/platforms/xtavnet/include/platform/serial.h
new file mode 100644
index 000000000000..a0cb0caff152
--- /dev/null
+++ b/arch/xtensa/platforms/xtavnet/include/platform/serial.h
@@ -0,0 +1 @@
+#include <asm-generic/serial.h>
diff --git a/arch/xtensa/platforms/xtavnet/lcd.c b/arch/xtensa/platforms/xtavnet/lcd.c
new file mode 100644
index 000000000000..283986f158ad
--- /dev/null
+++ b/arch/xtensa/platforms/xtavnet/lcd.c
@@ -0,0 +1,79 @@
+/*
+ * Driver for the LCD display on the Tensilica LX60 Board.
+ * This code has no effect on the LX200 board. (LX110: TBD)
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ *
+ * FIXME: this code is based on the examples in the LX60 user guide.
+ *
+ * The LCD routines do busy waiting, which is probably not
+ * great. Maybe the code could be changed to use kernel timers, or
+ * the hardware could be changed to not need to wait.
+ */
+
+#include <linux/init.h>
+
+#include <platform/hardware.h>
+#include <asm/processor.h>
+#include <platform/lcd.h>
+#include <linux/delay.h>
+
+#define LCD_PAUSE_ITERATIONS 4000
+#define LCD_CLEAR 0x1
+#define LCD_DISPLAY_ON 0xc
+
+/* 8bit and 2 lines display */
+#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_POS 0x80
+#define LCD_SHIFT_LEFT 0x18
+#define LCD_SHIFT_RIGHT 0x1c
+
+static int __init lcd_init(void)
+{
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ udelay(50);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
+ udelay(50);
+ *LCD_INSTR_ADDR = LCD_CLEAR;
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+
+ return 0;
+}
+
+void lcd_disp_at_pos (char *str, unsigned char pos)
+{
+ *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
+ udelay(100);
+ while (*str != 0){
+ *LCD_DATA_ADDR = *str;
+ udelay(200);
+ str++;
+ }
+}
+
+void lcd_shiftleft(void)
+{
+ *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
+ udelay(50);
+}
+
+void lcd_shiftright(void)
+{
+ *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
+ udelay(50);
+}
+
+arch_initcall(lcd_init);
+
diff --git a/arch/xtensa/platforms/xtavnet/setup.c b/arch/xtensa/platforms/xtavnet/setup.c
new file mode 100644
index 000000000000..256fcb2b2af4
--- /dev/null
+++ b/arch/xtensa/platforms/xtavnet/setup.c
@@ -0,0 +1,269 @@
+/*
+ * arch/xtensa/platforms/xtavnet/setup.c
+ *
+ * Setup/initialization for Tensilica Avnet boards (XT-AV60, XT-AV110, XT-AV200,
+ * derived from Avnet LX60, LX110, LX200 boards respectively).
+ * For details, see "Tensilica Avnet LX### (XT-AV###) Board User's Guide"
+ * (where ### is 60, 110, or 200).
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ *
+ * Copyright 2001-2010 Tensilica Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License, version 2. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <asm/timex.h>
+
+#include <linux/etherdevice.h>
+#include <net/ethoc.h>
+
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <platform/lcd.h>
+#include <platform/hardware.h>
+#include <variant/core.h>
+
+/* For doing extra init. beyond what the ethoc driver does. */
+struct oeth_regs {
+ unsigned moder; /* Mode Register */
+ unsigned int_src; /* Interrupt Source Register */
+ unsigned int_mask; /* Interrupt Mask Register */
+ unsigned ipgt; /* Back to Back Inter Packet Gap Register */
+ unsigned ipgr1; /* Non Back to Back Inter Packet Gap Register 1 */
+ unsigned ipgr2; /* Non Back to Back Inter Packet Gap Register 2 */
+ unsigned packet_len; /* Packet Length Register (min. and max.) */
+ unsigned collconf; /* Collision and Retry Configuration Register */
+ unsigned tx_bd_num; /* Transmit Buffer Descriptor Number Register */
+ unsigned ctrlmoder; /* Control Module Mode Register */
+ unsigned miimoder; /* MII Mode Register */
+ unsigned miicommand; /* MII Command Register */
+ unsigned miiaddress; /* MII Address Register */
+ unsigned miitx_data; /* MII Transmit Data Register */
+ unsigned miirx_data; /* MII Receive Data Register */
+ unsigned miistatus; /* MII Status Register */
+ unsigned mac_addr0; /* MAC Individual Address Register 0 */
+ unsigned mac_addr1; /* MAC Individual Address Register 1 */
+ unsigned hash_addr0; /* Hash Register 0 */
+ unsigned hash_addr1; /* Hash Register 1 */
+};
+/* MODER Register */
+#define OETH_MODER_RST 0x00000800 /* Reset MAC */
+/* MII Mode Register */
+#define OETH_MIIMODER_CLKDIV 0x000000FF /* Clock Divider */
+
+
+
+void platform_halt(void)
+{
+ /* Just display HALT on LCD display, and loop. */
+ lcd_disp_at_pos(" HALT ", 0);
+ local_irq_disable();
+ while (1);
+}
+
+void platform_power_off(void)
+{
+ /* No software-controlled power-off, just display POWEROFF and loop. */
+ lcd_disp_at_pos ("POWEROFF", 0);
+ local_irq_disable();
+ while (1);
+}
+
+void platform_restart(void)
+{
+ /* Software-initiated board reset. */
+ *(volatile unsigned *)XTAVNET_SWRST_VADDR = 0xdead;
+}
+
+void platform_heartbeat(void)
+{
+ /* Executes every timer tick. */
+}
+
+
+/*
+ * Called from time_init(). "Calibrating" is a misnomer; here we just read
+ * the clock rate from the board-specific FPGA registers.
+ */
+void platform_calibrate_ccount(void)
+{
+ long clk_freq = *(long *)XTAVNET_CLKFRQ_VADDR;
+
+ ccount_per_jiffy = clk_freq / HZ;
+ nsec_per_ccount = 1000000000UL / clk_freq;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
+ */
+
+static struct resource ethoc_res[] = {
+ [0] = { /* register space */
+ .start = OETH_REGS_PADDR,
+ .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* buffer space */
+ .start = OETH_SRAMBUFF_PADDR,
+ .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = { /* IRQ number */
+ .start = OETH_IRQ,
+ .end = OETH_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct ethoc_platform_data ethoc_pdata = {
+ .hwaddr = { OETH_MACADDR }, /* last byte written in setup below */
+ .phy_id = -1,
+};
+
+static struct platform_device ethoc_device = {
+ .name = "ethoc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ethoc_res),
+ .resource = ethoc_res,
+ .dev = {
+ .platform_data = &ethoc_pdata,
+ },
+};
+
+
+/*----------------------------------------------------------------------------
+ * UART
+ */
+
+static struct resource serial_resource = {
+ .start = UART_PADDR,
+ .end = UART_PADDR + 0x1f,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct plat_serial8250_port serial_platform_data[] = {
+ [0] = {
+ .mapbase = UART_PADDR,
+ .irq = UART_INTNUM,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
+ .iotype = UPIO_MEM32,
+ .regshift = 2,
+ .uartclk = 0, /* set in xtavnet_init() */
+ },
+ { },
+};
+
+static struct platform_device xtavnet_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+ .num_resources = 1,
+ .resource = &serial_resource,
+};
+
+
+/*----------------------------------------------------------------------------
+ */
+
+/* platform devices */
+static struct platform_device *platform_devices[] = {
+ &ethoc_device,
+ &xtavnet_uart,
+};
+
+
+
+/* very early init */
+void __init platform_setup(char **cmdline)
+{
+ if (cmdline) {
+ if (cmdline[0])
+ printk("XTAVnet: platform_setup(cmdline[0]:'%s')\n", cmdline[0]);
+ else
+ printk("XTAVnet: platform_setup(cmdline[0]:<null>)\n");
+ }
+}
+
+/* early initialization, before secondary cpu's have been brought up */
+
+void platform_init(bp_tag_t *bootparams)
+{
+ printk("\n");
+ if( bootparams )
+ printk("XTAVnet: platform_init(bootparams:0x%x)\n", (unsigned)bootparams);
+}
+
+static int xtavnet_init(void)
+{
+ volatile struct oeth_regs *regs = (volatile struct oeth_regs*)OETH_REGS_VADDR;
+
+ /*
+ * Do some of the initialization missing in the ETHOC driver.
+ * (Perhaps not all necessary, but was in a previously used driver.)
+ */
+
+ /* Reset the controller. */
+ regs->moder = OETH_MODER_RST; /* Reset ON */
+ regs->moder &= ~OETH_MODER_RST; /* Reset OFF */
+
+ regs->packet_len = (64 << 16) | 1536;
+ regs->ipgr1 = 0x0000000c;
+ regs->ipgr2 = 0x00000012;
+ regs->collconf = 0x000f003f;
+ regs->ctrlmoder = 0;
+ regs->miimoder = (OETH_MIIMODER_CLKDIV & 0x2);
+
+ /*
+ * Setup dynamic info in platform device init structures.
+ */
+
+ /* Ethernet MAC address. */
+ ethoc_pdata.hwaddr[5] = *(u32*)DIP_SWITCHES_VADDR;
+
+ /* Clock rate varies among FPGA bitstreams; a board-specific FPGA register
+ * reports the actual clock rate. */
+ serial_platform_data[0].uartclk = *(long *)XTAVNET_CLKFRQ_VADDR;
+
+
+ /* register platform devices */
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+ /* The ETHOC driver is a bit quiet; at least display the Ethernet MAC,
+ so the user knows whether they set it correctly on the DIP switches. */
+ printk("XTAVnet: Ethernet MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ethoc_pdata.hwaddr[0], ethoc_pdata.hwaddr[1], ethoc_pdata.hwaddr[2],
+ ethoc_pdata.hwaddr[3], ethoc_pdata.hwaddr[4], ethoc_pdata.hwaddr[5]);
+
+ return 0;
+}
+
+
+/*
+ * Register to be done during do_initcalls().
+ */
+arch_initcall(xtavnet_init);
+
+
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f79a24..27b9741ea293 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -149,39 +149,29 @@ EXPORT_SYMBOL(blk_rq_init);
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
- struct request_queue *q = rq->q;
-
- if (&q->flush_rq != rq) {
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
-
- if (unlikely(nbytes > bio->bi_size)) {
- printk(KERN_ERR "%s: want %u bytes done, %u left\n",
- __func__, nbytes, bio->bi_size);
- nbytes = bio->bi_size;
- }
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
+
+ if (unlikely(nbytes > bio->bi_size)) {
+ printk(KERN_ERR "%s: want %u bytes done, %u left\n",
+ __func__, nbytes, bio->bi_size);
+ nbytes = bio->bi_size;
+ }
- if (unlikely(rq->cmd_flags & REQ_QUIET))
- set_bit(BIO_QUIET, &bio->bi_flags);
+ if (unlikely(rq->cmd_flags & REQ_QUIET))
+ set_bit(BIO_QUIET, &bio->bi_flags);
- bio->bi_size -= nbytes;
- bio->bi_sector += (nbytes >> 9);
+ bio->bi_size -= nbytes;
+ bio->bi_sector += (nbytes >> 9);
- if (bio_integrity(bio))
- bio_integrity_advance(bio, nbytes);
+ if (bio_integrity(bio))
+ bio_integrity_advance(bio, nbytes);
- if (bio->bi_size == 0)
- bio_endio(bio, error);
- } else {
- /*
- * Okay, this is the sequenced flush request in
- * progress, just record the error;
- */
- if (error && !q->flush_err)
- q->flush_err = error;
- }
+ /* don't actually finish bio if it's part of flush sequence */
+ if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ bio_endio(bio, error);
}
void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -540,7 +530,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
init_timer(&q->unplug_timer);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->timeout_list);
- INIT_LIST_HEAD(&q->pending_flushes);
+ INIT_LIST_HEAD(&q->flush_queue[0]);
+ INIT_LIST_HEAD(&q->flush_queue[1]);
+ INIT_LIST_HEAD(&q->flush_data_in_flight);
INIT_WORK(&q->unplug_work, blk_unplug_work);
kobject_init(&q->kobj, &blk_queue_ktype);
@@ -761,6 +753,25 @@ static void freed_request(struct request_queue *q, int sync, int priv)
}
/*
+ * Determine if elevator data should be initialized when allocating the
+ * request associated with @bio.
+ */
+static bool blk_rq_should_init_elevator(struct bio *bio)
+{
+ if (!bio)
+ return true;
+
+ /*
+ * Flush requests do not use the elevator so skip initialization.
+ * This allows a request to share the flush and elevator data.
+ */
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+ return false;
+
+ return true;
+}
+
+/*
* Get a free request, queue_lock must be held.
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
@@ -772,7 +783,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
struct request_list *rl = &q->rq;
struct io_context *ioc = NULL;
const bool is_sync = rw_is_sync(rw_flags) != 0;
- int may_queue, priv;
+ int may_queue, priv = 0;
may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
@@ -816,9 +827,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
- priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
- if (priv)
- rl->elvpriv++;
+ if (blk_rq_should_init_elevator(bio)) {
+ priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+ if (priv)
+ rl->elvpriv++;
+ }
if (blk_queue_io_stat(q))
rw_flags |= REQ_IO_STAT;
@@ -1219,7 +1232,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
spin_lock_irq(q->queue_lock);
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
- where = ELEVATOR_INSERT_FRONT;
+ where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
}
@@ -1804,7 +1817,7 @@ static void blk_account_io_done(struct request *req)
* normal IO on queueing nor completion. Accounting the
* containing request is enough.
*/
- if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
+ if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
unsigned long duration = jiffies - req->start_time;
const int rw = rq_data_dir(req);
struct hd_struct *part;
@@ -2044,9 +2057,26 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (error && req->cmd_type == REQ_TYPE_FS &&
!(req->cmd_flags & REQ_QUIET)) {
- printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
- req->rq_disk ? req->rq_disk->disk_name : "?",
- (unsigned long long)blk_rq_pos(req));
+ char *error_type;
+
+ switch (error) {
+ case -ENOLINK:
+ error_type = "recoverable transport";
+ break;
+ case -EREMOTEIO:
+ error_type = "critical target";
+ break;
+ case -EBADE:
+ error_type = "critical nexus";
+ break;
+ case -EIO:
+ default:
+ error_type = "I/O";
+ break;
+ }
+ printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+ error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
+ (unsigned long long)blk_rq_pos(req));
}
blk_account_io_completion(req, nr_bytes);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d6563e..a867e3f524f3 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,6 +1,69 @@
/*
* Functions to sequence FLUSH and FUA writes.
+ *
+ * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
+ * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
+ * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
+ * properties and hardware capability.
+ *
+ * If a request doesn't have data, only REQ_FLUSH makes sense, which
+ * indicates a simple flush request. If there is data, REQ_FLUSH indicates
+ * that the device cache should be flushed before the data is executed, and
+ * REQ_FUA means that the data must be on non-volatile media on request
+ * completion.
+ *
+ * If the device doesn't have writeback cache, FLUSH and FUA don't make any
+ * difference. The requests are either completed immediately if there's no
+ * data or executed as normal requests otherwise.
+ *
+ * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
+ *
+ * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
+ * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ *
+ * The actual execution of flush is double buffered. Whenever a request
+ * needs to execute PRE or POSTFLUSH, it queues at
+ * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
+ * flush is issued and the pending_idx is toggled. When the flush
+ * completes, all the requests which were pending are proceeded to the next
+ * step. This allows arbitrary merging of different types of FLUSH/FUA
+ * requests.
+ *
+ * Currently, the following conditions are used to determine when to issue
+ * flush.
+ *
+ * C1. At any given time, only one flush shall be in progress. This makes
+ * double buffering sufficient.
+ *
+ * C2. Flush is deferred if any request is executing DATA of its sequence.
+ * This avoids issuing separate POSTFLUSHes for requests which shared
+ * PREFLUSH.
+ *
+ * C3. The second condition is ignored if there is a request which has
+ * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
+ * starvation in the unlikely case where there is a continuous stream of
+ * FUA (without FLUSH) requests.
+ *
+ * For devices which support FUA, it isn't clear whether C2 (and thus C3)
+ * is beneficial.
+ *
+ * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
+ * Once while executing DATA and again after the whole sequence is
+ * complete. The first completion updates the contained bio but doesn't
+ * finish it so that the bio submitter is notified only after the whole
+ * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
+ * req_bio_endio().
+ *
+ * The above peculiarity requires that each FLUSH/FUA request has only one
+ * bio attached to it, which is guaranteed as they aren't allowed to be
+ * merged in the usual way.
*/
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
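
A minimal userspace sketch of the decomposition rules described in the header comment, the same rules that blk_flush_policy() encodes further down; the flag names here are made up purely for illustration and are not kernel symbols:

/* Toy model of the FLUSH/FUA decomposition; compiles with any C compiler. */
#include <stdio.h>

#define T_FLUSH (1 << 0)	/* "flush the cache" semantics */
#define T_FUA   (1 << 1)	/* "force unit access" semantics */

#define S_PREFLUSH  (1 << 0)
#define S_DATA      (1 << 1)
#define S_POSTFLUSH (1 << 2)

/* dev_flags: what the device supports; rq_flags: what the request asks for */
static unsigned int toy_policy(unsigned int dev_flags, unsigned int rq_flags,
			       int has_data)
{
	unsigned int policy = 0;

	if (dev_flags & T_FLUSH) {		/* device has a writeback cache */
		if (rq_flags & T_FLUSH)
			policy |= S_PREFLUSH;
		if (has_data)
			policy |= S_DATA;
		/* no native FUA: emulate it with a post-flush */
		if (!(dev_flags & T_FUA) && (rq_flags & T_FUA))
			policy |= S_POSTFLUSH;
	}
	return policy;
}

int main(void)
{
	/* FLUSH+FUA write on a writeback device without native FUA: 0x7 */
	printf("policy = %#x\n", toy_policy(T_FLUSH, T_FLUSH | T_FUA, 1));
	return 0;
}
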
@@ -11,184 +74,290 @@
/* FLUSH/FUA sequences */
enum {
- QUEUE_FSEQ_STARTED = (1 << 0), /* flushing in progress */
- QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */
- QUEUE_FSEQ_DATA = (1 << 2), /* data write in progress */
- QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
- QUEUE_FSEQ_DONE = (1 << 4),
+ REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
+ REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
+ REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
+ REQ_FSEQ_DONE = (1 << 3),
+
+ REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
+ REQ_FSEQ_POSTFLUSH,
+
+ /*
+ * If flush has been pending longer than the following timeout,
+ * it's issued even if flush_data requests are still in flight.
+ */
+ FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
-static struct request *queue_next_fseq(struct request_queue *q);
+static bool blk_kick_flush(struct request_queue *q);
-unsigned blk_flush_cur_seq(struct request_queue *q)
+static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
- if (!q->flush_seq)
- return 0;
- return 1 << ffz(q->flush_seq);
+ unsigned int policy = 0;
+
+ if (fflags & REQ_FLUSH) {
+ if (rq->cmd_flags & REQ_FLUSH)
+ policy |= REQ_FSEQ_PREFLUSH;
+ if (blk_rq_sectors(rq))
+ policy |= REQ_FSEQ_DATA;
+ if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+ policy |= REQ_FSEQ_POSTFLUSH;
+ }
+ return policy;
}
-static struct request *blk_flush_complete_seq(struct request_queue *q,
- unsigned seq, int error)
+static unsigned int blk_flush_cur_seq(struct request *rq)
{
- struct request *next_rq = NULL;
-
- if (error && !q->flush_err)
- q->flush_err = error;
-
- BUG_ON(q->flush_seq & seq);
- q->flush_seq |= seq;
-
- if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
- /* not complete yet, queue the next flush sequence */
- next_rq = queue_next_fseq(q);
- } else {
- /* complete this flush request */
- __blk_end_request_all(q->orig_flush_rq, q->flush_err);
- q->orig_flush_rq = NULL;
- q->flush_seq = 0;
-
- /* dispatch the next flush if there's one */
- if (!list_empty(&q->pending_flushes)) {
- next_rq = list_entry_rq(q->pending_flushes.next);
- list_move(&next_rq->queuelist, &q->queue_head);
- }
- }
- return next_rq;
+ return 1 << ffz(rq->flush.seq);
}
-static void blk_flush_complete_seq_end_io(struct request_queue *q,
- unsigned seq, int error)
+static void blk_flush_restore_request(struct request *rq)
{
- bool was_empty = elv_queue_empty(q);
- struct request *next_rq;
-
- next_rq = blk_flush_complete_seq(q, seq, error);
-
/*
- * Moving a request silently to empty queue_head may stall the
- * queue. Kick the queue in those cases.
+ * After flush data completion, @rq->bio is %NULL but we need to
+ * complete the bio again. @rq->biotail is guaranteed to equal the
+ * original @rq->bio. Restore it.
*/
- if (was_empty && next_rq)
- __blk_run_queue(q);
+ rq->bio = rq->biotail;
+
+ /* make @rq a normal request */
+ rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+ rq->end_io = NULL;
}
-static void pre_flush_end_io(struct request *rq, int error)
+/**
+ * blk_flush_complete_seq - complete flush sequence
+ * @rq: FLUSH/FUA request being sequenced
+ * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
+ * @error: whether an error occurred
+ *
+ * @rq just completed @seq part of its flush sequence; record the
+ * completion and trigger the next step.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if requests were added to the dispatch queue, %false otherwise.
+ */
+static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
+ int error)
{
- elv_completed_request(rq->q, rq);
- blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
+ struct request_queue *q = rq->q;
+ struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+ bool queued = false;
+
+ BUG_ON(rq->flush.seq & seq);
+ rq->flush.seq |= seq;
+
+ if (likely(!error))
+ seq = blk_flush_cur_seq(rq);
+ else
+ seq = REQ_FSEQ_DONE;
+
+ switch (seq) {
+ case REQ_FSEQ_PREFLUSH:
+ case REQ_FSEQ_POSTFLUSH:
+ /* queue for flush */
+ if (list_empty(pending))
+ q->flush_pending_since = jiffies;
+ list_move_tail(&rq->flush.list, pending);
+ break;
+
+ case REQ_FSEQ_DATA:
+ list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
+ list_add(&rq->queuelist, &q->queue_head);
+ queued = true;
+ break;
+
+ case REQ_FSEQ_DONE:
+ /*
+ * @rq was previously adjusted by blk_insert_flush() for
+ * flush sequencing and may already have gone through the
+ * flush data request completion path. Restore @rq for
+ * normal completion and end it.
+ */
+ BUG_ON(!list_empty(&rq->queuelist));
+ list_del_init(&rq->flush.list);
+ blk_flush_restore_request(rq);
+ __blk_end_request_all(rq, error);
+ break;
+
+ default:
+ BUG();
+ }
+
+ return blk_kick_flush(q) | queued;
}
-static void flush_data_end_io(struct request *rq, int error)
+static void flush_end_io(struct request *flush_rq, int error)
{
- elv_completed_request(rq->q, rq);
- blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
+ struct request_queue *q = flush_rq->q;
+ struct list_head *running = &q->flush_queue[q->flush_running_idx];
+ bool was_empty = elv_queue_empty(q);
+ bool queued = false;
+ struct request *rq, *n;
+
+ BUG_ON(q->flush_pending_idx == q->flush_running_idx);
+
+ /* account completion of the flush request */
+ q->flush_running_idx ^= 1;
+ elv_completed_request(q, flush_rq);
+
+ /* and push the waiting requests to the next stage */
+ list_for_each_entry_safe(rq, n, running, flush.list) {
+ unsigned int seq = blk_flush_cur_seq(rq);
+
+ BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ queued |= blk_flush_complete_seq(rq, seq, error);
+ }
+
+ /* after populating an empty queue, kick it to avoid stall */
+ if (queued && was_empty)
+ __blk_run_queue(q);
}
-static void post_flush_end_io(struct request *rq, int error)
+/**
+ * blk_kick_flush - consider issuing flush request
+ * @q: request_queue being kicked
+ *
+ * Flush-related states of @q have changed; consider issuing a flush request.
+ * Please read the comment at the top of this file for more info.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if flush was issued, %false otherwise.
+ */
+static bool blk_kick_flush(struct request_queue *q)
{
- elv_completed_request(rq->q, rq);
- blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
+ struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+ struct request *first_rq =
+ list_first_entry(pending, struct request, flush.list);
+
+ /* C1 described at the top of this file */
+ if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
+ return false;
+
+ /* C2 and C3 */
+ if (!list_empty(&q->flush_data_in_flight) &&
+ time_before(jiffies,
+ q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
+ return false;
+
+ /*
+ * Issue flush and toggle pending_idx. This makes pending_idx
+ * different from running_idx, which means flush is in flight.
+ */
+ blk_rq_init(q, &q->flush_rq);
+ q->flush_rq.cmd_type = REQ_TYPE_FS;
+ q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq.rq_disk = first_rq->rq_disk;
+ q->flush_rq.end_io = flush_end_io;
+
+ q->flush_pending_idx ^= 1;
+ elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
+ return true;
}
-static void init_flush_request(struct request *rq, struct gendisk *disk)
+static void flush_data_end_io(struct request *rq, int error)
{
- rq->cmd_type = REQ_TYPE_FS;
- rq->cmd_flags = WRITE_FLUSH;
- rq->rq_disk = disk;
+ struct request_queue *q = rq->q;
+ bool was_empty = elv_queue_empty(q);
+
+ /* after populating an empty queue, kick it to avoid stall */
+ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
+ __blk_run_queue(q);
}
-static struct request *queue_next_fseq(struct request_queue *q)
+/**
+ * blk_insert_flush - insert a new FLUSH/FUA request
+ * @rq: request to insert
+ *
+ * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
+ * @rq is being submitted. Analyze what needs to be done and put it on the
+ * right queue.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ */
+void blk_insert_flush(struct request *rq)
{
- struct request *orig_rq = q->orig_flush_rq;
- struct request *rq = &q->flush_rq;
+ struct request_queue *q = rq->q;
+ unsigned int fflags = q->flush_flags; /* may change, cache */
+ unsigned int policy = blk_flush_policy(fflags, rq);
- blk_rq_init(q, rq);
+ BUG_ON(rq->end_io);
+ BUG_ON(!rq->bio || rq->bio != rq->biotail);
- switch (blk_flush_cur_seq(q)) {
- case QUEUE_FSEQ_PREFLUSH:
- init_flush_request(rq, orig_rq->rq_disk);
- rq->end_io = pre_flush_end_io;
- break;
- case QUEUE_FSEQ_DATA:
- init_request_from_bio(rq, orig_rq->bio);
- /*
- * orig_rq->rq_disk may be different from
- * bio->bi_bdev->bd_disk if orig_rq got here through
- * remapping drivers. Make sure rq->rq_disk points
- * to the same one as orig_rq.
- */
- rq->rq_disk = orig_rq->rq_disk;
- rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
- rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
- rq->end_io = flush_data_end_io;
- break;
- case QUEUE_FSEQ_POSTFLUSH:
- init_flush_request(rq, orig_rq->rq_disk);
- rq->end_io = post_flush_end_io;
- break;
- default:
- BUG();
+ /*
+ * @policy now records what operations need to be done. Adjust
+ * REQ_FLUSH and FUA for the driver.
+ */
+ rq->cmd_flags &= ~REQ_FLUSH;
+ if (!(fflags & REQ_FUA))
+ rq->cmd_flags &= ~REQ_FUA;
+
+ /*
+ * If there's data but flush is not necessary, the request can be
+ * processed directly without going through flush machinery. Queue
+ * for normal execution.
+ */
+ if ((policy & REQ_FSEQ_DATA) &&
+ !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+ list_add(&rq->queuelist, &q->queue_head);
+ return;
}
- elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
- return rq;
+ /*
+ * @rq should go through flush machinery. Mark it part of flush
+ * sequence and submit for further processing.
+ */
+ memset(&rq->flush, 0, sizeof(rq->flush));
+ INIT_LIST_HEAD(&rq->flush.list);
+ rq->cmd_flags |= REQ_FLUSH_SEQ;
+ rq->end_io = flush_data_end_io;
+
+ blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
-struct request *blk_do_flush(struct request_queue *q, struct request *rq)
+/**
+ * blk_abort_flushes - @q is being aborted, abort flush requests
+ * @q: request_queue being aborted
+ *
+ * To be called from elv_abort_queue(). @q is being aborted. Prepare all
+ * FLUSH/FUA requests for abortion.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ */
+void blk_abort_flushes(struct request_queue *q)
{
- unsigned int fflags = q->flush_flags; /* may change, cache it */
- bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
- bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
- bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
- unsigned skip = 0;
+ struct request *rq, *n;
+ int i;
/*
- * Special case. If there's data but flush is not necessary,
- * the request can be issued directly.
- *
- * Flush w/o data should be able to be issued directly too but
- * currently some drivers assume that rq->bio contains
- * non-zero data if it isn't NULL and empty FLUSH requests
- * getting here usually have bio's without data.
+ * Requests in flight for data are already owned by the dispatch
+ * queue or the device driver. Just restore for normal completion.
*/
- if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
- rq->cmd_flags &= ~REQ_FLUSH;
- if (!has_fua)
- rq->cmd_flags &= ~REQ_FUA;
- return rq;
+ list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
+ list_del_init(&rq->flush.list);
+ blk_flush_restore_request(rq);
}
/*
- * Sequenced flushes can't be processed in parallel. If
- * another one is already in progress, queue for later
- * processing.
+ * We need to give away requests on flush queues. Restore for
+ * normal completion and put them on the dispatch queue.
*/
- if (q->flush_seq) {
- list_move_tail(&rq->queuelist, &q->pending_flushes);
- return NULL;
+ for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
+ list_for_each_entry_safe(rq, n, &q->flush_queue[i],
+ flush.list) {
+ list_del_init(&rq->flush.list);
+ blk_flush_restore_request(rq);
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ }
}
-
- /*
- * Start a new flush sequence
- */
- q->flush_err = 0;
- q->flush_seq |= QUEUE_FSEQ_STARTED;
-
- /* adjust FLUSH/FUA of the original request and stash it away */
- rq->cmd_flags &= ~REQ_FLUSH;
- if (!has_fua)
- rq->cmd_flags &= ~REQ_FUA;
- blk_dequeue_request(rq);
- q->orig_flush_rq = rq;
-
- /* skip unneded sequences and return the first one */
- if (!do_preflush)
- skip |= QUEUE_FSEQ_PREFLUSH;
- if (!blk_rq_sectors(rq))
- skip |= QUEUE_FSEQ_DATA;
- if (!do_postflush)
- skip |= QUEUE_FSEQ_POSTFLUSH;
- return blk_flush_complete_seq(q, skip, 0);
}
static void bio_end_flush(struct bio *bio, int err)
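
A toy model of the double buffering described in the new header comment, using hypothetical names; it captures only condition C1: pending_idx is toggled when a flush is issued and running_idx when it completes, so a flush is in flight exactly when the two indices differ:

/* Toy model of the pending/running index toggle; compiles with any C compiler. */
#include <stdio.h>
#include <stdbool.h>

struct toy_flush_state {
	int pending_idx;
	int running_idx;
};

static bool toy_kick_flush(struct toy_flush_state *s)
{
	if (s->pending_idx != s->running_idx)	/* C1: one flush at a time */
		return false;
	s->pending_idx ^= 1;	/* new arrivals now queue on the other list */
	return true;
}

static void toy_flush_done(struct toy_flush_state *s)
{
	s->running_idx ^= 1;	/* completion catches up with pending_idx */
}

int main(void)
{
	struct toy_flush_state s = { 0, 0 };

	printf("issued: %d\n", toy_kick_flush(&s));	/* 1 */
	printf("issued: %d\n", toy_kick_flush(&s));	/* 0, flush already in flight */
	toy_flush_done(&s);
	printf("issued: %d\n", toy_kick_flush(&s));	/* 1 */
	return 0;
}
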
diff --git a/block/blk.h b/block/blk.h
index 2db8f32838e7..284b500852bd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,21 +51,17 @@ static inline void blk_clear_rq_complete(struct request *rq)
*/
#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
-struct request *blk_do_flush(struct request_queue *q, struct request *rq);
+void blk_insert_flush(struct request *rq);
+void blk_abort_flushes(struct request_queue *q);
static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
while (1) {
- while (!list_empty(&q->queue_head)) {
+ if (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
- if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
- rq == &q->flush_rq)
- return rq;
- rq = blk_do_flush(q, rq);
- if (rq)
- return rq;
+ return rq;
}
if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7be4c7959625..dbf01f111d22 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4;
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
#define RQ_CIC(rq) \
- ((struct cfq_io_context *) (rq)->elevator_private)
-#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
-#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private3)
+ ((struct cfq_io_context *) (rq)->elevator_private[0])
+#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private[1])
+#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private[2])
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
@@ -3613,12 +3613,12 @@ static void cfq_put_request(struct request *rq)
put_io_context(RQ_CIC(rq)->ioc);
- rq->elevator_private = NULL;
- rq->elevator_private2 = NULL;
+ rq->elevator_private[0] = NULL;
+ rq->elevator_private[1] = NULL;
/* Put down rq reference on cfqg */
cfq_put_cfqg(RQ_CFQG(rq));
- rq->elevator_private3 = NULL;
+ rq->elevator_private[2] = NULL;
cfq_put_queue(cfqq);
}
@@ -3706,12 +3706,12 @@ new_queue:
cfqq->allocated[rw]++;
cfqq->ref++;
- rq->elevator_private = cic;
- rq->elevator_private2 = cfqq;
- rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
spin_unlock_irqrestore(q->queue_lock, flags);
+ rq->elevator_private[0] = cic;
+ rq->elevator_private[1] = cfqq;
+ rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
return 0;
queue_fail:
diff --git a/block/elevator.c b/block/elevator.c
index 2569512830d3..f98e92edc937 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -673,6 +673,11 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
q->elevator->ops->elevator_add_req_fn(q, rq);
break;
+ case ELEVATOR_INSERT_FLUSH:
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ blk_insert_flush(rq);
+ break;
+
default:
printk(KERN_ERR "%s: bad insertion point %d\n",
__func__, where);
@@ -759,7 +764,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
if (e->ops->elevator_set_req_fn)
return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
- rq->elevator_private = NULL;
+ rq->elevator_private[0] = NULL;
return 0;
}
@@ -785,6 +790,8 @@ void elv_abort_queue(struct request_queue *q)
{
struct request *rq;
+ blk_abort_flushes(q);
+
while (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772aa201..cbf1112a885c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
- res = __invalidate_device(bdev);
+ res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
- if (!(mode & FMODE_EXCL) &&
- blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
- return -EBUSY;
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
+ }
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a854df2a5a4b..fdc67d38660b 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -141,8 +141,7 @@ err:
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
- if (walk->iv_buffer)
- kfree(walk->iv_buffer);
+ kfree(walk->iv_buffer);
return err;
}
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 27ea9fe9476f..2854865f2434 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2077,6 +2077,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ghash",
.test = alg_test_hash,
+ .fips_allowed = 1,
.suite = {
.hash = {
.vecs = ghash_tv_template,
@@ -2453,6 +2454,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "xts(aes)",
.test = alg_test_skcipher,
+ .fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 834af7f2adee..aa6dac05f843 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -451,8 +451,9 @@ static struct hash_testvec rmd320_tv_template[] = {
/*
* SHA1 test vectors from from FIPS PUB 180-1
+ * Long vector from CAVS 5.0
*/
-#define SHA1_TEST_VECTORS 2
+#define SHA1_TEST_VECTORS 3
static struct hash_testvec sha1_tv_template[] = {
{
@@ -467,6 +468,33 @@ static struct hash_testvec sha1_tv_template[] = {
"\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1",
.np = 2,
.tap = { 28, 28 }
+ }, {
+ .plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06"
+ "\xb6\xeb\x30\xa1\xc3\x71\xd7\x44"
+ "\x50\xa1\x05\xc3\xf9\x73\x5f\x7f"
+ "\xa9\xfe\x38\xcf\x67\xf3\x04\xa5"
+ "\x73\x6a\x10\x6e\x92\xe1\x71\x39"
+ "\xa6\x81\x3b\x1c\x81\xa4\xf3\xd3"
+ "\xfb\x95\x46\xab\x42\x96\xfa\x9f"
+ "\x72\x28\x26\xc0\x66\x86\x9e\xda"
+ "\xcd\x73\xb2\x54\x80\x35\x18\x58"
+ "\x13\xe2\x26\x34\xa9\xda\x44\x00"
+ "\x0d\x95\xa2\x81\xff\x9f\x26\x4e"
+ "\xcc\xe0\xa9\x31\x22\x21\x62\xd0"
+ "\x21\xcc\xa2\x8d\xb5\xf3\xc2\xaa"
+ "\x24\x94\x5a\xb1\xe3\x1c\xb4\x13"
+ "\xae\x29\x81\x0f\xd7\x94\xca\xd5"
+ "\xdf\xaf\x29\xec\x43\xcb\x38\xd1"
+ "\x98\xfe\x4a\xe1\xda\x23\x59\x78"
+ "\x02\x21\x40\x5b\xd6\x71\x2a\x53"
+ "\x05\xda\x4b\x1b\x73\x7f\xce\x7c"
+ "\xd2\x1c\x0e\xb7\x72\x8d\x08\x23"
+ "\x5a\x90\x11",
+ .psize = 163,
+ .digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20"
+ "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17",
+ .np = 4,
+ .tap = { 63, 64, 31, 5 }
}
};
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6a..177c7d156933 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
source "drivers/clk/Kconfig"
+
+source "drivers/hwspinlock/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a8..a125e0bba4fd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,6 @@ obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/
+
+obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
+obj-y += vbus/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 2aa042a5da6d..3a17ca5fff6f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -7,7 +7,6 @@ menuconfig ACPI
depends on !IA64_HP_SIM
depends on IA64 || X86
depends on PCI
- depends on PM
select PNP
default y
help
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index fca34ccfd294..a188b868f29a 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -1,5 +1,6 @@
config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
+ select PSTORE
depends on X86
help
APEI allows to report errors (for example from the chipset)
@@ -11,6 +12,8 @@ config ACPI_APEI_GHES
tristate "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
+ select LLIST
+ select GENERIC_ALLOCATOR
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
@@ -21,6 +24,13 @@ config ACPI_APEI_GHES
by firmware to produce more valuable hardware error
information for Linux.
+config ACPI_APEI_PCIEAER
+ bool "APEI PCIe AER logging/recovering support"
+ depends on ACPI_APEI && PCIEAER
+ help
+ PCIe AER errors may be reported via APEI firmware first mode.
+ Turn on this option to enable the corresponding support.
+
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 31464a006d76..5d4189464d63 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -29,6 +29,7 @@
#include <linux/time.h>
#include <linux/cper.h>
#include <linux/acpi.h>
+#include <linux/aer.h>
/*
* CPER record ID need to be unique even after reboot, because record
@@ -70,8 +71,8 @@ static const char *cper_severity_str(unsigned int severity)
* If the output length is longer than 80, multiple line will be
* printed, with @pfx is printed at the beginning of each line.
*/
-static void cper_print_bits(const char *pfx, unsigned int bits,
- const char *strs[], unsigned int strs_size)
+void cper_print_bits(const char *pfx, unsigned int bits,
+ const char *strs[], unsigned int strs_size)
{
int i, len = 0;
const char *str;
@@ -81,6 +82,8 @@ static void cper_print_bits(const char *pfx, unsigned int bits,
if (!(bits & (1U << i)))
continue;
str = strs[i];
+ if (!str)
+ continue;
if (len && len + strlen(str) + 2 > 80) {
printk("%s\n", buf);
len = 0;
@@ -243,7 +246,8 @@ static const char *cper_pcie_port_type_strs[] = {
"root complex event collector",
};
-static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+ const struct acpi_hest_generic_data *gdata)
{
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -276,6 +280,12 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
printk(
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
+ cper_print_aer(pfx, gdata->error_severity, aer_regs);
+ }
+#endif
}
static const char *apei_estatus_section_flag_strs[] = {
@@ -322,7 +332,7 @@ static void apei_estatus_print_section(
struct cper_sec_pcie *pcie = (void *)(gdata + 1);
printk("%s""section_type: PCIe error\n", pfx);
if (gdata->error_data_length >= sizeof(*pcie))
- cper_print_pcie(pfx, pcie);
+ cper_print_pcie(pfx, pcie, gdata);
else
goto err_section_too_small;
} else
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index de73caf3cebc..a4cfb64c86a1 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -43,12 +43,27 @@ static DEFINE_MUTEX(erst_dbg_mutex);
static int erst_dbg_open(struct inode *inode, struct file *file)
{
+ int rc, *pos;
+
if (erst_disable)
return -ENODEV;
+ pos = (int *)&file->private_data;
+
+ rc = erst_get_record_id_begin(pos);
+ if (rc)
+ return rc;
+
return nonseekable_open(inode, file);
}
+static int erst_dbg_release(struct inode *inode, struct file *file)
+{
+ erst_get_record_id_end();
+
+ return 0;
+}
+
static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int rc;
@@ -79,18 +94,20 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
size_t usize, loff_t *off)
{
- int rc;
+ int rc, *pos;
ssize_t len = 0;
u64 id;
- if (*off != 0)
+ if (*off)
return -EINVAL;
if (mutex_lock_interruptible(&erst_dbg_mutex) != 0)
return -EINTR;
+ pos = (int *)&filp->private_data;
+
retry_next:
- rc = erst_get_next_record_id(&id);
+ rc = erst_get_record_id_next(pos, &id);
if (rc)
goto out;
/* no more record */
@@ -181,6 +198,7 @@ out:
static const struct file_operations erst_dbg_ops = {
.owner = THIS_MODULE,
.open = erst_dbg_open,
+ .release = erst_dbg_release,
.read = erst_dbg_read,
.write = erst_dbg_write,
.unlocked_ioctl = erst_dbg_ioctl,
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cf6db6b7662a..d6cb0ff6988e 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -34,6 +34,7 @@
#include <linux/cper.h>
#include <linux/nmi.h>
#include <linux/hardirq.h>
+#include <linux/pstore.h>
#include <acpi/apei.h>
#include "apei-internal.h"
@@ -429,6 +430,22 @@ ssize_t erst_get_record_count(void)
}
EXPORT_SYMBOL_GPL(erst_get_record_count);
+#define ERST_RECORD_ID_CACHE_SIZE_MIN 16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX 1024
+
+struct erst_record_id_cache {
+ struct mutex lock;
+ u64 *entries;
+ int len;
+ int size;
+ int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+ .lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+ .refcount = 0,
+};
+
static int __erst_get_next_record_id(u64 *record_id)
{
struct apei_exec_context ctx;
@@ -443,26 +460,179 @@ static int __erst_get_next_record_id(u64 *record_id)
return 0;
}
+int erst_get_record_id_begin(int *pos)
+{
+ int rc;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+ if (rc)
+ return rc;
+ erst_record_id_cache.refcount++;
+ mutex_unlock(&erst_record_id_cache.lock);
+
+ *pos = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+ u64 id, prev_id, first_id;
+ int i, rc;
+ u64 *entries;
+ unsigned long flags;
+
+ id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+ raw_spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&id);
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+ if (rc == -ENOENT)
+ return 0;
+ if (rc)
+ return rc;
+ if (id == APEI_ERST_INVALID_RECORD_ID)
+ return 0;
+ /* can not skip current ID, or loop back to first ID */
+ if (id == prev_id || id == first_id)
+ return 0;
+ if (first_id == APEI_ERST_INVALID_RECORD_ID)
+ first_id = id;
+ prev_id = id;
+
+ entries = erst_record_id_cache.entries;
+ for (i = 0; i < erst_record_id_cache.len; i++) {
+ if (entries[i] == id)
+ break;
+ }
+ /* record id already in cache, try next */
+ if (i < erst_record_id_cache.len)
+ goto retry;
+ if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+ int new_size, alloc_size;
+ u64 *new_entries;
+
+ new_size = erst_record_id_cache.size * 2;
+ new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+ ERST_RECORD_ID_CACHE_SIZE_MAX);
+ if (new_size <= erst_record_id_cache.size) {
+ if (printk_ratelimit())
+ pr_warning(FW_WARN ERST_PFX
+ "too many record IDs!\n");
+ return 0;
+ }
+ alloc_size = new_size * sizeof(entries[0]);
+ if (alloc_size < PAGE_SIZE)
+ new_entries = kmalloc(alloc_size, GFP_KERNEL);
+ else
+ new_entries = vmalloc(alloc_size);
+ if (!new_entries)
+ return -ENOMEM;
+ memcpy(new_entries, entries,
+ erst_record_id_cache.len * sizeof(entries[0]));
+ if (erst_record_id_cache.size < PAGE_SIZE)
+ kfree(entries);
+ else
+ vfree(entries);
+ erst_record_id_cache.entries = entries = new_entries;
+ erst_record_id_cache.size = new_size;
+ }
+ entries[i] = id;
+ erst_record_id_cache.len++;
+
+ return 1;
+}
+
/*
* Get the record ID of an existing error record on the persistent
* storage. If there is no error record on the persistent storage, the
* returned record_id is APEI_ERST_INVALID_RECORD_ID.
*/
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
{
- int rc;
- unsigned long flags;
+ int rc = 0;
+ u64 *entries;
if (erst_disable)
return -ENODEV;
- raw_spin_lock_irqsave(&erst_lock, flags);
- rc = __erst_get_next_record_id(record_id);
- raw_spin_unlock_irqrestore(&erst_lock, flags);
+ /* must be enclosed by erst_get_record_id_begin/end */
+ BUG_ON(!erst_record_id_cache.refcount);
+ BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+ mutex_lock(&erst_record_id_cache.lock);
+ entries = erst_record_id_cache.entries;
+ for (; *pos < erst_record_id_cache.len; (*pos)++)
+ if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+ break;
+ /* found next record id in cache */
+ if (*pos < erst_record_id_cache.len) {
+ *record_id = entries[*pos];
+ (*pos)++;
+ goto out_unlock;
+ }
+
+ /* Try to add one more record ID to cache */
+ rc = __erst_record_id_cache_add_one();
+ if (rc < 0)
+ goto out_unlock;
+ /* successfully added one new ID */
+ if (rc == 1) {
+ *record_id = erst_record_id_cache.entries[*pos];
+ (*pos)++;
+ rc = 0;
+ } else {
+ *pos = -1;
+ *record_id = APEI_ERST_INVALID_RECORD_ID;
+ }
+out_unlock:
+ mutex_unlock(&erst_record_id_cache.lock);
return rc;
}
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+ int i, wpos = 0;
+ u64 *entries;
+
+ if (erst_record_id_cache.refcount)
+ return;
+
+ entries = erst_record_id_cache.entries;
+ for (i = 0; i < erst_record_id_cache.len; i++) {
+ if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+ continue;
+ if (wpos != i)
+ memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+ wpos++;
+ }
+ erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+ /*
+ * erst_disable != 0 should be detected by invoker via the
+ * return value of erst_get_record_id_begin/next, so this
+ * function should not be called for erst_disable != 0.
+ */
+ BUG_ON(erst_disable);
+
+ mutex_lock(&erst_record_id_cache.lock);
+ erst_record_id_cache.refcount--;
+ BUG_ON(erst_record_id_cache.refcount < 0);
+ __erst_record_id_cache_compact();
+ mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
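
A sketch of how a caller might drive the new record-ID iteration API; erst-dbg above follows the same shape. example_walk_erst and its log message are hypothetical, only the erst_* calls and APEI_ERST_INVALID_RECORD_ID come from this file, and the snippet is not a standalone buildable unit:

/* Hypothetical consumer of the begin/next/end iteration API (sketch only). */
static int example_walk_erst(struct cper_record_header *rec, size_t buflen)
{
	u64 record_id;
	int pos, rc;

	rc = erst_get_record_id_begin(&pos);	/* pins the record-ID cache */
	if (rc)
		return rc;

	while (!(rc = erst_get_record_id_next(&pos, &record_id))) {
		if (record_id == APEI_ERST_INVALID_RECORD_ID)
			break;			/* no more records */
		/* erst_read() returns the record length or a negative error */
		if (erst_read(record_id, rec, buflen) > 0)
			pr_info("read ERST record %llu\n",
				(unsigned long long)record_id);
	}

	erst_get_record_id_end();	/* drops the pin so the cache can compact */
	return rc;
}
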
static int __erst_write_to_storage(u64 offset)
{
@@ -703,56 +873,34 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record,
}
EXPORT_SYMBOL_GPL(erst_read);
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
- int rc;
- ssize_t len;
- unsigned long flags;
- u64 record_id;
-
- if (erst_disable)
- return -ENODEV;
-
- raw_spin_lock_irqsave(&erst_lock, flags);
- rc = __erst_get_next_record_id(&record_id);
- if (rc) {
- raw_spin_unlock_irqrestore(&erst_lock, flags);
- return rc;
- }
- /* no more record */
- if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- raw_spin_unlock_irqrestore(&erst_lock, flags);
- return 0;
- }
-
- len = __erst_read(record_id, record, buflen);
- raw_spin_unlock_irqrestore(&erst_lock, flags);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
int erst_clear(u64 record_id)
{
- int rc;
+ int rc, i;
unsigned long flags;
+ u64 *entries;
if (erst_disable)
return -ENODEV;
+ rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+ if (rc)
+ return rc;
raw_spin_lock_irqsave(&erst_lock, flags);
if (erst_erange.attr & ERST_RANGE_NVRAM)
rc = __erst_clear_from_nvram(record_id);
else
rc = __erst_clear_from_storage(record_id);
raw_spin_unlock_irqrestore(&erst_lock, flags);
-
+ if (rc)
+ goto out;
+ entries = erst_record_id_cache.entries;
+ for (i = 0; i < erst_record_id_cache.len; i++) {
+ if (entries[i] == record_id)
+ entries[i] = APEI_ERST_INVALID_RECORD_ID;
+ }
+ __erst_record_id_cache_compact();
+out:
+ mutex_unlock(&erst_record_id_cache.lock);
return rc;
}
EXPORT_SYMBOL_GPL(erst_clear);
@@ -781,6 +929,128 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
return 0;
}
+static size_t erst_reader(u64 *id, enum pstore_type_id *type,
+ struct timespec *time);
+static u64 erst_writer(enum pstore_type_id type, size_t size);
+
+static struct pstore_info erst_info = {
+ .owner = THIS_MODULE,
+ .name = "erst",
+ .read = erst_reader,
+ .write = erst_writer,
+ .erase = erst_clear
+};
+
+#define CPER_CREATOR_PSTORE \
+ UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
+ 0x64, 0x90, 0xb8, 0x9d)
+#define CPER_SECTION_TYPE_DMESG \
+ UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \
+ 0x94, 0x19, 0xeb, 0x12)
+#define CPER_SECTION_TYPE_MCE \
+ UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
+ 0x04, 0x4a, 0x38, 0xfc)
+
+struct cper_pstore_record {
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_hdr;
+ char data[];
+} __packed;
+
+static size_t erst_reader(u64 *id, enum pstore_type_id *type,
+ struct timespec *time)
+{
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+ struct cper_pstore_record *rcd = (struct cper_pstore_record *)
+ (erst_info.buf - sizeof(*rcd));
+
+ if (erst_disable)
+ return -ENODEV;
+
+ raw_spin_lock_irqsave(&erst_lock, flags);
+skip:
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more record */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
+ erst_erange.size);
+ if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
+ goto skip;
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+
+ *id = record_id;
+ if (uuid_le_cmp(rcd->sec_hdr.section_type,
+ CPER_SECTION_TYPE_DMESG) == 0)
+ *type = PSTORE_TYPE_DMESG;
+ else if (uuid_le_cmp(rcd->sec_hdr.section_type,
+ CPER_SECTION_TYPE_MCE) == 0)
+ *type = PSTORE_TYPE_MCE;
+ else
+ *type = PSTORE_TYPE_UNKNOWN;
+
+ if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
+ time->tv_sec = rcd->hdr.timestamp;
+ else
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+
+ return len - sizeof(*rcd);
+}
+
+static u64 erst_writer(enum pstore_type_id type, size_t size)
+{
+ struct cper_pstore_record *rcd = (struct cper_pstore_record *)
+ (erst_info.buf - sizeof(*rcd));
+
+ memset(rcd, 0, sizeof(*rcd));
+ memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ rcd->hdr.revision = CPER_RECORD_REV;
+ rcd->hdr.signature_end = CPER_SIG_END;
+ rcd->hdr.section_count = 1;
+ rcd->hdr.error_severity = CPER_SEV_FATAL;
+ /* timestamp valid. platform_id, partition_id are invalid */
+ rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
+ rcd->hdr.timestamp = get_seconds();
+ rcd->hdr.record_length = sizeof(*rcd) + size;
+ rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
+ rcd->hdr.notification_type = CPER_NOTIFY_MCE;
+ rcd->hdr.record_id = cper_next_record_id();
+ rcd->hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ rcd->sec_hdr.section_offset = sizeof(*rcd);
+ rcd->sec_hdr.section_length = size;
+ rcd->sec_hdr.revision = CPER_SEC_REV;
+ /* fru_id and fru_text are invalid */
+ rcd->sec_hdr.validation_bits = 0;
+ rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
+ switch (type) {
+ case PSTORE_TYPE_DMESG:
+ rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
+ break;
+ case PSTORE_TYPE_MCE:
+ rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcd->sec_hdr.section_severity = CPER_SEV_FATAL;
+
+ erst_write(&rcd->hdr);
+
+ return rcd->hdr.record_id;
+}
+
static int __init erst_init(void)
{
int rc = 0;
@@ -788,6 +1058,7 @@ static int __init erst_init(void)
struct apei_exec_context ctx;
struct apei_resources erst_resources;
struct resource *r;
+ char *buf;
if (acpi_disabled)
goto err;
@@ -854,6 +1125,18 @@ static int __init erst_init(void)
if (!erst_erange.vaddr)
goto err_release_erange;
+ buf = kmalloc(erst_erange.size, GFP_KERNEL);
+ mutex_init(&erst_info.buf_mutex);
+ if (buf) {
+ erst_info.buf = buf + sizeof(struct cper_pstore_record);
+ erst_info.bufsize = erst_erange.size -
+ sizeof(struct cper_pstore_record);
+ if (pstore_register(&erst_info)) {
+ pr_info(ERST_PFX "Could not register with persistent store\n");
+ kfree(buf);
+ }
+ }
+
pr_info(ERST_PFX
"Error Record Serialization Table (ERST) support is initialized.\n");
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d1d484d4a06a..1cc28d04da2a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -42,6 +42,9 @@
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+#include <linux/genalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
@@ -53,6 +56,15 @@
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
+#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
+
+#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
+
+#define GHES_ESTATUS_NODE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_node) + (estatus_len))
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_node *)(estatus_node) + 1))
/*
* One struct ghes is created for each generic hardware error source.
@@ -77,6 +89,11 @@ struct ghes {
};
};
+struct ghes_estatus_node {
+ struct llist_node llnode;
+ struct acpi_hest_generic *generic;
+};
+
static int ghes_panic_timeout __read_mostly = 30;
/*
@@ -121,6 +138,19 @@ static struct vm_struct *ghes_ioremap_area;
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+/*
+ * printk is not safe in NMI context. So in the NMI handler, we allocate
+ * the required memory from the lock-less memory allocator
+ * (ghes_estatus_pool), save the estatus into it, add it to the lock-less
+ * list (ghes_estatus_llist), and then defer the printk to IRQ context via
+ * irq_work (ghes_proc_irq_work). ghes_estatus_pool_size_request records
+ * the pool size required by all NMI error sources.
+ */
+static struct gen_pool *ghes_estatus_pool;
+static unsigned long ghes_estatus_pool_size_request;
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
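
The generic shape of this NMI-to-IRQ-context deferral, sketched with a hypothetical payload type; the real code stores a ghes_estatus_node allocated from ghes_estatus_pool and only takes this path when an NMI-safe cmpxchg is available:

/* Sketch of the lock-less list + irq_work hand-off (hypothetical types). */
struct deferred_item {
	struct llist_node llnode;
	/* payload goes here */
};

static struct llist_head example_llist;
static struct irq_work example_work;

static void example_nmi_path(struct deferred_item *item)
{
	llist_add(&item->llnode, &example_llist);	/* NMI-safe, lock-less */
	irq_work_queue(&example_work);	/* process later, in IRQ context */
}

static void example_irq_path(struct irq_work *work)
{
	struct llist_node *node = llist_del_all(&example_llist);

	/* entries come back newest first; walk (or reverse) and process them */
	while (node) {
		struct deferred_item *item =
			llist_entry(node, struct deferred_item, llnode);

		node = node->next;
		/* printk and other non-NMI-safe work is fine here */
		(void)item;
	}
}

/* during init: init_irq_work(&example_work, example_irq_path); */
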
+
static int ghes_ioremap_init(void)
{
ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
@@ -180,6 +210,50 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
__flush_tlb_one(vaddr);
}
+static int ghes_estatus_pool_init(void)
+{
+ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+ if (!ghes_estatus_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ghes_estatus_pool_exit(void)
+{
+ struct gen_pool_chunk *chunk;
+
+ gen_pool_for_each_chunk(chunk, ghes_estatus_pool)
+ free_page(chunk->start_addr);
+ gen_pool_destroy(ghes_estatus_pool);
+}
+
+static int ghes_estatus_pool_expand(unsigned long len)
+{
+ unsigned long i, pages, size, addr;
+ int ret;
+
+ ghes_estatus_pool_size_request += PAGE_ALIGN(len);
+ size = gen_pool_size(ghes_estatus_pool);
+ if (size >= ghes_estatus_pool_size_request)
+ return 0;
+ pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
+ for (i = 0; i < pages; i++) {
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+ ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
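
A sketch of the gen_pool round trip these helpers rely on; example_pool_roundtrip is hypothetical and trims error handling, but the gen_pool_* and page calls are the ones used above, and as in ghes_estatus_pool_exit() the backing pages are freed by the caller, not by gen_pool_destroy():

/* Hypothetical gen_pool usage sketch (not part of the driver). */
static int example_pool_roundtrip(void)
{
	struct gen_pool *pool;
	unsigned long page, obj;

	pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!pool)
		return -ENOMEM;

	page = __get_free_page(GFP_KERNEL);	/* backing memory for one chunk */
	if (page && !gen_pool_add(pool, page, PAGE_SIZE, -1)) {
		obj = gen_pool_alloc(pool, 128);	/* lock-less allocation */
		if (obj)
			gen_pool_free(pool, obj, 128);
	}
	if (page)
		free_page(page);	/* the pool does not own the backing pages */
	gen_pool_destroy(pool);
	return 0;
}
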
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
@@ -341,13 +415,13 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(struct ghes *ghes)
+static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
{
int sev, processed = 0;
struct acpi_hest_generic_data *gdata;
- sev = ghes_severity(ghes->estatus->error_severity);
- apei_estatus_for_each_section(ghes->estatus, gdata) {
+ sev = ghes_severity(estatus->error_severity);
+ apei_estatus_for_each_section(estatus, gdata) {
#ifdef CONFIG_X86_MCE
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
@@ -360,13 +434,15 @@ static void ghes_do_proc(struct ghes *ghes)
}
}
-static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+static void ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
if (pfx == NULL) {
- if (ghes_severity(ghes->estatus->error_severity) <=
+ if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
pfx = KERN_WARNING HW_ERR;
else
@@ -375,8 +451,8 @@ static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
if (__ratelimit(&ratelimit)) {
printk(
"%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, ghes->generic->header.source_id);
- apei_estatus_print(pfx, ghes->estatus);
+ pfx, generic->header.source_id);
+ apei_estatus_print(pfx, estatus);
}
}
@@ -387,8 +463,8 @@ static int ghes_proc(struct ghes *ghes)
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
- ghes_print_estatus(NULL, ghes);
- ghes_do_proc(ghes);
+ ghes_print_estatus(NULL, ghes->generic, ghes->estatus);
+ ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
@@ -447,6 +523,40 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+ struct llist_node *llnode, *next, *tail = NULL;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ /*
+ * Because the time order of the estatus entries in the list is
+ * reversed, restore the proper order first.
+ */
+ llnode = llist_del_all(&ghes_estatus_llist);
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+ llnode = tail;
+ while (llnode) {
+ next = llnode->next;
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ ghes_do_proc(estatus);
+ ghes_print_estatus(NULL, estatus_node->generic, estatus);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
+ llnode = next;
+ }
+}
+
static int ghes_notify_nmi(struct notifier_block *this,
unsigned long cmd, void *data)
{
@@ -476,7 +586,8 @@ static int ghes_notify_nmi(struct notifier_block *this,
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+ ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
@@ -484,12 +595,31 @@ static int ghes_notify_nmi(struct notifier_block *this,
}
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ u32 len, node_len;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic_status *estatus;
+#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
- /* Do not print estatus because printk is not NMI safe */
- ghes_do_proc(ghes);
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ /* Save estatus for further processing in IRQ context */
+ len = apei_estatus_len(ghes->estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
+ node_len);
+ if (estatus_node) {
+ estatus_node->generic = ghes->generic;
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ memcpy(estatus, ghes->estatus, len);
+ llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+ }
+#endif
ghes_clear_estatus(ghes);
}
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ irq_work_queue(&ghes_proc_irq_work);
+#endif
out:
raw_spin_unlock(&ghes_nmi_lock);
@@ -504,10 +634,26 @@ static struct notifier_block ghes_notifier_nmi = {
.notifier_call = ghes_notify_nmi,
};
+static unsigned long ghes_esource_prealloc_size(
+ const struct acpi_hest_generic *generic)
+{
+ unsigned long block_length, prealloc_records, prealloc_size;
+
+ block_length = min_t(unsigned long, generic->error_block_length,
+ GHES_ESTATUS_MAX_SIZE);
+ prealloc_records = max_t(unsigned long,
+ generic->records_to_preallocate, 1);
+ prealloc_size = min_t(unsigned long, block_length * prealloc_records,
+ GHES_ESOURCE_PREALLOC_MAX_SIZE);
+
+ return prealloc_size;
+}
+
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
+ unsigned long len;
int rc = -EINVAL;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -573,6 +719,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_expand(len);
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_die_notifier(&ghes_notifier_nmi);
@@ -597,6 +745,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
+ unsigned long len;
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
@@ -627,6 +776,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
* freed after NMI handler finishes.
*/
synchronize_rcu();
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_shrink(len);
break;
default:
BUG();
@@ -662,15 +813,23 @@ static int __init ghes_init(void)
return -EINVAL;
}
+ init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+
rc = ghes_ioremap_init();
if (rc)
goto err;
- rc = platform_driver_register(&ghes_platform_driver);
+ rc = ghes_estatus_pool_init();
if (rc)
goto err_ioremap_exit;
+ rc = platform_driver_register(&ghes_platform_driver);
+ if (rc)
+ goto err_pool_exit;
+
return 0;
+err_pool_exit:
+ ghes_estatus_pool_exit();
err_ioremap_exit:
ghes_ioremap_exit();
err:
@@ -680,6 +839,7 @@ err:
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
+ ghes_estatus_pool_exit();
ghes_ioremap_exit();
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 7ced61f39492..9749980ca6ca 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -40,6 +40,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/dmi.h>
+#include <linux/suspend.h>
#include "internal.h"
@@ -1006,8 +1007,7 @@ struct kobject *acpi_kobj;
static int __init acpi_init(void)
{
- int result = 0;
-
+ int result;
if (acpi_disabled) {
printk(KERN_INFO PREFIX "Interpreter disabled.\n");
@@ -1022,29 +1022,18 @@ static int __init acpi_init(void)
init_acpi_device_notify();
result = acpi_bus_init();
-
- if (!result) {
- pci_mmcfg_late_init();
- if (!(pm_flags & PM_APM))
- pm_flags |= PM_ACPI;
- else {
- printk(KERN_INFO PREFIX
- "APM is already active, exiting\n");
- disable_acpi();
- result = -ENODEV;
- }
- } else
+ if (result) {
disable_acpi();
-
- if (acpi_disabled)
return result;
+ }
+ pci_mmcfg_late_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
- return result;
+ return 0;
}
subsys_initcall(acpi_init);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 76bbb78a5ad9..12c28f4adb67 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -98,6 +98,7 @@ struct acpi_button {
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
+ bool wakeup_enabled;
};
static const struct file_operations acpi_button_info_fops = {
@@ -430,8 +431,10 @@ static int acpi_button_add(struct acpi_device *device)
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
- device->wakeup.run_wake_count++;
- device_set_wakeup_enable(&device->dev, true);
+ if (!device_may_wakeup(&device->dev)) {
+ device_set_wakeup_enable(&device->dev, true);
+ button->wakeup_enabled = true;
+ }
}
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
@@ -453,8 +456,8 @@ static int acpi_button_remove(struct acpi_device *device, int type)
if (device->wakeup.flags.valid) {
acpi_disable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
- device->wakeup.run_wake_count--;
- device_set_wakeup_enable(&device->dev, false);
+ if (button->wakeup_enabled)
+ device_set_wakeup_enable(&device->dev, false);
}
acpi_button_remove_fs(device);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 5eb25eb3ea48..3b5c3189fd99 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -274,7 +274,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
- int ret = 0;
+ int cnt = 0;
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
@@ -288,7 +288,7 @@ int __init acpi_numa_init(void)
acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
acpi_parse_processor_affinity, 0);
- ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
}
@@ -297,7 +297,10 @@ int __init acpi_numa_init(void)
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
acpi_numa_arch_fixup();
- return ret;
+
+ if (cnt <= 0)
+ return cnt ?: -ENOENT;
+ return 0;
}
int acpi_get_pxm(acpi_handle h)
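
The cnt ?: -ENOENT form is GCC's conditional operator with the middle operand omitted: it evaluates to cnt when cnt is non-zero and to -ENOENT when cnt is zero, i.e. the return above is shorthand for:

	if (cnt <= 0)
		return cnt ? cnt : -ENOENT;	/* keep a parse error; zero entries becomes -ENOENT */
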
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index fa5a1df42b79..096787b43c96 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -26,6 +26,7 @@ struct nvs_page {
unsigned int size;
void *kaddr;
void *data;
+ bool unmap;
struct list_head node;
};
@@ -44,6 +45,9 @@ int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
+ pr_info("PM: Registering ACPI NVS region at %lx (%ld bytes)\n",
+ start, size);
+
while (size > 0) {
unsigned int nr_bytes;
@@ -81,7 +85,13 @@ void suspend_nvs_free(void)
free_page((unsigned long)entry->data);
entry->data = NULL;
if (entry->kaddr) {
- iounmap(entry->kaddr);
+ if (entry->unmap) {
+ iounmap(entry->kaddr);
+ entry->unmap = false;
+ } else {
+ acpi_os_unmap_memory(entry->kaddr,
+ entry->size);
+ }
entry->kaddr = NULL;
}
}
@@ -115,8 +125,14 @@ int suspend_nvs_save(void)
list_for_each_entry(entry, &nvs_list, node)
if (entry->data) {
- entry->kaddr = acpi_os_ioremap(entry->phys_start,
- entry->size);
+ unsigned long phys = entry->phys_start;
+ unsigned int size = entry->size;
+
+ entry->kaddr = acpi_os_get_iomem(phys, size);
+ if (!entry->kaddr) {
+ entry->kaddr = acpi_os_ioremap(phys, size);
+ entry->unmap = !!entry->kaddr;
+ }
if (!entry->kaddr) {
suspend_nvs_free();
return -ENOMEM;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c90c76aa7f8b..45ad4ffef533 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -76,7 +76,6 @@ EXPORT_SYMBOL(acpi_in_debugger);
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER */
-static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
@@ -105,11 +104,11 @@ struct acpi_ioremap {
void __iomem *virt;
acpi_physical_address phys;
acpi_size size;
- struct kref ref;
+ unsigned long refcount;
};
static LIST_HEAD(acpi_ioremaps);
-static DEFINE_SPINLOCK(acpi_ioremap_lock);
+static DEFINE_MUTEX(acpi_ioremap_lock);
static void __init acpi_osi_setup_late(void);
@@ -285,6 +284,22 @@ acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
return NULL;
}
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+{
+ struct acpi_ioremap *map;
+ void __iomem *virt = NULL;
+
+ mutex_lock(&acpi_ioremap_lock);
+ map = acpi_map_lookup(phys, size);
+ if (map) {
+ virt = map->virt + (phys - map->phys);
+ map->refcount++;
+ }
+ mutex_unlock(&acpi_ioremap_lock);
+ return virt;
+}
+EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
@@ -302,8 +317,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
- struct acpi_ioremap *map, *tmp_map;
- unsigned long flags;
+ struct acpi_ioremap *map;
void __iomem *virt;
acpi_physical_address pg_off;
acpi_size pg_sz;
@@ -316,14 +330,25 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
if (!acpi_gbl_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
+ mutex_lock(&acpi_ioremap_lock);
+ /* Check if there's a suitable mapping already. */
+ map = acpi_map_lookup(phys, size);
+ if (map) {
+ map->refcount++;
+ goto out;
+ }
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
+ if (!map) {
+ mutex_unlock(&acpi_ioremap_lock);
return NULL;
+ }
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
virt = acpi_os_ioremap(pg_off, pg_sz);
if (!virt) {
+ mutex_unlock(&acpi_ioremap_lock);
kfree(map);
return NULL;
}
@@ -332,62 +357,51 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
map->virt = virt;
map->phys = pg_off;
map->size = pg_sz;
- kref_init(&map->ref);
-
- spin_lock_irqsave(&acpi_ioremap_lock, flags);
- /* Check if page has already been mapped. */
- tmp_map = acpi_map_lookup(phys, size);
- if (tmp_map) {
- kref_get(&tmp_map->ref);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- iounmap(map->virt);
- kfree(map);
- return tmp_map->virt + (phys - tmp_map->phys);
- }
+ map->refcount = 1;
+
list_add_tail_rcu(&map->list, &acpi_ioremaps);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ out:
+ mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
-static void acpi_kref_del_iomap(struct kref *ref)
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
- struct acpi_ioremap *map;
+ if (!--map->refcount)
+ list_del_rcu(&map->list);
+}
- map = container_of(ref, struct acpi_ioremap, ref);
- list_del_rcu(&map->list);
+static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+{
+ if (!map->refcount) {
+ synchronize_rcu();
+ iounmap(map->virt);
+ kfree(map);
+ }
}
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- unsigned long flags;
- int del;
if (!acpi_gbl_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
- spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup_virt(virt, size);
if (!map) {
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
- dump_stack();
+ mutex_unlock(&acpi_ioremap_lock);
+ WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return;
}
+ acpi_os_drop_map_ref(map);
+ mutex_unlock(&acpi_ioremap_lock);
- del = kref_put(&map->ref, acpi_kref_del_iomap);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-
- if (!del)
- return;
-
- synchronize_rcu();
- iounmap(map->virt);
- kfree(map);
+ acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
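
The unmap path above follows a drop-then-cleanup pattern: the reference is dropped and the entry unlinked while holding acpi_ioremap_lock, but the blocking cleanup (synchronize_rcu(), iounmap(), kfree()) runs only after the mutex is released so RCU readers can drain. A sketch of the pattern with a hypothetical entry type:

/* Hypothetical refcounted-unmap sketch mirroring the pattern above. */
struct example_map {
	struct list_head list;
	void __iomem *virt;
	unsigned long refcount;
};

static void example_unmap(struct mutex *lock, struct example_map *map)
{
	bool last;

	mutex_lock(lock);
	last = (--map->refcount == 0);
	if (last)
		list_del_rcu(&map->list);	/* RCU readers may still see it */
	mutex_unlock(lock);

	if (last) {
		synchronize_rcu();	/* wait for all RCU readers to finish */
		iounmap(map->virt);
		kfree(map);
	}
}
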
@@ -397,7 +411,7 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
-int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
{
void __iomem *virt;
@@ -413,13 +427,10 @@ int acpi_os_map_generic_address(struct acpi_generic_address *addr)
return 0;
}
-EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
{
- void __iomem *virt;
- unsigned long flags;
- acpi_size size = addr->bit_width / 8;
+ struct acpi_ioremap *map;
if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
@@ -427,13 +438,17 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
if (!addr->address || !addr->bit_width)
return;
- spin_lock_irqsave(&acpi_ioremap_lock, flags);
- virt = acpi_map_vaddr_lookup(addr->address, size);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ mutex_lock(&acpi_ioremap_lock);
+ map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+ if (!map) {
+ mutex_unlock(&acpi_ioremap_lock);
+ return;
+ }
+ acpi_os_drop_map_ref(map);
+ mutex_unlock(&acpi_ioremap_lock);
- acpi_os_unmap_memory(virt, size);
+ acpi_os_map_cleanup(map);
}
-EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
@@ -516,11 +531,15 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_stats_init();
/*
- * Ignore the GSI from the core, and use the value in our copy of the
- * FADT. It may not be the same if an interrupt source override exists
- * for the SCI.
+ * ACPI interrupts different from the SCI in our copy of the FADT are
+ * not supported.
*/
- gsi = acpi_gbl_FADT.sci_interrupt;
+ if (gsi != acpi_gbl_FADT.sci_interrupt)
+ return AE_BAD_PARAMETER;
+
+ if (acpi_irq_handler)
+ return AE_ALREADY_ACQUIRED;
+
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
gsi);
@@ -531,20 +550,20 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_context = context;
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+ acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
}
- acpi_irq_irq = irq;
return AE_OK;
}
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
- if (irq) {
- free_irq(irq, acpi_irq);
- acpi_irq_handler = NULL;
- acpi_irq_irq = 0;
- }
+ if (irq != acpi_gbl_FADT.sci_interrupt)
+ return AE_BAD_PARAMETER;
+
+ free_irq(irq, acpi_irq);
+ acpi_irq_handler = NULL;
return AE_OK;
}
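
The install path now refuses anything but the SCI listed in the FADT instead of
silently substituting it, and it also rejects double registration. A short
sketch of what a caller sees (my_handler and my_context are placeholders):

    acpi_status status;

    status = acpi_os_install_interrupt_handler(gsi, my_handler, my_context);
    if (status == AE_BAD_PARAMETER)
            ;       /* gsi != acpi_gbl_FADT.sci_interrupt: refused */
    else if (status == AE_ALREADY_ACQUIRED)
            ;       /* an SCI handler is already installed */

    /* removal likewise has to name the SCI GSI */
    acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, my_handler);
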
@@ -1589,9 +1608,9 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = create_workqueue("kacpid");
- kacpi_notify_wq = create_workqueue("kacpi_notify");
- kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
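
create_workqueue() is swapped for alloc_workqueue(name, 0, 1): no WQ_* flags
and max_active capped at 1, limiting how many work items each of these queues
runs concurrently. A minimal sketch of the new call as it would sit in an init
path (example_work is a hypothetical, already-initialized struct work_struct):

    #include <linux/workqueue.h>

    struct workqueue_struct *wq;

    wq = alloc_workqueue("kacpid", 0, 1);   /* flags = 0, max_active = 1 */
    if (!wq)
            return AE_NO_MEMORY;

    queue_work(wq, &example_work);
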
@@ -1603,7 +1622,7 @@ acpi_status __init acpi_os_initialize1(void)
acpi_status acpi_os_terminate(void)
{
if (acpi_irq_handler) {
- acpi_os_remove_interrupt_handler(acpi_irq_irq,
+ acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
acpi_irq_handler);
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b99e62494607..b136c9c1e531 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -797,7 +797,6 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
acpi_status status;
acpi_event_status event_status;
- device->wakeup.run_wake_count = 0;
device->wakeup.flags.notifier_present = 0;
/* Power button, Lid switch always enable wakeup */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index d6a8cd14de2e..6c949602cbd1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
+#include <linux/acpi.h>
#include <asm/io.h>
@@ -199,8 +200,6 @@ static void acpi_pm_end(void)
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
-extern void do_suspend_lowlevel(void);
-
static u32 acpi_suspend_states[] = {
[PM_SUSPEND_ON] = ACPI_STATE_S0,
[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
@@ -243,20 +242,11 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
static int acpi_suspend_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
- unsigned long flags = 0;
u32 acpi_state = acpi_target_sleep_state;
+ int error;
ACPI_FLUSH_CPU_CACHE();
- /* Do arch specific saving of state. */
- if (acpi_state == ACPI_STATE_S3) {
- int error = acpi_save_state_mem();
-
- if (error)
- return error;
- }
-
- local_irq_save(flags);
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
@@ -264,7 +254,10 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
case ACPI_STATE_S3:
- do_suspend_lowlevel();
+ error = acpi_suspend_lowlevel();
+ if (error)
+ return error;
+ pr_info(PREFIX "Low-level resume complete\n");
break;
}
@@ -290,13 +283,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions_early();
- local_irq_restore(flags);
- printk(KERN_DEBUG "Back to C!\n");
-
- /* restore processor state */
- if (acpi_state == ACPI_STATE_S3)
- acpi_restore_state_mem();
-
suspend_nvs_restore();
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
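
The arch-specific state saving and IRQ flag handling deleted here must now live
behind acpi_suspend_lowlevel(). A rough reconstruction of what an
architecture's implementation is expected to cover, pieced together only from
the lines removed above (not the actual x86 code):

    /* sketch only -- the real routine is architecture code */
    int acpi_suspend_lowlevel(void)
    {
            unsigned long flags;
            int error;

            error = acpi_save_state_mem();  /* previously done by the caller */
            if (error)
                    return error;

            local_irq_save(flags);
            do_suspend_lowlevel();          /* enter S3, resume continues here */
            local_irq_restore(flags);

            acpi_restore_state_mem();       /* previously done by the caller */
            return 0;
    }
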
@@ -472,16 +458,13 @@ static int acpi_hibernation_begin(void)
static int acpi_hibernation_enter(void)
{
acpi_status status = AE_OK;
- unsigned long flags = 0;
ACPI_FLUSH_CPU_CACHE();
- local_irq_save(flags);
/* This shouldn't return. If it returns, we have a problem */
status = acpi_enter_sleep_state(ACPI_STATE_S4);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
- local_irq_restore(flags);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
@@ -585,7 +568,7 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
/**
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
* in the system sleep state given by %acpi_target_sleep_state
@@ -671,7 +654,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
*d_min_p = d_min;
return d_max;
}
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e7df019d29d4..6d2bb2524b6e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -13,16 +13,17 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include <asm/irq.h>
#include <asm/sizes.h>
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
-static struct amba_id *
-amba_lookup(struct amba_id *table, struct amba_device *dev)
+static const struct amba_id *
+amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
@@ -57,26 +58,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
#define amba_uevent NULL
#endif
-static int amba_suspend(struct device *dev, pm_message_t state)
-{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->suspend)
- ret = drv->suspend(to_amba_device(dev), state);
- return ret;
-}
-
-static int amba_resume(struct device *dev)
-{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->resume)
- ret = drv->resume(to_amba_device(dev));
- return ret;
-}
-
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
@@ -102,17 +83,330 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
+#ifdef CONFIG_PM_SLEEP
+
+static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct amba_driver *adrv = to_amba_driver(dev->driver);
+ struct amba_device *adev = to_amba_device(dev);
+ int ret = 0;
+
+ if (dev->driver && adrv->suspend)
+ ret = adrv->suspend(adev, mesg);
+
+ return ret;
+}
+
+static int amba_legacy_resume(struct device *dev)
+{
+ struct amba_driver *adrv = to_amba_driver(dev->driver);
+ struct amba_device *adev = to_amba_device(dev);
+ int ret = 0;
+
+ if (dev->driver && adrv->resume)
+ ret = adrv->resume(adev);
+
+ return ret;
+}
+
+static int amba_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void amba_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define amba_pm_prepare NULL
+#define amba_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+static int amba_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+static int amba_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define amba_pm_suspend NULL
+#define amba_pm_resume NULL
+#define amba_pm_suspend_noirq NULL
+#define amba_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int amba_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int amba_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int amba_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define amba_pm_freeze NULL
+#define amba_pm_thaw NULL
+#define amba_pm_poweroff NULL
+#define amba_pm_restore NULL
+#define amba_pm_freeze_noirq NULL
+#define amba_pm_thaw_noirq NULL
+#define amba_pm_poweroff_noirq NULL
+#define amba_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops amba_pm = {
+ .prepare = amba_pm_prepare,
+ .complete = amba_pm_complete,
+ .suspend = amba_pm_suspend,
+ .resume = amba_pm_resume,
+ .freeze = amba_pm_freeze,
+ .thaw = amba_pm_thaw,
+ .poweroff = amba_pm_poweroff,
+ .restore = amba_pm_restore,
+ .suspend_noirq = amba_pm_suspend_noirq,
+ .resume_noirq = amba_pm_resume_noirq,
+ .freeze_noirq = amba_pm_freeze_noirq,
+ .thaw_noirq = amba_pm_thaw_noirq,
+ .poweroff_noirq = amba_pm_poweroff_noirq,
+ .restore_noirq = amba_pm_restore_noirq,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
+};
+
+#define AMBA_PM (&amba_pm)
+
+#else /* !CONFIG_PM */
+
+#define AMBA_PM NULL
+
+#endif /* !CONFIG_PM */
+
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
*/
-static struct bus_type amba_bustype = {
+struct bus_type amba_bustype = {
.name = "amba",
.dev_attrs = amba_dev_attrs,
.match = amba_match,
.uevent = amba_uevent,
- .suspend = amba_suspend,
- .resume = amba_resume,
+ .pm = AMBA_PM,
};
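
With the bus now dispatching through dev_pm_ops and only falling back to the
legacy amba_driver suspend/resume hooks when a driver has no pm field, a driver
migrates by providing a pm table in its device_driver. A minimal sketch; the
foo_* names are hypothetical and .probe/.remove/.id_table are elided:

    static int foo_suspend(struct device *dev)
    {
            /* quiesce the hardware */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* reprogram the hardware */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    };

    static struct amba_driver foo_driver = {
            .drv = {
                    .name = "foo",
                    .pm   = &foo_pm_ops,    /* picked up by amba_pm above */
            },
            /* .probe, .remove, .id_table as usual */
    };
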
static int __init amba_init(void)
@@ -188,7 +482,7 @@ static int amba_probe(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(dev->driver);
- struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
+ const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
int ret;
do {
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c2328aed0836..8a1fc396f055 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,18 @@ config SATA_DWC
If unsure, say N.
+config SATA_DWC_DEBUG
+ bool "Debugging driver version"
+ depends on SATA_DWC
+ help
+ This option enables debugging output in the driver.
+
+config SATA_DWC_VDEBUG
+ bool "Verbose debug output"
+ depends on SATA_DWC_DEBUG
+ help
+ This option enables the taskfile dumping and NCQ debugging.
+
config SATA_MV
tristate "Marvell SATA support"
help
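
The two new symbols are presumably consumed near the top of sata_dwc_460ex.c by
defining the usual debug macros before the includes; the exact wiring below is
an assumption, though DEBUG_NCQ does appear in the driver hunks further down:

    #ifdef CONFIG_SATA_DWC_DEBUG
    #define DEBUG                   /* turns on dev_dbg() in this file */
    #endif

    #ifdef CONFIG_SATA_DWC_VDEBUG
    #define VERBOSE_DEBUG           /* turns on dev_vdbg() */
    #define DEBUG_NCQ               /* extra taskfile/NCQ dumping */
    #endif
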
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8d96ce37fc9..256d2b72cd1c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -175,8 +175,7 @@ static const struct ata_port_info ahci_port_info[] = {
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
@@ -383,6 +382,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
+ { PCI_DEVICE(0x1b4b, 0x9125),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 3e606c34f57b..ccaf08122058 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -213,10 +213,8 @@ enum {
/* ap->flags bits */
- AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
- ATA_FLAG_LPM,
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
ICH_MAP = 0x90, /* ICH MAP register */
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 6981f7680a00..721d38bfa339 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -237,7 +237,7 @@ static struct pci_device_id ata_generic[] = {
#endif
/* Intel, IDE class device */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
+ PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
.driver_data = ATA_GEN_INTEL_IDER },
/* Must come last. If you add entries adjust this table appropriately */
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6cb14ca8ee85..cdec4ab3b159 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -230,7 +230,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* SATA ports */
-
+
/* 82801EB (ICH5) */
{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 82801EB (ICH5) */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8b5ea399a4f4..a791b8ce6294 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -660,8 +660,7 @@ static int ata_acpi_filter_tf(struct ata_device *dev,
* @dev: target ATA device
* @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
*
- * Outputs ATA taskfile to standard ATA host controller using MMIO
- * or PIO as indicated by the ATA_FLAG_MMIO flag.
+ * Outputs ATA taskfile to standard ATA host controller.
* Writes the control, feature, nsect, lbal, lbam, and lbah registers.
* Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
* hob_lbal, hob_lbam, and hob_lbah.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d4e52e214859..b91e19cab102 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4210,7 +4210,7 @@ static int glob_match (const char *text, const char *pattern)
return 0; /* End of both strings: match */
return 1; /* No match */
}
-
+
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -5479,7 +5479,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
return NULL;
-
+
ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
ap->print_id = -1;
@@ -5887,21 +5887,9 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->ops = ops;
}
-
-static void async_port_probe(void *data, async_cookie_t cookie)
+int ata_port_probe(struct ata_port *ap)
{
- int rc;
- struct ata_port *ap = data;
-
- /*
- * If we're not allowed to scan this host in parallel,
- * we need to wait until all previous scans have completed
- * before going further.
- * Jeff Garzik says this is only within a controller, so we
- * don't need to wait for port 0, only for later ports.
- */
- if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
- async_synchronize_cookie(cookie);
+ int rc = 0;
/* probe */
if (ap->ops->error_handler) {
@@ -5927,23 +5915,33 @@ static void async_port_probe(void *data, async_cookie_t cookie)
DPRINTK("ata%u: bus probe begin\n", ap->print_id);
rc = ata_bus_probe(ap);
DPRINTK("ata%u: bus probe end\n", ap->print_id);
-
- if (rc) {
- /* FIXME: do something useful here?
- * Current libata behavior will
- * tear down everything when
- * the module is removed
- * or the h/w is unplugged.
- */
- }
}
+ return rc;
+}
+
+
+static void async_port_probe(void *data, async_cookie_t cookie)
+{
+ struct ata_port *ap = data;
+
+ /*
+ * If we're not allowed to scan this host in parallel,
+ * we need to wait until all previous scans have completed
+ * before going further.
+ * Jeff Garzik says this is only within a controller, so we
+ * don't need to wait for port 0, only for later ports.
+ */
+ if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
+ async_synchronize_cookie(cookie);
+
+ (void)ata_port_probe(ap);
/* in order to keep device order, we need to synchronize at this point */
async_synchronize_cookie(cookie);
ata_scsi_scan_host(ap, 1);
-
}
+
/**
* ata_host_register - register initialized ATA host
* @host: ATA host to register
@@ -5983,7 +5981,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
for (i = 0; i < host->n_ports; i++)
host->ports[i]->print_id = ata_print_id++;
-
+
/* Create associated sysfs transport objects */
for (i = 0; i < host->n_ports; i++) {
rc = ata_tport_add(host->dev,host->ports[i]);
@@ -6471,7 +6469,7 @@ static int __init ata_init(void)
ata_sff_exit();
rc = -ENOMEM;
goto err_out;
- }
+ }
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 17a637877d03..df3f3140c9c7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -587,11 +587,43 @@ static void ata_eh_unload(struct ata_port *ap)
void ata_scsi_error(struct Scsi_Host *host)
{
struct ata_port *ap = ata_shost_to_port(host);
- int i;
unsigned long flags;
+ LIST_HEAD(eh_work_q);
DPRINTK("ENTER\n");
+ spin_lock_irqsave(host->host_lock, flags);
+ list_splice_init(&host->eh_cmd_q, &eh_work_q);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
+
+ /* If we timed raced normal completion and there is nothing to
+ recover nr_timedout == 0 why exactly are we doing error recovery ? */
+ ata_scsi_port_error_handler(host, ap);
+
+ /* finish or retry handled scmd's and clean up */
+ WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_scsi_cmd_error_handler - error callback for a list of commands
+ * @host: scsi host containing the port
+ * @ap: ATA port within the host
+ * @eh_work_q: list of commands to process
+ *
+ * process the given list of commands and return those finished to the
+ * ap->eh_done_q. This function is the first part of the libata error
+ * handler which processes a given list of failed commands.
+ */
+void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ struct list_head *eh_work_q)
+{
+ int i;
+ unsigned long flags;
+
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
@@ -627,7 +659,7 @@ void ata_scsi_error(struct Scsi_Host *host)
if (ap->ops->lost_interrupt)
ap->ops->lost_interrupt(ap);
- list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
+ list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
for (i = 0; i < ATA_MAX_QUEUE; i++) {
@@ -671,8 +703,20 @@ void ata_scsi_error(struct Scsi_Host *host)
} else
spin_unlock_wait(ap->lock);
- /* If we timed raced normal completion and there is nothing to
- recover nr_timedout == 0 why exactly are we doing error recovery ? */
+}
+EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
+
+/**
+ * ata_scsi_port_error_handler - recover the port after the commands
+ * @host: SCSI host containing the port
+ * @ap: the ATA port
+ *
+ * Handle the recovery of the port @ap after all the commands
+ * have been recovered.
+ */
+void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+{
+ unsigned long flags;
/* invoke error handler */
if (ap->ops->error_handler) {
@@ -761,9 +805,6 @@ void ata_scsi_error(struct Scsi_Host *host)
ap->ops->eng_timeout(ap);
}
- /* finish or retry handled scmd's and clean up */
- WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
scsi_eh_flush_done_q(&ap->eh_done_q);
/* clean up */
@@ -784,9 +825,8 @@ void ata_scsi_error(struct Scsi_Host *host)
wake_up_all(&ap->eh_wait_q);
spin_unlock_irqrestore(ap->lock, flags);
-
- DPRINTK("EXIT\n");
}
+EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
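
Exporting the two halves lets an outside error-handling strategy (a SAS LLDD,
for instance) run libata's command triage and port recovery as separate steps.
A hedged sketch of such a caller, modeled on the rewritten ata_scsi_error()
above; my_eh_strategy is a placeholder and how the failed commands reach
host->eh_cmd_q is outside the sketch:

    static void my_eh_strategy(struct Scsi_Host *host, struct ata_port *ap)
    {
            unsigned long flags;
            LIST_HEAD(eh_work_q);

            spin_lock_irqsave(host->host_lock, flags);
            list_splice_init(&host->eh_cmd_q, &eh_work_q);
            spin_unlock_irqrestore(host->host_lock, flags);

            /* first half: finish or requeue the individual commands */
            ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

            /* second half: reset/recover the port itself */
            ata_scsi_port_error_handler(host, ap);
    }
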
/**
* ata_port_wait_eh - Wait for the currently pending EH to complete
@@ -1618,7 +1658,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
* host links. For disabled PMP links, only N bit is
* considered as X bit is left at 1 for link plugging.
*/
- if (link->lpm_policy != ATA_LPM_MAX_POWER)
+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 600f6353ecf8..c11675f34b93 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3759,7 +3759,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
return NULL;
ap->port_no = 0;
- ap->lock = shost->host_lock;
+ ap->lock = &host->lock;
ap->pio_mask = port_info->pio_mask;
ap->mwdma_mask = port_info->mwdma_mask;
ap->udma_mask = port_info->udma_mask;
@@ -3821,7 +3821,7 @@ int ata_sas_port_init(struct ata_port *ap)
if (!rc) {
ap->print_id = ata_print_id++;
- rc = ata_bus_probe(ap);
+ rc = ata_port_probe(ap);
}
return rc;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index af6141bb1ba3..e75a02d61a8f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1336,7 +1336,7 @@ static void ata_sff_pio_task(struct work_struct *work)
u8 status;
int poll_next;
- BUG_ON(ap->sff_pio_task_link == NULL);
+ BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
qc = ata_qc_from_tag(ap, link->active_tag);
if (!qc) {
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a9be110dbf51..773de97988a2 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -103,6 +103,7 @@ extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
+extern int ata_port_probe(struct ata_port *ap);
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c8d47034d5e9..91949d997555 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -245,7 +245,7 @@ static struct ata_port_operations pacpi_ops = {
static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 66ce6a526f27..36f189c7ee8c 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -194,7 +194,7 @@ static int __init pata_at32_init_one(struct device *dev,
/* Setup ATA bindings */
ap->ops = &at32_port_ops;
ap->pio_mask = PIO_MASK;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
/*
* Since all 8-bit taskfile transfers has to go on the lower
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 7aed5c792597..e0b58b8dfe6f 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1454,9 +1454,7 @@ static struct ata_port_operations bfin_pata_ops = {
static struct ata_port_info bfin_port_info[] = {
{
- .flags = ATA_FLAG_SLAVE_POSS
- | ATA_FLAG_MMIO
- | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = 0,
.udma_mask = 0,
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index b63d5e2d4628..24d7df81546b 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -151,7 +151,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
.check_atapi_dma= hpt3x3_atapi_dma,
.freeze = hpt3x3_freeze,
#endif
-
+
};
/**
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index aa0e0c51cc08..2d15f2548a10 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -616,7 +616,7 @@ static void it821x_display_disk(int n, u8 *buf)
if (buf[52] > 4) /* No Disk */
return;
- ata_id_c_string((u16 *)buf, id, 0, 41);
+ ata_id_c_string((u16 *)buf, id, 0, 41);
if (buf[51]) {
mode = ffs(buf[51]);
@@ -910,7 +910,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
rc = pcim_enable_device(pdev);
if (rc)
return rc;
-
+
if (pdev->vendor == PCI_VENDOR_ID_RDC) {
/* Deal with Vortex86SX */
if (pdev->revision == 0x11)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index ba54b089f98c..5253b271b3fe 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -177,7 +177,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
ap->ops = &ixp4xx_port_ops;
ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI;
+ ap->flags |= ATA_FLAG_NO_ATAPI;
ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 75b49d01780b..46f589edccdb 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1053,8 +1053,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
/* Allocate libata host for 1 port */
memset(&pinfo, 0, sizeof(struct ata_port_info));
pmac_macio_calc_timing_masks(priv, &pinfo);
- pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO |
- ATA_FLAG_NO_LEGACY;
+ pinfo.flags = ATA_FLAG_SLAVE_POSS;
pinfo.port_ops = &pata_macio_ops;
pinfo.private_data = priv;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index dd38083dcbeb..75a6a0c0094f 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -38,7 +38,7 @@ static int marvell_pata_active(struct pci_dev *pdev)
/* We don't yet know how to do this for other devices */
if (pdev->device != 0x6145)
- return 1;
+ return 1;
barp = pci_iomap(pdev, 5, 0x10);
if (barp == NULL)
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index d7d8026cde99..2fcac511d39c 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -680,7 +680,7 @@ mpc52xx_ata_remove_one(struct device *dev)
/* ======================================================================== */
static int __devinit
-mpc52xx_ata_probe(struct platform_device *op, const struct of_device_id *match)
+mpc52xx_ata_probe(struct platform_device *op)
{
unsigned int ipb_freq;
struct resource res_mem;
@@ -883,7 +883,7 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
};
-static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
+static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
.remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM
@@ -906,13 +906,13 @@ static int __init
mpc52xx_ata_init(void)
{
printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
- return of_register_platform_driver(&mpc52xx_ata_of_platform_driver);
+ return platform_driver_register(&mpc52xx_ata_of_platform_driver);
}
static void __exit
mpc52xx_ata_exit(void)
{
- of_unregister_platform_driver(&mpc52xx_ata_of_platform_driver);
+ platform_driver_unregister(&mpc52xx_ata_of_platform_driver);
}
module_init(mpc52xx_ata_init);
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index cc50bd09aa26..e277a142138c 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -165,7 +165,7 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
return rc;
ninja32_program(host->iomap[0]);
ata_host_resume(host);
- return 0;
+ return 0;
}
#endif
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index fa1b95a9a7ff..220ddc90608f 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -848,8 +848,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
cf_port->ap = ap;
ap->ops = &octeon_cf_ops;
ap->pio_mask = ATA_PIO6;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
- | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
+ ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
base = cs0 + ocd->base_region_bias;
if (!ocd->is16bit) {
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 480e043ce6b8..f3054009bd25 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,8 +14,7 @@
#include <linux/of_platform.h>
#include <linux/ata_platform.h>
-static int __devinit pata_of_platform_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
{
int ret;
struct device_node *dn = ofdev->dev.of_node;
@@ -90,7 +89,7 @@ static struct of_device_id pata_of_platform_match[] = {
};
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
-static struct of_platform_driver pata_of_platform_driver = {
+static struct platform_driver pata_of_platform_driver = {
.driver = {
.name = "pata_of_platform",
.owner = THIS_MODULE,
@@ -102,13 +101,13 @@ static struct of_platform_driver pata_of_platform_driver = {
static int __init pata_of_platform_init(void)
{
- return of_register_platform_driver(&pata_of_platform_driver);
+ return platform_driver_register(&pata_of_platform_driver);
}
module_init(pata_of_platform_init);
static void __exit pata_of_platform_exit(void)
{
- of_unregister_platform_driver(&pata_of_platform_driver);
+ platform_driver_unregister(&pata_of_platform_driver);
}
module_exit(pata_of_platform_exit);
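
Several hunks from here on repeat the same conversion: of_platform_driver goes
away, the probe callback loses its const struct of_device_id *match argument,
and registration switches to platform_driver_register()/
platform_driver_unregister(). A condensed sketch of the resulting shape (the
foo_* names are placeholders; foo_remove and foo_match are assumed to exist
elsewhere):

    static int foo_probe(struct platform_device *op)
    {
            /* match data, if needed, is looked up from op->dev instead of
             * arriving as the old second argument */
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name           = "foo",
                    .owner          = THIS_MODULE,
                    .of_match_table = foo_match,   /* same OF match table */
            },
    };

    static int __init foo_init(void)
    {
            return platform_driver_register(&foo_driver);
    }

    static void __exit foo_exit(void)
    {
            platform_driver_unregister(&foo_driver);
    }
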
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 11fb4ccc74b4..a2a73d953840 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -85,7 +85,7 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->ops = &palmld_port_ops;
ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING;
+ ap->flags |= ATA_FLAG_PIO_POLLING;
/* memory mapping voodoo */
ap->ioaddr.cmd_addr = mem + 0x10;
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 806292160b3f..29af660d968b 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -124,7 +124,7 @@ static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
* reset will recover the device.
*
*/
-
+
static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index b18351122525..9765ace16921 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -150,8 +150,7 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
static struct ata_port_info pdc2027x_port_info[] = {
/* PDC_UDMA_100 */
{
- .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -159,8 +158,7 @@ static struct ata_port_info pdc2027x_port_info[] = {
},
/* PDC_UDMA_133 */
{
- .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 1898c6ed4b4e..b4ede40f8ae1 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -292,7 +292,6 @@ static int __devinit pxa_ata_probe(struct platform_device *pdev)
ap->ops = &pxa_ata_port_ops;
ap->pio_mask = ATA_PIO4;
ap->mwdma_mask = ATA_MWDMA2;
- ap->flags = ATA_FLAG_MMIO;
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0ffd631000b7..baeaf938d55b 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -91,7 +91,6 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
ap->ops = &rb532_pata_port_ops;
ap->pio_mask = ATA_PIO4;
- ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 8a51d673e5b2..c446ae6055a3 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -531,7 +531,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
}
ap = host->ports[0];
- ap->flags |= ATA_FLAG_MMIO;
ap->pio_mask = ATA_PIO4;
if (cpu_type == TYPE_S3C64XX) {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 093715c3273a..88ea9b677b47 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -959,7 +959,7 @@ static struct ata_port_operations scc_pata_ops = {
static struct ata_port_info scc_port_info[] = {
{
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 60cea13cccce..c04abc393fc5 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -593,7 +593,7 @@ static const struct ata_port_info sis_info133 = {
.port_ops = &sis_133_ops,
};
const struct ata_port_info sis_info133_for_sata = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index adbe0426c8f0..1111712b3d7d 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -166,9 +166,7 @@ static struct ata_port_operations adma_ata_ops = {
static struct ata_port_info adma_port_info[] = {
/* board_1841_idx */
{
- .flags = ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
- ATA_FLAG_PIO_POLLING,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA4,
.port_ops = &adma_ata_ops,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 6cf57c5c2b5f..1c4b3aa4c7c4 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -40,8 +40,11 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
+/* These two are defined in "libata.h" */
+#undef DRV_NAME
+#undef DRV_VERSION
#define DRV_NAME "sata-dwc"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.3"
/* SATA DMA driver Globals */
#define DMA_NUM_CHANS 1
@@ -333,11 +336,47 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
void __iomem *addr, int dir);
static void dma_dwc_xfer_start(int dma_ch);
+static const char *get_prot_descript(u8 protocol)
+{
+ switch ((enum ata_tf_protocols)protocol) {
+ case ATA_PROT_NODATA:
+ return "ATA no data";
+ case ATA_PROT_PIO:
+ return "ATA PIO";
+ case ATA_PROT_DMA:
+ return "ATA DMA";
+ case ATA_PROT_NCQ:
+ return "ATA NCQ";
+ case ATAPI_PROT_NODATA:
+ return "ATAPI no data";
+ case ATAPI_PROT_PIO:
+ return "ATAPI PIO";
+ case ATAPI_PROT_DMA:
+ return "ATAPI DMA";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *get_dma_dir_descript(int dma_dir)
+{
+ switch ((enum dma_data_direction)dma_dir) {
+ case DMA_BIDIRECTIONAL:
+ return "bidirectional";
+ case DMA_TO_DEVICE:
+ return "to device";
+ case DMA_FROM_DEVICE:
+ return "from device";
+ default:
+ return "none";
+ }
+}
+
static void sata_dwc_tf_dump(struct ata_taskfile *tf)
{
dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
- "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\
- (tf->protocol), tf->flags, tf->device);
+ "0x%lx device: %x\n", tf->command,
+ get_prot_descript(tf->protocol), tf->flags, tf->device);
dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
"lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
tf->lbam, tf->lbah);
@@ -715,7 +754,7 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
/* Program the CTL register with src enable / dst enable */
out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
- return 0;
+ return dma_ch;
}
/*
@@ -967,7 +1006,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
}
dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
- __func__, ata_get_cmd_descript(qc->tf.protocol));
+ __func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
if (ata_is_dma(qc->tf.protocol)) {
/*
@@ -1057,7 +1096,7 @@ DRVSTILLBUSY:
/* Process completed command */
dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
- ata_get_cmd_descript(qc->tf.protocol));
+ get_prot_descript(qc->tf.protocol));
if (ata_is_dma(qc->tf.protocol)) {
host_pvt.dma_interrupt_count++;
if (hsdevp->dma_pending[tag] == \
@@ -1142,8 +1181,8 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
if (tag > 0) {
dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
"dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
- ata_get_cmd_descript(qc->dma_dir),
- ata_get_cmd_descript(qc->tf.protocol),
+ get_dma_dir_descript(qc->dma_dir),
+ get_prot_descript(qc->tf.protocol),
in_le32(&(hsdev->sata_dwc_regs->dmacr)));
}
#endif
@@ -1354,7 +1393,7 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
- ata_get_cmd_descript(tf), tag);
+ ata_get_cmd_descript(tf->command), tag);
spin_lock_irqsave(&ap->host->lock, flags);
hsdevp->cmd_issued[tag] = cmd_issued;
@@ -1413,7 +1452,7 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
"start_dma? %x\n", __func__, qc, tag, qc->tf.command,
- ata_get_cmd_descript(qc->dma_dir), start_dma);
+ get_dma_dir_descript(qc->dma_dir), start_dma);
sata_dwc_tf_dump(&(qc->tf));
if (start_dma) {
@@ -1462,10 +1501,9 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
int dma_chan;
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
- int err;
dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
- __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir),
+ __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
qc->n_elem);
dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
@@ -1474,7 +1512,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
dmadr), qc->dma_dir);
if (dma_chan < 0) {
dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
- __func__, err);
+ __func__, dma_chan);
return;
}
hsdevp->dma_chan[tag] = dma_chan;
@@ -1491,8 +1529,8 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
"prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
__func__, ap->print_id, qc->tf.command,
- ata_get_cmd_descript(&qc->tf),
- qc->tag, ata_get_cmd_descript(qc->tf.protocol),
+ ata_get_cmd_descript(qc->tf.command),
+ qc->tag, get_prot_descript(qc->tf.protocol),
ap->link.active_tag, ap->link.sactive);
#endif
@@ -1533,7 +1571,7 @@ static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
#ifdef DEBUG_NCQ
if (qc->tag > 0)
dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
- __func__, tag, qc->ap->link.active_tag);
+ __func__, qc->tag, qc->ap->link.active_tag);
return ;
#endif
@@ -1580,16 +1618,14 @@ static struct ata_port_operations sata_dwc_ops = {
static const struct ata_port_info sata_dwc_port_info[] = {
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_NCQ,
- .pio_mask = 0x1f, /* pio 0-4 */
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &sata_dwc_ops,
},
};
-static int sata_dwc_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int sata_dwc_probe(struct platform_device *ofdev)
{
struct sata_dwc_device *hsdev;
u32 idr, versionr;
@@ -1727,7 +1763,7 @@ static const struct of_device_id sata_dwc_match[] = {
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);
-static struct of_platform_driver sata_dwc_driver = {
+static struct platform_driver sata_dwc_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -1739,12 +1775,12 @@ static struct of_platform_driver sata_dwc_driver = {
static int __init sata_dwc_init(void)
{
- return of_register_platform_driver(&sata_dwc_driver);
+ return platform_driver_register(&sata_dwc_driver);
}
static void __exit sata_dwc_exit(void)
{
- of_unregister_platform_driver(&sata_dwc_driver);
+ platform_driver_unregister(&sata_dwc_driver);
}
module_init(sata_dwc_init);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b0214d00d50b..883c967b11b9 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -33,8 +33,7 @@ enum {
SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1,
SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
- SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
@@ -1293,8 +1292,7 @@ static const struct ata_port_info sata_fsl_port_info[] = {
},
};
-static int sata_fsl_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int sata_fsl_probe(struct platform_device *ofdev)
{
int retval = -ENXIO;
void __iomem *hcr_base = NULL;
@@ -1423,7 +1421,7 @@ static struct of_device_id fsl_sata_match[] = {
MODULE_DEVICE_TABLE(of, fsl_sata_match);
-static struct of_platform_driver fsl_sata_driver = {
+static struct platform_driver fsl_sata_driver = {
.driver = {
.name = "fsl-sata",
.owner = THIS_MODULE,
@@ -1439,13 +1437,13 @@ static struct of_platform_driver fsl_sata_driver = {
static int __init sata_fsl_init(void)
{
- of_register_platform_driver(&fsl_sata_driver);
+ platform_driver_register(&fsl_sata_driver);
return 0;
}
static void __exit sata_fsl_exit(void)
{
- of_unregister_platform_driver(&fsl_sata_driver);
+ platform_driver_unregister(&fsl_sata_driver);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bf74a36d3cc3..cd40651e9b72 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -160,8 +160,7 @@ enum {
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
- MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
+ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 7254e255fd78..42344e3c686d 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -539,7 +539,7 @@ struct nv_pi_priv {
static const struct ata_port_info nv_port_info[] = {
/* generic */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -548,7 +548,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* nforce2/3 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -557,7 +557,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* ck804 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -566,8 +566,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* ADMA */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -576,7 +575,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* MCP5x */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -585,8 +584,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* SWNCQ */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f03ad48273ff..a004b1e0ea6d 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -134,9 +134,7 @@ enum {
PDC_IRQ_DISABLE = (1 << 10),
PDC_RESET = (1 << 11), /* HDMA reset */
- PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO |
- ATA_FLAG_PIO_POLLING,
+ PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING,
/* ap->flags bits */
PDC_FLAG_GEN_II = (1 << 24),
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index daeebf19a6a9..c5603265fa58 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -155,8 +155,7 @@ static struct ata_port_operations qs_ata_ops = {
static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA6,
.port_ops = &qs_ata_ops,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a4f84219719..b42edaaf3a53 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -61,8 +61,7 @@ enum {
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
- SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO,
+ SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,
/*
* Controller IDs
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index af41c6fd1254..06c564e55051 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -244,8 +244,7 @@ enum {
BID_SIL3131 = 2,
/* host flags */
- SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
ATA_FLAG_AN | ATA_FLAG_PMP,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 2bfe3ae03976..cdcc13e9cf51 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -96,7 +96,7 @@ static struct ata_port_operations sis_ops = {
};
static const struct ata_port_info sis_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7d9db4aaf07e..35eabcf34568 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -359,8 +359,7 @@ static struct ata_port_operations k2_sata_ops = {
static const struct ata_port_info k2_port_info[] = {
/* chip_svw4 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
+ .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -368,8 +367,7 @@ static const struct ata_port_info k2_port_info[] = {
},
/* chip_svw8 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
+ .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA |
K2_FLAG_SATA_8_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -378,8 +376,7 @@ static const struct ata_port_info k2_port_info[] = {
},
/* chip_svw42 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
+ .flags = ATA_FLAG_SATA | K2_FLAG_BAR_POS_3,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -387,8 +384,7 @@ static const struct ata_port_info k2_port_info[] = {
},
/* chip_svw43 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index bedd5188e5b0..8fd3b7252bda 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -273,9 +273,8 @@ static struct ata_port_operations pdc_20621_ops = {
static const struct ata_port_info pdc_port_info[] = {
/* board_20621 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_SRST | ATA_FLAG_MMIO |
- ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
+ ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index b8578c32d344..235be717a713 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -88,8 +88,7 @@ static struct ata_port_operations uli_ops = {
};
static const struct ata_port_info uli_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_IGN_SIMPLEX,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &uli_ops,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 8b677bbf2d37..21242c5709a0 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -148,7 +148,7 @@ static struct ata_port_operations vt8251_ops = {
};
static const struct ata_port_info vt6420_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -156,7 +156,7 @@ static const struct ata_port_info vt6420_port_info = {
};
static struct ata_port_info vt6421_sport_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -164,7 +164,7 @@ static struct ata_port_info vt6421_sport_info = {
};
static struct ata_port_info vt6421_pport_info = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
@@ -172,8 +172,7 @@ static struct ata_port_info vt6421_pport_info = {
};
static struct ata_port_info vt8251_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index e079cf29ed5d..7c987371136e 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -340,8 +340,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static const struct ata_port_info pi = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 7d912baf01d4..049650d42c88 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1031,7 +1031,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
/* We now use the "submit_command" function to submit commands to
the firestream. There is a define up near the definition of
that routine that switches this routine between immediate write
- to the immediate comamnd registers and queuing the commands in
+ to the immediate command registers and queuing the commands in
the HPTXQ for execution. This last technique might be more
efficient if we know we're going to submit a whole lot of
commands in one go, but this driver is not setup to be able to
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 44f778507770..bdd2719f3f68 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2643,14 +2643,17 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
}
#ifdef CONFIG_SBUS
-static int __devinit fore200e_sba_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fore200e_sba_probe(struct platform_device *op)
{
- const struct fore200e_bus *bus = match->data;
+ const struct fore200e_bus *bus;
struct fore200e *fore200e;
static int index = 0;
int err;
+ if (!op->dev.of_match)
+ return -EINVAL;
+ bus = op->dev.of_match->data;
+
fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
if (!fore200e)
return -ENOMEM;
@@ -2694,7 +2697,7 @@ static const struct of_device_id fore200e_sba_match[] = {
};
MODULE_DEVICE_TABLE(of, fore200e_sba_match);
-static struct of_platform_driver fore200e_sba_driver = {
+static struct platform_driver fore200e_sba_driver = {
.driver = {
.name = "fore_200e",
.owner = THIS_MODULE,
@@ -2795,7 +2798,7 @@ static int __init fore200e_module_init(void)
printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
#ifdef CONFIG_SBUS
- err = of_register_platform_driver(&fore200e_sba_driver);
+ err = platform_driver_register(&fore200e_sba_driver);
if (err)
return err;
#endif
@@ -2806,7 +2809,7 @@ static int __init fore200e_module_init(void)
#ifdef CONFIG_SBUS
if (err)
- of_unregister_platform_driver(&fore200e_sba_driver);
+ platform_driver_unregister(&fore200e_sba_driver);
#endif
return err;
@@ -2818,7 +2821,7 @@ static void __exit fore200e_module_cleanup(void)
pci_unregister_driver(&fore200e_pca_driver);
#endif
#ifdef CONFIG_SBUS
- of_unregister_platform_driver(&fore200e_sba_driver);
+ platform_driver_unregister(&fore200e_sba_driver);
#endif
}
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index abe46edfe5b4..118c1b92a511 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,7 +1,6 @@
-obj-$(CONFIG_PM) += sysfs.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
-obj-$(CONFIG_PM_OPS) += generic_ops.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 83404973f97a..052dc53eef38 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -423,26 +423,22 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "EARLY ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
- if (error)
- goto End;
+ if (dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "EARLY power domain ");
+ pm_noirq_op(dev, &dev->pwr_domain->ops, state);
}
if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "EARLY type ");
error = pm_noirq_op(dev, dev->type->pm, state);
- if (error)
- goto End;
- }
-
- if (dev->class && dev->class->pm) {
+ } else if (dev->class && dev->class->pm) {
pm_dev_dbg(dev, state, "EARLY class ");
error = pm_noirq_op(dev, dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ pm_dev_dbg(dev, state, "EARLY ");
+ error = pm_noirq_op(dev, dev->bus->pm, state);
}
-End:
TRACE_RESUME(error);
return error;
}
@@ -518,36 +514,39 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dev->power.in_suspend = false;
- if (dev->bus) {
- if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
- } else if (dev->bus->resume) {
- pm_dev_dbg(dev, state, "legacy ");
- error = legacy_resume(dev, dev->bus->resume);
- }
- if (error)
- goto End;
+ if (dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "power domain ");
+ pm_op(dev, &dev->pwr_domain->ops, state);
}
- if (dev->type) {
- if (dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- }
- if (error)
- goto End;
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "type ");
+ error = pm_op(dev, dev->type->pm, state);
+ goto End;
}
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
+ goto End;
} else if (dev->class->resume) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_resume(dev, dev->class->resume);
+ goto End;
}
}
+
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ pm_dev_dbg(dev, state, "");
+ error = pm_op(dev, dev->bus->pm, state);
+ } else if (dev->bus->resume) {
+ pm_dev_dbg(dev, state, "legacy ");
+ error = legacy_resume(dev, dev->bus->resume);
+ }
+ }
+
End:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -629,19 +628,23 @@ static void device_complete(struct device *dev, pm_message_t state)
{
device_lock(dev);
- if (dev->class && dev->class->pm && dev->class->pm->complete) {
- pm_dev_dbg(dev, state, "completing class ");
- dev->class->pm->complete(dev);
+ if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+ pm_dev_dbg(dev, state, "completing power domain ");
+ dev->pwr_domain->ops.complete(dev);
}
- if (dev->type && dev->type->pm && dev->type->pm->complete) {
+ if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "completing type ");
- dev->type->pm->complete(dev);
- }
-
- if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
+ if (dev->type->pm->complete)
+ dev->type->pm->complete(dev);
+ } else if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "completing class ");
+ if (dev->class->pm->complete)
+ dev->class->pm->complete(dev);
+ } else if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "completing ");
- dev->bus->pm->complete(dev);
+ if (dev->bus->pm->complete)
+ dev->bus->pm->complete(dev);
}
device_unlock(dev);
@@ -669,7 +672,6 @@ static void dpm_complete(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
device_complete(dev, state);
- pm_runtime_put_sync(dev);
mutex_lock(&dpm_list_mtx);
put_device(dev);
@@ -727,29 +729,31 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
- int error = 0;
-
- if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "LATE class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
- if (error)
- goto End;
- }
+ int error;
if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "LATE type ");
error = pm_noirq_op(dev, dev->type->pm, state);
if (error)
- goto End;
- }
-
- if (dev->bus && dev->bus->pm) {
+ return error;
+ } else if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "LATE class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ if (error)
+ return error;
+ } else if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
+ if (error)
+ return error;
}
-End:
- return error;
+ if (dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "LATE power domain ");
+ pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ }
+
+ return 0;
}
/**
@@ -836,25 +840,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto End;
}
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "type ");
+ error = pm_op(dev, dev->type->pm, state);
+ goto Domain;
+ }
+
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
+ goto Domain;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
+ goto Domain;
}
- if (error)
- goto End;
- }
-
- if (dev->type) {
- if (dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- }
- if (error)
- goto End;
}
if (dev->bus) {
@@ -867,6 +868,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
}
+ Domain:
+ if (!error && dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "power domain ");
+ pm_op(dev, &dev->pwr_domain->ops, state);
+ }
+
End:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -957,27 +964,34 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "preparing type ");
+ if (dev->type->pm->prepare)
+ error = dev->type->pm->prepare(dev);
+ suspend_report_result(dev->type->pm->prepare, error);
+ if (error)
+ goto End;
+ } else if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "preparing class ");
+ if (dev->class->pm->prepare)
+ error = dev->class->pm->prepare(dev);
+ suspend_report_result(dev->class->pm->prepare, error);
+ if (error)
+ goto End;
+ } else if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "preparing ");
- error = dev->bus->pm->prepare(dev);
+ if (dev->bus->pm->prepare)
+ error = dev->bus->pm->prepare(dev);
suspend_report_result(dev->bus->pm->prepare, error);
if (error)
goto End;
}
- if (dev->type && dev->type->pm && dev->type->pm->prepare) {
- pm_dev_dbg(dev, state, "preparing type ");
- error = dev->type->pm->prepare(dev);
- suspend_report_result(dev->type->pm->prepare, error);
- if (error)
- goto End;
+ if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
+ pm_dev_dbg(dev, state, "preparing power domain ");
+ dev->pwr_domain->ops.prepare(dev);
}
- if (dev->class && dev->class->pm && dev->class->pm->prepare) {
- pm_dev_dbg(dev, state, "preparing class ");
- error = dev->class->pm->prepare(dev);
- suspend_report_result(dev->class->pm->prepare, error);
- }
End:
device_unlock(dev);
@@ -1005,12 +1019,9 @@ static int dpm_prepare(pm_message_t state)
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
pm_wakeup_event(dev, 0);
- if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
- error = -EBUSY;
- } else {
- error = device_prepare(dev, state);
- }
+ pm_runtime_put_sync(dev);
+ error = pm_wakeup_pending() ?
+ -EBUSY : device_prepare(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 2bb9b4cf59d7..56a6899f5e9e 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -222,7 +222,7 @@ int opp_get_opp_count(struct device *dev)
* opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
- * @is_available: true/false - match for available opp
+ * @available: true/false - match for available opp
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 698dde742587..f2a25f18fde7 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -58,19 +58,18 @@ static inline void device_pm_move_last(struct device *dev) {}
* sysfs.c
*/
-extern int dpm_sysfs_add(struct device *);
-extern void dpm_sysfs_remove(struct device *);
-extern void rpm_sysfs_remove(struct device *);
+extern int dpm_sysfs_add(struct device *dev);
+extern void dpm_sysfs_remove(struct device *dev);
+extern void rpm_sysfs_remove(struct device *dev);
+extern int wakeup_sysfs_add(struct device *dev);
+extern void wakeup_sysfs_remove(struct device *dev);
#else /* CONFIG_PM */
-static inline int dpm_sysfs_add(struct device *dev)
-{
- return 0;
-}
-
-static inline void dpm_sysfs_remove(struct device *dev)
-{
-}
+static inline int dpm_sysfs_add(struct device *dev) { return 0; }
+static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline void rpm_sysfs_remove(struct device *dev) {}
+static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
+static inline void wakeup_sysfs_remove(struct device *dev) {}
#endif
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 42615b419dfb..54597c859ecb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,6 +168,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int rpm_idle(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
+ int (*domain_callback)(struct device *);
int retval;
retval = rpm_check_suspend_allowed(dev);
@@ -213,19 +214,28 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
- callback = dev->bus->pm->runtime_idle;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
+ if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_idle;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_idle;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_idle;
else
callback = NULL;
- if (callback) {
+ if (dev->pwr_domain)
+ domain_callback = dev->pwr_domain->ops.runtime_idle;
+ else
+ domain_callback = NULL;
+
+ if (callback || domain_callback) {
spin_unlock_irq(&dev->power.lock);
- callback(dev);
+ if (domain_callback)
+ retval = domain_callback(dev);
+
+ if (!retval && callback)
+ callback(dev);
spin_lock_irq(&dev->power.lock);
}
@@ -372,12 +382,12 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- callback = dev->bus->pm->runtime_suspend;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
+ if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_suspend;
else
callback = NULL;
@@ -390,6 +400,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
pm_runtime_cancel_pending(dev);
} else {
+ if (dev->pwr_domain)
+ rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -569,12 +581,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- callback = dev->bus->pm->runtime_resume;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
+ if (dev->pwr_domain)
+ rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
+
+ if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_resume;
else
callback = NULL;
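
A minimal standalone sketch of the callback selection order the runtime PM hunks above switch to: the power-domain callback, when present, is consulted in addition to exactly one of the type/class/bus callbacks, with type taking precedence over class and class over bus. The names fake_ops, fake_dev and fake_rpm_idle are invented for illustration only and are not part of the patch.

/*
 * Illustrative userspace model of the precedence used above; not kernel code.
 */
#include <stdio.h>

struct fake_ops { int (*runtime_idle)(void); };

struct fake_dev {
	const struct fake_ops *pwr_domain;	/* consulted separately */
	const struct fake_ops *type;
	const struct fake_ops *class;
	const struct fake_ops *bus;
};

static int domain_idle(void) { puts("power domain idle"); return 0; }
static int bus_idle(void)    { puts("bus idle");          return 0; }

static int fake_rpm_idle(struct fake_dev *dev)
{
	int (*cb)(void) = NULL;
	int ret = 0;

	/* Type beats class, class beats bus; at most one of them is called. */
	if (dev->type)
		cb = dev->type->runtime_idle;
	else if (dev->class)
		cb = dev->class->runtime_idle;
	else if (dev->bus)
		cb = dev->bus->runtime_idle;

	/* The power-domain callback runs first; the other one only if it succeeds. */
	if (dev->pwr_domain && dev->pwr_domain->runtime_idle)
		ret = dev->pwr_domain->runtime_idle();
	if (!ret && cb)
		ret = cb();
	return ret;
}

int main(void)
{
	static const struct fake_ops domain = { domain_idle };
	static const struct fake_ops bus = { bus_idle };
	struct fake_dev dev = { &domain, NULL, NULL, &bus };

	return fake_rpm_idle(&dev);	/* prints "power domain idle" then "bus idle" */
}
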
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 0b1e46bf3e56..fff49bee781d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -431,26 +431,18 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(async, 0644, async_show, async_store);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
-static struct attribute * power_attrs[] = {
- &dev_attr_wakeup.attr,
-#ifdef CONFIG_PM_SLEEP
- &dev_attr_wakeup_count.attr,
- &dev_attr_wakeup_active_count.attr,
- &dev_attr_wakeup_hit_count.attr,
- &dev_attr_wakeup_active.attr,
- &dev_attr_wakeup_total_time_ms.attr,
- &dev_attr_wakeup_max_time_ms.attr,
- &dev_attr_wakeup_last_time_ms.attr,
-#endif
+static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_SLEEP
&dev_attr_async.attr,
+#endif
#ifdef CONFIG_PM_RUNTIME
&dev_attr_runtime_status.attr,
&dev_attr_runtime_usage.attr,
&dev_attr_runtime_active_kids.attr,
&dev_attr_runtime_enabled.attr,
#endif
-#endif
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static struct attribute_group pm_attr_group = {
@@ -458,9 +450,26 @@ static struct attribute_group pm_attr_group = {
.attrs = power_attrs,
};
-#ifdef CONFIG_PM_RUNTIME
+static struct attribute *wakeup_attrs[] = {
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_wakeup.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_wakeup_active_count.attr,
+ &dev_attr_wakeup_hit_count.attr,
+ &dev_attr_wakeup_active.attr,
+ &dev_attr_wakeup_total_time_ms.attr,
+ &dev_attr_wakeup_max_time_ms.attr,
+ &dev_attr_wakeup_last_time_ms.attr,
+#endif
+ NULL,
+};
+static struct attribute_group pm_wakeup_attr_group = {
+ .name = power_group_name,
+ .attrs = wakeup_attrs,
+};
static struct attribute *runtime_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
#endif
@@ -468,6 +477,7 @@ static struct attribute *runtime_attrs[] = {
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_runtime_attr_group = {
@@ -480,35 +490,49 @@ int dpm_sysfs_add(struct device *dev)
int rc;
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
- if (rc == 0 && !dev->power.no_callbacks) {
+ if (rc)
+ return rc;
+
+ if (pm_runtime_callbacks_present(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
- sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ goto err_out;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ if (rc) {
+ if (pm_runtime_callbacks_present(dev))
+ sysfs_unmerge_group(&dev->kobj,
+ &pm_runtime_attr_group);
+ goto err_out;
+ }
}
+ return 0;
+
+ err_out:
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
}
-void rpm_sysfs_remove(struct device *dev)
+int wakeup_sysfs_add(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-void dpm_sysfs_remove(struct device *dev)
+void wakeup_sysfs_remove(struct device *dev)
{
- rpm_sysfs_remove(dev);
- sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-#else /* CONFIG_PM_RUNTIME */
-
-int dpm_sysfs_add(struct device * dev)
+void rpm_sysfs_remove(struct device *dev)
{
- return sysfs_create_group(&dev->kobj, &pm_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}
-void dpm_sysfs_remove(struct device * dev)
+void dpm_sysfs_remove(struct device *dev)
{
+ rpm_sysfs_remove(dev);
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
}
-
-#endif
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 9f4258df4cfd..c80e138b62fe 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- printk("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
@@ -179,7 +179,7 @@ static int show_file_hash(unsigned int value)
unsigned int hash = hash_string(lineno, file, FILEHASH);
if (hash != value)
continue;
- printk(" hash matches %s:%u\n", file, lineno);
+ pr_info(" hash matches %s:%u\n", file, lineno);
match++;
}
return match;
@@ -255,7 +255,7 @@ static int late_resume_init(void)
val = val / FILEHASH;
dev = val /* % DEVHASH */;
- printk(" Magic number: %d:%d:%d\n", user, file, dev);
+ pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
show_file_hash(file);
show_dev_hash(dev);
return 0;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8ec406d8f548..4573c83df6dd 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,12 +24,26 @@
*/
bool events_check_enabled;
-/* The counter of registered wakeup events. */
-static atomic_t event_count = ATOMIC_INIT(0);
-/* A preserved old value of event_count. */
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS (sizeof(int) * 4)
+#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+ unsigned int comb = atomic_read(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+}
+
+/* A preserved old value of the events counter. */
static unsigned int saved_count;
-/* The counter of wakeup events being processed. */
-static atomic_t events_in_progress = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(events_lock);
@@ -228,6 +242,35 @@ int device_wakeup_disable(struct device *dev)
EXPORT_SYMBOL_GPL(device_wakeup_disable);
/**
+ * device_set_wakeup_capable - Set/reset device wakeup capability flag.
+ * @dev: Device to handle.
+ * @capable: Whether or not @dev is capable of waking up the system from sleep.
+ *
+ * If @capable is set, set the @dev's power.can_wakeup flag and add its
+ * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
+ * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
+ *
+ * This function may sleep and it can't be called from any context where
+ * sleeping is not allowed.
+ */
+void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+ if (!!dev->power.can_wakeup == !!capable)
+ return;
+
+ if (device_is_registered(dev)) {
+ if (capable) {
+ if (wakeup_sysfs_add(dev))
+ return;
+ } else {
+ wakeup_sysfs_remove(dev);
+ }
+ }
+ dev->power.can_wakeup = capable;
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+
+/**
* device_init_wakeup - Device wakeup initialization.
* @dev: Device to handle.
* @enable: Whether or not to enable @dev as a wakeup device.
@@ -307,7 +350,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
ws->timer_expires = jiffies;
ws->last_time = ktime_get();
- atomic_inc(&events_in_progress);
+ /* Increment the counter of events in progress. */
+ atomic_inc(&combined_event_count);
}
/**
@@ -394,14 +438,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
del_timer(&ws->timer);
/*
- * event_count has to be incremented before events_in_progress is
- * modified, so that the callers of pm_check_wakeup_events() and
- * pm_save_wakeup_count() don't see the old value of event_count and
- * events_in_progress equal to zero at the same time.
+ * Increment the counter of registered wakeup events and decrement the
+ * counter of wakeup events in progress simultaneously.
*/
- atomic_inc(&event_count);
- smp_mb__before_atomic_dec();
- atomic_dec(&events_in_progress);
+ atomic_add(MAX_IN_PROGRESS, &combined_event_count);
}
/**
@@ -556,8 +596,10 @@ bool pm_wakeup_pending(void)
spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
- ret = ((unsigned int)atomic_read(&event_count) != saved_count)
- || atomic_read(&events_in_progress);
+ unsigned int cnt, inpr;
+
+ split_counters(&cnt, &inpr);
+ ret = (cnt != saved_count || inpr > 0);
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
@@ -573,25 +615,25 @@ bool pm_wakeup_pending(void)
* Store the number of registered wakeup events at the address in @count. Block
* if the current number of wakeup events being processed is nonzero.
*
- * Return false if the wait for the number of wakeup events being processed to
+ * Return 'false' if the wait for the number of wakeup events being processed to
* drop down to zero has been interrupted by a signal (and the current number
- * of wakeup events being processed is still nonzero). Otherwise return true.
+ * of wakeup events being processed is still nonzero). Otherwise return 'true'.
*/
bool pm_get_wakeup_count(unsigned int *count)
{
- bool ret;
-
- if (capable(CAP_SYS_ADMIN))
- events_check_enabled = false;
+ unsigned int cnt, inpr;
- while (atomic_read(&events_in_progress) && !signal_pending(current)) {
+ for (;;) {
+ split_counters(&cnt, &inpr);
+ if (inpr == 0 || signal_pending(current))
+ break;
pm_wakeup_update_hit_counts();
schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
}
- ret = !atomic_read(&events_in_progress);
- *count = atomic_read(&event_count);
- return ret;
+ split_counters(&cnt, &inpr);
+ *count = cnt;
+ return !inpr;
}
/**
@@ -600,24 +642,25 @@ bool pm_get_wakeup_count(unsigned int *count)
*
* If @count is equal to the current number of registered wakeup events and the
* current number of wakeup events being processed is zero, store @count as the
- * old number of registered wakeup events to be used by pm_check_wakeup_events()
- * and return true. Otherwise return false.
+ * old number of registered wakeup events for pm_check_wakeup_events(), enable
+ * wakeup events detection and return 'true'. Otherwise disable wakeup events
+ * detection and return 'false'.
*/
bool pm_save_wakeup_count(unsigned int count)
{
- bool ret = false;
+ unsigned int cnt, inpr;
+ events_check_enabled = false;
spin_lock_irq(&events_lock);
- if (count == (unsigned int)atomic_read(&event_count)
- && !atomic_read(&events_in_progress)) {
+ split_counters(&cnt, &inpr);
+ if (cnt == count && inpr == 0) {
saved_count = count;
events_check_enabled = true;
- ret = true;
}
spin_unlock_irq(&events_lock);
- if (!ret)
+ if (!events_check_enabled)
pm_wakeup_update_hit_counts();
- return ret;
+ return events_check_enabled;
}
static struct dentry *wakeup_sources_stats_dentry;
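
The wakeup.c hunks above fold the registered-events count and the in-progress count into one atomic word so both halves change in a single atomic operation. Below is a small userspace sketch of that packing, assuming C11 atomics and the same half-word split; split_counters and MAX_IN_PROGRESS mirror the names in the patch, everything else is invented for the example.

/*
 * Userspace illustration of the combined-counter trick; not kernel code.
 * Low half: events in progress. High half: registered (completed) events.
 */
#include <stdatomic.h>
#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(unsigned int) * 8 / 2)
#define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

static atomic_uint combined_event_count;

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_load(&combined_event_count);

	*cnt  = comb >> IN_PROGRESS_BITS;
	*inpr = comb & MAX_IN_PROGRESS;
}

static void event_start(void)
{
	/* One more event in progress: bump the low half. */
	atomic_fetch_add(&combined_event_count, 1);
}

static void event_finish(void)
{
	/*
	 * Adding MAX_IN_PROGRESS decrements the low half by one and carries one
	 * into the high half: the event is registered and leaves "in progress"
	 * in a single atomic add, as wakeup_source_deactivate() does above.
	 */
	atomic_fetch_add(&combined_event_count, MAX_IN_PROGRESS);
}

int main(void)
{
	unsigned int cnt, inpr;

	event_start();
	event_start();
	event_finish();
	split_counters(&cnt, &inpr);
	printf("registered=%u in_progress=%u\n", cnt, inpr);	/* prints 1 and 1 */
	return 0;
}
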
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev);
+ __invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
index a0b403a6b4ed..e5565fbaeb30 100644
--- a/drivers/block/smart1,2.h
+++ b/drivers/block/smart1,2.h
@@ -95,7 +95,7 @@ static unsigned long smart4_completed(ctlr_info_t *h)
/*
* This hardware returns interrupt pending at a different place and
* it does not tell us if the fifo is empty, we will have check
- * that by getting a 0 back from the comamnd_completed call.
+ * that by getting a 0 back from the command_completed call.
*/
static unsigned long smart4_intr_pending(ctlr_info_t *h)
{
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ecf89cdf006..33a48a80c7e8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,10 +6,12 @@
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
#define PART_BITS 4
static int major, index;
+struct workqueue_struct *virtblk_wq;
struct virtio_blk
{
@@ -26,6 +28,9 @@ struct virtio_blk
mempool_t *pool;
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -291,6 +296,46 @@ static ssize_t virtblk_serial_show(struct device *dev,
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+static void virtblk_config_changed_work(struct work_struct *work)
+{
+ struct virtio_blk *vblk =
+ container_of(work, struct virtio_blk, config_work);
+ struct virtio_device *vdev = vblk->vdev;
+ struct request_queue *q = vblk->disk->queue;
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+
+ /* If capacity is too big, truncate with warning. */
+ if ((sector_t)capacity != capacity) {
+ dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
+ (unsigned long long)capacity);
+ capacity = (sector_t)-1;
+ }
+
+ size = capacity * queue_logical_block_size(q);
+ string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+
+ dev_notice(&vdev->dev,
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ (unsigned long long)capacity,
+ queue_logical_block_size(q),
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
+}
+
+static void virtblk_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ queue_work(virtblk_wq, &vblk->config_work);
+}
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -327,6 +372,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
/* We expect one virtqueue, for output. */
vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -477,6 +523,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ flush_work(&vblk->config_work);
+
/* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
@@ -508,27 +556,47 @@ static unsigned int features[] = {
* Use __refdata to avoid this warning.
*/
static struct virtio_driver __refdata virtio_blk = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = virtblk_probe,
- .remove = __devexit_p(virtblk_remove),
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtblk_probe,
+ .remove = __devexit_p(virtblk_remove),
+ .config_changed = virtblk_config_changed,
};
static int __init init(void)
{
+ int error;
+
+ virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ if (!virtblk_wq)
+ return -ENOMEM;
+
major = register_blkdev(0, "virtblk");
- if (major < 0)
- return major;
- return register_virtio_driver(&virtio_blk);
+ if (major < 0) {
+ error = major;
+ goto out_destroy_workqueue;
+ }
+
+ error = register_virtio_driver(&virtio_blk);
+ if (error)
+ goto out_unregister_blkdev;
+ return 0;
+
+out_unregister_blkdev:
+ unregister_blkdev(major, "virtblk");
+out_destroy_workqueue:
+ destroy_workqueue(virtblk_wq);
+ return error;
}
static void __exit fini(void)
{
unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
+ destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d7aa39e349a6..9cb8668ff5f4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -120,6 +120,10 @@ static DEFINE_SPINLOCK(minor_lock);
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
+#define EMULATED_HD_DISK_MINOR_OFFSET (0)
+#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
+#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
+#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4)
#define DEV_NAME "xvd" /* name in /dev */
@@ -281,7 +285,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = req;
ring_req->id = id;
- ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -317,7 +321,7 @@ static int blkif_queue_request(struct request *req)
rq_data_dir(req) );
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
- ring_req->seg[i] =
+ ring_req->u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
@@ -434,6 +438,65 @@ static void xlvbd_flush(struct blkfront_info *info)
info->feature_flush ? "enabled" : "disabled");
}
+static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
+{
+ int major;
+ major = BLKIF_MAJOR(vdevice);
+ *minor = BLKIF_MINOR(vdevice);
+ switch (major) {
+ case XEN_IDE0_MAJOR:
+ *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
+ *minor = ((*minor / 64) * PARTS_PER_DISK) +
+ EMULATED_HD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_IDE1_MAJOR:
+ *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
+ *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
+ EMULATED_HD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_SCSI_DISK0_MAJOR:
+ *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
+ *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_SCSI_DISK1_MAJOR:
+ case XEN_SCSI_DISK2_MAJOR:
+ case XEN_SCSI_DISK3_MAJOR:
+ case XEN_SCSI_DISK4_MAJOR:
+ case XEN_SCSI_DISK5_MAJOR:
+ case XEN_SCSI_DISK6_MAJOR:
+ case XEN_SCSI_DISK7_MAJOR:
+ *offset = (*minor / PARTS_PER_DISK) +
+ ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
+ EMULATED_SD_DISK_NAME_OFFSET;
+ *minor = *minor +
+ ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
+ EMULATED_SD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_SCSI_DISK8_MAJOR:
+ case XEN_SCSI_DISK9_MAJOR:
+ case XEN_SCSI_DISK10_MAJOR:
+ case XEN_SCSI_DISK11_MAJOR:
+ case XEN_SCSI_DISK12_MAJOR:
+ case XEN_SCSI_DISK13_MAJOR:
+ case XEN_SCSI_DISK14_MAJOR:
+ case XEN_SCSI_DISK15_MAJOR:
+ *offset = (*minor / PARTS_PER_DISK) +
+ ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
+ EMULATED_SD_DISK_NAME_OFFSET;
+ *minor = *minor +
+ ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
+ EMULATED_SD_DISK_MINOR_OFFSET;
+ break;
+ case XENVBD_MAJOR:
+ *offset = *minor / PARTS_PER_DISK;
+ break;
+ default:
+ printk(KERN_WARNING "blkfront: your disk configuration is "
+ "incorrect, please use an xvd device instead\n");
+ return -ENODEV;
+ }
+ return 0;
+}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
@@ -441,7 +504,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
{
struct gendisk *gd;
int nr_minors = 1;
- int err = -ENODEV;
+ int err;
unsigned int offset;
int minor;
int nr_parts;
@@ -456,12 +519,20 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
}
if (!VDEV_IS_EXTENDED(info->vdevice)) {
- minor = BLKIF_MINOR(info->vdevice);
- nr_parts = PARTS_PER_DISK;
+ err = xen_translate_vdev(info->vdevice, &minor, &offset);
+ if (err)
+ return err;
+ nr_parts = PARTS_PER_DISK;
} else {
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
+ offset = minor / nr_parts;
+ if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
+ printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
+ "emulated IDE disks,\n\t choose an xvd device name"
+ "from xvde on\n", info->vdevice);
}
+ err = -ENODEV;
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -475,8 +546,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (gd == NULL)
goto release;
- offset = minor / nr_parts;
-
if (nr_minors > 1) {
if (offset < 26)
sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
@@ -615,7 +684,7 @@ static void blkif_completion(struct blk_shadow *s)
{
int i;
for (i = 0; i < s->req.nr_segments; i++)
- gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+ gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -932,7 +1001,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Rewrite any grant references invalidated by susp/resume. */
for (j = 0; j < req->nr_segments; j++)
gnttab_grant_foreign_access_ref(
- req->seg[j].gref,
+ req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
rq_data_dir(info->shadow[req->id].request));
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 829161edae53..2c590a796aa1 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1195,16 +1195,13 @@ static struct platform_driver ace_platform_driver = {
*/
#if defined(CONFIG_OF)
-static int __devinit
-ace_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ace_of_probe(struct platform_device *op)
{
struct resource res;
resource_size_t physaddr;
const u32 *id;
int irq, bus_width, rc;
- dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
-
/* device id */
id = of_get_property(op->dev.of_node, "port-number", NULL);
@@ -1245,7 +1242,7 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
};
MODULE_DEVICE_TABLE(of, ace_of_match);
-static struct of_platform_driver ace_of_driver = {
+static struct platform_driver ace_of_driver = {
.probe = ace_of_probe,
.remove = __devexit_p(ace_of_remove),
.driver = {
@@ -1259,12 +1256,12 @@ static struct of_platform_driver ace_of_driver = {
static inline int __init ace_of_register(void)
{
pr_debug("xsysace: registering OF binding\n");
- return of_register_platform_driver(&ace_of_driver);
+ return platform_driver_register(&ace_of_driver);
}
static inline void __exit ace_of_unregister(void)
{
- of_unregister_platform_driver(&ace_of_driver);
+ platform_driver_unregister(&ace_of_driver);
}
#else /* CONFIG_OF */
/* CONFIG_OF not enabled; do nothing helpers */
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 333c21289d97..5577ed656e2f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -31,6 +31,30 @@
#define VERSION "1.0"
+#define ATH3K_DNLOAD 0x01
+#define ATH3K_GETSTATE 0x05
+#define ATH3K_SET_NORMAL_MODE 0x07
+#define ATH3K_GETVERSION 0x09
+#define USB_REG_SWITCH_VID_PID 0x0a
+
+#define ATH3K_MODE_MASK 0x3F
+#define ATH3K_NORMAL_MODE 0x0E
+
+#define ATH3K_PATCH_UPDATE 0x80
+#define ATH3K_SYSCFG_UPDATE 0x40
+
+#define ATH3K_XTAL_FREQ_26M 0x00
+#define ATH3K_XTAL_FREQ_40M 0x01
+#define ATH3K_XTAL_FREQ_19P2 0x02
+#define ATH3K_NAME_LEN 0xFF
+
+struct ath3k_version {
+ unsigned int rom_version;
+ unsigned int build_version;
+ unsigned int ram_version;
+ unsigned char ref_clock;
+ unsigned char reserved[0x07];
+};
static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 */
@@ -41,13 +65,32 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
+
+ /* Atheros AR3012 with sflash firmware */
+ { USB_DEVICE(0x0CF3, 0x3004) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ath3k_table);
+#define BTUSB_ATH3012 0x80
+/* This table is to load patch and sysconfig files
+ * for AR3012 */
+static struct usb_device_id ath3k_blist_tbl[] = {
+
+ /* Atheros AR3012 with sflash firmware */
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+
+ { } /* Terminating entry */
+};
+
#define USB_REQ_DFU_DNLOAD 1
#define BULK_SIZE 4096
+#define FW_HDR_SIZE 20
static int ath3k_load_firmware(struct usb_device *udev,
const struct firmware *firmware)
@@ -103,28 +146,265 @@ error:
return err;
}
+static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
+{
+ int pipe = 0;
+
+ pipe = usb_rcvctrlpipe(udev, 0);
+ return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ state, 0x01, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_get_version(struct usb_device *udev,
+ struct ath3k_version *version)
+{
+ int pipe = 0;
+
+ pipe = usb_rcvctrlpipe(udev, 0);
+ return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
+ sizeof(struct ath3k_version),
+ USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_load_fwfile(struct usb_device *udev,
+ const struct firmware *firmware)
+{
+ u8 *send_buf;
+ int err, pipe, len, size, count, sent = 0;
+ int ret;
+
+ count = firmware->size;
+
+ send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+ if (!send_buf) {
+ BT_ERR("Can't allocate memory chunk for firmware");
+ return -ENOMEM;
+ }
+
+ size = min_t(uint, count, FW_HDR_SIZE);
+ memcpy(send_buf, firmware->data, size);
+
+ pipe = usb_sndctrlpipe(udev, 0);
+ ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD,
+ USB_TYPE_VENDOR, 0, 0, send_buf,
+ size, USB_CTRL_SET_TIMEOUT);
+ if (ret < 0) {
+ BT_ERR("Can't change to loading configuration err");
+ kfree(send_buf);
+ return ret;
+ }
+
+ sent += size;
+ count -= size;
+
+ while (count) {
+ size = min_t(uint, count, BULK_SIZE);
+ pipe = usb_sndbulkpipe(udev, 0x02);
+
+ memcpy(send_buf, firmware->data + sent, size);
+
+ err = usb_bulk_msg(udev, pipe, send_buf, size,
+ &len, 3000);
+ if (err || (len != size)) {
+ BT_ERR("Error in firmware loading err = %d,"
+ "len = %d, size = %d", err, len, size);
+ kfree(send_buf);
+ return err;
+ }
+ sent += size;
+ count -= size;
+ }
+
+ kfree(send_buf);
+ return 0;
+}
+
+static int ath3k_switch_pid(struct usb_device *udev)
+{
+ int pipe = 0;
+
+ pipe = usb_sndctrlpipe(udev, 0);
+ return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID,
+ USB_TYPE_VENDOR, 0, 0,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_set_normal_mode(struct usb_device *udev)
+{
+ unsigned char fw_state;
+ int pipe = 0, ret;
+
+ ret = ath3k_get_state(udev, &fw_state);
+ if (ret < 0) {
+ BT_ERR("Can't get state to change to normal mode err");
+ return ret;
+ }
+
+ if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) {
+ BT_DBG("firmware was already in normal mode");
+ return 0;
+ }
+
+ pipe = usb_sndctrlpipe(udev, 0);
+ return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE,
+ USB_TYPE_VENDOR, 0, 0,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_load_patch(struct usb_device *udev)
+{
+ unsigned char fw_state;
+ char filename[ATH3K_NAME_LEN] = {0};
+ const struct firmware *firmware;
+ struct ath3k_version fw_version, pt_version;
+ int ret;
+
+ ret = ath3k_get_state(udev, &fw_state);
+ if (ret < 0) {
+ BT_ERR("Can't get state to change to load ram patch err");
+ return ret;
+ }
+
+ if (fw_state & ATH3K_PATCH_UPDATE) {
+ BT_DBG("Patch was already downloaded");
+ return 0;
+ }
+
+ ret = ath3k_get_version(udev, &fw_version);
+ if (ret < 0) {
+ BT_ERR("Can't get version to change to load ram patch err");
+ return ret;
+ }
+
+ snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
+ fw_version.rom_version);
+
+ ret = request_firmware(&firmware, filename, &udev->dev);
+ if (ret < 0) {
+ BT_ERR("Patch file not found %s", filename);
+ return ret;
+ }
+
+ pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
+ pt_version.build_version = *(int *)
+ (firmware->data + firmware->size - 4);
+
+ if ((pt_version.rom_version != fw_version.rom_version) ||
+ (pt_version.build_version <= fw_version.build_version)) {
+ BT_ERR("Patch file version did not match with firmware");
+ release_firmware(firmware);
+ return -EINVAL;
+ }
+
+ ret = ath3k_load_fwfile(udev, firmware);
+ release_firmware(firmware);
+
+ return ret;
+}
+
+static int ath3k_load_syscfg(struct usb_device *udev)
+{
+ unsigned char fw_state;
+ char filename[ATH3K_NAME_LEN] = {0};
+ const struct firmware *firmware;
+ struct ath3k_version fw_version;
+ int clk_value, ret;
+
+ ret = ath3k_get_state(udev, &fw_state);
+ if (ret < 0) {
+ BT_ERR("Can't get state to change to load configration err");
+ return -EBUSY;
+ }
+
+ ret = ath3k_get_version(udev, &fw_version);
+ if (ret < 0) {
+ BT_ERR("Can't get version to change to load ram patch err");
+ return ret;
+ }
+
+ switch (fw_version.ref_clock) {
+
+ case ATH3K_XTAL_FREQ_26M:
+ clk_value = 26;
+ break;
+ case ATH3K_XTAL_FREQ_40M:
+ clk_value = 40;
+ break;
+ case ATH3K_XTAL_FREQ_19P2:
+ clk_value = 19;
+ break;
+ default:
+ clk_value = 0;
+ break;
+ }
+
+ snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+ fw_version.rom_version, clk_value, ".dfu");
+
+ ret = request_firmware(&firmware, filename, &udev->dev);
+ if (ret < 0) {
+ BT_ERR("Configuration file not found %s", filename);
+ return ret;
+ }
+
+ ret = ath3k_load_fwfile(udev, firmware);
+ release_firmware(firmware);
+
+ return ret;
+}
+
static int ath3k_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
const struct firmware *firmware;
struct usb_device *udev = interface_to_usbdev(intf);
+ int ret;
BT_DBG("intf %p id %p", intf, id);
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
- return -EIO;
+ /* match device ID in ath3k blacklist table */
+ if (!id->driver_info) {
+ const struct usb_device_id *match;
+ match = usb_match_id(intf, ath3k_blist_tbl);
+ if (match)
+ id = match;
}
- if (ath3k_load_firmware(udev, firmware)) {
- release_firmware(firmware);
+ /* load patch and sysconfig files for AR3012 */
+ if (id->driver_info & BTUSB_ATH3012) {
+ ret = ath3k_load_patch(udev);
+ if (ret < 0) {
+ BT_ERR("Loading patch file failed");
+ return ret;
+ }
+ ret = ath3k_load_syscfg(udev);
+ if (ret < 0) {
+ BT_ERR("Loading sysconfig file failed");
+ return ret;
+ }
+ ret = ath3k_set_normal_mode(udev);
+ if (ret < 0) {
+ BT_ERR("Set normal mode failed");
+ return ret;
+ }
+ ath3k_switch_pid(udev);
+ return 0;
+ }
+
+ if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
+ BT_ERR("Error loading firmware");
return -EIO;
}
+
+ ret = ath3k_load_firmware(udev, firmware);
release_firmware(firmware);
- return 0;
+ return ret;
}
static void ath3k_disconnect(struct usb_interface *intf)
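
The new ath3k_load_fwfile() above follows a simple split: the first FW_HDR_SIZE bytes go out over the control pipe, the remainder in BULK_SIZE chunks over the bulk pipe. The following is a schematic, non-USB sketch of that loop; send_ctrl() and send_bulk() are invented stand-ins for the usb_control_msg()/usb_bulk_msg() calls in the driver.

/*
 * Chunked firmware transfer pattern, illustrated without any USB dependency.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define FW_HDR_SIZE 20
#define BULK_SIZE   4096

static int send_ctrl(const unsigned char *buf, size_t len)
{
	(void)buf;
	printf("ctrl xfer: %zu bytes\n", len);
	return 0;
}

static int send_bulk(const unsigned char *buf, size_t len)
{
	(void)buf;
	printf("bulk xfer: %zu bytes\n", len);
	return 0;
}

static int load_fwfile(const unsigned char *data, size_t count)
{
	unsigned char chunk[BULK_SIZE];
	size_t size, sent = 0;
	int err;

	/* Header first, over the control path. */
	size = count < FW_HDR_SIZE ? count : FW_HDR_SIZE;
	memcpy(chunk, data, size);
	err = send_ctrl(chunk, size);
	if (err)
		return err;
	sent += size;
	count -= size;

	/* Then the payload, one bulk-sized chunk at a time. */
	while (count) {
		size = count < BULK_SIZE ? count : BULK_SIZE;
		memcpy(chunk, data + sent, size);
		err = send_bulk(chunk, size);
		if (err)
			return err;
		sent += size;
		count -= size;
	}
	return 0;
}

int main(void)
{
	static unsigned char fw[10000];	/* stand-in firmware image */

	return load_fwfile(fw, sizeof(fw));	/* 20 + 4096 + 4096 + 1788 bytes */
}
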
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 4cefa91e6c34..866811428e20 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -105,6 +105,12 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+ /* Atheros 3012 with sflash firmware */
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -427,7 +433,7 @@ static void btusb_isoc_complete(struct urb *urb)
}
}
-static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
int i, offset = 0;
@@ -711,15 +717,11 @@ static int btusb_send_frame(struct sk_buff *skb)
pipe = usb_sndisocpipe(data->udev,
data->isoc_tx_ep->bEndpointAddress);
- urb->dev = data->udev;
- urb->pipe = pipe;
- urb->context = skb;
- urb->complete = btusb_isoc_tx_complete;
- urb->interval = data->isoc_tx_ep->bInterval;
+ usb_fill_int_urb(urb, data->udev, pipe,
+ skb->data, skb->len, btusb_isoc_tx_complete,
+ skb, data->isoc_tx_ep->bInterval);
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = skb->data;
- urb->transfer_buffer_length = skb->len;
__fill_isoc_descriptor(urb, skb->len,
le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
@@ -778,7 +780,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
}
}
-static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
{
struct btusb_data *data = hdev->driver_data;
struct usb_interface *intf = data->isoc;
@@ -829,7 +831,7 @@ static void btusb_work(struct work_struct *work)
if (hdev->conn_hash.sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc);
+ err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
if (err < 0) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -858,7 +860,7 @@ static void btusb_work(struct work_struct *work)
__set_isoc_interface(hdev, 0);
if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc);
+ usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
}
}
@@ -1041,8 +1043,6 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
- usb_enable_autosuspend(interface_to_usbdev(intf));
-
return 0;
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 3c6cabcb7d84..48ad2a7ab080 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -398,6 +398,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
hdev->destruct = hci_uart_destruct;
+ hdev->parent = hu->tty->dev;
hdev->owner = THIS_MODULE;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85706ef..780498d76581 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
#else
printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd_nb_misc_ids))
+ if (!pci_dev_present(amd_nb_misc_ids)) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
+ }
/* Look for any AGP bridge */
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0)
+ if (err == 0 && agp_bridges_found == 0) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
+ }
}
return err;
}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
+ void __iomem *i9xx_flush_page;
char *i81x_gtt_table;
- struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
- if (intel_private.i8xx_flush_page) {
- kunmap(intel_private.i8xx_flush_page);
- intel_private.i8xx_flush_page = NULL;
- }
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
*/
static void i830_chipset_flush(void)
{
- unsigned int *pg = intel_private.i8xx_flush_page;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+ /* So far we have only seen documentation for this magic bit on 855GM;
+ * we hope it exists on the other gen2 chipsets...
+ *
+ * Also works as advertised on my 845G.
+ */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
- memset(pg, 0, 1024);
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ udelay(50);
+ }
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- intel_i830_setup_flush();
-
return 0;
}
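
The rewritten i830_chipset_flush() above replaces the flush page with a trigger-and-poll sequence: set the self-clearing bit in the HIC register, then spin until the hardware drops it or a one-second deadline passes. Here is a standalone sketch of that poll-until-clear-with-timeout idiom; mmio_read(), mmio_write() and the simulated hardware behaviour are invented for the example.

/*
 * Poll-until-clear with a deadline, modelled in userspace; not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define FLUSH_TRIGGER (1u << 31)
#define TIMEOUT_MS    1000

static uint32_t fake_reg;
static int polls;

static uint32_t mmio_read(void)      { return fake_reg; }
static void mmio_write(uint32_t val) { fake_reg = val; }

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool trigger_and_wait(void)
{
	uint64_t deadline = now_ms() + TIMEOUT_MS;

	/* Kick the flush by setting the self-clearing trigger bit. */
	mmio_write(mmio_read() | FLUSH_TRIGGER);

	while (mmio_read() & FLUSH_TRIGGER) {
		if (now_ms() > deadline)
			return false;	/* give up quietly, as the patch does */
		/* Pretend the hardware finishes after a few polls. */
		if (++polls == 3)
			fake_reg &= ~FLUSH_TRIGGER;
	}
	return true;
}

int main(void)
{
	printf("flush %s\n", trigger_and_wait() ? "completed" : "timed out");
	return 0;
}
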
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index d31483c54883..beecd1cf9b99 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -198,3 +198,15 @@ config HW_RANDOM_NOMADIK
module will be called nomadik-rng.
If unsure, say Y.
+
+config HW_RANDOM_PICOXCELL
+ tristate "Picochip picoXcell true random number generator support"
+ depends on HW_RANDOM && ARCH_PICOXCELL && PICOXCELL_PC3X3
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Picochip PC3x3 and later devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called picoxcell-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 4273308aa1e3..3db4eb8b19c0 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
+obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index a3f5e381e746..43ac61978d8b 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -619,15 +619,17 @@ static void __devinit n2rng_driver_version(void)
pr_info("%s", version);
}
-static int __devinit n2rng_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit n2rng_probe(struct platform_device *op)
{
- int victoria_falls = (match->data != NULL);
+ int victoria_falls;
int err = -ENOMEM;
struct n2rng *np;
- n2rng_driver_version();
+ if (!op->dev.of_match)
+ return -EINVAL;
+ victoria_falls = (op->dev.of_match->data != NULL);
+ n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
goto out;
@@ -750,7 +752,7 @@ static const struct of_device_id n2rng_match[] = {
};
MODULE_DEVICE_TABLE(of, n2rng_match);
-static struct of_platform_driver n2rng_driver = {
+static struct platform_driver n2rng_driver = {
.driver = {
.name = "n2rng",
.owner = THIS_MODULE,
@@ -762,12 +764,12 @@ static struct of_platform_driver n2rng_driver = {
static int __init n2rng_init(void)
{
- return of_register_platform_driver(&n2rng_driver);
+ return platform_driver_register(&n2rng_driver);
}
static void __exit n2rng_exit(void)
{
- of_unregister_platform_driver(&n2rng_driver);
+ platform_driver_unregister(&n2rng_driver);
}
module_init(n2rng_init);
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a348c7e9aa0b..dd1d143eb8ea 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -39,7 +39,7 @@ static struct hwrng nmk_rng = {
.read = nmk_rng_read,
};
-static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
void __iomem *base;
int ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 06aad0831c73..2cc755a64302 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -91,7 +91,7 @@ static struct hwrng omap_rng_ops = {
static int __devinit omap_rng_probe(struct platform_device *pdev)
{
- struct resource *res, *mem;
+ struct resource *res;
int ret;
/*
@@ -116,14 +116,12 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (mem == NULL) {
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
ret = -EBUSY;
goto err_region;
}
- dev_set_drvdata(&pdev->dev, mem);
+ dev_set_drvdata(&pdev->dev, res);
rng_base = ioremap(res->start, resource_size(res));
if (!rng_base) {
ret = -ENOMEM;
@@ -146,7 +144,7 @@ err_register:
iounmap(rng_base);
rng_base = NULL;
err_ioremap:
- release_resource(mem);
+ release_mem_region(res->start, resource_size(res));
err_region:
if (cpu_is_omap24xx()) {
clk_disable(rng_ick);
@@ -157,7 +155,7 @@ err_region:
static int __exit omap_rng_remove(struct platform_device *pdev)
{
- struct resource *mem = dev_get_drvdata(&pdev->dev);
+ struct resource *res = dev_get_drvdata(&pdev->dev);
hwrng_unregister(&omap_rng_ops);
@@ -170,7 +168,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
clk_put(rng_ick);
}
- release_resource(mem);
+ release_mem_region(res->start, resource_size(res));
rng_base = NULL;
return 0;
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index a31c830ca8cd..1d504815e6db 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -94,8 +94,7 @@ static struct hwrng pasemi_rng = {
.data_read = pasemi_rng_data_read,
};
-static int __devinit rng_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit rng_probe(struct platform_device *ofdev)
{
void __iomem *rng_regs;
struct device_node *rng_np = ofdev->dev.of_node;
@@ -139,7 +138,7 @@ static struct of_device_id rng_match[] = {
{ },
};
-static struct of_platform_driver rng_driver = {
+static struct platform_driver rng_driver = {
.driver = {
.name = "pasemi-rng",
.owner = THIS_MODULE,
@@ -151,13 +150,13 @@ static struct of_platform_driver rng_driver = {
static int __init rng_init(void)
{
- return of_register_platform_driver(&rng_driver);
+ return platform_driver_register(&rng_driver);
}
module_init(rng_init);
static void __exit rng_exit(void)
{
- of_unregister_platform_driver(&rng_driver);
+ platform_driver_unregister(&rng_driver);
}
module_exit(rng_exit);
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
new file mode 100644
index 000000000000..990d55a5e3e8
--- /dev/null
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * All enquiries to support@picochip.com
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DATA_REG_OFFSET 0x0200
+#define CSR_REG_OFFSET 0x0278
+#define CSR_OUT_EMPTY_MASK (1 << 24)
+#define CSR_FAULT_MASK (1 << 1)
+#define TRNG_BLOCK_RESET_MASK (1 << 0)
+#define TAI_REG_OFFSET 0x0380
+
+/*
+ * The maximum amount of time in microseconds to spend waiting for data if the
+ * core wants us to wait. The TRNG should generate 32 bits every 320ns so a
+ * timeout of 20us seems reasonable. The TRNG does builtin tests of the data
+ * for randomness so we can't always assume there is data present.
+ */
+#define PICO_TRNG_TIMEOUT 20
+
+static void __iomem *rng_base;
+static struct clk *rng_clk;
+struct device *rng_dev;
+
+static inline u32 picoxcell_trng_read_csr(void)
+{
+ return __raw_readl(rng_base + CSR_REG_OFFSET);
+}
+
+static inline bool picoxcell_trng_is_empty(void)
+{
+ return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK;
+}
+
+/*
+ * Take the random number generator out of reset and make sure the interrupts
+ * are masked. We shouldn't need to get large amounts of random bytes so just
+ * poll the status register. The hardware generates 32 bits every 320ns so we
+ * shouldn't have to wait long enough to warrant waiting for an IRQ.
+ */
+static void picoxcell_trng_start(void)
+{
+ __raw_writel(0, rng_base + TAI_REG_OFFSET);
+ __raw_writel(0, rng_base + CSR_REG_OFFSET);
+}
+
+static void picoxcell_trng_reset(void)
+{
+ __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET);
+ __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET);
+ picoxcell_trng_start();
+}
+
+/*
+ * Get some random data from the random number generator. The hw_random core
+ * layer provides us with locking.
+ */
+static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ int i;
+
+ /* Wait for some data to become available. */
+ for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) {
+ if (!wait)
+ return 0;
+
+ udelay(1);
+ }
+
+ if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) {
+ dev_err(rng_dev, "fault detected, resetting TRNG\n");
+ picoxcell_trng_reset();
+ return -EIO;
+ }
+
+ if (i == PICO_TRNG_TIMEOUT)
+ return 0;
+
+ *(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET);
+ return sizeof(u32);
+}
+
+static struct hwrng picoxcell_trng = {
+ .name = "picoxcell",
+ .read = picoxcell_trng_read,
+};
+
+static int picoxcell_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem) {
+ dev_warn(&pdev->dev, "no memory resource\n");
+ return -ENOMEM;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "picoxcell_trng")) {
+ dev_warn(&pdev->dev, "unable to request io mem\n");
+ return -EBUSY;
+ }
+
+ rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!rng_base) {
+ dev_warn(&pdev->dev, "unable to remap io mem\n");
+ return -ENOMEM;
+ }
+
+ rng_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_warn(&pdev->dev, "no clk\n");
+ return PTR_ERR(rng_clk);
+ }
+
+ ret = clk_enable(rng_clk);
+ if (ret) {
+ dev_warn(&pdev->dev, "unable to enable clk\n");
+ goto err_enable;
+ }
+
+ picoxcell_trng_start();
+ ret = hwrng_register(&picoxcell_trng);
+ if (ret)
+ goto err_register;
+
+ rng_dev = &pdev->dev;
+ dev_info(&pdev->dev, "pixoxcell random number generator active\n");
+
+ return 0;
+
+err_register:
+ clk_disable(rng_clk);
+err_enable:
+ clk_put(rng_clk);
+
+ return ret;
+}
+
+static int __devexit picoxcell_trng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&picoxcell_trng);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int picoxcell_trng_suspend(struct device *dev)
+{
+ clk_disable(rng_clk);
+
+ return 0;
+}
+
+static int picoxcell_trng_resume(struct device *dev)
+{
+ return clk_enable(rng_clk);
+}
+
+static const struct dev_pm_ops picoxcell_trng_pm_ops = {
+ .suspend = picoxcell_trng_suspend,
+ .resume = picoxcell_trng_resume,
+};
+#endif /* CONFIG_PM */
+
+static struct platform_driver picoxcell_trng_driver = {
+ .probe = picoxcell_trng_probe,
+ .remove = __devexit_p(picoxcell_trng_remove),
+ .driver = {
+ .name = "picoxcell-trng",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &picoxcell_trng_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init picoxcell_trng_init(void)
+{
+ return platform_driver_register(&picoxcell_trng_driver);
+}
+module_init(picoxcell_trng_init);
+
+static void __exit picoxcell_trng_exit(void)
+{
+ platform_driver_unregister(&picoxcell_trng_driver);
+}
+module_exit(picoxcell_trng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7855f9f45b8e..dcfc39ca753e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -66,13 +66,10 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/pnp.h>
-
-#ifdef CONFIG_PPC_OF
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#endif
#define PFX "ipmi_si: "
@@ -116,13 +113,7 @@ static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
#define DEVICE_NAME "ipmi_si"
-static struct platform_driver ipmi_driver = {
- .driver = {
- .name = DEVICE_NAME,
- .bus = &platform_bus_type
- }
-};
-
+static struct platform_driver ipmi_driver;
/*
* Indexes into stats[] in smi_info below.
@@ -308,9 +299,6 @@ static int pci_registered;
#ifdef CONFIG_ACPI
static int pnp_registered;
#endif
-#ifdef CONFIG_PPC_OF
-static int of_registered;
-#endif
static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
static int num_max_busy_us;
@@ -1860,8 +1848,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
return rv;
}
-static void __devinit hardcode_find_bmc(void)
+static int __devinit hardcode_find_bmc(void)
{
+ int ret = -ENODEV;
int i;
struct smi_info *info;
@@ -1871,7 +1860,7 @@ static void __devinit hardcode_find_bmc(void)
info = smi_info_alloc();
if (!info)
- return;
+ return -ENOMEM;
info->addr_source = SI_HARDCODED;
printk(KERN_INFO PFX "probing via hardcoded address\n");
@@ -1924,10 +1913,12 @@ static void __devinit hardcode_find_bmc(void)
if (!add_smi(info)) {
if (try_smi_init(info))
cleanup_one_si(info);
+ ret = 0;
} else {
kfree(info);
}
}
+ return ret;
}
#ifdef CONFIG_ACPI
@@ -2555,11 +2546,9 @@ static struct pci_driver ipmi_pci_driver = {
};
#endif /* CONFIG_PCI */
-
-#ifdef CONFIG_PPC_OF
-static int __devinit ipmi_of_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit ipmi_probe(struct platform_device *dev)
{
+#ifdef CONFIG_OF
struct smi_info *info;
struct resource resource;
const __be32 *regsize, *regspacing, *regshift;
@@ -2569,6 +2558,9 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
dev_info(&dev->dev, "probing via device tree\n");
+ if (!dev->dev.of_match)
+ return -EINVAL;
+
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_warn(&dev->dev, PFX "invalid address from OF\n");
@@ -2601,7 +2593,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
return -ENOMEM;
}
- info->si_type = (enum si_type) match->data;
+ info->si_type = (enum si_type) dev->dev.of_match->data;
info->addr_source = SI_DEVICETREE;
info->irq_setup = std_irq_setup;
@@ -2632,13 +2624,15 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
kfree(info);
return -EBUSY;
}
-
+#endif
return 0;
}
-static int __devexit ipmi_of_remove(struct platform_device *dev)
+static int __devexit ipmi_remove(struct platform_device *dev)
{
+#ifdef CONFIG_OF
cleanup_one_si(dev_get_drvdata(&dev->dev));
+#endif
return 0;
}
@@ -2653,16 +2647,15 @@ static struct of_device_id ipmi_match[] =
{},
};
-static struct of_platform_driver ipmi_of_platform_driver = {
+static struct platform_driver ipmi_driver = {
.driver = {
- .name = "ipmi",
+ .name = DEVICE_NAME,
.owner = THIS_MODULE,
.of_match_table = ipmi_match,
},
- .probe = ipmi_of_probe,
- .remove = __devexit_p(ipmi_of_remove),
+ .probe = ipmi_probe,
+ .remove = __devexit_p(ipmi_remove),
};
-#endif /* CONFIG_PPC_OF */
static int wait_for_msg_done(struct smi_info *smi_info)
{
@@ -3340,8 +3333,7 @@ static int __devinit init_ipmi_si(void)
return 0;
initialized = 1;
- /* Register the device drivers. */
- rv = driver_register(&ipmi_driver.driver);
+ rv = platform_driver_register(&ipmi_driver);
if (rv) {
printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
return rv;
@@ -3365,15 +3357,9 @@ static int __devinit init_ipmi_si(void)
printk(KERN_INFO "IPMI System Interface driver.\n");
- hardcode_find_bmc();
-
/* If the user gave us a device, they presumably want us to use it */
- mutex_lock(&smi_infos_lock);
- if (!list_empty(&smi_infos)) {
- mutex_unlock(&smi_infos_lock);
+ if (!hardcode_find_bmc())
return 0;
- }
- mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
rv = pci_register_driver(&ipmi_pci_driver);
@@ -3396,11 +3382,6 @@ static int __devinit init_ipmi_si(void)
spmi_find_bmc();
#endif
-#ifdef CONFIG_PPC_OF
- of_register_platform_driver(&ipmi_of_platform_driver);
- of_registered = 1;
-#endif
-
/* We prefer devices with interrupts, but in the case of a machine
with multiple BMCs we assume that there will be several instances
of a given type so if we succeed in registering a type then also
@@ -3548,17 +3529,12 @@ static void __exit cleanup_ipmi_si(void)
pnp_unregister_driver(&ipmi_pnp_driver);
#endif
-#ifdef CONFIG_PPC_OF
- if (of_registered)
- of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
+ platform_driver_unregister(&ipmi_driver);
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
mutex_unlock(&smi_infos_lock);
-
- driver_unregister(&ipmi_driver.driver);
}
module_exit(cleanup_ipmi_si);
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index e6d75627c6c8..33dc2298af73 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -53,6 +53,8 @@ MODULE_LICENSE("GPL");
#define RTC_BITS 55 /* 55 bits for this implementation */
+static struct k_clock sgi_clock;
+
extern unsigned long sn_rtc_cycles_per_second;
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
@@ -487,7 +489,7 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
return 0;
};
-static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
+static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
{
u64 nsec;
@@ -763,15 +765,21 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
return err;
}
+static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
+{
+ tp->tv_sec = 0;
+ tp->tv_nsec = sgi_clock_period;
+ return 0;
+}
+
static struct k_clock sgi_clock = {
- .res = 0,
- .clock_set = sgi_clock_set,
- .clock_get = sgi_clock_get,
- .timer_create = sgi_timer_create,
- .nsleep = do_posix_clock_nonanosleep,
- .timer_set = sgi_timer_set,
- .timer_del = sgi_timer_del,
- .timer_get = sgi_timer_get
+ .clock_set = sgi_clock_set,
+ .clock_get = sgi_clock_get,
+ .clock_getres = sgi_clock_getres,
+ .timer_create = sgi_timer_create,
+ .timer_set = sgi_timer_set,
+ .timer_del = sgi_timer_del,
+ .timer_get = sgi_timer_get
};
/**
@@ -831,8 +839,8 @@ static int __init mmtimer_init(void)
(unsigned long) node);
}
- sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
- register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);
+ sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
+ posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
sn_rtc_cycles_per_second/(unsigned long)1E6);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a2e603..bcbbc71febb7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
test_bit(IS_ANY_T1, &dev->flags))) {
DEBUGP(4, dev, "Perform AUTOPPS\n");
set_bit(IS_AUTOPPS_ACT, &dev->flags);
- ptsreq.protocol = ptsreq.protocol =
- (0x01 << dev->proto);
+ ptsreq.protocol = (0x01 << dev->proto);
ptsreq.flags = 0x01;
ptsreq.pts1 = 0x00;
ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4d691d..444155a305ae 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
struct ipw_dev *ipw = priv_data;
- struct resource *io_resource;
int ret;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
if (ret)
return ret;
- io_resource = request_region(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit;
+ }
p_dev->resource[2]->flags |=
WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
if (ret != 0)
- goto exit2;
+ goto exit1;
ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
- ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+ ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
- request_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit2;
+ }
p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
WIN_ENABLE;
p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
if (ret != 0)
- goto exit2;
+ goto exit3;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
- request_mem_region(p_dev->resource[3]->start,
- resource_size(p_dev->resource[3]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit4;
+ }
return 0;
+exit4:
+ iounmap(ipw->attr_memory);
exit3:
+ release_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
exit2:
- if (ipw->common_memory) {
- release_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]));
- iounmap(ipw->common_memory);
- }
+ iounmap(ipw->common_memory);
exit1:
- release_resource(io_resource);
+ release_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
+exit:
pcmcia_disable_device(p_dev);
- return -1;
+ return ret;
}
static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
+ release_region(ipw->link->resource[0]->start,
+ resource_size(ipw->link->resource[0]));
if (ipw->common_memory) {
release_mem_region(ipw->link->resource[2]->start,
resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 72a4fcb17745..5e29e8031bbc 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -128,6 +128,7 @@
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
* void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
*
* add_input_randomness() uses the input layer interrupt timing, as well as
* the event type information from the hardware.
@@ -136,9 +137,15 @@
* inputs to the entropy pool. Note that not all interrupts are good
* sources of randomness! For example, the timer interrupt is not a
* good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Disk interrupts are
- * a better measure, since the timing of the disk interrupts are more
- * unpredictable.
+ * regular, and hence predictable to an attacker. Network Interface
+ * Controller interrupts are a better measure, since the timing of the
+ * NIC interrupts are more unpredictable.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
*
* All of these routines try to estimate how many bits of randomness a
* particular randomness source provides. They do this by keeping track of the
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 36e0fa161c2b..1f46f1cd9225 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED) {
+ if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
- /* if duration is 0, it's because chip->vendor.duration wasn't */
- /* filled yet, so we set the lowest timeout just to give enough */
- /* time for tpm_get_timeouts() to succeed */
- return (duration <= 0 ? HZ : duration);
- } else
+ if (duration <= 0)
return 2 * 60 * HZ;
+ else
+ return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 9f2272e6de1c..d3c9d755ed98 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -714,20 +714,29 @@ static int __devexit hwicap_remove(struct device *dev)
return 0; /* success */
}
-static int __devinit hwicap_drv_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int __devinit hwicap_of_probe(struct platform_device *op)
{
- struct resource *res;
- const struct config_registers *regs;
+ struct resource res;
+ const unsigned int *id;
const char *family;
+ int rc;
+ const struct hwicap_driver_config *config = op->dev.of_match->data;
+ const struct config_registers *regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
+ if (rc) {
+ dev_err(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
- family = pdev->dev.platform_data;
+ family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
if (!strcmp(family, "virtex2p")) {
@@ -738,54 +747,33 @@ static int __devinit hwicap_drv_probe(struct platform_device *pdev)
regs = &v5_config_registers;
}
}
-
- return hwicap_setup(&pdev->dev, pdev->id, res,
- &buffer_icap_config, regs);
+ return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
+ regs);
}
-
-static int __devexit hwicap_drv_remove(struct platform_device *pdev)
+#else
+static inline int hwicap_of_probe(struct platform_device *op)
{
- return hwicap_remove(&pdev->dev);
+ return -EINVAL;
}
+#endif /* CONFIG_OF */
-static struct platform_driver hwicap_platform_driver = {
- .probe = hwicap_drv_probe,
- .remove = hwicap_drv_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
-};
-
-/* ---------------------------------------------------------------------
- * OF bus binding
- */
-
-#if defined(CONFIG_OF)
-static int __devinit
-hwicap_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit hwicap_drv_probe(struct platform_device *pdev)
{
- struct resource res;
- const unsigned int *id;
- const char *family;
- int rc;
- const struct hwicap_driver_config *config = match->data;
+ struct resource *res;
const struct config_registers *regs;
+ const char *family;
- dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
-
- rc = of_address_to_resource(op->dev.of_node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
- }
+ if (pdev->dev.of_match)
+ return hwicap_of_probe(pdev);
- id = of_get_property(op->dev.of_node, "port-number", NULL);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
- family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
+ family = pdev->dev.platform_data;
if (family) {
if (!strcmp(family, "virtex2p")) {
@@ -796,50 +784,38 @@ hwicap_of_probe(struct platform_device *op, const struct of_device_id *match)
regs = &v5_config_registers;
}
}
- return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
- regs);
+
+ return hwicap_setup(&pdev->dev, pdev->id, res,
+ &buffer_icap_config, regs);
}
-static int __devexit hwicap_of_remove(struct platform_device *op)
+static int __devexit hwicap_drv_remove(struct platform_device *pdev)
{
- return hwicap_remove(&op->dev);
+ return hwicap_remove(&pdev->dev);
}
-/* Match table for of_platform binding */
+#ifdef CONFIG_OF
+/* Match table for device tree binding */
static const struct of_device_id __devinitconst hwicap_of_match[] = {
{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
{ .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
{},
};
MODULE_DEVICE_TABLE(of, hwicap_of_match);
+#else
+#define hwicap_of_match NULL
+#endif
-static struct of_platform_driver hwicap_of_driver = {
- .probe = hwicap_of_probe,
- .remove = __devexit_p(hwicap_of_remove),
+static struct platform_driver hwicap_platform_driver = {
+ .probe = hwicap_drv_probe,
+ .remove = hwicap_drv_remove,
.driver = {
- .name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .name = DRIVER_NAME,
.of_match_table = hwicap_of_match,
},
};
-/* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __init hwicap_of_register(void)
-{
- pr_debug("hwicap: calling of_register_platform_driver()\n");
- return of_register_platform_driver(&hwicap_of_driver);
-}
-
-static inline void __exit hwicap_of_unregister(void)
-{
- of_unregister_platform_driver(&hwicap_of_driver);
-}
-#else /* CONFIG_OF */
-/* CONFIG_OF not enabled; do nothing helpers */
-static inline int __init hwicap_of_register(void) { return 0; }
-static inline void __exit hwicap_of_unregister(void) { }
-#endif /* CONFIG_OF */
-
static int __init hwicap_module_init(void)
{
dev_t devt;
@@ -856,21 +832,12 @@ static int __init hwicap_module_init(void)
return retval;
retval = platform_driver_register(&hwicap_platform_driver);
-
- if (retval)
- goto failed1;
-
- retval = hwicap_of_register();
-
if (retval)
- goto failed2;
+ goto failed;
return retval;
- failed2:
- platform_driver_unregister(&hwicap_platform_driver);
-
- failed1:
+ failed:
unregister_chrdev_region(devt, HWICAP_DEVICES);
return retval;
@@ -884,8 +851,6 @@ static void __exit hwicap_module_cleanup(void)
platform_driver_unregister(&hwicap_platform_driver);
- hwicap_of_unregister();
-
unregister_chrdev_region(devt, HWICAP_DEVICES);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f6848a43..5cb4d09919d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = sysdev_driver_register(&cpu_sysdev_class,
&cpufreq_sysdev_driver);
+ if (ret)
+ goto err_null_driver;
- if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
dprintk("no CPU initialized for driver %s\n",
driver_data->name);
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ goto err_sysdev_unreg;
}
}
- if (!ret) {
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- dprintk("driver %s up and running\n", driver_data->name);
- cpufreq_debug_enable_ratelimit();
- }
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ dprintk("driver %s up and running\n", driver_data->name);
+ cpufreq_debug_enable_ratelimit();
+ return 0;
+err_sysdev_unreg:
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+err_null_driver:
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
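
The rewritten cpufreq_register_driver() above adopts the usual goto based unwind: each failure jumps to a label that undoes only the steps that already succeeded, in reverse order. A generic sketch of that idiom, with hypothetical do_step_*/undo_step_* helpers standing in for the sysdev registration and driver pointer reset done by the real function:

static int example_register(void)
{
	int ret;

	ret = do_step_one();		/* hypothetical helper */
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = do_step_two();
	if (ret)
		goto err_undo_one;

	ret = do_step_three();
	if (ret)
		goto err_undo_two;

	return 0;

err_undo_two:				/* unwind in reverse order */
	undo_step_two();
err_undo_one:
	undo_step_one();
	return ret;
}
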
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf69611..4531c46028cd 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -81,8 +81,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
*/
static DEFINE_MUTEX(dbs_mutex);
-static struct workqueue_struct *kconservative_wq;
-
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
@@ -118,7 +116,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -560,7 +558,7 @@ static void do_dbs_timer(struct work_struct *work)
dbs_check_cpu(dbs_info);
- queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}
@@ -572,8 +570,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
dbs_info->enable = 1;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
- delay);
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -716,25 +713,12 @@ struct cpufreq_governor cpufreq_gov_conservative = {
static int __init cpufreq_gov_dbs_init(void)
{
- int err;
-
- kconservative_wq = create_workqueue("kconservative");
- if (!kconservative_wq) {
- printk(KERN_ERR "Creation of kconservative failed\n");
- return -EFAULT;
- }
-
- err = cpufreq_register_governor(&cpufreq_gov_conservative);
- if (err)
- destroy_workqueue(kconservative_wq);
-
- return err;
+ return cpufreq_register_governor(&cpufreq_gov_conservative);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_conservative);
- destroy_workqueue(kconservative_wq);
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27a3dcc..059179db1985 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -104,8 +104,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
*/
static DEFINE_MUTEX(dbs_mutex);
-static struct workqueue_struct *kondemand_wq;
-
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
@@ -644,12 +642,7 @@ static void do_dbs_timer(struct work_struct *work)
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+ int delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -662,12 +655,22 @@ static void do_dbs_timer(struct work_struct *work)
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
+ } else {
+ /* We want all CPUs to do sampling nearly on
+ * same jiffy
+ */
+ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
}
- queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}
@@ -681,8 +684,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
- delay);
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -814,7 +816,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
static int __init cpufreq_gov_dbs_init(void)
{
- int err;
cputime64_t wall;
u64 idle_time;
int cpu = get_cpu();
@@ -838,22 +839,12 @@ static int __init cpufreq_gov_dbs_init(void)
MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
}
- kondemand_wq = create_workqueue("kondemand");
- if (!kondemand_wq) {
- printk(KERN_ERR "Creation of kondemand failed\n");
- return -EFAULT;
- }
- err = cpufreq_register_governor(&cpufreq_gov_ondemand);
- if (err)
- destroy_workqueue(kondemand_wq);
-
- return err;
+ return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
- destroy_workqueue(kondemand_wq);
}
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 0310ffaec9df..be7917ec40c9 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = {
.release = cpuidle_state_sysfs_release,
};
-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
{
kobject_put(&device->kobjs[i]->kobj);
wait_for_completion(&device->kobjs[i]->kobj_unregister);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index eab2cf7a0269..e54185223c8c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -252,4 +252,21 @@ config CRYPTO_DEV_OMAP_AES
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
+config CRYPTO_DEV_PICOXCELL
+ tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
+ depends on ARCH_PICOXCELL
+ select CRYPTO_AES
+ select CRYPTO_AUTHENC
+ select CRYPTO_ALGAPI
+ select CRYPTO_DES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_SEQIV
+ help
+ This option enables support for the hardware offload engines in the
+ Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
+ and for 3GPP Layer 2 ciphering support.
+
+ Saying m here will build a module named picoxcell_crypto.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 256697330a41..5203e34248d7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
-
+obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 2b1baee525bc..18912521a7a5 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1150,8 +1150,7 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
/**
* Module Initialization Routine
*/
-static int __init crypto4xx_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __init crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
struct resource res;
@@ -1280,7 +1279,7 @@ static const struct of_device_id crypto4xx_match[] = {
{ },
};
-static struct of_platform_driver crypto4xx_driver = {
+static struct platform_driver crypto4xx_driver = {
.driver = {
.name = "crypto4xx",
.owner = THIS_MODULE,
@@ -1292,12 +1291,12 @@ static struct of_platform_driver crypto4xx_driver = {
static int __init crypto4xx_init(void)
{
- return of_register_platform_driver(&crypto4xx_driver);
+ return platform_driver_register(&crypto4xx_driver);
}
static void __exit crypto4xx_exit(void)
{
- of_unregister_platform_driver(&crypto4xx_driver);
+ platform_driver_unregister(&crypto4xx_driver);
}
module_init(crypto4xx_init);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 80dc094e78c6..2e5b2044c96f 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -2004,8 +2004,7 @@ static void __devinit n2_spu_driver_version(void)
pr_info("%s", version);
}
-static int __devinit n2_crypto_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit n2_crypto_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
const char *full_name;
@@ -2116,8 +2115,7 @@ static void free_ncp(struct n2_mau *mp)
kfree(mp);
}
-static int __devinit n2_mau_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit n2_mau_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
const char *full_name;
@@ -2211,7 +2209,7 @@ static struct of_device_id n2_crypto_match[] = {
MODULE_DEVICE_TABLE(of, n2_crypto_match);
-static struct of_platform_driver n2_crypto_driver = {
+static struct platform_driver n2_crypto_driver = {
.driver = {
.name = "n2cp",
.owner = THIS_MODULE,
@@ -2235,7 +2233,7 @@ static struct of_device_id n2_mau_match[] = {
MODULE_DEVICE_TABLE(of, n2_mau_match);
-static struct of_platform_driver n2_mau_driver = {
+static struct platform_driver n2_mau_driver = {
.driver = {
.name = "ncp",
.owner = THIS_MODULE,
@@ -2247,20 +2245,20 @@ static struct of_platform_driver n2_mau_driver = {
static int __init n2_init(void)
{
- int err = of_register_platform_driver(&n2_crypto_driver);
+ int err = platform_driver_register(&n2_crypto_driver);
if (!err) {
- err = of_register_platform_driver(&n2_mau_driver);
+ err = platform_driver_register(&n2_mau_driver);
if (err)
- of_unregister_platform_driver(&n2_crypto_driver);
+ platform_driver_unregister(&n2_crypto_driver);
}
return err;
}
static void __exit n2_exit(void)
{
- of_unregister_platform_driver(&n2_mau_driver);
- of_unregister_platform_driver(&n2_crypto_driver);
+ platform_driver_unregister(&n2_mau_driver);
+ platform_driver_unregister(&n2_crypto_driver);
}
module_init(n2_init);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index add2a1a72ba4..5b970d9e9956 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -839,9 +839,9 @@ static int omap_aes_probe(struct platform_device *pdev)
/* Initializing the clock */
dd->iclk = clk_get(dev, "ick");
- if (!dd->iclk) {
+ if (IS_ERR(dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
- err = -ENODEV;
+ err = PTR_ERR(dd->iclk);
goto err_res;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 2e71123516e0..465cde3e4f60 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1206,9 +1206,9 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
/* Initializing the clock */
dd->iclk = clk_get(dev, "ick");
- if (!dd->iclk) {
+ if (IS_ERR(dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
- err = -ENODEV;
+ err = PTR_ERR(dd->iclk);
goto clk_err;
}
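
The two omap hunks above fix the same bug: clk_get() signals failure with an ERR_PTR encoded pointer, never NULL, so the result must be tested with IS_ERR() and decoded with PTR_ERR(). A minimal sketch of the corrected idiom (the clock name and wrapper function are illustrative only):

static int example_get_iclk(struct device *dev, struct clk **out)
{
	struct clk *iclk = clk_get(dev, "ick");

	if (IS_ERR(iclk)) {
		dev_err(dev, "clock initialization failed\n");
		return PTR_ERR(iclk);	/* e.g. -ENOENT */
	}

	*out = iclk;
	return 0;
}
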
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
new file mode 100644
index 000000000000..b092d0a65837
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -0,0 +1,1867 @@
+/*
+ * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/rtnetlink.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "picoxcell_crypto_regs.h"
+
+/*
+ * The threshold for the number of entries in the CMD FIFO available before
+ * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
+ * number of interrupts raised to the CPU.
+ */
+#define CMD0_IRQ_THRESHOLD 1
+
+/*
+ * The timeout period (in jiffies) for a PDU. When the number of PDUs in
+ * flight is greater than the STAT_IRQ_THRESHOLD or is 0, the timer is disabled.
+ * When there are packets in flight but lower than the threshold, we enable
+ * the timer and at expiry, attempt to remove any processed packets from the
+ * queue and if there are still packets left, schedule the timer again.
+ */
+#define PACKET_TIMEOUT 1
+
+/* The priority to register each algorithm with. */
+#define SPACC_CRYPTO_ALG_PRIORITY 10000
+
+#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
+#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
+#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
+#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
+#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
+#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
+#define SPACC_CRYPTO_L2_HASH_PG_SZ 64
+#define SPACC_CRYPTO_L2_MAX_CTXS 128
+#define SPACC_CRYPTO_L2_FIFO_SZ 128
+
+#define MAX_DDT_LEN 16
+
+/* DDT format. This must match the hardware DDT format exactly. */
+struct spacc_ddt {
+ dma_addr_t p;
+ u32 len;
+};
+
+/*
+ * Asynchronous crypto request structure.
+ *
+ * This structure defines a request that is either queued for processing or
+ * being processed.
+ */
+struct spacc_req {
+ struct list_head list;
+ struct spacc_engine *engine;
+ struct crypto_async_request *req;
+ int result;
+ bool is_encrypt;
+ unsigned ctx_id;
+ dma_addr_t src_addr, dst_addr;
+ struct spacc_ddt *src_ddt, *dst_ddt;
+ void (*complete)(struct spacc_req *req);
+
+ /* AEAD specific bits. */
+ u8 *giv;
+ size_t giv_len;
+ dma_addr_t giv_pa;
+};
+
+struct spacc_engine {
+ void __iomem *regs;
+ struct list_head pending;
+ int next_ctx;
+ spinlock_t hw_lock;
+ int in_flight;
+ struct list_head completed;
+ struct list_head in_progress;
+ struct tasklet_struct complete;
+ unsigned long fifo_sz;
+ void __iomem *cipher_ctx_base;
+ void __iomem *hash_key_base;
+ struct spacc_alg *algs;
+ unsigned num_algs;
+ struct list_head registered_algs;
+ size_t cipher_pg_sz;
+ size_t hash_pg_sz;
+ const char *name;
+ struct clk *clk;
+ struct device *dev;
+ unsigned max_ctxs;
+ struct timer_list packet_timeout;
+ unsigned stat_irq_thresh;
+ struct dma_pool *req_pool;
+};
+
+/* Algorithm type mask. */
+#define SPACC_CRYPTO_ALG_MASK 0x7
+
+/* SPACC definition of a crypto algorithm. */
+struct spacc_alg {
+ unsigned long ctrl_default;
+ unsigned long type;
+ struct crypto_alg alg;
+ struct spacc_engine *engine;
+ struct list_head entry;
+ int key_offs;
+ int iv_offs;
+};
+
+/* Generic context structure for any algorithm type. */
+struct spacc_generic_ctx {
+ struct spacc_engine *engine;
+ int flags;
+ int key_offs;
+ int iv_offs;
+};
+
+/* Block cipher context. */
+struct spacc_ablk_ctx {
+ struct spacc_generic_ctx generic;
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 key_len;
+ /*
+ * The fallback cipher. If the operation can't be done in hardware,
+ * fallback to a software version.
+ */
+ struct crypto_ablkcipher *sw_cipher;
+};
+
+/* AEAD cipher context. */
+struct spacc_aead_ctx {
+ struct spacc_generic_ctx generic;
+ u8 cipher_key[AES_MAX_KEY_SIZE];
+ u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
+ u8 cipher_key_len;
+ u8 hash_key_len;
+ struct crypto_aead *sw_cipher;
+ size_t auth_size;
+ u8 salt[AES_BLOCK_SIZE];
+};
+
+static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
+{
+ return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
+}
+
+static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
+{
+ u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
+
+ return fifo_stat & SPA_FIFO_CMD_FULL;
+}
+
+/*
+ * Given a cipher context, and a context number, get the base address of the
+ * context page.
+ *
+ * Returns the address of the context page where the key/context may
+ * be written.
+ */
+static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
+ unsigned indx,
+ bool is_cipher_ctx)
+{
+ return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
+ (indx * ctx->engine->cipher_pg_sz) :
+ ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
+}
+
+/* The context pages can only be written with 32-bit accesses. */
+static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
+ unsigned count)
+{
+ const u32 *src32 = (const u32 *) src;
+
+ while (count--)
+ writel(*src32++, dst++);
+}
+
+static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
+ void __iomem *page_addr, const u8 *key,
+ size_t key_len, const u8 *iv, size_t iv_len)
+{
+ void __iomem *key_ptr = page_addr + ctx->key_offs;
+ void __iomem *iv_ptr = page_addr + ctx->iv_offs;
+
+ memcpy_toio32(key_ptr, key, key_len / 4);
+ memcpy_toio32(iv_ptr, iv, iv_len / 4);
+}
+
+/*
+ * Load a context into the engine's context memory.
+ *
+ * Returns the index of the context page where the context was loaded.
+ */
+static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
+ const u8 *ciph_key, size_t ciph_len,
+ const u8 *iv, size_t ivlen, const u8 *hash_key,
+ size_t hash_len)
+{
+ unsigned indx = ctx->engine->next_ctx++;
+ void __iomem *ciph_page_addr, *hash_page_addr;
+
+ ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
+ hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
+
+ ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
+ spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
+ ivlen);
+ writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
+ (1 << SPA_KEY_SZ_CIPHER_OFFSET),
+ ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
+
+ if (hash_key) {
+ memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
+ writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
+ ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
+ }
+
+ return indx;
+}
+
+/* Count the number of scatterlist entries in a scatterlist. */
+static int sg_count(struct scatterlist *sg_list, int nbytes)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 0;
+
+ while (nbytes > 0) {
+ ++sg_nents;
+ nbytes -= sg->length;
+ sg = sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
+{
+ ddt->p = phys;
+ ddt->len = len;
+}
+
+/*
+ * Take a crypto request and scatterlists for the data and turn them into DDTs
+ * for passing to the crypto engines. This also DMA maps the data so that the
+ * crypto engines can DMA to/from them.
+ */
+static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
+ struct scatterlist *payload,
+ unsigned nbytes,
+ enum dma_data_direction dir,
+ dma_addr_t *ddt_phys)
+{
+ unsigned nents, mapped_ents;
+ struct scatterlist *cur;
+ struct spacc_ddt *ddt;
+ int i;
+
+ nents = sg_count(payload, nbytes);
+ mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
+
+ if (mapped_ents + 1 > MAX_DDT_LEN)
+ goto out;
+
+ ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
+ if (!ddt)
+ goto out;
+
+ for_each_sg(payload, cur, mapped_ents, i)
+ ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
+ ddt_set(&ddt[mapped_ents], 0, 0);
+
+ return ddt;
+
+out:
+ dma_unmap_sg(engine->dev, payload, nents, dir);
+ return NULL;
+}
+
+static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+{
+ struct aead_request *areq = container_of(req->req, struct aead_request,
+ base);
+ struct spacc_engine *engine = req->engine;
+ struct spacc_ddt *src_ddt, *dst_ddt;
+ unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
+ unsigned nents = sg_count(areq->src, areq->cryptlen);
+ dma_addr_t iv_addr;
+ struct scatterlist *cur;
+ int i, dst_ents, src_ents, assoc_ents;
+ u8 *iv = giv ? giv : areq->iv;
+
+ src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
+ if (!src_ddt)
+ return -ENOMEM;
+
+ dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
+ if (!dst_ddt) {
+ dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+ return -ENOMEM;
+ }
+
+ req->src_ddt = src_ddt;
+ req->dst_ddt = dst_ddt;
+
+ assoc_ents = dma_map_sg(engine->dev, areq->assoc,
+ sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
+ if (areq->src != areq->dst) {
+ src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ DMA_TO_DEVICE);
+ dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+ DMA_FROM_DEVICE);
+ } else {
+ src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ DMA_BIDIRECTIONAL);
+ dst_ents = 0;
+ }
+
+ /*
+ * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
+ * formed by the crypto block and sent as the ESP IV for IPSEC.
+ */
+ iv_addr = dma_map_single(engine->dev, iv, ivsize,
+ giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ req->giv_pa = iv_addr;
+
+ /*
+ * Map the associated data. For decryption we don't copy the
+ * associated data.
+ */
+ for_each_sg(areq->assoc, cur, assoc_ents, i) {
+ ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
+ if (req->is_encrypt)
+ ddt_set(dst_ddt++, sg_dma_address(cur),
+ sg_dma_len(cur));
+ }
+ ddt_set(src_ddt++, iv_addr, ivsize);
+
+ if (giv || req->is_encrypt)
+ ddt_set(dst_ddt++, iv_addr, ivsize);
+
+ /*
+ * Now map in the payload for the source and destination and terminate
+ * with the NULL pointers.
+ */
+ for_each_sg(areq->src, cur, src_ents, i) {
+ ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
+ if (areq->src == areq->dst)
+ ddt_set(dst_ddt++, sg_dma_address(cur),
+ sg_dma_len(cur));
+ }
+
+ for_each_sg(areq->dst, cur, dst_ents, i)
+ ddt_set(dst_ddt++, sg_dma_address(cur),
+ sg_dma_len(cur));
+
+ ddt_set(src_ddt, 0, 0);
+ ddt_set(dst_ddt, 0, 0);
+
+ return 0;
+}
+
+static void spacc_aead_free_ddts(struct spacc_req *req)
+{
+ struct aead_request *areq = container_of(req->req, struct aead_request,
+ base);
+ struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
+ struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+ struct spacc_engine *engine = aead_ctx->generic.engine;
+ unsigned ivsize = alg->alg.cra_aead.ivsize;
+ unsigned nents = sg_count(areq->src, areq->cryptlen);
+
+ if (areq->src != areq->dst) {
+ dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
+ dma_unmap_sg(engine->dev, areq->dst,
+ sg_count(areq->dst, areq->cryptlen),
+ DMA_FROM_DEVICE);
+ } else
+ dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
+
+ dma_unmap_sg(engine->dev, areq->assoc,
+ sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
+
+ dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
+
+ dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
+ dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
+}
+
+static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
+ dma_addr_t ddt_addr, struct scatterlist *payload,
+ unsigned nbytes, enum dma_data_direction dir)
+{
+ unsigned nents = sg_count(payload, nbytes);
+
+ dma_unmap_sg(req->engine->dev, payload, nents, dir);
+ dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
+}
+
+/*
+ * Set key for a DES operation in an AEAD cipher. This also performs weak key
+ * checking if required.
+ */
+static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ if (unlikely(!des_ekey(tmp, key)) &&
+ (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->cipher_key, key, len);
+ ctx->cipher_key_len = len;
+
+ return 0;
+}
+
+/* Set the key for the AES block cipher component of the AEAD transform. */
+static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /*
+ * IPSec engine only supports 128 and 256 bit AES keys. If we get a
+ * request for any other size (192 bits) then we need to do a software
+ * fallback.
+ */
+ if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
+ /*
+ * Set the fallback transform to use the same request flags as
+ * the hardware transform.
+ */
+ ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->sw_cipher->base.crt_flags |=
+ tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
+ return crypto_aead_setkey(ctx->sw_cipher, key, len);
+ }
+
+ memcpy(ctx->cipher_key, key, len);
+ ctx->cipher_key_len = len;
+
+ return 0;
+}
+
+static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ unsigned int authkeylen, enckeylen;
+ int err = -EINVAL;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ if (enckeylen > AES_MAX_KEY_SIZE)
+ goto badkey;
+
+ if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+ SPA_CTRL_CIPH_ALG_AES)
+ err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+ else
+ err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+
+ if (err)
+ goto badkey;
+
+ memcpy(ctx->hash_ctx, key, authkeylen);
+ ctx->hash_key_len = authkeylen;
+
+ return 0;
+
+badkey:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int spacc_aead_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
+
+ ctx->auth_size = authsize;
+
+ return 0;
+}
+
+/*
+ * Check if an AEAD request requires a fallback operation. Some requests can't
+ * be completed in hardware because the hardware may not support certain key
+ * sizes. In these cases we need to complete the request in software.
+ */
+static int spacc_aead_need_fallback(struct spacc_req *req)
+{
+ struct aead_request *aead_req;
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ aead_req = container_of(req->req, struct aead_request, base);
+ /*
+ * If we have a non-supported key-length, then we need to do a
+ * software fallback.
+ */
+ if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+ SPA_CTRL_CIPH_ALG_AES &&
+ ctx->cipher_key_len != AES_KEYSIZE_128 &&
+ ctx->cipher_key_len != AES_KEYSIZE_256)
+ return 1;
+
+ return 0;
+}
+
+static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
+ bool is_encrypt)
+{
+ struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
+ int err;
+
+ if (ctx->sw_cipher) {
+ /*
+ * Change the request to use the software fallback transform,
+ * and once the ciphering has completed, put the old transform
+ * back into the request.
+ */
+ aead_request_set_tfm(req, ctx->sw_cipher);
+ err = is_encrypt ? crypto_aead_encrypt(req) :
+ crypto_aead_decrypt(req);
+ aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+static void spacc_aead_complete(struct spacc_req *req)
+{
+ spacc_aead_free_ddts(req);
+ req->req->complete(req->req, req->result);
+}
+
+static int spacc_aead_submit(struct spacc_req *req)
+{
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = ctx->generic.engine;
+ u32 ctrl, proc_len, assoc_len;
+ struct aead_request *aead_req =
+ container_of(req->req, struct aead_request, base);
+
+ req->result = -EINPROGRESS;
+ req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
+ ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+ ctx->hash_ctx, ctx->hash_key_len);
+
+ /* Set the source and destination DDT pointers. */
+ writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
+ writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
+ writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
+
+ assoc_len = aead_req->assoclen;
+ proc_len = aead_req->cryptlen + assoc_len;
+
+ /*
+ * If we aren't generating an IV, then we need to include the IV in the
+ * associated data so that it is included in the hash.
+ */
+ if (!req->giv) {
+ assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
+ proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
+ } else
+ proc_len += req->giv_len;
+
+ /*
+ * If we are decrypting, we need to take the length of the ICV out of
+ * the processing length.
+ */
+ if (!req->is_encrypt)
+ proc_len -= ctx->auth_size;
+
+ writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
+ writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+ writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
+ writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
+
+ ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
+ (1 << SPA_CTRL_ICV_APPEND);
+ if (req->is_encrypt)
+ ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
+ else
+ ctrl |= (1 << SPA_CTRL_KEY_EXP);
+
+ mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+ writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
+
+ return -EINPROGRESS;
+}
+
+/*
+ * Setup an AEAD request for processing. This will configure the engine, load
+ * the context and then start the packet processing.
+ *
+ * @giv Pointer to destination address for a generated IV. If the
+ * request does not need to generate an IV then this should be set to NULL.
+ */
+static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+ unsigned alg_type, bool is_encrypt)
+{
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+ struct spacc_req *dev_req = aead_request_ctx(req);
+ int err = -EINPROGRESS;
+ unsigned long flags;
+ unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
+
+ dev_req->giv = giv;
+ dev_req->giv_len = ivsize;
+ dev_req->req = &req->base;
+ dev_req->is_encrypt = is_encrypt;
+ dev_req->result = -EBUSY;
+ dev_req->engine = engine;
+ dev_req->complete = spacc_aead_complete;
+
+ if (unlikely(spacc_aead_need_fallback(dev_req)))
+ return spacc_aead_do_fallback(req, alg_type, is_encrypt);
+
+ spacc_aead_make_ddts(dev_req, dev_req->giv);
+
+ err = -EINPROGRESS;
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ if (unlikely(spacc_fifo_cmd_full(engine))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -EBUSY;
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+ goto out_free_ddts;
+ }
+ list_add_tail(&dev_req->list, &engine->pending);
+ } else {
+ ++engine->in_flight;
+ list_add_tail(&dev_req->list, &engine->in_progress);
+ spacc_aead_submit(dev_req);
+ }
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+ goto out;
+
+out_free_ddts:
+ spacc_aead_free_ddts(dev_req);
+out:
+ return err;
+}
+
+static int spacc_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_aead_setup(req, NULL, alg->type, 1);
+}
+
+static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
+{
+ struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ size_t ivsize = crypto_aead_ivsize(tfm);
+ struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
+ unsigned len;
+ __be64 seq;
+
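+	/*
+	 * Use the per-tfm random salt as the request IV and encode the
+	 * big-endian sequence number into the generated IV.
+	 */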
+ memcpy(req->areq.iv, ctx->salt, ivsize);
+ len = ivsize;
+ if (ivsize > sizeof(u64)) {
+ memset(req->giv, 0, ivsize - sizeof(u64));
+ len = sizeof(u64);
+ }
+ seq = cpu_to_be64(req->seq);
+ memcpy(req->giv + ivsize - len, &seq, len);
+
+ return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+}
+
+static int spacc_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_aead_setup(req, NULL, alg->type, 0);
+}
+
+/*
+ * Initialise a new AEAD context. This is responsible for allocating the
+ * fallback cipher and initialising the context.
+ */
+static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+{
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = spacc_alg->engine;
+
+ ctx->generic.flags = spacc_alg->type;
+ ctx->generic.engine = engine;
+ ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->sw_cipher)) {
+ dev_warn(engine->dev, "failed to allocate fallback for %s\n",
+ alg->cra_name);
+ ctx->sw_cipher = NULL;
+ }
+ ctx->generic.key_offs = spacc_alg->key_offs;
+ ctx->generic.iv_offs = spacc_alg->iv_offs;
+
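+	/* Random salt used when generating IVs for givencrypt requests. */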
+ get_random_bytes(ctx->salt, sizeof(ctx->salt));
+
+ tfm->crt_aead.reqsize = sizeof(struct spacc_req);
+
+ return 0;
+}
+
+/*
+ * Destructor for an AEAD context. This is called when the transform is freed
+ * and must free the fallback cipher.
+ */
+static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+{
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sw_cipher)
+ crypto_free_aead(ctx->sw_cipher);
+ ctx->sw_cipher = NULL;
+}
+
+/*
+ * Set the DES key for a block cipher transform. This also performs weak key
+ * checking if the transform has requested it.
+ */
+static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ if (len > DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (unlikely(!des_ekey(tmp, key)) &&
+ (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+/*
+ * Set the key for an AES block cipher. Some key lengths are not supported in
+ * hardware so this must also check whether a fallback is needed.
+ */
+static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err = 0;
+
+ if (len > AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /*
+ * IPSec engine only supports 128 and 256 bit AES keys. If we get a
+ * request for any other size (192 bits) then we need to do a software
+ * fallback.
+ */
+	if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
+ ctx->sw_cipher) {
+ /*
+ * Set the fallback transform to use the same request flags as
+ * the hardware transform.
+ */
+ ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->sw_cipher->base.crt_flags |=
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+
+ err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
+ if (err)
+ goto sw_setkey_failed;
+	} else if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
+ !ctx->sw_cipher)
+ err = -EINVAL;
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+sw_setkey_failed:
+ if (err && ctx->sw_cipher) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |=
+ ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
+ }
+
+ return err;
+}
+
+static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err = 0;
+
+ if (len > AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+out:
+ return err;
+}
+
+static int spacc_ablk_need_fallback(struct spacc_req *req)
+{
+ struct spacc_ablk_ctx *ctx;
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+
+ ctx = crypto_tfm_ctx(tfm);
+
+ return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+ SPA_CTRL_CIPH_ALG_AES &&
+ ctx->key_len != AES_KEYSIZE_128 &&
+ ctx->key_len != AES_KEYSIZE_256;
+}
+
+static void spacc_ablk_complete(struct spacc_req *req)
+{
+ struct ablkcipher_request *ablk_req =
+ container_of(req->req, struct ablkcipher_request, base);
+
+ if (ablk_req->src != ablk_req->dst) {
+ spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
+ ablk_req->nbytes, DMA_TO_DEVICE);
+ spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
+ ablk_req->nbytes, DMA_FROM_DEVICE);
+ } else
+ spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
+ ablk_req->nbytes, DMA_BIDIRECTIONAL);
+
+ req->req->complete(req->req, req->result);
+}
+
+static int spacc_ablk_submit(struct spacc_req *req)
+{
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = ctx->generic.engine;
+ u32 ctrl;
+
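+	/* Load the key and IV into a free hardware context page. */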
+ req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
+ ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
+ NULL, 0);
+
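+	/* Set the source and destination DDT pointers. */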
+ writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
+ writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
+ writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
+
+ writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
+ writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
+ writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
+
+ ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
+ (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
+ (1 << SPA_CTRL_KEY_EXP));
+
+ mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+ writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
+
+ return -EINPROGRESS;
+}
+
+static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
+ unsigned alg_type, bool is_encrypt)
+{
+ struct crypto_tfm *old_tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
+ int err;
+
+ if (!ctx->sw_cipher)
+ return -EINVAL;
+
+ /*
+ * Change the request to use the software fallback transform, and once
+ * the ciphering has completed, put the old transform back into the
+ * request.
+ */
+ ablkcipher_request_set_tfm(req, ctx->sw_cipher);
+ err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
+
+ return err;
+}
+
+static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
+ bool is_encrypt)
+{
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+ struct spacc_req *dev_req = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err = -ENOMEM;
+
+ dev_req->req = &req->base;
+ dev_req->is_encrypt = is_encrypt;
+ dev_req->engine = engine;
+ dev_req->complete = spacc_ablk_complete;
+ dev_req->result = -EINPROGRESS;
+
+ if (unlikely(spacc_ablk_need_fallback(dev_req)))
+ return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
+
+ /*
+	 * Create the DDTs for the engine. If the source and destination are the
+	 * same then we can optimize by reusing a single bidirectional DDT.
+ */
+ if (req->src != req->dst) {
+ dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
+ req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
+ if (!dev_req->src_ddt)
+ goto out;
+
+ dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
+ req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
+ if (!dev_req->dst_ddt)
+ goto out_free_src;
+ } else {
+ dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
+ req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
+ if (!dev_req->dst_ddt)
+ goto out;
+
+ dev_req->src_ddt = NULL;
+ dev_req->src_addr = dev_req->dst_addr;
+ }
+
+ err = -EINPROGRESS;
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ /*
+	 * Check if the engine will accept the operation now. If it won't then
+	 * we either add it to the end of the pending list if we can backlog,
+	 * or bail out with an error if we can't.
+ */
+ if (unlikely(spacc_fifo_cmd_full(engine))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -EBUSY;
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+ goto out_free_ddts;
+ }
+ list_add_tail(&dev_req->list, &engine->pending);
+ } else {
+ ++engine->in_flight;
+ list_add_tail(&dev_req->list, &engine->in_progress);
+ spacc_ablk_submit(dev_req);
+ }
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+ goto out;
+
+out_free_ddts:
+ spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
+ req->nbytes, req->src == req->dst ?
+ DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
+out_free_src:
+ if (req->src != req->dst)
+ spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
+ req->src, req->nbytes, DMA_TO_DEVICE);
+out:
+ return err;
+}
+
+static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
+{
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = spacc_alg->engine;
+
+ ctx->generic.flags = spacc_alg->type;
+ ctx->generic.engine = engine;
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->sw_cipher)) {
+ dev_warn(engine->dev, "failed to allocate fallback for %s\n",
+ alg->cra_name);
+ ctx->sw_cipher = NULL;
+ }
+ }
+ ctx->generic.key_offs = spacc_alg->key_offs;
+ ctx->generic.iv_offs = spacc_alg->iv_offs;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
+
+ return 0;
+}
+
+static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
+{
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sw_cipher)
+ crypto_free_ablkcipher(ctx->sw_cipher);
+ ctx->sw_cipher = NULL;
+}
+
+static int spacc_ablk_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_ablk_setup(req, alg->type, 1);
+}
+
+static int spacc_ablk_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_ablk_setup(req, alg->type, 0);
+}
+
+static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
+{
+ return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
+ SPA_FIFO_STAT_EMPTY;
+}
+
+static void spacc_process_done(struct spacc_engine *engine)
+{
+ struct spacc_req *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->hw_lock, flags);
+
+ while (!spacc_fifo_stat_empty(engine)) {
+ req = list_first_entry(&engine->in_progress, struct spacc_req,
+ list);
+ list_move_tail(&req->list, &engine->completed);
+
+ /* POP the status register. */
+ writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
+ req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
+ SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
+
+ /*
+ * Convert the SPAcc error status into the standard POSIX error
+ * codes.
+ */
+ if (unlikely(req->result)) {
+ switch (req->result) {
+ case SPA_STATUS_ICV_FAIL:
+ req->result = -EBADMSG;
+ break;
+
+ case SPA_STATUS_MEMORY_ERROR:
+ dev_warn(engine->dev,
+ "memory error triggered\n");
+ req->result = -EFAULT;
+ break;
+
+ case SPA_STATUS_BLOCK_ERROR:
+ dev_warn(engine->dev,
+ "block error triggered\n");
+ req->result = -EIO;
+ break;
+ }
+ }
+ }
+
+ tasklet_schedule(&engine->complete);
+
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+static irqreturn_t spacc_spacc_irq(int irq, void *dev)
+{
+ struct spacc_engine *engine = (struct spacc_engine *)dev;
+ u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
+
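+	/* Acknowledge the interrupt sources and drain any completed requests. */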
+ writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
+ spacc_process_done(engine);
+
+ return IRQ_HANDLED;
+}
+
+static void spacc_packet_timeout(unsigned long data)
+{
+ struct spacc_engine *engine = (struct spacc_engine *)data;
+
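+	/* Drain completions that did not reach the STAT_CNT IRQ threshold. */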
+ spacc_process_done(engine);
+}
+
+static int spacc_req_submit(struct spacc_req *req)
+{
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+
+ if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
+ return spacc_aead_submit(req);
+ else
+ return spacc_ablk_submit(req);
+}
+
+static void spacc_spacc_complete(unsigned long data)
+{
+ struct spacc_engine *engine = (struct spacc_engine *)data;
+ struct spacc_req *req, *tmp;
+ unsigned long flags;
+ int num_removed = 0;
+ LIST_HEAD(completed);
+
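+	/*
+	 * Move the completed requests onto a local list so that their
+	 * completion callbacks run without the hardware lock held.
+	 */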
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ list_splice_init(&engine->completed, &completed);
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+ list_for_each_entry_safe(req, tmp, &completed, list) {
+ ++num_removed;
+ req->complete(req);
+ }
+
+ /* Try and fill the engine back up again. */
+ spin_lock_irqsave(&engine->hw_lock, flags);
+
+ engine->in_flight -= num_removed;
+
+ list_for_each_entry_safe(req, tmp, &engine->pending, list) {
+ if (spacc_fifo_cmd_full(engine))
+ break;
+
+ list_move_tail(&req->list, &engine->in_progress);
+ ++engine->in_flight;
+ req->result = spacc_req_submit(req);
+ }
+
+ if (engine->in_flight)
+ mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+#ifdef CONFIG_PM
+static int spacc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+ /*
+	 * We only support standby mode. All we have to do is gate the clock to
+	 * the SPAcc. The hardware will preserve state until we turn it back
+	 * on.
+ */
+ clk_disable(engine->clk);
+
+ return 0;
+}
+
+static int spacc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+ return clk_enable(engine->clk);
+}
+
+static const struct dev_pm_ops spacc_pm_ops = {
+ .suspend = spacc_suspend,
+ .resume = spacc_resume,
+};
+#endif /* CONFIG_PM */
+
+static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
+{
+ return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+}
+
+static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct spacc_engine *engine = spacc_dev_to_engine(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
+}
+
+static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct spacc_engine *engine = spacc_dev_to_engine(dev);
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
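+	/* Keep the threshold within the FIFO: at least 1, at most fifo_sz - 1. */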
+ thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
+
+ engine->stat_irq_thresh = thresh;
+ writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
+ engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
+
+ return len;
+}
+static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
+ spacc_stat_irq_thresh_store);
+
+static struct spacc_alg ipsec_engine_algs[] = {
+ {
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
+ .alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
+ .alg = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
+ .alg = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
+ .alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-ede-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
+ .alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-ede-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA256 |
+ SPA_CTRL_HASH_MODE_HMAC,
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA256 |
+ SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+};
+
+static struct spacc_alg l2_engine_algs[] = {
+ {
+ .key_offs = 0,
+ .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
+ SPA_CTRL_CIPH_MODE_F8,
+ .alg = {
+ .cra_name = "f8(kasumi)",
+ .cra_driver_name = "f8-kasumi-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_kasumi_f8_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = 16,
+ .max_keysize = 16,
+ .ivsize = 8,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+};
+
+static int __devinit spacc_probe(struct platform_device *pdev,
+ unsigned max_ctxs, size_t cipher_pg_sz,
+ size_t hash_pg_sz, size_t fifo_sz,
+ struct spacc_alg *algs, size_t num_algs)
+{
+ int i, err, ret = -EINVAL;
+ struct resource *mem, *irq;
+ struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
+ GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ engine->max_ctxs = max_ctxs;
+ engine->cipher_pg_sz = cipher_pg_sz;
+ engine->hash_pg_sz = hash_pg_sz;
+ engine->fifo_sz = fifo_sz;
+ engine->algs = algs;
+ engine->num_algs = num_algs;
+ engine->name = dev_name(&pdev->dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mem || !irq) {
+ dev_err(&pdev->dev, "no memory/irq resource for engine\n");
+ return -ENXIO;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ engine->name))
+ return -ENOMEM;
+
+ engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!engine->regs) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ return -ENOMEM;
+ }
+
+ if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
+ engine->name, engine)) {
+ dev_err(engine->dev, "failed to request IRQ\n");
+ return -EBUSY;
+ }
+
+ engine->dev = &pdev->dev;
+ engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
+ engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
+
+ engine->req_pool = dmam_pool_create(engine->name, engine->dev,
+ MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
+ if (!engine->req_pool)
+ return -ENOMEM;
+
+ spin_lock_init(&engine->hw_lock);
+
+ engine->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(engine->clk)) {
+ dev_info(&pdev->dev, "clk unavailable\n");
+ return PTR_ERR(engine->clk);
+ }
+
+ if (clk_enable(engine->clk)) {
+ dev_info(&pdev->dev, "unable to enable clk\n");
+ clk_put(engine->clk);
+ return -EIO;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (err) {
+ clk_disable(engine->clk);
+ clk_put(engine->clk);
+ return err;
+ }
+
+
+ /*
+	 * Use an IRQ threshold of 50% as a default. This seems to be a
+	 * reasonable trade-off between latency and throughput, and can be
+ * changed at runtime.
+ */
+ engine->stat_irq_thresh = (engine->fifo_sz / 2);
+
+ /*
+ * Configure the interrupts. We only use the STAT_CNT interrupt as we
+ * only submit a new packet for processing when we complete another in
+ * the queue. This minimizes time spent in the interrupt handler.
+ */
+ writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
+ engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
+ writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
+ engine->regs + SPA_IRQ_EN_REG_OFFSET);
+
+ setup_timer(&engine->packet_timeout, spacc_packet_timeout,
+ (unsigned long)engine);
+
+ INIT_LIST_HEAD(&engine->pending);
+ INIT_LIST_HEAD(&engine->completed);
+ INIT_LIST_HEAD(&engine->in_progress);
+ engine->in_flight = 0;
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ platform_set_drvdata(pdev, engine);
+
+ INIT_LIST_HEAD(&engine->registered_algs);
+ for (i = 0; i < engine->num_algs; ++i) {
+ engine->algs[i].engine = engine;
+ err = crypto_register_alg(&engine->algs[i].alg);
+		if (!err) {
+			list_add_tail(&engine->algs[i].entry,
+				      &engine->registered_algs);
+			ret = 0;
+			dev_dbg(engine->dev, "registered alg \"%s\"\n",
+				engine->algs[i].alg.cra_name);
+		} else {
+			dev_err(engine->dev, "failed to register alg \"%s\"\n",
+				engine->algs[i].alg.cra_name);
+		}
+ }
+
+ return ret;
+}
+
+static int __devexit spacc_remove(struct platform_device *pdev)
+{
+ struct spacc_alg *alg, *next;
+ struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+
+ list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
+ list_del(&alg->entry);
+ crypto_unregister_alg(&alg->alg);
+ }
+
+ clk_disable(engine->clk);
+ clk_put(engine->clk);
+
+ return 0;
+}
+
+static int __devinit ipsec_probe(struct platform_device *pdev)
+{
+ return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
+ SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
+ SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
+ SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
+ ARRAY_SIZE(ipsec_engine_algs));
+}
+
+static struct platform_driver ipsec_driver = {
+ .probe = ipsec_probe,
+ .remove = __devexit_p(spacc_remove),
+ .driver = {
+ .name = "picoxcell-ipsec",
+#ifdef CONFIG_PM
+ .pm = &spacc_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __devinit l2_probe(struct platform_device *pdev)
+{
+ return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
+ SPACC_CRYPTO_L2_CIPHER_PG_SZ,
+ SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
+ l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
+}
+
+static struct platform_driver l2_driver = {
+ .probe = l2_probe,
+ .remove = __devexit_p(spacc_remove),
+ .driver = {
+ .name = "picoxcell-l2",
+#ifdef CONFIG_PM
+ .pm = &spacc_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init spacc_init(void)
+{
+ int ret = platform_driver_register(&ipsec_driver);
+ if (ret) {
+ pr_err("failed to register ipsec spacc driver");
+ goto out;
+ }
+
+ ret = platform_driver_register(&l2_driver);
+ if (ret) {
+ pr_err("failed to register l2 spacc driver");
+ goto l2_failed;
+ }
+
+ return 0;
+
+l2_failed:
+ platform_driver_unregister(&ipsec_driver);
+out:
+ return ret;
+}
+module_init(spacc_init);
+
+static void __exit spacc_exit(void)
+{
+ platform_driver_unregister(&ipsec_driver);
+ platform_driver_unregister(&l2_driver);
+}
+module_exit(spacc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h
new file mode 100644
index 000000000000..af93442564c9
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto_regs.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2010 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __PICOXCELL_CRYPTO_REGS_H__
+#define __PICOXCELL_CRYPTO_REGS_H__
+
+#define SPA_STATUS_OK 0
+#define SPA_STATUS_ICV_FAIL 1
+#define SPA_STATUS_MEMORY_ERROR 2
+#define SPA_STATUS_BLOCK_ERROR 3
+
+#define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16
+#define SPA_IRQ_STAT_STAT_MASK (1 << 4)
+#define SPA_FIFO_STAT_STAT_OFFSET 16
+#define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET)
+#define SPA_STATUS_RES_CODE_OFFSET 24
+#define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET)
+#define SPA_KEY_SZ_CTX_INDEX_OFFSET 8
+#define SPA_KEY_SZ_CIPHER_OFFSET 31
+
+#define SPA_IRQ_EN_REG_OFFSET 0x00000000
+#define SPA_IRQ_STAT_REG_OFFSET 0x00000004
+#define SPA_IRQ_CTRL_REG_OFFSET 0x00000008
+#define SPA_FIFO_STAT_REG_OFFSET 0x0000000C
+#define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010
+#define SPA_SRC_PTR_REG_OFFSET 0x00000020
+#define SPA_DST_PTR_REG_OFFSET 0x00000024
+#define SPA_OFFSET_REG_OFFSET 0x00000028
+#define SPA_AAD_LEN_REG_OFFSET 0x0000002C
+#define SPA_PROC_LEN_REG_OFFSET 0x00000030
+#define SPA_ICV_LEN_REG_OFFSET 0x00000034
+#define SPA_ICV_OFFSET_REG_OFFSET 0x00000038
+#define SPA_SW_CTRL_REG_OFFSET 0x0000003C
+#define SPA_CTRL_REG_OFFSET 0x00000040
+#define SPA_AUX_INFO_REG_OFFSET 0x0000004C
+#define SPA_STAT_POP_REG_OFFSET 0x00000050
+#define SPA_STATUS_REG_OFFSET 0x00000054
+#define SPA_KEY_SZ_REG_OFFSET 0x00000100
+#define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000
+#define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000
+#define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000
+
+#define SPA_IRQ_EN_REG_RESET 0x00000000
+#define SPA_IRQ_CTRL_REG_RESET 0x00000000
+#define SPA_FIFO_STAT_REG_RESET 0x00000000
+#define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000
+#define SPA_SRC_PTR_REG_RESET 0x00000000
+#define SPA_DST_PTR_REG_RESET 0x00000000
+#define SPA_OFFSET_REG_RESET 0x00000000
+#define SPA_AAD_LEN_REG_RESET 0x00000000
+#define SPA_PROC_LEN_REG_RESET 0x00000000
+#define SPA_ICV_LEN_REG_RESET 0x00000000
+#define SPA_ICV_OFFSET_REG_RESET 0x00000000
+#define SPA_SW_CTRL_REG_RESET 0x00000000
+#define SPA_CTRL_REG_RESET 0x00000000
+#define SPA_AUX_INFO_REG_RESET 0x00000000
+#define SPA_STAT_POP_REG_RESET 0x00000000
+#define SPA_STATUS_REG_RESET 0x00000000
+#define SPA_KEY_SZ_REG_RESET 0x00000000
+
+#define SPA_CTRL_HASH_ALG_IDX 4
+#define SPA_CTRL_CIPH_MODE_IDX 8
+#define SPA_CTRL_HASH_MODE_IDX 12
+#define SPA_CTRL_CTX_IDX 16
+#define SPA_CTRL_ENCRYPT_IDX 24
+#define SPA_CTRL_AAD_COPY 25
+#define SPA_CTRL_ICV_PT 26
+#define SPA_CTRL_ICV_ENC 27
+#define SPA_CTRL_ICV_APPEND 28
+#define SPA_CTRL_KEY_EXP 29
+
+#define SPA_KEY_SZ_CXT_IDX 8
+#define SPA_KEY_SZ_CIPHER_IDX 31
+
+#define SPA_IRQ_EN_CMD0_EN (1 << 0)
+#define SPA_IRQ_EN_STAT_EN (1 << 4)
+#define SPA_IRQ_EN_GLBL_EN (1 << 31)
+
+#define SPA_CTRL_CIPH_ALG_NULL 0x00
+#define SPA_CTRL_CIPH_ALG_DES 0x01
+#define SPA_CTRL_CIPH_ALG_AES 0x02
+#define SPA_CTRL_CIPH_ALG_RC4 0x03
+#define SPA_CTRL_CIPH_ALG_MULTI2 0x04
+#define SPA_CTRL_CIPH_ALG_KASUMI 0x05
+
+#define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX)
+
+#define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX)
+
+#define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX)
+#define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX)
+#define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX)
+
+#define SPA_FIFO_STAT_EMPTY (1 << 31)
+#define SPA_FIFO_CMD_FULL (1 << 7)
+
+#endif /* __PICOXCELL_CRYPTO_REGS_H__ */
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b879c3f5d7c0..854e2632f9a6 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2402,8 +2402,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return t_alg;
}
-static int talitos_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int talitos_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -2580,7 +2579,7 @@ static const struct of_device_id talitos_match[] = {
};
MODULE_DEVICE_TABLE(of, talitos_match);
-static struct of_platform_driver talitos_driver = {
+static struct platform_driver talitos_driver = {
.driver = {
.name = "talitos",
.owner = THIS_MODULE,
@@ -2592,13 +2591,13 @@ static struct of_platform_driver talitos_driver = {
static int __init talitos_init(void)
{
- return of_register_platform_driver(&talitos_driver);
+ return platform_driver_register(&talitos_driver);
}
module_init(talitos_init);
static void __exit talitos_exit(void)
{
- of_unregister_platform_driver(&talitos_driver);
+ platform_driver_unregister(&talitos_driver);
}
module_exit(talitos_exit);
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 07bca4970e50..e6d7228b1479 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1845,7 +1845,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
}
#endif
-static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a6656834f0ff..00deabd9a04b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -849,7 +849,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
/* Must clear TC interrupt before calling
* dma_tc_handle
- * in case tc_handle initate a new dma job
+	 * in case tc_handle initiates a new dma job
*/
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
@@ -894,7 +894,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
}
/* Must clear TC interrupt before calling
* dma_tc_handle
- * in case tc_handle initate a new dma job
+	 * in case tc_handle initiates a new dma job
*/
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67e..08dab3badad2 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,15 +32,18 @@
* which does not support descriptor writeback.
*/
-/* NOTE: DMS+SMS is system-specific. We should get this information
- * from the platform code somehow.
- */
-#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
- | DWC_CTLL_SRC_MSIZE(0) \
- | DWC_CTLL_DMS(0) \
- | DWC_CTLL_SMS(1) \
- | DWC_CTLL_LLP_D_EN \
- | DWC_CTLL_LLP_S_EN)
+#define DWC_DEFAULT_CTLLO(private) ({ \
+ struct dw_dma_slave *__slave = (private); \
+ int dms = __slave ? __slave->dst_master : 0; \
+ int sms = __slave ? __slave->src_master : 1; \
+ \
+ (DWC_CTLL_DST_MSIZE(0) \
+ | DWC_CTLL_SRC_MSIZE(0) \
+ | DWC_CTLL_LLP_D_EN \
+ | DWC_CTLL_LLP_S_EN \
+ | DWC_CTLL_DMS(dms) \
+ | DWC_CTLL_SMS(sms)); \
+ })
/*
* This is configuration-dependent and usually a funny size like 4095.
@@ -291,6 +294,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
+ if (list_empty(&dwc->active_list))
+ return;
+
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -588,7 +594,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
else
src_width = dst_width = 0;
- ctllo = DWC_DEFAULT_CTLLO
+ ctllo = DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(dst_width)
| DWC_CTLL_SRC_WIDTH(src_width)
| DWC_CTLL_DST_INC
@@ -669,7 +675,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
switch (direction) {
case DMA_TO_DEVICE:
- ctllo = (DWC_DEFAULT_CTLLO
+ ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
@@ -714,7 +720,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
break;
case DMA_FROM_DEVICE:
- ctllo = (DWC_DEFAULT_CTLLO
+ ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
@@ -1126,7 +1132,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
case DMA_TO_DEVICE:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
@@ -1137,7 +1143,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
case DMA_FROM_DEVICE:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_INC
@@ -1335,6 +1341,8 @@ static int __init dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+ if (pdata->is_private)
+ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
dw->dma.dev = &pdev->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
dw->dma.device_free_chan_resources = dwc_free_chan_resources;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4de947a450fc..e3854a8f0de0 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1281,8 +1281,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan)
kfree(chan);
}
-static int __devinit fsldma_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fsldma_of_probe(struct platform_device *op)
{
struct fsldma_device *fdev;
struct device_node *child;
@@ -1414,20 +1413,13 @@ static struct of_platform_driver fsldma_of_driver = {
static __init int fsldma_init(void)
{
- int ret;
-
pr_info("Freescale Elo / Elo Plus DMA driver\n");
-
- ret = of_register_platform_driver(&fsldma_of_driver);
- if (ret)
- pr_err("fsldma: failed to register platform driver\n");
-
- return ret;
+ return platform_driver_register(&fsldma_of_driver);
}
static void __exit fsldma_exit(void)
{
- of_unregister_platform_driver(&fsldma_of_driver);
+ platform_driver_unregister(&fsldma_of_driver);
}
subsys_initcall(fsldma_init);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 59c270192ccc..4f95d31f5a20 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -649,8 +649,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
return &mdesc->desc;
}
-static int __devinit mpc_dma_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -827,7 +826,7 @@ static struct of_device_id mpc_dma_match[] = {
{},
};
-static struct of_platform_driver mpc_dma_driver = {
+static struct platform_driver mpc_dma_driver = {
.probe = mpc_dma_probe,
.remove = __devexit_p(mpc_dma_remove),
.driver = {
@@ -839,13 +838,13 @@ static struct of_platform_driver mpc_dma_driver = {
static int __init mpc_dma_init(void)
{
- return of_register_platform_driver(&mpc_dma_driver);
+ return platform_driver_register(&mpc_dma_driver);
}
module_init(mpc_dma_init);
static void __exit mpc_dma_exit(void)
{
- of_unregister_platform_driver(&mpc_dma_driver);
+ platform_driver_unregister(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7c50f6dfd3f4..6abe1ec1f2ce 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
}
static int __devinit
-pl330_probe(struct amba_device *adev, struct amba_id *id)
+pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index cef584533ee8..3b0247e74cc4 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4393,8 +4393,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
/**
* ppc440spe_adma_probe - probe the asynch device
*/
-static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct resource res;
@@ -4944,7 +4943,7 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
};
MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
-static struct of_platform_driver ppc440spe_adma_driver = {
+static struct platform_driver ppc440spe_adma_driver = {
.probe = ppc440spe_adma_probe,
.remove = __devexit_p(ppc440spe_adma_remove),
.driver = {
@@ -4962,7 +4961,7 @@ static __init int ppc440spe_adma_init(void)
if (ret)
return ret;
- ret = of_register_platform_driver(&ppc440spe_adma_driver);
+ ret = platform_driver_register(&ppc440spe_adma_driver);
if (ret) {
pr_err("%s: failed to register platform driver\n",
__func__);
@@ -4996,7 +4995,7 @@ out_dev:
/* User will not be able to enable h/w RAID-6 */
pr_err("%s: failed to create RAID-6 driver interface\n",
__func__);
- of_unregister_platform_driver(&ppc440spe_adma_driver);
+ platform_driver_unregister(&ppc440spe_adma_driver);
out_reg:
dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
kfree(ppc440spe_dma_fifo_buf);
@@ -5011,7 +5010,7 @@ static void __exit ppc440spe_adma_exit(void)
&driver_attr_enable);
driver_remove_file(&ppc440spe_adma_driver.driver,
&driver_attr_devices);
- of_unregister_platform_driver(&ppc440spe_adma_driver);
+ platform_driver_unregister(&ppc440spe_adma_driver);
dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
kfree(ppc440spe_dma_fifo_buf);
}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 28720d3103c4..6451b581a70b 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -750,7 +750,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
return;
}
- /* Find the first not transferred desciptor */
+	/* Find the first descriptor that has not been transferred */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
if (desc->mark == DESC_SUBMITTED) {
dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6e1d46a65d0e..af955de035f4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
* @base: Pointer to memory area when the pre_alloc_lli's are not large
* enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
* pre_alloc_lli is used.
+ * @dma_addr: DMA address, if mapped
* @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
* @pre_alloc_lli: Pre allocated area for the most common case of transfers,
* one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
struct d40_lli_pool {
void *base;
int size;
+ dma_addr_t dma_addr;
/* Space for dst and src, plus an extra for padding */
u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
@@ -94,7 +96,6 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * @is_hw_linked: true if this job will automatically be continued for
* the previous one.
*
* This descriptor is used for both logical and physical transfers.
@@ -114,7 +115,7 @@ struct d40_desc {
struct list_head node;
bool is_in_client_list;
- bool is_hw_linked;
+ bool cyclic;
};
/**
@@ -130,6 +131,7 @@ struct d40_desc {
*/
struct d40_lcla_pool {
void *base;
+ dma_addr_t dma_addr;
void *base_unaligned;
int pages;
spinlock_t lock;
@@ -303,9 +305,37 @@ struct d40_reg_val {
unsigned int val;
};
-static int d40_pool_lli_alloc(struct d40_desc *d40d,
- int lli_len, bool is_log)
+static struct device *chan2dev(struct d40_chan *d40c)
{
+ return &d40c->chan.dev->device;
+}
+
+static bool chan_is_physical(struct d40_chan *chan)
+{
+ return chan->log_num == D40_PHY_CHAN;
+}
+
+static bool chan_is_logical(struct d40_chan *chan)
+{
+ return !chan_is_physical(chan);
+}
+
+static void __iomem *chan_base(struct d40_chan *chan)
+{
+ return chan->base->virtbase + D40_DREG_PCBASE +
+ chan->phy_chan->num * D40_DREG_PCDELTA;
+}
+
+#define d40_err(dev, format, arg...) \
+ dev_err(dev, "[%s] " format, __func__, ## arg)
+
+#define chan_err(d40c, format, arg...) \
+ d40_err(chan2dev(d40c), format, ## arg)
+
+static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
+ int lli_len)
+{
+ bool is_log = chan_is_logical(d40c);
u32 align;
void *base;
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
d40d->lli_pool.base = NULL;
} else {
- d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+ d40d->lli_pool.size = lli_len * 2 * align;
base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
d40d->lli_pool.base = base;
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
}
if (is_log) {
- d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
- align);
- d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
- align);
+ d40d->lli_log.src = PTR_ALIGN(base, align);
+ d40d->lli_log.dst = d40d->lli_log.src + lli_len;
+
+ d40d->lli_pool.dma_addr = 0;
} else {
- d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
- align);
- d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
- align);
+ d40d->lli_phy.src = PTR_ALIGN(base, align);
+ d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
+
+ d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
+ d40d->lli_phy.src,
+ d40d->lli_pool.size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(d40c->base->dev,
+ d40d->lli_pool.dma_addr)) {
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.dma_addr = 0;
+ return -ENOMEM;
+ }
}
return 0;
}
-static void d40_pool_lli_free(struct d40_desc *d40d)
+static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+ if (d40d->lli_pool.dma_addr)
+ dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+
kfree(d40d->lli_pool.base);
d40d->lli_pool.base = NULL;
d40d->lli_pool.size = 0;
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
int i;
int ret = -EINVAL;
- if (d40c->log_num == D40_PHY_CHAN)
+ if (chan_is_physical(d40c))
return 0;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d);
+ d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+ d40_pool_lli_free(d40c, d40d);
d40_lcla_free_all(d40c, d40d);
kmem_cache_free(d40c->base->desc_slab, d40d);
}
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
list_add_tail(&desc->node, &d40c->active);
}
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
- int curr_lcla = -EINVAL, next_lcla;
+ struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
+ struct d40_phy_lli *lli_src = desc->lli_phy.src;
+ void __iomem *base = chan_base(chan);
+
+ writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
+}
- if (d40c->log_num == D40_PHY_CHAN) {
- d40_phy_lli_write(d40c->base->virtbase,
- d40c->phy_chan->num,
- d40d->lli_phy.dst,
- d40d->lli_phy.src);
- d40d->lli_current = d40d->lli_len;
- } else {
+static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
+{
+ struct d40_lcla_pool *pool = &chan->base->lcla_pool;
+ struct d40_log_lli_bidir *lli = &desc->lli_log;
+ int lli_current = desc->lli_current;
+ int lli_len = desc->lli_len;
+ bool cyclic = desc->cyclic;
+ int curr_lcla = -EINVAL;
+ int first_lcla = 0;
+ bool linkback;
- if ((d40d->lli_len - d40d->lli_current) > 1)
- curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+ /*
+	 * We may have partially running cyclic transfers, in case we didn't get
+ * enough LCLA entries.
+ */
+ linkback = cyclic && lli_current == 0;
- d40_log_lli_lcpa_write(d40c->lcpa,
- &d40d->lli_log.dst[d40d->lli_current],
- &d40d->lli_log.src[d40d->lli_current],
- curr_lcla);
+ /*
+ * For linkback, we need one LCLA even with only one link, because we
+ * can't link back to the one in LCPA space
+ */
+ if (linkback || (lli_len - lli_current > 1)) {
+ curr_lcla = d40_lcla_alloc_one(chan, desc);
+ first_lcla = curr_lcla;
+ }
- d40d->lli_current++;
- for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
- struct d40_log_lli *lcla;
+ /*
+ * For linkback, we normally load the LCPA in the loop since we need to
+ * link it to the second LCLA and not the first. However, if we
+ * couldn't even get a first LCLA, then we have to run in LCPA and
+ * reload manually.
+ */
+ if (!linkback || curr_lcla == -EINVAL) {
+ unsigned int flags = 0;
- if (d40d->lli_current + 1 < d40d->lli_len)
- next_lcla = d40_lcla_alloc_one(d40c, d40d);
- else
- next_lcla = -EINVAL;
+ if (curr_lcla == -EINVAL)
+ flags |= LLI_TERM_INT;
- lcla = d40c->base->lcla_pool.base +
- d40c->phy_chan->num * 1024 +
- 8 * curr_lcla * 2;
+ d40_log_lli_lcpa_write(chan->lcpa,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ curr_lcla,
+ flags);
+ lli_current++;
+ }
- d40_log_lli_lcla_write(lcla,
- &d40d->lli_log.dst[d40d->lli_current],
- &d40d->lli_log.src[d40d->lli_current],
- next_lcla);
+ if (curr_lcla < 0)
+ goto out;
- (void) dma_map_single(d40c->base->dev, lcla,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
+ for (; lli_current < lli_len; lli_current++) {
+ unsigned int lcla_offset = chan->phy_chan->num * 1024 +
+ 8 * curr_lcla * 2;
+ struct d40_log_lli *lcla = pool->base + lcla_offset;
+ unsigned int flags = 0;
+ int next_lcla;
- curr_lcla = next_lcla;
+ if (lli_current + 1 < lli_len)
+ next_lcla = d40_lcla_alloc_one(chan, desc);
+ else
+ next_lcla = linkback ? first_lcla : -EINVAL;
- if (curr_lcla == -EINVAL) {
- d40d->lli_current++;
- break;
- }
+ if (cyclic || next_lcla == -EINVAL)
+ flags |= LLI_TERM_INT;
+
+ if (linkback && curr_lcla == first_lcla) {
+ /* First link goes in both LCPA and LCLA */
+ d40_log_lli_lcpa_write(chan->lcpa,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ next_lcla, flags);
+ }
+
+ /*
+	 * One LCLA is left unused in the cyclic case if the very first
+	 * next_lcla allocation fails...
+ */
+ d40_log_lli_lcla_write(lcla,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ next_lcla, flags);
+
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ curr_lcla = next_lcla;
+
+ if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
+ lli_current++;
+ break;
}
}
+
+out:
+ desc->lli_current = lli_current;
+}
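The loop above chains the logical LLIs through LCLA entries, and for a cyclic (linkback) transfer the last link points back at the first LCLA instead of terminating. A minimal standalone C sketch of that link-selection rule, using hypothetical names rather than the driver's API:

    #include <stdio.h>
    #include <stdbool.h>

    /* Fake LCLA allocator: hands out slots 4, 5, 6, ... */
    static int fake_alloc(void)
    {
            static int next = 4;
            return next++;
    }

    /*
     * Pick the next link target the way the loop above does.  Returns -1
     * (no link, chain terminates) when no more links are needed and the
     * transfer is not cyclic.
     */
    static int next_link(int current, int len, int first_lcla, bool cyclic,
                         int (*alloc_lcla)(void))
    {
            if (current + 1 < len)
                    return alloc_lcla();            /* more links follow */
            return cyclic ? first_lcla : -1;        /* wrap back, or stop */
    }

    int main(void)
    {
            const int len = 4, first_lcla = 3;
            int i;

            for (i = 0; i < len; i++)
                    printf("lli %d links to LCLA %d\n", i,
                           next_link(i, len, first_lcla, true, fake_alloc));
            return 0;
    }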
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (chan_is_physical(d40c)) {
+ d40_phy_lli_load(d40c, d40d);
+ d40d->lli_current = d40d->lli_len;
+ } else
+ d40_log_lli_to_lcxa(d40c, d40d);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
-static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
-{
- struct d40_desc *d;
-
- if (list_empty(&d40c->queue))
- return NULL;
- list_for_each_entry(d, &d40c->queue, node)
- if (list_is_last(&d->node, &d40c->queue))
- break;
- return d;
-}
-
static int d40_psize_2_burst_size(bool is_log, int psize)
{
if (is_log) {
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
}
if (i == D40_SUSPEND_MAX_IT) {
- dev_err(&d40c->chan.dev->device,
- "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
- __func__, d40c->phy_chan->num, d40c->log_num,
+ chan_err(d40c,
+ "unable to suspend the chl %d (log: %d) status %x\n",
+ d40c->phy_chan->num, d40c->log_num,
status);
dump_stack();
ret = -EBUSY;
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c)
d40c->busy = false;
}
+static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
+ u32 event, int reg)
+{
+ void __iomem *addr = chan_base(d40c) + reg;
+ int tries;
+
+ if (!enable) {
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+ return;
+ }
+
+ /*
+ * The hardware sometimes doesn't register the enable when src and dst
+ * event lines are active on the same logical channel. Retry to ensure
+ * it does. Usually only one retry is sufficient.
+ */
+ tries = 100;
+ while (--tries) {
+ writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+
+ if (readl(addr) & D40_EVENTLINE_MASK(event))
+ break;
+ }
+
+ if (tries != 99)
+ dev_dbg(chan2dev(d40c),
+ "[%s] workaround enable S%cLNK (%d tries)\n",
+ __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+ 100 - tries);
+
+ WARN_ON(!tries);
+}
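The helper above works around hardware that occasionally ignores the event-line enable: it rewrites the register until the read-back confirms the bit, bounded at 100 attempts. The same bounded write-and-verify pattern, sketched against a fake register model (none of these names are the real I/O API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Fake register standing in for writel()/readl(); it ignores the first write. */
    static unsigned int fake_reg;
    static int writes_left_to_ignore = 1;

    static void reg_write(unsigned int v)
    {
            if (writes_left_to_ignore > 0)
                    writes_left_to_ignore--;
            else
                    fake_reg = v;
    }

    static unsigned int reg_read(void)
    {
            return fake_reg;
    }

    /* Rewrite the enable bit until the read-back confirms it, or give up. */
    static bool enable_with_retry(unsigned int bit, int max_tries)
    {
            int tries = max_tries;

            while (--tries) {
                    reg_write(bit);
                    if (reg_read() & bit)
                            break;
            }

            if (tries != max_tries - 1)
                    printf("workaround kicked in, %d extra write(s)\n",
                           max_tries - tries - 1);

            return tries != 0;
    }

    int main(void)
    {
            printf("enabled: %s\n", enable_with_retry(0x1, 100) ? "yes" : "no");
            return 0;
    }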
+
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
- u32 val;
unsigned long flags;
- /* Notice, that disable requires the physical channel to be stopped */
- if (do_enable)
- val = D40_ACTIVATE_EVENTLINE;
- else
- val = D40_DEACTIVATE_EVENTLINE;
-
spin_lock_irqsave(&d40c->phy_chan->lock, flags);
/* Enable event line connected to device (or memcpy) */
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- writel((val << D40_EVENTLINE_POS(event)) |
- ~D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ __d40_config_set_event(d40c, do_enable, event,
+ D40_CHAN_REG_SSLNK);
}
+
if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- writel((val << D40_EVENTLINE_POS(event)) |
- ~D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ __d40_config_set_event(d40c, do_enable, event,
+ D40_CHAN_REG_SDLNK);
}
spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
+ void __iomem *chanbase = chan_base(d40c);
u32 val;
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ val = readl(chanbase + D40_CHAN_REG_SSLNK);
+ val |= readl(chanbase + D40_CHAN_REG_SDLNK);
- val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
return val;
}
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c)
= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
};
- if (d40c->log_num == D40_PHY_CHAN)
+ if (chan_is_physical(d40c))
return phy_map[d40c->dma_cfg.mode_opt];
else
return log_map[d40c->dma_cfg.mode_opt];
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c)
/* Odd addresses are even addresses + 4 */
addr_base = (d40c->phy_chan->num % 2) * 4;
/* Setup channel mode to logical or physical */
- var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ var = ((u32)(chan_is_logical(d40c)) + 1) <<
D40_CHAN_POS(d40c->phy_chan->num);
writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c)
writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
+ int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
+ & D40_SREG_ELEM_LOG_LIDX_MASK;
+ void __iomem *chanbase = chan_base(d40c);
+
/* Set default config for CFG reg */
- writel(d40c->src_def_cfg,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSCFG);
- writel(d40c->dst_def_cfg,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDCFG);
+ writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
/* Set LIDX for lcla */
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT);
-
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSELT);
-
+ writel(lidx, chanbase + D40_CHAN_REG_SSELT);
+ writel(lidx, chanbase + D40_CHAN_REG_SDELT);
}
}
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c)
{
u32 num_elt;
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
>> D40_MEM_LCSP2_ECNT_POS;
- else
- num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT) &
- D40_SREG_ELEM_PHY_ECNT_MASK) >>
- D40_SREG_ELEM_PHY_ECNT_POS;
+ else {
+ u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
+ num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
+ >> D40_SREG_ELEM_PHY_ECNT_POS;
+ }
+
return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
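d40_residue() turns the remaining element count into bytes by scaling with the configured data width, i.e. one element covers 1 << data_width bytes. A quick worked example, assuming the usual encoding where width 0 is a byte, 1 a halfword and 2 a word:

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_elt = 24;      /* elements still queued in the channel */
            unsigned int width;

            /* residue in bytes = elements_left * (1 << data_width) */
            for (width = 0; width <= 2; width++)
                    printf("data_width %u -> %u bytes left\n",
                           width, num_elt * (1u << width));
            return 0;
    }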
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
{
bool is_link;
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
else
- is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK) &
- D40_SREG_LNK_PHYS_LNK_MASK;
+ is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
+ & D40_SREG_LNK_PHYS_LNK_MASK;
+
return is_link;
}
-static int d40_pause(struct dma_chan *chan)
+static int d40_pause(struct d40_chan *d40c)
{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
int res = 0;
unsigned long flags;
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan)
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res == 0) {
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
d40_config_set_event(d40c, false);
/* Resume the other logical channels if any */
if (d40_chan_has_events(d40c))
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan)
return res;
}
-static int d40_resume(struct dma_chan *chan)
+static int d40_resume(struct d40_chan *d40c)
{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
int res = 0;
unsigned long flags;
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
if (d40c->base->rev == 0)
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_SUSPEND_REQ);
goto no_suspend;
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan)
/* If bytes left to transfer or linked tx resume job */
if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_config_set_event(d40c, true);
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -911,75 +1019,20 @@ no_suspend:
return res;
}
-static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_terminate_all(struct d40_chan *chan)
{
- /* TODO: Write */
-}
-
-static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
-{
- struct d40_desc *d40d_prev = NULL;
- int i;
- u32 val;
-
- if (!list_empty(&d40c->queue))
- d40d_prev = d40_last_queued(d40c);
- else if (!list_empty(&d40c->active))
- d40d_prev = d40_first_active_get(d40c);
-
- if (!d40d_prev)
- return;
-
- /* Here we try to join this job with previous jobs */
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
-
- /* Figure out which link we're currently transmitting */
- for (i = 0; i < d40d_prev->lli_len; i++)
- if (val == d40d_prev->lli_phy.src[i].reg_lnk)
- break;
-
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
-
- if (i == (d40d_prev->lli_len - 1) && val > 0) {
- /* Change the current one */
- writel(virt_to_phys(d40d->lli_phy.src),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
- writel(virt_to_phys(d40d->lli_phy.dst),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
-
- d40d->is_hw_linked = true;
-
- } else if (i < d40d_prev->lli_len) {
- (void) dma_unmap_single(d40c->base->dev,
- virt_to_phys(d40d_prev->lli_phy.src),
- d40d_prev->lli_pool.size,
- DMA_TO_DEVICE);
+ unsigned long flags;
+ int ret = 0;
- /* Keep the settings */
- val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
- ~D40_SREG_LNK_PHYS_LNK_MASK;
- d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
- val | virt_to_phys(d40d->lli_phy.src);
+ ret = d40_pause(chan);
+ if (!ret && chan_is_physical(chan))
+ ret = d40_channel_execute_command(chan, D40_DMA_STOP);
- val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
- ~D40_SREG_LNK_PHYS_LNK_MASK;
- d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
- val | virt_to_phys(d40d->lli_phy.dst);
+ spin_lock_irqsave(&chan->lock, flags);
+ d40_term_all(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
- (void) dma_map_single(d40c->base->dev,
- d40d_prev->lli_phy.src,
- d40d_prev->lli_pool.size,
- DMA_TO_DEVICE);
- d40d->is_hw_linked = true;
- }
+ return ret;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
unsigned long flags;
- (void) d40_pause(&d40c->chan);
-
spin_lock_irqsave(&d40c->lock, flags);
d40c->chan.cookie++;
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
d40d->txd.cookie = d40c->chan.cookie;
- if (d40c->log_num == D40_PHY_CHAN)
- d40_tx_submit_phy(d40c, d40d);
- else
- d40_tx_submit_log(d40c, d40d);
-
d40_desc_queue(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
- (void) d40_resume(&d40c->chan);
-
return tx->cookie;
}
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c)
if (d40c->base->rev == 0) {
int err;
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
err = d40_channel_execute_command(d40c,
D40_DMA_SUSPEND_REQ);
if (err)
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c)
}
}
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_config_set_event(d40c, true);
return d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
/* Add to active queue */
d40_desc_submit(d40c, d40d);
- /*
- * If this job is already linked in hw,
- * do not submit it.
- */
-
- if (!d40d->is_hw_linked) {
- /* Initiate DMA job */
- d40_desc_load(d40c, d40d);
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
- /* Start dma job */
- err = d40_start(d40c);
+ /* Start dma job */
+ err = d40_start(d40c);
- if (err)
- return NULL;
- }
+ if (err)
+ return NULL;
}
return d40d;
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40d == NULL)
return;
- d40_lcla_free_all(d40c, d40d);
+ if (d40d->cyclic) {
+ /*
+ * If this was a paritially loaded list, we need to reloaded
+ * it, and only when the list is completed. We need to check
+ * for done because the interrupt will hit for every link, and
+ * not just the last one.
+ */
+ if (d40d->lli_current < d40d->lli_len
+ && !d40_tx_is_linked(d40c)
+ && !d40_residue(d40c)) {
+ d40_lcla_free_all(d40c, d40d);
+ d40_desc_load(d40c, d40d);
+ (void) d40_start(d40c);
- if (d40d->lli_current < d40d->lli_len) {
- d40_desc_load(d40c, d40d);
- /* Start dma job */
- (void) d40_start(d40c);
- return;
- }
+ if (d40d->lli_current == d40d->lli_len)
+ d40d->lli_current = 0;
+ }
+ } else {
+ d40_lcla_free_all(d40c, d40d);
- if (d40_queue_start(d40c) == NULL)
- d40c->busy = false;
+ if (d40d->lli_current < d40d->lli_len) {
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+ }
d40c->pending_tx++;
tasklet_schedule(&d40c->tasklet);
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
/* Get first active entry from list */
d40d = d40_first_active_get(d40c);
-
if (d40d == NULL)
goto err;
- d40c->completed = d40d->txd.cookie;
+ if (!d40d->cyclic)
+ d40c->completed = d40d->txd.cookie;
/*
* If terminating a channel pending_tx is set to zero.
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
- if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40d);
- d40_desc_remove(d40d);
- d40_desc_free(d40c, d40d);
- } else {
- if (!d40d->is_in_client_list) {
+ if (!d40d->cyclic) {
+ if (async_tx_test_ack(&d40d->txd)) {
+ d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
- d40_lcla_free_all(d40c, d40d);
- list_add_tail(&d40d->node, &d40c->client);
- d40d->is_in_client_list = true;
+ d40_desc_free(d40c, d40d);
+ } else {
+ if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
+ }
}
}
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
if (!il[row].is_error)
dma_tc_handle(d40c);
else
- dev_err(base->dev,
- "[%s] IRQ chan: %ld offset %d idx %d\n",
- __func__, chan, il[row].offset, idx);
+ d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
+ chan, il[row].offset, idx);
spin_unlock(&d40c->lock);
}
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
if (!conf->dir) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
- __func__);
+ chan_err(d40c, "Invalid direction.\n");
res = -EINVAL;
}
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
d40c->runtime_addr == 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid TX channel address (%d)\n",
- __func__, conf->dst_dev_type);
+ chan_err(d40c, "Invalid TX channel address (%d)\n",
+ conf->dst_dev_type);
res = -EINVAL;
}
if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
d40c->runtime_addr == 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid RX channel address (%d)\n",
- __func__, conf->src_dev_type);
+ chan_err(d40c, "Invalid RX channel address (%d)\n",
+ conf->src_dev_type);
res = -EINVAL;
}
if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
dst_event_group == STEDMA40_DEV_DST_MEMORY) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
- __func__);
+ chan_err(d40c, "Invalid dst\n");
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
src_event_group == STEDMA40_DEV_SRC_MEMORY) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
- __func__);
+ chan_err(d40c, "Invalid src\n");
res = -EINVAL;
}
if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
- dev_err(&d40c->chan.dev->device,
- "[%s] No event line\n", __func__);
+ chan_err(d40c, "No event line\n");
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
(src_event_group != dst_event_group)) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid event group\n", __func__);
+ chan_err(d40c, "Invalid event group\n");
res = -EINVAL;
}
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
* DMAC HW supports it. Will be added to this driver,
* in case any dma client requires it.
*/
- dev_err(&d40c->chan.dev->device,
- "[%s] periph to periph not supported\n",
- __func__);
+ chan_err(d40c, "periph to periph not supported\n");
res = -EINVAL;
}
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
* src (burst x width) == dst (burst x width)
*/
- dev_err(&d40c->chan.dev->device,
- "[%s] src (burst x width) != dst (burst x width)\n",
- __func__);
+ chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
res = -EINVAL;
}
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
} else {
- dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
- __func__);
+ chan_err(d40c, "No memcpy\n");
return -EINVAL;
}
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c)
/* Release client owned descriptors */
if (!list_empty(&d40c->client))
list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d);
+ d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
d40_desc_free(d40c, d);
}
if (phy == NULL) {
- dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
- __func__);
+ chan_err(d40c, "phy == null\n");
return -EINVAL;
}
if (phy->allocated_src == D40_ALLOC_FREE &&
phy->allocated_dst == D40_ALLOC_FREE) {
- dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
- __func__);
+ chan_err(d40c, "channel already free\n");
return -EINVAL;
}
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c)
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
is_src = true;
} else {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unknown direction\n", __func__);
+ chan_err(d40c, "Unknown direction\n");
return -EINVAL;
}
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
- dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
- __func__);
+ chan_err(d40c, "suspend failed\n");
return res;
}
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
/* Release logical channel, deactivate the event line */
d40_config_set_event(d40c, false);
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
if (res) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Executing RUN command\n",
- __func__);
+ chan_err(d40c,
+ "Executing RUN command\n");
return res;
}
}
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c)
/* Release physical channel */
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to stop channel\n", __func__);
+ chan_err(d40c, "Failed to stop channel\n");
return res;
}
d40c->phy_chan = NULL;
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c)
static bool d40_is_paused(struct d40_chan *d40c)
{
+ void __iomem *chanbase = chan_base(d40c);
bool is_paused = false;
unsigned long flags;
void __iomem *active_reg;
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
- if (d40c->log_num == D40_PHY_CHAN) {
+ if (chan_is_physical(d40c)) {
if (d40c->phy_chan->num % 2 == 0)
active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
else
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c)
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ status = readl(chanbase + D40_CHAN_REG_SDLNK);
} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unknown direction\n", __func__);
+ chan_err(d40c, "Unknown direction\n");
goto _exit;
}
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan)
return bytes_left;
}
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
- struct scatterlist *sgl_dst,
- struct scatterlist *sgl_src,
- unsigned int sgl_len,
- unsigned long dma_flags)
+static int
+d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
+ struct scatterlist *sg_src, struct scatterlist *sg_dst,
+ unsigned int sg_len, dma_addr_t src_dev_addr,
+ dma_addr_t dst_dev_addr)
{
- int res;
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_half_channel_info *src_info = &cfg->src_info;
+ struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+ int ret;
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unallocated channel.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ ret = d40_log_sg_to_lli(sg_src, sg_len,
+ src_dev_addr,
+ desc->lli_log.src,
+ chan->log_def.lcsp1,
+ src_info->data_width,
+ dst_info->data_width);
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
+ ret = d40_log_sg_to_lli(sg_dst, sg_len,
+ dst_dev_addr,
+ desc->lli_log.dst,
+ chan->log_def.lcsp3,
+ dst_info->data_width,
+ src_info->data_width);
- if (d40d == NULL)
+ return ret < 0 ? ret : 0;
+}
+
+static int
+d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
+ struct scatterlist *sg_src, struct scatterlist *sg_dst,
+ unsigned int sg_len, dma_addr_t src_dev_addr,
+ dma_addr_t dst_dev_addr)
+{
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_half_channel_info *src_info = &cfg->src_info;
+ struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+ unsigned long flags = 0;
+ int ret;
+
+ if (desc->cyclic)
+ flags |= LLI_CYCLIC | LLI_TERM_INT;
+
+ ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
+ desc->lli_phy.src,
+ virt_to_phys(desc->lli_phy.src),
+ chan->src_def_cfg,
+ src_info, dst_info, flags);
+
+ ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
+ desc->lli_phy.dst,
+ virt_to_phys(desc->lli_phy.dst),
+ chan->dst_def_cfg,
+ dst_info, src_info, flags);
+
+ dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
+ desc->lli_pool.size, DMA_TO_DEVICE);
+
+ return ret < 0 ? ret : 0;
+}
+
+
+static struct d40_desc *
+d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
+ unsigned int sg_len, unsigned long dma_flags)
+{
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct d40_desc *desc;
+ int ret;
+
+ desc = d40_desc_get(chan);
+ if (!desc)
+ return NULL;
+
+ desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
+ cfg->dst_info.data_width);
+ if (desc->lli_len < 0) {
+ chan_err(chan, "Unaligned size\n");
goto err;
+ }
- d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
+ ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
+ if (ret < 0) {
+ chan_err(chan, "Could not allocate lli\n");
goto err;
}
- d40d->lli_current = 0;
- d40d->txd.flags = dma_flags;
- if (d40c->log_num != D40_PHY_CHAN) {
+ desc->lli_current = 0;
+ desc->txd.flags = dma_flags;
+ desc->txd.tx_submit = d40_tx_submit;
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
+ dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
- (void) d40_log_sg_to_lli(sgl_src,
- sgl_len,
- d40d->lli_log.src,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
-
- (void) d40_log_sg_to_lli(sgl_dst,
- sgl_len,
- d40d->lli_log.dst,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width);
- } else {
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
+ return desc;
+
+err:
+ d40_desc_free(chan, desc);
+ return NULL;
+}
+
+static dma_addr_t
+d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+{
+ struct stedma40_platform_data *plat = chan->base->plat_data;
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ dma_addr_t addr;
- res = d40_phy_sg_to_lli(sgl_src,
- sgl_len,
- 0,
- d40d->lli_phy.src,
- virt_to_phys(d40d->lli_phy.src),
- d40c->src_def_cfg,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.psize);
+ if (chan->runtime_addr)
+ return chan->runtime_addr;
- if (res < 0)
- goto err;
+ if (direction == DMA_FROM_DEVICE)
+ addr = plat->dev_rx[cfg->src_dev_type];
+ else if (direction == DMA_TO_DEVICE)
+ addr = plat->dev_tx[cfg->dst_dev_type];
- res = d40_phy_sg_to_lli(sgl_dst,
- sgl_len,
- 0,
- d40d->lli_phy.dst,
- virt_to_phys(d40d->lli_phy.dst),
- d40c->dst_def_cfg,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.psize);
+ return addr;
+}
- if (res < 0)
- goto err;
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
+ struct scatterlist *sg_dst, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long dma_flags)
+{
+ struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
+ dma_addr_t src_dev_addr = 0;
+ dma_addr_t dst_dev_addr = 0;
+ struct d40_desc *desc;
+ unsigned long flags;
+ int ret;
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
+ if (!chan->phy_chan) {
+ chan_err(chan, "Cannot prepare unallocated channel\n");
+ return NULL;
}
- dma_async_tx_descriptor_init(&d40d->txd, chan);
- d40d->txd.tx_submit = d40_tx_submit;
+ spin_lock_irqsave(&chan->lock, flags);
- spin_unlock_irqrestore(&d40c->lock, flags);
+ desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
+ if (desc == NULL)
+ goto err;
+
+ if (sg_next(&sg_src[sg_len - 1]) == sg_src)
+ desc->cyclic = true;
+
+ if (direction != DMA_NONE) {
+ dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
+
+ if (direction == DMA_FROM_DEVICE)
+ src_dev_addr = dev_addr;
+ else if (direction == DMA_TO_DEVICE)
+ dst_dev_addr = dev_addr;
+ }
+
+ if (chan_is_logical(chan))
+ ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
+ sg_len, src_dev_addr, dst_dev_addr);
+ else
+ ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
+ sg_len, src_dev_addr, dst_dev_addr);
+
+ if (ret) {
+ chan_err(chan, "Failed to prepare %s sg job: %d\n",
+ chan_is_logical(chan) ? "log" : "phy", ret);
+ goto err;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return &desc->txd;
- return &d40d->txd;
err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
+ if (desc)
+ d40_desc_free(chan, desc);
+ spin_unlock_irqrestore(&chan->lock, flags);
return NULL;
}
-EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
}
EXPORT_SYMBOL(stedma40_filter);
+static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
+{
+ bool realtime = d40c->dma_cfg.realtime;
+ bool highprio = d40c->dma_cfg.high_priority;
+ u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
+ u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+ u32 event = D40_TYPE_TO_EVENT(dev_type);
+ u32 group = D40_TYPE_TO_GROUP(dev_type);
+ u32 bit = 1 << event;
+
+ /* Destination event lines are stored in the upper halfword */
+ if (!src)
+ bit <<= 16;
+
+ writel(bit, d40c->base->virtbase + prioreg + group * 4);
+ writel(bit, d40c->base->virtbase + rtreg + group * 4);
+}
+
+static void d40_set_prio_realtime(struct d40_chan *d40c)
+{
+ if (d40c->base->rev < 3)
+ return;
+
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+
+ if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+}
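__d40_set_prio_rt() places the event bit in the lower halfword for source lines and in the upper halfword for destination lines, and selects the register by event group (one 32-bit register per group, hence the group * 4 offset). A small illustration of that bit placement, with the event/group split assumed here to be modulo/division by 16:

    #include <stdio.h>

    /* Assumed split for illustration: event = dev_type % 16, group = dev_type / 16. */
    static unsigned int event_of(int dev_type) { return dev_type % 16; }
    static unsigned int group_of(int dev_type) { return dev_type / 16; }

    static void show(int dev_type, int is_src)
    {
            unsigned int bit = 1u << event_of(dev_type);

            if (!is_src)
                    bit <<= 16;     /* destination event lines use the upper halfword */

            printf("dev_type %2d (%s): register offset +%u, bit 0x%08x\n",
                   dev_type, is_src ? "src" : "dst", group_of(dev_type) * 4, bit);
    }

    int main(void)
    {
            show(5, 1);     /* source event 5, group 0 */
            show(5, 0);     /* same event as destination: shifted to bits 16..31 */
            show(19, 0);    /* event 3 in group 1: next 32-bit register */
            return 0;
    }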
+
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (!d40c->configured) {
err = d40_config_memcpy(d40c);
if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to configure memcpy channel\n",
- __func__);
+ chan_err(d40c, "Failed to configure memcpy channel\n");
goto fail;
}
}
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
err = d40_allocate_channel(d40c);
if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to allocate channel\n", __func__);
+ chan_err(d40c, "Failed to allocate channel\n");
goto fail;
}
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
- &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+ &d40c->dst_def_cfg, chan_is_logical(d40c));
- if (d40c->log_num != D40_PHY_CHAN) {
+ d40_set_prio_realtime(d40c);
+
+ if (chan_is_logical(d40c)) {
d40_log_cfg(&d40c->dma_cfg,
&d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
@@ -1886,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
unsigned long flags;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot free unallocated channel\n", __func__);
+ chan_err(d40c, "Cannot free unallocated channel\n");
return;
}
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
err = d40_free_dma(d40c);
if (err)
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to free channel\n", __func__);
+ chan_err(d40c, "Failed to free channel\n");
spin_unlock_irqrestore(&d40c->lock, flags);
}
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
size_t size,
unsigned long dma_flags)
{
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
-
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
-
- if (d40d == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Descriptor is NULL\n", __func__);
- goto err;
- }
+ struct scatterlist dst_sg;
+ struct scatterlist src_sg;
- d40d->txd.flags = dma_flags;
- d40d->lli_len = d40_size_2_dmalen(size,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- goto err;
- }
+ sg_init_table(&dst_sg, 1);
+ sg_init_table(&src_sg, 1);
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_address(&src_sg) = src;
- dma_async_tx_descriptor_init(&d40d->txd, chan);
+ sg_dma_len(&dst_sg) = size;
+ sg_dma_len(&src_sg) = size;
- d40d->txd.tx_submit = d40_tx_submit;
-
- if (d40c->log_num != D40_PHY_CHAN) {
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
- d40d->lli_current = 0;
-
- if (d40_log_buf_to_lli(d40d->lli_log.src,
- src,
- size,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- true) == NULL)
- goto err;
-
- if (d40_log_buf_to_lli(d40d->lli_log.dst,
- dst,
- size,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- true) == NULL)
- goto err;
-
- } else {
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
-
- if (d40_phy_buf_to_lli(d40d->lli_phy.src,
- src,
- size,
- d40c->dma_cfg.src_info.psize,
- 0,
- d40c->src_def_cfg,
- true,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- false) == NULL)
- goto err;
-
- if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
- dst,
- size,
- d40c->dma_cfg.dst_info.psize,
- 0,
- d40c->dst_def_cfg,
- true,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- false) == NULL)
- goto err;
-
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
- }
-
- spin_unlock_irqrestore(&d40c->lock, flags);
- return &d40d->txd;
-
-err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return NULL;
+ return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *
-d40_prep_sg(struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long dma_flags)
+d40_prep_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long dma_flags)
{
if (dst_nents != src_nents)
return NULL;
- return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
-}
-
-static int d40_prep_slave_sg_log(struct d40_desc *d40d,
- struct d40_chan *d40c,
- struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_data_direction direction,
- unsigned long dma_flags)
-{
- dma_addr_t dev_addr = 0;
- int total_size;
-
- d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- return -EINVAL;
- }
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- d40d->lli_current = 0;
-
- if (direction == DMA_FROM_DEVICE)
- if (d40c->runtime_addr)
- dev_addr = d40c->runtime_addr;
- else
- dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- else if (direction == DMA_TO_DEVICE)
- if (d40c->runtime_addr)
- dev_addr = d40c->runtime_addr;
- else
- dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
-
- else
- return -EINVAL;
-
- total_size = d40_log_sg_to_dev(sgl, sg_len,
- &d40d->lli_log,
- &d40c->log_def,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- direction,
- dev_addr);
-
- if (total_size < 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
- struct d40_chan *d40c,
- struct scatterlist *sgl,
- unsigned int sgl_len,
- enum dma_data_direction direction,
- unsigned long dma_flags)
-{
- dma_addr_t src_dev_addr;
- dma_addr_t dst_dev_addr;
- int res;
-
- d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- return -EINVAL;
- }
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- d40d->lli_current = 0;
-
- if (direction == DMA_FROM_DEVICE) {
- dst_dev_addr = 0;
- if (d40c->runtime_addr)
- src_dev_addr = d40c->runtime_addr;
- else
- src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- } else if (direction == DMA_TO_DEVICE) {
- if (d40c->runtime_addr)
- dst_dev_addr = d40c->runtime_addr;
- else
- dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
- src_dev_addr = 0;
- } else
- return -EINVAL;
-
- res = d40_phy_sg_to_lli(sgl,
- sgl_len,
- src_dev_addr,
- d40d->lli_phy.src,
- virt_to_phys(d40d->lli_phy.src),
- d40c->src_def_cfg,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.psize);
- if (res < 0)
- return res;
-
- res = d40_phy_sg_to_lli(sgl,
- sgl_len,
- dst_dev_addr,
- d40d->lli_phy.dst,
- virt_to_phys(d40d->lli_phy.dst),
- d40c->dst_def_cfg,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.psize);
- if (res < 0)
- return res;
-
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
- return 0;
+ return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
enum dma_data_direction direction,
unsigned long dma_flags)
{
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
- int err;
-
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot prepare unallocated channel\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ return NULL;
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
+ return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
+}
- if (d40d == NULL)
- goto err;
+static struct dma_async_tx_descriptor *
+dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ unsigned int periods = buf_len / period_len;
+ struct dma_async_tx_descriptor *txd;
+ struct scatterlist *sg;
+ int i;
- if (d40c->log_num != D40_PHY_CHAN)
- err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
- direction, dma_flags);
- else
- err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
- direction, dma_flags);
- if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to prepare %s slave sg job: %d\n",
- __func__,
- d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
- goto err;
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ for (i = 0; i < periods; i++) {
+ sg_dma_address(&sg[i]) = dma_addr;
+ sg_dma_len(&sg[i]) = period_len;
+ dma_addr += period_len;
}
- d40d->txd.flags = dma_flags;
+ sg[periods].offset = 0;
+ sg[periods].length = 0;
+ sg[periods].page_link =
+ ((unsigned long)sg | 0x01) & ~0x02;
- dma_async_tx_descriptor_init(&d40d->txd, chan);
+ txd = d40_prep_sg(chan, sg, sg, periods, direction,
+ DMA_PREP_INTERRUPT);
- d40d->txd.tx_submit = d40_tx_submit;
+ kfree(sg);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return &d40d->txd;
-
-err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return NULL;
+ return txd;
}
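dma40_prep_dma_cyclic() chops the buffer into period-sized entries and appends one extra scatterlist slot whose page_link chains back to the start, so d40_prep_sg() sees a circular list and marks the descriptor cyclic. A standalone sketch of the same idea, with an explicit next pointer instead of the kernel's page_link encoding:

    #include <stdio.h>

    /*
     * Minimal stand-in for a scatterlist entry: the real kernel scatterlist
     * encodes the chain in page_link (the "| 0x01" above); here an explicit
     * next pointer is used instead.
     */
    struct seg {
            unsigned long addr;
            unsigned long len;
            struct seg *next;
    };

    int main(void)
    {
            enum { BUF_LEN = 4096, PERIOD_LEN = 1024, PERIODS = BUF_LEN / PERIOD_LEN };
            unsigned long buf = 0x80000000ul;
            struct seg sg[PERIODS];
            struct seg *s;
            int i;

            for (i = 0; i < PERIODS; i++) {
                    sg[i].addr = buf + (unsigned long)i * PERIOD_LEN;
                    sg[i].len  = PERIOD_LEN;
                    sg[i].next = &sg[(i + 1) % PERIODS];    /* last entry wraps back */
            }

            /* Walk one full cycle plus one step to show the wrap-around. */
            for (i = 0, s = &sg[0]; i <= PERIODS; i++, s = s->next)
                    printf("step %d: addr 0x%lx len %lu\n", i, s->addr, s->len);
            return 0;
    }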
static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
int ret;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot read status of unallocated channel\n",
- __func__);
+ chan_err(d40c, "Cannot read status of unallocated channel\n");
return -EINVAL;
}
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan)
unsigned long flags;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated!\n", __func__);
+ chan_err(d40c, "Channel is not allocated!\n");
return;
}
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
return;
}
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
if (config_maxburst >= 16)
psize = STEDMA40_PSIZE_LOG_16;
else if (config_maxburst >= 8)
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
/* Fill in register values */
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
else
d40_phy_cfg(cfg, &d40c->src_def_cfg,
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan,
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
- unsigned long flags;
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated!\n", __func__);
+ chan_err(d40c, "Channel is not allocated!\n");
return -EINVAL;
}
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&d40c->lock, flags);
- d40_term_all(d40c);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return 0;
+ return d40_terminate_all(d40c);
case DMA_PAUSE:
- return d40_pause(chan);
+ return d40_pause(d40c);
case DMA_RESUME:
- return d40_resume(chan);
+ return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
}
}
+static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
+{
+ if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+ dev->device_prep_slave_sg = d40_prep_slave_sg;
+
+ if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
+ dev->device_prep_dma_memcpy = d40_prep_memcpy;
+
+ /*
+ * This controller can only access address at even
+ * 32bit boundaries, i.e. 2^2
+ */
+ dev->copy_align = 2;
+ }
+
+ if (dma_has_cap(DMA_SG, dev->cap_mask))
+ dev->device_prep_dma_sg = d40_prep_memcpy_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
+ dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
+
+ dev->device_alloc_chan_resources = d40_alloc_chan_resources;
+ dev->device_free_chan_resources = d40_free_chan_resources;
+ dev->device_issue_pending = d40_issue_pending;
+ dev->device_tx_status = d40_tx_status;
+ dev->device_control = d40_control;
+ dev->dev = base->dev;
+}
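d40_ops_init() fills in the dma_device callbacks once, driven by the capability mask, instead of repeating the assignments for the slave, memcpy and combined devices below. A toy version of that capability-driven dispatch (illustrative flags and ops structure, not the dmaengine types):

    #include <stdio.h>

    /* Illustrative capability flags and ops table, not the dmaengine types. */
    enum caps { CAP_SLAVE = 1 << 0, CAP_MEMCPY = 1 << 1, CAP_CYCLIC = 1 << 2 };

    struct ops {
            const char *prep_slave;
            const char *prep_memcpy;
            const char *prep_cyclic;
            int copy_align;
    };

    static void ops_init(unsigned int caps, struct ops *o)
    {
            if (caps & CAP_SLAVE)
                    o->prep_slave = "d40_prep_slave_sg";

            if (caps & CAP_MEMCPY) {
                    o->prep_memcpy = "d40_prep_memcpy";
                    o->copy_align = 2;      /* even 32-bit addresses only */
            }

            if (caps & CAP_CYCLIC)
                    o->prep_cyclic = "dma40_prep_dma_cyclic";
    }

    int main(void)
    {
            struct ops slave_only = { 0 };

            ops_init(CAP_SLAVE | CAP_CYCLIC, &slave_only);
            printf("slave=%s cyclic=%s memcpy=%s align=%d\n",
                   slave_only.prep_slave, slave_only.prep_cyclic,
                   slave_only.prep_memcpy ? slave_only.prep_memcpy : "(none)",
                   slave_only.copy_align);
            return 0;
    }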
+
static int __init d40_dmaengine_init(struct d40_base *base,
int num_reserved_chans)
{
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
- base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
- base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_slave.device_tx_status = d40_tx_status;
- base->dma_slave.device_issue_pending = d40_issue_pending;
- base->dma_slave.device_control = d40_control;
- base->dma_slave.dev = base->dev;
+ d40_ops_init(base, &base->dma_slave);
err = dma_async_device_register(&base->dma_slave);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to register slave channels\n",
- __func__);
+ d40_err(base->dev, "Failed to register slave channels\n");
goto failure1;
}
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
- base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_memcpy.device_tx_status = d40_tx_status;
- base->dma_memcpy.device_issue_pending = d40_issue_pending;
- base->dma_memcpy.device_control = d40_control;
- base->dma_memcpy.dev = base->dev;
- /*
- * This controller can only access address at even
- * 32bit boundaries, i.e. 2^2
- */
- base->dma_memcpy.copy_align = 2;
+ dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
+
+ d40_ops_init(base, &base->dma_memcpy);
err = dma_async_device_register(&base->dma_memcpy);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to regsiter memcpy only channels\n",
- __func__);
+ d40_err(base->dev,
+ "Failed to regsiter memcpy only channels\n");
goto failure2;
}
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
- dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
- base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_both.device_free_chan_resources = d40_free_chan_resources;
- base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_both.device_tx_status = d40_tx_status;
- base->dma_both.device_issue_pending = d40_issue_pending;
- base->dma_both.device_control = d40_control;
- base->dma_both.dev = base->dev;
- base->dma_both.copy_align = 2;
+ dma_cap_set(DMA_SG, base->dma_both.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
+
+ d40_ops_init(base, &base->dma_both);
err = dma_async_device_register(&base->dma_both);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to register logical and physical capable channels\n",
- __func__);
+ d40_err(base->dev,
+ "Failed to register logical and physical capable channels\n");
goto failure3;
}
return 0;
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
/*
* D40_DREG_PERIPHID2 Depends on HW revision:
- * MOP500/HREF ED has 0x0008,
+ * DB8500ed has 0x0008,
* ? has 0x0018,
- * HREF V1 has 0x0028
+ * DB8500v1 has 0x0028
+ * DB8500v2 has 0x0038
*/
{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "[%s] No matching clock found\n",
- __func__);
+ d40_err(&pdev->dev, "No matching clock found\n");
goto failure;
}
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
if (dma_id_regs[i].val !=
readl(virtbase + dma_id_regs[i].reg)) {
- dev_err(&pdev->dev,
- "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- __func__,
+ d40_err(&pdev->dev,
+ "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
dma_id_regs[i].val,
dma_id_regs[i].reg,
readl(virtbase + dma_id_regs[i].reg));
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
D40_HW_DESIGNER) {
- dev_err(&pdev->dev,
- "[%s] Unknown designer! Got %x wanted %x\n",
- __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+ d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
+ val & D40_DREG_PERIPHID2_DESIGNER_MASK,
D40_HW_DESIGNER);
goto failure;
}
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL) {
- dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ d40_err(&pdev->dev, "Out of memory\n");
goto failure;
}
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base)
static int __init d40_lcla_allocate(struct d40_base *base)
{
+ struct d40_lcla_pool *pool = &base->lcla_pool;
unsigned long *page_list;
int i, j;
int ret = 0;
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.pages);
if (!page_list[i]) {
- dev_err(base->dev,
- "[%s] Failed to allocate %d pages.\n",
- __func__, base->lcla_pool.pages);
+ d40_err(base->dev, "Failed to allocate %d pages.\n",
+ base->lcla_pool.pages);
for (j = 0; j < i; j++)
free_pages(page_list[j], base->lcla_pool.pages);
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
LCLA_ALIGNMENT);
}
+ pool->dma_addr = dma_map_single(base->dev, pool->base,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(base->dev, pool->dma_addr)) {
+ pool->dma_addr = 0;
+ ret = -ENOMEM;
+ goto failure;
+ }
+
writel(virt_to_phys(base->lcla_pool.base),
base->virtbase + D40_DREG_LCLA);
failure:
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
if (!res) {
ret = -ENOENT;
- dev_err(&pdev->dev,
- "[%s] No \"lcpa\" memory resource\n",
- __func__);
+ d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
goto failure;
}
base->lcpa_size = resource_size(res);
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- dev_err(&pdev->dev,
- "[%s] Failed to request LCPA region 0x%x-0x%x\n",
- __func__, res->start, res->end);
+ d40_err(&pdev->dev,
+ "Failed to request LCPA region 0x%x-0x%x\n",
+ res->start, res->end);
goto failure;
}
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev)
base->lcpa_base = ioremap(res->start, resource_size(res));
if (!base->lcpa_base) {
ret = -ENOMEM;
- dev_err(&pdev->dev,
- "[%s] Failed to ioremap LCPA region\n",
- __func__);
+ d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
ret = d40_lcla_allocate(base);
if (ret) {
- dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
- __func__);
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
goto failure;
}
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev)
base->irq = platform_get_irq(pdev, 0);
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
-
if (ret) {
- dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ d40_err(&pdev->dev, "No IRQ defined\n");
goto failure;
}
@@ -3025,6 +2917,12 @@ failure:
kmem_cache_destroy(base->desc_slab);
if (base->virtbase)
iounmap(base->virtbase);
+
+ if (base->lcla_pool.dma_addr)
+ dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
+
if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
free_pages((unsigned long)base->lcla_pool.base,
base->lcla_pool.pages);
@@ -3049,7 +2947,7 @@ failure:
kfree(base);
}
- dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ d40_err(&pdev->dev, "probe failed\n");
return ret;
}
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = {
},
};
-int __init stedma40_init(void)
+static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 0b096a38322d..cad9e1daedff 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
dma_addr_t data,
u32 data_size,
- int psize,
dma_addr_t next_lli,
u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device)
+ struct stedma40_half_channel_info *info,
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
+ bool term_int = flags & LLI_TERM_INT;
+ unsigned int data_width = info->data_width;
+ int psize = info->psize;
int num_elems;
if (psize == STEDMA40_PSIZE_PHY_1)
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
* Distance to next element sized entry.
* Usually the size of the element unless you want gaps.
*/
- if (!is_device)
+ if (addr_inc)
lli->reg_elt |= (0x1 << data_width) <<
D40_SREG_ELEM_PHY_EIDX_POS;
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
return seg_max;
}
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
- dma_addr_t addr,
- u32 size,
- int psize,
- dma_addr_t lli_phys,
- u32 reg_cfg,
- bool term_int,
- u32 data_width1,
- u32 data_width2,
- bool is_device)
+static struct d40_phy_lli *
+d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
+ dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags)
{
+ bool lastlink = flags & LLI_LAST_LINK;
+ bool addr_inc = flags & LLI_ADDR_INC;
+ bool term_int = flags & LLI_TERM_INT;
+ bool cyclic = flags & LLI_CYCLIC;
int err;
dma_addr_t next = lli_phys;
int size_rest = size;
int size_seg = 0;
+ /*
+ * This piece may be split up based on d40_seg_size(); we only want the
+ * term int on the last part.
+ */
+ if (term_int)
+ flags &= ~LLI_TERM_INT;
+
do {
- size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_seg = d40_seg_size(size_rest, info->data_width,
+ otherinfo->data_width);
size_rest -= size_seg;
- if (term_int && size_rest == 0)
- next = 0;
+ if (size_rest == 0 && term_int)
+ flags |= LLI_TERM_INT;
+
+ if (size_rest == 0 && lastlink)
+ next = cyclic ? first_phys : 0;
else
next = ALIGN(next + sizeof(struct d40_phy_lli),
D40_LLI_ALIGN);
- err = d40_phy_fill_lli(lli,
- addr,
- size_seg,
- psize,
- next,
- reg_cfg,
- !next,
- data_width1,
- is_device);
+ err = d40_phy_fill_lli(lli, addr, size_seg, next,
+ reg_cfg, info, flags);
if (err)
goto err;
lli++;
- if (!is_device)
+ if (addr_inc)
addr += size_seg;
} while (size_rest);
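d40_phy_buf_to_lli() may split one buffer into several hardware LLIs via d40_seg_size(), and the terminal-interrupt flag is deliberately deferred so that only the last piece raises it (and only the last piece decides whether to link back or terminate). A standalone sketch of that split-and-flag-last loop, with a simple size cap standing in for d40_seg_size():

    #include <stdbool.h>
    #include <stdio.h>

    /* Simple size cap standing in for d40_seg_size(). */
    static int seg_size(int rest, int seg_max)
    {
            return rest > seg_max ? seg_max : rest;
    }

    int main(void)
    {
            int size = 70000, seg_max = 32768;
            int rest = size, piece = 0;

            do {
                    int seg = seg_size(rest, seg_max);
                    bool last;

                    rest -= seg;
                    last = (rest == 0);

                    printf("piece %d: %d bytes%s\n", piece++, seg,
                           last ? " (terminal interrupt / link decision here)" : "");
            } while (rest);

            return 0;
    }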
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli_sg,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width1,
- u32 data_width2,
- int psize)
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags)
{
int total_size = 0;
int i;
struct scatterlist *current_sg = sg;
- dma_addr_t dst;
struct d40_phy_lli *lli = lli_sg;
dma_addr_t l_phys = lli_phys;
+ if (!target)
+ flags |= LLI_ADDR_INC;
+
for_each_sg(sg, current_sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(current_sg);
+ unsigned int len = sg_dma_len(current_sg);
+ dma_addr_t dst = target ?: sg_addr;
total_size += sg_dma_len(current_sg);
- if (target)
- dst = target;
- else
- dst = sg_phys(current_sg);
+ if (i == sg_len - 1)
+ flags |= LLI_TERM_INT | LLI_LAST_LINK;
l_phys = ALIGN(lli_phys + (lli - lli_sg) *
sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
- lli = d40_phy_buf_to_lli(lli,
- dst,
- sg_dma_len(current_sg),
- psize,
- l_phys,
- reg_cfg,
- sg_len - 1 == i,
- data_width1,
- data_width2,
- target == dst);
+ lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
+ reg_cfg, info, otherinfo, flags);
+
if (lli == NULL)
return -EINVAL;
}
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
}
-void d40_phy_lli_write(void __iomem *virtbase,
- u32 phy_chan_num,
- struct d40_phy_lli *lli_dst,
- struct d40_phy_lli *lli_src)
-{
-
- writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
- writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
- writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
- writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
-
- writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
- writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
- writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
- writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
-
-}
-
/* DMA logical lli operations */
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
+ bool interrupt = flags & LLI_TERM_INT;
u32 slos = 0;
u32 dlos = 0;
if (next != -EINVAL) {
slos = next * 2;
dlos = next * 2 + 1;
- } else {
+ }
+
+ if (interrupt) {
lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
}
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
- d40_log_lli_link(lli_dst, lli_src, next);
+ d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcpa[0].lcsp0);
writel(lli_src->lcsp13, &lcpa[0].lcsp1);
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
- d40_log_lli_link(lli_dst, lli_src, next);
+ d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcla[0].lcsp02);
writel(lli_src->lcsp13, &lcla[0].lcsp13);
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size,
u32 reg_cfg,
u32 data_width,
- bool addr_inc)
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
+
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
}
-int d40_log_sg_to_dev(struct scatterlist *sg,
- int sg_len,
- struct d40_log_lli_bidir *lli,
- struct d40_def_lcsp *lcsp,
- u32 src_data_width,
- u32 dst_data_width,
- enum dma_data_direction direction,
- dma_addr_t dev_addr)
-{
- int total_size = 0;
- struct scatterlist *current_sg = sg;
- int i;
- struct d40_log_lli *lli_src = lli->src;
- struct d40_log_lli *lli_dst = lli->dst;
-
- for_each_sg(sg, current_sg, sg_len, i) {
- total_size += sg_dma_len(current_sg);
-
- if (direction == DMA_TO_DEVICE) {
- lli_src =
- d40_log_buf_to_lli(lli_src,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- dst_data_width,
- true);
- lli_dst =
- d40_log_buf_to_lli(lli_dst,
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- src_data_width,
- false);
- } else {
- lli_dst =
- d40_log_buf_to_lli(lli_dst,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- src_data_width,
- true);
- lli_src =
- d40_log_buf_to_lli(lli_src,
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- dst_data_width,
- false);
- }
- }
- return total_size;
-}
-
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
dma_addr_t addr,
int size,
u32 lcsp13, /* src or dst*/
u32 data_width1,
u32 data_width2,
- bool addr_inc)
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
struct d40_log_lli *lli = lli_sg;
int size_rest = size;
int size_seg = 0;
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
addr,
size_seg,
lcsp13, data_width1,
- addr_inc);
+ flags);
if (addr_inc)
addr += size_seg;
lli++;
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
+ dma_addr_t dev_addr,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
u32 data_width1, u32 data_width2)
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
struct scatterlist *current_sg = sg;
int i;
struct d40_log_lli *lli = lli_sg;
+ unsigned long flags = 0;
+
+ if (!dev_addr)
+ flags |= LLI_ADDR_INC;
for_each_sg(sg, current_sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(current_sg);
+ unsigned int len = sg_dma_len(current_sg);
+ dma_addr_t addr = dev_addr ?: sg_addr;
+
total_size += sg_dma_len(current_sg);
- lli = d40_log_buf_to_lli(lli,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
+
+ lli = d40_log_buf_to_lli(lli, addr, len,
lcsp13,
- data_width1, data_width2, true);
+ data_width1,
+ data_width2,
+ flags);
}
+
return total_size;
}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9cc43495bea2..195ee65ee7f3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -163,6 +163,22 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_PSEG1 0x110
+#define D40_DREG_PSEG2 0x114
+#define D40_DREG_PSEG3 0x118
+#define D40_DREG_PSEG4 0x11C
+#define D40_DREG_PCEG1 0x120
+#define D40_DREG_PCEG2 0x124
+#define D40_DREG_PCEG3 0x128
+#define D40_DREG_PCEG4 0x12C
+#define D40_DREG_RSEG1 0x130
+#define D40_DREG_RSEG2 0x134
+#define D40_DREG_RSEG3 0x138
+#define D40_DREG_RSEG4 0x13C
+#define D40_DREG_RCEG1 0x140
+#define D40_DREG_RCEG2 0x144
+#define D40_DREG_RCEG3 0x148
+#define D40_DREG_RCEG4 0x14C
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
@@ -277,6 +293,13 @@ struct d40_def_lcsp {
/* Physical channels */
+enum d40_lli_flags {
+ LLI_ADDR_INC = 1 << 0,
+ LLI_TERM_INT = 1 << 1,
+ LLI_CYCLIC = 1 << 2,
+ LLI_LAST_LINK = 1 << 3,
+};
+
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
u32 *src_cfg,
u32 *dst_cfg,
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width1,
- u32 data_width2,
- int psize);
-
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width1,
- u32 data_width2,
- bool is_device);
-
-void d40_phy_lli_write(void __iomem *virtbase,
- u32 phy_chan_num,
- struct d40_phy_lli *lli_dst,
- struct d40_phy_lli *lli_src);
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags);
/* Logical channels */
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
- dma_addr_t addr,
- int size,
- u32 lcsp13, /* src or dst*/
- u32 data_width1, u32 data_width2,
- bool addr_inc);
-
-int d40_log_sg_to_dev(struct scatterlist *sg,
- int sg_len,
- struct d40_log_lli_bidir *lli,
- struct d40_def_lcsp *lcsp,
- u32 src_data_width,
- u32 dst_data_width,
- enum dma_data_direction direction,
- dma_addr_t dev_addr);
-
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
+ dma_addr_t dev_addr,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
u32 data_width1, u32 data_width2);
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next);
+ int next, unsigned int flags);
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next);
+ int next, unsigned int flags);
#endif /* STE_DMA40_LLI_H */
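
The header change above collapses the long tails of bool arguments (term_int, is_device, addr_inc, ...) into a single flags word built from enum d40_lli_flags. As a minimal stand-alone sketch of that pattern (plain user-space C; everything except the LLI_* names is invented for the demo and is not the driver's real API):

        #include <stdbool.h>
        #include <stdio.h>

        enum lli_flags {                        /* mirrors enum d40_lli_flags */
                LLI_ADDR_INC  = 1 << 0,
                LLI_TERM_INT  = 1 << 1,
                LLI_CYCLIC    = 1 << 2,
                LLI_LAST_LINK = 1 << 3,
        };

        /* the callee unpacks only the bits it cares about */
        static void fill_link(int idx, unsigned long flags)
        {
                bool addr_inc = flags & LLI_ADDR_INC;
                bool term_int = flags & LLI_TERM_INT;

                printf("link %d: addr_inc=%d term_int=%d last=%d\n",
                       idx, addr_inc, term_int, !!(flags & LLI_LAST_LINK));
        }

        int main(void)
        {
                unsigned long flags = 0;
                unsigned long dev_addr = 0;     /* 0 stands for "memory side" here */
                int i, sg_len = 3;

                if (!dev_addr)
                        flags |= LLI_ADDR_INC;  /* only memory addresses increment */

                for (i = 0; i < sg_len; i++) {
                        if (i == sg_len - 1)    /* interrupt only on the last link */
                                flags |= LLI_TERM_INT | LLI_LAST_LINK;
                        fill_link(i, flags);
                }
                return 0;
        }

The same shape shows up in the .c hunks above: d40_phy_sg_to_lli() ORs in LLI_TERM_INT | LLI_LAST_LINK on the final scatterlist entry, and both the physical and logical paths set LLI_ADDR_INC only on the memory side.
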
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 3b88a4e7c98a..d2c75feff7df 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/timb_dma.h>
@@ -629,7 +630,7 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
desc_node)
list_move(&td_desc->desc_node, &td_chan->free_list);
- /* now tear down the runnning */
+ /* now tear down the running */
__td_finish(td_chan);
spin_unlock_bh(&td_chan->lock);
@@ -684,7 +685,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int __devinit td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = mfd_get_data(pdev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..70e0f84be0d8 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;
/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
- [0] = 32,
- [1] = 64,
- [2] = 128,
- [3] = 256,
- [4] = 512,
- [5] = 1024,
- [6] = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
- [0] = 32,
- [1] = 64,
- [2 ... 3] = 128,
- [4] = 256,
- [5] = 512,
- [6] = 256,
- [7] = 512,
- [8 ... 9] = 1024,
- [10] = 2048,
-};
-
-static int ddr2_dbam[] = { [0] = 128,
- [1] = 256,
- [2 ... 4] = 512,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-static int ddr3_dbam[] = { [0] = -1,
- [1] = 256,
- [2] = 512,
- [3 ... 4] = -1,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or higher value'.
*
*FIXME: Produce a better mapping/linearisation.
*/
-
-
struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
{ 0x00, 0UL}, /* scrubbing off */
};
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error reading F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func)
+{
+ int err = 0;
+
+ err = pci_write_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error writing to F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ * DCT0 -> F2x040..
+ * DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ if (addr >= 0x100)
+ return -EINVAL;
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ u32 reg = 0;
+ u8 dct = 0;
+
+ if (addr >= 0x140 && addr <= 0x1a0) {
+ dct = 1;
+ addr -= 0x100;
+ }
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= 0xfffffffe;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
/*
* Memory scrubber control interface. For K8, memory scrubbing is handled by
* hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+ pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
if (scrubval)
return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x5;
- return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+ if (boot_cpu_data.x86 == 0xf)
+ min_scrubrate = 0x0;
+
+ return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
- amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -192,63 +222,13 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
return retval;
}
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
- return csrow;
- else
- return csrow >> 1;
-}
-
-/* return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsb0[csrow];
- else
- return pvt->dcsb1[csrow];
-}
-
-/*
- * Return the 'mask' address of the i'th CS entry. This function is needed
- * because the number of DCSM registers on Rev E and prior vs Rev F and later
- * is different.
- */
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
- else
- return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
-
/*
- * In *base and *limit, pass back the full 40-bit base and limit physical
- * addresses for the node given by node_id. This information is obtained from
- * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
- * base and limit addresses are of type SysAddr, as defined at the start of
- * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
- * in the address range they represent.
+ * returns true if the SysAddr given by sys_addr matches the
+ * DRAM base/limit associated with node_id
*/
-static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
- u64 *base, u64 *limit)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)
{
- *base = pvt->dram_base[node_id];
- *limit = pvt->dram_limit[node_id];
-}
-
-/*
- * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
- * with node_id
- */
-static int amd64_base_limit_match(struct amd64_pvt *pvt,
- u64 sys_addr, int node_id)
-{
- u64 base, limit, addr;
-
- amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+ u64 addr;
/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
* all ones if the most significant implemented address bit is 1.
@@ -258,7 +238,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
*/
addr = sys_addr & 0x000000ffffffffffull;
- return (addr >= base) && (addr <= limit);
+ return ((addr >= get_dram_base(pvt, nid)) &&
+ (addr <= get_dram_limit(pvt, nid)));
}
/*
@@ -285,10 +266,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
* registers. Therefore we arbitrarily choose to read it from the
* register for node 0.
*/
- intlv_en = pvt->dram_IntlvEn[0];
+ intlv_en = dram_intlv_en(pvt, 0);
if (intlv_en == 0) {
- for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+ for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
if (amd64_base_limit_match(pvt, sys_addr, node_id))
goto found;
}
@@ -305,10 +286,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
- if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+ if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
break; /* intlv_sel field matches */
- if (++node_id >= DRAM_REG_COUNT)
+ if (++node_id >= DRAM_RANGES)
goto err_no_match;
}
@@ -331,37 +312,50 @@ err_no_match:
}
/*
- * Extract the DRAM CS base address from selected csrow register.
- */
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
-{
- return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
- pvt->dcs_shift;
-}
-
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
*/
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+ u64 *base, u64 *mask)
{
- u64 dcsm_bits, other_bits;
- u64 mask;
+ u64 csbase, csmask, base_bits, mask_bits;
+ u8 addr_shift;
- /* Extract bits from DRAM CS Mask. */
- dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow];
+ base_bits = GENMASK(21, 31) | GENMASK(9, 15);
+ mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ addr_shift = 4;
+ } else {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow >> 1];
+ addr_shift = 8;
- other_bits = pvt->dcsm_mask;
- other_bits = ~(other_bits << pvt->dcs_shift);
+ if (boot_cpu_data.x86 == 0x15)
+ base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ else
+ base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ }
- /*
- * The extracted bits from DCSM belong in the spaces represented by
- * the cleared bits in other_bits.
- */
- mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+ *base = (csbase & base_bits) << addr_shift;
- return mask;
+ *mask = ~0ULL;
+ /* poke holes for the csmask */
+ *mask &= ~(mask_bits << addr_shift);
+ /* OR them in */
+ *mask |= (csmask & mask_bits) << addr_shift;
}
+#define for_each_chip_select(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define chip_select_base(i, dct, pvt) \
+ pvt->csels[dct].csbases[i]
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -374,19 +368,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
pvt = mci->pvt_info;
- /*
- * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
- * base/mask register pair, test the condition shown near the start of
- * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
- */
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
- /* This DRAM chip select is disabled on this node */
- if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ for_each_chip_select(csrow, 0, pvt) {
+ if (!csrow_enabled(csrow, 0, pvt))
continue;
- base = base_from_dct_base(pvt, csrow);
- mask = ~mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+ mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +384,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
return csrow;
}
}
-
debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
(unsigned long)input_addr, pvt->mc_node_id);
@@ -404,19 +391,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
}
/*
- * Return the base value defined by the DRAM Base register for the node
- * represented by mci. This function returns the full 40-bit value despite the
- * fact that the register only stores bits 39-24 of the value. See section
- * 3.4.4.1 (BKDG #26094, K8, revA-E)
- */
-static inline u64 get_dram_base(struct mem_ctl_info *mci)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- return pvt->dram_base[pvt->mc_node_id];
-}
-
-/*
* Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
* for the node represented by mci. Info is passed back in *hole_base,
* *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +419,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 1;
}
- /* only valid for Fam10h */
- if (boot_cpu_data.x86 == 0x10 &&
- (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ /* valid for Fam10h and above */
+ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
- if ((pvt->dhar & DHAR_VALID) == 0) {
+ if (!dhar_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
pvt->mc_node_id);
return 1;
@@ -476,15 +449,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
* addresses in the hole so that they start at 0x100000000.
*/
- base = dhar_base(pvt->dhar);
+ base = dhar_base(pvt);
*hole_base = base;
*hole_size = (0x1ull << 32) - base;
if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt->dhar);
+ *hole_offset = f10_dhar_offset(pvt);
else
- *hole_offset = k8_dhar_offset(pvt->dhar);
+ *hole_offset = k8_dhar_offset(pvt);
debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
@@ -525,10 +498,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
*/
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
int ret = 0;
- dram_base = get_dram_base(mci);
+ dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
@@ -556,7 +530,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
"DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +566,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* concerning translating a DramAddr to an InputAddr.
*/
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
- input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
- (dram_addr & 0xfff);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ (dram_addr & 0xfff);
debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
intlv_shift, (unsigned long)dram_addr,
@@ -644,7 +618,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
node_id = pvt->mc_node_id;
BUG_ON((node_id < 0) || (node_id > 7));
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
debugf1(" InputAddr 0x%lx translates to DramAddr of "
@@ -653,10 +627,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
return input_addr;
}
- bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
- (input_addr & 0xfff);
+ bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
+ (input_addr & 0xfff);
- intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +647,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ u64 hole_base, hole_offset, hole_size, base, sys_addr;
int ret = 0;
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +665,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
}
}
- amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ base = get_dram_base(pvt, pvt->mc_node_id);
sys_addr = dram_addr + base;
/*
@@ -736,13 +710,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
u64 base, mask;
pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+ BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
- base = base_from_dct_base(pvt, csrow);
- mask = mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
*input_addr_min = base & ~mask;
- *input_addr_max = base | mask | pvt->dcs_mask_notused;
+ *input_addr_max = base | mask;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +748,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
-static u16 extract_syndrome(struct err_regs *err)
-{
- return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
- int bit;
+ u8 bit;
enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
debugf1(" PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ if (boot_cpu_data.x86 == 0x10)
+ debugf1(" DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
(dclr & BIT(12)) ? "yes" : "no",
@@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
}
/* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
{
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
@@ -841,13 +810,11 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
"offset: 0x%08x\n",
- pvt->dhar,
- dhar_base(pvt->dhar),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
- : f10_dhar_offset(pvt->dhar));
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n",
- (pvt->dhar & DHAR_VALID) ? "yes" : "no");
+ debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
amd64_debug_display_dimm_sizes(0, pvt);
@@ -857,114 +824,70 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
amd64_debug_display_dimm_sizes(1, pvt);
- amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+ amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
}
-/* Read in both of DBAM registers */
-static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
-{
- amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
- if (boot_cpu_data.x86 >= 0x10)
- amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
-}
-
/*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
{
-
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_E_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 8;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
} else {
- pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 4;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs, reg;
+ int cs;
- amd64_set_dct_base_and_mask(pvt);
+ prep_chip_selects(pvt);
- for (cs = 0; cs < pvt->cs_count; cs++) {
- reg = K8_DCSB0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+ for_each_chip_select(cs, 0, pvt) {
+ u32 reg0 = DCSB0 + (cs * 4);
+ u32 reg1 = DCSB1 + (cs * 4);
+ u32 *base0 = &pvt->csels[0].csbases[cs];
+ u32 *base1 = &pvt->csels[1].csbases[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's base */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSB1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsb1[cs]))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb1[cs], reg);
- } else {
- pvt->dcsb1[cs] = 0;
- }
+ cs, *base0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
- for (cs = 0; cs < pvt->num_dcsm; cs++) {
- reg = K8_DCSM0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+ for_each_chip_select_mask(cs, 0, pvt) {
+ u32 reg0 = DCSM0 + (cs * 4);
+ u32 reg1 = DCSM1 + (cs * 4);
+ u32 *mask0 = &pvt->csels[0].csmasks[cs];
+ u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's mask */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSM1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsm1[cs]))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm1[cs], reg);
- } else {
- pvt->dcsm1[cs] = 0;
- }
+ cs, *mask0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
@@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
- if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+ /* F15h supports only DDR3 */
+ if (boot_cpu_data.x86 >= 0x15)
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+ else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
return type;
}
-/*
- * Read the DRAM Configuration Low register. It differs between CG, D & E revs
- * and the later RevF memory controllers (DDR vs DDR2)
- *
- * Return:
- * number of memory channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
+/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
- int flag, err = 0;
-
- err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- if (err)
- return err;
+ int flag;
if (pvt->ext_model >= K8_REV_F)
/* RevF (NPT) and later */
- flag = pvt->dclr0 & F10_WIDTH_128;
+ flag = pvt->dclr0 & WIDTH_128;
else
/* RevE and earlier */
flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
return (flag) ? 2 : 1;
}
-/* extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
{
- return (((u64) (info->nbeah & 0xff)) << 32) +
- (info->nbeal & ~0x03);
+ u8 start_bit = 1;
+ u8 end_bit = 47;
+
+ if (boot_cpu_data.x86 == 0xf) {
+ start_bit = 3;
+ end_bit = 39;
+ }
+
+ return m->addr & GENMASK(start_bit, end_bit);
}
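
Judging from the replacements in this file (GENMASK(0, 39) for the old 0xffffffffffull SysAddr mask, GENMASK(12, 35) for 0xffffff000ull in the interleave handling, and the MC4_ADDR[47:1] mask built just above), the driver's local GENMASK() takes (low bit, high bit), low bit first. A quick stand-alone check of that reading; the macro body below is my own, written only for the demo:

        #include <assert.h>
        #include <stdio.h>

        /* build the inclusive bit mask [lo..hi], low bit given first */
        #define GENMASK(LO, HI) ((((1ULL << (HI)) << 1) - 1) & ~((1ULL << (LO)) - 1))

        int main(void)
        {
                assert(GENMASK(0, 39)  == 0xffffffffffULL);    /* SysAddr bits [39:0]     */
                assert(GENMASK(12, 35) == 0xffffff000ULL);     /* InputAddr bits [35:12]  */
                assert(GENMASK(1, 47)  == 0xfffffffffffeULL);  /* ErrAddr, MC4_ADDR[47:1] */
                assert(GENMASK(3, 39)  == 0xfffffffff8ULL);    /* K8 ErrAddr[39:3]        */
                printf("masks check out\n");
                return 0;
        }
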
-/*
- * Read the Base and Limit registers for K8 based Memory controllers; extract
- * fields from the 'raw' reg into separate data fields
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
- */
-static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
- u32 low;
- u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ u32 off = range << 3;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- /* Extract parts into separate data entries */
- pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
- pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
- pvt->dram_rw_en[dram] = (low & 0x3);
+ if (boot_cpu_data.x86 == 0xf)
+ return;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
+ if (!dram_rw(pvt, range))
+ return;
- /*
- * Extract parts into separate data entries. Limit is the HIGHEST memory
- * location of the region, so lower 24 bits need to be all ones
- */
- pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
- pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
- pvt->dram_DstNode[dram] = (low & 0x7);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
}
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct mem_ctl_info *src_mci;
+ struct amd64_pvt *pvt = mci->pvt_info;
int channel, csrow;
u32 page, offset;
- u16 syndrome;
-
- syndrome = extract_syndrome(err_info);
/* CHIPKILL enabled */
- if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+ if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
if (channel < 0) {
/*
@@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
}
}
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
- if (pvt->ext_model >= K8_REV_F)
- dbam_map = ddr2_dbam;
- else if (pvt->ext_model >= K8_REV_D)
- dbam_map = ddr2_dbam_revD;
+ if (i <= 2)
+ shift = i;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam_revCG;
+ shift = (i + 1) >> 1;
- return dbam_map[cs_mode];
+ return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
+{
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ if (pvt->ext_model >= K8_REV_F) {
+ WARN_ON(cs_mode > 11);
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+ }
+ else if (pvt->ext_model >= K8_REV_D) {
+ WARN_ON(cs_mode > 10);
+
+ if (cs_mode == 3 || cs_mode == 8)
+ return 32 << (cs_mode - 1);
+ else
+ return 32 << cs_mode;
+ }
+ else {
+ WARN_ON(cs_mode > 6);
+ return 32 << cs_mode;
+ }
}
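
The ddr2_cs_size() helper above is the removed ddr2_dbam[] lookup table folded into a shift computation; a quick stand-alone check (user-space C, table values copied from the hunk deleted near the top of this file):

        #include <assert.h>
        #include <stdbool.h>
        #include <stdio.h>

        static int ddr2_cs_size(unsigned i, bool dct_width)
        {
                unsigned shift;

                if (i <= 2)
                        shift = i;
                else if (!(i & 0x1))
                        shift = i >> 1;
                else
                        shift = (i + 1) >> 1;

                return 128 << (shift + !!dct_width);
        }

        int main(void)
        {
                /* the removed ddr2_dbam[] table, flattened */
                static const int ddr2_dbam[] = { 128, 256, 512, 512, 512, 1024,
                                                 1024, 2048, 2048, 4096, 4096, 8192 };
                unsigned i;

                for (i = 0; i < 12; i++)
                        assert(ddr2_cs_size(i, false) == ddr2_dbam[i]);

                printf("ddr2_cs_size() matches the old table\n");
                return 0;
        }

Passing dct_width as true simply doubles each entry for a 128-bit wide DCT.
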
/*
@@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
* Pass back:
* contents of the DCL0_LOW register
*/
-static int f10_early_channel_count(struct amd64_pvt *pvt)
+static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
- int dbams[] = { DBAM0, DBAM1 };
int i, j, channels = 0;
- u32 dbam;
- /* If we are in 128 bit mode, then we are using 2 channels */
- if (pvt->dclr0 & F10_WIDTH_128) {
- channels = 2;
- return channels;
- }
+ /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
+ if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ return 2;
/*
* Need to check if in unganged mode: In such, there are 2 channels,
@@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
* is more than just one DIMM present in unganged mode. Need to check
* both controllers since DIMMs can be placed in either one.
*/
- for (i = 0; i < ARRAY_SIZE(dbams); i++) {
- if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
- goto err_reg;
+ for (i = 0; i < 2; i++) {
+ u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
for (j = 0; j < 4; j++) {
if (DBAM_DIMM(j, dbam) > 0) {
@@ -1180,216 +1104,192 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
amd64_info("MCT channel count: %d\n", channels);
return channels;
-
-err_reg:
- return -1;
-
}
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
+ int cs_size = 0;
- if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
- dbam_map = ddr3_dbam;
+ if (i == 0 || i == 3 || i == 4)
+ cs_size = -1;
+ else if (i <= 2)
+ shift = i;
+ else if (i == 12)
+ shift = 7;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam;
+ shift = (i + 1) >> 1;
+
+ if (cs_size != -1)
+ cs_size = (128 * (1 << !!dct_width)) << shift;
- return dbam_map[cs_mode];
+ return cs_size;
}
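
The DDR3 variant just above follows the same scheme, with -1 marking the reserved encodings that the removed ddr3_dbam[] table also left unmapped; as a stand-alone check:

        #include <assert.h>
        #include <stdbool.h>
        #include <stdio.h>

        static int ddr3_cs_size(unsigned i, bool dct_width)
        {
                unsigned shift = 0;
                int cs_size = 0;

                if (i == 0 || i == 3 || i == 4)
                        cs_size = -1;
                else if (i <= 2)
                        shift = i;
                else if (i == 12)
                        shift = 7;
                else if (!(i & 0x1))
                        shift = i >> 1;
                else
                        shift = (i + 1) >> 1;

                if (cs_size != -1)
                        cs_size = (128 * (1 << !!dct_width)) << shift;

                return cs_size;
        }

        int main(void)
        {
                /* the removed ddr3_dbam[] table, flattened */
                static const int ddr3_dbam[] = { -1, 256, 512, -1, -1, 1024,
                                                 1024, 2048, 2048, 4096, 4096, 8192 };
                unsigned i;

                for (i = 0; i < 12; i++)
                        assert(ddr3_cs_size(i, false) == ddr3_dbam[i]);

                printf("ddr3_cs_size() matches the old table\n");
                return 0;
        }
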
-static u64 f10_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- return (((u64) (info->nbeah & 0xffff)) << 32) +
- (info->nbeal & ~0x01);
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ WARN_ON(cs_mode > 11);
+
+ if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+ return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
+ else
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
- * Read the Base and Limit registers for F10 based Memory controllers. Extract
- * fields from the 'raw' reg into separate data fields.
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ * F15h supports only 64bit DCT interfaces
*/
-static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
-
- low_offset = K8_DRAM_BASE_LOW + (dram << 3);
- high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
-
- /* read the 'raw' DRAM BASE Address register */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
-
- /* Extract parts into separate data entries */
- pvt->dram_rw_en[dram] = (low_base & 0x3);
-
- if (pvt->dram_rw_en[dram] == 0)
- return;
-
- pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
-
- pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
- (((u64)low_base & 0xFFFF0000) << 8);
-
- low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
- high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
-
- /* read the 'raw' LIMIT registers */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
-
- pvt->dram_DstNode[dram] = (low_limit & 0x7);
- pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+ WARN_ON(cs_mode > 12);
- /*
- * Extract address values and form a LIMIT address. Limit is the HIGHEST
- * memory location of the region, so low 24 bits need to be all ones.
- */
- pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
- (((u64) low_limit & 0xFFFF0000) << 8) |
- 0x00FFFFFF;
+ return ddr3_cs_size(cs_mode, false);
}
-static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
- &pvt->dram_ctl_select_low)) {
- debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
- "High range addresses at: 0x%x\n",
- pvt->dram_ctl_select_low,
- dct_sel_baseaddr(pvt));
+ if (boot_cpu_data.x86 == 0xf)
+ return;
+
+ if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+ debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCT mode: %s, All DCTs on: %s\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
- (dct_dram_enabled(pvt) ? "yes" : "no"));
+ debugf0(" DCTs operate in %s mode.\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
debugf0(" Address range split per DCT: %s\n",
(dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" DCT data interleave for ECC: %s, "
+ debugf0(" data interleave for ECC: %s, "
"DRAM cleared since last warm reset: %s\n",
(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
(dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" DCT channel interleave: %s, "
- "DCT interleave bits selector: 0x%x\n",
+ debugf0(" channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
- amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
- &pvt->dram_ctl_select_high);
+ amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
- * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
-static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
- int hi_range_sel, u32 intlv_en)
+static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ bool hi_range_sel, u8 intlv_en)
{
- u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+ u32 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
if (dct_ganging_enabled(pvt))
- cs = 0;
- else if (hi_range_sel)
- cs = dct_sel_high;
- else if (dct_interleave_enabled(pvt)) {
- /*
- * see F2x110[DctSelIntLvAddr] - channel interleave mode
- */
- if (dct_sel_interleave_addr(pvt) == 0)
- cs = sys_addr >> 6 & 1;
- else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
- temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+ return 0;
- if (dct_sel_interleave_addr(pvt) & 1)
- cs = (sys_addr >> 9 & 1) ^ temp;
- else
- cs = (sys_addr >> 6 & 1) ^ temp;
- } else if (intlv_en & 4)
- cs = sys_addr >> 15 & 1;
- else if (intlv_en & 2)
- cs = sys_addr >> 14 & 1;
- else if (intlv_en & 1)
- cs = sys_addr >> 13 & 1;
- else
- cs = sys_addr >> 12 & 1;
- } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
- cs = ~dct_sel_high & 1;
- else
- cs = 0;
+ if (hi_range_sel)
+ return dct_sel_high;
- return cs;
-}
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_interleave_enabled(pvt)) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
-static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
-{
- if (intlv_en == 1)
- return 1;
- else if (intlv_en == 3)
- return 2;
- else if (intlv_en == 7)
- return 3;
+ /* return DCT select function: 0=DCT0, 1=DCT1 */
+ if (!intlv_addr)
+ return sys_addr >> 6 & 1;
+
+ if (intlv_addr & 0x2) {
+ u8 shift = intlv_addr & 0x1 ? 9 : 6;
+ u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ return ((sys_addr >> shift) & 1) ^ temp;
+ }
+
+ return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
+ }
+
+ if (dct_high_range_enabled(pvt))
+ return ~dct_sel_high & 1;
return 0;
}
-/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
-static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
- u32 dct_sel_base_addr,
- u64 dct_sel_base_off,
- u32 hole_valid, u32 hole_off,
- u64 dram_base)
+/* Convert the sys_addr to the normalized DCT address */
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
+ u64 sys_addr, bool hi_rng,
+ u32 dct_sel_base_addr)
{
u64 chan_off;
+ u64 dram_base = get_dram_base(pvt, range);
+ u64 hole_off = f10_dhar_offset(pvt);
+ u32 hole_valid = dhar_valid(pvt);
+ u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
- if (hi_range_sel) {
- if (!(dct_sel_base_addr & 0xFFFF0000) &&
- hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ if (hi_rng) {
+ /*
+ * if
+ * base address of high range is below 4Gb
+ * (bits [47:27] at [31:11])
+ * DRAM address space on this DCT is hoisted above 4Gb &&
+ * sys_addr > 4Gb
+ *
+ * remove hole offset from sys_addr
+ * else
+ * remove high range offset from sys_addr
+ */
+ if ((!(dct_sel_base_addr >> 16) ||
+ dct_sel_base_addr < dhar_base(pvt)) &&
+ hole_valid &&
+ (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
chan_off = dct_sel_base_off;
} else {
- if (hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ /*
+ * if
+ * we have a valid hole &&
+ * sys_addr > 4Gb
+ *
+ * remove hole
+ * else
+ * remove dram base to normalize to DCT address
+ */
+ if (hole_valid && (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
- chan_off = dram_base & 0xFFFFF8000000ULL;
+ chan_off = dram_base;
}
- return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
- (chan_off & 0x0000FFFFFF800000ULL);
+ return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
}
-/* Hack for the time being - Can we get this from BIOS?? */
-#define CH0SPARE_RANK 0
-#define CH1SPARE_RANK 1
-
/*
* checks if the csrow passed in is marked as SPARED, if so returns the new
* spare row
*/
-static inline int f10_process_possible_spare(int csrow,
- u32 cs, struct amd64_pvt *pvt)
-{
- u32 swap_done;
- u32 bad_dram_cs;
-
- /* Depending on channel, isolate respective SPARING info */
- if (cs) {
- swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH1SPARE_RANK;
- } else {
- swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH0SPARE_RANK;
+static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
+{
+ int tmp_cs;
+
+ if (online_spare_swap_done(pvt, dct) &&
+ csrow == online_spare_bad_dramcs(pvt, dct)) {
+
+ for_each_chip_select(tmp_cs, dct, pvt) {
+ if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
+ csrow = tmp_cs;
+ break;
+ }
+ }
}
return csrow;
}
@@ -1402,11 +1302,11 @@ static inline int f10_process_possible_spare(int csrow,
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u32 cs_base, cs_mask;
+ u64 cs_base, cs_mask;
int cs_found = -EINVAL;
int csrow;
@@ -1416,39 +1316,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
pvt = mci->pvt_info;
- debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
+ debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
- cs_base = amd64_get_dct_base(pvt, cs, csrow);
- if (!(cs_base & K8_DCSB_CS_ENABLE))
+ for_each_chip_select(csrow, dct, pvt) {
+ if (!csrow_enabled(csrow, dct, pvt))
continue;
- /*
- * We have an ENABLED CSROW, Isolate just the MASK bits of the
- * target: [28:19] and [13:5], which map to [36:27] and [21:13]
- * of the actual address.
- */
- cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
-
- /*
- * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
- * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
- */
- cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+ get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
- csrow, cs_base, cs_mask);
+ debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
- cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+ cs_mask = ~cs_mask;
- debugf1(" Final CSMask=0x%x\n", cs_mask);
- debugf1(" (InputAddr & ~CSMask)=0x%x "
- "(CSBase & ~CSMask)=0x%x\n",
- (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+ debugf1(" (InputAddr & ~CSMask)=0x%llx "
+ "(CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
- if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
- cs_found = f10_process_possible_spare(csrow, cs, pvt);
+ if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ cs_found = f10_process_possible_spare(pvt, dct, csrow);
debugf1(" MATCH csrow=%d\n", cs_found);
break;
@@ -1457,38 +1343,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
return cs_found;
}
-/* For a given @dram_range, check if @sys_addr falls within it. */
-static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
- u64 sys_addr, int *nid, int *chan_sel)
+/*
+ * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
+ * swapped with a region located at the bottom of memory so that the GPU can use
+ * the interleaved region and thus two channels.
+ */
+static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
- int node_id, cs_found = -EINVAL, high_range = 0;
- u32 intlv_en, intlv_sel, intlv_shift, hole_off;
- u32 hole_valid, tmp, dct_sel_base, channel;
- u64 dram_base, chan_addr, dct_sel_base_off;
+ u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- dram_base = pvt->dram_base[dram_range];
- intlv_en = pvt->dram_IntlvEn[dram_range];
+ if (boot_cpu_data.x86 == 0x10) {
+ /* only revC3 and revE have that feature */
+ if (boot_cpu_data.x86_model < 4 ||
+ (boot_cpu_data.x86_model < 0xa &&
+ boot_cpu_data.x86_mask < 3))
+ return sys_addr;
+ }
- node_id = pvt->dram_DstNode[dram_range];
- intlv_sel = pvt->dram_IntlvSel[dram_range];
+ amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
- debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
- dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+ if (!(swap_reg & 0x1))
+ return sys_addr;
- /*
- * This assumes that one node's DHAR is the same as all the other
- * nodes' DHAR.
- */
- hole_off = (pvt->dhar & 0x0000FF80);
- hole_valid = (pvt->dhar & 0x1);
- dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+ swap_base = (swap_reg >> 3) & 0x7f;
+ swap_limit = (swap_reg >> 11) & 0x7f;
+ rgn_size = (swap_reg >> 20) & 0x7f;
+ tmp_addr = sys_addr >> 27;
- debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
- hole_off, hole_valid, intlv_sel);
+ if (!(sys_addr >> 34) &&
+ (((tmp_addr >= swap_base) &&
+ (tmp_addr <= swap_limit)) ||
+ (tmp_addr < rgn_size)))
+ return sys_addr ^ (u64)swap_base << 27;
+
+ return sys_addr;
+}
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f1x_match_to_this_node(struct amd64_pvt *pvt, int range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ u64 chan_addr;
+ u32 dct_sel_base;
+ u8 channel;
+ bool high_range = false;
+
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+ u32 intlv_sel = dram_intlv_sel(pvt, range);
+
+ debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
if (intlv_en &&
- (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
+ amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
+ intlv_en, intlv_sel);
return -EINVAL;
+ }
+
+ sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
dct_sel_base = dct_sel_baseaddr(pvt);
@@ -1499,38 +1422,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
if (dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt) &&
((sys_addr >> 27) >= (dct_sel_base >> 11)))
- high_range = 1;
-
- channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
-
- chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
- dct_sel_base_off, hole_valid,
- hole_off, dram_base);
+ high_range = true;
- intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+ channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
- /* remove Node ID (in case of memory interleaving) */
- tmp = chan_addr & 0xFC0;
+ chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
+ high_range, dct_sel_base);
- chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
+ /* Remove node interleaving, see F1x120 */
+ if (intlv_en)
+ chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
+ (chan_addr & 0xfff);
- /* remove channel interleave and hash */
+ /* remove channel interleave */
if (dct_interleave_enabled(pvt) &&
!dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt)) {
- if (dct_sel_interleave_addr(pvt) != 1)
- chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
- else {
- tmp = chan_addr & 0xFC0;
- chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
- | tmp;
- }
+
+ if (dct_sel_interleave_addr(pvt) != 1) {
+ if (dct_sel_interleave_addr(pvt) == 0x3)
+ /* hash 9 */
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ /* A[6] or hash 6 */
+ chan_addr = ((chan_addr >> 7) << 6) |
+ (chan_addr & 0x3f);
+ } else
+ /* A[12] */
+ chan_addr = ((chan_addr >> 13) << 12) |
+ (chan_addr & 0xfff);
}
- debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
- chan_addr, (u32)(chan_addr >> 8));
+ debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
- cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
if (cs_found >= 0) {
*nid = node_id;
@@ -1539,23 +1465,20 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
return cs_found;
}
-static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
int *node, int *chan_sel)
{
- int dram_range, cs_found = -EINVAL;
- u64 dram_base, dram_limit;
+ int range, cs_found = -EINVAL;
- for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+ for (range = 0; range < DRAM_RANGES; range++) {
- if (!pvt->dram_rw_en[dram_range])
+ if (!dram_rw(pvt, range))
continue;
- dram_base = pvt->dram_base[dram_range];
- dram_limit = pvt->dram_limit[dram_range];
+ if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
- if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
-
- cs_found = f10_match_to_this_node(pvt, dram_range,
+ cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, node,
chan_sel);
if (cs_found >= 0)
@@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
* The @sys_addr is usually an error address received from the hardware
* (MCX_ADDR).
*/
-static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info,
- u64 sys_addr)
+static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 page, offset;
int nid, csrow, chan = 0;
- u16 syndrome;
- csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+ csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
error_address_to_page_and_offset(sys_addr, &page, &offset);
- syndrome = extract_syndrome(err_info);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
- if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+ if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
if (chan >= 0)
@@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
- * CSROWs as well
+ * CSROWs
*/
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
int dimm, size0, size1, factor = 0;
- u32 dbam;
- u32 *dcsb;
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & F10_WIDTH_128)
+ if (pvt->dclr0 & WIDTH_128)
factor = 1;
/* K8 families < revF not supported yet */
@@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
}
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
+ : pvt->csels[0].csbases;
debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
@@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
- if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
- size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
size1 = 0;
- if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0 << factor,
@@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = {
.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
.ops = {
.early_channel_count = k8_early_channel_count,
- .get_error_address = k8_get_error_address,
- .read_dram_base_limit = k8_read_dram_base_limit,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
+ .read_dct_pci_cfg = k8_read_dct_pci_cfg,
}
},
[F10_CPUS] = {
@@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = {
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
.ops = {
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
+ [F15_CPUS] = {
+ .ctl_name = "F15h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f15_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
};
@@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
struct amd64_pvt *pvt = mci->pvt_info;
int err_sym = -1;
- if (pvt->syn_type == 8)
+ if (pvt->ecc_sym_sz == 8)
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors),
- pvt->syn_type);
- else if (pvt->syn_type == 4)
+ pvt->ecc_sym_sz);
+ else if (pvt->ecc_sym_sz == 4)
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors),
- pvt->syn_type);
+ pvt->ecc_sym_sz);
else {
- amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
+ amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
return err_sym;
}
- return map_err_sym_to_channel(err_sym, pvt->syn_type);
+ return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
/*
* Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
* ADDRESS and process.
*/
-static void amd64_handle_ce(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 sys_addr;
+ u16 syndrome;
/* Ensure that the Error Address is VALID */
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
+ syndrome = extract_syndrome(m->status);
amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
- pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
+ pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}
/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
- struct amd64_pvt *pvt = mci->pvt_info;
struct mem_ctl_info *log_mci, *src_mci = NULL;
int csrow;
u64 sys_addr;
@@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
log_mci = mci;
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
/*
* Find out which node the error address belongs to. This may be
@@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct err_regs *info)
+ struct mce *m)
{
- u16 ec = EC(info->nbsl);
- u8 xec = XEC(info->nbsl, 0x1f);
- int ecc_type = (info->nbsh >> 13) & 0x3;
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, 0x1f);
+ u8 ecc_type = (m->status >> 45) & 0x3;
/* Bail early out if this was an 'observed' error */
- if (PP(ec) == K8_NBSL_PP_OBS)
+ if (PP(ec) == NBSL_PP_OBS)
return;
/* Do only ECC errors */
@@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
return;
if (ecc_type == 2)
- amd64_handle_ce(mci, info);
+ amd64_handle_ce(mci, m);
else if (ecc_type == 1)
- amd64_handle_ue(mci, info);
+ amd64_handle_ue(mci, m);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
struct mem_ctl_info *mci = mcis[node_id];
- struct err_regs regs;
-
- regs.nbsl = (u32) m->status;
- regs.nbsh = (u32)(m->status >> 32);
- regs.nbeal = (u32) m->addr;
- regs.nbeah = (u32)(m->addr >> 32);
- regs.nbcfg = nbcfg;
-
- __amd64_decode_bus_error(mci, &regs);
-
- /*
- * Check the UE bit of the NB status high register, if set generate some
- * logs. If NOT a GART error, then process the event as a NO-INFO event.
- * If it was a GART error, skip that process.
- *
- * FIXME: this should go somewhere else, if at all.
- */
- if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
- edac_mc_handle_ue_no_info(mci, "UE bit is set");
+ __amd64_decode_bus_error(mci, m);
}
/*
@@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u64 msr_val;
u32 tmp;
- int dram;
+ int range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt)
} else
debugf0(" TOP_MEM2 disabled.\n");
- amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
+ amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
- if (pvt->ops->read_dram_ctl_register)
- pvt->ops->read_dram_ctl_register(pvt);
+ read_dram_ctl_register(pvt);
- for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
- /*
- * Call CPU specific READ function to get the DRAM Base and
- * Limit values from the DCT.
- */
- pvt->ops->read_dram_base_limit(pvt, dram);
+ for (range = 0; range < DRAM_RANGES; range++) {
+ u8 rw;
- /*
- * Only print out debug info on rows with both R and W Enabled.
- * Normal processing, compiler should optimize this whole 'if'
- * debug output block away.
- */
- if (pvt->dram_rw_en[dram] != 0) {
- debugf1(" DRAM-BASE[%d]: 0x%016llx "
- "DRAM-LIMIT: 0x%016llx\n",
- dram,
- pvt->dram_base[dram],
- pvt->dram_limit[dram]);
-
- debugf1(" IntlvEn=%s %s %s "
- "IntlvSel=%d DstNode=%d\n",
- pvt->dram_IntlvEn[dram] ?
- "Enabled" : "Disabled",
- (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
- (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
- pvt->dram_IntlvSel[dram],
- pvt->dram_DstNode[dram]);
- }
+ /* read settings for this DRAM range */
+ read_dram_base_limit_regs(pvt, range);
+
+ rw = dram_rw(pvt, range);
+ if (!rw)
+ continue;
+
+ debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
+
+ debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
- amd64_read_dct_base_mask(pvt);
+ read_dct_base_mask(pvt);
- amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
- amd64_read_dbam_reg(pvt);
+ amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
+ amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+ amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
+ amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
- if (boot_cpu_data.x86 >= 0x10) {
- if (!dct_ganging_enabled(pvt)) {
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
- }
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ if (!dct_ganging_enabled(pvt)) {
+ amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
+ amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
}
- if (boot_cpu_data.x86 == 0x10 &&
- boot_cpu_data.x86_model > 7 &&
- /* F3x180[EccSymbolSize]=1 => x8 symbols */
- tmp & BIT(25))
- pvt->syn_type = 8;
- else
- pvt->syn_type = 4;
+ pvt->ecc_sym_sz = 4;
- amd64_dump_misc_regs(pvt);
+ if (c->x86 >= 0x10) {
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too */
+ if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+ dump_misc_regs(pvt);
}
/*
* NOTE: CPU Revision Dependent code
*
* Input:
- * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
@@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
@@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
*/
cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
- nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+ nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
/*
* If dual channel then double the memory size of single channel.
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr;
+ u64 input_addr_min, input_addr_max, sys_addr, base, mask;
u32 val;
int i, empty = 1;
- amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
- pvt->ctl_error_info.nbcfg = val;
debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
pvt->mc_node_id, val,
- !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- for (i = 0; i < pvt->cs_count; i++) {
+ for_each_chip_select(i, 0, pvt) {
csrow = &mci->csrows[i];
- if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ if (!csrow_enabled(i, 0, pvt)) {
debugf1("----CSROW %d EMPTY for node %d\n", i,
pvt->mc_node_id);
continue;
@@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci)
i, pvt->mc_node_id);
empty = 0;
- csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
- csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+
+ get_cs_base_and_mask(pvt, i, 0, &base, &mask);
+ csrow->page_mask = ~mask;
/* 8 bytes of resolution */
csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
- if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE)
csrow->edac_mode =
- (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ (pvt->nbcfg & NBCFG_CHIPKILL) ?
EDAC_S4ECD4ED : EDAC_SECDED;
else
csrow->edac_mode = EDAC_NONE;
@@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
for_each_cpu(cpu, mask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & K8_MSR_MCGCTL_NBE;
+ nbe = reg->l & MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, reg->q,
@@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
- if (reg->l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & MSR_MCGCTL_NBE)
s->flags.nb_mce_enable = 1;
- reg->l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= MSR_MCGCTL_NBE;
} else {
/*
* Turn off NB MCE reporting only when it was off before
*/
if (!s->flags.nb_mce_enable)
- reg->l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~MSR_MCGCTL_NBE;
}
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
bool ret = true;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
if (toggle_ecc_err_reporting(s, nid, ON)) {
amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
return false;
}
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
- /* turn on UECCEn and CECCEn bits */
s->old_nbctl = value & mask;
s->nbctl_valid = true;
value |= mask;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 0;
/* Attempt to turn on DRAM ECC Enable */
- value |= K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ value |= NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("Hardware rejected DRAM ECC enable,"
"check memory DIMM configuration.\n");
ret = false;
@@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
@@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
+
if (!s->nbctl_valid)
return;
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
value &= ~mask;
value |= s->old_nbctl;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if (!s->flags.nb_ecc_prev) {
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
- value &= ~K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+ value &= ~NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
}
/* restore the NB Enable MCGCTL bit */
@@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
u8 ecc_en = 0;
bool nb_mce_en = false;
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
+ struct amd64_family_type *fam)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & K8_NBCAP_SECDED)
+ if (pvt->nbcap & NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
mci->edac_cap = amd64_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = pvt->ctl_name;
+ mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F2);
mci->ctl_page_to_phys = NULL;
@@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
case 0xf:
fam_type = &amd64_family_types[K8_CPUS];
pvt->ops = &amd64_family_types[K8_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
break;
+
case 0x10:
fam_type = &amd64_family_types[F10_CPUS];
pvt->ops = &amd64_family_types[F10_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+
+ case 0x15:
+ fam_type = &amd64_family_types[F15_CPUS];
+ pvt->ops = &amd64_family_types[F15_CPUS].ops;
break;
default:
@@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
(fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
@@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
+ mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
if (!mci)
goto err_siblings;
mci->pvt_info = pvt;
mci->dev = &pvt->F2->dev;
- setup_mci_misc_attrs(mci);
+ setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+
{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void)
{
int err = -ENODEV;
- edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
opstate_init();
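As a rough standalone sketch of the channel-address normalization introduced above (the helper name and the sample value are invented for illustration; only the bit arithmetic mirrors the hunk), the channel-interleave removal boils down to:

#include <stdint.h>
#include <stdio.h>

/* mirrors the dct_sel_interleave_addr() cases in f1x_match_to_this_node() */
static uint64_t remove_chan_intlv(uint64_t chan_addr, unsigned int intlv_sel)
{
	if (intlv_sel != 1) {
		if (intlv_sel == 0x3)
			/* hash 9: drop bit 9 */
			return ((chan_addr >> 10) << 9) | (chan_addr & 0x1ff);
		/* A[6] or hash 6: drop bit 6 */
		return ((chan_addr >> 7) << 6) | (chan_addr & 0x3f);
	}
	/* A[12]: drop bit 12 */
	return ((chan_addr >> 13) << 12) | (chan_addr & 0xfff);
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)remove_chan_intlv(0x12345678ULL, 0x3));
	return 0;
}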
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..4ece4f4ea581 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -144,7 +144,7 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "v3.3.0"
+#define EDAC_AMD64_VERSION "3.4.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,85 +153,64 @@
#define K8_REV_F 4
/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT 8
-#define DRAM_REG_COUNT 8
+#define NUM_CHIPSELECTS 8
+#define DRAM_RANGES 8
#define ON true
#define OFF false
/*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example
+ *
+ * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
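/*
 * A few more sample expansions of the same formula (illustrative only):
 *
 *	GENMASK(0, 3)	-> 0x000000000000000f
 *	GENMASK(20, 27)	-> 0x000000000ff00000
 *	GENMASK(40, 47)	-> 0x0000ff0000000000
 */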
+
+/*
* PCI-defined configuration space registers
*/
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
/*
* Function 1 - Address Map
*/
-#define K8_DRAM_BASE_LOW 0x40
-#define K8_DRAM_LIMIT_LOW 0x44
-#define K8_DHAR 0xf0
-
-#define DHAR_VALID BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+#define DRAM_BASE_LO 0x40
+#define DRAM_LIMIT_LO 0x44
-#define DHAR_BASE_MASK 0xff000000
-#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+#define dram_intlv_en(pvt, i) ((pvt->ranges[i].base.lo >> 8) & 0x7)
+#define dram_rw(pvt, i) (pvt->ranges[i].base.lo & 0x3)
+#define dram_intlv_sel(pvt, i) ((pvt->ranges[i].lim.lo >> 8) & 0x7)
+#define dram_dst_node(pvt, i) (pvt->ranges[i].lim.lo & 0x7)
-#define K8_DHAR_OFFSET_MASK 0x0000ff00
-#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define DHAR 0xf0
+#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
+#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
+#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
-#define F10_DHAR_OFFSET_MASK 0x0000ff80
/* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
+#define DCT_CFG_SEL 0x10C
-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH 0x140
-#define F10_DRAM_LIMIT_HIGH 0x144
+#define DRAM_BASE_HI 0x140
+#define DRAM_LIMIT_HI 0x144
/*
* Function 2 - DRAM controller
*/
-#define K8_DCSB0 0x40
-#define F10_DCSB1 0x140
+#define DCSB0 0x40
+#define DCSB1 0x140
+#define DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_NPT_SPARE BIT(1)
-#define K8_DCSB_NPT_TESTFAIL BIT(2)
+#define DCSM0 0x60
+#define DCSM1 0x160
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT 4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT 8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT 8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0 0x60
-#define F10_DCSM1 0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
-
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
-
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
#define DBAM0 0x80
#define DBAM1 0x180
@@ -241,138 +220,72 @@
#define DBAM_MAX_VALUE 11
-
-#define F10_DCLR_0 0x90
-#define F10_DCLR_1 0x190
+#define DCLR0 0x90
+#define DCLR1 0x190
#define REVE_WIDTH_128 BIT(16)
-#define F10_WIDTH_128 BIT(11)
+#define WIDTH_128 BIT(11)
+#define DCHR0 0x94
+#define DCHR1 0x194
+#define DDR3_MODE BIT(8)
-#define F10_DCHR_0 0x94
-#define F10_DCHR_1 0x194
+#define DCT_SEL_LO 0x110
+#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
+#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
-#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define DDR3_MODE BIT(8)
-#define F10_DCHR_MblMode BIT(6)
+#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
+#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
-#define F10_DCTL_SEL_LOW 0x110
-#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG 0x10c
-#define F10_DCTL_SEL_HIGH 0x114
+#define DCT_SEL_HI 0x114
/*
* Function 3 - Misc Control
*/
-#define K8_NBCTL 0x40
-
-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn BIT(0)
-
-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn BIT(1)
+#define NBCTL 0x40
-#define K8_NBCFG 0x44
-#define K8_NBCFG_CHIPKILL BIT(23)
-#define K8_NBCFG_ECC_ENABLE BIT(22)
+#define NBCFG 0x44
+#define NBCFG_CHIPKILL BIT(23)
+#define NBCFG_ECC_ENABLE BIT(22)
-#define K8_NBSL 0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES 0x0
+/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC 0x8
+#define NBSL_PP_OBS 0x2
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK 0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC 0x0
-#define K8_NBSL_EXT_ERR_CRC 0x1
-#define K8_NBSL_EXT_ERR_SYNC 0x2
-#define K8_NBSL_EXT_ERR_MST 0x3
-#define K8_NBSL_EXT_ERR_TGT 0x4
-#define K8_NBSL_EXT_ERR_GART 0x5
-#define K8_NBSL_EXT_ERR_RMW 0x6
-#define K8_NBSL_EXT_ERR_WDT 0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC 0x0
-#define K8_NBSL_PP_RES 0x1
-#define K8_NBSL_PP_OBS 0x2
-#define K8_NBSL_PP_GENERIC 0x3
-
-#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
-
-#define K8_NBEAL 0x50
-#define K8_NBEAH 0x54
-#define K8_SCRCTRL 0x58
-
-#define F10_NB_CFG_LOW 0x88
+#define SCRCTRL 0x58
#define F10_ONLINE_SPARE 0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
#define F10_NB_ARRAY_ADDR 0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+#define F10_NB_ARRAY_DRAM_ECC BIT(31)
/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
#define F10_NB_ARRAY_DATA 0xBC
-
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(17) | bits)
-
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(16) | bits)
-#define K8_NBCAP 0xE8
-#define K8_NBCAP_CORES (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL BIT(4)
-#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_DCT_DUAL BIT(0)
+#define NBCAP 0xE8
+#define NBCAP_CHIPKILL BIT(4)
+#define NBCAP_SECDED BIT(3)
+#define NBCAP_DCT_DUAL BIT(0)
#define EXT_NB_MCA_CFG 0x180
/* MSRs */
-#define K8_MSR_MCGCTL_NBE BIT(4)
-
-#define K8_MSR_MC4CTL 0x0410
-#define K8_MSR_MC4STAT 0x0411
-#define K8_MSR_MC4ADDR 0x0412
+#define MSR_MCGCTL_NBE BIT(4)
/* AMD sets the first MC device at device ID 0x18. */
static inline int get_node_id(struct pci_dev *pdev)
@@ -380,9 +293,11 @@ static inline int get_node_id(struct pci_dev *pdev)
return PCI_SLOT(pdev->devfn) - 0x18;
}
-enum amd64_chipset_families {
+enum amd_families {
K8_CPUS = 0,
F10_CPUS,
+ F15_CPUS,
+ NUM_FAMILIES,
};
/* Error injection control structure */
@@ -392,6 +307,28 @@ struct error_injection {
u32 bit_map;
};
+/* low and high part of PCI config space regs */
+struct reg_pair {
+ u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+ struct reg_pair base;
+ struct reg_pair lim;
+};
+
+/* A DCT chip selects collection */
+struct chip_select {
+ u32 csbases[NUM_CHIPSELECTS];
+ u8 b_cnt;
+
+ u32 csmasks[NUM_CHIPSELECTS];
+ u8 m_cnt;
+};
+
struct amd64_pvt {
struct low_ops *ops;
@@ -414,60 +351,50 @@ struct amd64_pvt {
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
- u32 dcsb0[MAX_CS_COUNT];
- u32 dcsb1[MAX_CS_COUNT];
-
- /* DRAM CS Mask Registers F2x[1,0][6C:60] */
- u32 dcsm0[MAX_CS_COUNT];
- u32 dcsm1[MAX_CS_COUNT];
-
- /*
- * Decoded parts of DRAM BASE and LIMIT Registers
- * F1x[78,70,68,60,58,50,48,40]
- */
- u64 dram_base[DRAM_REG_COUNT];
- u64 dram_limit[DRAM_REG_COUNT];
- u8 dram_IntlvSel[DRAM_REG_COUNT];
- u8 dram_IntlvEn[DRAM_REG_COUNT];
- u8 dram_DstNode[DRAM_REG_COUNT];
- u8 dram_rw_en[DRAM_REG_COUNT];
-
- /*
- * The following fields are set at (load) run time, after CPU revision
- * has been determined, since the dct_base and dct_mask registers vary
- * based on revision
- */
- u32 dcsb_base; /* DCSB base bits */
- u32 dcsm_mask; /* DCSM mask bits */
- u32 cs_count; /* num chip selects (== num DCSB registers) */
- u32 num_dcsm; /* Number of DCSM registers */
- u32 dcs_mask_notused; /* DCSM notused mask bits */
- u32 dcs_shift; /* DCSB and DCSM shift value */
+ /* one for each DCT */
+ struct chip_select csels[2];
+
+ /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+ struct dram_range ranges[DRAM_RANGES];
u64 top_mem; /* top of memory below 4GB */
u64 top_mem2; /* top of memory above 4GB */
- u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
- u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
- u32 online_spare; /* On-Line spare Reg */
+ u32 dct_sel_lo; /* DRAM Controller Select Low */
+ u32 dct_sel_hi; /* DRAM Controller Select High */
+ u32 online_spare; /* On-Line spare Reg */
/* x4 or x8 syndromes in use */
- u8 syn_type;
-
- /* temp storage for when input is received from sysfs */
- struct err_regs ctl_error_info;
+ u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
+};
- /* DCT per-family scrubrate setting */
- u32 min_scrubrate;
+static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
- /* family name this instance is running on */
- const char *ctl_name;
+ if (boot_cpu_data.x86 == 0xf)
+ return addr;
-};
+ return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+ if (boot_cpu_data.x86 == 0xf)
+ return lim;
+
+ return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
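/*
 * Reading the two helpers above together (illustration only): base.lo[31:16]
 * supplies sys-address bits [39:24] and, on families newer than K8,
 * base.hi[7:0] supplies bits [47:40]; the limit is composed the same way and
 * then OR-ed with 0x00ffffff so it points at the last byte of its
 * 16MB-granular range.
 */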
+
+static inline u16 extract_syndrome(u64 status)
+{
+ return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
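/*
 * I.e. syndrome[7:0] is taken from MCi_STATUS[54:47] and syndrome[15:8]
 * from MCi_STATUS[31:24], straight from the shifts above.
 */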
/*
* per-node ECC settings descriptor
@@ -511,14 +438,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
*/
struct low_ops {
int (*early_channel_count) (struct amd64_pvt *pvt);
-
- u64 (*get_error_address) (struct mem_ctl_info *mci,
- struct err_regs *info);
- void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
- void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
- struct err_regs *info, u64 SystemAddr);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
+ void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome);
+ int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+ int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
+ u32 *val, const char *func);
};
struct amd64_family_type {
@@ -527,28 +451,17 @@ struct amd64_family_type {
struct low_ops ops;
};
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
-{
- int err = 0;
-
- err = pci_read_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error reading F%dx%x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func);
#define amd64_read_pci_cfg(pdev, offset, val) \
- amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+ __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define amd64_write_pci_cfg(pdev, offset, val) \
+ __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+ pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 688478de1cbd..303f10e03dda 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 39d97cfdf58c..73196f7b7229 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
{
int err;
- debugf1("%s()\n", __func__);
+ debugf4("%s()\n", __func__);
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
@@ -853,26 +853,26 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
 * Remove all the attributes first
*/
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- debugf1("%s() seeking for group %s\n",
+ debugf4("%s() seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
list_for_each_entry(grp_kobj,
&mci->grp_kobj_list, list) {
- debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
- debugf0("%s() group %s\n", __func__,
+ debugf4("%s() group %s\n", __func__,
sysfs_attrib->grp->name);
kobject_put(&grp_kobj->kobj);
}
}
- debugf1("%s() end of seeking for group %s\n",
+ debugf4("%s() end of seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
sysfs_remove_file(kobj, &sysfs_attrib->attr);
} else
@@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
- debugf0("%s() unregister this mci kobj\n", __func__);
+ debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 /* remove this mci instance's attributes */
if (mci->mc_driver_sysfs_attributes) {
- debugf0("%s() unregister mci private attributes\n", __func__);
+ debugf4("%s() unregister mci private attributes\n", __func__);
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj, 0);
}
/* remove the symlink */
- debugf0("%s() remove_link\n", __func__);
+ debugf4("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
/* unregister this instance's kobject */
- debugf0("%s() remove_mci_instance\n", __func__);
+ debugf4("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 05523b504271..76d1f576cdc8 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci;
#define AMBPRESENT_0 0x64
#define AMBPRESENT_1 0x66
-const static u16 mtr_regs[MAX_SLOTS] = {
+static const u16 mtr_regs[MAX_SLOTS] = {
0x80, 0x84, 0x88, 0x8c,
0x82, 0x86, 0x8a, 0x8e
};
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 81154ab296b6..5a92a6d7ec86 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1874,7 +1874,8 @@ static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
&pvt->i7core_dev->pdev[0]->dev,
EDAC_MOD_STR);
if (unlikely(!pvt->i7core_pci))
- pr_warn("Unable to setup PCI error report via EDAC\n");
+ i7core_printk(KERN_WARNING,
+ "Unable to setup PCI error report via EDAC\n");
}
static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
@@ -2035,7 +2036,7 @@ fail0:
static int __devinit i7core_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int rc;
+ int rc, count = 0;
struct i7core_dev *i7core_dev;
/* get the pci devices we want to reserve for our use */
@@ -2055,12 +2056,28 @@ static int __devinit i7core_probe(struct pci_dev *pdev,
goto fail0;
list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ count++;
rc = i7core_register_mci(i7core_dev);
if (unlikely(rc < 0))
goto fail1;
}
- i7core_printk(KERN_INFO, "Driver loaded.\n");
+ /*
+ * Nehalem-EX uses a different memory controller. However, as the
+ * memory controller is not visible on some Nehalem/Nehalem-EP, we
+ * need to indirectly probe via an X58 PCI device. The same devices
+ * are found on (some) Nehalem-EX. So, on those machines, the
+ * probe routine needs to return -ENODEV, as the actual Memory
+ * Controller registers won't be detected.
+ */
+ if (!count) {
+ rc = -ENODEV;
+ goto fail1;
+ }
+
+ i7core_printk(KERN_INFO,
+ "Driver loaded, %d memory controller(s) found.\n",
+ count);
mutex_unlock(&i7core_edac_lock);
return 0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 3218819b7286..92e65e7038e9 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled
* 3:2 Rank 1 architecture
* 1:0 Rank 0 architecture
*
- * 00 => x16 devices; i.e 4 banks
- * 01 => x8 devices; i.e 8 banks
+ * 00 => 4 banks
+ * 01 => 8 banks
*/
#define I82975X_C0BNKARC 0x10e
#define I82975X_C1BNKARC 0x18e
@@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
int row, multi_chan, chan;
+ unsigned long offst, page;
multi_chan = mci->csrows[0].nr_channels - 1;
@@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
info->errsts = info->errsts2;
}
- chan = info->eap & 1;
- info->eap >>= 1;
- if (info->xeap )
- info->eap |= 0x80000000;
- info->eap >>= PAGE_SHIFT;
- row = edac_mc_find_csrow_by_page(mci, info->eap);
+ page = (unsigned long) info->eap;
+ if (info->xeap & 1)
+ page |= 0x100000000ul;
+ chan = page & 1;
+ page >>= 1;
+ offst = page & ((1 << PAGE_SHIFT) - 1);
+ page >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, page);
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+ edac_mc_handle_ue(mci, page, offst, row, "i82975x UE");
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
multi_chan ? chan : 0,
"i82975x CE");
@@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window)
static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
{
/*
- * ASUS P5W DH either does not program this register or programs
- * it wrong!
- * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val'
- * for each rank should be 01b - the LSB of the word should be 0x55;
- * but it reads 0!
+ * ECC is possible on i82975x ONLY with DEV_X8
*/
return DEV_X8;
}
@@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *mch_window)
{
+ static const char *labels[4] = {
+ "DIMM A1", "DIMM A2",
+ "DIMM B1", "DIMM B2"
+ };
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
u32 cumul_size;
- int index;
+ int index, chan;
last_cumul_size = 0;
@@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all eight rows.
- *
- * FIXME:
- * EDAC currently works for Dual-channel Interleaved configuration.
- * Other configurations, which the chip supports, need fixing/testing.
+ * contain the total memory contained in all rows.
*
*/
@@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
((index >= 4) ? 0x80 : 0));
cumul_size = value;
cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+ /*
+ * Adjust cumul_size w.r.t number of channels
+ *
+ */
+ if (csrow->nr_channels > 1)
+ cumul_size <<= 1;
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+
+ /*
+ * Initialise dram labels
+ * index values:
+ * [0-7] for single-channel; i.e. csrow->nr_channels = 1
+ * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++)
+ strncpy(csrow->channels[chan].label,
+ labels[(index >> 1) + (chan * 2)],
+ EDAC_MC_LABEL_LEN);
+
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */
- csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+ csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */
+ csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
csrow->dtype = i82975x_dram_type(mch_window, index);
csrow->edac_mode = EDAC_SECDED; /* only supported */
}
@@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR;
+ mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82975X_REVISION;
mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
mci->ctl_page_to_phys = NULL;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct i82975x_pvt *) mci->pvt_info;
pvt->mch_window = mch_window;
i82975x_init_csrows(mci, pdev, mch_window);
+ mci->scrub_mode = SCRUB_HW_SRC;
i82975x_get_error_info(mci, &discard); /* clear counters */
/* finalize this instance of memory controller with edac core */
@@ -664,7 +683,7 @@ module_init(i82975x_init);
module_exit(i82975x_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
module_param(edac_op_state, int, 0444);
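For reference, the reworked address math in i82975x_process_error_info() works out as follows with hypothetical register values and the usual PAGE_SHIFT of 12: for xeap = 0 and eap = 0x12345679, bit 0 selects channel 1; shifting it out leaves 0x091a2b3c, whose low 12 bits (0xb3c) become the reported offset and whose remaining bits (0x91a2) become the page number passed to edac_mc_find_csrow_by_page().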
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index f6cf73d93359..795cfbc0bf50 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec)
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u32 nbsh = (u32)(m->status >> 32);
@@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
- if ((boot_cpu_data.x86 == 0x10) &&
- (boot_cpu_data.x86_model > 7)) {
- if (nbsh & K8_NBSH_ERR_CPU_VAL)
+ if (c->x86 == 0x10 && c->x86_model > 7) {
+ if (nbsh & NBSH_ERR_CPU_VAL)
core = nbsh & nb_err_cpumask;
} else {
u8 assoc_cpus = nbsh & nb_err_cpumask;
@@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
if (!fam_ops->nb_mce(ec, xec))
goto wrong_nb_mce;
- if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
+ if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
nb_bus_decoder(node_id, m, nbcfg);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 45dda47173f2..795a3206acf5 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -31,19 +31,10 @@
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
-#define K8_NBSH 0x4C
-
-#define K8_NBSH_VALID_BIT BIT(31)
-#define K8_NBSH_OVERFLOW BIT(30)
-#define K8_NBSH_UC_ERR BIT(29)
-#define K8_NBSH_ERR_EN BIT(28)
-#define K8_NBSH_MISCV BIT(27)
-#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
-#define K8_NBSH_PCC BIT(25)
-#define K8_NBSH_ERR_CPU_VAL BIT(24)
-#define K8_NBSH_CECC BIT(14)
-#define K8_NBSH_UECC BIT(13)
-#define K8_NBSH_ERR_SCRUBER BIT(8)
+/*
+ * F3x4C bits (MCi_STATUS' high half)
+ */
+#define NBSH_ERR_CPU_VAL BIT(24)
enum tt_ids {
TT_INSTR = 0,
@@ -86,17 +77,6 @@ extern const char *to_msgs[];
extern const char *ii_msgs[];
/*
- * relevant NB regs
- */
-struct err_regs {
- u32 nbcfg;
- u32 nbsh;
- u32 nbsl;
- u32 nbeah;
- u32 nbeal;
-};
-
-/*
* per-family decoder ops
*/
struct amd_decoder_ops {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index b123bb308a4a..ffb5ad080bee 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -200,8 +200,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_pci_err_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
{
struct edac_pci_ctl_info *pci;
struct mpc85xx_pci_pdata *pdata;
@@ -338,7 +337,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
-static struct of_platform_driver mpc85xx_pci_err_driver = {
+static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
.remove = __devexit_p(mpc85xx_pci_err_remove),
.driver = {
@@ -503,8 +502,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_l2_err_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev;
struct mpc85xx_l2_pdata *pdata;
@@ -656,7 +654,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
-static struct of_platform_driver mpc85xx_l2_err_driver = {
+static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
@@ -956,8 +954,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit mpc85xx_mc_err_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
struct mpc85xx_mc_pdata *pdata;
@@ -1136,7 +1133,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
-static struct of_platform_driver mpc85xx_mc_err_driver = {
+static struct platform_driver mpc85xx_mc_err_driver = {
.probe = mpc85xx_mc_err_probe,
.remove = mpc85xx_mc_err_remove,
.driver = {
@@ -1171,16 +1168,16 @@ static int __init mpc85xx_mc_init(void)
break;
}
- res = of_register_platform_driver(&mpc85xx_mc_err_driver);
+ res = platform_driver_register(&mpc85xx_mc_err_driver);
if (res)
printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
- res = of_register_platform_driver(&mpc85xx_l2_err_driver);
+ res = platform_driver_register(&mpc85xx_l2_err_driver);
if (res)
printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
#ifdef CONFIG_PCI
- res = of_register_platform_driver(&mpc85xx_pci_err_driver);
+ res = platform_driver_register(&mpc85xx_pci_err_driver);
if (res)
printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif
@@ -1212,10 +1209,10 @@ static void __exit mpc85xx_mc_exit(void)
on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
#endif
#ifdef CONFIG_PCI
- of_unregister_platform_driver(&mpc85xx_pci_err_driver);
+ platform_driver_unregister(&mpc85xx_pci_err_driver);
#endif
- of_unregister_platform_driver(&mpc85xx_l2_err_driver);
- of_unregister_platform_driver(&mpc85xx_mc_err_driver);
+ platform_driver_unregister(&mpc85xx_l2_err_driver);
+ platform_driver_unregister(&mpc85xx_mc_err_driver);
}
module_exit(mpc85xx_mc_exit);
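The mpc85xx hunks above and the ppc4xx hunks below apply the same mechanical conversion away from of_platform_driver; in outline (a pattern sketch with invented names, not a literal excerpt):

/* before: OF-specific probe, registered via of_register_platform_driver() */
static int foo_probe_old(struct platform_device *op,
			 const struct of_device_id *match);

/* after: plain platform_driver probe, registered via
 * platform_driver_register(); any OF match data is reached through op->dev */
static int foo_probe(struct platform_device *op);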
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index b9f0c20df1aa..c1f0045ceb8e 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -184,8 +184,7 @@ struct ppc4xx_ecc_status {
/* Function Prototypes */
-static int ppc4xx_edac_probe(struct platform_device *device,
- const struct of_device_id *device_id);
+static int ppc4xx_edac_probe(struct platform_device *device);
static int ppc4xx_edac_remove(struct platform_device *device);
/* Global Variables */
@@ -201,7 +200,7 @@ static struct of_device_id ppc4xx_edac_match[] = {
{ }
};
-static struct of_platform_driver ppc4xx_edac_driver = {
+static struct platform_driver ppc4xx_edac_driver = {
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
.driver = {
@@ -997,9 +996,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* initialized.
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller this EDAC instance is bound to.
- * @match: A pointer to the OpenFirmware device tree match
- * information associated with the controller this EDAC instance
- * is bound to.
* @dcr_host: A pointer to the DCR data containing the DCR mapping
* for this controller instance.
* @mcopt1: The 32-bit Memory Controller Option 1 register value
@@ -1015,7 +1011,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
static int __devinit
ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
struct platform_device *op,
- const struct of_device_id *match,
const dcr_host_t *dcr_host,
u32 mcopt1)
{
@@ -1024,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
struct ppc4xx_edac_pdata *pdata = NULL;
const struct device_node *np = op->dev.of_node;
- if (match == NULL)
+ if (op->dev.of_match == NULL)
return -EINVAL;
/* Initial driver pointers and private data */
@@ -1227,9 +1222,6 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* ppc4xx_edac_probe - check controller and bind driver
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller being probed for driver binding.
- * @match: A pointer to the OpenFirmware device tree match
- * information associated with the controller being probed
- * for driver binding.
*
* This routine probes a specific ibm,sdram-4xx-ddr2 controller
* instance for binding with the driver.
@@ -1237,8 +1229,7 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* Returns 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
-static int __devinit
-ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ppc4xx_edac_probe(struct platform_device *op)
{
int status = 0;
u32 mcopt1, memcheck;
@@ -1304,7 +1295,7 @@ ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match)
goto done;
}
- status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1);
+ status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1);
if (status) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -1421,7 +1412,7 @@ ppc4xx_edac_init(void)
ppc4xx_edac_opstate_init();
- return of_register_platform_driver(&ppc4xx_edac_driver);
+ return platform_driver_register(&ppc4xx_edac_driver);
}
/**
@@ -1434,7 +1425,7 @@ ppc4xx_edac_init(void)
static void __exit
ppc4xx_edac_exit(void)
{
- of_unregister_platform_driver(&ppc4xx_edac_driver);
+ platform_driver_unregister(&ppc4xx_edac_driver);
}
module_init(ppc4xx_edac_init);
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 0c56989cd907..2be6f4520772 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -75,7 +75,8 @@ config FIREWIRE_NOSY
The following cards are known to be based on PCILynx or PCILynx-2:
IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
(PCI card), Newer Technology FireWire 2 Go (CardBus card),
- Apple Power Mac G3 blue & white (onboard controller).
+ Apple Power Mac G3 blue & white and G4 with PCI graphics
+ (onboard controller).
To compile this driver as a module, say M here: The module will be
called nosy. Source code of a userspace interface to nosy, called
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 24ff35511e2b..3241dc4e1fc9 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -233,7 +233,7 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
+ time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
if (!schedule_delayed_work(&card->br_work, 2 * HZ))
fw_card_put(card);
return;
@@ -316,7 +316,8 @@ static void bm_work(struct work_struct *work)
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
- grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ grace = time_after64(get_jiffies_64(),
+ card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if ((is_next_generation(generation, card->bm_generation) &&
!card->bm_abdicate) ||
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 48ae712e2101..62ac111af243 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -64,6 +64,7 @@ struct client {
struct idr resource_idr;
struct list_head event_list;
wait_queue_head_t wait;
+ wait_queue_head_t tx_flush_wait;
u64 bus_reset_closure;
struct fw_iso_context *iso_context;
@@ -251,6 +252,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
idr_init(&client->resource_idr);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
+ init_waitqueue_head(&client->tx_flush_wait);
INIT_LIST_HEAD(&client->phy_receiver_link);
kref_init(&client->kref);
@@ -520,10 +522,6 @@ static int release_client_resource(struct client *client, u32 handle,
static void release_transaction(struct client *client,
struct client_resource *resource)
{
- struct outbound_transaction_resource *r = container_of(resource,
- struct outbound_transaction_resource, resource);
-
- fw_cancel_transaction(client->device->card, &r->transaction);
}
static void complete_transaction(struct fw_card *card, int rcode,
@@ -540,22 +538,9 @@ static void complete_transaction(struct fw_card *card, int rcode,
memcpy(rsp->data, payload, rsp->length);
spin_lock_irqsave(&client->lock, flags);
- /*
- * 1. If called while in shutdown, the idr tree must be left untouched.
- * The idr handle will be removed and the client reference will be
- * dropped later.
- * 2. If the call chain was release_client_resource ->
- * release_transaction -> complete_transaction (instead of a normal
- * conclusion of the transaction), i.e. if this resource was already
- * unregistered from the idr, the client reference will be dropped
- * by release_client_resource and we must not drop it here.
- */
- if (!client->in_shutdown &&
- idr_find(&client->resource_idr, e->r.resource.handle)) {
- idr_remove(&client->resource_idr, e->r.resource.handle);
- /* Drop the idr's reference */
- client_put(client);
- }
+ idr_remove(&client->resource_idr, e->r.resource.handle);
+ if (client->in_shutdown)
+ wake_up(&client->tx_flush_wait);
spin_unlock_irqrestore(&client->lock, flags);
rsp->type = FW_CDEV_EVENT_RESPONSE;
@@ -575,7 +560,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
NULL, 0);
- /* Drop the transaction callback's reference */
+ /* Drop the idr's reference */
client_put(client);
}
@@ -614,9 +599,6 @@ static int init_request(struct client *client,
if (ret < 0)
goto failed;
- /* Get a reference for the transaction callback */
- client_get(client);
-
fw_send_request(client->device->card, &e->r.transaction,
request->tcode, destination_id, request->generation,
speed, request->offset, e->response.data,
@@ -1223,7 +1205,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
/* Allow 1000ms grace period for other reallocations. */
if (todo == ISO_RES_ALLOC &&
- time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+ time_before64(get_jiffies_64(),
+ client->device->card->reset_jiffies + HZ)) {
schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
skip = true;
} else {
@@ -1678,6 +1661,25 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
+static int is_outbound_transaction_resource(int id, void *p, void *data)
+{
+ struct client_resource *resource = p;
+
+ return resource->release == release_transaction;
+}
+
+static int has_outbound_transactions(struct client *client)
+{
+ int ret;
+
+ spin_lock_irq(&client->lock);
+ ret = idr_for_each(&client->resource_idr,
+ is_outbound_transaction_resource, NULL);
+ spin_unlock_irq(&client->lock);
+
+ return ret;
+}
+
static int shutdown_resource(int id, void *p, void *data)
{
struct client_resource *resource = p;
@@ -1713,6 +1715,8 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
client->in_shutdown = true;
spin_unlock_irq(&client->lock);
+ wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
+
idr_for_each(&client->resource_idr, shutdown_resource, client);
idr_remove_all(&client->resource_idr);
idr_destroy(&client->resource_idr);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 6113b896e790..57461923bacf 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -747,7 +747,8 @@ static void fw_device_shutdown(struct work_struct *work)
container_of(work, struct fw_device, work.work);
int minor = MINOR(device->device.devt);
- if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_before64(get_jiffies_64(),
+ device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
return;
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index c003fa4e2db1..95bc01a02a88 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -235,45 +235,45 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
static int manage_channel(struct fw_card *card, int irm_id, int generation,
u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
- __be32 c, all, old;
- int i, ret = -EIO, retry = 5;
+ __be32 bit, all, old;
+ int channel, ret = -EIO, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
- for (i = 0; i < 32; i++) {
- if (!(channels_mask & 1 << i))
+ for (channel = 0; channel < 32; channel++) {
+ if (!(channels_mask & 1 << channel))
continue;
ret = -EBUSY;
- c = cpu_to_be32(1 << (31 - i));
- if ((old & c) != (all & c))
+ bit = cpu_to_be32(1 << (31 - channel));
+ if ((old & bit) != (all & bit))
continue;
data[0] = old;
- data[1] = old ^ c;
+ data[1] = old ^ bit;
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
offset, data, 8)) {
case RCODE_GENERATION:
/* A generation change frees all channels. */
- return allocate ? -EAGAIN : i;
+ return allocate ? -EAGAIN : channel;
case RCODE_COMPLETE:
if (data[0] == old)
- return i;
+ return channel;
old = data[0];
/* Is the IRM 1394a-2000 compliant? */
- if ((data[0] & c) == (data[1] & c))
+ if ((data[0] & bit) == (data[1] & bit))
continue;
/* 1394-1995 IRM, fall through to retry. */
default:
if (retry) {
retry--;
- i--;
+ channel--;
} else {
ret = -EIO;
}
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 09be1a635505..193ed9233144 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -545,7 +545,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
*/
smp_wmb();
card->generation = generation;
- card->reset_jiffies = jiffies;
+ card->reset_jiffies = get_jiffies_64();
card->bm_node_id = 0xffff;
card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
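The firewire hunks above all follow one pattern: card->reset_jiffies becomes a 64-bit timestamp taken with get_jiffies_64(), and comparisons switch to time_before64()/time_after64(), which stay correct even after the 32-bit jiffies counter wraps (about 49.7 days at HZ=1000). A minimal sketch of the idiom, assuming a u64 timestamp stored as in the hunks above:

#include <linux/types.h>
#include <linux/jiffies.h>

/* Sketch only: reset_jiffies is assumed to have been written with
 * get_jiffies_64() at bus-reset time. */
static bool within_grace_period(u64 reset_jiffies, unsigned long grace)
{
	/* Full 64-bit comparison; time_is_after_jiffies() on an unsigned
	 * long timestamp can give the wrong answer for timestamps taken
	 * before a jiffies wrap. */
	return time_before64(get_jiffies_64(), reset_jiffies + grace);
}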
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bd3c61b6dd8d..c0572283b93e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -208,9 +208,11 @@ struct fw_ohci {
struct context at_request_ctx;
struct context at_response_ctx;
+ u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
struct iso_context *it_context_list;
u64 ir_context_channels; /* unoccupied channels */
+ u32 ir_context_support;
u32 ir_context_mask; /* unoccupied IR contexts */
struct iso_context *ir_context_list;
u64 mc_channels; /* channels in use by the multichannel IR context */
@@ -338,7 +340,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -351,6 +353,7 @@ static void log_irqs(u32 evt)
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
+ evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
evt & OHCI1394_busReset ? " busReset" : "",
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
@@ -1590,6 +1593,47 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
}
+static void detect_dead_context(struct fw_ohci *ohci,
+ const char *name, unsigned int regs)
+{
+ u32 ctl;
+
+ ctl = reg_read(ohci, CONTROL_SET(regs));
+ if (ctl & CONTEXT_DEAD) {
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+ fw_error("DMA context %s has stopped, error code: %s\n",
+ name, evts[ctl & 0x1f]);
+#else
+ fw_error("DMA context %s has stopped, error code: %#x\n",
+ name, ctl & 0x1f);
+#endif
+ }
+}
+
+static void handle_dead_contexts(struct fw_ohci *ohci)
+{
+ unsigned int i;
+ char name[8];
+
+ detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
+ detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
+ detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
+ detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
+ for (i = 0; i < 32; ++i) {
+ if (!(ohci->it_context_support & (1 << i)))
+ continue;
+ sprintf(name, "IT%u", i);
+ detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
+ }
+ for (i = 0; i < 32; ++i) {
+ if (!(ohci->ir_context_support & (1 << i)))
+ continue;
+ sprintf(name, "IR%u", i);
+ detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
+ }
+ /* TODO: maybe try to flush and restart the dead contexts */
+}
+
static u32 cycle_timer_ticks(u32 cycle_timer)
{
u32 ticks;
@@ -1904,6 +1948,9 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
+ if (unlikely(event & OHCI1394_unrecoverableError))
+ handle_dead_contexts(ohci);
+
if (event & OHCI1394_cycle64Seconds) {
spin_lock(&ohci->lock);
update_bus_time(ohci);
@@ -2141,7 +2188,9 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_selfIDComplete |
OHCI1394_regAccessFail |
OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+ OHCI1394_cycleInconsistent |
+ OHCI1394_unrecoverableError |
+ OHCI1394_cycleTooLong |
OHCI1394_masterIntEnable;
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
irqs |= OHCI1394_busReset;
@@ -2657,6 +2706,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
u32 control = IR_CONTEXT_ISOCH_HEADER, match;
int index;
+ /* the controller cannot start without any queued packets */
+ if (ctx->context.last->branch_address == 0)
+ return -ENODATA;
+
switch (ctx->base.type) {
case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
@@ -2715,6 +2768,7 @@ static int ohci_stop_iso(struct fw_iso_context *base)
}
flush_writes(ohci);
context_stop(&ctx->context);
+ tasklet_kill(&ctx->context.tasklet);
return 0;
}
@@ -3207,15 +3261,17 @@ static int __devinit pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
- ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+ ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+ ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
size = sizeof(struct iso_context) * ohci->n_ir;
ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
- ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
size = sizeof(struct iso_context) * ohci->n_it;
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 69ad529d92fb..ea5ac2dc1233 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
}
/* generate SMI */
+ /* inb to force posted write through and make SMI happen now */
asm volatile (
- "outb %b0,%w1"
+ "outb %b0,%w1\n"
+ "inb %w1"
: /* no output args */
: "a" (smi_cmd->command_code),
"d" (smi_cmd->command_address),
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
index 813ac077e5d7..2514fb075f4a 100644
--- a/drivers/gpio/janz-ttl.c
+++ b/drivers/gpio/janz-ttl.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
@@ -149,7 +150,7 @@ static int __devinit ttl_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(dev, "no platform data\n");
ret = -ENXIO;
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index b473429eee75..2fc25dec7cf5 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -462,7 +462,8 @@ pca953x_get_alt_pdata(struct i2c_client *client)
{
struct pca953x_platform_data *pdata;
struct device_node *node;
- const uint16_t *val;
+ const __be32 *val;
+ int size;
node = client->dev.of_node;
if (node == NULL)
@@ -475,13 +476,13 @@ pca953x_get_alt_pdata(struct i2c_client *client)
}
pdata->gpio_base = -1;
- val = of_get_property(node, "linux,gpio-base", NULL);
+ val = of_get_property(node, "linux,gpio-base", &size);
if (val) {
- if (*val < 0)
- dev_warn(&client->dev,
- "invalid gpio-base in device tree\n");
+ if (size != sizeof(*val))
+ dev_warn(&client->dev, "%s: wrong linux,gpio-base\n",
+ node->full_name);
else
- pdata->gpio_base = *val;
+ pdata->gpio_base = be32_to_cpup(val);
}
val = of_get_property(node, "polarity", NULL);
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 2975d22daffe..838ddbdf90cc 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -232,7 +232,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
-static int pl061_probe(struct amba_device *dev, struct amba_id *id)
+static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
index 897e0577e65e..a9bda881935a 100644
--- a/drivers/gpio/rdc321x-gpio.c
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/mfd/rdc321x.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
struct rdc321x_gpio {
@@ -135,7 +136,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
struct rdc321x_gpio *rdc321x_gpio_dev;
struct rdc321x_gpio_pdata *pdata;
- pdata = platform_get_drvdata(pdev);
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index e60be0015c9b..d2f874c3d3d5 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -25,6 +25,8 @@
#include <linux/workqueue.h>
#include <linux/i2c/sx150x.h>
+#define NO_UPDATE_PENDING -1
+
struct sx150x_device_data {
u8 reg_pullup;
u8 reg_pulldn;
@@ -47,8 +49,11 @@ struct sx150x_chip {
const struct sx150x_device_data *dev_cfg;
int irq_summary;
int irq_base;
+ int irq_update;
u32 irq_sense;
- unsigned long irq_set_type_pending;
+ u32 irq_masked;
+ u32 dev_sense;
+ u32 dev_masked;
struct irq_chip irq_chip;
struct mutex lock;
};
@@ -312,9 +317,8 @@ static void sx150x_irq_mask(struct irq_data *d)
chip = container_of(ic, struct sx150x_chip, irq_chip);
n = d->irq - chip->irq_base;
-
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
+ chip->irq_masked |= (1 << n);
+ chip->irq_update = n;
}
static void sx150x_irq_unmask(struct irq_data *d)
@@ -326,9 +330,8 @@ static void sx150x_irq_unmask(struct irq_data *d)
chip = container_of(ic, struct sx150x_chip, irq_chip);
n = d->irq - chip->irq_base;
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
- chip->irq_sense >> (n * 2));
+ chip->irq_masked &= ~(1 << n);
+ chip->irq_update = n;
}
static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -350,7 +353,7 @@ static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
chip->irq_sense &= ~(3UL << (n * 2));
chip->irq_sense |= val << (n * 2);
- chip->irq_set_type_pending |= BIT(n);
+ chip->irq_update = n;
return 0;
}
@@ -404,15 +407,29 @@ static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
chip = container_of(ic, struct sx150x_chip, irq_chip);
- while (chip->irq_set_type_pending) {
- n = __ffs(chip->irq_set_type_pending);
- chip->irq_set_type_pending &= ~BIT(n);
- if (!(irq_to_desc(n + chip->irq_base)->status & IRQ_MASKED))
- sx150x_write_cfg(chip, n, 2,
- chip->dev_cfg->reg_sense,
- chip->irq_sense >> (n * 2));
- }
+ if (chip->irq_update == NO_UPDATE_PENDING)
+ goto out;
+
+ n = chip->irq_update;
+ chip->irq_update = NO_UPDATE_PENDING;
+ /* Avoid updates if nothing changed */
+ if (chip->dev_sense == chip->irq_sense &&
+ chip->dev_masked == chip->irq_masked)
+ goto out;
+
+ chip->dev_sense = chip->irq_sense;
+ chip->dev_masked = chip->irq_masked;
+
+ if (chip->irq_masked & (1 << n)) {
+ sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
+ sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
+ } else {
+ sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
+ sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
+ chip->irq_sense >> (n * 2));
+ }
+out:
mutex_unlock(&chip->lock);
}
@@ -445,8 +462,11 @@ static void sx150x_init_chip(struct sx150x_chip *chip,
chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
chip->irq_summary = -1;
chip->irq_base = -1;
+ chip->irq_masked = ~0;
chip->irq_sense = 0;
- chip->irq_set_type_pending = 0;
+ chip->dev_masked = ~0;
+ chip->dev_sense = 0;
+ chip->irq_update = NO_UPDATE_PENDING;
}
static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
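The sx150x rework above is the standard irq_chip pattern for controllers behind a sleeping bus (here I2C): irq_mask/irq_unmask/irq_set_type run in atomic context and therefore only update cached state, while irq_bus_sync_unlock(), which may sleep, pushes the accumulated changes to the hardware in one place. A stripped-down sketch of that split, with hypothetical foo_* names and the register write left as a comment:

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/mutex.h>

struct foo_chip {
	struct irq_chip irq_chip;
	struct mutex lock;	/* held between bus_lock and bus_sync_unlock */
	unsigned irq_base;
	u32 irq_masked;		/* cached mask bits */
	u32 dev_masked;		/* what the hardware currently holds */
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_chip *chip =
		container_of(irq_data_get_irq_chip(d), struct foo_chip, irq_chip);

	/* Atomic context: only touch the cache, no bus access here. */
	chip->irq_masked |= 1 << (d->irq - chip->irq_base);
}

static void foo_irq_bus_lock(struct irq_data *d)
{
	struct foo_chip *chip =
		container_of(irq_data_get_irq_chip(d), struct foo_chip, irq_chip);

	mutex_lock(&chip->lock);
}

static void foo_irq_bus_sync_unlock(struct irq_data *d)
{
	struct foo_chip *chip =
		container_of(irq_data_get_irq_chip(d), struct foo_chip, irq_chip);

	if (chip->dev_masked != chip->irq_masked) {
		chip->dev_masked = chip->irq_masked;
		/* A sleeping I2C register write would go here. */
	}
	mutex_unlock(&chip->lock);
}

The chip then sets .irq_bus_lock and .irq_bus_sync_unlock in its struct irq_chip alongside the mask/unmask callbacks, exactly as the driver above does.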
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 58c8f30352dd..ffcd815b8b8b 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timb_gpio.h>
@@ -228,7 +229,7 @@ static int __devinit timbgpio_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct timbgpio *tgpio;
struct resource *iomem;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timbgpio_platform_data *pdata = mfd_get_data(pdev);
int irq = platform_get_irq(pdev, 0);
if (!pdata || pdata->nr_pins > 32) {
@@ -319,14 +320,13 @@ err_mem:
static int __devexit timbgpio_remove(struct platform_device *pdev)
{
int err;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
struct timbgpio *tgpio = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
if (irq >= 0 && tgpio->irq_base > 0) {
int i;
- for (i = 0; i < pdata->nr_pins; i++) {
+ for (i = 0; i < tgpio->gpio.ngpio; i++) {
set_irq_chip(tgpio->irq_base + i, NULL);
set_irq_chip_data(tgpio->irq_base + i, NULL);
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0902d4460039..a6feb78c404c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -73,32 +73,17 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_I810
tristate "Intel I810"
- # BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
- depends on DRM && AGP && AGP_INTEL && BKL
+ # !PREEMPT because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
-choice
- prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
- depends on DRM && AGP && AGP_INTEL
- optional
-
-config DRM_I830
- tristate "i830 driver"
- # BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
- depends on BKL
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM or 865G integrated graphics. If M is selected, the
- module will be called i830. AGP support is required for this driver
- to work. This driver is used by the older X releases X.org 6.7 and
- XFree86 4.3. If unsure, build this and i915 as modules and the X server
- will load the correct one.
-
config DRM_I915
- tristate "i915 driver"
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -115,12 +100,20 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
- module will be called i915. AGP support is required for this driver
- to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
- the X server will load the correct one.
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
@@ -132,8 +125,6 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
-endchoice
-
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d04909..89cf05a72d1c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o
+ drm_trace_points.o drm_global.o drm_usb.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 654faa803dcb..4c95b5fd9df3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2694,3 +2694,36 @@ void drm_mode_config_reset(struct drm_device *dev)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
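These three core ioctls only dispatch to optional drm_driver hooks, so a driver opts into dumb buffers by implementing them. A sketch of the driver side follows; the foo_* functions are hypothetical stubs with the real buffer management elided, and their signatures are taken from the dispatch code above:

#include "drmP.h"

static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	/* Allocate a CPU-mappable buffer of args->width/height/bpp here and
	 * fill in args->handle, args->pitch and args->size. */
	return -ENOSYS;
}

static int foo_dumb_map_offset(struct drm_file *file_priv,
			       struct drm_device *dev, uint32_t handle,
			       uint64_t *offset)
{
	/* Translate the handle into an mmap() fake offset. */
	return -ENOSYS;
}

static int foo_dumb_destroy(struct drm_file *file_priv,
			    struct drm_device *dev, uint32_t handle)
{
	/* Drop the handle and release the buffer. */
	return -ENOSYS;
}

static struct drm_driver foo_driver = {
	/* ... the usual fops/feature setup elided ... */
	.dumb_create	 = foo_dumb_create,
	.dumb_map_offset = foo_dumb_map_offset,
	.dumb_destroy	 = foo_dumb_destroy,
};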
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 271835a71570..0d04914eb058 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -150,7 +150,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -234,49 +237,6 @@ int drm_lastclose(struct drm_device * dev)
return 0;
}
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
- return drm_platform_init(driver);
- else
- return drm_pci_init(driver);
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a245d17165ae..af60d9be9632 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -449,12 +449,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
+ struct drm_display_mode *mode = NULL;
int i;
- struct drm_display_mode *ptr, *mode;
- mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
@@ -885,7 +884,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
-mode_is_rb(struct drm_display_mode *mode)
+mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
@@ -894,7 +893,8 @@ mode_is_rb(struct drm_display_mode *mode)
}
static bool
-mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
@@ -910,7 +910,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
}
static bool
-mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
@@ -941,7 +942,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
@@ -1472,7 +1473,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
+ struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -1482,7 +1483,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 6eb7592e152f..5f2064489fd5 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -32,7 +32,7 @@
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode edid_est_modes[] = {
+static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..d421f9d58d46 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
palette[regno] = value;
return 0;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b019ebf..57ce27c9a747 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
dev->mm_private = mm;
- if (drm_ht_create(&mm->offset_hash, 19)) {
+ if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
-static int
+int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
@@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index a93d7b4ddaa6..e3a75688f3cd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -39,27 +39,18 @@
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
- unsigned int i;
+ unsigned int size = 1 << order;
- ht->size = 1 << order;
ht->order = order;
- ht->fill = 0;
ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
@@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
- ht->fill--;
return 0;
}
return -EINVAL;
@@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
- ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
@@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
+ if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
+ else
+ vfree(ht->table);
ht->table = NULL;
}
}
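The rewritten drm_ht_create()/drm_ht_remove() pick the allocator by size: tables that fit in one page come from the slab via kcalloc(), larger ones from vmalloc space via vzalloc(), and both return zeroed memory, which is why the explicit INIT_HLIST_HEAD() loop could be dropped (an empty hlist head is all zeroes). A small sketch of that strategy under the same assumption:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/mm.h>

static struct hlist_head *alloc_hash_table(unsigned int order)
{
	unsigned int size = 1 << order;

	/* Small table: fits in a page, use the slab. */
	if (size <= PAGE_SIZE / sizeof(struct hlist_head))
		return kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	/* Large table: fall back to vmalloc space, still zeroed. */
	return vzalloc(size * sizeof(struct hlist_head));
}

static void free_hash_table(struct hlist_head *table, unsigned int order)
{
	/* Must mirror the allocation decision. */
	if ((1u << order) <= PAGE_SIZE / sizeof(struct hlist_head))
		kfree(table);
	else
		vfree(table);
}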
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index be9a9c07d152..ab1162da70f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
-
+ const char *bus_name;
if (!master)
return 0;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->platform_device->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s\n",
- dev->driver->platform_device->name);
- }
+ bus_name = dev->driver->bus->get_name(dev);
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ bus_name,
+ dev_name(dev->dev), master->unique);
} else {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- dev_name(dev->dev));
- }
+ seq_printf(m, "%s %s\n",
+ bus_name, dev_name(dev->dev));
}
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 47db4df37a69..117490590f56 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
+ int ret;
if (master->unique_len || master->unique)
return -EBUSY;
@@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
- goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
+ if (!dev->driver->bus->set_unique)
+ return -EINVAL;
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
+ ret = dev->driver->bus->set_unique(dev, master, u);
+ if (ret)
goto err;
- }
return 0;
@@ -159,74 +121,15 @@ err:
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len, ret;
+ int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- master->unique_len = 10 + strlen(dev->platformdev->name);
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
-
- if (len > master->unique_len) {
- DRM_ERROR("Unique buffer overflowed\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev->devname =
- kmalloc(strlen(dev->platformdev->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
- master->unique);
-
- } else {
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len) {
- DRM_ERROR("buffer overflow");
- ret = -EINVAL;
- goto err;
- } else
- master->unique_len = len;
-
- dev->devname =
- kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
- }
-
+ ret = dev->driver->bus->set_busid(dev, master);
+ if (ret)
+ goto err;
return 0;
-
err:
drm_unset_busid(dev, master);
return ret;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2a8528..a34ef97d3c81 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
+ return dev->driver->bus->irq_by_busid(dev, p);
}
/*
@@ -164,8 +154,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* available. In that case we can't account for this and just
* hope for the best.
*/
- if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
+ }
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
@@ -491,6 +483,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixelduration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
/* Valid dotclock? */
if (dotclock > 0) {
/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +601,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EAGAIN;
}
- /* Don't know yet how to handle interlaced or
- * double scan modes. Just no-op for now.
- */
- if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
- return -ENOTSUPP;
- }
-
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
@@ -858,10 +848,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (rc) {
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
- smp_wmb();
}
+ smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
}
/**
@@ -1011,7 +1002,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1293,15 +1285,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* e.g., due to spurious vblank interrupts. We need to
* ignore those for accounting.
*/
- if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
/* Store new timestamp in ringbuffer. */
vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
- smp_wmb();
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
+ smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c59515ba7e69..add1737dae0d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
- struct drm_mm_node, free_stack);
- list_del(&child->free_stack);
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
- list_add_tail(&node->free_stack, &mm->unused_nodes);
+ list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ return hole_node->start + hole_node->size;
+}
- list_add_tail(&child->node_list, &mm->node_list);
- list_add_tail(&child->free_stack, &mm->free_stack);
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *next_node =
+ list_entry(hole_node->node_list.next, struct drm_mm_node,
+ node_list);
- return 0;
+ return next_node->start;
}
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
{
- struct drm_mm_node *child;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- INIT_LIST_HEAD(&child->free_stack);
+ if (alignment)
+ tmp = hole_start % alignment;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ if (!tmp) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ } else
+ wasted = alignment - tmp;
- list_add_tail(&child->node_list, &parent->node_list);
- INIT_LIST_HEAD(&child->free_stack);
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
- parent->size -= size;
- parent->start += size;
- return child;
-}
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
int atomic)
{
+ struct drm_mm_node *node;
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- if (alignment)
- tmp = node->start % alignment;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm_node *hole_node;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ if (!hole_node)
+ return -ENOSPC;
- return node;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node);
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- if (node->start < start)
- wasted += start - node->start;
+ if (hole_start < start)
+ wasted += start - hole_start;
if (alignment)
- tmp = ((node->start + wasted) % alignment);
+ tmp = (hole_start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
+
+ if (!wasted) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
}
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > end);
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
} else {
- node = drm_mm_split_at_start(node, size, atomic);
+ node->hole_follows = 0;
}
+}
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
*/
-
-void drm_mm_put_block(struct drm_mm_node *cur)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
+ struct drm_mm_node *hole_node;
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->node_list;
- struct list_head *root_head = &mm->node_list;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
- int merged = 0;
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
- BUG_ON(cur->scanned_block || cur->scanned_prev_free
- || cur->scanned_next_free);
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, node_list);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, node_list);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->node_list);
- list_del(&next_node->free_stack);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->free_stack,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->free_stack, &mm->free_stack);
- } else {
- list_del(&cur->node_list);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->free_stack, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ BUG_ON(drm_mm_hole_node_start(node)
+ == drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ } else
+ BUG_ON(drm_mm_hole_node_start(node)
+ != drm_mm_hole_node_end(node));
+
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+
+ drm_mm_remove_node(node);
+
+ spin_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ kfree(node);
+ spin_unlock(&mm->unused_lock);
+}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_hole(entry->start, entry->start + entry->size,
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ BUG_ON(!entry->hole_follows);
+ if (!check_free_hole(drm_mm_hole_node_start(entry),
+ drm_mm_hole_node_end(entry),
size, alignment))
continue;
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- unsigned long adj_start = entry->start < start ?
- start : entry->start;
- unsigned long adj_end = entry->start + entry->size > end ?
- end : entry->start + entry->size;
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+ start : drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+ end : drm_mm_hole_node_end(entry);
+ BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -376,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
* Initialize lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
@@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
@@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
@@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct list_head *prev_free, *next_free;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
- prev_free = next_free = NULL;
-
- BUG_ON(node->free);
+ BUG_ON(node->scanned_block);
node->scanned_block = 1;
- node->free = 1;
-
- if (node->node_list.prev != &mm->node_list) {
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
-
- if (prev_node->free) {
- list_del(&prev_node->node_list);
- node->start = prev_node->start;
- node->size += prev_node->size;
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- prev_node->scanned_prev_free = 1;
-
- prev_free = &prev_node->free_stack;
- }
- }
-
- if (node->node_list.next != &mm->node_list) {
- next_node = list_entry(node->node_list.next, struct drm_mm_node,
- node_list);
-
- if (next_node->free) {
- list_del(&next_node->node_list);
-
- node->size += next_node->size;
-
- next_node->scanned_next_free = 1;
-
- next_free = &next_node->free_stack;
- }
- }
-
- /* The free_stack list is not used for allocated objects, so these two
- * pointers can be abused (as long as no allocations in this memory
- * manager happens). */
- node->free_stack.prev = prev_free;
- node->free_stack.next = next_free;
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+ hole_start = drm_mm_hole_node_start(prev_node);
+ hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
- adj_start = node->start < mm->scan_start ?
- mm->scan_start : node->start;
- adj_end = node->start + node->size > mm->scan_end ?
- mm->scan_end : node->start + node->size;
+ adj_start = hole_start < mm->scan_start ?
+ mm->scan_start : hole_start;
+ adj_end = hole_end > mm->scan_end ?
+ mm->scan_end : hole_end;
} else {
- adj_start = node->start;
- adj_end = node->start + node->size;
+ adj_start = hole_start;
+ adj_end = hole_end;
}
if (check_free_hole(adj_start , adj_end,
mm->scan_size, mm->scan_alignment)) {
- mm->scan_hit_start = node->start;
- mm->scan_hit_size = node->size;
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_size = hole_end;
return 1;
}
@@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
mm->scanned_blocks--;
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
- node->free = 0;
-
- prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
- free_stack);
- next_node = list_entry(node->free_stack.next, struct drm_mm_node,
- free_stack);
- if (prev_node) {
- BUG_ON(!prev_node->scanned_prev_free);
- prev_node->scanned_prev_free = 0;
-
- list_add_tail(&prev_node->node_list, &node->node_list);
-
- node->start = prev_node->start + prev_node->size;
- node->size -= prev_node->size;
- }
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- if (next_node) {
- BUG_ON(!next_node->scanned_next_free);
- next_node->scanned_next_free = 0;
-
- list_add(&next_node->node_list, &node->node_list);
-
- node->size -= next_node->size;
- }
-
- INIT_LIST_HEAD(&node->free_stack);
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ INIT_LIST_HEAD(&node->node_list);
+ list_add(&node->node_list, &prev_node->node_list);
 /* Only need to check for containment because start&size for the
* complete resulting free block (not just the desired part) is
@@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
int drm_mm_clean(struct drm_mm * mm)
{
- struct list_head *head = &mm->node_list;
+ struct list_head *head = &mm->head_node.node_list;
return (head->next->next == head);
}
@@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- INIT_LIST_HEAD(&mm->node_list);
- INIT_LIST_HEAD(&mm->free_stack);
+ INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
- return drm_mm_create_tail_node(mm, start, size, 0);
+ /* Clever trick to avoid a special case in the free hole tracking. */
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ return 0;
}
EXPORT_SYMBOL(drm_mm_init);
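/*
 * Illustrative note (not part of this patch): the head_node set up above is
 * a sentinel, not a real allocation.  With
 *	head_node.start = start + size;
 *	head_node.size  = start - head_node.start;	(wraps to -size)
 * the hole that "follows" the sentinel computes to exactly the managed
 * range: hole_start = head_node.start + head_node.size = start, and
 * hole_end is the start of the next node in the ring, which is
 * head_node.start = start + size again while the list is empty.  The
 * helpers used throughout this file are introduced earlier in the patch and
 * presumably reduce to something like:
 */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return list_entry(hole_node->node_list.next,
			  struct drm_mm_node, node_list)->start;
}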
void drm_mm_takedown(struct drm_mm * mm)
{
- struct list_head *bnode = mm->free_stack.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
+ struct drm_mm_node *entry, *next;
- entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
- if (entry->node_list.next != &mm->node_list ||
- entry->free_stack.next != &mm->free_stack) {
+ if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->free_stack);
- list_del(&entry->node_list);
- kfree(entry);
-
spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
- list_del(&entry->free_stack);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
@@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ entry->size);
+ total_used += entry->size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+ }
}
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total = total_free + total_used;
+
+ printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+ entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+ }
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+ total = total_free + total_used;
+
+ seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 58e65f92c232..25bf87390f53 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height);
*
* Return @modes's hsync rate in kHz, rounded to the nearest int.
*/
-int drm_mode_hsync(struct drm_display_mode *mode)
+int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync);
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
-int drm_mode_vrefresh(struct drm_display_mode *mode)
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f5bd9e590c80..e1aee4f6a7c6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+#ifndef __alpha__
+ /* For historical reasons, drm_get_pci_domain() is busticated
+ * on most archs and has to remain so for userspace interface
+ * < 1.4, except on alpha which was right from the beginning
+ */
+ if (dev->if_version < 0x10004)
+ return 0;
+#endif /* __alpha__ */
+
+ return pci_domain_nr(dev->pdev->bus);
+}
+
+static int drm_pci_get_irq(struct drm_device *dev)
+{
+ return dev->pdev->irq;
+}
+
+static const char *drm_pci_get_name(struct drm_device *dev)
+{
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ return pdriver->name;
+}
+
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+
+ if (len >= master->unique_len) {
+ DRM_ERROR("buffer overflow");
+ ret = -EINVAL;
+ goto err;
+ } else
+ master->unique_len = len;
+
+ dev->devname =
+ kmalloc(strlen(pdriver->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", pdriver->name,
+ master->unique);
+
+ return 0;
+err:
+ return ret;
+}
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ int domain, bus, slot, func, ret;
+ const char *bus_name;
+
+ master->unique_len = u->unique_len;
+ master->unique_size = u->unique_len + 1;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ master->unique[master->unique_len] = '\0';
+
+ bus_name = dev->driver->bus->get_name(dev);
+ dev->devname = kmalloc(strlen(bus_name) +
+ strlen(master->unique) + 2, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", bus_name,
+ master->unique);
+
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ domain = bus >> 8;
+ bus &= 0xff;
+
+ if ((domain != drm_get_pci_domain(dev)) ||
+ (bus != dev->pdev->bus->number) ||
+ (slot != PCI_SLOT(dev->pdev->devfn)) ||
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ return ret;
+}
+
+
+int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p->busnum & 0xff) != dev->pdev->bus->number ||
+ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+ return 0;
+}
+
+int drm_pci_agp_init(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev)) {
+ if (drm_pci_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+ && (dev->agp == NULL)) {
+ DRM_ERROR("Cannot initialize the agpgart module.\n");
+ return -EINVAL;
+ }
+ if (drm_core_has_MTRR(dev)) {
+ if (dev->agp)
+ dev->agp->agp_mtrr =
+ mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+ }
+ }
+ return 0;
+}
+
+static struct drm_bus drm_pci_bus = {
+ .bus_type = DRIVER_BUS_PCI,
+ .get_irq = drm_pci_get_irq,
+ .get_name = drm_pci_get_name,
+ .set_busid = drm_pci_set_busid,
+ .set_unique = drm_pci_set_unique,
+ .agp_init = drm_pci_agp_init,
+};
+
/**
* Register.
*
@@ -219,7 +389,7 @@ err_g1:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * PCI device initialization. Called via drm_init at module load time,
+ * PCI device initialization. Called directly from modules at load time.
*
* \return zero on success or a negative number on failure.
*
@@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev);
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.pci = pdriver;
+ driver->bus = &drm_pci_bus;
+
if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
+ return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
+ for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+ pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
@@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver)
#else
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 92d1d0fb7b75..7223f06d8e58 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -109,8 +109,60 @@ err_g1:
}
EXPORT_SYMBOL(drm_get_platform_dev);
+static int drm_platform_get_irq(struct drm_device *dev)
+{
+ return platform_get_irq(dev->platformdev, 0);
+}
+
+static const char *drm_platform_get_name(struct drm_device *dev)
+{
+ return dev->platformdev->name;
+}
+
+static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+
+ master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s", dev->platformdev->name);
+
+ if (len > master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->devname =
+ kmalloc(strlen(dev->platformdev->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+ master->unique);
+ return 0;
+err:
+ return ret;
+}
+
+static struct drm_bus drm_platform_bus = {
+ .bus_type = DRIVER_BUS_PLATFORM,
+ .get_irq = drm_platform_get_irq,
+ .get_name = drm_platform_get_name,
+ .set_busid = drm_platform_set_busid,
+};
+
/**
- * Platform device initialization. Called via drm_init at module load time,
+ * Platform device initialization. Called directly from modules.
*
* \return zero on success or a negative number on failure.
*
@@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev);
* after the initialization for driver customization.
*/
-int drm_platform_init(struct drm_driver *driver)
+int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
- return drm_get_platform_dev(driver->platform_device, driver);
+ DRM_DEBUG("\n");
+
+ driver->kdriver.platform_device = platform_device;
+ driver->bus = &drm_platform_bus;
+ INIT_LIST_HEAD(&driver->device_list);
+ return drm_get_platform_dev(platform_device, driver);
+}
+EXPORT_SYMBOL(drm_platform_init);
+
+void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ DRM_INFO("Module unloaded\n");
}
+EXPORT_SYMBOL(drm_platform_exit);
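/*
 * Illustrative sketch (not part of this patch): with the bus split, a
 * platform DRM driver passes its platform_device explicitly instead of
 * stashing it in struct drm_driver.  Names below are hypothetical.
 */
static int __init example_init(void)
{
	return drm_platform_init(&example_drm_driver, example_platform_device);
}

static void __exit example_exit(void)
{
	drm_platform_exit(&example_drm_driver, example_platform_device);
}

module_init(example_init);
module_exit(example_exit);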
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 463aed9403db..34664587a74e 100644
--- a/drivers/gpu/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
@@ -59,9 +59,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
{
int ret = 0;
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
+ sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
if (!sman->mm) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d59edc18301f..001273d57f2d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev,
dev->driver = driver;
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
+ if (dev->driver->bus->agp_init) {
+ retcode = dev->driver->bus->agp_init(dev);
+ if (retcode)
goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
}
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p)
*
* Cleans up all DRM device, calling drm_lastclose().
*
- * \sa drm_init
*/
void drm_put_dev(struct drm_device *dev)
{
@@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
+ list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
new file mode 100644
index 000000000000..206d2300d873
--- /dev/null
+++ b/drivers/gpu/drm/drm_usb.c
@@ -0,0 +1,117 @@
+#include "drmP.h"
+#include <linux/usb.h>
+
+#ifdef CONFIG_USB
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ struct usb_device *usbdev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+ dev->dev = &usbdev->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_fill_in_dev(dev, NULL, driver);
+ if (ret) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g1;
+ }
+
+ usb_set_intfdata(interface, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g1;
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_g2;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, 0);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ mutex_unlock(&drm_global_mutex);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_g3:
+ drm_put_minor(&dev->primary);
+err_g2:
+ drm_put_minor(&dev->control);
+err_g1:
+ kfree(dev);
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_get_usb_dev);
+
+static int drm_usb_get_irq(struct drm_device *dev)
+{
+ return 0;
+}
+
+static const char *drm_usb_get_name(struct drm_device *dev)
+{
+ return "USB";
+}
+
+static int drm_usb_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return 0;
+}
+
+static struct drm_bus drm_usb_bus = {
+ .bus_type = DRIVER_BUS_USB,
+ .get_irq = drm_usb_get_irq,
+ .get_name = drm_usb_get_name,
+ .set_busid = drm_usb_set_busid,
+};
+
+int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+{
+ int res;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.usb = udriver;
+ driver->bus = &drm_usb_bus;
+
+ res = usb_register(udriver);
+ return res;
+}
+EXPORT_SYMBOL(drm_usb_init);
+
+void drm_usb_exit(struct drm_driver *driver,
+ struct usb_driver *udriver)
+{
+ usb_deregister(udriver);
+}
+EXPORT_SYMBOL(drm_usb_exit);
+#endif
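/*
 * Illustrative sketch (not part of this patch): a USB display driver would
 * pair the new helpers with an ordinary usb_driver, calling
 * drm_get_usb_dev() from its probe callback.  All names here are
 * hypothetical.
 */
static int example_usb_probe(struct usb_interface *interface,
			     const struct usb_device_id *id)
{
	return drm_get_usb_dev(interface, id, &example_drm_driver);
}

static void example_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_put_dev(dev);
}

static struct usb_driver example_usb_driver = {
	.name = "example_drm",
	.probe = example_usb_probe,
	.disconnect = example_usb_disconnect,
	.id_table = example_id_table,
};

static int __init example_init(void)
{
	return drm_usb_init(&example_drm_driver, &example_usb_driver);
}

static void __exit example_exit(void)
{
	drm_usb_exit(&example_drm_driver, &example_usb_driver);
}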
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ff33e53bbbf8..8f371e8d630f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -37,7 +37,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
- lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
@@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
- unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
@@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
@@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-/*
- * call the drm_ioctl under the big kernel lock because
- * to lock against the i810_mmap_buffers function.
- */
-long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 88bcd331e7c5..6f98d059f68a 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,18 +57,13 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -77,15 +72,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i810_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init i810_init(void)
{
+ if (num_possible_cpus() > 1) {
+ pr_err("drm/i810 does not support SMP\n");
+ return -EINVAL;
+ }
driver.num_ioctls = i810_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
deleted file mode 100644
index c642ee0b238c..000000000000
--- a/drivers/gpu/drm/i830/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-ccflags-y := -Iinclude/drm
-i830-y := i830_drv.o i830_dma.o i830_irq.o
-
-obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
deleted file mode 100644
index ca6f31ff0eec..000000000000
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ /dev/null
@@ -1,1560 +0,0 @@
-/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-
-#define I830_BUF_FREE 2
-#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
-
-#define I830_BUF_UNMAPPED 0
-#define I830_BUF_MAPPED 1
-
-static struct drm_buf *i830_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
- I830_BUF_CLIENT);
- if (used == I830_BUF_FREE)
- return buf;
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
- if (used != I830_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i830_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
-
- lock_kernel();
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
-
- buf_priv->currently_mapped = I830_BUF_MAPPED;
- unlock_kernel();
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i830_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = i830_mmap_buffers,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
-static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- unsigned long virtual;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i830_buffer_fops;
- dev_priv->mmap_buffer = buf;
- virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR((void *)virtual);
- buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
- }
- up_write(&current->mm->mmap_sem);
-
- return retcode;
-}
-
-static int i830_unmap_buffer(struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
- (size_t) buf->total);
- up_write(&current->mm->mmap_sem);
-
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i830_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i830_map_buffer(buf, file_priv);
- if (retcode) {
- i830_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i830_dma_cleanup(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i830_private_t *dev_priv =
- (drm_i830_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start)
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- if (dev_priv->hw_status_page) {
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I830_WRITE(0x02080, 0x1ffff000);
- }
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- if (buf_priv->kernel_virtual && buf->total)
- drm_core_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
- }
-
-out_wait_ring:
- return iters;
-}
-
-static void i830_kernel_lost_context(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head == ring->tail)
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
-}
-
-static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 36;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I830_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_core_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
- }
- return 0;
-}
-
-static int i830_dma_initialize(struct drm_device *dev,
- drm_i830_private_t *dev_priv,
- drm_i830_init_t *init)
-{
- struct drm_map_list *r_list;
-
- memset(dev_priv, 0, sizeof(drm_i830_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
-
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i830_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
- DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
- DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
- DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
-
- dev_priv->cpp = init->cpp;
- /* We are using separate values as placeholders for mechanisms for
- * private backbuffer/depthbuffer usage.
- */
-
- dev_priv->back_pitch = init->back_pitch;
- dev_priv->depth_pitch = init->depth_pitch;
- dev_priv->do_boxes = 0;
- dev_priv->use_mi_batchbuffer_start = 0;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I830_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i830_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i830_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv;
- drm_i830_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I830_INIT_DMA:
- dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i830_dma_initialize(dev, dev_priv, init);
- break;
- case I830_CLEANUP_DMA:
- retcode = i830_dma_cleanup(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define ST1_ENABLE (1<<16)
-#define ST1_MASK (0xffff)
-
-/* Most efficient way to verify state for the i830 is as it is
- * emitted. Non-conformant state is silently dropped.
- */
-static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
-
- for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
- OUT_RING(code[I830_CTXREG_BLENDCOLR]);
- j += 2;
-
- for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
- OUT_RING(code[I830_CTXREG_MCSB1]);
- j += 2;
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
- (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
- (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
-
- BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
-
- OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
- OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
- OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
- OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
- OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
- OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
-
- for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
- } else
- printk("rejected packet %x\n", code[0]);
-}
-
-static void i830EmitTexBlendVerified(struct drm_device *dev,
- unsigned int *code, unsigned int num)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (!num)
- return;
-
- BEGIN_LP_RING(num + 1);
-
- for (i = 0; i < num; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexPalette(struct drm_device *dev,
- unsigned int *palette, int number, int is_shared)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-
- return;
-
- BEGIN_LP_RING(258);
-
- if (is_shared == 1) {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
- MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
- } else {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
- }
- for (i = 0; i < 256; i++)
- OUT_RING(palette[i]);
- OUT_RING(0);
- /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
- */
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
-
- tmp = code[I830_DESTREG_CBUFADDR];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- if (((int)outring) & 8) {
- OUT_RING(0);
- OUT_RING(0);
- }
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_COLOR_BACK |
- BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
- BUF_3D_USE_FENCE);
- OUT_RING(tmp);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
- BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
- OUT_RING(dev_priv->zi1);
- OUT_RING(0);
- } else {
- DRM_ERROR("bad di1 %x (allow %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
- }
-
- /* invarient:
- */
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I830_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I830_DESTREG_DR1]);
- OUT_RING(code[I830_DESTREG_DR2]);
- OUT_RING(code[I830_DESTREG_DR3]);
- OUT_RING(code[I830_DESTREG_DR4]);
-
- /* Need to verify this */
- tmp = code[I830_DESTREG_SENABLE];
- if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
- OUT_RING(tmp);
- } else {
- DRM_ERROR("bad scissor enable\n");
- OUT_RING(0);
- }
-
- OUT_RING(GFX_OP_SCISSOR_RECT);
- OUT_RING(code[I830_DESTREG_SR1]);
- OUT_RING(code[I830_DESTREG_SR2]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- BEGIN_LP_RING(2);
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[1]);
- ADVANCE_LP_RING();
-}
-
-static void i830EmitState(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%s %x\n", __func__, dirty);
-
- if (dirty & I830_UPLOAD_BUFFERS) {
- i830EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
- }
-
- if (dirty & I830_UPLOAD_CTX) {
- i830EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I830_UPLOAD_CTX;
- }
-
- if (dirty & I830_UPLOAD_TEX0) {
- i830EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
- }
-
- if (dirty & I830_UPLOAD_TEX1) {
- i830EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND0) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
- sarea_priv->TexBlendStateWordsUsed[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND1) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
- sarea_priv->TexBlendStateWordsUsed[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
- }
-
- if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
- } else {
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
- }
-
- /* 1.3:
- */
-#if 0
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
-#endif
- }
-
- /* 1.3:
- */
- if (dirty & I830_UPLOAD_STIPPLE) {
- i830EmitStippleVerified(dev, sarea_priv->StippleState);
- sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
- }
-
- if (dirty & I830_UPLOAD_TEX2) {
- i830EmitTexVerified(dev, sarea_priv->TexState2);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
- }
-
- if (dirty & I830_UPLOAD_TEX3) {
- i830EmitTexVerified(dev, sarea_priv->TexState3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND2) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState2,
- sarea_priv->TexBlendStateWordsUsed2);
-
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND3) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState3,
- sarea_priv->TexBlendStateWordsUsed3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
- }
-}
-
-/* ================================================================
- * Performance monitoring functions
- */
-
-static void i830_fill_box(struct drm_device *dev,
- int x, int y, int w, int h, int r, int g, int b)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- u32 color;
- unsigned int BR13, CMD;
- RING_LOCALS;
-
- BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- x += dev_priv->sarea_priv->boxes[0].x1;
- y += dev_priv->sarea_priv->boxes[0].y1;
-
- if (dev_priv->cpp == 4) {
- BR13 |= (1 << 25);
- CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
- } else {
- color = (((r & 0xf8) << 8) |
- ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
- }
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((y << 16) | x);
- OUT_RING(((y + h) << 16) | (x + w));
-
- if (dev_priv->current_page == 1)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING(color);
- ADVANCE_LP_RING();
-}
-
-static void i830_cp_performance_boxes(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- /* Purple box for page flipping
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
- i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
-
- /* Red box if we have to wait for idle at any point
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
- i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
-
- /* Blue box: lost context?
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
- i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
-
- /* Yellow box for texture swaps
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
- i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
-
- /* Green box if hardware never idles (as far as we can tell)
- */
- if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
- i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
-
- /* Draw bars indicating number of buffers allocated
- * (not a great measure, easily confused)
- */
- if (dev_priv->dma_used) {
- int bar = dev_priv->dma_used / 10240;
- if (bar > 100)
- bar = 100;
- if (bar < 1)
- bar = 1;
- i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
- dev_priv->dma_used = 0;
- }
-
- dev_priv->sarea_priv->perf_boxes = 0;
-}
-
-static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval,
- unsigned int clear_depthmask)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int BR13, CMD, D_CMD;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I830_FRONT | I830_BACK);
- if (tmp & I830_FRONT)
- flags |= I830_BACK;
- if (tmp & I830_BACK)
- flags |= I830_FRONT;
- }
-
- i830_kernel_lost_context(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- case 4:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
- CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
- XY_COLOR_BLT_WRITE_RGB);
- D_CMD = XY_COLOR_BLT_CMD;
- if (clear_depthmask & 0x00ffffff)
- D_CMD |= XY_COLOR_BLT_WRITE_RGB;
- if (clear_depthmask & 0xff000000)
- D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
- break;
- default:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I830_FRONT) {
- DRM_DEBUG("clear front\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->front_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_BACK) {
- DRM_DEBUG("clear back\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->back_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_DEPTH) {
- DRM_DEBUG("clear depth\n");
- BEGIN_LP_RING(6);
- OUT_RING(D_CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->depth_offset);
- OUT_RING(clear_zval);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i830_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int CMD, BR13;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes)
- i830_cp_performance_boxes(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- case 4:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- break;
- default:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
- pbox->x1, pbox->y1, pbox->x2, pbox->y2);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING(BR13 & 0xffff);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset);
- else
- OUT_RING(dev_priv->front_offset);
-
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_dispatch_flip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes) {
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
- i830_cp_performance_boxes(dev);
- }
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static void i830_dma_dispatch_vertex(struct drm_device *dev,
- struct drm_buf *buf, int discard, int used)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0, u;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- if (discard) {
- u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT)
- DRM_DEBUG("xxxx 2\n");
- }
-
- if (used > 4 * 1023)
- used = 0;
-
- if (sarea_priv->dirty)
- i830EmitState(dev);
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
- address, used, nbox);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i830_dma_dispatch\n");
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
- u32 *vp = buf_priv->kernel_virtual;
-
- vp[0] = (GFX_OP_PRIMITIVE |
- sarea_priv->vertex_prim | ((used / 4) - 2));
-
- if (dev_priv->use_mi_batchbuffer_start) {
- vp[used / 4] = MI_BATCH_BUFFER_END;
- used += 4;
- }
-
- if (used & 4) {
- vp[used / 4] = 0;
- used += 4;
- }
-
- i830_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR1]);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING(box[i].x2 | (box[i].y2 << 16));
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR4]);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (dev_priv->use_mi_batchbuffer_start) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(start | MI_BATCH_NON_SECURE);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(start | MI_BATCH_NON_SECURE);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I830_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_quiescent(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-}
-
-static int i830_flush_queue(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i, ret = 0;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
- I830_BUF_FREE);
-
- if (used == I830_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return ret;
-}
-
-/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i830_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_FREE);
-
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- }
- }
-}
-
-static int i830_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_flush_queue(dev);
- return 0;
-}
-
-static int i830_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
- drm_i830_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
- return -EINVAL;
-
- i830_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i830_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private)
- return -EINVAL;
-
- i830_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color,
- clear->clear_depth, clear->clear_depthmask);
- return 0;
-}
-
-static int i830_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("i830_swap_bufs\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_dma_dispatch_swap(dev);
- return 0;
-}
-
-/* Not sure why this isn't set all the time:
- */
-static void i830_do_init_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i830_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- if (dev_priv->current_page != 0)
- i830_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i830_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i830_do_init_pageflip(dev);
-
- i830_dma_dispatch_flip(dev);
- return 0;
-}
-
-static int i830_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i830_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i830_dma_t *d = data;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- DRM_DEBUG("getbuf\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i830_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i830_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i830_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-
-static int i830_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_getparam_t *param = data;
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_PARAM_IRQ_ACTIVE:
- value = dev->irq_enabled;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int i830_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_setparam_t *param = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i830_driver_load(struct drm_device *dev, unsigned long flags)
-{
- /* i830 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- return 0;
-}
-
-void i830_driver_lastclose(struct drm_device *dev)
-{
- i830_dma_cleanup(dev);
-}
-
-void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- i830_do_cleanup_pageflip(dev);
- }
-}
-
-void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
-{
- i830_reclaim_buffers(dev, file_priv);
-}
-
-int i830_driver_dma_quiescent(struct drm_device *dev)
-{
- i830_dma_quiescent(dev);
- return 0;
-}
-
-/*
- * call drm_ioctl under the big kernel lock in order
- * to lock against the i830_mmap_buffers function.
- */
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
-struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
-};
-
-int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate every i8xx is AGP.
- */
-int i830_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
deleted file mode 100644
index f655ab7977da..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* i830_drv.c -- I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- * Keith Whitwell <keith@tungstengraphics.com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-
-#include "drm_pciids.h"
-
-static struct pci_device_id pciidlist[] = {
- i830_PCI_IDS
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
-#if USE_IRQS
- .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
-#endif
- .dev_priv_size = sizeof(drm_i830_buf_priv_t),
- .load = i830_driver_load,
- .lastclose = i830_driver_lastclose,
- .preclose = i830_driver_preclose,
- .device_is_agp = i830_driver_device_is_agp,
- .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
- .dma_quiescent = i830_driver_dma_quiescent,
-#if USE_IRQS
- .irq_preinstall = i830_driver_irq_preinstall,
- .irq_postinstall = i830_driver_irq_postinstall,
- .irq_uninstall = i830_driver_irq_uninstall,
- .irq_handler = i830_driver_irq_handler,
-#endif
- .ioctls = i830_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static int __init i830_init(void)
-{
- driver.num_ioctls = i830_max_ioctl;
- return drm_init(&driver);
-}
-
-static void __exit i830_exit(void)
-{
- drm_exit(&driver);
-}
-
-module_init(i830_init);
-module_exit(i830_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
deleted file mode 100644
index 0df1c720560b..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I830_DRV_H_
-#define _I830_DRV_H_
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i830"
-#define DRIVER_DESC "Intel 830M"
-#define DRIVER_DATE "20021108"
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: ?
- * 1.3: New irq emit/wait ioctls.
- * New pageflip ioctl.
- * New getparam ioctl.
- * State for texunits 3&4 in sarea.
- * New (alternative) layout for texture state.
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 2
-
-/* Driver will work either way: IRQs save CPU time when waiting for
- * the card, but are subject to subtle interactions between bios,
- * hardware and the driver.
- */
-/* XXX: Add vblank support? */
-#define USE_IRQS 0
-
-typedef struct drm_i830_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void __user *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i830_buf_priv_t;
-
-typedef struct _drm_i830_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i830_ring_buffer_t;
-
-typedef struct drm_i830_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i830_sarea_t *sarea_priv;
- drm_i830_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int front_offset;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
- unsigned int cpp;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int use_mi_batchbuffer_start;
-
-} drm_i830_private_t;
-
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i830_ioctls[];
-extern int i830_max_ioctl;
-
-/* i830_irq.c */
-extern int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device *dev);
-extern void i830_driver_irq_postinstall(struct drm_device *dev);
-extern void i830_driver_irq_uninstall(struct drm_device *dev);
-extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device *dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device *dev);
-extern int i830_driver_device_is_agp(struct drm_device *dev);
-
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
-
-#define I830_VERBOSE 0
-
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d)\n", (n)); \
- if (dev_priv->ring.space < n*4) \
- i830_wait_ring(dev, n*4, __func__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I830_VERBOSE) \
- printk(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) \
- printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while (0)
-
-extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
-#define LOAD_TEXTURE_MAP0 (1<<11)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I830REG_HWSTAM 0x02098
-#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
-#define I830REG_INT_ENABLE_R 0x020a0
-
-#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-
-#define CMD_3D (0x3<<29)
-#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
-#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define BUF_3D_ID_COLOR_BACK (0x3<<24)
-#define BUF_3D_ID_DEPTH (0x7<<24)
-#define BUF_3D_USE_FENCE (1<<23)
-#define BUF_3D_PITCH(x) (((x)/4)<<2)
-
-#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
-#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
-#define MAP_PALETTE_BOTH (1<<11)
-
-#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
-#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
-#define XY_COLOR_BLT_WRITE_RGB (1<<20)
-
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
-#define MI_BATCH_NON_SECURE (1)
-
-#define MI_WAIT_FOR_EVENT ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-
-#endif
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
deleted file mode 100644
index d1a6b95d631d..000000000000
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
- *
- * Copyright 2002 Tungsten Graphics, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-
-irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u16 temp;
-
- temp = I830_READ16(I830REG_INT_IDENTITY_R);
- DRM_DEBUG("%x\n", temp);
-
- if (!(temp & 2))
- return IRQ_NONE;
-
- I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
-
- atomic_inc(&dev_priv->irq_received);
- wake_up_interruptible(&dev_priv->irq_queue);
-
- return IRQ_HANDLED;
-}
-
-static int i830_emit_irq(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s\n", __func__);
-
- atomic_inc(&dev_priv->irq_emitted);
-
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(GFX_OP_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- return atomic_read(&dev_priv->irq_emitted);
-}
-
-static int i830_wait_irq(struct drm_device *dev, int irq_nr)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- DECLARE_WAITQUEUE(entry, current);
- unsigned long end = jiffies + HZ * 3;
- int ret = 0;
-
- DRM_DEBUG("%s\n", __func__);
-
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- return 0;
-
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
-
- add_wait_queue(&dev_priv->irq_queue, &entry);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- break;
- if ((signed)(end - jiffies) <= 0) {
- DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
- I830_READ16(I830REG_INT_IDENTITY_R),
- I830_READ16(I830REG_INT_MASK_R),
- I830_READ16(I830REG_INT_ENABLE_R),
- I830_READ16(I830REG_HWSTAM));
-
- ret = -EBUSY; /* Lockup? Missed irq? */
- break;
- }
- schedule_timeout(HZ * 3);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev_priv->irq_queue, &entry);
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_emit_t *emit = data;
- int result;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- result = i830_emit_irq(dev);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- return i830_wait_irq(dev, irqwait->irq_seq);
-}
-
-/* drm_dma.h hooks
-*/
-void i830_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_HWSTAM, 0xffff);
- I830_WRITE16(I830REG_INT_MASK_R, 0x0);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
- atomic_set(&dev_priv->irq_received, 0);
- atomic_set(&dev_priv->irq_emitted, 0);
- init_waitqueue_head(&dev_priv->irq_queue);
-}
-
-void i830_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
-}
-
-void i830_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
-}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f06af9..464b3c760d10 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -703,6 +703,9 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -719,14 +722,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -735,6 +730,14 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
static int __init i915_init(void)
{
if (!intel_agp_enabled) {
@@ -768,12 +771,12 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET))
driver.get_vblank_timestamp = NULL;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81d0035..da70f8cd6178 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1115,6 +1115,13 @@ void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c7c6fb..bc7f06b8fbca 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -193,22 +193,20 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int
+i915_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
{
- struct drm_i915_gem_create *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- args->size = roundup(args->size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, args->size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return -ENOMEM;
@@ -224,10 +222,41 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
- args->handle = handle;
+ *handle_p = handle;
return 0;
}
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create *args = data;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -1425,27 +1454,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
return tile_height * obj->stride * 2;
}
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1456,7 +1471,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1479,7 +1494,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
goto out;
}
- args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
drm_gem_object_unreference(&obj->base);
@@ -1488,6 +1503,34 @@ unlock:
return ret;
}
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
+
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..79a04fde69b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
- int tile_width;
+ int tile_width, tile_height;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
}
+ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
+ /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
+ * number of tile rows. */
+ if (IS_GEN2(dev))
+ tile_height *= 2;
+
+ /* Size needs to be aligned to a full tile row */
+ if (size & (tile_height * stride - 1))
+ return false;
+
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
- I915_WRITE(FDI_RXA_IMR, 0);
- I915_WRITE(FDI_RXB_IMR, 0);
+ hotplug_mask |= SDE_AUX_MASK;
}
dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15d94c63918c..729d4233b763 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,17 +1553,7 @@
/* Backlight control */
#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..e79b25bbee6c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+ bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- ironlake_fdi_enable(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else {
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
+ /* Skip the PCH stuff if possible */
+ if (!is_pch_port)
+ goto done;
+
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
POSTING_READ(RSTDBYCTL);
}
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..d860abeda70f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
#include "intel_drv.h"
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
dev_priv->pch_pf_size = (width << 16) | height;
}
-static int is_backlight_combination_mode(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 4)
- return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
-
- if (IS_GEN2(dev))
- return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
-
- return 0;
-}
-
static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 4)
max &= ~1;
}
-
- if (is_backlight_combination_mode(dev))
- max *= 0xff;
}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (IS_PINEVIEW(dev))
val >>= 1;
-
- if (is_backlight_combination_mode(dev)){
- u8 lbpc;
-
- val &= ~1;
- pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
- val *= lbpc;
- val >>= 1;
- }
}
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
-
- if (is_backlight_combination_mode(dev)){
- u32 max = intel_panel_get_max_backlight(dev);
- u8 lpbc;
-
- lpbc = level * 0xfe / max + 1;
- level /= lpbc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
- }
-
tmp = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7c50cdce84f0..fefc8d60a835 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1482,7 +1482,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* Note! This is in reply order (see loop in get_tv_modes).
* XXX: all 60Hz refresh?
*/
-struct drm_display_mode sdvo_tv_modes[] = {
+static const struct drm_display_mode sdvo_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
416, 0, 200, 201, 232, 233, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 08868ac3048a..1e1eb1d7e971 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -703,7 +703,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 0aaf5f67a436..42d31874edf2 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -75,10 +75,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -88,15 +84,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver mga_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 6bdab891c64e..8314a49b6b9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -282,7 +282,7 @@ static void still_alive(void)
{
#if 0
sync();
- msleep(2);
+ mdelay(2);
#endif
}
@@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
BIOSLOG(bios, "0x%04X: "
"Condition not met, sleeping for 20ms\n",
offset);
- msleep(20);
+ mdelay(20);
}
}
@@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
offset, time);
- msleep(time);
+ mdelay(time);
return 3;
}
@@ -2962,7 +2962,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (time < 1000)
udelay(time);
else
- msleep((time + 900) / 1000);
+ mdelay((time + 900) / 1000);
return 3;
}
@@ -3856,7 +3856,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
if (script == LVDS_PANEL_OFF) {
/* off-on delay in ms */
- msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+ mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
}
#ifdef __powerpc__
/* Powerbook specific quirks */
@@ -5950,6 +5950,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
}
}
+static const u8 hpd_gpio[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@@ -5986,23 +5991,9 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index2 = (cte->entry & 0x00000f00) >> 8;
- switch (cte->entry & 0x00033000) {
- case 0x00001000:
- cte->gpio_tag = 0x07;
- break;
- case 0x00002000:
- cte->gpio_tag = 0x08;
- break;
- case 0x00010000:
- cte->gpio_tag = 0x51;
- break;
- case 0x00020000:
- cte->gpio_tag = 0x52;
- break;
- default:
- cte->gpio_tag = 0xff;
- break;
- }
+
+ cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+ cte->gpio_tag = hpd_gpio[cte->gpio_tag];
if (cte->type == 0xff)
continue;
@@ -6702,11 +6693,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
- mutex_lock(&bios->lock);
+ spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
- mutex_unlock(&bios->lock);
+ spin_unlock_bh(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -6715,7 +6706,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
- mutex_init(&bios->lock);
+ spin_lock_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 50a648e01c49..8a54fa7edf5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -251,7 +251,7 @@ struct nvbios {
uint8_t digital_min_front_porch;
bool fp_no_ddc;
- struct mutex lock;
+ spinlock_t lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9f9b0b..2ad49cbf7c8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,13 +49,16 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- nouveau_vm_put(&nvbo->vma);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
- int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+ int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -80,7 +83,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
}
} else {
if (likely(dev_priv->chan_vm)) {
- if (*size > 256 * 1024)
+ if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift;
else
*page_shift = dev_priv->chan_vm->spg_shift;
@@ -98,8 +101,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -110,16 +112,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
- nvbo->mappable = mappable;
- nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
- if (!nvbo->no_vm && dev_priv->chan_vm) {
+ if (dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret) {
@@ -141,11 +141,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@@ -315,11 +312,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -382,7 +376,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
return nouveau_sgdma_init_ttm(dev);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
@@ -428,7 +423,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50)
+ man->func = &nouveau_gart_manager;
+ else
+ man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -436,7 +434,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
@@ -498,45 +497,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
}
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- if (nvbo->no_vm) {
- if (mem->mem_type == TTM_PL_TT)
- return NvDmaGART;
- return NvDmaVRAM;
- }
-
- if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
-}
-
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 src_offset = old_mem->start << PAGE_SHIFT;
- u64 dst_offset = new_mem->start << PAGE_SHIFT;
u32 page_count = new_mem->num_pages;
+ u64 src_offset, dst_offset;
int ret;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
page_count = new_mem->num_pages;
while (page_count) {
@@ -571,33 +547,18 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset, dst_offset;
int ret;
- src_offset = old_mem->start << PAGE_SHIFT;
- dst_offset = new_mem->start << PAGE_SHIFT;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
-
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
while (length) {
u32 amount, stride, height;
@@ -678,6 +639,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+ if (mem->mem_type == TTM_PL_TT)
+ return chan->gart_handle;
+ return chan->vram_handle;
+}
+
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -731,15 +701,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm) {
+ if (!chan) {
chan = dev_priv->channel;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
+ /* create temporary vma for old memory; this will get cleaned
+ * up after ttm destroys the ttm_mem_reg
+ */
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_mem *node = old_mem->mm_node;
+ if (!node->tmp_vma.node) {
+ u32 page_shift = nvbo->vma.node->type;
+ if (old_mem->mem_type == TTM_PL_TT)
+ page_shift = nvbo->vma.vm->spg_shift;
+
+ ret = nouveau_vm_get(chan->vm,
+ old_mem->num_pages << PAGE_SHIFT,
+ page_shift, NV_MEM_ACCESS_RO,
+ &node->tmp_vma);
+ if (ret)
+ goto out;
+ }
+
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(&node->tmp_vma, node);
+ else {
+ nouveau_vm_map_sg(&node->tmp_vma, 0,
+ old_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+ }
+
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
@@ -753,6 +751,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
no_wait_gpu, new_mem);
}
+out:
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
return ret;
@@ -763,6 +762,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -782,7 +782,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = tmp_mem.mm_node;
+ struct nouveau_vma *vma = &nvbo->vma;
+ if (vma->node->type != vma->vm->spg_shift)
+ vma = &node->tmp_vma;
+ nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ nouveau_vm_unmap(&nvbo->vma);
+ }
+
if (ret)
goto out;
@@ -825,6 +841,36 @@ out:
return ret;
}
+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *node = new_mem->mm_node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+
+ if (dev_priv->card_type < NV_50)
+ return;
+
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_vm_map(vma, node);
+ break;
+ case TTM_PL_TT:
+ if (vma->node->type != vm->spg_shift) {
+ nouveau_vm_unmap(vma);
+ vma = &node->tmp_vma;
+ }
+ nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ break;
+ default:
+ nouveau_vm_unmap(&nvbo->vma);
+ break;
+ }
+}
+
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct nouveau_tile_reg **new_tile)
@@ -832,19 +878,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- uint64_t offset;
+ u64 offset = new_mem->start << PAGE_SHIFT;
- if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
- /* Nothing to do. */
- *new_tile = NULL;
+ *new_tile = NULL;
+ if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- }
-
- offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->chan_vm) {
- nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
- } else if (dev_priv->card_type >= NV_10) {
+ if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -861,11 +901,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
- if (dev_priv->card_type >= NV_10 &&
- dev_priv->card_type < NV_50) {
- nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
- *old_tile = new_tile;
- }
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+ *old_tile = new_tile;
}
static int
@@ -879,9 +916,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_tile_reg *new_tile = NULL;
int ret = 0;
- ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
- if (ret)
- return ret;
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+ }
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@@ -912,10 +951,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (ret)
- nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
- else
- nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ if (dev_priv->card_type < NV_50) {
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ }
return ret;
}
@@ -956,7 +997,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
{
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
u8 page_shift;
if (!dev_priv->bar1_vm) {
@@ -967,23 +1008,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}
if (dev_priv->card_type == NV_C0)
- page_shift = vram->page_shift;
+ page_shift = node->page_shift;
else
page_shift = 12;
ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
- &vram->bar_vma);
+ &node->bar_vma);
if (ret)
return ret;
- nouveau_vm_map(&vram->bar_vma, vram);
+ nouveau_vm_map(&node->bar_vma, node);
if (ret) {
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
return ret;
}
- mem->bus.offset = vram->bar_vma.offset;
+ mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -1000,16 +1041,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return;
- if (!vram->bar_vma.node)
+ if (!node->bar_vma.node)
return;
- nouveau_vm_unmap(&vram->bar_vma);
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_unmap(&node->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
}
static int
@@ -1059,6 +1100,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
+ .move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
.sync_obj_signaled = __nouveau_fence_signalled,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 3960d66d7aba..3837090d66af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- int ret;
+ int ret = 0;
if (dev_priv->card_type >= NV_50) {
if (dev_priv->card_type < NV_C0) {
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
else
location = TTM_PL_FLAG_TT;
- ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
- true, &pushbuf);
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
if (ret) {
NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 505c6bfb4d75..764c15d537ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,7 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nv50_display.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -61,18 +62,59 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
+nouveau_framebuffer_init(struct drm_device *dev,
+ struct nouveau_framebuffer *nv_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct nouveau_bo *nvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret) {
return ret;
}
- drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
- nouveau_fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ nv_fb->nvbo = nvbo;
+
+ if (dev_priv->card_type >= NV_50) {
+ u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+ if (tile_flags == 0x7a00 ||
+ tile_flags == 0xfe00)
+ nv_fb->r_dma = NvEvoFB32;
+ else
+ if (tile_flags == 0x7000)
+ nv_fb->r_dma = NvEvoFB16;
+ else
+ nv_fb->r_dma = NvEvoVRAM_LP;
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
+ case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
+ case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 24:
+ case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
+ case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ default:
+ NV_ERROR(dev, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (dev_priv->chipset == 0x50)
+ nv_fb->r_format |= (tile_flags << 8);
+
+ if (!tile_flags)
+ nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ else {
+ u32 mode = nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ mode >>= 4;
+ nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ }
+ }
+
return 0;
}
@@ -182,6 +224,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
int ret;
@@ -201,9 +244,12 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING(chan, 0);
- FIRE_RING(chan);
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
+ OUT_RING (chan, 0);
+ FIRE_RING (chan);
ret = nouveau_fence_new(chan, pfence, true);
if (ret)
@@ -244,7 +290,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, s->event, nouveau_crtc(crtc)->index,
+ { { }, event, nouveau_crtc(crtc)->index,
fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };
@@ -255,6 +301,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
mutex_lock(&chan->mutex);
/* Emit a page flip */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nv50_display_flip_next(crtc, fb, chan);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ goto fail_unreserve;
+ }
+ }
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
nouveau_channel_put(&chan);
if (ret)
@@ -305,7 +359,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
list_del(&s->head);
- *ps = *s;
+ if (ps)
+ *ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..1ef39be996ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -96,13 +96,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
OUT_RING(chan, 0);
/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
- ret = RING_SPACE(chan, 4);
+ ret = RING_SPACE(chan, 6);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING(chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
- OUT_RING(chan, NvNotify0);
+ OUT_RING (chan, NvM2MF);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->vram_handle);
+ OUT_RING (chan, chan->gart_handle);
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c36f1763feaa..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -61,8 +61,6 @@ enum {
NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
- NvDmaVRAM = 0x80000004,
- NvDmaGART = 0x80000005,
NvNotify0 = 0x80000006,
Nv2D = 0x80000007,
NvCtxSurf2D = 0x80000008,
@@ -73,12 +71,15 @@ enum {
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
NvSema = 0x8000000f,
+ NvEvoSema0 = 0x80000010,
+ NvEvoSema1 = 0x80000011,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
NvEvoFB32 = 0x01000002,
- NvEvoVRAM_LP = 0x01000003
+ NvEvoVRAM_LP = 0x01000003,
+ NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 38d599554bce..7beb82a0315d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -175,7 +175,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
struct bit_displayport_encoder_table *dpe;
int ret, i, dpe_headerlen, vs = 0, pre = 0;
uint8_t request[2];
@@ -183,7 +182,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
if (!dpe)
return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index f658a04eecf9..155ebdcbf06f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -408,14 +408,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = nouveau_pci_probe,
- .remove = nouveau_pci_remove,
- .suspend = nouveau_pci_suspend,
- .resume = nouveau_pci_resume
- },
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
@@ -432,6 +424,15 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver nouveau_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = nouveau_pci_probe,
+ .remove = nouveau_pci_remove,
+ .suspend = nouveau_pci_suspend,
+ .resume = nouveau_pci_resume
+};
+
static int __init nouveau_init(void)
{
driver.num_ioctls = nouveau_max_ioctl;
@@ -449,7 +450,7 @@ static int __init nouveau_init(void)
return 0;
nouveau_register_dsm_handler();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &nouveau_pci_driver);
}
static void __exit nouveau_exit(void)
@@ -457,7 +458,7 @@ static void __exit nouveau_exit(void)
if (!nouveau_modeset)
return;
- drm_exit(&driver);
+ drm_pci_exit(&driver, &nouveau_pci_driver);
nouveau_unregister_dsm_handler();
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..00aff226397d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
#include "nouveau_util.h"
struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
@@ -65,13 +65,16 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-struct nouveau_vram {
+struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
+ struct nouveau_vma tmp_vma;
u8 page_shift;
+ struct drm_mm_node *tag;
struct list_head regions;
+ dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
@@ -90,6 +93,7 @@ struct nouveau_tile_reg {
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
+ u32 valid_domains;
u32 placements[3];
u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap;
@@ -104,8 +108,6 @@ struct nouveau_bo {
struct nouveau_channel *channel;
struct nouveau_vma vma;
- bool mappable;
- bool no_vm;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -387,6 +389,7 @@ struct nouveau_pgraph_engine {
};
struct nouveau_display_engine {
+ void *priv;
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
@@ -509,8 +512,8 @@ struct nouveau_crypt_engine {
struct nouveau_vram_engine {
int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **);
- void (*put)(struct drm_device *, struct nouveau_vram **);
+ u32 type, struct nouveau_mem **);
+ void (*put)(struct drm_device *, struct nouveau_mem **);
bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};
@@ -652,8 +655,6 @@ struct drm_nouveau_private {
/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct workqueue_struct *wq;
- struct work_struct irq_work;
struct list_head vbl_waiting;
@@ -691,15 +692,22 @@ struct drm_nouveau_private {
struct {
enum {
NOUVEAU_GART_NONE = 0,
- NOUVEAU_GART_AGP,
- NOUVEAU_GART_SGDMA
+ NOUVEAU_GART_AGP, /* AGP */
+ NOUVEAU_GART_PDMA, /* paged dma object */
+ NOUVEAU_GART_HW /* on-chip gart/vm */
} type;
uint64_t aper_base;
uint64_t aper_size;
uint64_t aper_free;
+ struct ttm_backend_func *func;
+
+ struct {
+ struct page *page;
+ dma_addr_t addr;
+ } dummy;
+
struct nouveau_gpuobj *sg_ctxdma;
- struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
@@ -740,14 +748,6 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
- struct nouveau_channel *evo;
- u32 evo_alloc;
- struct {
- struct dcb_entry *dcb;
- u16 script;
- u32 pclk;
- } evo_irq;
-
struct {
struct dentry *channel_root;
} debugfs;
@@ -847,6 +847,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -1294,7 +1295,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
int size, int align, uint32_t flags,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1355,9 +1356,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t flags,
+ int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index d432134b71e0..a3a88ad00f86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,9 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
+ u32 r_dma;
+ u32 r_format;
+ u32 r_pitch;
};
static inline struct nouveau_framebuffer *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 60769d2f9a66..889c4454682e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nvbo);
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 221b8462ea37..a244702bb227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,8 +32,7 @@
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
- nouveau_private(dev)->card_type < NV_C0)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -259,11 +258,12 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
static struct nouveau_semaphore *
-alloc_semaphore(struct drm_device *dev)
+semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
- int ret;
+ int size = (dev_priv->chipset < 0x84) ? 4 : 16;
+ int ret, i;
if (!USE_SEMA(dev))
return NULL;
@@ -277,9 +277,9 @@ alloc_semaphore(struct drm_device *dev)
goto fail;
spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+ sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
+ sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
@@ -287,7 +287,8 @@ alloc_semaphore(struct drm_device *dev)
kref_init(&sema->ref);
sema->dev = dev;
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+ for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
+ nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
@@ -296,7 +297,7 @@ fail:
}
static void
-free_semaphore(struct kref *ref)
+semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
@@ -318,61 +319,107 @@ semaphore_work(void *priv, bool signalled)
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
}
static int
-emit_semaphore(struct nouveau_channel *chan, int method,
- struct nouveau_semaphore *sema)
+semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
- struct nouveau_fence *fence;
- bool smart = (dev_priv->card_type >= NV_50);
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
int ret;
- ret = RING_SPACE(chan, smart ? 8 : 4);
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 1); /* ACQUIRE_EQ */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
+ }
+
+ /* Delay semaphore destruction until its work is done */
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
- if (smart) {
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING(chan, NvSema);
- }
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
- OUT_RING(chan, sema->mem->start);
-
- if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
- /*
- * NV50 tries to be too smart and context-switch
- * between semaphores instead of doing a "first come,
- * first served" strategy like previous cards
- * do.
- *
- * That's bad because the ACQUIRE latency can get as
- * large as the PFIFO context time slice in the
- * typical DRI2 case where you have several
- * outstanding semaphores at the same moment.
- *
- * If we're going to ACQUIRE, force the card to
- * context switch before, just in case the matching
- * RELEASE is already scheduled to be executed in
- * another channel.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
- }
+ kref_get(&sema->ref);
+ nouveau_fence_work(fence, semaphore_work, sema);
+ nouveau_fence_unref(&fence);
+ return 0;
+}
+
+static int
+semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ OUT_RING (chan, sema->mem->start);
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
- BEGIN_RING(chan, NvSubSw, method, 1);
- OUT_RING(chan, 1);
-
- if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
- /*
- * Force the card to context switch, there may be
- * another channel waiting for the semaphore we just
- * released.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 2); /* RELEASE */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
@@ -383,7 +430,6 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
-
return 0;
}
@@ -400,7 +446,7 @@ nouveau_fence_sync(struct nouveau_fence *fence,
nouveau_fence_signalled(fence)))
goto out;
- sema = alloc_semaphore(dev);
+ sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
@@ -418,17 +464,17 @@ nouveau_fence_sync(struct nouveau_fence *fence,
}
/* Make wchan wait until it gets signalled */
- ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+ ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
- ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+ ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
@@ -449,22 +495,23 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
+ if (dev_priv->card_type >= NV_C0)
+ goto out_initialised;
+
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
/* we leave subchannel empty for nvc0 */
- if (dev_priv->card_type < NV_C0) {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
- }
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
/* Create a DMA object for the shared cross-channel sync area. */
- if (USE_SEMA(dev)) {
+ if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -484,14 +531,20 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle); /* whole VM */
}
FIRE_RING(chan);
+out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
-
return 0;
}
@@ -519,12 +572,13 @@ int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
- 0, 0, false, true, &dev_priv->fence.bo);
+ ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 506c508b7eda..3ce58d2222cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,19 +61,36 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ int size, int align, uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ u32 flags = 0;
int ret;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+ if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
+ flags |= TTM_PL_FLAG_SYSTEM;
+
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
- tile_flags, no_vm, mappable, pnvbo);
+ tile_flags, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ if (dev_priv->card_type >= NV_50)
+ nvbo->valid_domains &= domain;
+
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
@@ -97,7 +114,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
- rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+ rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@@ -111,19 +128,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
struct nouveau_channel *chan = NULL;
- uint32_t flags = 0;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
-
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
@@ -135,10 +144,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return PTR_ERR(chan);
}
- ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
- req->info.tile_mode, req->info.tile_flags, false,
- (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
- &nvbo);
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+ req->info.domain, req->info.tile_mode,
+ req->info.tile_flags, &nvbo);
if (chan)
nouveau_channel_put(&chan);
if (ret)
@@ -161,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
- uint32_t domains = valid_domains &
+ uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7cd872..63b9040b5f30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -393,11 +392,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits;
- if (dev_priv->card_type >= NV_50 &&
- pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
- dma_bits = 40;
- else
- dma_bits = 32;
+ dma_bits = 32;
+ if (dev_priv->card_type >= NV_50) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 &&
+ dev_priv->chipset != 0x45) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
+ dma_bits = 39;
+ }
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
@@ -455,13 +460,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &dev_priv->vga_ram);
- if (ret == 0)
- ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_WARN(dev, "failed to reserve VGA memory\n");
- nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram,
+ TTM_PL_FLAG_VRAM);
+
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
}
dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -480,7 +489,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
+ if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -666,13 +675,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_mm *mm;
- u32 b_size;
+ u64 size, block, rsvd;
int ret;
- p_size = (p_size << PAGE_SHIFT) >> 12;
- b_size = dev_priv->vram_rblock_size >> 12;
+ rsvd = (256 * 1024); /* vga memory */
+ size = (p_size << PAGE_SHIFT) - rsvd;
+ block = dev_priv->vram_rblock_size;
- ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
if (ret)
return ret;
@@ -700,9 +710,15 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
+ vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
static int
@@ -715,7 +731,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vram *node;
+ struct nouveau_mem *node;
u32 size_nc = 0;
int ret;
@@ -724,7 +740,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0xff, &node);
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret)
return ret;
@@ -769,3 +785,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+ return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+ return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct nouveau_mem *node = mem->mm_node;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+ mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mem *node;
+ int ret;
+
+ if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+ dev_priv->gart_info.aper_size))
+ return -ENOMEM;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* This node must be for evicting large-paged VRAM
+ * to system memory. Due to an nv50 limitation of
+ * not being able to mix large/small pages within
+ * the same PDE, we need to create a temporary
+ * small-paged VMA for the eviction.
+ */
+ if (vma->node->type != vm->spg_shift) {
+ ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+ vm->spg_shift, NV_MEM_ACCESS_RW,
+ &node->tmp_vma);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+ }
+
+ node->page_shift = nvbo->vma.node->type;
+ mem->mm_node = node;
+ mem->start = 0;
+ return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+ nouveau_gart_manager_init,
+ nouveau_gart_manager_fini,
+ nouveau_gart_manager_new,
+ nouveau_gart_manager_del,
+ nouveau_gart_manager_debug
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 798eaf39691c..1f7483aae9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
- u32 memtype, struct nouveau_vram **);
-void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
int nvc0_vram_init(struct drm_device *);
int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..a86f27655fc4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -39,12 +39,11 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
int ret;
if (nouveau_vram_notify)
- flags = TTM_PL_FLAG_VRAM;
+ flags = NOUVEAU_GEM_DOMAIN_VRAM;
else
- flags = TTM_PL_FLAG_TT;
+ flags = NOUVEAU_GEM_DOMAIN_GART;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
- 0, 0x0000, false, true, &ntfy);
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@@ -99,6 +98,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int size, uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
@@ -112,11 +112,16 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
- target = NV_MEM_TARGET_VRAM;
- else
- target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ if (dev_priv->card_type < NV_50) {
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ } else {
+ target = NV_MEM_TARGET_VM;
+ offset = chan->notifier_bo->vma.offset;
+ }
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 30b6544467ca..4f00c87ed86e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
+#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -490,16 +491,22 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
}
if (target == NV_MEM_TARGET_GART) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- target = NV_MEM_TARGET_PCI_NOSNOOP;
- base += dev_priv->gart_info.aper_base;
- } else
- if (base != 0) {
- base = nouveau_sgdma_get_physical(dev, base);
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
+ if (base == 0) {
+ nouveau_gpuobj_ref(gart, pobj);
+ return 0;
+ }
+
+ base = nouveau_sgdma_get_physical(dev, base);
target = NV_MEM_TARGET_PCI;
} else {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
- return 0;
+ base += dev_priv->gart_info.aper_base;
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ else
+ target = NV_MEM_TARGET_PCI;
}
}
@@ -776,7 +783,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
@@ -841,6 +848,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < 2; i++) {
+ struct nouveau_gpuobj *sem = NULL;
+ struct nv50_display_crtc *dispc =
+ &nv50_display(dev)->crtc[i];
+ u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &sem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
+ nouveau_gpuobj_ref(NULL, &sem);
+ if (ret)
+ return ret;
+ }
}
/* VRAM ctxdma */
@@ -909,7 +935,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index bef3e6910418..a24a81f5a89e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -114,7 +114,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | chan->id;
+ ctx = (gpuobj->cinst << 10) |
+ (chan->id << 28) |
+ chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9a250eb53098..1205f0f345b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
struct drm_device *dev;
dma_addr_t *pages;
+ bool *ttm_alloced;
unsigned nr_pages;
u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page)
+ struct page **pages, struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -34,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
if (!nvbe->pages)
return -ENOMEM;
+ nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+ if (!nvbe->ttm_alloced)
+ return -ENOMEM;
+
nvbe->nr_pages = 0;
while (num_pages--) {
- nvbe->pages[nvbe->nr_pages] =
- pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+ nvbe->pages[nvbe->nr_pages] =
+ dma_addrs[nvbe->nr_pages];
+ nvbe->ttm_alloced[nvbe->nr_pages] = true;
+ } else {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- nvbe->pages[nvbe->nr_pages])) {
- be->func->clear(be);
- return -EFAULT;
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
}
nvbe->nr_pages++;
@@ -65,17 +77,36 @@ nouveau_sgdma_clear(struct ttm_backend *be)
be->func->unbind(be);
while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ if (!nvbe->ttm_alloced[nvbe->nr_pages])
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
kfree(nvbe->pages);
+ kfree(nvbe->ttm_alloced);
nvbe->pages = NULL;
+ nvbe->ttm_alloced = NULL;
nvbe->nr_pages = 0;
}
}
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+ if (be) {
+ NV_DEBUG(nvbe->dev, "\n");
+
+ if (nvbe) {
+ if (nvbe->pages)
+ be->func->clear(be);
+ kfree(nvbe);
+ }
+ }
+}
+
static int
-nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -102,7 +133,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
static int
-nouveau_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -125,59 +156,245 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
return 0;
}
+static struct ttm_backend_func nv04_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv04_sgdma_bind,
+ .unbind = nv04_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100810, 0x00000022);
+ if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
+ NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100810));
+ nv_wr32(dev, 0x100810, 0x00000000);
+}
+
+static int
+nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2;
+ u32 cnt = nvbe->nr_pages;
- if (be) {
- NV_DEBUG(nvbe->dev, "\n");
+ nvbe->offset = mem->start << PAGE_SHIFT;
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
+ while (cnt--) {
+ nv_wo32(pgt, pte, (*list++ >> 7) | 1);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv41_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ while (cnt--) {
+ nv_wo32(pgt, pte, 0x00000000);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = false;
+ return 0;
+}
+
+static struct ttm_backend_func nv41_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv41_sgdma_bind,
+ .unbind = nv41_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+static void
+nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
+ if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
+ NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100808));
+ nv_wr32(dev, 0x100808, 0x00000000);
+}
+
+static void
+nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
+ u32 pte, tmp[4];
+
+ pte = base >> 2;
+ base &= ~0x0000000f;
+
+ tmp[0] = nv_ro32(pgt, base + 0x0);
+ tmp[1] = nv_ro32(pgt, base + 0x4);
+ tmp[2] = nv_ro32(pgt, base + 0x8);
+ tmp[3] = nv_ro32(pgt, base + 0xc);
+ while (cnt--) {
+ u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
+ switch (pte++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
}
}
+
+ tmp[3] |= 0x40000000;
+
+ nv_wo32(pgt, base + 0x0, tmp[0]);
+ nv_wo32(pgt, base + 0x4, tmp[1]);
+ nv_wo32(pgt, base + 0x8, tmp[2]);
+ nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2, tmp[4];
+ u32 cnt = nvbe->nr_pages;
+ int i;
nvbe->offset = mem->start << PAGE_SHIFT;
- nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, list, pte, part);
+ pte += (part << 2);
+ list += part;
+ cnt -= part;
+ }
+
+ while (cnt >= 4) {
+ for (i = 0; i < 4; i++)
+ tmp[i] = *list++ >> 12;
+ nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
+ nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
+ nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
+ nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, list, pte, cnt);
+
+ nv44_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, NULL, pte, part);
+ pte += (part << 2);
+ cnt -= part;
+ }
- if (!nvbe->bound)
- return 0;
+ while (cnt >= 4) {
+ nv_wo32(pgt, pte + 0x0, 0x00000000);
+ nv_wo32(pgt, pte + 0x4, 0x00000000);
+ nv_wo32(pgt, pte + 0x8, 0x00000000);
+ nv_wo32(pgt, pte + 0xc, 0x00000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT);
+ nv44_sgdma_flush(nvbe);
nvbe->bound = false;
return 0;
}
-static struct ttm_backend_func nouveau_sgdma_backend = {
+static struct ttm_backend_func nv44_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
- .bind = nouveau_sgdma_bind,
- .unbind = nouveau_sgdma_unbind,
+ .bind = nv44_sgdma_bind,
+ .unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = mem->mm_node;
+ /* noop: bound in move_notify() */
+ node->pages = nvbe->pages;
+ nvbe->pages = (dma_addr_t *)node;
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+ /* noop: unbound in move_notify() */
+ nvbe->pages = node->pages;
+ node->pages = NULL;
+ nvbe->bound = false;
+ return 0;
+}
+
static struct ttm_backend_func nv50_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -198,10 +415,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
nvbe->dev = dev;
- if (dev_priv->card_type < NV_50)
- nvbe->backend.func = &nouveau_sgdma_backend;
- else
- nvbe->backend.func = &nv50_sgdma_backend;
+ nvbe->backend.func = dev_priv->gart_info.func;
return &nvbe->backend;
}
@@ -210,21 +424,65 @@ nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
- uint32_t aper_size, obj_size;
- int i, ret;
+ u32 aper_size, align;
+ int ret;
- if (dev_priv->card_type < NV_50) {
- if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
- aper_size = 64 * 1024 * 1024;
- else
- aper_size = 512 * 1024 * 1024;
+ if (dev_priv->card_type >= NV_50 ||
+ dev_priv->ramin_rsvd_vram >= 2 * 1024 * 1024)
+ aper_size = 512 * 1024 * 1024;
+ else
+ aper_size = 64 * 1024 * 1024;
+
+ /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
+ * Christmas. The cards before it have them, the cards after
+ * it have them, why is NV44 so unloved?
+ */
+ dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
+ if (!dev_priv->gart_info.dummy.page)
+ return -ENOMEM;
+
+ dev_priv->gart_info.dummy.addr =
+ pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
+ NV_ERROR(dev, "error mapping dummy page\n");
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ return -ENOMEM;
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ dev_priv->gart_info.func = &nv50_sgdma_backend;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev)) {
+ dev_priv->gart_info.func = &nv44_sgdma_backend;
+ align = 512 * 1024;
+ } else {
+ dev_priv->gart_info.func = &nv41_sgdma_backend;
+ align = 16;
+ }
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
- obj_size += 8; /* ctxdma header */
+ ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ } else {
+ ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
@@ -236,25 +494,14 @@ nouveau_sgdma_init(struct drm_device *dev)
(0 << 14) /* RW */ |
(2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++)
- nv_wo32(gpuobj, i * 4, 0x00000000);
dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
- } else
- if (dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
- 12, NV_MEM_ACCESS_RW,
- &dev_priv->gart_info.vma);
- if (ret)
- return ret;
-
- dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
- dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
+ dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
+ dev_priv->gart_info.func = &nv04_sgdma_backend;
}
- dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
return 0;
}
@@ -264,7 +511,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
- nouveau_vm_put(&dev_priv->gart_info.vma);
+
+ if (dev_priv->gart_info.dummy.page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ }
}
uint32_t
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a54fc431fe98..05294910e135 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -544,7 +544,6 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@@ -552,41 +551,8 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
- /* no dma objects on fermi... */
- if (dev_priv->card_type >= NV_C0)
- goto out_done;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vram_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->gart_info.aper_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
-out_done:
mutex_unlock(&dev_priv->channel->mutex);
return 0;
-
-out_err:
- nouveau_channel_put(&dev_priv->channel);
- return ret;
}
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
@@ -929,12 +895,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->wq = create_workqueue("nouveau");
- if (!dev_priv->wq) {
- ret = -EINVAL;
- goto err_priv;
- }
-
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
/* resource 2 is RAMIN (mmio regs + 0x1000000) */
@@ -947,7 +907,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
ret = -EINVAL;
- goto err_wq;
+ goto err_priv;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@@ -1054,8 +1014,6 @@ err_ramin:
iounmap(dev_priv->ramin);
err_mmio:
iounmap(dev_priv->mmio);
-err_wq:
- destroy_workqueue(dev_priv->wq);
err_priv:
kfree(dev_priv);
dev->dev_private = NULL;
@@ -1103,9 +1061,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev->pci_device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
@@ -1126,7 +1084,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = (dev_priv->card_type < NV_50);
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 8d9968e1cba8..649b0413b09f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -239,11 +239,9 @@ static bool
probe_monitoring_device(struct nouveau_i2c_chan *i2c,
struct i2c_board_info *info)
{
- char modalias[16] = "i2c:";
struct i2c_client *client;
- strlcat(modalias, info->type, sizeof(modalias));
- request_module(modalias);
+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(&i2c->adapter, info);
if (!client)
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 97d82aedf86b..62824c80bcb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -28,7 +28,7 @@
#include "nouveau_vm.h"
void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_mm_node *r;
@@ -40,7 +40,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len;
- list_for_each_entry(r, &vram->regions, rl_entry) {
+ delta = 0;
+ list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
@@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
end = max;
len = end - pte;
- vm->map(vma, pgt, vram, pte, len, phys);
+ vm->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
@@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
pde++;
pte = 0;
}
+
+ delta += (u64)len << vma->node->type;
}
}
@@ -67,14 +70,14 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
}
void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
- nouveau_vm_map_at(vma, 0, vram);
+ nouveau_vm_map_at(vma, 0, node);
}
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- dma_addr_t *list)
+ struct nouveau_mem *mem, dma_addr_t *list)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
@@ -94,7 +97,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = max;
len = end - pte;
- vm->map_sg(vma, pgt, pte, list, len);
+ vm->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index e1193515771b..2e06b55cfdc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -67,9 +67,10 @@ struct nouveau_vm {
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -82,20 +83,20 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- dma_addr_t *);
+ struct nouveau_mem *, dma_addr_t *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);
@@ -104,9 +105,9 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 297505eb98d5..a260fbbe3d9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f89d104698df..dfa600c46186 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -379,6 +379,15 @@ out:
return handled;
}
+static const char *nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
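The helper simply indexes an eight-entry table with bits 31:29 of the DMA_PUSHER state word. A quick stand-alone check of that decode (the sample state values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

static const char *nv_dma_state_err(uint32_t state)
{
        static const char * const desc[] = {
                "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
                "INVALID_CMD", "IB_EMPTY" /* NV50+ */, "MEM_FAULT", "UNK"
        };
        return desc[(state >> 29) & 0x7];
}

int main(void)
{
        printf("%s\n", nv_dma_state_err(0x80000000)); /* prints INVALID_CMD */
        printf("%s\n", nv_dma_state_err(0x20000000)); /* prints CALL_SUBR_ACTIVE */
        return 0;
}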
+
void
nv04_fifo_isr(struct drm_device *dev)
{
@@ -460,9 +469,10 @@ nv04_fifo_isr(struct drm_device *dev)
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
+ "State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
@@ -476,8 +486,9 @@ nv04_fifo_isr(struct drm_device *dev)
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 28119fd19d03..3900cebba560 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -197,10 +197,12 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
- struct drm_display_mode *mode, *tv_mode;
+ const struct drm_display_mode *tv_mode;
int n = 0;
for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ struct drm_display_mode *mode;
+
mode = drm_mode_duplicate(encoder->dev, tv_mode);
mode->clock = tv_norm->tv_enc_mode.vrefresh *
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 6bf03840f9eb..622e72221682 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -112,7 +112,7 @@ extern struct nv17_tv_norm_params {
} nv17_tv_norms[NUM_TV_NORMS];
#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
-extern struct drm_display_mode nv17_tv_modes[];
+extern const struct drm_display_mode nv17_tv_modes[];
static inline int interpolate(int y0, int y1, int y2, int x)
{
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 9d3893c50a41..4d1d29f60307 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -438,7 +438,7 @@ void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
/* Timings similar to the ones the blob sets */
-struct drm_display_mode nv17_tv_modes[] = {
+const struct drm_display_mode nv17_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f3d9c0505f7b..f0ac2a768c67 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
}
}
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+ nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+ u32 vinst;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+	/* calculate the VRAM address of this PRAMIN block; the object
+	 * must be allocated with 512KiB alignment and must not exceed
+	 * a total size of 512KiB for this to work correctly
+ */
+ vinst = nv_rd32(dev, 0x10020c);
+ vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+ nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+ nv_wr32(dev, 0x100850, 0x00008000);
+ nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+ nv_wr32(dev, 0x100820, 0x00000000);
+ nv_wr32(dev, 0x10082c, 0x00000001);
+ nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
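Worked through with assumed numbers, the address calculation above behaves as follows: if register 0x10020c reads back 0x10000000 (256 MiB of VRAM) and the ctxdma object sits at PRAMIN offset 0x00080000, the offset rounds up to the next 512KiB boundary (0x00100000) and vinst becomes 0x0ff00000. Both inputs are illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vram_top = 0x10000000; /* assumed nv_rd32(dev, 0x10020c) */
        uint32_t pinst    = 0x00080000; /* assumed gart->pinst, 512KiB aligned */
        uint32_t vinst    = vram_top - (((pinst >> 19) + 1) << 19);

        printf("vinst = 0x%08x\n", vinst); /* 0x0ff00000 */
        return 0;
}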
+
int
nv40_fb_init(struct drm_device *dev)
{
@@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
uint32_t tmp;
int i;
- /* This is strictly a NV4x register (don't know about NV5x). */
- /* The blob sets these to all kinds of values, and they mess up our setup. */
- /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
- /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
- /* Any idea what this is? */
- nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+ if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev))
+ nv44_fb_init_gart(dev);
+ else
+ nv40_fb_init_gart(dev);
+ }
switch (dev_priv->chipset) {
case 0x40:
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 9023c4dbb449..2b9984027f41 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -65,7 +65,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int index = nv_crtc->index, ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -135,8 +135,7 @@ static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -186,8 +185,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
struct nouveau_connector *nv_connector =
nouveau_crtc_connector_get(nv_crtc);
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_display_mode *native_mode = NULL;
struct drm_display_mode *mode = &nv_crtc->base.mode;
uint32_t outX, outY, horiz, vert;
@@ -445,6 +443,42 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
+static int
+nv50_crtc_wait_complete(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ nv_wr32(dev, 0x61002c, 0x370);
+ nv_wr32(dev, 0x000140, 1);
+
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+
+ return -EBUSY;
+}
+
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -453,6 +487,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ nv50_display_flip_stop(crtc);
drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -461,24 +496,14 @@ static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(dev, "no space while committing crtc\n");
- return;
- }
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
+ nv50_crtc_wait_complete(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static bool
@@ -491,15 +516,15 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *passed_fb,
- int x, int y, bool update, bool atomic)
+ int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- int ret, format;
+ int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -525,28 +550,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- switch (drm_fb->depth) {
- case 8:
- format = NV50_EVO_CRTC_FB_DEPTH_8;
- break;
- case 15:
- format = NV50_EVO_CRTC_FB_DEPTH_15;
- break;
- case 16:
- format = NV50_EVO_CRTC_FB_DEPTH_16;
- break;
- case 24:
- case 32:
- format = NV50_EVO_CRTC_FB_DEPTH_24;
- break;
- case 30:
- format = NV50_EVO_CRTC_FB_DEPTH_30;
- break;
- default:
- NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
- return -EINVAL;
- }
-
nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -556,14 +559,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
+ OUT_RING (evo, fb->r_dma);
}
ret = RING_SPACE(evo, 12);
@@ -571,45 +567,26 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
- if (!nv_crtc->fb.tile_flags) {
- OUT_RING(evo, drm_fb->pitch | (1 << 20));
- } else {
- u32 tile_mode = fb->nvbo->tile_mode;
- if (dev_priv->card_type >= NV_C0)
- tile_mode >>= 4;
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
- }
- if (dev_priv->chipset == 0x50)
- OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
- else
- OUT_RING(evo, format);
+ OUT_RING (evo, nv_crtc->fb.offset >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
+ OUT_RING (evo, fb->r_pitch);
+ OUT_RING (evo, fb->r_format);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING(evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+ OUT_RING (evo, fb->base.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING(evo, (y << 16) | x);
+ OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
nv_crtc->lut.depth = fb->base.depth;
nv50_crtc_lut_load(crtc);
}
- if (update) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- }
-
return 0;
}
@@ -619,8 +596,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector = NULL;
uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
@@ -700,14 +676,25 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+ if (ret)
+ return ret;
+
+ ret = nv50_crtc_wait_complete(crtc);
+ if (ret)
+ return ret;
+
+ return nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static int
@@ -715,7 +702,14 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
+ if (ret)
+ return ret;
+
+ return nv50_crtc_wait_complete(crtc);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -758,7 +752,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
nv_crtc->lut.depth = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+ 0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -784,7 +778,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 1b9ce3021aa3..9752c35bb84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -36,9 +36,9 @@
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -71,9 +71,9 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 875414b09ade..808f3ec8f827 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -41,8 +41,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -216,8 +215,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7cc94ed9ed95..75a376cc342a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,6 +24,7 @@
*
*/
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
@@ -34,6 +35,7 @@
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
+static void nv50_display_bh(unsigned long);
static inline int
nv50_sor_nr(struct drm_device *dev)
@@ -172,16 +174,16 @@ nv50_display_init(struct drm_device *dev)
ret = nv50_evo_init(dev);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = nv50_display(dev)->master;
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 11);
+ ret = RING_SPACE(evo, 15);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+ OUT_RING(evo, NvEvoSync);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
@@ -190,6 +192,11 @@ nv50_display_init(struct drm_device *dev)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
+ /* required to make display sync channels not hate life */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
FIRE_RING(evo);
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
@@ -201,6 +208,8 @@ nv50_display_init(struct drm_device *dev)
static int nv50_display_disable(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
int ret, i;
@@ -212,12 +221,12 @@ static int nv50_display_disable(struct drm_device *dev)
nv50_crtc_blank(crtc, true);
}
- ret = RING_SPACE(dev_priv->evo, 2);
+ ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(dev_priv->evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
}
- FIRE_RING(dev_priv->evo);
+ FIRE_RING(evo);
/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
* cleaning up?
@@ -267,10 +276,16 @@ int nv50_display_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *ct;
+ struct nv50_display *priv;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.display.priv = priv;
+
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@@ -330,7 +345,7 @@ int nv50_display_create(struct drm_device *dev)
}
}
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
ret = nv50_display_init(dev);
@@ -345,12 +360,131 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
+ struct nv50_display *disp = nv50_display(dev);
+
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
+ kfree(disp);
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 8);
+ if (ret) {
+ WARN_ON(1);
+ return;
+ }
+
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0094, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 24);
+ if (unlikely(ret))
+ return ret;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
+
+ ret = RING_SPACE(chan, 10);
+ if (ret) {
+ WIND_RING(evo);
+ return ret;
+ }
+
+ if (dev_priv->chipset < 0xc0) {
+ BEGIN_RING(chan, NvSubSw, 0x0060, 2);
+ OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
+ OUT_RING (chan, dispc->sem.offset);
+ BEGIN_RING(chan, NvSubSw, 0x006c, 1);
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ BEGIN_RING(chan, NvSubSw, 0x0064, 2);
+ OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, 0x74b1e000);
+ BEGIN_RING(chan, NvSubSw, 0x0060, 1);
+ if (dev_priv->chipset < 0x84)
+ OUT_RING (chan, NvSema);
+ else
+ OUT_RING (chan, chan->vram_handle);
+ } else {
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ }
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
+ 0xf00d0000 | dispc->sem.value);
+ }
+
+ /* queue the flip on the crtc's "display sync" channel */
+ BEGIN_RING(evo, 0, 0x0100, 1);
+ OUT_RING (evo, 0xfffe0000);
+ BEGIN_RING(evo, 0, 0x0084, 5);
+ OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+ OUT_RING (evo, dispc->sem.offset);
+ OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (evo, 0x74b1e000);
+ OUT_RING (evo, NvEvoSync);
+ BEGIN_RING(evo, 0, 0x00a0, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, nv_fb->r_dma);
+ BEGIN_RING(evo, 0, 0x0110, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0800, 5);
+ OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (fb->height << 16) | fb->width);
+ OUT_RING (evo, nv_fb->r_pitch);
+ OUT_RING (evo, nv_fb->r_format);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+
+ dispc->sem.offset ^= 0x10;
+ dispc->sem.value++;
+ return 0;
}
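The flip queueing relies on a small double-buffered semaphore page per CRTC: the render channel (or the CPU, when no channel is given) posts 0xf00d0000 | value at the active 16-byte slot, the EVO "display sync" channel waits for that token before latching the new framebuffer, and completion restores the 0x74b1e000 idle pattern before the offset toggles and the value advances. A host-side model of that handshake, a sketch only, with made-up helper names and CPU stores standing in for the channel methods:

#include <stdint.h>
#include <stdio.h>

struct flip_sem {
        uint32_t word[1024];    /* stands in for the 4KiB dispc->sem.bo */
        uint32_t offset;        /* toggles between 0x00 and 0x10 */
        uint16_t value;         /* increments once per queued flip */
};

static void render_release(struct flip_sem *s)
{
        /* rendering finished: post the token the EVO channel waits for */
        s->word[s->offset / 4] = 0xf00d0000 | s->value;
}

static int evo_acquire(const struct flip_sem *s)
{
        /* condition programmed into the EVO channel before the flip */
        return s->word[s->offset / 4] == (0xf00d0000u | s->value);
}

static void evo_complete(struct flip_sem *s)
{
        /* flip landed: restore the idle pattern, advance the bookkeeping */
        s->word[s->offset / 4] = 0x74b1e000;
        s->offset ^= 0x10;
        s->value++;
}

int main(void)
{
        struct flip_sem s = { .offset = 0, .value = 0 };
        unsigned i;

        for (i = 0; i < 1024; i++)
                s.word[i] = 0x74b1e000; /* mirrors the init loop in nv50_evo_create */

        for (i = 0; i < 4; i++) {
                render_release(&s);
                printf("flip %u: acquire=%d\n", i, evo_acquire(&s));
                evo_complete(&s);
        }
        return 0;
}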
static u16
@@ -466,11 +600,12 @@ static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
@@ -541,7 +676,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
if (dcb->type == type && (dcb->or & (1 << or))) {
nouveau_bios_run_display_table(dev, dcb, 0, -1);
- dev_priv->evo_irq.dcb = dcb;
+ disp->irq.dcb = dcb;
goto ack;
}
}
@@ -587,15 +722,16 @@ static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+ struct nv50_display *disp = nv50_display(dev);
+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dcb = dev_priv->evo_irq.dcb;
+ dcb = disp->irq.dcb;
if (dcb) {
nouveau_bios_run_display_table(dev, dcb, 0, -2);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
}
/* CRTC clock change requested? */
@@ -692,9 +828,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
- dev_priv->evo_irq.dcb = dcb;
- dev_priv->evo_irq.pclk = pclk;
- dev_priv->evo_irq.script = script;
+ disp->irq.dcb = dcb;
+ disp->irq.pclk = pclk;
+ disp->irq.script = script;
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
@@ -735,13 +871,13 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
- u16 script = dev_priv->evo_irq.script;
- u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
+ struct nv50_display *disp = nv50_display(dev);
+ struct dcb_entry *dcb = disp->irq.dcb;
+ u16 script = disp->irq.script;
+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
if (!dcb)
goto ack;
@@ -754,12 +890,10 @@ ack:
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
-void
-nv50_display_irq_handler_bh(struct work_struct *work)
+static void
+nv50_display_bh(unsigned long data)
{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, irq_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = (struct drm_device *)data;
for (;;) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
@@ -807,7 +941,7 @@ nv50_display_error_handler(struct drm_device *dev)
static void
nv50_display_isr(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
uint32_t delayed = 0;
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
@@ -835,8 +969,7 @@ nv50_display_isr(struct drm_device *dev)
NV50_PDISPLAY_INTR_1_CLK_UNK40));
if (clock) {
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (!work_pending(&dev_priv->irq_work))
- queue_work(dev_priv->wq, &dev_priv->irq_work);
+ tasklet_schedule(&disp->tasklet);
delayed |= clock;
intr1 &= ~clock;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index f0e30b78ef6b..c2da503a22aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,7 +35,36 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler_bh(struct work_struct *work);
+struct nv50_display_crtc {
+ struct nouveau_channel *sync;
+ struct {
+ struct nouveau_bo *bo;
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_display {
+ struct nouveau_channel *master;
+ struct nouveau_gpuobj *ntfy;
+
+ struct nv50_display_crtc crtc[2];
+
+ struct tasklet_struct tasklet;
+ struct {
+ struct dcb_entry *dcb;
+ u16 script;
+ u32 pclk;
+ } irq;
+};
+
+static inline struct nv50_display *
+nv50_display(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->engine.display.priv;
+}
+
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
@@ -44,4 +73,15 @@ void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *chan);
+void nv50_display_flip_stop(struct drm_crtc *);
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
+ u64 size);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **);
+
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 0ea090f4244a..a2cfaa691e9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -27,20 +27,17 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
+#include "nv50_display.h"
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv;
struct nouveau_channel *evo = *pevo;
if (!evo)
return;
*pevo = NULL;
- dev_priv = evo->dev->dev_private;
- dev_priv->evo_alloc &= ~(1 << evo->id);
-
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -51,42 +48,61 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
kfree(evo);
}
+void
+nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+{
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ u32 flags5;
+
+ if (dev_priv->chipset < 0xc0) {
+ /* not supported on 0x50, specified in format mthd */
+ if (dev_priv->chipset == 0x50)
+ memtype = 0;
+ flags5 = 0x00010000;
+ } else {
+ if (memtype & 0x80000000)
+ flags5 = 0x00000000; /* large pages */
+ else
+ flags5 = 0x00020000;
+ }
+
+ nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
+ NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
+ nv_wo32(obj, 0x14, flags5);
+ dev_priv->engine.instmem.flush(obj->dev);
+}
+
int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
- u32 flags5)
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
+ struct nv50_display *disp = nv50_display(evo->dev);
struct nouveau_gpuobj *obj = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
if (ret)
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- nv_wo32(obj, 20, flags5);
- dev_priv->engine.instmem.flush(dev);
+ nv50_evo_dmaobj_init(obj, memtype, base, size);
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
+ ret = nouveau_ramht_insert(evo, handle, obj);
+ if (ret)
+ goto out;
- return 0;
+ if (pobj)
+ nouveau_gpuobj_ref(obj, pobj);
+out:
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
}
static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+nv50_evo_channel_new(struct drm_device *dev, int chid,
+ struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo;
int ret;
@@ -95,25 +111,13 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
return -ENOMEM;
*pevo = evo;
- for (evo->id = 0; evo->id < 5; evo->id++) {
- if (dev_priv->evo_alloc & (1 << evo->id))
- continue;
-
- dev_priv->evo_alloc |= (1 << evo->id);
- break;
- }
-
- if (evo->id == 5) {
- kfree(evo);
- return -ENODEV;
- }
-
+ evo->id = chid;
evo->dev = dev;
evo->user_get = 4;
evo->user_put = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &evo->pushbuf_bo);
+ &evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
if (ret) {
@@ -138,8 +142,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
}
/* bind primary evo channel's ramht to the channel */
- if (dev_priv->evo && evo != dev_priv->evo)
- nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+ if (disp->master && evo != disp->master)
+ nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
return 0;
}
@@ -212,21 +216,39 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
+static void
+nv50_evo_destroy(struct drm_device *dev)
+{
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sem.bo) {
+ nouveau_bo_unmap(disp->crtc[i].sem.bo);
+ nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
+ }
+ nv50_evo_channel_del(&disp->crtc[i].sync);
+ }
+ nouveau_gpuobj_ref(NULL, &disp->ntfy);
+ nv50_evo_channel_del(&disp->master);
+}
+
static int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *evo;
- int ret;
+ int ret, i, j;
/* create primary evo channel, the one we use for modesetting
	 * purposes
*/
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ ret = nv50_evo_channel_new(dev, 0, &disp->master);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = disp->master;
/* setup object management on it, any other evo channel will
* use this also as there's no per-channel support on the
@@ -236,109 +258,167 @@ nv50_evo_create(struct drm_device *dev)
NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+	/* not sure exactly what this is...
+	 *
+	 * the first dword of the structure is used by NVIDIA to wait on
+ * full completion of an EVO "update" command.
+ *
+ * method 0x8c on the master evo channel will fill a lot more of
+ * this structure with some undefined info
+ */
+ ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
+ disp->ntfy->vinst, disp->ntfy->size, NULL);
+ if (ret)
+ goto err;
/* create some default objects for the scanout memtypes we support */
- if (dev_priv->card_type >= NV_C0) {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
- 0, 0xffffffff, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00020000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
- } else {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ /* create "display sync" channels and other structures we need
+ * to implement page flipping
+ */
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ u64 offset;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
+ if (ret)
+ goto err;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &dispc->sem.bo);
+ if (!ret) {
+ offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(dispc->sem.bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &dispc->sem.bo);
}
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
+ offset, 4096, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ for (j = 0; j < 4096; j += 4)
+ nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
+ dispc->sem.offset = 0;
}
return 0;
+
+err:
+ nv50_evo_destroy(dev);
+ return ret;
}
int
nv50_evo_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
+ struct nv50_display *disp = nv50_display(dev);
+ int ret, i;
- if (!dev_priv->evo) {
+ if (!disp->master) {
ret = nv50_evo_create(dev);
if (ret)
return ret;
}
- return nv50_evo_channel_init(dev_priv->evo);
+ ret = nv50_evo_channel_init(disp->master);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = nv50_evo_channel_init(disp->crtc[i].sync);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void
nv50_evo_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
- if (dev_priv->evo) {
- nv50_evo_channel_fini(dev_priv->evo);
- nv50_evo_channel_del(&dev_priv->evo);
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sync)
+ nv50_evo_channel_fini(disp->crtc[i].sync);
}
+
+ if (disp->master)
+ nv50_evo_channel_fini(disp->master);
+
+ nv50_evo_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aa4f0d3cea8e..3860ca62cb19 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -27,12 +27,6 @@
#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags,
- u32 offset, u32 limit);
-
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -119,5 +113,7 @@ int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#define NV50_EVO_CRTC_UNK900 0x00000900
+#define NV50_EVO_CRTC_UNK904 0x00000904
#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 50290dea0ac4..ed411d88451d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -8,31 +8,61 @@ struct nv50_fb_priv {
dma_addr_t r100c08;
};
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv = pfb->priv;
+
+ if (drm_mm_initialized(&pfb->tag_heap))
+ drm_mm_takedown(&pfb->tag_heap);
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
static int
nv50_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv;
+ u32 tagmem;
+ int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ pfb->priv = priv;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c08_page) {
- kfree(priv);
+ nv50_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
return -EFAULT;
}
- dev_priv->engine.fb.priv = priv;
+ tagmem = nv_rd32(dev, 0x100320);
+ NV_DEBUG(dev, "%d tags available\n", tagmem);
+ ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+ if (ret) {
+ nv50_fb_destroy(dev);
+ return ret;
+ }
+
return 0;
}
@@ -81,18 +111,7 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fb_priv *priv;
-
- priv = dev_priv->engine.fb.priv;
- if (!priv)
- return;
- dev_priv->engine.fb.priv = NULL;
-
- pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 8dd04c5dac67..c34a074f7ea1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -149,6 +149,7 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3204, 0);
nv_wr32(dev, 0x3210, 0);
nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, 0x2044, 0x01003fff);
/* Enable dummy channels setup by nv50_instmem.c */
nv50_fifo_channel_enable(dev, 0);
@@ -273,7 +274,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x2101ffff);
+ nv_wo32(ramfc, 0x44, 0x01003fff);
nv_wo32(ramfc, 0x60, 0x7fffffff);
nv_wo32(ramfc, 0x40, 0x00000000);
nv_wo32(ramfc, 0x7c, 0x30000001);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 6b149c0cc06d..d4f4206dad7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -137,6 +137,7 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
struct nv50_gpio_priv *priv = pgpio->priv;
struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
+ LIST_HEAD(tofree);
unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
@@ -149,10 +150,14 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
gpioh->handler != handler ||
gpioh->data != data)
continue;
- list_del(&gpioh->head);
- kfree(gpioh);
+ list_move(&gpioh->head, &tofree);
}
spin_unlock_irqrestore(&priv->lock, flags);
+
+ list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+ flush_work_sync(&gpioh->work);
+ kfree(gpioh);
+ }
}
bool
@@ -205,7 +210,6 @@ nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
int ret;
if (!pgpio->priv) {
@@ -213,7 +217,6 @@ nv50_gpio_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -293,7 +296,7 @@ nv50_gpio_isr(struct drm_device *dev)
continue;
gpioh->inhibit = true;
- queue_work(dev_priv->wq, &gpioh->work);
+ schedule_work(&gpioh->work);
}
spin_unlock(&priv->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 37e21d2be95b..e1267a1f6d10 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -95,13 +95,41 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
}
static void
-nv50_graph_init_regs(struct drm_device *dev)
+nv50_graph_init_zcull(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
NV_DEBUG(dev, "\n");
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
- (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
- nv_wr32(dev, 0x402ca8, 0x800);
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ nv_wr32(dev, 0x402ca8, 0x00000800);
+ break;
+ case 0xa0:
+ default:
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ if (dev_priv->chipset == 0xa0 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac) {
+ nv_wr32(dev, 0x402ca8, 0x00000802);
+ } else {
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ nv_wr32(dev, 0x402ca8, 0x00000002);
+ }
+
+ break;
+ }
+
+ /* zero out zcull regions */
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
+ }
}
static int
@@ -136,6 +164,7 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
}
kfree(cp);
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
@@ -151,7 +180,7 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
- nv50_graph_init_regs(dev);
+ nv50_graph_init_zcull(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
@@ -409,12 +438,7 @@ static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s)) {
- /* XXX - Do something here */
- }
-
+ nouveau_finish_page_flip(chan, NULL);
return 0;
}
@@ -912,10 +936,10 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
- nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
+ nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
+ nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
+ nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
+ nv_rd32(dev, 0x40501c));
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea0041810ae3..306d4b1f585f 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
nouveau_gpuobj_ref(NULL, &chan->ramfc);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev)
nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
- if (dev_priv->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&dev_priv->ramin_heap))
drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
}
struct nv50_gpuobj_node {
- struct nouveau_vram *vram;
+ struct nouveau_mem *vram;
struct nouveau_vma chan_vma;
u32 align;
};
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b4a5ecb199f9..c25c59386420 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -41,8 +41,7 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -184,8 +183,7 @@ static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08241e5..b23794c8859b 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -31,7 +31,6 @@ void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2])
{
- struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
@@ -58,10 +57,9 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
- struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
phys |= 1; /* present */
phys |= (u64)memtype << 40;
@@ -85,12 +83,13 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ u32 comp = (mem->memtype & 0x180) >> 7;
u32 block;
int i;
- phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
cnt <<= 3;
@@ -107,6 +106,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
phys += block << (vma->node->type - 3);
cnt -= block;
+ if (comp) {
+ u32 tag = mem->tag->start + ((delta >> 16) * comp);
+ offset_h |= (tag << 17);
+ delta += block << (vma->node->type - 3);
+ }
while (block) {
nv_wo32(pgt, pte + 0, offset_l);
@@ -119,11 +123,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
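
In the nv50_vm.c hunks above, nv50_vm_addr() now derives dev_priv from vma->vm->dev and builds the PTE from the physical address, a present bit, and the storage type shifted to bit 40; when compression is in use, nv50_vm_map() additionally ORs a tag index (advanced per 64 KiB of the mapping via delta) into bit 17 of the PTE's high word. A standalone sketch of that packing, keeping only the fields visible in the hunks (other layout details are omitted rather than guessed):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative PTE assembly: present bit, memtype at bit 40, and an
	 * optional compression tag OR'd into bit 17 of the upper 32 bits.
	 */
	static uint64_t pte_sketch(uint64_t phys, uint32_t memtype, uint32_t tag)
	{
		uint64_t pte = phys;

		pte |= 1;				/* present */
		pte |= (uint64_t)memtype << 40;		/* storage/kind type */
		if (tag)
			pte |= (uint64_t)(tag << 17) << 32;	/* high-word tag */
		return pte;
	}

	int main(void)
	{
		printf("pte = 0x%016llx\n",
		       (unsigned long long)pte_sketch(0x20000000ULL, 0x7a, 3));
		return 0;
	}
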
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 58e98ad36347..ffbc3d8cf5be 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -48,42 +48,49 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
}
void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
- vram = *pvram;
- *pvram = NULL;
- if (unlikely(vram == NULL))
+ mem = *pmem;
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
return;
mutex_lock(&mm->mutex);
- while (!list_empty(&vram->regions)) {
- this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ while (!list_empty(&mem->regions)) {
+ this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
list_del(&this->rl_entry);
nouveau_mm_put(mm, this);
}
+
+ if (mem->tag) {
+ drm_mm_put_block(mem->tag);
+ mem->tag = NULL;
+ }
mutex_unlock(&mm->mutex);
- kfree(vram);
+ kfree(mem);
}
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **pvram)
+ u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
+ int comp = (memtype & 0x300) >> 8;
+ int type = (memtype & 0x07f);
int ret;
if (!types[type])
@@ -92,32 +99,46 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
align >>= 12;
size_nc >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
-
mutex_lock(&mm->mutex);
+ if (comp) {
+ if (align == 16) {
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int n = (size >> 4) * comp;
+
+ mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+ if (mem->tag)
+ mem->tag = drm_mm_get_block(mem->tag, n, 0);
+ }
+
+ if (unlikely(!mem->tag))
+ comp = 0;
+ }
+
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (comp << 7) | type;
+ mem->size = size;
+
do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
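
nv50_vram_new() above now splits its memtype argument into a compression field (bits 8-9) and a storage type (low 7 bits). For compressed types it reserves tag RAM from the pfb->tag_heap initialised in nv50_fb.c, at a rate of comp tags per 16 pages (64 KiB) of the allocation, and silently falls back to an uncompressed mapping when no tags are available. A tiny worked example of the tag arithmetic (sizes are hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bytes = 1ULL << 20;	/* 1 MiB allocation */
		unsigned size = bytes >> 12;		/* 256 pages of 4 KiB */
		int comp = 3;				/* compressed memtype */
		int tags = (size >> 4) * comp;		/* one slot per 16 pages */

		printf("%u pages -> %d tag entries\n", size, tags);
		return 0;
	}
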
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index e6f92c541dba..2886f2726a9e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -116,7 +116,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
/* allocate vram for control regs, map into polling area */
ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &fifoch->user);
+ 0, 0, &fifoch->user);
if (ret)
goto error;
@@ -418,6 +418,12 @@ nvc0_fifo_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x002100);
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(dev, 0x00259c);
u32 u = units;
@@ -446,10 +452,15 @@ nvc0_fifo_isr(struct drm_device *dev)
stat &= ~0x20000000;
}
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
if (stat) {
NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
}
-
- nv_wr32(dev, 0x2140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index eb18a7e89f5b..3de9b721d8db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -299,6 +299,14 @@ nvc0_graph_takedown(struct drm_device *dev)
}
static int
+nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
nvc0_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -395,6 +403,7 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 25, nvc0_runk140_isr);
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
@@ -640,7 +649,6 @@ nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nvc0_graph_priv *priv;
int ret;
dev_priv->engine.graph.accel_blocked = true;
@@ -665,7 +673,6 @@ nvc0_graph_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgraph->priv;
nvc0_graph_init_obj418880(dev);
nvc0_graph_init_regs(dev);
@@ -730,9 +737,12 @@ nvc0_graph_isr(struct drm_device *dev)
u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
if (stat & 0x00000010) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
nv_wr32(dev, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index c09091749054..82357d2df1f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan)
return;
nouveau_vm_ref(NULL, &chan->vm, NULL);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index e4e83c2caf5b..69af0ba7edd3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u32 next = 1 << (vma->node->type - 8);
@@ -75,11 +75,11 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 858eda5dedd1..67c6ec6f34ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -26,64 +26,78 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+ 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+ 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
bool
nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0xfe00:
- case 0xdb00:
- case 0x1100:
- return true;
- default:
- break;
- }
-
- return false;
+ u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+ return likely((types[memtype] == 1));
}
int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
- u32 type, struct nouveau_vram **pvram)
+ u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
int ret;
size >>= 12;
align >>= 12;
ncmin >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (type & 0xff);
+ mem->size = size;
mutex_lock(&mm->mutex);
do {
ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 18c3c71e41b1..b9e8efd2b754 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -71,10 +71,7 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -89,16 +86,21 @@ int r128_driver_load(struct drm_device *dev, unsigned long flags)
return drm_vblank_init(dev, 1);
}
+static struct pci_driver r128_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &r128_pci_driver);
}
static void __exit r128_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &r128_pci_driver);
}
module_init(r128_init);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a4e5e53e0a62..a2199fe9fa9b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1026,7 +1026,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1135,7 +1135,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1181,7 +1181,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
}
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1292,7 +1292,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d270b3ff896b..e91bacebb54e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2988,7 +2988,7 @@ int evergreen_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2adfb03f479b..3218287f4c51 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -579,7 +579,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += evergreen_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 345a75a03c96..5c84fca00d36 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -942,6 +942,37 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
DRM_ERROR("bad CONTEXT_CONTROL\n");
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 56deae5bf02e..37b95f097420 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3490,7 +3490,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
- track->aaresolve = true;
+ track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -3639,7 +3639,7 @@ int r100_ib_test(struct radeon_device *rdev)
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
@@ -3659,13 +3659,13 @@ int r100_ib_init(struct radeon_device *rdev)
r = radeon_ib_pool_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failled testing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
@@ -3801,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
- /* Initialize GPU configuration (# pipes, ...) */
-// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3823,12 +3821,12 @@ static int r100_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 069efa8c8ecf..8713731fa014 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1401,12 +1401,12 @@ static int r300_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 0b59ed7c7d2c..417fab81812f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -260,13 +260,13 @@ static int r420_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r420_cp_errata_init(rdev);
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2ce80d976568..3081d07f8de5 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -193,12 +193,12 @@ static int r520_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index de88624d5f87..2327a99143b6 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2464,7 +2464,7 @@ int r600_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -2740,7 +2740,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b5443fe1c1d1..846fae576399 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_asic.h"
#include "atom.h"
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 41f7aafc97c4..2fed91750126 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,7 +512,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 153095fba62f..0a0848f0346d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,75 +71,167 @@ struct r600_cs_track {
u64 db_bo_mc;
};
+#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
+#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
+#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
+#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
+#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
+
+struct gpu_formats {
+ unsigned blockwidth;
+ unsigned blockheight;
+ unsigned blocksize;
+ unsigned valid_color;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+ /* 8 bit */
+ FMT_8_BIT(V_038004_COLOR_8, 1),
+ FMT_8_BIT(V_038004_COLOR_4_4, 1),
+ FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+ FMT_8_BIT(V_038004_FMT_1, 0),
+
+ /* 16-bit */
+ FMT_16_BIT(V_038004_COLOR_16, 1),
+ FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+ FMT_16_BIT(V_038004_COLOR_8_8, 1),
+ FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+ FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+ FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+ /* 24-bit */
+ FMT_24_BIT(V_038004_FMT_8_8_8),
+
+ /* 32-bit */
+ FMT_32_BIT(V_038004_COLOR_32, 1),
+ FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+ FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+ FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+ FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+ /* 48-bit */
+ FMT_48_BIT(V_038004_FMT_16_16_16),
+ FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+ /* 64-bit */
+ FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+ FMT_96_BIT(V_038004_FMT_32_32_32),
+ FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+ /* 128-bit */
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+ [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+ [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+ /* block compressed formats */
+ [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC5] = { 4, 4, 16, 0},
+
+};
+
+static inline bool fmt_is_valid_color(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].valid_color)
+ return true;
+
+ return false;
+}
+
+static inline bool fmt_is_valid_texture(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].blockwidth > 0)
+ return true;
+
+ return false;
+}
+
+static inline int fmt_get_blocksize(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ return color_formats_table[format].blocksize;
+}
+
+static inline int fmt_get_nblocksx(u32 format, u32 w)
+{
+ unsigned bw;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bw = color_formats_table[format].blockwidth;
+ if (bw == 0)
+ return 0;
+
+ return (w + bw - 1) / bw;
+}
+
+static inline int fmt_get_nblocksy(u32 format, u32 h)
+{
+ unsigned bh;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bh = color_formats_table[format].blockheight;
+ if (bh == 0)
+ return 0;
+
+ return (h + bh - 1) / bh;
+}
+
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
- switch (format) {
- case V_038004_COLOR_8:
- case V_038004_COLOR_4_4:
- case V_038004_COLOR_3_3_2:
- case V_038004_FMT_1:
- *bpe = 1;
- break;
- case V_038004_COLOR_16:
- case V_038004_COLOR_16_FLOAT:
- case V_038004_COLOR_8_8:
- case V_038004_COLOR_5_6_5:
- case V_038004_COLOR_6_5_5:
- case V_038004_COLOR_1_5_5_5:
- case V_038004_COLOR_4_4_4_4:
- case V_038004_COLOR_5_5_5_1:
- *bpe = 2;
- break;
- case V_038004_FMT_8_8_8:
- *bpe = 3;
- break;
- case V_038004_COLOR_32:
- case V_038004_COLOR_32_FLOAT:
- case V_038004_COLOR_16_16:
- case V_038004_COLOR_16_16_FLOAT:
- case V_038004_COLOR_8_24:
- case V_038004_COLOR_8_24_FLOAT:
- case V_038004_COLOR_24_8:
- case V_038004_COLOR_24_8_FLOAT:
- case V_038004_COLOR_10_11_11:
- case V_038004_COLOR_10_11_11_FLOAT:
- case V_038004_COLOR_11_11_10:
- case V_038004_COLOR_11_11_10_FLOAT:
- case V_038004_COLOR_2_10_10_10:
- case V_038004_COLOR_8_8_8_8:
- case V_038004_COLOR_10_10_10_2:
- case V_038004_FMT_5_9_9_9_SHAREDEXP:
- case V_038004_FMT_32_AS_8:
- case V_038004_FMT_32_AS_8_8:
- *bpe = 4;
- break;
- case V_038004_COLOR_X24_8_32_FLOAT:
- case V_038004_COLOR_32_32:
- case V_038004_COLOR_32_32_FLOAT:
- case V_038004_COLOR_16_16_16_16:
- case V_038004_COLOR_16_16_16_16_FLOAT:
- *bpe = 8;
- break;
- case V_038004_FMT_16_16_16:
- case V_038004_FMT_16_16_16_FLOAT:
- *bpe = 6;
- break;
- case V_038004_FMT_32_32_32:
- case V_038004_FMT_32_32_32_FLOAT:
- *bpe = 12;
- break;
- case V_038004_COLOR_32_32_32_32:
- case V_038004_COLOR_32_32_32_32_FLOAT:
- *bpe = 16;
- break;
- case V_038004_FMT_GB_GR:
- case V_038004_FMT_BG_RG:
- case V_038004_COLOR_INVALID:
- default:
- *bpe = 16;
- return -EINVAL;
- }
+ unsigned res;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ goto fail;
+
+ res = color_formats_table[format].blocksize;
+ if (res == 0)
+ goto fail;
+
+ *bpe = res;
return 0;
+
+fail:
+ *bpe = 16;
+ return -EINVAL;
}
struct array_mode_checker {
@@ -148,7 +240,7 @@ struct array_mode_checker {
u32 nbanks;
u32 npipes;
u32 nsamples;
- u32 bpe;
+ u32 blocksize;
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
@@ -162,7 +254,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
u32 tile_height = 8;
u32 macro_tile_width = values->nbanks;
u32 macro_tile_height = values->npipes;
- u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+ u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
switch (values->array_mode) {
@@ -174,7 +266,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
*base_align = 1;
break;
case ARRAY_LINEAR_ALIGNED:
- *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -182,7 +274,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_1D_TILED_THIN1:
*pitch_align = max((u32)tile_width,
(u32)(values->group_size /
- (tile_height * values->bpe * values->nsamples)));
+ (tile_height * values->blocksize * values->nsamples)));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -190,12 +282,12 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_2D_TILED_THIN1:
*pitch_align = max((u32)macro_tile_width,
(u32)(((values->group_size / tile_height) /
- (values->bpe * values->nsamples)) *
+ (values->blocksize * values->nsamples)) *
values->nbanks)) * tile_width;
*height_align = macro_tile_height * tile_height;
*depth_align = 1;
*base_align = max(macro_tile_bytes,
- (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+ (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
break;
default:
return -EINVAL;
@@ -234,21 +326,22 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, slice_tile_max, size, tmp;
+ u32 slice_tile_max, size, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
-
+ u32 format;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
- if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ format = G_0280A0_FORMAT(track->cb_color_info[i]);
+ if (!fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ __func__, __LINE__, format,
i, track->cb_color_info[i]);
return -EINVAL;
}
@@ -267,7 +360,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -311,7 +404,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = height * pitch * bpe;
+ tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -436,7 +529,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -1113,39 +1206,61 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return 0;
}
-static inline unsigned minify(unsigned size, unsigned levels)
+static inline unsigned mip_minify(unsigned size, unsigned level)
{
- size = size >> levels;
- if (size < 1)
- size = 1;
- return size;
+ unsigned val;
+
+ val = max(1U, size >> level);
+ if (level > 0)
+ val = roundup_pow_of_two(val);
+ return val;
}
-static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
- unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
- unsigned pitch_align,
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ unsigned w0, unsigned h0, unsigned d0, unsigned format,
+ unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level, face;
- unsigned width, height, depth, rowstride, size;
-
- w0 = minify(w0, 0);
- h0 = minify(h0, 0);
- d0 = minify(d0, 0);
+ unsigned offset, i, level;
+ unsigned width, height, depth, size;
+ unsigned blocksize;
+ unsigned nbx, nby;
+ unsigned nlevels = llevel - blevel + 1;
+
+ *l0_size = -1;
+ blocksize = fmt_get_blocksize(format);
+
+ w0 = mip_minify(w0, 0);
+ h0 = mip_minify(h0, 0);
+ d0 = mip_minify(d0, 0);
for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
- width = minify(w0, i);
- height = minify(h0, i);
- depth = minify(d0, i);
- for(face = 0; face < nfaces; face++) {
- rowstride = ALIGN((width * bpe), pitch_align);
- size = height * rowstride * depth;
- offset += size;
- offset = (offset + 0x1f) & ~0x1f;
- }
+ width = mip_minify(w0, i);
+ nbx = fmt_get_nblocksx(format, width);
+
+ nbx = round_up(nbx, block_align);
+
+ height = mip_minify(h0, i);
+ nby = fmt_get_nblocksy(format, height);
+ nby = round_up(nby, height_align);
+
+ depth = mip_minify(d0, i);
+
+ size = nbx * nby * blocksize;
+ if (nfaces)
+ size *= nfaces;
+ else
+ size *= depth;
+
+ if (i == 0)
+ *l0_size = size;
+
+ if (i == 0 || i == 1)
+ offset = round_up(offset, base_align);
+
+ offset += size;
}
- *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
*mipmap_size = offset;
- if (!nlevels)
+ if (llevel == 0)
*mipmap_size = *l0_size;
if (!blevel)
*mipmap_size -= *l0_size;
@@ -1169,11 +1284,13 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
- u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size;
+ u32 nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3;
u32 height_align, pitch, pitch_align, depth_align;
+ u32 array, barray, larray;
u64 base_align;
struct array_mode_checker array_check;
+ u32 format;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -1199,19 +1316,25 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
case V_038000_SQ_TEX_DIM_3D:
break;
case V_038000_SQ_TEX_DIM_CUBEMAP:
- nfaces = 6;
+ if (p->family >= CHIP_RV770)
+ nfaces = 8;
+ else
+ nfaces = 6;
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ array = 1;
+ break;
case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
- if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ format = G_038004_DATA_FORMAT(word1);
+ if (!fmt_is_valid_texture(format)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ __func__, __LINE__, format);
return -EINVAL;
}
@@ -1222,7 +1345,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1248,25 +1371,34 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
- nlevels = G_038014_LAST_LEVEL(word1);
- r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
- (pitch_align * bpe),
+ llevel = G_038014_LAST_LEVEL(word1);
+ if (array == 1) {
+ barray = G_038014_BASE_ARRAY(word1);
+ larray = G_038014_LAST_ARRAY(word1);
+
+ nfaces = larray - barray + 1;
+ }
+ r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+ pitch_align, height_align, base_align,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
- word0 = radeon_get_ib_value(p, idx + 2) << 8;
- if ((l0_size + word0) > radeon_bo_size(texture)) {
+ if ((l0_size + word2) > radeon_bo_size(texture)) {
dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ w0, h0, format, word2, l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word0 = radeon_get_ib_value(p, idx + 3) << 8;
- if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
+ w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
@@ -1289,6 +1421,38 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
+
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
DRM_ERROR("bad START_3D\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e6a58ed48dcf..50db6d62eec2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/*
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 04bac0bbd3ec..b2b944bcd05a 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1304,6 +1304,11 @@
#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
#define V_038004_FMT_32_32_32 0x0000002F
#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define V_038004_FMT_BC1 0x00000031
+#define V_038004_FMT_BC2 0x00000032
+#define V_038004_FMT_BC3 0x00000033
+#define V_038004_FMT_BC4 0x00000034
+#define V_038004_FMT_BC5 0x00000035
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56c48b67ef3d..55fefe763965 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -258,8 +258,9 @@ struct radeon_bo {
int surface_reg;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object *gobj;
+ struct drm_gem_object gem_base;
};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
struct radeon_bo_list {
struct ttm_validate_buffer tv;
@@ -288,6 +289,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
/*
* GART structures, functions & helpers
@@ -319,6 +329,7 @@ struct radeon_gart {
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
+ bool *ttm_alloced;
bool ready;
};
@@ -331,7 +342,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist);
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr);
/*
@@ -1186,19 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
@@ -1449,59 +1448,12 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern bool r600_card_posted(struct radeon_device *rdev);
-extern void r600_cp_stop(struct radeon_device *rdev);
-extern int r600_cp_start(struct radeon_device *rdev);
-extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_cp_resume(struct radeon_device *rdev);
-extern void r600_cp_fini(struct radeon_device *rdev);
-extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
-extern int r600_pcie_gart_init(struct radeon_device *rdev);
-extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int r600_ib_test(struct radeon_device *rdev);
-extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_scratch_init(struct radeon_device *rdev);
-extern int r600_blit_init(struct radeon_device *rdev);
-extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_asic_reset(struct radeon_device *rdev);
-/* r600 irq */
-extern int r600_irq_init(struct radeon_device *rdev);
-extern void r600_irq_fini(struct radeon_device *rdev);
-extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_irq_set(struct radeon_device *rdev);
-extern void r600_irq_suspend(struct radeon_device *rdev);
-extern void r600_disable_interrupts(struct radeon_device *rdev);
-extern void r600_rlc_stop(struct radeon_device *rdev);
-/* r600 audio */
-extern int r600_audio_init(struct radeon_device *rdev);
-extern int r600_audio_tmds_index(struct drm_encoder *encoder);
-extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-extern int r600_audio_channels(struct radeon_device *rdev);
-extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
-extern int r600_audio_rate(struct radeon_device *rdev);
-extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
-extern void r600_audio_schedule_polling(struct radeon_device *rdev);
-extern void r600_audio_enable_polling(struct drm_encoder *encoder);
-extern void r600_audio_disable_polling(struct drm_encoder *encoder);
-extern void r600_audio_fini(struct radeon_device *rdev);
-extern void r600_hdmi_init(struct drm_encoder *encoder);
+/*
+ * r600 functions used by radeon_encoder.c
+ */
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-
-extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void r700_cp_stop(struct radeon_device *rdev);
-extern void r700_cp_fini(struct radeon_device *rdev);
-extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-extern int evergreen_irq_set(struct radeon_device *rdev);
-extern int evergreen_blit_init(struct radeon_device *rdev);
-extern void evergreen_blit_fini(struct radeon_device *rdev);
extern int ni_init_microcode(struct radeon_device *rdev);
extern int btc_mc_load_microcode(struct radeon_device *rdev);
@@ -1513,14 +1465,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev);
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif
-/* evergreen */
-struct evergreen_mc_save {
- u32 vga_control[6];
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 crtc_control[6];
-};
-
#include "radeon_object.h"
#endif
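
The radeon.h change above embeds the GEM object in struct radeon_bo (gem_base) instead of keeping a gobj pointer, and the gem_to_radeon_bo() macro recovers the buffer object from a GEM pointer with container_of(); the atombios_crtc.c hunks switch their obj->driver_private lookups to it. A self-contained illustration of that embedding pattern (the struct names are invented for the example):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct gem_object { unsigned size; };

	struct bo {
		int placement;
		struct gem_object gem_base;	/* embedded, not pointed to */
	};

	#define gem_to_bo(gobj) container_of((gobj), struct bo, gem_base)

	int main(void)
	{
		struct bo bo = { .placement = 4, .gem_base = { .size = 4096 } };
		struct gem_object *gobj = &bo.gem_base;	/* what GEM code hands back */

		printf("placement %d, size %u\n",
		       gem_to_bo(gobj)->placement, gobj->size);
		return 0;
	}
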
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c59bd98a2029..1c7317e3aa8c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev);
void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
@@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
-
/*
* rs600.
*/
@@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
-uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
@@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-int r600_irq_process(struct radeon_device *rdev);
-int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+int r600_audio_tmds_index(struct drm_encoder *encoder);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+int r600_audio_channels(struct radeon_device *rdev);
+int r600_audio_bits_per_sample(struct radeon_device *rdev);
+int r600_audio_rate(struct radeon_device *rdev);
+uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+uint8_t r600_audio_category_code(struct radeon_device *rdev);
+void r600_audio_schedule_polling(struct radeon_device *rdev);
+void r600_audio_enable_polling(struct drm_encoder *encoder);
+void r600_audio_disable_polling(struct drm_encoder *encoder);
+void r600_audio_fini(struct radeon_device *rdev);
+void r600_hdmi_init(struct drm_encoder *encoder);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
/*
* rv770,rv730,rv710,rv740
@@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-extern void rv770_pm_misc(struct radeon_device *rdev);
-extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
/*
* evergreen
*/
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -374,5 +419,15 @@ extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+void evergreen_blit_fini(struct radeon_device *rdev);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c558685cc637..10191d9372d8 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index eb6b9eed7349..3d599e33b9cc 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2113,9 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 35b5eb8fbe2a..8c1916941871 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].lobj.rdomain = r->read_domains;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4954e2d6ffa2..55807e738ce6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -184,7 +184,7 @@ int radeon_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -860,7 +860,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
- robj = rfb->obj->driver_private;
+ robj = gem_to_radeon_bo(rfb->obj);
/* don't unpin kernel fb objects */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0e657095de7c..4be58793dc17 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* pin the new buffer */
obj = new_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 275b26a708d6..63d2de8771dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -49,9 +49,10 @@
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,16 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
@@ -228,11 +239,6 @@ static struct drm_driver driver_old = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -322,6 +328,9 @@ static struct drm_driver kms_driver = {
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.dma_ioctl = radeon_dma_ioctl_kms,
+ .dumb_create = radeon_mode_dumb_create,
+ .dumb_map_offset = radeon_mode_dumb_mmap,
+ .dumb_destroy = radeon_mode_dumb_destroy,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -336,15 +345,6 @@ static struct drm_driver kms_driver = {
#endif
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
- .suspend = radeon_pci_suspend,
- .resume = radeon_pci_resume,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -354,15 +354,32 @@ static struct drm_driver kms_driver = {
};
static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+static struct pci_driver radeon_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
+static struct pci_driver radeon_kms_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = radeon_pci_probe,
+ .remove = radeon_pci_remove,
+ .suspend = radeon_pci_suspend,
+ .resume = radeon_pci_resume,
+};
static int __init radeon_init(void)
{
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->num_ioctls = radeon_max_ioctl;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && radeon_modeset == -1) {
DRM_INFO("VGACON disable radeon kernel modesetting.\n");
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->driver_features &= ~DRIVER_MODESET;
radeon_modeset = 0;
}
@@ -380,18 +397,19 @@ static int __init radeon_init(void)
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
+ pdriver = &radeon_kms_pci_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
- return drm_init(driver);
+ return drm_pci_init(driver, pdriver);
}
static void __exit radeon_exit(void)
{
- drm_exit(driver);
+ drm_pci_exit(driver, pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5bb5ba..0b7b486c97e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -64,7 +64,7 @@ static struct fb_ops radeonfb_ops = {
};
-static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
@@ -90,7 +90,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
- struct radeon_bo *rbo = gobj->driver_private;
+ struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
int ret;
ret = radeon_bo_reserve(rbo, false);
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
+ int height = mode_cmd->height;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd->pitch * mode_cmd->height;
+ if (rdev->family >= CHIP_R600)
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -128,7 +131,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size);
return -ENOMEM;
}
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -202,7 +205,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
mode_cmd.depth = sizes->surface_depth;
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
@@ -403,14 +406,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
struct radeon_bo *robj;
int size = 0;
- robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
size += radeon_bo_size(robj);
return size;
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95f..f0534ef2f331 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
if (r) {
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (!rdev->gart.ttm_alloced[p])
+ pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
unsigned t;
unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+ /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+ * is requested. */
+ if (dma_addr[i] != DMA_ERROR_CODE) {
+ rdev->gart.ttm_alloced[p] = true;
+ rdev->gart.pages_addr[p] = dma_addr[i];
+ } else {
+ /* we need to support large memory configurations */
+ /* assume that unbind have already been call on the range */
+ rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
+ if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+ /* FIXME: failed to map page (return -ENOMEM?) */
+ radeon_gart_unbind(rdev, offset, pages);
+ return -ENOMEM;
+ }
}
rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+ rdev->gart.num_cpu_pages, GFP_KERNEL);
+ if (rdev->gart.ttm_alloced == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
+ kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
+ rdev->gart.ttm_alloced = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb83dac6..a419b67d8401 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -32,21 +32,18 @@
int radeon_gem_object_init(struct drm_gem_object *obj)
{
- /* we do nothings here */
+ BUG();
+
return 0;
}
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_bo *robj = gobj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(gobj);
- gobj->driver_private = NULL;
if (robj) {
radeon_bo_unref(&robj);
}
-
- drm_gem_object_release(gobj);
- kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
bool discardable, bool kernel,
struct drm_gem_object **obj)
{
- struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
*obj = NULL;
- gobj = drm_gem_object_alloc(rdev->ddev, size);
- if (!gobj) {
- return -ENOMEM;
- }
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- drm_gem_object_unreference_unlocked(gobj);
return r;
}
- gobj->driver_private = robj;
- *obj = gobj;
+ *obj = &robj->gem_base;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
return 0;
}
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
int r;
/* FIXME: reeimplement */
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
/* work out where to validate the buffer to */
domain = wdomain;
if (!domain) {
@@ -228,7 +223,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
@@ -236,23 +231,31 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
- struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(dev, filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
- args->addr_ptr = radeon_bo_mmap_offset(robj);
+ robj = gem_to_radeon_bo(gobj);
+ *offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_mmap *args = data;
+
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -266,7 +269,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
@@ -296,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -335,7 +338,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
goto out;
@@ -345,3 +348,38 @@ out:
drm_gem_object_unreference_unlocked(gobj);
return r;
}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ int r;
+
+ args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->size = args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ r = radeon_gem_object_create(rdev, args->size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_device,
+ &gobj);
+ if (r)
+ return -ENOMEM;
+
+ r = drm_gem_handle_create(file_priv, gobj, &args->handle);
+ if (r) {
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+ }
+ drm_gem_object_handle_unreference_unlocked(gobj);
+ return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8387d32caaa7..68a7b22c1fe9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -58,9 +58,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)rdev;
/* update BUS flag */
- if (drm_device_is_agp(dev)) {
+ if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (drm_device_is_pcie(dev)) {
+ } else if (drm_pci_device_is_pcie(dev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -205,6 +205,17 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* return clock value in KHz */
value = rdev->clock.spll.reference_freq * 10;
break;
+ case RADEON_INFO_NUM_BACKENDS:
+ if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.max_backends;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.max_backends;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.max_backends;
+ else {
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c3b7c7..9ae599eb2e6d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -520,7 +520,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index a670caaee29e..5067d18d0009 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -676,4 +676,5 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d6b8e88f746..976c3b1b1b6e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.num_busy_placement = c;
}
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
@@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long max_size = 0;
int r;
+ size = ALIGN(size, PAGE_SIZE);
+
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
}
@@ -118,8 +121,13 @@ retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
+ r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+ if (unlikely(r)) {
+ kfree(bo);
+ return r;
+ }
bo->rdev = rdev;
- bo->gobj = gobj;
+ bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
radeon_ttm_placement_from_domain(bo, domain);
@@ -142,12 +150,9 @@ retry:
return r;
}
*bo_ptr = bo;
- if (gobj) {
- mutex_lock(&bo->rdev->gem.mutex);
- list_add_tail(&bo->list, &rdev->gem.objects);
- mutex_unlock(&bo->rdev->gem.mutex);
- }
+
trace_radeon_bo_create(bo);
+
return 0;
}
@@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
void radeon_bo_force_delete(struct radeon_device *rdev)
{
struct radeon_bo *bo, *n;
- struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
@@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = bo->gobj;
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- gobj, bo, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
+ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+ *((unsigned long *)&bo->gem_base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
- radeon_bo_unref(&bo);
- gobj->driver_private = NULL;
- drm_gem_object_unreference(gobj);
+ /* this should unref the ttm bo */
+ drm_gem_object_unreference(&bo->gem_base);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 22d4c237dea5..7f8e778dba46 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
}
extern int radeon_bo_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj, unsigned long size,
- int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 06e79822a2bf..bbc9cd823334 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+ DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib);
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5b44f652145c..dee4a0c1b4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e5b2cf10cbf4..e446979e0e0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
@@ -647,6 +647,7 @@ struct radeon_ttm_backend {
unsigned long num_pages;
struct page **pages;
struct page *dummy_read_page;
+ dma_addr_t *dma_addrs;
bool populated;
bool bound;
unsigned offset;
@@ -655,12 +656,14 @@ struct radeon_ttm_backend {
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct radeon_ttm_backend *gtt;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = pages;
+ gtt->dma_addrs = dma_addrs;
gtt->num_pages = num_pages;
gtt->dummy_read_page = dummy_read_page;
gtt->populated = true;
@@ -673,6 +676,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = NULL;
+ gtt->dma_addrs = NULL;
gtt->num_pages = 0;
gtt->dummy_read_page = NULL;
gtt->populated = false;
@@ -693,7 +697,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
gtt->num_pages, bo_mem, backend);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages);
+ gtt->num_pages, gtt->pages, gtt->dma_addrs);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
gtt->num_pages, gtt->offset);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c76283d9eb3d..aa6a66eeb4ec 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -412,12 +412,12 @@ static int rs400_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5afe294ed51f..26807bf87473 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -866,12 +866,12 @@ static int rs600_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 6638c8e4c81b..9c2dc966c3ff 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -628,12 +628,12 @@ static int rs690_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 64b57af93714..6613ee9ecca3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -398,12 +398,12 @@ static int rv515_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d8ba67690656..5007b54f914f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1003,7 +1003,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
u64 gpu_addr;
if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
@@ -1210,7 +1210,7 @@ int rv770_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index fa64d25d4248..6464490b240b 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -55,11 +55,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -68,15 +63,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver savage_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &savage_pci_driver);
}
static void __exit savage_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &savage_pci_driver);
}
module_init(savage_init);
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4caf5d01cfd3..46d5be6e97e5 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -82,10 +82,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -95,15 +91,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver sis_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init sis_init(void)
{
driver.num_ioctls = sis_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &sis_pci_driver);
}
static void __exit sis_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &sis_pci_driver);
}
module_init(sis_init);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index b70fa91d761a..8bf98810a8d6 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -52,10 +52,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -65,14 +61,19 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver tdfx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init tdfx_init(void)
{
- return drm_init(&driver);
+ return drm_pci_init(&driver, &tdfx_pci_driver);
}
static void __exit tdfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &tdfx_pci_driver);
}
module_init(tdfx_init);
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f999e36f30b4..1c4a72f681c1 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
static int ttm_agp_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct ttm_agp_backend *agp_be =
container_of(backend, struct ttm_agp_backend, backend);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index af61fc29e843..0b6a55ac2f87 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -406,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
mem->mm_node = NULL;
goto moved;
}
-
}
if (bdev->driver->move_notify)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1e02fffd3cc..737a2a2e46a5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <asm/atomic.h>
@@ -662,7 +663,8 @@ out:
* cached pages.
*/
int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count)
+ enum ttm_caching_state cstate, unsigned count,
+ dma_addr_t *dma_address)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
@@ -681,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
- p = alloc_page(gfp_flags);
+ if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+ void *addr;
+ addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_address[r],
+ gfp_flags);
+ if (addr == NULL)
+ return -ENOMEM;
+ p = virt_to_page(addr);
+ } else
+ p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
-
list_add(&p->lru, pages);
}
return 0;
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate);
+ ttm_put_pages(pages, 0, flags, cstate, NULL);
return r;
}
}
@@ -731,17 +741,29 @@ int ttm_get_pages(struct list_head *pages, int flags,
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate)
+ enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
+ unsigned r;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
+ r = page_count-1;
list_for_each_entry_safe(p, tmp, pages, lru) {
- __free_page(p);
+ if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+ void *addr = page_address(p);
+ WARN_ON(!addr || !dma_address[r]);
+ if (addr)
+ dma_free_coherent(NULL, PAGE_SIZE,
+ addr,
+ dma_address[r]);
+ dma_address[r] = 0;
+ } else
+ __free_page(p);
+ r--;
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index af789dc869b9..86d5b1745a45 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+ ttm->dma_address = drm_calloc_large(ttm->num_pages,
+ sizeof(*ttm->dma_address));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
drm_free_large(ttm->pages);
ttm->pages = NULL;
+ drm_free_large(ttm->dma_address);
+ ttm->dma_address = NULL;
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
INIT_LIST_HEAD(&h);
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+ &ttm->dma_address[index]);
if (ret != 0)
return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
}
be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page);
+ ttm->dummy_read_page, ttm->dma_address);
ttm->state = tt_unbound;
return 0;
}
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
count++;
}
}
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+ ttm->dma_address);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e1ff4e7a6eb0..920a55214bcf 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -62,10 +62,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -75,16 +71,21 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver via_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init via_init(void)
{
driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &via_pci_driver);
}
static void __exit via_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &via_pci_driver);
}
module_init(via_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 80bc37b274e7..87e43e0733bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -102,7 +102,8 @@ struct vmw_ttm_backend {
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 10ca97ee0206..96949b93d920 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -909,15 +909,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = VMWGFX_DRIVER_NAME,
- .id_table = vmw_pci_id_list,
- .probe = vmw_probe,
- .remove = vmw_remove,
- .driver = {
- .pm = &vmw_pm_ops
- }
- },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
@@ -926,6 +917,16 @@ static struct drm_driver driver = {
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
+static struct pci_driver vmw_pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .driver = {
+ .pm = &vmw_pm_ops
+ }
+};
+
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
@@ -934,7 +935,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
- ret = drm_init(&driver);
+ ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
return ret;
@@ -942,7 +943,7 @@ static int __init vmwgfx_init(void)
static void __exit vmwgfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &vmw_pci_driver);
}
module_init(vmwgfx_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 29113c9b26a8..b3a2cd5118d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -345,7 +345,7 @@ static enum drm_connector_status
return connector_status_disconnected;
}
-static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
@@ -429,7 +429,6 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -459,6 +458,8 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ const struct drm_display_mode *bmode;
+
bmode = &vmw_ldu_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 2560f01c1a63..4b88bbe9ed07 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -180,6 +180,14 @@ config HID_EZKEY
---help---
Support for Ezkey BTC 8193 keyboard.
+config HID_KEYTOUCH
+ tristate "Keyoutch HID devices"
+ depends on USB_HID
+ ---help---
+ Support for Keytouch HID devices not fully compliant with
+ the specification. Currently supported:
+ - Keytouch IEC 60945
+
config HID_KYE
tristate "Kye/Genius Ergo Mouse" if EXPERT
depends on USB_HID
@@ -218,6 +226,12 @@ config HID_KENSINGTON
---help---
Support for Kensington Slimblade Trackball.
+config HID_LCPOWER
+ tristate "LC-Power"
+ depends on USB_HID
+ ---help---
+ Support for LC-Power RC1000MCE RF remote control.
+
config HID_LOGITECH
tristate "Logitech devices" if EXPERT
depends on USB_HID
@@ -304,6 +318,7 @@ config HID_MULTITOUCH
Say Y here if you have one of the following devices:
- Cypress TrueTouch panels
- Hanvon dual touch panels
+ - IrTouch Infrared USB panels
- Pixcir dual touch panels
- 'Sensing Win7-TwoFinger' panel by GeneralTouch
@@ -417,10 +432,22 @@ config HID_ROCCAT
Say Y here if you have a Roccat mouse or keyboard and want OSD or
macro execution support.
+config HID_ROCCAT_COMMON
+ tristate
+
+config HID_ROCCAT_ARVO
+ tristate "Roccat Arvo keyboard support"
+ depends on USB_HID
+ select HID_ROCCAT
+ select HID_ROCCAT_COMMON
+ ---help---
+ Support for Roccat Arvo keyboard.
+
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
select HID_ROCCAT
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat Kone mouse.
@@ -428,13 +455,23 @@ config HID_ROCCAT_KONEPLUS
tristate "Roccat Kone[+] mouse support"
depends on USB_HID
select HID_ROCCAT
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat Kone[+] mouse.
+config HID_ROCCAT_KOVAPLUS
+ tristate "Roccat Kova[+] mouse support"
+ depends on USB_HID
+ select HID_ROCCAT
+ select HID_ROCCAT_COMMON
+ ---help---
+ Support for Roccat Kova[+] mouse.
+
config HID_ROCCAT_PYRA
tristate "Roccat Pyra mouse support"
depends on USB_HID
select HID_ROCCAT
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat Pyra mouse.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6efc2a0370ad..04496abc7b76 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -41,7 +41,9 @@ obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
+obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
+obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
@@ -56,8 +58,11 @@ obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
+obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
+obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o
+obj-$(CONFIG_HID_ROCCAT_KOVAPLUS) += hid-roccat-kovaplus.o
obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index d678cf3d33d5..7d7034af4ca0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1159,6 +1159,32 @@ static bool hid_hiddev(struct hid_device *hdev)
return !!hid_match_id(hdev, hid_hiddev_list);
}
+
+static ssize_t
+read_report_descriptor(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+
+ if (off >= hdev->rsize)
+ return 0;
+
+ if (off + count > hdev->rsize)
+ count = hdev->rsize - off;
+
+ memcpy(buf, hdev->rdesc + off, count);
+
+ return count;
+}
+
+static struct bin_attribute dev_bin_attr_report_desc = {
+ .attr = { .name = "report_descriptor", .mode = 0444 },
+ .read = read_report_descriptor,
+ .size = HID_MAX_DESCRIPTOR_SIZE,
+};
+
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
@@ -1169,6 +1195,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
char buf[64];
unsigned int i;
int len;
+ int ret;
if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
@@ -1230,6 +1257,11 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
bus = "<UNKNOWN>";
}
+ ret = device_create_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
+ if (ret)
+ hid_warn(hdev,
+ "can't create sysfs report descriptor attribute err: %d\n", ret);
+
hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
buf, bus, hdev->version >> 8, hdev->version & 0xff,
type, hdev->name, hdev->phys);
@@ -1240,6 +1272,7 @@ EXPORT_SYMBOL_GPL(hid_connect);
void hid_disconnect(struct hid_device *hdev)
{
+ device_remove_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
if (hdev->claimed & HID_CLAIMED_INPUT)
hidinput_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDDEV)
@@ -1345,9 +1378,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1405,7 +1441,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 3975e039c3dd..e88b951cd10d 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -43,6 +43,11 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case 0x048: gy_map_key_clear(KEY_MEDIA); break;
case 0x049: gy_map_key_clear(KEY_CAMERA); break;
case 0x04a: gy_map_key_clear(KEY_VIDEO); break;
+ case 0x05a: gy_map_key_clear(KEY_TEXT); break;
+ case 0x05b: gy_map_key_clear(KEY_RED); break;
+ case 0x05c: gy_map_key_clear(KEY_GREEN); break;
+ case 0x05d: gy_map_key_clear(KEY_YELLOW); break;
+ case 0x05e: gy_map_key_clear(KEY_BLUE); break;
default:
return 0;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 92a0d61a7379..f43525218fa2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -333,6 +333,9 @@
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
+#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
+#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
+
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
@@ -345,6 +348,9 @@
#define USB_VENDOR_ID_KWORLD 0x1b80
#define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700
+#define USB_VENDOR_ID_KEYTOUCH 0x0926
+#define USB_DEVICE_ID_KEYTOUCH_IEC 0x3333
+
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
@@ -352,6 +358,9 @@
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
+#define USB_VENDOR_ID_LCPOWER 0x1241
+#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
+
#define USB_VENDOR_ID_LD 0x0f11
#define USB_DEVICE_ID_LD_CASSY 0x1000
#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010
@@ -496,8 +505,10 @@
#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
+#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7f552bfad32c..33dde8724e02 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -290,14 +290,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
goto ignore;
}
- if (field->report_type == HID_FEATURE_REPORT) {
- if (device->driver->feature_mapping) {
- device->driver->feature_mapping(device, hidinput, field,
- usage);
- }
- goto ignore;
- }
-
if (device->driver->input_mapping) {
int ret = device->driver->input_mapping(device, hidinput, field,
usage, &bit, &max);
@@ -835,6 +827,24 @@ static void hidinput_close(struct input_dev *dev)
hid_hw_close(hid);
}
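+/* Run the driver's feature_mapping callback over every usage of every feature report */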
+static void report_features(struct hid_device *hid)
+{
+ struct hid_driver *drv = hid->driver;
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ int i, j;
+
+ if (!drv->feature_mapping)
+ return;
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list)
+ for (i = 0; i < rep->maxfield; i++)
+ for (j = 0; j < rep->field[i]->maxusage; j++)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -863,7 +873,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
return -1;
}
- for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) {
+ report_features(hid);
+
+ for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) {
if (k == HID_OUTPUT_REPORT &&
hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
continue;
@@ -888,8 +900,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
hid->ll_driver->hidinput_input_event;
input_dev->open = hidinput_open;
input_dev->close = hidinput_close;
- input_dev->setkeycode_new = hidinput_setkeycode;
- input_dev->getkeycode_new = hidinput_getkeycode;
+ input_dev->setkeycode = hidinput_setkeycode;
+ input_dev->getkeycode = hidinput_getkeycode;
input_dev->name = hid->name;
input_dev->phys = hid->phys;
@@ -928,6 +940,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
return 0;
out_cleanup:
+ list_del(&hidinput->list);
input_free_device(hidinput->input);
kfree(hidinput);
out_unwind:
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
new file mode 100644
index 000000000000..07cd825f6f01
--- /dev/null
+++ b/drivers/hid/hid-keytouch.c
@@ -0,0 +1,66 @@
+/*
+ * HID driver for Keytouch devices not fully compliant with HID standard
+ *
+ * Copyright (c) 2011 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+/* Replace the broken report descriptor of this device with
+ * a sane default one */
+static __u8 keytouch_fixed_rdesc[] = {
+0x05, 0x01, 0x09, 0x06, 0xa1, 0x01, 0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15,
+0x00, 0x25, 0x01, 0x75, 0x01, 0x95, 0x08, 0x81, 0x02, 0x95, 0x01, 0x75, 0x08,
+0x81, 0x01, 0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91,
+0x02, 0x95, 0x05, 0x75, 0x01, 0x91, 0x01, 0x95, 0x06, 0x75, 0x08, 0x15, 0x00,
+0x26, 0xff, 0x00, 0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00, 0xc0
+};
+
+static __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ hid_info(hdev, "fixing up Keytouch IEC report descriptor\n");
+
+ rdesc = keytouch_fixed_rdesc;
+ *rsize = sizeof(keytouch_fixed_rdesc);
+
+ return rdesc;
+}
+
+static const struct hid_device_id keytouch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, keytouch_devices);
+
+static struct hid_driver keytouch_driver = {
+ .name = "keytouch",
+ .id_table = keytouch_devices,
+ .report_fixup = keytouch_report_fixup,
+};
+
+static int __init keytouch_init(void)
+{
+ return hid_register_driver(&keytouch_driver);
+}
+
+static void __exit keytouch_exit(void)
+{
+ hid_unregister_driver(&keytouch_driver);
+}
+
+module_init(keytouch_init);
+module_exit(keytouch_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jiri Kosina");
diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c
new file mode 100644
index 000000000000..c4fe9bd095b7
--- /dev/null
+++ b/drivers/hid/hid-lcpower.c
@@ -0,0 +1,70 @@
+/*
+ * HID driver for LC Power Model RC1000MCE
+ *
+ * Copyright (c) 2011 Chris Schlund
+ * based on hid-topseed module
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
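+/* Remap buttons on the remote's vendor-specific usage page (0xffbc) to standard input keycodes */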
+#define ts_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
+ EV_KEY, (c))
+static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != 0xffbc0000)
+ return 0;
+
+ switch (usage->hid & HID_USAGE) {
+ case 0x046: ts_map_key_clear(KEY_YELLOW); break;
+ case 0x047: ts_map_key_clear(KEY_GREEN); break;
+ case 0x049: ts_map_key_clear(KEY_BLUE); break;
+ case 0x04a: ts_map_key_clear(KEY_RED); break;
+ case 0x00d: ts_map_key_clear(KEY_HOME); break;
+ case 0x025: ts_map_key_clear(KEY_TV); break;
+ case 0x048: ts_map_key_clear(KEY_VCR); break;
+ case 0x024: ts_map_key_clear(KEY_MENU); break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static const struct hid_device_id ts_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, ts_devices);
+
+static struct hid_driver ts_driver = {
+ .name = "LC RC1000MCE",
+ .id_table = ts_devices,
+ .input_mapping = ts_input_mapping,
+};
+
+static int __init ts_init(void)
+{
+ return hid_register_driver(&ts_driver);
+}
+
+static void __exit ts_exit(void)
+{
+ hid_unregister_driver(&ts_driver);
+}
+
+module_init(ts_init);
+module_exit(ts_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 07d3183fdde5..65e7f20c4b22 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -24,6 +24,7 @@
MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_DESCRIPTION("HID multitouch panels");
MODULE_LICENSE("GPL");
@@ -65,10 +66,10 @@ struct mt_class {
};
/* classes of device behavior */
-#define MT_CLS_DEFAULT 1
-#define MT_CLS_DUAL1 2
-#define MT_CLS_DUAL2 3
-#define MT_CLS_CYPRESS 4
+#define MT_CLS_DEFAULT 1
+#define MT_CLS_DUAL_INRANGE_CONTACTID 2
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 3
+#define MT_CLS_CYPRESS 4
/*
* these device-dependent functions determine what slot corresponds
@@ -104,13 +105,13 @@ static int find_slot_from_contactid(struct mt_device *td)
struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
- .quirks = MT_QUIRK_VALID_IS_INRANGE,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP,
.maxcontacts = 10 },
- { .name = MT_CLS_DUAL1,
+ { .name = MT_CLS_DUAL_INRANGE_CONTACTID,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2 },
- { .name = MT_CLS_DUAL2,
+ { .name = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER,
.maxcontacts = 2 },
@@ -122,7 +123,7 @@ struct mt_class mt_classes[] = {
{ }
};
-static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi,
+static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
if (usage->hid == HID_DG_INPUTMODE) {
@@ -466,15 +467,20 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
/* GeneralTouch panel */
- { .driver_data = MT_CLS_DUAL2,
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
+ /* IRTOUCH panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
+ USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
+
/* PixCir-based panels */
- { .driver_data = MT_CLS_DUAL1,
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
USB_DEVICE_ID_HANVON_MULTITOUCH) },
- { .driver_data = MT_CLS_DUAL1,
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
new file mode 100644
index 000000000000..2307471d96dc
--- /dev/null
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -0,0 +1,450 @@
+/*
+ * Roccat Arvo driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Arvo is a gaming keyboard with 5 macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-arvo.h"
+
+static struct class *arvo_class;
+
+static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_mode_key temp_buf;
+ int retval;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ &temp_buf, sizeof(struct arvo_mode_key));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.state);
+}
+
+static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_mode_key temp_buf;
+ unsigned long state;
+ int retval;
+
+ retval = strict_strtoul(buf, 10, &state);
+ if (retval)
+ return retval;
+
+ temp_buf.command = ARVO_COMMAND_MODE_KEY;
+ temp_buf.state = state;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ &temp_buf, sizeof(struct arvo_mode_key));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_key_mask temp_buf;
+ int retval;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ &temp_buf, sizeof(struct arvo_key_mask));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.key_mask);
+}
+
+static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_key_mask temp_buf;
+ unsigned long key_mask;
+ int retval;
+
+ retval = strict_strtoul(buf, 10, &key_mask);
+ if (retval)
+ return retval;
+
+ temp_buf.command = ARVO_COMMAND_KEY_MASK;
+ temp_buf.key_mask = key_mask;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ &temp_buf, sizeof(struct arvo_key_mask));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+/* retval is 1-5 on success, < 0 on error */
+static int arvo_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct arvo_actual_profile temp_buf;
+ int retval;
+
+ retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ &temp_buf, sizeof(struct arvo_actual_profile));
+
+ if (retval)
+ return retval;
+
+ return temp_buf.actual_profile;
+}
+
+static ssize_t arvo_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", arvo->actual_profile);
+}
+
+static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_actual_profile temp_buf;
+ unsigned long profile;
+ int retval;
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE;
+ temp_buf.actual_profile = profile;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ &temp_buf, sizeof(struct arvo_actual_profile));
+ if (!retval) {
+ arvo->actual_profile = profile;
+ retval = size;
+ }
+ mutex_unlock(&arvo->arvo_lock);
+ return retval;
+}
+
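+/* Helper for the binary sysfs attributes: only whole-buffer writes starting at offset 0 are accepted */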
+static ssize_t arvo_sysfs_write(struct file *fp,
+ struct kobject *kobj, void const *buf,
+ loff_t off, size_t count, size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, command, buf, real_size);
+ mutex_unlock(&arvo->arvo_lock);
+
+ return (retval ? retval : real_size);
+}
+
+static ssize_t arvo_sysfs_read(struct file *fp,
+ struct kobject *kobj, void *buf, loff_t off,
+ size_t count, size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&arvo->arvo_lock);
+
+ return (retval ? retval : real_size);
+}
+
+static ssize_t arvo_sysfs_write_button(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return arvo_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct arvo_button), ARVO_USB_COMMAND_BUTTON);
+}
+
+static ssize_t arvo_sysfs_read_info(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return arvo_sysfs_read(fp, kobj, buf, off, count,
+ sizeof(struct arvo_info), ARVO_USB_COMMAND_INFO);
+}
+
+
+static struct device_attribute arvo_attributes[] = {
+ __ATTR(mode_key, 0660,
+ arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key),
+ __ATTR(key_mask, 0660,
+ arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask),
+ __ATTR(actual_profile, 0660,
+ arvo_sysfs_show_actual_profile,
+ arvo_sysfs_set_actual_profile),
+ __ATTR_NULL
+};
+
+static struct bin_attribute arvo_bin_attributes[] = {
+ {
+ .attr = { .name = "button", .mode = 0220 },
+ .size = sizeof(struct arvo_button),
+ .write = arvo_sysfs_write_button
+ },
+ {
+ .attr = { .name = "info", .mode = 0440 },
+ .size = sizeof(struct arvo_info),
+ .read = arvo_sysfs_read_info
+ },
+ __ATTR_NULL
+};
+
+static int arvo_init_arvo_device_struct(struct usb_device *usb_dev,
+ struct arvo_device *arvo)
+{
+ int retval;
+
+ mutex_init(&arvo->arvo_lock);
+
+ retval = arvo_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ arvo->actual_profile = retval;
+
+ return 0;
+}
+
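+/* Set up chrdev and sysfs support only on the non-keyboard interface; the plain keyboard interface needs no special handling */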
+static int arvo_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct arvo_device *arvo;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_KEYBOARD) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ arvo = kzalloc(sizeof(*arvo), GFP_KERNEL);
+ if (!arvo) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, arvo);
+
+ retval = arvo_init_arvo_device_struct(usb_dev, arvo);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct arvo_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(arvo_class, hdev,
+ sizeof(struct arvo_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ arvo->chrdev_minor = retval;
+ arvo->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(arvo);
+ return retval;
+}
+
+static void arvo_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct arvo_device *arvo;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_KEYBOARD)
+ return;
+
+ arvo = hid_get_drvdata(hdev);
+ if (arvo->roccat_claimed)
+ roccat_disconnect(arvo->chrdev_minor);
+ kfree(arvo);
+}
+
+static int arvo_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = arvo_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install keyboard\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void arvo_remove(struct hid_device *hdev)
+{
+ arvo_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void arvo_report_to_chrdev(struct arvo_device const *arvo,
+ u8 const *data)
+{
+ struct arvo_special_report const *special_report;
+ struct arvo_roccat_report roccat_report;
+
+ special_report = (struct arvo_special_report const *)data;
+
+ roccat_report.profile = arvo->actual_profile;
+ roccat_report.button = special_report->event &
+ ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON;
+ if ((special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) ==
+ ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS)
+ roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_PRESS;
+ else
+ roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_RELEASE;
+
+ roccat_report_event(arvo->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int arvo_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct arvo_device *arvo = hid_get_drvdata(hdev);
+
+ if (size != 3)
+ return 0;
+
+ if (arvo->roccat_claimed)
+ arvo_report_to_chrdev(arvo, data);
+
+ return 0;
+}
+
+static const struct hid_device_id arvo_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, arvo_devices);
+
+static struct hid_driver arvo_driver = {
+ .name = "arvo",
+ .id_table = arvo_devices,
+ .probe = arvo_probe,
+ .remove = arvo_remove,
+ .raw_event = arvo_raw_event
+};
+
+static int __init arvo_init(void)
+{
+ int retval;
+
+ arvo_class = class_create(THIS_MODULE, "arvo");
+ if (IS_ERR(arvo_class))
+ return PTR_ERR(arvo_class);
+ arvo_class->dev_attrs = arvo_attributes;
+ arvo_class->dev_bin_attrs = arvo_bin_attributes;
+
+ retval = hid_register_driver(&arvo_driver);
+ if (retval)
+ class_destroy(arvo_class);
+ return retval;
+}
+
+static void __exit arvo_exit(void)
+{
+ hid_unregister_driver(&arvo_driver);
+ class_destroy(arvo_class);
+}
+
+module_init(arvo_init);
+module_exit(arvo_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Arvo driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-arvo.h b/drivers/hid/hid-roccat-arvo.h
new file mode 100644
index 000000000000..d284a781c99e
--- /dev/null
+++ b/drivers/hid/hid-roccat-arvo.h
@@ -0,0 +1,98 @@
+#ifndef __HID_ROCCAT_ARVO_H
+#define __HID_ROCCAT_ARVO_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+struct arvo_mode_key { /* 2 bytes */
+ uint8_t command; /* ARVO_COMMAND_MODE_KEY */
+ uint8_t state;
+} __packed;
+
+struct arvo_button {
+ uint8_t unknown[24];
+} __packed;
+
+struct arvo_info {
+ uint8_t unknown[8];
+} __packed;
+
+struct arvo_key_mask { /* 2 bytes */
+ uint8_t command; /* ARVO_COMMAND_KEY_MASK */
+ uint8_t key_mask;
+} __packed;
+
+/* selected profile is persistent */
+struct arvo_actual_profile { /* 2 bytes */
+ uint8_t command; /* ARVO_COMMAND_ACTUAL_PROFILE */
+ uint8_t actual_profile;
+} __packed;
+
+enum arvo_commands {
+ ARVO_COMMAND_MODE_KEY = 0x3,
+ ARVO_COMMAND_BUTTON = 0x4,
+ ARVO_COMMAND_INFO = 0x5,
+ ARVO_COMMAND_KEY_MASK = 0x6,
+ ARVO_COMMAND_ACTUAL_PROFILE = 0x7,
+};
+
+enum arvo_usb_commands {
+ ARVO_USB_COMMAND_MODE_KEY = 0x303,
+ /*
+ * read/write
+ * Read uses both index bytes as profile/key indexes
+ * Write has index 0, profile/key is determined by payload
+ */
+ ARVO_USB_COMMAND_BUTTON = 0x304,
+ ARVO_USB_COMMAND_INFO = 0x305,
+ ARVO_USB_COMMAND_KEY_MASK = 0x306,
+ ARVO_USB_COMMAND_ACTUAL_PROFILE = 0x307,
+};
+
+struct arvo_special_report {
+ uint8_t unknown1; /* always 0x01 */
+ uint8_t event;
+ uint8_t unknown2; /* always 0x70 */
+} __packed;
+
+enum arvo_special_report_events {
+ ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS = 0x10,
+ ARVO_SPECIAL_REPORT_EVENT_ACTION_RELEASE = 0x0,
+};
+
+enum arvo_special_report_event_masks {
+ ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION = 0xf0,
+ ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON = 0x0f,
+};
+
+struct arvo_roccat_report {
+ uint8_t profile;
+ uint8_t button;
+ uint8_t action;
+} __packed;
+
+enum arvo_roccat_report_action {
+ ARVO_ROCCAT_REPORT_ACTION_RELEASE = 0,
+ ARVO_ROCCAT_REPORT_ACTION_PRESS = 1,
+};
+
+struct arvo_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex arvo_lock;
+
+ int actual_profile;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
new file mode 100644
index 000000000000..13b1eb0c8c65
--- /dev/null
+++ b/drivers/hid/hid-roccat-common.c
@@ -0,0 +1,62 @@
+/*
+ * Roccat common functions for device specific drivers
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/slab.h>
+#include "hid-roccat-common.h"
+
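+/* Read a report from the device via a class-specific IN control transfer on endpoint 0 */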
+int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+ void *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(data, buf, size);
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+EXPORT_SYMBOL_GPL(roccat_common_receive);
+
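+/* Write a report to the device via a class-specific OUT control transfer on endpoint 0 */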
+int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+ void const *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ memcpy(buf, data, size);
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+EXPORT_SYMBOL_GPL(roccat_common_send);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat common driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
new file mode 100644
index 000000000000..fe45fae05bb9
--- /dev/null
+++ b/drivers/hid/hid-roccat-common.h
@@ -0,0 +1,23 @@
+#ifndef __HID_ROCCAT_COMMON_H
+#define __HID_ROCCAT_COMMON_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/usb.h>
+#include <linux/types.h>
+
+int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+ void *data, uint size);
+int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+ void const *data, uint size);
+
+#endif
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index cbd8cc42e75a..a57838d15267 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -28,11 +28,11 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
-#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hid-roccat.h>
#include "hid-ids.h"
-#include "hid-roccat.h"
+#include "hid-roccat-common.h"
#include "hid-roccat-kone.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
@@ -58,12 +58,8 @@ static void kone_set_settings_checksum(struct kone_settings *settings)
*/
static int kone_check_write(struct usb_device *usb_dev)
{
- int len;
- unsigned char *data;
-
- data = kmalloc(1, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ int retval;
+ uint8_t data;
do {
/*
@@ -72,56 +68,36 @@ static int kone_check_write(struct usb_device *usb_dev)
*/
msleep(80);
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE |
- USB_DIR_IN,
- kone_command_confirm_write, 0, data, 1,
- USB_CTRL_SET_TIMEOUT);
-
- if (len != 1) {
- kfree(data);
- return -EIO;
- }
+ retval = roccat_common_receive(usb_dev,
+ kone_command_confirm_write, &data, 1);
+ if (retval)
+ return retval;
/*
* value of 3 seems to mean something like
* "not finished yet, but it looks good"
* So check again after a moment.
*/
- } while (*data == 3);
+ } while (data == 3);
- if (*data == 1) { /* everything alright */
- kfree(data);
+ if (data == 1) /* everything alright */
return 0;
- } else { /* unknown answer */
- hid_err(usb_dev, "got retval %d when checking write\n", *data);
- kfree(data);
- return -EIO;
- }
+
+ /* unknown answer */
+ hid_err(usb_dev, "got retval %d when checking write\n", data);
+ return -EIO;
}
/*
* Reads settings from mouse and stores it in @buf
- * @buf has to be alloced with GFP_KERNEL
* On success returns 0
* On failure returns errno
*/
static int kone_get_settings(struct usb_device *usb_dev,
struct kone_settings *buf)
{
- int len;
-
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- kone_command_settings, 0, buf,
- sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
-
- if (len != sizeof(struct kone_settings))
- return -EIO;
-
- return 0;
+ return roccat_common_receive(usb_dev, kone_command_settings, buf,
+ sizeof(struct kone_settings));
}
/*
@@ -132,22 +108,12 @@ static int kone_get_settings(struct usb_device *usb_dev,
static int kone_set_settings(struct usb_device *usb_dev,
struct kone_settings const *settings)
{
- int len;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- kone_command_settings, 0, (char *)settings,
- sizeof(struct kone_settings),
- USB_CTRL_SET_TIMEOUT);
-
- if (len != sizeof(struct kone_settings))
- return -EIO;
-
- if (kone_check_write(usb_dev))
- return -EIO;
-
- return 0;
+ int retval;
+ retval = roccat_common_send(usb_dev, kone_command_settings,
+ settings, sizeof(struct kone_settings));
+ if (retval)
+ return retval;
+ return kone_check_write(usb_dev);
}
/*
@@ -193,7 +159,7 @@ static int kone_set_profile(struct usb_device *usb_dev,
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_CONFIGURATION,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- kone_command_profile, number, (char *)profile,
+ kone_command_profile, number, (void *)profile,
sizeof(struct kone_profile),
USB_CTRL_SET_TIMEOUT);
@@ -213,24 +179,15 @@ static int kone_set_profile(struct usb_device *usb_dev,
*/
static int kone_get_weight(struct usb_device *usb_dev, int *result)
{
- int len;
- uint8_t *data;
+ int retval;
+ uint8_t data;
- data = kmalloc(1, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ retval = roccat_common_receive(usb_dev, kone_command_weight, &data, 1);
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+ if (retval)
+ return retval;
- if (len != 1) {
- kfree(data);
- return -EIO;
- }
- *result = (int)*data;
- kfree(data);
+ *result = (int)data;
return 0;
}
@@ -241,25 +198,15 @@ static int kone_get_weight(struct usb_device *usb_dev, int *result)
*/
static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
{
- int len;
- unsigned char *data;
-
- data = kmalloc(2, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ int retval;
+ uint16_t data;
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- kone_command_firmware_version, 0, data, 2,
- USB_CTRL_SET_TIMEOUT);
+ retval = roccat_common_receive(usb_dev, kone_command_firmware_version,
+ &data, 2);
+ if (retval)
+ return retval;
- if (len != 2) {
- kfree(data);
- return -EIO;
- }
- *result = le16_to_cpu(*data);
- kfree(data);
+ *result = le16_to_cpu(data);
return 0;
}
@@ -435,23 +382,9 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev,
static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
- int len;
- char *value;
-
- value = kmalloc(1, GFP_KERNEL);
- if (!value)
- return -ENOMEM;
-
- *value = number;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- kone_command_calibrate, 0, value, 1,
- USB_CTRL_SET_TIMEOUT);
-
- kfree(value);
- return ((len != 1) ? -EIO : 0);
+ unsigned char value;
+ value = number;
+ return roccat_common_send(usb_dev, kone_command_calibrate, &value, 1);
}
/*
@@ -727,7 +660,8 @@ static int kone_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(kone_class, hdev);
+ retval = roccat_connect(kone_class, hdev,
+ sizeof(struct kone_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
/* be tolerant about not getting chrdev */
@@ -827,8 +761,7 @@ static void kone_report_to_chrdev(struct kone_device const *kone,
roccat_report.value = event->value;
roccat_report.key = 0;
roccat_report_event(kone->chrdev_minor,
- (uint8_t *)&roccat_report,
- sizeof(struct kone_roccat_report));
+ (uint8_t *)&roccat_report);
break;
case kone_mouse_event_call_overlong_macro:
if (event->value == kone_keystroke_action_press) {
@@ -836,8 +769,7 @@ static void kone_report_to_chrdev(struct kone_device const *kone,
roccat_report.value = kone->actual_profile;
roccat_report.key = event->macro_key;
roccat_report_event(kone->chrdev_minor,
- (uint8_t *)&roccat_report,
- sizeof(struct kone_roccat_report));
+ (uint8_t *)&roccat_report);
}
break;
}
@@ -912,8 +844,8 @@ static int __init kone_init(void)
static void __exit kone_exit(void)
{
- class_destroy(kone_class);
hid_unregister_driver(&kone_driver);
+ class_destroy(kone_class);
}
module_init(kone_init);
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 1608c8d1efd6..a8e2117756ac 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -19,11 +19,11 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
-#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hid-roccat.h>
#include "hid-ids.h"
-#include "hid-roccat.h"
+#include "hid-roccat-common.h"
#include "hid-roccat-koneplus.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
@@ -39,110 +39,63 @@ static void koneplus_profile_activated(struct koneplus_device *koneplus,
static int koneplus_send_control(struct usb_device *usb_dev, uint value,
enum koneplus_control_requests request)
{
- int len;
- struct koneplus_control *control;
+ struct koneplus_control control;
if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
- if (!control)
- return -ENOMEM;
+ control.command = KONEPLUS_COMMAND_CONTROL;
+ control.value = value;
+ control.request = request;
- control->command = KONEPLUS_COMMAND_CONTROL;
- control->value = value;
- control->request = request;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- KONEPLUS_USB_COMMAND_CONTROL, 0, control,
- sizeof(struct koneplus_control),
- USB_CTRL_SET_TIMEOUT);
-
- kfree(control);
-
- if (len != sizeof(struct koneplus_control))
- return len;
-
- return 0;
-}
-
-static int koneplus_receive(struct usb_device *usb_dev, uint usb_command,
- void *buf, uint size) {
- int len;
-
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
-
- return (len != size) ? -EIO : 0;
+ return roccat_common_send(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct koneplus_control));
}
static int koneplus_receive_control_status(struct usb_device *usb_dev)
{
int retval;
- struct koneplus_control *control;
-
- control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
- if (!control)
- return -ENOMEM;
+ struct koneplus_control control;
do {
- retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
- control, sizeof(struct koneplus_control));
+ retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct koneplus_control));
/* check if we get a completely wrong answer */
if (retval)
- goto out;
+ return retval;
- if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OK) {
- retval = 0;
- goto out;
- }
+ if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OK)
+ return 0;
/* indicates that hardware needs some more time to complete action */
- if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
+ if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
msleep(500); /* windows driver uses 1000 */
continue;
}
/* seems to be critical - replug necessary */
- if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) {
- retval = -EINVAL;
- goto out;
- }
-
- dev_err(&usb_dev->dev, "koneplus_receive_control_status: "
- "unknown response value 0x%x\n", control->value);
- retval = -EINVAL;
- goto out;
+ if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
+ return -EINVAL;
+ hid_err(usb_dev, "koneplus_receive_control_status: "
+ "unknown response value 0x%x\n", control.value);
+ return -EINVAL;
} while (1);
-out:
- kfree(control);
- return retval;
}
static int koneplus_send(struct usb_device *usb_dev, uint command,
- void *buf, uint size) {
- int len;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
-
- if (len != size)
- return -EIO;
+ void const *buf, uint size)
+{
+ int retval;
- if (koneplus_receive_control_status(usb_dev))
- return -EIO;
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
- return 0;
+ return koneplus_receive_control_status(usb_dev);
}
static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
@@ -167,7 +120,7 @@ static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -181,7 +134,7 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
@@ -189,7 +142,7 @@ static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
- (void *)settings, sizeof(struct koneplus_profile_settings));
+ settings, sizeof(struct koneplus_profile_settings));
}
static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
@@ -202,7 +155,7 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
@@ -210,27 +163,19 @@ static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
- (void *)buttons, sizeof(struct koneplus_profile_buttons));
+ buttons, sizeof(struct koneplus_profile_buttons));
}
/* retval is 0-4 on success, < 0 on error */
static int koneplus_get_startup_profile(struct usb_device *usb_dev)
{
- struct koneplus_startup_profile *buf;
+ struct koneplus_startup_profile buf;
int retval;
- buf = kmalloc(sizeof(struct koneplus_startup_profile), GFP_KERNEL);
+ retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
+ &buf, sizeof(struct koneplus_startup_profile));
- retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
- buf, sizeof(struct koneplus_startup_profile));
-
- if (retval)
- goto out;
-
- retval = buf->startup_profile;
-out:
- kfree(buf);
- return retval;
+ return retval ? retval : buf.startup_profile;
}
static int koneplus_set_startup_profile(struct usb_device *usb_dev,
@@ -243,7 +188,7 @@ static int koneplus_set_startup_profile(struct usb_device *usb_dev,
buf.startup_profile = startup_profile;
return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
- (char *)&buf, sizeof(struct koneplus_profile_buttons));
+ &buf, sizeof(struct koneplus_profile_buttons));
}
static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
@@ -256,11 +201,14 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval;
+ if (off >= real_size)
+ return 0;
+
if (off != 0 || count != real_size)
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common_receive(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -283,7 +231,7 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_send(usb_dev, command, (void *)buf, real_size);
+ retval = koneplus_send(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -347,7 +295,7 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
count = sizeof(struct koneplus_profile_settings) - off;
mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((void const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
+ memcpy(buf, ((char const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&koneplus->koneplus_lock);
@@ -406,7 +354,7 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
count = sizeof(struct koneplus_profile_buttons) - off;
mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((void const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
+ memcpy(buf, ((char const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&koneplus->koneplus_lock);
@@ -609,11 +557,13 @@ static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
struct koneplus_device *koneplus)
{
int retval, i;
- static uint wait = 70; /* device will freeze with just 60 */
+ static uint wait = 100; /* device will freeze with just 60 */
mutex_init(&koneplus->koneplus_lock);
koneplus->startup_profile = koneplus_get_startup_profile(usb_dev);
+ if (koneplus->startup_profile < 0)
+ return koneplus->startup_profile;
msleep(wait);
retval = koneplus_get_info(usb_dev, &koneplus->info);
@@ -651,21 +601,21 @@ static int koneplus_init_specials(struct hid_device *hdev)
koneplus = kzalloc(sizeof(*koneplus), GFP_KERNEL);
if (!koneplus) {
- dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ hid_err(hdev, "can't alloc device descriptor\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, koneplus);
retval = koneplus_init_koneplus_device_struct(usb_dev, koneplus);
if (retval) {
- dev_err(&hdev->dev,
- "couldn't init struct koneplus_device\n");
+ hid_err(hdev, "couldn't init struct koneplus_device\n");
goto exit_free;
}
- retval = roccat_connect(koneplus_class, hdev);
+ retval = roccat_connect(koneplus_class, hdev,
+ sizeof(struct koneplus_roccat_report));
if (retval < 0) {
- dev_err(&hdev->dev, "couldn't init char dev\n");
+ hid_err(hdev, "couldn't init char dev\n");
} else {
koneplus->chrdev_minor = retval;
koneplus->roccat_claimed = 1;
@@ -701,19 +651,19 @@ static int koneplus_probe(struct hid_device *hdev,
retval = hid_parse(hdev);
if (retval) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto exit;
}
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (retval) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto exit;
}
retval = koneplus_init_specials(hdev);
if (retval) {
- dev_err(&hdev->dev, "couldn't install mouse\n");
+ hid_err(hdev, "couldn't install mouse\n");
goto exit_stop;
}
@@ -769,8 +719,7 @@ static void koneplus_report_to_chrdev(struct koneplus_device const *koneplus,
roccat_report.data2 = button_report->data2;
roccat_report.profile = koneplus->actual_profile + 1;
roccat_report_event(koneplus->chrdev_minor,
- (uint8_t const *)&roccat_report,
- sizeof(struct koneplus_roccat_report));
+ (uint8_t const *)&roccat_report);
}
static int koneplus_raw_event(struct hid_device *hdev,
@@ -825,8 +774,8 @@ static int __init koneplus_init(void)
static void __exit koneplus_exit(void)
{
- class_destroy(koneplus_class);
hid_unregister_driver(&koneplus_driver);
+ class_destroy(koneplus_class);
}
module_init(koneplus_init);
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
new file mode 100644
index 000000000000..984be2f8967e
--- /dev/null
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -0,0 +1,715 @@
+/*
+ * Roccat Kova[+] driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kova[+] is a bigger version of the Pyra with two more side buttons.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-kovaplus.h"
+
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+static struct class *kovaplus_class;
+
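+/* Raw CPI event values 7 and 4 correspond to levels 4 and 3; other values are already the level */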
+static uint kovaplus_convert_event_cpi(uint value)
+{
+ return (value == 7 ? 4 : (value == 4 ? 3 : value));
+}
+
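+/* Cache the active profile and its CPI/sensitivity values for the sysfs show functions */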
+static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
+ uint new_profile_index)
+{
+ kovaplus->actual_profile = new_profile_index;
+ kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
+ kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
+ kovaplus->actual_y_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_y;
+}
+
+static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
+ enum kovaplus_control_requests request)
+{
+ int retval;
+ struct kovaplus_control control;
+
+ if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
+ request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
+ value > 4)
+ return -EINVAL;
+
+ control.command = KOVAPLUS_COMMAND_CONTROL;
+ control.value = value;
+ control.request = request;
+
+ retval = roccat_common_send(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct kovaplus_control));
+
+ return retval;
+}
+
+static int kovaplus_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct kovaplus_control control;
+
+ do {
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct kovaplus_control));
+
+ /* check if we get a completely wrong answer */
+ if (retval)
+ return retval;
+
+ if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK)
+ return 0;
+
+ /* indicates that hardware needs some more time to complete action */
+ if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) {
+ msleep(500); /* windows driver uses 1000 */
+ continue;
+ }
+
+ /* seems to be critical - replug necessary */
+ if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
+ return -EINVAL;
+
+ hid_err(usb_dev, "kovaplus_receive_control_status: "
+ "unknown response value 0x%x\n", control.value);
+ return -EINVAL;
+ } while (1);
+}
+
+static int kovaplus_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ msleep(100);
+
+ return kovaplus_receive_control_status(usb_dev);
+}
+
+static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
+ enum kovaplus_control_requests request)
+{
+ return kovaplus_send_control(usb_dev, number, request);
+}
+
+static int kovaplus_get_info(struct usb_device *usb_dev,
+ struct kovaplus_info *buf)
+{
+ return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_INFO,
+ buf, sizeof(struct kovaplus_info));
+}
+
+static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
+ struct kovaplus_profile_settings *buf, uint number)
+{
+ int retval;
+
+ retval = kovaplus_select_profile(usb_dev, number,
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
+ if (retval)
+ return retval;
+
+ return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ buf, sizeof(struct kovaplus_profile_settings));
+}
+
+static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
+ struct kovaplus_profile_settings const *settings)
+{
+ return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ settings, sizeof(struct kovaplus_profile_settings));
+}
+
+static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
+ struct kovaplus_profile_buttons *buf, int number)
+{
+ int retval;
+
+ retval = kovaplus_select_profile(usb_dev, number,
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
+ if (retval)
+ return retval;
+
+ return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ buf, sizeof(struct kovaplus_profile_buttons));
+}
+
+static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
+ struct kovaplus_profile_buttons const *buttons)
+{
+ return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ buttons, sizeof(struct kovaplus_profile_buttons));
+}
+
+/* retval is 0-4 on success, < 0 on error */
+static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct kovaplus_actual_profile buf;
+ int retval;
+
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct kovaplus_actual_profile));
+
+ return retval ? retval : buf.actual_profile;
+}
+
+static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
+ int new_profile)
+{
+ struct kovaplus_actual_profile buf;
+
+ buf.command = KOVAPLUS_COMMAND_ACTUAL_PROFILE;
+ buf.size = sizeof(struct kovaplus_actual_profile);
+ buf.actual_profile = new_profile;
+
+ return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct kovaplus_actual_profile));
+}
+
+static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kovaplus_profile_settings))
+ return 0;
+
+ if (off + count > sizeof(struct kovaplus_profile_settings))
+ count = sizeof(struct kovaplus_profile_settings) - off;
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ memcpy(buf, ((char const *)&kovaplus->profile_settings[*(uint *)(attr->private)]) + off,
+ count);
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ return count;
+}
+
+static ssize_t kovaplus_sysfs_write_profile_settings(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0;
+ int difference;
+ int profile_index;
+ struct kovaplus_profile_settings *profile_settings;
+
+ if (off != 0 || count != sizeof(struct kovaplus_profile_settings))
+ return -EINVAL;
+
+ profile_index = ((struct kovaplus_profile_settings const *)buf)->profile_index;
+ profile_settings = &kovaplus->profile_settings[profile_index];
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ difference = memcmp(buf, profile_settings,
+ sizeof(struct kovaplus_profile_settings));
+ if (difference) {
+ retval = kovaplus_set_profile_settings(usb_dev,
+ (struct kovaplus_profile_settings const *)buf);
+ if (!retval)
+ memcpy(profile_settings, buf,
+ sizeof(struct kovaplus_profile_settings));
+ }
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kovaplus_profile_settings);
+}
+
+static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kovaplus_profile_buttons))
+ return 0;
+
+ if (off + count > sizeof(struct kovaplus_profile_buttons))
+ count = sizeof(struct kovaplus_profile_buttons) - off;
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ memcpy(buf, ((char const *)&kovaplus->profile_buttons[*(uint *)(attr->private)]) + off,
+ count);
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ return count;
+}
+
+static ssize_t kovaplus_sysfs_write_profile_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0;
+ int difference;
+ uint profile_index;
+ struct kovaplus_profile_buttons *profile_buttons;
+
+ if (off != 0 || count != sizeof(struct kovaplus_profile_buttons))
+ return -EINVAL;
+
+ profile_index = ((struct kovaplus_profile_buttons const *)buf)->profile_index;
+ profile_buttons = &kovaplus->profile_buttons[profile_index];
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ difference = memcmp(buf, profile_buttons,
+ sizeof(struct kovaplus_profile_buttons));
+ if (difference) {
+ retval = kovaplus_set_profile_buttons(usb_dev,
+ (struct kovaplus_profile_buttons const *)buf);
+ if (!retval)
+ memcpy(profile_buttons, buf,
+ sizeof(struct kovaplus_profile_buttons));
+ }
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kovaplus_profile_buttons);
+}
+
+static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_profile);
+}
+
+static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kovaplus_device *kovaplus;
+ struct usb_device *usb_dev;
+ unsigned long profile;
+ int retval;
+
+ dev = dev->parent->parent;
+ kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ if (profile >= 5)
+ return -EINVAL;
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ retval = kovaplus_set_actual_profile(usb_dev, profile);
+ kovaplus->actual_profile = profile;
+ mutex_unlock(&kovaplus->kovaplus_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_cpi);
+}
+
+static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_x_sensitivity);
+}
+
+static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_y_sensitivity);
+}
+
+static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->info.firmware_version);
+}
+
+static struct device_attribute kovaplus_attributes[] = {
+ __ATTR(actual_cpi, 0440,
+ kovaplus_sysfs_show_actual_cpi, NULL),
+ __ATTR(firmware_version, 0440,
+ kovaplus_sysfs_show_firmware_version, NULL),
+ __ATTR(actual_profile, 0660,
+ kovaplus_sysfs_show_actual_profile,
+ kovaplus_sysfs_set_actual_profile),
+ __ATTR(actual_sensitivity_x, 0440,
+ kovaplus_sysfs_show_actual_sensitivity_x, NULL),
+ __ATTR(actual_sensitivity_y, 0440,
+ kovaplus_sysfs_show_actual_sensitivity_y, NULL),
+ __ATTR_NULL
+};
+
+static struct bin_attribute kovaplus_bin_attributes[] = {
+ {
+ .attr = { .name = "profile_settings", .mode = 0220 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .write = kovaplus_sysfs_write_profile_settings
+ },
+ {
+ .attr = { .name = "profile1_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[4]
+ },
+ {
+ .attr = { .name = "profile_buttons", .mode = 0220 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .write = kovaplus_sysfs_write_profile_buttons
+ },
+ {
+ .attr = { .name = "profile1_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[4]
+ },
+ __ATTR_NULL
+};
+
+static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
+ struct kovaplus_device *kovaplus)
+{
+ int retval, i;
+ static uint wait = 70; /* device will freeze with just 60 */
+
+ mutex_init(&kovaplus->kovaplus_lock);
+
+ retval = kovaplus_get_info(usb_dev, &kovaplus->info);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < 5; ++i) {
+ msleep(wait);
+ retval = kovaplus_get_profile_settings(usb_dev,
+ &kovaplus->profile_settings[i], i);
+ if (retval)
+ return retval;
+
+ msleep(wait);
+ retval = kovaplus_get_profile_buttons(usb_dev,
+ &kovaplus->profile_buttons[i], i);
+ if (retval)
+ return retval;
+ }
+
+ msleep(wait);
+ retval = kovaplus_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ kovaplus_profile_activated(kovaplus, retval);
+
+ return 0;
+}
+
+static int kovaplus_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct kovaplus_device *kovaplus;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ kovaplus = kzalloc(sizeof(*kovaplus), GFP_KERNEL);
+ if (!kovaplus) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, kovaplus);
+
+ retval = kovaplus_init_kovaplus_device_struct(usb_dev, kovaplus);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct kovaplus_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(kovaplus_class, hdev,
+ sizeof(struct kovaplus_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ kovaplus->chrdev_minor = retval;
+ kovaplus->roccat_claimed = 1;
+ }
+
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(kovaplus);
+ return retval;
+}
+
+static void kovaplus_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kovaplus_device *kovaplus;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ kovaplus = hid_get_drvdata(hdev);
+ if (kovaplus->roccat_claimed)
+ roccat_disconnect(kovaplus->chrdev_minor);
+ kfree(kovaplus);
+ }
+}
+
+static int kovaplus_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = kovaplus_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void kovaplus_remove(struct hid_device *hdev)
+{
+ kovaplus_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus,
+ u8 const *data)
+{
+ struct kovaplus_mouse_report_button const *button_report;
+
+ if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct kovaplus_mouse_report_button const *)data;
+
+ switch (button_report->type) {
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1:
+ kovaplus_profile_activated(kovaplus, button_report->data1 - 1);
+ break;
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
+ kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
+ break;
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
+ kovaplus->actual_x_sensitivity = button_report->data1;
+ kovaplus->actual_y_sensitivity = button_report->data2;
+ }
+}
+
+static void kovaplus_report_to_chrdev(struct kovaplus_device const *kovaplus,
+ u8 const *data)
+{
+ struct kovaplus_roccat_report roccat_report;
+ struct kovaplus_mouse_report_button const *button_report;
+
+ if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct kovaplus_mouse_report_button const *)data;
+
+ if (button_report->type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2)
+ return;
+
+ roccat_report.type = button_report->type;
+ roccat_report.profile = kovaplus->actual_profile + 1;
+
+ if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO ||
+ roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT ||
+ roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH ||
+ roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER)
+ roccat_report.button = button_report->data1;
+ else
+ roccat_report.button = 0;
+
+ if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI)
+ roccat_report.data1 = kovaplus_convert_event_cpi(button_report->data1);
+ else
+ roccat_report.data1 = button_report->data1;
+
+ roccat_report.data2 = button_report->data2;
+
+ roccat_report_event(kovaplus->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int kovaplus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kovaplus_device *kovaplus = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return 0;
+
+ kovaplus_keep_values_up_to_date(kovaplus, data);
+
+ if (kovaplus->roccat_claimed)
+ kovaplus_report_to_chrdev(kovaplus, data);
+
+ return 0;
+}
+
+static const struct hid_device_id kovaplus_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kovaplus_devices);
+
+static struct hid_driver kovaplus_driver = {
+ .name = "kovaplus",
+ .id_table = kovaplus_devices,
+ .probe = kovaplus_probe,
+ .remove = kovaplus_remove,
+ .raw_event = kovaplus_raw_event
+};
+
+static int __init kovaplus_init(void)
+{
+ int retval;
+
+ kovaplus_class = class_create(THIS_MODULE, "kovaplus");
+ if (IS_ERR(kovaplus_class))
+ return PTR_ERR(kovaplus_class);
+ kovaplus_class->dev_attrs = kovaplus_attributes;
+ kovaplus_class->dev_bin_attrs = kovaplus_bin_attributes;
+
+ retval = hid_register_driver(&kovaplus_driver);
+ if (retval)
+ class_destroy(kovaplus_class);
+ return retval;
+}
+
+static void __exit kovaplus_exit(void)
+{
+ hid_unregister_driver(&kovaplus_driver);
+ class_destroy(kovaplus_class);
+}
+
+module_init(kovaplus_init);
+module_exit(kovaplus_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kova[+] driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
new file mode 100644
index 000000000000..ce40607d21c7
--- /dev/null
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -0,0 +1,157 @@
+#ifndef __HID_ROCCAT_KOVAPLUS_H
+#define __HID_ROCCAT_KOVAPLUS_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+struct kovaplus_control {
+ uint8_t command; /* KOVAPLUS_COMMAND_CONTROL */
+ uint8_t value;
+ uint8_t request;
+} __packed;
+
+enum kovaplus_control_requests {
+ /* read after write; value = 1 */
+ KOVAPLUS_CONTROL_REQUEST_STATUS = 0x0,
+ /* write; value = profile number range 0-4 */
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
+ /* write; value = profile number range 0-4 */
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20,
+};
+
+enum kovaplus_control_values {
+ KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0, /* supposed */
+ KOVAPLUS_CONTROL_REQUEST_STATUS_OK = 1,
+ KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT = 3, /* supposed */
+};
+
+struct kovaplus_actual_profile {
+ uint8_t command; /* KOVAPLUS_COMMAND_ACTUAL_PROFILE */
+ uint8_t size; /* always 3 */
+ uint8_t actual_profile; /* Range 0-4! */
+} __packed;
+
+struct kovaplus_profile_settings {
+ uint8_t command; /* KOVAPLUS_COMMAND_PROFILE_SETTINGS */
+ uint8_t size; /* 16 */
+ uint8_t profile_index; /* range 0-4 */
+ uint8_t unknown1;
+ uint8_t sensitivity_x; /* range 1-10 */
+ uint8_t sensitivity_y; /* range 1-10 */
+ uint8_t cpi_levels_enabled;
+ uint8_t cpi_startup_level; /* range 1-4 */
+ uint8_t data[8];
+} __packed;
+
+struct kovaplus_profile_buttons {
+ uint8_t command; /* KOVAPLUS_COMMAND_PROFILE_BUTTONS */
+ uint8_t size; /* 23 */
+ uint8_t profile_index; /* range 0-4 */
+ uint8_t data[20];
+} __packed;
+
+struct kovaplus_info {
+ uint8_t command; /* KOVAPLUS_COMMAND_INFO */
+ uint8_t size; /* 6 */
+ uint8_t firmware_version;
+ uint8_t unknown[3];
+} __packed;
+
+/* writes 1 on plugin */
+struct kovaplus_a {
+ uint8_t command; /* KOVAPLUS_COMMAND_A */
+ uint8_t size; /* 3 */
+ uint8_t unknown;
+} __packed;
+
+enum kovaplus_commands {
+ KOVAPLUS_COMMAND_CONTROL = 0x4,
+ KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
+ KOVAPLUS_COMMAND_INFO = 0x9,
+ KOVAPLUS_COMMAND_A = 0xa,
+};
+
+enum kovaplus_usb_commands {
+ KOVAPLUS_USB_COMMAND_CONTROL = 0x304,
+ KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305,
+ KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
+ KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
+ KOVAPLUS_USB_COMMAND_INFO = 0x309,
+ KOVAPLUS_USB_COMMAND_A = 0x30a,
+};
+
+enum kovaplus_mouse_report_numbers {
+ KOVAPLUS_MOUSE_REPORT_NUMBER_MOUSE = 1,
+ KOVAPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
+ KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3,
+ KOVAPLUS_MOUSE_REPORT_NUMBER_KBD = 4,
+};
+
+struct kovaplus_mouse_report_button {
+ uint8_t report_number; /* KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON */
+ uint8_t unknown1;
+ uint8_t type;
+ uint8_t data1;
+ uint8_t data2;
+} __packed;
+
+enum kovaplus_mouse_report_button_types {
+ /* data1 = profile_number range 1-5; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1 = 0x20,
+ /* data1 = profile_number range 1-5; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2 = 0x30,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO = 0x40,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT = 0x50,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+ /* data1 = 1 = 400, 2 = 800, 4 = 1600, 7 = 3200; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+ /* data1 + data2 = sense range 1-10; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+ /* data1 = type as in profile_buttons; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+enum kovaplus_mouse_report_button_actions {
+ KOVAPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS = 0,
+ KOVAPLUS_MOUSE_REPORT_BUTTON_ACTION_RELEASE = 1,
+};
+
+struct kovaplus_roccat_report {
+ uint8_t type;
+ uint8_t profile;
+ uint8_t button;
+ uint8_t data1;
+ uint8_t data2;
+} __packed;
+
+struct kovaplus_device {
+ int actual_profile;
+ int actual_cpi;
+ int actual_x_sensitivity;
+ int actual_y_sensitivity;
+ int roccat_claimed;
+ int chrdev_minor;
+ struct mutex kovaplus_lock;
+ struct kovaplus_info info;
+ struct kovaplus_profile_settings profile_settings[5];
+ struct kovaplus_profile_buttons profile_buttons[5];
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 02c58e015bee..160f481344f6 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -20,11 +20,11 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
-#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hid-roccat.h>
#include "hid-ids.h"
-#include "hid-roccat.h"
+#include "hid-roccat-common.h"
#include "hid-roccat-pyra.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
@@ -42,7 +42,6 @@ static void profile_activated(struct pyra_device *pyra,
static int pyra_send_control(struct usb_device *usb_dev, int value,
enum pyra_control_requests request)
{
- int len;
struct pyra_control control;
if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
@@ -54,47 +53,31 @@ static int pyra_send_control(struct usb_device *usb_dev, int value,
control.value = value;
control.request = request;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_CONTROL, 0, (char *)&control,
- sizeof(struct pyra_control),
- USB_CTRL_SET_TIMEOUT);
-
- if (len != sizeof(struct pyra_control))
- return len;
-
- return 0;
+ return roccat_common_send(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ &control, sizeof(struct pyra_control));
}
static int pyra_receive_control_status(struct usb_device *usb_dev)
{
- int len;
+ int retval;
struct pyra_control control;
do {
msleep(10);
-
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE |
- USB_DIR_IN,
- PYRA_USB_COMMAND_CONTROL, 0, (char *)&control,
- sizeof(struct pyra_control),
- USB_CTRL_SET_TIMEOUT);
+ retval = roccat_common_receive(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ &control, sizeof(struct pyra_control));
/* requested too early, try again */
- } while (len == -EPROTO);
+ } while (retval == -EPROTO);
- if (len == sizeof(struct pyra_control) &&
- control.command == PYRA_COMMAND_CONTROL &&
+ if (!retval && control.command == PYRA_COMMAND_CONTROL &&
control.request == PYRA_CONTROL_REQUEST_STATUS &&
control.value == 1)
- return 0;
+ return 0;
else {
hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
control.request, control.value);
- return -EINVAL;
+ return retval ? retval : -EINVAL;
}
}
@@ -102,125 +85,72 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings *buf, int number)
{
int retval;
-
retval = pyra_send_control(usb_dev, number,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
-
if (retval)
return retval;
-
- retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)buf,
- sizeof(struct pyra_profile_settings),
- USB_CTRL_SET_TIMEOUT);
-
- if (retval != sizeof(struct pyra_profile_settings))
- return retval;
-
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS,
+ buf, sizeof(struct pyra_profile_settings));
}
static int pyra_get_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons *buf, int number)
{
int retval;
-
retval = pyra_send_control(usb_dev, number,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
-
if (retval)
return retval;
-
- retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buf,
- sizeof(struct pyra_profile_buttons),
- USB_CTRL_SET_TIMEOUT);
-
- if (retval != sizeof(struct pyra_profile_buttons))
- return retval;
-
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS,
+ buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- int len;
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_SETTINGS, 0, buf,
- sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_settings))
- return -EIO;
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_SETTINGS,
+ buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- int len;
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_INFO, 0, buf,
- sizeof(struct pyra_info), USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_info))
- return -EIO;
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_INFO,
+ buf, sizeof(struct pyra_info));
+}
+
+static int pyra_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+ return pyra_receive_control_status(usb_dev);
}
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- int len;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)settings,
- sizeof(struct pyra_profile_settings),
- USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_profile_settings))
- return -EIO;
- if (pyra_receive_control_status(usb_dev))
- return -EIO;
- return 0;
+ return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS, settings,
+ sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- int len;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buttons,
- sizeof(struct pyra_profile_buttons),
- USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_profile_buttons))
- return -EIO;
- if (pyra_receive_control_status(usb_dev))
- return -EIO;
- return 0;
+ return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS, buttons,
+ sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- int len;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_SETTINGS, 0, (char *)settings,
- sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_settings))
- return -EIO;
- if (pyra_receive_control_status(usb_dev))
- return -EIO;
- return 0;
+ int retval;
+ retval = roccat_common_send(usb_dev, PYRA_USB_COMMAND_SETTINGS, settings,
+ sizeof(struct pyra_settings));
+ if (retval)
+ return retval;
+ return pyra_receive_control_status(usb_dev);
}
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
@@ -521,21 +451,16 @@ static struct bin_attribute pyra_bin_attributes[] = {
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
struct pyra_device *pyra)
{
- struct pyra_info *info;
+ struct pyra_info info;
int retval, i;
mutex_init(&pyra->pyra_lock);
- info = kmalloc(sizeof(struct pyra_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- retval = pyra_get_info(usb_dev, info);
- if (retval) {
- kfree(info);
+ retval = pyra_get_info(usb_dev, &info);
+ if (retval)
return retval;
- }
- pyra->firmware_version = info->firmware_version;
- kfree(info);
+
+ pyra->firmware_version = info.firmware_version;
retval = pyra_get_settings(usb_dev, &pyra->settings);
if (retval)
@@ -581,7 +506,8 @@ static int pyra_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(pyra_class, hdev);
+ retval = roccat_connect(pyra_class, hdev,
+ sizeof(struct pyra_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
} else {
@@ -685,8 +611,7 @@ static void pyra_report_to_chrdev(struct pyra_device const *pyra,
roccat_report.value = button_event->data1;
roccat_report.key = 0;
roccat_report_event(pyra->chrdev_minor,
- (uint8_t const *)&roccat_report,
- sizeof(struct pyra_roccat_report));
+ (uint8_t const *)&roccat_report);
break;
case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO:
case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT:
@@ -700,8 +625,7 @@ static void pyra_report_to_chrdev(struct pyra_device const *pyra,
*/
roccat_report.value = pyra->actual_profile + 1;
roccat_report_event(pyra->chrdev_minor,
- (uint8_t const *)&roccat_report,
- sizeof(struct pyra_roccat_report));
+ (uint8_t const *)&roccat_report);
}
break;
}
@@ -761,8 +685,8 @@ static int __init pyra_init(void)
static void __exit pyra_exit(void)
{
- class_destroy(pyra_class);
hid_unregister_driver(&pyra_driver);
+ class_destroy(pyra_class);
}
module_init(pyra_init);
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index a14c579ea781..5666e7587b18 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -26,8 +26,7 @@
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/sched.h>
-
-#include "hid-roccat.h"
+#include <linux/hid-roccat.h>
#define ROCCAT_FIRST_MINOR 0
#define ROCCAT_MAX_DEVICES 8
@@ -37,11 +36,11 @@
struct roccat_report {
uint8_t *value;
- int len;
};
struct roccat_device {
unsigned int minor;
+ int report_size;
int open;
int exist;
wait_queue_head_t wait;
@@ -123,7 +122,7 @@ static ssize_t roccat_read(struct file *file, char __user *buffer,
* If report is larger than requested amount of data, rest of report
* is lost!
*/
- len = report->len > count ? count : report->len;
+ len = device->report_size > count ? count : device->report_size;
if (copy_to_user(buffer, report->value, len)) {
retval = -EFAULT;
@@ -248,26 +247,25 @@ static int roccat_release(struct inode *inode, struct file *file)
*
* This is called from interrupt handler.
*/
-int roccat_report_event(int minor, u8 const *data, int len)
+int roccat_report_event(int minor, u8 const *data)
{
struct roccat_device *device;
struct roccat_reader *reader;
struct roccat_report *report;
uint8_t *new_value;
- new_value = kmemdup(data, len, GFP_ATOMIC);
+ device = devices[minor];
+
+ new_value = kmemdup(data, device->report_size, GFP_ATOMIC);
if (!new_value)
return -ENOMEM;
- device = devices[minor];
-
report = &device->cbuf[device->cbuf_end];
/* passing NULL is safe */
kfree(report->value);
report->value = new_value;
- report->len = len;
device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
list_for_each_entry(reader, &device->readers, node) {
@@ -295,7 +293,7 @@ EXPORT_SYMBOL_GPL(roccat_report_event);
* Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on
* success, a negative error code on failure.
*/
-int roccat_connect(struct class *klass, struct hid_device *hid)
+int roccat_connect(struct class *klass, struct hid_device *hid, int report_size)
{
unsigned int minor;
struct roccat_device *device;
@@ -343,6 +341,7 @@ int roccat_connect(struct class *klass, struct hid_device *hid)
device->hid = hid;
device->exist = 1;
device->cbuf_end = 0;
+ device->report_size = report_size;
return minor;
}
@@ -357,13 +356,16 @@ void roccat_disconnect(int minor)
mutex_lock(&devices_lock);
device = devices[minor];
- devices[minor] = NULL;
mutex_unlock(&devices_lock);
device->exist = 0; /* TODO exist maybe not needed */
device_destroy(device->dev->class, MKDEV(roccat_major, minor));
+ mutex_lock(&devices_lock);
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+
if (device->open) {
hid_hw_close(device->hid);
wake_up_interruptible(&device->wait);
@@ -373,6 +375,34 @@ void roccat_disconnect(int minor)
}
EXPORT_SYMBOL_GPL(roccat_disconnect);
+static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct roccat_device *device;
+ unsigned int minor = iminor(inode);
+ long retval = 0;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+ if (!device) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ switch (cmd) {
+ case ROCCATIOCGREPSIZE:
+ if (put_user(device->report_size, (int __user *)arg))
+ retval = -EFAULT;
+ break;
+ default:
+ retval = -ENOTTY;
+ }
+out:
+ mutex_unlock(&devices_lock);
+ return retval;
+}
+
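The new ioctl lets a reader of the roccat character device ask how many bytes each queued event occupies before calling read(). A minimal userspace sketch, assuming ROCCATIOCGREPSIZE is visible to userspace through <linux/hid-roccat.h>; the device node name below is a placeholder, the real name depends on the class device created for the mouse:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hid-roccat.h>

int main(void)
{
	int fd, size;

	fd = open("/dev/roccatkovaplus", O_RDONLY);	/* placeholder node name */
	if (fd < 0)
		return 1;
	if (ioctl(fd, ROCCATIOCGREPSIZE, &size) == 0)
		printf("event report size: %d bytes\n", size);	/* read() returns records of this size */
	close(fd);
	return 0;
}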
static const struct file_operations roccat_ops = {
.owner = THIS_MODULE,
.read = roccat_read,
@@ -380,6 +410,7 @@ static const struct file_operations roccat_ops = {
.open = roccat_open,
.release = roccat_release,
.llseek = noop_llseek,
+ .unlocked_ioctl = roccat_ioctl,
};
static int __init roccat_init(void)
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 68d7b36e31e4..93819a08121a 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -46,6 +46,16 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+/*
+ * The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
+ * like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
+ * so we need to override that forcing HID Output Reports on the Control EP.
+ *
+ * There is also another issue about HID Output Reports via USB, the Sixaxis
+ * does not want the report_id as part of the data packet, so we have to
+ * discard buf[0] when sending the actual control message, even for numbered
+ * reports, humpf!
+ */
static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
size_t count, unsigned char report_type)
{
@@ -55,6 +65,12 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
int report_id = buf[0];
int ret;
+ if (report_type == HID_OUTPUT_REPORT) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ }
+
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
@@ -62,6 +78,10 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
interface->desc.bInterfaceNumber, buf, count,
USB_CTRL_SET_TIMEOUT);
+ /* Count also the Report ID, in case of an Output report. */
+ if (ret > 0 && report_type == HID_OUTPUT_REPORT)
+ ret++;
+
return ret;
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 468e87b53ed2..54409cba018c 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -91,7 +91,7 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
ret = -EFAULT;
goto out;
}
- ret += len;
+ ret = len;
kfree(list->buffer[list->tail].value);
list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
@@ -102,15 +102,14 @@ out:
}
/* the first byte is expected to be a report number */
-static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+/* This function is to be called with the minors_lock mutex held */
+static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
struct hid_device *dev;
__u8 *buf;
int ret = 0;
- mutex_lock(&minors_lock);
-
if (!hidraw_table[minor]) {
ret = -ENODEV;
goto out;
@@ -148,14 +147,92 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
goto out_free;
}
- ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
+ ret = dev->hid_output_raw_report(dev, buf, count, report_type);
out_free:
kfree(buf);
out:
+ return ret;
+}
+
+/* the first byte is expected to be a report number */
+static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ mutex_lock(&minors_lock);
+ ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT);
mutex_unlock(&minors_lock);
return ret;
}
+
+/* This function performs a Get_Report transfer over the control endpoint
+ per section 7.2.1 of the HID specification, version 1.1. The first byte
+ of buffer is the report number to request, or 0x0 if the device does not
+ use numbered reports. The report_type parameter can be HID_FEATURE_REPORT
+ or HID_INPUT_REPORT. This function is to be called with the minors_lock
+ mutex held. */
+static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ struct hid_device *dev;
+ __u8 *buf;
+ int ret = 0, len;
+ unsigned char report_number;
+
+ dev = hidraw_table[minor]->hid;
+
+ if (!dev->hid_get_raw_report) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (count > HID_MAX_BUFFER_SIZE) {
+ printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
+ task_pid_nr(current));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (count < 2) {
+ printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
+ task_pid_nr(current));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Read the first byte from the user. This is the report number,
+ which is passed to dev->hid_get_raw_report(). */
+ if (copy_from_user(&report_number, buffer, 1)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type);
+
+ if (ret < 0)
+ goto out_free;
+
+ len = (ret < count) ? ret : count;
+
+ if (copy_to_user(buffer, buf, len)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ ret = len;
+
+out_free:
+ kfree(buf);
+out:
+ return ret;
+}
+
static unsigned int hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
@@ -295,7 +372,24 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
default:
{
struct hid_device *hid = dev->hid;
- if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ) {
+ if (_IOC_TYPE(cmd) != 'H') {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
+ break;
+ }
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
+ break;
+ }
+
+ /* Begin Read-only ioctls. */
+ if (_IOC_DIR(cmd) != _IOC_READ) {
ret = -EINVAL;
break;
}
@@ -327,7 +421,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
- }
+ }
ret = -ENOTTY;
}
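The two new branches above route HIDIOCSFEATURE()/HIDIOCGFEATURE() through hidraw_send_report() and hidraw_get_report(). A minimal userspace sketch, assuming those macros come from the <linux/hidraw.h> additions that accompany this change; the report number, payload bytes and device node are made-up values for illustration:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[5] = { 0x09, 0xde, 0xad, 0xbe, 0xef };
	int fd = open("/dev/hidraw0", O_RDWR);

	if (fd < 0)
		return 1;
	/* Set_Report(Feature): buf[0] is the report number, the rest is payload. */
	if (ioctl(fd, HIDIOCSFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCSFEATURE");
	/* Get_Report(Feature): buf[0] selects the report, the reply fills buf. */
	buf[0] = 0x09;
	if (ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCGFEATURE");
	close(fd);
	return 0;
}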
@@ -428,12 +522,12 @@ void hidraw_disconnect(struct hid_device *hid)
hidraw->exist = 0;
+ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+
mutex_lock(&minors_lock);
hidraw_table[hidraw->minor] = NULL;
mutex_unlock(&minors_lock);
- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
-
if (hidraw->open) {
hid_hw_close(hid);
wake_up_interruptible(&hidraw->wait);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b336dd84036f..38c261a40c74 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -799,6 +799,40 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
return 0;
}
+static int usbhid_get_raw_report(struct hid_device *hid,
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ struct usb_device *dev = hid_to_usb_dev(hid);
+ struct usb_interface *intf = usbhid->intf;
+ struct usb_host_interface *interface = intf->cur_altsetting;
+ int skipped_report_id = 0;
+ int ret;
+
+ /* Byte 0 is the report number. Report data starts at byte 1.*/
+ buf[0] = report_number;
+ if (report_number == 0x0) {
+ /* Offset the return buffer by 1, so that the report ID
+ will remain in byte 0. */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ HID_REQ_GET_REPORT,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ((report_type + 1) << 8) | report_number,
+ interface->desc.bInterfaceNumber, buf, count,
+ USB_CTRL_SET_TIMEOUT);
+
+ /* count also the report id */
+ if (ret > 0 && skipped_report_id)
+ ret++;
+
+ return ret;
+}
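For orientation, a worked example of the wValue encoding used above (annotation, not part of the patch): the HID 1.11 Get_Report request carries the report type in the high byte (1 = Input, 2 = Output, 3 = Feature) and the report ID in the low byte, while hid.h numbers HID_INPUT_REPORT/HID_OUTPUT_REPORT/HID_FEATURE_REPORT as 0/1/2, hence the (report_type + 1) shift. Fetching Feature report 5 therefore sends wValue = ((2 + 1) << 8) | 5 = 0x0305.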
+
static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count,
unsigned char report_type)
{
@@ -1139,6 +1173,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
usb_set_intfdata(intf, hid);
hid->ll_driver = &usb_hid_driver;
+ hid->hid_get_raw_report = usbhid_get_raw_report;
hid->hid_output_raw_report = usbhid_output_raw_report;
hid->ff_init = hid_pidff_init;
#ifdef CONFIG_USB_HIDDEV
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 297bc9a7d6e6..80de4df2aeaa 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -467,6 +467,17 @@ config SENSORS_JC42
This driver can also be built as a module. If so, the module
will be called jc42.
+config SENSORS_LINEAGE
+ tristate "Lineage Compact Power Line Power Entry Module"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Lineage Compact Power Line
+ series of DC/DC and AC/DC converters such as CP1800, CP2000AC,
+ CP2000DC, CP2725, and others.
+
+ This driver can also be built as a module. If so, the module
+ will be called lineage-pem.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and LM64"
depends on I2C
@@ -510,7 +521,7 @@ config SENSORS_LM75
- Dallas Semiconductor DS75 and DS1775
- Maxim MAX6625 and MAX6626
- Microchip MCP980x
- - National Semiconductor LM75
+ - National Semiconductor LM75, LM75A
- NXP's LM75A
- ST Microelectronics STDS75
- TelCom (now Microchip) TCN75
@@ -685,6 +696,16 @@ config SENSORS_MAX1619
This driver can also be built as a module. If so, the module
will be called max1619.
+config SENSORS_MAX6639
+ tristate "Maxim MAX6639 sensor chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the MAX6639
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6639.
+
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C && EXPERIMENTAL
@@ -735,6 +756,61 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config PMBUS
+ tristate "PMBus support"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ Say yes here if you want to enable PMBus support.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus_core.
+
+if PMBUS
+
+config SENSORS_PMBUS
+ tristate "Generic PMBus devices"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for generic
+ PMBus devices, including but not limited to BMR450, BMR451, BMR453,
+ BMR454, and LTC2978.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus.
+
+config SENSORS_MAX16064
+ tristate "Maxim MAX16064"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16064.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16064.
+
+config SENSORS_MAX34440
+ tristate "Maxim MAX34440/MAX34441"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX34440 and MAX34441.
+
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
+config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX8688.
+
+ This driver can also be built as a module. If so, the module will
+ be called max8688.
+
+endif # PMBUS
+
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GENERIC_GPIO
@@ -872,6 +948,16 @@ config SENSORS_SMSC47B397
This driver can also be built as a module. If so, the module
will be called smsc47b397.
+config SENSORS_ADS1015
+ tristate "Texas Instruments ADS1015"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADS1015
+ 12-bit 4-input ADC device.
+
+ This driver can also be built as a module. If so, the module
+ will be called ads1015.
+
config SENSORS_ADS7828
tristate "Texas Instruments ADS7828"
depends on I2C
@@ -941,6 +1027,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TWL4030_MADC
+ tristate "Texas Instruments TWL4030 MADC Hwmon"
+ depends on TWL4030_MADC
+ help
+ If you say yes here you get hwmon support for Triton
+ TWL4030-MADC.
+
+ This driver can also be built as a module. If so it will be called
+ twl4030-madc-hwmon.
+
config SENSORS_VIA_CPUTEMP
tristate "VIA CPU temperature sensor"
depends on X86
@@ -1127,40 +1223,6 @@ config SENSORS_ULTRA45
This driver provides support for the Ultra45 workstation environmental
sensors.
-config SENSORS_LIS3_SPI
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
- depends on !ACPI && SPI_MASTER && INPUT
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the LIS3LV02Dx accelerometer connected
- via SPI. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
-
- This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick.
-
- This driver can also be built as modules. If so, the core module
- will be called lis3lv02d and a specific module for the SPI transport
- is called lis3lv02d_spi.
-
-config SENSORS_LIS3_I2C
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
- depends on I2C && INPUT
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the LIS3LV02Dx accelerometer connected
- via I2C. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
-
- This driver also provides an absolute input class device, allowing
- the device to act as a pinball machine-esque joystick.
-
- This driver can also be built as modules. If so, the core module
- will be called lis3lv02d and a specific module for the I2C transport
- is called lis3lv02d_i2c.
-
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
depends on INPUT && X86
@@ -1208,36 +1270,6 @@ config SENSORS_ATK0110
This driver can also be built as a module. If so, the module
will be called asus_atk0110.
-config SENSORS_LIS3LV02D
- tristate "STMicroeletronics LIS3* three-axis digital accelerometer"
- depends on INPUT
- select INPUT_POLLDEV
- select NEW_LEDS
- select LEDS_CLASS
- default n
- help
- This driver provides support for the LIS3* accelerometers, such as the
- LIS3LV02DL or the LIS331DL. In particular, it can be found in a number
- of HP laptops, which have the "Mobile Data Protection System 3D" or
- "3D DriveGuard" feature. On such systems the driver should load
- automatically (via ACPI alias). The accelerometer might also be found
- in other systems, connected via SPI or I2C. The accelerometer data is
- readable via /sys/devices/platform/lis3lv02d.
-
- This driver also provides an absolute input class device, allowing
- a laptop to act as a pinball machine-esque joystick. It provides also
- a misc device which can be used to detect free-fall. On HP laptops,
- if the led infrastructure is activated, support for a led indicating
- disk protection will be provided as hp::hddprotect. For more
- information on the feature, refer to Documentation/hwmon/lis3lv02d.
-
- This driver can also be built as modules. If so, the core module
- will be called lis3lv02d and a specific module for HP laptops will be
- called hp_accel.
-
- Say Y here if you have an applicable laptop and want to experience
- the awesome power of lis3lv02d.
-
endif # ACPI
endif # HWMON
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index dde02d99c238..91e145c57b7b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
+obj-$(CONFIG_SENSORS_ADS1015) += ads1015.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
@@ -62,9 +63,7 @@ obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
-obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
-obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
-obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
+obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
@@ -84,6 +83,7 @@ obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
@@ -102,6 +102,7 @@ obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
@@ -112,6 +113,13 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+# PMBus drivers
+obj-$(CONFIG_PMBUS) += pmbus_core.o
+obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
+obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+
ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
new file mode 100644
index 000000000000..66257c3c7c4a
--- /dev/null
+++ b/drivers/hwmon/ads1015.c
@@ -0,0 +1,282 @@
+/*
+ * ads1015.c - lm_sensors driver for ads1015 12-bit 4-input ADC
+ * (C) Copyright 2010
+ * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+ *
+ * Based on the ads7828 driver by Steve Hardy.
+ *
+ * Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include <linux/i2c/ads1015.h>
+
+/* ADS1015 registers */
+enum {
+ ADS1015_CONVERSION = 0,
+ ADS1015_CONFIG = 1,
+};
+
+/* PGA fullscale voltages in mV */
+static const unsigned int fullscale_table[8] = {
+ 6144, 4096, 2048, 1024, 512, 256, 256, 256 };
+
+#define ADS1015_CONFIG_CHANNELS 8
+#define ADS1015_DEFAULT_CHANNELS 0xff
+
+struct ads1015_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock; /* mutex protect updates */
+ struct attribute *attr_table[ADS1015_CONFIG_CHANNELS + 1];
+ struct attribute_group attr_group;
+};
+
+static s32 ads1015_read_reg(struct i2c_client *client, unsigned int reg)
+{
+ s32 data = i2c_smbus_read_word_data(client, reg);
+
+ return (data < 0) ? data : swab16(data);
+}
+
+static s32 ads1015_write_reg(struct i2c_client *client, unsigned int reg,
+ u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(val));
+}
+
+static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
+ int *value)
+{
+ u16 config;
+ s16 conversion;
+ unsigned int pga;
+ int fullscale;
+ unsigned int k;
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ int res;
+
+ mutex_lock(&data->update_lock);
+
+ /* get fullscale voltage */
+ res = ads1015_read_reg(client, ADS1015_CONFIG);
+ if (res < 0)
+ goto err_unlock;
+ config = res;
+ pga = (config >> 9) & 0x0007;
+ fullscale = fullscale_table[pga];
+
+ /* set channel and start single conversion */
+ config &= ~(0x0007 << 12);
+ config |= (1 << 15) | (1 << 8) | (channel & 0x0007) << 12;
+
+ /* wait until conversion finished */
+ res = ads1015_write_reg(client, ADS1015_CONFIG, config);
+ if (res < 0)
+ goto err_unlock;
+ for (k = 0; k < 5; ++k) {
+ schedule_timeout(msecs_to_jiffies(1));
+ res = ads1015_read_reg(client, ADS1015_CONFIG);
+ if (res < 0)
+ goto err_unlock;
+ config = res;
+ if (config & (1 << 15))
+ break;
+ }
+ if (k == 5) {
+ res = -EIO;
+ goto err_unlock;
+ }
+
+ res = ads1015_read_reg(client, ADS1015_CONVERSION);
+ if (res < 0)
+ goto err_unlock;
+ conversion = res;
+
+ mutex_unlock(&data->update_lock);
+
+ *value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&data->update_lock);
+ return res;
+}
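A worked example of the scaling above (illustrative numbers, not from the patch): the ADS1015 conversion register holds a left-aligned 12-bit two's-complement code in bits 15..4, so the largest positive raw value is 0x7ff0, which the formula maps onto the PGA full-scale voltage. With the default +/-2.048 V range (fullscale = 2048), a raw reading of 0x7ff0 yields DIV_ROUND_CLOSEST(0x7ff0 * 2048, 0x7ff0) = 2048, i.e. 2048 mV, and a raw reading of 0x0400 yields DIV_ROUND_CLOSEST(0x0400 * 2048, 0x7ff0) = 64 mV.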
+
+/* sysfs callback function */
+static ssize_t show_in(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
+ int in;
+ int res;
+
+ res = ads1015_read_value(client, attr->index, &in);
+
+ return (res < 0) ? res : sprintf(buf, "%d\n", in);
+}
+
+#define in_reg(offset)\
+static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in,\
+ NULL, offset)
+
+in_reg(0);
+in_reg(1);
+in_reg(2);
+in_reg(3);
+in_reg(4);
+in_reg(5);
+in_reg(6);
+in_reg(7);
+
+static struct attribute *all_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+};
+
+/*
+ * Driver interface
+ */
+
+static int ads1015_remove(struct i2c_client *client)
+{
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &data->attr_group);
+ kfree(data);
+ return 0;
+}
+
+static unsigned int ads1015_get_exported_channels(struct i2c_client *client)
+{
+ struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
+#ifdef CONFIG_OF
+ struct device_node *np = client->dev.of_node;
+ const __be32 *of_channels;
+ int of_channels_size;
+#endif
+
+ /* prefer platform data */
+ if (pdata)
+ return pdata->exported_channels;
+
+#ifdef CONFIG_OF
+ /* fallback on OF */
+ of_channels = of_get_property(np, "exported-channels",
+ &of_channels_size);
+ if (of_channels && (of_channels_size == sizeof(*of_channels)))
+ return be32_to_cpup(of_channels);
+#endif
+
+ /* fallback on default configuration */
+ return ADS1015_DEFAULT_CHANNELS;
+}
+
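Boards that only wire up some of the eight inputs can limit which inX_input files are created through platform data, which the lookup above prefers over the OF property. A board-support sketch, assuming struct ads1015_platform_data and its exported_channels bitmask from <linux/i2c/ads1015.h>, with a made-up bus number and slave address:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/ads1015.h>

/* Export only AIN0 and AIN4, i.e. create in0_input and in4_input. */
static struct ads1015_platform_data myboard_ads1015_pdata = {
	.exported_channels = (1 << 0) | (1 << 4),
};

static struct i2c_board_info myboard_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("ads1015", 0x48),	/* slave address is board specific */
		.platform_data = &myboard_ads1015_pdata,
	},
};

/* Registered from board init code, e.g.:
 *	i2c_register_board_info(1, myboard_i2c_devs, ARRAY_SIZE(myboard_i2c_devs));
 */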
+static int ads1015_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ads1015_data *data;
+ int err;
+ unsigned int exported_channels;
+ unsigned int k;
+ unsigned int n = 0;
+
+ data = kzalloc(sizeof(struct ads1015_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* build sysfs attribute group */
+ data->attr_group.attrs = data->attr_table;
+ exported_channels = ads1015_get_exported_channels(client);
+ for (k = 0; k < ADS1015_CONFIG_CHANNELS; ++k) {
+ if (!(exported_channels & (1<<k)))
+ continue;
+ data->attr_table[n++] = all_attributes[k];
+ }
+ err = sysfs_create_group(&client->dev.kobj, &data->attr_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ sysfs_remove_group(&client->dev.kobj, &data->attr_group);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static const struct i2c_device_id ads1015_id[] = {
+ { "ads1015", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ads1015_id);
+
+static struct i2c_driver ads1015_driver = {
+ .driver = {
+ .name = "ads1015",
+ },
+ .probe = ads1015_probe,
+ .remove = ads1015_remove,
+ .id_table = ads1015_id,
+};
+
+static int __init sensors_ads1015_init(void)
+{
+ return i2c_add_driver(&ads1015_driver);
+}
+
+static void __exit sensors_ads1015_exit(void)
+{
+ i2c_del_driver(&ads1015_driver);
+}
+
+MODULE_AUTHOR("Dirk Eibach <eibach@gdsys.de>");
+MODULE_DESCRIPTION("ADS1015 driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_ads1015_init);
+module_exit(sensors_ads1015_exit);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 1c8b3d9e2051..fea292d43407 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -32,7 +32,7 @@ struct jz4740_hwmon {
int irq;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
struct device *hwmon;
struct completion read_completion;
@@ -112,7 +112,7 @@ static int __devinit jz4740_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
}
- hwmon->cell = pdev->dev.platform_data;
+ hwmon->cell = mfd_get_cell(pdev);
hwmon->irq = platform_get_irq(pdev, 0);
if (hwmon->irq < 0) {
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
new file mode 100644
index 000000000000..d39ee24e52f6
--- /dev/null
+++ b/drivers/hwmon/lineage-pem.c
@@ -0,0 +1,589 @@
+/*
+ * Driver for Lineage Compact Power Line series of power entry modules.
+ *
+ * Copyright (C) 2010, 2011 Ericsson AB.
+ *
+ * Documentation:
+ * http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/*
+ * This driver supports various Lineage Compact Power Line DC/DC and AC/DC
+ * converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
+ *
+ * The devices are nominally PMBus compliant. However, most standard PMBus
+ * commands are not supported. Specifically, all hardware monitoring and
+ * status reporting commands are non-standard. For this reason, a standard
+ * PMBus driver can not be used.
+ *
+ * All Lineage CPL devices have a built-in I2C bus master selector (PCA9541).
+ * To ensure device access, this driver should only be used as client driver
+ * to the pca9541 I2C master selector driver.
+ */
+
+/* Command codes */
+#define PEM_OPERATION 0x01
+#define PEM_CLEAR_INFO_FLAGS 0x03
+#define PEM_VOUT_COMMAND 0x21
+#define PEM_VOUT_OV_FAULT_LIMIT 0x40
+#define PEM_READ_DATA_STRING 0xd0
+#define PEM_READ_INPUT_STRING 0xdc
+#define PEM_READ_FIRMWARE_REV 0xdd
+#define PEM_READ_RUN_TIMER 0xde
+#define PEM_FAN_HI_SPEED 0xdf
+#define PEM_FAN_NORMAL_SPEED 0xe0
+#define PEM_READ_FAN_SPEED 0xe1
+
+/* offsets in data string */
+#define PEM_DATA_STATUS_2 0
+#define PEM_DATA_STATUS_1 1
+#define PEM_DATA_ALARM_2 2
+#define PEM_DATA_ALARM_1 3
+#define PEM_DATA_VOUT_LSB 4
+#define PEM_DATA_VOUT_MSB 5
+#define PEM_DATA_CURRENT 6
+#define PEM_DATA_TEMP 7
+
+/* Virtual entries, to report constants */
+#define PEM_DATA_TEMP_MAX 10
+#define PEM_DATA_TEMP_CRIT 11
+
+/* offsets in input string */
+#define PEM_INPUT_VOLTAGE 0
+#define PEM_INPUT_POWER_LSB 1
+#define PEM_INPUT_POWER_MSB 2
+
+/* offsets in fan data */
+#define PEM_FAN_ADJUSTMENT 0
+#define PEM_FAN_FAN1 1
+#define PEM_FAN_FAN2 2
+#define PEM_FAN_FAN3 3
+
+/* Status register bits */
+#define STS1_OUTPUT_ON (1 << 0)
+#define STS1_LEDS_FLASHING (1 << 1)
+#define STS1_EXT_FAULT (1 << 2)
+#define STS1_SERVICE_LED_ON (1 << 3)
+#define STS1_SHUTDOWN_OCCURRED (1 << 4)
+#define STS1_INT_FAULT (1 << 5)
+#define STS1_ISOLATION_TEST_OK (1 << 6)
+
+#define STS2_ENABLE_PIN_HI (1 << 0)
+#define STS2_DATA_OUT_RANGE (1 << 1)
+#define STS2_RESTARTED_OK (1 << 1)
+#define STS2_ISOLATION_TEST_FAIL (1 << 3)
+#define STS2_HIGH_POWER_CAP (1 << 4)
+#define STS2_INVALID_INSTR (1 << 5)
+#define STS2_WILL_RESTART (1 << 6)
+#define STS2_PEC_ERR (1 << 7)
+
+/* Alarm register bits */
+#define ALRM1_VIN_OUT_LIMIT (1 << 0)
+#define ALRM1_VOUT_OUT_LIMIT (1 << 1)
+#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2)
+#define ALRM1_VIN_OVERCURRENT (1 << 3)
+#define ALRM1_TEMP_WARNING (1 << 4)
+#define ALRM1_TEMP_SHUTDOWN (1 << 5)
+#define ALRM1_PRIMARY_FAULT (1 << 6)
+#define ALRM1_POWER_LIMIT (1 << 7)
+
+#define ALRM2_5V_OUT_LIMIT (1 << 1)
+#define ALRM2_TEMP_FAULT (1 << 2)
+#define ALRM2_OV_LOW (1 << 3)
+#define ALRM2_DCDC_TEMP_HIGH (1 << 4)
+#define ALRM2_PRI_TEMP_HIGH (1 << 5)
+#define ALRM2_NO_PRIMARY (1 << 6)
+#define ALRM2_FAN_FAULT (1 << 7)
+
+#define FIRMWARE_REV_LEN 4
+#define DATA_STRING_LEN 9
+#define INPUT_STRING_LEN 5 /* 4 for most devices */
+#define FAN_SPEED_LEN 5
+
+struct pem_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ bool fans_supported;
+ int input_length;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 firmware_rev[FIRMWARE_REV_LEN];
+ u8 data_string[DATA_STRING_LEN];
+ u8 input_string[INPUT_STRING_LEN];
+ u8 fan_speed[FAN_SPEED_LEN];
+};
+
+static int pem_read_block(struct i2c_client *client, u8 command, u8 *data,
+ int data_len)
+{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX];
+ int result;
+
+ result = i2c_smbus_read_block_data(client, command, block_buffer);
+ if (unlikely(result < 0))
+ goto abort;
+ if (unlikely(result == 0xff || result != data_len)) {
+ result = -EIO;
+ goto abort;
+ }
+ memcpy(data, block_buffer, data_len);
+ result = 0;
+abort:
+ return result;
+}
+
+static struct pem_data *pem_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pem_data *data = i2c_get_clientdata(client);
+ struct pem_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int result;
+
+ /* Read data string */
+ result = pem_read_block(client, PEM_READ_DATA_STRING,
+ data->data_string,
+ sizeof(data->data_string));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+
+ /* Read input string */
+ if (data->input_length) {
+ result = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ data->input_length);
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ /* Read fan speeds */
+ if (data->fans_supported) {
+ result = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static long pem_get_data(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_DATA_VOUT_LSB:
+ val = (data[index] + (data[index+1] << 8)) * 5 / 2;
+ break;
+ case PEM_DATA_CURRENT:
+ val = data[index] * 200;
+ break;
+ case PEM_DATA_TEMP:
+ val = data[index] * 1000;
+ break;
+ case PEM_DATA_TEMP_MAX:
+ val = 97 * 1000; /* 97 degrees C per datasheet */
+ break;
+ case PEM_DATA_TEMP_CRIT:
+ val = 107 * 1000; /* 107 degrees C per datasheet */
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+static long pem_get_input(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_INPUT_VOLTAGE:
+ if (len == INPUT_STRING_LEN)
+ val = (data[index] + (data[index+1] << 8) - 75) * 1000;
+ else
+ val = (data[index] - 75) * 1000;
+ break;
+ case PEM_INPUT_POWER_LSB:
+ if (len == INPUT_STRING_LEN)
+ index++;
+ val = (data[index] + (data[index+1] << 8)) * 1000000L;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+static long pem_get_fan(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_FAN_FAN1:
+ case PEM_FAN_FAN2:
+ case PEM_FAN_FAN3:
+ val = data[index] * 100;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+/*
+ * Show boolean, either a fault or an alarm.
+ * .nr points to the register, .index is the bit mask to check
+ */
+static ssize_t pem_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+ struct pem_data *data = pem_update_device(dev);
+ u8 status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = data->data_string[attr->nr] & attr->index;
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!status);
+}
+
+static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_data(data->data_string, sizeof(data->data_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_input(data->input_string, sizeof(data->input_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_VOUT_LSB);
+static SENSOR_DEVICE_ATTR_2(in1_min_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_OV_LOW);
+static SENSOR_DEVICE_ATTR_2(in1_max_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT);
+static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_VOLTAGE);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1,
+ ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
+
+/* Currents */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_CURRENT);
+static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_POWER_LSB);
+static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT);
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN1);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN2);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN3);
+static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_FAN_FAULT);
+
+/* Temperatures */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_CRIT);
+static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN);
+static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT);
+
+static struct attribute *pem_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group pem_group = {
+ .attrs = pem_attributes,
+};
+
+static struct attribute *pem_input_attributes[] = {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pem_input_group = {
+ .attrs = pem_input_attributes,
+};
+
+static struct attribute *pem_fan_attributes[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pem_fan_group = {
+ .attrs = pem_fan_attributes,
+};
+
+static int pem_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct pem_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * We use the next two commands to determine if the device is really
+ * there.
+ */
+ ret = pem_read_block(client, PEM_READ_FIRMWARE_REV,
+ data->firmware_rev, sizeof(data->firmware_rev));
+ if (ret < 0)
+ goto out_kfree;
+
+ ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+ if (ret < 0)
+ goto out_kfree;
+
+ dev_info(&client->dev, "Firmware revision %d.%d.%d\n",
+ data->firmware_rev[0], data->firmware_rev[1],
+ data->firmware_rev[2]);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &pem_group);
+ if (ret)
+ goto out_kfree;
+
+ /*
+ * Check if input readings are supported.
+ * This is the case if we can read input data,
+ * and if the returned data is not all zeros.
+ * Note that input alarms are always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string) - 1);
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2]))
+ data->input_length = sizeof(data->input_string) - 1;
+ else if (ret < 0) {
+ /* Input string is one byte longer for some devices */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string));
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2] || data->input_string[3]))
+ data->input_length = sizeof(data->input_string);
+ }
+ ret = 0;
+ if (data->input_length) {
+ ret = sysfs_create_group(&client->dev.kobj, &pem_input_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ /*
+ * Check if fan speed readings are supported.
+ * This is the case if we can read fan speed data,
+ * and if the returned data is not all zeros.
+ * Note that the fan alarm is always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (!ret && (data->fan_speed[0] || data->fan_speed[1] ||
+ data->fan_speed[2] || data->fan_speed[3])) {
+ data->fans_supported = true;
+ ret = sysfs_create_group(&client->dev.kobj, &pem_fan_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_remove_groups;
+ }
+
+ return 0;
+
+out_remove_groups:
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+out_kfree:
+ kfree(data);
+ return ret;
+}
+
+static int pem_remove(struct i2c_client *client)
+{
+ struct pem_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id pem_id[] = {
+ {"lineage_pem", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pem_id);
+
+static struct i2c_driver pem_driver = {
+ .driver = {
+ .name = "lineage_pem",
+ },
+ .probe = pem_probe,
+ .remove = pem_remove,
+ .id_table = pem_id,
+};
+
+static int __init pem_init(void)
+{
+ return i2c_add_driver(&pem_driver);
+}
+
+static void __exit pem_exit(void)
+{
+ i2c_del_driver(&pem_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
+MODULE_LICENSE("GPL");
+
+module_init(pem_init);
+module_exit(pem_exit);
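For reference, pem_get_data() above applies fixed scale factors: VOUT is a 16-bit little-endian word in 2.5 mV steps, current is one byte in 200 mA steps, and temperature is one byte in whole degrees C. A minimal stand-alone user-space sketch of the same conversions, with hypothetical sample bytes:

/* Illustrative only: user-space sketch of the pem_get_data() conversions */
#include <stdio.h>

int main(void)
{
	/* sts2, sts1, alrm2, alrm1, vout_lsb, vout_msb, current, temp (made up) */
	unsigned char d[8] = { 0x00, 0x41, 0x00, 0x00, 0x60, 0x54, 0x32, 0x28 };

	long vout_mv = (d[4] + (d[5] << 8)) * 5 / 2;	/* 2.5 mV per LSB  */
	long curr_ma = d[6] * 200;			/* 200 mA per LSB  */
	long temp_mc = d[7] * 1000;			/* 1 deg C per LSB */

	printf("vout=%ld mV curr=%ld mA temp=%ld mC\n",
	       vout_mv, curr_ma, temp_mc);	/* 54000 mV, 10000 mA, 40000 mC */
	return 0;
}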
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index f36eb80d227f..ef902d5d06ab 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -232,13 +232,16 @@ static const struct i2c_device_id lm75_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
+#define LM75A_ID 0xA1
+
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm75_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
int i;
- int cur, conf, hyst, os;
+ int conf, hyst, os;
+ bool is_lm75a = false;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
@@ -250,37 +253,58 @@ static int lm75_detect(struct i2c_client *new_client,
addresses 0x04-0x07 returning the last read value.
The cycling+unused addresses combination is not tested,
since it would significantly slow the detection down and would
- hardly add any value. */
+ hardly add any value.
- /* Unused addresses */
- cur = i2c_smbus_read_word_data(new_client, 0);
- conf = i2c_smbus_read_byte_data(new_client, 1);
- hyst = i2c_smbus_read_word_data(new_client, 2);
- if (i2c_smbus_read_word_data(new_client, 4) != hyst
- || i2c_smbus_read_word_data(new_client, 5) != hyst
- || i2c_smbus_read_word_data(new_client, 6) != hyst
- || i2c_smbus_read_word_data(new_client, 7) != hyst)
- return -ENODEV;
- os = i2c_smbus_read_word_data(new_client, 3);
- if (i2c_smbus_read_word_data(new_client, 4) != os
- || i2c_smbus_read_word_data(new_client, 5) != os
- || i2c_smbus_read_word_data(new_client, 6) != os
- || i2c_smbus_read_word_data(new_client, 7) != os)
- return -ENODEV;
+ The National Semiconductor LM75A is different than earlier
+ LM75s. It has an ID byte of 0xaX (where X is the chip
+ revision, with 1 being the only revision in existence) in
+ register 7, and unused registers return 0xff rather than the
+ last read value. */
/* Unused bits */
+ conf = i2c_smbus_read_byte_data(new_client, 1);
if (conf & 0xe0)
return -ENODEV;
+ /* First check for LM75A */
+ if (i2c_smbus_read_byte_data(new_client, 7) == LM75A_ID) {
+ /* LM75A returns 0xff on unused registers so
+ just to be sure we check for that too. */
+ if (i2c_smbus_read_byte_data(new_client, 4) != 0xff
+ || i2c_smbus_read_byte_data(new_client, 5) != 0xff
+ || i2c_smbus_read_byte_data(new_client, 6) != 0xff)
+ return -ENODEV;
+ is_lm75a = true;
+ hyst = i2c_smbus_read_byte_data(new_client, 2);
+ os = i2c_smbus_read_byte_data(new_client, 3);
+ } else { /* Traditional style LM75 detection */
+ /* Unused addresses */
+ hyst = i2c_smbus_read_byte_data(new_client, 2);
+ if (i2c_smbus_read_byte_data(new_client, 4) != hyst
+ || i2c_smbus_read_byte_data(new_client, 5) != hyst
+ || i2c_smbus_read_byte_data(new_client, 6) != hyst
+ || i2c_smbus_read_byte_data(new_client, 7) != hyst)
+ return -ENODEV;
+ os = i2c_smbus_read_byte_data(new_client, 3);
+ if (i2c_smbus_read_byte_data(new_client, 4) != os
+ || i2c_smbus_read_byte_data(new_client, 5) != os
+ || i2c_smbus_read_byte_data(new_client, 6) != os
+ || i2c_smbus_read_byte_data(new_client, 7) != os)
+ return -ENODEV;
+ }
+
/* Addresses cycling */
- for (i = 8; i < 0xff; i += 8) {
+ for (i = 8; i <= 248; i += 40) {
if (i2c_smbus_read_byte_data(new_client, i + 1) != conf
- || i2c_smbus_read_word_data(new_client, i + 2) != hyst
- || i2c_smbus_read_word_data(new_client, i + 3) != os)
+ || i2c_smbus_read_byte_data(new_client, i + 2) != hyst
+ || i2c_smbus_read_byte_data(new_client, i + 3) != os)
+ return -ENODEV;
+ if (is_lm75a && i2c_smbus_read_byte_data(new_client, i + 7)
+ != LM75A_ID)
return -ENODEV;
}
- strlcpy(info->type, "lm75", I2C_NAME_SIZE);
+ strlcpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE);
return 0;
}
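The detection split above relies on two observable differences: an LM75A answers with 0xA1 in register 7 and returns 0xff from the unused registers 4-6, while a classic LM75 mirrors the hyst/os values into those addresses. An illustrative stand-alone sketch of that decision, with a fake register file standing in for i2c_smbus_read_byte_data():

/* Illustrative only: fake register file standing in for the chip */
#include <stdbool.h>
#include <stdio.h>

static const int fake_regs[8] = {
	0x0000, 0x00, 0x4b00, 0x5000,	/* temp, conf, hyst, os (made up) */
	0xff, 0xff, 0xff, 0xA1		/* unused regs and LM75A ID byte  */
};

static int read_reg(int reg)		/* stands in for SMBus byte reads */
{
	return fake_regs[reg & 7];
}

static bool chip_is_lm75a(void)
{
	if (read_reg(7) != 0xA1)	/* LM75A ID byte, revision 1 */
		return false;
	/* LM75A: unused registers 4..6 read back as 0xff */
	return read_reg(4) == 0xff && read_reg(5) == 0xff &&
	       read_reg(6) == 0xff;
}

int main(void)
{
	printf("lm75a detected: %d\n", chip_is_lm75a());	/* prints 1 */
	return 0;
}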
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d2cc28660816..cf47e6e476ed 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102, emc6d103
+ emc6d100, emc6d102, emc6d103, emc6d103s
};
/* The LM85 registers */
@@ -283,10 +283,6 @@ struct lm85_zone {
u8 hyst; /* Low limit hysteresis. (0-15) */
u8 range; /* Temp range, encoded */
s8 critical; /* "All fans ON" temp limit */
- u8 off_desired; /* Actual "off" temperature specified. Preserved
- * to prevent "drift" as other autofan control
- * values change.
- */
u8 max_desired; /* Actual "max" temperature specified. Preserved
* to prevent "drift" as other autofan control
* values change.
@@ -306,6 +302,8 @@ struct lm85_data {
const int *freq_map;
enum chips type;
+ bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
+
struct mutex update_lock;
int valid; /* !=0 if following fields are valid */
unsigned long last_reading; /* In jiffies */
@@ -352,6 +350,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
{ "emc6d103", emc6d103 },
+ { "emc6d103s", emc6d103s },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -420,8 +419,7 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
struct lm85_data *data = lm85_update_device(dev);
int vid;
- if ((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)) {
+ if (data->has_vid5) {
/* 6-pin VID (VRM 10) */
vid = vid_from_reg(data->vid & 0x3f, data->vrm);
} else {
@@ -891,7 +889,6 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
mutex_lock(&data->update_lock);
min = TEMP_FROM_REG(data->zone[nr].limit);
- data->zone[nr].off_desired = TEMP_TO_REG(val);
data->zone[nr].hyst = HYST_TO_REG(min - val);
if (nr == 0 || nr == 1) {
lm85_write_value(client, LM85_REG_AFAN_HYST1,
@@ -934,18 +931,6 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
((data->zone[nr].range & 0x0f) << 4)
| (data->pwm_freq[nr] & 0x07));
-/* Update temp_auto_hyst and temp_auto_off */
- data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG(
- data->zone[nr].limit) - TEMP_FROM_REG(
- data->zone[nr].off_desired));
- if (nr == 0 || nr == 1) {
- lm85_write_value(client, LM85_REG_AFAN_HYST1,
- (data->zone[0].hyst << 4)
- | data->zone[1].hyst);
- } else {
- lm85_write_value(client, LM85_REG_AFAN_HYST2,
- (data->zone[2].hyst << 4));
- }
mutex_unlock(&data->update_lock);
return count;
}
@@ -1084,13 +1069,7 @@ static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
&sensor_dev_attr_temp1_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp2_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp3_auto_temp_min.dev_attr.attr,
@@ -1111,6 +1090,26 @@ static const struct attribute_group lm85_group = {
.attrs = lm85_attributes,
};
+static struct attribute *lm85_attributes_minctl[] = {
+ &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
+};
+
+static const struct attribute_group lm85_group_minctl = {
+ .attrs = lm85_attributes_minctl,
+};
+
+static struct attribute *lm85_attributes_temp_off[] = {
+ &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
+};
+
+static const struct attribute_group lm85_group_temp_off = {
+ .attrs = lm85_attributes_temp_off,
+};
+
static struct attribute *lm85_attributes_in4[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
@@ -1258,16 +1257,9 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D103_A1:
type_name = "emc6d103";
break;
- /*
- * Registers apparently missing in EMC6D103S/EMC6D103:A2
- * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
- * (according to the data sheets), but used unconditionally
- * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
- * So skip EMC6D103S for now.
case LM85_VERSTEP_EMC6D103S:
type_name = "emc6d103s";
break;
- */
}
} else {
dev_dbg(&adapter->dev,
@@ -1280,6 +1272,19 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
+static void lm85_remove_files(struct i2c_client *client, struct lm85_data *data)
+{
+ sysfs_remove_group(&client->dev.kobj, &lm85_group);
+ if (data->type != emc6d103s) {
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_minctl);
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_temp_off);
+ }
+ if (!data->has_vid5)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
+ if (data->type == emc6d100)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+}
+
static int lm85_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1302,6 +1307,7 @@ static int lm85_probe(struct i2c_client *client,
case emc6d100:
case emc6d102:
case emc6d103:
+ case emc6d103s:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1319,11 +1325,26 @@ static int lm85_probe(struct i2c_client *client,
if (err)
goto err_kfree;
+ /* minctl and temp_off exist on all chips except emc6d103s */
+ if (data->type != emc6d103s) {
+ err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl);
+ if (err)
+ goto err_kfree;
+ err = sysfs_create_group(&client->dev.kobj,
+ &lm85_group_temp_off);
+ if (err)
+ goto err_kfree;
+ }
+
/* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used
as a sixth digital VID input rather than an analog input. */
- data->vid = lm85_read_value(client, LM85_REG_VID);
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)))
+ if (data->type == adt7463 || data->type == adt7468) {
+ u8 vid = lm85_read_value(client, LM85_REG_VID);
+ if (vid & 0x80)
+ data->has_vid5 = true;
+ }
+
+ if (!data->has_vid5)
if ((err = sysfs_create_group(&client->dev.kobj,
&lm85_group_in4)))
goto err_remove_files;
@@ -1344,10 +1365,7 @@ static int lm85_probe(struct i2c_client *client,
/* Error out and cleanup code */
err_remove_files:
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
err_kfree:
kfree(data);
return err;
@@ -1357,10 +1375,7 @@ static int lm85_remove(struct i2c_client *client)
{
struct lm85_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
kfree(data);
return 0;
}
@@ -1457,11 +1472,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
- data->in[4] = lm85_read_value(client,
- LM85_REG_IN(4));
- }
+ if (!data->has_vid5)
+ data->in[4] = lm85_read_value(client, LM85_REG_IN(4));
if (data->type == adt7468)
data->cfg5 = lm85_read_value(client, ADT7468_REG_CFG5);
@@ -1487,7 +1499,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102 || data->type == emc6d103) {
+ } else if (data->type == emc6d102 || data->type == emc6d103 ||
+ data->type == emc6d103s) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
@@ -1528,8 +1541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN_MIN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
+ if (!data->has_vid5) {
data->in_min[4] = lm85_read_value(client,
LM85_REG_IN_MIN(4));
data->in_max[4] = lm85_read_value(client,
@@ -1573,17 +1585,19 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
}
- i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
- data->autofan[0].min_off = (i & 0x20) != 0;
- data->autofan[1].min_off = (i & 0x40) != 0;
- data->autofan[2].min_off = (i & 0x80) != 0;
+ if (data->type != emc6d103s) {
+ i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
+ data->autofan[0].min_off = (i & 0x20) != 0;
+ data->autofan[1].min_off = (i & 0x40) != 0;
+ data->autofan[2].min_off = (i & 0x80) != 0;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
- data->zone[0].hyst = i >> 4;
- data->zone[1].hyst = i & 0x0f;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
+ data->zone[0].hyst = i >> 4;
+ data->zone[1].hyst = i & 0x0f;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
- data->zone[2].hyst = i >> 4;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
+ data->zone[2].hyst = i >> 4;
+ }
data->last_config = jiffies;
} /* last_config */
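The hysteresis handling moved inside the EMC6D103S check packs two zones per register: AFAN_HYST1 carries zone 0 in its high nibble and zone 1 in its low nibble, and AFAN_HYST2 carries zone 2 in its high nibble. A small illustrative sketch of the packing, with hypothetical nibble values:

/* Illustrative only: AFAN_HYST1 packs two 4-bit hysteresis values */
#include <stdio.h>

int main(void)
{
	unsigned char zone0 = 4, zone1 = 2;			/* hypothetical */
	unsigned char hyst1 = (zone0 << 4) | (zone1 & 0x0f);	/* write side  */

	/* read side, as in lm85_update_device() above */
	printf("reg=0x%02x zone0=%u zone1=%u\n",
	       hyst1, hyst1 >> 4, hyst1 & 0x0f);		/* 0x42 4 2 */
	return 0;
}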
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/max16064.c
new file mode 100644
index 000000000000..1d6d717060d3
--- /dev/null
+++ b/drivers/hwmon/max16064.c
@@ -0,0 +1,91 @@
+/*
+ * Hardware monitoring driver for Maxim MAX16064
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info max16064_info = {
+ .pages = 4,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+};
+
+static int max16064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max16064_info);
+}
+
+static int max16064_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max16064_id[] = {
+ {"max16064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max16064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max16064_driver = {
+ .driver = {
+ .name = "max16064",
+ },
+ .probe = max16064_probe,
+ .remove = max16064_remove,
+ .id_table = max16064_id,
+};
+
+static int __init max16064_init(void)
+{
+ return i2c_add_driver(&max16064_driver);
+}
+
+static void __exit max16064_exit(void)
+{
+ i2c_del_driver(&max16064_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064");
+MODULE_LICENSE("GPL");
+module_init(max16064_init);
+module_exit(max16064_exit);
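The m/b/R coefficients above describe the PMBus "direct" data format: a register reading Y encodes a real-world value X as Y = (m * X + b) * 10^R, so X = (Y * 10^-R - b) / m. An illustrative stand-alone sketch using the MAX16064 voltage coefficients, assuming X comes out in volts and using a hypothetical raw reading:

/* Illustrative only: PMBus "direct" format with the MAX16064 coefficients */
#include <stdio.h>

int main(void)
{
	long m = 19995, b = 0, R = -1;	/* voltage coefficients from above */
	long y = 6598;			/* hypothetical raw register value */

	/* X = (Y * 10^-R - b) / m; with R = -1 this is (Y * 10 - b) / m */
	double x = (y * 10.0 - b) / m;

	printf("raw %ld -> %.3f V\n", y, x);	/* raw 6598 -> 3.300 V */
	return 0;
}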
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/max34440.c
new file mode 100644
index 000000000000..992b701b4c5e
--- /dev/null
+++ b/drivers/hwmon/max34440.c
@@ -0,0 +1,199 @@
+/*
+ * Hardware monitoring driver for Maxim MAX34440/MAX34441
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { max34440, max34441 };
+
+#define MAX34440_STATUS_OC_WARN (1 << 0)
+#define MAX34440_STATUS_OC_FAULT (1 << 1)
+#define MAX34440_STATUS_OT_FAULT (1 << 5)
+#define MAX34440_STATUS_OT_WARN (1 << 6)
+
+static int max34440_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+ int mfg_status;
+
+ ret = pmbus_set_page(client, page);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OC_WARN)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX34440_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OT_WARN)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX34440_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max34440_info[] = {
+ [max34440] = {
+ .pages = 14,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3, /* R = 0 in datasheet reflects mA */
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+ [max34441] = {
+ .pages = 12,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .direct[PSC_FAN] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+};
+
+static int max34440_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max34440_info[id->driver_data]);
+}
+
+static int max34440_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max34440_id[] = {
+ {"max34440", max34440},
+ {"max34441", max34441},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max34440_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max34440_driver = {
+ .driver = {
+ .name = "max34440",
+ },
+ .probe = max34440_probe,
+ .remove = max34440_remove,
+ .id_table = max34440_id,
+};
+
+static int __init max34440_init(void)
+{
+ return i2c_add_driver(&max34440_driver);
+}
+
+static void __exit max34440_exit(void)
+{
+ i2c_del_driver(&max34440_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX34440/MAX34441");
+MODULE_LICENSE("GPL");
+module_init(max34440_init);
+module_exit(max34440_exit);
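The R values above encode a unit shift rather than new device behaviour: the MAX34440 datasheet specifies its direct-format readings with m = 1, b = 0, R = 0 and results already in mV (or mA), so raising R to 3 makes the generic conversion X = (Y * 10^-R - b) / m come out in volts (or amperes), consistent with the other PMBus front ends. For a hypothetical raw reading Y = 3300: X = (3300 * 10^-3 - 0) / 1 = 3.3 V.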
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
new file mode 100644
index 000000000000..f20d9978ee78
--- /dev/null
+++ b/drivers/hwmon/max6639.c
@@ -0,0 +1,653 @@
+/*
+ * max6639.c - Support for Maxim MAX6639
+ *
+ * 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller
+ *
+ * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * based on the initial MAX6639 support from semptian.net
+ * by He Changqing <hechangqing@semptian.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/i2c/max6639.h>
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
+
+/* The MAX6639 registers, valid channel numbers: 0, 1 */
+#define MAX6639_REG_TEMP(ch) (0x00 + (ch))
+#define MAX6639_REG_STATUS 0x02
+#define MAX6639_REG_OUTPUT_MASK 0x03
+#define MAX6639_REG_GCONFIG 0x04
+#define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch))
+#define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch))
+#define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch))
+#define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch))
+#define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4)
+#define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch))
+#define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch))
+#define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch))
+#define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch))
+#define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch))
+#define MAX6639_REG_DEVID 0x3D
+#define MAX6639_REG_MANUID 0x3E
+#define MAX6639_REG_DEVREV 0x3F
+
+/* Register bits */
+#define MAX6639_GCONFIG_STANDBY 0x80
+#define MAX6639_GCONFIG_POR 0x40
+#define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20
+#define MAX6639_GCONFIG_CH2_LOCAL 0x10
+#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08
+
+#define MAX6639_FAN_CONFIG1_PWM 0x80
+
+#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
+
+static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
+
+#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+
+/*
+ * Client data (each client gets its own)
+ */
+struct max6639_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* Register values sampled regularly */
+ u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */
+ bool temp_fault[2]; /* Detected temperature diode failure */
+ u8 fan[2]; /* Register value: TACH count for fans >=30 */
+ u8 status; /* Detected channel alarms and fan failures */
+
+ /* Register values only written to */
+ u8 pwm[2]; /* Register value: Duty cycle 0..120 */
+ u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */
+ u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */
+ u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */
+
+ /* Register values initialized only once */
+ u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
+ u8 rpm_range; /* Index in above rpm_ranges table */
+};
+
+static struct max6639_data *max6639_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_data *ret = data;
+ int i;
+ int status_reg;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ int res;
+
+ dev_dbg(&client->dev, "Starting max6639 update\n");
+
+ status_reg = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_STATUS);
+ if (status_reg < 0) {
+ ret = ERR_PTR(status_reg);
+ goto abort;
+ }
+
+ data->status = status_reg;
+
+ for (i = 0; i < 2; i++) {
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_FAN_CNT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->fan[i] = res;
+
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP_EXT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] = res >> 5;
+ data->temp_fault[i] = res & 0x01;
+
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] |= res << 3;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ long temp;
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = data->temp[attr->index] * 125;
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_temp_fault(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
+}
+
+static ssize_t set_temp_max(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(attr->index),
+ data->temp_therm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
+}
+
+static ssize_t set_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(attr->index),
+ data->temp_alert[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
+}
+
+static ssize_t set_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(attr->index),
+ data->temp_ot[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
+}
+
+static ssize_t set_pwm(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ val = SENSORS_LIMIT(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm[attr->index] = (u8)(val * 120 / 255);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(attr->index),
+ data->pwm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_fan_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
+ data->ppr, data->rpm_range));
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 0);
+static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
+
+
+static struct attribute *max6639_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max6639_group = {
+ .attrs = max6639_attributes,
+};
+
+/*
+ * Returns the index of the given full-scale range in the rpm_ranges table,
+ * or 1 (4000 RPM) if the range is not one of the supported values.
+ */
+static int rpm_range_to_reg(int range)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) {
+ if (rpm_ranges[i] == range)
+ return i;
+ }
+
+ return 1; /* default: 4000 RPM */
+}
+
+static int max6639_init_client(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_platform_data *max6639_info =
+ client->dev.platform_data;
+ int i = 0;
+ int rpm_range = 1; /* default: 4000 RPM */
+ int err = 0;
+
+ /* Reset chip to default values, see below for GCONFIG setup */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_POR);
+ if (err)
+ goto exit;
+
+ /* Fan pulses per revolution default to 2 */
+ if (max6639_info && max6639_info->ppr > 0 &&
+ max6639_info->ppr < 5)
+ data->ppr = max6639_info->ppr;
+ else
+ data->ppr = 2;
+ data->ppr -= 1;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_PPR(i),
+ data->ppr << 5);
+ if (err)
+ goto exit;
+
+ if (max6639_info)
+ rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
+ data->rpm_range = rpm_range;
+
+ for (i = 0; i < 2; i++) {
+
+ /* Fans config PWM, RPM */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG1(i),
+ MAX6639_FAN_CONFIG1_PWM | rpm_range);
+ if (err)
+ goto exit;
+
+ /* Fan PWM polarity is high by default */
+ if (max6639_info && max6639_info->pwm_polarity == 0)
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ else
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ goto exit;
+
+ /*
+ * /THERM full speed enable,
+ * PWM frequency 25kHz, see also GCONFIG below
+ */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG3(i),
+ MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
+ if (err)
+ goto exit;
+
+ /* Max. temp. 80C/90C/100C */
+ data->temp_therm[i] = 80;
+ data->temp_alert[i] = 90;
+ data->temp_ot[i] = 100;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(i),
+ data->temp_therm[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(i),
+ data->temp_alert[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]);
+ if (err)
+ goto exit;
+
+ /* PWM 120/120 (i.e. 100%) */
+ data->pwm[i] = 120;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(i), data->pwm[i]);
+ if (err)
+ goto exit;
+ }
+ /* Start monitoring */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+exit:
+ return err;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max6639_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int dev_id, manu_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Actual detection via device and manufacturer ID */
+ dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID);
+ manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID);
+ if (dev_id != 0x58 || manu_id != 0x4D)
+ return -ENODEV;
+
+ strlcpy(info->type, "max6639", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int max6639_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max6639_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the max6639 chip */
+ err = max6639_init_client(client);
+ if (err < 0)
+ goto error_free;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &max6639_group);
+ if (err)
+ goto error_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error_remove;
+ }
+
+ dev_info(&client->dev, "temperature sensor and fan control found\n");
+
+ return 0;
+
+error_remove:
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+error_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int max6639_remove(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+
+ kfree(data);
+ return 0;
+}
+
+static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
+}
+
+static int max6639_resume(struct i2c_client *client)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
+}
+
+static const struct i2c_device_id max6639_id[] = {
+ {"max6639", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max6639_id);
+
+static struct i2c_driver max6639_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6639",
+ },
+ .probe = max6639_probe,
+ .remove = max6639_remove,
+ .suspend = max6639_suspend,
+ .resume = max6639_resume,
+ .id_table = max6639_id,
+ .detect = max6639_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init max6639_init(void)
+{
+ return i2c_add_driver(&max6639_driver);
+}
+
+static void __exit max6639_exit(void)
+{
+ i2c_del_driver(&max6639_driver);
+}
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("max6639 driver");
+MODULE_LICENSE("GPL");
+
+module_init(max6639_init);
+module_exit(max6639_exit);
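Two conversions above are easy to misread: FAN_FROM_REG() turns a tach count into RPM as rpm_ranges[range] * 30 / (ppr * count) (a count of 0 is reported as -1, a count of 255 as 0 RPM), and the duty-cycle register runs 0..120 while the sysfs pwmN files run 0..255. An illustrative stand-alone sketch with hypothetical readings:

/* Illustrative only: MAX6639 fan and PWM readings with hypothetical values */
#include <stdio.h>

static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };

int main(void)
{
	int range = 1;		/* 4000 RPM full scale (driver default)     */
	int ppr = 2;		/* pulses per revolution (driver default)   */
	int tach = 30;		/* hypothetical MAX6639_REG_FAN_CNT value   */
	int duty = 120;		/* hypothetical MAX6639_REG_TARGTDUTY value */

	int rpm = rpm_ranges[range] * 30 / (ppr * tach);	/* FAN_FROM_REG */
	int pwm = duty * 255 / 120;	/* register 0..120 -> sysfs 0..255 */

	printf("fan=%d RPM pwm=%d\n", rpm, pwm);	/* fan=2000 RPM pwm=255 */
	return 0;
}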
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/max8688.c
new file mode 100644
index 000000000000..8ebfef2ecf26
--- /dev/null
+++ b/drivers/hwmon/max8688.c
@@ -0,0 +1,158 @@
+/*
+ * Hardware monitoring driver for Maxim MAX8688
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+#define MAX8688_MFG_STATUS 0xd8
+
+#define MAX8688_STATUS_OC_FAULT (1 << 4)
+#define MAX8688_STATUS_OV_FAULT (1 << 5)
+#define MAX8688_STATUS_OV_WARNING (1 << 8)
+#define MAX8688_STATUS_UV_FAULT (1 << 9)
+#define MAX8688_STATUS_UV_WARNING (1 << 10)
+#define MAX8688_STATUS_UC_FAULT (1 << 11)
+#define MAX8688_STATUS_OC_WARNING (1 << 12)
+#define MAX8688_STATUS_OT_FAULT (1 << 13)
+#define MAX8688_STATUS_OT_WARNING (1 << 14)
+
+static int max8688_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret = 0;
+ int mfg_status;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_STATUS_VOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UV_WARNING)
+ ret |= PB_VOLTAGE_UV_WARNING;
+ if (mfg_status & MAX8688_STATUS_UV_FAULT)
+ ret |= PB_VOLTAGE_UV_FAULT;
+ if (mfg_status & MAX8688_STATUS_OV_WARNING)
+ ret |= PB_VOLTAGE_OV_WARNING;
+ if (mfg_status & MAX8688_STATUS_OV_FAULT)
+ ret |= PB_VOLTAGE_OV_FAULT;
+ break;
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UC_FAULT)
+ ret |= PB_IOUT_UC_FAULT;
+ if (mfg_status & MAX8688_STATUS_OC_WARNING)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX8688_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_OT_WARNING)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX8688_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max8688_info = {
+ .pages = 1,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_CURRENT_OUT] = 23109,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max8688_get_status,
+};
+
+static int max8688_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max8688_info);
+}
+
+static int max8688_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max8688_id[] = {
+ {"max8688", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max8688_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max8688_driver = {
+ .driver = {
+ .name = "max8688",
+ },
+ .probe = max8688_probe,
+ .remove = max8688_remove,
+ .id_table = max8688_id,
+};
+
+static int __init max8688_init(void)
+{
+ return i2c_add_driver(&max8688_driver);
+}
+
+static void __exit max8688_exit(void)
+{
+ i2c_del_driver(&max8688_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688");
+MODULE_LICENSE("GPL");
+module_init(max8688_init);
+module_exit(max8688_exit);
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
new file mode 100644
index 000000000000..98e2e28899e2
--- /dev/null
+++ b/drivers/hwmon/pmbus.c
@@ -0,0 +1,203 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+/*
+ * Find sensor groups and status registers on each page.
+ */
+static void pmbus_find_sensor_groups(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int page;
+
+ /* Sensors detected on page 0 only */
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN))
+ info->func[0] |= PMBUS_HAVE_VIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VCAP))
+ info->func[0] |= PMBUS_HAVE_VCAP;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_IIN))
+ info->func[0] |= PMBUS_HAVE_IIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_PIN))
+ info->func[0] |= PMBUS_HAVE_PIN;
+ if (info->func[0]
+ && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
+ info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ info->func[0] |= PMBUS_HAVE_FAN12;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ info->func[0] |= PMBUS_HAVE_FAN34;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ info->func[0] |= PMBUS_HAVE_TEMP;
+ if (pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
+ info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
+ }
+
+ /* Sensors detected on all pages */
+ for (page = 0; page < info->pages; page++) {
+ if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {
+ info->func[page] |= PMBUS_HAVE_VOUT;
+ if (pmbus_check_byte_register(client, page,
+ PMBUS_STATUS_VOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_VOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) {
+ info->func[page] |= PMBUS_HAVE_IOUT;
+ if (pmbus_check_byte_register(client, page,
+ PMBUS_STATUS_IOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_IOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_POUT))
+ info->func[page] |= PMBUS_HAVE_POUT;
+ }
+}
+
+/*
+ * Identify chip parameters.
+ */
+static int pmbus_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ if (!info->pages) {
+ /*
+ * Check if the PAGE command is supported. If it is,
+ * keep setting the page number until it fails or until the
+ * maximum number of pages has been reached. Assume that
+ * this is the number of pages supported by the chip.
+ */
+ if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
+ int page;
+
+ for (page = 1; page < PMBUS_PAGES; page++) {
+ if (pmbus_set_page(client, page) < 0)
+ break;
+ }
+ pmbus_set_page(client, 0);
+ info->pages = page;
+ } else {
+ info->pages = 1;
+ }
+ }
+
+ /*
+ * We should check if the COEFFICIENTS register is supported.
+ * If it is, and the chip is configured for direct mode, we can read
+ * the coefficients from the chip, one set per group of sensor
+ * registers.
+ *
+ * To do this, we will need access to a chip which actually supports the
+ * COEFFICIENTS command, since the command is too complex to implement
+ * without testing it.
+ */
+
+ /* Try to find sensor groups */
+ pmbus_find_sensor_groups(client, info);
+
+ return 0;
+}
+
+static int pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+ int ret;
+
+ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pages = id->driver_data;
+ info->identify = pmbus_identify;
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret < 0)
+ goto out;
+ return 0;
+
+out:
+ kfree(info);
+ return ret;
+}
+
+static int pmbus_remove(struct i2c_client *client)
+{
+ int ret;
+ const struct pmbus_driver_info *info;
+
+ info = pmbus_get_driver_info(client);
+ ret = pmbus_do_remove(client);
+ kfree(info);
+ return ret;
+}
+
+/*
+ * Use driver_data to set the number of pages supported by the chip.
+ */
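+/*
+ * A driver_data value of 0 (the generic "pmbus" entry below) leaves
+ * info->pages unset, so pmbus_identify() will probe the PAGE register at
+ * runtime to determine how many pages the chip actually supports.
+ */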
+static const struct i2c_device_id pmbus_id[] = {
+ {"bmr450", 1},
+ {"bmr451", 1},
+ {"bmr453", 1},
+ {"bmr454", 1},
+ {"ltc2978", 8},
+ {"pmbus", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pmbus_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver pmbus_driver = {
+ .driver = {
+ .name = "pmbus",
+ },
+ .probe = pmbus_probe,
+ .remove = pmbus_remove,
+ .id_table = pmbus_id,
+};
+
+static int __init pmbus_init(void)
+{
+ return i2c_add_driver(&pmbus_driver);
+}
+
+static void __exit pmbus_exit(void)
+{
+ i2c_del_driver(&pmbus_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("Generic PMBus driver");
+MODULE_LICENSE("GPL");
+module_init(pmbus_init);
+module_exit(pmbus_exit);
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus.h
new file mode 100644
index 000000000000..a81f7f228762
--- /dev/null
+++ b/drivers/hwmon/pmbus.h
@@ -0,0 +1,313 @@
+/*
+ * pmbus.h - Common defines and structures for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PMBUS_H
+#define PMBUS_H
+
+/*
+ * Registers
+ */
+#define PMBUS_PAGE 0x00
+#define PMBUS_OPERATION 0x01
+#define PMBUS_ON_OFF_CONFIG 0x02
+#define PMBUS_CLEAR_FAULTS 0x03
+#define PMBUS_PHASE 0x04
+
+#define PMBUS_CAPABILITY 0x19
+#define PMBUS_QUERY 0x1A
+
+#define PMBUS_VOUT_MODE 0x20
+#define PMBUS_VOUT_COMMAND 0x21
+#define PMBUS_VOUT_TRIM 0x22
+#define PMBUS_VOUT_CAL_OFFSET 0x23
+#define PMBUS_VOUT_MAX 0x24
+#define PMBUS_VOUT_MARGIN_HIGH 0x25
+#define PMBUS_VOUT_MARGIN_LOW 0x26
+#define PMBUS_VOUT_TRANSITION_RATE 0x27
+#define PMBUS_VOUT_DROOP 0x28
+#define PMBUS_VOUT_SCALE_LOOP 0x29
+#define PMBUS_VOUT_SCALE_MONITOR 0x2A
+
+#define PMBUS_COEFFICIENTS 0x30
+#define PMBUS_POUT_MAX 0x31
+
+#define PMBUS_FAN_CONFIG_12 0x3A
+#define PMBUS_FAN_COMMAND_1 0x3B
+#define PMBUS_FAN_COMMAND_2 0x3C
+#define PMBUS_FAN_CONFIG_34 0x3D
+#define PMBUS_FAN_COMMAND_3 0x3E
+#define PMBUS_FAN_COMMAND_4 0x3F
+
+#define PMBUS_VOUT_OV_FAULT_LIMIT 0x40
+#define PMBUS_VOUT_OV_FAULT_RESPONSE 0x41
+#define PMBUS_VOUT_OV_WARN_LIMIT 0x42
+#define PMBUS_VOUT_UV_WARN_LIMIT 0x43
+#define PMBUS_VOUT_UV_FAULT_LIMIT 0x44
+#define PMBUS_VOUT_UV_FAULT_RESPONSE 0x45
+#define PMBUS_IOUT_OC_FAULT_LIMIT 0x46
+#define PMBUS_IOUT_OC_FAULT_RESPONSE 0x47
+#define PMBUS_IOUT_OC_LV_FAULT_LIMIT 0x48
+#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE 0x49
+#define PMBUS_IOUT_OC_WARN_LIMIT 0x4A
+#define PMBUS_IOUT_UC_FAULT_LIMIT 0x4B
+#define PMBUS_IOUT_UC_FAULT_RESPONSE 0x4C
+
+#define PMBUS_OT_FAULT_LIMIT 0x4F
+#define PMBUS_OT_FAULT_RESPONSE 0x50
+#define PMBUS_OT_WARN_LIMIT 0x51
+#define PMBUS_UT_WARN_LIMIT 0x52
+#define PMBUS_UT_FAULT_LIMIT 0x53
+#define PMBUS_UT_FAULT_RESPONSE 0x54
+#define PMBUS_VIN_OV_FAULT_LIMIT 0x55
+#define PMBUS_VIN_OV_FAULT_RESPONSE 0x56
+#define PMBUS_VIN_OV_WARN_LIMIT 0x57
+#define PMBUS_VIN_UV_WARN_LIMIT 0x58
+#define PMBUS_VIN_UV_FAULT_LIMIT 0x59
+
+#define PMBUS_IIN_OC_FAULT_LIMIT 0x5B
+#define PMBUS_IIN_OC_WARN_LIMIT 0x5D
+
+#define PMBUS_POUT_OP_FAULT_LIMIT 0x68
+#define PMBUS_POUT_OP_WARN_LIMIT 0x6A
+#define PMBUS_PIN_OP_WARN_LIMIT 0x6B
+
+#define PMBUS_STATUS_BYTE 0x78
+#define PMBUS_STATUS_WORD 0x79
+#define PMBUS_STATUS_VOUT 0x7A
+#define PMBUS_STATUS_IOUT 0x7B
+#define PMBUS_STATUS_INPUT 0x7C
+#define PMBUS_STATUS_TEMPERATURE 0x7D
+#define PMBUS_STATUS_CML 0x7E
+#define PMBUS_STATUS_OTHER 0x7F
+#define PMBUS_STATUS_MFR_SPECIFIC 0x80
+#define PMBUS_STATUS_FAN_12 0x81
+#define PMBUS_STATUS_FAN_34 0x82
+
+#define PMBUS_READ_VIN 0x88
+#define PMBUS_READ_IIN 0x89
+#define PMBUS_READ_VCAP 0x8A
+#define PMBUS_READ_VOUT 0x8B
+#define PMBUS_READ_IOUT 0x8C
+#define PMBUS_READ_TEMPERATURE_1 0x8D
+#define PMBUS_READ_TEMPERATURE_2 0x8E
+#define PMBUS_READ_TEMPERATURE_3 0x8F
+#define PMBUS_READ_FAN_SPEED_1 0x90
+#define PMBUS_READ_FAN_SPEED_2 0x91
+#define PMBUS_READ_FAN_SPEED_3 0x92
+#define PMBUS_READ_FAN_SPEED_4 0x93
+#define PMBUS_READ_DUTY_CYCLE 0x94
+#define PMBUS_READ_FREQUENCY 0x95
+#define PMBUS_READ_POUT 0x96
+#define PMBUS_READ_PIN 0x97
+
+#define PMBUS_REVISION 0x98
+#define PMBUS_MFR_ID 0x99
+#define PMBUS_MFR_MODEL 0x9A
+#define PMBUS_MFR_REVISION 0x9B
+#define PMBUS_MFR_LOCATION 0x9C
+#define PMBUS_MFR_DATE 0x9D
+#define PMBUS_MFR_SERIAL 0x9E
+
+/*
+ * CAPABILITY
+ */
+#define PB_CAPABILITY_SMBALERT (1<<4)
+#define PB_CAPABILITY_ERROR_CHECK (1<<7)
+
+/*
+ * VOUT_MODE
+ */
+#define PB_VOUT_MODE_MODE_MASK 0xe0
+#define PB_VOUT_MODE_PARAM_MASK 0x1f
+
+#define PB_VOUT_MODE_LINEAR 0x00
+#define PB_VOUT_MODE_VID 0x20
+#define PB_VOUT_MODE_DIRECT 0x40
+
+/*
+ * Fan configuration
+ */
+#define PB_FAN_2_PULSE_MASK ((1 << 0) | (1 << 1))
+#define PB_FAN_2_RPM (1 << 2)
+#define PB_FAN_2_INSTALLED (1 << 3)
+#define PB_FAN_1_PULSE_MASK ((1 << 4) | (1 << 5))
+#define PB_FAN_1_RPM (1 << 6)
+#define PB_FAN_1_INSTALLED (1 << 7)
+
+/*
+ * STATUS_BYTE, STATUS_WORD (lower)
+ */
+#define PB_STATUS_NONE_ABOVE (1<<0)
+#define PB_STATUS_CML (1<<1)
+#define PB_STATUS_TEMPERATURE (1<<2)
+#define PB_STATUS_VIN_UV (1<<3)
+#define PB_STATUS_IOUT_OC (1<<4)
+#define PB_STATUS_VOUT_OV (1<<5)
+#define PB_STATUS_OFF (1<<6)
+#define PB_STATUS_BUSY (1<<7)
+
+/*
+ * STATUS_WORD (upper)
+ */
+#define PB_STATUS_UNKNOWN (1<<8)
+#define PB_STATUS_OTHER (1<<9)
+#define PB_STATUS_FANS (1<<10)
+#define PB_STATUS_POWER_GOOD_N (1<<11)
+#define PB_STATUS_WORD_MFR (1<<12)
+#define PB_STATUS_INPUT (1<<13)
+#define PB_STATUS_IOUT_POUT (1<<14)
+#define PB_STATUS_VOUT (1<<15)
+
+/*
+ * STATUS_IOUT
+ */
+#define PB_POUT_OP_WARNING (1<<0)
+#define PB_POUT_OP_FAULT (1<<1)
+#define PB_POWER_LIMITING (1<<2)
+#define PB_CURRENT_SHARE_FAULT (1<<3)
+#define PB_IOUT_UC_FAULT (1<<4)
+#define PB_IOUT_OC_WARNING (1<<5)
+#define PB_IOUT_OC_LV_FAULT (1<<6)
+#define PB_IOUT_OC_FAULT (1<<7)
+
+/*
+ * STATUS_VOUT, STATUS_INPUT
+ */
+#define PB_VOLTAGE_UV_FAULT (1<<4)
+#define PB_VOLTAGE_UV_WARNING (1<<5)
+#define PB_VOLTAGE_OV_WARNING (1<<6)
+#define PB_VOLTAGE_OV_FAULT (1<<7)
+
+/*
+ * STATUS_INPUT
+ */
+#define PB_PIN_OP_WARNING (1<<0)
+#define PB_IIN_OC_WARNING (1<<1)
+#define PB_IIN_OC_FAULT (1<<2)
+
+/*
+ * STATUS_TEMPERATURE
+ */
+#define PB_TEMP_UT_FAULT (1<<4)
+#define PB_TEMP_UT_WARNING (1<<5)
+#define PB_TEMP_OT_WARNING (1<<6)
+#define PB_TEMP_OT_FAULT (1<<7)
+
+/*
+ * STATUS_FAN
+ */
+#define PB_FAN_AIRFLOW_WARNING (1<<0)
+#define PB_FAN_AIRFLOW_FAULT (1<<1)
+#define PB_FAN_FAN2_SPEED_OVERRIDE (1<<2)
+#define PB_FAN_FAN1_SPEED_OVERRIDE (1<<3)
+#define PB_FAN_FAN2_WARNING (1<<4)
+#define PB_FAN_FAN1_WARNING (1<<5)
+#define PB_FAN_FAN2_FAULT (1<<6)
+#define PB_FAN_FAN1_FAULT (1<<7)
+
+/*
+ * CML_FAULT_STATUS
+ */
+#define PB_CML_FAULT_OTHER_MEM_LOGIC (1<<0)
+#define PB_CML_FAULT_OTHER_COMM (1<<1)
+#define PB_CML_FAULT_PROCESSOR (1<<3)
+#define PB_CML_FAULT_MEMORY (1<<4)
+#define PB_CML_FAULT_PACKET_ERROR (1<<5)
+#define PB_CML_FAULT_INVALID_DATA (1<<6)
+#define PB_CML_FAULT_INVALID_COMMAND (1<<7)
+
+enum pmbus_sensor_classes {
+ PSC_VOLTAGE_IN = 0,
+ PSC_VOLTAGE_OUT,
+ PSC_CURRENT_IN,
+ PSC_CURRENT_OUT,
+ PSC_POWER,
+ PSC_TEMPERATURE,
+ PSC_FAN,
+ PSC_NUM_CLASSES /* Number of power sensor classes */
+};
+
+#define PMBUS_PAGES 32 /* Per PMBus specification */
+
+/* Functionality bit mask */
+#define PMBUS_HAVE_VIN (1 << 0)
+#define PMBUS_HAVE_VCAP (1 << 1)
+#define PMBUS_HAVE_VOUT (1 << 2)
+#define PMBUS_HAVE_IIN (1 << 3)
+#define PMBUS_HAVE_IOUT (1 << 4)
+#define PMBUS_HAVE_PIN (1 << 5)
+#define PMBUS_HAVE_POUT (1 << 6)
+#define PMBUS_HAVE_FAN12 (1 << 7)
+#define PMBUS_HAVE_FAN34 (1 << 8)
+#define PMBUS_HAVE_TEMP (1 << 9)
+#define PMBUS_HAVE_TEMP2 (1 << 10)
+#define PMBUS_HAVE_TEMP3 (1 << 11)
+#define PMBUS_HAVE_STATUS_VOUT (1 << 12)
+#define PMBUS_HAVE_STATUS_IOUT (1 << 13)
+#define PMBUS_HAVE_STATUS_INPUT (1 << 14)
+#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
+#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
+#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+
+struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ bool direct[PSC_NUM_CLASSES];
+ /* true if device uses direct data format
+ for the given sensor class */
+ /*
+ * Support one set of coefficients for each sensor type
+ * Used for chips providing data in direct mode.
+ */
+ int m[PSC_NUM_CLASSES]; /* mantissa for direct data format */
+ int b[PSC_NUM_CLASSES]; /* offset */
+ int R[PSC_NUM_CLASSES]; /* exponent */
+
+ u32 func[PMBUS_PAGES]; /* Functionality, per page */
+ /*
+ * The get_status function maps manufacturing specific status values
+ * into PMBus standard status values.
+ * This function is optional and only necessary if chip specific status
+ * register values have to be mapped into standard PMBus status register
+ * values.
+ */
+ int (*get_status)(struct i2c_client *client, int page, int reg);
+ /*
+ * The identify function determines supported PMBus functionality.
+ * This function is only necessary if a chip driver supports multiple
+ * chips, and the chip functionality is not pre-determined.
+ */
+ int (*identify)(struct i2c_client *client,
+ struct pmbus_driver_info *info);
+};
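+/*
+ * For chips using the direct data format, the m, b, and R coefficients above
+ * follow the PMBus relation Y = (m * X + b) * 10^R, where X is the real-world
+ * value and Y the register value; pmbus_core uses the per-class entries to
+ * convert in both directions.
+ */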
+
+/* Function declarations */
+
+int pmbus_set_page(struct i2c_client *client, u8 page);
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+void pmbus_clear_faults(struct i2c_client *client);
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info);
+int pmbus_do_remove(struct i2c_client *client);
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
+ *client);
+
+#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
new file mode 100644
index 000000000000..0edab1c9553e
--- /dev/null
+++ b/drivers/hwmon/pmbus_core.c
@@ -0,0 +1,1628 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/delay.h>
+#include <linux/i2c/pmbus.h>
+#include "pmbus.h"
+
+/*
+ * Constants needed to determine number of sensors, booleans, and labels.
+ */
+#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */
+#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit,
+ crit */
+#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */
+#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
+#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
+#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit,
+ crit */
+
+#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm;
+ c: alarm, crit_alarm;
+ p: crit_alarm */
+#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
+ crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
+#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+
+#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
+
+/*
+ * status, status_vout, status_iout, status_fans, and status_temp
+ * are paged. status_input and status_fan34 are unpaged.
+ * status_fan34 is a special case to handle a second set of fans
+ * on page 0.
+ */
+#define PB_NUM_STATUS_REG (PMBUS_PAGES * 5 + 2)
+
+/*
+ * Index into status register array, per status register group
+ */
+#define PB_STATUS_BASE 0
+#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES)
+#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + 1)
+#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+
+struct pmbus_sensor {
+ char name[I2C_NAME_SIZE]; /* sysfs sensor name */
+ struct sensor_device_attribute attribute;
+ u8 page; /* page number */
+ u8 reg; /* register */
+ enum pmbus_sensor_classes class; /* sensor class */
+ bool update; /* runtime sensor update needed */
+ int data; /* Sensor data.
+ Negative if there was a read error */
+};
+
+struct pmbus_boolean {
+ char name[I2C_NAME_SIZE]; /* sysfs boolean name */
+ struct sensor_device_attribute attribute;
+};
+
+struct pmbus_label {
+ char name[I2C_NAME_SIZE]; /* sysfs label name */
+ struct sensor_device_attribute attribute;
+ char label[I2C_NAME_SIZE]; /* label */
+};
+
+struct pmbus_data {
+ struct device *hwmon_dev;
+
+ u32 flags; /* from platform data */
+
+ int exponent; /* linear mode: exponent for output voltages */
+
+ const struct pmbus_driver_info *info;
+
+ int max_attributes;
+ int num_attributes;
+ struct attribute **attributes;
+ struct attribute_group group;
+
+ /*
+ * Sensors cover both sensor and limit registers.
+ */
+ int max_sensors;
+ int num_sensors;
+ struct pmbus_sensor *sensors;
+ /*
+ * Booleans are used for alarms.
+ * Values are determined from status registers.
+ */
+ int max_booleans;
+ int num_booleans;
+ struct pmbus_boolean *booleans;
+ /*
+ * Labels are used to map generic names (e.g., "in1")
+ * to PMBus specific names (e.g., "vin" or "vout1").
+ */
+ int max_labels;
+ int num_labels;
+ struct pmbus_label *labels;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /*
+ * A single status register covers multiple attributes,
+ * so we keep them all together.
+ */
+ u8 status_bits;
+ u8 status[PB_NUM_STATUS_REG];
+
+ u8 currpage;
+};
+
+int pmbus_set_page(struct i2c_client *client, u8 page)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int rv = 0;
+ int newpage;
+
+ if (page != data->currpage) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (newpage != page)
+ rv = -EINVAL;
+ else
+ data->currpage = page;
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(pmbus_set_page);
+
+static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_byte(client, value);
+}
+
+static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
+ u16 word)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_word_data(client, reg, word);
+}
+
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_word_data(client, reg);
+}
+EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+
+static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static void pmbus_clear_fault_page(struct i2c_client *client, int page)
+{
+ pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+}
+
+void pmbus_clear_faults(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < data->info->pages; i++)
+ pmbus_clear_fault_page(client, i);
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_faults);
+
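+/*
+ * Register probing helper: after a speculative register access, check
+ * STATUS_BYTE and, if its CML bit is set, STATUS_CML. A reported "invalid
+ * command" indicates that the register just accessed is not supported, so
+ * the pmbus_check_*_register() callers treat it as absent.
+ */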
+static int pmbus_check_status_cml(struct i2c_client *client, int page)
+{
+ int status, status2;
+
+ status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ if (status < 0 || (status & PB_STATUS_CML)) {
+ status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML);
+ if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_byte_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
+
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_word_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_word_register);
+
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->info;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
+
+static int pmbus_get_status(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->get_status) {
+ status = info->get_status(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_read_byte_data(client, page, reg);
+}
+
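+/*
+ * Refresh cached status registers and sensor readings. Cached data is
+ * considered valid for one second (HZ jiffies); within that window sysfs
+ * reads are served from the cache, serialized by update_lock.
+ */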
+static struct pmbus_data *pmbus_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+
+ mutex_lock(&data->update_lock);
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int i;
+
+ for (i = 0; i < info->pages; i++)
+ data->status[PB_STATUS_BASE + i]
+ = pmbus_read_byte_data(client, i,
+ PMBUS_STATUS_BYTE);
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
+ continue;
+ data->status[PB_STATUS_VOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_VOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
+ continue;
+ data->status[PB_STATUS_IOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_IOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
+ continue;
+ data->status[PB_STATUS_TEMP_BASE + i]
+ = pmbus_get_status(client, i,
+ PMBUS_STATUS_TEMPERATURE);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
+ continue;
+ data->status[PB_STATUS_FAN_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_12);
+ }
+
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ data->status[PB_STATUS_INPUT_BASE]
+ = pmbus_get_status(client, 0, PMBUS_STATUS_INPUT);
+
+ if (info->func[0] & PMBUS_HAVE_STATUS_FAN34)
+ data->status[PB_STATUS_FAN34_BASE]
+ = pmbus_get_status(client, 0, PMBUS_STATUS_FAN_34);
+
+ for (i = 0; i < data->num_sensors; i++) {
+ struct pmbus_sensor *sensor = &data->sensors[i];
+
+ if (!data->valid || sensor->update)
+ sensor->data
+ = pmbus_read_word_data(client, sensor->page,
+ sensor->reg);
+ }
+ pmbus_clear_faults(client);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+/*
+ * Convert linear sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
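+/*
+ * Sensors other than output voltages use the LINEAR11 encoding: a signed
+ * 5-bit exponent in bits [15:11] and a signed 11-bit mantissa in bits
+ * [10:0], i.e. value = mantissa * 2^exponent. Worked example (hypothetical
+ * raw reading, for illustration only): 0xd204 decodes to exponent -6 and
+ * mantissa 516, i.e. 516 * 2^-6 = 8.0625, which this function returns as
+ * 8062 once scaled to milli-units.
+ */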
+static int pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ s16 exponent, mantissa;
+ long val;
+
+ if (sensor->class == PSC_VOLTAGE_OUT) {
+ exponent = data->exponent;
+ mantissa = (s16) sensor->data;
+ } else {
+ exponent = (sensor->data >> 11) & 0x001f;
+ mantissa = sensor->data & 0x07ff;
+
+ if (exponent > 0x0f)
+ exponent |= 0xffe0; /* sign extend exponent */
+ if (mantissa > 0x03ff)
+ mantissa |= 0xf800; /* sign extend mantissa */
+ }
+
+ val = mantissa;
+
+ /* scale result to milli-units for all sensors except fans */
+ if (sensor->class != PSC_FAN)
+ val = val * 1000L;
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER)
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return (int)val;
+}
+
+/*
+ * Convert direct sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
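+/*
+ * Worked example for the direct format (hypothetical register value, using
+ * the MAX8688 voltage coefficients from this series: m = 19995, b = 0,
+ * R = -1): Y = 2400 gives X = (2400 * 10^1 - 0) / 19995 ~= 1.200 V, so this
+ * function returns 1200 milli-volts.
+ */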
+static int pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ long val = (s16) sensor->data;
+ long m, b, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
+
+ if (m == 0)
+ return 0;
+
+ /* X = 1/m * (Y * 10^-R - b) */
+ R = -R;
+ /* scale result to milli-units for everything but fans */
+ if (sensor->class != PSC_FAN) {
+ R += 3;
+ b *= 1000;
+ }
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER) {
+ R += 3;
+ b *= 1000;
+ }
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return (int)((val - b) / m);
+}
+
+static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+{
+ int val;
+
+ if (data->info->direct[sensor->class])
+ val = pmbus_reg2data_direct(data, sensor);
+ else
+ val = pmbus_reg2data_linear(data, sensor);
+
+ return val;
+}
+
+#define MAX_MANTISSA (1023 * 1000)
+#define MIN_MANTISSA (511 * 1000)
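+/*
+ * The mantissa is handled in milli-units below: MAX_MANTISSA keeps it within
+ * the 10 positive bits available once divided by 1000 (1023), and values
+ * below MIN_MANTISSA are scaled up (with a smaller exponent) to preserve
+ * precision before the final conversion back to units.
+ */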
+
+static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ s16 exponent = 0, mantissa = 0;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ if (class == PSC_VOLTAGE_OUT) {
+ /*
+ * For a static exponent, we have no choice
+ * but to adjust the value to it.
+ */
+ if (data->exponent < 0)
+ val <<= -data->exponent;
+ else
+ val >>= data->exponent;
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ if (val > 0x7fff)
+ val = 0x7fff;
+ return negative ? -val : val;
+ }
+
+ /* Power is in uW. Convert to mW before converting. */
+ if (class == PSC_POWER)
+ val = DIV_ROUND_CLOSEST(val, 1000L);
+
+ /*
+ * For simplicity, convert fan data to milli-units
+ * before calculating the exponent.
+ */
+ if (class == PSC_FAN)
+ val = val * 1000;
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /* Ensure that resulting number is within range */
+ if (mantissa > 0x3ff)
+ mantissa = 0x3ff;
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
+static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ long m, b, R;
+
+ m = data->info->m[class];
+ b = data->info->b[class];
+ R = data->info->R[class];
+
+ /* Power is in uW. Adjust R and b. */
+ if (class == PSC_POWER) {
+ R -= 3;
+ b *= 1000;
+ }
+
+ /* Calculate Y = (m * X + b) * 10^R */
+ if (class != PSC_FAN) {
+ R -= 3; /* Adjust R and b for data in milli-units */
+ b *= 1000;
+ }
+ val = val * m + b;
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return val;
+}
+
+static u16 pmbus_data2reg(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ u16 regval;
+
+ if (data->info->direct[class])
+ regval = pmbus_data2reg_direct(data, class, val);
+ else
+ regval = pmbus_data2reg_linear(data, class, val);
+
+ return regval;
+}
+
+/*
+ * Return boolean calculated from converted data.
+ * <index> defines a status register index and mask, and optionally
+ * two sensor indexes.
+ * The upper half-word of <index> references the two optional sensors,
+ * the lower half-word references the status register and mask.
+ *
+ * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
+ * index are set. s1 and s2 (the sensor index values) are zero in this case.
+ * The function returns true if (status[reg] & mask) is true.
+ *
+ * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
+ * a specified limit has to be performed to determine the boolean result.
+ * In this case, the function returns true if v1 >= v2 (where v1 and v2 are
+ * sensor values referenced by sensor indices s1 and s2).
+ *
+ * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
+ * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
+ *
+ * If a negative value is stored in any of the referenced registers, this value
+ * reflects an error code which will be returned.
+ */
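+/*
+ * Encoding example (illustrative): the input-voltage alarm registered in
+ * pmbus_find_attributes() via pmbus_add_boolean_reg(data, "in", "alarm",
+ * in_index, PB_STATUS_BASE, PB_STATUS_VIN_UV) yields
+ * index = (PB_STATUS_BASE << 8) | PB_STATUS_VIN_UV with s1 = s2 = 0, so the
+ * boolean simply mirrors the VIN undervoltage bit of the cached STATUS_BYTE.
+ */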
+static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+{
+ u8 s1 = (index >> 24) & 0xff;
+ u8 s2 = (index >> 16) & 0xff;
+ u8 reg = (index >> 8) & 0xff;
+ u8 mask = index & 0xff;
+ int status;
+ u8 regval;
+
+ status = data->status[reg];
+ if (status < 0)
+ return status;
+
+ regval = status & mask;
+ if (!s1 && !s2)
+ *val = !!regval;
+ else {
+ int v1, v2;
+ struct pmbus_sensor *sensor1, *sensor2;
+
+ sensor1 = &data->sensors[s1];
+ if (sensor1->data < 0)
+ return sensor1->data;
+ sensor2 = &data->sensors[s2];
+ if (sensor2->data < 0)
+ return sensor2->data;
+
+ v1 = pmbus_reg2data(data, sensor1);
+ v2 = pmbus_reg2data(data, sensor2);
+ *val = !!(regval && v1 >= v2);
+ }
+ return 0;
+}
+
+static ssize_t pmbus_show_boolean(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ int val;
+ int err;
+
+ err = pmbus_get_boolean(data, attr->index, &val);
+ if (err)
+ return err;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_show_sensor(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ struct pmbus_sensor *sensor;
+
+ sensor = &data->sensors[attr->index];
+ if (sensor->data < 0)
+ return sensor->data;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+}
+
+static ssize_t pmbus_set_sensor(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor *sensor = &data->sensors[attr->index];
+ ssize_t rv = count;
+ long val = 0;
+ int ret;
+ u16 regval;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ regval = pmbus_data2reg(data, sensor->class, val);
+ ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+ if (ret < 0)
+ rv = ret;
+ else
+ data->sensors[attr->index].data = regval;
+ mutex_unlock(&data->update_lock);
+ return rv;
+}
+
+static ssize_t pmbus_show_label(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ data->labels[attr->index].label);
+}
+
+#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
+do { \
+ struct sensor_device_attribute *a \
+ = &data->_type##s[data->num_##_type##s].attribute; \
+ BUG_ON(data->num_attributes >= data->max_attributes); \
+ a->dev_attr.attr.name = _name; \
+ a->dev_attr.attr.mode = _mode; \
+ a->dev_attr.show = _show; \
+ a->dev_attr.store = _set; \
+ a->index = _idx; \
+ data->attributes[data->num_attributes] = &a->dev_attr.attr; \
+ data->num_attributes++; \
+} while (0)
+
+#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
+ pmbus_show_##_type, NULL)
+
+#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
+ pmbus_show_##_type, pmbus_set_##_type)
+
+static void pmbus_add_boolean(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int idx)
+{
+ struct pmbus_boolean *boolean;
+
+ BUG_ON(data->num_booleans >= data->max_booleans);
+
+ boolean = &data->booleans[data->num_booleans];
+
+ snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+ name, seq, type);
+ PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
+ data->num_booleans++;
+}
+
+static void pmbus_add_boolean_reg(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int reg, int bit)
+{
+ pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+}
+
+static void pmbus_add_boolean_cmp(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int i1, int i2, int reg, int mask)
+{
+ pmbus_add_boolean(data, name, type, seq,
+ (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+}
+
+static void pmbus_add_sensor(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int page, int reg, enum pmbus_sensor_classes class,
+ bool update)
+{
+ struct pmbus_sensor *sensor;
+
+ BUG_ON(data->num_sensors >= data->max_sensors);
+
+ sensor = &data->sensors[data->num_sensors];
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ sensor->page = page;
+ sensor->reg = reg;
+ sensor->class = class;
+ sensor->update = update;
+ if (update)
+ PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ else
+ PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ data->num_sensors++;
+}
+
+static void pmbus_add_label(struct pmbus_data *data,
+ const char *name, int seq,
+ const char *lstring, int index)
+{
+ struct pmbus_label *label;
+
+ BUG_ON(data->num_labels >= data->max_labels);
+
+ label = &data->labels[data->num_labels];
+ snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
+ if (!index)
+ strncpy(label->label, lstring, sizeof(label->label) - 1);
+ else
+ snprintf(label->label, sizeof(label->label), "%s%d", lstring,
+ index);
+
+ PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
+ data->num_labels++;
+}
+
+static const int pmbus_temp_registers[] = {
+ PMBUS_READ_TEMPERATURE_1,
+ PMBUS_READ_TEMPERATURE_2,
+ PMBUS_READ_TEMPERATURE_3
+};
+
+static const int pmbus_fan_registers[] = {
+ PMBUS_READ_FAN_SPEED_1,
+ PMBUS_READ_FAN_SPEED_2,
+ PMBUS_READ_FAN_SPEED_3,
+ PMBUS_READ_FAN_SPEED_4
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_status_registers[] = {
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_34,
+ PMBUS_STATUS_FAN_34
+};
+
+/*
+ * Determine maximum number of sensors, booleans, and labels.
+ * To keep things simple, only make a rough high estimate.
+ */
+static void pmbus_find_max_attr(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, max_sensors, max_booleans, max_labels;
+
+ max_sensors = PMBUS_MAX_INPUT_SENSORS;
+ max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
+ max_labels = PMBUS_MAX_INPUT_LABELS;
+
+ for (page = 0; page < info->pages; page++) {
+ if (info->func[page] & PMBUS_HAVE_VOUT) {
+ max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_IOUT) {
+ max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_POUT) {
+ max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN12) {
+ if (page == 0) {
+ max_sensors +=
+ ARRAY_SIZE(pmbus_fan_registers) *
+ PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans +=
+ ARRAY_SIZE(pmbus_fan_registers) *
+ PMBUS_MAX_BOOLEANS_PER_FAN;
+ } else {
+ max_sensors += PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP) {
+ if (page == 0) {
+ max_sensors +=
+ ARRAY_SIZE(pmbus_temp_registers) *
+ PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans +=
+ ARRAY_SIZE(pmbus_temp_registers) *
+ PMBUS_MAX_BOOLEANS_PER_TEMP;
+ } else {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ }
+ }
+ data->max_sensors = max_sensors;
+ data->max_booleans = max_booleans;
+ data->max_labels = max_labels;
+ data->max_attributes = max_sensors + max_booleans + max_labels;
+}
+
+/*
+ * Search for attributes. Allocate sensors, booleans, and labels as needed.
+ */
+static void pmbus_find_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, i0, i1, in_index;
+
+ /*
+ * Input voltage sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_VIN) {
+ bool have_alarm = false;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vin", 0);
+ pmbus_add_sensor(data, "in", "input", in_index,
+ 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index,
+ 0, PMBUS_VIN_UV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index,
+ 0, PMBUS_VIN_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index,
+ 0, PMBUS_VIN_OV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index,
+ 0, PMBUS_VIN_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE,
+ PB_STATUS_VIN_UV);
+ in_index++;
+ }
+ if (info->func[0] & PMBUS_HAVE_VCAP) {
+ pmbus_add_label(data, "in", in_index, "vcap", 0);
+ pmbus_add_sensor(data, "in", "input", in_index, 0,
+ PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true);
+ in_index++;
+ }
+
+ /*
+ * Output voltage sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_VOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vout", page + 1);
+ pmbus_add_sensor(data, "in", "input", in_index, page,
+ PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index, page,
+ PMBUS_VOUT_UV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index, page,
+ PMBUS_VOUT_OV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_VOUT_OV);
+ in_index++;
+ }
+
+ /*
+ * Current sensors
+ */
+
+ /*
+ * Input current sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_IIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iin", 0);
+ pmbus_add_sensor(data, "curr", "input", in_index,
+ 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index,
+ 0, PMBUS_IIN_OC_WARN_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_WARNING);
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index,
+ 0, PMBUS_IIN_OC_FAULT_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output current sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_IOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iout", page + 1);
+ pmbus_add_sensor(data, "curr", "input", in_index, page,
+ PMBUS_READ_IOUT, PSC_CURRENT_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index, page,
+ PMBUS_IOUT_OC_WARN_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "lcrit", in_index, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "lcrit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_UC_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "curr", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_IOUT_OC);
+ in_index++;
+ }
+
+ /*
+ * Power sensors
+ */
+ /*
+ * Input Power sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_PIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pin", 0);
+ pmbus_add_sensor(data, "power", "input", in_index,
+ 0, PMBUS_READ_PIN, PSC_POWER, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_PIN_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index,
+ 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "power",
+ "alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_PIN_OP_WARNING);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output Power sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool need_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_POUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pout", page + 1);
+ pmbus_add_sensor(data, "power", "input", in_index, page,
+ PMBUS_READ_POUT, PSC_POWER, true);
+ /*
+ * Per hwmon sysfs API, power_cap is to be used to limit output
+ * power.
+ * We have two registers related to maximum output power,
+ * PMBUS_POUT_MAX and PMBUS_POUT_OP_WARN_LIMIT.
+ * PMBUS_POUT_MAX matches the powerX_cap attribute definition.
+ * There is no attribute in the API to match
+ * PMBUS_POUT_OP_WARN_LIMIT. We use powerX_max for now.
+ */
+ if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "cap", in_index, page,
+ PMBUS_POUT_MAX, PSC_POWER, false);
+ need_alarm = true;
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index, page,
+ PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ need_alarm = true;
+ }
+ if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT))
+ pmbus_add_boolean_reg(data, "power", "alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE + page,
+ PB_POUT_OP_WARNING
+ | PB_POWER_LIMITING);
+
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "crit", in_index, page,
+ PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER,
+ false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT)
+ pmbus_add_boolean_reg(data, "power",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE
+ + page,
+ PB_POUT_OP_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Temperature sensors
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int t, temps;
+
+ if (!(info->func[page] & PMBUS_HAVE_TEMP))
+ continue;
+
+ temps = page ? 1 : ARRAY_SIZE(pmbus_temp_registers);
+ for (t = 0; t < temps; t++) {
+ bool have_alarm = false;
+
+ if (!pmbus_check_word_register
+ (client, page, pmbus_temp_registers[t]))
+ break;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "input", in_index, page,
+ pmbus_temp_registers[t],
+ PSC_TEMPERATURE, true);
+
+ /*
+ * PMBus provides only one status register for TEMP1-3.
+ * Thus, we can not use the status register to determine
+ * which of the three sensors actually caused an alarm.
+ * Always compare current temperature against the limit
+ * registers to determine alarm conditions for a
+ * specific sensor.
+ */
+ if (pmbus_check_word_register
+ (client, page, PMBUS_UT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "min", in_index,
+ page, PMBUS_UT_WARN_LIMIT,
+ PSC_TEMPERATURE, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "min_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_UT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "lcrit",
+ in_index, page,
+ PMBUS_UT_FAULT_LIMIT,
+ PSC_TEMPERATURE, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "lcrit_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register
+ (client, page, PMBUS_OT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "max", in_index,
+ page, PMBUS_OT_WARN_LIMIT,
+ PSC_TEMPERATURE, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "max_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_OT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "crit", in_index,
+ page, PMBUS_OT_FAULT_LIMIT,
+ PSC_TEMPERATURE, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "crit_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Last resort - we were not able to create any alarm
+ * registers. Report alarm for all sensors using the
+ * status register temperature alarm bit.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "temp", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_TEMPERATURE);
+ in_index++;
+ }
+ }
+
+ /*
+ * Fans
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int fans, f;
+
+ if (!(info->func[page] & PMBUS_HAVE_FAN12))
+ continue;
+
+ fans = page ? 1 : ARRAY_SIZE(pmbus_fan_registers);
+ for (f = 0; f < fans; f++) {
+ int regval;
+
+ if (!pmbus_check_word_register(client, page,
+ pmbus_fan_registers[f])
+ || !pmbus_check_byte_register(client, page,
+ pmbus_fan_config_registers[f]))
+ break;
+
+ /*
+ * Skip fan if not installed.
+ * Each fan configuration register covers multiple fans,
+ * so we have to do some magic.
+ */
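+ /*
+ * The "magic": fans 1 and 3 (f even) use the INSTALLED bit in the
+ * upper nibble of the configuration register, fans 2 and 4 (f odd)
+ * the one in the lower nibble, hence the shift by (f & 1) * 4.
+ */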
+ regval = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[f]);
+ if (regval < 0 ||
+ (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "fan", "input", in_index, page,
+ pmbus_fan_registers[f], PSC_FAN, true);
+
+ /*
+ * Each fan status register covers multiple fans,
+ * so we have to do some magic.
+ */
+ if (pmbus_check_byte_register
+ (client, page, pmbus_fan_status_registers[f])) {
+ int base;
+
+ if (f > 1) /* fan 3, 4 */
+ base = PB_STATUS_FAN34_BASE;
+ else
+ base = PB_STATUS_FAN_BASE + page;
+ pmbus_add_boolean_reg(data, "fan", "alarm",
+ in_index, base,
+ PB_FAN_FAN1_WARNING >> (f & 1));
+ pmbus_add_boolean_reg(data, "fan", "fault",
+ in_index, base,
+ PB_FAN_FAN1_FAULT >> (f & 1));
+ }
+ in_index++;
+ }
+ }
+}
+
+/*
+ * Identify chip parameters.
+ * This function is called for all chips.
+ */
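+/*
+ * VOUT_MODE example (hypothetical value): 0x17 selects linear mode (upper
+ * three bits 0) with a 5-bit two's-complement parameter of -9, so output
+ * voltages are reported as mantissa * 2^-9 volts.
+ */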
+static int pmbus_identify_common(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ int vout_mode, exponent;
+
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0) {
+ /*
+ * Not all chips support the VOUT_MODE command,
+ * so a failure to read it is not an error.
+ */
+ switch (vout_mode >> 5) {
+ case 0: /* linear mode */
+ if (data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+
+ exponent = vout_mode & 0x1f;
+ /* and sign-extend it */
+ if (exponent & 0x10)
+ exponent |= ~0x1f;
+ data->exponent = exponent;
+ break;
+ case 2: /* direct mode */
+ if (!data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ /* Determine maximum number of sensors, booleans, and labels */
+ pmbus_find_max_attr(client, data);
+ pmbus_clear_fault_page(client, 0);
+ return 0;
+}
+
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info)
+{
+ const struct pmbus_platform_data *pdata = client->dev.platform_data;
+ struct pmbus_data *data;
+ int ret;
+
+ if (!info) {
+ dev_err(&client->dev, "Missing chip information");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&client->dev, "No memory to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * Bail out if status register or PMBus revision register
+ * does not exist.
+ */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
+ || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
+ dev_err(&client->dev,
+ "Status or revision register not found\n");
+ ret = -ENODEV;
+ goto out_data;
+ }
+
+ if (pdata)
+ data->flags = pdata->flags;
+ data->info = info;
+
+ pmbus_clear_faults(client);
+
+ if (info->identify) {
+ ret = (*info->identify)(client, info);
+ if (ret < 0) {
+ dev_err(&client->dev, "Chip identification failed\n");
+ goto out_data;
+ }
+ }
+
+ if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
+ dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
+ info->pages);
+ ret = -EINVAL;
+ goto out_data;
+ }
+ /*
+	 * Bail out if more than one page was configured, but we cannot
+ * select the highest page. This is an indication that the wrong
+ * chip type was selected. Better bail out now than keep
+ * returning errors later on.
+ */
+ if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
+ dev_err(&client->dev, "Failed to select page %d\n",
+ info->pages - 1);
+ ret = -EINVAL;
+ goto out_data;
+ }
+
+ ret = pmbus_identify_common(client, data);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to identify chip capabilities\n");
+ goto out_data;
+ }
+
+ ret = -ENOMEM;
+ data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors,
+ GFP_KERNEL);
+ if (!data->sensors) {
+ dev_err(&client->dev, "No memory to allocate sensor data\n");
+ goto out_data;
+ }
+
+ data->booleans = kzalloc(sizeof(struct pmbus_boolean)
+ * data->max_booleans, GFP_KERNEL);
+ if (!data->booleans) {
+ dev_err(&client->dev, "No memory to allocate boolean data\n");
+ goto out_sensors;
+ }
+
+ data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels,
+ GFP_KERNEL);
+ if (!data->labels) {
+ dev_err(&client->dev, "No memory to allocate label data\n");
+ goto out_booleans;
+ }
+
+ data->attributes = kzalloc(sizeof(struct attribute *)
+ * data->max_attributes, GFP_KERNEL);
+ if (!data->attributes) {
+ dev_err(&client->dev, "No memory to allocate attribute data\n");
+ goto out_labels;
+ }
+
+ pmbus_find_attributes(client, data);
+
+ /*
+ * If there are no attributes, something is wrong.
+ * Bail out instead of trying to register nothing.
+ */
+ if (!data->num_attributes) {
+ dev_err(&client->dev, "No attributes found\n");
+ ret = -ENODEV;
+ goto out_attributes;
+ }
+
+ /* Register sysfs hooks */
+ data->group.attrs = data->attributes;
+ ret = sysfs_create_group(&client->dev.kobj, &data->group);
+ if (ret) {
+ dev_err(&client->dev, "Failed to create sysfs entries\n");
+ goto out_attributes;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ dev_err(&client->dev, "Failed to register hwmon device\n");
+ goto out_hwmon_device_register;
+ }
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+out_attributes:
+ kfree(data->attributes);
+out_labels:
+ kfree(data->labels);
+out_booleans:
+ kfree(data->booleans);
+out_sensors:
+ kfree(data->sensors);
+out_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_probe);
+
+int pmbus_do_remove(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+ kfree(data->attributes);
+ kfree(data->labels);
+ kfree(data->booleans);
+ kfree(data->sensors);
+ kfree(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_remove);
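+
+/*
+ * Usage sketch (illustrative only; the "foo" names below are hypothetical):
+ * a front-end chip driver fills in a struct pmbus_driver_info and forwards
+ * its i2c probe/remove callbacks to this core:
+ *
+ *	static struct pmbus_driver_info foo_info = {
+ *		.pages = 1,
+ *		.func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ *	};
+ *
+ *	static int foo_probe(struct i2c_client *client,
+ *			     const struct i2c_device_id *id)
+ *	{
+ *		return pmbus_do_probe(client, id, &foo_info);
+ *	}
+ *
+ *	static int foo_remove(struct i2c_client *client)
+ *	{
+ *		return pmbus_do_remove(client);
+ *	}
+ */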
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index a610e7880fb3..3ba7dd82589a 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -610,7 +610,7 @@ static int __devexit sht15_remove(struct platform_device *pdev)
struct sht15_data *data = platform_get_drvdata(pdev);
/* Make sure any reads from the device are done and
- * prevent new ones beginnning */
+ * prevent new ones from beginning */
mutex_lock(&data->read_lock);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
new file mode 100644
index 000000000000..97e22bef85ab
--- /dev/null
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -0,0 +1,157 @@
+/*
+ *
+ * TWL4030 MADC hwmon driver - this driver monitors the real-time
+ * conversion of analog signals such as battery temperature,
+ * battery type, battery level, etc. Users can request a conversion on a
+ * particular channel through the sysfs nodes.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c/twl.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/stddef.h>
+#include <linux/sysfs.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+/*
+ * sysfs hook function
+ */
+static ssize_t madc_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct twl4030_madc_request req;
+ long val;
+
+ req.channels = (1 << attr->index);
+ req.method = TWL4030_MADC_SW2;
+ req.func_cb = NULL;
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+
+ return sprintf(buf, "%d\n", req.rbuf[attr->index]);
+}
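+
+/*
+ * Example (illustrative): once this driver is bound, a one-shot conversion
+ * can be requested from user space by reading one of the nodes below, e.g.
+ *	cat /sys/devices/platform/twl4030_madc_hwmon/temp1_input
+ * (the exact sysfs path depends on how the platform device is instantiated).
+ */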
+
+/* sysfs nodes to read individual channels from userspace */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, madc_read, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, madc_read, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, madc_read, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, madc_read, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, madc_read, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, madc_read, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, madc_read, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, madc_read, NULL, 7);
+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, madc_read, NULL, 8);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, madc_read, NULL, 9);
+static SENSOR_DEVICE_ATTR(curr10_input, S_IRUGO, madc_read, NULL, 10);
+static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, madc_read, NULL, 11);
+static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, madc_read, NULL, 12);
+static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, madc_read, NULL, 15);
+
+static struct attribute *twl4030_madc_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_curr10_input.dev_attr.attr,
+ &sensor_dev_attr_in11_input.dev_attr.attr,
+ &sensor_dev_attr_in12_input.dev_attr.attr,
+ &sensor_dev_attr_in15_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group twl4030_madc_group = {
+ .attrs = twl4030_madc_attributes,
+};
+
+static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *hwmon;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group);
+ if (ret)
+ goto err_sysfs;
+ hwmon = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "hwmon_device_register failed.\n");
+		ret = PTR_ERR(hwmon);
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
+err_sysfs:
+
+ return ret;
+}
+
+static int __devexit twl4030_madc_hwmon_remove(struct platform_device *pdev)
+{
+ hwmon_device_unregister(&pdev->dev);
+ sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_hwmon_driver = {
+ .probe = twl4030_madc_hwmon_probe,
+	.remove = __devexit_p(twl4030_madc_hwmon_remove),
+ .driver = {
+ .name = "twl4030_madc_hwmon",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init twl4030_madc_hwmon_init(void)
+{
+ return platform_driver_register(&twl4030_madc_hwmon_driver);
+}
+
+module_init(twl4030_madc_hwmon_init);
+
+static void __exit twl4030_madc_hwmon_exit(void)
+{
+ platform_driver_unregister(&twl4030_madc_hwmon_driver);
+}
+
+module_exit(twl4030_madc_hwmon_exit);
+
+MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("twl4030_madc_hwmon");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index d863e13a50b8..1f36c635d933 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -234,8 +234,7 @@ static const struct attribute_group env_group = {
.attrs = env_attributes,
};
-static int __devinit env_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit env_probe(struct platform_device *op)
{
struct env *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -299,7 +298,7 @@ static const struct of_device_id env_match[] = {
};
MODULE_DEVICE_TABLE(of, env_match);
-static struct of_platform_driver env_driver = {
+static struct platform_driver env_driver = {
.driver = {
.name = "ultra45_env",
.owner = THIS_MODULE,
@@ -311,12 +310,12 @@ static struct of_platform_driver env_driver = {
static int __init env_init(void)
{
- return of_register_platform_driver(&env_driver);
+ return platform_driver_register(&env_driver);
}
static void __exit env_exit(void)
{
- of_unregister_platform_driver(&env_driver);
+ platform_driver_unregister(&env_driver);
}
module_init(env_init);
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..eb4af28f8567
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,22 @@
+#
+# Generic HWSPINLOCK framework
+#
+
+config HWSPINLOCK
+ tristate "Generic Hardware Spinlock framework"
+ help
+ Say y here to support the generic hardware spinlock framework.
+	  You only need to enable this if you have a hardware spinlock module
+ on your system (usually only relevant if your system has remote slave
+ coprocessors).
+
+ If unsure, say N.
+
+config HWSPINLOCK_OMAP
+ tristate "OMAP Hardware Spinlock device"
+ depends on HWSPINLOCK && ARCH_OMAP4
+ help
+	  Say y here to support the OMAP Hardware Spinlock device (first
+	  introduced in OMAP4).
+
+ If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..5729a3f7ed3d
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,6 @@
+#
+# Generic Hardware Spinlock framework
+#
+
+obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
+obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
+/*
+ * Hardware spinlock framework
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/radix-tree.h>
+#include <linux/hwspinlock.h>
+#include <linux/pm_runtime.h>
+
+#include "hwspinlock_internal.h"
+
+/* radix tree tags */
+#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
+
+/*
+ * A radix tree is used to maintain the available hwspinlock instances.
+ * The tree associates hwspinlock pointers with their integer key id,
+ * and provides an easy-to-use API that makes the hwspinlock core code simple
+ * and easy to read.
+ *
+ * Radix trees are quick on lookups and reasonably efficient in terms of
+ * storage, especially with the high-density usage this framework
+ * requires (a contiguous range of integer keys, beginning with zero, is
+ * used as the IDs of the hwspinlock instances).
+ *
+ * The radix tree API supports tagging items in the tree, which this
+ * framework uses to mark unused hwspinlock instances (see the
+ * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
+ * tree, looking for an unused hwspinlock instance, is now reduced to a
+ * single radix tree API call.
+ */
+static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
+
+/*
+ * Synchronization of access to the tree is achieved using this spinlock,
+ * as the radix-tree API requires that users provide all synchronization.
+ */
+static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+
+/**
+ * __hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ * @mode: controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved (if
+ * requested)
+ *
+ * This function attempts to lock an hwspinlock, and will immediately
+ * fail if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption (and possibly
+ * interrupts) is disabled, so the caller must not sleep, and is advised to
+ * release the hwspinlock as soon as possible. This is required in order to
+ * minimize remote cores polling on the hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if so,
+ * whether their previous state should be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_trylock, spin_trylock_irq and
+ * spin_trylock_irqsave.
+ *
+ * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * the hwspinlock was already taken.
+ * This function will never sleep.
+ */
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ int ret;
+
+ BUG_ON(!hwlock);
+ BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+ /*
+ * This spin_lock{_irq, _irqsave} serves three purposes:
+ *
+ * 1. Disable preemption, in order to minimize the period of time
+ * in which the hwspinlock is taken. This is important in order
+ * to minimize the possible polling on the hardware interconnect
+ * by a remote user of this lock.
+ * 2. Make the hwspinlock SMP-safe (so we can take it from
+ * additional contexts on the local host).
+ * 3. Ensure that in_atomic/might_sleep checks catch potential
+ * problems with hwspinlock usage (e.g. scheduler checks like
+ * 'scheduling while atomic' etc.)
+ */
+ if (mode == HWLOCK_IRQSTATE)
+ ret = spin_trylock_irqsave(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ ret = spin_trylock_irq(&hwlock->lock);
+ else
+ ret = spin_trylock(&hwlock->lock);
+
+ /* is lock already taken by another context on the local cpu ? */
+ if (!ret)
+ return -EBUSY;
+
+ /* try to take the hwspinlock device */
+ ret = hwlock->ops->trylock(hwlock);
+
+ /* if hwlock is already taken, undo spin_trylock_* and exit */
+ if (!ret) {
+ if (mode == HWLOCK_IRQSTATE)
+ spin_unlock_irqrestore(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ spin_unlock_irq(&hwlock->lock);
+ else
+ spin_unlock(&hwlock->lock);
+
+ return -EBUSY;
+ }
+
+ /*
+ * We can be sure the other core's memory operations
+ * are observable to us only _after_ we successfully take
+ * the hwspinlock, and we must make sure that subsequent memory
+ * operations (both reads and writes) will not be reordered before
+ * we actually took the hwspinlock.
+ *
+ * Note: the implicit memory barrier of the spinlock above is too
+ * early, so we need this additional explicit memory barrier.
+ */
+ mb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__hwspin_trylock);
+
+/**
+ * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @timeout: timeout value in msecs
+ * @mode: mode which controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved (if
+ * requested)
+ *
+ * This function locks the given @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up after @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * (and possibly local interrupts, too), so the caller must not sleep,
+ * and is advised to release the hwspinlock as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if so,
+ * whether their previous state should be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
+ int mode, unsigned long *flags)
+{
+ int ret;
+ unsigned long expire;
+
+ expire = msecs_to_jiffies(to) + jiffies;
+
+ for (;;) {
+ /* Try to take the hwspinlock */
+ ret = __hwspin_trylock(hwlock, mode, flags);
+ if (ret != -EBUSY)
+ break;
+
+ /*
+ * The lock is already taken, let's check if the user wants
+ * us to try again
+ */
+ if (time_is_before_eq_jiffies(expire))
+ return -ETIMEDOUT;
+
+ /*
+ * Allow platform-specific relax handlers to prevent
+ * hogging the interconnect (no sleeping, though)
+ */
+ if (hwlock->ops->relax)
+ hwlock->ops->relax(hwlock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
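+
+/*
+ * Usage sketch (illustrative, assuming the mode-specific wrappers declared
+ * in <linux/hwspinlock.h>, e.g. hwspin_lock_timeout_irqsave() and
+ * hwspin_unlock_irqrestore()):
+ *
+ *	unsigned long flags;
+ *	int ret;
+ *
+ *	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
+ *	if (!ret) {
+ *		... access the resource shared with the remote core ...
+ *		hwspin_unlock_irqrestore(hwlock, &flags);
+ *	}
+ */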
+
+/**
+ * __hwspin_unlock() - unlock a specific hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @mode: controls whether local interrupts need to be restored or not
+ * @flags: previous caller's interrupt state to restore (if requested)
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * (possibly) enable interrupts or restore their previous state.
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ *
+ * The user decides whether local interrupts should be enabled or not, and
+ * if so, whether their previous state should be restored. It is up
+ * to the user to choose the appropriate @mode of operation, exactly the
+ * same way users decide between spin_unlock, spin_unlock_irq and
+ * spin_unlock_irqrestore.
+ *
+ * The function will never sleep.
+ */
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ BUG_ON(!hwlock);
+ BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+ /*
+ * We must make sure that memory operations (both reads and writes),
+ * done before unlocking the hwspinlock, will not be reordered
+ * after the lock is released.
+ *
+ * That's the purpose of this explicit memory barrier.
+ *
+ * Note: the memory barrier induced by the spin_unlock below is too
+ * late; the other core is going to access memory soon after it will
+ * take the hwspinlock, and by then we want to be sure our memory
+ * operations are already observable.
+ */
+ mb();
+
+ hwlock->ops->unlock(hwlock);
+
+ /* Undo the spin_trylock{_irq, _irqsave} called while locking */
+ if (mode == HWLOCK_IRQSTATE)
+ spin_unlock_irqrestore(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ spin_unlock_irq(&hwlock->lock);
+ else
+ spin_unlock(&hwlock->lock);
+}
+EXPORT_SYMBOL_GPL(__hwspin_unlock);
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock
+ * @hwlock: hwspinlock to register.
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock instance.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ if (!hwlock || !hwlock->ops ||
+ !hwlock->ops->trylock || !hwlock->ops->unlock) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&hwlock->lock);
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
+ if (ret)
+ goto out;
+
+ /* mark this hwspinlock as available */
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* self-sanity check which should never fail */
+ WARN_ON(tmp != hwlock);
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock
+ * @id: index of the specific hwspinlock to unregister
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns the address of hwspinlock @id on success, or NULL on failure
+ */
+struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+{
+ struct hwspinlock *hwlock = NULL;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure the hwspinlock is not in use (tag is set) */
+ ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_err("hwspinlock %d still in use (or not present)\n", id);
+ goto out;
+ }
+
+ hwlock = radix_tree_delete(&hwspinlock_tree, id);
+ if (!hwlock) {
+ pr_err("failed to delete hwspinlock %d\n", id);
+ goto out;
+ }
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+
+/**
+ * __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ *
+ * This is an internal function that prepares an hwspinlock instance
+ * before it is given to the user. The function assumes that
+ * hwspinlock_tree_lock is taken.
+ *
+ * Returns 0 or positive to indicate success, and a negative value to
+ * indicate an error (with the appropriate error code)
+ */
+static int __hwspin_lock_request(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(hwlock->owner)) {
+ dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+ return -EINVAL;
+ }
+
+ /* notify PM core that power is now needed */
+ ret = pm_runtime_get_sync(hwlock->dev);
+ if (ret < 0) {
+ dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+ return ret;
+ }
+
+ /* mark hwspinlock as used, should not fail */
+ tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* self-sanity check that should never fail */
+ WARN_ON(tmp != hwlock);
+
+ return ret;
+}
+
+/**
+ * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
+ * @hwlock: a valid hwspinlock instance
+ *
+ * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ */
+int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+ if (!hwlock) {
+ pr_err("invalid hwlock\n");
+ return -EINVAL;
+ }
+
+ return hwlock->id;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
+
+/**
+ * hwspin_lock_request() - request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request(void)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* look for an unused lock */
+ ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
+ 0, 1, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_warn("a free hwspinlock is not available\n");
+ hwlock = NULL;
+ goto out;
+ }
+
+ /* sanity check that should never fail */
+ WARN_ON(ret > 1);
+
+ /* mark as used and power up */
+ ret = __hwspin_lock_request(hwlock);
+ if (ret < 0)
+ hwlock = NULL;
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request);
+
+/**
+ * hwspin_lock_request_specific() - request for a specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure this hwspinlock exists */
+ hwlock = radix_tree_lookup(&hwspinlock_tree, id);
+ if (!hwlock) {
+ pr_warn("hwspinlock %u does not exist\n", id);
+ goto out;
+ }
+
+ /* sanity check (this shouldn't happen) */
+ WARN_ON(hwlock->id != id);
+
+ /* make sure this hwspinlock is unused */
+ ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_warn("hwspinlock %u is already in use\n", id);
+ hwlock = NULL;
+ goto out;
+ }
+
+ /* mark as used and power up */
+ ret = __hwspin_lock_request(hwlock);
+ if (ret < 0)
+ hwlock = NULL;
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
+
+/**
+ * hwspin_lock_free() - free a specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ if (!hwlock) {
+ pr_err("invalid hwlock\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure the hwspinlock is used */
+ ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+ if (ret == 1) {
+ dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+ dump_stack();
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* notify the underlying device that power is not needed */
+ ret = pm_runtime_put(hwlock->dev);
+ if (ret < 0)
+ goto out;
+
+ /* mark this hwspinlock as available */
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* sanity check (this shouldn't happen) */
+ WARN_ON(tmp != hwlock);
+
+ module_put(hwlock->owner);
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_free);
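+
+/*
+ * Lifecycle sketch (illustrative): a user typically allocates a lock with
+ * hwspin_lock_request() (or hwspin_lock_request_specific() for a fixed id),
+ * shares the value returned by hwspin_lock_get_id() with the remote core,
+ * takes and releases the lock with the calls above, and finally returns it
+ * with hwspin_lock_free() when it is no longer needed.
+ */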
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock interface");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Hardware spinlocks internal header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HWSPINLOCK_HWSPINLOCK_H
+#define __HWSPINLOCK_HWSPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+/**
+ * struct hwspinlock_ops - platform-specific hwspinlock handlers
+ *
+ * @trylock: make a single attempt to take the lock. returns 0 on
+ * failure and true on success. may _not_ sleep.
+ * @unlock: release the lock. always succeeds. may _not_ sleep.
+ * @relax: optional, platform-specific relax handler, called by hwspinlock
+ * core while spinning on a lock, between two successive
+ * invocations of @trylock. may _not_ sleep.
+ */
+struct hwspinlock_ops {
+ int (*trylock)(struct hwspinlock *lock);
+ void (*unlock)(struct hwspinlock *lock);
+ void (*relax)(struct hwspinlock *lock);
+};
+
+/**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ *
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @id: a global, unique, system-wide, index of the lock.
+ * @lock: initialized and used by hwspinlock core
+ * @owner: underlying implementation module, used to maintain module ref count
+ *
+ * Note: we currently opt for simplicity; later we could squeeze some
+ * memory bytes by grouping the dev, ops and owner members in a single
+ * per-platform struct and having all hwspinlocks point at it.
+ */
+struct hwspinlock {
+ struct device *dev;
+ const struct hwspinlock_ops *ops;
+ int id;
+ spinlock_t lock;
+ struct module *owner;
+};
+
+#endif /* __HWSPINLOCK_HWSPINLOCK_H */
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
new file mode 100644
index 000000000000..a8f02734c026
--- /dev/null
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -0,0 +1,231 @@
+/*
+ * OMAP hardware spinlock driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Simon Que <sque@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/* Spinlock register offsets */
+#define SYSSTATUS_OFFSET 0x0014
+#define LOCK_BASE_OFFSET 0x0800
+
+#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24)
+
+/* Possible values of SPINLOCK_LOCK_REG */
+#define SPINLOCK_NOTTAKEN (0) /* free */
+#define SPINLOCK_TAKEN (1) /* locked */
+
+#define to_omap_hwspinlock(lock) \
+ container_of(lock, struct omap_hwspinlock, lock)
+
+struct omap_hwspinlock {
+ struct hwspinlock lock;
+ void __iomem *addr;
+};
+
+struct omap_hwspinlock_state {
+ int num_locks; /* Total number of locks in system */
+ void __iomem *io_base; /* Mapped base address */
+};
+
+static int omap_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+ /* attempt to acquire the lock by reading its value */
+ return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+}
+
+static void omap_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+ /* release the lock by writing 0 to it */
+ writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+}
+
+/*
+ * relax the OMAP interconnect while spinning on it.
+ *
+ * The specs recommend that the retry delay time be
+ * just over half of the time that a requester would be
+ * expected to hold the lock.
+ *
+ * The number below is taken from an example in the hardware specs,
+ * so it is somewhat arbitrary.
+ */
+static void omap_hwspinlock_relax(struct hwspinlock *lock)
+{
+ ndelay(50);
+}
+
+static const struct hwspinlock_ops omap_hwspinlock_ops = {
+ .trylock = omap_hwspinlock_trylock,
+ .unlock = omap_hwspinlock_unlock,
+ .relax = omap_hwspinlock_relax,
+};
+
+static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct omap_hwspinlock *omap_lock;
+ struct omap_hwspinlock_state *state;
+ struct hwspinlock *lock;
+ struct resource *res;
+ void __iomem *io_base;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (!io_base) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+
+ /* Determine number of locks */
+ i = readl(io_base + SYSSTATUS_OFFSET);
+ i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
+
+	/* exactly one of the four LSBs must be set (i is 1, 2, 4 or 8), and nothing else */
+ if (hweight_long(i & 0xf) != 1 || i > 8) {
+ ret = -EINVAL;
+ goto iounmap_base;
+ }
+
+ state->num_locks = i * 32;
+ state->io_base = io_base;
+
+ platform_set_drvdata(pdev, state);
+
+ /*
+ * runtime PM will make sure the clock of this module is
+ * enabled iff at least one lock is requested
+ */
+ pm_runtime_enable(&pdev->dev);
+
+ for (i = 0; i < state->num_locks; i++) {
+ omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
+ if (!omap_lock) {
+ ret = -ENOMEM;
+ goto free_locks;
+ }
+
+ omap_lock->lock.dev = &pdev->dev;
+ omap_lock->lock.owner = THIS_MODULE;
+ omap_lock->lock.id = i;
+ omap_lock->lock.ops = &omap_hwspinlock_ops;
+ omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+
+ ret = hwspin_lock_register(&omap_lock->lock);
+ if (ret) {
+ kfree(omap_lock);
+ goto free_locks;
+ }
+ }
+
+ return 0;
+
+free_locks:
+ while (--i >= 0) {
+ lock = hwspin_lock_unregister(i);
+		/* this shouldn't happen, but let's give our best effort */
+ if (!lock) {
+ dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
+ continue;
+ }
+ omap_lock = to_omap_hwspinlock(lock);
+ kfree(omap_lock);
+ }
+ pm_runtime_disable(&pdev->dev);
+iounmap_base:
+ iounmap(io_base);
+free_state:
+ kfree(state);
+ return ret;
+}
+
+static int omap_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
+ struct hwspinlock *lock;
+ struct omap_hwspinlock *omap_lock;
+ int i;
+
+ for (i = 0; i < state->num_locks; i++) {
+ lock = hwspin_lock_unregister(i);
+ /* this shouldn't happen at this point. if it does, at least
+ * don't continue with the remove */
+ if (!lock) {
+ dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
+ return -EBUSY;
+ }
+
+ omap_lock = to_omap_hwspinlock(lock);
+ kfree(omap_lock);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+ iounmap(state->io_base);
+ kfree(state);
+
+ return 0;
+}
+
+static struct platform_driver omap_hwspinlock_driver = {
+ .probe = omap_hwspinlock_probe,
+ .remove = omap_hwspinlock_remove,
+ .driver = {
+ .name = "omap_hwspinlock",
+ },
+};
+
+static int __init omap_hwspinlock_init(void)
+{
+ return platform_driver_register(&omap_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(omap_hwspinlock_init);
+
+static void __exit omap_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&omap_hwspinlock_driver);
+}
+module_exit(omap_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
+MODULE_AUTHOR("Simon Que <sque@ti.com>");
+MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 23ac61e2db39..beee6b2d361d 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_I2C_MUX) += i2c-mux.o
obj-y += algos/ busses/ muxes/
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
+CFLAGS_i2c-core.o := -Wno-deprecated-declarations
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 113505a6434e..14abb7bde741 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -452,6 +452,16 @@ config I2C_MV64XXX
This driver can also be built as a module. If so, the module
will be called i2c-mv64xxx.
+config I2C_MXS
+ tristate "Freescale i.MX28 I2C interface"
+ depends on SOC_IMX28
+ help
+ Say Y here if you want to use the I2C bus controller on
+ the Freescale i.MX28 processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mxs.
+
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
depends on PLAT_NOMADIK
@@ -523,17 +533,31 @@ config I2C_PNX
This driver can also be built as a module. If so, the module
will be called i2c-pnx.
+config I2C_PUV3
+ tristate "PKUnity v3 I2C bus support"
+ depends on UNICORE32 && ARCH_PUV3
+ select I2C_ALGOBIT
+ help
+	  This driver supports the I2C IP inside the PKUnity-v3 SoC.
+	  This I2C bus controller sits on the AMBA/AXI bus.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-puv3.
+
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
help
If you have devices in the PXA I2C bus, say yes to this option.
This driver can also be built as a module. If so, the module
will be called i2c-pxa.
+config I2C_PXA_PCI
+ def_bool I2C_PXA && X86_32 && PCI && OF
+
config I2C_PXA_SLAVE
bool "Intel PXA2XX I2C Slave comms support"
- depends on I2C_PXA
+ depends on I2C_PXA && !X86_32
help
Support I2C slave mode communications on the PXA I2C bus. This
is necessary for systems where the PXA may be a target on the
@@ -607,6 +631,13 @@ config I2C_STU300
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_TEGRA
+ tristate "NVIDIA Tegra internal I2C controller"
+ depends on ARCH_TEGRA
+ help
+	  If you say yes to this option, support will be included for the
+	  I2C controller embedded in NVIDIA Tegra SoCs.
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
@@ -639,15 +670,28 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_EG20T
- tristate "PCH I2C of Intel EG20T"
- depends on PCI
- help
- This driver is for PCH(Platform controller Hub) I2C of EG20T which
- is an IOH(Input/Output Hub) for x86 embedded processor.
- This driver can access PCH I2C bus device.
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH"
+ depends on PCI
+ help
+	  This driver is for the I2C controller of the Intel EG20T PCH
+	  (Platform Controller Hub), an IOH (Input/Output Hub) for x86
+	  embedded processors. It provides access to the devices on the
+	  PCH I2C bus.
+
+ This driver also supports the ML7213, a companion chip for the
+ Atom E6xx series and compatible with the Intel EG20T PCH.
comment "External I2C/SMBus adapter drivers"
+config I2C_DIOLAN_U2C
+ tristate "Diolan U2C-12 USB adapter"
+ depends on USB
+ help
+	  If you say yes to this option, support will be included for the
+	  Diolan U2C-12, a USB-to-I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-diolan-u2c.
+
config I2C_PARPORT
tristate "Parallel port adapter"
depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 9d2d0ec7fb23..e6cf294d3729 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
+obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
@@ -51,19 +52,23 @@ obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
+obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
+obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
# External I2C/SMBus adapter drivers
+obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index f2de3be35df3..3a20961bef1e 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -634,8 +634,7 @@ static void cpm_i2c_shutdown(struct cpm_i2c *cpm)
cpm_muram_free(cpm->i2c_addr);
}
-static int __devinit cpm_i2c_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit cpm_i2c_probe(struct platform_device *ofdev)
{
int result, len;
struct cpm_i2c *cpm;
@@ -718,7 +717,7 @@ static const struct of_device_id cpm_i2c_match[] = {
MODULE_DEVICE_TABLE(of, cpm_i2c_match);
-static struct of_platform_driver cpm_i2c_driver = {
+static struct platform_driver cpm_i2c_driver = {
.probe = cpm_i2c_probe,
.remove = __devexit_p(cpm_i2c_remove),
.driver = {
@@ -730,12 +729,12 @@ static struct of_platform_driver cpm_i2c_driver = {
static int __init cpm_i2c_init(void)
{
- return of_register_platform_driver(&cpm_i2c_driver);
+ return platform_driver_register(&cpm_i2c_driver);
}
static void __exit cpm_i2c_exit(void)
{
- of_unregister_platform_driver(&cpm_i2c_driver);
+ platform_driver_unregister(&cpm_i2c_driver);
}
module_init(cpm_i2c_init);
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
new file mode 100644
index 000000000000..76366716a854
--- /dev/null
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -0,0 +1,535 @@
+/*
+ * Driver for the Diolan u2c-12 USB-I2C adapter
+ *
+ * Copyright (c) 2010-2011 Ericsson AB
+ *
+ * Derived from:
+ * i2c-tiny-usb.c
+ * Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#define DRIVER_NAME "i2c-diolan-u2c"
+
+#define USB_VENDOR_ID_DIOLAN 0x0abf
+#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
+
+#define DIOLAN_OUT_EP 0x02
+#define DIOLAN_IN_EP 0x84
+
+/* commands via USB, must match command ids in the firmware */
+#define CMD_I2C_READ 0x01
+#define CMD_I2C_WRITE 0x02
+#define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */
+#define CMD_I2C_RELEASE_SDA 0x04
+#define CMD_I2C_RELEASE_SCL 0x05
+#define CMD_I2C_DROP_SDA 0x06
+#define CMD_I2C_DROP_SCL 0x07
+#define CMD_I2C_READ_SDA 0x08
+#define CMD_I2C_READ_SCL 0x09
+#define CMD_GET_FW_VERSION 0x0a
+#define CMD_GET_SERIAL 0x0b
+#define CMD_I2C_START 0x0c
+#define CMD_I2C_STOP 0x0d
+#define CMD_I2C_REPEATED_START 0x0e
+#define CMD_I2C_PUT_BYTE 0x0f
+#define CMD_I2C_GET_BYTE 0x10
+#define CMD_I2C_PUT_ACK 0x11
+#define CMD_I2C_GET_ACK 0x12
+#define CMD_I2C_PUT_BYTE_ACK 0x13
+#define CMD_I2C_GET_BYTE_ACK 0x14
+#define CMD_I2C_SET_SPEED 0x1b
+#define CMD_I2C_GET_SPEED 0x1c
+#define CMD_I2C_SET_CLK_SYNC 0x24
+#define CMD_I2C_GET_CLK_SYNC 0x25
+#define CMD_I2C_SET_CLK_SYNC_TO 0x26
+#define CMD_I2C_GET_CLK_SYNC_TO 0x27
+
+#define RESP_OK 0x00
+#define RESP_FAILED 0x01
+#define RESP_BAD_MEMADDR 0x04
+#define RESP_DATA_ERR 0x05
+#define RESP_NOT_IMPLEMENTED 0x06
+#define RESP_NACK 0x07
+#define RESP_TIMEOUT 0x09
+
+#define U2C_I2C_SPEED_FAST 0 /* 400 kHz */
+#define U2C_I2C_SPEED_STD 1 /* 100 kHz */
+#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
+#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
+
+#define U2C_I2C_FREQ_FAST 400000
+#define U2C_I2C_FREQ_STD 100000
+#define U2C_I2C_FREQ(s)		(1000000 / (2 * ((s) - 1) + 10))
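+/*
+ * Example (illustrative): loading with frequency=2000 gives
+ * U2C_I2C_SPEED(2000) = (500 - 10) / 2 + 1 = 246, which diolan_init()
+ * clamps to U2C_I2C_SPEED_2KHZ (242); the effective bus frequency is then
+ * U2C_I2C_FREQ(242) = 1000000 / (2 * 241 + 10) = 2032 Hz.
+ */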
+
+#define DIOLAN_USB_TIMEOUT 100 /* in ms */
+#define DIOLAN_SYNC_TIMEOUT 20 /* in ms */
+
+#define DIOLAN_OUTBUF_LEN 128
+#define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4)
+#define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */
+
+/* Structure to hold all of our device specific stuff */
+struct i2c_diolan_u2c {
+ u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
+ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
+ struct usb_device *usb_dev; /* the usb device for this device */
+ struct usb_interface *interface;/* the interface for this device */
+ struct i2c_adapter adapter; /* i2c related things */
+ int olen; /* Output buffer length */
+ int ocount; /* Number of enqueued messages */
+};
+
+static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */
+
+module_param(frequency, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
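+
+/*
+ * Example (illustrative): the bus speed can be chosen at load time with
+ *	modprobe i2c-diolan-u2c frequency=400000
+ * or adjusted later through /sys/module/i2c_diolan_u2c/parameters/frequency.
+ */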
+
+/* usb layer */
+
+/* Send command to device, and get response. */
+static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
+{
+ int ret = 0;
+ int actual;
+ int i;
+
+ if (!dev->olen || !dev->ocount)
+ return -EINVAL;
+
+ ret = usb_bulk_msg(dev->usb_dev,
+ usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP),
+ dev->obuffer, dev->olen, &actual,
+ DIOLAN_USB_TIMEOUT);
+ if (!ret) {
+ for (i = 0; i < dev->ocount; i++) {
+ int tmpret;
+
+ tmpret = usb_bulk_msg(dev->usb_dev,
+ usb_rcvbulkpipe(dev->usb_dev,
+ DIOLAN_IN_EP),
+ dev->ibuffer,
+ sizeof(dev->ibuffer), &actual,
+ DIOLAN_USB_TIMEOUT);
+ /*
+ * Stop command processing if a previous command
+ * returned an error.
+ * Note that we still need to retrieve all messages.
+ */
+ if (ret < 0)
+ continue;
+ ret = tmpret;
+ if (ret == 0 && actual > 0) {
+ switch (dev->ibuffer[actual - 1]) {
+ case RESP_NACK:
+ /*
+ * Return ENXIO if NACK was received as
+ * response to the address phase,
+ * EIO otherwise
+ */
+ ret = i == 1 ? -ENXIO : -EIO;
+ break;
+ case RESP_TIMEOUT:
+ ret = -ETIMEDOUT;
+ break;
+ case RESP_OK:
+ /* strip off return code */
+ ret = actual - 1;
+ break;
+ default:
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+ }
+ dev->olen = 0;
+ dev->ocount = 0;
+ return ret;
+}
+
+static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush)
+{
+ if (flush || dev->olen >= DIOLAN_FLUSH_LEN)
+ return diolan_usb_transfer(dev);
+ return 0;
+}
+
+/* Send command (no data) */
+static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush)
+{
+ dev->obuffer[dev->olen++] = command;
+ dev->ocount++;
+ return diolan_write_cmd(dev, flush);
+}
+
+/* Send command with one byte of data */
+static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data,
+ bool flush)
+{
+ dev->obuffer[dev->olen++] = command;
+ dev->obuffer[dev->olen++] = data;
+ dev->ocount++;
+ return diolan_write_cmd(dev, flush);
+}
+
+/* Send command with two bytes of data */
+static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1,
+ u8 d2, bool flush)
+{
+ dev->obuffer[dev->olen++] = command;
+ dev->obuffer[dev->olen++] = d1;
+ dev->obuffer[dev->olen++] = d2;
+ dev->ocount++;
+ return diolan_write_cmd(dev, flush);
+}
+
+/*
+ * Flush input queue.
+ * If we don't do this at startup and the controller has queued up
+ * messages which were not retrieved, it will stop responding
+ * at some point.
+ */
+static void diolan_flush_input(struct i2c_diolan_u2c *dev)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ int actual = 0;
+ int ret;
+
+ ret = usb_bulk_msg(dev->usb_dev,
+ usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP),
+ dev->ibuffer, sizeof(dev->ibuffer), &actual,
+ DIOLAN_USB_TIMEOUT);
+ if (ret < 0 || actual == 0)
+ break;
+ }
+ if (i == 10)
+ dev_err(&dev->interface->dev, "Failed to flush input buffer\n");
+}
+
+static int diolan_i2c_start(struct i2c_diolan_u2c *dev)
+{
+ return diolan_usb_cmd(dev, CMD_I2C_START, false);
+}
+
+static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev)
+{
+ return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false);
+}
+
+static int diolan_i2c_stop(struct i2c_diolan_u2c *dev)
+{
+ return diolan_usb_cmd(dev, CMD_I2C_STOP, true);
+}
+
+static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack,
+ u8 *byte)
+{
+ int ret;
+
+ ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true);
+ if (ret > 0)
+ *byte = dev->ibuffer[0];
+ else if (ret == 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte)
+{
+ return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false);
+}
+
+static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed)
+{
+ return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true);
+}
+
+/* Enable or disable clock synchronization (stretching) */
+static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable)
+{
+ return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true);
+}
+
+/* Set clock synchronization timeout in ms */
+static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms)
+{
+ int to_val = ms * 10;
+
+ return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO,
+ to_val & 0xff, (to_val >> 8) & 0xff, true);
+}
+
+static void diolan_fw_version(struct i2c_diolan_u2c *dev)
+{
+ int ret;
+
+ ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true);
+ if (ret >= 2)
+ dev_info(&dev->interface->dev,
+ "Diolan U2C firmware version %u.%u\n",
+ (unsigned int)dev->ibuffer[0],
+ (unsigned int)dev->ibuffer[1]);
+}
+
+static void diolan_get_serial(struct i2c_diolan_u2c *dev)
+{
+ int ret;
+ u32 serial;
+
+ ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true);
+ if (ret >= 4) {
+ serial = le32_to_cpu(*(u32 *)dev->ibuffer);
+ dev_info(&dev->interface->dev,
+ "Diolan U2C serial number %u\n", serial);
+ }
+}
+
+static int diolan_init(struct i2c_diolan_u2c *dev)
+{
+ int speed, ret;
+
+ if (frequency >= 200000) {
+ speed = U2C_I2C_SPEED_FAST;
+ frequency = U2C_I2C_FREQ_FAST;
+ } else if (frequency >= 100000 || frequency == 0) {
+ speed = U2C_I2C_SPEED_STD;
+ frequency = U2C_I2C_FREQ_STD;
+ } else {
+ speed = U2C_I2C_SPEED(frequency);
+ if (speed > U2C_I2C_SPEED_2KHZ)
+ speed = U2C_I2C_SPEED_2KHZ;
+ frequency = U2C_I2C_FREQ(speed);
+ }
+
+ dev_info(&dev->interface->dev,
+ "Diolan U2C at USB bus %03d address %03d speed %d Hz\n",
+ dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency);
+
+ diolan_flush_input(dev);
+ diolan_fw_version(dev);
+ diolan_get_serial(dev);
+
+ /* Set I2C speed */
+ ret = diolan_set_speed(dev, speed);
+ if (ret < 0)
+ return ret;
+
+ /* Configure I2C clock synchronization */
+ ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST);
+ if (ret < 0)
+ return ret;
+
+ if (speed != U2C_I2C_SPEED_FAST)
+ ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT);
+
+ return ret;
+}
+
+/* i2c layer */
+
+static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter);
+ struct i2c_msg *pmsg;
+ int i, j;
+ int ret, sret;
+
+ ret = diolan_i2c_start(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num; i++) {
+ pmsg = &msgs[i];
+ if (i) {
+ ret = diolan_i2c_repeated_start(dev);
+ if (ret < 0)
+ goto abort;
+ }
+ if (pmsg->flags & I2C_M_RD) {
+ ret =
+ diolan_i2c_put_byte_ack(dev, (pmsg->addr << 1) | 1);
+ if (ret < 0)
+ goto abort;
+ for (j = 0; j < pmsg->len; j++) {
+ u8 byte;
+ bool ack = j < pmsg->len - 1;
+
+ /*
+ * Don't send NACK if this is the first byte
+ * of a SMBUS_BLOCK message.
+ */
+ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN))
+ ack = true;
+
+ ret = diolan_i2c_get_byte_ack(dev, ack, &byte);
+ if (ret < 0)
+ goto abort;
+ /*
+ * Adjust count if first received byte is length
+ */
+ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) {
+ if (byte == 0
+ || byte > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EPROTO;
+ goto abort;
+ }
+ pmsg->len += byte;
+ }
+ pmsg->buf[j] = byte;
+ }
+ } else {
+ ret = diolan_i2c_put_byte_ack(dev, pmsg->addr << 1);
+ if (ret < 0)
+ goto abort;
+ for (j = 0; j < pmsg->len; j++) {
+ ret = diolan_i2c_put_byte_ack(dev,
+ pmsg->buf[j]);
+ if (ret < 0)
+ goto abort;
+ }
+ }
+ }
+abort:
+ sret = diolan_i2c_stop(dev);
+ if (sret < 0 && ret >= 0)
+ ret = sret;
+ return ret;
+}
+
+/*
+ * Return list of supported functionality.
+ */
+static u32 diolan_usb_func(struct i2c_adapter *a)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm diolan_usb_algorithm = {
+ .master_xfer = diolan_usb_xfer,
+ .functionality = diolan_usb_func,
+};
+
+/* device layer */
+
+static const struct usb_device_id diolan_u2c_table[] = {
+ { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, diolan_u2c_table);
+
+static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
+{
+ usb_put_dev(dev->usb_dev);
+ kfree(dev);
+}
+
+static int diolan_u2c_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct i2c_diolan_u2c *dev;
+ int ret;
+
+ /* allocate memory for our device state and initialize it */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&interface->dev, "no memory for device state\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+ dev->interface = interface;
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
+ /* setup i2c adapter description */
+ dev->adapter.owner = THIS_MODULE;
+ dev->adapter.class = I2C_CLASS_HWMON;
+ dev->adapter.algo = &diolan_usb_algorithm;
+ i2c_set_adapdata(&dev->adapter, dev);
+ snprintf(dev->adapter.name, sizeof(dev->adapter.name),
+ DRIVER_NAME " at bus %03d device %03d",
+ dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
+
+ dev->adapter.dev.parent = &dev->interface->dev;
+
+ /* initialize diolan i2c interface */
+ ret = diolan_init(dev);
+ if (ret < 0) {
+ dev_err(&interface->dev, "failed to initialize adapter\n");
+ goto error_free;
+ }
+
+ /* and finally attach to i2c layer */
+ ret = i2c_add_adapter(&dev->adapter);
+ if (ret < 0) {
+ dev_err(&interface->dev, "failed to add I2C adapter\n");
+ goto error_free;
+ }
+
+ dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");
+
+ return 0;
+
+error_free:
+ usb_set_intfdata(interface, NULL);
+ diolan_u2c_free(dev);
+error:
+ return ret;
+}
+
+static void diolan_u2c_disconnect(struct usb_interface *interface)
+{
+ struct i2c_diolan_u2c *dev = usb_get_intfdata(interface);
+
+ i2c_del_adapter(&dev->adapter);
+ usb_set_intfdata(interface, NULL);
+ diolan_u2c_free(dev);
+
+ dev_dbg(&interface->dev, "disconnected\n");
+}
+
+static struct usb_driver diolan_u2c_driver = {
+ .name = DRIVER_NAME,
+ .probe = diolan_u2c_probe,
+ .disconnect = diolan_u2c_disconnect,
+ .id_table = diolan_u2c_table,
+};
+
+static int __init diolan_u2c_init(void)
+{
+ /* register this driver with the USB subsystem */
+ return usb_register(&diolan_u2c_driver);
+}
+
+static void __exit diolan_u2c_exit(void)
+{
+ /* deregister this driver from the USB subsystem */
+ usb_deregister(&diolan_u2c_driver);
+}
+
+module_init(diolan_u2c_init);
+module_exit(diolan_u2c_exit);
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION(DRIVER_NAME " driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 2e067dd2ee51..c57c83734692 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -131,6 +131,13 @@
#define pch_pci_dbg(pdev, fmt, arg...) \
dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
+/*
+ * Maximum number of I2C channel instances:
+ * Intel EG20T PCH : 1ch
+ * OKI SEMICONDUCTOR ML7213 IOH : 2ch
+ */
+#define PCH_I2C_MAX_DEV 2
+
/**
* struct i2c_algo_pch_data - for I2C driver functionalities
* @pch_adapter: stores the reference to i2c_adapter structure
@@ -155,12 +162,14 @@ struct i2c_algo_pch_data {
* @pch_data: stores a list of i2c_algo_pch_data
* @pch_i2c_suspended: specifies whether the system is suspended or not
* perhaps with more lines and words.
+ * @ch_num: specifies the number of i2c instance
*
* pch_data has as many elements as maximum I2C channels
*/
struct adapter_info {
- struct i2c_algo_pch_data pch_data;
+ struct i2c_algo_pch_data pch_data[PCH_I2C_MAX_DEV];
bool pch_i2c_suspended;
+ int ch_num;
};
@@ -169,8 +178,13 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
+/* Definition for ML7213 by OKI SEMICONDUCTOR */
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_ML7213_I2C 0x802D
+
static struct pci_device_id __devinitdata pch_pcidev_id[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{0,}
};
@@ -211,8 +225,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
/* Initialize I2C registers */
iowrite32(0x21, p + PCH_I2CNF);
- pch_setbit(adap->pch_base_address, PCH_I2CCTL,
- PCH_I2CCTL_I2CMEN);
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_I2CCTL_I2CMEN);
if (pch_i2c_speed != 400)
pch_i2c_speed = 100;
@@ -254,7 +267,7 @@ static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
* @timeout: waiting time counter (us).
*/
static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
- s32 timeout)
+ s32 timeout)
{
void __iomem *p = adap->pch_base_address;
@@ -474,8 +487,8 @@ static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
* @last: specifies whether last message or not.
* @first: specifies whether first message or not.
*/
-s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
- u32 last, u32 first)
+static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+ u32 last, u32 first)
{
struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
@@ -568,10 +581,10 @@ s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
}
/**
- * pch_i2c_cb_ch0() - Interrupt handler Call back function
+ * pch_i2c_cb() - Interrupt handler Call back function
* @adap: Pointer to struct i2c_algo_pch_data.
*/
-static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
+static void pch_i2c_cb(struct i2c_algo_pch_data *adap)
{
u32 sts;
void __iomem *p = adap->pch_base_address;
@@ -599,24 +612,30 @@ static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
*/
static irqreturn_t pch_i2c_handler(int irq, void *pData)
{
- s32 reg_val;
-
- struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData;
- void __iomem *p = adap_data->pch_base_address;
- u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE);
-
- if (mode != NORMAL_MODE) {
- pch_err(adap_data, "I2C mode is not supported\n");
- return IRQ_NONE;
+ u32 reg_val;
+ int flag;
+ int i;
+ struct adapter_info *adap_info = pData;
+ void __iomem *p;
+ u32 mode;
+
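+ /* All channels share a single interrupt line; scan each instance */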
+ for (i = 0, flag = 0; i < adap_info->ch_num; i++) {
+ p = adap_info->pch_data[i].pch_base_address;
+ mode = ioread32(p + PCH_I2CMOD);
+ mode &= BUFFER_MODE | EEPROM_SR_MODE;
+ if (mode != NORMAL_MODE) {
+ pch_err(adap_info->pch_data,
+ "I2C-%d mode(%d) is not supported\n", mode, i);
+ continue;
+ }
+ reg_val = ioread32(p + PCH_I2CSR);
+ if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT)) {
+ pch_i2c_cb(&adap_info->pch_data[i]);
+ flag = 1;
+ }
}
- reg_val = ioread32(p + PCH_I2CSR);
- if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
- pch_i2c_cb_ch0(adap_data);
- else
- return IRQ_NONE;
-
- return IRQ_HANDLED;
+ return flag ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -626,7 +645,7 @@ static irqreturn_t pch_i2c_handler(int irq, void *pData)
* @num: number of messages.
*/
static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, s32 num)
+ struct i2c_msg *msgs, s32 num)
{
struct i2c_msg *pmsg;
u32 i = 0;
@@ -709,11 +728,13 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
}
static int __devinit pch_i2c_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
void __iomem *base_addr;
- s32 ret;
+ int ret;
+ int i, j;
struct adapter_info *adap_info;
+ struct i2c_adapter *pch_adap;
pch_pci_dbg(pdev, "Entered.\n");
@@ -743,44 +764,48 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
goto err_pci_iomap;
}
- adap_info->pch_i2c_suspended = false;
+ /* Set the number of I2C channel instance */
+ adap_info->ch_num = id->driver_data;
- adap_info->pch_data.p_adapter_info = adap_info;
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_adap = &adap_info->pch_data[i].pch_adapter;
+ adap_info->pch_i2c_suspended = false;
- adap_info->pch_data.pch_adapter.owner = THIS_MODULE;
- adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
- strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
- adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
- adap_info->pch_data.pch_adapter.algo_data =
- &adap_info->pch_data;
+ adap_info->pch_data[i].p_adapter_info = adap_info;
- /* (i * 0x80) + base_addr; */
- adap_info->pch_data.pch_base_address = base_addr;
+ pch_adap->owner = THIS_MODULE;
+ pch_adap->class = I2C_CLASS_HWMON;
+ strcpy(pch_adap->name, KBUILD_MODNAME);
+ pch_adap->algo = &pch_algorithm;
+ pch_adap->algo_data = &adap_info->pch_data[i];
- adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev;
+ /* base_addr + offset; */
+ adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
- ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter));
+ pch_adap->dev.parent = &pdev->dev;
- if (ret) {
- pch_pci_err(pdev, "i2c_add_adapter FAILED\n");
- goto err_i2c_add_adapter;
- }
+ ret = i2c_add_adapter(pch_adap);
+ if (ret) {
+ pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
+ goto err_i2c_add_adapter;
+ }
- pch_i2c_init(&adap_info->pch_data);
+ pch_i2c_init(&adap_info->pch_data[i]);
+ }
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, &adap_info->pch_data);
+ KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_request_irq;
+ goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_request_irq:
- i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
err_i2c_add_adapter:
+ for (j = 0; j < i; j++)
+ i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
@@ -793,17 +818,22 @@ err_pci_enable:
static void __devexit pch_i2c_remove(struct pci_dev *pdev)
{
+ int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
- pch_i2c_disbl_int(&adap_info->pch_data);
- free_irq(pdev->irq, &adap_info->pch_data);
- i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+ free_irq(pdev->irq, adap_info);
- if (adap_info->pch_data.pch_base_address) {
- pci_iounmap(pdev, adap_info->pch_data.pch_base_address);
- adap_info->pch_data.pch_base_address = 0;
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_i2c_disbl_int(&adap_info->pch_data[i]);
+ i2c_del_adapter(&adap_info->pch_data[i].pch_adapter);
}
+ if (adap_info->pch_data[0].pch_base_address)
+ pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
+
+ for (i = 0; i < adap_info->ch_num; i++)
+ adap_info->pch_data[i].pch_base_address = 0;
+
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
@@ -816,17 +846,22 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
{
int ret;
+ int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
- void __iomem *p = adap_info->pch_data.pch_base_address;
+ void __iomem *p = adap_info->pch_data[0].pch_base_address;
adap_info->pch_i2c_suspended = true;
- while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) {
- /* Wait until all channel transfers are completed */
- msleep(20);
+ for (i = 0; i < adap_info->ch_num; i++) {
+ while ((adap_info->pch_data[i].pch_i2c_xfer_in_progress)) {
+ /* Wait until all channel transfers are completed */
+ msleep(20);
+ }
}
+
/* Disable the i2c interrupts */
- pch_i2c_disbl_int(&adap_info->pch_data);
+ for (i = 0; i < adap_info->ch_num; i++)
+ pch_i2c_disbl_int(&adap_info->pch_data[i]);
pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
"invoked function pch_i2c_disbl_int successfully\n",
@@ -849,6 +884,7 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
static int pch_i2c_resume(struct pci_dev *pdev)
{
+ int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
pci_set_power_state(pdev, PCI_D0);
@@ -861,7 +897,8 @@ static int pch_i2c_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
- pch_i2c_init(&adap_info->pch_data);
+ for (i = 0; i < adap_info->ch_num; i++)
+ pch_i2c_init(&adap_info->pch_data[i]);
adap_info->pch_i2c_suspended = false;
@@ -893,7 +930,7 @@ static void __exit pch_pci_exit(void)
}
module_exit(pch_pci_exit);
-MODULE_DESCRIPTION("PCH I2C PCI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 6e3c38240336..e4f88dca99b5 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -691,8 +691,7 @@ static int __devinit iic_request_irq(struct platform_device *ofdev,
/*
* Register single IIC interface
*/
-static int __devinit iic_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit iic_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
@@ -806,7 +805,7 @@ static const struct of_device_id ibm_iic_match[] = {
{}
};
-static struct of_platform_driver ibm_iic_driver = {
+static struct platform_driver ibm_iic_driver = {
.driver = {
.name = "ibm-iic",
.owner = THIS_MODULE,
@@ -818,12 +817,12 @@ static struct of_platform_driver ibm_iic_driver = {
static int __init iic_init(void)
{
- return of_register_platform_driver(&ibm_iic_driver);
+ return platform_driver_register(&ibm_iic_driver);
}
static void __exit iic_exit(void)
{
- of_unregister_platform_driver(&ibm_iic_driver);
+ platform_driver_unregister(&ibm_iic_driver);
}
module_init(iic_init);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index b74e6dc6886c..75b984c519ac 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -560,8 +560,7 @@ static struct i2c_adapter mpc_ops = {
.timeout = HZ,
};
-static int __devinit fsl_i2c_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fsl_i2c_probe(struct platform_device *op)
{
struct mpc_i2c *i2c;
const u32 *prop;
@@ -569,6 +568,9 @@ static int __devinit fsl_i2c_probe(struct platform_device *op,
int result = 0;
int plen;
+ if (!op->dev.of_match)
+ return -EINVAL;
+
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
@@ -603,8 +605,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op,
clock = *prop;
}
- if (match->data) {
- struct mpc_i2c_data *data = match->data;
+ if (op->dev.of_match->data) {
+ struct mpc_i2c_data *data = op->dev.of_match->data;
data->setup(op->dev.of_node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */
@@ -700,7 +702,7 @@ static const struct of_device_id mpc_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
-static struct of_platform_driver mpc_i2c_driver = {
+static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
.remove = __devexit_p(fsl_i2c_remove),
.driver = {
@@ -712,18 +714,12 @@ static struct of_platform_driver mpc_i2c_driver = {
static int __init fsl_i2c_init(void)
{
- int rv;
-
- rv = of_register_platform_driver(&mpc_i2c_driver);
- if (rv)
- printk(KERN_ERR DRV_NAME
- " of_register_platform_driver failed (%i)\n", rv);
- return rv;
+ return platform_driver_register(&mpc_i2c_driver);
}
static void __exit fsl_i2c_exit(void)
{
- of_unregister_platform_driver(&mpc_i2c_driver);
+ platform_driver_unregister(&mpc_i2c_driver);
}
module_init(fsl_i2c_init);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
new file mode 100644
index 000000000000..8022e2390a5a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -0,0 +1,412 @@
+/*
+ * Freescale MXS I2C bus driver
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * based on a (non-working) driver which was:
+ *
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * TODO: add DMA support once platform support for it is available
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#include <mach/common.h>
+
+#define DRIVER_NAME "mxs-i2c"
+
+#define MXS_I2C_CTRL0 (0x00)
+#define MXS_I2C_CTRL0_SET (0x04)
+
+#define MXS_I2C_CTRL0_SFTRST 0x80000000
+#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
+#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
+#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
+#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000
+#define MXS_I2C_CTRL0_MASTER_MODE 0x00020000
+#define MXS_I2C_CTRL0_DIRECTION 0x00010000
+#define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF)
+
+#define MXS_I2C_CTRL1 (0x40)
+#define MXS_I2C_CTRL1_SET (0x44)
+#define MXS_I2C_CTRL1_CLR (0x48)
+
+#define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80
+#define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40
+#define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20
+#define MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ 0x10
+#define MXS_I2C_CTRL1_EARLY_TERM_IRQ 0x08
+#define MXS_I2C_CTRL1_MASTER_LOSS_IRQ 0x04
+#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
+#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+
+#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \
+ MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \
+ MXS_I2C_CTRL1_EARLY_TERM_IRQ | \
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_IRQ)
+
+#define MXS_I2C_QUEUECTRL (0x60)
+#define MXS_I2C_QUEUECTRL_SET (0x64)
+#define MXS_I2C_QUEUECTRL_CLR (0x68)
+
+#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20
+#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04
+
+#define MXS_I2C_QUEUESTAT (0x70)
+#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
+
+#define MXS_I2C_QUEUECMD (0x80)
+
+#define MXS_I2C_QUEUEDATA (0x90)
+
+#define MXS_I2C_DATA (0xa0)
+
+
+#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \
+ MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION | \
+ MXS_I2C_CTRL0_XFER_COUNT(1))
+
+#define MXS_CMD_I2C_WRITE (MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION)
+
+#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
+ MXS_I2C_CTRL0_MASTER_MODE)
+
+/**
+ * struct mxs_i2c_dev - per device, private MXS-I2C data
+ *
+ * @dev: driver model device node
+ * @regs: IO registers pointer
+ * @cmd_complete: completion object for transaction wait
+ * @cmd_err: error code for last transaction
+ * @adapter: i2c subsystem adapter node
+ */
+struct mxs_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_complete;
+ u32 cmd_err;
+ struct i2c_adapter adapter;
+};
+
+/*
+ * TODO: check if calls to here are really needed. If not, we could get rid of
+ * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
+ */
+static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
+{
+ mxs_reset_block(i2c->regs);
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+}
+
+static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
+ int flags)
+{
+ u32 data;
+
+ writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD);
+
+ data = (addr << 1) | I2C_SMBUS_READ;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+
+ data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+}
+
+static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c,
+ u8 addr, u8 *buf, int len, int flags)
+{
+ u32 data;
+ int i, shifts_left;
+
+ data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+
+ /*
+ * We have to copy the slave address (u8) and buffer (arbitrary number
+ * of u8) into the data register (u32). To achieve that, the u8 are put
+ * into the MSBs of 'data' which is then shifted for the next u8. When
+ * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32
+ * looks like this:
+ *
+ * 3 2 1 0
+ * 10987654|32109876|54321098|76543210
+ * --------+--------+--------+--------
+ * buffer+2|buffer+1|buffer+0|slave_addr
+ */
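+ /*
+ * For example, with len == 4 one full word (slave address plus
+ * buf[0..2]) is written inside the loop at (i & 3) == 2, and the
+ * remaining buf[3] is flushed by the shifts_left write below.
+ */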
+
+ data = ((addr << 1) | I2C_SMBUS_WRITE) << 24;
+
+ for (i = 0; i < len; i++) {
+ data >>= 8;
+ data |= buf[i] << 24;
+ if ((i & 3) == 2)
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+
+ /* Write out the remaining bytes if any */
+ shifts_left = 24 - (i & 3) * 8;
+ if (shifts_left)
+ writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA);
+}
+
+/*
+ * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the
+ * rd_threshold to 1). Couldn't get this to work, though.
+ */
+static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (readl(i2c->regs + MXS_I2C_QUEUESTAT)
+ & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if ((i & 3) == 0) {
+ if (mxs_i2c_wait_for_data(i2c))
+ return -ETIMEDOUT;
+ data = readl(i2c->regs + MXS_I2C_QUEUEDATA);
+ }
+ buf[i] = data & 0xff;
+ data >>= 8;
+ }
+
+ return 0;
+}
+
+/*
+ * Low level master read/write transaction.
+ */
+static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int stop)
+{
+ struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ int ret;
+ int flags;
+
+ init_completion(&i2c->cmd_complete);
+
+ dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ msg->addr, msg->len, msg->flags, stop);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
+
+ if (msg->flags & I2C_M_RD)
+ mxs_i2c_pioq_setup_read(i2c, msg->addr, msg->len, flags);
+ else
+ mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, msg->len,
+ flags);
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ goto timeout;
+
+ if ((!i2c->cmd_err) && (msg->flags & I2C_M_RD)) {
+ ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len);
+ if (ret)
+ goto timeout;
+ }
+
+ if (i2c->cmd_err == -ENXIO)
+ mxs_i2c_reset(i2c);
+
+ dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
+
+ return i2c->cmd_err;
+
+timeout:
+ dev_dbg(i2c->dev, "Timeout!\n");
+ mxs_i2c_reset(i2c);
+ return -ETIMEDOUT;
+}
+
+static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ int i;
+ int err;
+
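+ /* The i2c core expects the number of transferred messages on success */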
+ for (i = 0; i < num; i++) {
+ err = mxs_i2c_xfer_msg(adap, &msgs[i], i == (num - 1));
+ if (err)
+ return err;
+ }
+
+ return num;
+}
+
+static u32 mxs_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
+{
+ struct mxs_i2c_dev *i2c = dev_id;
+ u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ)
+ i2c->cmd_err = -ENXIO;
+ else if (stat & (MXS_I2C_CTRL1_EARLY_TERM_IRQ |
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ |
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
+ /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
+ i2c->cmd_err = -EIO;
+ else
+ i2c->cmd_err = 0;
+
+ complete(&i2c->cmd_complete);
+
+ writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
+ return IRQ_HANDLED;
+}
+
+static const struct i2c_algorithm mxs_i2c_algo = {
+ .master_xfer = mxs_i2c_xfer,
+ .functionality = mxs_i2c_func,
+};
+
+static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxs_i2c_dev *i2c;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ resource_size_t res_size;
+ int err, irq;
+
+ i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ res_size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, res_size, res->name))
+ return -EBUSY;
+
+ i2c->regs = devm_ioremap_nocache(dev, res->start, res_size);
+ if (!i2c->regs)
+ return -EBUSY;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c);
+ if (err)
+ return err;
+
+ i2c->dev = dev;
+ platform_set_drvdata(pdev, i2c);
+
+ /* Do reset to enforce correct startup after pinmuxing */
+ mxs_i2c_reset(i2c);
+ writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ adap = &i2c->adapter;
+ strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->algo = &mxs_i2c_algo;
+ adap->dev.parent = dev;
+ adap->nr = pdev->id;
+ i2c_set_adapdata(adap, i2c);
+ err = i2c_add_numbered_adapter(adap);
+ if (err) {
+ dev_err(dev, "Failed to add adapter (%d)\n", err);
+ writel(MXS_I2C_CTRL0_SFTRST,
+ i2c->regs + MXS_I2C_CTRL0_SET);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+{
+ struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&i2c->adapter);
+ if (ret)
+ return -EBUSY;
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_CLR);
+ writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mxs_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mxs_i2c_remove),
+};
+
+static int __init mxs_i2c_init(void)
+{
+ return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe);
+}
+subsys_initcall(mxs_i2c_init);
+
+static void __exit mxs_i2c_exit(void)
+{
+ platform_driver_unregister(&mxs_i2c_driver);
+}
+module_exit(mxs_i2c_exit);
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("MXS I2C Bus Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ef3bcb1ce864..beaf68f3199f 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -49,6 +49,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -305,7 +306,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return -EIO;
}
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (pdata) {
i2c->regstep = pdata->regstep;
i2c->clock_khz = pdata->clock_khz;
@@ -330,9 +331,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
i2c->adap = ocores_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
-#ifdef CONFIG_OF
i2c->adap.dev.of_node = pdev->dev.of_node;
-#endif
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
@@ -390,15 +389,11 @@ static int ocores_i2c_resume(struct platform_device *pdev)
#define ocores_i2c_resume NULL
#endif
-#ifdef CONFIG_OF
static struct of_device_id ocores_i2c_match[] = {
- {
- .compatible = "opencores,i2c-ocores",
- },
- {},
+ { .compatible = "opencores,i2c-ocores", },
+ {},
};
MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:ocores-i2c");
@@ -411,9 +406,7 @@ static struct platform_driver ocores_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
-#ifdef CONFIG_OF
- .of_match_table = ocores_i2c_match,
-#endif
+ .of_match_table = ocores_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b605ff3a1fa0..829a2a1029f7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -847,11 +847,15 @@ complete:
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
}
+ /*
+ * ProDB0017052: Clear ARDY bit twice
+ */
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, stat &
(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
- OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+ OMAP_I2C_STAT_ARDY));
omap_i2c_complete_cmd(dev, err);
return IRQ_HANDLED;
}
@@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_SUSPEND
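+/*
+ * System suspend/resume simply forward to the bus' runtime-PM callbacks
+ * when the device is not already runtime-suspended.
+ */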
+static int omap_i2c_suspend(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+ dev->bus->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+ dev->bus->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+ .suspend = omap_i2c_suspend,
+ .resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
+ .pm = OMAP_I2C_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
new file mode 100644
index 000000000000..fac673940849
--- /dev/null
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -0,0 +1,306 @@
+/*
+ * I2C driver for PKUnity-v3 SoC
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+/*
+ * Poll the i2c status register until the specified bit is set.
+ * Returns 0 if timed out (100 msec).
+ */
+static short poll_status(unsigned long bit)
+{
+ int loop_cntr = 1000;
+
+ if (bit & I2C_STATUS_TFNF) {
+ do {
+ udelay(10);
+ } while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0));
+ } else {
+ /* RXRDY handler */
+ do {
+ if (readl(I2C_TAR) == I2C_TAR_EEPROM)
+ msleep(20);
+ else
+ udelay(10);
+ } while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0));
+ }
+
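+ /* Non-zero means the condition was met before the poll budget expired */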
+ return (loop_cntr > 0);
+}
+
+static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ int i2c_reg = *buf;
+
+ /* Read data */
+ while (length--) {
+ if (!poll_status(I2C_STATUS_TFNF)) {
+ dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* send addr */
+ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* get ready to next write */
+ i2c_reg++;
+
+ /* send read CMD */
+ writel(I2C_DATACMD_READ, I2C_DATACMD);
+
+ /* wait until data is available in the Rx FIFO */
+ if (!poll_status(I2C_STATUS_RFNE)) {
+ dev_dbg(&adap->dev, "RXRDY timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* read the data to buf */
+ *buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK);
+ buf++;
+ }
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ int i2c_reg = *buf;
+
+ /* The first byte is the register number; bail out if it is invalid */
+ if (i2c_reg == -1) {
+ printk(KERN_WARNING "Error i2c reg\n");
+ return -ETIMEDOUT;
+ }
+
+ if (length == 1)
+ return 0;
+
+ buf++;
+ length--;
+ while (length--) {
+ /* send addr */
+ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* send write CMD */
+ writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* allow the write to complete */
+ msleep(20);
+
+ /* advance to the next register and data byte */
+ i2c_reg++;
+ buf++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generic i2c master transfer entrypoint.
+ */
+static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg,
+ int num)
+{
+ int i, ret;
+ unsigned char swap;
+
+ /* Disable i2c */
+ writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
+
+ /* Set the work mode and speed */
+ writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON);
+
+ writel(pmsg->addr, I2C_TAR);
+
+ /* Enable i2c */
+ writel(I2C_ENABLE_ENABLE, I2C_ENABLE);
+
+ dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num);
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
+ pmsg->flags & I2C_M_RD ? "read" : "writ",
+ pmsg->len, pmsg->len > 1 ? "s" : "",
+ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
+
+ if (pmsg->len && pmsg->buf) { /* sanity check */
+ if (pmsg->flags & I2C_M_RD)
+ ret = xfer_read(adap, pmsg->buf, pmsg->len);
+ else
+ ret = xfer_write(adap, pmsg->buf, pmsg->len);
+
+ if (ret)
+ return ret;
+
+ }
+ dev_dbg(&adap->dev, "transfer complete\n");
+ pmsg++; /* next message */
+ }
+
+ /* XXX: fixup be16_to_cpu in bq27x00_battery.c */
+ if (pmsg->addr == I2C_TAR_PWIC) {
+ swap = pmsg->buf[0];
+ pmsg->buf[0] = pmsg->buf[1];
+ pmsg->buf[1] = swap;
+ }
+
+ return i;
+}
+
+/*
+ * Return list of supported functionality.
+ */
+static u32 puv3_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm puv3_i2c_algorithm = {
+ .master_xfer = puv3_i2c_xfer,
+ .functionality = puv3_i2c_func,
+};
+
+/*
+ * Main initialization routine.
+ */
+static int __devinit puv3_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ struct resource *mem;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
+ if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c"))
+ return -EBUSY;
+
+ adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
+ if (adapter == NULL) {
+ dev_err(&pdev->dev, "can't allocate inteface!\n");
+ rc = -ENOMEM;
+ goto fail_nomem;
+ }
+ snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x",
+ mem->start);
+ adapter->algo = &puv3_i2c_algorithm;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->dev.parent = &pdev->dev;
+
+ platform_set_drvdata(pdev, adapter);
+
+ adapter->nr = pdev->id;
+ rc = i2c_add_numbered_adapter(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Adapter '%s' registration failed\n",
+ adapter->name);
+ goto fail_add_adapter;
+ }
+
+ dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
+ return 0;
+
+fail_add_adapter:
+ platform_set_drvdata(pdev, NULL);
+ kfree(adapter);
+fail_nomem:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return rc;
+}
+
+static int __devexit puv3_i2c_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+ struct resource *mem;
+ int rc;
+
+ rc = i2c_del_adapter(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Adapter '%s' delete fail\n",
+ adapter->name);
+ return rc;
+ }
+
+ put_device(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return rc;
+}
+
+#ifdef CONFIG_PM
+static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int poll_count;
+ /* Disable the IIC */
+ writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
+ for (poll_count = 0; poll_count < 50; poll_count++) {
+ if (readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE)
+ udelay(25);
+ }
+
+ return 0;
+}
+
+static int puv3_i2c_resume(struct platform_device *dev)
+{
+ return 0;
+}
+#else
+#define puv3_i2c_suspend NULL
+#define puv3_i2c_resume NULL
+#endif
+
+MODULE_ALIAS("platform:puv3_i2c");
+
+static struct platform_driver puv3_i2c_driver = {
+ .probe = puv3_i2c_probe,
+ .remove = __devexit_p(puv3_i2c_remove),
+ .suspend = puv3_i2c_suspend,
+ .resume = puv3_i2c_resume,
+ .driver = {
+ .name = "PKUnity-v3-I2C",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init puv3_i2c_init(void)
+{
+ return platform_driver_register(&puv3_i2c_driver);
+}
+
+static void __exit puv3_i2c_exit(void)
+{
+ platform_driver_unregister(&puv3_i2c_driver);
+}
+
+module_init(puv3_i2c_init);
+module_exit(puv3_i2c_exit);
+
+MODULE_DESCRIPTION("PKUnity v3 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
new file mode 100644
index 000000000000..6659d269b841
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -0,0 +1,176 @@
+/*
+ * The CE4100's I2C device is more or less the same one as found on PXA.
+ * It does not support slave mode, and the registers are at slightly
+ * different offsets. This PCI device provides three BARs, each containing
+ * a single I2C controller.
+ */
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/pxa-i2c.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+#define CE4100_PCI_I2C_DEVS 3
+
+struct ce4100_devices {
+ struct platform_device *pdev[CE4100_PCI_I2C_DEVS];
+};
+
+static struct platform_device *add_i2c_device(struct pci_dev *dev, int bar)
+{
+ struct platform_device *pdev;
+ struct i2c_pxa_platform_data pdata;
+ struct resource res[2];
+ struct device_node *child;
+ static int devnum;
+ int ret;
+
+ memset(&pdata, 0, sizeof(struct i2c_pxa_platform_data));
+ memset(&res, 0, sizeof(res));
+
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = pci_resource_start(dev, bar);
+ res[0].end = pci_resource_end(dev, bar);
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = dev->irq;
+ res[1].end = dev->irq;
+
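+ /* Find the device tree child node whose "reg" range matches this BAR */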
+ for_each_child_of_node(dev->dev.of_node, child) {
+ const void *prop;
+ struct resource r;
+ int ret;
+
+ ret = of_address_to_resource(child, 0, &r);
+ if (ret < 0)
+ continue;
+ if (r.start != res[0].start)
+ continue;
+ if (r.end != res[0].end)
+ continue;
+ if (r.flags != res[0].flags)
+ continue;
+
+ prop = of_get_property(child, "fast-mode", NULL);
+ if (prop)
+ pdata.fast_mode = 1;
+
+ break;
+ }
+
+ if (!child) {
+ dev_err(&dev->dev, "failed to match a DT node for bar %d.\n",
+ bar);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pdev = platform_device_alloc("ce4100-i2c", devnum);
+ if (!pdev) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdev->dev.parent = &dev->dev;
+ pdev->dev.of_node = child;
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+ devnum++;
+ return pdev;
+err:
+ platform_device_put(pdev);
+out:
+ return ERR_PTR(ret);
+}
+
+static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+ int i;
+ struct ce4100_devices *sds;
+
+ ret = pci_enable_device_mem(dev);
+ if (ret)
+ return ret;
+
+ if (!dev->dev.of_node) {
+ dev_err(&dev->dev, "Missing device tree node.\n");
+ return -EINVAL;
+ }
+ sds = kzalloc(sizeof(*sds), GFP_KERNEL);
+ if (!sds) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
+ sds->pdev[i] = add_i2c_device(dev, i);
+ if (IS_ERR(sds->pdev[i])) {
+ ret = PTR_ERR(sds->pdev[i]);
+ while (--i >= 0)
+ platform_device_unregister(sds->pdev[i]);
+ goto err_dev_add;
+ }
+ }
+ pci_set_drvdata(dev, sds);
+ return 0;
+
+err_dev_add:
+ pci_set_drvdata(dev, NULL);
+ kfree(sds);
+err_mem:
+ pci_disable_device(dev);
+ return ret;
+}
+
+static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
+{
+ struct ce4100_devices *sds;
+ unsigned int i;
+
+ sds = pci_get_drvdata(dev);
+ pci_set_drvdata(dev, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
+ platform_device_unregister(sds->pdev[i]);
+
+ pci_disable_device(dev);
+ kfree(sds);
+}
+
+static struct pci_device_id ce4100_i2c_devices[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
+
+static struct pci_driver ce4100_i2c_driver = {
+ .name = "ce4100_i2c",
+ .id_table = ce4100_i2c_devices,
+ .probe = ce4100_i2c_probe,
+ .remove = __devexit_p(ce4100_i2c_remove),
+};
+
+static int __init ce4100_i2c_init(void)
+{
+ return pci_register_driver(&ce4100_i2c_driver);
+}
+module_init(ce4100_i2c_init);
+
+static void __exit ce4100_i2c_exit(void)
+{
+ pci_unregister_driver(&ce4100_i2c_driver);
+}
+module_exit(ce4100_i2c_exit);
+
+MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f4c19a97e0b3..a90739be1369 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -29,38 +29,75 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/i2c-pxa.h>
+#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/irq.h>
-#include <plat/i2c.h>
+
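+/*
+ * The clk API is not usable on x86 (CE4100), so the clock calls below are
+ * stubbed out there.
+ */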
+#ifdef CONFIG_X86
+#define clk_get(dev, id) NULL
+#define clk_put(clk) do { } while (0)
+#define clk_disable(clk) do { } while (0)
+#define clk_enable(clk) do { } while (0)
+#endif
+
+struct pxa_reg_layout {
+ u32 ibmr;
+ u32 idbr;
+ u32 icr;
+ u32 isr;
+ u32 isar;
+};
+
+enum pxa_i2c_types {
+ REGS_PXA2XX,
+ REGS_PXA3XX,
+ REGS_CE4100,
+};
/*
- * I2C register offsets will be shifted 0 or 1 bit left, depending on
- * different SoCs
+ * I2C registers definitions
*/
-#define REG_SHIFT_0 (0 << 0)
-#define REG_SHIFT_1 (1 << 0)
-#define REG_SHIFT(d) ((d) & 0x1)
+static struct pxa_reg_layout pxa_reg_layout[] = {
+ [REGS_PXA2XX] = {
+ .ibmr = 0x00,
+ .idbr = 0x10,
+ .icr = 0x20,
+ .isr = 0x30,
+ .isar = 0x40,
+ },
+ [REGS_PXA3XX] = {
+ .ibmr = 0x00,
+ .idbr = 0x08,
+ .icr = 0x10,
+ .isr = 0x18,
+ .isar = 0x20,
+ },
+ [REGS_CE4100] = {
+ .ibmr = 0x14,
+ .idbr = 0x0c,
+ .icr = 0x00,
+ .isr = 0x04,
+ /* no isar register */
+ },
+};
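+/*
+ * The per-variant offsets above are resolved into the i2c->reg_* pointers
+ * once at probe time; the _IBMR()/_IDBR()/... accessors then use those
+ * pointers directly.
+ */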
static const struct platform_device_id i2c_pxa_id_table[] = {
- { "pxa2xx-i2c", REG_SHIFT_1 },
- { "pxa3xx-pwri2c", REG_SHIFT_0 },
+ { "pxa2xx-i2c", REGS_PXA2XX },
+ { "pxa3xx-pwri2c", REGS_PXA3XX },
+ { "ce4100-i2c", REGS_CE4100 },
{ },
};
MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
/*
- * I2C registers and bit definitions
+ * I2C bit definitions
*/
-#define IBMR (0x00)
-#define IDBR (0x08)
-#define ICR (0x10)
-#define ISR (0x18)
-#define ISAR (0x20)
#define ICR_START (1 << 0) /* start bit */
#define ICR_STOP (1 << 1) /* stop bit */
@@ -111,7 +148,11 @@ struct pxa_i2c {
u32 icrlog[32];
void __iomem *reg_base;
- unsigned int reg_shift;
+ void __iomem *reg_ibmr;
+ void __iomem *reg_idbr;
+ void __iomem *reg_icr;
+ void __iomem *reg_isr;
+ void __iomem *reg_isar;
unsigned long iobase;
unsigned long iosize;
@@ -121,11 +162,11 @@ struct pxa_i2c {
unsigned int fast_mode :1;
};
-#define _IBMR(i2c) ((i2c)->reg_base + (0x0 << (i2c)->reg_shift))
-#define _IDBR(i2c) ((i2c)->reg_base + (0x4 << (i2c)->reg_shift))
-#define _ICR(i2c) ((i2c)->reg_base + (0x8 << (i2c)->reg_shift))
-#define _ISR(i2c) ((i2c)->reg_base + (0xc << (i2c)->reg_shift))
-#define _ISAR(i2c) ((i2c)->reg_base + (0x10 << (i2c)->reg_shift))
+#define _IBMR(i2c) ((i2c)->reg_ibmr)
+#define _IDBR(i2c) ((i2c)->reg_idbr)
+#define _ICR(i2c) ((i2c)->reg_icr)
+#define _ISR(i2c) ((i2c)->reg_isr)
+#define _ISAR(i2c) ((i2c)->reg_isar)
/*
* I2C Slave mode address
@@ -418,7 +459,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
writel(I2C_ISR_INIT, _ISR(i2c));
writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));
- writel(i2c->slave_addr, _ISAR(i2c));
+ if (i2c->reg_isar)
+ writel(i2c->slave_addr, _ISAR(i2c));
/* set control register values */
writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
@@ -729,8 +771,10 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
*/
ret = i2c->msg_idx;
- if (timeout == 0)
+ if (!timeout && i2c->msg_num) {
i2c_pxa_scream_blue_murder(i2c, "timeout");
+ ret = I2C_RETRY;
+ }
out:
return ret;
@@ -915,11 +959,17 @@ static void i2c_pxa_irq_rxfull(struct pxa_i2c *i2c, u32 isr)
writel(icr, _ICR(i2c));
}
+#define VALID_INT_SOURCE (ISR_SSD | ISR_ALD | ISR_ITE | ISR_IRF | \
+ ISR_SAD | ISR_BED)
static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
{
struct pxa_i2c *i2c = dev_id;
u32 isr = readl(_ISR(i2c));
+ isr &= VALID_INT_SOURCE;
+ if (!isr)
+ return IRQ_NONE;
+
if (i2c_debug > 2 && 0) {
dev_dbg(&i2c->adap.dev, "%s: ISR=%08x, ICR=%08x, IBMR=%02x\n",
__func__, isr, readl(_ICR(i2c)), readl(_IBMR(i2c)));
@@ -934,7 +984,7 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
/*
* Always clear all pending IRQs.
*/
- writel(isr & (ISR_SSD|ISR_ALD|ISR_ITE|ISR_IRF|ISR_SAD|ISR_BED), _ISR(i2c));
+ writel(isr, _ISR(i2c));
if (isr & ISR_SAD)
i2c_pxa_slave_start(i2c, isr);
@@ -1001,6 +1051,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
struct resource *res;
struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
const struct platform_device_id *id = platform_get_device_id(dev);
+ enum pxa_i2c_types i2c_type = id->driver_data;
int ret;
int irq;
@@ -1044,7 +1095,13 @@ static int i2c_pxa_probe(struct platform_device *dev)
ret = -EIO;
goto eremap;
}
- i2c->reg_shift = REG_SHIFT(id->driver_data);
+
+ i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
+ i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
+ i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
+ i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
+ if (i2c_type != REGS_CE4100)
+ i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
i2c->iobase = res->start;
i2c->iosize = resource_size(res);
@@ -1072,7 +1129,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.algo = &i2c_pxa_pio_algorithm;
} else {
i2c->adap.algo = &i2c_pxa_algorithm;
- ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
+ ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
i2c->adap.name, i2c);
if (ret)
goto ereqirq;
@@ -1082,12 +1139,19 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &dev->dev;
+#ifdef CONFIG_OF
+ i2c->adap.dev.of_node = dev->dev.of_node;
+#endif
- ret = i2c_add_numbered_adapter(&i2c->adap);
+ if (i2c_type == REGS_CE4100)
+ ret = i2c_add_adapter(&i2c->adap);
+ else
+ ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
printk(KERN_INFO "I2C: Failed to add bus\n");
goto eadapt;
}
+ of_i2c_register_devices(&i2c->adap);
platform_set_drvdata(dev, i2c);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be451d326..266135ddf7fa 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
adap->class = I2C_CLASS_DDC;
- strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+ strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
adap->algo = &stu300_algo;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
new file mode 100644
index 000000000000..3921f664c9c3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -0,0 +1,700 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c-tegra.h>
+
+#include <asm/unaligned.h>
+
+#include <mach/clk.h>
+
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define BYTES_PER_FIFO_WORD 4
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_ADDR1 0x02c
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
+#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
+#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_TX_MASK 0xF0
+#define I2C_FIFO_STATUS_TX_SHIFT 4
+#define I2C_FIFO_STATUS_RX_MASK 0x0F
+#define I2C_FIFO_STATUS_RX_SHIFT 0
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+/**
+ * struct tegra_i2c_dev - per device i2c context
+ * @dev: device reference for power management
+ * @adapter: core i2c layer adapter information
+ * @clk: clock reference for i2c controller
+ * @i2c_clk: clock reference for i2c bus
+ * @iomem: memory resource for registers
+ * @base: ioremapped registers cookie
+ * @cont_id: i2c controller id, used for the packet header
+ * @irq: irq number of transfer complete interrupt
+ * @is_dvc: identifies the DVC i2c controller, has a different register layout
+ * @msg_complete: transfer completion notifier
+ * @msg_err: error code for completed message
+ * @msg_buf: pointer to current message data
+ * @msg_buf_remaining: size of unsent data in the message buffer
+ * @msg_read: identifies read transfers
+ * @bus_clk_rate: current i2c bus clock rate
+ * @is_suspended: prevents i2c controller accesses after suspend is called
+ */
+struct tegra_i2c_dev {
+ struct device *dev;
+ struct i2c_adapter adapter;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ struct resource *iomem;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ int is_dvc;
+ struct completion msg_complete;
+ int msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+ int msg_read;
+ unsigned long bus_clk_rate;
+ bool is_suspended;
+};
+
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + reg);
+}
+
+/*
+ * i2c_writel and i2c_readl will offset the register if necessary to talk
+ * to the I2C block inside the DVC block
+ */
+static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
+ unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ return reg;
+}
+
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ unsigned long reg)
+{
+ writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask &= ~mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask |= mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
+ (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int rx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > rx_fifo_avail)
+ words_to_transfer = rx_fifo_avail;
+
+ i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ rx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent overwriting past the end of buf
+ */
+ if (rx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ memcpy(buf, &val, buf_remaining);
+ buf_remaining = 0;
+ rx_fifo_avail--;
+ }
+
+ BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int tx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent reading past the end of buf, which could cross a page
+ * boundary and fault.
+ */
+ if (tx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ memcpy(&val, buf, buf_remaining);
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ buf_remaining = 0;
+ tx_fifo_avail--;
+ }
+
+ BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+/*
+ * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
+ * block. This block is identical to the rest of the I2C blocks, except that
+ * it only supports master mode, it has registers moved around, and it needs
+ * some extra init to get it into I2C mode. The register moves are handled
+ * by i2c_readl and i2c_writel
+ */
+static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val = 0;
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
+ val |= DVC_CTRL_REG3_SW_PROG;
+ val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG3);
+
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
+ val |= DVC_CTRL_REG1_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
+}
+
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int err = 0;
+
+ clk_enable(i2c_dev->clk);
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(i2c_dev->clk);
+
+ if (i2c_dev->is_dvc)
+ tegra_dvc_init(i2c_dev);
+
+ val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN;
+ i2c_writel(i2c_dev, val, I2C_CNFG);
+ i2c_writel(i2c_dev, 0, I2C_INT_MASK);
+ clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
+
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ if (tegra_i2c_flush_fifos(i2c_dev))
+ err = -ETIMEDOUT;
+
+ clk_disable(i2c_dev->clk);
+ return err;
+}
+
+static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+{
+ u32 status;
+ const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ struct tegra_i2c_dev *i2c_dev = dev_id;
+
+ status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+ if (status == 0) {
+ dev_warn(i2c_dev->dev, "interrupt with no status\n");
+ return IRQ_NONE;
+ }
+
+ if (unlikely(status & status_err)) {
+ if (status & I2C_INT_NO_ACK)
+ i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+ if (status & I2C_INT_ARBITRATION_LOST)
+ i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+ complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+ if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_empty_rx_fifo(i2c_dev);
+ else
+ BUG();
+ }
+
+ if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+ else
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+ }
+
+ if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
+ !i2c_dev->msg_buf_remaining)
+ complete(&i2c_dev->msg_complete);
+
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+err:
+ /* An error occurred, mask all interrupts */
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
+ I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
+ I2C_INT_RX_FIFO_DATA_REQ);
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ struct i2c_msg *msg, int stop)
+{
+ u32 packet_header;
+ u32 int_mask;
+ int ret;
+
+ tegra_i2c_flush_fifos(i2c_dev);
+ i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ i2c_dev->msg_err = I2C_ERR_NONE;
+ i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+ INIT_COMPLETION(i2c_dev->msg_complete);
+
+ packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ PACKET_HEADER0_PROTOCOL_I2C |
+ (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+ (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->len - 1;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ packet_header |= I2C_HEADER_IE_ENABLE;
+ if (msg->flags & I2C_M_TEN)
+ packet_header |= I2C_HEADER_10BIT_ADDR;
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ packet_header |= I2C_HEADER_CONT_ON_NAK;
+ if (msg->flags & I2C_M_NOSTART)
+ packet_header |= I2C_HEADER_REPEAT_START;
+ if (msg->flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ if (!(msg->flags & I2C_M_RD))
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (msg->flags & I2C_M_RD)
+ int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
+ else if (i2c_dev->msg_buf_remaining)
+ int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
+ i2c_readl(i2c_dev, I2C_INT_MASK));
+
+ ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ tegra_i2c_mask_irq(i2c_dev, int_mask);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+
+ tegra_i2c_init(i2c_dev);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n",
+ ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+
+ if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+ return 0;
+
+ tegra_i2c_init(i2c_dev);
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ return 0;
+ return -EREMOTEIO;
+ }
+
+ return -EIO;
+}
+
+static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ int ret = 0;
+
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
+ clk_enable(i2c_dev->clk);
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+ if (ret)
+ break;
+ }
+ clk_disable(i2c_dev->clk);
+ return ret ?: i;
+}
+
+static u32 tegra_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm tegra_i2c_algo = {
+ .master_xfer = tegra_i2c_xfer,
+ .functionality = tegra_i2c_func,
+};
+
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev;
+ struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct resource *iomem;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ void *base;
+ int irq;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ return -EINVAL;
+ }
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ base = ioremap(iomem->start, resource_size(iomem));
+ if (!base) {
+ dev_err(&pdev->dev, "Cannot ioremap I2C region\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ irq = res->start;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "missing controller clock");
+ ret = PTR_ERR(clk);
+ goto err_release_region;
+ }
+
+ i2c_clk = clk_get(&pdev->dev, "i2c");
+ if (IS_ERR(i2c_clk)) {
+ dev_err(&pdev->dev, "missing bus clock");
+ ret = PTR_ERR(i2c_clk);
+ goto err_clk_put;
+ }
+
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL);
+ if (!i2c_dev) {
+ ret = -ENOMEM;
+ goto err_i2c_clk_put;
+ }
+
+ i2c_dev->base = base;
+ i2c_dev->clk = clk;
+ i2c_dev->i2c_clk = i2c_clk;
+ i2c_dev->iomem = iomem;
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->irq = irq;
+ i2c_dev->cont_id = pdev->id;
+ i2c_dev->dev = &pdev->dev;
+ i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000;
+
+ if (pdev->id == 3)
+ i2c_dev->is_dvc = 1;
+ init_completion(&i2c_dev->msg_complete);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = tegra_i2c_init(i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize i2c controller");
+ goto err_free;
+ }
+
+ ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ goto err_free;
+ }
+
+ clk_enable(i2c_dev->i2c_clk);
+
+ i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
+ i2c_dev->adapter.owner = THIS_MODULE;
+ i2c_dev->adapter.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
+ sizeof(i2c_dev->adapter.name));
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->adapter.dev.parent = &pdev->dev;
+ i2c_dev->adapter.nr = pdev->id;
+
+ ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+err_free_irq:
+ free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+ kfree(i2c_dev);
+err_i2c_clk_put:
+ clk_put(i2c_clk);
+err_clk_put:
+ clk_put(clk);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_iounmap:
+ iounmap(base);
+ return ret;
+}
+
+static int tegra_i2c_remove(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ i2c_del_adapter(&i2c_dev->adapter);
+ free_irq(i2c_dev->irq, i2c_dev);
+ clk_put(i2c_dev->i2c_clk);
+ clk_put(i2c_dev->clk);
+ release_mem_region(i2c_dev->iomem->start,
+ resource_size(i2c_dev->iomem));
+ iounmap(i2c_dev->base);
+ kfree(i2c_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_dev->is_suspended = true;
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+
+static int tegra_i2c_resume(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+
+ ret = tegra_i2c_init(i2c_dev);
+
+ if (ret) {
+ i2c_unlock_adapter(&i2c_dev->adapter);
+ return ret;
+ }
+
+ i2c_dev->is_suspended = false;
+
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_i2c_driver = {
+ .probe = tegra_i2c_probe,
+ .remove = tegra_i2c_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_i2c_suspend,
+ .resume = tegra_i2c_resume,
+#endif
+ .driver = {
+ .name = "tegra-i2c",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_i2c_init_driver(void)
+{
+ return platform_driver_register(&tegra_i2c_driver);
+}
+
+static void __exit tegra_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_i2c_driver);
+}
+
+subsys_initcall(tegra_i2c_init_driver);
+module_exit(tegra_i2c_exit_driver);
+
+MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver");
+MODULE_AUTHOR("Colin Cross");
+MODULE_LICENSE("GPL v2");
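
The tegra_i2c_probe() above binds to a platform device named "tegra-i2c" carrying one MEM and one IRQ resource; bus speed comes from an optional struct tegra_i2c_platform_data (100 kHz is used when no platform data is given, and pdev->id 3 selects the DVC instance). A minimal board-file sketch follows; the base address, IRQ number and platform-data header location are placeholders, not taken from this patch:

#include <linux/platform_device.h>
#include <linux/i2c-tegra.h>	/* assumed home of struct tegra_i2c_platform_data */

static struct resource tegra_i2c1_resources[] = {
	{
		.start	= 0x7000c000,	/* placeholder controller base */
		.end	= 0x7000c0ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 38,		/* placeholder IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct tegra_i2c_platform_data tegra_i2c1_pdata = {
	.bus_clk_rate	= 400000,	/* 400 kHz */
};

static struct platform_device tegra_i2c1_device = {
	.name		= "tegra-i2c",
	.id		= 0,		/* becomes the adapter number */
	.resource	= tegra_i2c1_resources,
	.num_resources	= ARRAY_SIZE(tegra_i2c1_resources),
	.dev		= {
		.platform_data	= &tegra_i2c1_pdata,
	},
};

Board code would then register it with platform_device_register(&tegra_i2c1_device).
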
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index a9c419e075a5..9fbd7e6fe32e 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -704,7 +705,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
if (irq < 0)
goto resource_missing;
- pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata)
return -EINVAL;
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 7e6a63b57165..3ca2e012e789 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -1,5 +1,5 @@
/*
- * i2c-boardinfo.h - collect pre-declarations of I2C devices
+ * i2c-boardinfo.c - collect pre-declarations of I2C devices
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index f0bd5bcdf563..e5f76a0372fd 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -537,9 +537,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
-#ifdef CONFIG_OF
client->dev.of_node = info->of_node;
-#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
@@ -799,6 +797,9 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
/* Let legacy drivers scan this bus for matching devices */
if (driver->attach_adapter) {
+ dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+ dev_warn(&adap->dev, "Please use another way to instantiate "
+ "your i2c_client\n");
/* We ignore the return code; if it fails, too bad */
driver->attach_adapter(adap);
}
@@ -983,6 +984,7 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
if (!driver->detach_adapter)
return 0;
+ dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
res = driver->detach_adapter(adapter);
if (res)
dev_err(&adapter->dev, "detach_adapter failed (%d) "
@@ -1093,6 +1095,18 @@ EXPORT_SYMBOL(i2c_del_adapter);
/* ------------------------------------------------------------------------- */
+int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *))
+{
+ int res;
+
+ mutex_lock(&core_lock);
+ res = bus_for_each_dev(&i2c_bus_type, NULL, data, fn);
+ mutex_unlock(&core_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(i2c_for_each_dev);
+
static int __process_new_driver(struct device *dev, void *data)
{
if (dev->type != &i2c_adapter_type)
@@ -1136,9 +1150,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
- mutex_lock(&core_lock);
- bus_for_each_dev(&i2c_bus_type, NULL, driver, __process_new_driver);
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_new_driver);
return 0;
}
@@ -1158,9 +1170,7 @@ static int __process_removed_driver(struct device *dev, void *data)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
- mutex_lock(&core_lock);
- bus_for_each_dev(&i2c_bus_type, NULL, driver, __process_removed_driver);
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_removed_driver);
driver_unregister(&driver->driver);
pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
@@ -1583,12 +1593,12 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-struct i2c_adapter *i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int nr)
{
struct i2c_adapter *adapter;
mutex_lock(&core_lock);
- adapter = idr_find(&i2c_adapter_idr, id);
+ adapter = idr_find(&i2c_adapter_idr, nr);
if (adapter && !try_module_get(adapter->owner))
adapter = NULL;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cec0f3ba97f8..66ac71800a06 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -28,6 +28,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -37,16 +39,13 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
-static struct i2c_driver i2cdev_driver;
-
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
* slave (i2c_client) with which messages will be exchanged. It's coupled
* with a character special file which is accessed by user mode drivers.
*
* The list of i2c_dev structures is parallel to the i2c_adapter lists
- * maintained by the driver model, and is updated using notifications
- * delivered to the i2cdev_driver.
+ * maintained by the driver model, and is updated using bus notifications.
*/
struct i2c_dev {
struct list_head list;
@@ -491,7 +490,6 @@ static int i2cdev_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr);
- client->driver = &i2cdev_driver;
client->adapter = adap;
file->private_data = client;
@@ -522,19 +520,18 @@ static const struct file_operations i2cdev_fops = {
/* ------------------------------------------------------------------------- */
-/*
- * The legacy "i2cdev_driver" is used primarily to get notifications when
- * I2C adapters are added or removed, so that each one gets an i2c_dev
- * and is thus made available to userspace driver code.
- */
-
static struct class *i2c_dev_class;
-static int i2cdev_attach_adapter(struct i2c_adapter *adap)
+static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
+ struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
int res;
+ if (dev->type != &i2c_adapter_type)
+ return 0;
+ adap = to_i2c_adapter(dev);
+
i2c_dev = get_free_i2c_dev(adap);
if (IS_ERR(i2c_dev))
return PTR_ERR(i2c_dev);
@@ -561,10 +558,15 @@ error:
return res;
}
-static int i2cdev_detach_adapter(struct i2c_adapter *adap)
+static int i2cdev_detach_adapter(struct device *dev, void *dummy)
{
+ struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
+ if (dev->type != &i2c_adapter_type)
+ return 0;
+ adap = to_i2c_adapter(dev);
+
i2c_dev = i2c_dev_get_by_minor(adap->nr);
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
@@ -577,12 +579,23 @@ static int i2cdev_detach_adapter(struct i2c_adapter *adap)
return 0;
}
-static struct i2c_driver i2cdev_driver = {
- .driver = {
- .name = "dev_driver",
- },
- .attach_adapter = i2cdev_attach_adapter,
- .detach_adapter = i2cdev_detach_adapter,
+int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return i2cdev_attach_adapter(dev, NULL);
+ case BUS_NOTIFY_DEL_DEVICE:
+ return i2cdev_detach_adapter(dev, NULL);
+ }
+
+ return 0;
+}
+
+static struct notifier_block i2cdev_notifier = {
+ .notifier_call = i2cdev_notifier_call,
};
/* ------------------------------------------------------------------------- */
@@ -607,10 +620,14 @@ static int __init i2c_dev_init(void)
goto out_unreg_chrdev;
}
- res = i2c_add_driver(&i2cdev_driver);
+ /* Keep track of adapters which will be added or removed later */
+ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
if (res)
goto out_unreg_class;
+ /* Bind to already existing adapters right away */
+ i2c_for_each_dev(NULL, i2cdev_attach_adapter);
+
return 0;
out_unreg_class:
@@ -624,7 +641,8 @@ out:
static void __exit i2c_dev_exit(void)
{
- i2c_del_driver(&i2cdev_driver);
+ i2c_for_each_dev(NULL, i2cdev_detach_adapter);
+ bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
class_destroy(i2c_dev_class);
unregister_chrdev(I2C_MAJOR, "i2c");
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e05690..a46dddf61078 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <asm/mwait.h>
+#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
@@ -100,7 +107,7 @@ static struct cpuidle_state *cpuidle_state_table;
static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C0 */ },
{ /* MWAIT C1 */
- .name = "NHM-C1",
+ .name = "C1-NHM",
.desc = "MWAIT 0x00",
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
@@ -108,7 +115,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.target_residency = 6,
.enter = &intel_idle },
{ /* MWAIT C2 */
- .name = "NHM-C3",
+ .name = "C3-NHM",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -116,7 +123,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.target_residency = 80,
.enter = &intel_idle },
{ /* MWAIT C3 */
- .name = "NHM-C6",
+ .name = "C6-NHM",
.desc = "MWAIT 0x20",
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -128,7 +135,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C0 */ },
{ /* MWAIT C1 */
- .name = "SNB-C1",
+ .name = "C1-SNB",
.desc = "MWAIT 0x00",
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
@@ -136,7 +143,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.target_residency = 1,
.enter = &intel_idle },
{ /* MWAIT C2 */
- .name = "SNB-C3",
+ .name = "C3-SNB",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -144,7 +151,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.target_residency = 211,
.enter = &intel_idle },
{ /* MWAIT C3 */
- .name = "SNB-C6",
+ .name = "C6-SNB",
.desc = "MWAIT 0x20",
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -152,7 +159,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.target_residency = 345,
.enter = &intel_idle },
{ /* MWAIT C4 */
- .name = "SNB-C7",
+ .name = "C7-SNB",
.desc = "MWAIT 0x30",
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -164,7 +171,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C0 */ },
{ /* MWAIT C1 */
- .name = "ATM-C1",
+ .name = "C1-ATM",
.desc = "MWAIT 0x00",
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
@@ -172,7 +179,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.target_residency = 4,
.enter = &intel_idle },
{ /* MWAIT C2 */
- .name = "ATM-C2",
+ .name = "C2-ATM",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID,
@@ -181,7 +188,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
{ /* MWAIT C3 */ },
{ /* MWAIT C4 */
- .name = "ATM-C4",
+ .name = "C4-ATM",
.desc = "MWAIT 0x30",
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -190,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
{ /* MWAIT C5 */ },
{ /* MWAIT C6 */
- .name = "ATM-C6",
+ .name = "C6-ATM",
.desc = "MWAIT 0x52",
.driver_data = (void *) 0x52,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
.notifier_call = setup_broadcast_cpuhp_notify,
};
+static void auto_demotion_disable(void *dummy)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+ msr_bits &= ~auto_demotion_disable_flags;
+ wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
/*
* intel_idle_probe()
*/
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
case 0x25: /* Westmere */
case 0x2C: /* Westmere */
cpuidle_state_table = nehalem_cstates;
+ auto_demotion_disable_flags =
+ (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
break;
case 0x1C: /* 28 - Atom Processor */
+ cpuidle_state_table = atom_cstates;
+ break;
+
case 0x26: /* 38 - Lincroft Atom Processor */
cpuidle_state_table = atom_cstates;
+ auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
break;
case 0x2A: /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
+ if (auto_demotion_disable_flags)
+ smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d02dcc6e5963..e654285aa6ba 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -354,7 +354,7 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
}
};
- if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL))
return NULL;
return rt;
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8b00e6c46f01..7e0484f18db5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -331,7 +331,7 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
}
};
- if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL))
return NULL;
return rt;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index bab9f74c0665..cfed5399f074 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -53,8 +53,8 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
}
/* call with current->mm->mmap_sem held */
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
{
unsigned long lock_limit;
size_t got;
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __get_user_pages(start_page, num_pages, p, NULL);
+ ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
up_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b4ec3238ceb..3d7f3664b67b 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
- is_bonded = (netdev->master == event_netdev);
+ is_bonded = netif_is_bond_slave(netdev) &&
+ (netdev->master == event_netdev);
if ((netdev == event_netdev) || is_bonded) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 009ec814d517..ec3aa11c36cb 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1118,7 +1118,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
- if (nesvnic->netdev->master)
+ if (netif_is_bond_slave(netdev))
netdev = nesvnic->netdev->master;
else
netdev = nesvnic->netdev;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 5ad224e4a38b..4b9e11cc561b 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -705,7 +705,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
lwe = pip->link_width_enabled;
if (lwe) {
if (lwe == 0xFF)
- lwe = ppd->link_width_supported;
+ set_link_width_enabled(ppd, ppd->link_width_supported);
else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
smp->status |= IB_SMP_INVALID_FIELD;
else if (lwe != ppd->link_width_enabled)
@@ -720,7 +720,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
* speeds.
*/
if (lse == 15)
- lse = ppd->link_speed_supported;
+ set_link_speed_enabled(ppd,
+ ppd->link_speed_supported);
else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
smp->status |= IB_SMP_INVALID_FIELD;
else if (lse != ppd->link_speed_enabled)
@@ -849,7 +850,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (clientrereg)
pip->clientrereg_resv_subnetto |= 0x80;
- goto done;
+ goto get_only;
err:
smp->status |= IB_SMP_INVALID_FIELD;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index d7a26c1d4f37..7689e49c13c9 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -51,8 +51,8 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
/*
* Call with current->mm->mmap_sem held.
*/
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
{
unsigned long lock_limit;
size_t got;
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __get_user_pages(start_page, num_pages, p, NULL);
+ ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
up_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7b2fc98e2f2b..8db008de5392 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -532,6 +532,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->custom[3].value = conn->fmr_unalign_cnt;
}
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct iser_conn *ib_conn = ep->dd_data;
+ int len;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!ib_conn || !ib_conn->cma_id)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &ib_conn->cma_id->route.addr.dst_addr,
+ param, buf);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_MAX_BURST |
ISCSI_PDU_INORDER_EN |
ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
@@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.destroy_conn = iscsi_iser_conn_destroy,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c8471a2552e7..7f42d3a454d2 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -321,6 +321,9 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct input_event event;
int retval;
+ if (count < input_event_size())
+ return -EINVAL;
+
retval = mutex_lock_interruptible(&evdev->mutex);
if (retval)
return retval;
@@ -330,17 +333,16 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
goto out;
}
- while (retval < count) {
-
+ do {
if (input_event_from_user(buffer + retval, &event)) {
retval = -EFAULT;
goto out;
}
+ retval += input_event_size();
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
- retval += input_event_size();
- }
+ } while (retval + input_event_size() <= count);
out:
mutex_unlock(&evdev->mutex);
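
With the evdev_write() rework above, a write() to /dev/input/eventX must supply at least one complete struct input_event (shorter writes now fail with -EINVAL), and only whole events are consumed from the buffer. A hedged userspace sketch, injecting a key press followed by the usual EV_SYN report; the device path and key code are placeholders:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>

static int inject_key_press(const char *devnode, unsigned short keycode)
{
	struct input_event ev[2];
	int fd = open(devnode, O_WRONLY);

	if (fd < 0)
		return -1;

	memset(ev, 0, sizeof(ev));
	ev[0].type = EV_KEY;		/* key event, value 1 = press */
	ev[0].code = keycode;
	ev[0].value = 1;
	ev[1].type = EV_SYN;		/* flush the report */
	ev[1].code = SYN_REPORT;

	/* Both events go out in one call; a partial tail would not be consumed. */
	if (write(fd, ev, sizeof(ev)) != (ssize_t)sizeof(ev)) {
		close(fd);
		return -1;
	}

	return close(fd);
}

Releasing the key would take a second pair of events with ev[0].value set to 0.
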
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc933ec..5b8f59d6c3e8 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &gameport_event_list);
- schedule_work(&gameport_event_work);
+ queue_work(system_long_wq, &gameport_event_work);
out:
spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 0559e309bac9..3037842a60d8 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -192,7 +192,7 @@ static struct attribute_group input_polldev_attribute_group = {
};
/**
- * input_allocate_polled_device - allocated memory polled device
+ * input_allocate_polled_device - allocate memory for polled device
*
* The function allocates memory for a polled device and also
* for an input device associated with this polled device.
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(input_free_polled_device);
* with input layer. The device should be allocated with call to
* input_allocate_polled_device(). Callers should also set up poll()
* method and set up capabilities (id, name, phys, bits) of the
- * corresponing input_dev structure.
+ * corresponding input_dev structure.
*/
int input_register_polled_device(struct input_polled_dev *dev)
{
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 11905b6a3023..d6e8bd8a851c 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -791,22 +791,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
int retval;
spin_lock_irqsave(&dev->event_lock, flags);
-
- if (dev->getkeycode) {
- /*
- * Support for legacy drivers, that don't implement the new
- * ioctls
- */
- u32 scancode = ke->index;
-
- memcpy(ke->scancode, &scancode, sizeof(scancode));
- ke->len = sizeof(scancode);
- retval = dev->getkeycode(dev, scancode, &ke->keycode);
- } else {
- retval = dev->getkeycode_new(dev, ke);
- }
-
+ retval = dev->getkeycode(dev, ke);
spin_unlock_irqrestore(&dev->event_lock, flags);
+
return retval;
}
EXPORT_SYMBOL(input_get_keycode);
@@ -831,35 +818,7 @@ int input_set_keycode(struct input_dev *dev,
spin_lock_irqsave(&dev->event_lock, flags);
- if (dev->setkeycode) {
- /*
- * Support for legacy drivers, that don't implement the new
- * ioctls
- */
- unsigned int scancode;
-
- retval = input_scancode_to_scalar(ke, &scancode);
- if (retval)
- goto out;
-
- /*
- * We need to know the old scancode, in order to generate a
- * keyup effect, if the set operation happens successfully
- */
- if (!dev->getkeycode) {
- retval = -EINVAL;
- goto out;
- }
-
- retval = dev->getkeycode(dev, scancode, &old_keycode);
- if (retval)
- goto out;
-
- retval = dev->setkeycode(dev, scancode, ke->keycode);
- } else {
- retval = dev->setkeycode_new(dev, ke, &old_keycode);
- }
-
+ retval = dev->setkeycode(dev, ke, &old_keycode);
if (retval)
goto out;
@@ -1846,11 +1805,11 @@ int input_register_device(struct input_dev *dev)
dev->rep[REP_PERIOD] = 33;
}
- if (!dev->getkeycode && !dev->getkeycode_new)
- dev->getkeycode_new = input_default_getkeycode;
+ if (!dev->getkeycode)
+ dev->getkeycode = input_default_getkeycode;
- if (!dev->setkeycode && !dev->setkeycode_new)
- dev->setkeycode_new = input_default_setkeycode;
+ if (!dev->setkeycode)
+ dev->setkeycode = input_default_setkeycode;
dev_set_name(&dev->dev, "input%ld",
(unsigned long) atomic_inc_return(&input_no) - 1);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index f7c2a166576b..b732870ecc89 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/leds.h>
+#include <linux/pm.h>
#include <linux/i2c/lm8323.h>
#include <linux/slab.h>
@@ -802,8 +803,9 @@ static int __devexit lm8323_remove(struct i2c_client *client)
* We don't need to explicitly suspend the chip, as it already switches off
* when there's no activity.
*/
-static int lm8323_suspend(struct i2c_client *client, pm_message_t mesg)
+static int lm8323_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -821,8 +823,9 @@ static int lm8323_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int lm8323_resume(struct i2c_client *client)
+static int lm8323_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -839,11 +842,10 @@ static int lm8323_resume(struct i2c_client *client)
return 0;
}
-#else
-#define lm8323_suspend NULL
-#define lm8323_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(lm8323_pm_ops, lm8323_suspend, lm8323_resume);
+
static const struct i2c_device_id lm8323_id[] = {
{ "lm8323", 0 },
{ }
@@ -852,11 +854,10 @@ static const struct i2c_device_id lm8323_id[] = {
static struct i2c_driver lm8323_i2c_driver = {
.driver = {
.name = "lm8323",
+ .pm = &lm8323_pm_ops,
},
.probe = lm8323_probe,
.remove = __devexit_p(lm8323_remove),
- .suspend = lm8323_suspend,
- .resume = lm8323_resume,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 9091ff5ea808..5afe35ad24d3 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
@@ -271,8 +272,10 @@ static int __devexit max7359_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int max7359_suspend(struct i2c_client *client, pm_message_t mesg)
+static int max7359_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
max7359_fall_deepsleep(client);
if (device_may_wakeup(&client->dev))
@@ -281,8 +284,10 @@ static int max7359_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int max7359_resume(struct i2c_client *client)
+static int max7359_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
if (device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
@@ -291,11 +296,10 @@ static int max7359_resume(struct i2c_client *client)
return 0;
}
-#else
-#define max7359_suspend NULL
-#define max7359_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(max7359_pm, max7359_suspend, max7359_resume);
+
static const struct i2c_device_id max7359_ids[] = {
{ "max7359", 0 },
{ }
@@ -305,11 +309,10 @@ MODULE_DEVICE_TABLE(i2c, max7359_ids);
static struct i2c_driver max7359_i2c_driver = {
.driver = {
.name = "max7359",
+ .pm = &max7359_pm,
},
.probe = max7359_probe,
.remove = __devexit_p(max7359_remove),
- .suspend = max7359_suspend,
- .resume = max7359_resume,
.id_table = max7359_ids,
};
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 63b849d7e90b..af1aab324a4c 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -1,5 +1,5 @@
/*
- * mcs_touchkey.c - Touchkey driver for MELFAS MCS5000/5080 controller
+ * Touchkey driver for MELFAS MCS5000/5080 controller
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: HeungJun Kim <riverful.kim@samsung.com>
@@ -19,6 +19,7 @@
#include <linux/input.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/pm.h>
/* MCS5000 Touchkey */
#define MCS5000_TOUCHKEY_STATUS 0x04
@@ -45,6 +46,8 @@ struct mcs_touchkey_chip {
};
struct mcs_touchkey_data {
+ void (*poweron)(bool);
+
struct i2c_client *client;
struct input_dev *input_dev;
struct mcs_touchkey_chip chip;
@@ -169,6 +172,11 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
if (pdata->cfg_pin)
pdata->cfg_pin();
+ if (pdata->poweron) {
+ data->poweron = pdata->poweron;
+ data->poweron(true);
+ }
+
error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
if (error) {
@@ -196,12 +204,57 @@ static int __devexit mcs_touchkey_remove(struct i2c_client *client)
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
free_irq(client->irq, data);
+ if (data->poweron)
+ data->poweron(false);
input_unregister_device(data->input_dev);
kfree(data);
return 0;
}
+static void mcs_touchkey_shutdown(struct i2c_client *client)
+{
+ struct mcs_touchkey_data *data = i2c_get_clientdata(client);
+
+ if (data->poweron)
+ data->poweron(false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mcs_touchkey_suspend(struct device *dev)
+{
+ struct mcs_touchkey_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ /* Disable the work */
+ disable_irq(client->irq);
+
+ /* Finally turn off the power */
+ if (data->poweron)
+ data->poweron(false);
+
+ return 0;
+}
+
+static int mcs_touchkey_resume(struct device *dev)
+{
+ struct mcs_touchkey_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ /* Enable the device first */
+ if (data->poweron)
+ data->poweron(true);
+
+ /* Enable irq again */
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mcs_touchkey_pm_ops,
+ mcs_touchkey_suspend, mcs_touchkey_resume);
+
static const struct i2c_device_id mcs_touchkey_id[] = {
{ "mcs5000_touchkey", MCS5000_TOUCHKEY },
{ "mcs5080_touchkey", MCS5080_TOUCHKEY },
@@ -213,9 +266,11 @@ static struct i2c_driver mcs_touchkey_driver = {
.driver = {
.name = "mcs_touchkey",
.owner = THIS_MODULE,
+ .pm = &mcs_touchkey_pm_ops,
},
.probe = mcs_touchkey_probe,
.remove = __devexit_p(mcs_touchkey_remove),
+ .shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 45bd0977d006..c51a3c4a7feb 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <plat/omap4-keypad.h>
@@ -80,20 +81,6 @@ struct omap4_keypad {
unsigned short keymap[];
};
-static void __devinit omap4_keypad_config(struct omap4_keypad *keypad_data)
-{
- __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
- keypad_data->base + OMAP4_KBD_CTRL);
- __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
- keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
- __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
- __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
- keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
-}
-
/* Interrupt handler */
static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
{
@@ -144,6 +131,49 @@ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int omap4_keypad_open(struct input_dev *input)
+{
+ struct omap4_keypad *keypad_data = input_get_drvdata(input);
+
+ pm_runtime_get_sync(input->dev.parent);
+
+ disable_irq(keypad_data->irq);
+
+ __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
+ keypad_data->base + OMAP4_KBD_CTRL);
+ __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
+ keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
+ __raw_writel(OMAP4_VAL_IRQDISABLE,
+ keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+ __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
+ keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
+
+ enable_irq(keypad_data->irq);
+
+ return 0;
+}
+
+static void omap4_keypad_close(struct input_dev *input)
+{
+ struct omap4_keypad *keypad_data = input_get_drvdata(input);
+
+ disable_irq(keypad_data->irq);
+
+ /* Disable interrupts */
+ __raw_writel(OMAP4_VAL_IRQDISABLE,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+
+ /* clear pending interrupts */
+ __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
+ keypad_data->base + OMAP4_KBD_IRQSTATUS);
+
+ enable_irq(keypad_data->irq);
+
+ pm_runtime_put_sync(input->dev.parent);
+}
+
static int __devinit omap4_keypad_probe(struct platform_device *pdev)
{
const struct omap4_keypad_platform_data *pdata;
@@ -225,6 +255,9 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0001;
+ input_dev->open = omap4_keypad_open;
+ input_dev->close = omap4_keypad_close;
+
input_dev->keycode = keypad_data->keymap;
input_dev->keycodesize = sizeof(keypad_data->keymap[0]);
input_dev->keycodemax = max_keys;
@@ -239,8 +272,6 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
input_dev->keycode, input_dev->keybit);
- omap4_keypad_config(keypad_data);
-
error = request_irq(keypad_data->irq, omap4_keypad_interrupt,
IRQF_TRIGGER_RISING,
"omap4-keypad", keypad_data);
@@ -249,17 +280,19 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
goto err_free_input;
}
+ pm_runtime_enable(&pdev->dev);
+
error = input_register_device(keypad_data->input);
if (error < 0) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ goto err_pm_disable;
}
-
platform_set_drvdata(pdev, keypad_data);
return 0;
-err_free_irq:
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
free_irq(keypad_data->irq, keypad_data);
err_free_input:
input_free_device(input_dev);
@@ -278,6 +311,9 @@ static int __devexit omap4_keypad_remove(struct platform_device *pdev)
struct resource *res;
free_irq(keypad_data->irq, keypad_data);
+
+ pm_runtime_disable(&pdev->dev);
+
input_unregister_device(keypad_data->input);
iounmap(keypad_data->base);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index ac471b77c18e..99ce9032d08c 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -71,8 +71,9 @@ struct tegra_kbc {
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
+ bool use_fn_map;
const struct tegra_kbc_platform_data *pdata;
- unsigned short keycode[KBC_MAX_KEY];
+ unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(15, 5, KEY_F2),
KEY(15, 6, KEY_CAPSLOCK),
KEY(15, 7, KEY_F6),
+
+ /* Software Handled Function Keys */
+ KEY(20, 0, KEY_KP7),
+
+ KEY(21, 0, KEY_KP9),
+ KEY(21, 1, KEY_KP8),
+ KEY(21, 2, KEY_KP4),
+ KEY(21, 4, KEY_KP1),
+
+ KEY(22, 1, KEY_KPSLASH),
+ KEY(22, 2, KEY_KP6),
+ KEY(22, 3, KEY_KP5),
+ KEY(22, 4, KEY_KP3),
+ KEY(22, 5, KEY_KP2),
+ KEY(22, 7, KEY_KP0),
+
+ KEY(27, 1, KEY_KPASTERISK),
+ KEY(27, 3, KEY_KPMINUS),
+ KEY(27, 4, KEY_KPPLUS),
+ KEY(27, 5, KEY_KPDOT),
+
+ KEY(28, 5, KEY_VOLUMEUP),
+
+ KEY(29, 3, KEY_HOME),
+ KEY(29, 4, KEY_END),
+ KEY(29, 5, KEY_BRIGHTNESSDOWN),
+ KEY(29, 6, KEY_VOLUMEDOWN),
+ KEY(29, 7, KEY_BRIGHTNESSUP),
+
+ KEY(30, 0, KEY_NUMLOCK),
+ KEY(30, 1, KEY_SCROLLLOCK),
+ KEY(30, 2, KEY_MUTE),
+
+ KEY(31, 4, KEY_HELP),
};
static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
unsigned int i;
unsigned int num_down = 0;
unsigned long flags;
+ bool fn_keypress = false;
spin_lock_irqsave(&kbc->lock, flags);
for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
scancodes[num_down] = scancode;
- keycodes[num_down++] = kbc->keycode[scancode];
+ keycodes[num_down] = kbc->keycode[scancode];
+ /* If driver uses Fn map, do not report the Fn key. */
+ if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+ fn_keypress = true;
+ else
+ num_down++;
}
val >>= 8;
}
+
+ /*
+ * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+ * Function keycodes are stored KBC_MAX_KEY entries above the plain keycodes.
+ */
+ if (fn_keypress) {
+ for (i = 0; i < num_down; i++) {
+ scancodes[i] += KBC_MAX_KEY;
+ keycodes[i] = kbc->keycode[scancodes[i]];
+ }
+ }
+
spin_unlock_irqrestore(&kbc->lock, flags);
tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
input_dev->keycode = kbc->keycode;
input_dev->keycodesize = sizeof(kbc->keycode[0]);
- input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+ input_dev->keycodemax = KBC_MAX_KEY;
+ if (pdata->use_fn_map)
+ input_dev->keycodemax *= 2;
+ kbc->use_fn_map = pdata->use_fn_map;
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index 2bef8fa56c94..e21deb1baa8a 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -10,23 +10,23 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pm.h>
#include "ad714x.h"
#ifdef CONFIG_PM
-static int ad714x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int ad714x_i2c_suspend(struct device *dev)
{
- return ad714x_disable(i2c_get_clientdata(client));
+ return ad714x_disable(i2c_get_clientdata(to_i2c_client(dev)));
}
-static int ad714x_i2c_resume(struct i2c_client *client)
+static int ad714x_i2c_resume(struct device *dev)
{
- return ad714x_enable(i2c_get_clientdata(client));
+ return ad714x_enable(i2c_get_clientdata(to_i2c_client(dev)));
}
-#else
-# define ad714x_i2c_suspend NULL
-# define ad714x_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
+
static int ad714x_i2c_write(struct device *dev, unsigned short reg,
unsigned short data)
{
@@ -114,11 +114,10 @@ MODULE_DEVICE_TABLE(i2c, ad714x_id);
static struct i2c_driver ad714x_i2c_driver = {
.driver = {
.name = "ad714x_captouch",
+ .pm = &ad714x_i2c_pm,
},
.probe = ad714x_i2c_probe,
.remove = __devexit_p(ad714x_i2c_remove),
- .suspend = ad714x_i2c_suspend,
- .resume = ad714x_i2c_resume,
.id_table = ad714x_id,
};
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 7f8dedfd1bfe..4120dd549305 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -9,6 +9,7 @@
#include <linux/input.h> /* BUS_I2C */
#include <linux/module.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include <linux/types.h>
#include "ad714x.h"
@@ -16,20 +17,19 @@
#define AD714x_SPI_READ BIT(10)
#ifdef CONFIG_PM
-static int ad714x_spi_suspend(struct spi_device *spi, pm_message_t message)
+static int ad714x_spi_suspend(struct device *dev)
{
- return ad714x_disable(spi_get_drvdata(spi));
+ return ad714x_disable(spi_get_drvdata(to_spi_device(dev)));
}
-static int ad714x_spi_resume(struct spi_device *spi)
+static int ad714x_spi_resume(struct device *dev)
{
- return ad714x_enable(spi_get_drvdata(spi));
+ return ad714x_enable(spi_get_drvdata(to_spi_device(dev)));
}
-#else
-# define ad714x_spi_suspend NULL
-# define ad714x_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
+
static int ad714x_spi_read(struct device *dev, unsigned short reg,
unsigned short *data)
{
@@ -79,11 +79,10 @@ static struct spi_driver ad714x_spi_driver = {
.driver = {
.name = "ad714x_captouch",
.owner = THIS_MODULE,
+ .pm = &ad714x_spi_pm,
},
.probe = ad714x_spi_probe,
.remove = __devexit_p(ad714x_spi_remove),
- .suspend = ad714x_spi_suspend,
- .resume = ad714x_spi_resume,
};
static __init int ad714x_spi_init(void)
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index 0779724af7e7..ccacf2bb06a4 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -11,6 +11,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pm.h>
#include "adxl34x.h"
static int adxl34x_smbus_read(struct device *dev, unsigned char reg)
@@ -105,8 +106,9 @@ static int __devexit adxl34x_i2c_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int adxl34x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int adxl34x_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
adxl34x_suspend(ac);
@@ -114,19 +116,20 @@ static int adxl34x_i2c_suspend(struct i2c_client *client, pm_message_t message)
return 0;
}
-static int adxl34x_i2c_resume(struct i2c_client *client)
+static int adxl34x_i2c_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
adxl34x_resume(ac);
return 0;
}
-#else
-# define adxl34x_i2c_suspend NULL
-# define adxl34x_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adxl34x_i2c_pm, adxl34x_i2c_suspend,
+ adxl34x_i2c_resume);
+
static const struct i2c_device_id adxl34x_id[] = {
{ "adxl34x", 0 },
{ }
@@ -138,11 +141,10 @@ static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.owner = THIS_MODULE,
+ .pm = &adxl34x_i2c_pm,
},
.probe = adxl34x_i2c_probe,
.remove = __devexit_p(adxl34x_i2c_remove),
- .suspend = adxl34x_i2c_suspend,
- .resume = adxl34x_i2c_resume,
.id_table = adxl34x_id,
};
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 782de9e89828..f29de22fdda0 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -10,6 +10,7 @@
#include <linux/input.h> /* BUS_SPI */
#include <linux/module.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include <linux/types.h>
#include "adxl34x.h"
@@ -57,7 +58,7 @@ static int adxl34x_spi_read_block(struct device *dev,
return (status < 0) ? status : 0;
}
-static const struct adxl34x_bus_ops adx134x_spi_bops = {
+static const struct adxl34x_bus_ops adxl34x_spi_bops = {
.bustype = BUS_SPI,
.write = adxl34x_spi_write,
.read = adxl34x_spi_read,
@@ -76,7 +77,7 @@ static int __devinit adxl34x_spi_probe(struct spi_device *spi)
ac = adxl34x_probe(&spi->dev, spi->irq,
spi->max_speed_hz > MAX_FREQ_NO_FIFODELAY,
- &adx134x_spi_bops);
+ &adxl34x_spi_bops);
if (IS_ERR(ac))
return PTR_ERR(ac);
@@ -94,8 +95,9 @@ static int __devexit adxl34x_spi_remove(struct spi_device *spi)
}
#ifdef CONFIG_PM
-static int adxl34x_spi_suspend(struct spi_device *spi, pm_message_t message)
+static int adxl34x_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
adxl34x_suspend(ac);
@@ -103,29 +105,29 @@ static int adxl34x_spi_suspend(struct spi_device *spi, pm_message_t message)
return 0;
}
-static int adxl34x_spi_resume(struct spi_device *spi)
+static int adxl34x_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
adxl34x_resume(ac);
return 0;
}
-#else
-# define adxl34x_spi_suspend NULL
-# define adxl34x_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
+ adxl34x_spi_resume);
+
static struct spi_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &adxl34x_spi_pm,
},
.probe = adxl34x_spi_probe,
.remove = __devexit_p(adxl34x_spi_remove),
- .suspend = adxl34x_spi_suspend,
- .resume = adxl34x_spi_resume,
};
static int __init adxl34x_spi_init(void)
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 0b0e9be63542..9ccdb82d869a 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -612,8 +612,8 @@ static int ati_remote2_input_init(struct ati_remote2 *ar2)
idev->open = ati_remote2_open;
idev->close = ati_remote2_close;
- idev->getkeycode_new = ati_remote2_getkeycode;
- idev->setkeycode_new = ati_remote2_setkeycode;
+ idev->getkeycode = ati_remote2_getkeycode;
+ idev->setkeycode = ati_remote2_setkeycode;
idev->name = ar2->name;
idev->phys = ar2->phys;
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 8e130bf7d32b..0122f5351577 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -173,18 +173,16 @@ static int __devinit sparcspkr_probe(struct device *dev)
return 0;
}
-static int sparcspkr_shutdown(struct platform_device *dev)
+static void sparcspkr_shutdown(struct platform_device *dev)
{
struct sparcspkr_state *state = dev_get_drvdata(&dev->dev);
struct input_dev *input_dev = state->input_dev;
/* turn off the speaker */
state->event(input_dev, EV_SND, SND_BELL, 0);
-
- return 0;
}
-static int __devinit bbc_beep_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit bbc_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct bbc_beep_info *info;
@@ -258,7 +256,7 @@ static const struct of_device_id bbc_beep_match[] = {
{},
};
-static struct of_platform_driver bbc_beep_driver = {
+static struct platform_driver bbc_beep_driver = {
.driver = {
.name = "bbcbeep",
.owner = THIS_MODULE,
@@ -269,7 +267,7 @@ static struct of_platform_driver bbc_beep_driver = {
.shutdown = sparcspkr_shutdown,
};
-static int __devinit grover_beep_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit grover_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct grover_beep_info *info;
@@ -340,7 +338,7 @@ static const struct of_device_id grover_beep_match[] = {
{},
};
-static struct of_platform_driver grover_beep_driver = {
+static struct platform_driver grover_beep_driver = {
.driver = {
.name = "groverbeep",
.owner = THIS_MODULE,
@@ -353,12 +351,12 @@ static struct of_platform_driver grover_beep_driver = {
static int __init sparcspkr_init(void)
{
- int err = of_register_platform_driver(&bbc_beep_driver);
+ int err = platform_driver_register(&bbc_beep_driver);
if (!err) {
- err = of_register_platform_driver(&grover_beep_driver);
+ err = platform_driver_register(&grover_beep_driver);
if (err)
- of_unregister_platform_driver(&bbc_beep_driver);
+ platform_driver_unregister(&bbc_beep_driver);
}
return err;
@@ -366,8 +364,8 @@ static int __init sparcspkr_init(void)
static void __exit sparcspkr_exit(void)
{
- of_unregister_platform_driver(&bbc_beep_driver);
- of_unregister_platform_driver(&grover_beep_driver);
+ platform_driver_unregister(&bbc_beep_driver);
+ platform_driver_unregister(&grover_beep_driver);
}
module_init(sparcspkr_init);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 014dd4ad0d4f..6a11694e3fc7 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/i2c/twl.h>
#include <linux/mfd/twl4030-codec.h>
+#include <linux/mfd/core.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -196,7 +197,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_codec_vibra_data *pdata = mfd_get_data(pdev);
struct vibra_info *info;
int ret;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 82542a1c1098..364bdf43a381 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -347,8 +347,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
{
struct uinput_user_dev *user_dev;
struct input_dev *dev;
- char *name;
- int i, size;
+ int i;
int retval;
if (count != sizeof(struct uinput_user_dev))
@@ -362,30 +361,25 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
dev = udev->dev;
- user_dev = kmalloc(sizeof(struct uinput_user_dev), GFP_KERNEL);
- if (!user_dev)
- return -ENOMEM;
-
- if (copy_from_user(user_dev, buffer, sizeof(struct uinput_user_dev))) {
- retval = -EFAULT;
- goto exit;
- }
+ user_dev = memdup_user(buffer, sizeof(struct uinput_user_dev));
+ if (IS_ERR(user_dev))
+ return PTR_ERR(user_dev);
udev->ff_effects_max = user_dev->ff_effects_max;
- size = strnlen(user_dev->name, UINPUT_MAX_NAME_SIZE) + 1;
- if (!size) {
+ /* Ensure name is filled in */
+ if (!user_dev->name[0]) {
retval = -EINVAL;
goto exit;
}
kfree(dev->name);
- dev->name = name = kmalloc(size, GFP_KERNEL);
- if (!name) {
+ dev->name = kstrndup(user_dev->name, UINPUT_MAX_NAME_SIZE,
+ GFP_KERNEL);
+ if (!dev->name) {
retval = -ENOMEM;
goto exit;
}
- strlcpy(name, user_dev->name, size);
dev->id.bustype = user_dev->id.bustype;
dev->id.vendor = user_dev->id.vendor;
@@ -622,7 +616,6 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
struct uinput_ff_upload ff_up;
struct uinput_ff_erase ff_erase;
struct uinput_request *req;
- int length;
char *phys;
retval = mutex_lock_interruptible(&udev->mutex);
@@ -689,24 +682,15 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
retval = -EINVAL;
goto out;
}
- length = strnlen_user(p, 1024);
- if (length <= 0) {
- retval = -EFAULT;
- break;
+
+ phys = strndup_user(p, 1024);
+ if (IS_ERR(phys)) {
+ retval = PTR_ERR(phys);
+ goto out;
}
+
kfree(udev->dev->phys);
- udev->dev->phys = phys = kmalloc(length, GFP_KERNEL);
- if (!phys) {
- retval = -ENOMEM;
- break;
- }
- if (copy_from_user(phys, p, length)) {
- udev->dev->phys = NULL;
- kfree(phys);
- retval = -EFAULT;
- break;
- }
- phys[length - 1] = '\0';
+ udev->dev->phys = phys;
break;
case UI_BEGIN_FF_UPLOAD:
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ee82851afe3e..0efaf667a1c8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -430,10 +430,6 @@ static int report_tp_state(struct bcm5974 *dev, int size)
ptest = int2bound(&c->p, raw_p);
origin = raw2int(f->origin);
- /* set the integrated button if applicable */
- if (c->tp_type == TYPE2)
- ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
-
/* while tracking finger still valid, count all fingers */
if (ptest > PRESSURE_LOW && origin) {
abs_p = ptest;
@@ -452,6 +448,10 @@ static int report_tp_state(struct bcm5974 *dev, int size)
}
}
+ /* set the integrated button if applicable */
+ if (c->tp_type == TYPE2)
+ ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+
if (dev->fingers < nmin)
dev->fingers = nmin;
if (dev->fingers > nmax)
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d042a72c..7453938bf5ef 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte mask name meaning
+ * ---- ---- ------- ------------
+ * 1 0x01 adjustable threshold capacitive button sensitivity
+ * can be adjusted
+ * 1 0x02 report max query 0x0d gives max coord reported
+ * 1 0x04 clearpad sensor is ClearPad product
+ * 1 0x08 advanced gesture not particularly meaningful
+ * 1 0x10 clickpad bit 0 1-button ClickPad
+ * 1 0x60 multifinger mode identifies firmware finger counting
+ * (not reporting!) algorithm.
+ * Not particularly meaningful
+ * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
+ * 2 0x01 clickpad bit 1 2-button ClickPad
+ * 2 0x02 deluxe LED controls touchpad support LED commands
+ * ala multimedia control bar
+ * 2 0x04 reduced filtering firmware does less filtering on
+ * position data, driver should watch
+ * for noise.
+ */
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 0ae62f0bcb32..f6aa26d305ed 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#define DRIVER_NAME "synaptics_i2c"
/* maximum product id is 15 characters */
@@ -619,8 +620,9 @@ static int __devexit synaptics_i2c_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int synaptics_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+static int synaptics_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
cancel_delayed_work_sync(&touch->dwork);
@@ -631,9 +633,10 @@ static int synaptics_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int synaptics_i2c_resume(struct i2c_client *client)
+static int synaptics_i2c_resume(struct device *dev)
{
int ret;
+ struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
ret = synaptics_i2c_reset_config(client);
@@ -645,11 +648,11 @@ static int synaptics_i2c_resume(struct i2c_client *client)
return 0;
}
-#else
-#define synaptics_i2c_suspend NULL
-#define synaptics_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(synaptics_i2c_pm, synaptics_i2c_suspend,
+ synaptics_i2c_resume);
+
static const struct i2c_device_id synaptics_i2c_id_table[] = {
{ "synaptics_i2c", 0 },
{ },
@@ -660,13 +663,12 @@ static struct i2c_driver synaptics_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &synaptics_i2c_pm,
},
.probe = synaptics_i2c_probe,
.remove = __devexit_p(synaptics_i2c_remove),
- .suspend = synaptics_i2c_suspend,
- .resume = synaptics_i2c_resume,
.id_table = synaptics_i2c_id_table,
};
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 7998560a1904..d363dc4571a3 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define DRV_NAME "altera_ps2"
@@ -173,6 +174,16 @@ static int __devexit altera_ps2_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id altera_ps2_match[] = {
+ { .compatible = "ALTR,ps2-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_ps2_match);
+#else /* CONFIG_OF */
+#define altera_ps2_match NULL
+#endif /* CONFIG_OF */
+
/*
* Our device driver structure
*/
@@ -182,6 +193,7 @@ static struct platform_driver altera_ps2_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = altera_ps2_match,
},
};
@@ -189,13 +201,12 @@ static int __init altera_ps2_init(void)
{
return platform_driver_register(&altera_ps2_driver);
}
+module_init(altera_ps2_init);
static void __exit altera_ps2_exit(void)
{
platform_driver_unregister(&altera_ps2_driver);
}
-
-module_init(altera_ps2_init);
module_exit(altera_ps2_exit);
MODULE_DESCRIPTION("Altera University Program PS2 controller driver");
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 92563a681d65..12abc50508e5 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,8 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
-static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit amba_kmi_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index c5cc4508d6df..395a9af3adcd 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -49,7 +49,7 @@ static inline void i8042_write_command(int val)
#define OBP_PS2MS_NAME1 "kdmouse"
#define OBP_PS2MS_NAME2 "mouse"
-static int __devinit sparc_i8042_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit sparc_i8042_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -95,7 +95,7 @@ static const struct of_device_id sparc_i8042_match[] = {
};
MODULE_DEVICE_TABLE(of, sparc_i8042_match);
-static struct of_platform_driver sparc_i8042_driver = {
+static struct platform_driver sparc_i8042_driver = {
.driver = {
.name = "i8042",
.owner = THIS_MODULE,
@@ -116,7 +116,7 @@ static int __init i8042_platform_init(void)
if (!kbd_iobase)
return -ENODEV;
} else {
- int err = of_register_platform_driver(&sparc_i8042_driver);
+ int err = platform_driver_register(&sparc_i8042_driver);
if (err)
return err;
@@ -140,7 +140,7 @@ static inline void i8042_platform_exit(void)
struct device_node *root = of_find_node_by_path("/");
if (strcmp(root->name, "SUNW,JavaStation-1"))
- of_unregister_platform_driver(&sparc_i8042_driver);
+ platform_driver_unregister(&sparc_i8042_driver);
}
#else /* !CONFIG_PCI */
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
new file mode 100644
index 000000000000..73f5cc124a36
--- /dev/null
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -0,0 +1,73 @@
+/*
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2011 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _I8042_UNICORE32_H
+#define _I8042_UNICORE32_H
+
+#include <mach/hardware.h>
+
+/*
+ * Names.
+ */
+#define I8042_KBD_PHYS_DESC "isa0060/serio0"
+#define I8042_AUX_PHYS_DESC "isa0060/serio1"
+#define I8042_MUX_PHYS_DESC "isa0060/serio%d"
+
+/*
+ * IRQs.
+ */
+#define I8042_KBD_IRQ IRQ_PS2_KBD
+#define I8042_AUX_IRQ IRQ_PS2_AUX
+
+/*
+ * Register numbers.
+ */
+#define I8042_COMMAND_REG PS2_COMMAND
+#define I8042_STATUS_REG PS2_STATUS
+#define I8042_DATA_REG PS2_DATA
+
+#define I8042_REGION_START (resource_size_t)(PS2_DATA)
+#define I8042_REGION_SIZE (resource_size_t)(16)
+
+static inline int i8042_read_data(void)
+{
+ return readb(I8042_DATA_REG);
+}
+
+static inline int i8042_read_status(void)
+{
+ return readb(I8042_STATUS_REG);
+}
+
+static inline void i8042_write_data(int val)
+{
+ writeb(val, I8042_DATA_REG);
+}
+
+static inline void i8042_write_command(int val)
+{
+ writeb(val, I8042_COMMAND_REG);
+}
+
+static inline int i8042_platform_init(void)
+{
+ if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
+ return -EBUSY;
+
+ i8042_reset = 1;
+ return 0;
+}
+
+static inline void i8042_platform_exit(void)
+{
+ release_mem_region(I8042_REGION_START, I8042_REGION_SIZE);
+}
+
+#endif /* _I8042_UNICORE32_H */
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index ac1d759d0f55..3452708fbe3b 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -26,6 +26,8 @@
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
#include "i8042-x86ia64io.h"
+#elif defined(CONFIG_UNICORE32)
+#include "i8042-unicore32io.h"
#else
#include "i8042-io.h"
#endif
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 7c38d1fbabf2..ba70058e2be3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &serio_event_list);
- schedule_work(&serio_event_work);
+ queue_work(system_long_wq, &serio_event_work);
out:
spin_unlock_irqrestore(&serio_event_lock, flags);
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index bb14449fb022..7540bafc95cf 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -232,8 +232,7 @@ static void sxps2_close(struct serio *pserio)
* It returns 0, if the driver is bound to the PS/2 device, or a negative
* value if there is an error.
*/
-static int __devinit xps2_of_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit xps2_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -361,7 +360,7 @@ static const struct of_device_id xps2_of_match[] __devinitconst = {
};
MODULE_DEVICE_TABLE(of, xps2_of_match);
-static struct of_platform_driver xps2_of_driver = {
+static struct platform_driver xps2_of_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -373,12 +372,12 @@ static struct of_platform_driver xps2_of_driver = {
static int __init xps2_init(void)
{
- return of_register_platform_driver(&xps2_of_driver);
+ return platform_driver_register(&xps2_of_driver);
}
static void __exit xps2_cleanup(void)
{
- of_unregister_platform_driver(&xps2_of_driver);
+ platform_driver_unregister(&xps2_of_driver);
}
module_init(xps2_init);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 7729e547ba65..337bf51bc984 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -210,8 +210,8 @@ int sparse_keymap_setup(struct input_dev *dev,
dev->keycode = map;
dev->keycodemax = map_size;
- dev->getkeycode_new = sparse_keymap_getkeycode;
- dev->setkeycode_new = sparse_keymap_setkeycode;
+ dev->getkeycode = sparse_keymap_getkeycode;
+ dev->setkeycode = sparse_keymap_setkeycode;
return 0;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 61834ae282e1..2fbd3d39b7eb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -86,6 +86,18 @@ config TOUCHSCREEN_AD7879_SPI
To compile this driver as a module, choose M here: the
module will be called ad7879-spi.
+config TOUCHSCREEN_ATMEL_MXT
+ tristate "Atmel mXT I2C Touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have an Atmel mXT series I2C touchscreen,
+ such as AT42QT602240/ATMXT224, connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel_mxt_ts.
+
config TOUCHSCREEN_BITSY
tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
depends on SA1100_BITSY
@@ -339,18 +351,6 @@ config TOUCHSCREEN_PENMOUNT
To compile this driver as a module, choose M here: the
module will be called penmount.
-config TOUCHSCREEN_QT602240
- tristate "QT602240 I2C Touchscreen"
- depends on I2C
- help
- Say Y here if you have the AT42QT602240/ATMXT224 I2C touchscreen
- connected to your system.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called qt602240_ts.
-
config TOUCHSCREEN_MIGOR
tristate "Renesas MIGO-R touchscreen"
depends on SH_MIGOR && I2C
@@ -423,6 +423,16 @@ config TOUCHSCREEN_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_ts.
+config TOUCHSCREEN_WM831X
+ tristate "Support for WM831x touchscreen controllers"
+ depends on MFD_WM831X
+ help
+ This enables support for the touchscreen controller on the WM831x
+ series of PMICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wm831x-ts.
+
config TOUCHSCREEN_WM97XX
tristate "Support for WM97xx AC97 touchscreen controllers"
depends on AC97_BUS
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 718bcc814952..756156409b83 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
+obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
@@ -37,7 +38,6 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
-obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+obj-$(CONFIG_TOUCHSCREEN_WM831X) += wm831x-ts.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index a1952fcc083e..714d4e0f9f95 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -41,6 +41,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/ad7877.h>
@@ -826,39 +827,37 @@ static int __devexit ad7877_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM
-static int ad7877_suspend(struct spi_device *spi, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int ad7877_suspend(struct device *dev)
{
- struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
ad7877_disable(ts);
return 0;
}
-static int ad7877_resume(struct spi_device *spi)
+static int ad7877_resume(struct device *dev)
{
- struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
ad7877_enable(ts);
return 0;
}
-#else
-#define ad7877_suspend NULL
-#define ad7877_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
+
static struct spi_driver ad7877_driver = {
.driver = {
.name = "ad7877",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &ad7877_pm,
},
.probe = ad7877_probe,
.remove = __devexit_p(ad7877_remove),
- .suspend = ad7877_suspend,
- .resume = ad7877_resume,
};
static int __init ad7877_init(void)
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 59c6e68c4325..ddf732f3cafc 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -7,6 +7,7 @@
*/
#include <linux/input.h> /* BUS_SPI */
+#include <linux/pm.h>
#include <linux/spi/spi.h>
#include "ad7879.h"
@@ -20,9 +21,10 @@
#define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
#define AD7879_READCMD(reg) (AD7879_CMD(reg) | AD7879_CMD_READ)
-#ifdef CONFIG_PM
-static int ad7879_spi_suspend(struct spi_device *spi, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int ad7879_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct ad7879 *ts = spi_get_drvdata(spi);
ad7879_suspend(ts);
@@ -30,19 +32,19 @@ static int ad7879_spi_suspend(struct spi_device *spi, pm_message_t message)
return 0;
}
-static int ad7879_spi_resume(struct spi_device *spi)
+static int ad7879_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct ad7879 *ts = spi_get_drvdata(spi);
ad7879_resume(ts);
return 0;
}
-#else
-# define ad7879_spi_suspend NULL
-# define ad7879_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad7879_spi_pm, ad7879_spi_suspend, ad7879_spi_resume);
+
/*
* ad7879_read/write are only used for initial setup and for sysfs controls.
* The main traffic is done in ad7879_collect().
@@ -173,11 +175,10 @@ static struct spi_driver ad7879_spi_driver = {
.name = "ad7879",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &ad7879_spi_pm,
},
.probe = ad7879_spi_probe,
.remove = __devexit_p(ad7879_spi_remove),
- .suspend = ad7879_spi_suspend,
- .resume = ad7879_spi_resume,
};
static int __init ad7879_spi_init(void)
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 4bf2316e3284..c24946f51256 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -26,6 +26,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -892,9 +893,10 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int ads7846_suspend(struct device *dev)
{
- struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
mutex_lock(&ts->lock);
@@ -914,9 +916,9 @@ static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
return 0;
}
-static int ads7846_resume(struct spi_device *spi)
+static int ads7846_resume(struct device *dev)
{
- struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
mutex_lock(&ts->lock);
@@ -935,6 +937,9 @@ static int ads7846_resume(struct spi_device *spi)
return 0;
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
{
@@ -1408,11 +1413,10 @@ static struct spi_driver ads7846_driver = {
.name = "ads7846",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &ads7846_pm,
},
.probe = ads7846_probe,
.remove = __devexit_p(ads7846_remove),
- .suspend = ads7846_suspend,
- .resume = ads7846_resume,
};
static int __init ads7846_init(void)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
new file mode 100644
index 000000000000..6264ba80c38c
--- /dev/null
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -0,0 +1,1214 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+/* Version */
+#define MXT_VER_20 20
+#define MXT_VER_21 21
+#define MXT_VER_22 22
+
+/* Slave addresses */
+#define MXT_APP_LOW 0x4a
+#define MXT_APP_HIGH 0x4b
+#define MXT_BOOT_LOW 0x24
+#define MXT_BOOT_HIGH 0x25
+
+/* Firmware */
+#define MXT_FW_NAME "maxtouch.fw"
+
+/* Registers */
+#define MXT_FAMILY_ID 0x00
+#define MXT_VARIANT_ID 0x01
+#define MXT_VERSION 0x02
+#define MXT_BUILD 0x03
+#define MXT_MATRIX_X_SIZE 0x04
+#define MXT_MATRIX_Y_SIZE 0x05
+#define MXT_OBJECT_NUM 0x06
+#define MXT_OBJECT_START 0x07
+
+#define MXT_OBJECT_SIZE 6
+
+/* Object types */
+#define MXT_DEBUG_DIAGNOSTIC 37
+#define MXT_GEN_MESSAGE 5
+#define MXT_GEN_COMMAND 6
+#define MXT_GEN_POWER 7
+#define MXT_GEN_ACQUIRE 8
+#define MXT_TOUCH_MULTI 9
+#define MXT_TOUCH_KEYARRAY 15
+#define MXT_TOUCH_PROXIMITY 23
+#define MXT_PROCI_GRIPFACE 20
+#define MXT_PROCG_NOISE 22
+#define MXT_PROCI_ONETOUCH 24
+#define MXT_PROCI_TWOTOUCH 27
+#define MXT_SPT_COMMSCONFIG 18 /* firmware ver 21 and later */
+#define MXT_SPT_GPIOPWM 19
+#define MXT_SPT_SELFTEST 25
+#define MXT_SPT_CTECONFIG 28
+#define MXT_SPT_USERDATA 38 /* firmware ver 21 and later */
+
+/* MXT_GEN_COMMAND field */
+#define MXT_COMMAND_RESET 0
+#define MXT_COMMAND_BACKUPNV 1
+#define MXT_COMMAND_CALIBRATE 2
+#define MXT_COMMAND_REPORTALL 3
+#define MXT_COMMAND_DIAGNOSTIC 5
+
+/* MXT_GEN_POWER field */
+#define MXT_POWER_IDLEACQINT 0
+#define MXT_POWER_ACTVACQINT 1
+#define MXT_POWER_ACTV2IDLETO 2
+
+/* MXT_GEN_ACQUIRE field */
+#define MXT_ACQUIRE_CHRGTIME 0
+#define MXT_ACQUIRE_TCHDRIFT 2
+#define MXT_ACQUIRE_DRIFTST 3
+#define MXT_ACQUIRE_TCHAUTOCAL 4
+#define MXT_ACQUIRE_SYNC 5
+#define MXT_ACQUIRE_ATCHCALST 6
+#define MXT_ACQUIRE_ATCHCALSTHR 7
+
+/* MXT_TOUCH_MULTI field */
+#define MXT_TOUCH_CTRL 0
+#define MXT_TOUCH_XORIGIN 1
+#define MXT_TOUCH_YORIGIN 2
+#define MXT_TOUCH_XSIZE 3
+#define MXT_TOUCH_YSIZE 4
+#define MXT_TOUCH_BLEN 6
+#define MXT_TOUCH_TCHTHR 7
+#define MXT_TOUCH_TCHDI 8
+#define MXT_TOUCH_ORIENT 9
+#define MXT_TOUCH_MOVHYSTI 11
+#define MXT_TOUCH_MOVHYSTN 12
+#define MXT_TOUCH_NUMTOUCH 14
+#define MXT_TOUCH_MRGHYST 15
+#define MXT_TOUCH_MRGTHR 16
+#define MXT_TOUCH_AMPHYST 17
+#define MXT_TOUCH_XRANGE_LSB 18
+#define MXT_TOUCH_XRANGE_MSB 19
+#define MXT_TOUCH_YRANGE_LSB 20
+#define MXT_TOUCH_YRANGE_MSB 21
+#define MXT_TOUCH_XLOCLIP 22
+#define MXT_TOUCH_XHICLIP 23
+#define MXT_TOUCH_YLOCLIP 24
+#define MXT_TOUCH_YHICLIP 25
+#define MXT_TOUCH_XEDGECTRL 26
+#define MXT_TOUCH_XEDGEDIST 27
+#define MXT_TOUCH_YEDGECTRL 28
+#define MXT_TOUCH_YEDGEDIST 29
+#define MXT_TOUCH_JUMPLIMIT 30 /* firmware ver 22 and later */
+
+/* MXT_PROCI_GRIPFACE field */
+#define MXT_GRIPFACE_CTRL 0
+#define MXT_GRIPFACE_XLOGRIP 1
+#define MXT_GRIPFACE_XHIGRIP 2
+#define MXT_GRIPFACE_YLOGRIP 3
+#define MXT_GRIPFACE_YHIGRIP 4
+#define MXT_GRIPFACE_MAXTCHS 5
+#define MXT_GRIPFACE_SZTHR1 7
+#define MXT_GRIPFACE_SZTHR2 8
+#define MXT_GRIPFACE_SHPTHR1 9
+#define MXT_GRIPFACE_SHPTHR2 10
+#define MXT_GRIPFACE_SUPEXTTO 11
+
+/* MXT_PROCI_NOISE field */
+#define MXT_NOISE_CTRL 0
+#define MXT_NOISE_OUTFLEN 1
+#define MXT_NOISE_GCAFUL_LSB 3
+#define MXT_NOISE_GCAFUL_MSB 4
+#define MXT_NOISE_GCAFLL_LSB 5
+#define MXT_NOISE_GCAFLL_MSB 6
+#define MXT_NOISE_ACTVGCAFVALID 7
+#define MXT_NOISE_NOISETHR 8
+#define MXT_NOISE_FREQHOPSCALE 10
+#define MXT_NOISE_FREQ0 11
+#define MXT_NOISE_FREQ1 12
+#define MXT_NOISE_FREQ2 13
+#define MXT_NOISE_FREQ3 14
+#define MXT_NOISE_FREQ4 15
+#define MXT_NOISE_IDLEGCAFVALID 16
+
+/* MXT_SPT_COMMSCONFIG */
+#define MXT_COMMS_CTRL 0
+#define MXT_COMMS_CMD 1
+
+/* MXT_SPT_CTECONFIG field */
+#define MXT_CTE_CTRL 0
+#define MXT_CTE_CMD 1
+#define MXT_CTE_MODE 2
+#define MXT_CTE_IDLEGCAFDEPTH 3
+#define MXT_CTE_ACTVGCAFDEPTH 4
+#define MXT_CTE_VOLTAGE 5 /* firmware ver 21 and later */
+
+#define MXT_VOLTAGE_DEFAULT 2700000
+#define MXT_VOLTAGE_STEP 10000
+
+/* Define for MXT_GEN_COMMAND */
+#define MXT_BOOT_VALUE 0xa5
+#define MXT_BACKUP_VALUE 0x55
+#define MXT_BACKUP_TIME 25 /* msec */
+#define MXT_RESET_TIME 65 /* msec */
+
+#define MXT_FWRESET_TIME 175 /* msec */
+
+/* Command to unlock bootloader */
+#define MXT_UNLOCK_CMD_MSB 0xaa
+#define MXT_UNLOCK_CMD_LSB 0xdc
+
+/* Bootloader mode status */
+#define MXT_WAITING_BOOTLOAD_CMD 0xc0 /* only bits 7 and 6 are valid */
+#define MXT_WAITING_FRAME_DATA 0x80 /* only bits 7 and 6 are valid */
+#define MXT_FRAME_CRC_CHECK 0x02
+#define MXT_FRAME_CRC_FAIL 0x03
+#define MXT_FRAME_CRC_PASS 0x04
+#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
+#define MXT_BOOT_STATUS_MASK 0x3f
+
+/* Touch status */
+#define MXT_SUPPRESS (1 << 1)
+#define MXT_AMP (1 << 2)
+#define MXT_VECTOR (1 << 3)
+#define MXT_MOVE (1 << 4)
+#define MXT_RELEASE (1 << 5)
+#define MXT_PRESS (1 << 6)
+#define MXT_DETECT (1 << 7)
+
+/* Touchscreen absolute values */
+#define MXT_MAX_XC 0x3ff
+#define MXT_MAX_YC 0x3ff
+#define MXT_MAX_AREA 0xff
+
+#define MXT_MAX_FINGER 10
+
+struct mxt_info {
+ u8 family_id;
+ u8 variant_id;
+ u8 version;
+ u8 build;
+ u8 matrix_xsize;
+ u8 matrix_ysize;
+ u8 object_num;
+};
+
+struct mxt_object {
+ u8 type;
+ u16 start_address;
+ u8 size;
+ u8 instances;
+ u8 num_report_ids;
+
+ /* to map object and message */
+ u8 max_reportid;
+};
+
+struct mxt_message {
+ u8 reportid;
+ u8 message[7];
+ u8 checksum;
+};
+
+struct mxt_finger {
+ int status;
+ int x;
+ int y;
+ int area;
+};
+
+/* Each client has this additional data */
+struct mxt_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ const struct mxt_platform_data *pdata;
+ struct mxt_object *object_table;
+ struct mxt_info info;
+ struct mxt_finger finger[MXT_MAX_FINGER];
+ unsigned int irq;
+};
+
+static bool mxt_object_readable(unsigned int type)
+{
+ switch (type) {
+ case MXT_GEN_MESSAGE:
+ case MXT_GEN_COMMAND:
+ case MXT_GEN_POWER:
+ case MXT_GEN_ACQUIRE:
+ case MXT_TOUCH_MULTI:
+ case MXT_TOUCH_KEYARRAY:
+ case MXT_TOUCH_PROXIMITY:
+ case MXT_PROCI_GRIPFACE:
+ case MXT_PROCG_NOISE:
+ case MXT_PROCI_ONETOUCH:
+ case MXT_PROCI_TWOTOUCH:
+ case MXT_SPT_COMMSCONFIG:
+ case MXT_SPT_GPIOPWM:
+ case MXT_SPT_SELFTEST:
+ case MXT_SPT_CTECONFIG:
+ case MXT_SPT_USERDATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool mxt_object_writable(unsigned int type)
+{
+ switch (type) {
+ case MXT_GEN_COMMAND:
+ case MXT_GEN_POWER:
+ case MXT_GEN_ACQUIRE:
+ case MXT_TOUCH_MULTI:
+ case MXT_TOUCH_KEYARRAY:
+ case MXT_TOUCH_PROXIMITY:
+ case MXT_PROCI_GRIPFACE:
+ case MXT_PROCG_NOISE:
+ case MXT_PROCI_ONETOUCH:
+ case MXT_PROCI_TWOTOUCH:
+ case MXT_SPT_GPIOPWM:
+ case MXT_SPT_SELFTEST:
+ case MXT_SPT_CTECONFIG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void mxt_dump_message(struct device *dev,
+ struct mxt_message *message)
+{
+ dev_dbg(dev, "reportid:\t0x%x\n", message->reportid);
+ dev_dbg(dev, "message1:\t0x%x\n", message->message[0]);
+ dev_dbg(dev, "message2:\t0x%x\n", message->message[1]);
+ dev_dbg(dev, "message3:\t0x%x\n", message->message[2]);
+ dev_dbg(dev, "message4:\t0x%x\n", message->message[3]);
+ dev_dbg(dev, "message5:\t0x%x\n", message->message[4]);
+ dev_dbg(dev, "message6:\t0x%x\n", message->message[5]);
+ dev_dbg(dev, "message7:\t0x%x\n", message->message[6]);
+ dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
+}
+
+static int mxt_check_bootloader(struct i2c_client *client,
+ unsigned int state)
+{
+ u8 val;
+
+recheck:
+ if (i2c_master_recv(client, &val, 1) != 1) {
+ dev_err(&client->dev, "%s: i2c recv failed\n", __func__);
+ return -EIO;
+ }
+
+ switch (state) {
+ case MXT_WAITING_BOOTLOAD_CMD:
+ case MXT_WAITING_FRAME_DATA:
+ val &= ~MXT_BOOT_STATUS_MASK;
+ break;
+ case MXT_FRAME_CRC_PASS:
+ if (val == MXT_FRAME_CRC_CHECK)
+ goto recheck;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val != state) {
+ dev_err(&client->dev, "Unvalid bootloader mode state\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mxt_unlock_bootloader(struct i2c_client *client)
+{
+ u8 buf[2];
+
+ buf[0] = MXT_UNLOCK_CMD_LSB;
+ buf[1] = MXT_UNLOCK_CMD_MSB;
+
+ if (i2c_master_send(client, buf, 2) != 2) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_fw_write(struct i2c_client *client,
+ const u8 *data, unsigned int frame_size)
+{
+ if (i2c_master_send(client, data, frame_size) != frame_size) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int __mxt_read_reg(struct i2c_client *client,
+ u16 reg, u16 len, void *val)
+{
+ struct i2c_msg xfer[2];
+ u8 buf[2];
+
+ buf[0] = reg & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+
+ /* Write register */
+ xfer[0].addr = client->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = buf;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = len;
+ xfer[1].buf = val;
+
+ if (i2c_transfer(client->adapter, xfer, 2) != 2) {
+ dev_err(&client->dev, "%s: i2c transfer failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_read_reg(struct i2c_client *client, u16 reg, u8 *val)
+{
+ return __mxt_read_reg(client, reg, 1, val);
+}
+
+static int mxt_write_reg(struct i2c_client *client, u16 reg, u8 val)
+{
+ u8 buf[3];
+
+ buf[0] = reg & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+ buf[2] = val;
+
+ if (i2c_master_send(client, buf, 3) != 3) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_read_object_table(struct i2c_client *client,
+ u16 reg, u8 *object_buf)
+{
+ return __mxt_read_reg(client, reg, MXT_OBJECT_SIZE,
+ object_buf);
+}
+
+static struct mxt_object *
+mxt_get_object(struct mxt_data *data, u8 type)
+{
+ struct mxt_object *object;
+ int i;
+
+ for (i = 0; i < data->info.object_num; i++) {
+ object = data->object_table + i;
+ if (object->type == type)
+ return object;
+ }
+
+ dev_err(&data->client->dev, "Invalid object type\n");
+ return NULL;
+}
+
+static int mxt_read_message(struct mxt_data *data,
+ struct mxt_message *message)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, MXT_GEN_MESSAGE);
+ if (!object)
+ return -EINVAL;
+
+ reg = object->start_address;
+ return __mxt_read_reg(data->client, reg,
+ sizeof(struct mxt_message), message);
+}
+
+static int mxt_read_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 *val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object)
+ return -EINVAL;
+
+ reg = object->start_address;
+ return __mxt_read_reg(data->client, reg + offset, 1, val);
+}
+
+static int mxt_write_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object)
+ return -EINVAL;
+
+ reg = object->start_address;
+ return mxt_write_reg(data->client, reg + offset, val);
+}
+
+static void mxt_input_report(struct mxt_data *data, int single_id)
+{
+ struct mxt_finger *finger = data->finger;
+ struct input_dev *input_dev = data->input_dev;
+ int status = finger[single_id].status;
+ int finger_num = 0;
+ int id;
+
+ for (id = 0; id < MXT_MAX_FINGER; id++) {
+ if (!finger[id].status)
+ continue;
+
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
+ finger[id].status != MXT_RELEASE ?
+ finger[id].area : 0);
+ input_report_abs(input_dev, ABS_MT_POSITION_X,
+ finger[id].x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y,
+ finger[id].y);
+ input_mt_sync(input_dev);
+
+ if (finger[id].status == MXT_RELEASE)
+ finger[id].status = 0;
+ else
+ finger_num++;
+ }
+
+ input_report_key(input_dev, BTN_TOUCH, finger_num > 0);
+
+ if (status != MXT_RELEASE) {
+ input_report_abs(input_dev, ABS_X, finger[single_id].x);
+ input_report_abs(input_dev, ABS_Y, finger[single_id].y);
+ }
+
+ input_sync(input_dev);
+}
+
+static void mxt_input_touchevent(struct mxt_data *data,
+ struct mxt_message *message, int id)
+{
+ struct mxt_finger *finger = data->finger;
+ struct device *dev = &data->client->dev;
+ u8 status = message->message[0];
+ int x;
+ int y;
+ int area;
+
+ /* Check whether the touch is present on the screen */
+ if (!(status & MXT_DETECT)) {
+ if (status & MXT_RELEASE) {
+ dev_dbg(dev, "[%d] released\n", id);
+
+ finger[id].status = MXT_RELEASE;
+ mxt_input_report(data, id);
+ }
+ return;
+ }
+
+ /* Check only AMP detection */
+ if (!(status & (MXT_PRESS | MXT_MOVE)))
+ return;
+
+ x = (message->message[1] << 2) | ((message->message[3] & ~0x3f) >> 6);
+ y = (message->message[2] << 2) | ((message->message[3] & ~0xf3) >> 2);
+ area = message->message[4];
+
+ dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
+ status & MXT_MOVE ? "moved" : "pressed",
+ x, y, area);
+
+ finger[id].status = status & MXT_MOVE ?
+ MXT_MOVE : MXT_PRESS;
+ finger[id].x = x;
+ finger[id].y = y;
+ finger[id].area = area;
+
+ mxt_input_report(data, id);
+}
+
+static irqreturn_t mxt_interrupt(int irq, void *dev_id)
+{
+ struct mxt_data *data = dev_id;
+ struct mxt_message message;
+ struct mxt_object *object;
+ struct device *dev = &data->client->dev;
+ int id;
+ u8 reportid;
+ u8 max_reportid;
+ u8 min_reportid;
+
+ do {
+ if (mxt_read_message(data, &message)) {
+ dev_err(dev, "Failed to read message\n");
+ goto end;
+ }
+
+ reportid = message.reportid;
+
+ /* check whether this reportid belongs to MXT_TOUCH_MULTI */
+ object = mxt_get_object(data, MXT_TOUCH_MULTI);
+ if (!object)
+ goto end;
+
+ max_reportid = object->max_reportid;
+ min_reportid = max_reportid - object->num_report_ids + 1;
+ id = reportid - min_reportid;
+
+ if (reportid >= min_reportid && reportid <= max_reportid)
+ mxt_input_touchevent(data, &message, id);
+ else
+ mxt_dump_message(dev, &message);
+ } while (reportid != 0xff);
+
+end:
+ return IRQ_HANDLED;
+}
+
+static int mxt_check_reg_init(struct mxt_data *data)
+{
+ const struct mxt_platform_data *pdata = data->pdata;
+ struct mxt_object *object;
+ struct device *dev = &data->client->dev;
+ int index = 0;
+ int i, j, config_offset;
+
+ if (!pdata->config) {
+ dev_dbg(dev, "No cfg data defined, skipping reg init\n");
+ return 0;
+ }
+
+ for (i = 0; i < data->info.object_num; i++) {
+ object = data->object_table + i;
+
+ if (!mxt_object_writable(object->type))
+ continue;
+
+ for (j = 0; j < object->size + 1; j++) {
+ config_offset = index + j;
+ if (config_offset > pdata->config_length) {
+ dev_err(dev, "Not enough config data!\n");
+ return -EINVAL;
+ }
+ mxt_write_object(data, object->type, j,
+ pdata->config[config_offset]);
+ }
+ index += object->size + 1;
+ }
+
+ return 0;
+}
+
+static int mxt_make_highchg(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct mxt_message message;
+ int count = 10;
+ int error;
+
+ /* Read dummy messages to drive the CHG pin high */
+ do {
+ error = mxt_read_message(data, &message);
+ if (error)
+ return error;
+ } while (message.reportid != 0xff && --count);
+
+ if (!count) {
+ dev_err(dev, "CHG pin isn't cleared\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void mxt_handle_pdata(struct mxt_data *data)
+{
+ const struct mxt_platform_data *pdata = data->pdata;
+ u8 voltage;
+
+ /* Set touchscreen lines */
+ mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_XSIZE,
+ pdata->x_line);
+ mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_YSIZE,
+ pdata->y_line);
+
+ /* Set touchscreen orient */
+ mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_ORIENT,
+ pdata->orient);
+
+ /* Set touchscreen burst length */
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_BLEN, pdata->blen);
+
+ /* Set touchscreen threshold */
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_TCHTHR, pdata->threshold);
+
+ /* Set touchscreen resolution */
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
+
+ /* Set touchscreen voltage */
+ if (data->info.version >= MXT_VER_21 && pdata->voltage) {
+ if (pdata->voltage < MXT_VOLTAGE_DEFAULT) {
+ voltage = (MXT_VOLTAGE_DEFAULT - pdata->voltage) /
+ MXT_VOLTAGE_STEP;
+ voltage = 0xff - voltage + 1;
+ } else
+ voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
+ MXT_VOLTAGE_STEP;
+
+ mxt_write_object(data, MXT_SPT_CTECONFIG,
+ MXT_CTE_VOLTAGE, voltage);
+ }
+}
+
+static int mxt_get_info(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ struct mxt_info *info = &data->info;
+ int error;
+ u8 val;
+
+ error = mxt_read_reg(client, MXT_FAMILY_ID, &val);
+ if (error)
+ return error;
+ info->family_id = val;
+
+ error = mxt_read_reg(client, MXT_VARIANT_ID, &val);
+ if (error)
+ return error;
+ info->variant_id = val;
+
+ error = mxt_read_reg(client, MXT_VERSION, &val);
+ if (error)
+ return error;
+ info->version = val;
+
+ error = mxt_read_reg(client, MXT_BUILD, &val);
+ if (error)
+ return error;
+ info->build = val;
+
+ error = mxt_read_reg(client, MXT_OBJECT_NUM, &val);
+ if (error)
+ return error;
+ info->object_num = val;
+
+ return 0;
+}
+
+static int mxt_get_object_table(struct mxt_data *data)
+{
+ int error;
+ int i;
+ u16 reg;
+ u8 reportid = 0;
+ u8 buf[MXT_OBJECT_SIZE];
+
+ for (i = 0; i < data->info.object_num; i++) {
+ struct mxt_object *object = data->object_table + i;
+
+ reg = MXT_OBJECT_START + MXT_OBJECT_SIZE * i;
+ error = mxt_read_object_table(data->client, reg, buf);
+ if (error)
+ return error;
+
+ object->type = buf[0];
+ object->start_address = (buf[2] << 8) | buf[1];
+ object->size = buf[3];
+ object->instances = buf[4];
+ object->num_report_ids = buf[5];
+
+ if (object->num_report_ids) {
+ reportid += object->num_report_ids *
+ (object->instances + 1);
+ object->max_reportid = reportid;
+ }
+ }
+
+ return 0;
+}
+
+static int mxt_initialize(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ struct mxt_info *info = &data->info;
+ int error;
+ u8 val;
+
+ error = mxt_get_info(data);
+ if (error)
+ return error;
+
+ data->object_table = kcalloc(info->object_num,
+ sizeof(struct mxt_object),
+ GFP_KERNEL);
+ if (!data->object_table) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Get object table information */
+ error = mxt_get_object_table(data);
+ if (error)
+ return error;
+
+ /* Check register init values */
+ error = mxt_check_reg_init(data);
+ if (error)
+ return error;
+
+ error = mxt_make_highchg(data);
+ if (error)
+ return error;
+
+ mxt_handle_pdata(data);
+
+ /* Backup to memory */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_BACKUPNV,
+ MXT_BACKUP_VALUE);
+ msleep(MXT_BACKUP_TIME);
+
+ /* Soft reset */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_RESET, 1);
+ msleep(MXT_RESET_TIME);
+
+ /* Update matrix size in the info struct */
+ error = mxt_read_reg(client, MXT_MATRIX_X_SIZE, &val);
+ if (error)
+ return error;
+ info->matrix_xsize = val;
+
+ error = mxt_read_reg(client, MXT_MATRIX_Y_SIZE, &val);
+ if (error)
+ return error;
+ info->matrix_ysize = val;
+
+ dev_info(&client->dev,
+ "Family ID: %d Variant ID: %d Version: %d Build: %d\n",
+ info->family_id, info->variant_id, info->version,
+ info->build);
+
+ dev_info(&client->dev,
+ "Matrix X Size: %d Matrix Y Size: %d Object Num: %d\n",
+ info->matrix_xsize, info->matrix_ysize,
+ info->object_num);
+
+ return 0;
+}
+
+static ssize_t mxt_object_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ struct mxt_object *object;
+ int count = 0;
+ int i, j;
+ int error;
+ u8 val;
+
+ for (i = 0; i < data->info.object_num; i++) {
+ object = data->object_table + i;
+
+ count += sprintf(buf + count,
+ "Object Table Element %d(Type %d)\n",
+ i + 1, object->type);
+
+ if (!mxt_object_readable(object->type)) {
+ count += sprintf(buf + count, "\n");
+ continue;
+ }
+
+ for (j = 0; j < object->size + 1; j++) {
+ error = mxt_read_object(data,
+ object->type, j, &val);
+ if (error)
+ return error;
+
+ count += sprintf(buf + count,
+ " Byte %d: 0x%x (%d)\n", j, val, val);
+ }
+
+ count += sprintf(buf + count, "\n");
+ }
+
+ return count;
+}
+
+static int mxt_load_fw(struct device *dev, const char *fn)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ const struct firmware *fw = NULL;
+ unsigned int frame_size;
+ unsigned int pos = 0;
+ int ret;
+
+ ret = request_firmware(&fw, fn, dev);
+ if (ret) {
+ dev_err(dev, "Unable to open firmware %s\n", fn);
+ return ret;
+ }
+
+ /* Change to the bootloader mode */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_RESET, MXT_BOOT_VALUE);
+ msleep(MXT_RESET_TIME);
+
+ /* Change to slave address of bootloader */
+ if (client->addr == MXT_APP_LOW)
+ client->addr = MXT_BOOT_LOW;
+ else
+ client->addr = MXT_BOOT_HIGH;
+
+ ret = mxt_check_bootloader(client, MXT_WAITING_BOOTLOAD_CMD);
+ if (ret)
+ goto out;
+
+ /* Unlock bootloader */
+ mxt_unlock_bootloader(client);
+
+ while (pos < fw->size) {
+ ret = mxt_check_bootloader(client,
+ MXT_WAITING_FRAME_DATA);
+ if (ret)
+ goto out;
+
+ frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1));
+
+ /* We should add 2 to the frame size as the firmware data does not
+ * include the CRC bytes.
+ */
+ frame_size += 2;
+
+ /* Write one frame to device */
+ mxt_fw_write(client, fw->data + pos, frame_size);
+
+ ret = mxt_check_bootloader(client,
+ MXT_FRAME_CRC_PASS);
+ if (ret)
+ goto out;
+
+ pos += frame_size;
+
+ dev_dbg(dev, "Updated %d bytes / %zd bytes\n", pos, fw->size);
+ }
+
+out:
+ release_firmware(fw);
+
+ /* Change to slave address of application */
+ if (client->addr == MXT_BOOT_LOW)
+ client->addr = MXT_APP_LOW;
+ else
+ client->addr = MXT_APP_HIGH;
+
+ return ret;
+}
+
+static ssize_t mxt_update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ unsigned int version;
+ int error;
+
+ if (sscanf(buf, "%u", &version) != 1) {
+ dev_err(dev, "Invalid values\n");
+ return -EINVAL;
+ }
+
+ if (data->info.version < MXT_VER_21 || version < MXT_VER_21) {
+ dev_err(dev, "FW update supported starting with version 21\n");
+ return -EINVAL;
+ }
+
+ disable_irq(data->irq);
+
+ error = mxt_load_fw(dev, MXT_FW_NAME);
+ if (error) {
+ dev_err(dev, "The firmware update failed(%d)\n", error);
+ count = error;
+ } else {
+ dev_dbg(dev, "The firmware update succeeded\n");
+
+ /* Wait for reset */
+ msleep(MXT_FWRESET_TIME);
+
+ kfree(data->object_table);
+ data->object_table = NULL;
+
+ mxt_initialize(data);
+ }
+
+ enable_irq(data->irq);
+
+ return count;
+}
+
+static DEVICE_ATTR(object, 0444, mxt_object_show, NULL);
+static DEVICE_ATTR(update_fw, 0664, NULL, mxt_update_fw_store);
+
+static struct attribute *mxt_attrs[] = {
+ &dev_attr_object.attr,
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static const struct attribute_group mxt_attr_group = {
+ .attrs = mxt_attrs,
+};
+
+static void mxt_start(struct mxt_data *data)
+{
+ /* Touch enable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0x83);
+}
+
+static void mxt_stop(struct mxt_data *data)
+{
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0);
+}
+
+static int mxt_input_open(struct input_dev *dev)
+{
+ struct mxt_data *data = input_get_drvdata(dev);
+
+ mxt_start(data);
+
+ return 0;
+}
+
+static void mxt_input_close(struct input_dev *dev)
+{
+ struct mxt_data *data = input_get_drvdata(dev);
+
+ mxt_stop(data);
+}
+
+static int __devinit mxt_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct mxt_platform_data *pdata = client->dev.platform_data;
+ struct mxt_data *data;
+ struct input_dev *input_dev;
+ int error;
+
+ if (!pdata)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!data || !input_dev) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ input_dev->name = "Atmel maXTouch Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+ input_dev->open = mxt_input_open;
+ input_dev->close = mxt_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X,
+ 0, MXT_MAX_XC, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, MXT_MAX_YC, 0, 0);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, MXT_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, MXT_MAX_XC, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, MXT_MAX_YC, 0, 0);
+
+ input_set_drvdata(input_dev, data);
+
+ data->client = client;
+ data->input_dev = input_dev;
+ data->pdata = pdata;
+ data->irq = client->irq;
+
+ i2c_set_clientdata(client, data);
+
+ error = mxt_initialize(data);
+ if (error)
+ goto err_free_object;
+
+ error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
+ pdata->irqflags, client->dev.driver->name, data);
+ if (error) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_free_object;
+ }
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err_free_irq;
+
+ error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
+ if (error)
+ goto err_unregister_device;
+
+ return 0;
+
+err_unregister_device:
+ input_unregister_device(input_dev);
+ input_dev = NULL;
+err_free_irq:
+ free_irq(client->irq, data);
+err_free_object:
+ kfree(data->object_table);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(data);
+ return error;
+}
+
+static int __devexit mxt_remove(struct i2c_client *client)
+{
+ struct mxt_data *data = i2c_get_clientdata(client);
+
+ sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
+ free_irq(data->irq, data);
+ input_unregister_device(data->input_dev);
+ kfree(data->object_table);
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mxt_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mxt_data *data = i2c_get_clientdata(client);
+ struct input_dev *input_dev = data->input_dev;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ mxt_stop(data);
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static int mxt_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mxt_data *data = i2c_get_clientdata(client);
+ struct input_dev *input_dev = data->input_dev;
+
+ /* Soft reset */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_RESET, 1);
+
+ msleep(MXT_RESET_TIME);
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ mxt_start(data);
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mxt_pm_ops = {
+ .suspend = mxt_suspend,
+ .resume = mxt_resume,
+};
+#endif
+
+static const struct i2c_device_id mxt_id[] = {
+ { "qt602240_ts", 0 },
+ { "atmel_mxt_ts", 0 },
+ { "mXT224", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mxt_id);
+
+static struct i2c_driver mxt_driver = {
+ .driver = {
+ .name = "atmel_mxt_ts",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mxt_pm_ops,
+#endif
+ },
+ .probe = mxt_probe,
+ .remove = __devexit_p(mxt_remove),
+ .id_table = mxt_id,
+};
+
+static int __init mxt_init(void)
+{
+ return i2c_add_driver(&mxt_driver);
+}
+
+static void __exit mxt_exit(void)
+{
+ i2c_del_driver(&mxt_driver);
+}
+
+module_init(mxt_init);
+module_exit(mxt_exit);
+
+/* Module information */
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Atmel maXTouch Touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/qt602240_ts.c b/drivers/input/touchscreen/qt602240_ts.c
deleted file mode 100644
index 4dcb0e872f6a..000000000000
--- a/drivers/input/touchscreen/qt602240_ts.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-/*
- * AT42QT602240/ATMXT224 Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/i2c.h>
-#include <linux/i2c/qt602240_ts.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-/* Version */
-#define QT602240_VER_20 20
-#define QT602240_VER_21 21
-#define QT602240_VER_22 22
-
-/* Slave addresses */
-#define QT602240_APP_LOW 0x4a
-#define QT602240_APP_HIGH 0x4b
-#define QT602240_BOOT_LOW 0x24
-#define QT602240_BOOT_HIGH 0x25
-
-/* Firmware */
-#define QT602240_FW_NAME "qt602240.fw"
-
-/* Registers */
-#define QT602240_FAMILY_ID 0x00
-#define QT602240_VARIANT_ID 0x01
-#define QT602240_VERSION 0x02
-#define QT602240_BUILD 0x03
-#define QT602240_MATRIX_X_SIZE 0x04
-#define QT602240_MATRIX_Y_SIZE 0x05
-#define QT602240_OBJECT_NUM 0x06
-#define QT602240_OBJECT_START 0x07
-
-#define QT602240_OBJECT_SIZE 6
-
-/* Object types */
-#define QT602240_DEBUG_DIAGNOSTIC 37
-#define QT602240_GEN_MESSAGE 5
-#define QT602240_GEN_COMMAND 6
-#define QT602240_GEN_POWER 7
-#define QT602240_GEN_ACQUIRE 8
-#define QT602240_TOUCH_MULTI 9
-#define QT602240_TOUCH_KEYARRAY 15
-#define QT602240_TOUCH_PROXIMITY 23
-#define QT602240_PROCI_GRIPFACE 20
-#define QT602240_PROCG_NOISE 22
-#define QT602240_PROCI_ONETOUCH 24
-#define QT602240_PROCI_TWOTOUCH 27
-#define QT602240_SPT_COMMSCONFIG 18 /* firmware ver 21 over */
-#define QT602240_SPT_GPIOPWM 19
-#define QT602240_SPT_SELFTEST 25
-#define QT602240_SPT_CTECONFIG 28
-#define QT602240_SPT_USERDATA 38 /* firmware ver 21 over */
-
-/* QT602240_GEN_COMMAND field */
-#define QT602240_COMMAND_RESET 0
-#define QT602240_COMMAND_BACKUPNV 1
-#define QT602240_COMMAND_CALIBRATE 2
-#define QT602240_COMMAND_REPORTALL 3
-#define QT602240_COMMAND_DIAGNOSTIC 5
-
-/* QT602240_GEN_POWER field */
-#define QT602240_POWER_IDLEACQINT 0
-#define QT602240_POWER_ACTVACQINT 1
-#define QT602240_POWER_ACTV2IDLETO 2
-
-/* QT602240_GEN_ACQUIRE field */
-#define QT602240_ACQUIRE_CHRGTIME 0
-#define QT602240_ACQUIRE_TCHDRIFT 2
-#define QT602240_ACQUIRE_DRIFTST 3
-#define QT602240_ACQUIRE_TCHAUTOCAL 4
-#define QT602240_ACQUIRE_SYNC 5
-#define QT602240_ACQUIRE_ATCHCALST 6
-#define QT602240_ACQUIRE_ATCHCALSTHR 7
-
-/* QT602240_TOUCH_MULTI field */
-#define QT602240_TOUCH_CTRL 0
-#define QT602240_TOUCH_XORIGIN 1
-#define QT602240_TOUCH_YORIGIN 2
-#define QT602240_TOUCH_XSIZE 3
-#define QT602240_TOUCH_YSIZE 4
-#define QT602240_TOUCH_BLEN 6
-#define QT602240_TOUCH_TCHTHR 7
-#define QT602240_TOUCH_TCHDI 8
-#define QT602240_TOUCH_ORIENT 9
-#define QT602240_TOUCH_MOVHYSTI 11
-#define QT602240_TOUCH_MOVHYSTN 12
-#define QT602240_TOUCH_NUMTOUCH 14
-#define QT602240_TOUCH_MRGHYST 15
-#define QT602240_TOUCH_MRGTHR 16
-#define QT602240_TOUCH_AMPHYST 17
-#define QT602240_TOUCH_XRANGE_LSB 18
-#define QT602240_TOUCH_XRANGE_MSB 19
-#define QT602240_TOUCH_YRANGE_LSB 20
-#define QT602240_TOUCH_YRANGE_MSB 21
-#define QT602240_TOUCH_XLOCLIP 22
-#define QT602240_TOUCH_XHICLIP 23
-#define QT602240_TOUCH_YLOCLIP 24
-#define QT602240_TOUCH_YHICLIP 25
-#define QT602240_TOUCH_XEDGECTRL 26
-#define QT602240_TOUCH_XEDGEDIST 27
-#define QT602240_TOUCH_YEDGECTRL 28
-#define QT602240_TOUCH_YEDGEDIST 29
-#define QT602240_TOUCH_JUMPLIMIT 30 /* firmware ver 22 over */
-
-/* QT602240_PROCI_GRIPFACE field */
-#define QT602240_GRIPFACE_CTRL 0
-#define QT602240_GRIPFACE_XLOGRIP 1
-#define QT602240_GRIPFACE_XHIGRIP 2
-#define QT602240_GRIPFACE_YLOGRIP 3
-#define QT602240_GRIPFACE_YHIGRIP 4
-#define QT602240_GRIPFACE_MAXTCHS 5
-#define QT602240_GRIPFACE_SZTHR1 7
-#define QT602240_GRIPFACE_SZTHR2 8
-#define QT602240_GRIPFACE_SHPTHR1 9
-#define QT602240_GRIPFACE_SHPTHR2 10
-#define QT602240_GRIPFACE_SUPEXTTO 11
-
-/* QT602240_PROCI_NOISE field */
-#define QT602240_NOISE_CTRL 0
-#define QT602240_NOISE_OUTFLEN 1
-#define QT602240_NOISE_GCAFUL_LSB 3
-#define QT602240_NOISE_GCAFUL_MSB 4
-#define QT602240_NOISE_GCAFLL_LSB 5
-#define QT602240_NOISE_GCAFLL_MSB 6
-#define QT602240_NOISE_ACTVGCAFVALID 7
-#define QT602240_NOISE_NOISETHR 8
-#define QT602240_NOISE_FREQHOPSCALE 10
-#define QT602240_NOISE_FREQ0 11
-#define QT602240_NOISE_FREQ1 12
-#define QT602240_NOISE_FREQ2 13
-#define QT602240_NOISE_FREQ3 14
-#define QT602240_NOISE_FREQ4 15
-#define QT602240_NOISE_IDLEGCAFVALID 16
-
-/* QT602240_SPT_COMMSCONFIG */
-#define QT602240_COMMS_CTRL 0
-#define QT602240_COMMS_CMD 1
-
-/* QT602240_SPT_CTECONFIG field */
-#define QT602240_CTE_CTRL 0
-#define QT602240_CTE_CMD 1
-#define QT602240_CTE_MODE 2
-#define QT602240_CTE_IDLEGCAFDEPTH 3
-#define QT602240_CTE_ACTVGCAFDEPTH 4
-#define QT602240_CTE_VOLTAGE 5 /* firmware ver 21 over */
-
-#define QT602240_VOLTAGE_DEFAULT 2700000
-#define QT602240_VOLTAGE_STEP 10000
-
-/* Define for QT602240_GEN_COMMAND */
-#define QT602240_BOOT_VALUE 0xa5
-#define QT602240_BACKUP_VALUE 0x55
-#define QT602240_BACKUP_TIME 25 /* msec */
-#define QT602240_RESET_TIME 65 /* msec */
-
-#define QT602240_FWRESET_TIME 175 /* msec */
-
-/* Command to unlock bootloader */
-#define QT602240_UNLOCK_CMD_MSB 0xaa
-#define QT602240_UNLOCK_CMD_LSB 0xdc
-
-/* Bootloader mode status */
-#define QT602240_WAITING_BOOTLOAD_CMD 0xc0 /* valid 7 6 bit only */
-#define QT602240_WAITING_FRAME_DATA 0x80 /* valid 7 6 bit only */
-#define QT602240_FRAME_CRC_CHECK 0x02
-#define QT602240_FRAME_CRC_FAIL 0x03
-#define QT602240_FRAME_CRC_PASS 0x04
-#define QT602240_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
-#define QT602240_BOOT_STATUS_MASK 0x3f
-
-/* Touch status */
-#define QT602240_SUPPRESS (1 << 1)
-#define QT602240_AMP (1 << 2)
-#define QT602240_VECTOR (1 << 3)
-#define QT602240_MOVE (1 << 4)
-#define QT602240_RELEASE (1 << 5)
-#define QT602240_PRESS (1 << 6)
-#define QT602240_DETECT (1 << 7)
-
-/* Touchscreen absolute values */
-#define QT602240_MAX_XC 0x3ff
-#define QT602240_MAX_YC 0x3ff
-#define QT602240_MAX_AREA 0xff
-
-#define QT602240_MAX_FINGER 10
-
-/* Initial register values recommended by the chip vendor */
-static const u8 init_vals_ver_20[] = {
- /* QT602240_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* QT602240_GEN_ACQUIRE(8) */
- 0x08, 0x05, 0x05, 0x00, 0x00, 0x00, 0x05, 0x14,
- /* QT602240_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x64,
- /* QT602240_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00,
- /* QT602240_PROCI_GRIPFACE(20) */
- 0x00, 0x64, 0x64, 0x64, 0x64, 0x00, 0x00, 0x1e, 0x14, 0x04,
- 0x1e, 0x00,
- /* QT602240_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x19, 0x00, 0xe7, 0xff, 0x04, 0x32, 0x00,
- 0x01, 0x0a, 0x0f, 0x14, 0x00, 0x00, 0xe8,
- /* QT602240_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00,
- /* QT602240_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x00, 0x04, 0x08,
-};
-
-static const u8 init_vals_ver_21[] = {
- /* QT602240_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* QT602240_GEN_ACQUIRE(8) */
- 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
- /* QT602240_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_GRIPFACE(20) */
- 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
- 0x0f, 0x0a,
- /* QT602240_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
- 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
- /* QT602240_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00,
- /* QT602240_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x00, 0x08, 0x10, 0x00,
-};
-
-static const u8 init_vals_ver_22[] = {
- /* QT602240_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* QT602240_GEN_ACQUIRE(8) */
- 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
- /* QT602240_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_GRIPFACE(20) */
- 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
- 0x0f, 0x0a,
- /* QT602240_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
- 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
- /* QT602240_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x00, 0x08, 0x10, 0x00,
-};
-
-struct qt602240_info {
- u8 family_id;
- u8 variant_id;
- u8 version;
- u8 build;
- u8 matrix_xsize;
- u8 matrix_ysize;
- u8 object_num;
-};
-
-struct qt602240_object {
- u8 type;
- u16 start_address;
- u8 size;
- u8 instances;
- u8 num_report_ids;
-
- /* to map object and message */
- u8 max_reportid;
-};
-
-struct qt602240_message {
- u8 reportid;
- u8 message[7];
- u8 checksum;
-};
-
-struct qt602240_finger {
- int status;
- int x;
- int y;
- int area;
-};
-
-/* Each client has this additional data */
-struct qt602240_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- const struct qt602240_platform_data *pdata;
- struct qt602240_object *object_table;
- struct qt602240_info info;
- struct qt602240_finger finger[QT602240_MAX_FINGER];
- unsigned int irq;
-};
-
-static bool qt602240_object_readable(unsigned int type)
-{
- switch (type) {
- case QT602240_GEN_MESSAGE:
- case QT602240_GEN_COMMAND:
- case QT602240_GEN_POWER:
- case QT602240_GEN_ACQUIRE:
- case QT602240_TOUCH_MULTI:
- case QT602240_TOUCH_KEYARRAY:
- case QT602240_TOUCH_PROXIMITY:
- case QT602240_PROCI_GRIPFACE:
- case QT602240_PROCG_NOISE:
- case QT602240_PROCI_ONETOUCH:
- case QT602240_PROCI_TWOTOUCH:
- case QT602240_SPT_COMMSCONFIG:
- case QT602240_SPT_GPIOPWM:
- case QT602240_SPT_SELFTEST:
- case QT602240_SPT_CTECONFIG:
- case QT602240_SPT_USERDATA:
- return true;
- default:
- return false;
- }
-}
-
-static bool qt602240_object_writable(unsigned int type)
-{
- switch (type) {
- case QT602240_GEN_COMMAND:
- case QT602240_GEN_POWER:
- case QT602240_GEN_ACQUIRE:
- case QT602240_TOUCH_MULTI:
- case QT602240_TOUCH_KEYARRAY:
- case QT602240_TOUCH_PROXIMITY:
- case QT602240_PROCI_GRIPFACE:
- case QT602240_PROCG_NOISE:
- case QT602240_PROCI_ONETOUCH:
- case QT602240_PROCI_TWOTOUCH:
- case QT602240_SPT_GPIOPWM:
- case QT602240_SPT_SELFTEST:
- case QT602240_SPT_CTECONFIG:
- return true;
- default:
- return false;
- }
-}
-
-static void qt602240_dump_message(struct device *dev,
- struct qt602240_message *message)
-{
- dev_dbg(dev, "reportid:\t0x%x\n", message->reportid);
- dev_dbg(dev, "message1:\t0x%x\n", message->message[0]);
- dev_dbg(dev, "message2:\t0x%x\n", message->message[1]);
- dev_dbg(dev, "message3:\t0x%x\n", message->message[2]);
- dev_dbg(dev, "message4:\t0x%x\n", message->message[3]);
- dev_dbg(dev, "message5:\t0x%x\n", message->message[4]);
- dev_dbg(dev, "message6:\t0x%x\n", message->message[5]);
- dev_dbg(dev, "message7:\t0x%x\n", message->message[6]);
- dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
-}
-
-static int qt602240_check_bootloader(struct i2c_client *client,
- unsigned int state)
-{
- u8 val;
-
-recheck:
- if (i2c_master_recv(client, &val, 1) != 1) {
- dev_err(&client->dev, "%s: i2c recv failed\n", __func__);
- return -EIO;
- }
-
- switch (state) {
- case QT602240_WAITING_BOOTLOAD_CMD:
- case QT602240_WAITING_FRAME_DATA:
- val &= ~QT602240_BOOT_STATUS_MASK;
- break;
- case QT602240_FRAME_CRC_PASS:
- if (val == QT602240_FRAME_CRC_CHECK)
- goto recheck;
- break;
- default:
- return -EINVAL;
- }
-
- if (val != state) {
- dev_err(&client->dev, "Unvalid bootloader mode state\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qt602240_unlock_bootloader(struct i2c_client *client)
-{
- u8 buf[2];
-
- buf[0] = QT602240_UNLOCK_CMD_LSB;
- buf[1] = QT602240_UNLOCK_CMD_MSB;
-
- if (i2c_master_send(client, buf, 2) != 2) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int qt602240_fw_write(struct i2c_client *client,
- const u8 *data, unsigned int frame_size)
-{
- if (i2c_master_send(client, data, frame_size) != frame_size) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int __qt602240_read_reg(struct i2c_client *client,
- u16 reg, u16 len, void *val)
-{
- struct i2c_msg xfer[2];
- u8 buf[2];
-
- buf[0] = reg & 0xff;
- buf[1] = (reg >> 8) & 0xff;
-
- /* Write register */
- xfer[0].addr = client->addr;
- xfer[0].flags = 0;
- xfer[0].len = 2;
- xfer[0].buf = buf;
-
- /* Read data */
- xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = len;
- xfer[1].buf = val;
-
- if (i2c_transfer(client->adapter, xfer, 2) != 2) {
- dev_err(&client->dev, "%s: i2c transfer failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int qt602240_read_reg(struct i2c_client *client, u16 reg, u8 *val)
-{
- return __qt602240_read_reg(client, reg, 1, val);
-}
-
-static int qt602240_write_reg(struct i2c_client *client, u16 reg, u8 val)
-{
- u8 buf[3];
-
- buf[0] = reg & 0xff;
- buf[1] = (reg >> 8) & 0xff;
- buf[2] = val;
-
- if (i2c_master_send(client, buf, 3) != 3) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int qt602240_read_object_table(struct i2c_client *client,
- u16 reg, u8 *object_buf)
-{
- return __qt602240_read_reg(client, reg, QT602240_OBJECT_SIZE,
- object_buf);
-}
-
-static struct qt602240_object *
-qt602240_get_object(struct qt602240_data *data, u8 type)
-{
- struct qt602240_object *object;
- int i;
-
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
- if (object->type == type)
- return object;
- }
-
- dev_err(&data->client->dev, "Invalid object type\n");
- return NULL;
-}
-
-static int qt602240_read_message(struct qt602240_data *data,
- struct qt602240_message *message)
-{
- struct qt602240_object *object;
- u16 reg;
-
- object = qt602240_get_object(data, QT602240_GEN_MESSAGE);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return __qt602240_read_reg(data->client, reg,
- sizeof(struct qt602240_message), message);
-}
-
-static int qt602240_read_object(struct qt602240_data *data,
- u8 type, u8 offset, u8 *val)
-{
- struct qt602240_object *object;
- u16 reg;
-
- object = qt602240_get_object(data, type);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return __qt602240_read_reg(data->client, reg + offset, 1, val);
-}
-
-static int qt602240_write_object(struct qt602240_data *data,
- u8 type, u8 offset, u8 val)
-{
- struct qt602240_object *object;
- u16 reg;
-
- object = qt602240_get_object(data, type);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return qt602240_write_reg(data->client, reg + offset, val);
-}
-
-static void qt602240_input_report(struct qt602240_data *data, int single_id)
-{
- struct qt602240_finger *finger = data->finger;
- struct input_dev *input_dev = data->input_dev;
- int status = finger[single_id].status;
- int finger_num = 0;
- int id;
-
- for (id = 0; id < QT602240_MAX_FINGER; id++) {
- if (!finger[id].status)
- continue;
-
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
- finger[id].status != QT602240_RELEASE ?
- finger[id].area : 0);
- input_report_abs(input_dev, ABS_MT_POSITION_X,
- finger[id].x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y,
- finger[id].y);
- input_mt_sync(input_dev);
-
- if (finger[id].status == QT602240_RELEASE)
- finger[id].status = 0;
- else
- finger_num++;
- }
-
- input_report_key(input_dev, BTN_TOUCH, finger_num > 0);
-
- if (status != QT602240_RELEASE) {
- input_report_abs(input_dev, ABS_X, finger[single_id].x);
- input_report_abs(input_dev, ABS_Y, finger[single_id].y);
- }
-
- input_sync(input_dev);
-}
-
-static void qt602240_input_touchevent(struct qt602240_data *data,
- struct qt602240_message *message, int id)
-{
- struct qt602240_finger *finger = data->finger;
- struct device *dev = &data->client->dev;
- u8 status = message->message[0];
- int x;
- int y;
- int area;
-
- /* Check the touch is present on the screen */
- if (!(status & QT602240_DETECT)) {
- if (status & QT602240_RELEASE) {
- dev_dbg(dev, "[%d] released\n", id);
-
- finger[id].status = QT602240_RELEASE;
- qt602240_input_report(data, id);
- }
- return;
- }
-
- /* Check only AMP detection */
- if (!(status & (QT602240_PRESS | QT602240_MOVE)))
- return;
-
- x = (message->message[1] << 2) | ((message->message[3] & ~0x3f) >> 6);
- y = (message->message[2] << 2) | ((message->message[3] & ~0xf3) >> 2);
- area = message->message[4];
-
- dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
- status & QT602240_MOVE ? "moved" : "pressed",
- x, y, area);
-
- finger[id].status = status & QT602240_MOVE ?
- QT602240_MOVE : QT602240_PRESS;
- finger[id].x = x;
- finger[id].y = y;
- finger[id].area = area;
-
- qt602240_input_report(data, id);
-}
-
-static irqreturn_t qt602240_interrupt(int irq, void *dev_id)
-{
- struct qt602240_data *data = dev_id;
- struct qt602240_message message;
- struct qt602240_object *object;
- struct device *dev = &data->client->dev;
- int id;
- u8 reportid;
- u8 max_reportid;
- u8 min_reportid;
-
- do {
- if (qt602240_read_message(data, &message)) {
- dev_err(dev, "Failed to read message\n");
- goto end;
- }
-
- reportid = message.reportid;
-
- /* check whether this reportid belongs to QT602240_TOUCH_MULTI */
- object = qt602240_get_object(data, QT602240_TOUCH_MULTI);
- if (!object)
- goto end;
-
- max_reportid = object->max_reportid;
- min_reportid = max_reportid - object->num_report_ids + 1;
- id = reportid - min_reportid;
-
- if (reportid >= min_reportid && reportid <= max_reportid)
- qt602240_input_touchevent(data, &message, id);
- else
- qt602240_dump_message(dev, &message);
- } while (reportid != 0xff);
-
-end:
- return IRQ_HANDLED;
-}
-
-static int qt602240_check_reg_init(struct qt602240_data *data)
-{
- struct qt602240_object *object;
- struct device *dev = &data->client->dev;
- int index = 0;
- int i, j;
- u8 version = data->info.version;
- u8 *init_vals;
-
- switch (version) {
- case QT602240_VER_20:
- init_vals = (u8 *)init_vals_ver_20;
- break;
- case QT602240_VER_21:
- init_vals = (u8 *)init_vals_ver_21;
- break;
- case QT602240_VER_22:
- init_vals = (u8 *)init_vals_ver_22;
- break;
- default:
- dev_err(dev, "Firmware version %d doesn't support\n", version);
- return -EINVAL;
- }
-
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
-
- if (!qt602240_object_writable(object->type))
- continue;
-
- for (j = 0; j < object->size + 1; j++)
- qt602240_write_object(data, object->type, j,
- init_vals[index + j]);
-
- index += object->size + 1;
- }
-
- return 0;
-}
-
-static int qt602240_check_matrix_size(struct qt602240_data *data)
-{
- const struct qt602240_platform_data *pdata = data->pdata;
- struct device *dev = &data->client->dev;
- int mode = -1;
- int error;
- u8 val;
-
- dev_dbg(dev, "Number of X lines: %d\n", pdata->x_line);
- dev_dbg(dev, "Number of Y lines: %d\n", pdata->y_line);
-
- switch (pdata->x_line) {
- case 0 ... 15:
- if (pdata->y_line <= 14)
- mode = 0;
- break;
- case 16:
- if (pdata->y_line <= 12)
- mode = 1;
- if (pdata->y_line == 13 || pdata->y_line == 14)
- mode = 0;
- break;
- case 17:
- if (pdata->y_line <= 11)
- mode = 2;
- if (pdata->y_line == 12 || pdata->y_line == 13)
- mode = 1;
- break;
- case 18:
- if (pdata->y_line <= 10)
- mode = 3;
- if (pdata->y_line == 11 || pdata->y_line == 12)
- mode = 2;
- break;
- case 19:
- if (pdata->y_line <= 9)
- mode = 4;
- if (pdata->y_line == 10 || pdata->y_line == 11)
- mode = 3;
- break;
- case 20:
- mode = 4;
- }
-
- if (mode < 0) {
- dev_err(dev, "Invalid X/Y lines\n");
- return -EINVAL;
- }
-
- error = qt602240_read_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_MODE, &val);
- if (error)
- return error;
-
- if (mode == val)
- return 0;
-
- /* Change the CTE configuration */
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_CTRL, 1);
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_MODE, mode);
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_CTRL, 0);
-
- return 0;
-}
-
-static int qt602240_make_highchg(struct qt602240_data *data)
-{
- struct device *dev = &data->client->dev;
- int count = 10;
- int error;
- u8 val;
-
- /* Read dummy messages to force the CHG pin high */
- do {
- error = qt602240_read_object(data, QT602240_GEN_MESSAGE, 0, &val);
- if (error)
- return error;
- } while ((val != 0xff) && --count);
-
- if (!count) {
- dev_err(dev, "CHG pin isn't cleared\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static void qt602240_handle_pdata(struct qt602240_data *data)
-{
- const struct qt602240_platform_data *pdata = data->pdata;
- u8 voltage;
-
- /* Set touchscreen lines */
- qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_XSIZE,
- pdata->x_line);
- qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_YSIZE,
- pdata->y_line);
-
- /* Set touchscreen orient */
- qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_ORIENT,
- pdata->orient);
-
- /* Set touchscreen burst length */
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_BLEN, pdata->blen);
-
- /* Set touchscreen threshold */
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_TCHTHR, pdata->threshold);
-
- /* Set touchscreen resolution */
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
-
- /* Set touchscreen voltage */
- if (data->info.version >= QT602240_VER_21 && pdata->voltage) {
- if (pdata->voltage < QT602240_VOLTAGE_DEFAULT) {
- voltage = (QT602240_VOLTAGE_DEFAULT - pdata->voltage) /
- QT602240_VOLTAGE_STEP;
- voltage = 0xff - voltage + 1;
- } else
- voltage = (pdata->voltage - QT602240_VOLTAGE_DEFAULT) /
- QT602240_VOLTAGE_STEP;
-
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_VOLTAGE, voltage);
- }
-}
-
-static int qt602240_get_info(struct qt602240_data *data)
-{
- struct i2c_client *client = data->client;
- struct qt602240_info *info = &data->info;
- int error;
- u8 val;
-
- error = qt602240_read_reg(client, QT602240_FAMILY_ID, &val);
- if (error)
- return error;
- info->family_id = val;
-
- error = qt602240_read_reg(client, QT602240_VARIANT_ID, &val);
- if (error)
- return error;
- info->variant_id = val;
-
- error = qt602240_read_reg(client, QT602240_VERSION, &val);
- if (error)
- return error;
- info->version = val;
-
- error = qt602240_read_reg(client, QT602240_BUILD, &val);
- if (error)
- return error;
- info->build = val;
-
- error = qt602240_read_reg(client, QT602240_OBJECT_NUM, &val);
- if (error)
- return error;
- info->object_num = val;
-
- return 0;
-}
-
-static int qt602240_get_object_table(struct qt602240_data *data)
-{
- int error;
- int i;
- u16 reg;
- u8 reportid = 0;
- u8 buf[QT602240_OBJECT_SIZE];
-
- for (i = 0; i < data->info.object_num; i++) {
- struct qt602240_object *object = data->object_table + i;
-
- reg = QT602240_OBJECT_START + QT602240_OBJECT_SIZE * i;
- error = qt602240_read_object_table(data->client, reg, buf);
- if (error)
- return error;
-
- object->type = buf[0];
- object->start_address = (buf[2] << 8) | buf[1];
- object->size = buf[3];
- object->instances = buf[4];
- object->num_report_ids = buf[5];
-
- if (object->num_report_ids) {
- reportid += object->num_report_ids *
- (object->instances + 1);
- object->max_reportid = reportid;
- }
- }
-
- return 0;
-}
-
-static int qt602240_initialize(struct qt602240_data *data)
-{
- struct i2c_client *client = data->client;
- struct qt602240_info *info = &data->info;
- int error;
- u8 val;
-
- error = qt602240_get_info(data);
- if (error)
- return error;
-
- data->object_table = kcalloc(info->object_num,
- sizeof(struct qt602240_object),
- GFP_KERNEL);
- if (!data->object_table) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- return -ENOMEM;
- }
-
- /* Get object table information */
- error = qt602240_get_object_table(data);
- if (error)
- return error;
-
- /* Check register init values */
- error = qt602240_check_reg_init(data);
- if (error)
- return error;
-
- /* Check X/Y matrix size */
- error = qt602240_check_matrix_size(data);
- if (error)
- return error;
-
- error = qt602240_make_highchg(data);
- if (error)
- return error;
-
- qt602240_handle_pdata(data);
-
- /* Backup to memory */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_BACKUPNV,
- QT602240_BACKUP_VALUE);
- msleep(QT602240_BACKUP_TIME);
-
- /* Soft reset */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_RESET, 1);
- msleep(QT602240_RESET_TIME);
-
- /* Update matrix size at info struct */
- error = qt602240_read_reg(client, QT602240_MATRIX_X_SIZE, &val);
- if (error)
- return error;
- info->matrix_xsize = val;
-
- error = qt602240_read_reg(client, QT602240_MATRIX_Y_SIZE, &val);
- if (error)
- return error;
- info->matrix_ysize = val;
-
- dev_info(&client->dev,
- "Family ID: %d Variant ID: %d Version: %d Build: %d\n",
- info->family_id, info->variant_id, info->version,
- info->build);
-
- dev_info(&client->dev,
- "Matrix X Size: %d Matrix Y Size: %d Object Num: %d\n",
- info->matrix_xsize, info->matrix_ysize,
- info->object_num);
-
- return 0;
-}
-
-static ssize_t qt602240_object_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qt602240_data *data = dev_get_drvdata(dev);
- struct qt602240_object *object;
- int count = 0;
- int i, j;
- int error;
- u8 val;
-
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
-
- count += sprintf(buf + count,
- "Object Table Element %d(Type %d)\n",
- i + 1, object->type);
-
- if (!qt602240_object_readable(object->type)) {
- count += sprintf(buf + count, "\n");
- continue;
- }
-
- for (j = 0; j < object->size + 1; j++) {
- error = qt602240_read_object(data,
- object->type, j, &val);
- if (error)
- return error;
-
- count += sprintf(buf + count,
- " Byte %d: 0x%x (%d)\n", j, val, val);
- }
-
- count += sprintf(buf + count, "\n");
- }
-
- return count;
-}
-
-static int qt602240_load_fw(struct device *dev, const char *fn)
-{
- struct qt602240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- const struct firmware *fw = NULL;
- unsigned int frame_size;
- unsigned int pos = 0;
- int ret;
-
- ret = request_firmware(&fw, fn, dev);
- if (ret) {
- dev_err(dev, "Unable to open firmware %s\n", fn);
- return ret;
- }
-
- /* Change to the bootloader mode */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_RESET, QT602240_BOOT_VALUE);
- msleep(QT602240_RESET_TIME);
-
- /* Change to slave address of bootloader */
- if (client->addr == QT602240_APP_LOW)
- client->addr = QT602240_BOOT_LOW;
- else
- client->addr = QT602240_BOOT_HIGH;
-
- ret = qt602240_check_bootloader(client, QT602240_WAITING_BOOTLOAD_CMD);
- if (ret)
- goto out;
-
- /* Unlock bootloader */
- qt602240_unlock_bootloader(client);
-
- while (pos < fw->size) {
- ret = qt602240_check_bootloader(client,
- QT602240_WAITING_FRAME_DATA);
- if (ret)
- goto out;
-
- frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1));
-
- /* We should add 2 to the frame size as the firmware data does not
- * include the CRC bytes.
- */
- frame_size += 2;
-
- /* Write one frame to device */
- qt602240_fw_write(client, fw->data + pos, frame_size);
-
- ret = qt602240_check_bootloader(client,
- QT602240_FRAME_CRC_PASS);
- if (ret)
- goto out;
-
- pos += frame_size;
-
- dev_dbg(dev, "Updated %d bytes / %zd bytes\n", pos, fw->size);
- }
-
-out:
- release_firmware(fw);
-
- /* Change to slave address of application */
- if (client->addr == QT602240_BOOT_LOW)
- client->addr = QT602240_APP_LOW;
- else
- client->addr = QT602240_APP_HIGH;
-
- return ret;
-}
-
-static ssize_t qt602240_update_fw_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct qt602240_data *data = dev_get_drvdata(dev);
- unsigned int version;
- int error;
-
- if (sscanf(buf, "%u", &version) != 1) {
- dev_err(dev, "Invalid values\n");
- return -EINVAL;
- }
-
- if (data->info.version < QT602240_VER_21 || version < QT602240_VER_21) {
- dev_err(dev, "FW update supported starting with version 21\n");
- return -EINVAL;
- }
-
- disable_irq(data->irq);
-
- error = qt602240_load_fw(dev, QT602240_FW_NAME);
- if (error) {
- dev_err(dev, "The firmware update failed(%d)\n", error);
- count = error;
- } else {
- dev_dbg(dev, "The firmware update succeeded\n");
-
- /* Wait for reset */
- msleep(QT602240_FWRESET_TIME);
-
- kfree(data->object_table);
- data->object_table = NULL;
-
- qt602240_initialize(data);
- }
-
- enable_irq(data->irq);
-
- return count;
-}
-
-static DEVICE_ATTR(object, 0444, qt602240_object_show, NULL);
-static DEVICE_ATTR(update_fw, 0664, NULL, qt602240_update_fw_store);
-
-static struct attribute *qt602240_attrs[] = {
- &dev_attr_object.attr,
- &dev_attr_update_fw.attr,
- NULL
-};
-
-static const struct attribute_group qt602240_attr_group = {
- .attrs = qt602240_attrs,
-};
-
-static void qt602240_start(struct qt602240_data *data)
-{
- /* Touch enable */
- qt602240_write_object(data,
- QT602240_TOUCH_MULTI, QT602240_TOUCH_CTRL, 0x83);
-}
-
-static void qt602240_stop(struct qt602240_data *data)
-{
- /* Touch disable */
- qt602240_write_object(data,
- QT602240_TOUCH_MULTI, QT602240_TOUCH_CTRL, 0);
-}
-
-static int qt602240_input_open(struct input_dev *dev)
-{
- struct qt602240_data *data = input_get_drvdata(dev);
-
- qt602240_start(data);
-
- return 0;
-}
-
-static void qt602240_input_close(struct input_dev *dev)
-{
- struct qt602240_data *data = input_get_drvdata(dev);
-
- qt602240_stop(data);
-}
-
-static int __devinit qt602240_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct qt602240_data *data;
- struct input_dev *input_dev;
- int error;
-
- if (!client->dev.platform_data)
- return -EINVAL;
-
- data = kzalloc(sizeof(struct qt602240_data), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!data || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
-
- input_dev->name = "AT42QT602240/ATMXT224 Touchscreen";
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
- input_dev->open = qt602240_input_open;
- input_dev->close = qt602240_input_close;
-
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(BTN_TOUCH, input_dev->keybit);
-
- /* For single touch */
- input_set_abs_params(input_dev, ABS_X,
- 0, QT602240_MAX_XC, 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- 0, QT602240_MAX_YC, 0, 0);
-
- /* For multi touch */
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, QT602240_MAX_AREA, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, QT602240_MAX_XC, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, QT602240_MAX_YC, 0, 0);
-
- input_set_drvdata(input_dev, data);
-
- data->client = client;
- data->input_dev = input_dev;
- data->pdata = client->dev.platform_data;
- data->irq = client->irq;
-
- i2c_set_clientdata(client, data);
-
- error = qt602240_initialize(data);
- if (error)
- goto err_free_object;
-
- error = request_threaded_irq(client->irq, NULL, qt602240_interrupt,
- IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
- if (error) {
- dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_object;
- }
-
- error = input_register_device(input_dev);
- if (error)
- goto err_free_irq;
-
- error = sysfs_create_group(&client->dev.kobj, &qt602240_attr_group);
- if (error)
- goto err_unregister_device;
-
- return 0;
-
-err_unregister_device:
- input_unregister_device(input_dev);
- input_dev = NULL;
-err_free_irq:
- free_irq(client->irq, data);
-err_free_object:
- kfree(data->object_table);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return error;
-}
-
-static int __devexit qt602240_remove(struct i2c_client *client)
-{
- struct qt602240_data *data = i2c_get_clientdata(client);
-
- sysfs_remove_group(&client->dev.kobj, &qt602240_attr_group);
- free_irq(data->irq, data);
- input_unregister_device(data->input_dev);
- kfree(data->object_table);
- kfree(data);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int qt602240_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct qt602240_data *data = i2c_get_clientdata(client);
- struct input_dev *input_dev = data->input_dev;
-
- mutex_lock(&input_dev->mutex);
-
- if (input_dev->users)
- qt602240_stop(data);
-
- mutex_unlock(&input_dev->mutex);
-
- return 0;
-}
-
-static int qt602240_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct qt602240_data *data = i2c_get_clientdata(client);
- struct input_dev *input_dev = data->input_dev;
-
- /* Soft reset */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_RESET, 1);
-
- msleep(QT602240_RESET_TIME);
-
- mutex_lock(&input_dev->mutex);
-
- if (input_dev->users)
- qt602240_start(data);
-
- mutex_unlock(&input_dev->mutex);
-
- return 0;
-}
-
-static const struct dev_pm_ops qt602240_pm_ops = {
- .suspend = qt602240_suspend,
- .resume = qt602240_resume,
-};
-#endif
-
-static const struct i2c_device_id qt602240_id[] = {
- { "qt602240_ts", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, qt602240_id);
-
-static struct i2c_driver qt602240_driver = {
- .driver = {
- .name = "qt602240_ts",
- .owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .pm = &qt602240_pm_ops,
-#endif
- },
- .probe = qt602240_probe,
- .remove = __devexit_p(qt602240_remove),
- .id_table = qt602240_id,
-};
-
-static int __init qt602240_init(void)
-{
- return i2c_add_driver(&qt602240_driver);
-}
-
-static void __exit qt602240_exit(void)
-{
- i2c_del_driver(&qt602240_driver);
-}
-
-module_init(qt602240_init);
-module_exit(qt602240_exit);
-
-/* Module information */
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_DESCRIPTION("AT42QT602240/ATMXT224 Touchscreen driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index c8c136cf7bbc..43031492d733 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -43,7 +43,6 @@ struct tps6507x_ts {
struct input_dev *input_dev;
struct device *dev;
char phys[32];
- struct workqueue_struct *wq;
struct delayed_work work;
unsigned polling; /* polling is active */
struct ts_event tc;
@@ -220,8 +219,8 @@ done:
poll = 1;
if (poll) {
- schd = queue_delayed_work(tsc->wq, &tsc->work,
- msecs_to_jiffies(tsc->poll_period));
+ schd = schedule_delayed_work(&tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
if (schd)
tsc->polling = 1;
else {
@@ -303,7 +302,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
tsc->input_dev = input_dev;
INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
- tsc->wq = create_workqueue("TPS6507x Touchscreen");
if (init_data) {
tsc->poll_period = init_data->poll_period;
@@ -325,8 +323,8 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
if (error)
goto err2;
- schd = queue_delayed_work(tsc->wq, &tsc->work,
- msecs_to_jiffies(tsc->poll_period));
+ schd = schedule_delayed_work(&tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
if (schd)
tsc->polling = 1;
@@ -341,7 +339,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
err2:
cancel_delayed_work_sync(&tsc->work);
- destroy_workqueue(tsc->wq);
input_free_device(input_dev);
err1:
kfree(tsc);
@@ -357,7 +354,6 @@ static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
struct input_dev *input_dev = tsc->input_dev;
cancel_delayed_work_sync(&tsc->work);
- destroy_workqueue(tsc->wq);
input_unregister_device(input_dev);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
new file mode 100644
index 000000000000..1022f715d3c2
--- /dev/null
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -0,0 +1,363 @@
+/*
+ * Touchscreen driver for WM831x PMICs
+ *
+ * Copyright 2011 Wolfson Microelectronics plc.
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/pm.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/irq.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/*
+ * R16424 (0x4028) - Touch Control 1
+ */
+#define WM831X_TCH_ENA 0x8000 /* TCH_ENA */
+#define WM831X_TCH_CVT_ENA 0x4000 /* TCH_CVT_ENA */
+#define WM831X_TCH_SLPENA 0x1000 /* TCH_SLPENA */
+#define WM831X_TCH_Z_ENA 0x0400 /* TCH_Z_ENA */
+#define WM831X_TCH_Y_ENA 0x0200 /* TCH_Y_ENA */
+#define WM831X_TCH_X_ENA 0x0100 /* TCH_X_ENA */
+#define WM831X_TCH_DELAY_MASK 0x00E0 /* TCH_DELAY - [7:5] */
+#define WM831X_TCH_DELAY_SHIFT 5 /* TCH_DELAY - [7:5] */
+#define WM831X_TCH_DELAY_WIDTH 3 /* TCH_DELAY - [7:5] */
+#define WM831X_TCH_RATE_MASK 0x001F /* TCH_RATE - [4:0] */
+#define WM831X_TCH_RATE_SHIFT 0 /* TCH_RATE - [4:0] */
+#define WM831X_TCH_RATE_WIDTH 5 /* TCH_RATE - [4:0] */
+
+/*
+ * R16425 (0x4029) - Touch Control 2
+ */
+#define WM831X_TCH_PD_WK 0x2000 /* TCH_PD_WK */
+#define WM831X_TCH_5WIRE 0x1000 /* TCH_5WIRE */
+#define WM831X_TCH_PDONLY 0x0800 /* TCH_PDONLY */
+#define WM831X_TCH_ISEL 0x0100 /* TCH_ISEL */
+#define WM831X_TCH_RPU_MASK 0x000F /* TCH_RPU - [3:0] */
+#define WM831X_TCH_RPU_SHIFT 0 /* TCH_RPU - [3:0] */
+#define WM831X_TCH_RPU_WIDTH 4 /* TCH_RPU - [3:0] */
+
+/*
+ * R16426-8 (0x402A-C) - Touch Data X/Y/Z
+ */
+#define WM831X_TCH_PD 0x8000 /* TCH_PD1 */
+#define WM831X_TCH_DATA_MASK 0x0FFF /* TCH_DATA - [11:0] */
+#define WM831X_TCH_DATA_SHIFT 0 /* TCH_DATA - [11:0] */
+#define WM831X_TCH_DATA_WIDTH 12 /* TCH_DATA - [11:0] */
+
+struct wm831x_ts {
+ struct input_dev *input_dev;
+ struct wm831x *wm831x;
+ unsigned int data_irq;
+ unsigned int pd_irq;
+ bool pressure;
+ bool pen_down;
+};
+
+static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
+{
+ struct wm831x_ts *wm831x_ts = irq_data;
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+ static int data_types[] = { ABS_X, ABS_Y, ABS_PRESSURE };
+ u16 data[3];
+ int count;
+ int i, ret;
+
+ if (wm831x_ts->pressure)
+ count = 3;
+ else
+ count = 2;
+
+ wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_TCHDATA_EINT, WM831X_TCHDATA_EINT);
+
+ ret = wm831x_bulk_read(wm831x, WM831X_TOUCH_DATA_X, count,
+ data);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to read touch data: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Every reading includes a pen down flag; report pen up if any
+ * individual reading has the flag cleared.
+ */
+ wm831x_ts->pen_down = true;
+ for (i = 0; i < count; i++) {
+ if (!(data[i] & WM831X_TCH_PD)) {
+ wm831x_ts->pen_down = false;
+ continue;
+ }
+ input_report_abs(wm831x_ts->input_dev, data_types[i],
+ data[i] & WM831X_TCH_DATA_MASK);
+ }
+
+ if (!wm831x_ts->pen_down) {
+ disable_irq_nosync(wm831x_ts->data_irq);
+
+ /* Don't need data any more */
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
+ WM831X_TCH_Z_ENA, 0);
+
+ /* Flush any final samples that arrived while reading */
+ wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_TCHDATA_EINT, WM831X_TCHDATA_EINT);
+
+ wm831x_bulk_read(wm831x, WM831X_TOUCH_DATA_X, count, data);
+
+ if (wm831x_ts->pressure)
+ input_report_abs(wm831x_ts->input_dev,
+ ABS_PRESSURE, 0);
+
+ input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0);
+ }
+
+ input_sync(wm831x_ts->input_dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
+{
+ struct wm831x_ts *wm831x_ts = irq_data;
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+ int ena = 0;
+
+ /* Start collecting data */
+ if (wm831x_ts->pressure)
+ ena |= WM831X_TCH_Z_ENA;
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA,
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena);
+
+ input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
+ input_sync(wm831x_ts->input_dev);
+
+ wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_TCHPD_EINT, WM831X_TCHPD_EINT);
+
+ wm831x_ts->pen_down = true;
+ enable_irq(wm831x_ts->data_irq);
+
+ return IRQ_HANDLED;
+}
+
+static int wm831x_ts_input_open(struct input_dev *idev)
+{
+ struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_ENA, WM831X_TCH_ENA);
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_CVT_ENA, WM831X_TCH_CVT_ENA);
+
+ return 0;
+}
+
+static void wm831x_ts_input_close(struct input_dev *idev)
+{
+ struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
+ WM831X_TCH_Z_ENA, 0);
+
+ if (wm831x_ts->pen_down)
+ disable_irq(wm831x_ts->data_irq);
+}
+
+static __devinit int wm831x_ts_probe(struct platform_device *pdev)
+{
+ struct wm831x_ts *wm831x_ts;
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent);
+ struct wm831x_touch_pdata *pdata = NULL;
+ struct input_dev *input_dev;
+ int error;
+
+ if (core_pdata)
+ pdata = core_pdata->touch;
+
+ wm831x_ts = kzalloc(sizeof(struct wm831x_ts), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!wm831x_ts || !input_dev) {
+ error = -ENOMEM;
+ goto err_alloc;
+ }
+
+ wm831x_ts->wm831x = wm831x;
+ wm831x_ts->input_dev = input_dev;
+
+ /*
+ * If we have a direct IRQ use it, otherwise use the interrupt
+ * from the WM831x IRQ controller.
+ */
+ if (pdata && pdata->data_irq)
+ wm831x_ts->data_irq = pdata->data_irq;
+ else
+ wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
+
+ if (pdata && pdata->pd_irq)
+ wm831x_ts->pd_irq = pdata->pd_irq;
+ else
+ wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
+
+ wm831x_ts->pressure = pdata && pdata->pressure;
+
+ /* Five wire touchscreens can't report pressure */
+ if (pdata && pdata->fivewire) {
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_5WIRE, WM831X_TCH_5WIRE);
+
+ /* Pressure measurements are not possible for five wire mode */
+ WARN_ON(pdata->pressure && pdata->fivewire);
+ wm831x_ts->pressure = false;
+ } else {
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_5WIRE, 0);
+ }
+
+ if (pdata) {
+ switch (pdata->isel) {
+ default:
+ dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n",
+ pdata->isel);
+ /* Fall through */
+ case 200:
+ case 0:
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_ISEL, 0);
+ break;
+ case 400:
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_ISEL, WM831X_TCH_ISEL);
+ break;
+ }
+ }
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_PDONLY, 0);
+
+ /* Default to 96 samples/sec */
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_RATE_MASK, 6);
+
+ error = request_threaded_irq(wm831x_ts->data_irq,
+ NULL, wm831x_ts_data_irq,
+ IRQF_ONESHOT,
+ "Touchscreen data", wm831x_ts);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",
+ wm831x_ts->data_irq, error);
+ goto err_alloc;
+ }
+ disable_irq(wm831x_ts->data_irq);
+
+ error = request_threaded_irq(wm831x_ts->pd_irq,
+ NULL, wm831x_ts_pen_down_irq,
+ IRQF_ONESHOT,
+ "Touchscreen pen down", wm831x_ts);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",
+ wm831x_ts->pd_irq, error);
+ goto err_data_irq;
+ }
+
+ /* set up touch configuration */
+ input_dev->name = "WM831x touchscreen";
+ input_dev->phys = "wm831x";
+ input_dev->open = wm831x_ts_input_open;
+ input_dev->close = wm831x_ts_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, 4095, 5, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 4095, 5, 0);
+ if (wm831x_ts->pressure)
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 4095, 5, 0);
+
+ input_set_drvdata(input_dev, wm831x_ts);
+ input_dev->dev.parent = &pdev->dev;
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err_pd_irq;
+
+ platform_set_drvdata(pdev, wm831x_ts);
+ return 0;
+
+err_pd_irq:
+ free_irq(wm831x_ts->pd_irq, wm831x_ts);
+err_data_irq:
+ free_irq(wm831x_ts->data_irq, wm831x_ts);
+err_alloc:
+ input_free_device(input_dev);
+ kfree(wm831x_ts);
+
+ return error;
+}
+
+static __devexit int wm831x_ts_remove(struct platform_device *pdev)
+{
+ struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
+
+ free_irq(wm831x_ts->pd_irq, wm831x_ts);
+ free_irq(wm831x_ts->data_irq, wm831x_ts);
+ input_unregister_device(wm831x_ts->input_dev);
+ kfree(wm831x_ts);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver wm831x_ts_driver = {
+ .driver = {
+ .name = "wm831x-touch",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_ts_probe,
+ .remove = __devexit_p(wm831x_ts_remove),
+};
+
+static int __init wm831x_ts_init(void)
+{
+ return platform_driver_register(&wm831x_ts_driver);
+}
+module_init(wm831x_ts_init);
+
+static void __exit wm831x_ts_exit(void)
+{
+ platform_driver_unregister(&wm831x_ts_driver);
+}
+module_exit(wm831x_ts_exit);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM831x PMIC touchscreen driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-touch");
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798442fa..7bd5baa547be 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
stream interface.
If synchronous service was requested, then function
does return amount of data written to stream.
- 'final' does indicate that pice of data to be written is
+ 'final' does indicate that piece of data to be written is
final part of frame (necessary only by structured datatransfer)
return 0 if zero length packet was written
return -1 if stream is full
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 199f374cf9da..f6e108d0125f 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -206,7 +206,7 @@ recv_Bchannel(struct bchannel *bch, unsigned int id)
hh->id = id;
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
- "fushing!\n", bch);
+ "flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
return;
@@ -231,7 +231,7 @@ recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
- "fushing!\n", bch);
+ "flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
}
@@ -279,7 +279,7 @@ confirm_Bsend(struct bchannel *bch)
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
- "fushing!\n", bch);
+ "flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
}
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index c41eb6180c9c..4bebae733349 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -231,6 +231,26 @@ void led_trigger_event(struct led_trigger *trigger,
}
EXPORT_SYMBOL_GPL(led_trigger_event);
+void led_trigger_blink(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct list_head *entry;
+
+ if (!trigger)
+ return;
+
+ read_lock(&trigger->leddev_list_lock);
+ list_for_each(entry, &trigger->led_cdevs) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ led_blink_set(led_cdev, delay_on, delay_off);
+ }
+ read_unlock(&trigger->leddev_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_trigger_blink);
+
void led_trigger_register_simple(const char *name, struct led_trigger **tp)
{
struct led_trigger *trigger;
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 4d9fa38d9ff6..b0480c8fbcbf 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -14,6 +14,8 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -151,96 +153,34 @@ static void delete_gpio_led(struct gpio_led_data *led)
gpio_free(led->gpio);
}
-#ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int __devinit gpio_led_probe(struct platform_device *pdev)
-{
- struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
- struct gpio_led_data *leds_data;
- int i, ret = 0;
-
- if (!pdata)
- return -EBUSY;
-
- leds_data = kzalloc(sizeof(struct gpio_led_data) * pdata->num_leds,
- GFP_KERNEL);
- if (!leds_data)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_leds; i++) {
- ret = create_gpio_led(&pdata->leds[i], &leds_data[i],
- &pdev->dev, pdata->gpio_blink_set);
- if (ret < 0)
- goto err;
- }
-
- platform_set_drvdata(pdev, leds_data);
-
- return 0;
-
-err:
- for (i = i - 1; i >= 0; i--)
- delete_gpio_led(&leds_data[i]);
-
- kfree(leds_data);
-
- return ret;
-}
+struct gpio_leds_priv {
+ int num_leds;
+ struct gpio_led_data leds[];
+};
-static int __devexit gpio_led_remove(struct platform_device *pdev)
+static inline int sizeof_gpio_leds_priv(int num_leds)
{
- int i;
- struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
- struct gpio_led_data *leds_data;
-
- leds_data = platform_get_drvdata(pdev);
-
- for (i = 0; i < pdata->num_leds; i++)
- delete_gpio_led(&leds_data[i]);
-
- kfree(leds_data);
-
- return 0;
+ return sizeof(struct gpio_leds_priv) +
+ (sizeof(struct gpio_led_data) * num_leds);
}
-static struct platform_driver gpio_led_driver = {
- .probe = gpio_led_probe,
- .remove = __devexit_p(gpio_led_remove),
- .driver = {
- .name = "leds-gpio",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:leds-gpio");
-#endif /* CONFIG_LEDS_GPIO_PLATFORM */
-
/* Code to create from OpenFirmware platform devices */
#ifdef CONFIG_LEDS_GPIO_OF
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
-
-struct gpio_led_of_platform_data {
- int num_leds;
- struct gpio_led_data led_data[];
-};
-
-static int __devinit of_gpio_leds_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
- struct device_node *np = ofdev->dev.of_node, *child;
- struct gpio_led_of_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct gpio_leds_priv *priv;
int count = 0, ret;
- /* count LEDs defined by this device, so we know how much to allocate */
+ /* count LEDs in this device, so we know how much to allocate */
for_each_child_of_node(np, child)
count++;
if (!count)
- return 0; /* or ENODEV? */
+ return NULL;
- pdata = kzalloc(sizeof(*pdata) + sizeof(struct gpio_led_data) * count,
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ priv = kzalloc(sizeof_gpio_leds_priv(count), GFP_KERNEL);
+ if (!priv)
+ return NULL;
for_each_child_of_node(np, child) {
struct gpio_led led = {};
@@ -256,92 +196,112 @@ static int __devinit of_gpio_leds_probe(struct platform_device *ofdev,
if (state) {
if (!strcmp(state, "keep"))
led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
- else if(!strcmp(state, "on"))
+ else if (!strcmp(state, "on"))
led.default_state = LEDS_GPIO_DEFSTATE_ON;
else
led.default_state = LEDS_GPIO_DEFSTATE_OFF;
}
- ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
- &ofdev->dev, NULL);
+ ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
+ &pdev->dev, NULL);
if (ret < 0) {
of_node_put(child);
goto err;
}
}
- dev_set_drvdata(&ofdev->dev, pdata);
-
- return 0;
+ return priv;
err:
- for (count = pdata->num_leds - 2; count >= 0; count--)
- delete_gpio_led(&pdata->led_data[count]);
+ for (count = priv->num_leds - 2; count >= 0; count--)
+ delete_gpio_led(&priv->leds[count]);
+ kfree(priv);
+ return NULL;
+}
- kfree(pdata);
+static const struct of_device_id of_gpio_leds_match[] = {
+ { .compatible = "gpio-leds", },
+ {},
+};
+#else
+static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#define of_gpio_leds_match NULL
+#endif
- return ret;
+
+static int __devinit gpio_led_probe(struct platform_device *pdev)
+{
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_leds_priv *priv;
+ int i, ret = 0;
+
+ if (pdata && pdata->num_leds) {
+ priv = kzalloc(sizeof_gpio_leds_priv(pdata->num_leds),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->num_leds = pdata->num_leds;
+ for (i = 0; i < priv->num_leds; i++) {
+ ret = create_gpio_led(&pdata->leds[i],
+ &priv->leds[i],
+ &pdev->dev, pdata->gpio_blink_set);
+ if (ret < 0) {
+ /* On failure: unwind the led creations */
+ for (i = i - 1; i >= 0; i--)
+ delete_gpio_led(&priv->leds[i]);
+ kfree(priv);
+ return ret;
+ }
+ }
+ } else {
+ priv = gpio_leds_create_of(pdev);
+ if (!priv)
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
}
-static int __devexit of_gpio_leds_remove(struct platform_device *ofdev)
+static int __devexit gpio_led_remove(struct platform_device *pdev)
{
- struct gpio_led_of_platform_data *pdata = dev_get_drvdata(&ofdev->dev);
+ struct gpio_leds_priv *priv = dev_get_drvdata(&pdev->dev);
int i;
- for (i = 0; i < pdata->num_leds; i++)
- delete_gpio_led(&pdata->led_data[i]);
-
- kfree(pdata);
+ for (i = 0; i < priv->num_leds; i++)
+ delete_gpio_led(&priv->leds[i]);
- dev_set_drvdata(&ofdev->dev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
+ kfree(priv);
return 0;
}
-static const struct of_device_id of_gpio_leds_match[] = {
- { .compatible = "gpio-leds", },
- {},
-};
-
-static struct of_platform_driver of_gpio_leds_driver = {
- .driver = {
- .name = "of_gpio_leds",
- .owner = THIS_MODULE,
+static struct platform_driver gpio_led_driver = {
+ .probe = gpio_led_probe,
+ .remove = __devexit_p(gpio_led_remove),
+ .driver = {
+ .name = "leds-gpio",
+ .owner = THIS_MODULE,
.of_match_table = of_gpio_leds_match,
},
- .probe = of_gpio_leds_probe,
- .remove = __devexit_p(of_gpio_leds_remove),
};
-#endif
+
+MODULE_ALIAS("platform:leds-gpio");
static int __init gpio_led_init(void)
{
- int ret = 0;
-
-#ifdef CONFIG_LEDS_GPIO_PLATFORM
- ret = platform_driver_register(&gpio_led_driver);
- if (ret)
- return ret;
-#endif
-#ifdef CONFIG_LEDS_GPIO_OF
- ret = of_register_platform_driver(&of_gpio_leds_driver);
-#endif
-#ifdef CONFIG_LEDS_GPIO_PLATFORM
- if (ret)
- platform_driver_unregister(&gpio_led_driver);
-#endif
-
- return ret;
+ return platform_driver_register(&gpio_led_driver);
}
static void __exit gpio_led_exit(void)
{
-#ifdef CONFIG_LEDS_GPIO_PLATFORM
platform_driver_unregister(&gpio_led_driver);
-#endif
-#ifdef CONFIG_LEDS_GPIO_OF
- of_unregister_platform_driver(&of_gpio_leds_driver);
-#endif
}
module_init(gpio_led_init);
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index f05bb08d0f09..06a5bb484707 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -22,6 +22,7 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
#include <linux/mfd/mc13783.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
struct mc13783_led {
@@ -183,7 +184,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
int ret = 0;
int reg = 0;
@@ -264,7 +265,7 @@ out:
static int __devinit mc13783_led_probe(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_led_platform_data *led_cur;
struct mc13783_led *led, *led_dat;
int ret, i;
@@ -351,7 +352,7 @@ err_free:
static int __devexit mc13783_led_remove(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_led *led = platform_get_drvdata(pdev);
struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
int i;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 290cb325a94c..116a49ce74b2 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -645,8 +645,7 @@ static void smu_expose_childs(struct work_struct *unused)
static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
-static int smu_platform_probe(struct platform_device* dev,
- const struct of_device_id *match)
+static int smu_platform_probe(struct platform_device* dev)
{
if (!smu)
return -ENODEV;
@@ -669,7 +668,7 @@ static const struct of_device_id smu_platform_match[] =
{},
};
-static struct of_platform_driver smu_of_platform_driver =
+static struct platform_driver smu_of_platform_driver =
{
.driver = {
.name = "smu",
@@ -689,7 +688,7 @@ static int __init smu_init_sysfs(void)
* I'm a bit too far from figuring out how that works with those
* new chipsets, but that will come back and bite us
*/
- of_register_platform_driver(&smu_of_platform_driver);
+ platform_driver_register(&smu_of_platform_driver);
return 0;
}
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index f3a29f264db9..bca2af2e3760 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2210,7 +2210,7 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
}
}
-static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
+static int fcu_of_probe(struct platform_device* dev)
{
state = state_detached;
of_dev = dev;
@@ -2240,7 +2240,7 @@ static const struct of_device_id fcu_match[] =
};
MODULE_DEVICE_TABLE(of, fcu_match);
-static struct of_platform_driver fcu_of_platform_driver =
+static struct platform_driver fcu_of_platform_driver =
{
.driver = {
.name = "temperature",
@@ -2263,12 +2263,12 @@ static int __init therm_pm72_init(void)
!rackmac)
return -ENODEV;
- return of_register_platform_driver(&fcu_of_platform_driver);
+ return platform_driver_register(&fcu_of_platform_driver);
}
static void __exit therm_pm72_exit(void)
{
- of_unregister_platform_driver(&fcu_of_platform_driver);
+ platform_driver_unregister(&fcu_of_platform_driver);
}
module_init(therm_pm72_init);
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index c89f396e4c53..d37819fd5ad3 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -443,8 +443,7 @@ static struct i2c_driver g4fan_driver = {
/* initialization / cleanup */
/************************************************************************/
-static int
-therm_of_probe( struct platform_device *dev, const struct of_device_id *match )
+static int therm_of_probe(struct platform_device *dev)
{
return i2c_add_driver( &g4fan_driver );
}
@@ -462,7 +461,7 @@ static const struct of_device_id therm_of_match[] = {{
}, {}
};
-static struct of_platform_driver therm_of_driver = {
+static struct platform_driver therm_of_driver = {
.driver = {
.name = "temperature",
.owner = THIS_MODULE,
@@ -509,14 +508,14 @@ g4fan_init( void )
return -ENODEV;
}
- of_register_platform_driver( &therm_of_driver );
+ platform_driver_register( &therm_of_driver );
return 0;
}
static void __exit
g4fan_exit( void )
{
- of_unregister_platform_driver( &therm_of_driver );
+ platform_driver_unregister( &therm_of_driver );
if( x.of_dev )
of_device_unregister( x.of_dev );
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 98d9ec85e0eb..8420129fc5ee 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -327,4 +327,10 @@ config DM_UEVENT
---help---
Generate udev events for DM events.
+config DM_FLAKEY
+ tristate "Flakey target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ ---help---
+ A target that intermittently fails I/O for debugging purposes.
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d0138606c2e8..448838b1f92a 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
+obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..7dcae0d74db7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1331,20 +1331,29 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
static int crypt_set_key(struct crypt_config *cc, char *key)
{
+ int r = -EINVAL;
+ int key_string_len = strlen(key);
+
/* The key size may not be changed. */
- if (cc->key_size != (strlen(key) >> 1))
- return -EINVAL;
+ if (cc->key_size != (key_string_len >> 1))
+ goto out;
/* Hyphen (which gives a key_size of zero) means there is no key. */
if (!cc->key_size && strcmp(key, "-"))
- return -EINVAL;
+ goto out;
if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
- return -EINVAL;
+ goto out;
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- return crypt_setkey_allcpus(cc);
+ r = crypt_setkey_allcpus(cc);
+
+out:
+ /* Hex key string not needed after here, so wipe it. */
+ memset(key, '0', key_string_len);
+
+ return r;
}
static int crypt_wipe_key(struct crypt_config *cc)
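
The crypt_set_key() hunk records the hex string's length up front and overwrites the string with '0' characters on every exit path, so the key material never survives in the incoming argument. A userspace illustration of the same pattern is sketched below; it is not the kernel code, and the helper name set_key is made up.

#include <stdio.h>
#include <string.h>

/* Decode a hex key, then overwrite the source string so the hex form
 * does not linger in memory, mirroring the memset(key, '0', len) done
 * on every exit path above. */
static int set_key(unsigned char *key, size_t key_size, char *hex)
{
	size_t i, len = strlen(hex);
	int r = -1;

	if (key_size != (len >> 1))	/* two hex digits per key byte */
		goto out;
	for (i = 0; i < key_size; i++)
		if (sscanf(&hex[i * 2], "%2hhx", &key[i]) != 1)
			goto out;
	r = 0;
out:
	memset(hex, '0', len);		/* wipe whether or not decoding worked */
	return r;
}

int main(void)
{
	unsigned char key[4];
	char hex[] = "deadbeef";

	printf("decode: %d, hex now \"%s\"\n",
	       set_key(key, sizeof(key), hex), hex);
	return 0;
}
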
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
new file mode 100644
index 000000000000..ea790623c30b
--- /dev/null
+++ b/drivers/md/dm-flakey.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2003 Sistina Software (UK) Limited.
+ * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/device-mapper.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+
+#define DM_MSG_PREFIX "flakey"
+
+/*
+ * Flakey: Used for testing only, simulates intermittent,
+ * catastrophic device failure.
+ */
+struct flakey_c {
+ struct dm_dev *dev;
+ unsigned long start_time;
+ sector_t start;
+ unsigned up_interval;
+ unsigned down_interval;
+};
+
+/*
+ * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
+ */
+static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct flakey_c *fc;
+ unsigned long long tmp;
+
+ if (argc != 4) {
+ ti->error = "dm-flakey: Invalid argument count";
+ return -EINVAL;
+ }
+
+ fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ if (!fc) {
+ ti->error = "dm-flakey: Cannot allocate linear context";
+ return -ENOMEM;
+ }
+ fc->start_time = jiffies;
+
+ if (sscanf(argv[1], "%llu", &tmp) != 1) {
+ ti->error = "dm-flakey: Invalid device sector";
+ goto bad;
+ }
+ fc->start = tmp;
+
+ if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
+ ti->error = "dm-flakey: Invalid up interval";
+ goto bad;
+ }
+
+ if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
+ ti->error = "dm-flakey: Invalid down interval";
+ goto bad;
+ }
+
+ if (!(fc->up_interval + fc->down_interval)) {
+ ti->error = "dm-flakey: Total (up + down) interval is zero";
+ goto bad;
+ }
+
+ if (fc->up_interval + fc->down_interval < fc->up_interval) {
+ ti->error = "dm-flakey: Interval overflow";
+ goto bad;
+ }
+
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
+ ti->error = "dm-flakey: Device lookup failed";
+ goto bad;
+ }
+
+ ti->num_flush_requests = 1;
+ ti->private = fc;
+ return 0;
+
+bad:
+ kfree(fc);
+ return -EINVAL;
+}
+
+static void flakey_dtr(struct dm_target *ti)
+{
+ struct flakey_c *fc = ti->private;
+
+ dm_put_device(ti, fc->dev);
+ kfree(fc);
+}
+
+static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
+{
+ struct flakey_c *fc = ti->private;
+
+ return fc->start + (bi_sector - ti->begin);
+}
+
+static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct flakey_c *fc = ti->private;
+
+ bio->bi_bdev = fc->dev->bdev;
+ if (bio_sectors(bio))
+ bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+}
+
+static int flakey_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct flakey_c *fc = ti->private;
+ unsigned elapsed;
+
+ /* Are we alive ? */
+ elapsed = (jiffies - fc->start_time) / HZ;
+ if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
+ return -EIO;
+
+ flakey_map_bio(ti, bio);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int flakey_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned int maxlen)
+{
+ struct flakey_c *fc = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ result[0] = '\0';
+ break;
+
+ case STATUSTYPE_TABLE:
+ snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
+ (unsigned long long)fc->start, fc->up_interval,
+ fc->down_interval);
+ break;
+ }
+ return 0;
+}
+
+static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+{
+ struct flakey_c *fc = ti->private;
+
+ return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+}
+
+static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct flakey_c *fc = ti->private;
+ struct request_queue *q = bdev_get_queue(fc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = fc->dev->bdev;
+ bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+ struct flakey_c *fc = ti->private;
+
+ return fn(ti, fc->dev, fc->start, ti->len, data);
+}
+
+static struct target_type flakey_target = {
+ .name = "flakey",
+ .version = {1, 1, 0},
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+ .map = flakey_map,
+ .status = flakey_status,
+ .ioctl = flakey_ioctl,
+ .merge = flakey_merge,
+ .iterate_devices = flakey_iterate_devices,
+};
+
+static int __init dm_flakey_init(void)
+{
+ int r = dm_register_target(&flakey_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit dm_flakey_exit(void)
+{
+ dm_unregister_target(&flakey_target);
+}
+
+/* Module hooks */
+module_init(dm_flakey_init);
+module_exit(dm_flakey_exit);
+
+MODULE_DESCRIPTION(DM_NAME " flakey target");
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
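
dm-flakey takes the four arguments parsed by flakey_ctr() above, so a table line such as "0 409600 flakey /dev/sdb1 0 30 5" (start, length, target name, then device path, offset, up interval, down interval; the device and sector count are example values) would pass I/O through for 30 seconds and then fail it with -EIO for 5 seconds, cycling from the moment the table was loaded. A small standalone sketch of flakey_map()'s arithmetic:

#include <stdio.h>

/* flakey_map() computes elapsed = (jiffies - start_time) / HZ and fails
 * I/O whenever elapsed modulo (up + down) lands in the down window. */
int main(void)
{
	unsigned up = 30, down = 5;	/* example intervals, in seconds */
	unsigned t;

	for (t = 0; t < 2 * (up + down); t++)
		printf("t=%2us: %s\n", t,
		       (t % (up + down)) >= up ? "fail with -EIO" : "pass through");
	return 0;
}
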
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 6d12775a1061..e7af88b71592 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1501,11 +1501,6 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-static void free_params(struct dm_ioctl *param)
-{
- vfree(param);
-}
-
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
{
struct dm_ioctl tmp, *dmi;
@@ -1520,13 +1515,22 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
if (!dmi)
return -ENOMEM;
- if (copy_from_user(dmi, user, tmp.data_size)) {
- vfree(dmi);
- return -EFAULT;
- }
+ if (copy_from_user(dmi, user, tmp.data_size))
+ goto bad;
+
+ /* Wipe the user buffer so we do not return it to userspace */
+ if ((tmp.flags & DM_SECURE_DATA_FLAG) &&
+ clear_user(user, tmp.data_size))
+ goto bad;
*param = dmi;
return 0;
+
+bad:
+ if (tmp.flags & DM_SECURE_DATA_FLAG)
+ memset(dmi, 0, tmp.data_size);
+ vfree(dmi);
+ return -EFAULT;
}
static int validate_params(uint cmd, struct dm_ioctl *param)
@@ -1534,6 +1538,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
/* Always clear this flag */
param->flags &= ~DM_BUFFER_FULL_FLAG;
param->flags &= ~DM_UEVENT_GENERATED_FLAG;
+ param->flags &= ~DM_SECURE_DATA_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
@@ -1561,10 +1566,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
+ int wipe_buffer;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
- size_t param_size;
+ size_t input_param_size;
/* only root can play with this */
if (!capable(CAP_SYS_ADMIN))
@@ -1605,6 +1611,8 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
* Copy the parameters into kernel space.
*/
r = copy_params(user, &param);
+ input_param_size = param->data_size;
+ wipe_buffer = param->flags & DM_SECURE_DATA_FLAG;
current->flags &= ~PF_MEMALLOC;
@@ -1615,9 +1623,8 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (r)
goto out;
- param_size = param->data_size;
param->data_size = sizeof(*param);
- r = fn(param, param_size);
+ r = fn(param, input_param_size);
/*
* Copy the results back to userland.
@@ -1625,8 +1632,11 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (!r && copy_to_user(user, param, param->data_size))
r = -EFAULT;
- out:
- free_params(param);
+out:
+ if (wipe_buffer)
+ memset(param, 0, input_param_size);
+
+ vfree(param);
return r;
}
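
Two details of the dm-ioctl hunk are easy to miss: the ioctl functions overwrite param->data_size with the size of their output, so the final wipe has to use input_param_size, captured before fn() runs; and the DM_SECURE_DATA_FLAG request itself is cleared in validate_params() so it is never reflected back to userspace. A userspace sketch of the first point follows; worker() here is a stand-in, not a dm function.

#include <stdio.h>
#include <string.h>

/* Stand-in for a dm ioctl function: it rewrites the size field to the
 * amount of output it produced. */
static int worker(char *buf, size_t *size)
{
	*size = snprintf(buf, *size, "ok");
	return 0;
}

int main(void)
{
	char buf[64] = "very secret table parameters";
	size_t size = sizeof(buf);
	size_t input_param_size = size;	/* captured before worker() runs */

	worker(buf, &size);
	memset(buf, 0, input_param_size);	/* wipe the full input, not 'size' */
	printf("wiped %zu bytes\n", input_param_size);
	return 0;
}
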
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6951536ea29c..8e8a868ca857 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -543,7 +543,7 @@ static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &dev);
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
if (r)
return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b82d28819e2a..a550a057d991 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -844,8 +844,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
{
/* target parameters */
static struct param _params[] = {
- {1, 1024, "invalid number of priority groups"},
- {1, 1024, "invalid initial priority group number"},
+ {0, 1024, "invalid number of priority groups"},
+ {0, 1024, "invalid initial priority group number"},
};
int r;
@@ -879,6 +879,13 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
+ if ((!m->nr_priority_groups && next_pg_num) ||
+ (m->nr_priority_groups && !next_pg_num)) {
+ ti->error = "invalid initial priority group";
+ r = -EINVAL;
+ goto bad;
+ }
+
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
@@ -1065,7 +1072,7 @@ out:
static int action_dev(struct multipath *m, struct dm_dev *dev,
action_fn action)
{
- int r = 0;
+ int r = -EINVAL;
struct pgpath *pgpath;
struct priority_group *pg;
@@ -1283,24 +1290,22 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP)
- return error;
-
- if (clone->cmd_flags & REQ_DISCARD)
- /*
- * Pass all discard request failures up.
- * FIXME: only fail_path if the discard failed due to a
- * transport problem. This requires precise understanding
- * of the underlying failure (e.g. the SCSI sense).
- */
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO)
return error;
if (mpio->pgpath)
fail_path(mpio->pgpath);
spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
- r = -EIO;
+ if (!m->nr_valid_paths) {
+ if (!m->queue_if_no_path) {
+ if (!__must_push_back(m))
+ r = -EIO;
+ } else {
+ if (error == -EBADE)
+ r = error;
+ }
+ }
spin_unlock_irqrestore(&m->lock, flags);
return r;
@@ -1417,7 +1422,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
else if (m->current_pg)
pg_num = m->current_pg->pg_num;
else
- pg_num = 1;
+ pg_num = (m->nr_priority_groups ? 1 : 0);
DMEMIT("%u ", pg_num);
@@ -1671,7 +1676,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index fdde53cd12b7..a2d330942cb2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1080,7 +1080,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv++;
argc--;
- r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
+ r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
goto bad_cow;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..d5ad7723b172 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
+ if (unit && MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1<<MdpMinorShift)-1);
+
retry:
spin_lock(&all_mddevs_lock);
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
- set_capacity(mddev->gendisk, mddev->array_sectors);
- if (mddev->pers)
+ if (mddev->pers) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
-
+ }
return len;
}
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
+ mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
+ mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_size_change(mddev->gendisk, bdev);
+ check_disk_change(bdev);
out:
return err;
}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
+
+static int md_media_changed(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ mddev->changed = 0;
+ return 0;
+}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
+ .media_changed = md_media_changed,
+ .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -7338,7 +7361,7 @@ static int __init md_init(void)
{
int ret = -ENOMEM;
- md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
+ md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
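
The mddev_find() hunk near the top of the md.c changes clears the low MdpMinorShift bits of the requested device number, so every partition node of a partitionable array resolves to the same mddev. A quick illustration, assuming MdpMinorShift is 6 as defined in md.c (64 minors per md_dN device):

#include <stdio.h>

int main(void)
{
	unsigned mdp_minor_shift = 6;	/* assumption: MdpMinorShift in md.c */
	unsigned minor;

	for (minor = 64; minor < 68; minor++)
		printf("minor %u -> whole-device minor %u\n",
		       minor, minor & ~((1U << mdp_minor_shift) - 1));
	return 0;
}
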
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b8593b2a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
+ int changed; /* True if we might need to
+ * reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Only take the spinlock to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Spinlock only taken to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
- mddev->queue->queue_lock = &conf->device_lock;
-
mddev->thread = conf->thread;
conf->thread = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index 58a513bcd747..afba6dc5e080 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -971,6 +971,22 @@ static struct tuner_params tuner_tena_9533_di_params[] = {
},
};
+/* ------------ TUNER_TENA_TNF_5337 - Tena tnf5337MFD STD M/N ------------ */
+
+static struct tuner_range tuner_tena_tnf_5337_ntsc_ranges[] = {
+ { 16 * 166.25 /*MHz*/, 0x86, 0x01, },
+ { 16 * 466.25 /*MHz*/, 0x86, 0x02, },
+ { 16 * 999.99, 0x86, 0x08, },
+};
+
+static struct tuner_params tuner_tena_tnf_5337_params[] = {
+ {
+ .type = TUNER_PARAM_TYPE_NTSC,
+ .ranges = tuner_tena_tnf_5337_ntsc_ranges,
+ .count = ARRAY_SIZE(tuner_tena_tnf_5337_ntsc_ranges),
+ },
+};
+
/* ------------ TUNER_PHILIPS_FMD1216ME(X)_MK3 - Philips PAL ------------ */
static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
@@ -1842,6 +1858,11 @@ struct tunertype tuners[] = {
.params = tuner_philips_fq1236_mk5_params,
.count = ARRAY_SIZE(tuner_philips_fq1236_mk5_params),
},
+ [TUNER_TENA_TNF_5337] = { /* Tena 5337 MFD */
+ .name = "Tena TNF5337 MFD",
+ .params = tuner_tena_tnf_5337_params,
+ .count = ARRAY_SIZE(tuner_tena_tnf_5337_params),
+ },
};
EXPORT_SYMBOL(tuners);
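
The new TNF5337 ranges, like the rest of this table, store their upper band edges as 16 times the frequency in MHz because analog tuner frequencies are programmed in 62.5 kHz steps. A quick check of the three limits added above:

#include <stdio.h>

/* 1 MHz = 16 units of 62.5 kHz, hence the "16 * <MHz>" entries. */
int main(void)
{
	double mhz[] = { 166.25, 466.25, 999.99 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%.2f MHz -> %u units of 62.5 kHz\n",
		       mhz[i], (unsigned)(mhz[i] * 16));
	return 0;
}
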
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 3d48ba019342..fe4f894183ff 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -358,3 +358,11 @@ config DVB_USB_LME2510
select DVB_IX2505V if !DVB_FE_CUSTOMISE
help
Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 .
+
+config DVB_USB_TECHNISAT_USB2
+ tristate "Technisat DVB-S/S2 USB2.0 support"
+ depends on DVB_USB
+ select DVB_STB0899 if !DVB_FE_CUSTOMISE
+ select DVB_STB6100 if !DVB_FE_CUSTOMISE
+ help
+ Say Y here to support the Technisat USB2 DVB-S/S2 device.
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 5b1d12f2d591..4bac13da0c39 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -91,6 +91,9 @@ obj-$(CONFIG_DVB_USB_AZ6027) += dvb-usb-az6027.o
dvb-usb-lmedm04-objs = lmedm04.o
obj-$(CONFIG_DVB_USB_LME2510) += dvb-usb-lmedm04.o
+dvb-usb-technisat-usb2-objs = technisat-usb2.o
+obj-$(CONFIG_DVB_USB_TECHNISAT_USB2) += dvb-usb-technisat-usb2.o
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
# due to tuner-xc3028
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 3537d65c04bc..b2a87f2c2c3e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -32,6 +32,7 @@ extern int dvb_usb_dib0700_debug;
// 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog)
// 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1)
// 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " )
+#define REQUEST_SET_I2C_PARAM 0x10
#define REQUEST_SET_RC 0x11
#define REQUEST_NEW_I2C_READ 0x12
#define REQUEST_NEW_I2C_WRITE 0x13
@@ -61,6 +62,7 @@ extern struct i2c_algorithm dib0700_i2c_algo;
extern int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc, int *cold);
extern int dib0700_change_protocol(struct rc_dev *dev, u64 rc_type);
+extern int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz);
extern int dib0700_device_count;
extern int dvb_usb_dib0700_ir_proto;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 98ffb40728e3..b79af68c54ae 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -186,7 +186,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
msg[i].len,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
- err("i2c read error (status = %d)\n", result);
+ deb_info("i2c read error (status = %d)\n", result);
break;
}
@@ -215,7 +215,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
0, 0, buf, msg[i].len + 4,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
- err("i2c write error (status = %d)\n", result);
+ deb_info("i2c write error (status = %d)\n", result);
break;
}
}
@@ -328,6 +328,31 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
return dib0700_ctrl_wr(d, b, 10);
}
+int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
+{
+ u16 divider;
+ u8 b[8];
+
+ if (scl_kHz == 0)
+ return -EINVAL;
+
+ b[0] = REQUEST_SET_I2C_PARAM;
+ divider = (u16) (30000 / scl_kHz);
+ b[2] = (u8) (divider >> 8);
+ b[3] = (u8) (divider & 0xff);
+ divider = (u16) (72000 / scl_kHz);
+ b[4] = (u8) (divider >> 8);
+ b[5] = (u8) (divider & 0xff);
+ divider = (u16) (72000 / scl_kHz); /* clock: 72MHz */
+ b[6] = (u8) (divider >> 8);
+ b[7] = (u8) (divider & 0xff);
+
+ deb_info("setting I2C speed: %04x %04x %04x (%d kHz).",
+ (b[2] << 8) | (b[3]), (b[4] << 8) | b[5], (b[6] << 8) | b[7], scl_kHz);
+ return dib0700_ctrl_wr(d, b, 8);
+}
+
+
int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3)
{
switch (clk_MHz) {
@@ -459,10 +484,20 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
deb_info("modifying (%d) streaming state for %d\n", onoff, adap->id);
- if (onoff)
- st->channel_state |= 1 << adap->id;
- else
- st->channel_state &= ~(1 << adap->id);
+ st->channel_state &= ~0x3;
+ if ((adap->stream.props.endpoint != 2)
+ && (adap->stream.props.endpoint != 3)) {
+ deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->stream.props.endpoint);
+ if (onoff)
+ st->channel_state |= 1 << (adap->id);
+ else
+ st->channel_state |= 1 << (1 - adap->id);
+ } else {
+ if (onoff)
+ st->channel_state |= 1 << (adap->stream.props.endpoint-2);
+ else
+ st->channel_state |= 1 << (3-adap->stream.props.endpoint);
+ }
b[2] |= st->channel_state;
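
dib0700_set_i2c_speed() only does divider arithmetic: the first divider is 30000/scl_kHz and the other two are 72000/scl_kHz against the 72 MHz reference noted in the code. For the two speeds the new boards below actually request (340 kHz during frontend attach, 2 MHz after the tuner PLL is up), the values would be as computed in this sketch, which covers the arithmetic only, not the USB control-message layout:

#include <stdio.h>

int main(void)
{
	unsigned scl_khz[] = { 340, 2000 };
	unsigned i;

	for (i = 0; i < 2; i++)
		printf("%u kHz: divider1 = %u, divider2 = divider3 = %u\n",
		       scl_khz[i], 30000 / scl_khz[i], 72000 / scl_khz[i]);
	return 0;
}
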
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index defd83964ce2..c6022afeb785 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -12,6 +12,7 @@
#include "dib7000m.h"
#include "dib7000p.h"
#include "dib8000.h"
+#include "dib9000.h"
#include "mt2060.h"
#include "mt2266.h"
#include "tuner-xc2028.h"
@@ -29,6 +30,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
struct dib0700_adapter_state {
int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
+ const struct firmware *frontend_firmware;
};
/* Hauppauge Nova-T 500 (aka Bristol)
@@ -1226,13 +1228,13 @@ static int dib807x_tuner_attach(struct dvb_usb_adapter *adap)
static int stk80xx_pid_filter(struct dvb_usb_adapter *adapter, int index,
u16 pid, int onoff)
{
- return dib8000_pid_filter(adapter->fe, index, pid, onoff);
+ return dib8000_pid_filter(adapter->fe, index, pid, onoff);
}
static int stk80xx_pid_filter_ctrl(struct dvb_usb_adapter *adapter,
- int onoff)
+ int onoff)
{
- return dib8000_pid_filter_ctrl(adapter->fe, onoff);
+ return dib8000_pid_filter_ctrl(adapter->fe, onoff);
}
/* STK807x */
@@ -1304,11 +1306,11 @@ static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
/* STK8096GP */
struct dibx000_agc_config dib8090_agc_config[2] = {
- {
+ {
BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
- * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
- * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
+ * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
+ * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
(0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
| (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
@@ -1345,12 +1347,12 @@ struct dibx000_agc_config dib8090_agc_config[2] = {
51,
0,
- },
- {
+ },
+ {
BAND_CBAND,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
- * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
- * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
+ * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
+ * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
(0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
| (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
@@ -1387,135 +1389,153 @@ struct dibx000_agc_config dib8090_agc_config[2] = {
51,
0,
- }
+ }
};
static struct dibx000_bandwidth_config dib8090_pll_config_12mhz = {
- 54000, 13500,
- 1, 18, 3, 1, 0,
- 0, 0, 1, 1, 2,
- (3 << 14) | (1 << 12) | (599 << 0),
- (0 << 25) | 0,
- 20199727,
- 12000000,
+ 54000, 13500,
+ 1, 18, 3, 1, 0,
+ 0, 0, 1, 1, 2,
+ (3 << 14) | (1 << 12) | (599 << 0),
+ (0 << 25) | 0,
+ 20199727,
+ 12000000,
};
static int dib8090_get_adc_power(struct dvb_frontend *fe)
{
- return dib8000_get_adc_power(fe, 1);
+ return dib8000_get_adc_power(fe, 1);
}
-static struct dib8000_config dib809x_dib8000_config = {
- .output_mpeg2_in_188_bytes = 1,
-
- .agc_config_count = 2,
- .agc = dib8090_agc_config,
- .agc_control = dib0090_dcc_freq,
- .pll = &dib8090_pll_config_12mhz,
- .tuner_is_baseband = 1,
-
- .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS,
- .gpio_val = DIB8000_GPIO_DEFAULT_VALUES,
- .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS,
-
- .hostbus_diversity = 1,
- .div_cfg = 0x31,
- .output_mode = OUTMODE_MPEG2_FIFO,
- .drives = 0x2d98,
- .diversity_delay = 144,
- .refclksel = 3,
+static struct dib8000_config dib809x_dib8000_config[2] = {
+ {
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_config_count = 2,
+ .agc = dib8090_agc_config,
+ .agc_control = dib0090_dcc_freq,
+ .pll = &dib8090_pll_config_12mhz,
+ .tuner_is_baseband = 1,
+
+ .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB8000_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS,
+
+ .hostbus_diversity = 1,
+ .div_cfg = 0x31,
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .drives = 0x2d98,
+ .diversity_delay = 48,
+ .refclksel = 3,
+ }, {
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_config_count = 2,
+ .agc = dib8090_agc_config,
+ .agc_control = dib0090_dcc_freq,
+ .pll = &dib8090_pll_config_12mhz,
+ .tuner_is_baseband = 1,
+
+ .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB8000_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS,
+
+ .hostbus_diversity = 1,
+ .div_cfg = 0x31,
+ .output_mode = OUTMODE_DIVERSITY,
+ .drives = 0x2d08,
+ .diversity_delay = 1,
+ .refclksel = 3,
+ }
+};
+
+static struct dib0090_wbd_slope dib8090_wbd_table[] = {
+ /* max freq ; cold slope ; cold offset ; warm slope ; warm offset ; wbd gain */
+ { 120, 0, 500, 0, 500, 4 }, /* CBAND */
+ { 170, 0, 450, 0, 450, 4 }, /* CBAND */
+ { 380, 48, 373, 28, 259, 6 }, /* VHF */
+ { 860, 34, 700, 36, 616, 6 }, /* high UHF */
+ { 0xFFFF, 34, 700, 36, 616, 6 }, /* default */
};
static struct dib0090_config dib809x_dib0090_config = {
- .io.pll_bypass = 1,
- .io.pll_range = 1,
- .io.pll_prediv = 1,
- .io.pll_loopdiv = 20,
- .io.adc_clock_ratio = 8,
- .io.pll_int_loop_filt = 0,
- .io.clock_khz = 12000,
- .reset = dib80xx_tuner_reset,
- .sleep = dib80xx_tuner_sleep,
- .clkouttobamse = 1,
- .analog_output = 1,
- .i2c_address = DEFAULT_DIB0090_I2C_ADDRESS,
- .wbd_vhf_offset = 100,
- .wbd_cband_offset = 450,
- .use_pwm_agc = 1,
- .clkoutdrive = 1,
- .get_adc_power = dib8090_get_adc_power,
- .freq_offset_khz_uhf = 0,
+ .io.pll_bypass = 1,
+ .io.pll_range = 1,
+ .io.pll_prediv = 1,
+ .io.pll_loopdiv = 20,
+ .io.adc_clock_ratio = 8,
+ .io.pll_int_loop_filt = 0,
+ .io.clock_khz = 12000,
+ .reset = dib80xx_tuner_reset,
+ .sleep = dib80xx_tuner_sleep,
+ .clkouttobamse = 1,
+ .analog_output = 1,
+ .i2c_address = DEFAULT_DIB0090_I2C_ADDRESS,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 1,
+ .get_adc_power = dib8090_get_adc_power,
+ .freq_offset_khz_uhf = -63,
.freq_offset_khz_vhf = -143,
+ .wbd = dib8090_wbd_table,
+ .fref_clock_ratio = 6,
};
static int dib8096_set_param_override(struct dvb_frontend *fe,
struct dvb_frontend_parameters *fep)
{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
- struct dib0700_adapter_state *state = adap->priv;
- u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
- u16 offset;
- int ret = 0;
- enum frontend_tune_state tune_state = CT_SHUTDOWN;
- u16 ltgain, rf_gain_limit;
-
- ret = state->set_param_save(fe, fep);
- if (ret < 0)
- return ret;
-
- switch (band) {
- case BAND_VHF:
- offset = 100;
- break;
- case BAND_UHF:
- offset = 550;
- break;
- default:
- offset = 0;
- break;
- }
- offset += (dib0090_get_wbd_offset(fe) * 8 * 18 / 33 + 1) / 2;
- dib8000_set_wbd_ref(fe, offset);
-
-
- if (band == BAND_CBAND) {
- deb_info("tuning in CBAND - soft-AGC startup\n");
- /* TODO specific wbd target for dib0090 - needed for startup ? */
- dib0090_set_tune_state(fe, CT_AGC_START);
- do {
- ret = dib0090_gain_control(fe);
- msleep(ret);
- tune_state = dib0090_get_tune_state(fe);
- if (tune_state == CT_AGC_STEP_0)
- dib8000_set_gpio(fe, 6, 0, 1);
- else if (tune_state == CT_AGC_STEP_1) {
- dib0090_get_current_gain(fe, NULL, NULL, &rf_gain_limit, &ltgain);
- if (rf_gain_limit == 0)
- dib8000_set_gpio(fe, 6, 0, 0);
- }
- } while (tune_state < CT_AGC_STOP);
- dib0090_pwm_gain_reset(fe);
- dib8000_pwm_agc_reset(fe);
- dib8000_set_tune_state(fe, CT_DEMOD_START);
- } else {
- deb_info("not tuning in CBAND - standard AGC startup\n");
- dib0090_pwm_gain_reset(fe);
- }
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dib0700_adapter_state *state = adap->priv;
+ u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u16 target;
+ int ret = 0;
+ enum frontend_tune_state tune_state = CT_SHUTDOWN;
+ u16 ltgain, rf_gain_limit;
+
+ ret = state->set_param_save(fe, fep);
+ if (ret < 0)
+ return ret;
+
+ target = (dib0090_get_wbd_offset(fe) * 8 * 18 / 33 + 1) / 2;
+ dib8000_set_wbd_ref(fe, target);
+
+
+ if (band == BAND_CBAND) {
+ deb_info("tuning in CBAND - soft-AGC startup\n");
+ dib0090_set_tune_state(fe, CT_AGC_START);
+ do {
+ ret = dib0090_gain_control(fe);
+ msleep(ret);
+ tune_state = dib0090_get_tune_state(fe);
+ if (tune_state == CT_AGC_STEP_0)
+ dib8000_set_gpio(fe, 6, 0, 1);
+ else if (tune_state == CT_AGC_STEP_1) {
+ dib0090_get_current_gain(fe, NULL, NULL, &rf_gain_limit, &ltgain);
+ if (rf_gain_limit == 0)
+ dib8000_set_gpio(fe, 6, 0, 0);
+ }
+ } while (tune_state < CT_AGC_STOP);
+ dib0090_pwm_gain_reset(fe);
+ dib8000_pwm_agc_reset(fe);
+ dib8000_set_tune_state(fe, CT_DEMOD_START);
+ } else {
+ deb_info("not tuning in CBAND - standard AGC startup\n");
+ dib0090_pwm_gain_reset(fe);
+ }
- return 0;
+ return 0;
}
static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
{
- struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
- return -ENODEV;
+ if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ return -ENODEV;
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
- return 0;
+ st->set_param_save = adap->fe->ops.tuner_ops.set_params;
+ adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
+ return 0;
}
static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
@@ -1537,11 +1557,931 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80);
- adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config);
+ adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
+
+ return adap->fe == NULL ? -ENODEV : 0;
+}
+
+static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c;
+ struct dvb_frontend *fe_slave = dib8000_get_slave_frontend(adap->fe, 1);
+
+ if (fe_slave) {
+ tun_i2c = dib8000_get_i2c_master(fe_slave, DIBX000_I2C_INTERFACE_TUNER, 1);
+ if (dvb_attach(dib0090_register, fe_slave, tun_i2c, &dib809x_dib0090_config) == NULL)
+ return -ENODEV;
+ fe_slave->dvb = adap->fe->dvb;
+ fe_slave->ops.tuner_ops.set_params = dib8096_set_param_override;
+ }
+ tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ return -ENODEV;
+
+ st->set_param_save = adap->fe->ops.tuner_ops.set_params;
+ adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
+
+ return 0;
+}
+
+static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_frontend *fe_slave;
+
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(1000);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 2, 18, 0x80);
+
+ adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
+ if (adap->fe == NULL)
+ return -ENODEV;
+
+ fe_slave = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]);
+ dib8000_set_slave_frontend(adap->fe, fe_slave);
+
+ return fe_slave == NULL ? -ENODEV : 0;
+}
+
+/* STK9090M */
+static int dib90x0_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
+{
+ return dib9000_fw_pid_filter(adapter->fe, index, pid, onoff);
+}
+
+static int dib90x0_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
+{
+ return dib9000_fw_pid_filter_ctrl(adapter->fe, onoff);
+}
+
+static int dib90x0_tuner_reset(struct dvb_frontend *fe, int onoff)
+{
+ return dib9000_set_gpio(fe, 5, 0, !onoff);
+}
+
+static int dib90x0_tuner_sleep(struct dvb_frontend *fe, int onoff)
+{
+ return dib9000_set_gpio(fe, 0, 0, onoff);
+}
+
+static int dib01x0_pmu_update(struct i2c_adapter *i2c, u16 *data, u8 len)
+{
+ u8 wb[4] = { 0xc >> 8, 0xc & 0xff, 0, 0 };
+ u8 rb[2];
+ struct i2c_msg msg[2] = {
+ {.addr = 0x1e >> 1, .flags = 0, .buf = wb, .len = 2},
+ {.addr = 0x1e >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2},
+ };
+ u8 index_data;
+
+ dibx000_i2c_set_speed(i2c, 250);
+
+ if (i2c_transfer(i2c, msg, 2) != 2)
+ return -EIO;
+
+ switch (rb[0] << 8 | rb[1]) {
+ case 0:
+ deb_info("Found DiB0170 rev1: This version of DiB0170 is not supported any longer.\n");
+ return -EIO;
+ case 1:
+ deb_info("Found DiB0170 rev2");
+ break;
+ case 2:
+ deb_info("Found DiB0190 rev2");
+ break;
+ default:
+ deb_info("DiB01x0 not found");
+ return -EIO;
+ }
+
+ for (index_data = 0; index_data < len; index_data += 2) {
+ wb[2] = (data[index_data + 1] >> 8) & 0xff;
+ wb[3] = (data[index_data + 1]) & 0xff;
+
+ if (data[index_data] == 0) {
+ wb[0] = (data[index_data] >> 8) & 0xff;
+ wb[1] = (data[index_data]) & 0xff;
+ msg[0].len = 2;
+ if (i2c_transfer(i2c, msg, 2) != 2)
+ return -EIO;
+ wb[2] |= rb[0];
+ wb[3] |= rb[1] & ~(3 << 4);
+ }
+
+ wb[0] = (data[index_data] >> 8)&0xff;
+ wb[1] = (data[index_data])&0xff;
+ msg[0].len = 4;
+ if (i2c_transfer(i2c, &msg[0], 1) != 1)
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct dib9000_config stk9090m_config = {
+ .output_mpeg2_in_188_bytes = 1,
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .vcxo_timer = 279620,
+ .timing_frequency = 20452225,
+ .demod_clock_khz = 60000,
+ .xtal_clock_khz = 30000,
+ .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0),
+ .subband = {
+ 2,
+ {
+ { 240, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0008, 0x0000, 0x0008 } }, /* GPIO 3 to 1 for VHF */
+ { 890, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0008, 0x0000, 0x0000 } }, /* GPIO 3 to 0 for UHF */
+ { 0 },
+ },
+ },
+ .gpio_function = {
+ { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_ON, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = (0x10 & ~0x1) | 0x20 },
+ { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_OFF, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = 0 | 0x21 },
+ },
+};
+
+static struct dib9000_config nim9090md_config[2] = {
+ {
+ .output_mpeg2_in_188_bytes = 1,
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .vcxo_timer = 279620,
+ .timing_frequency = 20452225,
+ .demod_clock_khz = 60000,
+ .xtal_clock_khz = 30000,
+ .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0),
+ }, {
+ .output_mpeg2_in_188_bytes = 1,
+ .output_mode = OUTMODE_DIVERSITY,
+ .vcxo_timer = 279620,
+ .timing_frequency = 20452225,
+ .demod_clock_khz = 60000,
+ .xtal_clock_khz = 30000,
+ .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0),
+ .subband = {
+ 2,
+ {
+ { 240, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0006, 0x0000, 0x0006 } }, /* GPIO 1 and 2 to 1 for VHF */
+ { 890, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0006, 0x0000, 0x0000 } }, /* GPIO 1 and 2 to 0 for UHF */
+ { 0 },
+ },
+ },
+ .gpio_function = {
+ { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_ON, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = (0x10 & ~0x1) | 0x20 },
+ { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_OFF, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = 0 | 0x21 },
+ },
+ }
+};
+
+static struct dib0090_config dib9090_dib0090_config = {
+ .io.pll_bypass = 0,
+ .io.pll_range = 1,
+ .io.pll_prediv = 1,
+ .io.pll_loopdiv = 8,
+ .io.adc_clock_ratio = 8,
+ .io.pll_int_loop_filt = 0,
+ .io.clock_khz = 30000,
+ .reset = dib90x0_tuner_reset,
+ .sleep = dib90x0_tuner_sleep,
+ .clkouttobamse = 0,
+ .analog_output = 0,
+ .use_pwm_agc = 0,
+ .clkoutdrive = 0,
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = 0,
+};
+
+static struct dib0090_config nim9090md_dib0090_config[2] = {
+ {
+ .io.pll_bypass = 0,
+ .io.pll_range = 1,
+ .io.pll_prediv = 1,
+ .io.pll_loopdiv = 8,
+ .io.adc_clock_ratio = 8,
+ .io.pll_int_loop_filt = 0,
+ .io.clock_khz = 30000,
+ .reset = dib90x0_tuner_reset,
+ .sleep = dib90x0_tuner_sleep,
+ .clkouttobamse = 1,
+ .analog_output = 0,
+ .use_pwm_agc = 0,
+ .clkoutdrive = 0,
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = 0,
+ }, {
+ .io.pll_bypass = 0,
+ .io.pll_range = 1,
+ .io.pll_prediv = 1,
+ .io.pll_loopdiv = 8,
+ .io.adc_clock_ratio = 8,
+ .io.pll_int_loop_filt = 0,
+ .io.clock_khz = 30000,
+ .reset = dib90x0_tuner_reset,
+ .sleep = dib90x0_tuner_sleep,
+ .clkouttobamse = 0,
+ .analog_output = 0,
+ .use_pwm_agc = 0,
+ .clkoutdrive = 0,
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = 0,
+ }
+};
+
+
+static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *state = adap->priv;
+ struct dib0700_state *st = adap->dev->priv;
+ u32 fw_version;
+
+ /* Make use of the new i2c functions from FW 1.20 */
+ dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL);
+ if (fw_version >= 0x10200)
+ st->fw_use_new_i2c_api = 1;
+ dib0700_set_i2c_speed(adap->dev, 340);
+
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x80);
+
+ if (request_firmware(&state->frontend_firmware, "dib9090.fw", &adap->dev->udev->dev)) {
+ deb_info("%s: Upload failed. (file not found?)\n", __func__);
+ return -ENODEV;
+ } else {
+ deb_info("%s: firmware read %Zu bytes.\n", __func__, state->frontend_firmware->size);
+ }
+ stk9090m_config.microcode_B_fe_size = state->frontend_firmware->size;
+ stk9090m_config.microcode_B_fe_buffer = state->frontend_firmware->data;
+
+ adap->fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+
+ return adap->fe == NULL ? -ENODEV : 0;
+}
+
+static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *state = adap->priv;
+ struct i2c_adapter *i2c = dib9000_get_tuner_interface(adap->fe);
+ u16 data_dib190[10] = {
+ 1, 0x1374,
+ 2, 0x01a2,
+ 7, 0x0020,
+ 0, 0x00ef,
+ 8, 0x0486,
+ };
+
+ if (dvb_attach(dib0090_fw_register, adap->fe, i2c, &dib9090_dib0090_config) == NULL)
+ return -ENODEV;
+ i2c = dib9000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
+ return -ENODEV;
+ dib0700_set_i2c_speed(adap->dev, 2000);
+ if (dib9000_firmware_post_pll_init(adap->fe) < 0)
+ return -ENODEV;
+ release_firmware(state->frontend_firmware);
+ return 0;
+}
+
+static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *state = adap->priv;
+ struct dib0700_state *st = adap->dev->priv;
+ struct i2c_adapter *i2c;
+ struct dvb_frontend *fe_slave;
+ u32 fw_version;
+
+ /* Make use of the new i2c functions from FW 1.20 */
+ dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL);
+ if (fw_version >= 0x10200)
+ st->fw_use_new_i2c_api = 1;
+ dib0700_set_i2c_speed(adap->dev, 340);
+
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ if (request_firmware(&state->frontend_firmware, "dib9090.fw", &adap->dev->udev->dev)) {
+ deb_info("%s: Upload failed. (file not found?)\n", __func__);
+ return -EIO;
+ } else {
+ deb_info("%s: firmware read %Zu bytes.\n", __func__, state->frontend_firmware->size);
+ }
+ nim9090md_config[0].microcode_B_fe_size = state->frontend_firmware->size;
+ nim9090md_config[0].microcode_B_fe_buffer = state->frontend_firmware->data;
+ nim9090md_config[1].microcode_B_fe_size = state->frontend_firmware->size;
+ nim9090md_config[1].microcode_B_fe_buffer = state->frontend_firmware->data;
+
+ dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
+ adap->fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+
+ if (adap->fe == NULL)
+ return -ENODEV;
+
+ i2c = dib9000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+ dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
+
+ fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
+ dib9000_set_slave_frontend(adap->fe, fe_slave);
+
+ return fe_slave == NULL ? -ENODEV : 0;
+}
+
+static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *state = adap->priv;
+ struct i2c_adapter *i2c;
+ struct dvb_frontend *fe_slave;
+ u16 data_dib190[10] = {
+ 1, 0x5374,
+ 2, 0x01ae,
+ 7, 0x0020,
+ 0, 0x00ef,
+ 8, 0x0406,
+ };
+ i2c = dib9000_get_tuner_interface(adap->fe);
+ if (dvb_attach(dib0090_fw_register, adap->fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
+ return -ENODEV;
+ i2c = dib9000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
+ return -ENODEV;
+ dib0700_set_i2c_speed(adap->dev, 2000);
+ if (dib9000_firmware_post_pll_init(adap->fe) < 0)
+ return -ENODEV;
+
+ fe_slave = dib9000_get_slave_frontend(adap->fe, 1);
+ if (fe_slave != NULL) {
+ i2c = dib9000_get_component_bus_interface(adap->fe);
+ dib9000_set_i2c_adapter(fe_slave, i2c);
+
+ i2c = dib9000_get_tuner_interface(fe_slave);
+ if (dvb_attach(dib0090_fw_register, fe_slave, i2c, &nim9090md_dib0090_config[1]) == NULL)
+ return -ENODEV;
+ fe_slave->dvb = adap->fe->dvb;
+ dib9000_fw_set_component_bus_speed(adap->fe, 2000);
+ if (dib9000_firmware_post_pll_init(fe_slave) < 0)
+ return -ENODEV;
+ }
+ release_firmware(state->frontend_firmware);
+
+ return 0;
+}
+
+/* NIM7090 */
+struct dib7090p_best_adc {
+ u32 timf;
+ u32 pll_loopdiv;
+ u32 pll_prediv;
+};
+
+static int dib7090p_get_best_sampling(struct dvb_frontend *fe , struct dib7090p_best_adc *adc)
+{
+ u8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1;
+
+ u16 xtal = 12000;
+ u32 fcp_min = 1900; /* PLL Minimum Frequency comparator KHz */
+ u32 fcp_max = 20000; /* PLL Maximum Frequency comparator KHz */
+ u32 fdem_max = 76000;
+ u32 fdem_min = 69500;
+ u32 fcp = 0, fs = 0, fdem = 0;
+ u32 harmonic_id = 0;
+
+ adc->pll_loopdiv = loopdiv;
+ adc->pll_prediv = prediv;
+ adc->timf = 0;
+
+ deb_info("bandwidth = %d fdem_min =%d", fe->dtv_property_cache.bandwidth_hz, fdem_min);
+
+ /* Find Min and Max prediv */
+ while ((xtal/max_prediv) >= fcp_min)
+ max_prediv++;
+
+ max_prediv--;
+ min_prediv = max_prediv;
+ while ((xtal/min_prediv) <= fcp_max) {
+ min_prediv--;
+ if (min_prediv == 1)
+ break;
+ }
+ deb_info("MIN prediv = %d : MAX prediv = %d", min_prediv, max_prediv);
+
+ min_prediv = 2;
+
+ for (prediv = min_prediv ; prediv < max_prediv; prediv++) {
+ fcp = xtal / prediv;
+ if (fcp > fcp_min && fcp < fcp_max) {
+ for (loopdiv = 1 ; loopdiv < 64 ; loopdiv++) {
+ fdem = ((xtal/prediv) * loopdiv);
+ fs = fdem / 4;
+ /* test min/max system restrictions */
+
+ if ((fdem >= fdem_min) && (fdem <= fdem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz/1000)) {
+ spur = 0;
+ /* test fs harmonics positions */
+ for (harmonic_id = (fe->dtv_property_cache.frequency / (1000*fs)) ; harmonic_id <= ((fe->dtv_property_cache.frequency / (1000*fs))+1) ; harmonic_id++) {
+ if (((fs*harmonic_id) >= ((fe->dtv_property_cache.frequency/1000) - (fe->dtv_property_cache.bandwidth_hz/2000))) && ((fs*harmonic_id) <= ((fe->dtv_property_cache.frequency/1000) + (fe->dtv_property_cache.bandwidth_hz/2000)))) {
+ spur = 1;
+ break;
+ }
+ }
+
+ if (!spur) {
+ adc->pll_loopdiv = loopdiv;
+ adc->pll_prediv = prediv;
+ adc->timf = 2396745143UL/fdem*(1 << 9);
+ adc->timf += ((2396745143UL%fdem) << 9)/fdem;
+ deb_info("loopdiv=%i prediv=%i timf=%i", loopdiv, prediv, adc->timf);
+ break;
+ }
+ }
+ }
+ }
+ if (!spur)
+ break;
+ }
+
+
+ if (adc->pll_loopdiv == 0 && adc->pll_prediv == 0)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int dib7090_agc_startup(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dib0700_adapter_state *state = adap->priv;
+ struct dibx000_bandwidth_config pll;
+ u16 target;
+ struct dib7090p_best_adc adc;
+ int ret;
+
+ ret = state->set_param_save(fe, fep);
+ if (ret < 0)
+ return ret;
+
+ memset(&pll, 0, sizeof(struct dibx000_bandwidth_config));
+ dib0090_pwm_gain_reset(fe);
+ target = (dib0090_get_wbd_offset(fe) * 8 + 1) / 2;
+ dib7000p_set_wbd_ref(fe, target);
+
+ if (dib7090p_get_best_sampling(fe, &adc) == 0) {
+ pll.pll_ratio = adc.pll_loopdiv;
+ pll.pll_prediv = adc.pll_prediv;
+
+ dib7000p_update_pll(fe, &pll);
+ dib7000p_ctrl_timf(fe, DEMOD_TIMF_SET, adc.timf);
+ }
+ return 0;
+}
+
+static struct dib0090_wbd_slope dib7090_wbd_table[] = {
+ { 380, 81, 850, 64, 540, 4},
+ { 860, 51, 866, 21, 375, 4},
+ {1700, 0, 250, 0, 100, 6},
+ {2600, 0, 250, 0, 100, 6},
+ { 0xFFFF, 0, 0, 0, 0, 0},
+};
+
+struct dibx000_agc_config dib7090_agc_config[2] = {
+ {
+ .band_caps = BAND_UHF,
+ /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
+ * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
+ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+
+ .inv_gain = 687,
+ .time_stabiliz = 10,
+
+ .alpha_level = 0,
+ .thlock = 118,
+
+ .wbd_inv = 0,
+ .wbd_ref = 1200,
+ .wbd_sel = 3,
+ .wbd_alpha = 5,
+
+ .agc1_max = 65535,
+ .agc1_min = 0,
+
+ .agc2_max = 65535,
+ .agc2_min = 0,
+
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 32,
+ .agc1_pt3 = 114,
+ .agc1_slope1 = 143,
+ .agc1_slope2 = 144,
+ .agc2_pt1 = 114,
+ .agc2_pt2 = 227,
+ .agc2_slope1 = 116,
+ .agc2_slope2 = 117,
+
+ .alpha_mant = 18,
+ .alpha_exp = 0,
+ .beta_mant = 20,
+ .beta_exp = 59,
+
+ .perform_agc_softsplit = 0,
+ } , {
+ .band_caps = BAND_FM | BAND_VHF | BAND_CBAND,
+ /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
+ * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
+ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+
+ .inv_gain = 732,
+ .time_stabiliz = 10,
+
+ .alpha_level = 0,
+ .thlock = 118,
+
+ .wbd_inv = 0,
+ .wbd_ref = 1200,
+ .wbd_sel = 3,
+ .wbd_alpha = 5,
+
+ .agc1_max = 65535,
+ .agc1_min = 0,
+
+ .agc2_max = 65535,
+ .agc2_min = 0,
+
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 0,
+ .agc1_pt3 = 98,
+ .agc1_slope1 = 0,
+ .agc1_slope2 = 167,
+ .agc2_pt1 = 98,
+ .agc2_pt2 = 255,
+ .agc2_slope1 = 104,
+ .agc2_slope2 = 0,
+
+ .alpha_mant = 18,
+ .alpha_exp = 0,
+ .beta_mant = 20,
+ .beta_exp = 59,
+
+ .perform_agc_softsplit = 0,
+ }
+};
+
+static struct dibx000_bandwidth_config dib7090_clock_config_12_mhz = {
+ 60000, 15000,
+ 1, 5, 0, 0, 0,
+ 0, 0, 1, 1, 2,
+ (3 << 14) | (1 << 12) | (524 << 0),
+ (0 << 25) | 0,
+ 20452225,
+ 15000000,
+};
+
+static struct dib7000p_config nim7090_dib7000p_config = {
+ .output_mpeg2_in_188_bytes = 1,
+ .hostbus_diversity = 1,
+ .tuner_is_baseband = 1,
+ .update_lna = NULL,
+
+ .agc_config_count = 2,
+ .agc = dib7090_agc_config,
+
+ .bw = &dib7090_clock_config_12_mhz,
+
+ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+ .pwm_freq_div = 0,
+
+ .agc_control = dib7090_agc_restart,
+
+ .spur_protect = 0,
+ .disable_sample_and_hold = 0,
+ .enable_current_mirror = 0,
+ .diversity_delay = 0,
+
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .enMpegOutput = 1,
+};
+
+static struct dib7000p_config tfe7090pvr_dib7000p_config[2] = {
+ {
+ .output_mpeg2_in_188_bytes = 1,
+ .hostbus_diversity = 1,
+ .tuner_is_baseband = 1,
+ .update_lna = NULL,
+
+ .agc_config_count = 2,
+ .agc = dib7090_agc_config,
+
+ .bw = &dib7090_clock_config_12_mhz,
+
+ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+ .pwm_freq_div = 0,
+
+ .agc_control = dib7090_agc_restart,
+
+ .spur_protect = 0,
+ .disable_sample_and_hold = 0,
+ .enable_current_mirror = 0,
+ .diversity_delay = 0,
+
+ .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK,
+ .default_i2c_addr = 0x90,
+ .enMpegOutput = 1,
+ }, {
+ .output_mpeg2_in_188_bytes = 1,
+ .hostbus_diversity = 1,
+ .tuner_is_baseband = 1,
+ .update_lna = NULL,
+
+ .agc_config_count = 2,
+ .agc = dib7090_agc_config,
+
+ .bw = &dib7090_clock_config_12_mhz,
+
+ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+ .pwm_freq_div = 0,
+
+ .agc_control = dib7090_agc_restart,
+
+ .spur_protect = 0,
+ .disable_sample_and_hold = 0,
+ .enable_current_mirror = 0,
+ .diversity_delay = 0,
+
+ .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK,
+ .default_i2c_addr = 0x92,
+ .enMpegOutput = 0,
+ }
+};
+
+static const struct dib0090_config nim7090_dib0090_config = {
+ .io.clock_khz = 12000,
+ .io.pll_bypass = 0,
+ .io.pll_range = 0,
+ .io.pll_prediv = 3,
+ .io.pll_loopdiv = 6,
+ .io.adc_clock_ratio = 0,
+ .io.pll_int_loop_filt = 0,
+ .reset = dib7090_tuner_sleep,
+ .sleep = dib7090_tuner_sleep,
+
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = 0,
+
+ .get_adc_power = dib7090_get_adc_power,
+
+ .clkouttobamse = 1,
+ .analog_output = 0,
+
+ .wbd_vhf_offset = 0,
+ .wbd_cband_offset = 0,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 0,
+
+ .fref_clock_ratio = 0,
+
+ .wbd = dib7090_wbd_table,
+
+ .ls_cfg_pad_drv = 0,
+ .data_tx_drv = 0,
+ .low_if = NULL,
+ .in_soc = 1,
+};
+
+static const struct dib0090_config tfe7090pvr_dib0090_config[2] = {
+ {
+ .io.clock_khz = 12000,
+ .io.pll_bypass = 0,
+ .io.pll_range = 0,
+ .io.pll_prediv = 3,
+ .io.pll_loopdiv = 6,
+ .io.adc_clock_ratio = 0,
+ .io.pll_int_loop_filt = 0,
+ .reset = dib7090_tuner_sleep,
+ .sleep = dib7090_tuner_sleep,
+
+ .freq_offset_khz_uhf = 50,
+ .freq_offset_khz_vhf = 70,
+
+ .get_adc_power = dib7090_get_adc_power,
+
+ .clkouttobamse = 1,
+ .analog_output = 0,
+
+ .wbd_vhf_offset = 0,
+ .wbd_cband_offset = 0,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 0,
+
+ .fref_clock_ratio = 0,
+
+ .wbd = dib7090_wbd_table,
+
+ .ls_cfg_pad_drv = 0,
+ .data_tx_drv = 0,
+ .low_if = NULL,
+ .in_soc = 1,
+ }, {
+ .io.clock_khz = 12000,
+ .io.pll_bypass = 0,
+ .io.pll_range = 0,
+ .io.pll_prediv = 3,
+ .io.pll_loopdiv = 6,
+ .io.adc_clock_ratio = 0,
+ .io.pll_int_loop_filt = 0,
+ .reset = dib7090_tuner_sleep,
+ .sleep = dib7090_tuner_sleep,
+
+ .freq_offset_khz_uhf = -50,
+ .freq_offset_khz_vhf = -70,
+
+ .get_adc_power = dib7090_get_adc_power,
+
+ .clkouttobamse = 1,
+ .analog_output = 0,
+
+ .wbd_vhf_offset = 0,
+ .wbd_cband_offset = 0,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 0,
+
+ .fref_clock_ratio = 0,
+
+ .wbd = dib7090_wbd_table,
+
+ .ls_cfg_pad_drv = 0,
+ .data_tx_drv = 0,
+ .low_if = NULL,
+ .in_soc = 1,
+ }
+};
+
+static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
+{
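+ /* presumed power-up/reset sequence for the NIM module: raise the supply
+ * and control GPIOs, pulse GPIO10 low-then-high and release GPIO0 last */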
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", __func__);
+ return -ENODEV;
+ }
+ adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
return adap->fe == NULL ? -ENODEV : 0;
}
+static int nim7090_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe);
+
+ if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &nim7090_dib0090_config) == NULL)
+ return -ENODEV;
+
+ dib7000p_set_gpio(adap->fe, 8, 0, 1);
+
+ st->set_param_save = adap->fe->ops.tuner_ops.set_params;
+ adap->fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ return 0;
+}
+
+static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_state *st = adap->dev->priv;
+
+ /* The TFE7090 requires the dib0700 to not be in master mode */
+ st->disable_streaming_master_mode = 1;
+
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ /* initialize IC 0 */
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", __func__);
+ return -ENODEV;
+ }
+
+ dib0700_set_i2c_speed(adap->dev, 340);
+ adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]);
+
+ if (adap->fe == NULL)
+ return -ENODEV;
+
+ dib7090_slave_reset(adap->fe);
+
+ return 0;
+}
+
+static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
+{
+ struct i2c_adapter *i2c;
+
+ if (adap->dev->adapter[0].fe == NULL) {
+ err("the master dib7090 has to be initialized first");
+ return -ENODEV; /* the master device has not been initialized */
+ }
+
+ i2c = dib7000p_get_i2c_master(adap->dev->adapter[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
+ if (dib7000p_i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", __func__);
+ return -ENODEV;
+ }
+
+ adap->fe = dvb_attach(dib7000p_attach, i2c, 0x92, &tfe7090pvr_dib7000p_config[1]);
+ dib0700_set_i2c_speed(adap->dev, 200);
+
+ return adap->fe == NULL ? -ENODEV : 0;
+}
+
+static int tfe7090pvr_tuner0_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe);
+
+ if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &tfe7090pvr_dib0090_config[0]) == NULL)
+ return -ENODEV;
+
+ dib7000p_set_gpio(adap->fe, 8, 0, 1);
+
+ st->set_param_save = adap->fe->ops.tuner_ops.set_params;
+ adap->fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ return 0;
+}
+
+static int tfe7090pvr_tuner1_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe);
+
+ if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &tfe7090pvr_dib0090_config[1]) == NULL)
+ return -ENODEV;
+
+ dib7000p_set_gpio(adap->fe, 8, 0, 1);
+
+ st->set_param_save = adap->fe->ops.tuner_ops.set_params;
+ adap->fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ return 0;
+}
+
/* STK7070PD */
static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
{
@@ -1839,6 +2779,11 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096GP) },
{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DIVERSITY) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_NIM9090M) },
+/* 70 */{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_NIM8096MD) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_NIM9090MD) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_NIM7090) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090PVR) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -2602,6 +3547,205 @@ struct dvb_usb_device_properties dib0700_devices[] = {
RC_TYPE_NEC,
.change_protocol = dib0700_change_protocol,
},
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = dib90x0_pid_filter,
+ .pid_filter_ctrl = dib90x0_pid_filter_ctrl,
+ .frontend_attach = stk9090m_frontend_attach,
+ .tuner_attach = dib9090_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom STK9090M reference design",
+ { &dib0700_usb_id_table[69], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+ .frontend_attach = nim8096md_frontend_attach,
+ .tuner_attach = nim8096md_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom NIM8096MD reference design",
+ { &dib0700_usb_id_table[70], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = dib90x0_pid_filter,
+ .pid_filter_ctrl = dib90x0_pid_filter_ctrl,
+ .frontend_attach = nim9090md_frontend_attach,
+ .tuner_attach = nim9090md_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom NIM9090MD reference design",
+ { &dib0700_usb_id_table[71], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = nim7090_frontend_attach,
+ .tuner_attach = nim7090_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom NIM7090 reference design",
+ { &dib0700_usb_id_table[72], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = tfe7090pvr_frontend0_attach,
+ .tuner_attach = tfe7090pvr_tuner0_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = tfe7090pvr_frontend1_attach,
+ .tuner_attach = tfe7090pvr_tuner1_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom TFE7090PVR reference design",
+ { &dib0700_usb_id_table[73], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
},
};
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 1a6310b61923..b71540d7e070 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -106,8 +106,13 @@
#define USB_PID_DIBCOM_STK807XP 0x1f90
#define USB_PID_DIBCOM_STK807XPVR 0x1f98
#define USB_PID_DIBCOM_STK8096GP 0x1fa0
+#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
#define USB_PID_DIBCOM_STK7770P 0x1e80
+#define USB_PID_DIBCOM_NIM7090 0x1bb2
+#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4
+#define USB_PID_DIBCOM_NIM9090M 0x2383
+#define USB_PID_DIBCOM_NIM9090MD 0x2384
#define USB_PID_DPOSH_M9206_COLD 0x9206
#define USB_PID_DPOSH_M9206_WARM 0xa090
#define USB_PID_E3C_EC168 0x1689
@@ -312,4 +317,5 @@
#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
+#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 23005b3cf30b..41bacff24960 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -8,60 +8,71 @@
#include "dvb-usb-common.h"
#include <linux/usb/input.h>
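+/* Resolve a keymap entry to an index: either directly via ke->index, or by
+ * scancode lookup, falling back to the first unused (KEY_RESERVED/KEY_UNKNOWN)
+ * slot; returns keymap_size when no suitable entry exists. */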
+static unsigned int
+legacy_dvb_usb_get_keymap_index(const struct input_keymap_entry *ke,
+ struct rc_map_table *keymap,
+ unsigned int keymap_size)
+{
+ unsigned int index;
+ unsigned int scancode;
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ index = ke->index;
+ } else {
+ if (input_scancode_to_scalar(ke, &scancode))
+ return keymap_size;
+
+ /* See if we can match the raw key code. */
+ for (index = 0; index < keymap_size; index++)
+ if (keymap[index].scancode == scancode)
+ break;
+
+ /* See if there is an unused hole in the map */
+ if (index >= keymap_size) {
+ for (index = 0; index < keymap_size; index++) {
+ if (keymap[index].keycode == KEY_RESERVED ||
+ keymap[index].keycode == KEY_UNKNOWN) {
+ break;
+ }
+ }
+ }
+ }
+
+ return index;
+}
+
static int legacy_dvb_usb_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
struct dvb_usb_device *d = input_get_drvdata(dev);
-
struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
- int i;
+ unsigned int keymap_size = d->props.rc.legacy.rc_map_size;
+ unsigned int index;
- /* See if we can match the raw key code. */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].scancode == scancode) {
- *keycode = keymap[i].keycode;
- return 0;
- }
+ index = legacy_dvb_usb_get_keymap_index(ke, keymap, keymap_size);
+ if (index >= keymap_size)
+ return -EINVAL;
- /*
- * If is there extra space, returns KEY_RESERVED,
- * otherwise, input core won't let legacy_dvb_usb_setkeycode
- * to work
- */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].keycode == KEY_RESERVED ||
- keymap[i].keycode == KEY_UNKNOWN) {
- *keycode = KEY_RESERVED;
- return 0;
- }
+ ke->keycode = keymap[index].keycode;
+ if (ke->keycode == KEY_UNKNOWN)
+ ke->keycode = KEY_RESERVED;
+ ke->len = sizeof(keymap[index].scancode);
+ memcpy(&ke->scancode, &keymap[index].scancode, ke->len);
+ ke->index = index;
- return -EINVAL;
+ return 0;
}
static int legacy_dvb_usb_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
struct dvb_usb_device *d = input_get_drvdata(dev);
-
struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
- int i;
-
- /* Search if it is replacing an existing keycode */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].scancode == scancode) {
- keymap[i].keycode = keycode;
- return 0;
- }
-
- /* Search if is there a clean entry. If so, use it */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].keycode == KEY_RESERVED ||
- keymap[i].keycode == KEY_UNKNOWN) {
- keymap[i].scancode = scancode;
- keymap[i].keycode = keycode;
- return 0;
- }
+ unsigned int keymap_size = d->props.rc.legacy.rc_map_size;
+ unsigned int index;
+ index = legacy_dvb_usb_get_keymap_index(ke, keymap, keymap_size);
/*
* FIXME: Currently, it is not possible to increase the size of
* scancode table. For it to happen, one possibility
@@ -69,8 +80,24 @@ static int legacy_dvb_usb_setkeycode(struct input_dev *dev,
* copying data, appending the new key on it, and freeing
* the old one - or maybe just allocating some spare space
*/
+ if (index >= keymap_size)
+ return -EINVAL;
+
+ *old_keycode = keymap[index].keycode;
+ keymap[index].keycode = ke->keycode;
+ __set_bit(ke->keycode, dev->keybit);
+
+ if (*old_keycode != KEY_RESERVED) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (index = 0; index < keymap_size; index++) {
+ if (keymap[index].keycode == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ break;
+ }
+ }
+ }
- return -EINVAL;
+ return 0;
}
/* Remote-control poll function - called every dib->rc_query_interval ms to see
@@ -246,7 +273,7 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
dev->map_name = d->props.rc.core.rc_codes;
dev->change_protocol = d->props.rc.core.change_protocol;
dev->allowed_protos = d->props.rc.core.allowed_protos;
- dev->driver_type = RC_DRIVER_SCANCODE;
+ dev->driver_type = d->props.rc.core.driver_type;
usb_to_input_id(d->udev, &dev->input_id);
dev->input_name = "IR-receiver inside an USB DVB receiver";
dev->input_phys = d->rc_phys;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 65fa9268e7f7..76a80968482a 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -181,6 +181,7 @@ struct dvb_rc_legacy {
* @rc_codes: name of rc codes table
* @protocol: type of protocol(s) currently used by the driver
* @allowed_protos: protocol(s) supported by the driver
+ * @driver_type: indicates whether the device delivers raw IR samples or scancodes
* @change_protocol: callback to change protocol
* @rc_query: called to query an event.
* @rc_interval: time in ms between two queries.
@@ -190,6 +191,7 @@ struct dvb_rc {
char *rc_codes;
u64 protocol;
u64 allowed_protos;
+ enum rc_driver_type driver_type;
int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
char *module_name;
int (*rc_query) (struct dvb_usb_device *d);
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
new file mode 100644
index 000000000000..08f8842ad280
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -0,0 +1,807 @@
+/*
+ * Linux driver for Technisat DVB-S/S2 USB 2.0 device
+ *
+ * Copyright (C) 2010 Patrick Boettcher,
+ * Kernel Labs Inc. PO Box 745, St James, NY 11780
+ *
+ * Development was sponsored by Technisat Digital UK Limited, whose
+ * registered office is Witan Gate House 500 - 600 Witan Gate West,
+ * Milton Keynes, MK9 1SH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND
+ * TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO
+ * THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR
+ * FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE COPYRIGHT HOLDER
+ * NOR TECHNISAT DIGITAL UK LIMITED SHALL BE LIABLE FOR ANY SPECIAL,
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+ * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+ * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS PROGRAM. See the
+ * GNU General Public License for more details.
+ */
+
+#define DVB_USB_LOG_PREFIX "technisat-usb2"
+#include "dvb-usb.h"
+
+#include "stv6110x.h"
+#include "stv090x.h"
+
+/* module parameters */
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "set debugging level (bit-mask: 1=info,2=eeprom,4=i2c,8=rc)." \
+ DVB_USB_DEBUG_STATUS);
+
+/* disables all LED control commands and
+ * also does not start the signal polling thread */
+static int disable_led_control;
+module_param(disable_led_control, int, 0444);
+MODULE_PARM_DESC(disable_led_control,
+ "disable LED control of the device "
+ "(default: 0 - LED control is active).");
+
+/* device private data */
+struct technisat_usb2_state {
+ struct dvb_usb_device *dev;
+ struct delayed_work green_led_work;
+ u8 power_state;
+
+ u16 last_scan_code;
+};
+
+/* debug print helpers */
+#define deb_info(args...) dprintk(debug, 0x01, args)
+#define deb_eeprom(args...) dprintk(debug, 0x02, args)
+#define deb_i2c(args...) dprintk(debug, 0x04, args)
+#define deb_rc(args...) dprintk(debug, 0x08, args)
+
+/* vendor requests */
+#define SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST 0xB3
+#define SET_FRONT_END_RESET_VENDOR_REQUEST 0xB4
+#define GET_VERSION_INFO_VENDOR_REQUEST 0xB5
+#define SET_GREEN_LED_VENDOR_REQUEST 0xB6
+#define SET_RED_LED_VENDOR_REQUEST 0xB7
+#define GET_IR_DATA_VENDOR_REQUEST 0xB8
+#define SET_LED_TIMER_DIVIDER_VENDOR_REQUEST 0xB9
+#define SET_USB_REENUMERATION 0xBA
+
+/* i2c-access methods */
+#define I2C_SPEED_100KHZ_BIT 0x40
+
+#define I2C_STATUS_NAK 7
+#define I2C_STATUS_OK 8
+
+static int technisat_usb2_i2c_access(struct usb_device *udev,
+ u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
+{
+ u8 b[64];
+ int ret, actual_length;
+
+ deb_i2c("i2c-access: %02x, tx: ", device_addr);
+ debug_dump(tx, txlen, deb_i2c);
+ deb_i2c(" ");
+
+ if (txlen > 62) {
+ err("i2c TX buffer can't exceed 62 bytes (dev 0x%02x)",
+ device_addr);
+ txlen = 62;
+ }
+ if (rxlen > 62) {
+ err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)",
+ device_addr);
+ rxlen = 62;
+ }
+
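+ /* request layout: byte 0 carries the speed flag (plus the expected RX
+ * length for reads), byte 1 the 8-bit i2c address with the read bit set
+ * for reads, followed by the TX payload */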
+ b[0] = I2C_SPEED_100KHZ_BIT;
+ b[1] = device_addr << 1;
+
+ if (rx != NULL) {
+ b[0] |= rxlen;
+ b[1] |= 1;
+ }
+
+ memcpy(&b[2], tx, txlen);
+ ret = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, 0x01),
+ b, 2 + txlen,
+ NULL, 1000);
+
+ if (ret < 0) {
+ err("i2c-error: out failed %02x = %d", device_addr, ret);
+ return -ENODEV;
+ }
+
+ ret = usb_bulk_msg(udev,
+ usb_rcvbulkpipe(udev, 0x01),
+ b, 64, &actual_length, 1000);
+ if (ret < 0) {
+ err("i2c-error: in failed %02x = %d", device_addr, ret);
+ return -ENODEV;
+ }
+
+ if (b[0] != I2C_STATUS_OK) {
+ err("i2c-error: %02x = %d", device_addr, b[0]);
+ /* handle tuner-i2c-nak */
+ if (!(b[0] == I2C_STATUS_NAK &&
+ device_addr == 0x60
+ /* && device_is_technisat_usb2 */))
+ return -ENODEV;
+ }
+
+ deb_i2c("status: %d, ", b[0]);
+
+ if (rx != NULL) {
+ memcpy(rx, &b[2], rxlen);
+
+ deb_i2c("rx (%d): ", rxlen);
+ debug_dump(rx, rxlen, deb_i2c);
+ }
+
+ deb_i2c("\n");
+
+ return 0;
+}
+
+static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int num)
+{
+ int ret = 0, i;
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+
+ /* Ensure nobody else (such as the remote control thread) hits the
+ i2c bus while we're sending our sequence of messages */
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ for (i = 0; i < num; i++) {
+ if (i+1 < num && msg[i+1].flags & I2C_M_RD) {
+ ret = technisat_usb2_i2c_access(d->udev, msg[i+1].addr,
+ msg[i].buf, msg[i].len,
+ msg[i+1].buf, msg[i+1].len);
+ if (ret != 0)
+ break;
+ i++;
+ } else {
+ ret = technisat_usb2_i2c_access(d->udev, msg[i].addr,
+ msg[i].buf, msg[i].len,
+ NULL, 0);
+ if (ret != 0)
+ break;
+ }
+ }
+
+ if (ret == 0)
+ ret = i;
+
+ mutex_unlock(&d->i2c_mutex);
+
+ return ret;
+}
+
+static u32 technisat_usb2_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm technisat_usb2_i2c_algo = {
+ .master_xfer = technisat_usb2_i2c_xfer,
+ .functionality = technisat_usb2_i2c_func,
+};
+
+#if 0
+static void technisat_usb2_frontend_reset(struct usb_device *udev)
+{
+ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SET_FRONT_END_RESET_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ 10, 0,
+ NULL, 0, 500);
+}
+#endif
+
+/* LED control */
+enum technisat_usb2_led_state {
+ LED_OFF,
+ LED_BLINK,
+ LED_ON,
+ LED_UNDEFINED
+};
+
+static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum technisat_usb2_led_state state)
+{
+ int ret;
+
+ u8 led[8] = {
+ red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
+ 0
+ };
+
+ if (disable_led_control && state != LED_OFF)
+ return 0;
+
+ switch (state) {
+ case LED_ON:
+ led[1] = 0x82;
+ break;
+ case LED_BLINK:
+ led[1] = 0x82;
+ if (red) {
+ led[2] = 0x02;
+ led[3] = 10;
+ led[4] = 10;
+ } else {
+ led[2] = 0xff;
+ led[3] = 50;
+ led[4] = 50;
+ }
+ led[5] = 1;
+ break;
+
+ default:
+ case LED_OFF:
+ led[1] = 0x80;
+ break;
+ }
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ 0, 0,
+ led, sizeof(led), 500);
+
+ mutex_unlock(&d->i2c_mutex);
+ return ret;
+}
+
+static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
+{
+ int ret;
+ u8 b = 0;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ (red << 8) | green, 0,
+ &b, 1, 500);
+
+ mutex_unlock(&d->i2c_mutex);
+
+ return ret;
+}
+
+static void technisat_usb2_green_led_control(struct work_struct *work)
+{
+ struct technisat_usb2_state *state =
+ container_of(work, struct technisat_usb2_state, green_led_work.work);
+ struct dvb_frontend *fe = state->dev->adapter[0].fe;
+
+ if (state->power_state == 0)
+ goto schedule;
+
+ if (fe != NULL) {
+ enum fe_status status;
+
+ if (fe->ops.read_status(fe, &status) != 0)
+ goto schedule;
+
+ if (status & FE_HAS_LOCK) {
+ u32 ber;
+
+ if (fe->ops.read_ber(fe, &ber) != 0)
+ goto schedule;
+
+ if (ber > 1000)
+ technisat_usb2_set_led(state->dev, 0, LED_BLINK);
+ else
+ technisat_usb2_set_led(state->dev, 0, LED_ON);
+ } else
+ technisat_usb2_set_led(state->dev, 0, LED_OFF);
+ }
+
+schedule:
+ schedule_delayed_work(&state->green_led_work,
+ msecs_to_jiffies(500));
+}
+
+/* method to find out whether the firmware has to be downloaded or not */
+static int technisat_usb2_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc, int *cold)
+{
+ int ret;
+ u8 version[3];
+
+ /* first select the interface */
+ if (usb_set_interface(udev, 0, 1) != 0)
+ err("could not set alternate setting to 0");
+ else
+ info("set alternate setting");
+
+ *cold = 0; /* by default do not download a firmware - just in case something is wrong */
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ GET_VERSION_INFO_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ 0, 0,
+ version, sizeof(version), 500);
+
+ if (ret < 0)
+ *cold = 1;
+ else {
+ info("firmware version: %d.%d", version[1], version[2]);
+ *cold = 0;
+ }
+
+ return 0;
+}
+
+/* power control */
+static int technisat_usb2_power_ctrl(struct dvb_usb_device *d, int level)
+{
+ struct technisat_usb2_state *state = d->priv;
+
+ state->power_state = level;
+
+ if (disable_led_control)
+ return 0;
+
+ /* green led is turned off in any case - will be turned on when tuning */
+ technisat_usb2_set_led(d, 0, LED_OFF);
+ /* red led is turned on all the time */
+ technisat_usb2_set_led(d, 1, LED_ON);
+ return 0;
+}
+
+/* mac address reading - from the eeprom */
+#if 0
+static void technisat_usb2_eeprom_dump(struct dvb_usb_device *d)
+{
+ u8 reg;
+ u8 b[16];
+ int i, j;
+
+ /* full EEPROM dump */
+ for (j = 0; j < 256 * 4; j += 16) {
+ reg = j;
+ if (technisat_usb2_i2c_access(d->udev, 0x50 + j / 256, &reg, 1, b, 16) != 0)
+ break;
+
+ deb_eeprom("EEPROM: %01x%02x: ", j / 256, reg);
+ for (i = 0; i < 16; i++)
+ deb_eeprom("%02x ", b[i]);
+ deb_eeprom("\n");
+ }
+}
+#endif
+
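+/* simple XOR checksum used to validate EEPROM blocks */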
+static u8 technisat_usb2_calc_lrc(const u8 *b, u16 length)
+{
+ u8 lrc = 0;
+ while (--length)
+ lrc ^= *b++;
+ return lrc;
+}
+
+static int technisat_usb2_eeprom_lrc_read(struct dvb_usb_device *d,
+ u16 offset, u8 *b, u16 length, u8 tries)
+{
+ u8 bo = offset & 0xff;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x50 | ((offset >> 8) & 0x3),
+ .buf = &bo,
+ .len = 1
+ }, {
+ .addr = 0x50 | ((offset >> 8) & 0x3),
+ .flags = I2C_M_RD,
+ .buf = b,
+ .len = length
+ }
+ };
+
+ while (tries--) {
+ int status;
+
+ if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
+ break;
+
+ status =
+ technisat_usb2_calc_lrc(b, length - 1) == b[length - 1];
+
+ if (status)
+ return 0;
+ }
+
+ return -EREMOTEIO;
+}
+
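+/* the MAC address lives in an 8-byte, LRC-protected EEPROM block at 0x3f8;
+ * only the first six bytes are the address itself */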
+#define EEPROM_MAC_START 0x3f8
+#define EEPROM_MAC_TOTAL 8
+static int technisat_usb2_read_mac_address(struct dvb_usb_device *d,
+ u8 mac[])
+{
+ u8 buf[EEPROM_MAC_TOTAL];
+
+ if (technisat_usb2_eeprom_lrc_read(d, EEPROM_MAC_START,
+ buf, EEPROM_MAC_TOTAL, 4) != 0)
+ return -ENODEV;
+
+ memcpy(mac, buf, 6);
+ return 0;
+}
+
+/* frontend attach */
+static int technisat_usb2_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ int i;
+ u8 gpio[3] = { 0 }; /* gpio[0..2] drive STV090x GPIOs 2..4 */
+
+ gpio[2] = 1; /* high - voltage ? */
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ gpio[0] = 1;
+ break;
+ case SEC_VOLTAGE_18:
+ gpio[0] = 1;
+ gpio[1] = 1;
+ break;
+ default:
+ case SEC_VOLTAGE_OFF:
+ break;
+ }
+
+ for (i = 0; i < 3; i++)
+ if (stv090x_set_gpio(fe, i+2, 0, gpio[i], 0) != 0)
+ return -EREMOTEIO;
+ return 0;
+}
+
+static struct stv090x_config technisat_usb2_stv090x_config = {
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 8000000,
+ .address = 0x68,
+
+ .ts1_mode = STV090x_TSMODE_DVBCI,
+ .ts1_clk = 13400000,
+ .ts1_tei = 1,
+
+ .repeater_level = STV090x_RPTLEVEL_64,
+
+ .tuner_bbgain = 6,
+};
+
+static struct stv6110x_config technisat_usb2_stv6110x_config = {
+ .addr = 0x60,
+ .refclk = 16000000,
+ .clk_div = 2,
+};
+
+static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
+{
+ struct usb_device *udev = a->dev->udev;
+ int ret;
+
+ a->fe = dvb_attach(stv090x_attach, &technisat_usb2_stv090x_config,
+ &a->dev->i2c_adap, STV090x_DEMODULATOR_0);
+
+ if (a->fe) {
+ struct stv6110x_devctl *ctl;
+
+ ctl = dvb_attach(stv6110x_attach,
+ a->fe,
+ &technisat_usb2_stv6110x_config,
+ &a->dev->i2c_adap);
+
+ if (ctl) {
+ technisat_usb2_stv090x_config.tuner_init = ctl->tuner_init;
+ technisat_usb2_stv090x_config.tuner_sleep = ctl->tuner_sleep;
+ technisat_usb2_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
+ technisat_usb2_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
+ technisat_usb2_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
+ technisat_usb2_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ technisat_usb2_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ technisat_usb2_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
+ technisat_usb2_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
+ technisat_usb2_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
+ technisat_usb2_stv090x_config.tuner_get_status = ctl->tuner_get_status;
+
+ /* call the init function once to initialize
+ tuner's clock output divider and demod's
+ master clock */
+ if (a->fe->ops.init)
+ a->fe->ops.init(a->fe);
+
+ if (mutex_lock_interruptible(&a->dev->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ 0, 0,
+ NULL, 0, 500);
+ mutex_unlock(&a->dev->i2c_mutex);
+
+ if (ret != 0)
+ err("could not set IF_CLK to external");
+
+ a->fe->ops.set_voltage = technisat_usb2_set_voltage;
+
+ /* if everything was successful assign a nice name to the frontend */
+ strlcpy(a->fe->ops.info.name, a->dev->desc->name,
+ sizeof(a->fe->ops.info.name));
+ } else {
+ dvb_frontend_detach(a->fe);
+ a->fe = NULL;
+ }
+ }
+
+ technisat_usb2_set_led_timer(a->dev, 1, 1);
+
+ return a->fe == NULL ? -ENODEV : 0;
+}
+
+/* Remote control */
+
+/* The device delivers raw IR signals to the host; mapping them to just one
+ * remote control is only the default implementation.
+ */
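+/* The firmware reports IR timing as counts of FIRMWARE_CLOCK_TICK /
+ * FIRMWARE_CLOCK_DIVISOR; the MIN/MAX values below bracket the nominal
+ * 889 us half-bit with IR_PERCENT_TOLERANCE of slack. */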
+#define NOMINAL_IR_BIT_TRANSITION_TIME_US 889
+#define NOMINAL_IR_BIT_TIME_US (2 * NOMINAL_IR_BIT_TRANSITION_TIME_US)
+
+#define FIRMWARE_CLOCK_TICK 83333
+#define FIRMWARE_CLOCK_DIVISOR 256
+
+#define IR_PERCENT_TOLERANCE 15
+
+#define NOMINAL_IR_BIT_TRANSITION_TICKS ((NOMINAL_IR_BIT_TRANSITION_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK)
+#define NOMINAL_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICKS / FIRMWARE_CLOCK_DIVISOR)
+
+#define NOMINAL_IR_BIT_TIME_TICKS ((NOMINAL_IR_BIT_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK)
+#define NOMINAL_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICKS / FIRMWARE_CLOCK_DIVISOR)
+
+#define MINIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT - ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100))
+#define MAXIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT + ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100))
+
+#define MINIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT - ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100))
+#define MAXIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT + ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100))
+
+static int technisat_usb2_get_ir(struct dvb_usb_device *d)
+{
+ u8 buf[62], *b;
+ int ret;
+ struct ir_raw_event ev;
+
+ buf[0] = GET_IR_DATA_VENDOR_REQUEST;
+ buf[1] = 0x08;
+ buf[2] = 0x8f;
+ buf[3] = MINIMUM_IR_BIT_TRANSITION_TICK_COUNT;
+ buf[4] = MAXIMUM_IR_BIT_TIME_TICK_COUNT;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ GET_IR_DATA_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ 0, 0,
+ buf, 5, 500);
+ if (ret < 0)
+ goto unlock;
+
+ buf[1] = 0;
+ buf[2] = 0;
+ ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+ GET_IR_DATA_VENDOR_REQUEST,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ 0x8080, 0,
+ buf, sizeof(buf), 500);
+
+unlock:
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret == 1)
+ return 0; /* no key pressed */
+
+ /* decoding */
+ b = buf+1;
+
+#if 0
+ deb_rc("RC: %d ", ret);
+ debug_dump(b, ret, deb_rc);
+#endif
+
+ ev.pulse = 0;
+ while (1) {
+ ev.pulse = !ev.pulse;
+ ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
+ ir_raw_event_store(d->rc_dev, &ev);
+
+ b++;
+ if (*b == 0xff) {
+ ev.pulse = 0;
+ ev.duration = 888888*2;
+ ir_raw_event_store(d->rc_dev, &ev);
+ break;
+ }
+ }
+
+ ir_raw_event_handle(d->rc_dev);
+
+ return 1;
+}
+
+static int technisat_usb2_rc_query(struct dvb_usb_device *d)
+{
+ int ret = technisat_usb2_get_ir(d);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ return 0;
+
+ if (!disable_led_control)
+ technisat_usb2_set_led(d, 1, LED_BLINK);
+
+ return 0;
+}
+
+/* DVB-USB and USB stuff follows */
+static struct usb_device_id technisat_usb2_id_table[] = {
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_DVB_S2) },
+ { 0 } /* Terminating entry */
+};
+
+/* device description */
+static struct dvb_usb_device_properties technisat_usb2_devices = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = CYPRESS_FX2,
+
+ .identify_state = technisat_usb2_identify_state,
+ .firmware = "dvb-usb-SkyStar_USB_HD_FW_v17_63.HEX.fw",
+
+ .size_of_priv = sizeof(struct technisat_usb2_state),
+
+ .i2c_algo = &technisat_usb2_i2c_algo,
+
+ .power_ctrl = technisat_usb2_power_ctrl,
+ .read_mac_address = technisat_usb2_read_mac_address,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .frontend_attach = technisat_usb2_frontend_attach,
+
+ .stream = {
+ .type = USB_ISOC,
+ .count = 8,
+ .endpoint = 0x2,
+ .u = {
+ .isoc = {
+ .framesperurb = 32,
+ .framesize = 2048,
+ .interval = 3,
+ }
+ }
+ },
+
+ .size_of_priv = 0,
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "Technisat SkyStar USB HD (DVB-S/S2)",
+ { &technisat_usb2_id_table[0], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_TECHNISAT_USB2,
+ .module_name = "technisat-usb2",
+ .rc_query = technisat_usb2_rc_query,
+ .allowed_protos = RC_TYPE_ALL,
+ .driver_type = RC_DRIVER_IR_RAW,
+ }
+};
+
+static int technisat_usb2_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct dvb_usb_device *dev;
+
+ if (dvb_usb_device_init(intf, &technisat_usb2_devices, THIS_MODULE,
+ &dev, adapter_nr) != 0)
+ return -ENODEV;
+
+ if (dev) {
+ struct technisat_usb2_state *state = dev->priv;
+ state->dev = dev;
+
+ if (!disable_led_control) {
+ INIT_DELAYED_WORK(&state->green_led_work,
+ technisat_usb2_green_led_control);
+ schedule_delayed_work(&state->green_led_work,
+ msecs_to_jiffies(500));
+ }
+ }
+
+ return 0;
+}
+
+static void technisat_usb2_disconnect(struct usb_interface *intf)
+{
+ struct dvb_usb_device *dev = usb_get_intfdata(intf);
+
+ /* the delayed work is only created when the device is in its warm/hot state */
+ if (dev != NULL) {
+ struct technisat_usb2_state *state = dev->priv;
+ if (state != NULL) {
+ cancel_delayed_work_sync(&state->green_led_work);
+ flush_scheduled_work();
+ }
+ }
+
+ dvb_usb_device_exit(intf);
+}
+
+static struct usb_driver technisat_usb2_driver = {
+ .name = "dvb_usb_technisat_usb2",
+ .probe = technisat_usb2_probe,
+ .disconnect = technisat_usb2_disconnect,
+ .id_table = technisat_usb2_id_table,
+};
+
+/* module stuff */
+static int __init technisat_usb2_module_init(void)
+{
+ int result = usb_register(&technisat_usb2_driver);
+ if (result) {
+ err("usb_register failed. Code %d", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static void __exit technisat_usb2_module_exit(void)
+{
+ usb_deregister(&technisat_usb2_driver);
+}
+
+module_init(technisat_usb2_module_init);
+module_exit(technisat_usb2_module_exit);
+
+MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
+MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index b8519ba511e5..a20c1532726a 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -349,6 +349,14 @@ config DVB_DIB7000P
A DVB-T tuner module. Designed for mobile usage. Say Y when you want
to support this frontend.
+config DVB_DIB9000
+ tristate "DiBcom 9000"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-T tuner module. Designed for mobile usage. Say Y when you want
+ to support this frontend.
+
config DVB_TDA10048
tristate "Philips TDA10048HN based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index b1d9525aa7e3..31dd201c57ce 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DVB_DIB3000MC) += dib3000mc.o dibx000_common.o
obj-$(CONFIG_DVB_DIB7000M) += dib7000m.o dibx000_common.o
obj-$(CONFIG_DVB_DIB7000P) += dib7000p.o dibx000_common.o
obj-$(CONFIG_DVB_DIB8000) += dib8000.o dibx000_common.o
+obj-$(CONFIG_DVB_DIB9000) += dib9000.o dibx000_common.o
obj-$(CONFIG_DVB_MT312) += mt312.o
obj-$(CONFIG_DVB_VES1820) += ves1820.o
obj-$(CONFIG_DVB_VES1X93) += ves1x93.o
diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
index 65240b7801e8..52ff1a252a90 100644
--- a/drivers/media/dvb/frontends/dib0090.c
+++ b/drivers/media/dvb/frontends/dib0090.c
@@ -45,6 +45,7 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
} \
} while (0)
+#define CONFIG_SYS_DVBT
#define CONFIG_SYS_ISDBT
#define CONFIG_BAND_CBAND
#define CONFIG_BAND_VHF
@@ -76,6 +77,34 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define EN_SBD 0x44E9
#define EN_CAB 0x88E9
+/* Calibration defines */
+#define DC_CAL 0x1
+#define WBD_CAL 0x2
+#define TEMP_CAL 0x4
+#define CAPTRIM_CAL 0x8
+
+#define KROSUS_PLL_LOCKED 0x800
+#define KROSUS 0x2
+
+/* Use those defines to identify SOC version */
+#define SOC 0x02
+#define SOC_7090_P1G_11R1 0x82
+#define SOC_7090_P1G_21R1 0x8a
+#define SOC_8090_P1G_11R1 0x86
+#define SOC_8090_P1G_21R1 0x8e
+
+/* otherwise use these ones to check */
+#define P1A_B 0x0
+#define P1C 0x1
+#define P1D_E_F 0x3
+#define P1G 0x7
+#define P1G_21R2 0xf
+
+#define MP001 0x1 /* Single 9090/8096 */
+#define MP005 0x4 /* Single Sband */
+#define MP008 0x6 /* Dual diversity VHF-UHF-LBAND */
+#define MP009 0x7 /* Dual diversity 29098 CBAND-UHF-LBAND-SBAND */
+
#define pgm_read_word(w) (*w)
struct dc_calibration;
@@ -84,7 +113,7 @@ struct dib0090_tuning {
u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
u8 switch_trim;
u8 lna_tune;
- u8 lna_bias;
+ u16 lna_bias;
u16 v2i;
u16 mix;
u16 load;
@@ -99,13 +128,19 @@ struct dib0090_pll {
u8 topresc;
};
+struct dib0090_identity {
+ u8 version;
+ u8 product;
+ u8 p1g;
+ u8 in_soc;
+};
+
struct dib0090_state {
struct i2c_adapter *i2c;
struct dvb_frontend *fe;
const struct dib0090_config *config;
u8 current_band;
- u16 revision;
enum frontend_tune_state tune_state;
u32 current_rf;
@@ -143,7 +178,26 @@ struct dib0090_state {
u8 tuner_is_tuned;
u8 agc_freeze;
- u8 reset;
+ struct dib0090_identity identity;
+
+ u32 rf_request;
+ u8 current_standard;
+
+ u8 calibrate;
+ u32 rest;
+ u16 bias;
+ s16 temperature;
+
+ u8 wbd_calibration_gain;
+ const struct dib0090_wbd_slope *current_wbd_table;
+ u16 wbdmux;
+};
+
+struct dib0090_fw_state {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend *fe;
+ struct dib0090_identity identity;
+ const struct dib0090_config *config;
};
static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
@@ -171,6 +225,28 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
return 0;
}
+static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
+{
+ u8 b[2];
+ struct i2c_msg msg = {.addr = reg, .flags = I2C_M_RD, .buf = b, .len = 2 };
+ if (i2c_transfer(state->i2c, &msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ return 0;
+ }
+ return (b[0] << 8) | b[1];
+}
+
+static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
+{
+ u8 b[2] = { val >> 8, val & 0xff };
+ struct i2c_msg msg = {.addr = reg, .flags = 0, .buf = b, .len = 2 };
+ if (i2c_transfer(state->i2c, &msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
#define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)
#define ADC_TARGET -220
#define GAIN_ALPHA 5
@@ -183,89 +259,327 @@ static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b,
} while (--c);
}
-static u16 dib0090_identify(struct dvb_frontend *fe)
+static int dib0090_identify(struct dvb_frontend *fe)
{
struct dib0090_state *state = fe->tuner_priv;
u16 v;
+ struct dib0090_identity *identity = &state->identity;
v = dib0090_read_reg(state, 0x1a);
-#ifdef FIRMWARE_FIREFLY
- /* pll is not locked locked */
- if (!(v & 0x800))
- dprintk("FE%d : Identification : pll is not yet locked", fe->id);
-#endif
+ identity->p1g = 0;
+ identity->in_soc = 0;
+
+ dprintk("Tuner identification (Version = 0x%04x)", v);
/* without PLL lock info */
- v &= 0x3ff;
- dprintk("P/V: %04x:", v);
+ v &= ~KROSUS_PLL_LOCKED;
- if ((v >> 8) & 0xf)
- dprintk("FE%d : Product ID = 0x%x : KROSUS", fe->id, (v >> 8) & 0xf);
- else
- return 0xff;
-
- v &= 0xff;
- if (((v >> 5) & 0x7) == 0x1)
- dprintk("FE%d : MP001 : 9090/8096", fe->id);
- else if (((v >> 5) & 0x7) == 0x4)
- dprintk("FE%d : MP005 : Single Sband", fe->id);
- else if (((v >> 5) & 0x7) == 0x6)
- dprintk("FE%d : MP008 : diversity VHF-UHF-LBAND", fe->id);
- else if (((v >> 5) & 0x7) == 0x7)
- dprintk("FE%d : MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND", fe->id);
- else
- return 0xff;
-
- /* revision only */
- if ((v & 0x1f) == 0x3)
- dprintk("FE%d : P1-D/E/F detected", fe->id);
- else if ((v & 0x1f) == 0x1)
- dprintk("FE%d : P1C detected", fe->id);
- else if ((v & 0x1f) == 0x0) {
-#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
- dprintk("FE%d : P1-A/B detected: using previous driver - support will be removed soon", fe->id);
- dib0090_p1b_register(fe);
-#else
- dprintk("FE%d : P1-A/B detected: driver is deactivated - not available", fe->id);
- return 0xff;
-#endif
+ identity->version = v & 0xff;
+ identity->product = (v >> 8) & 0xf;
+
+ if (identity->product != KROSUS)
+ goto identification_error;
+
+ if ((identity->version & 0x3) == SOC) {
+ identity->in_soc = 1;
+ switch (identity->version) {
+ case SOC_8090_P1G_11R1:
+ dprintk("SOC 8090 P1-G11R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ case SOC_8090_P1G_21R1:
+ dprintk("SOC 8090 P1-G21R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ case SOC_7090_P1G_11R1:
+ dprintk("SOC 7090 P1-G11R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ case SOC_7090_P1G_21R1:
+ dprintk("SOC 7090 P1-G21R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ default:
+ goto identification_error;
+ }
+ } else {
+ switch ((identity->version >> 5) & 0x7) {
+ case MP001:
+ dprintk("MP001 : 9090/8096");
+ break;
+ case MP005:
+ dprintk("MP005 : Single Sband");
+ break;
+ case MP008:
+ dprintk("MP008 : diversity VHF-UHF-LBAND");
+ break;
+ case MP009:
+ dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+ break;
+ default:
+ goto identification_error;
+ }
+
+ switch (identity->version & 0x1f) {
+ case P1G_21R2:
+ dprintk("P1G_21R2 detected");
+ identity->p1g = 1;
+ break;
+ case P1G:
+ dprintk("P1G detected");
+ identity->p1g = 1;
+ break;
+ case P1D_E_F:
+ dprintk("P1D/E/F detected");
+ break;
+ case P1C:
+ dprintk("P1C detected");
+ break;
+ case P1A_B:
+ dprintk("P1-A/B detected: driver is deactivated - not available");
+ goto identification_error;
+ break;
+ default:
+ goto identification_error;
+ }
}
- return v;
+ return 0;
+
+identification_error:
+ return -EIO;
+}
+
+static int dib0090_fw_identify(struct dvb_frontend *fe)
+{
+ struct dib0090_fw_state *state = fe->tuner_priv;
+ struct dib0090_identity *identity = &state->identity;
+
+ u16 v = dib0090_fw_read_reg(state, 0x1a);
+ identity->p1g = 0;
+ identity->in_soc = 0;
+
+ dprintk("FE: Tuner identification (Version = 0x%04x)", v);
+
+ /* without PLL lock info */
+ v &= ~KROSUS_PLL_LOCKED;
+
+ identity->version = v & 0xff;
+ identity->product = (v >> 8) & 0xf;
+
+ if (identity->product != KROSUS)
+ goto identification_error;
+
+ if ((identity->version & 0x3) == SOC) {
+ identity->in_soc = 1;
+ switch (identity->version) {
+ case SOC_8090_P1G_11R1:
+ dprintk("SOC 8090 P1-G11R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ case SOC_8090_P1G_21R1:
+ dprintk("SOC 8090 P1-G21R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ case SOC_7090_P1G_11R1:
+ dprintk("SOC 7090 P1-G11R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ case SOC_7090_P1G_21R1:
+ dprintk("SOC 7090 P1-G21R1 Has been detected");
+ identity->p1g = 1;
+ break;
+ default:
+ goto identification_error;
+ }
+ } else {
+ switch ((identity->version >> 5) & 0x7) {
+ case MP001:
+ dprintk("MP001 : 9090/8096");
+ break;
+ case MP005:
+ dprintk("MP005 : Single Sband");
+ break;
+ case MP008:
+ dprintk("MP008 : diversity VHF-UHF-LBAND");
+ break;
+ case MP009:
+ dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+ break;
+ default:
+ goto identification_error;
+ }
+
+ switch (identity->version & 0x1f) {
+ case P1G_21R2:
+ dprintk("P1G_21R2 detected");
+ identity->p1g = 1;
+ break;
+ case P1G:
+ dprintk("P1G detected");
+ identity->p1g = 1;
+ break;
+ case P1D_E_F:
+ dprintk("P1D/E/F detected");
+ break;
+ case P1C:
+ dprintk("P1C detected");
+ break;
+ case P1A_B:
+ dprintk("P1-A/B detected: driver is deactivated - not available");
+ goto identification_error;
+ break;
+ default:
+ goto identification_error;
+ }
+ }
+
+ return 0;
+
+identification_error:
+ return -EIO;
}
static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg)
{
struct dib0090_state *state = fe->tuner_priv;
+ u16 PllCfg, i, v;
HARD_RESET(state);
- dib0090_write_reg(state, 0x24, EN_PLL);
+ dib0090_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
dib0090_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */
- /* adcClkOutRatio=8->7, release reset */
- dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0);
+ if (!cfg->in_soc) {
+ /* adcClkOutRatio=8->7, release reset */
+ dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0);
+ if (cfg->clkoutdrive != 0)
+ dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8)
+ | (cfg->clkoutdrive << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0));
+ else
+ dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8)
+ | (7 << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0));
+ }
+
+ /* Read the current PLL config */
+ PllCfg = dib0090_read_reg(state, 0x21);
+
+ /** Reconfigure PLL if current setting is different from default setting **/
+ if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && (!cfg->in_soc)
+ && !cfg->io.pll_bypass) {
+
+ /* Set Bypass mode */
+ PllCfg |= (1 << 15);
+ dib0090_write_reg(state, 0x21, PllCfg);
+
+ /* Set Reset Pll */
+ PllCfg &= ~(1 << 13);
+ dib0090_write_reg(state, 0x21, PllCfg);
+
+ /*** Set new Pll configuration in bypass and reset state ***/
+ PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv);
+ dib0090_write_reg(state, 0x21, PllCfg);
+
+ /* Remove Reset Pll */
+ PllCfg |= (1 << 13);
+ dib0090_write_reg(state, 0x21, PllCfg);
+
+ /*** Wait for PLL lock ***/
+ i = 100;
+ do {
+ v = !!(dib0090_read_reg(state, 0x1a) & 0x800);
+ if (v)
+ break;
+ } while (--i);
+
+ if (i == 0) {
+ dprintk("Pll: Unable to lock Pll");
+ return;
+ }
+
+ /* Finally Remove Bypass mode */
+ PllCfg &= ~(1 << 15);
+ dib0090_write_reg(state, 0x21, PllCfg);
+ }
+
+ if (cfg->io.pll_bypass) {
+ PllCfg |= (cfg->io.pll_bypass << 15);
+ dib0090_write_reg(state, 0x21, PllCfg);
+ }
+}
+
+static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg)
+{
+ struct dib0090_fw_state *state = fe->tuner_priv;
+ u16 PllCfg;
+ u16 v;
+ int i;
+
+ dprintk("fw reset digital");
+ HARD_RESET(state);
+
+ dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
+ dib0090_fw_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */
+
+ dib0090_fw_write_reg(state, 0x20,
+ ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (cfg->data_tx_drv << 4) | cfg->ls_cfg_pad_drv);
+
+ v = (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 9) | (0 << 8) | (cfg->clkouttobamse << 4) | (0 << 2) | (0);
if (cfg->clkoutdrive != 0)
- dib0090_write_reg(state, 0x23,
- (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 10) | (1 << 9) | (0 << 8) | (cfg->clkoutdrive << 5) | (cfg->
- clkouttobamse
- << 4) | (0
- <<
- 2)
- | (0));
+ v |= cfg->clkoutdrive << 5;
else
- dib0090_write_reg(state, 0x23,
- (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 10) | (1 << 9) | (0 << 8) | (7 << 5) | (cfg->
- clkouttobamse << 4) | (0
- <<
- 2)
- | (0));
+ v |= 7 << 5;
+
+ v |= 2 << 10;
+ dib0090_fw_write_reg(state, 0x23, v);
+
+ /* Read the current PLL config */
+ PllCfg = dib0090_fw_read_reg(state, 0x21);
+
+ /** Reconfigure PLL if current setting is different from default setting **/
+ if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && !cfg->io.pll_bypass) {
- /* enable pll, de-activate reset, ratio: 2/1 = 60MHz */
- dib0090_write_reg(state, 0x21,
- (cfg->io.pll_bypass << 15) | (1 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv));
+ /* Set Bypass mode */
+ PllCfg |= (1 << 15);
+ dib0090_fw_write_reg(state, 0x21, PllCfg);
+ /* Set Reset Pll */
+ PllCfg &= ~(1 << 13);
+ dib0090_fw_write_reg(state, 0x21, PllCfg);
+
+ /*** Set new Pll configuration in bypass and reset state ***/
+ PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv);
+ dib0090_fw_write_reg(state, 0x21, PllCfg);
+
+ /* Remove Reset Pll */
+ PllCfg |= (1 << 13);
+ dib0090_fw_write_reg(state, 0x21, PllCfg);
+
+ /*** Wait for PLL lock ***/
+ i = 100;
+ do {
+ v = !!(dib0090_fw_read_reg(state, 0x1a) & 0x800);
+ if (v)
+ break;
+ } while (--i);
+
+ if (i == 0) {
+ dprintk("Pll: Unable to lock Pll");
+ return -EIO;
+ }
+
+ /* Finally Remove Bypass mode */
+ PllCfg &= ~(1 << 15);
+ dib0090_fw_write_reg(state, 0x21, PllCfg);
+ }
+
+ if (cfg->io.pll_bypass) {
+ PllCfg |= (cfg->io.pll_bypass << 15);
+ dib0090_fw_write_reg(state, 0x21, PllCfg);
+ }
+
+ return dib0090_fw_identify(fe);
}
static int dib0090_wakeup(struct dvb_frontend *fe)
@@ -273,6 +587,9 @@ static int dib0090_wakeup(struct dvb_frontend *fe)
struct dib0090_state *state = fe->tuner_priv;
if (state->config->sleep)
state->config->sleep(fe, 0);
+
+ /* enable dataTX in case we have been restarted at the wrong moment */
+ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14));
return 0;
}
@@ -292,8 +609,75 @@ void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
else
dib0090_write_reg(state, 0x04, 1);
}
+
EXPORT_SYMBOL(dib0090_dcc_freq);
+static const u16 bb_ramp_pwm_normal_socs[] = {
+ 550, /* max BB gain in 10th of dB */
+ (1 << 9) | 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */
+ 440,
+ (4 << 9) | 0, /* BB_RAMP3 = 26dB */
+ (0 << 9) | 208, /* BB_RAMP4 */
+ (4 << 9) | 208, /* BB_RAMP5 = 29dB */
+ (0 << 9) | 440, /* BB_RAMP6 */
+};
+
+static const u16 rf_ramp_pwm_cband_7090[] = {
+ 280, /* max RF gain in 10th of dB */
+ 18, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
+ 504, /* ramp_max = maximum X used on the ramp */
+ (29 << 10) | 364, /* RF_RAMP5, LNA 1 = 8dB */
+ (0 << 10) | 504, /* RF_RAMP6, LNA 1 */
+ (60 << 10) | 228, /* RF_RAMP7, LNA 2 = 7.7dB */
+ (0 << 10) | 364, /* RF_RAMP8, LNA 2 */
+ (34 << 10) | 109, /* GAIN_4_1, LNA 3 = 6.8dB */
+ (0 << 10) | 228, /* GAIN_4_2, LNA 3 */
+ (37 << 10) | 0, /* RF_RAMP3, LNA 4 = 6.2dB */
+ (0 << 10) | 109, /* RF_RAMP4, LNA 4 */
+};
+
+static const u16 rf_ramp_pwm_cband_8090[] = {
+ 345, /* max RF gain in 10th of dB */
+ 29, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
+ 1000, /* ramp_max = maximum X used on the ramp */
+ (35 << 10) | 772, /* RF_RAMP3, LNA 1 = 8dB */
+ (0 << 10) | 1000, /* RF_RAMP4, LNA 1 */
+ (58 << 10) | 496, /* RF_RAMP5, LNA 2 = 9.5dB */
+ (0 << 10) | 772, /* RF_RAMP6, LNA 2 */
+ (27 << 10) | 200, /* RF_RAMP7, LNA 3 = 10.5dB */
+ (0 << 10) | 496, /* RF_RAMP8, LNA 3 */
+ (40 << 10) | 0, /* GAIN_4_1, LNA 4 = 7dB */
+ (0 << 10) | 200, /* GAIN_4_2, LNA 4 */
+};
+
+static const u16 rf_ramp_pwm_uhf_7090[] = {
+ 407, /* max RF gain in 10th of dB */
+ 13, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
+ 529, /* ramp_max = maximum X used on the ramp */
+ (23 << 10) | 0, /* RF_RAMP3, LNA 1 = 14.7dB */
+ (0 << 10) | 176, /* RF_RAMP4, LNA 1 */
+ (63 << 10) | 400, /* RF_RAMP5, LNA 2 = 8dB */
+ (0 << 10) | 529, /* RF_RAMP6, LNA 2 */
+ (48 << 10) | 316, /* RF_RAMP7, LNA 3 = 6.8dB */
+ (0 << 10) | 400, /* RF_RAMP8, LNA 3 */
+ (29 << 10) | 176, /* GAIN_4_1, LNA 4 = 11.5dB */
+ (0 << 10) | 316, /* GAIN_4_2, LNA 4 */
+};
+
+static const u16 rf_ramp_pwm_uhf_8090[] = {
+ 388, /* max RF gain in 10th of dB */
+ 26, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
+ 1008, /* ramp_max = maximum X used on the ramp */
+ (11 << 10) | 0, /* RF_RAMP3, LNA 1 = 14.7dB */
+ (0 << 10) | 369, /* RF_RAMP4, LNA 1 */
+ (41 << 10) | 809, /* RF_RAMP5, LNA 2 = 8dB */
+ (0 << 10) | 1008, /* RF_RAMP6, LNA 2 */
+ (27 << 10) | 659, /* RF_RAMP7, LNA 3 = 6dB */
+ (0 << 10) | 809, /* RF_RAMP8, LNA 3 */
+ (14 << 10) | 369, /* GAIN_4_1, LNA 4 = 11.5dB */
+ (0 << 10) | 659, /* GAIN_4_2, LNA 4 */
+};
+
static const u16 rf_ramp_pwm_cband[] = {
0, /* max RF gain in 10th of dB */
0, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
@@ -326,6 +710,16 @@ static const u16 rf_ramp_uhf[] = {
0, 0, 127, /* CBAND : 0.0 dB */
};
+static const u16 rf_ramp_cband_broadmatching[] = /* for p1G only */
+{
+ 314, /* Calibrated at 200MHz order has been changed g4-g3-g2-g1 */
+ 84, 314, 127, /* LNA1 */
+ 80, 230, 255, /* LNA2 */
+ 80, 150, 127, /* LNA3 It was measured 12dB, do not lock if 120 */
+ 70, 70, 127, /* LNA4 */
+ 0, 0, 127, /* CBAND */
+};
+
static const u16 rf_ramp_cband[] = {
332, /* max RF gain in 10th of dB */
132, 252, 127, /* LNA1, dB */
@@ -380,8 +774,8 @@ static const u16 bb_ramp_pwm_normal[] = {
};
struct slope {
- int16_t range;
- int16_t slope;
+ s16 range;
+ s16 slope;
};
static u16 slopes_to_scale(const struct slope *slopes, u8 num, s16 val)
{
@@ -597,19 +991,39 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
#endif
#ifdef CONFIG_BAND_CBAND
if (state->current_band == BAND_CBAND) {
- dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband);
- dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ if (state->identity.in_soc) {
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
+ if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_8090);
+ else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090);
+ } else {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ }
} else
#endif
#ifdef CONFIG_BAND_VHF
if (state->current_band == BAND_VHF) {
- dib0090_set_rframp_pwm(state, rf_ramp_pwm_vhf);
- dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ if (state->identity.in_soc) {
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
+ } else {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_vhf);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ }
} else
#endif
{
- dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf);
- dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ if (state->identity.in_soc) {
+ if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf_8090);
+ else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf_7090);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
+ } else {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ }
}
if (state->rf_ramp[0] != 0)
@@ -617,11 +1031,21 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
else
dib0090_write_reg(state, 0x32, (0 << 11));
+ dib0090_write_reg(state, 0x04, 0x01);
dib0090_write_reg(state, 0x39, (1 << 10));
}
}
+
EXPORT_SYMBOL(dib0090_pwm_gain_reset);
+static u32 dib0090_get_slow_adc_val(struct dib0090_state *state)
+{
+ u16 adc_val = dib0090_read_reg(state, 0x1d);
+ if (state->identity.in_soc)
+ adc_val >>= 2;
+ return adc_val;
+}
+
int dib0090_gain_control(struct dvb_frontend *fe)
{
struct dib0090_state *state = fe->tuner_priv;
@@ -643,18 +1067,21 @@ int dib0090_gain_control(struct dvb_frontend *fe)
} else
#endif
#ifdef CONFIG_BAND_VHF
- if (state->current_band == BAND_VHF) {
+ if (state->current_band == BAND_VHF && !state->identity.p1g) {
dib0090_set_rframp(state, rf_ramp_vhf);
dib0090_set_bbramp(state, bb_ramp_boost);
} else
#endif
#ifdef CONFIG_BAND_CBAND
- if (state->current_band == BAND_CBAND) {
+ if (state->current_band == BAND_CBAND && !state->identity.p1g) {
dib0090_set_rframp(state, rf_ramp_cband);
dib0090_set_bbramp(state, bb_ramp_boost);
} else
#endif
- {
+ if ((state->current_band == BAND_CBAND || state->current_band == BAND_VHF) && state->identity.p1g) {
+ dib0090_set_rframp(state, rf_ramp_cband_broadmatching);
+ dib0090_set_bbramp(state, bb_ramp_boost);
+ } else {
dib0090_set_rframp(state, rf_ramp_uhf);
dib0090_set_bbramp(state, bb_ramp_boost);
}
@@ -669,17 +1096,25 @@ int dib0090_gain_control(struct dvb_frontend *fe)
*tune_state = CT_AGC_STEP_0;
} else if (!state->agc_freeze) {
- s16 wbd;
+ s16 wbd = 0, i, cnt;
int adc;
- wbd_val = dib0090_read_reg(state, 0x1d);
+ wbd_val = dib0090_get_slow_adc_val(state);
- /* read and calc the wbd power */
- wbd = dib0090_wbd_to_db(state, wbd_val);
+ if (*tune_state == CT_AGC_STEP_0)
+ cnt = 5;
+ else
+ cnt = 1;
+
+ for (i = 0; i < cnt; i++) {
+ wbd_val = dib0090_get_slow_adc_val(state);
+ wbd += dib0090_wbd_to_db(state, wbd_val);
+ }
+ wbd /= cnt;
wbd_error = state->wbd_target - wbd;
if (*tune_state == CT_AGC_STEP_0) {
- if (wbd_error < 0 && state->rf_gain_limit > 0) {
+ if (wbd_error < 0 && state->rf_gain_limit > 0 && !state->identity.p1g) {
#ifdef CONFIG_BAND_CBAND
/* in case of CBAND tune reduce first the lt_gain2 before adjusting the RF gain */
u8 ltg2 = (state->rf_lt_def >> 10) & 0x7;
@@ -700,39 +1135,39 @@ int dib0090_gain_control(struct dvb_frontend *fe)
adc_error = (s16) (((s32) ADC_TARGET) - adc);
#ifdef CONFIG_STANDARD_DAB
if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB)
- adc_error += 130;
+ adc_error -= 10;
#endif
#ifdef CONFIG_STANDARD_DVBT
if (state->fe->dtv_property_cache.delivery_system == STANDARD_DVBT &&
- (state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16))
+ (state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16))
adc_error += 60;
#endif
#ifdef CONFIG_SYS_ISDBT
if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) && (((state->fe->dtv_property_cache.layer[0].segment_count >
- 0)
- &&
- ((state->fe->dtv_property_cache.layer[0].modulation ==
- QAM_64)
- || (state->fe->dtv_property_cache.layer[0].
- modulation == QAM_16)))
- ||
- ((state->fe->dtv_property_cache.layer[1].segment_count >
- 0)
- &&
- ((state->fe->dtv_property_cache.layer[1].modulation ==
- QAM_64)
- || (state->fe->dtv_property_cache.layer[1].
- modulation == QAM_16)))
- ||
- ((state->fe->dtv_property_cache.layer[2].segment_count >
- 0)
- &&
- ((state->fe->dtv_property_cache.layer[2].modulation ==
- QAM_64)
- || (state->fe->dtv_property_cache.layer[2].
- modulation == QAM_16)))
- )
- )
+ 0)
+ &&
+ ((state->fe->dtv_property_cache.layer[0].modulation ==
+ QAM_64)
+ || (state->fe->dtv_property_cache.
+ layer[0].modulation == QAM_16)))
+ ||
+ ((state->fe->dtv_property_cache.layer[1].segment_count >
+ 0)
+ &&
+ ((state->fe->dtv_property_cache.layer[1].modulation ==
+ QAM_64)
+ || (state->fe->dtv_property_cache.
+ layer[1].modulation == QAM_16)))
+ ||
+ ((state->fe->dtv_property_cache.layer[2].segment_count >
+ 0)
+ &&
+ ((state->fe->dtv_property_cache.layer[2].modulation ==
+ QAM_64)
+ || (state->fe->dtv_property_cache.
+ layer[2].modulation == QAM_16)))
+ )
+ )
adc_error += 60;
#endif
@@ -760,9 +1195,9 @@ int dib0090_gain_control(struct dvb_frontend *fe)
}
#ifdef DEBUG_AGC
dprintk
- ("FE: %d, tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm",
- (u32) fe->id, (u32) *tune_state, (u32) adc, (u32) adc_error, (u32) wbd, (u32) wbd_error, (u32) wbd_val,
- (u32) state->rf_gain_limit >> WBD_ALPHA, (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA));
+ ("tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm",
+ (u32) *tune_state, (u32) adc, (u32) adc_error, (u32) wbd, (u32) wbd_error, (u32) wbd_val,
+ (u32) state->rf_gain_limit >> WBD_ALPHA, (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA));
#endif
}
@@ -771,6 +1206,7 @@ int dib0090_gain_control(struct dvb_frontend *fe)
dib0090_gain_apply(state, adc_error, wbd_error, apply_gain_immediatly);
return ret;
}
+
EXPORT_SYMBOL(dib0090_gain_control);
void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
@@ -785,13 +1221,47 @@ void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 *
if (rflt)
*rflt = (state->rf_lt_def >> 10) & 0x7;
}
+
EXPORT_SYMBOL(dib0090_get_current_gain);
-u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner)
+u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
{
- struct dib0090_state *st = tuner->tuner_priv;
- return st->wbd_offset;
+ struct dib0090_state *state = fe->tuner_priv;
+ u32 f_MHz = state->fe->dtv_property_cache.frequency / 1000000;
+ s32 current_temp = state->temperature;
+ s32 wbd_thot, wbd_tcold;
+ const struct dib0090_wbd_slope *wbd = state->current_wbd_table;
+
+ while (f_MHz > wbd->max_freq)
+ wbd++;
+
+ dprintk("using wbd-table-entry with max freq %d", wbd->max_freq);
+
+ if (current_temp < 0)
+ current_temp = 0;
+ if (current_temp > 128)
+ current_temp = 128;
+
+ state->wbdmux &= ~(7 << 13);
+ if (wbd->wbd_gain != 0)
+ state->wbdmux |= (wbd->wbd_gain << 13);
+ else
+ state->wbdmux |= (4 << 13);
+
+ dib0090_write_reg(state, 0x10, state->wbdmux);
+
+ wbd_thot = wbd->offset_hot - (((u32) wbd->slope_hot * f_MHz) >> 6);
+ wbd_tcold = wbd->offset_cold - (((u32) wbd->slope_cold * f_MHz) >> 6);
+
+ wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7;
+
+ state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold);
+ dprintk("wbd-target: %d dB", (u32) state->wbd_target);
+ dprintk("wbd offset applied is %d", wbd_tcold);
+
+ return state->wbd_offset + wbd_tcold;
}
+
EXPORT_SYMBOL(dib0090_get_wbd_offset);
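The hot/cold interpolation performed in dib0090_get_wbd_offset() above can be checked with a stand-alone sketch (not part of the patch). The table entry is taken from dib0090_wbd_table_default further down in this patch; the frequency and temperature values are hypothetical.

#include <stdio.h>

int main(void)
{
	/* entry {470, 0, 250, 0, 100, 4}: slope_cold, offset_cold, slope_hot, offset_hot */
	int slope_cold = 0, offset_cold = 250, slope_hot = 0, offset_hot = 100;
	int f_MHz = 400;	/* hypothetical tuned frequency in MHz */
	int temp = 64;		/* hypothetical chip temperature, clamped to 0..128 in the driver */

	int wbd_thot  = offset_hot  - ((slope_hot  * f_MHz) >> 6);
	int wbd_tcold = offset_cold - ((slope_cold * f_MHz) >> 6);

	/* linear interpolation between the cold and hot calibration points;
	 * like the driver, this relies on an arithmetic right shift of the
	 * (usually negative) product */
	wbd_tcold += ((wbd_thot - wbd_tcold) * temp) >> 7;

	printf("interpolated wbd offset correction: %d\n", wbd_tcold);	/* 175 with these values */
	return 0;
}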
static const u16 dib0090_defaults[] = {
@@ -801,7 +1271,7 @@ static const u16 dib0090_defaults[] = {
0x99a0,
0x6008,
0x0000,
- 0x8acb,
+ 0x8bcb,
0x0000,
0x0405,
0x0000,
@@ -829,8 +1299,6 @@ static const u16 dib0090_defaults[] = {
1, 0x39,
0x0000,
- 1, 0x1b,
- EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL,
2, 0x1e,
0x07FF,
0x0007,
@@ -844,50 +1312,125 @@ static const u16 dib0090_defaults[] = {
0
};
-static int dib0090_reset(struct dvb_frontend *fe)
-{
- struct dib0090_state *state = fe->tuner_priv;
- u16 l, r, *n;
+static const u16 dib0090_p1g_additionnal_defaults[] = {
+ 1, 0x05,
+ 0xabcd,
- dib0090_reset_digital(fe, state->config);
- state->revision = dib0090_identify(fe);
+ 1, 0x11,
+ 0x00b4,
- /* Revision definition */
- if (state->revision == 0xff)
- return -EINVAL;
-#ifdef EFUSE
- else if ((state->revision & 0x1f) >= 3) /* Update the efuse : Only available for KROSUS > P1C */
- dib0090_set_EFUSE(state);
-#endif
+ 1, 0x1c,
+ 0xfffd,
-#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
- if (!(state->revision & 0x1)) /* it is P1B - reset is already done */
- return 0;
-#endif
+ 1, 0x40,
+ 0x108,
+ 0
+};
+
+static void dib0090_set_default_config(struct dib0090_state *state, const u16 * n)
+{
+ u16 l, r;
- /* Upload the default values */
- n = (u16 *) dib0090_defaults;
l = pgm_read_word(n++);
while (l) {
r = pgm_read_word(n++);
do {
- /* DEBUG_TUNER */
- /* dprintk("%d, %d, %d", l, r, pgm_read_word(n)); */
dib0090_write_reg(state, r, pgm_read_word(n++));
r++;
} while (--l);
l = pgm_read_word(n++);
}
+}
+
+#define CAP_VALUE_MIN (u8) 9
+#define CAP_VALUE_MAX (u8) 40
+#define HR_MIN (u8) 25
+#define HR_MAX (u8) 40
+#define POLY_MIN (u8) 0
+#define POLY_MAX (u8) 8
+
+void dib0090_set_EFUSE(struct dib0090_state *state)
+{
+ u8 c, h, n;
+ u16 e2, e4;
+ u16 cal;
+
+ e2 = dib0090_read_reg(state, 0x26);
+ e4 = dib0090_read_reg(state, 0x28);
+
+ if ((state->identity.version == P1D_E_F) ||
+ (state->identity.version == P1G) || (e2 == 0xffff)) {
+
+ dib0090_write_reg(state, 0x22, 0x10);
+ cal = (dib0090_read_reg(state, 0x22) >> 6) & 0x3ff;
+
+ if ((cal < 670) || (cal == 1023))
+ cal = 850;
+ n = 165 - ((cal * 10) >> 6);
+ e2 = e4 = (3 << 12) | (34 << 6) | (n);
+ }
+
+ if (e2 != e4)
+ e2 &= e4; /* Remove the redundancy */
+
+ if (e2 != 0xffff) {
+ c = e2 & 0x3f;
+ n = (e2 >> 12) & 0xf;
+ h = (e2 >> 6) & 0x3f;
+
+ if ((c >= CAP_VALUE_MAX) || (c <= CAP_VALUE_MIN))
+ c = 32;
+ if ((h >= HR_MAX) || (h <= HR_MIN))
+ h = 34;
+ if ((n >= POLY_MAX) || (n <= POLY_MIN))
+ n = 3;
+
+ dib0090_write_reg(state, 0x13, (h << 10));
+ e2 = (n << 11) | ((h >> 2) << 6) | (c);
+ dib0090_write_reg(state, 0x2, e2); /* Load the BB_2 */
+ }
+}
+
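A small sketch (not part of the patch) of the EFUSE word layout used by dib0090_set_EFUSE() above: the cap value sits in bits 5..0, the HR trim in bits 11..6 and the poly value in bits 15..12, each range-checked against the CAP_VALUE/HR/POLY limits before being repacked into the BB_2 register. The sample word is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t e2 = (3 << 12) | (34 << 6) | 25;	/* hypothetical fused value */

	uint8_t c = e2 & 0x3f;			/* cap value, checked against CAP_VALUE_MIN/MAX */
	uint8_t h = (e2 >> 6) & 0x3f;		/* HR trim, checked against HR_MIN/MAX */
	uint8_t n = (e2 >> 12) & 0xf;		/* poly value, checked against POLY_MIN/MAX */

	/* value written back to BB_2 (register 0x02), as in the function above */
	uint16_t bb2 = (n << 11) | ((h >> 2) << 6) | c;

	printf("c=%u h=%u n=%u bb2=0x%04x\n", c, h, n, bb2);	/* c=25 h=34 n=3 bb2=0x1a19 */
	return 0;
}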
+static int dib0090_reset(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ dib0090_reset_digital(fe, state->config);
+ if (dib0090_identify(fe) < 0)
+ return -EIO;
+
+#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
+ if (!(state->identity.version & 0x1)) /* it is P1B - reset is already done */
+ return 0;
+#endif
+
+ if (!state->identity.in_soc) {
+ if ((dib0090_read_reg(state, 0x1a) >> 5) & 0x2)
+ dib0090_write_reg(state, 0x1b, (EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL));
+ else
+ dib0090_write_reg(state, 0x1b, (EN_DIGCLK | EN_PLL | EN_CRYSTAL));
+ }
+
+ dib0090_set_default_config(state, dib0090_defaults);
+
+ if (state->identity.in_soc)
+ dib0090_write_reg(state, 0x18, 0x2910); /* charge pump current = 0 */
+
+ if (state->identity.p1g)
+ dib0090_set_default_config(state, dib0090_p1g_additionnal_defaults);
+
+ /* Update the EFUSE: only available for KROSUS > P1C and for SoCs */
+ if (((state->identity.version & 0x1f) >= P1D_E_F) || (state->identity.in_soc))
+ dib0090_set_EFUSE(state);
/* Configure according to the crystal frequency */
if (state->config->io.clock_khz >= 24000)
- l = 1;
+ dib0090_write_reg(state, 0x14, 1);
else
- l = 2;
- dib0090_write_reg(state, 0x14, l);
+ dib0090_write_reg(state, 0x14, 2);
dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
- state->reset = 3; /* enable iq-offset-calibration and wbd-calibration when tuning next time */
+ state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL; /* enable iq-offset-calibration and wbd-calibration when tuning next time */
return 0;
}
@@ -927,11 +1470,11 @@ static int dib0090_get_offset(struct dib0090_state *state, enum frontend_tune_st
}
struct dc_calibration {
- uint8_t addr;
- uint8_t offset;
- uint8_t pga:1;
- uint16_t bb1;
- uint8_t i:1;
+ u8 addr;
+ u8 offset;
+ u8 pga:1;
+ u16 bb1;
+ u8 i:1;
};
static const struct dc_calibration dc_table[] = {
@@ -944,6 +1487,17 @@ static const struct dc_calibration dc_table[] = {
{0},
};
+static const struct dc_calibration dc_p1g_table[] = {
+ /* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
+ /* addr ; trim reg offset ; pga ; CTRL_BB1 value ; i or q */
+ {0x06, 5, 1, (1 << 13) | (0 << 8) | (15 << 3), 1},
+ {0x07, 11, 1, (1 << 13) | (0 << 8) | (15 << 3), 0},
+ /* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
+ {0x06, 0, 0, (1 << 13) | (29 << 8) | (15 << 3), 1},
+ {0x06, 10, 0, (1 << 13) | (29 << 8) | (15 << 3), 0},
+ {0},
+};
+
static void dib0090_set_trim(struct dib0090_state *state)
{
u16 *val;
@@ -962,41 +1516,45 @@ static void dib0090_set_trim(struct dib0090_state *state)
static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
int ret = 0;
+ u16 reg;
switch (*tune_state) {
-
case CT_TUNER_START:
- /* init */
- dprintk("Internal DC calibration");
-
- /* the LNA is off */
- dib0090_write_reg(state, 0x24, 0x02ed);
+ dprintk("Start DC offset calibration");
/* force vcm2 = 0.8V */
state->bb6 = 0;
state->bb7 = 0x040d;
+ /* the LNA AND LO are off */
+ reg = dib0090_read_reg(state, 0x24) & 0x0ffb; /* shutdown lna and lo */
+ dib0090_write_reg(state, 0x24, reg);
+
+ state->wbdmux = dib0090_read_reg(state, 0x10);
+ dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x7 << 3) | 0x3);
+ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14));
+
state->dc = dc_table;
+ if (state->identity.p1g)
+ state->dc = dc_p1g_table;
*tune_state = CT_TUNER_STEP_0;
/* fall through */
case CT_TUNER_STEP_0:
+ dprintk("Sart/continue DC calibration for %s path", (state->dc->i == 1) ? "I" : "Q");
dib0090_write_reg(state, 0x01, state->dc->bb1);
dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));
state->step = 0;
-
state->min_adc_diff = 1023;
-
*tune_state = CT_TUNER_STEP_1;
ret = 50;
break;
case CT_TUNER_STEP_1:
dib0090_set_trim(state);
-
*tune_state = CT_TUNER_STEP_2;
break;
@@ -1007,7 +1565,13 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
break;
case CT_TUNER_STEP_5: /* found an offset */
- dprintk("FE%d: IQC read=%d, current=%x", state->fe->id, (u32) state->adc_diff, state->step);
+ dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step);
+ if (state->step == 0 && state->adc_diff < 0) {
+ state->min_adc_diff = -1023;
+ dprintk("Change of sign of the minimum adc diff");
+ }
+
+ dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step);
/* first turn for this frequency */
if (state->step == 0) {
@@ -1017,20 +1581,21 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
state->step = 0x10;
}
- state->adc_diff = ABS(state->adc_diff);
-
- if (state->adc_diff < state->min_adc_diff && steps(state->step) < 15) { /* stop search when the delta to 0 is increasing */
+ /* Look for a change of sign in adc_diff; min_adc_diff is used to store the value from step N-1 */
+ if ((state->adc_diff & 0x8000) == (state->min_adc_diff & 0x8000) && steps(state->step) < 15) {
+ /* stop the search when the sign changes or steps(step) reaches 15; step 0 is forced to continue */
state->step++;
state->min_adc_diff = state->adc_diff;
*tune_state = CT_TUNER_STEP_1;
} else {
-
/* the minimum was what we have seen in the step before */
- state->step--;
- dib0090_set_trim(state);
+ if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) {
+ dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff);
+ state->step--;
+ }
- dprintk("FE%d: BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->fe->id, state->dc->addr, state->adc_diff,
- state->step);
+ dib0090_set_trim(state);
+ dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step);
state->dc++;
if (state->dc->addr == 0) /* done */
@@ -1045,7 +1610,7 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
dib0090_write_reg(state, 0x07, state->bb7 & ~0x0008);
dib0090_write_reg(state, 0x1f, 0x7);
*tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
- state->reset &= ~0x1;
+ state->calibrate &= ~DC_CAL;
default:
break;
}
@@ -1054,21 +1619,43 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
+ u8 wbd_gain;
+ const struct dib0090_wbd_slope *wbd = state->current_wbd_table;
+
switch (*tune_state) {
case CT_TUNER_START:
- /* WBD-mode=log, Bias=2, Gain=6, Testmode=1, en=1, WBDMUX=1 */
- dib0090_write_reg(state, 0x10, 0xdb09 | (1 << 10));
- dib0090_write_reg(state, 0x24, EN_UHF & 0x0fff);
+ while (state->current_rf / 1000 > wbd->max_freq)
+ wbd++;
+ if (wbd->wbd_gain != 0)
+ wbd_gain = wbd->wbd_gain;
+ else {
+ wbd_gain = 4;
+#if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND)
+ if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND))
+ wbd_gain = 2;
+#endif
+ }
+
+ if (wbd_gain == state->wbd_calibration_gain) { /* the WBD calibration has already been done */
+ *tune_state = CT_TUNER_START;
+ state->calibrate &= ~WBD_CAL;
+ return 0;
+ }
+
+ dib0090_write_reg(state, 0x10, 0x1b81 | (1 << 10) | (wbd_gain << 13) | (1 << 3));
+ dib0090_write_reg(state, 0x24, ((EN_UHF & 0x0fff) | (1 << 1)));
*tune_state = CT_TUNER_STEP_0;
+ state->wbd_calibration_gain = wbd_gain;
return 90; /* wait for the WBDMUX to switch and for the ADC to sample */
+
case CT_TUNER_STEP_0:
- state->wbd_offset = dib0090_read_reg(state, 0x1d);
+ state->wbd_offset = dib0090_get_slow_adc_val(state);
dprintk("WBD calibration offset = %d", state->wbd_offset);
-
*tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
- state->reset &= ~0x2;
+ state->calibrate &= ~WBD_CAL;
break;
+
default:
break;
}
@@ -1092,6 +1679,15 @@ static void dib0090_set_bandwidth(struct dib0090_state *state)
state->bb_1_def |= tmp;
dib0090_write_reg(state, 0x01, state->bb_1_def); /* be sure that we have the right bb-filter */
+
+ dib0090_write_reg(state, 0x03, 0x6008); /* = 0x6008 : vcm3_trim = 1 ; filter2_gm1_trim = 8 ; filter2_cutoff_freq = 0 */
+ dib0090_write_reg(state, 0x04, 0x1); /* 0 = 1 kHz ; 1 = 50 Hz ; 2 = 150 Hz ; 3 = 50 kHz ; 4 = servo fast */
+ if (state->identity.in_soc) {
+ dib0090_write_reg(state, 0x05, 0x9bcf); /* attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 1 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 15 */
+ } else {
+ dib0090_write_reg(state, 0x02, (5 << 11) | (8 << 6) | (22 & 0x3f)); /* 22 = cap_value */
+ dib0090_write_reg(state, 0x05, 0xabcd); /* = 0xabcd : attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 2 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 13 */
+ }
}
static const struct dib0090_pll dib0090_pll_table[] = {
@@ -1180,6 +1776,255 @@ static const struct dib0090_tuning dib0090_tuning_table[] = {
#endif
};
+static const struct dib0090_tuning dib0090_p1g_tuning_table[] = {
+#ifdef CONFIG_BAND_CBAND
+ {170000, 4, 1, 0x820f, 0x300, 0x2d22, 0x82cb, EN_CAB},
+#endif
+#ifdef CONFIG_BAND_VHF
+ {184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
+ {227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
+ {380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
+#endif
+#ifdef CONFIG_BAND_UHF
+ {510000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {540000, 2, 1, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {600000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {630000, 2, 4, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {680000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {720000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+#endif
+#ifdef CONFIG_BAND_LBAND
+ {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+#endif
+#ifdef CONFIG_BAND_SBAND
+ {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
+ {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
+#endif
+};
+
+static const struct dib0090_pll dib0090_p1g_pll_table[] = {
+#ifdef CONFIG_BAND_CBAND
+ {57000, 0, 11, 48, 6},
+ {70000, 1, 11, 48, 6},
+ {86000, 0, 10, 32, 4},
+ {105000, 1, 10, 32, 4},
+ {115000, 0, 9, 24, 6},
+ {140000, 1, 9, 24, 6},
+ {170000, 0, 8, 16, 4},
+#endif
+#ifdef CONFIG_BAND_VHF
+ {200000, 1, 8, 16, 4},
+ {230000, 0, 7, 12, 6},
+ {280000, 1, 7, 12, 6},
+ {340000, 0, 6, 8, 4},
+ {380000, 1, 6, 8, 4},
+ {455000, 0, 5, 6, 6},
+#endif
+#ifdef CONFIG_BAND_UHF
+ {580000, 1, 5, 6, 6},
+ {680000, 0, 4, 4, 4},
+ {860000, 1, 4, 4, 4},
+#endif
+#ifdef CONFIG_BAND_LBAND
+ {1800000, 1, 2, 2, 4},
+#endif
+#ifdef CONFIG_BAND_SBAND
+ {2900000, 0, 1, 1, 6},
+#endif
+};
+
+static const struct dib0090_tuning dib0090_p1g_tuning_table_fm_vhf_on_cband[] = {
+#ifdef CONFIG_BAND_CBAND
+ {184000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
+ {227000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
+ {380000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
+#endif
+#ifdef CONFIG_BAND_UHF
+ {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+#endif
+#ifdef CONFIG_BAND_LBAND
+ {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+#endif
+#ifdef CONFIG_BAND_SBAND
+ {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
+ {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
+#endif
+};
+
+static const struct dib0090_tuning dib0090_tuning_table_cband_7090[] = {
+#ifdef CONFIG_BAND_CBAND
+ {300000, 4, 3, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
+ {380000, 4, 10, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
+ {570000, 4, 10, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
+ {858000, 4, 5, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
+#endif
+};
+
+static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tune_state *tune_state)
+{
+ int ret = 0;
+ u16 lo4 = 0xe900;
+
+ s16 adc_target;
+ u16 adc;
+ s8 step_sign;
+ u8 force_soft_search = 0;
+
+ if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
+ force_soft_search = 1;
+
+ if (*tune_state == CT_TUNER_START) {
+ dprintk("Start Captrim search : %s", (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
+ dib0090_write_reg(state, 0x10, 0x2B1);
+ dib0090_write_reg(state, 0x1e, 0x0032);
+
+ if (!state->tuner_is_tuned) {
+ /* prepare a complete captrim */
+ if (!state->identity.p1g || force_soft_search)
+ state->step = state->captrim = state->fcaptrim = 64;
+
+ state->current_rf = state->rf_request;
+ } else { /* we are already tuned to this frequency - the configuration is correct */
+ if (!state->identity.p1g || force_soft_search) {
+ /* do a minimal captrim even if the frequency has not changed */
+ state->step = 4;
+ state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f;
+ }
+ }
+ state->adc_diff = 3000;
+ *tune_state = CT_TUNER_STEP_0;
+
+ } else if (*tune_state == CT_TUNER_STEP_0) {
+ if (state->identity.p1g && !force_soft_search) {
+ u8 ratio = 31;
+
+ dib0090_write_reg(state, 0x40, (3 << 7) | (ratio << 2) | (1 << 1) | 1);
+ dib0090_read_reg(state, 0x40);
+ ret = 50;
+ } else {
+ state->step /= 2;
+ dib0090_write_reg(state, 0x18, lo4 | state->captrim);
+
+ if (state->identity.in_soc)
+ ret = 25;
+ }
+ *tune_state = CT_TUNER_STEP_1;
+
+ } else if (*tune_state == CT_TUNER_STEP_1) {
+ if (state->identity.p1g && !force_soft_search) {
+ dib0090_write_reg(state, 0x40, 0x18c | (0 << 1) | 0);
+ dib0090_read_reg(state, 0x40);
+
+ state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F;
+ dprintk("***Final Captrim= 0x%x", state->fcaptrim);
+ *tune_state = CT_TUNER_STEP_3;
+
+ } else {
+ /* MERGE for all krosus before P1G */
+ adc = dib0090_get_slow_adc_val(state);
+ dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
+
+ if (state->rest == 0 || state->identity.in_soc) { /* Only for 8090P SoCs, where the auto-captrim HW has a bug (to be checked in ACI for SoCs): a target of 400 on the 8090P SoC causes a tuning issue */
+ adc_target = 200;
+ } else
+ adc_target = 400;
+
+ if (adc >= adc_target) {
+ adc -= adc_target;
+ step_sign = -1;
+ } else {
+ adc = adc_target - adc;
+ step_sign = 1;
+ }
+
+ if (adc < state->adc_diff) {
+ dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
+ state->adc_diff = adc;
+ state->fcaptrim = state->captrim;
+ }
+
+ state->captrim += step_sign * state->step;
+ if (state->step >= 1)
+ *tune_state = CT_TUNER_STEP_0;
+ else
+ *tune_state = CT_TUNER_STEP_2;
+
+ ret = 25;
+ }
+ } else if (*tune_state == CT_TUNER_STEP_2) { /* this step is only used by krosus < P1G */
+ /* write the final captrim config */
+ dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim);
+
+ *tune_state = CT_TUNER_STEP_3;
+
+ } else if (*tune_state == CT_TUNER_STEP_3) {
+ state->calibrate &= ~CAPTRIM_CAL;
+ *tune_state = CT_TUNER_STEP_0;
+ }
+
+ return ret;
+}
+
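The non-P1G ("soft search") branch of dib0090_captrim_search() above is a successive-approximation search over a 7-bit captrim code, keeping the code whose slow-ADC reading is closest to the target. A user-space sketch of that loop follows; it is not part of the patch, and read_adc() is a toy stand-in for dib0090_get_slow_adc_val() with a made-up monotonic response.

#include <stdio.h>
#include <stdlib.h>

static int read_adc(int captrim)
{
	return 700 - 5 * captrim;	/* hypothetical monotonic ADC response */
}

int main(void)
{
	const int adc_target = 400;	/* 200 is used instead on SoCs or when Rest == 0 */
	int step = 64, captrim = 64, fcaptrim = 64;
	int adc_diff = 3000;
	int adc, dist;

	while (step >= 1) {
		step /= 2;			/* halve the step, as in CT_TUNER_STEP_0 */
		adc = read_adc(captrim);
		dist = abs(adc - adc_target);

		if (dist < adc_diff) {		/* closer to the target: remember this code */
			adc_diff = dist;
			fcaptrim = captrim;
		}
		captrim += (adc >= adc_target) ? -step : step;
	}
	printf("final captrim = %d\n", fcaptrim);	/* value that would be written with lo4 */
	return 0;
}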
+static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tune_state *tune_state)
+{
+ int ret = 15;
+ s16 val;
+
+ switch (*tune_state) {
+ case CT_TUNER_START:
+ state->wbdmux = dib0090_read_reg(state, 0x10);
+ dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x8 << 3));
+
+ state->bias = dib0090_read_reg(state, 0x13);
+ dib0090_write_reg(state, 0x13, state->bias | (0x3 << 8));
+
+ *tune_state = CT_TUNER_STEP_0;
+ /* wait for the WBDMUX to switch and for the ADC to sample */
+ break;
+
+ case CT_TUNER_STEP_0:
+ state->adc_diff = dib0090_get_slow_adc_val(state);
+ dib0090_write_reg(state, 0x13, (state->bias & ~(0x3 << 8)) | (0x2 << 8));
+ *tune_state = CT_TUNER_STEP_1;
+ break;
+
+ case CT_TUNER_STEP_1:
+ val = dib0090_get_slow_adc_val(state);
+ state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55;
+
+ dprintk("temperature: %d C", state->temperature - 30);
+
+ *tune_state = CT_TUNER_STEP_2;
+ break;
+
+ case CT_TUNER_STEP_2:
+ dib0090_write_reg(state, 0x13, state->bias);
+ dib0090_write_reg(state, 0x10, state->wbdmux); /* write back original WBDMUX */
+
+ *tune_state = CT_TUNER_START;
+ state->calibrate &= ~TEMP_CAL;
+ if (state->config->analog_output == 0)
+ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14));
+
+ break;
+
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
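The conversion done in dib0090_get_temperature() above boils down to one line of fixed-point arithmetic; the sketch below (not part of the patch) shows it with two hypothetical slow-ADC readings. The formula and the "- 30" applied to the printed value are taken from the function; the s16 cast used by the driver is omitted since the delta here is positive.

#include <stdio.h>

int main(void)
{
	int adc_ref  = 500;	/* hypothetical reading with the bias field set to 0x3 */
	int adc_meas = 530;	/* hypothetical reading with the bias field set to 0x2 */

	int temperature = (((adc_meas - adc_ref) * 180) >> 8) + 55;

	printf("stored temperature code: %d, printed as %d C\n",
	       temperature, temperature - 30);	/* 76 and 46 with these readings */
	return 0;
}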
#define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */
static int dib0090_tune(struct dvb_frontend *fe)
{
@@ -1188,87 +2033,131 @@ static int dib0090_tune(struct dvb_frontend *fe)
const struct dib0090_pll *pll = state->current_pll_table_index;
enum frontend_tune_state *tune_state = &state->tune_state;
- u32 rf;
- u16 lo4 = 0xe900, lo5, lo6, Den;
+ u16 lo5, lo6, Den, tmp;
u32 FBDiv, Rest, FREF, VCOF_kHz = 0;
- u16 tmp, adc;
- int8_t step_sign;
int ret = 10; /* 1ms is the default delay most of the time */
u8 c, i;
- state->current_band = (u8) BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000);
- rf = fe->dtv_property_cache.frequency / 1000 + (state->current_band ==
- BAND_UHF ? state->config->freq_offset_khz_uhf : state->config->freq_offset_khz_vhf);
- /* in any case we first need to do a reset if needed */
- if (state->reset & 0x1)
- return dib0090_dc_offset_calibration(state, tune_state);
- else if (state->reset & 0x2)
- return dib0090_wbd_calibration(state, tune_state);
-
- /************************* VCO ***************************/
+ /************************* VCO ***************************/
/* Default values for FG */
/* from these are needed : */
/* Cp,HFdiv,VCOband,SD,Num,Den,FB and REFDiv */
-#ifdef CONFIG_SYS_ISDBT
- if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1)
- rf += 850;
-#endif
+ /* first run any pending calibration */
+ if (*tune_state == CT_TUNER_START) {
+ /* deactivate DataTX before some calibrations */
+ if (state->calibrate & (DC_CAL | TEMP_CAL | WBD_CAL))
+ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14));
+ else
+ /* Activate DataTX in case a calibration has been done before */
+ if (state->config->analog_output == 0)
+ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14));
+ }
- if (state->current_rf != rf) {
- state->tuner_is_tuned = 0;
+ if (state->calibrate & DC_CAL)
+ return dib0090_dc_offset_calibration(state, tune_state);
+ else if (state->calibrate & WBD_CAL) {
+ if (state->current_rf == 0)
+ state->current_rf = state->fe->dtv_property_cache.frequency / 1000;
+ return dib0090_wbd_calibration(state, tune_state);
+ } else if (state->calibrate & TEMP_CAL)
+ return dib0090_get_temperature(state, tune_state);
+ else if (state->calibrate & CAPTRIM_CAL)
+ return dib0090_captrim_search(state, tune_state);
- tune = dib0090_tuning_table;
+ if (*tune_state == CT_TUNER_START) {
+ /* On SoCs with PWM AGC control, disengage the mux so that register 0x01 can be read/written to select the right filter (cutoff_freq_select) during the tune sequence; otherwise the SoC reports a SERPAR error when accessing 0x01 */
+ if (state->config->use_pwm_agc && state->identity.in_soc) {
+ tmp = dib0090_read_reg(state, 0x39);
+ if ((tmp >> 10) & 0x1)
+ dib0090_write_reg(state, 0x39, tmp & ~(1 << 10));
+ }
- tmp = (state->revision >> 5) & 0x7;
- if (tmp == 0x4 || tmp == 0x7) {
- /* CBAND tuner version for VHF */
- if (state->current_band == BAND_FM || state->current_band == BAND_VHF) {
- /* Force CBAND */
- state->current_band = BAND_CBAND;
- tune = dib0090_tuning_table_fm_vhf_on_cband;
+ state->current_band = (u8) BAND_OF_FREQUENCY(state->fe->dtv_property_cache.frequency / 1000);
+ state->rf_request =
+ state->fe->dtv_property_cache.frequency / 1000 + (state->current_band ==
+ BAND_UHF ? state->config->freq_offset_khz_uhf : state->config->
+ freq_offset_khz_vhf);
+
+ /* in ISDB-T 1seg we shift tuning frequency */
+ if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1
+ && state->fe->dtv_property_cache.isdbt_partial_reception == 0)) {
+ const struct dib0090_low_if_offset_table *LUT_offset = state->config->low_if;
+ u8 found_offset = 0;
+ u32 margin_khz = 100;
+
+ if (LUT_offset != NULL) {
+ while (LUT_offset->RF_freq != 0xffff) {
+ if (((state->rf_request > (LUT_offset->RF_freq - margin_khz))
+ && (state->rf_request < (LUT_offset->RF_freq + margin_khz)))
+ && LUT_offset->std == state->fe->dtv_property_cache.delivery_system) {
+ state->rf_request += LUT_offset->offset_khz;
+ found_offset = 1;
+ break;
+ }
+ LUT_offset++;
+ }
}
+
+ if (found_offset == 0)
+ state->rf_request += 400;
}
+ if (state->current_rf != state->rf_request || (state->current_standard != state->fe->dtv_property_cache.delivery_system)) {
+ state->tuner_is_tuned = 0;
+ state->current_rf = 0;
+ state->current_standard = 0;
- pll = dib0090_pll_table;
- /* Look for the interval */
- while (rf > tune->max_freq)
- tune++;
- while (rf > pll->max_freq)
- pll++;
- state->current_tune_table_index = tune;
- state->current_pll_table_index = pll;
- }
+ tune = dib0090_tuning_table;
+ if (state->identity.p1g)
+ tune = dib0090_p1g_tuning_table;
- if (*tune_state == CT_TUNER_START) {
+ tmp = (state->identity.version >> 5) & 0x7;
- if (state->tuner_is_tuned == 0)
- state->current_rf = 0;
+ if (state->identity.in_soc) {
+ if (state->config->force_cband_input) { /* Use the CBAND input for all band */
+ if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF
+ || state->current_band & BAND_UHF) {
+ state->current_band = BAND_CBAND;
+ tune = dib0090_tuning_table_cband_7090;
+ }
+ } else { /* Use the CBAND input for all band under UHF */
+ if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF) {
+ state->current_band = BAND_CBAND;
+ tune = dib0090_tuning_table_cband_7090;
+ }
+ }
+ } else
+ if (tmp == 0x4 || tmp == 0x7) {
+ /* CBAND tuner version for VHF */
+ if (state->current_band == BAND_FM || state->current_band == BAND_CBAND || state->current_band == BAND_VHF) {
+ state->current_band = BAND_CBAND; /* Force CBAND */
+
+ tune = dib0090_tuning_table_fm_vhf_on_cband;
+ if (state->identity.p1g)
+ tune = dib0090_p1g_tuning_table_fm_vhf_on_cband;
+ }
+ }
- if (state->current_rf != rf) {
+ pll = dib0090_pll_table;
+ if (state->identity.p1g)
+ pll = dib0090_p1g_pll_table;
- dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim));
+ /* Look for the interval */
+ while (state->rf_request > tune->max_freq)
+ tune++;
+ while (state->rf_request > pll->max_freq)
+ pll++;
- /* external loop filter, otherwise:
- * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4;
- * lo6 = 0x0e34 */
- if (pll->vco_band)
- lo5 = 0x049e;
- else if (state->config->analog_output)
- lo5 = 0x041d;
- else
- lo5 = 0x041c;
-
- lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7); /* bit 15 is the split to the slave, we do not do it here */
+ state->current_tune_table_index = tune;
+ state->current_pll_table_index = pll;
- if (!state->config->io.pll_int_loop_filt)
- lo6 = 0xff28;
- else
- lo6 = (state->config->io.pll_int_loop_filt << 3);
+ dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim));
- VCOF_kHz = (pll->hfdiv * rf) * 2;
+ VCOF_kHz = (pll->hfdiv * state->rf_request) * 2;
FREF = state->config->io.clock_khz;
+ if (state->config->fref_clock_ratio != 0)
+ FREF /= state->config->fref_clock_ratio;
FBDiv = (VCOF_kHz / pll->topresc / FREF);
Rest = (VCOF_kHz / pll->topresc) - FBDiv * FREF;
@@ -1283,144 +2172,132 @@ static int dib0090_tune(struct dvb_frontend *fe)
} else if (Rest > (FREF - 2 * LPF))
Rest = FREF - 2 * LPF;
Rest = (Rest * 6528) / (FREF / 10);
+ state->rest = Rest;
- Den = 1;
+ /* external loop filter, otherwise:
+ * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4;
+ * lo6 = 0x0e34 */
+
+ if (Rest == 0) {
+ if (pll->vco_band)
+ lo5 = 0x049f;
+ else
+ lo5 = 0x041f;
+ } else {
+ if (pll->vco_band)
+ lo5 = 0x049e;
+ else if (state->config->analog_output)
+ lo5 = 0x041d;
+ else
+ lo5 = 0x041c;
+ }
+
+ if (state->identity.p1g) { /* Bias is done automatically in P1G */
+ if (state->identity.in_soc) {
+ if (state->identity.version == SOC_8090_P1G_11R1)
+ lo5 = 0x46f;
+ else
+ lo5 = 0x42f;
+ } else
+ lo5 = 0x42c;
+ }
+
+ lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7); /* bit 15 is the split to the slave, we do not do it here */
- dprintk(" ***** ******* Rest value = %d", Rest);
+ if (!state->config->io.pll_int_loop_filt) {
+ if (state->identity.in_soc)
+ lo6 = 0xff98;
+ else if (state->identity.p1g || (Rest == 0))
+ lo6 = 0xfff8;
+ else
+ lo6 = 0xff28;
+ } else
+ lo6 = (state->config->io.pll_int_loop_filt << 3);
+
+ Den = 1;
if (Rest > 0) {
if (state->config->analog_output)
lo6 |= (1 << 2) | 2;
- else
- lo6 |= (1 << 2) | 1;
+ else {
+ if (state->identity.in_soc)
+ lo6 |= (1 << 2) | 2;
+ else
+ lo6 |= (1 << 2) | 2;
+ }
Den = 255;
}
-#ifdef CONFIG_BAND_SBAND
- if (state->current_band == BAND_SBAND)
- lo6 &= 0xfffb;
-#endif
-
dib0090_write_reg(state, 0x15, (u16) FBDiv);
-
- dib0090_write_reg(state, 0x16, (Den << 8) | 1);
-
+ if (state->config->fref_clock_ratio != 0)
+ dib0090_write_reg(state, 0x16, (Den << 8) | state->config->fref_clock_ratio);
+ else
+ dib0090_write_reg(state, 0x16, (Den << 8) | 1);
dib0090_write_reg(state, 0x17, (u16) Rest);
-
dib0090_write_reg(state, 0x19, lo5);
-
dib0090_write_reg(state, 0x1c, lo6);
lo6 = tune->tuner_enable;
if (state->config->analog_output)
lo6 = (lo6 & 0xff9f) | 0x2;
- dib0090_write_reg(state, 0x24, lo6 | EN_LO
-#ifdef CONFIG_DIB0090_USE_PWM_AGC
- | state->config->use_pwm_agc * EN_CRYSTAL
-#endif
- );
-
- state->current_rf = rf;
-
- /* prepare a complete captrim */
- state->step = state->captrim = state->fcaptrim = 64;
-
- } else { /* we are already tuned to this frequency - the configuration is correct */
+ dib0090_write_reg(state, 0x24, lo6 | EN_LO | state->config->use_pwm_agc * EN_CRYSTAL);
- /* do a minimal captrim even if the frequency has not changed */
- state->step = 4;
- state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f;
}
- state->adc_diff = 3000;
-
- dib0090_write_reg(state, 0x10, 0x2B1);
- dib0090_write_reg(state, 0x1e, 0x0032);
+ state->current_rf = state->rf_request;
+ state->current_standard = state->fe->dtv_property_cache.delivery_system;
ret = 20;
- *tune_state = CT_TUNER_STEP_1;
- } else if (*tune_state == CT_TUNER_STEP_0) {
- /* nothing */
- } else if (*tune_state == CT_TUNER_STEP_1) {
- state->step /= 2;
- dib0090_write_reg(state, 0x18, lo4 | state->captrim);
- *tune_state = CT_TUNER_STEP_2;
- } else if (*tune_state == CT_TUNER_STEP_2) {
+ state->calibrate = CAPTRIM_CAL; /* captrim search now */
+ }
- adc = dib0090_read_reg(state, 0x1d);
- dprintk("FE %d CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) fe->id, (u32) state->captrim, (u32) adc,
- (u32) (adc) * (u32) 1800 / (u32) 1024);
+ else if (*tune_state == CT_TUNER_STEP_0) { /* Warning: because of captrim cal, if you change this step, change it in the _cal.c file as well, since it is the step following the captrim cal state machine */
+ const struct dib0090_wbd_slope *wbd = state->current_wbd_table;
- if (adc >= 400) {
- adc -= 400;
- step_sign = -1;
- } else {
- adc = 400 - adc;
- step_sign = 1;
- }
+ while (state->current_rf / 1000 > wbd->max_freq)
+ wbd++;
- if (adc < state->adc_diff) {
- dprintk("FE %d CAPTRIM=%d is closer to target (%d/%d)", (u32) fe->id, (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
- state->adc_diff = adc;
- state->fcaptrim = state->captrim;
-
- }
+ dib0090_write_reg(state, 0x1e, 0x07ff);
+ dprintk("Final Captrim: %d", (u32) state->fcaptrim);
+ dprintk("HFDIV code: %d", (u32) pll->hfdiv_code);
+ dprintk("VCO = %d", (u32) pll->vco_band);
+ dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
+ dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz);
+ dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
+ dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
+ (u32) dib0090_read_reg(state, 0x1c) & 0x3);
- state->captrim += step_sign * state->step;
- if (state->step >= 1)
- *tune_state = CT_TUNER_STEP_1;
- else
- *tune_state = CT_TUNER_STEP_3;
+#define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */
+ c = 4;
+ i = 3;
- ret = 15;
- } else if (*tune_state == CT_TUNER_STEP_3) {
- /*write the final cptrim config */
- dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim);
+ if (wbd->wbd_gain != 0)
+ c = wbd->wbd_gain;
-#ifdef CONFIG_TUNER_DIB0090_CAPTRIM_MEMORY
- state->memory[state->memory_index].cap = state->fcaptrim;
-#endif
+ state->wbdmux = (c << 13) | (i << 11) | (WBD | (state->config->use_pwm_agc << 1));
+ dib0090_write_reg(state, 0x10, state->wbdmux);
- *tune_state = CT_TUNER_STEP_4;
- } else if (*tune_state == CT_TUNER_STEP_4) {
- dib0090_write_reg(state, 0x1e, 0x07ff);
-
- dprintk("FE %d Final Captrim: %d", (u32) fe->id, (u32) state->fcaptrim);
- dprintk("FE %d HFDIV code: %d", (u32) fe->id, (u32) pll->hfdiv_code);
- dprintk("FE %d VCO = %d", (u32) fe->id, (u32) pll->vco_band);
- dprintk("FE %d VCOF in kHz: %d ((%d*%d) << 1))", (u32) fe->id, (u32) ((pll->hfdiv * rf) * 2), (u32) pll->hfdiv, (u32) rf);
- dprintk("FE %d REFDIV: %d, FREF: %d", (u32) fe->id, (u32) 1, (u32) state->config->io.clock_khz);
- dprintk("FE %d FBDIV: %d, Rest: %d", (u32) fe->id, (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
- dprintk("FE %d Num: %d, Den: %d, SD: %d", (u32) fe->id, (u32) dib0090_read_reg(state, 0x17),
- (u32) (dib0090_read_reg(state, 0x16) >> 8), (u32) dib0090_read_reg(state, 0x1c) & 0x3);
+ if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) {
+ dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune);
+ dib0090_write_reg(state, 0x09, tune->lna_bias);
+ dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim));
+ } else
+ dib0090_write_reg(state, 0x09, (tune->lna_tune << 5) | tune->lna_bias);
- c = 4;
- i = 3;
-#if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND)
- if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND)) {
- c = 2;
- i = 2;
- }
-#endif
- dib0090_write_reg(state, 0x10, (c << 13) | (i << 11) | (WBD
-#ifdef CONFIG_DIB0090_USE_PWM_AGC
- | (state->config->use_pwm_agc << 1)
-#endif
- ));
- dib0090_write_reg(state, 0x09, (tune->lna_tune << 5) | (tune->lna_bias << 0));
dib0090_write_reg(state, 0x0c, tune->v2i);
dib0090_write_reg(state, 0x0d, tune->mix);
dib0090_write_reg(state, 0x0e, tune->load);
+ *tune_state = CT_TUNER_STEP_1;
- *tune_state = CT_TUNER_STEP_5;
- } else if (*tune_state == CT_TUNER_STEP_5) {
-
+ } else if (*tune_state == CT_TUNER_STEP_1) {
/* initialize the lt gain register */
state->rf_lt_def = 0x7c00;
- dib0090_write_reg(state, 0x0f, state->rf_lt_def);
dib0090_set_bandwidth(state);
state->tuner_is_tuned = 1;
+
+ state->calibrate |= WBD_CAL;
+ state->calibrate |= TEMP_CAL;
*tune_state = CT_TUNER_STOP;
} else
ret = FE_CALLBACK_TIME_NEVER;
@@ -1440,6 +2317,7 @@ enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
return state->tune_state;
}
+
EXPORT_SYMBOL(dib0090_get_tune_state);
int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
@@ -1449,6 +2327,7 @@ int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tun
state->tune_state = tune_state;
return 0;
}
+
EXPORT_SYMBOL(dib0090_set_tune_state);
static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency)
@@ -1462,7 +2341,7 @@ static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency)
static int dib0090_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
{
struct dib0090_state *state = fe->tuner_priv;
- uint32_t ret;
+ u32 ret;
state->tune_state = CT_TUNER_START;
@@ -1492,6 +2371,29 @@ static const struct dvb_tuner_ops dib0090_ops = {
.get_frequency = dib0090_get_frequency,
};
+static const struct dvb_tuner_ops dib0090_fw_ops = {
+ .info = {
+ .name = "DiBcom DiB0090",
+ .frequency_min = 45000000,
+ .frequency_max = 860000000,
+ .frequency_step = 1000,
+ },
+ .release = dib0090_release,
+
+ .init = NULL,
+ .sleep = NULL,
+ .set_params = NULL,
+ .get_frequency = NULL,
+};
+
+static const struct dib0090_wbd_slope dib0090_wbd_table_default[] = {
+ {470, 0, 250, 0, 100, 4},
+ {860, 51, 866, 21, 375, 4},
+ {1700, 0, 800, 0, 850, 4},
+ {2900, 0, 250, 0, 100, 6},
+ {0xFFFF, 0, 0, 0, 0, 0},
+};
+
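The lookup loops in dib0090_get_wbd_offset() and dib0090_wbd_calibration() walk this table until they hit the first entry whose max_freq is greater than or equal to the tuned frequency, so the 0xFFFF entry acts as a terminator/catch-all. A short sketch of that selection (not part of the patch; the frequencies and the reduced struct are illustrative only):

#include <stdio.h>

struct wbd_slope_lookup {
	unsigned short max_freq;	/* MHz, as in dib0090_wbd_table_default */
	unsigned char wbd_gain;
};

static const struct wbd_slope_lookup table[] = {
	{ 470, 4 }, { 860, 4 }, { 1700, 4 }, { 2900, 6 }, { 0xFFFF, 0 },
};

int main(void)
{
	unsigned int freqs[] = { 200, 666, 1500 };	/* hypothetical frequencies in MHz */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		const struct wbd_slope_lookup *wbd = table;

		while (freqs[i] > wbd->max_freq)	/* same walk as in the driver */
			wbd++;
		printf("%u MHz -> entry with max_freq %u\n", freqs[i], wbd->max_freq);
	}
	return 0;
}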
struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
{
struct dib0090_state *st = kzalloc(sizeof(struct dib0090_state), GFP_KERNEL);
@@ -1503,6 +2405,11 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
st->fe = fe;
fe->tuner_priv = st;
+ if (config->wbd == NULL)
+ st->current_wbd_table = dib0090_wbd_table_default;
+ else
+ st->current_wbd_table = config->wbd;
+
if (dib0090_reset(fe) != 0)
goto free_mem;
@@ -1515,8 +2422,34 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
fe->tuner_priv = NULL;
return NULL;
}
+
EXPORT_SYMBOL(dib0090_register);
+struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
+{
+ struct dib0090_fw_state *st = kzalloc(sizeof(struct dib0090_fw_state), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
+
+ st->config = config;
+ st->i2c = i2c;
+ st->fe = fe;
+ fe->tuner_priv = st;
+
+ if (dib0090_fw_reset_digital(fe, st->config) != 0)
+ goto free_mem;
+
+ dprintk("DiB0090 FW: successfully identified");
+ memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, sizeof(struct dvb_tuner_ops));
+
+ return fe;
+free_mem:
+ kfree(st);
+ fe->tuner_priv = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(dib0090_fw_register);
+
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner");
diff --git a/drivers/media/dvb/frontends/dib0090.h b/drivers/media/dvb/frontends/dib0090.h
index aa7711e88776..13d85244ec16 100644
--- a/drivers/media/dvb/frontends/dib0090.h
+++ b/drivers/media/dvb/frontends/dib0090.h
@@ -27,6 +27,21 @@ struct dib0090_io_config {
u16 pll_int_loop_filt;
};
+struct dib0090_wbd_slope {
+ u16 max_freq; /* this entry applies to every frequency (in MHz) less than or equal to this field */
+ u16 slope_cold;
+ u16 offset_cold;
+ u16 slope_hot;
+ u16 offset_hot;
+ u8 wbd_gain;
+};
+
+struct dib0090_low_if_offset_table {
+ int std;
+ u32 RF_freq;
+ s32 offset_khz;
+};
+
struct dib0090_config {
struct dib0090_io_config io;
int (*reset) (struct dvb_frontend *, int);
@@ -47,10 +62,20 @@ struct dib0090_config {
u16 wbd_cband_offset;
u8 use_pwm_agc;
u8 clkoutdrive;
+
+ u8 ls_cfg_pad_drv;
+ u8 data_tx_drv;
+
+ u8 in_soc;
+ const struct dib0090_low_if_offset_table *low_if;
+ u8 fref_clock_ratio;
+ u16 force_cband_input;
+ struct dib0090_wbd_slope *wbd;
};
#if defined(CONFIG_DVB_TUNER_DIB0090) || (defined(CONFIG_DVB_TUNER_DIB0090_MODULE) && defined(MODULE))
extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
+extern struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
extern void dib0090_pwm_gain_reset(struct dvb_frontend *fe);
extern u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner);
@@ -65,6 +90,12 @@ static inline struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, str
return NULL;
}
+static inline struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0090_config *config)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
static inline void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 6aa02cb80733..900af60b9d36 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -26,24 +26,29 @@ MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (defau
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P: "); printk(args); printk("\n"); } } while (0)
+struct i2c_device {
+ struct i2c_adapter *i2c_adap;
+ u8 i2c_addr;
+};
+
struct dib7000p_state {
struct dvb_frontend demod;
- struct dib7000p_config cfg;
+ struct dib7000p_config cfg;
u8 i2c_addr;
- struct i2c_adapter *i2c_adap;
+ struct i2c_adapter *i2c_adap;
struct dibx000_i2c_master i2c_master;
u16 wbd_ref;
- u8 current_band;
+ u8 current_band;
u32 current_bandwidth;
struct dibx000_agc_config *current_agc;
u32 timf;
- u8 div_force_off : 1;
- u8 div_state : 1;
+ u8 div_force_off:1;
+ u8 div_state:1;
u16 div_sync_wait;
u8 agc_state;
@@ -51,7 +56,13 @@ struct dib7000p_state {
u16 gpio_dir;
u16 gpio_val;
- u8 sfn_workaround_active :1;
+ u8 sfn_workaround_active:1;
+
+#define SOC7090 0x7090
+ u16 version;
+
+ u16 tuner_enable;
+ struct i2c_adapter dib7090_tuner_adap;
};
enum dib7000p_power_mode {
@@ -60,17 +71,20 @@ enum dib7000p_power_mode {
DIB7000P_POWER_INTERFACE_ONLY,
};
+static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode);
+static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
+
static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
{
u8 wb[2] = { reg >> 8, reg & 0xff };
u8 rb[2];
struct i2c_msg msg[2] = {
- { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
- { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+ {.addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
+ {.addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2},
};
if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
- dprintk("i2c read error on %d",reg);
+ dprintk("i2c read error on %d", reg);
return (rb[0] << 8) | rb[1];
}
@@ -86,7 +100,8 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
};
return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
}
-static void dib7000p_write_tab(struct dib7000p_state *state, u16 *buf)
+
+static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
{
u16 l = 0, r, *n;
n = buf;
@@ -104,54 +119,54 @@ static void dib7000p_write_tab(struct dib7000p_state *state, u16 *buf)
static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
{
- int ret = 0;
+ int ret = 0;
u16 outreg, fifo_threshold, smo_mode;
outreg = 0;
fifo_threshold = 1792;
smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
- dprintk( "setting output mode for demod %p to %d",
- &state->demod, mode);
+ dprintk("setting output mode for demod %p to %d", &state->demod, mode);
switch (mode) {
- case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
- outreg = (1 << 10); /* 0x0400 */
- break;
- case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continues clock
- outreg = (1 << 10) | (1 << 6); /* 0x0440 */
- break;
- case OUTMODE_MPEG2_SERIAL: // STBs with serial input
- outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0480 */
- break;
- case OUTMODE_DIVERSITY:
- if (state->cfg.hostbus_diversity)
- outreg = (1 << 10) | (4 << 6); /* 0x0500 */
- else
- outreg = (1 << 11);
- break;
- case OUTMODE_MPEG2_FIFO: // e.g. USB feeding
- smo_mode |= (3 << 1);
- fifo_threshold = 512;
- outreg = (1 << 10) | (5 << 6);
- break;
- case OUTMODE_ANALOG_ADC:
- outreg = (1 << 10) | (3 << 6);
- break;
- case OUTMODE_HIGH_Z: // disable
- outreg = 0;
- break;
- default:
- dprintk( "Unhandled output_mode passed to be set for demod %p",&state->demod);
- break;
+ case OUTMODE_MPEG2_PAR_GATED_CLK:
+ outreg = (1 << 10); /* 0x0400 */
+ break;
+ case OUTMODE_MPEG2_PAR_CONT_CLK:
+ outreg = (1 << 10) | (1 << 6); /* 0x0440 */
+ break;
+ case OUTMODE_MPEG2_SERIAL:
+ outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0480 */
+ break;
+ case OUTMODE_DIVERSITY:
+ if (state->cfg.hostbus_diversity)
+ outreg = (1 << 10) | (4 << 6); /* 0x0500 */
+ else
+ outreg = (1 << 11);
+ break;
+ case OUTMODE_MPEG2_FIFO:
+ smo_mode |= (3 << 1);
+ fifo_threshold = 512;
+ outreg = (1 << 10) | (5 << 6);
+ break;
+ case OUTMODE_ANALOG_ADC:
+ outreg = (1 << 10) | (3 << 6);
+ break;
+ case OUTMODE_HIGH_Z:
+ outreg = 0;
+ break;
+ default:
+ dprintk("Unhandled output_mode passed to be set for demod %p", &state->demod);
+ break;
}
if (state->cfg.output_mpeg2_in_188_bytes)
- smo_mode |= (1 << 5) ;
+ smo_mode |= (1 << 5);
- ret |= dib7000p_write_word(state, 235, smo_mode);
- ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
- ret |= dib7000p_write_word(state, 1286, outreg); /* P_Div_active */
+ ret |= dib7000p_write_word(state, 235, smo_mode);
+ ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
+ if (state->version != SOC7090)
+ ret |= dib7000p_write_word(state, 1286, outreg); /* P_Div_active */
return ret;
}
@@ -161,13 +176,13 @@ static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff)
struct dib7000p_state *state = demod->demodulator_priv;
if (state->div_force_off) {
- dprintk( "diversity combination deactivated - forced by COFDM parameters");
+ dprintk("diversity combination deactivated - forced by COFDM parameters");
onoff = 0;
dib7000p_write_word(state, 207, 0);
} else
dib7000p_write_word(state, 207, (state->div_sync_wait << 4) | (1 << 2) | (2 << 0));
- state->div_state = (u8)onoff;
+ state->div_state = (u8) onoff;
if (onoff) {
dib7000p_write_word(state, 204, 6);
@@ -184,37 +199,48 @@ static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff)
static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_power_mode mode)
{
/* by default everything is powered off */
- u16 reg_774 = 0xffff, reg_775 = 0xffff, reg_776 = 0x0007, reg_899 = 0x0003,
- reg_1280 = (0xfe00) | (dib7000p_read_word(state, 1280) & 0x01ff);
+ u16 reg_774 = 0x3fff, reg_775 = 0xffff, reg_776 = 0x0007, reg_899 = 0x0003, reg_1280 = (0xfe00) | (dib7000p_read_word(state, 1280) & 0x01ff);
/* now, depending on the requested mode, we power on */
switch (mode) {
/* power up everything in the demod */
- case DIB7000P_POWER_ALL:
- reg_774 = 0x0000; reg_775 = 0x0000; reg_776 = 0x0; reg_899 = 0x0; reg_1280 &= 0x01ff;
- break;
-
- case DIB7000P_POWER_ANALOG_ADC:
- /* dem, cfg, iqc, sad, agc */
- reg_774 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10) | (1 << 9));
- /* nud */
- reg_776 &= ~((1 << 0));
- /* Dout */
+ case DIB7000P_POWER_ALL:
+ reg_774 = 0x0000;
+ reg_775 = 0x0000;
+ reg_776 = 0x0;
+ reg_899 = 0x0;
+ if (state->version == SOC7090)
+ reg_1280 &= 0x001f;
+ else
+ reg_1280 &= 0x01ff;
+ break;
+
+ case DIB7000P_POWER_ANALOG_ADC:
+ /* dem, cfg, iqc, sad, agc */
+ reg_774 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10) | (1 << 9));
+ /* nud */
+ reg_776 &= ~((1 << 0));
+ /* Dout */
+ if (state->version != SOC7090)
reg_1280 &= ~((1 << 11));
- /* fall through wanted to enable the interfaces */
+ reg_1280 &= ~(1 << 6);
+ /* fall-through intended, to enable the interfaces */
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */
- case DIB7000P_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C */
+ case DIB7000P_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C */
+ if (state->version == SOC7090)
+ reg_1280 &= ~((1 << 7) | (1 << 5));
+ else
reg_1280 &= ~((1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
- break;
+ break;
/* TODO following stuff is just converted from the dib7000-driver - check when is used what */
}
- dib7000p_write_word(state, 774, reg_774);
- dib7000p_write_word(state, 775, reg_775);
- dib7000p_write_word(state, 776, reg_776);
- dib7000p_write_word(state, 899, reg_899);
+ dib7000p_write_word(state, 774, reg_774);
+ dib7000p_write_word(state, 775, reg_775);
+ dib7000p_write_word(state, 776, reg_776);
+ dib7000p_write_word(state, 899, reg_899);
dib7000p_write_word(state, 1280, reg_1280);
return 0;
@@ -222,40 +248,57 @@ static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_p
static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_adc_states no)
{
- u16 reg_908 = dib7000p_read_word(state, 908),
- reg_909 = dib7000p_read_word(state, 909);
+ u16 reg_908 = dib7000p_read_word(state, 908), reg_909 = dib7000p_read_word(state, 909);
+ u16 reg;
switch (no) {
- case DIBX000_SLOW_ADC_ON:
+ case DIBX000_SLOW_ADC_ON:
+ if (state->version == SOC7090) {
+ reg = dib7000p_read_word(state, 1925);
+
+ dib7000p_write_word(state, 1925, reg | (1 << 4) | (1 << 2)); /* en_slowAdc = 1 & reset_sladc = 1 */
+
+ reg = dib7000p_read_word(state, 1925); /* read access needed to make it work... strange */
+ msleep(200);
+ dib7000p_write_word(state, 1925, reg & ~(1 << 4)); /* en_slowAdc = 1 & reset_sladc = 0 */
+
+ reg = dib7000p_read_word(state, 72) & ~((0x3 << 14) | (0x3 << 12));
+ dib7000p_write_word(state, 72, reg | (1 << 14) | (3 << 12) | 524); /* ref = Vin1 => Vbg ; sel = Vin0 or Vin3 ; (Vin2 = Vcm) */
+ } else {
reg_909 |= (1 << 1) | (1 << 0);
dib7000p_write_word(state, 909, reg_909);
reg_909 &= ~(1 << 1);
- break;
+ }
+ break;
- case DIBX000_SLOW_ADC_OFF:
- reg_909 |= (1 << 1) | (1 << 0);
- break;
+ case DIBX000_SLOW_ADC_OFF:
+ if (state->version == SOC7090) {
+ reg = dib7000p_read_word(state, 1925);
+ dib7000p_write_word(state, 1925, (reg & ~(1 << 2)) | (1 << 4)); /* reset_sladc = 1 en_slowAdc = 0 */
+ } else
+ reg_909 |= (1 << 1) | (1 << 0);
+ break;
- case DIBX000_ADC_ON:
- reg_908 &= 0x0fff;
- reg_909 &= 0x0003;
- break;
+ case DIBX000_ADC_ON:
+ reg_908 &= 0x0fff;
+ reg_909 &= 0x0003;
+ break;
- case DIBX000_ADC_OFF: // leave the VBG voltage on
- reg_908 |= (1 << 14) | (1 << 13) | (1 << 12);
- reg_909 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
- break;
+ case DIBX000_ADC_OFF:
+ reg_908 |= (1 << 14) | (1 << 13) | (1 << 12);
+ reg_909 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
+ break;
- case DIBX000_VBG_ENABLE:
- reg_908 &= ~(1 << 15);
- break;
+ case DIBX000_VBG_ENABLE:
+ reg_908 &= ~(1 << 15);
+ break;
- case DIBX000_VBG_DISABLE:
- reg_908 |= (1 << 15);
- break;
+ case DIBX000_VBG_DISABLE:
+ reg_908 |= (1 << 15);
+ break;
- default:
- break;
+ default:
+ break;
}
// dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
@@ -275,17 +318,17 @@ static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
state->current_bandwidth = bw;
if (state->timf == 0) {
- dprintk( "using default timf");
+ dprintk("using default timf");
timf = state->cfg.bw->timf;
} else {
- dprintk( "using updated timf");
+ dprintk("using updated timf");
timf = state->timf;
}
timf = timf * (bw / 50) / 160;
dib7000p_write_word(state, 23, (u16) ((timf >> 16) & 0xffff));
- dib7000p_write_word(state, 24, (u16) ((timf ) & 0xffff));
+ dib7000p_write_word(state, 24, (u16) ((timf) & 0xffff));
return 0;
}
@@ -293,9 +336,12 @@ static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
static int dib7000p_sad_calib(struct dib7000p_state *state)
{
/* internal */
-// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
- dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096
+
+ if (state->version == SOC7090)
+ dib7000p_write_word(state, 74, 2048);
+ else
+ dib7000p_write_word(state, 74, 776);
/* do the calibration */
dib7000p_write_word(state, 73, (1 << 0));
@@ -314,37 +360,91 @@ int dib7000p_set_wbd_ref(struct dvb_frontend *demod, u16 value)
state->wbd_ref = value;
return dib7000p_write_word(state, 105, (dib7000p_read_word(state, 105) & 0xf000) | value);
}
-
EXPORT_SYMBOL(dib7000p_set_wbd_ref);
+
static void dib7000p_reset_pll(struct dib7000p_state *state)
{
struct dibx000_bandwidth_config *bw = &state->cfg.bw[0];
u16 clk_cfg0;
- /* force PLL bypass */
- clk_cfg0 = (1 << 15) | ((bw->pll_ratio & 0x3f) << 9) |
- (bw->modulo << 7) | (bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) |
- (bw->bypclk_div << 2) | (bw->enable_refdiv << 1) | (0 << 0);
+ if (state->version == SOC7090) {
+ dib7000p_write_word(state, 1856, (!bw->pll_reset << 13) | (bw->pll_range << 12) | (bw->pll_ratio << 6) | (bw->pll_prediv));
+
+ while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
+ ;
- dib7000p_write_word(state, 900, clk_cfg0);
+ dib7000p_write_word(state, 1857, dib7000p_read_word(state, 1857) | (!bw->pll_bypass << 15));
+ } else {
+ /* force PLL bypass */
+ clk_cfg0 = (1 << 15) | ((bw->pll_ratio & 0x3f) << 9) |
+ (bw->modulo << 7) | (bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) | (bw->bypclk_div << 2) | (bw->enable_refdiv << 1) | (0 << 0);
+
+ dib7000p_write_word(state, 900, clk_cfg0);
- /* P_pll_cfg */
- dib7000p_write_word(state, 903, (bw->pll_prediv << 5) | (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset);
- clk_cfg0 = (bw->pll_bypass << 15) | (clk_cfg0 & 0x7fff);
- dib7000p_write_word(state, 900, clk_cfg0);
+ /* P_pll_cfg */
+ dib7000p_write_word(state, 903, (bw->pll_prediv << 5) | (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset);
+ clk_cfg0 = (bw->pll_bypass << 15) | (clk_cfg0 & 0x7fff);
+ dib7000p_write_word(state, 900, clk_cfg0);
+ }
- dib7000p_write_word(state, 18, (u16) (((bw->internal*1000) >> 16) & 0xffff));
- dib7000p_write_word(state, 19, (u16) ( (bw->internal*1000 ) & 0xffff));
- dib7000p_write_word(state, 21, (u16) ( (bw->ifreq >> 16) & 0xffff));
- dib7000p_write_word(state, 22, (u16) ( (bw->ifreq ) & 0xffff));
+ dib7000p_write_word(state, 18, (u16) (((bw->internal * 1000) >> 16) & 0xffff));
+ dib7000p_write_word(state, 19, (u16) ((bw->internal * 1000) & 0xffff));
+ dib7000p_write_word(state, 21, (u16) ((bw->ifreq >> 16) & 0xffff));
+ dib7000p_write_word(state, 22, (u16) ((bw->ifreq) & 0xffff));
dib7000p_write_word(state, 72, bw->sad_cfg);
}
+static u32 dib7000p_get_internal_freq(struct dib7000p_state *state)
+{
+ u32 internal = (u32) dib7000p_read_word(state, 18) << 16;
+ internal |= (u32) dib7000p_read_word(state, 19);
+ internal /= 1000;
+
+ return internal;
+}
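+/*
+ * Worked example for the register pair above (values illustrative): regs
+ * 18/19 carry the internal sampling frequency in Hz as two 16-bit halves.
+ * Assuming a 60000 kHz clock, 60000 * 1000 = 60000000 = 0x03938700, so reg 18
+ * holds 0x0393 and reg 19 holds 0x8700; dib7000p_get_internal_freq()
+ * recombines the halves and divides by 1000 to return kHz.
+ */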
+
+int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth_config *bw)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+ u16 reg_1857, reg_1856 = dib7000p_read_word(state, 1856);
+ u8 loopdiv, prediv;
+ u32 internal, xtal;
+
+ /* get back old values */
+ prediv = reg_1856 & 0x3f;
+ loopdiv = (reg_1856 >> 6) & 0x3f;
+
+ if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
+ dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
+ reg_1856 &= 0xf000;
+ reg_1857 = dib7000p_read_word(state, 1857);
+ dib7000p_write_word(state, 1857, reg_1857 & ~(1 << 15));
+
+ dib7000p_write_word(state, 1856, reg_1856 | ((bw->pll_ratio & 0x3f) << 6) | (bw->pll_prediv & 0x3f));
+
+ /* write new system clk into P_sec_len */
+ internal = dib7000p_get_internal_freq(state);
+ xtal = (internal / loopdiv) * prediv;
+ internal = 1000 * (xtal / bw->pll_prediv) * bw->pll_ratio; /* new internal */
+ dib7000p_write_word(state, 18, (u16) ((internal >> 16) & 0xffff));
+ dib7000p_write_word(state, 19, (u16) (internal & 0xffff));
+
+ dib7000p_write_word(state, 1857, reg_1857 | (1 << 15));
+
+ while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
+ dprintk("Waiting for PLL to lock");
+
+ return 0;
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(dib7000p_update_pll);
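+/*
+ * Sketch of the recomputation above (values purely illustrative): with an old
+ * prediv of 1, an old loopdiv of 8 and a current internal frequency of
+ * 96000 kHz, the crystal is recovered as (96000 / 8) * 1 = 12000 kHz; a new
+ * prediv of 1 and ratio of 10 then give 1000 * (12000 / 1) * 10 = 120000000,
+ * which is written back into P_sec_len (registers 18/19).
+ */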
+
static int dib7000p_reset_gpio(struct dib7000p_state *st)
{
/* reset the GPIOs */
- dprintk( "gpio dir: %x: val: %x, pwm_pos: %x",st->gpio_dir, st->gpio_val,st->cfg.gpio_pwm_pos);
+ dprintk("gpio dir: %x: val: %x, pwm_pos: %x", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
dib7000p_write_word(st, 1029, st->gpio_dir);
dib7000p_write_word(st, 1030, st->gpio_val);
@@ -360,13 +460,13 @@ static int dib7000p_reset_gpio(struct dib7000p_state *st)
static int dib7000p_cfg_gpio(struct dib7000p_state *st, u8 num, u8 dir, u8 val)
{
st->gpio_dir = dib7000p_read_word(st, 1029);
- st->gpio_dir &= ~(1 << num); /* reset the direction bit */
- st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */
+ st->gpio_dir &= ~(1 << num); /* reset the direction bit */
+ st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */
dib7000p_write_word(st, 1029, st->gpio_dir);
st->gpio_val = dib7000p_read_word(st, 1030);
- st->gpio_val &= ~(1 << num); /* reset the direction bit */
- st->gpio_val |= (val & 0x01) << num; /* set the new value */
+ st->gpio_val &= ~(1 << num); /* reset the direction bit */
+ st->gpio_val |= (val & 0x01) << num; /* set the new value */
dib7000p_write_word(st, 1030, st->gpio_val);
return 0;
@@ -377,96 +477,94 @@ int dib7000p_set_gpio(struct dvb_frontend *demod, u8 num, u8 dir, u8 val)
struct dib7000p_state *state = demod->demodulator_priv;
return dib7000p_cfg_gpio(state, num, dir, val);
}
-
EXPORT_SYMBOL(dib7000p_set_gpio);
-static u16 dib7000p_defaults[] =
-{
+static u16 dib7000p_defaults[] = {
// auto search configuration
3, 2,
- 0x0004,
- 0x1000,
- 0x0814, /* Equal Lock */
+ 0x0004,
+ 0x1000,
+ 0x0814, /* Equal Lock */
12, 6,
- 0x001b,
- 0x7740,
- 0x005b,
- 0x8d80,
- 0x01c9,
- 0xc380,
- 0x0000,
- 0x0080,
- 0x0000,
- 0x0090,
- 0x0001,
- 0xd4c0,
+ 0x001b,
+ 0x7740,
+ 0x005b,
+ 0x8d80,
+ 0x01c9,
+ 0xc380,
+ 0x0000,
+ 0x0080,
+ 0x0000,
+ 0x0090,
+ 0x0001,
+ 0xd4c0,
1, 26,
- 0x6680, // P_timf_alpha=6, P_corm_alpha=6, P_corm_thres=128 default: 6,4,26
+ 0x6680,
/* set ADC level to -16 */
11, 79,
- (1 << 13) - 825 - 117,
- (1 << 13) - 837 - 117,
- (1 << 13) - 811 - 117,
- (1 << 13) - 766 - 117,
- (1 << 13) - 737 - 117,
- (1 << 13) - 693 - 117,
- (1 << 13) - 648 - 117,
- (1 << 13) - 619 - 117,
- (1 << 13) - 575 - 117,
- (1 << 13) - 531 - 117,
- (1 << 13) - 501 - 117,
+ (1 << 13) - 825 - 117,
+ (1 << 13) - 837 - 117,
+ (1 << 13) - 811 - 117,
+ (1 << 13) - 766 - 117,
+ (1 << 13) - 737 - 117,
+ (1 << 13) - 693 - 117,
+ (1 << 13) - 648 - 117,
+ (1 << 13) - 619 - 117,
+ (1 << 13) - 575 - 117,
+ (1 << 13) - 531 - 117,
+ (1 << 13) - 501 - 117,
1, 142,
- 0x0410, // P_palf_filter_on=1, P_palf_filter_freeze=0, P_palf_alpha_regul=16
+ 0x0410,
/* disable power smoothing */
8, 145,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
1, 154,
- 1 << 13, // P_fft_freq_dir=1, P_fft_nb_to_cut=0
+ 1 << 13,
1, 168,
- 0x0ccd, // P_pha3_thres, default 0x3000
-
-// 1, 169,
-// 0x0010, // P_cti_use_cpe=0, P_cti_use_prog=0, P_cti_win_len=16, default: 0x0010
+ 0x0ccd,
1, 183,
- 0x200f, // P_cspu_regul=512, P_cspu_win_cut=15, default: 0x2005
+ 0x200f,
+
+ 1, 212,
+ 0x169,
5, 187,
- 0x023d, // P_adp_regul_cnt=573, default: 410
- 0x00a4, // P_adp_noise_cnt=
- 0x00a4, // P_adp_regul_ext
- 0x7ff0, // P_adp_noise_ext
- 0x3ccc, // P_adp_fil
+ 0x023d,
+ 0x00a4,
+ 0x00a4,
+ 0x7ff0,
+ 0x3ccc,
1, 198,
- 0x800, // P_equal_thres_wgn
+ 0x800,
1, 222,
- 0x0010, // P_fec_ber_rs_len=2
+ 0x0010,
1, 235,
- 0x0062, // P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard
+ 0x0062,
2, 901,
- 0x0006, // P_clk_cfg1
- (3 << 10) | (1 << 6), // P_divclksel=3 P_divbitsel=1
+ 0x0006,
+ (3 << 10) | (1 << 6),
1, 905,
- 0x2c8e, // Tuner IO bank: max drive (14mA) + divout pads max drive
+ 0x2c8e,
0,
};
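/*
 * Layout of the table above (a sketch of what dib7000p_write_tab() consumes):
 * each group is <count>, <first register>, followed by <count> values written
 * to consecutive registers; a count of 0 terminates the table.  So "3, 2,
 * 0x0004, 0x1000, 0x0814" programs registers 2, 3 and 4.
 */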
@@ -475,51 +573,64 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
{
dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
+ if (state->version == SOC7090)
+ dibx000_reset_i2c_master(&state->i2c_master);
+
dib7000p_set_adc_state(state, DIBX000_VBG_ENABLE);
/* restart all parts */
- dib7000p_write_word(state, 770, 0xffff);
- dib7000p_write_word(state, 771, 0xffff);
- dib7000p_write_word(state, 772, 0x001f);
- dib7000p_write_word(state, 898, 0x0003);
- /* except i2c, sdio, gpio - control interfaces */
- dib7000p_write_word(state, 1280, 0x01fc - ((1 << 7) | (1 << 6) | (1 << 5)) );
-
- dib7000p_write_word(state, 770, 0);
- dib7000p_write_word(state, 771, 0);
- dib7000p_write_word(state, 772, 0);
- dib7000p_write_word(state, 898, 0);
+ dib7000p_write_word(state, 770, 0xffff);
+ dib7000p_write_word(state, 771, 0xffff);
+ dib7000p_write_word(state, 772, 0x001f);
+ dib7000p_write_word(state, 898, 0x0003);
+ dib7000p_write_word(state, 1280, 0x001f - ((1 << 4) | (1 << 3)));
+
+ dib7000p_write_word(state, 770, 0);
+ dib7000p_write_word(state, 771, 0);
+ dib7000p_write_word(state, 772, 0);
+ dib7000p_write_word(state, 898, 0);
dib7000p_write_word(state, 1280, 0);
/* default */
dib7000p_reset_pll(state);
if (dib7000p_reset_gpio(state) != 0)
- dprintk( "GPIO reset was not successful.");
-
- if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
- dprintk( "OUTPUT_MODE could not be reset.");
+ dprintk("GPIO reset was not successful.");
- /* unforce divstr regardless whether i2c enumeration was done or not */
- dib7000p_write_word(state, 1285, dib7000p_read_word(state, 1285) & ~(1 << 1) );
+ if (state->version == SOC7090) {
+ dib7000p_write_word(state, 899, 0);
- dib7000p_set_bandwidth(state, 8000);
+ /* impulse noise */
+ dib7000p_write_word(state, 42, (1<<5) | 3); /* P_iqc_thsat_ipc = 1 ; P_iqc_win2 = 3 */
+ dib7000p_write_word(state, 43, 0x2d4); /*-300 fag P_iqc_dect_min = -280 */
+ dib7000p_write_word(state, 44, 300); /* 300 fag P_iqc_dect_min = +280 */
+ dib7000p_write_word(state, 273, (1<<6) | 30);
+ }
+ if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
+ dprintk("OUTPUT_MODE could not be reset.");
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
dib7000p_sad_calib(state);
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_OFF);
- // P_iqc_alpha_pha, P_iqc_alpha_amp_dcc_alpha, ...
- if(state->cfg.tuner_is_baseband)
- dib7000p_write_word(state, 36,0x0755);
- else
- dib7000p_write_word(state, 36,0x1f55);
+ /* unforce divstr regardless of whether i2c enumeration was done or not */
+ dib7000p_write_word(state, 1285, dib7000p_read_word(state, 1285) & ~(1 << 1));
+
+ dib7000p_set_bandwidth(state, 8000);
+
+ if (state->version == SOC7090) {
+ dib7000p_write_word(state, 36, 0x5755);/* P_iqc_impnc_on =1 & P_iqc_corr_inh = 1 for impulsive noise */
+ } else {
+ if (state->cfg.tuner_is_baseband)
+ dib7000p_write_word(state, 36, 0x0755);
+ else
+ dib7000p_write_word(state, 36, 0x1f55);
+ }
dib7000p_write_tab(state, dib7000p_defaults);
dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
-
return 0;
}
@@ -527,9 +638,9 @@ static void dib7000p_pll_clk_cfg(struct dib7000p_state *state)
{
u16 tmp = 0;
tmp = dib7000p_read_word(state, 903);
- dib7000p_write_word(state, 903, (tmp | 0x1)); //pwr-up pll
+ dib7000p_write_word(state, 903, (tmp | 0x1));
tmp = dib7000p_read_word(state, 900);
- dib7000p_write_word(state, 900, (tmp & 0x7fff) | (1 << 6)); //use High freq clock
+ dib7000p_write_word(state, 900, (tmp & 0x7fff) | (1 << 6));
}
static void dib7000p_restart_agc(struct dib7000p_state *state)
@@ -543,11 +654,9 @@ static int dib7000p_update_lna(struct dib7000p_state *state)
{
u16 dyn_gain;
- // when there is no LNA to program return immediatly
if (state->cfg.update_lna) {
- // read dyn_gain here (because it is demod-dependent and not fe)
dyn_gain = dib7000p_read_word(state, 394);
- if (state->cfg.update_lna(&state->demod,dyn_gain)) { // LNA has changed
+ if (state->cfg.update_lna(&state->demod, dyn_gain)) {
dib7000p_restart_agc(state);
return 1;
}
@@ -571,24 +680,24 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
}
if (agc == NULL) {
- dprintk( "no valid AGC configuration found for band 0x%02x",band);
+ dprintk("no valid AGC configuration found for band 0x%02x", band);
return -EINVAL;
}
state->current_agc = agc;
/* AGC */
- dib7000p_write_word(state, 75 , agc->setup );
- dib7000p_write_word(state, 76 , agc->inv_gain );
- dib7000p_write_word(state, 77 , agc->time_stabiliz );
+ dib7000p_write_word(state, 75, agc->setup);
+ dib7000p_write_word(state, 76, agc->inv_gain);
+ dib7000p_write_word(state, 77, agc->time_stabiliz);
dib7000p_write_word(state, 100, (agc->alpha_level << 12) | agc->thlock);
// Demod AGC loop configuration
dib7000p_write_word(state, 101, (agc->alpha_mant << 5) | agc->alpha_exp);
- dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp);
+ dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp);
/* AGC continued */
- dprintk( "WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
if (state->wbd_ref != 0)
@@ -598,101 +707,135 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
dib7000p_write_word(state, 106, (agc->wbd_sel << 13) | (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8));
- dib7000p_write_word(state, 107, agc->agc1_max);
- dib7000p_write_word(state, 108, agc->agc1_min);
- dib7000p_write_word(state, 109, agc->agc2_max);
- dib7000p_write_word(state, 110, agc->agc2_min);
- dib7000p_write_word(state, 111, (agc->agc1_pt1 << 8) | agc->agc1_pt2);
- dib7000p_write_word(state, 112, agc->agc1_pt3);
+ dib7000p_write_word(state, 107, agc->agc1_max);
+ dib7000p_write_word(state, 108, agc->agc1_min);
+ dib7000p_write_word(state, 109, agc->agc2_max);
+ dib7000p_write_word(state, 110, agc->agc2_min);
+ dib7000p_write_word(state, 111, (agc->agc1_pt1 << 8) | agc->agc1_pt2);
+ dib7000p_write_word(state, 112, agc->agc1_pt3);
dib7000p_write_word(state, 113, (agc->agc1_slope1 << 8) | agc->agc1_slope2);
- dib7000p_write_word(state, 114, (agc->agc2_pt1 << 8) | agc->agc2_pt2);
+ dib7000p_write_word(state, 114, (agc->agc2_pt1 << 8) | agc->agc2_pt2);
dib7000p_write_word(state, 115, (agc->agc2_slope1 << 8) | agc->agc2_slope2);
return 0;
}
+static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
+{
+ u32 internal = dib7000p_get_internal_freq(state);
+ s32 unit_khz_dds_val = 67108864 / (internal); /* 2**26 / Fsampling is the unit 1KHz offset */
+ u32 abs_offset_khz = ABS(offset_khz);
+ u32 dds = state->cfg.bw->ifreq & 0x1ffffff;
+ u8 invert = !!(state->cfg.bw->ifreq & (1 << 25));
+
+ dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d", offset_khz, internal, invert);
+
+ if (offset_khz < 0)
+ unit_khz_dds_val *= -1;
+
+ /* IF tuner */
+ if (invert)
+ dds -= (abs_offset_khz * unit_khz_dds_val); /* /100 because of /100 on the unit_khz_dds_val line calc for better accuracy */
+ else
+ dds += (abs_offset_khz * unit_khz_dds_val);
+
+ if (abs_offset_khz <= (internal / 2)) { /* Max dds offset is the half of the demod freq */
+ dib7000p_write_word(state, 21, (u16) (((dds >> 16) & 0x1ff) | (0 << 10) | (invert << 9)));
+ dib7000p_write_word(state, 22, (u16) (dds & 0xffff));
+ }
+}
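+/*
+ * Worked example for the DDS step above (numbers illustrative): with an
+ * internal frequency of 60000 kHz, one kHz of offset is 67108864 / 60000 =
+ * 1118 DDS codes (2^26 / Fsampling), so a +500 kHz request adds roughly
+ * 559000 to the programmed dds value, as long as the offset stays below
+ * internal/2.
+ */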
+
static int dib7000p_agc_startup(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
{
struct dib7000p_state *state = demod->demodulator_priv;
int ret = -1;
u8 *agc_state = &state->agc_state;
u8 agc_split;
+ u16 reg;
+ u32 upd_demod_gain_period = 0x1000;
switch (state->agc_state) {
- case 0:
- // set power-up level: interf+analog+AGC
- dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
+ case 0:
+ dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
+ if (state->version == SOC7090) {
+ reg = dib7000p_read_word(state, 0x79b) & 0xff00;
+ dib7000p_write_word(state, 0x79a, upd_demod_gain_period & 0xFFFF); /* lsb */
+ dib7000p_write_word(state, 0x79b, reg | (1 << 14) | ((upd_demod_gain_period >> 16) & 0xFF));
+
+ /* enable adc i & q */
+ reg = dib7000p_read_word(state, 0x780);
+ dib7000p_write_word(state, 0x780, (reg | (0x3)) & (~(1 << 7)));
+ } else {
dib7000p_set_adc_state(state, DIBX000_ADC_ON);
dib7000p_pll_clk_cfg(state);
+ }
- if (dib7000p_set_agc_config(state, BAND_OF_FREQUENCY(ch->frequency/1000)) != 0)
- return -1;
-
- ret = 7;
- (*agc_state)++;
- break;
+ if (dib7000p_set_agc_config(state, BAND_OF_FREQUENCY(ch->frequency / 1000)) != 0)
+ return -1;
- case 1:
- // AGC initialization
- if (state->cfg.agc_control)
- state->cfg.agc_control(&state->demod, 1);
-
- dib7000p_write_word(state, 78, 32768);
- if (!state->current_agc->perform_agc_softsplit) {
- /* we are using the wbd - so slow AGC startup */
- /* force 0 split on WBD and restart AGC */
- dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | (1 << 8));
- (*agc_state)++;
- ret = 5;
- } else {
- /* default AGC startup */
- (*agc_state) = 4;
- /* wait AGC rough lock time */
- ret = 7;
- }
+ dib7000p_set_dds(state, 0);
+ ret = 7;
+ (*agc_state)++;
+ break;
- dib7000p_restart_agc(state);
- break;
+ case 1:
+ if (state->cfg.agc_control)
+ state->cfg.agc_control(&state->demod, 1);
- case 2: /* fast split search path after 5sec */
- dib7000p_write_word(state, 75, state->current_agc->setup | (1 << 4)); /* freeze AGC loop */
- dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (2 << 9) | (0 << 8)); /* fast split search 0.25kHz */
+ dib7000p_write_word(state, 78, 32768);
+ if (!state->current_agc->perform_agc_softsplit) {
+ /* we are using the wbd - so slow AGC startup */
+ /* force 0 split on WBD and restart AGC */
+ dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | (1 << 8));
(*agc_state)++;
- ret = 14;
- break;
+ ret = 5;
+ } else {
+ /* default AGC startup */
+ (*agc_state) = 4;
+ /* wait AGC rough lock time */
+ ret = 7;
+ }
- case 3: /* split search ended */
- agc_split = (u8)dib7000p_read_word(state, 396); /* store the split value for the next time */
- dib7000p_write_word(state, 78, dib7000p_read_word(state, 394)); /* set AGC gain start value */
+ dib7000p_restart_agc(state);
+ break;
- dib7000p_write_word(state, 75, state->current_agc->setup); /* std AGC loop */
- dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | agc_split); /* standard split search */
+ case 2: /* fast split search path after 5sec */
+ dib7000p_write_word(state, 75, state->current_agc->setup | (1 << 4)); /* freeze AGC loop */
+ dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (2 << 9) | (0 << 8)); /* fast split search 0.25kHz */
+ (*agc_state)++;
+ ret = 14;
+ break;
- dib7000p_restart_agc(state);
+ case 3: /* split search ended */
+ agc_split = (u8) dib7000p_read_word(state, 396); /* store the split value for the next time */
+ dib7000p_write_word(state, 78, dib7000p_read_word(state, 394)); /* set AGC gain start value */
- dprintk( "SPLIT %p: %hd", demod, agc_split);
+ dib7000p_write_word(state, 75, state->current_agc->setup); /* std AGC loop */
+ dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | agc_split); /* standard split search */
- (*agc_state)++;
- ret = 5;
- break;
+ dib7000p_restart_agc(state);
- case 4: /* LNA startup */
- // wait AGC accurate lock time
- ret = 7;
+ dprintk("SPLIT %p: %hd", demod, agc_split);
- if (dib7000p_update_lna(state))
- // wait only AGC rough lock time
- ret = 5;
- else // nothing was done, go to the next state
- (*agc_state)++;
- break;
+ (*agc_state)++;
+ ret = 5;
+ break;
- case 5:
- if (state->cfg.agc_control)
- state->cfg.agc_control(&state->demod, 0);
+ case 4: /* LNA startup */
+ ret = 7;
+
+ if (dib7000p_update_lna(state))
+ ret = 5;
+ else
(*agc_state)++;
- break;
- default:
- break;
+ break;
+
+ case 5:
+ if (state->cfg.agc_control)
+ state->cfg.agc_control(&state->demod, 0);
+ (*agc_state)++;
+ break;
+ default:
+ break;
}
return ret;
}
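/*
 * Summary of the state machine above: the caller's retune loop (see
 * dib7000p_set_frontend) keeps stepping it, treating the return value as a
 * wait hint and -1 as "startup finished".  State 0 powers up the demod (with
 * a SOC7090-specific ADC/gain-update path), states 1-3 initialise the AGC and
 * run the WBD split search, state 4 handles the LNA and state 5 releases the
 * external agc_control hook.
 */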
@@ -703,45 +846,89 @@ static void dib7000p_update_timf(struct dib7000p_state *state)
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000p_write_word(state, 23, (u16) (timf >> 16));
dib7000p_write_word(state, 24, (u16) (timf & 0xffff));
- dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->cfg.bw->timf);
+ dprintk("updated timf_frequency: %d (default: %d)", state->timf, state->cfg.bw->timf);
+
+}
+u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+ switch (op) {
+ case DEMOD_TIMF_SET:
+ state->timf = timf;
+ break;
+ case DEMOD_TIMF_UPDATE:
+ dib7000p_update_timf(state);
+ break;
+ case DEMOD_TIMF_GET:
+ break;
+ }
+ dib7000p_set_bandwidth(state, state->current_bandwidth);
+ return state->timf;
}
+EXPORT_SYMBOL(dib7000p_ctrl_timf);
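+/*
+ * Usage sketch (caller and variable names hypothetical): a board driver that
+ * has cached a previously measured timing frequency could restore it with
+ * dib7000p_ctrl_timf(fe, DEMOD_TIMF_SET, stored_timf), or read the current
+ * value back via dib7000p_ctrl_timf(fe, DEMOD_TIMF_GET, 0); in every case the
+ * bandwidth-dependent registers are re-derived through
+ * dib7000p_set_bandwidth().
+ */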
static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_frontend_parameters *ch, u8 seq)
{
u16 value, est[4];
- dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
/* nfft, guard, qam, alpha */
value = 0;
switch (ch->u.ofdm.transmission_mode) {
- case TRANSMISSION_MODE_2K: value |= (0 << 7); break;
- case TRANSMISSION_MODE_4K: value |= (2 << 7); break;
- default:
- case TRANSMISSION_MODE_8K: value |= (1 << 7); break;
+ case TRANSMISSION_MODE_2K:
+ value |= (0 << 7);
+ break;
+ case TRANSMISSION_MODE_4K:
+ value |= (2 << 7);
+ break;
+ default:
+ case TRANSMISSION_MODE_8K:
+ value |= (1 << 7);
+ break;
}
switch (ch->u.ofdm.guard_interval) {
- case GUARD_INTERVAL_1_32: value |= (0 << 5); break;
- case GUARD_INTERVAL_1_16: value |= (1 << 5); break;
- case GUARD_INTERVAL_1_4: value |= (3 << 5); break;
- default:
- case GUARD_INTERVAL_1_8: value |= (2 << 5); break;
+ case GUARD_INTERVAL_1_32:
+ value |= (0 << 5);
+ break;
+ case GUARD_INTERVAL_1_16:
+ value |= (1 << 5);
+ break;
+ case GUARD_INTERVAL_1_4:
+ value |= (3 << 5);
+ break;
+ default:
+ case GUARD_INTERVAL_1_8:
+ value |= (2 << 5);
+ break;
}
switch (ch->u.ofdm.constellation) {
- case QPSK: value |= (0 << 3); break;
- case QAM_16: value |= (1 << 3); break;
- default:
- case QAM_64: value |= (2 << 3); break;
+ case QPSK:
+ value |= (0 << 3);
+ break;
+ case QAM_16:
+ value |= (1 << 3);
+ break;
+ default:
+ case QAM_64:
+ value |= (2 << 3);
+ break;
}
switch (HIERARCHY_1) {
- case HIERARCHY_2: value |= 2; break;
- case HIERARCHY_4: value |= 4; break;
- default:
- case HIERARCHY_1: value |= 1; break;
+ case HIERARCHY_2:
+ value |= 2;
+ break;
+ case HIERARCHY_4:
+ value |= 4;
+ break;
+ default:
+ case HIERARCHY_1:
+ value |= 1;
+ break;
}
dib7000p_write_word(state, 0, value);
- dib7000p_write_word(state, 5, (seq << 4) | 1); /* do not force tps, search list 0 */
+ dib7000p_write_word(state, 5, (seq << 4) | 1); /* do not force tps, search list 0 */
/* P_dintl_native, P_dintlv_inv, P_hrch, P_code_rate, P_select_hp */
value = 0;
@@ -752,39 +939,63 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
if (1 == 1)
value |= 1;
switch ((ch->u.ofdm.hierarchy_information == 0 || 1 == 1) ? ch->u.ofdm.code_rate_HP : ch->u.ofdm.code_rate_LP) {
- case FEC_2_3: value |= (2 << 1); break;
- case FEC_3_4: value |= (3 << 1); break;
- case FEC_5_6: value |= (5 << 1); break;
- case FEC_7_8: value |= (7 << 1); break;
- default:
- case FEC_1_2: value |= (1 << 1); break;
+ case FEC_2_3:
+ value |= (2 << 1);
+ break;
+ case FEC_3_4:
+ value |= (3 << 1);
+ break;
+ case FEC_5_6:
+ value |= (5 << 1);
+ break;
+ case FEC_7_8:
+ value |= (7 << 1);
+ break;
+ default:
+ case FEC_1_2:
+ value |= (1 << 1);
+ break;
}
dib7000p_write_word(state, 208, value);
/* offset loop parameters */
- dib7000p_write_word(state, 26, 0x6680); // timf(6xxx)
- dib7000p_write_word(state, 32, 0x0003); // pha_off_max(xxx3)
- dib7000p_write_word(state, 29, 0x1273); // isi
- dib7000p_write_word(state, 33, 0x0005); // sfreq(xxx5)
+ dib7000p_write_word(state, 26, 0x6680);
+ dib7000p_write_word(state, 32, 0x0003);
+ dib7000p_write_word(state, 29, 0x1273);
+ dib7000p_write_word(state, 33, 0x0005);
/* P_dvsy_sync_wait */
switch (ch->u.ofdm.transmission_mode) {
- case TRANSMISSION_MODE_8K: value = 256; break;
- case TRANSMISSION_MODE_4K: value = 128; break;
- case TRANSMISSION_MODE_2K:
- default: value = 64; break;
+ case TRANSMISSION_MODE_8K:
+ value = 256;
+ break;
+ case TRANSMISSION_MODE_4K:
+ value = 128;
+ break;
+ case TRANSMISSION_MODE_2K:
+ default:
+ value = 64;
+ break;
}
switch (ch->u.ofdm.guard_interval) {
- case GUARD_INTERVAL_1_16: value *= 2; break;
- case GUARD_INTERVAL_1_8: value *= 4; break;
- case GUARD_INTERVAL_1_4: value *= 8; break;
- default:
- case GUARD_INTERVAL_1_32: value *= 1; break;
+ case GUARD_INTERVAL_1_16:
+ value *= 2;
+ break;
+ case GUARD_INTERVAL_1_8:
+ value *= 4;
+ break;
+ case GUARD_INTERVAL_1_4:
+ value *= 8;
+ break;
+ default:
+ case GUARD_INTERVAL_1_32:
+ value *= 1;
+ break;
}
if (state->cfg.diversity_delay == 0)
- state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo
+ state->div_sync_wait = (value * 3) / 2 + 48;
else
- state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo
+ state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay;
/* deactivate the possibility of diversity reception if extended interleaver */
state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K;
@@ -792,24 +1003,24 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
/* channel estimation fine configuration */
switch (ch->u.ofdm.constellation) {
- case QAM_64:
- est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
- est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
- est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
- est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
- break;
- case QAM_16:
- est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
- est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
- est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
- est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
- break;
- default:
- est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
- est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
- est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
- est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
- break;
+ case QAM_64:
+ est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
+ est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
+ est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
+ est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
+ break;
+ case QAM_16:
+ est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
+ est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
+ est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
+ est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
+ break;
+ default:
+ est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
+ est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
+ est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
+ est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
+ break;
}
for (value = 0; value < 4; value++)
dib7000p_write_word(state, 187 + value, est[value]);
@@ -820,14 +1031,15 @@ static int dib7000p_autosearch_start(struct dvb_frontend *demod, struct dvb_fron
struct dib7000p_state *state = demod->demodulator_priv;
struct dvb_frontend_parameters schan;
u32 value, factor;
+ u32 internal = dib7000p_get_internal_freq(state);
schan = *ch;
schan.u.ofdm.constellation = QAM_64;
- schan.u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
- schan.u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
- schan.u.ofdm.code_rate_HP = FEC_2_3;
- schan.u.ofdm.code_rate_LP = FEC_3_4;
- schan.u.ofdm.hierarchy_information = 0;
+ schan.u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ schan.u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ schan.u.ofdm.code_rate_HP = FEC_2_3;
+ schan.u.ofdm.code_rate_LP = FEC_3_4;
+ schan.u.ofdm.hierarchy_information = 0;
dib7000p_set_channel(state, &schan, 7);
@@ -837,16 +1049,15 @@ static int dib7000p_autosearch_start(struct dvb_frontend *demod, struct dvb_fron
else
factor = 6;
- // always use the setting for 8MHz here lock_time for 7,6 MHz are longer
- value = 30 * state->cfg.bw->internal * factor;
- dib7000p_write_word(state, 6, (u16) ((value >> 16) & 0xffff)); // lock0 wait time
- dib7000p_write_word(state, 7, (u16) (value & 0xffff)); // lock0 wait time
- value = 100 * state->cfg.bw->internal * factor;
- dib7000p_write_word(state, 8, (u16) ((value >> 16) & 0xffff)); // lock1 wait time
- dib7000p_write_word(state, 9, (u16) (value & 0xffff)); // lock1 wait time
- value = 500 * state->cfg.bw->internal * factor;
- dib7000p_write_word(state, 10, (u16) ((value >> 16) & 0xffff)); // lock2 wait time
- dib7000p_write_word(state, 11, (u16) (value & 0xffff)); // lock2 wait time
+ value = 30 * internal * factor;
+ dib7000p_write_word(state, 6, (u16) ((value >> 16) & 0xffff));
+ dib7000p_write_word(state, 7, (u16) (value & 0xffff));
+ value = 100 * internal * factor;
+ dib7000p_write_word(state, 8, (u16) ((value >> 16) & 0xffff));
+ dib7000p_write_word(state, 9, (u16) (value & 0xffff));
+ value = 500 * internal * factor;
+ dib7000p_write_word(state, 10, (u16) ((value >> 16) & 0xffff));
+ dib7000p_write_word(state, 11, (u16) (value & 0xffff));
value = dib7000p_read_word(state, 0);
dib7000p_write_word(state, 0, (u16) ((1 << 9) | value));
@@ -861,101 +1072,101 @@ static int dib7000p_autosearch_is_irq(struct dvb_frontend *demod)
struct dib7000p_state *state = demod->demodulator_priv;
u16 irq_pending = dib7000p_read_word(state, 1284);
- if (irq_pending & 0x1) // failed
+ if (irq_pending & 0x1)
return 1;
- if (irq_pending & 0x2) // succeeded
+ if (irq_pending & 0x2)
return 2;
- return 0; // still pending
+ return 0;
}
static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32 bw)
{
- static s16 notch[]={16143, 14402, 12238, 9713, 6902, 3888, 759, -2392};
- static u8 sine [] ={0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 17, 19, 20, 22,
- 24, 25, 27, 28, 30, 31, 33, 34, 36, 38, 39, 41, 42, 44, 45, 47, 48, 50, 51,
- 53, 55, 56, 58, 59, 61, 62, 64, 65, 67, 68, 70, 71, 73, 74, 76, 77, 79, 80,
- 82, 83, 85, 86, 88, 89, 91, 92, 94, 95, 97, 98, 99, 101, 102, 104, 105,
- 107, 108, 109, 111, 112, 114, 115, 117, 118, 119, 121, 122, 123, 125, 126,
- 128, 129, 130, 132, 133, 134, 136, 137, 138, 140, 141, 142, 144, 145, 146,
- 147, 149, 150, 151, 152, 154, 155, 156, 157, 159, 160, 161, 162, 164, 165,
- 166, 167, 168, 170, 171, 172, 173, 174, 175, 177, 178, 179, 180, 181, 182,
- 183, 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
- 199, 200, 201, 202, 203, 204, 205, 206, 207, 207, 208, 209, 210, 211, 212,
- 213, 214, 215, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224,
- 225, 226, 227, 227, 228, 229, 229, 230, 231, 231, 232, 233, 233, 234, 235,
- 235, 236, 237, 237, 238, 238, 239, 239, 240, 241, 241, 242, 242, 243, 243,
- 244, 244, 245, 245, 245, 246, 246, 247, 247, 248, 248, 248, 249, 249, 249,
- 250, 250, 250, 251, 251, 251, 252, 252, 252, 252, 253, 253, 253, 253, 254,
- 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255};
+ static s16 notch[] = { 16143, 14402, 12238, 9713, 6902, 3888, 759, -2392 };
+ static u8 sine[] = { 0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 17, 19, 20, 22,
+ 24, 25, 27, 28, 30, 31, 33, 34, 36, 38, 39, 41, 42, 44, 45, 47, 48, 50, 51,
+ 53, 55, 56, 58, 59, 61, 62, 64, 65, 67, 68, 70, 71, 73, 74, 76, 77, 79, 80,
+ 82, 83, 85, 86, 88, 89, 91, 92, 94, 95, 97, 98, 99, 101, 102, 104, 105,
+ 107, 108, 109, 111, 112, 114, 115, 117, 118, 119, 121, 122, 123, 125, 126,
+ 128, 129, 130, 132, 133, 134, 136, 137, 138, 140, 141, 142, 144, 145, 146,
+ 147, 149, 150, 151, 152, 154, 155, 156, 157, 159, 160, 161, 162, 164, 165,
+ 166, 167, 168, 170, 171, 172, 173, 174, 175, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224,
+ 225, 226, 227, 227, 228, 229, 229, 230, 231, 231, 232, 233, 233, 234, 235,
+ 235, 236, 237, 237, 238, 238, 239, 239, 240, 241, 241, 242, 242, 243, 243,
+ 244, 244, 245, 245, 245, 246, 246, 247, 247, 248, 248, 248, 249, 249, 249,
+ 250, 250, 250, 251, 251, 251, 252, 252, 252, 252, 253, 253, 253, 253, 254,
+ 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255
+ };
u32 xtal = state->cfg.bw->xtal_hz / 1000;
int f_rel = DIV_ROUND_CLOSEST(rf_khz, xtal) * xtal - rf_khz;
int k;
- int coef_re[8],coef_im[8];
+ int coef_re[8], coef_im[8];
int bw_khz = bw;
u32 pha;
- dprintk( "relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal);
-
+ dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal);
- if (f_rel < -bw_khz/2 || f_rel > bw_khz/2)
+ if (f_rel < -bw_khz / 2 || f_rel > bw_khz / 2)
return;
bw_khz /= 100;
- dib7000p_write_word(state, 142 ,0x0610);
+ dib7000p_write_word(state, 142, 0x0610);
for (k = 0; k < 8; k++) {
- pha = ((f_rel * (k+1) * 112 * 80/bw_khz) /1000) & 0x3ff;
+ pha = ((f_rel * (k + 1) * 112 * 80 / bw_khz) / 1000) & 0x3ff;
- if (pha==0) {
+ if (pha == 0) {
coef_re[k] = 256;
coef_im[k] = 0;
- } else if(pha < 256) {
- coef_re[k] = sine[256-(pha&0xff)];
- coef_im[k] = sine[pha&0xff];
+ } else if (pha < 256) {
+ coef_re[k] = sine[256 - (pha & 0xff)];
+ coef_im[k] = sine[pha & 0xff];
} else if (pha == 256) {
coef_re[k] = 0;
coef_im[k] = 256;
} else if (pha < 512) {
- coef_re[k] = -sine[pha&0xff];
- coef_im[k] = sine[256 - (pha&0xff)];
+ coef_re[k] = -sine[pha & 0xff];
+ coef_im[k] = sine[256 - (pha & 0xff)];
} else if (pha == 512) {
coef_re[k] = -256;
coef_im[k] = 0;
} else if (pha < 768) {
- coef_re[k] = -sine[256-(pha&0xff)];
- coef_im[k] = -sine[pha&0xff];
+ coef_re[k] = -sine[256 - (pha & 0xff)];
+ coef_im[k] = -sine[pha & 0xff];
} else if (pha == 768) {
coef_re[k] = 0;
coef_im[k] = -256;
} else {
- coef_re[k] = sine[pha&0xff];
- coef_im[k] = -sine[256 - (pha&0xff)];
+ coef_re[k] = sine[pha & 0xff];
+ coef_im[k] = -sine[256 - (pha & 0xff)];
}
coef_re[k] *= notch[k];
- coef_re[k] += (1<<14);
- if (coef_re[k] >= (1<<24))
- coef_re[k] = (1<<24) - 1;
- coef_re[k] /= (1<<15);
+ coef_re[k] += (1 << 14);
+ if (coef_re[k] >= (1 << 24))
+ coef_re[k] = (1 << 24) - 1;
+ coef_re[k] /= (1 << 15);
coef_im[k] *= notch[k];
- coef_im[k] += (1<<14);
- if (coef_im[k] >= (1<<24))
- coef_im[k] = (1<<24)-1;
- coef_im[k] /= (1<<15);
+ coef_im[k] += (1 << 14);
+ if (coef_im[k] >= (1 << 24))
+ coef_im[k] = (1 << 24) - 1;
+ coef_im[k] /= (1 << 15);
- dprintk( "PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]);
+ dprintk("PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]);
dib7000p_write_word(state, 143, (0 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
dib7000p_write_word(state, 144, coef_im[k] & 0x3ff);
dib7000p_write_word(state, 143, (1 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
}
- dib7000p_write_word(state,143 ,0);
+ dib7000p_write_word(state, 143, 0);
}
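/*
 * Note on the arithmetic above: pha is the per-tap phase in 1/1024ths of a
 * turn and sine[] is a quarter-wave sine table scaled to 0..255, so the four
 * quadrant branches effectively build cos(pha) and sin(pha) for the real and
 * imaginary parts, which are then scaled by notch[k], rounded, and written as
 * 10-bit coefficients through registers 143/144.
 */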
static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
@@ -976,11 +1187,11 @@ static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
tmp = (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3);
if (state->sfn_workaround_active) {
- dprintk( "SFN workaround is active");
+ dprintk("SFN workaround is active");
tmp |= (1 << 9);
- dib7000p_write_word(state, 166, 0x4000); // P_pha3_force_pha_shift
+ dib7000p_write_word(state, 166, 0x4000);
} else {
- dib7000p_write_word(state, 166, 0x0000); // P_pha3_force_pha_shift
+ dib7000p_write_word(state, 166, 0x0000);
}
dib7000p_write_word(state, 29, tmp);
@@ -993,51 +1204,72 @@ static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
tmp = (6 << 8) | 0x80;
switch (ch->u.ofdm.transmission_mode) {
- case TRANSMISSION_MODE_2K: tmp |= (7 << 12); break;
- case TRANSMISSION_MODE_4K: tmp |= (8 << 12); break;
- default:
- case TRANSMISSION_MODE_8K: tmp |= (9 << 12); break;
+ case TRANSMISSION_MODE_2K:
+ tmp |= (2 << 12);
+ break;
+ case TRANSMISSION_MODE_4K:
+ tmp |= (3 << 12);
+ break;
+ default:
+ case TRANSMISSION_MODE_8K:
+ tmp |= (4 << 12);
+ break;
}
- dib7000p_write_word(state, 26, tmp); /* timf_a(6xxx) */
+ dib7000p_write_word(state, 26, tmp); /* timf_a(6xxx) */
/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
tmp = (0 << 4);
switch (ch->u.ofdm.transmission_mode) {
- case TRANSMISSION_MODE_2K: tmp |= 0x6; break;
- case TRANSMISSION_MODE_4K: tmp |= 0x7; break;
- default:
- case TRANSMISSION_MODE_8K: tmp |= 0x8; break;
+ case TRANSMISSION_MODE_2K:
+ tmp |= 0x6;
+ break;
+ case TRANSMISSION_MODE_4K:
+ tmp |= 0x7;
+ break;
+ default:
+ case TRANSMISSION_MODE_8K:
+ tmp |= 0x8;
+ break;
}
- dib7000p_write_word(state, 32, tmp);
+ dib7000p_write_word(state, 32, tmp);
/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
tmp = (0 << 4);
switch (ch->u.ofdm.transmission_mode) {
- case TRANSMISSION_MODE_2K: tmp |= 0x6; break;
- case TRANSMISSION_MODE_4K: tmp |= 0x7; break;
- default:
- case TRANSMISSION_MODE_8K: tmp |= 0x8; break;
+ case TRANSMISSION_MODE_2K:
+ tmp |= 0x6;
+ break;
+ case TRANSMISSION_MODE_4K:
+ tmp |= 0x7;
+ break;
+ default:
+ case TRANSMISSION_MODE_8K:
+ tmp |= 0x8;
+ break;
}
- dib7000p_write_word(state, 33, tmp);
+ dib7000p_write_word(state, 33, tmp);
- tmp = dib7000p_read_word(state,509);
+ tmp = dib7000p_read_word(state, 509);
if (!((tmp >> 6) & 0x1)) {
/* restart the fec */
- tmp = dib7000p_read_word(state,771);
+ tmp = dib7000p_read_word(state, 771);
dib7000p_write_word(state, 771, tmp | (1 << 1));
dib7000p_write_word(state, 771, tmp);
- msleep(10);
- tmp = dib7000p_read_word(state,509);
+ msleep(40);
+ tmp = dib7000p_read_word(state, 509);
}
-
// we achieved a lock - it's time to update the osc freq
- if ((tmp >> 6) & 0x1)
+ if ((tmp >> 6) & 0x1) {
dib7000p_update_timf(state);
+ /* P_timf_alpha += 2 */
+ tmp = dib7000p_read_word(state, 26);
+ dib7000p_write_word(state, 26, (tmp & ~(0xf << 12)) | ((((tmp >> 12) & 0xf) + 5) << 12));
+ }
if (state->cfg.spur_protect)
- dib7000p_spur_protect(state, ch->frequency/1000, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000p_spur_protect(state, ch->frequency / 1000, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
- dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
return 0;
}
@@ -1046,63 +1278,82 @@ static int dib7000p_wakeup(struct dvb_frontend *demod)
struct dib7000p_state *state = demod->demodulator_priv;
dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
+ if (state->version == SOC7090)
+ dib7000p_sad_calib(state);
return 0;
}
static int dib7000p_sleep(struct dvb_frontend *demod)
{
struct dib7000p_state *state = demod->demodulator_priv;
+ if (state->version == SOC7090)
+ return dib7090_set_output_mode(demod, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
return dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
}
static int dib7000p_identify(struct dib7000p_state *st)
{
u16 value;
- dprintk( "checking demod on I2C address: %d (%x)",
- st->i2c_addr, st->i2c_addr);
+ dprintk("checking demod on I2C address: %d (%x)", st->i2c_addr, st->i2c_addr);
if ((value = dib7000p_read_word(st, 768)) != 0x01b3) {
- dprintk( "wrong Vendor ID (read=0x%x)",value);
+ dprintk("wrong Vendor ID (read=0x%x)", value);
return -EREMOTEIO;
}
if ((value = dib7000p_read_word(st, 769)) != 0x4000) {
- dprintk( "wrong Device ID (%x)",value);
+ dprintk("wrong Device ID (%x)", value);
return -EREMOTEIO;
}
return 0;
}
-
-static int dib7000p_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib7000p_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib7000p_state *state = fe->demodulator_priv;
- u16 tps = dib7000p_read_word(state,463);
+ u16 tps = dib7000p_read_word(state, 463);
fep->inversion = INVERSION_AUTO;
fep->u.ofdm.bandwidth = BANDWIDTH_TO_INDEX(state->current_bandwidth);
switch ((tps >> 8) & 0x3) {
- case 0: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; break;
- case 1: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; break;
- /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */
+ case 0:
+ fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */
}
switch (tps & 0x3) {
- case 0: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break;
- case 1: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break;
- case 2: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break;
- case 3: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break;
+ case 0:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ break;
}
switch ((tps >> 14) & 0x3) {
- case 0: fep->u.ofdm.constellation = QPSK; break;
- case 1: fep->u.ofdm.constellation = QAM_16; break;
- case 2:
- default: fep->u.ofdm.constellation = QAM_64; break;
+ case 0:
+ fep->u.ofdm.constellation = QPSK;
+ break;
+ case 1:
+ fep->u.ofdm.constellation = QAM_16;
+ break;
+ case 2:
+ default:
+ fep->u.ofdm.constellation = QAM_64;
+ break;
}
/* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
@@ -1110,22 +1361,42 @@ static int dib7000p_get_frontend(struct dvb_frontend* fe,
fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
switch ((tps >> 5) & 0x7) {
- case 1: fep->u.ofdm.code_rate_HP = FEC_1_2; break;
- case 2: fep->u.ofdm.code_rate_HP = FEC_2_3; break;
- case 3: fep->u.ofdm.code_rate_HP = FEC_3_4; break;
- case 5: fep->u.ofdm.code_rate_HP = FEC_5_6; break;
- case 7:
- default: fep->u.ofdm.code_rate_HP = FEC_7_8; break;
+ case 1:
+ fep->u.ofdm.code_rate_HP = FEC_1_2;
+ break;
+ case 2:
+ fep->u.ofdm.code_rate_HP = FEC_2_3;
+ break;
+ case 3:
+ fep->u.ofdm.code_rate_HP = FEC_3_4;
+ break;
+ case 5:
+ fep->u.ofdm.code_rate_HP = FEC_5_6;
+ break;
+ case 7:
+ default:
+ fep->u.ofdm.code_rate_HP = FEC_7_8;
+ break;
}
switch ((tps >> 2) & 0x7) {
- case 1: fep->u.ofdm.code_rate_LP = FEC_1_2; break;
- case 2: fep->u.ofdm.code_rate_LP = FEC_2_3; break;
- case 3: fep->u.ofdm.code_rate_LP = FEC_3_4; break;
- case 5: fep->u.ofdm.code_rate_LP = FEC_5_6; break;
- case 7:
- default: fep->u.ofdm.code_rate_LP = FEC_7_8; break;
+ case 1:
+ fep->u.ofdm.code_rate_LP = FEC_1_2;
+ break;
+ case 2:
+ fep->u.ofdm.code_rate_LP = FEC_2_3;
+ break;
+ case 3:
+ fep->u.ofdm.code_rate_LP = FEC_3_4;
+ break;
+ case 5:
+ fep->u.ofdm.code_rate_LP = FEC_5_6;
+ break;
+ case 7:
+ default:
+ fep->u.ofdm.code_rate_LP = FEC_7_8;
+ break;
}
/* native interleaver: (dib7000p_read_word(state, 464) >> 5) & 0x1 */
@@ -1133,15 +1404,18 @@ static int dib7000p_get_frontend(struct dvb_frontend* fe,
return 0;
}
-static int dib7000p_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib7000p_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib7000p_state *state = fe->demodulator_priv;
int time, ret;
- dib7000p_set_output_mode(state, OUTMODE_HIGH_Z);
+ if (state->version == SOC7090) {
+ dib7090_set_diversity_in(fe, 0);
+ dib7090_set_output_mode(fe, OUTMODE_HIGH_Z);
+ } else
+ dib7000p_set_output_mode(state, OUTMODE_HIGH_Z);
- /* maybe the parameter has been changed */
+ /* maybe the parameter has been changed */
state->sfn_workaround_active = buggy_sfn_workaround;
if (fe->ops.tuner_ops.set_params)
@@ -1156,9 +1430,7 @@ static int dib7000p_set_frontend(struct dvb_frontend* fe,
} while (time != -1);
if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
- fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO ||
- fep->u.ofdm.constellation == QAM_AUTO ||
- fep->u.ofdm.code_rate_HP == FEC_AUTO) {
+ fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) {
int i = 800, found;
dib7000p_autosearch_start(fe, fep);
@@ -1167,9 +1439,9 @@ static int dib7000p_set_frontend(struct dvb_frontend* fe,
found = dib7000p_autosearch_is_irq(fe);
} while (found == 0 && i--);
- dprintk("autosearch returns: %d",found);
+ dprintk("autosearch returns: %d", found);
if (found == 0 || found == 1)
- return 0; // no channel found
+ return 0;
dib7000p_get_frontend(fe, fep);
}
@@ -1177,11 +1449,15 @@ static int dib7000p_set_frontend(struct dvb_frontend* fe,
ret = dib7000p_tune(fe, fep);
/* make this a config parameter */
- dib7000p_set_output_mode(state, state->cfg.output_mode);
- return ret;
+ if (state->version == SOC7090)
+ dib7090_set_output_mode(fe, state->cfg.output_mode);
+ else
+ dib7000p_set_output_mode(state, state->cfg.output_mode);
+
+ return ret;
}
-static int dib7000p_read_status(struct dvb_frontend *fe, fe_status_t *stat)
+static int dib7000p_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 lock = dib7000p_read_word(state, 509);
@@ -1196,27 +1472,27 @@ static int dib7000p_read_status(struct dvb_frontend *fe, fe_status_t *stat)
*stat |= FE_HAS_VITERBI;
if (lock & 0x0010)
*stat |= FE_HAS_SYNC;
- if ((lock & 0x0038) == 0x38)
+ if ((lock & 0x0038) == 0x38)
*stat |= FE_HAS_LOCK;
return 0;
}
-static int dib7000p_read_ber(struct dvb_frontend *fe, u32 *ber)
+static int dib7000p_read_ber(struct dvb_frontend *fe, u32 * ber)
{
struct dib7000p_state *state = fe->demodulator_priv;
*ber = (dib7000p_read_word(state, 500) << 16) | dib7000p_read_word(state, 501);
return 0;
}
-static int dib7000p_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
+static int dib7000p_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
{
struct dib7000p_state *state = fe->demodulator_priv;
*unc = dib7000p_read_word(state, 506);
return 0;
}
-static int dib7000p_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+static int dib7000p_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 val = dib7000p_read_word(state, 394);
@@ -1224,7 +1500,7 @@ static int dib7000p_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
}
-static int dib7000p_read_snr(struct dvb_frontend* fe, u16 *snr)
+static int dib7000p_read_snr(struct dvb_frontend *fe, u16 * snr)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 val;
@@ -1240,19 +1516,17 @@ static int dib7000p_read_snr(struct dvb_frontend* fe, u16 *snr)
noise_exp -= 0x40;
signal_mant = (val >> 6) & 0xFF;
- signal_exp = (val & 0x3F);
+ signal_exp = (val & 0x3F);
if ((signal_exp & 0x20) != 0)
signal_exp -= 0x40;
if (signal_mant != 0)
- result = intlog10(2) * 10 * signal_exp + 10 *
- intlog10(signal_mant);
+ result = intlog10(2) * 10 * signal_exp + 10 * intlog10(signal_mant);
else
result = intlog10(2) * 10 * signal_exp - 100;
if (noise_mant != 0)
- result -= intlog10(2) * 10 * noise_exp + 10 *
- intlog10(noise_mant);
+ result -= intlog10(2) * 10 * noise_exp + 10 * intlog10(noise_mant);
else
result -= intlog10(2) * 10 * noise_exp - 100;
@@ -1260,7 +1534,7 @@ static int dib7000p_read_snr(struct dvb_frontend* fe, u16 *snr)
return 0;
}
-static int dib7000p_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
+static int dib7000p_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 1000;
return 0;
@@ -1270,6 +1544,7 @@ static void dib7000p_release(struct dvb_frontend *demod)
{
struct dib7000p_state *st = demod->demodulator_priv;
dibx000_exit_i2c_master(&st->i2c_master);
+ i2c_del_adapter(&st->dib7090_tuner_adap);
kfree(st);
}
@@ -1277,8 +1552,8 @@ int dib7000pc_detection(struct i2c_adapter *i2c_adap)
{
u8 tx[2], rx[2];
struct i2c_msg msg[2] = {
- { .addr = 18 >> 1, .flags = 0, .buf = tx, .len = 2 },
- { .addr = 18 >> 1, .flags = I2C_M_RD, .buf = rx, .len = 2 },
+ {.addr = 18 >> 1, .flags = 0, .buf = tx, .len = 2},
+ {.addr = 18 >> 1, .flags = I2C_M_RD, .buf = rx, .len = 2},
};
tx[0] = 0x03;
@@ -1303,7 +1578,7 @@ int dib7000pc_detection(struct i2c_adapter *i2c_adap)
}
EXPORT_SYMBOL(dib7000pc_detection);
-struct i2c_adapter * dib7000p_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating)
+struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating)
{
struct dib7000p_state *st = demod->demodulator_priv;
return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
@@ -1312,19 +1587,19 @@ EXPORT_SYMBOL(dib7000p_get_i2c_master);
int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
- struct dib7000p_state *state = fe->demodulator_priv;
- u16 val = dib7000p_read_word(state, 235) & 0xffef;
- val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
- return dib7000p_write_word(state, 235, val);
+ struct dib7000p_state *state = fe->demodulator_priv;
+ u16 val = dib7000p_read_word(state, 235) & 0xffef;
+ val |= (onoff & 0x1) << 4;
+ dprintk("PID filter enabled %d", onoff);
+ return dib7000p_write_word(state, 235, val);
}
EXPORT_SYMBOL(dib7000p_pid_filter_ctrl);
int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
- struct dib7000p_state *state = fe->demodulator_priv;
- dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
- return dib7000p_write_word(state, 241 + id, onoff ? (1 << 13) | pid : 0);
+ struct dib7000p_state *state = fe->demodulator_priv;
+ dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+ return dib7000p_write_word(state, 241 + id, onoff ? (1 << 13) | pid : 0);
}
EXPORT_SYMBOL(dib7000p_pid_filter);
@@ -1340,16 +1615,19 @@ int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defau
dpst->i2c_adap = i2c;
- for (k = no_of_demods-1; k >= 0; k--) {
+ for (k = no_of_demods - 1; k >= 0; k--) {
dpst->cfg = cfg[k];
/* designated i2c address */
- new_addr = (0x40 + k) << 1;
+ if (cfg[k].default_i2c_addr != 0)
+ new_addr = cfg[k].default_i2c_addr + (k << 1);
+ else
+ new_addr = (0x40 + k) << 1;
dpst->i2c_addr = new_addr;
- dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
if (dib7000p_identify(dpst) != 0) {
dpst->i2c_addr = default_addr;
- dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
if (dib7000p_identify(dpst) != 0) {
dprintk("DiB7000P #%d: not identified\n", k);
kfree(dpst);
@@ -1368,7 +1646,10 @@ int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defau
for (k = 0; k < no_of_demods; k++) {
dpst->cfg = cfg[k];
- dpst->i2c_addr = (0x40 + k) << 1;
+ if (cfg[k].default_i2c_addr != 0)
+ dpst->i2c_addr = (cfg[k].default_i2c_addr + k) << 1;
+ else
+ dpst->i2c_addr = (0x40 + k) << 1;
// unforce divstr
dib7000p_write_word(dpst, 1285, dpst->i2c_addr << 2);
@@ -1382,8 +1663,613 @@ int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defau
}
EXPORT_SYMBOL(dib7000p_i2c_enumeration);
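/*
 * Address sketch for the enumeration change above (numbers illustrative):
 * with two demods and no default_i2c_addr the first-pass probe addresses stay
 * at 0x80 and 0x82 ((0x40 + k) << 1); a board-supplied default_i2c_addr of
 * 0x90 makes them 0x90 and 0x92 (default_i2c_addr + (k << 1)) instead.
 */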
+static const s32 lut_1000ln_mant[] = {
+ 6908, 6956, 7003, 7047, 7090, 7131, 7170, 7208, 7244, 7279, 7313, 7346, 7377, 7408, 7438, 7467, 7495, 7523, 7549, 7575, 7600
+};
+
+static s32 dib7000p_get_adc_power(struct dvb_frontend *fe)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+ u32 tmp_val = 0, exp = 0, mant = 0;
+ s32 pow_i;
+ u16 buf[2];
+ u8 ix = 0;
+
+ buf[0] = dib7000p_read_word(state, 0x184);
+ buf[1] = dib7000p_read_word(state, 0x185);
+ pow_i = (buf[0] << 16) | buf[1];
+ dprintk("raw pow_i = %d", pow_i);
+
+ tmp_val = pow_i;
+ while (tmp_val >>= 1)
+ exp++;
+
+ mant = (pow_i * 1000 / (1 << exp));
+ dprintk(" mant = %d exp = %d", mant / 1000, exp);
+
+ ix = (u8) ((mant - 1000) / 100); /* index of the LUT */
+ dprintk(" ix = %d", ix);
+
+ pow_i = (lut_1000ln_mant[ix] + 693 * (exp - 20) - 6908);
+ pow_i = (pow_i << 8) / 1000;
+ dprintk(" pow_i = %d", pow_i);
+
+ return pow_i;
+}
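+/*
+ * Note on the fixed-point log above: lut_1000ln_mant[] holds values of
+ * 1000*ln(x) (6908 is roughly 1000*ln(1000) and 693 roughly 1000*ln(2)), so
+ * pow_i approximates a scaled natural logarithm of the ADC power estimate,
+ * which the final (<<8)/1000 step converts into a <<8 fixed-point number.
+ */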
+
+static int map_addr_to_serpar_number(struct i2c_msg *msg)
+{
+ if ((msg->buf[0] <= 15))
+ msg->buf[0] -= 1;
+ else if (msg->buf[0] == 17)
+ msg->buf[0] = 15;
+ else if (msg->buf[0] == 16)
+ msg->buf[0] = 17;
+ else if (msg->buf[0] == 19)
+ msg->buf[0] = 16;
+ else if (msg->buf[0] >= 21 && msg->buf[0] <= 25)
+ msg->buf[0] -= 3;
+ else if (msg->buf[0] == 28)
+ msg->buf[0] = 23;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
+ u8 n_overflow = 1;
+ u16 i = 1000;
+ u16 serpar_num = msg[0].buf[0];
+
+ while (n_overflow == 1 && i) {
+ n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
+ i--;
+ if (i == 0)
+ dprintk("Tuner ITF: write busy (overflow)");
+ }
+ dib7000p_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
+ dib7000p_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
+
+ return num;
+}
+
+static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
+ u8 n_overflow = 1, n_empty = 1;
+ u16 i = 1000;
+ u16 serpar_num = msg[0].buf[0];
+ u16 read_word;
+
+ while (n_overflow == 1 && i) {
+ n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
+ i--;
+ if (i == 0)
+ dprintk("TunerITF: read busy (overflow)");
+ }
+ dib7000p_write_word(state, 1985, (0 << 6) | (serpar_num & 0x3f));
+
+ i = 1000;
+ while (n_empty == 1 && i) {
+ n_empty = dib7000p_read_word(state, 1984) & 0x1;
+ i--;
+ if (i == 0)
+ dprintk("TunerITF: read busy (empty)");
+ }
+ read_word = dib7000p_read_word(state, 1987);
+ msg[1].buf[0] = (read_word >> 8) & 0xff;
+ msg[1].buf[1] = (read_word) & 0xff;
+
+ return num;
+}
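+/*
+ * The two helpers above follow the SERPAR handshake visible in the code:
+ * wait for the overflow bit in register 1984 to clear, write the command to
+ * register 1985 (bit 6 set for a write) with the data in 1986; for a read,
+ * wait for the empty bit in 1984 to clear and fetch the 16-bit result from
+ * register 1987.
+ */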
+
+static int w7090p_tuner_rw_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ if (map_addr_to_serpar_number(&msg[0]) == 0) { /* else = Tuner regs to ignore : DIG_CFG, CTRL_RF_LT, PLL_CFG, PWM1_REG, ADCCLK, DIG_CFG_3; SLEEP_EN... */
+ if (num == 1) { /* write */
+ return w7090p_tuner_write_serpar(i2c_adap, msg, 1);
+ } else { /* read */
+ return w7090p_tuner_read_serpar(i2c_adap, msg, 2);
+ }
+ }
+ return num;
+}
+
+int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num, u16 apb_address)
+{
+ struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
+ u16 word;
+
+ if (num == 1) { /* write */
+ dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2])));
+ } else {
+ word = dib7000p_read_word(state, apb_address);
+ msg[1].buf[0] = (word >> 8) & 0xff;
+ msg[1].buf[1] = (word) & 0xff;
+ }
+
+ return num;
+}
+
+static int dib7090_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
+
+ u16 apb_address = 0, word;
+ int i = 0;
+ switch (msg[0].buf[0]) {
+ case 0x12:
+ apb_address = 1920;
+ break;
+ case 0x14:
+ apb_address = 1921;
+ break;
+ case 0x24:
+ apb_address = 1922;
+ break;
+ case 0x1a:
+ apb_address = 1923;
+ break;
+ case 0x22:
+ apb_address = 1924;
+ break;
+ case 0x33:
+ apb_address = 1926;
+ break;
+ case 0x34:
+ apb_address = 1927;
+ break;
+ case 0x35:
+ apb_address = 1928;
+ break;
+ case 0x36:
+ apb_address = 1929;
+ break;
+ case 0x37:
+ apb_address = 1930;
+ break;
+ case 0x38:
+ apb_address = 1931;
+ break;
+ case 0x39:
+ apb_address = 1932;
+ break;
+ case 0x2a:
+ apb_address = 1935;
+ break;
+ case 0x2b:
+ apb_address = 1936;
+ break;
+ case 0x2c:
+ apb_address = 1937;
+ break;
+ case 0x2d:
+ apb_address = 1938;
+ break;
+ case 0x2e:
+ apb_address = 1939;
+ break;
+ case 0x2f:
+ apb_address = 1940;
+ break;
+ case 0x30:
+ apb_address = 1941;
+ break;
+ case 0x31:
+ apb_address = 1942;
+ break;
+ case 0x32:
+ apb_address = 1943;
+ break;
+ case 0x3e:
+ apb_address = 1944;
+ break;
+ case 0x3f:
+ apb_address = 1945;
+ break;
+ case 0x40:
+ apb_address = 1948;
+ break;
+ case 0x25:
+ apb_address = 914;
+ break;
+ case 0x26:
+ apb_address = 915;
+ break;
+ case 0x27:
+ apb_address = 916;
+ break;
+ case 0x28:
+ apb_address = 917;
+ break;
+ case 0x1d:
+ i = ((dib7000p_read_word(state, 72) >> 12) & 0x3);
+ word = dib7000p_read_word(state, 384 + i);
+ msg[1].buf[0] = (word >> 8) & 0xff;
+ msg[1].buf[1] = (word) & 0xff;
+ return num;
+ case 0x1f:
+ if (num == 1) { /* write */
+ word = (u16) ((msg[0].buf[1] << 8) | msg[0].buf[2]);
+ word &= 0x3;
+ word = (dib7000p_read_word(state, 72) & ~(3 << 12)) | (word << 12);
+ dib7000p_write_word(state, 72, word); /* Set the proper input */
+ return num;
+ }
+ }
+
+ if (apb_address != 0) /* R/W access via APB */
+ return dib7090p_rw_on_apb(i2c_adap, msg, num, apb_address);
+ else /* R/W access via SERPAR */
+ return w7090p_tuner_rw_serpar(i2c_adap, msg, num);
+
+ return 0;
+}
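dib7090_tuner_xfer() routes each tuner register access one of three ways: the switch maps a fixed set of register addresses onto APB registers (1920..1948 and 914..917), addresses 0x1d and 0x1f are handled inline through the input-selection field in register 72, and anything else falls through to the SERPAR mailbox after map_addr_to_serpar_number() renumbers it. A condensed sketch of that decision, with the APB table reduced to a few entries for illustration:

#include <stdint.h>
#include <stdio.h>

/* Reduced illustration of the routing in dib7090_tuner_xfer(); only a couple
 * of APB entries are shown here, the full map lives in the switch above. */
static int16_t tuner_reg_to_apb(uint8_t reg)
{
	switch (reg) {
	case 0x12: return 1920;
	case 0x14: return 1921;
	case 0x25: return 914;
	/* ... remaining entries as in the driver ... */
	default:   return -1;	/* not APB-mapped: take the SERPAR path */
	}
}

int main(void)
{
	uint8_t regs[] = { 0x12, 0x25, 0x07 };
	unsigned i;

	for (i = 0; i < sizeof(regs); i++) {
		int16_t apb = tuner_reg_to_apb(regs[i]);
		if (apb >= 0)
			printf("0x%02x -> APB register %d\n", regs[i], apb);
		else
			printf("0x%02x -> SERPAR path\n", regs[i]);
	}
	return 0;
}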
+
+static u32 dib7000p_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm dib7090_tuner_xfer_algo = {
+ .master_xfer = dib7090_tuner_xfer,
+ .functionality = dib7000p_i2c_func,
+};
+
+struct i2c_adapter *dib7090_get_i2c_tuner(struct dvb_frontend *fe)
+{
+ struct dib7000p_state *st = fe->demodulator_priv;
+ return &st->dib7090_tuner_adap;
+}
+EXPORT_SYMBOL(dib7090_get_i2c_tuner);
+
+static int dib7090_host_bus_drive(struct dib7000p_state *state, u8 drive)
+{
+ u16 reg;
+
+ /* drive host bus 2, 3, 4 */
+ reg = dib7000p_read_word(state, 1798) & ~((0x7) | (0x7 << 6) | (0x7 << 12));
+ reg |= (drive << 12) | (drive << 6) | drive;
+ dib7000p_write_word(state, 1798, reg);
+
+ /* drive host bus 5,6 */
+ reg = dib7000p_read_word(state, 1799) & ~((0x7 << 2) | (0x7 << 8));
+ reg |= (drive << 8) | (drive << 2);
+ dib7000p_write_word(state, 1799, reg);
+
+ /* drive host bus 7, 8, 9 */
+ reg = dib7000p_read_word(state, 1800) & ~((0x7) | (0x7 << 6) | (0x7 << 12));
+ reg |= (drive << 12) | (drive << 6) | drive;
+ dib7000p_write_word(state, 1800, reg);
+
+ /* drive host bus 10, 11 */
+ reg = dib7000p_read_word(state, 1801) & ~((0x7 << 2) | (0x7 << 8));
+ reg |= (drive << 8) | (drive << 2);
+ dib7000p_write_word(state, 1801, reg);
+
+ /* drive host bus 12, 13, 14 */
+ reg = dib7000p_read_word(state, 1802) & ~((0x7) | (0x7 << 6) | (0x7 << 12));
+ reg |= (drive << 12) | (drive << 6) | drive;
+ dib7000p_write_word(state, 1802, reg);
+
+ return 0;
+}
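dib7090_host_bus_drive() programs the same 3-bit pad drive value into every host-bus pin: registers 1798..1802 each pack two or three 3-bit fields (at bit offsets 0/6/12 or 2/8), and the helper masks the old fields out before OR-ing the new value in. A minimal sketch of one such read-modify-write, assuming a 16-bit register image:

#include <stdint.h>
#include <stdio.h>

/* Insert a 3-bit drive value at the three field offsets used for registers
 * 1798/1800/1802 above (read-modify-write on a 16-bit register image). */
static uint16_t set_drive3(uint16_t reg, uint8_t drive)
{
	reg &= ~((0x7) | (0x7 << 6) | (0x7 << 12));
	reg |= (drive & 0x7) | ((drive & 0x7) << 6) | ((drive & 0x7) << 12);
	return reg;
}

int main(void)
{
	printf("0x%04x\n", set_drive3(0xffff, 1));	/* 0xffff with drive=1 -> 0x9e79 */
	return 0;
}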
+
+static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 syncSize)
+{
+ u32 quantif = 3;
+ u32 nom = (insertExtSynchro * P_Kin + syncSize);
+ u32 denom = P_Kout;
+ u32 syncFreq = ((nom << quantif) / denom);
+
+ if ((syncFreq & ((1 << quantif) - 1)) != 0)
+ syncFreq = (syncFreq >> quantif) + 1;
+ else
+ syncFreq = (syncFreq >> quantif);
+
+ if (syncFreq != 0)
+ syncFreq = syncFreq - 1;
+
+ return syncFreq;
+}
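dib7090_calcSyncFreq() evaluates (insertExtSynchro * P_Kin + syncSize) / P_Kout with three fractional bits (quantif = 3), rounds up whenever any fractional bit is set (fractions below 1/8 are lost to the truncation), and finally subtracts one. For the MPEG Rx configuration used further down (P_Kin = 8, P_Kout = 5, insertExtSynchro = 0, syncSize = 8) that is (8 << 3) / 5 = 12, rounded up to 2, giving syncFreq = 1. A standalone check of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same rounding as dib7090_calcSyncFreq(): quotient with 3 fractional bits,
 * rounded up when any fractional bit is set, then decremented. */
static uint32_t calc_sync_freq(uint32_t p_kin, uint32_t p_kout,
			       uint32_t insert_ext_synchro, uint32_t sync_size)
{
	const uint32_t quantif = 3;
	uint32_t nom = insert_ext_synchro * p_kin + sync_size;
	uint32_t freq = (nom << quantif) / p_kout;

	if (freq & ((1 << quantif) - 1))
		freq = (freq >> quantif) + 1;
	else
		freq >>= quantif;
	return freq ? freq - 1 : 0;
}

int main(void)
{
	/* MPEG input case from dib7090_enMpegInput(): prints 1 */
	printf("%u\n", calc_sync_freq(8, 5, 0, 8));
	return 0;
}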
+
+static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize)
+{
+ u8 index_buf;
+ u16 rx_copy_buf[22];
+
+ dprintk("Configure DibStream Tx");
+ for (index_buf = 0; index_buf < 22; index_buf++)
+ rx_copy_buf[index_buf] = dib7000p_read_word(state, 1536+index_buf);
+
+ dib7000p_write_word(state, 1615, 1);
+ dib7000p_write_word(state, 1603, P_Kin);
+ dib7000p_write_word(state, 1605, P_Kout);
+ dib7000p_write_word(state, 1606, insertExtSynchro);
+ dib7000p_write_word(state, 1608, synchroMode);
+ dib7000p_write_word(state, 1609, (syncWord >> 16) & 0xffff);
+ dib7000p_write_word(state, 1610, syncWord & 0xffff);
+ dib7000p_write_word(state, 1612, syncSize);
+ dib7000p_write_word(state, 1615, 0);
+
+ for (index_buf = 0; index_buf < 22; index_buf++)
+ dib7000p_write_word(state, 1536+index_buf, rx_copy_buf[index_buf]);
+
+ return 0;
+}
+
+static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 synchroMode, u32 insertExtSynchro, u32 syncWord, u32 syncSize,
+ u32 dataOutRate)
+{
+ u32 syncFreq;
+
+ dprintk("Configure DibStream Rx");
+ if ((P_Kin != 0) && (P_Kout != 0)) {
+ syncFreq = dib7090_calcSyncFreq(P_Kin, P_Kout, insertExtSynchro, syncSize);
+ dib7000p_write_word(state, 1542, syncFreq);
+ }
+ dib7000p_write_word(state, 1554, 1);
+ dib7000p_write_word(state, 1536, P_Kin);
+ dib7000p_write_word(state, 1537, P_Kout);
+ dib7000p_write_word(state, 1539, synchroMode);
+ dib7000p_write_word(state, 1540, (syncWord >> 16) & 0xffff);
+ dib7000p_write_word(state, 1541, syncWord & 0xffff);
+ dib7000p_write_word(state, 1543, syncSize);
+ dib7000p_write_word(state, 1544, dataOutRate);
+ dib7000p_write_word(state, 1554, 0);
+
+ return 0;
+}
+
+static int dib7090_enDivOnHostBus(struct dib7000p_state *state)
+{
+ u16 reg;
+
+ dprintk("Enable Diversity on host bus");
+ reg = (1 << 8) | (1 << 5);
+ dib7000p_write_word(state, 1288, reg);
+
+ return dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
+}
+
+static int dib7090_enAdcOnHostBus(struct dib7000p_state *state)
+{
+ u16 reg;
+
+ dprintk("Enable ADC on host bus");
+ reg = (1 << 7) | (1 << 5);
+ dib7000p_write_word(state, 1288, reg);
+
+ return dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
+}
+
+static int dib7090_enMpegOnHostBus(struct dib7000p_state *state)
+{
+ u16 reg;
+
+ dprintk("Enable Mpeg on host bus");
+ reg = (1 << 9) | (1 << 5);
+ dib7000p_write_word(state, 1288, reg);
+
+ return dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
+}
+
+static int dib7090_enMpegInput(struct dib7000p_state *state)
+{
+ dprintk("Enable Mpeg input");
+ return dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0); /*outputRate = 8 */
+}
+
+static int dib7090_enMpegMux(struct dib7000p_state *state, u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
+{
+ u16 reg = (1 << 7) | ((pulseWidth & 0x1f) << 2) | ((enSerialMode & 0x1) << 1) | (enSerialClkDiv2 & 0x1);
+
+ dprintk("Enable Mpeg mux");
+ dib7000p_write_word(state, 1287, reg);
+
+ reg &= ~(1 << 7);
+ dib7000p_write_word(state, 1287, reg);
+
+ reg = (1 << 4);
+ dib7000p_write_word(state, 1288, reg);
+
+ return 0;
+}
+
+static int dib7090_disableMpegMux(struct dib7000p_state *state)
+{
+ u16 reg;
+
+ dprintk("Disable Mpeg mux");
+ dib7000p_write_word(state, 1288, 0);
+
+ reg = dib7000p_read_word(state, 1287);
+ reg &= ~(1 << 7);
+ dib7000p_write_word(state, 1287, reg);
+
+ return 0;
+}
+
+static int dib7090_set_input_mode(struct dvb_frontend *fe, int mode)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+
+ switch (mode) {
+ case INPUT_MODE_DIVERSITY:
+ dprintk("Enable diversity INPUT");
+ dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
+ break;
+ case INPUT_MODE_MPEG:
+ dprintk("Enable Mpeg INPUT");
+ dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0); /*outputRate = 8 */
+ break;
+ case INPUT_MODE_OFF:
+ default:
+ dprintk("Disable INPUT");
+ dib7090_cfg_DibRx(state, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+ return 0;
+}
+
+static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
+{
+ switch (onoff) {
+ case 0: /* only use the internal way - not the diversity input */
+ dib7090_set_input_mode(fe, INPUT_MODE_MPEG);
+ break;
+ case 1: /* both ways */
+ case 2: /* only the diversity input */
+ dib7090_set_input_mode(fe, INPUT_MODE_DIVERSITY);
+ break;
+ }
+
+ return 0;
+}
+
+static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+
+ u16 outreg, smo_mode, fifo_threshold;
+ u8 prefer_mpeg_mux_use = 1;
+ int ret = 0;
+
+ dib7090_host_bus_drive(state, 1);
+
+ fifo_threshold = 1792;
+ smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
+ outreg = dib7000p_read_word(state, 1286) & ~((1 << 10) | (0x7 << 6) | (1 << 1));
+
+ switch (mode) {
+ case OUTMODE_HIGH_Z:
+ outreg = 0;
+ break;
+
+ case OUTMODE_MPEG2_SERIAL:
+ if (prefer_mpeg_mux_use) {
+ dprintk("Sip 7090P setting output mode TS_SERIAL using Mpeg Mux");
+ dib7090_enMpegOnHostBus(state);
+ dib7090_enMpegInput(state);
+ if (state->cfg.enMpegOutput == 1)
+ dib7090_enMpegMux(state, 3, 1, 1);
+
+ } else { /* Use Smooth block */
+ dprintk("Sip 7090P setting output mode TS_SERIAL using Smooth block");
+ dib7090_disableMpegMux(state);
+ dib7000p_write_word(state, 1288, (1 << 6));
+ outreg |= (2 << 6) | (0 << 1);
+ }
+ break;
+
+ case OUTMODE_MPEG2_PAR_GATED_CLK:
+ if (prefer_mpeg_mux_use) {
+ dprintk("Sip 7090P setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dib7090_enMpegOnHostBus(state);
+ dib7090_enMpegInput(state);
+ if (state->cfg.enMpegOutput == 1)
+ dib7090_enMpegMux(state, 2, 0, 0);
+ } else { /* Use Smooth block */
+ dprintk("Sip 7090P setting output mode TS_PARALLEL_GATED using Smooth block");
+ dib7090_disableMpegMux(state);
+ dib7000p_write_word(state, 1288, (1 << 6));
+ outreg |= (0 << 6);
+ }
+ break;
+
+ case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
+ dprintk("Sip 7090P setting output mode TS_PARALLEL_CONT using Smooth block");
+ dib7090_disableMpegMux(state);
+ dib7000p_write_word(state, 1288, (1 << 6));
+ outreg |= (1 << 6);
+ break;
+
+ case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux block */
+ dprintk("Sip 7090P setting output mode TS_FIFO using Smooth block");
+ dib7090_disableMpegMux(state);
+ dib7000p_write_word(state, 1288, (1 << 6));
+ outreg |= (5 << 6);
+ smo_mode |= (3 << 1);
+ fifo_threshold = 512;
+ break;
+
+ case OUTMODE_DIVERSITY:
+ dprintk("Sip 7090P setting output mode MODE_DIVERSITY");
+ dib7090_disableMpegMux(state);
+ dib7090_enDivOnHostBus(state);
+ break;
+
+ case OUTMODE_ANALOG_ADC:
+ dprintk("Sip 7090P setting output mode MODE_ANALOG_ADC");
+ dib7090_enAdcOnHostBus(state);
+ break;
+ }
+
+ if (state->cfg.output_mpeg2_in_188_bytes)
+ smo_mode |= (1 << 5);
+
+ ret |= dib7000p_write_word(state, 235, smo_mode);
+ ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
+ ret |= dib7000p_write_word(state, 1286, outreg | (1 << 10)); /* always set Dout active = 1 !!! */
+
+ return ret;
+}
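Reading dib7090_set_output_mode() back: when the smooth block is used, bits 6..8 of register 1286 select the transport-stream format (0 parallel gated, 1 parallel continuous, 2 serial, 5 FIFO), bit 10 is always written as Dout active, and registers 235/236 take the smoother mode and FIFO threshold; when the MPEG mux is preferred, registers 1287/1288 are programmed through the enMpeg* helpers instead. A small sketch of the register-1286 composition for the smooth-block path, under those assumptions:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the smooth-block outreg composition used above; values match
 * what is written into register 1286 (bit 10 = Dout active, bits 6..8 = format). */
enum smooth_fmt { FMT_PAR_GATED = 0, FMT_PAR_CONT = 1, FMT_SERIAL = 2, FMT_FIFO = 5 };

static uint16_t smooth_outreg(uint16_t old, enum smooth_fmt fmt)
{
	uint16_t outreg = old & ~((1 << 10) | (0x7 << 6) | (1 << 1));

	outreg |= (uint16_t)fmt << 6;
	return outreg | (1 << 10);		/* Dout active */
}

int main(void)
{
	printf("0x%04x\n", smooth_outreg(0, FMT_SERIAL));	/* prints 0x0480 */
	return 0;
}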
+
+int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+ u16 en_cur_state;
+
+ dprintk("sleep dib7090: %d", onoff);
+
+ en_cur_state = dib7000p_read_word(state, 1922);
+
+ if (en_cur_state > 0xff)
+ state->tuner_enable = en_cur_state;
+
+ if (onoff)
+ en_cur_state &= 0x00ff;
+ else {
+ if (state->tuner_enable != 0)
+ en_cur_state = state->tuner_enable;
+ }
+
+ dib7000p_write_word(state, 1922, en_cur_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(dib7090_tuner_sleep);
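dib7090_tuner_sleep() uses the upper byte of register 1922 as the tuner enable bits: it remembers the current value whenever any of those bits is set, clears the upper byte to put the tuner to sleep, and restores the saved value on wake-up. A compact stand-alone sketch of that save/mask/restore sequence (the register is replaced by a plain variable):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the save/mask/restore logic in dib7090_tuner_sleep(). */
static uint16_t reg1922 = 0x0300;	/* pretend two enable bits are set */
static uint16_t saved_enable;

static void tuner_sleep(int onoff)
{
	uint16_t cur = reg1922;

	if (cur > 0xff)
		saved_enable = cur;	/* remember the enable bits before sleeping */

	if (onoff)
		cur &= 0x00ff;		/* sleep: clear the enable byte */
	else if (saved_enable)
		cur = saved_enable;	/* wake: restore what was saved */

	reg1922 = cur;
}

int main(void)
{
	tuner_sleep(1); printf("asleep: 0x%04x\n", reg1922);	/* 0x0000 */
	tuner_sleep(0); printf("awake:  0x%04x\n", reg1922);	/* 0x0300 */
	return 0;
}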
+
+int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart)
+{
+ dprintk("AGC restart callback: %d", restart);
+ return 0;
+}
+EXPORT_SYMBOL(dib7090_agc_restart);
+
+int dib7090_get_adc_power(struct dvb_frontend *fe)
+{
+ return dib7000p_get_adc_power(fe);
+}
+EXPORT_SYMBOL(dib7090_get_adc_power);
+
+int dib7090_slave_reset(struct dvb_frontend *fe)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+ u16 reg;
+
+ reg = dib7000p_read_word(state, 1794);
+ dib7000p_write_word(state, 1794, reg | (4 << 12));
+
+ dib7000p_write_word(state, 1032, 0xffff);
+ return 0;
+}
+EXPORT_SYMBOL(dib7090_slave_reset);
+
static struct dvb_frontend_ops dib7000p_ops;
-struct dvb_frontend * dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
+struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
struct dvb_frontend *demod;
struct dib7000p_state *st;
@@ -1400,28 +2286,41 @@ struct dvb_frontend * dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
/* Ensure the output mode remains at the previous default if it's
* not specifically set by the caller.
*/
- if ((st->cfg.output_mode != OUTMODE_MPEG2_SERIAL) &&
- (st->cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
+ if ((st->cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
st->cfg.output_mode = OUTMODE_MPEG2_FIFO;
- demod = &st->demod;
+ demod = &st->demod;
demod->demodulator_priv = st;
memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops));
- dib7000p_write_word(st, 1287, 0x0003); /* sram lead in, rdy */
+ dib7000p_write_word(st, 1287, 0x0003); /* sram lead in, rdy */
if (dib7000p_identify(st) != 0)
goto error;
+ st->version = dib7000p_read_word(st, 897);
+
/* FIXME: make sure the dev.parent field is initialized, or else
- request_firmware() will hit an OOPS (this should be moved somewhere
- more common) */
- st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
+ request_firmware() will hit an OOPS (this should be moved somewhere
+ more common) */
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
+ /* init 7090 tuner adapter */
+ strncpy(st->dib7090_tuner_adap.name, "DiB7090 tuner interface", sizeof(st->dib7090_tuner_adap.name));
+ st->dib7090_tuner_adap.algo = &dib7090_tuner_xfer_algo;
+ st->dib7090_tuner_adap.algo_data = NULL;
+ st->dib7090_tuner_adap.dev.parent = st->i2c_adap->dev.parent;
+ i2c_set_adapdata(&st->dib7090_tuner_adap, st);
+ i2c_add_adapter(&st->dib7090_tuner_adap);
+
dib7000p_demod_reset(st);
+ if (st->version == SOC7090) {
+ dib7090_set_output_mode(demod, st->cfg.output_mode);
+ dib7090_set_diversity_in(demod, 0);
+ }
+
return demod;
error:
@@ -1432,37 +2331,35 @@ EXPORT_SYMBOL(dib7000p_attach);
static struct dvb_frontend_ops dib7000p_ops = {
.info = {
- .name = "DiBcom 7000PC",
- .type = FE_OFDM,
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
- .caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
- FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_RECOVER |
- FE_CAN_HIERARCHY_AUTO,
- },
-
- .release = dib7000p_release,
-
- .init = dib7000p_wakeup,
- .sleep = dib7000p_sleep,
-
- .set_frontend = dib7000p_set_frontend,
- .get_tune_settings = dib7000p_fe_get_tune_settings,
- .get_frontend = dib7000p_get_frontend,
-
- .read_status = dib7000p_read_status,
- .read_ber = dib7000p_read_ber,
+ .name = "DiBcom 7000PC",
+ .type = FE_OFDM,
+ .frequency_min = 44250000,
+ .frequency_max = 867250000,
+ .frequency_stepsize = 62500,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .release = dib7000p_release,
+
+ .init = dib7000p_wakeup,
+ .sleep = dib7000p_sleep,
+
+ .set_frontend = dib7000p_set_frontend,
+ .get_tune_settings = dib7000p_fe_get_tune_settings,
+ .get_frontend = dib7000p_get_frontend,
+
+ .read_status = dib7000p_read_status,
+ .read_ber = dib7000p_read_ber,
.read_signal_strength = dib7000p_read_signal_strength,
- .read_snr = dib7000p_read_snr,
- .read_ucblocks = dib7000p_read_unc_blocks,
+ .read_snr = dib7000p_read_snr,
+ .read_ucblocks = dib7000p_read_unc_blocks,
};
+MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
index da17345bf5bd..0179f9474bac 100644
--- a/drivers/media/dvb/frontends/dib7000p.h
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -33,59 +33,54 @@ struct dib7000p_config {
int (*agc_control) (struct dvb_frontend *, u8 before);
u8 output_mode;
- u8 disable_sample_and_hold : 1;
+ u8 disable_sample_and_hold:1;
- u8 enable_current_mirror : 1;
- u8 diversity_delay;
+ u8 enable_current_mirror:1;
+ u16 diversity_delay;
+ u8 default_i2c_addr;
+ u8 enMpegOutput:1;
};
#define DEFAULT_DIB7000P_I2C_ADDRESS 18
#if defined(CONFIG_DVB_DIB7000P) || (defined(CONFIG_DVB_DIB7000P_MODULE) && \
- defined(MODULE))
-extern struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap,
- u8 i2c_addr,
- struct dib7000p_config *cfg);
-extern struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *,
- enum dibx000_i2c_interface,
- int);
-extern int dib7000p_i2c_enumeration(struct i2c_adapter *i2c,
- int no_of_demods, u8 default_addr,
- struct dib7000p_config cfg[]);
+ defined(MODULE))
+extern struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
+extern struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int);
+extern int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[]);
extern int dib7000p_set_gpio(struct dvb_frontend *, u8 num, u8 dir, u8 val);
extern int dib7000p_set_wbd_ref(struct dvb_frontend *, u16 value);
extern int dib7000pc_detection(struct i2c_adapter *i2c_adap);
extern int dib7000p_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
extern int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
+extern int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth_config *bw);
+extern u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf);
+extern int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart);
+extern int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff);
+extern int dib7090_get_adc_power(struct dvb_frontend *fe);
+extern struct i2c_adapter *dib7090_get_i2c_tuner(struct dvb_frontend *fe);
+extern int dib7090_slave_reset(struct dvb_frontend *fe);
#else
-static inline
-struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
- struct dib7000p_config *cfg)
+static inline struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline
-struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *fe,
- enum dibx000_i2c_interface i,
- int x)
+static inline struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface i, int x)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline int dib7000p_i2c_enumeration(struct i2c_adapter *i2c,
- int no_of_demods, u8 default_addr,
- struct dib7000p_config cfg[])
+static inline int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
-static inline int dib7000p_set_gpio(struct dvb_frontend *fe,
- u8 num, u8 dir, u8 val)
+static inline int dib7000p_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
@@ -102,16 +97,59 @@ static inline int dib7000pc_detection(struct i2c_adapter *i2c_adap)
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
+
static inline int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
}
static inline int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, uint8_t onoff)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth_config *bw)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
+
+static inline int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib7090_get_adc_power(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline struct i2c_adapter *dib7090_get_i2c_tuner(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline int dib7090_slave_reset(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
}
#endif
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index df17b91b3250..3e20aa8db23b 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -22,6 +22,7 @@
#define LAYER_C 3
#define FE_CALLBACK_TIME_NEVER 0xffffffff
+#define MAX_NUMBER_OF_FRONTENDS 6
static int debug;
module_param(debug, int, 0644);
@@ -37,7 +38,6 @@ struct i2c_device {
};
struct dib8000_state {
- struct dvb_frontend fe;
struct dib8000_config cfg;
struct i2c_device i2c;
@@ -68,6 +68,8 @@ struct dib8000_state {
u8 isdbt_cfg_loaded;
enum frontend_tune_state tune_state;
u32 status;
+
+ struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
};
enum dib8000_power_mode {
@@ -122,111 +124,111 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
return dib8000_i2c_write16(&state->i2c, reg, val);
}
-static const int16_t coeff_2k_sb_1seg_dqpsk[8] = {
+static const s16 coeff_2k_sb_1seg_dqpsk[8] = {
(769 << 5) | 0x0a, (745 << 5) | 0x03, (595 << 5) | 0x0d, (769 << 5) | 0x0a, (920 << 5) | 0x09, (784 << 5) | 0x02, (519 << 5) | 0x0c,
- (920 << 5) | 0x09
+ (920 << 5) | 0x09
};
-static const int16_t coeff_2k_sb_1seg[8] = {
+static const s16 coeff_2k_sb_1seg[8] = {
(692 << 5) | 0x0b, (683 << 5) | 0x01, (519 << 5) | 0x09, (692 << 5) | 0x0b, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f
};
-static const int16_t coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = {
+static const s16 coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = {
(832 << 5) | 0x10, (912 << 5) | 0x05, (900 << 5) | 0x12, (832 << 5) | 0x10, (-931 << 5) | 0x0f, (912 << 5) | 0x04, (807 << 5) | 0x11,
- (-931 << 5) | 0x0f
+ (-931 << 5) | 0x0f
};
-static const int16_t coeff_2k_sb_3seg_0dqpsk[8] = {
+static const s16 coeff_2k_sb_3seg_0dqpsk[8] = {
(622 << 5) | 0x0c, (941 << 5) | 0x04, (796 << 5) | 0x10, (622 << 5) | 0x0c, (982 << 5) | 0x0c, (519 << 5) | 0x02, (572 << 5) | 0x0e,
- (982 << 5) | 0x0c
+ (982 << 5) | 0x0c
};
-static const int16_t coeff_2k_sb_3seg_1dqpsk[8] = {
+static const s16 coeff_2k_sb_3seg_1dqpsk[8] = {
(699 << 5) | 0x14, (607 << 5) | 0x04, (944 << 5) | 0x13, (699 << 5) | 0x14, (-720 << 5) | 0x0d, (640 << 5) | 0x03, (866 << 5) | 0x12,
- (-720 << 5) | 0x0d
+ (-720 << 5) | 0x0d
};
-static const int16_t coeff_2k_sb_3seg[8] = {
+static const s16 coeff_2k_sb_3seg[8] = {
(664 << 5) | 0x0c, (925 << 5) | 0x03, (937 << 5) | 0x10, (664 << 5) | 0x0c, (-610 << 5) | 0x0a, (697 << 5) | 0x01, (836 << 5) | 0x0e,
- (-610 << 5) | 0x0a
+ (-610 << 5) | 0x0a
};
-static const int16_t coeff_4k_sb_1seg_dqpsk[8] = {
+static const s16 coeff_4k_sb_1seg_dqpsk[8] = {
(-955 << 5) | 0x0e, (687 << 5) | 0x04, (818 << 5) | 0x10, (-955 << 5) | 0x0e, (-922 << 5) | 0x0d, (750 << 5) | 0x03, (665 << 5) | 0x0f,
- (-922 << 5) | 0x0d
+ (-922 << 5) | 0x0d
};
-static const int16_t coeff_4k_sb_1seg[8] = {
+static const s16 coeff_4k_sb_1seg[8] = {
(638 << 5) | 0x0d, (683 << 5) | 0x02, (638 << 5) | 0x0d, (638 << 5) | 0x0d, (-655 << 5) | 0x0a, (517 << 5) | 0x00, (698 << 5) | 0x0d,
- (-655 << 5) | 0x0a
+ (-655 << 5) | 0x0a
};
-static const int16_t coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = {
+static const s16 coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = {
(-707 << 5) | 0x14, (910 << 5) | 0x06, (889 << 5) | 0x16, (-707 << 5) | 0x14, (-958 << 5) | 0x13, (993 << 5) | 0x05, (523 << 5) | 0x14,
- (-958 << 5) | 0x13
+ (-958 << 5) | 0x13
};
-static const int16_t coeff_4k_sb_3seg_0dqpsk[8] = {
+static const s16 coeff_4k_sb_3seg_0dqpsk[8] = {
(-723 << 5) | 0x13, (910 << 5) | 0x05, (777 << 5) | 0x14, (-723 << 5) | 0x13, (-568 << 5) | 0x0f, (547 << 5) | 0x03, (696 << 5) | 0x12,
- (-568 << 5) | 0x0f
+ (-568 << 5) | 0x0f
};
-static const int16_t coeff_4k_sb_3seg_1dqpsk[8] = {
+static const s16 coeff_4k_sb_3seg_1dqpsk[8] = {
(-940 << 5) | 0x15, (607 << 5) | 0x05, (915 << 5) | 0x16, (-940 << 5) | 0x15, (-848 << 5) | 0x13, (683 << 5) | 0x04, (543 << 5) | 0x14,
- (-848 << 5) | 0x13
+ (-848 << 5) | 0x13
};
-static const int16_t coeff_4k_sb_3seg[8] = {
+static const s16 coeff_4k_sb_3seg[8] = {
(612 << 5) | 0x12, (910 << 5) | 0x04, (864 << 5) | 0x14, (612 << 5) | 0x12, (-869 << 5) | 0x13, (683 << 5) | 0x02, (869 << 5) | 0x12,
- (-869 << 5) | 0x13
+ (-869 << 5) | 0x13
};
-static const int16_t coeff_8k_sb_1seg_dqpsk[8] = {
+static const s16 coeff_8k_sb_1seg_dqpsk[8] = {
(-835 << 5) | 0x12, (684 << 5) | 0x05, (735 << 5) | 0x14, (-835 << 5) | 0x12, (-598 << 5) | 0x10, (781 << 5) | 0x04, (739 << 5) | 0x13,
- (-598 << 5) | 0x10
+ (-598 << 5) | 0x10
};
-static const int16_t coeff_8k_sb_1seg[8] = {
+static const s16 coeff_8k_sb_1seg[8] = {
(673 << 5) | 0x0f, (683 << 5) | 0x03, (808 << 5) | 0x12, (673 << 5) | 0x0f, (585 << 5) | 0x0f, (512 << 5) | 0x01, (780 << 5) | 0x0f,
- (585 << 5) | 0x0f
+ (585 << 5) | 0x0f
};
-static const int16_t coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = {
+static const s16 coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = {
(863 << 5) | 0x17, (930 << 5) | 0x07, (878 << 5) | 0x19, (863 << 5) | 0x17, (0 << 5) | 0x14, (521 << 5) | 0x05, (980 << 5) | 0x18,
- (0 << 5) | 0x14
+ (0 << 5) | 0x14
};
-static const int16_t coeff_8k_sb_3seg_0dqpsk[8] = {
+static const s16 coeff_8k_sb_3seg_0dqpsk[8] = {
(-924 << 5) | 0x17, (910 << 5) | 0x06, (774 << 5) | 0x17, (-924 << 5) | 0x17, (-877 << 5) | 0x15, (565 << 5) | 0x04, (553 << 5) | 0x15,
- (-877 << 5) | 0x15
+ (-877 << 5) | 0x15
};
-static const int16_t coeff_8k_sb_3seg_1dqpsk[8] = {
+static const s16 coeff_8k_sb_3seg_1dqpsk[8] = {
(-921 << 5) | 0x19, (607 << 5) | 0x06, (881 << 5) | 0x19, (-921 << 5) | 0x19, (-921 << 5) | 0x14, (713 << 5) | 0x05, (1018 << 5) | 0x18,
- (-921 << 5) | 0x14
+ (-921 << 5) | 0x14
};
-static const int16_t coeff_8k_sb_3seg[8] = {
+static const s16 coeff_8k_sb_3seg[8] = {
(514 << 5) | 0x14, (910 << 5) | 0x05, (861 << 5) | 0x17, (514 << 5) | 0x14, (690 << 5) | 0x14, (683 << 5) | 0x03, (662 << 5) | 0x15,
- (690 << 5) | 0x14
+ (690 << 5) | 0x14
};
-static const int16_t ana_fe_coeff_3seg[24] = {
+static const s16 ana_fe_coeff_3seg[24] = {
81, 80, 78, 74, 68, 61, 54, 45, 37, 28, 19, 11, 4, 1022, 1017, 1013, 1010, 1008, 1008, 1008, 1008, 1010, 1014, 1017
};
-static const int16_t ana_fe_coeff_1seg[24] = {
+static const s16 ana_fe_coeff_1seg[24] = {
249, 226, 164, 82, 5, 981, 970, 988, 1018, 20, 31, 26, 8, 1012, 1000, 1018, 1012, 8, 15, 14, 9, 3, 1017, 1003
};
-static const int16_t ana_fe_coeff_13seg[24] = {
+static const s16 ana_fe_coeff_13seg[24] = {
396, 305, 105, -51, -77, -12, 41, 31, -11, -30, -11, 14, 15, -2, -13, -7, 5, 8, 1, -6, -7, -3, 0, 1
};
static u16 fft_to_mode(struct dib8000_state *state)
{
u16 mode;
- switch (state->fe.dtv_property_cache.transmission_mode) {
+ switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_2K:
mode = 1;
break;
@@ -249,16 +251,18 @@ static void dib8000_set_acquisition_mode(struct dib8000_state *state)
dprintk("acquisition mode activated");
dib8000_write_word(state, 298, nud);
}
-
-static int dib8000_set_output_mode(struct dib8000_state *state, int mode)
+static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
{
+ struct dib8000_state *state = fe->demodulator_priv;
+
u16 outreg, fifo_threshold, smo_mode, sram = 0x0205; /* by default SDRAM deintlv is enabled */
outreg = 0;
fifo_threshold = 1792;
smo_mode = (dib8000_read_word(state, 299) & 0x0050) | (1 << 1);
- dprintk("-I- Setting output mode for demod %p to %d", &state->fe, mode);
+ dprintk("-I- Setting output mode for demod %p to %d",
+ &state->fe[0], mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
@@ -292,7 +296,8 @@ static int dib8000_set_output_mode(struct dib8000_state *state, int mode)
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe);
+ dprintk("Unhandled output_mode passed to be set for demod %p",
+ &state->fe[0]);
return -EINVAL;
}
@@ -342,7 +347,8 @@ static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_pow
{
/* by default everything is going to be powered off */
u16 reg_774 = 0x3fff, reg_775 = 0xffff, reg_776 = 0xffff,
- reg_900 = (dib8000_read_word(state, 900) & 0xfffc) | 0x3, reg_1280 = (dib8000_read_word(state, 1280) & 0x00ff) | 0xff00;
+ reg_900 = (dib8000_read_word(state, 900) & 0xfffc) | 0x3,
+ reg_1280 = (dib8000_read_word(state, 1280) & 0x00ff) | 0xff00;
/* now, depending on the requested mode, we power on */
switch (mode) {
@@ -411,8 +417,9 @@ static int dib8000_set_adc_state(struct dib8000_state *state, enum dibx000_adc_s
return ret;
}
-static int dib8000_set_bandwidth(struct dib8000_state *state, u32 bw)
+static int dib8000_set_bandwidth(struct dvb_frontend *fe, u32 bw)
{
+ struct dib8000_state *state = fe->demodulator_priv;
u32 timf;
if (bw == 0)
@@ -478,7 +485,8 @@ static void dib8000_reset_pll(struct dib8000_state *state)
// clk_cfg1
clk_cfg1 = (1 << 10) | (0 << 9) | (pll->IO_CLK_en_core << 8) |
- (pll->bypclk_div << 5) | (pll->enable_refdiv << 4) | (1 << 3) | (pll->pll_range << 1) | (pll->pll_reset << 0);
+ (pll->bypclk_div << 5) | (pll->enable_refdiv << 4) | (1 << 3) |
+ (pll->pll_range << 1) | (pll->pll_reset << 0);
dib8000_write_word(state, 902, clk_cfg1);
clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3);
@@ -488,11 +496,12 @@ static void dib8000_reset_pll(struct dib8000_state *state)
/* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */
if (state->cfg.pll->ADClkSrc == 0)
- dib8000_write_word(state, 904, (0 << 15) | (0 << 12) | (0 << 10) | (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1));
+ dib8000_write_word(state, 904, (0 << 15) | (0 << 12) | (0 << 10) |
+ (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1));
else if (state->cfg.refclksel != 0)
- dib8000_write_word(state, 904,
- (0 << 15) | (1 << 12) | ((state->cfg.refclksel & 0x3) << 10) | (pll->modulo << 8) | (pll->
- ADClkSrc << 7) | (0 << 1));
+ dib8000_write_word(state, 904, (0 << 15) | (1 << 12) |
+ ((state->cfg.refclksel & 0x3) << 10) | (pll->modulo << 8) |
+ (pll->ADClkSrc << 7) | (0 << 1));
else
dib8000_write_word(state, 904, (0 << 15) | (1 << 12) | (3 << 10) | (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1));
@@ -560,7 +569,7 @@ static const u16 dib8000_defaults[] = {
0xd4c0,
/*1, 32,
- 0x6680 // P_corm_thres Lock algorithms configuration */
+ 0x6680 // P_corm_thres Lock algorithms configuration */
11, 80, /* set ADC level to -16 */
(1 << 13) - 825 - 117,
@@ -623,14 +632,14 @@ static const u16 dib8000_defaults[] = {
1, 285,
0x0020, //p_fec_
1, 299,
- 0x0062, // P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard
+ 0x0062, /* P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard */
1, 338,
(1 << 12) | // P_ctrl_corm_thres4pre_freq_inh=1
- (1 << 10) | // P_ctrl_pre_freq_mode_sat=1
- (0 << 9) | // P_ctrl_pre_freq_inh=0
- (3 << 5) | // P_ctrl_pre_freq_step=3
- (1 << 0), // P_pre_freq_win_len=1
+ (1 << 10) |
+ (0 << 9) | /* P_ctrl_pre_freq_inh=0 */
+ (3 << 5) | /* P_ctrl_pre_freq_step=3 */
+ (1 << 0), /* P_pre_freq_win_len=1 */
1, 903,
(0 << 4) | 2, // P_divclksel=0 P_divbitsel=2 (was clk=3,bit=1 for MPW)
@@ -717,7 +726,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
if (dib8000_reset_gpio(state) != 0)
dprintk("GPIO reset was not successful.");
- if (dib8000_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
+ if (dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0)
dprintk("OUTPUT_MODE could not be resetted.");
state->current_agc = NULL;
@@ -752,7 +761,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
/* unforce divstr regardless whether i2c enumeration was done or not */
dib8000_write_word(state, 1285, dib8000_read_word(state, 1285) & ~(1 << 1));
- dib8000_set_bandwidth(state, 6000);
+ dib8000_set_bandwidth(fe, 6000);
dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON);
dib8000_sad_calib(state);
@@ -778,7 +787,7 @@ static int dib8000_update_lna(struct dib8000_state *state)
// read dyn_gain here (because it is demod-dependent and not tuner)
dyn_gain = dib8000_read_word(state, 390);
- if (state->cfg.update_lna(&state->fe, dyn_gain)) { // LNA has changed
+ if (state->cfg.update_lna(state->fe[0], dyn_gain)) {
dib8000_restart_agc(state);
return 1;
}
@@ -865,7 +874,8 @@ static int dib8000_agc_soft_split(struct dib8000_state *state)
split_offset = state->current_agc->split.max;
else
split_offset = state->current_agc->split.max *
- (agc - state->current_agc->split.min_thres) / (state->current_agc->split.max_thres - state->current_agc->split.min_thres);
+ (agc - state->current_agc->split.min_thres) /
+ (state->current_agc->split.max_thres - state->current_agc->split.min_thres);
dprintk("AGC split_offset: %d", split_offset);
@@ -900,7 +910,7 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
case CT_AGC_STEP_0:
//AGC initialization
if (state->cfg.agc_control)
- state->cfg.agc_control(&state->fe, 1);
+ state->cfg.agc_control(fe, 1);
dib8000_restart_agc(state);
@@ -924,7 +934,7 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
dib8000_agc_soft_split(state);
if (state->cfg.agc_control)
- state->cfg.agc_control(&state->fe, 0);
+ state->cfg.agc_control(fe, 0);
*tune_state = CT_AGC_STOP;
break;
@@ -936,29 +946,28 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
}
-static const int32_t lut_1000ln_mant[] =
+static const s32 lut_1000ln_mant[] =
{
908, 7003, 7090, 7170, 7244, 7313, 7377, 7438, 7495, 7549, 7600
};
-int32_t dib8000_get_adc_power(struct dvb_frontend *fe, uint8_t mode)
+s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
{
- struct dib8000_state *state = fe->demodulator_priv;
- uint32_t ix = 0, tmp_val = 0, exp = 0, mant = 0;
- int32_t val;
-
- val = dib8000_read32(state, 384);
- /* mode = 1 : ln_agcpower calc using mant-exp conversion and mantis look up table */
- if (mode) {
- tmp_val = val;
- while (tmp_val >>= 1)
- exp++;
- mant = (val * 1000 / (1<<exp));
- ix = (uint8_t)((mant-1000)/100); /* index of the LUT */
- val = (lut_1000ln_mant[ix] + 693*(exp-20) - 6908); /* 1000 * ln(adcpower_real) ; 693 = 1000ln(2) ; 6908 = 1000*ln(1000) ; 20 comes from adc_real = adc_pow_int / 2**20 */
- val = (val*256)/1000;
- }
- return val;
+ struct dib8000_state *state = fe->demodulator_priv;
+ u32 ix = 0, tmp_val = 0, exp = 0, mant = 0;
+ s32 val;
+
+ val = dib8000_read32(state, 384);
+ if (mode) {
+ tmp_val = val;
+ while (tmp_val >>= 1)
+ exp++;
+ mant = (val * 1000 / (1<<exp));
+ ix = (u8)((mant-1000)/100); /* index of the LUT */
+ val = (lut_1000ln_mant[ix] + 693*(exp-20) - 6908);
+ val = (val*256)/1000;
+ }
+ return val;
}
EXPORT_SYMBOL(dib8000_get_adc_power);
@@ -1002,22 +1011,23 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 285, dib8000_read_word(state, 285) & 0x60);
i = dib8000_read_word(state, 26) & 1; // P_dds_invspec
- dib8000_write_word(state, 26, state->fe.dtv_property_cache.inversion ^ i);
+ dib8000_write_word(state, 26, state->fe[0]->dtv_property_cache.inversion^i);
- if (state->fe.dtv_property_cache.isdbt_sb_mode) {
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode) {
//compute new dds_freq for the seg and adjust prbs
int seg_offset =
- state->fe.dtv_property_cache.isdbt_sb_segment_idx - (state->fe.dtv_property_cache.isdbt_sb_segment_count / 2) -
- (state->fe.dtv_property_cache.isdbt_sb_segment_count % 2);
+ state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx -
+ (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2) -
+ (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2);
int clk = state->cfg.pll->internal;
u32 segtodds = ((u32) (430 << 23) / clk) << 3; // segtodds = SegBW / Fclk * pow(2,26)
int dds_offset = seg_offset * segtodds;
int new_dds, sub_channel;
- if ((state->fe.dtv_property_cache.isdbt_sb_segment_count % 2) == 0) // if even
+ if ((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
dds_offset -= (int)(segtodds / 2);
if (state->cfg.pll->ifreq == 0) {
- if ((state->fe.dtv_property_cache.inversion ^ i) == 0) {
+ if ((state->fe[0]->dtv_property_cache.inversion ^ i) == 0) {
dib8000_write_word(state, 26, dib8000_read_word(state, 26) | 1);
new_dds = dds_offset;
} else
@@ -1027,35 +1037,35 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
// - the segment of center frequency with an odd total number of segments
// - the segment to the left of center frequency with an even total number of segments
// - the segment to the right of center frequency with an even total number of segments
- if ((state->fe.dtv_property_cache.delivery_system == SYS_ISDBT) && (state->fe.dtv_property_cache.isdbt_sb_mode == 1)
- &&
- (((state->fe.dtv_property_cache.isdbt_sb_segment_count % 2)
- && (state->fe.dtv_property_cache.isdbt_sb_segment_idx ==
- ((state->fe.dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))
- || (((state->fe.dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
- && (state->fe.dtv_property_cache.isdbt_sb_segment_idx == (state->fe.dtv_property_cache.isdbt_sb_segment_count / 2)))
- || (((state->fe.dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
- && (state->fe.dtv_property_cache.isdbt_sb_segment_idx ==
- ((state->fe.dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))
- )) {
+ if ((state->fe[0]->dtv_property_cache.delivery_system == SYS_ISDBT)
+ && (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1)
+ && (((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2)
+ && (state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx ==
+ ((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))
+ || (((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
+ && (state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx == (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2)))
+ || (((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
+ && (state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx ==
+ ((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))
+ )) {
new_dds -= ((u32) (850 << 22) / clk) << 4; // new_dds = 850 (freq shift in KHz) / Fclk * pow(2,26)
}
} else {
- if ((state->fe.dtv_property_cache.inversion ^ i) == 0)
+ if ((state->fe[0]->dtv_property_cache.inversion ^ i) == 0)
new_dds = state->cfg.pll->ifreq - dds_offset;
else
new_dds = state->cfg.pll->ifreq + dds_offset;
}
dib8000_write_word(state, 27, (u16) ((new_dds >> 16) & 0x01ff));
dib8000_write_word(state, 28, (u16) (new_dds & 0xffff));
- if (state->fe.dtv_property_cache.isdbt_sb_segment_count % 2) // if odd
- sub_channel = ((state->fe.dtv_property_cache.isdbt_sb_subchannel + (3 * seg_offset) + 1) % 41) / 3;
- else // if even
- sub_channel = ((state->fe.dtv_property_cache.isdbt_sb_subchannel + (3 * seg_offset)) % 41) / 3;
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2)
+ sub_channel = ((state->fe[0]->dtv_property_cache.isdbt_sb_subchannel + (3 * seg_offset) + 1) % 41) / 3;
+ else
+ sub_channel = ((state->fe[0]->dtv_property_cache.isdbt_sb_subchannel + (3 * seg_offset)) % 41) / 3;
sub_channel -= 6;
- if (state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_2K
- || state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_4K) {
+ if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_2K
+ || state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_4K) {
dib8000_write_word(state, 219, dib8000_read_word(state, 219) | 0x1); //adp_pass =1
dib8000_write_word(state, 190, dib8000_read_word(state, 190) | (0x1 << 14)); //pha3_force_pha_shift = 1
} else {
@@ -1063,7 +1073,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 190, dib8000_read_word(state, 190) & 0xbfff); //pha3_force_pha_shift = 0
}
- switch (state->fe.dtv_property_cache.transmission_mode) {
+ switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_2K:
switch (sub_channel) {
case -6:
@@ -1209,7 +1219,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
}
break;
}
- } else { // if not state->fe.dtv_property_cache.isdbt_sb_mode
+ } else {
dib8000_write_word(state, 27, (u16) ((state->cfg.pll->ifreq >> 16) & 0x01ff));
dib8000_write_word(state, 28, (u16) (state->cfg.pll->ifreq & 0xffff));
dib8000_write_word(state, 26, (u16) ((state->cfg.pll->ifreq >> 25) & 0x0003));
@@ -1218,7 +1228,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 10, (seq << 4));
// dib8000_write_word(state, 287, (dib8000_read_word(state, 287) & 0xe000) | 0x1000);
- switch (state->fe.dtv_property_cache.guard_interval) {
+ switch (state->fe[0]->dtv_property_cache.guard_interval) {
case GUARD_INTERVAL_1_32:
guard = 0;
break;
@@ -1238,7 +1248,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
max_constellation = DQPSK;
for (i = 0; i < 3; i++) {
- switch (state->fe.dtv_property_cache.layer[i].modulation) {
+ switch (state->fe[0]->dtv_property_cache.layer[i].modulation) {
case DQPSK:
constellation = 0;
break;
@@ -1254,7 +1264,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
break;
}
- switch (state->fe.dtv_property_cache.layer[i].fec) {
+ switch (state->fe[0]->dtv_property_cache.layer[i].fec) {
case FEC_1_2:
crate = 1;
break;
@@ -1273,26 +1283,26 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
break;
}
- if ((state->fe.dtv_property_cache.layer[i].interleaving > 0) &&
- ((state->fe.dtv_property_cache.layer[i].interleaving <= 3) ||
- (state->fe.dtv_property_cache.layer[i].interleaving == 4 && state->fe.dtv_property_cache.isdbt_sb_mode == 1))
- )
- timeI = state->fe.dtv_property_cache.layer[i].interleaving;
+ if ((state->fe[0]->dtv_property_cache.layer[i].interleaving > 0) &&
+ ((state->fe[0]->dtv_property_cache.layer[i].interleaving <= 3) ||
+ (state->fe[0]->dtv_property_cache.layer[i].interleaving == 4 && state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1))
+ )
+ timeI = state->fe[0]->dtv_property_cache.layer[i].interleaving;
else
timeI = 0;
- dib8000_write_word(state, 2 + i, (constellation << 10) | ((state->fe.dtv_property_cache.layer[i].segment_count & 0xf) << 6) |
- (crate << 3) | timeI);
- if (state->fe.dtv_property_cache.layer[i].segment_count > 0) {
+ dib8000_write_word(state, 2 + i, (constellation << 10) | ((state->fe[0]->dtv_property_cache.layer[i].segment_count & 0xf) << 6) |
+ (crate << 3) | timeI);
+ if (state->fe[0]->dtv_property_cache.layer[i].segment_count > 0) {
switch (max_constellation) {
case DQPSK:
case QPSK:
- if (state->fe.dtv_property_cache.layer[i].modulation == QAM_16 ||
- state->fe.dtv_property_cache.layer[i].modulation == QAM_64)
- max_constellation = state->fe.dtv_property_cache.layer[i].modulation;
+ if (state->fe[0]->dtv_property_cache.layer[i].modulation == QAM_16 ||
+ state->fe[0]->dtv_property_cache.layer[i].modulation == QAM_64)
+ max_constellation = state->fe[0]->dtv_property_cache.layer[i].modulation;
break;
case QAM_16:
- if (state->fe.dtv_property_cache.layer[i].modulation == QAM_64)
- max_constellation = state->fe.dtv_property_cache.layer[i].modulation;
+ if (state->fe[0]->dtv_property_cache.layer[i].modulation == QAM_64)
+ max_constellation = state->fe[0]->dtv_property_cache.layer[i].modulation;
break;
}
}
@@ -1303,34 +1313,34 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
//dib8000_write_word(state, 5, 13); /*p_last_seg = 13*/
dib8000_write_word(state, 274, (dib8000_read_word(state, 274) & 0xffcf) |
- ((state->fe.dtv_property_cache.isdbt_partial_reception & 1) << 5) | ((state->fe.dtv_property_cache.
+ ((state->fe[0]->dtv_property_cache.isdbt_partial_reception & 1) << 5) | ((state->fe[0]->dtv_property_cache.
isdbt_sb_mode & 1) << 4));
- dprintk("mode = %d ; guard = %d", mode, state->fe.dtv_property_cache.guard_interval);
+ dprintk("mode = %d ; guard = %d", mode, state->fe[0]->dtv_property_cache.guard_interval);
/* signal optimization parameter */
- if (state->fe.dtv_property_cache.isdbt_partial_reception) {
- seg_diff_mask = (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) << permu_seg[0];
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception) {
+ seg_diff_mask = (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) << permu_seg[0];
for (i = 1; i < 3; i++)
nbseg_diff +=
- (state->fe.dtv_property_cache.layer[i].modulation == DQPSK) * state->fe.dtv_property_cache.layer[i].segment_count;
+ (state->fe[0]->dtv_property_cache.layer[i].modulation == DQPSK) * state->fe[0]->dtv_property_cache.layer[i].segment_count;
for (i = 0; i < nbseg_diff; i++)
seg_diff_mask |= 1 << permu_seg[i + 1];
} else {
for (i = 0; i < 3; i++)
nbseg_diff +=
- (state->fe.dtv_property_cache.layer[i].modulation == DQPSK) * state->fe.dtv_property_cache.layer[i].segment_count;
+ (state->fe[0]->dtv_property_cache.layer[i].modulation == DQPSK) * state->fe[0]->dtv_property_cache.layer[i].segment_count;
for (i = 0; i < nbseg_diff; i++)
seg_diff_mask |= 1 << permu_seg[i];
}
dprintk("nbseg_diff = %X (%d)", seg_diff_mask, seg_diff_mask);
state->differential_constellation = (seg_diff_mask != 0);
- dib8000_set_diversity_in(&state->fe, state->diversity_onoff);
+ dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) { // ISDB-Tsb
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 1) // 3-segments
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 1)
seg_mask13 = 0x00E0;
else // 1-segment
seg_mask13 = 0x0040;
@@ -1340,7 +1350,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
// WRITE: Mode & Diff mask
dib8000_write_word(state, 0, (mode << 13) | seg_diff_mask);
- if ((seg_diff_mask) || (state->fe.dtv_property_cache.isdbt_sb_mode))
+ if ((seg_diff_mask) || (state->fe[0]->dtv_property_cache.isdbt_sb_mode))
dib8000_write_word(state, 268, (dib8000_read_word(state, 268) & 0xF9FF) | 0x0200);
else
dib8000_write_word(state, 268, (2 << 9) | 39); //init value
@@ -1351,26 +1361,25 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 353, seg_mask13); // ADDR 353
-/* // P_small_narrow_band=0, P_small_last_seg=13, P_small_offset_num_car=5 */
- // dib8000_write_word(state, 351, (state->fe.dtv_property_cache.isdbt_sb_mode << 8) | (13 << 4) | 5 );
+/* // P_small_narrow_band=0, P_small_last_seg=13, P_small_offset_num_car=5 */
// ---- SMALL ----
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) {
- switch (state->fe.dtv_property_cache.transmission_mode) {
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
+ switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_2K:
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) { // 1-seg
- if (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) // DQPSK
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) {
+ if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK)
ncoeff = coeff_2k_sb_1seg_dqpsk;
else // QPSK or QAM
ncoeff = coeff_2k_sb_1seg;
} else { // 3-segments
- if (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) { // DQPSK on central segment
- if (state->fe.dtv_property_cache.layer[1].modulation == DQPSK) // DQPSK on external segments
+ if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) {
+ if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK)
ncoeff = coeff_2k_sb_3seg_0dqpsk_1dqpsk;
else // QPSK or QAM on external segments
ncoeff = coeff_2k_sb_3seg_0dqpsk;
} else { // QPSK or QAM on central segment
- if (state->fe.dtv_property_cache.layer[1].modulation == DQPSK) // DQPSK on external segments
+ if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK)
ncoeff = coeff_2k_sb_3seg_1dqpsk;
else // QPSK or QAM on external segments
ncoeff = coeff_2k_sb_3seg;
@@ -1379,20 +1388,20 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
break;
case TRANSMISSION_MODE_4K:
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) { // 1-seg
- if (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) // DQPSK
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) {
+ if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK)
ncoeff = coeff_4k_sb_1seg_dqpsk;
else // QPSK or QAM
ncoeff = coeff_4k_sb_1seg;
} else { // 3-segments
- if (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) { // DQPSK on central segment
- if (state->fe.dtv_property_cache.layer[1].modulation == DQPSK) { // DQPSK on external segments
+ if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) {
+ if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) {
ncoeff = coeff_4k_sb_3seg_0dqpsk_1dqpsk;
} else { // QPSK or QAM on external segments
ncoeff = coeff_4k_sb_3seg_0dqpsk;
}
} else { // QPSK or QAM on central segment
- if (state->fe.dtv_property_cache.layer[1].modulation == DQPSK) { // DQPSK on external segments
+ if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) {
ncoeff = coeff_4k_sb_3seg_1dqpsk;
} else // QPSK or QAM on external segments
ncoeff = coeff_4k_sb_3seg;
@@ -1403,20 +1412,20 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
case TRANSMISSION_MODE_AUTO:
case TRANSMISSION_MODE_8K:
default:
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) { // 1-seg
- if (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) // DQPSK
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) {
+ if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK)
ncoeff = coeff_8k_sb_1seg_dqpsk;
else // QPSK or QAM
ncoeff = coeff_8k_sb_1seg;
} else { // 3-segments
- if (state->fe.dtv_property_cache.layer[0].modulation == DQPSK) { // DQPSK on central segment
- if (state->fe.dtv_property_cache.layer[1].modulation == DQPSK) { // DQPSK on external segments
+ if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) {
+ if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) {
ncoeff = coeff_8k_sb_3seg_0dqpsk_1dqpsk;
} else { // QPSK or QAM on external segments
ncoeff = coeff_8k_sb_3seg_0dqpsk;
}
} else { // QPSK or QAM on central segment
- if (state->fe.dtv_property_cache.layer[1].modulation == DQPSK) { // DQPSK on external segments
+ if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) {
ncoeff = coeff_8k_sb_3seg_1dqpsk;
} else // QPSK or QAM on external segments
ncoeff = coeff_8k_sb_3seg;
@@ -1430,22 +1439,22 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
// P_small_coef_ext_enable=ISDB-Tsb, P_small_narrow_band=ISDB-Tsb, P_small_last_seg=13, P_small_offset_num_car=5
dib8000_write_word(state, 351,
- (state->fe.dtv_property_cache.isdbt_sb_mode << 9) | (state->fe.dtv_property_cache.isdbt_sb_mode << 8) | (13 << 4) | 5);
+ (state->fe[0]->dtv_property_cache.isdbt_sb_mode << 9) | (state->fe[0]->dtv_property_cache.isdbt_sb_mode << 8) | (13 << 4) | 5);
// ---- COFF ----
// Carloff, the most robust
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) { // Sound Broadcasting mode - use both TMCC and AC pilots
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
// P_coff_cpil_alpha=4, P_coff_inh=0, P_coff_cpil_winlen=64
// P_coff_narrow_band=1, P_coff_square_val=1, P_coff_one_seg=~partial_rcpt, P_coff_use_tmcc=1, P_coff_use_ac=1
dib8000_write_word(state, 187,
- (4 << 12) | (0 << 11) | (63 << 5) | (0x3 << 3) | ((~state->fe.dtv_property_cache.isdbt_partial_reception & 1) << 2)
- | 0x3);
+ (4 << 12) | (0 << 11) | (63 << 5) | (0x3 << 3) | ((~state->fe[0]->dtv_property_cache.isdbt_partial_reception & 1) << 2)
+ | 0x3);
-/* // P_small_coef_ext_enable = 1 */
-/* dib8000_write_word(state, 351, dib8000_read_word(state, 351) | 0x200); */
+/* // P_small_coef_ext_enable = 1 */
+/* dib8000_write_word(state, 351, dib8000_read_word(state, 351) | 0x200); */
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) { // Sound Broadcasting mode 1 seg
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) {
// P_coff_winlen=63, P_coff_thres_lock=15, P_coff_one_seg_width= (P_mode == 3) , P_coff_one_seg_sym= (P_mode-1)
if (mode == 3)
@@ -1469,10 +1478,10 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 186, 80);
} else { // Sound Broadcasting mode 3 seg
// P_coff_one_seg_sym= 1, P_coff_one_seg_width= 1, P_coff_winlen=63, P_coff_thres_lock=15
- /* if (mode == 3) */
- /* dib8000_write_word(state, 180, 0x2fca | ((0) << 14)); */
- /* else */
- /* dib8000_write_word(state, 180, 0x2fca | ((1) << 14)); */
+ /* if (mode == 3) */
+ /* dib8000_write_word(state, 180, 0x2fca | ((0) << 14)); */
+ /* else */
+ /* dib8000_write_word(state, 180, 0x2fca | ((1) << 14)); */
dib8000_write_word(state, 180, 0x1fcf | (1 << 14));
// P_ctrl_corm_thres4pre_freq_inh = 1, P_ctrl_pre_freq_mode_sat=1,
@@ -1509,7 +1518,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 341, (4 << 3) | (1 << 2) | (1 << 1) | (1 << 0));
}
// ---- FFT ----
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1 && state->fe.dtv_property_cache.isdbt_partial_reception == 0) // 1-seg
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1 && state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0)
dib8000_write_word(state, 178, 64); // P_fft_powrange=64
else
dib8000_write_word(state, 178, 32); // P_fft_powrange=32
@@ -1518,12 +1527,12 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
* 6bits; p_coff_thres_lock 6bits (for coff lock if needed)
*/
/* if ( ( nbseg_diff>0)&&(nbseg_diff<13))
- dib8000_write_word(state, 187, (dib8000_read_word(state, 187) & 0xfffb) | (1 << 3)); */
+ dib8000_write_word(state, 187, (dib8000_read_word(state, 187) & 0xfffb) | (1 << 3)); */
dib8000_write_word(state, 189, ~seg_mask13 | seg_diff_mask); /* P_lmod4_seg_inh */
dib8000_write_word(state, 192, ~seg_mask13 | seg_diff_mask); /* P_pha3_seg_inh */
dib8000_write_word(state, 225, ~seg_mask13 | seg_diff_mask); /* P_tac_seg_inh */
- if ((!state->fe.dtv_property_cache.isdbt_sb_mode) && (state->cfg.pll->ifreq == 0))
+ if ((!state->fe[0]->dtv_property_cache.isdbt_sb_mode) && (state->cfg.pll->ifreq == 0))
dib8000_write_word(state, 266, ~seg_mask13 | seg_diff_mask | 0x40); /* P_equal_noise_seg_inh */
else
dib8000_write_word(state, 266, ~seg_mask13 | seg_diff_mask); /* P_equal_noise_seg_inh */
@@ -1538,8 +1547,8 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 211, seg_mask13 & (~seg_diff_mask)); /* P_des_seg_enabled */
/* offset loop parameters */
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) {
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) // Sound Broadcasting mode 1 seg
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0)
/* P_timf_alpha = (11-P_mode), P_corm_alpha=6, P_corm_thres=0x80 */
dib8000_write_word(state, 32, ((11 - mode) << 12) | (6 << 8) | 0x40);
@@ -1551,8 +1560,8 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
/* P_timf_alpha = (9-P_mode, P_corm_alpha=6, P_corm_thres=0x80 */
dib8000_write_word(state, 32, ((9 - mode) << 12) | (6 << 8) | 0x80);
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) {
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) // Sound Broadcasting mode 1 seg
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0)
/* P_ctrl_pha_off_max=3 P_ctrl_sfreq_inh =0 P_ctrl_sfreq_step = (11-P_mode) */
dib8000_write_word(state, 37, (3 << 5) | (0 << 4) | (10 - mode));
@@ -1564,7 +1573,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dib8000_write_word(state, 37, (3 << 5) | (0 << 4) | (8 - mode));
/* P_dvsy_sync_wait - reuse mode */
- switch (state->fe.dtv_property_cache.transmission_mode) {
+ switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_8K:
mode = 256;
break;
@@ -1624,15 +1633,15 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
}
// ---- ANA_FE ----
- if (state->fe.dtv_property_cache.isdbt_sb_mode) {
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 1) // 3-segments
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 1)
ana_fe = ana_fe_coeff_3seg;
else // 1-segment
ana_fe = ana_fe_coeff_1seg;
} else
ana_fe = ana_fe_coeff_13seg;
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1 || state->isdbt_cfg_loaded == 0)
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1 || state->isdbt_cfg_loaded == 0)
for (mode = 0; mode < 24; mode++)
dib8000_write_word(state, 117 + mode, ana_fe[mode]);
@@ -1648,11 +1657,11 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
// "P_cspu_left_edge" not used => do not care
// "P_cspu_right_edge" not used => do not care
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) { // ISDB-Tsb
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
dib8000_write_word(state, 228, 1); // P_2d_mode_byp=1
dib8000_write_word(state, 205, dib8000_read_word(state, 205) & 0xfff0); // P_cspu_win_cut = 0
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0 // 1-segment
- && state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_2K) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0
+ && state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_2K) {
//dib8000_write_word(state, 219, dib8000_read_word(state, 219) & 0xfffe); // P_adp_pass = 0
dib8000_write_word(state, 265, 15); // P_equal_noise_sel = 15
}
@@ -1664,7 +1673,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
// ---- TMCC ----
for (i = 0; i < 3; i++)
tmcc_pow +=
- (((state->fe.dtv_property_cache.layer[i].modulation == DQPSK) * 4 + 1) * state->fe.dtv_property_cache.layer[i].segment_count);
+ (((state->fe[0]->dtv_property_cache.layer[i].modulation == DQPSK) * 4 + 1) * state->fe[0]->dtv_property_cache.layer[i].segment_count);
// Quantif of "P_tmcc_dec_thres_?k" is (0, 5+mode, 9);
// Threshold is set at 1/4 of max power.
tmcc_pow *= (1 << (9 - 2));
@@ -1678,7 +1687,7 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
if (state->isdbt_cfg_loaded == 0)
dib8000_write_word(state, 250, 3285); /*p_2d_hspeed_thr0 */
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1)
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1)
state->isdbt_cfg_loaded = 0;
else
state->isdbt_cfg_loaded = 1;
@@ -1693,38 +1702,38 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
int slist = 0;
- state->fe.dtv_property_cache.inversion = 0;
- if (!state->fe.dtv_property_cache.isdbt_sb_mode)
- state->fe.dtv_property_cache.layer[0].segment_count = 13;
- state->fe.dtv_property_cache.layer[0].modulation = QAM_64;
- state->fe.dtv_property_cache.layer[0].fec = FEC_2_3;
- state->fe.dtv_property_cache.layer[0].interleaving = 0;
+ state->fe[0]->dtv_property_cache.inversion = 0;
+ if (!state->fe[0]->dtv_property_cache.isdbt_sb_mode)
+ state->fe[0]->dtv_property_cache.layer[0].segment_count = 13;
+ state->fe[0]->dtv_property_cache.layer[0].modulation = QAM_64;
+ state->fe[0]->dtv_property_cache.layer[0].fec = FEC_2_3;
+ state->fe[0]->dtv_property_cache.layer[0].interleaving = 0;
//choose the right list, in sb, always do everything
- if (state->fe.dtv_property_cache.isdbt_sb_mode) {
- state->fe.dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
- state->fe.dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode) {
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
slist = 7;
dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13));
} else {
- if (state->fe.dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO) {
- if (state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) {
+ if (state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO) {
+ if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) {
slist = 7;
dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13)); // P_mode = 1 to have autosearch start ok with mode2
} else
slist = 3;
} else {
- if (state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) {
+ if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) {
slist = 2;
dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13)); // P_mode = 1
} else
slist = 0;
}
- if (state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO)
- state->fe.dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
- if (state->fe.dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO)
- state->fe.dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
+ if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO)
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
+ if (state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO)
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
dprintk("using list for autosearch : %d", slist);
dib8000_set_channel(state, (unsigned char)slist, 1);
@@ -1786,7 +1795,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
if (state == NULL)
return -EINVAL;
- dib8000_set_bandwidth(state, state->fe.dtv_property_cache.bandwidth_hz / 1000);
+ dib8000_set_bandwidth(fe, state->fe[0]->dtv_property_cache.bandwidth_hz / 1000);
dib8000_set_channel(state, 0, 0);
// restart demod
@@ -1799,17 +1808,16 @@ static int dib8000_tune(struct dvb_frontend *fe)
// never achieved a lock before - wait for timfreq to update
if (state->timf == 0) {
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) {
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) // Sound Broadcasting mode 1 seg
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0)
msleep(300);
else // Sound Broadcasting mode 3 seg
msleep(500);
} else // 13 seg
msleep(200);
}
- //dump_reg(state);
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1) {
- if (state->fe.dtv_property_cache.isdbt_partial_reception == 0) { // Sound Broadcasting mode 1 seg
+ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
+ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) {
/* P_timf_alpha = (13-P_mode) , P_corm_alpha=6, P_corm_thres=0x40 alpha to check on board */
dib8000_write_word(state, 32, ((13 - mode) << 12) | (6 << 8) | 0x40);
@@ -1854,26 +1862,38 @@ static int dib8000_tune(struct dvb_frontend *fe)
static int dib8000_wakeup(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ int ret;
dib8000_set_power_mode(state, DIB8000M_POWER_ALL);
dib8000_set_adc_state(state, DIBX000_ADC_ON);
if (dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
dprintk("could not start Slow ADC");
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ ret = state->fe[index_frontend]->ops.init(state->fe[index_frontend]);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
static int dib8000_sleep(struct dvb_frontend *fe)
{
- struct dib8000_state *st = fe->demodulator_priv;
- if (1) {
- dib8000_set_output_mode(st, OUTMODE_HIGH_Z);
- dib8000_set_power_mode(st, DIB8000M_POWER_INTERFACE_ONLY);
- return dib8000_set_adc_state(st, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(st, DIBX000_ADC_OFF);
- } else {
+ struct dib8000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ int ret;
- return 0;
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
+ if (ret < 0)
+ return ret;
}
+
+ dib8000_set_output_mode(fe, OUTMODE_HIGH_Z);
+ dib8000_set_power_mode(state, DIB8000M_POWER_INTERFACE_ONLY);
+ return dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(state, DIBX000_ADC_OFF);
}
enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
@@ -1891,16 +1911,40 @@ int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tun
}
EXPORT_SYMBOL(dib8000_set_tune_state);
-
-
-
static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib8000_state *state = fe->demodulator_priv;
u16 i, val = 0;
+ fe_status_t stat;
+ u8 index_frontend, sub_index_frontend;
fe->dtv_property_cache.bandwidth_hz = 6000000;
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
+ if (stat&FE_HAS_SYNC) {
+ dprintk("TMCC lock on the slave%i", index_frontend);
+ /* synchronize the cache with the other frontends */
+ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep);
+ for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) {
+ if (sub_index_frontend != index_frontend) {
+ state->fe[sub_index_frontend]->dtv_property_cache.isdbt_sb_mode = state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode;
+ state->fe[sub_index_frontend]->dtv_property_cache.inversion = state->fe[index_frontend]->dtv_property_cache.inversion;
+ state->fe[sub_index_frontend]->dtv_property_cache.transmission_mode = state->fe[index_frontend]->dtv_property_cache.transmission_mode;
+ state->fe[sub_index_frontend]->dtv_property_cache.guard_interval = state->fe[index_frontend]->dtv_property_cache.guard_interval;
+ state->fe[sub_index_frontend]->dtv_property_cache.isdbt_partial_reception = state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception;
+ for (i = 0; i < 3; i++) {
+ state->fe[sub_index_frontend]->dtv_property_cache.layer[i].segment_count = state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count;
+ state->fe[sub_index_frontend]->dtv_property_cache.layer[i].interleaving = state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving;
+ state->fe[sub_index_frontend]->dtv_property_cache.layer[i].fec = state->fe[index_frontend]->dtv_property_cache.layer[i].fec;
+ state->fe[sub_index_frontend]->dtv_property_cache.layer[i].modulation = state->fe[index_frontend]->dtv_property_cache.layer[i].modulation;
+ }
+ }
+ }
+ return 0;
+ }
+ }
+
fe->dtv_property_cache.isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1;
val = dib8000_read_word(state, 570);
@@ -1992,112 +2036,200 @@ static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
break;
}
}
+
+ /* synchronize the cache with the other frontends */
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = fe->dtv_property_cache.isdbt_sb_mode;
+ state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion;
+ state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode;
+ state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval;
+ state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = fe->dtv_property_cache.isdbt_partial_reception;
+ for (i = 0; i < 3; i++) {
+ state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = fe->dtv_property_cache.layer[i].segment_count;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = fe->dtv_property_cache.layer[i].interleaving;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].fec = fe->dtv_property_cache.layer[i].fec;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = fe->dtv_property_cache.layer[i].modulation;
+ }
+ }
return 0;
}
static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib8000_state *state = fe->demodulator_priv;
+ u8 nbr_pending, exit_condition, index_frontend;
+ s8 index_frontend_success = -1;
int time, ret;
+ int time_slave = FE_CALLBACK_TIME_NEVER;
- fe->dtv_property_cache.delivery_system = SYS_ISDBT;
+ if (state->fe[0]->dtv_property_cache.frequency == 0) {
+ dprintk("dib8000: must at least specify frequency ");
+ return 0;
+ }
- dib8000_set_output_mode(state, OUTMODE_HIGH_Z);
+ if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
+ dprintk("dib8000: no bandwidth specified, set to default ");
+ state->fe[0]->dtv_property_cache.bandwidth_hz = 6000000;
+ }
+
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ /* synchronization of the cache */
+ state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_ISDBT;
+ memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties));
+
+ dib8000_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z);
+ if (state->fe[index_frontend]->ops.tuner_ops.set_params)
+ state->fe[index_frontend]->ops.tuner_ops.set_params(state->fe[index_frontend], fep);
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, fep);
+ dib8000_set_tune_state(state->fe[index_frontend], CT_AGC_START);
+ }
/* start up the AGC */
- state->tune_state = CT_AGC_START;
do {
- time = dib8000_agc_startup(fe);
+ time = dib8000_agc_startup(state->fe[0]);
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ time_slave = dib8000_agc_startup(state->fe[index_frontend]);
+ if (time == FE_CALLBACK_TIME_NEVER)
+ time = time_slave;
+ else if ((time_slave != FE_CALLBACK_TIME_NEVER) && (time_slave > time))
+ time = time_slave;
+ }
if (time != FE_CALLBACK_TIME_NEVER)
msleep(time / 10);
else
break;
- } while (state->tune_state != CT_AGC_STOP);
-
- if (state->fe.dtv_property_cache.frequency == 0) {
- dprintk("dib8000: must at least specify frequency ");
- return 0;
- }
-
- if (state->fe.dtv_property_cache.bandwidth_hz == 0) {
- dprintk("dib8000: no bandwidth specified, set to default ");
- state->fe.dtv_property_cache.bandwidth_hz = 6000000;
- }
+ exit_condition = 1;
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ if (dib8000_get_tune_state(state->fe[index_frontend]) != CT_AGC_STOP) {
+ exit_condition = 0;
+ break;
+ }
+ }
+ } while (exit_condition == 0);
+
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ dib8000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
+
+ if ((state->fe[0]->dtv_property_cache.delivery_system != SYS_ISDBT) ||
+ (state->fe[0]->dtv_property_cache.inversion == INVERSION_AUTO) ||
+ (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) ||
+ (state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO) ||
+ (((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 0)) != 0) &&
+ (state->fe[0]->dtv_property_cache.layer[0].segment_count != 0xff) &&
+ (state->fe[0]->dtv_property_cache.layer[0].segment_count != 0) &&
+ ((state->fe[0]->dtv_property_cache.layer[0].modulation == QAM_AUTO) ||
+ (state->fe[0]->dtv_property_cache.layer[0].fec == FEC_AUTO))) ||
+ (((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 1)) != 0) &&
+ (state->fe[0]->dtv_property_cache.layer[1].segment_count != 0xff) &&
+ (state->fe[0]->dtv_property_cache.layer[1].segment_count != 0) &&
+ ((state->fe[0]->dtv_property_cache.layer[1].modulation == QAM_AUTO) ||
+ (state->fe[0]->dtv_property_cache.layer[1].fec == FEC_AUTO))) ||
+ (((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 2)) != 0) &&
+ (state->fe[0]->dtv_property_cache.layer[2].segment_count != 0xff) &&
+ (state->fe[0]->dtv_property_cache.layer[2].segment_count != 0) &&
+ ((state->fe[0]->dtv_property_cache.layer[2].modulation == QAM_AUTO) ||
+ (state->fe[0]->dtv_property_cache.layer[2].fec == FEC_AUTO))) ||
+ (((state->fe[0]->dtv_property_cache.layer[0].segment_count == 0) ||
+ ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 0)) == 0)) &&
+ ((state->fe[0]->dtv_property_cache.layer[1].segment_count == 0) ||
+ ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (2 << 0)) == 0)) &&
+ ((state->fe[0]->dtv_property_cache.layer[2].segment_count == 0) || ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (3 << 0)) == 0)))) {
+ int i = 80000;
+ u8 found = 0;
+ u8 tune_failed = 0;
+
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ dib8000_set_bandwidth(state->fe[index_frontend], fe->dtv_property_cache.bandwidth_hz / 1000);
+ dib8000_autosearch_start(state->fe[index_frontend]);
+ }
- state->tune_state = CT_DEMOD_START;
-
- if ((state->fe.dtv_property_cache.delivery_system != SYS_ISDBT) ||
- (state->fe.dtv_property_cache.inversion == INVERSION_AUTO) ||
- (state->fe.dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) ||
- (state->fe.dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO) ||
- (((state->fe.dtv_property_cache.isdbt_layer_enabled & (1 << 0)) != 0) &&
- (state->fe.dtv_property_cache.layer[0].segment_count != 0xff) &&
- (state->fe.dtv_property_cache.layer[0].segment_count != 0) &&
- ((state->fe.dtv_property_cache.layer[0].modulation == QAM_AUTO) ||
- (state->fe.dtv_property_cache.layer[0].fec == FEC_AUTO))) ||
- (((state->fe.dtv_property_cache.isdbt_layer_enabled & (1 << 1)) != 0) &&
- (state->fe.dtv_property_cache.layer[1].segment_count != 0xff) &&
- (state->fe.dtv_property_cache.layer[1].segment_count != 0) &&
- ((state->fe.dtv_property_cache.layer[1].modulation == QAM_AUTO) ||
- (state->fe.dtv_property_cache.layer[1].fec == FEC_AUTO))) ||
- (((state->fe.dtv_property_cache.isdbt_layer_enabled & (1 << 2)) != 0) &&
- (state->fe.dtv_property_cache.layer[2].segment_count != 0xff) &&
- (state->fe.dtv_property_cache.layer[2].segment_count != 0) &&
- ((state->fe.dtv_property_cache.layer[2].modulation == QAM_AUTO) ||
- (state->fe.dtv_property_cache.layer[2].fec == FEC_AUTO))) ||
- (((state->fe.dtv_property_cache.layer[0].segment_count == 0) ||
- ((state->fe.dtv_property_cache.isdbt_layer_enabled & (1 << 0)) == 0)) &&
- ((state->fe.dtv_property_cache.layer[1].segment_count == 0) ||
- ((state->fe.dtv_property_cache.isdbt_layer_enabled & (2 << 0)) == 0)) &&
- ((state->fe.dtv_property_cache.layer[2].segment_count == 0) || ((state->fe.dtv_property_cache.isdbt_layer_enabled & (3 << 0)) == 0)))) {
- int i = 800, found;
-
- dib8000_set_bandwidth(state, fe->dtv_property_cache.bandwidth_hz / 1000);
- dib8000_autosearch_start(fe);
do {
- msleep(10);
- found = dib8000_autosearch_irq(fe);
- } while (found == 0 && i--);
+ msleep(20);
+ nbr_pending = 0;
+ exit_condition = 0; /* 0: tune pending; 1: tune failed; 2:tune success */
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ if (((tune_failed >> index_frontend) & 0x1) == 0) {
+ found = dib8000_autosearch_irq(state->fe[index_frontend]);
+ switch (found) {
+ case 0: /* tune pending */
+ nbr_pending++;
+ break;
+ case 2:
+ dprintk("autosearch succeed on the frontend%i", index_frontend);
+ exit_condition = 2;
+ index_frontend_success = index_frontend;
+ break;
+ default:
+ dprintk("unhandled autosearch result");
+ case 1:
+ dprintk("autosearch failed for the frontend%i", index_frontend);
+ break;
+ }
+ }
+ }
- dprintk("Frequency %d Hz, autosearch returns: %d", fep->frequency, found);
+ /* if all tune are done and no success, exit: tune failed */
+ if ((nbr_pending == 0) && (exit_condition == 0))
+ exit_condition = 1;
+ } while ((exit_condition == 0) && i--);
- if (found == 0 || found == 1)
- return 0; // no channel found
+ if (exit_condition == 1) { /* tune failed */
+ dprintk("tune failed");
+ return 0;
+ }
+
+ dprintk("tune success on frontend%i", index_frontend_success);
dib8000_get_frontend(fe, fep);
}
- ret = dib8000_tune(fe);
+ for (index_frontend = 0, ret = 0; (ret >= 0) && (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ ret = dib8000_tune(state->fe[index_frontend]);
+
+ /* set output mode and diversity input */
+ dib8000_set_output_mode(state->fe[0], state->cfg.output_mode);
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ dib8000_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY);
+ dib8000_set_diversity_in(state->fe[index_frontend-1], 1);
+ }
- /* make this a config parameter */
- dib8000_set_output_mode(state, state->cfg.output_mode);
+ /* turn off the diversity of the last chip */
+ dib8000_set_diversity_in(state->fe[index_frontend-1], 0);
return ret;
}
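The tail of dib8000_set_frontend() above wires the demodulators into a diversity chain: the master fe[0] keeps the TS output mode from the board configuration, every slave is switched to OUTMODE_DIVERSITY and feeds the chip before it, and only the last chip in the chain has its diversity input turned off. A minimal standalone sketch of the resulting configuration (illustrative only, not driver code):

#include <stdio.h>

/* Print the output mode and diversity-input setting that each of n chained
 * demods ends up with after the loop in dib8000_set_frontend() above.
 * "MASTER_MODE" stands for the board's configured cfg.output_mode. */
static void show_diversity_chain(int n)
{
        int i;

        for (i = 0; i < n; i++)
                printf("fe[%d]: outmode=%s diversity_in=%d\n", i,
                       i == 0 ? "MASTER_MODE" : "OUTMODE_DIVERSITY",
                       i < n - 1 ? 1 : 0);
}

int main(void)
{
        show_diversity_chain(3); /* master plus two slaves */
        return 0;
}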
+static u16 dib8000_read_lock(struct dvb_frontend *fe)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+
+ return dib8000_read_word(state, 568);
+}
+
static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib8000_state *state = fe->demodulator_priv;
- u16 lock = dib8000_read_word(state, 568);
+ u16 lock_slave = 0, lock = dib8000_read_word(state, 568);
+ u8 index_frontend;
+
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ lock_slave |= dib8000_read_lock(state->fe[index_frontend]);
*stat = 0;
- if ((lock >> 13) & 1)
+ if (((lock >> 13) & 1) || ((lock_slave >> 13) & 1))
*stat |= FE_HAS_SIGNAL;
- if ((lock >> 8) & 1) /* Equal */
+ if (((lock >> 8) & 1) || ((lock_slave >> 8) & 1)) /* Equal */
*stat |= FE_HAS_CARRIER;
- if (((lock >> 1) & 0xf) == 0xf) /* TMCC_SYNC */
+ if ((((lock >> 1) & 0xf) == 0xf) || (((lock_slave >> 1) & 0xf) == 0xf)) /* TMCC_SYNC */
*stat |= FE_HAS_SYNC;
- if (((lock >> 12) & 1) && ((lock >> 5) & 7)) /* FEC MPEG */
+ if ((((lock >> 12) & 1) || ((lock_slave >> 12) & 1)) && ((lock >> 5) & 7)) /* FEC MPEG */
*stat |= FE_HAS_LOCK;
- if ((lock >> 12) & 1) {
+ if (((lock >> 12) & 1) || ((lock_slave >> 12) & 1)) {
lock = dib8000_read_word(state, 554); /* Viterbi Layer A */
if (lock & 0x01)
*stat |= FE_HAS_VITERBI;
@@ -2131,44 +2263,120 @@ static int dib8000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
static int dib8000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
{
struct dib8000_state *state = fe->demodulator_priv;
- u16 val = dib8000_read_word(state, 390);
- *strength = 65535 - val;
+ u8 index_frontend;
+ u16 val;
+
+ *strength = 0;
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
+ if (val > 65535 - *strength)
+ *strength = 65535;
+ else
+ *strength += val;
+ }
+
+ val = 65535 - dib8000_read_word(state, 390);
+ if (val > 65535 - *strength)
+ *strength = 65535;
+ else
+ *strength += val;
return 0;
}
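dib8000_read_signal_strength() now sums the local reading (65535 minus register 390) with the strength reported by every slave frontend, saturating at 0xffff instead of wrapping. A minimal sketch of that saturating add in plain standard C (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Same overflow test as the driver: a + b overflows iff b > 0xffff - a. */
static uint16_t sat_add_u16(uint16_t a, uint16_t b)
{
        return (b > 0xffff - a) ? 0xffff : (uint16_t)(a + b);
}

int main(void)
{
        printf("%u\n", (unsigned)sat_add_u16(40000, 20000)); /* 60000 */
        printf("%u\n", (unsigned)sat_add_u16(40000, 30000)); /* clamps to 65535 */
        return 0;
}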
-static int dib8000_read_snr(struct dvb_frontend *fe, u16 * snr)
+static u32 dib8000_get_snr(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
+ u32 n, s, exp;
u16 val;
- s32 signal_mant, signal_exp, noise_mant, noise_exp;
- u32 result = 0;
val = dib8000_read_word(state, 542);
- noise_mant = (val >> 6) & 0xff;
- noise_exp = (val & 0x3f);
+ n = (val >> 6) & 0xff;
+ exp = (val & 0x3f);
+ if ((exp & 0x20) != 0)
+ exp -= 0x40;
+ n <<= exp+16;
val = dib8000_read_word(state, 543);
- signal_mant = (val >> 6) & 0xff;
- signal_exp = (val & 0x3f);
+ s = (val >> 6) & 0xff;
+ exp = (val & 0x3f);
+ if ((exp & 0x20) != 0)
+ exp -= 0x40;
+ s <<= exp+16;
+
+ if (n > 0) {
+ u32 t = (s/n) << 16;
+ return t + ((s << 16) - n*t) / n;
+ }
+ return 0xffffffff;
+}
- if ((noise_exp & 0x20) != 0)
- noise_exp -= 0x40;
- if ((signal_exp & 0x20) != 0)
- signal_exp -= 0x40;
+static int dib8000_read_snr(struct dvb_frontend *fe, u16 * snr)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ u32 snr_master;
- if (signal_mant != 0)
- result = intlog10(2) * 10 * signal_exp + 10 * intlog10(signal_mant);
- else
- result = intlog10(2) * 10 * signal_exp - 100;
- if (noise_mant != 0)
- result -= intlog10(2) * 10 * noise_exp + 10 * intlog10(noise_mant);
+ snr_master = dib8000_get_snr(fe);
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ snr_master += dib8000_get_snr(state->fe[index_frontend]);
+
+ if (snr_master != 0) {
+ snr_master = 10*intlog10(snr_master>>16);
+ *snr = snr_master / ((1 << 24) / 10);
+ }
else
- result -= intlog10(2) * 10 * noise_exp - 100;
+ *snr = 0;
- *snr = result / ((1 << 24) / 10);
return 0;
}
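The new dib8000_get_snr() reads the noise power (register 542) and the signal power (register 543) as an 8-bit mantissa with a signed 6-bit exponent, scales both into 16.16 fixed point, and returns the signal/noise ratio in the same format; dib8000_read_snr() then sums the linear ratios from all frontends before converting with intlog10(). A minimal sketch of the mantissa/exponent decoding (not driver code; assumes exp + 16 stays within 0..31):

#include <stdint.h>
#include <stdio.h>

/* Bits 13..6: 8-bit mantissa; bits 5..0: 6-bit two's-complement exponent.
 * The result is mantissa * 2^exponent expressed in 16.16 fixed point. */
static uint32_t decode_power(uint16_t reg)
{
        uint32_t mant = (reg >> 6) & 0xff;
        int32_t exp = reg & 0x3f;

        if (exp & 0x20)                 /* sign-extend the exponent */
                exp -= 0x40;
        return mant << (exp + 16);
}

int main(void)
{
        /* mantissa 0x80, exponent -3: 128 * 2^-3 = 16.0 -> 0x00100000 in 16.16 */
        uint16_t reg = (0x80 << 6) | ((-3) & 0x3f);

        printf("0x%08x\n", (unsigned)decode_power(reg));
        return 0;
}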
+int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u8 index_frontend = 1;
+
+ while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
+ index_frontend++;
+ if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
+ dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+ state->fe[index_frontend] = fe_slave;
+ return 0;
+ }
+
+ dprintk("too many slave frontend");
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(dib8000_set_slave_frontend);
+
+int dib8000_remove_slave_frontend(struct dvb_frontend *fe)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u8 index_frontend = 1;
+
+ while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
+ index_frontend++;
+ if (index_frontend != 1) {
+ dprintk("remove slave fe %p (index %i)", state->fe[index_frontend-1], index_frontend-1);
+ state->fe[index_frontend-1] = NULL;
+ return 0;
+ }
+
+ dprintk("no frontend to be removed");
+ return -ENODEV;
+}
+EXPORT_SYMBOL(dib8000_remove_slave_frontend);
+
+struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+
+ if (slave_index >= MAX_NUMBER_OF_FRONTENDS)
+ return NULL;
+ return state->fe[slave_index];
+}
+EXPORT_SYMBOL(dib8000_get_slave_frontend);
+
+
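The three exported helpers above form the public interface for chaining demodulators: dib8000_set_slave_frontend() stores a frontend in the first free fe[] slot (index 1 and up), dib8000_get_slave_frontend() returns the frontend at a given slot, and dib8000_remove_slave_frontend() drops the most recently added slave. A minimal usage sketch for hypothetical board-support code (the I2C addresses and config structures are placeholders, not taken from this patch):

#include <linux/i2c.h>
#include "dvb_frontend.h"
#include "dib8000.h"

/* Attach a master and one slave DiB8000 on the same bus and register the
 * slave for diversity reception.  0x80/0x82 and the cfg pointers are
 * board-specific placeholders. */
static struct dvb_frontend *attach_diversity_pair(struct i2c_adapter *i2c,
                struct dib8000_config *master_cfg,
                struct dib8000_config *slave_cfg)
{
        struct dvb_frontend *master, *slave;

        master = dib8000_attach(i2c, 0x80, master_cfg);
        if (master == NULL)
                return NULL;

        slave = dib8000_attach(i2c, 0x82, slave_cfg);
        if (slave != NULL && dib8000_set_slave_frontend(master, slave) != 0)
                dvb_frontend_detach(slave);

        return master;
}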
int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
{
int k = 0;
@@ -2227,7 +2435,13 @@ static int dib8000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_fron
static void dib8000_release(struct dvb_frontend *fe)
{
struct dib8000_state *st = fe->demodulator_priv;
+ u8 index_frontend;
+
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++)
+ dvb_frontend_detach(st->fe[index_frontend]);
+
dibx000_exit_i2c_master(&st->i2c_master);
+ kfree(st->fe[0]);
kfree(st);
}
@@ -2242,19 +2456,19 @@ EXPORT_SYMBOL(dib8000_get_i2c_master);
int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
struct dib8000_state *st = fe->demodulator_priv;
- u16 val = dib8000_read_word(st, 299) & 0xffef;
- val |= (onoff & 0x1) << 4;
+ u16 val = dib8000_read_word(st, 299) & 0xffef;
+ val |= (onoff & 0x1) << 4;
- dprintk("pid filter enabled %d", onoff);
- return dib8000_write_word(st, 299, val);
+ dprintk("pid filter enabled %d", onoff);
+ return dib8000_write_word(st, 299, val);
}
EXPORT_SYMBOL(dib8000_pid_filter_ctrl);
int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib8000_state *st = fe->demodulator_priv;
- dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
- return dib8000_write_word(st, 305 + id, onoff ? (1 << 13) | pid : 0);
+ dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+ return dib8000_write_word(st, 305 + id, onoff ? (1 << 13) | pid : 0);
}
EXPORT_SYMBOL(dib8000_pid_filter);
@@ -2298,6 +2512,9 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
state = kzalloc(sizeof(struct dib8000_state), GFP_KERNEL);
if (state == NULL)
return NULL;
+ fe = kzalloc(sizeof(struct dvb_frontend), GFP_KERNEL);
+ if (fe == NULL) {
+ kfree(state);
+ return NULL;
+ }
memcpy(&state->cfg, cfg, sizeof(struct dib8000_config));
state->i2c.adap = i2c_adap;
@@ -2311,9 +2528,9 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
if ((state->cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (state->cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
state->cfg.output_mode = OUTMODE_MPEG2_FIFO;
- fe = &state->fe;
+ state->fe[0] = fe;
fe->demodulator_priv = state;
- memcpy(&state->fe.ops, &dib8000_ops, sizeof(struct dvb_frontend_ops));
+ memcpy(&state->fe[0]->ops, &dib8000_ops, sizeof(struct dvb_frontend_ops));
state->timf_default = cfg->pll->timf;
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index e0a9ded11df4..617f9eba3a09 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -50,6 +50,9 @@ extern int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_st
extern enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe);
extern void dib8000_pwm_agc_reset(struct dvb_frontend *fe);
extern s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode);
+extern int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave);
+extern int dib8000_remove_slave_frontend(struct dvb_frontend *fe);
+extern struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index);
#else
static inline struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg)
{
@@ -111,6 +114,23 @@ static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return 0;
}
+static inline int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib8000_remove_slave_frontend(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
#endif
#endif
diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
new file mode 100644
index 000000000000..43fb6e45424a
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib9000.c
@@ -0,0 +1,2350 @@
+/*
+ * Linux-DVB Driver for DiBcom's DiB9000 and demodulator-family.
+ *
+ * Copyright (C) 2005-10 DiBcom (http://www.dibcom.fr/)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+
+#include "dvb_math.h"
+#include "dvb_frontend.h"
+
+#include "dib9000.h"
+#include "dibx000_common.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
+
+#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0)
+#define MAX_NUMBER_OF_FRONTENDS 6
+
+struct i2c_device {
+ struct i2c_adapter *i2c_adap;
+ u8 i2c_addr;
+};
+
+/* lock */
+#define DIB_LOCK struct mutex
+#define DibAcquireLock(lock) do { if (mutex_lock_interruptible(lock) < 0) dprintk("could not get the lock"); } while (0)
+#define DibReleaseLock(lock) mutex_unlock(lock)
+#define DibInitLock(lock) mutex_init(lock)
+#define DibFreeLock(lock)
+
+struct dib9000_state {
+ struct i2c_device i2c;
+
+ struct dibx000_i2c_master i2c_master;
+ struct i2c_adapter tuner_adap;
+ struct i2c_adapter component_bus;
+
+ u16 revision;
+ u8 reg_offs;
+
+ enum frontend_tune_state tune_state;
+ u32 status;
+ struct dvb_frontend_parametersContext channel_status;
+
+ u8 fe_id;
+
+#define DIB9000_GPIO_DEFAULT_DIRECTIONS 0xffff
+ u16 gpio_dir;
+#define DIB9000_GPIO_DEFAULT_VALUES 0x0000
+ u16 gpio_val;
+#define DIB9000_GPIO_DEFAULT_PWM_POS 0xffff
+ u16 gpio_pwm_pos;
+
+ union { /* common for all chips */
+ struct {
+ u8 mobile_mode:1;
+ } host;
+
+ struct {
+ struct dib9000_fe_memory_map {
+ u16 addr;
+ u16 size;
+ } fe_mm[18];
+ u8 memcmd;
+
+ DIB_LOCK mbx_if_lock; /* to protect read/write operations */
+ DIB_LOCK mbx_lock; /* to protect the whole mailbox handling */
+
+ DIB_LOCK mem_lock; /* to protect the memory accesses */
+ DIB_LOCK mem_mbx_lock; /* to protect the memory-based mailbox */
+
+#define MBX_MAX_WORDS (256 - 200 - 2)
+#define DIB9000_MSG_CACHE_SIZE 2
+ u16 message_cache[DIB9000_MSG_CACHE_SIZE][MBX_MAX_WORDS];
+ u8 fw_is_running;
+ } risc;
+ } platform;
+
+ union { /* common for all platforms */
+ struct {
+ struct dib9000_config cfg;
+ } d9;
+ } chip;
+
+ struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
+ u16 component_bus_speed;
+};
+
+u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0
+};
+
+enum dib9000_power_mode {
+ DIB9000_POWER_ALL = 0,
+
+ DIB9000_POWER_NO,
+ DIB9000_POWER_INTERF_ANALOG_AGC,
+ DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD,
+ DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD,
+ DIB9000_POWER_INTERFACE_ONLY,
+};
+
+enum dib9000_out_messages {
+ OUT_MSG_HBM_ACK,
+ OUT_MSG_HOST_BUF_FAIL,
+ OUT_MSG_REQ_VERSION,
+ OUT_MSG_BRIDGE_I2C_W,
+ OUT_MSG_BRIDGE_I2C_R,
+ OUT_MSG_BRIDGE_APB_W,
+ OUT_MSG_BRIDGE_APB_R,
+ OUT_MSG_SCAN_CHANNEL,
+ OUT_MSG_MONIT_DEMOD,
+ OUT_MSG_CONF_GPIO,
+ OUT_MSG_DEBUG_HELP,
+ OUT_MSG_SUBBAND_SEL,
+ OUT_MSG_ENABLE_TIME_SLICE,
+ OUT_MSG_FE_FW_DL,
+ OUT_MSG_FE_CHANNEL_SEARCH,
+ OUT_MSG_FE_CHANNEL_TUNE,
+ OUT_MSG_FE_SLEEP,
+ OUT_MSG_FE_SYNC,
+ OUT_MSG_CTL_MONIT,
+
+ OUT_MSG_CONF_SVC,
+ OUT_MSG_SET_HBM,
+ OUT_MSG_INIT_DEMOD,
+ OUT_MSG_ENABLE_DIVERSITY,
+ OUT_MSG_SET_OUTPUT_MODE,
+ OUT_MSG_SET_PRIORITARY_CHANNEL,
+ OUT_MSG_ACK_FRG,
+ OUT_MSG_INIT_PMU,
+};
+
+enum dib9000_in_messages {
+ IN_MSG_DATA,
+ IN_MSG_FRAME_INFO,
+ IN_MSG_CTL_MONIT,
+ IN_MSG_ACK_FREE_ITEM,
+ IN_MSG_DEBUG_BUF,
+ IN_MSG_MPE_MONITOR,
+ IN_MSG_RAWTS_MONITOR,
+ IN_MSG_END_BRIDGE_I2C_RW,
+ IN_MSG_END_BRIDGE_APB_RW,
+ IN_MSG_VERSION,
+ IN_MSG_END_OF_SCAN,
+ IN_MSG_MONIT_DEMOD,
+ IN_MSG_ERROR,
+ IN_MSG_FE_FW_DL_DONE,
+ IN_MSG_EVENT,
+ IN_MSG_ACK_CHANGE_SVC,
+ IN_MSG_HBM_PROF,
+};
+
+/* memory_access requests */
+#define FE_MM_W_CHANNEL 0
+#define FE_MM_W_FE_INFO 1
+#define FE_MM_RW_SYNC 2
+
+#define FE_SYNC_CHANNEL 1
+#define FE_SYNC_W_GENERIC_MONIT 2
+#define FE_SYNC_COMPONENT_ACCESS 3
+
+#define FE_MM_R_CHANNEL_SEARCH_STATE 3
+#define FE_MM_R_CHANNEL_UNION_CONTEXT 4
+#define FE_MM_R_FE_INFO 5
+#define FE_MM_R_FE_MONITOR 6
+
+#define FE_MM_W_CHANNEL_HEAD 7
+#define FE_MM_W_CHANNEL_UNION 8
+#define FE_MM_W_CHANNEL_CONTEXT 9
+#define FE_MM_R_CHANNEL_UNION 10
+#define FE_MM_R_CHANNEL_CONTEXT 11
+#define FE_MM_R_CHANNEL_TUNE_STATE 12
+
+#define FE_MM_R_GENERIC_MONITORING_SIZE 13
+#define FE_MM_W_GENERIC_MONITORING 14
+#define FE_MM_R_GENERIC_MONITORING 15
+
+#define FE_MM_W_COMPONENT_ACCESS 16
+#define FE_MM_RW_COMPONENT_ACCESS_BUFFER 17
+static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len);
+static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len);
+
+static u16 to_fw_output_mode(u16 mode)
+{
+ switch (mode) {
+ case OUTMODE_HIGH_Z:
+ return 0;
+ case OUTMODE_MPEG2_PAR_GATED_CLK:
+ return 4;
+ case OUTMODE_MPEG2_PAR_CONT_CLK:
+ return 8;
+ case OUTMODE_MPEG2_SERIAL:
+ return 16;
+ case OUTMODE_DIVERSITY:
+ return 128;
+ case OUTMODE_MPEG2_FIFO:
+ return 2;
+ case OUTMODE_ANALOG_ADC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32 len, u16 attribute)
+{
+ u32 chunk_size = 126;
+ u32 l;
+ int ret;
+ u8 wb[2] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msg[2] = {
+ {.addr = state->i2c.i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
+ {.addr = state->i2c.i2c_addr >> 1, .flags = I2C_M_RD, .buf = b, .len = len},
+ };
+
+ if (state->platform.risc.fw_is_running && (reg < 1024))
+ return dib9000_risc_apb_access_read(state, reg, attribute, NULL, 0, b, len);
+
+ if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
+ wb[0] |= (1 << 5);
+ if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+ wb[0] |= (1 << 4);
+
+ do {
+ l = len < chunk_size ? len : chunk_size;
+ msg[1].len = l;
+ msg[1].buf = b;
+ ret = i2c_transfer(state->i2c.i2c_adap, msg, 2) != 2 ? -EREMOTEIO : 0;
+ if (ret != 0) {
+ dprintk("i2c read error on %d", reg);
+ return -EREMOTEIO;
+ }
+
+ b += l;
+ len -= l;
+
+ if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT))
+ reg += l / 2;
+ } while ((ret == 0) && len);
+
+ return 0;
+}
+
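dib9000_read16_attr() above splits large register reads into I2C transfers of at most 126 bytes; unless the no-address-increment attribute is set, the 16-bit register address advances by half the chunk length (one register per two bytes), and once the RISC firmware is running, accesses below register 1024 are redirected through the APB mailbox instead. A small sketch of the chunk arithmetic only (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Show how a len-byte read starting at reg is split into <=126-byte chunks,
 * mirroring the loop in dib9000_read16_attr(). */
static void show_chunks(uint16_t reg, uint32_t len)
{
        const uint32_t chunk_size = 126;

        while (len) {
                uint32_t l = len < chunk_size ? len : chunk_size;

                printf("%u bytes at register %u\n", (unsigned)l, (unsigned)reg);
                reg += l / 2;   /* registers are 16 bits wide */
                len -= l;
        }
}

int main(void)
{
        show_chunks(512, 300); /* 126 + 126 + 48 bytes */
        return 0;
}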
+static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
+{
+ u8 b[2];
+ u8 wb[2] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msg[2] = {
+ {.addr = i2c->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
+ {.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD, .buf = b, .len = 2},
+ };
+
+ if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
+ dprintk("read register %x error", reg);
+ return 0;
+ }
+
+ return (b[0] << 8) | b[1];
+}
+
+static inline u16 dib9000_read_word(struct dib9000_state *state, u16 reg)
+{
+ u8 b[2];
+ if (dib9000_read16_attr(state, reg, b, 2, 0) != 0)
+ return 0;
+ return (b[0] << 8 | b[1]);
+}
+
+static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u16 attribute)
+{
+ u8 b[2];
+ if (dib9000_read16_attr(state, reg, b, 2, attribute) != 0)
+ return 0;
+ return (b[0] << 8 | b[1]);
+}
+
+#define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+
+static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
+{
+ u8 b[255];
+ u32 chunk_size = 126;
+ u32 l;
+ int ret;
+
+ struct i2c_msg msg = {
+ .addr = state->i2c.i2c_addr >> 1, .flags = 0, .buf = b, .len = len + 2
+ };
+
+ if (state->platform.risc.fw_is_running && (reg < 1024)) {
+ if (dib9000_risc_apb_access_write
+ (state, reg, DATA_BUS_ACCESS_MODE_16BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | attribute, buf, len) != 0)
+ return -EINVAL;
+ return 0;
+ }
+
+ b[0] = (reg >> 8) & 0xff;
+ b[1] = (reg) & 0xff;
+
+ if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
+ b[0] |= (1 << 5);
+ if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+ b[0] |= (1 << 4);
+
+ do {
+ l = len < chunk_size ? len : chunk_size;
+ msg.len = l + 2;
+ memcpy(&b[2], buf, l);
+
+ ret = i2c_transfer(state->i2c.i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+
+ buf += l;
+ len -= l;
+
+ if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT))
+ reg += l / 2;
+ } while ((ret == 0) && len);
+
+ return ret;
+}
+
+static int dib9000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
+{
+ u8 b[4] = { (reg >> 8) & 0xff, reg & 0xff, (val >> 8) & 0xff, val & 0xff };
+ struct i2c_msg msg = {
+ .addr = i2c->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+ };
+
+ return i2c_transfer(i2c->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+}
+
+static inline int dib9000_write_word(struct dib9000_state *state, u16 reg, u16 val)
+{
+ u8 b[2] = { val >> 8, val & 0xff };
+ return dib9000_write16_attr(state, reg, b, 2, 0);
+}
+
+static inline int dib9000_write_word_attr(struct dib9000_state *state, u16 reg, u16 val, u16 attribute)
+{
+ u8 b[2] = { val >> 8, val & 0xff };
+ return dib9000_write16_attr(state, reg, b, 2, attribute);
+}
+
+#define dib9000_write(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, 0)
+#define dib9000_write16_noinc(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+#define dib9000_write16_noinc_attr(state, reg, buf, len, attribute) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | (attribute))
+
+#define dib9000_mbx_send(state, id, data, len) dib9000_mbx_send_attr(state, id, data, len, 0)
+#define dib9000_mbx_get_message(state, id, msg, len) dib9000_mbx_get_message_attr(state, id, msg, len, 0)
+
+#define MAC_IRQ (1 << 1)
+#define IRQ_POL_MSK (1 << 4)
+
+#define dib9000_risc_mem_read_chunks(state, b, len) dib9000_read16_attr(state, 1063, b, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+#define dib9000_risc_mem_write_chunks(state, buf, len) dib9000_write16_attr(state, 1063, buf, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+
+static void dib9000_risc_mem_setup_cmd(struct dib9000_state *state, u32 addr, u32 len, u8 reading)
+{
+ u8 b[14] = { 0 };
+
+/* dprintk("%d memcmd: %d %d %d\n", state->fe_id, addr, addr+len, len); */
+/* b[0] = 0 << 7; */
+ b[1] = 1;
+
+/* b[2] = 0; */
+/* b[3] = 0; */
+ b[4] = (u8) (addr >> 8);
+ b[5] = (u8) (addr & 0xff);
+
+/* b[10] = 0; */
+/* b[11] = 0; */
+ b[12] = (u8) (addr >> 8);
+ b[13] = (u8) (addr & 0xff);
+
+ addr += len;
+/* b[6] = 0; */
+/* b[7] = 0; */
+ b[8] = (u8) (addr >> 8);
+ b[9] = (u8) (addr & 0xff);
+
+ dib9000_write(state, 1056, b, 14);
+ if (reading)
+ dib9000_write_word(state, 1056, (1 << 15) | 1);
+ state->platform.risc.memcmd = -1; /* if it was called directly reset it - to force a future setup-call to set it */
+}
+
+static void dib9000_risc_mem_setup(struct dib9000_state *state, u8 cmd)
+{
+ struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd & 0x7f];
+ /* decide whether we need to "refresh" the memory controller */
+ if (state->platform.risc.memcmd == cmd && /* same command */
+ !(cmd & 0x80 && m->size < 67)) /* and we do not want to read something with less than 67 bytes looping - working around a bug in the memory controller */
+ return;
+ dib9000_risc_mem_setup_cmd(state, m->addr, m->size, cmd & 0x80);
+ state->platform.risc.memcmd = cmd;
+}
+
+static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u16 len)
+{
+ if (!state->platform.risc.fw_is_running)
+ return -EIO;
+
+ DibAcquireLock(&state->platform.risc.mem_lock);
+ dib9000_risc_mem_setup(state, cmd | 0x80);
+ dib9000_risc_mem_read_chunks(state, b, len);
+ DibReleaseLock(&state->platform.risc.mem_lock);
+ return 0;
+}
+
+static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8 * b)
+{
+ struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd];
+ if (!state->platform.risc.fw_is_running)
+ return -EIO;
+
+ DibAcquireLock(&state->platform.risc.mem_lock);
+ dib9000_risc_mem_setup(state, cmd);
+ dib9000_risc_mem_write_chunks(state, b, m->size);
+ DibReleaseLock(&state->platform.risc.mem_lock);
+ return 0;
+}
+
+static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u16 key, const u8 * code, u32 len)
+{
+ u16 offs;
+
+ if (risc_id == 1)
+ offs = 16;
+ else
+ offs = 0;
+
+ /* config crtl reg */
+ dib9000_write_word(state, 1024 + offs, 0x000f);
+ dib9000_write_word(state, 1025 + offs, 0);
+ dib9000_write_word(state, 1031 + offs, key);
+
+ dprintk("going to download %dB of microcode", len);
+ if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) {
+ dprintk("error while downloading microcode for RISC %c", 'A' + risc_id);
+ return -EIO;
+ }
+
+ dprintk("Microcode for RISC %c loaded", 'A' + risc_id);
+
+ return 0;
+}
+
+static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id)
+{
+ u16 mbox_offs;
+ u16 reset_reg;
+ u16 tries = 1000;
+
+ if (risc_id == 1)
+ mbox_offs = 16;
+ else
+ mbox_offs = 0;
+
+ /* Reset mailbox */
+ dib9000_write_word(state, 1027 + mbox_offs, 0x8000);
+
+ /* Read reset status */
+ do {
+ reset_reg = dib9000_read_word(state, 1027 + mbox_offs);
+ msleep(100);
+ } while ((reset_reg & 0x8000) && --tries);
+
+ if (reset_reg & 0x8000) {
+ dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id);
+ return -EIO;
+ }
+ dprintk("MBX: initialized");
+ return 0;
+}
+
+#define MAX_MAILBOX_TRY 100
+static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data, u8 len, u16 attr)
+{
+ u8 ret = 0, *d, b[2];
+ u16 tmp;
+ u16 size;
+ u32 i;
+
+ if (!state->platform.risc.fw_is_running)
+ return -EINVAL;
+
+ DibAcquireLock(&state->platform.risc.mbx_if_lock);
+ tmp = MAX_MAILBOX_TRY;
+ do {
+ size = dib9000_read_word_attr(state, 1043, attr) & 0xff;
+ if ((size + len + 1) > MBX_MAX_WORDS && --tmp) {
+ dprintk("MBX: RISC mbx full, retrying");
+ msleep(100);
+ } else
+ break;
+ } while (1);
+
+ /*dprintk( "MBX: size: %d", size); */
+
+ if (tmp == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+#ifdef DUMP_MSG
+ dprintk("--> %02x %d ", id, len + 1);
+ for (i = 0; i < len; i++)
+ dprintk("%04x ", data[i]);
+ dprintk("\n");
+#endif
+
+ /* byte-order conversion - works on big (where it is not necessary) or little endian */
+ d = (u8 *) data;
+ for (i = 0; i < len; i++) {
+ tmp = data[i];
+ *d++ = tmp >> 8;
+ *d++ = tmp & 0xff;
+ }
+
+ /* write msg */
+ b[0] = id;
+ b[1] = len + 1;
+ if (dib9000_write16_noinc_attr(state, 1045, b, 2, attr) != 0 || dib9000_write16_noinc_attr(state, 1045, (u8 *) data, len * 2, attr) != 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* update register nb_mes_in_RX */
+ ret = (u8) dib9000_write_word_attr(state, 1043, 1 << 14, attr);
+
+out:
+ DibReleaseLock(&state->platform.risc.mbx_if_lock);
+
+ return ret;
+}
+
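dib9000_mbx_send_attr() converts each 16-bit mailbox word to big-endian byte order in place before pushing the buffer over I2C, then writes bit 14 of register 1043 to tell the RISC that a new message is pending. A minimal sketch of the in-place byte-order conversion (standard C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Rewrite each 16-bit word as two bytes, MSB first, reusing the same buffer
 * exactly as dib9000_mbx_send_attr() does. */
static void words_to_be_bytes(uint16_t *data, int len)
{
        uint8_t *d = (uint8_t *)data;
        int i;

        for (i = 0; i < len; i++) {
                uint16_t tmp = data[i];

                *d++ = tmp >> 8;
                *d++ = tmp & 0xff;
        }
}

int main(void)
{
        uint16_t msg[2] = { 0x1234, 0xabcd };
        const uint8_t *p = (const uint8_t *)msg;

        words_to_be_bytes(msg, 2);
        printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]); /* 12 34 ab cd */
        return 0;
}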
+static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id, u16 attr)
+{
+#ifdef DUMP_MSG
+ u16 *d = data;
+#endif
+
+ u16 tmp, i;
+ u8 size;
+ u8 mc_base;
+
+ if (!state->platform.risc.fw_is_running)
+ return 0;
+
+ DibAcquireLock(&state->platform.risc.mbx_if_lock);
+ if (risc_id == 1)
+ mc_base = 16;
+ else
+ mc_base = 0;
+
+ /* Length and type in the first word */
+ *data = dib9000_read_word_attr(state, 1029 + mc_base, attr);
+
+ size = *data & 0xff;
+ if (size <= MBX_MAX_WORDS) {
+ data++;
+ size--; /* Initial word already read */
+
+ dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, size * 2, attr);
+
+ /* to word conversion */
+ for (i = 0; i < size; i++) {
+ tmp = *data;
+ *data = (tmp >> 8) | (tmp << 8);
+ data++;
+ }
+
+#ifdef DUMP_MSG
+ dprintk("<-- ");
+ for (i = 0; i < size + 1; i++)
+ dprintk("%04x ", d[i]);
+ dprintk("\n");
+#endif
+ } else {
+ dprintk("MBX: message is too big for message cache (%d), flushing message", size);
+ size--; /* Initial word already read */
+ while (size--)
+ dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr);
+ }
+ /* Update register nb_mes_in_TX */
+ dib9000_write_word_attr(state, 1028 + mc_base, 1 << 14, attr);
+
+ DibReleaseLock(&state->platform.risc.mbx_if_lock);
+
+ return size + 1;
+}
+
+static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 size)
+{
+ u32 ts = data[1] << 16 | data[0];
+ char *b = (char *)&data[2];
+
+ b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */
+ if (*b == '~') {
+ b++;
+ dprintk(b);
+ } else
+ dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<emtpy>");
+ return 1;
+}
+
+static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
+{
+ int i;
+ u8 size;
+ u16 *block;
+ /* find a free slot */
+ for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) {
+ block = state->platform.risc.message_cache[i];
+ if (*block == 0) {
+ size = dib9000_mbx_read(state, block, 1, attr);
+
+/* dprintk( "MBX: fetched %04x message to cache", *block); */
+
+ switch (*block >> 8) {
+ case IN_MSG_DEBUG_BUF:
+ dib9000_risc_debug_buf(state, block + 1, size); /* debug-messages are going to be printed right away */
+ *block = 0; /* free the block */
+ break;
+#if 0
+ case IN_MSG_DATA: /* FE-TRACE */
+ dib9000_risc_data_process(state, block + 1, size);
+ *block = 0;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ return 1;
+ }
+ }
+ dprintk("MBX: no free cache-slot found for new message...");
+ return -1;
+}
+
+static u8 dib9000_mbx_count(struct dib9000_state *state, u8 risc_id, u16 attr)
+{
+ if (risc_id == 0)
+ return (u8) (dib9000_read_word_attr(state, 1028, attr) >> 10) & 0x1f; /* 5 bit field */
+ else
+ return (u8) (dib9000_read_word_attr(state, 1044, attr) >> 8) & 0x7f; /* 7 bit field */
+}
+
+static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
+{
+ int ret = 0;
+ u16 tmp;
+
+ if (!state->platform.risc.fw_is_running)
+ return -1;
+
+ DibAcquireLock(&state->platform.risc.mbx_lock);
+
+ if (dib9000_mbx_count(state, 1, attr)) /* 1=RiscB */
+ ret = dib9000_mbx_fetch_to_cache(state, attr);
+
+ tmp = dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
+/* if (tmp) */
+/* dprintk( "cleared IRQ: %x", tmp); */
+ DibReleaseLock(&state->platform.risc.mbx_lock);
+
+ return ret;
+}
+
+static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16 * msg, u8 * size, u16 attr)
+{
+ u8 i;
+ u16 *block;
+ u16 timeout = 30;
+
+ *msg = 0;
+ do {
+ /* dib9000_mbx_get_from_cache(); */
+ for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) {
+ block = state->platform.risc.message_cache[i];
+ if ((*block >> 8) == id) {
+ *size = (*block & 0xff) - 1;
+ memcpy(msg, block + 1, (*size) * 2);
+ *block = 0; /* free the block */
+ i = 0; /* signal that we found a message */
+ break;
+ }
+ }
+
+ if (i == 0)
+ break;
+
+ if (dib9000_mbx_process(state, attr) == -1) /* try to fetch one message - if any */
+ return -1;
+
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dprintk("waiting for message %d timed out", id);
+ return -1;
+ }
+
+ return i == 0;
+}
+
+static int dib9000_risc_check_version(struct dib9000_state *state)
+{
+ u8 r[4];
+ u8 size;
+ u16 fw_version = 0;
+
+ if (dib9000_mbx_send(state, OUT_MSG_REQ_VERSION, &fw_version, 1) != 0)
+ return -EIO;
+
+ if (dib9000_mbx_get_message(state, IN_MSG_VERSION, (u16 *) r, &size) < 0)
+ return -EIO;
+
+ fw_version = (r[0] << 8) | r[1];
+ dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
+
+ if ((fw_version >> 10) != 7)
+ return -EINVAL;
+
+ switch (fw_version & 0x3ff) {
+ case 11:
+ case 12:
+ case 14:
+ case 15:
+ case 16:
+ case 17:
+ break;
+ default:
+ dprintk("RISC: invalid firmware version");
+ return -EINVAL;
+ }
+
+ dprintk("RISC: valid firmware version");
+ return 0;
+}
+
+static int dib9000_fw_boot(struct dib9000_state *state, const u8 * codeA, u32 lenA, const u8 * codeB, u32 lenB)
+{
+ /* Reconfig pool mac ram */
+ dib9000_write_word(state, 1225, 0x02); /* A: 8k C, 4 k D - B: 32k C 6 k D - IRAM 96k */
+ dib9000_write_word(state, 1226, 0x05);
+
+ /* Toggles IP crypto to Host APB interface. */
+ dib9000_write_word(state, 1542, 1);
+
+ /* Set jump and no jump in the dma box */
+ dib9000_write_word(state, 1074, 0);
+ dib9000_write_word(state, 1075, 0);
+
+ /* Set MAC as APB Master. */
+ dib9000_write_word(state, 1237, 0);
+
+ /* Reset the RISCs */
+ if (codeA != NULL)
+ dib9000_write_word(state, 1024, 2);
+ else
+ dib9000_write_word(state, 1024, 15);
+ if (codeB != NULL)
+ dib9000_write_word(state, 1040, 2);
+
+ if (codeA != NULL)
+ dib9000_firmware_download(state, 0, 0x1234, codeA, lenA);
+ if (codeB != NULL)
+ dib9000_firmware_download(state, 1, 0x1234, codeB, lenB);
+
+ /* Run the RISCs */
+ if (codeA != NULL)
+ dib9000_write_word(state, 1024, 0);
+ if (codeB != NULL)
+ dib9000_write_word(state, 1040, 0);
+
+ if (codeA != NULL)
+ if (dib9000_mbx_host_init(state, 0) != 0)
+ return -EIO;
+ if (codeB != NULL)
+ if (dib9000_mbx_host_init(state, 1) != 0)
+ return -EIO;
+
+ msleep(100);
+ state->platform.risc.fw_is_running = 1;
+
+ if (dib9000_risc_check_version(state) != 0)
+ return -EINVAL;
+
+ state->platform.risc.memcmd = 0xff;
+ return 0;
+}
+
+static u16 dib9000_identify(struct i2c_device *client)
+{
+ u16 value;
+
+ value = dib9000_i2c_read16(client, 896);
+ if (value != 0x01b3) {
+ dprintk("wrong Vendor ID (0x%x)", value);
+ return 0;
+ }
+
+ value = dib9000_i2c_read16(client, 897);
+ if (value != 0x4000 && value != 0x4001 && value != 0x4002 && value != 0x4003 && value != 0x4004 && value != 0x4005) {
+ dprintk("wrong Device ID (0x%x)", value);
+ return 0;
+ }
+
+ /* protect this driver to be used with 7000PC */
+ if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) {
+ dprintk("this driver does not work with DiB7000PC");
+ return 0;
+ }
+
+ switch (value) {
+ case 0x4000:
+ dprintk("found DiB7000MA/PA/MB/PB");
+ break;
+ case 0x4001:
+ dprintk("found DiB7000HC");
+ break;
+ case 0x4002:
+ dprintk("found DiB7000MC");
+ break;
+ case 0x4003:
+ dprintk("found DiB9000A");
+ break;
+ case 0x4004:
+ dprintk("found DiB9000H");
+ break;
+ case 0x4005:
+ dprintk("found DiB9000M");
+ break;
+ }
+
+ return value;
+}
+
+static void dib9000_set_power_mode(struct dib9000_state *state, enum dib9000_power_mode mode)
+{
+ /* by default everything is going to be powered off */
+ u16 reg_903 = 0x3fff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906;
+ u8 offset;
+
+ if (state->revision == 0x4003 || state->revision == 0x4004 || state->revision == 0x4005)
+ offset = 1;
+ else
+ offset = 0;
+
+ reg_906 = dib9000_read_word(state, 906 + offset) | 0x3; /* keep settings for RISC */
+
+ /* now, depending on the requested mode, we power on */
+ switch (mode) {
+ /* power up everything in the demod */
+ case DIB9000_POWER_ALL:
+ reg_903 = 0x0000;
+ reg_904 = 0x0000;
+ reg_905 = 0x0000;
+ reg_906 = 0x0000;
+ break;
+
+ /* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */
+ case DIB9000_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C or SRAM */
+ reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2));
+ break;
+
+ case DIB9000_POWER_INTERF_ANALOG_AGC:
+ reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10));
+ reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2));
+ reg_906 &= ~((1 << 0));
+ break;
+
+ case DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD:
+ reg_903 = 0x0000;
+ reg_904 = 0x801f;
+ reg_905 = 0x0000;
+ reg_906 &= ~((1 << 0));
+ break;
+
+ case DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD:
+ reg_903 = 0x0000;
+ reg_904 = 0x8000;
+ reg_905 = 0x010b;
+ reg_906 &= ~((1 << 0));
+ break;
+ default:
+ case DIB9000_POWER_NO:
+ break;
+ }
+
+ /* always power down unused parts */
+ if (!state->platform.host.mobile_mode)
+ reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1);
+
+ /* P_sdio_select_clk = 0 on MC and after */
+ if (state->revision != 0x4000)
+ reg_906 <<= 1;
+
+ dib9000_write_word(state, 903 + offset, reg_903);
+ dib9000_write_word(state, 904 + offset, reg_904);
+ dib9000_write_word(state, 905 + offset, reg_905);
+ dib9000_write_word(state, 906 + offset, reg_906);
+}
+
+static int dib9000_fw_reset(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+
+ dib9000_write_word(state, 1817, 0x0003);
+
+ dib9000_write_word(state, 1227, 1);
+ dib9000_write_word(state, 1227, 0);
+
+ switch ((state->revision = dib9000_identify(&state->i2c))) {
+ case 0x4003:
+ case 0x4004:
+ case 0x4005:
+ state->reg_offs = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* reset the i2c-master to use the host interface */
+ dibx000_reset_i2c_master(&state->i2c_master);
+
+ dib9000_set_power_mode(state, DIB9000_POWER_ALL);
+
+ /* unforce divstr regardless of whether i2c enumeration was done or not */
+ dib9000_write_word(state, 1794, dib9000_read_word(state, 1794) & ~(1 << 1));
+ dib9000_write_word(state, 1796, 0);
+ dib9000_write_word(state, 1805, 0x805);
+
+ /* restart all parts */
+ dib9000_write_word(state, 898, 0xffff);
+ dib9000_write_word(state, 899, 0xffff);
+ dib9000_write_word(state, 900, 0x0001);
+ dib9000_write_word(state, 901, 0xff19);
+ dib9000_write_word(state, 902, 0x003c);
+
+ dib9000_write_word(state, 898, 0);
+ dib9000_write_word(state, 899, 0);
+ dib9000_write_word(state, 900, 0);
+ dib9000_write_word(state, 901, 0);
+ dib9000_write_word(state, 902, 0);
+
+ dib9000_write_word(state, 911, state->chip.d9.cfg.if_drives);
+
+ dib9000_set_power_mode(state, DIB9000_POWER_INTERFACE_ONLY);
+
+ return 0;
+}
+
+static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len)
+{
+ u16 mb[10];
+ u8 i, s;
+
+ if (address >= 1024 || !state->platform.risc.fw_is_running)
+ return -EINVAL;
+
+ /* dprintk( "APB access thru rd fw %d %x", address, attribute); */
+
+ mb[0] = (u16) address;
+ mb[1] = len / 2;
+ dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_R, mb, 2, attribute);
+ switch (dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute)) {
+ case 1:
+ s--;
+ for (i = 0; i < s; i++) {
+ b[i * 2] = (mb[i + 1] >> 8) & 0xff;
+ b[i * 2 + 1] = (mb[i + 1]) & 0xff;
+ }
+ return 0;
+ default:
+ return -EIO;
+ }
+ return -EIO;
+}
+
+static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len)
+{
+ u16 mb[10];
+ u8 s, i;
+
+ if (address >= 1024 || !state->platform.risc.fw_is_running)
+ return -EINVAL;
+
+ /* dprintk( "APB access thru wr fw %d %x", address, attribute); */
+
+ mb[0] = (unsigned short)address;
+ for (i = 0; i < len && i < 18; i += 2) /* mb[] holds at most 9 payload words (18 bytes) */
+ mb[1 + (i / 2)] = (b[i] << 8 | b[i + 1]);
+
+ dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_W, mb, 1 + len / 2, attribute);
+ return dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute) == 1 ? 0 : -EINVAL;
+}
+
+static int dib9000_fw_memmbx_sync(struct dib9000_state *state, u8 i)
+{
+ u8 index_loop = 10;
+
+ if (!state->platform.risc.fw_is_running)
+ return 0;
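+ /* hand the sync token to the RISC and poll until the firmware clears it
+  * again (bounded by index_loop); a token still set afterwards means the
+  * firmware never acknowledged the request */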
+ dib9000_risc_mem_write(state, FE_MM_RW_SYNC, &i);
+ do {
+ dib9000_risc_mem_read(state, FE_MM_RW_SYNC, &i, 1);
+ } while (i && --index_loop);
+
+ if (i == 0)
+ return 0;
+ return -EIO;
+}
+
+static int dib9000_fw_init(struct dib9000_state *state)
+{
+ struct dibGPIOFunction *f;
+ u16 b[40] = { 0 };
+ u8 i;
+ u8 size;
+
+ if (dib9000_fw_boot(state, NULL, 0, state->chip.d9.cfg.microcode_B_fe_buffer, state->chip.d9.cfg.microcode_B_fe_size) != 0)
+ return -EIO;
+
+ /* initialize the firmware */
+ for (i = 0; i < ARRAY_SIZE(state->chip.d9.cfg.gpio_function); i++) {
+ f = &state->chip.d9.cfg.gpio_function[i];
+ if (f->mask) {
+ switch (f->function) {
+ case BOARD_GPIO_FUNCTION_COMPONENT_ON:
+ b[0] = (u16) f->mask;
+ b[1] = (u16) f->direction;
+ b[2] = (u16) f->value;
+ break;
+ case BOARD_GPIO_FUNCTION_COMPONENT_OFF:
+ b[3] = (u16) f->mask;
+ b[4] = (u16) f->direction;
+ b[5] = (u16) f->value;
+ break;
+ }
+ }
+ }
+ if (dib9000_mbx_send(state, OUT_MSG_CONF_GPIO, b, 15) != 0)
+ return -EIO;
+
+ /* subband */
+ b[0] = state->chip.d9.cfg.subband.size; /* type == 0 -> GPIO - PWM not yet supported */
+ for (i = 0; i < state->chip.d9.cfg.subband.size; i++) {
+ b[1 + i * 4] = state->chip.d9.cfg.subband.subband[i].f_mhz;
+ b[2 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.mask;
+ b[3 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.direction;
+ b[4 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.value;
+ }
+ b[1 + i * 4] = 0; /* fe_id */
+ if (dib9000_mbx_send(state, OUT_MSG_SUBBAND_SEL, b, 2 + 4 * i) != 0)
+ return -EIO;
+
+ /* 0 - id, 1 - no_of_frontends */
+ b[0] = (0 << 8) | 1;
+ /* 0 = i2c-address demod, 0 = tuner */
+ b[1] = (0 << 8) | (0);
+ b[2] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000) >> 16) & 0xffff);
+ b[3] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000)) & 0xffff);
+ b[4] = (u16) ((state->chip.d9.cfg.vcxo_timer >> 16) & 0xffff);
+ b[5] = (u16) ((state->chip.d9.cfg.vcxo_timer) & 0xffff);
+ b[6] = (u16) ((state->chip.d9.cfg.timing_frequency >> 16) & 0xffff);
+ b[7] = (u16) ((state->chip.d9.cfg.timing_frequency) & 0xffff);
+ b[29] = state->chip.d9.cfg.if_drives;
+ if (dib9000_mbx_send(state, OUT_MSG_INIT_DEMOD, b, ARRAY_SIZE(b)) != 0)
+ return -EIO;
+
+ if (dib9000_mbx_send(state, OUT_MSG_FE_FW_DL, NULL, 0) != 0)
+ return -EIO;
+
+ if (dib9000_mbx_get_message(state, IN_MSG_FE_FW_DL_DONE, b, &size) < 0)
+ return -EIO;
+
+ if (size > ARRAY_SIZE(b)) {
+ dprintk("error : firmware returned %dbytes needed but the used buffer has only %dbytes\n Firmware init ABORTED", size,
+ (int)ARRAY_SIZE(b));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ state->platform.risc.fe_mm[i / 2].addr = b[i + 0];
+ state->platform.risc.fe_mm[i / 2].size = b[i + 1];
+ }
+
+ return 0;
+}
+
+static void dib9000_fw_set_channel_head(struct dib9000_state *state, struct dvb_frontend_parameters *ch)
+{
+ u8 b[9];
+ u32 freq = state->fe[0]->dtv_property_cache.frequency / 1000;
+ if (state->fe_id % 2)
+ freq += 101;
+
+ b[0] = (u8) ((freq >> 0) & 0xff);
+ b[1] = (u8) ((freq >> 8) & 0xff);
+ b[2] = (u8) ((freq >> 16) & 0xff);
+ b[3] = (u8) ((freq >> 24) & 0xff);
+ b[4] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 0) & 0xff);
+ b[5] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 8) & 0xff);
+ b[6] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 16) & 0xff);
+ b[7] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 24) & 0xff);
+ b[8] = 0x80; /* do not wait for CELL ID when doing autosearch */
+ if (state->fe[0]->dtv_property_cache.delivery_system == SYS_DVBT)
+ b[8] |= 1;
+ dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_HEAD, b);
+}
+
+static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ struct dibDVBTChannel {
+ s8 spectrum_inversion;
+
+ s8 nfft;
+ s8 guard;
+ s8 constellation;
+
+ s8 hrch;
+ s8 alpha;
+ s8 code_rate_hp;
+ s8 code_rate_lp;
+ s8 select_hp;
+
+ s8 intlv_native;
+ };
+ struct dibDVBTChannel ch;
+ int ret = 0;
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+ ret = -EIO;
+ goto error;
+ }
+
+ dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION, (u8 *) &ch, sizeof(struct dibDVBTChannel));
+
+ switch (ch.spectrum_inversion & 0x7) {
+ case 1:
+ state->fe[0]->dtv_property_cache.inversion = INVERSION_ON;
+ break;
+ case 0:
+ state->fe[0]->dtv_property_cache.inversion = INVERSION_OFF;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.inversion = INVERSION_AUTO;
+ break;
+ }
+ switch (ch.nfft) {
+ case 0:
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 2:
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_4K;
+ break;
+ case 1:
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
+ break;
+ }
+ switch (ch.guard) {
+ case 0:
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
+ break;
+ }
+ switch (ch.constellation) {
+ case 2:
+ state->fe[0]->dtv_property_cache.modulation = QAM_64;
+ break;
+ case 1:
+ state->fe[0]->dtv_property_cache.modulation = QAM_16;
+ break;
+ case 0:
+ state->fe[0]->dtv_property_cache.modulation = QPSK;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.modulation = QAM_AUTO;
+ break;
+ }
+ switch (ch.hrch) {
+ case 0:
+ state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_NONE;
+ break;
+ case 1:
+ state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_1;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_AUTO;
+ break;
+ }
+ switch (ch.code_rate_hp) {
+ case 1:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_1_2;
+ break;
+ case 2:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_2_3;
+ break;
+ case 3:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_3_4;
+ break;
+ case 5:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_5_6;
+ break;
+ case 7:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_7_8;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_AUTO;
+ break;
+ }
+ switch (ch.code_rate_lp) {
+ case 1:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_1_2;
+ break;
+ case 2:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_2_3;
+ break;
+ case 3:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_3_4;
+ break;
+ case 5:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_5_6;
+ break;
+ case 7:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_7_8;
+ break;
+ default:
+ case -1:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_AUTO;
+ break;
+ }
+
+error:
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ return ret;
+}
+
+static int dib9000_fw_set_channel_union(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ struct dibDVBTChannel {
+ s8 spectrum_inversion;
+
+ s8 nfft;
+ s8 guard;
+ s8 constellation;
+
+ s8 hrch;
+ s8 alpha;
+ s8 code_rate_hp;
+ s8 code_rate_lp;
+ s8 select_hp;
+
+ s8 intlv_native;
+ };
+ struct dibDVBTChannel ch;
+
+ switch (state->fe[0]->dtv_property_cache.inversion) {
+ case INVERSION_ON:
+ ch.spectrum_inversion = 1;
+ break;
+ case INVERSION_OFF:
+ ch.spectrum_inversion = 0;
+ break;
+ default:
+ case INVERSION_AUTO:
+ ch.spectrum_inversion = -1;
+ break;
+ }
+ switch (state->fe[0]->dtv_property_cache.transmission_mode) {
+ case TRANSMISSION_MODE_2K:
+ ch.nfft = 0;
+ break;
+ case TRANSMISSION_MODE_4K:
+ ch.nfft = 2;
+ break;
+ case TRANSMISSION_MODE_8K:
+ ch.nfft = 1;
+ break;
+ default:
+ case TRANSMISSION_MODE_AUTO:
+ ch.nfft = 1;
+ break;
+ }
+ switch (state->fe[0]->dtv_property_cache.guard_interval) {
+ case GUARD_INTERVAL_1_32:
+ ch.guard = 0;
+ break;
+ case GUARD_INTERVAL_1_16:
+ ch.guard = 1;
+ break;
+ case GUARD_INTERVAL_1_8:
+ ch.guard = 2;
+ break;
+ case GUARD_INTERVAL_1_4:
+ ch.guard = 3;
+ break;
+ default:
+ case GUARD_INTERVAL_AUTO:
+ ch.guard = -1;
+ break;
+ }
+ switch (state->fe[0]->dtv_property_cache.modulation) {
+ case QAM_64:
+ ch.constellation = 2;
+ break;
+ case QAM_16:
+ ch.constellation = 1;
+ break;
+ case QPSK:
+ ch.constellation = 0;
+ break;
+ default:
+ case QAM_AUTO:
+ ch.constellation = -1;
+ break;
+ }
+ switch (state->fe[0]->dtv_property_cache.hierarchy) {
+ case HIERARCHY_NONE:
+ ch.hrch = 0;
+ break;
+ case HIERARCHY_1:
+ case HIERARCHY_2:
+ case HIERARCHY_4:
+ ch.hrch = 1;
+ break;
+ default:
+ case HIERARCHY_AUTO:
+ ch.hrch = -1;
+ break;
+ }
+ ch.alpha = 1;
+ switch (state->fe[0]->dtv_property_cache.code_rate_HP) {
+ case FEC_1_2:
+ ch.code_rate_hp = 1;
+ break;
+ case FEC_2_3:
+ ch.code_rate_hp = 2;
+ break;
+ case FEC_3_4:
+ ch.code_rate_hp = 3;
+ break;
+ case FEC_5_6:
+ ch.code_rate_hp = 5;
+ break;
+ case FEC_7_8:
+ ch.code_rate_hp = 7;
+ break;
+ default:
+ case FEC_AUTO:
+ ch.code_rate_hp = -1;
+ break;
+ }
+ switch (state->fe[0]->dtv_property_cache.code_rate_LP) {
+ case FEC_1_2:
+ ch.code_rate_lp = 1;
+ break;
+ case FEC_2_3:
+ ch.code_rate_lp = 2;
+ break;
+ case FEC_3_4:
+ ch.code_rate_lp = 3;
+ break;
+ case FEC_5_6:
+ ch.code_rate_lp = 5;
+ break;
+ case FEC_7_8:
+ ch.code_rate_lp = 7;
+ break;
+ default:
+ case FEC_AUTO:
+ ch.code_rate_lp = -1;
+ break;
+ }
+ ch.select_hp = 1;
+ ch.intlv_native = 1;
+
+ dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_UNION, (u8 *) &ch);
+
+ return 0;
+}
+
+static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ int ret = 10, search = state->channel_status.status == CHANNEL_STATUS_PARAMETERS_UNKNOWN;
+ s8 i;
+
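+ /* two-step firmware tune: CT_DEMOD_START writes the channel description
+  * and starts either an autosearch or a direct tune, CT_DEMOD_STEP_1 then
+  * polls the firmware state until it reports success or failure */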
+ switch (state->tune_state) {
+ case CT_DEMOD_START:
+ dib9000_fw_set_channel_head(state, ch);
+
+ /* write the channel context - a channel is initialized to 0, so it is OK */
+ dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_CONTEXT, (u8 *) fe_info);
+ dib9000_risc_mem_write(state, FE_MM_W_FE_INFO, (u8 *) fe_info);
+
+ if (search)
+ dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_SEARCH, NULL, 0);
+ else {
+ dib9000_fw_set_channel_union(fe, ch);
+ dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_TUNE, NULL, 0);
+ }
+ state->tune_state = CT_DEMOD_STEP_1;
+ break;
+ case CT_DEMOD_STEP_1:
+ if (search)
+ dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, (u8 *) &i, 1);
+ else
+ dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, (u8 *) &i, 1);
+ switch (i) { /* something happened */
+ case 0:
+ break;
+ case -2: /* tps locks are "slower" than MPEG locks -> even in autosearch data is OK here */
+ if (search)
+ state->status = FE_STATUS_DEMOD_SUCCESS;
+ else {
+ state->tune_state = CT_DEMOD_STOP;
+ state->status = FE_STATUS_LOCKED;
+ }
+ break;
+ default:
+ state->status = FE_STATUS_TUNE_FAILED;
+ state->tune_state = CT_DEMOD_STOP;
+ break;
+ }
+ break;
+ default:
+ ret = FE_CALLBACK_TIME_NEVER;
+ break;
+ }
+
+ return ret;
+}
+
+static int dib9000_fw_set_diversity_in(struct dvb_frontend *fe, int onoff)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u16 mode = (u16) onoff;
+ return dib9000_mbx_send(state, OUT_MSG_ENABLE_DIVERSITY, &mode, 1);
+}
+
+static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u16 outreg, smo_mode;
+
+ dprintk("setting output mode for demod %p to %d", fe, mode);
+
+ switch (mode) {
+ case OUTMODE_MPEG2_PAR_GATED_CLK:
+ outreg = (1 << 10); /* 0x0400 */
+ break;
+ case OUTMODE_MPEG2_PAR_CONT_CLK:
+ outreg = (1 << 10) | (1 << 6); /* 0x0440 */
+ break;
+ case OUTMODE_MPEG2_SERIAL:
+ outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0482 */
+ break;
+ case OUTMODE_DIVERSITY:
+ outreg = (1 << 10) | (4 << 6); /* 0x0500 */
+ break;
+ case OUTMODE_MPEG2_FIFO:
+ outreg = (1 << 10) | (5 << 6);
+ break;
+ case OUTMODE_HIGH_Z:
+ outreg = 0;
+ break;
+ default:
+ dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
+ return -EINVAL;
+ }
+
+ dib9000_write_word(state, 1795, outreg);
+
+ switch (mode) {
+ case OUTMODE_MPEG2_PAR_GATED_CLK:
+ case OUTMODE_MPEG2_PAR_CONT_CLK:
+ case OUTMODE_MPEG2_SERIAL:
+ case OUTMODE_MPEG2_FIFO:
+ smo_mode = (dib9000_read_word(state, 295) & 0x0010) | (1 << 1);
+ if (state->chip.d9.cfg.output_mpeg2_in_188_bytes)
+ smo_mode |= (1 << 5);
+ dib9000_write_word(state, 295, smo_mode);
+ break;
+ }
+
+ outreg = to_fw_output_mode(mode);
+ return dib9000_mbx_send(state, OUT_MSG_SET_OUTPUT_MODE, &outreg, 1);
+}
+
+static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dib9000_state *state = i2c_get_adapdata(i2c_adap);
+ u16 i, len, t, index_msg;
+
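+ /* tunnel the i2c transaction through the tuner-interface registers:
+  * 784 = slave address, 785 = data word, 787 = word count, 786 = start
+  * (1 = read, 0 = write); registers 789..791 are polled as busy/count flags */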
+ for (index_msg = 0; index_msg < num; index_msg++) {
+ if (msg[index_msg].flags & I2C_M_RD) { /* read */
+ len = msg[index_msg].len;
+ if (len > 16)
+ len = 16;
+
+ if (dib9000_read_word(state, 790) != 0)
+ dprintk("TunerITF: read busy");
+
+ dib9000_write_word(state, 784, (u16) (msg[index_msg].addr));
+ dib9000_write_word(state, 787, (len / 2) - 1);
+ dib9000_write_word(state, 786, 1); /* start read */
+
+ i = 1000;
+ while (dib9000_read_word(state, 790) != (len / 2) && i)
+ i--;
+
+ if (i == 0)
+ dprintk("TunerITF: read failed");
+
+ for (i = 0; i < len; i += 2) {
+ t = dib9000_read_word(state, 785);
+ msg[index_msg].buf[i] = (t >> 8) & 0xff;
+ msg[index_msg].buf[i + 1] = (t) & 0xff;
+ }
+ if (dib9000_read_word(state, 790) != 0)
+ dprintk("TunerITF: read more data than expected");
+ } else {
+ i = 1000;
+ while (dib9000_read_word(state, 789) && i)
+ i--;
+ if (i == 0)
+ dprintk("TunerITF: write busy");
+
+ len = msg[index_msg].len;
+ if (len > 16)
+ len = 16;
+
+ for (i = 0; i < len; i += 2)
+ dib9000_write_word(state, 785, (msg[index_msg].buf[i] << 8) | msg[index_msg].buf[i + 1]);
+ dib9000_write_word(state, 784, (u16) msg[index_msg].addr);
+ dib9000_write_word(state, 787, (len / 2) - 1);
+ dib9000_write_word(state, 786, 0); /* start write */
+
+ i = 1000;
+ while (dib9000_read_word(state, 791) > 0 && i)
+ i--;
+ if (i == 0)
+ dprintk("TunerITF: write failed");
+ }
+ }
+ return num;
+}
+
+int dib9000_fw_set_component_bus_speed(struct dvb_frontend *fe, u16 speed)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+
+ state->component_bus_speed = speed;
+ return 0;
+}
+EXPORT_SYMBOL(dib9000_fw_set_component_bus_speed);
+
+static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dib9000_state *state = i2c_get_adapdata(i2c_adap);
+ u8 type = 0; /* I2C */
+ u8 port = DIBX000_I2C_INTERFACE_GPIO_3_4;
+ u16 scl = state->component_bus_speed; /* SCL frequency */
+ struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[FE_MM_RW_COMPONENT_ACCESS_BUFFER];
+ u8 p[13] = { 0 };
+
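+ /* build the parameter block describing this component-bus access (bus
+  * type, port, slave address, SCL speed, write/read lengths) and let the
+  * RISC firmware run the transaction through the memory mailbox */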
+ p[0] = type;
+ p[1] = port;
+ p[2] = msg[0].addr << 1;
+
+ p[3] = (u8) scl & 0xff; /* scl */
+ p[4] = (u8) (scl >> 8);
+
+ p[7] = 0;
+ p[8] = 0;
+
+ p[9] = (u8) (msg[0].len);
+ p[10] = (u8) (msg[0].len >> 8);
+ if ((num > 1) && (msg[1].flags & I2C_M_RD)) {
+ p[11] = (u8) (msg[1].len);
+ p[12] = (u8) (msg[1].len >> 8);
+ } else {
+ p[11] = 0;
+ p[12] = 0;
+ }
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+
+ dib9000_risc_mem_write(state, FE_MM_W_COMPONENT_ACCESS, p);
+
+ { /* write-part */
+ dib9000_risc_mem_setup_cmd(state, m->addr, msg[0].len, 0);
+ dib9000_risc_mem_write_chunks(state, msg[0].buf, msg[0].len);
+ }
+
+ /* do the transaction */
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_COMPONENT_ACCESS) < 0) {
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ return 0;
+ }
+
+ /* read back any possible result */
+ if ((num > 1) && (msg[1].flags & I2C_M_RD))
+ dib9000_risc_mem_read(state, FE_MM_RW_COMPONENT_ACCESS_BUFFER, msg[1].buf, msg[1].len);
+
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ return num;
+}
+
+static u32 dib9000_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm dib9000_tuner_algo = {
+ .master_xfer = dib9000_tuner_xfer,
+ .functionality = dib9000_i2c_func,
+};
+
+static struct i2c_algorithm dib9000_component_bus_algo = {
+ .master_xfer = dib9000_fw_component_bus_xfer,
+ .functionality = dib9000_i2c_func,
+};
+
+struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe)
+{
+ struct dib9000_state *st = fe->demodulator_priv;
+ return &st->tuner_adap;
+}
+EXPORT_SYMBOL(dib9000_get_tuner_interface);
+
+struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe)
+{
+ struct dib9000_state *st = fe->demodulator_priv;
+ return &st->component_bus;
+}
+EXPORT_SYMBOL(dib9000_get_component_bus_interface);
+
+struct i2c_adapter *dib9000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating)
+{
+ struct dib9000_state *st = fe->demodulator_priv;
+ return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
+}
+EXPORT_SYMBOL(dib9000_get_i2c_master);
+
+int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c)
+{
+ struct dib9000_state *st = fe->demodulator_priv;
+
+ st->i2c.i2c_adap = i2c;
+ return 0;
+}
+EXPORT_SYMBOL(dib9000_set_i2c_adapter);
+
+static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val)
+{
+ st->gpio_dir = dib9000_read_word(st, 773);
+ st->gpio_dir &= ~(1 << num); /* reset the direction bit */
+ st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */
+ dib9000_write_word(st, 773, st->gpio_dir);
+
+ st->gpio_val = dib9000_read_word(st, 774);
+ st->gpio_val &= ~(1 << num); /* reset the value bit */
+ st->gpio_val |= (val & 0x01) << num; /* set the new value */
+ dib9000_write_word(st, 774, st->gpio_val);
+
+ dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val);
+
+ return 0;
+}
+
+int dib9000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ return dib9000_cfg_gpio(state, num, dir, val);
+}
+EXPORT_SYMBOL(dib9000_set_gpio);
+
+int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u16 val = dib9000_read_word(state, 294 + 1) & 0xffef;
+ val |= (onoff & 0x1) << 4;
+
+ dprintk("PID filter enabled %d", onoff);
+ return dib9000_write_word(state, 294 + 1, val);
+}
+EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl);
+
+int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+ return dib9000_write_word(state, 300 + 1 + id, onoff ? (1 << 13) | pid : 0);
+}
+EXPORT_SYMBOL(dib9000_fw_pid_filter);
+
+int dib9000_firmware_post_pll_init(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ return dib9000_fw_init(state);
+}
+EXPORT_SYMBOL(dib9000_firmware_post_pll_init);
+
+static void dib9000_release(struct dvb_frontend *demod)
+{
+ struct dib9000_state *st = demod->demodulator_priv;
+ u8 index_frontend;
+
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++)
+ dvb_frontend_detach(st->fe[index_frontend]);
+
+ DibFreeLock(&st->platform.risc.mbx_if_lock);
+ DibFreeLock(&st->platform.risc.mbx_lock);
+ DibFreeLock(&st->platform.risc.mem_lock);
+ DibFreeLock(&st->platform.risc.mem_mbx_lock);
+ dibx000_exit_i2c_master(&st->i2c_master);
+
+ i2c_del_adapter(&st->tuner_adap);
+ i2c_del_adapter(&st->component_bus);
+ kfree(st->fe[0]);
+ kfree(st);
+}
+
+static int dib9000_wakeup(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int dib9000_sleep(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ int ret;
+
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
+ if (ret < 0)
+ return ret;
+ }
+ return dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
+}
+
+static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
+{
+ tune->min_delay_ms = 1000;
+ return 0;
+}
+
+static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend, sub_index_frontend;
+ fe_status_t stat;
+ int ret;
+
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
+ if (stat & FE_HAS_SYNC) {
+ dprintk("TPS lock on the slave%i", index_frontend);
+
+ /* synchronize the cache with the other frontends */
+ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep);
+ for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL);
+ sub_index_frontend++) {
+ if (sub_index_frontend != index_frontend) {
+ state->fe[sub_index_frontend]->dtv_property_cache.modulation =
+ state->fe[index_frontend]->dtv_property_cache.modulation;
+ state->fe[sub_index_frontend]->dtv_property_cache.inversion =
+ state->fe[index_frontend]->dtv_property_cache.inversion;
+ state->fe[sub_index_frontend]->dtv_property_cache.transmission_mode =
+ state->fe[index_frontend]->dtv_property_cache.transmission_mode;
+ state->fe[sub_index_frontend]->dtv_property_cache.guard_interval =
+ state->fe[index_frontend]->dtv_property_cache.guard_interval;
+ state->fe[sub_index_frontend]->dtv_property_cache.hierarchy =
+ state->fe[index_frontend]->dtv_property_cache.hierarchy;
+ state->fe[sub_index_frontend]->dtv_property_cache.code_rate_HP =
+ state->fe[index_frontend]->dtv_property_cache.code_rate_HP;
+ state->fe[sub_index_frontend]->dtv_property_cache.code_rate_LP =
+ state->fe[index_frontend]->dtv_property_cache.code_rate_LP;
+ state->fe[sub_index_frontend]->dtv_property_cache.rolloff =
+ state->fe[index_frontend]->dtv_property_cache.rolloff;
+ }
+ }
+ return 0;
+ }
+ }
+
+ /* get the channel from master chip */
+ ret = dib9000_fw_get_channel(fe, fep);
+ if (ret != 0)
+ return ret;
+
+ /* synchronize the cache with the other frontends */
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion;
+ state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode;
+ state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval;
+ state->fe[index_frontend]->dtv_property_cache.modulation = fe->dtv_property_cache.modulation;
+ state->fe[index_frontend]->dtv_property_cache.hierarchy = fe->dtv_property_cache.hierarchy;
+ state->fe[index_frontend]->dtv_property_cache.code_rate_HP = fe->dtv_property_cache.code_rate_HP;
+ state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
+ state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
+ }
+
+ return 0;
+}
+
+static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ state->tune_state = tune_state;
+ if (tune_state == CT_DEMOD_START)
+ state->status = FE_STATUS_TUNE_PENDING;
+
+ return 0;
+}
+
+static u32 dib9000_get_status(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ return state->status;
+}
+
+static int dib9000_set_channel_status(struct dvb_frontend *fe, struct dvb_frontend_parametersContext *channel_status)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+
+ memcpy(&state->channel_status, channel_status, sizeof(struct dvb_frontend_parametersContext));
+ return 0;
+}
+
+static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ int sleep_time, sleep_time_slave;
+ u32 frontend_status;
+ u8 nbr_pending, exit_condition, index_frontend, index_frontend_success;
+ struct dvb_frontend_parametersContext channel_status;
+
+ /* check that the correct parameters are set */
+ if (state->fe[0]->dtv_property_cache.frequency == 0) {
+ dprintk("dib9000: must specify frequency ");
+ return 0;
+ }
+
+ if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
+ dprintk("dib9000: must specify bandwidth ");
+ return 0;
+ }
+ fe->dtv_property_cache.delivery_system = SYS_DVBT;
+
+ /* set the master status */
+ if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
+ fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) {
+ /* no channel specified, autosearch the channel */
+ state->channel_status.status = CHANNEL_STATUS_PARAMETERS_UNKNOWN;
+ } else
+ state->channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
+
+ /* set mode and status for the different frontends */
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ dib9000_fw_set_diversity_in(state->fe[index_frontend], 1);
+
+ /* synchronization of the cache */
+ memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties));
+
+ state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_DVBT;
+ dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z);
+
+ dib9000_set_channel_status(state->fe[index_frontend], &state->channel_status);
+ dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
+ }
+
+ /* actual tune */
+ exit_condition = 0; /* 0: tune pending; 1: tune failed; 2: tune success */
+ index_frontend_success = 0;
+ do {
+ sleep_time = dib9000_fw_tune(state->fe[0], NULL);
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
+ if (sleep_time == FE_CALLBACK_TIME_NEVER)
+ sleep_time = sleep_time_slave;
+ else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
+ sleep_time = sleep_time_slave;
+ }
+ if (sleep_time != FE_CALLBACK_TIME_NEVER)
+ msleep(sleep_time / 10);
+ else
+ break;
+
+ nbr_pending = 0;
+ exit_condition = 0;
+ index_frontend_success = 0;
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ frontend_status = -dib9000_get_status(state->fe[index_frontend]);
+ if (frontend_status > -FE_STATUS_TUNE_PENDING) {
+ exit_condition = 2; /* tune success */
+ index_frontend_success = index_frontend;
+ break;
+ }
+ if (frontend_status == -FE_STATUS_TUNE_PENDING)
+ nbr_pending++; /* some frontends are still tuning */
+ }
+ if ((exit_condition != 2) && (nbr_pending == 0))
+ exit_condition = 1; /* if all tune are done and no success, exit: tune failed */
+
+ } while (exit_condition == 0);
+
+ /* check the tune result */
+ if (exit_condition == 1) { /* tune failed */
+ dprintk("tune failed");
+ return 0;
+ }
+
+ dprintk("tune success on frontend%i", index_frontend_success);
+
+ /* synchronize all the channel cache */
+ dib9000_get_frontend(state->fe[0], fep);
+
+ /* retune the other frontends with the found channel */
+ channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ /* only retune the frontends which were not tuned successfully */
+ if (index_frontend != index_frontend_success) {
+ dib9000_set_channel_status(state->fe[index_frontend], &channel_status);
+ dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
+ }
+ }
+ do {
+ sleep_time = FE_CALLBACK_TIME_NEVER;
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ if (index_frontend != index_frontend_success) {
+ sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
+ if (sleep_time == FE_CALLBACK_TIME_NEVER)
+ sleep_time = sleep_time_slave;
+ else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
+ sleep_time = sleep_time_slave;
+ }
+ }
+ if (sleep_time != FE_CALLBACK_TIME_NEVER)
+ msleep(sleep_time / 10);
+ else
+ break;
+
+ nbr_pending = 0;
+ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ if (index_frontend != index_frontend_success) {
+ frontend_status = -dib9000_get_status(state->fe[index_frontend]);
+ if (frontend_status == -FE_STATUS_TUNE_PENDING)
+ nbr_pending++; /* some frontends are still tuning */
+ }
+ }
+ } while (nbr_pending != 0);
+
+ /* set the output mode */
+ dib9000_fw_set_output_mode(state->fe[0], state->chip.d9.cfg.output_mode);
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY);
+
+ /* turn off the diversity for the last frontend */
+ dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
+
+ return 0;
+}
+
+static u16 dib9000_read_lock(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+
+ return dib9000_read_word(state, 535);
+}
+
+static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ u16 lock = 0, lock_slave = 0;
+
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ lock_slave |= dib9000_read_lock(state->fe[index_frontend]);
+
+ lock = dib9000_read_word(state, 535);
+
+ *stat = 0;
+
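+ /* fold the master and slave lock words into the DVB API status bits */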
+ if ((lock & 0x8000) || (lock_slave & 0x8000))
+ *stat |= FE_HAS_SIGNAL;
+ if ((lock & 0x3000) || (lock_slave & 0x3000))
+ *stat |= FE_HAS_CARRIER;
+ if ((lock & 0x0100) || (lock_slave & 0x0100))
+ *stat |= FE_HAS_VITERBI;
+ if (((lock & 0x0038) == 0x38) || ((lock_slave & 0x0038) == 0x38))
+ *stat |= FE_HAS_SYNC;
+ if ((lock & 0x0008) || (lock_slave & 0x0008))
+ *stat |= FE_HAS_LOCK;
+
+ return 0;
+}
+
+static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u16 c[16];
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ *ber = c[10] << 16 | c[11];
+ return 0;
+}
+
+static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ u16 c[16];
+ u16 val;
+
+ *strength = 0;
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
+ state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
+ if (val > 65535 - *strength)
+ *strength = 65535;
+ else
+ *strength += val;
+ }
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ val = 65535 - c[4];
+ if (val > 65535 - *strength)
+ *strength = 65535;
+ else
+ *strength += val;
+ return 0;
+}
+
+static u32 dib9000_get_snr(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u16 c[16];
+ u32 n, s, exp;
+ u16 val;
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
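+ /* monitor words 7 and 8 carry noise (n) and signal (s) as
+  * mantissa/exponent pairs; rebuild both and return s/n as a 16.16
+  * fixed-point ratio */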
+ val = c[7];
+ n = (val >> 4) & 0xff;
+ exp = ((val & 0xf) << 2);
+ val = c[8];
+ exp += ((val >> 14) & 0x3);
+ if ((exp & 0x20) != 0)
+ exp -= 0x40;
+ n <<= exp + 16;
+
+ s = (val >> 6) & 0xFF;
+ exp = (val & 0x3F);
+ if ((exp & 0x20) != 0)
+ exp -= 0x40;
+ s <<= exp + 16;
+
+ if (n > 0) {
+ u32 t = (s / n) << 16;
+ return t + ((s << 16) - n * t) / n;
+ }
+ return 0xffffffff;
+}
+
+static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+ u32 snr_master;
+
+ snr_master = dib9000_get_snr(fe);
+ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
+ snr_master += dib9000_get_snr(state->fe[index_frontend]);
+
+ if ((snr_master >> 16) != 0) {
+ snr_master = 10 * intlog10(snr_master >> 16);
+ *snr = snr_master / ((1 << 24) / 10);
+ } else
+ *snr = 0;
+
+ return 0;
+}
+
+static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u16 c[16];
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ *unc = c[12];
+ return 0;
+}
+
+int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
+{
+ int k = 0;
+ u8 new_addr = 0;
+ struct i2c_device client = {.i2c_adap = i2c };
+
+ client.i2c_addr = default_addr + 16;
+ dib9000_i2c_write16(&client, 1796, 0x0);
+
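+ /* enumerate from the last chip to the first: each demod is reset through
+  * the default address, moved to its designated i2c address via register
+  * 1794 and its output parked in diversity mode (register 1795); the
+  * second loop below then clears the forced settings */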
+ for (k = no_of_demods - 1; k >= 0; k--) {
+ /* designated i2c address */
+ new_addr = first_addr + (k << 1);
+ client.i2c_addr = default_addr;
+
+ dib9000_i2c_write16(&client, 1817, 3);
+ dib9000_i2c_write16(&client, 1796, 0);
+ dib9000_i2c_write16(&client, 1227, 1);
+ dib9000_i2c_write16(&client, 1227, 0);
+
+ client.i2c_addr = new_addr;
+ dib9000_i2c_write16(&client, 1817, 3);
+ dib9000_i2c_write16(&client, 1796, 0);
+ dib9000_i2c_write16(&client, 1227, 1);
+ dib9000_i2c_write16(&client, 1227, 0);
+
+ if (dib9000_identify(&client) == 0) {
+ client.i2c_addr = default_addr;
+ if (dib9000_identify(&client) == 0) {
+ dprintk("DiB9000 #%d: not identified", k);
+ return -EIO;
+ }
+ }
+
+ dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6));
+ dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2);
+
+ dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ }
+
+ for (k = 0; k < no_of_demods; k++) {
+ new_addr = first_addr | (k << 1);
+ client.i2c_addr = new_addr;
+
+ dib9000_i2c_write16(&client, 1794, (new_addr << 2));
+ dib9000_i2c_write16(&client, 1795, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dib9000_i2c_enumeration);
+
+int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend = 1;
+
+ while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
+ index_frontend++;
+ if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
+ dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+ state->fe[index_frontend] = fe_slave;
+ return 0;
+ }
+
+ dprintk("too many slave frontend");
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(dib9000_set_slave_frontend);
+
+int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend = 1;
+
+ while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
+ index_frontend++;
+ if (index_frontend != 1) {
+ dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1);
+ state->fe[index_frontend - 1] = NULL;
+ return 0;
+ }
+
+ dprintk("no frontend to be removed");
+ return -ENODEV;
+}
+EXPORT_SYMBOL(dib9000_remove_slave_frontend);
+
+struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
+{
+ struct dib9000_state *state = fe->demodulator_priv;
+
+ if (slave_index >= MAX_NUMBER_OF_FRONTENDS)
+ return NULL;
+ return state->fe[slave_index];
+}
+EXPORT_SYMBOL(dib9000_get_slave_frontend);
+
+static struct dvb_frontend_ops dib9000_ops;
+struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
+{
+ struct dvb_frontend *fe;
+ struct dib9000_state *st;
+ st = kzalloc(sizeof(struct dib9000_state), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
+ fe = kzalloc(sizeof(struct dvb_frontend), GFP_KERNEL);
+ if (fe == NULL) {
+ kfree(st);
+ return NULL;
+ }
+
+ memcpy(&st->chip.d9.cfg, cfg, sizeof(struct dib9000_config));
+ st->i2c.i2c_adap = i2c_adap;
+ st->i2c.i2c_addr = i2c_addr;
+
+ st->gpio_dir = DIB9000_GPIO_DEFAULT_DIRECTIONS;
+ st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
+ st->gpio_pwm_pos = DIB9000_GPIO_DEFAULT_PWM_POS;
+
+ DibInitLock(&st->platform.risc.mbx_if_lock);
+ DibInitLock(&st->platform.risc.mbx_lock);
+ DibInitLock(&st->platform.risc.mem_lock);
+ DibInitLock(&st->platform.risc.mem_mbx_lock);
+
+ st->fe[0] = fe;
+ fe->demodulator_priv = st;
+ memcpy(&st->fe[0]->ops, &dib9000_ops, sizeof(struct dvb_frontend_ops));
+
+ /* Ensure the output mode remains at the previous default if it's
+ * not specifically set by the caller.
+ */
+ if ((st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
+ st->chip.d9.cfg.output_mode = OUTMODE_MPEG2_FIFO;
+
+ if (dib9000_identify(&st->i2c) == 0)
+ goto error;
+
+ dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c.i2c_adap, st->i2c.i2c_addr);
+
+ st->tuner_adap.dev.parent = i2c_adap->dev.parent;
+ strncpy(st->tuner_adap.name, "DIB9000_FW TUNER ACCESS", sizeof(st->tuner_adap.name));
+ st->tuner_adap.algo = &dib9000_tuner_algo;
+ st->tuner_adap.algo_data = NULL;
+ i2c_set_adapdata(&st->tuner_adap, st);
+ if (i2c_add_adapter(&st->tuner_adap) < 0)
+ goto error;
+
+ st->component_bus.dev.parent = i2c_adap->dev.parent;
+ strncpy(st->component_bus.name, "DIB9000_FW COMPONENT BUS ACCESS", sizeof(st->component_bus.name));
+ st->component_bus.algo = &dib9000_component_bus_algo;
+ st->component_bus.algo_data = NULL;
+ st->component_bus_speed = 340;
+ i2c_set_adapdata(&st->component_bus, st);
+ if (i2c_add_adapter(&st->component_bus) < 0)
+ goto component_bus_add_error;
+
+ dib9000_fw_reset(fe);
+
+ return fe;
+
+component_bus_add_error:
+ i2c_del_adapter(&st->tuner_adap);
+error:
+ kfree(st);
+ return NULL;
+}
+EXPORT_SYMBOL(dib9000_attach);
+
+static struct dvb_frontend_ops dib9000_ops = {
+ .info = {
+ .name = "DiBcom 9000",
+ .type = FE_OFDM,
+ .frequency_min = 44250000,
+ .frequency_max = 867250000,
+ .frequency_stepsize = 62500,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .release = dib9000_release,
+
+ .init = dib9000_wakeup,
+ .sleep = dib9000_sleep,
+
+ .set_frontend = dib9000_set_frontend,
+ .get_tune_settings = dib9000_fe_get_tune_settings,
+ .get_frontend = dib9000_get_frontend,
+
+ .read_status = dib9000_read_status,
+ .read_ber = dib9000_read_ber,
+ .read_signal_strength = dib9000_read_signal_strength,
+ .read_snr = dib9000_read_snr,
+ .read_ucblocks = dib9000_read_unc_blocks,
+};
+
+MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
+MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib9000.h b/drivers/media/dvb/frontends/dib9000.h
new file mode 100644
index 000000000000..b5781a48034c
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib9000.h
@@ -0,0 +1,131 @@
+#ifndef DIB9000_H
+#define DIB9000_H
+
+#include "dibx000_common.h"
+
+struct dib9000_config {
+ u8 dvbt_mode;
+ u8 output_mpeg2_in_188_bytes;
+ u8 hostbus_diversity;
+ struct dibx000_bandwidth_config *bw;
+
+ u16 if_drives;
+
+ u32 timing_frequency;
+ u32 xtal_clock_khz;
+ u32 vcxo_timer;
+ u32 demod_clock_khz;
+
+ const u8 *microcode_B_fe_buffer;
+ u32 microcode_B_fe_size;
+
+ struct dibGPIOFunction gpio_function[2];
+ struct dibSubbandSelection subband;
+
+ u8 output_mode;
+};
+
+#define DEFAULT_DIB9000_I2C_ADDRESS 18
+
+#if defined(CONFIG_DVB_DIB9000) || (defined(CONFIG_DVB_DIB9000_MODULE) && defined(MODULE))
+extern struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg);
+extern int dib9000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr);
+extern struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe);
+extern struct i2c_adapter *dib9000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating);
+extern int dib9000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val);
+extern int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
+extern int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
+extern int dib9000_firmware_post_pll_init(struct dvb_frontend *fe);
+extern int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave);
+extern int dib9000_remove_slave_frontend(struct dvb_frontend *fe);
+extern struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index);
+extern struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe);
+extern int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c);
+extern int dib9000_fw_set_component_bus_speed(struct dvb_frontend *fe, u16 speed);
+#else
+static inline struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline struct i2c_adapter *dib9000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline int dib9000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline int dib9000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib9000_firmware_post_pll_init(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib9000_fw_set_component_bus_speed(struct dvb_frontend *fe, u16 speed)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index 2311c0a3406c..f6938f97feb4 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -17,9 +17,145 @@ static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
struct i2c_msg msg = {
.addr = mst->i2c_addr,.flags = 0,.buf = b,.len = 4
};
+
return i2c_transfer(mst->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
}
+static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
+{
+ u8 wb[2] = { reg >> 8, reg & 0xff };
+ u8 rb[2];
+ struct i2c_msg msg[2] = {
+ {.addr = mst->i2c_addr, .flags = 0, .buf = wb, .len = 2},
+ {.addr = mst->i2c_addr, .flags = I2C_M_RD, .buf = rb, .len = 2},
+ };
+
+ if (i2c_transfer(mst->i2c_adap, msg, 2) != 2)
+ dprintk("i2c read error on %d", reg);
+
+ return (rb[0] << 8) | rb[1];
+}
+
+static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
+{
+ int i = 100;
+ u16 status;
+
+ while (((status = dibx000_read_word(mst, mst->base_reg + 2)) & 0x0100) == 0 && --i > 0)
+ ;
+
+ /* i2c timed out */
+ if (i == 0)
+ return -EREMOTEIO;
+
+ /* no acknowledge */
+ if ((status & 0x0080) == 0)
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int dibx000_master_i2c_write(struct dibx000_i2c_master *mst, struct i2c_msg *msg, u8 stop)
+{
+ u16 data;
+ u16 da;
+ u16 i;
+ u16 txlen = msg->len, len;
+ const u8 *b = msg->buf;
+
+ while (txlen) {
+ dibx000_read_word(mst, mst->base_reg + 2);
+
+ len = txlen > 8 ? 8 : txlen;
+ for (i = 0; i < len; i += 2) {
+ data = *b++ << 8;
+ if (i+1 < len)
+ data |= *b++;
+ dibx000_write_word(mst, mst->base_reg, data);
+ }
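+ /* assemble the transfer control word: slave address in the upper bits,
+  * chunk length in bits 2..4, with start/stop flags OR-ed in below for
+  * the first and last chunk */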
+ da = (((u8) (msg->addr)) << 9) |
+ (1 << 8) |
+ (1 << 7) |
+ (0 << 6) |
+ (0 << 5) |
+ ((len & 0x7) << 2) |
+ (0 << 1) |
+ (0 << 0);
+
+ if (txlen == msg->len)
+ da |= 1 << 5; /* start */
+
+ if (txlen-len == 0 && stop)
+ da |= 1 << 6; /* stop */
+
+ dibx000_write_word(mst, mst->base_reg+1, da);
+
+ if (dibx000_is_i2c_done(mst) != 0)
+ return -EREMOTEIO;
+ txlen -= len;
+ }
+
+ return 0;
+}
+
+static int dibx000_master_i2c_read(struct dibx000_i2c_master *mst, struct i2c_msg *msg)
+{
+ u16 da;
+ u8 *b = msg->buf;
+ u16 rxlen = msg->len, len;
+
+ while (rxlen) {
+ len = rxlen > 8 ? 8 : rxlen;
+ da = (((u8) (msg->addr)) << 9) |
+ (1 << 8) |
+ (1 << 7) |
+ (0 << 6) |
+ (0 << 5) |
+ ((len & 0x7) << 2) |
+ (1 << 1) |
+ (0 << 0);
+
+ if (rxlen == msg->len)
+ da |= 1 << 5; /* start */
+
+ if (rxlen-len == 0)
+ da |= 1 << 6; /* stop */
+ dibx000_write_word(mst, mst->base_reg+1, da);
+
+ if (dibx000_is_i2c_done(mst) != 0)
+ return -EREMOTEIO;
+
+ rxlen -= len;
+
+ while (len) {
+ da = dibx000_read_word(mst, mst->base_reg);
+ *b++ = (da >> 8) & 0xff;
+ len--;
+ if (len >= 1) {
+ *b++ = da & 0xff;
+ len--;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed)
+{
+ struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+
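+ /* revisions older than DIB7000MC cannot go below 235; the divider
+  * register is programmed as 60000/speed */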
+ if (mst->device_rev < DIB7000MC && speed < 235)
+ speed = 235;
+ return dibx000_write_word(mst, mst->base_reg + 3, (u16)(60000 / speed));
+}
+EXPORT_SYMBOL(dibx000_i2c_set_speed);
+
+static u32 dibx000_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
enum dibx000_i2c_interface intf)
@@ -32,6 +168,60 @@ static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
return 0;
}
+static int dibx000_i2c_master_xfer_gpio12(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+ int msg_index;
+ int ret = 0;
+
+ dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_1_2);
+ for (msg_index = 0; msg_index < num; msg_index++) {
+ if (msg[msg_index].flags & I2C_M_RD) {
+ ret = dibx000_master_i2c_read(mst, &msg[msg_index]);
+ if (ret != 0)
+ return 0;
+ } else {
+ ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1);
+ if (ret != 0)
+ return 0;
+ }
+ }
+
+ return num;
+}
+
+static int dibx000_i2c_master_xfer_gpio34(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+{
+ struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+ int msg_index;
+ int ret = 0;
+
+ dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_3_4);
+ for (msg_index = 0; msg_index < num; msg_index++) {
+ if (msg[msg_index].flags & I2C_M_RD) {
+ ret = dibx000_master_i2c_read(mst, &msg[msg_index]);
+ if (ret != 0)
+ return 0;
+ } else {
+ ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1);
+ if (ret != 0)
+ return 0;
+ }
+ }
+
+ return num;
+}
+
+static struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
+ .master_xfer = dibx000_i2c_master_xfer_gpio12,
+ .functionality = dibx000_i2c_func,
+};
+
+static struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
+ .master_xfer = dibx000_i2c_master_xfer_gpio34,
+ .functionality = dibx000_i2c_func,
+};
+
static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4],
u8 addr, int onoff)
{
@@ -54,11 +244,37 @@ static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4],
return 0;
}
-static u32 dibx000_i2c_func(struct i2c_adapter *adapter)
+static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
{
- return I2C_FUNC_I2C;
+ struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+ struct i2c_msg m[2 + num];
+ u8 tx_open[4], tx_close[4];
+
+ memset(m, 0, sizeof(struct i2c_msg) * (2 + num));
+
+ dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
+
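+ /* bracket the caller's messages between a gate-open and a gate-close
+  * command addressed to the demod, so the device behind GPIO6/7 is only
+  * reachable for the duration of this transfer */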
+ dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
+ m[0].addr = mst->i2c_addr;
+ m[0].buf = tx_open;
+ m[0].len = 4;
+
+ memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
+
+ dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
+ m[num + 1].addr = mst->i2c_addr;
+ m[num + 1].buf = tx_close;
+ m[num + 1].len = 4;
+
+ return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
}
+static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
+ .master_xfer = dibx000_i2c_gated_gpio67_xfer,
+ .functionality = dibx000_i2c_func,
+};
+
static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num)
{
@@ -91,8 +307,8 @@ static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
};
struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
- enum dibx000_i2c_interface intf,
- int gating)
+ enum dibx000_i2c_interface intf,
+ int gating)
{
struct i2c_adapter *i2c = NULL;
@@ -101,6 +317,18 @@ struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
if (gating)
i2c = &mst->gated_tuner_i2c_adap;
break;
+ case DIBX000_I2C_INTERFACE_GPIO_1_2:
+ if (!gating)
+ i2c = &mst->master_i2c_adap_gpio12;
+ break;
+ case DIBX000_I2C_INTERFACE_GPIO_3_4:
+ if (!gating)
+ i2c = &mst->master_i2c_adap_gpio34;
+ break;
+ case DIBX000_I2C_INTERFACE_GPIO_6_7:
+ if (gating)
+ i2c = &mst->master_i2c_adap_gpio67;
+ break;
default:
printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
break;
@@ -126,8 +354,8 @@ void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst)
EXPORT_SYMBOL(dibx000_reset_i2c_master);
static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
- struct i2c_algorithm *algo, const char *name,
- struct dibx000_i2c_master *mst)
+ struct i2c_algorithm *algo, const char *name,
+ struct dibx000_i2c_master *mst)
{
strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
i2c_adap->algo = algo;
@@ -139,7 +367,7 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
}
int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
- struct i2c_adapter *i2c_adap, u8 i2c_addr)
+ struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
u8 tx[4];
struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };
@@ -153,11 +381,33 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
else
mst->base_reg = 768;
+ mst->gated_tuner_i2c_adap.dev.parent = mst->i2c_adap->dev.parent;
+ if (i2c_adapter_init
+ (&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
+ "DiBX000 tuner I2C bus", mst) != 0)
+ printk(KERN_ERR
+ "DiBX000: could not initialize the tuner i2c_adapter\n");
+
+ mst->master_i2c_adap_gpio12.dev.parent = mst->i2c_adap->dev.parent;
+ if (i2c_adapter_init
+ (&mst->master_i2c_adap_gpio12, &dibx000_i2c_master_gpio12_xfer_algo,
+ "DiBX000 master GPIO12 I2C bus", mst) != 0)
+ printk(KERN_ERR
+ "DiBX000: could not initialize the master i2c_adapter\n");
+
+ mst->master_i2c_adap_gpio34.dev.parent = mst->i2c_adap->dev.parent;
+ if (i2c_adapter_init
+ (&mst->master_i2c_adap_gpio34, &dibx000_i2c_master_gpio34_xfer_algo,
+ "DiBX000 master GPIO34 I2C bus", mst) != 0)
+ printk(KERN_ERR
+ "DiBX000: could not initialize the master i2c_adapter\n");
+
+ mst->master_i2c_adap_gpio67.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
- (&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
- "DiBX000 tuner I2C bus", mst) != 0)
+ (&mst->master_i2c_adap_gpio67, &dibx000_i2c_gated_gpio67_algo,
+ "DiBX000 master GPIO67 I2C bus", mst) != 0)
printk(KERN_ERR
- "DiBX000: could not initialize the tuner i2c_adapter\n");
+ "DiBX000: could not initialize the master i2c_adapter\n");
/* initialize the i2c-master by closing the gate */
dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
@@ -170,16 +420,19 @@ EXPORT_SYMBOL(dibx000_init_i2c_master);
void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
{
i2c_del_adapter(&mst->gated_tuner_i2c_adap);
+ i2c_del_adapter(&mst->master_i2c_adap_gpio12);
+ i2c_del_adapter(&mst->master_i2c_adap_gpio34);
+ i2c_del_adapter(&mst->master_i2c_adap_gpio67);
}
EXPORT_SYMBOL(dibx000_exit_i2c_master);
u32 systime(void)
{
- struct timespec t;
+ struct timespec t;
- t = current_kernel_time();
- return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
+ t = current_kernel_time();
+ return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
}
EXPORT_SYMBOL(systime);
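As the expression above shows, systime() returns a timestamp in 100 µs units: tv_sec * 10000 counts whole seconds (1 s = 10000 × 100 µs) and tv_nsec / 100000 converts the nanosecond remainder to the same granularity, so the 32-bit return value wraps after roughly five days.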
diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
index 4f5d141a308d..977d343369aa 100644
--- a/drivers/media/dvb/frontends/dibx000_common.h
+++ b/drivers/media/dvb/frontends/dibx000_common.h
@@ -4,7 +4,8 @@
enum dibx000_i2c_interface {
DIBX000_I2C_INTERFACE_TUNER = 0,
DIBX000_I2C_INTERFACE_GPIO_1_2 = 1,
- DIBX000_I2C_INTERFACE_GPIO_3_4 = 2
+ DIBX000_I2C_INTERFACE_GPIO_3_4 = 2,
+ DIBX000_I2C_INTERFACE_GPIO_6_7 = 3
};
struct dibx000_i2c_master {
@@ -17,8 +18,11 @@ struct dibx000_i2c_master {
enum dibx000_i2c_interface selected_interface;
-// struct i2c_adapter tuner_i2c_adap;
+/* struct i2c_adapter tuner_i2c_adap; */
struct i2c_adapter gated_tuner_i2c_adap;
+ struct i2c_adapter master_i2c_adap_gpio12;
+ struct i2c_adapter master_i2c_adap_gpio34;
+ struct i2c_adapter master_i2c_adap_gpio67;
struct i2c_adapter *i2c_adap;
u8 i2c_addr;
@@ -27,14 +31,15 @@ struct dibx000_i2c_master {
};
extern int dibx000_init_i2c_master(struct dibx000_i2c_master *mst,
- u16 device_rev, struct i2c_adapter *i2c_adap,
- u8 i2c_addr);
+ u16 device_rev, struct i2c_adapter *i2c_adap,
+ u8 i2c_addr);
extern struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master
- *mst,
- enum dibx000_i2c_interface
- intf, int gating);
+ *mst,
+ enum dibx000_i2c_interface
+ intf, int gating);
extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst);
+extern int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed);
extern u32 systime(void);
@@ -42,7 +47,7 @@ extern u32 systime(void);
#define BAND_UHF 0x02
#define BAND_VHF 0x04
#define BAND_SBAND 0x08
-#define BAND_FM 0x10
+#define BAND_FM 0x10
#define BAND_CBAND 0x20
#define BAND_OF_FREQUENCY(freq_kHz) ((freq_kHz) <= 170000 ? BAND_CBAND : \
@@ -135,9 +140,9 @@ enum dibx000_adc_states {
DIBX000_VBG_DISABLE,
};
-#define BANDWIDTH_TO_KHZ(v) ( (v) == BANDWIDTH_8_MHZ ? 8000 : \
- (v) == BANDWIDTH_7_MHZ ? 7000 : \
- (v) == BANDWIDTH_6_MHZ ? 6000 : 8000 )
+#define BANDWIDTH_TO_KHZ(v) ((v) == BANDWIDTH_8_MHZ ? 8000 : \
+ (v) == BANDWIDTH_7_MHZ ? 7000 : \
+ (v) == BANDWIDTH_6_MHZ ? 6000 : 8000)
#define BANDWIDTH_TO_INDEX(v) ( \
(v) == 8000 ? BANDWIDTH_8_MHZ : \
@@ -153,53 +158,57 @@ enum dibx000_adc_states {
#define OUTMODE_MPEG2_FIFO 5
#define OUTMODE_ANALOG_ADC 6
+#define INPUT_MODE_OFF 0x11
+#define INPUT_MODE_DIVERSITY 0x12
+#define INPUT_MODE_MPEG 0x13
+
enum frontend_tune_state {
- CT_TUNER_START = 10,
- CT_TUNER_STEP_0,
- CT_TUNER_STEP_1,
- CT_TUNER_STEP_2,
- CT_TUNER_STEP_3,
- CT_TUNER_STEP_4,
- CT_TUNER_STEP_5,
- CT_TUNER_STEP_6,
- CT_TUNER_STEP_7,
- CT_TUNER_STOP,
-
- CT_AGC_START = 20,
- CT_AGC_STEP_0,
- CT_AGC_STEP_1,
- CT_AGC_STEP_2,
- CT_AGC_STEP_3,
- CT_AGC_STEP_4,
- CT_AGC_STOP,
+ CT_TUNER_START = 10,
+ CT_TUNER_STEP_0,
+ CT_TUNER_STEP_1,
+ CT_TUNER_STEP_2,
+ CT_TUNER_STEP_3,
+ CT_TUNER_STEP_4,
+ CT_TUNER_STEP_5,
+ CT_TUNER_STEP_6,
+ CT_TUNER_STEP_7,
+ CT_TUNER_STOP,
+
+ CT_AGC_START = 20,
+ CT_AGC_STEP_0,
+ CT_AGC_STEP_1,
+ CT_AGC_STEP_2,
+ CT_AGC_STEP_3,
+ CT_AGC_STEP_4,
+ CT_AGC_STOP,
CT_DEMOD_START = 30,
- CT_DEMOD_STEP_1,
- CT_DEMOD_STEP_2,
- CT_DEMOD_STEP_3,
- CT_DEMOD_STEP_4,
- CT_DEMOD_STEP_5,
- CT_DEMOD_STEP_6,
- CT_DEMOD_STEP_7,
- CT_DEMOD_STEP_8,
- CT_DEMOD_STEP_9,
- CT_DEMOD_STEP_10,
- CT_DEMOD_SEARCH_NEXT = 41,
- CT_DEMOD_STEP_LOCKED,
- CT_DEMOD_STOP,
-
- CT_DONE = 100,
- CT_SHUTDOWN,
+ CT_DEMOD_STEP_1,
+ CT_DEMOD_STEP_2,
+ CT_DEMOD_STEP_3,
+ CT_DEMOD_STEP_4,
+ CT_DEMOD_STEP_5,
+ CT_DEMOD_STEP_6,
+ CT_DEMOD_STEP_7,
+ CT_DEMOD_STEP_8,
+ CT_DEMOD_STEP_9,
+ CT_DEMOD_STEP_10,
+ CT_DEMOD_SEARCH_NEXT = 41,
+ CT_DEMOD_STEP_LOCKED,
+ CT_DEMOD_STOP,
+
+ CT_DONE = 100,
+ CT_SHUTDOWN,
};
struct dvb_frontend_parametersContext {
#define CHANNEL_STATUS_PARAMETERS_UNKNOWN 0x01
#define CHANNEL_STATUS_PARAMETERS_SET 0x02
- u8 status;
- u32 tune_time_estimation[2];
- s32 tps_available;
- u16 tps[9];
+ u8 status;
+ u32 tune_time_estimation[2];
+ s32 tps_available;
+ u16 tps[9];
};
#define FE_STATUS_TUNE_FAILED 0
@@ -216,4 +225,49 @@ struct dvb_frontend_parametersContext {
#define ABS(x) ((x < 0) ? (-x) : (x))
+#define DATA_BUS_ACCESS_MODE_8BIT 0x01
+#define DATA_BUS_ACCESS_MODE_16BIT 0x02
+#define DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT 0x10
+
+struct dibGPIOFunction {
+#define BOARD_GPIO_COMPONENT_BUS_ADAPTER 1
+#define BOARD_GPIO_COMPONENT_DEMOD 2
+ u8 component;
+
+#define BOARD_GPIO_FUNCTION_BOARD_ON 1
+#define BOARD_GPIO_FUNCTION_BOARD_OFF 2
+#define BOARD_GPIO_FUNCTION_COMPONENT_ON 3
+#define BOARD_GPIO_FUNCTION_COMPONENT_OFF 4
+#define BOARD_GPIO_FUNCTION_SUBBAND_PWM 5
+#define BOARD_GPIO_FUNCTION_SUBBAND_GPIO 6
+ u8 function;
+
+/* mask, direction and value are used to specify which GPIOs to change:
+ * GPIO0 is the LSB and GPIO31 (where present) the MSB. The same
+ * bit-position as in the mask is used for the direction and the value.
+ * Direction == 1 is OUT, 0 == IN. For direction "OUT" the value is
+ * either 1 or 0; for direction IN the value has no meaning.
+ *
+ * In case of BOARD_GPIO_FUNCTION_PWM the mask gives the GPIO to be
+ * used to do the PWM, the direction gives the PWModulator to be used
+ * and the value gives the PWM value in a device-dependent scale.
+ */
+ u32 mask;
+ u32 direction;
+ u32 value;
+};
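A sketch of how a board description might fill one of these entries, following the bit conventions described in the comment above (the component, function and GPIO number are purely illustrative):

/* drive GPIO3 high when the component is switched on */
static const struct dibGPIOFunction example_power_gpio = {
	.component = BOARD_GPIO_COMPONENT_DEMOD,
	.function  = BOARD_GPIO_FUNCTION_COMPONENT_ON,
	.mask      = 1 << 3,	/* GPIO3 is affected */
	.direction = 1 << 3,	/* direction 1 == OUT */
	.value     = 1 << 3,	/* drive it high */
};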
+
+#define MAX_NB_SUBBANDS 8
+struct dibSubbandSelection {
+ u8 size; /* Actual number of subbands. */
+ struct {
+ u16 f_mhz;
+ struct dibGPIOFunction gpio;
+ } subband[MAX_NB_SUBBANDS];
+};
+
+#define DEMOD_TIMF_SET 0x00
+#define DEMOD_TIMF_GET 0x01
+#define DEMOD_TIMF_UPDATE 0x02
+
#endif
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 4e0fc2c8a41c..d3362d0407eb 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -767,8 +767,12 @@ static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
* In case of any error, the lock is released and we exit from within
* the relevant operations themselves.
*/
- if (enable)
- mutex_lock(&state->internal->tuner_lock);
+ if (enable) {
+ if (state->config->tuner_i2c_lock)
+ state->config->tuner_i2c_lock(&state->frontend, 1);
+ else
+ mutex_lock(&state->internal->tuner_lock);
+ }
reg = STV090x_READ_DEMOD(state, I2CRPT);
if (enable) {
@@ -784,13 +788,20 @@ static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
goto err;
}
- if (!enable)
- mutex_unlock(&state->internal->tuner_lock);
+ if (!enable) {
+ if (state->config->tuner_i2c_lock)
+ state->config->tuner_i2c_lock(&state->frontend, 0);
+ else
+ mutex_unlock(&state->internal->tuner_lock);
+ }
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
- mutex_unlock(&state->internal->tuner_lock);
+ if (state->config->tuner_i2c_lock)
+ state->config->tuner_i2c_lock(&state->frontend, 0);
+ else
+ mutex_unlock(&state->internal->tuner_lock);
return -1;
}
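With this change a board driver can take over tuner-bus arbitration by supplying a tuner_i2c_lock callback in its stv090x_config; when the callback is absent the driver falls back to its internal tuner_lock mutex as before. A concrete implementation, cineS2_tuner_i2c_lock(), is added to ngene-cards.c further down in this patch.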
@@ -2883,10 +2894,12 @@ static int stv090x_optimize_track(struct stv090x_state *state)
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
- if (STV090x_WRITE_DEMOD(state, ACLC, 0) < 0)
- goto err;
- if (STV090x_WRITE_DEMOD(state, BCLC, 0) < 0)
- goto err;
+ if (state->internal->dev_ver >= 0x30) {
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0) < 0)
+ goto err;
+ }
if (state->frame_len == STV090x_LONG_FRAME) {
reg = STV090x_READ_DEMOD(state, DMDMODCOD);
modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
@@ -3846,6 +3859,7 @@ static int stv090x_sleep(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+ u8 full_standby = 0;
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
@@ -3858,24 +3872,119 @@ static int stv090x_sleep(struct dvb_frontend *fe)
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
- dprintk(FE_DEBUG, 1, "Set %s to sleep",
- state->device == STV0900 ? "STV0900" : "STV0903");
+ dprintk(FE_DEBUG, 1, "Set %s(%d) to sleep",
+ state->device == STV0900 ? "STV0900" : "STV0903",
+ state->demod);
- reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
- STV090x_SETFIELD(reg, STANDBY_FIELD, 0x01);
- if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
- goto err;
+ mutex_lock(&state->internal->demod_lock);
- reg = stv090x_read_reg(state, STV090x_TSTTNR1);
- STV090x_SETFIELD(reg, ADC1_PON_FIELD, 0);
- if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
- goto err;
+ switch (state->demod) {
+ case STV090x_DEMODULATOR_0:
+ /* power off ADC 1 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR1);
+ STV090x_SETFIELD(reg, ADC1_PON_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
+ goto err;
+ /* power off DiSEqC 1 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR2);
+ STV090x_SETFIELD(reg, DISEQC1_PON_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_TSTTNR2, reg) < 0)
+ goto err;
+
+ /* check whether path 2 is already sleeping, that is when
+ ADC2 is off */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR3);
+ if (STV090x_GETFIELD(reg, ADC2_PON_FIELD) == 0)
+ full_standby = 1;
+
+ /* stop clocks */
+ reg = stv090x_read_reg(state, STV090x_STOPCLK1);
+ /* packet delineator 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKPKDT1_FIELD, 1);
+ /* ADC 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKADCI1_FIELD, 1);
+ /* FEC clock is shared between the two paths, only stop it
+ when full standby is possible */
+ if (full_standby)
+ STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_STOPCLK2);
+ /* sampling 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKSAMP1_FIELD, 1);
+ /* viterbi 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, 1);
+ /* TS clock is shared between the two paths, only stop it
+ when full standby is possible */
+ if (full_standby)
+ STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_DEMODULATOR_1:
+ /* power off ADC 2 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR3);
+ STV090x_SETFIELD(reg, ADC2_PON_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_TSTTNR3, reg) < 0)
+ goto err;
+ /* power off DiSEqC 2 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR4);
+ STV090x_SETFIELD(reg, DISEQC2_PON_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_TSTTNR4, reg) < 0)
+ goto err;
+
+ /* check whether path 1 is already sleeping, that is when
+ ADC1 is off */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR1);
+ if (STV090x_GETFIELD(reg, ADC1_PON_FIELD) == 0)
+ full_standby = 1;
+
+ /* stop clocks */
+ reg = stv090x_read_reg(state, STV090x_STOPCLK1);
+ /* packet delineator 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKPKDT2_FIELD, 1);
+ /* ADC 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKADCI2_FIELD, 1);
+ /* FEC clock is shared between the two paths, only stop it
+ when full standby is possible */
+ if (full_standby)
+ STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_STOPCLK2);
+ /* sampling 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKSAMP2_FIELD, 1);
+ /* viterbi 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, 1);
+ /* TS clock is shared between the two paths, only stop it
+ when full standby is possible */
+ if (full_standby)
+ STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
+ goto err;
+ break;
+ default:
+ dprintk(FE_ERROR, 1, "Wrong demodulator!");
+ break;
+ }
+
+ if (full_standby) {
+ /* general power off */
+ reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
+ STV090x_SETFIELD(reg, STANDBY_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
+ goto err;
+ }
+
+ mutex_unlock(&state->internal->demod_lock);
return 0;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
+ mutex_unlock(&state->internal->demod_lock);
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
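The sleep path now separates per-path resources from shared ones: the ADC, the DiSEqC block and the path-specific clocks are always powered down for the demodulator being put to sleep, while the shared FEC and TS clocks and the global STANDBY bit in SYNTCTRL are only touched once the other path's ADC is already off (full_standby). Sleeping one demodulator of a dual STV0900 therefore no longer disturbs the other path.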
@@ -3885,21 +3994,94 @@ static int stv090x_wakeup(struct dvb_frontend *fe)
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
- dprintk(FE_DEBUG, 1, "Wake %s from standby",
- state->device == STV0900 ? "STV0900" : "STV0903");
+ dprintk(FE_DEBUG, 1, "Wake %s(%d) from standby",
+ state->device == STV0900 ? "STV0900" : "STV0903",
+ state->demod);
+
+ mutex_lock(&state->internal->demod_lock);
+ /* general power on */
reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
STV090x_SETFIELD(reg, STANDBY_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
goto err;
- reg = stv090x_read_reg(state, STV090x_TSTTNR1);
- STV090x_SETFIELD(reg, ADC1_PON_FIELD, 1);
- if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
- goto err;
+ switch (state->demod) {
+ case STV090x_DEMODULATOR_0:
+ /* power on ADC 1 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR1);
+ STV090x_SETFIELD(reg, ADC1_PON_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
+ goto err;
+ /* power on DiSEqC 1 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR2);
+ STV090x_SETFIELD(reg, DISEQC1_PON_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_TSTTNR2, reg) < 0)
+ goto err;
+
+ /* activate clocks */
+ reg = stv090x_read_reg(state, STV090x_STOPCLK1);
+ /* packet delineator 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKPKDT1_FIELD, 0);
+ /* ADC 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKADCI1_FIELD, 0);
+ /* FEC clock */
+ STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_STOPCLK2);
+ /* sampling 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKSAMP1_FIELD, 0);
+ /* viterbi 1 clock */
+ STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, 0);
+ /* TS clock */
+ STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_DEMODULATOR_1:
+ /* power on ADC 2 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR3);
+ STV090x_SETFIELD(reg, ADC2_PON_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_TSTTNR3, reg) < 0)
+ goto err;
+ /* power on DiSEqC 2 */
+ reg = stv090x_read_reg(state, STV090x_TSTTNR4);
+ STV090x_SETFIELD(reg, DISEQC2_PON_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_TSTTNR4, reg) < 0)
+ goto err;
+
+ /* activate clocks */
+ reg = stv090x_read_reg(state, STV090x_STOPCLK1);
+ /* packet delineator 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKPKDT2_FIELD, 0);
+ /* ADC 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKADCI2_FIELD, 0);
+ /* FEC clock */
+ STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_STOPCLK2);
+ /* sampling 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKSAMP2_FIELD, 0);
+ /* viterbi 2 clock */
+ STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, 0);
+ /* TS clock */
+ STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
+ goto err;
+ break;
+
+ default:
+ dprintk(FE_ERROR, 1, "Wrong demodulator!");
+ break;
+ }
+ mutex_unlock(&state->internal->demod_lock);
return 0;
err:
+ mutex_unlock(&state->internal->demod_lock);
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
@@ -4169,6 +4351,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
@@ -4177,6 +4360,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
case STV090x_TSMODE_DVBCI:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
@@ -4185,6 +4369,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
case STV090x_TSMODE_SERIAL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
@@ -4193,6 +4378,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
case STV090x_TSMODE_SERIAL_CONTINUOUS:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
@@ -4206,6 +4392,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
switch (state->config->ts2_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
@@ -4214,6 +4401,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
case STV090x_TSMODE_DVBCI:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
@@ -4222,6 +4410,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
case STV090x_TSMODE_SERIAL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
@@ -4230,6 +4419,7 @@ static int stv090x_set_tspath(struct stv090x_state *state)
case STV090x_TSMODE_SERIAL_CONTINUOUS:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
@@ -4506,16 +4696,26 @@ static int stv090x_setup(struct dvb_frontend *fe)
if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0)
goto err;
- /* workaround for stuck DiSEqC output */
- if (config->diseqc_envelope_mode)
- stv090x_send_diseqc_burst(fe, SEC_MINI_A);
-
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
+int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
+ u8 xor_value)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u8 reg = 0;
+
+ STV090x_SETFIELD(reg, GPIOx_OPD_FIELD, dir);
+ STV090x_SETFIELD(reg, GPIOx_CONFIG_FIELD, value);
+ STV090x_SETFIELD(reg, GPIOx_XOR_FIELD, xor_value);
+
+ return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
+}
+EXPORT_SYMBOL(stv090x_set_gpio);
+
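The new helper writes a single GPIOxCFG register; per the comment added to stv090x.h below, dir = 0 configures the pin as an output and dir = 1 as input/open-drain. A minimal usage sketch (the wrapper function and the GPIO number are illustrative only):

/* illustrative only: configure GPIO 2 with dir = 0 (output), value = 1, no XOR */
static int example_configure_gpio(struct dvb_frontend *fe)
{
	return stv090x_set_gpio(fe, 2, 0, 1, 0);
}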
static struct dvb_frontend_ops stv090x_ops = {
.info = {
@@ -4580,11 +4780,6 @@ struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
state->internal = temp_int->internal;
state->internal->num_used++;
dprintk(FE_INFO, 1, "Found Internal Structure!");
- dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
- state->device == STV0900 ? "STV0900" : "STV0903",
- demod,
- state->internal->dev_ver);
- return &state->frontend;
} else {
state->internal = kmalloc(sizeof(struct stv090x_internal),
GFP_KERNEL);
@@ -4595,24 +4790,19 @@ struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
state->internal->i2c_adap = state->i2c;
state->internal->i2c_addr = state->config->address;
dprintk(FE_INFO, 1, "Create New Internal Structure!");
- }
- mutex_init(&state->internal->demod_lock);
- mutex_init(&state->internal->tuner_lock);
+ mutex_init(&state->internal->demod_lock);
+ mutex_init(&state->internal->tuner_lock);
- if (stv090x_sleep(&state->frontend) < 0) {
- dprintk(FE_ERROR, 1, "Error putting device to sleep");
- goto error;
+ if (stv090x_setup(&state->frontend) < 0) {
+ dprintk(FE_ERROR, 1, "Error setting up device");
+ goto error;
+ }
}
- if (stv090x_setup(&state->frontend) < 0) {
- dprintk(FE_ERROR, 1, "Error setting up device");
- goto error;
- }
- if (stv090x_wakeup(&state->frontend) < 0) {
- dprintk(FE_ERROR, 1, "Error waking device");
- goto error;
- }
+ /* workaround for stuck DiSEqC output */
+ if (config->diseqc_envelope_mode)
+ stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A);
dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
state->device == STV0900 ? "STV0900" : "STV0903",
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
index dd1b93ae4e9d..29cdc2b71314 100644
--- a/drivers/media/dvb/frontends/stv090x.h
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -78,6 +78,9 @@ struct stv090x_config {
u32 ts1_clk;
u32 ts2_clk;
+ u8 ts1_tei : 1;
+ u8 ts2_tei : 1;
+
enum stv090x_i2crpt repeater_level;
u8 tuner_bbgain; /* default: 10db */
@@ -97,6 +100,7 @@ struct stv090x_config {
int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain);
int (*tuner_set_refclk) (struct dvb_frontend *fe, u32 refclk);
int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status);
+ void (*tuner_i2c_lock) (struct dvb_frontend *fe, int lock);
};
#if defined(CONFIG_DVB_STV090x) || (defined(CONFIG_DVB_STV090x_MODULE) && defined(MODULE))
@@ -104,6 +108,11 @@ struct stv090x_config {
extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
struct i2c_adapter *i2c,
enum stv090x_demodulator demod);
+
+/* dir = 0 -> output, dir = 1 -> input/open-drain */
+extern int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio,
+ u8 dir, u8 value, u8 xor_value);
+
#else
static inline struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
@@ -113,6 +122,13 @@ static inline struct dvb_frontend *stv090x_attach(const struct stv090x_config *c
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
+
+static inline int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio,
+ u8 opd, u8 value, u8 xor_value)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
#endif /* CONFIG_DVB_STV090x */
#endif /* __STV090x_H */
diff --git a/drivers/media/dvb/frontends/stv090x_reg.h b/drivers/media/dvb/frontends/stv090x_reg.h
index 2502855dd784..93741ee14297 100644
--- a/drivers/media/dvb/frontends/stv090x_reg.h
+++ b/drivers/media/dvb/frontends/stv090x_reg.h
@@ -1327,10 +1327,10 @@
#define STV090x_WIDTH_Px_NOSPLHT_UNNORMED_FIELD 8
#define STV090x_Px_NOSPLHy(__x, __y) (0xf48f - (__x - 1) * 0x200 - __y * 0x1)
-#define STv090x_P1_NOSPLH0 STV090x_Px_NOSPLHy(1, 0)
-#define STv090x_P1_NOSPLH1 STV090x_Px_NOSPLHy(1, 1)
-#define STv090x_P2_NOSPLH0 STV090x_Px_NOSPLHy(2, 0)
-#define STv090x_P2_NOSPLH1 STV090x_Px_NOSPLHy(2, 1)
+#define STV090x_P1_NOSPLH0 STV090x_Px_NOSPLHy(1, 0)
+#define STV090x_P1_NOSPLH1 STV090x_Px_NOSPLHy(1, 1)
+#define STV090x_P2_NOSPLH0 STV090x_Px_NOSPLHy(2, 0)
+#define STV090x_P2_NOSPLH1 STV090x_Px_NOSPLHy(2, 1)
#define STV090x_OFFST_Px_NOSPLH_UNNORMED_FIELD 0
#define STV090x_WIDTH_Px_NOSPLH_UNNORMED_FIELD 8
@@ -1406,7 +1406,7 @@
#define STV090x_Px_BCLC2S28(__x) (0xf49d - (__x - 1) * 0x200)
#define STV090x_P1_BCLC2S28 STV090x_Px_BCLC2S28(1)
-#define STV090x_P2_BCLC2S28 STV090x_Px_BCLC2S28(1)
+#define STV090x_P2_BCLC2S28 STV090x_Px_BCLC2S28(2)
#define STV090x_OFFST_Px_CAR2S2_8_BETA_M_FIELD 4
#define STV090x_WIDTH_Px_CAR2S2_8_BETA_M_FIELD 2
#define STV090x_OFFST_Px_CAR2S2_8_BETA_E_FIELD 0
@@ -1414,7 +1414,7 @@
#define STV090x_Px_BCLC2S216A(__x) (0xf49e - (__x - 1) * 0x200)
#define STV090x_P1_BCLC2S216A STV090x_Px_BCLC2S216A(1)
-#define STV090x_P2_BCLC2S216A STV090x_Px_BCLC2S216A(1)
+#define STV090x_P2_BCLC2S216A STV090x_Px_BCLC2S216A(2)
#define STV090x_OFFST_Px_CAR2S2_16A_BETA_M_FIELD 4
#define STV090x_WIDTH_Px_CAR2S2_16A_BETA_M_FIELD 2
#define STV090x_OFFST_Px_CAR2S2_16A_BETA_E_FIELD 0
@@ -1422,7 +1422,7 @@
#define STV090x_Px_BCLC2S232A(__x) (0xf49f - (__x - 1) * 0x200)
#define STV090x_P1_BCLC2S232A STV090x_Px_BCLC2S232A(1)
-#define STV090x_P2_BCLC2S232A STV090x_Px_BCLC2S232A(1)
+#define STV090x_P2_BCLC2S232A STV090x_Px_BCLC2S232A(2)
#define STV090x_OFFST_Px_CAR2S2_32A_BETA_M_FIELD 4
#define STV090x_WIDTH_Px_CAR2S2_32A_BETA_M_FIELD 2
#define STV090x_OFFST_Px_CAR2S2_32A_BETA_E_FIELD 0
@@ -1602,7 +1602,7 @@
#define STV090x_Px_CCIACC(__x) (0xf4c4 - (__x - 1) * 0x200)
#define STV090x_P1_CCIACC STV090x_Px_CCIACC(1)
-#define STV090x_P2_CCIACC STV090x_Px_CCIACC(1)
+#define STV090x_P2_CCIACC STV090x_Px_CCIACC(2)
#define STV090x_OFFST_Px_CCI_VALUE_FIELD 0
#define STV090x_WIDTH_Px_CCI_VALUE_FIELD 8
diff --git a/drivers/media/dvb/ngene/Makefile b/drivers/media/dvb/ngene/Makefile
index 0608aabb14ee..2bc96874d044 100644
--- a/drivers/media/dvb/ngene/Makefile
+++ b/drivers/media/dvb/ngene/Makefile
@@ -9,3 +9,6 @@ obj-$(CONFIG_DVB_NGENE) += ngene.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+
+# For the staging CI driver cxd2099
+EXTRA_CFLAGS += -Idrivers/staging/cxd2099/
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index 4692a41ad95b..fcf4be901ec8 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -48,20 +48,27 @@
static int tuner_attach_stv6110(struct ngene_channel *chan)
{
+ struct i2c_adapter *i2c;
struct stv090x_config *feconf = (struct stv090x_config *)
chan->dev->card_info->fe_config[chan->number];
struct stv6110x_config *tunerconf = (struct stv6110x_config *)
chan->dev->card_info->tuner_config[chan->number];
struct stv6110x_devctl *ctl;
- ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf,
- &chan->i2c_adapter);
+ /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
+ if (chan->number < 2)
+ i2c = &chan->dev->channel[0].i2c_adapter;
+ else
+ i2c = &chan->dev->channel[1].i2c_adapter;
+
+ ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf, i2c);
if (ctl == NULL) {
printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n");
return -ENODEV;
}
feconf->tuner_init = ctl->tuner_init;
+ feconf->tuner_sleep = ctl->tuner_sleep;
feconf->tuner_set_mode = ctl->tuner_set_mode;
feconf->tuner_set_frequency = ctl->tuner_set_frequency;
feconf->tuner_get_frequency = ctl->tuner_get_frequency;
@@ -78,29 +85,106 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
static int demod_attach_stv0900(struct ngene_channel *chan)
{
+ struct i2c_adapter *i2c;
struct stv090x_config *feconf = (struct stv090x_config *)
chan->dev->card_info->fe_config[chan->number];
- chan->fe = dvb_attach(stv090x_attach,
- feconf,
- &chan->i2c_adapter,
- chan->number == 0 ? STV090x_DEMODULATOR_0 :
- STV090x_DEMODULATOR_1);
+ /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
+ /* Note: Both adapters share the same i2c bus, but the demod */
+ /* driver requires that each demod has its own i2c adapter */
+ if (chan->number < 2)
+ i2c = &chan->dev->channel[0].i2c_adapter;
+ else
+ i2c = &chan->dev->channel[1].i2c_adapter;
+
+ chan->fe = dvb_attach(stv090x_attach, feconf, i2c,
+ (chan->number & 1) == 0 ? STV090x_DEMODULATOR_0
+ : STV090x_DEMODULATOR_1);
if (chan->fe == NULL) {
printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n");
return -ENODEV;
}
- if (!dvb_attach(lnbh24_attach, chan->fe, &chan->i2c_adapter, 0,
+ /* store channel info */
+ if (feconf->tuner_i2c_lock)
+ chan->fe->analog_demod_priv = chan;
+
+ if (!dvb_attach(lnbh24_attach, chan->fe, i2c, 0,
0, chan->dev->card_info->lnb[chan->number])) {
printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n");
dvb_frontend_detach(chan->fe);
+ chan->fe = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock)
+{
+ struct ngene_channel *chan = fe->analog_demod_priv;
+
+ if (lock)
+ down(&chan->dev->pll_mutex);
+ else
+ up(&chan->dev->pll_mutex);
+}
+
+static int cineS2_probe(struct ngene_channel *chan)
+{
+ struct i2c_adapter *i2c;
+ struct stv090x_config *fe_conf;
+ u8 buf[3];
+ struct i2c_msg i2c_msg = { .flags = 0, .buf = buf };
+ int rc;
+
+ /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
+ if (chan->number < 2)
+ i2c = &chan->dev->channel[0].i2c_adapter;
+ else
+ i2c = &chan->dev->channel[1].i2c_adapter;
+
+ fe_conf = chan->dev->card_info->fe_config[chan->number];
+ i2c_msg.addr = fe_conf->address;
+
+ /* probe demod */
+ i2c_msg.len = 2;
+ buf[0] = 0xf1;
+ buf[1] = 0x00;
+ rc = i2c_transfer(i2c, &i2c_msg, 1);
+ if (rc != 1)
+ return -ENODEV;
+
+ /* demod found, attach it */
+ rc = demod_attach_stv0900(chan);
+ if (rc < 0 || chan->number < 2)
+ return rc;
+
+ /* demod #2: reprogram outputs DPN1 & DPN2 */
+ i2c_msg.len = 3;
+ buf[0] = 0xf1;
+ switch (chan->number) {
+ case 2:
+ buf[1] = 0x5c;
+ buf[2] = 0xc2;
+ break;
+ case 3:
+ buf[1] = 0x61;
+ buf[2] = 0xcc;
+ break;
+ default:
return -ENODEV;
}
+ rc = i2c_transfer(i2c, &i2c_msg, 1);
+ if (rc != 1) {
+ printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
+ return -EIO;
+ }
return 0;
}
+
static struct lgdt330x_config aver_m780 = {
.demod_address = 0xb2 >> 1,
.demod_chip = LGDT3303,
@@ -151,6 +235,29 @@ static struct stv090x_config fe_cineS2 = {
.adc2_range = STV090x_ADC_1Vpp,
.diseqc_envelope_mode = true,
+
+ .tuner_i2c_lock = cineS2_tuner_i2c_lock,
+};
+
+static struct stv090x_config fe_cineS2_2 = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x69,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+
+ .tuner_i2c_lock = cineS2_tuner_i2c_lock,
};
static struct stv6110x_config tuner_cineS2_0 = {
@@ -175,7 +282,8 @@ static struct ngene_info ngene_info_cineS2 = {
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0b, 0x08},
.tsf = {3, 3},
- .fw_version = 15,
+ .fw_version = 18,
+ .msi_supported = true,
};
static struct ngene_info ngene_info_satixS2 = {
@@ -188,46 +296,54 @@ static struct ngene_info ngene_info_satixS2 = {
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0b, 0x08},
.tsf = {3, 3},
- .fw_version = 15,
+ .fw_version = 18,
+ .msi_supported = true,
};
static struct ngene_info ngene_info_satixS2v2 = {
.type = NGENE_SIDEWINDER,
.name = "Mystique SaTiX-S2 Dual (v2)",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0a, 0x08},
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
+ NGENE_IO_TSOUT},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08, 0x0b, 0x09},
.tsf = {3, 3},
- .fw_version = 15,
+ .fw_version = 18,
+ .msi_supported = true,
};
static struct ngene_info ngene_info_cineS2v5 = {
.type = NGENE_SIDEWINDER,
.name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0a, 0x08},
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
+ NGENE_IO_TSOUT},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08, 0x0b, 0x09},
.tsf = {3, 3},
- .fw_version = 15,
+ .fw_version = 18,
+ .msi_supported = true,
};
+
static struct ngene_info ngene_info_duoFlexS2 = {
.type = NGENE_SIDEWINDER,
.name = "Digital Devices DuoFlex S2 miniPCIe",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0a, 0x08},
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
+ NGENE_IO_TSOUT},
+ .demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08, 0x0b, 0x09},
.tsf = {3, 3},
- .fw_version = 15,
+ .fw_version = 18,
+ .msi_supported = true,
};
static struct ngene_info ngene_info_m780 = {
@@ -321,6 +437,7 @@ static struct pci_driver ngene_pci_driver = {
.probe = ngene_probe,
.remove = __devexit_p(ngene_remove),
.err_handler = &ngene_errors,
+ .shutdown = ngene_shutdown,
};
static __init int module_init_ngene(void)
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index dc073bdc623a..175a0f6c2a4c 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -45,6 +45,9 @@ static int one_adapter = 1;
module_param(one_adapter, int, 0444);
MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
+static int shutdown_workaround;
+module_param(shutdown_workaround, int, 0644);
+MODULE_PARM_DESC(shutdown_workaround, "Activate workaround for shutdown problem with some chipsets.");
static int debug;
module_param(debug, int, 0444);
@@ -143,7 +146,7 @@ static void demux_tasklet(unsigned long data)
}
} else {
if (chan->HWState == HWSTATE_RUN) {
- u32 Flags = 0;
+ u32 Flags = chan->DataFormatFlags;
IBufferExchange *exch1 = chan->pBufferExchange;
IBufferExchange *exch2 = chan->pBufferExchange2;
if (Cur->ngeneBuffer.SR.Flags & 0x01)
@@ -474,9 +477,9 @@ static u8 SPDIFConfiguration[10] = {
/* Set NGENE I2S Config to transport stream compatible mode */
-static u8 TS_I2SConfiguration[4] = { 0x3E, 0x1A, 0x00, 0x00 }; /*3e 18 00 00 ?*/
+static u8 TS_I2SConfiguration[4] = { 0x3E, 0x18, 0x00, 0x00 };
-static u8 TS_I2SOutConfiguration[4] = { 0x80, 0x20, 0x00, 0x00 };
+static u8 TS_I2SOutConfiguration[4] = { 0x80, 0x04, 0x00, 0x00 };
static u8 ITUDecoderSetup[4][16] = {
{0x1c, 0x13, 0x01, 0x68, 0x3d, 0x90, 0x14, 0x20, /* SDTV */
@@ -749,13 +752,11 @@ void set_transfer(struct ngene_channel *chan, int state)
if (chan->mode & NGENE_IO_TSOUT) {
chan->pBufferExchange = tsout_exchange;
/* 0x66666666 = 50MHz *2^33 /250MHz */
- chan->AudioDTOValue = 0x66666666;
- /* set_dto(chan, 38810700+1000); */
- /* set_dto(chan, 19392658); */
+ chan->AudioDTOValue = 0x80000000;
+ chan->AudioDTOUpdated = 1;
}
if (chan->mode & NGENE_IO_TSIN)
chan->pBufferExchange = tsin_exchange;
- /* ngwritel(0, 0x9310); */
spin_unlock_irq(&chan->state_lock);
} else
;/* printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
@@ -1168,6 +1169,7 @@ static void ngene_release_buffers(struct ngene *dev)
iounmap(dev->iomem);
free_common_buffers(dev);
vfree(dev->tsout_buf);
+ vfree(dev->tsin_buf);
vfree(dev->ain_buf);
vfree(dev->vin_buf);
vfree(dev);
@@ -1184,6 +1186,13 @@ static int ngene_get_buffers(struct ngene *dev)
dvb_ringbuffer_init(&dev->tsout_rbuf,
dev->tsout_buf, TSOUT_BUF_SIZE);
}
+ if (dev->card_info->io_type[2]&NGENE_IO_TSIN) {
+ dev->tsin_buf = vmalloc(TSIN_BUF_SIZE);
+ if (!dev->tsin_buf)
+ return -ENOMEM;
+ dvb_ringbuffer_init(&dev->tsin_rbuf,
+ dev->tsin_buf, TSIN_BUF_SIZE);
+ }
if (dev->card_info->io_type[2] & NGENE_IO_AIN) {
dev->ain_buf = vmalloc(AIN_BUF_SIZE);
if (!dev->ain_buf)
@@ -1257,6 +1266,10 @@ static int ngene_load_firm(struct ngene *dev)
fw_name = "ngene_17.fw";
dev->cmd_timeout_workaround = true;
break;
+ case 18:
+ size = 0;
+ fw_name = "ngene_18.fw";
+ break;
}
if (request_firmware(&fw, fw_name, &dev->pci_dev->dev) < 0) {
@@ -1266,6 +1279,8 @@ static int ngene_load_firm(struct ngene *dev)
": Copy %s to your hotplug directory!\n", fw_name);
return -1;
}
+ if (size == 0)
+ size = fw->size;
if (size != fw->size) {
printk(KERN_ERR DEVICE_NAME
": Firmware %s has invalid size!", fw_name);
@@ -1301,6 +1316,35 @@ static void ngene_stop(struct ngene *dev)
#endif
}
+static int ngene_buffer_config(struct ngene *dev)
+{
+ int stat;
+
+ if (dev->card_info->fw_version >= 17) {
+ u8 tsin12_config[6] = { 0x60, 0x60, 0x00, 0x00, 0x00, 0x00 };
+ u8 tsin1234_config[6] = { 0x30, 0x30, 0x00, 0x30, 0x30, 0x00 };
+ u8 tsio1235_config[6] = { 0x30, 0x30, 0x00, 0x28, 0x00, 0x38 };
+ u8 *bconf = tsin12_config;
+
+ if (dev->card_info->io_type[2]&NGENE_IO_TSIN &&
+ dev->card_info->io_type[3]&NGENE_IO_TSIN) {
+ bconf = tsin1234_config;
+ if (dev->card_info->io_type[4]&NGENE_IO_TSOUT &&
+ dev->ci.en)
+ bconf = tsio1235_config;
+ }
+ stat = ngene_command_config_free_buf(dev, bconf);
+ } else {
+ int bconf = BUFFER_CONFIG_4422;
+
+ if (dev->card_info->io_type[3] == NGENE_IO_TSIN)
+ bconf = BUFFER_CONFIG_3333;
+ stat = ngene_command_config_buf(dev, bconf);
+ }
+ return stat;
+}
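The byte values in these configuration arrays are buffer sizes in units of 64 bytes, matching the expressions used by the code removed from ngene_start() below (for example 0x30 = 3072 / 64 and 0x60 = 6144 / 64). Which table is chosen depends on whether TS inputs 3/4 are present and whether the CI TS output is active.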
+
+
static int ngene_start(struct ngene *dev)
{
int stat;
@@ -1365,23 +1409,6 @@ static int ngene_start(struct ngene *dev)
if (stat < 0)
goto fail;
- if (dev->card_info->fw_version == 17) {
- u8 tsin4_config[6] = {
- 3072 / 64, 3072 / 64, 0, 3072 / 64, 3072 / 64, 0};
- u8 default_config[6] = {
- 4096 / 64, 4096 / 64, 0, 2048 / 64, 2048 / 64, 0};
- u8 *bconf = default_config;
-
- if (dev->card_info->io_type[3] == NGENE_IO_TSIN)
- bconf = tsin4_config;
- dprintk(KERN_DEBUG DEVICE_NAME ": FW 17 buffer config\n");
- stat = ngene_command_config_free_buf(dev, bconf);
- } else {
- int bconf = BUFFER_CONFIG_4422;
- if (dev->card_info->io_type[3] == NGENE_IO_TSIN)
- bconf = BUFFER_CONFIG_3333;
- stat = ngene_command_config_buf(dev, bconf);
- }
if (!stat)
return stat;
@@ -1397,9 +1424,6 @@ fail2:
return stat;
}
-
-
-
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
@@ -1408,20 +1432,25 @@ static void release_channel(struct ngene_channel *chan)
{
struct dvb_demux *dvbdemux = &chan->demux;
struct ngene *dev = chan->dev;
- struct ngene_info *ni = dev->card_info;
- int io = ni->io_type[chan->number];
- if (chan->dev->cmd_timeout_workaround && chan->running)
+ if (chan->running)
set_transfer(chan, 0);
tasklet_kill(&chan->demux_tasklet);
- if (io & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
- if (chan->fe) {
- dvb_unregister_frontend(chan->fe);
- dvb_frontend_detach(chan->fe);
- chan->fe = NULL;
- }
+ if (chan->ci_dev) {
+ dvb_unregister_device(chan->ci_dev);
+ chan->ci_dev = NULL;
+ }
+
+ if (chan->fe) {
+ dvb_unregister_frontend(chan->fe);
+ dvb_frontend_detach(chan->fe);
+ chan->fe = NULL;
+ }
+
+ if (chan->has_demux) {
+ dvb_net_release(&chan->dvbnet);
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&chan->hw_frontend);
@@ -1429,9 +1458,12 @@ static void release_channel(struct ngene_channel *chan)
&chan->mem_frontend);
dvb_dmxdev_release(&chan->dmxdev);
dvb_dmx_release(&chan->demux);
+ chan->has_demux = false;
+ }
- if (chan->number == 0 || !one_adapter)
- dvb_unregister_adapter(&dev->adapter[chan->number]);
+ if (chan->has_adapter) {
+ dvb_unregister_adapter(&dev->adapter[chan->number]);
+ chan->has_adapter = false;
}
}
@@ -1449,9 +1481,27 @@ static int init_channel(struct ngene_channel *chan)
chan->type = io;
chan->mode = chan->type; /* for now only one mode */
+ if (io & NGENE_IO_TSIN) {
+ chan->fe = NULL;
+ if (ni->demod_attach[nr]) {
+ ret = ni->demod_attach[nr](chan);
+ if (ret < 0)
+ goto err;
+ }
+ if (chan->fe && ni->tuner_attach[nr]) {
+ ret = ni->tuner_attach[nr](chan);
+ if (ret < 0)
+ goto err;
+ }
+ }
+
+ if (!dev->ci.en && (io & NGENE_IO_TSOUT))
+ return 0;
+
if (io & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
if (nr >= STREAM_AUDIOIN1)
chan->DataFormatFlags = DF_SWAP32;
+
if (nr == 0 || !one_adapter || dev->first_adapter == NULL) {
adapter = &dev->adapter[nr];
ret = dvb_register_adapter(adapter, "nGene",
@@ -1459,40 +1509,50 @@ static int init_channel(struct ngene_channel *chan)
&chan->dev->pci_dev->dev,
adapter_nr);
if (ret < 0)
- return ret;
+ goto err;
if (dev->first_adapter == NULL)
dev->first_adapter = adapter;
- } else {
+ chan->has_adapter = true;
+ } else
adapter = dev->first_adapter;
- }
+ }
+ if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
+ dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
+ set_transfer(chan, 1);
+ set_transfer(&chan->dev->channel[2], 1);
+ dvb_register_device(adapter, &chan->ci_dev,
+ &ngene_dvbdev_ci, (void *) chan,
+ DVB_DEVICE_SEC);
+ if (!chan->ci_dev)
+ goto err;
+ }
+
+ if (chan->fe) {
+ if (dvb_register_frontend(adapter, chan->fe) < 0)
+ goto err;
+ chan->has_demux = true;
+ }
+
+ if (chan->has_demux) {
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
ngene_start_feed,
ngene_stop_feed, chan);
ret = my_dvb_dmxdev_ts_card_init(&chan->dmxdev, &chan->demux,
&chan->hw_frontend,
&chan->mem_frontend, adapter);
+ ret = dvb_net_init(adapter, &chan->dvbnet, &chan->demux.dmx);
}
- if (io & NGENE_IO_TSIN) {
+ return ret;
+
+err:
+ if (chan->fe) {
+ dvb_frontend_detach(chan->fe);
chan->fe = NULL;
- if (ni->demod_attach[nr])
- ni->demod_attach[nr](chan);
- if (chan->fe) {
- if (dvb_register_frontend(adapter, chan->fe) < 0) {
- if (chan->fe->ops.release)
- chan->fe->ops.release(chan->fe);
- chan->fe = NULL;
- }
- }
- if (chan->fe && ni->tuner_attach[nr])
- if (ni->tuner_attach[nr] (chan) < 0) {
- printk(KERN_ERR DEVICE_NAME
- ": Tuner attach failed on channel %d!\n",
- nr);
- }
}
- return ret;
+ release_channel(chan);
+ return 0;
}
static int init_channels(struct ngene *dev)
@@ -1510,6 +1570,57 @@ static int init_channels(struct ngene *dev)
return 0;
}
+static void cxd_attach(struct ngene *dev)
+{
+ struct ngene_ci *ci = &dev->ci;
+
+ ci->en = cxd2099_attach(0x40, dev, &dev->channel[0].i2c_adapter);
+ ci->dev = dev;
+ return;
+}
+
+static void cxd_detach(struct ngene *dev)
+{
+ struct ngene_ci *ci = &dev->ci;
+
+ dvb_ca_en50221_release(ci->en);
+ kfree(ci->en);
+ ci->en = 0;
+}
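CI support comes from the cxd2099 driver in staging: the Makefile change above adds its include path, and cxd_attach() probes the chip at I2C address 0x40 on the first channel's I2C adapter, storing the resulting dvb_ca_en50221 handle in dev->ci.en.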
+
+/***********************************/
+/* workaround for shutdown failure */
+/***********************************/
+
+static void ngene_unlink(struct ngene *dev)
+{
+ struct ngene_command com;
+
+ com.cmd.hdr.Opcode = CMD_MEM_WRITE;
+ com.cmd.hdr.Length = 3;
+ com.cmd.MemoryWrite.address = 0x910c;
+ com.cmd.MemoryWrite.data = 0xff;
+ com.in_len = 3;
+ com.out_len = 1;
+
+ down(&dev->cmd_mutex);
+ ngwritel(0, NGENE_INT_ENABLE);
+ ngene_command_mutex(dev, &com);
+ up(&dev->cmd_mutex);
+}
+
+void ngene_shutdown(struct pci_dev *pdev)
+{
+ struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev);
+
+ if (!dev || !shutdown_workaround)
+ return;
+
+ printk(KERN_INFO DEVICE_NAME ": shutdown workaround...\n");
+ ngene_unlink(dev);
+ pci_disable_device(pdev);
+}
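The workaround is disabled by default. Since the parameter is declared with mode 0644 it can be enabled at load time, e.g. "modprobe ngene shutdown_workaround=1", or toggled at runtime by writing 1 to /sys/module/ngene/parameters/shutdown_workaround.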
+
/****************************************************************************/
/* device probe/remove calls ************************************************/
/****************************************************************************/
@@ -1522,6 +1633,8 @@ void __devexit ngene_remove(struct pci_dev *pdev)
tasklet_kill(&dev->event_tasklet);
for (i = MAX_STREAM - 1; i >= 0; i--)
release_channel(&dev->channel[i]);
+ if (dev->ci.en)
+ cxd_detach(dev);
ngene_stop(dev);
ngene_release_buffers(dev);
pci_set_drvdata(pdev, NULL);
@@ -1557,6 +1670,13 @@ int __devinit ngene_probe(struct pci_dev *pci_dev,
if (stat < 0)
goto fail1;
+ cxd_attach(dev);
+
+ stat = ngene_buffer_config(dev);
+ if (stat < 0)
+ goto fail1;
+
+
dev->i2c_current_bus = -1;
/* Register DVB adapters and devices for both channels */
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
index 3832e5983c19..0b4943233166 100644
--- a/drivers/media/dvb/ngene/ngene-dvb.c
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -47,6 +47,64 @@
/* COMMAND API interface ****************************************************/
/****************************************************************************/
+static ssize_t ts_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ngene_channel *chan = dvbdev->priv;
+ struct ngene *dev = chan->dev;
+
+ if (wait_event_interruptible(dev->tsout_rbuf.queue,
+ dvb_ringbuffer_free
+ (&dev->tsout_rbuf) >= count) < 0)
+ return 0;
+
+ dvb_ringbuffer_write(&dev->tsout_rbuf, buf, count);
+
+ return count;
+}
+
+static ssize_t ts_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ngene_channel *chan = dvbdev->priv;
+ struct ngene *dev = chan->dev;
+ int left, avail;
+
+ left = count;
+ while (left) {
+ if (wait_event_interruptible(
+ dev->tsin_rbuf.queue,
+ dvb_ringbuffer_avail(&dev->tsin_rbuf) > 0) < 0)
+ return -EAGAIN;
+ avail = dvb_ringbuffer_avail(&dev->tsin_rbuf);
+ if (avail > left)
+ avail = left;
+ dvb_ringbuffer_read_user(&dev->tsin_rbuf, buf, avail);
+ left -= avail;
+ buf += avail;
+ }
+ return count;
+}
+
+static const struct file_operations ci_fops = {
+ .owner = THIS_MODULE,
+ .read = ts_read,
+ .write = ts_write,
+ .open = dvb_generic_open,
+ .release = dvb_generic_release,
+};
+
+struct dvb_device ngene_dvbdev_ci = {
+ .priv = 0,
+ .readers = -1,
+ .writers = -1,
+ .users = -1,
+ .fops = &ci_fops,
+};
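ts_read() and ts_write() give user space a raw transport-stream path to the CI: writes block until the TS output ring buffer has room for the whole buffer, and reads block in a loop until the full request has been satisfied from tsin_rbuf. The device is registered later in init_channel() with DVB_DEVICE_SEC, so it appears as the adapter's sec device node.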
+
+
/****************************************************************************/
/* DVB functions and API interface ******************************************/
/****************************************************************************/
@@ -63,10 +121,21 @@ static void swap_buffer(u32 *p, u32 len)
void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
+ struct ngene *dev = chan->dev;
- if (chan->users > 0)
+ if (flags & DF_SWAP32)
+ swap_buffer(buf, len);
+ if (dev->ci.en && chan->number == 2) {
+ if (dvb_ringbuffer_free(&dev->tsin_rbuf) > len) {
+ dvb_ringbuffer_write(&dev->tsin_rbuf, buf, len);
+ wake_up_interruptible(&dev->tsin_rbuf.queue);
+ }
+ return 0;
+ }
+ if (chan->users > 0) {
dvb_dmx_swfilter(&chan->demux, buf, len);
+ }
return NULL;
}
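tsin_exchange() now byte-swaps the buffer when DF_SWAP32 is set and, when the CI is enabled, diverts channel 2's TS input into tsin_rbuf (waking any reader of the CI device above) instead of feeding it to the software demux; all other channels still go through dvb_dmx_swfilter() as before.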
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index 8fb4200f83f8..40fce9e3ae66 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -36,8 +36,11 @@
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
+#include "dvb_ca_en50221.h"
#include "dvb_frontend.h"
#include "dvb_ringbuffer.h"
+#include "dvb_net.h"
+#include "cxd2099.h"
#define DEVICE_NAME "ngene"
@@ -636,14 +639,18 @@ struct ngene_channel {
int number;
int type;
int mode;
+ bool has_adapter;
+ bool has_demux;
struct dvb_frontend *fe;
struct dmxdev dmxdev;
struct dvb_demux demux;
+ struct dvb_net dvbnet;
struct dmx_frontend hw_frontend;
struct dmx_frontend mem_frontend;
int users;
struct video_device *v4l_dev;
+ struct dvb_device *ci_dev;
struct tasklet_struct demux_tasklet;
struct SBufferHeader *nextBuffer;
@@ -710,6 +717,15 @@ struct ngene_channel {
int running;
};
+
+struct ngene_ci {
+ struct device device;
+ struct i2c_adapter i2c_adapter;
+
+ struct ngene *dev;
+ struct dvb_ca_en50221 *en;
+};
+
struct ngene;
typedef void (rx_cb_t)(struct ngene *, u32, u8);
@@ -774,6 +790,10 @@ struct ngene {
#define TSOUT_BUF_SIZE (512*188*8)
struct dvb_ringbuffer tsout_rbuf;
+ u8 *tsin_buf;
+#define TSIN_BUF_SIZE (512*188*8)
+ struct dvb_ringbuffer tsin_rbuf;
+
u8 *ain_buf;
#define AIN_BUF_SIZE (128*1024)
struct dvb_ringbuffer ain_rbuf;
@@ -785,6 +805,8 @@ struct ngene {
unsigned long exp_val;
int prev_cmd;
+
+ struct ngene_ci ci;
};
struct ngene_info {
@@ -863,6 +885,7 @@ struct ngene_buffer {
int __devinit ngene_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id);
void __devexit ngene_remove(struct pci_dev *pdev);
+void ngene_shutdown(struct pci_dev *pdev);
int ngene_command(struct ngene *dev, struct ngene_command *com);
int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level);
void set_transfer(struct ngene_channel *chan, int state);
@@ -872,6 +895,7 @@ void FillTSBuffer(void *Buffer, int Length, u32 Flags);
int ngene_i2c_init(struct ngene *dev, int dev_nr);
/* Provided by ngene-dvb.c */
+extern struct dvb_device ngene_dvbdev_ci;
void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags);
void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags);
int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed);
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a185610b376b..1e3a8dd820a4 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -21,6 +21,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -148,7 +149,7 @@ static const struct v4l2_file_operations timbradio_fops = {
static int __devinit timbradio_probe(struct platform_device *pdev)
{
- struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_radio_platform_data *pdata = mfd_get_data(pdev);
struct timbradio *tr;
int err;
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 7ecc8e657663..4698eb00b59f 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2138,7 +2138,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
{
- struct wl1273_core **core = pdev->dev.platform_data;
+ struct wl1273_core **core = mfd_get_data(pdev);
struct wl1273_device *radio;
struct v4l2_ctrl *ctrl;
int r = 0;
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 0659e9f50144..f0c80559b303 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-real-audio-220-32-keys.o \
rc-streamzap.o \
rc-tbs-nec.o \
+ rc-technisat-usb2.o \
rc-terratec-cinergy-xs.o \
rc-terratec-slim.o \
rc-tevii-nec.o \
diff --git a/drivers/media/rc/keymaps/rc-technisat-usb2.c b/drivers/media/rc/keymaps/rc-technisat-usb2.c
new file mode 100644
index 000000000000..4afe5774f192
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-technisat-usb2.c
@@ -0,0 +1,93 @@
+/* rc-technisat-usb2.c - Keytable for SkyStar HD USB
+ *
+ * Copyright (C) 2010 Patrick Boettcher,
+ * Kernel Labs Inc. PO Box 745, St James, NY 11780
+ *
+ * Development was sponsored by Technisat Digital UK Limited, whose
+ * registered office is Witan Gate House 500 - 600 Witan Gate West,
+ * Milton Keynes, MK9 1SH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND
+ * TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO
+ * THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR
+ * FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE COPYRIGHT HOLDER
+ * NOR TECHNISAT DIGITAL UK LIMITED SHALL BE LIABLE FOR ANY SPECIAL,
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+ * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+ * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS PROGRAM. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/rc-map.h>
+
+static struct rc_map_table technisat_usb2[] = {
+ {0x0a0c, KEY_POWER},
+ {0x0a01, KEY_1},
+ {0x0a02, KEY_2},
+ {0x0a03, KEY_3},
+ {0x0a0d, KEY_MUTE},
+ {0x0a04, KEY_4},
+ {0x0a05, KEY_5},
+ {0x0a06, KEY_6},
+ {0x0a38, KEY_VIDEO}, /* EXT */
+ {0x0a07, KEY_7},
+ {0x0a08, KEY_8},
+ {0x0a09, KEY_9},
+ {0x0a00, KEY_0},
+ {0x0a4f, KEY_INFO},
+ {0x0a20, KEY_CHANNELUP},
+ {0x0a52, KEY_MENU},
+ {0x0a11, KEY_VOLUMEUP},
+ {0x0a57, KEY_OK},
+ {0x0a10, KEY_VOLUMEDOWN},
+ {0x0a2f, KEY_EPG},
+ {0x0a21, KEY_CHANNELDOWN},
+ {0x0a22, KEY_REFRESH},
+ {0x0a3c, KEY_TEXT},
+ {0x0a76, KEY_ENTER}, /* HOOK */
+ {0x0a0f, KEY_HELP},
+ {0x0a6b, KEY_RED},
+ {0x0a6c, KEY_GREEN},
+ {0x0a6d, KEY_YELLOW},
+ {0x0a6e, KEY_BLUE},
+ {0x0a29, KEY_STOP},
+ {0x0a23, KEY_LANGUAGE},
+ {0x0a53, KEY_TV},
+ {0x0a0a, KEY_PROGRAM},
+};
+
+static struct rc_map_list technisat_usb2_map = {
+ .map = {
+ .scan = technisat_usb2,
+ .size = ARRAY_SIZE(technisat_usb2),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_TECHNISAT_USB2,
+ }
+};
+
+static int __init init_rc_map(void)
+{
+ return rc_map_register(&technisat_usb2_map);
+}
+
+static void __exit exit_rc_map(void)
+{
+ rc_map_unregister(&technisat_usb2_map);
+}
+
+module_init(init_rc_map)
+module_exit(exit_rc_map)
+
+MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 512a2f4ada0e..c3769283936f 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -966,8 +966,8 @@ struct rc_dev *rc_allocate_device(void)
return NULL;
}
- dev->input_dev->getkeycode_new = ir_getkeycode;
- dev->input_dev->setkeycode_new = ir_setkeycode;
+ dev->input_dev->getkeycode = ir_getkeycode;
+ dev->input_dev->setkeycode = ir_setkeycode;
input_set_drvdata(dev->input_dev, dev);
spin_lock_init(&dev->rc_map.lock);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index aa021600e9df..d40a8fc01bfd 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -42,8 +42,30 @@ config VIDEO_TUNER
config V4L2_MEM2MEM_DEV
tristate
- depends on VIDEOBUF_GEN
+ depends on VIDEOBUF2_CORE
+config VIDEOBUF2_CORE
+ tristate
+
+config VIDEOBUF2_MEMOPS
+ tristate
+
+config VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ tristate
+
+config VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ tristate
+
+
+config VIDEOBUF2_DMA_SG
+ #depends on HAS_DMA
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ tristate
#
# Multimedia Video device configuration
#
@@ -527,7 +549,7 @@ config VIDEO_VIVI
depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
select FONT_8x16
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
default n
---help---
Enables a virtual video driver. This device shows a color bar
@@ -718,6 +740,12 @@ config VIDEO_VIA_CAMERA
Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
with ov7670 sensors.
+config VIDEO_NOON010PC30
+ tristate "NOON010PC30 CIF camera sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This driver supports NOON010PC30 CIF camera from Siliconfile
+
config SOC_CAMERA
tristate "SoC camera support"
depends on VIDEO_V4L2 && HAS_DMA && I2C
@@ -967,7 +995,7 @@ if V4L_MEM2MEM_DRIVERS
config VIDEO_MEM2MEM_TESTDEV
tristate "Virtual test device for mem2mem framework"
depends on VIDEO_DEV && VIDEO_V4L2
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
default n
---help---
@@ -977,7 +1005,7 @@ config VIDEO_MEM2MEM_TESTDEV
config VIDEO_SAMSUNG_S5P_FIMC
tristate "Samsung S5P FIMC (video postprocessor) driver"
depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
This is a v4l2 driver for the S5P camera interface
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index a509d317e258..251b7cac3f91 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
+obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
@@ -111,6 +112,12 @@ obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o
+obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
+obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
+obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
+obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
+
obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
diff --git a/drivers/media/video/adv7343.c b/drivers/media/video/adv7343.c
index 41b2930d0ce4..021fab23070d 100644
--- a/drivers/media/video/adv7343.c
+++ b/drivers/media/video/adv7343.c
@@ -29,6 +29,7 @@
#include <media/adv7343.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include "adv7343_regs.h"
@@ -41,15 +42,13 @@ MODULE_PARM_DESC(debug, "Debug level 0-1");
struct adv7343_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
u8 reg00;
u8 reg01;
u8 reg02;
u8 reg35;
u8 reg80;
u8 reg82;
- int bright;
- int hue;
- int gain;
u32 output;
v4l2_std_id std;
};
@@ -59,6 +58,11 @@ static inline struct adv7343_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct adv7343_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct adv7343_state, hdl)->sd;
+}
+
static inline int adv7343_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -268,111 +272,22 @@ static int adv7343_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int adv7343_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, ADV7343_BRIGHTNESS_MIN,
- ADV7343_BRIGHTNESS_MAX, 1,
- ADV7343_BRIGHTNESS_DEF);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, ADV7343_HUE_MIN,
- ADV7343_HUE_MAX, 1 ,
- ADV7343_HUE_DEF);
- case V4L2_CID_GAIN:
- return v4l2_ctrl_query_fill(qc, ADV7343_GAIN_MIN,
- ADV7343_GAIN_MAX, 1,
- ADV7343_GAIN_DEF);
- default:
- break;
- }
-
- return 0;
-}
-
-static int adv7343_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct adv7343_state *state = to_state(sd);
- int err = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- if (ctrl->value < ADV7343_BRIGHTNESS_MIN ||
- ctrl->value > ADV7343_BRIGHTNESS_MAX) {
- v4l2_dbg(1, debug, sd,
- "invalid brightness settings %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- state->bright = ctrl->value;
- err = adv7343_write(sd, ADV7343_SD_BRIGHTNESS_WSS,
- state->bright);
- break;
-
- case V4L2_CID_HUE:
- if (ctrl->value < ADV7343_HUE_MIN ||
- ctrl->value > ADV7343_HUE_MAX) {
- v4l2_dbg(1, debug, sd, "invalid hue settings %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- state->hue = ctrl->value;
- err = adv7343_write(sd, ADV7343_SD_HUE_REG, state->hue);
- break;
-
- case V4L2_CID_GAIN:
- if (ctrl->value < ADV7343_GAIN_MIN ||
- ctrl->value > ADV7343_GAIN_MAX) {
- v4l2_dbg(1, debug, sd, "invalid gain settings %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- if ((ctrl->value > POSITIVE_GAIN_MAX) &&
- (ctrl->value < NEGATIVE_GAIN_MIN)) {
- v4l2_dbg(1, debug, sd,
- "gain settings not within the specified range\n");
- return -ERANGE;
- }
-
- state->gain = ctrl->value;
- err = adv7343_write(sd, ADV7343_DAC2_OUTPUT_LEVEL, state->gain);
- break;
-
- default:
- return -EINVAL;
- }
-
- if (err < 0)
- v4l2_err(sd, "Failed to set the encoder controls\n");
-
- return err;
-}
-
-static int adv7343_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int adv7343_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct adv7343_state *state = to_state(sd);
+ struct v4l2_subdev *sd = to_sd(ctrl);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- ctrl->value = state->bright;
- break;
+ return adv7343_write(sd, ADV7343_SD_BRIGHTNESS_WSS,
+ ctrl->val);
case V4L2_CID_HUE:
- ctrl->value = state->hue;
- break;
+ return adv7343_write(sd, ADV7343_SD_HUE_REG, ctrl->val);
case V4L2_CID_GAIN:
- ctrl->value = state->gain;
- break;
-
- default:
- return -EINVAL;
+ return adv7343_write(sd, ADV7343_DAC2_OUTPUT_LEVEL, ctrl->val);
}
-
- return 0;
+ return -EINVAL;
}
static int adv7343_g_chip_ident(struct v4l2_subdev *sd,
@@ -383,12 +298,20 @@ static int adv7343_g_chip_ident(struct v4l2_subdev *sd,
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7343, 0);
}
+static const struct v4l2_ctrl_ops adv7343_ctrl_ops = {
+ .s_ctrl = adv7343_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops adv7343_core_ops = {
- .log_status = adv7343_log_status,
- .g_chip_ident = adv7343_g_chip_ident,
- .g_ctrl = adv7343_g_ctrl,
- .s_ctrl = adv7343_s_ctrl,
- .queryctrl = adv7343_queryctrl,
+ .log_status = adv7343_log_status,
+ .g_chip_ident = adv7343_g_chip_ident,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
};
static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
@@ -468,6 +391,7 @@ static int adv7343_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7343_state *state;
+ int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -490,15 +414,46 @@ static int adv7343_probe(struct i2c_client *client,
state->std = V4L2_STD_NTSC;
v4l2_i2c_subdev_init(&state->sd, client, &adv7343_ops);
- return adv7343_initialize(&state->sd);
+
+ v4l2_ctrl_handler_init(&state->hdl, 2);
+ v4l2_ctrl_new_std(&state->hdl, &adv7343_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, ADV7343_BRIGHTNESS_MIN,
+ ADV7343_BRIGHTNESS_MAX, 1,
+ ADV7343_BRIGHTNESS_DEF);
+ v4l2_ctrl_new_std(&state->hdl, &adv7343_ctrl_ops,
+ V4L2_CID_HUE, ADV7343_HUE_MIN,
+ ADV7343_HUE_MAX, 1,
+ ADV7343_HUE_DEF);
+ v4l2_ctrl_new_std(&state->hdl, &adv7343_ctrl_ops,
+ V4L2_CID_GAIN, ADV7343_GAIN_MIN,
+ ADV7343_GAIN_MAX, 1,
+ ADV7343_GAIN_DEF);
+ state->sd.ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&state->hdl);
+
+ err = adv7343_initialize(&state->sd);
+ if (err) {
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ }
+ return err;
}
static int adv7343_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7343_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
return 0;
}
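The adv7343 hunk above is one instance of the conversion repeated throughout this series: the per-driver queryctrl/g_ctrl/s_ctrl implementations and the cached bright/hue/gain fields are dropped, a v4l2_ctrl_handler embedded in the state struct owns the control values, and the driver only supplies an s_ctrl callback that pushes an already-validated value to the hardware. Stripped of the chip-specific register writes, the skeleton of that conversion looks roughly like this (names are illustrative, error handling abbreviated):

/* Sketch of the control-framework pattern used by these subdev conversions. */
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

struct foo_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct foo_state *state =
		container_of(ctrl->handler, struct foo_state, hdl);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		/* ctrl->val has already been range-checked by the framework;
		 * write it to the hardware via state->sd here */
		return 0;
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};

static int foo_init_controls(struct foo_state *state)
{
	v4l2_ctrl_handler_init(&state->hdl, 1);
	v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	state->sd.ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		return err;
	}
	/* Program the default values into the hardware. */
	return v4l2_ctrl_handler_setup(&state->hdl);
}

The explicit range checks and -ERANGE paths disappear because v4l2_ctrl_new_std() records min/max/step and the core clamps values before s_ctrl is ever called; the matching cleanup is a v4l2_ctrl_handler_free() before kfree() in remove(), as the adv7343 hunk above and the bt819 hunk below show.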
diff --git a/drivers/media/video/adv7343_regs.h b/drivers/media/video/adv7343_regs.h
index 3431045b33da..446606764346 100644
--- a/drivers/media/video/adv7343_regs.h
+++ b/drivers/media/video/adv7343_regs.h
@@ -102,10 +102,6 @@ struct adv7343_std_info {
/* Bit masks for DAC output levels */
#define DAC_OUTPUT_LEVEL_MASK (0xFF)
-#define POSITIVE_GAIN_MAX (0x40)
-#define POSITIVE_GAIN_MIN (0x00)
-#define NEGATIVE_GAIN_MAX (0xFF)
-#define NEGATIVE_GAIN_MIN (0xC0)
/* Bit masks for soft reset register */
#define SOFT_RESET (0x02)
@@ -178,8 +174,8 @@ struct adv7343_std_info {
#define ADV7343_HUE_MAX (255)
#define ADV7343_HUE_MIN (0)
#define ADV7343_HUE_DEF (127)
-#define ADV7343_GAIN_MAX (255)
-#define ADV7343_GAIN_MIN (0)
+#define ADV7343_GAIN_MAX (64)
+#define ADV7343_GAIN_MIN (-64)
#define ADV7343_GAIN_DEF (0)
#endif
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index c38300fc0b1d..f87204461cb4 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/bt819.h>
MODULE_DESCRIPTION("Brooktree-819 video decoder driver");
@@ -52,16 +53,13 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct bt819 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
unsigned char reg[32];
v4l2_std_id norm;
int ident;
int input;
int enable;
- int bright;
- int contrast;
- int hue;
- int sat;
};
static inline struct bt819 *to_bt819(struct v4l2_subdev *sd)
@@ -69,6 +67,11 @@ static inline struct bt819 *to_bt819(struct v4l2_subdev *sd)
return container_of(sd, struct bt819, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct bt819, hdl)->sd;
+}
+
struct timing {
int hactive;
int hdelay;
@@ -333,71 +336,35 @@ static int bt819_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int bt819_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- break;
-
- case V4L2_CID_CONTRAST:
- v4l2_ctrl_query_fill(qc, 0, 511, 1, 256);
- break;
-
- case V4L2_CID_SATURATION:
- v4l2_ctrl_query_fill(qc, 0, 511, 1, 256);
- break;
-
- case V4L2_CID_HUE:
- v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int bt819_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int bt819_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct bt819 *decoder = to_bt819(sd);
int temp;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- if (decoder->bright == ctrl->value)
- break;
- decoder->bright = ctrl->value;
- bt819_write(decoder, 0x0a, decoder->bright);
+ bt819_write(decoder, 0x0a, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- if (decoder->contrast == ctrl->value)
- break;
- decoder->contrast = ctrl->value;
- bt819_write(decoder, 0x0c, decoder->contrast & 0xff);
- bt819_setbit(decoder, 0x0b, 2, ((decoder->contrast >> 8) & 0x01));
+ bt819_write(decoder, 0x0c, ctrl->val & 0xff);
+ bt819_setbit(decoder, 0x0b, 2, ((ctrl->val >> 8) & 0x01));
break;
case V4L2_CID_SATURATION:
- if (decoder->sat == ctrl->value)
- break;
- decoder->sat = ctrl->value;
- bt819_write(decoder, 0x0d, (decoder->sat >> 7) & 0xff);
- bt819_setbit(decoder, 0x0b, 1, ((decoder->sat >> 15) & 0x01));
+ bt819_write(decoder, 0x0d, (ctrl->val >> 7) & 0xff);
+ bt819_setbit(decoder, 0x0b, 1, ((ctrl->val >> 15) & 0x01));
/* Ratio between U gain and V gain must stay the same as
the ratio between the default U and V gain values. */
- temp = (decoder->sat * 180) / 254;
+ temp = (ctrl->val * 180) / 254;
bt819_write(decoder, 0x0e, (temp >> 7) & 0xff);
bt819_setbit(decoder, 0x0b, 0, (temp >> 15) & 0x01);
break;
case V4L2_CID_HUE:
- if (decoder->hue == ctrl->value)
- break;
- decoder->hue = ctrl->value;
- bt819_write(decoder, 0x0f, decoder->hue);
+ bt819_write(decoder, 0x0f, ctrl->val);
break;
default:
@@ -406,29 +373,6 @@ static int bt819_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return 0;
}
-static int bt819_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct bt819 *decoder = to_bt819(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = decoder->bright;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = decoder->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = decoder->sat;
- break;
- case V4L2_CID_HUE:
- ctrl->value = decoder->hue;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static int bt819_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
struct bt819 *decoder = to_bt819(sd);
@@ -439,11 +383,19 @@ static int bt819_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops bt819_ctrl_ops = {
+ .s_ctrl = bt819_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops bt819_core_ops = {
.g_chip_ident = bt819_g_chip_ident,
- .g_ctrl = bt819_g_ctrl,
- .s_ctrl = bt819_s_ctrl,
- .queryctrl = bt819_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = bt819_s_std,
};
@@ -505,23 +457,40 @@ static int bt819_probe(struct i2c_client *client,
decoder->norm = V4L2_STD_NTSC;
decoder->input = 0;
decoder->enable = 1;
- decoder->bright = 0;
- decoder->contrast = 0xd8; /* 100% of original signal */
- decoder->hue = 0;
- decoder->sat = 0xfe; /* 100% of original signal */
i = bt819_init(sd);
if (i < 0)
v4l2_dbg(1, debug, sd, "init status %d\n", i);
+
+ v4l2_ctrl_handler_init(&decoder->hdl, 4);
+ v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 511, 1, 0xd8);
+ v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 511, 1, 0xfe);
+ v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ sd->ctrl_handler = &decoder->hdl;
+ if (decoder->hdl.error) {
+ int err = decoder->hdl.error;
+
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&decoder->hdl);
return 0;
}
static int bt819_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct bt819 *decoder = to_bt819(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_bt819(sd));
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
return 0;
}
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index 9358fe77e562..5909f2557ab4 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC");
MODULE_AUTHOR("Hans Verkuil");
@@ -36,6 +37,20 @@ module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");
+struct cs5345_state {
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+};
+
+static inline struct cs5345_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct cs5345_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct cs5345_state, hdl)->sd;
+}
/* ----------------------------------------------------------------------- */
@@ -65,33 +80,20 @@ static int cs5345_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int cs5345_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int cs5345_s_ctrl(struct v4l2_ctrl *ctrl)
{
- if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
- ctrl->value = (cs5345_read(sd, 0x04) & 0x08) != 0;
- return 0;
- }
- if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
- return -EINVAL;
- ctrl->value = cs5345_read(sd, 0x07) & 0x3f;
- if (ctrl->value >= 32)
- ctrl->value = ctrl->value - 64;
- return 0;
-}
+ struct v4l2_subdev *sd = to_sd(ctrl);
-static int cs5345_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
- cs5345_write(sd, 0x04, ctrl->value ? 0x80 : 0);
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ cs5345_write(sd, 0x04, ctrl->val ? 0x80 : 0);
+ return 0;
+ case V4L2_CID_AUDIO_VOLUME:
+ cs5345_write(sd, 0x07, ((u8)ctrl->val) & 0x3f);
+ cs5345_write(sd, 0x08, ((u8)ctrl->val) & 0x3f);
return 0;
}
- if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
- return -EINVAL;
- if (ctrl->value > 24 || ctrl->value < -24)
- return -EINVAL;
- cs5345_write(sd, 0x07, ((u8)ctrl->value) & 0x3f);
- cs5345_write(sd, 0x08, ((u8)ctrl->value) & 0x3f);
- return 0;
+ return -EINVAL;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -144,11 +146,20 @@ static int cs5345_log_status(struct v4l2_subdev *sd)
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops cs5345_ctrl_ops = {
+ .s_ctrl = cs5345_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops cs5345_core_ops = {
.log_status = cs5345_log_status,
.g_chip_ident = cs5345_g_chip_ident,
- .g_ctrl = cs5345_g_ctrl,
- .s_ctrl = cs5345_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = cs5345_g_register,
.s_register = cs5345_s_register,
@@ -169,6 +180,7 @@ static const struct v4l2_subdev_ops cs5345_ops = {
static int cs5345_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct cs5345_state *state;
struct v4l2_subdev *sd;
/* Check if the adapter supports the needed features */
@@ -178,11 +190,28 @@ static int cs5345_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
- if (sd == NULL)
+ state = kzalloc(sizeof(struct cs5345_state), GFP_KERNEL);
+ if (state == NULL)
return -ENOMEM;
+ sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &cs5345_ops);
+ v4l2_ctrl_handler_init(&state->hdl, 2);
+ v4l2_ctrl_new_std(&state->hdl, &cs5345_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&state->hdl, &cs5345_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -24, 24, 1, 0);
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+ /* set volume/mute */
+ v4l2_ctrl_handler_setup(&state->hdl);
+
cs5345_write(sd, 0x02, 0x00);
cs5345_write(sd, 0x04, 0x01);
cs5345_write(sd, 0x09, 0x01);
@@ -194,9 +223,11 @@ static int cs5345_probe(struct i2c_client *client,
static int cs5345_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct cs5345_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(sd);
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-av-audio.c b/drivers/media/video/cx18/cx18-av-audio.c
index 43d09a24b262..4a24ffb17a7d 100644
--- a/drivers/media/video/cx18/cx18-av-audio.c
+++ b/drivers/media/video/cx18/cx18-av-audio.c
@@ -342,17 +342,6 @@ void cx18_av_audio_set_path(struct cx18 *cx)
}
}
-static int get_volume(struct cx18 *cx)
-{
- /* Volume runs +18dB to -96dB in 1/2dB steps
- * change to fit the msp3400 -114dB to +12dB range */
-
- /* check PATH1_VOLUME */
- int vol = 228 - cx18_av_read(cx, 0x8d4);
- vol = (vol / 2) + 23;
- return vol << 9;
-}
-
static void set_volume(struct cx18 *cx, int volume)
{
/* First convert the volume to msp3400 values (0-127) */
@@ -369,52 +358,18 @@ static void set_volume(struct cx18 *cx, int volume)
cx18_av_write(cx, 0x8d4, 228 - (vol * 2));
}
-static int get_bass(struct cx18 *cx)
-{
- /* bass is 49 steps +12dB to -12dB */
-
- /* check PATH1_EQ_BASS_VOL */
- int bass = cx18_av_read(cx, 0x8d9) & 0x3f;
- bass = (((48 - bass) * 0xffff) + 47) / 48;
- return bass;
-}
-
static void set_bass(struct cx18 *cx, int bass)
{
/* PATH1_EQ_BASS_VOL */
cx18_av_and_or(cx, 0x8d9, ~0x3f, 48 - (bass * 48 / 0xffff));
}
-static int get_treble(struct cx18 *cx)
-{
- /* treble is 49 steps +12dB to -12dB */
-
- /* check PATH1_EQ_TREBLE_VOL */
- int treble = cx18_av_read(cx, 0x8db) & 0x3f;
- treble = (((48 - treble) * 0xffff) + 47) / 48;
- return treble;
-}
-
static void set_treble(struct cx18 *cx, int treble)
{
/* PATH1_EQ_TREBLE_VOL */
cx18_av_and_or(cx, 0x8db, ~0x3f, 48 - (treble * 48 / 0xffff));
}
-static int get_balance(struct cx18 *cx)
-{
- /* balance is 7 bit, 0 to -96dB */
-
- /* check PATH1_BAL_LEVEL */
- int balance = cx18_av_read(cx, 0x8d5) & 0x7f;
- /* check PATH1_BAL_LEFT */
- if ((cx18_av_read(cx, 0x8d5) & 0x80) == 0)
- balance = 0x80 - balance;
- else
- balance = 0x80 + balance;
- return balance << 8;
-}
-
static void set_balance(struct cx18 *cx, int balance)
{
int bal = balance >> 8;
@@ -431,12 +386,6 @@ static void set_balance(struct cx18 *cx, int balance)
}
}
-static int get_mute(struct cx18 *cx)
-{
- /* check SRC1_MUTE_EN */
- return cx18_av_read(cx, 0x8d3) & 0x2 ? 1 : 0;
-}
-
static void set_mute(struct cx18 *cx, int mute)
{
struct cx18_av_state *state = &cx->av_state;
@@ -490,50 +439,33 @@ int cx18_av_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
return retval;
}
-int cx18_av_audio_g_ctrl(struct cx18 *cx, struct v4l2_control *ctrl)
+static int cx18_av_audio_s_ctrl(struct v4l2_ctrl *ctrl)
{
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = get_volume(cx);
- break;
- case V4L2_CID_AUDIO_BASS:
- ctrl->value = get_bass(cx);
- break;
- case V4L2_CID_AUDIO_TREBLE:
- ctrl->value = get_treble(cx);
- break;
- case V4L2_CID_AUDIO_BALANCE:
- ctrl->value = get_balance(cx);
- break;
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = get_mute(cx);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
-int cx18_av_audio_s_ctrl(struct cx18 *cx, struct v4l2_control *ctrl)
-{
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
- set_volume(cx, ctrl->value);
+ set_volume(cx, ctrl->val);
break;
case V4L2_CID_AUDIO_BASS:
- set_bass(cx, ctrl->value);
+ set_bass(cx, ctrl->val);
break;
case V4L2_CID_AUDIO_TREBLE:
- set_treble(cx, ctrl->value);
+ set_treble(cx, ctrl->val);
break;
case V4L2_CID_AUDIO_BALANCE:
- set_balance(cx, ctrl->value);
+ set_balance(cx, ctrl->val);
break;
case V4L2_CID_AUDIO_MUTE:
- set_mute(cx, ctrl->value);
+ set_mute(cx, ctrl->val);
break;
default:
return -EINVAL;
}
return 0;
}
+
+const struct v4l2_ctrl_ops cx18_av_audio_ctrl_ops = {
+ .s_ctrl = cx18_av_audio_s_ctrl,
+};
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index a41951cab276..f164b7f610a5 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -129,6 +129,7 @@ static void cx18_av_initialize(struct v4l2_subdev *sd)
{
struct cx18_av_state *state = to_cx18_av_state(sd);
struct cx18 *cx = v4l2_get_subdevdata(sd);
+ int default_volume;
u32 v;
cx18_av_loadfw(cx);
@@ -247,8 +248,23 @@ static void cx18_av_initialize(struct v4l2_subdev *sd)
/* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x6628021F); */
/* } */
cx18_av_write4(cx, CXADEC_SRC_COMB_CFG, 0x6628021F);
- state->default_volume = 228 - cx18_av_read(cx, 0x8d4);
- state->default_volume = ((state->default_volume / 2) + 23) << 9;
+ default_volume = cx18_av_read(cx, 0x8d4);
+ /*
+ * Enforce the legacy volume scale mapping limits to avoid
+ * -ERANGE errors when initializing the volume control
+ */
+ if (default_volume > 228) {
+ /* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */
+ default_volume = 228;
+ cx18_av_write(cx, 0x8d4, 228);
+ } else if (default_volume < 20) {
+ /* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */
+ default_volume = 20;
+ cx18_av_write(cx, 0x8d4, 20);
+ }
+ default_volume = (((228 - default_volume) >> 1) + 23) << 9;
+ state->volume->cur.val = state->volume->default_value = default_volume;
+ v4l2_ctrl_handler_setup(&state->hdl);
}
static int cx18_av_reset(struct v4l2_subdev *sd, u32 val)
@@ -901,126 +917,35 @@ static int cx18_av_s_radio(struct v4l2_subdev *sd)
return 0;
}
-static int cx18_av_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int cx18_av_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct cx18 *cx = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- if (ctrl->value < 0 || ctrl->value > 255) {
- CX18_ERR_DEV(sd, "invalid brightness setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx18_av_write(cx, 0x414, ctrl->value - 128);
+ cx18_av_write(cx, 0x414, ctrl->val - 128);
break;
case V4L2_CID_CONTRAST:
- if (ctrl->value < 0 || ctrl->value > 127) {
- CX18_ERR_DEV(sd, "invalid contrast setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx18_av_write(cx, 0x415, ctrl->value << 1);
+ cx18_av_write(cx, 0x415, ctrl->val << 1);
break;
case V4L2_CID_SATURATION:
- if (ctrl->value < 0 || ctrl->value > 127) {
- CX18_ERR_DEV(sd, "invalid saturation setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx18_av_write(cx, 0x420, ctrl->value << 1);
- cx18_av_write(cx, 0x421, ctrl->value << 1);
+ cx18_av_write(cx, 0x420, ctrl->val << 1);
+ cx18_av_write(cx, 0x421, ctrl->val << 1);
break;
case V4L2_CID_HUE:
- if (ctrl->value < -128 || ctrl->value > 127) {
- CX18_ERR_DEV(sd, "invalid hue setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx18_av_write(cx, 0x422, ctrl->value);
+ cx18_av_write(cx, 0x422, ctrl->val);
break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_MUTE:
- return cx18_av_audio_s_ctrl(cx, ctrl);
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int cx18_av_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct cx18 *cx = v4l2_get_subdevdata(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = (s8)cx18_av_read(cx, 0x414) + 128;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = cx18_av_read(cx, 0x415) >> 1;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = cx18_av_read(cx, 0x420) >> 1;
- break;
- case V4L2_CID_HUE:
- ctrl->value = (s8)cx18_av_read(cx, 0x422);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_MUTE:
- return cx18_av_audio_g_ctrl(cx, ctrl);
default:
return -EINVAL;
}
return 0;
}
-static int cx18_av_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- struct cx18_av_state *state = to_cx18_av_state(sd);
-
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- default:
- break;
- }
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535,
- 65535 / 100, state->default_volume);
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- default:
- return -EINVAL;
- }
- return -EINVAL;
-}
-
static int cx18_av_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
struct cx18_av_state *state = to_cx18_av_state(sd);
@@ -1356,14 +1281,22 @@ static int cx18_av_s_register(struct v4l2_subdev *sd,
}
#endif
+static const struct v4l2_ctrl_ops cx18_av_ctrl_ops = {
+ .s_ctrl = cx18_av_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops cx18_av_general_ops = {
.g_chip_ident = cx18_av_g_chip_ident,
.log_status = cx18_av_log_status,
.load_fw = cx18_av_load_fw,
.reset = cx18_av_reset,
- .queryctrl = cx18_av_queryctrl,
- .g_ctrl = cx18_av_g_ctrl,
- .s_ctrl = cx18_av_s_ctrl,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = cx18_av_s_std,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = cx18_av_g_register,
@@ -1427,8 +1360,42 @@ int cx18_av_probe(struct cx18 *cx)
snprintf(sd->name, sizeof(sd->name),
"%s %03x", cx->v4l2_dev.name, (state->rev >> 4));
sd->grp_id = CX18_HW_418_AV;
+ v4l2_ctrl_handler_init(&state->hdl, 9);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+
+ state->volume = v4l2_ctrl_new_std(&state->hdl,
+ &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
+ 0, 65535, 65535 / 100, 0);
+ v4l2_ctrl_new_std(&state->hdl,
+ &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE,
+ 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops,
+ V4L2_CID_AUDIO_BASS,
+ 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops,
+ V4L2_CID_AUDIO_TREBLE,
+ 0, 65535, 65535 / 100, 32768);
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ return err;
+ }
err = v4l2_device_register_subdev(&cx->v4l2_dev, sd);
- if (!err)
+ if (err)
+ v4l2_ctrl_handler_free(&state->hdl);
+ else
cx18_av_init(cx);
return err;
}
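The cx18_av_initialize() change above clamps the firmware's PATH1_VOLUME byte to the 20-228 range before converting it to the 0-65535 V4L2 scale, so the computed default can never fall outside the range the new volume control advertises. The arithmetic is easy to sanity-check in isolation; the following is a standalone userspace sketch of the same mapping, not driver code:

/* Worked example of the volume register -> V4L2 scale mapping added above. */
#include <stdio.h>

static int vol_reg_to_v4l2(int reg)	/* reg: PATH1_VOLUME byte (0x8d4) */
{
	if (reg > 228)
		reg = 228;	/* bottom out at -96 dB */
	else if (reg < 20)
		reg = 20;	/* top out at +8 dB */
	return (((228 - reg) >> 1) + 23) << 9;
}

int main(void)
{
	/* Prints 0x2e00 0xfe00: the -96 dB floor and +8 dB ceiling noted
	 * in the comments of the hunk above. */
	printf("%#x %#x\n", vol_reg_to_v4l2(228), vol_reg_to_v4l2(20));
	return 0;
}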
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
index 1956991795e3..188c9c3d2db1 100644
--- a/drivers/media/video/cx18/cx18-av-core.h
+++ b/drivers/media/video/cx18/cx18-av-core.h
@@ -26,6 +26,7 @@
#define _CX18_AV_CORE_H_
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
struct cx18;
@@ -95,13 +96,14 @@ enum cx18_av_audio_input {
struct cx18_av_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *volume;
int radio;
v4l2_std_id std;
enum cx18_av_video_input vid_input;
enum cx18_av_audio_input aud_input;
u32 audclk_freq;
int audmode;
- int default_volume;
u32 id;
u32 rev;
int is_initialized;
@@ -347,6 +349,11 @@ static inline struct cx18_av_state *to_cx18_av_state(struct v4l2_subdev *sd)
return container_of(sd, struct cx18_av_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct cx18_av_state, hdl)->sd;
+}
+
/* ----------------------------------------------------------------------- */
/* cx18_av-core.c */
int cx18_av_write(struct cx18 *cx, u16 addr, u8 value);
@@ -369,10 +376,9 @@ int cx18_av_loadfw(struct cx18 *cx);
/* ----------------------------------------------------------------------- */
/* cx18_av-audio.c */
-int cx18_av_audio_g_ctrl(struct cx18 *cx, struct v4l2_control *ctrl);
-int cx18_av_audio_s_ctrl(struct cx18 *cx, struct v4l2_control *ctrl);
int cx18_av_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
void cx18_av_audio_set_path(struct cx18 *cx);
+extern const struct v4l2_ctrl_ops cx18_av_audio_ctrl_ops;
/* ----------------------------------------------------------------------- */
/* cx18_av-vbi.c */
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 97d7b7e100a3..282a3d29fdaa 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -30,152 +30,11 @@
#include "cx18-mailbox.h"
#include "cx18-controls.h"
-/* Must be sorted from low to high control ID! */
-static const u32 user_ctrls[] = {
- V4L2_CID_USER_CLASS,
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_AUDIO_BALANCE,
- V4L2_CID_AUDIO_BASS,
- V4L2_CID_AUDIO_TREBLE,
- V4L2_CID_AUDIO_MUTE,
- V4L2_CID_AUDIO_LOUDNESS,
- 0
-};
-
-static const u32 *ctrl_classes[] = {
- user_ctrls,
- cx2341x_mpeg_ctrls,
- NULL
-};
-
-int cx18_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
-{
- struct cx18 *cx = ((struct cx18_open_id *)fh)->cx;
- const char *name;
-
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (qctrl->id == 0)
- return -EINVAL;
-
- switch (qctrl->id) {
- /* Standard V4L2 controls */
- case V4L2_CID_USER_CLASS:
- return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_HUE:
- case V4L2_CID_SATURATION:
- case V4L2_CID_CONTRAST:
- if (v4l2_subdev_call(cx->sd_av, core, queryctrl, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
-
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- if (v4l2_subdev_call(cx->sd_av, core, queryctrl, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
-
- default:
- if (cx2341x_ctrl_query(&cx->params, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
- }
- strncpy(qctrl->name, name, sizeof(qctrl->name) - 1);
- qctrl->name[sizeof(qctrl->name) - 1] = 0;
- return 0;
-}
-
-int cx18_querymenu(struct file *file, void *fh, struct v4l2_querymenu *qmenu)
+static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
{
- struct cx18 *cx = ((struct cx18_open_id *)fh)->cx;
- struct v4l2_queryctrl qctrl;
-
- qctrl.id = qmenu->id;
- cx18_queryctrl(file, fh, &qctrl);
- return v4l2_ctrl_query_menu(qmenu, &qctrl,
- cx2341x_ctrl_get_menu(&cx->params, qmenu->id));
-}
-
-static int cx18_try_ctrl(struct file *file, void *fh,
- struct v4l2_ext_control *vctrl)
-{
- struct v4l2_queryctrl qctrl;
- const char * const *menu_items = NULL;
- int err;
-
- qctrl.id = vctrl->id;
- err = cx18_queryctrl(file, fh, &qctrl);
- if (err)
- return err;
- if (qctrl.type == V4L2_CTRL_TYPE_MENU)
- menu_items = v4l2_ctrl_get_menu(qctrl.id);
- return v4l2_ctrl_check(vctrl, &qctrl, menu_items);
-}
-
-static int cx18_s_ctrl(struct cx18 *cx, struct v4l2_control *vctrl)
-{
- switch (vctrl->id) {
- /* Standard V4L2 controls */
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_HUE:
- case V4L2_CID_SATURATION:
- case V4L2_CID_CONTRAST:
- return v4l2_subdev_call(cx->sd_av, core, s_ctrl, vctrl);
-
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- return v4l2_subdev_call(cx->sd_av, core, s_ctrl, vctrl);
-
- default:
- CX18_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id);
- return -EINVAL;
- }
- return 0;
-}
-
-static int cx18_g_ctrl(struct cx18 *cx, struct v4l2_control *vctrl)
-{
- switch (vctrl->id) {
- /* Standard V4L2 controls */
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_HUE:
- case V4L2_CID_SATURATION:
- case V4L2_CID_CONTRAST:
- return v4l2_subdev_call(cx->sd_av, core, g_ctrl, vctrl);
-
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- return v4l2_subdev_call(cx->sd_av, core, g_ctrl, vctrl);
+ struct cx18 *cx = container_of(cxhdl, struct cx18, cxhdl);
+ int type = cxhdl->stream_type->val;
- default:
- CX18_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id);
- return -EINVAL;
- }
- return 0;
-}
-
-static int cx18_setup_vbi_fmt(struct cx18 *cx,
- enum v4l2_mpeg_stream_vbi_fmt fmt,
- enum v4l2_mpeg_stream_type type)
-{
- if (!(cx->v4l2_cap & V4L2_CAP_SLICED_VBI_CAPTURE))
- return -EINVAL;
if (atomic_read(&cx->ana_capturing) > 0)
return -EBUSY;
@@ -230,121 +89,43 @@ static int cx18_setup_vbi_fmt(struct cx18 *cx,
return 0;
}
-int cx18_g_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
+static int cx18_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
{
- struct cx18 *cx = ((struct cx18_open_id *)fh)->cx;
- struct v4l2_control ctrl;
-
- if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
- int i;
- int err = 0;
-
- for (i = 0; i < c->count; i++) {
- ctrl.id = c->controls[i].id;
- ctrl.value = c->controls[i].value;
- err = cx18_g_ctrl(cx, &ctrl);
- c->controls[i].value = ctrl.value;
- if (err) {
- c->error_idx = i;
- break;
- }
- }
- return err;
- }
- if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
- return cx2341x_ext_ctrls(&cx->params, 0, c, VIDIOC_G_EXT_CTRLS);
- return -EINVAL;
+ struct cx18 *cx = container_of(cxhdl, struct cx18, cxhdl);
+ int is_mpeg1 = val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
+ struct v4l2_mbus_framefmt fmt;
+
+ /* fix videodecoder resolution */
+ fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
+ fmt.height = cxhdl->height;
+ fmt.code = V4L2_MBUS_FMT_FIXED;
+ v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &fmt);
+ return 0;
}
-int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
+static int cx18_s_audio_sampling_freq(struct cx2341x_handler *cxhdl, u32 idx)
{
- struct cx18_open_id *id = fh;
- struct cx18 *cx = id->cx;
- int ret;
- struct v4l2_control ctrl;
-
- ret = v4l2_prio_check(&cx->prio, id->prio);
- if (ret)
- return ret;
-
- if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
- int i;
- int err = 0;
-
- for (i = 0; i < c->count; i++) {
- ctrl.id = c->controls[i].id;
- ctrl.value = c->controls[i].value;
- err = cx18_s_ctrl(cx, &ctrl);
- c->controls[i].value = ctrl.value;
- if (err) {
- c->error_idx = i;
- break;
- }
- }
- return err;
- }
- if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- static u32 freqs[3] = { 44100, 48000, 32000 };
- struct cx18_api_func_private priv;
- struct cx2341x_mpeg_params p = cx->params;
- int err = cx2341x_ext_ctrls(&p, atomic_read(&cx->ana_capturing),
- c, VIDIOC_S_EXT_CTRLS);
- unsigned int idx;
-
- if (err)
- return err;
+ static const u32 freqs[3] = { 44100, 48000, 32000 };
+ struct cx18 *cx = container_of(cxhdl, struct cx18, cxhdl);
- if (p.video_encoding != cx->params.video_encoding) {
- int is_mpeg1 = p.video_encoding ==
- V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
- struct v4l2_mbus_framefmt fmt;
-
- /* fix videodecoder resolution */
- fmt.width = cx->params.width / (is_mpeg1 ? 2 : 1);
- fmt.height = cx->params.height;
- fmt.code = V4L2_MBUS_FMT_FIXED;
- v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &fmt);
- }
- priv.cx = cx;
- priv.s = &cx->streams[id->type];
- err = cx2341x_update(&priv, cx18_api_func, &cx->params, &p);
- if (!err &&
- (cx->params.stream_vbi_fmt != p.stream_vbi_fmt ||
- cx->params.stream_type != p.stream_type))
- err = cx18_setup_vbi_fmt(cx, p.stream_vbi_fmt,
- p.stream_type);
- cx->params = p;
- cx->dualwatch_stereo_mode = p.audio_properties & 0x0300;
- idx = p.audio_properties & 0x03;
- /* The audio clock of the digitizer must match the codec sample
- rate otherwise you get some very strange effects. */
- if (idx < ARRAY_SIZE(freqs))
- cx18_call_all(cx, audio, s_clock_freq, freqs[idx]);
- return err;
- }
- return -EINVAL;
+ /* The audio clock of the digitizer must match the codec sample
+ rate otherwise you get some very strange effects. */
+ if (idx < ARRAY_SIZE(freqs))
+ cx18_call_all(cx, audio, s_clock_freq, freqs[idx]);
+ return 0;
}
-int cx18_try_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
+static int cx18_s_audio_mode(struct cx2341x_handler *cxhdl, u32 val)
{
- struct cx18 *cx = ((struct cx18_open_id *)fh)->cx;
+ struct cx18 *cx = container_of(cxhdl, struct cx18, cxhdl);
- if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
- int i;
- int err = 0;
-
- for (i = 0; i < c->count; i++) {
- err = cx18_try_ctrl(file, fh, &c->controls[i]);
- if (err) {
- c->error_idx = i;
- break;
- }
- }
- return err;
- }
- if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
- return cx2341x_ext_ctrls(&cx->params,
- atomic_read(&cx->ana_capturing),
- c, VIDIOC_TRY_EXT_CTRLS);
- return -EINVAL;
+ cx->dualwatch_stereo_mode = val;
+ return 0;
}
+
+struct cx2341x_handler_ops cx18_cxhdl_ops = {
+ .s_audio_mode = cx18_s_audio_mode,
+ .s_audio_sampling_freq = cx18_s_audio_sampling_freq,
+ .s_video_encoding = cx18_s_video_encoding,
+ .s_stream_vbi_fmt = cx18_s_stream_vbi_fmt,
+};
diff --git a/drivers/media/video/cx18/cx18-controls.h b/drivers/media/video/cx18/cx18-controls.h
index e46323700b81..cb5dfc7b2054 100644
--- a/drivers/media/video/cx18/cx18-controls.h
+++ b/drivers/media/video/cx18/cx18-controls.h
@@ -21,9 +21,4 @@
* 02111-1307 USA
*/
-int cx18_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *a);
-int cx18_g_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *a);
-int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *a);
-int cx18_try_ext_ctrls(struct file *file, void *fh,
- struct v4l2_ext_controls *a);
-int cx18_querymenu(struct file *file, void *fh, struct v4l2_querymenu *a);
+extern struct cx2341x_handler_ops cx18_cxhdl_ops;
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 944af8adbe0c..869690bcb767 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -36,6 +36,7 @@
#include "cx18-scb.h"
#include "cx18-mailbox.h"
#include "cx18-ioctl.h"
+#include "cx18-controls.h"
#include "tuner-xc2028.h"
#include <media/tveeprom.h>
@@ -708,15 +709,21 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
cx->open_id = 1;
/* Initial settings */
- cx2341x_fill_defaults(&cx->params);
- cx->temporal_strength = cx->params.video_temporal_filter;
- cx->spatial_strength = cx->params.video_spatial_filter;
- cx->filter_mode = cx->params.video_spatial_filter_mode |
- (cx->params.video_temporal_filter_mode << 1) |
- (cx->params.video_median_filter_type << 2);
- cx->params.port = CX2341X_PORT_MEMORY;
- cx->params.capabilities =
- CX2341X_CAP_HAS_TS | CX2341X_CAP_HAS_SLICED_VBI;
+ cx->cxhdl.port = CX2341X_PORT_MEMORY;
+ cx->cxhdl.capabilities = CX2341X_CAP_HAS_TS | CX2341X_CAP_HAS_SLICED_VBI;
+ cx->cxhdl.ops = &cx18_cxhdl_ops;
+ cx->cxhdl.func = cx18_api_func;
+ ret = cx2341x_handler_init(&cx->cxhdl, 50);
+ if (ret)
+ return ret;
+ cx->v4l2_dev.ctrl_handler = &cx->cxhdl.hdl;
+
+ cx->temporal_strength = cx->cxhdl.video_temporal_filter->cur.val;
+ cx->spatial_strength = cx->cxhdl.video_spatial_filter->cur.val;
+ cx->filter_mode = cx->cxhdl.video_spatial_filter_mode->cur.val |
+ (cx->cxhdl.video_temporal_filter_mode->cur.val << 1) |
+ (cx->cxhdl.video_median_filter_type->cur.val << 2);
+
init_waitqueue_head(&cx->cap_w);
init_waitqueue_head(&cx->mb_apu_waitq);
init_waitqueue_head(&cx->mb_cpu_waitq);
@@ -1028,7 +1035,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
else
cx->is_50hz = 1;
- cx->params.video_gop_size = cx->is_60hz ? 15 : 12;
+ cx2341x_handler_set_50hz(&cx->cxhdl, !cx->is_60hz);
if (cx->options.radio > 0)
cx->v4l2_cap |= V4L2_CAP_RADIO;
@@ -1074,7 +1081,6 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
/* Load cx18 submodules (cx18-alsa) */
request_modules(cx);
-
return 0;
free_streams:
@@ -1257,6 +1263,8 @@ static void cx18_remove(struct pci_dev *pci_dev)
for (i = 0; i < CX18_VBI_FRAMES; i++)
kfree(cx->vbi.sliced_mpeg_data[i]);
+ v4l2_ctrl_handler_free(&cx->av_state.hdl);
+
CX18_INFO("Removed %s\n", cx->card_name);
v4l2_device_unregister(v4l2_dev);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 306caac6d3fc..26eb910f6daf 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -564,7 +564,7 @@ struct cx18 {
struct cx18_av_state av_state;
/* codec settings */
- struct cx2341x_mpeg_params params;
+ struct cx2341x_handler cxhdl;
u32 filter_mode;
u32 temporal_strength;
u32 spatial_strength;
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 9f23b90732f2..98ef33e4326a 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -160,13 +160,10 @@ EXPORT_SYMBOL(cx18_release_stream);
static void cx18_dualwatch(struct cx18 *cx)
{
struct v4l2_tuner vt;
- u32 new_bitmap;
u32 new_stereo_mode;
- const u32 stereo_mask = 0x0300;
const u32 dual = 0x0200;
- u32 h;
- new_stereo_mode = cx->params.audio_properties & stereo_mask;
+ new_stereo_mode = v4l2_ctrl_g_ctrl(cx->cxhdl.audio_mode);
memset(&vt, 0, sizeof(vt));
cx18_call_all(cx, tuner, g_tuner, &vt);
if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 &&
@@ -176,25 +173,10 @@ static void cx18_dualwatch(struct cx18 *cx)
if (new_stereo_mode == cx->dualwatch_stereo_mode)
return;
- new_bitmap = new_stereo_mode
- | (cx->params.audio_properties & ~stereo_mask);
-
- CX18_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x. "
- "new audio_bitmask=0x%ux\n",
- cx->dualwatch_stereo_mode, new_stereo_mode, new_bitmap);
-
- h = cx18_find_handle(cx);
- if (h == CX18_INVALID_TASK_HANDLE) {
- CX18_DEBUG_INFO("dualwatch: can't find valid task handle\n");
- return;
- }
-
- if (cx18_vapi(cx,
- CX18_CPU_SET_AUDIO_PARAMETERS, 2, h, new_bitmap) == 0) {
- cx->dualwatch_stereo_mode = new_stereo_mode;
- return;
- }
- CX18_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
+ CX18_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x.\n",
+ cx->dualwatch_stereo_mode, new_stereo_mode);
+ if (v4l2_ctrl_s_ctrl(cx->cxhdl.audio_mode, new_stereo_mode))
+ CX18_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
}
@@ -724,8 +706,8 @@ int cx18_v4l2_close(struct file *filp)
if (atomic_read(&cx->ana_capturing) > 0) {
/* Undo video mute */
cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
- cx->params.video_mute |
- (cx->params.video_mute_yuv << 8));
+ (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute) |
+ (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute_yuv) << 8)));
}
/* Done! Unmute and continue. */
cx18_unmute(cx);
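With the handler in place, in-kernel code no longer pokes at cx->params directly: cx18_dualwatch() and cx18_v4l2_close() above go through v4l2_ctrl_g_ctrl()/v4l2_ctrl_s_ctrl() on struct v4l2_ctrl pointers exposed by the cx2341x handler (audio_mode, video_mute, video_mute_yuv). In isolation the access pattern is simply this (a hedged sketch with a hypothetical control pointer, not code from the patch):

/* Sketch: driver-internal control access through a cached v4l2_ctrl pointer,
 * e.g. one returned by v4l2_ctrl_new_std() at probe time. */
#include <media/v4l2-ctrls.h>

static void foo_halve_volume(struct v4l2_ctrl *volume)
{
	s32 cur = v4l2_ctrl_g_ctrl(volume);	/* takes the handler lock */

	/* v4l2_ctrl_s_ctrl() clamps to the control's range and invokes the
	 * driver's s_ctrl op when the value actually changes. */
	v4l2_ctrl_s_ctrl(volume, cur / 2);
}

Because setting the control runs the handler's s_ctrl path, the dualwatch logic above can simply hand the new stereo mode to the control framework instead of issuing the CX18_CPU_SET_AUDIO_PARAMETERS mailbox command itself.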
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index 7150195740dc..36b018c943e5 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -152,8 +152,8 @@ static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
struct cx18 *cx = id->cx;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- pixfmt->width = cx->params.width;
- pixfmt->height = cx->params.height;
+ pixfmt->width = cx->cxhdl.width;
+ pixfmt->height = cx->cxhdl.height;
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
pixfmt->priv = 0;
@@ -287,14 +287,14 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
w = fmt->fmt.pix.width;
h = fmt->fmt.pix.height;
- if (cx->params.width == w && cx->params.height == h)
+ if (cx->cxhdl.width == w && cx->cxhdl.height == h)
return 0;
if (atomic_read(&cx->ana_capturing) > 0)
return -EBUSY;
- mbus_fmt.width = cx->params.width = w;
- mbus_fmt.height = cx->params.height = h;
+ mbus_fmt.width = cx->cxhdl.width = w;
+ mbus_fmt.height = cx->cxhdl.height = h;
mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &mbus_fmt);
return cx18_g_fmt_vid_cap(file, fh, fmt);
@@ -696,9 +696,10 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id *std)
cx->std = *std;
cx->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
- cx->params.is_50hz = cx->is_50hz = !cx->is_60hz;
- cx->params.width = 720;
- cx->params.height = cx->is_50hz ? 576 : 480;
+ cx->is_50hz = !cx->is_60hz;
+ cx2341x_handler_set_50hz(&cx->cxhdl, cx->is_50hz);
+ cx->cxhdl.width = 720;
+ cx->cxhdl.height = cx->is_50hz ? 576 : 480;
cx->vbi.count = cx->is_50hz ? 18 : 12;
cx->vbi.start[0] = cx->is_50hz ? 6 : 10;
cx->vbi.start[1] = cx->is_50hz ? 318 : 273;
@@ -1035,7 +1036,7 @@ static int cx18_log_status(struct file *file, void *fh)
mutex_unlock(&cx->gpio_lock);
CX18_INFO("Tuner: %s\n",
test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) ? "Radio" : "TV");
- cx2341x_log_status(&cx->params, cx->v4l2_dev.name);
+ v4l2_ctrl_handler_log_status(&cx->cxhdl.hdl, cx->v4l2_dev.name);
CX18_INFO("Status flags: 0x%08lx\n", cx->i_flags);
for (i = 0; i < CX18_MAX_STREAMS; i++) {
struct cx18_stream *s = &cx->streams[i];
@@ -1136,11 +1137,6 @@ static const struct v4l2_ioctl_ops cx18_ioctl_ops = {
.vidioc_s_register = cx18_s_register,
#endif
.vidioc_default = cx18_default,
- .vidioc_queryctrl = cx18_queryctrl,
- .vidioc_querymenu = cx18_querymenu,
- .vidioc_g_ext_ctrls = cx18_g_ext_ctrls,
- .vidioc_s_ext_ctrls = cx18_s_ext_ctrls,
- .vidioc_try_ext_ctrls = cx18_try_ext_ctrls,
};
void cx18_set_funcs(struct video_device *vdev)
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index c545f3beef78..9605d54bd083 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -716,9 +716,8 @@ static int cx18_set_filter_param(struct cx18_stream *s)
int cx18_api_func(void *priv, u32 cmd, int in, int out,
u32 data[CX2341X_MBOX_MAX_DATA])
{
- struct cx18_api_func_private *api_priv = priv;
- struct cx18 *cx = api_priv->cx;
- struct cx18_stream *s = api_priv->s;
+ struct cx18_stream *s = priv;
+ struct cx18 *cx = s->cx;
switch (cmd) {
case CX2341X_ENC_SET_OUTPUT_PORT:
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
index 077952fcbcca..05fe6bdbe062 100644
--- a/drivers/media/video/cx18/cx18-mailbox.h
+++ b/drivers/media/video/cx18/cx18-mailbox.h
@@ -81,11 +81,6 @@ struct cx18_mailbox {
struct cx18_stream;
-struct cx18_api_func_private {
- struct cx18 *cx;
- struct cx18_stream *s;
-};
-
int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]);
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], u32 cmd,
int args, ...);
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 94f5d7967c5c..2d248560770e 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -572,7 +572,7 @@ static void cx18_stream_configure_mdls(struct cx18_stream *s)
* Set the MDL size to the exact size needed for one frame.
* Use enough buffers per MDL to cover the MDL size
*/
- s->mdl_size = 720 * s->cx->params.height * 3 / 2;
+ s->mdl_size = 720 * s->cx->cxhdl.height * 3 / 2;
s->bufs_per_mdl = s->mdl_size / s->buf_size;
if (s->mdl_size % s->buf_size)
s->bufs_per_mdl++;
@@ -607,7 +607,6 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
u32 data[MAX_MB_ARGUMENTS];
struct cx18 *cx = s->cx;
int captype = 0;
- struct cx18_api_func_private priv;
struct cx18_stream *s_idx;
if (!cx18_stream_enabled(s))
@@ -620,7 +619,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
captype = CAPTURE_CHANNEL_TYPE_MPEG;
cx->mpg_data_received = cx->vbi_data_inserted = 0;
cx->dualwatch_jiffies = jiffies;
- cx->dualwatch_stereo_mode = cx->params.audio_properties & 0x300;
+ cx->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(cx->cxhdl.audio_mode);
cx->search_pack_header = 0;
break;
@@ -710,21 +709,21 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
s->handle, cx18_stream_enabled(s_idx) ? 7 : 0);
/* Call out to the common CX2341x API setup for user controls */
- priv.cx = cx;
- priv.s = s;
- cx2341x_update(&priv, cx18_api_func, NULL, &cx->params);
+ cx->cxhdl.priv = s;
+ cx2341x_handler_setup(&cx->cxhdl);
/*
* When starting a capture and we're set for radio,
* ensure the video is muted, despite the user control.
*/
- if (!cx->params.video_mute &&
+ if (!cx->cxhdl.video_mute &&
test_bit(CX18_F_I_RADIO_USER, &cx->i_flags))
cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
- (cx->params.video_mute_yuv << 8) | 1);
+ (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute_yuv) << 8) | 1);
}
if (atomic_read(&cx->tot_capturing) == 0) {
+ cx2341x_handler_set_busy(&cx->cxhdl, 1);
clear_bit(CX18_F_I_EOS, &cx->i_flags);
cx18_write_reg(cx, 7, CX18_DSP0_INTERRUPT_MASK);
}
@@ -826,6 +825,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
if (atomic_read(&cx->tot_capturing) > 0)
return 0;
+ cx2341x_handler_set_busy(&cx->cxhdl, 0);
cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
wake_up(&s->waitq);
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index e4bba88254c7..031af1610154 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -1445,8 +1445,7 @@ static struct video_device viu_template = {
.current_norm = V4L2_STD_NTSC_M,
};
-static int __devinit viu_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit viu_of_probe(struct platform_device *op)
{
struct viu_dev *viu_dev;
struct video_device *vdev;
@@ -1627,7 +1626,7 @@ static struct of_device_id mpc512x_viu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
-static struct of_platform_driver viu_of_platform_driver = {
+static struct platform_driver viu_of_platform_driver = {
.probe = viu_of_probe,
.remove = __devexit_p(viu_of_remove),
#ifdef CONFIG_PM
@@ -1643,12 +1642,12 @@ static struct of_platform_driver viu_of_platform_driver = {
static int __init viu_init(void)
{
- return of_register_platform_driver(&viu_of_platform_driver);
+ return platform_driver_register(&viu_of_platform_driver);
}
static void __exit viu_exit(void)
{
- of_unregister_platform_driver(&viu_of_platform_driver);
+ platform_driver_unregister(&viu_of_platform_driver);
}
module_init(viu_init);
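The fsl-viu hunk above tracks the retirement of of_platform_driver in favor of the regular platform bus: the probe callback loses its of_device_id argument and the driver registers as an ordinary platform_driver. A self-contained sketch of the resulting shape follows; the names and compatible string are illustrative, and the .driver fields are not shown in the hunk above, so they are an assumption here:

/* Sketch: OF-matched driver registered through the plain platform bus. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static int __devinit foo_of_probe(struct platform_device *op)
{
	/* The match data formerly passed as a second argument is now derived
	 * from op->dev.of_node when it is needed. */
	return 0;
}

static int __devexit foo_of_remove(struct platform_device *op)
{
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "example,foo" },	/* hypothetical */
	{ }
};

static struct platform_driver foo_of_driver = {
	.probe	= foo_of_probe,
	.remove	= __devexit_p(foo_of_remove),
	.driver	= {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_of_match,
	},
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_of_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_of_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");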
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index c179041d91f8..6ab2d4f26342 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -28,7 +28,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
@@ -201,11 +201,6 @@ struct m2mtest_ctx {
struct v4l2_m2m_ctx *m2m_ctx;
};
-struct m2mtest_buffer {
- /* vb must be first! */
- struct videobuf_buffer vb;
-};
-
static struct v4l2_queryctrl *get_ctrl(int id)
{
int i;
@@ -219,37 +214,41 @@ static struct v4l2_queryctrl *get_ctrl(int id)
}
static int device_process(struct m2mtest_ctx *ctx,
- struct m2mtest_buffer *in_buf,
- struct m2mtest_buffer *out_buf)
+ struct vb2_buffer *in_vb,
+ struct vb2_buffer *out_vb)
{
struct m2mtest_dev *dev = ctx->dev;
+ struct m2mtest_q_data *q_data;
u8 *p_in, *p_out;
int x, y, t, w;
int tile_w, bytes_left;
- struct videobuf_queue *src_q;
- struct videobuf_queue *dst_q;
+ int width, height, bytesperline;
- src_q = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
- dst_q = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
- p_in = videobuf_queue_to_vaddr(src_q, &in_buf->vb);
- p_out = videobuf_queue_to_vaddr(dst_q, &out_buf->vb);
+ q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ width = q_data->width;
+ height = q_data->height;
+ bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+
+ p_in = vb2_plane_vaddr(in_vb, 0);
+ p_out = vb2_plane_vaddr(out_vb, 0);
if (!p_in || !p_out) {
v4l2_err(&dev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
return -EFAULT;
}
- if (in_buf->vb.size > out_buf->vb.size) {
+ if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) {
v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
return -EINVAL;
}
- tile_w = (in_buf->vb.width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+ tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
/ MEM2MEM_NUM_TILES;
- bytes_left = in_buf->vb.bytesperline - tile_w * MEM2MEM_NUM_TILES;
+ bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
w = 0;
- for (y = 0; y < in_buf->vb.height; ++y) {
+ for (y = 0; y < height; ++y) {
for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
if (w & 0x1) {
for (x = 0; x < tile_w; ++x)
@@ -301,6 +300,21 @@ static void job_abort(void *priv)
ctx->aborting = 1;
}
+static void m2mtest_lock(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_dev *dev = ctx->dev;
+ mutex_lock(&dev->dev_mutex);
+}
+
+static void m2mtest_unlock(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_dev *dev = ctx->dev;
+ mutex_unlock(&dev->dev_mutex);
+}
+
+
/* device_run() - prepares and starts the device
*
* This simulates all the immediate preparations required before starting
@@ -311,7 +325,7 @@ static void device_run(void *priv)
{
struct m2mtest_ctx *ctx = priv;
struct m2mtest_dev *dev = ctx->dev;
- struct m2mtest_buffer *src_buf, *dst_buf;
+ struct vb2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
@@ -322,12 +336,11 @@ static void device_run(void *priv)
schedule_irq(dev, ctx->transtime);
}
-
static void device_isr(unsigned long priv)
{
struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
struct m2mtest_ctx *curr_ctx;
- struct m2mtest_buffer *src_buf, *dst_buf;
+ struct vb2_buffer *src_vb, *dst_vb;
unsigned long flags;
curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
@@ -338,31 +351,26 @@ static void device_isr(unsigned long priv)
return;
}
- src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
curr_ctx->num_processed++;
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+
if (curr_ctx->num_processed == curr_ctx->translen
|| curr_ctx->aborting) {
dprintk(curr_ctx->dev, "Finishing transaction\n");
curr_ctx->num_processed = 0;
- spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
- src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
- wake_up(&src_buf->vb.done);
- wake_up(&dst_buf->vb.done);
- spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
} else {
- spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
- src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
- wake_up(&src_buf->vb.done);
- wake_up(&dst_buf->vb.done);
- spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
device_run(curr_ctx);
}
}
-
/*
* video ioctls
*/
@@ -423,7 +431,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
{
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
struct m2mtest_q_data *q_data;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -434,7 +442,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
f->fmt.pix.width = q_data->width;
f->fmt.pix.height = q_data->height;
- f->fmt.pix.field = vq->field;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = q_data->fmt->fourcc;
f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
f->fmt.pix.sizeimage = q_data->sizeimage;
@@ -523,7 +531,7 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
{
struct m2mtest_q_data *q_data;
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq)
@@ -533,7 +541,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!q_data)
return -EINVAL;
- if (videobuf_queue_is_busy(vq)) {
+ if (vb2_is_busy(vq)) {
v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
return -EBUSY;
}
@@ -543,7 +551,6 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
q_data->height = f->fmt.pix.height;
q_data->sizeimage = q_data->width * q_data->height
* q_data->fmt->depth >> 3;
- vq->field = f->fmt.pix.field;
dprintk(ctx->dev,
"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
@@ -733,120 +740,94 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
* Queue operations
*/
-static void m2mtest_buf_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct m2mtest_ctx *ctx = vq->priv_data;
-
- dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
- vq->type, vb->i, vb->state);
-
- videobuf_vmalloc_free(vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int m2mtest_buf_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int m2mtest_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
{
- struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq);
struct m2mtest_q_data *q_data;
+ unsigned int size, count = *nbuffers;
q_data = get_q_data(vq->type);
- *size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
- dprintk(ctx->dev, "size:%d, w/h %d/%d, depth: %d\n",
- *size, q_data->width, q_data->height, q_data->fmt->depth);
+ size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
- if (0 == *count)
- *count = MEM2MEM_DEF_NUM_BUFS;
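+ /* Trim the buffer count so the total allocation stays within MEM2MEM_VID_MEM_LIMIT */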
+ while (size * count > MEM2MEM_VID_MEM_LIMIT)
+ (count)--;
- while (*size * *count > MEM2MEM_VID_MEM_LIMIT)
- (*count)--;
+ *nplanes = 1;
+ *nbuffers = count;
+ sizes[0] = size;
- v4l2_info(&ctx->dev->v4l2_dev,
- "%d buffers of size %d set up.\n", *count, *size);
+ /*
+ * The videobuf2-vmalloc allocator is context-less, so there is no need
+ * to set up the alloc_ctxs array.
+ */
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
return 0;
}
-static int m2mtest_buf_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int m2mtest_buf_prepare(struct vb2_buffer *vb)
{
- struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct m2mtest_q_data *q_data;
- int ret;
- dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
- vq->type, vb->i, vb->state);
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
- q_data = get_q_data(vq->type);
+ q_data = get_q_data(vb->vb2_queue->type);
- if (vb->baddr) {
- /* User-provided buffer */
- if (vb->bsize < q_data->sizeimage) {
- /* Buffer too small to fit a frame */
- v4l2_err(&ctx->dev->v4l2_dev,
- "User-provided buffer too small\n");
- return -EINVAL;
- }
- } else if (vb->state != VIDEOBUF_NEEDS_INIT
- && vb->bsize < q_data->sizeimage) {
- /* We provide the buffer, but it's already been initialized
- * and is too small */
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
return -EINVAL;
}
- vb->width = q_data->width;
- vb->height = q_data->height;
- vb->bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
- vb->size = q_data->sizeimage;
- vb->field = field;
-
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret) {
- v4l2_err(&ctx->dev->v4l2_dev,
- "Iolock failed\n");
- goto fail;
- }
- }
-
- vb->state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
return 0;
-fail:
- m2mtest_buf_release(vq, vb);
- return ret;
}
-static void m2mtest_buf_queue(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void m2mtest_buf_queue(struct vb2_buffer *vb)
{
- struct m2mtest_ctx *ctx = vq->priv_data;
-
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
-static struct videobuf_queue_ops m2mtest_qops = {
- .buf_setup = m2mtest_buf_setup,
- .buf_prepare = m2mtest_buf_prepare,
- .buf_queue = m2mtest_buf_queue,
- .buf_release = m2mtest_buf_release,
+static struct vb2_ops m2mtest_qops = {
+ .queue_setup = m2mtest_queue_setup,
+ .buf_prepare = m2mtest_buf_prepare,
+ .buf_queue = m2mtest_buf_queue,
};
-static void queue_init(void *priv, struct videobuf_queue *vq,
- enum v4l2_buf_type type)
+static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
struct m2mtest_ctx *ctx = priv;
- struct m2mtest_dev *dev = ctx->dev;
+ int ret;
- videobuf_queue_vmalloc_init(vq, &m2mtest_qops, dev->v4l2_dev.dev,
- &dev->irqlock, type, V4L2_FIELD_NONE,
- sizeof(struct m2mtest_buffer), priv,
- &dev->dev_mutex);
-}
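+ /* The source queue holds OUTPUT (to-be-processed) buffers, the destination queue CAPTURE buffers */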
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &m2mtest_qops;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &m2mtest_qops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+
+ return vb2_queue_init(dst_vq);
+}
/*
* File operations
@@ -866,7 +847,8 @@ static int m2mtest_open(struct file *file)
ctx->transtime = MEM2MEM_DEF_TRANSTIME;
ctx->num_processed = 0;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, queue_init);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
if (IS_ERR(ctx->m2m_ctx)) {
int ret = PTR_ERR(ctx->m2m_ctx);
@@ -932,6 +914,8 @@ static struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
.job_abort = job_abort,
+ .lock = m2mtest_lock,
+ .unlock = m2mtest_unlock,
};
static int m2mtest_probe(struct platform_device *pdev)
@@ -990,6 +974,7 @@ static int m2mtest_probe(struct platform_device *pdev)
return 0;
+ v4l2_m2m_release(dev->m2m_dev);
err_m2m:
video_unregister_device(dev->vfd);
rel_vdev:
diff --git a/drivers/media/video/noon010pc30.c b/drivers/media/video/noon010pc30.c
new file mode 100644
index 000000000000..35f722a88f76
--- /dev/null
+++ b/drivers/media/video/noon010pc30.c
@@ -0,0 +1,792 @@
+/*
+ * Driver for SiliconFile NOON010PC30 CIF (1/11") Image Sensor with ISP
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * Initial register configuration based on a driver authored by
+ * HeungJun Kim <riverful.kim@samsung.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <media/noon010pc30.h>
+#include <media/v4l2-chip-ident.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable module debug trace. Set to 1 to enable.");
+
+#define MODULE_NAME "NOON010PC30"
+
+/*
+ * Register offsets within a page
+ * b15..b8 - page id, b7..b0 - register address
+ */
+#define POWER_CTRL_REG 0x0001
+#define PAGEMODE_REG 0x03
+#define DEVICE_ID_REG 0x0004
+#define NOON010PC30_ID 0x86
+#define VDO_CTL_REG(n) (0x0010 + (n))
+#define SYNC_CTL_REG 0x0012
+/* Window size and position */
+#define WIN_ROWH_REG 0x0013
+#define WIN_ROWL_REG 0x0014
+#define WIN_COLH_REG 0x0015
+#define WIN_COLL_REG 0x0016
+#define WIN_HEIGHTH_REG 0x0017
+#define WIN_HEIGHTL_REG 0x0018
+#define WIN_WIDTHH_REG 0x0019
+#define WIN_WIDTHL_REG 0x001A
+#define HBLANKH_REG 0x001B
+#define HBLANKL_REG 0x001C
+#define VSYNCH_REG 0x001D
+#define VSYNCL_REG 0x001E
+/* VSYNC control */
+#define VS_CTL_REG(n) (0x00A1 + (n))
+/* page 1 */
+#define ISP_CTL_REG(n) (0x0110 + (n))
+#define YOFS_REG 0x0119
+#define DARK_YOFS_REG 0x011A
+#define SAT_CTL_REG 0x0120
+#define BSAT_REG 0x0121
+#define RSAT_REG 0x0122
+/* Color correction */
+#define CMC_CTL_REG 0x0130
+#define CMC_OFSGH_REG 0x0133
+#define CMC_OFSGL_REG 0x0135
+#define CMC_SIGN_REG 0x0136
+#define CMC_GOFS_REG 0x0137
+#define CMC_COEF_REG(n) (0x0138 + (n))
+#define CMC_OFS_REG(n) (0x0141 + (n))
+/* Gamma correction */
+#define GMA_CTL_REG 0x0160
+#define GMA_COEF_REG(n) (0x0161 + (n))
+/* Lens Shading */
+#define LENS_CTRL_REG 0x01D0
+#define LENS_XCEN_REG 0x01D1
+#define LENS_YCEN_REG 0x01D2
+#define LENS_RC_REG 0x01D3
+#define LENS_GC_REG 0x01D4
+#define LENS_BC_REG 0x01D5
+#define L_AGON_REG 0x01D6
+#define L_AGOFF_REG 0x01D7
+/* Page 3 - Auto Exposure */
+#define AE_CTL_REG(n) (0x0310 + (n))
+#define AE_CTL9_REG 0x032C
+#define AE_CTL10_REG 0x032D
+#define AE_YLVL_REG 0x031C
+#define AE_YTH_REG(n) (0x031D + (n))
+#define AE_WGT_REG 0x0326
+#define EXP_TIMEH_REG 0x0333
+#define EXP_TIMEM_REG 0x0334
+#define EXP_TIMEL_REG 0x0335
+#define EXP_MMINH_REG 0x0336
+#define EXP_MMINL_REG 0x0337
+#define EXP_MMAXH_REG 0x0338
+#define EXP_MMAXM_REG 0x0339
+#define EXP_MMAXL_REG 0x033A
+/* Page 4 - Auto White Balance */
+#define AWB_CTL_REG(n) (0x0410 + (n))
+#define AWB_ENABE 0x80
+#define AWB_WGHT_REG 0x0419
+#define BGAIN_PAR_REG(n) (0x044F + (n))
+/* Manual white balance, when AWB_CTL2[0]=1 */
+#define MWB_RGAIN_REG 0x0466
+#define MWB_BGAIN_REG 0x0467
+
+/* The token to mark an array end */
+#define REG_TERM 0xFFFF
+
+struct noon010_format {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+ u16 ispctl1_reg;
+};
+
+struct noon010_frmsize {
+ u16 width;
+ u16 height;
+ int vid_ctl1;
+};
+
+static const char * const noon010_supply_name[] = {
+ "vdd_core", "vddio", "vdda"
+};
+
+#define NOON010_NUM_SUPPLIES ARRAY_SIZE(noon010_supply_name)
+
+struct noon010_info {
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ const struct noon010pc30_platform_data *pdata;
+ const struct noon010_format *curr_fmt;
+ const struct noon010_frmsize *curr_win;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ unsigned int power:1;
+ u8 i2c_reg_page;
+ struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
+ u32 gpio_nreset;
+ u32 gpio_nstby;
+};
+
+struct i2c_regval {
+ u16 addr;
+ u16 val;
+};
+
+/* Supported resolutions. */
+static const struct noon010_frmsize noon010_sizes[] = {
+ {
+ .width = 352,
+ .height = 288,
+ .vid_ctl1 = 0,
+ }, {
+ .width = 176,
+ .height = 144,
+ .vid_ctl1 = 0x10,
+ }, {
+ .width = 88,
+ .height = 72,
+ .vid_ctl1 = 0x20,
+ },
+};
+
+/* Supported pixel formats. */
+static const struct noon010_format noon010_formats[] = {
+ {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x03,
+ }, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x02,
+ }, {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0,
+ }, {
+ .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x01,
+ }, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x40,
+ },
+};
+
+static const struct i2c_regval noon010_base_regs[] = {
+ { WIN_COLL_REG, 0x06 }, { HBLANKL_REG, 0x7C },
+ /* Color correction and saturation */
+ { ISP_CTL_REG(0), 0x30 }, { ISP_CTL_REG(2), 0x30 },
+ { YOFS_REG, 0x80 }, { DARK_YOFS_REG, 0x04 },
+ { SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 },
+ { CMC_CTL_REG, 0x0F }, { CMC_OFSGH_REG, 0x3C },
+ { CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x3F },
+ { CMC_COEF_REG(0), 0x79 }, { CMC_OFS_REG(0), 0x00 },
+ { CMC_COEF_REG(1), 0x39 }, { CMC_OFS_REG(1), 0x00 },
+ { CMC_COEF_REG(2), 0x00 }, { CMC_OFS_REG(2), 0x00 },
+ { CMC_COEF_REG(3), 0x11 }, { CMC_OFS_REG(3), 0x8B },
+ { CMC_COEF_REG(4), 0x65 }, { CMC_OFS_REG(4), 0x07 },
+ { CMC_COEF_REG(5), 0x14 }, { CMC_OFS_REG(5), 0x04 },
+ { CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x9C },
+ { CMC_COEF_REG(7), 0x33 }, { CMC_OFS_REG(7), 0x89 },
+ { CMC_COEF_REG(8), 0x74 }, { CMC_OFS_REG(8), 0x25 },
+ /* Automatic white balance */
+ { AWB_CTL_REG(0), 0x78 }, { AWB_CTL_REG(1), 0x2E },
+ { AWB_CTL_REG(2), 0x20 }, { AWB_CTL_REG(3), 0x85 },
+ /* Auto exposure */
+ { AE_CTL_REG(0), 0xDC }, { AE_CTL_REG(1), 0x81 },
+ { AE_CTL_REG(2), 0x30 }, { AE_CTL_REG(3), 0xA5 },
+ { AE_CTL_REG(4), 0x40 }, { AE_CTL_REG(5), 0x51 },
+ { AE_CTL_REG(6), 0x33 }, { AE_CTL_REG(7), 0x7E },
+ { AE_CTL9_REG, 0x00 }, { AE_CTL10_REG, 0x02 },
+ { AE_YLVL_REG, 0x44 }, { AE_YTH_REG(0), 0x34 },
+ { AE_YTH_REG(1), 0x30 }, { AE_WGT_REG, 0xD5 },
+ /* Lens shading compensation */
+ { LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 },
+ { LENS_YCEN_REG, 0x70 }, { LENS_RC_REG, 0x53 },
+ { LENS_GC_REG, 0x40 }, { LENS_BC_REG, 0x3E },
+ { REG_TERM, 0 },
+};
+
+static inline struct noon010_info *to_noon010(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct noon010_info, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct noon010_info, hdl)->sd;
+}
+
+static inline int set_i2c_page(struct noon010_info *info,
+ struct i2c_client *client, unsigned int reg)
+{
+ u32 page = reg >> 8 & 0xFF;
+ int ret = 0;
+
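+ /* Only switch pages when needed; PAGEMODE_REG (0x03) itself is reachable from any page */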
+ if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
+ ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
+ if (!ret)
+ info->i2c_reg_page = page;
+ }
+ return ret;
+}
+
+static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct noon010_info *info = to_noon010(sd);
+ int ret = set_i2c_page(info, client, reg_addr);
+
+ if (ret)
+ return ret;
+ return i2c_smbus_read_byte_data(client, reg_addr & 0xFF);
+}
+
+static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct noon010_info *info = to_noon010(sd);
+ int ret = set_i2c_page(info, client, reg_addr);
+
+ if (ret)
+ return ret;
+ return i2c_smbus_write_byte_data(client, reg_addr & 0xFF, val);
+}
+
+static inline int noon010_bulk_write_reg(struct v4l2_subdev *sd,
+ const struct i2c_regval *msg)
+{
+ while (msg->addr != REG_TERM) {
+ int ret = cam_i2c_write(sd, msg->addr, msg->val);
+
+ if (ret)
+ return ret;
+ msg++;
+ }
+ return 0;
+}
+
+/* Device reset and sleep mode control */
+static int noon010_power_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep)
+{
+ struct noon010_info *info = to_noon010(sd);
+ u8 reg = sleep ? 0xF1 : 0xF0;
+ int ret = 0;
+
+ if (reset)
+ ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
+ if (!ret) {
+ ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
+ if (reset && !ret)
+ info->i2c_reg_page = -1;
+ }
+ return ret;
+}
+
+/* Automatic white balance control */
+static int noon010_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ ret = cam_i2c_write(sd, AWB_CTL_REG(1), on ? 0x2E : 0x2F);
+ if (!ret)
+ ret = cam_i2c_write(sd, AWB_CTL_REG(0), on ? 0xFB : 0x7B);
+ return ret;
+}
+
+static int noon010_set_flip(struct v4l2_subdev *sd, int hflip, int vflip)
+{
+ struct noon010_info *info = to_noon010(sd);
+ int reg, ret;
+
+ reg = cam_i2c_read(sd, VDO_CTL_REG(1));
+ if (reg < 0)
+ return reg;
+
+ reg &= 0x7C;
+ if (hflip)
+ reg |= 0x01;
+ if (vflip)
+ reg |= 0x02;
+
+ ret = cam_i2c_write(sd, VDO_CTL_REG(1), reg | 0x80);
+ if (!ret) {
+ info->hflip = hflip;
+ info->vflip = vflip;
+ }
+ return ret;
+}
+
+/* Configure resolution and color format */
+static int noon010_set_params(struct v4l2_subdev *sd)
+{
+ struct noon010_info *info = to_noon010(sd);
+ int ret;
+
+ if (!info->curr_win)
+ return -EINVAL;
+
+ ret = cam_i2c_write(sd, VDO_CTL_REG(0), info->curr_win->vid_ctl1);
+
+ if (!ret && info->curr_fmt)
+ ret = cam_i2c_write(sd, ISP_CTL_REG(0),
+ info->curr_fmt->ispctl1_reg);
+ return ret;
+}
+
+/* Find nearest matching image pixel size. */
+static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int min_err = ~0;
+ int i = ARRAY_SIZE(noon010_sizes);
+ const struct noon010_frmsize *fsize = &noon010_sizes[0],
+ *match = NULL;
+
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+ if (match) {
+ mf->width = match->width;
+ mf->height = match->height;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int power_enable(struct noon010_info *info)
+{
+ int ret;
+
+ if (info->power) {
+ v4l2_info(&info->sd, "%s: sensor is already on\n", __func__);
+ return 0;
+ }
+
+ if (gpio_is_valid(info->gpio_nstby))
+ gpio_set_value(info->gpio_nstby, 0);
+
+ if (gpio_is_valid(info->gpio_nreset))
+ gpio_set_value(info->gpio_nreset, 0);
+
+ ret = regulator_bulk_enable(NOON010_NUM_SUPPLIES, info->supply);
+ if (ret)
+ return ret;
+
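+ /* Power-up sequence: release the reset and standby lines, then pulse reset once more */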
+ if (gpio_is_valid(info->gpio_nreset)) {
+ msleep(50);
+ gpio_set_value(info->gpio_nreset, 1);
+ }
+ if (gpio_is_valid(info->gpio_nstby)) {
+ udelay(1000);
+ gpio_set_value(info->gpio_nstby, 1);
+ }
+ if (gpio_is_valid(info->gpio_nreset)) {
+ udelay(1000);
+ gpio_set_value(info->gpio_nreset, 0);
+ msleep(100);
+ gpio_set_value(info->gpio_nreset, 1);
+ msleep(20);
+ }
+ info->power = 1;
+
+ v4l2_dbg(1, debug, &info->sd, "%s: sensor is on\n", __func__);
+ return 0;
+}
+
+static int power_disable(struct noon010_info *info)
+{
+ int ret;
+
+ if (!info->power) {
+ v4l2_info(&info->sd, "%s: sensor is already off\n", __func__);
+ return 0;
+ }
+
+ ret = regulator_bulk_disable(NOON010_NUM_SUPPLIES, info->supply);
+ if (ret)
+ return ret;
+
+ if (gpio_is_valid(info->gpio_nstby))
+ gpio_set_value(info->gpio_nstby, 0);
+
+ if (gpio_is_valid(info->gpio_nreset))
+ gpio_set_value(info->gpio_nreset, 0);
+
+ info->power = 0;
+
+ v4l2_dbg(1, debug, &info->sd, "%s: sensor is off\n", __func__);
+
+ return 0;
+}
+
+static int noon010_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+
+ v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
+ __func__, ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ return noon010_enable_autowhitebalance(sd, ctrl->val);
+ case V4L2_CID_BLUE_BALANCE:
+ return cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
+ case V4L2_CID_RED_BALANCE:
+ return cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int noon010_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (!code || index >= ARRAY_SIZE(noon010_formats))
+ return -EINVAL;
+
+ *code = noon010_formats[index].code;
+ return 0;
+}
+
+static int noon010_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+{
+ struct noon010_info *info = to_noon010(sd);
+ int ret;
+
+ if (!mf)
+ return -EINVAL;
+
+ if (!info->curr_win || !info->curr_fmt) {
+ ret = noon010_set_params(sd);
+ if (ret)
+ return ret;
+ }
+
+ mf->width = info->curr_win->width;
+ mf->height = info->curr_win->height;
+ mf->code = info->curr_fmt->code;
+ mf->colorspace = info->curr_fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/* Return nearest media bus frame format. */
+static const struct noon010_format *try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ int i = ARRAY_SIZE(noon010_formats);
+
+ noon010_try_frame_size(mf);
+
+ while (i--)
+ if (mf->code == noon010_formats[i].code)
+ break;
+
+ mf->code = noon010_formats[i].code;
+
+ return &noon010_formats[i];
+}
+
+static int noon010_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ if (!sd || !mf)
+ return -EINVAL;
+
+ try_fmt(sd, mf);
+ return 0;
+}
+
+static int noon010_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct noon010_info *info = to_noon010(sd);
+
+ if (!sd || !mf)
+ return -EINVAL;
+
+ info->curr_fmt = try_fmt(sd, mf);
+
+ return noon010_set_params(sd);
+}
+
+static int noon010_base_config(struct v4l2_subdev *sd)
+{
+ struct noon010_info *info = to_noon010(sd);
+ int ret;
+
+ ret = noon010_bulk_write_reg(sd, noon010_base_regs);
+ if (!ret) {
+ info->curr_fmt = &noon010_formats[0];
+ info->curr_win = &noon010_sizes[0];
+ ret = noon010_set_params(sd);
+ }
+ if (!ret)
+ ret = noon010_set_flip(sd, 1, 0);
+ if (!ret)
+ ret = noon010_power_ctrl(sd, false, false);
+
+ /* sync the handler and the registers state */
+ v4l2_ctrl_handler_setup(&to_noon010(sd)->hdl);
+ return ret;
+}
+
+static int noon010_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct noon010_info *info = to_noon010(sd);
+ const struct noon010pc30_platform_data *pdata = info->pdata;
+ int ret = 0;
+
+ if (WARN(pdata == NULL, "No platform data!\n"))
+ return -ENOMEM;
+
+ if (on) {
+ ret = power_enable(info);
+ if (ret)
+ return ret;
+ ret = noon010_base_config(sd);
+ } else {
+ noon010_power_ctrl(sd, false, true);
+ ret = power_disable(info);
+ info->curr_win = NULL;
+ info->curr_fmt = NULL;
+ }
+
+ return ret;
+}
+
+static int noon010_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip,
+ V4L2_IDENT_NOON010PC30, 0);
+}
+
+static int noon010_log_status(struct v4l2_subdev *sd)
+{
+ struct noon010_info *info = to_noon010(sd);
+
+ v4l2_ctrl_handler_log_status(&info->hdl, sd->name);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
+ .s_ctrl = noon010_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops noon010_core_ops = {
+ .g_chip_ident = noon010_g_chip_ident,
+ .s_power = noon010_s_power,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .log_status = noon010_log_status,
+};
+
+static const struct v4l2_subdev_video_ops noon010_video_ops = {
+ .g_mbus_fmt = noon010_g_fmt,
+ .s_mbus_fmt = noon010_s_fmt,
+ .try_mbus_fmt = noon010_try_fmt,
+ .enum_mbus_fmt = noon010_enum_fmt,
+};
+
+static const struct v4l2_subdev_ops noon010_ops = {
+ .core = &noon010_core_ops,
+ .video = &noon010_video_ops,
+};
+
+/* Return 0 if NOON010PC30L sensor type was detected or -ENODEV otherwise. */
+static int noon010_detect(struct i2c_client *client, struct noon010_info *info)
+{
+ int ret;
+
+ ret = power_enable(info);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C read failed: 0x%X\n", ret);
+
+ power_disable(info);
+
+ return ret == NOON010PC30_ID ? 0 : -ENODEV;
+}
+
+static int noon010_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct noon010_info *info;
+ struct v4l2_subdev *sd;
+ const struct noon010pc30_platform_data *pdata
+ = client->dev.platform_data;
+ int ret;
+ int i;
+
+ if (!pdata) {
+ dev_err(&client->dev, "No platform data!\n");
+ return -EIO;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sd = &info->sd;
+ strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
+ v4l2_i2c_subdev_init(sd, client, &noon010_ops);
+
+ v4l2_ctrl_handler_init(&info->hdl, 3);
+
+ v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 127, 1, 64);
+
+ sd->ctrl_handler = &info->hdl;
+
+ ret = info->hdl.error;
+ if (ret)
+ goto np_err;
+
+ info->pdata = client->dev.platform_data;
+ info->i2c_reg_page = -1;
+ info->gpio_nreset = -EINVAL;
+ info->gpio_nstby = -EINVAL;
+
+ if (gpio_is_valid(pdata->gpio_nreset)) {
+ ret = gpio_request(pdata->gpio_nreset, "NOON010PC30 NRST");
+ if (ret) {
+ dev_err(&client->dev, "GPIO request error: %d\n", ret);
+ goto np_err;
+ }
+ info->gpio_nreset = pdata->gpio_nreset;
+ gpio_direction_output(info->gpio_nreset, 0);
+ gpio_export(info->gpio_nreset, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_nstby)) {
+ ret = gpio_request(pdata->gpio_nstby, "NOON010PC30 NSTBY");
+ if (ret) {
+ dev_err(&client->dev, "GPIO request error: %d\n", ret);
+ goto np_gpio_err;
+ }
+ info->gpio_nstby = pdata->gpio_nstby;
+ gpio_direction_output(info->gpio_nstby, 0);
+ gpio_export(info->gpio_nstby, 0);
+ }
+
+ for (i = 0; i < NOON010_NUM_SUPPLIES; i++)
+ info->supply[i].supply = noon010_supply_name[i];
+
+ ret = regulator_bulk_get(&client->dev, NOON010_NUM_SUPPLIES,
+ info->supply);
+ if (ret)
+ goto np_reg_err;
+
+ ret = noon010_detect(client, info);
+ if (!ret)
+ return 0;
+
+ /* the sensor detection failed */
+ regulator_bulk_free(NOON010_NUM_SUPPLIES, info->supply);
+np_reg_err:
+ if (gpio_is_valid(info->gpio_nstby))
+ gpio_free(info->gpio_nstby);
+np_gpio_err:
+ if (gpio_is_valid(info->gpio_nreset))
+ gpio_free(info->gpio_nreset);
+np_err:
+ v4l2_ctrl_handler_free(&info->hdl);
+ v4l2_device_unregister_subdev(sd);
+ kfree(info);
+ return ret;
+}
+
+static int noon010_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct noon010_info *info = to_noon010(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&info->hdl);
+
+ regulator_bulk_free(NOON010_NUM_SUPPLIES, info->supply);
+
+ if (gpio_is_valid(info->gpio_nreset))
+ gpio_free(info->gpio_nreset);
+
+ if (gpio_is_valid(info->gpio_nstby))
+ gpio_free(info->gpio_nstby);
+
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id noon010_id[] = {
+ { MODULE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, noon010_id);
+
+
+static struct i2c_driver noon010_i2c_driver = {
+ .driver = {
+ .name = MODULE_NAME
+ },
+ .probe = noon010_probe,
+ .remove = noon010_remove,
+ .id_table = noon010_id,
+};
+
+static int __init noon010_init(void)
+{
+ return i2c_add_driver(&noon010_i2c_driver);
+}
+
+static void __exit noon010_exit(void)
+{
+ i2c_del_driver(&noon010_i2c_driver);
+}
+
+module_init(noon010_init);
+module_exit(noon010_exit);
+
+MODULE_DESCRIPTION("Siliconfile NOON010PC30 camera driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 2f500809f53d..5159cc8a0e1c 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -27,13 +27,13 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf-core.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *isp_info)
+ struct s5p_fimc_isp_info *isp_info)
{
struct i2c_adapter *i2c_adap;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
@@ -86,8 +86,8 @@ static void fimc_subdev_unregister(struct fimc_dev *fimc)
static int fimc_subdev_attach(struct fimc_dev *fimc, int index)
{
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
- struct s3c_platform_fimc *pdata = fimc->pdata;
- struct s3c_fimc_isp_info *isp_info;
+ struct s5p_platform_fimc *pdata = fimc->pdata;
+ struct s5p_fimc_isp_info *isp_info;
struct v4l2_subdev *sd;
int i;
@@ -113,26 +113,43 @@ static int fimc_subdev_attach(struct fimc_dev *fimc, int index)
return -ENODEV;
}
-static int fimc_isp_subdev_init(struct fimc_dev *fimc, int index)
+static int fimc_isp_subdev_init(struct fimc_dev *fimc, unsigned int index)
{
- struct s3c_fimc_isp_info *isp_info;
+ struct s5p_fimc_isp_info *isp_info;
int ret;
+ if (index >= FIMC_MAX_CAMIF_CLIENTS)
+ return -EINVAL;
+
+ isp_info = fimc->pdata->isp_info[index];
+ if (!isp_info)
+ return -EINVAL;
+
+ if (isp_info->clk_frequency)
+ clk_set_rate(fimc->clock[CLK_CAM], isp_info->clk_frequency);
+
+ ret = clk_enable(fimc->clock[CLK_CAM]);
+ if (ret)
+ return ret;
+
ret = fimc_subdev_attach(fimc, index);
if (ret)
return ret;
- isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
ret = fimc_hw_set_camera_polarity(fimc, isp_info);
- if (!ret) {
- ret = v4l2_subdev_call(fimc->vid_cap.sd, core,
- s_power, 1);
- if (!ret)
- return ret;
- }
+ if (ret)
+ return ret;
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 1);
+ if (!ret)
+ return ret;
+
+ /* Enabling power failed, so unregister the subdev */
fimc_subdev_unregister(fimc);
- err("ISP initialization failed: %d", ret);
+
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "ISP initialization failed: %d\n",
+ ret);
+
return ret;
}
@@ -141,7 +158,7 @@ static int fimc_isp_subdev_init(struct fimc_dev *fimc, int index)
* Locking: The caller holds fimc->slock spinlock.
*/
int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
- struct fimc_vid_buffer *fimc_vb)
+ struct fimc_vid_buffer *fimc_vb)
{
struct fimc_vid_cap *cap = &fimc->vid_cap;
struct fimc_ctx *ctx = cap->ctx;
@@ -149,7 +166,7 @@ int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
BUG_ON(!fimc || !fimc_vb);
- ret = fimc_prepare_addr(ctx, fimc_vb, &ctx->d_frame,
+ ret = fimc_prepare_addr(ctx, &fimc_vb->vb, &ctx->d_frame,
&fimc_vb->paddr);
if (ret)
return ret;
@@ -174,7 +191,7 @@ static int fimc_stop_capture(struct fimc_dev *fimc)
{
unsigned long flags;
struct fimc_vid_cap *cap;
- int ret;
+ struct fimc_vid_buffer *buf;
cap = &fimc->vid_cap;
@@ -187,24 +204,213 @@ static int fimc_stop_capture(struct fimc_dev *fimc)
spin_unlock_irqrestore(&fimc->slock, flags);
wait_event_timeout(fimc->irq_queue,
- test_bit(ST_CAPT_SHUT, &fimc->state),
+ !test_bit(ST_CAPT_SHUT, &fimc->state),
FIMC_SHUTDOWN_TIMEOUT);
- ret = v4l2_subdev_call(cap->sd, video, s_stream, 0);
- if (ret)
- v4l2_err(&fimc->vid_cap.v4l2_dev, "s_stream(0) failed\n");
+ v4l2_subdev_call(cap->sd, video, s_stream, 0);
spin_lock_irqsave(&fimc->slock, flags);
fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_PEND |
1 << ST_CAPT_STREAM);
fimc->vid_cap.active_buf_cnt = 0;
+
+ /* Release buffers that were enqueued in the driver by videobuf2. */
+ while (!list_empty(&cap->pending_buf_q)) {
+ buf = pending_queue_pop(cap);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&cap->active_buf_q)) {
+ buf = active_queue_pop(cap);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
spin_unlock_irqrestore(&fimc->slock, flags);
dbg("state: 0x%lx", fimc->state);
return 0;
}
+static int start_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct s5p_fimc_isp_info *isp_info;
+ int ret;
+
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ return ret;
+
+ ret = fimc_prepare_config(ctx, ctx->state);
+ if (ret)
+ return ret;
+
+ isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
+ fimc_hw_set_camera_type(fimc, isp_info);
+ fimc_hw_set_camera_source(fimc, isp_info);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ if (ctx->state & FIMC_PARAMS) {
+ ret = fimc_set_scaler_info(ctx);
+ if (ret) {
+ err("Scaler setup error");
+ return ret;
+ }
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ }
+
+ fimc_hw_set_output_path(ctx);
+ fimc_hw_set_out_dma(ctx);
+
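+ /* Reset the buffer queues and frame counters before streaming starts */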
+ INIT_LIST_HEAD(&fimc->vid_cap.pending_buf_q);
+ INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+ fimc->vid_cap.active_buf_cnt = 0;
+ fimc->vid_cap.frame_count = 0;
+ fimc->vid_cap.buf_index = fimc_hw_get_frame_index(fimc);
+
+ set_bit(ST_CAPT_PEND, &fimc->state);
+
+ return 0;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!fimc_capture_running(fimc) && !fimc_capture_pending(fimc)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ return fimc_stop_capture(fimc);
+}
+
+static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
+{
+ if (!fr || plane >= fr->fmt->memplanes)
+ return 0;
+
+ dbg("%s: w: %d. h: %d. depth[%d]: %d",
+ __func__, fr->width, fr->height, plane, fr->fmt->depth[plane]);
+
+ return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
+
+}
+
+static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned long sizes[],
+ void *allocators[])
+{
+ struct fimc_ctx *ctx = vq->drv_priv;
+ struct fimc_fmt *fmt = ctx->d_frame.fmt;
+ int i;
+
+ if (!fmt)
+ return -EINVAL;
+
+ *num_planes = fmt->memplanes;
+
+ dbg("%s, buffer count=%d, plane count=%d",
+ __func__, *num_buffers, *num_planes);
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ sizes[i] = get_plane_size(&ctx->d_frame, i);
+ dbg("plane: %u, plane_size: %lu", i, sizes[i]);
+ allocators[i] = ctx->fimc_dev->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ /* TODO: */
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_ctx *ctx = vq->drv_priv;
+ struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
+ int i;
+
+ if (!ctx->d_frame.fmt || vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
+ unsigned long size = get_plane_size(&ctx->d_frame, i);
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(v4l2_dev, "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_buffer *buf
+ = container_of(vb, struct fimc_vid_buffer, vb);
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_vid_cap_buf_queue(fimc, buf);
+
+ dbg("active_buf_cnt: %d", fimc->vid_cap.active_buf_cnt);
+
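+ /* Kick off the capture engine once enough buffers have been queued */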
+ if (vid_cap->active_buf_cnt >= vid_cap->reqbufs_count ||
+ vid_cap->active_buf_cnt >= FIMC_MAX_OUT_BUFS) {
+ if (!test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+ fimc_activate_capture(ctx);
+ dbg("");
+ }
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_lock(&ctx->fimc_dev->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_unlock(&ctx->fimc_dev->lock);
+}
+
+static struct vb2_ops fimc_capture_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_init = buffer_init,
+ .wait_prepare = fimc_unlock,
+ .wait_finish = fimc_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
static int fimc_capture_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
@@ -216,44 +422,36 @@ static int fimc_capture_open(struct file *file)
if (fimc_m2m_active(fimc))
return -EBUSY;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
if (++fimc->vid_cap.refcnt == 1) {
- ret = fimc_isp_subdev_init(fimc, -1);
+ ret = fimc_isp_subdev_init(fimc, 0);
if (ret) {
fimc->vid_cap.refcnt--;
- ret = -EIO;
+ return -EIO;
}
}
file->private_data = fimc->vid_cap.ctx;
- mutex_unlock(&fimc->lock);
- return ret;
+ return 0;
}
static int fimc_capture_close(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
if (--fimc->vid_cap.refcnt == 0) {
fimc_stop_capture(fimc);
-
- videobuf_stop(&fimc->vid_cap.vbq);
- videobuf_mmap_free(&fimc->vid_cap.vbq);
+ vb2_queue_release(&fimc->vid_cap.vbq);
v4l2_err(&fimc->vid_cap.v4l2_dev, "releasing ISP\n");
+
v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
+ clk_disable(fimc->clock[CLK_CAM]);
fimc_subdev_unregister(fimc);
}
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -262,32 +460,16 @@ static unsigned int fimc_capture_poll(struct file *file,
{
struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- int ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return POLLERR;
-
- ret = videobuf_poll_stream(file, &cap->vbq, wait);
- mutex_unlock(&fimc->lock);
-
- return ret;
+ return vb2_poll(&fimc->vid_cap.vbq, file, wait);
}
static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- int ret;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&cap->vbq, vma);
- mutex_unlock(&fimc->lock);
-
- return ret;
+ return vb2_mmap(&fimc->vid_cap.vbq, vma);
}
/* video device file operations */
@@ -310,7 +492,8 @@ static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE;
return 0;
}
@@ -351,57 +534,52 @@ static int sync_capture_fmt(struct fimc_ctx *ctx)
return 0;
}
-static int fimc_cap_s_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
+static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *frame;
- struct v4l2_pix_format *pix;
+ struct v4l2_pix_format_mplane *pix;
int ret;
+ int i;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- ret = fimc_vidioc_try_fmt(file, priv, f);
+ ret = fimc_vidioc_try_fmt_mplane(file, priv, f);
if (ret)
return ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- if (fimc_capture_active(fimc)) {
- ret = -EBUSY;
- goto sf_unlock;
- }
+ if (vb2_is_streaming(&fimc->vid_cap.vbq) || fimc_capture_active(fimc))
+ return -EBUSY;
frame = &ctx->d_frame;
- pix = &f->fmt.pix;
+ pix = &f->fmt.pix_mp;
frame->fmt = find_format(f, FMT_FLAGS_M2M | FMT_FLAGS_CAM);
if (!frame->fmt) {
err("fimc target format not found\n");
- ret = -EINVAL;
- goto sf_unlock;
+ return -EINVAL;
}
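+ /* Record the expected payload of each colour plane, derived from bytesperline and height */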
+ for (i = 0; i < frame->fmt->colplanes; i++)
+ frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height;
+
/* Output DMA frame pixel size and offsets. */
- frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_width = pix->plane_fmt[0].bytesperline * 8
+ / frame->fmt->depth[0];
frame->f_height = pix->height;
frame->width = pix->width;
frame->height = pix->height;
frame->o_width = pix->width;
frame->o_height = pix->height;
- frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
frame->offs_h = 0;
frame->offs_v = 0;
- ret = sync_capture_fmt(ctx);
-
ctx->state |= (FIMC_PARAMS | FIMC_DST_FMT);
-sf_unlock:
- mutex_unlock(&fimc->lock);
+ ret = sync_capture_fmt(ctx);
return ret;
}
@@ -409,8 +587,8 @@ static int fimc_cap_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
struct fimc_ctx *ctx = priv;
- struct s3c_platform_fimc *pldata = ctx->fimc_dev->pdata;
- struct s3c_fimc_isp_info *isp_info;
+ struct s5p_platform_fimc *pldata = ctx->fimc_dev->pdata;
+ struct s5p_fimc_isp_info *isp_info;
if (i->index >= FIMC_MAX_CAMIF_CLIENTS)
return -EINVAL;
@@ -429,34 +607,27 @@ static int fimc_cap_s_input(struct file *file, void *priv,
{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct s3c_platform_fimc *pdata = fimc->pdata;
- int ret;
+ struct s5p_platform_fimc *pdata = fimc->pdata;
if (fimc_capture_active(ctx->fimc_dev))
return -EBUSY;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ if (i >= FIMC_MAX_CAMIF_CLIENTS || !pdata->isp_info[i])
+ return -EINVAL;
- if (i >= FIMC_MAX_CAMIF_CLIENTS || !pdata->isp_info[i]) {
- ret = -EINVAL;
- goto si_unlock;
- }
if (fimc->vid_cap.sd) {
- ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
+ int ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
if (ret)
err("s_power failed: %d", ret);
+
+ clk_disable(fimc->clock[CLK_CAM]);
}
/* Release the attached sensor subdevice. */
fimc_subdev_unregister(fimc);
- ret = fimc_isp_subdev_init(fimc, i);
-
-si_unlock:
- mutex_unlock(&fimc->lock);
- return ret;
+ return fimc_isp_subdev_init(fimc, i);
}
static int fimc_cap_g_input(struct file *file, void *priv,
@@ -470,66 +641,20 @@ static int fimc_cap_g_input(struct file *file, void *priv,
}
static int fimc_cap_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
+ enum v4l2_buf_type type)
{
- struct s3c_fimc_isp_info *isp_info;
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- int ret = -EBUSY;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
if (fimc_capture_active(fimc) || !fimc->vid_cap.sd)
- goto s_unlock;
+ return -EBUSY;
if (!(ctx->state & FIMC_DST_FMT)) {
v4l2_err(&fimc->vid_cap.v4l2_dev, "Format is not set\n");
- ret = -EINVAL;
- goto s_unlock;
- }
-
- ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_stream, 1);
- if (ret && ret != -ENOIOCTLCMD)
- goto s_unlock;
-
- ret = fimc_prepare_config(ctx, ctx->state);
- if (ret)
- goto s_unlock;
-
- isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
- fimc_hw_set_camera_type(fimc, isp_info);
- fimc_hw_set_camera_source(fimc, isp_info);
- fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
-
- if (ctx->state & FIMC_PARAMS) {
- ret = fimc_set_scaler_info(ctx);
- if (ret) {
- err("Scaler setup error");
- goto s_unlock;
- }
- fimc_hw_set_input_path(ctx);
- fimc_hw_set_scaler(ctx);
- fimc_hw_set_target_format(ctx);
- fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx);
+ return -EINVAL;
}
- fimc_hw_set_output_path(ctx);
- fimc_hw_set_out_dma(ctx);
-
- INIT_LIST_HEAD(&fimc->vid_cap.pending_buf_q);
- INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
- fimc->vid_cap.active_buf_cnt = 0;
- fimc->vid_cap.frame_count = 0;
- fimc->vid_cap.buf_index = fimc_hw_get_frame_index(fimc);
-
- set_bit(ST_CAPT_PEND, &fimc->state);
- ret = videobuf_streamon(&fimc->vid_cap.vbq);
-
-s_unlock:
- mutex_unlock(&fimc->lock);
- return ret;
+ return vb2_streamon(&fimc->vid_cap.vbq, type);
}
static int fimc_cap_streamoff(struct file *file, void *priv,
@@ -537,46 +662,22 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&fimc->slock, flags);
- if (!fimc_capture_running(fimc) && !fimc_capture_pending(fimc)) {
- spin_unlock_irqrestore(&fimc->slock, flags);
- dbg("state: 0x%lx", fimc->state);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&fimc->slock, flags);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- fimc_stop_capture(fimc);
- ret = videobuf_streamoff(&cap->vbq);
- mutex_unlock(&fimc->lock);
- return ret;
+ return vb2_streamoff(&fimc->vid_cap.vbq, type);
}
static int fimc_cap_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
+ struct v4l2_requestbuffers *reqbufs)
{
struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
int ret;
- if (fimc_capture_active(ctx->fimc_dev))
- return -EBUSY;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
- ret = videobuf_reqbufs(&cap->vbq, reqbufs);
+ ret = vb2_reqbufs(&cap->vbq, reqbufs);
if (!ret)
cap->reqbufs_count = reqbufs->count;
- mutex_unlock(&fimc->lock);
return ret;
}
@@ -586,43 +687,23 @@ static int fimc_cap_querybuf(struct file *file, void *priv,
struct fimc_ctx *ctx = priv;
struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
- if (fimc_capture_active(ctx->fimc_dev))
- return -EBUSY;
-
- return videobuf_querybuf(&cap->vbq, buf);
+ return vb2_querybuf(&cap->vbq, buf);
}
static int fimc_cap_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- int ret;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- ret = videobuf_qbuf(&cap->vbq, buf);
-
- mutex_unlock(&fimc->lock);
- return ret;
+ struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+ return vb2_qbuf(&cap->vbq, buf);
}
static int fimc_cap_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = priv;
- int ret;
-
- if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
- return -ERESTARTSYS;
-
- ret = videobuf_dqbuf(&ctx->fimc_dev->vid_cap.vbq, buf,
+ return vb2_dqbuf(&ctx->fimc_dev->vid_cap.vbq, buf,
file->f_flags & O_NONBLOCK);
-
- mutex_unlock(&ctx->fimc_dev->lock);
- return ret;
}
static int fimc_cap_s_ctrl(struct file *file, void *priv,
@@ -631,9 +712,6 @@ static int fimc_cap_s_ctrl(struct file *file, void *priv,
struct fimc_ctx *ctx = priv;
int ret = -EINVAL;
- if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
- return -ERESTARTSYS;
-
/* Allow any controls but 90/270 rotation while streaming */
if (!fimc_capture_active(ctx->fimc_dev) ||
ctrl->id != V4L2_CID_ROTATE ||
@@ -648,8 +726,6 @@ static int fimc_cap_s_ctrl(struct file *file, void *priv,
if (ret == -EINVAL)
ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
core, s_ctrl, ctrl);
-
- mutex_unlock(&ctx->fimc_dev->lock);
return ret;
}
@@ -658,22 +734,18 @@ static int fimc_cap_cropcap(struct file *file, void *fh,
{
struct fimc_frame *f;
struct fimc_ctx *ctx = fh;
- struct fimc_dev *fimc = ctx->fimc_dev;
- if (cr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (cr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
f = &ctx->s_frame;
+
cr->bounds.left = 0;
cr->bounds.top = 0;
cr->bounds.width = f->o_width;
cr->bounds.height = f->o_height;
cr->defrect = cr->bounds;
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -681,19 +753,14 @@ static int fimc_cap_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_frame *f;
struct fimc_ctx *ctx = file->private_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
-
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
f = &ctx->s_frame;
+
cr->c.left = f->offs_h;
cr->c.top = f->offs_v;
cr->c.width = f->width;
cr->c.height = f->height;
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -712,41 +779,38 @@ static int fimc_cap_s_crop(struct file *file, void *fh,
if (ret)
return ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
if (!(ctx->state & FIMC_DST_FMT)) {
v4l2_err(&fimc->vid_cap.v4l2_dev,
"Capture color format not set\n");
- goto sc_unlock;
+ return -EINVAL; /* TODO: make sure this is the right value */
}
f = &ctx->s_frame;
/* Check for the pixel scaling ratio when cropping input image. */
- ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
+ ret = fimc_check_scaler_ratio(cr->c.width, cr->c.height,
+ ctx->d_frame.width, ctx->d_frame.height,
+ ctx->rotation);
if (ret) {
v4l2_err(&fimc->vid_cap.v4l2_dev, "Out of the scaler range");
- } else {
- ret = 0;
- f->offs_h = cr->c.left;
- f->offs_v = cr->c.top;
- f->width = cr->c.width;
- f->height = cr->c.height;
+ return ret;
}
-sc_unlock:
- mutex_unlock(&fimc->lock);
- return ret;
+ f->offs_h = cr->c.left;
+ f->offs_v = cr->c.top;
+ f->width = cr->c.width;
+ f->height = cr->c.height;
+
+ return 0;
}
static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_querycap = fimc_vidioc_querycap_capture,
- .vidioc_enum_fmt_vid_cap = fimc_vidioc_enum_fmt,
- .vidioc_try_fmt_vid_cap = fimc_vidioc_try_fmt,
- .vidioc_s_fmt_vid_cap = fimc_cap_s_fmt,
- .vidioc_g_fmt_vid_cap = fimc_vidioc_g_fmt,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_vidioc_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_vidioc_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_vidioc_g_fmt_mplane,
.vidioc_reqbufs = fimc_cap_reqbufs,
.vidioc_querybuf = fimc_cap_querybuf,
@@ -770,6 +834,7 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_g_input = fimc_cap_g_input,
};
+/* fimc->lock must already be initialized */
int fimc_register_capture_device(struct fimc_dev *fimc)
{
struct v4l2_device *v4l2_dev = &fimc->vid_cap.v4l2_dev;
@@ -777,6 +842,8 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
struct fimc_vid_cap *vid_cap;
struct fimc_ctx *ctx;
struct v4l2_format f;
+ struct fimc_frame *fr;
+ struct vb2_queue *q;
int ret;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
@@ -788,8 +855,12 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
ctx->out_path = FIMC_DMA;
ctx->state = FIMC_CTX_CAP;
- f.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
- ctx->d_frame.fmt = find_format(&f, FMT_FLAGS_M2M);
+ /* Default format of the output frames */
+ f.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB32;
+ fr = &ctx->d_frame;
+ fr->fmt = find_format(&f, FMT_FLAGS_M2M);
+ fr->width = fr->f_width = fr->o_width = 640;
+ fr->height = fr->f_height = fr->o_height = 480;
if (!v4l2_dev->name[0])
snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
@@ -812,6 +883,7 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
vfd->ioctl_ops = &fimc_capture_ioctl_ops;
vfd->minor = -1;
vfd->release = video_device_release;
+ vfd->lock = &fimc->lock;
video_set_drvdata(vfd, fimc);
vid_cap = &fimc->vid_cap;
@@ -819,7 +891,7 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
vid_cap->active_buf_cnt = 0;
vid_cap->reqbufs_count = 0;
vid_cap->refcnt = 0;
- /* The default color format for image sensor. */
+ /* Default color format for image sensor */
vid_cap->fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
INIT_LIST_HEAD(&vid_cap->pending_buf_q);
@@ -827,10 +899,16 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
spin_lock_init(&ctx->slock);
vid_cap->ctx = ctx;
- videobuf_queue_dma_contig_init(&vid_cap->vbq, &fimc_qops,
- vid_cap->v4l2_dev.dev, &fimc->irqlock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct fimc_vid_buffer), (void *)ctx, NULL);
+ q = &fimc->vid_cap.vbq;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fimc->vid_cap.ctx;
+ q->ops = &fimc_capture_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct fimc_vid_buffer);
+
+ vb2_queue_init(q);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 817aa66627f6..cd8a3003f1aa 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -25,114 +25,141 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
-static char *fimc_clock_name[NUM_FIMC_CLOCKS] = { "sclk_fimc", "fimc" };
+static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+ "sclk_fimc", "fimc", "sclk_cam"
+};
static struct fimc_fmt fimc_formats[] = {
{
- .name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565X,
- .depth = 16,
- .color = S5P_FIMC_RGB565,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
- .flags = FMT_FLAGS_M2M,
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = { 16 },
+ .color = S5P_FIMC_RGB565,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = { 32 },
+ .color = S5P_FIMC_RGB666,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "XRGB-8-8-8-8, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = { 32 },
+ .color = S5P_FIMC_RGB888,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "BGR666",
- .fourcc = V4L2_PIX_FMT_BGR666,
- .depth = 32,
- .color = S5P_FIMC_RGB666,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = S5P_FIMC_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "XRGB-8-8-8-8, 32 bpp",
- .fourcc = V4L2_PIX_FMT_RGB32,
- .depth = 32,
- .color = S5P_FIMC_RGB888,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = S5P_FIMC_CBYCRY422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 packed, YCbYCr",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .color = S5P_FIMC_YCBYCR422,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
- .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = S5P_FIMC_CRYCBY422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 packed, CbYCrY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .color = S5P_FIMC_CBYCRY422,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
- .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = S5P_FIMC_YCRYCB422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 packed, CrYCbY",
- .fourcc = V4L2_PIX_FMT_VYUY,
- .depth = 16,
- .color = S5P_FIMC_CRYCBY422,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
- .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = { 12 },
+ .color = S5P_FIMC_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:2 packed, YCrYCb",
- .fourcc = V4L2_PIX_FMT_YVYU,
- .depth = 16,
- .color = S5P_FIMC_YCRYCB422,
- .buff_cnt = 1,
- .planes_cnt = 1,
- .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
- .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = { 16 },
+ .color = S5P_FIMC_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:2 planar, Y/Cb/Cr",
- .fourcc = V4L2_PIX_FMT_YUV422P,
- .depth = 12,
- .color = S5P_FIMC_YCBCR422,
- .buff_cnt = 1,
- .planes_cnt = 3,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = { 16 },
+ .color = S5P_FIMC_YCRYCB422,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:2 planar, Y/CbCr",
- .fourcc = V4L2_PIX_FMT_NV16,
- .depth = 16,
- .color = S5P_FIMC_YCBCR422,
- .buff_cnt = 1,
- .planes_cnt = 2,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = { 12 },
+ .color = S5P_FIMC_YCBCR420,
+ .memplanes = 1,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:2 planar, Y/CrCb",
- .fourcc = V4L2_PIX_FMT_NV61,
- .depth = 16,
- .color = S5P_FIMC_RGB565,
- .buff_cnt = 1,
- .planes_cnt = 2,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = { 12 },
+ .color = S5P_FIMC_YCBCR420,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 planar, YCbCr",
- .fourcc = V4L2_PIX_FMT_YUV420,
- .depth = 12,
- .color = S5P_FIMC_YCBCR420,
- .buff_cnt = 1,
- .planes_cnt = 3,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .color = S5P_FIMC_YCBCR420,
+ .depth = { 8, 4 },
+ .memplanes = 2,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 planar, Y/CbCr",
- .fourcc = V4L2_PIX_FMT_NV12,
- .depth = 12,
- .color = S5P_FIMC_YCBCR420,
- .buff_cnt = 1,
- .planes_cnt = 2,
- .flags = FMT_FLAGS_M2M,
+ .name = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .color = S5P_FIMC_YCBCR420,
+ .depth = { 8, 2, 2 },
+ .memplanes = 3,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .color = S5P_FIMC_YCBCR420,
+ .depth = { 8, 4 },
+ .memplanes = 2,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
},
};
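A minimal, self-contained sketch (not part of the patch) of what the new per-plane depth[] field encodes: each memory plane's bits per pixel, so a plane's byte size follows directly from the frame dimensions. The 640x480 resolution and the { 8, 4 } depths (matching the V4L2_PIX_FMT_NV12M entry above) are example values only.

#include <stdio.h>

int main(void)
{
	unsigned int width = 640, height = 480;
	unsigned char depth[2] = { 8, 4 };	/* Y plane, interleaved CbCr plane */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long size = ((unsigned long)width * height * depth[i]) >> 3;
		printf("plane %d: %lu bytes\n", i, size);	/* 307200, then 153600 */
	}
	return 0;
}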
@@ -173,24 +200,21 @@ static struct v4l2_queryctrl *get_ctrl(int id)
return NULL;
}
-int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
+int fimc_check_scaler_ratio(int sw, int sh, int dw, int dh, int rot)
{
- if (r->width > f->width) {
- if (f->width > (r->width * SCALER_MAX_HRATIO))
- return -EINVAL;
- } else {
- if ((f->width * SCALER_MAX_HRATIO) < r->width)
- return -EINVAL;
- }
+ int tx, ty;
- if (r->height > f->height) {
- if (f->height > (r->height * SCALER_MAX_VRATIO))
- return -EINVAL;
+ if (rot == 90 || rot == 270) {
+ ty = dw;
+ tx = dh;
} else {
- if ((f->height * SCALER_MAX_VRATIO) < r->height)
- return -EINVAL;
+ tx = dw;
+ ty = dh;
}
+ if ((sw >= SCALER_MAX_HRATIO * tx) || (sh >= SCALER_MAX_VRATIO * ty))
+ return -EINVAL;
+
return 0;
}
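A hedged, standalone sketch of the check implemented by the new fimc_check_scaler_ratio() above: a 90 or 270 degree rotation swaps the target axes before the maximum-downscaling test. The limits of 64 mirror SCALER_MAX_HRATIO/SCALER_MAX_VRATIO from fimc-core.h; the resolutions are example values.

#include <stdio.h>

#define SCALER_MAX_HRATIO	64
#define SCALER_MAX_VRATIO	64

static int check_ratio(int sw, int sh, int dw, int dh, int rot)
{
	int tx = dw, ty = dh;

	if (rot == 90 || rot == 270) {	/* rotation swaps the target axes */
		tx = dh;
		ty = dw;
	}
	if (sw >= SCALER_MAX_HRATIO * tx || sh >= SCALER_MAX_VRATIO * ty)
		return -1;		/* more than 64x downscaling requested */
	return 0;
}

int main(void)
{
	printf("%d\n", check_ratio(1920, 1080, 64, 64, 0));	/* 0: within range */
	printf("%d\n", check_ratio(1920, 1080, 16, 16, 90));	/* -1: out of range */
	return 0;
}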
@@ -221,6 +245,7 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
struct fimc_frame *d_frame = &ctx->d_frame;
+ struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
int tx, ty, sx, sy;
int ret;
@@ -259,8 +284,14 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
sc->pre_dst_width = sx / sc->pre_hratio;
sc->pre_dst_height = sy / sc->pre_vratio;
- sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
- sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+ if (variant->has_mainscaler_ext) {
+ sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+ } else {
+ sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+
+ }
sc->scaleup_h = (tx >= sx) ? 1 : 0;
sc->scaleup_v = (ty >= sy) ? 1 : 0;
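The hunk above selects between two fixed-point encodings of the main scaler ratio: 8 fractional bits on older variants, 14 on those with the extended main scaler. A small illustrative sketch (example dimensions only) of the two encodings of the same 2:1 horizontal downscale.

#include <stdio.h>

int main(void)
{
	unsigned int sx = 1280, tx = 640, hfactor = 0;	/* prescaler factor of 0 */
	unsigned int ratio8  = (sx << 8)  / (tx << hfactor);
	unsigned int ratio14 = (sx << 14) / (tx << hfactor);

	printf("8-bit: 0x%X, 14-bit: 0x%X\n", ratio8, ratio14);	/* 0x200, 0x8000 */
	return 0;
}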
@@ -276,6 +307,23 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
return 0;
}
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ if (!fimc_m2m_pending(fimc))
+ return 0;
+
+ set_bit(ST_M2M_SHUT, &fimc->state);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_M2M_SHUT, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ return 0;
+}
+
static void fimc_capture_handler(struct fimc_dev *fimc)
{
struct fimc_vid_cap *cap = &fimc->vid_cap;
@@ -283,7 +331,7 @@ static void fimc_capture_handler(struct fimc_dev *fimc)
if (!list_empty(&cap->active_buf_q)) {
v_buf = active_queue_pop(cap);
- fimc_buf_finish(fimc, v_buf);
+ vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
}
if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
@@ -300,10 +348,6 @@ static void fimc_capture_handler(struct fimc_dev *fimc)
dbg("hw ptr: %d, sw ptr: %d",
fimc_hw_get_frame_index(fimc), cap->buf_index);
- spin_lock(&fimc->irqlock);
- v_buf->vb.state = VIDEOBUF_ACTIVE;
- spin_unlock(&fimc->irqlock);
-
/* Move the buffer to the capture active queue */
active_queue_add(cap, v_buf);
@@ -324,8 +368,6 @@ static void fimc_capture_handler(struct fimc_dev *fimc)
static irqreturn_t fimc_isr(int irq, void *priv)
{
- struct fimc_vid_buffer *src_buf, *dst_buf;
- struct fimc_ctx *ctx;
struct fimc_dev *fimc = priv;
BUG_ON(!fimc);
@@ -333,18 +375,21 @@ static irqreturn_t fimc_isr(int irq, void *priv)
spin_lock(&fimc->slock);
- if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
- ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
+ if (test_and_clear_bit(ST_M2M_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto isr_unlock;
+ } else if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct fimc_ctx *ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
+
if (!ctx || !ctx->m2m_ctx)
goto isr_unlock;
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- if (src_buf && dst_buf) {
- spin_lock(&fimc->irqlock);
- src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
- wake_up(&src_buf->vb.done);
- wake_up(&dst_buf->vb.done);
- spin_unlock(&fimc->irqlock);
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (src_vb && dst_vb) {
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
}
goto isr_unlock;
@@ -364,25 +409,25 @@ isr_unlock:
return IRQ_HANDLED;
}
-/* The color format (planes_cnt, buff_cnt) must be already configured. */
-int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
+/* The color format (colplanes, memplanes) must be already configured. */
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
struct fimc_frame *frame, struct fimc_addr *paddr)
{
int ret = 0;
u32 pix_size;
- if (buf == NULL || frame == NULL)
+ if (vb == NULL || frame == NULL)
return -EINVAL;
pix_size = frame->width * frame->height;
- dbg("buff_cnt= %d, planes_cnt= %d, frame->size= %d, pix_size= %d",
- frame->fmt->buff_cnt, frame->fmt->planes_cnt,
- frame->size, pix_size);
+ dbg("memplanes= %d, colplanes= %d, pix_size= %d",
+ frame->fmt->memplanes, frame->fmt->colplanes, pix_size);
- if (frame->fmt->buff_cnt == 1) {
- paddr->y = videobuf_to_dma_contig(&buf->vb);
- switch (frame->fmt->planes_cnt) {
+ paddr->y = vb2_dma_contig_plane_paddr(vb, 0);
+
+ if (frame->fmt->memplanes == 1) {
+ switch (frame->fmt->colplanes) {
case 1:
paddr->cb = 0;
paddr->cr = 0;
@@ -405,6 +450,12 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
default:
return -EINVAL;
}
+ } else {
+ if (frame->fmt->memplanes >= 2)
+ paddr->cb = vb2_dma_contig_plane_paddr(vb, 1);
+
+ if (frame->fmt->memplanes == 3)
+ paddr->cr = vb2_dma_contig_plane_paddr(vb, 2);
}
dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
@@ -423,34 +474,34 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx)
/* Set order for 1 plane input formats. */
switch (ctx->s_frame.fmt->color) {
case S5P_FIMC_YCRYCB422:
- ctx->in_order_1p = S5P_FIMC_IN_YCRYCB;
+ ctx->in_order_1p = S5P_MSCTRL_ORDER422_CBYCRY;
break;
case S5P_FIMC_CBYCRY422:
- ctx->in_order_1p = S5P_FIMC_IN_CBYCRY;
+ ctx->in_order_1p = S5P_MSCTRL_ORDER422_YCRYCB;
break;
case S5P_FIMC_CRYCBY422:
- ctx->in_order_1p = S5P_FIMC_IN_CRYCBY;
+ ctx->in_order_1p = S5P_MSCTRL_ORDER422_YCBYCR;
break;
case S5P_FIMC_YCBYCR422:
default:
- ctx->in_order_1p = S5P_FIMC_IN_YCBYCR;
+ ctx->in_order_1p = S5P_MSCTRL_ORDER422_CRYCBY;
break;
}
dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
switch (ctx->d_frame.fmt->color) {
case S5P_FIMC_YCRYCB422:
- ctx->out_order_1p = S5P_FIMC_OUT_YCRYCB;
+ ctx->out_order_1p = S5P_CIOCTRL_ORDER422_CBYCRY;
break;
case S5P_FIMC_CBYCRY422:
- ctx->out_order_1p = S5P_FIMC_OUT_CBYCRY;
+ ctx->out_order_1p = S5P_CIOCTRL_ORDER422_YCRYCB;
break;
case S5P_FIMC_CRYCBY422:
- ctx->out_order_1p = S5P_FIMC_OUT_CRYCBY;
+ ctx->out_order_1p = S5P_CIOCTRL_ORDER422_YCBYCR;
break;
case S5P_FIMC_YCBYCR422:
default:
- ctx->out_order_1p = S5P_FIMC_OUT_YCBYCR;
+ ctx->out_order_1p = S5P_CIOCTRL_ORDER422_CRYCBY;
break;
}
dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
@@ -459,10 +510,14 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx)
static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
{
struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ u32 i, depth = 0;
+
+ for (i = 0; i < f->fmt->colplanes; i++)
+ depth += f->fmt->depth[i];
f->dma_offset.y_h = f->offs_h;
if (!variant->pix_hoff)
- f->dma_offset.y_h *= (f->fmt->depth >> 3);
+ f->dma_offset.y_h *= (depth >> 3);
f->dma_offset.y_v = f->offs_v;
@@ -473,7 +528,7 @@ static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
f->dma_offset.cr_v = f->offs_v;
if (!variant->pix_hoff) {
- if (f->fmt->planes_cnt == 3) {
+ if (f->fmt->colplanes == 3) {
f->dma_offset.cb_h >>= 1;
f->dma_offset.cr_h >>= 1;
}
@@ -499,7 +554,7 @@ static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
{
struct fimc_frame *s_frame, *d_frame;
- struct fimc_vid_buffer *buf = NULL;
+ struct vb2_buffer *vb = NULL;
int ret = 0;
s_frame = &ctx->s_frame;
@@ -522,15 +577,15 @@ int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
ctx->scaler.enabled = 1;
if (flags & FIMC_SRC_ADDR) {
- buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, buf, s_frame, &s_frame->paddr);
+ vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, vb, s_frame, &s_frame->paddr);
if (ret)
return ret;
}
if (flags & FIMC_DST_ADDR) {
- buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, buf, d_frame, &d_frame->paddr);
+ vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, vb, d_frame, &d_frame->paddr);
}
return ret;
@@ -572,7 +627,9 @@ static void fimc_dma_run(void *priv)
err("Scaler setup error");
goto dma_unlock;
}
- fimc_hw_set_scaler(ctx);
+
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
fimc_hw_set_effect(ctx);
@@ -587,7 +644,8 @@ static void fimc_dma_run(void *priv)
fimc_activate_capture(ctx);
- ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
+ FIMC_SRC_FMT | FIMC_DST_FMT);
fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
@@ -596,109 +654,94 @@ dma_unlock:
static void fimc_job_abort(void *priv)
{
- /* Nothing done in job_abort. */
-}
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
-static void fimc_buf_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
+ if (!fimc_m2m_pending(fimc))
+ return;
+
+ set_bit(ST_M2M_SHUT, &fimc->state);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_M2M_SHUT, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
}
-static int fimc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned long sizes[],
+ void *allocators[])
{
- struct fimc_ctx *ctx = vq->priv_data;
- struct fimc_frame *frame;
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fimc_frame *f;
+ int i;
- frame = ctx_get_frame(ctx, vq->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
+ f = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ /*
+ * Return the number of non-contiguous planes (plane buffers)
+ * depending on the configured color format.
+ */
+ if (f->fmt)
+ *num_planes = f->fmt->memplanes;
+
+ for (i = 0; i < f->fmt->memplanes; i++) {
+ sizes[i] = (f->width * f->height * f->fmt->depth[i]) >> 3;
+ allocators[i] = ctx->fimc_dev->alloc_ctx;
+ }
+
+ if (*num_buffers == 0)
+ *num_buffers = 1;
- *size = (frame->width * frame->height * frame->fmt->depth) >> 3;
- if (0 == *count)
- *count = 1;
return 0;
}
-static int fimc_buf_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static int fimc_buf_prepare(struct vb2_buffer *vb)
{
- struct fimc_ctx *ctx = vq->priv_data;
- struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct fimc_frame *frame;
- int ret;
+ int i;
- frame = ctx_get_frame(ctx, vq->type);
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- if (vb->baddr) {
- if (vb->bsize < frame->size) {
- v4l2_err(v4l2_dev,
- "User-provided buffer too small (%d < %d)\n",
- vb->bsize, frame->size);
- WARN_ON(1);
- return -EINVAL;
- }
- } else if (vb->state != VIDEOBUF_NEEDS_INIT
- && vb->bsize < frame->size) {
- return -EINVAL;
- }
-
- vb->width = frame->width;
- vb->height = frame->height;
- vb->bytesperline = (frame->width * frame->fmt->depth) >> 3;
- vb->size = frame->size;
- vb->field = field;
-
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret) {
- v4l2_err(v4l2_dev, "Iolock failed\n");
- fimc_buf_release(vq, vb);
- return ret;
- }
- }
- vb->state = VIDEOBUF_PREPARED;
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
return 0;
}
-static void fimc_buf_queue(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void fimc_buf_queue(struct vb2_buffer *vb)
{
- struct fimc_ctx *ctx = vq->priv_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- unsigned long flags;
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
- if ((ctx->state & FIMC_CTX_M2M) && ctx->m2m_ctx) {
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
- } else if (ctx->state & FIMC_CTX_CAP) {
- spin_lock_irqsave(&fimc->slock, flags);
- fimc_vid_cap_buf_queue(fimc, (struct fimc_vid_buffer *)vb);
+ if (ctx->m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
- dbg("fimc->cap.active_buf_cnt: %d",
- fimc->vid_cap.active_buf_cnt);
+static void fimc_lock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_lock(&ctx->fimc_dev->lock);
+}
- if (cap->active_buf_cnt >= cap->reqbufs_count ||
- cap->active_buf_cnt >= FIMC_MAX_OUT_BUFS) {
- if (!test_and_set_bit(ST_CAPT_STREAM, &fimc->state))
- fimc_activate_capture(ctx);
- }
- spin_unlock_irqrestore(&fimc->slock, flags);
- }
+static void fimc_unlock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_unlock(&ctx->fimc_dev->lock);
}
-struct videobuf_queue_ops fimc_qops = {
- .buf_setup = fimc_buf_setup,
- .buf_prepare = fimc_buf_prepare,
- .buf_queue = fimc_buf_queue,
- .buf_release = fimc_buf_release,
+struct vb2_ops fimc_qops = {
+ .queue_setup = fimc_queue_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+ .wait_prepare = fimc_unlock,
+ .wait_finish = fimc_lock,
+ .stop_streaming = stop_streaming,
};
static int fimc_m2m_querycap(struct file *file, void *priv,
@@ -712,12 +755,13 @@ static int fimc_m2m_querycap(struct file *file, void *priv,
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
cap->capabilities = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
-int fimc_vidioc_enum_fmt(struct file *file, void *priv,
+int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
@@ -732,25 +776,21 @@ int fimc_vidioc_enum_fmt(struct file *file, void *priv,
return 0;
}
-int fimc_vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+int fimc_vidioc_g_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *frame;
frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
f->fmt.pix.width = frame->width;
f->fmt.pix.height = frame->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = frame->fmt->fourcc;
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -785,42 +825,40 @@ struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
}
-int fimc_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
struct samsung_fimc_variant *variant = fimc->variant;
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_fmt *fmt;
u32 max_width, mod_x, mod_y, mask;
- int ret = -EINVAL, is_output = 0;
+ int i, is_output = 0;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (ctx->state & FIMC_CTX_CAP)
return -EINVAL;
is_output = 1;
- } else if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ } else if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
return -EINVAL;
}
- dbg("w: %d, h: %d, bpl: %d",
- pix->width, pix->height, pix->bytesperline);
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ dbg("w: %d, h: %d", pix->width, pix->height);
mask = is_output ? FMT_FLAGS_M2M : FMT_FLAGS_M2M | FMT_FLAGS_CAM;
fmt = find_format(f, mask);
if (!fmt) {
v4l2_err(&fimc->m2m.v4l2_dev, "Fourcc format (0x%X) invalid.\n",
pix->pixelformat);
- goto tf_out;
+ return -EINVAL;
}
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_NONE;
else if (V4L2_FIELD_NONE != pix->field)
- goto tf_out;
+ return -EINVAL;
if (is_output) {
max_width = variant->pix_limit->scaler_dis_w;
@@ -834,7 +872,7 @@ int fimc_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
- if (fimc->id == 1 && fimc->variant->pix_hoff)
+ if (fimc->id == 1 && variant->pix_hoff)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
mod_y = mod_x;
@@ -845,74 +883,73 @@ int fimc_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
v4l_bound_align_image(&pix->width, 16, max_width, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
- if (pix->bytesperline == 0 ||
- (pix->bytesperline * 8 / fmt->depth) > pix->width)
- pix->bytesperline = (pix->width * fmt->depth) >> 3;
+ pix->num_planes = fmt->memplanes;
- if (pix->sizeimage == 0)
- pix->sizeimage = pix->height * pix->bytesperline;
+ for (i = 0; i < pix->num_planes; ++i) {
+ int bpl = pix->plane_fmt[i].bytesperline;
- dbg("w: %d, h: %d, bpl: %d, depth: %d",
- pix->width, pix->height, pix->bytesperline, fmt->depth);
+ dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d",
+ i, bpl, fmt->depth[i], pix->width, pix->height);
- ret = 0;
+ if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width)
+ bpl = (pix->width * fmt->depth[0]) >> 3;
-tf_out:
- mutex_unlock(&fimc->lock);
- return ret;
+ if (!pix->plane_fmt[i].sizeimage)
+ pix->plane_fmt[i].sizeimage = pix->height * bpl;
+
+ pix->plane_fmt[i].bytesperline = bpl;
+
+ dbg("[%d]: bpl: %d, sizeimage: %d",
+ i, pix->plane_fmt[i].bytesperline,
+ pix->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
}
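When user space leaves bytesperline or sizeimage at zero, the loop above derives defaults from the plane depth. A standalone sketch of that calculation for a hypothetical single-plane 16 bpp format at 800x600 (example values only).

#include <stdio.h>

int main(void)
{
	unsigned int width = 800, height = 600, depth = 16;	/* bits per pixel */
	unsigned int bpl = 0, sizeimage = 0;

	if (!bpl || (bpl * 8 / depth) > width)
		bpl = (width * depth) >> 3;	/* 1600 bytes per line */
	if (!sizeimage)
		sizeimage = height * bpl;	/* 960000 bytes */

	printf("bpl=%u sizeimage=%u\n", bpl, sizeimage);
	return 0;
}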
-static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_device *v4l2_dev = &fimc->m2m.v4l2_dev;
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
struct fimc_frame *frame;
- struct v4l2_pix_format *pix;
+ struct v4l2_pix_format_mplane *pix;
unsigned long flags;
- int ret = 0;
+ int i, ret = 0;
+ u32 tmp;
- ret = fimc_vidioc_try_fmt(file, priv, f);
+ ret = fimc_vidioc_try_fmt_mplane(file, priv, f);
if (ret)
return ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
- mutex_lock(&vq->vb_lock);
- if (videobuf_queue_is_busy(vq)) {
- v4l2_err(v4l2_dev, "%s: queue (%d) busy\n", __func__, f->type);
- ret = -EBUSY;
- goto sf_out;
+ if (vb2_is_streaming(vq)) {
+ v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type);
+ return -EBUSY;
}
- spin_lock_irqsave(&ctx->slock, flags);
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
frame = &ctx->s_frame;
- ctx->state |= FIMC_SRC_FMT;
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
frame = &ctx->d_frame;
- ctx->state |= FIMC_DST_FMT;
} else {
- spin_unlock_irqrestore(&ctx->slock, flags);
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
+ v4l2_err(&fimc->m2m.v4l2_dev,
"Wrong buffer/video queue type (%d)\n", f->type);
- ret = -EINVAL;
- goto sf_out;
+ return -EINVAL;
}
- spin_unlock_irqrestore(&ctx->slock, flags);
- pix = &f->fmt.pix;
+ pix = &f->fmt.pix_mp;
frame->fmt = find_format(f, FMT_FLAGS_M2M);
- if (!frame->fmt) {
- ret = -EINVAL;
- goto sf_out;
- }
+ if (!frame->fmt)
+ return -EINVAL;
+
+ for (i = 0; i < frame->fmt->colplanes; i++)
+ frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height;
- frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_width = pix->plane_fmt[0].bytesperline * 8 /
+ frame->fmt->depth[0];
frame->f_height = pix->height;
frame->width = pix->width;
frame->height = pix->height;
@@ -920,19 +957,15 @@ static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
frame->o_height = pix->height;
frame->offs_h = 0;
frame->offs_v = 0;
- frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
- vq->field = pix->field;
spin_lock_irqsave(&ctx->slock, flags);
- ctx->state |= FIMC_PARAMS;
+ tmp = (frame == &ctx->d_frame) ? FIMC_DST_FMT : FIMC_SRC_FMT;
+ ctx->state |= FIMC_PARAMS | tmp;
spin_unlock_irqrestore(&ctx->slock, flags);
dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-sf_out:
- mutex_unlock(&vq->vb_lock);
- mutex_unlock(&fimc->lock);
- return ret;
+ return 0;
}
static int fimc_m2m_reqbufs(struct file *file, void *priv,
@@ -968,6 +1001,15 @@ static int fimc_m2m_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = priv;
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (~ctx->state & FIMC_SRC_FMT)
+ return -EINVAL;
+ } else if (~ctx->state & FIMC_DST_FMT) {
+ return -EINVAL;
+ }
+
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
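The streamon check above relies on the "~state & FLAG" idiom, which is true exactly when FLAG is not yet set. A small sketch of the idiom; the bit positions are made up for the example and are not the driver's actual state bits.

#include <stdio.h>

#define EX_SRC_FMT	(1 << 4)	/* example bits, not the driver's values */
#define EX_DST_FMT	(1 << 5)

int main(void)
{
	unsigned long state = EX_SRC_FMT;	/* only the source format is set */

	printf("%d\n", (~state & EX_DST_FMT) != 0);		/* 1: DST_FMT missing */
	printf("%d\n", !(~state & (EX_SRC_FMT | EX_DST_FMT)));	/* 0: not both set */
	return 0;
}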
@@ -992,11 +1034,8 @@ int fimc_vidioc_queryctrl(struct file *file, void *priv,
}
if (ctx->state & FIMC_CTX_CAP) {
- if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
- return -ERESTARTSYS;
- ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
+ return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
core, queryctrl, qc);
- mutex_unlock(&ctx->fimc_dev->lock);
}
return ret;
}
@@ -1006,10 +1045,6 @@ int fimc_vidioc_g_ctrl(struct file *file, void *priv,
{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- int ret = 0;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
switch (ctrl->id) {
case V4L2_CID_HFLIP:
@@ -1023,18 +1058,17 @@ int fimc_vidioc_g_ctrl(struct file *file, void *priv,
break;
default:
if (ctx->state & FIMC_CTX_CAP) {
- ret = v4l2_subdev_call(fimc->vid_cap.sd, core,
- g_ctrl, ctrl);
+ return v4l2_subdev_call(fimc->vid_cap.sd, core,
+ g_ctrl, ctrl);
} else {
v4l2_err(&fimc->m2m.v4l2_dev,
"Invalid control\n");
- ret = -EINVAL;
+ return -EINVAL;
}
}
dbg("ctrl->value= %d", ctrl->value);
- mutex_unlock(&fimc->lock);
- return ret;
+ return 0;
}
int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
@@ -1059,13 +1093,7 @@ int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
struct fimc_dev *fimc = ctx->fimc_dev;
unsigned long flags;
-
- if (ctx->rotation != 0 &&
- (ctrl->id == V4L2_CID_HFLIP || ctrl->id == V4L2_CID_VFLIP)) {
- v4l2_err(&fimc->m2m.v4l2_dev,
- "Simultaneous flip and rotation is not supported\n");
- return -EINVAL;
- }
+ int ret = 0;
spin_lock_irqsave(&ctx->slock, flags);
@@ -1085,6 +1113,20 @@ int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
break;
case V4L2_CID_ROTATE:
+ if (!(~ctx->state & (FIMC_DST_FMT | FIMC_SRC_FMT))) {
+ ret = fimc_check_scaler_ratio(ctx->s_frame.width,
+ ctx->s_frame.height,
+ ctx->d_frame.width,
+ ctx->d_frame.height,
+ ctrl->value);
+ if (ret) {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "Out of scaler range");
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ return -EINVAL;
+ }
+ }
+
/* Check for the output rotator availability */
if ((ctrl->value == 90 || ctrl->value == 270) &&
(ctx->in_path == FIMC_DMA && !variant->has_out_rot)) {
@@ -1107,7 +1149,7 @@ int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
}
static int fimc_m2m_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+ struct v4l2_control *ctrl)
{
struct fimc_ctx *ctx = priv;
int ret = 0;
@@ -1125,22 +1167,17 @@ static int fimc_m2m_cropcap(struct file *file, void *fh,
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = fh;
- struct fimc_dev *fimc = ctx->fimc_dev;
frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
cr->bounds.left = 0;
cr->bounds.top = 0;
cr->bounds.width = frame->f_width;
cr->bounds.height = frame->f_height;
cr->defrect = cr->bounds;
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -1148,21 +1185,16 @@ static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = file->private_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
cr->c.left = frame->offs_h;
cr->c.top = frame->offs_v;
cr->c.width = frame->width;
cr->c.height = frame->height;
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -1170,7 +1202,8 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
{
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
- u32 min_size, halign;
+ u32 min_size, halign, depth = 0;
+ int i;
if (cr->c.top < 0 || cr->c.left < 0) {
v4l2_err(&fimc->m2m.v4l2_dev,
@@ -1178,9 +1211,9 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
return -EINVAL;
}
- if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
f = (ctx->state & FIMC_CTX_CAP) ? &ctx->s_frame : &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
ctx->state & FIMC_CTX_M2M)
f = &ctx->s_frame;
else
@@ -1200,10 +1233,13 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
halign = 4;
}
+ for (i = 0; i < f->fmt->colplanes; i++)
+ depth += f->fmt->depth[i];
+
v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
ffs(min_size) - 1,
&cr->c.height, min_size, f->o_height,
- halign, 64/(ALIGN(f->fmt->depth, 8)));
+ halign, 64/(ALIGN(depth, 8)));
/* adjust left/top if cropping rectangle is out of bounds */
if (cr->c.left + cr->c.width > f->o_width)
@@ -1235,25 +1271,31 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
if (ret)
return ret;
- f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
&ctx->s_frame : &ctx->d_frame;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
spin_lock_irqsave(&ctx->slock, flags);
- if (~ctx->state & (FIMC_SRC_FMT | FIMC_DST_FMT)) {
- /* Check to see if scaling ratio is within supported range */
- if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
- else
- ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame);
+ /* Check to see if scaling ratio is within supported range */
+ if (!(~ctx->state & (FIMC_DST_FMT | FIMC_SRC_FMT))) {
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(cr->c.width, cr->c.height,
+ ctx->d_frame.width,
+ ctx->d_frame.height,
+ ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx->s_frame.width,
+ ctx->s_frame.height,
+ cr->c.width, cr->c.height,
+ ctx->rotation);
+ }
+
if (ret) {
v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
- ret = -EINVAL;
- goto scr_unlock;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ return -EINVAL;
}
}
+
ctx->state |= FIMC_PARAMS;
f->offs_h = cr->c.left;
@@ -1261,26 +1303,24 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
f->width = cr->c.width;
f->height = cr->c.height;
-scr_unlock:
spin_unlock_irqrestore(&ctx->slock, flags);
- mutex_unlock(&fimc->lock);
return 0;
}
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querycap = fimc_m2m_querycap,
- .vidioc_enum_fmt_vid_cap = fimc_vidioc_enum_fmt,
- .vidioc_enum_fmt_vid_out = fimc_vidioc_enum_fmt,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_vidioc_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = fimc_vidioc_enum_fmt_mplane,
- .vidioc_g_fmt_vid_cap = fimc_vidioc_g_fmt,
- .vidioc_g_fmt_vid_out = fimc_vidioc_g_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_vidioc_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = fimc_vidioc_g_fmt_mplane,
- .vidioc_try_fmt_vid_cap = fimc_vidioc_try_fmt,
- .vidioc_try_fmt_vid_out = fimc_vidioc_try_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_vidioc_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = fimc_vidioc_try_fmt_mplane,
- .vidioc_s_fmt_vid_cap = fimc_m2m_s_fmt,
- .vidioc_s_fmt_vid_out = fimc_m2m_s_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
.vidioc_reqbufs = fimc_m2m_reqbufs,
.vidioc_querybuf = fimc_m2m_querybuf,
@@ -1301,26 +1341,39 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
};
-static void queue_init(void *priv, struct videobuf_queue *vq,
- enum v4l2_buf_type type)
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
{
struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &fimc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- videobuf_queue_dma_contig_init(vq, &fimc_qops,
- &fimc->pdev->dev,
- &fimc->irqlock, type, V4L2_FIELD_NONE,
- sizeof(struct fimc_vid_buffer), priv, NULL);
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &fimc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ return vb2_queue_init(dst_vq);
}
static int fimc_m2m_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx = NULL;
- int err = 0;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
dbg("pid: %d, state: 0x%lx, refcnt: %d",
task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
@@ -1329,19 +1382,15 @@ static int fimc_m2m_open(struct file *file)
* Return if the corresponding video capture node
* is already opened.
*/
- if (fimc->vid_cap.refcnt > 0) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (fimc->vid_cap.refcnt > 0)
+ return -EBUSY;
fimc->m2m.refcnt++;
set_bit(ST_OUTDMA_RUN, &fimc->state);
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx) {
- err = -ENOMEM;
- goto err_unlock;
- }
+ if (!ctx)
+ return -ENOMEM;
file->private_data = ctx;
ctx->fimc_dev = fimc;
@@ -1355,15 +1404,14 @@ static int fimc_m2m_open(struct file *file)
ctx->out_path = FIMC_DMA;
spin_lock_init(&ctx->slock);
- ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, fimc->m2m.m2m_dev, queue_init);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
- err = PTR_ERR(ctx->m2m_ctx);
+ int err = PTR_ERR(ctx->m2m_ctx);
kfree(ctx);
+ return err;
}
-err_unlock:
- mutex_unlock(&fimc->lock);
- return err;
+ return 0;
}
static int fimc_m2m_release(struct file *file)
@@ -1371,8 +1419,6 @@ static int fimc_m2m_release(struct file *file)
struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
- mutex_lock(&fimc->lock);
-
dbg("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
@@ -1381,7 +1427,6 @@ static int fimc_m2m_release(struct file *file)
if (--fimc->m2m.refcnt <= 0)
clear_bit(ST_OUTDMA_RUN, &fimc->state);
- mutex_unlock(&fimc->lock);
return 0;
}
@@ -1415,7 +1460,6 @@ static struct v4l2_m2m_ops m2m_ops = {
.job_abort = fimc_job_abort,
};
-
static int fimc_register_m2m_device(struct fimc_dev *fimc)
{
struct video_device *vfd;
@@ -1448,6 +1492,7 @@ static int fimc_register_m2m_device(struct fimc_dev *fimc)
vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
vfd->minor = -1;
vfd->release = video_device_release;
+ vfd->lock = &fimc->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s:m2m", dev_name(&pdev->dev));
@@ -1496,7 +1541,7 @@ static void fimc_unregister_m2m_device(struct fimc_dev *fimc)
static void fimc_clk_release(struct fimc_dev *fimc)
{
int i;
- for (i = 0; i < NUM_FIMC_CLOCKS; i++) {
+ for (i = 0; i < fimc->num_clocks; i++) {
if (fimc->clock[i]) {
clk_disable(fimc->clock[i]);
clk_put(fimc->clock[i]);
@@ -1507,15 +1552,16 @@ static void fimc_clk_release(struct fimc_dev *fimc)
static int fimc_clk_get(struct fimc_dev *fimc)
{
int i;
- for (i = 0; i < NUM_FIMC_CLOCKS; i++) {
- fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clock_name[i]);
- if (IS_ERR(fimc->clock[i])) {
- dev_err(&fimc->pdev->dev,
- "failed to get fimc clock: %s\n",
- fimc_clock_name[i]);
- return -ENXIO;
+ for (i = 0; i < fimc->num_clocks; i++) {
+ fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
+
+ if (!IS_ERR_OR_NULL(fimc->clock[i])) {
+ clk_enable(fimc->clock[i]);
+ continue;
}
- clk_enable(fimc->clock[i]);
+ dev_err(&fimc->pdev->dev, "failed to get fimc clock: %s\n",
+ fimc_clocks[i]);
+ return -ENXIO;
}
return 0;
}
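With the clock handling above, a device instance only claims the camera clock ("sclk_cam") when a camera sensor is attached; otherwise num_clocks stops one short of MAX_FIMC_CLOCKS. A minimal sketch of that indexing against the fimc_clocks[] table (the clk_get() calls are just printed here).

#include <stdio.h>

#define MAX_FIMC_CLOCKS	3
enum { CLK_BUS, CLK_GATE, CLK_CAM };

static const char *fimc_clocks[MAX_FIMC_CLOCKS] = {
	"sclk_fimc", "fimc", "sclk_cam"
};

int main(void)
{
	int have_camera = 0;	/* example: no sensor attached */
	int num_clocks = MAX_FIMC_CLOCKS - 1 + (have_camera ? 1 : 0);
	int i;

	for (i = 0; i < num_clocks; i++)
		printf("clk_get(\"%s\")\n", fimc_clocks[i]);
	return 0;
}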
@@ -1526,6 +1572,7 @@ static int fimc_probe(struct platform_device *pdev)
struct resource *res;
struct samsung_fimc_driverdata *drv_data;
int ret = 0;
+ int cap_input_index = -1;
dev_dbg(&pdev->dev, "%s():\n", __func__);
@@ -1548,7 +1595,6 @@ static int fimc_probe(struct platform_device *pdev)
fimc->pdata = pdev->dev.platform_data;
fimc->state = ST_IDLE;
- spin_lock_init(&fimc->irqlock);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
@@ -1576,10 +1622,26 @@ static int fimc_probe(struct platform_device *pdev)
goto err_req_region;
}
+ fimc->num_clocks = MAX_FIMC_CLOCKS - 1;
+ /*
+ * Check if video capture node needs to be registered for this device
+ * instance.
+ */
+ if (fimc->pdata) {
+ int i;
+ for (i = 0; i < FIMC_MAX_CAMIF_CLIENTS; ++i)
+ if (fimc->pdata->isp_info[i])
+ break;
+ if (i < FIMC_MAX_CAMIF_CLIENTS) {
+ cap_input_index = i;
+ fimc->num_clocks++;
+ }
+ }
+
ret = fimc_clk_get(fimc);
if (ret)
goto err_regs_unmap;
- clk_set_rate(fimc->clock[0], drv_data->lclk_frequency);
+ clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -1597,24 +1659,24 @@ static int fimc_probe(struct platform_device *pdev)
goto err_clk;
}
+ /* Initialize contiguous memory allocator */
+ fimc->alloc_ctx = vb2_dma_contig_init_ctx(&fimc->pdev->dev);
+ if (IS_ERR(fimc->alloc_ctx)) {
+ ret = PTR_ERR(fimc->alloc_ctx);
+ goto err_irq;
+ }
+
ret = fimc_register_m2m_device(fimc);
if (ret)
goto err_irq;
/* At least one camera sensor is required to register capture node */
- if (fimc->pdata) {
- int i;
- for (i = 0; i < FIMC_MAX_CAMIF_CLIENTS; ++i)
- if (fimc->pdata->isp_info[i])
- break;
-
- if (i < FIMC_MAX_CAMIF_CLIENTS) {
- ret = fimc_register_capture_device(fimc);
- if (ret)
- goto err_m2m;
- }
+ if (cap_input_index >= 0) {
+ ret = fimc_register_capture_device(fimc);
+ if (ret)
+ goto err_m2m;
+ clk_disable(fimc->clock[CLK_CAM]);
}
-
/*
* Exclude the additional output DMA address registers by masking
* them out on HW revisions that provide extended capabilities.
@@ -1656,6 +1718,9 @@ static int __devexit fimc_remove(struct platform_device *pdev)
fimc_unregister_capture_device(fimc);
fimc_clk_release(fimc);
+
+ vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+
iounmap(fimc->regs);
release_resource(fimc->regs_res);
kfree(fimc->regs_res);
@@ -1726,6 +1791,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
+ .has_mainscaler_ext = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
@@ -1747,6 +1813,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
.has_inp_rot = 1,
.has_out_rot = 1,
.has_cistatus2 = 1,
+ .has_mainscaler_ext = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
@@ -1757,6 +1824,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
static struct samsung_fimc_variant fimc2_variant_s5pv310 = {
.pix_hoff = 1,
.has_cistatus2 = 1,
+ .has_mainscaler_ext = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 4f047d35f8ad..4829a2515076 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -16,11 +16,12 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/videodev2.h>
-#include <media/videobuf-core.h>
+#include <linux/io.h>
+#include <media/videobuf2-core.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-mediabus.h>
-#include <media/s3c_fimc.h>
+#include <media/s5p_fimc.h>
#include "regs-fimc.h"
@@ -36,7 +37,7 @@
/* Time to wait for next frame VSYNC interrupt while stopping operation. */
#define FIMC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
-#define NUM_FIMC_CLOCKS 2
+#define MAX_FIMC_CLOCKS 3
#define MODULE_NAME "s5p-fimc"
#define FIMC_MAX_DEVS 4
#define FIMC_MAX_OUT_BUFS 4
@@ -44,12 +45,19 @@
#define SCALER_MAX_VRATIO 64
#define DMA_MIN_SIZE 8
-/* FIMC device state flags */
+/* indices to the clocks array */
+enum {
+ CLK_BUS,
+ CLK_GATE,
+ CLK_CAM,
+};
+
enum fimc_dev_flags {
/* for m2m node */
ST_IDLE,
ST_OUTDMA_RUN,
ST_M2M_PEND,
+ ST_M2M_SHUT,
/* for capture node */
ST_CAPT_PEND,
ST_CAPT_RUN,
@@ -70,13 +78,6 @@ enum fimc_dev_flags {
#define fimc_capture_streaming(dev) \
test_bit(ST_CAPT_STREAM, &(dev)->state)
-#define fimc_buf_finish(dev, vid_buf) do { \
- spin_lock(&(dev)->irqlock); \
- (vid_buf)->vb.state = VIDEOBUF_DONE; \
- spin_unlock(&(dev)->irqlock); \
- wake_up(&(vid_buf)->vb.done); \
-} while (0)
-
enum fimc_datapath {
FIMC_CAMERA,
FIMC_DMA,
@@ -90,7 +91,6 @@ enum fimc_color_fmt {
S5P_FIMC_RGB888,
S5P_FIMC_RGB30_LOCAL,
S5P_FIMC_YCBCR420 = 0x20,
- S5P_FIMC_YCBCR422,
S5P_FIMC_YCBYCR422,
S5P_FIMC_YCRYCB422,
S5P_FIMC_CBYCRY422,
@@ -100,18 +100,6 @@ enum fimc_color_fmt {
#define fimc_fmt_is_rgb(x) ((x) & 0x10)
-/* Y/Cb/Cr components order at DMA output for 1 plane YCbCr 4:2:2 formats. */
-#define S5P_FIMC_OUT_CRYCBY S5P_CIOCTRL_ORDER422_CRYCBY
-#define S5P_FIMC_OUT_CBYCRY S5P_CIOCTRL_ORDER422_YCRYCB
-#define S5P_FIMC_OUT_YCRYCB S5P_CIOCTRL_ORDER422_CBYCRY
-#define S5P_FIMC_OUT_YCBYCR S5P_CIOCTRL_ORDER422_YCBYCR
-
-/* Input Y/Cb/Cr components order for 1 plane YCbCr 4:2:2 color formats. */
-#define S5P_FIMC_IN_CRYCBY S5P_MSCTRL_ORDER422_CRYCBY
-#define S5P_FIMC_IN_CBYCRY S5P_MSCTRL_ORDER422_YCRYCB
-#define S5P_FIMC_IN_YCRYCB S5P_MSCTRL_ORDER422_CBYCRY
-#define S5P_FIMC_IN_YCBYCR S5P_MSCTRL_ORDER422_YCBYCR
-
/* Cb/Cr chrominance components order for 2 plane Y/CbCr 4:2:2 formats. */
#define S5P_FIMC_LSB_CRCB S5P_CIOCTRL_ORDER422_2P_LSB_CRCB
@@ -157,18 +145,18 @@ enum fimc_color_fmt {
* @name: format description
* @fourcc: the fourcc code for this format, 0 if not applicable
* @color: the corresponding fimc_color_fmt
- * @depth: driver's private 'number of bits per pixel'
- * @buff_cnt: number of physically non-contiguous data planes
- * @planes_cnt: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @memplanes: number of physically non-contiguous data planes
+ * @colplanes: number of physically contiguous data planes
*/
struct fimc_fmt {
enum v4l2_mbus_pixelcode mbus_code;
char *name;
u32 fourcc;
u32 color;
- u16 buff_cnt;
- u16 planes_cnt;
- u16 depth;
+ u16 memplanes;
+ u16 colplanes;
+ u8 depth[VIDEO_MAX_PLANES];
u16 flags;
#define FMT_FLAGS_CAM (1 << 0)
#define FMT_FLAGS_M2M (1 << 1)
@@ -260,7 +248,8 @@ struct fimc_addr {
* @index: buffer index for the output DMA engine
*/
struct fimc_vid_buffer {
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head list;
struct fimc_addr paddr;
int index;
};
@@ -277,7 +266,7 @@ struct fimc_vid_buffer {
* @height: image pixel weight
* @paddr: image frame buffer physical addresses
* @buf_cnt: number of buffers depending on a color format
- * @size: image size in bytes
+ * @payload: image size in bytes (w x h x bpp)
* @color: color format
* @dma_offset: DMA offset in bytes
*/
@@ -290,7 +279,7 @@ struct fimc_frame {
u32 offs_v;
u32 width;
u32 height;
- u32 size;
+ unsigned long payload[VIDEO_MAX_PLANES];
struct fimc_addr paddr;
struct fimc_dma_offset dma_offset;
struct fimc_fmt *fmt;
@@ -331,13 +320,14 @@ struct fimc_m2m_device {
*/
struct fimc_vid_cap {
struct fimc_ctx *ctx;
+ struct vb2_alloc_ctx *alloc_ctx;
struct video_device *vfd;
struct v4l2_device v4l2_dev;
- struct v4l2_subdev *sd;
+ struct v4l2_subdev *sd;
struct v4l2_mbus_framefmt fmt;
struct list_head pending_buf_q;
struct list_head active_buf_q;
- struct videobuf_queue vbq;
+ struct vb2_queue vbq;
int active_buf_cnt;
int buf_index;
unsigned int frame_count;
@@ -372,6 +362,8 @@ struct fimc_pix_limit {
* @has_inp_rot: set if has input rotator
* @has_out_rot: set if has output rotator
* @has_cistatus2: 1 if CISTATUS2 register is present in this IP revision
+ * @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
+ * are present in this IP revision
* @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
@@ -383,6 +375,7 @@ struct samsung_fimc_variant {
unsigned int has_inp_rot:1;
unsigned int has_out_rot:1;
unsigned int has_cistatus2:1;
+ unsigned int has_mainscaler_ext:1;
struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
@@ -412,12 +405,12 @@ struct fimc_ctx;
* @lock: the mutex protecting this data structure
* @pdev: pointer to the FIMC platform device
* @pdata: pointer to the device platform data
- * @id: FIMC device index (0..2)
+ * @id: FIMC device index (0..FIMC_MAX_DEVS)
+ * @num_clocks: the number of clocks managed by this device instance
* @clock[]: the clocks required for FIMC operation
* @regs: the mapped hardware registers
* @regs_res: the resource claimed for IO registers
* @irq: interrupt number of the FIMC subdevice
- * @irqlock: spinlock protecting videobuffer queue
* @irq_queue:
* @m2m: memory-to-memory V4L2 device information
* @vid_cap: camera capture device information
@@ -427,18 +420,19 @@ struct fimc_dev {
spinlock_t slock;
struct mutex lock;
struct platform_device *pdev;
- struct s3c_platform_fimc *pdata;
+ struct s5p_platform_fimc *pdata;
struct samsung_fimc_variant *variant;
- int id;
- struct clk *clock[NUM_FIMC_CLOCKS];
+ u16 id;
+ u16 num_clocks;
+ struct clk *clock[MAX_FIMC_CLOCKS];
void __iomem *regs;
struct resource *regs_res;
int irq;
- spinlock_t irqlock;
wait_queue_head_t irq_queue;
struct fimc_m2m_device m2m;
struct fimc_vid_cap vid_cap;
unsigned long state;
+ struct vb2_alloc_ctx *alloc_ctx;
};
/**
@@ -482,11 +476,9 @@ struct fimc_ctx {
struct v4l2_m2m_ctx *m2m_ctx;
};
-extern struct videobuf_queue_ops fimc_qops;
-
static inline int tiled_fmt(struct fimc_fmt *fmt)
{
- return 0;
+ return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
}
static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
@@ -542,12 +534,12 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
{
struct fimc_frame *frame;
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT == type) {
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
if (ctx->state & FIMC_CTX_M2M)
frame = &ctx->s_frame;
else
return ERR_PTR(-EINVAL);
- } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE == type) {
+ } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
frame = &ctx->d_frame;
} else {
v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
@@ -581,7 +573,8 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx);
void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
-void fimc_hw_set_scaler(struct fimc_ctx *ctx);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
void fimc_hw_en_capture(struct fimc_ctx *ctx);
void fimc_hw_set_effect(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
@@ -589,23 +582,23 @@ void fimc_hw_set_input_path(struct fimc_ctx *ctx);
void fimc_hw_set_output_path(struct fimc_ctx *ctx);
void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
- int index);
+ int index);
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *cam);
+ struct s5p_fimc_isp_info *cam);
int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *cam);
+ struct s5p_fimc_isp_info *cam);
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *cam);
+ struct s5p_fimc_isp_info *cam);
/* -----------------------------------------------------*/
/* fimc-core.c */
-int fimc_vidioc_enum_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *f);
-int fimc_vidioc_g_fmt(struct file *file, void *priv,
- struct v4l2_format *f);
-int fimc_vidioc_try_fmt(struct file *file, void *priv,
- struct v4l2_format *f);
+int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int fimc_vidioc_g_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f);
+int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f);
int fimc_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc);
int fimc_vidioc_g_ctrl(struct file *file, void *priv,
@@ -619,10 +612,10 @@ struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask);
struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
unsigned int mask);
-int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f);
+int fimc_check_scaler_ratio(int sw, int sh, int dw, int dh, int rot);
int fimc_set_scaler_info(struct fimc_ctx *ctx);
int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
-int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
struct fimc_frame *frame, struct fimc_addr *paddr);
/* -----------------------------------------------------*/
@@ -649,28 +642,27 @@ static inline void fimc_deactivate_capture(struct fimc_dev *fimc)
}
/*
- * Add video buffer to the active buffers queue.
- * The caller holds irqlock spinlock.
+ * Add buf to the capture active buffers queue.
+ * Locking: Needs to be called with fimc_dev::slock held.
*/
static inline void active_queue_add(struct fimc_vid_cap *vid_cap,
- struct fimc_vid_buffer *buf)
+ struct fimc_vid_buffer *buf)
{
- buf->vb.state = VIDEOBUF_ACTIVE;
- list_add_tail(&buf->vb.queue, &vid_cap->active_buf_q);
+ list_add_tail(&buf->list, &vid_cap->active_buf_q);
vid_cap->active_buf_cnt++;
}
/*
* Pop a video buffer from the capture active buffers queue
- * Locking: Need to be called with dev->slock held.
+ * Locking: Needs to be called with fimc_dev::slock held.
*/
static inline struct fimc_vid_buffer *
active_queue_pop(struct fimc_vid_cap *vid_cap)
{
struct fimc_vid_buffer *buf;
buf = list_entry(vid_cap->active_buf_q.next,
- struct fimc_vid_buffer, vb.queue);
- list_del(&buf->vb.queue);
+ struct fimc_vid_buffer, list);
+ list_del(&buf->list);
vid_cap->active_buf_cnt--;
return buf;
}
@@ -679,8 +671,7 @@ active_queue_pop(struct fimc_vid_cap *vid_cap)
static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
struct fimc_vid_buffer *buf)
{
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vid_cap->pending_buf_q);
+ list_add_tail(&buf->list, &vid_cap->pending_buf_q);
}
/* Pop a video buffer from the capture pending buffers queue */
@@ -689,10 +680,9 @@ pending_queue_pop(struct fimc_vid_cap *vid_cap)
{
struct fimc_vid_buffer *buf;
buf = list_entry(vid_cap->pending_buf_q.next,
- struct fimc_vid_buffer, vb.queue);
- list_del(&buf->vb.queue);
+ struct fimc_vid_buffer, list);
+ list_del(&buf->list);
return buf;
}
-
#endif /* FIMC_CORE_H_ */
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 511631a2e5c3..10684aef5b2d 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/map.h>
-#include <media/s3c_fimc.h>
+#include <media/s5p_fimc.h>
#include "fimc-core.h"
@@ -37,11 +37,11 @@ void fimc_hw_reset(struct fimc_dev *dev)
writel(cfg, dev->regs + S5P_CIGCTRL);
}
-static u32 fimc_hw_get_in_flip(u32 ctx_flip)
+static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
{
u32 flip = S5P_MSCTRL_FLIP_NORMAL;
- switch (ctx_flip) {
+ switch (ctx->flip) {
case FLIP_X_AXIS:
flip = S5P_MSCTRL_FLIP_X_MIRROR;
break;
@@ -51,16 +51,20 @@ static u32 fimc_hw_get_in_flip(u32 ctx_flip)
case FLIP_XY_AXIS:
flip = S5P_MSCTRL_FLIP_180;
break;
+ default:
+ break;
}
+ if (ctx->rotation <= 90)
+ return flip;
- return flip;
+ return (flip ^ S5P_MSCTRL_FLIP_180) & S5P_MSCTRL_FLIP_180;
}
-static u32 fimc_hw_get_target_flip(u32 ctx_flip)
+static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
{
u32 flip = S5P_CITRGFMT_FLIP_NORMAL;
- switch (ctx_flip) {
+ switch (ctx->flip) {
case FLIP_X_AXIS:
flip = S5P_CITRGFMT_FLIP_X_MIRROR;
break;
@@ -70,11 +74,13 @@ static u32 fimc_hw_get_target_flip(u32 ctx_flip)
case FLIP_XY_AXIS:
flip = S5P_CITRGFMT_FLIP_180;
break;
- case FLIP_NONE:
+ default:
break;
-
}
- return flip;
+ if (ctx->rotation <= 90)
+ return flip;
+
+ return (flip ^ S5P_CITRGFMT_FLIP_180) & S5P_CITRGFMT_FLIP_180;
}
void fimc_hw_set_rotation(struct fimc_ctx *ctx)
@@ -84,10 +90,7 @@ void fimc_hw_set_rotation(struct fimc_ctx *ctx)
cfg = readl(dev->regs + S5P_CITRGFMT);
cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90 |
- S5P_CITRGFMT_FLIP_180);
-
- flip = readl(dev->regs + S5P_MSCTRL);
- flip &= ~S5P_MSCTRL_FLIP_MASK;
+ S5P_CITRGFMT_FLIP_180);
/*
* The input and output rotator cannot work simultaneously.
@@ -95,26 +98,22 @@ void fimc_hw_set_rotation(struct fimc_ctx *ctx)
* in direct fifo output mode.
*/
if (ctx->rotation == 90 || ctx->rotation == 270) {
- if (ctx->out_path == FIMC_LCDFIFO) {
- cfg |= S5P_CITRGFMT_INROT90;
- if (ctx->rotation == 270)
- flip |= S5P_MSCTRL_FLIP_180;
- } else {
- cfg |= S5P_CITRGFMT_OUTROT90;
- if (ctx->rotation == 270)
- cfg |= S5P_CITRGFMT_FLIP_180;
- }
- } else if (ctx->rotation == 180) {
if (ctx->out_path == FIMC_LCDFIFO)
- flip |= S5P_MSCTRL_FLIP_180;
+ cfg |= S5P_CITRGFMT_INROT90;
else
- cfg |= S5P_CITRGFMT_FLIP_180;
+ cfg |= S5P_CITRGFMT_OUTROT90;
}
- if (ctx->rotation == 180 || ctx->rotation == 270)
- writel(flip, dev->regs + S5P_MSCTRL);
- cfg |= fimc_hw_get_target_flip(ctx->flip);
- writel(cfg, dev->regs + S5P_CITRGFMT);
+ if (ctx->out_path == FIMC_DMA) {
+ cfg |= fimc_hw_get_target_flip(ctx);
+ writel(cfg, dev->regs + S5P_CITRGFMT);
+ } else {
+ /* LCD FIFO path */
+ flip = readl(dev->regs + S5P_MSCTRL);
+ flip &= ~S5P_MSCTRL_FLIP_MASK;
+ flip |= fimc_hw_get_in_flip(ctx);
+ writel(flip, dev->regs + S5P_MSCTRL);
+ }
}
void fimc_hw_set_target_format(struct fimc_ctx *ctx)
@@ -131,19 +130,14 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
S5P_CITRGFMT_VSIZE_MASK);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB565:
- case S5P_FIMC_RGB666:
- case S5P_FIMC_RGB888:
+ case S5P_FIMC_RGB565...S5P_FIMC_RGB888:
cfg |= S5P_CITRGFMT_RGB;
break;
case S5P_FIMC_YCBCR420:
cfg |= S5P_CITRGFMT_YCBCR420;
break;
- case S5P_FIMC_YCBYCR422:
- case S5P_FIMC_YCRYCB422:
- case S5P_FIMC_CBYCRY422:
- case S5P_FIMC_CRYCBY422:
- if (frame->fmt->planes_cnt == 1)
+ case S5P_FIMC_YCBYCR422...S5P_FIMC_CRYCBY422:
+ if (frame->fmt->colplanes == 1)
cfg |= S5P_CITRGFMT_YCBCR422_1P;
else
cfg |= S5P_CITRGFMT_YCBCR422;
@@ -219,11 +213,11 @@ void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
cfg &= ~(S5P_CIOCTRL_ORDER2P_MASK | S5P_CIOCTRL_ORDER422_MASK |
S5P_CIOCTRL_YCBCR_PLANE_MASK);
- if (frame->fmt->planes_cnt == 1)
+ if (frame->fmt->colplanes == 1)
cfg |= ctx->out_order_1p;
- else if (frame->fmt->planes_cnt == 2)
+ else if (frame->fmt->colplanes == 2)
cfg |= ctx->out_order_2p | S5P_CIOCTRL_YCBCR_2PLANE;
- else if (frame->fmt->planes_cnt == 3)
+ else if (frame->fmt->colplanes == 3)
cfg |= S5P_CIOCTRL_YCBCR_3PLANE;
writel(cfg, dev->regs + S5P_CIOCTRL);
@@ -249,7 +243,7 @@ void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
writel(cfg, dev->regs + S5P_CIOCTRL);
}
-static void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_scaler *sc = &ctx->scaler;
@@ -267,7 +261,7 @@ static void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
writel(cfg, dev->regs + S5P_CISCPREDST);
}
-void fimc_hw_set_scaler(struct fimc_ctx *ctx)
+static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_scaler *sc = &ctx->scaler;
@@ -275,8 +269,6 @@ void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_frame *dst_frame = &ctx->d_frame;
u32 cfg = 0;
- fimc_hw_set_prescaler(ctx);
-
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -316,13 +308,42 @@ void fimc_hw_set_scaler(struct fimc_ctx *ctx)
cfg |= S5P_CISCCTRL_INTERLACE;
}
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+}
+
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct samsung_fimc_variant *variant = dev->variant;
+ struct fimc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
dbg("main_hratio= 0x%X main_vratio= 0x%X",
sc->main_hratio, sc->main_vratio);
- cfg |= S5P_CISCCTRL_SC_HORRATIO(sc->main_hratio);
- cfg |= S5P_CISCCTRL_SC_VERRATIO(sc->main_vratio);
+ fimc_hw_set_scaler(ctx);
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ cfg = readl(dev->regs + S5P_CISCCTRL);
+
+ if (variant->has_mainscaler_ext) {
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
+ cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
+ cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+
+ cfg = readl(dev->regs + S5P_CIEXTEN);
+
+ cfg &= ~(S5P_CIEXTEN_MVRATIO_EXT_MASK |
+ S5P_CIEXTEN_MHRATIO_EXT_MASK);
+ cfg |= S5P_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
+ cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + S5P_CIEXTEN);
+ } else {
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
+ cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
+ cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+ }
}
void fimc_hw_en_capture(struct fimc_ctx *ctx)
@@ -410,41 +431,37 @@ void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
/* Set the input DMA to process single frame only. */
cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~(S5P_MSCTRL_FLIP_MASK
- | S5P_MSCTRL_INFORMAT_MASK
+ cfg &= ~(S5P_MSCTRL_INFORMAT_MASK
| S5P_MSCTRL_IN_BURST_COUNT_MASK
| S5P_MSCTRL_INPUT_MASK
| S5P_MSCTRL_C_INT_IN_MASK
| S5P_MSCTRL_2P_IN_ORDER_MASK);
- cfg |= (S5P_MSCTRL_FRAME_COUNT(1) | S5P_MSCTRL_INPUT_MEMORY);
+ cfg |= (S5P_MSCTRL_IN_BURST_COUNT(4)
+ | S5P_MSCTRL_INPUT_MEMORY
+ | S5P_MSCTRL_FIFO_CTRL_FULL);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB565:
- case S5P_FIMC_RGB666:
- case S5P_FIMC_RGB888:
+ case S5P_FIMC_RGB565...S5P_FIMC_RGB888:
cfg |= S5P_MSCTRL_INFORMAT_RGB;
break;
case S5P_FIMC_YCBCR420:
cfg |= S5P_MSCTRL_INFORMAT_YCBCR420;
- if (frame->fmt->planes_cnt == 2)
+ if (frame->fmt->colplanes == 2)
cfg |= ctx->in_order_2p | S5P_MSCTRL_C_INT_IN_2PLANE;
else
cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
break;
- case S5P_FIMC_YCBYCR422:
- case S5P_FIMC_YCRYCB422:
- case S5P_FIMC_CBYCRY422:
- case S5P_FIMC_CRYCBY422:
- if (frame->fmt->planes_cnt == 1) {
+ case S5P_FIMC_YCBYCR422...S5P_FIMC_CRYCBY422:
+ if (frame->fmt->colplanes == 1) {
cfg |= ctx->in_order_1p
| S5P_MSCTRL_INFORMAT_YCBCR422_1P;
} else {
cfg |= S5P_MSCTRL_INFORMAT_YCBCR422;
- if (frame->fmt->planes_cnt == 2)
+ if (frame->fmt->colplanes == 2)
cfg |= ctx->in_order_2p
| S5P_MSCTRL_C_INT_IN_2PLANE;
else
@@ -455,13 +472,6 @@ void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
break;
}
- /*
- * Input DMA flip mode (and rotation).
- * Do not allow simultaneous rotation and flipping.
- */
- if (!ctx->rotation && ctx->out_path == FIMC_LCDFIFO)
- cfg |= fimc_hw_get_in_flip(ctx->flip);
-
writel(cfg, dev->regs + S5P_MSCTRL);
/* Input/output DMA linear/tiled mode. */
@@ -532,7 +542,7 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
}
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *cam)
+ struct s5p_fimc_isp_info *cam)
{
u32 cfg = readl(fimc->regs + S5P_CIGCTRL);
@@ -557,41 +567,46 @@ int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
}
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *cam)
+ struct s5p_fimc_isp_info *cam)
{
struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
u32 cfg = 0;
+ u32 bus_width;
+ int i;
+
+ static const struct {
+ u32 pixelcode;
+ u32 cisrcfmt;
+ u16 bus_width;
+ } pix_desc[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, S5P_CISRCFMT_ORDER422_YCBYCR, 8 },
+ { V4L2_MBUS_FMT_YVYU8_2X8, S5P_CISRCFMT_ORDER422_YCRYCB, 8 },
+ { V4L2_MBUS_FMT_VYUY8_2X8, S5P_CISRCFMT_ORDER422_CRYCBY, 8 },
+ { V4L2_MBUS_FMT_UYVY8_2X8, S5P_CISRCFMT_ORDER422_CBYCRY, 8 },
+ /* TODO: Add pixel codes for 16-bit bus width */
+ };
if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
+ for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
+ if (fimc->vid_cap.fmt.code == pix_desc[i].pixelcode) {
+ cfg = pix_desc[i].cisrcfmt;
+ bus_width = pix_desc[i].bus_width;
+ break;
+ }
+ }
- switch (fimc->vid_cap.fmt.code) {
- case V4L2_MBUS_FMT_YUYV8_2X8:
- cfg = S5P_CISRCFMT_ORDER422_YCBYCR;
- break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
- cfg = S5P_CISRCFMT_ORDER422_YCRYCB;
- break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
- cfg = S5P_CISRCFMT_ORDER422_CRYCBY;
- break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
- cfg = S5P_CISRCFMT_ORDER422_CBYCRY;
- break;
- default:
- err("camera image format not supported: %d",
- fimc->vid_cap.fmt.code);
+ if (i == ARRAY_SIZE(pix_desc)) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev,
+ "Camera color format not supported: %d\n",
+ fimc->vid_cap.fmt.code);
return -EINVAL;
}
if (cam->bus_type == FIMC_ITU_601) {
- if (cam->bus_width == 8) {
+ if (bus_width == 8)
cfg |= S5P_CISRCFMT_ITU601_8BIT;
- } else if (cam->bus_width == 16) {
+ else if (bus_width == 16)
cfg |= S5P_CISRCFMT_ITU601_16BIT;
- } else {
- err("invalid bus width: %d", cam->bus_width);
- return -EINVAL;
- }
} /* else defaults to ITU-R BT.656 8-bit */
}
@@ -624,7 +639,7 @@ int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
}
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s3c_fimc_isp_info *cam)
+ struct s5p_fimc_isp_info *cam)
{
u32 cfg, tmp;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
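The hunk above replaces the per-format switch in fimc_hw_set_camera_source() with a small descriptor table that is scanned for a matching media-bus code, treating "loop ran off the end" as the unsupported-format error. A standalone sketch of the same table-driven lookup follows; the pixel codes and register values in it are made-up placeholders, not the real V4L2_MBUS_FMT_* or S5P_CISRCFMT_* constants.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Placeholder media-bus codes and register values -- illustrative only. */
static const struct {
	uint32_t pixelcode;
	uint32_t cisrcfmt;
	uint16_t bus_width;
} pix_desc[] = {
	{ 0x2008, 0x0, 8 },
	{ 0x2009, 0x1, 8 },
};

static int lookup(uint32_t code, uint32_t *cisrcfmt, uint16_t *bus_width)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
		if (pix_desc[i].pixelcode == code) {
			*cisrcfmt = pix_desc[i].cisrcfmt;
			*bus_width = pix_desc[i].bus_width;
			return 0;
		}
	}
	return -1;	/* unsupported, like the i == ARRAY_SIZE() check above */
}

int main(void)
{
	uint32_t cfg;
	uint16_t width;

	if (lookup(0x2009, &cfg, &width) == 0)
		printf("cfg=0x%x bus_width=%u\n", cfg, width);
	if (lookup(0xdead, &cfg, &width) < 0)
		printf("camera color format not supported\n");
	return 0;
}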
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
index 57e33f84fcfa..0fea3e635d76 100644
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ b/drivers/media/video/s5p-fimc/regs-fimc.h
@@ -98,8 +98,8 @@
#define S5P_CIOCTRL 0x4c
#define S5P_CIOCTRL_ORDER422_MASK (3 << 0)
#define S5P_CIOCTRL_ORDER422_CRYCBY (0 << 0)
-#define S5P_CIOCTRL_ORDER422_YCRYCB (1 << 0)
-#define S5P_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define S5P_CIOCTRL_ORDER422_CBYCRY (1 << 0)
+#define S5P_CIOCTRL_ORDER422_YCRYCB (2 << 0)
#define S5P_CIOCTRL_ORDER422_YCBYCR (3 << 0)
#define S5P_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
#define S5P_CIOCTRL_YCBCR_3PLANE (0 << 3)
@@ -139,8 +139,12 @@
#define S5P_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
#define S5P_CISCCTRL_RGB_EXT (1 << 10)
#define S5P_CISCCTRL_ONE2ONE (1 << 9)
-#define S5P_CISCCTRL_SC_HORRATIO(x) ((x) << 16)
-#define S5P_CISCCTRL_SC_VERRATIO(x) ((x) << 0)
+#define S5P_CISCCTRL_MHRATIO(x) ((x) << 16)
+#define S5P_CISCCTRL_MVRATIO(x) ((x) << 0)
+#define S5P_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
+#define S5P_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
+#define S5P_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
+#define S5P_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
/* Target area */
#define S5P_CITAREA 0x5c
@@ -210,7 +214,7 @@
/* Input DMA control */
#define S5P_MSCTRL 0xfc
-#define S5P_MSCTRL_IN_BURST_COUNT_MASK (3 << 24)
+#define S5P_MSCTRL_IN_BURST_COUNT_MASK (0xF << 24)
#define S5P_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
#define S5P_MSCTRL_2P_IN_ORDER_SHIFT 16
#define S5P_MSCTRL_C_INT_IN_3PLANE (0 << 15)
@@ -222,11 +226,12 @@
#define S5P_MSCTRL_FLIP_X_MIRROR (1 << 13)
#define S5P_MSCTRL_FLIP_Y_MIRROR (2 << 13)
#define S5P_MSCTRL_FLIP_180 (3 << 13)
+#define S5P_MSCTRL_FIFO_CTRL_FULL (1 << 12)
#define S5P_MSCTRL_ORDER422_SHIFT 4
-#define S5P_MSCTRL_ORDER422_CRYCBY (0 << 4)
-#define S5P_MSCTRL_ORDER422_YCRYCB (1 << 4)
-#define S5P_MSCTRL_ORDER422_CBYCRY (2 << 4)
-#define S5P_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define S5P_MSCTRL_ORDER422_YCBYCR (0 << 4)
+#define S5P_MSCTRL_ORDER422_CBYCRY (1 << 4)
+#define S5P_MSCTRL_ORDER422_YCRYCB (2 << 4)
+#define S5P_MSCTRL_ORDER422_CRYCBY (3 << 4)
#define S5P_MSCTRL_ORDER422_MASK (3 << 4)
#define S5P_MSCTRL_INPUT_EXTCAM (0 << 3)
#define S5P_MSCTRL_INPUT_MEMORY (1 << 3)
@@ -237,7 +242,7 @@
#define S5P_MSCTRL_INFORMAT_RGB (3 << 1)
#define S5P_MSCTRL_INFORMAT_MASK (3 << 1)
#define S5P_MSCTRL_ENVID (1 << 0)
-#define S5P_MSCTRL_FRAME_COUNT(x) ((x) << 24)
+#define S5P_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
/* Output DMA Y/Cb/Cr offset */
#define S5P_CIOYOFF 0x168
@@ -263,6 +268,10 @@
/* Real output DMA image size (extension register) */
#define S5P_CIEXTEN 0x188
+#define S5P_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
+#define S5P_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
+#define S5P_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
+#define S5P_CIEXTEN_MVRATIO_EXT_MASK 0x3f
#define S5P_CIDMAPARAM 0x18c
#define S5P_CIDMAPARAM_R_LINEAR (0 << 29)
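The new MHRATIO/MVRATIO macros split the main scaler ratio across two registers on variants with the extended main scaler: the upper bits stay in S5P_CISCCTRL while the low six bits move into S5P_CIEXTEN, which is why fimc_hw_set_mainscaler() above writes both registers. A small user-space sketch of the round trip, mirroring the shift and mask values in this hunk (the 15-bit total ratio width, 9 coarse plus 6 fine bits, is an assumption made for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Field positions mirror the defines above. */
#define MHRATIO_EXT_HI(x)	(((x) >> 6) << 16)	/* -> S5P_CISCCTRL */
#define MHRATIO_EXT_LO(x)	(((x) & 0x3f) << 10)	/* -> S5P_CIEXTEN  */

static uint32_t reassemble(uint32_t ciscctrl, uint32_t ciexten)
{
	uint32_t hi = (ciscctrl >> 16) & 0x1ff;	/* coarse 9-bit part */
	uint32_t lo = (ciexten >> 10) & 0x3f;	/* fine 6-bit part   */

	return (hi << 6) | lo;
}

int main(void)
{
	uint32_t ratio = 0x1234;	/* example value, fits in 15 bits */
	uint32_t ciscctrl = MHRATIO_EXT_HI(ratio);
	uint32_t ciexten = MHRATIO_EXT_LO(ratio);

	assert(reassemble(ciscctrl, ciexten) == ratio);
	printf("ratio 0x%x -> CISCCTRL 0x%08x, CIEXTEN 0x%08x\n",
	       ratio, ciscctrl, ciexten);
	return 0;
}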
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index 7913f93979b8..99664205ef4e 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -36,6 +36,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("Philips SAA7110 video decoder driver");
MODULE_AUTHOR("Pauline Middelink");
@@ -53,15 +54,12 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct saa7110 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
u8 reg[SAA7110_NR_REG];
v4l2_std_id norm;
int input;
int enable;
- int bright;
- int contrast;
- int hue;
- int sat;
wait_queue_head_t wq;
};
@@ -71,6 +69,11 @@ static inline struct saa7110 *to_saa7110(struct v4l2_subdev *sd)
return container_of(sd, struct saa7110, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct saa7110, hdl)->sd;
+}
+
/* ----------------------------------------------------------------------- */
/* I2C support functions */
/* ----------------------------------------------------------------------- */
@@ -326,73 +329,22 @@ static int saa7110_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int saa7110_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int saa7110_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct saa7110 *decoder = to_saa7110(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = decoder->bright;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = decoder->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = decoder->sat;
- break;
- case V4L2_CID_HUE:
- ctrl->value = decoder->hue;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int saa7110_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int saa7110_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct saa7110 *decoder = to_saa7110(sd);
+ struct v4l2_subdev *sd = to_sd(ctrl);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- if (decoder->bright != ctrl->value) {
- decoder->bright = ctrl->value;
- saa7110_write(sd, 0x19, decoder->bright);
- }
+ saa7110_write(sd, 0x19, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- if (decoder->contrast != ctrl->value) {
- decoder->contrast = ctrl->value;
- saa7110_write(sd, 0x13, decoder->contrast);
- }
+ saa7110_write(sd, 0x13, ctrl->val);
break;
case V4L2_CID_SATURATION:
- if (decoder->sat != ctrl->value) {
- decoder->sat = ctrl->value;
- saa7110_write(sd, 0x12, decoder->sat);
- }
+ saa7110_write(sd, 0x12, ctrl->val);
break;
case V4L2_CID_HUE:
- if (decoder->hue != ctrl->value) {
- decoder->hue = ctrl->value;
- saa7110_write(sd, 0x07, decoder->hue);
- }
+ saa7110_write(sd, 0x07, ctrl->val);
break;
default:
return -EINVAL;
@@ -409,11 +361,19 @@ static int saa7110_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops saa7110_ctrl_ops = {
+ .s_ctrl = saa7110_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops saa7110_core_ops = {
.g_chip_ident = saa7110_g_chip_ident,
- .g_ctrl = saa7110_g_ctrl,
- .s_ctrl = saa7110_s_ctrl,
- .queryctrl = saa7110_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = saa7110_s_std,
};
@@ -454,10 +414,25 @@ static int saa7110_probe(struct i2c_client *client,
decoder->norm = V4L2_STD_PAL;
decoder->input = 0;
decoder->enable = 1;
- decoder->bright = 32768;
- decoder->contrast = 32768;
- decoder->hue = 32768;
- decoder->sat = 32768;
+ v4l2_ctrl_handler_init(&decoder->hdl, 2);
+ v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ sd->ctrl_handler = &decoder->hdl;
+ if (decoder->hdl.error) {
+ int err = decoder->hdl.error;
+
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&decoder->hdl);
+
init_waitqueue_head(&decoder->wq);
rv = saa7110_write_block(sd, initseq, sizeof(initseq));
@@ -490,9 +465,11 @@ static int saa7110_probe(struct i2c_client *client,
static int saa7110_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct saa7110 *decoder = to_saa7110(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_saa7110(sd));
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
return 0;
}
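The saa7110 change above is the same control-framework conversion applied to several other decoders later in this series (tlv320aic23b, tvp514x, tvp5150, tvp7002): the driver drops its cached bright/contrast/hue/sat fields together with the queryctrl/g_ctrl/s_ctrl boilerplate and registers its controls with a v4l2_ctrl_handler, which then stores the current values and calls .s_ctrl to program the hardware. A condensed sketch of that pattern, with placeholder "foo" names and a single control; this is not a complete driver, only the registration and set-control path.

#include <linux/errno.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

struct foo_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd =
		&container_of(ctrl->handler, struct foo_state, hdl)->sd;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		/* program the hardware from ctrl->val here */
		v4l2_info(sd, "brightness set to %d\n", ctrl->val);
		return 0;
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};

static int foo_register_ctrls(struct foo_state *state)
{
	v4l2_ctrl_handler_init(&state->hdl, 1);
	v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	state->sd.ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		return err;
	}
	/* write the control defaults out to the hardware once */
	return v4l2_ctrl_handler_setup(&state->hdl);
}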
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index deb8fcf4aa49..74467c18e69a 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -3620,6 +3620,38 @@ struct saa7134_board saa7134_boards[] = {
.amux = 0,
},
},
+ [SAA7134_BOARD_ENCORE_ENLTV_FM3] = {
+ .name = "Encore ENLTV-FM 3",
+ .audio_clock = 0x02187de7,
+ .tuner_type = TUNER_TENA_TNF_5337,
+ .radio_type = TUNER_TEA5767,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x60,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 1,
+ .amux = LINE2,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .vmux = 1,
+ .amux = LINE1,
+ },
+ .mute = {
+ .name = name_mute,
+ .amux = LINE1,
+ .gpio = 0x43000,
+ },
+ },
[SAA7134_BOARD_CINERGY_HT_PCI] = {
.name = "Terratec Cinergy HT PCI",
.audio_clock = 0x00187de7,
@@ -6387,6 +6419,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.driver_data = SAA7134_BOARD_ENCORE_ENLTV_FM53,
}, {
.vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
+ .subvendor = 0x1a7f,
+ .subdevice = 0x2108,
+ .driver_data = SAA7134_BOARD_ENCORE_ENLTV_FM3,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x153b,
.subdevice = 0x1175,
@@ -7102,6 +7140,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
case SAA7134_BOARD_ENCORE_ENLTV_FM53:
+ case SAA7134_BOARD_ENCORE_ENLTV_FM3:
case SAA7134_BOARD_10MOONSTVMASTER3:
case SAA7134_BOARD_BEHOLD_401:
case SAA7134_BOARD_BEHOLD_403:
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 6abeecff6da7..41f836fc93ec 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -752,19 +752,28 @@ static int saa7134_hwfini(struct saa7134_dev *dev)
return 0;
}
-static void __devinit must_configure_manually(void)
+static void __devinit must_configure_manually(int has_eeprom)
{
unsigned int i,p;
- printk(KERN_WARNING
- "saa7134: <rant>\n"
- "saa7134: Congratulations! Your TV card vendor saved a few\n"
- "saa7134: cents for a eeprom, thus your pci board has no\n"
- "saa7134: subsystem ID and I can't identify it automatically\n"
- "saa7134: </rant>\n"
- "saa7134: I feel better now. Ok, here are the good news:\n"
- "saa7134: You can use the card=<nr> insmod option to specify\n"
- "saa7134: which board do you have. The list:\n");
+ if (!has_eeprom)
+ printk(KERN_WARNING
+ "saa7134: <rant>\n"
+ "saa7134: Congratulations! Your TV card vendor saved a few\n"
+ "saa7134: cents for a eeprom, thus your pci board has no\n"
+ "saa7134: subsystem ID and I can't identify it automatically\n"
+ "saa7134: </rant>\n"
+ "saa7134: I feel better now. Ok, here are the good news:\n"
+ "saa7134: You can use the card=<nr> insmod option to specify\n"
+ "saa7134: which board do you have. The list:\n");
+ else
+ printk(KERN_WARNING
+ "saa7134: Board is currently unknown. You might try to use the card=<nr>\n"
+ "saa7134: insmod option to specify which board do you have, but this is\n"
+ "saa7134: somewhat risky, as might damage your card. It is better to ask\n"
+ "saa7134: for support at linux-media@vger.kernel.org.\n"
+ "saa7134: The supported cards are:\n");
+
for (i = 0; i < saa7134_bcount; i++) {
printk(KERN_WARNING "saa7134: card=%d -> %-40.40s",
i,saa7134_boards[i].name);
@@ -936,8 +945,10 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (card[dev->nr] >= 0 &&
card[dev->nr] < saa7134_bcount)
dev->board = card[dev->nr];
- if (SAA7134_BOARD_NOAUTO == dev->board) {
- must_configure_manually();
+ if (SAA7134_BOARD_UNKNOWN == dev->board)
+ must_configure_manually(0);
+ else if (SAA7134_BOARD_NOAUTO == dev->board) {
+ must_configure_manually(1);
dev->board = SAA7134_BOARD_UNKNOWN;
}
dev->autodetected = card[dev->nr] != dev->board;
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index dc646e65edb7..c9eff0336aa6 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -681,6 +681,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
polling = 50; // ms
break;
case SAA7134_BOARD_ENCORE_ENLTV_FM53:
+ case SAA7134_BOARD_ENCORE_ENLTV_FM3:
ir_codes = RC_MAP_ENCORE_ENLTV_FM53;
mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
mask_keyup = 0x0040000;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 5b0a347b0b8f..f96cd5d761f9 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -327,6 +327,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_TECHNOTREND_BUDGET_T3000 181
#define SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG 182
#define SAA7134_BOARD_VIDEOMATE_M1F 183
+#define SAA7134_BOARD_ENCORE_ENLTV_FM3 184
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index fc611ebeb82c..84d4c7c83435 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/mfd/core.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
@@ -790,7 +791,7 @@ static int __devinit timblogiw_probe(struct platform_device *pdev)
{
int err;
struct timblogiw *lw = NULL;
- struct timb_video_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_video_platform_data *pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
diff --git a/drivers/media/video/tlv320aic23b.c b/drivers/media/video/tlv320aic23b.c
index dfc4dd7c5097..286ec7e7062a 100644
--- a/drivers/media/video/tlv320aic23b.c
+++ b/drivers/media/video/tlv320aic23b.c
@@ -31,6 +31,7 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("tlv320aic23b driver");
MODULE_AUTHOR("Scott Alfter, Ulf Eklund, Hans Verkuil");
@@ -41,7 +42,7 @@ MODULE_LICENSE("GPL");
struct tlv320aic23b_state {
struct v4l2_subdev sd;
- u8 muted;
+ struct v4l2_ctrl_handler hdl;
};
static inline struct tlv320aic23b_state *to_state(struct v4l2_subdev *sd)
@@ -49,6 +50,11 @@ static inline struct tlv320aic23b_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct tlv320aic23b_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct tlv320aic23b_state, hdl)->sd;
+}
+
static int tlv320aic23b_write(struct v4l2_subdev *sd, int reg, u16 val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -85,44 +91,44 @@ static int tlv320aic23b_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
return 0;
}
-static int tlv320aic23b_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tlv320aic23b_state *state = to_state(sd);
-
- if (ctrl->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- ctrl->value = state->muted;
- return 0;
-}
-
-static int tlv320aic23b_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int tlv320aic23b_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct tlv320aic23b_state *state = to_state(sd);
-
- if (ctrl->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- state->muted = ctrl->value;
- tlv320aic23b_write(sd, 0, 0x180); /* mute both channels */
- /* set gain on both channels to +3.0 dB */
- if (!state->muted)
- tlv320aic23b_write(sd, 0, 0x119);
- return 0;
+ struct v4l2_subdev *sd = to_sd(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ tlv320aic23b_write(sd, 0, 0x180); /* mute both channels */
+ /* set gain on both channels to +3.0 dB */
+ if (!ctrl->val)
+ tlv320aic23b_write(sd, 0, 0x119);
+ return 0;
+ }
+ return -EINVAL;
}
static int tlv320aic23b_log_status(struct v4l2_subdev *sd)
{
struct tlv320aic23b_state *state = to_state(sd);
- v4l2_info(sd, "Input: %s\n", state->muted ? "muted" : "active");
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops tlv320aic23b_ctrl_ops = {
+ .s_ctrl = tlv320aic23b_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops tlv320aic23b_core_ops = {
.log_status = tlv320aic23b_log_status,
- .g_ctrl = tlv320aic23b_g_ctrl,
- .s_ctrl = tlv320aic23b_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_audio_ops tlv320aic23b_audio_ops = {
@@ -161,7 +167,6 @@ static int tlv320aic23b_probe(struct i2c_client *client,
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &tlv320aic23b_ops);
- state->muted = 0;
/* Initialize tlv320aic23b */
@@ -177,15 +182,30 @@ static int tlv320aic23b_probe(struct i2c_client *client,
tlv320aic23b_write(sd, 8, 0x000);
/* activate digital interface */
tlv320aic23b_write(sd, 9, 0x001);
+
+ v4l2_ctrl_handler_init(&state->hdl, 1);
+ v4l2_ctrl_new_std(&state->hdl, &tlv320aic23b_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&state->hdl);
return 0;
}
static int tlv320aic23b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tlv320aic23b_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
return 0;
}
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 45bcf0358a1d..9b3e828b0775 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -37,6 +37,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-mediabus.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/tvp514x.h>
#include "tvp514x_regs.h"
@@ -97,6 +98,7 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
*/
struct tvp514x_decoder {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
struct tvp514x_reg tvp514x_regs[ARRAY_SIZE(tvp514x_reg_list_default)];
const struct tvp514x_platform_data *pdata;
@@ -238,6 +240,11 @@ static inline struct tvp514x_decoder *to_decoder(struct v4l2_subdev *sd)
return container_of(sd, struct tvp514x_decoder, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct tvp514x_decoder, hdl)->sd;
+}
+
/**
* tvp514x_read_reg() - Read a value from a register in an TVP5146/47.
@@ -719,213 +726,54 @@ static int tvp514x_s_routing(struct v4l2_subdev *sd,
}
/**
- * tvp514x_queryctrl() - V4L2 decoder interface handler for queryctrl
- * @sd: pointer to standard V4L2 sub-device structure
- * @qctrl: standard V4L2 v4l2_queryctrl structure
- *
- * If the requested control is supported, returns the control information.
- * Otherwise, returns -EINVAL if the control is not supported.
- */
-static int
-tvp514x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qctrl)
-{
- int err = -EINVAL;
-
- if (qctrl == NULL)
- return err;
-
- switch (qctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- /* Brightness supported is (0-255), */
- err = v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 128);
- break;
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- /**
- * Saturation and Contrast supported is -
- * Contrast: 0 - 255 (Default - 128)
- * Saturation: 0 - 255 (Default - 128)
- */
- err = v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 128);
- break;
- case V4L2_CID_HUE:
- /* Hue Supported is -
- * Hue - -180 - +180 (Default - 0, Step - +180)
- */
- err = v4l2_ctrl_query_fill(qctrl, -180, 180, 180, 0);
- break;
- case V4L2_CID_AUTOGAIN:
- /**
- * Auto Gain supported is -
- * 0 - 1 (Default - 1)
- */
- err = v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 1);
- break;
- default:
- v4l2_err(sd, "invalid control id %d\n", qctrl->id);
- return err;
- }
-
- v4l2_dbg(1, debug, sd, "Query Control:%s: Min - %d, Max - %d, Def - %d\n",
- qctrl->name, qctrl->minimum, qctrl->maximum,
- qctrl->default_value);
-
- return err;
-}
-
-/**
- * tvp514x_g_ctrl() - V4L2 decoder interface handler for g_ctrl
- * @sd: pointer to standard V4L2 sub-device structure
- * @ctrl: pointer to v4l2_control structure
- *
- * If the requested control is supported, returns the control's current
- * value from the decoder. Otherwise, returns -EINVAL if the control is not
- * supported.
- */
-static int
-tvp514x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tvp514x_decoder *decoder = to_decoder(sd);
-
- if (ctrl == NULL)
- return -EINVAL;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = decoder->tvp514x_regs[REG_BRIGHTNESS].val;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = decoder->tvp514x_regs[REG_CONTRAST].val;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = decoder->tvp514x_regs[REG_SATURATION].val;
- break;
- case V4L2_CID_HUE:
- ctrl->value = decoder->tvp514x_regs[REG_HUE].val;
- if (ctrl->value == 0x7F)
- ctrl->value = 180;
- else if (ctrl->value == 0x80)
- ctrl->value = -180;
- else
- ctrl->value = 0;
-
- break;
- case V4L2_CID_AUTOGAIN:
- ctrl->value = decoder->tvp514x_regs[REG_AFE_GAIN_CTRL].val;
- if ((ctrl->value & 0x3) == 3)
- ctrl->value = 1;
- else
- ctrl->value = 0;
-
- break;
- default:
- v4l2_err(sd, "invalid control id %d\n", ctrl->id);
- return -EINVAL;
- }
-
- v4l2_dbg(1, debug, sd, "Get Control: ID - %d - %d\n",
- ctrl->id, ctrl->value);
- return 0;
-}
-
-/**
* tvp514x_s_ctrl() - V4L2 decoder interface handler for s_ctrl
- * @sd: pointer to standard V4L2 sub-device structure
- * @ctrl: pointer to v4l2_control structure
+ * @ctrl: pointer to v4l2_ctrl structure
*
* If the requested control is supported, sets the control's current
* value in HW. Otherwise, returns -EINVAL if the control is not supported.
*/
-static int
-tvp514x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int tvp514x_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct tvp514x_decoder *decoder = to_decoder(sd);
int err = -EINVAL, value;
- if (ctrl == NULL)
- return err;
-
- value = ctrl->value;
+ value = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- if (ctrl->value < 0 || ctrl->value > 255) {
- v4l2_err(sd, "invalid brightness setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
- err = tvp514x_write_reg(sd, REG_BRIGHTNESS,
- value);
- if (err)
- return err;
-
- decoder->tvp514x_regs[REG_BRIGHTNESS].val = value;
+ err = tvp514x_write_reg(sd, REG_BRIGHTNESS, value);
+ if (!err)
+ decoder->tvp514x_regs[REG_BRIGHTNESS].val = value;
break;
case V4L2_CID_CONTRAST:
- if (ctrl->value < 0 || ctrl->value > 255) {
- v4l2_err(sd, "invalid contrast setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
err = tvp514x_write_reg(sd, REG_CONTRAST, value);
- if (err)
- return err;
-
- decoder->tvp514x_regs[REG_CONTRAST].val = value;
+ if (!err)
+ decoder->tvp514x_regs[REG_CONTRAST].val = value;
break;
case V4L2_CID_SATURATION:
- if (ctrl->value < 0 || ctrl->value > 255) {
- v4l2_err(sd, "invalid saturation setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
err = tvp514x_write_reg(sd, REG_SATURATION, value);
- if (err)
- return err;
-
- decoder->tvp514x_regs[REG_SATURATION].val = value;
+ if (!err)
+ decoder->tvp514x_regs[REG_SATURATION].val = value;
break;
case V4L2_CID_HUE:
if (value == 180)
value = 0x7F;
else if (value == -180)
value = 0x80;
- else if (value == 0)
- value = 0;
- else {
- v4l2_err(sd, "invalid hue setting %d\n", ctrl->value);
- return -ERANGE;
- }
err = tvp514x_write_reg(sd, REG_HUE, value);
- if (err)
- return err;
-
- decoder->tvp514x_regs[REG_HUE].val = value;
+ if (!err)
+ decoder->tvp514x_regs[REG_HUE].val = value;
break;
case V4L2_CID_AUTOGAIN:
- if (value == 1)
- value = 0x0F;
- else if (value == 0)
- value = 0x0C;
- else {
- v4l2_err(sd, "invalid auto gain setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
- err = tvp514x_write_reg(sd, REG_AFE_GAIN_CTRL, value);
- if (err)
- return err;
-
- decoder->tvp514x_regs[REG_AFE_GAIN_CTRL].val = value;
+ err = tvp514x_write_reg(sd, REG_AFE_GAIN_CTRL, value ? 0x0f : 0x0c);
+ if (!err)
+ decoder->tvp514x_regs[REG_AFE_GAIN_CTRL].val = value;
break;
- default:
- v4l2_err(sd, "invalid control id %d\n", ctrl->id);
- return err;
}
v4l2_dbg(1, debug, sd, "Set Control: ID - %d - %d\n",
- ctrl->id, ctrl->value);
-
+ ctrl->id, ctrl->val);
return err;
}
@@ -1104,10 +952,18 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable)
return err;
}
-static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
- .queryctrl = tvp514x_queryctrl,
- .g_ctrl = tvp514x_g_ctrl,
+static const struct v4l2_ctrl_ops tvp514x_ctrl_ops = {
.s_ctrl = tvp514x_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = tvp514x_s_std,
};
@@ -1190,6 +1046,27 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
sd = &decoder->sd;
v4l2_i2c_subdev_init(sd, client, &tvp514x_ops);
+ v4l2_ctrl_handler_init(&decoder->hdl, 5);
+ v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
+ V4L2_CID_HUE, -180, 180, 180, 0);
+ v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ sd->ctrl_handler = &decoder->hdl;
+ if (decoder->hdl.error) {
+ int err = decoder->hdl.error;
+
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&decoder->hdl);
+
v4l2_info(sd, "%s decoder driver registered !!\n", sd->name);
return 0;
@@ -1209,6 +1086,7 @@ static int tvp514x_remove(struct i2c_client *client)
struct tvp514x_decoder *decoder = to_decoder(sd);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&decoder->hdl);
kfree(decoder);
return 0;
}
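With the control handler now enforcing the (-180, 180, step 180) range, tvp514x_s_ctrl() above only has to map the three legal hue values onto the register encoding; the 0x7F and 0x80 codes come straight from the hunk. A standalone restatement of that mapping, as a self-checking sketch:

#include <assert.h>
#include <stdint.h>

static uint8_t hue_to_reg(int hue)
{
	if (hue == 180)
		return 0x7f;	/* maximum positive phase shift */
	if (hue == -180)
		return 0x80;	/* maximum negative phase shift */
	return 0;		/* default: no shift */
}

int main(void)
{
	assert(hue_to_reg(180) == 0x7f);
	assert(hue_to_reg(-180) == 0x80);
	assert(hue_to_reg(0) == 0x00);
	return 0;
}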
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 58927664d3ea..959d690363bb 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -12,6 +12,7 @@
#include <media/v4l2-device.h>
#include <media/tvp5150.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include "tvp5150_reg.h"
@@ -24,58 +25,14 @@ static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
-/* supported controls */
-static struct v4l2_queryctrl tvp5150_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 128,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 128,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = 0,
- }
-};
-
struct tvp5150 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
v4l2_std_id norm; /* Current set standard */
u32 input;
u32 output;
int enable;
- int bright;
- int contrast;
- int hue;
- int sat;
};
static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd)
@@ -83,6 +40,11 @@ static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd)
return container_of(sd, struct tvp5150, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct tvp5150, hdl)->sd;
+}
+
static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
{
struct i2c_client *c = v4l2_get_subdevdata(sd);
@@ -810,64 +772,28 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
tvp5150_write_inittab(sd, tvp5150_init_enable);
/* Initialize image preferences */
- tvp5150_write(sd, TVP5150_BRIGHT_CTL, decoder->bright);
- tvp5150_write(sd, TVP5150_CONTRAST_CTL, decoder->contrast);
- tvp5150_write(sd, TVP5150_SATURATION_CTL, decoder->contrast);
- tvp5150_write(sd, TVP5150_HUE_CTL, decoder->hue);
+ v4l2_ctrl_handler_setup(&decoder->hdl);
tvp5150_set_std(sd, decoder->norm);
return 0;
};
-static int tvp5150_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- v4l2_dbg(1, debug, sd, "g_ctrl called\n");
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = tvp5150_read(sd, TVP5150_BRIGHT_CTL);
- return 0;
- case V4L2_CID_CONTRAST:
- ctrl->value = tvp5150_read(sd, TVP5150_CONTRAST_CTL);
- return 0;
- case V4L2_CID_SATURATION:
- ctrl->value = tvp5150_read(sd, TVP5150_SATURATION_CTL);
- return 0;
- case V4L2_CID_HUE:
- ctrl->value = tvp5150_read(sd, TVP5150_HUE_CTL);
- return 0;
- }
- return -EINVAL;
-}
-
-static int tvp5150_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
{
- u8 i, n;
- n = ARRAY_SIZE(tvp5150_qctrl);
-
- for (i = 0; i < n; i++) {
- if (ctrl->id != tvp5150_qctrl[i].id)
- continue;
- if (ctrl->value < tvp5150_qctrl[i].minimum ||
- ctrl->value > tvp5150_qctrl[i].maximum)
- return -ERANGE;
- v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
- ctrl->id, ctrl->value);
- break;
- }
+ struct v4l2_subdev *sd = to_sd(ctrl);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- tvp5150_write(sd, TVP5150_BRIGHT_CTL, ctrl->value);
+ tvp5150_write(sd, TVP5150_BRIGHT_CTL, ctrl->val);
return 0;
case V4L2_CID_CONTRAST:
- tvp5150_write(sd, TVP5150_CONTRAST_CTL, ctrl->value);
+ tvp5150_write(sd, TVP5150_CONTRAST_CTL, ctrl->val);
return 0;
case V4L2_CID_SATURATION:
- tvp5150_write(sd, TVP5150_SATURATION_CTL, ctrl->value);
+ tvp5150_write(sd, TVP5150_SATURATION_CTL, ctrl->val);
return 0;
case V4L2_CID_HUE:
- tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->value);
+ tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
return 0;
}
return -EINVAL;
@@ -995,29 +921,21 @@ static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
}
-static int tvp5150_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- int i;
-
- v4l2_dbg(1, debug, sd, "queryctrl called\n");
-
- for (i = 0; i < ARRAY_SIZE(tvp5150_qctrl); i++)
- if (qc->id && qc->id == tvp5150_qctrl[i].id) {
- memcpy(qc, &(tvp5150_qctrl[i]),
- sizeof(*qc));
- return 0;
- }
-
- return -EINVAL;
-}
-
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = {
+ .s_ctrl = tvp5150_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
.log_status = tvp5150_log_status,
- .g_ctrl = tvp5150_g_ctrl,
- .s_ctrl = tvp5150_s_ctrl,
- .queryctrl = tvp5150_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = tvp5150_s_std,
.reset = tvp5150_reset,
.g_chip_ident = tvp5150_g_chip_ident,
@@ -1077,10 +995,25 @@ static int tvp5150_probe(struct i2c_client *c,
core->norm = V4L2_STD_ALL; /* Default is autodetect */
core->input = TVP5150_COMPOSITE1;
core->enable = 1;
- core->bright = 128;
- core->contrast = 128;
- core->hue = 0;
- core->sat = 128;
+
+ v4l2_ctrl_handler_init(&core->hdl, 4);
+ v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ sd->ctrl_handler = &core->hdl;
+ if (core->hdl.error) {
+ int err = core->hdl.error;
+
+ v4l2_ctrl_handler_free(&core->hdl);
+ kfree(core);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&core->hdl);
if (debug > 1)
tvp5150_log_status(sd);
@@ -1090,12 +1023,14 @@ static int tvp5150_probe(struct i2c_client *c,
static int tvp5150_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_dbg(1, debug, sd,
"tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
c->addr << 1);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&decoder->hdl);
kfree(to_tvp5150(sd));
return 0;
}
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index c799e4eb6fcd..b799851bf3d0 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -32,6 +32,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
#include "tvp7002_reg.h"
MODULE_DESCRIPTION("TI TVP7002 Video and Graphics Digitizer driver");
@@ -421,13 +422,13 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
/* Device definition */
struct tvp7002 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
const struct tvp7002_config *pdata;
int ver;
int streaming;
const struct tvp7002_preset_definition *current_preset;
- u8 gain;
};
/*
@@ -441,6 +442,11 @@ static inline struct tvp7002 *to_tvp7002(struct v4l2_subdev *sd)
return container_of(sd, struct tvp7002, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct tvp7002, hdl)->sd;
+}
+
/*
* tvp7002_read - Read a value from a register in an TVP7002
* @sd: ptr to v4l2_subdev struct
@@ -606,78 +612,25 @@ static int tvp7002_s_dv_preset(struct v4l2_subdev *sd,
}
/*
- * tvp7002_g_ctrl() - Get a control
- * @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to v4l2_control struct
- *
- * Get a control for a TVP7002 decoder device.
- * Returns zero when successful or -EINVAL if register access fails.
- */
-static int tvp7002_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tvp7002 *device = to_tvp7002(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- ctrl->value = device->gain;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/*
* tvp7002_s_ctrl() - Set a control
- * @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to v4l2_control struct
+ * @ctrl: ptr to v4l2_ctrl struct
*
* Set a control in TVP7002 decoder device.
* Returns zero when successful or -EINVAL if register access fails.
*/
-static int tvp7002_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int tvp7002_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct tvp7002 *device = to_tvp7002(sd);
+ struct v4l2_subdev *sd = to_sd(ctrl);
int error = 0;
switch (ctrl->id) {
case V4L2_CID_GAIN:
- tvp7002_write_err(sd, TVP7002_R_FINE_GAIN,
- ctrl->value & 0xff, &error);
- tvp7002_write_err(sd, TVP7002_G_FINE_GAIN,
- ctrl->value & 0xff, &error);
- tvp7002_write_err(sd, TVP7002_B_FINE_GAIN,
- ctrl->value & 0xff, &error);
-
- if (error < 0)
- return error;
-
- /* Set only after knowing there is no error */
- device->gain = ctrl->value & 0xff;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * tvp7002_queryctrl() - Query a control
- * @sd: ptr to v4l2_subdev struct
- * @qc: ptr to v4l2_queryctrl struct
- *
- * Query a control of a TVP7002 decoder device.
- * Returns zero when successful or -EINVAL if register read fails.
- */
-static int tvp7002_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_GAIN:
- /*
- * Gain is supported [0-255, default=0, step=1]
- */
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 0);
- default:
- return -EINVAL;
+ tvp7002_write_err(sd, TVP7002_R_FINE_GAIN, ctrl->val, &error);
+ tvp7002_write_err(sd, TVP7002_G_FINE_GAIN, ctrl->val, &error);
+ tvp7002_write_err(sd, TVP7002_B_FINE_GAIN, ctrl->val, &error);
+ return error;
}
+ return -EINVAL;
}
/*
@@ -924,7 +877,7 @@ static int tvp7002_log_status(struct v4l2_subdev *sd)
device->streaming ? "yes" : "no");
/* Print the current value of the gain control */
- v4l2_info(sd, "Gain: %u\n", device->gain);
+ v4l2_ctrl_handler_log_status(&device->hdl, sd->name);
return 0;
}
@@ -946,13 +899,21 @@ static int tvp7002_enum_dv_presets(struct v4l2_subdev *sd,
return v4l_fill_dv_preset_info(tvp7002_presets[preset->index].preset, preset);
}
+static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
+ .s_ctrl = tvp7002_s_ctrl,
+};
+
/* V4L2 core operation handlers */
static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
.g_chip_ident = tvp7002_g_chip_ident,
.log_status = tvp7002_log_status,
- .g_ctrl = tvp7002_g_ctrl,
- .s_ctrl = tvp7002_s_ctrl,
- .queryctrl = tvp7002_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = tvp7002_g_register,
.s_register = tvp7002_s_register,
@@ -977,12 +938,6 @@ static const struct v4l2_subdev_ops tvp7002_ops = {
.video = &tvp7002_video_ops,
};
-static struct tvp7002 tvp7002_dev = {
- .streaming = 0,
- .current_preset = tvp7002_presets,
- .gain = 0,
-};
-
/*
* tvp7002_probe - Probe a TVP7002 device
* @c: ptr to i2c_client struct
@@ -1013,14 +968,14 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
return -ENODEV;
}
- device = kmalloc(sizeof(struct tvp7002), GFP_KERNEL);
+ device = kzalloc(sizeof(struct tvp7002), GFP_KERNEL);
if (!device)
return -ENOMEM;
- *device = tvp7002_dev;
sd = &device->sd;
device->pdata = c->dev.platform_data;
+ device->current_preset = tvp7002_presets;
/* Tell v4l2 the device is ready */
v4l2_i2c_subdev_init(sd, c, &tvp7002_ops);
@@ -1060,6 +1015,19 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
preset.preset = device->current_preset->preset;
error = tvp7002_s_dv_preset(sd, &preset);
+ v4l2_ctrl_handler_init(&device->hdl, 1);
+ v4l2_ctrl_new_std(&device->hdl, &tvp7002_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 0);
+ sd->ctrl_handler = &device->hdl;
+ if (device->hdl.error) {
+ int err = device->hdl.error;
+
+ v4l2_ctrl_handler_free(&device->hdl);
+ kfree(device);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&device->hdl);
+
found_error:
if (error < 0)
kfree(device);
@@ -1083,6 +1051,7 @@ static int tvp7002_remove(struct i2c_client *c)
"on address 0x%x\n", c->addr);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&device->hdl);
kfree(device);
return 0;
}
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index dc82eb83c1d4..c19208a07b48 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -97,6 +97,14 @@ static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pi
return 0;
}
+static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+ struct v4l2_pix_format_mplane __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
+ return -EFAULT;
+ return 0;
+}
+
static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
{
if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
@@ -104,6 +112,14 @@ static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pi
return 0;
}
+static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+ struct v4l2_pix_format_mplane __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
+ return -EFAULT;
+ return 0;
+}
+
static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
{
if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
@@ -136,6 +152,7 @@ struct v4l2_format32 {
enum v4l2_buf_type type;
union {
struct v4l2_pix_format pix;
+ struct v4l2_pix_format_mplane pix_mp;
struct v4l2_window32 win;
struct v4l2_vbi_format vbi;
struct v4l2_sliced_vbi_format sliced;
@@ -152,6 +169,10 @@ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
+ &up->fmt.pix_mp);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
@@ -181,6 +202,10 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
+ &up->fmt.pix_mp);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
@@ -232,6 +257,17 @@ static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32
return 0;
}
+struct v4l2_plane32 {
+ __u32 bytesused;
+ __u32 length;
+ union {
+ __u32 mem_offset;
+ compat_long_t userptr;
+ } m;
+ __u32 data_offset;
+ __u32 reserved[11];
+};
+
struct v4l2_buffer32 {
__u32 index;
enum v4l2_buf_type type;
@@ -247,14 +283,64 @@ struct v4l2_buffer32 {
union {
__u32 offset;
compat_long_t userptr;
+ compat_caddr_t planes;
} m;
__u32 length;
__u32 input;
__u32 reserved;
};
+static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+ enum v4l2_memory memory)
+{
+ void __user *up_pln;
+ compat_long_t p;
+
+ if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
+ copy_in_user(&up->data_offset, &up32->data_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+
+ if (memory == V4L2_MEMORY_USERPTR) {
+ if (get_user(p, &up32->m.userptr))
+ return -EFAULT;
+ up_pln = compat_ptr(p);
+ if (put_user((unsigned long)up_pln, &up->m.userptr))
+ return -EFAULT;
+ } else {
+ if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+ enum v4l2_memory memory)
+{
+ if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
+ copy_in_user(&up32->data_offset, &up->data_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+
+ /* For MMAP, driver might've set up the offset, so copy it back.
+ * USERPTR stays the same (was userspace-provided), so no copying. */
+ if (memory == V4L2_MEMORY_MMAP)
+ if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
{
+ struct v4l2_plane32 __user *uplane32;
+ struct v4l2_plane __user *uplane;
+ compat_caddr_t p;
+ int num_planes;
+ int ret;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
get_user(kp->index, &up->index) ||
@@ -263,33 +349,84 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
get_user(kp->memory, &up->memory) ||
get_user(kp->input, &up->input))
return -EFAULT;
- switch (kp->memory) {
- case V4L2_MEMORY_MMAP:
- if (get_user(kp->length, &up->length) ||
- get_user(kp->m.offset, &up->m.offset))
+
+ if (V4L2_TYPE_IS_OUTPUT(kp->type))
+ if (get_user(kp->bytesused, &up->bytesused) ||
+ get_user(kp->field, &up->field) ||
+ get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ get_user(kp->timestamp.tv_usec,
+ &up->timestamp.tv_usec))
return -EFAULT;
- break;
- case V4L2_MEMORY_USERPTR:
- {
- compat_long_t tmp;
- if (get_user(kp->length, &up->length) ||
- get_user(tmp, &up->m.userptr))
+ if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+ if (get_user(kp->length, &up->length))
return -EFAULT;
- kp->m.userptr = (unsigned long)compat_ptr(tmp);
+ num_planes = kp->length;
+ if (num_planes == 0) {
+ kp->m.planes = NULL;
+ /* num_planes == 0 is legal, e.g. when userspace doesn't
+ * need planes array on DQBUF*/
+ return 0;
}
- break;
- case V4L2_MEMORY_OVERLAY:
- if (get_user(kp->m.offset, &up->m.offset))
+
+ if (get_user(p, &up->m.planes))
return -EFAULT;
- break;
+
+ uplane32 = compat_ptr(p);
+ if (!access_ok(VERIFY_READ, uplane32,
+ num_planes * sizeof(struct v4l2_plane32)))
+ return -EFAULT;
+
+ /* We don't really care if userspace decides to kill itself
+ * by passing a very big num_planes value */
+ uplane = compat_alloc_user_space(num_planes *
+ sizeof(struct v4l2_plane));
+ kp->m.planes = uplane;
+
+ while (--num_planes >= 0) {
+ ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+ if (ret)
+ return ret;
+ ++uplane;
+ ++uplane32;
+ }
+ } else {
+ switch (kp->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (get_user(kp->length, &up->length) ||
+ get_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ {
+ compat_long_t tmp;
+
+ if (get_user(kp->length, &up->length) ||
+ get_user(tmp, &up->m.userptr))
+ return -EFAULT;
+
+ kp->m.userptr = (unsigned long)compat_ptr(tmp);
+ }
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (get_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ }
}
+
return 0;
}
static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
{
+ struct v4l2_plane32 __user *uplane32;
+ struct v4l2_plane __user *uplane;
+ compat_caddr_t p;
+ int num_planes;
+ int ret;
+
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
put_user(kp->index, &up->index) ||
put_user(kp->type, &up->type) ||
@@ -297,22 +434,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
put_user(kp->memory, &up->memory) ||
put_user(kp->input, &up->input))
return -EFAULT;
- switch (kp->memory) {
- case V4L2_MEMORY_MMAP:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.offset, &up->m.offset))
- return -EFAULT;
- break;
- case V4L2_MEMORY_USERPTR:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.userptr, &up->m.userptr))
- return -EFAULT;
- break;
- case V4L2_MEMORY_OVERLAY:
- if (put_user(kp->m.offset, &up->m.offset))
- return -EFAULT;
- break;
- }
+
if (put_user(kp->bytesused, &up->bytesused) ||
put_user(kp->field, &up->field) ||
put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
@@ -321,6 +443,43 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
put_user(kp->sequence, &up->sequence) ||
put_user(kp->reserved, &up->reserved))
return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+ num_planes = kp->length;
+ if (num_planes == 0)
+ return 0;
+
+ uplane = kp->m.planes;
+ if (get_user(p, &up->m.planes))
+ return -EFAULT;
+ uplane32 = compat_ptr(p);
+
+ while (--num_planes >= 0) {
+ ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
+ if (ret)
+ return ret;
+ ++uplane;
+ ++uplane32;
+ }
+ } else {
+ switch (kp->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (put_user(kp->length, &up->length) ||
+ put_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (put_user(kp->length, &up->length) ||
+ put_user(kp->m.userptr, &up->m.userptr))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (put_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ }
+ }
+
return 0;
}
@@ -442,12 +601,13 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
- if (!access_ok(VERIFY_READ, ucontrols, n * sizeof(struct v4l2_ext_control)))
+ if (!access_ok(VERIFY_READ, ucontrols,
+ n * sizeof(struct v4l2_ext_control32)))
return -EFAULT;
kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
kp->controls = kcontrols;
while (--n >= 0) {
- if (copy_in_user(kcontrols, ucontrols, sizeof(*kcontrols)))
+ if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
return -EFAULT;
if (ctrl_is_pointer(kcontrols->id)) {
void __user *s;
@@ -483,7 +643,8 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
- if (!access_ok(VERIFY_WRITE, ucontrols, n * sizeof(struct v4l2_ext_control)))
+ if (!access_ok(VERIFY_WRITE, ucontrols,
+ n * sizeof(struct v4l2_ext_control32)))
return -EFAULT;
while (--n >= 0) {
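The per-plane translation added above exists because struct v4l2_plane32 packs its m union into 32 bits (compat_long_t / compat_caddr_t) while the native structure carries a pointer-sized userptr, so on a 64-bit kernel the two layouts differ in size and alignment and cannot be block-copied; each plane is instead copied field by field through a buffer obtained with compat_alloc_user_space(). A user-space sketch of the size mismatch, using simplified stand-ins for the real UAPI structs (build it as a 64-bit binary to see the difference):

#include <stdint.h>
#include <stdio.h>

struct plane_native {		/* 64-bit kernel's view of struct v4l2_plane */
	uint32_t bytesused;
	uint32_t length;
	union {
		uint32_t mem_offset;
		unsigned long userptr;	/* pointer-sized: 8 bytes on 64-bit */
	} m;
	uint32_t data_offset;
	uint32_t reserved[11];
};

struct plane_compat {		/* layout a 32-bit process passes in */
	uint32_t bytesused;
	uint32_t length;
	union {
		uint32_t mem_offset;
		int32_t userptr;	/* compat_long_t: 4 bytes */
	} m;
	uint32_t data_offset;
	uint32_t reserved[11];
};

int main(void)
{
	printf("native plane: %zu bytes, compat plane: %zu bytes\n",
	       sizeof(struct plane_native), sizeof(struct plane_compat));
	return 0;
}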
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index f51327ef6757..8360ed2d933a 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -165,6 +165,8 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
[V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
+ [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -426,20 +428,33 @@ static void dbgbuf(unsigned int cmd, struct video_device *vfd,
struct v4l2_buffer *p)
{
struct v4l2_timecode *tc = &p->timecode;
+ struct v4l2_plane *plane;
+ int i;
dbgarg(cmd, "%02ld:%02d:%02d.%08ld index=%d, type=%s, "
- "bytesused=%d, flags=0x%08d, "
- "field=%0d, sequence=%d, memory=%s, offset/userptr=0x%08lx, length=%d\n",
+ "flags=0x%08d, field=%0d, sequence=%d, memory=%s\n",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
prt_names(p->type, v4l2_type_names),
- p->bytesused, p->flags,
- p->field, p->sequence,
- prt_names(p->memory, v4l2_memory_names),
- p->m.userptr, p->length);
+ p->flags, p->field, p->sequence,
+ prt_names(p->memory, v4l2_memory_names));
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
+ for (i = 0; i < p->length; ++i) {
+ plane = &p->m.planes[i];
+ dbgarg2("plane %d: bytesused=%d, data_offset=0x%08x "
+ "offset/userptr=0x%08lx, length=%d\n",
+ i, plane->bytesused, plane->data_offset,
+ plane->m.userptr, plane->length);
+ }
+ } else {
+ dbgarg2("bytesused=%d, offset/userptr=0x%08lx, length=%d\n",
+ p->bytesused, p->m.userptr, p->length);
+ }
+
dbgarg2("timecode=%02d:%02d:%02d type=%d, "
"flags=0x%08d, frames=%d, userbits=0x%08x\n",
tc->hours, tc->minutes, tc->seconds,
@@ -467,6 +482,27 @@ static inline void v4l_print_pix_fmt(struct video_device *vfd,
fmt->bytesperline, fmt->sizeimage, fmt->colorspace);
};
+static inline void v4l_print_pix_fmt_mplane(struct video_device *vfd,
+ struct v4l2_pix_format_mplane *fmt)
+{
+ int i;
+
+ dbgarg2("width=%d, height=%d, format=%c%c%c%c, field=%s, "
+ "colorspace=%d, num_planes=%d\n",
+ fmt->width, fmt->height,
+ (fmt->pixelformat & 0xff),
+ (fmt->pixelformat >> 8) & 0xff,
+ (fmt->pixelformat >> 16) & 0xff,
+ (fmt->pixelformat >> 24) & 0xff,
+ prt_names(fmt->field, v4l2_field_names),
+ fmt->colorspace, fmt->num_planes);
+
+ for (i = 0; i < fmt->num_planes; ++i)
+ dbgarg2("plane %d: bytesperline=%d sizeimage=%d\n", i,
+ fmt->plane_fmt[i].bytesperline,
+ fmt->plane_fmt[i].sizeimage);
+}
+
static inline void v4l_print_ext_ctrls(unsigned int cmd,
struct video_device *vfd, struct v4l2_ext_controls *c, int show_vals)
{
@@ -520,7 +556,12 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (ops->vidioc_g_fmt_vid_cap)
+ if (ops->vidioc_g_fmt_vid_cap ||
+ ops->vidioc_g_fmt_vid_cap_mplane)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (ops->vidioc_g_fmt_vid_cap_mplane)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
@@ -528,7 +569,12 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (ops->vidioc_g_fmt_vid_out)
+ if (ops->vidioc_g_fmt_vid_out ||
+ ops->vidioc_g_fmt_vid_out_mplane)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (ops->vidioc_g_fmt_vid_out_mplane)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
@@ -559,12 +605,70 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
return -EINVAL;
}
+/**
+ * fmt_sp_to_mp() - Convert a single-plane format to its multi-planar 1-plane
+ * equivalent
+ */
+static int fmt_sp_to_mp(const struct v4l2_format *f_sp,
+ struct v4l2_format *f_mp)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f_mp->fmt.pix_mp;
+ const struct v4l2_pix_format *pix = &f_sp->fmt.pix;
+
+ if (f_sp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ f_mp->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ else if (f_sp->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ f_mp->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else
+ return -EINVAL;
+
+ pix_mp->width = pix->width;
+ pix_mp->height = pix->height;
+ pix_mp->pixelformat = pix->pixelformat;
+ pix_mp->field = pix->field;
+ pix_mp->colorspace = pix->colorspace;
+ pix_mp->num_planes = 1;
+ pix_mp->plane_fmt[0].sizeimage = pix->sizeimage;
+ pix_mp->plane_fmt[0].bytesperline = pix->bytesperline;
+
+ return 0;
+}
+
+/**
+ * fmt_mp_to_sp() - Convert a multi-planar 1-plane format to its single-planar
+ * equivalent
+ */
+static int fmt_mp_to_sp(const struct v4l2_format *f_mp,
+ struct v4l2_format *f_sp)
+{
+ const struct v4l2_pix_format_mplane *pix_mp = &f_mp->fmt.pix_mp;
+ struct v4l2_pix_format *pix = &f_sp->fmt.pix;
+
+ if (f_mp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ f_sp->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (f_mp->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f_sp->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ else
+ return -EINVAL;
+
+ pix->width = pix_mp->width;
+ pix->height = pix_mp->height;
+ pix->pixelformat = pix_mp->pixelformat;
+ pix->field = pix_mp->field;
+ pix->colorspace = pix_mp->colorspace;
+ pix->sizeimage = pix_mp->plane_fmt[0].sizeimage;
+ pix->bytesperline = pix_mp->plane_fmt[0].bytesperline;
+
+ return 0;
+}
+
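For illustration, a minimal user-space sketch (not part of this patch) of what these two helpers enable: an application that only speaks the single-planar API can still issue G_FMT against a device whose driver implements only the _mplane ops, as long as the current format uses exactly one plane. The device path is an assumption.

/* Hedged sketch: single-planar G_FMT against an mplane-only driver. The
 * core converts via fmt_sp_to_mp()/fmt_mp_to_sp(); if the driver reports
 * more than one plane, the ioctl fails with -EBUSY instead. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, VIDIOC_G_FMT, &fmt) < 0)
		return 1;
	printf("%ux%u, sizeimage=%u\n", fmt.fmt.pix.width,
	       fmt.fmt.pix.height, fmt.fmt.pix.sizeimage);
	return 0;
}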
static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
void *fh = file->private_data;
+ struct v4l2_format f_copy;
long ret = -EINVAL;
if (ops == NULL) {
@@ -633,6 +737,11 @@ static long __video_do_ioctl(struct file *file,
if (ops->vidioc_enum_fmt_vid_cap)
ret = ops->vidioc_enum_fmt_vid_cap(file, fh, f);
break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (ops->vidioc_enum_fmt_vid_cap_mplane)
+ ret = ops->vidioc_enum_fmt_vid_cap_mplane(file,
+ fh, f);
+ break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (ops->vidioc_enum_fmt_vid_overlay)
ret = ops->vidioc_enum_fmt_vid_overlay(file,
@@ -642,6 +751,11 @@ static long __video_do_ioctl(struct file *file,
if (ops->vidioc_enum_fmt_vid_out)
ret = ops->vidioc_enum_fmt_vid_out(file, fh, f);
break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (ops->vidioc_enum_fmt_vid_out_mplane)
+ ret = ops->vidioc_enum_fmt_vid_out_mplane(file,
+ fh, f);
+ break;
case V4L2_BUF_TYPE_PRIVATE:
if (ops->vidioc_enum_fmt_type_private)
ret = ops->vidioc_enum_fmt_type_private(file,
@@ -670,22 +784,90 @@ static long __video_do_ioctl(struct file *file,
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (ops->vidioc_g_fmt_vid_cap)
+ if (ops->vidioc_g_fmt_vid_cap) {
ret = ops->vidioc_g_fmt_vid_cap(file, fh, f);
+ } else if (ops->vidioc_g_fmt_vid_cap_mplane) {
+ if (fmt_sp_to_mp(f, &f_copy))
+ break;
+ ret = ops->vidioc_g_fmt_vid_cap_mplane(file, fh,
+ &f_copy);
+ if (ret)
+ break;
+
+ /* The driver is currently in a multi-planar format;
+ * it cannot be returned through the single-planar API. */
+ if (f_copy.fmt.pix_mp.num_planes > 1) {
+ ret = -EBUSY;
+ break;
+ }
+
+ ret = fmt_mp_to_sp(&f_copy, f);
+ }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (ops->vidioc_g_fmt_vid_cap_mplane) {
+ ret = ops->vidioc_g_fmt_vid_cap_mplane(file,
+ fh, f);
+ } else if (ops->vidioc_g_fmt_vid_cap) {
+ if (fmt_mp_to_sp(f, &f_copy))
+ break;
+ ret = ops->vidioc_g_fmt_vid_cap(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ ret = fmt_sp_to_mp(&f_copy, f);
+ }
+ if (!ret)
+ v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (ops->vidioc_g_fmt_vid_overlay)
ret = ops->vidioc_g_fmt_vid_overlay(file,
fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (ops->vidioc_g_fmt_vid_out)
+ if (ops->vidioc_g_fmt_vid_out) {
ret = ops->vidioc_g_fmt_vid_out(file, fh, f);
+ } else if (ops->vidioc_g_fmt_vid_out_mplane) {
+ if (fmt_sp_to_mp(f, &f_copy))
+ break;
+ ret = ops->vidioc_g_fmt_vid_out_mplane(file, fh,
+ &f_copy);
+ if (ret)
+ break;
+
+ /* The driver is currently in a multi-planar format;
+ * it cannot be returned through the single-planar API. */
+ if (f_copy.fmt.pix_mp.num_planes > 1) {
+ ret = -EBUSY;
+ break;
+ }
+
+ ret = fmt_mp_to_sp(&f_copy, f);
+ }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (ops->vidioc_g_fmt_vid_out_mplane) {
+ ret = ops->vidioc_g_fmt_vid_out_mplane(file,
+ fh, f);
+ } else if (ops->vidioc_g_fmt_vid_out) {
+ if (fmt_mp_to_sp(f, &f_copy))
+ break;
+ ret = ops->vidioc_g_fmt_vid_out(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ ret = fmt_sp_to_mp(&f_copy, f);
+ }
+ if (!ret)
+ v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (ops->vidioc_g_fmt_vid_out_overlay)
ret = ops->vidioc_g_fmt_vid_out_overlay(file,
@@ -729,8 +911,44 @@ static long __video_do_ioctl(struct file *file,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.pix);
v4l_print_pix_fmt(vfd, &f->fmt.pix);
- if (ops->vidioc_s_fmt_vid_cap)
+ if (ops->vidioc_s_fmt_vid_cap) {
ret = ops->vidioc_s_fmt_vid_cap(file, fh, f);
+ } else if (ops->vidioc_s_fmt_vid_cap_mplane) {
+ if (fmt_sp_to_mp(f, &f_copy))
+ break;
+ ret = ops->vidioc_s_fmt_vid_cap_mplane(file, fh,
+ &f_copy);
+ if (ret)
+ break;
+
+ if (f_copy.fmt.pix_mp.num_planes > 1) {
+ /* Drivers shouldn't adjust from 1-plane
+ * to more than 1-plane formats */
+ ret = -EBUSY;
+ WARN_ON(1);
+ break;
+ }
+
+ ret = fmt_mp_to_sp(&f_copy, f);
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ CLEAR_AFTER_FIELD(f, fmt.pix_mp);
+ v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ if (ops->vidioc_s_fmt_vid_cap_mplane) {
+ ret = ops->vidioc_s_fmt_vid_cap_mplane(file,
+ fh, f);
+ } else if (ops->vidioc_s_fmt_vid_cap &&
+ f->fmt.pix_mp.num_planes == 1) {
+ if (fmt_mp_to_sp(f, &f_copy))
+ break;
+ ret = ops->vidioc_s_fmt_vid_cap(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ ret = fmt_sp_to_mp(&f_copy, f);
+ }
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
@@ -741,8 +959,44 @@ static long __video_do_ioctl(struct file *file,
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.pix);
v4l_print_pix_fmt(vfd, &f->fmt.pix);
- if (ops->vidioc_s_fmt_vid_out)
+ if (ops->vidioc_s_fmt_vid_out) {
ret = ops->vidioc_s_fmt_vid_out(file, fh, f);
+ } else if (ops->vidioc_s_fmt_vid_out_mplane) {
+ if (fmt_sp_to_mp(f, &f_copy))
+ break;
+ ret = ops->vidioc_s_fmt_vid_out_mplane(file, fh,
+ &f_copy);
+ if (ret)
+ break;
+
+ if (f_copy.fmt.pix_mp.num_planes > 1) {
+ /* Drivers shouldn't adjust from 1-plane
+ * to more than 1-plane formats */
+ ret = -EBUSY;
+ WARN_ON(1);
+ break;
+ }
+
+ ret = fmt_mp_to_sp(&f_copy, f);
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ CLEAR_AFTER_FIELD(f, fmt.pix_mp);
+ v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ if (ops->vidioc_s_fmt_vid_out_mplane) {
+ ret = ops->vidioc_s_fmt_vid_out_mplane(file,
+ fh, f);
+ } else if (ops->vidioc_s_fmt_vid_out &&
+ f->fmt.pix_mp.num_planes == 1) {
+ if (fmt_mp_to_sp(f, &f_copy))
+ break;
+ ret = ops->vidioc_s_fmt_vid_out(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ ret = fmt_sp_to_mp(&f_copy, f);
+ }
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
@@ -791,11 +1045,47 @@ static long __video_do_ioctl(struct file *file,
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.pix);
- if (ops->vidioc_try_fmt_vid_cap)
+ if (ops->vidioc_try_fmt_vid_cap) {
ret = ops->vidioc_try_fmt_vid_cap(file, fh, f);
+ } else if (ops->vidioc_try_fmt_vid_cap_mplane) {
+ if (fmt_sp_to_mp(f, &f_copy))
+ break;
+ ret = ops->vidioc_try_fmt_vid_cap_mplane(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ if (f_copy.fmt.pix_mp.num_planes > 1) {
+ /* Drivers shouldn't adjust from 1-plane
+ * to more than 1-plane formats */
+ ret = -EBUSY;
+ WARN_ON(1);
+ break;
+ }
+ ret = fmt_mp_to_sp(&f_copy, f);
+ }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ CLEAR_AFTER_FIELD(f, fmt.pix_mp);
+ if (ops->vidioc_try_fmt_vid_cap_mplane) {
+ ret = ops->vidioc_try_fmt_vid_cap_mplane(file,
+ fh, f);
+ } else if (ops->vidioc_try_fmt_vid_cap &&
+ f->fmt.pix_mp.num_planes == 1) {
+ if (fmt_mp_to_sp(f, &f_copy))
+ break;
+ ret = ops->vidioc_try_fmt_vid_cap(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ ret = fmt_sp_to_mp(&f_copy, f);
+ }
+ if (!ret)
+ v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
if (ops->vidioc_try_fmt_vid_overlay)
@@ -804,11 +1094,47 @@ static long __video_do_ioctl(struct file *file,
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.pix);
- if (ops->vidioc_try_fmt_vid_out)
+ if (ops->vidioc_try_fmt_vid_out) {
ret = ops->vidioc_try_fmt_vid_out(file, fh, f);
+ } else if (ops->vidioc_try_fmt_vid_out_mplane) {
+ if (fmt_sp_to_mp(f, &f_copy))
+ break;
+ ret = ops->vidioc_try_fmt_vid_out_mplane(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ if (f_copy.fmt.pix_mp.num_planes > 1) {
+ /* Drivers shouldn't adjust from 1-plane
+ * to more than 1-plane formats */
+ ret = -EBUSY;
+ WARN_ON(1);
+ break;
+ }
+ ret = fmt_mp_to_sp(&f_copy, f);
+ }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ CLEAR_AFTER_FIELD(f, fmt.pix_mp);
+ if (ops->vidioc_try_fmt_vid_out_mplane) {
+ ret = ops->vidioc_try_fmt_vid_out_mplane(file,
+ fh, f);
+ } else if (ops->vidioc_try_fmt_vid_out &&
+ f->fmt.pix_mp.num_planes == 1) {
+ if (fmt_mp_to_sp(f, &f_copy))
+ break;
+ ret = ops->vidioc_try_fmt_vid_out(file,
+ fh, &f_copy);
+ if (ret)
+ break;
+
+ ret = fmt_sp_to_mp(&f_copy, f);
+ }
+ if (!ret)
+ v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
if (ops->vidioc_try_fmt_vid_out_overlay)
@@ -1973,7 +2299,7 @@ static unsigned long cmd_input_size(unsigned int cmd)
switch (cmd) {
CMDINSIZE(ENUM_FMT, fmtdesc, type);
CMDINSIZE(G_FMT, format, type);
- CMDINSIZE(QUERYBUF, buffer, type);
+ CMDINSIZE(QUERYBUF, buffer, length);
CMDINSIZE(G_PARM, streamparm, type);
CMDINSIZE(ENUMSTD, standard, index);
CMDINSIZE(ENUMINPUT, input, index);
@@ -1998,6 +2324,49 @@ static unsigned long cmd_input_size(unsigned int cmd)
}
}
+static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ void * __user *user_ptr, void ***kernel_ptr)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF: {
+ struct v4l2_buffer *buf = parg;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) {
+ if (buf->length > VIDEO_MAX_PLANES) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)buf->m.planes;
+ *kernel_ptr = (void **)&buf->m.planes;
+ *array_size = sizeof(struct v4l2_plane) * buf->length;
+ ret = 1;
+ }
+ break;
+ }
+
+ case VIDIOC_S_EXT_CTRLS:
+ case VIDIOC_G_EXT_CTRLS:
+ case VIDIOC_TRY_EXT_CTRLS: {
+ struct v4l2_ext_controls *ctrls = parg;
+
+ if (ctrls->count != 0) {
+ *user_ptr = (void __user *)ctrls->controls;
+ *kernel_ptr = (void **)&ctrls->controls;
+ *array_size = sizeof(struct v4l2_ext_control)
+ * ctrls->count;
+ ret = 1;
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
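To make the array handling above concrete, here is a hedged user-space sketch of queueing a multi-planar MMAP buffer: buf.m.planes points into user memory, and video_ioctl2() copies buf.length plane descriptors into a kernel-side array before the driver sees them. The two-plane count (e.g. NV12M) is an assumption.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hedged sketch: QBUF of a 2-plane MMAP buffer on the CAPTURE queue. */
static int queue_mplane_buffer(int fd, unsigned int index)
{
	struct v4l2_plane planes[2];	/* assumed 2-plane format, e.g. NV12M */
	struct v4l2_buffer buf;

	memset(planes, 0, sizeof(planes));
	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.m.planes = planes;
	buf.length = 2;		/* elements in planes[], at most VIDEO_MAX_PLANES */

	return ioctl(fd, VIDIOC_QBUF, &buf);
}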
long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -2005,16 +2374,14 @@ long video_ioctl2(struct file *file,
void *mbuf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
- int is_ext_ctrl;
- size_t ctrls_size = 0;
+ bool has_array_args;
+ size_t array_size = 0;
void __user *user_ptr = NULL;
+ void **kernel_ptr = NULL;
#ifdef __OLD_VIDIOC_
cmd = video_fix_command(cmd);
#endif
- is_ext_ctrl = (cmd == VIDIOC_S_EXT_CTRLS || cmd == VIDIOC_G_EXT_CTRLS ||
- cmd == VIDIOC_TRY_EXT_CTRLS);
-
/* Copy arguments into temp kernel buffer */
if (_IOC_DIR(cmd) != _IOC_NONE) {
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
@@ -2043,43 +2410,43 @@ long video_ioctl2(struct file *file,
}
}
- if (is_ext_ctrl) {
- struct v4l2_ext_controls *p = parg;
+ err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
+ if (err < 0)
+ goto out;
+ has_array_args = err;
- /* In case of an error, tell the caller that it wasn't
- a specific control that caused it. */
- p->error_idx = p->count;
- user_ptr = (void __user *)p->controls;
- if (p->count) {
- ctrls_size = sizeof(struct v4l2_ext_control) * p->count;
- /* Note: v4l2_ext_controls fits in sbuf[] so mbuf is still NULL. */
- mbuf = kmalloc(ctrls_size, GFP_KERNEL);
- err = -ENOMEM;
- if (NULL == mbuf)
- goto out_ext_ctrl;
- err = -EFAULT;
- if (copy_from_user(mbuf, user_ptr, ctrls_size))
- goto out_ext_ctrl;
- p->controls = mbuf;
- }
+ if (has_array_args) {
+ /*
+ * When adding new types of array args, make sure that the
+ * parent argument to ioctl (which contains the pointer to the
+ * array) fits into sbuf (so that mbuf will still remain
+ * unused up to here).
+ */
+ mbuf = kmalloc(array_size, GFP_KERNEL);
+ err = -ENOMEM;
+ if (NULL == mbuf)
+ goto out_array_args;
+ err = -EFAULT;
+ if (copy_from_user(mbuf, user_ptr, array_size))
+ goto out_array_args;
+ *kernel_ptr = mbuf;
}
/* Handles IOCTL */
err = __video_do_ioctl(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
- if (is_ext_ctrl) {
- struct v4l2_ext_controls *p = parg;
- p->controls = (void *)user_ptr;
- if (p->count && err == 0 && copy_to_user(user_ptr, mbuf, ctrls_size))
+ if (has_array_args) {
+ *kernel_ptr = user_ptr;
+ if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
- goto out_ext_ctrl;
+ goto out_array_args;
}
if (err < 0)
goto out;
-out_ext_ctrl:
+out_array_args:
/* Copy results into user buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
index ac832a28e18e..a78e5c9be1a2 100644
--- a/drivers/media/video/v4l2-mem2mem.c
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -17,7 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#include <media/videobuf-core.h>
+#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
@@ -65,21 +65,16 @@ struct v4l2_m2m_dev {
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &m2m_ctx->cap_q_ctx;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (V4L2_TYPE_IS_OUTPUT(type))
return &m2m_ctx->out_q_ctx;
- default:
- printk(KERN_ERR "Invalid buffer type\n");
- return NULL;
- }
+ else
+ return &m2m_ctx->cap_q_ctx;
}
/**
- * v4l2_m2m_get_vq() - return videobuf_queue for the given type
+ * v4l2_m2m_get_vq() - return vb2_queue for the given type
*/
-struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct v4l2_m2m_queue_ctx *q_ctx;
@@ -95,27 +90,20 @@ EXPORT_SYMBOL(v4l2_m2m_get_vq);
/**
* v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
*/
-void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
- struct v4l2_m2m_queue_ctx *q_ctx;
- struct videobuf_buffer *vb = NULL;
+ struct v4l2_m2m_buffer *b = NULL;
unsigned long flags;
- q_ctx = get_queue_ctx(m2m_ctx, type);
- if (!q_ctx)
- return NULL;
-
- spin_lock_irqsave(q_ctx->q.irqlock, flags);
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (list_empty(&q_ctx->rdy_queue))
goto end;
- vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer, queue);
- vb->state = VIDEOBUF_ACTIVE;
-
+ b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
end:
- spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
- return vb;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
@@ -123,26 +111,21 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
* v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
* return it
*/
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
- struct v4l2_m2m_queue_ctx *q_ctx;
- struct videobuf_buffer *vb = NULL;
+ struct v4l2_m2m_buffer *b = NULL;
unsigned long flags;
- q_ctx = get_queue_ctx(m2m_ctx, type);
- if (!q_ctx)
- return NULL;
-
- spin_lock_irqsave(q_ctx->q.irqlock, flags);
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (!list_empty(&q_ctx->rdy_queue)) {
- vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer,
- queue);
- list_del(&vb->queue);
+ b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer,
+ list);
+ list_del(&b->list);
q_ctx->num_rdy--;
}
- spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
- return vb;
+ return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
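A hedged sketch of how a mem2mem driver's device_run() callback might use the reworked helpers, now that they take a queue context instead of a buffer type; struct my_ctx and start_hw_job() are hypothetical driver details.

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;		/* hypothetical driver context */
	struct vb2_buffer *src, *dst;

	/* OUTPUT queue feeds the hardware, CAPTURE queue receives results */
	src = v4l2_m2m_next_buf(&ctx->m2m_ctx->out_q_ctx);
	dst = v4l2_m2m_next_buf(&ctx->m2m_ctx->cap_q_ctx);

	start_hw_job(ctx, src, dst);		/* hypothetical hardware kick-off */
}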
@@ -235,20 +218,20 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
return;
}
- spin_lock_irqsave(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No input buffers available\n");
return;
}
if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No output buffers available\n");
return;
}
- spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -291,6 +274,7 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
@@ -309,10 +293,10 @@ EXPORT_SYMBOL(v4l2_m2m_job_finish);
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
- return videobuf_reqbufs(vq, reqbufs);
+ return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
@@ -324,15 +308,22 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
- struct videobuf_queue *vq;
- int ret;
+ struct vb2_queue *vq;
+ int ret = 0;
+ unsigned int i;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = videobuf_querybuf(vq, buf);
-
- if (buf->memory == V4L2_MEMORY_MMAP
- && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- buf->m.offset += DST_QUEUE_OFF_BASE;
+ ret = vb2_querybuf(vq, buf);
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ for (i = 0; i < buf->length; ++i)
+ buf->m.planes[i].m.mem_offset
+ += DST_QUEUE_OFF_BASE;
+ } else {
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
}
return ret;
@@ -346,11 +337,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = videobuf_qbuf(vq, buf);
+ ret = vb2_qbuf(vq, buf);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
@@ -365,10 +356,10 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return videobuf_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
@@ -378,11 +369,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, type);
- ret = videobuf_streamon(vq);
+ ret = vb2_streamon(vq, type);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
@@ -396,10 +387,10 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, type);
- return videobuf_streamoff(vq);
+ return vb2_streamoff(vq, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
@@ -414,44 +405,53 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
- struct videobuf_queue *src_q, *dst_q;
- struct videobuf_buffer *src_vb = NULL, *dst_vb = NULL;
+ struct vb2_queue *src_q, *dst_q;
+ struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
unsigned int rc = 0;
+ unsigned long flags;
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
- videobuf_queue_lock(src_q);
- videobuf_queue_lock(dst_q);
-
- if (src_q->streaming && !list_empty(&src_q->stream))
- src_vb = list_first_entry(&src_q->stream,
- struct videobuf_buffer, stream);
- if (dst_q->streaming && !list_empty(&dst_q->stream))
- dst_vb = list_first_entry(&dst_q->stream,
- struct videobuf_buffer, stream);
-
- if (!src_vb && !dst_vb) {
+ /*
+ * There has to be at least one buffer queued on each queued_list; a
+ * queued buffer is either already owned by the driver or waiting for
+ * the driver to claim it and start processing.
+ */
+ if ((!src_q->streaming || list_empty(&src_q->queued_list))
+ && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
rc = POLLERR;
goto end;
}
- if (src_vb) {
- poll_wait(file, &src_vb->done, wait);
- if (src_vb->state == VIDEOBUF_DONE
- || src_vb->state == VIDEOBUF_ERROR)
- rc |= POLLOUT | POLLWRNORM;
- }
- if (dst_vb) {
- poll_wait(file, &dst_vb->done, wait);
- if (dst_vb->state == VIDEOBUF_DONE
- || dst_vb->state == VIDEOBUF_ERROR)
- rc |= POLLIN | POLLRDNORM;
- }
+ if (m2m_ctx->m2m_dev->m2m_ops->unlock)
+ m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+
+ if (m2m_ctx->m2m_dev->m2m_ops->lock)
+ m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+
+ spin_lock_irqsave(&src_q->done_lock, flags);
+ if (!list_empty(&src_q->done_list))
+ src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+ || src_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLOUT | POLLWRNORM;
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+
+ spin_lock_irqsave(&dst_q->done_lock, flags);
+ if (!list_empty(&dst_q->done_list))
+ dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+ || dst_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
- videobuf_queue_unlock(dst_q);
- videobuf_queue_unlock(src_q);
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
@@ -470,7 +470,7 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- struct videobuf_queue *vq;
+ struct vb2_queue *vq;
if (offset < DST_QUEUE_OFF_BASE) {
vq = v4l2_m2m_get_src_vq(m2m_ctx);
@@ -479,7 +479,7 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
}
- return videobuf_mmap_mapper(vq, vma);
+ return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
@@ -531,36 +531,41 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_release);
*
* Usually called from driver's open() function.
*/
-struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
- void (*vq_init)(void *priv, struct videobuf_queue *,
- enum v4l2_buf_type))
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
-
- if (!vq_init)
- return ERR_PTR(-EINVAL);
+ int ret;
m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
if (!m2m_ctx)
return ERR_PTR(-ENOMEM);
- m2m_ctx->priv = priv;
+ m2m_ctx->priv = drv_priv;
m2m_ctx->m2m_dev = m2m_dev;
+ init_waitqueue_head(&m2m_ctx->finished);
- out_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- cap_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ out_q_ctx = &m2m_ctx->out_q_ctx;
+ cap_q_ctx = &m2m_ctx->cap_q_ctx;
INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+ spin_lock_init(&out_q_ctx->rdy_spinlock);
+ spin_lock_init(&cap_q_ctx->rdy_spinlock);
INIT_LIST_HEAD(&m2m_ctx->queue);
- vq_init(priv, &out_q_ctx->q, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- vq_init(priv, &cap_q_ctx->q, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- out_q_ctx->q.priv_data = cap_q_ctx->q.priv_data = priv;
+ ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
+
+ if (ret)
+ goto err;
return m2m_ctx;
+err:
+ kfree(m2m_ctx);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
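A hedged sketch of the new initialization flow from a driver's open(): the driver supplies a queue_init callback that fills in and registers both vb2 queues. The vb2_queue fields, vb2_queue_init() and vb2_dma_contig_memops are assumed from the videobuf2 core and allocators added alongside this code; my_ctx, my_vb2_ops and the remaining names are hypothetical.

static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	src_vq->drv_priv = ctx;
	src_vq->ops = &my_vb2_ops;			/* hypothetical vb2_ops */
	src_vq->mem_ops = &vb2_dma_contig_memops;	/* assumed allocator */
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	ret = vb2_queue_init(src_vq);			/* assumed vb2 helper */
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &my_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	return vb2_queue_init(dst_vq);
}

/* ...and in the driver's open(): */
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
if (IS_ERR(ctx->m2m_ctx))
	return PTR_ERR(ctx->m2m_ctx);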
@@ -572,7 +577,6 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
- struct videobuf_buffer *vb;
unsigned long flags;
m2m_dev = m2m_ctx->m2m_dev;
@@ -582,10 +586,7 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
- vb = v4l2_m2m_next_dst_buf(m2m_ctx);
- BUG_ON(NULL == vb);
- wait_event(vb->done, vb->state != VIDEOBUF_ACTIVE
- && vb->state != VIDEOBUF_QUEUED);
+ wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
list_del(&m2m_ctx->queue);
m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
@@ -597,11 +598,8 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
}
- videobuf_stop(&m2m_ctx->cap_q_ctx.q);
- videobuf_stop(&m2m_ctx->out_q_ctx.q);
-
- videobuf_mmap_free(&m2m_ctx->cap_q_ctx.q);
- videobuf_mmap_free(&m2m_ctx->out_q_ctx.q);
+ vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
+ vb2_queue_release(&m2m_ctx->out_q_ctx.q);
kfree(m2m_ctx);
}
@@ -611,23 +609,21 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
* v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
*
* Call from buf_queue(), videobuf_queue_ops callback.
- *
- * Locking: Caller holds q->irqlock (taken by videobuf before calling buf_queue
- * callback in the driver).
*/
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
+ struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
struct v4l2_m2m_queue_ctx *q_ctx;
+ unsigned long flags;
- q_ctx = get_queue_ctx(m2m_ctx, vq->type);
+ q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
if (!q_ctx)
return;
- list_add_tail(&vb->queue, &q_ctx->rdy_queue);
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ list_add_tail(&b->list, &q_ctx->rdy_queue);
q_ctx->num_rdy++;
-
- vb->state = VIDEOBUF_QUEUED;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
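The matching driver side is now trivial; a hedged sketch of a vb2 buf_queue callback that simply hands the buffer to the mem2mem ready list (struct my_ctx is hypothetical, drv_priv is assumed to have been set during queue setup):

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_ctx *ctx = vb->vb2_queue->drv_priv;	/* hypothetical context */

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}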
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
new file mode 100644
index 000000000000..cc7ab0a17b68
--- /dev/null
+++ b/drivers/media/video/videobuf2-core.c
@@ -0,0 +1,1804 @@
+/*
+ * videobuf2-core.c - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <media/videobuf2-core.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vb2: " fmt, ## arg); \
+ } while (0)
+
+#define call_memop(q, plane, op, args...) \
+ (((q)->mem_ops->op) ? \
+ ((q)->mem_ops->op(args)) : 0)
+
+#define call_qop(q, op, args...) \
+ (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
+
+/**
+ * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
+ */
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb,
+ unsigned long *plane_sizes)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ int plane;
+
+ /* Allocate memory for all planes in this buffer */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane],
+ plane_sizes[plane]);
+ if (!mem_priv)
+ goto free;
+
+ /* Associate allocator private data with this plane */
+ vb->planes[plane].mem_priv = mem_priv;
+ vb->v4l2_planes[plane].length = plane_sizes[plane];
+ }
+
+ return 0;
+free:
+ /* Free already allocated memory if one of the allocations failed */
+ for (; plane > 0; --plane)
+ call_memop(q, plane, put, vb->planes[plane - 1].mem_priv);
+
+ return -ENOMEM;
+}
+
+/**
+ * __vb2_buf_mem_free() - free memory of the given buffer
+ */
+static void __vb2_buf_mem_free(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ call_memop(q, plane, put, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ dprintk(3, "Freed plane %d of buffer %d\n",
+ plane, vb->v4l2_buf.index);
+ }
+}
+
+/**
+ * __vb2_buf_userptr_put() - release userspace memory associated with
+ * a USERPTR buffer
+ */
+static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ void *mem_priv = vb->planes[plane].mem_priv;
+
+ if (mem_priv) {
+ call_memop(q, plane, put_userptr, mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ }
+ }
+}
+
+/**
+ * __setup_offsets() - setup unique offsets ("cookies") for every plane in
+ * every buffer on the queue
+ */
+static void __setup_offsets(struct vb2_queue *q)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+ unsigned long off = 0;
+
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->v4l2_planes[plane].m.mem_offset = off;
+
+ dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
+ buffer, plane, off);
+
+ off += vb->v4l2_planes[plane].length;
+ off = PAGE_ALIGN(off);
+ }
+ }
+}
+
+/**
+ * __vb2_queue_alloc() - allocate videobuf buffer structures and (for the MMAP
+ * type) video buffer memory for all buffers/planes on the queue, and
+ * initialize the queue
+ *
+ * Returns the number of buffers successfully allocated.
+ */
+static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
+ unsigned int num_buffers, unsigned int num_planes,
+ unsigned long plane_sizes[])
+{
+ unsigned int buffer;
+ struct vb2_buffer *vb;
+ int ret;
+
+ for (buffer = 0; buffer < num_buffers; ++buffer) {
+ /* Allocate videobuf buffer structures */
+ vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+ if (!vb) {
+ dprintk(1, "Memory alloc for buffer struct failed\n");
+ break;
+ }
+
+ /* Length stores number of planes for multiplanar buffers */
+ if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
+ vb->v4l2_buf.length = num_planes;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ vb->vb2_queue = q;
+ vb->num_planes = num_planes;
+ vb->v4l2_buf.index = buffer;
+ vb->v4l2_buf.type = q->type;
+ vb->v4l2_buf.memory = memory;
+
+ /* Allocate video buffer memory for the MMAP type */
+ if (memory == V4L2_MEMORY_MMAP) {
+ ret = __vb2_buf_mem_alloc(vb, plane_sizes);
+ if (ret) {
+ dprintk(1, "Failed allocating memory for "
+ "buffer %d\n", buffer);
+ kfree(vb);
+ break;
+ }
+ /*
+ * Call the driver-provided buffer initialization
+ * callback, if given. An error in initialization
+ * results in queue setup failure.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "Buffer %d %p initialization"
+ " failed\n", buffer, vb);
+ __vb2_buf_mem_free(vb);
+ kfree(vb);
+ break;
+ }
+ }
+
+ q->bufs[buffer] = vb;
+ }
+
+ q->num_buffers = buffer;
+
+ __setup_offsets(q);
+
+ dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
+ q->num_buffers, num_planes);
+
+ return buffer;
+}
+
+/**
+ * __vb2_free_mem() - release all video buffer memory for a given queue
+ */
+static void __vb2_free_mem(struct vb2_queue *q)
+{
+ unsigned int buffer;
+ struct vb2_buffer *vb;
+
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ /* Free MMAP buffers or release USERPTR buffers */
+ if (q->memory == V4L2_MEMORY_MMAP)
+ __vb2_buf_mem_free(vb);
+ else
+ __vb2_buf_userptr_put(vb);
+ }
+}
+
+/**
+ * __vb2_queue_free() - free video buffer memory and related information, and
+ * return the queue to an uninitialized state. Might be called even if the
+ * queue has already been freed.
+ */
+static int __vb2_queue_free(struct vb2_queue *q)
+{
+ unsigned int buffer;
+
+ /* Call driver-provided cleanup function for each buffer, if provided */
+ if (q->ops->buf_cleanup) {
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ if (NULL == q->bufs[buffer])
+ continue;
+ q->ops->buf_cleanup(q->bufs[buffer]);
+ }
+ }
+
+ /* Release video buffer memory */
+ __vb2_free_mem(q);
+
+ /* Free videobuf buffers */
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ kfree(q->bufs[buffer]);
+ q->bufs[buffer] = NULL;
+ }
+
+ q->num_buffers = 0;
+ q->memory = 0;
+
+ return 0;
+}
+
+/**
+ * __verify_planes_array() - verify that the planes array passed in struct
+ * v4l2_buffer from userspace can be safely used
+ */
+static int __verify_planes_array(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+ /* Is memory for copying plane information present? */
+ if (NULL == b->m.planes) {
+ dprintk(1, "Multi-planar buffer passed but "
+ "planes array not provided\n");
+ return -EINVAL;
+ }
+
+ if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
+ dprintk(1, "Incorrect planes array length, "
+ "expected %d, got %d\n", vb->num_planes, b->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
+ * returned to userspace
+ */
+static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ int ret = 0;
+
+ /* Copy back data such as timestamp, input, etc. */
+ memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
+ b->input = vb->v4l2_buf.input;
+ b->reserved = vb->v4l2_buf.reserved;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
+ ret = __verify_planes_array(vb, b);
+ if (ret)
+ return ret;
+
+ /*
+ * Fill in plane-related data if userspace provided an array
+ * for it. The memory and size is verified above.
+ */
+ memcpy(b->m.planes, vb->v4l2_planes,
+ b->length * sizeof(struct v4l2_plane));
+ } else {
+ /*
+ * We use length and offset in v4l2_planes array even for
+ * single-planar buffers, but userspace does not.
+ */
+ b->length = vb->v4l2_planes[0].length;
+ b->bytesused = vb->v4l2_planes[0].bytesused;
+ if (q->memory == V4L2_MEMORY_MMAP)
+ b->m.offset = vb->v4l2_planes[0].m.mem_offset;
+ else if (q->memory == V4L2_MEMORY_USERPTR)
+ b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ }
+
+ b->flags = 0;
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_QUEUED:
+ case VB2_BUF_STATE_ACTIVE:
+ b->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case VB2_BUF_STATE_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VB2_BUF_STATE_DONE:
+ b->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case VB2_BUF_STATE_DEQUEUED:
+ /* nothing */
+ break;
+ }
+
+ if (vb->num_planes_mapped == vb->num_planes)
+ b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+ return ret;
+}
+
+/**
+ * vb2_querybuf() - query video buffer information
+ * @q: videobuf queue
+ * @b: buffer struct passed from userspace to vidioc_querybuf handler
+ * in driver
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * This function will verify the passed v4l2_buffer structure and fill the
+ * relevant information for the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_querybuf handler in driver.
+ */
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+
+ if (b->type != q->type) {
+ dprintk(1, "querybuf: wrong buffer type\n");
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "querybuf: buffer index out of range\n");
+ return -EINVAL;
+ }
+ vb = q->bufs[b->index];
+
+ return __fill_v4l2_buffer(vb, b);
+}
+EXPORT_SYMBOL(vb2_querybuf);
+
+/**
+ * __verify_userptr_ops() - verify that all memory operations required for
+ * USERPTR queue type have been provided
+ */
+static int __verify_userptr_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
+ !q->mem_ops->put_userptr)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * __verify_mmap_ops() - verify that all memory operations required for
+ * MMAP queue type have been provided
+ */
+static int __verify_mmap_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
+ !q->mem_ops->put || !q->mem_ops->mmap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ vb = q->bufs[buffer];
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /*
+ * If num_users() has not been provided, call_memop
+ * will return 0, apparently nobody cares about this
+ * case anyway. If num_users() returns more than 1,
+ * we are not the only user of the plane's memory.
+ */
+ if (call_memop(q, plane, num_users,
+ vb->planes[plane].mem_priv) > 1)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * vb2_reqbufs() - Request buffer allocation and set up the queue
+ * @q: videobuf2 queue
+ * @req: struct passed from userspace to vidioc_reqbufs handler in driver
+ *
+ * Should be called from vidioc_reqbufs ioctl handler of a driver.
+ * This function:
+ * 1) verifies streaming parameters passed from the userspace,
+ * 2) sets up the queue,
+ * 3) negotiates number of buffers and planes per buffer with the driver
+ * to be used during streaming,
+ * 4) allocates internal buffer structures (struct vb2_buffer), according to
+ * the agreed parameters,
+ * 5) for MMAP memory type, allocates actual video memory, using the
+ * memory handling/allocation routines provided during queue initialization
+ *
+ * If req->count is 0, all the memory will be freed instead.
+ * If the queue has been allocated previously (by a previous vb2_reqbufs call)
+ * and the queue is not busy, memory will be reallocated.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_reqbufs handler in driver.
+ */
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+ unsigned int num_buffers, num_planes;
+ unsigned long plane_sizes[VIDEO_MAX_PLANES];
+ int ret = 0;
+
+ if (q->fileio) {
+ dprintk(1, "reqbufs: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (req->memory != V4L2_MEMORY_MMAP
+ && req->memory != V4L2_MEMORY_USERPTR) {
+ dprintk(1, "reqbufs: unsupported memory type\n");
+ return -EINVAL;
+ }
+
+ if (req->type != q->type) {
+ dprintk(1, "reqbufs: requested type is incorrect\n");
+ return -EINVAL;
+ }
+
+ if (q->streaming) {
+ dprintk(1, "reqbufs: streaming active\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Make sure all the required memory ops for given memory type
+ * are available.
+ */
+ if (req->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (req->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (req->count == 0 || q->num_buffers != 0) {
+ /*
+ * We already have buffers allocated, so first check if they
+ * are not in use and can be freed.
+ */
+ if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
+ dprintk(1, "reqbufs: memory in use, cannot free\n");
+ return -EBUSY;
+ }
+
+ ret = __vb2_queue_free(q);
+ if (ret != 0)
+ return ret;
+ }
+
+ /*
+ * Make sure the requested values and current defaults are sane.
+ */
+ num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
+ memset(plane_sizes, 0, sizeof(plane_sizes));
+ memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+
+ /*
+ * Ask the driver how many buffers and planes per buffer it requires.
+ * Driver also sets the size and allocator context for each plane.
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
+ plane_sizes, q->alloc_ctx);
+ if (ret)
+ return ret;
+
+ /* Finally, allocate buffers and video memory */
+ ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
+ plane_sizes);
+ if (ret < 0) {
+ dprintk(1, "Memory allocation failed with error: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check if driver can handle the allocated number of buffers.
+ */
+ if (ret < num_buffers) {
+ unsigned int orig_num_buffers;
+
+ orig_num_buffers = num_buffers = ret;
+ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
+ plane_sizes, q->alloc_ctx);
+ if (ret)
+ goto free_mem;
+
+ if (orig_num_buffers < num_buffers) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+
+ /*
+ * Ok, driver accepted smaller number of buffers.
+ */
+ ret = num_buffers;
+ }
+
+ q->memory = req->memory;
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ req->count = ret;
+
+ return 0;
+
+free_mem:
+ __vb2_queue_free(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_reqbufs);
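The negotiation described above is driven by the driver's queue_setup callback; a hedged sketch matching the call_qop() invocation in vb2_reqbufs(), with MY_IMAGE_SIZE standing in for a real per-format size:

static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			  unsigned int *num_planes, unsigned long sizes[],
			  void *alloc_ctxs[])
{
	if (*num_buffers < 2)
		*num_buffers = 2;	/* need at least two for double buffering */

	*num_planes = 1;
	sizes[0] = MY_IMAGE_SIZE;	/* hypothetical, e.g. width * height * 2 */
	alloc_ctxs[0] = NULL;		/* allocator context, if the mem_ops need one */

	return 0;
}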
+
+/**
+ * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
+ * @vb: vb2_buffer to which the plane in question belongs
+ * @plane_no: plane number for which the address is to be returned
+ *
+ * This function returns a kernel virtual address of a given plane if
+ * such a mapping exists, NULL otherwise.
+ */
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (plane_no > vb->num_planes)
+ return NULL;
+
+ return call_memop(q, plane_no, vaddr, vb->planes[plane_no].mem_priv);
+
+}
+EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
+
+/**
+ * vb2_plane_cookie() - Return allocator specific cookie for the given plane
+ * @vb: vb2_buffer to which the plane in question belongs
+ * @plane_no: plane number for which the cookie is to be returned
+ *
+ * This function returns an allocator specific cookie for a given plane if
+ * available, NULL otherwise. The allocator should provide a simple static
+ * inline function that converts this cookie to the allocator-specific type
+ * the driver can use directly to access the buffer. This can be, for
+ * example, a physical address, a pointer to a scatterlist or an IOMMU mapping.
+ */
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (plane_no > vb->num_planes)
+ return NULL;
+
+ return call_memop(q, plane_no, cookie, vb->planes[plane_no].mem_priv);
+}
+EXPORT_SYMBOL_GPL(vb2_plane_cookie);
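A hedged sketch of how a driver might consume a plane once a buffer reaches it, using the kernel mapping returned by vb2_plane_vaddr(); fill_test_pattern() is hypothetical:

static void my_process_buffer(struct vb2_buffer *vb)
{
	void *vaddr = vb2_plane_vaddr(vb, 0);

	if (vaddr)
		fill_test_pattern(vaddr, vb->v4l2_planes[0].length);
}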
+
+/**
+ * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
+ * @vb: vb2_buffer returned from the driver
+ * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
+ * or VB2_BUF_STATE_ERROR if the operation finished with an error
+ *
+ * This function should be called by the driver after a hardware operation on
+ * a buffer is finished and the buffer may be returned to userspace. The driver
+ * cannot use this buffer anymore until videobuf queues it back to the driver
+ * by means of the buf_queue callback. Only buffers previously queued to the
+ * driver by buf_queue can be passed to this function.
+ */
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags;
+
+ if (vb->state != VB2_BUF_STATE_ACTIVE)
+ return;
+
+ if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
+ return;
+
+ dprintk(4, "Done processing on buffer %d, state: %d\n",
+ vb->v4l2_buf.index, vb->state);
+
+ /* Add the buffer to the done buffers list */
+ spin_lock_irqsave(&q->done_lock, flags);
+ vb->state = state;
+ list_add_tail(&vb->done_entry, &q->done_list);
+ atomic_dec(&q->queued_count);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+}
+EXPORT_SYMBOL_GPL(vb2_buffer_done);
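A hedged sketch of the typical caller, a driver interrupt handler completing the buffer the hardware has just finished; struct my_dev, current_buf and my_hw_frame_ok() are hypothetical bookkeeping:

static irqreturn_t my_irq_handler(int irq, void *priv)
{
	struct my_dev *dev = priv;
	struct vb2_buffer *vb = dev->current_buf;	/* hypothetical bookkeeping */

	dev->current_buf = NULL;
	vb2_buffer_done(vb, my_hw_frame_ok(dev) ?
			VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR);

	return IRQ_HANDLED;
}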
+
+/**
+ * __fill_vb2_buffer() - fill a vb2_buffer with information provided in
+ * a v4l2_buffer by the userspace
+ */
+static int __fill_vb2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b,
+ struct v4l2_plane *v4l2_planes)
+{
+ unsigned int plane;
+ int ret;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ /*
+ * Verify that the userspace gave us a valid array for
+ * plane information.
+ */
+ ret = __verify_planes_array(vb, b);
+ if (ret)
+ return ret;
+
+ /* Fill in driver-provided information for OUTPUT types */
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Will have to go up to b->length when the API starts
+ * accepting a variable number of planes.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].bytesused =
+ b->m.planes[plane].bytesused;
+ v4l2_planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
+ }
+ }
+
+ if (b->memory == V4L2_MEMORY_USERPTR) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.userptr =
+ b->m.planes[plane].m.userptr;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ }
+ } else {
+ /*
+ * Single-planar buffers do not use the planes array,
+ * so fill in the relevant v4l2_buffer struct fields instead.
+ * In videobuf we use our internal v4l2_planes array for
+ * single-planar buffers as well, for simplicity.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(b->type))
+ v4l2_planes[0].bytesused = b->bytesused;
+
+ if (b->memory == V4L2_MEMORY_USERPTR) {
+ v4l2_planes[0].m.userptr = b->m.userptr;
+ v4l2_planes[0].length = b->length;
+ }
+ }
+
+ vb->v4l2_buf.field = b->field;
+ vb->v4l2_buf.timestamp = b->timestamp;
+
+ return 0;
+}
+
+/**
+ * __qbuf_userptr() - handle qbuf of a USERPTR buffer
+ */
+static int __qbuf_userptr(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ /* Verify and copy relevant information provided by the userspace */
+ ret = __fill_vb2_buffer(vb, b, planes);
+ if (ret)
+ return ret;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /* Skip the plane if already verified */
+ if (vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
+ && vb->v4l2_planes[plane].length == planes[plane].length)
+ continue;
+
+ dprintk(3, "qbuf: userspace address for plane %d changed, "
+ "reacquiring memory\n", plane);
+
+ /* Release previously acquired memory if present */
+ if (vb->planes[plane].mem_priv)
+ call_memop(q, plane, put_userptr,
+ vb->planes[plane].mem_priv);
+
+ vb->planes[plane].mem_priv = NULL;
+
+ /* Acquire each plane's memory */
+ if (q->mem_ops->get_userptr) {
+ mem_priv = q->mem_ops->get_userptr(q->alloc_ctx[plane],
+ planes[plane].m.userptr,
+ planes[plane].length,
+ write);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "qbuf: failed acquiring userspace "
+ "memory for plane %d\n", plane);
+ ret = PTR_ERR(mem_priv);
+ goto err;
+ }
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+ }
+
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ goto err;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ for (; plane > 0; --plane) {
+ call_memop(q, plane, put_userptr,
+ vb->planes[plane - 1].mem_priv);
+ vb->planes[plane - 1].mem_priv = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * __qbuf_mmap() - handle qbuf of an MMAP buffer
+ */
+static int __qbuf_mmap(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+ return __fill_vb2_buffer(vb, b, vb->v4l2_planes);
+}
+
+/**
+ * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
+ */
+static void __enqueue_in_driver(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->queued_count);
+ q->ops->buf_queue(vb);
+}
+
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_prepare callback in the driver (if provided), in which
+ * driver-specific buffer initialization can be performed,
+ * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
+ * callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+ int ret = 0;
+
+ if (q->fileio) {
+ dprintk(1, "qbuf: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (b->type != q->type) {
+ dprintk(1, "qbuf: invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "qbuf: buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[b->index];
+ if (NULL == vb) {
+ /* Should never happen */
+ dprintk(1, "qbuf: buffer is NULL\n");
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(1, "qbuf: invalid memory type\n");
+ return -EINVAL;
+ }
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "qbuf: buffer already in use\n");
+ return -EINVAL;
+ }
+
+ if (q->memory == V4L2_MEMORY_MMAP)
+ ret = __qbuf_mmap(vb, b);
+ else if (q->memory == V4L2_MEMORY_USERPTR)
+ ret = __qbuf_userptr(vb, b);
+ else {
+ WARN(1, "Invalid queue type\n");
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = call_qop(q, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer preparation failed\n");
+ return ret;
+ }
+
+ /*
+ * Add to the queued buffers list, a buffer will stay on it until
+ * dequeued in dqbuf.
+ */
+ list_add_tail(&vb->queued_entry, &q->queued_list);
+ vb->state = VB2_BUF_STATE_QUEUED;
+
+ /*
+ * If already streaming, give the buffer to driver for processing.
+ * If not, the buffer will be given to driver on next streamon.
+ */
+ if (q->streaming)
+ __enqueue_in_driver(vb);
+
+ dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_qbuf);
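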
+
+/**
+ * __vb2_wait_for_done_vb() - wait for a buffer to become available
+ * for dequeuing
+ *
+ * Will sleep if required, but only when nonblocking == false.
+ */
+static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+{
+ /*
+ * All operations on vb_done_list are performed under done_lock
+ * spinlock protection. However, buffers may be removed from
+ * it and returned to userspace only while holding both the driver's
+ * lock and the done_lock spinlock. Thus, as long as we hold the
+ * driver's lock, the list cannot become empty behind our back once
+ * a list_empty() check has found it non-empty.
+ */
+
+ for (;;) {
+ int ret;
+
+ if (!q->streaming) {
+ dprintk(1, "Streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ if (!list_empty(&q->done_list)) {
+ /*
+ * Found a buffer that we were waiting for.
+ */
+ break;
+ }
+
+ if (nonblocking) {
+ dprintk(1, "Nonblocking and no buffers to dequeue, "
+ "will not wait\n");
+ return -EAGAIN;
+ }
+
+ /*
+ * We are streaming and blocking, wait for another buffer to
+ * become ready or for streamoff. Driver's lock is released to
+ * allow streamoff or qbuf to be called while waiting.
+ */
+ call_qop(q, wait_prepare, q);
+
+ /*
+ * All locks have been released, it is safe to sleep now.
+ */
+ dprintk(3, "Will sleep waiting for buffers\n");
+ ret = wait_event_interruptible(q->done_wq,
+ !list_empty(&q->done_list) || !q->streaming);
+
+ /*
+ * We need to reevaluate both conditions again after reacquiring
+ * the locks or return an error if one occurred.
+ */
+ call_qop(q, wait_finish, q);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * __vb2_get_done_vb() - get a buffer ready for dequeuing
+ *
+ * Will sleep if required, but only when nonblocking == false.
+ */
+static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ int nonblocking)
+{
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Wait for at least one buffer to become available on the done_list.
+ */
+ ret = __vb2_wait_for_done_vb(q, nonblocking);
+ if (ret)
+ return ret;
+
+ /*
+ * Driver's lock has been held since we last verified that done_list
+ * is not empty, so no need for another list_empty(done_list) check.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
+ list_del(&(*vb)->done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ return 0;
+}
+
+/**
+ * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
+ * @q: videobuf2 queue
+ *
+ * This function will wait until all buffers that have been given to the driver
+ * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
+ * the wait_prepare/wait_finish pair. It is intended to be called with all locks
+ * taken, for example from stop_streaming() callback.
+ */
+int vb2_wait_for_all_buffers(struct vb2_queue *q)
+{
+ if (!q->streaming) {
+ dprintk(1, "Streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ wait_event(q->done_wq, !atomic_read(&q->queued_count));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
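A hedged sketch of the stop_streaming() usage mentioned above: halt the hardware first, then wait until every in-flight buffer has come back through vb2_buffer_done(); my_hw_stop() is hypothetical.

static int my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = q->drv_priv;	/* hypothetical driver data */

	my_hw_stop(dev);			/* hypothetical: cancel DMA etc. */
	return vb2_wait_for_all_buffers(q);
}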
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ struct vb2_buffer *vb = NULL;
+ int ret;
+
+ if (q->fileio) {
+ dprintk(1, "dqbuf: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (b->type != q->type) {
+ dprintk(1, "dqbuf: invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ if (ret < 0) {
+ dprintk(1, "dqbuf: error getting next done buffer\n");
+ return ret;
+ }
+
+ ret = call_qop(q, buf_finish, vb);
+ if (ret) {
+ dprintk(1, "dqbuf: buffer finish failed\n");
+ return ret;
+ }
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DONE:
+ dprintk(3, "dqbuf: Returning done buffer\n");
+ break;
+ case VB2_BUF_STATE_ERROR:
+ dprintk(3, "dqbuf: Returning done buffer with errors\n");
+ break;
+ default:
+ dprintk(1, "dqbuf: Invalid buffer state\n");
+ return -EINVAL;
+ }
+
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+ /* Remove from videobuf queue */
+ list_del(&vb->queued_entry);
+
+ dprintk(1, "dqbuf of buffer %d, with state %d\n",
+ vb->v4l2_buf.index, vb->state);
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_dqbuf);
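A driver's vidioc_dqbuf handler typically reduces to a single call, passing O_NONBLOCK through as the nonblocking flag, exactly as the vivi conversion further down in this patch does. A sketch with a hypothetical foo_dev structure:

static int foo_vidioc_dqbuf(struct file *file, void *priv,
                            struct v4l2_buffer *p)
{
        struct foo_dev *dev = video_drvdata(file);

        return vb2_dqbuf(&dev->queue, p, file->f_flags & O_NONBLOCK);
}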
+
+/**
+ * vb2_streamon - start streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamon handler
+ *
+ * Should be called from vidioc_streamon handler of a driver.
+ * This function:
+ * 1) verifies current state
+ * 2) starts streaming and passes any previously queued buffers to the driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamon handler in the driver.
+ */
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ struct vb2_buffer *vb;
+
+ if (q->fileio) {
+ dprintk(1, "streamon: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (type != q->type) {
+ dprintk(1, "streamon: invalid stream type\n");
+ return -EINVAL;
+ }
+
+ if (q->streaming) {
+ dprintk(1, "streamon: already streaming\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Cannot start streaming on an OUTPUT device if no buffers have
+ * been queued yet.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (list_empty(&q->queued_list)) {
+ dprintk(1, "streamon: no output buffers queued\n");
+ return -EINVAL;
+ }
+ }
+
+ q->streaming = 1;
+
+ /*
+ * Let driver notice that streaming state has been enabled.
+ */
+ call_qop(q, start_streaming, q);
+
+ /*
+ * If any buffers were queued before streamon,
+ * we can now pass them to driver for processing.
+ */
+ list_for_each_entry(vb, &q->queued_list, queued_entry)
+ __enqueue_in_driver(vb);
+
+ dprintk(3, "Streamon successful\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+
+/**
+ * __vb2_queue_cancel() - cancel and stop (pause) streaming
+ *
+ * Removes all queued buffers from driver's queue and all buffers queued by
+ * userspace from videobuf's queue. Returns to state after reqbufs.
+ */
+static void __vb2_queue_cancel(struct vb2_queue *q)
+{
+ unsigned int i;
+
+ /*
+ * Tell driver to stop all transactions and release all queued
+ * buffers.
+ */
+ if (q->streaming)
+ call_qop(q, stop_streaming, q);
+ q->streaming = 0;
+
+ /*
+ * Remove all buffers from videobuf's list...
+ */
+ INIT_LIST_HEAD(&q->queued_list);
+ /*
+ * ...and done list; userspace will not receive any buffers it
+ * has not already dequeued before initiating cancel.
+ */
+ INIT_LIST_HEAD(&q->done_list);
+ wake_up_all(&q->done_wq);
+
+ /*
+ * Reinitialize all buffers for next use.
+ */
+ for (i = 0; i < q->num_buffers; ++i)
+ q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+}
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ * This function:
+ * 1) verifies current state,
+ * 2) stops streaming and dequeues any queued buffers, including those previously
+ * passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver
+ */
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (q->fileio) {
+ dprintk(1, "streamoff: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (type != q->type) {
+ dprintk(1, "streamoff: invalid stream type\n");
+ return -EINVAL;
+ }
+
+ if (!q->streaming) {
+ dprintk(1, "streamoff: not streaming\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cancel will pause streaming and remove all buffers from the driver
+ * and videobuf, effectively returning control over them to userspace.
+ */
+ __vb2_queue_cancel(q);
+
+ dprintk(3, "Streamoff successful\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_streamoff);
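Both streaming ioctls are normally just forwarded to vb2, which performs all state checks itself (this mirrors the vivi handlers later in this patch; foo_dev and the queue field name are hypothetical):

static int foo_vidioc_streamon(struct file *file, void *priv,
                               enum v4l2_buf_type type)
{
        struct foo_dev *dev = video_drvdata(file);

        return vb2_streamon(&dev->queue, type);
}

static int foo_vidioc_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct foo_dev *dev = video_drvdata(file);

        return vb2_streamoff(&dev->queue, type);
}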
+
+/**
+ * __find_plane_by_offset() - find plane associated with the given offset off
+ */
+static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
+ unsigned int *_buffer, unsigned int *_plane)
+{
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+
+ /*
+ * Go over all buffers and their planes, comparing the given offset
+ * with an offset assigned to each plane. If a match is found,
+ * return its buffer and plane numbers.
+ */
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ vb = q->bufs[buffer];
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->v4l2_planes[plane].m.mem_offset == off) {
+ *_buffer = buffer;
+ *_plane = plane;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vb2_mmap() - map video buffers into application address space
+ * @q: videobuf2 queue
+ * @vma: vma passed to the mmap file operation handler in the driver
+ *
+ * Should be called from mmap file operation handler of a driver.
+ * This function maps one plane of one of the available video buffers to
+ * userspace. To map the whole video memory allocated on reqbufs, this function
+ * has to be called once per plane of each previously allocated buffer.
+ *
+ * When the userspace application calls mmap, it passes the offset returned
+ * to it earlier by the vidioc_querybuf handler. That offset acts as
+ * a "cookie", which is then used to identify the plane to be mapped.
+ * This function finds a plane with a matching offset and performs the mapping
+ * by means of the provided memory operation.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the mmap handler in driver.
+ */
+int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+{
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct vb2_plane *vb_plane;
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+ int ret;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "Queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check memory area access mode.
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
+ return -EINVAL;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (!(vma->vm_flags & VM_WRITE)) {
+ dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!(vma->vm_flags & VM_READ)) {
+ dprintk(1, "Invalid vma flags, VM_READ needed\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Find the plane corresponding to the offset passed by userspace.
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+ return ret;
+
+ vb = q->bufs[buffer];
+ vb_plane = &vb->planes[plane];
+
+ ret = q->mem_ops->mmap(vb_plane->mem_priv, vma);
+ if (ret)
+ return ret;
+
+ vb_plane->mapped = 1;
+ vb->num_planes_mapped++;
+
+ dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_mmap);
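The mmap file operation of a vb2-based driver can likewise delegate directly; vb2_mmap() looks the plane up by its offset cookie and calls the allocator's mmap op. A sketch under the same hypothetical foo_dev naming:

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_dev *dev = video_drvdata(file);

        return vb2_mmap(&dev->queue, vma);
}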
+
+static int __vb2_init_fileio(struct vb2_queue *q, int read);
+static int __vb2_cleanup_fileio(struct vb2_queue *q);
+
+/**
+ * vb2_poll() - implements poll userspace operation
+ * @q: videobuf2 queue
+ * @file: file argument passed to the poll file operation handler
+ * @wait: wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
+unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+{
+ unsigned long flags;
+ unsigned int ret;
+ struct vb2_buffer *vb = NULL;
+
+ /*
+ * Start file io emulator if streaming api has not been used yet.
+ */
+ if (q->num_buffers == 0 && q->fileio == NULL) {
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ)) {
+ ret = __vb2_init_fileio(q, 1);
+ if (ret)
+ return ret;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE)) {
+ ret = __vb2_init_fileio(q, 0);
+ if (ret)
+ return ret;
+ /*
+ * Write to OUTPUT queue can be done immediately.
+ */
+ return POLLOUT | POLLWRNORM;
+ }
+ }
+
+ /*
+ * There is nothing to wait for if no buffers have already been queued.
+ */
+ if (list_empty(&q->queued_list))
+ return POLLERR;
+
+ poll_wait(file, &q->done_wq, wait);
+
+ /*
+ * Take first buffer available for dequeuing.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (!list_empty(&q->done_list))
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (vb && (vb->state == VB2_BUF_STATE_DONE
+ || vb->state == VB2_BUF_STATE_ERROR)) {
+ return (V4L2_TYPE_IS_OUTPUT(q->type)) ? POLLOUT | POLLWRNORM :
+ POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_poll);
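The poll file operation forwards in the same way (hypothetical foo_dev again; vb2_poll() also starts the file io emulator when needed, as described above):

static unsigned int foo_poll(struct file *file, poll_table *wait)
{
        struct foo_dev *dev = video_drvdata(file);

        return vb2_poll(&dev->queue, file, wait);
}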
+
+/**
+ * vb2_queue_init() - initialize a videobuf2 queue
+ * @q: videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible for clearing its contents and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
+int vb2_queue_init(struct vb2_queue *q)
+{
+ BUG_ON(!q);
+ BUG_ON(!q->ops);
+ BUG_ON(!q->mem_ops);
+ BUG_ON(!q->type);
+ BUG_ON(!q->io_modes);
+
+ BUG_ON(!q->ops->queue_setup);
+ BUG_ON(!q->ops->buf_queue);
+
+ INIT_LIST_HEAD(&q->queued_list);
+ INIT_LIST_HEAD(&q->done_list);
+ spin_lock_init(&q->done_lock);
+ init_waitqueue_head(&q->done_wq);
+
+ if (q->buf_struct_size == 0)
+ q->buf_struct_size = sizeof(struct vb2_buffer);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init);
+
+/**
+ * vb2_queue_release() - stop streaming, release the queue and free memory
+ * @q: videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
+void vb2_queue_release(struct vb2_queue *q)
+{
+ __vb2_cleanup_fileio(q);
+ __vb2_queue_cancel(q);
+ __vb2_queue_free(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_release);
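Putting the mandatory fields from the vb2_queue_init() comment together, a capture driver might set up and tear down its queue roughly as follows. This is a hedged sketch: foo_dev, foo_qops and struct foo_buffer are hypothetical, vb2_vmalloc_memops is just one of the allocators added by this patch, and the VB2_MMAP flag is assumed alongside the VB2_READ flag seen in vb2_poll() above:

static int foo_init_queue(struct foo_dev *dev)
{
        struct vb2_queue *q = &dev->queue;

        memset(q, 0, sizeof(*q));
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_READ;
        q->drv_priv = dev;                      /* retrieved via vb2_get_drv_priv() */
        q->ops = &foo_qops;                     /* driver's vb2_ops */
        q->mem_ops = &vb2_vmalloc_memops;       /* or another vb2 allocator */
        q->buf_struct_size = sizeof(struct foo_buffer);

        return vb2_queue_init(q);
}

static void foo_cleanup_queue(struct foo_dev *dev)
{
        /* stops streaming and frees all buffer memory */
        vb2_queue_release(&dev->queue);
}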
+
+/**
+ * struct vb2_fileio_buf - buffer context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of streaming API. This structure is used for
+ * tracking context related to the buffers.
+ */
+struct vb2_fileio_buf {
+ void *vaddr;
+ unsigned int size;
+ unsigned int pos;
+ unsigned int queued:1;
+};
+
+/**
+ * struct vb2_fileio_data - queue context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of streaming API. For proper operation it requires
+ * this structure to save the driver state between each call of the read
+ * or write function.
+ */
+struct vb2_fileio_data {
+ struct v4l2_requestbuffers req;
+ struct v4l2_buffer b;
+ struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
+ unsigned int index;
+ unsigned int q_count;
+ unsigned int dq_count;
+ unsigned int flags;
+};
+
+/**
+ * __vb2_init_fileio() - initialize file io emulator
+ * @q: videobuf2 queue
+ * @read: mode selector (1 means read, 0 means write)
+ */
+static int __vb2_init_fileio(struct vb2_queue *q, int read)
+{
+ struct vb2_fileio_data *fileio;
+ int i, ret;
+ unsigned int count = 0;
+
+ /*
+ * Sanity check
+ */
+ if ((read && !(q->io_modes & VB2_READ)) ||
+ (!read && !(q->io_modes & VB2_WRITE)))
+ BUG();
+
+ /*
+ * Check if device supports mapping buffers to kernel virtual space.
+ */
+ if (!q->mem_ops->vaddr)
+ return -EBUSY;
+
+ /*
+ * Check that the streaming api has not already been activated.
+ */
+ if (q->streaming || q->num_buffers > 0)
+ return -EBUSY;
+
+ /*
+ * Start with count 1, driver can increase it in queue_setup()
+ */
+ count = 1;
+
+ dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
+ (read) ? "read" : "write", count, q->io_flags);
+
+ fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
+ if (fileio == NULL)
+ return -ENOMEM;
+
+ fileio->flags = q->io_flags;
+
+ /*
+ * Request buffers and use MMAP type to force driver
+ * to allocate buffers by itself.
+ */
+ fileio->req.count = count;
+ fileio->req.memory = V4L2_MEMORY_MMAP;
+ fileio->req.type = q->type;
+ ret = vb2_reqbufs(q, &fileio->req);
+ if (ret)
+ goto err_kfree;
+
+ /*
+ * Check if plane_count is correct
+ * (multiplane buffers are not supported).
+ */
+ if (q->bufs[0]->num_planes != 1) {
+ fileio->req.count = 0;
+ ret = -EBUSY;
+ goto err_reqbufs;
+ }
+
+ /*
+ * Get kernel address of each buffer.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+ if (fileio->bufs[i].vaddr == NULL)
+ goto err_reqbufs;
+ fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+ }
+
+ /*
+ * Read mode requires pre queuing of all buffers.
+ */
+ if (read) {
+ /*
+ * Queue all buffers.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ struct v4l2_buffer *b = &fileio->b;
+ memset(b, 0, sizeof(*b));
+ b->type = q->type;
+ b->memory = q->memory;
+ b->index = i;
+ ret = vb2_qbuf(q, b);
+ if (ret)
+ goto err_reqbufs;
+ fileio->bufs[i].queued = 1;
+ }
+
+ /*
+ * Start streaming.
+ */
+ ret = vb2_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
+ }
+
+ q->fileio = fileio;
+
+ return ret;
+
+err_reqbufs:
+ vb2_reqbufs(q, &fileio->req);
+
+err_kfree:
+ kfree(fileio);
+ return ret;
+}
+
+/**
+ * __vb2_cleanup_fileio() - free resources used by the file io emulator
+ * @q: videobuf2 queue
+ */
+static int __vb2_cleanup_fileio(struct vb2_queue *q)
+{
+ struct vb2_fileio_data *fileio = q->fileio;
+
+ if (fileio) {
+ /*
+ * Hack fileio context to enable direct calls to vb2 ioctl
+ * interface.
+ */
+ q->fileio = NULL;
+
+ vb2_streamoff(q, q->type);
+ fileio->req.count = 0;
+ vb2_reqbufs(q, &fileio->req);
+ kfree(fileio);
+ dprintk(3, "file io emulator closed\n");
+ }
+ return 0;
+}
+
+/**
+ * __vb2_perform_fileio() - perform a single file io (read or write) operation
+ * @q: videobuf2 queue
+ * @data: pointer to the target userspace buffer
+ * @count: number of bytes to read or write
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ * @read: access mode selector (1 means read, 0 means write)
+ */
+static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock, int read)
+{
+ struct vb2_fileio_data *fileio;
+ struct vb2_fileio_buf *buf;
+ int ret, index;
+
+ dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
+ read ? "read" : "write", (long)*ppos, count,
+ nonblock ? "non" : "");
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Initialize emulator on first call.
+ */
+ if (!q->fileio) {
+ ret = __vb2_init_fileio(q, read);
+ dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ return ret;
+ }
+ fileio = q->fileio;
+
+ /*
+ * Hack fileio context to enable direct calls to vb2 ioctl interface.
+ * The pointer will be restored before returning from this function.
+ */
+ q->fileio = NULL;
+
+ index = fileio->index;
+ buf = &fileio->bufs[index];
+
+ /*
+ * Check if we need to dequeue the buffer.
+ */
+ if (buf->queued) {
+ struct vb2_buffer *vb;
+
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ fileio->b.index = index;
+ ret = vb2_dqbuf(q, &fileio->b, nonblock);
+ dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+ if (ret)
+ goto end;
+ fileio->dq_count += 1;
+
+ /*
+ * Get number of bytes filled by the driver
+ */
+ vb = q->bufs[index];
+ buf->size = vb2_get_plane_payload(vb, 0);
+ buf->queued = 0;
+ }
+
+ /*
+ * Limit count on last few bytes of the buffer.
+ */
+ if (buf->pos + count > buf->size) {
+ count = buf->size - buf->pos;
+ dprintk(5, "reducing read count: %zd\n", count);
+ }
+
+ /*
+ * Transfer data to userspace.
+ */
+ dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
+ count, index, buf->pos);
+ if (read)
+ ret = copy_to_user(data, buf->vaddr + buf->pos, count);
+ else
+ ret = copy_from_user(buf->vaddr + buf->pos, data, count);
+ if (ret) {
+ dprintk(3, "file io: error copying data\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ /*
+ * Update counters.
+ */
+ buf->pos += count;
+ *ppos += count;
+
+ /*
+ * Queue next buffer if required.
+ */
+ if (buf->pos == buf->size ||
+ (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
+ /*
+ * Check if this is the last buffer to read.
+ */
+ if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
+ fileio->dq_count == 1) {
+ dprintk(3, "file io: read limit reached\n");
+ /*
+ * Restore fileio pointer and release the context.
+ */
+ q->fileio = fileio;
+ return __vb2_cleanup_fileio(q);
+ }
+
+ /*
+ * Call vb2_qbuf and give buffer to the driver.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ fileio->b.index = index;
+ fileio->b.bytesused = buf->pos;
+ ret = vb2_qbuf(q, &fileio->b);
+ dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
+ if (ret)
+ goto end;
+
+ /*
+ * Buffer has been queued, update the status
+ */
+ buf->pos = 0;
+ buf->queued = 1;
+ buf->size = q->bufs[0]->v4l2_planes[0].length;
+ fileio->q_count += 1;
+
+ /*
+ * Switch to the next buffer
+ */
+ fileio->index = (index + 1) % q->num_buffers;
+
+ /*
+ * Start streaming if required.
+ */
+ if (!read && !q->streaming) {
+ ret = vb2_streamon(q, q->type);
+ if (ret)
+ goto end;
+ }
+ }
+
+ /*
+ * Return proper number of bytes processed.
+ */
+ if (ret == 0)
+ ret = count;
+end:
+ /*
+ * Restore the fileio context and block vb2 ioctl interface.
+ */
+ q->fileio = fileio;
+ return ret;
+}
+
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
+}
+EXPORT_SYMBOL_GPL(vb2_read);
+
+size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+}
+EXPORT_SYMBOL_GPL(vb2_write);
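With the emulator in place, a driver's read file operation can simply hand over to vb2_read(), exactly as the vivi conversion below does (foo_dev is hypothetical; the write path is symmetric via vb2_write()):

static ssize_t foo_read(struct file *file, char __user *data,
                        size_t count, loff_t *ppos)
{
        struct foo_dev *dev = video_drvdata(file);

        return vb2_read(&dev->queue, data, count, ppos,
                        file->f_flags & O_NONBLOCK);
}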
+
+MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
+MODULE_AUTHOR("Pawel Osciak, Marek Szyprowski");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
new file mode 100644
index 000000000000..bb6a5602cf94
--- /dev/null
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -0,0 +1,185 @@
+/*
+ * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_dc_conf {
+ struct device *dev;
+};
+
+struct vb2_dc_buf {
+ struct vb2_dc_conf *conf;
+ void *vaddr;
+ dma_addr_t paddr;
+ unsigned long size;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ struct vb2_vmarea_handler handler;
+};
+
+static void vb2_dma_contig_put(void *buf_priv);
+
+static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->paddr,
+ GFP_KERNEL);
+ if (!buf->vaddr) {
+ dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
+ size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ buf->conf = conf;
+ buf->size = size;
+
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_dma_contig_put;
+ buf->handler.arg = buf;
+
+ atomic_inc(&buf->refcount);
+
+ return buf;
+}
+
+static void vb2_dma_contig_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (atomic_dec_and_test(&buf->refcount)) {
+ dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
+ buf->paddr);
+ kfree(buf);
+ }
+}
+
+static void *vb2_dma_contig_cookie(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return (void *)buf->paddr;
+}
+
+static void *vb2_dma_contig_vaddr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ if (!buf)
+ return NULL;
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ return vb2_mmap_pfn_range(vma, buf->paddr, buf->size,
+ &vb2_common_vm_ops, &buf->handler);
+}
+
+static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_dc_buf *buf;
+ struct vm_area_struct *vma;
+ dma_addr_t paddr = 0;
+ int ret;
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = vb2_get_contig_userptr(vaddr, size, &vma, &paddr);
+ if (ret) {
+ printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
+ vaddr);
+ kfree(buf);
+ return ERR_PTR(ret);
+ }
+
+ buf->size = size;
+ buf->paddr = paddr;
+ buf->vma = vma;
+
+ return buf;
+}
+
+static void vb2_dma_contig_put_userptr(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+ if (!buf)
+ return;
+
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+const struct vb2_mem_ops vb2_dma_contig_memops = {
+ .alloc = vb2_dma_contig_alloc,
+ .put = vb2_dma_contig_put,
+ .cookie = vb2_dma_contig_cookie,
+ .vaddr = vb2_dma_contig_vaddr,
+ .mmap = vb2_dma_contig_mmap,
+ .get_userptr = vb2_dma_contig_get_userptr,
+ .put_userptr = vb2_dma_contig_put_userptr,
+ .num_users = vb2_dma_contig_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
+
+void *vb2_dma_contig_init_ctx(struct device *dev)
+{
+ struct vb2_dc_conf *conf;
+
+ conf = kzalloc(sizeof *conf, GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ conf->dev = dev;
+
+ return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
+
+void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
+{
+ kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
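Unlike the vmalloc allocator, dma-contig needs a per-device allocation context carrying the struct device used for dma_alloc_coherent(). A driver would typically create it at probe time and hand it back through queue_setup()'s alloc_ctxs array. A hedged sketch; dev/pdev naming is hypothetical and the alloc_ctxs parameter follows the queue_setup() prototype visible in the vivi conversion below:

        /* at probe time */
        dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
        if (IS_ERR(dev->alloc_ctx))
                return PTR_ERR(dev->alloc_ctx);

        /* in the driver's queue_setup() callback */
        alloc_ctxs[0] = dev->alloc_ctx;

        /* at remove time */
        vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);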
+
+MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
new file mode 100644
index 000000000000..20b5c5dcc0ef
--- /dev/null
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -0,0 +1,292 @@
+/*
+ * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+#include <media/videobuf2-dma-sg.h>
+
+struct vb2_dma_sg_buf {
+ void *vaddr;
+ struct page **pages;
+ int write;
+ int offset;
+ struct vb2_dma_sg_desc sg_desc;
+ atomic_t refcount;
+ struct vb2_vmarea_handler handler;
+};
+
+static void vb2_dma_sg_put(void *buf_priv);
+
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
+{
+ struct vb2_dma_sg_buf *buf;
+ int i;
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->vaddr = NULL;
+ buf->write = 0;
+ buf->offset = 0;
+ buf->sg_desc.size = size;
+ buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages *
+ sizeof(*buf->sg_desc.sglist));
+ if (!buf->sg_desc.sglist)
+ goto fail_sglist_alloc;
+ memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
+ sizeof(*buf->sg_desc.sglist));
+ sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+
+ buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!buf->pages)
+ goto fail_pages_array_alloc;
+
+ for (i = 0; i < buf->sg_desc.num_pages; ++i) {
+ buf->pages[i] = alloc_page(GFP_KERNEL);
+ if (NULL == buf->pages[i])
+ goto fail_pages_alloc;
+ sg_set_page(&buf->sg_desc.sglist[i],
+ buf->pages[i], PAGE_SIZE, 0);
+ }
+
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_dma_sg_put;
+ buf->handler.arg = buf;
+
+ atomic_inc(&buf->refcount);
+
+ printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
+ __func__, buf->sg_desc.num_pages);
+
+ if (!buf->vaddr)
+ buf->vaddr = vm_map_ram(buf->pages,
+ buf->sg_desc.num_pages,
+ -1,
+ PAGE_KERNEL);
+ return buf;
+
+fail_pages_alloc:
+ while (--i >= 0)
+ __free_page(buf->pages[i]);
+
+fail_pages_array_alloc:
+ vfree(buf->sg_desc.sglist);
+
+fail_sglist_alloc:
+ kfree(buf);
+ return NULL;
+}
+
+static void vb2_dma_sg_put(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ int i = buf->sg_desc.num_pages;
+
+ if (atomic_dec_and_test(&buf->refcount)) {
+ printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
+ buf->sg_desc.num_pages);
+ if (buf->vaddr)
+ vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+ vfree(buf->sg_desc.sglist);
+ while (--i >= 0)
+ __free_page(buf->pages[i]);
+ kfree(buf->pages);
+ kfree(buf);
+ }
+}
+
+static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_dma_sg_buf *buf;
+ unsigned long first, last;
+ int num_pages_from_user, i;
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->vaddr = NULL;
+ buf->write = write;
+ buf->offset = vaddr & ~PAGE_MASK;
+ buf->sg_desc.size = size;
+
+ first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ buf->sg_desc.num_pages = last - first + 1;
+
+ buf->sg_desc.sglist = vmalloc(
+ buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
+ if (!buf->sg_desc.sglist)
+ goto userptr_fail_sglist_alloc;
+
+ memset(buf->sg_desc.sglist, 0,
+ buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
+ sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+
+ buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!buf->pages)
+ goto userptr_fail_pages_array_alloc;
+
+ down_read(&current->mm->mmap_sem);
+ num_pages_from_user = get_user_pages(current, current->mm,
+ vaddr & PAGE_MASK,
+ buf->sg_desc.num_pages,
+ write,
+ 1, /* force */
+ buf->pages,
+ NULL);
+ up_read(&current->mm->mmap_sem);
+ if (num_pages_from_user != buf->sg_desc.num_pages)
+ goto userptr_fail_get_user_pages;
+
+ sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
+ PAGE_SIZE - buf->offset, buf->offset);
+ size -= PAGE_SIZE - buf->offset;
+ for (i = 1; i < buf->sg_desc.num_pages; ++i) {
+ sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
+ min_t(size_t, PAGE_SIZE, size), 0);
+ size -= min_t(size_t, PAGE_SIZE, size);
+ }
+ return buf;
+
+userptr_fail_get_user_pages:
+ printk(KERN_DEBUG "get_user_pages requested/got: %d/%d]\n",
+ num_pages_from_user, buf->sg_desc.num_pages);
+ while (--num_pages_from_user >= 0)
+ put_page(buf->pages[num_pages_from_user]);
+
+userptr_fail_pages_array_alloc:
+ vfree(buf->sg_desc.sglist);
+
+userptr_fail_sglist_alloc:
+ kfree(buf);
+ return NULL;
+}
+
+/*
+ * @put_userptr: inform the allocator that a USERPTR buffer will no longer
+ * be used
+ */
+static void vb2_dma_sg_put_userptr(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ int i = buf->sg_desc.num_pages;
+
+ printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
+ __func__, buf->sg_desc.num_pages);
+ if (buf->vaddr)
+ vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+ while (--i >= 0) {
+ if (buf->write)
+ set_page_dirty_lock(buf->pages[i]);
+ put_page(buf->pages[i]);
+ }
+ vfree(buf->sg_desc.sglist);
+ kfree(buf->pages);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_vaddr(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ BUG_ON(!buf);
+
+ if (!buf->vaddr)
+ buf->vaddr = vm_map_ram(buf->pages,
+ buf->sg_desc.num_pages,
+ -1,
+ PAGE_KERNEL);
+
+ /* add offset in case userptr is not page-aligned */
+ return buf->vaddr + buf->offset;
+}
+
+static unsigned int vb2_dma_sg_num_users(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int i = 0;
+
+ if (!buf) {
+ printk(KERN_ERR "No memory to map\n");
+ return -EINVAL;
+ }
+
+ do {
+ int ret;
+
+ ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
+ if (ret) {
+ printk(KERN_ERR "Remapping memory, error: %d\n", ret);
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+
+ /*
+ * Use common vm_area operations to track buffer refcount.
+ */
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ return 0;
+}
+
+static void *vb2_dma_sg_cookie(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ return &buf->sg_desc;
+}
+
+const struct vb2_mem_ops vb2_dma_sg_memops = {
+ .alloc = vb2_dma_sg_alloc,
+ .put = vb2_dma_sg_put,
+ .get_userptr = vb2_dma_sg_get_userptr,
+ .put_userptr = vb2_dma_sg_put_userptr,
+ .vaddr = vb2_dma_sg_vaddr,
+ .mmap = vb2_dma_sg_mmap,
+ .num_users = vb2_dma_sg_num_users,
+ .cookie = vb2_dma_sg_cookie,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
+
+MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
+MODULE_AUTHOR("Andrzej Pietrasiewicz");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
new file mode 100644
index 000000000000..053c15794b8d
--- /dev/null
+++ b/drivers/media/video/videobuf2-memops.c
@@ -0,0 +1,232 @@
+/*
+ * videobuf2-memops.c - generic memory handling routines for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+/**
+ * vb2_get_vma() - acquire and lock the virtual memory area
+ * @vma: given virtual memory area
+ *
+ * This function attempts to acquire an area mapped in the userspace for
+ * the duration of a hardware operation. The area is "locked" by performing
+ * the same set of operations that are done when a process calls fork() and
+ * memory areas are duplicated.
+ *
+ * Returns a copy of a virtual memory region on success or NULL.
+ */
+struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (vma_copy == NULL)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+/**
+ * vb2_put_vma() - release a userspace virtual memory area
+ * @vma: virtual memory region associated with the area to be released
+ *
+ * This function releases the previously acquired memory area after a hardware
+ * operation.
+ */
+void vb2_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ kfree(vma);
+}
+
+/**
+ * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
+ * @vaddr: starting virtual address of the area to be verified
+ * @size: size of the area
+ * @res_vma: will return locked copy of struct vm_area for the given area
+ * @res_pa: will return physical address for the given vaddr
+ *
+ * This function will go through memory area of size @size mapped at @vaddr and
+ * verify that the underlying physical pages are contiguous. If they are
+ * contiguous, the virtual memory area is locked and @res_vma is filled with
+ * the copy and @res_pa set to the physical address of the buffer.
+ *
+ * Returns 0 on success.
+ */
+int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
+ struct vm_area_struct **res_vma, dma_addr_t *res_pa)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long offset, start, end;
+ unsigned long this_pfn, prev_pfn;
+ dma_addr_t pa = 0;
+ int ret = -EFAULT;
+
+ start = vaddr;
+ offset = start & ~PAGE_MASK;
+ end = start + size;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+
+ if (vma == NULL || vma->vm_end < end)
+ goto done;
+
+ for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
+ ret = follow_pfn(vma, start, &this_pfn);
+ if (ret)
+ goto done;
+
+ if (prev_pfn == 0)
+ pa = this_pfn << PAGE_SHIFT;
+ else if (this_pfn != prev_pfn + 1) {
+ ret = -EFAULT;
+ goto done;
+ }
+ prev_pfn = this_pfn;
+ }
+
+ /*
+ * Memory is contiguous, lock vma and return to the caller
+ */
+ *res_vma = vb2_get_vma(vma);
+ if (*res_vma == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ *res_pa = pa + offset;
+ ret = 0;
+
+done:
+ up_read(&mm->mmap_sem);
+ return ret;
+}
+
+/**
+ * vb2_mmap_pfn_range() - map physical pages to userspace
+ * @vma: virtual memory region for the mapping
+ * @paddr: starting physical address of the memory to be mapped
+ * @size: size of the memory to be mapped
+ * @vm_ops: vm operations to be assigned to the created area
+ * @priv: private data to be associated with the area
+ *
+ * Returns 0 on success.
+ */
+int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
+ unsigned long size,
+ const struct vm_operations_struct *vm_ops,
+ void *priv)
+{
+ int ret;
+
+ size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (ret) {
+ printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+ vma->vm_private_data = priv;
+ vma->vm_ops = vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
+ __func__, paddr, vma->vm_start, size);
+
+ return 0;
+}
+
+/**
+ * vb2_common_vm_open() - increase refcount of the vma
+ * @vma: virtual memory region for the mapping
+ *
+ * This function adds another user to the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_open(struct vm_area_struct *vma)
+{
+ struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+ printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ __func__, h, atomic_read(h->refcount), vma->vm_start,
+ vma->vm_end);
+
+ atomic_inc(h->refcount);
+}
+
+/**
+ * vb2_common_vm_close() - decrease refcount of the vma
+ * @vma: virtual memory region for the mapping
+ *
+ * This function releases the user from the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_close(struct vm_area_struct *vma)
+{
+ struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+ printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ __func__, h, atomic_read(h->refcount), vma->vm_start,
+ vma->vm_end);
+
+ h->put(h->arg);
+}
+
+/**
+ * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
+ * video buffers
+ */
+const struct vm_operations_struct vb2_common_vm_ops = {
+ .open = vb2_common_vm_open,
+ .close = vb2_common_vm_close,
+};
+EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
+
+MODULE_DESCRIPTION("common memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/videobuf2-vmalloc.c b/drivers/media/video/videobuf2-vmalloc.c
new file mode 100644
index 000000000000..b5e6936fb628
--- /dev/null
+++ b/drivers/media/video/videobuf2-vmalloc.c
@@ -0,0 +1,132 @@
+/*
+ * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_vmalloc_buf {
+ void *vaddr;
+ unsigned long size;
+ atomic_t refcount;
+ struct vb2_vmarea_handler handler;
+};
+
+static void vb2_vmalloc_put(void *buf_priv);
+
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->size = size;
+ buf->vaddr = vmalloc_user(buf->size);
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_vmalloc_put;
+ buf->handler.arg = buf;
+
+ if (!buf->vaddr) {
+ printk(KERN_ERR "vmalloc of size %ld failed\n", buf->size);
+ kfree(buf);
+ return NULL;
+ }
+
+ atomic_inc(&buf->refcount);
+ printk(KERN_DEBUG "Allocated vmalloc buffer of size %ld at vaddr=%p\n",
+ buf->size, buf->vaddr);
+
+ return buf;
+}
+
+static void vb2_vmalloc_put(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+
+ if (atomic_dec_and_test(&buf->refcount)) {
+ printk(KERN_DEBUG "%s: Freeing vmalloc mem at vaddr=%p\n",
+ __func__, buf->vaddr);
+ vfree(buf->vaddr);
+ kfree(buf);
+ }
+}
+
+static void *vb2_vmalloc_vaddr(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+
+ BUG_ON(!buf);
+
+ if (!buf->vaddr) {
+ printk(KERN_ERR "Address of an unallocated plane requested\n");
+ return NULL;
+ }
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_vmalloc_num_users(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ return atomic_read(&buf->refcount);
+}
+
+static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ int ret;
+
+ if (!buf) {
+ printk(KERN_ERR "No memory to map\n");
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, buf->vaddr, 0);
+ if (ret) {
+ printk(KERN_ERR "Remapping vmalloc memory, error: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Make sure that vm_areas for 2 buffers won't be merged together
+ */
+ vma->vm_flags |= VM_DONTEXPAND;
+
+ /*
+ * Use common vm_area operations to track buffer refcount.
+ */
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ return 0;
+}
+
+const struct vb2_mem_ops vb2_vmalloc_memops = {
+ .alloc = vb2_vmalloc_alloc,
+ .put = vb2_vmalloc_put,
+ .vaddr = vb2_vmalloc_vaddr,
+ .mmap = vb2_vmalloc_mmap,
+ .num_users = vb2_vmalloc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
+
+MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index c49c39386bd0..bd104d0ad36c 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -7,6 +7,9 @@
* John Sokol <sokol--a.t--videotechnology.com>
* http://v4l.videotechnology.com/
*
+ * Conversion to videobuf2 by Pawel Osciak & Marek Szyprowski
+ * Copyright (c) 2010 Samsung Electronics
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the BSD Licence, GNU General Public License
* as published by the Free Software Foundation; either version 2 of the
@@ -23,12 +26,11 @@
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/kthread.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#include <linux/freezer.h>
-#endif
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-common.h>
#define VIVI_MODULE_NAME "vivi"
@@ -42,7 +44,7 @@
#define MAX_HEIGHT 1200
#define VIVI_MAJOR_VERSION 0
-#define VIVI_MINOR_VERSION 7
+#define VIVI_MINOR_VERSION 8
#define VIVI_RELEASE 0
#define VIVI_VERSION \
KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
@@ -133,16 +135,11 @@ static struct vivi_fmt *get_format(struct v4l2_format *f)
return &formats[k];
}
-struct sg_to_addr {
- int pos;
- struct scatterlist *sg;
-};
-
/* buffer for one video frame */
struct vivi_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
+ struct vb2_buffer vb;
+ struct list_head list;
struct vivi_fmt *fmt;
};
@@ -162,13 +159,20 @@ static LIST_HEAD(vivi_devlist);
struct vivi_dev {
struct list_head vivi_devlist;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
/* controls */
- int brightness;
- int contrast;
- int saturation;
- int hue;
- int volume;
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *button;
+ struct v4l2_ctrl *boolean;
+ struct v4l2_ctrl *int32;
+ struct v4l2_ctrl *int64;
+ struct v4l2_ctrl *menu;
+ struct v4l2_ctrl *string;
spinlock_t slock;
struct mutex mutex;
@@ -181,6 +185,7 @@ struct vivi_dev {
/* Several counters */
unsigned ms;
unsigned long jiffies;
+ unsigned button_pressed;
int mv_count; /* Controls bars movement */
@@ -190,9 +195,11 @@ struct vivi_dev {
/* video capture */
struct vivi_fmt *fmt;
unsigned int width, height;
- struct videobuf_queue vb_vidq;
+ struct vb2_queue vb_vidq;
+ enum v4l2_field field;
+ unsigned int field_count;
- unsigned long generating;
+ unsigned int open_count;
u8 bars[9][3];
u8 line[MAX_WIDTH * 4];
};
@@ -443,10 +450,10 @@ static void gen_text(struct vivi_dev *dev, char *basep,
static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
{
- int hmax = buf->vb.height;
- int wmax = buf->vb.width;
+ int wmax = dev->width;
+ int hmax = dev->height;
struct timeval ts;
- void *vbuf = videobuf_to_vmalloc(&buf->vb);
+ void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
unsigned ms;
char str[100];
int h, line = 1;
@@ -472,22 +479,38 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->width, dev->height, dev->input);
gen_text(dev, vbuf, line++ * 16, 16, str);
+ mutex_lock(&dev->ctrl_handler.lock);
snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
- dev->brightness,
- dev->contrast,
- dev->saturation,
- dev->hue);
+ dev->brightness->cur.val,
+ dev->contrast->cur.val,
+ dev->saturation->cur.val,
+ dev->hue->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " volume %3d ", dev->volume);
+ snprintf(str, sizeof(str), " volume %3d ", dev->volume->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " int32 %d, int64 %lld ",
+ dev->int32->cur.val,
+ dev->int64->cur.val64);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
+ dev->boolean->cur.val,
+ dev->menu->qmenu[dev->menu->cur.val],
+ dev->string->cur.string);
+ mutex_unlock(&dev->ctrl_handler.lock);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ if (dev->button_pressed) {
+ dev->button_pressed--;
+ snprintf(str, sizeof(str), " button pressed!");
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ }
dev->mv_count += 2;
- /* Advice that buffer was filled */
- buf->vb.field_count++;
+ buf->vb.v4l2_buf.field = dev->field;
+ dev->field_count++;
+ buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
do_gettimeofday(&ts);
- buf->vb.ts = ts;
- buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.v4l2_buf.timestamp = ts;
}
static void vivi_thread_tick(struct vivi_dev *dev)
@@ -504,23 +527,17 @@ static void vivi_thread_tick(struct vivi_dev *dev)
goto unlock;
}
- buf = list_entry(dma_q->active.next,
- struct vivi_buffer, vb.queue);
+ buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
+ list_del(&buf->list);
- /* Nobody is waiting on this buffer, return */
- if (!waitqueue_active(&buf->vb.done))
- goto unlock;
-
- list_del(&buf->vb.queue);
-
- do_gettimeofday(&buf->vb.ts);
+ do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
/* Fill buffer */
vivi_fillbuff(dev, buf);
dprintk(dev, 1, "filled buffer %p\n", buf);
- wake_up(&buf->vb.done);
- dprintk(dev, 2, "[%p/%d] wakeup\n", buf, buf->vb.i);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -571,17 +588,12 @@ static int vivi_thread(void *data)
return 0;
}
-static void vivi_start_generating(struct file *file)
+static int vivi_start_generating(struct vivi_dev *dev)
{
- struct vivi_dev *dev = video_drvdata(file);
struct vivi_dmaqueue *dma_q = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
- if (test_and_set_bit(0, &dev->generating))
- return;
- file->private_data = dev;
-
/* Resets frame counters */
dev->ms = 0;
dev->mv_count = 0;
@@ -593,146 +605,200 @@ static void vivi_start_generating(struct file *file)
if (IS_ERR(dma_q->kthread)) {
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- clear_bit(0, &dev->generating);
- return;
+ return PTR_ERR(dma_q->kthread);
}
/* Wakes thread */
wake_up_interruptible(&dma_q->wq);
dprintk(dev, 1, "returning from %s\n", __func__);
+ return 0;
}
-static void vivi_stop_generating(struct file *file)
+static void vivi_stop_generating(struct vivi_dev *dev)
{
- struct vivi_dev *dev = video_drvdata(file);
struct vivi_dmaqueue *dma_q = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
- if (!file->private_data)
- return;
- if (!test_and_clear_bit(0, &dev->generating))
- return;
-
/* shutdown control thread */
if (dma_q->kthread) {
kthread_stop(dma_q->kthread);
dma_q->kthread = NULL;
}
- videobuf_stop(&dev->vb_vidq);
- videobuf_mmap_free(&dev->vb_vidq);
-}
-static int vivi_is_generating(struct vivi_dev *dev)
-{
- return test_bit(0, &dev->generating);
+ /*
+ * A typical driver might need to wait here until the dma engine stops.
+ * In this case we can abort immediately, so it's just a noop.
+ */
+
+ /* Release all active buffers */
+ while (!list_empty(&dma_q->active)) {
+ struct vivi_buffer *buf;
+ buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
+ }
}
-
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
{
- struct vivi_dev *dev = vq->priv_data;
+ struct vivi_dev *dev = vb2_get_drv_priv(vq);
+ unsigned long size;
+
+ size = dev->width * dev->height * 2;
+
+ if (0 == *nbuffers)
+ *nbuffers = 32;
+
+ while (size * *nbuffers > vid_limit * 1024 * 1024)
+ (*nbuffers)--;
- *size = dev->width * dev->height * 2;
+ *nplanes = 1;
- if (0 == *count)
- *count = 32;
+ sizes[0] = size;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ /*
+ * videobuf2-vmalloc allocator is context-less so no need to set
+ * alloc_ctxs array.
+ */
- dprintk(dev, 1, "%s, count=%d, size=%d\n", __func__,
- *count, *size);
+ dprintk(dev, 1, "%s, count=%d, size=%ld\n", __func__,
+ *nbuffers, size);
return 0;
}
-static void free_buffer(struct videobuf_queue *vq, struct vivi_buffer *buf)
+static int buffer_init(struct vb2_buffer *vb)
{
- struct vivi_dev *dev = vq->priv_data;
+ struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- dprintk(dev, 1, "%s, state: %i\n", __func__, buf->vb.state);
+ BUG_ON(NULL == dev->fmt);
+
+ /*
+ * This callback is called once per buffer, after its allocation.
+ *
+ * Vivi does not allow changing format during streaming, but it is
+ * possible to do so when streaming is paused (i.e. in streamoff state).
+ * Buffers however are not freed when going into streamoff and so
+ * buffer size verification has to be done in buffer_prepare, on each
+ * qbuf.
+ * It would be best to move verification code here to buf_init and
+ * s_fmt though.
+ */
- videobuf_vmalloc_free(&buf->vb);
- dprintk(dev, 1, "free_buffer: freed\n");
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct vivi_dev *dev = vq->priv_data;
+ struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- int rc;
+ unsigned long size;
- dprintk(dev, 1, "%s, field=%d\n", __func__, field);
+ dprintk(dev, 1, "%s, field=%d\n", __func__, vb->v4l2_buf.field);
BUG_ON(NULL == dev->fmt);
+ /*
+ * These properties only change when the queue is idle, see s_fmt.
+ * The below checks should not be performed here, on each
+ * buffer_prepare (i.e. on each qbuf). Most of the code in this function
+ * should thus be moved to buffer_init and s_fmt.
+ */
if (dev->width < 48 || dev->width > MAX_WIDTH ||
dev->height < 32 || dev->height > MAX_HEIGHT)
return -EINVAL;
- buf->vb.size = dev->width * dev->height * 2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ size = dev->width * dev->height * 2;
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb, 0, size);
- /* These properties only change when queue is idle, see s_fmt */
- buf->fmt = dev->fmt;
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
+ buf->fmt = dev->fmt;
precalculate_bars(dev);
precalculate_line(dev);
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ return 0;
+}
- buf->vb.state = VIDEOBUF_PREPARED;
+static int buffer_finish(struct vb2_buffer *vb)
+{
+ struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ dprintk(dev, 1, "%s\n", __func__);
return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ dprintk(dev, 1, "%s\n", __func__);
-fail:
- free_buffer(vq, buf);
- return rc;
}
-static void
-buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct vivi_dev *dev = vq->priv_data;
+ struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
struct vivi_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
dprintk(dev, 1, "%s\n", __func__);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *vq)
{
- struct vivi_dev *dev = vq->priv_data;
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
+ struct vivi_dev *dev = vb2_get_drv_priv(vq);
+ dprintk(dev, 1, "%s\n", __func__);
+ return vivi_start_generating(dev);
+}
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct vivi_dev *dev = vb2_get_drv_priv(vq);
dprintk(dev, 1, "%s\n", __func__);
+ vivi_stop_generating(dev);
+ return 0;
+}
+
+static void vivi_lock(struct vb2_queue *vq)
+{
+ struct vivi_dev *dev = vb2_get_drv_priv(vq);
+ mutex_lock(&dev->mutex);
+}
- free_buffer(vq, buf);
+static void vivi_unlock(struct vb2_queue *vq)
+{
+ struct vivi_dev *dev = vb2_get_drv_priv(vq);
+ mutex_unlock(&dev->mutex);
}
-static struct videobuf_queue_ops vivi_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
+
+static struct vb2_ops vivi_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vivi_unlock,
+ .wait_finish = vivi_lock,
};
/* ------------------------------------------------------------------
@@ -774,7 +840,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
- f->fmt.pix.field = dev->vb_vidq.field;
+ f->fmt.pix.field = dev->field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -820,82 +886,60 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vivi_dev *dev = video_drvdata(file);
+ struct vb2_queue *q = &dev->vb_vidq;
int ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret < 0)
return ret;
- if (vivi_is_generating(dev)) {
+ if (vb2_is_streaming(q)) {
dprintk(dev, 1, "%s device busy\n", __func__);
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
dev->fmt = get_format(f);
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- dev->vb_vidq.field = f->fmt.pix.field;
- ret = 0;
-out:
- return ret;
+ dev->field = f->fmt.pix.field;
+
+ return 0;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
struct vivi_dev *dev = video_drvdata(file);
-
- return videobuf_reqbufs(&dev->vb_vidq, p);
+ return vb2_reqbufs(&dev->vb_vidq, p);
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct vivi_dev *dev = video_drvdata(file);
-
- return videobuf_querybuf(&dev->vb_vidq, p);
+ return vb2_querybuf(&dev->vb_vidq, p);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct vivi_dev *dev = video_drvdata(file);
-
- return videobuf_qbuf(&dev->vb_vidq, p);
+ return vb2_qbuf(&dev->vb_vidq, p);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct vivi_dev *dev = video_drvdata(file);
-
- return videobuf_dqbuf(&dev->vb_vidq, p,
- file->f_flags & O_NONBLOCK);
+ return vb2_dqbuf(&dev->vb_vidq, p, file->f_flags & O_NONBLOCK);
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct vivi_dev *dev = video_drvdata(file);
- int ret;
-
- if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- ret = videobuf_streamon(&dev->vb_vidq);
- if (ret)
- return ret;
-
- vivi_start_generating(file);
- return 0;
+ return vb2_streamon(&dev->vb_vidq, i);
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct vivi_dev *dev = video_drvdata(file);
- int ret;
-
- if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- ret = videobuf_streamoff(&dev->vb_vidq);
- if (!ret)
- vivi_stop_generating(file);
- return ret;
+ return vb2_streamoff(&dev->vb_vidq, i);
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
@@ -938,106 +982,47 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
}
/* --- controls ---------------------------------------------- */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 200);
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 16);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- }
- return -EINVAL;
-}
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct vivi_dev *dev = video_drvdata(file);
+ struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = dev->volume;
- return 0;
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = dev->brightness;
- return 0;
- case V4L2_CID_CONTRAST:
- ctrl->value = dev->contrast;
- return 0;
- case V4L2_CID_SATURATION:
- ctrl->value = dev->saturation;
- return 0;
- case V4L2_CID_HUE:
- ctrl->value = dev->hue;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct v4l2_queryctrl qc;
- int err;
-
- qc.id = ctrl->id;
- err = vidioc_queryctrl(file, priv, &qc);
- if (err < 0)
- return err;
- if (ctrl->value < qc.minimum || ctrl->value > qc.maximum)
- return -ERANGE;
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- dev->volume = ctrl->value;
- return 0;
- case V4L2_CID_BRIGHTNESS:
- dev->brightness = ctrl->value;
- return 0;
- case V4L2_CID_CONTRAST:
- dev->contrast = ctrl->value;
- return 0;
- case V4L2_CID_SATURATION:
- dev->saturation = ctrl->value;
- return 0;
- case V4L2_CID_HUE:
- dev->hue = ctrl->value;
- return 0;
- }
- return -EINVAL;
+ if (ctrl == dev->button)
+ dev->button_pressed = 30;
+ return 0;
}
/* ------------------------------------------------------------------
File operations for the device
------------------------------------------------------------------*/
+static int vivi_open(struct file *file)
+{
+ struct vivi_dev *dev = video_drvdata(file);
+
+ dprintk(dev, 1, "%s, %p\n", __func__, file);
+ dev->open_count++;
+ return 0;
+}
+
static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct vivi_dev *dev = video_drvdata(file);
- vivi_start_generating(file);
- return videobuf_read_stream(&dev->vb_vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
+ dprintk(dev, 1, "read called\n");
+ return vb2_read(&dev->vb_vidq, data, count, ppos,
+ file->f_flags & O_NONBLOCK);
}
static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
struct vivi_dev *dev = video_drvdata(file);
- struct videobuf_queue *q = &dev->vb_vidq;
+ struct vb2_queue *q = &dev->vb_vidq;
dprintk(dev, 1, "%s\n", __func__);
-
- vivi_start_generating(file);
- return videobuf_poll_stream(file, q, wait);
+ return vb2_poll(q, file, wait);
}
static int vivi_close(struct file *file)
@@ -1045,10 +1030,11 @@ static int vivi_close(struct file *file)
struct video_device *vdev = video_devdata(file);
struct vivi_dev *dev = video_drvdata(file);
- vivi_stop_generating(file);
+ dprintk(dev, 1, "close called (dev=%s), file %p\n",
+ video_device_node_name(vdev), file);
- dprintk(dev, 1, "close called (dev=%s)\n",
- video_device_node_name(vdev));
+ if (--dev->open_count == 0)
+ vb2_queue_release(&dev->vb_vidq);
return 0;
}
@@ -1059,8 +1045,7 @@ static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
- ret = videobuf_mmap_mapper(&dev->vb_vidq, vma);
-
+ ret = vb2_mmap(&dev->vb_vidq, vma);
dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
@@ -1068,8 +1053,82 @@ static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
+static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
+ .s_ctrl = vivi_s_ctrl,
+};
+
+#define VIVI_CID_CUSTOM_BASE (V4L2_CID_USER_BASE | 0xf000)
+
+static const struct v4l2_ctrl_config vivi_ctrl_button = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 0,
+ .name = "Button",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_boolean = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 1,
+ .name = "Boolean",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int32 = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 2,
+ .name = "Integer 32 Bits",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0x80000000,
+ .max = 0x7fffffff,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int64 = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 3,
+ .name = "Integer 64 Bits",
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+};
+
+static const char * const vivi_ctrl_menu_strings[] = {
+ "Menu Item 0 (Skipped)",
+ "Menu Item 1",
+ "Menu Item 2 (Skipped)",
+ "Menu Item 3",
+ "Menu Item 4",
+ "Menu Item 5 (Skipped)",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_menu = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 4,
+ .name = "Menu",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .max = 4,
+ .def = 3,
+ .menu_skip_mask = 0x04,
+ .qmenu = vivi_ctrl_menu_strings,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_string = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 5,
+ .name = "String",
+ .type = V4L2_CTRL_TYPE_STRING,
+ .min = 2,
+ .max = 4,
+ .step = 1,
+};
+
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
+ .open = vivi_open,
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
@@ -1093,9 +1152,6 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_s_input = vidioc_s_input,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
};
static struct video_device vivi_template = {
@@ -1126,6 +1182,7 @@ static int vivi_release(void)
video_device_node_name(dev->vfd));
video_unregister_device(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
}
@@ -1136,6 +1193,8 @@ static int __init vivi_create_instance(int inst)
{
struct vivi_dev *dev;
struct video_device *vfd;
+ struct v4l2_ctrl_handler *hdl;
+ struct vb2_queue *q;
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1151,20 +1210,46 @@ static int __init vivi_create_instance(int inst)
dev->fmt = &formats[0];
dev->width = 640;
dev->height = 480;
- dev->volume = 200;
- dev->brightness = 127;
- dev->contrast = 16;
- dev->saturation = 127;
- dev->hue = 0;
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 11);
+ dev->volume = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 255, 1, 200);
+ dev->brightness = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ dev->contrast = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 16);
+ dev->saturation = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 127);
+ dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
+ dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
+ dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
+ dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL);
+ dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
+ dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
+ if (hdl->error) {
+ ret = hdl->error;
+ goto unreg_dev;
+ }
+ dev->v4l2_dev.ctrl_handler = hdl;
/* initialize locks */
spin_lock_init(&dev->slock);
- mutex_init(&dev->mutex);
- videobuf_queue_vmalloc_init(&dev->vb_vidq, &vivi_video_qops,
- NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer), dev, &dev->mutex);
+ /* initialize queue */
+ q = &dev->vb_vidq;
+ memset(q, 0, sizeof(dev->vb_vidq));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivi_buffer);
+ q->ops = &vivi_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ vb2_queue_init(q);
+
+ mutex_init(&dev->mutex);
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
@@ -1178,6 +1263,11 @@ static int __init vivi_create_instance(int inst)
*vfd = vivi_template;
vfd->debug = debug;
vfd->v4l2_dev = &dev->v4l2_dev;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
vfd->lock = &dev->mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
@@ -1200,6 +1290,7 @@ static int __init vivi_create_instance(int inst)
rel_vdev:
video_device_release(vfd);
unreg_dev:
+ v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&dev->v4l2_dev);
free_dev:
kfree(dev);
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 91a01b3cdf8c..75301d10a838 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -28,6 +28,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("vpx3220a/vpx3216b/vpx3214c video decoder driver");
MODULE_AUTHOR("Laurent Pinchart");
@@ -44,16 +45,13 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct vpx3220 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
unsigned char reg[255];
v4l2_std_id norm;
int ident;
int input;
int enable;
- int bright;
- int contrast;
- int hue;
- int sat;
};
static inline struct vpx3220 *to_vpx3220(struct v4l2_subdev *sd)
@@ -61,6 +59,11 @@ static inline struct vpx3220 *to_vpx3220(struct v4l2_subdev *sd)
return container_of(sd, struct vpx3220, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct vpx3220, hdl)->sd;
+}
+
static char *inputs[] = { "internal", "composite", "svideo" };
/* ----------------------------------------------------------------------- */
@@ -417,88 +420,26 @@ static int vpx3220_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int vpx3220_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- break;
-
- case V4L2_CID_CONTRAST:
- v4l2_ctrl_query_fill(qc, 0, 63, 1, 32);
- break;
-
- case V4L2_CID_SATURATION:
- v4l2_ctrl_query_fill(qc, 0, 4095, 1, 2048);
- break;
-
- case V4L2_CID_HUE:
- v4l2_ctrl_query_fill(qc, -512, 511, 1, 0);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vpx3220_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int vpx3220_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct vpx3220 *decoder = to_vpx3220(sd);
+ struct v4l2_subdev *sd = to_sd(ctrl);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- ctrl->value = decoder->bright;
- break;
+ vpx3220_write(sd, 0xe6, ctrl->val);
+ return 0;
case V4L2_CID_CONTRAST:
- ctrl->value = decoder->contrast;
- break;
+ /* Bits 7 and 8 are used for noise shaping */
+ vpx3220_write(sd, 0xe7, ctrl->val + 192);
+ return 0;
case V4L2_CID_SATURATION:
- ctrl->value = decoder->sat;
- break;
+ vpx3220_fp_write(sd, 0xa0, ctrl->val);
+ return 0;
case V4L2_CID_HUE:
- ctrl->value = decoder->hue;
- break;
- default:
- return -EINVAL;
+ vpx3220_fp_write(sd, 0x1c, ctrl->val);
+ return 0;
}
- return 0;
-}
-
-static int vpx3220_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct vpx3220 *decoder = to_vpx3220(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- if (decoder->bright != ctrl->value) {
- decoder->bright = ctrl->value;
- vpx3220_write(sd, 0xe6, decoder->bright);
- }
- break;
- case V4L2_CID_CONTRAST:
- if (decoder->contrast != ctrl->value) {
- /* Bit 7 and 8 is for noise shaping */
- decoder->contrast = ctrl->value;
- vpx3220_write(sd, 0xe7, decoder->contrast + 192);
- }
- break;
- case V4L2_CID_SATURATION:
- if (decoder->sat != ctrl->value) {
- decoder->sat = ctrl->value;
- vpx3220_fp_write(sd, 0xa0, decoder->sat);
- }
- break;
- case V4L2_CID_HUE:
- if (decoder->hue != ctrl->value) {
- decoder->hue = ctrl->value;
- vpx3220_fp_write(sd, 0x1c, decoder->hue);
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
+ return -EINVAL;
}
static int vpx3220_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
@@ -511,12 +452,20 @@ static int vpx3220_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops vpx3220_ctrl_ops = {
+ .s_ctrl = vpx3220_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops vpx3220_core_ops = {
.g_chip_ident = vpx3220_g_chip_ident,
.init = vpx3220_init,
- .g_ctrl = vpx3220_g_ctrl,
- .s_ctrl = vpx3220_s_ctrl,
- .queryctrl = vpx3220_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = vpx3220_s_std,
};
@@ -558,10 +507,24 @@ static int vpx3220_probe(struct i2c_client *client,
decoder->norm = V4L2_STD_PAL;
decoder->input = 0;
decoder->enable = 1;
- decoder->bright = 32768;
- decoder->contrast = 32768;
- decoder->hue = 32768;
- decoder->sat = 32768;
+ v4l2_ctrl_handler_init(&decoder->hdl, 4);
+ v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 4095, 1, 2048);
+ v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops,
+ V4L2_CID_HUE, -512, 511, 1, 0);
+ sd->ctrl_handler = &decoder->hdl;
+ if (decoder->hdl.error) {
+ int err = decoder->hdl.error;
+
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&decoder->hdl);
ver = i2c_smbus_read_byte_data(client, 0x00);
pn = (i2c_smbus_read_byte_data(client, 0x02) << 8) +
@@ -599,9 +562,11 @@ static int vpx3220_probe(struct i2c_client *client,
static int vpx3220_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct vpx3220 *decoder = to_vpx3220(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_vpx3220(sd));
+ v4l2_ctrl_handler_free(&decoder->hdl);
+ kfree(decoder);
return 0;
}
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 013c7d881948..22027e7946f7 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
+#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A)
/* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 8faa4fab7b89..fd6222882a0e 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
#define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08)
#define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09)
+#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A)
/* SAS Discovery Event data */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3358c0af3466..ec8080c98081 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
- " Rate 3.0 Gpbs",PhyNumber);
+ " Rate 3.0 Gbps", PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 6.0 Gbps", PhyNumber);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index e8deb8ed0499..878bda0cce70 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
else
karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
- if (karg->hdr.port > 1)
+ if (karg->hdr.port > 1) {
+ kfree(karg);
return -EINVAL;
+ }
port = karg->hdr.port;
karg->port = port;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 8aefb1829fcd..f5a14afad2cd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = {
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
- .eh_bus_reset_handler = mptscsih_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SAS_CAN_QUEUE,
@@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev,
case MPI_SAS_IOUNIT0_RATE_3_0:
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
+ case MPI_SAS_IOUNIT0_RATE_6_0:
+ phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
default:
@@ -3691,7 +3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
}
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
- link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+ link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
if (!port_info) {
if (ioc->old_sas_discovery_protocal) {
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index a0421efe04ca..8a5b2d8f4daf 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -84,7 +84,8 @@ int i2o_driver_register(struct i2o_driver *drv)
osm_debug("Register driver %s\n", drv->name);
if (drv->event) {
- drv->event_queue = create_workqueue(drv->name);
+ drv->event_queue = alloc_workqueue(drv->name,
+ WQ_MEM_RECLAIM, 1);
if (!drv->event_queue) {
osm_err("Could not initialize event queue for driver "
"%s\n", drv->name);
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 7d3cc575c361..098de2b35784 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -1044,8 +1044,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
static int cfg_open(struct inode *inode, struct file *file)
{
- struct i2o_cfg_info *tmp =
- (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
+ struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
GFP_KERNEL);
unsigned long flags;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index fd018366d670..88ff35c3f76c 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -167,6 +167,16 @@ config TWL4030_CORE
high speed USB OTG transceiver, an audio codec (on most
versions) and many other features.
+config TWL4030_MADC
+ tristate "Texas Instruments TWL4030 MADC"
+ depends on TWL4030_CORE
+ help
+ This driver provides support for the Triton TWL4030-MADC. The
+ driver supports both RT and SW conversion methods.
+
+ This driver can be built as a module. If so, it will be
+ named twl4030-madc.
+
config TWL4030_POWER
bool "Support power resources on TWL4030 family chips"
depends on TWL4030_CORE && ARM
@@ -523,6 +533,13 @@ config AB8500_DEBUG
Select this option if you want debug information using the debug
filesystem, debugfs.
+config AB8500_GPADC
+ bool "AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ AB8500 GPADC driver used to convert the Acc and battery/AC/USB voltages.
+
config AB3550_CORE
bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index a54e2c7c6a1c..7af2db4a4bff 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
+obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
@@ -70,9 +71,10 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
+obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 4193af5f2743..a751927047ac 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -613,7 +613,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
ab3100_get_priv.ab3100 = ab3100;
ab3100_get_priv.mode = false;
ab3100_get_reg_file = debugfs_create_file("get_reg",
- S_IWUGO, ab3100_dir, &ab3100_get_priv,
+ S_IWUSR, ab3100_dir, &ab3100_get_priv,
&ab3100_get_set_reg_fops);
if (!ab3100_get_reg_file) {
err = -ENOMEM;
@@ -623,7 +623,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
ab3100_set_priv.ab3100 = ab3100;
ab3100_set_priv.mode = true;
ab3100_set_reg_file = debugfs_create_file("set_reg",
- S_IWUGO, ab3100_dir, &ab3100_set_priv,
+ S_IWUSR, ab3100_dir, &ab3100_set_priv,
&ab3100_get_set_reg_fops);
if (!ab3100_set_reg_file) {
err = -ENOMEM;
@@ -949,10 +949,8 @@ static int __devinit ab3100_probe(struct i2c_client *client,
goto exit_no_ops;
/* Set up and register the platform devices. */
- for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) {
- ab3100_devs[i].platform_data = ab3100_plf_data;
- ab3100_devs[i].data_size = sizeof(struct ab3100_platform_data);
- }
+ for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++)
+ ab3100_devs[i].mfd_data = ab3100_plf_data;
err = mfd_add_devices(&client->dev, 0, ab3100_devs,
ARRAY_SIZE(ab3100_devs), NULL, 0);
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 5fbca346b998..c12d04285226 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1053,17 +1053,17 @@ static inline void ab3550_setup_debugfs(struct ab3550 *ab)
goto exit_destroy_dir;
ab3550_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops);
+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_bank_fops);
if (!ab3550_bank_file)
goto exit_destroy_reg;
ab3550_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops);
+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_address_fops);
if (!ab3550_address_file)
goto exit_destroy_bank;
ab3550_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops);
+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_val_fops);
if (!ab3550_val_file)
goto exit_destroy_address;
@@ -1320,10 +1320,8 @@ static int __init ab3550_probe(struct i2c_client *client,
goto exit_no_ops;
/* Set up and register the platform devices. */
- for (i = 0; i < AB3550_NUM_DEVICES; i++) {
- ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
- ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
- }
+ for (i = 0; i < AB3550_NUM_DEVICES; i++)
+ ab3550_devs[i].mfd_data = ab3550_plf_data->dev_data[i];
err = mfd_add_devices(&client->dev, 0, ab3550_devs,
ARRAY_SIZE(ab3550_devs), NULL,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index b6887014d687..6e185b272d00 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -4,7 +4,7 @@
* License Terms: GNU General Public License v2
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Rabin Vincent <rabin.vincent@stericsson.com>
- * Changes: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
*/
#include <linux/kernel.h>
@@ -90,6 +90,7 @@
#define AB8500_IT_MASK24_REG 0x57
#define AB8500_REV_REG 0x80
+#define AB8500_SWITCH_OFF_STATUS 0x00
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
@@ -652,10 +653,38 @@ static ssize_t show_chip_id(struct device *dev,
return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
}
+/*
+ * ab8500 has switched off due to (SWITCH_OFF_STATUS):
+ * 0x01 Swoff bit programming
+ * 0x02 Thermal protection activation
+ * 0x04 Vbat lower than BattOk falling threshold
+ * 0x08 Watchdog expired
+ * 0x10 Non presence of 32kHz clock
+ * 0x20 Battery level lower than power on reset threshold
+ * 0x40 Power on key 1 pressed longer than 10 seconds
+ * 0x80 DB8500 thermal shutdown
+ */
+static ssize_t show_switch_off_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u8 value;
+ struct ab8500 *ab8500;
+
+ ab8500 = dev_get_drvdata(dev);
+ ret = get_register_interruptible(ab8500, AB8500_RTC,
+ AB8500_SWITCH_OFF_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%#x\n", value);
+}
+
static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
+static DEVICE_ATTR(switch_off_status, S_IRUGO, show_switch_off_status, NULL);
static struct attribute *ab8500_sysfs_entries[] = {
&dev_attr_chip_id.attr,
+ &dev_attr_switch_off_status.attr,
NULL,
};
@@ -686,9 +715,10 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
* 0x10 - Cut 1.0
* 0x11 - Cut 1.1
* 0x20 - Cut 2.0
+ * 0x30 - Cut 3.0
*/
- if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
- ab8500->revision = value;
+ if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20 ||
+ value == 0x30) {
dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
} else {
dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value);
@@ -696,6 +726,24 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
}
ab8500->chip_id = value;
+ /*
+ * ab8500 has switched off due to (SWITCH_OFF_STATUS):
+ * 0x01 Swoff bit programming
+ * 0x02 Thermal protection activation
+ * 0x04 Vbat lower than BattOk falling threshold
+ * 0x08 Watchdog expired
+ * 0x10 Non presence of 32kHz clock
+ * 0x20 Battery level lower than power on reset threshold
+ * 0x40 Power on key 1 pressed longer than 10 seconds
+ * 0x80 DB8500 thermal shutdown
+ */
+
+ ret = get_register_interruptible(ab8500, AB8500_RTC,
+ AB8500_SWITCH_OFF_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ dev_info(ab8500->dev, "switch off status: %#x", value);
+
if (plat && plat->init)
plat->init(ab8500);
@@ -764,6 +812,6 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
return 0;
}
-MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent");
+MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
MODULE_DESCRIPTION("AB8500 MFD core");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 3c1541ae7223..64748e42ac03 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -585,18 +585,18 @@ static int __devinit ab8500_debug_probe(struct platform_device *plf)
goto exit_destroy_dir;
ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
if (!ab8500_bank_file)
goto exit_destroy_reg;
ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
&ab8500_address_fops);
if (!ab8500_address_file)
goto exit_destroy_bank;
ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
if (!ab8500_val_file)
goto exit_destroy_address;
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
new file mode 100644
index 000000000000..19339bc439ca
--- /dev/null
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500-gpadc.h>
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB8500_GPADC_CTRL1_REG 0x00
+#define AB8500_GPADC_CTRL2_REG 0x01
+#define AB8500_GPADC_CTRL3_REG 0x02
+#define AB8500_GPADC_AUTO_TIMER_REG 0x03
+#define AB8500_GPADC_STAT_REG 0x04
+#define AB8500_GPADC_MANDATAL_REG 0x05
+#define AB8500_GPADC_MANDATAH_REG 0x06
+#define AB8500_GPADC_AUTODATAL_REG 0x07
+#define AB8500_GPADC_AUTODATAH_REG 0x08
+#define AB8500_GPADC_MUX_CTRL_REG 0x09
+
+/* gpadc constants */
+#define EN_VINTCORE12 0x04
+#define EN_VTVOUT 0x02
+#define EN_GPADC 0x01
+#define DIS_GPADC 0x00
+#define SW_AVG_16 0x60
+#define ADC_SW_CONV 0x04
+#define EN_BUF 0x40
+#define DIS_ZERO 0x00
+#define GPADC_BUSY 0x01
+
+/**
+ * struct ab8500_gpadc - ab8500 GPADC device information
+ * @dev: pointer to the struct device
+ * @parent: pointer to the parent device structure ab8500
+ * @ab8500_gpadc_complete: completion used to signal the end of a gpadc
+ * conversion
+ * @ab8500_gpadc_lock: mutex serializing gpadc conversions
+ * @regu: pointer to the struct regulator
+ * @irq: interrupt number that is used by gpadc
+ */
+static struct ab8500_gpadc {
+ struct device *dev;
+ struct ab8500 *parent;
+ struct completion ab8500_gpadc_complete;
+ struct mutex ab8500_gpadc_lock;
+ struct regulator *regu;
+ int irq;
+} *di;
+
+/**
+ * ab8500_gpadc_convert() - gpadc conversion
+ * @input: analog input to be converted to digital data
+ *
+ * This function converts the selected analog input to digital
+ * data. Calibration must then be applied to express the result
+ * in the required unit of measurement.
+ */
+int ab8500_gpadc_convert(u8 input)
+{
+ int ret;
+ u16 data = 0;
+ int looplimit = 0;
+ u8 val, low_data, high_data;
+
+ if (!di)
+ return -ENODEV;
+
+ mutex_lock(&di->ab8500_gpadc_lock);
+ /* Enable VTVout LDO this is required for GPADC */
+ regulator_enable(di->regu);
+
+ /* Check if ADC is not busy, lock and proceed */
+ do {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_STAT_REG, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & GPADC_BUSY))
+ break;
+ msleep(10);
+ } while (++looplimit < 10);
+ if (looplimit >= 10 && (val & GPADC_BUSY)) {
+ dev_err(di->dev, "gpadc_conversion: GPADC busy");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Enable GPADC */
+ ret = abx500_mask_and_set_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, EN_GPADC, EN_GPADC);
+ if (ret < 0) {
+ dev_err(di->dev, "gpadc_conversion: enable gpadc failed\n");
+ goto out;
+ }
+ /* Select the input source and set average samples to 16 */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16));
+ if (ret < 0) {
+ dev_err(di->dev,
+ "gpadc_conversion: set avg samples failed\n");
+ goto out;
+ }
+ /* Enable ADC, Buffering and select rising edge, start Conversion */
+ ret = abx500_mask_and_set_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF);
+ if (ret < 0) {
+ dev_err(di->dev,
+ "gpadc_conversion: select falling edge failed\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV);
+ if (ret < 0) {
+ dev_err(di->dev,
+ "gpadc_conversion: start s/w conversion failed\n");
+ goto out;
+ }
+ /* wait for completion of conversion */
+ if (!wait_for_completion_timeout(&di->ab8500_gpadc_complete, 2*HZ)) {
+ dev_err(di->dev,
+ "timeout: didnt recieve GPADC conversion interrupt\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_MANDATAL_REG, &low_data);
+ if (ret < 0) {
+ dev_err(di->dev, "gpadc_conversion: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_MANDATAH_REG, &high_data);
+ if (ret < 0) {
+ dev_err(di->dev, "gpadc_conversion: read high data failed\n");
+ goto out;
+ }
+
+ data = (high_data << 8) | low_data;
+ /* Disable GPADC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, DIS_GPADC);
+ if (ret < 0) {
+ dev_err(di->dev, "gpadc_conversion: disable gpadc failed\n");
+ goto out;
+ }
+ /* Disable VTVout LDO this is required for GPADC */
+ regulator_disable(di->regu);
+ mutex_unlock(&di->ab8500_gpadc_lock);
+ return data;
+
+out:
+ /*
+ * Turning off the GPADC has proven necessary if an error occurs,
+ * otherwise we might have problems when waiting for the busy bit in the
+ * GPADC status register to go low. In V1.1 the wait_for_completion
+ * seems to time out when waiting for an interrupt. Not seen in V2.0.
+ */
+ (void) abx500_set_register_interruptible(di->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, DIS_GPADC);
+ regulator_disable(di->regu);
+ mutex_unlock(&di->ab8500_gpadc_lock);
+ dev_err(di->dev, "gpadc_conversion: Failed to AD convert channel %d\n",
+ input);
+ return ret;
+}
+EXPORT_SYMBOL(ab8500_gpadc_convert);
+
+/**
+ * ab8500_bm_gpswadcconvend_handler() - isr for s/w gpadc conversion completion
+ * @irq: irq number
+ * @_di: pointer to the data passed when requesting the irq
+ *
+ * This is an interrupt service routine for s/w gpadc conversion completion.
+ * It signals that the gpadc conversion has completed and that the converted
+ * raw value can be read from the registers.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab8500_bm_gpswadcconvend_handler(int irq, void *_di)
+{
+ struct ab8500_gpadc *gpadc = _di;
+
+ complete(&gpadc->ab8500_gpadc_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = kzalloc(sizeof(struct ab8500_gpadc), GFP_KERNEL);
+ if (!gpadc) {
+ dev_err(&pdev->dev, "Error: No memory\n");
+ return -ENOMEM;
+ }
+
+ gpadc->parent = dev_get_drvdata(pdev->dev.parent);
+ gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
+ if (gpadc->irq < 0) {
+ dev_err(gpadc->dev, "failed to get platform irq-%d\n", di->irq);
+ ret = gpadc->irq;
+ goto fail;
+ }
+
+ gpadc->dev = &pdev->dev;
+ mutex_init(&gpadc->ab8500_gpadc_lock);
+
+ /* Initialize completion used to notify completion of conversion */
+ init_completion(&gpadc->ab8500_gpadc_complete);
+
+ /* Register interrupt - SwAdcComplete */
+ ret = request_threaded_irq(gpadc->irq, NULL,
+ ab8500_bm_gpswadcconvend_handler,
+ IRQF_NO_SUSPEND | IRQF_SHARED, "ab8500-gpadc", gpadc);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "Failed to register interrupt, irq: %d\n",
+ gpadc->irq);
+ goto fail;
+ }
+
+ /* VTVout LDO used to power up ab8500-GPADC */
+ gpadc->regu = regulator_get(&pdev->dev, "vddadc");
+ if (IS_ERR(gpadc->regu)) {
+ ret = PTR_ERR(gpadc->regu);
+ dev_err(gpadc->dev, "failed to get vtvout LDO\n");
+ goto fail;
+ }
+ di = gpadc;
+ dev_dbg(gpadc->dev, "probe success\n");
+ return 0;
+fail:
+ kfree(gpadc);
+ gpadc = NULL;
+ return ret;
+}
+
+static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
+{
+ struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
+
+ /* remove interrupt - completion of Sw ADC conversion */
+ free_irq(gpadc->irq, di);
+ /* disable VTVout LDO that is being used by GPADC */
+ regulator_put(gpadc->regu);
+ kfree(gpadc);
+ di = NULL;
+ return 0;
+}
+
+static struct platform_driver ab8500_gpadc_driver = {
+ .probe = ab8500_gpadc_probe,
+ .remove = __devexit_p(ab8500_gpadc_remove),
+ .driver = {
+ .name = "ab8500-gpadc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_gpadc_init(void)
+{
+ return platform_driver_register(&ab8500_gpadc_driver);
+}
+
+static void __exit ab8500_gpadc_exit(void)
+{
+ platform_driver_unregister(&ab8500_gpadc_driver);
+}
+
+subsys_initcall_sync(ab8500_gpadc_init);
+module_exit(ab8500_gpadc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Arun R Murthy");
+MODULE_ALIAS("platform:ab8500_gpadc");
+MODULE_DESCRIPTION("AB8500 GPADC driver");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
new file mode 100644
index 000000000000..392185965b39
--- /dev/null
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/sysctrl.h>
+
+static struct device *sysctrl_dev;
+
+static inline bool valid_bank(u8 bank)
+{
+ return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
+ (bank == AB8500_SYS_CTRL2_BLOCK));
+}
+
+int ab8500_sysctrl_read(u16 reg, u8 *value)
+{
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+ return -EINVAL;
+
+ return abx500_get_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), value);
+}
+
+int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+{
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+ return -EINVAL;
+
+ return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), mask, value);
+}
+
+static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
+{
+ sysctrl_dev = &pdev->dev;
+ return 0;
+}
+
+static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
+{
+ sysctrl_dev = NULL;
+ return 0;
+}
+
+static struct platform_driver ab8500_sysctrl_driver = {
+ .driver = {
+ .name = "ab8500-sysctrl",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_sysctrl_probe,
+ .remove = __devexit_p(ab8500_sysctrl_remove),
+};
+
+static int __init ab8500_sysctrl_init(void)
+{
+ return platform_driver_register(&ab8500_sysctrl_driver);
+}
+subsys_initcall(ab8500_sysctrl_init);
+
+MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
+MODULE_DESCRIPTION("AB8500 system control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 3122139b4300..f1d88483112c 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -321,27 +321,27 @@ static int __devexit adp5520_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int adp5520_suspend(struct i2c_client *client,
- pm_message_t state)
+static int adp5520_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
return 0;
}
-static int adp5520_resume(struct i2c_client *client)
+static int adp5520_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
return 0;
}
-#else
-#define adp5520_suspend NULL
-#define adp5520_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adp5520_pm, adp5520_suspend, adp5520_resume);
+
static const struct i2c_device_id adp5520_id[] = {
{ "pmic-adp5520", ID_ADP5520 },
{ "pmic-adp5501", ID_ADP5501 },
@@ -353,11 +353,10 @@ static struct i2c_driver adp5520_driver = {
.driver = {
.name = "adp5520",
.owner = THIS_MODULE,
+ .pm = &adp5520_pm,
},
.probe = adp5520_probe,
.remove = __devexit_p(adp5520_remove),
- .suspend = adp5520_suspend,
- .resume = adp5520_resume,
.id_table = adp5520_id,
};
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 6a1f94042612..0241f08fc00d 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
unsigned long flags;
struct asic3 *asic;
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
- asic = desc->handler_data;
+ asic = get_irq_data(irq);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
@@ -682,7 +682,7 @@ static struct mfd_cell asic3_cell_ds1wm = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
- .driver_data = &ds1wm_pdata,
+ .mfd_data = &ds1wm_pdata,
.num_resources = ARRAY_SIZE(ds1wm_resources),
.resources = ds1wm_resources,
};
@@ -783,7 +783,7 @@ static struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
- .driver_data = &asic3_mmc_data,
+ .mfd_data = &asic3_mmc_data,
.num_resources = ARRAY_SIZE(asic3_mmc_resources),
.resources = asic3_mmc_resources,
};
@@ -810,9 +810,6 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
ds1wm_resources[0].start >>= asic->bus_shift;
ds1wm_resources[0].end >>= asic->bus_shift;
- asic3_cell_ds1wm.platform_data = &asic3_cell_ds1wm;
- asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
-
/* MMC */
asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
mem_sdio->start, 0x400 >> asic->bus_shift);
@@ -824,9 +821,6 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- asic3_cell_mmc.platform_data = &asic3_cell_mmc;
- asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
-
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
if (ret < 0)
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 59ca6f151e78..886a06871065 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -39,6 +39,37 @@ enum cs5535_mfd_bars {
NR_BARS,
};
+static int cs5535_mfd_res_enable(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+
+ if (!request_region(res->start, resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "can't request region\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int cs5535_mfd_res_disable(struct platform_device *pdev)
+{
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+
+ release_region(res->start, resource_size(res));
+ return 0;
+}
+
static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
@@ -65,12 +96,18 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
.name = "cs5535-pms",
.num_resources = 1,
.resources = &cs5535_mfd_resources[PMS_BAR],
+
+ .enable = cs5535_mfd_res_enable,
+ .disable = cs5535_mfd_res_disable,
},
{
.id = ACPI_BAR,
.name = "cs5535-acpi",
.num_resources = 1,
.resources = &cs5535_mfd_resources[ACPI_BAR],
+
+ .enable = cs5535_mfd_res_enable,
+ .disable = cs5535_mfd_res_disable,
},
};
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d215c7..414783b04849 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,13 +118,13 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
/* Voice codec interface client */
cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
- cell->name = "davinci_vcif";
- cell->driver_data = davinci_vc;
+ cell->name = "davinci-vcif";
+ cell->mfd_data = davinci_vc;
/* Voice codec CQ93VC client */
cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
- cell->name = "cq93vc";
- cell->driver_data = davinci_vc;
+ cell->name = "cq93vc-codec";
+ cell->mfd_data = davinci_vc;
ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
DAVINCI_VC_CELLS, NULL, 0);
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 7bc752272dc1..fb9770b39a32 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -117,7 +117,7 @@ static struct mfd_cell ds1wm_cell __initdata = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
- .driver_data = &ds1wm_pdata,
+ .mfd_data = &ds1wm_pdata,
.num_resources = 2,
.resources = ds1wm_resources,
};
@@ -165,8 +165,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
ds1wm_pdata.clock_rate = pdata->clock_rate;
/* the first 5 PASIC3 registers control the DS1WM */
ds1wm_resources[0].end = (5 << asic->bus_shift) - 1;
- ds1wm_cell.platform_data = &ds1wm_cell;
- ds1wm_cell.data_size = sizeof(ds1wm_cell);
ret = mfd_add_devices(&pdev->dev, pdev->id,
&ds1wm_cell, 1, r, irq);
if (ret < 0)
@@ -174,9 +172,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
}
if (pdata && pdata->led_pdata) {
- led_cell.driver_data = pdata->led_pdata;
- led_cell.platform_data = &led_cell;
- led_cell.data_size = sizeof(ds1wm_cell);
ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
if (ret < 0)
dev_warn(dev, "failed to register LED device\n");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 36a166bcdb08..fc4191137e90 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -86,8 +86,7 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
/* Add platform data */
pdata->modno = modno;
- cell->platform_data = pdata;
- cell->data_size = sizeof(*pdata);
+ cell->mfd_data = pdata;
/* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
res->flags = IORESOURCE_MEM;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 0cc59795f600..aa518b9beaf5 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -232,8 +232,6 @@ const struct mfd_cell jz4740_adc_cells[] = {
.name = "jz4740-hwmon",
.num_resources = ARRAY_SIZE(jz4740_hwmon_resources),
.resources = jz4740_hwmon_resources,
- .platform_data = (void *)&jz4740_adc_cells[0],
- .data_size = sizeof(struct mfd_cell),
.enable = jz4740_adc_cell_enable,
.disable = jz4740_adc_cell_disable,
@@ -243,8 +241,6 @@ const struct mfd_cell jz4740_adc_cells[] = {
.name = "jz4740-battery",
.num_resources = ARRAY_SIZE(jz4740_battery_resources),
.resources = jz4740_battery_resources,
- .platform_data = (void *)&jz4740_adc_cells[1],
- .data_size = sizeof(struct mfd_cell),
.enable = jz4740_adc_cell_enable,
.disable = jz4740_adc_cell_disable,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bbfe86732602..c00214257da2 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -233,7 +233,7 @@ struct max8998_reg_dump {
u8 val;
};
#define SAVE_ITEM(x) { .addr = (x), .val = 0x0, }
-struct max8998_reg_dump max8998_dump[] = {
+static struct max8998_reg_dump max8998_dump[] = {
SAVE_ITEM(MAX8998_REG_IRQM1),
SAVE_ITEM(MAX8998_REG_IRQM2),
SAVE_ITEM(MAX8998_REG_IRQM3),
@@ -298,7 +298,7 @@ static int max8998_restore(struct device *dev)
return 0;
}
-const struct dev_pm_ops max8998_pm = {
+static const struct dev_pm_ops max8998_pm = {
.suspend = max8998_suspend,
.resume = max8998_resume,
.freeze = max8998_freeze,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index b9fcaf0004da..668634e89e81 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -683,14 +683,13 @@ out:
EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
- const char *format, void *pdata, size_t pdata_size)
+ const char *format, void *pdata)
{
char buf[30];
const char *name = mc13xxx_get_chipname(mc13xxx);
struct mfd_cell cell = {
- .platform_data = pdata,
- .data_size = pdata_size,
+ .mfd_data = pdata,
};
/* there is no asnprintf in the kernel :-( */
@@ -706,7 +705,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
{
- return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
+ return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL);
}
static int mc13xxx_probe(struct spi_device *spi)
@@ -764,13 +763,8 @@ err_revision:
mc13xxx_add_subdevice(mc13xxx, "%s-codec");
if (pdata->flags & MC13XXX_USE_REGULATOR) {
- struct mc13xxx_regulator_platform_data regulator_pdata = {
- .num_regulators = pdata->num_regulators,
- .regulators = pdata->regulators,
- };
-
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
- &regulator_pdata, sizeof(regulator_pdata));
+ &pdata->regulators);
}
if (pdata->flags & MC13XXX_USE_RTC)
@@ -779,10 +773,8 @@ err_revision:
if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
- if (pdata->flags & MC13XXX_USE_LED) {
- mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
- pdata->leds, sizeof(*pdata->leds));
- }
+ if (pdata->flags & MC13XXX_USE_LED)
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", pdata->leds);
return 0;
}
@@ -811,6 +803,7 @@ static const struct spi_device_id mc13xxx_device_id[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
static struct spi_driver mc13xxx_driver = {
.id_table = mc13xxx_device_id,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index d83ad0f141af..bb2264cc410b 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -18,6 +18,43 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+int mfd_shared_cell_enable(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
+ /* only call enable hook if the cell wasn't previously enabled */
+ if (atomic_inc_return(cell->usage_count) == 1)
+ err = cell->enable(pdev);
+
+ /* if the enable hook failed, decrement counter to allow retries */
+ if (err)
+ atomic_dec(cell->usage_count);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_shared_cell_enable);
+
+int mfd_shared_cell_disable(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
+ /* only disable if no other clients are using it */
+ if (atomic_dec_return(cell->usage_count) == 0)
+ err = cell->disable(pdev);
+
+ /* if the disable hook failed, increment to allow retries */
+ if (err)
+ atomic_inc(cell->usage_count);
+
+ /* sanity check; did someone call disable too many times? */
+ WARN_ON(atomic_read(cell->usage_count) < 0);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_shared_cell_disable);
+
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@@ -37,14 +74,10 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_device;
pdev->dev.parent = parent;
- platform_set_drvdata(pdev, cell->driver_data);
- if (cell->data_size) {
- ret = platform_device_add_data(pdev,
- cell->platform_data, cell->data_size);
- if (ret)
- goto fail_res;
- }
+ ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+ if (ret)
+ goto fail_res;
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
@@ -100,14 +133,22 @@ fail_alloc:
}
int mfd_add_devices(struct device *parent, int id,
- const struct mfd_cell *cells, int n_devs,
+ struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base)
{
int i;
int ret = 0;
+ atomic_t *cnts;
+
+ /* initialize reference counting for all cells */
+ cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
+ if (!cnts)
+ return -ENOMEM;
for (i = 0; i < n_devs; i++) {
+ atomic_set(&cnts[i], 0);
+ cells[i].usage_count = &cnts[i];
ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base);
if (ret)
break;
@@ -120,17 +161,89 @@ int mfd_add_devices(struct device *parent, int id,
}
EXPORT_SYMBOL(mfd_add_devices);
-static int mfd_remove_devices_fn(struct device *dev, void *unused)
+static int mfd_remove_devices_fn(struct device *dev, void *c)
{
- platform_device_unregister(to_platform_device(dev));
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ atomic_t **usage_count = c;
+
+ /* find the base address of usage_count pointers (for freeing) */
+ if (!*usage_count || (cell->usage_count < *usage_count))
+ *usage_count = cell->usage_count;
+
+ platform_device_unregister(pdev);
return 0;
}
void mfd_remove_devices(struct device *parent)
{
- device_for_each_child(parent, NULL, mfd_remove_devices_fn);
+ atomic_t *cnts = NULL;
+
+ device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
+ kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);
+static int add_shared_platform_device(const char *cell, const char *name)
+{
+ struct mfd_cell cell_entry;
+ struct device *dev;
+ struct platform_device *pdev;
+ int err;
+
+ /* check if we've already registered a device (don't fail if we have) */
+ if (bus_find_device_by_name(&platform_bus_type, NULL, name))
+ return 0;
+
+ /* fetch the parent cell's device (should already be registered!) */
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
+ if (!dev) {
+ printk(KERN_ERR "failed to find device for cell %s\n", cell);
+ return -ENODEV;
+ }
+ pdev = to_platform_device(dev);
+ memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));
+
+ WARN_ON(!cell_entry.enable);
+
+ cell_entry.name = name;
+ err = mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0);
+ if (err)
+ dev_err(dev, "MFD add devices failed: %d\n", err);
+ return err;
+}
+
+int mfd_shared_platform_driver_register(struct platform_driver *drv,
+ const char *cellname)
+{
+ int err;
+
+ err = add_shared_platform_device(cellname, drv->driver.name);
+ if (err)
+ printk(KERN_ERR "failed to add platform device %s\n",
+ drv->driver.name);
+
+ err = platform_driver_register(drv);
+ if (err)
+ printk(KERN_ERR "failed to add platform driver %s\n",
+ drv->driver.name);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_shared_platform_driver_register);
+
+void mfd_shared_platform_driver_unregister(struct platform_driver *drv)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&platform_bus_type, NULL,
+ drv->driver.name);
+ if (dev)
+ platform_device_unregister(to_platform_device(dev));
+
+ platform_driver_unregister(drv);
+}
+EXPORT_SYMBOL(mfd_shared_platform_driver_unregister);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 501ce13b693e..c1306ed43e3c 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
@@ -230,27 +231,26 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
}
}
-#ifdef CONFIG_PM
-static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pcf50633_suspend(struct device *dev)
{
- struct pcf50633 *pcf;
- pcf = i2c_get_clientdata(client);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pcf50633 *pcf = i2c_get_clientdata(client);
return pcf50633_irq_suspend(pcf);
}
-static int pcf50633_resume(struct i2c_client *client)
+static int pcf50633_resume(struct device *dev)
{
- struct pcf50633 *pcf;
- pcf = i2c_get_clientdata(client);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pcf50633 *pcf = i2c_get_clientdata(client);
return pcf50633_irq_resume(pcf);
}
-#else
-#define pcf50633_suspend NULL
-#define pcf50633_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
+
static int __devinit pcf50633_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
@@ -360,16 +360,16 @@ static struct i2c_device_id pcf50633_id_table[] = {
{"pcf50633", 0x73},
{/* end of list */}
};
+MODULE_DEVICE_TABLE(i2c, pcf50633_id_table);
static struct i2c_driver pcf50633_driver = {
.driver = {
.name = "pcf50633",
+ .pm = &pcf50633_pm,
},
.id_table = pcf50633_id_table,
.probe = pcf50633_probe,
.remove = __devexit_p(pcf50633_remove),
- .suspend = pcf50633_suspend,
- .resume = pcf50633_resume,
};
static int __init pcf50633_init(void)
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 50922975bda3..193c940225b5 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -61,12 +61,12 @@ static struct mfd_cell rdc321x_sb_cells[] = {
.name = "rdc321x-wdt",
.resources = rdc321x_wdt_resource,
.num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
- .driver_data = &rdc321x_wdt_pdata,
+ .mfd_data = &rdc321x_wdt_pdata,
}, {
.name = "rdc321x-gpio",
.resources = rdc321x_gpio_resources,
.num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
- .driver_data = &rdc321x_gpio_pdata,
+ .mfd_data = &rdc321x_gpio_pdata,
},
};
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index 0a7df44a93c0..53a63024bf11 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -146,9 +146,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
}
memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
- priv->cell_mmc.driver_data = mmc_data;
- priv->cell_mmc.platform_data = &priv->cell_mmc;
- priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
+ priv->cell_mmc.mfd_data = mmc_data;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 9caeb4ac6ea6..af57fc706a4c 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -170,7 +170,7 @@ static struct mfd_cell t7l66xb_cells[] = {
.name = "tmio-mmc",
.enable = t7l66xb_mmc_enable,
.disable = t7l66xb_mmc_disable,
- .driver_data = &t7166xb_mmc_data,
+ .mfd_data = &t7166xb_mmc_data,
.num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
.resources = t7l66xb_mmc_resources,
},
@@ -383,16 +383,7 @@ static int t7l66xb_probe(struct platform_device *dev)
t7l66xb_attach_irq(dev);
- t7l66xb_cells[T7L66XB_CELL_NAND].driver_data = pdata->nand_data;
- t7l66xb_cells[T7L66XB_CELL_NAND].platform_data =
- &t7l66xb_cells[T7L66XB_CELL_NAND];
- t7l66xb_cells[T7L66XB_CELL_NAND].data_size =
- sizeof(t7l66xb_cells[T7L66XB_CELL_NAND]);
-
- t7l66xb_cells[T7L66XB_CELL_MMC].platform_data =
- &t7l66xb_cells[T7L66XB_CELL_MMC];
- t7l66xb_cells[T7L66XB_CELL_MMC].data_size =
- sizeof(t7l66xb_cells[T7L66XB_CELL_MMC]);
+ t7l66xb_cells[T7L66XB_CELL_NAND].mfd_data = pdata->nand_data;
ret = mfd_add_devices(&dev->dev, dev->id,
t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 6315f63f017d..b006f7cee952 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -131,7 +131,7 @@ static struct mfd_cell tc6387xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6387xb_mmc_enable,
.disable = tc6387xb_mmc_disable,
- .driver_data = &tc6387xb_mmc_data,
+ .mfd_data = &tc6387xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
.resources = tc6387xb_mmc_resources,
},
@@ -190,11 +190,6 @@ static int __devinit tc6387xb_probe(struct platform_device *dev)
printk(KERN_INFO "Toshiba tc6387xb initialised\n");
- tc6387xb_cells[TC6387XB_CELL_MMC].platform_data =
- &tc6387xb_cells[TC6387XB_CELL_MMC];
- tc6387xb_cells[TC6387XB_CELL_MMC].data_size =
- sizeof(tc6387xb_cells[TC6387XB_CELL_MMC]);
-
ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
ARRAY_SIZE(tc6387xb_cells), iomem, irq);
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 9a238633a54d..3d62ded86a8f 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -393,7 +393,7 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6393xb_mmc_enable,
.resume = tc6393xb_mmc_resume,
- .driver_data = &tc6393xb_mmc_data,
+ .mfd_data = &tc6393xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
},
@@ -693,27 +693,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
goto err_setup;
}
- tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
- tc6393xb_cells[TC6393XB_CELL_NAND].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_NAND];
- tc6393xb_cells[TC6393XB_CELL_NAND].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]);
-
- tc6393xb_cells[TC6393XB_CELL_MMC].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_MMC];
- tc6393xb_cells[TC6393XB_CELL_MMC].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_MMC]);
-
- tc6393xb_cells[TC6393XB_CELL_OHCI].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_OHCI];
- tc6393xb_cells[TC6393XB_CELL_OHCI].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_OHCI]);
-
- tc6393xb_cells[TC6393XB_CELL_FB].driver_data = tcpd->fb_data;
- tc6393xb_cells[TC6393XB_CELL_FB].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_FB];
- tc6393xb_cells[TC6393XB_CELL_FB].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_FB]);
+ tc6393xb_cells[TC6393XB_CELL_NAND].mfd_data = tcpd->nand_data;
+ tc6393xb_cells[TC6393XB_CELL_FB].mfd_data = tcpd->fb_data;
ret = mfd_add_devices(&dev->dev, dev->id,
tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 6ad8a7f8d390..94c6c8afad12 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -384,8 +384,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -396,43 +395,37 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
@@ -441,8 +434,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -458,15 +450,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-mlogicore",
@@ -477,29 +467,25 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
@@ -508,8 +494,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -520,36 +505,31 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
};
@@ -558,8 +538,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -570,43 +549,37 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "ocores-i2c",
.num_resources = ARRAY_SIZE(timberdale_ocores_resources),
.resources = timberdale_ocores_resources,
- .platform_data = &timberdale_ocores_platform_data,
- .data_size = sizeof(timberdale_ocores_platform_data),
+ .mfd_data = &timberdale_ocores_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
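The timberdale hunks above (and the other .driver_data/.platform_data to
.mfd_data conversions in this diff) move the per-cell configuration pointer
into the new field. A hypothetical child-driver sketch of how such a pointer
is fetched back; the pdata type and function name are placeholders:

static int __devinit timb_child_probe(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);
	struct timb_child_platform_data *pdata;	/* hypothetical type */

	if (!cell || !cell->mfd_data)
		return -EINVAL;
	pdata = cell->mfd_data;

	/* ... configure the device from pdata ... */
	return 0;
}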
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 627cf577b16d..0aa9186aec19 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
static inline int __tps6586x_writes(struct i2c_client *client, int reg,
int len, uint8_t *val)
{
- int ret;
+ int ret, i;
- ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
- if (ret < 0) {
- dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
- return ret;
+ for (i = 0; i < len; i++) {
+ ret = __tps6586x_write(client, reg + i, *(val + i));
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -288,12 +288,10 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
return tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET1, val, mask);
}
-static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
+static int tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
{
- int ret;
-
if (!gpio_base)
- return;
+ return 0;
tps6586x->gpio.owner = THIS_MODULE;
tps6586x->gpio.label = tps6586x->client->name;
@@ -307,9 +305,7 @@ static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
tps6586x->gpio.set = tps6586x_gpio_set;
tps6586x->gpio.get = tps6586x_gpio_get;
- ret = gpiochip_add(&tps6586x->gpio);
- if (ret)
- dev_warn(tps6586x->dev, "GPIO registration failed: %d\n", ret);
+ return gpiochip_add(&tps6586x->gpio);
}
static int __remove_subdev(struct device *dev, void *unused)
@@ -517,17 +513,28 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
}
}
+ ret = tps6586x_gpio_init(tps6586x, pdata->gpio_base);
+ if (ret) {
+ dev_err(&client->dev, "GPIO registration failed: %d\n", ret);
+ goto err_gpio_init;
+ }
+
ret = tps6586x_add_subdevs(tps6586x, pdata);
if (ret) {
dev_err(&client->dev, "add devices failed: %d\n", ret);
goto err_add_devs;
}
- tps6586x_gpio_init(tps6586x, pdata->gpio_base);
-
return 0;
err_add_devs:
+ if (pdata->gpio_base) {
+ ret = gpiochip_remove(&tps6586x->gpio);
+ if (ret)
+ dev_err(&client->dev, "Can't remove gpio chip: %d\n",
+ ret);
+ }
+err_gpio_init:
if (client->irq)
free_irq(client->irq, tps6586x);
err_irq_init:
@@ -587,4 +594,3 @@ module_exit(tps6586x_exit);
MODULE_DESCRIPTION("TPS6586X core driver");
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a35fa7dcbf53..960b5bed7f52 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -721,13 +721,13 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
}
- if (twl_has_watchdog()) {
+ if (twl_has_watchdog() && twl_class_is_4030()) {
child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (twl_has_pwrbutton()) {
+ if (twl_has_pwrbutton() && twl_class_is_4030()) {
child = add_child(1, "twl4030_pwrbutton",
NULL, 0, true, pdata->irq_base + 8 + 0, 0);
if (IS_ERR(child))
@@ -864,6 +864,10 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3);
if (IS_ERR(child))
return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
}
if (twl_has_bci() && pdata->bci &&
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index 9a4b196d6deb..c02fded316c9 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -208,15 +208,13 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
if (pdata->audio) {
cell = &codec->cells[childs];
cell->name = "twl4030-codec";
- cell->platform_data = pdata->audio;
- cell->data_size = sizeof(*pdata->audio);
+ cell->mfd_data = pdata->audio;
childs++;
}
if (pdata->vibra) {
cell = &codec->cells[childs];
cell->name = "twl4030-vibra";
- cell->platform_data = pdata->vibra;
- cell->data_size = sizeof(*pdata->vibra);
+ cell->mfd_data = pdata->vibra;
childs++;
}
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
new file mode 100644
index 000000000000..e9884e2583bf
--- /dev/null
+++ b/drivers/mfd/twl4030-madc.c
@@ -0,0 +1,802 @@
+/*
+ *
+ * TWL4030 MADC module driver. This driver monitors the real-time
+ * conversion of analog signals such as battery temperature,
+ * battery type and battery level.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * Amit Kucheria <amit.kucheria@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+/*
+ * struct twl4030_madc_data - a container for madc info
+ * @dev - pointer to device structure for madc
+ * @lock - mutex protecting this data structure
+ * @requests - Array of request struct corresponding to SW1, SW2 and RT
+ * @imr - Interrupt mask register of MADC
+ * @isr - Interrupt status register of MADC
+ */
+struct twl4030_madc_data {
+ struct device *dev;
+ struct mutex lock; /* mutex protecting this data structure */
+ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
+ int imr;
+ int isr;
+};
+
+static struct twl4030_madc_data *twl4030_madc;
+
+struct twl4030_prescale_divider_ratios {
+ s16 numerator;
+ s16 denominator;
+};
+
+static const struct twl4030_prescale_divider_ratios
+twl4030_divider_ratios[16] = {
+ {1, 1}, /* CHANNEL 0 No Prescaler */
+ {1, 1}, /* CHANNEL 1 No Prescaler */
+ {6, 10}, /* CHANNEL 2 */
+ {6, 10}, /* CHANNEL 3 */
+ {6, 10}, /* CHANNEL 4 */
+ {6, 10}, /* CHANNEL 5 */
+ {6, 10}, /* CHANNEL 6 */
+ {6, 10}, /* CHANNEL 7 */
+ {3, 14}, /* CHANNEL 8 */
+ {1, 3}, /* CHANNEL 9 */
+ {1, 1}, /* CHANNEL 10 No Prescaler */
+ {15, 100}, /* CHANNEL 11 */
+ {1, 4}, /* CHANNEL 12 */
+ {1, 1}, /* CHANNEL 13 Reserved channels */
+ {1, 1}, /* CHANNEL 14 Reserved channels */
+ {5, 11}, /* CHANNEL 15 */
+};
+
+
+/*
+ * Conversion table from -3 to 55 degrees Celsius
+ */
+static int therm_tbl[] = {
+30800, 29500, 28300, 27100,
+26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
+17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
+11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
+8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
+5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
+4040, 3910, 3790, 3670, 3550
+};
+
+/*
+ * Table of the registers used by the conversion methods supported by
+ * the MADC. RT (real time) conversions are hardware-triggered requests
+ * initiated by an external host processor for RT signal conversions.
+ * Host processors can also issue non-RT requests: the SW1 and SW2
+ * software conversions, also called asynchronous or GPC requests.
+ */
+static
+const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
+ [TWL4030_MADC_RT] = {
+ .sel = TWL4030_MADC_RTSELECT_LSB,
+ .avg = TWL4030_MADC_RTAVERAGE_LSB,
+ .rbase = TWL4030_MADC_RTCH0_LSB,
+ },
+ [TWL4030_MADC_SW1] = {
+ .sel = TWL4030_MADC_SW1SELECT_LSB,
+ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW1,
+ },
+ [TWL4030_MADC_SW2] = {
+ .sel = TWL4030_MADC_SW2SELECT_LSB,
+ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW2,
+ },
+};
+
+/*
+ * Function to read a particular channel value.
+ * @madc - pointer to struct twl4030_madc_data
+ * @reg - address of the channel's LSB register
+ * Returns the raw 10-bit conversion result, or a negative error code if
+ * an i2c read fails.
+ */
+static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
+{
+ u8 msb, lsb;
+ int ret;
+ /*
+ * For each ADC channel, we have MSB and LSB register pair. MSB address
+ * is always LSB address+1. reg parameter is the address of LSB register
+ */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &msb, reg + 1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read MSB register 0x%X\n",
+ reg + 1);
+ return ret;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &lsb, reg);
+ if (ret) {
+ dev_err(madc->dev, "unable to read LSB register 0x%X\n", reg);
+ return ret;
+ }
+
+ return (int)(((msb << 8) | lsb) >> 6);
+}
+
+/*
+ * Return battery temperature
+ * Or < 0 on failure.
+ */
+static int twl4030battery_temperature(int raw_volt)
+{
+ u8 val;
+ int temp, curr, volt, res, ret;
+
+ volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
+ /* Read and calculate the supply current in microamperes */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ REG_BCICTL2);
+ if (ret < 0)
+ return ret;
+ curr = ((val & TWL4030_BCI_ITHEN) + 1) * 10;
+ /* Getting and calculating the thermistor resistance in ohms */
+ res = volt * 1000 / curr;
+ /* calculating temperature */
+ for (temp = 58; temp >= 0; temp--) {
+ int actual = therm_tbl[temp];
+
+ if ((actual - res) >= 0)
+ break;
+ }
+
+ return temp + 1;
+}
+
+static int twl4030battery_current(int raw_volt)
+{
+ int ret;
+ u8 val;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ TWL4030_BCI_BCICTL1);
+ if (ret)
+ return ret;
+ if (val & TWL4030_BCI_CGAIN) /* slope of 0.44 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R1;
+ else /* slope of 0.88 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R2;
+}
+/*
+ * Function to read channel values
+ * @madc - pointer to twl4030_madc_data struct
+ * @reg_base - Base address of the first channel
+ * @channels - 16-bit bitmap; if a bit is set, that channel is read
+ * @buf - the channel values are stored here; if a read fails, the error
+ * value is stored instead
+ * Returns the number of successfully read channels.
+ */
+static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
+ u8 reg_base, unsigned
+ long channels, int *buf)
+{
+ int count = 0, count_req = 0, i;
+ u8 reg;
+
+ for_each_set_bit(i, &channels, TWL4030_MADC_MAX_CHANNELS) {
+ reg = reg_base + 2 * i;
+ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
+ if (buf[i] < 0) {
+ dev_err(madc->dev,
+ "Unable to read register 0x%X\n", reg);
+ count_req++;
+ continue;
+ }
+ switch (i) {
+ case 10:
+ buf[i] = twl4030battery_current(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading current\n");
+ count_req++;
+ } else {
+ count++;
+ buf[i] = buf[i] - 750;
+ }
+ break;
+ case 1:
+ buf[i] = twl4030battery_temperature(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading temperature\n");
+ count_req++;
+ } else {
+ buf[i] -= 3;
+ count++;
+ }
+ break;
+ default:
+ count++;
+ /* Analog Input (V) = conv_result * step_size / R
+ * conv_result = decimal value of 10-bit conversion
+ * result
+ * step_size = 1.5 / (2^10 - 1)
+ * R = Prescaler ratio for input channels.
+ * Result given in mV hence multiplied by 1000.
+ */
+ buf[i] = (buf[i] * 3 * 1000 *
+ twl4030_divider_ratios[i].denominator)
+ / (2 * 1023 *
+ twl4030_divider_ratios[i].numerator);
+ }
+ }
+ if (count_req)
+ dev_err(madc->dev, "%d channel conversion failed\n", count_req);
+
+ return count;
+}
+
+/*
+ * Enables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be enabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * If the i2c read fails it returns an error else returns 0.
+ */
+static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val &= ~(1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+
+ }
+
+ return 0;
+}
+
+/*
+ * Disables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be disabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * Returns error if i2c read/write fails.
+ */
+static int twl4030_madc_disable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val |= (1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
+{
+ struct twl4030_madc_data *madc = _madc;
+ const struct twl4030_madc_conversion_method *method;
+ u8 isr_val, imr_val;
+ int i, len, ret;
+ struct twl4030_madc_request *r;
+
+ mutex_lock(&madc->lock);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &isr_val, madc->isr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read isr register 0x%X\n",
+ madc->isr);
+ goto err_i2c;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &imr_val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ goto err_i2c;
+ }
+ isr_val &= ~imr_val;
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ if (!(isr_val & (1 << i)))
+ continue;
+ ret = twl4030_madc_disable_irq(madc, i);
+ if (ret < 0)
+ dev_dbg(madc->dev, "Disable interrupt failed%d\n", i);
+ madc->requests[i].result_pending = 1;
+ }
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ /* No pending results for this method, move to next one */
+ if (!r->result_pending)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+
+err_i2c:
+ /*
+ * On an i2c error, check which requests are still active
+ * and service them anyway.
+ */
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ if (r->active == 0)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
+ struct twl4030_madc_request *req)
+{
+ struct twl4030_madc_request *p;
+ int ret;
+
+ p = &madc->requests[req->method];
+ memcpy(p, req, sizeof(*req));
+ ret = twl4030_madc_enable_irq(madc, req->method);
+ if (ret < 0) {
+ dev_err(madc->dev, "enable irq failed!!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function which enables the madc conversion
+ * by writing to the control register.
+ * @madc - pointer to twl4030_madc_data struct
+ * @conv_method - can be TWL4030_MADC_RT, TWL4030_MADC_SW2, TWL4030_MADC_SW1
+ * corresponding to RT SW1 or SW2 conversion methods.
+ * Returns 0 if succeeds else a negative error value
+ */
+static int twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
+ int conv_method)
+{
+ const struct twl4030_madc_conversion_method *method;
+ int ret = 0;
+ method = &twl4030_conversion_methods[conv_method];
+ switch (conv_method) {
+ case TWL4030_MADC_SW1:
+ case TWL4030_MADC_SW2:
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ TWL4030_MADC_SW_START, method->ctrl);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write ctrl register 0x%X\n",
+ method->ctrl);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that waits for conversion to be ready
+ * @madc - pointer to twl4030_madc_data struct
+ * @timeout_ms - timeout value in milliseconds
+ * @status_reg - ctrl register
+ * returns 0 if succeeds else a negative error value
+ */
+static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc,
+ unsigned int timeout_ms,
+ u8 status_reg)
+{
+ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ u8 reg;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &reg, status_reg);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to read status register 0x%X\n",
+ status_reg);
+ return ret;
+ }
+ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
+ return 0;
+ usleep_range(500, 2000);
+ } while (!time_after(jiffies, timeout));
+ dev_err(madc->dev, "conversion timeout!\n");
+
+ return -EAGAIN;
+}
+
+/*
+ * An exported function which can be called from other kernel drivers.
+ * @req twl4030_madc_request structure
+ * req->rbuf will be filled with read values of channels based on the
+ * channel index. If a particular channel reading fails there will
+ * be a negative error value in the corresponding array element.
+ * returns 0 if succeeds else error value
+ */
+int twl4030_madc_conversion(struct twl4030_madc_request *req)
+{
+ const struct twl4030_madc_conversion_method *method;
+ u8 ch_msb, ch_lsb;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+ mutex_lock(&twl4030_madc->lock);
+ if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* Do we have a conversion request ongoing */
+ if (twl4030_madc->requests[req->method].active) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ch_msb = (req->channels >> 8) & 0xff;
+ ch_lsb = req->channels & 0xff;
+ method = &twl4030_conversion_methods[req->method];
+ /* Select channels to be converted */
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_msb, method->sel + 1);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel + 1);
+ goto out; /* release the lock on error */
+ }
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel + 1);
+ return ret;
+ }
+ /* Select averaging for all channels if do_avg is set */
+ if (req->do_avg) {
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ ch_msb, method->avg + 1);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write avg register 0x%X\n",
+ method->avg + 1);
+ goto out;
+ }
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ ch_lsb, method->avg);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel reg 0x%X\n",
+ method->sel + 1);
+ return ret;
+ }
+ }
+ if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
+ ret = twl4030_madc_set_irq(twl4030_madc, req);
+ if (ret < 0)
+ goto out;
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ ret = 0;
+ goto out;
+ }
+ /* With RT method we should not be here anymore */
+ if (req->method == TWL4030_MADC_RT) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ /* Wait until conversion is ready (ctrl register returns EOC) */
+ ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
+ if (ret) {
+ twl4030_madc->requests[req->method].active = 0;
+ goto out;
+ }
+ ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
+ req->channels, req->rbuf);
+ twl4030_madc->requests[req->method].active = 0;
+
+out:
+ mutex_unlock(&twl4030_madc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(twl4030_madc_conversion);
+
+/*
+ * Return channel value
+ * Or < 0 on failure.
+ */
+int twl4030_get_madc_conversion(int channel_no)
+{
+ struct twl4030_madc_request req;
+ int temp = 0;
+ int ret;
+
+ req.channels = (1 << channel_no);
+ req.method = TWL4030_MADC_SW2;
+ req.active = 0;
+ req.func_cb = NULL;
+ ret = twl4030_madc_conversion(&req);
+ if (ret < 0)
+ return ret;
+ if (req.rbuf[channel_no] > 0)
+ temp = req.rbuf[channel_no];
+
+ return temp;
+}
+EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion);
+
+/*
+ * Function to enable or disable bias current for
+ * main battery type reading or temperature sensing
+ * @madc - pointer to twl4030_madc_data struct
+ * @chan - can be one of the two values
+ * TWL4030_BCI_ITHEN - Enables bias current for main battery type reading
+ * TWL4030_BCI_TYPEN - Enables bias current for main battery temperature
+ * sensing
+ * @on - enable or disable chan.
+ */
+static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
+ int chan, int on)
+{
+ int ret;
+ u8 regval;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ &regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
+ TWL4030_BCI_BCICTL1);
+ return ret;
+ }
+ if (on)
+ regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
+ else
+ regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that sets MADC software power on bit to enable MADC
+ * @madc - pointer to twl4030_madc_data struct
+ * @on - Enable or disable the MADC software power-on bit.
+ * returns error if i2c read/write fails else 0
+ */
+static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
+{
+ u8 regval;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC,
+ &regval, TWL4030_MADC_CTRL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
+ TWL4030_MADC_CTRL1);
+ return ret;
+ }
+ if (on)
+ regval |= TWL4030_MADC_MADCON;
+ else
+ regval &= ~TWL4030_MADC_MADCON;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, regval, TWL4030_MADC_CTRL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write madc ctrl1 reg 0x%X\n",
+ TWL4030_MADC_CTRL1);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize MADC and request for threaded irq
+ */
+static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+{
+ struct twl4030_madc_data *madc;
+ struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+ u8 regval;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+ madc = kzalloc(sizeof(*madc), GFP_KERNEL);
+ if (!madc)
+ return -ENOMEM;
+
+ /*
+ * Phoenix provides 2 interrupt lines. The first one is connected to
+ * the OMAP. The other one can be connected to another processor, such
+ * as a modem; hence the two separate ISR and IMR registers.
+ */
+ madc->imr = (pdata->irq_line == 1) ?
+ TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
+ madc->isr = (pdata->irq_line == 1) ?
+ TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
+ ret = twl4030_madc_set_power(madc, 1);
+ if (ret < 0)
+ goto err_power;
+ ret = twl4030_madc_set_current_generator(madc, 0, 1);
+ if (ret < 0)
+ goto err_current_generator;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ &regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
+ regval |= TWL4030_BCI_MESBAT;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
+ platform_set_drvdata(pdev, madc);
+ mutex_init(&madc->lock);
+ ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
+ twl4030_madc_threaded_irq_handler,
+ IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request irq\n");
+ goto err_irq;
+ }
+ twl4030_madc = madc;
+ return 0;
+err_irq:
+ platform_set_drvdata(pdev, NULL);
+err_i2c:
+ twl4030_madc_set_current_generator(madc, 0, 0);
+err_current_generator:
+ twl4030_madc_set_power(madc, 0);
+err_power:
+ kfree(madc);
+
+ return ret;
+}
+
+static int __devexit twl4030_madc_remove(struct platform_device *pdev)
+{
+ struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
+
+ free_irq(platform_get_irq(pdev, 0), madc);
+ platform_set_drvdata(pdev, NULL);
+ twl4030_madc_set_current_generator(madc, 0, 0);
+ twl4030_madc_set_power(madc, 0);
+ kfree(madc);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_driver = {
+ .probe = twl4030_madc_probe,
+ .remove = __devexit_p(twl4030_madc_remove),
+ .driver = {
+ .name = "twl4030_madc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init twl4030_madc_init(void)
+{
+ return platform_driver_register(&twl4030_madc_driver);
+}
+
+module_init(twl4030_madc_init);
+
+static void __exit twl4030_madc_exit(void)
+{
+ platform_driver_unregister(&twl4030_madc_driver);
+}
+
+module_exit(twl4030_madc_exit);
+
+MODULE_DESCRIPTION("TWL4030 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("twl4030_madc");
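A short, hypothetical consumer of the MADC API exported above. Channel 12 and
the function name are placeholders; the channel map is board specific. Leaving
func_cb NULL makes twl4030_madc_conversion() block until the result is ready,
and twl4030_get_madc_conversion() is the single-channel shortcut.

#include <linux/i2c/twl4030-madc.h>

static int example_read_channel12(void)
{
	struct twl4030_madc_request req = {
		.channels = 1 << 12,		/* bitmap of channels to convert */
		.do_avg	  = 1,			/* let the MADC average samples */
		.method	  = TWL4030_MADC_SW2,	/* asynchronous software request */
		.func_cb  = NULL,		/* NULL: wait for the conversion */
	};
	int ret = twl4030_madc_conversion(&req);

	if (ret < 0)
		return ret;

	return req.rbuf[12];	/* already scaled (mV, mA or degrees C) */
}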
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb414a78a..38ffbd50a0d2 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -60,6 +60,7 @@ static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
input_report_abs(idev, ABS_PRESSURE, pressure);
+ input_report_key(idev, BTN_TOUCH, 1);
input_sync(idev);
}
@@ -68,6 +69,7 @@ static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts)
struct input_dev *idev = ts->idev;
input_report_abs(idev, ABS_PRESSURE, 0);
+ input_report_key(idev, BTN_TOUCH, 0);
input_sync(idev);
}
@@ -384,13 +386,20 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
idev->open = ucb1x00_ts_open;
idev->close = ucb1x00_ts_close;
- __set_bit(EV_ABS, idev->evbit);
- __set_bit(ABS_X, idev->absbit);
- __set_bit(ABS_Y, idev->absbit);
- __set_bit(ABS_PRESSURE, idev->absbit);
+ idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_drvdata(idev, ts);
+ ucb1x00_adc_enable(ts->ucb);
+ ts->x_res = ucb1x00_ts_read_xres(ts);
+ ts->y_res = ucb1x00_ts_read_yres(ts);
+ ucb1x00_adc_disable(ts->ucb);
+
+ input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
err = input_register_device(idev);
if (err)
goto fail;
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index 348052aa5dbf..d698703dbd46 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -122,6 +122,7 @@ static struct pci_device_id vx855_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, vx855_pci_tbl);
static struct pci_driver vx855_pci_driver = {
.name = "vx855",
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index d2ecc2435736..529d65ba5353 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -38,7 +38,6 @@ static int wl1273_core_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s\n", __func__);
mfd_remove_devices(&client->dev);
- i2c_set_clientdata(client, NULL);
kfree(core);
return 0;
@@ -79,8 +78,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
cell = &core->cells[children];
cell->name = "wl1273_fm_radio";
- cell->platform_data = &core;
- cell->data_size = sizeof(core);
+ cell->mfd_data = &core;
children++;
if (pdata->children & WL1273_CODEC_CHILD) {
@@ -88,8 +86,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s: Have codec.\n", __func__);
cell->name = "wl1273-codec";
- cell->platform_data = &core;
- cell->data_size = sizeof(core);
+ cell->mfd_data = &core;
children++;
}
@@ -104,7 +101,6 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
return 0;
err:
- i2c_set_clientdata(client, NULL);
pdata->free_resources();
kfree(core);
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index f7192d438aab..a5cd17e18d09 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -26,15 +26,6 @@
#include <linux/delay.h>
-/*
- * Since generic IRQs don't currently support interrupt controllers on
- * interrupt driven buses we don't use genirq but instead provide an
- * interface that looks very much like the standard ones. This leads
- * to some bodges, including storing interrupt handler information in
- * the static irq_data table we use to look up the data for individual
- * interrupts, but hopefully won't last too long.
- */
-
struct wm831x_irq_data {
int primary;
int reg;
@@ -361,6 +352,10 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
/* If there's been a change in the mask write it back
* to the hardware. */
if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) {
+ dev_dbg(wm831x->dev, "IRQ mask sync: %x = %x\n",
+ WM831X_INTERRUPT_STATUS_1_MASK + i,
+ wm831x->irq_masks_cur[i]);
+
wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i];
wm831x_reg_write(wm831x,
WM831X_INTERRUPT_STATUS_1_MASK + i,
@@ -371,7 +366,7 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&wm831x->irq_lock);
}
-static void wm831x_irq_unmask(struct irq_data *data)
+static void wm831x_irq_enable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
@@ -380,7 +375,7 @@ static void wm831x_irq_unmask(struct irq_data *data)
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm831x_irq_mask(struct irq_data *data)
+static void wm831x_irq_disable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
@@ -426,8 +421,8 @@ static struct irq_chip wm831x_irq_chip = {
.name = "wm831x",
.irq_bus_lock = wm831x_irq_lock,
.irq_bus_sync_unlock = wm831x_irq_sync_unlock,
- .irq_mask = wm831x_irq_mask,
- .irq_unmask = wm831x_irq_unmask,
+ .irq_disable = wm831x_irq_disable,
+ .irq_enable = wm831x_irq_enable,
.irq_set_type = wm831x_irq_set_type,
};
@@ -449,6 +444,18 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
goto out;
}
+ /* The touch interrupts are visible in the primary register as
+ * an optimisation; open code this to avoid complicating the
+ * main handling loop and so we can also skip iterating the
+ * descriptors.
+ */
+ if (primary & WM831X_TCHPD_INT)
+ handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+ if (primary & WM831X_TCHDATA_INT)
+ handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+ if (primary & (WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT))
+ goto out;
+
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
@@ -481,6 +488,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
out:
+ /* Touchscreen interrupts are handled specially in the driver */
+ status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
+
for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
if (status_regs[i])
wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
@@ -517,6 +527,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
return 0;
}
+ if (pdata->irq_cmos)
+ i = 0;
+ else
+ i = WM831X_IRQ_OD;
+
+ wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
+ WM831X_IRQ_OD, i);
+
/* Try to flag /IRQ as a wake source; there are a number of
* unconditional wake sources in the PMIC so this isn't
* conditional but we don't actually care *too* much if it
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 0a8f772be88c..eed8e4f7a5a1 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/mfd/wm831x/core.h>
@@ -113,22 +114,27 @@ static int __devexit wm831x_spi_remove(struct spi_device *spi)
return 0;
}
-static int wm831x_spi_suspend(struct spi_device *spi, pm_message_t m)
+static int wm831x_spi_suspend(struct device *dev)
{
- struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+ struct wm831x *wm831x = dev_get_drvdata(dev);
return wm831x_device_suspend(wm831x);
}
+static const struct dev_pm_ops wm831x_spi_pm = {
+ .freeze = wm831x_spi_suspend,
+ .suspend = wm831x_spi_suspend,
+};
+
static struct spi_driver wm8310_spi_driver = {
.driver = {
.name = "wm8310",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8311_spi_driver = {
@@ -136,10 +142,10 @@ static struct spi_driver wm8311_spi_driver = {
.name = "wm8311",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8312_spi_driver = {
@@ -147,10 +153,10 @@ static struct spi_driver wm8312_spi_driver = {
.name = "wm8312",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8320_spi_driver = {
@@ -158,10 +164,10 @@ static struct spi_driver wm8320_spi_driver = {
.name = "wm8320",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8321_spi_driver = {
@@ -169,10 +175,10 @@ static struct spi_driver wm8321_spi_driver = {
.name = "wm8321",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8325_spi_driver = {
@@ -180,10 +186,10 @@ static struct spi_driver wm8325_spi_driver = {
.name = "wm8325",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8326_spi_driver = {
@@ -191,10 +197,10 @@ static struct spi_driver wm8326_spi_driver = {
.name = "wm8326",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static int __init wm831x_spi_init(void)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1bfef4846b07..3a6e78cb0384 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -245,7 +245,7 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
{
struct mfd_cell cell = {
.name = "wm8400-codec",
- .driver_data = wm8400,
+ .mfd_data = wm8400,
};
return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 41233c7fa581..e673bda21f5d 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* Don't actually go through with the suspend if the CODEC is
+ * still active (e.g., for audio passthrough from the CP). */
+ ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & WM8994_VMID_SEL_MASK) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+
/* GPIO configuration state is saved here since we may be configuring
* the GPIO alternate functions even if we're not using the gpiolib
* driver for them.
@@ -261,6 +271,13 @@ static int wm8994_suspend(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to save LDO registers: %d\n", ret);
+ /* Explicitly put the device into reset in case regulators
+ * don't get disabled in order to ensure consistent restart.
+ */
+ wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET, 0x8994);
+
+ wm8994->suspended = true;
+
ret = regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
@@ -276,6 +293,10 @@ static int wm8994_resume(struct device *dev)
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* We may have lied to the PM core about suspending */
+ if (!wm8994->suspended)
+ return 0;
+
ret = regulator_bulk_enable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
@@ -298,6 +319,8 @@ static int wm8994_resume(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
+ wm8994->suspended = false;
+
return 0;
}
#endif
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 29e8faf9c01c..f5e439a37dc5 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -182,7 +182,7 @@ static void wm8994_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&wm8994->irq_lock);
}
-static void wm8994_irq_unmask(struct irq_data *data)
+static void wm8994_irq_enable(struct irq_data *data)
{
struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
@@ -191,7 +191,7 @@ static void wm8994_irq_unmask(struct irq_data *data)
wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm8994_irq_mask(struct irq_data *data)
+static void wm8994_irq_disable(struct irq_data *data)
{
struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
@@ -204,8 +204,8 @@ static struct irq_chip wm8994_irq_chip = {
.name = "wm8994",
.irq_bus_lock = wm8994_irq_lock,
.irq_bus_sync_unlock = wm8994_irq_sync_unlock,
- .irq_mask = wm8994_irq_mask,
- .irq_unmask = wm8994_irq_unmask,
+ .irq_disable = wm8994_irq_disable,
+ .irq_enable = wm8994_irq_enable,
};
/* The processing of the primary interrupt occurs in a thread so that
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index cc8e49db45fe..a8ad7eacf0aa 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -2,6 +2,14 @@
# Misc strange devices
#
+# This one has to live outside of the MISC_DEVICES conditional,
+# because it may be selected by drivers/platform/x86/hp_accel.
+config SENSORS_LIS3LV02D
+ tristate
+ depends on INPUT
+ select INPUT_POLLDEV
+ default n
+
menuconfig MISC_DEVICES
bool "Misc devices"
---help---
@@ -457,5 +465,6 @@ source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
+source "drivers/misc/lis3lv02d/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 98009cc20cb9..804f421bc079 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
+obj-y += lis3lv02d/
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 740ff0738ea8..620973ed8bf9 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -183,9 +183,7 @@ struct iwmct_priv {
u32 barker;
struct iwmct_dbg dbg;
- /* drivers work queue */
- struct workqueue_struct *wq;
- struct workqueue_struct *bus_rescan_wq;
+ /* drivers work items */
struct work_struct bus_rescan_worker;
struct work_struct isr_worker;
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index c73cef2c3c5e..727af07f1fbd 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -89,7 +89,7 @@ static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
switch (msg->hdr.opcode) {
case OP_OPR_ALIVE:
LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
- queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
+ schedule_work(&priv->bus_rescan_worker);
break;
default:
LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
@@ -360,7 +360,7 @@ static void iwmct_irq(struct sdio_func *func)
/* clear the function's interrupt request bit (write 1 to clear) */
sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
- queue_work(priv->wq, &priv->isr_worker);
+ schedule_work(&priv->isr_worker);
LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
@@ -506,10 +506,6 @@ static int iwmct_probe(struct sdio_func *func,
priv->func = func;
sdio_set_drvdata(func, priv);
-
- /* create drivers work queue */
- priv->wq = create_workqueue(DRV_NAME "_wq");
- priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
@@ -604,9 +600,9 @@ static void iwmct_remove(struct sdio_func *func)
sdio_release_irq(func);
sdio_release_host(func);
- /* Safely destroy osc workqueue */
- destroy_workqueue(priv->bus_rescan_wq);
- destroy_workqueue(priv->wq);
+ /* Make sure works are finished */
+ flush_work_sync(&priv->bus_rescan_worker);
+ flush_work_sync(&priv->isr_worker);
sdio_claim_host(func);
sdio_disable_func(func);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 59c118c19a91..27dc463097f3 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -988,7 +988,7 @@ static void kgdbts_run_tests(void)
static int kgdbts_option_setup(char *opt)
{
- if (strlen(opt) > MAX_CONFIG_LEN) {
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
return -ENOSPC;
}
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
new file mode 100644
index 000000000000..8f474e6fc7b4
--- /dev/null
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -0,0 +1,37 @@
+#
+# STMicroelectronics LIS3LV02D and similar accelerometers
+#
+
+config SENSORS_LIS3_SPI
+ tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
+ depends on !ACPI && SPI_MASTER && INPUT
+ select SENSORS_LIS3LV02D
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via SPI. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the SPI transport
+ is called lis3lv02d_spi.
+
+config SENSORS_LIS3_I2C
+ tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ depends on I2C && INPUT
+ select SENSORS_LIS3LV02D
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via I2C. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the device to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the I2C transport
+ is called lis3lv02d_i2c.
diff --git a/drivers/misc/lis3lv02d/Makefile b/drivers/misc/lis3lv02d/Makefile
new file mode 100644
index 000000000000..4bf58b16fcf8
--- /dev/null
+++ b/drivers/misc/lis3lv02d/Makefile
@@ -0,0 +1,7 @@
+#
+# STMicroelectronics LIS3LV02D and similar accelerometers
+#
+
+obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o
+obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d_spi.o
+obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d_i2c.o
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index d805e8e57967..b928bc14e97b 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -38,7 +38,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/pm_runtime.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "lis3lv02d.h"
#define DRIVER_NAME "lis3lv02d"
@@ -88,7 +88,6 @@
struct lis3lv02d lis3_dev = {
.misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
};
-
EXPORT_SYMBOL_GPL(lis3_dev);
/* just like param_set_int() but does sanity-check so that it won't point
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index a1939589eb2c..a1939589eb2c 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 8853afce85ce..b20dfb4522d2 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -33,7 +33,7 @@
#include <linux/delay.h>
#include "lis3lv02d.h"
-#define DRV_NAME "lis3lv02d_i2c"
+#define DRV_NAME "lis3lv02d_i2c"
static const char reg_vdd[] = "Vdd";
static const char reg_vdd_io[] = "Vdd_IO";
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 2549de1de4e2..962c9642cf35 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -16,10 +16,11 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "lis3lv02d.h"
-#define DRV_NAME "lis3lv02d_spi"
+#define DRV_NAME "lis3lv02d_spi"
#define LIS3_SPI_READ 0x80
static int lis3_spi_read(struct lis3lv02d *lis3, int reg, u8 *v)
@@ -88,9 +89,10 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
return lis3lv02d_remove_fs(&lis3_dev);
}
-#ifdef CONFIG_PM
-static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -99,8 +101,9 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
return 0;
}
-static int lis3lv02d_spi_resume(struct spi_device *spi)
+static int lis3lv02d_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -108,21 +111,19 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
return 0;
}
-
-#else
-#define lis3lv02d_spi_suspend NULL
-#define lis3lv02d_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+ lis3lv02d_spi_resume);
+
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &lis3lv02d_spi_pm,
},
.probe = lis302dl_spi_probe,
.remove = __devexit_p(lis302dl_spi_remove),
- .suspend = lis3lv02d_spi_suspend,
- .resume = lis3lv02d_spi_resume,
};
static int __init lis302dl_init(void)
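The SPI transport above also moves from the legacy spi_driver suspend/resume callbacks to dev_pm_ops: the handlers take a struct device, recover the spi_device with to_spi_device(), and are wrapped by SIMPLE_DEV_PM_OPS, which degrades to an empty ops table when CONFIG_PM_SLEEP is off, so the old #else NULL stubs are no longer needed. A minimal sketch of the same pattern for a hypothetical SPI driver named "foo":

#include <linux/pm.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	dev_dbg(&spi->dev, "suspending\n");	/* quiesce the device here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	dev_dbg(&spi->dev, "resuming\n");	/* wake the device here */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm,	/* replaces .suspend / .resume */
	},
};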
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 2a876c4099cd..3b1f783bf924 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -58,12 +58,11 @@ config SDIO_UART
config MMC_TEST
tristate "MMC host test driver"
- default n
help
Development driver that performs a series of reads and writes
to a memory card in order to expose certain well known bugs
in host controllers. The tests are executed by writing to the
- "test" file in sysfs under each card. Note that whatever is
+ "test" file in debugfs under each card. Note that whatever is
on your card will be overwritten by these tests.
This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bfc8a8ae55df..61d233a7c118 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -621,6 +621,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = &card->dev;
+ set_disk_ro(md->disk, md->read_only);
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27f4132..5ec8eddfcf6e 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -88,6 +88,7 @@ struct mmc_test_area {
* @sectors: amount of sectors to check in one group
* @ts: time values of transfer
* @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
*/
struct mmc_test_transfer_result {
struct list_head link;
@@ -95,6 +96,7 @@ struct mmc_test_transfer_result {
unsigned int sectors;
struct timespec ts;
unsigned int rate;
+ unsigned int iops;
};
/**
@@ -226,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
- printk(KERN_INFO "%s: Warning: Host did not "
- "wait for busy state to end.\n",
- mmc_hostname(test->card->host));
+ if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ printk(KERN_INFO "%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
@@ -494,7 +497,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
unsigned int count, unsigned int sectors, struct timespec ts,
- unsigned int rate)
+ unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -509,6 +512,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
tr->sectors = sectors;
tr->ts = ts;
tr->rate = rate;
+ tr->iops = iops;
list_add_tail(&tr->link, &test->gr->tr_lst);
}
@@ -519,20 +523,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
struct timespec *ts1, struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
+ iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
- "seconds (%u kB/s, %u KiB/s)\n",
+ "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+ (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
@@ -542,22 +548,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
unsigned int count, struct timespec *ts1,
struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
+ iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+ "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%u.%02u IOPS)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024);
+ rate / 1000, rate / 1024, iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, count, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
@@ -1425,28 +1433,29 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
}
/*
- * Initialize an area for testing large transfers. The size of the area is the
- * preferred erase size which is a good size for optimal transfer speed. Note
- * that is typically 4MiB for modern cards. The test area is set to the middle
- * of the card because cards may have different charateristics at the front
- * (for FAT file system optimization). Optionally, the area is erased (if the
- * card supports it) which may improve write performance. Optionally, the area
- * is filled with data for subsequent read tests.
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
*/
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
struct mmc_test_area *t = &test->area;
- unsigned long min_sz = 64 * 1024;
+ unsigned long min_sz = 64 * 1024, sz;
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
- if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
- t->max_sz = TEST_AREA_MAX_SIZE;
- else
- t->max_sz = (unsigned long)test->card->pref_erase << 9;
+ /* Make the test area size about 4MiB */
+ sz = (unsigned long)test->card->pref_erase << 9;
+ t->max_sz = sz;
+ while (t->max_sz < 4 * 1024 * 1024)
+ t->max_sz += sz;
+ while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+ t->max_sz -= sz;
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
@@ -1766,6 +1775,188 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return 0;
}
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+ uint64_t r;
+
+ rnd_next = rnd_next * 1103515245 + 12345;
+ r = (rnd_next >> 16) & 0x7fff;
+ return (r * rnd_cnt) >> 15;
+}
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+ unsigned long sz)
+{
+ unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+ unsigned int ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ ssz = sz >> 9;
+
+ rnd_addr = mmc_test_capacity(test->card) / 4;
+ range1 = rnd_addr / test->card->pref_erase;
+ range2 = range1 / ssz;
+
+ getnstimeofday(&ts1);
+ for (cnt = 0; cnt < UINT_MAX; cnt++) {
+ getnstimeofday(&ts2);
+ ts = timespec_sub(ts2, ts1);
+ if (ts.tv_sec >= 10)
+ break;
+ ea = mmc_test_rnd_num(range1);
+ if (ea == last_ea)
+ ea -= 1;
+ last_ea = ea;
+ dev_addr = rnd_addr + test->card->pref_erase * ea +
+ ssz * mmc_test_rnd_num(range2);
+ ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+ if (ret)
+ return ret;
+ }
+ if (print)
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+ unsigned int next;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ /*
+ * When writing, try to get more consistent results by running
+ * the test twice with exactly the same I/O but outputting the
+ * results only for the 2nd run.
+ */
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ ret = mmc_test_rnd_perf(test, write, 1, sz);
+ if (ret)
+ return ret;
+ }
+ sz = test->area.max_tfr;
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+ unsigned int tot_sz, int max_scatter)
+{
+ unsigned int dev_addr, i, cnt, sz, ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ sz = test->area.max_tfr;
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ struct mmc_test_area *t = &test->area;
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
+ ssz = sz >> 9;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if (tot_sz > dev_addr << 9)
+ tot_sz = dev_addr << 9;
+ cnt = tot_sz / sz;
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, write,
+ max_scatter, 0);
+ if (ret)
+ return ret;
+ dev_addr += ssz;
+ }
+ getnstimeofday(&ts2);
+
+ ts = timespec_sub(ts2, ts1);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+ return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ for (i = 0; i < 10; i++) {
+ ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 5; i++) {
+ ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 1);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2005,6 +2196,34 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Random read performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Random write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential read into scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential write from scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2148,11 +2367,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u\n",
+ seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
tr->count, tr->sectors,
(unsigned long)tr->ts.tv_sec,
(unsigned long)tr->ts.tv_nsec,
- tr->rate);
+ tr->rate, tr->iops / 100, tr->iops % 100);
}
}
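The IOPS figure added above reuses mmc_test_rate(): passing count * 100 instead of a byte count yields operations per second scaled by 100, so plain integer arithmetic keeps two decimal places for printing as iops / 100 and iops % 100. A small stand-alone sketch of the same trick; rate_per_sec() is a hypothetical stand-in for mmc_test_rate():

/* rate_per_sec(): how many of the given units were moved per second. */
static unsigned int rate_per_sec(uint64_t units, struct timespec *ts);

static void print_iops(unsigned int count, struct timespec *ts)
{
	/* scale by 100 so two decimal places survive integer division */
	unsigned int iops = rate_per_sec((uint64_t)count * 100, ts);

	printk(KERN_INFO "%u.%02u IOPS\n", iops / 100, iops % 100);
}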
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 86b479119332..639501970b41 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
- sdio_cis.o sdio_io.o sdio_irq.o
+ sdio_cis.o sdio_io.o sdio_irq.o \
+ quirks.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6625c057be05..579ba1e97c8c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -167,8 +167,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
WARN_ON(!host->claimed);
- led_trigger_event(host->led, LED_FULL);
-
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
if (mrq->data) {
@@ -194,6 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
}
mmc_host_clk_ungate(host);
+ led_trigger_event(host->led, LED_FULL);
host->ops->request(host, mrq);
}
@@ -1495,6 +1494,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_hostname(host), __func__, host->f_init);
#endif
mmc_power_up(host);
+
+ /*
+ * sdio_reset sends CMD52 to reset card. Since we do not know
+ * if the card is being re-initialized, just send it. CMD52
+ * should be ignored by SD/eMMC cards.
+ */
sdio_reset(host);
mmc_go_idle(host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca1fdde29df6..20b1c0831eac 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -61,6 +61,8 @@ int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
+void mmc_fixup_device(struct mmc_card *card);
+
/* Module parameters */
extern int use_spi_crc;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b3ac6c5bc5c6..461e6a17fb90 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -160,10 +160,7 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
* gate the clock, because there is somebody out there that may still
* be using it.
*/
- if (mmc_card_sdio(card))
- return false;
-
- return true;
+ return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}
/**
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 16006ef153fe..14e95f39a7bf 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -302,6 +302,44 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
if (card->ext_csd.rev >= 4) {
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+ * area offset and size to user by adding sysfs interface.
+ */
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ u8 hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ u8 hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ card->ext_csd.enhanced_area_en = 1;
+ /*
+ * calculate the enhanced data area offset, in bytes
+ */
+ card->ext_csd.enhanced_area_offset =
+ (ext_csd[139] << 24) + (ext_csd[138] << 16) +
+ (ext_csd[137] << 8) + ext_csd[136];
+ if (mmc_card_blockaddr(card))
+ card->ext_csd.enhanced_area_offset <<= 9;
+ /*
+ * calculate the enhanced data area size, in kilobytes
+ */
+ card->ext_csd.enhanced_area_size =
+ (ext_csd[142] << 16) + (ext_csd[141] << 8) +
+ ext_csd[140];
+ card->ext_csd.enhanced_area_size *=
+ (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+ card->ext_csd.enhanced_area_size <<= 9;
+ } else {
+ /*
+ * If the enhanced area is not enabled, disable these
+ * device attributes.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ }
card->ext_csd.sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.sec_erase_mult =
@@ -336,6 +374,9 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+ card->ext_csd.enhanced_area_offset);
+MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
@@ -349,6 +390,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_serial.attr,
+ &dev_attr_enhanced_area_offset.attr,
+ &dev_attr_enhanced_area_size.attr,
NULL,
};
@@ -378,6 +421,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
int err, ddr = 0;
u32 cid[4];
unsigned int max_dtr;
+ u32 rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -391,7 +435,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
- err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
+ err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
if (err)
goto err;
@@ -479,11 +523,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_read_ext_csd(card);
if (err)
goto free_card;
+
+ /* If doing byte addressing, check if required to do sector
+ * addressing. Handle the case of <2GB cards needing sector
+ * addressing. See section 8.1 of JEDEC Standard JESD84-A441;
+ * ocr register has bit 30 set for sector addressing.
+ */
+ if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+ mmc_card_set_blockaddr(card);
+
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
}
/*
+ * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
+ * bit. This bit will be lost every time after a reset or power off.
+ */
+ if (card->ext_csd.enhanced_area_en) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_ERASE_GROUP_DEF, 1);
+
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ if (err) {
+ err = 0;
+ /*
+ * Just disable the enhanced area offset & size;
+ * we will try to enable ERASE_GROUP_DEF
+ * again at the next reinit
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ } else {
+ card->ext_csd.erase_group_def = 1;
+ /*
+ * ERASE_GRP_DEF was enabled successfully.
+ * This affects the erase size, so we
+ * need to recompute it here.
+ */
+ mmc_set_erase_size(card);
+ }
+ }
+
+ /*
* Activate high speed (if supported)
*/
if ((card->ext_csd.hs_max_dtr != 0) &&
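For concreteness, the enhanced-area fields added above come straight out of EXT_CSD: bytes 136-139 form the offset (shifted left by 9 on block-addressed cards to convert sectors to bytes) and bytes 140-142 form a multiplier that is scaled by the high-capacity erase-group and write-protect-group sizes and then by 512 to give KiB. A short worked example with invented register values (not from a real card):

u8 ext_csd[512] = { 0 };
unsigned long long offset;
unsigned int size;

ext_csd[138] = 0x10;	/* offset bytes 136..139 */
ext_csd[140] = 0x08;	/* size multiplier, bytes 140..142 */

offset = (ext_csd[139] << 24) + (ext_csd[138] << 16) +
	 (ext_csd[137] << 8) + ext_csd[136];		/* 0x00100000 */
offset <<= 9;		/* block-addressed card: 512 MiB expressed in bytes */

size = (ext_csd[142] << 16) + (ext_csd[141] << 8) + ext_csd[140];	/* 8 */
size *= 4 * 4;		/* assuming hc_erase_grp_sz = hc_wp_grp_sz = 4 */
size <<= 9;		/* 65536, i.e. 64 MiB expressed in KiB */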
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
new file mode 100644
index 000000000000..4fb16ac007f0
--- /dev/null
+++ b/drivers/mmc/core/quirks.c
@@ -0,0 +1,85 @@
+/*
+ * This file contains work-arounds for many known sdio hardware
+ * bugs.
+ *
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mod_devicetable.h>
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least a part of these bugs we need a work-around
+ */
+
+struct mmc_fixup {
+ u16 vendor, device; /* You can use SDIO_ANY_ID here of course */
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+/*
+ * This hook just adds a quirk unconditionally
+ */
+static void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+/*
+ * This hook just removes a quirk unconditionally
+ */
+static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+ if (mmc_card_sdio(card))
+ card->quirks |= data;
+}
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static const struct mmc_fixup mmc_fixup_methods[] = {
+ /* by default sdio devices are considered CLK_GATING broken */
+ /* good cards will be whitelisted as they are tested */
+ { SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
+ { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
+ { 0 }
+};
+
+void mmc_fixup_device(struct mmc_card *card)
+{
+ const struct mmc_fixup *f;
+
+ for (f = mmc_fixup_methods; f->vendor_fixup; f++) {
+ if ((f->vendor == card->cis.vendor
+ || f->vendor == (u16) SDIO_ANY_ID) &&
+ (f->device == card->cis.device
+ || f->device == (u16) SDIO_ANY_ID)) {
+ dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_fixup_device);
+
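Entries in the fixup table are applied in order, so the SDIO_ANY_ID wildcard row marks every SDIO card as broken for clock gating and later rows whitelist specific parts by removing the quirk again. Extending the table is one line per device; a hedged sketch of what a hypothetical new whitelist entry would look like (the vendor/device macros below are invented for illustration):

#define SDIO_VENDOR_ID_EXAMPLE		0x1234	/* hypothetical IDs */
#define SDIO_DEVICE_ID_EXAMPLE_WIFI	0x5678

static const struct mmc_fixup mmc_fixup_methods[] = {
	/* default: treat every SDIO card as unable to cope with clock gating */
	{ SDIO_ANY_ID, SDIO_ANY_ID,
	  add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
	/* whitelist a part known to survive clock gating */
	{ SDIO_VENDOR_ID_EXAMPLE, SDIO_DEVICE_ID_EXAMPLE_WIFI,
	  remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
	{ 0 }
};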
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d18c32bca99b..6dac89fe0535 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -21,6 +21,7 @@
#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "sd.h"
#include "sd_ops.h"
static const unsigned int tran_exp[] = {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5c4a54d9b6a4..617e9adb80f1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -458,6 +458,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
card = oldcard;
}
+ mmc_fixup_device(card);
if (card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index afe8c6fa166a..1a21c6427a19 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -225,7 +225,7 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
help
This selects the TI OMAP High Speed Multimedia card Interface.
If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
@@ -311,7 +311,7 @@ config MMC_MSM
config MMC_MXC
tristate "Freescale i.MX2/3 Multimedia Card Interface support"
- depends on ARCH_MXC
+ depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
help
This selects the Freescale i.MX2/3 Multimedia card Interface.
If you have a i.MX platform with a Multimedia Card slot,
@@ -319,6 +319,15 @@ config MMC_MXC
If unsure, say N.
+config MMC_MXS
+ tristate "Freescale MXS Multimedia Card Interface support"
+ depends on ARCH_MXS && MXS_DMA
+ help
+ This selects the Freescale SSP MMC controller found on MXS based
+ platforms like mx23/28.
+
+ If unsure, say N.
+
config MMC_TIFM_SD
tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
depends on EXPERIMENTAL && PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e834fb223e9a..30aa6867745f 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
+obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index ad2a7a032cdf..80bc9a5c25cc 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -578,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ dma_unmap_sg(host->dma.chan->device->dev,
+ data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
@@ -588,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
struct dma_chan *chan = host->data_chan;
if (chan) {
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
@@ -684,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
else
direction = DMA_TO_DEVICE;
- sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
- if (sglen != data->sg_len)
- goto unmap_exit;
+ sglen = dma_map_sg(chan->device->dev, data->sg,
+ data->sg_len, direction);
+
desc = chan->device->device_prep_slave_sg(chan,
- data->sg, data->sg_len, direction,
+ data->sg, sglen, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
@@ -699,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
return 0;
unmap_exit:
- dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
return -ENOMEM;
}
@@ -709,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host)
struct dma_async_tx_descriptor *desc = host->dma.data_desc;
if (chan) {
- desc->tx_submit(desc);
- chan->device->device_issue_pending(chan);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
}
}
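Two things change in the atmel-mci hunks above: the open-coded calls move to the generic dmaengine wrappers (dmaengine_terminate_all(), dmaengine_submit(), dma_async_issue_pending()), and the scatterlist is now mapped against the DMA channel's device rather than the MMC platform device, so mapping decisions are made for the engine that actually performs the transfer. A condensed, hypothetical sketch of the resulting prepare-and-submit path:

static int my_prepare_and_submit(struct dma_chan *chan, struct mmc_data *data,
				 enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc;
	int sglen;

	/* map against the DMA channel's device, not the platform device */
	sglen = dma_map_sg(chan->device->dev, data->sg, data->sg_len, direction);
	if (!sglen)
		return -ENOMEM;

	desc = chan->device->device_prep_slave_sg(chan, data->sg, sglen,
			direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
		return -ENOMEM;
	}

	dmaengine_submit(desc);		/* replaces desc->tx_submit(desc) */
	dma_async_issue_pending(chan);	/* replaces device_issue_pending() */
	return 0;
}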
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 66b4ce587f4b..ce2a47b71dd6 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
"WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
limit, mask, e, x);
#endif
- return 0;
+ return err;
}
static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 2fcc82577c1b..299c1d61b6b3 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -562,7 +562,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* enable clock */
- mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+ mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
+ SDMMC_CLKEN_LOW_PWR);
/* inform CIU */
mci_send_cmd(slot,
@@ -661,6 +662,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
+ u32 regs;
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
@@ -672,6 +674,16 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ slot->ctype = SDMMC_CTYPE_8BIT;
+ break;
+ }
+
+ /* DDR mode set */
+ if (ios->ddr) {
+ regs = mci_readl(slot->host, UHS_REG);
+ regs |= (0x1 << slot->id) << 16;
+ mci_writel(slot->host, UHS_REG, regs);
}
if (ios->clock) {
@@ -1019,13 +1031,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
- unsigned int nbytes = 0, len, old_len, count = 0;
+ unsigned int nbytes = 0, len;
do {
len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
- if (count == 0)
- old_len = len;
-
if (offset + len <= sg->length) {
host->pull_data(host, (void *)(buf + offset), len);
@@ -1070,7 +1079,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
tasklet_schedule(&host->tasklet);
return;
}
- count++;
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
@@ -1441,6 +1449,12 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Card initially undetected */
slot->last_detect_state = 0;
+ /*
+ * Card may have been plugged in prior to boot so we
+ * need to run the detect tasklet
+ */
+ tasklet_schedule(&host->card_tasklet);
+
return 0;
}
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 5dd55a75233d..23c662af5616 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -43,6 +43,7 @@
#define SDMMC_USRID 0x068
#define SDMMC_VERID 0x06c
#define SDMMC_HCON 0x070
+#define SDMMC_UHS_REG 0x074
#define SDMMC_BMOD 0x080
#define SDMMC_PLDMND 0x084
#define SDMMC_DBADDR 0x088
@@ -51,7 +52,6 @@
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
#define SDMMC_DATA 0x100
-#define SDMMC_DATA_ADR 0x100
/* shift bit field */
#define _SBF(f, v) ((v) << (f))
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index fd877f633dd2..2f7fc0c5146f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1516,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
return 0;
}
-#if defined(CONFIG_OF)
static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
{ .compatible = "mmc-spi-slot", },
{},
};
-#endif
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
-#if defined(CONFIG_OF)
.of_match_table = mmc_spi_of_match_table,
-#endif
},
.probe = mmc_spi_probe,
.remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2d..5bbb87d10251 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- if (mrq->data)
- mrq->data->bytes_xfered = host->data_xfered;
-
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+ const char *rxname, *txname;
+ dma_cap_mask_t mask;
+
+ if (!plat || !plat->dma_filter) {
+ dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally, however if it is
+ * specified but cannot be located, DMA will be disabled.
+ */
+ if (plat->dma_rx_param) {
+ host->dma_rx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_rx_param);
+ /* E.g if no DMA hardware is present */
+ if (!host->dma_rx_channel)
+ dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+ }
+
+ if (plat->dma_tx_param) {
+ host->dma_tx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_tx_param);
+ if (!host->dma_tx_channel)
+ dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+ } else {
+ host->dma_tx_channel = host->dma_rx_channel;
+ }
+
+ if (host->dma_rx_channel)
+ rxname = dma_chan_name(host->dma_rx_channel);
+ else
+ rxname = "none";
+
+ if (host->dma_tx_channel)
+ txname = dma_chan_name(host->dma_tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx_channel) {
+ struct device *dev = host->dma_tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (host->dma_rx_channel) {
+ struct device *dev = host->dma_rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel && plat->dma_tx_param)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+ struct dma_chan *chan = host->dma_current;
+ enum dma_data_direction dir;
+ u32 status;
+ int i;
+
+ /* Wait up to 1ms for the DMA to complete */
+ for (i = 0; ; i++) {
+ status = readl(host->base + MMCISTATUS);
+ if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Check to see whether we still have some data left in the FIFO -
+ * this catches DMA controllers which are unable to monitor the
+ * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+ * contiguous buffers. On TX, we'll get a FIFO underrun error.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dmaengine_terminate_all(chan);
+ if (!data->error)
+ data->error = -EIO;
+ }
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dir = DMA_TO_DEVICE;
+ } else {
+ dir = DMA_FROM_DEVICE;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+ /*
+ * Use of DMA with scatter-gather is impossible.
+ * Give up with DMA and switch back to PIO mode.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+ mmci_dma_release(host);
+ }
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .dst_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct mmc_data *data = host->data;
+ struct dma_chan *chan;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor *desc;
+ int nr_sg;
+
+ host->dma_current = NULL;
+
+ if (data->flags & MMC_DATA_READ) {
+ conf.direction = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ conf.direction = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* If there's no DMA channel, fall back to PIO */
+ if (!chan)
+ return -EINVAL;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (host->size <= variant->fifosize)
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ /* Okay, go for it. */
+ host->dma_current = chan;
+
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+
+ /* Trigger the DMA transfer */
+ writel(datactrl, host->base + MMCIDATACTRL);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+
+unmap_exit:
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+#endif
+
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
@@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
- host->data_xfered = 0;
-
- mmci_init_sg(host, data);
+ data->bytes_xfered = 0;
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
@@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
- if (data->flags & MMC_DATA_READ) {
+
+ if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+
+ /*
+ * Attempt to use DMA operation mode, if this
+ * should fail, fall back to PIO mode
+ */
+ if (!mmci_dma_start_data(host, datactrl))
+ return;
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
- * If we have less than a FIFOSIZE of bytes to transfer,
- * trigger a PIO interrupt as soon as any data is available.
+ * If we have less than the fifo 'half-full' threshold to
+ * transfer, trigger a PIO interrupt as soon as any data
+ * is available.
*/
- if (host->size < variant->fifosize)
+ if (host->size < variant->fifohalfsize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
@@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
- /* Calculate how far we are into the transfer */
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
+
+ /*
+ * Calculate how far we are into the transfer. Note that
+ * the data counter gives the number of bytes transferred
+ * on the MMC bus, not on the host side. On reads, this
+ * can be as much as a FIFO-worth of data ahead. This
+ * matters for FIFO overruns only.
+ */
remain = readl(host->base + MMCIDATACNT);
success = data->blksz * data->blocks - remain;
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+ status, success);
if (status & MCI_DATACRCFAIL) {
/* Last block was not successful */
- host->data_xfered = round_down(success - 1, data->blksz);
+ success -= 1;
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
- host->data_xfered = round_down(success, data->blksz);
data->error = -ETIMEDOUT;
- } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
- host->data_xfered = round_down(success, data->blksz);
+ } else if (status & MCI_TXUNDERRUN) {
+ data->error = -EIO;
+ } else if (status & MCI_RXOVERRUN) {
+ if (success > host->variant->fifosize)
+ success -= host->variant->fifosize;
+ else
+ success = 0;
data->error = -EIO;
}
-
- /*
- * We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent.
- */
- if (data->flags & MMC_DATA_READ) {
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- unsigned long flags;
-
- local_irq_save(flags);
- if (sg_miter_next(sg_miter)) {
- flush_dcache_page(sg_miter->page);
- sg_miter_stop(sg_miter);
- }
- local_irq_restore(flags);
- }
+ data->bytes_xfered = round_down(success, data->blksz);
}
if (status & MCI_DATABLOCKEND)
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_unmap(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
- host->data_xfered += data->blksz * data->blocks;
+ data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
mmci_request_end(host, data->mrq);
@@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (remain)
break;
- if (status & MCI_RXACTIVE)
- flush_dcache_page(sg_miter->page);
-
status = readl(base + MMCISTATUS);
} while (1);
@@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
local_irq_restore(flags);
/*
- * If we're nearing the end of the read, switch to
- * "any data available" mode.
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
*/
- if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+ if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
@@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
struct variant_data *variant = id->data;
@@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+ host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
@@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
amba_set_drvdata(dev, mmc);
- dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
- mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
- (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+ dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+ mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+ amba_rev(dev), (unsigned long long)dev->res.start,
+ dev->irq[0], dev->irq[1]);
+
+ mmci_dma_setup(host);
mmc_add_host(mmc);
@@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
+ mmci_dma_release(host);
free_irq(dev->irq[0], host);
if (!host->singleirq)
free_irq(dev->irq[1], host);
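In the reworked mmci_start_data() the DMA path is tried first and PIO is only configured when mmci_dma_start_data() refuses the job, which it does deliberately when no channel was acquired, when the transfer fits in the FIFO, or when mapping/preparation fails; the !CONFIG_DMA_ENGINE stubs return -ENOSYS so the same call sites work without any ifdefs. A trimmed sketch of that decision point, following the driver's names:

datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
if (data->flags & MMC_DATA_READ)
	datactrl |= MCI_DPSM_DIRECTION;

/* try DMA; any non-zero return simply means "use PIO instead" */
if (!mmci_dma_start_data(host, datactrl))
	return;

mmci_init_sg(host, data);	/* PIO path: map the SG list for CPU access */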
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index c1df7b82d36c..ec9a7bc6d0df 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@
struct clk;
struct variant_data;
+struct dma_chan;
struct mmci_host {
+ phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
@@ -161,8 +163,6 @@ struct mmci_host {
int gpio_cd_irq;
bool singleirq;
- unsigned int data_xfered;
-
spinlock_t lock;
unsigned int mclk;
@@ -181,5 +181,16 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
struct regulator *vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ struct dma_chan *dma_current;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+
+#define dma_inprogress(host) ((host)->dma_current)
+#else
+#define dma_inprogress(host) (0)
+#endif
};
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 153ab977a013..a4c865a5286b 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/gpio.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
@@ -266,14 +267,6 @@ msmsdcc_dma_complete_tlet(unsigned long data)
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
- if (host->curr.user_pages) {
- struct scatterlist *sg = host->dma.sg;
- int i;
-
- for (i = 0; i < host->dma.num_ents; i++)
- flush_dcache_page(sg_page(sg++));
- }
-
host->dma.sg = NULL;
host->dma.busy = 0;
@@ -941,6 +934,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
+{
+ struct msm_mmc_gpio_data *curr;
+ int i, rc = 0;
+
+ if (!host->plat->gpio_data && host->gpio_config_status == enable)
+ return;
+
+ curr = host->plat->gpio_data;
+ for (i = 0; i < curr->size; i++) {
+ if (enable) {
+ rc = gpio_request(curr->gpio[i].no,
+ curr->gpio[i].name);
+ if (rc) {
+ pr_err("%s: gpio_request(%d, %s) failed %d\n",
+ mmc_hostname(host->mmc),
+ curr->gpio[i].no,
+ curr->gpio[i].name, rc);
+ goto free_gpios;
+ }
+ } else {
+ gpio_free(curr->gpio[i].no);
+ }
+ }
+ host->gpio_config_status = enable;
+ return;
+
+free_gpios:
+ for (; i >= 0; i--)
+ gpio_free(curr->gpio[i].no);
+}
+
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
@@ -953,6 +978,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msmsdcc_enable_clocks(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+
if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
@@ -979,9 +1006,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
+ msmsdcc_setup_gpio(host, false);
break;
case MMC_POWER_UP:
pwr |= MCI_PWR_UP;
+ msmsdcc_setup_gpio(host, true);
break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
@@ -998,9 +1027,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msmsdcc_writel(host, pwr, MMCIPOWER);
}
#if BUSCLK_PWRSAVE
+ spin_lock_irqsave(&host->lock, flags);
msmsdcc_disable_clocks(host, 1);
-#endif
spin_unlock_irqrestore(&host->lock, flags);
+#endif
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 939557af266d..42d7bbc977c5 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -243,6 +243,7 @@ struct msmsdcc_host {
unsigned int cmd_datactrl;
struct mmc_command *cmd_cmd;
u32 cmd_c;
+ bool gpio_config_status;
bool prog_scan;
bool prog_enable;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4428594261c5..cc20e0259325 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -32,16 +32,14 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>
-#ifdef CONFIG_ARCH_MX2
-#include <mach/dma-mx1-mx2.h>
-#define HAS_DMA
-#endif
+#include <mach/dma.h>
#define DRIVER_NAME "mxc-mmc"
@@ -118,7 +116,8 @@ struct mxcmci_host {
void __iomem *base;
int irq;
int detect_irq;
- int dma;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
int do_dma;
int default_irq_mask;
int use_sdio;
@@ -129,7 +128,6 @@ struct mxcmci_host {
struct mmc_command *cmd;
struct mmc_data *data;
- unsigned int dma_nents;
unsigned int datasize;
unsigned int dma_dir;
@@ -144,6 +142,11 @@ struct mxcmci_host {
spinlock_t lock;
struct regulator *vcc;
+
+ int burstlen;
+ int dmareq;
+ struct dma_slave_config dma_slave_config;
+ struct imx_dma_data dma_data;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -206,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host)
writew(0xff, host->base + MMC_REG_RES_TO);
}
+static int mxcmci_setup_dma(struct mmc_host *mmc);
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned int blksz = data->blksz;
unsigned int datasize = nob * blksz;
-#ifdef HAS_DMA
struct scatterlist *sg;
- int i;
- int ret;
-#endif
+ int i, nents;
+
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
@@ -227,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
writew(blksz, host->base + MMC_REG_BLK_LEN);
host->datasize = datasize;
-#ifdef HAS_DMA
+ if (!mxcmci_use_dma(host))
+ return 0;
+
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3) {
host->do_dma = 0;
@@ -235,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
}
}
- if (data->flags & MMC_DATA_READ) {
+ if (data->flags & MMC_DATA_READ)
host->dma_dir = DMA_FROM_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
- datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
- } else {
+ else
host->dma_dir = DMA_TO_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
- ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
- datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
- }
+ nents = dma_map_sg(host->dma->device->dev, data->sg,
+ data->sg_len, host->dma_dir);
+ if (nents != data->sg_len)
+ return -EINVAL;
+
+ host->desc = host->dma->device->device_prep_slave_sg(host->dma,
+ data->sg, data->sg_len, host->dma_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
- return ret;
+ if (!host->desc) {
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+ host->dma_dir);
+ host->do_dma = 0;
+ return 0; /* Fall back to PIO */
}
wmb();
- imx_dma_enable(host->dma);
-#endif /* HAS_DMA */
+ dmaengine_submit(host->desc);
+
return 0;
}
@@ -337,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
struct mmc_data *data = host->data;
int data_error;
-#ifdef HAS_DMA
if (mxcmci_use_dma(host)) {
- imx_dma_disable(host->dma);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
+ dmaengine_terminate_all(host->dma);
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
host->dma_dir);
}
-#endif
if (stat & STATUS_ERR_MASK) {
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -545,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work)
}
}
-#ifdef HAS_DMA
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
@@ -568,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
mxcmci_finish_request(host, host->req);
}
}
-#endif /* HAS_DMA */
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
@@ -606,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
spin_unlock_irqrestore(&host->lock, flags);
-#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
host->base + MMC_REG_STATUS);
-#endif
if (sdio_irq) {
writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
@@ -621,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
if (stat & STATUS_END_CMD_RESP)
mxcmci_cmd_done(host, stat);
-#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
mxcmci_data_done(host, stat);
-#endif
+
if (host->default_irq_mask &&
(stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+
return IRQ_HANDLED;
}
@@ -642,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
host->req = req;
host->cmdat &= ~CMD_DAT_CONT_INIT;
-#ifdef HAS_DMA
- host->do_dma = 1;
-#endif
+
+ if (host->dma)
+ host->do_dma = 1;
+
if (req->data) {
error = mxcmci_setup_data(host, req->data);
if (error) {
@@ -660,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
}
error = mxcmci_start_cmd(host, req->cmd, cmdat);
+
out:
if (error)
mxcmci_finish_request(host, req);
@@ -698,22 +694,46 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
prescaler, divider, clk_in, clk_ios);
}
+static int mxcmci_setup_dma(struct mmc_host *mmc)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ struct dma_slave_config *config = &host->dma_slave_config;
+
+ config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->dst_addr_width = 4;
+ config->src_addr_width = 4;
+ config->dst_maxburst = host->burstlen;
+ config->src_maxburst = host->burstlen;
+
+ return dmaengine_slave_config(host->dma, config);
+}
+
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mxcmci_host *host = mmc_priv(mmc);
-#ifdef HAS_DMA
- unsigned int blen;
+ int burstlen, ret;
+
/*
* use burstlen of 64 in 4 bit mode (--> reg value 0)
* use burstlen of 16 in 1 bit mode (--> reg value 16)
*/
if (ios->bus_width == MMC_BUS_WIDTH_4)
- blen = 0;
+ burstlen = 64;
else
- blen = 16;
+ burstlen = 16;
+
+ if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
+ host->burstlen = burstlen;
+ ret = mxcmci_setup_dma(mmc);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to config DMA channel. Falling back to PIO\n");
+ dma_release_channel(host->dma);
+ host->do_dma = 0;
+ }
+ }
- imx_dma_config_burstlen(host->dma, blen);
-#endif
if (ios->bus_width == MMC_BUS_WIDTH_4)
host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
else
@@ -794,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
host->caps |= MMC_CAP_4_BIT_DATA;
}
+static bool filter(struct dma_chan *chan, void *param)
+{
+ struct mxcmci_host *host = param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
static const struct mmc_host_ops mxcmci_ops = {
.request = mxcmci_request,
.set_ios = mxcmci_set_ios,
@@ -808,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host = NULL;
struct resource *iores, *r;
int ret = 0, irq;
+ dma_cap_mask_t mask;
printk(KERN_INFO "i.MX SDHC driver\n");
@@ -883,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev)
writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
-#ifdef HAS_DMA
- host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
- if (host->dma < 0) {
- dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
- ret = -EBUSY;
- goto out_clk_put;
- }
-
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- ret = -EINVAL;
- goto out_free_dma;
+ if (r) {
+ host->dmareq = r->start;
+ host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
+ host->dma_data.priority = DMA_PRIO_LOW;
+ host->dma_data.dma_request = host->dmareq;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma = dma_request_channel(mask, filter, host);
+ if (host->dma)
+ mmc->max_seg_size = dma_get_max_seg_size(
+ host->dma->device->dev);
}
- ret = imx_dma_config_channel(host->dma,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
- r->start, 0);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n");
- goto out_free_dma;
- }
-#endif
+ if (!host->dma)
+ dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
+
INIT_WORK(&host->datawork, mxcmci_datawork);
ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
@@ -928,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev)
out_free_irq:
free_irq(host->irq, host);
out_free_dma:
-#ifdef HAS_DMA
- imx_dma_free(host->dma);
-#endif
+ if (host->dma)
+ dma_release_channel(host->dma);
out_clk_put:
clk_disable(host->clk);
clk_put(host->clk);
@@ -960,9 +986,10 @@ static int mxcmci_remove(struct platform_device *pdev)
free_irq(host->irq, host);
iounmap(host->base);
-#ifdef HAS_DMA
- imx_dma_free(host->dma);
-#endif
+
+ if (host->dma)
+ dma_release_channel(host->dma);
+
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 000000000000..99d39a6a1032
--- /dev/null
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,874 @@
+/*
+ * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
+ * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/mxs.h>
+#include <mach/common.h>
+#include <mach/dma.h>
+#include <mach/mmc.h>
+
+#define DRIVER_NAME "mxs-mmc"
+
+/* card detect polling timeout */
+#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
+
+#define SSP_VERSION_LATEST 4
+#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
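+/* ssp_is_old() relies on a local "struct mxs_mmc_host *host" being in scope at the call site */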
+
+/* SSP registers */
+#define HW_SSP_CTRL0 0x000
+#define BM_SSP_CTRL0_RUN (1 << 29)
+#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
+#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
+#define BM_SSP_CTRL0_READ (1 << 25)
+#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
+#define BP_SSP_CTRL0_BUS_WIDTH (22)
+#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
+#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
+#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
+#define BM_SSP_CTRL0_GET_RESP (1 << 17)
+#define BM_SSP_CTRL0_ENABLE (1 << 16)
+#define BP_SSP_CTRL0_XFER_COUNT (0)
+#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
+#define HW_SSP_CMD0 0x010
+#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
+#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
+#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
+#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
+#define BP_SSP_CMD0_BLOCK_SIZE (16)
+#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
+#define BP_SSP_CMD0_BLOCK_COUNT (8)
+#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
+#define BP_SSP_CMD0_CMD (0)
+#define BM_SSP_CMD0_CMD (0xff)
+#define HW_SSP_CMD1 0x020
+#define HW_SSP_XFER_SIZE 0x030
+#define HW_SSP_BLOCK_SIZE 0x040
+#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
+#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
+#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
+#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
+#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
+#define BP_SSP_TIMING_TIMEOUT (16)
+#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
+#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
+#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
+#define BP_SSP_TIMING_CLOCK_RATE (0)
+#define BM_SSP_TIMING_CLOCK_RATE (0xff)
+#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
+#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
+#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
+#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
+#define BM_SSP_CTRL1_POLARITY (1 << 9)
+#define BP_SSP_CTRL1_WORD_LENGTH (4)
+#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
+#define BP_SSP_CTRL1_SSP_MODE (0)
+#define BM_SSP_CTRL1_SSP_MODE (0xf)
+#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
+#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
+#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
+#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
+#define BP_SSP_VERSION_MAJOR (24)
+
+#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
+
+#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
+ BM_SSP_CTRL1_RESP_ERR_IRQ | \
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_CRC_IRQ | \
+ BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
+
+#define SSP_PIO_NUM 3
+
+struct mxs_mmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ void __iomem *base;
+ int irq;
+ struct resource *res;
+ struct resource *dma_res;
+ struct clk *clk;
+ unsigned int clk_rate;
+
+ struct dma_chan *dmach;
+ struct mxs_dma_data dma_data;
+ unsigned int dma_dir;
+ u32 ssp_pio_words[SSP_PIO_NUM];
+
+ unsigned int version;
+ unsigned char bus_width;
+ spinlock_t lock;
+ int sdio_irq_en;
+};
+
+static int mxs_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_mmc_platform_data *pdata =
+ mmc_dev(host->mmc)->platform_data;
+
+ if (!pdata)
+ return -EFAULT;
+
+ if (!gpio_is_valid(pdata->wp_gpio))
+ return -EINVAL;
+
+ return gpio_get_value(pdata->wp_gpio);
+}
+
+static int mxs_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ return !(readl(host->base + HW_SSP_STATUS) &
+ BM_SSP_STATUS_CARD_DETECT);
+}
+
+static void mxs_mmc_reset(struct mxs_mmc_host *host)
+{
+ u32 ctrl0, ctrl1;
+
+ mxs_reset_block(host->base);
+
+ ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
+ ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
+ BF_SSP(0x7, CTRL1_WORD_LENGTH) |
+ BM_SSP_CTRL1_DMA_ENABLE |
+ BM_SSP_CTRL1_POLARITY |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
+
+ writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
+ BF_SSP(2, TIMING_CLOCK_DIVIDE) |
+ BF_SSP(0, TIMING_CLOCK_RATE),
+ host->base + HW_SSP_TIMING);
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
+ }
+
+ writel(ctrl0, host->base + HW_SSP_CTRL0);
+ writel(ctrl1, host->base + HW_SSP_CTRL1);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd);
+
+static void mxs_mmc_request_done(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ struct mmc_request *mrq = host->mrq;
+
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
+ cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
+ cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+ } else {
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+ }
+ }
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ /*
+ * If there was an error on any block, we mark all
+ * data blocks as being in error.
+ */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ host->data = NULL;
+ if (mrq->stop) {
+ mxs_mmc_start_cmd(host, mrq->stop);
+ return;
+ }
+ }
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mxs_mmc_dma_irq_callback(void *param)
+{
+ struct mxs_mmc_host *host = param;
+
+ mxs_mmc_request_done(host);
+}
+
+static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
+{
+ struct mxs_mmc_host *host = dev_id;
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ u32 stat;
+
+ spin_lock(&host->lock);
+
+ stat = readl(host->base + HW_SSP_CTRL1);
+ writel(stat & MXS_MMC_IRQ_BITS,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+
+ if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
+ mmc_signal_sdio_irq(host->mmc);
+
+ spin_unlock(&host->lock);
+
+ if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
+ cmd->error = -EIO;
+
+ if (data) {
+ if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
+ data->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
+ data->error = -EILSEQ;
+ else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
+ data->error = -EIO;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
+ struct mxs_mmc_host *host, unsigned int append)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = host->data;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+
+ if (data) {
+ /* data */
+ dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ sgl = data->sg;
+ sg_len = data->sg_len;
+ } else {
+ /* pio */
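+ /* dma_dir is DMA_NONE on this path; the mxs-dma driver treats the sgl pointer as raw PIO words to write into the SSP registers */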
+ sgl = (struct scatterlist *) host->ssp_pio_words;
+ sg_len = SSP_PIO_NUM;
+ }
+
+ desc = host->dmach->device->device_prep_slave_sg(host->dmach,
+ sgl, sg_len, host->dma_dir, append);
+ if (desc) {
+ desc->callback = mxs_mmc_dma_irq_callback;
+ desc->callback_param = host;
+ } else {
+ if (data)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ }
+
+ return desc;
+}
+
+static void mxs_mmc_bc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ctrl0, cmd0, cmd1;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_ac(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ignore_crc, get_resp, long_resp;
+ u32 ctrl0, cmd0, cmd1;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
+{
+ const unsigned int ssp_timeout_mul = 4096;
+ /*
+ * Calculate ticks in ms since ns are large numbers
+ * and might overflow
+ */
+ const unsigned int clock_per_ms = clock_rate / 1000;
+ const unsigned int ms = ns / 1000;
+ const unsigned int ticks = ms * clock_per_ms;
+ const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
+
+ WARN_ON(ssp_ticks == 0);
+ return ssp_ticks;
+}
+
+static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = cmd->data;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl = data->sg, *sg;
+ unsigned int sg_len = data->sg_len;
+ int i;
+
+ unsigned short dma_data_dir, timeout;
+ unsigned int data_size = 0, log2_blksz;
+ unsigned int blocks = data->blocks;
+
+ u32 ignore_crc, get_resp, long_resp, read;
+ u32 ctrl0, cmd0, cmd1, val;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_data_dir = DMA_TO_DEVICE;
+ read = 0;
+ } else {
+ dma_data_dir = DMA_FROM_DEVICE;
+ read = BM_SSP_CTRL0_READ;
+ }
+
+ ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
+ ignore_crc | get_resp | long_resp |
+ BM_SSP_CTRL0_DATA_XFER | read |
+ BM_SSP_CTRL0_WAIT_FOR_IRQ |
+ BM_SSP_CTRL0_ENABLE;
+
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+
+ /* get logarithm to base 2 of block size for setting register */
+ log2_blksz = ilog2(data->blksz);
+
+ /*
+ * take special care of the case that data size from data->sg
+ * is not equal to blocks x blksz
+ */
+ for_each_sg(sgl, sg, sg_len, i)
+ data_size += sg->length;
+
+ if (data_size != data->blocks * data->blksz)
+ blocks = 1;
+
+ /* xfer count, block size and count need to be set differently */
+ if (ssp_is_old()) {
+ ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
+ cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
+ } else {
+ writel(data_size, host->base + HW_SSP_XFER_SIZE);
+ writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
+ host->base + HW_SSP_BLOCK_SIZE);
+ }
+
+ if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+ (cmd->opcode == SD_IO_RW_EXTENDED))
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ /* set the timeout count */
+ timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_TIMEOUT);
+ val |= BF_SSP(timeout, TIMING_TIMEOUT);
+ writel(val, host->base + HW_SSP_TIMING);
+
+ /* pio */
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ /* append data sg */
+ WARN_ON(host->data != NULL);
+ host->data = data;
+ host->dma_dir = dma_data_dir;
+ desc = mxs_mmc_prep_dma(host, 1);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = cmd;
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ mxs_mmc_bc(host);
+ break;
+ case MMC_CMD_BCR:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_AC:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_ADTC:
+ mxs_mmc_adtc(host);
+ break;
+ default:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: unknown MMC command\n", __func__);
+ break;
+ }
+}
+
+static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+ mxs_mmc_start_cmd(host, mrq->cmd);
+}
+
+static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
+{
+ unsigned int ssp_rate, bit_rate;
+ u32 div1, div2;
+ u32 val;
+
+ ssp_rate = clk_get_rate(host->clk);
+
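+ /* find an even divider div1 (2, 4, ..., 252) and a rate divider div2 (< 256) so that ssp_rate / (div1 * div2) comes close to the requested rate */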
+ for (div1 = 2; div1 < 254; div1 += 2) {
+ div2 = ssp_rate / rate / div1;
+ if (div2 < 0x100)
+ break;
+ }
+
+ if (div1 >= 254) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: cannot set clock to %d\n", __func__, rate);
+ return;
+ }
+
+ if (div2 == 0)
+ bit_rate = ssp_rate / div1;
+ else
+ bit_rate = ssp_rate / div1 / div2;
+
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
+ val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
+ val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
+ writel(val, host->base + HW_SSP_TIMING);
+
+ host->clk_rate = bit_rate;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
+ __func__, div1, div2, ssp_rate, bit_rate, rate);
+}
+
+static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ host->bus_width = 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_4)
+ host->bus_width = 1;
+ else
+ host->bus_width = 0;
+
+ if (ios->clock)
+ mxs_mmc_set_clk_rate(host, ios->clock);
+}
+
+static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->sdio_irq_en = enable;
+
+ if (enable) {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+
+ if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
+ } else {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static const struct mmc_host_ops mxs_mmc_ops = {
+ .request = mxs_mmc_request,
+ .get_ro = mxs_mmc_get_ro,
+ .get_cd = mxs_mmc_get_cd,
+ .set_ios = mxs_mmc_set_ios,
+ .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
+};
+
+static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct mxs_mmc_host *host = param;
+
+ if (!mxs_dma_is_apbh(chan))
+ return false;
+
+ if (chan->chan_id != host->dma_res->start)
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
+static int mxs_mmc_probe(struct platform_device *pdev)
+{
+ struct mxs_mmc_host *host;
+ struct mmc_host *mmc;
+ struct resource *iores, *dmares, *r;
+ struct mxs_mmc_platform_data *pdata;
+ int ret = 0, irq_err, irq_dma;
+ dma_cap_mask_t mask;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ irq_err = platform_get_irq(pdev, 0);
+ irq_dma = platform_get_irq(pdev, 1);
+ if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+ return -EINVAL;
+
+ r = request_mem_region(iores->start, resource_size(iores), pdev->name);
+ if (!r)
+ return -EBUSY;
+
+ mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out_release_mem;
+ }
+
+ host = mmc_priv(mmc);
+ host->base = ioremap(r->start, resource_size(r));
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out_mmc_free;
+ }
+
+ /* only the major version matters */
+ host->version = readl(host->base + HW_SSP_VERSION) >>
+ BP_SSP_VERSION_MAJOR;
+
+ host->mmc = mmc;
+ host->res = r;
+ host->dma_res = dmares;
+ host->irq = irq_err;
+ host->sdio_irq_en = 0;
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto out_iounmap;
+ }
+ clk_enable(host->clk);
+
+ mxs_mmc_reset(host);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_data.chan_irq = irq_dma;
+ host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
+ if (!host->dmach) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: failed to request dma\n", __func__);
+ ret = -ENODEV;
+ goto out_clk_put;
+ }
+
+ /* set mmc core parameters */
+ mmc->ops = &mxs_mmc_ops;
+ mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+
+ pdata = mmc_dev(host->mmc)->platform_data;
+ if (pdata) {
+ if (pdata->flags & SLOTF_8_BIT_CAPABLE)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ if (pdata->flags & SLOTF_4_BIT_CAPABLE)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ }
+
+ mmc->f_min = 400000;
+ mmc->f_max = 288000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ mmc->max_segs = 52;
+ mmc->max_blk_size = 1 << 0xf;
+ mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+ mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
+
+ platform_set_drvdata(pdev, mmc);
+
+ ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+ if (ret)
+ goto out_free_dma;
+
+ spin_lock_init(&host->lock);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_irq;
+
+ dev_info(mmc_dev(host->mmc), "initialized\n");
+
+ return 0;
+
+out_free_irq:
+ free_irq(host->irq, host);
+out_free_dma:
+ if (host->dmach)
+ dma_release_channel(host->dmach);
+out_clk_put:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+out_iounmap:
+ iounmap(host->base);
+out_mmc_free:
+ mmc_free_host(mmc);
+out_release_mem:
+ release_mem_region(iores->start, resource_size(iores));
+ return ret;
+}
+
+static int mxs_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct resource *res = host->res;
+
+ mmc_remove_host(mmc);
+
+ free_irq(host->irq, host);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (host->dmach)
+ dma_release_channel(host->dmach);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->base);
+
+ mmc_free_host(mmc);
+
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mxs_mmc_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ ret = mmc_suspend_host(mmc);
+
+ clk_disable(host->clk);
+
+ return ret;
+}
+
+static int mxs_mmc_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ clk_enable(host->clk);
+
+ ret = mmc_resume_host(mmc);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_mmc_pm_ops = {
+ .suspend = mxs_mmc_suspend,
+ .resume = mxs_mmc_resume,
+};
+#endif
+
+static struct platform_driver mxs_mmc_driver = {
+ .probe = mxs_mmc_probe,
+ .remove = mxs_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mxs_mmc_pm_ops,
+#endif
+ },
+};
+
+static int __init mxs_mmc_init(void)
+{
+ return platform_driver_register(&mxs_mmc_driver);
+}
+
+static void __exit mxs_mmc_exit(void)
+{
+ platform_driver_unregister(&mxs_mmc_driver);
+}
+
+module_init(mxs_mmc_init);
+module_exit(mxs_mmc_exit);
+
+MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 078fdf11af03..191332b845cc 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
#define MMC_TIMEOUT_MS 20
#define OMAP_MMC_MASTER_CLOCK 96000000
-#define DRIVER_NAME "mmci-omap-hs"
+#define DRIVER_NAME "omap_hsmmc"
/* Timeouts for entering power saving states on inactivity, msec */
#define OMAP_MMC_DISABLED_TIMEOUT 100
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
return ret;
}
-static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
+static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
return ret;
}
+static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
+ int vdd)
+{
+ return 0;
+}
+
static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
int vdd, int cardsleep)
{
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
return regulator_set_mode(host->vcc, mode);
}
-static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
+static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
int vdd, int cardsleep)
{
struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
return regulator_enable(host->vcc_aux);
}
+static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
+ int vdd, int cardsleep)
+{
+ return 0;
+}
+
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
break;
case OMAP_MMC2_DEVID:
case OMAP_MMC3_DEVID:
+ case OMAP_MMC5_DEVID:
/* Off-chip level shifting, or none */
- mmc_slot(host).set_power = omap_hsmmc_23_set_power;
- mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep;
+ mmc_slot(host).set_power = omap_hsmmc_235_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
break;
+ case OMAP_MMC4_DEVID:
+ mmc_slot(host).set_power = omap_hsmmc_4_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
+ break;
default:
pr_err("MMC%d configuration not supported!\n", host->id);
return -EINVAL;
@@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
- if (host->id == OMAP_MMC1_DEVID) {
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
/* Only MMC1 can interface at 3V without some flavor
* of external transceiver; but they all handle 1.8V.
*/
@@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
u32 hctl, capa, value;
/* Only MMC1 supports 3.0V */
- if (host->id == OMAP_MMC1_DEVID) {
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
hctl = SDVS30;
capa = VS30 | VS18;
} else {
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9b82910b9dbb..3b5248567973 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -15,9 +15,11 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-pltfm.h>
#include <mach/hardware.h>
+#include <mach/esdhc.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -30,6 +32,39 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
}
+static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
+{
+ /* fake CARD_PRESENT flag on mx25/35 */
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ struct esdhc_platform_data *boarddata =
+ host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->cd_gpio)
+ && gpio_get_value(boarddata->cd_gpio))
+ /* no card, if a valid gpio says so... */
+ val &= SDHCI_CARD_PRESENT;
+ else
+ /* ... in all other cases assume card is present */
+ val |= SDHCI_CARD_PRESENT;
+ }
+
+ return val;
+}
+
+static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+{
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE))
+ /*
+ * these interrupts won't work with a custom card_detect gpio
+ * (only applied to mx25/35)
+ */
+ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
+ writel(val, host->ioaddr + reg);
+}
+
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
if (unlikely(reg == SDHCI_HOST_VERSION))
@@ -100,10 +135,39 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
return clk_get_rate(pltfm_host->clk) / 256 / 16;
}
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ return gpio_get_value(boarddata->wp_gpio);
+ else
+ return -ENOSYS;
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_w = esdhc_readw_le,
+ .write_w = esdhc_writew_le,
+ .write_b = esdhc_writeb_le,
+ .set_clock = esdhc_set_clock,
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+};
+
+static irqreturn_t cd_irq(int irq, void *data)
+{
+ struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+ tasklet_schedule(&sdhost->card_tasklet);
+ return IRQ_HANDLED;
+}
+
static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct clk *clk;
+ int err;
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
@@ -116,32 +180,78 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
if (cpu_is_mx35() || cpu_is_mx51())
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */
- if (cpu_is_mx25() || cpu_is_mx35())
+ if (cpu_is_mx25() || cpu_is_mx35()) {
+ /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+ /* write_protect can't be routed to controller, use gpio */
+ sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
+ }
+
+ if (boarddata) {
+ err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
+ if (err) {
+ dev_warn(mmc_dev(host->mmc),
+ "no write-protect pin available!\n");
+ boarddata->wp_gpio = err;
+ }
+
+ err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
+ if (err) {
+ dev_warn(mmc_dev(host->mmc),
+ "no card-detect pin available!\n");
+ goto no_card_detect_pin;
+ }
+
+ /* i.MX5x has issues to be researched */
+ if (!cpu_is_mx25() && !cpu_is_mx35())
+ goto not_supported;
+
+ err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ mmc_hostname(host->mmc), host);
+ if (err) {
+ dev_warn(mmc_dev(host->mmc), "request irq error\n");
+ goto no_card_detect_irq;
+ }
+
+ sdhci_esdhc_ops.write_l = esdhc_writel_le;
+ sdhci_esdhc_ops.read_l = esdhc_readl_le;
+ /* Now we have a working card_detect again */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
+ return 0;
+ no_card_detect_irq:
+ gpio_free(boarddata->cd_gpio);
+ no_card_detect_pin:
+ boarddata->cd_gpio = err;
+ not_supported:
return 0;
}
static void esdhc_pltfm_exit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ gpio_free(boarddata->wp_gpio);
+
+ if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
+ gpio_free(boarddata->cd_gpio);
+
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
+ }
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
}
-static struct sdhci_ops sdhci_esdhc_ops = {
- .read_w = esdhc_readw_le,
- .write_w = esdhc_writew_le,
- .write_b = esdhc_writeb_le,
- .set_clock = esdhc_set_clock,
- .get_max_clock = esdhc_pltfm_get_max_clock,
- .get_min_clock = esdhc_pltfm_get_min_clock,
-};
-
struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA,
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
/* ADMA has issues. Might be fixable */
.ops = &sdhci_esdhc_ops,
.init = esdhc_pltfm_init,
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index afaf1bc4913a..c55aae828aac 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -19,7 +19,6 @@
*/
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
- SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
SDHCI_QUIRK_NO_BUSY_IRQ | \
SDHCI_QUIRK_NONSTANDARD_CLOCK | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index dd84124f4209..f9b611fc773e 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -124,17 +124,20 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
#endif
}
-static int __devinit sdhci_of_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit sdhci_of_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
- struct sdhci_of_data *sdhci_of_data = match->data;
+ struct sdhci_of_data *sdhci_of_data;
struct sdhci_host *host;
struct sdhci_of_host *of_host;
const __be32 *clk;
int size;
int ret;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+ sdhci_of_data = ofdev->dev.of_match->data;
+
if (!of_device_is_available(np))
return -ENODEV;
@@ -217,7 +220,7 @@ static const struct of_device_id sdhci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_of_match);
-static struct of_platform_driver sdhci_of_driver = {
+static struct platform_driver sdhci_of_driver = {
.driver = {
.name = "sdhci-of",
.owner = THIS_MODULE,
@@ -231,13 +234,13 @@ static struct of_platform_driver sdhci_of_driver = {
static int __init sdhci_of_init(void)
{
- return of_register_platform_driver(&sdhci_of_driver);
+ return platform_driver_register(&sdhci_of_driver);
}
module_init(sdhci_of_init);
static void __exit sdhci_of_exit(void)
{
- of_unregister_platform_driver(&sdhci_of_driver);
+ platform_driver_unregister(&sdhci_of_driver);
}
module_exit(sdhci_of_exit);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fcd0e1fcba44..08161f690ae8 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -73,7 +73,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
}
struct sdhci_of_data sdhci_esdhc = {
- .quirks = ESDHC_DEFAULT_QUIRKS,
+ /* card detection could be handled via GPIO */
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0dc905b20eee..2f8d46854acd 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -547,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0xe823,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_ENE,
.device = PCI_DEVICE_ID_ENE_CB712_SD,
.subvendor = PCI_ANY_ID,
@@ -900,9 +908,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
-
- resource_size_t addr;
-
int ret;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -949,7 +954,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
goto free;
}
- addr = pci_resource_start(pdev, bar);
host->ioaddr = pci_ioremap_bar(pdev, bar);
if (!host->ioaddr) {
dev_err(&pdev->dev, "failed to remap registers\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 5309ab95aada..69e3ee321eb5 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -499,6 +499,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+ /* This host supports the Auto CMD12 */
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 4823ee94a63f..f7e1f964395f 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -169,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
- goto out_cd;
+ goto out_irq;
}
tegra_gpio_enable(plat->wp_gpio);
gpio_direction_input(plat->wp_gpio);
@@ -195,6 +195,9 @@ out_wp:
gpio_free(plat->wp_gpio);
}
+out_irq:
+ if (gpio_is_valid(plat->cd_gpio))
+ free_irq(gpio_to_irq(plat->cd_gpio), host);
out_cd:
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
@@ -225,6 +228,7 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
}
if (gpio_is_valid(plat->cd_gpio)) {
+ free_irq(gpio_to_irq(plat->cd_gpio), host);
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 12884c270171..af97015a2fc7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -169,7 +169,7 @@ struct sh_mmcif_host {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct completion dma_complete;
- unsigned int dma_sglen;
+ bool dma_active;
};
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
@@ -194,10 +194,12 @@ static void mmcif_dma_complete(void *arg)
return;
if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->data->sg, host->data->sg_len,
DMA_FROM_DEVICE);
else
- dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->data->sg, host->data->sg_len,
DMA_TO_DEVICE);
complete(&host->dma_complete);
@@ -211,9 +213,10 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ DMA_FROM_DEVICE);
if (ret > 0) {
- host->dma_sglen = ret;
+ host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -221,14 +224,9 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (desc) {
desc->callback = mmcif_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, host->data->sg_len, ret, cookie);
@@ -238,7 +236,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
- host->dma_sglen = 0;
+ host->dma_active = false;
dma_release_channel(chan);
/* Free the Tx channel too */
chan = host->chan_tx;
@@ -263,9 +261,10 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ DMA_TO_DEVICE);
if (ret > 0) {
- host->dma_sglen = ret;
+ host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -273,14 +272,9 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (desc) {
desc->callback = mmcif_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, host->data->sg_len, ret, cookie);
@@ -290,7 +284,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
- host->dma_sglen = 0;
+ host->dma_active = false;
dma_release_channel(chan);
/* Free the Rx channel too */
chan = host->chan_rx;
@@ -317,7 +311,7 @@ static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
{
- host->dma_sglen = 0;
+ host->dma_active = false;
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
@@ -364,7 +358,7 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
dma_release_channel(chan);
}
- host->dma_sglen = 0;
+ host->dma_active = false;
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
@@ -753,7 +747,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
}
sh_mmcif_get_response(host, cmd);
if (host->data) {
- if (!host->dma_sglen) {
+ if (!host->dma_active) {
ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
} else {
long time =
@@ -765,7 +759,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
ret = time;
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
- host->dma_sglen = 0;
+ host->dma_active = false;
}
if (ret < 0)
mrq->data->bytes_xfered = 0;
@@ -850,15 +844,15 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct sh_mmcif_host *host = mmc_priv(mmc);
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
- if (ios->power_mode == MMC_POWER_OFF) {
+ if (ios->power_mode == MMC_POWER_UP) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
- if (p->down_pwr)
+ if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
p->down_pwr(host->pd);
return;
- } else if (ios->power_mode == MMC_POWER_UP) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
}
if (ios->clock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e3c6ef208391..39918cccab68 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -152,7 +152,6 @@ struct tmio_mmc_host {
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
- unsigned int dma_sglen;
u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
struct scatterlist bounce_sg;
#endif
@@ -228,36 +227,40 @@ static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
#ifdef CONFIG_MMC_DEBUG
-#define STATUS_TO_TEXT(a) \
+#define STATUS_TO_TEXT(a, status, i) \
do { \
- if (status & TMIO_STAT_##a) \
+ if (status & TMIO_STAT_##a) { \
+ if (i++) \
+ printk(" | "); \
printk(#a); \
+ } \
} while (0)
void pr_debug_status(u32 status)
{
+ int i = 0;
printk(KERN_DEBUG "status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE);
- STATUS_TO_TEXT(CARD_INSERT);
- STATUS_TO_TEXT(SIGSTATE);
- STATUS_TO_TEXT(WRPROTECT);
- STATUS_TO_TEXT(CARD_REMOVE_A);
- STATUS_TO_TEXT(CARD_INSERT_A);
- STATUS_TO_TEXT(SIGSTATE_A);
- STATUS_TO_TEXT(CMD_IDX_ERR);
- STATUS_TO_TEXT(STOPBIT_ERR);
- STATUS_TO_TEXT(ILL_FUNC);
- STATUS_TO_TEXT(CMD_BUSY);
- STATUS_TO_TEXT(CMDRESPEND);
- STATUS_TO_TEXT(DATAEND);
- STATUS_TO_TEXT(CRCFAIL);
- STATUS_TO_TEXT(DATATIMEOUT);
- STATUS_TO_TEXT(CMDTIMEOUT);
- STATUS_TO_TEXT(RXOVERFLOW);
- STATUS_TO_TEXT(TXUNDERRUN);
- STATUS_TO_TEXT(RXRDY);
- STATUS_TO_TEXT(TXRQ);
- STATUS_TO_TEXT(ILL_ACCESS);
+ STATUS_TO_TEXT(CARD_REMOVE, status, i);
+ STATUS_TO_TEXT(CARD_INSERT, status, i);
+ STATUS_TO_TEXT(SIGSTATE, status, i);
+ STATUS_TO_TEXT(WRPROTECT, status, i);
+ STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
+ STATUS_TO_TEXT(CARD_INSERT_A, status, i);
+ STATUS_TO_TEXT(SIGSTATE_A, status, i);
+ STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
+ STATUS_TO_TEXT(STOPBIT_ERR, status, i);
+ STATUS_TO_TEXT(ILL_FUNC, status, i);
+ STATUS_TO_TEXT(CMD_BUSY, status, i);
+ STATUS_TO_TEXT(CMDRESPEND, status, i);
+ STATUS_TO_TEXT(DATAEND, status, i);
+ STATUS_TO_TEXT(CRCFAIL, status, i);
+ STATUS_TO_TEXT(DATATIMEOUT, status, i);
+ STATUS_TO_TEXT(CMDTIMEOUT, status, i);
+ STATUS_TO_TEXT(RXOVERFLOW, status, i);
+ STATUS_TO_TEXT(TXUNDERRUN, status, i);
+ STATUS_TO_TEXT(RXRDY, status, i);
+ STATUS_TO_TEXT(TXRQ, status, i);
+ STATUS_TO_TEXT(ILL_ACCESS, status, i);
printk("\n");
}
@@ -300,8 +303,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
/*
* Testing on sh-mobile showed that SDIO IRQs are unmasked when
@@ -324,8 +326,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
@@ -666,8 +667,7 @@ out:
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
@@ -796,8 +796,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
@@ -825,23 +824,16 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
sg = host->sg_ptr;
}
- ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
- if (ret > 0) {
- host->dma_sglen = ret;
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
@@ -873,8 +865,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
@@ -906,21 +897,15 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
sg = host->sg_ptr;
}
- ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
- if (ret > 0) {
- host->dma_sglen = ret;
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- }
+ cookie = dmaengine_submit(desc);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
@@ -964,7 +949,7 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
struct dma_chan *chan = host->chan_tx;
- chan->device->device_issue_pending(chan);
+ dma_async_issue_pending(chan);
}
static void tmio_tasklet_fn(unsigned long arg)
@@ -978,10 +963,12 @@ static void tmio_tasklet_fn(unsigned long arg)
goto out;
if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
@@ -1071,8 +1058,7 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
@@ -1177,8 +1163,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
@@ -1187,8 +1172,7 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
if (!pdata->get_cd)
return -ENOSYS;
@@ -1207,7 +1191,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
@@ -1222,7 +1206,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_mmc_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret = 0;
@@ -1245,7 +1229,7 @@ out:
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct tmio_mmc_data *pdata;
struct resource *res_ctl;
struct tmio_mmc_host *host;
@@ -1260,7 +1244,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (!res_ctl)
goto out;
- pdata = cell->driver_data;
+ pdata = mfd_get_data(dev);
if (!pdata || !pdata->hclk)
goto out;
@@ -1360,7 +1344,7 @@ out:
static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
platform_set_drvdata(dev, NULL);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 9ed84ddb4780..8c5b4881ccd6 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = {
static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
{
- void __iomem *addrbase;
unsigned long flags;
u8 gatt;
- addrbase = host->pcictrl_mmiobase;
-
spin_lock_irqsave(&host->lock, flags);
via_save_pcictrlreg(host);
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 8506578e6a35..3db0cb083d31 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -216,8 +216,7 @@ static void __devinit of_free_probes(const char **probes)
}
#endif
-static int __devinit of_flash_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit of_flash_probe(struct platform_device *dev)
{
#ifdef CONFIG_MTD_PARTITIONS
const char **part_probe_types;
@@ -225,7 +224,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
struct device_node *dp = dev->dev.of_node;
struct resource res;
struct of_flash *info;
- const char *probe_type = match->data;
+ const char *probe_type;
const __be32 *width;
int err;
int i;
@@ -235,6 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev,
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;
+ if (!dev->dev.of_match)
+ return -EINVAL;
+ probe_type = dev->dev.of_match->data;
+
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
/*
@@ -418,7 +421,7 @@ static struct of_device_id of_flash_match[] = {
};
MODULE_DEVICE_TABLE(of, of_flash_match);
-static struct of_platform_driver of_flash_driver = {
+static struct platform_driver of_flash_driver = {
.driver = {
.name = "of-flash",
.owner = THIS_MODULE,
@@ -430,12 +433,12 @@ static struct of_platform_driver of_flash_driver = {
static int __init of_flash_init(void)
{
- return of_register_platform_driver(&of_flash_driver);
+ return platform_driver_register(&of_flash_driver);
}
static void __exit of_flash_exit(void)
{
- of_unregister_platform_driver(&of_flash_driver);
+ platform_driver_unregister(&of_flash_driver);
}
module_init(of_flash_init);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3582ba1f9b09..3f1cb328a574 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -148,7 +148,7 @@ static const struct of_device_id uflash_match[] = {
MODULE_DEVICE_TABLE(of, uflash_match);
-static struct of_platform_driver uflash_driver = {
+static struct platform_driver uflash_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -160,12 +160,12 @@ static struct of_platform_driver uflash_driver = {
static int __init uflash_init(void)
{
- return of_register_platform_driver(&uflash_driver);
+ return platform_driver_register(&uflash_driver);
}
static void __exit uflash_exit(void)
{
- of_unregister_platform_driver(&uflash_driver);
+ platform_driver_unregister(&uflash_driver);
}
module_init(uflash_init);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c89592239bc7..178e2006063d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
-config MTD_NAND_OMAP_PREFETCH
- bool "GPMC prefetch support for NAND Flash device"
- depends on MTD_NAND_OMAP2
- default y
- help
- The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
- to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
- depends on MTD_NAND_OMAP_PREFETCH
- bool "DMA mode"
- default n
- help
- The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
- or in DMA interrupt mode.
- Say y for DMA mode or MPU mode will be used
-
config MTD_NAND_IDS
tristate
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index efdcca94ce55..073ee026a17c 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -217,8 +217,7 @@ err:
return ret;
}
-static int __devinit fun_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource io_res;
@@ -360,7 +359,7 @@ static const struct of_device_id of_fun_match[] = {
};
MODULE_DEVICE_TABLE(of, of_fun_match);
-static struct of_platform_driver of_fun_driver = {
+static struct platform_driver of_fun_driver = {
.driver = {
.name = "fsl,upm-nand",
.owner = THIS_MODULE,
@@ -372,13 +371,13 @@ static struct of_platform_driver of_fun_driver = {
static int __init fun_module_init(void)
{
- return of_register_platform_driver(&of_fun_driver);
+ return platform_driver_register(&of_fun_driver);
}
module_init(fun_module_init);
static void __exit fun_module_exit(void)
{
- of_unregister_platform_driver(&of_fun_driver);
+ platform_driver_unregister(&of_fun_driver);
}
module_exit(fun_module_exit);
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 469e649c911c..c2f95437e5e9 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -650,8 +650,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *rootnode, *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -891,7 +890,7 @@ static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
{},
};
-static struct of_platform_driver mpc5121_nfc_driver = {
+static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
.remove = __devexit_p(mpc5121_nfc_remove),
.driver = {
@@ -903,14 +902,14 @@ static struct of_platform_driver mpc5121_nfc_driver = {
static int __init mpc5121_nfc_init(void)
{
- return of_register_platform_driver(&mpc5121_nfc_driver);
+ return platform_driver_register(&mpc5121_nfc_driver);
}
module_init(mpc5121_nfc_init);
static void __exit mpc5121_nfc_cleanup(void)
{
- of_unregister_platform_driver(&mpc5121_nfc_driver);
+ platform_driver_unregister(&mpc5121_nfc_driver);
}
module_exit(mpc5121_nfc_cleanup);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index ef932ba55a0b..5ae1d9ee2cf1 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -722,9 +722,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
/*
* MXC NANDFC can only perform full page+spare or
* spare-only read/write. When the upper layers
- * layers perform a read/write buf operation,
- * we will used the saved column address to index into
- * the full page.
+ * perform a read/write buf operation, the saved column
+ * address is used to index into the full page.
*/
host->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index c9ae0a5023b6..bbe6d451290d 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -225,8 +225,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = &ndfc_ctrl;
const __be32 *reg;
@@ -292,7 +291,7 @@ static const struct of_device_id ndfc_match[] = {
};
MODULE_DEVICE_TABLE(of, ndfc_match);
-static struct of_platform_driver ndfc_driver = {
+static struct platform_driver ndfc_driver = {
.driver = {
.name = "ndfc",
.owner = THIS_MODULE,
@@ -304,12 +303,12 @@ static struct of_platform_driver ndfc_driver = {
static int __init ndfc_nand_init(void)
{
- return of_register_platform_driver(&ndfc_driver);
+ return platform_driver_register(&ndfc_driver);
}
static void __exit ndfc_nand_exit(void)
{
- of_unregister_platform_driver(&ndfc_driver);
+ platform_driver_unregister(&ndfc_driver);
}
module_init(ndfc_nand_init);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 15682ec8530e..4e33972ad17a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
#include <plat/nand.h>
#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
@@ -96,26 +98,19 @@
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info is generated at runtime depending on the ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define a generic bad / good block scan pattern which is used
+ * while scanning a device for factory-marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
struct omap_nand_info {
struct nand_hw_control controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
unsigned long phys_base;
struct completion comp;
int dma_ch;
+ int gpmc_irq;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
};
/**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t pref_count = 0, w_count = 0;
+ uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p;
+ unsigned long tim, limit;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
iowrite16(*p++, info->nand.IO_ADDR_W);
}
/* wait for data to flushed-out before reset the prefetch */
- do {
- pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (pref_count);
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
}
}
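The hunk above replaces an open-ended polling loop with a bounded busy-wait. A minimal sketch of the pattern, assuming a hypothetical hw_busy() helper standing in for the gpmc_read_status(GPMC_PREFETCH_COUNT) call:

	/* Sketch only: hw_busy() is illustrative, not a kernel API. */
	unsigned long tim = 0;
	unsigned long limit = loops_per_jiffy *
			      msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS);

	while (hw_busy() && tim++ < limit)
		cpu_relax();	/* spin, but give up after roughly the timeout */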
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
* omap_nand_dma_cb: callback on the completion of dma transfer
* @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t prefetch_status = 0;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
dma_addr_t dma_addr;
int ret;
+ unsigned long tim, limit;
- /* The fifo depth is 64 bytes. We have a sync at each frame and frame
- * length is 64 bytes.
+ /* The fifo depth is 64 bytes max.
+ * But configure the FIFO threshold to 32 to get a sync at each frame,
+ * so the frame length is 32 bytes.
*/
int buf_len = len >> 6;
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
- /* PFPW engine is busy, use cpu copy methode */
+ /* PFPW engine is busy, use cpu copy method */
goto out_copy;
init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
- do {
- prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (prefetch_status);
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
@@ -426,14 +436,6 @@ out_copy:
: omap_write_buf8(mtd, (u_char *) addr, len);
return 0;
}
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
-{
- return 0;
-}
-#endif
/**
* omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+ u32 irq_stat;
+
+ irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = bytes & 0xFFFC; /* io in multiples of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (irq_stat & 0x2)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.IO_ADDR_W,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->nand.IO_ADDR_R,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (irq_stat & 0x2)
+ goto done;
+ }
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+ /* disable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+ /* clear status */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer : size=24 */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+ /* wait for the data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
/**
* omap_verify_buf - Verify chip data against buffer
* @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
return 0;
}
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}
-#endif
-
/**
* omap_wait - wait until the command is done
* @mtd: MTD device structure
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
int err;
+ int i, offset;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->mtd.name = dev_name(&pdev->dev);
info->mtd.owner = THIS_MODULE;
- info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+ info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
/* NAND write protect off */
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.chip_delay = 50;
}
- if (use_prefetch) {
-
+ switch (pdata->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
info->nand.read_buf = omap_read_buf_pref;
info->nand.write_buf = omap_write_buf_pref;
- if (use_dma) {
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- printk(KERN_WARNING "DMA request failed."
- " Non-dma data transfer mode\n");
- } else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
- }
- }
- } else {
+ break;
+
+ case NAND_OMAP_POLLED:
if (info->nand.options & NAND_BUSWIDTH_16) {
info->nand.read_buf = omap_read_buf16;
info->nand.write_buf = omap_write_buf16;
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.read_buf = omap_read_buf8;
info->nand.write_buf = omap_write_buf8;
}
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+ omap_nand_dma_cb, &info->comp, &info->dma_ch);
+ if (err < 0) {
+ info->dma_ch = -1;
+ dev_err(&pdev->dev, "DMA request failed!\n");
+ goto out_release_mem_region;
+ } else {
+ omap_set_dma_dest_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+
+ info->nand.read_buf = omap_read_buf_dma_pref;
+ info->nand.write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ err = request_irq(pdata->gpmc_irq,
+ omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ pdata->gpmc_irq, err);
+ goto out_release_mem_region;
+ } else {
+ info->gpmc_irq = pdata->gpmc_irq;
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ err = -EINVAL;
+ goto out_release_mem_region;
}
- info->nand.verify_buf = omap_verify_buf;
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.verify_buf = omap_verify_buf;
-#else
- info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+ /* select the ecc type */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+ else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ }
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
}
}
+ /* ROM code layout */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ offset = 2;
+ else {
+ offset = 1;
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ }
+ omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+ for (i = 0; i < omap_oobinfo.eccbytes; i++)
+ omap_oobinfo.eccpos[i] = i+offset;
+
+ omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+ omap_oobinfo.oobfree->length = info->mtd.oobsize -
+ (offset + omap_oobinfo.eccbytes);
+
+ info->nand.ecc.layout = &omap_oobinfo;
+ }
+
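As a worked example of the layout computed above, assuming a hypothetical part with a 64-byte OOB area: eccbytes = 3 * (64 / 16) = 12, so on an 8-bit device (offset = 1) the ECC occupies OOB bytes 1..12 and oobfree starts at offset 13 with length 51, while on a 16-bit device (offset = 2) the ECC sits at bytes 2..13 and oobfree starts at 14 with length 50.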
#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev)
mtd);
platform_set_drvdata(pdev, NULL);
- if (use_dma)
+ if (info->dma_ch != -1)
omap_free_dma(info->dma_ch);
+ if (info->gpmc_irq)
+ free_irq(info->gpmc_irq, info);
+
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = {
static int __init omap_nand_init(void)
{
- printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ pr_info("%s driver initializing\n", DRIVER_NAME);
- /* This check is required if driver is being
- * loaded run time as a module
- */
- if ((1 == use_dma) && (0 == use_prefetch)) {
- printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
- "without use_prefetch'. Prefetch will not be"
- " used in either mode (mpu or dma)\n");
- }
return platform_driver_register(&omap_nand_driver);
}
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index bb277a54986f..59efa829ef24 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,8 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->dev.of_node;
@@ -219,7 +218,7 @@ static const struct of_device_id pasemi_nand_match[] =
MODULE_DEVICE_TABLE(of, pasemi_nand_match);
-static struct of_platform_driver pasemi_nand_driver =
+static struct platform_driver pasemi_nand_driver =
{
.driver = {
.name = (char*)driver_name,
@@ -232,13 +231,13 @@ static struct of_platform_driver pasemi_nand_driver =
static int __init pasemi_nand_init(void)
{
- return of_register_platform_driver(&pasemi_nand_driver);
+ return platform_driver_register(&pasemi_nand_driver);
}
module_init(pasemi_nand_init);
static void __exit pasemi_nand_exit(void)
{
- of_unregister_platform_driver(&pasemi_nand_driver);
+ platform_driver_unregister(&pasemi_nand_driver);
}
module_exit(pasemi_nand_exit);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a8e403eebedb..a853548986f0 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -162,8 +162,7 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
@@ -300,7 +299,7 @@ static const struct of_device_id socrates_nand_match[] =
MODULE_DEVICE_TABLE(of, socrates_nand_match);
-static struct of_platform_driver socrates_nand_driver = {
+static struct platform_driver socrates_nand_driver = {
.driver = {
.name = "socrates_nand",
.owner = THIS_MODULE,
@@ -312,12 +311,12 @@ static struct of_platform_driver socrates_nand_driver = {
static int __init socrates_nand_init(void)
{
- return of_register_platform_driver(&socrates_nand_driver);
+ return platform_driver_register(&socrates_nand_driver);
}
static void __exit socrates_nand_exit(void)
{
- of_unregister_platform_driver(&socrates_nand_driver);
+ platform_driver_unregister(&socrates_nand_driver);
}
module_init(socrates_nand_init);
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 3041d1f7ae3f..38fb16771f85 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,8 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
- struct tmio_nand_data *data = cell->driver_data;
+ struct tmio_nand_data *data = mfd_get_data(dev);
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
@@ -516,7 +515,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4dbd0f58eebf..4f426195f8db 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,7 +32,7 @@ config MTD_ONENAND_OMAP2
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
+ depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4
help
Support for a OneNAND flash device connected to an Samsung SOC.
S3C64XX/S5PC100 use command mapping method.
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ac31f461cc1c..ec26399e3cf2 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
struct completion dma_done;
int dma_channel;
int freq;
- int (*setup)(void __iomem *base, int freq);
+ int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
};
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
- if ((intr & intr_flags) != intr_flags) {
- wait_err("timeout", state, ctrl, intr);
- return -EIO;
- }
- return 0;
+ if ((intr & intr_flags) == intr_flags)
+ return 0;
+ /* Continue in the wait-for-interrupt branch */
}
if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
/* DMA is not in use so this is all that is needed */
/* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, c->freq);
+ ret = c->setup(c->onenand.base, &c->freq);
return ret;
}
@@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, c->freq);
+ r = pdata->onenand_setup(c->onenand.base, &c->freq);
if (r < 0) {
dev_err(&pdev->dev, "Onenand platform setup failed: "
"%d\n", r);
@@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p\n", c->gpmc_cs, c->phys_base,
- c->onenand.base);
+ "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+ c->onenand.base, c->freq);
c->pdev = pdev;
c->mtd.name = dev_name(&pdev->dev);
@@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- switch ((c->onenand.version_id >> 4) & 0xf) {
- case 0:
- c->freq = 40;
- break;
- case 1:
- c->freq = 54;
- break;
- case 2:
- c->freq = 66;
- break;
- case 3:
- c->freq = 83;
- break;
- case 4:
- c->freq = 104;
- break;
- }
-
#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5ebe280225d6..ec0ad19c691a 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -711,7 +711,7 @@ static int io_init(struct ubi_device *ubi)
}
/* Similar for the data offset */
- ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
@@ -923,6 +923,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+ dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
+ dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
err = io_init(ubi);
if (err)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 811775aa8ee8..889e25c49323 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -146,6 +146,28 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
if (err)
return err;
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from previous operation, e.g., read from
+ * another PEB previously. The data looks as expected, e.g., if we
+ * just do not read anything and return - the caller would not
+ * notice this. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns us success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data to the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
+
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
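To make the poisoning concrete: if the first buffer byte still held, say, 0x55 from an earlier read, then 0x55 ^ 0xFF == 0xAA afterwards, so a driver that reports success without actually filling the buffer leaves visibly wrong data behind, and stale contents can no longer slip past the magic-number and CRC-32 checks by accident.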
@@ -166,7 +188,7 @@ retry:
return UBI_IO_BITFLIPS;
}
- if (read != len && retries++ < UBI_IO_RETRIES) {
+ if (retries++ < UBI_IO_RETRIES) {
dbg_io("error %d%s while reading %d bytes from PEB %d:%d,"
" read only %zd bytes, retry",
err, errstr, len, pnum, offset, read);
@@ -480,6 +502,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
size_t written;
loff_t addr;
uint32_t data = 0;
+ /*
+ * Note, we cannot generally define VID header buffers on the stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate a VID header buffer instead.
+ */
struct ubi_vid_hdr vid_hdr;
/*
@@ -507,11 +536,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* PEB.
*/
err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) {
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF) {
struct ubi_ec_hdr ec_hdr;
err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF)
/*
* Both VID and EC headers are corrupted, so we can
* safely erase this PEB and not afraid that it will be
@@ -1294,10 +1325,12 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len)
{
int err, i;
+ size_t read;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
mutex_lock(&ubi->dbg_buf_mutex);
- err = ubi_io_read(ubi, ubi->dbg_peb_buf, pnum, offset, len);
- if (err)
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
+ if (err && err != -EUCLEAN)
goto out_unlock;
for (i = 0; i < len; i++) {
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 79ca304fc4db..b65cc088fde5 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -39,32 +39,46 @@
* eraseblocks are put to the @free list and the physical eraseblock to be
* erased are put to the @erase list.
*
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
* UBI tries to distinguish between 2 types of corruptions.
- * 1. Corruptions caused by power cuts. These are harmless and expected
- * corruptions and UBI tries to handle them gracefully, without printing too
- * many warnings and error messages. The idea is that we do not lose
- * important data in these case - we may lose only the data which was being
- * written to the media just before the power cut happened, and the upper
- * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts
- * these PEBs to the head of the @erase list and they are scheduled for
- * erasure.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in this case
+ * - we may lose only the data which was being written to the media just before
+ * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
+ * handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
- * Obviously, this lessens the amount of available PEBs, and if at some
- * point UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly
- * informs about such PEBs every time the MTD device is attached.
+ * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID
- * header is corrupted and the data area does not contain all 0xFFs, and there
- * were not bit-flips or integrity errors while reading the data area. Otherwise
- * UBI assumes (1.). The assumptions are:
- * o if the data area contains only 0xFFs, there is no data, and it is safe
- * to just erase this PEB.
- * o if the data area has bit-flips and data integrity errors (ECC errors on
+ * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
+ * if the VID header is corrupted and the data area does not contain all 0xFFs,
+ * and there were no bit-flips or integrity errors while reading the data area.
+ * Otherwise UBI assumes corruption type 1. So the decision criteria are as
+ * follows.
+ * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
- * happened.
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
*/
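Read as a decision rule, the criteria above reduce to roughly the following (a sketch only; the predicates are informal stand-ins, not UBI helpers):

	/* Classify a PEB whose VID header failed its CRC check. */
	if (data_area_is_all_0xff)
		corruption_type = 1;	/* nothing was written - safe to erase */
	else if (read_saw_bitflips_or_ecc_errors)
		corruption_type = 1;	/* likely an interrupted erase - a guess */
	else
		corruption_type = 2;	/* unexpected corruption - preserve the PEB */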
#include <linux/err.h>
@@ -115,7 +129,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
} else
BUG();
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -144,7 +158,7 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -553,7 +567,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (err)
return err;
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -1152,9 +1166,15 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
si->volumes = RB_ROOT;
err = -ENOMEM;
+ si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
+ sizeof(struct ubi_scan_leb),
+ 0, 0, NULL);
+ if (!si->scan_leb_slab)
+ goto out_si;
+
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_si;
+ goto out_slab;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
@@ -1215,6 +1235,8 @@ out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
+out_slab:
+ kmem_cache_destroy(si->scan_leb_slab);
out_si:
ubi_scan_destroy_si(si);
return ERR_PTR(err);
@@ -1223,11 +1245,12 @@ out_si:
/**
* destroy_sv - free the scanning volume information
* @sv: scanning volume information
+ * @si: scanning information
*
* This function destroys the volume RB-tree (@sv->root) and the scanning
* volume information.
*/
-static void destroy_sv(struct ubi_scan_volume *sv)
+static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
{
struct ubi_scan_leb *seb;
struct rb_node *this = sv->root.rb_node;
@@ -1247,7 +1270,7 @@ static void destroy_sv(struct ubi_scan_volume *sv)
this->rb_right = NULL;
}
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
}
kfree(sv);
@@ -1265,19 +1288,19 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
/* Destroy the volume RB-tree */
@@ -1298,10 +1321,11 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
rb->rb_right = NULL;
}
- destroy_sv(sv);
+ destroy_sv(si, sv);
}
}
+ kmem_cache_destroy(si->scan_leb_slab);
kfree(si);
}
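The scan.c hunks above move the per-LEB scan objects from kmalloc()/kfree() onto a dedicated slab cache. A minimal sketch of that create/alloc/free/destroy lifecycle, using an illustrative struct rather than the UBI types:

	#include <linux/slab.h>

	struct demo_obj {
		int id;
	};

	static struct kmem_cache *demo_slab;

	static int demo(void)
	{
		struct demo_obj *obj;

		demo_slab = kmem_cache_create("demo_slab", sizeof(struct demo_obj),
					      0, 0, NULL);
		if (!demo_slab)
			return -ENOMEM;

		obj = kmem_cache_alloc(demo_slab, GFP_KERNEL);
		if (!obj) {
			kmem_cache_destroy(demo_slab);
			return -ENOMEM;
		}

		obj->id = 1;				/* ... use the object ... */

		kmem_cache_free(demo_slab, obj);	/* replaces kfree() */
		kmem_cache_destroy(demo_slab);		/* once no objects remain */
		return 0;
	}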
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index a3264f0bef2b..d48aef15ab5d 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -109,6 +109,7 @@ struct ubi_scan_volume {
* @mean_ec: mean erase counter value
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
+ * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
*
* This data structure contains the result of scanning and may be used by other
* UBI sub-systems to build final UBI data structures, further error-recovery
@@ -134,6 +135,7 @@ struct ubi_scan_info {
int mean_ec;
uint64_t ec_sum;
int ec_count;
+ struct kmem_cache *scan_leb_slab;
};
struct ubi_device;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 03823327db25..1bf2d7ce83dc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
config AX88796
tristate "ASIX AX88796 NE2000 clone support"
depends on ARM || MIPS || SUPERH
- select CRC32
- select MII
+ select PHYLIB
+ select MDIO_BITBANG
help
AX88796 driver, using platform bus to provide
chip detection and resources
@@ -1944,7 +1944,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+ IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+ default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -3422,4 +3423,18 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+config VBUS_ENET
+ tristate "VBUS Ethernet Driver"
+ default n
+ depends on VBUS_PROXY
+ help
+ A virtualized 802.x network device based on the VBUS
+ "virtual-ethernet" interface. It can be used with any
+ hypervisor/kernel that supports the vbus+venet protocol.
+
+config VBUS_ENET_DEBUG
+ bool "Enable Debugging"
+ depends on VBUS_ENET
+ default n
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90738d13994..adc48c45a741 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -294,6 +294,7 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_NIU) += niu.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VBUS_ENET) += vbus-enet.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 9ab58097fa2e..7cb375e0e29c 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -265,7 +265,7 @@ struct atl1c_recv_ret_status {
__le32 word3;
};
-/* RFD desciptor */
+/* RFD descriptor */
struct atl1c_rx_free_desc {
__le64 buffer_addr;
};
@@ -531,7 +531,7 @@ struct atl1c_rfd_ring {
struct atl1c_buffer *buffer_info;
};
-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
struct atl1c_rrd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf672009948..23f2ab0f2fa8 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
*/
static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
{
- u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+ u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
~GIGA_CR_1000T_SPEED_MASK;
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
}
if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
- atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+ atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
return -1;
return 0;
}
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
"Error Setting up Auto-Negotiation\n");
return ret_val;
}
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
break;
case MEDIA_TYPE_100M_FULL:
- mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
break;
case MEDIA_TYPE_100M_HALF:
- mii_bmcr_data |= BMCR_SPEED_100;
+ mii_bmcr_data |= BMCR_SPEED100;
break;
case MEDIA_TYPE_10M_FULL:
- mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_FULLDPLX;
break;
case MEDIA_TYPE_10M_HALF:
- mii_bmcr_data |= BMCR_SPEED_10;
break;
default:
if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
err = atl1c_phy_setup_adv(hw);
if (err)
return err;
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
}
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa1..655fc6c4a8a4 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
-/* PHY Control Register */
-#define MII_BMCR 0x00
-#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN 0x0800 /* Power down */
-#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK 0x2040
-#define BMCR_SPEED_1000 0x0040
-#define BMCR_SPEED_100 0x2000
-#define BMCR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_BMSR 0x01
-#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
#define L1D_MPW_PHYID3 0xD01E /* V8 */
/* Autoneg Advertisement Register */
-#define MII_ADVERTISE 0x04
-#define ADVERTISE_SPEED_MASK 0x01E0
-#define ADVERTISE_DEFAULT_CAP 0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+ (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
/* 1000BASE-T Control Register */
-#define MII_GIGA_CR 0x09
#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 3824382faecc..7d9d5067a65c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1102,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
- hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+ hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
- hw->dmar_block = min(max_pay_load, hw->dmar_block);
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
TXQ_NUM_TPD_BURST_SHIFT;
@@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
goto err_reset;
}
- device_init_wakeup(&pdev->dev, 1);
/* reset the controller to
* put the device in a known good starting state */
err = atl1c_phy_init(&adapter->hw);
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b948..1209297433b8 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
- adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+ adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
if (hw->autoneg_advertised & ADVERTISE_10_HALF)
- adv4 |= MII_AR_10T_HD_CAPS;
+ adv4 |= ADVERTISE_10HALF;
if (hw->autoneg_advertised & ADVERTISE_10_FULL)
- adv4 |= MII_AR_10T_FD_CAPS;
+ adv4 |= ADVERTISE_10FULL;
if (hw->autoneg_advertised & ADVERTISE_100_HALF)
- adv4 |= MII_AR_100TX_HD_CAPS;
+ adv4 |= ADVERTISE_100HALF;
if (hw->autoneg_advertised & ADVERTISE_100_FULL)
- adv4 |= MII_AR_100TX_FD_CAPS;
+ adv4 |= ADVERTISE_100FULL;
if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
- adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+ adv9 |= ADVERTISE_1000FULL;
if (adv4 != hw->mii_autoneg_adv_reg ||
adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8c..923063d2e5bb 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
- mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
- MII_AR_10T_FD_CAPS |
- MII_AR_100TX_HD_CAPS |
- MII_AR_100TX_FD_CAPS);
- hw->autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL |
- ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ mii_autoneg_adv_reg |= ADVERTISE_ALL;
+ hw->autoneg_advertised = ADVERTISE_ALL;
if (hw->nic_type == athr_l1e) {
- mii_1000t_ctrl_reg |=
- MII_AT001_CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
hw->autoneg_advertised |= ADVERTISE_1000_FULL;
}
break;
case MEDIA_TYPE_100M_FULL:
- mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
- mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
- mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
- mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
- mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
return ret_val;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int ret_val;
u16 phy_data;
- phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+ phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
return err;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ err = atl1e_write_phy_reg(hw, MII_CTRL1000,
hw->mii_1000t_ctrl_reg);
if (err)
return err;
}
err = atl1e_write_phy_reg(hw, MII_BMCR,
- MII_CR_RESET | MII_CR_AUTO_NEG_EN |
- MII_CR_RESTART_AUTO_NEG);
+ BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
return err;
}
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cfa..74df16aef793 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
/***************************** MII definition ***************************************/
/* PHY Common Register */
-#define MII_BMCR 0x00
-#define MII_BMSR 0x01
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
-#define MII_ADVERTISE 0x04
-#define MII_LPA 0x05
-#define MII_EXPANSION 0x06
-#define MII_AT001_CR 0x09
-#define MII_AT001_SR 0x0A
-#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
#define MII_INT_CTRL 0x12
#define MII_INT_STATUS 0x13
#define MII_SMARTSPEED 0x14
-#define MII_RERRCOUNTER 0x15
-#define MII_SREVISION 0x16
-#define MII_RESV1 0x17
#define MII_LBRERROR 0x18
-#define MII_PHYADDR 0x19
#define MII_RESV2 0x1a
-#define MII_TPISTATUS 0x1b
-#define MII_NCONFIG 0x1c
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK 0x2040
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
-#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
-#define MII_LPA_PAUSE 0x0400 /* PAUSE */
-#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
-#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
-#define MII_LPA_NPAGE 0x8000 /* Next page bit */
-
/* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
-#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
-#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
-#define MII_AR_SPEED_MASK 0x01E0
-#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK 0
/* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+ (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8baf394e..1ff001a8270c 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
- hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+ hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
- hw->dmar_block = min(max_pay_load, hw->dmar_block);
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
if (hw->nic_type != athr_l2e_revB)
AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- mii_advertise_data = MII_AR_10T_HD_CAPS;
+ mii_advertise_data = ADVERTISE_10HALF;
- if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+ if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b527687c28f..67f40b9c16ed 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->wol = 0;
+ device_set_wakeup_enable(&adapter->pdev->dev, false);
adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
adapter->ict = 50000; /* 100ms */
adapter->link_speed = SPEED_0; /* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
}
#ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
u32 wufc = adapter->wol;
u32 val;
- int retval;
u16 speed;
u16 duplex;
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev))
atl1_down(adapter);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
val = ctrl & BMSR_LSTATUS;
if (val)
wufc &= ~ATLX_WUFC_LNKC;
+ if (!wufc)
+ goto disable_wol;
- if (val && wufc) {
+ if (val) {
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto exit;
- }
-
- if (!val && wufc) {
+ } else {
ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
ioread32(hw->hw_addr + REG_MAC_CTRL);
hw->phy_configured = false;
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto exit;
}
-disable_wol:
+ return 0;
+
+ disable_wol:
iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
hw->phy_configured = false;
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
- if (netif_running(netdev))
- pci_disable_msi(adapter->pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
- u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err) {
- if (netif_msg_ifup(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
- "error enabling pci device\n");
- return err;
- }
-
- pci_set_master(pdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS (&atl1_pm_ops)
+
#else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS NULL
#endif
static void atl1_shutdown(struct pci_dev *pdev)
{
-#ifdef CONFIG_PM
- atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
.id_table = atl1_pci_tbl,
.probe = atl1_probe,
.remove = __devexit_p(atl1_remove),
- .suspend = atl1_suspend,
- .resume = atl1_resume,
- .shutdown = atl1_shutdown
+ .shutdown = atl1_shutdown,
+ .driver.pm = ATL1_PM_OPS,
};
/*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
adapter->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATLX_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 4e6f4e95a5a0..e637e9f28fd4 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
adapter->wol = 0;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4bebff3faeab..e7cb8c8b9776 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
-*/
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -17,46 +17,45 @@
#include <linux/isapnp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
#include <linux/eeprom_93cx6.h>
#include <linux/slab.h>
#include <net/ax88796.h>
#include <asm/system.h>
-#include <asm/io.h>
-
-static int phy_debug = 0;
/* Rename the lib8390.c functions to show that they are in this driver */
-#define __ei_open ax_ei_open
-#define __ei_close ax_ei_close
-#define __ei_poll ax_ei_poll
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
#define __ei_start_xmit ax_ei_start_xmit
#define __ei_tx_timeout ax_ei_tx_timeout
-#define __ei_get_stats ax_ei_get_stats
+#define __ei_get_stats ax_ei_get_stats
#define __ei_set_multicast_list ax_ei_set_multicast_list
-#define __ei_interrupt ax_ei_interrupt
+#define __ei_interrupt ax_ei_interrupt
#define ____alloc_ei_netdev ax__alloc_ei_netdev
-#define __NS8390_init ax_NS8390_init
+#define __NS8390_init ax_NS8390_init
/* force unsigned long back to 'void __iomem *' */
#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
-#define ei_inb(_a) readb(ax_convert_addr(_a))
+#define ei_inb(_a) readb(ax_convert_addr(_a))
#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
-#define ei_inb_p(_a) ei_inb(_a)
+#define ei_inb_p(_a) ei_inb(_a)
#define ei_outb_p(_v, _a) ei_outb(_v, _a)
/* define EI_SHIFT() to take into account our register offsets */
-#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
/* Ensure we have our RCR base value */
#define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define NE_DATAPORT EI_SHIFT(0x10)
#define NE1SM_START_PG 0x20 /* First page of TX buffer */
-#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+#define AX_GPOC_PPDSET BIT(6)
+
/* device private data */
struct ax_device {
- struct timer_list mii_timer;
- spinlock_t mii_lock;
- struct mii_if_info mii;
-
- u32 msg_enable;
- void __iomem *map2;
- struct platform_device *dev;
- struct resource *mem;
- struct resource *mem2;
- struct ax_plat_data *plat;
-
- unsigned char running;
- unsigned char resume_open;
- unsigned int irqflags;
-
- u32 reg_offsets[0x20];
+ struct mii_bus *mii_bus;
+ struct mdiobb_ctrl bb_ctrl;
+ struct phy_device *phy_dev;
+ void __iomem *addr_memr;
+ u8 reg_memr;
+ int link;
+ int speed;
+ int duplex;
+
+ void __iomem *map2;
+ const struct ax_plat_data *plat;
+
+ unsigned char running;
+ unsigned char resume_open;
+ unsigned int irqflags;
+
+ u32 reg_offsets[0x20];
};
static inline struct ax_device *to_ax_dev(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- return (struct ax_device *)(ei_local+1);
+ return (struct ax_device *)(ei_local + 1);
}
-/* ax_initial_check
+/*
+ * ax_initial_check
*
 * do an initial probe for the card to check whether it exists
* and is functional
*/
-
static int ax_initial_check(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
if (reg0 == 0xFF)
return -ENODEV;
- ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
regd = ei_inb(ioaddr + 0x0d);
ei_outb(0xff, ioaddr + 0x0d);
- ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
return 0;
}
-/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
-
+/*
+ * Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
static void ax_reset_8390(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
if (ei_debug > 1)
- dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
+ netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
- ei_status.txing = 0;
- ei_status.dmaing = 0;
+ ei_local->txing = 0;
+ ei_local->dmaing = 0;
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
- if (jiffies - reset_start_time > 2*HZ/100) {
- dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
- __func__, dev->name);
+ if (jiffies - reset_start_time > 2 * HZ / 100) {
+ netdev_warn(dev, "%s: did not complete.\n", __func__);
break;
}
}
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+ if (ei_local->dmaing) {
+ netdev_err(dev, "DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, __func__,
- ei_status.dmaing, ei_status.irqlock);
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
return;
}
- ei_status.dmaing |= 0x01;
- ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
ei_outb(0, nic_base + EN0_RCNTHI);
ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
ei_outb(ring_page, nic_base + EN0_RSARHI);
ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16)
- readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ if (ei_local->word16)
+ readsw(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
else
- readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+ readsb(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr));
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
+ ei_local->dmaing &= ~0x01;
le16_to_cpus(&hdr->count);
}
-/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using ei_outb. */
-
+/*
+ * Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
static void ax_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
char *buf = skb->data;
- if (ei_status.dmaing) {
- dev_err(&ax->dev->dev,
- "%s: DMAing conflict in %s "
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, __func__,
- ei_status.dmaing, ei_status.irqlock);
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
return;
}
- ei_status.dmaing |= 0x01;
+ ei_local->dmaing |= 0x01;
- ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
ei_outb(count >> 8, nic_base + EN0_RCNTHI);
ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16) {
+ if (ei_local->word16) {
readsw(nic_base + NE_DATAPORT, buf, count >> 1);
if (count & 0x01)
buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
readsb(nic_base + NE_DATAPORT, buf, count);
}
- ei_status.dmaing &= ~1;
+ ei_local->dmaing &= ~1;
}
static void ax_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
unsigned long dma_start;
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
-
- if (ei_status.word16 && (count & 0x01))
+ /*
+ * Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390? I
+ * should check someday.
+ */
+ if (ei_local->word16 && (count & 0x01))
count++;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
+ if (ei_local->dmaing) {
+ netdev_err(dev, "DMAing conflict in %s."
"[DMAstat:%d][irqlock:%d]\n",
- dev->name, __func__,
- ei_status.dmaing, ei_status.irqlock);
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
return;
}
- ei_status.dmaing |= 0x01;
+ ei_local->dmaing |= 0x01;
/* We should already be in page 0, but to be safe... */
ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
/* Now the normal output. */
ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
- ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
ei_outb(0x00, nic_base + EN0_RSARLO);
ei_outb(start_page, nic_base + EN0_RSARHI);
ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16) {
- writesw(nic_base + NE_DATAPORT, buf, count>>1);
- } else {
+ if (ei_local->word16)
+ writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+ else
writesb(nic_base + NE_DATAPORT, buf, count);
- }
dma_start = jiffies;
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
- if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
- dev_warn(&ax->dev->dev,
- "%s: timeout waiting for Tx RDC.\n", dev->name);
+ if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ax_reset_8390(dev);
- ax_NS8390_init(dev,1);
+ ax_NS8390_init(dev, 1);
break;
}
}
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
+ ei_local->dmaing &= ~0x01;
}
/* definitions for accessing MII/EEPROM interface */
#define AX_MEMR EI_SHIFT(0x14)
-#define AX_MEMR_MDC (1<<0)
-#define AX_MEMR_MDIR (1<<1)
-#define AX_MEMR_MDI (1<<2)
-#define AX_MEMR_MDO (1<<3)
-#define AX_MEMR_EECS (1<<4)
-#define AX_MEMR_EEI (1<<5)
-#define AX_MEMR_EEO (1<<6)
-#define AX_MEMR_EECLK (1<<7)
-
-/* ax_mii_ei_outbits
- *
- * write the specified set of bits to the phy
-*/
-
-static void
-ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
+#define AX_MEMR_MDC BIT(0)
+#define AX_MEMR_MDIR BIT(1)
+#define AX_MEMR_MDI BIT(2)
+#define AX_MEMR_MDO BIT(3)
+#define AX_MEMR_EECS BIT(4)
+#define AX_MEMR_EEI BIT(5)
+#define AX_MEMR_EEO BIT(6)
+#define AX_MEMR_EECLK BIT(7)
+
+static void ax_handle_link_change(struct net_device *dev)
{
- struct ei_device *ei_local = netdev_priv(dev);
- void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
- unsigned int memr;
-
- /* clock low, data to output mode */
- memr = ei_inb(memr_addr);
- memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
- ei_outb(memr, memr_addr);
-
- for (len--; len >= 0; len--) {
- if (bits & (1 << len))
- memr |= AX_MEMR_MDO;
- else
- memr &= ~AX_MEMR_MDO;
-
- ei_outb(memr, memr_addr);
-
- /* clock high */
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = ax->phy_dev;
+ int status_change = 0;
- ei_outb(memr | AX_MEMR_MDC, memr_addr);
- udelay(1);
+ if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+ (ax->duplex != phy_dev->duplex))) {
- /* clock low */
- ei_outb(memr, memr_addr);
+ ax->speed = phy_dev->speed;
+ ax->duplex = phy_dev->duplex;
+ status_change = 1;
}
- /* leaves the clock line low, mdir input */
- memr |= AX_MEMR_MDIR;
- ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR);
-}
-
-/* ax_phy_ei_inbits
- *
- * read a specified number of bits from the phy
-*/
-
-static unsigned int
-ax_phy_ei_inbits(struct net_device *dev, int no)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
- unsigned int memr;
- unsigned int result = 0;
-
- /* clock low, data to input mode */
- memr = ei_inb(memr_addr);
- memr &= ~AX_MEMR_MDC;
- memr |= AX_MEMR_MDIR;
- ei_outb(memr, memr_addr);
-
- for (no--; no >= 0; no--) {
- ei_outb(memr | AX_MEMR_MDC, memr_addr);
-
- udelay(1);
-
- if (ei_inb(memr_addr) & AX_MEMR_MDI)
- result |= (1<<no);
+ if (phy_dev->link != ax->link) {
+ if (!phy_dev->link) {
+ ax->speed = 0;
+ ax->duplex = -1;
+ }
+ ax->link = phy_dev->link;
- ei_outb(memr, memr_addr);
+ status_change = 1;
}
- return result;
-}
-
-/* ax_phy_issueaddr
- *
- * use the low level bit shifting routines to send the address
- * and command to the specified phy
-*/
-
-static void
-ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
-{
- if (phy_debug)
- pr_debug("%s: dev %p, %04x, %04x, %d\n",
- __func__, dev, phy_addr, reg, opc);
-
- ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
- ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
- ax_mii_ei_outbits(dev, opc, 2); /* op code */
- ax_mii_ei_outbits(dev, phy_addr, 5); /* phy address */
- ax_mii_ei_outbits(dev, reg, 5); /* reg address */
+ if (status_change)
+ phy_print_status(phy_dev);
}
-static int
-ax_phy_read(struct net_device *dev, int phy_addr, int reg)
+static int ax_mii_probe(struct net_device *dev)
{
- struct ei_device *ei_local = netdev_priv(dev);
- unsigned long flags;
- unsigned int result;
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = NULL;
+ int ret;
- spin_lock_irqsave(&ei_local->page_lock, flags);
+ /* find the first phy */
+ phy_dev = phy_find_first(ax->mii_bus);
+ if (!phy_dev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENODEV;
+ }
- ax_phy_issueaddr(dev, phy_addr, reg, 2);
+ ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
- result = ax_phy_ei_inbits(dev, 17);
- result &= ~(3<<16);
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ax->phy_dev = phy_dev;
- if (phy_debug)
- pr_debug("%s: %04x.%04x => read %04x\n", __func__,
- phy_addr, reg, result);
+ netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
- return result;
+ return 0;
}
-static void
-ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
+static void ax_phy_switch(struct net_device *dev, int on)
{
- struct ei_device *ei = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
-
- dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
- __func__, dev, phy_addr, reg, value);
-
- spin_lock_irqsave(&ei->page_lock, flags);
-
- ax_phy_issueaddr(dev, phy_addr, reg, 1);
- ax_mii_ei_outbits(dev, 2, 2); /* send TA */
- ax_mii_ei_outbits(dev, value, 16);
-
- spin_unlock_irqrestore(&ei->page_lock, flags);
-}
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
-static void ax_mii_expiry(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
+ u8 reg_gpoc = ax->plat->gpoc_val;
- spin_lock_irqsave(&ax->mii_lock, flags);
- mii_check_media(&ax->mii, netif_msg_link(ax), 0);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
+ if (!!on)
+ reg_gpoc &= ~AX_GPOC_PPDSET;
+ else
+ reg_gpoc |= AX_GPOC_PPDSET;
- if (ax->running) {
- ax->mii_timer.expires = jiffies + HZ*2;
- add_timer(&ax->mii_timer);
- }
+ ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
}
static int ax_open(struct net_device *dev)
{
- struct ax_device *ax = to_ax_dev(dev);
- struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
int ret;
- dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
+ netdev_dbg(dev, "open\n");
ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
dev->name, dev);
if (ret)
- return ret;
-
- ret = ax_ei_open(dev);
- if (ret) {
- free_irq(dev->irq, dev);
- return ret;
- }
+ goto failed_request_irq;
/* turn the phy on (if turned off) */
+ ax_phy_switch(dev, 1);
- ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17));
- ax->running = 1;
-
- /* start the MII timer */
-
- init_timer(&ax->mii_timer);
+ ret = ax_mii_probe(dev);
+ if (ret)
+ goto failed_mii_probe;
+ phy_start(ax->phy_dev);
- ax->mii_timer.expires = jiffies+1;
- ax->mii_timer.data = (unsigned long) dev;
- ax->mii_timer.function = ax_mii_expiry;
+ ret = ax_ei_open(dev);
+ if (ret)
+ goto failed_ax_ei_open;
- add_timer(&ax->mii_timer);
+ ax->running = 1;
return 0;
+
+ failed_ax_ei_open:
+ phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+ ax_phy_switch(dev, 0);
+ free_irq(dev->irq, dev);
+ failed_request_irq:
+ return ret;
}
static int ax_close(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
- struct ei_device *ei_local = netdev_priv(dev);
- dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
-
- /* turn the phy off */
-
- ei_outb(ax->plat->gpoc_val | (1<<6),
- ei_local->mem + EI_SHIFT(0x17));
+ netdev_dbg(dev, "close\n");
ax->running = 0;
wmb();
- del_timer_sync(&ax->mii_timer);
ax_ei_close(dev);
+ /* turn the phy off */
+ ax_phy_switch(dev, 0);
+ phy_disconnect(ax->phy_dev);
+
free_irq(dev->irq, dev);
return 0;
}
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
- int rc;
+ struct phy_device *phy_dev = ax->phy_dev;
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irqsave(&ax->mii_lock, flags);
- rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
+ if (!phy_dev)
+ return -ENODEV;
- return rc;
+ return phy_mii_ioctl(phy_dev, req, cmd);
}
/* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
static void ax_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct ax_device *ax = to_ax_dev(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, ax->dev->name);
+ strcpy(info->bus_info, pdev->name);
}
static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
+ struct phy_device *phy_dev = ax->phy_dev;
- spin_lock_irqsave(&ax->mii_lock, flags);
- mii_ethtool_gset(&ax->mii, cmd);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
+ if (!phy_dev)
+ return -ENODEV;
- return 0;
+ return phy_ethtool_gset(phy_dev, cmd);
}
static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
- int rc;
+ struct phy_device *phy_dev = ax->phy_dev;
- spin_lock_irqsave(&ax->mii_lock, flags);
- rc = mii_ethtool_sset(&ax->mii, cmd);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
-
- return rc;
-}
-
-static int ax_nway_reset(struct net_device *dev)
-{
- struct ax_device *ax = to_ax_dev(dev);
- return mii_nway_restart(&ax->mii);
-}
+ if (!phy_dev)
+ return -ENODEV;
-static u32 ax_get_link(struct net_device *dev)
-{
- struct ax_device *ax = to_ax_dev(dev);
- return mii_link_ok(&ax->mii);
+ return phy_ethtool_sset(phy_dev, cmd);
}
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
.get_settings = ax_get_settings,
.set_settings = ax_set_settings,
- .nway_reset = ax_nway_reset,
- .get_link = ax_get_link,
+ .get_link = ethtool_op_get_link,
};
#ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
.ndo_get_stats = ax_ei_get_stats,
.ndo_set_multicast_list = ax_ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ax_ei_poll,
#endif
};
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (level)
+ ax->reg_memr |= AX_MEMR_MDC;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDC;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (output)
+ ax->reg_memr &= ~AX_MEMR_MDIR;
+ else
+ ax->reg_memr |= AX_MEMR_MDIR;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (value)
+ ax->reg_memr |= AX_MEMR_MDO;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDO;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+ int reg_memr = ei_inb(ax->addr_memr);
+
+ return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ax_bb_mdc,
+ .set_mdio_dir = ax_bb_dir,
+ .set_mdio_data = ax_bb_set_data,
+ .get_mdio_data = ax_bb_get_data,
+};
+
/* setup code */
+static int ax_mii_init(struct net_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ int err, i;
+
+ ax->bb_ctrl.ops = &bb_ops;
+ ax->addr_memr = ei_local->mem + AX_MEMR;
+ ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+ if (!ax->mii_bus) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ax->mii_bus->name = "ax88796_mii_bus";
+ ax->mii_bus->parent = dev->dev.parent;
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!ax->mii_bus->irq) {
+ err = -ENOMEM;
+ goto out_free_mdio_bitbang;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ ax->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(ax->mii_bus);
+ if (err)
+ goto out_free_irq;
+
+ return 0;
+
+ out_free_irq:
+ kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+ free_mdio_bitbang(ax->mii_bus);
+ out:
+ return err;
+}
+
static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
{
void __iomem *ioaddr = ei_local->mem;
struct ax_device *ax = to_ax_dev(dev);
- /* Select page 0*/
- ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD);
+ /* Select page 0 */
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
/* set to byte access */
ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
}
-/* ax_init_dev
+/*
+ * ax_init_dev
*
* initialise the specified device, taking care to note the MAC
* address it may already have (if configured), ensure
 * the device is ready to be used by lib8390.c and registered with
* the network layer.
*/
-
-static int ax_init_dev(struct net_device *dev, int first_init)
+static int ax_init_dev(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
/* read the mac from the card prom if we need it */
- if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) {
+ if (ax->plat->flags & AXFLG_HAS_EEPROM) {
unsigned char SA_prom[32];
- for(i = 0; i < sizeof(SA_prom); i+=2) {
+ for (i = 0; i < sizeof(SA_prom); i += 2) {
SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
- SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT);
+ SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
}
if (ax->plat->wordlength == 2)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, 6);
+ memcpy(dev->dev_addr, SA_prom, 6);
}
#ifdef CONFIG_AX88796_93CX6
- if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) {
+ if (ax->plat->flags & AXFLG_HAS_93CX6) {
unsigned char mac_addr[6];
struct eeprom_93cx6 eeprom;
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, 6);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
stop_page = NE1SM_STOP_PG;
}
- /* load the mac-address from the device if this is the
- * first time we've initialised */
-
- if (first_init) {
- if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
- ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
- ei_local->mem + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
- dev->dev_addr[i] =
- ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
- }
-
- if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
- ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr,
- ETHER_ADDR_LEN);
+ /* load the mac-address from the device */
+ if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+ ei_local->mem + E8390_CMD); /* 0x61 */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] =
+ ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
}
+ if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+ ax->plat->mac_addr)
+ memcpy(dev->dev_addr, ax->plat->mac_addr,
+ ETHER_ADDR_LEN);
+
ax_reset_8390(dev);
- ei_status.name = "AX88796";
- ei_status.tx_start_page = start_page;
- ei_status.stop_page = stop_page;
- ei_status.word16 = (ax->plat->wordlength == 2);
- ei_status.rx_start_page = start_page + TX_PAGES;
+ ei_local->name = "AX88796";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = (ax->plat->wordlength == 2);
+ ei_local->rx_start_page = start_page + TX_PAGES;
#ifdef PACKETBUF_MEMSIZE
- /* Allow the packet buffer size to be overridden by know-it-alls. */
- ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
#endif
- ei_status.reset_8390 = &ax_reset_8390;
- ei_status.block_input = &ax_block_input;
- ei_status.block_output = &ax_block_output;
- ei_status.get_8390_hdr = &ax_get_8390_hdr;
- ei_status.priv = 0;
-
- dev->netdev_ops = &ax_netdev_ops;
- dev->ethtool_ops = &ax_ethtool_ops;
-
- ax->msg_enable = NETIF_MSG_LINK;
- ax->mii.phy_id_mask = 0x1f;
- ax->mii.reg_num_mask = 0x1f;
- ax->mii.phy_id = 0x10; /* onboard phy */
- ax->mii.force_media = 0;
- ax->mii.full_duplex = 0;
- ax->mii.mdio_read = ax_phy_read;
- ax->mii.mdio_write = ax_phy_write;
- ax->mii.dev = dev;
+ ei_local->reset_8390 = &ax_reset_8390;
+ ei_local->block_input = &ax_block_input;
+ ei_local->block_output = &ax_block_output;
+ ei_local->get_8390_hdr = &ax_get_8390_hdr;
+ ei_local->priv = 0;
- ax_NS8390_init(dev, 0);
+ dev->netdev_ops = &ax_netdev_ops;
+ dev->ethtool_ops = &ax_ethtool_ops;
- if (first_init)
- dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
- ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
- dev->dev_addr);
+ ret = ax_mii_init(dev);
+ if (ret)
+ goto out_irq;
+
+ ax_NS8390_init(dev, 0);
ret = register_netdev(dev);
if (ret)
goto out_irq;
+ netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+ ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+ dev->dev_addr);
+
return 0;
out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
return ret;
}
-static int ax_remove(struct platform_device *_dev)
+static int ax_remove(struct platform_device *pdev)
{
- struct net_device *dev = platform_get_drvdata(_dev);
- struct ax_device *ax;
-
- ax = to_ax_dev(dev);
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ struct resource *mem;
unregister_netdev(dev);
free_irq(dev->irq, dev);
- iounmap(ei_status.mem);
- release_resource(ax->mem);
- kfree(ax->mem);
+ iounmap(ei_local->mem);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
if (ax->map2) {
iounmap(ax->map2);
- release_resource(ax->mem2);
- kfree(ax->mem2);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
return 0;
}
-/* ax_probe
+/*
+ * ax_probe
*
 * This is the entry point that the platform device system uses to
- * notify us of a new device to attach to. Allocate memory, find
- * the resources and information passed, and map the necessary registers.
-*/
-
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
static int ax_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct ax_device *ax;
- struct resource *res;
- size_t size;
+ struct ei_device *ei_local;
+ struct ax_device *ax;
+ struct resource *irq, *mem, *mem2;
+ resource_size_t mem_size, mem2_size = 0;
int ret = 0;
dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
return -ENOMEM;
/* ok, let's setup our device */
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ ei_local = netdev_priv(dev);
ax = to_ax_dev(dev);
- memset(ax, 0, sizeof(struct ax_device));
-
- spin_lock_init(&ax->mii_lock);
-
- ax->dev = pdev;
ax->plat = pdev->dev.platform_data;
platform_set_drvdata(pdev, dev);
- ei_status.rxcr_base = ax->plat->rcr_val;
+ ei_local->rxcr_base = ax->plat->rcr_val;
/* find the platform resources */
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
dev_err(&pdev->dev, "no IRQ specified\n");
ret = -ENXIO;
goto exit_mem;
}
- dev->irq = res->start;
- ax->irqflags = res->flags & IRQF_TRIGGER_MASK;
+ dev->irq = irq->start;
+ ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
dev_err(&pdev->dev, "no MEM specified\n");
ret = -ENXIO;
goto exit_mem;
}
- size = (res->end - res->start) + 1;
-
- /* setup the register offsets from either the platform data
- * or by using the size of the resource provided */
+ mem_size = resource_size(mem);
+ /*
+ * setup the register offsets from either the platform data or
+ * by using the size of the resource provided
+ */
if (ax->plat->reg_offsets)
- ei_status.reg_offset = ax->plat->reg_offsets;
+ ei_local->reg_offset = ax->plat->reg_offsets;
else {
- ei_status.reg_offset = ax->reg_offsets;
+ ei_local->reg_offset = ax->reg_offsets;
for (ret = 0; ret < 0x18; ret++)
- ax->reg_offsets[ret] = (size / 0x18) * ret;
+ ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
}
- ax->mem = request_mem_region(res->start, size, pdev->name);
- if (ax->mem == NULL) {
+ if (!request_mem_region(mem->start, mem_size, pdev->name)) {
dev_err(&pdev->dev, "cannot reserve registers\n");
- ret = -ENXIO;
+ ret = -ENXIO;
goto exit_mem;
}
- ei_status.mem = ioremap(res->start, size);
- dev->base_addr = (unsigned long)ei_status.mem;
+ ei_local->mem = ioremap(mem->start, mem_size);
+ dev->base_addr = (unsigned long)ei_local->mem;
- if (ei_status.mem == NULL) {
- dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end);
+ if (ei_local->mem == NULL) {
+ dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
- ret = -ENXIO;
+ ret = -ENXIO;
goto exit_req;
}
/* look for reset area */
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res == NULL) {
+ mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem2) {
if (!ax->plat->reg_offsets) {
for (ret = 0; ret < 0x20; ret++)
- ax->reg_offsets[ret] = (size / 0x20) * ret;
+ ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
}
-
- ax->map2 = NULL;
} else {
- size = (res->end - res->start) + 1;
+ mem2_size = resource_size(mem2);
- ax->mem2 = request_mem_region(res->start, size, pdev->name);
- if (ax->mem2 == NULL) {
+ if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
dev_err(&pdev->dev, "cannot reserve registers\n");
ret = -ENXIO;
goto exit_mem1;
}
- ax->map2 = ioremap(res->start, size);
- if (ax->map2 == NULL) {
+ ax->map2 = ioremap(mem2->start, mem2_size);
+ if (!ax->map2) {
dev_err(&pdev->dev, "cannot map reset register\n");
ret = -ENXIO;
goto exit_mem2;
}
- ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem;
+ ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
}
/* got resources, now initialise and register device */
-
- ret = ax_init_dev(dev, 1);
+ ret = ax_init_dev(dev);
if (!ret)
return 0;
- if (ax->map2 == NULL)
+ if (!ax->map2)
goto exit_mem1;
iounmap(ax->map2);
exit_mem2:
- release_resource(ax->mem2);
- kfree(ax->mem2);
+ release_mem_region(mem2->start, mem2_size);
exit_mem1:
- iounmap(ei_status.mem);
+ iounmap(ei_local->mem);
exit_req:
- release_resource(ax->mem);
- kfree(ax->mem);
+ release_mem_region(mem->start, mem_size);
exit_mem:
free_netdev(dev);
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
static int ax_suspend(struct platform_device *dev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(dev);
- struct ax_device *ax = to_ax_dev(ndev);
+ struct ax_device *ax = to_ax_dev(ndev);
ax->resume_open = ax->running;
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
static int ax_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct ax_device *ax = to_ax_dev(ndev);
+ struct ax_device *ax = to_ax_dev(ndev);
ax_initial_setup(ndev, netdev_priv(ndev));
ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
#else
#define ax_suspend NULL
-#define ax_resume NULL
+#define ax_resume NULL
#endif
static struct platform_driver axdrv = {
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93350dd..ed709a5d07d7 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -225,6 +225,10 @@ struct be_rx_obj {
u32 cache_line_barrier[15];
};
+struct be_drv_stats {
+ u8 be_on_die_temperature;
+};
+
struct be_vf_cfg {
unsigned char vf_mac_addr[ETH_ALEN];
u32 vf_if_handle;
@@ -234,6 +238,7 @@ struct be_vf_cfg {
};
#define BE_INVALID_PMAC_ID 0xffffffff
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -269,6 +274,7 @@ struct be_adapter {
u32 big_page_size; /* Compounded page size shared by rx wrbs */
u8 msix_vec_next_idx;
+ struct be_drv_stats drv_stats;
struct vlan_group *vlan_grp;
u16 vlans_added;
@@ -281,6 +287,7 @@ struct be_adapter {
struct be_dma_mem stats_cmd;
/* Work queue used to perform periodic tasks like getting statistics */
struct delayed_work work;
+ u16 work_counter;
/* Ethtool knobs and info */
bool rx_csum; /* BE card must perform rx-checksumming */
@@ -298,7 +305,7 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool ue_detected;
- bool stats_ioctl_sent;
+ bool stats_cmd_sent;
int link_speed;
u8 port_type;
u8 transceiver;
@@ -311,6 +318,8 @@ struct be_adapter {
struct be_vf_cfg vf_cfg[BE_MAX_VF];
u8 is_virtfn;
u32 sli_family;
+ u8 hba_port_num;
+ u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +459,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
mac[5] = (u8)(addr & 0xFF);
mac[4] = (u8)((addr >> 8) & 0xFF);
mac[3] = (u8)((addr >> 16) & 0xFF);
- mac[2] = 0xC9;
- mac[1] = 0x00;
- mac[0] = 0x00;
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
}
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index a179cc6d79f2..1822ecdadc7e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -18,11 +18,20 @@
#include "be.h"
#include "be_cmds.h"
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
static void be_mcc_notify(struct be_adapter *adapter)
{
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
+ if (adapter->eeh_err) {
+ dev_info(&adapter->pdev->dev,
+ "Error in Card Detected! Cannot issue commands\n");
+ return;
+ }
+
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
be_dws_le_to_cpu(&resp->hw_stats,
sizeof(resp->hw_stats));
netdev_stats_update(adapter);
- adapter->stats_ioctl_sent = false;
+ adapter->stats_cmd_sent = false;
}
} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
(compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
{
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
adapter->recommended_prio =
evt->reco_default_priority << VLAN_PRIO_SHIFT;
}
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
}
}
+/*Grp5 PVID evt*/
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_pvid_state *evt)
+{
+ if (evt->enabled)
+ adapter->pvid = evt->tag;
+ else
+ adapter->pvid = 0;
+}
+
static void be_async_grp5_evt_process(struct be_adapter *adapter,
u32 trailer, struct be_mcc_compl *evt)
{
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
be_async_grp5_qos_speed_process(adapter,
(struct be_async_event_grp5_qos_link_speed *)evt);
break;
+ case ASYNC_EVENT_PVID_STATE:
+ be_async_grp5_pvid_state_process(adapter,
+ (struct be_async_event_grp5_pvid_state *)evt);
+ break;
default:
dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ if (adapter->eeh_err)
+ return -EIO;
+
for (i = 0; i < mcc_timeout; i++) {
num = be_process_mcc(adapter, &status);
if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
+ if (adapter->eeh_err) {
+ dev_err(&adapter->pdev->dev,
+ "Error detected in card.Cannot issue commands\n");
+ return -EIO;
+ }
+
do {
ready = ioread32(db);
if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
+ req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
memcpy(req->mac_address, mac_addr, ETH_ALEN);
@@ -634,7 +668,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
+ req->hdr.domain = dom;
req->if_id = cpu_to_le32(if_id);
req->pmac_id = cpu_to_le32(pmac_id);
@@ -995,7 +1030,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
}
/* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1051,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
+ req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1072,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_sge *sge;
int status = 0;
+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1095,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(adapter);
- adapter->stats_ioctl_sent = true;
+ adapter->stats_cmd_sent = true;
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1142,44 @@ err:
return status;
}
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_cntl_addnl_attribs *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+ embedded_payload(wrb);
+ adapter->drv_stats.be_on_die_temperature =
+ resp->on_die_temperature;
+ }
+ /* If IOCTL fails once, do not bother issuing it again */
+ else
+ be_get_temp_freq = 0;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
@@ -1868,8 +1945,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
OPCODE_COMMON_SET_QOS, sizeof(*req));
req->hdr.domain = domain;
- req->valid_bits = BE_QOS_BITS_NIC;
- req->max_bps_nic = bps;
+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+ req->max_bps_nic = cpu_to_le32(bps);
status = be_mcc_notify_wait(adapter);
@@ -1877,3 +1954,57 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cntl_attribs *req;
+ struct be_cmd_resp_cntl_attribs *resp;
+ struct be_sge *sge;
+ int status;
+ int payload_len = max(sizeof(*req), sizeof(*resp));
+ struct mgmt_controller_attrib *attribs;
+ struct be_dma_mem attribs_cmd;
+
+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+ &attribs_cmd.dma);
+ if (!attribs_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = attribs_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(attribs_cmd.size);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+ attribs_cmd.dma);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8a9fa3..93e5768fc705 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -88,6 +88,7 @@ struct be_mcc_compl {
#define ASYNC_EVENT_CODE_GRP_5 0x5
#define ASYNC_EVENT_QOS_SPEED 0x1
#define ASYNC_EVENT_COS_PRIORITY 0x2
+#define ASYNC_EVENT_PVID_STATE 0x3
struct be_async_event_trailer {
u32 code;
};
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
struct be_async_event_trailer trailer;
} __packed;
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+ u8 enabled;
+ u8 rsvd0;
+ u16 tag;
+ u32 event_tag;
+ u32 rsvd1;
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_SET_QOS 28
#define OPCODE_COMMON_MCC_CREATE_EXT 90
#define OPCODE_COMMON_SEEPROM_READ 30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -176,6 +190,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PHY_DETAILS 102
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
#define OPCODE_ETH_RSS_CONFIG 1
#define OPCODE_ETH_ACPI_CONFIG 2
@@ -619,7 +634,10 @@ struct be_rxf_stats {
u32 rx_drops_invalid_ring; /* dword 145*/
u32 forwarded_packets; /* dword 146*/
u32 rx_drops_mtu; /* dword 147*/
- u32 rsvd0[15];
+ u32 rsvd0[7];
+ u32 port0_jabber_events;
+ u32 port1_jabber_events;
+ u32 rsvd1[6];
};
struct be_erx_stats {
@@ -630,11 +648,16 @@ struct be_erx_stats {
u32 debug_pmem_pbuf_dealloc; /* dword 47*/
};
+struct be_pmem_stats {
+ u32 eth_red_drops;
+ u32 rsvd[4];
+};
+
struct be_hw_stats {
struct be_rxf_stats rxf;
u32 rsvd[48];
struct be_erx_stats erx;
- u32 rsvd1[6];
+ struct be_pmem_stats pmem;
};
struct be_cmd_req_get_stats {
@@ -647,6 +670,20 @@ struct be_cmd_resp_get_stats {
struct be_hw_stats hw_stats;
};
+struct be_cmd_req_get_cntl_addnl_attribs {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ u16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd0;
+ u8 on_die_temperature; /* in degrees centigrade */
+ u8 rsvd1[3];
+};
+
struct be_cmd_req_vlan_config {
struct be_cmd_req_hdr hdr;
u8 interface_id;
@@ -994,17 +1031,29 @@ struct be_cmd_resp_set_qos {
u32 rsvd;
};
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attrib attribs;
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+ u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+ u32 pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+ u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1125,6 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be0271efe0..6e5e43380c2a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
int offset;
};
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+ PMEMSTAT, DRVSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
offsetof(_struct, field)
#define NETSTAT_INFO(field) #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
field)
#define ERXSTAT_INFO(field) #field, ERXSTAT,\
FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
+ FIELDINFO(struct be_pmem_stats, field)
+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
+ FIELDINFO(struct be_drv_stats, \
+ field)
static const struct be_ethtool_stat et_stats[] = {
{NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
{MISCSTAT_INFO(rx_drops_too_many_frags)},
{MISCSTAT_INFO(rx_drops_invalid_ring)},
{MISCSTAT_INFO(forwarded_packets)},
- {MISCSTAT_INFO(rx_drops_mtu)}
+ {MISCSTAT_INFO(rx_drops_mtu)},
+ {MISCSTAT_INFO(port0_jabber_events)},
+ {MISCSTAT_INFO(port1_jabber_events)},
+ {PMEMSTAT_INFO(eth_red_drops)},
+ {DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"MAC Loopback test",
"PHY Loopback test",
"External Loopback test",
- "DDR DMA test"
+ "DDR DMA test",
"Link test"
};
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
case MISCSTAT:
p = &hw_stats->rxf;
break;
+ case PMEMSTAT:
+ p = &hw_stats->pmem;
+ break;
+ case DRVSTAT:
+ p = &adapter->drv_stats;
+ break;
}
p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
- &phy_cmd.dma);
+ phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ phy_cmd.size, &phy_cmd.dma,
+ GFP_KERNEL);
if (!phy_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
- pci_free_consistent(adapter->pdev, phy_cmd.size,
- phy_cmd.va, phy_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+ phy_cmd.dma);
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
int status;
u32 cur;
- be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
if (cur == BEACON_STATE_ENABLED)
return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
if (data < 2)
data = 2;
- status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_ENABLED);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(data*HZ);
- status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_DISABLED);
return status;
}
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+ if (!be_physfn(adapter))
+ return false;
+ else
+ return true;
+}
+
static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_MAGIC;
+ if (be_is_wol_supported(adapter))
+ wol->supported = WAKE_MAGIC;
+
if (adapter->wol)
wol->wolopts = WAKE_MAGIC;
else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- if (wol->wolopts & WAKE_MAGIC)
+ if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
adapter->wol = true;
else
adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
- &ddrdma_cmd.dma);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+ &ddrdma_cmd.dma, GFP_KERNEL);
if (!ddrdma_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
}
err:
- pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
- ddrdma_cmd.va, ddrdma_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+ ddrdma_cmd.dma);
return ret;
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->port_num,
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
loopback_type, 1);
- *status = be_cmd_loopback_test(adapter, adapter->port_num,
+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500,
2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->port_num,
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
BE_NO_LOOPBACK, 1);
return *status;
}
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
&qos_link_speed) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
- } else if (mac_speed) {
+ } else if (!mac_speed) {
+ test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
}
}
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
- &eeprom_cmd.dma);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+ &eeprom_cmd.dma, GFP_KERNEL);
if (!eeprom_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
- pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
- eeprom_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
return status;
}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d9778234..3f459f76cd1d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -327,6 +327,53 @@ struct be_eth_rx_compl {
u32 dw[4];
};
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd0[3];
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u32 rsvd0[5];
+};
+
struct controller_id {
u32 vendor;
u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 28a32a6c8bf1..0bdccb10aac5 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -1;
memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!be_physfn(adapter))
goto netdev_addr;
- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id, 0);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id);
+ adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -484,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
bool unmap_single)
{
dma_addr_t dma;
@@ -494,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
if (wrb->frag_len) {
if (unmap_single)
- pci_unmap_single(pdev, dma, wrb->frag_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dev, dma, wrb->frag_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pdev, dma, wrb->frag_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
}
}
@@ -507,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
{
dma_addr_t busaddr;
int i, copied = 0;
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &adapter->pdev->dev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
@@ -521,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
if (skb->len > skb->data_len) {
int len = skb_headlen(skb);
- busaddr = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, busaddr))
+ busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
goto dma_err;
map_single = true;
wrb = queue_head_node(txq);
@@ -536,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
- busaddr = pci_map_page(pdev, frag->page,
- frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, busaddr))
+ busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
@@ -563,7 +562,7 @@ dma_err:
txq->head = map_head;
while (copied) {
wrb = queue_head_node(txq);
- unmap_tx_frag(pdev, wrb, map_single);
+ unmap_tx_frag(dev, wrb, map_single);
map_single = false;
copied -= wrb->frag_len;
queue_head_inc(txq);
@@ -743,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
status = be_cmd_pmac_del(adapter,
adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id);
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
status = be_cmd_pmac_add(adapter, mac,
adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id);
+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -822,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
rate = 10000;
adapter->vf_cfg[vf].vf_tx_rate = rate;
- status = be_cmd_set_qos(adapter, rate / 10, vf);
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
dev_info(&adapter->pdev->dev,
@@ -888,8 +887,9 @@ get_rx_page_info(struct be_adapter *adapter,
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
- adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, DMA_FROM_DEVICE);
rx_page_info->last_page_user = false;
}
@@ -1047,6 +1047,9 @@ static void be_rx_compl_process(struct be_adapter *adapter,
if ((adapter->function_mode & 0x400) && !vtm)
vlanf = 0;
+ if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+ vlanf = 0;
+
if (unlikely(vlanf)) {
if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
@@ -1087,6 +1090,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if ((adapter->function_mode & 0x400) && !vtm)
vlanf = 0;
+ if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+ vlanf = 0;
+
skb = napi_get_frags(&eq_obj->napi);
if (!skb) {
be_rx_compl_discard(adapter, rxo, rxcp);
@@ -1195,9 +1201,9 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
rxo->stats.rx_post_fail++;
break;
}
- page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
- adapter->big_page_size,
- PCI_DMA_FROMDEVICE);
+ page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+ 0, adapter->big_page_size,
+ DMA_FROM_DEVICE);
page_info->page_offset = 0;
} else {
get_page(pagep);
@@ -1270,8 +1276,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
- skb_headlen(sent_skb)));
+ unmap_tx_frag(&adapter->pdev->dev, wrb,
+ (unmap_skb_hdr && skb_headlen(sent_skb)));
unmap_skb_hdr = false;
num_wrbs++;
@@ -1827,6 +1833,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
if (ue_status_lo || ue_status_hi) {
adapter->ue_detected = true;
+ adapter->eeh_err = true;
dev_err(&adapter->pdev->dev, "UE Detected!!\n");
}
@@ -1865,10 +1872,14 @@ static void be_worker(struct work_struct *work)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
+
+ if (!adapter->ue_detected && !lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+
goto reschedule;
}
- if (!adapter->stats_ioctl_sent)
+ if (!adapter->stats_cmd_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
be_tx_rate_update(adapter);
@@ -2179,7 +2190,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
memset(mac, 0, ETH_ALEN);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (cmd.va == NULL)
return -1;
memset(cmd.va, 0, cmd.size);
@@ -2190,8 +2202,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
- cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
status = be_cmd_enable_magic_wol(adapter,
@@ -2204,7 +2216,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
}
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2225,7 +2237,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
for (vf = 0; vf < num_vfs; vf++) {
status = be_cmd_pmac_add(adapter, mac,
adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id);
+ &adapter->vf_cfg[vf].vf_pmac_id,
+ vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
"Mac address add failed for VF %d\n", vf);
@@ -2245,7 +2258,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
be_cmd_pmac_del(adapter,
adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id);
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
}
}
@@ -2277,22 +2290,26 @@ static int be_setup(struct be_adapter *adapter)
goto do_none;
if (be_physfn(adapter)) {
- while (vf < num_vfs) {
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
- | BE_IF_FLAGS_BROADCAST;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- mac, true,
+ if (adapter->sriov_enabled) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags,
+ en_flags, mac, true,
&adapter->vf_cfg[vf].vf_if_handle,
NULL, vf+1);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Interface Create failed for VF %d\n", vf);
- goto if_destroy;
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n",
+ vf);
+ goto if_destroy;
+ }
+ adapter->vf_cfg[vf].vf_pmac_id =
+ BE_INVALID_PMAC_ID;
+ vf++;
}
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
- vf++;
}
- } else if (!be_physfn(adapter)) {
+ } else {
status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
if (!status) {
@@ -2313,44 +2330,46 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto rx_qs_destroy;
- if (be_physfn(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto mcc_q_destroy;
- }
-
adapter->link_speed = -1;
return 0;
-mcc_q_destroy:
- if (be_physfn(adapter))
- be_vf_eth_addr_rem(adapter);
be_mcc_queues_destroy(adapter);
rx_qs_destroy:
be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle);
- be_cmd_if_destroy(adapter, adapter->if_handle);
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
return status;
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter))
+ int vf;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
be_vf_eth_addr_rem(adapter);
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- be_cmd_if_destroy(adapter, adapter->if_handle);
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
@@ -2453,8 +2472,8 @@ static int be_flash_data(struct be_adapter *adapter,
continue;
if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
(!be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size)))
+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+ (num_of_images * sizeof(struct image_hdr)))))
continue;
p = fw->data;
p += filehdr_size + pflashcomp[i].offset
@@ -2528,8 +2547,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
- flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
- &flash_cmd.dma);
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -2558,8 +2577,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
status = -1;
}
- pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev, "Firmware load error\n");
goto fw_exit;
@@ -2700,13 +2719,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
be_unmap_pci_bars(adapter);
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
mem = &adapter->mc_cmd_mem;
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
@@ -2721,8 +2740,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
goto done;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
- mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+ mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va) {
status = -ENOMEM;
goto unmap_pci_bars;
@@ -2734,8 +2755,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
- mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
- &mc_cmd_mem->dma);
+ mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mc_cmd_mem->size, &mc_cmd_mem->dma,
+ GFP_KERNEL);
if (mc_cmd_mem->va == NULL) {
status = -ENOMEM;
goto free_mbox;
@@ -2751,8 +2773,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
return 0;
free_mbox:
- pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
- mbox_mem_alloc->va, mbox_mem_alloc->dma);
+ dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
unmap_pci_bars:
be_unmap_pci_bars(adapter);
@@ -2766,8 +2788,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
struct be_dma_mem *cmd = &adapter->stats_cmd;
if (cmd->va)
- pci_free_consistent(adapter->pdev, cmd->size,
- cmd->va, cmd->dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd->size,
+ cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
@@ -2775,7 +2797,8 @@ static int be_stats_init(struct be_adapter *adapter)
struct be_dma_mem *cmd = &adapter->stats_cmd;
cmd->size = sizeof(struct be_cmd_req_get_stats);
- cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+ cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
if (cmd->va == NULL)
return -1;
memset(cmd->va, 0, cmd->size);
@@ -2845,6 +2868,10 @@ static int be_get_config(struct be_adapter *adapter)
else
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
return 0;
}
@@ -2918,11 +2945,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
- status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
@@ -2947,11 +2974,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto ctrl_clean;
- if (be_physfn(adapter)) {
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
- }
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
status = be_stats_init(adapter);
if (status)
@@ -2975,10 +3000,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto unsetup;
netif_carrier_off(netdev);
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto unreg_netdev;
+ }
+
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+unreg_netdev:
+ unregister_netdev(netdev);
unsetup:
be_clear(adapter);
msix_disable:
@@ -3005,6 +3038,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ cancel_delayed_work_sync(&adapter->work);
if (adapter->wol)
be_setup_wol(adapter, true);
@@ -3017,6 +3051,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
be_clear(adapter);
+ be_msix_disable(adapter);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3038,6 +3073,7 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
+ be_msix_enable(adapter);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
@@ -3053,6 +3089,8 @@ static int be_resume(struct pci_dev *pdev)
if (adapter->wol)
be_setup_wol(adapter, false);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
}
@@ -3064,6 +3102,9 @@ static void be_shutdown(struct pci_dev *pdev)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ if (netif_running(netdev))
+ cancel_delayed_work_sync(&adapter->work);
+
netif_device_detach(netdev);
be_cmd_reset_function(adapter);
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe4..9f356d5d0f33 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
}
unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
if (++unmap_cons >= unmap_q->q_depth)
break;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
if (++unmap_cons >= unmap_q->q_depth)
break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
prefetch(&unmap_array[unmap_cons + 1]);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
prefetch(&unmap_array[unmap_cons + 1]);
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
}
@@ -340,19 +340,22 @@ static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
int unmap_cons;
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
- skb = unmap_q->unmap_array[unmap_cons].skb;
+ skb = unmap_array[unmap_cons].skb;
if (!skb)
continue;
- unmap_q->unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
- unmap_array[unmap_cons],
- dma_addr), rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
skb->dev = bnad->netdev;
skb_reserve(skb, NET_IP_ALIGN);
unmap_array[unmap_prod].skb = skb;
- dma_addr = pci_map_single(bnad->pcidev, skb->data,
- rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bna_rcb *rcb = NULL;
unsigned int wi_range, packets = 0, wis = 0;
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- u32 flags;
+ u32 flags, unmap_cons;
u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[1];
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
- skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ skb = unmap_array[unmap_cons].skb;
BUG_ON(!(skb));
- unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_q->
- unmap_array[unmap_q->
- consumer_index],
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
- rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
/* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
if (mem_info->mem_type == BNA_MEM_T_DMA) {
BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
dma_pa);
- pci_free_consistent(bnad->pcidev,
- mem_info->mdl[i].len,
- mem_info->mdl[i].kva, dma_pa);
+ dma_free_coherent(&bnad->pcidev->dev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
} else
kfree(mem_info->mdl[i].kva);
}
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
for (i = 0; i < mem_info->num; i++) {
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
- pci_alloc_consistent(bnad->pcidev,
- mem_info->len, &dma_pa);
+ dma_alloc_coherent(&bnad->pcidev->dev,
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap_q->unmap_array[unmap_prod].skb = skb;
BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(skb_headlen(skb));
- dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
- dma_addr =
- pci_map_page(bnad->pcidev, frag->page,
- frag->page_offset, size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+ frag->page_offset, size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err)
goto release_regions;
}
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557def..a89117fa4970 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
/* Unmap queues for Tx / Rx cleanup */
struct bnad_skb_unmap {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr)
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct bnad_unmap_q {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0ba59d5aeb7f..2a961b7f7e17 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#endif
};
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
{
dev->vlan_features |= flags;
}
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f459fb2f9add..7a5e88f831f6 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
#define BNX2_CP_SCRATCH 0x001a0000
+#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
+
/*
* mcp_reg definition
@@ -6759,7 +6761,7 @@ struct bnx2 {
u32 tx_wake_thresh;
#ifdef BCM_CNIC
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
#endif
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 653c62475cb6..50d1e0793091 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.00-5"
-#define DRV_MODULE_RELDATE "2011/01/30"
+#define DRV_MODULE_VERSION "1.62.11-0"
+#define DRV_MODULE_RELDATE "2011/01/31"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -31,7 +31,7 @@
#define BNX2X_NEW_NAPI
#if defined(CONFIG_DCB)
-#define BCM_DCB
+#define BCM_DCBNL
#endif
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
+#define bnx2x_uc_addr(ha) ((ha)->addr)
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
/* chip independent shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
+ u32 rx_buf_size;
+
dma_addr_t status_blk_mapping;
struct sw_tx_bd *tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
+
#ifdef BCM_CNIC
/* FCoE L2 `fastpath' is right after the eth entries */
#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
struct eth_stats_query fw_stats;
struct mac_configuration_cmd mac_config;
struct mac_configuration_cmd mcast_config;
+ struct mac_configuration_cmd uc_mac_config;
struct client_init_ramrod_data client_init_data;
/* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
int tx_ring_size;
u32 rx_csum;
- u32 rx_buf_size;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
@@ -939,7 +946,7 @@ struct bnx2x {
struct eth_spe *spq_prod_bd;
struct eth_spe *spq_last_bd;
__le16 *dsb_sp_prod;
- atomic_t spq_left; /* serialize spq */
+ atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
/* used to synchronize spq accesses */
spinlock_t spq_lock;
@@ -949,6 +956,7 @@ struct bnx2x {
u16 eq_prod;
u16 eq_cons;
__le16 *eq_cons_sb;
+ atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
/* Flags for marking that there is a STAT_QUERY or
SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
#define MF_FUNC_DIS 0x1000
#define FCOE_MACS_SET 0x2000
#define NO_FCOE_FLAG 0x4000
+#define NO_ISCSI_OOO_FLAG 0x8000
+#define NO_ISCSI_FLAG 0x10000
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
int num_queues;
int disable_tpa;
int int_mode;
+ u32 *rx_indir_table;
struct tstorm_eth_mac_filter_config mac_filters;
#define BNX2X_ACCEPT_NONE 0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
- u8 iscsi_mac[ETH_ALEN];
u8 fip_mac[ETH_ALEN];
#endif
int dmae_ready;
/* used to synchronize dmae accesses */
- struct mutex dmae_mutex;
+ spinlock_t dmae_lock;
/* used to protect the FW mail box */
struct mutex fw_mb_mutex;
@@ -1447,6 +1459,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
@@ -1613,19 +1631,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
-
-/* CMNG constants
- derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE 100
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC 100
-/* resolution of fairness algorithm in usecs -
- coefficient for calculating the actual t fair */
-#define T_FAIR_COEF 10000000
+#define RS_PERIODIC_TIMEOUT_USEC 100
/* number of bytes in single QM arbitration cycle -
- coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES 40000
-#define FAIR_MEM 2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
#define ATTN_NIG_FOR_FUNC (1L << 8)
@@ -1782,5 +1804,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..b01b622f4e13 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#endif
}
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using the first packet of it.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ * aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /* A TPA aggregation won't have IP options or TCP options
+ * other than timestamp.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+ /* Check if there was a TCP timestamp; if there was, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
+
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sk_buff *skb,
struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx)
+ u16 cqe_idx, u16 parsing_flags)
{
struct sw_rx_page *rx_pg, old_rx_pg;
u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
- max(frag_size, (u32)len_on_bd));
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+ len_on_bd);
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -333,26 +367,28 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
struct sk_buff *skb = rx_buf->skb;
/* alloc new skb */
- struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
/* (no need to map the new skb) */
+ u16 parsing_flags =
+ le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
prefetch(skb);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
#ifdef BNX2X_STOP_ON_ERROR
- if (pad + len > bp->rx_buf_size) {
+ if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
"pad %d len %d rx_buf_size %d\n",
- pad, len, bp->rx_buf_size);
+ pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
}
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx)) {
- if ((le16_to_cpu(cqe->fast_path_cqe.
- pars_flags.flags) & PARSING_FLAGS_VLAN))
+ &cqe->fast_path_cqe, cqe_idx,
+ parsing_flags)) {
+ if (parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag));
@@ -582,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
u16 line_speed = bp->link_vars.line_speed;
if (IS_MF(bp)) {
- u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- /* Calculate the current MAX line speed limit for the DCC
- * capable devices
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
*/
- if (IS_MF_SD(bp)) {
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
- } else /* IS_MF_SI(bp)) */
- line_speed = (line_speed * maxCfg) / 100;
+ }
}
return line_speed;
@@ -821,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
u16 ring_prod;
int i, j;
- bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
- IP_HEADER_ALIGNMENT_PADDING;
-
- DP(NETIF_MSG_IFUP,
- "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
+ DP(NETIF_MSG_IFUP,
+ "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
if (!fp->disable_tpa) {
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
- netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (!fp->tpa_pool[i].skb) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
@@ -941,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -1249,6 +1283,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
return rc;
}
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+ /*
+ * Although there are no IP frames expected to arrive
+ * on this ring, we still want to add an
+ * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+ * overrun attack.
+ */
+ fp->rx_buf_size =
+ BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+ BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ else
+ fp->rx_buf_size =
+ bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+ IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@@ -1272,6 +1331,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
+ /* Set the receive queues buffer size */
+ bnx2x_set_rx_buf_size(bp);
+
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
@@ -1427,28 +1489,35 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_set_eth_mac(bp, 1);
+ /* Clear MC configuration */
+ if (CHIP_IS_E1(bp))
+ bnx2x_invalidate_e1_mc_list(bp);
+ else
+ bnx2x_invalidate_e1h_mc_list(bp);
+
+ /* Clear UC lists configuration */
+ bnx2x_invalidate_uc_list(bp);
+
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
+ /* Initialize Rx filtering */
+ bnx2x_set_rx_mode(bp->dev);
+
/* Start fast path */
switch (load_mode) {
case LOAD_NORMAL:
/* Tx queue should be only reenabled */
netif_tx_wake_all_queues(bp->dev);
/* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
smp_mb__after_clear_bit();
- /* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_DIAG:
- /* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
bp->state = BNX2X_STATE_DIAG;
break;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..8c401c990bf2 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -822,11 +822,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
@@ -892,7 +892,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
if (fp->tpa_state[i] == BNX2X_TPA_START)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
@@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ BNX2X_ERR("Illegal configuration detected for Max BW - "
+ "using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index fb60021f81fb..9a24d79c71d9 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,6 +19,9 @@
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
+#ifdef BCM_DCBNL
+#include <linux/dcbnl.h>
+#endif
#include "bnx2x.h"
#include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
return 0;
}
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+ u8 pri;
+
+ /* Choose the highest priority */
+ for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+ if (ent->pri_bitmap & (1 << pri))
+ break;
+ return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+ return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+ DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+ DCB_APP_IDTYPE_ETHTYPE;
+}
+
+static inline
+void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
+{
+ int i;
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+ bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
+ ~DCBX_APP_ENTRY_VALID;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+ int i, err = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+ /* avoid invalid user-priority */
+ if (up) {
+ struct dcb_app app;
+ app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ app.protocol = ent->app_id;
+ app.priority = delall ? 0 : up;
+ err = dcb_setapp(bp->dev, &app);
+ }
+ }
+ }
+ return err;
+}
+#endif
+
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
{
switch (state) {
case BNX2X_DCBX_STATE_NEG_RECEIVED:
{
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
-
+#ifdef BCM_DCBNL
+ /**
+ * Delete app tlvs from dcbnl before reading new
+ * negotiation results
+ */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
/* Read neg results if dcbx is in the FW */
if (bnx2x_dcbx_read_shmem_neg_results(bp))
return;
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bp->dcbx_error);
if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+#ifdef BCM_DCBNL
+ /**
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+#endif
bnx2x_dcbx_stop_hw_tx(bp);
return;
}
/* fall through */
+#ifdef BCM_DCBNL
+ /**
+ * Invalidate the local app tlvs if they are not added
+ * to the dcbnl app list to avoid deleting them from
+ * the list later on
+ */
+ bnx2x_dcbx_invalidate_local_apps(bp);
+#endif
}
case BNX2X_DCBX_STATE_TX_PAUSED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
}
/* DCB netlink */
-#ifdef BCM_DCB
-#include <linux/dcbnl.h>
+#ifdef BCM_DCBNL
#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
}
-static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
- u8 idtype, u16 idval)
-{
- if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
- return false;
-
- switch (idtype) {
- case DCB_APP_IDTYPE_ETHTYPE:
- if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
- DCBX_APP_SF_ETH_TYPE)
- return false;
- break;
- case DCB_APP_IDTYPE_PORTNUM:
- if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
- DCBX_APP_SF_PORT)
- return false;
- break;
- default:
- return false;
- }
- if (app_ent->app_id != idval)
- return false;
-
- return true;
-}
-
static void bnx2x_admin_app_set_ent(
struct bnx2x_admin_priority_app_table *app_ent,
u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
return bnx2x_set_admin_app_up(bp, idtype, idval, up);
}
-static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
- u16 idval)
-{
- int i;
- u8 up = 0;
-
- struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
-
- /* iterate over the app entries looking for idtype and idval */
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
- if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
- idtype, idval))
- break;
-
- if (i < DCBX_MAX_APP_PROTOCOL)
- /* if found return up */
- up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
- else
- DP(NETIF_MSG_LINK, "app not found\n");
-
- return up;
-}
-
static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
.setnumtcs = bnx2x_dcbnl_set_numtcs,
.getpfcstate = bnx2x_dcbnl_get_pfc_state,
.setpfcstate = bnx2x_dcbnl_set_pfc_state,
- .getapp = bnx2x_dcbnl_get_app_up,
.setapp = bnx2x_dcbnl_set_app_up,
.getdcbx = bnx2x_dcbnl_get_dcbx,
.setdcbx = bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
.setfeatcfg = bnx2x_dcbnl_set_featcfg,
};
-#endif /* BCM_DCB */
+#endif /* BCM_DCBNL */
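The BCM_DCBNL helpers added above derive a dcbnl user priority from each valid app-table entry by taking the highest set bit of its pri_bitmap, and bnx2x_dcbnl_update_applist() then pushes the matching entries to dcbnl via dcb_setapp() (with delall set, the priority passed is 0, which drops the entries from the dcbnl app list). A minimal standalone sketch of the bitmap-to-priority mapping, assuming MAX_PFC_PRIORITIES is 8 and using a plain helper rather than the driver structures:

	/* Illustrative only: highest set bit of an 8-bit priority bitmap,
	 * mirroring bnx2x_dcbx_dcbnl_app_up(); 0 means "no usable priority"
	 * and such entries are skipped by the update loop above.
	 */
	static unsigned char highest_user_prio(unsigned char pri_bitmap)
	{
		unsigned char pri;

		for (pri = 8 - 1; pri > 0; pri--)
			if (pri_bitmap & (1 << pri))
				break;
		return pri;
	}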
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index f650f98e4092..71b8eda43bd0 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -189,8 +189,9 @@ enum {
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
/* DCB netlink */
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
-#endif /* BCM_DCB */
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b48509..85291d8b3329 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed |= (cmd->speed_hi << 16);
if (IS_MF_SI(bp)) {
- u32 param = 0;
+ u32 param = 0, part;
u32 line_speed = bp->link_vars.line_speed;
/* use 10G if no link detected */
@@ -251,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
- if (line_speed < speed) {
- BNX2X_DEV_INFO("New speed should be less or equal "
- "to actual line speed\n");
+ part = (speed * 100) / line_speed;
+ if (line_speed < speed || !part) {
+ BNX2X_DEV_INFO("Speed setting should be in a range "
+ "from 1%% to 100%% "
+ "of actual line speed\n");
return -EINVAL;
}
/* load old values */
@@ -263,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
param &= FUNC_MF_CFG_MIN_BW_MASK;
/* set new MAX value */
- param |= (((speed * 100) / line_speed)
- << FUNC_MF_CFG_MAX_BW_SHIFT)
+ param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT)
& FUNC_MF_CFG_MAX_BW_MASK;
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
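The hunk above reworks the multi-function SI bandwidth setting: the requested speed is converted to an integer percentage of the current line speed, and that percentage (part) is what is written into the MAX_BW field. A short worked example, not driver code, showing why the new !part check is needed in addition to the line_speed comparison:

	/* Illustrative only: mirrors the 1%..100% range check above.
	 *   speed =  2500, line_speed = 10000 -> part = 25   (accepted)
	 *   speed = 10000, line_speed = 10000 -> part = 100  (accepted)
	 *   speed =    50, line_speed = 10000 -> part = 0    (rejected, below 1%)
	 *   speed = 20000, line_speed = 10000 -> rejected, above line speed
	 */
	static int mf_bw_percent(unsigned int speed, unsigned int line_speed)
	{
		unsigned int part = (speed * 100) / line_speed;

		if (line_speed < speed || !part)
			return -1;
		return part;
	}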
@@ -1618,7 +1619,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* prepare the loopback packet */
pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
if (!skb) {
rc = -ENOMEM;
goto test_loopback_exit;
@@ -1781,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x100, 0x350 }, /* manuf_info */
{ 0x450, 0xf0 }, /* feature_info */
{ 0x640, 0x64 }, /* upgrade_key_info */
- { 0x6a4, 0x64 },
{ 0x708, 0x70 }, /* manuf_key_info */
- { 0x778, 0x70 },
{ 0, 0 }
};
__be32 buf[0x350 / 4];
@@ -1933,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev,
buf[4] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bp->port.pmf)
- if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
#ifdef BNX2X_EXTRA_DEBUG
bnx2x_panic_dump(bp);
@@ -2134,6 +2133,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ void *rules __always_unused)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = BNX2X_NUM_ETH_QUEUES(bp);
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+ memcpy(indir->ring_index, bp->rx_indir_table,
+ copy_size * sizeof(bp->rx_indir_table[0]));
+ return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t i;
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+ return -EINVAL;
+ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+ if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+ return -EINVAL;
+
+ memcpy(bp->rx_indir_table, indir->ring_index,
+ indir->size * sizeof(bp->rx_indir_table[0]));
+ bnx2x_push_indir_table(bp);
+ return 0;
+}
+
static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_settings = bnx2x_get_settings,
.set_settings = bnx2x_set_settings,
@@ -2170,6 +2222,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.phys_id = bnx2x_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
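The new get_rxnfc/get_rxfh_indir/set_rxfh_indir hooks registered above expose the RSS queue count and the indirection table. The get path copies at most the caller-supplied number of entries but always reports the full table size, while the set path only accepts a complete table whose entries all reference existing RX queues. A standalone sketch of that validation contract (the table size of 128 and the queue count are illustrative stand-ins, not taken from the driver headers):

	/* Illustrative only: accept a user indirection table the same way
	 * bnx2x_set_rxfh_indir() does before copying it into rx_indir_table.
	 */
	#define ILLUSTRATIVE_TABLE_SIZE 128

	static int check_indir_table(const unsigned int *ring_index,
				     unsigned int size, unsigned int num_rx_queues)
	{
		unsigned int i;

		if (size != ILLUSTRATIVE_TABLE_SIZE)
			return -1;	/* only whole-table updates are allowed */
		for (i = 0; i < ILLUSTRATIVE_TABLE_SIZE; i++)
			if (ring_index[i] >= num_rx_queues)
				return -1;	/* entry points past the last RX queue */
		return 0;
	}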
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 548f5631c0dc..be503cc0a50b 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
#include "bnx2x_fw_defs.h"
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
struct license_key {
u32 reserved[6];
-#if defined(__BIG_ENDIAN)
- u16 max_iscsi_init_conn;
- u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
- u16 max_iscsi_trgt_conn;
- u16 max_iscsi_init_conn;
-#endif
+ u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
- u32 reserved_a[6];
-};
+ u32 reserved_a;
+
+ u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
+ u32 reserved_b[4];
+};
#define PORT_0 0
#define PORT_1 1
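The reworked license_key layout above packs the iSCSI (and, analogously, FCoE) connection limits into single 32-bit words with mask/shift macros alongside them. A small sketch of how a reader would unpack such a word, using only the macros introduced in that hunk:

	/* Illustrative only: split max_iscsi_conn into its target/initiator
	 * halves with the masks and shifts defined above; the max_fcoe_conn
	 * word follows the same pattern.
	 */
	static void decode_iscsi_limits(unsigned int max_iscsi_conn,
					unsigned short *trgt, unsigned short *init)
	{
		*trgt = (max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
			BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
		*init = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
			BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
	}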
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
- u32 Reserved0[16]; /* 0x158 */
-
+ u32 Reserved0[3]; /* 0x158 */
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT 0
+#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+ u32 Reserved01[12]; /* 0x168 */
/* for external PHY, or forced mode or during AN */
u16 xgxs_config_rx[4]; /* 0x198 */
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 Reserved1[56]; /* 0x1A8 */
u32 default_cfg; /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+ /*
+ * When the KR link must be forced to a speed (because the link
+ * partner is not KR-compliant), this parameter determines what
+ * triggers the forcing. When a GPIO is selected, a low input forces
+ * the speed. The default forced speed is currently 1G; in the future
+ * this may be widened so that another parameter selects the forced
+ * speed. Note that when force-1G is enabled, it overrides option 56
+ * (Link Speed).
+ */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Determines which GPIO (if any) is used to reset the external PHY */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
/* Enable BAM on KR */
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+ /* Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -381,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9a0895..fa6dbe3f2058 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
* want to handle "system kill" flow at the moment.
*/
- BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index dd1210fddfff..f2f367d4e74d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
/********************************************************/
#define ETH_HLEN 14
-#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
-#define BMAC_CONTROL_RX_ENABLE 2
+#define BMAC_CONTROL_RX_ENABLE 2
/***********************************************************/
/* Shortcut definitions */
@@ -79,7 +80,7 @@
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
#define GP_STATUS_10G_KX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
-#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
#define PHY_XGXS_FLAG 0x1
#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
/* */
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
- #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -153,15 +154,15 @@
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
- #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
-#define SFP_EEPROM_OPTIONS_ADDR 0x40
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE 2
+#define SFP_EEPROM_OPTIONS_SIZE 2
-#define EDC_MODE_LINEAR 0x0022
-#define EDC_MODE_LIMITING 0x0044
-#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -170,24 +171,18 @@
/* INTERFACE */
/**********************************************************/
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_write(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_read(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
-
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
- /**
+ /*
* mapping between entry priority to client number (0,1,2 -debug and
* management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
* 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
- /**
+ /*
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries, 3 -
* COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* defines which entries (clients) are subjected to WFQ arbitration */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
- /**
- * For strict priority entries defines the number of consecutive
- * slots for the highest priority.
- */
+ /*
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
- /**
+ /*
* mapping between the CREDIT_WEIGHT registers and actual client
* numbers
*/
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
/* ETS mode disable */
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
- /**
+ /*
* If ETS mode is enabled (there is no strict priority) defines a WFQ
* weight for COS0/COS1.
*/
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
/* ETS disabled configuration */
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- /**
- * defines which entries (clients) are subjected to WFQ arbitration
- * COS0 0x8
- * COS1 0x10
- */
+ /*
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
- /**
- * mapping between the ARB_CREDIT_WEIGHT registers and actual
- * client numbers (WEIGHT_0 does not actually have to represent
- * client 0)
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
- * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
- */
+ /*
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
- /**
- * Bitmap of 5bits length. Each bit specifies whether the entry behaves
- * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
- * entry, 4 - COS1 entry.
- * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
- * bit4 bit3 bit2 bit1 bit0
- * MCP and debug are strict
- */
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
if ((0 == total_bw) ||
(0 == cos0_bw) ||
(0 == cos1_bw)) {
- DP(NETIF_MSG_LINK,
- "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+ DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
u32 val = 0;
DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
- /**
+ /*
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries,
* 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* MCP and debug are strict
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
- /**
+ /*
* For strict priority entries defines the number of consecutive slots
* for the highest priority.
*/
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
- /**
- * mapping between entry priority to client number (0,1,2 -debug and
- * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
- * 3bits client num.
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
- * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
- * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
- */
+ /*
+ * mapping between entry priority to client number (0,1,2 -debug and
+ * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
val = (0 == strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
/* MAC/PBF section */
/******************************************************************/
static void bnx2x_emac_init(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
/* reset and unreset the emac core */
struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
u16 timeout;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
udelay(5);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
/* init emac - use read-modify-write */
/* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
}
static u8 bnx2x_emac_enable(struct link_params *params,
- struct link_vars *vars, u8 lb)
+ struct link_vars *vars, u8 lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
- /* for paladium */
- if (CHIP_REV_IS_EMUL(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
- }
- /* for fpga */
- else
-
- if (CHIP_REV_IS_FPGA(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
- 0);
- } else
/* ASIC */
if (vars->phy_flags & PHY_XGXS_FLAG) {
u32 ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
DP(NETIF_MSG_LINK, "XGXS\n");
/* select the master lanes (out of 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
- port*4, ser_lane);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
/* select XGXS */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes\n");
/* select SerDes */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 0);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
}
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_RESET);
+ EMAC_RX_MODE_RESET);
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_RESET);
+ EMAC_TX_MODE_RESET);
if (CHIP_REV_IS_SLOW(bp)) {
/* config GMII mode */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE,
- (val | EMAC_MODE_PORT_GMII));
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
} else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
- /**
- * Setting this bit causes MAC control frames (except for pause
- * frames) to be passed on for processing. This setting has no
- * affect on the operation of the pause frames. This bit effects
- * all packets regardless of RX Parser packet sorting logic.
- * Turn the PFC off to make sure we are in Xon state before
- * enabling it.
- */
+ /*
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
- if (CHIP_REV_IS_EMUL(bp)) {
- /* take the BigMac out of reset */
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
- /* enable access for bmac registers */
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
- } else
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
vars->mac_type = MAC_TYPE_EMAC;
return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
udelay(30);
/* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
- /**
- * Set Time (based unit is 512 bit time) between automatic
- * re-sending of PP packets amd enable automatic re-send of
- * Per-Priroity Packet as long as pp_gen is asserted and
- * pp_disable is low.
- */
+ /*
+ * Set the time (base unit is 512 bit times) between automatic
+ * re-sending of PP packets and enable automatic re-send of
+ * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
val = 0x8000;
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
- wb_data, 2);
+ wb_data, 2);
/* mac control */
val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
- /* The number of free blocks below which the pause signal to class 0
- of MAC #n is asserted. n=0,1 */
+ /*
+ * The number of free blocks below which the pause signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
- /* The number of free blocks above which the pause signal to class 0
- of MAC #n is de-asserted. n=0,1 */
+ /*
+ * The number of free blocks above which the pause signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
- /* The number of free blocks below which the full signal to class 0
- of MAC #n is asserted. n=0,1 */
+ /*
+ * The number of free blocks below which the full signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
- /* The number of free blocks above which the full signal to class 0
- of MAC #n is de-asserted. n=0,1 */
+ /*
+ * The number of free blocks above which the full signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
- /**
+ /*
* The number of free blocks below which the pause signal to
* class 1 of MAC #n is asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
- /**
+ /*
* The number of free blocks above which the pause signal to
* class 1 of MAC #n is de-asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
- /**
+ /*
* The number of free blocks below which the full signal to
* class 1 of MAC #n is asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
- /**
+ /*
* The number of free blocks above which the full signal to
* class 1 of MAC #n is de-asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
}
}
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
FEATURE_CONFIG_PFC_ENABLED;
DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
- /**
+ /*
* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm_mask = REG_RD(bp,
port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK);
- /**
+ /*
* nig params will override non PFC params, since it's possible to
* do transition from PFC to SAFC
*/
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
- /**
+ /*
* The PFC and pause are orthogonal to one another, meaning when
* PFC is enabled, the pause are disabled, and when PFC is
* disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
- u8 is_lb)
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
/* XGXS control */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
/* tx MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
/* mac control */
val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
}
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
/* configure safc */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
- /* fix for emulation */
- if (CHIP_REV_IS_EMUL(bp)) {
- wb_data[0] = 0xf000;
- wb_data[1] = 0;
- REG_WR_DMAE(bp,
- bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
- wb_data, 2);
- }
-
return 0;
}
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
udelay(30);
/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
udelay(30);
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
u32 val;
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
struct bnx2x *bp = params->bp;
REG_WR(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].link_status),
- link_status);
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
if (CHIP_IS_E2(bp)) {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
} else {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
}
static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
- u32 line_speed)
+ u32 line_speed)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
/* update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
+/*
+ * bnx2x_get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on mdc_mdio_access, the port and the port-swap
+ * configuration. Each phy has a default access mode, which may also
+ * be overridden by the nvram configuration. That parameter, whether
+ * it is the default phy configuration or the nvram override, is
+ * passed here as mdc_mdio_access and selects the emac_base for the
+ * CL45 read/write operations.
+ */
static u32 bnx2x_get_emac_base(struct bnx2x *bp,
u32 mdc_mdio_access, u8 port)
{
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
}
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
{
u32 tmp, saved_mode;
u8 i, rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
/* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
}
}
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
return rc;
}
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
u32 val, saved_mode;
u16 i;
u8 rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT));
+ EMAC_MDIO_MODE_CLOCK_CNT));
val |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
}
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the read request on it
*/
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the write request on it
*/
@@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, aer_val);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
}
static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0x3800);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0x3800);
}
/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
bnx2x_set_serdes_access(bp, port);
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
- port*0x10,
- DEFAULT_PHY_DEV_ADDR);
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
}
static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
- port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- params->phy[INT_PHY].def_md_devad);
+ params->phy[INT_PHY].def_md_devad);
}
void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g;
u8 port = params->port;
vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
switch (vars->link_status &
- LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
/* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
- ser_lane = ((params->lane_config &
+ ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
/* set the master_ln for AN */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- &new_master_ln);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2 ,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- (new_master_ln | ser_lane));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2 ,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
}
static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
-
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
/* reset the unicore */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
udelay(5);
/* the reset erased the previous bank value */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
}
}
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
return -EINVAL;
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
- /* Each two bits represents a lane number:
- No swap is 0123 => 0x1b no need to enable the swap */
+ /*
+ * Each pair of bits represents a lane number:
+ * no swap is 0123 => 0x1b, so there is no need to enable the swap.
+ */
u16 ser_lane, rx_lane_swap, tx_lane_swap;
ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
tx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP,
- (rx_lane_swap |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP,
- (tx_lane_swap |
- MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 control2;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
phy->speed_cap_mask, control2);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
/* Disable parallel detection of HiG */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
}
}
static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars,
- u8 enable_cl73)
+ struct link_vars *vars,
+ u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* CL37 Autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
/* CL37 Autoneg Enabled */
if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* Enable/Disable Autodetection */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
/* Enable TetonII and BAM autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
if (vars->line_speed == SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_UCTRL,
- 0xe);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
/* Enable BAM Station Manager*/
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
/* CL73 Autoneg Enabled */
reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
} else /* CL73 Autoneg Disabled */
reg_val = 0;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
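/*
 * For reference, a sketch of what the renamed accessors presumably expand
 * to: the CL45_*_OVER_CL22 helpers become CL22_*_OVER_CL45 because they
 * perform clause-22 style banked XGXS register accesses tunnelled through
 * clause-45 MDIO frames. The bodies below are an assumption, inferred from
 * the open-coded form used for the XGXS loopback setup later in this file
 * (devad 5, bank + low nibble of the register offset).
 */
#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
	bnx2x_cl45_write(_bp, _phy, (_phy)->def_md_devad, \
			 ((_bank) + ((_addr) & 0xf)), (_val))
#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
	bnx2x_cl45_read(_bp, _phy, (_phy)->def_md_devad, \
			((_bank) + ((_addr) & 0xf)), (_val))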
/* program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* program duplex, disable autoneg and sgmii*/
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
-
- /* program speed
- - needed only if the speed is greater than 1G (2.5G or 10G) */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /*
+ * program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP3, 0x400);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
}
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
- /* resolve pause mode and advertisement
- * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+ /*
+ * Resolve pause mode and advertisement.
+ * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+ */
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- } else {
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
- }
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_TX:
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
struct link_params *params,
- u16 ieee_fc)
+ u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
/* for AN, we are always publishing full duplex */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
}
static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- &mii_control);
-
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- (mii_control |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
- MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
}
}
static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
/* in SGMII mode, the unicore is always slave */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- &control1);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
/* set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- control1);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
/* if forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
/* set speed, disable autoneg */
u16 mii_control;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
} else { /* AN mode */
/* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
{ /* LD LP */
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
break;
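/*
 * Worked example for the table above: pause_result packs the local
 * advertisement in bits [3:2] (ASYM, PAUSE) and the link partner's in
 * bits [1:0], per 802.3 Table 28B-3. The function below is a hypothetical
 * illustration, not driver code.
 */
static void pause_resolve_example(struct link_vars *vars)
{
	/* Local: ASYM=1, PAUSE=0;  Partner: ASYM=1, PAUSE=1  ->  0xb */
	u32 pause_result = (1 << 3) | (0 << 2) | (1 << 1) | (1 << 0);

	bnx2x_pause_resolve(vars, pause_result);
	/* vars->flow_ctrl is now BNX2X_FLOW_CTRL_TX:
	 * send PAUSE frames, but don't honor received ones */
}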
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
u16 pd_10g, status2_1000x;
if (phy->req_line_speed != SPEED_AUTO_NEG)
return 0;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
params->port);
return 1;
}
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
- &pd_10g);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
>> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
pause_result);
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
pause_result);
}
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
u16 rx_status, ustat_val, cl37_fsm_recieved;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_RX0,
- MDIO_RX0_RX_STATUS,
- &rx_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
/* Step 2: Check CL73 state machine */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_USTAT1,
- &ustat_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
if ((ustat_val &
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
"ustat_val(0x8371) = 0x%x\n", ustat_val);
return;
}
- /* Step 3: Check CL37 Message Pages received to indicate LP
- supports only CL37 */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_REMOTE_PHY,
- MDIO_REMOTE_PHY_MISC_RX_STATUS,
- &cl37_fsm_recieved);
+ /*
+ * Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_recieved);
if ((cl37_fsm_recieved &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
cl37_fsm_recieved);
return;
}
- /* The combined cl37/cl73 fsm state information indicating that we are
- connected to a device which does not support cl73, but does support
- cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+ /*
+ * The combined cl37/cl73 fsm state information indicating that
+ * we are connected to a device which does not support cl73, but
+ * does support cl37 BAM. In this case we disable cl73 and
+ * restart cl37 auto-neg
+ */
+
/* Disable CL73 */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
/* Restart CL37 autoneg */
bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed , gp_status;
+ u16 new_line_speed, gp_status;
u8 rc = 0;
/* Read gp_status */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
if (phy->req_line_speed == SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 bank;
/* read precomp */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_LP_UP2, &lp_up2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
/* bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
- CL45_RD_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, &tx_driver);
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
/* replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
- CL45_WR_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, tx_driver);
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
}
}
}
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
- EMAC_REG_EMAC_MODE,
- (EMAC_MODE_25G_MODE |
- EMAC_MODE_PORT_MII_10M |
- EMAC_MODE_HALF_DUPLEX));
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
- GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
- mode);
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_RX0_RX_EQ_BOOST,
phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER,
phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
- params->loopback_mode == LOOPBACK_EXT)) {
+ params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
/* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisment(phy, params,
- vars->ieee_fc);
+ vars->ieee_fc);
/* enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
}
static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
- struct bnx2x_phy *phy)
+ struct bnx2x_phy *phy,
+ struct link_params *params)
{
u16 cnt, ctrl;
/* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
break;
msleep(1);
}
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
return cnt;
}
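/*
 * Usage sketch matching the updated callers (e.g. the 8705 init further
 * down in this patch): issue the PMA soft reset, then poll until it
 * self-clears or the 1 second timeout fires, which now logs a per-port
 * warning via the added link_params argument. Hypothetical helper name.
 */
static void phy_reset_example(struct bnx2x *bp, struct bnx2x_phy *phy,
			      struct link_params *params)
{
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
	bnx2x_wait_reset_complete(bp, phy, params);
}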
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
u32 mask;
struct bnx2x *bp = params->bp;
- /* setting the status to report on link up
- for either XGXS or SerDes */
-
+ /* Setting the status to report on link up for either XGXS or SerDes */
if (params->switch_cfg == SWITCH_CFG_10G) {
mask = (NIG_MASK_XGXS0_LINK10G |
NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
{
u32 latch_status = 0;
- /**
+ /*
* Disable the MI INT ( external phy int ) by writing 1 to the
* status register. Link down indication is high-active-signal,
* so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
/* For all latched-signal=up : Re-Arm Latch signals */
REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
- (latch_status & 0xfffe) | (latch_status & 1));
+ (latch_status & 0xfffe) | (latch_status & 1));
}
/* For all latched-signal=up,Write original_signal to status */
}
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g)
+ struct link_vars *vars, u8 is_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- /* first reset all status
- * we assume only one line will be change at a time */
+ /*
+ * First reset all status we assume only one line will be
+ * change at a time
+ */
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
if (vars->phy_link_up) {
if (is_10g) {
- /* Disable the 10G link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the 10G link interrupt by writing 1 to the
+ * status register
*/
DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
NIG_STATUS_XGXS0_LINK10G);
} else if (params->switch_cfg == SWITCH_CFG_10G) {
- /* Disable the link interrupt
- * by writing 1 to the relevant lane
- * in the status register
+ /*
+ * Disable the link interrupt by writing 1 to the
+ * relevant lane in the status register
*/
u32 ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /* Disable the link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the link interrupt by writing 1 to the status
+ * register
*/
bnx2x_bits_en(bp,
NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
}
if ((params->num_phys == MAX_PHYS) &&
(params->phy[EXT_PHY2].ver_addr != 0)) {
- spirom_ver = REG_RD(bp,
- params->phy[EXT_PHY2].ver_addr);
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
if (params->phy[EXT_PHY2].format_fw_ver) {
*ver_p = '/';
ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
/* change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ port*0x18));
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)),
- 0x2800);
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_CL73_IEEEB0 +
- (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
- 0x6041);
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
msleep(200);
/* set aer mmd back */
bnx2x_set_aer_mmd_xgxs(params, phy);
/* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- md_devad);
-
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
} else {
u16 mii_ctrl;
DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params,
case LED_MODE_OFF:
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- SHARED_HW_CFG_LED_MAC1);
+ SHARED_HW_CFG_LED_MAC1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
break;
case LED_MODE_OPER:
- /**
+ /*
* For all other phys, OPER mode is same as ON, so in case
* link is down, do nothing
- **/
+ */
if (!vars->link_up)
break;
case LED_MODE_ON:
if (params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
CHIP_IS_E2(bp) && params->num_phys == 2) {
- /**
- * This is a work-around for E2+8727 Configurations
- */
+ /*
+ * This is a work-around for E2+8727 Configurations
+ */
if (mode == LED_MODE_ON ||
speed == SPEED_10000){
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params,
return rc;
}
} else if (SINGLE_MEDIA_DIRECT(params)) {
- /**
- * This is a work-around for HW issue found when link
- * is up in CL73
- */
+ /*
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
} else {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
}
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
- port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
- LED_BLINK_RATE_VAL);
+ LED_BLINK_RATE_VAL);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
- port*4, 1);
+ port*4, 1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
if (CHIP_IS_E1(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
(speed == SPEED_100) ||
(speed == SPEED_10))) {
- /* On Everest 1 Ax chip versions for speeds less than
- 10G LED scheme is different */
+ /*
+ * On Everest 1 Ax chip versions for speeds less than
+ * 10G LED scheme is different
+ */
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
+ + port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
- port*4, 0);
+ port*4, 0);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
- port*4, 1);
+ port*4, 1);
}
break;
@@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
}
-/**
+/*
* This function comes to reflect the actual link state read DIRECTLY from the
* HW
*/
@@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 ext_phy_link_up = 0, serdes_phy_type;
struct link_vars temp_vars;
- CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
/* link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
@@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
u8 rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
- /**
- * In case of external phy existence, the line speed would be the
- * line speed linked up by the external phy. In case it is direct
- * only, then the line_speed during initialization will be
- * equal to the req_line_speed
- */
+ /*
+ * In case of external phy existence, the line speed would be the
+ * line speed linked up by the external phy. In case it is direct
+ * only, then the line_speed during initialization will be
+ * equal to the req_line_speed
+ */
vars->line_speed = params->phy[INT_PHY].req_line_speed;
- /**
+ /*
* Initialize the internal phy in case this is a direct board
* (no external phys), or this board has external phy which requires
* to first.
@@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
if (!non_ext_phy)
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- /**
+ /*
* No need to initialize second phy in case of first
* phy only selection. In case of second phy, we do
* need to initialize the first phy, since they are
* connected.
- **/
+ */
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- DP(NETIF_MSG_LINK, "Not initializing"
- "second phy\n");
+ DP(NETIF_MSG_LINK, "Ignoring second phy\n");
continue;
}
params->phy[phy_index].config_init(
@@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
/* reset the SerDes/XGXS */
- REG_WR(params->bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_3_CLEAR,
- (0x1ff << (params->port*16)));
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
}
static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
else
gpio_port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
}
@@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
/* reset BigMac */
bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
return 0;
}
@@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
msleep(20);
return rc;
}
-/**
+/*
* The bnx2x_link_update function should be called upon link
* interrupt.
* Link is considered up as follows:
@@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port*0x18) > 0);
+ port*0x18) > 0);
DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
is_mi_int,
- REG_RD(bp,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
/* disable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- /**
- * Step 1:
- * Check external link change only for external phys, and apply
- * priority selection between them in case the link on both phys
- * is up. Note that the instead of the common vars, a temporary
- * vars argument is used since each phy may have different link/
- * speed/duplex result
- */
+ /*
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+	 * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
switch (bnx2x_phy_selection(params)) {
case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through itself only.
* Its not clear how to reset the link on the second phy
- **/
+ */
active_external_phy = EXT_PHY1;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through the second PHY.
- **/
+ */
active_external_phy = EXT_PHY2;
break;
default:
- /**
+ /*
* Link indication on both PHYs with the following cases
* is invalid:
* - FIRST_PHY means that second phy wasn't initialized,
@@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
* - DEFAULT should be overriden during initialiazation
- **/
+ */
DP(NETIF_MSG_LINK, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!\n",
params->multi_phy_config);
@@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
}
}
prev_line_speed = vars->line_speed;
- /**
- * Step 2:
- * Read the status of the internal phy. In case of
- * DIRECT_SINGLE_MEDIA board, this link is the external link,
- * otherwise this is the link between the 577xx and the first
- * external phy
- */
+ /*
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
if (params->phy[INT_PHY].read_status)
params->phy[INT_PHY].read_status(
&params->phy[INT_PHY],
params, vars);
- /**
+ /*
* The INT_PHY flow control reside in the vars. This include the
* case where the speed or flow control are not set to AUTO.
* Otherwise, the active external phy flow control result is set
@@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
*/
if (active_external_phy > INT_PHY) {
vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
- /**
+ /*
* Link speed is taken from the XGXS. AN and FC result from
* the external phy.
*/
vars->link_status |= phy_vars[active_external_phy].link_status;
- /**
+ /*
* if active_external_phy is first PHY and link is up - disable
* disable TX on second external PHY
*/
@@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
" ext_phy_line_speed = %d\n", vars->flow_ctrl,
vars->link_status, ext_phy_line_speed);
- /**
+ /*
* Upon link speed change set the NIG into drain mode. Comes to
* deals with possible FIFO glitch due to clk change when speed
* is decreased without link down indicator
@@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
ext_phy_line_speed);
vars->phy_link_up = 0;
} else if (prev_line_speed != vars->line_speed) {
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
msleep(1);
}
}
@@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
bnx2x_link_int_ack(params, vars, link_10g);
- /**
- * In case external phy link is up, and internal link is down
- * (not initialized yet probably after link initialization, it
- * needs to be initialized.
- * Note that after link down-up as result of cable plug, the xgxs
- * link would probably become up again without the need
- * initialize it
- */
+ /*
+ * In case external phy link is up, and internal link is down
+	 * (not initialized yet, probably after link initialization), it
+	 * needs to be initialized.
+	 * Note that after link down-up as a result of cable plug, the xgxs
+	 * link would probably become up again without the need to
+	 * initialize it
+ */
if (!(SINGLE_MEDIA_DIRECT(params))) {
DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
" init_preceding = %d\n", ext_phy_link_up,
@@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars);
}
}
- /**
- * Link is up only if both local phy and external phy (in case of
- * non-direct board) are up
+ /*
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up
*/
vars->link_up = (vars->phy_link_up &&
(ext_phy_link_up ||
@@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
msleep(1);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
u16 fw_ver1, fw_ver2;
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
phy->ver_addr);
}
@@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
pause_result = (ld_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
@@ -3881,31 +3858,31 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
/* Boot port from external ROM */
/* EDC grst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
/* ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x008c);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
/* Release srst bit */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
/* Delay 100ms per the PHY specifications */
msleep(100);
@@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
/* Clear ser_boot_ctl bit */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
bnx2x_save_bcm_spirom_ver(bp, phy, port);
DP(NETIF_MSG_LINK,
@@ -3958,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
/* Read 8073 HW revision*/
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val != 1) {
/* No need to workaround in 8073 A1 */
@@ -3967,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
}
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
/* SNR should be applied only for version 0x102 */
if (val != 0x102)
@@ -3982,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt, cnt1 ;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val > 0) {
/* No need to workaround in 8073 A1 */
@@ -3991,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
}
/* XAUI workaround in 8073 A0: */
- /* After loading the boot ROM and restarting Autoneg,
- poll Dev1, Reg $C820: */
+ /*
+ * After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
for (cnt = 0; cnt < 1000; cnt++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &val);
- /* If bit [14] = 0 or bit [13] = 0, continue on with
- system initialization (XAUI work-around not required,
- as these bits indicate 2.5G or 1G link up). */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /*
+ * If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
if (!(val & (1<<14)) || !(val & (1<<13))) {
DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
return 0;
} else if (!(val & (1<<15))) {
- DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
- /* If bit 15 is 0, then poll Dev1, Reg $C841 until
- it's MSB (bit 15) goes to 1 (indicating that the
- XAUI workaround has completed),
- then continue on with system initialization.*/
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+ /*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+ * MSB (bit15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -4093,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
gpio_port = params->port;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
/* enable LASI */
bnx2x_cl45_write(bp, phy,
@@ -4114,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
- /**
- * If this is forced speed, set to KR or KX (all other are not
- * supported)
- */
/* Swap polarity if required - Must be done only in non-1G mode */
if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
/* Configure the 8073 to swap _P and _N of the KR lines */
@@ -4160,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
val = (1<<7);
} else if (phy->req_line_speed == SPEED_2500) {
val = (1<<5);
- /* Note that 2.5G works only
- when used with 1G advertisment */
+ /*
+ * Note that 2.5G works only when used with 1G
+			 * advertisement
+ */
} else
val = (1<<5);
} else {
@@ -4170,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
- /* Note that 2.5G works only when
- used with 1G advertisment */
+		/* Note that 2.5G works only when used with 1G advertisement */
if (phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4211,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
/* Add support for CL37 (passive mode) III */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
- /* The SNR will improve about 2db by changing
- BW and FEE main tap. Rest commands are executed
- after link is up*/
+ /*
+	 * The SNR will improve about 2db by changing BW and FFE main
+ * tap. Rest commands are executed after link is up
+ * Change FFE main cursor to 5 in EDC register
+ */
if (bnx2x_8073_is_snr_needed(bp, phy))
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4297,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
- /* The SNR will improve about 2dbby
- changing the BW and FEE main tap.*/
- /* The 1st write to change FFE main
- tap is set before restart AN */
- /* Change PLL Bandwidth in EDC
- register */
+ /*
+		 * The SNR will improve about 2db by changing the BW and FFE main
+ * tap. The 1st write to change FFE main tap is set before
+ * restart AN. Change PLL Bandwidth in EDC register
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
0x26BC);
@@ -4346,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_XS_DEVAD,
MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
- /**
- * Set bit 3 to invert Rx in 1G mode and clear this bit
- * when it`s in 10G mode.
- */
+ /*
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it`s in 10G mode.
+ */
if (vars->line_speed == SPEED_1000) {
DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
"the 8073\n");
@@ -4381,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
}
/******************************************************************/
@@ -4396,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "init 8705\n");
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4451,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* SFP+ module Section */
/******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
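/*
 * The new helper resolves which GPIO "port" to drive: the path on E2, the
 * port number otherwise, flipped only when both the NIG port-swap strap
 * and its override are set ((swap_val && swap_override) evaluates to 0 or
 * 1, so the XOR toggles the low bit). Hypothetical illustration:
 */
static u8 gpio_port_swap_example(u8 port, u32 swap_val, u32 swap_override)
{
	return port ^ (swap_val && swap_override); /* flips only when both set */
}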
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
struct bnx2x_phy *phy,
- u8 port,
u8 tx_en)
{
u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
- DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
- tx_en, port);
/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val);
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
- if (tx_en)
- val &= ~(1<<15);
- else
- val |= (1<<15);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- val);
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
}
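/*
 * Usage sketch, mirroring the 8727 DISABLE_TX/ENABLE_TX handlers further
 * down in this patch: the port now comes from params, and the laser
 * control method (PHY-identifier MDIO bit vs. a GPIO pin) is taken from
 * the per-port sfp_ctrl word in shmem. Hypothetical wrapper name.
 */
static void sfp_tx_laser_example(struct bnx2x_phy *phy,
				 struct link_params *params, u8 enable)
{
	bnx2x_sfp_set_transmitter(params, phy, enable ? 1 : 0);
}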
static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -4492,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | 0xa000));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ addr);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x2c0f);
+ 0x2c0f);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -4526,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
@@ -4545,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -4558,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- ((byte_cnt < 2) ? 2 : byte_cnt));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
/* Set the destination address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- 0x8004,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x8002);
- /* Wait appropriate time for two-wire command to finish before
- polling the status register */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /*
+ * Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
msleep(1);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -4604,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"Got bad status 0x%x when reading from SFP+ EEPROM\n",
(val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
- return -EINVAL;
+ return -EFAULT;
}
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
@@ -4628,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
return -EINVAL;
}
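/*
 * bnx2x_read_sfp_module_eeprom() loses its static qualifier here, so other
 * parts of the driver can read SFP+ module EEPROM contents directly.
 * Caller sketch, mirroring the vendor-name read in bnx2x_verify_sfp_module()
 * below; the helper name and buffer size are illustrative.
 */
static void sfp_vendor_name_example(struct bnx2x_phy *phy,
				    struct link_params *params)
{
	u8 name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

	if (bnx2x_read_sfp_module_eeprom(phy, params,
					 SFP_EEPROM_VENDOR_NAME_ADDR,
					 SFP_EEPROM_VENDOR_NAME_SIZE,
					 name) == 0)
		name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
}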
static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
struct link_params *params,
- u16 *edc_mode)
+ u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
u8 val, check_limiting_mode = 0;
@@ -4664,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
u8 copper_module_type;
- /* Check if its active cable( includes SFP+ module)
- of passive cable*/
+ /*
+		 * Check if it's an active cable (includes SFP+ module)
+		 * or a passive cable
+ */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4724,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
return 0;
}
-/* This function read the relevant field from the module ( SFP+ ),
- and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+), and verifies
+ * it is compliant with this board
+ */
static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4774,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_VENDOR_NAME_ADDR,
- SFP_EEPROM_VENDOR_NAME_SIZE,
- (u8 *)vendor_name))
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_PART_NO_ADDR,
- SFP_EEPROM_PART_NO_SIZE,
- (u8 *)vendor_pn))
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
vendor_pn[0] = '\0';
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
- " Port %d from %s part number %s\n",
- params->port, vendor_name, vendor_pn);
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
phy->flags |= FLAGS_SFP_NOT_APPROVED;
return -EINVAL;
}
@@ -4803,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
u8 val;
struct bnx2x *bp = params->bp;
u16 timeout;
- /* Initialization time after hot-plug may take up to 300ms for some
- phys type ( e.g. JDSU ) */
+ /*
+ * Initialization time after hot-plug may take up to 300ms for
+	 * some phy types (e.g. JDSU)
+ */
+
for (timeout = 0; timeout < 60; timeout++) {
if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
== 0) {
@@ -4823,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
/* Make sure GPIOs are not using for LED mode */
u16 val;
/*
- * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+ * In the GPIO register, bit 4 is use to determine if the GPIOs are
* operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
* output
* Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
* Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
* where the 1st bit is the over-current(only input), and 2nd bit is
* for power( only output )
- */
-
- /*
+ *
* In case of NOC feature is disabled and power is up, set GPIO control
* as input to enable listening of over-current indication
*/
@@ -4861,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
u16 cur_limiting_mode;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &cur_limiting_mode);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
cur_limiting_mode);
if (edc_mode == EDC_MODE_LIMITING) {
- DP(NETIF_MSG_LINK,
- "Setting LIMITING MODE\n");
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
@@ -4878,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
- /* Changing to LRM mode takes quite few seconds.
- So do it only if current mode is limiting
- ( default is LRM )*/
+ /*
+ * Changing to LRM mode takes quite few seconds. So do it only
+ * if current mode is limiting (default is LRM)
+ */
if (cur_limiting_mode != EDC_MODE_LIMITING)
return 0;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- 0x128);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL0,
- 0x4008);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0xaaaa);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
}
return 0;
}
static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
struct bnx2x_phy *phy,
- u16 edc_mode)
+ u16 edc_mode)
{
u16 phy_identifier;
u16 rom_ver2_val;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &phy_identifier);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier & ~(1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &rom_ver2_val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier | (1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
return 0;
}
@@ -4946,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
switch (action) {
case DISABLE_TX:
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
break;
case ENABLE_TX:
if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4959,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
}
}
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
+
static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4976,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
- } else if (bnx2x_verify_sfp_module(phy, params) !=
- 0) {
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
/* check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4995,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
}
} else {
/* Turn off fault module-detected led */
- DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_LOW,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
}
/* power up the SFP module */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
bnx2x_8727_power_module(bp, phy, 1);
- /* Check and set limiting mode / LRM mode on 8726.
- On 8727 it is done automatically */
+ /*
+ * Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
else
@@ -5018,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (rc == 0 ||
(val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
else
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
return rc;
}
@@ -5033,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
u8 port = params->port;
/* Set valid module led off */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
- /* Get current gpio val refelecting module plugged in / out*/
+ /* Get current gpio val reflecting module plugged in / out */
gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
/* Call the handling function in case module is detected */
@@ -5053,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
} else {
u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
port);
- /* Module was plugged out. */
- /* Disable transmit for this module */
+ /*
+ * Module was plugged out.
+ * Disable transmit for this module
+ */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -5100,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
" link_status 0x%x\n", rx_sd, pcs_status, val2);
- /* link is up if both bit 0 of pmd_rx_sd and
- * bit 0 of pcs_status are set, or if the autoneg bit
- * 1 is set
+ /*
+ * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
*/
link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
if (link_up) {
@@ -5123,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 cnt, val;
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait until fw is loaded */
for (cnt = 0; cnt < 100; cnt++) {
@@ -5197,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
0x0004);
}
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /*
+ * If TX Laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode if the TX Laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+
return 0;
}
@@ -5231,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
/* Set soft reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
/* wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
msleep(200);
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5285,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
u32 val;
u32 swap_val, swap_override, aeu_gpio_mask, offset;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_8726_external_rom_boot(phy, params);
- /* Need to call module detected on initialization since
- the module detection triggered by actual module
- insertion might occur before driver is loaded, and when
- driver is loaded, it reset all registers, including the
- transmitter */
+ /*
+ * Need to call module detection on initialization since the module
+ * detection triggered by actual module insertion might occur before
+ * the driver is loaded, and when the driver is loaded, it resets all
+ * registers, including the transmitter
+ */
bnx2x_sfp_module_detection(phy, params);
if (phy->req_line_speed == SPEED_1000) {
@@ -5334,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
- /* Enable RX-ALARM control to receive
- interrupt for 1G speed change */
+ /*
+ * Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
bnx2x_cl45_write(bp, phy,
@@ -5367,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
/* Set GPIO3 to trigger SFP+ module insertion/removal */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+ MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
/* The GPIO should be swapped if the swap register is set and active */
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5458,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
u32 swap_val, swap_override;
u8 port;
- /**
+ /*
* The PHY reset is controlled by GPIO 1. Fake the port number
* to cancel the swap done in set_gpio()
*/
@@ -5467,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
port = (swap_val && swap_override) ^ 1;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 tmp1, val, mod_abs;
+ u32 tx_en_mode;
+ u16 tmp1, val, mod_abs, tmp2;
u16 rx_alarm_ctrl_val;
u16 lasi_ctrl_val;
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
lasi_ctrl_val = 0x0004;
@@ -5493,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
- /* Initially configure MOD_ABS to interrupt when
- module is presence( bit 8) */
+ /*
+ * Initially configure MOD_ABS to interrupt when module is
+ * present (bit 8)
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
- /* Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
@@ -5515,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
if (phy->flags & FLAGS_NOC)
val |= (3<<5);
- /**
+ /*
* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
* status which reflect SFP+ module over-current
*/
@@ -5542,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /**
+ /*
* Power down the XAUI until link is up in case of dual-media
* and 1G
*/
@@ -5568,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
} else {
- /**
+ /*
* Since the 8727 has only single reset pin, need to set the 10G
* registers although it is default
*/
@@ -5584,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
0x0008);
}
- /* Set 2-wire transfer rate of SFP+ module EEPROM
+ /*
+ * Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
*/
@@ -5607,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
phy->tx_preemphasis[1]);
}
+ /*
+ * If TX Laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode if the TX Laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ }
+
return 0;
}
@@ -5620,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
port_feature_config[params->port].
config));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1<<8)) {
/* Module is absent */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is absent\n");
- /* 1. Set mod_abs to detect next module
- presence event
- 2. Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed */
+ /*
+ * Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
} else {
/* Module is present */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is present\n");
- /* First thing, disable transmitter,
- and if the module is ok, the
- module_detection will enable it*/
-
- /* 1. Set mod_abs to detect next module
- absent event ( bit 8)
- 2. Restore the default polarity of the OPRXLOS signal and
- this signal will then correctly indicate the presence or
- absence of the Rx signal. (bit 9) */
+ /*
+ * First disable the transmitter, and if the module is ok, the
+ * module_detection will enable it.
+ * 1. Set mod_abs to detect next module absent event (bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal and
+ * this signal will then correctly indicate the presence or
+ * absence of the Rx signal. (bit 9)
+ */
mod_abs |= (1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs |= (1<<9);
@@ -5667,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed. This is need to be done
- before calling the module detection, otherwise it will clear
- the link update alarm */
+ /*
+ * Clear RX alarm since it stays up as long as the mod_abs
+ * wasn't changed. This needs to be done before calling the
+ * module detection, otherwise it will clear the link update
+ * alarm
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5678,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
bnx2x_sfp_module_detection(phy, params);
@@ -5687,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
- rx_alarm_status);
- /* No need to check link status in case of
- module plugged in/out */
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
}
static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5725,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
- /**
+ /*
* If a module is present and there is need to check
* for over current
*/
@@ -5745,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
" Please remove the SFP+ module and"
" restart the system to clear this"
" error.\n",
- params->port);
-
- /*
- * Disable all RX_ALARMs except for
- * mod_abs
- */
+ params->port);
+ /* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5793,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
+ /*
+ * Bits 0..2 --> speed detected,
+ * Bits 13..15 --> link is down
+ */
if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
link_up = 1;
vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
link_up = 1;
vars->line_speed = SPEED_1000;
@@ -5819,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_PCS_GP, &val1);
- /**
+ /*
* In case of dual-media board and 1G, power up the XAUI side,
* otherwise power it down. For 10G it is done automatically
*/
@@ -5839,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
/* Disable Transmitter */
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
/* Clear LASI */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
@@ -5851,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct link_params *params)
{
- u16 val, fw_ver1, fw_ver2, cnt;
+ u16 val, fw_ver1, fw_ver2, cnt, adj;
struct bnx2x *bp = params->bp;
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5877,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5894,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
}
/* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
/* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
@@ -5905,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, adj;
+
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
val &= 0xFE00;
val |= 0x0092;
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
+ MDIO_PMA_REG_8481_LED1_MASK + adj,
0x80);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
+ MDIO_PMA_REG_8481_LED2_MASK + adj,
0x18);
/* Select activity source by Tx and Rx, as suggested by PHY AE */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
+ MDIO_PMA_REG_8481_LED3_MASK + adj,
0x0006);
/* Select the closest activity blink rate to that in 10/100/1000 */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_BLINK,
+ MDIO_PMA_REG_8481_LED3_BLINK + adj,
0);
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -5961,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
-
+ /*
+ * This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of clear on read
+ */
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -6086,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6102,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
- u16 val;
+ u16 val, adj;
u16 temp;
- u32 actual_phy_selection;
+ u32 actual_phy_selection, cms_enable;
u8 rc = 0;
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = 3;
msleep(1);
if (CHIP_IS_E2(bp))
@@ -6117,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait for GPHY to come out of reset */
msleep(50);
- /* BCM84823 requires that XGXS links up first @ 10G for normal
- behavior */
+ /*
+ * BCM84823 requires that XGXS links up first @ 10G for normal behavior
+ */
temp = vars->line_speed;
vars->line_speed = SPEED_10000;
bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6131,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Set dual-media configuration according to configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, &val);
+ MDIO_CTL_REG_84823_MEDIA + adj, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6164,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, val);
+ MDIO_CTL_REG_84823_MEDIA + adj, val);
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
@@ -6172,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
bnx2x_save_848xx_spirom_version(phy, params);
+ cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val, val1, val2;
+ u16 val, val1, val2, adj;
u8 link_up = 0;
+ /* Reg offset adjustment for 84833 */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* Check 10G-BaseT link status */
/* Check PMD signal ok */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, 0xFFFA, &val1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
&val2);
DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
@@ -6273,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
}
static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6297,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
else
port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
}
static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6353,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
} else {
bnx2x_cl45_write(bp, phy,
@@ -6394,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
val |= 0x2492;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
} else {
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
}
break;
@@ -6440,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
&val);
if (!((val &
- MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
- >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
- DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6451,24 +6596,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x10);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x80);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x98);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x40);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
} else {
bnx2x_cl45_write(bp, phy,
@@ -6513,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6563,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up
- * print the AN outcome of the SFX7101 PHY
- */
+ /* if link is up, print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -6599,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
for (cnt = 0; cnt < 10; cnt++) {
msleep(50);
/* Writes a self-clearing reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET,
- (val | (1<<15)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
/* Wait for clear */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
if ((val & (1<<15)) == 0)
break;
@@ -6623,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
/* Low power mode is controlled by GPIO 2 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
/* The PHY reset is controlled by GPIO 1 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
}
static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6668,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
.supported = 0,
.media_type = ETH_PHY_NOT_PRESENT,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)NULL,
@@ -6705,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_serdes,
@@ -6742,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6773,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
.media_type = ETH_PHY_BASE_T,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6804,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_8073_config_init,
@@ -7015,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
+static struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
/*****************************************************************/
/* */
/* Populate the phy according. Main function: bnx2x_populate_phy */
@@ -7028,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
/* Get the 4 lanes xgxs config rx and tx */
u32 rx = 0, tx = 0, i;
for (i = 0; i < 2; i++) {
- /**
+ /*
* INT_PHY and EXT_PHY1 share the same value location in the
* shmem. When num_phys is greater than 1, than this value
* applies only to EXT_PHY1
@@ -7036,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
} else {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
}
phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7168,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
*phy = phy_84823;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
break;
@@ -7182,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
- /**
- * The shmem address of the phy version is located on different
- * structures. In case this structure is too old, do not set
- * the address
- */
+ /*
+ * The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
dev_info.shared_hw_config.config2));
if (phy_index == EXT_PHY1) {
phy->ver_addr = shmem_base + offsetof(struct shmem_region,
port_mb[port].ext_phy_fw_version);
- /* Check specific mdc mdio settings */
- if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
- mdc_mdio_access = config2 &
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
} else {
u32 size = REG_RD(bp, shmem2_base);
@@ -7215,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- /**
+ /*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
* to prevent one port interfere with another port's CL45 operations.
@@ -7250,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
/* Populate the default phy configuration for MF mode */
if (phy_index == EXT_PHY2) {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config2));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region,
+ dev_info.
port_hw_config[params->port].speed_capability_mask2));
} else {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_hw_config[params->port].speed_capability_mask));
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
}
DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
" 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7408,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
else if (phy_index == EXT_PHY2)
actual_phy_idx = EXT_PHY1;
}
- params->phy[actual_phy_idx].req_flow_ctrl =
+ params->phy[actual_phy_idx].req_flow_ctrl =
params->req_flow_ctrl[link_cfg_idx];
params->phy[actual_phy_idx].req_line_speed =
@@ -7461,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
set_phy_vars(params);
DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
- if (CHIP_REV_IS_FPGA(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
- /* enable on E1.5 FPGA */
- if (CHIP_IS_E1H(bp)) {
- vars->flow_ctrl |=
- (BNX2X_FLOW_CTRL_TX |
- BNX2X_FLOW_CTRL_RX);
- vars->link_status |=
- (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
- }
-
- bnx2x_emac_enable(params, vars, 0);
- if (!(CHIP_IS_E2(bp)))
- bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
- if (CHIP_REV_IS_EMUL(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
- bnx2x_bmac_enable(params, vars, 0);
-
- bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* Disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
if (params->loopback_mode == LOOPBACK_BMAC) {
vars->link_up = 1;
@@ -7527,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_bmac_enable(params, vars, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if (params->loopback_mode == LOOPBACK_EMAC) {
@@ -7544,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7568,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_program(params, vars);
bnx2x_emac_enable(params, vars, 0);
} else
- bnx2x_bmac_enable(params, vars, 0);
-
+ bnx2x_bmac_enable(params, vars, 0);
if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
params->phy[INT_PHY].config_loopback(
@@ -7587,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params);
}
}
-
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
@@ -7608,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
return 0;
}
u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
- u8 reset_ext_phy)
+ u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7617,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7719,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* Need to take the phy out of low power mode in order
to write to access its registers */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7762,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* Only set bit 10 = 1 (Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
/* Phase1 of TX_POWER_DOWN reset */
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN,
- (val | 1<<10));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
}
- /* Toggle Transmitter: Power down and then up with 600ms
- delay between */
+ /*
+ * Toggle Transmitter: Power down and then up with a 600ms delay
+ * in between
+ */
msleep(600);
/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7781,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* Phase2 of POWER_DOWN_RESET */
/* Release bit 10 (Release Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
msleep(15);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
/* set GPIO2 back to LOW */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return 0;
}
@@ -7846,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
/* Set fault module detected LED on */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- port);
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
}
return 0;
}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
- s8 port;
+ s8 port, reset_gpio;
u32 swap_val, swap_override;
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
s8 port_of_path;
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ reset_gpio = MISC_REGISTERS_GPIO_1;
port = 1;
- bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+ /*
+ * Retrieve the reset gpio/port which controls the reset.
+ * Default is GPIO1, PORT1
+ */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
/* Calculate the port based on port swap */
port ^= (swap_val && swap_override);
+ /* Initiate PHY reset */
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ msleep(1);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
msleep(5);
/* PART1 - Reset both phys */
@@ -7907,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7923,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
}
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- if (CHIP_IS_E2(bp))
+ if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
@@ -7958,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- /* GPIO1 affects both ports, so there's need to pull
- it for single port alone */
+ /*
+ * GPIO1 affects both ports, so there's a need to pull
+ * it even for a single port alone
+ */
rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
@@ -7969,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
default:
DP(NETIF_MSG_LINK,
- "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
- ext_phy_type);
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
break;
}
+ if (rc != 0)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ 0);
return rc;
}
@@ -7986,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 ext_phy_type, ext_phy_config;
DP(NETIF_MSG_LINK, "Begin common phy init\n");
- if (CHIP_REV_IS_EMUL(bp))
- return 0;
-
/* Check if common init was already done */
phy_ver = REG_RD(bp, shmem_base_path[0] +
offsetof(struct shmem_region,
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a942c4..92f36b6950dc 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
-#define SPEED_AUTO_NEG 0
+#define SPEED_AUTO_NEG 0
#define SPEED_12000 12000
#define SPEED_12500 12500
#define SPEED_13000 13000
@@ -44,8 +44,8 @@
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
-#define SFP_EEPROM_PART_NO_ADDR 0x28
-#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_PART_NO_ADDR 0x28
+#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
@@ -201,12 +201,14 @@ struct link_params {
/* Default / User Configuration */
u8 loopback_mode;
-#define LOOPBACK_NONE 0
-#define LOOPBACK_EMAC 1
-#define LOOPBACK_BMAC 2
+#define LOOPBACK_NONE 0
+#define LOOPBACK_EMAC 1
+#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
-#define LOOPBACK_EXT 5
+#define LOOPBACK_EXT 5
+#define LOOPBACK_UMAC 6
+#define LOOPBACK_XMAC 7
/* Device parameters */
u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
/* Phy register parameter */
u32 chip_id;
+ /* features */
u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf);
+
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
- const u32 cos1_bw);
+ const u32 cos1_bw);
/* Used to configure the ETS to strict */
u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d584d32c747d..c1b8afe2296e 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -145,13 +145,6 @@ static struct {
{ "Broadcom NetXtreme II BCM57712E XGb" }
};
-#ifndef PCI_DEVICE_ID_NX2_57712
-#define PCI_DEVICE_ID_NX2_57712 0x1662
-#endif
-#ifndef PCI_DEVICE_ID_NX2_57712E
-#define PCI_DEVICE_ID_NX2_57712E 0x1663
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -586,7 +579,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* lock the dmae channel */
- mutex_lock(&bp->dmae_mutex);
+ spin_lock_bh(&bp->dmae_lock);
/* reset completion */
*wb_comp = 0;
@@ -617,7 +610,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
unlock:
- mutex_unlock(&bp->dmae_mutex);
+ spin_unlock_bh(&bp->dmae_lock);
return rc;
}
@@ -1397,7 +1390,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
}
smp_mb__before_atomic_inc();
- atomic_inc(&bp->spq_left);
+ atomic_inc(&bp->cq_spq_left);
/* push the change in fp->state and towards the memory */
smp_wmb();
@@ -1974,13 +1967,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
vn_max_rate = 0;
} else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
- /* If min rate is zero - set it to 1 */
+ /* If fairness is enabled (not all min rates are zeroes) and
+ the current min rate is zero, set it to 1.
+ This is a requirement of the algorithm. */
if (bp->vn_weight_sum && (vn_min_rate == 0))
vn_min_rate = DEF_MIN_RATE;
- vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
}
DP(NETIF_MSG_IFUP,
@@ -2006,7 +2008,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
m_fair_vn.vn_credit_delta =
max_t(u32, (vn_min_rate * (T_FAIR_COEF /
(8 * bp->vn_weight_sum))),
- (bp->cmng.fair_vars.fair_threshold * 2));
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
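
A minimal userspace sketch (not part of the patch) of the max-rate arithmetic introduced in the hunk above; the 1 Gb/s link speed and the maxCfg value of 25 are hypothetical inputs. In switch-independent (MF_SI) mode maxCfg is read as a percentage of the current link speed, otherwise as an absolute value in 100 Mb/s units:

#include <stdio.h>

static unsigned int vn_max_rate(int is_mf_si, unsigned int line_speed_mbps,
				unsigned int max_cfg)
{
	if (is_mf_si)
		return (line_speed_mbps * max_cfg) / 100; /* percentage of link speed */
	return max_cfg * 100;                             /* absolute, in 100 Mb/s units */
}

int main(void)
{
	printf("MF_SI at 1G, maxCfg=25:  %u Mb/s\n", vn_max_rate(1, 1000, 25)); /* 250 */
	printf("other at 1G, maxCfg=25: %u Mb/s\n", vn_max_rate(0, 1000, 25)); /* 2500 */
	return 0;
}
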
@@ -2473,8 +2476,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
rxq_init->sge_map = fp->rx_sge_mapping;
rxq_init->rcq_map = fp->rx_comp_mapping;
rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
- rxq_init->mtu = bp->dev->mtu;
- rxq_init->buf_sz = bp->rx_buf_size;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ rxq_init->mtu = bp->dev->mtu;
+
+ rxq_init->buf_sz = fp->rx_buf_size;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->cl_id = fp->cl_id;
rxq_init->spcl_id = fp->cl_id;
@@ -2726,11 +2735,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spin_lock_bh(&bp->spq_lock);
- if (!atomic_read(&bp->spq_left)) {
- BNX2X_ERR("BUG! SPQ ring full!\n");
- spin_unlock_bh(&bp->spq_lock);
- bnx2x_panic();
- return -EBUSY;
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
}
spe = bnx2x_sp_get_next(bp);
@@ -2761,20 +2777,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
/* stats ramrod has it's own slot on the spq */
- if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+ if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
/* It's ok if the actual decrement is issued towards the memory
* somewhere between the spin_lock and spin_unlock. Thus no
* more explicit memory barrier is needed.
*/
- atomic_dec(&bp->spq_left);
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+ }
+
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
"SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
- "type(0x%x) left %x\n",
+ "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
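
The hunks above split the old spq_left credit pool into cq_spq_left (ETH ramrods, bounded by the SPQ) and eq_spq_left (COMMON ramrods, bounded by the EQ). Below is a simplified userspace sketch of that accounting using C11 atomics; the initial EQ credit of NUM_EQ_DESC - 1 is an assumption (the driver also factors in MAX_SP_DESC_CNT), and the check-then-decrement is only safe here because the sketch is single-threaded, while the driver serializes it under spq_lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_SPQ_PENDING 8
#define NUM_EQ_DESC 64

/* consume one credit from the given pool */
static bool post_ramrod(atomic_int *pool)
{
	if (atomic_load(pool) == 0)
		return false;          /* ring full - caller backs off (or panics) */
	atomic_fetch_sub(pool, 1);
	return true;
}

int main(void)
{
	atomic_int cq_spq_left = MAX_SPQ_PENDING; /* credits for ETH ramrods (SPQ) */
	atomic_int eq_spq_left = NUM_EQ_DESC - 1; /* credits for COMMON ramrods (EQ) */

	post_ramrod(&cq_spq_left);                /* an ETH ramrod is posted */
	post_ramrod(&eq_spq_left);                /* a COMMON ramrod is posted */
	atomic_fetch_add(&cq_spq_left, 1);        /* the ETH completion returns its credit */

	printf("cq_spq_left=%d eq_spq_left=%d\n",
	       atomic_load(&cq_spq_left), atomic_load(&eq_spq_left));
	return 0;
}
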
@@ -3686,8 +3708,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
sw_cons = bp->eq_cons;
sw_prod = bp->eq_prod;
- DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
- hw_cons, sw_cons, atomic_read(&bp->spq_left));
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3752,13 +3774,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
- bp->set_mac_pending = 0;
+ if (elem->message.data.set_mac_event.echo)
+ bp->set_mac_pending = 0;
break;
case (EVENT_RING_OPCODE_SET_MAC |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
- bp->set_mac_pending = 0;
+ if (elem->message.data.set_mac_event.echo)
+ bp->set_mac_pending = 0;
break;
default:
/* unknown event log error and continue */
@@ -3770,7 +3794,7 @@ next_spqe:
} /* for */
smp_mb__before_atomic_inc();
- atomic_add(spqe_cnt, &bp->spq_left);
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
bp->eq_prod = sw_prod;
@@ -4203,7 +4227,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
spin_lock_init(&bp->spq_lock);
- atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
bp->spq_prod_idx = 0;
bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4228,9 +4252,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
bp->eq_cons_sb = BNX2X_EQ_INDEX;
+ /* we want a warning message before it gets rough... */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int i;
@@ -4238,13 +4265,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return;
- DP(NETIF_MSG_IFUP,
- "Initializing indirection table multi_mode %d\n", bp->multi_mode);
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
REG_WR8(bp, BAR_TSTRORM_INTMEM +
TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
- bp->fp->cl_id + (i % (bp->num_queues -
- NONE_ETH_CONTEXT_USE)));
+ bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+ bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+ bnx2x_push_indir_table(bp);
}
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
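
A small standalone sketch of the round-robin fill now done by bnx2x_init_ind_table() before bnx2x_push_indir_table() writes the table into TSTORM memory; the table size of 128 and the queue count of 4 are example values only:

#include <stdio.h>

#define TABLE_SIZE 128              /* stands in for TSTORM_INDIRECTION_TABLE_SIZE */

int main(void)
{
	unsigned char indir[TABLE_SIZE];
	unsigned int num_eth_queues = 4;        /* example ETH RX queue count */

	for (int i = 0; i < TABLE_SIZE; i++)
		indir[i] = i % num_eth_queues;  /* 0,1,2,3,0,1,2,3,... */

	printf("slot 0 -> q%u, slot 5 -> q%u\n", indir[0], indir[5]);
	return 0;
}
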
@@ -5840,7 +5874,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
BP_ABS_FUNC(bp), load_code);
bp->dmae_ready = 0;
- mutex_init(&bp->dmae_mutex);
+ spin_lock_init(&bp->dmae_lock);
rc = bnx2x_gunzip_init(bp);
if (rc)
return rc;
@@ -5992,6 +6026,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ BNX2X_FREE(bp->rx_indir_table);
+
#undef BNX2X_PCI_FREE
#undef BNX2X_KFREE
}
@@ -6122,6 +6158,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
/* EQ */
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+ BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+ TSTORM_INDIRECTION_TABLE_SIZE);
return 0;
alloc_mem_err:
@@ -6175,12 +6214,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
int ramrod_flags = WAIT_RAMROD_COMMON;
bp->set_mac_pending = 1;
- smp_wmb();
config->hdr.length = 1;
config->hdr.offset = cam_offset;
config->hdr.client_id = 0xff;
- config->hdr.reserved1 = 0;
+ /* Mark this as a single MAC configuration ramrod (as opposed to a
+ * UC/MC list configuration ramrod).
+ */
+ config->hdr.echo = 1;
/* primary MAC */
config->config_table[0].msb_mac_addr =
@@ -6212,6 +6253,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
config->config_table[0].middle_mac_addr,
config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
+ mb();
+
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6276,20 +6319,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
else if (CHIP_MODE_IS_4_PORT(bp))
- return BP_FUNC(bp) * 32 + rel_offset;
+ return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
else
- return BP_VN(bp) * 32 + rel_offset;
+ return E2_FUNC_MAX * rel_offset + BP_VN(bp);
}
/**
* LLH CAM line allocations: currently only iSCSI and ETH macs are
* relevant. In addition, current implementation is tuned for a
* single ETH MAC.
- *
- * When multiple unicast ETH MACs PF configuration in switch
- * independent mode is required (NetQ, multiple netdev MACs,
- * etc.), consider better utilisation of 16 per function MAC
- * entries in the LLH memory.
*/
enum {
LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6364,14 +6402,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
}
}
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+ return CHIP_REV_IS_SLOW(bp) ?
+ (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+ (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
int i = 0, old;
struct net_device *dev = bp->dev;
+ u8 offset = bnx2x_e1_cam_mc_offset(bp);
struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
+ if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+ return -EINVAL;
+
netdev_for_each_mc_addr(ha, dev) {
/* copy mac */
config_cmd->config_table[i].msb_mac_addr =
@@ -6412,32 +6473,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
}
}
+ wmb();
+
config_cmd->hdr.length = i;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
- config_cmd->hdr.reserved1 = 0;
+ /* Mark that this ramrod doesn't use bp->set_mac_pending for
+ * synchronization.
+ */
+ config_cmd->hdr.echo = 0;
- bp->set_mac_pending = 1;
- smp_wmb();
+ mb();
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
int i;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
int ramrod_flags = WAIT_RAMROD_COMMON;
+ u8 offset = bnx2x_e1_cam_mc_offset(bp);
- bp->set_mac_pending = 1;
- smp_wmb();
-
- for (i = 0; i < config_cmd->hdr.length; i++)
+ for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
+ wmb();
+
+ config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* We'll wait for a completion this time... */
+ config_cmd->hdr.echo = 1;
+
+ bp->set_mac_pending = 1;
+
+ mb();
+
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
@@ -6447,6 +6523,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
}
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[MC_HASH_SIZE];
+ u32 crc, bit, regidx;
+ int i;
+
+ memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+
+ crc = crc32c_le(0, bnx2x_mc_addr(ha),
+ ETH_ALEN);
+ bit = (crc >> 24) & 0xff;
+ regidx = bit >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i),
+ mc_filter[i]);
+
+ return 0;
+}
+
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
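
For illustration, a standalone sketch of the hash computed by bnx2x_set_e1h_mc_list() above. The CRC routine here is a plain bit-by-bit reflected CRC-32C assumed to mirror the kernel's crc32c_le, and MC_HASH_SIZE is assumed to be 8 registers of 32 bits (256 filter bits in total); the multicast MAC is an arbitrary example:

#include <stdio.h>

/* bit-by-bit reflected CRC-32C (polynomial 0x1EDC6F41, reflected 0x82F63B78),
 * seeded with 0 as in the call above */
static unsigned int crc32c_le(unsigned int crc, const unsigned char *p, int len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	unsigned int mc_filter[8] = { 0 };      /* MC_HASH_SIZE assumed to be 8 */
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

	unsigned int crc = crc32c_le(0, mac, 6);
	unsigned int bit = (crc >> 24) & 0xff;  /* top byte picks one of 256 bins */
	unsigned int regidx = bit >> 5;         /* which 32-bit MC_HASH register */
	bit &= 0x1f;                            /* which bit within that register */
	mc_filter[regidx] |= 1u << bit;

	printf("crc=0x%08x -> register %u, bit %u\n", crc, regidx, bit);
	return 0;
}
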
#ifdef BCM_CNIC
/**
* Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6465,12 +6579,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
/* Send a SET_MAC ramrod */
- bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+ bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
cam_offset, 0);
- bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+ bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
return 0;
}
@@ -7112,20 +7227,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
/* Give HW time to discard old tx messages */
msleep(1);
- if (CHIP_IS_E1(bp)) {
- /* invalidate mc list,
- * wait and poll (interrupts are off)
- */
- bnx2x_invlidate_e1_mc_list(bp);
- bnx2x_set_eth_mac(bp, 0);
+ bnx2x_set_eth_mac(bp, 0);
- } else {
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+ bnx2x_invalidate_uc_list(bp);
- bnx2x_set_eth_mac(bp, 0);
-
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+ if (CHIP_IS_E1(bp))
+ bnx2x_invalidate_e1_mc_list(bp);
+ else {
+ bnx2x_invalidate_e1h_mc_list(bp);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}
#ifdef BCM_CNIC
@@ -8394,11 +8504,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /* If the maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8411,7 +8557,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI NPAR MAC */
+ /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
+ * FCoE MAC then the appropriate feature should be disabled.
+ */
if (IS_MF_SI(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8419,8 +8567,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
iscsi_mac_addr_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
- }
+ BNX2X_DEV_INFO("Read iSCSI MAC: "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ /* Disable iSCSI OOO if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_OOO_FLAG |
+ NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ BNX2X_DEV_INFO("Read FCoE MAC to "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_FCOE_FLAG;
}
#endif
} else {
@@ -8434,7 +8613,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
#endif
}
@@ -8442,14 +8621,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- /* Inform the upper layers about FCoE MAC */
+ /* Set the FCoE MAC in modes other than MF_SI */
if (!CHIP_IS_E1x(bp)) {
if (IS_MF_SD(bp))
- memcpy(bp->fip_mac, bp->dev->dev_addr,
- sizeof(bp->fip_mac));
- else
- memcpy(bp->fip_mac, bp->iscsi_mac,
- sizeof(bp->fip_mac));
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ else if (!IS_MF(bp))
+ memcpy(fip_mac, iscsi_mac, ETH_ALEN);
}
#endif
}
@@ -8612,6 +8789,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
+
return rc;
}
@@ -8826,12 +9007,197 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
+#define E1_MAX_UC_LIST 29
+#define E1H_MAX_UC_LIST 30
+#define E2_MAX_UC_LIST 14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ return E1_MAX_UC_LIST;
+ else if (CHIP_IS_E1H(bp))
+ return E1H_MAX_UC_LIST;
+ else
+ return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ /* CAM Entries for Port0:
+ * 0 - prim ETH MAC
+ * 1 - BCAST MAC
+ * 2 - iSCSI L2 ring ETH MAC
+ * 3-31 - UC MACs
+ *
+ * Port1 entries are allocated the same way starting from
+ * entry 32.
+ */
+ return 3 + 32 * BP_PORT(bp);
+ else if (CHIP_IS_E1H(bp)) {
+ /* CAM Entries:
+ * 0-7 - prim ETH MAC for each function
+ * 8-15 - iSCSI L2 ring ETH MAC for each function
+ * 16 till 255 UC MAC lists for each function
+ *
+ * Remark: There is no FCoE support for E1H, thus FCoE related
+ * MACs are not considered.
+ */
+ return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+ bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+ } else {
+ /* CAM Entries (there is a separate CAM per engine):
+ * 0-3 - prim ETH MAC for each function
+ * 4-7 - iSCSI L2 ring ETH MAC for each function
+ * 8-11 - FIP ucast L2 MAC for each function
+ * 12-15 - ALL_ENODE_MACS mcast MAC for each function
+ * 16 till 71 UC MAC lists for each function
+ */
+ u8 func_idx =
+ (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+ return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+ bnx2x_max_uc_list(bp) * func_idx;
+ }
+}
+
+/* set uc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int i = 0, old;
+ struct net_device *dev = bp->dev;
+ u8 offset = bnx2x_uc_list_cam_offset(bp);
+ struct netdev_hw_addr *ha;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+ if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+ return -EINVAL;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ /* copy mac */
+ config_cmd->config_table[i].msb_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+ config_cmd->config_table[i].middle_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+ config_cmd->config_table[i].lsb_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+ config_cmd->config_table[i].vlan_id = 0;
+ config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+ config_cmd->config_table[i].clients_bit_vector =
+ cpu_to_le32(1 << BP_L_ID(bp));
+
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+
+ DP(NETIF_MSG_IFUP,
+ "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+ config_cmd->config_table[i].msb_mac_addr,
+ config_cmd->config_table[i].middle_mac_addr,
+ config_cmd->config_table[i].lsb_mac_addr);
+
+ i++;
+
+ /* Set uc MAC in NIG */
+ bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+ LLH_CAM_ETH_LINE + i);
+ }
+ old = config_cmd->hdr.length;
+ if (old > i) {
+ for (; i < old; i++) {
+ if (CAM_IS_INVALID(config_cmd->
+ config_table[i])) {
+ /* already invalidated */
+ break;
+ }
+ /* invalidate */
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ }
+ }
+
+ wmb();
+
+ config_cmd->hdr.length = i;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* Mark that this ramrod doesn't use bp->set_mac_pending for
+ * synchronization.
+ */
+ config_cmd->hdr.echo = 0;
+
+ mb();
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+ int i;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+ int ramrod_flags = WAIT_RAMROD_COMMON;
+ u8 offset = bnx2x_uc_list_cam_offset(bp);
+ u8 max_list_size = bnx2x_max_uc_list(bp);
+
+ for (i = 0; i < max_list_size; i++) {
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+ }
+
+ wmb();
+
+ config_cmd->hdr.length = max_list_size;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* We'll wait for a completion this time... */
+ config_cmd->hdr.echo = 1;
+
+ bp->set_mac_pending = 1;
+
+ mb();
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+ /* Wait for a completion */
+ bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+ ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ /* some multicasts */
+ if (CHIP_IS_E1(bp)) {
+ return bnx2x_set_e1_mc_list(bp);
+ } else { /* E1H and newer */
+ return bnx2x_set_e1h_mc_list(bp);
+ }
+}
+
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
- int port = BP_PORT(bp);
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8842,47 +9208,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ else if (dev->flags & IFF_ALLMULTI)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else { /* some multicasts */
- if (CHIP_IS_E1(bp)) {
- /*
- * set mc list, do not wait as wait implies sleep
- * and set_rx_mode can be invoked from non-sleepable
- * context
- */
- u8 offset = (CHIP_REV_IS_SLOW(bp) ?
- BNX2X_MAX_EMUL_MULTI*(1 + port) :
- BNX2X_MAX_MULTICAST*(1 + port));
-
- bnx2x_set_e1_mc_list(bp, offset);
- } else { /* E1H */
- /* Accept one or more multicasts */
- struct netdev_hw_addr *ha;
- u32 mc_filter[MC_HASH_SIZE];
- u32 crc, bit, regidx;
- int i;
-
- memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
- netdev_for_each_mc_addr(ha, dev) {
- DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- bnx2x_mc_addr(ha));
-
- crc = crc32c_le(0, bnx2x_mc_addr(ha),
- ETH_ALEN);
- bit = (crc >> 24) & 0xff;
- regidx = bit >> 5;
- bit &= 0x1f;
- mc_filter[regidx] |= (1 << bit);
- }
+ else {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp))
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i),
- mc_filter[i]);
- }
+ /* some unicasts */
+ if (bnx2x_set_uc_list(bp))
+ rx_mode = BNX2X_RX_MODE_PROMISC;
}
bp->rx_mode = rx_mode;
@@ -8963,7 +9298,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_stop = bnx2x_close,
.ndo_start_xmit = bnx2x_start_xmit,
.ndo_select_queue = bnx2x_select_queue,
- .ndo_set_multicast_list = bnx2x_set_rx_mode,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
@@ -9109,7 +9444,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
dev->vlan_features |= NETIF_F_TSO6;
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif
@@ -9516,6 +9851,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
}
#endif
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
unregister_netdev(dev);
/* Delete all NAPI objects */
@@ -9789,15 +10129,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
HW_CID(bp, BNX2X_ISCSI_ETH_CID));
}
- /* There may be not more than 8 L2 and COMMON SPEs and not more
- * than 8 L5 SPEs in the air.
+ /* There may be no more than 8 L2 SPEs and no more than 8 L5 SPEs
+ * in flight. We also check that the number of outstanding
+ * COMMON ramrods is not more than the EQ and SPQ can
+ * accommodate.
*/
- if ((type == NONE_CONNECTION_TYPE) ||
- (type == ETH_CONNECTION_TYPE)) {
- if (!atomic_read(&bp->spq_left))
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
break;
else
- atomic_dec(&bp->spq_left);
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->eq_spq_left);
} else if ((type == ISCSI_CONNECTION_TYPE) ||
(type == FCOE_CONNECTION_TYPE)) {
if (bp->cnic_spq_pending >=
@@ -9875,7 +10221,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
int rc = 0;
mutex_lock(&bp->cnic_mutex);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
if (c_ops)
rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
mutex_unlock(&bp->cnic_mutex);
@@ -9989,7 +10336,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
int count = ctl->data.credit.credit_count;
smp_mb__before_atomic_inc();
- atomic_add(count, &bp->spq_left);
+ atomic_add(count, &bp->cq_spq_left);
smp_mb__after_atomic_inc();
break;
}
@@ -10085,6 +10432,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ /* If both iSCSI and FCoE are disabled - return NULL in
+ * order to indicate CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
cp->drv_owner = THIS_MODULE;
cp->chip_id = CHIP_ID(bp);
cp->pdev = bp->pdev;
@@ -10105,6 +10459,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
"starting cid %d\n",
cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index e01330bb36c7..1c89f19a4425 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero */
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d590fa8..3445ded6674f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
if (unlikely(bp->panic))
return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
/* Protect a state change flow */
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
-
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5c6fba802f2b..9bc5de3e04a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
_lock_rx_hashtbl(bond);
- hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
+ hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 163e0b06eaa5..584f97b73060 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,7 +59,6 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
-#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
@@ -424,15 +423,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
skb->priority = 1;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
- struct netpoll *np = bond->dev->npinfo->netpoll;
- slave_dev->npinfo = bond->dev->npinfo;
- slave_dev->priv_flags |= IFF_IN_NETPOLL;
- netpoll_send_skb_on_dev(np, skb, slave_dev);
- slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
- } else
-#endif
+ if (unlikely(netpoll_tx_running(slave_dev)))
+ bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+ else
dev_queue_xmit(skb);
return 0;
@@ -1288,63 +1281,113 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * You must hold read lock on bond->lock before calling this.
- */
-static bool slaves_support_netpoll(struct net_device *bond_dev)
+static inline int slave_enable_netpoll(struct slave *slave)
{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- int i = 0;
- bool ret = true;
+ struct netpoll *np;
+ int err = 0;
- bond_for_each_slave(bond, slave, i) {
- if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !slave->dev->netdev_ops->ndo_poll_controller)
- ret = false;
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np)
+ goto out;
+
+ np->dev = slave->dev;
+ err = __netpoll_setup(np);
+ if (err) {
+ kfree(np);
+ goto out;
}
- return i != 0 && ret;
+ slave->np = np;
+out:
+ return err;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+ struct netpoll *np = slave->np;
+
+ if (!np)
+ return;
+
+ slave->np = NULL;
+ synchronize_rcu_bh();
+ __netpoll_cleanup(np);
+ kfree(np);
+}
+static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
+{
+ if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
+ return false;
+ if (!slave_dev->netdev_ops->ndo_poll_controller)
+ return false;
+ return true;
}
static void bond_poll_controller(struct net_device *bond_dev)
{
- struct bonding *bond = netdev_priv(bond_dev);
+}
+
+static void __bond_netpoll_cleanup(struct bonding *bond)
+{
struct slave *slave;
int i;
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev && IS_UP(slave->dev))
- netpoll_poll_dev(slave->dev);
- }
+ bond_for_each_slave(bond, slave, i)
+ if (IS_UP(slave->dev))
+ slave_disable_netpoll(slave);
}
-
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+
+ read_lock(&bond->lock);
+ __bond_netpoll_cleanup(bond);
+ read_unlock(&bond->lock);
+}
+
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+ struct bonding *bond = netdev_priv(dev);
struct slave *slave;
- const struct net_device_ops *ops;
- int i;
+ int i, err = 0;
read_lock(&bond->lock);
- bond_dev->npinfo = NULL;
bond_for_each_slave(bond, slave, i) {
- if (slave->dev) {
- ops = slave->dev->netdev_ops;
- if (ops->ndo_netpoll_cleanup)
- ops->ndo_netpoll_cleanup(slave->dev);
- else
- slave->dev->npinfo = NULL;
+ if (!IS_UP(slave->dev))
+ continue;
+ err = slave_enable_netpoll(slave);
+ if (err) {
+ __bond_netpoll_cleanup(bond);
+ break;
}
}
read_unlock(&bond->lock);
+ return err;
}
-#else
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+ return bond->dev->npinfo;
+}
+#else
+static inline int slave_enable_netpoll(struct slave *slave)
+{
+ return 0;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
-
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+ return 0;
+}
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+ return NULL;
+}
#endif
/*---------------------------------- IOCTL ----------------------------------*/
@@ -1372,8 +1415,8 @@ static int bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- unsigned long features = bond_dev->features;
- unsigned long vlan_features = 0;
+ u32 features = bond_dev->features;
+ u32 vlan_features = 0;
unsigned short max_hard_header_len = max((u16)ETH_HLEN,
bond_dev->hard_header_len);
int i;
@@ -1400,8 +1443,8 @@ static int bond_compute_features(struct bonding *bond)
done:
features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(features, NULL);
- bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+ bond_dev->features = netdev_fix_features(bond_dev, features);
+ bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
bond_dev->hard_header_len = max_hard_header_len;
return 0;
@@ -1423,6 +1466,67 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond->setup_by_slave = 1;
}
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
+ */
+static bool bond_should_deliver_exact_match(struct sk_buff *skb,
+ struct net_device *slave_dev,
+ struct net_device *bond_dev)
+{
+ if (slave_dev->priv_flags & IFF_SLAVE_INACTIVE) {
+ if (slave_dev->priv_flags & IFF_SLAVE_NEEDARP &&
+ skb->protocol == __cpu_to_be16(ETH_P_ARP))
+ return false;
+
+ if (bond_dev->priv_flags & IFF_MASTER_ALB &&
+ skb->pkt_type != PACKET_BROADCAST &&
+ skb->pkt_type != PACKET_MULTICAST)
+ return false;
+
+ if (bond_dev->priv_flags & IFF_MASTER_8023AD &&
+ skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+ return false;
+
+ return true;
+ }
+ return false;
+}
+
+static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
+{
+ struct net_device *slave_dev;
+ struct net_device *bond_dev;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+ slave_dev = skb->dev;
+ bond_dev = ACCESS_ONCE(slave_dev->master);
+ if (unlikely(!bond_dev))
+ return skb;
+
+ if (bond_dev->priv_flags & IFF_MASTER_ARPMON)
+ slave_dev->last_rx = jiffies;
+
+ if (bond_should_deliver_exact_match(skb, slave_dev, bond_dev)) {
+ skb->deliver_no_wcard = 1;
+ return skb;
+ }
+
+ skb->dev = bond_dev;
+
+ if (bond_dev->priv_flags & IFF_MASTER_ALB &&
+ bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+ skb->pkt_type == PACKET_HOST) {
+ u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
+
+ memcpy(dest, bond_dev->dev_addr, ETH_ALEN);
+ }
+
+ return skb;
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -1594,16 +1698,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = netdev_set_master(slave_dev, bond_dev);
+ res = netdev_set_bond_master(slave_dev, bond_dev);
if (res) {
- pr_debug("Error %d calling netdev_set_master\n", res);
+ pr_debug("Error %d calling netdev_set_bond_master\n", res);
goto err_restore_mac;
}
+ res = netdev_rx_handler_register(slave_dev, bond_handle_frame, NULL);
+ if (res) {
+ pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+ goto err_unset_master;
+ }
+
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
- goto err_unset_master;
+ goto err_unreg_rxhandler;
}
new_slave->dev = slave_dev;
@@ -1782,17 +1892,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
#ifdef CONFIG_NET_POLL_CONTROLLER
- if (slaves_support_netpoll(bond_dev)) {
- bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (bond_dev->npinfo)
- slave_dev->npinfo = bond_dev->npinfo;
- } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
- bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("New slave device %s does not support netpoll\n",
- slave_dev->name);
- pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ slave_dev->npinfo = bond_netpoll_info(bond);
+ if (slave_dev->npinfo) {
+ if (slave_enable_netpoll(new_slave)) {
+ read_unlock(&bond->lock);
+ pr_info("Error, %s: master_dev is using netpoll, "
+ "but new slave device does not support netpoll.\n",
+ bond_dev->name);
+ res = -EBUSY;
+ goto err_close;
+ }
}
#endif
+
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1811,8 +1923,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_close:
dev_close(slave_dev);
+err_unreg_rxhandler:
+ netdev_rx_handler_unregister(slave_dev);
+
err_unset_master:
- netdev_set_master(slave_dev, NULL);
+ netdev_set_bond_master(slave_dev, NULL);
err_restore_mac:
if (!bond->params.fail_over_mac) {
@@ -1992,19 +2107,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_master(slave_dev, NULL);
+ netdev_rx_handler_unregister(slave_dev);
+ netdev_set_bond_master(slave_dev, NULL);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- read_lock_bh(&bond->lock);
-
- if (slaves_support_netpoll(bond_dev))
- bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- read_unlock_bh(&bond->lock);
- if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
- slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
- else
- slave_dev->npinfo = NULL;
-#endif
+ slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -2039,6 +2145,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
@@ -2114,7 +2221,10 @@ static int bond_release_all(struct net_device *bond_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_master(slave_dev, NULL);
+ netdev_rx_handler_unregister(slave_dev);
+ netdev_set_bond_master(slave_dev, NULL);
+
+ slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -4654,9 +4764,12 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
.ndo_poll_controller = bond_poll_controller,
#endif
+ .ndo_add_slave = bond_enslave,
+ .ndo_del_slave = bond_release,
};
static void bond_destructor(struct net_device *bond_dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c5380..72bb0f6cc9bf 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
bond->dev->name, new_value);
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
bonding_show_slaves_active, bonding_store_slaves_active);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 31fe980e4e28..ff4e26980220 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -20,6 +20,7 @@
#include <linux/if_bonding.h>
#include <linux/cpumask.h>
#include <linux/in6.h>
+#include <linux/netpoll.h>
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -132,7 +133,7 @@ static inline void unblock_netpoll_tx(void)
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
- if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
+ if (unlikely(netpoll_tx_running(dev)))
return atomic_read(&netpoll_block_tx);
return 0;
}
@@ -198,6 +199,9 @@ struct slave {
u16 queue_id;
struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
struct tlb_slave_info tlb_info;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
};
/*
@@ -265,7 +269,8 @@ struct bonding {
*
* Caller must hold bond lock for read
*/
-static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
+ struct net_device *slave_dev)
{
struct slave *slave = NULL;
int i;
@@ -276,7 +281,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
}
}
- return 0;
+ return NULL;
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -323,6 +328,22 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->dev->last_rx;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
+{
+ struct netpoll *np = slave->np;
+
+ if (np)
+ netpoll_send_skb(np, skb);
+}
+#else
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
+{
+}
+#endif
+
static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
struct bonding *bond = netdev_priv(slave->dev->master);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 5dec456fd4a4..1d699e3df547 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
+source "drivers/net/can/c_can/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 53c82a71778e..24ebfe8d758a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -13,6 +13,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
+obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 000000000000..ffb9773d102d
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
+menuconfig CAN_C_CAN
+ tristate "Bosch C_CAN devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_C_CAN
+
+config CAN_C_CAN_PLATFORM
+ tristate "Generic Platform Bus based C_CAN driver"
+ ---help---
+ This driver adds support for the C_CAN chips connected to
+ the "platform bus" (Linux abstraction for directly to the
+ processor attached devices) which can be found on various
+ boards from ST Microelectronics (http://www.st.com)
+ like the SPEAr1310 and SPEAr320 evaluation boards.
+endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 000000000000..9273f6d5c4b7
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Bosch C_CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_C_CAN) += c_can.o
+obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 000000000000..14050786218a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * TX and RX NAPI implementation has been borrowed from at91 CAN driver
+ * written by:
+ * Copyright
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "c_can.h"
+
+/* control register */
+#define CONTROL_TEST BIT(7)
+#define CONTROL_CCE BIT(6)
+#define CONTROL_DISABLE_AR BIT(5)
+#define CONTROL_ENABLE_AR (0 << 5)
+#define CONTROL_EIE BIT(3)
+#define CONTROL_SIE BIT(2)
+#define CONTROL_IE BIT(1)
+#define CONTROL_INIT BIT(0)
+
+/* test register */
+#define TEST_RX BIT(7)
+#define TEST_TX1 BIT(6)
+#define TEST_TX2 BIT(5)
+#define TEST_LBACK BIT(4)
+#define TEST_SILENT BIT(3)
+#define TEST_BASIC BIT(2)
+
+/* status register */
+#define STATUS_BOFF BIT(7)
+#define STATUS_EWARN BIT(6)
+#define STATUS_EPASS BIT(5)
+#define STATUS_RXOK BIT(4)
+#define STATUS_TXOK BIT(3)
+
+/* error counter register */
+#define ERR_CNT_TEC_MASK 0xff
+#define ERR_CNT_TEC_SHIFT 0
+#define ERR_CNT_REC_SHIFT 8
+#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
+#define ERR_CNT_RP_SHIFT 15
+#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
+
+/* bit-timing register */
+#define BTR_BRP_MASK 0x3f
+#define BTR_BRP_SHIFT 0
+#define BTR_SJW_SHIFT 6
+#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
+#define BTR_TSEG1_SHIFT 8
+#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT 12
+#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
+
+/* brp extension register */
+#define BRP_EXT_BRPE_MASK 0x0f
+#define BRP_EXT_BRPE_SHIFT 0
+
+/* IFx command request */
+#define IF_COMR_BUSY BIT(15)
+
+/* IFx command mask */
+#define IF_COMM_WR BIT(7)
+#define IF_COMM_MASK BIT(6)
+#define IF_COMM_ARB BIT(5)
+#define IF_COMM_CONTROL BIT(4)
+#define IF_COMM_CLR_INT_PND BIT(3)
+#define IF_COMM_TXRQST BIT(2)
+#define IF_COMM_DATAA BIT(1)
+#define IF_COMM_DATAB BIT(0)
+#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
+ IF_COMM_CONTROL | IF_COMM_TXRQST | \
+ IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* IFx arbitration */
+#define IF_ARB_MSGVAL BIT(15)
+#define IF_ARB_MSGXTD BIT(14)
+#define IF_ARB_TRANSMIT BIT(13)
+
+/* IFx message control */
+#define IF_MCONT_NEWDAT BIT(15)
+#define IF_MCONT_MSGLST BIT(14)
+#define IF_MCONT_CLR_MSGLST (0 << 14)
+#define IF_MCONT_INTPND BIT(13)
+#define IF_MCONT_UMASK BIT(12)
+#define IF_MCONT_TXIE BIT(11)
+#define IF_MCONT_RXIE BIT(10)
+#define IF_MCONT_RMTEN BIT(9)
+#define IF_MCONT_TXRQST BIT(8)
+#define IF_MCONT_EOB BIT(7)
+#define IF_MCONT_DLC_MASK 0xf
+
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
+
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS 32
+#define C_CAN_MSG_OBJ_RX_NUM 16
+#define C_CAN_MSG_OBJ_TX_NUM 16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
+ C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
+ C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS 0x0000ffff
+
+/* status interrupt */
+#define STATUS_INTERRUPT 0x8000
+
+/* global interrupt masks */
+#define ENABLE_ALL_INTERRUPTS 1
+#define DISABLE_ALL_INTERRUPTS 0
+
+/* minimum timeout for checking BUSY status */
+#define MIN_TIMEOUT_VALUE 6
+
+/* napi related */
+#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
+
+/* c_can lec values */
+enum c_can_lec_type {
+ LEC_NO_ERROR = 0,
+ LEC_STUFF_ERROR,
+ LEC_FORM_ERROR,
+ LEC_ACK_ERROR,
+ LEC_BIT1_ERROR,
+ LEC_BIT0_ERROR,
+ LEC_CRC_ERROR,
+ LEC_UNUSED,
+};
+
+/*
+ * c_can error types:
+ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
+ */
+enum c_can_bus_error_types {
+ C_CAN_NO_ERROR = 0,
+ C_CAN_BUS_OFF,
+ C_CAN_ERROR_WARNING,
+ C_CAN_ERROR_PASSIVE,
+};
+
+static struct can_bittiming_const c_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
+ .brp_inc = 1,
+};
+
+static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+{
+ return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
+ C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+{
+ return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
+ C_CAN_MSG_OBJ_TX_FIRST;
+}
+
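
A short sketch of the TX message-object indexing used by get_tx_next_msg_obj(): a free-running tx_next counter masked by C_CAN_NEXT_MSG_OBJ_MASK maps onto hardware objects 17..32 (the TX half of the 32 message objects) and wraps without ever touching the RX range:

#include <stdio.h>

#define C_CAN_MSG_OBJ_TX_NUM    16
#define C_CAN_MSG_OBJ_TX_FIRST  17
#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)

int main(void)
{
	for (unsigned int tx_next = 14; tx_next < 18; tx_next++)
		printf("tx_next=%u -> object %u\n", tx_next,
		       (tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST);
	/* prints objects 31, 32, 17, 18 - the counter wraps inside the TX range */
	return 0;
}
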
+static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+{
+ u32 val = priv->read_reg(priv, reg);
+ val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+ return val;
+}
+
+static void c_can_enable_all_interrupts(struct c_can_priv *priv,
+ int enable)
+{
+ unsigned int cntrl_save = priv->read_reg(priv,
+ &priv->regs->control);
+
+ if (enable)
+ cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
+ else
+ cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+
+ priv->write_reg(priv, &priv->regs->control, cntrl_save);
+}
+
+static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+{
+ int count = MIN_TIMEOUT_VALUE;
+
+ while (count && priv->read_reg(priv,
+ &priv->regs->ifregs[iface].com_req) &
+ IF_COMR_BUSY) {
+ count--;
+ udelay(1);
+ }
+
+ if (!count)
+ return 1;
+
+ return 0;
+}
+
+static inline void c_can_object_get(struct net_device *dev,
+ int iface, int objno, int mask)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /*
+ * As per the spec, after writing the message object number into the
+ * IF command request register, the transfer between the interface
+ * registers and the message RAM must complete within 6 CAN-CLK
+ * periods.
+ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ IFX_WRITE_LOW_16BIT(mask));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ IFX_WRITE_LOW_16BIT(objno));
+
+ if (c_can_msg_obj_is_busy(priv, iface))
+ netdev_err(dev, "timed out in object get\n");
+}
+
+static inline void c_can_object_put(struct net_device *dev,
+ int iface, int objno, int mask)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /*
+ * As per the spec, after writing the message object number into the
+ * IF command request register, the transfer between the interface
+ * registers and the message RAM must complete within 6 CAN-CLK
+ * periods.
+ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ IFX_WRITE_LOW_16BIT(objno));
+
+ if (c_can_msg_obj_is_busy(priv, iface))
+ netdev_err(dev, "timed out in object put\n");
+}
+
+static void c_can_write_msg_object(struct net_device *dev,
+ int iface, struct can_frame *frame, int objno)
+{
+ int i;
+ u16 flags = 0;
+ unsigned int id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (!(frame->can_id & CAN_RTR_FLAG))
+ flags |= IF_ARB_TRANSMIT;
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ id = frame->can_id & CAN_EFF_MASK;
+ flags |= IF_ARB_MSGXTD;
+ } else
+ id = ((frame->can_id & CAN_SFF_MASK) << 18);
+
+ flags |= IF_ARB_MSGVAL;
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+ IFX_WRITE_HIGH_16BIT(id));
+
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+ frame->data[i] | (frame->data[i + 1] << 8));
+ }
+
+ /* enable interrupt for this message object */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
+ frame->can_dlc);
+ c_can_object_put(dev, iface, objno, IF_COMM_ALL);
+}
+
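
A standalone sketch of the arbitration-register packing performed by c_can_write_msg_object() above: an 11-bit standard-frame ID is shifted left by 18 so it occupies the top of the 29-bit arbitration field, the MSGVAL/TRANSMIT flags sit in the upper word, and the IFX_* helpers split the 32-bit value across the two 16-bit arb registers. The example ID 0x123 is arbitrary:

#include <stdio.h>

#define IF_ARB_MSGVAL   (1u << 15)
#define IF_ARB_MSGXTD   (1u << 14)
#define IF_ARB_TRANSMIT (1u << 13)

#define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)
#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)

int main(void)
{
	unsigned int can_id = 0x123;              /* example standard-frame ID */
	unsigned int id = (can_id & 0x7FF) << 18; /* SFF ID occupies bits 18..28 */
	unsigned int flags = IF_ARB_MSGVAL | IF_ARB_TRANSMIT;

	unsigned int arb1 = IFX_WRITE_LOW_16BIT(id);
	unsigned int arb2 = flags | IFX_WRITE_HIGH_16BIT(id);

	printf("arb1=0x%04x arb2=0x%04x\n", arb1, arb2);  /* 0x0000, 0xa48c */
	return 0;
}
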
+static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
+ int iface, int ctrl_mask,
+ int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
+ c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+
+}
+
+static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
+ int iface,
+ int ctrl_mask)
+{
+ int i;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST |
+ IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
+ }
+}
+
+static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
+ int iface, int ctrl_mask,
+ int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST |
+ IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static void c_can_handle_lost_msg_obj(struct net_device *dev,
+ int iface, int objno)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ netdev_err(dev, "msg lost in buffer %d\n", objno);
+
+ c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ IF_MCONT_CLR_MSGLST);
+
+ c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
+
+ /* create an error msg */
+ skb = alloc_can_err_skb(dev, &frame);
+ if (unlikely(!skb))
+ return;
+
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ netif_receive_skb(skb);
+}
+
+static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+{
+ u16 flags, data;
+ int i;
+ unsigned int val;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_skb(dev, &frame);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+
+ flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
+ val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+ (flags << 16);
+
+ if (flags & IF_ARB_MSGXTD)
+ frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ frame->can_id = (val >> 18) & CAN_SFF_MASK;
+
+ if (flags & IF_ARB_TRANSMIT)
+ frame->can_id |= CAN_RTR_FLAG;
+ else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ data = priv->read_reg(priv,
+ &priv->regs->ifregs[iface].data[i / 2]);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
+ }
+
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += frame->can_dlc;
+
+ return 0;
+}
+
+static void c_can_setup_receive_object(struct net_device *dev, int iface,
+ int objno, unsigned int mask,
+ unsigned int id, unsigned int mcont)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ IFX_WRITE_LOW_16BIT(mask));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+ IFX_WRITE_HIGH_16BIT(mask));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+ (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+ c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+ c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+
+ c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
+
+ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+ c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
+{
+ int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+
+ /*
+	 * the transmission request register's bit n-1 corresponds to
+	 * message object n, so shift the bit position accordingly.
+ */
+ if (val & (1 << (objno - 1)))
+ return 1;
+
+ return 0;
+}
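The check above relies on the controller convention that message object n is reported in bit n-1 of TXRQST (the same convention shows up again for INTPND in the Rx poll loop). A minimal standalone sketch of that mapping, not part of the patch and using a hypothetical helper name:

static inline int sketch_obj_bit_set(unsigned int reg_val, int objno)
{
	/* message object n (1-based) lives in bit n-1 of the register */
	return (reg_val >> (objno - 1)) & 1;
}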
+
+static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ u32 msg_obj_no;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ msg_obj_no = get_tx_next_msg_obj(priv);
+
+ /* prepare message object for transmission */
+ c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+ can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+
+ /*
+ * we have to stop the queue in case of a wrap around or
+ * if the next TX message object is still in use
+ */
+ priv->tx_next++;
+ if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
+ (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static int c_can_set_bittiming(struct net_device *dev)
+{
+ unsigned int reg_btr, reg_brpe, ctrl_save;
+ u8 brp, brpe, sjw, tseg1, tseg2;
+ u32 ten_bit_brp;
+ struct c_can_priv *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+
+	/* c_can provides 6-bit brp and 4-bit brpe fields */
+ ten_bit_brp = bt->brp - 1;
+ brp = ten_bit_brp & BTR_BRP_MASK;
+ brpe = ten_bit_brp >> 6;
+
+ sjw = bt->sjw - 1;
+ tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+ tseg2 = bt->phase_seg2 - 1;
+ reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
+ (tseg2 << BTR_TSEG2_SHIFT);
+ reg_brpe = brpe & BRP_EXT_BRPE_MASK;
+
+ netdev_info(dev,
+ "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+
+ ctrl_save = priv->read_reg(priv, &priv->regs->control);
+ priv->write_reg(priv, &priv->regs->control,
+ ctrl_save | CONTROL_CCE | CONTROL_INIT);
+ priv->write_reg(priv, &priv->regs->btr, reg_btr);
+ priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
+ priv->write_reg(priv, &priv->regs->control, ctrl_save);
+
+ return 0;
+}
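A worked example of the BRP/BRPE split above, with illustrative numbers and assuming BTR_BRP_MASK is the 6-bit mask 0x3f:

/*
 * For bt->brp == 80 the code computes:
 *	ten_bit_brp = 80 - 1 = 79	(0x4f)
 *	brp  = 79 & 0x3f = 15		-> written to the BTR register
 *	brpe = 79 >> 6   = 1		-> written to the BRP_EXT register
 * and the controller recombines them as ((brpe << 6) | brp) + 1 == 80.
 */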
+
+/*
+ * Configure C_CAN message objects for Tx and Rx purposes:
+ * C_CAN provides a total of 32 message objects that can be configured
+ * either for Tx or Rx purposes. Here the first 16 message objects are used
+ * as a reception FIFO; the end of the reception FIFO is signalled by the
+ * EoB bit being set. The remaining 16 message objects are reserved for Tx
+ * purposes. See the C_CAN user guide for further details on configuring
+ * message objects.
+ */
+static void c_can_configure_msg_objects(struct net_device *dev)
+{
+ int i;
+
+ /* first invalidate all message objects */
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+ c_can_inval_msg_object(dev, 0, i);
+
+ /* setup receive message objects */
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+ c_can_setup_receive_object(dev, 0, i, 0, 0,
+ (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+
+ c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+ IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+}
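For orientation, a sketch of the message object partition that the two loops above assume; the numbers are illustrative only (the real constants are defined earlier in this patch) and mirror the 16 Rx / 16 Tx split described in the comment:

/*
 *	objects  1..15 : Rx FIFO entries, EoB clear
 *	object      16 : end of the Rx FIFO, EoB set
 *	objects 17..32 : Tx objects
 */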
+
+/*
+ * Configure C_CAN chip:
+ * - enable/disable auto-retransmission
+ * - set operating mode
+ * - configure message objects
+ */
+static void c_can_chip_config(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ /* disable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_DISABLE_AR);
+ else
+ /* enable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_ENABLE_AR);
+
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+ /* loopback + silent mode : useful for hot self-test */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test,
+ TEST_LBACK | TEST_SILENT);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* loopback mode : useful for self-test function */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* silent mode : bus-monitoring mode */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+ } else
+		/* normal mode */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+
+ /* configure message objects */
+ c_can_configure_msg_objects(dev);
+
+ /* set a `lec` value so that we can check for updates later */
+ priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+ /* set bittiming params */
+ c_can_set_bittiming(dev);
+}
+
+static void c_can_start(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* enable status change, error and module interrupts */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+ /* basic c_can configuration */
+ c_can_chip_config(dev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* reset tx helper pointers */
+ priv->tx_next = priv->tx_echo = 0;
+}
+
+static void c_can_stop(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* disable all interrupts */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+ /* set the state as STOPPED */
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ c_can_start(dev);
+ netif_wake_queue(dev);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ unsigned int reg_err_counter;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+ ERR_CNT_REC_SHIFT;
+ bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
+ return 0;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next; each packet that has
+ * been transmitted is echoed back to the CAN framework. If we discover
+ * a not yet transmitted packet, we stop looking for more.
+ */
+static void c_can_do_tx(struct net_device *dev)
+{
+ u32 val;
+ u32 msg_obj_no;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+
+ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ msg_obj_no = get_tx_echo_msg_obj(priv);
+ c_can_inval_msg_object(dev, 0, msg_obj_no);
+ val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ if (!(val & (1 << msg_obj_no))) {
+ can_get_echo_skb(dev,
+ msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+ stats->tx_bytes += priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl)
+ & IF_MCONT_DLC_MASK;
+ stats->tx_packets++;
+ }
+ }
+
+	/* restart queue on wrap-around or if it stalled on the last packet */
+ if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
+ ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
+ netif_wake_queue(dev);
+}
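The tx_next/tx_echo bookkeeping described in the comment above can be modelled in isolation. This is an illustrative sketch only; the get_tx_next_msg_obj()/get_tx_echo_msg_obj() helpers are assumed to mask the free-running counters down to a Tx message object number:

#define SKETCH_OBJ_TX_FIRST	17	/* assumed first Tx object */
#define SKETCH_OBJ_MASK		0x0f	/* assumes 16 Tx objects */

/* frames handed to the hardware but not yet echoed back */
static inline unsigned int sketch_tx_pending(unsigned int tx_next,
					     unsigned int tx_echo)
{
	return tx_next - tx_echo;
}

/* map a free-running counter onto a Tx message object number */
static inline unsigned int sketch_tx_obj(unsigned int counter)
{
	return (counter & SKETCH_OBJ_MASK) + SKETCH_OBJ_TX_FIRST;
}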
+
+/*
+ * theory of operation:
+ *
+ * The c_can core saves a received CAN message into the first free message
+ * object it finds (starting with the lowest). The NEWDAT and INTPND bits
+ * are set for this message object, indicating that a new message has
+ * arrived. To keep reception ordered, we split the message objects into
+ * two groups whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
+ *
+ * To ensure in-order frame reception we use the following
+ * approach while re-activating a message object to receive further
+ * frames:
+ * - if the current message object number is lower than
+ * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
+ * the INTPND bit.
+ * - if the current message object number is equal to
+ * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
+ * receive message objects.
+ * - if the current message object number is greater than
+ * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
+ * only this message object.
+ */
+static int c_can_do_rx_poll(struct net_device *dev, int quota)
+{
+ u32 num_rx_pkts = 0;
+ unsigned int msg_obj, msg_ctrl_save;
+ struct c_can_priv *priv = netdev_priv(dev);
+ u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+
+ for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
+ msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
+ val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+ msg_obj++) {
+ /*
+		 * the interrupt pending register's bit n-1 corresponds to
+		 * message object n, so shift the bit position accordingly.
+ */
+ if (val & (1 << (msg_obj - 1))) {
+ c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
+ ~IF_COMM_TXRQST);
+ msg_ctrl_save = priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl);
+
+ if (msg_ctrl_save & IF_MCONT_EOB)
+ return num_rx_pkts;
+
+ if (msg_ctrl_save & IF_MCONT_MSGLST) {
+ c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+ num_rx_pkts++;
+ quota--;
+ continue;
+ }
+
+ if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ continue;
+
+ /* read the data from the message object */
+ c_can_read_msg_object(dev, 0, msg_ctrl_save);
+
+ if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
+ c_can_mark_rx_msg_obj(dev, 0,
+ msg_ctrl_save, msg_obj);
+ else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
+ /* activate this msg obj */
+ c_can_activate_rx_msg_obj(dev, 0,
+ msg_ctrl_save, msg_obj);
+ else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
+ /* activate all lower message objects */
+ c_can_activate_all_lower_rx_msg_obj(dev,
+ 0, msg_ctrl_save);
+
+ num_rx_pkts++;
+ quota--;
+ }
+ }
+
+ return num_rx_pkts;
+}
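The three re-activation cases listed in the comment before this function condense into a small decision helper; purely illustrative, not part of the patch:

enum sketch_rx_action {
	SKETCH_RX_MARK,			/* clear INTPND, keep NEWDAT */
	SKETCH_RX_ACTIVATE_ALL_LOWER,	/* clear NEWDAT of all lower objects */
	SKETCH_RX_ACTIVATE_ONE,		/* clear NEWDAT of this object only */
};

static inline enum sketch_rx_action sketch_rx_decide(int obj, int rx_low_last)
{
	if (obj < rx_low_last)
		return SKETCH_RX_MARK;
	if (obj == rx_low_last)
		return SKETCH_RX_ACTIVATE_ALL_LOWER;
	return SKETCH_RX_ACTIVATE_ONE;
}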
+
+static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
+{
+ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ (priv->current_status & LEC_UNUSED);
+}
+
+static int c_can_handle_state_change(struct net_device *dev,
+ enum c_can_bus_error_types error_type)
+{
+ unsigned int reg_err_counter;
+ unsigned int rx_err_passive;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct can_berr_counter bec;
+
+	/* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ c_can_get_berr_counter(dev, &bec);
+ reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+ ERR_CNT_RP_SHIFT;
+
+ switch (error_type) {
+ case C_CAN_ERROR_WARNING:
+ /* error warning state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ break;
+ case C_CAN_ERROR_PASSIVE:
+ /* error passive state */
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ if (rx_err_passive)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if (bec.txerr > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ case C_CAN_BUS_OFF:
+ /* bus-off state */
+ priv->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ /*
+ * disable all interrupts in bus-off mode to ensure that
+		 * the CPU is not bogged down
+ */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ can_bus_off(dev);
+ break;
+ default:
+ break;
+ }
+
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int c_can_handle_bus_err(struct net_device *dev,
+ enum c_can_lec_type lec_type)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /*
+ * early exit if no lec update or no error.
+ * no lec update means that no CAN bus event has been detected
+	 * since the CPU wrote the 0x7 value to the status register.
+ */
+ if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
+ return 0;
+
+	/* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ /*
+ * check for 'last error code' which tells us the
+ * type of the last error to occur on the CAN bus
+ */
+
+ /* common for all type of bus errors */
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+ switch (lec_type) {
+ case LEC_STUFF_ERROR:
+ netdev_dbg(dev, "stuff error\n");
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case LEC_FORM_ERROR:
+ netdev_dbg(dev, "form error\n");
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case LEC_ACK_ERROR:
+ netdev_dbg(dev, "ack error\n");
+ cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL);
+ break;
+ case LEC_BIT1_ERROR:
+ netdev_dbg(dev, "bit1 error\n");
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case LEC_BIT0_ERROR:
+ netdev_dbg(dev, "bit0 error\n");
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case LEC_CRC_ERROR:
+ netdev_dbg(dev, "CRC error\n");
+ cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ break;
+ default:
+ break;
+ }
+
+ /* set a `lec` value so that we can check for updates later */
+ priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int c_can_poll(struct napi_struct *napi, int quota)
+{
+ u16 irqstatus;
+ int lec_type = 0;
+ int work_done = 0;
+ struct net_device *dev = napi->dev;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!irqstatus)
+ goto end;
+
+ /* status events have the highest priority */
+ if (irqstatus == STATUS_INTERRUPT) {
+ priv->current_status = priv->read_reg(priv,
+ &priv->regs->status);
+
+ /* handle Tx/Rx events */
+ if (priv->current_status & STATUS_TXOK)
+ priv->write_reg(priv, &priv->regs->status,
+ priv->current_status & ~STATUS_TXOK);
+
+ if (priv->current_status & STATUS_RXOK)
+ priv->write_reg(priv, &priv->regs->status,
+ priv->current_status & ~STATUS_RXOK);
+
+ /* handle state changes */
+ if ((priv->current_status & STATUS_EWARN) &&
+ (!(priv->last_status & STATUS_EWARN))) {
+ netdev_dbg(dev, "entered error warning state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_ERROR_WARNING);
+ }
+ if ((priv->current_status & STATUS_EPASS) &&
+ (!(priv->last_status & STATUS_EPASS))) {
+ netdev_dbg(dev, "entered error passive state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_ERROR_PASSIVE);
+ }
+ if ((priv->current_status & STATUS_BOFF) &&
+ (!(priv->last_status & STATUS_BOFF))) {
+ netdev_dbg(dev, "entered bus off state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_BUS_OFF);
+ }
+
+ /* handle bus recovery events */
+ if ((!(priv->current_status & STATUS_BOFF)) &&
+ (priv->last_status & STATUS_BOFF)) {
+ netdev_dbg(dev, "left bus off state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ if ((!(priv->current_status & STATUS_EPASS)) &&
+ (priv->last_status & STATUS_EPASS)) {
+ netdev_dbg(dev, "left error passive state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ priv->last_status = priv->current_status;
+
+ /* handle lec errors on the bus */
+ lec_type = c_can_has_and_handle_berr(priv);
+ if (lec_type)
+ work_done += c_can_handle_bus_err(dev, lec_type);
+ } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
+ (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
+ /* handle events corresponding to receive message objects */
+ work_done += c_can_do_rx_poll(dev, (quota - work_done));
+ } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
+ (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
+ /* handle events corresponding to transmit message objects */
+ c_can_do_tx(dev);
+ }
+
+end:
+ if (work_done < quota) {
+ napi_complete(napi);
+ /* enable all IRQs */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t c_can_isr(int irq, void *dev_id)
+{
+ u16 irqstatus;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!irqstatus)
+ return IRQ_NONE;
+
+ /* disable all interrupts and schedule the NAPI */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
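Taken together, c_can_isr() and c_can_poll() implement the usual NAPI hand-off; a short flow summary, for orientation only:

/*
 * hard IRQ :	read the interrupt register, disable controller
 *		interrupts, napi_schedule()
 * NAPI poll:	handle up to `quota` events (status changes, Rx, Tx);
 *		when work_done < quota, napi_complete() and re-enable
 *		controller interrupts so the next event raises an IRQ
 */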
+
+static int c_can_open(struct net_device *dev)
+{
+ int err;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* open the can device */
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ /* register interrupt handler */
+ err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
+ dev);
+ if (err < 0) {
+ netdev_err(dev, "failed to request interrupt\n");
+ goto exit_irq_fail;
+ }
+
+ /* start the c_can controller */
+ c_can_start(dev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+exit_irq_fail:
+ close_candev(dev);
+ return err;
+}
+
+static int c_can_close(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+ c_can_stop(dev);
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_c_can_dev(void)
+{
+ struct net_device *dev;
+ struct c_can_priv *priv;
+
+ dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &c_can_bittiming_const;
+ priv->can.do_set_mode = c_can_set_mode;
+ priv->can.do_get_berr_counter = c_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
+ CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING;
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+
+void free_c_can_dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_c_can_dev);
+
+static const struct net_device_ops c_can_netdev_ops = {
+ .ndo_open = c_can_open,
+ .ndo_stop = c_can_close,
+ .ndo_start_xmit = c_can_start_xmit,
+};
+
+int register_c_can_dev(struct net_device *dev)
+{
+ dev->flags |= IFF_ECHO; /* we support local echo */
+ dev->netdev_ops = &c_can_netdev_ops;
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_c_can_dev);
+
+void unregister_c_can_dev(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* disable all interrupts */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 000000000000..9b7fbef3d09a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * The Bosch C_CAN controller is compliant with CAN protocol version 2.0,
+ * parts A and B. The Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef C_CAN_H
+#define C_CAN_H
+
+/* c_can IF registers */
+struct c_can_if_regs {
+ u16 com_req;
+ u16 com_mask;
+ u16 mask1;
+ u16 mask2;
+ u16 arb1;
+ u16 arb2;
+ u16 msg_cntrl;
+ u16 data[4];
+ u16 _reserved[13];
+};
+
+/* c_can hardware registers */
+struct c_can_regs {
+ u16 control;
+ u16 status;
+ u16 err_cnt;
+ u16 btr;
+ u16 interrupt;
+ u16 test;
+ u16 brp_ext;
+ u16 _reserved1;
+ struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
+ u16 _reserved2[8];
+ u16 txrqst1;
+ u16 txrqst2;
+ u16 _reserved3[6];
+ u16 newdat1;
+ u16 newdat2;
+ u16 _reserved4[6];
+ u16 intpnd1;
+ u16 intpnd2;
+ u16 _reserved5[6];
+ u16 msgval1;
+ u16 msgval2;
+ u16 _reserved6[6];
+};
+
+/* c_can private data structure */
+struct c_can_priv {
+ struct can_priv can; /* must be the first member */
+ struct napi_struct napi;
+ struct net_device *dev;
+ int tx_object;
+ int current_status;
+ int last_status;
+ u16 (*read_reg) (struct c_can_priv *priv, void *reg);
+ void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
+ struct c_can_regs __iomem *regs;
+ unsigned long irq_flags; /* for request_irq() */
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ void *priv; /* for board-specific data */
+};
+
+struct net_device *alloc_c_can_dev(void);
+void free_c_can_dev(struct net_device *dev);
+int register_c_can_dev(struct net_device *dev);
+void unregister_c_can_dev(struct net_device *dev);
+
+#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 000000000000..e629b961ae2d
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
+/*
+ * Platform CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * The Bosch C_CAN controller is compliant with CAN protocol version 2.0,
+ * parts A and B. The Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example, the 16-bit
+ * registers can be aligned to a 16-bit or to a 32-bit boundary.
+ * Handle this by providing a common read/write interface.
+ */
+static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+ void *reg)
+{
+ return readw(reg);
+}
+
+static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+ void *reg, u16 val)
+{
+ writew(val, reg);
+}
+
+static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+ void *reg)
+{
+ return readw(reg + (long)reg - (long)priv->regs);
+}
+
+static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+ void *reg, u16 val)
+{
+ writew(val, reg + (long)reg - (long)priv->regs);
+}
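The 32-bit-aligned accessors above double the register's byte offset within struct c_can_regs; the pointer arithmetic is equivalent to the following, spelled out for clarity (illustrative only):

/*
 *	offset  = (char *)reg - (char *)priv->regs;	(byte offset in the 16-bit layout)
 *	address = (char *)priv->regs + 2 * offset;	(same register in the 32-bit layout)
 *	value   = readw(address);
 */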
+
+static int __devinit c_can_plat_probe(struct platform_device *pdev)
+{
+ int ret;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct c_can_priv *priv;
+ struct resource *mem, *irq;
+#ifdef CONFIG_HAVE_CLK
+ struct clk *clk;
+
+ /* get the appropriate clk */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+#endif
+
+ /* get the platform data */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mem || !irq) {
+ ret = -ENODEV;
+ goto exit_free_clk;
+ }
+
+ if (!request_mem_region(mem->start, resource_size(mem),
+ KBUILD_MODNAME)) {
+ dev_err(&pdev->dev, "resource unavailable\n");
+ ret = -ENODEV;
+ goto exit_free_clk;
+ }
+
+ addr = ioremap(mem->start, resource_size(mem));
+ if (!addr) {
+ dev_err(&pdev->dev, "failed to map can port\n");
+ ret = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ /* allocate the c_can device */
+ dev = alloc_c_can_dev();
+ if (!dev) {
+ ret = -ENOMEM;
+ goto exit_iounmap;
+ }
+
+ priv = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ priv->regs = addr;
+#ifdef CONFIG_HAVE_CLK
+ priv->can.clock.freq = clk_get_rate(clk);
+ priv->priv = clk;
+#endif
+
+ switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ default:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_c_can_dev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ KBUILD_MODNAME, ret);
+ goto exit_free_device;
+ }
+
+ dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ KBUILD_MODNAME, priv->regs, dev->irq);
+ return 0;
+
+exit_free_device:
+ platform_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+exit_iounmap:
+ iounmap(addr);
+exit_release_mem:
+ release_mem_region(mem->start, resource_size(mem));
+exit_free_clk:
+#ifdef CONFIG_HAVE_CLK
+ clk_put(clk);
+exit:
+#endif
+ dev_err(&pdev->dev, "probe failed\n");
+
+ return ret;
+}
+
+static int __devexit c_can_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_c_can_dev(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ free_c_can_dev(dev);
+ iounmap(priv->regs);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+#ifdef CONFIG_HAVE_CLK
+ clk_put(priv->priv);
+#endif
+
+ return 0;
+}
+
+static struct platform_driver c_can_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = c_can_plat_probe,
+ .remove = __devexit_p(c_can_plat_remove),
+};
+
+static int __init c_can_plat_init(void)
+{
+ return platform_driver_register(&c_can_plat_driver);
+}
+module_init(c_can_plat_init);
+
+static void __exit c_can_plat_exit(void)
+{
+ platform_driver_unregister(&c_can_plat_driver);
+}
+module_exit(c_can_plat_exit);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 366f5cc050ae..102b16c6cc97 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/netdevice.h>
#include <linux/can.h>
@@ -1643,7 +1644,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
struct device *dev;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 312b9c8f4f3b..c0a1bc5b1435 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -247,10 +247,9 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
}
#endif /* CONFIG_PPC_MPC512x */
-static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
{
- struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
+ struct mpc5xxx_can_data *data;
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
struct mscan_priv *priv;
@@ -259,6 +258,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
int irq, mscan_clksrc = 0;
int err = -ENOMEM;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+	data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
+
base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
@@ -391,7 +394,7 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
{},
};
-static struct of_platform_driver mpc5xxx_can_driver = {
+static struct platform_driver mpc5xxx_can_driver = {
.driver = {
.name = "mpc5xxx_can",
.owner = THIS_MODULE,
@@ -407,13 +410,13 @@ static struct of_platform_driver mpc5xxx_can_driver = {
static int __init mpc5xxx_can_init(void)
{
- return of_register_platform_driver(&mpc5xxx_can_driver);
+ return platform_driver_register(&mpc5xxx_can_driver);
}
module_init(mpc5xxx_can_init);
static void __exit mpc5xxx_can_exit(void)
{
- return of_unregister_platform_driver(&mpc5xxx_can_driver);
+ platform_driver_unregister(&mpc5xxx_can_driver);
};
module_exit(mpc5xxx_can_exit);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 09c3e9db9316..9793df6e3455 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -87,8 +87,7 @@ static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
return 0;
}
-static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
@@ -210,7 +209,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
};
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-static struct of_platform_driver sja1000_ofp_driver = {
+static struct platform_driver sja1000_ofp_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
@@ -222,12 +221,12 @@ static struct of_platform_driver sja1000_ofp_driver = {
static int __init sja1000_ofp_init(void)
{
- return of_register_platform_driver(&sja1000_ofp_driver);
+ return platform_driver_register(&sja1000_ofp_driver);
}
module_init(sja1000_ofp_init);
static void __exit sja1000_ofp_exit(void)
{
- return of_unregister_platform_driver(&sja1000_ofp_driver);
+ return platform_driver_unregister(&sja1000_ofp_driver);
};
module_exit(sja1000_ofp_exit);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5157e15e96eb..aeea9f9ff6e8 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
};
static const struct can_bittiming_const softing_btr_const = {
+ .name = "softing",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7ff170cbc7dc..9fc6c587442a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+ return rcu_dereference_protected(cnic_ulp_tbl[type],
+ lockdep_is_held(&cnic_lock));
+}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type]) {
+ if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type] == NULL) {
+ if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
@@ -2760,6 +2767,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading other fields */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2779,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
@@ -2888,6 +2899,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading the KCQ */
+ rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2911,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
break;
last_status = *info->status_idx_ptr;
+ /* status block index must be read before reading the KCQ */
+ rmb();
}
return last_status;
}
@@ -2906,26 +2921,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx;
+ u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+ while (1) {
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
- CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+ CNIC_WR16(dev, cp->kcq1.io_addr,
+ cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+ if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
+ break;
+ }
+
+ new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+ if (new_status_idx != status_idx)
+ continue;
CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
MAX_KCQ_IDX);
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
- } else {
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+
+ break;
}
}
@@ -2953,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
@@ -2977,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
@@ -3041,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
@@ -3065,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
@@ -4170,6 +4196,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+ u32 max_conn;
+
+ max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+ dev->max_iscsi_conn = max_conn;
+}
+
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4528,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ cnic_get_bnx2_iscsi_info(dev);
+
return 0;
}
@@ -4705,129 +4741,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
cp->rx_cons = *cp->rx_cons_ptr;
}
-static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
- u32 lower_addr)
-{
- u32 val;
- u8 mac[6];
-
- val = CNIC_RD(dev, upper_addr);
-
- mac[0] = (u8) (val >> 8);
- mac[1] = (u8) val;
-
- val = CNIC_RD(dev, lower_addr);
-
- mac[2] = (u8) (val >> 24);
- mac[3] = (u8) (val >> 16);
- mac[4] = (u8) (val >> 8);
- mac[5] = (u8) val;
-
- if (is_valid_ether_addr(mac)) {
- memcpy(dev->mac_addr, mac, 6);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
-{
- struct cnic_local *cp = dev->cnic_priv;
- u32 base, base2, addr, addr1, val;
- int port = CNIC_PORT(cp);
-
- dev->max_iscsi_conn = 0;
- base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
- if (base == 0)
- return;
-
- base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
- MISC_REG_GENERIC_CR_0));
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_upper);
-
- addr1 = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_lower);
-
- cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
-
- addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
- val = CNIC_RD(dev, addr);
-
- if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
- u16 val16;
-
- addr = BNX2X_SHMEM_ADDR(base,
- drv_lic_key[port].max_iscsi_init_conn);
- val16 = CNIC_RD16(dev, addr);
-
- if (val16)
- val16 ^= 0x1e1e;
- dev->max_iscsi_conn = val16;
- }
-
- if (BNX2X_CHIP_IS_E2(cp->chip_id))
- dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
-
- if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
- int func = CNIC_FUNC(cp);
- u32 mf_cfg_addr;
-
- if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
- mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
- mf_cfg_addr));
- else
- mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- /* Must determine if the MF is SD vs SI mode */
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.shared_feature_config.config);
- val = CNIC_RD(dev, addr);
- if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
- SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
- int rc;
-
- /* MULTI_FUNCTION_SI mode */
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].func_cfg);
- val = CNIC_RD(dev, addr);
- if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
- dev->max_iscsi_conn = 0;
-
- if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
- dev->max_fcoe_conn = 0;
-
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].
- iscsi_mac_addr_upper);
- addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].
- iscsi_mac_addr_lower);
- rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
- addr1);
- if (rc && func > 1)
- dev->max_iscsi_conn = 0;
-
- return;
- }
- }
-
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_mf_config[func].e1hov_tag);
-
- val = CNIC_RD(dev, addr);
- val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
- if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
- dev->max_fcoe_conn = 0;
- dev->max_iscsi_conn = 0;
- }
- }
- if (!is_valid_ether_addr(dev->mac_addr))
- dev->max_iscsi_conn = 0;
-}
-
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4822,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cnic_init_bnx2x_kcq(dev);
- cnic_get_bnx2x_iscsi_info(dev);
-
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5264,15 +5175,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
dev_hold(dev);
pci_dev_get(pdev);
- if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
- pdev->device == PCI_DEVICE_ID_NX2_5709S) {
- u8 rev;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
- if (rev < 0x10) {
- pci_dev_put(pdev);
- goto cnic_err;
- }
+ if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+ pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+ (pdev->revision < 0x10)) {
+ pci_dev_put(pdev);
+ goto cnic_err;
}
pci_dev_put(pdev);
@@ -5343,6 +5250,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+ !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c924c3..4456260c653c 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
- struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+ struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0ffe003..e01b49ee3591 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.2.12"
-#define CNIC_MODULE_RELDATE "Jan 03, 2011"
+#define CNIC_MODULE_VERSION "2.2.13"
+#define CNIC_MODULE_RELDATE "Jan 31, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
+#define CNIC_DRV_STATE_NO_FCOE 0x00000010
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
u32 fcoe_init_cid;
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
+ u8 iscsi_mac[ETH_ALEN];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa68c926..862804f32b6e 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
dev = NULL;
if (grp)
dev = vlan_group_get_device(grp, vlan);
- } else
+ } else if (netif_is_bond_slave(dev)) {
while (dev->master)
dev = dev->master;
+ }
return dev;
}
}
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
cxgb_neigh_update((struct neighbour *)ctx);
break;
}
- case (NETEVENT_PMTU_UPDATE):
- break;
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index ec35d458102c..5352c8a23f4d 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
case NETEVENT_NEIGH_UPDATE:
check_neigh_update(data);
break;
- case NETEVENT_PMTU_UPDATE:
case NETEVENT_REDIRECT:
default:
break;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d178..7018bfe408a4 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
int ret;
/* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev))) {
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
dev_kfree_skb_any(skb);
return;
}
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc1d900..317708113601 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Checksum mode */
dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
- /* GPIO0 on pre-activate PHY */
- iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
- iow(db, DM9000_GPR, 0); /* Enable PHY */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
unsigned long flags;
/* Save previous register address */
- reg_save = readb(db->io_addr);
spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+	mdelay(1); /* delay needed by DM9000B */
+
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
@@ -1592,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
}
- if (!is_valid_ether_addr(ndev->dev_addr))
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
+ random_ether_addr(ndev->dev_addr);
+ mac_src = "random";
+ }
+
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa9..8318ea06cb6d 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
-
if (mdiobus_register(bp->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
bp = netdev_priv(dev);
bp->dev = dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711f1688..33e7c45a4fe4 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
#define GBE_CONFIG_RAM_BASE \
((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
-#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
+#define GBE_CONFIG_BASE_VIRT \
+ ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
(iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e1369053..00bf595ebd67 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
+ spinlock_t stats64_lock;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64
+ *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b6336cfb..d4e51aa231b9 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
};
#define E1000_STAT(str, m) { \
- .stat_string = str, \
- .type = E1000_STATS, \
- .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
- .stat_offset = offsetof(struct e1000_adapter, m) }
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
#define E1000_NETDEV_STAT(str, m) { \
- .stat_string = str, \
- .type = NETDEV_STATS, \
- .sizeof_stat = sizeof(((struct net_device *)0)->m), \
- .stat_offset = offsetof(struct net_device, m) }
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("tx_broadcast", stats.bptc),
E1000_STAT("rx_multicast", stats.mprc),
E1000_STAT("tx_multicast", stats.mptc),
- E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
- E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
- E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
E1000_STAT("multicast", stats.mprc),
E1000_STAT("collisions", stats.colc),
- E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
- E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
E1000_STAT("rx_crc_errors", stats.crcerrs),
- E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
E1000_STAT("rx_no_buffer_count", stats.rnbc),
E1000_STAT("rx_missed_errors", stats.mpc),
E1000_STAT("tx_aborted_errors", stats.ecol),
E1000_STAT("tx_carrier_errors", stats.tncrs),
- E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
- E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
E1000_STAT("tx_window_errors", stats.latecol),
E1000_STAT("tx_abort_late_coll", stats.latecol),
E1000_STAT("tx_deferred_ok", stats.dc),
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u16 phy_data;
- u8 revision_id;
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
- pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
- regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!tx_ring)
goto err_alloc_tx;
- /*
- * use a memcpy to save any previously configured
- * items like napi structs from having to be
- * reinitialized
- */
- memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
- rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!rx_ring)
goto err_alloc_rx;
- memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
adapter->tx_ring = tx_ring;
adapter->rx_ring = rx_ring;
@@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u32 stat_reg = 0;
u16 phy_reg = 0;
s32 ret_val = 0;
@@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
- stat_reg = er32(STATUS);
- if ((stat_reg & E1000_STATUS_FD) == 0)
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
@@ -1972,8 +1961,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
static int e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (!adapter->hw.mac.autoneg)
+ return -EINVAL;
+
+ e1000e_reinit_locked(adapter);
+
return 0;
}
@@ -1982,14 +1978,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
int i;
char *p = NULL;
- e1000e_update_stats(adapter);
+ e1000e_get_stats64(netdev, &net_stats);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) netdev +
+ p = (char *) &net_stats +
e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
@@ -2014,7 +2011,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974cfec1..232b42b7f7ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2104,7 +2104,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
s32 ret_val = -E1000_ERR_NVM;
- s32 i = 0;
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
@@ -2140,6 +2139,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
+ s32 i = 0;
+
/*
* Otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa1749bf66..96921de5df2e 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
- u16 timeout = 0;
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
udelay(1);
- timeout = NVM_MAX_RETRY_SPI;
/*
* Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3fa110ddb041..660eeb28ed40 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -900,8 +900,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
- netdev->stats.tx_bytes += total_tx_bytes;
- netdev->stats.tx_packets += total_tx_packets;
return count < tx_ring->count;
}
@@ -1248,8 +1244,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1429,8 +1423,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
int err = 0, vector = 0;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+ snprintf(adapter->rx_ring->name,
+ sizeof(adapter->rx_ring->name) - 1,
+ "%s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
vector++;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+ snprintf(adapter->tx_ring->name,
+ sizeof(adapter->tx_ring->name) - 1,
+ "%s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
- u32 psrctl = 0;
u32 pages = 0;
/* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
/* Configure extra packet-split registers */
rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
@@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
- int i;
/* Check for Promiscuous and All Multicast modes */
@@ -3057,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
if (!netdev_mc_empty(netdev)) {
+ int i = 0;
+
mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
@@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
e1e_flush();
}
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
netif_carrier_off(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ spin_lock_init(&adapter->stats64_lock);
+
e1000e_set_interrupt_capability(adapter);
if (e1000_alloc_queues(adapter))
@@ -3918,7 +3924,7 @@ release:
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- int ret_val;
if ((er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work)
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_hw *hw = &adapter->hw;
u32 link, tctl;
- int tx_pending = 0;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
}
link_up:
+ spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
@@ -4334,20 +4342,17 @@ link_up:
e1000e_update_adaptive(&adapter->hw);
- if (!netif_carrier_ok(netdev)) {
- tx_pending = (e1000_desc_unused(tx_ring) + 1 <
- tx_ring->count);
- if (tx_pending) {
- /*
- * We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
- }
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
/* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (!skb_is_gso(skb))
return 0;
if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
if (err)
return err;
}
@@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
**/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
- /* only return the current stats */
- return &netdev->stats;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
}
/**
@@ -5338,7 +5382,7 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
return !!adapter->tx_ring->buffer_info;
@@ -5489,7 +5533,7 @@ static int e1000_runtime_resume(struct device *dev)
return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
@@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
- int vector, msix_irq;
if (adapter->msix_entries) {
+ int vector, msix_irq;
+
vector = 0;
msix_irq = adapter->msix_entries[vector].vector;
disable_irq(msix_irq);
@@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats = e1000_get_stats,
+ .ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_multicast_list = e1000_set_multi,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
@@ -5967,7 +6012,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if (eeprom_data & E1000_WUC_PHY_WAKE)
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
@@ -6195,7 +6241,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
@@ -6209,7 +6255,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
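Aside, not part of the patch: the netdev.c changes above convert the driver from ndo_get_stats to ndo_get_stats64, make e1000e_update_stats() static and serialise it behind the new adapter->stats64_lock (taken in the watchdog, in e1000e_down() and in the stats64 handler). A minimal sketch of the same shape, with a hypothetical private struct ex_priv and handler ex_get_stats64:

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct ex_priv {
	spinlock_t stats64_lock;		/* guards the counters below */
	u64 rx_bytes, rx_packets, tx_bytes, tx_packets;
};

static struct rtnl_link_stats64 *ex_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct ex_priv *p = netdev_priv(netdev);

	memset(stats, 0, sizeof(*stats));
	spin_lock(&p->stats64_lock);
	/* refresh hardware counters here, as e1000e_update_stats() does */
	stats->rx_bytes   = p->rx_bytes;
	stats->rx_packets = p->rx_packets;
	stats->tx_bytes   = p->tx_bytes;
	stats->tx_packets = p->tx_packets;
	spin_unlock(&p->stats64_lock);

	return stats;
}

static const struct net_device_ops ex_netdev_ops = {
	.ndo_get_stats64 = ex_get_stats64,
};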
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051b134b..6ae31fcfb629 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31880ba..2e573be16c13 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49d9db7..e816bbb9fbf9 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.10"
-#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.10"
+#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
+#define ENIC_WQ_MAX 1
+#define ENIC_RQ_MAX 1
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -49,7 +49,7 @@ struct enic_msix_entry {
void *devid;
};
-#define ENIC_SET_APPLIED (1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
unsigned int rq_count;
- int (*rq_alloc_buf)(struct vnic_rq *rq);
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 000000000000..37ad3a1c82ee
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_fw_info(enic->vdev, fw_info);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_stats_dump(enic->vdev, vstats);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_notify_unset(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_hang_notify(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable_wait(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_disable(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_done(enic->vdev, done, error);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_add_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_del_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
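Aside, not part of the patch: every helper in the new enic_dev.c has the same shape, taking enic->devcmd_lock around exactly one vnic_dev_*() firmware command so callers in enic_main.c no longer open-code the locking. A hypothetical caller, as it might appear in enic_main.c (which already includes enic.h and enic_dev.h); ex_log_hw_version is illustrative and not in the driver, and only fields shown elsewhere in this diff are used.

static void ex_log_hw_version(struct enic *enic)
{
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);	/* devcmd_lock taken inside */
	if (err) {
		netdev_err(enic->netdev, "fw info devcmd failed: %d\n", err);
		return;
	}

	netdev_info(enic->netdev, "hw version: %s\n", fw_info->hw_version);
}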
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 000000000000..495f57fcb887
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_init_done(struct enic *enic, int *done, int *error);
+
+#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c51fb3..8b9cad5e9712 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
+#include "enic_dev.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
return 0;
}
-static int enic_dev_fw_info(struct enic *enic,
- struct vnic_devcmd_fw_info **fw_info)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_fw_info(enic->vdev, fw_info);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_stats_dump(enic->vdev, vstats);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
return net_stats;
}
-static void enic_reset_multicast_list(struct enic *enic)
+static void enic_reset_addr_lists(struct enic *enic)
{
enic->mc_count = 0;
+ enic->uc_count = 0;
enic->flags = 0;
}
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return 0;
}
-static int enic_dev_add_station_addr(struct enic *enic)
-{
- int err = 0;
-
- if (is_valid_ether_addr(enic->netdev->dev_addr)) {
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
- spin_unlock(&enic->devcmd_lock);
- }
-
- return err;
-}
-
-static int enic_dev_del_station_addr(struct enic *enic)
-{
- int err = 0;
-
- if (is_valid_ether_addr(enic->netdev->dev_addr)) {
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
- spin_unlock(&enic->devcmd_lock);
- }
-
- return err;
-}
-
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
-static int enic_dev_packet_filter(struct enic *enic, int directed,
- int multicast, int broadcast, int promisc, int allmulti)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_add_addr(struct enic *enic, u8 *addr)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_add_addr(enic->vdev, addr);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_del_addr(struct enic *enic, u8 *addr)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_del_addr(enic->vdev, addr);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static void enic_add_multicast_addr_list(struct enic *enic)
+static void enic_update_multicast_addr_list(struct enic *enic)
{
struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
enic->mc_count = mc_count;
}
-static void enic_add_unicast_addr_list(struct enic *enic)
+static void enic_update_unicast_addr_list(struct enic *enic)
{
struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
if (!promisc) {
- enic_add_unicast_addr_list(enic);
+ enic_update_unicast_addr_list(enic);
if (!allmulti)
- enic_add_multicast_addr_list(enic);
+ enic_update_multicast_addr_list(enic);
}
}
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
enic->vlan_group = vlan_group;
}
-/* rtnl lock is held */
-static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- struct enic *enic = netdev_priv(netdev);
-
- spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
- spin_unlock(&enic->devcmd_lock);
-}
-
-/* rtnl lock is held */
-static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
- struct enic *enic = netdev_priv(netdev);
-
- spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
- spin_unlock(&enic->devcmd_lock);
-}
-
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
-static int enic_vnic_dev_deinit(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_deinit(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_prov(enic->vdev,
- (u8 *)vp, vic_provinfo_size(vp));
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_init_done(struct enic *enic, int *done, int *error)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_done(enic->vdev, done, error);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct enic *enic = netdev_priv(netdev);
@@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
if (err)
return err;
+ enic_reset_addr_lists(enic);
+
switch (enic->pp.request) {
case PORT_REQUEST_ASSOCIATE:
@@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
vic_provinfo_free(vp);
if (err)
return err;
-
- enic->pp.set |= ENIC_SET_APPLIED;
break;
case PORT_REQUEST_DISASSOCIATE:
- enic->pp.set &= ~ENIC_SET_APPLIED;
break;
default:
return -EINVAL;
}
+ /* Set flag to indicate that the port assoc/disassoc
+ * request has been sent out to fw
+ */
+ enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
return 0;
}
@@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (is_zero_ether_addr(netdev->dev_addr))
random_ether_addr(netdev->dev_addr);
- } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
- if (!is_zero_ether_addr(enic->pp.mac_addr))
- enic_dev_del_addr(enic, enic->pp.mac_addr);
}
memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (err)
goto set_port_profile_cleanup;
- if (!is_zero_ether_addr(enic->pp.mac_addr))
- enic_dev_add_addr(enic, enic->pp.mac_addr);
-
set_port_profile_cleanup:
memset(enic->pp.vf_mac, 0, ETH_ALEN);
@@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
int err, error, done;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
- if (!(enic->pp.set & ENIC_SET_APPLIED))
+ if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
-static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
-{
- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-
- if (vnic_rq_posting_soon(rq)) {
-
- /* SW workaround for A0 HW erratum: if we're just about
- * to write posted_index, insert a dummy desc
- * of type resvd
- */
-
- rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
- vnic_rq_post(rq, 0, 0, 0, 0);
- } else {
- return enic_rq_alloc_buf(rq);
- }
-
- return 0;
-}
-
-static int enic_dev_hw_version(struct enic *enic,
- enum vnic_dev_hw_version *hw_ver)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_hw_version(enic->vdev, hw_ver);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_set_rq_alloc_buf(struct enic *enic)
-{
- enum vnic_dev_hw_version hw_ver;
- int err;
-
- err = enic_dev_hw_version(enic, &hw_ver);
- if (err)
- return err;
-
- switch (hw_ver) {
- case VNIC_DEV_HW_VER_A1:
- enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
- break;
- case VNIC_DEV_HW_VER_A2:
- case VNIC_DEV_HW_VER_UNKNOWN:
- enic->rq_alloc_buf = enic_rq_alloc_buf;
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
return err;
}
-static int enic_dev_notify_unset(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_notify_unset(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_enable(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_enable_wait(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_disable(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_disable(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_notify_timer_start(struct enic *enic)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
-static int enic_dev_hang_notify(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_hang_notify(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
- enic_reset_multicast_list(enic);
+ enic_reset_addr_lists(enic);
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = 1;
+ unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_tx_timeout = enic_tx_timeout,
.ndo_set_vf_port = enic_set_vf_port,
.ndo_get_vf_port = enic_get_vf_port,
-#ifdef IFLA_VF_MAX
.ndo_set_vf_mac = enic_set_vf_mac,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = enic_poll_controller,
#endif
@@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
enic_init_vnic_resources(enic);
- err = enic_set_rq_alloc_buf(enic);
- if (err) {
- dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
- goto err_out_free_vnic_resources;
- }
-
err = enic_set_rss_nic_cfg(enic);
if (err) {
dev_err(dev, "Failed to config nic, aborting\n");
goto err_out_free_vnic_resources;
}
- err = enic_dev_set_ig_vlan_rewrite_mode(enic);
- if (err) {
- dev_err(dev,
- "Failed to set ingress vlan rewrite mode, aborting.\n");
- goto err_out_free_vnic_resources;
- }
-
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_vnic_unregister;
}
+ /* Setup devcmd lock
+ */
+
+ spin_lock_init(&enic->devcmd_lock);
+
+ /*
+ * Set ingress vlan rewrite mode before vnic initialization
+ */
+
+ err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+ if (err) {
+ dev_err(dev,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
+ goto err_out_dev_close;
+ }
+
/* Issue device init to initialize the vnic-to-switch link.
* We'll start with carrier off and wait for link UP
* notification later to turn on carrier. We don't need
@@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
}
}
- /* Setup devcmd lock
- */
-
- spin_lock_init(&enic->devcmd_lock);
-
err = enic_dev_init(enic);
if (err) {
dev_err(dev, "Device initialization failed, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b17668..c489e72107de 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -419,25 +419,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
return err;
}
-int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
-{
- struct vnic_devcmd_fw_info *fw_info;
- int err;
-
- err = vnic_dev_fw_info(vdev, &fw_info);
- if (err)
- return err;
-
- if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
- *hw_ver = VNIC_DEV_HW_VER_A1;
- else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
- *hw_ver = VNIC_DEV_HW_VER_A2;
- else
- *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
-
- return 0;
-}
-
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24cd459..e837546213a8 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-enum vnic_dev_hw_version {
- VNIC_DEV_HW_VER_UNKNOWN,
- VNIC_DEV_HW_VER_A1,
- VNIC_DEV_HW_VER_A2,
-};
-
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
-int vnic_dev_hw_version(struct vnic_dev *vdev,
- enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de2454a..2056586f4d4b 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
}
}
-static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-{
- return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
-}
-
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b79d7e1555d5..db0290f05bdf 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -1163,15 +1163,11 @@ static int ethoc_resume(struct platform_device *pdev)
# define ethoc_resume NULL
#endif
-#ifdef CONFIG_OF
static struct of_device_id ethoc_match[] = {
- {
- .compatible = "opencores,ethoc",
- },
+ { .compatible = "opencores,ethoc", },
{},
};
MODULE_DEVICE_TABLE(of, ethoc_match);
-#endif
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
@@ -1181,9 +1177,7 @@ static struct platform_driver ethoc_driver = {
.driver = {
.name = "ethoc",
.owner = THIS_MODULE,
-#ifdef CONFIG_OF
.of_match_table = ethoc_match,
-#endif
},
};
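Aside, not part of the patch: the ethoc.c hunks above drop the CONFIG_OF guards because struct of_device_id is defined in mod_devicetable.h and the match table still compiles (it is simply unused) when CONFIG_OF is disabled. A generic, hypothetical sketch of the same unconditional of_match_table wiring; the "vendor,foo" driver and its stubs are illustrative only, with module registration omitted.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* hypothetical: map resources, register the device, ... */
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo", },
	{ },
};
MODULE_DEVICE_TABLE(of, foo_match);

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,
	},
};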
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373719ae..885d8baff7d5 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
#include "fec.h"
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT 0xf
#else
#define FEC_ALIGNMENT 0x3
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
- }
+ },
+ { }
};
static unsigned char macaddr[ETH_ALEN];
@@ -147,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
- defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -183,7 +183,7 @@ struct fec_enet_private {
struct bufdesc *rx_bd_base;
struct bufdesc *tx_bd_base;
/* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
+ struct bufdesc *cur_rx, *cur_tx;
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
@@ -191,28 +191,21 @@ struct fec_enet_private {
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- struct platform_device *pdev;
+ struct platform_device *pdev;
int opened;
/* Phylib and MDIO interface */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
- int mii_timeout;
- uint phy_speed;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
phy_interface_t phy_interface;
int link;
int full_duplex;
struct completion mdio_done;
};
-static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
-static void fec_enet_tx(struct net_device *dev);
-static void fec_enet_rx(struct net_device *dev);
-static int fec_enet_close(struct net_device *dev);
-static void fec_restart(struct net_device *dev, int duplex);
-static void fec_stop(struct net_device *dev);
-
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
#define FEC_MMFR_OP_READ (2 << 28)
@@ -239,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len)
}
static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
@@ -262,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (status & BD_ENET_TX_READY) {
/* Ooops. All transmit buffers are full. Bail out.
- * This should not happen, since dev->tbusy should be set.
+ * This should not happen, since ndev->tbusy should be set.
*/
- printk("%s: tx queue full!.\n", dev->name);
+ printk("%s: tx queue full!.\n", ndev->name);
spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -284,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
- memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
+ memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -299,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
- dev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += skb->len;
fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -326,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
- netif_stop_queue(dev);
+ netif_stop_queue(ndev);
}
fep->cur_tx = bdp;
@@ -336,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+/* This function is called to start or restart the FEC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
static void
-fec_timeout(struct net_device *dev)
+fec_restart(struct net_device *ndev, int duplex)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int i;
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
- dev->stats.tx_errors++;
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
- fec_restart(dev, fep->full_duplex);
- netif_wake_queue(dev);
-}
+ /*
+ * enet-mac reset will reset mac address registers too,
+ * so need to reconfigure it.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
-static irqreturn_t
-fec_enet_interrupt(int irq, void * dev_id)
-{
- struct net_device *dev = dev_id;
- struct fec_enet_private *fep = netdev_priv(dev);
- uint int_events;
- irqreturn_t ret = IRQ_NONE;
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
- do {
- int_events = readl(fep->hwp + FEC_IEVENT);
- writel(int_events, fep->hwp + FEC_IEVENT);
+ /* Reset all multicast. */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
- if (int_events & FEC_ENET_RXF) {
- ret = IRQ_HANDLED;
- fec_enet_rx(dev);
- }
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
- /* Transmit OK, or non-fatal error. Update the buffer
- * descriptors. FEC handles all errors, we just discover
- * them as part of the transmit process.
- */
- if (int_events & FEC_ENET_TXF) {
- ret = IRQ_HANDLED;
- fec_enet_tx(dev);
+ /* Set receive and transmit descriptor base. */
+ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+ fep->hwp + FEC_X_DES_START);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* Reset SKB transmit buffers. */
+ fep->skb_cur = fep->skb_dirty = 0;
+ for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+ if (fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
}
+ }
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- complete(&fep->mdio_done);
+ /* Enable MII mode */
+ if (duplex) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+ /* No Rcv on Xmit */
+ rcntl |= 0x02;
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+ fep->full_duplex = duplex;
+
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ /*
+ * The phy interface and speed need to get configured
+ * differently on enet-mac.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* Enable flow control and length check */
+ rcntl |= 0x40000000 | 0x00000020;
+
+ /* MII or RMII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ rcntl |= (1 << 8);
+ else
+ rcntl &= ~(1 << 8);
+
+ /* 10M or 100M */
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
}
- } while (int_events);
+#endif
+ }
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
- return ret;
+ /* And last, enable the transmit and receive processing */
+ writel(2, fep->hwp + FEC_ECNTRL);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+ /* Enable interrupts we wish to service */
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
+static void
+fec_stop(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ /* We cannot expect a graceful transmit stop without link !!! */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+ udelay(10);
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+ printk("fec_stop : Graceful transmit stop did not complete !\n");
+ }
+
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+
+static void
+fec_timeout(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ ndev->stats.tx_errors++;
+
+ fec_restart(ndev, fep->full_duplex);
+ netif_wake_queue(ndev);
+}
static void
-fec_enet_tx(struct net_device *dev)
+fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
- fep = netdev_priv(dev);
+ fep = netdev_priv(ndev);
spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
@@ -399,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
if (bdp == fep->cur_tx && fep->tx_full == 0)
break;
- dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
skb = fep->tx_skbuff[fep->skb_dirty];
@@ -407,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
BD_ENET_TX_CSL)) {
- dev->stats.tx_errors++;
+ ndev->stats.tx_errors++;
if (status & BD_ENET_TX_HB) /* No heartbeat */
- dev->stats.tx_heartbeat_errors++;
+ ndev->stats.tx_heartbeat_errors++;
if (status & BD_ENET_TX_LC) /* Late collision */
- dev->stats.tx_window_errors++;
+ ndev->stats.tx_window_errors++;
if (status & BD_ENET_TX_RL) /* Retrans limit */
- dev->stats.tx_aborted_errors++;
+ ndev->stats.tx_aborted_errors++;
if (status & BD_ENET_TX_UN) /* Underrun */
- dev->stats.tx_fifo_errors++;
+ ndev->stats.tx_fifo_errors++;
if (status & BD_ENET_TX_CSL) /* Carrier lost */
- dev->stats.tx_carrier_errors++;
+ ndev->stats.tx_carrier_errors++;
} else {
- dev->stats.tx_packets++;
+ ndev->stats.tx_packets++;
}
if (status & BD_ENET_TX_READY)
@@ -429,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
* but we eventually sent the packet OK.
*/
if (status & BD_ENET_TX_DEF)
- dev->stats.collisions++;
+ ndev->stats.collisions++;
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
@@ -446,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
*/
if (fep->tx_full) {
fep->tx_full = 0;
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
}
}
fep->dirty_tx = bdp;
@@ -461,9 +563,9 @@ fec_enet_tx(struct net_device *dev)
* effectively tossing the packet.
*/
static void
-fec_enet_rx(struct net_device *dev)
+fec_enet_rx(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
@@ -497,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- dev->stats.rx_errors++;
+ ndev->stats.rx_errors++;
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
/* Frame too long or too short. */
- dev->stats.rx_length_errors++;
+ ndev->stats.rx_length_errors++;
}
if (status & BD_ENET_RX_NO) /* Frame alignment */
- dev->stats.rx_frame_errors++;
+ ndev->stats.rx_frame_errors++;
if (status & BD_ENET_RX_CR) /* CRC Error */
- dev->stats.rx_crc_errors++;
+ ndev->stats.rx_crc_errors++;
if (status & BD_ENET_RX_OV) /* FIFO overrun */
- dev->stats.rx_fifo_errors++;
+ ndev->stats.rx_fifo_errors++;
}
/* Report late collisions as a frame error.
@@ -515,19 +617,19 @@ fec_enet_rx(struct net_device *dev)
* have in the buffer. So, just drop this frame on the floor.
*/
if (status & BD_ENET_RX_CL) {
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
goto rx_processing_done;
}
/* Process the incoming frame. */
- dev->stats.rx_packets++;
+ ndev->stats.rx_packets++;
pkt_len = bdp->cbd_datlen;
- dev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_bytes += pkt_len;
data = (__u8*)__va(bdp->cbd_bufaddr);
- dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, pkt_len);
@@ -541,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
if (unlikely(!skb)) {
printk("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
+ ndev->name);
+ ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
}
- bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
- DMA_FROM_DEVICE);
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -577,10 +679,47 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
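/*
 * Aside, not part of the patch: the rx hunks above switch dma_map_single()
 * and dma_unmap_single() from NULL or &dev->dev to &fep->pdev->dev, i.e.
 * the struct device that actually performs the DMA.  A hypothetical helper
 * in the fec.c context (ex_map_rx_skb is illustrative, not in the driver):
 */
static dma_addr_t ex_map_rx_skb(struct fec_enet_private *fep,
				struct sk_buff *skb)
{
	dma_addr_t addr;

	addr = dma_map_single(&fep->pdev->dev, skb->data,
			      FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr))
		return 0;	/* hypothetical convention: 0 means mapping failed */

	return addr;
}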
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+ do {
+ int_events = readl(fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
+
+ if (int_events & FEC_ENET_RXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_rx(ndev);
+ }
+
+ /* Transmit OK, or non-fatal error. Update the buffer
+ * descriptors. FEC handles all errors, we just discover
+ * them as part of the transmit process.
+ */
+ if (int_events & FEC_ENET_TXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_tx(ndev);
+ }
+
+ if (int_events & FEC_ENET_MII) {
+ ret = IRQ_HANDLED;
+ complete(&fep->mdio_done);
+ }
+ } while (int_events);
+
+ return ret;
+}
+
+
+
/* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *dev)
+static void __inline__ fec_get_mac(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -616,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
iap = &tmpaddr[0];
}
- memcpy(dev->dev_addr, iap, ETH_ALEN);
+ memcpy(ndev->dev_addr, iap, ETH_ALEN);
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}
/* ------------------------------------------------------------------------- */
@@ -628,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
/*
* Phy section
*/
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = fep->phy_dev;
unsigned long flags;
@@ -647,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
/* Duplex link change */
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
- fec_restart(dev, phy_dev->duplex);
+ fec_restart(ndev, phy_dev->duplex);
status_change = 1;
}
}
@@ -656,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
if (phy_dev->link != fep->link) {
fep->link = phy_dev->link;
if (phy_dev->link)
- fec_restart(dev, phy_dev->duplex);
+ fec_restart(ndev, phy_dev->duplex);
else
- fec_stop(dev);
+ fec_stop(ndev);
status_change = 1;
}
@@ -727,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
return 0;
}
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = NULL;
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
@@ -754,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
if (phy_id >= PHY_MAX_ADDR) {
printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", dev->name);
+ "to switch\n", ndev->name);
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
phy_id = 0;
}
snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+ printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
return PTR_ERR(phy_dev);
}
@@ -776,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
fep->full_duplex = 0;
printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -786,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
static int fec_enet_mii_init(struct platform_device *pdev)
{
static struct mii_bus *fec0_mii_bus;
- struct net_device *dev = platform_get_drvdata(pdev);
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
int err = -ENXIO, i;
@@ -845,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
for (i = 0; i < PHY_MAX_ADDR; i++)
fep->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(dev, fep->mii_bus);
-
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
@@ -873,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
mdiobus_free(fep->mii_bus);
}
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
if (!phydev)
@@ -885,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
return phy_ethtool_gset(phydev, cmd);
}
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
if (!phydev)
@@ -897,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
return phy_ethtool_sset(phydev, cmd);
}
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
strcpy(info->driver, fep->pdev->dev.driver->name);
strcpy(info->version, "Revision: 1.0");
- strcpy(info->bus_info, dev_name(&dev->dev));
+ strcpy(info->bus_info, dev_name(&ndev->dev));
}
static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -914,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
- if (!netif_running(dev))
+ if (!netif_running(ndev))
return -EINVAL;
if (!phydev)
@@ -928,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct bufdesc *bdp;
@@ -940,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
skb = fep->rx_skbuff[i];
if (bdp->cbd_bufaddr)
- dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
@@ -952,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
kfree(fep->tx_bounce[i]);
}
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct bufdesc *bdp;
@@ -963,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
if (!skb) {
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return -ENOMEM;
}
fep->rx_skbuff[i] = skb;
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
bdp++;
@@ -995,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
}
static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
- ret = fec_enet_alloc_buffers(dev);
+ ret = fec_enet_alloc_buffers(ndev);
if (ret)
return ret;
/* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(dev);
+ ret = fec_enet_mii_probe(ndev);
if (ret) {
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return ret;
}
phy_start(fep->phy_dev);
- netif_start_queue(dev);
+ netif_start_queue(ndev);
fep->opened = 1;
return 0;
}
static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
/* Don't know what to do yet. */
fep->opened = 0;
- netif_stop_queue(dev);
- fec_stop(dev);
+ netif_stop_queue(ndev);
+ fec_stop(ndev);
- if (fep->phy_dev)
+ if (fep->phy_dev) {
+ phy_stop(fep->phy_dev);
phy_disconnect(fep->phy_dev);
+ }
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return 0;
}
@@ -1051,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
#define HASH_BITS 6 /* #bits in hash */
#define CRC32_POLY 0xEDB88320
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
- if (dev->flags & IFF_PROMISC) {
+ if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
tmp |= 0x8;
writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1069,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
tmp &= ~0x8;
writel(tmp, fep->hwp + FEC_R_CNTRL);
- if (dev->flags & IFF_ALLMULTI) {
+ if (ndev->flags & IFF_ALLMULTI) {
/* Catch all multicast addresses, so set the
* filter to all 1's
*/
@@ -1084,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(ha, dev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Only support group multicast for now */
if (!(ha->addr[0] & 1))
continue;
@@ -1092,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dev->addr_len; i++) {
+ for (i = 0; i < ndev->addr_len; i++) {
data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
@@ -1119,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
/* Set a MAC change in hardware. */
static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
- writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
- (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);
- writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
fep->hwp + FEC_ADDR_HIGH);
return 0;
}
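
The two writel() calls above pack the six-byte station address big-endian into the FEC_ADDR_LOW/FEC_ADDR_HIGH register pair. A minimal userspace sketch of that packing (not part of the patch; the sample address is made up):

/*
 * Illustrative sketch: how fec_set_mac_address() packs a 6-byte station
 * address into the two FEC address registers. Register layout follows the
 * writel() calls above.
 */
#include <stdint.h>
#include <stdio.h>

static void fec_pack_mac(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
{
        /* FEC_ADDR_LOW: bytes 0..3, most significant byte first */
        *lo = addr[3] | (addr[2] << 8) | (addr[1] << 16) |
              ((uint32_t)addr[0] << 24);
        /* FEC_ADDR_HIGH: bytes 4..5 in the top 16 bits */
        *hi = (addr[5] << 16) | ((uint32_t)addr[4] << 24);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
        uint32_t lo, hi;

        fec_pack_mac(mac, &lo, &hi);
        printf("FEC_ADDR_LOW  = 0x%08x\n", lo);  /* 0x00049f01 */
        printf("FEC_ADDR_HIGH = 0x%08x\n", hi);  /* 0x02030000 */
        return 0;
}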
@@ -1146,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_do_ioctl = fec_enet_ioctl,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
* XXX: We need to clean up on failure exits here.
*
*/
-static int fec_enet_init(struct net_device *dev)
+static int fec_enet_init(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
struct bufdesc *bdp;
int i;
@@ -1170,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev)
spin_lock_init(&fep->hw_lock);
- fep->hwp = (void __iomem *)dev->base_addr;
- fep->netdev = dev;
+ fep->netdev = ndev;
/* Get the Ethernet address */
- fec_get_mac(dev);
+ fec_get_mac(ndev);
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->netdev_ops = &fec_netdev_ops;
- dev->ethtool_ops = &fec_enet_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->netdev_ops = &fec_netdev_ops;
+ ndev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1212,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev)
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
- fec_restart(dev, 0);
+ fec_restart(ndev, 0);
return 0;
}
-/* This function is called to start or restart the FEC during a link
- * change. This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
- int i;
- u32 val, temp_mac[2];
-
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
-
- /*
- * enet-mac reset will reset mac address registers too,
- * so need to reconfigure it.
- */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
- memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
- writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
- writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
- }
-
- /* Clear any outstanding interrupt. */
- writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
- /* Reset all multicast. */
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
- /* Set maximum receive buffer size. */
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
- /* Set receive and transmit descriptor base. */
- writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
-
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
- fep->cur_rx = fep->rx_bd_base;
-
- /* Reset SKB transmit buffers. */
- fep->skb_cur = fep->skb_dirty = 0;
- for (i = 0; i <= TX_RING_MOD_MASK; i++) {
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- }
-
- /* Enable MII mode */
- if (duplex) {
- /* MII enable / FD enable */
- writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
- writel(0x04, fep->hwp + FEC_X_CNTRL);
- } else {
- /* MII enable / No Rcv on Xmit */
- writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
- writel(0x0, fep->hwp + FEC_X_CNTRL);
- }
- fep->full_duplex = duplex;
-
- /* Set MII speed */
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
- /*
- * The phy interface and speed need to get configured
- * differently on enet-mac.
- */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
- val = readl(fep->hwp + FEC_R_CNTRL);
-
- /* MII or RMII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- val |= (1 << 8);
- else
- val &= ~(1 << 8);
-
- /* 10M or 100M */
- if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
- val &= ~(1 << 9);
- else
- val |= (1 << 9);
-
- writel(val, fep->hwp + FEC_R_CNTRL);
- } else {
-#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
- /* disable the gasket and wait */
- writel(0, fep->hwp + FEC_MIIGSK_ENR);
- while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
- udelay(1);
-
- /*
- * configure the gasket:
- * RMII, 50 MHz, no loopback, no echo
- */
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
- /* re-enable the gasket */
- writel(2, fep->hwp + FEC_MIIGSK_ENR);
- }
-#endif
- }
-
- /* And last, enable the transmit and receive processing */
- writel(2, fep->hwp + FEC_ECNTRL);
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
- /* Enable interrupts we wish to service */
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
-
- /* We cannot expect a graceful transmit stop without link !!! */
- if (fep->link) {
- writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
- udelay(10);
- if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
- printk("fec_stop : Graceful transmit stop did not complete !\n");
- }
-
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
static int __devinit
fec_probe(struct platform_device *pdev)
{
@@ -1377,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto failed_alloc_etherdev;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
/* setup board info structure */
fep = netdev_priv(ndev);
- memset(fep, 0, sizeof(*fep));
- ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
- if (!ndev->base_addr) {
+ if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
}
@@ -1407,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
break;
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
- while (i >= 0) {
+ while (--i >= 0) {
irq = platform_get_irq(pdev, i);
free_irq(irq, ndev);
- i--;
}
goto failed_irq;
}
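
The corrected cleanup loop above releases only the IRQs that were actually requested: with while (--i >= 0), index i (the request that just failed) is skipped and indices i-1..0 are freed. A standalone sketch of the same unwind pattern (not part of the patch; acquire()/release() are stand-ins for request_irq()/free_irq()):

/*
 * Illustrative sketch: unwind only what was actually acquired.
 * acquire() fails on the third resource; the loop then releases
 * resources 1 and 0, never the one that failed.
 */
#include <stdio.h>

#define NRES 3

static int acquire(int i) { return (i == 2) ? -1 : 0; } /* fail on the 3rd */
static void release(int i) { printf("release %d\n", i); }

int main(void)
{
        int i;

        for (i = 0; i < NRES; i++) {
                if (acquire(i) < 0) {
                        /* --i first: index i itself was never acquired */
                        while (--i >= 0)
                                release(i);
                        return 1;
                }
        }
        return 0;
}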
@@ -1453,9 +1450,11 @@ failed_clk:
free_irq(irq, ndev);
}
failed_irq:
- iounmap((void __iomem *)ndev->base_addr);
+ iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
+failed_alloc_etherdev:
+ release_mem_region(r->start, resource_size(r));
return ret;
}
@@ -1465,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
-
- platform_set_drvdata(pdev, NULL);
+ struct resource *r;
fec_stop(ndev);
fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
- iounmap((void __iomem *)ndev->base_addr);
+ iounmap(fep->hwp);
unregister_netdev(ndev);
free_netdev(ndev);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ BUG_ON(!r);
+ release_mem_region(r->start, resource_size(r));
+
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -1483,16 +1488,14 @@ static int
fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep;
+ struct fec_enet_private *fep = netdev_priv(ndev);
- if (ndev) {
- fep = netdev_priv(ndev);
- if (netif_running(ndev)) {
- fec_stop(ndev);
- netif_device_detach(ndev);
- }
- clk_disable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_stop(ndev);
+ netif_device_detach(ndev);
}
+ clk_disable(fep->clk);
+
return 0;
}
@@ -1500,16 +1503,14 @@ static int
fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep;
+ struct fec_enet_private *fep = netdev_priv(ndev);
- if (ndev) {
- fep = netdev_priv(ndev);
- clk_enable(fep->clk);
- if (netif_running(ndev)) {
- fec_restart(ndev, fep->full_duplex);
- netif_device_attach(ndev);
- }
+ clk_enable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_restart(ndev, fep->full_duplex);
+ netif_device_attach(ndev);
}
+
return 0;
}
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 50c1213f61fe..9f81b1ac130e 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -840,8 +840,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
/* OF Driver */
/* ======================================================================== */
-static int __devinit
-mpc52xx_fec_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit mpc52xx_fec_probe(struct platform_device *op)
{
int rv;
struct net_device *ndev;
@@ -1049,7 +1048,7 @@ static struct of_device_id mpc52xx_fec_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
-static struct of_platform_driver mpc52xx_fec_driver = {
+static struct platform_driver mpc52xx_fec_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1073,21 +1072,21 @@ mpc52xx_fec_init(void)
{
#ifdef CONFIG_FEC_MPC52xx_MDIO
int ret;
- ret = of_register_platform_driver(&mpc52xx_fec_mdio_driver);
+ ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
if (ret) {
printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
return ret;
}
#endif
- return of_register_platform_driver(&mpc52xx_fec_driver);
+ return platform_driver_register(&mpc52xx_fec_driver);
}
static void __exit
mpc52xx_fec_exit(void)
{
- of_unregister_platform_driver(&mpc52xx_fec_driver);
+ platform_driver_unregister(&mpc52xx_fec_driver);
#ifdef CONFIG_FEC_MPC52xx_MDIO
- of_unregister_platform_driver(&mpc52xx_fec_mdio_driver);
+ platform_driver_unregister(&mpc52xx_fec_mdio_driver);
#endif
}
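
This conversion (dropping the of_device_id argument from probe() and registering a plain platform_driver instead of an of_platform_driver) repeats across the fs_enet, gianfar, greth and ibm_newemac diffs below. A minimal sketch of the resulting driver shape (not part of the patch; the foo names are hypothetical):

/*
 * Illustrative sketch of the converted driver shape: probe() takes only
 * the platform_device, and the matched of_device_id is reached through
 * the device rather than a second argument.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static int foo_probe(struct platform_device *pdev)
{
        /* OF match data, formerly passed as a second probe argument:
         * const struct of_device_id *match = pdev->dev.of_match; */
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct of_device_id foo_match[] = {
        { .compatible = "vendor,foo", },
        {},
};
MODULE_DEVICE_TABLE(of, foo_match);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                .owner = THIS_MODULE,
                .of_match_table = foo_match,
        },
        .probe = foo_probe,
        .remove = foo_remove,
};

static int __init foo_init(void)
{
        return platform_driver_register(&foo_driver);
}

static void __exit foo_exit(void)
{
        platform_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");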
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h
index a227a525bdbb..41d2dffde55b 100644
--- a/drivers/net/fec_mpc52xx.h
+++ b/drivers/net/fec_mpc52xx.h
@@ -289,6 +289,6 @@ struct mpc52xx_fec {
#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
-extern struct of_platform_driver mpc52xx_fec_mdio_driver;
+extern struct platform_driver mpc52xx_fec_mdio_driver;
#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 0b4cb6f15984..360a578c2bb7 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -61,8 +61,7 @@ static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
data | FEC_MII_WRITE_FRAME);
}
-static int mpc52xx_fec_mdio_probe(struct platform_device *of,
- const struct of_device_id *match)
+static int mpc52xx_fec_mdio_probe(struct platform_device *of)
{
struct device *dev = &of->dev;
struct device_node *np = of->dev.of_node;
@@ -145,7 +144,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
-struct of_platform_driver mpc52xx_fec_mdio_driver = {
+struct platform_driver mpc52xx_fec_mdio_driver = {
.driver = {
.name = "mpc5200b-fec-phy",
.owner = THIS_MODULE,
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 7a1f3d0ffa78..24cb953900dd 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -998,8 +998,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
-static int __devinit fs_enet_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
struct net_device *ndev;
struct fs_enet_private *fep;
@@ -1008,11 +1007,14 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
const u8 *mac_addr;
int privsize, len, ret = -ENODEV;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+
fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
if (!fpi)
return -ENOMEM;
- if (!IS_FEC(match)) {
+ if (!IS_FEC(ofdev->dev.of_match)) {
data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4)
goto out_free_fpi;
@@ -1047,7 +1049,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
fep->dev = &ofdev->dev;
fep->ndev = ndev;
fep->fpi = fpi;
- fep->ops = match->data;
+ fep->ops = ofdev->dev.of_match->data;
ret = fep->ops->setup_data(ndev);
if (ret)
@@ -1156,7 +1158,7 @@ static struct of_device_id fs_enet_match[] = {
};
MODULE_DEVICE_TABLE(of, fs_enet_match);
-static struct of_platform_driver fs_enet_driver = {
+static struct platform_driver fs_enet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "fs_enet",
@@ -1168,12 +1170,12 @@ static struct of_platform_driver fs_enet_driver = {
static int __init fs_init(void)
{
- return of_register_platform_driver(&fs_enet_driver);
+ return platform_driver_register(&fs_enet_driver);
}
static void __exit fs_cleanup(void)
{
- of_unregister_platform_driver(&fs_enet_driver);
+ platform_driver_unregister(&fs_enet_driver);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 3cda2b515471..ad2975440719 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -150,8 +150,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct mii_bus *new_bus;
struct bb_info *bitbang;
@@ -223,7 +222,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
-static struct of_platform_driver fs_enet_bb_mdio_driver = {
+static struct platform_driver fs_enet_bb_mdio_driver = {
.driver = {
.name = "fsl-bb-mdio",
.owner = THIS_MODULE,
@@ -235,12 +234,12 @@ static struct of_platform_driver fs_enet_bb_mdio_driver = {
static int fs_enet_mdio_bb_init(void)
{
- return of_register_platform_driver(&fs_enet_bb_mdio_driver);
+ return platform_driver_register(&fs_enet_bb_mdio_driver);
}
static void fs_enet_mdio_bb_exit(void)
{
- of_unregister_platform_driver(&fs_enet_bb_mdio_driver);
+ platform_driver_unregister(&fs_enet_bb_mdio_driver);
}
module_init(fs_enet_mdio_bb_init);
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index dbb9c48623df..7e840d373ab3 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,15 +101,18 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *) = match->data;
+ int (*get_bus_freq)(struct device_node *);
int ret = -ENOMEM, clock, speed;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+ get_bus_freq = ofdev->dev.of_match->data;
+
new_bus = mdiobus_alloc();
if (!new_bus)
goto out;
@@ -221,7 +224,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
-static struct of_platform_driver fs_enet_fec_mdio_driver = {
+static struct platform_driver fs_enet_fec_mdio_driver = {
.driver = {
.name = "fsl-fec-mdio",
.owner = THIS_MODULE,
@@ -233,12 +236,12 @@ static struct of_platform_driver fs_enet_fec_mdio_driver = {
static int fs_enet_mdio_fec_init(void)
{
- return of_register_platform_driver(&fs_enet_fec_mdio_driver);
+ return platform_driver_register(&fs_enet_fec_mdio_driver);
}
static void fs_enet_mdio_fec_exit(void)
{
- of_unregister_platform_driver(&fs_enet_fec_mdio_driver);
+ platform_driver_unregister(&fs_enet_fec_mdio_driver);
}
module_init(fs_enet_mdio_fec_init);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 8d3a2ccbc953..52f4e8ad48e7 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -265,8 +265,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
#endif
-static int fsl_pq_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct device_node *tbi;
@@ -471,7 +470,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-static struct of_platform_driver fsl_pq_mdio_driver = {
+static struct platform_driver fsl_pq_mdio_driver = {
.driver = {
.name = "fsl-pq_mdio",
.owner = THIS_MODULE,
@@ -483,13 +482,13 @@ static struct of_platform_driver fsl_pq_mdio_driver = {
int __init fsl_pq_mdio_init(void)
{
- return of_register_platform_driver(&fsl_pq_mdio_driver);
+ return platform_driver_register(&fsl_pq_mdio_driver);
}
module_init(fsl_pq_mdio_init);
void fsl_pq_mdio_exit(void)
{
- of_unregister_platform_driver(&fsl_pq_mdio_driver);
+ platform_driver_unregister(&fsl_pq_mdio_driver);
}
module_exit(fsl_pq_mdio_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5ed8f9f9419f..ccb231c4d933 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -123,8 +123,7 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
-static int gfar_probe(struct platform_device *ofdev,
- const struct of_device_id *match);
+static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
@@ -957,8 +956,7 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
-static int gfar_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int gfar_probe(struct platform_device *ofdev)
{
u32 tempval;
struct net_device *dev = NULL;
@@ -3256,7 +3254,7 @@ static struct of_device_id gfar_match[] =
MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
-static struct of_platform_driver gfar_driver = {
+static struct platform_driver gfar_driver = {
.driver = {
.name = "fsl-gianfar",
.owner = THIS_MODULE,
@@ -3269,12 +3267,12 @@ static struct of_platform_driver gfar_driver = {
static int __init gfar_init(void)
{
- return of_register_platform_driver(&gfar_driver);
+ return platform_driver_register(&gfar_driver);
}
static void __exit gfar_exit(void)
{
- of_unregister_platform_driver(&gfar_driver);
+ platform_driver_unregister(&gfar_driver);
}
module_init(gfar_init);
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index fdb0333f5cb6..396ff7d785d1 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1411,7 +1411,7 @@ error:
}
/* Initialize the GRETH MAC */
-static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
+static int __devinit greth_of_probe(struct platform_device *ofdev)
{
struct net_device *dev;
struct greth_private *greth;
@@ -1646,7 +1646,7 @@ static struct of_device_id greth_of_match[] = {
MODULE_DEVICE_TABLE(of, greth_of_match);
-static struct of_platform_driver greth_of_driver = {
+static struct platform_driver greth_of_driver = {
.driver = {
.name = "grlib-greth",
.owner = THIS_MODULE,
@@ -1658,12 +1658,12 @@ static struct of_platform_driver greth_of_driver = {
static int __init greth_init(void)
{
- return of_register_platform_driver(&greth_of_driver);
+ return platform_driver_register(&greth_of_driver);
}
static void __exit greth_cleanup(void)
{
- of_unregister_platform_driver(&greth_of_driver);
+ platform_driver_unregister(&greth_of_driver);
}
module_init(greth_init);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb5..8931168d3e74 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *p;
+ struct bpqdev *bpqdev = v;
++*pos;
if (v == SEQ_START_TOKEN)
- p = rcu_dereference(bpq_devices.next);
+ p = rcu_dereference(list_next_rcu(&bpq_devices));
else
- p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+ p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
return (p == &bpq_devices) ? NULL
: list_entry(p, struct bpqdev, bpq_list);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 6d9275c52e05..3bb990b6651a 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2719,8 +2719,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_change_mtu = emac_change_mtu,
};
-static int __devinit emac_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit emac_probe(struct platform_device *ofdev)
{
struct net_device *ndev;
struct emac_instance *dev;
@@ -2994,7 +2993,7 @@ static struct of_device_id emac_match[] =
};
MODULE_DEVICE_TABLE(of, emac_match);
-static struct of_platform_driver emac_driver = {
+static struct platform_driver emac_driver = {
.driver = {
.name = "emac",
.owner = THIS_MODULE,
@@ -3069,7 +3068,7 @@ static int __init emac_init(void)
rc = tah_init();
if (rc)
goto err_rgmii;
- rc = of_register_platform_driver(&emac_driver);
+ rc = platform_driver_register(&emac_driver);
if (rc)
goto err_tah;
@@ -3091,7 +3090,7 @@ static void __exit emac_exit(void)
{
int i;
- of_unregister_platform_driver(&emac_driver);
+ platform_driver_unregister(&emac_driver);
tah_exit();
rgmii_exit();
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index d5717e2123e1..d268f404b7b0 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -517,8 +517,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
-static int __devinit mal_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mal_probe(struct platform_device *ofdev)
{
struct mal_instance *mal;
int err = 0, i, bd_size;
@@ -789,7 +788,7 @@ static struct of_device_id mal_platform_match[] =
{},
};
-static struct of_platform_driver mal_of_driver = {
+static struct platform_driver mal_of_driver = {
.driver = {
.name = "mcmal",
.owner = THIS_MODULE,
@@ -801,10 +800,10 @@ static struct of_platform_driver mal_of_driver = {
int __init mal_init(void)
{
- return of_register_platform_driver(&mal_of_driver);
+ return platform_driver_register(&mal_of_driver);
}
void mal_exit(void)
{
- of_unregister_platform_driver(&mal_of_driver);
+ platform_driver_unregister(&mal_of_driver);
}
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index dd61798897ac..4fa53f3def64 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -228,8 +228,7 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
}
-static int __devinit rgmii_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit rgmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
@@ -318,7 +317,7 @@ static struct of_device_id rgmii_match[] =
{},
};
-static struct of_platform_driver rgmii_driver = {
+static struct platform_driver rgmii_driver = {
.driver = {
.name = "emac-rgmii",
.owner = THIS_MODULE,
@@ -330,10 +329,10 @@ static struct of_platform_driver rgmii_driver = {
int __init rgmii_init(void)
{
- return of_register_platform_driver(&rgmii_driver);
+ return platform_driver_register(&rgmii_driver);
}
void rgmii_exit(void)
{
- of_unregister_platform_driver(&rgmii_driver);
+ platform_driver_unregister(&rgmii_driver);
}
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 299aa49490c0..8ead6a96abaa 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -87,8 +87,7 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit tah_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit tah_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
@@ -165,7 +164,7 @@ static struct of_device_id tah_match[] =
{},
};
-static struct of_platform_driver tah_driver = {
+static struct platform_driver tah_driver = {
.driver = {
.name = "emac-tah",
.owner = THIS_MODULE,
@@ -177,10 +176,10 @@ static struct of_platform_driver tah_driver = {
int __init tah_init(void)
{
- return of_register_platform_driver(&tah_driver);
+ return platform_driver_register(&tah_driver);
}
void tah_exit(void)
{
- of_unregister_platform_driver(&tah_driver);
+ platform_driver_unregister(&tah_driver);
}
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 34ed6ee8ca8a..97449e786d61 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -231,8 +231,7 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit zmii_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit zmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
@@ -312,7 +311,7 @@ static struct of_device_id zmii_match[] =
{},
};
-static struct of_platform_driver zmii_driver = {
+static struct platform_driver zmii_driver = {
.driver = {
.name = "emac-zmii",
.owner = THIS_MODULE,
@@ -324,10 +323,10 @@ static struct of_platform_driver zmii_driver = {
int __init zmii_init(void)
{
- return of_register_platform_driver(&zmii_driver);
+ return platform_driver_register(&zmii_driver);
}
void zmii_exit(void)
{
- of_unregister_platform_driver(&zmii_driver);
+ platform_driver_unregister(&zmii_driver);
}
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368fa6bc6..65c1833244f7 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -129,6 +129,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -237,9 +238,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
size = 14;
nvm->word_size = 1 << size;
- /* if 82576 then initialize mailbox parameters */
- if (mac->type == e1000_82576)
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
igb_init_mbx_params_pf(hw);
+ break;
+ default:
+ break;
+ }
/* setup PHY parameters */
if (phy->media_type != e1000_media_type_copper) {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6319ed902bc0..ff46c91520af 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -770,4 +770,11 @@
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
on DMA coal */
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA 0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT 14
+#define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638afb8cdc..281324e85980 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb70047..78d48c7fa859 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type == e1000_82576) {
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = E1000_VFMAILBOX_SIZE;
-
- mbx->ops.read = igb_read_mbx_pf;
- mbx->ops.write = igb_write_mbx_pf;
- mbx->ops.read_posted = igb_read_posted_mbx;
- mbx->ops.write_posted = igb_write_posted_mbx;
- mbx->ops.check_for_msg = igb_check_for_msg_pf;
- mbx->ops.check_for_ack = igb_check_for_ack_pf;
- mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
- }
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = igb_read_mbx_pf;
+ mbx->ops.write = igb_write_mbx_pf;
+ mbx->ops.read_posted = igb_read_posted_mbx;
+ mbx->ops.write_posted = igb_write_posted_mbx;
+ mbx->ops.check_for_msg = igb_check_for_msg_pf;
+ mbx->ops.check_for_ack = igb_check_for_ack_pf;
+ mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
return 0;
}
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 8ac83c5190d5..3a6f8471aea2 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -106,6 +106,10 @@
#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
/* Split and Replication RX Control - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
/*
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 92a4ef09e55c..bbc5ebfe254a 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -77,6 +77,7 @@ struct vf_data_storage {
unsigned long last_nack;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
+ u16 tx_rate;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -323,6 +324,7 @@ struct igb_adapter {
u16 rx_ring_count;
unsigned int vfs_allocated_count;
struct vf_data_storage *vf_data;
+ int vf_rate_link_speed;
u32 rss_queues;
u32 wvbr;
};
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b7513d..579dbba5f9e4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -50,12 +50,12 @@
#endif
#include "igb.h"
-#define DRV_VERSION "2.1.0-k2"
+#define DRV_VERSION "2.4.13-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -68,6 +68,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -149,6 +150,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -2286,9 +2288,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
- if (hw->mac.type == e1000_82576)
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+ break;
+ default:
+ break;
+ }
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
@@ -3505,6 +3512,7 @@ static void igb_watchdog_task(struct work_struct *work)
netif_carrier_on(netdev);
igb_ping_all_vfs(adapter);
+ igb_check_vf_rate_limit(adapter);
/* link state has changed, schedule phy info update */
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -6593,9 +6601,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return igb_set_vf_mac(adapter, vf, mac);
}
+static int igb_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case SPEED_100:
+ return 100;
+ case SPEED_1000:
+ return 1000;
+ default:
+ return 0;
+ }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+ E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ wr32(E1000_RTTBCNRC, bcnrc_val);
+}
+
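
igb_set_vf_rate_limit() above encodes link_speed/tx_rate as a 14-bit fixed-point rate factor before writing RTTBCNRC. A standalone sketch of that arithmetic (not part of the patch; the constants are copied from the e1000_defines.h hunk earlier in this series, and the 1000/300 sample values are made up):

/*
 * Illustrative sketch: RTTBCNRC rate-factor arithmetic from
 * igb_set_vf_rate_limit(), with the register write replaced by a printf.
 */
#include <stdint.h>
#include <stdio.h>

#define E1000_RTTBCNRC_RS_ENA           0x80000000u
#define E1000_RTTBCNRC_RF_DEC_MASK      0x00003FFFu
#define E1000_RTTBCNRC_RF_INT_SHIFT     14
#define E1000_RTTBCNRC_RF_INT_MASK \
        (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)

static uint32_t rttbcnrc_val(int link_speed, int tx_rate)
{
        int rf_int, rf_dec;
        uint32_t bcnrc_val;

        if (tx_rate == 0)
                return 0;       /* 0 disables the limiter */

        /* integer and 14-bit fractional part of link_speed / tx_rate */
        rf_int = link_speed / tx_rate;
        rf_dec = link_speed - rf_int * tx_rate;
        rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

        bcnrc_val = E1000_RTTBCNRC_RS_ENA;
        bcnrc_val |= (rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
                     E1000_RTTBCNRC_RF_INT_MASK;
        bcnrc_val |= rf_dec & E1000_RTTBCNRC_RF_DEC_MASK;
        return bcnrc_val;
}

int main(void)
{
        /* 1000 Mb/s link, 300 Mb/s cap: factor = 3 + 0.333... -> 0x8000d555 */
        printf("RTTBCNRC = 0x%08x\n", rttbcnrc_val(1000, 300));
        return 0;
}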
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF TX rate limit was not set or not supported */
+ if ((adapter->vf_rate_link_speed == 0) ||
+ (adapter->hw.mac.type != e1000_82576))
+ return;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit "
+ "rate is disabled\n");
+ }
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ if (reset_rate)
+ adapter->vf_data[i].tx_rate = 0;
+
+ igb_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vf_data[i].tx_rate,
+ actual_link_speed);
+ }
+}
+
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
- return -EOPNOTSUPP;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ if (hw->mac.type != e1000_82576)
+ return -EOPNOTSUPP;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->vfs_allocated_count) ||
+ (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+ (tx_rate < 0) || (tx_rate > actual_link_speed))
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
}
static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6606,7 +6696,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->tx_rate = adapter->vf_data[vf].tx_rate;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
return 0;
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ed6e3d910247..1d943aa7c7a6 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
- u8 revision_id;
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
- pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
- regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 990c329e6c3b..d5dad5d607d6 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -201,9 +201,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
- bool detect_tx_hung;
- u8 tx_timeout_factor;
-
u32 tx_int_delay;
u32 tx_abs_int_delay;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 6352c8158e6d..6ccc32fd7338 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
buffer_info->time_stamp = 0;
}
-static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
-{
- struct igbvf_ring *tx_ring = adapter->tx_ring;
- unsigned int i = tx_ring->next_to_clean;
- unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
- union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-
- /* detected Tx unit hang */
- dev_err(&adapter->pdev->dev,
- "Detected Tx Unit Hang:\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]:\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- readl(adapter->hw.hw_addr + tx_ring->head),
- readl(adapter->hw.hw_addr + tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->wb.status);
-}
-
/**
* igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
* @adapter: board private structure
@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
struct igbvf_adapter *adapter = tx_ring->adapter;
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct igbvf_buffer *buffer_info;
struct sk_buff *skb;
@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
}
}
- if (adapter->detect_tx_hung) {
- /* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
- adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[i].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-
- tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- /* detected Tx unit hang */
- igbvf_print_tx_hang(adapter);
-
- netif_stop_queue(netdev);
- }
- }
adapter->net_stats.tx_bytes += total_bytes;
adapter->net_stats.tx_packets += total_packets;
return count < tx_ring->count;
@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
&adapter->link_duplex);
igbvf_print_link_info(adapter);
- /* adjust timeout factor according to speed/duplex */
- adapter->tx_timeout_factor = 1;
- switch (adapter->link_speed) {
- case SPEED_10:
- adapter->tx_timeout_factor = 16;
- break;
- case SPEED_100:
- /* maybe add some timeout factor ? */
- break;
- }
-
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
@@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
ew32(EICS, adapter->rx_ring->eims_value);
- /* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
-
/* Reset the timer */
if (!test_bit(__IGBVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
err = -EIO;
adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8b009a..af3822f9ea9a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index aa93655c3aa7..a5b0f0e194bb 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
if (phyaddr != 0x1f) {
u16 mii_phyctrl, mii_1000cr;
- u8 revisionid = 0;
mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
/* Set default phyparam */
- pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
- ipg_set_phy_default_param(revisionid, dev, phyaddr);
+ ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
/* Reset PHY */
mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c92463617..12769b58c2e7 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -334,6 +334,10 @@ struct ixgbe_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+ /* DCB parameters */
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
@@ -521,7 +525,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045a8cf0..90cceb4a6317 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -29,6 +29,7 @@
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
+#include "ixgbe.h"
u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -110,9 +111,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
- netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+ netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260c1f50..13c962efbfc9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -34,6 +34,42 @@
#include "ixgbe_dcb_82599.h"
/**
+ * ixgbe_ieee_credits - This calculates the IEEE traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups, so this is much simplified from the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = val;
+
+ max[i] = (bw[i] * MAX_CREDIT)/100;
+ }
+ return 0;
+}
+
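
ixgbe_ieee_credits() above scales the per-TC bandwidth percentages so that even the smallest nonzero share gets at least min_credit refill credits. A standalone sketch of the arithmetic (not part of the patch; the constant values are assumptions for illustration only, since the real definitions live in ixgbe_dcb.h, which this diff does not show):

/*
 * Illustrative sketch of the ixgbe_ieee_credits() arithmetic.
 * DCB_CREDIT_QUANTUM, MAX_CREDIT_REFILL and MAX_CREDIT values below are
 * assumed for the demonstration; MAX_TRAFFIC_CLASS is 8 as in ixgbe.
 */
#include <stdio.h>

#define MAX_TRAFFIC_CLASS       8
#define DCB_CREDIT_QUANTUM      64      /* assumed */
#define MAX_CREDIT_REFILL       511     /* assumed */
#define MAX_CREDIT              4095    /* assumed */

static void ieee_credits(const unsigned char *bw, unsigned short *refill,
                         unsigned short *max, int max_frame)
{
        int min_percent = 100;
        int min_credit, multiplier, i;

        /* smallest credit that still covers half a maximum-sized frame */
        min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
                     DCB_CREDIT_QUANTUM;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                if (bw[i] && bw[i] < min_percent)
                        min_percent = bw[i];

        multiplier = (min_credit / min_percent) + 1;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                int val = bw[i] * multiplier;

                if (val > MAX_CREDIT_REFILL)
                        val = MAX_CREDIT_REFILL;
                if (val < min_credit)
                        val = min_credit;
                refill[i] = val;
                max[i] = (bw[i] * MAX_CREDIT) / 100;
        }
}

int main(void)
{
        /* sample 802.1Qaz bandwidth split across 8 traffic classes */
        unsigned char bw[MAX_TRAFFIC_CLASS] = { 50, 20, 10, 10, 5, 5, 0, 0 };
        unsigned short refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
        int i;

        ieee_credits(bw, refill, max, 1518);
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                printf("tc%d: refill=%u max=%u\n", i, refill[i], max[i]);
        return 0;
}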
+/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
* @ixgbe_dcb_config: Struct containing DCB settings.
* @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@ out:
return ret_val;
}
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+ int i;
+
+ *pfc_en = 0;
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ refill[i] = p->data_credits_refill;
+ }
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ bwgid[i] = p->bwg_id;
+ }
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *ptype)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ ptype[i] = p->prio_type;
+ }
+}
+
/**
* ixgbe_dcb_hw_config - Config and enable DCB
* @hw: pointer to hardware structure
@@ -152,13 +241,30 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
+ u8 pfc_en;
+ u8 ptype[MAX_TRAFFIC_CLASS];
+ u8 bwgid[MAX_TRAFFIC_CLASS];
+ u16 refill[MAX_TRAFFIC_CLASS];
+ u16 max[MAX_TRAFFIC_CLASS];
+
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+ ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+ pfc_en, refill, max, bwgid,
+ ptype);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+ pfc_en, refill, max, bwgid,
+ ptype);
break;
default:
break;
@@ -166,3 +272,70 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
return ret;
}
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+ int ret = -EINVAL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
+{
+ int i;
+ u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+
+ /* Map TSA onto CEE prio type */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ prio_type[i] = 2;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ prio_type[i] = 0;
+ break;
+ default:
+ /* Hardware only supports the strict priority and
+ * ETS transmission selection algorithms; if we
+ * receive any other value from dcbnl, throw an
+ * error.
+ */
+ return -EINVAL;
+ }
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+ prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38ee1644..e5935114815e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
- bool round_robin_enable;
enum dcb_rx_pba_cfg rx_pba_cfg;
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
};
/* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
/* DCB credits calculation */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c12e05..2965edcdac7b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -38,15 +38,14 @@
*
* Configure packet buffers for DCB mode.
*/
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
/* Setup Rx packet buffer sizes */
- switch (dcb_config->rx_pba_cfg) {
+ switch (rx_pba) {
case pba_80_48:
/* Setup the first four at 80KB */
value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
- credit_refill = p->data_credits_refill;
- credit_max = p->data_credits_max;
+ credit_refill = refill[i];
+ credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg, max_credits;
u8 i;
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
- if (!(dcb_config->round_robin_enable)) {
- /* Enable DFP and Recycle mode */
- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
- }
+ /* Enable DFP and Recycle mode */
+ reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- max_credits = dcb_config->tc_config[i].desc_credits_max;
+ max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
- reg |= p->data_credits_refill;
- reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg;
u8 i;
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- reg = p->data_credits_refill;
- reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
- reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,13 +228,12 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 reg, rx_pba_size;
u8 i;
- if (!dcb_config->pfc_mode_enable)
+ if (!pfc_en)
goto out;
/* Enable Transmit Priority Flow Control */
@@ -256,19 +254,20 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
* for each traffic class.
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int enabled = pfc_en & (1 << i);
rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
reg = (rx_pba_size - hw->fc.low_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
reg = (rx_pba_size - hw->fc.high_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
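
pfc_en is a plain bitmask with bit i covering traffic class i, which is what the pfc_en & (1 << i) test above keys off. A plausible shape for the unpack helper (illustrative only; the real ixgbe_dcb_unpack_pfc() body is not part of this hunk):

static void example_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
	int tc;

	/* set bit tc when the CEE config asks for PFC on that class */
	*pfc_en = 0;
	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
		if (cfg->tc_config[tc].dcb_pfc != pfc_disabled)
			*pfc_en |= 1 << tc;
}
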
@@ -292,7 +291,7 @@ out:
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -325,13 +324,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
- ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_pfc_82598(hw, dcb_config);
+ ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03ccfa088..0d2a758effce 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -71,9 +71,28 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f74d0f5..b0d97a98c84d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -33,19 +33,18 @@
/**
* ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffers
*
* Configure packet buffers for DCB mode.
*/
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
/* Setup Rx packet buffer sizes */
- switch (dcb_config->rx_pba_cfg) {
+ switch (rx_pba) {
case pba_80_48:
/* Setup the first four at 80KB */
value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,19 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
@@ -103,15 +107,13 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
- credit_refill = p->data_credits_refill;
- credit_max = p->data_credits_max;
+ credit_refill = refill[i];
+ credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
- reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTRPT4C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +132,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg, max_credits;
u8 i;
@@ -149,16 +156,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- max_credits = dcb_config->tc_config[i].desc_credits_max;
+ max_credits = max[i];
reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
- reg |= p->data_credits_refill;
- reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_RTTDT2C_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTTDT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +183,19 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg;
u8 i;
@@ -205,15 +216,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- reg = p->data_credits_refill;
- reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
- reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_RTTPT2C_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTTPT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,17 +243,16 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 i, reg, rx_pba_size;
/* If PFC is disabled globally then fall back to LFC. */
- if (!dcb_config->pfc_mode_enable) {
+ if (!pfc_en) {
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
hw->mac.ops.fc_enable(hw, i);
goto out;
@@ -251,19 +260,18 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int enabled = pfc_en & (1 << i);
rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
reg = (rx_pba_size - hw->fc.low_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+ if (enabled)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
reg = (rx_pba_size - hw->fc.high_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+ if (enabled)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_config_82599 - Configure general DCB parameters
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure general DCB parameters.
*/
@@ -406,19 +413,27 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffers
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+ ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
ixgbe_dcb_config_82599(hw);
- ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_pfc_82599(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82599(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649fb954..5b0ca85614d1 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -102,11 +102,29 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
/* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *config);
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8a455e..a977df3fe81b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -37,7 +37,6 @@
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_RESETLINK 0x40
#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
@@ -225,10 +224,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_TX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +236,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+ adapter->dcb_cfg.bw_percentage[0][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_TX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +264,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_RX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +276,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+ adapter->dcb_cfg.bw_percentage[1][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_RX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -365,21 +356,17 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
return DCB_NO_HW_CHG;
/*
- * Only take down the adapter if the configuration change
- * requires a reset.
+ * Only take down the adapter if an app change occurred. FCoE
+ * may shuffle tx rings in this case and this cannot currently
+ * be done without a reset.
*/
- if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- } else {
- if (netif_running(netdev))
- ixgbe_down(adapter);
- }
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+ ixgbe_clear_interrupt_scheme(adapter);
}
if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +395,51 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
- if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- } else {
- if (netif_running(netdev))
- ixgbe_up(adapter);
- }
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+ ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_open(netdev);
ret = DCB_HW_CHG_RST;
- } else if (adapter->dcb_set_bitmap & BIT_PFC) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- ixgbe_dcb_config_pfc_82598(&adapter->hw,
- &adapter->dcb_cfg);
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- ixgbe_dcb_config_pfc_82599(&adapter->hw,
- &adapter->dcb_cfg);
+ }
+
+ if (adapter->dcb_set_bitmap & BIT_PFC) {
+ u8 pfc_en;
+ ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
ret = DCB_HW_CHG;
}
+
+ if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+ u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+ u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef CONFIG_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_RX_CONFIG);
+
+ ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+ ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, bwg_id);
+ ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_type);
+
+ ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, prio_type);
+ }
+
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
clear_bit(__IXGBE_RESETTING, &adapter->state);
adapter->dcb_set_bitmap = 0x00;
return ret;
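
For reference, max_frame here is the full L2 frame size the credit calculation has to cover. A worked example (illustrative numbers, not code from the patch):

	int mtu = 1500;
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;	/* 1500 + 14 + 4 = 1518 */
	/* with NETIF_F_FCOE_MTU set, max_frame is raised to the FCoE jumbo size */
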
@@ -568,18 +577,29 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
case DCB_APP_IDTYPE_ETHTYPE:
#ifdef IXGBE_FCOE
if (id == ETH_P_FCOE) {
- u8 tc;
- struct ixgbe_adapter *adapter;
+ u8 old_tc;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
- adapter = netdev_priv(netdev);
- tc = adapter->fcoe.tc;
+ /* Get the currently programmed tc */
+ old_tc = adapter->fcoe.tc;
rval = ixgbe_fcoe_setapp(adapter, up);
- if ((!rval) && (tc != adapter->fcoe.tc) &&
- (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+ if (rval ||
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+ !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ break;
+
+ /* The FCoE application priority may be changed multiple
+ * times in quick succession with switches that build up
+ * TLVs. To avoid creating unneeded device resets this
+ * checks the actual HW configuration and clears
+ * BIT_APP_UPCHG if a HW configuration change is not
+ * needed.
+ */
+ if (old_tc == adapter->fcoe.tc)
+ adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+ else
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
#endif
break;
@@ -591,7 +611,98 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
return rval;
}
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+ /* No IEEE ETS settings available */
+ if (!my_ets)
+ return -EINVAL;
+
+ ets->ets_cap = MAX_TRAFFIC_CLASS;
+ ets->cbs = my_ets->cbs;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int err;
+ /* naively give each TC a bwg to map onto CEE hardware */
+ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ if (!adapter->ixgbe_ieee_ets) {
+ adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_ets)
+ return -ENOMEM;
+ }
+
+
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+ ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+ err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, ets->tc_tsa);
+ return err;
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+ int i;
+
+ /* No IEEE PFC settings available */
+ if (!my_pfc)
+ return -EINVAL;
+
+ pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = adapter->stats.pxoffrxc[i];
+ pfc->indications[i] = adapter->stats.pxofftxc[i];
+ }
+
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!adapter->ixgbe_ieee_pfc) {
+ adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+ err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+ return err;
+}
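
A hedged usage sketch for the new IEEE PFC hook: enabling PFC on a single priority (priority 3 is a common choice for FCoE; the value is illustrative). In practice this path is reached through the dcbnl netlink interface rather than called directly:

	struct ieee_pfc pfc = {
		.pfc_cap = MAX_TRAFFIC_CLASS,
		.pfc_en  = 1 << 3,	/* one bit per user priority */
	};

	ixgbe_dcbnl_ieee_setpfc(netdev, &pfc);
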
+
const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = ixgbe_dcbnl_ieee_getets,
+ .ieee_setets = ixgbe_dcbnl_ieee_setets,
+ .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
+ .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea88ca2a..309272f8f103 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -152,7 +152,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -167,6 +177,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= (ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ if (!(ecmd->advertising & ADVERTISED_100baseT_Full))
+ ecmd->advertising |= (ADVERTISED_100baseT_Full);
+ break;
+ default:
+ break;
+ }
+
if (hw->phy.media_type == ixgbe_media_type_copper) {
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -271,8 +290,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
- ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- SPEED_10000 : SPEED_1000;
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ecmd->speed = SPEED_10000;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ecmd->speed = SPEED_1000;
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ ecmd->speed = SPEED_100;
+ break;
+ default:
+ break;
+ }
ecmd->duplex = DUPLEX_FULL;
} else {
ecmd->speed = -1;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 30f9ccfb4f87..f0d0c5aad2b4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -648,7 +648,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
*
* Returns : a tc index for use in range 0-7, or 0-3
*/
-u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
+static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
int tc = -1;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -3077,6 +3077,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
+ /* If operating in IOV mode, set RLPML for X540 */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ hw->mac.type == ixgbe_mac_X540) {
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+ ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+ }
+
if (hw->mac.type == ixgbe_mac_82598EB) {
/*
* enable cache line friendly hardware writes:
@@ -5174,7 +5182,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
adapter->ring_feature[RING_F_DCB].indices);
@@ -5442,8 +5449,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
- return -EINVAL;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+ hw->mac.type != ixgbe_mac_X540) {
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+ } else {
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ return -EINVAL;
+ }
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
@@ -5611,6 +5624,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+ kfree(adapter->ixgbe_ieee_pfc);
+ kfree(adapter->ixgbe_ieee_ets);
+#endif
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -6101,7 +6118,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
"10 Gbps" :
(link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" : "unknown speed")),
+ "1 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+ "100 Mbps" :
+ "unknown speed"))),
((flow_rx && flow_tx) ? "RX/TX" :
(flow_rx ? "RX" :
(flow_tx ? "TX" : "None"))));
@@ -7706,16 +7726,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
#endif /* CONFIG_IXGBE_DCA */
-/**
- * ixgbe_get_hw_dev return device
- * used by hardware layer to print debugging information
- **/
-struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- return adapter->netdev;
-}
-
module_exit(ixgbe_exit_module);
/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a1cd3e..f215c4c296c4 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -437,6 +437,7 @@ out_no_read:
return ret_val;
}
+#ifdef CONFIG_PCI_IOV
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
@@ -465,6 +466,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
break;
}
}
+#endif /* CONFIG_PCI_IOV */
struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b1590218..ada0ce32a7a6 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
extern struct ixgbe_mbx_operations mbx_ops_generic;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 187b3a16ec1f..fb4868d0a32d 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,6 +110,33 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
+void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int new_mtu = msgbuf[1];
+ u32 max_frs;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Only X540 supports jumbo frames in IOV mode */
+ if (adapter->hw.mac.type != ixgbe_mac_X540)
+ return;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+ e_err(drv, "VF mtu %d out of range\n", new_mtu);
+ return;
+ }
+
+ max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+ IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+ if (max_frs < new_mtu) {
+ max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+ }
+
+ e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
+
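
This is the PF-side half of the exchange; the matching VF-side request is the mailbox write added to ixgbevf_change_mtu() further down. Condensed, the VF does (sketch, mirroring that hunk):

	u32 msg[2];

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = new_mtu + ETH_HLEN + ETH_FCS_LEN;	/* requested max frame */
	hw->mbx.ops.write_posted(hw, msg, 2);
	/* the PF then lands in ixgbe_set_vf_lpe() above and bumps MAXFRS */
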
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
@@ -302,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
hash_list, vf);
break;
case IXGBE_VF_SET_LPE:
- WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ ixgbe_set_vf_lpe(adapter, msgbuf);
break;
case IXGBE_VF_SET_VLAN:
add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fd3358f54139..ab65d13969fd 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1680,6 +1680,8 @@
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index de643eb2ada6..78abb6f1a866 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 464e6c9d3fc2..1e735a14091c 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -51,7 +51,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 82599 Virtual Function";
-#define DRV_VERSION "1.0.19-k0"
+#define DRV_VERSION "1.1.0-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
}
/*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to
@@ -1017,7 +1017,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
}
/**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
* @irq: unused
* @data: pointer to our q_vector struct for this interrupt vector
**/
@@ -1665,6 +1665,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
j = adapter->rx_ring[i].reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
rxdctl |= IXGBE_RXDCTL_ENABLE;
+ if (hw->mac.type == ixgbe_mac_X540_vf) {
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+ IXGBE_RXDCTL_RLPML_EN);
+ }
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
ixgbevf_rx_desc_queue_enable(adapter, i);
}
@@ -1967,7 +1972,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -2216,7 +2221,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
@@ -3217,10 +3222,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ u32 msg[2];
+
+ if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3239,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ msg[0] = IXGBE_VF_SET_LPE;
+ msg[1] = max_frame;
+ hw->mbx.ops.write_posted(hw, msg, 2);
+
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -3519,9 +3534,9 @@ static struct pci_driver ixgbevf_driver = {
};
/**
- * ixgbe_init_module - Driver Registration Routine
+ * ixgbevf_init_module - Driver Registration Routine
*
- * ixgbe_init_module is the first routine called when the driver is
+ * ixgbevf_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
static int __init ixgbevf_init_module(void)
@@ -3539,9 +3554,9 @@ static int __init ixgbevf_init_module(void)
module_init(ixgbevf_init_module);
/**
- * ixgbe_exit_module - Driver Exit Cleanup Routine
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
*
- * ixgbe_exit_module is called just before the driver is removed
+ * ixgbevf_exit_module is called just before the driver is removed
* from memory.
**/
static void __exit ixgbevf_exit_module(void)
@@ -3551,7 +3566,7 @@ static void __exit ixgbevf_exit_module(void)
#ifdef DEBUG
/**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbevf_get_hw_dev_name - return device name string
* used by hardware layer to print debugging information
**/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef3cf47..f690474f4409 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
}
static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+ u32 speed = jme->reg_ghc & GHC_SPEED;
+ if (speed == GHC_SPEED_1000M)
+ jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ else
+ jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc |= GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
u32 gpreg0;
int i;
- jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
- udelay(2);
- jwrite32(jme, JME_GHC, jme->reg_ghc);
+ jme_reset_ghc_speed(jme);
+ jme_reset_250A2_workaround(jme);
+
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_assert_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
+ udelay(1);
+ jme_clear_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
jwrite32(jme, JME_RXDBA_LO, 0x00000000);
jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
else
gpreg0 = GPREG0_DEFAULT;
jwrite32(jme, JME_GPREG0, gpreg0);
- jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
-}
-
-static inline void
-jme_reset_ghc_speed(struct jme_adapter *jme)
-{
- jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
- jwrite32(jme, JME_GHC, jme->reg_ghc);
}
static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
}
static inline void
-jme_set_phyfifoa(struct jme_adapter *jme)
+jme_set_phyfifo_5level(struct jme_adapter *jme)
{
jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}
static inline void
-jme_set_phyfifob(struct jme_adapter *jme)
+jme_set_phyfifo_8level(struct jme_adapter *jme)
{
jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}
@@ -351,7 +419,7 @@ static int
jme_check_link(struct net_device *netdev, int testonly)
{
struct jme_adapter *jme = netdev_priv(netdev);
- u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
+ u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
char linkmsg[64];
int rc = 0;
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
jme->phylink = phylink;
- ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
- GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
+ /*
+ * The speed/duplex settings in jme->reg_ghc were already cleared
+ * by jme_reset_mac_processor()
+ */
switch (phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- ghc |= GHC_SPEED_10M |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jme->reg_ghc |= GHC_SPEED_10M;
strcat(linkmsg, "10 Mbps, ");
break;
case PHY_LINK_SPEED_100M:
- ghc |= GHC_SPEED_100M |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jme->reg_ghc |= GHC_SPEED_100M;
strcat(linkmsg, "100 Mbps, ");
break;
case PHY_LINK_SPEED_1000M:
- ghc |= GHC_SPEED_1000M |
- GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ jme->reg_ghc |= GHC_SPEED_1000M;
strcat(linkmsg, "1000 Mbps, ");
break;
default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
if (phylink & PHY_LINK_DUPLEX) {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
- ghc |= GHC_DPX;
+ jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+ jme->reg_ghc |= GHC_DPX;
} else {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
TXMCS_BACKOFF |
TXMCS_CARRIERSENSE |
TXMCS_COLLISION);
- jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
- ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
- TXTRHD_TXREN |
- ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+ jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
}
- gpreg1 = GPREG1_DEFAULT;
+ jwrite32(jme, JME_GHC, jme->reg_ghc);
+
if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
if (!(phylink & PHY_LINK_DUPLEX))
- gpreg1 |= GPREG1_HALFMODEPATCH;
+ jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
switch (phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- jme_set_phyfifoa(jme);
- gpreg1 |= GPREG1_RSSPATCH;
+ jme_set_phyfifo_8level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
break;
case PHY_LINK_SPEED_100M:
- jme_set_phyfifob(jme);
- gpreg1 |= GPREG1_RSSPATCH;
+ jme_set_phyfifo_5level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
break;
case PHY_LINK_SPEED_1000M:
- jme_set_phyfifoa(jme);
+ jme_set_phyfifo_8level(jme);
break;
default:
break;
}
}
-
- jwrite32(jme, JME_GPREG1, gpreg1);
- jwrite32(jme, JME_GHC, ghc);
- jme->reg_ghc = ghc;
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
"Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
* Enable TX Engine
*/
wmb();
- jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ jwrite32f(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
TXCS_ENABLE);
+ /*
+ * Start clock for TX MAC Processor
+ */
+ jme_mac_txclk_on(jme);
}
static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
if (!i)
pr_err("Disable TX engine timeout\n");
+
+ /*
+ * Stop clock for TX MAC Processor
+ */
+ jme_mac_txclk_off(jme);
}
static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
/*
* Setup Unicast Filter
*/
+ jme_set_unicastaddr(jme->dev);
jme_set_multi(jme->dev);
/*
* Enable RX Engine
*/
wmb();
- jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+ jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
RXCS_QUEUESEL_Q0 |
RXCS_ENABLE |
RXCS_QST);
+
+ /*
+ * Start clock for RX MAC Processor
+ */
+ jme_mac_rxclk_on(jme);
}
static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
if (!i)
pr_err("Disable RX engine timeout\n");
+ /*
+ * Stop clock for RX MAC Processor
+ */
+ jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+ u16 csum = 0xFFFFu;
+
+ if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+ return csum;
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+ if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+ (skb->len < (ETH_HLEN +
+ (ip_hdr(skb)->ihl << 2) +
+ sizeof(struct udphdr)))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+ skb_set_transport_header(skb,
+ ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+
+ return csum;
}
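
jme_udpsum() simply extracts the UDP checksum field from the frame; a zero value means the sender never computed one, so the jme_rxsum_ok() change below stops treating a hardware "UDP Checksum error" on such packets as real. An equivalent stand-alone predicate (hypothetical helper name, for illustration only):

static bool jme_udp_csum_error_is_real(struct sk_buff *skb, u16 flags)
{
	/* hardware flagged a UDP packet whose checksum it could not verify */
	if ((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
	    != RXWBFLAG_UDPON)
		return false;
	/* a zero checksum field means no checksum was ever sent */
	return jme_udpsum(skb) != 0;
}
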
static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
- == RXWBFLAG_UDPON)) {
+ == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
if (flags & RXWBFLAG_IPV4)
netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
return false;
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+ if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
tasklet_disable(&jme->rxempty_task);
if (netif_carrier_ok(netdev)) {
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
}
static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_ENBG;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_PDD3COLD;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
jme_phy_on(struct jme_adapter *jme)
{
u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
bmcr &= ~BMCR_PDOWN;
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr |= BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_off(jme);
}
static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
jme_start_irq(jme);
- if (test_bit(JME_FLAG_SSET, &jme->flags)) {
- jme_phy_on(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
- } else {
+ else
jme_reset_phy_processor(jme);
- }
jme_reset_link(jme);
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
}
}
-static inline void
-jme_phy_off(struct jme_adapter *jme)
-{
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
-}
-
static void
jme_powersave_phy(struct jme_adapter *jme)
{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
tasklet_disable(&jme->rxclean_task);
tasklet_disable(&jme->rxempty_task);
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+
+ val = (netdev->dev_addr[3] & 0xff) << 24 |
+ (netdev->dev_addr[2] & 0xff) << 16 |
+ (netdev->dev_addr[1] & 0xff) << 8 |
+ (netdev->dev_addr[0] & 0xff);
+ jwrite32(jme, JME_RXUMA_LO, val);
+ val = (netdev->dev_addr[5] & 0xff) << 8 |
+ (netdev->dev_addr[4] & 0xff);
+ jwrite32(jme, JME_RXUMA_HI, val);
+}
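
The unicast address registers take the MAC in little-endian byte order; a worked example with an illustrative address:

	/*
	 * dev_addr = 00:1b:21:aa:bb:cc
	 *   JME_RXUMA_LO = 0xaa211b00	(bytes 3..0)
	 *   JME_RXUMA_HI = 0x0000ccbb	(bytes 5..4)
	 */
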
+
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
struct jme_adapter *jme = netdev_priv(netdev);
struct sockaddr *addr = p;
- u32 val;
if (netif_running(netdev))
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- val = (addr->sa_data[3] & 0xff) << 24 |
- (addr->sa_data[2] & 0xff) << 16 |
- (addr->sa_data[1] & 0xff) << 8 |
- (addr->sa_data[0] & 0xff);
- jwrite32(jme, JME_RXUMA_LO, val);
- val = (addr->sa_data[5] & 0xff) << 8 |
- (addr->sa_data[4] & 0xff);
- jwrite32(jme, JME_RXUMA_HI, val);
+ jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
return 0;
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+ jme->chip_main_rev = jme->chiprev & 0xF;
+ jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}
static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
jme->reg_rxmcs = RXMCS_DEFAULT;
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_MFEN;
+ jme->reg_gpreg1 = GPREG1_DEFAULT;
set_bit(JME_FLAG_TXCSUM, &jme->flags);
set_bit(JME_FLAG_TSO, &jme->flags);
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_write = jme_mdio_write;
jme_clear_pm(jme);
- jme_set_phyfifoa(jme);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+ jme_set_phyfifo_5level(jme);
+ jme->pcirev = pdev->revision;
if (!jme->fpgaver)
jme_phy_init(jme);
jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
"JMC250 Gigabit Ethernet" :
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
"JMC260 Fast Ethernet" : "Unknown",
(jme->fpgaver != 0) ? " (FPGA)" : "",
(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ jme->pcirev, netdev->dev_addr);
return 0;
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
jme_polling_mode(jme);
jme_stop_pcc_timer(jme);
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
jme_clear_pm(jme);
pci_restore_state(pdev);
- if (test_bit(JME_FLAG_SSET, &jme->flags)) {
- jme_phy_on(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
- } else {
+ else
jme_reset_phy_processor(jme);
- }
jme_start_irq(jme);
netif_device_attach(netdev);
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac09264bf2a..8bf30451e821 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
-#define DRV_VERSION "1.0.7"
+#define DRV_VERSION "1.0.8"
#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
#define HALF_US 500 /* 500 ns */
#define JMESPIIOCTL SIOCDEVPRIVATE
+#define PCI_PRIV_PE1 0xE4
+
+enum pci_priv_pe1_bit_masks {
+ PE1_ASPMSUPRT = 0x00000003, /*
+ * RW:
+ * Aspm_support[1:0]
+ * (R/W Port of 5C[11:10])
+ */
+ PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
+ PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
+ PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
+ PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+ PE1_GPREG0 = 0x0000FF00, /*
+ * SRW:
+ * Cfg_gp_reg0
+ * [7:6] phy_giga BG control
+ * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+ * [4:0] Reserved
+ */
+ PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
+ PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
+ PE1_REVID = 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+ PE1_GPREG0_ENBG = 0x00000000, /* en BG */
+ PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
+ PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
+ PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
/*
* Dynamic(adaptive)/Static PCC values
*/
@@ -403,6 +434,7 @@ struct jme_adapter {
u32 reg_rxmcs;
u32 reg_ghc;
u32 reg_pmcs;
+ u32 reg_gpreg1;
u32 phylink;
u32 tx_ring_size;
u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
u32 rx_ring_mask;
u8 mrrs;
unsigned int fpgaver;
- unsigned int chiprev;
- u8 rev;
+ u8 chiprev;
+ u8 chip_main_rev;
+ u8 chip_sub_rev;
+ u8 pcirev;
u32 msg_enable;
struct ethtool_cmd old_ecmd;
unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
+ JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
TXTRHD_TXRL_SHIFT = 0,
};
+enum jme_txtrhd_values {
+ TXTRHD_FULLDUPLEX = 0x00000000,
+ TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
+ ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+ TXTRHD_TXREN |
+ ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
/*
* RX Control/Status Bits
*/
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
*/
enum jme_ghc_bit_mask {
GHC_SWRST = 0x40000000,
+ GHC_TO_CLK_SRC = 0x00C00000,
+ GHC_TXMAC_CLK_SRC = 0x00300000,
GHC_DPX = 0x00000040,
GHC_SPEED = 0x00000030,
GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
};
/*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+ PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+ PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+ PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+ PHY_PWR_CLKSEL = 0x08000000, /*
+ * XTL_OUT Clock select
+ * (an internal free-running clock)
+ * 0: xtl_out = phy_giga.A_XTL25_O
+ * 1: xtl_out = phy_giga.PD_OSC
+ */
+};
+
+/*
* Giga PHY Status Registers
*/
enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
/*
* General Purpose REG-1
- * Note: All theses bits defined here are for
- * Chip mode revision 0x11 only
*/
-enum jme_gpreg1_masks {
+enum jme_gpreg1_bit_masks {
+ GPREG1_RXCLKOFF = 0x04000000,
+ GPREG1_PCREQN = 0x00020000,
+ GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
+ GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
GPREG1_INTRDELAYUNIT = 0x00000018,
GPREG1_INTRDELAYENABLE = 0x00000007,
};
enum jme_gpreg1_vals {
- GPREG1_RSSPATCH = 0x00000040,
- GPREG1_HALFMODEPATCH = 0x00000020,
-
GPREG1_INTDLYUNIT_16NS = 0x00000000,
GPREG1_INTDLYUNIT_256NS = 0x00000008,
GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
GPREG1_INTDLYEN_6U = 0x00000006,
GPREG1_INTDLYEN_7U = 0x00000007,
- GPREG1_DEFAULT = 0x00000000,
+ GPREG1_DEFAULT = GPREG1_PCREQN,
};
/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
/*
* Workaround
*/
-static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+static inline int is_buggy250(unsigned short device, u8 chiprev)
{
return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
}
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+ return chip_main_rev >= 5;
+}
+
/*
* Function prototypes
*/
static int jme_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
static void jme_set_multi(struct net_device *netdev);
#endif
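
The jme probe hunk above replaces an explicit config-space read of PCI_REVISION_ID with the revision byte the PCI core already caches in struct pci_dev. A minimal sketch of the same pattern in a generic PCI probe routine (driver and field names are hypothetical):

struct foo_priv {
	u8 pcirev;
};

static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct foo_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* pdev->revision is filled in by the PCI core at enumeration
	 * time, so no pci_read_config_byte(pdev, PCI_REVISION_ID, ...)
	 * is needed here. */
	priv->pcirev = pdev->revision;

	pci_set_drvdata(pdev, priv);
	return 0;
}
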
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 928b2b83cef5..efd44afeae83 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -1145,7 +1146,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
- struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ struct ks8842_platform_data *pdata = mfd_get_data(pdev);
u16 id;
unsigned i;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index f35554d11441..b7948ccfcf7d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -952,8 +952,7 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
-static int __devinit
-temac_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit temac_of_probe(struct platform_device *op)
{
struct device_node *np;
struct temac_local *lp;
@@ -1123,7 +1122,7 @@ static struct of_device_id temac_of_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, temac_of_match);
-static struct of_platform_driver temac_of_driver = {
+static struct platform_driver temac_of_driver = {
.probe = temac_of_probe,
.remove = __devexit_p(temac_of_remove),
.driver = {
@@ -1135,13 +1134,13 @@ static struct of_platform_driver temac_of_driver = {
static int __init temac_init(void)
{
- return of_register_platform_driver(&temac_of_driver);
+ return platform_driver_register(&temac_of_driver);
}
module_init(temac_init);
static void __exit temac_exit(void)
{
- of_unregister_platform_driver(&temac_of_driver);
+ platform_driver_unregister(&temac_of_driver);
}
module_exit(temac_exit);
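
Several OF drivers in this series (ll_temac, myri_sbus, niu, mdio-gpio) are converted from of_platform_driver to a plain platform_driver: the probe callback loses its of_device_id argument and registration goes through platform_driver_register(). A hedged sketch of the post-conversion shape, with all names hypothetical:

static int __devinit foo_of_probe(struct platform_device *op)
{
	/* Device data now comes from op->dev.of_node rather than from a
	 * second of_device_id argument. */
	return 0;
}

static int __devexit foo_of_remove(struct platform_device *op)
{
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", },
	{},
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_of_driver = {
	.probe	= foo_of_probe,
	.remove	= __devexit_p(foo_of_remove),
	.driver	= {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_of_match,
	},
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_of_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_of_driver);
}
module_exit(foo_exit);
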
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a1c54d..ea0dc451da9c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
static const struct ethtool_ops loopback_ethtool_ops = {
.get_link = always_on,
- .set_tso = ethtool_op_set_tso,
- .get_tx_csum = always_on,
- .get_sg = always_on,
- .get_rx_csum = always_on,
};
static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
- | NETIF_F_TSO
+ | NETIF_F_ALL_TSO
+ | NETIF_F_UFO
| NETIF_F_NO_CSUM
+ | NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL;
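
The loopback change above drops the per-feature ethtool ops and instead advertises the toggleable offloads through hw_features, which the feature-flag rework uses to generate the ethtool on/off controls automatically. A minimal sketch of that split for a hypothetical virtual device setup routine:

static void foo_setup(struct net_device *dev)
{
	/* Offloads the user may toggle via ethtool. */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_UFO;

	/* Offloads enabled by default; includes properties the device
	 * has unconditionally (no checksum needed, RX csum done). */
	dev->features = dev->hw_features
			| NETIF_F_NO_CSUM
			| NETIF_F_RXCSUM
			| NETIF_F_HIGHDMA;
}
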
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191e..79ccb54ab00c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
if (mdiobus_register(bp->mii_bus))
goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3ff..2300e4599520 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
struct socket sock;
struct socket_wq wq;
int vnet_hdr_sz;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
struct file *file;
unsigned int flags;
};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
struct macvlan_dev *vlan;
spin_lock(&macvtap_lock);
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_protected(q->vlan,
+ lockdep_is_held(&macvtap_lock));
if (vlan) {
int index = get_slot(vlan, q);
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
/* macvtap_put_queue can free some slots, so go through all slots */
spin_lock(&macvtap_lock);
for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
- q = rcu_dereference(vlan->taps[i]);
+ q = rcu_dereference_protected(vlan->taps[i],
+ lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
rcu_assign_pointer(vlan->taps[i], NULL);
@@ -569,7 +571,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
}
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
else
@@ -583,7 +585,7 @@ err_kfree:
err:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
@@ -631,7 +633,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_count_rx(vlan, len, ret == 0, 0);
rcu_read_unlock_bh();
@@ -727,7 +729,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNGETIFF:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
dev_hold(vlan->dev);
rcu_read_unlock_bh();
@@ -736,7 +738,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
ret = 0;
- if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
dev_put(vlan->dev);
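
The macvtap hunks annotate q->vlan with __rcu and then pick the dereference helper that matches each call site's protection: rcu_dereference_bh() under rcu_read_lock_bh(), and rcu_dereference_protected() with a lockdep expression where the update-side spinlock is held. A condensed sketch of both cases, with struct and function names hypothetical:

struct foo_queue {
	struct foo_dev __rcu *dev;	/* pointer published to readers */
};

static DEFINE_SPINLOCK(foo_lock);	/* update-side lock */

/* Reader running in BH context. */
static void foo_reader(struct foo_queue *q)
{
	struct foo_dev *dev;

	rcu_read_lock_bh();
	dev = rcu_dereference_bh(q->dev);
	if (dev)
		;	/* use dev under the BH read-side section */
	rcu_read_unlock_bh();
}

/* Updater: the spinlock, not RCU, protects this dereference. */
static void foo_updater(struct foo_queue *q, struct foo_dev *new)
{
	struct foo_dev *old;

	spin_lock(&foo_lock);
	old = rcu_dereference_protected(q->dev,
					lockdep_is_held(&foo_lock));
	rcu_assign_pointer(q->dev, new);
	spin_unlock(&foo_lock);
	if (old)
		;	/* wait for a grace period before freeing old */
}
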
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2c3a04..a7f2eed9a08a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
- unsigned long features;
+ u32 features;
u32 max_tso6;
u32 read_dma;
u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+ u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
if (tso_enabled)
netdev->features |= flags;
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 4846e131a04e..a761076b69c3 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -926,7 +926,7 @@ static const struct net_device_ops myri_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit myri_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit myri_sbus_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1160,7 +1160,7 @@ static const struct of_device_id myri_sbus_match[] = {
MODULE_DEVICE_TABLE(of, myri_sbus_match);
-static struct of_platform_driver myri_sbus_driver = {
+static struct platform_driver myri_sbus_driver = {
.driver = {
.name = "myri",
.owner = THIS_MODULE,
@@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = {
static int __init myri_sbus_init(void)
{
- return of_register_platform_driver(&myri_sbus_driver);
+ return platform_driver_register(&myri_sbus_driver);
}
static void __exit myri_sbus_exit(void)
{
- of_unregister_platform_driver(&myri_sbus_driver);
+ platform_driver_unregister(&myri_sbus_driver);
}
module_init(myri_sbus_init);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 9fb59d3f9c92..40fa59e2fd5c 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -10062,8 +10062,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_single = niu_phys_unmap_single,
};
-static int __devinit niu_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit niu_of_probe(struct platform_device *op)
{
union niu_parent_id parent_id;
struct net_device *dev;
@@ -10223,7 +10222,7 @@ static const struct of_device_id niu_match[] = {
};
MODULE_DEVICE_TABLE(of, niu_match);
-static struct of_platform_driver niu_of_driver = {
+static struct platform_driver niu_of_driver = {
.driver = {
.name = "niu",
.owner = THIS_MODULE,
@@ -10244,14 +10243,14 @@ static int __init niu_init(void)
niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
#ifdef CONFIG_SPARC64
- err = of_register_platform_driver(&niu_of_driver);
+ err = platform_driver_register(&niu_of_driver);
#endif
if (!err) {
err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
if (err)
- of_unregister_platform_driver(&niu_of_driver);
+ platform_driver_unregister(&niu_of_driver);
#endif
}
@@ -10262,7 +10261,7 @@ static void __exit niu_exit(void)
{
pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
- of_unregister_platform_driver(&niu_of_driver);
+ platform_driver_unregister(&niu_of_driver);
#endif
}
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index b99e90aca37d..8c66e22c3a0a 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -2446,7 +2446,7 @@ static struct pci_driver pch_gbe_pcidev = {
.id_table = pch_gbe_pcidev_id,
.probe = pch_gbe_probe,
.remove = pch_gbe_remove,
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.driver.pm = &pch_gbe_pm_ops,
#endif
.shutdown = pch_gbe_shutdown,
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d054..530ab5a10bd3 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5ac8120..392a6c4b72e5 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@ config NATIONAL_PHY
Currently supports the DP83865 PHY.
config STE10XP
- depends on PHYLIB
tristate "Driver for STMicroelectronics STe10Xp PHYs"
---help---
This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index f62c7b717bc8..47c8339a0359 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -188,8 +188,7 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
{
struct mdio_gpio_platform_data *pdata;
struct mii_bus *new_bus;
@@ -240,7 +239,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
};
MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
-static struct of_platform_driver mdio_ofgpio_driver = {
+static struct platform_driver mdio_ofgpio_driver = {
.driver = {
.name = "mdio-gpio",
.owner = THIS_MODULE,
@@ -252,12 +251,12 @@ static struct of_platform_driver mdio_ofgpio_driver = {
static inline int __init mdio_ofgpio_init(void)
{
- return of_register_platform_driver(&mdio_ofgpio_driver);
+ return platform_driver_register(&mdio_ofgpio_driver);
}
static inline void __exit mdio_ofgpio_exit(void)
{
- of_unregister_platform_driver(&mdio_ofgpio_driver);
+ platform_driver_unregister(&mdio_ofgpio_driver);
}
#else
static inline int __init mdio_ofgpio_init(void) { return 0; }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678bc5a9..590f902deb6b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
-
-#define PHY_ID_KSZ9021 0x00221611
-#define PHY_ID_KS8737 0x00221720
-#define PHY_ID_KS8041 0x00221510
-#define PHY_ID_KS8051 0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define PHY_ID_KS8001 0x0022161A
+#include <linux/micrel_phy.h>
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
@@ -46,6 +40,7 @@
#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
+static int ks8051_config_init(struct phy_device *phydev)
+{
+ int regval;
+
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ regval = phy_read(phydev, MII_KSZPHY_CTRL);
+ regval |= KSZ8051_RMII_50MHZ_CLK;
+ phy_write(phydev, MII_KSZPHY_CTRL, regval);
+ }
+
+ return 0;
+}
+
static struct phy_driver ks8737_driver = {
.phy_id = PHY_ID_KS8737,
.phy_id_mask = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ks8051_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
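
ks8051_config_init() acts on phydev->dev_flags, which the attaching MAC driver supplies when it connects the PHY. A hedged sketch of that other half, assuming the 2.6.38-era phy_connect() signature that still takes a flags argument (board and function names hypothetical):

#include <linux/micrel_phy.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *dev)
{
	/* react to link state changes */
}

static int foo_connect_phy(struct net_device *dev, const char *bus_id)
{
	struct phy_device *phydev;

	/* The flags argument ends up in phydev->dev_flags, which is
	 * where ks8051_config_init() looks for MICREL_PHY_50MHZ_CLK. */
	phydev = phy_connect(dev, bus_id, foo_adjust_link,
			     MICREL_PHY_50MHZ_CLK, PHY_INTERFACE_MODE_RMII);

	return IS_ERR(phydev) ? PTR_ERR(phydev) : 0;
}
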
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c4466978..9f6d670748d1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ppp_release(NULL, file);
err = 0;
} else
- printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
- atomic_long_read(&file->f_count));
+ pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
mutex_unlock(&ppp_mutex);
return err;
}
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (pf->kind != INTERFACE) {
/* can't happen */
- printk(KERN_ERR "PPP: not interface or channel??\n");
+ pr_err("PPP: not interface or channel??\n");
return -EINVAL;
}
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
vj = slhc_init(val2+1, val+1);
if (!vj) {
- printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+ netdev_err(ppp->dev,
+ "PPP: no memory (VJ compressor)\n");
err = -ENOMEM;
break;
}
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
{
int err;
- printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+ pr_info("PPP generic driver version " PPP_VERSION "\n");
err = register_pernet_device(&ppp_net_ops);
if (err) {
- printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+ pr_err("failed to register PPP pernet device (%d)\n", err);
goto out;
}
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (err) {
- printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+ pr_err("failed to register PPP device (%d)\n", err);
goto out_net;
}
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
- printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
return NULL;
}
if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
* the same number.
*/
if (net_ratelimit())
- printk(KERN_ERR "ppp: compressor dropped pkt\n");
+ netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
kfree_skb(skb);
kfree_skb(new_skb);
new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->pass_filter &&
sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: outbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
if (!new_skb) {
- printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
goto drop;
}
skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
proto != PPP_LCP && proto != PPP_CCP) {
if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
if (net_ratelimit())
- printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+ netdev_err(ppp->dev,
+ "ppp: compression required but "
+ "down - pkt dropped.\n");
goto drop;
}
skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* copy to a new sk_buff with more tailroom */
ns = dev_alloc_skb(skb->len + 128);
if (!ns) {
- printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+ netdev_err(ppp->dev, "PPP: no memory "
+ "(VJ decomp)\n");
goto err;
}
skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
if (len <= 0) {
- printk(KERN_DEBUG "PPP: VJ decompression error\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: VJ decompression error\n");
goto err;
}
len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
goto err;
if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
- printk(KERN_ERR "PPP: VJ uncompressed error\n");
+ netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
goto err;
}
proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->pass_filter &&
sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: inbound frame "
- "not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: inbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
ns = dev_alloc_skb(obuff_size);
if (!ns) {
- printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+ netdev_err(ppp->dev, "ppp_decompress_frame: "
+ "no memory\n");
goto err;
}
/* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
u32 seq = ppp->nextseq;
u32 minseq = ppp->minseq;
struct sk_buff_head *list = &ppp->mrq;
- struct sk_buff *p, *next;
+ struct sk_buff *p, *tmp;
struct sk_buff *head, *tail;
struct sk_buff *skb = NULL;
int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
return NULL;
head = list->next;
tail = NULL;
- for (p = head; p != (struct sk_buff *) list; p = next) {
- next = p->next;
+ skb_queue_walk_safe(list, p, tmp) {
+ again:
if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
/* this can't happen, anyway ignore the skb */
- printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
- PPP_MP_CB(p)->sequence, seq);
- head = next;
+ netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+ "seq %u < %u\n",
+ PPP_MP_CB(p)->sequence, seq);
+ __skb_unlink(p, list);
+ kfree_skb(p);
continue;
}
if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
lost = 1;
seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
minseq + 1: PPP_MP_CB(p)->sequence;
- next = p;
- continue;
+ goto again;
}
/*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
(PPP_MP_CB(head)->BEbits & B)) {
if (len > ppp->mrru + 2) {
++ppp->dev->stats.rx_length_errors;
- printk(KERN_DEBUG "PPP: reconstructed packet"
- " is too long (%d)\n", len);
- } else if (p == head) {
- /* fragment is complete packet - reuse skb */
- tail = p;
- skb = skb_get(p);
- break;
- } else if ((skb = dev_alloc_skb(len)) == NULL) {
- ++ppp->dev->stats.rx_missed_errors;
- printk(KERN_DEBUG "PPP: no memory for "
- "reconstructed packet");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: reconstructed packet"
+ " is too long (%d)\n", len);
} else {
tail = p;
break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
* and we haven't found a complete valid packet yet,
* we can discard up to and including this fragment.
*/
- if (PPP_MP_CB(p)->BEbits & E)
- head = next;
+ if (PPP_MP_CB(p)->BEbits & E) {
+ struct sk_buff *tmp2;
+ skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+ head = skb_peek(list);
+ if (!head)
+ break;
+ }
++seq;
}
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
signal a receive error. */
if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
if (ppp->debug & 1)
- printk(KERN_DEBUG " missed pkts %u..%u\n",
- ppp->nextseq,
- PPP_MP_CB(head)->sequence-1);
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ " missed pkts %u..%u\n",
+ ppp->nextseq,
+ PPP_MP_CB(head)->sequence-1);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
}
- if (head != tail)
- /* copy to a single skb */
- for (p = head; p != tail->next; p = p->next)
- skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
- ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
- head = tail->next;
- }
+ skb = head;
+ if (head != tail) {
+ struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+ p = skb_queue_next(list, head);
+ __skb_unlink(skb, list);
+ skb_queue_walk_from_safe(list, p, tmp) {
+ __skb_unlink(p, list);
+ *fragpp = p;
+ p->next = NULL;
+ fragpp = &p->next;
+
+ skb->len += p->len;
+ skb->data_len += p->len;
+ skb->truesize += p->len;
+
+ if (p == tail)
+ break;
+ }
+ } else {
+ __skb_unlink(skb, list);
+ }
- /* Discard all the skbuffs that we have copied the data out of
- or that we can't use. */
- while ((p = list->next) != head) {
- __skb_unlink(p, list);
- kfree_skb(p);
+ ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
}
return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ret = register_netdev(dev);
if (ret != 0) {
unit_put(&pn->units_idr, unit);
- printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
- dev->name, ret);
+ netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+ dev->name, ret);
goto out2;
}
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
if (!ppp->file.dead || ppp->n_channels) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
- "n_channels=%d !\n", ppp, ppp->file.dead,
- ppp->n_channels);
+ netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+ "but dead=%d n_channels=%d !\n",
+ ppp, ppp->file.dead, ppp->n_channels);
return;
}
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
if (!pch->file.dead) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying undead channel %p !\n",
- pch);
+ pr_err("ppp: destroying undead channel %p !\n", pch);
return;
}
skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
{
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
- printk(KERN_ERR "PPP: removing module but units remain!\n");
+ pr_err("PPP: removing module but units remain!\n");
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
again:
if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
+ pr_err("PPP: No free memory for idr\n");
return -ENOMEM;
}
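
ppp_mp_reconstruct() is reworked above to walk the receive queue with the safe iterators and to splice fragments onto the head skb's frag_list instead of copying. A reduced sketch of the two idioms it relies on; FOO_CB() and foo_seq_before() stand in for the driver's per-skb control block and sequence comparison, and all names are hypothetical:

/* Drop every queued skb whose sequence number is older than 'seq'. */
static void foo_drop_stale(struct sk_buff_head *list, u32 seq)
{
	struct sk_buff *p, *tmp;

	skb_queue_walk_safe(list, p, tmp) {
		if (foo_seq_before(FOO_CB(p)->sequence, seq)) {
			__skb_unlink(p, list);	/* safe: tmp already saved */
			kfree_skb(p);
		}
	}
}

/* Chain the remaining fragments onto 'head' via frag_list, no copying. */
static void foo_chain_frags(struct sk_buff_head *list, struct sk_buff *head)
{
	struct sk_buff **fragpp = &skb_shinfo(head)->frag_list;
	struct sk_buff *p;

	while ((p = __skb_dequeue(list)) != NULL) {
		*fragpp = p;
		p->next = NULL;
		fragpp = &p->next;

		head->len      += p->len;
		head->data_len += p->len;
		head->truesize += p->len;
	}
}
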
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 1a3584edd79c..348b4f1367c9 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
{
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
u32 previousBit;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
/* Clock in a zero, then do the start bit */
ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
u32 dataBit;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
/* Read the data bits */
/* The first bit is a dummy. Clock right over it. */
@@ -2460,7 +2460,7 @@ map_error:
* The 3032 supports sglists by using the 3 addr/len pairs (ALP)
* in the IOCB plus a chain of outbound address lists (OAL) that
* each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
+ * will be used to point to an OAL when more ALP entries are required.
* The IOCB is always the top of the chain followed by one or more
* OALs (when necessary).
*/
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
u32 value;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
struct ql3xxx_host_memory_registers __iomem *hmem_regs =
(void __iomem *)port_regs;
u32 delay = 10;
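
The qla3xxx hunks add the __iomem qualifier to the serial-port register pointer so sparse can verify it is only used through MMIO accessors. A small sketch of the annotation and accessor pairing (register layout hypothetical):

struct foo_regs {
	u32 serial_port;
};

static u32 foo_read_serial(struct foo_regs __iomem *regs)
{
	/* An __iomem pointer must only be dereferenced through the I/O
	 * accessors, never directly. */
	u32 __iomem *spir = &regs->serial_port;

	return ioread32(spir);
}
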
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 44e316fd67b8..dc44564ef6f9 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
#define LINKEVENT_LINKSPEED_MBPS 0
#define LINKEVENT_LINKSPEED_ENCODED 1
-#define AUTO_FW_RESET_ENABLED 0x01
/* firmware response header:
* 63:58 - message type
* 57:56 - owner
@@ -1133,14 +1132,10 @@ struct qlcnic_eswitch {
#define MAX_BW 100 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
-#define MAX_TX_QUEUES 1
-#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw <= MAX_BW)
-#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
-#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
struct qlcnic_pci_func_cfg {
u16 func_type;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 37c04b4fade3..cd88c7e1bfa9 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -42,7 +42,7 @@ static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
-static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
@@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->reset_context &&
- auto_fw_reset == AUTO_FW_RESET_ENABLED) {
+ if (adapter->reset_context && auto_fw_reset) {
qlcnic_reset_hw_context(adapter);
adapter->netdev->trans_start = jiffies;
}
@@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
qlcnic_dev_request_reset(adapter);
- if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
+ if (auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2982,7 +2981,7 @@ detach:
adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
QLCNIC_DEV_NEED_RESET;
- if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ if (auto_fw_reset &&
!test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
- if (!IS_VALID_BW(np_cfg[i].min_bw)
- || !IS_VALID_BW(np_cfg[i].max_bw)
- || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
- || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
+ if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+ !IS_VALID_BW(np_cfg[i].max_bw))
return QL_STATUS_INVALID_PARAM;
}
return 0;
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d43cac..e3ebd90ae651 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.26"
-#define DRV_RELDATE "30May2010"
+#define DRV_VERSION "0.27"
+#define DRV_RELDATE "23Feb2011"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
+#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
+#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
- u16 reg;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
+ u16 *adrp;
+ u16 hash_table[4] = { 0 };
+
+ spin_lock_irqsave(&lp->lock, flags);
- /* MAC Address */
+ /* Keep our MAC Address */
adrp = (u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
- /* Promiscous Mode */
- spin_lock_irqsave(&lp->lock, flags);
-
/* Clear AMCP & PROM bits */
- reg = ioread16(ioaddr) & ~0x0120;
- if (dev->flags & IFF_PROMISC) {
- reg |= 0x0020;
- lp->mcr0 |= 0x0020;
- }
- /* Too many multicast addresses
- * accept all traffic */
- else if ((netdev_mc_count(dev) > MCAST_MAX) ||
- (dev->flags & IFF_ALLMULTI))
- reg |= 0x0020;
+ lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
- iowrite16(reg, ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
+ /* Promiscuous mode */
+ if (dev->flags & IFF_PROMISC)
+ lp->mcr0 |= MCR0_PROMISC;
- /* Build the hash table */
- if (netdev_mc_count(dev) > MCAST_MAX) {
- u16 hash_table[4];
- u32 crc;
+ /* Enable multicast hash table function to
+ * receive all multicast packets. */
+ else if (dev->flags & IFF_ALLMULTI) {
+ lp->mcr0 |= MCR0_HASH_EN;
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0xffff;
+ }
+ /* Use internal multicast address registers if the number of
+ * multicast addresses is not greater than MCAST_MAX. */
+ else if (netdev_mc_count(dev) <= MCAST_MAX) {
+ i = 0;
netdev_for_each_mc_addr(ha, dev) {
- char *addrs = ha->addr;
+ u16 *adrp = (u16 *) ha->addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ while (i < MCAST_MAX) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ }
+ /* Otherwise, Enable multicast hash table function. */
+ else {
+ u32 crc;
- if (!(*addrs & 1))
- continue;
+ lp->mcr0 |= MCR0_HASH_EN;
+
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
- crc = ether_crc_le(6, addrs);
+ /* Build multicast hash table */
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addrs = ha->addr;
+
+ crc = ether_crc(ETH_ALEN, addrs);
crc >>= 26;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
- /* Fill the MAC hash tables with their values */
+ }
+
+ iowrite16(lp->mcr0, ioaddr + MCR0);
+
+ /* Fill the MAC hash tables with their values */
+ if (lp->mcr0 & MCR0_HASH_EN) {
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
iowrite16(hash_table[2], ioaddr + MAR2);
iowrite16(hash_table[3], ioaddr + MAR3);
}
- /* Multicast Address 1~4 case */
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- if (i >= MCAST_MAX)
- break;
- adrp = (u16 *) ha->addr;
- iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
- iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
- iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
- i++;
- }
- while (i < MCAST_MAX) {
- iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
- i++;
- }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
static void netdev_get_drvinfo(struct net_device *dev,
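
The rewritten r6040_multicast_list() now handles four cases (promiscuous, ALLMULTI, few addresses, hash filtering) and programs the 64-bit hash table from the top six bits of the Ethernet CRC. The bit-selection arithmetic, shown standalone (helper name hypothetical):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Set the filter bit for one multicast address in a 4 x 16-bit table. */
static void foo_hash_set(u16 hash_table[4], const u8 *addr)
{
	u32 crc = ether_crc(ETH_ALEN, addr);

	crc >>= 26;					/* keep top 6 CRC bits */
	hash_table[crc >> 4] |= 1 << (crc & 0xf);	/* word 0-3, bit 0-15 */
}
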
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 469ab0b7ce31..ef2133b16f8c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -617,8 +617,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
}
}
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int i;
RTL_W8(ERIDR, cmd);
@@ -630,7 +631,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
break;
}
- ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -2868,8 +2869,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
(tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2895,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
}
@@ -2900,12 +2906,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
}
@@ -3042,7 +3053,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = PCIMulRW | RxChkSum;
+ tp->cp_cmd = RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3318,7 +3329,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
/* Disable interrupts */
rtl8169_irq_mask_and_ack(ioaddr);
- if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
@@ -3847,8 +3859,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
Cxpl_dbg_sel | \
ASF | \
PktCntrDisable | \
- PCIDAC | \
- PCIMulRW)
+ Mac_dbgo_sel)
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
@@ -3878,8 +3889,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
@@ -3891,8 +3900,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3918,6 +3925,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
}
}
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3932,14 +3941,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
break;
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_W16(IntrMitigate, 0x0000);
@@ -3949,14 +3957,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
RTL_R8(IntrMask);
rtl_set_rx_mode(dev);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
RTL_W16(IntrMask, tp->intr_event);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac743843..d563049859a8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
channel->irq_mod_score = 0;
}
+ efx_filter_rfs_expire(channel);
+
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
* since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
efx_fini_eventq(channel);
}
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
}
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
return count;
}
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc;
+
+ efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap)
+ return -ENOMEM;
+ for (i = 0; i < efx->n_rx_channels; i++) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ xentries[i].vector);
+ if (rc) {
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+ return rc;
+ }
+ }
+#endif
+ return 0;
+}
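
efx_init_rx_cpu_rmap() above wires the MSI-X vectors into a CPU reverse map so accelerated RFS (ndo_rx_flow_steer) can tell which RX queue is closest to a flow's CPU. A trimmed sketch of the same hookup for a hypothetical driver, assuming one MSI-X vector per RX queue:

#include <linux/cpu_rmap.h>

static int foo_setup_rfs_rmap(struct net_device *net_dev,
			      struct msix_entry *xentries, int n_rx)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc;

	net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(n_rx);
	if (!net_dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < n_rx; i++) {
		rc = irq_cpu_rmap_add(net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(net_dev->rx_cpu_rmap);
			net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}
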
+
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
{
int max_channels =
min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_tx_channels = efx->n_channels;
efx->n_rx_channels = efx->n_channels;
}
+ rc = efx_init_rx_cpu_rmap(efx, xentries);
+ if (rc) {
+ pci_disable_msix(efx->pci_dev);
+ return rc;
+ }
for (i = 0; i < n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
+
+ return 0;
}
static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
static void efx_set_channels(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
- /* Channel pointers were set in efx_init_struct() but we now
- * need to clear them for TX queues in any RX-only channels. */
- efx_for_each_channel(channel, efx) {
- if (channel->channel - efx->tx_channel_offset >=
- efx->n_tx_channels) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- tx_queue->channel = NULL;
- }
- }
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
/* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
- efx_probe_interrupts(efx);
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
return 0;
+
+fail:
+ efx->type->remove(efx);
+ return rc;
}
static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
- if (efx_channel_get_rx_queue(channel))
+ if (efx_channel_has_rx_queue(channel))
channel->irq_moderation = rx_ticks;
- else if (efx_channel_get_tx_queue(channel, 0))
+ else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
}
@@ -1849,6 +1874,10 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
+ .ndo_setup_tc = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
};
static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue;
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->core_txq = netdev_get_tx_queue(
- efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
- }
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue_core_txq(tx_queue);
}
/* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
efx_nic_fini_interrupt(efx);
efx_fini_channels(efx);
efx_fini_port(efx);
@@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5212b1..3d83a1f74fef 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
struct efx_filter_spec *spec);
extern void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+ if (channel->rfs_filters_added >= 60 &&
+ __efx_filter_rfs_expire(channel->efx, 100))
+ channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60d..807178ef65ad 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
enum {
EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
- EFX_ETHTOOL_STAT_SOURCE_channel
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
} source;
unsigned offset;
u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(field, channel, n_##field, \
unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
static struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- siena_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
@@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct efx_ethtool_stat *stat;
struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
struct rtnl_link_stats64 temp;
int i;
@@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
data[i] += stat->get_stat((void *)channel +
stat->offset);
break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
}
}
}
@@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
- unsigned long features;
+ u32 features;
features = NETIF_F_TSO;
if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx = netdev_priv(net_dev);
- unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+ u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
if (enable)
net_dev->features |= features;
@@ -569,9 +588,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_self_tests efx_tests;
+ struct efx_self_tests *efx_tests;
int already_up;
- int rc;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
@@ -589,13 +613,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail2;
+ goto fail1;
}
}
- memset(&efx_tests, 0, sizeof(efx_tests));
-
- rc = efx_selftest(efx, &efx_tests, test->flags);
+ rc = efx_selftest(efx, efx_tests, test->flags);
if (!already_up)
dev_close(efx->net_dev);
@@ -604,10 +626,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
- fail2:
- fail1:
+fail1:
/* Fill ethtool results structures */
- efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
if (rc)
test->flags |= ETH_TEST_FL_FAILED;
}
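
The self-test change above moves struct efx_self_tests off the stack and allocates it with kzalloc(), since the structure is too large to sit safely on the kernel stack. A minimal sketch of that pattern (struct and function names hypothetical):

struct foo_selftest_results {
	int phy[32];
	int loopback[64];
};

static int foo_run_selftest(struct foo_nic *nic)
{
	struct foo_selftest_results *results;
	int rc;

	/* Too large for the kernel stack; heap-allocate and zero it. */
	results = kzalloc(sizeof(*results), GFP_KERNEL);
	if (!results)
		return -ENOMEM;

	rc = foo_selftest(nic, results);

	kfree(results);
	return rc;
}
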
@@ -631,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
/* Find lowest IRQ moderation across all used TX queues */
coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
efx_for_each_channel(channel, efx) {
- if (!efx_channel_get_tx_queue(channel, 0))
+ if (!efx_channel_has_tx_queues(channel))
continue;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
if (channel->channel < efx->n_rx_channels)
@@ -676,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_channel(channel, efx) {
- if (efx_channel_get_rx_queue(channel) &&
- efx_channel_get_tx_queue(channel, 0) &&
+ if (efx_channel_has_rx_queue(channel) &&
+ efx_channel_has_tx_queues(channel) &&
tx_usecs) {
netif_err(efx, drv, efx->net_dev, "Channel is shared. "
"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 61ddd2c6e750..734fcfb52e85 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
/* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25;
- /* RX data FIFO thresholds (256-byte units; size varies) */
- int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
- int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_RX_CFG);
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
/* Data FIFO size is 5.5K */
- if (data_xon_thr < 0)
- data_xon_thr = 512 >> 8;
- if (data_xoff_thr < 0)
- data_xoff_thr = 2048 >> 8;
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
huge_buf_size);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
} else {
/* Data FIFO size is 80K; register fields moved */
- if (data_xon_thr < 0)
- data_xon_thr = 27648 >> 8; /* ~3*max MTU */
- if (data_xoff_thr < 0)
- data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
huge_buf_size);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
+ /* Send XON and XOFF at ~3 * max MTU away from empty/full */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 2dd16f0b3ced..b9cc846811d6 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b49e84394641..2c9ee5db3bf7 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c41c4ce..95a980fd63d5 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
*/
#include <linux/in.h>
+#include <net/ip.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
@@ -27,6 +28,10 @@
*/
#define FILTER_CTL_SRCH_MAX 200
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
enum efx_filter_table_id {
EFX_FILTER_TABLE_RX_IP = 0,
EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
struct efx_filter_state {
spinlock_t lock;
struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned rps_expire_index;
+#endif
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
bool for_insert, int *depth_required)
{
- unsigned hash, incr, filter_idx, depth;
+ unsigned hash, incr, filter_idx, depth, depth_max;
struct efx_filter_spec *cmp;
hash = efx_filter_hash(key);
incr = efx_filter_increment(key);
+ depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
for (depth = 1, filter_idx = hash & (table->size - 1);
- depth <= FILTER_CTL_SRCH_MAX &&
- test_bit(filter_idx, table->used_bitmap);
+ depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
++depth) {
cmp = &table->spec[filter_idx];
if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
}
if (!for_insert)
return -ENOENT;
- if (depth > FILTER_CTL_SRCH_MAX)
+ if (depth > depth_max)
return -EBUSY;
found:
*depth_required = depth;
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
spin_lock_init(&state->lock);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+ state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+ sizeof(*state->rps_flow_id),
+ GFP_KERNEL);
+ if (!state->rps_flow_id)
+ goto fail;
+#endif
table = &state->table[EFX_FILTER_TABLE_RX_IP];
table->id = EFX_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
kfree(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
+#ifdef CONFIG_RFS_ACCEL
+ kfree(state->rps_flow_id);
+#endif
kfree(state);
}
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ state->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ unsigned mask = table->size - 1;
+ unsigned index;
+ unsigned stop;
+
+ if (!spin_trylock_bh(&state->lock))
+ return false;
+
+ index = state->rps_expire_index;
+ stop = (index + quota) & mask;
+
+ while (index != stop) {
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev,
+ table->spec[index].dmaq_id,
+ state->rps_flow_id[index], index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expiring filter %d [flow %u]\n",
+ index, state->rps_flow_id[index]);
+ efx_filter_table_clear_entry(efx, table, index);
+ }
+ index = (index + 1) & mask;
+ }
+
+ state->rps_expire_index = stop;
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+
+ spin_unlock_bh(&state->lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
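An illustrative aside (not part of the patch): efx_filter_rfs() above is written against the accelerated-RFS callback prototype, and __efx_filter_rfs_expire() ages out a bounded quota of hint-priority filters per call. A minimal sketch of how such a callback is typically wired up, assuming the standard .ndo_rx_flow_steer hook of this kernel generation; the real hook-up lives elsewhere in the driver.

static const struct net_device_ops efx_netdev_ops_sketch = {
	/* ... other operations elided ... */
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};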
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 6da4ae20a039..dc45110b2456 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index b716e827b291..8bba8955f310 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -602,7 +602,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
**************************************************************************
*/
-int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
size_t outlength;
@@ -616,29 +616,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
if (rc)
goto fail;
- if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
- *version = 0;
- *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
- return 0;
- }
-
if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
rc = -EIO;
goto fail;
}
ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
- *version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
- ((u64)le16_to_cpu(ver_words[1]) << 32) |
- ((u64)le16_to_cpu(ver_words[2]) << 16) |
- le16_to_cpu(ver_words[3]));
- *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
-
- return 0;
+ snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ return;
fail:
netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
+ buf[0] = 0;
}
int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
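A brief usage note (illustration only, not part of the patch): with this change the caller supplies a buffer and receives a ready-formatted version string, or an empty string if the MCDI query failed.

	/* Hypothetical caller; the buffer size is an assumption. */
	char fw_ver[32];

	efx_mcdi_print_fwver(efx, fw_ver, sizeof(fw_ver));
	/* fw_ver now holds "a.b.c.d", or "" on failure. */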
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index c792f1d65e48..aced2a7856fc 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build);
+extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index f88f4bf986ff..33f7294edb47 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 90359e644006..b86a15f221ad 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e97eed663c6..ec3f740f5465 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 56b0266b441f..19e68c26d103 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
return spins ? spins : -ETIMEDOUT;
}
-static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
{
int status;
- if (LOOPBACK_INTERNAL(efx))
- return 0;
-
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
}
}
- /* Read MMD STATUS 1 to check for fault. */
- status = efx_mdio_read(efx, mmd, MDIO_STAT1);
- if (status & MDIO_STAT1_FAULT) {
- if (fault_fatal) {
- netif_err(efx, hw, efx->net_dev,
- "PHY MMD %d reporting fatal"
- " fault: status %x\n", mmd, status);
- return -EIO;
- } else {
- netif_dbg(efx, hw, efx->net_dev,
- "PHY MMD %d reporting status"
- " %x (expected)\n", mmd, status);
- }
- }
return 0;
}
@@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
return rc;
}
-int efx_mdio_check_mmds(struct efx_nic *efx,
- unsigned int mmd_mask, unsigned int fatal_mask)
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
@@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
- if (mmd_mask & 1) {
- int fault_fatal = fatal_mask & 1;
- if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
- return -EIO;
- }
+ if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+ return -EIO;
mmd_mask = mmd_mask >> 1;
- fatal_mask = fatal_mask >> 1;
mmd++;
}
@@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
"no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
}
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 75791d3d4963..df0703940c83 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
-int efx_mdio_check_mmds(struct efx_nic *efx,
- unsigned int mmd_mask, unsigned int fatal_mask);
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index d38627448c22..e646bfce2d84 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df8665256a..215d5c51bfa0 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.0"
+#define EFX_DRIVER_VERSION "3.1"
#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -63,10 +63,12 @@
/* Checksum generation is a per-queue option in hardware, so each
* queue visible to the networking core is backed by two hardware TX
* queues. */
-#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD 1
-#define EFX_TXQ_TYPES 2
-#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EFX_TXQ_TYPES 4
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
/**
* struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
* @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
unsigned int ptr_mask;
+ bool initialised;
enum efx_flush_state flushed;
/* Members used mainly on the completion path */
@@ -210,15 +214,17 @@ struct efx_tx_queue {
* If both this and page are %NULL, the buffer slot is currently free.
* @page: The associated page buffer, if any.
* If both this and skb are %NULL, the buffer slot is currently free.
- * @data: Pointer to ethernet header
* @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
- struct sk_buff *skb;
- struct page *page;
- char *data;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ } u;
unsigned int len;
+ bool is_page;
};
/**
@@ -358,6 +364,9 @@ struct efx_channel {
unsigned int irq_count;
unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rfs_filters_added;
+#endif
int rx_alloc_level;
int rx_alloc_push_pages;
@@ -377,7 +386,7 @@ struct efx_channel {
bool rx_pkt_csummed;
struct efx_rx_queue rx_queue;
- struct efx_tx_queue tx_queue[2];
+ struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
enum efx_led_mode {
@@ -906,7 +915,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- unsigned long offload_features;
+ u32 offload_features;
u32 reset_world_flags;
};
@@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
- struct efx_tx_queue *tx_queue = channel->tx_queue;
- EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
- return tx_queue->channel ? tx_queue + type : NULL;
+ EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+ type >= EFX_TXQ_TYPES);
+ return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+ return !(tx_queue->efx->net_dev->num_tc < 2 &&
+ tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \
- _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+ efx_tx_queue_used(_tx_queue); \
+ _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
_tx_queue++)
static inline struct efx_rx_queue *
@@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
return &efx->channel[index]->rx_queue;
}
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+ return channel->channel < channel->efx->n_rx_channels;
+}
+
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
- return channel->channel < channel->efx->n_rx_channels ?
- &channel->rx_queue : NULL;
+ EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
}
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = efx_channel_get_rx_queue(channel); \
- _rx_queue; \
- _rx_queue = NULL)
+ if (!efx_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
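A worked example may help here (illustration only, not part of the patch): with EFX_TXQ_TYPES now 4, a hardware TX queue number encodes both its channel and its type flags, as the queue lookups elsewhere in this patch imply.

	/* Sketch of the mapping implied by the definitions above. */
	unsigned channel = tx_queue_number / EFX_TXQ_TYPES;
	unsigned type    = tx_queue_number & (EFX_TXQ_TYPES - 1);

	/* e.g. queue 7 -> channel 1, type EFX_TXQ_TYPE_OFFLOAD |
	 * EFX_TXQ_TYPE_HIGHPRI (checksum offload, high priority). */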
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab68..e8396614daf3 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
/* If EFX_MAX_INT_ERRORS internal errors occur within
* EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
* disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
- efx_oword_t tx_desc_ptr;
struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
tx_queue->flushed = FLUSH_NONE;
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(tx_desc_ptr,
+ EFX_POPULATE_OWORD_10(reg,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_ISCSI_DDIG_EN, 0,
FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- efx_oword_t reg;
-
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
set_bit_le(tx_queue->queue, (void *)&reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
}
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Flush all tx queues in parallel */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_flush_tx_queue(tx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised)
+ efx_flush_tx_queue(tx_queue);
+ }
}
/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
++rx_pending;
}
}
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->flushed != FLUSH_DONE)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
++tx_pending;
}
}
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Mark the queues as all flushed. We're going to return failure
* leading to a reset, or fake up success anyway */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->flushed != FLUSH_DONE)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
netif_err(efx, hw, efx->net_dev,
"tx queue %d flush command timed out\n",
tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
}
/* Register dump */
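An illustrative summary of the pacing choice made above (not part of the patch): FR_BZ_TX_PACE holds the global pace-bin thresholds, while each queue's entry in FR_BZ_TX_PACE_TBL is picked from the values defined in regs.h later in this patch.

	/* Sketch: per-queue pace value as written in efx_nic_init_tx(). */
	unsigned pace = (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
			FFE_BZ_TX_PACE_OFF :		/* 0: never pace */
			FFE_BZ_TX_PACE_RESERVED;	/* 21: fast bin, pace 0 */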
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index eb0586925b51..d9de1b647d41 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/**
* struct siena_nic_data - Siena NIC state
- * @fw_version: Management controller firmware version
- * @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
- u64 fw_version;
- u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
};
-extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-
extern struct efx_nic_type falcon_a1_nic_type;
extern struct efx_nic_type falcon_b0_nic_type;
extern struct efx_nic_type siena_a0_nic_type;
@@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 1dab609757fb..b3b79472421e 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index ea3ae0089315..55f90924247e 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c36..cc2c86b76a7b 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -2907,6 +2907,12 @@
#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
/* DRIVER_EV */
/* Sub-fields of an RX flush completion event */
#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 3925fd621177..c0fdb59030fb 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
*/
#define EFX_RXD_HEAD_ROOM 2
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
{
/* Offset is always within one page, so we don't need to consider
* the page order.
*/
- return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+ return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+ efx->type->rx_buffer_hash_size);
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
return PAGE_SIZE << efx->rx_buffer_order;
}
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
+ if (buf->is_page)
+ return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+ else
+ return ((u8 *)buf->u.skb->data +
+ efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+ /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(buf->data - 4));
+ return __le32_to_cpup((const __le32 *)(eh - 4));
#else
- const u8 *data = (const u8 *)(buf->data - 4);
+ const u8 *data = eh - 4;
return ((u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
struct efx_rx_buffer *rx_buf;
+ struct sk_buff *skb;
int skb_len = efx->rx_buffer_len;
unsigned index, count;
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!rx_buf->skb))
+ rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!skb))
return -ENOMEM;
- rx_buf->page = NULL;
/* Adjust the SKB for padding and checksum */
- skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+ skb_reserve(skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->data = (char *)rx_buf->skb->data;
- rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_buf->is_page = false;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
- rx_buf->data, rx_buf->len,
+ skb->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev,
rx_buf->dma_addr))) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
+ dev_kfree_skb_any(skb);
+ rx_buf->u.skb = NULL;
return -EIO;
}
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
- rx_buf->skb = NULL;
- rx_buf->page = page;
- rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->is_page = true;
++rx_queue->added_count;
++rx_queue->alloc_page_count;
++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->page) {
+ if (rx_buf->is_page && rx_buf->u.page) {
struct efx_rx_page_state *state;
- EFX_BUG_ON_PARANOID(rx_buf->skb);
-
- state = page_address(rx_buf->page);
+ state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
pci_unmap_page(efx->pci_dev,
state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
}
- } else if (likely(rx_buf->skb)) {
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
rx_buf->len, PCI_DMA_FROMDEVICE);
}
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
static void efx_free_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->page) {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- rx_buf->page = NULL;
- } else if (likely(rx_buf->skb)) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
+ if (rx_buf->is_page && rx_buf->u.page) {
+ __free_pages(rx_buf->u.page, efx->rx_buffer_order);
+ rx_buf->u.page = NULL;
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ dev_kfree_skb_any(rx_buf->u.skb);
+ rx_buf->u.skb = NULL;
}
}
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
- struct efx_rx_page_state *state = page_address(rx_buf->page);
+ struct efx_rx_page_state *state = page_address(rx_buf->u.page);
struct efx_rx_buffer *new_buf;
unsigned fill_level, index;
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
}
++state->refcnt;
- get_page(rx_buf->page);
+ get_page(rx_buf->u.page);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
- new_buf->skb = NULL;
- new_buf->page = rx_buf->page;
- new_buf->data = (void *)
- ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+ new_buf->u.page = rx_buf->u.page;
new_buf->len = rx_buf->len;
+ new_buf->is_page = true;
++rx_queue->added_count;
}
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *new_buf;
unsigned index;
- if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
- page_count(rx_buf->page) == 1)
+ if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->u.page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
memcpy(new_buf, rx_buf, sizeof(*new_buf));
- rx_buf->page = NULL;
- rx_buf->skb = NULL;
+ rx_buf->u.page = NULL;
++rx_queue->added_count;
}
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
*/
- *leak_packet = (rx_buf->skb != NULL);
+ *leak_packet = !rx_buf->is_page;
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*/
static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
- bool checksummed)
+ const u8 *eh, bool checksummed)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
/* Pass the skb/page into the GRO engine */
- if (rx_buf->page) {
+ if (rx_buf->is_page) {
struct efx_nic *efx = channel->efx;
- struct page *page = rx_buf->page;
+ struct page *page = rx_buf->u.page;
struct sk_buff *skb;
- EFX_BUG_ON_PARANOID(rx_buf->skb);
- rx_buf->page = NULL;
+ rx_buf->u.page = NULL;
skb = napi_get_frags(napi);
if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(rx_buf);
+ skb->rxhash = efx_rx_buf_hash(eh);
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(rx_buf);
+ efx_rx_buf_offset(efx, rx_buf);
skb_shinfo(skb)->frags[0].size = rx_buf->len;
skb_shinfo(skb)->nr_frags = 1;
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
gro_result = napi_gro_frags(napi);
} else {
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = rx_buf->u.skb;
- EFX_BUG_ON_PARANOID(!skb);
EFX_BUG_ON_PARANOID(!checksummed);
- rx_buf->skb = NULL;
+ rx_buf->u.skb = NULL;
gro_result = napi_gro_receive(napi, skb);
}
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_BUG_ON_PARANOID(!rx_buf->data);
- EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
- EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
/* This allows the refill path to post another buffer.
* EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
- prefetch(rx_buf->data);
+ prefetch(efx_rx_buf_eh(efx, rx_buf));
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len;
+ rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
if (channel->rx_pkt)
__efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
-
- rx_buf->data += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ u8 *eh = efx_rx_buf_eh(efx, rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
- efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+ efx_loopback_rx_packet(efx, eh, rx_buf->len);
efx_free_rx_buffer(efx, rx_buf);
return;
}
- if (rx_buf->skb) {
- prefetch(skb_shinfo(rx_buf->skb));
+ if (!rx_buf->is_page) {
+ skb = rx_buf->u.skb;
- skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
- skb_put(rx_buf->skb, rx_buf->len);
+ prefetch(skb_shinfo(skb));
+
+ skb_reserve(skb, efx->type->rx_buffer_hash_size);
+ skb_put(skb, rx_buf->len);
if (efx->net_dev->features & NETIF_F_RXHASH)
- rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+ skb->rxhash = efx_rx_buf_hash(eh);
/* Move past the ethernet header. rx_buf->data still points
* at the ethernet header */
- rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
- efx->net_dev);
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
- skb_record_rx_queue(rx_buf->skb, channel->channel);
+ skb_record_rx_queue(skb, channel->channel);
}
- if (likely(checksummed || rx_buf->page)) {
- efx_rx_packet_gro(channel, rx_buf, checksummed);
+ if (likely(checksummed || rx_buf->is_page)) {
+ efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
return;
}
/* We now own the SKB */
- skb = rx_buf->skb;
- rx_buf->skb = NULL;
- EFX_BUG_ON_PARANOID(!skb);
+ skb = rx_buf->u.skb;
+ rx_buf->u.skb = NULL;
/* Set the SKB flags */
skb_checksum_none_assert(skb);
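For orientation (illustration only, not part of the patch): after this change the ethernet-header pointer is derived on demand instead of being cached in the removed @data field, and the RX hash, when present, occupies the rx_buffer_hash_size bytes immediately before it.

	/* Sketch of the address arithmetic used by efx_rx_buf_eh(). */
	u8 *eh;

	if (rx_buf->is_page)
		eh = page_address(rx_buf->u.page) +
		     ((unsigned long)rx_buf->dma_addr & (PAGE_SIZE - 1)) +
		     efx->type->rx_buffer_hash_size;
	else
		eh = rx_buf->u.skb->data + efx->type->rx_buffer_hash_size;
	/* efx_rx_buf_hash(eh) then reads the 4 bytes at eh - 4. */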
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f1299..a0f49b348d62 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test both types of TX queue */
+ /* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index aed495a4dad7..dba5456e70f3 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index bf8456176443..e4dd8986b1fe 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
- rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Failed to read MCPU firmware version - rc %d\n", rc);
- goto fail1; /* MCPU absent? */
- }
-
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
- if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
- /* No MCDI operation has been defined to set thresholds */
- netif_err(efx, hw, efx->net_dev,
- "ignoring RX flow control thresholds\n");
-
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
if (rc)
@@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}
-void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- snprintf(buf, len, "%u.%u.%u.%u",
- (unsigned int)(nic_data->fw_version >> 48),
- (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
- (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
- (unsigned int)(nic_data->fw_version & 0xffff));
-}
-
/**************************************************************************
*
* Wake on LAN
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 879b7f6bde3d..71f2e3ebe1c7 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005 Fen Systems Ltd.
- * Copyright 2006 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f102912eba91..efdceb35aaae 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
if (rc < 0)
return rc;
- rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+ rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
}
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da657bf..139801908217 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
+ unsigned index, type;
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
- tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
- skb->ip_summed == CHECKSUM_PARTIAL ?
- EFX_TXQ_TYPE_OFFLOAD : 0);
+ index = skb_get_queue_mapping(skb);
+ type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+ tx_queue = efx_get_tx_queue(efx, index, type);
return efx_enqueue_skb(tx_queue, skb);
}
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES +
+ ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned tc;
+ int rc;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+ return -EINVAL;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ if (num_tc > net_dev->num_tc) {
+ /* Initialise high-priority queues as necessary */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue,
+ channel) {
+ if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+ continue;
+ if (!tx_queue->buffer) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ return rc;
+ }
+ if (!tx_queue->initialised)
+ efx_init_tx_queue(tx_queue);
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+ }
+ } else {
+ /* Reduce number of classes before number of queues */
+ net_dev->num_tc = num_tc;
+ }
+
+ rc = netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+ if (rc)
+ return rc;
+
+ /* Do not destroy high-priority queues when they become
+ * unused. We would have to flush them first, and it is
+ * fairly difficult to flush a subset of TX queues. Leave
+ * it to efx_fini_channels().
+ */
+
+ net_dev->num_tc = num_tc;
+ return 0;
+}
+
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
+ if (!tx_queue->initialised)
+ return;
+
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
+ tx_queue->initialised = false;
+
/* Flush TX queue, remove descriptor ring */
efx_nic_fini_tx(tx_queue);
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
+ if (!tx_queue->buffer)
+ return;
+
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
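A short usage sketch (not part of the patch, assumptions noted inline): efx_setup_tc() follows the ndo_setup_tc() prototype of this kernel generation, so the high-priority queues are reached once a multiqueue-priority qdisc asks for two traffic classes.

	/* Sketch only: the real registration lives in the netdev ops. */
	static const struct net_device_ops efx_netdev_ops_tc_sketch = {
		/* ... other operations elided ... */
		.ndo_setup_tc	= efx_setup_tc,
	};

	/* Typical user-space trigger (assumed example):
	 *   tc qdisc add dev ethX root mqprio num_tc 2
	 */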
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index 351794a79215..d9886addcc99 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
- rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
+ rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e0d63083c3a8..e4dd3a7f304b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2ab..095e52580884 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,10 +32,17 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/ethtool.h>
#include <asm/cacheflush.h>
#include "sh_eth.h"
+#define SH_ETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_RX_ERR| \
+ NETIF_MSG_TX_ERR)
+
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
@@ -817,6 +824,20 @@ static int sh_eth_rx(struct net_device *ndev)
return 0;
}
+static void sh_eth_rcv_snd_disable(u32 ioaddr)
+{
+ /* disable tx and rx */
+ writel(readl(ioaddr + ECMR) &
+ ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(u32 ioaddr)
+{
+ /* enable tx and rx */
+ writel(readl(ioaddr + ECMR) |
+ (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
@@ -843,11 +864,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
- if (!(link_stat & PHY_ST_LINK)) {
- /* Link Down : disable tx and rx */
- writel(readl(ioaddr + ECMR) &
- ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
- } else {
+ if (!(link_stat & PHY_ST_LINK)) {
+ sh_eth_rcv_snd_disable(ioaddr);
+ } else {
/* Link Up */
writel(readl(ioaddr + EESIPR) &
~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
writel(readl(ioaddr + EESIPR) |
DMAC_M_ECI, ioaddr + EESIPR);
/* enable tx and rx */
- writel(readl(ioaddr + ECMR) |
- (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+ sh_eth_rcv_snd_enable(ioaddr);
}
}
}
@@ -867,6 +885,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Write buck end. unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
mdp->stats.tx_aborted_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Abort\n");
}
if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
mdp->stats.rx_frame_errors++;
- dev_err(&ndev->dev, "Receive Frame Overflow\n");
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Abort\n");
}
}
- if (!mdp->cd->no_ade) {
- if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
- intr_status & EESR_TFE)
- mdp->stats.tx_fifo_errors++;
+ if (intr_status & EESR_TDE) {
+ /* Transmit Descriptor Empty int */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+ }
+
+ if (intr_status & EESR_TFE) {
+ /* FIFO underflow */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit FIFO Underflow\n");
}
if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (readl(ioaddr + EDRRR) ^ EDRRR_R)
writel(EDRRR_R, ioaddr + EDRRR);
- dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
+
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
mdp->stats.rx_fifo_errors++;
- dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ }
+
+ if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+ /* Address Error */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Address Error\n");
}
mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->duplex = -1;
}
- if (new_state)
+ if (new_state && netif_msg_link(mdp))
phy_print_status(phydev);
}
@@ -1063,6 +1102,132 @@ static int sh_eth_phy_start(struct net_device *ndev)
return 0;
}
+static int sh_eth_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_ethtool_gset(mdp->phydev, ecmd);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+ u32 ioaddr = ndev->base_addr;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* disable tx and rx */
+ sh_eth_rcv_snd_disable(ioaddr);
+
+ ret = phy_ethtool_sset(mdp->phydev, ecmd);
+ if (ret)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ mdp->duplex = 1;
+ else
+ mdp->duplex = 0;
+
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ioaddr);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_start_aneg(mdp->phydev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_current", "tx_current",
+ "rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return SH_ETH_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i = 0;
+
+ /* device-specific stats */
+ data[i++] = mdp->cur_rx;
+ data[i++] = mdp->cur_tx;
+ data[i++] = mdp->dirty_rx;
+ data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *sh_eth_gstrings_stats,
+ sizeof(sh_eth_gstrings_stats));
+ break;
+ }
+}
+
+static struct ethtool_ops sh_eth_ethtool_ops = {
+ .get_settings = sh_eth_get_settings,
+ .set_settings = sh_eth_set_settings,
+ .nway_reset = sh_eth_nway_reset,
+ .get_msglevel = sh_eth_get_msglevel,
+ .set_msglevel = sh_eth_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sh_eth_get_strings,
+ .get_ethtool_stats = sh_eth_get_ethtool_stats,
+ .get_sset_count = sh_eth_get_sset_count,
+};
+
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
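The ethtool_ops added above become reachable from user space through the SIOCETHTOOL ioctl; for instance, the new .get_msglevel hook is what answers an ETHTOOL_GMSGLVL request. A minimal user-space sketch of such a query (error handling trimmed, interface name taken from argv):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GMSGLVL };
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ev;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("%s: message level 0x%08x\n", argv[1], ev.data);

	close(fd);
	return 0;
}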
@@ -1073,8 +1238,8 @@ static int sh_eth_open(struct net_device *ndev)
ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7764) || \
- defined(CONFIG_CPU_SUBTYPE_SH7757)
+ defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7757)
IRQF_SHARED,
#else
0,
@@ -1123,8 +1288,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- /* worning message out. */
- printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
+ if (netif_msg_timer(mdp))
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
" resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
/* tx_errors count up */
@@ -1167,6 +1332,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
if (!sh_eth_txfree(ndev)) {
+ if (netif_msg_tx_queued(mdp))
+ dev_warn(&ndev->dev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* set function */
ndev->netdev_ops = &sh_eth_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
ndev->watchdog_timeo = TX_TIMEOUT;
+ /* debug message level */
+ mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
mdp->post_rx = POST_RX >> (devno << 1);
mdp->post_fw = POST_FW >> (devno << 1);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 640e368ebeee..84d4167eee9a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
sis_priv->mii_info.reg_num_mask = 0x1f;
/* Get Mac address according to the chip revision */
- pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
+ sis_priv->chipset_rev = pci_dev->revision;
if(netif_msg_probe(sis_priv))
printk(KERN_DEBUG "%s: detected revision %2.2x, "
"trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
/* save our host bridge revision */
dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
if (dev) {
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev);
+ sis_priv->host_bridge_rev = dev->revision;
pci_dev_put(dev);
}
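The sis900 change above relies on the PCI core reading the revision ID at enumeration time and caching it in struct pci_dev, so a second config-space access is unnecessary. A minimal sketch contrasting the two styles (demo_show_revision is a hypothetical helper):

#include <linux/pci.h>

static void demo_show_revision(struct pci_dev *pdev)
{
	u8 rev;

	/* old style: low byte of the class/revision dword from config space */
	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);

	/* new style: value already cached by the PCI core */
	dev_info(&pdev->dev, "revision %02x (cached %02x)\n", rev, pdev->revision);
}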
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98ba736..35b28f42d208 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- /* device is off until link detection */
- netif_carrier_off(dev);
-
return dev;
}
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee17..43654a3bb0ec 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
static struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+ .of_match_table = smc91x_match,
+#endif
},
};
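The smc91x hunks above add an of_match_table so the platform bus can bind the driver to device-tree nodes by "compatible" string. A minimal sketch of that wiring for a hypothetical driver (all names and the compatible string below are made up):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int __devinit demo_drv_probe(struct platform_device *pdev)
{
	/* registers and IRQ come from the matched device-tree node */
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-nic", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.probe	= demo_drv_probe,
	.driver	= {
		.name		= "demo-nic",
		.owner		= THIS_MODULE,
		.of_match_table	= demo_of_match,
	},
};

static int __init demo_init(void)
{
	return platform_driver_register(&demo_driver);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	platform_driver_unregister(&demo_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");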
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ee747919a766..68d48ab6eacf 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -206,68 +206,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_TX_RX
#define RPC_LSB_DEFAULT RPC_LED_100_10
-#elif defined(CONFIG_MACH_LPD79520) || \
- defined(CONFIG_MACH_LPD7A400) || \
- defined(CONFIG_MACH_LPD7A404)
-
-/* The LPD7X_IOBARRIER is necessary to overcome a mismatch between the
- * way that the CPU handles chip selects and the way that the SMC chip
- * expects the chip select to operate. Refer to
- * Documentation/arm/Sharp-LH/IOBarrier for details. The read from
- * IOBARRIER is a byte, in order that we read the least-common
- * denominator. It would be wasteful to read 32 bits from an 8-bit
- * accessible region.
- *
- * There is no explicit protection against interrupts intervening
- * between the writew and the IOBARRIER. In SMC ISR there is a
- * preamble that performs an IOBARRIER in the extremely unlikely event
- * that the driver interrupts itself between a writew to the chip an
- * the IOBARRIER that follows *and* the cache is large enough that the
- * first off-chip access while handing the interrupt is to the SMC
- * chip. Other devices in the same address space as the SMC chip must
- * be aware of the potential for trouble and perform a similar
- * IOBARRIER on entry to their ISR.
- */
-
-#include <mach/constants.h> /* IOBARRIER_VIRT */
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 0
-#define LPD7X_IOBARRIER readb (IOBARRIER_VIRT)
-
-#define SMC_inw(a,r)\
- ({ unsigned short v = readw ((void*) ((a) + (r))); LPD7X_IOBARRIER; v; })
-#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7X_IOBARRIER; })
-
-#define SMC_insw LPD7_SMC_insw
-static inline void LPD7_SMC_insw (unsigned char* a, int r,
- unsigned char* p, int l)
-{
- unsigned short* ps = (unsigned short*) p;
- while (l-- > 0) {
- *ps++ = readw (a + r);
- LPD7X_IOBARRIER;
- }
-}
-
-#define SMC_outsw LPD7_SMC_outsw
-static inline void LPD7_SMC_outsw (unsigned char* a, int r,
- unsigned char* p, int l)
-{
- unsigned short* ps = (unsigned short*) p;
- while (l-- > 0) {
- writew (*ps++, a + r);
- LPD7X_IOBARRIER;
- }
-}
-
-#define SMC_INTERRUPT_PREAMBLE LPD7X_IOBARRIER
-
-#define RPC_LSA_DEFAULT RPC_LED_TX_RX
-#define RPC_LSB_DEFAULT RPC_LED_100_10
-
#elif defined(CONFIG_ARCH_VERSATILE)
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 0a6a5ced3c1c..aa4765803a4c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1242,8 +1242,7 @@ fail_and_cleanup:
/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
* the latter.
*/
-static int __devinit bigmac_sbus_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit bigmac_sbus_probe(struct platform_device *op)
{
struct device *parent = op->dev.parent;
struct platform_device *qec_op;
@@ -1289,7 +1288,7 @@ static const struct of_device_id bigmac_sbus_match[] = {
MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
-static struct of_platform_driver bigmac_sbus_driver = {
+static struct platform_driver bigmac_sbus_driver = {
.driver = {
.name = "sunbmac",
.owner = THIS_MODULE,
@@ -1301,12 +1300,12 @@ static struct of_platform_driver bigmac_sbus_driver = {
static int __init bigmac_init(void)
{
- return of_register_platform_driver(&bigmac_sbus_driver);
+ return platform_driver_register(&bigmac_sbus_driver);
}
static void __exit bigmac_exit(void)
{
- of_unregister_platform_driver(&bigmac_sbus_driver);
+ platform_driver_unregister(&bigmac_sbus_driver);
}
module_init(bigmac_init);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f83937..c1a344829b54 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
if (txmac_stat & MAC_TXSTAT_URUN) {
netdev_err(dev, "TX MAC xmit underrun\n");
- gp->net_stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
netdev_err(dev, "TX MAC max packet size error\n");
- gp->net_stats.tx_errors++;
+ dev->stats.tx_errors++;
}
/* The rest are all cases of one of the 16-bit TX
* counters expiring.
*/
if (txmac_stat & MAC_TXSTAT_NCE)
- gp->net_stats.collisions += 0x10000;
+ dev->stats.collisions += 0x10000;
if (txmac_stat & MAC_TXSTAT_ECE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
if (txmac_stat & MAC_TXSTAT_LCE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
/* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
u32 smac = readl(gp->regs + MAC_SMACHINE);
netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
- gp->net_stats.rx_over_errors++;
- gp->net_stats.rx_fifo_errors++;
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
ret = gem_rxmac_reset(gp);
}
if (rxmac_stat & MAC_RXSTAT_ACE)
- gp->net_stats.rx_frame_errors += 0x10000;
+ dev->stats.rx_frame_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_CCE)
- gp->net_stats.rx_crc_errors += 0x10000;
+ dev->stats.rx_crc_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_LCE)
- gp->net_stats.rx_length_errors += 0x10000;
+ dev->stats.rx_length_errors += 0x10000;
/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
* events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: no buffer for rx frame\n",
gp->dev->name);
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
gp->dev->name);
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
goto do_reset;
}
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
break;
}
gp->tx_skbs[entry] = NULL;
- gp->net_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
entry = NEXT_TX(entry);
}
- gp->net_stats.tx_packets++;
+ dev->stats.tx_packets++;
dev_kfree_skb_irq(skb);
}
gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
static int gem_rx(struct gem *gp, int work_to_do)
{
+ struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
__sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
len = (status & RXDCTRL_BUFSZ) >> 16;
if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len < ETH_ZLEN)
- gp->net_stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len & RXDCTRL_BAD)
- gp->net_stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
/* We'll just return it to GEM. */
drop_it:
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
goto next;
}
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
netif_receive_skb(skb);
- gp->net_stats.rx_packets++;
- gp->net_stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
next:
entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- struct net_device_stats *stats = &gp->net_stats;
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
* so we shield against this
*/
if (gp->running) {
- stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
writel(0, gp->regs + MAC_FCSERR);
- stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+ dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
writel(0, gp->regs + MAC_AERR);
- stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+ dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
writel(0, gp->regs + MAC_LERR);
- stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
- stats->collisions +=
+ dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ dev->stats.collisions +=
(readl(gp->regs + MAC_ECOLL) +
readl(gp->regs + MAC_LCOLL));
writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
- return &gp->net_stats;
+ return &dev->stats;
}
static int gem_set_mac_address(struct net_device *dev, void *addr)
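The sungem conversion above drops the driver's private counter copy in favour of the net_device_stats instance that struct net_device already embeds. A minimal sketch of the resulting pattern (demo_get_stats is a hypothetical hook, not sungem code):

#include <linux/netdevice.h>

static struct net_device_stats *demo_get_stats(struct net_device *dev)
{
	/* counters such as dev->stats.rx_packets are bumped in the fast paths,
	 * so nothing needs to be copied here any more */
	return &dev->stats;
}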
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def6..d225077964e2 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -843,7 +843,7 @@ struct gem_txd {
/* GEM requires that RX descriptors are provided four at a time,
* aligned. Also, the RX ring may not wrap around. This means that
- * there will be at least 4 unused desciptor entries in the middle
+ * there will be at least 4 unused descriptor entries in the middle
* of the RX ring at all times.
*
* Similar to HME, GEM assumes that it can write garbage bytes before
@@ -994,7 +994,6 @@ struct gem {
u32 status;
struct napi_struct napi;
- struct net_device_stats net_stats;
int tx_fifo_sz;
int rx_fifo_sz;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 55bbb9c15d96..eb4f59fb01e9 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3237,11 +3237,15 @@ static void happy_meal_pci_exit(void)
#endif
#ifdef CONFIG_SBUS
-static int __devinit hme_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit hme_sbus_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
- int is_qfe = (match->data != NULL);
+ int is_qfe;
+
+ if (!op->dev.of_match)
+ return -EINVAL;
+ is_qfe = (op->dev.of_match->data != NULL);
if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
is_qfe = 1;
@@ -3292,7 +3296,7 @@ static const struct of_device_id hme_sbus_match[] = {
MODULE_DEVICE_TABLE(of, hme_sbus_match);
-static struct of_platform_driver hme_sbus_driver = {
+static struct platform_driver hme_sbus_driver = {
.driver = {
.name = "hme",
.owner = THIS_MODULE,
@@ -3306,7 +3310,7 @@ static int __init happy_meal_sbus_init(void)
{
int err;
- err = of_register_platform_driver(&hme_sbus_driver);
+ err = platform_driver_register(&hme_sbus_driver);
if (!err)
err = quattro_sbus_register_irqs();
@@ -3315,7 +3319,7 @@ static int __init happy_meal_sbus_init(void)
static void happy_meal_sbus_exit(void)
{
- of_unregister_platform_driver(&hme_sbus_driver);
+ platform_driver_unregister(&hme_sbus_driver);
quattro_sbus_free_irqs();
while (qfe_sbus_list) {
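With the of_platform_driver to platform_driver conversions above, the probe callback no longer receives the matched of_device_id directly; sunhme reads it back through op->dev.of_match, but a driver can equally look it up itself. A minimal sketch of that alternative (demo_match and demo_sbus_probe are hypothetical):

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_match[] = {
	{ .name = "demo", .data = (void *)1, },	/* hypothetical entry */
	{},
};

static int __devinit demo_sbus_probe(struct platform_device *op)
{
	const struct of_device_id *match = of_match_device(demo_match, &op->dev);

	if (!match)
		return -ENODEV;

	/* match->data carries whatever the table entry stored (a flag here) */
	return 0;
}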
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 767e1e2b210d..32a5c7f63c43 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1495,7 +1495,7 @@ fail:
return -ENODEV;
}
-static int __devinit sunlance_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit sunlance_sbus_probe(struct platform_device *op)
{
struct platform_device *parent = to_platform_device(op->dev.parent);
struct device_node *parent_dp = parent->dev.of_node;
@@ -1536,7 +1536,7 @@ static const struct of_device_id sunlance_sbus_match[] = {
MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
-static struct of_platform_driver sunlance_sbus_driver = {
+static struct platform_driver sunlance_sbus_driver = {
.driver = {
.name = "sunlance",
.owner = THIS_MODULE,
@@ -1550,12 +1550,12 @@ static struct of_platform_driver sunlance_sbus_driver = {
/* Find all the lance cards on the system and initialize them */
static int __init sparc_lance_init(void)
{
- return of_register_platform_driver(&sunlance_sbus_driver);
+ return platform_driver_register(&sunlance_sbus_driver);
}
static void __exit sparc_lance_exit(void)
{
- of_unregister_platform_driver(&sunlance_sbus_driver);
+ platform_driver_unregister(&sunlance_sbus_driver);
}
module_init(sparc_lance_init);
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 9536b2f010be..18ecdc303751 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -941,7 +941,7 @@ fail:
return res;
}
-static int __devinit qec_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit qec_sbus_probe(struct platform_device *op)
{
return qec_ether_init(op);
}
@@ -976,7 +976,7 @@ static const struct of_device_id qec_sbus_match[] = {
MODULE_DEVICE_TABLE(of, qec_sbus_match);
-static struct of_platform_driver qec_sbus_driver = {
+static struct platform_driver qec_sbus_driver = {
.driver = {
.name = "qec",
.owner = THIS_MODULE,
@@ -988,12 +988,12 @@ static struct of_platform_driver qec_sbus_driver = {
static int __init qec_init(void)
{
- return of_register_platform_driver(&qec_sbus_driver);
+ return platform_driver_register(&qec_sbus_driver);
}
static void __exit qec_exit(void)
{
- of_unregister_platform_driver(&qec_sbus_driver);
+ platform_driver_unregister(&qec_sbus_driver);
while (root_qec_dev) {
struct sunqec *next = root_qec_dev->next_module;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 06c0e5033656..6be418591df9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -64,10 +64,10 @@
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 116
+#define TG3_MIN_NUM 117
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 3, 2010"
+#define DRV_MODULE_RELDATE "January 25, 2011"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1776,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tg3_phy_cl45_read(tp, MDIO_MMD_AN,
TG3_CL45_D7_EEERES_STAT, &val);
- if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
- val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+ switch (val) {
+ case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_5719:
+ case ASIC_REV_57765:
+ /* Enable SM_DSP clock and tx 6dB coding. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+ /* Turn off SM_DSP clock. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ }
+ /* Fallthrough */
+ case TG3_CL45_D7_EEERES_STAT_LP_100TX:
tp->setlpicnt = 2;
+ }
}
if (!tp->setlpicnt) {
@@ -2968,11 +2988,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
MII_TG3_AUXCTL_ACTL_TX_6DB;
tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
- !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
- tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
- val | MII_TG3_DSP_CH34TP2_HIBW01);
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ /* Fall through */
+ case ASIC_REV_5719:
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ }
val = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -7801,7 +7829,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_DBTMR1_LNKIDLE_2047US);
tw32_f(TG3_CPMU_EEE_DBTMR2,
- TG3_CPMU_DBTMR1_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
@@ -8075,8 +8103,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
/* Setup replenish threshold. */
tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
@@ -8194,8 +8223,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
- val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
}
tw32(TG3_RDMA_RSRVCTRL_REG,
val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8317,7 +8350,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1) {
val = tr32(MSGINT_MODE);
val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
tw32(MSGINT_MODE, val);
@@ -9057,7 +9091,8 @@ static void tg3_ints_init(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
u32 msi_mode = tr32(MSGINT_MODE);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1)
msi_mode |= MSGINT_MODE_MULTIVEC_EN;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
@@ -10833,13 +10868,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
if (loopback_mode == TG3_MAC_LOOPBACK) {
/* HW errata - mac loopback fails in some cases on 5780.
* Normal traffic and PHY loopback are not affected by
- * errata.
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
return 0;
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ mac_mode |= MAC_MODE_PORT_INT_LPBACK;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10861,7 +10899,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
tg3_writephy(tp, MII_TG3_FET_PTEST,
MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10889,6 +10928,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
+
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ break;
+ mdelay(1);
+ }
} else {
return -EINVAL;
}
@@ -10995,14 +11041,19 @@ out:
static int tg3_test_loopback(struct tg3 *tp)
{
int err = 0;
- u32 cpmuctrl = 0;
+ u32 eee_cap, cpmuctrl = 0;
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
+ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
err = tg3_reset_hw(tp, 1);
- if (err)
- return TG3_LOOPBACK_FAILED;
+ if (err) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off gphy autopowerdown. */
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11022,8 +11073,10 @@ static int tg3_test_loopback(struct tg3 *tp)
udelay(10);
}
- if (status != CPMU_MUTEX_GNT_DRIVER)
- return TG3_LOOPBACK_FAILED;
+ if (status != CPMU_MUTEX_GNT_DRIVER) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off link-based power management. */
cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11052,6 +11105,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
tg3_phy_toggle_apd(tp, true);
+done:
+ tp->phy_flags |= eee_cap;
+
return err;
}
@@ -12407,9 +12463,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
}
done:
- device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
- device_set_wakeup_enable(&tp->pdev->dev,
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&tp->pdev->dev,
tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ else
+ device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -13262,7 +13320,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
/* Determine TSO capabilities */
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ ; /* Do nothing. HW bug. */
+ else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13313,7 +13373,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13331,42 +13392,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- u16 word;
-
- pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKSTA,
- &word);
- switch (word & PCI_EXP_LNKSTA_CLS) {
- case PCI_EXP_LNKSTA_CLS_2_5GB:
- word &= PCI_EXP_LNKSTA_NLW;
- word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
- switch (word) {
- case 2:
- tp->pcie_readrq = 2048;
- break;
- case 4:
- tp->pcie_readrq = 1024;
- break;
- }
- break;
-
- case PCI_EXP_LNKSTA_CLS_5_0GB:
- word &= PCI_EXP_LNKSTA_NLW;
- word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
- switch (word) {
- case 1:
- tp->pcie_readrq = 2048;
- break;
- case 2:
- tp->pcie_readrq = 1024;
- break;
- case 4:
- tp->pcie_readrq = 512;
- break;
- }
- }
- }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tp->pcie_readrq = 2048;
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
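The tg3 hunk above replaces the link-status-derived calculation with a fixed value: pcie_set_readrq() programs the maximum read request size in the PCIe Device Control register, and 5719 chips are simply pinned to 2048 bytes. A minimal sketch of the call (demo_tune_readrq is a hypothetical helper):

#include <linux/pci.h>

static void demo_tune_readrq(struct pci_dev *pdev, bool is_5719)
{
	/* must be a power of two between 128 and 4096 bytes */
	int rq = is_5719 ? 2048 : 4096;

	pcie_set_readrq(pdev, rq);
}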
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f528243e1a4f..73884b69b749 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -141,6 +141,7 @@
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
#define CHIPREV_ID_57765_A0 0x57785000
+#define CHIPREV_ID_5719_A0 0x05719000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
#define TG3_CPMU_EEE_DBTMR2 0x000036b8
-#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
#define MII_TG3_DSP_TAP1 0x0001
#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_TAP26 0x001a
+#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
+#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
+#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_CH34TP2 0x4022
#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ecc..e48a80885343 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -63,45 +63,45 @@
* - Other minor stuff
*
* v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
- * network cleanup in 2.3.43pre7 (Tigran & myself)
- * - Minor stuff.
+ * network cleanup in 2.3.43pre7 (Tigran & myself)
+ * - Minor stuff.
*
- * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
- * if no cable/link were present.
+ * v1.5 March 22, 2000 - Fixed another timer bug that would hang the
+ * driver if no cable/link were present.
* - Cosmetic changes.
* - TODO: Port completely to new PCI/DMA API
- * Auto-Neg fallback.
- *
- * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
- * tested it though, as the kernel support is currently
- * broken (2.3.99p4p3).
- * - Updated tlan.txt accordingly.
- * - Adjusted minimum/maximum frame length.
- * - There is now a TLAN website up at
- * http://hp.sourceforge.net/
- *
- * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
- * reports PHY information when used with Donald
- * Beckers userspace MII diagnostics utility.
- *
- * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
- * - Added link information to Auto-Neg and forced
- * modes. When NIC operates with auto-neg the driver
- * will report Link speed & duplex modes as well as
- * link partner abilities. When forced link is used,
- * the driver will report status of the established
- * link.
- * Please read tlan.txt for additional information.
- * - Removed call to check_region(), and used
- * return value of request_region() instead.
+ * Auto-Neg fallback.
+ *
+ * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters.
+ * Haven't tested it though, as the kernel support
+ * is currently broken (2.3.99p4p3).
+ * - Updated tlan.txt accordingly.
+ * - Adjusted minimum/maximum frame length.
+ * - There is now a TLAN website up at
+ * http://hp.sourceforge.net/
+ *
+ * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
+ * reports PHY information when used with Donald
+ * Beckers userspace MII diagnostics utility.
+ *
+ * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
+ * - Added link information to Auto-Neg and forced
+ * modes. When NIC operates with auto-neg the driver
+ * will report Link speed & duplex modes as well as
+ * link partner abilities. When forced link is used,
+ * the driver will report status of the established
+ * link.
+ * Please read tlan.txt for additional information.
+ * - Removed call to check_region(), and used
+ * return value of request_region() instead.
*
* v1.8a May 28, 2000 - Minor updates.
*
* v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
- * - Updated with timer fixes from Andrew Morton.
- * - Fixed module race in TLan_Open.
- * - Added routine to monitor PHY status.
- * - Added activity led support for Proliant devices.
+ * - Updated with timer fixes from Andrew Morton.
+ * - Fixed module race in TLan_Open.
+ * - Added routine to monitor PHY status.
+ * - Added activity led support for Proliant devices.
*
* v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
* like the Compaq NetFlex3/E.
@@ -111,8 +111,8 @@
* hardware probe is done with kernel API and
* TLan_EisaProbe.
* - Adjusted debug information for probing.
- * - Fixed bug that would cause general debug information
- * to be printed after driver removal.
+ * - Fixed bug that would cause general debug
+ * information to be printed after driver removal.
* - Added transmit timeout handling.
* - Fixed OOM return values in tlan_probe.
* - Fixed possible mem leak in tlan_exit
@@ -136,8 +136,8 @@
*
* v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
*
- * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
- * when link can't be established.
+ * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
+ * when link can't be established.
* - Added the bbuf option as a kernel parameter.
* - Fixed ioaddr probe bug.
* - Fixed stupid deadlock with MII interrupts.
@@ -147,28 +147,30 @@
* TLAN v1.0 silicon. This needs to be investigated
* further.
*
- * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
- * interrupt. Thanks goes to
- * Adam Keys <adam@ti.com>
- * Denis Beaudoin <dbeaudoin@ti.com>
- * for providing the patch.
- * - Fixed auto-neg output when using multiple
- * adapters.
- * - Converted to use new taskq interface.
+ * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
+ * interrupt. Thanks goes to
+ * Adam Keys <adam@ti.com>
+ * Denis Beaudoin <dbeaudoin@ti.com>
+ * for providing the patch.
+ * - Fixed auto-neg output when using multiple
+ * adapters.
+ * - Converted to use new taskq interface.
*
- * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
+ * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
*
* Samuel Chessman <chessman@tux.org> New Maintainer!
*
* v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
- * 10T half duplex no loopback
- * Thanks to Gunnar Eikman
+ * 10T half duplex no loopback
+ * Thanks to Gunnar Eikman
*
* Sakari Ailus <sakari.ailus@iki.fi>:
*
* v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
+ * v1.16 Jan 6 2011 - Make checkpatch.pl happy.
+ * v1.17 Jan 6 2011 - Add suspend/resume support.
*
- *******************************************************************************/
+ ******************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
@@ -185,13 +187,11 @@
#include "tlan.h"
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
/* For removing EISA devices */
-static struct net_device *TLan_Eisa_Devices;
+static struct net_device *tlan_eisa_devices;
-static int TLanDevicesInstalled;
+static int tlan_devices_installed;
/* Set speed, duplex and aui settings */
static int aui[MAX_TLAN_BOARDS];
@@ -202,7 +202,8 @@ module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
@@ -218,139 +219,144 @@ static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
-static const char TLanSignature[] = "TLAN";
-static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;
-static const char *media[] = {
- "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
};
static struct board {
- const char *deviceLabel;
- u32 flags;
- u16 addrOfs;
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
} board_info[] = {
{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/P",
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq Netelligent Integrated 10/100 TX UTP",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
- { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
- { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
- { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/E",
- TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
- { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
-static void TLan_EisaProbe( void );
-static void TLan_Eisa_Cleanup( void );
-static int TLan_Init( struct net_device * );
-static int TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int TLan_Close( struct net_device *);
-static struct net_device_stats *TLan_GetStats( struct net_device *);
-static void TLan_SetMulticastList( struct net_device *);
-static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
- int irq, int rev, const struct pci_device_id *ent);
-static void TLan_tx_timeout( struct net_device *dev);
-static void TLan_tx_timeout_work(struct work_struct *work);
-static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
-
-static u32 TLan_HandleTxEOF( struct net_device *, u16 );
-static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32 TLan_HandleRxEOF( struct net_device *, u16 );
-static u32 TLan_HandleDummy( struct net_device *, u16 );
-static u32 TLan_HandleTxEOC( struct net_device *, u16 );
-static u32 TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32 TLan_HandleRxEOC( struct net_device *, u16 );
-
-static void TLan_Timer( unsigned long );
-
-static void TLan_ResetLists( struct net_device * );
-static void TLan_FreeLists( struct net_device * );
-static void TLan_PrintDio( u16 );
-static void TLan_PrintList( TLanList *, char *, int );
-static void TLan_ReadAndClearStats( struct net_device *, int );
-static void TLan_ResetAdapter( struct net_device * );
-static void TLan_FinishReset( struct net_device * );
-static void TLan_SetMac( struct net_device *, int areg, char *mac );
-
-static void TLan_PhyPrint( struct net_device * );
-static void TLan_PhyDetect( struct net_device * );
-static void TLan_PhyPowerDown( struct net_device * );
-static void TLan_PhyPowerUp( struct net_device * );
-static void TLan_PhyReset( struct net_device * );
-static void TLan_PhyStartLink( struct net_device * );
-static void TLan_PhyFinishAutoNeg( struct net_device * );
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(unsigned long);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
-static void TLan_PhyMonitor( struct net_device * );
+static void tlan_phy_monitor(struct net_device *);
#endif
/*
-static int TLan_PhyNop( struct net_device * );
-static int TLan_PhyInternalCheck( struct net_device * );
-static int TLan_PhyInternalService( struct net_device * );
-static int TLan_PhyDp83840aCheck( struct net_device * );
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
*/
-static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void TLan_MiiSendData( u16, u32, unsigned );
-static void TLan_MiiSync( u16 );
-static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
-static void TLan_EeSendStart( u16 );
-static int TLan_EeSendByte( u16, u8, int );
-static void TLan_EeReceiveByte( u16, u8 *, int );
-static int TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
unsigned long addr = (unsigned long)skb;
tag->buffer[9].address = addr;
@@ -358,7 +364,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
}
static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
{
unsigned long addr;
@@ -367,50 +373,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
return (struct sk_buff *) addr;
}
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
NULL,
- TLan_HandleTxEOF,
- TLan_HandleStatOverflow,
- TLan_HandleRxEOF,
- TLan_HandleDummy,
- TLan_HandleTxEOC,
- TLan_HandleStatusCheck,
- TLan_HandleRxEOC
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
};
static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function != NULL &&
- priv->timerType != TLAN_TIMER_ACTIVITY ) {
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = TLan_Timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
priv->timer.data = (unsigned long) dev;
- priv->timerSetAt = jiffies;
- priv->timerType = type;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
mod_timer(&priv->timer, jiffies + ticks);
-} /* TLan_SetTimer */
+}
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
- These functions are more or less common to all Linux network drivers.
+these functions are more or less common to all linux network drivers.
******************************************************************************
*****************************************************************************/
@@ -419,49 +425,117 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
- /***************************************************************
- * tlan_remove_one
- *
- * Returns:
- * Nothing
- * Parms:
- * None
- *
- * Goes through the TLanDevices list and frees the device
- * structs and memory associated with each device (lists
- * and buffers). It also ureserves the IO port regions
- * associated with this device.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ * and buffers). It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata( pdev );
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
- unregister_netdev( dev );
+ unregister_netdev(dev);
- if ( priv->dmaStorage ) {
- pci_free_consistent(priv->pciDev,
- priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev,
+ priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
pci_release_regions(pdev);
#endif
- free_netdev( dev );
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
- pci_set_drvdata( pdev, NULL );
+ return 0;
}
+static int tlan_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, 0, 0);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend NULL
+#define tlan_resume NULL
+
+#endif /* CONFIG_PM */
+
+
static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
.remove = __devexit_p(tlan_remove_one),
+ .suspend = tlan_suspend,
+ .resume = tlan_resume,
};
static int __init tlan_probe(void)
@@ -482,13 +556,13 @@ static int __init tlan_probe(void)
}
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
- TLan_EisaProbe();
+ tlan_eisa_probe();
printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n",
- TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
- tlan_have_pci, tlan_have_eisa);
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
- if (TLanDevicesInstalled == 0) {
+ if (tlan_devices_installed == 0) {
rc = -ENODEV;
goto err_out_pci_unreg;
}
@@ -501,39 +575,39 @@ err_out_pci_free:
}
-static int __devinit tlan_init_one( struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- return TLan_probe1( pdev, -1, -1, 0, ent);
+ return tlan_probe1(pdev, -1, -1, 0, ent);
}
/*
- ***************************************************************
- * tlan_probe1
- *
- * Returns:
- * 0 on success, error code on error
- * Parms:
- * none
- *
- * The name is lower case to fit in with all the rest of
- * the netcard_probe names. This function looks for
- * another TLan based adapter, setting it up with the
- * allocated device struct if one is found.
- * tlan_probe has been ported to the new net API and
- * now allocates its own device structure. This function
- * is also used by modules.
- *
- **************************************************************/
-
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+* none
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
long ioaddr, int irq, int rev,
- const struct pci_device_id *ent )
+ const struct pci_device_id *ent)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
u16 device_id;
int reg, rc = -ENODEV;
@@ -543,7 +617,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
if (rc)
return rc;
- rc = pci_request_regions(pdev, TLanSignature);
+ rc = pci_request_regions(pdev, tlan_signature);
if (rc) {
printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
goto err_out;
@@ -551,7 +625,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
}
#endif /* CONFIG_PCI */
- dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
if (dev == NULL) {
printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
rc = -ENOMEM;
@@ -561,26 +635,28 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv = netdev_priv(dev);
- priv->pciDev = pdev;
+ priv->pci_dev = pdev;
priv->dev = dev;
/* Is this a PCI device? */
if (pdev) {
- u32 pci_io_base = 0;
+ u32 pci_io_base = 0;
priv->adapter = &board_info[ent->driver_data];
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+ printk(KERN_ERR
+ "TLAN: No suitable PCI mapping available.\n");
goto err_out_free_dev;
}
- for ( reg= 0; reg <= 5; reg ++ ) {
+ for (reg = 0; reg <= 5; reg++) {
if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
pci_io_base = pci_resource_start(pdev, reg);
- TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
- pci_io_base);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
break;
}
}
@@ -592,7 +668,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
dev->base_addr = pci_io_base;
dev->irq = pdev->irq;
- priv->adapterRev = pdev->revision;
+ priv->adapter_rev = pdev->revision;
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
@@ -602,11 +678,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
device_id = inw(ioaddr + EISA_ID2);
priv->is_eisa = 1;
if (device_id == 0x20F1) {
- priv->adapter = &board_info[13]; /* NetFlex-3/E */
- priv->adapterRev = 23; /* TLAN 2.3 */
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
} else {
priv->adapter = &board_info[14];
- priv->adapterRev = 10; /* TLAN 1.0 */
+ priv->adapter_rev = 10; /* TLAN 1.0 */
}
dev->base_addr = ioaddr;
dev->irq = irq;
@@ -620,11 +696,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
: (dev->mem_start & 0x18) >> 3;
- if (priv->speed == 0x1) {
+ if (priv->speed == 0x1)
priv->speed = TLAN_SPEED_10;
- } else if (priv->speed == 0x2) {
+ else if (priv->speed == 0x2)
priv->speed = TLAN_SPEED_100;
- }
+
debug = priv->debug = dev->mem_end;
} else {
priv->aui = aui[boards_found];
@@ -635,11 +711,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
/* This will be used when we get an adapter error from
* within our irq handler */
- INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
spin_lock_init(&priv->lock);
- rc = TLan_Init(dev);
+ rc = tlan_init(dev);
if (rc) {
printk(KERN_ERR "TLAN: Could not set up device.\n");
goto err_out_free_dev;
@@ -652,29 +728,29 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
}
- TLanDevicesInstalled++;
+ tlan_devices_installed++;
boards_found++;
/* pdev is NULL if this is an EISA device */
if (pdev)
tlan_have_pci++;
else {
- priv->nextDevice = TLan_Eisa_Devices;
- TLan_Eisa_Devices = dev;
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
tlan_have_eisa++;
}
printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
- dev->name,
- (int) dev->irq,
- (int) dev->base_addr,
- priv->adapter->deviceLabel,
- priv->adapterRev);
+ dev->name,
+ (int) dev->irq,
+ (int) dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
return 0;
err_out_uninit:
- pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -689,22 +765,23 @@ err_out:
}
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
- while( tlan_have_eisa ) {
- dev = TLan_Eisa_Devices;
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
priv = netdev_priv(dev);
- if (priv->dmaStorage) {
- pci_free_consistent(priv->pciDev, priv->dmaSize,
- priv->dmaStorage, priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
- release_region( dev->base_addr, 0x10);
- unregister_netdev( dev );
- TLan_Eisa_Devices = priv->nextDevice;
- free_netdev( dev );
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
tlan_have_eisa--;
}
}
@@ -715,7 +792,7 @@ static void __exit tlan_exit(void)
pci_unregister_driver(&tlan_driver);
if (tlan_have_eisa)
- TLan_Eisa_Cleanup();
+ tlan_eisa_cleanup();
}
@@ -726,24 +803,24 @@ module_exit(tlan_exit);
- /**************************************************************
- * TLan_EisaProbe
- *
- * Returns: 0 on success, 1 otherwise
- *
- * Parms: None
- *
- *
- * This functions probes for EISA devices and calls
- * TLan_probe1 when one is found.
- *
- *************************************************************/
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ * Returns: 0 on success, 1 otherwise
+ *
+ * Parms: None
+ *
+ *
+ * This function probes for EISA devices and calls
+ * tlan_probe1 when one is found.
+ *
+ *************************************************************/
-static void __init TLan_EisaProbe (void)
+static void __init tlan_eisa_probe(void)
{
- long ioaddr;
- int rc = -ENODEV;
- int irq;
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
u16 device_id;
if (!EISA_bus) {
@@ -754,15 +831,16 @@ static void __init TLan_EisaProbe (void)
/* Loop through all slots of the EISA bus */
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
- TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
- (int) ioaddr);
- if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
goto out;
if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +850,326 @@ static void __init TLan_EisaProbe (void)
device_id = inw(ioaddr + EISA_ID2);
if (device_id != 0x20F1 && device_id != 0x40F1) {
- release_region (ioaddr, 0x10);
+ release_region(ioaddr, 0x10);
goto out;
}
- if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */
- release_region (ioaddr, 0x10);
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
goto out2;
}
if (debug == 0x10)
- printk("Found one\n");
+ printk(KERN_INFO "Found one\n");
/* Get irq from board */
- switch (inb(ioaddr + 0xCC0)) {
- case(0x10):
- irq=5;
- break;
- case(0x20):
- irq=9;
- break;
- case(0x40):
- irq=10;
- break;
- case(0x80):
- irq=11;
- break;
- default:
- goto out;
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+ default:
+ goto out;
}
/* Setup the newly found eisa adapter */
- rc = TLan_probe1( NULL, ioaddr, irq,
- 12, NULL);
+ rc = tlan_probe1(NULL, ioaddr, irq,
+ 12, NULL);
continue;
- out:
- if (debug == 0x10)
- printk("None found\n");
- continue;
+out:
+ if (debug == 0x10)
+ printk(KERN_INFO "None found\n");
+ continue;
- out2: if (debug == 0x10)
- printk("Card found but it is not enabled, skipping\n");
- continue;
+out2:
+ if (debug == 0x10)
+ printk(KERN_INFO "Card found but it is not enabled, skipping\n");
+ continue;
}
-} /* TLan_EisaProbe */
+}
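/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * manufacturer ID compared against 0x110E above is Compaq's EISA ID.
 * EISA IDs pack three ASCII letters into 5-bit fields; "CPQ" packs to
 * 0x0e11, which matches the byte-swapped 0x110e the probe above reads
 * with inw().  The helper below is hypothetical and only demonstrates
 * the packing rule.
 */
static u16 demo_eisa_pack_mfg_id(char a, char b, char c)
{
	return ((a - 'A' + 1) << 10) | ((b - 'A' + 1) << 5) | (c - 'A' + 1);
}
/* demo_eisa_pack_mfg_id('C', 'P', 'Q') == 0x0e11 */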
#ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
{
disable_irq(dev->irq);
- TLan_HandleInterrupt(dev->irq, dev);
+ tlan_handle_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
-static const struct net_device_ops TLan_netdev_ops = {
- .ndo_open = TLan_Open,
- .ndo_stop = TLan_Close,
- .ndo_start_xmit = TLan_StartTx,
- .ndo_tx_timeout = TLan_tx_timeout,
- .ndo_get_stats = TLan_GetStats,
- .ndo_set_multicast_list = TLan_SetMulticastList,
- .ndo_do_ioctl = TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_multicast_list = tlan_set_multicast_list,
+ .ndo_do_ioctl = tlan_ioctl,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = TLan_Poll,
+ .ndo_poll_controller = tlan_poll,
#endif
};
- /***************************************************************
- * TLan_Init
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev The structure of the device to be
- * init'ed.
- *
- * This function completes the initialization of the
- * device structure and driver. It reserves the IO
- * addresses, allocates memory for the lists and bounce
- * buffers, retrieves the MAC address from the eeprom
- * and assignes the device's methods.
- *
- **************************************************************/
-
-static int TLan_Init( struct net_device *dev )
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ * and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
{
int dma_size;
- int err;
+ int err;
int i;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
priv = netdev_priv(dev);
- dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
- * ( sizeof(TLanList) );
- priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
- dma_size, &priv->dmaStorageDMA);
- priv->dmaSize = dma_size;
-
- if ( priv->dmaStorage == NULL ) {
- printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n",
- dev->name );
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+ dma_size,
+ &priv->dma_storage_dma);
+ priv->dma_size = dma_size;
+
+ if (priv->dma_storage == NULL) {
+ printk(KERN_ERR
+ "TLAN: Could not allocate lists and buffers for %s.\n",
+ dev->name);
return -ENOMEM;
}
- memset( priv->dmaStorage, 0, dma_size );
- priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
- priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
- priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
- priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+ memset(priv->dma_storage, 0, dma_size);
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
err = 0;
- for ( i = 0; i < 6 ; i++ )
- err |= TLan_EeReadByte( dev,
- (u8) priv->adapter->addrOfs + i,
- (u8 *) &dev->dev_addr[i] );
- if ( err ) {
+ for (i = 0; i < 6 ; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ (u8 *) &dev->dev_addr[i]);
+ if (err) {
printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
- dev->name,
- err );
+ dev->name,
+ err);
}
dev->addr_len = 6;
netif_carrier_off(dev);
/* Device methods */
- dev->netdev_ops = &TLan_netdev_ops;
+ dev->netdev_ops = &tlan_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return 0;
-} /* TLan_Init */
+}
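/*
 * Illustrative sketch (editor's example, not part of this patch): layout
 * of the single coherent DMA block set up in tlan_init() above.  One
 * allocation covers both rings; the RX array starts at the first 8-byte
 * aligned offset and the TX array follows it immediately, so the two
 * _dma handles differ by the size of the RX array:
 *
 *   dma_storage --> | pad to 8 | rx_list[0 .. TLAN_NUM_RX_LISTS-1] |
 *                   | tx_list[0 .. TLAN_NUM_TX_LISTS-1]            |
 *
 *   tx_list_dma == rx_list_dma + sizeof(struct tlan_list) * TLAN_NUM_RX_LISTS
 */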
- /***************************************************************
- * TLan_Open
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev Structure of device to be opened.
- *
- * This routine puts the driver and TLAN adapter in a
- * state where it is ready to send and receive packets.
- * It allocates the IRQ, resets and brings the adapter
- * out of reset, and allows interrupts. It also delays
- * the startup for autonegotiation or sends a Rx GO
- * command to the adapter, as appropriate.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int err;
- priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
- err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
- dev->name, dev );
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
- if ( err ) {
+ if (err) {
pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
- dev->name, dev->irq );
+ dev->name, dev->irq);
return err;
}
init_timer(&priv->timer);
- netif_start_queue(dev);
- /* NOTE: It might not be necessary to read the stats before a
- reset if you don't care what the values are.
- */
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_start(dev);
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
- dev->name, priv->tlanRev );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
return 0;
-} /* TLan_Open */
+}
- /**************************************************************
- * TLan_ioctl
- *
- * Returns:
- * 0 on success, error code otherwise
- * Params:
- * dev structure of device to receive ioctl.
- *
- * rq ifreq structure to hold userspace data.
- *
- * cmd ioctl command.
- *
- *
- *************************************************************/
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
- u32 phy = priv->phy[priv->phyNum];
+ u32 phy = priv->phy[priv->phy_num];
- if (!priv->phyOnline)
+ if (!priv->phy_online)
return -EAGAIN;
- switch(cmd) {
- case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = phy;
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
- case SIOCGMIIREG: /* Read MII PHY register. */
- TLan_MiiReadReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, &data->val_out);
- return 0;
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
- case SIOCSMIIREG: /* Write MII PHY register. */
- TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-} /* tlan_ioctl */
+}
- /***************************************************************
- * TLan_tx_timeout
- *
- * Returns: nothing
- *
- * Params:
- * dev structure of device which timed out
- * during transmit.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
{
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
/* Ok so we timed out, lets see what we can do about it...*/
- TLan_FreeLists( dev );
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue( dev );
+ netif_wake_queue(dev);
}
- /***************************************************************
- * TLan_tx_timeout_work
- *
- * Returns: nothing
- *
- * Params:
- * work work item of device which timed out
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
{
- TLanPrivateInfo *priv =
- container_of(work, TLanPrivateInfo, tlan_tqueue);
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
- TLan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev);
}
- /***************************************************************
- * TLan_StartTx
- *
- * Returns:
- * 0 on success, non-zero on failure.
- * Parms:
- * skb A pointer to the sk_buff containing the
- * frame to be sent.
- * dev The device to send the data on.
- *
- * This function adds a frame to the Tx list to be sent
- * ASAP. First it verifies that the adapter is ready and
- * there is room in the queue. Then it sets up the next
- * available list, copies the frame to the corresponding
- * buffer. If the adapter Tx channel is idle, it gives
- * the adapter a Tx Go command on the list, otherwise it
- * sets the forward address of the previous list to point
- * to this one. Then it frees the sk_buff.
- *
- **************************************************************/
-
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t tail_list_phys;
- TLanList *tail_list;
+ struct tlan_list *tail_list;
unsigned long flags;
unsigned int txlen;
- if ( ! priv->phyOnline ) {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
- dev->name );
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1100,218 +1178,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
return NETDEV_TX_OK;
txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
- tail_list = priv->txList + priv->txTail;
- tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
- if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
- dev->name, priv->txHead, priv->txTail );
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
netif_stop_queue(dev);
- priv->txBusyCount++;
+ priv->tx_busy_count++;
return NETDEV_TX_BUSY;
}
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+ tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data, txlen,
PCI_DMA_TODEVICE);
- TLan_StoreSKB(tail_list, skb);
+ tlan_store_skb(tail_list, skb);
- tail_list->frameSize = (u16) txlen;
+ tail_list->frame_size = (u16) txlen;
tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
spin_lock_irqsave(&priv->lock, flags);
- tail_list->cStat = TLAN_CSTAT_READY;
- if ( ! priv->txInProgress ) {
- priv->txInProgress = 1;
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
- outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
} else {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
- priv->txTail );
- if ( priv->txTail == 0 ) {
- ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
= tail_list_phys;
} else {
- ( priv->txList + ( priv->txTail - 1 ) )->forward
+ (priv->tx_list + (priv->tx_tail - 1))->forward
= tail_list_phys;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
- CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
return NETDEV_TX_OK;
-} /* TLan_StartTx */
+}
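/*
 * Illustrative sketch (editor's example, not part of this patch): tx_tail
 * above is a ring index advanced with CIRC_INC(), which wraps back to 0
 * once it reaches the ring size.  A stand-alone demonstration, assuming a
 * macro equivalent to the driver's (the DEMO_* names are made up):
 */
#define DEMO_RING_SIZE		4
#define DEMO_CIRC_INC(a, b)	do { if (++(a) >= (b)) (a) = 0; } while (0)

static void demo_circ_inc(void)
{
	int tail = 2, i;

	for (i = 0; i < 4; i++)
		DEMO_CIRC_INC(tail, DEMO_RING_SIZE);	/* 3, 0, 1, 2 */
}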
- /***************************************************************
- * TLan_HandleInterrupt
- *
- * Returns:
- * Nothing
- * Parms:
- * irq The line on which the interrupt
- * occurred.
- * dev_id A pointer to the device assigned to
- * this irq line.
- *
- * This function handles an interrupt generated by its
- * assigned TLAN adapter. The function deactivates
- * interrupts on its adapter, records the type of
- * interrupt, executes the appropriate subhandler, and
- * acknowdges the interrupt to the adapter (thus
- * re-enabling adapter interrupts.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 host_int;
u16 type;
spin_lock(&priv->lock);
- host_int = inw( dev->base_addr + TLAN_HOST_INT );
- type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
- if ( type ) {
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
u32 ack;
u32 host_cmd;
- outw( host_int, dev->base_addr + TLAN_HOST_INT );
- ack = TLanIntVector[type]( dev, host_int );
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
- if ( ack ) {
- host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
- outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
}
}
spin_unlock(&priv->lock);
return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
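/*
 * Illustrative sketch (editor's example, not part of this patch): in
 * outline, the handler above decodes the interrupt type from HOST_INT,
 * dispatches through the tlan_int_vector[] table, and acknowledges with
 * a single HOST_CMD write that folds the handler's return value and the
 * type back together:
 *
 *	type = (host_int & TLAN_HI_IT_MASK) >> 2;
 *	ack  = tlan_int_vector[type](dev, host_int);
 *	if (ack)
 *		outl(TLAN_HC_ACK | ack | (type << 18),
 *		     dev->base_addr + TLAN_HOST_CMD);
 */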
- /***************************************************************
- * TLan_Close
- *
- * Returns:
- * An error code.
- * Parms:
- * dev The device structure of the device to
- * close.
- *
- * This function shuts down the adapter. It records any
- * stats, puts the adapter into reset state, deactivates
- * its time as needed, and frees the irq it is using.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
- netif_stop_queue(dev);
priv->neg_be_verbose = 0;
+ tlan_stop(dev);
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
- if ( priv->timer.function != NULL ) {
- del_timer_sync( &priv->timer );
- priv->timer.function = NULL;
- }
-
- free_irq( dev->irq, dev );
- TLan_FreeLists( dev );
- TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
return 0;
-} /* TLan_Close */
+}
- /***************************************************************
- * TLan_GetStats
- *
- * Returns:
- * A pointer to the device's statistics structure.
- * Parms:
- * dev The device structure to return the
- * stats for.
- *
- * This function updates the devices statistics by reading
- * the TLAN chip's onboard registers. Then it returns the
- * address of the statistics structure.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the device's statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
/* Should only read stats if open ? */
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
- priv->rxEocCount );
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
- priv->txBusyCount );
- if ( debug & TLAN_DEBUG_GNRL ) {
- TLan_PrintDio( dev->base_addr );
- TLan_PhyPrint( dev );
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
}
- if ( debug & TLAN_DEBUG_LIST ) {
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
- TLan_PrintList( priv->rxList + i, "RX", i );
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
- TLan_PrintList( priv->txList + i, "TX", i );
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
}
return &dev->stats;
-} /* TLan_GetStats */
+}
- /***************************************************************
- * TLan_SetMulticastList
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure to set the
- * multicast list for.
- *
- * This function sets the TLAN adaptor to various receive
- * modes. If the IFF_PROMISC flag is set, promiscuous
- * mode is acitviated. Otherwise, promiscuous mode is
- * turned off. If the IFF_ALLMULTI flag is set, then
- * the hash table is set to receive all group addresses.
- * Otherwise, the first three multicast addresses are
- * stored in AREG_1-3, and the rest are selected via the
- * hash table, as necessary.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated. Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
u32 hash1 = 0;
@@ -1320,53 +1394,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
u32 offset;
u8 tmp;
- if ( dev->flags & IFF_PROMISC ) {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
} else {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
- if ( dev->flags & IFF_ALLMULTI ) {
- for ( i = 0; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
} else {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- if ( i < 3 ) {
- TLan_SetMac( dev, i + 1,
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
(char *) &ha->addr);
} else {
- offset = TLan_HashFunc((u8 *)&ha->addr);
- if ( offset < 32 )
- hash1 |= ( 1 << offset );
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
else
- hash2 |= ( 1 << ( offset - 32 ) );
+ hash2 |= (1 << (offset - 32));
}
i++;
}
- for ( ; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
}
}
-} /* TLan_SetMulticastList */
+}
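/*
 * Illustrative sketch (editor's example, not part of this patch): multicast
 * addresses beyond the three perfect-match AREG slots are filtered through a
 * 64-bit hash split across the HASH_1/HASH_2 registers.  Given a 6-bit hash
 * value from tlan_hash_func(), the register and bit are chosen like this
 * (demo_set_hash_bit is a made-up helper):
 */
static void demo_set_hash_bit(u32 offset, u32 *hash1, u32 *hash2)
{
	if (offset < 32)
		*hash1 |= 1u << offset;		/* bits  0..31 -> TLAN_HASH_1 */
	else
		*hash2 |= 1u << (offset - 32);	/* bits 32..63 -> TLAN_HASH_2 */
}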
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
- Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
- Programmer's Guide" for more informations on handling interrupts
- generated by TLAN based adapters.
+Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN based adapters.
******************************************************************************
*****************************************************************************/
@@ -1374,46 +1451,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
- /***************************************************************
- * TLan_HandleTxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Tx EOF interrupts which are raised
- * by the adapter when it has completed sending the
- * contents of a buffer. If detemines which list/buffer
- * was completed and resets it. If the buffer was the last
- * in the channel (EOC), then the function checks to see if
- * another buffer is ready to send, and if so, sends a Tx
- * Go command. Finally, the driver activates/continues the
- * activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer. It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 0;
- u16 tmpCStat;
+ u16 tmp_c_stat;
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
- while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- struct sk_buff *skb = TLan_GetSKB(head_list);
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+ pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
@@ -1421,304 +1500,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
- if ( tmpCStat & TLAN_CSTAT_EOC )
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
- dev->stats.tx_bytes += head_list->frameSize;
+ dev->stats.tx_bytes += head_list->frame_size;
- head_list->cStat = TLAN_CSTAT_UNUSED;
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
netif_start_queue(dev);
- CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
- head_list = priv->txList + priv->txHead;
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ printk(KERN_INFO
+ "TLAN: Received interrupt for uncompleted TX frame.\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
- priv->timer.data = (unsigned long) dev;
- priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
- add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleTxEOF */
+}
- /***************************************************************
- * TLan_HandleStatOverflow
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Statistics Overflow interrupt
- * which means that one or more of the TLAN statistics
- * registers has reached 1/2 capacity and needs to be read.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
return 1;
-} /* TLan_HandleStatOverflow */
-
-
-
-
- /***************************************************************
- * TLan_HandleRxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Rx EOF interrupt which
- * indicates a frame has been received by the adapter from
- * the net and the frame has been transferred to memory.
- * The function determines the bounce buffer the frame has
- * been loaded into, creates a new sk_buff big enough to
- * hold the frame, and sends it to protocol stack. It
- * then resets the used buffer and appends it to the end
- * of the list. If the frame was the last in the Rx
- * channel (EOC), the function restarts the receive channel
- * by sending an Rx Go command to the adapter. Then it
- * activates/continues the activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+ * hold the frame, and sends it to protocol stack. It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack = 0;
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
struct sk_buff *skb;
- TLanList *tail_list;
- u16 tmpCStat;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
dma_addr_t head_list_phys;
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
- while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- dma_addr_t frameDma = head_list->buffer[0].address;
- u32 frameSize = head_list->frameSize;
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
struct sk_buff *new_skb;
ack++;
- if (tmpCStat & TLAN_CSTAT_EOC)
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
new_skb = netdev_alloc_skb_ip_align(dev,
TLAN_MAX_FRAME_SIZE + 5);
- if ( !new_skb )
+ if (!new_skb)
goto drop_and_reuse;
- skb = TLan_GetSKB(head_list);
- pci_unmap_single(priv->pciDev, frameDma,
+ skb = tlan_get_skb(head_list);
+ pci_unmap_single(priv->pci_dev, frame_dma,
TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- skb_put( skb, frameSize );
+ skb_put(skb, frame_size);
- dev->stats.rx_bytes += frameSize;
+ dev->stats.rx_bytes += frame_size;
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
- head_list->buffer[0].address = pci_map_single(priv->pciDev,
- new_skb->data,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ head_list->buffer[0].address =
+ pci_map_single(priv->pci_dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(head_list, new_skb);
+ tlan_store_skb(head_list, new_skb);
drop_and_reuse:
head_list->forward = 0;
- head_list->cStat = 0;
- tail_list = priv->rxList + priv->rxTail;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
tail_list->forward = head_list_phys;
- CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
- CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ printk(KERN_INFO
+ "TLAN: Received interrupt for uncompleted RX frame.\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleRxEOF */
+}
- /***************************************************************
- * TLan_HandleDummy
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Dummy interrupt, which is
- * raised whenever a test interrupt is generated by setting
- * the Req_Int bit of HOST_CMD to 1.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
- printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ pr_info("TLAN: Test interrupt on %s.\n", dev->name);
return 1;
-} /* TLan_HandleDummy */
+}
- /***************************************************************
- * TLan_HandleTxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Tx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * functionality, so process EOC events if this is the
- * case.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
- TLanList *head_list;
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 1;
host_int = 0;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
netif_stop_queue(dev);
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
return ack;
-} /* TLan_HandleTxEOC */
+}
- /***************************************************************
- * TLan_HandleStatusCheck
- *
- * Returns:
- * 0 if Adapter check, 1 if Network Status check.
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Adapter Check/Network Status
- * interrupts generated by the adapter. It checks the
- * vector in the HOST_INT register to determine if it is
- * an Adapter Check interrupt. If so, it resets the
- * adapter. Otherwise it clears the status registers
- * and services the PHY.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack;
u32 error;
u8 net_sts;
@@ -1727,92 +1815,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
u16 tlphy_sts;
ack = 1;
- if ( host_int & TLAN_HI_IV_MASK ) {
- netif_stop_queue( dev );
- error = inl( dev->base_addr + TLAN_CH_PARM );
- printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ pr_info("TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
schedule_work(&priv->tlan_tqueue);
netif_wake_queue(dev);
ack = 0;
} else {
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
- phy = priv->phy[priv->phyNum];
-
- net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
- if ( net_sts ) {
- TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
- dev->name, (unsigned) net_sts );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
}
- if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
- ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl |= TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- } else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
- ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl &= ~TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- }
-
- if (debug) {
- TLan_PhyPrint( dev );
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
}
+
+ if (debug)
+ tlan_phy_print(dev);
}
}
return ack;
-} /* TLan_HandleStatusCheck */
+}
- /***************************************************************
- * TLan_HandleRxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Rx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * CSTAT member or a INTDIS register, so if this chip is
- * pre-3.0, process EOC interrupts normally.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or a INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t head_list_phys;
u32 ack = 1;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->rxHead, priv->rxTail );
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
return ack;
-} /* TLan_HandleRxEOC */
+}
@@ -1820,98 +1910,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_Timer
- *
- * Returns:
- * Nothing
- * Parms:
- * data A value given to add timer when
- * add_timer was called.
- *
- * This function handles timed functionality for the
- * TLAN driver. The two current timer uses are for
- * delaying for autonegotionation and driving the ACT LED.
- * - Autonegotiation requires being allowed about
- * 2 1/2 seconds before attempting to transmit a
- * packet. It would be a very bad thing to hang
- * the kernel this long, so the driver doesn't
- * allow transmission 'til after this time, for
- * certain PHYs. It would be much nicer if all
- * PHYs were interrupt-capable like the internal
- * PHY.
- * - The ACT LED, which shows adapter activity, is
- * driven by the driver, and so must be left on
- * for a short period to power up the LED so it
- * can be seen. This delay can be changed by
- * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
- * if desired. 100 ms produces a slightly
- * sluggish response.
- *
- **************************************************************/
-
-static void TLan_Timer( unsigned long data )
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ * delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 elapsed;
unsigned long flags = 0;
priv->timer.function = NULL;
- switch ( priv->timerType ) {
+ switch (priv->timer_type) {
#ifdef MONITOR
- case TLAN_TIMER_LINK_BEAT:
- TLan_PhyMonitor( dev );
- break;
+ case TLAN_TIMER_LINK_BEAT:
+ tlan_phy_monitor(dev);
+ break;
#endif
- case TLAN_TIMER_PHY_PDOWN:
- TLan_PhyPowerDown( dev );
- break;
- case TLAN_TIMER_PHY_PUP:
- TLan_PhyPowerUp( dev );
- break;
- case TLAN_TIMER_PHY_RESET:
- TLan_PhyReset( dev );
- break;
- case TLAN_TIMER_PHY_START_LINK:
- TLan_PhyStartLink( dev );
- break;
- case TLAN_TIMER_PHY_FINISH_AN:
- TLan_PhyFinishAutoNeg( dev );
- break;
- case TLAN_TIMER_FINISH_RESET:
- TLan_FinishReset( dev );
- break;
- case TLAN_TIMER_ACTIVITY:
- spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function == NULL ) {
- elapsed = jiffies - priv->timerSetAt;
- if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK );
- } else {
- priv->timer.function = TLan_Timer;
- priv->timer.expires = priv->timerSetAt
- + TLAN_TIMER_ACT_DELAY;
- spin_unlock_irqrestore(&priv->lock, flags);
- add_timer( &priv->timer );
- break;
- }
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
}
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- default:
- break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
}
-} /* TLan_Timer */
+}
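
The switch above services whichever single one-shot event was last scheduled. Below is a minimal sketch of how such an event could be armed; it assumes the scheduling helper only records the event type and re-arms priv->timer the way the TLAN_TIMER_ACTIVITY branch above does, and it is not the driver's actual tlan_set_timer() implementation.

/* illustrative only: arm the driver's single one-shot timer */
static void example_arm_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);

	priv->timer_type = type;               /* selects the case taken in tlan_timer() */
	priv->timer_set_at = jiffies;          /* consulted by the ACT-LED delay check   */
	priv->timer.function = tlan_timer;     /* old-style (unsigned long) timer hook   */
	priv->timer.data = (unsigned long) dev;
	priv->timer.expires = jiffies + ticks;
	add_timer(&priv->timer);
}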
@@ -1919,39 +2009,39 @@ static void TLan_Timer( unsigned long data )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_ResetLists
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure with the list
- * stuctures to be reset.
- *
- * This routine sets the variables associated with managing
- * the TLAN lists to their initial values.
- *
- **************************************************************/
-
-static void TLan_ResetLists( struct net_device *dev )
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ * structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
dma_addr_t list_phys;
struct sk_buff *skb;
- priv->txHead = 0;
- priv->txTail = 0;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- list->cStat = TLAN_CSTAT_UNUSED;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
list->buffer[0].address = 0;
list->buffer[2].count = 0;
list->buffer[2].address = 0;
@@ -1959,169 +2049,169 @@ static void TLan_ResetLists( struct net_device *dev )
list->buffer[9].address = 0;
}
- priv->rxHead = 0;
- priv->rxTail = TLAN_NUM_RX_LISTS - 1;
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- list_phys = priv->rxListDMA + sizeof(TLanList) * i;
- list->cStat = TLAN_CSTAT_READY;
- list->frameSize = TLAN_MAX_FRAME_SIZE;
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if ( !skb ) {
- pr_err("TLAN: out of memory for received data.\n" );
+ if (!skb) {
+ pr_err("TLAN: out of memory for received data.\n");
break;
}
- list->buffer[0].address = pci_map_single(priv->pciDev,
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(list, skb);
+ tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
- list->forward = list_phys + sizeof(TLanList);
+ list->forward = list_phys + sizeof(struct tlan_list);
}
/* in case ran out of memory early, clear bits */
while (i < TLAN_NUM_RX_LISTS) {
- TLan_StoreSKB(priv->rxList + i, NULL);
+ tlan_store_skb(priv->rx_list + i, NULL);
++i;
}
list->forward = 0;
-} /* TLan_ResetLists */
+}
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
struct sk_buff *skb;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
pci_unmap_single(
- priv->pciDev,
+ priv->pci_dev,
list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
- pci_unmap_single(priv->pciDev,
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(priv->pci_dev,
list->buffer[0].address,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
-} /* TLan_FreeLists */
+}
- /***************************************************************
- * TLan_PrintDio
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base Base IO port of the device of
- * which to print DIO registers.
- *
- * This function prints out all the internal (DIO)
- * registers of a TLAN chip.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
{
u32 data0, data1;
int i;
- printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
- io_base );
- printk( "TLAN: Off. +0 +4\n" );
- for ( i = 0; i < 0x4C; i+= 8 ) {
- data0 = TLan_DioRead32( io_base, i );
- data1 = TLan_DioRead32( io_base, i + 0x4 );
- printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ pr_info("TLAN: Contents of internal registers for io base 0x%04hx.\n",
+ io_base);
+ pr_info("TLAN: Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1);
}
-} /* TLan_PrintDio */
+}
- /***************************************************************
- * TLan_PrintList
- *
- * Returns:
- * Nothing
- * Parms:
- * list A pointer to the TLanList structure to
- * be printed.
- * type A string to designate type of list,
- * "Rx" or "Tx".
- * num The index of the list.
- *
- * This function prints out the contents of the list
- * pointed to by the list parameter.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
int i;
- printk( "TLAN: %s List %d at %p\n", type, num, list );
- printk( "TLAN: Forward = 0x%08x\n", list->forward );
- printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
- printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
- /* for ( i = 0; i < 10; i++ ) { */
- for ( i = 0; i < 2; i++ ) {
- printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
- i, list->buffer[i].count, list->buffer[i].address );
+ pr_info("TLAN: %s List %d at %p\n", type, num, list);
+ pr_info("TLAN: Forward = 0x%08x\n", list->forward);
+ pr_info("TLAN: CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info("TLAN: Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info("TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
}
-} /* TLan_PrintList */
+}
- /***************************************************************
- * TLan_ReadAndClearStats
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * to which to read stats.
- * record Flag indicating whether to add
- *
- * This functions reads all the internal status registers
- * of the TLAN chip, which clears them as a side effect.
- * It then either adds the values to the device's status
- * struct, or discards them, depending on whether record
- * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
- *
- **************************************************************/
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to which to read stats.
+ * record Flag indicating whether to add
+ *
+ * This functions reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
u32 tx_good, tx_under;
u32 rx_good, rx_over;
@@ -2129,41 +2219,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
u32 multi_col, single_col;
u32 excess_col, late_col, loss;
- outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
- def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
- def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
- multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
-
- outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
- late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
- loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-
- if ( record ) {
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
dev->stats.rx_packets += rx_good;
dev->stats.rx_errors += rx_over + crc + code;
dev->stats.tx_packets += tx_good;
dev->stats.tx_errors += tx_under + loss;
- dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
dev->stats.rx_over_errors += rx_over;
dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2264,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
dev->stats.tx_carrier_errors += loss;
}
-} /* TLan_ReadAndClearStats */
+}
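
Each block above follows the same access pattern: point the DIO address register at a statistics word, then read the 32-bit value back one byte at a time. For the first two words the low three bytes form a 24-bit good-frame counter and the top byte a separate 8-bit error counter; the later words split into 16- and 8-bit fields. A sketch of the 24+8 case, factored out purely for illustration (the helper name is invented and not part of the driver):

/* illustrative only: fetch one 24-bit + 8-bit TLAN statistics word */
static u32 example_read_stat(u16 io_base, u16 stat_reg, u8 *top_byte)
{
	u32 low24;

	outw(stat_reg, io_base + TLAN_DIO_ADR);         /* select the statistic   */
	low24  = inb(io_base + TLAN_DIO_DATA);          /* 24-bit counter, LSB    */
	low24 += inb(io_base + TLAN_DIO_DATA + 1) << 8;
	low24 += inb(io_base + TLAN_DIO_DATA + 2) << 16;
	*top_byte = inb(io_base + TLAN_DIO_DATA + 3);   /* separate 8-bit counter */
	return low24;
}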
- /***************************************************************
- * TLan_Reset
- *
- * Returns:
- * 0
- * Parms:
- * dev Pointer to device structure of adapter
- * to be reset.
- *
- * This function resets the adapter and it's physical
- * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
- * Programmer's Guide" for details. The routine tries to
- * implement what is detailed there, though adjustments
- * have been made.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
u32 addr;
u32 data;
u8 data8;
- priv->tlanFullDuplex = false;
- priv->phyOnline=0;
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
netif_carrier_off(dev);
/* 1. Assert reset bit. */
@@ -2216,7 +2307,7 @@ TLan_ResetAdapter( struct net_device *dev )
udelay(1000);
-/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+/* 2. Turn off interrupts. (Probably isn't necessary) */
data = inl(dev->base_addr + TLAN_HOST_CMD);
data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2315,208 @@ TLan_ResetAdapter( struct net_device *dev )
/* 3. Clear AREGs and HASHs. */
- for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
- TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
- }
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
/* 4. Setup NetConfig register. */
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
- outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
- outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
- outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
/* 7. Setup the remaining registers. */
- if ( priv->tlanRev >= 0x30 ) {
+ if (priv->tlan_rev >= 0x30) {
data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
- TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
}
- TLan_PhyDetect( dev );
+ tlan_phy_detect(dev);
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
- if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
data |= TLAN_NET_CFG_BIT;
- if ( priv->aui == 1 ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
- } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = true;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
} else {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
}
}
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_CFG_PHY_EN;
- }
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- TLan_FinishReset( dev );
- } else {
- TLan_PhyPowerDown( dev );
- }
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
-} /* TLan_ResetAdapter */
+}
static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u8 data;
u32 phy;
u8 sio;
u16 status;
u16 partner;
u16 tlphy_ctl;
- u16 tlphy_par;
+ u16 tlphy_par;
u16 tlphy_id1, tlphy_id2;
- int i;
+ int i;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
- if ( priv->tlanFullDuplex ) {
+ if (priv->tlan_full_duplex)
data |= TLAN_NET_CMD_DUPLEX;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_MASK_MASK7;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
- TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
- if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
- ( priv->aui ) ) {
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
status = MII_GS_LINK;
- printk( "TLAN: %s: Link forced.\n", dev->name );
+ pr_info("TLAN: %s: Link forced.\n", dev->name);
} else {
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- if ( (status & MII_GS_LINK) &&
- /* We only support link info on Nat.Sem. PHY's */
- (tlphy_id1 == NAT_SEM_ID1) &&
- (tlphy_id2 == NAT_SEM_ID2) ) {
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
-
- printk( "TLAN: %s: Link active with ", dev->name );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if ((status & MII_GS_LINK) &&
+ /* We only support link info on Nat.Sem. PHYs */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+ pr_info("TLAN: %s: Link active with ", dev->name);
if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
- printk( "forced 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+ pr_info("forced 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
} else {
- printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
- printk("TLAN: Partner capability: ");
- for (i = 5; i <= 10; i++)
- if (partner & (1<<i))
- printk("%s",media[i-5]);
+ pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
+ pr_info("TLAN: Partner capability: ");
+ for (i = 5; i <= 10; i++)
+ if (partner & (1<<i))
+ printk("%s", media[i-5]);
printk("\n");
}
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
#ifdef MONITOR
/* We have link beat..for now anyway */
- priv->link = 1;
- /*Enabling link beat monitoring */
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+ priv->link = 1;
+ /*Enabling link beat monitoring */
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
} else if (status & MII_GS_LINK) {
- printk( "TLAN: %s: Link active\n", dev->name );
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ pr_info("TLAN: %s: Link active\n", dev->name);
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
}
}
- if ( priv->phyNum == 0 ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- tlphy_ctl |= TLAN_TC_INTEN;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
- sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
- sio |= TLAN_NET_SIO_MINTEN;
- TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
- }
-
- if ( status & MII_GS_LINK ) {
- TLan_SetMac( dev, 0, dev->dev_addr );
- priv->phyOnline = 1;
- outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
- outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- }
- outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
netif_carrier_on(dev);
} else {
- printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
- dev->name );
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+ pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n",
+ dev->name);
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
return;
}
- TLan_SetMulticastList(dev);
+ tlan_set_multicast_list(dev);
-} /* TLan_FinishReset */
+}
- /***************************************************************
- * TLan_SetMac
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * on which to change the AREG.
- * areg The AREG to set the address in (0 - 3).
- * mac A pointer to an array of chars. Each
- * element stores one byte of the address.
- * IE, it isn't in ascii.
- *
- * This function transfers a MAC address to one of the
- * TLAN AREGs (address registers). The TLAN chip locks
- * the register on writing to offset 0 and unlocks the
- * register after writing to offset 5. If NULL is passed
- * in mac, then the AREG is filled with 0's.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * i.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
int i;
areg *= 6;
- if ( mac != NULL ) {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, mac[i] );
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
} else {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, 0 );
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
}
-} /* TLan_SetMac */
+}
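
Since the chip locks an AREG on the write to offset 0 and only unlocks it after offset 5, every call writes all six bytes. A short usage sketch follows; the second call mirrors how tlan_finish_reset() above programs the station address, while clearing AREG 1 is shown only as an example:

tlan_set_mac(dev, 1, NULL);            /* fill AREG 1 with zeros      */
tlan_set_mac(dev, 0, dev->dev_addr);   /* program the station address */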
@@ -2432,205 +2524,202 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
******************************************************************************
*****************************************************************************/
- /*********************************************************************
- * TLan_PhyPrint
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the
- * TLAN device having the PHYs to be detailed.
- *
- * This function prints the registers a PHY (aka transceiver).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
- phy = priv->phy[priv->phyNum];
-
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
- } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
- printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3\n" );
- for ( i = 0; i < 0x20; i+= 4 ) {
- printk( "TLAN: 0x%02x", i );
- TLan_MiiReadReg( dev, phy, i, &data0 );
- printk( " 0x%04hx", data0 );
- TLan_MiiReadReg( dev, phy, i + 1, &data1 );
- printk( " 0x%04hx", data1 );
- TLan_MiiReadReg( dev, phy, i + 2, &data2 );
- printk( " 0x%04hx", data2 );
- TLan_MiiReadReg( dev, phy, i + 3, &data3 );
- printk( " 0x%04hx\n", data3 );
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ pr_info("TLAN: Device %s, Unmanaged PHY.\n", dev->name);
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ pr_info("TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy);
+ pr_info("TLAN: Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ pr_info("TLAN: 0x%02x", i);
+ tlan_mii_read_reg(dev, phy, i, &data0);
+ printk(" 0x%04hx", data0);
+ tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ printk(" 0x%04hx", data1);
+ tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ printk(" 0x%04hx", data2);
+ tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ printk(" 0x%04hx\n", data3);
}
} else {
- printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ pr_info("TLAN: Device %s, Invalid PHY.\n", dev->name);
}
-} /* TLan_PhyPrint */
+}
- /*********************************************************************
- * TLan_PhyDetect
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the adapter
- * for which the PHY needs determined.
- *
- * So far I've found that adapters which have external PHYs
- * may also use the internal PHY for part of the functionality.
- * (eg, AUI/Thinnet). This function finds out if this TLAN
- * chip has an internal PHY, and then finds the first external
- * PHY (starting from address 0) if it exists).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+ * (e.g., AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0), if it exists.
+ *
+ ********************************************************************/
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 control;
u16 hi;
u16 lo;
u32 phy;
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- priv->phyNum = 0xFFFF;
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
return;
}
- TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
- if ( hi != 0xFFFF ) {
+ if (hi != 0xffff)
priv->phy[0] = TLAN_PHY_MAX_ADDR;
- } else {
+ else
priv->phy[0] = TLAN_PHY_NONE;
- }
priv->phy[1] = TLAN_PHY_NONE;
- for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
- if ( ( control != 0xFFFF ) ||
- ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
- TLAN_DBG( TLAN_DEBUG_GNRL,
- "PHY found at %02x %04x %04x %04x\n",
- phy, control, hi, lo );
- if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
- ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
priv->phy[1] = phy;
}
}
}
- if ( priv->phy[1] != TLAN_PHY_NONE ) {
- priv->phyNum = 1;
- } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
- priv->phyNum = 0;
- } else {
- printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
- }
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ pr_info("TLAN: Cannot initialize device, no PHY was found!\n");
-} /* TLan_PhyDetect */
+}
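
The loop above treats an MII address as populated when any of the generic control or ID registers reads back as something other than 0xffff; an absent PHY leaves the bus floating, so every read returns all ones. The same test, rewritten as a predicate purely for illustration (the helper name is invented):

/* illustrative only: does anything answer at this MII address? */
static bool example_phy_present(struct net_device *dev, u16 addr)
{
	u16 control, hi, lo;

	tlan_mii_read_reg(dev, addr, MII_GEN_CTL, &control);
	tlan_mii_read_reg(dev, addr, MII_GEN_ID_HI, &hi);
	tlan_mii_read_reg(dev, addr, MII_GEN_ID_LO, &lo);

	return (control != 0xffff) || (hi != 0xffff) || (lo != 0xffff);
}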
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- if ( ( priv->phyNum == 0 ) &&
- ( priv->phy[1] != TLAN_PHY_NONE ) &&
- ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) &&
+ (priv->phy[1] != TLAN_PHY_NONE) &&
+ (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
}
/* Wait for 50 ms and powerup
* This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
-} /* TLan_PhyPowerDown */
+}
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK;
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
/* Wait for 500 ms and reset the
* transceiver. The TLAN docs say both 50 ms and
* 500 ms, so do the longer, just in case.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
-} /* TLan_PhyPowerUp */
+}
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 value;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK | MII_GC_RESET;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- while ( value & MII_GC_RESET ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ while (value & MII_GC_RESET)
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
/* Wait for 500 ms and initialize.
* I don't remember why I wait this long.
* I've changed this to 50ms, as it seems long enough.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
-} /* TLan_PhyReset */
+}
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 ability;
u16 control;
u16 data;
@@ -2638,86 +2727,88 @@ static void TLan_PhyStartLink( struct net_device *dev )
u16 status;
u16 tctl;
- phy = priv->phy[priv->phyNum];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
- if ( ( status & MII_GS_AUTONEG ) &&
- ( ! priv->aui ) ) {
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
ability = status >> 11;
- if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
- } else if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
} else {
/* Set Auto-Neg advertisement */
- TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
/* Enable Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
/* Restart Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
/* Wait for 4 sec for autonegotiation
- * to complete. The max spec time is less than this
- * but the card need additional time to start AN.
- * .5 sec should be plenty extra.
- */
- printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ * to complete. The max spec time is less than this
+ * but the card needs additional time to start AN.
+ * .5 sec should be plenty extra.
+ */
+ pr_info("TLAN: %s: Starting autonegotiation.\n",
+ dev->name);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
}
- if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
- } else if ( priv->phyNum == 0 ) {
+ } else if (priv->phy_num == 0) {
control = 0;
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
- if ( priv->aui ) {
- tctl |= TLAN_TC_AUISEL;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
} else {
- tctl &= ~TLAN_TC_AUISEL;
- if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = true;
+ priv->tlan_full_duplex = true;
}
- if ( priv->speed == TLAN_SPEED_100 ) {
+ if (priv->speed == TLAN_SPEED_100)
control |= MII_GC_SPEEDSEL;
- }
}
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
}
/* Wait for 2 sec to give the transceiver time
* to establish link.
*/
- TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyStartLink */
+}
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 an_adv;
u16 an_lpa;
u16 data;
@@ -2725,115 +2816,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
u16 phy;
u16 status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
- if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ if (!(status & MII_GS_AUTOCMPLT)) {
/* Wait for 8 sec to give the process
* more time. Perhaps we should fail after a while.
*/
- if (!priv->neg_be_verbose++) {
- pr_info("TLAN: Giving autonegotiation more time.\n");
- pr_info("TLAN: Please check that your adapter has\n");
- pr_info("TLAN: been properly connected to a HUB or Switch.\n");
- pr_info("TLAN: Trying to establish link in the background...\n");
- }
- TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ if (!priv->neg_be_verbose++) {
+ pr_info("TLAN: Giving autonegotiation more time.\n");
+ pr_info("TLAN: Please check that your adapter has\n");
+ pr_info("TLAN: been properly connected to a HUB or Switch.\n");
+ pr_info("TLAN: Trying to establish link in the background...\n");
+ }
+ tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
- printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
mode = an_adv & an_lpa & 0x03E0;
- if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = true;
- } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = true;
- }
-
- if ( ( ! ( mode & 0x0180 ) ) &&
- ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
- ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
}
- if ( priv->phyNum == 0 ) {
- if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
- ( an_adv & an_lpa & 0x0040 ) ) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
- MII_GC_AUTOENB | MII_GC_DUPLEX );
- pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" );
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n");
} else {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
- pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ pr_info("TLAN: Starting internal PHY with HALF-DUPLEX\n");
}
}
/* Wait for 100 ms. No reason in particular.
*/
- TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyFinishAutoNeg */
+}
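
The duplex decision above works on the AND of the local advertisement and the link partner ability, masked to the technology bits. Assuming the standard MII advertisement bit layout (this mapping comes from the MII register definitions, not from anything stated in the patch), the tests read as sketched below; full_duplex stands in for priv->tlan_full_duplex:

/* illustrative only: meaning of the duplex tests above */
u16 mode = an_adv & an_lpa & 0x03e0;   /* technologies both ends offer */

if (mode & 0x0100)                     /* 100BASE-TX full duplex       */
	full_duplex = true;
else if (!(mode & 0x0080) &&           /* no 100BASE-TX half duplex... */
	 (mode & 0x0040))              /* ...but 10BASE-T full duplex  */
	full_duplex = true;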
#ifdef MONITOR
- /*********************************************************************
- *
- * TLan_phyMonitor
- *
- * Returns:
- * None
- *
- * Params:
- * dev The device structure of this device.
- *
- *
- * This function monitors PHY condition by reading the status
- * register via the MII bus. This can be used to give info
- * about link changes (up/down), and possible switch to alternate
- * media.
- *
- * ******************************************************************/
-
-void TLan_PhyMonitor( struct net_device *dev )
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 phy_status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- /* Get PHY status register */
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
- /* Check if link has been lost */
- if (!(phy_status & MII_GS_LINK)) {
- if (priv->link) {
- priv->link = 0;
- printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
- netif_carrier_off(dev);
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
- return;
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ netif_carrier_off(dev);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+ return;
}
}
- /* Link restablished? */
- if ((phy_status & MII_GS_LINK) && !priv->link) {
- priv->link = 1;
- printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+ /* Link reestablished? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
netif_carrier_on(dev);
- }
+ }
/* Setup a new monitor */
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}
#endif /* MONITOR */
@@ -2842,47 +2936,48 @@ void TLan_PhyMonitor( struct net_device *dev )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
- These routines are based on the information in Chap. 2 of the
- "ThunderLAN Programmer's Guide", pp. 15-24.
+these routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_MiiReadReg
- *
- * Returns:
- * false if ack received ok
- * true if no ack received or other error
- *
- * Parms:
- * dev The device structure containing
- * The io address and interrupt count
- * for this device.
- * phy The address of the PHY to be queried.
- * reg The register whose contents are to be
- * retrieved.
- * val A pointer to a variable to store the
- * retrieved value.
- *
- * This function uses the TLAN's MII bus to retrieve the contents
- * of a given register on a PHY. It sends the appropriate info
- * and then reads the 16-bit register value from the MII bus via
- * the TLAN SIO register.
- *
- **************************************************************/
-
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+/***************************************************************
+ * tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ * the io address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
- u32 i;
+ u32 i;
bool err;
int minten;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
err = false;
@@ -2892,48 +2987,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
- nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
- if (nack) { /* No ACK, so fake it */
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
for (i = 0; i < 16; i++) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
tmp |= i;
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
}
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
*val = tmp;
@@ -2942,116 +3037,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
return err;
-} /* TLan_MiiReadReg */
+}
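
What the routine above bit-bangs is an ordinary MII management read frame: a 32-clock sync from tlan_mii_sync(), a 01 start, a 10 read opcode, the 5-bit PHY address, the 5-bit register address, a bus turnaround during which the PHY drives the ACK bit, and finally 16 data bits clocked in. Using the helpers from this file, the 14 header bits could just as well be sent in one call, as in this sketch (for illustration only; the function above sends them in four pieces):

u32 hdr = (0x1 << 12)                  /* start bits, 01b        */
	| (0x2 << 10)                  /* read opcode, 10b       */
	| ((phy & 0x1f) << 5)          /* 5-bit PHY address      */
	| (reg & 0x1f);                /* 5-bit register address */

tlan_mii_sync(dev->base_addr);                /* 32 sync clocks     */
tlan_mii_send_data(dev->base_addr, hdr, 14);  /* start+op+phy+reg   */
/* ...then MTXEN is dropped and the 16 data bits are clocked in. */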
- /***************************************************************
- * TLan_MiiSendData
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- * dev The address of the PHY to be queried.
- * data The value to be placed on the MII bus.
- * num_bits The number of bits in data that are to
- * be placed on the MII bus.
- *
- * This function sends on sequence of bits on the MII
- * configuration bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * dev The address of the PHY to be queried.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends one sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
u16 sio;
u32 i;
- if ( num_bits == 0 )
+ if (num_bits == 0)
return;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
- for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
- if ( data & i )
- TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSendData */
+}
- /***************************************************************
- * TLan_MiiSync
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- *
- * This functions syncs all PHYs in terms of the MII configuration
- * bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
{
int i;
u16 sio;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
- for ( i = 0; i < 32; i++ ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSync */
+}
- /***************************************************************
- * TLan_MiiWriteReg
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure for the device
- * to write to.
- * phy The address of the PHY to be written to.
- * reg The register whose contents are to be
- * written.
- * val The value to be written to the register.
- *
- * This function uses the TLAN's MII bus to write the contents of a
- * given register on a PHY. It sends the appropriate info and then
- * writes the 16-bit register value from the MII configuration bus
- * via the TLAN SIO register.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value onto the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
unsigned long flags = 0;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3155,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync( dev->base_addr );
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
- TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
-} /* TLan_MiiWriteReg */
+}
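As with the read path, a minimal sketch of a caller (illustrative only; the driver's own PHY reset path is not part of this hunk). It writes MII_GC_RESET to MII_GEN_CTL and polls until the PHY clears the bit:

static void example_phy_reset(struct net_device *dev, u16 phy)
{
	u16 ctl;

	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, MII_GC_RESET);

	/* the reset bit is self-clearing; a real caller would add a timeout */
	do {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &ctl);
	} while (ctl & MII_GC_RESET);
}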
@@ -3090,229 +3186,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Eeprom routines
+ThunderLAN driver EEPROM routines
- The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
- EEPROM. These functions are based on information in Microchip's
- data sheet. I don't know how well this functions will work with
- other EEPROMs.
+The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM. These functions are based on information in Microchip's
+data sheet. I don't know how well these functions will work with
+other EEPROMs.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_EeSendStart
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- *
- * This function sends a start cycle to an EEPROM attached
- * to a TLAN chip.
- *
- **************************************************************/
-
-static void TLan_EeSendStart( u16 io_base )
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
{
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-
-} /* TLan_EeSendStart */
-
-
-
-
- /***************************************************************
- * TLan_EeSendByte
- *
- * Returns:
- * If the correct ack was received, 0, otherwise 1
- * Parms: io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data The 8 bits of information to
- * send to the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is sent after the ack is
- * read.
- *
- * This function sends a byte on the serial EEPROM line,
- * driving the clock to send each bit. The function then
- * reverses transmission direction and reads an acknowledge
- * bit.
- *
- **************************************************************/
-
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is sent and the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
int err;
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
/* Assume clock is low, tx is enabled; */
- for ( place = 0x80; place != 0; place >>= 1 ) {
- if ( place & data )
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
- if ( ( ! err ) && stop ) {
+ if ((!err) && stop) {
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
}
return err;
-} /* TLan_EeSendByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReceiveByte
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data An address to a char to hold the
- * data sent from the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is received, and no ack is
- * sent.
- *
- * This function receives 8 bits of data from the EEPROM
- * over the serial link. It then sends and ack bit, or no
- * ack and a stop bit. This function is used to retrieve
- * data after the address of a byte in the EEPROM has been
- * sent.
- *
- **************************************************************/
-
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
*data = 0;
/* Assume clock is low, tx is enabled; */
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- for ( place = 0x80; place; place >>= 1 ) {
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
*data |= place;
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- if ( ! stop ) {
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
} else {
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- }
-
-} /* TLan_EeReceiveByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReadByte
- *
- * Returns:
- * No error = 0, else, the stage at which the error
- * occurred.
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * ee_addr The address of the byte in the
- * EEPROM whose contents are to be
- * retrieved.
- * data An address to a char to hold the
- * data obtained from the EEPROM.
- *
- * This function reads a byte of information from an byte
- * cell in the EEPROM.
- *
- **************************************************************/
-
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
int err;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
- int ret=0;
+ int ret = 0;
spin_lock_irqsave(&priv->lock, flags);
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=1;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
goto fail;
}
- err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=2;
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
goto fail;
}
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=3;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
goto fail;
}
- TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
-} /* TLan_EeReadByte */
+}
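To illustrate how the EEPROM helpers compose (a sketch only; example_read_mac and the ee_addr offset are assumptions, in the driver the offset would come from the adapter entry's addr_ofs field):

static int example_read_mac(struct net_device *dev, u8 ee_addr, u8 *mac)
{
	int i, err;

	for (i = 0; i < 6; i++) {
		/* a non-zero return encodes the stage that failed (1..3) */
		err = tlan_ee_read_byte(dev, ee_addr + i, &mac[i]);
		if (err)
			return err;
	}

	return 0;
}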
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e2..5fc98a8e4889 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
********************************************************************/
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/netdevice.h>
@@ -40,8 +40,11 @@
#define TLAN_IGNORE 0
#define TLAN_RECORD 1
-#define TLAN_DBG(lvl, format, args...) \
- do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
#define TLAN_DEBUG_GNRL 0x0001
#define TLAN_DEBUG_TX 0x0002
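The reworked TLAN_DBG macro is used the same way as before; an illustrative call (the format string is made up for the example):

	TLAN_DBG(TLAN_DEBUG_TX, "%s: queuing %d byte frame\n",
		 dev->name, (int)skb->len);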
@@ -50,7 +53,8 @@
#define TLAN_DEBUG_PROBE 0x0010
#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
/*****************************************************************
@@ -70,13 +74,13 @@
#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
#endif
-typedef struct tlan_adapter_entry {
- u16 vendorId;
- u16 deviceId;
- char *deviceLabel;
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
u32 flags;
- u16 addrOfs;
-} TLanAdapterEntry;
+ u16 addr_ofs;
+};
#define TLAN_ADAPTER_NONE 0x00000000
#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
#define TLAN_CSTAT_DP_PR 0x0100
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
u32 count;
u32 address;
-} TLanBufferRef;
+};
-typedef struct tlan_list_tag {
+struct tlan_list {
u32 forward;
- u16 cStat;
- u16 frameSize;
- TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
*
****************************************************************/
-typedef struct tlan_private_tag {
- struct net_device *nextDevice;
- struct pci_dev *pciDev;
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
struct net_device *dev;
- void *dmaStorage;
- dma_addr_t dmaStorageDMA;
- unsigned int dmaSize;
- u8 *padBuffer;
- TLanList *rxList;
- dma_addr_t rxListDMA;
- u8 *rxBuffer;
- dma_addr_t rxBufferDMA;
- u32 rxHead;
- u32 rxTail;
- u32 rxEocCount;
- TLanList *txList;
- dma_addr_t txListDMA;
- u8 *txBuffer;
- dma_addr_t txBufferDMA;
- u32 txHead;
- u32 txInProgress;
- u32 txTail;
- u32 txBusyCount;
- u32 phyOnline;
- u32 timerSetAt;
- u32 timerType;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
struct timer_list timer;
struct board *adapter;
- u32 adapterRev;
+ u32 adapter_rev;
u32 aui;
u32 debug;
u32 duplex;
u32 phy[2];
- u32 phyNum;
+ u32 phy_num;
u32 speed;
- u8 tlanRev;
- u8 tlanFullDuplex;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
spinlock_t lock;
u8 link;
u8 is_eisa;
struct work_struct tlan_tqueue;
u8 neg_be_verbose;
-} TLanPrivateInfo;
+};
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
****************************************************************/
#define TLAN_HOST_CMD 0x00
-#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_GO 0x80000000
#define TLAN_HC_STOP 0x40000000
#define TLAN_HC_ACK 0x20000000
#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_CMD_TRFRAM 0x02
#define TLAN_NET_CMD_TXPACE 0x01
#define TLAN_NET_SIO 0x01
-#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_MINTEN 0x80
#define TLAN_NET_SIO_ECLOK 0x40
#define TLAN_NET_SIO_ETXEN 0x20
#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_MASK_MASK4 0x10
#define TLAN_NET_MASK_RSRVD 0x0F
#define TLAN_NET_CONFIG 0x04
-#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_RCLK 0x8000
#define TLAN_NET_CFG_TCLK 0x4000
#define TLAN_NET_CFG_BIT 0x2000
#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
/* Generic MII/PHY Registers */
#define MII_GEN_CTL 0x00
-#define MII_GC_RESET 0x8000
+#define MII_GC_RESET 0x8000
#define MII_GC_LOOPBK 0x4000
#define MII_GC_SPEEDSEL 0x2000
#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
#define MII_GS_EXTCAP 0x0001
#define MII_GEN_ID_HI 0x02
#define MII_GEN_ID_LO 0x03
-#define MII_GIL_OUI 0xFC00
-#define MII_GIL_MODEL 0x03F0
-#define MII_GIL_REVISION 0x000F
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
#define MII_AN_ADV 0x04
#define MII_AN_LPA 0x05
#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
#define TLAN_TLPHY_ID 0x10
#define TLAN_TLPHY_CTL 0x11
-#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_IGLINK 0x8000
#define TLAN_TC_SWAPOL 0x4000
#define TLAN_TC_AUISEL 0x2000
#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
#define LEVEL1_ID1 0x7810
#define LEVEL1_ID2 0x0000
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
/* Routines to access internal registers. */
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
-} /* TLan_DioRead8 */
+}
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
-} /* TLan_DioRead16 */
+}
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inl(base_addr + TLAN_DIO_DATA);
-} /* TLan_DioRead32 */
+}
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
}
-#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
/*
* given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
*
* The original code was:
*
- * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
*
- * #define XOR8( a, b, c, d, e, f, g, h ) \
- * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
*
- * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- * DA(a,30), DA(a,36), DA(a,42) );
- * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- * DA(a,31), DA(a,37), DA(a,43) ) << 1;
- * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- * DA(a,32), DA(a,38), DA(a,44) ) << 2;
- * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- * DA(a,33), DA(a,39), DA(a,45) ) << 3;
- * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- * DA(a,34), DA(a,40), DA(a,46) ) << 4;
- * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- * DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
*
*/
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
{
- u8 hash;
+ u8 hash;
- hash = (a[0]^a[3]); /* & 077 */
- hash ^= ((a[0]^a[3])>>6); /* & 003 */
- hash ^= ((a[1]^a[4])<<2); /* & 074 */
- hash ^= ((a[1]^a[4])>>4); /* & 017 */
- hash ^= ((a[2]^a[5])<<4); /* & 060 */
- hash ^= ((a[2]^a[5])>>2); /* & 077 */
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
- return hash & 077;
+ return hash & 077;
}
#endif
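A small illustration of the hash helper (the sample address is arbitrary): the six address bytes are folded into a 6-bit value, so the result is always in the range 0..63.

	static const u8 example_mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	u32 bucket = tlan_hash_func(example_mc_addr);	/* always < 64 */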
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd50a0d7..55786a0efc41 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1142,7 +1142,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
{
- unsigned int old_features, features;
+ u32 old_features, features;
old_features = dev->features;
/* Unset features, set them as we chew on the arg. */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6a15e7..7fa5ec2de942 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
-#include <generated/utsrelease.h>
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 715e7b47e7e9..ef041057d9d3 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3740,7 +3740,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
-static int ucc_geth_probe(struct platform_device* ofdev, const struct of_device_id *match)
+static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -3986,7 +3986,7 @@ static struct of_device_id ucc_geth_match[] = {
MODULE_DEVICE_TABLE(of, ucc_geth_match);
-static struct of_platform_driver ucc_geth_driver = {
+static struct platform_driver ucc_geth_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -4008,14 +4008,14 @@ static int __init ucc_geth_init(void)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
- ret = of_register_platform_driver(&ucc_geth_driver);
+ ret = platform_driver_register(&ucc_geth_driver);
return ret;
}
static void __exit ucc_geth_exit(void)
{
- of_unregister_platform_driver(&ucc_geth_driver);
+ platform_driver_unregister(&ucc_geth_driver);
}
module_init(ucc_geth_init);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9fb..5002f5be47be 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
+ USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/vbus-enet.c b/drivers/net/vbus-enet.c
new file mode 100644
index 000000000000..94b86d482cee
--- /dev/null
+++ b/drivers/net/vbus-enet.c
@@ -0,0 +1,1560 @@
+/*
+ * vbus_enet - A virtualized 802.x network device based on the VBUS interface
+ *
+ * Copyright (C) 2009 Novell, Gregory Haskins <ghaskins@novell.com>
+ *
+ * Derived from the SNULL example from the book "Linux Device Drivers" by
+ * Alessandro Rubini, Jonathan Corbet, and Greg Kroah-Hartman, published
+ * by O'Reilly & Associates.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ioq.h>
+#include <linux/vbus_driver.h>
+
+#include <linux/in6.h>
+#include <asm/checksum.h>
+
+#include <linux/venet.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("virtual-ethernet");
+MODULE_VERSION("1");
+
+static int rx_ringlen = 256;
+module_param(rx_ringlen, int, 0444);
+static int tx_ringlen = 256;
+module_param(tx_ringlen, int, 0444);
+static int sg_enabled = 1;
+module_param(sg_enabled, int, 0444);
+
+#define PDEBUG(_dev, fmt, args...) dev_dbg(&(_dev)->dev, fmt, ## args)
+
+#define SG_DESC_SIZE VSG_DESC_SIZE(MAX_SKB_FRAGS)
+
+struct vbus_enet_queue {
+ struct ioq *queue;
+ struct ioq_notifier notifier;
+ unsigned long count;
+};
+
+struct vbus_enet_priv {
+ spinlock_t lock;
+ struct net_device *dev;
+ struct vbus_device_proxy *vdev;
+ struct napi_struct napi;
+ struct vbus_enet_queue rxq;
+ struct {
+ struct vbus_enet_queue veq;
+ struct tasklet_struct task;
+ struct sk_buff_head outstanding;
+ } tx;
+ bool sg;
+ struct {
+ bool enabled;
+ char *pool;
+ } pmtd; /* pre-mapped transmit descriptors */
+ struct {
+ bool enabled;
+ bool linkstate;
+ bool txc;
+ unsigned long evsize;
+ struct vbus_enet_queue veq;
+ struct tasklet_struct task;
+ char *pool;
+ } evq;
+ struct {
+ bool available;
+ char *pool;
+ struct vbus_enet_queue pageq;
+ } l4ro;
+
+ struct sk_buff *(*import)(struct vbus_enet_priv *priv,
+ struct ioq_ring_desc *desc);
+};
+
+static void vbus_enet_tx_reap(struct vbus_enet_priv *priv);
+
+static struct vbus_enet_priv *
+napi_to_priv(struct napi_struct *napi)
+{
+ return container_of(napi, struct vbus_enet_priv, napi);
+}
+
+static int
+queue_init(struct vbus_enet_priv *priv,
+ struct vbus_enet_queue *q,
+ const char *name,
+ int qid,
+ size_t ringsize,
+ void (*func)(struct ioq_notifier *))
+{
+ struct vbus_device_proxy *dev = priv->vdev;
+ int ret;
+ char _name[64];
+
+ if (name)
+ snprintf(_name, sizeof(_name), "%s-%s", priv->dev->name, name);
+
+ ret = vbus_driver_ioq_alloc(dev, name ? _name : NULL, qid, 0,
+ ringsize, &q->queue);
+ if (ret < 0)
+ panic("ioq_alloc failed: %d\n", ret);
+
+ if (func) {
+ q->notifier.signal = func;
+ q->queue->notifier = &q->notifier;
+ }
+
+ q->count = ringsize;
+
+ return 0;
+}
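A sketch of how the probe path might use this helper to set up the receive ring (VENET_QUEUE_RX is an assumed queue id from venet.h and is not shown in this hunk; rx_isr and rx_ringlen are defined elsewhere in this file):

	ret = queue_init(priv, &priv->rxq, "rx", VENET_QUEUE_RX,
			 rx_ringlen, rx_isr);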
+
+static int
+devcall(struct vbus_enet_priv *priv, u32 func, void *data, size_t len)
+{
+ struct vbus_device_proxy *dev = priv->vdev;
+
+ return dev->ops->call(dev, func, data, len, 0);
+}
+
+/*
+ * ---------------
+ * rx descriptors
+ * ---------------
+ */
+
+static void
+rxdesc_alloc(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc, size_t len)
+{
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+
+ len += ETH_HLEN;
+
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+ BUG_ON(!skb);
+
+ skb_reserve(skb, NET_IP_ALIGN); /* align IP on 16B boundary */
+
+ if (priv->l4ro.available) {
+ /*
+ * We will populate an SG descriptor initially with one
+ * IOV filled with an MTU SKB. If the packet needs to be
+ * larger than MTU, the host will grab pages out of the
+ * page-queue and populate additional IOVs
+ */
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)desc->cookie;
+ struct venet_iov *iov = &vsg->iov[0];
+
+ memset(vsg, 0, SG_DESC_SIZE);
+
+ vsg->cookie = (u64)(unsigned long)skb;
+ vsg->count = 1;
+
+ iov->ptr = (u64)__pa(skb->data);
+ iov->len = len;
+ } else {
+ desc->cookie = (u64)(unsigned long)skb;
+ desc->ptr = cpu_to_le64(__pa(skb->data));
+ desc->len = cpu_to_le64(len); /* total length */
+ }
+
+ desc->valid = 1;
+}
+
+static void
+rx_pageq_refill(struct vbus_enet_priv *priv, gfp_t gfp_mask)
+{
+ struct ioq *ioq = priv->l4ro.pageq.queue;
+ struct ioq_iterator iter;
+ int ret, added = 0;
+
+ if (ioq_full(ioq, ioq_idxtype_inuse))
+ /* nothing to do if the pageq is already fully populated */
+ return;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0); /* will never fail unless seriously broken */
+
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty page
+ */
+ while (!iter.desc->sown) {
+ struct page *page = NULL;
+
+ page = alloc_page(gfp_mask);
+
+ if (!page)
+ break;
+
+ added = 1;
+ iter.desc->cookie = (u64)(unsigned long)page;
+ iter.desc->ptr = cpu_to_le64(__pa(page_address(page)));
+ iter.desc->len = cpu_to_le64(PAGE_SIZE);
+
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ if (added)
+ ioq_signal(ioq, 0);
+}
+
+static void
+rx_setup(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->rxq.queue;
+ struct ioq_iterator iter;
+ int ret;
+ int i = 0;
+
+ /*
+ * We want to iterate on the "valid" index. By default the iterator
+ * will not "autoupdate" which means it will not hypercall the host
+ * with our changes. This is good, because we are really just
+ * initializing stuff here anyway. Note that you can always manually
+ * signal the host with ioq_signal() if the autoupdate feature is not
+ * used.
+ */
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0); /* will never fail unless seriously broken */
+
+ /*
+ * Seek to the tail of the valid index (which should be our first
+ * item, since the queue is brand-new)
+ */
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty buffer and mark it valid
+ */
+ while (!iter.desc->valid) {
+ if (priv->l4ro.available) {
+ size_t offset = (i * SG_DESC_SIZE);
+ void *addr = &priv->l4ro.pool[offset];
+
+ iter.desc->ptr = cpu_to_le64(offset);
+ iter.desc->cookie = (u64)(unsigned long)addr;
+ iter.desc->len = cpu_to_le64(SG_DESC_SIZE);
+ }
+
+ rxdesc_alloc(priv, iter.desc, priv->dev->mtu);
+
+ /*
+ * This push operation will simultaneously advance the
+ * valid-head index and increment our position in the queue
+ * by one.
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+
+ i++;
+ }
+
+ if (priv->l4ro.available)
+ rx_pageq_refill(priv, GFP_KERNEL);
+}
+
+static void
+rx_rxq_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->rxq.queue;
+ struct ioq_iterator iter;
+ int ret;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->valid) {
+ struct sk_buff *skb;
+
+ if (priv->l4ro.available) {
+ struct venet_sg *vsg;
+ int i;
+
+ vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+
+ /* skip i=0, since that is the skb->data IOV */
+ for (i = 1; i < vsg->count; i++) {
+ struct venet_iov *iov = &vsg->iov[i];
+ struct page *page = (struct page *)(unsigned long)iov->ptr;
+
+ put_page(page);
+ }
+
+ skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ } else
+ skb = (struct sk_buff *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ dev_kfree_skb(skb);
+ }
+}
+
+static void
+rx_l4ro_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->l4ro.pageq.queue;
+ struct ioq_iterator iter;
+ int ret;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->sown) {
+ struct page *page = (struct page *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ put_page(page);
+ }
+
+ ioq_put(ioq);
+ kfree(priv->l4ro.pool);
+}
+
+static void
+rx_teardown(struct vbus_enet_priv *priv)
+{
+ rx_rxq_teardown(priv);
+
+ if (priv->l4ro.available)
+ rx_l4ro_teardown(priv);
+}
+
+static int
+tx_setup(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->tx.veq.queue;
+ struct ioq_iterator iter;
+ int i;
+ int ret;
+
+ if (!priv->sg)
+ /*
+ * There is nothing to do for a ring that is not using
+ * scatter-gather
+ */
+ return 0;
+
+ /* pre-allocate our descriptor pool if pmtd is enabled */
+ if (priv->pmtd.enabled) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ size_t poollen = SG_DESC_SIZE * priv->tx.veq.count;
+ char *pool;
+ int shmid;
+
+ /* pmtdquery will return the shm-id to use for the pool */
+ ret = devcall(priv, VENET_FUNC_PMTDQUERY, NULL, 0);
+ BUG_ON(ret < 0);
+
+ shmid = ret;
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ priv->pmtd.pool = pool;
+
+ ret = dev->ops->shm(dev, NULL, shmid, 0, pool, poollen,
+ NULL, NULL, 0);
+ BUG_ON(ret < 0);
+ }
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty SG descriptor
+ */
+ for (i = 0; i < priv->tx.veq.count; i++) {
+ struct venet_sg *vsg;
+
+ if (priv->pmtd.enabled) {
+ size_t offset = (i * SG_DESC_SIZE);
+
+ vsg = (struct venet_sg *)&priv->pmtd.pool[offset];
+ iter.desc->ptr = cpu_to_le64(offset);
+ } else {
+ vsg = kzalloc(SG_DESC_SIZE, GFP_KERNEL);
+ if (!vsg)
+ return -ENOMEM;
+
+ iter.desc->ptr = cpu_to_le64(__pa(vsg));
+ }
+
+ iter.desc->cookie = (u64)(unsigned long)vsg;
+ iter.desc->len = cpu_to_le64(SG_DESC_SIZE);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_next, 0, 0);
+ BUG_ON(ret < 0);
+ }
+
+ return 0;
+}
+
+static void
+tx_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->tx.veq.queue;
+ struct ioq_iterator iter;
+ struct sk_buff *skb;
+ int ret;
+
+ /* forcefully free all outstanding transmissions */
+ while ((skb = __skb_dequeue(&priv->tx.outstanding)))
+ dev_kfree_skb(skb);
+
+ if (!priv->sg)
+ /*
+ * There is nothing else to do for a ring that is not using
+ * scatter-gather
+ */
+ return;
+
+ if (priv->pmtd.enabled) {
+ /*
+ * PMTD mode means we only need to free the pool
+ */
+ kfree(priv->pmtd.pool);
+ return;
+ }
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ /* seek to position 0 */
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->cookie) {
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_seek(&iter, ioq_seek_next, 0, 0);
+ BUG_ON(ret < 0);
+
+ kfree(vsg);
+ }
+}
+
+static void
+evq_teardown(struct vbus_enet_priv *priv)
+{
+ if (!priv->evq.enabled)
+ return;
+
+ ioq_put(priv->evq.veq.queue);
+ kfree(priv->evq.pool);
+}
+
+/*
+ * Open and close
+ */
+
+static int
+vbus_enet_open(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = devcall(priv, VENET_FUNC_LINKUP, NULL, 0);
+ BUG_ON(ret < 0);
+
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int
+vbus_enet_stop(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ napi_disable(&priv->napi);
+
+ ret = devcall(priv, VENET_FUNC_LINKDOWN, NULL, 0);
+ BUG_ON(ret < 0);
+
+ return 0;
+}
+
+/*
+ * Configuration changes (passed on by ifconfig)
+ */
+static int
+vbus_enet_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr) {
+ dev_warn(&dev->dev, "Can't change I/O address\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* ignore other fields */
+ return 0;
+}
+
+static void
+vbus_enet_schedule_rx(struct vbus_enet_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable further interrupts */
+ ioq_notify_disable(priv->rxq.queue, 0);
+ __napi_schedule(&priv->napi);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+vbus_enet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ dev->mtu = new_mtu;
+
+ /*
+ * FLUSHRX will cause the device to flush any outstanding
+ * RX buffers. They will appear to come in as 0 length
+ * packets which we can simply discard and replace with new_mtu
+ * buffers for the future.
+ */
+ ret = devcall(priv, VENET_FUNC_FLUSHRX, NULL, 0);
+ BUG_ON(ret < 0);
+
+ vbus_enet_schedule_rx(priv);
+
+ return 0;
+}
+
+static struct sk_buff *
+vbus_enet_l4ro_import(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc)
+{
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)desc->cookie;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ int i;
+
+ rx_pageq_refill(priv, GFP_ATOMIC);
+
+ if (!vsg->len)
+ /*
+ * the device may send a zero-length packet when it is
+ * flushing references on the ring. We can just drop
+ * these on the floor
+ */
+ goto fail;
+
+ /* advance only by the linear portion in IOV[0] */
+ skb_put(skb, vsg->iov[0].len);
+
+ /* skip i=0, since that is the skb->data IOV */
+ for (i = 1; i < vsg->count; i++) {
+ struct venet_iov *iov = &vsg->iov[i];
+ struct page *page = (struct page *)(unsigned long)iov->ptr;
+ skb_frag_t *f = &sinfo->frags[i-1];
+
+ f->page = page;
+ f->page_offset = 0;
+ f->size = iov->len;
+
+ PDEBUG(priv->dev, "SG: Importing %d byte page[%i]\n",
+ f->size, i);
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb->truesize += f->size;
+ sinfo->nr_frags++;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_NEEDS_CSUM
+ && !skb_partial_csum_set(skb, vsg->csum.start,
+ vsg->csum.offset)) {
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_GSO) {
+ PDEBUG(priv->dev, "L4RO packet detected\n");
+
+ switch (vsg->gso.type) {
+ case VENET_GSO_TYPE_TCPV4:
+ sinfo->gso_type = SKB_GSO_TCPV4;
+ break;
+ case VENET_GSO_TYPE_TCPV6:
+ sinfo->gso_type = SKB_GSO_TCPV6;
+ break;
+ case VENET_GSO_TYPE_UDP:
+ sinfo->gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ PDEBUG(priv->dev, "Illegal L4RO type: %d\n",
+ vsg->gso.type);
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_ECN)
+ sinfo->gso_type |= SKB_GSO_TCP_ECN;
+
+ sinfo->gso_size = vsg->gso.size;
+ if (sinfo->gso_size == 0) {
+ PDEBUG(priv->dev, "Illegal L4RO size: %d\n",
+ vsg->gso.size);
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ /*
+ * Header must be checked, and gso_segs
+ * computed.
+ */
+ sinfo->gso_type |= SKB_GSO_DODGY;
+ sinfo->gso_segs = 0;
+ }
+
+ return skb;
+
+fail:
+ dev_kfree_skb(skb);
+
+ return NULL;
+}
+
+static struct sk_buff *
+vbus_enet_flat_import(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc)
+{
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->cookie;
+
+ if (!desc->len) {
+ /*
+ * the device may send a zero-length packet when it is
+ * flushing references on the ring. We can just drop
+ * these on the floor
+ */
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb_put(skb, le64_to_cpu(desc->len));
+
+ return skb;
+}
+
+/*
+ * The poll implementation.
+ */
+static int
+vbus_enet_poll(struct napi_struct *napi, int budget)
+{
+ struct vbus_enet_priv *priv = napi_to_priv(napi);
+ int npackets = 0;
+ struct ioq_iterator iter;
+ int ret;
+
+ PDEBUG(priv->dev, "polling...\n");
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(priv->rxq.queue, &iter, ioq_idxtype_inuse,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * We stop if we have met the quota or there are no more packets.
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side
+ */
+ while ((npackets < budget) && (!iter.desc->sown)) {
+ struct sk_buff *skb;
+
+ skb = priv->import(priv, iter.desc);
+ if (skb) {
+ /* Maintain stats */
+ npackets++;
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += skb->len;
+
+ /* Pass the buffer up to the stack */
+ skb->dev = priv->dev;
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ netif_receive_skb(skb);
+
+ mb();
+ }
+
+ /* Grab a new buffer to put in the ring */
+ rxdesc_alloc(priv, iter.desc, priv->dev->mtu);
+
+ /* Advance the in-use tail */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ PDEBUG(priv->dev, "%d packets received\n", npackets);
+
+ /*
+ * If we processed all packets, we're done; tell the kernel and
+ * reenable ints
+ */
+ if (ioq_empty(priv->rxq.queue, ioq_idxtype_inuse)) {
+ napi_complete(napi);
+ ioq_notify_enable(priv->rxq.queue, 0);
+ ret = 0;
+ } else
+ /* We couldn't process everything. */
+ ret = 1;
+
+ return ret;
+}
+
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static int
+vbus_enet_tx_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ struct ioq_iterator iter;
+ int ret;
+ unsigned long flags;
+
+ PDEBUG(priv->dev, "sending %d bytes\n", skb->len);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ /*
+ * We must flow-control the kernel by disabling the
+ * queue
+ */
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_stop_queue(dev);
+ dev_err(&priv->dev->dev, "tx on full queue bug\n");
+ return 1;
+ }
+
+ /*
+ * We want to iterate on the tail of both the "inuse" and "valid" index
+ * so we specify the "both" index
+ */
+ ret = ioq_iter_init(priv->tx.veq.queue, &iter, ioq_idxtype_both,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+ BUG_ON(iter.desc->sown);
+
+ if (priv->sg) {
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+ struct scatterlist sgl[MAX_SKB_FRAGS+1];
+ struct scatterlist *sg;
+ int count, maxcount = ARRAY_SIZE(sgl);
+
+ sg_init_table(sgl, maxcount);
+
+ memset(vsg, 0, sizeof(*vsg));
+
+ vsg->cookie = (u64)(unsigned long)skb;
+ vsg->len = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vsg->flags |= VENET_SG_FLAG_NEEDS_CSUM;
+ vsg->csum.start = skb->csum_start - skb_headroom(skb);
+ vsg->csum.offset = skb->csum_offset;
+ }
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ vsg->flags |= VENET_SG_FLAG_GSO;
+
+ vsg->gso.hdrlen = skb_headlen(skb);
+ vsg->gso.size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vsg->gso.type = VENET_GSO_TYPE_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vsg->gso.type = VENET_GSO_TYPE_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vsg->gso.type = VENET_GSO_TYPE_UDP;
+ else
+ panic("Virtual-Ethernet: unknown GSO type " \
+ "0x%x\n", sinfo->gso_type);
+
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vsg->flags |= VENET_SG_FLAG_ECN;
+ }
+
+ count = skb_to_sgvec(skb, sgl, 0, skb->len);
+
+ BUG_ON(count > maxcount);
+
+ for (sg = &sgl[0]; sg; sg = sg_next(sg)) {
+ struct venet_iov *iov = &vsg->iov[vsg->count++];
+
+ iov->len = sg->length;
+ iov->ptr = (u64)sg_phys(sg);
+ }
+
+ iter.desc->len = cpu_to_le64(VSG_DESC_SIZE(vsg->count));
+
+ } else {
+ /*
+ * non scatter-gather mode: simply put the skb right onto the
+ * ring.
+ */
+ iter.desc->cookie = (u64)(unsigned long)skb;
+ iter.desc->len = cpu_to_le64(skb->len);
+ iter.desc->ptr = cpu_to_le64(__pa(skb->data));
+ }
+
+ iter.desc->valid = 1;
+
+ priv->dev->stats.tx_packets++;
+ priv->dev->stats.tx_bytes += skb->len;
+
+ skb_queue_tail(&priv->tx.outstanding, skb);
+
+ /*
+ * This advances both indexes together implicitly, and then
+ * signals the south side to consume the packet
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+
+ dev->trans_start = jiffies; /* save the timestamp */
+
+ if (ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ /*
+ * If the queue is congested, we must flow-control the kernel
+ */
+ PDEBUG(priv->dev, "backpressure tx queue\n");
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* assumes priv->lock held */
+static void
+vbus_enet_skb_complete(struct vbus_enet_priv *priv, struct sk_buff *skb)
+{
+ PDEBUG(priv->dev, "completed sending %d bytes\n",
+ skb->len);
+
+ skb_unlink(skb, &priv->tx.outstanding);
+ dev_kfree_skb(skb);
+}
+
+/*
+ * reclaim any outstanding completed tx packets
+ *
+ * assumes priv->lock held
+ */
+static struct sk_buff *
+vbus_enet_tx_reap_one(struct vbus_enet_priv *priv)
+{
+ struct sk_buff *skb = NULL;
+ struct ioq_iterator iter;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /*
+ * We want to iterate on the head of the valid index, but we
+ * do not want the iter_pop (below) to flip the ownership, so
+ * we set the NOFLIPOWNER option
+ */
+ ret = ioq_iter_init(priv->tx.veq.queue, &iter, ioq_idxtype_valid,
+ IOQ_ITER_NOFLIPOWNER);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ if (iter.desc->valid && !iter.desc->sown) {
+
+ if (priv->sg) {
+ struct venet_sg *vsg;
+
+ vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+ skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ } else
+ skb = (struct sk_buff *)(unsigned long)iter.desc->cookie;
+
+ /* Reset the descriptor */
+ iter.desc->valid = 0;
+
+ /* Advance the valid-index head */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ /*
+ * If we were previously stopped due to flow control, restart the
+ * processing
+ */
+ if (netif_queue_stopped(priv->dev)
+ && !ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ PDEBUG(priv->dev, "re-enabling tx queue\n");
+ netif_wake_queue(priv->dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return skb;
+}
+
+static void
+vbus_enet_tx_reap(struct vbus_enet_priv *priv)
+{
+ struct sk_buff *skb;
+
+ while ((skb = vbus_enet_tx_reap_one(priv))) {
+ if (!priv->evq.txc)
+ /*
+ * We are responsible for freeing the packet upon
+ * reap if TXC is not enabled
+ */
+ vbus_enet_skb_complete(priv, skb);
+ }
+}
+
+static void
+vbus_enet_timeout(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+
+ dev_dbg(&dev->dev, "Transmit timeout\n");
+
+ vbus_enet_tx_reap(priv);
+}
+
+static void
+rx_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+ struct net_device *dev;
+
+ priv = container_of(notifier, struct vbus_enet_priv, rxq.notifier);
+ dev = priv->dev;
+
+ if (!ioq_empty(priv->rxq.queue, ioq_idxtype_inuse))
+ vbus_enet_schedule_rx(priv);
+}
+
+static void
+deferred_tx_isr(unsigned long data)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)data;
+
+ PDEBUG(priv->dev, "deferred_tx_isr\n");
+
+ vbus_enet_tx_reap(priv);
+
+ ioq_notify_enable(priv->tx.veq.queue, 0);
+}
+
+static void
+tx_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+
+ priv = container_of(notifier, struct vbus_enet_priv, tx.veq.notifier);
+
+ PDEBUG(priv->dev, "tx_isr\n");
+
+ ioq_notify_disable(priv->tx.veq.queue, 0);
+ tasklet_schedule(&priv->tx.task);
+}
+
+static void
+evq_linkstate_event(struct vbus_enet_priv *priv,
+ struct venet_event_header *header)
+{
+ struct venet_event_linkstate *event =
+ (struct venet_event_linkstate *)header;
+
+ switch (event->state) {
+ case 0:
+ netif_carrier_off(priv->dev);
+ break;
+ case 1:
+ netif_carrier_on(priv->dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+evq_txc_event(struct vbus_enet_priv *priv,
+ struct venet_event_header *header)
+{
+ struct venet_event_txc *event =
+ (struct venet_event_txc *)header;
+
+ vbus_enet_tx_reap(priv);
+
+ vbus_enet_skb_complete(priv, (struct sk_buff *)(unsigned long)event->cookie);
+}
+
+static void
+deferred_evq_isr(unsigned long data)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)data;
+ int nevents = 0;
+ struct ioq_iterator iter;
+ int ret;
+
+ PDEBUG(priv->dev, "evq: polling...\n");
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(priv->evq.veq.queue, &iter, ioq_idxtype_inuse,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side
+ */
+ while (!iter.desc->sown) {
+ struct venet_event_header *header;
+
+ header = (struct venet_event_header *)(unsigned long)iter.desc->cookie;
+
+ switch (header->id) {
+ case VENET_EVENT_LINKSTATE:
+ evq_linkstate_event(priv, header);
+ break;
+ case VENET_EVENT_TXC:
+ evq_txc_event(priv, header);
+ break;
+ default:
+ panic("venet: unexpected event id:%d of size %d\n",
+ header->id, header->size);
+ break;
+ }
+
+ memset((void *)(unsigned long)iter.desc->cookie, 0, priv->evq.evsize);
+
+ /* Advance the in-use tail */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ nevents++;
+ }
+
+ PDEBUG(priv->dev, "%d events received\n", nevents);
+
+ ioq_notify_enable(priv->evq.veq.queue, 0);
+}
+
+static void
+evq_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+
+ priv = container_of(notifier, struct vbus_enet_priv, evq.veq.notifier);
+
+ PDEBUG(priv->dev, "evq_isr\n");
+
+ ioq_notify_disable(priv->evq.veq.queue, 0);
+ tasklet_schedule(&priv->evq.task);
+}
+
+static int
+vbus_enet_sg_negcap(struct vbus_enet_priv *priv)
+{
+ struct net_device *dev = priv->dev;
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (sg_enabled) {
+ caps.gid = VENET_CAP_GROUP_SG;
+ caps.bits |= (VENET_CAP_SG|VENET_CAP_TSO4|VENET_CAP_TSO6
+ |VENET_CAP_ECN|VENET_CAP_PMTD);
+ /* note: exclude UFO for now due to stack bug */
+ }
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0)
+ return ret;
+
+ if (caps.bits & VENET_CAP_SG) {
+ priv->sg = true;
+
+ dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM|NETIF_F_FRAGLIST;
+
+ if (caps.bits & VENET_CAP_TSO4)
+ dev->features |= NETIF_F_TSO;
+ if (caps.bits & VENET_CAP_UFO)
+ dev->features |= NETIF_F_UFO;
+ if (caps.bits & VENET_CAP_TSO6)
+ dev->features |= NETIF_F_TSO6;
+ if (caps.bits & VENET_CAP_ECN)
+ dev->features |= NETIF_F_TSO_ECN;
+
+ if (caps.bits & VENET_CAP_PMTD)
+ priv->pmtd.enabled = true;
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_evq_negcap(struct vbus_enet_priv *priv, unsigned long count)
+{
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ caps.gid = VENET_CAP_GROUP_EVENTQ;
+ caps.bits |= VENET_CAP_EVQ_LINKSTATE;
+ caps.bits |= VENET_CAP_EVQ_TXC;
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0)
+ return ret;
+
+ if (caps.bits) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ struct venet_eventq_query query;
+ size_t poollen;
+ struct ioq_iterator iter;
+ char *pool;
+ int i;
+
+ priv->evq.enabled = true;
+
+ if (caps.bits & VENET_CAP_EVQ_LINKSTATE) {
+ /*
+ * We will assume there is no carrier until we get
+ * an event telling us otherwise
+ */
+ netif_carrier_off(priv->dev);
+ priv->evq.linkstate = true;
+ }
+
+ if (caps.bits & VENET_CAP_EVQ_TXC)
+ priv->evq.txc = true;
+
+ memset(&query, 0, sizeof(query));
+
+ ret = devcall(priv, VENET_FUNC_EVQQUERY, &query, sizeof(query));
+ if (ret < 0)
+ return ret;
+
+ priv->evq.evsize = query.evsize;
+ poollen = query.evsize * count;
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ priv->evq.pool = pool;
+
+ ret = dev->ops->shm(dev, NULL, query.dpid, 0,
+ pool, poollen, NULL, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ queue_init(priv, &priv->evq.veq, "evq",
+ query.qid, count, evq_isr);
+
+ ret = ioq_iter_init(priv->evq.veq.queue,
+ &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /* Now populate each descriptor with an empty event */
+ for (i = 0; i < count; i++) {
+ size_t offset = (i * query.evsize);
+ void *addr = &priv->evq.pool[offset];
+
+ iter.desc->ptr = cpu_to_le64(offset);
+ iter.desc->cookie = (u64)(unsigned long)addr;
+ iter.desc->len = cpu_to_le64(query.evsize);
+
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ /* Finally, enable interrupts */
+ tasklet_init(&priv->evq.task, deferred_evq_isr,
+ (unsigned long)priv);
+ ioq_notify_enable(priv->evq.veq.queue, 0);
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_l4ro_negcap(struct vbus_enet_priv *priv, unsigned long count)
+{
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ caps.gid = VENET_CAP_GROUP_L4RO;
+ caps.bits |= (VENET_CAP_SG|VENET_CAP_TSO4|VENET_CAP_TSO6
+ |VENET_CAP_ECN);
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0) {
+ printk(KERN_ERR "Error negotiating L4RO: %d\n", ret);
+ return ret;
+ }
+
+ if (caps.bits & VENET_CAP_SG) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ size_t poollen = SG_DESC_SIZE * count;
+ struct venet_l4ro_query query;
+ char *pool;
+
+ memset(&query, 0, sizeof(query));
+
+ ret = devcall(priv, VENET_FUNC_L4ROQUERY, &query, sizeof(query));
+ if (ret < 0) {
+ printk(KERN_ERR "Error querying L4RO: %d\n", ret);
+ return ret;
+ }
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ /*
+ * pre-mapped descriptor pool
+ */
+ ret = dev->ops->shm(dev, NULL, query.dpid, 0,
+ pool, poollen, NULL, NULL, 0);
+ if (ret < 0) {
+ printk(KERN_ERR "Error registering L4RO pool: %d\n",
+ ret);
+ kfree(pool);
+ return ret;
+ }
+
+ /*
+ * page-queue: contains a ring of arbitrary pages for
+ * consumption by the host for when the SG::IOV count exceeds
+ * one MTU frame. All we need to do is keep it populated
+ * with free pages.
+ */
+ queue_init(priv, &priv->l4ro.pageq, "pageq", query.pqid,
+ count, NULL);
+
+ priv->l4ro.pool = pool;
+ priv->l4ro.available = true;
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_negcap(struct vbus_enet_priv *priv)
+{
+ int ret;
+
+ ret = vbus_enet_sg_negcap(priv);
+ if (ret < 0)
+ return ret;
+
+ ret = vbus_enet_evq_negcap(priv, tx_ringlen);
+ if (ret < 0)
+ return ret;
+
+ ret = vbus_enet_l4ro_negcap(priv, rx_ringlen);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int vbus_enet_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+
+ if (data && !priv->sg)
+ return -ENOSYS;
+
+ return ethtool_op_set_tx_hw_csum(dev, data);
+}
+
+static struct ethtool_ops vbus_enet_ethtool_ops = {
+ .set_tx_csum = vbus_enet_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .set_tso = ethtool_op_set_tso,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops vbus_enet_netdev_ops = {
+ .ndo_open = vbus_enet_open,
+ .ndo_stop = vbus_enet_stop,
+ .ndo_set_config = vbus_enet_config,
+ .ndo_start_xmit = vbus_enet_tx_start,
+ .ndo_change_mtu = vbus_enet_change_mtu,
+ .ndo_tx_timeout = vbus_enet_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * This is called whenever a new vbus_device_proxy is added to the vbus
+ * with the matching VENET_ID
+ */
+static int
+vbus_enet_probe(struct vbus_device_proxy *vdev)
+{
+ struct net_device *dev;
+ struct vbus_enet_priv *priv;
+ int ret;
+
+ printk(KERN_INFO "VENET: Found new device at %lld\n", vdev->id);
+
+ ret = vdev->ops->open(vdev, VENET_VERSION, 0);
+ if (ret < 0)
+ return ret;
+
+ dev = alloc_etherdev(sizeof(struct vbus_enet_priv));
+ if (!dev)
+ return -ENOMEM;
+
+ /*
+ * establish our device-name early so we can incorporate it into
+ * the signal-path names, etc
+ */
+ rtnl_lock();
+
+ ret = dev_alloc_name(dev, dev->name);
+ if (ret < 0)
+ goto out_free;
+
+ priv = netdev_priv(dev);
+
+ spin_lock_init(&priv->lock);
+ priv->dev = dev;
+ priv->vdev = vdev;
+
+ ret = vbus_enet_negcap(priv);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: Error negotiating capabilities for " \
+ "%lld\n",
+ priv->vdev->id);
+ goto out_free;
+ }
+
+ if (priv->l4ro.available)
+ priv->import = &vbus_enet_l4ro_import;
+ else
+ priv->import = &vbus_enet_flat_import;
+
+ skb_queue_head_init(&priv->tx.outstanding);
+
+ queue_init(priv, &priv->rxq, "rx", VENET_QUEUE_RX, rx_ringlen,
+ rx_isr);
+ queue_init(priv, &priv->tx.veq, "tx", VENET_QUEUE_TX, tx_ringlen,
+ tx_isr);
+
+ rx_setup(priv);
+ tx_setup(priv);
+
+ ioq_notify_enable(priv->rxq.queue, 0); /* enable rx interrupts */
+
+ if (!priv->evq.txc) {
+ /*
+ * If the TXC feature is present, we will receive our
+ * tx-complete notification via the event-channel. Therefore,
+ * we only enable txq interrupts if the TXC feature is not
+ * present.
+ */
+ tasklet_init(&priv->tx.task, deferred_tx_isr,
+ (unsigned long)priv);
+ ioq_notify_enable(priv->tx.veq.queue, 0);
+ }
+
+ dev->netdev_ops = &vbus_enet_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+ SET_ETHTOOL_OPS(dev, &vbus_enet_ethtool_ops);
+ SET_NETDEV_DEV(dev, &vdev->dev);
+
+ netif_napi_add(dev, &priv->napi, vbus_enet_poll, 128);
+
+ ret = devcall(priv, VENET_FUNC_MACQUERY, priv->dev->dev_addr, ETH_ALEN);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: Error obtaining MAC address for " \
+ "%lld\n",
+ priv->vdev->id);
+ goto out_free;
+ }
+
+ dev->features |= NETIF_F_HIGHDMA;
+
+ ret = register_netdevice(dev);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: error %i registering device \"%s\"\n",
+ ret, dev->name);
+ goto out_free;
+ }
+
+ rtnl_unlock();
+
+ vdev->priv = priv;
+
+ return 0;
+
+ out_free:
+ rtnl_unlock();
+
+ free_netdev(dev);
+
+ return ret;
+}
+
+static int
+vbus_enet_remove(struct vbus_device_proxy *vdev)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)vdev->priv;
+ struct vbus_device_proxy *dev = priv->vdev;
+
+ unregister_netdev(priv->dev);
+ napi_disable(&priv->napi);
+
+ rx_teardown(priv);
+ ioq_put(priv->rxq.queue);
+
+ tx_teardown(priv);
+ ioq_put(priv->tx.veq.queue);
+
+ if (priv->evq.enabled)
+ evq_teardown(priv);
+
+ dev->ops->close(dev, 0);
+
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+/*
+ * Finally, the module stuff
+ */
+
+static struct vbus_driver_ops vbus_enet_driver_ops = {
+ .probe = vbus_enet_probe,
+ .remove = vbus_enet_remove,
+};
+
+static struct vbus_driver vbus_enet_driver = {
+ .type = VENET_TYPE,
+ .owner = THIS_MODULE,
+ .ops = &vbus_enet_driver_ops,
+};
+
+static __init int
+vbus_enet_init_module(void)
+{
+ printk(KERN_INFO "Virtual Ethernet: Copyright (C) 2009 Novell, Gregory Haskins\n");
+ printk(KERN_DEBUG "VENET: Using %d/%d queue depth\n",
+ rx_ringlen, tx_ringlen);
+ return vbus_driver_register(&vbus_enet_driver);
+}
+
+static __exit void
+vbus_enet_cleanup(void)
+{
+ vbus_driver_unregister(&vbus_enet_driver);
+}
+
+module_init(vbus_enet_init_module);
+module_exit(vbus_enet_cleanup);
+
+VBUS_DRIVER_AUTOPROBE(VENET_TYPE);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa71c3ff..105d7f0630cc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_ADDRESS] == NULL)
random_ether_addr(dev->dev_addr);
- if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
- else
- snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
- if (strchr(dev->name, '%')) {
- err = dev_alloc_name(dev, dev->name);
- if (err < 0)
- goto err_alloc_name;
- }
-
err = register_netdevice(dev);
if (err < 0)
goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
-err_alloc_name:
err_configure_peer:
unregister_netdevice(peer);
return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac704fdd7..0d6fec6b7d93 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
static int velocity_set_wol(struct velocity_info *vptr)
{
struct mac_regs __iomem *regs = vptr->mac_regs;
+ enum speed_opt spd_dpx = vptr->options.spd_dpx;
static u8 buf[256];
int i;
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
writew(0x0FFF, &regs->WOLSRClr);
+ if (spd_dpx == SPD_DPX_1000_FULL)
+ goto mac_done;
+
+ if (spd_dpx != SPD_DPX_AUTO)
+ goto advertise_done;
+
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_SPEED_1000)
MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
+advertise_done:
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
{
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
writeb(GCR, &regs->CHIPGCR);
}
+mac_done:
BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
/* Turn on SWPTAG just before entering power mode */
BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff61..d7227539484e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
#define MAC_REG_CHIPGSR 0x9C
#define MAC_REG_TESTCFG 0x9D
#define MAC_REG_DEBUG 0x9E
-#define MAC_REG_CHIPGCR 0x9F
+#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
#define MAC_REG_WOLCR0_SET 0xA0
#define MAC_REG_WOLCR1_SET 0xA1
#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
* Bits in CHIPGCR register
*/
-#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */
-#define CHIPGCR_FCFDX 0x40
+#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX 0x40 /* force full duplex */
#define CHIPGCR_FCRESV 0x20
-#define CHIPGCR_FCMODE 0x10
+#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
#define CHIPGCR_LPSOPT 0x08
#define CHIPGCR_TM1US 0x04
#define CHIPGCR_TM0US 0x02
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 228d4f7a58af..e74e4b42592d 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
data1 = steer_ctrl = 0;
status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
VXGE_HW_FW_API_GET_EPROM_REV,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
ring->rxd_init = attr->rxd_init;
ring->rxd_term = attr->rxd_term;
ring->buffer_mode = config->buffer_mode;
+ ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+ ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
ring->rxds_limit = config->rxds_limit;
ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+ fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+ fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
if (fifo->config->intr)
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg3_saved = val64;
}
if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg3_saved = val64;
}
val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
return status;
}
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
- config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
/*
* __vxge_hw_vpath_initialize
* This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d160..3c53aa732c9d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
u32 vsport_number;
u32 max_kdfc_db;
u32 max_nofl_db;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
struct __vxge_hw_ring *____cacheline_aligned ringh;
struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
u32 doorbell_cnt;
u32 total_db_cnt;
u64 rxds_limit;
+ u32 rtimer;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
u32 per_txdl_space;
u32 vp_id;
u32 tx_intr_num;
+ u32 rtimer;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c683..395423aeec00 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
struct vxge_hw_ring_rxd_info ext_info;
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
ring->ndev->name, __func__, __LINE__);
- ring->pkts_processed = 0;
-
- vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
return ret;
}
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+ int i = 0;
+
+ /* Enable CI for RTI */
+ if (vdev->config.intr_type == MSI_X) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_ring *hw_ring;
+
+ hw_ring = vdev->vpaths[i].ring.handle;
+ vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+ }
+ }
+
+ /* Enable CI for TTI */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+ vxge_hw_vpath_tti_ci_set(hw_fifo);
+ /*
+ * For INTA (with or without napi), set CI ON for only one
+ * vpath, since we have only one free-running timer.
+ */
+ if ((vdev->config.intr_type == INTA) && (i == 0))
+ break;
+ }
+
+ return;
+}
+
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
netif_tx_wake_all_queues(vdev->ndev);
}
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
out:
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
*/
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
- struct vxge_ring *ring =
- container_of(napi, struct vxge_ring, napi);
+ struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+ int pkts_processed;
int budget_org = budget;
- ring->budget = budget;
+ ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed = ring->pkts_processed;
if (ring->pkts_processed < budget_org) {
napi_complete(napi);
+
/* Re enable the Rx interrupts for the vpath */
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
+ mmiowb();
}
- return ring->pkts_processed;
+ /* Return the local copy: once the msix interrupt is cleared above,
+ * it can fire again right away and preempt this NAPI thread,
+ * changing ring->pkts_processed under us. */
+ return pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
pkts_processed += ring->pkts_processed;
budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
netdev_get_tx_queue(vdev->ndev, 0);
vpath->fifo.indicate_max_pkts =
vdev->config.fifo_indicate_max_pkts;
+ vpath->fifo.tx_vector_no = 0;
vpath->ring.rx_vector_no = 0;
vpath->ring.rx_csum = vdev->rx_csum;
vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
return VXGE_HW_OK;
}
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes the boundary timer and restriction timer
+ * values depending on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+ fifo->interrupt_count++;
+ if (jiffies > fifo->jiffies + HZ / 100) {
+ struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+ fifo->jiffies = jiffies;
+ if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+ hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+ hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ } else if (hw_fifo->rtimer != 0) {
+ hw_fifo->rtimer = 0;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ }
+ fifo->interrupt_count = 0;
+ }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases or decreases the packet counts within
+ * the ranges of traffic utilization, if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+ ring->interrupt_count++;
+ if (jiffies > ring->jiffies + HZ / 100) {
+ struct __vxge_hw_ring *hw_ring = ring->handle;
+
+ ring->jiffies = jiffies;
+ if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+ hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+ hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ } else if (hw_ring->rtimer != 0) {
+ hw_ring->rtimer = 0;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ }
+ ring->interrupt_count = 0;
+ }
+}
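+/*
+ * Illustrative arithmetic, derived from the constants above and assuming
+ * jiffies-based timing: the sampling window is HZ / 100 jiffies, i.e.
+ * roughly 10 ms. With VXGE_T1A_MAX_INTERRUPT_COUNT == 100 and
+ * VXGE_T1A_MAX_TX_INTERRUPT_COUNT == 200, the restriction timer is raised
+ * to the *_RTIMER_ADAPT_VAL once a vector exceeds about 10000 rx or
+ * 20000 tx interrupts per second, and is set back to 0 as soon as a later
+ * window falls under the threshold again.
+ */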
+
/*
* vxge_isr_napi
* @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
#ifdef CONFIG_PCI_MSI
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
+ adaptive_coalesce_tx_interrupts(fifo);
+
+ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
VXGE_COMPLETE_VPATH_TX(fifo);
+ vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ mmiowb();
+
return IRQ_HANDLED;
}
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
struct vxge_ring *ring = (struct vxge_ring *)dev_id;
- /* MSIX_IDX for Rx is 1 */
+ adaptive_coalesce_rx_interrupts(ring);
+
vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
+ ring->rx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
napi_schedule(&ring->napi);
return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
+ /* Reduce the chance of losing alarm interrupts by masking
+ * the vector. A pending bit will be set if an alarm is
+ * generated, and the interrupt will fire once the vector is
+ * unmasked.
+ */
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+ vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+ mmiowb();
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
-
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
+ msix_id);
+ mmiowb();
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
vpath->ring.rx_vector_no = (vpath->device_id *
VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+ vpath->fifo.tx_vector_no = (vpath->device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE);
+
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
VXGE_ALARM_MSIX_ID);
}
@@ -2474,8 +2592,9 @@ INTA_MODE:
"%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
- vxge_hw_vpath_tti_ci_set(vdev->devh,
- vdev->vpaths[0].device_id);
+
+ vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
ret = request_irq((int) vdev->pdev->irq,
vxge_isr_napi,
IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
}
netif_tx_start_all_queues(vdev->ndev);
+
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
goto out0;
out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
-static int __devinit vxge_device_revision(struct vxgedev *vdev)
-{
- int ret;
- u8 revision;
-
- ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
- if (ret)
- return -EIO;
-
- vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
- return 0;
-}
-
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
memcpy(&vdev->config, config, sizeof(struct vxge_config));
vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
vdev->rx_hwts = 0;
-
- ret = vxge_device_revision(vdev);
- if (ret < 0)
- goto _out1;
+ vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vxge_debug_init(VXGE_ERR,
"%s: vpath memory allocation failed",
vdev->ndev->name);
- ret = -ENODEV;
+ ret = -ENOMEM;
goto _out1;
}
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
if (vdev->config.gro_enable)
ndev->features |= NETIF_F_GRO;
- if (register_netdev(ndev)) {
+ ret = register_netdev(ndev);
+ if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s: %s : device registration failed!",
ndev->name, __func__);
- ret = -ENODEV;
goto _out2;
}
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
+ kfree(vdev->vpaths);
+
+ /* we are safe to free it now */
+ free_netdev(dev);
+
vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
buf);
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
break;
case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+ device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
break;
}
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit1;
}
- if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+ ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+ if (ret) {
vxge_debug_init(VXGE_ERR,
"%s : request regions failed", __func__);
- ret = -ENODEV;
goto _exit1;
}
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (!img[i].is_valid)
break;
vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+ "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
VXGE_EPROM_IMG_MAJOR(img[i].version),
VXGE_EPROM_IMG_MINOR(img[i].version),
VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_disable_sriov(pdev);
+ pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
+ pci_disable_sriov(pdev);
_exit3:
iounmap(attr.bar0);
_exit2:
@@ -4655,7 +4768,7 @@ _exit0:
kfree(ll_config);
kfree(device_config);
driver_config->config_dev_cnt--;
- pci_set_drvdata(pdev, NULL);
+ driver_config->total_dev_cnt--;
return ret;
}
@@ -4668,45 +4781,34 @@ _exit0:
static void __devexit vxge_remove(struct pci_dev *pdev)
{
struct __vxge_hw_device *hldev;
- struct vxgedev *vdev = NULL;
- struct net_device *dev;
- int i = 0;
+ struct vxgedev *vdev;
+ int i;
hldev = pci_get_drvdata(pdev);
-
if (hldev == NULL)
return;
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
+ vdev = netdev_priv(hldev->ndev);
vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
-
vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
__func__);
- vxge_device_unregister(hldev);
- for (i = 0; i < vdev->no_of_vpath; i++) {
+ for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
- vdev->vpaths[i].mcast_addr_cnt = 0;
- vdev->vpaths[i].mac_addr_cnt = 0;
- }
-
- kfree(vdev->vpaths);
+ vxge_device_unregister(hldev);
+ pci_set_drvdata(pdev, NULL);
+ /* Do not call pci_disable_sriov here, as it will break child devices */
+ vxge_hw_device_terminate(hldev);
iounmap(vdev->bar0);
-
- /* we are safe to free it now */
- free_netdev(dev);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ driver_config->config_dev_cnt--;
+ driver_config->total_dev_cnt--;
vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
__func__, __LINE__);
-
- vxge_hw_device_terminate(hldev);
-
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
- pci_set_drvdata(pdev, NULL);
vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
__LINE__);
}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356f..40474f0da576 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
#define VXGE_TTI_LTIMER_VAL 1000
#define VXGE_T1A_TTI_LTIMER_VAL 80
#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_TTI_RTIMER_ADAPT_VAL 10
#define VXGE_T1A_TTI_RTIMER_VAL 400
#define VXGE_RTI_BTIMER_VAL 250
#define VXGE_RTI_LTIMER_VAL 100
#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL 15
+#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
#define RTI_T1A_RX_UFC_C 50
#define RTI_T1A_RX_UFC_D 60
+/*
+ * The interrupt rate is maintained at 3k per second with the moderation
+ * parameters for most traffic but not all. These are the maximum interrupt
+ * counts allowed per function with INTA, or per vector with MSI-X, in a
+ * 10 millisecond time period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
/* Milli secs timer period */
#define VXGE_TIMER_DELAY 10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
int tx_steering_type;
int indicate_max_pkts;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ u32 tx_vector_no;
/* Tx stats */
struct vxge_fifo_stats stats;
} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
*/
int driver_id;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
/* copy of the flag indicating whether rx_csum is to be used */
u32 rx_csum:1,
rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
int vlan_tag_strip;
struct vlan_group *vlgrp;
- int rx_vector_no;
+ u32 rx_vector_no;
enum vxge_hw_status last_status;
/* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075f..8674f331311c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
return status;
}
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vp_config *config;
+ u64 val64;
+
+ if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+ return;
+
+ vp_reg = fifo->vp_reg;
+ config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+ if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+ config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ fifo->tim_tti_cfg1_saved = val64;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg1_saved;
+
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ ring->tim_rti_cfg1_saved = val64;
+ writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+ u64 val64 = fifo->tim_tti_cfg3_saved;
+ u64 timer = (fifo->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+ writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ /* tti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
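+/*
+ * Worked example (plain arithmetic on the values above; the unit of the
+ * RTIMER_VAL field is assumed to be the TIM block's internal tick): with
+ * fifo->rtimer == VXGE_TTI_RTIMER_ADAPT_VAL (10), the programmed value is
+ * 10 * 1000 / 272 == 36; with fifo->rtimer == 0 the field is cleared and
+ * no restriction timer is applied.
+ */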
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg3_saved;
+ u64 timer = (ring->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+ writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ /* rti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
* @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
}
/**
+ * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ * if configured in MSI-X one-shot mode
+ *
+ * Returns: None
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
+/**
* vxge_hw_device_set_intr_type - Updates the configuration
* with new interrupt type.
* @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
if (vpath->hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+ 0, 32), &vp_reg->one_shot_vect0_en);
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
0, 32), &vp_reg->one_shot_vect1_en);
- }
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
0, 32), &vp_reg->one_shot_vect2_en);
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
- 0, 32), &vp_reg->one_shot_vect3_en);
}
}
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: None
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+ else
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa1..9d9dfda4c7ab 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
* Virtual Paths
*/
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
u32 vxge_hw_vpath_id(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2245,6 +2249,8 @@ void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
@@ -2270,6 +2276,9 @@ void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
int
vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b9bcf3..581e21525e85 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "1"
-#define VXGE_VERSION_BUILD "22082"
+#define VXGE_VERSION_FIX "2"
+#define VXGE_VERSION_BUILD "22259"
#define VXGE_VERSION_FOR "k"
#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index b4338f389394..7aeb113cbb90 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
+source "drivers/net/wireless/iwlegacy/Kconfig"
source "drivers/net/wireless/iwmc3200wifi/Kconfig"
source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 9760561a27a5..ddd3fb6ba1d3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_B43LEGACY) += b43legacy/
obj-$(CONFIG_ZD1211RW) += zd1211rw/
obj-$(CONFIG_RTL8180) += rtl818x/
obj-$(CONFIG_RTL8187) += rtl818x/
-obj-$(CONFIG_RTL8192CE) += rtlwifi/
+obj-$(CONFIG_RTLWIFI) += rtlwifi/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
@@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
-obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_IWLAGN) += iwlwifi/
+obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9aa1bc0a947..afe2cbc6cb24 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
}
/* Put adm8211_tx_hdr on skb and transmit */
-static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct adm8211_tx_hdr *txhdr;
size_t payload_len, hdrlen;
@@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
txhdr->retry_limit = info->control.rates[0].count;
adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
-
- return NETDEV_TX_OK;
}
static int adm8211_alloc_rings(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 1476314afa8a..298601436ee2 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb)
ieee80211_wake_queues(priv->hw);
}
-static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct at76_priv *priv = hw->priv;
struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
@@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (priv->tx_urb->status == -EINPROGRESS) {
wiphy_err(priv->hw->wiphy,
"%s called while tx urb is pending\n", __func__);
- return NETDEV_TX_BUSY;
+ dev_kfree_skb_any(skb);
+ return;
}
/* The following code lines are important when the device is going to
@@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
ieee80211_queue_work(hw, &priv->work_join_bssid);
- return NETDEV_TX_BUSY;
+ dev_kfree_skb_any(skb);
+ return;
}
}
@@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
priv->tx_urb,
priv->tx_urb->hcpriv, priv->tx_urb->complete);
}
-
- return 0;
}
static int at76_mac80211_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index d7a4799d20fb..7b9672b0d090 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,8 +1,10 @@
config AR9170_USB
- tristate "Atheros AR9170 802.11n USB support"
+ tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
depends on USB && MAC80211
select FW_LOADER
help
+ This driver is going to get replaced by carl9170.
+
This is a driver for the Atheros "otus" 802.11n USB devices.
These devices require additional firmware (2 files).
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 4f845f80c098..371e4ce49528 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
int ar9170_nag_limiter(struct ar9170 *ar);
/* MAC */
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int ar9170_init_mac(struct ar9170 *ar);
int ar9170_set_qos(struct ar9170 *ar);
int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 32bf79e6a320..b761fec0d721 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar)
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
@@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
- return NETDEV_TX_OK;
+ return;
err_free:
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
@@ -1945,7 +1944,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e43210c8585c..a6c6a466000f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -108,12 +108,14 @@ enum ath_cipher {
* struct ath_ops - Register read/write operations
*
* @read: Register read
+ * @multi_read: Multiple register read
* @write: Register write
* @enable_write_buffer: Enable multiple register writes
* @write_flush: flush buffered register writes and disable buffering
*/
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
+ void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
void (*write)(void *, u32 val, u32 reg_offset);
void (*enable_write_buffer)(void *);
void (*write_flush) (void *);
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e0793319389d..e18a9aa7b6ca 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -40,6 +40,17 @@ config ATH5K_DEBUG
modprobe ath5k debug=0x00000400
+config ATH5K_TRACER
+ bool "Atheros 5xxx tracer"
+ depends on ATH5K
+ depends on EVENT_TRACING
+ ---help---
+ Say Y here to enable tracepoints for the ath5k driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say N.
+
config ATH5K_AHB
bool "Atheros 5xxx AHB bus support"
depends on (ATHEROS_AR231X && !PCI)
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 707cde149248..ae84b86c3bf2 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
*csz = L1_CACHE_BYTES >> 2;
}
-bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_softc *sc = common->priv;
struct platform_device *pdev = to_platform_device(sc->dev);
@@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
eeprom += off;
if (eeprom > eeprom_end)
- return -EINVAL;
+ return false;
*data = *eeprom;
- return 0;
+ return true;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 407e39c2b10b..0ee54eb333de 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -210,14 +210,9 @@
/* Initial values */
#define AR5K_INIT_CYCRSSI_THR1 2
-/* Tx retry limits */
-#define AR5K_INIT_SH_RETRY 10
-#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
-/* For station mode */
-#define AR5K_INIT_SSH_RETRY 32
-#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
-#define AR5K_INIT_TX_RETRY 10
-
+/* Tx retry limit defaults from standard */
+#define AR5K_INIT_RETRY_SHORT 7
+#define AR5K_INIT_RETRY_LONG 4
/* Slot time */
#define AR5K_INIT_SLOT_TIME_TURBO 6
@@ -1057,7 +1052,9 @@ struct ath5k_hw {
#define ah_modes ah_capabilities.cap_mode
#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
- u32 ah_limit_tx_retries;
+ u8 ah_retry_long;
+ u8 ah_retry_short;
+
u8 ah_coverage_class;
bool ah_ack_bitrate_high;
u8 ah_bwmode;
@@ -1067,7 +1064,6 @@ struct ath5k_hw {
u8 ah_ant_mode;
u8 ah_tx_ant;
u8 ah_def_ant;
- bool ah_software_retry;
struct ath5k_capabilities ah_capabilities;
@@ -1162,6 +1158,26 @@ void ath5k_hw_deinit(struct ath5k_hw *ah);
int ath5k_sysfs_register(struct ath5k_softc *sc);
void ath5k_sysfs_unregister(struct ath5k_softc *sc);
+/* base.c */
+struct ath5k_buf;
+struct ath5k_txq;
+
+void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+bool ath_any_vif_assoc(struct ath5k_softc *sc);
+void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath5k_txq *txq);
+int ath5k_init_hw(struct ath5k_softc *sc);
+int ath5k_stop_hw(struct ath5k_softc *sc);
+void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+ struct ieee80211_vif *vif);
+int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
+void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_config(struct ath5k_softc *sc);
+void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+
/*Chip id helper functions */
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
int ath5k_hw_read_srev(struct ath5k_hw *ah);
@@ -1250,6 +1266,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info);
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue);
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index cdac5cff0177..bc8240560488 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_imr = 0;
- ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
- ah->ah_software_retry = false;
+ ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
+ ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
@@ -220,7 +220,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
- } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
+ } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
+ ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 09ae4ef0fd51..91411e9b4b68 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -61,6 +61,9 @@
#include "debug.h"
#include "ani.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -242,73 +245,68 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
\********************/
/*
- * Convert IEEE channel number to MHz frequency.
- */
-static inline short
-ath5k_ieee2mhz(short chan)
-{
- if (chan <= 14 || chan >= 27)
- return ieee80211chan2mhz(chan);
- else
- return 2212 + chan * 20;
-}
-
-/*
* Returns true for the channel numbers used without all_channels modparam.
*/
-static bool ath5k_is_standard_channel(short chan)
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
- return ((chan <= 14) ||
- /* UNII 1,2 */
- ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+ if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+ return true;
+
+ return /* UNII 1,2 */
+ (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
/* midband */
((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
/* UNII-3 */
- ((chan & 3) == 1 && chan >= 149 && chan <= 165));
+ ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
+ /* 802.11j 5.030-5.080 GHz (20MHz) */
+ (chan == 8 || chan == 12 || chan == 16) ||
+ /* 802.11j 4.9GHz (20MHz) */
+ (chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
static unsigned int
-ath5k_copy_channels(struct ath5k_hw *ah,
- struct ieee80211_channel *channels,
- unsigned int mode,
- unsigned int max)
+ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
+ unsigned int mode, unsigned int max)
{
- unsigned int i, count, size, chfreq, freq, ch;
-
- if (!test_bit(mode, ah->ah_modes))
- return 0;
+ unsigned int count, size, chfreq, freq, ch;
+ enum ieee80211_band band;
switch (mode) {
case AR5K_MODE_11A:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
- size = 220 ;
+ size = 220;
chfreq = CHANNEL_5GHZ;
+ band = IEEE80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
size = 26;
chfreq = CHANNEL_2GHZ;
+ band = IEEE80211_BAND_2GHZ;
break;
default:
ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
return 0;
}
- for (i = 0, count = 0; i < size && max > 0; i++) {
- ch = i + 1 ;
- freq = ath5k_ieee2mhz(ch);
+ count = 0;
+ for (ch = 1; ch <= size && count < max; ch++) {
+ freq = ieee80211_channel_to_frequency(ch, band);
+
+ if (freq == 0) /* mapping failed - not a standard channel */
+ continue;
/* Check if channel is supported by the chipset */
if (!ath5k_channel_ok(ah, freq, chfreq))
continue;
- if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
+ if (!modparam_all_channels &&
+ !ath5k_is_standard_channel(ch, band))
continue;
/* Write channel info and increment counter */
channels[count].center_freq = freq;
- channels[count].band = (chfreq == CHANNEL_2GHZ) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channels[count].band = band;
switch (mode) {
case AR5K_MODE_11A:
case AR5K_MODE_11G:
@@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
}
count++;
- max--;
}
return count;
@@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_bitrates = 12;
sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
}
sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_bitrates = 8;
sband->channels = &sc->channels[count_c];
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -445,18 +442,6 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
return ath5k_reset(sc, chan, true);
}
-static void
-ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
-{
- sc->curmode = mode;
-
- if (mode == AR5K_MODE_11A) {
- sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
- } else {
- sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
- }
-}
-
struct ath_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
@@ -569,7 +554,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
"hw_rix out of bounds: %x\n", hw_rix))
return 0;
- rix = sc->rate_idx[sc->curband->band][hw_rix];
+ rix = sc->rate_idx[sc->curchan->band][hw_rix];
if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
rix = 0;
@@ -1376,10 +1361,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
* right now, so it's not too bad...
*/
rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
- rxs->flag |= RX_FLAG_TSFT;
+ rxs->flag |= RX_FLAG_MACTIME_MPDU;
rxs->freq = sc->curchan->center_freq;
- rxs->band = sc->curband->band;
+ rxs->band = sc->curchan->band;
rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
@@ -1394,10 +1379,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
if (rxs->rate_idx >= 0 && rs->rs_rate ==
- sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+ sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
rxs->flag |= RX_FLAG_SHORTPRE;
- ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ trace_ath5k_rx(sc, skb);
ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
@@ -1533,7 +1518,7 @@ unlock:
* TX Handling *
\*************/
-int
+void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq)
{
@@ -1542,7 +1527,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
unsigned long flags;
int padsize;
- ath5k_debug_dump_skb(sc, skb, "TX ", 1);
+ trace_ath5k_tx(sc, skb, txq);
/*
* The hardware expects the header padded to 4 byte boundaries.
@@ -1582,16 +1567,15 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
spin_unlock_irqrestore(&sc->txbuflock, flags);
goto drop_packet;
}
- return NETDEV_TX_OK;
+ return;
drop_packet:
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
- struct ath5k_tx_status *ts)
+ struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
struct ieee80211_tx_info *info;
int i;
@@ -1643,6 +1627,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
else
sc->stats.antenna_tx[0]++; /* invalid */
+ trace_ath5k_tx_complete(sc, skb, txq, ts);
ieee80211_tx_status(sc->hw, skb);
}
@@ -1679,7 +1664,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
DMA_TO_DEVICE);
- ath5k_tx_frame_completed(sc, skb, &ts);
+ ath5k_tx_frame_completed(sc, skb, txq, &ts);
}
/*
@@ -1821,8 +1806,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- ath5k_debug_dump_skb(sc, skb, "BC ", 1);
-
ath5k_txbuf_free_skb(sc, avf->bbuf);
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,6 +1900,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_beacon_update(sc->hw, vif);
+ trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
+
ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
ath5k_hw_start_tx_dma(ah, sc->bhalq);
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2417,7 +2402,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
/* set up multi-rate retry capabilities */
if (sc->ah->ah_version == AR5K_AR5212) {
hw->max_rates = 4;
- hw->max_rate_tries = 11;
+ hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
+ AR5K_INIT_RETRY_LONG);
}
hw->vif_data_size = sizeof(struct ath5k_vif);
@@ -2554,7 +2540,6 @@ ath5k_init_hw(struct ath5k_softc *sc)
* and then setup of the interrupt mask.
*/
sc->curchan = sc->hw->conf.channel;
- sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
@@ -2681,10 +2666,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
* so we should also free any remaining
* tx buffers */
ath5k_drain_tx_buffs(sc);
- if (chan) {
+ if (chan)
sc->curchan = chan;
- sc->curband = &sc->sbands[chan->band];
- }
ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
skip_pcu);
if (ret) {
@@ -2782,12 +2765,6 @@ ath5k_init(struct ieee80211_hw *hw)
goto err;
}
- /* NB: setup here so ath5k_rate_update is happy */
- if (test_bit(AR5K_MODE_11A, ah->ah_modes))
- ath5k_setcurmode(sc, AR5K_MODE_11A);
- else
- ath5k_setcurmode(sc, AR5K_MODE_11B);
-
/*
* Allocate tx+rx descriptors and populate the lists.
*/
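
The base.c hunks above retire the cached sc->curband / sc->curmode state (the fields themselves are dropped from base.h just below); every band-dependent lookup is now derived from the active channel. A minimal sketch of the resulting pattern, using only fields visible in this diff (the helper name is illustrative, not the driver's):

/* Sketch: band-dependent lookups now key off the current channel. */
static int ath5k_hw_to_driver_rix_sketch(struct ath5k_softc *sc, int hw_rix)
{
	enum ieee80211_band band = sc->curchan->band;

	/* was: sc->rate_idx[sc->curband->band][hw_rix] */
	int rix = sc->rate_idx[band][hw_rix];

	/* the bitrate table is reached the same way now:
	 * sc->sbands[band].bitrates[rix] */
	return rix < 0 ? 0 : rix;
}
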
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 6d511476e4d2..8f919dca95f1 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -183,8 +183,6 @@ struct ath5k_softc {
enum nl80211_iftype opmode;
struct ath5k_hw *ah; /* Atheros HW */
- struct ieee80211_supported_band *curband;
-
#ifdef CONFIG_ATH5K_DEBUG
struct ath5k_dbg_info debug; /* debug info */
#endif /* CONFIG_ATH5K_DEBUG */
@@ -202,7 +200,6 @@ struct ath5k_softc {
#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
- unsigned int curmode; /* current phy mode */
struct ieee80211_channel *curchan; /* current h/w channel */
u16 nvifs;
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 31cad80e9b01..f77e8a703c5c 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -32,23 +32,24 @@
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
+ struct ath5k_capabilities *caps = &ah->ah_capabilities;
u16 ee_header;
/* Capabilities stored in the EEPROM */
- ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
+ ee_header = caps->cap_eeprom.ee_header;
if (ah->ah_version == AR5K_AR5210) {
/*
* Set radio capabilities
* (The AR5110 only supports the middle 5GHz band)
*/
- ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
- ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
- ah->ah_capabilities.cap_range.range_2ghz_min = 0;
- ah->ah_capabilities.cap_range.range_2ghz_max = 0;
+ caps->cap_range.range_5ghz_min = 5120;
+ caps->cap_range.range_5ghz_max = 5430;
+ caps->cap_range.range_2ghz_min = 0;
+ caps->cap_range.range_2ghz_max = 0;
/* Set supported modes */
- __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
		 * XXX The transceiver supports frequencies from 4920 to 6100 MHz
@@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
- * XXX doesn't matter because these channels are out of range
- * XXX but some regulation domains like MKK (Japan) will
- * XXX support frequencies somewhere around 4.8GHz.
+ * XXX doesn't matter because these channels are out of the
+ * XXX legal range.
*/
/*
@@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
*/
if (AR5K_EEPROM_HDR_11A(ee_header)) {
- /* 4920 */
- ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
- ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
+ if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
+ caps->cap_range.range_5ghz_min = 4920;
+ else
+ caps->cap_range.range_5ghz_min = 5005;
+ caps->cap_range.range_5ghz_max = 6100;
/* Set supported modes */
- __set_bit(AR5K_MODE_11A,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
}
/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
@@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
(AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)) {
/* 2312 */
- ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
- ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
+ caps->cap_range.range_2ghz_min = 2412;
+ caps->cap_range.range_2ghz_max = 2732;
if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11B, caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11G, caps->cap_mode);
}
}
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
- ah->ah_capabilities.cap_queues.q_tx_num =
- AR5K_NUM_TX_QUEUES_NOQCU;
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
else
- ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
- ah->ah_capabilities.cap_has_phyerr_counters = true;
+ caps->cap_has_phyerr_counters = true;
else
- ah->ah_capabilities.cap_has_phyerr_counters = false;
+ caps->cap_has_phyerr_counters = false;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index d2f84d76bb07..0230f30e9e9a 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -308,8 +308,6 @@ static const struct {
{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
{ ATH5K_DEBUG_LED, "led", "LED management" },
- { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
- { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
@@ -1036,24 +1034,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
}
void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx)
-{
- char buf[16];
-
- if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
- (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
- return;
-
- snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
-
- print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
- min(200U, skb->len));
-
- printk(KERN_DEBUG "\n");
-}
-
-void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
struct ath5k_desc *ds = bf->desc;
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 3e34428d5126..b0355aef68d3 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -116,8 +116,6 @@ enum ath5k_debug_level {
ATH5K_DEBUG_CALIBRATE = 0x00000020,
ATH5K_DEBUG_TXPOWER = 0x00000040,
ATH5K_DEBUG_LED = 0x00000080,
- ATH5K_DEBUG_DUMP_RX = 0x00000100,
- ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_DMA = 0x00000800,
ATH5K_DEBUG_ANI = 0x00002000,
@@ -152,10 +150,6 @@ void
ath5k_debug_dump_bands(struct ath5k_softc *sc);
void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx);
-
-void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
#else /* no debugging */
@@ -182,10 +176,6 @@ static inline void
ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
static inline void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx) {}
-
-static inline void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 80e625608bac..b6561f785c6e 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -72,7 +72,6 @@ static int
ath5k_eeprom_init_header(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- int ret;
u16 val;
u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
@@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
- int ret, i = 0;
+ int i = 0;
AR5K_EEPROM_READ(o++, val);
ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
@@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
- int ret;
ee->ee_n_piers[mode] = 0;
AR5K_EEPROM_READ(o++, val);
@@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
int o = *offset;
int i = 0;
u8 freq1, freq2;
- int ret;
u16 val;
ee->ee_n_piers[mode] = 0;
@@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
- int i, ret;
+ int i;
u16 val;
u8 mask;
@@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
u32 offset;
u8 i, c;
u16 val;
- int ret;
u8 pd_gains = 0;
/* Count how many curves we have and
@@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
struct ath5k_chan_pcal_info *chinfo;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
- int idx, i, ret;
+ int idx, i;
u16 val;
u8 pd_gains = 0;
@@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
u8 *rate_target_pwr_num;
u32 offset;
u16 val;
- int ret, i;
+ int i;
offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
struct ath5k_edge_power *rep;
unsigned int fmask, pmask;
unsigned int ctl_mode;
- int ret, i, j;
+ int i, j;
u32 offset;
u16 val;
@@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
u8 mac_d[ETH_ALEN] = {};
u32 total, offset;
u16 data;
- int octet, ret;
+ int octet;
- ret = ath5k_hw_nvram_read(ah, 0x20, &data);
- if (ret)
- return ret;
+ AR5K_EEPROM_READ(0x20, data);
for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
- ret = ath5k_hw_nvram_read(ah, offset, &data);
- if (ret)
- return ret;
+ AR5K_EEPROM_READ(offset, data);
total += data;
mac_d[octet + 1] = data & 0xff;
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 7c09e150dbdc..6511c27d938e 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
#define AR5K_EEPROM_READ(_o, _v) do { \
- ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \
- if (ret) \
- return ret; \
+ if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
+ return -EIO; \
} while (0)
#define AR5K_EEPROM_READ_HDR(_o, _v) \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
AR5K_CTL_MODE_M = 15,
};
-/* Default CTL ids for the 3 main reg domains.
- * Atheros only uses these by default but vendors
- * can have up to 32 different CTLs for different
- * scenarios. Note that theese values are ORed with
- * the mode id (above) so we can have up to 24 CTL
- * datasets out of these 3 main regdomains. That leaves
- * 8 ids that can be used by vendors and since 0x20 is
- * missing from HAL sources i guess this is the set of
- * custom CTLs vendors can use. */
-#define AR5K_CTL_FCC 0x10
-#define AR5K_CTL_CUSTOM 0x20
-#define AR5K_CTL_ETSI 0x30
-#define AR5K_CTL_MKK 0x40
-
-/* Indicates a CTL with only mode set and
- * no reg domain mapping, such CTLs are used
- * for world roaming domains or simply when
- * a reg domain is not set */
-#define AR5K_CTL_NO_REGDOMAIN 0xf0
-
-/* Indicates an empty (invalid) CTL */
-#define AR5K_CTL_NO_CTL 0xff
-
/* Per channel calibration data, used for power table setup */
struct ath5k_chan_pcal_info_rf5111 {
/* Power levels in half dbm units
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d76d68c99f72..1fbe3c0b9f08 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -48,28 +48,11 @@
extern int ath5k_modparam_nohwcrypt;
-/* functions used from base.c */
-void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
-bool ath_any_vif_assoc(struct ath5k_softc *sc);
-int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath5k_txq *txq);
-int ath5k_init_hw(struct ath5k_softc *sc);
-int ath5k_stop_hw(struct ath5k_softc *sc);
-void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
- struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
-void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
-int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void ath5k_beacon_config(struct ath5k_softc *sc);
-void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-
/********************\
* Mac80211 functions *
\********************/
-static int
+static void
ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ath5k_softc *sc = hw->priv;
@@ -77,10 +60,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
dev_kfree_skb_any(skb);
- return 0;
+ return;
}
- return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
+ ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
}
@@ -226,6 +209,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
struct ath5k_hw *ah = sc->ah;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
+ int i;
mutex_lock(&sc->lock);
@@ -243,6 +227,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
}
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ ah->ah_retry_long = conf->long_frame_max_tx_count;
+ ah->ah_retry_short = conf->short_frame_max_tx_count;
+
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
+ ath5k_hw_set_tx_retry_limits(ah, i);
+ }
+
/* TODO:
* 1) Move this on config_interface and handle each case
* separately eg. when we have only one STA vif, use
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 7f8c5b0e9d2a..66598a0d1df0 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
/*
* Read from eeprom
*/
-bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
+static bool
+ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
u32 status, timeout;
@@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
if (status & AR5K_EEPROM_STAT_RDDONE) {
if (status & AR5K_EEPROM_STAT_RDERR)
- return -EIO;
+ return false;
*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
0xffff);
- return 0;
+ return true;
}
udelay(15);
}
- return -ETIMEDOUT;
+ return false;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fdccad1..62ce2f4e8605 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value & CHANNEL_CCK) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo ? Let's be safe
+ * for now */
+ udelay(100 + delay);
+ } else {
+ mdelay(1);
+ }
+}
+
/**********************\
* RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
+ case AR5K_RF2317:
case AR5K_RF2425:
ret = ath5k_hw_rf2425_channel(ah, channel);
break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* Failed */
if (i >= 100)
return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
}
/*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value & CHANNEL_OFDM) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
+
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
/*
* For 5210 we do all initialization using
* initvals, so we don't have to modify
* any settings (5210 also only supports
* a/aturbo modes)
*/
- if ((ah->ah_version != AR5K_AR5210) && !fast) {
+ if (ah->ah_version != AR5K_AR5210) {
/*
* Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- /* Write OFDM timings on 5212*/
- if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
-
- ret = ath5k_hw_write_ofdm_timings(ah, channel);
- if (ret)
- return ret;
-
- /* Spur info is available only from EEPROM versions
- * greater than 5.3, but the EEPROM routines will use
- * static values for older versions */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
- ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
- }
-
/*Enable/disable 802.11b mode on 5111
(enable 2111 frequency converter + CCK)*/
if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+ ath5k_hw_wait_for_synth(ah, channel);
+
/*
- * On 5211+ read activation -> rx delay
- * and use it.
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
*/
- if (ah->ah_version != AR5K_AR5210) {
- u32 delay;
- delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
- AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
- ((delay << 2) / 22) : (delay / 10);
- if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
- delay = delay << 1;
- if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
- delay = delay << 2;
- /* XXX: /2 on turbo ? Let's be safe
- * for now */
- udelay(100 + delay);
- } else {
- mdelay(1);
- }
-
- if (fast)
- /*
- * Release RF Bus grant
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
- AR5K_PHY_RFBUS_REQ_REQUEST);
- else {
- /*
- * Perform ADC test to see if baseband is ready
- * Set tx hold and check adc test register
- */
- phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
- ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
- for (i = 0; i <= 20; i++) {
- if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
- break;
- udelay(200);
- }
- ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
}
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
/*
* Start automatic gain control calibration
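
The synth settle delay that used to be open-coded late in ath5k_hw_phy_init() now lives in ath5k_hw_wait_for_synth() and is also used by the new fast-channel-switch path, which releases the RF bus and starts only an NF calibration before returning early. A worked example of the delay it computes, with a hypothetical AR5K_PHY_RX_DELAY read-back of 128 on a normal 20 MHz channel:

/* delay field assumed to read back as 128 (hypothetical value):
 *   OFDM channel:  128 / 10        = 12  ->  udelay(100 + 12)
 *   CCK channel:   (128 << 2) / 22 = 23  ->  udelay(100 + 23)
 * 10 MHz mode doubles the delay, 5 MHz mode quadruples it,
 * and AR5210 falls back to a plain mdelay(1).
 */
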
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 2c9c9e793d4e..3343fb9e4940 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
/*
* Set tx retry limits on DCU
*/
-static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
- unsigned int queue)
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue)
{
- u32 retry_lg, retry_sh;
-
- /*
- * Calculate and set retry limits
- */
- if (ah->ah_software_retry) {
- /* XXX Need to test this */
- retry_lg = ah->ah_limit_tx_retries;
- retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
- AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
- } else {
- retry_lg = AR5K_INIT_LG_RETRY;
- retry_sh = AR5K_INIT_SH_RETRY;
- }
-
/* Single data queue on AR5210 */
if (ah->ah_version == AR5K_AR5210) {
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah,
(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
- | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_NODCU_RETRY_LMT_SLG_RETRY)
- | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_NODCU_RETRY_LMT_SSH_RETRY)
- | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
- | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_LG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SH_RETRY),
AR5K_NODCU_RETRY_LMT);
/* DCU on AR5211+ */
} else {
ath5k_hw_reg_write(ah,
- AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_DCU_RETRY_LMT_SLG_RETRY) |
- AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_DCU_RETRY_LMT_SSH_RETRY) |
- AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
- AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
+ AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_RTS)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_STA_RTS)
+ | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+ AR5K_DCU_RETRY_LMT_STA_DATA),
AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
}
- return;
}
/**
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index fd14b9103951..e1c9abd8c879 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -686,16 +686,15 @@
/*
* DCU retry limit registers
+ * all these fields don't allow zero values
*/
#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0
-#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */
-#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
+#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received for this number of times */
+#define AR5K_DCU_RETRY_LMT_RTS_S 0
+#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded CW reset */
+#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8
+#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded CW reset. */
+#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14
#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
/*
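
With the qcu.c and reg.h hunks above, the per-queue DCU retry word is built from ah->ah_retry_long / ah->ah_retry_short using the renamed field masks. A minimal sketch of the packing, assuming the usual ath5k shift-and-mask helper (AR5K_REG_SM(v, F) is roughly ((v) << F##_S) & F) and hypothetical limits of 4 long / 7 short retries:

static u32 dcu_retry_word_sketch(u8 retry_long, u8 retry_short)
{
	/* e.g. retry_long = 4, retry_short = 7:
	 *   RTS      -> 0x00004
	 *   STA_RTS  -> 0x00400
	 *   STA_DATA -> 0x1c000  (max(4, 7) << 14)
	 * OR'd together: 0x0001c404
	 */
	return AR5K_REG_SM(retry_long, AR5K_DCU_RETRY_LMT_RTS) |
	       AR5K_REG_SM(retry_long, AR5K_DCU_RETRY_LMT_STA_RTS) |
	       AR5K_REG_SM(max(retry_long, retry_short),
			   AR5K_DCU_RETRY_LMT_STA_DATA);
}
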
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644
index 000000000000..2de68adb6240
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -0,0 +1,107 @@
+#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_ATH5K_H
+
+#include <linux/tracepoint.h>
+#include "base.h"
+
+#ifndef CONFIG_ATH5K_TRACER
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+struct sk_buff;
+
+#define PRIV_ENTRY __field(struct ath5k_softc *, priv)
+#define PRIV_ASSIGN __entry->priv = priv
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath5k
+
+TRACE_EVENT(ath5k_rx,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
+ TP_ARGS(priv, skb),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+ TP_printk(
+ "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
+ )
+);
+
+TRACE_EVENT(ath5k_tx,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ struct ath5k_txq *q),
+
+ TP_ARGS(priv, skb, q),
+
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+
+ TP_printk(
+ "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
+ __entry->qnum
+ )
+);
+
+TRACE_EVENT(ath5k_tx_complete,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ struct ath5k_txq *q, struct ath5k_tx_status *ts),
+
+ TP_ARGS(priv, skb, q, ts),
+
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __field(u8, ts_status)
+ __field(s8, ts_rssi)
+ __field(u8, ts_antenna)
+ ),
+
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ __entry->ts_status = ts->ts_status;
+ __entry->ts_rssi = ts->ts_rssi;
+ __entry->ts_antenna = ts->ts_antenna;
+ ),
+
+ TP_printk(
+ "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
+ __entry->priv, __entry->skbaddr, __entry->qnum,
+ __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
+ )
+);
+
+#endif /* __TRACE_ATH5K_H */
+
+#ifdef CONFIG_ATH5K_TRACER
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
+
+#endif
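
The tracepoints above replace the old ATH5K_DEBUG_DUMP_RX/TX hex dumps: base.c now calls trace_ath5k_rx(), trace_ath5k_tx() and trace_ath5k_tx_complete() in the hot paths. When CONFIG_ATH5K_TRACER is not set, the TRACE_EVENT() override at the top of the header turns each of them into an empty inline (TP_PROTO() just passes its argument list through), so the calls compile away. For example, trace_ath5k_rx becomes:

static inline void trace_ath5k_rx(struct ath5k_softc *priv,
				  struct sk_buff *skb) {}

With the tracer enabled, the events should show up under the usual ftrace controls (typically /sys/kernel/debug/tracing/events/ath5k/) and carry the full frame contents via the dynamic "frame" array.
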
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index aca01621c205..4d66ca8042eb 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,7 +4,6 @@ ath9k-y += beacon.o \
main.o \
recv.o \
xmit.o \
- virtual.o \
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 25a6e4417cdb..993672105963 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops = {
static int ath_ahb_probe(struct platform_device *pdev)
{
void __iomem *mem;
- struct ath_wiphy *aphy;
struct ath_softc *sc;
struct ieee80211_hw *hw;
struct resource *res;
@@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
irq = res->start;
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
- sizeof(struct ath_softc), &ath9k_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
@@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
SET_IEEE80211_DEV(hw, &pdev->dev);
platform_set_drvdata(pdev, hw);
- aphy = hw->priv;
- sc = (struct ath_softc *) (aphy + 1);
- aphy->sc = sc;
- aphy->hw = hw;
- sc->pri_wiphy = aphy;
+ sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
@@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
if (hw) {
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
ath9k_deinit_device(sc);
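
The ahb.c change reflects the removal of the ath9k "virtual wiphy" layer (virtual.o is dropped from the Makefile above): ieee80211_alloc_hw() no longer reserves room for a struct ath_wiphy in front of the softc, so hw->priv points at the softc directly. Roughly:

/* before:  hw->priv -> [ struct ath_wiphy ][ struct ath_softc ]
 *                       aphy->sc = sc;  sc->pri_wiphy = aphy;
 * after:   hw->priv -> [ struct ath_softc ]
 *                       sc = hw->priv;
 */
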
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 5e300bd3d264..76388c6d6692 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (AR_SREV_9271(ah)) {
+ if (!ar9285_hw_cl_cal(ah, chan))
+ return false;
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
if (!ar9285_hw_clc(ah, chan))
return false;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 4819747fa4c3..4a9271802991 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
return;
reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
- (7 << 14) | (6 << 17) | (1 << 20) |
+ (2 << 14) | (6 << 17) | (1 << 20) |
(3 << 24) | (1 << 28);
REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
@@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
/* make sure forced gain is not set */
- REG_WRITE(ah, 0xa458, 0);
+ REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
/* Write the OFDM power per rate set */
/* 6 (LSB), 9, 12, 18 (MSB) */
- REG_WRITE(ah, 0xa3c0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
/* 24 (LSB), 36, 48, 54 (MSB) */
- REG_WRITE(ah, 0xa3c4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the CCK power per rate set */
/* 1L (LSB), reserved, 2L, 2S (MSB) */
- REG_WRITE(ah, 0xa3c8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
/* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
- REG_WRITE(ah, 0xa3cc,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the HT20 power per rate set */
/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
- REG_WRITE(ah, 0xa3d0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 6 (LSB), 7, 12, 13 (MSB) */
- REG_WRITE(ah, 0xa3d4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 14 (LSB), 15, 20, 21 */
- REG_WRITE(ah, 0xa3e4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Mixed HT20 and HT40 rates */
/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
- REG_WRITE(ah, 0xa3e8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
* correct PAR difference between HT40 and HT20/LEGACY
* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
*/
- REG_WRITE(ah, 0xa3d8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 6 (LSB), 7, 12, 13 (MSB) */
- REG_WRITE(ah, 0xa3dc,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 14 (LSB), 15, 20, 21 */
- REG_WRITE(ah, 0xa3ec,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
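
Each register written above packs four 6-bit per-rate target powers into one 32-bit word via POW_SM(). A worked example with a hypothetical target power of 0x24 for the legacy 6-24 Mbit/s group:

/* POW_SM(_r, _s) = ((_r) & 0x3f) << (_s), so with
 * pPwrArray[ALL_TARGET_LEGACY_6_24] == 0x24 (hypothetical) the first
 * write becomes:
 *
 *   AR_PHY_POWER_TX_RATE(0) = POW_SM(0x24, 24) | POW_SM(0x24, 16) |
 *                             POW_SM(0x24, 8)  | POW_SM(0x24, 0)
 *                           = 0x24242424
 *
 * i.e. the same target power for the 6, 9, 12 and 18 Mbit/s rates.
 */
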
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 06fb2c850535..7f5de6e4448b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -28,7 +28,67 @@
*/
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
- if (AR_SREV_9485(ah)) {
+ if (AR_SREV_9485_11(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9485_1_1_mac_core,
+ ARRAY_SIZE(ar9485_1_1_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9485_1_1_mac_postamble,
+ ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
+ ARRAY_SIZE(ar9485_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9485_1_1_baseband_core,
+ ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9485_1_1_baseband_postamble,
+ ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9485_1_1_radio_core,
+ ARRAY_SIZE(ar9485_1_1_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9485_1_1_radio_postamble,
+ ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9485_1_1_soc_preamble,
+ ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485_common_rx_gain_1_1,
+ ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+ 2);
+ } else if (AR_SREV_9485(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -85,8 +145,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1,
- ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1),
+ ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
2);
} else {
/* mac */
@@ -163,7 +223,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_tx_gain_idx(ah)) {
case 0:
default:
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_lowest_ob_db_tx_gain_1_0,
ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
@@ -175,10 +240,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
5);
break;
case 1:
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_ob_db_tx_gain_1_0,
- ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+ ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
5);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -187,10 +257,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
5);
break;
case 2:
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_low_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_low_ob_db_tx_gain_1_0,
- ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+ ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
5);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -199,7 +274,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
5);
break;
case 3:
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_power_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_power_tx_gain_1_0,
ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
@@ -218,7 +298,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_rx_gain_idx(ah)) {
case 0:
default:
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485_common_rx_gain_1_1,
+ ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9485(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485Common_rx_gain_1_0,
ARRAY_SIZE(ar9485Common_rx_gain_1_0),
@@ -230,7 +315,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
2);
break;
case 1:
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_1,
+ ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9485(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485Common_wo_xlna_rx_gain_1_0,
ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 4ceddbbdfcee..038a0cbfc6e7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
*/
if (rxsp->status11 & AR_CRCErr)
rxs->rs_status |= ATH9K_RXERR_CRC;
- if (rxsp->status11 & AR_PHYErr) {
+ else if (rxsp->status11 & AR_PHYErr) {
phyerr = MS(rxsp->status11, AR_PHYErrCode);
/*
* If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_phyerr = phyerr;
}
- }
- if (rxsp->status11 & AR_DecryptCRCErr)
+ } else if (rxsp->status11 & AR_DecryptCRCErr)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
- if (rxsp->status11 & AR_MichaelErr)
+ else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
+
if (rxsp->status11 & AR_KeyMiss)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
}
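
The else-if chain above makes the RX error flags mutually exclusive, so rs_status reports at most one of the CRC / PHY / decrypt-CRC / Michael errors per frame, in that priority order; only the key-miss check is still applied independently. For a hypothetical frame with both AR_CRCErr and AR_MichaelErr set:

/*   before: rxs->rs_status == (ATH9K_RXERR_CRC | ATH9K_RXERR_MIC)
 *   after:  rxs->rs_status ==  ATH9K_RXERR_CRC
 */
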
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 59bab6bd8a74..8bdda2cf9dd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -486,6 +486,8 @@
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
+
#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
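
The new AR_PHY_POWER_TX_RATE(_d) macro gives the per-rate power registers a name; assuming AR_SM_BASE is 0xa200 on this family, the arithmetic lines up with the raw addresses it replaces in ar9003_eeprom.c:

/*   AR_PHY_POWER_TX_RATE(0)  = 0xa200 + 0x1c0 + (0  << 2) = 0xa3c0
 *   AR_PHY_POWER_TX_RATE(5)  = 0xa200 + 0x1c0 + (5  << 2) = 0xa3d4
 *   AR_PHY_POWER_TX_RATE(11) = 0xa200 + 0x1c0 + (11 << 2) = 0xa3ec
 */
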
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 70de3d89a7b5..eac4d8526fc1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -940,4 +940,1145 @@ static const u32 ar9485_1_0_mac_core[][2] = {
{0x000083cc, 0x00000200},
{0x000083d0, 0x000301ff},
};
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9ca00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xa248105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9485_1_1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009d1c, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x80be4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a210, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x3b3b3b3b},
+ {0x0000a3ac, 0x2f2f2f2f},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739cf},
+ {0x0000a418, 0x2d0019ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a5c4, 0xbfad9d74},
+ {0x0000a5c8, 0x0048060a},
+ {0x0000a5cc, 0x00000637},
+ {0x0000a760, 0x03020100},
+ {0x0000a764, 0x09080504},
+ {0x0000a768, 0x0d0c0b0a},
+ {0x0000a76c, 0x13121110},
+ {0x0000a770, 0x31301514},
+ {0x0000a774, 0x35343332},
+ {0x0000a778, 0x00000036},
+ {0x0000a780, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+};
+
+static const u32 ar9485Common_1_1[][2] = {
+ /* Addr allmodes */
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x0b283f31},
+ {0x000160ac, 0x24611800},
+ {0x000160b0, 0x03284f3e},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x10804008},
+};
+
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9485_1_1_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd28b3330},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6fbe0},
+ {0x000160d0, 0xf7dfcf3c},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x00008040},
+ {0x00016240, 0x08400000},
+ {0x00016244, 0x1bf90f00},
+ {0x00016248, 0x00000000},
+ {0x0001624c, 0x00000000},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x00d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016c40, 0x13188278},
+ {0x00016c44, 0x12000000},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10052e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a580, 0x00000000},
+ {0x0000a584, 0x00000000},
+ {0x0000a588, 0x00000000},
+ {0x0000a58c, 0x00000000},
+ {0x0000a590, 0x00000000},
+ {0x0000a594, 0x00000000},
+ {0x0000a598, 0x00000000},
+ {0x0000a59c, 0x00000000},
+ {0x0000a5a0, 0x00000000},
+ {0x0000a5a4, 0x00000000},
+ {0x0000a5a8, 0x00000000},
+ {0x0000a5ac, 0x00000000},
+ {0x0000a5b0, 0x00000000},
+ {0x0000a5b4, 0x00000000},
+ {0x0000a5b8, 0x00000000},
+ {0x0000a5bc, 0x00000000},
+};
+
+static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004014, 0xba280400},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10012e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_common_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10053e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 23838e37d45f..c718ab512a97 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/completion.h>
-#include <linux/pm_qos_params.h>
#include "debug.h"
#include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
-#define ATH9K_PM_QOS_DEFAULT_VALUE 55
-
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -95,9 +92,9 @@ struct ath_config {
* @BUF_XRETRY: To denote excessive retries of the buffer
*/
enum buffer_type {
- BUF_AMPDU = BIT(2),
- BUF_AGGR = BIT(3),
- BUF_XRETRY = BIT(5),
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+ BUF_XRETRY = BIT(2),
};
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
@@ -137,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
WME_AC_VO)
-#define ADDBA_EXCHANGE_ATTEMPTS 10
#define ATH_AGGR_DELIM_SZ 4
#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
/* number of delimiters for encryption padding */
@@ -184,7 +180,8 @@ enum ATH_AGGR_STATUS {
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
- u32 axq_qnum;
+ int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
+ u32 axq_qnum; /* ath9k hardware queue number */
u32 *axq_link;
struct list_head axq_q;
spinlock_t axq_lock;
@@ -192,6 +189,7 @@ struct ath_txq {
u32 axq_ampdu_depth;
bool stopped;
bool axq_tx_inprogress;
+ bool txq_flush_inprogress;
struct list_head axq_acq;
struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
struct list_head txq_fifo_pending;
@@ -234,7 +232,6 @@ struct ath_buf {
bool bf_stale;
u16 bf_flags;
struct ath_buf_state bf_state;
- struct ath_wiphy *aphy;
};
struct ath_atx_tid {
@@ -255,7 +252,10 @@ struct ath_atx_tid {
};
struct ath_node {
- struct ath_common *common;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct list_head list; /* for sc->nodes */
+ struct ieee80211_sta *sta; /* station struct we're part of */
+#endif
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
u16 maxampdu;
@@ -278,6 +278,11 @@ struct ath_tx_control {
#define ATH_TX_XRETRY 0x02
#define ATH_TX_BAR 0x04
+/**
+ * @txq_map: Index is mac80211 queue number. This is
+ * not necessarily the same as the hardware queue number
+ * (axq_qnum).
+ */
struct ath_tx {
u16 seq_no;
u32 txqsetup;
@@ -304,6 +309,8 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_buf *rx_bufptr;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
+ struct sk_buff *frag;
};
int ath_startrecv(struct ath_softc *sc);
@@ -340,10 +347,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
struct ath_vif {
int av_bslot;
+ bool is_bslot_active;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
enum nl80211_iftype av_opmode;
struct ath_buf *av_bcbuf;
- struct ath_tx_control av_btxctl;
u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
};
@@ -363,7 +370,7 @@ struct ath_vif {
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
struct ath_beacon_config {
- u16 beacon_interval;
+ int beacon_interval;
u16 listen_interval;
u16 dtim_period;
u16 bmiss_timeout;
@@ -382,7 +389,6 @@ struct ath_beacon {
u32 ast_be_xmit;
u64 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
- struct ath_wiphy *bslot_aphy[ATH_BCBUF];
int slottime;
int slotupdate;
struct ath9k_tx_queue_info beacon_qi;
@@ -393,9 +399,10 @@ struct ath_beacon {
void ath_beacon_tasklet(unsigned long data);
void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
int ath_beaconq_config(struct ath_softc *sc);
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
/*******/
/* ANI */
@@ -442,26 +449,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_DEF 1
#define ATH_LED_PIN_9287 8
-#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
-#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
-
-enum ath_led_type {
- ATH_LED_RADIO,
- ATH_LED_ASSOC,
- ATH_LED_TX,
- ATH_LED_RX
-};
-
-struct ath_led {
- struct ath_softc *sc;
- struct led_classdev led_cdev;
- enum ath_led_type led_type;
- char name[32];
- bool registered;
-};
+#define ATH_LED_PIN_9485 6
+#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
void ath_deinit_leds(struct ath_softc *sc);
+#else
+static inline void ath_init_leds(struct ath_softc *sc)
+{
+}
+
+static inline void ath_deinit_leds(struct ath_softc *sc)
+{
+}
+#endif
+
/* Antenna diversity/combining */
#define ATH_ANT_RX_CURRENT_SHIFT 4
@@ -530,7 +532,6 @@ struct ath_ant_comb {
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
#define ATH_MAX_SW_RETRIES 10
#define ATH_CHAN_MAX 255
-#define IEEE80211_WEP_NKID 4 /* number of key ids */
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
@@ -558,27 +559,28 @@ struct ath_ant_comb {
#define PS_WAIT_FOR_TX_ACK BIT(3)
#define PS_BEACON_SYNC BIT(4)
-struct ath_wiphy;
struct ath_rate_table;
+struct ath9k_vif_iter_data {
+ const u8 *hw_macaddr; /* phy's hardware address, set
+ * before starting iteration for
+ * valid bssid mask.
+ */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+ int nothers; /* number of vifs not specified above. */
+};
+
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
- spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
- struct ath_wiphy *pri_wiphy;
- struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
- * have NULL entries */
- int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
int chan_idx;
int chan_is_ht;
- struct ath_wiphy *next_wiphy;
- struct work_struct chan_work;
- int wiphy_select_failures;
- unsigned long wiphy_select_first_fail;
- struct delayed_work wiphy_work;
- unsigned long wiphy_scheduler_int;
- int wiphy_scheduler_index;
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
@@ -595,14 +597,16 @@ struct ath_softc {
struct work_struct hw_check_work;
struct completion paprd_complete;
+ unsigned int hw_busy_count;
+
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
u16 ps_flags; /* PS_* */
u16 curtxpow;
- u8 nbcnvifs;
- u16 nvifs;
bool ps_enabled;
bool ps_idle;
+ short nbcnvifs;
+ short nvifs;
unsigned long ps_usecount;
struct ath_config config;
@@ -611,47 +615,29 @@ struct ath_softc {
struct ath_beacon beacon;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
- struct ath_led radio_led;
- struct ath_led assoc_led;
- struct ath_led tx_led;
- struct ath_led rx_led;
- struct delayed_work ath_led_blink_work;
- int led_on_duration;
- int led_off_duration;
- int led_on_cnt;
- int led_off_cnt;
+#ifdef CONFIG_MAC80211_LEDS
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
+#endif
- int beacon_interval;
+ struct ath9k_hw_cal_data caldata;
+ int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
+ spinlock_t nodes_lock;
+ struct list_head nodes; /* basically, stations */
+ unsigned int tx_complete_poll_work_seen;
#endif
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
+ struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
-
- struct pm_qos_request_list pm_qos_req;
-};
-
-struct ath_wiphy {
- struct ath_softc *sc; /* shared for all virtual wiphys */
- struct ieee80211_hw *hw;
- struct ath9k_hw_cal_data caldata;
- enum ath_wiphy_state {
- ATH_WIPHY_INACTIVE,
- ATH_WIPHY_ACTIVE,
- ATH_WIPHY_PAUSING,
- ATH_WIPHY_PAUSED,
- ATH_WIPHY_SCAN,
- } state;
- bool idle;
- int chan_idx;
- int chan_is_ht;
- int last_rssi;
};
void ath9k_tasklet(unsigned long data);
@@ -666,7 +652,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
-extern int ath9k_pm_qos_value;
extern bool is_ath9k_unloaded;
irqreturn_t ath_isr(int irq, void *dev);
@@ -675,14 +660,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *ichan);
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan);
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
+bool ath9k_uses_beacons(int type);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -706,26 +690,12 @@ void ath9k_ps_restore(struct ath_softc *sc);
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-int ath9k_wiphy_add(struct ath_softc *sc);
-int ath9k_wiphy_del(struct ath_wiphy *aphy);
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
-int ath9k_wiphy_pause(struct ath_wiphy *aphy);
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-int ath9k_wiphy_select(struct ath_wiphy *aphy);
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
-void ath9k_wiphy_chan_work(struct work_struct *work);
-bool ath9k_wiphy_started(struct ath_softc *sc);
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
- struct ath_wiphy *selected);
-bool ath9k_wiphy_scanning(struct ath_softc *sc);
-void ath9k_wiphy_work(struct work_struct *work);
-bool ath9k_all_wiphys_idle(struct ath_softc *sc);
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
-
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
void ath_start_rfkill_poll(struct ath_softc *sc);
extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 385ba03134ba..a4bdfdb043ef 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
@@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_buf *bf;
struct ath_vif *avp;
@@ -142,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
int cabq_depth;
- if (aphy->state != ATH_WIPHY_ACTIVE)
- return NULL;
-
avp = (void *)vif->drv_priv;
cabq = sc->beacon.cabq;
- if (avp->av_bcbuf == NULL)
+ if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
return NULL;
/* Release the old beacon first */
@@ -225,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
return bf;
}
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp;
struct ath_buf *bf;
struct sk_buff *skb;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
__le64 tstamp;
avp = (void *)vif->drv_priv;
@@ -244,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
struct ath_buf, list);
list_del(&avp->av_bcbuf->list);
- if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
- sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
- sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (ath9k_uses_beacons(vif->type)) {
int slot;
/*
* Assign the vif to a beacon xmit slot. As
@@ -256,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
for (slot = 0; slot < ATH_BCBUF; slot++)
if (sc->beacon.bslot[slot] == NULL) {
avp->av_bslot = slot;
+ avp->is_bslot_active = false;
/* NB: keep looking for a double slot */
if (slot == 0 || !sc->beacon.bslot[slot-1])
@@ -263,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
}
BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
sc->beacon.bslot[avp->av_bslot] = vif;
- sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
sc->nbcnvifs++;
}
}
@@ -281,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
/* NB: the beacon data buffer must be 32-bit aligned. */
skb = ieee80211_beacon_get(sc->hw, vif);
- if (skb == NULL) {
- ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
+ if (skb == NULL)
return -ENOMEM;
- }
tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -293,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
u64 tsfadjust;
int intval;
- intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
/*
* Calculate the TSF offset for this beacon slot, i.e., the
@@ -325,6 +316,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
ath_err(common, "dma_mapping_error on beacon alloc\n");
return -ENOMEM;
}
+ avp->is_bslot_active = true;
return 0;
}
@@ -336,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
if (avp->av_bslot != -1) {
sc->beacon.bslot[avp->av_bslot] = NULL;
- sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
sc->nbcnvifs--;
}
@@ -358,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
void ath_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
- struct ath_wiphy *aphy;
int slot;
u32 bfaddr, bc = 0, tsftu;
u64 tsf;
@@ -406,7 +397,7 @@ void ath_beacon_tasklet(unsigned long data)
* on the tsf to safeguard against missing an swba.
*/
- intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -420,7 +411,6 @@ void ath_beacon_tasklet(unsigned long data)
*/
slot = ATH_BCBUF - slot - 1;
vif = sc->beacon.bslot[slot];
- aphy = sc->beacon.bslot_aphy[slot];
ath_dbg(common, ATH_DBG_BEACON,
"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -428,7 +418,7 @@ void ath_beacon_tasklet(unsigned long data)
bfaddr = 0;
if (vif) {
- bf = ath_beacon_generate(aphy->hw, vif);
+ bf = ath_beacon_generate(sc->hw, vif);
if (bf != NULL) {
bfaddr = bf->bf_daddr;
bc = 1;
@@ -720,10 +710,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
iftype = sc->sc_ah->opmode;
}
- cur_conf->listen_interval = 1;
- cur_conf->dtim_count = 1;
- cur_conf->bmiss_timeout =
- ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
/*
* It looks like mac80211 may end up using beacon interval of zero in
@@ -735,8 +725,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
cur_conf->beacon_interval = 100;
/*
- * Some times we dont parse dtim period from mac80211, in that case
- * use a default value
+ * We don't parse dtim period from mac80211 during the driver
+ * initialization as it breaks association with hidden-ssid
+ * AP and it causes latency in roaming
*/
if (cur_conf->dtim_period == 0)
cur_conf->dtim_period = 1;
@@ -760,3 +751,36 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
sc->sc_flags |= SC_OP_BEACONS;
}
+
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_vif *avp;
+ int slot;
+ bool found = false;
+
+ ath9k_ps_wakeup(sc);
+ if (status) {
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (sc->beacon.bslot[slot]) {
+ avp = (void *)sc->beacon.bslot[slot]->drv_priv;
+ if (avp->is_bslot_active) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ /* Re-enable beaconing */
+ ah->imask |= ATH9K_INT_SWBA;
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ }
+ } else {
+ /* Disable SWBA interrupt */
+ ah->imask &= ~ATH9K_INT_SWBA;
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ tasklet_kill(&sc->bcon_tasklet);
+ ath9k_hw_stoptxdma(ah, sc->beacon.beaconq);
+ }
+ ath9k_ps_restore(sc);
+}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index b68a1acbddd0..b4a92a4313f6 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
s16 default_nf;
int i, j;
- if (!ah->caldata)
- return;
-
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index df1998d48253..615e68276e72 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
}
EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower)
+{
+ if (cur_txpow != new_txpow) {
+ ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
+ /* read back in case value is clamped */
+ *txpower = ath9k_hw_regulatory(ah)->power_limit;
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_update_txpow);
+
static int __init ath9k_cmn_init(void)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index a126bddebb0a..b2f7b5f89097 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,8 +23,6 @@
/* Common header for Atheros 802.11n base driver cores */
-#define IEEE80211_WEP_NKID 4
-
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3586c43077a7..5cfcf8c235a4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -381,41 +381,40 @@ static const struct file_operations fops_interrupt = {
.llseek = default_llseek,
};
-static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
+static const char *channel_type_str(enum nl80211_channel_type t)
{
- switch (state) {
- case ATH_WIPHY_INACTIVE:
- return "INACTIVE";
- case ATH_WIPHY_ACTIVE:
- return "ACTIVE";
- case ATH_WIPHY_PAUSING:
- return "PAUSING";
- case ATH_WIPHY_PAUSED:
- return "PAUSED";
- case ATH_WIPHY_SCAN:
- return "SCAN";
+ switch (t) {
+ case NL80211_CHAN_NO_HT:
+ return "no ht";
+ case NL80211_CHAN_HT20:
+ return "ht20";
+ case NL80211_CHAN_HT40MINUS:
+ return "ht40-";
+ case NL80211_CHAN_HT40PLUS:
+ return "ht40+";
+ default:
+ return "???";
}
- return "?";
}
static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_wiphy *aphy = sc->pri_wiphy;
- struct ieee80211_channel *chan = aphy->hw->conf.channel;
+ struct ieee80211_channel *chan = sc->hw->conf.channel;
+ struct ieee80211_conf *conf = &(sc->hw->conf);
char buf[512];
unsigned int len = 0;
- int i;
u8 addr[ETH_ALEN];
u32 tmp;
len += snprintf(buf + len, sizeof(buf) - len,
- "primary: %s (%s chan=%d ht=%d)\n",
- wiphy_name(sc->pri_wiphy->hw->wiphy),
- ath_wiphy_state_str(sc->pri_wiphy->state),
+ "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
+ wiphy_name(sc->hw->wiphy),
ieee80211_frequency_to_channel(chan->center_freq),
- aphy->chan_is_ht);
+ chan->center_freq,
+ conf->channel_type,
+ channel_type_str(conf->channel_type));
put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -457,156 +456,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
else
len += snprintf(buf + len, sizeof(buf) - len, "\n");
- /* Put variable-length stuff down here, and check for overflows. */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
- if (aphy_tmp == NULL)
- continue;
- chan = aphy_tmp->hw->conf.channel;
- len += snprintf(buf + len, sizeof(buf) - len,
- "secondary: %s (%s chan=%d ht=%d)\n",
- wiphy_name(aphy_tmp->hw->wiphy),
- ath_wiphy_state_str(aphy_tmp->state),
- ieee80211_frequency_to_channel(chan->center_freq),
- aphy_tmp->chan_is_ht);
- }
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
-{
- int i;
- if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
- return sc->pri_wiphy;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
- return aphy;
- }
- return NULL;
-}
-
-static int del_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_del(aphy);
-}
-
-static int pause_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_pause(aphy);
-}
-
-static int unpause_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_unpause(aphy);
-}
-
-static int select_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_select(aphy);
-}
-
-static int schedule_wiphy(struct ath_softc *sc, const char *msec)
-{
- ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
- return 0;
-}
-
-static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[50];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = '\0';
-
- if (strncmp(buf, "add", 3) == 0) {
- int res = ath9k_wiphy_add(sc);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "del=", 4) == 0) {
- int res = del_wiphy(sc, buf + 4);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "pause=", 6) == 0) {
- int res = pause_wiphy(sc, buf + 6);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "unpause=", 8) == 0) {
- int res = unpause_wiphy(sc, buf + 8);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "select=", 7) == 0) {
- int res = select_wiphy(sc, buf + 7);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "schedule=", 9) == 0) {
- int res = schedule_wiphy(sc, buf + 9);
- if (res < 0)
- return res;
- } else
- return -EOPNOTSUPP;
-
- return count;
-}
-
static const struct file_operations fops_wiphy = {
.read = read_file_wiphy,
- .write = write_file_wiphy,
.open = ath9k_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
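+/* Map a WME access category index to its hardware TX queue number. */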
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
#define PR(str, elem) \
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[WME_AC_BE].elem, \
- sc->debug.stats.txstats[WME_AC_BK].elem, \
- sc->debug.stats.txstats[WME_AC_VI].elem, \
- sc->debug.stats.txstats[WME_AC_VO].elem); \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
+ if (len >= size) \
+ goto done; \
+} while (0)
+
+#define PRX(str, elem) \
+do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
+ if (len >= size) \
+ goto done; \
} while(0)
+#define PRQLE(str, elem) \
+do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13i%11i%10i%10i\n", str, \
+ list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
+ if (len >= size) \
+ goto done; \
+} while (0)
+
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
char *buf;
- unsigned int len = 0, size = 2048;
+ unsigned int len = 0, size = 8000;
+ int i;
ssize_t retval = 0;
+ char tmp[32];
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
+ len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
+ " poll-work-seen: %u\n"
+ "%30s %10s%10s%10s\n\n",
+ ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
+ sc->tx_complete_poll_work_seen,
+ "BE", "BK", "VI", "VO");
PR("MPDUs Queued: ", queued);
PR("MPDUs Completed: ", completed);
PR("Aggregates: ", a_aggr);
- PR("AMPDUs Queued: ", a_queued);
+ PR("AMPDUs Queued HW:", a_queued_hw);
+ PR("AMPDUs Queued SW:", a_queued_sw);
PR("AMPDUs Completed:", a_completed);
PR("AMPDUs Retried: ", a_retries);
PR("AMPDUs XRetried: ", a_xretries);
@@ -618,6 +543,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("DELIM Underrun: ", delim_underrun);
PR("TX-Pkts-All: ", tx_pkts_all);
PR("TX-Bytes-All: ", tx_bytes_all);
+ PR("hw-put-tx-buf: ", puttxbuf);
+ PR("hw-tx-start: ", txstart);
+ PR("hw-tx-proc-desc: ", txprocdesc);
+ len += snprintf(buf + len, size - len,
+ "%s%11p%11p%10p%10p\n", "txq-memory-address:",
+ &(sc->tx.txq_map[WME_AC_BE]),
+ &(sc->tx.txq_map[WME_AC_BK]),
+ &(sc->tx.txq_map[WME_AC_VI]),
+ &(sc->tx.txq_map[WME_AC_VO]));
+ if (len >= size)
+ goto done;
+
+ PRX("axq-qnum: ", axq_qnum);
+ PRX("axq-depth: ", axq_depth);
+ PRX("axq-ampdu_depth: ", axq_ampdu_depth);
+ PRX("axq-stopped ", stopped);
+ PRX("tx-in-progress ", axq_tx_inprogress);
+ PRX("pending-frames ", pending_frames);
+ PRX("txq_headidx: ", txq_headidx);
+ PRX("txq_tailidx: ", txq_headidx);
+
+ PRQLE("axq_q empty: ", axq_q);
+ PRQLE("axq_acq empty: ", axq_acq);
+ PRQLE("txq_fifo_pending: ", txq_fifo_pending);
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
+ snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
+ PRQLE(tmp, txq_fifo[i]);
+ }
+
+ /* Print out more detailed queue-info */
+ for (i = 0; i <= WME_AC_BK; i++) {
+ struct ath_txq *txq = &(sc->tx.txq[i]);
+ struct ath_atx_ac *ac;
+ struct ath_atx_tid *tid;
+ if (len >= size)
+ goto done;
+ spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->axq_acq)) {
+ ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
+ list);
+ len += snprintf(buf + len, size - len,
+ "txq[%i] first-ac: %p sched: %i\n",
+ i, ac, ac->sched);
+ if (list_empty(&ac->tid_q) || (len >= size))
+ goto done_for;
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ len += snprintf(buf + len, size - len,
+ " first-tid: %p sched: %i paused: %i\n",
+ tid, tid->sched, tid->paused);
+ }
+ done_for:
+ spin_unlock_bh(&txq->axq_lock);
+ }
+
+done:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t read_file_stations(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 64000;
+ struct ath_node *an = NULL;
+ ssize_t retval = 0;
+ int q;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len,
+ "Stations:\n"
+ " tid: addr sched paused buf_q-empty an ac\n"
+ " ac: addr sched tid_q-empty txq\n");
+
+ spin_lock(&sc->nodes_lock);
+ list_for_each_entry(an, &sc->nodes, list) {
+ len += snprintf(buf + len, size - len,
+ "%pM\n", an->sta->addr);
+ if (len >= size)
+ goto done;
+
+ for (q = 0; q < WME_NUM_TID; q++) {
+ struct ath_atx_tid *tid = &(an->tid[q]);
+ len += snprintf(buf + len, size - len,
+ " tid: %p %s %s %i %p %p\n",
+ tid, tid->sched ? "sched" : "idle",
+ tid->paused ? "paused" : "running",
+ list_empty(&tid->buf_q),
+ tid->an, tid->ac);
+ if (len >= size)
+ goto done;
+ }
+
+ for (q = 0; q < WME_NUM_AC; q++) {
+ struct ath_atx_ac *ac = &(an->ac[q]);
+ len += snprintf(buf + len, size - len,
+ " ac: %p %s %i %p\n",
+ ac, ac->sched ? "sched" : "idle",
+ list_empty(&ac->tid_q), ac->txq);
+ if (len >= size)
+ goto done;
+ }
+ }
+
+done:
+ spin_unlock(&sc->nodes_lock);
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t read_file_misc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hw *hw = sc->hw;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+ const char *tmp;
+ unsigned int reg;
+ struct ath9k_vif_iter_data iter_data;
+
+ ath9k_calculate_iter_data(hw, NULL, &iter_data);
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ switch (sc->sc_ah->opmode) {
+ case NL80211_IFTYPE_ADHOC:
+ tmp = "ADHOC";
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ tmp = "MESH";
+ break;
+ case NL80211_IFTYPE_AP:
+ tmp = "AP";
+ break;
+ case NL80211_IFTYPE_STATION:
+ tmp = "STATION";
+ break;
+ default:
+ tmp = "???";
+ break;
+ }
+
+ len += snprintf(buf + len, size - len,
+ "curbssid: %pM\n"
+ "OP-Mode: %s(%i)\n"
+ "Beacon-Timer-Register: 0x%x\n",
+ common->curbssid,
+ tmp, (int)(sc->sc_ah->opmode),
+ REG_READ(ah, AR_BEACON_PERIOD));
+
+ reg = REG_READ(ah, AR_TIMER_MODE);
+ len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
+ reg);
+ if (reg & AR_TBTT_TIMER_EN)
+ len += snprintf(buf + len, size - len, "TBTT ");
+ if (reg & AR_DBA_TIMER_EN)
+ len += snprintf(buf + len, size - len, "DBA ");
+ if (reg & AR_SWBA_TIMER_EN)
+ len += snprintf(buf + len, size - len, "SWBA ");
+ if (reg & AR_HCF_TIMER_EN)
+ len += snprintf(buf + len, size - len, "HCF ");
+ if (reg & AR_TIM_TIMER_EN)
+ len += snprintf(buf + len, size - len, "TIM ");
+ if (reg & AR_DTIM_TIMER_EN)
+ len += snprintf(buf + len, size - len, "DTIM ");
+ len += snprintf(buf + len, size - len, ")\n");
+
+ reg = sc->sc_ah->imask;
+ len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
+ if (reg & ATH9K_INT_SWBA)
+ len += snprintf(buf + len, size - len, "SWBA ");
+ if (reg & ATH9K_INT_BMISS)
+ len += snprintf(buf + len, size - len, "BMISS ");
+ if (reg & ATH9K_INT_CST)
+ len += snprintf(buf + len, size - len, "CST ");
+ if (reg & ATH9K_INT_RX)
+ len += snprintf(buf + len, size - len, "RX ");
+ if (reg & ATH9K_INT_RXHP)
+ len += snprintf(buf + len, size - len, "RXHP ");
+ if (reg & ATH9K_INT_RXLP)
+ len += snprintf(buf + len, size - len, "RXLP ");
+ if (reg & ATH9K_INT_BB_WATCHDOG)
+ len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
+ /* Other interrupt bits could be added here if needed. */
+ len += snprintf(buf + len, size - len, ")\n");
+
+ len += snprintf(buf + len, size - len,
+ "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
+ sc->nvifs, sc->nbcnvifs);
+
+ len += snprintf(buf + len, size - len,
+ "Calculated-BSSID-Mask: %pM\n",
+ iter_data.mask);
if (len > size)
len = size;
@@ -629,9 +771,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts, struct ath_txq *txq)
{
- int qnum = skb_get_queue_mapping(bf->bf_mpdu);
+ int qnum = txq->axq_qnum;
TX_STAT_INC(qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
@@ -666,6 +808,20 @@ static const struct file_operations fops_xmit = {
.llseek = default_llseek,
};
+static const struct file_operations fops_stations = {
+ .read = read_file_stations,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_misc = {
+ .read = read_file_misc,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -903,6 +1059,14 @@ int ath9k_init_debug(struct ath_hw *ah)
sc, &fops_xmit))
goto err;
+ if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_stations))
+ goto err;
+
+ if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_misc))
+ goto err;
+
if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
sc, &fops_recv))
goto err;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 1e5078bd0344..59338de0ce19 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
* @queued: Total MPDUs (non-aggr) queued
* @completed: Total MPDUs (non-aggr) completed
* @a_aggr: Total no. of aggregates queued
- * @a_queued: Total AMPDUs queued
+ * @a_queued_hw: Total AMPDUs queued to hardware
+ * @a_queued_sw: Total AMPDUs queued to software queues
* @a_completed: Total AMPDUs completed
* @a_retries: No. of AMPDUs retried (SW)
* @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
* @desc_cfg_err: Descriptor configuration errors
* @data_urn: TX data underrun errors
* @delim_urn: TX delimiter underrun errors
+ * @puttxbuf: Number of times hardware was given txbuf to write.
+ * @txstart: Number of times hardware was told to start tx.
+ * @txprocdesc: Number of times tx descriptor was processed
*/
struct ath_tx_stats {
u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
u32 queued;
u32 completed;
u32 a_aggr;
- u32 a_queued;
+ u32 a_queued_hw;
+ u32 a_queued_sw;
u32 a_completed;
u32 a_retries;
u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
u32 desc_cfg_err;
u32 data_underrun;
u32 delim_underrun;
+ u32 puttxbuf;
+ u32 txstart;
+ u32 txprocdesc;
};
/**
@@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts);
+ struct ath_tx_status *ts, struct ath_txq *txq);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
#else
@@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ struct ath_txq *txq)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index d05163159572..8c18bed3a558 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
return false;
}
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size)
+{
+ int i = 0, j, addr;
+ u32 addrdata[8];
+ u32 data[8];
+
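+	/*
+	 * Collect up to 8 EEPROM word addresses and fetch them with a
+	 * single multi-register read, flushing the batch each time it fills.
+	 */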
+ for (addr = 0; addr < size; addr++) {
+ addrdata[i] = AR5416_EEPROM_OFFSET +
+ ((addr + eep_start_loc) << AR5416_EEPROM_S);
+ i++;
+ if (i == 8) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ i = 0;
+ }
+ }
+
+ if (i != 0) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ }
+}
+
bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
{
return common->bus_ops->eeprom_read(common, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 58e2ddc927a9..bd82447f5b78 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
u16 *indexL, u16 *indexR);
bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size);
void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
u8 *pVpdList, u16 numIntercepts,
u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index fbdff7e47952..bc77a308c901 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
}
-static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
-{
#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+
+static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.map4k;
- int addr, eep_start_loc = 0;
-
- eep_start_loc = 64;
-
- if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
- }
+ int addr, eep_start_loc = 64;
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
@@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
}
return true;
-#undef SIZE_EEPROM_4K
}
+static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
+
+ return true;
+}
+
+static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_4k_fill_eeprom(ah);
+ else
+ return __ath9k_hw_4k_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_4K
+
static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
{
#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 9b6bc8a953bc..8cd8333cc086 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,7 +17,7 @@
#include "hw.h"
#include "ar9002_phy.h"
-#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
}
-static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data;
- int addr, eep_start_loc;
+ int addr, eep_start_loc = AR9287_EEP_START_LOC;
eep_data = (u16 *)eep;
- if (common->bus_ops->ath_bus_type == ATH_USB)
- eep_start_loc = AR9287_HTC_EEP_START_LOC;
- else
- eep_start_loc = AR9287_EEP_START_LOC;
-
- if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
- }
-
- for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
eep_data)) {
ath_dbg(common, ATH_DBG_EEPROM,
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
return true;
}
+static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map9287;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ AR9287_HTC_EEP_START_LOC,
+ SIZE_EEPROM_AR9287);
+ return true;
+}
+
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
+ else
+ return __ath9k_hw_ar9287_fill_eeprom(ah);
+}
+
static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
{
u32 sum = 0, el, integer;
@@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
need_swap = true;
eepdata = (u16 *)(&ah->eeprom);
- for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
temp = swab16(*eepdata);
*eepdata = temp;
eepdata++;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 749a93608664..fccd87df7300 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
}
-static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
-{
#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+
+static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.def;
int addr, ar5416_eep_start_loc = 0x100;
@@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
eep_data++;
}
return true;
-#undef SIZE_EEPROM_DEF
}
+static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.def;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ 0x100, SIZE_EEPROM_DEF);
+ return true;
+}
+
+static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_def_fill_eeprom(ah);
+ else
+ return __ath9k_hw_def_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_DEF
+
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
struct ar5416_eeprom_def *eep =
@@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
/* Enable fixup for AR_AN_TOP2 if necessary */
- if (AR_SREV_9280_20_OR_LATER(ah) &&
- (eep->baseEepHeader.version & 0xff) > 0x0a &&
- eep->baseEepHeader.pwdclkind == 0)
+ if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
+ ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+ (eep->baseEepHeader.pwdclkind == 0))
ah->need_an_top2_fixup = 1;
if ((common->bus_ops->ath_bus_type == ATH_USB) &&
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 133764069246..0fb8f8ac275a 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -20,121 +20,31 @@
/* LED functions */
/********************************/
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
+#ifdef CONFIG_MAC80211_LEDS
static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- if (led_blink)
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
+ struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
}
void ath_deinit_leds(struct ath_softc *sc)
{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ if (!sc->led_registered)
+ return;
+
+ ath_led_brightness(&sc->led_cdev, LED_OFF);
+ led_classdev_unregister(&sc->led_cdev);
}
void ath_init_leds(struct ath_softc *sc)
{
- char *trigger;
int ret;
if (AR_SREV_9287(sc->sc_ah))
sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9485(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9485;
else
sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
@@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc)
/* LED off, active low */
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
- if (led_blink)
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- if (led_blink)
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
+ if (!led_blink)
+ sc->led_cdev.default_trigger =
+ ieee80211_get_radio_led_name(sc->hw);
+
+ snprintf(sc->led_name, sizeof(sc->led_name),
+ "ath9k-%s", wiphy_name(sc->hw->wiphy));
+ sc->led_cdev.name = sc->led_name;
+ sc->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
+ if (ret < 0)
+ return;
+
+ sc->led_registered = true;
}
+#endif
/*******************/
/* Rfkill */
@@ -201,8 +85,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
bool blocked = !!ath_is_rfkill_set(sc);
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084eb9cb..f1b8af64569c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -52,6 +52,9 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x083A, 0xA704),
.driver_info = AR9280_USB }, /* SMC Networks */
+ { USB_DEVICE(0x0cf3, 0x20ff),
+ .driver_info = STORAGE_DEVICE },
+
{ },
};
@@ -219,8 +222,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL;
struct sk_buff *nskb = NULL;
int ret = 0, i;
- u16 *hdr, tx_skb_cnt = 0;
+ u16 tx_skb_cnt = 0;
u8 *buf;
+ __le16 *hdr;
if (hif_dev->tx.tx_skb_cnt == 0)
return 0;
@@ -245,9 +249,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
buf = tx_buf->buf;
buf += tx_buf->offset;
- hdr = (u16 *)buf;
- *hdr++ = nskb->len;
- *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
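+	/* Each frame is preceded by a 4-byte stream header: the LE16
+	 * frame length followed by the LE16 stream-mode tag. */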
+ hdr = (__le16 *)buf;
+ *hdr++ = cpu_to_le16(nskb->len);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
buf += 4;
memcpy(buf, nskb->data, nskb->len);
tx_buf->len = nskb->len + 4;
@@ -913,13 +917,11 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Unable to allocate URBs\n");
- goto err_urb;
+ goto err_fw_download;
}
return 0;
-err_urb:
- ath9k_hif_usb_dealloc_urbs(hif_dev);
err_fw_download:
release_firmware(hif_dev->firmware);
err_fw_req:
@@ -934,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
release_firmware(hif_dev->firmware);
}
+/*
+ * An exact copy of the function from zd1211rw.
+ */
+static int send_eject_command(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_endpoint_descriptor *endpoint;
+ unsigned char *cmd;
+ u8 bulk_out_ep;
+ int r;
+
+ /* Find bulk out endpoint */
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
+ dev_err(&udev->dev,
+ "ath9k_htc: Could not find bulk out endpoint\n");
+ return -ENODEV;
+ }
+
+ cmd = kzalloc(31, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENODEV;
+
+ /* USB bulk command block */
+ cmd[0] = 0x55; /* bulk command signature */
+ cmd[1] = 0x53; /* bulk command signature */
+ cmd[2] = 0x42; /* bulk command signature */
+ cmd[3] = 0x43; /* bulk command signature */
+ cmd[14] = 6; /* command length */
+
+ cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
+ cmd[19] = 0x2; /* eject disc */
+
+ dev_info(&udev->dev, "Ejecting storage device...\n");
+ r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
+ cmd, 31, NULL, 2000);
+ kfree(cmd);
+ if (r)
+ return r;
+
+ /* At this point, the device disconnects and reconnects with the real
+ * ID numbers. */
+
+ usb_set_intfdata(interface, NULL);
+ return 0;
+}
+
static int ath9k_hif_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -941,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
struct hif_device_usb *hif_dev;
int ret = 0;
+ if (id->driver_info == STORAGE_DEVICE)
+ return send_eject_command(interface);
+
hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
if (!hif_dev) {
ret = -ENOMEM;
@@ -1027,12 +1087,13 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
- if (hif_dev) {
- ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_htc_hw_free(hif_dev->htc_handle);
- ath9k_hif_usb_dev_deinit(hif_dev);
- usb_set_intfdata(interface, NULL);
- }
+ if (!hif_dev)
+ return;
+
+ ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ usb_set_intfdata(interface, NULL);
if (!unplugged && (hif_dev->flags & HIF_USB_START))
ath9k_hif_usb_reboot(udev);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 780ac5eac501..753a245c5ad1 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -32,6 +32,7 @@
#include "wmi.h"
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
@@ -204,8 +205,50 @@ struct ath9k_htc_target_stats {
__be32 ht_tx_xretries;
} __packed;
+#define ATH9K_HTC_MAX_VIF 2
+#define ATH9K_HTC_MAX_BCN_VIF 2
+
+#define INC_VIF(_priv, _type) do { \
+ switch (_type) { \
+ case NL80211_IFTYPE_STATION: \
+ _priv->num_sta_vif++; \
+ break; \
+ case NL80211_IFTYPE_ADHOC: \
+ _priv->num_ibss_vif++; \
+ break; \
+ case NL80211_IFTYPE_AP: \
+ _priv->num_ap_vif++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_VIF(_priv, _type) do { \
+ switch (_type) { \
+ case NL80211_IFTYPE_STATION: \
+ _priv->num_sta_vif--; \
+ break; \
+ case NL80211_IFTYPE_ADHOC: \
+ _priv->num_ibss_vif--; \
+ break; \
+ case NL80211_IFTYPE_AP: \
+ _priv->num_ap_vif--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
struct ath9k_htc_vif {
u8 index;
+ u16 seq_no;
+ bool beacon_configured;
+};
+
+struct ath9k_vif_iter_data {
+ const u8 *hw_macaddr;
+ u8 mask[ETH_ALEN];
};
#define ATH9K_HTC_MAX_STA 8
@@ -310,10 +353,8 @@ struct ath_led {
struct htc_beacon_config {
u16 beacon_interval;
- u16 listen_interval;
u16 dtim_period;
u16 bmiss_timeout;
- u8 dtim_count;
};
struct ath_btcoex {
@@ -333,13 +374,12 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
#define OP_SCANNING BIT(1)
#define OP_LED_ASSOCIATED BIT(2)
#define OP_LED_ON BIT(3)
-#define OP_PREAMBLE_SHORT BIT(4)
-#define OP_PROTECT_ENABLE BIT(5)
-#define OP_ASSOCIATED BIT(6)
-#define OP_ENABLE_BEACON BIT(7)
-#define OP_LED_DEINIT BIT(8)
-#define OP_BT_PRIORITY_DETECTED BIT(9)
-#define OP_BT_SCAN BIT(10)
+#define OP_ENABLE_BEACON BIT(4)
+#define OP_LED_DEINIT BIT(5)
+#define OP_BT_PRIORITY_DETECTED BIT(6)
+#define OP_BT_SCAN BIT(7)
+#define OP_ANI_RUNNING BIT(8)
+#define OP_TSF_RESET BIT(9)
struct ath9k_htc_priv {
struct device *dev;
@@ -358,15 +398,24 @@ struct ath9k_htc_priv {
enum htc_endpoint_id data_vi_ep;
enum htc_endpoint_id data_vo_ep;
+ u8 vif_slot;
+ u8 mon_vif_idx;
+ u8 sta_slot;
+ u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
+ u8 num_ibss_vif;
+ u8 num_sta_vif;
+ u8 num_ap_vif;
+
u16 op_flags;
u16 curtxpow;
u16 txpowlimit;
u16 nvifs;
u16 nstations;
- u16 seq_no;
u32 bmiss_cnt;
+ bool rearm_ani;
+ bool reconfig_beacon;
- struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS];
+ struct ath9k_hw_cal_data caldata;
spinlock_t beacon_lock;
@@ -382,7 +431,7 @@ struct ath9k_htc_priv {
struct ath9k_htc_rx rx;
struct tasklet_struct tx_tasklet;
struct sk_buff_head tx_queue;
- struct delayed_work ath9k_ani_work;
+ struct delayed_work ani_work;
struct work_struct ps_work;
struct work_struct fatal_work;
@@ -424,6 +473,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv);
void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif);
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
@@ -436,8 +486,9 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
void ath9k_htc_station_work(struct work_struct *work);
void ath9k_htc_aggr_work(struct work_struct *work);
-void ath9k_ani_work(struct work_struct *work);;
-void ath_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_ani_work(struct work_struct *work);
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
int ath9k_tx_init(struct ath9k_htc_priv *priv);
void ath9k_tx_tasklet(unsigned long data);
@@ -460,7 +511,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
void ath9k_ps_work(struct work_struct *work);
bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
enum ath9k_power_mode mode);
-void ath_update_txpow(struct ath9k_htc_priv *priv);
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 87cc65a78a3f..8d1d8792436d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -123,8 +123,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+ intval, tsf, tsftu);
+ ath_dbg(common, ATH_DBG_CONFIG,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ enum ath9k_int imask = 0;
+ u32 nexttbtt, intval, tsftu;
+ __be32 htc_imask = 0;
+ int ret;
+ u8 cmd_rsp;
+ u64 tsf;
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
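+	/* Split the interval across the supported beaconing interfaces
+	 * so that their beacon slots are staggered. */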
+ intval /= ATH9K_HTC_MAX_BCN_VIF;
+ nexttbtt = intval;
+
+ if (priv->op_flags & OP_TSF_RESET) {
+ intval |= ATH9K_BEACON_RESET_TSF;
+ priv->op_flags &= ~OP_TSF_RESET;
+ } else {
+ /*
+ * Pull nexttbtt forward to reflect the current TSF.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ } while (nexttbtt < tsftu);
+ }
+
+ intval |= ATH9K_BEACON_ENA;
+
+ if (priv->op_flags & OP_ENABLE_BEACON)
+ imask |= ATH9K_INT_SWBA;
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n",
+ bss_conf->beacon_interval, nexttbtt, imask);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+ priv->bmiss_cnt = 0;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
struct htc_beacon_config *bss_conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
enum ath9k_int imask = 0;
- u32 nexttbtt, intval;
+ u32 nexttbtt, intval, tsftu;
__be32 htc_imask = 0;
int ret;
u8 cmd_rsp;
+ u64 tsf;
intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
nexttbtt = intval;
+
+ /*
+ * Pull nexttbtt forward to reflect the current TSF.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ } while (nexttbtt < tsftu);
+
intval |= ATH9K_BEACON_ENA;
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_BEACON,
- "IBSS Beacon config, intval: %d, imask: 0x%x\n",
- bss_conf->beacon_interval, imask);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n",
+ bss_conf->beacon_interval, nexttbtt, imask);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
struct ieee80211_hdr *hdr =
(struct ieee80211_hdr *) beacon->data;
- priv->seq_no += 0x10;
+ avp->seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
}
tx_ctl.type = ATH9K_HTC_NORMAL;
@@ -253,30 +310,123 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
}
}
+static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *beacon_configured = (bool *)data;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ avp->beacon_configured)
+ *beacon_configured = true;
+}
+
+static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ bool beacon_configured;
+
+ /*
+ * Changing the beacon interval when multiple AP interfaces
+ * are configured will affect beacon transmission of all
+ * of them.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ (priv->num_ap_vif > 1) &&
+ (vif->type == NL80211_IFTYPE_AP) &&
+ (cur_conf->beacon_interval != bss_conf->beacon_int)) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Changing beacon interval of multiple AP interfaces !\n");
+ return false;
+ }
+
+ /*
+ * If the HW is operating in AP mode, any new station interfaces that
+ * are added cannot change the beacon parameters.
+ */
+ if (priv->num_ap_vif &&
+ (vif->type != NL80211_IFTYPE_AP)) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "HW in AP mode, cannot set STA beacon parameters\n");
+ return false;
+ }
+
+ /*
+ * The beacon parameters are configured only for the first
+ * station interface.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+ (priv->num_sta_vif > 1) &&
+ (vif->type == NL80211_IFTYPE_STATION)) {
+ beacon_configured = false;
+ ieee80211_iterate_active_interfaces_atomic(priv->hw,
+ ath9k_htc_beacon_iter,
+ &beacon_configured);
+
+ if (beacon_configured) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon already configured for a station interface\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+ if (!ath9k_htc_check_beacon_config(priv, vif))
+ return;
cur_conf->beacon_interval = bss_conf->beacon_int;
if (cur_conf->beacon_interval == 0)
cur_conf->beacon_interval = 100;
cur_conf->dtim_period = bss_conf->dtim_period;
- cur_conf->listen_interval = 1;
- cur_conf->dtim_count = 1;
cur_conf->bmiss_timeout =
ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
ath9k_htc_beacon_config_sta(priv, cur_conf);
+ avp->beacon_configured = true;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_AP:
+ ath9k_htc_beacon_config_ap(priv, cur_conf);
+ break;
+ default:
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
+ }
+}
+
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+
+ switch (priv->ah->opmode) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
break;
case NL80211_IFTYPE_ADHOC:
ath9k_htc_beacon_config_adhoc(priv, cur_conf);
break;
+ case NL80211_IFTYPE_AP:
+ ath9k_htc_beacon_config_ap(priv, cur_conf);
+ break;
default:
ath_dbg(common, ATH_DBG_CONFIG,
"Unsupported beaconing mode\n");
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index fe70f67aa088..7e630a81b453 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
ret, ah->curchan->channel);
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
/* Start RX */
WMI_CMD(WMI_START_RECV_CMDID);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0352f0994caa..fc67c937e172 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -294,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
return be32_to_cpu(val);
}
+static void ath9k_multi_regread(void *hw_priv, u32 *addr,
+ u32 *val, u16 count)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 tmpaddr[8];
+ __be32 tmpval[8];
+ int i, ret;
+
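+	/* count is expected to be at most 8; tmpaddr/tmpval are sized for that. */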
+ for (i = 0; i < count; i++) {
+ tmpaddr[i] = cpu_to_be32(addr[i]);
+ }
+
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *)tmpaddr, sizeof(u32) * count,
+ (u8 *)tmpval, sizeof(u32) * count,
+ 100);
+ if (unlikely(ret)) {
+ ath_dbg(common, ATH_DBG_WMI,
+ "Multiple REGISTER READ FAILED (count: %d)\n", count);
+ }
+
+ for (i = 0; i < count; i++) {
+ val[i] = be32_to_cpu(tmpval[i]);
+ }
+}
+
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -404,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
static const struct ath_ops ath9k_common_ops = {
.read = ath9k_regread,
+ .multi_read = ath9k_multi_regread,
.write = ath9k_regwrite,
.enable_write_buffer = ath9k_enable_regwrite_buffer,
.write_flush = ath9k_regwrite_flush,
@@ -650,7 +679,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
(unsigned long)priv);
tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
(unsigned long)priv);
- INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+ INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
@@ -758,6 +787,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
struct ath_hw *ah;
int error = 0;
struct ath_regulatory *reg;
+ char hw_name[64];
/* Bring up device */
error = ath9k_init_priv(priv, devid, product, drv_info);
@@ -798,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
goto err_world;
}
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
+ "BE:%d, BK:%d, VI:%d, VO:%d\n",
+ priv->wmi_cmd_ep,
+ priv->beacon_ep,
+ priv->cab_ep,
+ priv->uapsd_ep,
+ priv->mgmt_ep,
+ priv->data_be_ep,
+ priv->data_bk_ep,
+ priv->data_vi_ep,
+ priv->data_vo_ep);
+
+ ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
+ wiphy_info(hw->wiphy, "%s\n", hw_name);
+
ath9k_init_leds(priv);
ath9k_start_rfkill_poll(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 6bb59958f71e..db8c0c044e9e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
/* Utilities */
/*************/
-void ath_update_txpow(struct ath9k_htc_priv *priv)
-{
- struct ath_hw *ah = priv->ah;
-
- if (priv->curtxpow != priv->txpowlimit) {
- ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
- /* read back in case value is clamped */
- priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- }
-}
-
/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
@@ -116,12 +105,88 @@ void ath9k_ps_work(struct work_struct *work)
ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
}
+static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = data;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
+ priv->reconfig_beacon = true;
+
+ if (bss_conf->assoc) {
+ priv->rearm_ani = true;
+ priv->reconfig_beacon = true;
+ }
+}
+
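+/*
+ * Re-arm ANI and beacon timers after a reset or channel change,
+ * based on the state of the currently active interfaces.
+ */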
+static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
+{
+ priv->rearm_ani = false;
+ priv->reconfig_beacon = false;
+
+ ieee80211_iterate_active_interfaces_atomic(priv->hw,
+ ath9k_htc_vif_iter, priv);
+ if (priv->rearm_ani)
+ ath9k_htc_start_ani(priv);
+
+ if (priv->reconfig_beacon) {
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_beacon_reconfig(priv);
+ ath9k_htc_ps_restore(priv);
+ }
+}
+
+static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
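+	/* Clear mask bits wherever this interface's address differs from
+	 * the hardware MAC; those bits are ignored during BSSID matching. */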
+ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+}
+
+static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ /*
+ * Use the hardware MAC address as reference, the hardware uses it
+ * together with the BSSID mask when matching addresses.
+ */
+ iter_data.hw_macaddr = common->macaddr;
+ memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+ if (vif)
+ ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
+
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
+ &iter_data);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+ ath_hw_setbssidmask(common);
+}
+
+static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
+{
+ if (priv->num_ibss_vif)
+ priv->ah->opmode = NL80211_IFTYPE_ADHOC;
+ else if (priv->num_ap_vif)
+ priv->ah->opmode = NL80211_IFTYPE_AP;
+ else
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+ ath9k_hw_setopmode(priv->ah);
+}
+
void ath9k_htc_reset(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *channel = priv->hw->conf.channel;
- struct ath9k_hw_cal_data *caldata;
+ struct ath9k_hw_cal_data *caldata = NULL;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
@@ -130,16 +195,14 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
- if (priv->op_flags & OP_ASSOCIATED)
- cancel_delayed_work_sync(&priv->ath9k_ani_work);
-
+ ath9k_htc_stop_ani(priv);
ieee80211_stop_queues(priv->hw);
htc_stop(priv->htc);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
- caldata = &priv->caldata[channel->hw_value];
+ caldata = &priv->caldata;
ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
if (ret) {
ath_err(common,
@@ -147,7 +210,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
channel->center_freq, ret);
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
WMI_CMD(WMI_START_RECV_CMDID);
ath9k_host_rx_init(priv);
@@ -158,12 +222,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
WMI_CMD(WMI_ENABLE_INTR_CMDID);
htc_start(priv->htc);
-
- if (priv->op_flags & OP_ASSOCIATED) {
- ath9k_htc_beacon_config(priv, priv->vif);
- ath_start_ani(priv);
- }
-
+ ath9k_htc_vif_reconfig(priv);
ieee80211_wake_queues(priv->hw);
ath9k_htc_ps_restore(priv);
@@ -179,7 +238,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
struct ieee80211_conf *conf = &common->hw->conf;
bool fastcc;
struct ieee80211_channel *channel = hw->conf.channel;
- struct ath9k_hw_cal_data *caldata;
+ struct ath9k_hw_cal_data *caldata = NULL;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
@@ -202,7 +261,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
fastcc);
- caldata = &priv->caldata[channel->hw_value];
+ if (!fastcc)
+ caldata = &priv->caldata;
ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (ret) {
ath_err(common,
@@ -211,7 +271,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
goto err;
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
WMI_CMD(WMI_START_RECV_CMDID);
if (ret)
@@ -230,11 +291,23 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
goto err;
htc_start(priv->htc);
+
+ if (!(priv->op_flags & OP_SCANNING) &&
+ !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ ath9k_htc_vif_reconfig(priv);
+
err:
ath9k_htc_ps_restore(priv);
return ret;
}
+/*
+ * Monitor mode handling is a tad complicated because the firmware requires
+ * an interface to be created exclusively, while mac80211 doesn't associate
+ * an interface with the mode.
+ *
+ * So, for now, only one monitor interface can be configured.
+ */
static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -244,9 +317,10 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
- hvif.index = 0; /* Should do for now */
+ hvif.index = priv->mon_vif_idx;
WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
priv->nvifs--;
+ priv->vif_slot &= ~(1 << priv->mon_vif_idx);
}
static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
@@ -254,70 +328,87 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_target_vif hvif;
struct ath9k_htc_target_sta tsta;
- int ret = 0;
+ int ret = 0, sta_idx;
u8 cmd_rsp;
- if (priv->nvifs > 0)
- return -ENOBUFS;
+ if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
+ (priv->nstations >= ATH9K_HTC_MAX_STA)) {
+ ret = -ENOBUFS;
+ goto err_vif;
+ }
- if (priv->nstations >= ATH9K_HTC_MAX_STA)
- return -ENOBUFS;
+ sta_idx = ffz(priv->sta_slot);
+ if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
+ ret = -ENOBUFS;
+ goto err_vif;
+ }
/*
* Add an interface.
*/
-
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
- priv->ah->opmode = NL80211_IFTYPE_MONITOR;
- hvif.index = priv->nvifs;
+ hvif.index = ffz(priv->vif_slot);
WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
if (ret)
- return ret;
+ goto err_vif;
+
+ /*
+ * Assign the monitor interface index as a special case here.
+ * This is needed when the interface is brought down.
+ */
+ priv->mon_vif_idx = hvif.index;
+ priv->vif_slot |= (1 << hvif.index);
+
+ /*
+ * Set the hardware mode to monitor only if there are no
+ * other interfaces.
+ */
+ if (!priv->nvifs)
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
priv->nvifs++;
/*
* Associate a station with the interface for packet injection.
*/
-
memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
tsta.is_vif_sta = 1;
- tsta.sta_index = priv->nstations;
+ tsta.sta_index = sta_idx;
tsta.vif_index = hvif.index;
tsta.maxampdu = 0xffff;
WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
if (ret) {
ath_err(common, "Unable to add station entry for monitor mode\n");
- goto err_vif;
+ goto err_sta;
}
+ priv->sta_slot |= (1 << sta_idx);
priv->nstations++;
-
- /*
- * Set chainmask etc. on the target.
- */
- ret = ath9k_htc_update_cap_target(priv);
- if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
- "Failed to update capability in target\n");
-
+ priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
priv->ah->is_monitoring = true;
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attached a monitor interface at idx: %d, sta idx: %d\n",
+ priv->mon_vif_idx, sta_idx);
+
return 0;
-err_vif:
+err_sta:
/*
* Remove the interface from the target.
*/
__ath9k_htc_remove_monitor_interface(priv);
+err_vif:
+ ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+
return ret;
}
@@ -329,7 +420,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
__ath9k_htc_remove_monitor_interface(priv);
- sta_idx = 0; /* Only single interface, for now */
+ sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
if (ret) {
@@ -337,9 +428,14 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
return ret;
}
+ priv->sta_slot &= ~(1 << sta_idx);
priv->nstations--;
priv->ah->is_monitoring = false;
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Removed a monitor interface at idx: %d, sta idx: %d\n",
+ priv->mon_vif_idx, sta_idx);
+
return 0;
}
@@ -351,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
struct ath9k_htc_target_sta tsta;
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
struct ath9k_htc_sta *ista;
- int ret;
+ int ret, sta_idx;
u8 cmd_rsp;
if (priv->nstations >= ATH9K_HTC_MAX_STA)
return -ENOBUFS;
+ sta_idx = ffz(priv->sta_slot);
+ if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA))
+ return -ENOBUFS;
+
memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
if (sta) {
@@ -366,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
tsta.associd = common->curaid;
tsta.is_vif_sta = 0;
tsta.valid = true;
- ista->index = priv->nstations;
+ ista->index = sta_idx;
} else {
memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
tsta.is_vif_sta = 1;
}
- tsta.sta_index = priv->nstations;
+ tsta.sta_index = sta_idx;
tsta.vif_index = avp->index;
tsta.maxampdu = 0xffff;
if (sta && sta->ht_cap.ht_supported)
@@ -387,12 +487,21 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
return ret;
}
- if (sta)
+ if (sta) {
ath_dbg(common, ATH_DBG_CONFIG,
"Added a station entry for: %pM (idx: %d)\n",
sta->addr, tsta.sta_index);
+ } else {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Added a station entry for VIF %d (idx: %d)\n",
+ avp->index, tsta.sta_index);
+ }
+ priv->sta_slot |= (1 << sta_idx);
priv->nstations++;
+ if (!sta)
+ priv->vif_sta_pos[avp->index] = sta_idx;
+
return 0;
}
@@ -401,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
struct ieee80211_sta *sta)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
struct ath9k_htc_sta *ista;
int ret;
u8 cmd_rsp, sta_idx;
@@ -409,7 +519,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
ista = (struct ath9k_htc_sta *) sta->drv_priv;
sta_idx = ista->index;
} else {
- sta_idx = 0;
+ sta_idx = priv->vif_sta_pos[avp->index];
}
WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
@@ -421,12 +531,19 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
return ret;
}
- if (sta)
+ if (sta) {
ath_dbg(common, ATH_DBG_CONFIG,
"Removed a station entry for: %pM (idx: %d)\n",
sta->addr, sta_idx);
+ } else {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Removed a station entry for VIF %d (idx: %d)\n",
+ avp->index, sta_idx);
+ }
+ priv->sta_slot &= ~(1 << sta_idx);
priv->nstations--;
+
return 0;
}
@@ -808,7 +925,7 @@ void ath9k_htc_debug_remove_root(void)
/* ANI */
/*******/
-void ath_start_ani(struct ath9k_htc_priv *priv)
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -817,15 +934,22 @@ void ath_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
- ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ priv->op_flags |= OP_ANI_RUNNING;
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
}
-void ath9k_ani_work(struct work_struct *work)
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->ani_work);
+ priv->op_flags &= ~OP_ANI_RUNNING;
+}
+
+void ath9k_htc_ani_work(struct work_struct *work)
{
struct ath9k_htc_priv *priv =
- container_of(work, struct ath9k_htc_priv,
- ath9k_ani_work.work);
+ container_of(work, struct ath9k_htc_priv, ani_work.work);
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
bool longcal = false;
@@ -834,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work)
unsigned int timestamp = jiffies_to_msecs(jiffies);
u32 cal_interval, short_cal_interval;
- short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+ short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+ ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
/* Only calibrate if awake */
if (ah->power_mode != ATH9K_PM_AWAKE)
@@ -903,7 +1028,7 @@ set_timer:
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
- ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(cal_interval));
}
@@ -911,7 +1036,7 @@ set_timer:
/* mac80211 Callbacks */
/**********************/
-static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct ath9k_htc_priv *priv = hw->priv;
@@ -924,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize)
- return -1;
+ goto fail_tx;
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
}
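
This hunk keeps the existing 4-byte alignment fix-up in the TX path, but drops the frame instead of returning an error now that the handler is void: padsize = padpos & 3 extra bytes are pushed in front and the 802.11 header is slid back over them. A rough standalone sketch of that alignment step, with a plain buffer standing in for the skb and its headroom:

#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: align the 802.11 header to a 4-byte boundary the way
 * the TX path above does with skb_push() + memmove(). "frame" points into
 * a larger buffer, "headroom" is the spare space in front of it and
 * hdr_len plays the role of padpos. Returns the new start of the frame,
 * or NULL when there is not enough headroom (the driver drops the frame).
 */
static unsigned char *pad_frame(unsigned char *frame, size_t headroom,
				size_t *len, size_t hdr_len)
{
	size_t padsize = hdr_len & 3;

	if (!padsize || *len <= hdr_len)
		return frame;		/* already aligned, nothing to do */
	if (headroom < padsize)
		return NULL;

	/* grow at the front, then slide the header back over the pad */
	memmove(frame - padsize, frame, hdr_len);
	*len += padsize;
	return frame - padsize;
}

int main(void)
{
	unsigned char storage[64];
	unsigned char *frame = storage + 8;	/* 8 bytes of headroom */
	size_t len = 30, hdr_len = 26;		/* 26 & 3 == 2 pad bytes */

	memset(storage, 0xab, sizeof(storage));
	frame = pad_frame(frame, 8, &len, hdr_len);
	if (frame)
		printf("padded length: %zu\n", len);	/* 32 */
	return 0;
}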
@@ -945,11 +1070,10 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto fail_tx;
}
- return 0;
+ return;
fail_tx:
dev_kfree_skb_any(skb);
- return 0;
}
static int ath9k_htc_start(struct ieee80211_hw *hw)
@@ -987,7 +1111,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
return ret;
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
mode = ath9k_htc_get_curmode(priv, init_channel);
htc_mode = cpu_to_be16(mode);
@@ -997,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath9k_host_rx_init(priv);
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Failed to update capability in target\n");
+
priv->op_flags &= ~OP_INVALID;
htc_start(priv->htc);
@@ -1051,25 +1181,21 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
cancel_work_sync(&priv->fatal_work);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_htc_stop_ani(priv);
ath9k_led_stop_brightness(priv);
mutex_lock(&priv->mutex);
- /* Remove monitor interface here */
- if (ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (ath9k_htc_remove_monitor_interface(priv))
- ath_err(common, "Unable to remove monitor interface\n");
- else
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor interface removed\n");
- }
-
if (ah->btcoex_hw.enabled) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_htc_cancel_btcoex_work(priv);
}
+ /* Remove a monitor interface if it's present. */
+ if (priv->ah->is_monitoring)
+ ath9k_htc_remove_monitor_interface(priv);
+
ath9k_hw_phy_disable(ah);
ath9k_hw_disable(ah);
ath9k_htc_ps_restore(priv);
@@ -1093,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- /* Only one interface for now */
- if (priv->nvifs > 0) {
- ret = -ENOBUFS;
- goto out;
+ if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
+ mutex_unlock(&priv->mutex);
+ return -ENOBUFS;
+ }
+
+ if (priv->num_ibss_vif ||
+ (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+ ath_err(common, "IBSS coexistence with other modes is not allowed\n");
+ mutex_unlock(&priv->mutex);
+ return -ENOBUFS;
+ }
+
+ if (((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_ADHOC)) &&
+ ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
+ ath_err(common, "Max. number of beaconing interfaces reached\n");
+ mutex_unlock(&priv->mutex);
+ return -ENOBUFS;
}
ath9k_htc_ps_wakeup(priv);
@@ -1110,6 +1250,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_ADHOC:
hvif.opmode = cpu_to_be32(HTC_M_IBSS);
break;
+ case NL80211_IFTYPE_AP:
+ hvif.opmode = cpu_to_be32(HTC_M_HOSTAP);
+ break;
default:
ath_err(common,
"Interface type %d not yet supported\n", vif->type);
@@ -1117,34 +1260,39 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", vif->type);
-
- priv->ah->opmode = vif->type;
-
/* Index starts from zero on the target */
- avp->index = hvif.index = priv->nvifs;
+ avp->index = hvif.index = ffz(priv->vif_slot);
hvif.rtsthreshold = cpu_to_be16(2304);
WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
if (ret)
goto out;
- priv->nvifs++;
-
/*
* We need a node in target to tx mgmt frames
* before association.
*/
ret = ath9k_htc_add_station(priv, vif, NULL);
- if (ret)
+ if (ret) {
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
goto out;
+ }
- ret = ath9k_htc_update_cap_target(priv);
- if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
- "Failed to update capability in target\n");
+ ath9k_htc_set_bssid_mask(priv, vif);
+ priv->vif_slot |= (1 << avp->index);
+ priv->nvifs++;
priv->vif = vif;
+
+ INC_VIF(priv, vif->type);
+ ath9k_htc_set_opmode(priv);
+
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ !(priv->op_flags & OP_ANI_RUNNING))
+ ath9k_htc_start_ani(priv);
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+
out:
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1162,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
int ret = 0;
u8 cmd_rsp;
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
-
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
@@ -1172,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
hvif.index = avp->index;
WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
priv->nvifs--;
+ priv->vif_slot &= ~(1 << avp->index);
ath9k_htc_remove_station(priv, vif, NULL);
priv->vif = NULL;
+ DEC_VIF(priv, vif->type);
+ ath9k_htc_set_opmode(priv);
+
+ /*
+ * Stop ANI only if there are no associated station interfaces.
+ */
+ if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
+ priv->rearm_ani = false;
+ ieee80211_iterate_active_interfaces_atomic(priv->hw,
+ ath9k_htc_vif_iter, priv);
+ if (!priv->rearm_ani)
+ ath9k_htc_stop_ani(priv);
+ }
+
+ ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@@ -1211,13 +1374,11 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
* IEEE80211_CONF_CHANGE_CHANNEL is handled.
*/
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR) {
- if (ath9k_htc_add_monitor_interface(priv))
- ath_err(common, "Failed to set monitor mode\n");
- else
- ath_dbg(common, ATH_DBG_CONFIG,
- "HW opmode set to Monitor mode\n");
- }
+ if ((conf->flags & IEEE80211_CONF_MONITOR) &&
+ !priv->ah->is_monitoring)
+ ath9k_htc_add_monitor_interface(priv);
+ else if (priv->ah->is_monitoring)
+ ath9k_htc_remove_monitor_interface(priv);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -1252,7 +1413,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_POWER) {
priv->txpowlimit = 2 * conf->power_level;
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
+ priv->txpowlimit, &priv->curtxpow);
}
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1439,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
+ bool set_assoc;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
+ /*
+ * Set the HW AID/BSSID only for the first station interface
+ * or in IBSS mode.
+ */
+ set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
+ ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+ (priv->num_sta_vif == 1)));
+
+
if (changed & BSS_CHANGED_ASSOC) {
- common->curaid = bss_conf->assoc ?
- bss_conf->aid : 0;
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
- bss_conf->assoc);
-
- if (bss_conf->assoc) {
- priv->op_flags |= OP_ASSOCIATED;
- ath_start_ani(priv);
- } else {
- priv->op_flags &= ~OP_ASSOCIATED;
- cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ if (set_assoc) {
+ ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ common->curaid = bss_conf->assoc ?
+ bss_conf->aid : 0;
+
+ if (bss_conf->assoc)
+ ath9k_htc_start_ani(priv);
+ else
+ ath9k_htc_stop_ani(priv);
}
}
if (changed & BSS_CHANGED_BSSID) {
- /* Set BSSID */
- memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- ath9k_hw_write_associd(ah);
+ if (set_assoc) {
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_hw_write_associd(ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "BSSID: %pM aid: 0x%x\n",
- common->curbssid, common->curaid);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+ }
}
- if ((changed & BSS_CHANGED_BEACON_INT) ||
- (changed & BSS_CHANGED_BEACON) ||
- ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- bss_conf->enable_beacon)) {
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
priv->op_flags |= OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif);
}
- if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- !bss_conf->enable_beacon) {
- priv->op_flags &= ~OP_ENABLE_BEACON;
- ath9k_htc_beacon_config(priv, vif);
- }
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- priv->op_flags |= OP_PREAMBLE_SHORT;
- else
- priv->op_flags &= ~OP_PREAMBLE_SHORT;
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
+ /*
+ * Disable SWBA interrupt only if there are no
+ * AP/IBSS interfaces.
+ */
+ if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon disabled for BSS: %pM\n",
+ bss_conf->bssid);
+ priv->op_flags &= ~OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
}
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
- bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- hw->conf.channel->band != IEEE80211_BAND_5GHZ)
- priv->op_flags |= OP_PROTECT_ENABLE;
- else
- priv->op_flags &= ~OP_PROTECT_ENABLE;
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ /*
+ * Reset the HW TSF for the first AP interface.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ (priv->nvifs == 1) &&
+ (priv->num_ap_vif == 1) &&
+ (vif->type == NL80211_IFTYPE_AP)) {
+ priv->op_flags |= OP_TSF_RESET;
+ }
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon interval changed for BSS: %pM\n",
+ bss_conf->bssid);
+ ath9k_htc_beacon_config(priv, vif);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1557,12 +1734,14 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath9k_htc_sta *ista;
int ret = 0;
+ mutex_lock(&priv->mutex);
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
break;
@@ -1587,6 +1766,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
}
+ mutex_unlock(&priv->mutex);
+
return ret;
}
@@ -1599,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
priv->op_flags |= OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
- if (priv->op_flags & OP_ASSOCIATED)
- cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ ath9k_htc_stop_ani(priv);
mutex_unlock(&priv->mutex);
}
@@ -1609,14 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
struct ath9k_htc_priv *priv = hw->priv;
mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
spin_lock_bh(&priv->beacon_lock);
priv->op_flags &= ~OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
- if (priv->op_flags & OP_ASSOCIATED) {
- ath9k_htc_beacon_config(priv, priv->vif);
- ath_start_ani(priv);
- }
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_vif_reconfig(priv);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 7a5ffca21958..4a4f27ba96af 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -84,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ieee80211_vif *vif = tx_info->control.vif;
struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
struct ath9k_htc_tx_ctl tx_ctl;
enum htc_endpoint_id epid;
u16 qnum;
@@ -95,18 +97,31 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *) skb->data;
fc = hdr->frame_control;
- if (tx_info->control.vif &&
- (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
- vif_idx = ((struct ath9k_htc_vif *)
- tx_info->control.vif->drv_priv)->index;
- else
- vif_idx = priv->nvifs;
+ /*
+ * Find out on which interface this packet has to be
+ * sent out.
+ */
+ if (vif) {
+ avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ vif_idx = avp->index;
+ } else {
+ if (!priv->ah->is_monitoring) {
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "VIF is null, but no monitor interface !\n");
+ return -EINVAL;
+ }
+ vif_idx = priv->mon_vif_idx;
+ }
+
+ /*
+ * Find out which station this packet is destined for.
+ */
if (sta) {
ista = (struct ath9k_htc_sta *) sta->drv_priv;
sta_idx = ista->index;
} else {
- sta_idx = 0;
+ sta_idx = priv->vif_sta_pos[vif_idx];
}
memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
@@ -141,7 +156,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
/* CTS-to-self */
if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
- (priv->op_flags & OP_PROTECT_ENABLE))
+ (vif && vif->bss_conf.use_cts_prot))
flags |= ATH9K_HTC_TX_CTSONLY;
tx_hdr.flags = cpu_to_be32(flags);
@@ -217,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
void ath9k_tx_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
@@ -228,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data)
hdr = (struct ieee80211_hdr *) skb->data;
fc = hdr->frame_control;
tx_info = IEEE80211_SKB_CB(skb);
+ vif = tx_info->control.vif;
memset(&tx_info->status, 0, sizeof(tx_info->status));
+ if (!vif)
+ goto send_mac80211;
+
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ sta = ieee80211_find_sta(vif, hdr->addr1);
if (!sta) {
rcu_read_unlock();
ieee80211_tx_status(priv->hw, skb);
@@ -263,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data)
rcu_read_unlock();
+ send_mac80211:
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}
@@ -386,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
*/
if (((ah->opmode != NL80211_IFTYPE_AP) &&
(priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
- (ah->opmode == NL80211_IFTYPE_MONITOR))
+ ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
if (priv->rxfilter & FIF_CONTROL)
@@ -398,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
else
rfilt |= ATH9K_RX_FILTER_BEACON;
- if (conf_is_ht(&priv->hw->conf))
+ if (conf_is_ht(&priv->hw->conf)) {
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+ rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
+ }
+
+ if (priv->rxfilter & FIF_PSPOLL)
+ rfilt |= ATH9K_RX_FILTER_PSPOLL;
return rfilt;
@@ -412,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
u32 rfilt, mfilt[2];
/* configure rx filter */
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(ah, rfilt);
- /* configure bssid mask */
- ath_hw_setbssidmask(common);
-
- /* configure operational mode */
- ath9k_hw_setopmode(ah);
-
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -576,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
rxbuf->rxstatus.rs_flags);
- if (priv->op_flags & OP_ASSOCIATED) {
- if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
- !rxbuf->rxstatus.rs_moreaggr)
- ATH_RSSI_LPF(priv->rx.last_rssi,
- rxbuf->rxstatus.rs_rssi);
+ if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+ !rxbuf->rxstatus.rs_moreaggr)
+ ATH_RSSI_LPF(priv->rx.last_rssi,
+ rxbuf->rxstatus.rs_rssi);
- last_rssi = priv->rx.last_rssi;
+ last_rssi = priv->rx.last_rssi;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
- ATH_RSSI_EP_MULTIPLIER);
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
- if (rxbuf->rxstatus.rs_rssi < 0)
- rxbuf->rxstatus.rs_rssi = 0;
+ if (rxbuf->rxstatus.rs_rssi < 0)
+ rxbuf->rxstatus.rs_rssi = 0;
- if (ieee80211_is_beacon(fc))
- priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
- }
+ if (ieee80211_is_beacon(fc))
+ priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
rx_status->antenna = rxbuf->rxstatus.rs_antenna;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
return true;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9f01e50d5cda..9a3438174f86 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (ah->hw_version.devid == AR5416_AR9100_DEVID)
ah->hw_version.macVersion = AR_SREV_VERSION_9100;
+ ath9k_hw_read_revisions(ah);
+
+ /*
+ * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * We need to do this to avoid RMW of this register. We cannot
+ * read the reg when chip is asleep.
+ */
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
+
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_err(common, "Couldn't reset chip\n");
return -EIO;
@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_mode_regs(ah);
- /*
- * Read back AR_WA into a permanent copy and set bits 14 and 17.
- * We need to do this to avoid RMW of this register. We cannot
- * read the reg when chip is asleep.
- */
- ah->WARegVal = REG_READ(ah, AR_WA);
- ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
- AR_WA_ASPM_TIMER_BASED_DISABLE);
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -668,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REGWRITE_BUFFER_FLUSH(ah);
}
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+{
+ REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
+ udelay(100);
+ REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+ udelay(100);
+
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+}
+EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+
+#define DPLL2_KD_VAL 0x3D
+#define DPLL2_KI_VAL 0x06
+#define DPLL3_PHASE_SHIFT_VAL 0x1
+
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485(ah)) {
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+ REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
+
+ REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
+ udelay(100);
+
REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
+ udelay(110);
+ }
+
pll = ath9k_hw_compute_pll_control(ah, chan);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -1060,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
- udelay(2);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1082,8 +1121,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
return false;
}
- ath9k_hw_read_revisions(ah);
-
return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
}
@@ -1348,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1367,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ea9fde670646..ef79f4c876ca 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -70,6 +70,9 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
+ ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
+
#define ENABLE_REGWRITE_BUFFER(_ah) \
do { \
if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \
@@ -926,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 087a6a95edd5..79aec983279f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -144,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
RATE(540, 0x0c, 0),
};
+#ifdef CONFIG_MAC80211_LEDS
+static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+#endif
+
static void ath9k_deinit_softc(struct ath_softc *sc);
/*
@@ -254,8 +265,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
return ath_reg_notifier_apply(wiphy, request, reg);
@@ -442,9 +452,10 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- for (i = 0; i < WME_NUM_AC; i++)
+ for (i = 0; i < WME_NUM_AC; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
-
+ sc->tx.txq_map[i]->mac80211_qnum = i;
+ }
return 0;
}
@@ -516,10 +527,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -537,6 +546,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
if (!ah)
return -ENOMEM;
+ ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->hw_version.subsysid = subsysid;
sc->sc_ah = ah;
@@ -554,10 +564,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->btcoex_enabled = ath9k_btcoex_enable == 1;
spin_lock_init(&common->cc_lock);
- spin_lock_init(&sc->wiphy_lock);
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock_init(&sc->nodes_lock);
+ INIT_LIST_HEAD(&sc->nodes);
+#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
(unsigned long)sc);
@@ -699,7 +712,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops)
{
struct ieee80211_hw *hw = sc->hw;
- struct ath_wiphy *aphy = hw->priv;
struct ath_common *common;
struct ath_hw *ah;
int error = 0;
@@ -734,6 +746,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
ath9k_init_txpower_limits(sc);
+#ifdef CONFIG_MAC80211_LEDS
+ /* must be initialized before ieee80211_register_hw */
+ sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
+ ARRAY_SIZE(ath9k_tpt_blink));
+#endif
+
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
@@ -754,17 +773,11 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
INIT_WORK(&sc->hw_check_work, ath_hw_check);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
- aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
- pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
return 0;
error_world:
@@ -812,7 +825,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
void ath9k_deinit_device(struct ath_softc *sc)
{
struct ieee80211_hw *hw = sc->hw;
- int i = 0;
ath9k_ps_wakeup(sc);
@@ -821,21 +833,10 @@ void ath9k_deinit_device(struct ath_softc *sc)
ath9k_ps_restore(sc);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
-
ieee80211_unregister_hw(hw);
- pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
- kfree(sc->sec_wiphy);
}
void ath_descdma_cleanup(struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d3ce25..5efc869d65ff 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -690,17 +690,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+ /*
+ * Treat these errors as mutually exclusive to avoid spurious
+ * extra error reports from the hardware. If a CRC error is
+ * reported, then decryption and MIC errors are irrelevant,
+ * the frame is going to be dropped either way
+ */
if (ads.ds_rxstatus8 & AR_CRCErr)
rs->rs_status |= ATH9K_RXERR_CRC;
- if (ads.ds_rxstatus8 & AR_PHYErr) {
+ else if (ads.ds_rxstatus8 & AR_PHYErr) {
rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
rs->rs_phyerr = phyerr;
- }
- if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
- if (ads.ds_rxstatus8 & AR_MichaelErr)
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
+
if (ads.ds_rxstatus8 & AR_KeyMiss)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
}
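
The rxprocdesc change above turns independent error flags into an if/else-if chain so that a single frame reports at most one error. A small sketch of that priority ordering; the RX_* bit values here are made up, not the AR_* hardware definitions:

#include <stdio.h>

/* Stand-in bit definitions -- not the real AR_* register layout. */
#define RX_CRC_ERR	(1u << 0)
#define RX_PHY_ERR	(1u << 1)
#define RX_DECRYPT_ERR	(1u << 2)
#define RX_MIC_ERR	(1u << 3)

enum rx_error { RX_OK, RX_ERR_CRC, RX_ERR_PHY, RX_ERR_DECRYPT, RX_ERR_MIC };

/*
 * Pick exactly one error, highest priority first, the way the change above
 * does: once a CRC error is seen, decrypt/MIC indications for the same
 * frame are noise, since the frame is going to be dropped anyway.
 */
static enum rx_error classify_rx_error(unsigned int status)
{
	if (status & RX_CRC_ERR)
		return RX_ERR_CRC;
	if (status & RX_PHY_ERR)
		return RX_ERR_PHY;
	if (status & RX_DECRYPT_ERR)
		return RX_ERR_DECRYPT;
	if (status & RX_MIC_ERR)
		return RX_ERR_MIC;
	return RX_OK;
}

int main(void)
{
	/* CRC + MIC reported together: only the CRC error is kept. */
	printf("%d\n", classify_rx_error(RX_CRC_ERR | RX_MIC_ERR));
	return 0;
}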
@@ -885,7 +891,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
struct ath_common *common = ath9k_hw_common(ah);
if (!(ints & ATH9K_INT_GLOBAL))
- ath9k_hw_enable_interrupts(ah);
+ ath9k_hw_disable_interrupts(ah);
ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
@@ -963,7 +969,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
}
- ath9k_hw_enable_interrupts(ah);
+ if (ints & ATH9K_INT_GLOBAL)
+ ath9k_hw_enable_interrupts(ah);
return;
}
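
The interrupt-mask fix above has two parts: when the caller leaves ATH9K_INT_GLOBAL out of the requested mask, interrupts are now disabled (the old code enabled them by mistake), and the final enable only happens when GLOBAL was actually requested. A toy model of that control flow, with enable_ints()/disable_ints() standing in for the real helpers:

#include <stdbool.h>
#include <stdio.h>

#define INT_GLOBAL (1u << 31)	/* stand-in for ATH9K_INT_GLOBAL */

static bool ints_enabled;

static void enable_ints(void)  { ints_enabled = true;  }
static void disable_ints(void) { ints_enabled = false; }

/*
 * Sketch of the corrected flow: without the global enable bit, interrupts
 * stay (or become) disabled while the mask registers are reprogrammed, and
 * they are only turned back on when GLOBAL was requested.
 */
static void set_interrupts(unsigned int ints)
{
	if (!(ints & INT_GLOBAL))
		disable_ints();

	/* ... program the individual mask registers here ... */

	if (ints & INT_GLOBAL)
		enable_ints();
}

int main(void)
{
	set_interrupts(INT_GLOBAL | 0x7);
	printf("after GLOBAL request: %d\n", ints_enabled);	/* 1 */
	set_interrupts(0x7);
	printf("after masked request: %d\n", ints_enabled);	/* 0 */
	return 0;
}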
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7512f97e8f49..04d58ae923bb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -639,6 +639,8 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PHYERR = 0x00000100,
ATH9K_RX_FILTER_MYBEACON = 0x00000200,
ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
+ ATH9K_RX_FILTER_COMP_BA = 0x00000800,
+ ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
ATH9K_RX_FILTER_PSPOLL = 0x00004000,
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index da5c64597c1f..2e228aada1a9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,20 +15,10 @@
*/
#include <linux/nl80211.h>
+#include <linux/delay.h>
#include "ath9k.h"
#include "btcoex.h"
-static void ath_update_txpow(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (sc->curtxpow != sc->config.txpowlimit) {
- ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
- /* read back in case value is clamped */
- sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- }
-}
-
static u8 parse_mpdudensity(u8 mpdudensity)
{
/*
@@ -64,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity)
}
}
-static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
- struct ieee80211_hw *hw)
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ieee80211_channel *curchan = hw->conf.channel;
- struct ath9k_channel *channel;
- u8 chan_idx;
+ bool pending = false;
+
+ spin_lock_bh(&txq->axq_lock);
- chan_idx = curchan->hw_value;
- channel = &sc->sc_ah->channels[chan_idx];
- ath9k_update_ichannel(sc, hw, channel);
- return channel;
+ if (txq->axq_depth || !list_empty(&txq->axq_acq))
+ pending = true;
+ else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ pending = !list_empty(&txq->txq_fifo_pending);
+
+ spin_unlock_bh(&txq->axq_lock);
+ return pending;
}
bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
@@ -177,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
}
}
-static void ath_update_survey_stats(struct ath_softc *sc)
+/*
+ * Updates the survey statistics and returns the busy time since last
+ * update in %, if the measurement duration was long enough for the
+ * result to be useful, -1 otherwise.
+ */
+static int ath_update_survey_stats(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -185,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
struct survey_info *survey = &sc->survey[pos];
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
+ int ret = 0;
if (!ah->curchan)
- return;
+ return -1;
if (ah->power_mode == ATH9K_PM_AWAKE)
ath_hw_cycle_counters_update(common);
@@ -202,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
survey->channel_time_rx += cc->rx_frame / div;
survey->channel_time_tx += cc->tx_frame / div;
}
+
+ if (cc->cycles < div)
+ return -1;
+
+ if (cc->cycles > 0)
+ ret = cc->rx_busy * 100 / cc->cycles;
+
memset(cc, 0, sizeof(*cc));
ath_update_survey_nf(sc, pos);
+
+ return ret;
}
/*
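
ath_update_survey_stats() now reports how busy the channel was since the previous update, returning -1 when the cycle counters cover too short a window to be meaningful. A standalone sketch of that calculation; struct cc_snapshot and min_cycles are illustrative stand-ins for the driver's cycle counters and its clockrate * 1000 threshold:

#include <stdio.h>

/*
 * Hypothetical counter snapshot; the fields mirror what the survey code
 * reads back from the hardware (total clock cycles vs. rx-busy cycles).
 */
struct cc_snapshot {
	unsigned int cycles;
	unsigned int rx_busy;
};

/*
 * Return the channel-busy time in percent, or -1 when the measurement
 * window was too short to be useful -- the same contract the reworked
 * ath_update_survey_stats() has. The 64-bit cast just avoids overflow in
 * this sketch.
 */
static int busy_percent(const struct cc_snapshot *cc, unsigned int min_cycles)
{
	if (cc->cycles < min_cycles)
		return -1;
	if (cc->cycles == 0)
		return 0;
	return (int)((unsigned long long)cc->rx_busy * 100 / cc->cycles);
}

int main(void)
{
	struct cc_snapshot cc = { .cycles = 44000000, .rx_busy = 11000000 };

	printf("busy: %d%%\n", busy_percent(&cc, 44000));	/* 25% */
	return 0;
}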
@@ -215,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan)
{
- struct ath_wiphy *aphy = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
@@ -227,10 +233,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (sc->sc_flags & SC_OP_INVALID)
return -EIO;
+ sc->hw_busy_count = 0;
+
del_timer_sync(&common->ani.timer);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
ath9k_ps_wakeup(sc);
@@ -251,6 +260,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (!ath_stoprecv(sc))
stopped = false;
+ if (!ath9k_hw_check_alive(ah))
+ stopped = false;
+
/* XXX: do not flush receive queue here. We don't want
* to flush data frames already in queue because of
* changing channel. */
@@ -259,7 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
fastcc = false;
if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
- caldata = &aphy->caldata;
+ caldata = &sc->caldata;
ath_dbg(common, ATH_DBG_CONFIG,
"(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
@@ -281,17 +293,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
goto ps_restore;
}
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
ath9k_hw_set_interrupts(ah, ah->imask);
if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL);
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
ath_start_ani(common);
}
ps_restore:
+ ieee80211_wake_queues(hw);
+
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
@@ -549,6 +565,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
struct ath_hw *ah = sc->sc_ah;
an = (struct ath_node *)sta->drv_priv;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock(&sc->nodes_lock);
+ list_add(&an->list, &sc->nodes);
+ spin_unlock(&sc->nodes_lock);
+ an->sta = sta;
+#endif
if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
sc->sc_flags |= SC_OP_ENABLE_APM;
@@ -564,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock(&sc->nodes_lock);
+ list_del(&an->list);
+ spin_unlock(&sc->nodes_lock);
+ an->sta = NULL;
+#endif
+
if (sc->sc_flags & SC_OP_TXAGGR)
ath_tx_node_cleanup(sc, an);
}
@@ -571,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
void ath_hw_check(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
- int i;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ int busy;
ath9k_ps_wakeup(sc);
+ if (ath9k_hw_check_alive(sc->sc_ah))
+ goto out;
- for (i = 0; i < 3; i++) {
- if (ath9k_hw_check_alive(sc->sc_ah))
- goto out;
+ spin_lock_irqsave(&common->cc_lock, flags);
+ busy = ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
- msleep(1);
- }
- ath_reset(sc, true);
+ ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
+ "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ if (busy >= 99) {
+ if (++sc->hw_busy_count >= 3)
+ ath_reset(sc, true);
+ } else if (busy >= 0)
+ sc->hw_busy_count = 0;
out:
ath9k_ps_restore(sc);
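
Instead of resetting on the first failed check, ath_hw_check() now requires the medium to look essentially 100% busy on three consecutive polls before declaring a baseband hang; a valid lower reading clears the streak and an unusable one (busy < 0) leaves it alone. A minimal sketch of that heuristic:

#include <stdbool.h>
#include <stdio.h>

/*
 * Returns true when the hardware should be reset: the busy streak has to
 * reach three consecutive readings of >= 99%, mirroring the logic above.
 */
static bool hw_check(int busy, int *busy_count)
{
	if (busy >= 99) {
		if (++(*busy_count) >= 3) {
			*busy_count = 0;
			return true;	/* reset the hardware */
		}
	} else if (busy >= 0) {
		*busy_count = 0;
	}
	return false;
}

int main(void)
{
	int count = 0;
	int samples[] = { 100, 99, -1, 100 };	/* -1: window too short */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("busy=%4d -> reset=%d\n", samples[i],
		       hw_check(samples[i], &count));
	return 0;
}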
@@ -604,7 +641,15 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
- if (!ath9k_hw_check_alive(ah))
+ /*
+ * Only run the baseband hang check if beacons stop working in AP or
+ * IBSS mode, because it has a high false positive rate. For station
+ * mode it should not be necessary, since the upper layers will detect
+ * this through a beacon miss automatically and the following channel
+ * change will trigger a hardware reset anyway
+ */
+ if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
+ !ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -783,54 +828,11 @@ chip_reset:
#undef SCHED_INTR
}
-static u32 ath_get_extchanmode(struct ath_softc *sc,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- u32 chanmode = 0;
-
- switch (chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch(channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_G_HT40PLUS;
- break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch(channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_A_HT40PLUS;
- break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ath_wiphy *aphy = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -854,7 +856,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
ath_beacon_config(sc, vif);
/* Reset rssi stats */
- aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_flags |= SC_OP_ANI_RUN;
@@ -881,7 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_configpcipowersave(ah, 0, 0);
if (!ah->curchan)
- ah->curchan = ath_get_curchannel(sc, sc->hw);
+ ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
@@ -890,7 +892,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
channel->center_freq, r);
}
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
goto out;
@@ -907,6 +910,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_set_gpio(ah, ah->led_pin, 0);
ieee80211_wake_queues(hw);
+ ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
+
out:
spin_unlock_bh(&sc->sc_pcu_lock);
@@ -920,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
int r;
ath9k_ps_wakeup(sc);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
+
spin_lock_bh(&sc->sc_pcu_lock);
ieee80211_stop_queues(hw);
@@ -942,7 +949,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath_flushrecv(sc); /* flush recv queue */
if (!ah->curchan)
- ah->curchan = ath_get_curchannel(sc, hw);
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
@@ -966,6 +973,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
+ sc->hw_busy_count = 0;
+
/* Stop ANI */
del_timer_sync(&common->ani.timer);
@@ -993,7 +1002,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
* that changes the channel so update any state that
* might change as a result.
*/
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
ath_beacon_config(sc, NULL); /* restart beacons */
@@ -1021,38 +1031,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
-/* XXX: Remove me once we don't depend on ath9k_channel for all
- * this redundant data */
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *ichan)
-{
- struct ieee80211_channel *chan = hw->conf.channel;
- struct ieee80211_conf *conf = &hw->conf;
-
- ichan->channel = chan->center_freq;
- ichan->chan = chan;
-
- if (chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
-
- if (conf_is_ht(conf))
- ichan->chanmode = ath_get_extchanmode(sc, chan,
- conf->channel_type);
-}
-
/**********************/
/* mac80211 callbacks */
/**********************/
static int ath9k_start(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *curchan = hw->conf.channel;
@@ -1065,32 +1050,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
mutex_lock(&sc->mutex);
- if (ath9k_wiphy_started(sc)) {
- if (sc->chan_idx == curchan->hw_value) {
- /*
- * Already on the operational channel, the new wiphy
- * can be marked active.
- */
- aphy->state = ATH_WIPHY_ACTIVE;
- ieee80211_wake_queues(hw);
- } else {
- /*
- * Another wiphy is on another channel, start the new
- * wiphy in paused state.
- */
- aphy->state = ATH_WIPHY_PAUSED;
- ieee80211_stop_queues(hw);
- }
- mutex_unlock(&sc->mutex);
- return 0;
- }
- aphy->state = ATH_WIPHY_ACTIVE;
-
/* setup initial channel */
-
sc->chan_idx = curchan->hw_value;
- init_channel = ath_get_curchannel(sc, hw);
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1116,7 +1079,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
* This is needed only to setup initial state
* but it's best done after a reset.
*/
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
/*
* Setup the hardware after reset:
@@ -1173,12 +1137,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
- /* User has the option to provide pm-qos value as a module
- * parameter rather than using the default value of
- * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
- */
- pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1188,22 +1146,13 @@ mutex_unlock:
return r;
}
-static int ath9k_tx(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
- ath_dbg(common, ATH_DBG_XMIT,
- "ath9k: %s: TX in unexpected wiphy state %d\n",
- wiphy_name(hw->wiphy), aphy->state);
- goto exit;
- }
-
if (sc->ps_enabled) {
/*
* mac80211 does not set PM field for normal data frames, so we
@@ -1254,52 +1203,30 @@ static int ath9k_tx(struct ieee80211_hw *hw,
goto exit;
}
- return 0;
+ return;
exit:
dev_kfree_skb_any(skb);
- return 0;
}
static void ath9k_stop(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int i;
mutex_lock(&sc->mutex);
- aphy->state = ATH_WIPHY_INACTIVE;
-
- if (led_blink)
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
-
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i])
- break;
- }
-
- if (i == sc->num_sec_wiphy) {
- cancel_delayed_work_sync(&sc->wiphy_work);
- cancel_work_sync(&sc->chan_work);
- }
-
if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
- if (ath9k_wiphy_started(sc)) {
- mutex_unlock(&sc->mutex);
- return; /* another wiphy still in use */
- }
-
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
@@ -1325,6 +1252,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
} else
sc->rx.rxlink = NULL;
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
+
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1340,124 +1272,234 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
sc->ps_idle = true;
- ath9k_set_wiphy_idle(aphy, true);
ath_radio_disable(sc, hw);
sc->sc_flags |= SC_OP_INVALID;
- pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
mutex_unlock(&sc->mutex);
ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
}
-static int ath9k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+bool ath9k_uses_beacons(int type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ath9k_reclaim_beacon(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
- enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
- int ret = 0;
- mutex_lock(&sc->mutex);
+ ath9k_set_beaconing_status(sc, false);
+ ath_beacon_return(sc, avp);
+ ath9k_set_beaconing_status(sc, true);
+ sc->sc_flags &= ~SC_OP_BEACONS;
+}
+
+static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+ if (iter_data->hw_macaddr)
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &=
+ ~(iter_data->hw_macaddr[i] ^ mac[i]);
switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- ic_opmode = NL80211_IFTYPE_STATION;
+ case NL80211_IFTYPE_AP:
+ iter_data->naps++;
break;
- case NL80211_IFTYPE_WDS:
- ic_opmode = NL80211_IFTYPE_WDS;
+ case NL80211_IFTYPE_STATION:
+ iter_data->nstations++;
break;
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP:
+ iter_data->nadhocs++;
+ break;
case NL80211_IFTYPE_MESH_POINT:
- if (sc->nbcnvifs >= ATH_BCBUF) {
- ret = -ENOBUFS;
- goto out;
- }
- ic_opmode = vif->type;
+ iter_data->nmeshes++;
+ break;
+ case NL80211_IFTYPE_WDS:
+ iter_data->nwds++;
break;
default:
- ath_err(common, "Interface type %d not yet supported\n",
- vif->type);
- ret = -EOPNOTSUPP;
- goto out;
+ iter_data->nothers++;
+ break;
}
+}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", ic_opmode);
+/* Called with sc->mutex held. */
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
- /* Set the VIF opmode */
- avp->av_opmode = ic_opmode;
- avp->av_bslot = -1;
+ /*
+ * Use the hardware MAC address as reference, the hardware uses it
+ * together with the BSSID mask when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ iter_data->hw_macaddr = common->macaddr;
+ memset(&iter_data->mask, 0xff, ETH_ALEN);
- sc->nvifs++;
+ if (vif)
+ ath9k_vif_iter(iter_data, vif->addr, vif);
- ath9k_set_bssid_mask(hw, vif);
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
+ iter_data);
+}
+
+/* Called with sc->mutex held. */
+static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_vif_iter_data iter_data;
- if (sc->nvifs > 1)
- goto out; /* skip global settings for secondary vif */
+ ath9k_calculate_iter_data(hw, vif, &iter_data);
- if (ic_opmode == NL80211_IFTYPE_AP) {
+ ath9k_ps_wakeup(sc);
+ /* Set BSSID mask. */
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+ ath_hw_setbssidmask(common);
+
+ /* Set op-mode & TSF */
+ if (iter_data.naps > 0) {
ath9k_hw_set_tsfadjust(ah, 1);
sc->sc_flags |= SC_OP_TSF_RESET;
- }
+ ah->opmode = NL80211_IFTYPE_AP;
+ } else {
+ ath9k_hw_set_tsfadjust(ah, 0);
+ sc->sc_flags &= ~SC_OP_TSF_RESET;
- /* Set the device opmode */
- ah->opmode = ic_opmode;
+ if (iter_data.nwds + iter_data.nmeshes)
+ ah->opmode = NL80211_IFTYPE_AP;
+ else if (iter_data.nadhocs)
+ ah->opmode = NL80211_IFTYPE_ADHOC;
+ else
+ ah->opmode = NL80211_IFTYPE_STATION;
+ }
/*
* Enable MIB interrupts when there are hardware phy counters.
- * Note we only do this (at the moment) for station mode.
*/
- if ((vif->type == NL80211_IFTYPE_STATION) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
if (ah->config.enable_ani)
ah->imask |= ATH9K_INT_MIB;
ah->imask |= ATH9K_INT_TSFOOR;
+ } else {
+ ah->imask &= ~ATH9K_INT_MIB;
+ ah->imask &= ~ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_ps_restore(sc);
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
+ /* Set up ANI */
+ if ((iter_data.naps + iter_data.nadhocs) > 0) {
sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
+ } else {
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
+ del_timer_sync(&common->ani.timer);
}
+}
-out:
- mutex_unlock(&sc->mutex);
- return ret;
+/* Called with sc->mutex held, vif counts set up properly. */
+static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+
+ ath9k_calculate_summary_state(hw, vif);
+
+ if (ath9k_uses_beacons(vif->type)) {
+ int error;
+ /* This may fail because upper levels do not have beacons
+ * properly configured yet. That's OK, we assume it
+ * will be properly configured and then we will be notified
+ * in the info_changed method and set up beacons properly
+ * there.
+ */
+ ath9k_set_beaconing_status(sc, false);
+ error = ath_beacon_alloc(sc, vif);
+ if (!error)
+ ath_beacon_config(sc, vif);
+ ath9k_set_beaconing_status(sc, true);
+ }
}
-static void ath9k_reclaim_beacon(struct ath_softc *sc,
- struct ieee80211_vif *vif)
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int ret = 0;
- /* Disable SWBA interrupt */
- sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
- ath9k_ps_wakeup(sc);
- ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- tasklet_kill(&sc->bcon_tasklet);
- ath9k_ps_restore(sc);
+ mutex_lock(&sc->mutex);
- ath_beacon_return(sc, avp);
- sc->sc_flags &= ~SC_OP_BEACONS;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+ default:
+ ath_err(common, "Interface type %d not yet supported\n",
+ vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
- if (sc->nbcnvifs > 0) {
- /* Re-enable beaconing */
- sc->sc_ah->imask |= ATH9K_INT_SWBA;
- ath9k_ps_wakeup(sc);
- ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
- ath9k_ps_restore(sc);
+ if (ath9k_uses_beacons(vif->type)) {
+ if (sc->nbcnvifs >= ATH_BCBUF) {
+ ath_err(common, "Not enough beacon buffers when adding"
+ " new interface of type: %i\n",
+ vif->type);
+ ret = -ENOBUFS;
+ goto out;
+ }
+ }
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC) &&
+ sc->nvifs > 0) {
+ ath_err(common, "Cannot create ADHOC interface when other"
+ " interfaces already exist.\n");
+ ret = -EINVAL;
+ goto out;
}
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+ /* Set the VIF opmode */
+ avp->av_opmode = vif->type;
+ avp->av_bslot = -1;
+
+ sc->nvifs++;
+
+ ath9k_do_vif_add_setup(hw, vif);
+out:
+ mutex_unlock(&sc->mutex);
+ return ret;
}
static int ath9k_change_interface(struct ieee80211_hw *hw,
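
With multiple interfaces per wiphy, the BSSID mask is now derived by ath9k_vif_iter(): start from an all-ones mask and clear every bit position where some interface address differs from the hardware MAC. A standalone sketch of that computation with made-up addresses:

#include <stdio.h>

#define ETH_ALEN 6

/*
 * Sketch of the mask update done per interface by ath9k_vif_iter():
 * clear every bit where the interface address differs from the hw MAC.
 */
static void update_mask(unsigned char *mask, const unsigned char *hw_addr,
			const unsigned char *vif_addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] &= ~(hw_addr[i] ^ vif_addr[i]);
}

int main(void)
{
	unsigned char hw[ETH_ALEN]   = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x30 };
	unsigned char vif[ETH_ALEN]  = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x31 };
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int i;

	update_mask(mask, hw, hw);	/* primary interface */
	update_mask(mask, hw, vif);	/* second interface, last octet differs */

	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}

The resulting mask (ff:ff:ff:ff:ff:fe for the two addresses above) is what ath_hw_setbssidmask() programs, so the hardware accepts frames addressed to any of the local interfaces.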
@@ -1465,40 +1507,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
enum nl80211_iftype new_type,
bool p2p)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
- switch (new_type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_ADHOC:
+ /* See if new interface type is valid. */
+ if ((new_type == NL80211_IFTYPE_ADHOC) &&
+ (sc->nvifs > 1)) {
+ ath_err(common, "When using ADHOC, it must be the only"
+ " interface.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ath9k_uses_beacons(new_type) &&
+ !ath9k_uses_beacons(vif->type)) {
if (sc->nbcnvifs >= ATH_BCBUF) {
ath_err(common, "No beacon slot available\n");
ret = -ENOBUFS;
goto out;
}
- break;
- case NL80211_IFTYPE_STATION:
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC))
- ath9k_reclaim_beacon(sc, vif);
- break;
- default:
- ath_err(common, "Interface type %d not yet supported\n",
- vif->type);
- ret = -ENOTSUPP;
- goto out;
}
+
+ /* Clean up old vif stuff */
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_reclaim_beacon(sc, vif);
+
+ /* Add new settings */
vif->type = new_type;
vif->p2p = p2p;
+ ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
return ret;
@@ -1507,25 +1549,20 @@ out:
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
mutex_lock(&sc->mutex);
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
+ sc->nvifs--;
/* Reclaim beacon resources */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
+ if (ath9k_uses_beacons(vif->type))
ath9k_reclaim_beacon(sc, vif);
- sc->nvifs--;
+ ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
}
@@ -1566,12 +1603,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio;
+ bool disable_radio = false;
mutex_lock(&sc->mutex);
@@ -1582,29 +1618,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* the end.
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- bool enable_radio;
- bool all_wiphys_idle;
- bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
- spin_lock_bh(&sc->wiphy_lock);
- all_wiphys_idle = ath9k_all_wiphys_idle(sc);
- ath9k_set_wiphy_idle(aphy, idle);
-
- enable_radio = (!idle && all_wiphys_idle);
-
- /*
- * After we unlock here its possible another wiphy
- * can be re-renabled so to account for that we will
- * only disable the radio toward the end of this routine
- * if by then all wiphys are still idle.
- */
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (enable_radio) {
- sc->ps_idle = false;
+ sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (!sc->ps_idle) {
ath_radio_enable(sc, hw);
ath_dbg(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
+ } else {
+ disable_radio = true;
}
}
@@ -1645,29 +1665,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- aphy->chan_idx = pos;
- aphy->chan_is_ht = conf_is_ht(conf);
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
sc->sc_flags |= SC_OP_OFFCHANNEL;
else
sc->sc_flags &= ~SC_OP_OFFCHANNEL;
- if (aphy->state == ATH_WIPHY_SCAN ||
- aphy->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_all_forced(sc, aphy);
- else {
- /*
- * Do not change operational channel based on a paused
- * wiphy changes.
- */
- goto skip_chan_change;
- }
-
- ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
- curchan->center_freq);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Set channel: %d MHz type: %d\n",
+ curchan->center_freq, conf->channel_type);
- /* XXX: remove me eventualy */
- ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
@@ -1709,21 +1717,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_update_survey_nf(sc, old_pos);
}
-skip_chan_change:
if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Set power: %d\n", conf->power_level);
sc->config.txpowlimit = 2 * conf->power_level;
ath9k_ps_wakeup(sc);
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
ath9k_ps_restore(sc);
}
- spin_lock_bh(&sc->wiphy_lock);
- disable_radio = ath9k_all_wiphys_idle(sc);
- spin_unlock_bh(&sc->wiphy_lock);
-
if (disable_radio) {
ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- sc->ps_idle = true;
ath_radio_disable(sc, hw);
}
@@ -1748,8 +1753,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
u32 rfilt;
changed_flags &= SUPPORTED_FILTERS;
@@ -1769,8 +1773,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath_node_attach(sc, sta);
@@ -1781,8 +1784,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath_node_detach(sc, sta);
@@ -1792,8 +1794,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_txq *txq;
struct ath9k_tx_queue_info qi;
@@ -1837,8 +1838,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
@@ -1882,8 +1882,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1912,10 +1912,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
/* Enable transmission of beacons (AP, IBSS, MESH) */
if ((changed & BSS_CHANGED_BEACON) ||
((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- error = ath_beacon_alloc(aphy, vif);
+ ath9k_set_beaconing_status(sc, false);
+ error = ath_beacon_alloc(sc, vif);
if (!error)
ath_beacon_config(sc, vif);
+ ath9k_set_beaconing_status(sc, true);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1938,21 +1939,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
/* Disable transmission of beacons */
- if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon) {
+ ath9k_set_beaconing_status(sc, false);
+ avp->is_bslot_active = false;
+ ath9k_set_beaconing_status(sc, true);
+ }
if (changed & BSS_CHANGED_BEACON_INT) {
- sc->beacon_interval = bss_conf->beacon_int;
+ cur_conf->beacon_interval = bss_conf->beacon_int;
/*
* In case of AP mode, the HW TSF has to be reset
* when the beacon interval changes.
*/
if (vif->type == NL80211_IFTYPE_AP) {
sc->sc_flags |= SC_OP_TSF_RESET;
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- error = ath_beacon_alloc(aphy, vif);
+ ath9k_set_beaconing_status(sc, false);
+ error = ath_beacon_alloc(sc, vif);
if (!error)
ath_beacon_config(sc, vif);
+ ath9k_set_beaconing_status(sc, true);
} else {
ath_beacon_config(sc, vif);
}
@@ -1988,9 +1994,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
{
+ struct ath_softc *sc = hw->priv;
u64 tsf;
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -2003,8 +2008,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -2015,8 +2019,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
static void ath9k_reset_tsf(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
mutex_lock(&sc->mutex);
@@ -2031,10 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
int ret = 0;
local_bh_disable();
@@ -2079,8 +2081,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
@@ -2114,52 +2115,68 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
mutex_lock(&sc->mutex);
- if (ath9k_wiphy_scanning(sc)) {
- /*
- * There is a race here in mac80211 but fixing it requires
- * we revisit how we handle the scan complete callback.
- * After mac80211 fixes we will not have configured hardware
- * to the home channel nor would we have configured the RX
- * filter yet.
- */
- mutex_unlock(&sc->mutex);
- return;
- }
-
- aphy->state = ATH_WIPHY_SCAN;
- ath9k_wiphy_pause_all_forced(sc, aphy);
+ ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(ah);
mutex_unlock(&sc->mutex);
}
-/*
- * XXX: this requires a revisit after the driver
- * scan_complete gets moved to another place/removed in mac80211.
- */
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+#define ATH_FLUSH_TIMEOUT 60 /* ms */
+ struct ath_softc *sc = hw->priv;
+ struct ath_txq *txq = NULL;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i, j, npend = 0;
mutex_lock(&sc->mutex);
- aphy->state = ATH_WIPHY_ACTIVE;
- mutex_unlock(&sc->mutex);
-}
-static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_hw *ah = sc->sc_ah;
+ cancel_delayed_work_sync(&sc->tx_complete_work);
- mutex_lock(&sc->mutex);
- ah->coverage_class = coverage_class;
- ath9k_hw_init_global_settings(ah);
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+ txq = &sc->tx.txq[i];
+
+ if (!drop) {
+ for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
+ if (!ath9k_has_pending_frames(sc, txq))
+ break;
+ usleep_range(1000, 2000);
+ }
+ }
+
+ if (drop || ath9k_has_pending_frames(sc, txq)) {
+ ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
+ txq->axq_qnum);
+ spin_lock_bh(&txq->axq_lock);
+ txq->txq_flush_inprogress = true;
+ spin_unlock_bh(&txq->axq_lock);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_stoptxdma(ah, txq->axq_qnum);
+ npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
+ ath9k_ps_restore(sc);
+ if (npend)
+ break;
+
+ ath_draintxq(sc, txq, false);
+ txq->txq_flush_inprogress = false;
+ }
+ }
+
+ if (npend) {
+ ath_reset(sc, false);
+ txq->txq_flush_inprogress = false;
+ }
+
+ ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
}
@@ -2182,8 +2199,7 @@ struct ieee80211_ops ath9k_ops = {
.reset_tsf = ath9k_reset_tsf,
.ampdu_action = ath9k_ampdu_action,
.get_survey = ath9k_get_survey,
- .sw_scan_start = ath9k_sw_scan_start,
- .sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
.set_coverage_class = ath9k_set_coverage_class,
+ .flush = ath9k_flush,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 78ef1f13386f..e83128c50f7b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
void __iomem *mem;
- struct ath_wiphy *aphy;
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
@@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_iomap;
}
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
- sizeof(struct ath_softc), &ath9k_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
@@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
SET_IEEE80211_DEV(hw, &pdev->dev);
pci_set_drvdata(pdev, hw);
- aphy = hw->priv;
- sc = (struct ath_softc *) (aphy + 1);
- aphy->sc = sc;
- aphy->hw = hw;
- sc->pri_wiphy = aphy;
+ sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
@@ -260,8 +254,7 @@ err_dma:
static void ath_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
if (!is_ath9k_unloaded)
@@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
@@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
u32 val;
/*
@@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device)
ath9k_ps_restore(sc);
sc->ps_idle = true;
- ath9k_set_wiphy_idle(aphy, true);
ath_radio_disable(sc, hw);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e45147820eae..960d717ca7c2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
- struct ath_wiphy *aphy = hw->priv;
- return aphy->sc;
+ return hw->priv;
}
static void ath_rate_free(void *priv)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index b2497b8601e5..cb559e345b86 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
-static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
- struct ieee80211_hdr *hdr)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- int i;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
- == 0) {
- hw = aphy->hw;
- break;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return hw;
-}
-
/*
* Setup and link descriptors.
*
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
int error = 0, i;
u32 size;
-
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
- ah->caps.rx_status_len,
- min(common->cachelsz, (u16)64));
-
ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
ah->caps.rx_status_len);
@@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
+ common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
+ sc->sc_ah->caps.rx_status_len;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
return ath_rx_edma_init(sc, nbufs);
} else {
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(common->cachelsz, (u16)64));
-
ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
common->cachelsz, common->rx_bufsize);
@@ -463,8 +437,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (conf_is_ht(&sc->hw->conf))
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
- if (sc->sec_wiphy || (sc->nvifs > 1) ||
- (sc->rx.rxfilter & FIF_OTHER_BSS)) {
+ if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
/* The following may also be needed for other older chips */
if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
rfilt |= ATH9K_RX_FILTER_PROM;
@@ -588,8 +561,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
return;
mgmt = (struct ieee80211_mgmt *)skb->data;
- if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+ if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
+ /* TODO: This doesn't work well if you have stations
+ * associated to two different APs because curbssid
+ * is just the last AP that any of the stations associated
+ * with.
+ */
return; /* not from our current AP */
+ }
sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
@@ -662,37 +641,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
}
}
-static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
- struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- /* Send the frame to mac80211 */
- if (is_multicast_ether_addr(hdr->addr1)) {
- int i;
- /*
- * Deliver broadcast/multicast frames to all suitable
- * virtual wiphys.
- */
- /* TODO: filter based on channel configuration */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- struct sk_buff *nskb;
- if (aphy == NULL)
- continue;
- nskb = skb_copy(skb, GFP_ATOMIC);
- if (!nskb)
- continue;
- ieee80211_rx(aphy->hw, nskb);
- }
- ieee80211_rx(sc->hw, skb);
- } else
- /* Deliver unicast frames based on receiver address */
- ieee80211_rx(hw, skb);
-}
-
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
@@ -862,15 +810,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
return false;
- /*
- * rs_more indicates chained descriptors which can be used
- * to link buffers together for a sort of scatter-gather
- * operation.
- * reject the frame, we don't support scatter-gather yet and
- * the frame is probably corrupt anyway
- */
+ /* Only use error bits from the last fragment */
if (rx_stats->rs_more)
- return false;
+ return true;
/*
* The rx_stats->rs_status will not be set until the end of the
@@ -974,7 +916,7 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ath_rx_status *rx_stats)
{
- struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
__le16 fc;
@@ -984,13 +926,19 @@ static void ath9k_process_rssi(struct ath_common *common,
fc = hdr->frame_control;
if (!ieee80211_is_beacon(fc) ||
- compare_ether_addr(hdr->addr3, common->curbssid))
+ compare_ether_addr(hdr->addr3, common->curbssid)) {
+ /* TODO: This doesn't work well if you have stations
+ * associated to two different APs because curbssid
+ * is just the last AP that any of the stations associated
+ * with.
+ */
return;
+ }
if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
- ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
+ ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
- last_rssi = aphy->last_rssi;
+ last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
ATH_RSSI_EP_MULTIPLIER);
@@ -1022,6 +970,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
return -EINVAL;
+ /* Only use status info from the last fragment */
+ if (rx_stats->rs_more)
+ return 0;
+
ath9k_process_rssi(common, hw, hdr, rx_stats);
if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1031,7 +983,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
return 0;
}
@@ -1623,7 +1575,7 @@ div_comb_done:
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
- struct sk_buff *skb = NULL, *requeue_skb;
+ struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1632,7 +1584,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* virtual wiphy so to account for that we iterate over the active
* wiphys and find the appropriate wiphy and therefore hw.
*/
- struct ieee80211_hw *hw = NULL;
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
@@ -1674,10 +1626,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (!skb)
continue;
- hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
- rxs = IEEE80211_SKB_RXCB(skb);
+ /*
+ * Take frame header from the first fragment and RX status from
+ * the last one.
+ */
+ if (sc->rx.frag)
+ hdr_skb = sc->rx.frag;
+ else
+ hdr_skb = skb;
- hw = ath_get_virt_hw(sc, hdr);
+ hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
+ rxs = IEEE80211_SKB_RXCB(hdr_skb);
ath_debug_stat_rx(sc, &rs);
@@ -1686,12 +1645,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* chain it back at the queue without processing it.
*/
if (flush)
- goto requeue;
+ goto requeue_drop_frag;
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
- goto requeue;
+ goto requeue_drop_frag;
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
@@ -1711,7 +1670,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb)
- goto requeue;
+ goto requeue_drop_frag;
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1722,8 +1681,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (ah->caps.rx_status_len)
skb_pull(skb, ah->caps.rx_status_len);
- ath9k_rx_skb_postprocess(common, skb, &rs,
- rxs, decrypt_error);
+ if (!rs.rs_more)
+ ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
@@ -1736,10 +1696,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
ath_err(common, "dma_mapping_error() on RX\n");
- ath_rx_send_to_mac80211(hw, sc, skb);
+ ieee80211_rx(hw, skb);
break;
}
+ if (rs.rs_more) {
+ /*
+ * rs_more indicates chained descriptors which can be
+ * used to link buffers together for a sort of
+ * scatter-gather operation.
+ */
+ if (sc->rx.frag) {
+ /* too many fragments - cannot handle frame */
+ dev_kfree_skb_any(sc->rx.frag);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+ sc->rx.frag = skb;
+ goto requeue;
+ }
+
+ if (sc->rx.frag) {
+ int space = skb->len - skb_tailroom(hdr_skb);
+
+ sc->rx.frag = NULL;
+
+ if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
+ dev_kfree_skb(skb);
+ goto requeue_drop_frag;
+ }
+
+ skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ skb = hdr_skb;
+ }
+
/*
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
@@ -1763,8 +1755,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
ath_ant_comb_scan(sc, &rs);
- ath_rx_send_to_mac80211(hw, sc, skb);
+ ieee80211_rx(hw, skb);
+requeue_drop_frag:
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
requeue:
if (edma) {
list_add_tail(&bf->list, &sc->rx.rxbuf);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4df5659c6c16..8fa8acfde62e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -789,6 +789,7 @@
#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
#define AR_SREV_VERSION_9485 0x240
#define AR_SREV_REVISION_9485_10 0
+#define AR_SREV_REVISION_9485_11 1
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -866,6 +867,9 @@
#define AR_SREV_9485_10(_ah) \
(AR_SREV_9485(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
+#define AR_SREV_9485_11(_ah) \
+ (AR_SREV_9485(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
#define AR_SREV_9285E_20(_ah) \
(AR_SREV_9285_12_OR_LATER(_ah) && \
@@ -874,6 +878,7 @@
enum ath_usb_dev {
AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
+ STORAGE_DEVICE = 3,
};
#define AR_DEVID_7010(_ah) \
@@ -1083,6 +1088,17 @@ enum {
#define AR_ENT_OTP 0x40d8
#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
#define AR_ENT_OTP_MPSD 0x00800000
+#define AR_CH0_BB_DPLL2 0x16184
+#define AR_CH0_BB_DPLL3 0x16188
+#define AR_CH0_DDR_DPLL2 0x16244
+#define AR_CH0_DDR_DPLL3 0x16248
+#define AR_CH0_DPLL2_KD 0x03F80000
+#define AR_CH0_DPLL2_KD_S 19
+#define AR_CH0_DPLL2_KI 0x3C000000
+#define AR_CH0_DPLL2_KI_S 26
+#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
+#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
+#define AR_PHY_CCA_NOM_VAL_2GHZ -118
#define AR_RTC_9300_PLL_DIV 0x000003ff
#define AR_RTC_9300_PLL_DIV_S 0
@@ -1129,6 +1145,12 @@ enum {
#define AR_RTC_PLL_CLKSEL 0x00000300
#define AR_RTC_PLL_CLKSEL_S 8
+#define PLL3 0x16188
+#define PLL3_DO_MEAS_MASK 0x40000000
+#define PLL4 0x1618c
+#define PLL4_MEAS_DONE 0x8
+#define SQSUM_DVC_MASK 0x007ffff8
+
#define AR_RTC_RESET \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
#define AR_RTC_RESET_EN (0x00000001)
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644
index 2dc7095e56d1..000000000000
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "ath9k.h"
-
-struct ath9k_vif_iter_data {
- const u8 *hw_macaddr;
- u8 mask[ETH_ALEN];
-};
-
-static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath9k_vif_iter_data *iter_data = data;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
-}
-
-void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath9k_vif_iter_data iter_data;
- int i;
-
- /*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
- */
- iter_data.hw_macaddr = common->macaddr;
- memset(&iter_data.mask, 0xff, ETH_ALEN);
-
- if (vif)
- ath9k_vif_iter(&iter_data, vif->addr, vif);
-
- /* Get list of all active MAC addresses */
- spin_lock_bh(&sc->wiphy_lock);
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
- &iter_data);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] == NULL)
- continue;
- ieee80211_iterate_active_interfaces_atomic(
- sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-
- memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
- ath_hw_setbssidmask(common);
-}
-
-int ath9k_wiphy_add(struct ath_softc *sc)
-{
- int i, error;
- struct ath_wiphy *aphy;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_hw *hw;
- u8 addr[ETH_ALEN];
-
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
- if (hw == NULL)
- return -ENOMEM;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] == NULL)
- break;
- }
-
- if (i == sc->num_sec_wiphy) {
- /* No empty slot available; increase array length */
- struct ath_wiphy **n;
- n = krealloc(sc->sec_wiphy,
- (sc->num_sec_wiphy + 1) *
- sizeof(struct ath_wiphy *),
- GFP_ATOMIC);
- if (n == NULL) {
- spin_unlock_bh(&sc->wiphy_lock);
- ieee80211_free_hw(hw);
- return -ENOMEM;
- }
- n[i] = NULL;
- sc->sec_wiphy = n;
- sc->num_sec_wiphy++;
- }
-
- SET_IEEE80211_DEV(hw, sc->dev);
-
- aphy = hw->priv;
- aphy->sc = sc;
- aphy->hw = hw;
- sc->sec_wiphy[i] = aphy;
- aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
- spin_unlock_bh(&sc->wiphy_lock);
-
- memcpy(addr, common->macaddr, ETH_ALEN);
- addr[0] |= 0x02; /* Locally managed address */
- /*
- * XOR virtual wiphy index into the least significant bits to generate
- * a different MAC address for each virtual wiphy.
- */
- addr[5] ^= i & 0xff;
- addr[4] ^= (i & 0xff00) >> 8;
- addr[3] ^= (i & 0xff0000) >> 16;
-
- SET_IEEE80211_PERM_ADDR(hw, addr);
-
- ath9k_set_hw_capab(sc, hw);
-
- error = ieee80211_register_hw(hw);
-
- if (error == 0) {
- /* Make sure wiphy scheduler is started (if enabled) */
- ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
- }
-
- return error;
-}
-
-int ath9k_wiphy_del(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- int i;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (aphy == sc->sec_wiphy[i]) {
- sc->sec_wiphy[i] = NULL;
- spin_unlock_bh(&sc->wiphy_lock);
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- return 0;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return -ENOENT;
-}
-
-static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
- struct ieee80211_vif *vif, const u8 *bssid,
- int ps)
-{
- struct ath_softc *sc = aphy->sc;
- struct ath_tx_control txctl;
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- __le16 fc;
- struct ieee80211_tx_info *info;
-
- skb = dev_alloc_skb(24);
- if (skb == NULL)
- return -ENOMEM;
- hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
- memset(hdr, 0, 24);
- fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
- if (ps)
- fc |= cpu_to_le16(IEEE80211_FCTL_PM);
- hdr->frame_control = fc;
- memcpy(hdr->addr1, bssid, ETH_ALEN);
- memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, bssid, ETH_ALEN);
-
- info = IEEE80211_SKB_CB(skb);
- memset(info, 0, sizeof(*info));
- info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
- info->control.vif = vif;
- info->control.rates[0].idx = 0;
- info->control.rates[0].count = 4;
- info->control.rates[1].idx = -1;
-
- memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = sc->tx.txq_map[WME_AC_VO];
- txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
-
- if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
- goto exit;
-
- return 0;
-exit:
- dev_kfree_skb_any(skb);
- return -1;
-}
-
-static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
- return true;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
- return true;
- }
- return false;
-}
-
-static bool ath9k_wiphy_pausing(struct ath_softc *sc)
-{
- bool ret;
- spin_lock_bh(&sc->wiphy_lock);
- ret = __ath9k_wiphy_pausing(sc);
- spin_unlock_bh(&sc->wiphy_lock);
- return ret;
-}
-
-static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
- return true;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
- return true;
- }
- return false;
-}
-
-bool ath9k_wiphy_scanning(struct ath_softc *sc)
-{
- bool ret;
- spin_lock_bh(&sc->wiphy_lock);
- ret = __ath9k_wiphy_scanning(sc);
- spin_unlock_bh(&sc->wiphy_lock);
- return ret;
-}
-
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
-{
- if (aphy == NULL)
- return;
- if (aphy->chan_idx != aphy->sc->chan_idx)
- return; /* wiphy not on the selected channel */
- __ath9k_wiphy_unpause(aphy);
-}
-
-static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
- for (i = 0; i < sc->num_sec_wiphy; i++)
- __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
- spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_chan_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_wiphy *aphy = sc->next_wiphy;
-
- if (aphy == NULL)
- return;
-
- /*
- * All pending interfaces paused; ready to change
- * channels.
- */
-
- /* Change channels */
- mutex_lock(&sc->mutex);
- /* XXX: remove me eventually */
- ath9k_update_ichannel(sc, aphy->hw,
- &sc->sc_ah->channels[sc->chan_idx]);
-
- /* sync hw configuration for hw code */
- common->hw = aphy->hw;
-
- if (ath_set_channel(sc, aphy->hw,
- &sc->sc_ah->channels[sc->chan_idx]) < 0) {
- printk(KERN_DEBUG "ath9k: Failed to set channel for new "
- "virtual wiphy\n");
- mutex_unlock(&sc->mutex);
- return;
- }
- mutex_unlock(&sc->mutex);
-
- ath9k_wiphy_unpause_channel(sc);
-}
-
-/*
- * ath9k version of ieee80211_tx_status() for TX frames that are generated
- * internally in the driver.
- */
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-
- if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
- printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
- "frame\n", wiphy_name(hw->wiphy));
- /*
- * The AP did not reply; ignore this to allow us to
- * continue.
- */
- }
- aphy->state = ATH_WIPHY_PAUSED;
- if (!ath9k_wiphy_pausing(aphy->sc)) {
- /*
- * Drop from tasklet to work to allow mutex for channel
- * change.
- */
- ieee80211_queue_work(aphy->sc->hw,
- &aphy->sc->chan_work);
- }
- }
-
- dev_kfree_skb(skb);
-}
-
-static void ath9k_mark_paused(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- aphy->state = ATH_WIPHY_PAUSED;
- if (!__ath9k_wiphy_pausing(sc))
- ieee80211_queue_work(sc->hw, &sc->chan_work);
-}
-
-static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = data;
- struct ath_vif *avp = (void *) vif->drv_priv;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- if (!vif->bss_conf.assoc) {
- ath9k_mark_paused(aphy);
- break;
- }
- /* TODO: could avoid this if already in PS mode */
- if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
- printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
- __func__);
- ath9k_mark_paused(aphy);
- }
- break;
- case NL80211_IFTYPE_AP:
- /* Beacon transmission is paused by aphy->state change */
- ath9k_mark_paused(aphy);
- break;
- default:
- break;
- }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
- ieee80211_stop_queues(aphy->hw);
- aphy->state = ATH_WIPHY_PAUSING;
- /*
- * TODO: handle PAUSING->PAUSED for the case where there are multiple
- * active vifs (now we do it on the first vif getting ready; should be
- * on the last)
- */
- ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
- aphy);
- return 0;
-}
-
-int ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
- int ret;
- spin_lock_bh(&aphy->sc->wiphy_lock);
- ret = __ath9k_wiphy_pause(aphy);
- spin_unlock_bh(&aphy->sc->wiphy_lock);
- return ret;
-}
-
-static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = data;
- struct ath_vif *avp = (void *) vif->drv_priv;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- if (!vif->bss_conf.assoc)
- break;
- ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
- break;
- case NL80211_IFTYPE_AP:
- /* Beacon transmission is re-enabled by aphy->state change */
- break;
- default:
- break;
- }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
- ieee80211_iterate_active_interfaces_atomic(aphy->hw,
- ath9k_unpause_iter, aphy);
- aphy->state = ATH_WIPHY_ACTIVE;
- ieee80211_wake_queues(aphy->hw);
- return 0;
-}
-
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
- int ret;
- spin_lock_bh(&aphy->sc->wiphy_lock);
- ret = __ath9k_wiphy_unpause(aphy);
- spin_unlock_bh(&aphy->sc->wiphy_lock);
- return ret;
-}
-
-static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
- sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
- sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
- }
-}
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
- __ath9k_wiphy_pause(sc->pri_wiphy);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
- __ath9k_wiphy_pause(sc->sec_wiphy[i]);
- }
-}
-
-int ath9k_wiphy_select(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- bool now;
-
- spin_lock_bh(&sc->wiphy_lock);
- if (__ath9k_wiphy_scanning(sc)) {
- /*
- * For now, we are using mac80211 sw scan and it expects to
- * have full control over channel changes, so avoid wiphy
- * scheduling during a scan. This could be optimized if the
- * scanning control were moved into the driver.
- */
- spin_unlock_bh(&sc->wiphy_lock);
- return -EBUSY;
- }
- if (__ath9k_wiphy_pausing(sc)) {
- if (sc->wiphy_select_failures == 0)
- sc->wiphy_select_first_fail = jiffies;
- sc->wiphy_select_failures++;
- if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
- {
- printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
- "out; disable/enable hw to recover\n");
- __ath9k_wiphy_mark_all_paused(sc);
- /*
- * TODO: this workaround to fix hardware is unlikely to
- * be specific to virtual wiphy changes. It can happen
- * on normal channel change, too, and as such, this
- * should really be made more generic. For example,
- * tricker radio disable/enable on GTT interrupt burst
- * (say, 10 GTT interrupts received without any TX
- * frame being completed)
- */
- spin_unlock_bh(&sc->wiphy_lock);
- ath_radio_disable(sc, aphy->hw);
- ath_radio_enable(sc, aphy->hw);
- /* Only the primary wiphy hw is used for queuing work */
- ieee80211_queue_work(aphy->sc->hw,
- &aphy->sc->chan_work);
- return -EBUSY; /* previous select still in progress */
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return -EBUSY; /* previous select still in progress */
- }
- sc->wiphy_select_failures = 0;
-
- /* Store the new channel */
- sc->chan_idx = aphy->chan_idx;
- sc->chan_is_ht = aphy->chan_is_ht;
- sc->next_wiphy = aphy;
-
- __ath9k_wiphy_pause_all(sc);
- now = !__ath9k_wiphy_pausing(aphy->sc);
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (now) {
- /* Ready to request channel change immediately */
- ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
- }
-
- /*
- * wiphys will be unpaused in ath9k_tx_status() once channel has been
- * changed if any wiphy needs time to become paused.
- */
-
- return 0;
-}
-
-bool ath9k_wiphy_started(struct ath_softc *sc)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
- spin_unlock_bh(&sc->wiphy_lock);
- return true;
- }
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
- spin_unlock_bh(&sc->wiphy_lock);
- return true;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return false;
-}
-
-static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
- struct ath_wiphy *selected)
-{
- if (selected->state == ATH_WIPHY_SCAN) {
- if (aphy == selected)
- return;
- /*
- * Pause all other wiphys for the duration of the scan even if
- * they are on the current channel now.
- */
- } else if (aphy->chan_idx == selected->chan_idx)
- return;
- aphy->state = ATH_WIPHY_PAUSED;
- ieee80211_stop_queues(aphy->hw);
-}
-
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
- struct ath_wiphy *selected)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- wiphy_work.work);
- struct ath_wiphy *aphy = NULL;
- bool first = true;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- if (sc->wiphy_scheduler_int == 0) {
- /* wiphy scheduler is disabled */
- spin_unlock_bh(&sc->wiphy_lock);
- return;
- }
-
-try_again:
- sc->wiphy_scheduler_index++;
- while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
- aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
- if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
- break;
-
- sc->wiphy_scheduler_index++;
- aphy = NULL;
- }
- if (aphy == NULL) {
- sc->wiphy_scheduler_index = 0;
- if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
- if (first) {
- first = false;
- goto try_again;
- }
- /* No wiphy is ready to be scheduled */
- } else
- aphy = sc->pri_wiphy;
- }
-
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (aphy &&
- aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
- ath9k_wiphy_select(aphy)) {
- printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
- "change\n");
- }
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->wiphy_work,
- sc->wiphy_scheduler_int);
-}
-
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
-{
- cancel_delayed_work_sync(&sc->wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
- if (sc->wiphy_scheduler_int)
- ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
- sc->wiphy_scheduler_int);
-}
-
-/* caller must hold wiphy_lock */
-bool ath9k_all_wiphys_idle(struct ath_softc *sc)
-{
- unsigned int i;
- if (!sc->pri_wiphy->idle)
- return false;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- if (!aphy->idle)
- return false;
- }
- return true;
-}
-
-/* caller must hold wiphy_lock */
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
-{
- struct ath_softc *sc = aphy->sc;
-
- aphy->idle = idle;
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Marking %s as %sidle\n",
- wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
-}
-/* Only bother starting a queue on an active virtual wiphy */
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- unsigned int i;
- bool txq_started = false;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- /* Start the primary wiphy */
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
- ieee80211_wake_queue(hw, skb_queue);
- txq_started = true;
- goto unlock;
- }
-
- /* Now start the secondary wiphy queues */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- if (aphy->state != ATH_WIPHY_ACTIVE)
- continue;
-
- hw = aphy->hw;
- ieee80211_wake_queue(hw, skb_queue);
- txq_started = true;
- break;
- }
-
-unlock:
- spin_unlock_bh(&sc->wiphy_lock);
- return txq_started;
-}
-
-/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- unsigned int i;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- /* Stop the primary wiphy */
- ieee80211_stop_queue(hw, skb_queue);
-
- /* Now stop the secondary wiphy queues */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- hw = aphy->hw;
- ieee80211_stop_queue(hw, skb_queue);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index dc862f5e1162..d3d24904f62f 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -123,12 +123,8 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
void ath9k_swba_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
- struct ath_common *common = ath9k_hw_common(priv->ah);
-
- ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n");
ath9k_htc_swba(priv, priv->wmi->beacon_pending);
-
}
void ath9k_fatal_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 07b7804aec5b..e16136d61799 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -19,7 +19,6 @@
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
@@ -32,7 +31,6 @@
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
-#define OFDM_SIFS_TIME 16
static u16 bits_per_symbol[][2] = {
/* 20MHz 40MHz */
@@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
- int nframes, int nbad, int txok, bool update_rc);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
int seqno);
@@ -169,7 +168,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
ath_tx_update_baw(sc, tid, fi->seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
} else {
- ath_tx_send_normal(sc, txq, tid, &bf_head);
+ ath_tx_send_normal(sc, txq, NULL, &bf_head);
}
spin_lock_bh(&txq->axq_lock);
}
@@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
ATH_TXBUF_RESET(tbf);
- tbf->aphy = bf->aphy;
tbf->bf_mpdu = bf->bf_mpdu;
tbf->bf_buf_addr = bf->bf_buf_addr;
memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_node *an = NULL;
struct sk_buff *skb;
struct ieee80211_sta *sta;
- struct ieee80211_hw *hw;
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
@@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
hdr = (struct ieee80211_hdr *)skb->data;
tx_info = IEEE80211_SKB_CB(skb);
- hw = bf->aphy->hw;
memcpy(rates, tx_info->control.rates, sizeof(rates));
@@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_rc_status(bf, ts, 1, 1, 0, false);
+ ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
0, 0);
@@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
while (bf) {
- txfail = txpending = 0;
+ txfail = txpending = sendbar = 0;
bf_next = bf->bf_next;
skb = bf->bf_mpdu;
@@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
- ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
rc_update = false;
} else {
- ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.bf_type |=
BUF_XRETRY;
- ath_tx_rc_status(bf, ts, nframes,
+ ath_tx_rc_status(sc, bf, ts, nframes,
nbad, 0, false);
ath_tx_complete_buf(sc, bf, txq,
&bf_head,
@@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset)
+ if (needreset) {
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath_reset(sc, false);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ }
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
- *ssn = txtid->seq_start;
+ *ssn = txtid->seq_start = txtid->seq_next;
+
+ memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+ txtid->baw_head = txtid->baw_tail = 0;
return 0;
}
@@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
[WME_AC_VI] = ATH_TXQ_AC_VI,
[WME_AC_VO] = ATH_TXQ_AC_VO,
};
- int qnum, i;
+ int axq_qnum, i;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype_txq_to_hwq[subtype];
@@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
TXQ_FLAG_TXDESCINT_ENABLE;
}
- qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
- if (qnum == -1) {
+ axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+ if (axq_qnum == -1) {
/*
* NB: don't print a message, this happens
* normally on parts with too few tx queues
*/
return NULL;
}
- if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
+ if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
ath_err(common, "qnum %u out of range, max %zu!\n",
- qnum, ARRAY_SIZE(sc->tx.txq));
- ath9k_hw_releasetxqueue(ah, qnum);
+ axq_qnum, ARRAY_SIZE(sc->tx.txq));
+ ath9k_hw_releasetxqueue(ah, axq_qnum);
return NULL;
}
- if (!ATH_TXQ_SETUP(sc, qnum)) {
- struct ath_txq *txq = &sc->tx.txq[qnum];
+ if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+ struct ath_txq *txq = &sc->tx.txq[axq_qnum];
- txq->axq_qnum = qnum;
+ txq->axq_qnum = axq_qnum;
+ txq->mac80211_qnum = -1;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq);
@@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_depth = 0;
txq->axq_ampdu_depth = 0;
txq->axq_tx_inprogress = false;
- sc->tx.txqsetup |= 1<<qnum;
+ sc->tx.txqsetup |= 1<<axq_qnum;
txq->txq_headidx = txq->txq_tailidx = 0;
for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
INIT_LIST_HEAD(&txq->txq_fifo[i]);
INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
- return &sc->tx.txq[qnum];
+ return &sc->tx.txq[axq_qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -1051,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_cabq_update(struct ath_softc *sc)
{
struct ath9k_tx_queue_info qi;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1062,7 +1067,7 @@ int ath_cabq_update(struct ath_softc *sc)
else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
- qi.tqi_readyTime = (sc->beacon_interval *
+ qi.tqi_readyTime = (cur_conf->beacon_interval *
sc->config.cabqReadytime) / 100;
ath_txq_update(sc, qnum, &qi);
@@ -1205,8 +1210,17 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
ath_err(common, "Failed to stop TX DMA!\n");
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i))
- ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ /*
+ * The caller will resume queues with ieee80211_wake_queues.
+ * Mark the queue as not stopped to prevent ath_tx_complete
+ * from waking the queue too early.
+ */
+ txq = &sc->tx.txq[i];
+ txq->stopped = false;
+ ath_draintxq(sc, txq, retry_tx);
}
return !npend;
@@ -1218,46 +1232,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
+/* For each axq_acq entry, for each tid, try to schedule packets
+ * for transmit until ampdu_depth has reached min Q depth.
+ */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ath_atx_ac *ac;
- struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+ struct ath_atx_tid *tid, *last_tid;
- if (list_empty(&txq->axq_acq))
+ if (list_empty(&txq->axq_acq) ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
- list_del(&ac->list);
- ac->sched = false;
+ last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
- do {
- if (list_empty(&ac->tid_q))
- return;
+ list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+ last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+ list_del(&ac->list);
+ ac->sched = false;
- tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
- list_del(&tid->list);
- tid->sched = false;
+ while (!list_empty(&ac->tid_q)) {
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ list_del(&tid->list);
+ tid->sched = false;
- if (tid->paused)
- continue;
+ if (tid->paused)
+ continue;
- ath_tx_sched_aggr(sc, txq, tid);
+ ath_tx_sched_aggr(sc, txq, tid);
- /*
- * add tid to round-robin queue if more frames
- * are pending for the tid
- */
- if (!list_empty(&tid->buf_q))
- ath_tx_queue_tid(txq, tid);
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (!list_empty(&tid->buf_q))
+ ath_tx_queue_tid(txq, tid);
- break;
- } while (!list_empty(&ac->tid_q));
+ if (tid == last_tid ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ break;
+ }
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+ if (!list_empty(&ac->tid_q)) {
+ if (!ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
+ }
}
+
+ if (ac == last_ac ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ return;
}
}
@@ -1301,6 +1328,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
@@ -1308,6 +1336,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
list_splice_tail_init(head, &txq->axq_q);
if (txq->axq_link == NULL) {
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr),
@@ -1321,6 +1350,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
&txq->axq_link);
+ TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
txq->axq_depth++;
@@ -1335,7 +1365,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
struct list_head bf_head;
bf->bf_state.bf_type |= BUF_AMPDU;
- TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
/*
* Do not queue to h/w when any of the following conditions is true:
@@ -1351,6 +1380,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
* Add this frame to software queue for scheduling later
* for aggregation.
*/
+ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
list_add_tail(&bf->list, &tid->buf_q);
ath_tx_queue_tid(txctl->txq, tid);
return;
@@ -1364,6 +1394,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
ath_tx_addto_baw(sc, tid, fi->seqno);
/* Queue to h/w without aggregation */
+ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
ath_buf_set_rate(sc, bf, fi->framelen);
ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@ -1416,8 +1447,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
int framelen)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@ -1635,8 +1665,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
struct ath_txq *txq,
struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@@ -1652,7 +1681,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
ATH_TXBUF_RESET(bf);
- bf->aphy = aphy;
bf->bf_flags = setup_tx_flags(skb);
bf->bf_mpdu = skb;
@@ -1741,8 +1769,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->control.sta;
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
int padpos, padsize;
@@ -1794,7 +1821,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
spin_lock_bh(&txq->axq_lock);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
- ath_mac80211_stop_queue(sc, q);
+ ieee80211_stop_queue(sc->hw, q);
txq->stopped = 1;
}
spin_unlock_bh(&txq->axq_lock);
@@ -1809,8 +1836,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_wiphy *aphy, int tx_flags, int ftype,
- struct ath_txq *txq)
+ int tx_flags, int ftype, struct ath_txq *txq)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1820,9 +1846,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
- if (aphy)
- hw = aphy->hw;
-
if (tx_flags & ATH_TX_BAR)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -1852,19 +1875,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
PS_WAIT_FOR_TX_ACK));
}
- if (unlikely(ftype))
- ath9k_tx_status(hw, skb, ftype);
- else {
- q = skb_get_queue_mapping(skb);
- if (txq == sc->tx.txq_map[q]) {
- spin_lock_bh(&txq->axq_lock);
- if (WARN_ON(--txq->pending_frames < 0))
- txq->pending_frames = 0;
- spin_unlock_bh(&txq->axq_lock);
- }
+ q = skb_get_queue_mapping(skb);
+ if (txq == sc->tx.txq_map[q]) {
+ spin_lock_bh(&txq->axq_lock);
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
- ieee80211_tx_status(hw, skb);
+ if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = 0;
+ }
+ spin_unlock_bh(&txq->axq_lock);
}
+
+ ieee80211_tx_status(hw, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1896,8 +1920,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
else
complete(&sc->paprd_complete);
} else {
- ath_debug_stat_tx(sc, bf, ts);
- ath_tx_complete(sc, skb, bf->aphy, tx_flags,
+ ath_debug_stat_tx(sc, bf, ts, txq);
+ ath_tx_complete(sc, skb, tx_flags,
bf->bf_state.bfs_ftype, txq);
}
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -1913,14 +1937,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
- int nframes, int nbad, int txok, bool update_rc)
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hw *hw = bf->aphy->hw;
- struct ath_softc *sc = bf->aphy->sc;
+ struct ieee80211_hw *hw = sc->hw;
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
@@ -1971,19 +1995,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
-static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
-{
- struct ath_txq *txq;
-
- txq = sc->tx.txq_map[qnum];
- spin_lock_bh(&txq->axq_lock);
- if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
- if (ath_mac80211_start_queue(sc, qnum))
- txq->stopped = 0;
- }
- spin_unlock_bh(&txq->axq_lock);
-}
-
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1994,7 +2005,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_tx_status ts;
int txok;
int status;
- int qnum;
ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2004,6 +2014,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
+ if (sc->sc_flags & SC_OP_TXAGGR &&
+ !txq->txq_flush_inprogress)
+ ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
break;
}
@@ -2038,6 +2051,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);
break;
}
+ TX_STAT_INC(txq->axq_qnum, txprocdesc);
/*
* Remove ath_buf's of the same transmit unit from txq,
@@ -2058,6 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
+
spin_unlock_bh(&txq->axq_lock);
if (bf_held)
@@ -2070,27 +2085,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
}
- qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
true);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
- if (txq == sc->tx.txq_map[qnum])
- ath_wake_mac80211_queue(sc, qnum);
-
spin_lock_bh(&txq->axq_lock);
- if (sc->sc_flags & SC_OP_TXAGGR)
+
+ if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
}
}
+static void ath_hw_pll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ static int count;
+
+ if (AR_SREV_9485(sc->sc_ah)) {
+ if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
+ count++;
+
+ if (count == 3) {
+ /* Rx is hung for more than 500ms. Reset it */
+ ath_reset(sc, true);
+ count = 0;
+ }
+ } else
+ count = 0;
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
+ }
+}
+
static void ath_tx_complete_poll_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2098,6 +2131,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
struct ath_txq *txq;
int i;
bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ sc->tx_complete_poll_work_seen++;
+#endif
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
@@ -2111,6 +2147,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
} else {
txq->axq_tx_inprogress = true;
}
+ } else {
+ /* If the queue has pending buffers, then it
+ * should be doing tx work (and have axq_depth).
+ * Shouldn't get to this state, I think... but
+ * we do.
+ */
+ if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
+ (txq->pending_frames > 0 ||
+ !list_empty(&txq->axq_acq) ||
+ txq->stopped)) {
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "txq: %p axq_qnum: %u,"
+ " mac80211_qnum: %i"
+ " axq_link: %p"
+ " pending frames: %i"
+ " axq_acq empty: %i"
+ " stopped: %i"
+ " axq_depth: 0 Attempting to"
+ " restart tx logic.\n",
+ txq, txq->axq_qnum,
+ txq->mac80211_qnum,
+ txq->axq_link,
+ txq->pending_frames,
+ list_empty(&txq->axq_acq),
+ txq->stopped);
+ ath_txq_schedule(sc, txq);
+ }
}
spin_unlock_bh(&txq->axq_lock);
}
@@ -2150,7 +2213,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head bf_head;
int status;
int txok;
- int qnum;
for (;;) {
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2193,11 +2255,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (!bf_isampdu(bf)) {
if (txs.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
}
- qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
txok, true);
@@ -2205,19 +2265,20 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_complete_buf(sc, bf, txq, &bf_head,
&txs, txok, 0);
- if (txq == sc->tx.txq_map[qnum])
- ath_wake_mac80211_queue(sc, qnum);
-
spin_lock_bh(&txq->axq_lock);
- if (!list_empty(&txq->txq_fifo_pending)) {
- INIT_LIST_HEAD(&bf_head);
- bf = list_first_entry(&txq->txq_fifo_pending,
- struct ath_buf, list);
- list_cut_position(&bf_head, &txq->txq_fifo_pending,
- &bf->bf_lastbf->list);
- ath_tx_txqaddbuf(sc, txq, &bf_head);
- } else if (sc->sc_flags & SC_OP_TXAGGR)
- ath_txq_schedule(sc, txq);
+
+ if (!txq->txq_flush_inprogress) {
+ if (!list_empty(&txq->txq_fifo_pending)) {
+ INIT_LIST_HEAD(&bf_head);
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ } else if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+ }
spin_unlock_bh(&txq->axq_lock);
}
}
@@ -2285,6 +2346,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
}
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
error = ath_tx_edma_init(sc);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index d07ff7f2fd92..c6a5fae634a0 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -283,6 +283,7 @@ struct ar9170 {
unsigned int mem_blocks;
unsigned int mem_block_size;
unsigned int rx_size;
+ unsigned int tx_seq_table;
} fw;
/* reset / stuck frames/queue detection */
@@ -533,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
/* TX */
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
void carl9170_tx_janitor(struct work_struct *work);
void carl9170_tx_process_status(struct ar9170 *ar,
const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 546b4e4ec5ea..9517ede9e2df 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
const struct carl9170fw_otus_desc *otus_desc;
const struct carl9170fw_chk_desc *chk_desc;
const struct carl9170fw_last_desc *last_desc;
+ const struct carl9170fw_txsq_desc *txsq_desc;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
FIF_PROMISC_IN_BSS;
}
+ if (SUPP(CARL9170FW_WOL))
+ device_set_wakeup_enable(&ar->udev->dev, true);
+
ar->fw.vif_num = otus_desc->vif_num;
ar->fw.cmd_bufs = otus_desc->cmd_bufs;
ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
}
}
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
+ sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
+
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
+ return -EINVAL;
+ } else {
+ ar->fw.tx_seq_table = 0;
+ }
+
#undef SUPPORTED
return 0;
}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 3680dfc70f46..30449d21b762 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
#define CARL9170_RX_FILTER_CTL_BACKR 0x20
#define CARL9170_RX_FILTER_MGMT 0x40
#define CARL9170_RX_FILTER_DATA 0x80
+#define CARL9170_RX_FILTER_EVERYTHING (~0)
struct carl9170_bcn_ctrl_cmd {
__le32 vif_id;
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 71f3821f6058..921066822dd5 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
/* Firmware RX filter | CARL9170_CMD_RX_FILTER */
CARL9170FW_RX_FILTER,
+ /* Wake up on WLAN */
+ CARL9170FW_WOL,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
#define FIX_MAGIC "FIX\0"
#define DBG_MAGIC "DBG\0"
#define CHK_MAGIC "CHK\0"
+#define TXSQ_MAGIC "TXSQ"
#define LAST_MAGIC "LAST"
#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
+#define CARL9170FW_MAGIC_SIZE 4
+
struct carl9170fw_desc_head {
- u8 magic[4];
+ u8 magic[CARL9170FW_MAGIC_SIZE];
__le16 length;
u8 min_ver;
u8 cur_ver;
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
#define CARL9170FW_CHK_DESC_SIZE \
(sizeof(struct carl9170fw_chk_desc))
+#define CARL9170FW_TXSQ_DESC_MIN_VER 1
+#define CARL9170FW_TXSQ_DESC_CUR_VER 1
+struct carl9170fw_txsq_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 seq_table_addr;
+} __packed;
+#define CARL9170FW_TXSQ_DESC_SIZE \
+ (sizeof(struct carl9170fw_txsq_desc))
+
#define CARL9170FW_LAST_DESC_MIN_VER 1
#define CARL9170FW_LAST_DESC_CUR_VER 2
struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
}
static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
- u8 magic[4], __le16 length,
- u8 min_ver, u8 cur_ver)
+ u8 magic[CARL9170FW_MAGIC_SIZE],
+ __le16 length, u8 min_ver, u8 cur_ver)
{
head->magic[0] = magic[0];
head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
#define carl9170fw_for_each_hdr(desc, fw_desc) \
for (desc = fw_desc; \
- memcmp(desc->magic, LAST_MAGIC, 4) && \
+ memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \
le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
}
static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
- const u8 descid[4], u16 min_len,
- u8 compatible_revision)
+ const u8 descid[CARL9170FW_MAGIC_SIZE],
+ u16 min_len, u8 compatible_revision)
{
if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index e85df6edfed3..4e30762dd903 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -463,6 +463,8 @@
#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
+#define AR9170_PWR_PLL_ADDAC_DIV_S 2
+#define AR9170_PWR_PLL_ADDAC_DIV 0xffc
#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
/* Faraday USB Controller */
@@ -471,6 +473,9 @@
#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
+#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3)
+#define AR9170_USB_MAIN_CTRL_RESET BIT(4)
+#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5)
#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
@@ -499,6 +504,13 @@
#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
+#define AR9170_USB_INTR_SRC0_SETUP BIT(0)
+#define AR9170_USB_INTR_SRC0_IN BIT(1)
+#define AR9170_USB_INTR_SRC0_OUT BIT(2)
+#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */
+#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */
+#define AR9170_USB_INTR_SRC0_ABORT BIT(7)
+
#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
@@ -506,6 +518,15 @@
#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
+#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1)
+#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2)
+#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5)
+#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6)
+#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7)
+
+#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f)
#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
@@ -581,6 +602,10 @@
#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
+
+#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120)
+#define AR9170_USB_WAKE_UP_WAKE BIT(0)
+
#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 870df8c42622..ede3d7e5a048 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -662,6 +662,13 @@ init:
goto unlock;
}
+ if (ar->fw.tx_seq_table) {
+ err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
+ 0);
+ if (err)
+ goto unlock;
+ }
+
unlock:
if (err && (vif_id >= 0)) {
vif_priv->active = false;
@@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
struct ar9170 *ar = hw->priv;
struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6cc58e052d10..0ef70b6fc512 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
+ if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
+
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
@@ -1336,7 +1339,7 @@ err_unlock_rcu:
return false;
}
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
@@ -1370,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
carl9170_tx(ar);
- return NETDEV_TX_OK;
+ return;
err_free:
ar->tx_dropped++;
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e5964f..f82c400be288 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x057c, 0x8402) },
/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
{ USB_DEVICE(0x1668, 0x1200) },
+ /* Airlive X.USB a/b/g/n */
+ { USB_DEVICE(0x1b75, 0x9170) },
/* terminate */
{}
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ee0f84f2a2f6..15095c035169 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 10
-#define CARL9170FW_VERSION_MONTH 10
-#define CARL9170FW_VERSION_DAY 29
-#define CARL9170FW_VERSION_GIT "1.9.0"
+#define CARL9170FW_VERSION_YEAR 11
+#define CARL9170FW_VERSION_MONTH 1
+#define CARL9170FW_VERSION_DAY 22
+#define CARL9170FW_VERSION_GIT "1.9.2"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 24d63b583b6b..9e1324b67e08 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
u8 ampdu_commit_factor:1;
u8 ampdu_unused_bit:1;
u8 queue:2;
- u8 reserved:1;
+ u8 assign_seq:1;
u8 vif_id:3;
u8 fill_in_tsf:1;
u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
+#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4
#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
@@ -413,6 +414,23 @@ enum ar9170_txq {
__AR9170_NUM_TXQ,
};
+/*
+ * This is a workaround for several undocumented bugs.
+ * Don't mess with the QoS/AC <-> HW Queue map, if you don't
+ * know what you are doing.
+ *
+ * Known problems [hardware]:
+ * * The MAC does not aggregate frames on anything other
+ * than the first HW queue.
+ * * When an AMPDU is placed [in the first hw queue] and
+ * additional frames are already queued on a different
+ * hw queue, the MAC will ALWAYS freeze.
+ *
+ * In a nutshell: The hardware can either do QoS or
+ * Aggregation but not both at the same time. As a
+ * result, this makes the device pretty much useless
+ * for any serious 802.11n setup.
+ */
static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
#define AR9170_TXQ_DEPTH 32
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 5d465e5fcf24..37b8e115375a 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
- if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)
+ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+ }
}
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 2b14775e6bc6..f828f294ba89 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
}
}
+bool ath_is_49ghz_allowed(u16 regdomain)
+{
+ /* possibly more */
+ return regdomain == MKK9_MKKC;
+}
+EXPORT_SYMBOL(ath_is_49ghz_allowed);
+
/* Frequency is one where radar detection is required */
static bool ath_is_radar_freq(u16 center_freq)
{
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 345dd9721b41..172f63f671cf 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -250,6 +250,7 @@ enum CountryCode {
};
bool ath_is_world_regd(struct ath_regulatory *reg);
bool ath_is_49ghz_allowed(u16 regdomain);
int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
int (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 22bc9f17f634..57eb5b649730 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3203,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work)
mutex_unlock(&wl->mutex);
}
-static int b43_op_tx(struct ieee80211_hw *hw,
+static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3211,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ return;
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
skb_queue_tail(&wl->tx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->tx_work);
-
- return NETDEV_TX_OK;
}
static void b43_qos_params_upload(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index ab81ed8b19d7..9f5a3c993239 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -430,9 +430,9 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
bool workaround = false;
if (sprom->revision < 4)
- workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
- binfo->type != 0x46D ||
- binfo->rev < 0x41);
+ workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
+ binfo->type == 0x46D &&
+ binfo->rev >= 0x41);
else
workaround =
!(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -1281,17 +1281,17 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
B43_NPHY_TABLE_DATALO, tmp);
}
}
+ }
- b43_nphy_set_rf_sequence(dev, 5,
- rfseq_events, rfseq_delays, 3);
- b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
- ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
- 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+ b43_nphy_set_rf_sequence(dev, 5,
+ rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- b43_phy_maskset(dev, B43_PHY_N(0xC5D),
- 0xFF80, 4);
- }
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+ 0xFF80, 4);
}
}
@@ -2128,7 +2128,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
- } else if (dev->phy.rev == 2) {
+ } else {
save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
@@ -2179,7 +2179,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
- } else if (dev->phy.rev == 2) {
+ } else {
b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index dc8ef09a8552..c42b2acea24e 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* static tables, PHY revision >= 3 */
+static const u32 b43_ntab_framestruct_r3[] = {
+ 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+ 0x09804506, 0x00100030, 0x09804507, 0x00100030,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024,
+ 0x0980450e, 0x00100034, 0x0980450f, 0x00100034,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+ 0x1980c506, 0x00100030, 0x21810506, 0x00100030,
+ 0x21810506, 0x00100030, 0x01800504, 0x00100030,
+ 0x11808505, 0x00100030, 0x29814507, 0x01100030,
+ 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+ 0x21810506, 0x00100030, 0x21810506, 0x00100030,
+ 0x29814507, 0x01100030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+ 0x1980c50e, 0x00100038, 0x2181050e, 0x00100038,
+ 0x2181050e, 0x00100038, 0x0180050c, 0x00100038,
+ 0x1180850d, 0x00100038, 0x2981450f, 0x01100038,
+ 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+ 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+ 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+ 0x1980c506, 0x00100030, 0x1980c506, 0x00100030,
+ 0x11808504, 0x00100030, 0x3981ca05, 0x00100030,
+ 0x29814507, 0x01100030, 0x00000000, 0x00000000,
+ 0x10008a04, 0x00100000, 0x3981ca05, 0x00100030,
+ 0x1980c506, 0x00100030, 0x29814507, 0x01100030,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028,
+ 0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038,
+ 0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038,
+ 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+ 0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038,
+ 0x1980c50e, 0x00100038, 0x2981450f, 0x01100038,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x00100000, 0x02001405, 0x00100040,
+ 0x0b004a06, 0x01900060, 0x13008a06, 0x01900060,
+ 0x13008a06, 0x01900060, 0x43020a04, 0x00100060,
+ 0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060,
+ 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+ 0x13008a06, 0x01900060, 0x13008a06, 0x01900060,
+ 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x0200140d, 0x00100050,
+ 0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070,
+ 0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070,
+ 0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+ 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x50029404, 0x00100000, 0x32019405, 0x00100040,
+ 0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060,
+ 0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060,
+ 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+ 0x5802d404, 0x00100000, 0x3b01d405, 0x00100060,
+ 0x0b004a06, 0x01900060, 0x23010a07, 0x01500060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x5002940c, 0x00100010, 0x3201940d, 0x00100050,
+ 0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070,
+ 0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070,
+ 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+ 0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070,
+ 0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x000f4800, 0x62031405, 0x00100040,
+ 0x53028a06, 0x01900060, 0x53028a07, 0x01900060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x000f4808, 0x6203140d, 0x00100048,
+ 0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024,
+ 0x1980c50e, 0x00100034, 0x2181050e, 0x00100034,
+ 0x2181050e, 0x00100034, 0x0180050c, 0x00100038,
+ 0x1180850d, 0x00100038, 0x1181850d, 0x00100038,
+ 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+ 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+ 0x1181850d, 0x00100038, 0x2981450f, 0x01100038,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+ 0x0180c506, 0x00100030, 0x0180c506, 0x00100030,
+ 0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130,
+ 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130,
+ 0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130,
+ 0x2981450f, 0x01100030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100008, 0x0200140d, 0x00100048,
+ 0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068,
+ 0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070,
+ 0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070,
+ 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+ 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x50029404, 0x00100000, 0x32019405, 0x00100040,
+ 0x03004a06, 0x01900060, 0x03004a06, 0x01900060,
+ 0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160,
+ 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160,
+ 0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160,
+ 0x23010a0f, 0x01500060, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+ 0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060,
+ 0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060,
+ 0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+ 0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070,
+ 0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+ 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060,
+ 0x53028a07, 0x0190c060, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+ 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_pilot_r3[] = {
+ 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08,
+ 0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5,
+ 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82,
+ 0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5,
+ 0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815,
+ 0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff,
+ 0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02,
+ 0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295,
+ 0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a,
+ 0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff,
+ 0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008,
+ 0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280,
+ 0xf0a0, 0xf028, 0xffff, 0xffff,
+};
+
+static const u32 b43_ntab_tmap_r3[] = {
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+ 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
+ 0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+ 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+ 0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
+ 0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
+ 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
+ 0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+ 0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
+ 0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
+ 0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
+ 0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
+ 0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
+ 0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+ 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+ 0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
+ 0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
+ 0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
+ 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+ 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+ 0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+ 0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+ 0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
+ 0x22222222, 0x22222222, 0x22f22222, 0x00000222,
+ 0x11000000, 0x1111f111, 0x11111111, 0x11111111,
+ 0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
+ 0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
+ 0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
+ 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+ 0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
+ 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+ 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
+ 0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
+ 0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
+ 0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
+ 0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
+ 0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
+ 0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
+ 0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
+ 0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
+ 0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
+ 0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
+ 0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
+ 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+ 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+ 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+ 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+ 0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
+ 0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
+ 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+ 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+ 0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+ 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+ 0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_intlevel_r3[] = {
+ 0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46,
+ 0x00c1188d, 0x080024d2, 0x00000070,
+};
+
+static const u32 b43_ntab_tdtrn_r3[] = {
+ 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+ 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+ 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+ 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+ 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+ 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+ 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+ 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+ 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+ 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+ 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+ 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+ 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+ 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+ 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+ 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+ 0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8,
+ 0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6,
+ 0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7,
+ 0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd,
+ 0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9,
+ 0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7,
+ 0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798,
+ 0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895,
+ 0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94,
+ 0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8,
+ 0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87,
+ 0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8,
+ 0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8,
+ 0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a,
+ 0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4,
+ 0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de,
+ 0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a,
+ 0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014,
+ 0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991,
+ 0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498,
+ 0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960,
+ 0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c,
+ 0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d,
+ 0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+ 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+ 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+ 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+ 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+ 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+ 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+ 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+ 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+ 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+ 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+ 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+ 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+ 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+ 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+ 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+ 0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54,
+ 0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6,
+ 0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2,
+ 0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3,
+ 0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da,
+ 0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6,
+ 0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12,
+ 0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0,
+ 0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac,
+ 0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a,
+ 0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e,
+ 0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d,
+ 0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826,
+ 0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a,
+ 0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee,
+ 0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30,
+ 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+ 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+ 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+ 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+ 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+ 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+ 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+ 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+ 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+ 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+ 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+ 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+ 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+ 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+ 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+ 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+ 0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46,
+ 0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0,
+ 0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1,
+ 0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693,
+ 0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341,
+ 0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0,
+ 0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d,
+ 0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6,
+ 0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90,
+ 0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a,
+ 0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039,
+ 0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433,
+ 0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94,
+ 0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a,
+ 0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d,
+ 0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0,
+ 0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157,
+ 0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277,
+ 0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c,
+ 0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8,
+ 0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9,
+ 0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157,
+ 0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52,
+ 0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b,
+ 0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69,
+ 0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374,
+ 0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328,
+ 0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9,
+ 0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457,
+ 0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022,
+ 0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a,
+ 0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0,
+ 0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68,
+ 0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d,
+ 0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44,
+ 0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d,
+ 0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125,
+ 0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25,
+ 0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c,
+ 0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31,
+ 0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5,
+ 0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438,
+ 0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b,
+ 0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2,
+ 0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313,
+ 0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927,
+ 0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21,
+ 0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500,
+ 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+ 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+ 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+ 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+ 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+ 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+ 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+ 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+ 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+ 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+ 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+ 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+ 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+ 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+ 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+ 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+};
+
+static const u32 b43_ntab_noisevar0_r3[] = {
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u32 b43_ntab_noisevar1_r3[] = {
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u16 b43_ntab_mcs_r3[] = {
+ 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
+ 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
+ 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
+ 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
+ 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
+ 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
+ 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
+ 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
+ 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
+ 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
+ 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
+ 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
+ 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
+ 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
+ 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
+ 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
+ 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007,
+};
+
+static const u32 b43_ntab_tdi20a0_r3[] = {
+ 0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0,
+ 0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d,
+ 0x00020301, 0x00030504, 0x00040708, 0x0005090b,
+ 0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718,
+ 0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31,
+ 0x000101b4, 0x000243b7, 0x000345bb, 0x000447be,
+ 0x00058982, 0x00068c05, 0x00099309, 0x000a950c,
+ 0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99,
+ 0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632,
+ 0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf,
+ 0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d,
+ 0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a,
+ 0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi20a1_r3[] = {
+ 0x00014b26, 0x00028d29, 0x000393ad, 0x00049630,
+ 0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d,
+ 0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b,
+ 0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418,
+ 0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1,
+ 0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e,
+ 0x000f4702, 0x00008905, 0x00020c09, 0x0003128c,
+ 0x0004148f, 0x00051712, 0x00065916, 0x00091b19,
+ 0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2,
+ 0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf,
+ 0x00035303, 0x00045506, 0x0005978a, 0x0006998d,
+ 0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a,
+ 0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a0_r3[] = {
+ 0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2,
+ 0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c,
+ 0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2,
+ 0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3,
+ 0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d,
+ 0x00153717, 0x00168320, 0x00180ca9, 0x00199633,
+ 0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4,
+ 0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f,
+ 0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734,
+ 0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5,
+ 0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010,
+ 0x001ef999, 0x00010522, 0x00028eac, 0x00045835,
+ 0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6,
+ 0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111,
+ 0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936,
+ 0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7,
+ 0x00031070, 0x000499fa, 0x00062888, 0x0007f212,
+ 0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37,
+ 0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868,
+ 0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313,
+ 0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38,
+ 0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969,
+ 0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414,
+ 0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39,
+ 0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a,
+ 0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515,
+ 0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a,
+ 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a1_r3[] = {
+ 0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd,
+ 0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07,
+ 0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d,
+ 0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde,
+ 0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88,
+ 0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e,
+ 0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf,
+ 0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89,
+ 0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f,
+ 0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260,
+ 0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a,
+ 0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0,
+ 0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361,
+ 0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b,
+ 0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1,
+ 0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462,
+ 0x00130deb, 0x00149775, 0x00162603, 0x0017af8c,
+ 0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2,
+ 0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563,
+ 0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d,
+ 0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3,
+ 0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664,
+ 0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f,
+ 0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4,
+ 0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5,
+ 0x001390ee, 0x00151a78, 0x0016a906, 0x00183290,
+ 0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5,
+ 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_pilotlt_r3[] = {
+ 0x76540213, 0x62407351, 0x76543210, 0x76540213,
+ 0x76540213, 0x76430521,
+};
+
+static const u32 b43_ntab_channelest_r3[] = {
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+};
+
+static const u8 b43_ntab_framelookup_r3[] = {
+ 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
+ 0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e,
+ 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a,
+ 0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a,
+};
+
+static const u8 b43_ntab_estimatepowerlt0_r3[] = {
+ 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+ 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+ 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+ 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+ 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+ 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+ 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+ 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_estimatepowerlt1_r3[] = {
+ 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+ 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+ 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+ 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+ 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+ 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+ 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+ 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_adjustpower0_r3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 b43_ntab_adjustpower1_r3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u32 b43_ntab_gainctl0_r3[] = {
+ 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+ 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+ 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+ 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+ 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+ 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+ 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+ 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+ 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+ 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+ 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+ 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+ 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+ 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+ 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+ 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+ 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+ 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+ 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+ 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+ 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+ 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+ 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+ 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+ 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+ 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+ 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+ 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+ 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+ 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+ 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+ 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_gainctl1_r3[] = {
+ 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+ 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+ 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+ 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+ 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+ 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+ 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+ 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+ 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+ 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+ 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+ 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+ 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+ 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+ 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+ 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+ 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+ 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+ 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+ 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+ 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+ 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+ 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+ 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+ 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+ 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+ 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+ 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+ 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+ 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+ 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+ 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_iqlt0_r3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_iqlt1_r3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_loftlt0_r3[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+static const u16 b43_ntab_loftlt1_r3[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+/* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -1813,7 +2887,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
#define ntab_upload(dev, offset, data) do { \
b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \
} while (0)
-
void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
{
/* Static tables */
@@ -1847,10 +2920,39 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
}
+#define ntab_upload_r3(dev, offset, data) do { \
+ b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
+ } while (0)
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
/* Static tables */
- /* TODO */
+ ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+ ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+ ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+ ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+ ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
+ ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+ ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+ ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+ ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+ ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
+ b43_ntab_estimatepowerlt0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
+ b43_ntab_estimatepowerlt1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */
/* TODO */
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4ec593ba3eef..016a480b2dc6 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -109,6 +109,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+/* Static N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
+#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
+#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
+#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
+#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
+#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
+#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
+#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
+#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
+#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
+#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
+#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
+#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
+#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
+#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
+#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
+#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
+#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
+#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
+#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
+#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
+#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
+#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */
+#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */
+#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
+
#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index e6b0528f3b52..e5be381c17bc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -32,6 +32,36 @@
#include "dma.h"
#include "pio.h"
+static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = {
+ { B43_CCK_RATE_1MB, 0x0, 0x0 },
+ { B43_CCK_RATE_2MB, 0x0, 0x1 },
+ { B43_CCK_RATE_5MB, 0x0, 0x2 },
+ { B43_CCK_RATE_11MB, 0x0, 0x3 },
+ { B43_OFDM_RATE_6MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK },
+ { B43_OFDM_RATE_9MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK },
+ { B43_OFDM_RATE_12MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK },
+ { B43_OFDM_RATE_18MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK },
+ { B43_OFDM_RATE_24MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 },
+ { B43_OFDM_RATE_36MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 },
+ { B43_OFDM_RATE_48MB, B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 },
+ { B43_OFDM_RATE_54MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 },
+};
+
+static const struct b43_tx_legacy_rate_phy_ctl_entry *
+b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate)
+{
+ const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) {
+ e = &(b43_tx_legacy_rate_phy_ctl[i]);
+ if (e->bitrate == bitrate)
+ return e;
+ }
+
+ B43_WARN_ON(1);
+ return NULL;
+}
/* Extract the bitrate index out of a CCK PLCP header. */
static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
@@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
}
}
+static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
+{
+ const struct b43_phy *phy = &dev->phy;
+ const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+ u16 control = 0;
+ u16 bw;
+
+ if (phy->type == B43_PHYTYPE_LP)
+ bw = B43_TXH_PHY1_BW_20;
+ else /* FIXME */
+ bw = B43_TXH_PHY1_BW_20;
+
+ if (0) { /* FIXME: MIMO */
+ } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) {
+ control = bw;
+ } else {
+ control = bw;
+ e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
+ if (e) {
+ control |= e->coding_rate;
+ control |= e->modulation;
+ }
+ control |= B43_TXH_PHY1_MODE_SISO;
+ }
+
+ return control;
+}
+
static u8 b43_calc_fallback_rate(u8 bitrate)
{
switch (bitrate) {
@@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
extra_ft |= B43_TXH_EFT_RTSFB_OFDM;
else
extra_ft |= B43_TXH_EFT_RTSFB_CCK;
+
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
+ phy->type == B43_PHYTYPE_N) {
+ txhdr->phy_ctl1_rts = cpu_to_le16(
+ b43_generate_tx_phy_ctl1(dev, rts_rate));
+ txhdr->phy_ctl1_rts_fb = cpu_to_le16(
+ b43_generate_tx_phy_ctl1(dev, rts_rate_fb));
+ }
}
/* Magic cookie */
@@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
else
txhdr->new_format.cookie = cpu_to_le16(cookie);
+ if (phy->type == B43_PHYTYPE_N) {
+ txhdr->phy_ctl1 =
+ cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
+ txhdr->phy_ctl1_fb =
+ cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb));
+ }
+
/* Apply the bitfields */
txhdr->mac_ctl = cpu_to_le32(mac_ctl);
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
@@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
- status.flag |= RX_FLAG_TSFT;
+ status.flag |= RX_FLAG_MACTIME_MPDU;
}
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d4cf9b390af3..42debb5cd6fa 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -73,6 +73,12 @@ struct b43_txhdr {
} __packed;
} __packed;
+struct b43_tx_legacy_rate_phy_ctl_entry {
+ u8 bitrate;
+ u16 coding_rate;
+ u16 modulation;
+};
+
/* MAC TX control */
#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1f11e1670bf0..c7fd73e3ad76 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2442,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
return err;
}
-static int b43legacy_op_tx(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+static void b43legacy_op_tx(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
@@ -2466,7 +2466,6 @@ out:
/* Drop the packet. */
dev_kfree_skb_any(skb);
}
- return NETDEV_TX_OK;
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 7d177d97f1f7..3a95541708a6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
- status.flag |= RX_FLAG_TSFT;
+ status.flag |= RX_FLAG_MACTIME_MPDU;
}
chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f371416..4b97f918daff 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
netif_stop_queue(priv->net_dev);
priv->status |= STATUS_RESET_PENDING;
if (priv->reset_backoff)
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- priv->reset_backoff * HZ);
+ schedule_delayed_work(&priv->reset_work,
+ priv->reset_backoff * HZ);
else
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- 0);
+ schedule_delayed_work(&priv->reset_work, 0);
if (priv->reset_backoff < MAX_RESET_BACKOFF)
priv->reset_backoff++;
@@ -1397,7 +1396,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
}
/*
- * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
*
* After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
*
@@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
if (priv->stop_hang_check) {
priv->stop_hang_check = 0;
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
}
fail_up:
@@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
if (priv->stop_rf_kill) {
priv->stop_rf_kill = 0;
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
}
deferred = 1;
@@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
priv->status |= STATUS_ASSOCIATING;
priv->connect_start = get_seconds();
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+ schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
@@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
return;
if (priv->status & STATUS_SECURITY_UPDATED)
- queue_delayed_work(priv->workqueue, &priv->security_work, 0);
+ schedule_delayed_work(&priv->security_work, 0);
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
+ schedule_delayed_work(&priv->wx_event_work, 0);
}
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
}
static void send_scan_event(void *data)
@@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event_later))
- queue_delayed_work(priv->workqueue,
- &priv->scan_event_later,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event_later,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event_later);
- queue_work(priv->workqueue, &priv->scan_event_now);
+ schedule_work(&priv->scan_event_now);
}
}
@@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
/* Make sure the RF_KILL check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
} else
schedule_reset(priv);
}
@@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv,
IPW_DEBUG_INFO("exit\n");
}
-static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
+static void ipw2100_kill_works(struct ipw2100_priv *priv)
{
- if (priv->workqueue) {
- priv->stop_rf_kill = 1;
- priv->stop_hang_check = 1;
- cancel_delayed_work(&priv->reset_work);
- cancel_delayed_work(&priv->security_work);
- cancel_delayed_work(&priv->wx_event_work);
- cancel_delayed_work(&priv->hang_check);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_event_later);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- }
+ priv->stop_rf_kill = 1;
+ priv->stop_hang_check = 1;
+ cancel_delayed_work_sync(&priv->reset_work);
+ cancel_delayed_work_sync(&priv->security_work);
+ cancel_delayed_work_sync(&priv->wx_event_work);
+ cancel_delayed_work_sync(&priv->hang_check);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->scan_event_now);
+ cancel_delayed_work_sync(&priv->scan_event_later);
}
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work)
priv->last_rtc = rtc;
if (!priv->stop_hang_check)
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
spin_unlock_irqrestore(&priv->low_lock, flags);
}
@@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
if (!priv->stop_rf_kill)
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
goto exit_unlock;
}
@@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_LIST_HEAD(&priv->fw_pend_list);
INIT_STAT(&priv->fw_pend_stat);
- priv->workqueue = create_workqueue(DRV_NAME);
-
INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
@@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
if (dev->irq)
free_irq(dev->irq, priv);
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
/* These are safe to call even if they weren't allocated */
ipw2100_queues_free(priv);
@@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
* first, then close() will crash. */
unregister_netdev(dev);
- /* ipw2100_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
ipw2100_queues_free(priv);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 838002b4881e..99cba968aa58 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -580,7 +580,6 @@ struct ipw2100_priv {
struct tasklet_struct irq_tasklet;
- struct workqueue_struct *workqueue;
struct delayed_work reset_work;
struct delayed_work security_work;
struct delayed_work wx_event_work;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index ae438ed80c2f..160881f234cc 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv)
/* If we aren't associated, schedule turning the LED off */
if (!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue,
- &priv->led_link_off,
- LD_TIME_LINK_ON);
+ schedule_delayed_work(&priv->led_link_off,
+ LD_TIME_LINK_ON);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv)
* turning the LED on (blink while unassociated) */
if (!(priv->status & STATUS_RF_KILL_MASK) &&
!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue, &priv->led_link_on,
- LD_TIME_LINK_OFF);
+ schedule_delayed_work(&priv->led_link_on,
+ LD_TIME_LINK_OFF);
}
@@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
priv->status |= STATUS_LED_ACT_ON;
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
} else {
/* Reschedule LED off for full time period */
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
}
}
@@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
if (disable_radio) {
priv->status |= STATUS_RF_KILL_SW;
- if (priv->workqueue) {
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- }
- queue_work(priv->workqueue, &priv->down);
+ cancel_delayed_work(&priv->request_scan);
+ cancel_delayed_work(&priv->request_direct_scan);
+ cancel_delayed_work(&priv->request_passive_scan);
+ cancel_delayed_work(&priv->scan_event);
+ schedule_work(&priv->down);
} else {
priv->status &= ~STATUS_RF_KILL_SW;
if (rf_kill_active(priv)) {
@@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
"disabled by HW switch\n");
/* Make sure the RF_KILL check timer is running */
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(2 * HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(2 * HZ));
} else
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
}
return 1;
@@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
cancel_delayed_work(&priv->request_passive_scan);
cancel_delayed_work(&priv->scan_event);
schedule_work(&priv->link_down);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
handled |= IPW_INTA_BIT_RF_KILL_DONE;
}
@@ -2103,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
handled |= IPW_INTA_BIT_FATAL_ERROR;
}
@@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
}
-/*
- * NOTE: This must be executed from our workqueue as it results in udelay
- * being called which may corrupt the keyboard if executed on default
- * workqueue
- */
static void ipw_adapter_restart(void *adapter)
{
struct ipw_priv *priv = adapter;
@@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data)
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else if (priv->status & STATUS_SCANNING) {
IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
"after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
ipw_abort_scan(priv);
- queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
+ schedule_delayed_work(&priv->scan_check, HZ);
}
}
@@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
if (priv->status & STATUS_ASSOCIATING) {
IPW_DEBUG_ASSOC("Disassociating while associating.\n");
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
@@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
priv->quality = quality;
- queue_delayed_work(priv->workqueue, &priv->gather_stats,
- IPW_STATS_INTERVAL);
+ schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
}
static void ipw_bg_gather_stats(struct work_struct *work)
@@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
@@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
if (!(priv->status & STATUS_ROAMING)) {
priv->status |= STATUS_ROAMING;
if (!(priv->status & STATUS_SCANNING))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return;
}
@@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
* channels..) */
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
@@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event))
- queue_delayed_work(priv->workqueue, &priv->scan_event,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
union iwreq_data wrqu;
@@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG_ASSOC
("queueing adhoc check\n");
- queue_delayed_work(priv->
- workqueue,
- &priv->
- adhoc_check,
- le16_to_cpu(priv->
- assoc_request.
- beacon_interval));
+ schedule_delayed_work(
+ &priv->adhoc_check,
+ le16_to_cpu(priv->
+ assoc_request.
+ beacon_interval));
break;
}
priv->status &= ~STATUS_ASSOCIATING;
priv->status |= STATUS_ASSOCIATED;
- queue_work(priv->workqueue,
- &priv->system_config);
+ schedule_work(&priv->system_config);
#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
@@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv,
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
priv->status |= STATUS_SCAN_FORCED;
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
break;
}
priv->status &= ~STATUS_SCAN_FORCED;
#endif /* CONFIG_IPW2200_MONITOR */
/* Do queued direct scans first */
- if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
- queue_delayed_work(priv->workqueue,
- &priv->request_direct_scan, 0);
- }
+ if (priv->status & STATUS_DIRECT_SCAN_PENDING)
+ schedule_delayed_work(&priv->request_direct_scan, 0);
if (!(priv->status & (STATUS_ASSOCIATED |
STATUS_ASSOCIATING |
STATUS_ROAMING |
STATUS_DISASSOCIATING)))
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
else if (priv->status & STATUS_ROAMING) {
if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
/* If a scan completed and we are in roam mode, then
* the scan that completed was the one requested as a
* result of entering roam... so, schedule the
* roam work */
- queue_work(priv->workqueue,
- &priv->roam);
+ schedule_work(&priv->roam);
else
/* Don't schedule if we aborted the scan */
priv->status &= ~STATUS_ROAMING;
} else if (priv->status & STATUS_SCAN_PENDING)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->request_scan,
+ round_jiffies_relative(HZ));
/* Send an empty event to user space.
* We don't send the received data on the event because
@@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
+ schedule_work(&priv->rx_replenish);
/* If we've added more space for the firmware to place data, tell it */
if (write != rxq->write)
@@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data)
return;
}
- queue_delayed_work(priv->workqueue, &priv->adhoc_check,
- le16_to_cpu(priv->assoc_request.beacon_interval));
+ schedule_delayed_work(&priv->adhoc_check,
+ le16_to_cpu(priv->assoc_request.beacon_interval));
}
static void ipw_bg_adhoc_check(struct work_struct *work)
@@ -6523,8 +6502,7 @@ send_request:
} else
priv->status &= ~STATUS_SCAN_PENDING;
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IPW_SCAN_CHECK_WATCHDOG);
+ schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
done:
mutex_unlock(&priv->mutex);
return err;
@@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
!memcmp(network->ssid,
priv->assoc_network->ssid,
network->ssid_len)) {
- queue_work(priv->workqueue,
- &priv->merge_networks);
+ schedule_work(&priv->merge_networks);
}
}
@@ -7663,7 +7640,7 @@ static int ipw_associate(void *data)
if (priv->status & STATUS_DISASSOCIATING) {
IPW_DEBUG_ASSOC("Not attempting association (in "
"disassociating)\n ");
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
return 0;
}
@@ -7731,12 +7708,10 @@ static int ipw_associate(void *data)
if (!(priv->status & STATUS_SCANNING)) {
if (!(priv->config & CFG_SPEED_SCAN))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- SCAN_INTERVAL);
+ schedule_delayed_work(&priv->request_scan,
+ SCAN_INTERVAL);
else
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return 0;
@@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
priv->ieee->iw_mode = wrqu->mode;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return err;
}
@@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
IPW_DEBUG_WX("Start scan\n");
- queue_delayed_work(priv->workqueue, work, 0);
+ schedule_delayed_work(work, 0);
return 0;
}
@@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
#else
priv->net_dev->type = ARPHRD_IEEE80211;
#endif
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
ipw_set_channel(priv, parms[1]);
@@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
return 0;
}
priv->net_dev->type = ARPHRD_ETHER;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
mutex_unlock(&priv->mutex);
return 0;
@@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev,
{
struct ipw_priv *priv = libipw_priv(dev);
IPW_DEBUG_WX("RESET\n");
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
return 0;
}
@@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
printk(KERN_INFO "%s: Setting MAC to %pM\n",
priv->net_dev->name, priv->mac_addr);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return 0;
}
@@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
- if (priv->workqueue)
- queue_delayed_work(priv->workqueue,
- &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
goto exit_unlock;
}
@@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter)
"device\n");
/* we can not do an adapter restart while inside an irq lock */
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else
IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
"enabled\n");
@@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv)
notify_wx_assoc_event(priv);
if (priv->config & CFG_BACKGROUND_SCAN)
- queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
+ schedule_delayed_work(&priv->request_scan, HZ);
}
static void ipw_bg_link_up(struct work_struct *work)
@@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv)
if (!(priv->status & STATUS_EXIT_PENDING)) {
/* Queue up another scan... */
- queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
} else
cancel_delayed_work(&priv->scan_event);
}
@@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
{
int ret = 0;
- priv->workqueue = create_workqueue(DRV_NAME);
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv)
IPW_WARNING("Radio Frequency Kill Switch is On:\n"
"Kill switch must be turned off for "
"wireless networking to work.\n");
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
return 0;
}
@@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv)
/* If configure to try and auto-associate, kick
* off a scan. */
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
return 0;
}
@@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
if (err) {
IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
- goto out_destroy_workqueue;
+ goto out_iounmap;
}
SET_NETDEV_DEV(net_dev, &pdev->dev);
@@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
free_irq(pdev->irq, priv);
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
out_iounmap:
iounmap(priv->hw_base);
out_pci_release_regions:
@@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
kfree(priv->cmdlog);
priv->cmdlog = NULL;
}
- /* ipw_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- cancel_delayed_work(&priv->adhoc_check);
- cancel_delayed_work(&priv->gather_stats);
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_check);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+
+ /* make sure all work items are inactive */
+ cancel_delayed_work_sync(&priv->adhoc_check);
+ cancel_work_sync(&priv->associate);
+ cancel_work_sync(&priv->disassociate);
+ cancel_work_sync(&priv->system_config);
+ cancel_work_sync(&priv->rx_replenish);
+ cancel_work_sync(&priv->adapter_restart);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->up);
+ cancel_work_sync(&priv->down);
+ cancel_delayed_work_sync(&priv->request_scan);
+ cancel_delayed_work_sync(&priv->request_direct_scan);
+ cancel_delayed_work_sync(&priv->request_passive_scan);
+ cancel_delayed_work_sync(&priv->scan_event);
+ cancel_delayed_work_sync(&priv->gather_stats);
+ cancel_work_sync(&priv->abort_scan);
+ cancel_work_sync(&priv->roam);
+ cancel_delayed_work_sync(&priv->scan_check);
+ cancel_work_sync(&priv->link_up);
+ cancel_work_sync(&priv->link_down);
+ cancel_delayed_work_sync(&priv->led_link_on);
+ cancel_delayed_work_sync(&priv->led_link_off);
+ cancel_delayed_work_sync(&priv->led_act_off);
+ cancel_work_sync(&priv->merge_networks);
/* Free MAC hash list for ADHOC */
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
@@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
priv->suspend_time = get_seconds() - priv->suspend_at;
/* Bring the device back up */
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
return 0;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index d7d049c7a4fa..0441445b8bfa 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -1299,8 +1299,6 @@ struct ipw_priv {
u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
u8 direct_scan_ssid_len;
- struct workqueue_struct *workqueue;
-
struct delayed_work adhoc_check;
struct work_struct associate;
struct work_struct disassociate;
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644
index 000000000000..2a45dd44cc12
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -0,0 +1,116 @@
+config IWLWIFI_LEGACY
+ tristate "Intel Wireless Wifi legacy devices"
+ depends on PCI && MAC80211
+ select FW_LOADER
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
+
+menu "Debugging Options"
+ depends on IWLWIFI_LEGACY
+
+config IWLWIFI_LEGACY_DEBUG
+ bool "Enable full debugging output in 4965 and 3945 drivers"
+ depends on IWLWIFI_LEGACY
+ ---help---
+ This option will enable debug tracing output for the iwlwifilegacy
+ drivers.
+
+ This will result in the kernel module being ~100k larger. You can
+ control which debug output is sent to the kernel log by setting the
+ value in
+
+ /sys/class/net/wlan0/device/debug_level
+
+ This entry will only exist if this option is enabled.
+
+ To set a value, simply echo the desired hex bitmask to the same file:
+
+ % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+ You can find the list of debug mask values in:
+ drivers/net/wireless/iwlegacy/iwl-debug.h
+
+ If this is your first time using this driver, you should say Y here
+ as the debug information can assist others in helping you resolve
+ any problems you may encounter.
+
+config IWLWIFI_LEGACY_DEBUGFS
+ bool "4965 and 3945 debugfs support"
+ depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+ ---help---
+ Enable creation of debugfs files for the iwlwifilegacy drivers. This
+ is a low-impact option that allows getting insight into the
+ driver's state at runtime.
+
+config IWLWIFI_LEGACY_DEVICE_TRACING
+ bool "iwlwifilegacy legacy device access tracing"
+ depends on IWLWIFI_LEGACY
+ depends on EVENT_TRACING
+ help
+ Say Y here to trace all commands, including TX frames and IO
+ accesses, sent to the device. If you say yes, iwlwifilegacy will
+ register with the ftrace framework for event tracing and dump
+ all this information to the ringbuffer; you may need to
+ increase the ringbuffer size. See the ftrace documentation
+ for more information.
+
+ When tracing is not enabled, this option still has some
+ (though rather small) overhead.
+
+ If unsure, say Y so we can help you better when problems
+ occur.
+endmenu
+
+config IWL4965
+ tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
+ depends on IWLWIFI_LEGACY
+ ---help---
+ Select to build the driver supporting the:
+
+ Intel Wireless WiFi Link 4965AGN
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ In order to use this driver, you will need a microcode (uCode)
+ image for it. You can obtain the microcode from:
+
+ <http://intellinuxwireless.org/>.
+
+ The microcode is typically installed in /lib/firmware. You can
+ look in the hotplug script /etc/hotplug/firmware.agent to
+ determine which directory FIRMWARE_DIR is set to when the script
+ runs.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The
+ module will be called iwl4965.
+
+config IWL3945
+ tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
+ depends on IWLWIFI_LEGACY
+ ---help---
+ Select to build the driver supporting the:
+
+ Intel PRO/Wireless 3945ABG/BG Network Connection
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ In order to use this driver, you will need a microcode (uCode)
+ image for it. You can obtain the microcode from:
+
+ <http://intellinuxwireless.org/>.
+
+ The microcode is typically installed in /lib/firmware. You can
+ look in the hotplug script /etc/hotplug/firmware.agent to
+ determine which directory FIRMWARE_DIR is set to when the script
+ runs.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The
+ module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644
index 000000000000..d56aeb38c211
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -0,0 +1,25 @@
+obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
+iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwl-legacy-objs += iwl-scan.o iwl-led.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+
+iwl-legacy-objs += $(iwl-legacy-m)
+
+CFLAGS_iwl-devtrace.o := -I$(src)
+
+# 4965
+obj-$(CONFIG_IWL4965) += iwl4965.o
+iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
+iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
+iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
+iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
+iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+
+# 3945
+obj-$(CONFIG_IWL3945) += iwl3945.o
+iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
index ef0835b01b6b..cfabb38793ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
ssize_t ret;
- struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
+ *max_ofdm;
struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct iwl39_statistics_rx_non_phy *general, *accum_general;
struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
ssize_t ret;
struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
index 70809c53c215..8fef4b32b447 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
#include "iwl-core.h"
#include "iwl-debug.h"
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
index 2c9ed2b502a3..836c9919f82e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -185,4 +185,3 @@ struct iwl3945_tfd {
#endif /* __iwl_3945_fh_h__ */
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 65b5834da28c..779d3cb86e2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -164,12 +164,11 @@ struct iwl3945_eeprom {
/*
* Per-channel regulatory data.
*
- * Each channel that *might* be supported by 3945 or 4965 has a fixed location
+ * Each channel that *might* be supported by 3945 has a fixed location
* in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
* txpower (MSB).
*
- * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ * Entries immediately below are for 20 MHz channel width.
*
* 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
index abe2b739c4dc..abd923558d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -56,36 +56,9 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
.callback = NULL,
};
- return iwl_send_cmd(priv, &cmd);
-}
-
-/* Set led on command */
-static int iwl3945_led_on(struct iwl_priv *priv)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .on = IWL_LED_SOLID,
- .off = 0,
- .interval = IWL_DEF_LED_INTRVL
- };
- return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
-/* Set led off command */
-static int iwl3945_led_off(struct iwl_priv *priv)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .on = 0,
- .off = 0,
- .interval = IWL_DEF_LED_INTRVL
- };
- IWL_DEBUG_LED(priv, "led off\n");
- return iwl3945_send_led_cmd(priv, &led_cmd);
+ return iwl_legacy_send_cmd(priv, &cmd);
}
const struct iwl_led_ops iwl3945_led_ops = {
.cmd = iwl3945_send_led_cmd,
- .on = iwl3945_led_on,
- .off = iwl3945_led_off,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
index ce990adc51e7..96716276eb0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 1f3e7e34fbc7..977bd2477c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
};
#define IWL_RATE_MAX_WINDOW 62
-#define IWL_RATE_FLUSH (3*HZ)
+#define IWL_RATE_FLUSH (3*HZ)
#define IWL_RATE_WIN_FLUSH (HZ/2)
#define IWL39_RATE_HIGH_TH 11520
#define IWL_SUCCESS_UP_TH 8960
@@ -394,18 +394,18 @@ out:
IWL_DEBUG_INFO(priv, "leave\n");
}
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
/* rate scale requires free function to be implemented */
-static void rs_free(void *priv)
+static void iwl3945_rs_free(void *priv)
{
return;
}
-static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
+static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct iwl3945_rs_sta *rs_sta;
struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
@@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
return rs_sta;
}
-static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
+static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
void *priv_sta)
{
struct iwl3945_rs_sta *rs_sta = priv_sta;
@@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
/**
- * rs_tx_status - Update rate control values based on Tx results
+ * iwl3945_rs_tx_status - Update rate control values based on Tx results
*
* NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
* the hardware for each rate.
*/
-static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
+static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
@@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
}
/**
- * rs_get_rate - find the rate for the requested packet
+ * iwl3945_rs_get_rate - find the rate for the requested packet
*
* Returns the ieee80211_rate structure allocated by the driver.
*
@@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
* rate table and must reference the driver allocated rate table
*
*/
-static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
+static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_supported_band *sband = txrc->sband;
@@ -644,7 +644,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
- u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
+ u16 rate_mask;
s8 max_rate_idx = -1;
struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
-static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+static void iwl3945_rs_rate_init_stub(void *priv_r,
+ struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
}
@@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
- .tx_status = rs_tx_status,
- .get_rate = rs_get_rate,
- .rate_init = rs_rate_init_stub,
- .alloc = rs_alloc,
- .free = rs_free,
- .alloc_sta = rs_alloc_sta,
- .free_sta = rs_free_sta,
+ .tx_status = iwl3945_rs_tx_status,
+ .get_rate = iwl3945_rs_get_rate,
+ .rate_init = iwl3945_rs_rate_init_stub,
+ .alloc = iwl3945_rs_alloc,
+ .free = iwl3945_rs_free,
+ .alloc_sta = iwl3945_rs_alloc_sta,
+ .free_sta = iwl3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = iwl3945_add_debugfs,
.remove_sta_debugfs = iwl3945_remove_debugfs,
@@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_ops);
}
-
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 39b6f16c87fa..d096dc28204d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -51,7 +51,6 @@
#include "iwl-led.h"
#include "iwl-3945-led.h"
#include "iwl-3945-debugfs.h"
-#include "iwl-legacy.h"
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -172,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv)
return;
}
- disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
- array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
+ disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
+ array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
disable_ptr);
for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
- iwl_write_targ_mem(priv,
+ iwl_legacy_write_targ_mem(priv,
disable_ptr + (i * sizeof(u32)),
evt_disable[i]);
@@ -202,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
return -1;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -255,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
break;
case IEEE80211_BAND_2GHZ:
if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
if (rate == IWL_RATE_11M_INDEX)
next_rate = IWL_RATE_5M_INDEX;
}
@@ -285,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
- for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
@@ -294,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
- if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
+ if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
(txq_id != IWL39_CMD_QUEUE_NUM) &&
priv->mac80211_registered)
- iwl_wake_queue(priv, txq);
+ iwl_legacy_wake_queue(priv, txq);
}
/**
@@ -317,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
int rate_idx;
int fail;
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
"is out of range [0-%d] %d %d\n", txq_id,
index, txq->q.n_bd, txq->q.write_ptr,
@@ -363,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
* RX handler implementations
*
*****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- * based on the assumption of all statistics counter are in DWORD
- * FIXME: This function is for debugging, do not deal with
- * the case of counters roll-over.
- */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
__le32 *stats)
{
@@ -410,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl3945_notif_statistics),
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
#endif
- iwl_recover_from_statistics(priv, pkt);
+ iwl_legacy_recover_from_statistics(priv, pkt);
memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
}
@@ -425,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
__le32 *flag = (__le32 *)&pkt->u.raw;
if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
memset(&priv->_3945.accum_statistics, 0,
sizeof(struct iwl3945_notif_statistics));
memset(&priv->_3945.delta_statistics, 0,
@@ -496,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
}
if (!iwl3945_mod_params.sw_crypto)
- iwl_set_decrypted_flag(priv,
+ iwl_legacy_set_decrypted_flag(priv,
(struct ieee80211_hdr *)rxb_addr(rxb),
le32_to_cpu(rx_end->status), stats);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
- iwl_update_stats(priv, false, fc, len);
+ iwl_legacy_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
@@ -528,10 +523,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
rx_status.flag = 0;
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
if (rx_status.band == IEEE80211_BAND_5GHZ)
@@ -575,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
rx_status.signal, rx_status.signal,
rx_status.rate_idx);
- iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
+ iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
+ header);
if (network_packet) {
priv->_3945.last_beacon_time =
@@ -695,8 +692,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
/* We need to figure out how to get the sta->supp_rates while
* in this running context */
- rate_mask = IWL_RATES_MASK;
-
+ rate_mask = IWL_RATES_MASK_3945;
/* Set retry limit on DATA packets and Probe Responses*/
if (ieee80211_is_probe_resp(fc))
@@ -744,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
station->sta.rate_n_flags = cpu_to_le16(tx_rate);
station->sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
+ iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
@@ -759,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
* to set power to V_AUX, do
if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
~APMG_PS_CTRL_MSK_PWR_SRC);
@@ -769,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
}
*/
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
~APMG_PS_CTRL_MSK_PWR_SRC);
@@ -779,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
- iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
- iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
- iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
- iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
+ iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
+ rxq->rb_stts_dma);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
@@ -793,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
/* fake read to flush all prev I/O */
- iwl_read_direct32(priv, FH39_RSSR_CTRL);
+ iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
return 0;
}
@@ -802,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
{
/* bypass mode */
- iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
+ iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
/* RA 0 is active */
- iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
+ iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
/* all 6 fifo are active */
- iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
- iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
- iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
- iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
- iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
+ iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+ iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
- iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
+ iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
priv->_3945.shared_phys);
- iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
+ iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
@@ -844,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
iwl3945_hw_txq_ctx_free(priv);
/* allocate tx queue structure */
- rc = iwl_alloc_txq_mem(priv);
+ rc = iwl_legacy_alloc_txq_mem(priv);
if (rc)
return rc;
@@ -857,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
+ rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
+ slots_num, txq_id);
if (rc) {
IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
goto error;
@@ -875,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
/*
* Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
* NOTE: This does not load uCode nor start the embedded processor
*/
static int iwl3945_apm_init(struct iwl_priv *priv)
{
- int ret = iwl_apm_init(priv);
+ int ret = iwl_legacy_apm_init(priv);
/* Clear APMG (NIC's internal power management) interrupts */
- iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+ iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+ iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
/* Reset radio chip */
- iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
return ret;
}
@@ -898,30 +897,28 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
{
struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
unsigned long flags;
- u8 rev_id = 0;
+ u8 rev_id = priv->pci_dev->revision;
spin_lock_irqsave(&priv->lock, flags);
/* Determine HW type */
- pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
-
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
IWL_DEBUG_INFO(priv, "RTP type\n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
} else {
IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
}
if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
} else
IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
@@ -929,24 +926,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
if ((eeprom->board_revision & 0xF0) == 0xD0) {
IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
eeprom->board_revision);
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
} else {
IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
eeprom->board_revision);
- iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
}
if (eeprom->almgor_m_version <= 1) {
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
eeprom->almgor_m_version);
} else {
IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
eeprom->almgor_m_version);
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -974,7 +971,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
/* Allocate the RX queue, or reset if it is already allocated */
if (!rxq->bd) {
- rc = iwl_rx_queue_alloc(priv);
+ rc = iwl_legacy_rx_queue_alloc(priv);
if (rc) {
IWL_ERR(priv, "Unable to initialize Rx queue\n");
return -ENOMEM;
@@ -989,10 +986,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
/* Look at using this instead:
rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
*/
- iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
rc = iwl3945_txq_ctx_reset(priv);
if (rc)
@@ -1017,12 +1014,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
txq_id++)
if (txq_id == IWL39_CMD_QUEUE_NUM)
- iwl_cmd_queue_free(priv);
+ iwl_legacy_cmd_queue_free(priv);
else
- iwl_tx_queue_free(priv, txq_id);
+ iwl_legacy_tx_queue_free(priv, txq_id);
/* free tx queue structure */
- iwl_free_txq_mem(priv);
+ iwl_legacy_txq_mem(priv);
}
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1030,12 +1027,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
int txq_id;
/* stop SCD */
- iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
- iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
+ iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
/* reset TFD queues */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
+ iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1000);
@@ -1102,12 +1099,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
#define IWL_TEMPERATURE_LIMIT_TIMER 6
/**
- * is_temp_calib_needed - determines if new calibration is needed
+ * iwl3945_is_temp_calib_needed - determines if new calibration is needed
*
* records new temperature in tx_mgr->temperature.
* replaces tx_mgr->last_temperature *only* if calib needed
* (assumes caller will actually do the calibration!). */
-static int is_temp_calib_needed(struct iwl_priv *priv)
+static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
{
int temp_diff;
@@ -1338,9 +1335,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in
* based on eeprom channel data) for this channel. */
power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
- /* further limit to user's max power preference.
- * FIXME: Other spectrum management power limitations do not
- * seem to apply?? */
power = min(power, priv->tx_power_user_lmt);
scan_power_info->requested_power = power;
@@ -1394,7 +1388,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
- ch_info = iwl_get_channel_info(priv, priv->band, chan);
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
if (!ch_info) {
IWL_ERR(priv,
"Failed to get channel info for channel %d [%d]\n",
@@ -1402,7 +1396,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
return -EINVAL;
}
- if (!is_channel_valid(ch_info)) {
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
"non-Tx channel.\n");
return 0;
@@ -1437,7 +1431,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
txpower.power[i].rate);
}
- return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
+ return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
sizeof(struct iwl3945_txpowertable_cmd),
&txpower);
@@ -1571,7 +1565,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
/* set up new Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
- a_band = is_channel_a_band(ch_info);
+ a_band = iwl_legacy_is_channel_a_band(ch_info);
/* Get this chnlgrp's factory calibration temperature */
ref_temp = (s16)eeprom->groups[ch_info->group_index].
@@ -1583,7 +1577,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
ref_temp);
/* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT;
+ for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
rate_index++) {
int power_idx =
ch_info->power_info[rate_index].base_power_index;
@@ -1637,7 +1631,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
- a_band = is_channel_a_band(ch_info);
+ a_band = iwl_legacy_is_channel_a_band(ch_info);
/* find minimum power of all user and regulatory constraints
* (does not consider h/w clipping limitations) */
@@ -1653,7 +1647,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
/* update txpower settings for all channels,
* send to NIC if associated. */
- is_temp_calib_needed(priv);
+ iwl3945_is_temp_calib_needed(priv);
iwl3945_hw_reg_comp_txpower_temp(priv);
return 0;
@@ -1671,8 +1665,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
.flags = CMD_WANT_SKB,
.data = &rxon_assoc,
};
- const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+ const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+ const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1688,7 +1682,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
rxon_assoc.reserved = 0;
- rc = iwl_send_cmd_sync(priv, &cmd);
+ rc = iwl_legacy_send_cmd_sync(priv, &cmd);
if (rc)
return rc;
@@ -1698,7 +1692,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
rc = -EIO;
}
- iwl_free_pages(priv, cmd.reply_page);
+ iwl_legacy_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -1722,7 +1716,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -1;
/* always get timestamp with Rx frame */
@@ -1733,7 +1727,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
- rc = iwl_check_rxon_cmd(priv, ctx);
+ rc = iwl_legacy_check_rxon_cmd(priv, ctx);
if (rc) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
return -EINVAL;
@@ -1742,8 +1736,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* If we don't need to send a full RXON, we can use
* iwl3945_rxon_assoc_cmd which is used to reconfigure filter
* and other flags for the current radio configuration. */
- if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
- rc = iwl_send_rxon_assoc(priv,
+ if (!iwl_legacy_full_rxon_required(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS])) {
+ rc = iwl_legacy_send_rxon_assoc(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
if (rc) {
IWL_ERR(priv, "Error setting RXON_ASSOC "
@@ -1760,7 +1755,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
* before we apply the new config */
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -1770,7 +1765,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
*/
active_rxon->reserved4 = 0;
active_rxon->reserved5 = 0;
- rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
sizeof(struct iwl3945_rxon_cmd),
&priv->contexts[IWL_RXON_CTX_BSS].active);
@@ -1782,9 +1777,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
"configuration (%d).\n", rc);
return rc;
}
- iwl_clear_ucode_stations(priv,
+ iwl_legacy_clear_ucode_stations(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
+ iwl_legacy_restore_stations(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1802,10 +1798,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
staging_rxon->reserved4 = 0;
staging_rxon->reserved5 = 0;
- iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
+ iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
/* Apply the new configuration */
- rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
sizeof(struct iwl3945_rxon_cmd),
staging_rxon);
if (rc) {
@@ -1816,14 +1812,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
if (!new_assoc) {
- iwl_clear_ucode_stations(priv,
+ iwl_legacy_clear_ucode_stations(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
+ iwl_legacy_restore_stations(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
}
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
- rc = priv->cfg->ops->lib->send_tx_power(priv);
+ rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
if (rc) {
IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
return rc;
@@ -1853,7 +1850,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
{
/* This will kick in the "brute force"
* iwl3945_hw_reg_comp_txpower_temp() below */
- if (!is_temp_calib_needed(priv))
+ if (!iwl3945_is_temp_calib_needed(priv))
goto reschedule;
/* Set up a new set of temp-adjusted TxPowers, send to NIC.
@@ -1900,7 +1897,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
u8 grp_channel;
/* Find the group index for the channel ... don't use index 1(?) */
- if (is_channel_a_band(ch_info)) {
+ if (iwl_legacy_is_channel_a_band(ch_info)) {
for (group = 1; group < 5; group++) {
grp_channel = ch_grp[group].group_channel;
if (ch_info->channel <= grp_channel) {
@@ -2080,8 +2077,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
/* initialize Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
i++, ch_info++) {
- a_band = is_channel_a_band(ch_info);
- if (!is_channel_valid(ch_info))
+ a_band = iwl_legacy_is_channel_a_band(ch_info);
+ if (!iwl_legacy_is_channel_valid(ch_info))
continue;
/* find this channel's channel group (*not* "band") index */
@@ -2184,7 +2181,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
{
int rc;
- iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
if (rc < 0)
@@ -2201,10 +2198,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
- iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
- iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
+ iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
+ iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
- iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
+ iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
@@ -2233,7 +2230,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
}
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+ u8 *data)
{
struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
addsta->mode = cmd->mode;
@@ -2261,7 +2259,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv,
if (sta_id_r)
*sta_id_r = IWL_INVALID_STATION;
- ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+ ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM\n", addr);
return ret;
@@ -2296,7 +2294,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
return 0;
}
- return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
vif->bss_conf.bssid);
}
@@ -2347,7 +2345,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
* 1M CCK rates */
if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
index = IWL_FIRST_CCK_RATE;
for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2368,14 +2366,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
/* Update the rate scaling for control frame Tx */
rate_cmd.table_id = 0;
- rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
&rate_cmd);
if (rc)
return rc;
/* Update the rate scaling for data frame Tx */
rate_cmd.table_id = 1;
- return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+ return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
&rate_cmd);
}
@@ -2475,11 +2473,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
/* verify BSM SRAM contents */
- val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
+ val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
for (reg = BSM_SRAM_LOWER_BOUND;
reg < BSM_SRAM_LOWER_BOUND + len;
reg += sizeof(u32), image++) {
- val = iwl_read_prph(priv, reg);
+ val = iwl_legacy_read_prph(priv, reg);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "BSM uCode verification failed at "
"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -2512,7 +2510,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
*/
static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
- _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+ _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
return 0;
}
@@ -2583,16 +2581,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
/* Fill BSM memory with bootstrap instructions */
for (reg_offset = BSM_SRAM_LOWER_BOUND;
reg_offset < BSM_SRAM_LOWER_BOUND + len;
reg_offset += sizeof(u32), image++)
- _iwl_write_prph(priv, reg_offset,
+ _iwl_legacy_write_prph(priv, reg_offset,
le32_to_cpu(*image));
rc = iwl3945_verify_bsm(priv);
@@ -2600,19 +2598,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
return rc;
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
+ iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+ iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
IWL39_RTC_INST_LOWER_BOUND);
- iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+ iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
/* Load bootstrap code into instruction SRAM now,
* to prepare to load "initialize" uCode */
- iwl_write_prph(priv, BSM_WR_CTRL_REG,
+ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
BSM_WR_CTRL_REG_BIT_START);
/* Wait for load of bootstrap uCode to finish */
for (i = 0; i < 100; i++) {
- done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
+ done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
if (!(done & BSM_WR_CTRL_REG_BIT_START))
break;
udelay(10);
@@ -2626,7 +2624,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
/* Enable future boot loads whenever power management unit triggers it
* (e.g. when powering back up after power-save shutdown) */
- iwl_write_prph(priv, BSM_WR_CTRL_REG,
+ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
BSM_WR_CTRL_REG_BIT_START_EN);
return 0;
@@ -2635,7 +2633,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
static struct iwl_hcmd_ops iwl3945_hcmd = {
.rxon_assoc = iwl3945_send_rxon_assoc,
.commit_rxon = iwl3945_commit_rxon,
- .send_bt_config = iwl_send_bt_config,
};
static struct iwl_lib_ops iwl3945_lib = {
@@ -2661,13 +2658,9 @@ static struct iwl_lib_ops iwl3945_lib = {
},
.acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
.release_semaphore = iwl3945_eeprom_release_semaphore,
- .query_addr = iwlcore_eeprom_query_addr,
},
.send_tx_power = iwl3945_send_tx_power,
.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
- .isr_ops = {
- .isr = iwl_isr_legacy,
- },
.debugfs_ops = {
.rx_stats_read = iwl3945_ucode_rx_stats_read,
@@ -2685,7 +2678,6 @@ static const struct iwl_legacy_ops iwl3945_legacy_ops = {
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.get_hcmd_size = iwl3945_get_hcmd_size,
.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
.request_scan = iwl3945_request_scan,
.post_scan = iwl3945_post_scan,
};
@@ -2705,13 +2697,10 @@ static struct iwl_base_params iwl3945_base_params = {
.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
.set_l0s = false,
.use_bsm = true,
- .use_isr_legacy = true,
.led_compensation = 64,
- .broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .tx_power_by_driver = true,
};
static struct iwl_cfg iwl3945_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
index 3eef1eb74a78..b118b59b71de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ struct iwl3945_rs_sta {
/*
* The common struct MUST be first because it is shared between
- * 3945 and agn!
+ * 3945 and 4965!
*/
struct iwl3945_sta_priv {
struct iwl_station_priv_common common;
@@ -201,7 +201,7 @@ struct iwl3945_ibss_seq {
/******************************************************************************
*
- * Functions implemented in iwl-base.c which are forward declared here
+ * Functions implemented in iwl3945-base.c which are forward declared here
* for use by iwl-*.c
*
*****************************************************************************/
@@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,int left);
+ struct ieee80211_hdr *hdr, int left);
extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
@@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl-base.c
+ * for use by iwl3945-base.c
*
* NOTE: The implementation of these functions are hardware specific
* which is why they are in the hardware specific files (vs. iwl-base.c)
@@ -283,7 +283,7 @@ extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
extern struct ieee80211_ops iwl3945_hw_ops;
/*
- * Forward declare iwl-3945.c functions for iwl-base.c
+ * Forward declare iwl-3945.c functions for iwl3945-base.c
*/
extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 000000000000..81d6a25eb04f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-4965-calib.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+struct statistics_general_data {
+ u32 beacon_silence_rssi_a;
+ u32 beacon_silence_rssi_b;
+ u32 beacon_silence_rssi_c;
+ u32 beacon_energy_a;
+ u32 beacon_energy_b;
+ u32 beacon_energy_c;
+};
+
+void iwl4965_calib_free_results(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < IWL_CALIB_MAX; i++) {
+ kfree(priv->calib_results[i].buf);
+ priv->calib_results[i].buf = NULL;
+ priv->calib_results[i].buf_len = 0;
+ }
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ * but then determines that they are either noise, or transmissions
+ * from a distant wireless network (also "noise", really) that get
+ * "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ * enough to receive all of our own network traffic, but not so
+ * high that our DSP gets too busy trying to lock onto non-network
+ * activity/noise. */
+static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
+ u32 norm_fa,
+ u32 rx_enable_time,
+ struct statistics_general_data *rx_info)
+{
+ u32 max_nrg_cck = 0;
+ int i = 0;
+ u8 max_silence_rssi = 0;
+ u32 silence_ref = 0;
+ u8 silence_rssi_a = 0;
+ u8 silence_rssi_b = 0;
+ u8 silence_rssi_c = 0;
+ u32 val;
+
+ /* "false_alarms" values below are cross-multiplications to assess the
+ * numbers of false alarms within the measured period of actual Rx
+ * (Rx is off when we're txing), vs the min/max expected false alarms
+ * (some should be expected if rx is sensitive enough) in a
+ * hypothetical listening period of 200 time units (TU), 204.8 msec:
+ *
+ * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+ *
+ * */
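+ /* Illustration of the cross-multiplication (numbers invented for the
+ * example): with rx_enable_time = 100000 usec of actual Rx and
+ * norm_fa = 30, false_alarms = 30 * 200 * 1024 = 6144000 is compared
+ * against MIN_FA_CCK * 100000 and MAX_FA_CCK * 100000, which is the
+ * same test as comparing the rate 30 / 100000 usec against
+ * MIN_FA_CCK / 204800 usec and MAX_FA_CCK / 204800 usec. */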
+ u32 false_alarms = norm_fa * 200 * 1024;
+ u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+ u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
+ struct iwl_sensitivity_data *data = NULL;
+ const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+ data = &(priv->sensitivity_data);
+
+ data->nrg_auto_corr_silence_diff = 0;
+
+ /* Find max silence rssi among all 3 receivers.
+ * This is background noise, which may include transmissions from other
+ * networks, measured during silence before our network's beacon */
+ silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
+ ALL_BAND_FILTER) >> 8);
+ silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
+ ALL_BAND_FILTER) >> 8);
+ silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
+ ALL_BAND_FILTER) >> 8);
+
+ val = max(silence_rssi_b, silence_rssi_c);
+ max_silence_rssi = max(silence_rssi_a, (u8) val);
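+ /* The mask-and-shift above extracts the silence RSSI byte carried in
+ * bits 8..15 of each 32-bit statistics word (assuming ALL_BAND_FILTER
+ * selects exactly that byte, as the shift by 8 and u8 cast suggest). */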
+
+ /* Store silence rssi in 20-beacon history table */
+ data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+ data->nrg_silence_idx++;
+ if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+ data->nrg_silence_idx = 0;
+
+ /* Find max silence rssi across 20 beacon history */
+ for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+ val = data->nrg_silence_rssi[i];
+ silence_ref = max(silence_ref, val);
+ }
+ IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
+ silence_rssi_a, silence_rssi_b, silence_rssi_c,
+ silence_ref);
+
+ /* Find max rx energy (min value!) among all 3 receivers,
+ * measured during beacon frame.
+ * Save it in 10-beacon history table. */
+ i = data->nrg_energy_idx;
+ val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+ data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+ data->nrg_energy_idx++;
+ if (data->nrg_energy_idx >= 10)
+ data->nrg_energy_idx = 0;
+
+ /* Find min rx energy (max value) across 10 beacon history.
+ * This is the minimum signal level that we want to receive well.
+ * Add backoff (margin so we don't miss slightly lower energy frames).
+ * This establishes an upper bound (min value) for energy threshold. */
+ max_nrg_cck = data->nrg_value[0];
+ for (i = 1; i < 10; i++)
+ max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+ max_nrg_cck += 6;
+
+ IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+ rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+ rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+ /* Count number of consecutive beacons with fewer-than-desired
+ * false alarms. */
+ if (false_alarms < min_false_alarms)
+ data->num_in_cck_no_fa++;
+ else
+ data->num_in_cck_no_fa = 0;
+ IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
+ data->num_in_cck_no_fa);
+
+ /* If we got too many false alarms this time, reduce sensitivity */
+ if ((false_alarms > max_false_alarms) &&
+ (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
+ IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
+ false_alarms, max_false_alarms);
+ IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
+ data->nrg_curr_state = IWL_FA_TOO_MANY;
+ /* Store for "fewer than desired" on later beacon */
+ data->nrg_silence_ref = silence_ref;
+
+ /* increase energy threshold (reduce nrg value)
+ * to decrease sensitivity */
+ data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+ /* Else if we got fewer than desired, increase sensitivity */
+ } else if (false_alarms < min_false_alarms) {
+ data->nrg_curr_state = IWL_FA_TOO_FEW;
+
+ /* Compare current silence level with the silence level saved at the
+ * most recent "healthy" or "too many false alarms" beacon */
+ data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
+ (s32)silence_ref;
+
+ IWL_DEBUG_CALIB(priv,
+ "norm FA %u < min FA %u, silence diff %d\n",
+ false_alarms, min_false_alarms,
+ data->nrg_auto_corr_silence_diff);
+
+ /* Increase value to increase sensitivity, but only if:
+ * 1) the previous beacon did *not* have *too many* false alarms,
+ * AND EITHER
+ * 2a) there's a significant difference in Rx levels from the
+ * previous beacon with too many, or a healthy # of, FAs,
+ * 2b) OR we've seen a lot of beacons (100) with too few
+ * false alarms */
+ if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
+ ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+ (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+ IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+ /* Increase nrg value to increase sensitivity */
+ val = data->nrg_th_cck + NRG_STEP_CCK;
+ data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+ } else {
+ IWL_DEBUG_CALIB(priv,
+ "... but not changing sensitivity\n");
+ }
+
+ /* Else we got a healthy number of false alarms, keep status quo */
+ } else {
+ IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
+ data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+
+ /* Store for use in "fewer than desired" with later beacon */
+ data->nrg_silence_ref = silence_ref;
+
+ /* If previous beacon had too many false alarms,
+ * give it some extra margin by reducing sensitivity again
+ * (but don't go below measured energy of desired Rx) */
+ if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
+ IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+ if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+ data->nrg_th_cck -= NRG_MARGIN;
+ else
+ data->nrg_th_cck = max_nrg_cck;
+ }
+ }
+
+ /* Make sure the energy threshold does not go above the measured
+ * energy of the desired Rx signals (reduced by backoff margin),
+ * or else we might start missing Rx frames.
+ * Lower value is higher energy, so we use max()!
+ */
+ data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+ IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+
+ data->nrg_prev_state = data->nrg_curr_state;
+
+ /* Auto-correlation CCK algorithm */
+ if (false_alarms > min_false_alarms) {
+
+ /* increase auto_corr values to decrease sensitivity
+ * so the DSP won't be disturbed by the noise
+ */
+ if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
+ data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+ else {
+ val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck =
+ min((u32)ranges->auto_corr_max_cck, val);
+ }
+ val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck_mrc =
+ min((u32)ranges->auto_corr_max_cck_mrc, val);
+ } else if ((false_alarms < min_false_alarms) &&
+ ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+ (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+ /* Decrease auto_corr values to increase sensitivity */
+ val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck =
+ max((u32)ranges->auto_corr_min_cck, val);
+ val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck_mrc =
+ max((u32)ranges->auto_corr_min_cck_mrc, val);
+ }
+
+ return 0;
+}
+
+
+static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
+ u32 norm_fa,
+ u32 rx_enable_time)
+{
+ u32 val;
+ u32 false_alarms = norm_fa * 200 * 1024;
+ u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+ u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+ struct iwl_sensitivity_data *data = NULL;
+ const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+ data = &(priv->sensitivity_data);
+
+ /* If we got too many false alarms this time, reduce sensitivity */
+ if (false_alarms > max_false_alarms) {
+
+ IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
+ false_alarms, max_false_alarms);
+
+ val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm =
+ min((u32)ranges->auto_corr_max_ofdm, val);
+
+ val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc =
+ min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+
+ val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_x1 =
+ min((u32)ranges->auto_corr_max_ofdm_x1, val);
+
+ val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc_x1 =
+ min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+ }
+
+ /* Else if we got fewer than desired, increase sensitivity */
+ else if (false_alarms < min_false_alarms) {
+
+ IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
+ false_alarms, min_false_alarms);
+
+ val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm =
+ max((u32)ranges->auto_corr_min_ofdm, val);
+
+ val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc =
+ max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+
+ val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_x1 =
+ max((u32)ranges->auto_corr_min_ofdm_x1, val);
+
+ val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc_x1 =
+ max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+ } else {
+ IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
+ min_false_alarms, false_alarms, max_false_alarms);
+ }
+ return 0;
+}
+
+static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
+ struct iwl_sensitivity_data *data,
+ __le16 *tbl)
+{
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm_x1);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
+
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_cck);
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_cck_mrc);
+
+ tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
+ cpu_to_le16((u16)data->nrg_th_cck);
+ tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
+ cpu_to_le16((u16)data->nrg_th_ofdm);
+
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
+ cpu_to_le16(data->barker_corr_th_min);
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16(data->barker_corr_th_min_mrc);
+ tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
+ cpu_to_le16(data->nrg_th_cca);
+
+ IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+ data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+ data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+ data->nrg_th_ofdm);
+
+ IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
+ data->auto_corr_cck, data->auto_corr_cck_mrc,
+ data->nrg_th_cck);
+}
+
+/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
+static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+{
+ struct iwl_sensitivity_cmd cmd;
+ struct iwl_sensitivity_data *data = NULL;
+ struct iwl_host_cmd cmd_out = {
+ .id = SENSITIVITY_CMD,
+ .len = sizeof(struct iwl_sensitivity_cmd),
+ .flags = CMD_ASYNC,
+ .data = &cmd,
+ };
+
+ data = &(priv->sensitivity_data);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+
+ /* Update uCode's "work" table, and copy it to DSP */
+ cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+
+ /* Don't send command to uCode if nothing has changed */
+ if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
+ sizeof(u16)*HD_TABLE_SIZE)) {
+ IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+ return 0;
+ }
+
+ /* Copy table for comparison next time */
+ memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
+ sizeof(u16)*HD_TABLE_SIZE);
+
+ return iwl_legacy_send_cmd(priv, &cmd_out);
+}
+
+void iwl4965_init_sensitivity(struct iwl_priv *priv)
+{
+ int ret = 0;
+ int i;
+ struct iwl_sensitivity_data *data = NULL;
+ const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+ if (priv->disable_sens_cal)
+ return;
+
+ IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+
+ /* Clear driver's sensitivity algo data */
+ data = &(priv->sensitivity_data);
+
+ if (ranges == NULL)
+ return;
+
+ memset(data, 0, sizeof(struct iwl_sensitivity_data));
+
+ data->num_in_cck_no_fa = 0;
+ data->nrg_curr_state = IWL_FA_TOO_MANY;
+ data->nrg_prev_state = IWL_FA_TOO_MANY;
+ data->nrg_silence_ref = 0;
+ data->nrg_silence_idx = 0;
+ data->nrg_energy_idx = 0;
+
+ for (i = 0; i < 10; i++)
+ data->nrg_value[i] = 0;
+
+ for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+ data->nrg_silence_rssi[i] = 0;
+
+ data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+ data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+ data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+ data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+ data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+ data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+ data->nrg_th_cck = ranges->nrg_th_cck;
+ data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+ data->barker_corr_th_min = ranges->barker_corr_th_min;
+ data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+ data->nrg_th_cca = ranges->nrg_th_cca;
+
+ data->last_bad_plcp_cnt_ofdm = 0;
+ data->last_fa_cnt_ofdm = 0;
+ data->last_bad_plcp_cnt_cck = 0;
+ data->last_fa_cnt_cck = 0;
+
+ ret |= iwl4965_sensitivity_write(priv);
+ IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+}
+
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+{
+ u32 rx_enable_time;
+ u32 fa_cck;
+ u32 fa_ofdm;
+ u32 bad_plcp_cck;
+ u32 bad_plcp_ofdm;
+ u32 norm_fa_ofdm;
+ u32 norm_fa_cck;
+ struct iwl_sensitivity_data *data = NULL;
+ struct statistics_rx_non_phy *rx_info;
+ struct statistics_rx_phy *ofdm, *cck;
+ unsigned long flags;
+ struct statistics_general_data statis;
+
+ if (priv->disable_sens_cal)
+ return;
+
+ data = &(priv->sensitivity_data);
+
+ if (!iwl_legacy_is_any_associated(priv)) {
+ IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
+ ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
+ cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+
+ if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+ IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ /* Extract Statistics: */
+ rx_enable_time = le32_to_cpu(rx_info->channel_load);
+ fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+ fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+ bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+ bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+ statis.beacon_silence_rssi_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a);
+ statis.beacon_silence_rssi_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b);
+ statis.beacon_silence_rssi_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c);
+ statis.beacon_energy_a =
+ le32_to_cpu(rx_info->beacon_energy_a);
+ statis.beacon_energy_b =
+ le32_to_cpu(rx_info->beacon_energy_b);
+ statis.beacon_energy_c =
+ le32_to_cpu(rx_info->beacon_energy_c);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+
+ if (!rx_enable_time) {
+ IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+ return;
+ }
+
+ /* These statistics increase monotonically, and do not reset
+ * at each beacon. Calculate difference from last value, or just
+ * use the new statistics value if it has reset or wrapped around. */
+ if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
+ data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+ else {
+ bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+ data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+ }
+
+ if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
+ data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+ else {
+ bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+ data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+ }
+
+ if (data->last_fa_cnt_ofdm > fa_ofdm)
+ data->last_fa_cnt_ofdm = fa_ofdm;
+ else {
+ fa_ofdm -= data->last_fa_cnt_ofdm;
+ data->last_fa_cnt_ofdm += fa_ofdm;
+ }
+
+ if (data->last_fa_cnt_cck > fa_cck)
+ data->last_fa_cnt_cck = fa_cck;
+ else {
+ fa_cck -= data->last_fa_cnt_cck;
+ data->last_fa_cnt_cck += fa_cck;
+ }
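+ /* Illustration of the delta handling above (numbers invented): if
+ * last_fa_cnt_cck was 1000 and the new cumulative fa_cck is 1250,
+ * this period's fa_cck becomes 250 and last_fa_cnt_cck advances to
+ * 1250; if the uCode counter reset or wrapped (new value below the
+ * stored one), the stored value is resynced and the raw new count is
+ * used as this period's value. */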
+
+ /* Total aborted signal locks */
+ norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+ norm_fa_cck = fa_cck + bad_plcp_cck;
+
+ IWL_DEBUG_CALIB(priv,
+ "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
+ bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+ iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
+ iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+
+ iwl4965_sensitivity_write(priv);
+}
+
+static inline u8 iwl4965_find_first_chain(u8 mask)
+{
+ if (mask & ANT_A)
+ return CHAIN_A;
+ if (mask & ANT_B)
+ return CHAIN_B;
+ return CHAIN_C;
+}
+
+/*
+ * Run the disconnected-antenna algorithm to find out which antennas are
+ * disconnected.
+ */
+static void
+iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
+ struct iwl_chain_noise_data *data)
+{
+ u32 active_chains = 0;
+ u32 max_average_sig;
+ u16 max_average_sig_antenna_i;
+ u8 num_tx_chains;
+ u8 first_chain;
+ u16 i = 0;
+
+ average_sig[0] = data->chain_signal_a /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] = data->chain_signal_b /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] = data->chain_signal_c /
+ priv->cfg->base_params->chain_noise_num_beacons;
+
+ if (average_sig[0] >= average_sig[1]) {
+ max_average_sig = average_sig[0];
+ max_average_sig_antenna_i = 0;
+ active_chains = (1 << max_average_sig_antenna_i);
+ } else {
+ max_average_sig = average_sig[1];
+ max_average_sig_antenna_i = 1;
+ active_chains = (1 << max_average_sig_antenna_i);
+ }
+
+ if (average_sig[2] >= max_average_sig) {
+ max_average_sig = average_sig[2];
+ max_average_sig_antenna_i = 2;
+ active_chains = (1 << max_average_sig_antenna_i);
+ }
+
+ IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+ average_sig[0], average_sig[1], average_sig[2]);
+ IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+ max_average_sig, max_average_sig_antenna_i);
+
+ /* Compare signal strengths for all 3 receivers. */
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ if (i != max_average_sig_antenna_i) {
+ s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+ /* If signal is very weak, compared with
+ * strongest, mark it as disconnected. */
+ if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+ data->disconn_array[i] = 1;
+ else
+ active_chains |= (1 << i);
+ IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n",
+ i, rssi_delta, data->disconn_array[i]);
+ }
+ }
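+ /* Illustration (invented values): with average_sig = {50, 48, 3} the
+ * strongest chain is A; chain B, only 2 below it, stays active, while
+ * chain C, 47 below it, is marked in disconn_array[] as disconnected
+ * (assuming MAXIMUM_ALLOWED_PATHLOSS is well under 47). */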
+
+ /*
+ * The above algorithm sometimes fails when the ucode
+ * reports 0 for all chains. It's not clear why that
+ * happens to start with, but it is then causing trouble
+ * because this can make us enable more chains than the
+ * hardware really has.
+ *
+ * To be safe, simply mask out any chains that we know
+ * are not on the device.
+ */
+ active_chains &= priv->hw_params.valid_rx_ant;
+
+ num_tx_chains = 0;
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ /* loops on all the bits of
+ * priv->hw_setting.valid_tx_ant */
+ u8 ant_msk = (1 << i);
+ if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ continue;
+
+ num_tx_chains++;
+ if (data->disconn_array[i] == 0)
+ /* there is a Tx antenna connected */
+ break;
+ if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ data->disconn_array[i]) {
+ /*
+ * If all chains are disconnected
+ * connect the first valid tx chain
+ */
+ first_chain =
+ iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+ data->disconn_array[first_chain] = 0;
+ active_chains |= BIT(first_chain);
+ IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
+ W/A - declare %d as connected\n",
+ first_chain);
+ break;
+ }
+ }
+
+ if (active_chains != priv->hw_params.valid_rx_ant &&
+ active_chains != priv->chain_noise_data.active_chains)
+ IWL_DEBUG_CALIB(priv,
+ "Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n",
+ active_chains, priv->hw_params.valid_rx_ant);
+
+ /* Save for use within RXON, TX, SCAN commands, etc. */
+ data->active_chains = active_chains;
+ IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+ active_chains);
+}
+
+static void iwl4965_gain_computation(struct iwl_priv *priv,
+ u32 *average_noise,
+ u16 min_average_noise_antenna_i,
+ u32 min_average_noise,
+ u8 default_chain)
+{
+ int i, ret;
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+ data->delta_gain_code[min_average_noise_antenna_i] = 0;
+
+ for (i = default_chain; i < NUM_RX_CHAINS; i++) {
+ s32 delta_g = 0;
+
+ if (!(data->disconn_array[i]) &&
+ (data->delta_gain_code[i] ==
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+ delta_g = average_noise[i] - min_average_noise;
+ data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+ data->delta_gain_code[i] =
+ min(data->delta_gain_code[i],
+ (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ data->delta_gain_code[i] =
+ (data->delta_gain_code[i] | (1 << 2));
+ } else {
+ data->delta_gain_code[i] = 0;
+ }
+ }
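+ /* Illustration (invented numbers): if chain A has the minimum average
+ * noise of 90 and chain B averages 93, delta_g for B is 3 and its raw
+ * gain code is (3 * 10) / 15 = 2, which is then limited to
+ * CHAIN_NOISE_MAX_DELTA_GAIN_CODE and OR'ed with (1 << 2); the
+ * reference chain keeps delta_gain_code 0. */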
+ IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
+ data->delta_gain_code[0],
+ data->delta_gain_code[1],
+ data->delta_gain_code[2]);
+
+ /* Differential gain gets sent to uCode only once */
+ if (!data->radio_write) {
+ struct iwl_calib_diff_gain_cmd cmd;
+ data->radio_write = 1;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.diff_gain_a = data->delta_gain_code[0];
+ cmd.diff_gain_b = data->delta_gain_code[1];
+ cmd.diff_gain_c = data->delta_gain_code[2];
+ ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_DEBUG_CALIB(priv, "fail sending cmd "
+ "REPLY_PHY_CALIBRATION_CMD\n");
+
+ /* TODO: we might want to recalculate
+ * rx_chain in the rxon cmd */
+
+ /* Mark so we run this algo only once! */
+ data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ }
+}
+
+
+
+/*
+ * Accumulate 16 beacons of signal and noise statistics for each of
+ * 3 receivers/antennas/rx-chains, then figure out:
+ * 1) Which antennas are connected.
+ * 2) Differential rx gain settings to balance the 3 receivers.
+ */
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+{
+ struct iwl_chain_noise_data *data = NULL;
+
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_sig_a;
+ u32 chain_sig_b;
+ u32 chain_sig_c;
+ u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+ u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+ u16 i = 0;
+ u16 rxon_chnum = INITIALIZATION_VALUE;
+ u16 stat_chnum = INITIALIZATION_VALUE;
+ u8 rxon_band24;
+ u8 stat_band24;
+ unsigned long flags;
+ struct statistics_rx_non_phy *rx_info;
+
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ if (priv->disable_chain_noise_cal)
+ return;
+
+ data = &(priv->chain_noise_data);
+
+ /*
+ * Accumulate just the first "chain_noise_num_beacons" after
+ * the first association, then we're done forever.
+ */
+ if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
+ if (data->state == IWL_CHAIN_NOISE_ALIVE)
+ IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
+ rx.general);
+
+ if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+ IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+ rxon_chnum = le16_to_cpu(ctx->staging.channel);
+
+ stat_band24 = !!(((struct iwl_notif_statistics *)
+ stat_resp)->flag &
+ STATISTICS_REPLY_FLG_BAND_24G_MSK);
+ stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
+ stat_resp)->flag) >> 16;
+
+ /* Make sure we accumulate data for just the associated channel
+ * (even if scanning). */
+ if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
+ IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
+ rxon_chnum, rxon_band24);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
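+ /* Example of the check above: the channel number sits in the upper 16
+ * bits of the statistics flag word, so a notification taken while
+ * scanning, say, channel 36 on 5 GHz is dropped if the staging RXON is
+ * still channel 6 on 2.4 GHz. */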
+
+ /*
+ * Accumulate beacon statistics values across
+ * "chain_noise_num_beacons"
+ */
+ chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
+ IN_BAND_FILTER;
+ chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
+ IN_BAND_FILTER;
+ chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
+ IN_BAND_FILTER;
+
+ chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+ chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+ chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ data->beacon_count++;
+
+ data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+ data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+ data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+ data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+ data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+ data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+ IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
+ rxon_chnum, rxon_band24, data->beacon_count);
+ IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
+ chain_sig_a, chain_sig_b, chain_sig_c);
+ IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
+ chain_noise_a, chain_noise_b, chain_noise_c);
+
+ /* If this is the "chain_noise_num_beacons", determine:
+ * 1) Disconnected antennas (using signal strengths)
+ * 2) Differential gain (using silence noise) to balance receivers */
+ if (data->beacon_count !=
+ priv->cfg->base_params->chain_noise_num_beacons)
+ return;
+
+ /* Analyze signal for disconnected antenna */
+ iwl4965_find_disconn_antenna(priv, average_sig, data);
+
+ /* Analyze noise for rx balance */
+ average_noise[0] = data->chain_noise_a /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[1] = data->chain_noise_b /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[2] = data->chain_noise_c /
+ priv->cfg->base_params->chain_noise_num_beacons;
+
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ if (!(data->disconn_array[i]) &&
+ (average_noise[i] <= min_average_noise)) {
+ /* This means that chain i is active and has
+ * lower noise values so far: */
+ min_average_noise = average_noise[i];
+ min_average_noise_antenna_i = i;
+ }
+ }
+
+ IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
+ average_noise[0], average_noise[1],
+ average_noise[2]);
+
+ IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
+ min_average_noise, min_average_noise_antenna_i);
+
+ iwl4965_gain_computation(priv, average_noise,
+ min_average_noise_antenna_i, min_average_noise,
+ iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+
+ /* Some power changes may have been made during the calibration.
+ * Update and commit the RXON
+ */
+ if (priv->cfg->ops->lib->update_chain_flags)
+ priv->cfg->ops->lib->update_chain_flags(priv);
+
+ data->state = IWL_CHAIN_NOISE_DONE;
+ iwl_legacy_power_update_mode(priv, false);
+}
+
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+{
+ int i;
+ memset(&(priv->sensitivity_data), 0,
+ sizeof(struct iwl_sensitivity_data));
+ memset(&(priv->chain_noise_data), 0,
+ sizeof(struct iwl_chain_noise_data));
+ for (i = 0; i < NUM_RX_CHAINS; i++)
+ priv->chain_noise_data.delta_gain_code[i] =
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+ /* Ask for statistics now, the uCode will send notification
+ * periodically after association */
+ iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
index 9f7b2f935964..f46c80e6e005 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,21 +59,17 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#ifndef __iwl_4965_calib_h__
+#define __iwl_4965_calib_h__
-#ifndef __iwl_legacy_h__
-#define __iwl_legacy_h__
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-commands.h"
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
+void iwl4965_init_sensitivity(struct iwl_priv *priv);
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
+void iwl4965_calib_free_results(struct iwl_priv *priv);
-irqreturn_t iwl_isr_legacy(int irq, void *data);
-
-#endif /* __iwl_legacy_h__ */
+#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 000000000000..1c93665766e4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
+
+static const char *fmt_value = " %-30s %10u\n";
+static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+static const char *fmt_header =
+ "%-32s current cumulative delta max\n";
+
+static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+ u32 flag;
+
+ flag = le32_to_cpu(priv->_4965.statistics.flag);
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+ if (flag & UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (flag & UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+
+ return p;
+}
+
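+/* Each *_stats_read() handler below follows the same pattern: allocate a
+ * buffer sized generously for the text, append lines with
+ * scnprintf(buf + pos, bufsz - pos, ...), hand the accumulated bytes to
+ * simple_read_from_buffer(), and free the buffer. */
+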
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct statistics_rx_non_phy *general, *accum_general;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics shown here are based on the last statistics
+ * notification from the uCode and might not reflect the current
+ * uCode activity.
+ */
+ ofdm = &priv->_4965.statistics.rx.ofdm;
+ cck = &priv->_4965.statistics.rx.cck;
+ general = &priv->_4965.statistics.rx.general;
+ ht = &priv->_4965.statistics.rx.ofdm_ht;
+ accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
+ accum_cck = &priv->_4965.accum_statistics.rx.cck;
+ accum_general = &priv->_4965.accum_statistics.rx.general;
+ accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
+ delta_cck = &priv->_4965.delta_statistics.rx.cck;
+ delta_general = &priv->_4965.delta_statistics.rx.general;
+ delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->_4965.max_delta.rx.ofdm;
+ max_cck = &priv->_4965.max_delta.rx.cck;
+ max_general = &priv->_4965.max_delta.rx.general;
+ max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
+
+ pos += iwl4965_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+ max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err, delta_cck->overrun_err,
+ max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+ max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout, delta_cck->fina_timeout,
+ max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill),
+ accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err),
+ accum_cck->mh_format_err, delta_cck->mh_format_err,
+ max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts, delta_general->bogus_cts,
+ max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack, delta_general->bogus_ack,
+ max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err),
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good),
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt),
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* The statistics shown here are based on the last statistics
+ * notification from the uCode and might not reflect the current
+ * uCode activity.
+ */
+ tx = &priv->_4965.statistics.tx;
+ accum_tx = &priv->_4965.accum_statistics.tx;
+ delta_tx = &priv->_4965.delta_statistics.tx;
+ max_tx = &priv->_4965.max_delta.tx;
+
+ pos += iwl4965_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt),
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout),
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg underrun:",
+ le32_to_cpu(tx->agg.underrun),
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct statistics_general_common *general, *accum_general;
+ struct statistics_general_common *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* The statistics shown here are based on the last statistics
+ * notification from the uCode and might not reflect the current
+ * uCode activity.
+ */
+ general = &priv->_4965.statistics.general.common;
+ dbg = &priv->_4965.statistics.general.common.dbg;
+ div = &priv->_4965.statistics.general.common.div;
+ accum_general = &priv->_4965.accum_statistics.general.common;
+ accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
+ accum_div = &priv->_4965.accum_statistics.general.common.div;
+ delta_general = &priv->_4965.delta_statistics.general.common;
+ max_general = &priv->_4965.max_delta.general.common;
+ delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
+ max_dbg = &priv->_4965.max_delta.general.common.dbg;
+ delta_div = &priv->_4965.delta_statistics.general.common.div;
+ max_div = &priv->_4965.max_delta.general.common.div;
+
+ pos += iwl4965_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_value, "temperature:",
+ le32_to_cpu(general->temperature));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_value, "ttl_timestamp:",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "wait_for_silence_timeout_count:",
+ le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+ accum_dbg->wait_for_silence_timeout_cnt,
+ delta_dbg->wait_for_silence_timeout_cnt,
+ max_dbg->wait_for_silence_timeout_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 000000000000..6c8e35361a9e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos);
+#else
+static ssize_t
+iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t
+iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 000000000000..cb9baab1ff7d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-4965.h"
+#include "iwl-io.h"
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0) {
+ IWL_DEBUG_IO(priv,
+ "Acquired semaphore after %d tries.\n",
+ count+1);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
+{
+ iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int iwl4965_eeprom_check_version(struct iwl_priv *priv)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+ calib_ver = iwl_legacy_eeprom_query16(priv,
+ EEPROM_4965_CALIB_VERSION_OFFSET);
+
+ if (eeprom_ver < priv->cfg->eeprom_ver ||
+ calib_ver < priv->cfg->eeprom_calib_ver)
+ goto err;
+
+ IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n",
+ eeprom_ver, priv->cfg->eeprom_ver,
+ calib_ver, priv->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+{
+ const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 9166794eda0d..08b189c8472d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl {
u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
} __packed;
+
+#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
+
+/* RSSI to dBm */
+#define IWL4965_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IWL4965_DEFAULT_TX_RETRY 15
+
+/* Limit range of txpower output target to be between these values */
+#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
+
+/* EEPROM */
+#define IWL4965_FIRST_AMPDU_QUEUE 10
+
+
#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 000000000000..26d324e30692
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-4965-led.h"
+
+/* Send led command */
+static int
+iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_LEDS_CMD,
+ .len = sizeof(struct iwl_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+ u32 reg;
+
+ reg = iwl_read32(priv, CSR_LED_REG);
+ if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+ iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+ return iwl_legacy_send_cmd(priv, &cmd);
+}
+
+/* Set led register on */
+void iwl4965_led_enable(struct iwl_priv *priv)
+{
+ iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct iwl_led_ops iwl4965_led_ops = {
+ .cmd = iwl4965_send_led_cmd,
+};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644
index 000000000000..5ed3615fc338
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_4965_led_h__
+#define __iwl_4965_led_h__
+
+extern const struct iwl_led_ops iwl4965_led_ops;
+void iwl4965_led_enable(struct iwl_priv *priv);
+
+#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 000000000000..5a8a3cce27bc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1260 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-sta.h"
+
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+ queue_work(priv->workqueue, &priv->tx_flush);
+ }
+}
+
+/*
+ * module parameters
+ */
+struct iwl_mod_params iwl4965_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0;
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * Direct rx interrupts to host
+ * Rx buffer size 4 or 8k
+ * RB timeout 0
+ * 256 RBDs
+ */
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size|
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int iwl4965_hw_nic_init(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl4965_set_pwr_vmain(priv);
+
+ priv->cfg->ops->lib->apm_ops.config(priv);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = iwl_legacy_rx_queue_alloc(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ iwl4965_rx_queue_reset(priv, rxq);
+
+ iwl4965_rx_replenish(priv);
+
+ iwl4965_rx_init(priv, rxq);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rxq->need_update = 1;
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwl4965_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwl4965_txq_ctx_reset(priv);
+
+ set_bit(STATUS_INIT, &priv->status);
+
+ return 0;
+}
+
+/**
+ * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
+ dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
+ rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+ }
+}
+
+/**
+ * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (priv->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ "order: %d\n",
+ priv->hw_params.rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv,
+ "Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, priv->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ priv->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwl4965_rx_replenish(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ iwl4965_rx_allocate(priv, GFP_KERNEL);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl4965_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwl4965_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwl4965_rx_allocate(priv, GFP_ATOMIC);
+
+ iwl4965_rx_queue_restock(priv);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed.
+ */
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int iwl4965_rxq_stop(struct iwl_priv *priv)
+{
+
+ /* stop Rx DMA */
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
+
+static int iwl4965_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct iwl4965_rx_non_cfg_phy *ncphy =
+ (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
+ >> IWL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWL4965_RSSI_OFFSET;
+}
+
+
+static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!priv->cfg->mod_params->sw_crypto &&
+ iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_legacy_update_stats(priv, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ priv->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->_4965.last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &priv->_4965.last_phy_res;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwl4965_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow smooth user experience,
+ * this W/A doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
+
+ iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ priv->_4965.last_phy_res_valid = true;
+ memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ channel = iwl_legacy_get_single_channel_number(priv, band);
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv,
+ "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = sizeof(struct iwl_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct iwl_scan_cmd *scan;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (vif)
+ ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+ "fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (iwl_legacy_is_any_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ if (priv->is_internal_short_scan)
+ interval = 0;
+ else
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(
+ priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+ RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_rx_antennas[band])
+ rx_ant = priv->cfg->scan_rx_antennas[band];
+
+ if (priv->cfg->scan_tx_antennas[band])
+ scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
+
+ priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
+ priv->scan_tx_ant[band],
+ scan_tx_antennas);
+ rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = iwl4965_first_antenna(active_chains);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_legacy_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ vif->addr,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ /* use bcast addr, will not be transmitted but must be valid */
+ cmd_len = iwl_legacy_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ iwlegacy_bcast_addr, NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl4965_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl4965_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(STATUS_SCAN_HW, &priv->status);
+
+ ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+ if (ret)
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+
+ return ret;
+}
+
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return iwl4965_add_bssid_station(priv, vif_priv->ctx,
+ vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&priv->sta_lock);
+
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+
+#define IWL_TX_QUEUE_MSK 0xfffff
+
+static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
+{
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+ priv->current_ht_config.single_chain_sufficient;
+}
+
+#define IWL_NUM_RX_CHAINS_MULTIPLE 3
+#define IWL_NUM_RX_CHAINS_SINGLE 2
+#define IWL_NUM_IDLE_CHAINS_DUAL 2
+#define IWL_NUM_IDLE_CHAINS_SINGLE 1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity. Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
+{
+ /* # of Rx chains to use when expecting MIMO. */
+ if (iwl4965_is_single_rx_stream(priv))
+ return IWL_NUM_RX_CHAINS_SINGLE;
+ else
+ return IWL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for rx chain count.
+ */
+static int
+iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+{
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (priv->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+ WARN(1, "invalid SMPS mode %d",
+ priv->current_ht_config.smps);
+ return active_cnt;
+ }
+}
+
+/* up to 4 chains */
+static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
+{
+ u8 res;
+ res = (chain_bitmap & BIT(0)) >> 0;
+ res += (chain_bitmap & BIT(1)) >> 1;
+ res += (chain_bitmap & BIT(2)) >> 2;
+ res += (chain_bitmap & BIT(3)) >> 3;
+ return res;
+}
+
+/**
+ * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for scan command ... it puts data in wrong place.
+ */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ bool is_single = iwl4965_is_single_rx_stream(priv);
+ bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+ u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+ u32 active_chains;
+ u16 rx_chain;
+
+ /* Tell uCode which antennas are actually connected.
+ * Before first association, we assume all antennas are connected.
+ * Just after first association, iwl4965_chain_noise_calibration()
+ * checks which antennas actually *are* connected. */
+ if (priv->chain_noise_data.active_chains)
+ active_chains = priv->chain_noise_data.active_chains;
+ else
+ active_chains = priv->hw_params.valid_rx_ant;
+
+ rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+ /* How many receivers should we use? */
+ active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
+ idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
+
+
+ /* correct rx chain count according to hw settings
+ * and chain noise calibration
+ */
+ valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
+ if (valid_rx_cnt < active_rx_cnt)
+ active_rx_cnt = valid_rx_cnt;
+
+ if (valid_rx_cnt < idle_rx_cnt)
+ idle_rx_cnt = valid_rx_cnt;
+
+ rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+ rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+ ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+ if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
+ ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ else
+ ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+ IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
+ ctx->staging.rx_chain,
+ active_rx_cnt, idle_rx_cnt);
+
+ WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+ active_rx_cnt < idle_rx_cnt);
+}
+
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
+{
+ int i;
+ u8 ind = ant;
+
+ for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+ ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+ if (valid & BIT(ind))
+ return ind;
+ }
+ return ant;
+}
+
+static const char *iwl4965_get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ iwl4965_get_fh_string(fh_tbl[i]),
+ iwl_legacy_read_direct32(priv, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(priv, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(priv, " %34s: 0X%08x\n",
+ iwl4965_get_fh_string(fh_tbl[i]),
+ iwl_legacy_read_direct32(priv, fh_tbl[i]));
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 000000000000..31ac672b64e1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2870 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "iwl-dev.h"
+#include "iwl-sta.h"
+#include "iwl-core.h"
+#include "iwl-4965.h"
+
+#define IWL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY 1
+#define IWL_HT_NUMBER_TRY 3
+
+#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX 15
+/* max time to accum history, 3 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_SISO_##s##M_PLCP, \
+ IWL_RATE_MIMO2_##s##M_PLCP,\
+ IWL_RATE_##r##M_IEEE, \
+ IWL_RATE_##ip##M_INDEX, \
+ IWL_RATE_##in##M_INDEX, \
+ IWL_RATE_##rp##M_INDEX, \
+ IWL_RATE_##rn##M_INDEX, \
+ IWL_RATE_##pp##M_INDEX, \
+ IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ *    rate, ht rate, prev ieee rate, next ieee rate,
+ *    prev rate-scale rate, next rate-scale rate,
+ *    prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate, INV is used, which
+ * maps to IWL_RATE_INVALID.
+ *
+ */
+const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
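+
+/*
+ * Example from the table above: the 24 Mbps entry points at 18 Mbps as
+ * its previous and 36 Mbps as its next rate in every walk; the
+ * adjacent-rate search below follows these links for legacy rates.
+ */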
+
+static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+
+ if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+ idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+ idx += IWL_FIRST_OFDM_RATE;
+ /* skip 9M, which is not supported in HT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx += 1;
+ if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
+ if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta);
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
+ bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index);
+#else
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used
+ * in the G (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
+ {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+ {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+ {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+ {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+ {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM (8)
+
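+/*
+ * The low byte of a uCode rate_n_flags word carries the legacy PLCP rate
+ * or the HT rate index; this helper simply masks that byte out.
+ */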
+static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
+{
+ return (u8)(rate_n_flags & 0xFF);
+}
+
+static void
+iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+ window->data = 0;
+ window->success_counter = 0;
+ window->success_ratio = IWL_INVALID_VALUE;
+ window->counter = 0;
+ window->average_tpt = IWL_INVALID_VALUE;
+ window->stamp = 0;
+}
+
+static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Removes old data from the statistics. All data older than
+ * TID_MAX_TIME_DIFF will be deleted.
+ */
+static void
+iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count &&
+ (tl->time_stamp < oldest_time)) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
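+/*
+ * The per-tid traffic load history is a circular buffer of
+ * TID_QUEUE_MAX_SIZE buckets, each TID_QUEUE_CELL_SPACING ms wide;
+ * tl->head indexes the oldest bucket and tl->total holds the packet
+ * count summed over all live buckets.
+ */
+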
+/*
+ * Increment the traffic load value for this tid, and also remove
+ * any values that are older than the history time window.
+ */
+static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else
+ return MAX_TID_COUNT;
+
+ if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ return MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data older than
+ * TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[index] = tl->packet_count[index] + 1;
+ tl->total = tl->total + 1;
+
+ if ((index + 1) > tl->queue_count)
+ tl->queue_count = index + 1;
+
+ return tid;
+}
+
+/*
+ * get the traffic load value for tid
+ */
+static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+
+ if (tid >= TID_MAX_LOAD_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data older than
+ * TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_data, u8 tid,
+ struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = iwl4965_rs_tl_get_load(lq_data, tid);
+
+ if (load > IWL_AGG_LOAD_THRESHOLD) {
+ IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * The driver and mac80211 are out of sync; this might be
+ * caused by reloading the firmware. Stop the Tx BA session
+ * here.
+ */
+ IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else {
+ IWL_ERR(priv, "Aggregation not enabled for tid %d "
+ "because load = %u\n", tid, load);
+ }
+ return ret;
+}
+
+static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
+ struct iwl_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < TID_MAX_LOAD_COUNT)
+ iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+ else
+ IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
+ tid, TID_MAX_LOAD_COUNT);
+}
+
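+/* Count the Tx chains encoded in rate_n_flags: each RATE_MCS_ANT_*_MSK
+ * bit marks one active antenna. */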
+static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an iwl_scale_tbl_info,
+ * guarding against a NULL expected_tpt pointer.
+ */
+static s32
+iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
+/**
+ * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate. window->data contains the bitmask of successful
+ * packets.
+ */
+static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
+ static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+
+ /* Get expected throughput */
+ tpt = iwl4965_get_expected_tpt(tbl, scale_index);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history window; anything older isn't really relevant any more.
+ * If we have filled up the sliding window, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (window->counter >= IWL_RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+ if (window->data & mask) {
+ window->data &= ~mask;
+ window->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ window->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ window->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ window->success_counter++;
+ window->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (window->counter > 0)
+ window->success_ratio = 128 * (100 * window->success_counter)
+ / window->counter;
+ else
+ window->success_ratio = IWL_INVALID_VALUE;
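+
+ /*
+ * success_ratio is the success percentage scaled by 128, so the
+ * average throughput computed below is
+ * (success_ratio * tpt + 64) / 128 in pure integer math; the "+ 64"
+ * rounds to the nearest integer, and the result carries the same
+ * x100 scale used by the 100 * tpt comparisons elsewhere in this
+ * file.
+ */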
+
+ fail_count = window->counter - window->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+ (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+ window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+ else
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Tag this window as having been updated */
+ window->stamp = jiffies;
+
+ return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = iwlegacy_rates[index].plcp;
+ if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+
+ } else if (is_Ht(tbl->lq_type)) {
+ if (index > IWL_LAST_OFDM_RATE) {
+ IWL_ERR(priv, "Invalid HT rate index %d\n", index);
+ index = IWL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= iwlegacy_rates[index].plcp_siso;
+ else
+ rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
+ } else {
+ IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40) {
+ if (tbl->is_dup)
+ rate_n_flags |= RATE_MCS_DUP_MSK;
+ else
+ rate_n_flags |= RATE_MCS_HT40_MSK;
+ }
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IWL_ERR(priv, "GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct iwl_scale_tbl_info *tbl,
+ int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+ *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == IWL_RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->is_dup = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IWL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (iwl4965_num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+ (rate_n_flags & RATE_MCS_DUP_MSK))
+ tbl->is_ht40 = 1;
+
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ tbl->is_dup = 1;
+
+ mcs = iwl4965_rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+ if (iwl4965_num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE*/
+ /* MIMO2 */
+ } else {
+ if (iwl4965_num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct iwl_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while ((new_ant_type != tbl->ant_type) &&
+ !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
+{
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * iwl4965_rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic
+ * available rates are returned.
+ */
+static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum iwl_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else
+ return lq_sta->active_mimo2_rate;
+ }
+}
+
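+/*
+ * Returns the indexes of the next higher and next lower usable rates
+ * packed into one u16: the higher rate in the upper byte, the lower
+ * rate in the lower byte; IWL_RATE_INVALID marks a missing neighbour.
+ */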
+static u16
+iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = IWL_RATE_INVALID;
+ u8 low = IWL_RATE_INVALID;
+
+ /* 802.11a or HT walks to the literally adjacent rate in
+ * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = index - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = index + 1;
+ for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = index;
+ while (low != IWL_RATE_INVALID) {
+ low = iwlegacy_rates[low].prev_rs;
+ if (low == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
+ }
+
+ high = index;
+ while (high != IWL_RATE_INVALID) {
+ high = iwlegacy_rates[high].next_rs;
+ if (high == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ u8 scale_index, u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct iwl_priv *priv = lq_sta->drv;
+
+ /* Check if we need to switch from HT to legacy rates.
+ * The assumption is that the mandatory rates (1 Mbps or 6 Mbps)
+ * are always supported, as the spec demands. */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+ switch_to_legacy = 1;
+ scale_index = rs_ht_to_legacy[scale_index];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (iwl4965_num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ }
+
+ rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask = (u16)(rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+ low = scale_index;
+ goto out;
+ }
+
+ high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
+ scale_index, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == IWL_RATE_INVALID)
+ low = scale_index;
+
+out:
+ return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
+ struct iwl_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+ (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_index, mac_index, i;
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_link_quality_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct iwl_scale_tbl_info tbl_type;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ IWL_DEBUG_RATE_LIMIT(priv,
+ "get frame ack response, update rate scale window\n");
+
+ /* Treat uninitialized rate scaling data the same as non-existent. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
+ priv->band, &tbl_type, &rs_index);
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ rs_index -= IWL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_index = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
+ /*
+ * mac80211 HT index is always zero-indexed; we need to move
+ * HT OFDM rates after CCK rates in 2.4 GHz band
+ */
+ if (priv->band == IEEE80211_BAND_2GHZ)
+ mac_index += IWL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if ((mac_index < 0) ||
+ (tbl_type.is_SGI !=
+ !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (tbl_type.is_ht40 !=
+ !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.is_dup !=
+ !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
+ (tbl_type.ant_type != info->antenna_sel_tx) ||
+ (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ (rs_index != mac_index)) {
+ IWL_DEBUG_RATE(priv,
+ "initial rate %d does not match %d (0x%x)\n",
+ mac_index, rs_index, tx_rate);
+ /*
+ * Since the rates mismatch, the last LQ command may have failed.
+ * After IWL_MISSED_RATE_MAX mismatches, resync the uCode with
+ * the driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
+ CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (iwl4965_table_type_matches(&tbl_type,
+ &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else if (iwl4965_table_type_matches(&tbl_type,
+ &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ IWL_DEBUG_RATE(priv,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
+ tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
+ /*
+ * No matching table found; bypass the data collection
+ * and continue with rate scaling to find a matching rate table.
+ */
+ iwl4965_rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first index into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
+ &rs_index);
+ iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /*
+ * For legacy, update frame history for each Tx retry.
+ */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+ iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
+ &tbl_type, &rs_index);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (iwl4965_table_type_matches(&tbl_type,
+ other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta && sta->supp_rates[sband->band])
+ iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
+ struct iwl_lq_sta *lq_sta)
+{
+ IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 index)
+{
+ /* "active" values */
+ struct iwl_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[index].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[index];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = index;
+
+ new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
+ for (; ;) {
+ high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+ ((active_sr > IWL_RATE_DECREASE_TH) &&
+ (active_sr <= IWL_RATE_HIGH_TH) &&
+ (tpt_tbl[rate] <= active_tpt))) ||
+ ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+ (tpt_tbl[rate] > active_tpt))) {
+
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != IWL_RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != IWL_RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != IWL_RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != IWL_RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
+ == WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (priv->hw_params.tx_chains_num < 2)
+ return -1;
+
+ IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
+ rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(priv,
+ "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+ tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
+
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
+
+ iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(priv,
+ "can not switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+ tbl, rate, is_green);
+ IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ int index)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ int ret = 0;
+ u8 update_search_tbl_counter = 0;
+
+ tbl->action = IWL_LEGACY_SWITCH_SISO;
+
+ start_action = tbl->action;
+ for (; ;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_LEGACY_SWITCH_ANTENNA1:
+ case IWL_LEGACY_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
+ iwl4965_rs_set_expected_tpt_table(lq_sta,
+ search_tbl);
+ goto out;
+ }
+ break;
+ case IWL_LEGACY_SWITCH_SISO:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IWL_LEGACY_SWITCH_MIMO2_AB:
+ case IWL_LEGACY_SWITCH_MIMO2_AC:
+ case IWL_LEGACY_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+ conf, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
+{
+ u8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_SISO_SWITCH_ANTENNA1:
+ case IWL_SISO_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_SISO_SWITCH_MIMO2_AB:
+ case IWL_SISO_SWITCH_MIMO2_AC:
+ case IWL_SISO_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+ conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
+ case IWL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IWL_ERR(priv,
+ "SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_SISO_SWITCH_GI)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_SISO_SWITCH_GI)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO2_SWITCH_ANTENNA1:
+ case IWL_MIMO2_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_MIMO2_SWITCH_SISO_A:
+ case IWL_MIMO2_SWITCH_SISO_B:
+ case IWL_MIMO2_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
+ conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct iwl_priv *priv;
+
+ priv = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ IWL_RATE_SCALE_FLUSH_INTVL));
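+ /*
+ * flush_timer records the jiffies value when this mode was
+ * entered; time_after() keeps the comparison safe across
+ * jiffies wraparound.
+ */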
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+ (lq_sta->total_success > lq_sta->max_success_limit) ||
+ ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
+ && (flush_interval_passed))) {
+ IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
+ lq_sta->total_failed,
+ lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+
+ /*
+ * Else, if we've used this modulation mode for enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * the active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >=
+ lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ IWL_DEBUG_RATE(priv,
+ "LQ: stay in table clear win\n");
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * setup rate table in uCode
+ * return rate_n_flags as used in the table
+ */
+static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
+ iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+
+ return rate;
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = IWL_RATE_INVALID;
+ int high = IWL_RATE_INVALID;
+ int index;
+ int i;
+ struct iwl_rate_scale_data *window = NULL;
+ int current_tpt = IWL_INVALID_VALUE;
+ int low_tpt = IWL_INVALID_VALUE;
+ int high_tpt = IWL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct iwl_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_index_msk = 0;
+ u32 rate;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = MAX_TID_COUNT;
+ struct iwl_tid_data *tid_data;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ if (!sta || !lq_sta)
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
+ if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IWL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else
+ lq_sta->is_agg = 0;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = iwl4965_rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ index = lq_sta->last_txrate_idx;
+
+ IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
+ tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_index_msk = (u16) (rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_scale_index_msk = (u16) (rate_mask &
+ lq_sta->supp_rates);
+
+ } else
+ rate_scale_index_msk = rate_mask;
+
+ if (!rate_scale_index_msk)
+ rate_scale_index_msk = rate_mask;
+
+ if (!((1 << index) & rate_scale_index_msk)) {
+ IWL_ERR(priv, "Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid*/
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+ rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+ tbl, index, is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history window for current rate */
+ if (!tbl->expected_tpt) {
+ IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < index)) {
+ index = lq_sta->max_rate_idx;
+ update_lq = 1;
+ window = &(tbl->win[index]);
+ goto lq_update;
+ }
+
+ window = &(tbl->win[index]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = window->counter - window->success_counter;
+ if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+ (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+ IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
+ "for index %d\n",
+ window->success_counter, window->counter, index);
+
+ /* Can't calculate this yet; not enough history */
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ iwl4965_rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv,
+ "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
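+ /*
+ * The recalculation above reuses exactly the same 128-scaled
+ * formula as iwl4965_rs_collect_tx_data(), so average_tpt stays
+ * consistent however it was produced.
+ */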
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (window->average_tpt > lq_sta->last_tpt) {
+
+ IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = window->average_tpt;
+
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+
+ IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low = iwl4965_rs_get_adjacent_rate(priv, index,
+ rate_scale_index_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* If user set a max rate, don't allow higher than that constraint */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < high))
+ high = IWL_RATE_INVALID;
+
+ sr = window->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = window->average_tpt;
+ if (low != IWL_RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != IWL_RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+ IWL_DEBUG_RATE(priv,
+ "decrease rate because of low success_ratio\n");
+ scale_action = -1;
+
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != IWL_RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt))
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ } else {
+ scale_action = 0;
+ }
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ IWL_DEBUG_RATE(priv,
+ "decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low]))))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
+ "high %d type %d\n",
+ index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+ tbl, index, is_green);
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ iwl4965_rs_stay_in_table(lq_sta, false);
+
+	/*
+	 * Search for a new modulation mode if we're:
+	 * 1) Not changing rates right now
+	 * 2) Not just finishing up a search
+	 * 3) Allowing a new search
+	 * 4) Have transmitted frames in the current rate's history window
+	 */
+ if (!update_lq && !done_search &&
+ !lq_sta->stay_in_tbl && window->counter) {
+		/* Save current throughput to compare with "search" throughput */
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ iwl4965_rs_move_legacy_other(priv, lq_sta,
+ conf, sta, index);
+ else if (is_siso(tbl->lq_type))
+ iwl4965_rs_move_siso_to_other(priv, lq_sta,
+ conf, sta, index);
+ else /* (is_mimo2(tbl->lq_type)) */
+ iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
+ conf, sta, index);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+ IWL_DEBUG_RATE(priv,
+ "Switch current mcs: %X index: %d\n",
+ tbl->current_rate, index);
+ iwl4965_rs_fill_link_cmd(priv, lq_sta,
+ tbl->current_rate);
+ iwl_legacy_send_lq_cmd(priv, ctx,
+ &lq_sta->lq, CMD_ASYNC, false);
+ } else
+ done_search = 1;
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+ lq_sta->action_counter > tbl1->max_search) {
+ IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
+ iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ (lq_sta->action_counter >= tbl1->max_search)) {
+ if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ (tid != MAX_TID_COUNT)) {
+ tid_data =
+ &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IWL_AGG_OFF) {
+ IWL_DEBUG_RATE(priv,
+ "try to aggregate tid %d\n",
+ tid);
+ iwl4965_rs_tl_turn_on_agg(priv, tid,
+ lq_sta, sta);
+ }
+ }
+ iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
+ index, is_green);
+ i = index;
+ lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table entry contains a list of fallback rates
+ * used for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-4965-rs rate scaling algorithm is used instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = iwl4965_rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_rxon_context *ctx;
+
+ if (!sta || !lq_sta)
+ return;
+
+ sta_priv = (void *)sta->drv_priv;
+ ctx = sta_priv->common.ctx;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = priv->hw_params.valid_tx_ant;
+
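+	/* Work on the "active" table, or on the other ("search") table when
+	 * a modulation mode search is in progress. */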
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if ((i < 0) || (i >= IWL_RATE_COUNT))
+ i = 0;
+
+ rate = iwlegacy_rates[i].plcp;
+ tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+ priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ int rate_idx;
+
+ IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (lq_sta->max_rate_idx != -1))
+ lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+ if ((lq_sta->max_rate_idx < 0) ||
+ (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+ lq_sta->max_rate_idx = -1;
+ }
+
+	/* Treat uninitialized rate scaling data the same as non-existent. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, priv_sta, txrc))
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
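+		/* Convert the driver rate index to a mac80211 HT MCS index:
+		 * drop the legacy OFDM offset, fold 9M onto 6M (they share an
+		 * MCS index), then add one stream's worth of MCS indices for
+		 * MIMO2 rates. */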
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ /* 6M and 9M shared same MCS index */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+ ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (rate_idx < IWL_FIRST_OFDM_RATE)))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust index */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
+{
+ struct iwl_lq_sta *lq_sta;
+ struct iwl_station_priv *sta_priv =
+ (struct iwl_station_priv *) sta->drv_priv;
+ struct iwl_priv *priv;
+
+ priv = (struct iwl_priv *)priv_rate;
+ IWL_DEBUG_RATE(priv, "create station rate scale window\n");
+
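+	/* Per-station rate scaling state is embedded in iwl_station_priv,
+	 * so no separate allocation is needed here. */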
+ lq_sta = &sta_priv->lq_sta;
+
+ return lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+iwl4965_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ u8 sta_id)
+{
+ int i, j;
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &priv->hw->conf;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+
+ lq_sta->lq.sta_id = sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &lq_sta->lq_info[j].win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &lq_sta->lq_info[j].win[i]);
+
+ IWL_DEBUG_RATE(priv, "LQ:"
+ "*** rate scale station global init for station %d ***\n",
+ sta_id);
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate... Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc... */
+
+ lq_sta->is_dup = 0;
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+ lq_sta->is_green = iwl4965_rs_use_green(sta);
+ lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
+ lq_sta->band = priv->band;
+ /*
+	 * active_siso_rate mask includes 9 MBits (bit 5) and CCK (bits 0-3);
+	 * supp_rates[] does not. Shift to convert the format and force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* These values will be overridden later */
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+ lq_sta->drv = priv;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
+}
+
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+ struct iwl_scale_tbl_info tbl_type;
+ int index = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (index 0) if needed for debug purposes */
+ iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Interpret new_rate (rate_n_flags) */
+ iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = IWL_HT_NUMBER_TRY;
+ }
+
+ lq_cmd->general_params.mimo_delimiter =
+ is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (index 0) */
+ lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+ if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
+ lq_cmd->general_params.single_stream_ant_msk =
+ tbl_type.ant_type;
+ } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
+ lq_cmd->general_params.dual_stream_ant_msk =
+ tbl_type.ant_type;
+ } /* otherwise we don't modify the existing value */
+
+ index++;
+ repeat_rate--;
+ if (priv)
+ valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (index < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (priv &&
+ iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index].rate_n_flags =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ index++;
+ }
+
+ iwl4965_rs_get_tbl_info_from_mcs(new_rate,
+ lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->general_params.mimo_delimiter = index;
+
+ /* Get next rate */
+ new_rate = iwl4965_rs_get_lower_rate(lq_sta,
+ &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (priv &&
+ iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = IWL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+ index++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void
+*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+/* The rate scaling framework requires a free function to be implemented */
+static void iwl4965_rs_free(void *priv_rate)
+{
+ return;
+}
+
+static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
+ void *priv_sta)
+{
+ struct iwl_priv *priv __maybe_unused = priv_r;
+
+ IWL_DEBUG_RATE(priv, "enter\n");
+ IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{
+ struct iwl_priv *priv;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ priv = lq_sta->drv;
+ valid_tx_ant = priv->hw_params.valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+ >> RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IWL_ERR(priv,
+ "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+ }
+ } else {
+ IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+ }
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ char buf[64];
+ int buf_size;
+ u32 parsed_rate;
+ struct iwl_station_priv *sta_priv =
+ container_of(lq_sta, struct iwl_station_priv, lq_sta);
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ priv = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
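+	/* A non-zero hex rate_n_flags value enables the fixed-rate override;
+	 * 0 or an unparsable value disables it. */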
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
+ false);
+ }
+
+ return count;
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int index = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ priv = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+ lq_sta->dbg_fixed_rate);
+ desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+ (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc += sprintf(buff+desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ desc += sprintf(buff+desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc += sprintf(buff+desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc += sprintf(buff+desc, "general:"
+			"flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+ lq_sta->lq.general_params.flags,
+ lq_sta->lq.general_params.mimo_delimiter,
+ lq_sta->lq.general_params.single_stream_ant_msk,
+ lq_sta->lq.general_params.dual_stream_ant_msk);
+
+ desc += sprintf(buff+desc, "agg:"
+ "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+ lq_sta->lq.agg_params.agg_dis_start_th,
+ lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+ desc += sprintf(buff+desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.general_params.start_rate_index[0],
+ lq_sta->lq.general_params.start_rate_index[1],
+ lq_sta->lq.general_params.start_rate_index[2],
+ lq_sta->lq.general_params.start_rate_index[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ index = iwl4965_hwrate_to_plcp_idx(
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+ iwl_rate_mcs[index].mbps);
+ } else {
+ desc += sprintf(buff+desc,
+ " rate[%d] 0x%X %smbps (%s)\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+ iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = iwl4965_rs_sta_dbgfs_scale_table_write,
+ .read = iwl4965_rs_sta_dbgfs_scale_table_read,
+ .open = iwl4965_open_file_generic,
+ .llseek = default_llseek,
+};
+static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc += sprintf(buff+desc,
+ "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+ "rate=0x%X\n",
+ lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->lq_info[i].is_dup,
+ lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < IWL_RATE_COUNT; j++) {
+ desc += sprintf(buff+desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = iwl4965_rs_sta_dbgfs_stats_table_read,
+ .open = iwl4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ priv = lq_sta->drv;
+
+ if (is_Ht(tbl->lq_type))
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
+ .open = iwl4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
+ struct dentry *dir)
+{
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+
+}
+
+static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
+{
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void
+iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+static struct rate_control_ops rs_4965_ops = {
+ .module = NULL,
+ .name = IWL4965_RS_NAME,
+ .tx_status = iwl4965_rs_tx_status,
+ .get_rate = iwl4965_rs_get_rate,
+ .rate_init = iwl4965_rs_rate_init_stub,
+ .alloc = iwl4965_rs_alloc,
+ .free = iwl4965_rs_free,
+ .alloc_sta = iwl4965_rs_alloc_sta,
+ .free_sta = iwl4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = iwl4965_rs_add_debugfs,
+ .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
+#endif
+};
+
+int iwl4965_rate_control_register(void)
+{
+ pr_err("Registering 4965 rate control operations\n");
+ return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void iwl4965_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
index bbd40b7dd597..b9fa2f6411a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,14 +34,14 @@
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-agn-calib.h"
+#include "iwl-4965-calib.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -58,14 +58,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl_init_sensitivity(priv);
+ iwl4965_init_sensitivity(priv);
}
}
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
* exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
+static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
{
struct statistics_rx_non_phy *rx_info;
int num_active_rx = 0;
@@ -73,11 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
int bcn_silence_a, bcn_silence_b, bcn_silence_c;
int last_rx_noise;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
- rx_info = &(priv->_agn.statistics_bt.rx.general.common);
- else
- rx_info = &(priv->_agn.statistics.rx.general);
+ rx_info = &(priv->_4965.statistics.rx.general);
bcn_silence_a =
le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
bcn_silence_b =
@@ -109,13 +105,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
last_rx_noise);
}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/*
* based on the assumption of all statistics counter are in DWORD
* FIXME: This function is for debugging, do not deal with
* the case of counters roll-over.
*/
-static void iwl_accumulative_statistics(struct iwl_priv *priv,
+static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
__le32 *stats)
{
int i, size;
@@ -125,28 +121,16 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
struct statistics_general_common *general, *accum_general;
struct statistics_tx *tx, *accum_tx;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
- prev_stats = (__le32 *)&priv->_agn.statistics_bt;
- accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
- size = sizeof(struct iwl_bt_notif_statistics);
- general = &priv->_agn.statistics_bt.general.common;
- accum_general = &priv->_agn.accum_statistics_bt.general.common;
- tx = &priv->_agn.statistics_bt.tx;
- accum_tx = &priv->_agn.accum_statistics_bt.tx;
- delta = (u32 *)&priv->_agn.delta_statistics_bt;
- max_delta = (u32 *)&priv->_agn.max_delta_bt;
- } else {
- prev_stats = (__le32 *)&priv->_agn.statistics;
- accum_stats = (u32 *)&priv->_agn.accum_statistics;
- size = sizeof(struct iwl_notif_statistics);
- general = &priv->_agn.statistics.general.common;
- accum_general = &priv->_agn.accum_statistics.general.common;
- tx = &priv->_agn.statistics.tx;
- accum_tx = &priv->_agn.accum_statistics.tx;
- delta = (u32 *)&priv->_agn.delta_statistics;
- max_delta = (u32 *)&priv->_agn.max_delta;
- }
+ prev_stats = (__le32 *)&priv->_4965.statistics;
+ accum_stats = (u32 *)&priv->_4965.accum_statistics;
+ size = sizeof(struct iwl_notif_statistics);
+ general = &priv->_4965.statistics.general.common;
+ accum_general = &priv->_4965.accum_statistics.general.common;
+ tx = &priv->_4965.statistics.tx;
+ accum_tx = &priv->_4965.accum_statistics.tx;
+ delta = (u32 *)&priv->_4965.delta_statistics;
+ max_delta = (u32 *)&priv->_4965.max_delta;
+
for (i = sizeof(__le32); i < size;
i += sizeof(__le32), stats++, prev_stats++, delta++,
max_delta++, accum_stats++) {
@@ -161,23 +145,19 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
/* reset accumulative statistics for "no-counter" type statistics */
accum_general->temperature = general->temperature;
- accum_general->temperature_m = general->temperature_m;
accum_general->ttl_timestamp = general->ttl_timestamp;
- accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
- accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
- accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
}
#endif
#define REG_RECALIB_PERIOD (60)
/**
- * iwl_good_plcp_health - checks for plcp error.
+ * iwl4965_good_plcp_health - checks for plcp error.
*
* When the plcp error is exceeding the thresholds, reset the radio
* to improve the throughput.
*/
-bool iwl_good_plcp_health(struct iwl_priv *priv,
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
bool rc = true;
@@ -207,28 +187,15 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
struct statistics_rx_phy *ofdm;
struct statistics_rx_ht_phy *ofdm_ht;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
- ofdm = &pkt->u.stats_bt.rx.ofdm;
- ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
- combined_plcp_delta =
- (le32_to_cpu(ofdm->plcp_err) -
- le32_to_cpu(priv->_agn.statistics_bt.
- rx.ofdm.plcp_err)) +
- (le32_to_cpu(ofdm_ht->plcp_err) -
- le32_to_cpu(priv->_agn.statistics_bt.
- rx.ofdm_ht.plcp_err));
- } else {
- ofdm = &pkt->u.stats.rx.ofdm;
- ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
- combined_plcp_delta =
- (le32_to_cpu(ofdm->plcp_err) -
- le32_to_cpu(priv->_agn.statistics.
- rx.ofdm.plcp_err)) +
- (le32_to_cpu(ofdm_ht->plcp_err) -
- le32_to_cpu(priv->_agn.statistics.
- rx.ofdm_ht.plcp_err));
- }
+ ofdm = &pkt->u.stats.rx.ofdm;
+ ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
+ combined_plcp_delta =
+ (le32_to_cpu(ofdm->plcp_err) -
+ le32_to_cpu(priv->_4965.statistics.
+ rx.ofdm.plcp_err)) +
+ (le32_to_cpu(ofdm_ht->plcp_err) -
+ le32_to_cpu(priv->_4965.statistics.
+ rx.ofdm_ht.plcp_err));
if ((combined_plcp_delta > 0) &&
((combined_plcp_delta * 100) / plcp_msec) >
@@ -259,58 +226,32 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
return rc;
}
-void iwl_rx_statistics(struct iwl_priv *priv,
+void iwl4965_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_bt_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->_agn.statistics_bt.general.common.temperature !=
- pkt->u.stats_bt.general.common.temperature) ||
- ((priv->_agn.statistics_bt.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats_bt.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+ IWL_DEBUG_RX(priv,
+ "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct iwl_notif_statistics),
+ le32_to_cpu(pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_4965.statistics.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((priv->_4965.statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
- } else {
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
+ iwl_legacy_recover_from_statistics(priv, pkt);
- change = ((priv->_agn.statistics.general.common.temperature !=
- pkt->u.stats.general.common.temperature) ||
- ((priv->_agn.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
- }
-
- iwl_recover_from_statistics(priv, pkt);
-
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
- memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
- sizeof(priv->_agn.statistics_bt));
- else
- memcpy(&priv->_agn.statistics, &pkt->u.stats,
- sizeof(priv->_agn.statistics));
+ memcpy(&priv->_4965.statistics, &pkt->u.stats,
+ sizeof(priv->_4965.statistics));
set_bit(STATUS_STATISTICS, &priv->status);
@@ -323,34 +264,28 @@ void iwl_rx_statistics(struct iwl_priv *priv,
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl_rx_calc_noise(priv);
+ iwl4965_rx_calc_noise(priv);
queue_work(priv->workqueue, &priv->run_time_calib_work);
}
if (priv->cfg->ops->lib->temp_ops.temperature && change)
priv->cfg->ops->lib->temp_ops.temperature(priv);
}
-void iwl_reply_statistics(struct iwl_priv *priv,
+void iwl4965_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- memset(&priv->_agn.accum_statistics, 0,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ memset(&priv->_4965.accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
- memset(&priv->_agn.delta_statistics, 0,
+ memset(&priv->_4965.delta_statistics, 0,
sizeof(struct iwl_notif_statistics));
- memset(&priv->_agn.max_delta, 0,
+ memset(&priv->_4965.max_delta, 0,
sizeof(struct iwl_notif_statistics));
- memset(&priv->_agn.accum_statistics_bt, 0,
- sizeof(struct iwl_bt_notif_statistics));
- memset(&priv->_agn.delta_statistics_bt, 0,
- sizeof(struct iwl_bt_notif_statistics));
- memset(&priv->_agn.max_delta_bt, 0,
- sizeof(struct iwl_bt_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
- iwl_rx_statistics(priv, rxb);
+ iwl4965_rx_statistics(priv, rxb);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 000000000000..a262c23553d2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,721 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-4965.h"
+
+static struct iwl_link_quality_cmd *
+iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
+{
+ int i, r;
+ struct iwl_link_quality_cmd *link_cmd;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ r = IWL_RATE_6M_INDEX;
+ else
+ r = IWL_RATE_1M_INDEX;
+
+ if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
+ RATE_MCS_ANT_POS;
+ rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
+ rate_flags);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
+}
+
+/*
+ * iwl4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IWL_INVALID_STATION;
+
+ ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_LOCAL;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+
+static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct iwl_wep_cmd) +
+ sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
+ struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct iwl_wep_cmd);
+ struct iwl_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = wep_cmd,
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0, cmd_size +
+ (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
+
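+	/* Build one command entry per WEP key slot; slots with no key are
+	 * marked with WEP_INVALID_OFFSET. */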
+ for (i = 0; i < WEP_KEYS_MAX ; i++) {
+ wep_cmd->key[i].key_index = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return iwl_legacy_send_cmd(priv, &cmd);
+ else
+ return 0;
+}
+
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ return iwl4965_static_wepkey_cmd(priv, ctx, false);
+}
+
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
+ keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (iwl_legacy_is_rfkill(priv)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+ /* but keys in device are clear anyway so return success */
+ return 0;
+ }
+ ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
+ IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = HW_KEY_DEFAULT;
+ priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
+ IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
+ keyconf->keylen, keyconf->keyidx, ret);
+
+ return ret;
+}
+
+static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+ memcpy(priv->stations[sta_id].keyinfo.key,
+ keyconf->key, keyconf->keylen);
+
+ memcpy(&priv->stations[sta_id].sta.key.key[3],
+ keyconf->key, keyconf->keylen);
+
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ == STA_KEY_FLG_NO_ENC)
+ priv->stations[sta_id].sta.key.key_offset =
+ iwl_legacy_get_free_ucode_key_index(priv);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
+ keyconf->keylen);
+
+ memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
+ keyconf->keylen);
+
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ == STA_KEY_FLG_NO_ENC)
+ priv->stations[sta_id].sta.key.key_offset =
+ iwl_legacy_get_free_ucode_key_index(priv);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ priv->stations[sta_id].keyinfo.keylen = 16;
+
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ == STA_KEY_FLG_NO_ENC)
+ priv->stations[sta_id].sta.key.key_offset =
+ iwl_legacy_get_free_ucode_key_index(priv);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+
+
+	/* This copy is actually not needed: we get the key with each TX */
+ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+ memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return ret;
+}
+
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+ u8 sta_id;
+ unsigned long flags;
+ int i;
+
+ if (iwl_legacy_scan_cancel(priv)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
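+	/* Load the new receive-side TKIP phase-1 key (TTAK) and IV32 into
+	 * this station's key entry. */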
+ priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+ for (i = 0; i < 5; i++)
+ priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+ cpu_to_le16(phase1key[i]);
+
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
+
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ u16 key_flags;
+ u8 keyidx;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ ctx->key_mapping_keys--;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
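+	/* Extract the key index currently recorded for this station */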
+ key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
+ keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+ IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (keyconf->keyidx != keyidx) {
+		/* We need to remove a key with an index different from the one
+		 * in the uCode. This means that the key we need to remove has
+		 * been replaced by another one with a different index.
+		 * Don't do anything, just return ok.
+		 */
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
+ if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
+ if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
+ &priv->ucode_key_table))
+ IWL_ERR(priv, "index %d not used in uCode key table.\n",
+ priv->stations[sta_id].sta.key.key_offset);
+ memset(&priv->stations[sta_id].keyinfo, 0,
+ sizeof(struct iwl_hw_key));
+ memset(&priv->stations[sta_id].sta.key, 0,
+ sizeof(struct iwl4965_keyinfo));
+ priv->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ if (iwl_legacy_is_rfkill(priv)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ ctx->key_mapping_keys++;
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
+ keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
+ keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
+ keyconf, sta_id);
+ break;
+ default:
+ IWL_ERR(priv,
+ "Unknown alg: %s cipher = %x\n", __func__,
+ keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ IWL_DEBUG_WEP(priv,
+ "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta_id, ret);
+
+ return ret;
+}
+
+/**
+ * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
+ false, NULL);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used |= IWL_STA_BCAST;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+
+/**
+ * iwl4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int iwl4965_update_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ unsigned long flags;
+ struct iwl_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].lq)
+ kfree(priv->stations[sta_id].lq);
+ else
+ IWL_DEBUG_INFO(priv,
+ "Bcast station rate scaling has not been initialized yet.\n");
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+
+int iwl4965_update_bcast_stations(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int ret = 0;
+
+ for_each_context(priv, ctx) {
+ ret = iwl4965_update_bcast_station(priv, ctx);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+{
+ unsigned long flags;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
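+/*
+ * iwl4965_sta_rx_agg_start - tell uCode to start an Rx block-ack (ADDBA)
+ * session for this station/TID, beginning at sequence number ssn.
+ */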
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn)
+{
+ unsigned long flags;
+ int sta_id;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ sta_id = iwl_legacy_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags_msk = 0;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
+ priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid)
+{
+ unsigned long flags;
+ int sta_id;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ sta_id = iwl_legacy_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags_msk = 0;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+void
+iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.sta.modify_mask =
+ STA_MODIFY_SLEEP_TX_COUNT_MSK;
+ priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_legacy_send_add_sta(priv,
+ &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 000000000000..5c40502f869a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1369 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following queue numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination); such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
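+/* Standard IEEE 802.11 UP (TID) -> AC mapping for TIDs 0-7 */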
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int iwl4965_get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int
+iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+/*
+ * Handle building the REPLY_TX command.
+ */
+static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
+ struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr,
+ u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For 5 GHz band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwlegacy_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+ priv->hw_params.valid_tx_ant);
+
+ rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_key(keyconf, skb_frag,
+ IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+ (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+ "with key %d\n", keyconf->keyidx);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct iwl_station_priv *sta_priv = NULL;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_cmd *tx_cmd;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+ bool is_agg = false;
+
+ if (info->control.vif)
+ ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (iwl_legacy_is_rfkill(priv)) {
+ IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find index into station table for destination station */
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock;
+ }
+
+ IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
+ }
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = ctx->mcast_queue;
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else
+ txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+ /* irqs already disabled/saved above when locking priv->lock */
+ spin_lock(&priv->sta_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&priv->sta_lock);
+ goto drop_unlock;
+ }
+ seq_number = priv->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+ txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ txq = &priv->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
+ spin_unlock(&priv->sta_lock);
+ goto drop_unlock;
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&priv->sta_lock);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD index within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = REPLY_TX;
+ out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
+ iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+
+ iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+ iwl_legacy_update_stats(priv, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = pci_map_single(priv->pci_dev,
+ &out_cmd->hdr, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, firstlen, 1, 0);
+
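+ /* If more fragments of this frame will follow, hold off updating the
+  * device write pointer until the last fragment has been queued. */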
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+ secondlen, PCI_DMA_TODEVICE);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, secondlen,
+ 0, 0);
+ }
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+ firstlen, PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ firstlen, PCI_DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_legacy_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &out_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
+ atomic_inc(&sta_priv->pending_frames);
+
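+ /*
+ * If the queue is running low on space, either push the pending
+ * write pointer to the device now or stop the mac80211 queue
+ * until the hardware catches up.
+ */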
+ if ((iwl_legacy_queue_space(q) < q->high_mark) &&
+ priv->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&priv->lock, flags);
+ txq->need_update = 1;
+ iwl_legacy_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ iwl_legacy_stop_queue(priv, txq);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -1;
+}
+
+static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwl4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == priv->cmd_queue)
+ iwl_legacy_cmd_queue_free(priv);
+ else
+ iwl_legacy_tx_queue_free(priv, txq_id);
+ }
+ iwl4965_free_dma_ptr(priv, &priv->kw);
+
+ iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+ /* free tx queue structure */
+ iwl_legacy_txq_mem(priv);
+}
+
+/**
+ * iwl4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ iwl4965_hw_txq_ctx_free(priv);
+
+ ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ priv->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(priv, "Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = iwl_legacy_alloc_txq_mem(priv);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl4965_txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == priv->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_legacy_tx_queue_init(priv,
+ &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+ error:
+ iwl4965_hw_txq_ctx_free(priv);
+ iwl4965_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+ iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+ return ret;
+}
+
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl4965_txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == priv->cmd_queue ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
+ slots_num, txq_id);
+ }
+}
+
+/**
+ * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ iwl4965_txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+ iwl_legacy_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000))
+ IWL_ERR(priv, "Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ iwl_legacy_read_direct32(priv,
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!priv->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == priv->cmd_queue)
+ iwl_legacy_cmd_queue_unmap(priv);
+ else
+ iwl_legacy_tx_queue_unmap(priv, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+/**
+ * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
+ u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_legacy_write_prph(priv,
+ IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = priv->scd_base_addr +
+ IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
+
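+ /* Each translate-table dword holds two 16-bit RA/TID entries:
+  * odd-numbered queues use the upper halfword, even-numbered
+  * queues use the lower halfword. */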
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+/**
+ * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+ int ret;
+
+ if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWL49_FIRST_AMPDU_QUEUE,
+ IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_legacy_write_targ_mem(priv,
+ priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
+ & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
+
+ tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+ __func__, sta->addr, tid);
+
+ sta_id = iwl_legacy_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = iwl4965_txq_ctx_activate_free(priv);
+ if (txq_id == -1) {
+ IWL_ERR(priv, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ iwl_legacy_set_swq_id(&priv->txq[txq_id],
+ iwl4965_get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
+ sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_HT(priv,
+ "HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
+}
+
+/**
+ * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
+ * priv->lock must be held by the caller
+ */
+static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWL49_FIRST_AMPDU_QUEUE,
+ IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+ iwl_legacy_clear_bits_prph(priv,
+ IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ iwl_legacy_clear_bits_prph(priv,
+ IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(priv, txq_id);
+ iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn;
+ struct iwl_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = iwl_legacy_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ tid_data = &priv->stations[sta_id].tid[tid];
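+ /* Recover the 12-bit start sequence number (SSN) from the
+  * stored sequence-control value. */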
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+ }
+
+ write_ptr = priv->txq[txq_id].q.write_ptr;
+ read_ptr = priv->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+ priv->stations[sta_id].tid[tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ turn_off:
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&priv->sta_lock);
+ spin_lock(&priv->lock);
+
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if the uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue, just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_queue *q = &priv->txq[txq_id].q;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
+ struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+ struct iwl_rxon_context *ctx;
+
+ ctx = &priv->contexts[priv->stations[sta_id].ctxid];
+
+ lockdep_assert_held(&priv->sta_lock);
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the */
+ /* aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
+ IWL_DEBUG_HT(priv,
+ "HW queue empty: continue DELBA flow\n");
+ iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv,
+ "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr1)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_station_priv *sta_priv;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ctx->vif, addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(priv->hw, sta, false);
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
+ bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+
+ if (!is_agg)
+ iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+}
+
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ index, q->n_bd, q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ iwl4965_tx_status(priv, tx_info,
+ txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+ tx_info->skb = NULL;
+
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl_compressed_ba_resp *ba_resp)
+
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+ u64 bitmap, sent_bitmap;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
+ ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx window bits */
+ sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+ if (sh < 0) /* indices wrapped around the 256-entry queue */
+ sh += 0x100;
+
+ if (agg->frame_count > (64 - sh)) {
+ IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+ return -1;
+ }
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+ ack ? "ACK" : "NACK", i,
+ (agg->start_idx + i) & 0xff,
+ agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
+
+ IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
+ (unsigned long long)bitmap);
+
+ info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+ return 0;
+}
+
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_tx_queue *txq = NULL;
+ struct iwl_ht_agg *agg;
+ int index;
+ int sta_id;
+ int tid;
+ unsigned long flags;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= priv->hw_params.max_txq_num) {
+ IWL_ERR(priv,
+ "BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &priv->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &priv->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since it can happen very often, and in order
+ * not to fill the syslog, don't enable the logging by default.
+ */
+ IWL_DEBUG_TX_REPLY(priv,
+ "BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
+
+ /* Find index just before block-ack window */
+ index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+ agg->wait_for_ba,
+ (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
+ "scd_flow = "
+ "%d, scd_ssn = %d\n",
+ ba_resp->tid,
+ ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow,
+ ba_resp->scd_ssn);
+ IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+ agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in window */
+ iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
+ iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
+ priv->mac80211_registered &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_legacy_wake_queue(priv, txq);
+
+ iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
+ }
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 000000000000..001d148feb94
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-4965-calib.h"
+
+#define IWL_AC_UNSET -1
+
+/**
+ * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IWL_DL_IO is set */
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ i + IWL4965_RTC_INST_LOWER_BOUND);
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
+ u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ IWL4965_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IWL_DL_IO is set */
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IWL_ERR(priv, "uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ IWL_DEBUG_INFO(priv,
+ "ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * iwl4965_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int iwl4965_verify_ucode(struct iwl_priv *priv)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *)priv->ucode_boot.v_addr;
+ len = priv->ucode_boot.len;
+ ret = iwl4965_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *)priv->ucode_init.v_addr;
+ len = priv->ucode_init.len;
+ ret = iwl4965_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *)priv->ucode_code.v_addr;
+ len = priv->ucode_code.len;
+ ret = iwl4965_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *)priv->ucode_boot.v_addr;
+ len = priv->ucode_boot.len;
+ ret = iwl4965_verify_inst_full(priv, image, len);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index 91a9f5253469..f5433c74b845 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -43,12 +43,11 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
-#include "iwl-agn-calib.h"
+#include "iwl-4965-calib.h"
#include "iwl-sta.h"
-#include "iwl-agn-led.h"
-#include "iwl-agn.h"
-#include "iwl-agn-debugfs.h"
-#include "iwl-legacy.h"
+#include "iwl-4965-led.h"
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -74,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
/* verify BSM SRAM contents */
- val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
+ val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
for (reg = BSM_SRAM_LOWER_BOUND;
reg < BSM_SRAM_LOWER_BOUND + len;
reg += sizeof(u32), image++) {
- val = iwl_read_prph(priv, reg);
+ val = iwl_legacy_read_prph(priv, reg);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "BSM uCode verification failed at "
"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -158,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
/* Fill BSM memory with bootstrap instructions */
for (reg_offset = BSM_SRAM_LOWER_BOUND;
reg_offset < BSM_SRAM_LOWER_BOUND + len;
reg_offset += sizeof(u32), image++)
- _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
+ _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
ret = iwl4965_verify_bsm(priv);
if (ret)
return ret;
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
- iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+ iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+ iwl_legacy_write_prph(priv,
+ BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
+ iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
/* Load bootstrap code into instruction SRAM now,
* to prepare to load "initialize" uCode */
- iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
/* Wait for load of bootstrap uCode to finish */
for (i = 0; i < 100; i++) {
- done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
+ done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
if (!(done & BSM_WR_CTRL_REG_BIT_START))
break;
udelay(10);
@@ -198,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
/* Enable future boot loads whenever power management unit triggers it
* (e.g. when powering back up after power-save shutdown) */
- iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+ iwl_legacy_write_prph(priv,
+ BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
return 0;
@@ -224,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
pdata = priv->ucode_data_backup.p_addr >> 4;
/* Tell bootstrap uCode where to find image to load */
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
priv->ucode_data.len);
/* Inst byte count must be last to set up, bit 31 signals uCode
* that all new ptr/size info is in place */
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -251,18 +252,10 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
- if (iwl_verify_ucode(priv)) {
+ if (iwl4965_verify_ucode(priv)) {
/* Runtime instruction load was bad;
* take it all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
@@ -288,7 +281,7 @@ restart:
queue_work(priv->workqueue, &priv->restart);
}
-static bool is_ht40_channel(__le32 rxon_flags)
+static bool iw4965_is_ht40_channel(__le32 rxon_flags)
{
int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
@@ -296,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags)
(chan_mod == CHANNEL_MODE_MIXED));
}
-/*
- * EEPROM handlers
- */
-static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
-{
- return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
static void iwl4965_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
@@ -320,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
priv->calib_info = (struct iwl_eeprom_calib_info *)
- iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+ iwl_legacy_eeprom_query_addr(priv,
+ EEPROM_4965_CALIB_TXPOWER_OFFSET);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -348,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_is_any_associated(priv)) {
+ iwl_legacy_is_any_associated(priv)) {
struct iwl_calib_diff_gain_cmd cmd;
/* clear data for chain noise calibration algorithm */
@@ -365,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
cmd.diff_gain_a = 0;
cmd.diff_gain_b = 0;
cmd.diff_gain_c = 0;
- if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
sizeof(cmd), &cmd))
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
@@ -374,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
}
}
-static void iwl4965_gain_computation(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
-{
- int i, ret;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
- data->delta_gain_code[min_average_noise_antenna_i] = 0;
-
- for (i = default_chain; i < NUM_RX_CHAINS; i++) {
- s32 delta_g = 0;
-
- if (!(data->disconn_array[i]) &&
- (data->delta_gain_code[i] ==
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
- delta_g = average_noise[i] - min_average_noise;
- data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
- data->delta_gain_code[i] =
- min(data->delta_gain_code[i],
- (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
- data->delta_gain_code[i] =
- (data->delta_gain_code[i] | (1 << 2));
- } else {
- data->delta_gain_code[i] = 0;
- }
- }
- IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
- data->delta_gain_code[0],
- data->delta_gain_code[1],
- data->delta_gain_code[2]);
-
- /* Differential gain gets sent to uCode only once */
- if (!data->radio_write) {
- struct iwl_calib_diff_gain_cmd cmd;
- data->radio_write = 1;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
- cmd.diff_gain_a = data->delta_gain_code[0];
- cmd.diff_gain_b = data->delta_gain_code[1];
- cmd.diff_gain_c = data->delta_gain_code[2];
- ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD\n");
-
- /* TODO we might want recalculate
- * rx_chain in rxon cmd */
-
- /* Mark so we run this algo only once! */
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
- }
-}
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- txpower_work);
-
- /* If a scan happened to start before we got here
- * then just return; the statistics notification will
- * kick off another scheduled work to compensate for
- * any temperature delta we missed here. */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
-
- /* Regardless of if we are associated, we must reconfigure the
- * TX power since frames can be sent on non-radar channels while
- * not associated */
- iwl4965_send_tx_power(priv);
-
- /* Update last_temperature to keep is_calib_needed from running
- * when it isn't needed... */
- priv->last_temperature = priv->temperature;
-
- mutex_unlock(&priv->mutex);
-}
-
-/*
- * Acquire priv->lock before calling this function !
- */
-static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE: Acquire priv->lock before calling this function !
- */
-static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
-
- /* Find out whether to activate Tx queue */
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- /* Set up and activate */
- iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
- IWL49_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Clear 4965's internal Tx Scheduler data base */
- priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- /* Tell 4965 where to find Tx byte count tables */
- iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Disable chain mode for all queues */
- iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
- /* Initialize each Tx queue (including the command queue) */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
- /* TFD circular buffer read/write indexes */
- iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
- /* Max Tx Window size for Scheduler-ACK mode */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
- (SCD_WIN_SIZE <<
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- /* Frame limit */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- (SCD_FRAME_LIMIT <<
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- }
- iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
-
- /* Activate all Tx DMA/FIFO channels */
- priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
-
- iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
- /* Map each Tx/cmd queue to its corresponding fifo */
- BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
- for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
- int ac = default_queue_to_tx_fifo[i];
-
- iwl_txq_ctx_activate(priv, i);
-
- if (ac == IWL_TX_FIFO_UNUSED)
- continue;
-
- iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
.min_nrg_cck = 97,
.max_nrg_cck = 0, /* not used, set to 0 */
@@ -666,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
iwl4965_set_ct_threshold(priv);
priv->hw_params.sens = &iwl4965_sensitivity;
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
return 0;
}
@@ -1158,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
is_ht40);
- ch_info = iwl_get_channel_info(priv, priv->band, channel);
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
- if (!is_channel_valid(ch_info))
+ if (!iwl_legacy_is_channel_valid(ch_info))
return -EINVAL;
/* get txatten group, used to select 1) thermal txpower adjustment
@@ -1384,7 +1130,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
band = priv->band == IEEE80211_BAND_2GHZ;
- is_ht40 = is_ht40_channel(ctx->active.flags);
+ is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
@@ -1398,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
if (ret)
goto out;
- ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
+ ret = iwl_legacy_send_cmd_pdu(priv,
+ REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
out:
return ret;
@@ -1409,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
{
int ret = 0;
struct iwl4965_rxon_assoc_cmd rxon_assoc;
- const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+ const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+ const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1436,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
ctx->staging.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
- ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+ ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
sizeof(rxon_assoc), &rxon_assoc, NULL);
if (ret)
return ret;
@@ -1447,12 +1194,12 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
/* cast away the const for active_rxon in this function */
- struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
+ struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
int ret;
bool new_assoc =
!!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EBUSY;
if (!ctx->is_active)
@@ -1461,7 +1208,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
- ret = iwl_check_rxon_cmd(priv, ctx);
+ ret = iwl_legacy_check_rxon_cmd(priv, ctx);
if (ret) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
return -EINVAL;
@@ -1475,21 +1222,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
(priv->switch_rxon.channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_rxon.channel));
- iwl_chswitch_done(priv, false);
+ iwl_legacy_chswitch_done(priv, false);
}
/* If we don't need to send a full RXON, we can use
* iwl_rxon_assoc_cmd which is used to reconfigure filter
* and other flags for the current radio configuration. */
- if (!iwl_full_rxon_required(priv, ctx)) {
- ret = iwl_send_rxon_assoc(priv, ctx);
+ if (!iwl_legacy_full_rxon_required(priv, ctx)) {
+ ret = iwl_legacy_send_rxon_assoc(priv, ctx);
if (ret) {
IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
return ret;
}
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_print_rx_config_cmd(priv, ctx);
+ iwl_legacy_print_rx_config_cmd(priv, ctx);
return 0;
}
@@ -1497,12 +1244,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
* before we apply the new config */
- if (iwl_is_associated_ctx(ctx) && new_assoc) {
+ if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_rxon_cmd),
+ ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_legacy_rxon_cmd),
active_rxon);
/* If the mask clearing failed then we set
@@ -1512,9 +1259,9 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
return ret;
}
- iwl_clear_ucode_stations(priv, ctx);
- iwl_restore_stations(priv, ctx);
- ret = iwl_restore_default_wep_keys(priv, ctx);
+ iwl_legacy_clear_ucode_stations(priv, ctx);
+ iwl_legacy_restore_stations(priv, ctx);
+ ret = iwl4965_restore_default_wep_keys(priv, ctx);
if (ret) {
IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
return ret;
@@ -1529,24 +1276,25 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
le16_to_cpu(ctx->staging.channel),
ctx->staging.bssid_addr);
- iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
+ iwl_legacy_set_rxon_hwcrypto(priv, ctx,
+ !priv->cfg->mod_params->sw_crypto);
/* Apply the new configuration
* RXON unassoc clears the station table in uCode so restoration of
* stations is needed after it (the RXON command) completes
*/
if (!new_assoc) {
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_rxon_cmd), &ctx->staging);
+ ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_clear_ucode_stations(priv, ctx);
- iwl_restore_stations(priv, ctx);
- ret = iwl_restore_default_wep_keys(priv, ctx);
+ iwl_legacy_clear_ucode_stations(priv, ctx);
+ iwl_legacy_restore_stations(priv, ctx);
+ ret = iwl4965_restore_default_wep_keys(priv, ctx);
if (ret) {
IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
return ret;
@@ -1557,21 +1305,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
/* Apply the new configuration
* RXON assoc doesn't clear the station table in uCode,
*/
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_rxon_cmd), &ctx->staging);
+ ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
}
- iwl_print_rx_config_cmd(priv, ctx);
+ iwl_legacy_print_rx_config_cmd(priv, ctx);
- iwl_init_sensitivity(priv);
+ iwl4965_init_sensitivity(priv);
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+ ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
if (ret) {
IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
return ret;
@@ -1598,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_vif *vif = ctx->vif;
band = priv->band == IEEE80211_BAND_2GHZ;
- is_ht40 = is_ht40_channel(ctx->staging.flags);
+ is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
if (is_ht40 &&
(ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
@@ -1629,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
else {
switch_time_in_usec =
vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_usecs_to_beacons(priv,
+ ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
switch_time_in_usec,
beacon_interval);
- cmd.switch_time = iwl_add_beacon_time(priv,
+ cmd.switch_time = iwl_legacy_add_beacon_time(priv,
priv->ucode_beacon_time,
ucode_switch_time,
beacon_interval);
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
+ cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
ctx->active.channel, ch);
@@ -1658,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
- return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+ return iwl_legacy_send_cmd_pdu(priv,
+ REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
/**
@@ -1700,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
u32 R4;
if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->_agn.statistics.flag &
+ (priv->_4965.statistics.flag &
STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
@@ -1725,7 +1474,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
if (!test_bit(STATUS_TEMPERATURE, &priv->status))
vt = sign_extend32(R4, 23);
else
- vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
+ vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
general.common.temperature), 23);
IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -1810,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
}
priv->temperature = temp;
- iwl_tt_handler(priv);
set_bit(STATUS_TEMPERATURE, &priv->status);
if (!priv->disable_tx_power_cal &&
@@ -1819,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->txpower_work);
}
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-
-/**
- * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
- * i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
- int ret;
-
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv,
- priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
- (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
- & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-
static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
{
switch (cmd_id) {
@@ -1975,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
}
}
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+ u8 *data)
{
struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
addsta->mode = cmd->mode;
@@ -2028,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
status = le16_to_cpu(frame_status[0].status);
idx = start_idx;
- /* FIXME: code repetition */
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
agg->frame_count, agg->start_idx, idx);
info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
- /* FIXME: code repetition end */
+ info->flags |= iwl4965_tx_status_to_mac80211(status);
+ iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
status & 0xff, tx_resp->failure_frame);
@@ -2064,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
agg->frame_count, txq_id, idx);
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
if (!hdr) {
IWL_ERR(priv,
"BUG_ON idx doesn't point to valid skb"
@@ -2115,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
return 0;
}
-static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
+static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
{
int i;
int start = 0;
int ret = IWL_INVALID_STATION;
unsigned long flags;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
- (priv->iw_mode == NL80211_IFTYPE_AP))
+ if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
start = IWL_STA_ID;
if (is_broadcast_ether_addr(addr))
@@ -2159,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
return ret;
}
-static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
{
if (priv->iw_mode == NL80211_IFTYPE_STATION) {
return IWL_AP_ID;
} else {
u8 *da = ieee80211_get_DA(hdr);
- return iwl_find_station(priv, da);
+ return iwl4965_find_station(priv, da);
}
}
@@ -2190,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
u8 *qc = NULL;
unsigned long flags;
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
"is out of range [0-%d] %d %d\n", txq_id,
index, txq->q.n_bd, txq->q.write_ptr,
@@ -2202,13 +1802,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
memset(&info->status, 0, sizeof(info->status));
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
+ hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
if (ieee80211_is_data_qos(hdr->frame_control)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
}
- sta_id = iwl_get_ra_sta_id(priv, hdr);
+ sta_id = iwl4965_get_ra_sta_id(priv, hdr);
if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
IWL_ERR(priv, "Station not known\n");
return;
@@ -2225,114 +1825,89 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
/* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
+ if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
+ txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
if (qc)
- iwl_free_tfds_in_queue(priv, sta_id,
+ iwl4965_free_tfds_in_queue(priv, sta_id,
tid, freed);
if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq);
+ (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
+ && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_legacy_wake_queue(priv, txq);
}
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv,
+ info->flags |= iwl4965_tx_status_to_mac80211(status);
+ iwl4965_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
"rate_n_flags 0x%x retries %d\n",
txq_id,
- iwl_get_tx_fail_reason(status), status,
+ iwl4965_get_tx_fail_reason(status), status,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark))
- iwl_wake_queue(priv, txq);
+ (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
+ iwl_legacy_wake_queue(priv, txq);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
- iwl_check_abort_status(priv, tx_resp->frame_count, status);
+ iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
- >> IWL49_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
- >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
+ u8 rate __maybe_unused =
+ iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
-
/* Set up 4965-specific Rx frame reply handlers */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
- INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->txpower_work);
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
}
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
.commit_rxon = iwl4965_commit_rxon,
- .set_rxon_chain = iwlagn_set_rxon_chain,
- .send_bt_config = iwl_send_bt_config,
+ .set_rxon_chain = iwl4965_set_rxon_chain,
};
static void iwl4965_post_scan(struct iwl_priv *priv)
@@ -2344,7 +1919,7 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
* performing the scan, fire one off if needed
*/
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwlcore_commit_rxon(priv, ctx);
+ iwl_legacy_commit_rxon(priv, ctx);
}
static void iwl4965_post_associate(struct iwl_priv *priv)
@@ -2357,29 +1932,24 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
if (!vif || !priv->is_open)
return;
- if (vif->type == NL80211_IFTYPE_AP) {
- IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
- return;
- }
-
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- iwl_scan_cancel_timeout(priv, 200);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
- conf = ieee80211_get_hw_conf(priv->hw);
+ conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
+ iwl_legacy_commit_rxon(priv, ctx);
- ret = iwl_send_rxon_timing(priv, ctx);
+ ret = iwl_legacy_send_rxon_timing(priv, ctx);
if (ret)
IWL_WARN(priv, "RXON timing - "
"Attempting to continue.\n");
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl_set_rxon_ht(priv, &priv->current_ht_config);
+ iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
@@ -2401,7 +1971,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
- iwlcore_commit_rxon(priv, ctx);
+ iwl_legacy_commit_rxon(priv, ctx);
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
vif->bss_conf.aid, ctx->active.bssid_addr);
@@ -2410,7 +1980,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
case NL80211_IFTYPE_STATION:
break;
case NL80211_IFTYPE_ADHOC:
- iwlagn_send_beacon_cmd(priv);
+ iwl4965_send_beacon_cmd(priv);
break;
default:
IWL_ERR(priv, "%s Should not be called in %d mode\n",
@@ -2422,10 +1992,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
* If chain noise has already been run, then we need to enable
* power management here */
if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_power_update_mode(priv, false);
+ iwl_legacy_power_update_mode(priv, false);
/* Enable Rx differential gain and sensitivity calibrations */
- iwl_chain_noise_reset(priv);
+ iwl4965_chain_noise_reset(priv);
priv->start_calib = 1;
}
@@ -2441,14 +2011,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
return;
/* The following should be done only at AP bring up */
- if (!iwl_is_associated_ctx(ctx)) {
+ if (!iwl_legacy_is_associated_ctx(ctx)) {
/* RXON - unassoc (to set timing command) */
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
+ iwl_legacy_commit_rxon(priv, ctx);
/* RXON Timing */
- ret = iwl_send_rxon_timing(priv, ctx);
+ ret = iwl_legacy_send_rxon_timing(priv, ctx);
if (ret)
IWL_WARN(priv, "RXON timing failed - "
"Attempting to continue.\n");
@@ -2456,7 +2026,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
/* AP has all antennas */
priv->chain_noise_data.active_chains =
priv->hw_params.valid_rx_ant;
- iwl_set_rxon_ht(priv, &priv->current_ht_config);
+ iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
@@ -2478,51 +2048,37 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_SLOT_MSK;
}
/* need to send beacon cmd before committing assoc RXON! */
- iwlagn_send_beacon_cmd(priv);
+ iwl4965_send_beacon_cmd(priv);
/* restore RXON assoc */
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
+ iwl_legacy_commit_rxon(priv, ctx);
}
- iwlagn_send_beacon_cmd(priv);
-
- /* FIXME - we need to add code here to detect a totally new
- * configuration, reset the AP, unassoc, rxon timing, assoc,
- * clear sta table, add BCAST sta... */
+ iwl4965_send_beacon_cmd(priv);
}
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.get_hcmd_size = iwl4965_get_hcmd_size,
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
- .chain_noise_reset = iwl4965_chain_noise_reset,
- .gain_computation = iwl4965_gain_computation,
- .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
- .calc_rssi = iwl4965_calc_rssi,
- .request_scan = iwlagn_request_scan,
+ .request_scan = iwl4965_request_scan,
.post_scan = iwl4965_post_scan,
};
static struct iwl_lib_ops iwl4965_lib = {
.set_hw_params = iwl4965_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .txq_set_sched = iwl4965_txq_set_sched,
- .txq_agg_enable = iwl4965_txq_agg_enable,
- .txq_agg_disable = iwl4965_txq_agg_disable,
- .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl_hw_txq_free_tfd,
- .txq_init = iwl_hw_tx_queue_init,
+ .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl4965_hw_txq_free_tfd,
+ .txq_init = iwl4965_hw_tx_queue_init,
.rx_handler_setup = iwl4965_rx_handler_setup,
- .setup_deferred_work = iwl4965_setup_deferred_work,
- .cancel_deferred_work = iwl4965_cancel_deferred_work,
.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
- .alive_notify = iwl4965_alive_notify,
.init_alive_start = iwl4965_init_alive_start,
.load_ucode = iwl4965_load_bsm,
- .dump_nic_event_log = iwl_dump_nic_event_log,
- .dump_nic_error_log = iwl_dump_nic_error_log,
- .dump_fh = iwl_dump_fh,
+ .dump_nic_event_log = iwl4965_dump_nic_event_log,
+ .dump_nic_error_log = iwl4965_dump_nic_error_log,
+ .dump_fh = iwl4965_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
.apm_ops = {
- .init = iwl_apm_init,
+ .init = iwl_legacy_apm_init,
.config = iwl4965_nic_config,
},
.eeprom_ops = {
@@ -2535,64 +2091,56 @@ static struct iwl_lib_ops iwl4965_lib = {
EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
},
- .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
- .release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl4965_eeprom_calib_version,
- .query_addr = iwlcore_eeprom_query_addr,
+ .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
+ .release_semaphore = iwl4965_eeprom_release_semaphore,
},
.send_tx_power = iwl4965_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
- .isr_ops = {
- .isr = iwl_isr_legacy,
- },
+ .update_chain_flags = iwl4965_update_chain_flags,
.temp_ops = {
.temperature = iwl4965_temperature_calib,
},
.debugfs_ops = {
- .rx_stats_read = iwl_ucode_rx_stats_read,
- .tx_stats_read = iwl_ucode_tx_stats_read,
- .general_stats_read = iwl_ucode_general_stats_read,
- .bt_stats_read = iwl_ucode_bt_stats_read,
- .reply_tx_error = iwl_reply_tx_error_read,
+ .rx_stats_read = iwl4965_ucode_rx_stats_read,
+ .tx_stats_read = iwl4965_ucode_tx_stats_read,
+ .general_stats_read = iwl4965_ucode_general_stats_read,
},
- .check_plcp_health = iwl_good_plcp_health,
+ .check_plcp_health = iwl4965_good_plcp_health,
};
static const struct iwl_legacy_ops iwl4965_legacy_ops = {
.post_associate = iwl4965_post_associate,
.config_ap = iwl4965_config_ap,
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
+ .manage_ibss_station = iwl4965_manage_ibss_station,
+ .update_bcast_stations = iwl4965_update_bcast_stations,
};
struct ieee80211_ops iwl4965_hw_ops = {
- .tx = iwlagn_mac_tx,
- .start = iwlagn_mac_start,
- .stop = iwlagn_mac_stop,
- .add_interface = iwl_mac_add_interface,
- .remove_interface = iwl_mac_remove_interface,
- .change_interface = iwl_mac_change_interface,
+ .tx = iwl4965_mac_tx,
+ .start = iwl4965_mac_start,
+ .stop = iwl4965_mac_stop,
+ .add_interface = iwl_legacy_mac_add_interface,
+ .remove_interface = iwl_legacy_mac_remove_interface,
+ .change_interface = iwl_legacy_mac_change_interface,
.config = iwl_legacy_mac_config,
- .configure_filter = iwlagn_configure_filter,
- .set_key = iwlagn_mac_set_key,
- .update_tkip_key = iwlagn_mac_update_tkip_key,
- .conf_tx = iwl_mac_conf_tx,
+ .configure_filter = iwl4965_configure_filter,
+ .set_key = iwl4965_mac_set_key,
+ .update_tkip_key = iwl4965_mac_update_tkip_key,
+ .conf_tx = iwl_legacy_mac_conf_tx,
.reset_tsf = iwl_legacy_mac_reset_tsf,
.bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .ampdu_action = iwlagn_mac_ampdu_action,
- .hw_scan = iwl_mac_hw_scan,
- .sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwl_mac_sta_remove,
- .channel_switch = iwlagn_mac_channel_switch,
- .flush = iwlagn_mac_flush,
- .tx_last_beacon = iwl_mac_tx_last_beacon,
+ .ampdu_action = iwl4965_mac_ampdu_action,
+ .hw_scan = iwl_legacy_mac_hw_scan,
+ .sta_add = iwl4965_mac_sta_add,
+ .sta_remove = iwl_legacy_mac_sta_remove,
+ .channel_switch = iwl4965_mac_channel_switch,
+ .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
static const struct iwl_ops iwl4965_ops = {
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
.utils = &iwl4965_hcmd_utils,
- .led = &iwlagn_led_ops,
+ .led = &iwl4965_led_ops,
.legacy = &iwl4965_legacy_ops,
.ieee80211_ops = &iwl4965_hw_ops,
};
@@ -2604,22 +2152,18 @@ static struct iwl_base_params iwl4965_base_params = {
.pll_cfg_val = 0,
.set_l0s = true,
.use_bsm = true,
- .use_isr_legacy = true,
- .broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.temperature_kelvin = true,
.max_event_log_size = 512,
- .tx_power_by_driver = true,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
- .no_agg_framecnt_info = true,
};
-struct iwl_cfg iwl4965_agn_cfg = {
+struct iwl_cfg iwl4965_cfg = {
.name = "Intel(R) Wireless WiFi Link 4965AGN",
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
@@ -2630,7 +2174,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
.ops = &iwl4965_ops,
- .mod_params = &iwlagn_mod_params,
+ .mod_params = &iwl4965_mod_params,
.base_params = &iwl4965_base_params,
.led_mode = IWL_LED_BLINK,
/*
@@ -2642,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = {
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-
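Note on the ops-table churn above: every iwlagn_*/iwl_* callback in iwl4965_lib, iwl4965_hcmd, iwl4965_hcmd_utils and iwl4965_hw_ops is swapped for an iwl4965_* or iwl_legacy_* equivalent, while call sites such as priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx) are untouched because they dispatch through the cfg/ops tables. The following is a minimal standalone sketch of that indirection pattern only; all demo_* names are hypothetical illustrations, not the driver's real types or symbols.

/*
 * Minimal sketch of the cfg->ops dispatch pattern seen in the hunks above.
 * Renaming the implementation only touches the table initializer; the call
 * site through the function pointer stays the same.
 */
#include <stdio.h>

struct demo_priv;

struct demo_hcmd_ops {
	void (*set_rxon_chain)(struct demo_priv *priv);
};

struct demo_ops {
	const struct demo_hcmd_ops *hcmd;
};

struct demo_cfg {
	const char *name;
	const struct demo_ops *ops;
};

struct demo_priv {
	const struct demo_cfg *cfg;
};

/* device-specific implementation, analogous to iwl4965_set_rxon_chain() */
static void demo4965_set_rxon_chain(struct demo_priv *priv)
{
	printf("set_rxon_chain for %s\n", priv->cfg->name);
}

static const struct demo_hcmd_ops demo4965_hcmd = {
	.set_rxon_chain = demo4965_set_rxon_chain,
};

static const struct demo_ops demo4965_ops = {
	.hcmd = &demo4965_hcmd,
};

static const struct demo_cfg demo4965_cfg = {
	.name = "demo 4965",
	.ops = &demo4965_ops,
};

int main(void)
{
	struct demo_priv priv = { .cfg = &demo4965_cfg };

	/* mirrors priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx) in the patch */
	if (priv.cfg->ops->hcmd->set_rxon_chain)
		priv.cfg->ops->hcmd->set_rxon_chain(&priv);
	return 0;
}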
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 000000000000..01f8163daf16
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_4965_h__
+#define __iwl_4965_h__
+
+#include "iwl-dev.h"
+
+/* configuration for the _4965 devices */
+extern struct iwl_cfg iwl4965_cfg;
+
+extern struct iwl_mod_params iwl4965_mod_params;
+
+extern struct ieee80211_ops iwl4965_hw_ops;
+
+/* tx queue */
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
+
+/* RXON */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+
+/* uCode */
+int iwl4965_verify_ucode(struct iwl_priv *priv);
+
+/* lib */
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status);
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_hw_nic_init(struct iwl_priv *priv);
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
+
+/* rx */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv);
+void iwl4965_rx_replenish(struct iwl_priv *priv);
+void iwl4965_rx_replenish_now(struct iwl_priv *priv);
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rxq_stop(struct iwl_priv *priv);
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_handle(struct iwl_priv *priv);
+
+/* tx */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id);
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/*
+ * Acquire priv->lock before calling this function !
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
+/**
+ * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE: Acquire priv->lock before calling this function !
+ */
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+
+static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool iwl4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS) ||
+ (status == TX_STATUS_DIRECT_DONE);
+}
+
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
+
+/* rx */
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl4965_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl4965_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/* scan */
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+
+/* hcmd */
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+iwl4965_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+
+/* station management */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl4965_add_bssid_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r);
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl4965_set_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
+ int sta_id, int tid);
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid);
+void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
+ int sta_id, int cnt);
+int iwl4965_update_bcast_stations(struct iwl_priv *priv);
+
+/* rate */
+static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
+{
+ return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
+{
+ return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+ return cpu_to_le32(flags|(u32)rate);
+}
+
+/* eeprom */
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
+int iwl4965_eeprom_check_version(struct iwl_priv *priv);
+
+/* mac80211 handlers (for 4965) */
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwl4965_mac_start(struct ieee80211_hw *hw);
+void iwl4965_mac_stop(struct ieee80211_hw *hw);
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key);
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+
+#endif /* __iwl_4965_h__ */
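The header above declares small inline helpers for the 4965 rate_n_flags word (iwl4965_ant_idx_to_flags(), iwl4965_hw_set_rate_n_flags(), iwl4965_hw_get_rate()). Below is a rough user-space sketch of how they compose; __le32/cpu_to_le32 are replaced by plain uint32_t (a little-endian host is assumed) and the RATE_MCS_ANT_POS value is illustrative rather than taken from the driver headers.

/*
 * User-space sketch of the rate_n_flags helpers declared in iwl-4965.h.
 * The bit position used for the antenna flags is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define RATE_MCS_ANT_POS	14	/* illustrative value, not from the driver */
#define BIT(n)			(1U << (n))

static inline uint32_t ant_idx_to_flags(uint8_t ant_idx)
{
	return BIT(ant_idx) << RATE_MCS_ANT_POS;	/* mirrors iwl4965_ant_idx_to_flags() */
}

static inline uint32_t set_rate_n_flags(uint8_t rate, uint32_t flags)
{
	return flags | (uint32_t)rate;			/* mirrors iwl4965_hw_set_rate_n_flags() */
}

static inline uint8_t get_rate(uint32_t rate_n_flags)
{
	return rate_n_flags & 0xFF;			/* mirrors iwl4965_hw_get_rate() */
}

int main(void)
{
	uint8_t plcp_rate = 0x0d;			/* example PLCP rate byte */
	uint32_t rnf = set_rate_n_flags(plcp_rate, ant_idx_to_flags(0));

	/* the low byte always carries the rate, whatever flags are OR'd in */
	printf("rate_n_flags=0x%08x rate=0x%02x\n", rnf, get_rate(rnf));
	return 0;
}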
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 000000000000..17a1d504348e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3405 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-commands.h) only for uCode API definitions.
+ * Please use iwl-xxxx-hw.h for hardware-related definitions.
+ * Please use iwl-dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_legacy_commands_h__
+#define __iwl_legacy_commands_h__
+
+struct iwl_priv;
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+
+/* Tx rates */
+#define IWL_CCK_RATES 4
+#define IWL_OFDM_RATES 8
+#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
+
+enum {
+ REPLY_ALIVE = 0x1,
+ REPLY_ERROR = 0x2,
+
+ /* RXON and QOS commands */
+ REPLY_RXON = 0x10,
+ REPLY_RXON_ASSOC = 0x11,
+ REPLY_QOS_PARAM = 0x13,
+ REPLY_RXON_TIMING = 0x14,
+
+ /* Multi-Station support */
+ REPLY_ADD_STA = 0x18,
+ REPLY_REMOVE_STA = 0x19,
+
+ /* Security */
+ REPLY_WEPKEY = 0x20,
+
+ /* RX, TX, LEDs */
+ REPLY_3945_RX = 0x1b, /* 3945 only */
+ REPLY_TX = 0x1c,
+ REPLY_RATE_SCALE = 0x47, /* 3945 only */
+ REPLY_LEDS_CMD = 0x48,
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+
+ /* 802.11h related */
+ REPLY_CHANNEL_SWITCH = 0x72,
+ CHANNEL_SWITCH_NOTIFICATION = 0x73,
+ REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
+ SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+
+ /* Power Management */
+ POWER_TABLE_CMD = 0x77,
+ PM_SLEEP_NOTIFICATION = 0x7A,
+ PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+
+ /* Scan commands and notifications */
+ REPLY_SCAN_CMD = 0x80,
+ REPLY_SCAN_ABORT_CMD = 0x81,
+ SCAN_START_NOTIFICATION = 0x82,
+ SCAN_RESULTS_NOTIFICATION = 0x83,
+ SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+ /* IBSS/AP commands */
+ BEACON_NOTIFICATION = 0x90,
+ REPLY_TX_BEACON = 0x91,
+
+ /* Miscellaneous commands */
+ REPLY_TX_PWR_TABLE_CMD = 0x97,
+
+ /* Bluetooth device coexistence config command */
+ REPLY_BT_CONFIG = 0x9b,
+
+ /* Statistics */
+ REPLY_STATISTICS_CMD = 0x9c,
+ STATISTICS_NOTIFICATION = 0x9d,
+
+ /* RF-KILL commands and notifications */
+ CARD_STATE_NOTIFICATION = 0xa1,
+
+ /* Missed beacons notification */
+ MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+ REPLY_CT_KILL_CONFIG_CMD = 0xa4,
+ SENSITIVITY_CMD = 0xa8,
+ REPLY_PHY_CALIBRATION_CMD = 0xb0,
+ REPLY_RX_PHY_CMD = 0xc0,
+ REPLY_RX_MPDU_CMD = 0xc1,
+ REPLY_RX = 0xc3,
+ REPLY_COMPRESSED_BA = 0xc5,
+
+ REPLY_MAX = 0xff
+};
+
+/******************************************************************************
+ * (0)
+ * Commonly used structures and definitions:
+ * Command header, rate_n_flags, txpower
+ *
+ *****************************************************************************/
+
+/* iwl_cmd_header flags value */
+#define IWL_CMD_FAILED_MSK 0x40
+
+#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s) ((s) & 0xff)
+#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
+#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+ u8 cmd; /* Command ID: REPLY_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+ /*
+ * The driver sets up the sequence number to values of its choosing.
+ * uCode does not use this value, but passes it back to the driver
+ * when sending the response to each driver-originated command, so
+ * the driver can match the response to the command. Since the values
+ * don't get used by uCode, the driver may set up an arbitrary format.
+ *
+ * There is one exception: uCode sets bit 15 when it originates
+ * the response/notification, i.e. when the response/notification
+ * is not a direct response to a command sent by the driver. For
+ * example, uCode issues REPLY_3945_RX when it sends a received frame
+ * to the driver; it is not a direct response to any driver command.
+ *
+ * The Linux driver uses the following format:
+ *
+ * 0:7 tfd index - position within TX queue
+ * 8:12 TX queue id
+ * 13 reserved
+ * 14 huge - driver sets this to indicate command is in the
+ * 'huge' storage at the end of the command buffers
+ * 15 unsolicited RX or uCode-originated notification
+ */
+ __le16 sequence;
+
+ /* command or response/notification data follows immediately */
+ u8 data[0];
+} __packed;
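+
+/*
+ * Illustrative sketch, not part of the uCode API: building the driver's
+ * sequence field from a TX queue id and TFD index with the helpers above,
+ * following the Linux driver format described in the struct comment.
+ * The function below exists only for this example and is not used by
+ * the driver.
+ */
+static inline __le16 iwl_example_build_sequence(unsigned int txq_id,
+ unsigned int tfd_idx)
+{
+ return cpu_to_le16(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(tfd_idx));
+}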
+
+
+/**
+ * struct iwl3945_tx_power
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Each entry contains two values:
+ * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
+ * linear value that multiplies the output of the digital signal processor,
+ * before being sent to the analog radio.
+ * 2) Radio gain. This sets the analog gain of the radio Tx path.
+ * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ */
+struct iwl3945_tx_power {
+ u8 tx_gain; /* gain for analog radio */
+ u8 dsp_atten; /* gain for DSP */
+} __packed;
+
+/**
+ * struct iwl3945_power_per_rate
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl3945_power_per_rate {
+ u8 rate; /* plcp */
+ struct iwl3945_tx_power tpc;
+ u8 reserved;
+} __packed;
+
+/**
+ * iwl4965 rate_n_flags bit fields
+ *
+ * rate_n_flags format is used in following iwl4965 commands:
+ * REPLY_RX (response only)
+ * REPLY_RX_MPDU (response only)
+ * REPLY_TX (both command and response)
+ * REPLY_TX_LINK_QUALITY_CMD
+ *
+ * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
+ * 2-0: 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ *
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ *
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *
+ * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ *
+ * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ */
+#define RATE_MCS_CODE_MSK 0x7
+#define RATE_MCS_SPATIAL_POS 3
+#define RATE_MCS_SPATIAL_MSK 0x18
+#define RATE_MCS_HT_DUP_POS 5
+#define RATE_MCS_HT_DUP_MSK 0x20
+
+/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
+#define RATE_MCS_FLAGS_POS 8
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK 0x100
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK 0x200
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_MCS_GF_POS 10
+#define RATE_MCS_GF_MSK 0x400
+
+/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
+#define RATE_MCS_HT40_POS 11
+#define RATE_MCS_HT40_MSK 0x800
+
+/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
+#define RATE_MCS_DUP_POS 12
+#define RATE_MCS_DUP_MSK 0x1000
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK 0x2000
+
+/**
+ * rate_n_flags Tx antenna masks
+ * 4965 has 2 transmitters
+ * bits 14:16
+ */
+#define RATE_MCS_ANT_POS 14
+#define RATE_MCS_ANT_A_MSK 0x04000
+#define RATE_MCS_ANT_B_MSK 0x08000
+#define RATE_MCS_ANT_C_MSK 0x10000
+#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
+#define RATE_ANT_NUM 3
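+
+/*
+ * Worked example (illustrative only): an HT rate_n_flags value built
+ * from the fields documented above -- MCS code 3 (24 Mbps base rate),
+ * single stream, HT40 channel, short guard interval, antenna A:
+ *
+ * rate_n_flags = 3 | RATE_MCS_HT_MSK | RATE_MCS_HT40_MSK |
+ * RATE_MCS_SGI_MSK | RATE_MCS_ANT_A_MSK;
+ *
+ * Clearing the HT flag (bit 8) and setting RATE_MCS_CCK_MSK instead
+ * selects the legacy CCK encoding of bits 7:0 described above.
+ */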
+
+#define POWER_TABLE_NUM_ENTRIES 33
+#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
+#define POWER_TABLE_CCK_ENTRY 32
+
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
+#define IWL_PWR_CCK_ENTRIES 2
+
+/**
+ * union iwl4965_tx_power_dual_stream
+ *
+ * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Use __le32 version (struct tx_power_dual_stream) when building command.
+ *
+ * Driver provides radio gain and DSP attenuation settings to device in pairs,
+ * one value for each transmitter chain. The first value is for transmitter A,
+ * second for transmitter B.
+ *
+ * For SISO bit rates, both values in a pair should be identical.
+ * For MIMO rates, one value may be different from the other,
+ * in order to balance the Tx output between the two transmitters.
+ *
+ * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ */
+union iwl4965_tx_power_dual_stream {
+ struct {
+ u8 radio_tx_gain[2];
+ u8 dsp_predis_atten[2];
+ } s;
+ u32 dw;
+};
+
+/**
+ * struct tx_power_dual_stream
+ *
+ * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Same format as iwl_tx_power_dual_stream, but __le32
+ */
+struct tx_power_dual_stream {
+ __le32 dw;
+} __packed;
+
+/**
+ * struct iwl4965_tx_power_db
+ *
+ * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl4965_tx_power_db {
+ struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+} __packed;
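+
+/*
+ * Illustrative sketch, not part of the uCode API: filling one table
+ * entry. The gain variables are placeholders; the driver derives real
+ * values from EEPROM calibration data.
+ *
+ * union iwl4965_tx_power_dual_stream hp;
+ * struct tx_power_dual_stream entry;
+ *
+ * hp.s.radio_tx_gain[0] = radio_gain_a; (transmitter A)
+ * hp.s.radio_tx_gain[1] = radio_gain_b; (transmitter B)
+ * hp.s.dsp_predis_atten[0] = dsp_atten_a;
+ * hp.s.dsp_predis_atten[1] = dsp_atten_b;
+ * entry.dw = cpu_to_le32(hp.dw);
+ */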
+
+/******************************************************************************
+ * (0a)
+ * Alive and Error Commands & Responses:
+ *
+ *****************************************************************************/
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+#define INITIALIZE_SUBTYPE (9)
+
+/*
+ * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "initialize alive" notification once the initialization
+ * uCode image has completed its work, and is ready to load the runtime image.
+ * This is the *first* "alive" notification that the driver will receive after
+ * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * For 4965, this notification contains important calibration data for
+ * calculating txpower settings:
+ *
+ * 1) Power supply voltage indication. The voltage sensor outputs higher
+ * values for lower voltage, and vice versa.
+ *
+ * 2) Temperature measurement parameters, for each of two channel widths
+ * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
+ * is done via one of the receiver chains, and channel width influences
+ * the results.
+ *
+ * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
+ * for each of 5 frequency ranges.
+ */
+struct iwl_init_alive_resp {
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 reserved1;
+ u8 sw_rev[8];
+ u8 ver_type;
+ u8 ver_subtype; /* "9" for initialize alive */
+ __le16 reserved2;
+ __le32 log_event_table_ptr;
+ __le32 error_event_table_ptr;
+ __le32 timestamp;
+ __le32 is_valid;
+
+ /* calibration values from "initialize" uCode */
+ __le32 voltage; /* signed, higher value is lower voltage */
+ __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
+ __le32 therm_r2[2]; /* signed */
+ __le32 therm_r3[2]; /* signed */
+ __le32 therm_r4[2]; /* signed */
+ __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
+ * 2 Tx chains */
+} __packed;
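+
+/*
+ * Illustrative sketch, not part of the uCode API: telling the two
+ * "alive" notifications apart and checking validity, using the
+ * definitions above. handle_init_alive()/handle_runtime_alive() are
+ * hypothetical placeholders for driver code.
+ *
+ * if (palive->is_valid != UCODE_VALID_OK)
+ * return; (uCode did not come up cleanly)
+ * if (palive->ver_subtype == INITIALIZE_SUBTYPE)
+ * handle_init_alive(palive); (collect calibration data)
+ * else
+ * handle_runtime_alive(palive); (runtime image, ready for commands)
+ */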
+
+
+/**
+ * REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "alive" notification once the runtime image is ready
+ * to receive commands from the driver. This is the *second* "alive"
+ * notification that the driver will receive after rebooting uCode;
+ * this "alive" is indicated by subtype field != 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * This response includes two pointers to structures within the device's
+ * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
+ *
+ * 1) log_event_table_ptr indicates base of the event log. This traces
+ * a 256-entry history of uCode execution within a circular buffer.
+ * Its header format is:
+ *
+ * __le32 log_size; log capacity (in number of entries)
+ * __le32 type; (1) timestamp with each entry, (0) no timestamp
+ * __le32 wraps; # times uCode has wrapped to top of circular buffer
+ * __le32 write_index; next circular buffer entry that uCode would fill
+ *
+ * The header is followed by the circular buffer of log entries. Entries
+ * with timestamps have the following format:
+ *
+ * __le32 event_id; range 0 - 1500
+ * __le32 timestamp; low 32 bits of TSF (of network, if associated)
+ * __le32 data; event_id-specific data value
+ *
+ * Entries without timestamps contain only event_id and data.
+ *
+ *
+ * 2) error_event_table_ptr indicates base of the error log. This contains
+ * information about any uCode error that occurs. For 4965, the format
+ * of the error log is:
+ *
+ * __le32 valid; (nonzero) valid, (0) log is empty
+ * __le32 error_id; type of error
+ * __le32 pc; program counter
+ * __le32 blink1; branch link
+ * __le32 blink2; branch link
+ * __le32 ilink1; interrupt link
+ * __le32 ilink2; interrupt link
+ * __le32 data1; error-specific data
+ * __le32 data2; error-specific data
+ * __le32 line; source code line of error
+ * __le32 bcon_time; beacon timer
+ * __le32 tsf_low; network timestamp function timer
+ * __le32 tsf_hi; network timestamp function timer
+ * __le32 gp1; GP1 timer register
+ * __le32 gp2; GP2 timer register
+ * __le32 gp3; GP3 timer register
+ * __le32 ucode_ver; uCode version
+ * __le32 hw_ver; HW Silicon version
+ * __le32 brd_ver; HW board version
+ * __le32 log_pc; log program counter
+ * __le32 frame_ptr; frame pointer
+ * __le32 stack_ptr; stack pointer
+ * __le32 hcmd; last host command
+ * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
+ * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
+ * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
+ * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
+ * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
+ * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
+ * __le32 wait_event; wait event() caller address
+ * __le32 l2p_control; L2pControlField
+ * __le32 l2p_duration; L2pDurationField
+ * __le32 l2p_mhvalid; L2pMhValidBits
+ * __le32 l2p_addr_match; L2pAddrMatchStat
+ * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
+ * __le32 u_timestamp; indicates the date and time of the compilation
+ * __le32 reserved;
+ *
+ * The Linux driver can print both logs to the system log when a uCode error
+ * occurs.
+ */
+struct iwl_alive_resp {
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 reserved1;
+ u8 sw_rev[8];
+ u8 ver_type;
+ u8 ver_subtype; /* not "9" for runtime alive */
+ __le16 reserved2;
+ __le32 log_event_table_ptr; /* SRAM address for event log */
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 timestamp;
+ __le32 is_valid;
+} __packed;
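+
+/*
+ * Illustrative sketch, not part of the uCode API defined in this file:
+ * the in-SRAM event log header and entry layouts documented above,
+ * written out as structures. The driver actually reads these words one
+ * at a time through the HBUS_TARG_MEM_* registers; these types exist
+ * only for this example.
+ */
+struct iwl_example_event_log_hdr {
+ __le32 log_size; /* capacity, in number of entries */
+ __le32 type; /* (1) timestamp with each entry, (0) no timestamp */
+ __le32 wraps; /* # times uCode wrapped to top of circular buffer */
+ __le32 write_index; /* next entry uCode will fill */
+} __packed;
+
+struct iwl_example_event_log_entry {
+ __le32 event_id; /* range 0 - 1500 */
+ __le32 timestamp; /* low 32 bits of TSF; only present if type == 1 */
+ __le32 data; /* event_id-specific data value */
+} __packed;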
+
+/*
+ * REPLY_ERROR = 0x2 (response only, not a command)
+ */
+struct iwl_error_resp {
+ __le32 error_type;
+ u8 cmd_id;
+ u8 reserved1;
+ __le16 bad_cmd_seq_num;
+ __le32 error_info;
+ __le64 timestamp;
+} __packed;
+
+/******************************************************************************
+ * (1)
+ * RXON Commands & Responses:
+ *
+ *****************************************************************************/
+
+/*
+ * Rx config defines & structure
+ */
+/* rx_config device types */
+enum {
+ RXON_DEV_TYPE_AP = 1,
+ RXON_DEV_TYPE_ESS = 3,
+ RXON_DEV_TYPE_IBSS = 4,
+ RXON_DEV_TYPE_SNIFFER = 6,
+};
+
+
+#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
+#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
+#define RXON_RX_CHAIN_VALID_POS (1)
+#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
+#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
+#define RXON_RX_CHAIN_CNT_POS (10)
+#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
+#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
+#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
+#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
+
+/* rx_config flags */
+/* band & modulation selection */
+#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
+#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
+/* auto detection enable */
+#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
+/* TGg protection when tx */
+#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
+/* cck short slot & preamble */
+#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
+#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
+/* antenna selection */
+#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
+#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
+#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+/* radar detection enable */
+#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
+#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
+/* rx response to host with 8-byte TSF
+ * (according to ON_AIR deassertion) */
+#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
+
+
+/* HT flags */
+#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
+#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
+
+#define RXON_FLG_HT_OPERATING_MODE_POS (23)
+
+#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
+#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
+
+#define RXON_FLG_CHANNEL_MODE_POS (25)
+#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
+
+/* channel mode */
+enum {
+ CHANNEL_MODE_LEGACY = 0,
+ CHANNEL_MODE_PURE_40 = 1,
+ CHANNEL_MODE_MIXED = 2,
+ CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY \
+ cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40 \
+ cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED \
+ cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
+/* CTS to self (if spec allows) flag */
+#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
+
+/* rx_config filter flags */
+/* accept all data frames */
+#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
+/* pass control & management to host */
+#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
+/* accept multi-cast */
+#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
+/* don't decrypt uni-cast frames */
+#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
+/* don't decrypt multi-cast frames */
+#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
+/* STA is associated */
+#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
+/* transfer to host non bssid beacons in associated state */
+#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
+
+/**
+ * REPLY_RXON = 0x10 (command, has simple generic response)
+ *
+ * RXON tunes the radio tuner to a service channel, and sets up a number
+ * of parameters that are used primarily for Rx, but also for Tx operations.
+ *
+ * NOTE: When tuning to a new channel, driver must set the
+ * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
+ * info within the device, including the station tables, tx retry
+ * rate tables, and txpower tables. Driver must build a new station
+ * table and txpower table before transmitting anything on the RXON
+ * channel.
+ *
+ * NOTE: All RXONs wipe clean the internal txpower table. Driver must
+ * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ * regardless of whether RXON_FILTER_ASSOC_MSK is set.
+ */
+
+struct iwl3945_rxon_cmd {
+ u8 node_addr[6];
+ __le16 reserved1;
+ u8 bssid_addr[6];
+ __le16 reserved2;
+ u8 wlap_bssid_addr[6];
+ __le16 reserved3;
+ u8 dev_type;
+ u8 air_propagation;
+ __le16 reserved4;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 assoc_id;
+ __le32 flags;
+ __le32 filter_flags;
+ __le16 channel;
+ __le16 reserved5;
+} __packed;
+
+struct iwl4965_rxon_cmd {
+ u8 node_addr[6];
+ __le16 reserved1;
+ u8 bssid_addr[6];
+ __le16 reserved2;
+ u8 wlap_bssid_addr[6];
+ __le16 reserved3;
+ u8 dev_type;
+ u8 air_propagation;
+ __le16 rx_chain;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 assoc_id;
+ __le32 flags;
+ __le32 filter_flags;
+ __le16 channel;
+ u8 ofdm_ht_single_stream_basic_rates;
+ u8 ofdm_ht_dual_stream_basic_rates;
+} __packed;
+
+/* Create a common rxon cmd which will be typecast into the 3945 or 4965
+ * specific rxon cmd, depending on where it is called from.
+ */
+struct iwl_legacy_rxon_cmd {
+ u8 node_addr[6];
+ __le16 reserved1;
+ u8 bssid_addr[6];
+ __le16 reserved2;
+ u8 wlap_bssid_addr[6];
+ __le16 reserved3;
+ u8 dev_type;
+ u8 air_propagation;
+ __le16 rx_chain;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 assoc_id;
+ __le32 flags;
+ __le32 filter_flags;
+ __le16 channel;
+ u8 ofdm_ht_single_stream_basic_rates;
+ u8 ofdm_ht_dual_stream_basic_rates;
+ u8 reserved4;
+ u8 reserved5;
+} __packed;
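+
+/*
+ * Illustrative sketch, not part of the uCode API: the channel-tuning
+ * sequence implied by the NOTEs above. iwl_example_send() is a
+ * hypothetical stand-in for the driver's command submission path.
+ *
+ * rxon->channel = cpu_to_le16(new_channel);
+ * rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ * iwl_example_send(REPLY_RXON, rxon); (wipes station/txpower tables)
+ * ... rebuild station table via REPLY_ADD_STA ...
+ * iwl_example_send(REPLY_TX_PWR_TABLE_CMD, txpower);
+ * rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ * iwl_example_send(REPLY_RXON, rxon);
+ * iwl_example_send(REPLY_TX_PWR_TABLE_CMD, txpower); (required again
+ * after each RXON)
+ */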
+
+
+/*
+ * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ */
+struct iwl3945_rxon_assoc_cmd {
+ __le32 flags;
+ __le32 filter_flags;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 reserved;
+} __packed;
+
+struct iwl4965_rxon_assoc_cmd {
+ __le32 flags;
+ __le32 filter_flags;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ u8 ofdm_ht_single_stream_basic_rates;
+ u8 ofdm_ht_dual_stream_basic_rates;
+ __le16 rx_chain_select_flags;
+ __le16 reserved;
+} __packed;
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
+#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
+#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
+
+/*
+ * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ */
+struct iwl_rxon_time_cmd {
+ __le64 timestamp;
+ __le16 beacon_interval;
+ __le16 atim_window;
+ __le32 beacon_init_val;
+ __le16 listen_interval;
+ u8 dtim_period;
+ u8 delta_cp_bss_tbtts;
+} __packed;
+
+/*
+ * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ */
+struct iwl3945_channel_switch_cmd {
+ u8 band;
+ u8 expect_beacon;
+ __le16 channel;
+ __le32 rxon_flags;
+ __le32 rxon_filter_flags;
+ __le32 switch_time;
+ struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_channel_switch_cmd {
+ u8 band;
+ u8 expect_beacon;
+ __le16 channel;
+ __le32 rxon_flags;
+ __le32 rxon_filter_flags;
+ __le32 switch_time;
+ struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+/*
+ * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ */
+struct iwl_csa_notification {
+ __le16 band;
+ __le16 channel;
+ __le32 status; /* 0 - OK, 1 - fail */
+} __packed;
+
+/******************************************************************************
+ * (2)
+ * Quality-of-Service (QOS) Commands & Responses:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ *
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifsn;
+ u8 reserved1;
+ __le16 edca_txop;
+} __packed;
+
+/* QoS flags defines */
+#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
+#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
+#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
+
+/* Number of Access Categories (AC) (EDCA), queues 0..3 */
+#define AC_NUM 4
+
+/*
+ * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ *
+ * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
+ * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
+ */
+struct iwl_qosparam_cmd {
+ __le32 qos_flags;
+ struct iwl_ac_qos ac[AC_NUM];
+} __packed;
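+
+/*
+ * Illustrative sketch, not part of the uCode API: filling the command
+ * with the device defaults quoted in the struct iwl_ac_qos comment
+ * above (cw_min 0x0f, cw_max 0x3f, aifsn 1) for every access category.
+ *
+ * struct iwl_qosparam_cmd qos = {
+ * .qos_flags = QOS_PARAM_FLG_UPDATE_EDCA_MSK,
+ * };
+ * int ac;
+ *
+ * for (ac = 0; ac < AC_NUM; ac++) {
+ * qos.ac[ac].cw_min = cpu_to_le16(0x0f);
+ * qos.ac[ac].cw_max = cpu_to_le16(0x3f);
+ * qos.ac[ac].aifsn = 1;
+ * qos.ac[ac].edca_txop = 0;
+ * }
+ */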
+
+/******************************************************************************
+ * (3)
+ * Add/Modify Stations Commands & Responses:
+ *
+ *****************************************************************************/
+/*
+ * Multi station support
+ */
+
+/* Special, dedicated locations within device's station table */
+#define IWL_AP_ID 0
+#define IWL_STA_ID 2
+#define IWL3945_BROADCAST_ID 24
+#define IWL3945_STATION_COUNT 25
+#define IWL4965_BROADCAST_ID 31
+#define IWL4965_STATION_COUNT 32
+
+#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
+#define IWL_INVALID_STATION 255
+
+#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
+#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
+#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
+#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
+#define STA_FLG_MAX_AGG_SIZE_POS (19)
+#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
+#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
+#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
+#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
+#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
+
+/* Use in mode field. 1: modify existing entry, 0: add new station entry */
+#define STA_CONTROL_MODIFY_MSK 0x01
+
+/* key flags __le16*/
+#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
+#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
+#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
+#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
+#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
+
+#define STA_KEY_FLG_KEYID_POS 8
+#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
+/* wep key is either from global key (0) or from station info array (1) */
+#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
+
+/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
+#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
+#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
+#define STA_KEY_MAX_NUM 8
+
+/* Flags indicate whether to modify vs. don't change various station params */
+#define STA_MODIFY_KEY_MASK 0x01
+#define STA_MODIFY_TID_DISABLE_TX 0x02
+#define STA_MODIFY_TX_RATE_MSK 0x04
+#define STA_MODIFY_ADDBA_TID_MSK 0x08
+#define STA_MODIFY_DELBA_TID_MSK 0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
+
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
+
+struct iwl4965_keyinfo {
+ __le16 key_flags;
+ u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
+ u8 reserved1;
+ __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
+ u8 key_offset;
+ u8 reserved2;
+ u8 key[16]; /* 16-byte unicast decryption key */
+} __packed;
+
+/**
+ * struct sta_id_modify
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
+ *
+ * Driver selects unused table index when adding new station,
+ * or the index to a pre-existing station entry when modifying that station.
+ * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ *
+ * modify_mask flags select which parameters to modify vs. leave alone.
+ */
+struct sta_id_modify {
+ u8 addr[ETH_ALEN];
+ __le16 reserved1;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved2;
+} __packed;
+
+/*
+ * REPLY_ADD_STA = 0x18 (command)
+ *
+ * The device contains an internal table of per-station information,
+ * with info on security keys, aggregation parameters, and Tx rates for
+ * initial Tx attempt and any retries (4965 devices use
+ * REPLY_TX_LINK_QUALITY_CMD,
+ * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ *
+ * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * a new entry, or modifying a pre-existing one.
+ *
+ * NOTE: RXON command (without "associated" bit set) wipes the station table
+ * clean. Moving into RF_KILL state does this also. Driver must set up
+ * new station table before transmitting anything on the RXON channel
+ * (except active scans or active measurements; those commands carry
+ * their own txpower/rate setup data).
+ *
+ * When getting started on a new channel, driver must set up the
+ * IWL_BROADCAST_ID entry (last entry in the table). For a client
+ * station in a BSS, once an AP is selected, driver sets up the AP STA
+ * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
+ * are all that are needed for a BSS client station. If the device is
+ * used as AP, or in an IBSS network, driver must set up station table
+ * entries for all STAs in network, starting with index IWL_STA_ID.
+ */
+
+struct iwl3945_addsta_cmd {
+ u8 mode; /* 1: modify existing, 0: add new station */
+ u8 reserved[3];
+ struct sta_id_modify sta;
+ struct iwl4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
+ __le32 station_flags_msk; /* STA_FLG_* */
+
+ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+ * corresponding to bit (e.g. bit 5 controls TID 5).
+ * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+ __le16 tid_disable_tx;
+
+ __le16 rate_n_flags;
+
+ /* TID for which to add block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ u8 add_immediate_ba_tid;
+
+ /* TID for which to remove block-ack support.
+ * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+ u8 remove_immediate_ba_tid;
+
+ /* Starting Sequence Number for added block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ __le16 add_immediate_ba_ssn;
+} __packed;
+
+struct iwl4965_addsta_cmd {
+ u8 mode; /* 1: modify existing, 0: add new station */
+ u8 reserved[3];
+ struct sta_id_modify sta;
+ struct iwl4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
+ __le32 station_flags_msk; /* STA_FLG_* */
+
+ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+ * corresponding to bit (e.g. bit 5 controls TID 5).
+ * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+ __le16 tid_disable_tx;
+
+ __le16 reserved1;
+
+ /* TID for which to add block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ u8 add_immediate_ba_tid;
+
+ /* TID for which to remove block-ack support.
+ * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+ u8 remove_immediate_ba_tid;
+
+ /* Starting Sequence Number for added block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ __le16 add_immediate_ba_ssn;
+
+ /*
+ * Number of packets OK to transmit to station even though
+ * it is asleep -- used to synchronise PS-poll and u-APSD
+ * responses while ucode keeps track of STA sleep state.
+ */
+ __le16 sleep_tx_count;
+
+ __le16 reserved2;
+} __packed;
+
+/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
+struct iwl_legacy_addsta_cmd {
+ u8 mode; /* 1: modify existing, 0: add new station */
+ u8 reserved[3];
+ struct sta_id_modify sta;
+ struct iwl4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
+ __le32 station_flags_msk; /* STA_FLG_* */
+
+ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+ * corresponding to bit (e.g. bit 5 controls TID 5).
+ * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+ __le16 tid_disable_tx;
+
+ __le16 rate_n_flags; /* 3945 only */
+
+ /* TID for which to add block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ u8 add_immediate_ba_tid;
+
+ /* TID for which to remove block-ack support.
+ * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+ u8 remove_immediate_ba_tid;
+
+ /* Starting Sequence Number for added block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ __le16 add_immediate_ba_ssn;
+
+ /*
+ * Number of packets OK to transmit to station even though
+ * it is asleep -- used to synchronise PS-poll and u-APSD
+ * responses while ucode keeps track of STA sleep state.
+ */
+ __le16 sleep_tx_count;
+
+ __le16 reserved2;
+} __packed;
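+
+/*
+ * Illustrative sketch, not part of the uCode API: minimal "add new
+ * station" setup for the broadcast entry, per the NOTEs above.
+ * broadcast_addr is a placeholder for the all-ones MAC address; a 3945
+ * would use IWL3945_BROADCAST_ID instead.
+ *
+ * struct iwl_legacy_addsta_cmd sta = { .mode = 0 }; (0 = add new)
+ *
+ * memcpy(sta.sta.addr, broadcast_addr, ETH_ALEN);
+ * sta.sta.sta_id = IWL4965_BROADCAST_ID;
+ * sta.station_flags = 0;
+ */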
+
+
+#define ADD_STA_SUCCESS_MSK 0x1
+#define ADD_STA_NO_ROOM_IN_TABLE 0x2
+#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
+#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
+/*
+ * REPLY_ADD_STA = 0x18 (response)
+ */
+struct iwl_add_sta_resp {
+ u8 status; /* ADD_STA_* */
+} __packed;
+
+#define REM_STA_SUCCESS_MSK 0x1
+/*
+ * REPLY_REMOVE_STA = 0x19 (response)
+ */
+struct iwl_rem_sta_resp {
+ u8 status;
+} __packed;
+
+/*
+ * REPLY_REMOVE_STA = 0x19 (command)
+ */
+struct iwl_rem_sta_cmd {
+ u8 num_sta; /* number of removed stations */
+ u8 reserved[3];
+ u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+ u8 reserved2[2];
+} __packed;
+
+#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
+#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
+#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
+#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+
+#define IWL_DROP_SINGLE 0
+#define IWL_DROP_SELECTED 1
+#define IWL_DROP_ALL 2
+
+/*
+ * REPLY_WEPKEY = 0x20
+ */
+struct iwl_wep_key {
+ u8 key_index;
+ u8 key_offset;
+ u8 reserved1[2];
+ u8 key_size;
+ u8 reserved2[3];
+ u8 key[16];
+} __packed;
+
+struct iwl_wep_cmd {
+ u8 num_keys;
+ u8 global_key_type;
+ u8 flags;
+ u8 reserved;
+ struct iwl_wep_key key[0];
+} __packed;
+
+#define WEP_KEY_WEP_TYPE 1
+#define WEP_KEYS_MAX 4
+#define WEP_INVALID_OFFSET 0xff
+#define WEP_KEY_LEN_64 5
+#define WEP_KEY_LEN_128 13
+
+/******************************************************************************
+ * (4)
+ * Rx Responses:
+ *
+ *****************************************************************************/
+
+#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
+#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
+
+#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
+#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
+#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
+#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
+
+#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
+#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
+#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
+#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
+#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
+#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
+
+#define RX_RES_STATUS_STATION_FOUND (1<<6)
+#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
+
+#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
+#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
+#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
+#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
+#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
+
+#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
+#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
+#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
+#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
+
+
+struct iwl3945_rx_frame_stats {
+ u8 phy_count;
+ u8 id;
+ u8 rssi;
+ u8 agc;
+ __le16 sig_avg;
+ __le16 noise_diff;
+ u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_hdr {
+ __le16 channel;
+ __le16 phy_flags;
+ u8 reserved1;
+ u8 rate;
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_end {
+ __le32 status;
+ __le64 timestamp;
+ __le32 beacon_timestamp;
+} __packed;
+
+/*
+ * REPLY_3945_RX = 0x1b (response only, not a command)
+ *
+ * NOTE: DO NOT dereference from casts to this structure
+ * It is provided only for calculating minimum data set size.
+ * The actual offsets of the hdr and end are dynamic based on
+ * stats.phy_count
+ */
+struct iwl3945_rx_frame {
+ struct iwl3945_rx_frame_stats stats;
+ struct iwl3945_rx_frame_hdr hdr;
+ struct iwl3945_rx_frame_end end;
+} __packed;
+
+#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
+
+/* Fixed (non-configurable) rx data from phy */
+
+#define IWL49_RX_RES_PHY_CNT 14
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IWL49_AGC_DB_POS (7)
+struct iwl4965_rx_non_cfg_phy {
+ __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
+ __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
+ u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
+ u8 pad[0];
+} __packed;
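+
+/*
+ * Illustrative sketch, not part of the uCode API: extracting the AGC
+ * and per-antenna RSSI values from the non-configurable phy data,
+ * using the IWL49_* definitions and field comments above.
+ *
+ * u8 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) >>
+ * IWL49_AGC_DB_POS;
+ * u8 rssi_a = ncphy->rssi_info[0]; (even entries: A/B/C)
+ * u8 rssi_b = ncphy->rssi_info[2];
+ * u8 rssi_c = ncphy->rssi_info[4];
+ */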
+
+
+/*
+ * REPLY_RX = 0xc3 (response only, not a command)
+ * Used only for legacy (non 11n) frames.
+ */
+struct iwl_rx_phy_res {
+ u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
+ u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
+ u8 stat_id; /* configurable DSP phy data set ID */
+ u8 reserved1;
+ __le64 timestamp; /* TSF at on air rise */
+ __le32 beacon_time_stamp; /* beacon at on-air rise */
+ __le16 phy_flags; /* general phy flags: band, modulation, ... */
+ __le16 channel; /* channel number */
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+ __le16 byte_count; /* frame's byte-count */
+ __le16 frame_time; /* frame's time on the air */
+} __packed;
+
+struct iwl_rx_mpdu_res_start {
+ __le16 byte_count;
+ __le16 reserved;
+} __packed;
+
+
+/******************************************************************************
+ * (5)
+ * Tx Commands & Responses:
+ *
+ * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * queues in host DRAM, shared between driver and device (see comments for
+ * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
+ * are preparing to transmit, the device pulls the Tx command over the PCI
+ * bus via one of the device's Tx DMA channels, to fill an internal FIFO
+ * from which data will be transmitted.
+ *
+ * uCode handles all timing and protocol related to control frames
+ * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
+ * handle reception of block-acks; uCode updates the host driver via
+ * REPLY_COMPRESSED_BA.
+ *
+ * uCode handles retrying Tx when an ACK is expected but not received.
+ * This includes trying lower data rates than the one requested in the Tx
+ * command, as set up by the REPLY_RATE_SCALE (for 3945) or
+ * REPLY_TX_LINK_QUALITY_CMD (4965).
+ *
+ * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * This command must be executed after every RXON command, before Tx can occur.
+ *****************************************************************************/
+
+/* REPLY_TX Tx flags field */
+
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ */
+#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
+
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
+ * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ */
+#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
+
+/* 1: Expect ACK from receiving station
+ * 0: Don't expect ACK (MAC header's duration field s/b 0)
+ * Set this for unicast frames, but not broadcast/multicast. */
+#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
+
+/* For 4965 devices:
+ * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
+ * Tx command's initial_rate_index indicates first rate to try;
+ * uCode walks through table for additional Tx attempts.
+ * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
+ * This rate will be used for all Tx attempts; it will not be scaled. */
+#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
+
+/* 1: Expect immediate block-ack.
+ * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
+#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
+
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ */
+#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
+
+/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
+ * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
+#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+
+/* 1: uCode overrides sequence control field in MAC header.
+ * 0: Driver provides sequence control field in MAC header.
+ * Set this for management frames, non-QOS data frames, non-unicast frames,
+ * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
+
+/* 1: This frame is non-last MPDU; more fragments are coming.
+ * 0: Last fragment, or not using fragmentation. */
+#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
+
+/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
+ * 0: No TSF required in outgoing frame.
+ * Set this for transmitting beacons and probe responses. */
+#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
+
+/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
+ * alignment of frame's payload data field.
+ * 0: No pad
+ * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
+ * field (but not both). Driver must align frame data (i.e. data following
+ * MAC header) to DWORD boundary. */
+#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
+
+/* accelerate aggregation support
+ * 0 - no CCMP encryption; 1 - CCMP encryption */
+#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
+
+/* HCCA-AP - disable duration overwriting. */
+#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
+
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP 0x01
+#define TX_CMD_SEC_CCM 0x02
+#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_MSK 0x03
+#define TX_CMD_SEC_SHIFT 6
+#define TX_CMD_SEC_KEY128 0x08
+
+/*
+ * security overhead sizes
+ */
+#define WEP_IV_LEN 4
+#define WEP_ICV_LEN 4
+#define CCMP_MIC_LEN 8
+#define TKIP_ICV_LEN 4
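+
+/*
+ * Illustrative sketch, not part of the uCode API: encoding the sec_ctl
+ * byte of a Tx command from the definitions above. keyidx and keylen
+ * are placeholders for the caller's key material; the key itself goes
+ * into the Tx command's key[16] field.
+ *
+ * CCMP: sec_ctl = TX_CMD_SEC_CCM;
+ * TKIP: sec_ctl = TX_CMD_SEC_TKIP;
+ * WEP: sec_ctl = TX_CMD_SEC_WEP |
+ * ((keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+ * if (keylen == WEP_KEY_LEN_128)
+ * sec_ctl |= TX_CMD_SEC_KEY128;
+ */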
+
+/*
+ * REPLY_TX = 0x1c (command)
+ */
+
+struct iwl3945_tx_cmd {
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ u8 rate;
+
+ /* Index of recipient station in uCode's station table */
+ u8 sta_id;
+ u8 tid_tspec;
+ u8 sec_ctl;
+ u8 key[16];
+ union {
+ u8 byte[8];
+ __le16 word[4];
+ __le32 dw[2];
+ } tkip_mic;
+ __le32 next_frame_info;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+ u8 supp_rates[2];
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+
+ /*
+ * MAC header goes here, followed by 2 bytes padding if MAC header
+ * length is 26 or 30 bytes, followed by payload data
+ */
+ u8 payload[0];
+ struct ieee80211_hdr hdr[0];
+} __packed;
+
+/*
+ * REPLY_TX = 0x1c (response)
+ */
+struct iwl3945_tx_resp {
+ u8 failure_rts;
+ u8 failure_frame;
+ u8 bt_kill_count;
+ u8 rate;
+ __le32 wireless_media_time;
+ __le32 status; /* TX status */
+} __packed;
+
+
+/*
+ * 4965 uCode updates these Tx attempt count values in host DRAM.
+ * Used for managing Tx retries when expecting block-acks.
+ * Driver should set these fields to 0.
+ */
+struct iwl_dram_scratch {
+ u8 try_cnt; /* Tx attempts */
+ u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
+ __le16 reserved;
+} __packed;
+
+struct iwl_tx_cmd {
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ /* uCode may modify this field of the Tx command (in host DRAM!).
+ * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+ struct iwl_dram_scratch scratch;
+
+ /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* Index of destination station in uCode's station table */
+ u8 sta_id;
+
+ /* Type of security encryption: CCM or TKIP */
+ u8 sec_ctl; /* TX_CMD_SEC_* */
+
+ /*
+ * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
+ * data frames, this field may be used to selectively reduce initial
+ * rate (via non-0 value) for special frames (e.g. management), while
+ * still supporting rate scaling for all frames.
+ */
+ u8 initial_rate_index;
+ u8 reserved;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved2;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+
+ /* Host DRAM physical address pointer to "scratch" in this command.
+ * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ u8 tid_tspec;
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+
+ /*
+ * MAC header goes here, followed by 2 bytes padding if MAC header
+ * length is 26 or 30 bytes, followed by payload data
+ */
+ u8 payload[0];
+ struct ieee80211_hdr hdr[0];
+} __packed;
+
+/* TX command response is sent after *3945* transmission attempts.
+ *
+ * NOTES:
+ *
+ * TX_STATUS_FAIL_NEXT_FRAG
+ *
+ * If the fragment flag in the MAC header for the frame being transmitted
+ * is set and there is insufficient time to transmit the next frame, the
+ * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
+ *
+ * TX_STATUS_FIFO_UNDERRUN
+ *
+ * Indicates the host did not provide bytes to the FIFO fast enough while
+ * a TX was in progress.
+ *
+ * TX_STATUS_FAIL_MGMNT_ABORT
+ *
+ * This status is only possible if the ABORT ON MGMT RX parameter was
+ * set to true with the TX command.
+ *
+ * If the MSB of the status parameter is set then an abort sequence is
+ * required. This sequence consists of the host activating the TX Abort
+ * control line, and then waiting for the TX Abort command response. This
+ * indicates that the device is no longer in a transmit state, and that the
+ * command FIFO has been cleared. The host must then deactivate the TX Abort
+ * control line. Receiving is still allowed in this case.
+ */
+enum {
+ TX_3945_STATUS_SUCCESS = 0x01,
+ TX_3945_STATUS_DIRECT_DONE = 0x02,
+ TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+ TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+ TX_3945_STATUS_FAIL_ABORTED = 0x89,
+ TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+ TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *4965* transmission attempts.
+ *
+ * Both postpone and abort status are expected behavior from uCode. There is
+ * no special operation required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all the tx frames in the
+ * queues.
+ */
+enum {
+ TX_STATUS_SUCCESS = 0x01,
+ TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+ TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+ TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_STATUS_FAIL_DEST_PS = 0x88,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+ TX_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
+ TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+#define TX_PACKET_MODE_REGULAR 0x0000
+#define TX_PACKET_MODE_BURST_SEQ 0x0100
+#define TX_PACKET_MODE_BURST_FIRST 0x0200
+
+enum {
+ TX_POWER_PA_NOT_ACTIVE = 0x0,
+};
+
+enum {
+ TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
+ TX_STATUS_DELAY_MSK = 0x00000040,
+ TX_STATUS_ABORT_MSK = 0x00000080,
+ TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
+ TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
+ TX_RESERVED = 0x00780000, /* bits 19:22 */
+ TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
+ TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
+};
+
+/* *******************************
+ * TX aggregation status
+ ******************************* */
+
+enum {
+ AGG_TX_STATE_TRANSMITTED = 0x00,
+ AGG_TX_STATE_UNDERRUN_MSK = 0x01,
+ AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
+ AGG_TX_STATE_ABORT_MSK = 0x08,
+ AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
+ AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
+ AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
+ AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
+ AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
+ AGG_TX_STATE_DUMP_TX_MSK = 0x200,
+ AGG_TX_STATE_DELAY_TX_MSK = 0x400
+};
+
+#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
+#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
+
+#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
+ AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
+
+/* # tx attempts for first frame in aggregation */
+#define AGG_TX_STATE_TRY_CNT_POS 12
+#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
+
+/* Command ID and sequence number of Tx command for this frame */
+#define AGG_TX_STATE_SEQ_NUM_POS 16
+#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for
+ * a single frame. Multiple attempts, at various bit rates, may have
+ * been made for this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for
+ * 2 or more frames that used block-acknowledge. All frames were
+ * transmitted at same rate. Rate scaling may have been used if first
+ * frame in this new agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered here;
+ * block-ack has not been received by the time the 4965 device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the sending station (this 4965 device), rather than whether it was
+ * received successfully by the destination station.
+ */
+struct agg_tx_status {
+ __le16 status;
+ __le16 sequence;
+} __packed;
+
+struct iwl4965_tx_resp {
+ u8 frame_count; /* 1 no aggregation, >1 aggregation */
+ u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
+ u8 failure_rts; /* # failures due to unsuccessful RTS */
+ u8 failure_frame; /* # failures due to no ACK (unused for agg) */
+
+ /* For non-agg: Rate at which frame was successful.
+ * For agg: Rate at which all frames were transmitted. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * For agg: RTS + CTS + aggregation tx time + block-ack time. */
+ __le16 wireless_media_time; /* uSecs */
+
+ __le16 reserved;
+ __le32 pa_power1; /* RF power amplifier measurement (not used) */
+ __le32 pa_power2;
+
+ /*
+ * For non-agg: frame status TX_STATUS_*
+ * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
+ * fields follow this one, up to frame_count.
+ * Bit fields:
+ * 11- 0: AGG_TX_STATE_* status code
+ * 15-12: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a
+ * member of a previous aggregation block). If rate
+ * scaling is used, retry count indicates the rate
+ * table entry used for all frames in the new agg.
+ * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+ union {
+ __le32 status;
+ struct agg_tx_status agg_status[0]; /* for each agg frame */
+ } u;
+} __packed;
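+
+/*
+ * Illustrative sketch, not part of the uCode API: checking a
+ * non-aggregation response (frame_count == 1). For aggregation, the
+ * per-frame agg_status[] entries are decoded with the AGG_TX_STATE_*
+ * masks instead, and block-ack status arrives separately in
+ * REPLY_COMPRESSED_BA.
+ *
+ * u32 status = le32_to_cpu(tx_resp->u.status) & TX_STATUS_MSK;
+ * bool ok = (status == TX_STATUS_SUCCESS ||
+ * status == TX_STATUS_DIRECT_DONE);
+ */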
+
+/*
+ * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ *
+ * Reports Block-Acknowledge from recipient station
+ */
+struct iwl_compressed_ba_resp {
+ __le32 sta_addr_lo32;
+ __le16 sta_addr_hi16;
+ __le16 reserved;
+
+ /* Index of recipient (BA-sending) station in uCode's station table */
+ u8 sta_id;
+ u8 tid;
+ __le16 seq_ctl;
+ __le64 bitmap;
+ __le16 scd_flow;
+ __le16 scd_ssn;
+} __packed;
+
+/*
+ * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ *
+ * See details under "TXPOWER" in iwl-4965-hw.h.
+ */
+
+struct iwl3945_txpowertable_cmd {
+ u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
+ u8 reserved;
+ __le16 channel;
+ struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_txpowertable_cmd {
+ u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
+ u8 reserved;
+ __le16 channel;
+ struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+
+/**
+ * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ *
+ * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ *
+ * NOTE: The table of rates passed to the uCode via the
+ * RATE_SCALE command sets up the corresponding order of
+ * rates used for all related commands, including rate
+ * masks, etc.
+ *
+ * For example, if you set 9MB (PLCP 0x0f) as the first
+ * rate in the rate table, the bit mask for that rate
+ * when passed through ofdm_basic_rates on the REPLY_RXON
+ * command would be bit 0 (1 << 0)
+ */
+struct iwl3945_rate_scaling_info {
+ __le16 rate_n_flags;
+ u8 try_cnt;
+ u8 next_rate_index;
+} __packed;
+
+struct iwl3945_rate_scaling_cmd {
+ u8 table_id;
+ u8 reserved[3];
+ struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+} __packed;
+
+
+/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
+#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
+
+/* # of EDCA prioritized tx fifos */
+#define LINK_QUAL_AC_NUM AC_NUM
+
+/* # entries in rate scale table to support Tx retries */
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+/* Tx antenna selection values */
+#define LINK_QUAL_ANT_A_MSK (1 << 0)
+#define LINK_QUAL_ANT_B_MSK (1 << 1)
+#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
+
+
+/**
+ * struct iwl_link_qual_general_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_general_params {
+ u8 flags;
+
+ /* No entries at or above this (driver chosen) index contain MIMO */
+ u8 mimo_delimiter;
+
+ /* Best single antenna to use for single stream (legacy, SISO). */
+ u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
+
+ /* Best antennas to use for MIMO (unused for 4965, assumes both). */
+ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
+
+ /*
+ * If driver needs to use different initial rates for different
+ * EDCA QOS access categories (as implemented by tx fifos 0-3),
+ * this table will set that up, by indicating the indexes in the
+ * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
+ * Otherwise, driver should set all entries to 0.
+ *
+ * Entry usage:
+ * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
+ * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
+ */
+ u8 start_rate_index[LINK_QUAL_AC_NUM];
+} __packed;
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
+/**
+ * struct iwl_link_qual_agg_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_agg_params {
+
+ /*
+ * Maximum number of uSec in aggregation.
+ * Default is 4000 (4 milliseconds) if not configured in .cfg.
+ */
+ __le16 agg_time_limit;
+
+ /*
+ * Number of Tx retries allowed for a frame, before that frame will
+ * no longer be considered for the start of an aggregation sequence
+ * (scheduler will then try to tx it as single frame).
+ * Driver should set this to 3.
+ */
+ u8 agg_dis_start_th;
+
+ /*
+ * Maximum number of frames in aggregation.
+ * 0 = no limit (default). 1 = no aggregation.
+ * Other values = max # frames in aggregation.
+ */
+ u8 agg_frame_cnt_limit;
+
+ __le32 reserved;
+} __packed;
+
+/*
+ * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ *
+ * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ *
+ * Each station in the 4965 device's internal station table has its own table
+ * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying
+ * Tx when an ACK is not received. This command replaces the entire table
+ * for one station.
+ *
+ * NOTE: Station must already be in 4965 device's station table.
+ * Use REPLY_ADD_STA.
+ *
+ * The rate scaling procedures described below work well. Of course, other
+ * procedures are possible, and may work better for particular environments.
+ *
+ *
+ * FILLING THE RATE TABLE
+ *
+ * Given a particular initial rate and mode, as determined by the rate
+ * scaling algorithm described below, the Linux driver uses the following
+ * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
+ * Link Quality command:
+ *
+ *
+ * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
+ * a) Use this same initial rate for first 3 entries.
+ * b) Find next lower available rate using same mode (SISO or MIMO),
+ * use for next 3 entries. If no lower rate available, switch to
+ * legacy mode (no HT40 channel, no MIMO, no short guard interval).
+ * c) If using MIMO, set command's mimo_delimiter to number of entries
+ * using MIMO (3 or 6).
+ * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
+ * no MIMO, no short guard interval), at the next lower bit rate
+ * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
+ * legacy procedure for remaining table entries.
+ *
+ * 2) If using legacy initial rate:
+ * a) Use the initial rate for only one entry.
+ * b) For each following entry, reduce the rate to next lower available
+ * rate, until reaching the lowest available rate.
+ * c) When reducing rate, also switch antenna selection.
+ * d) Once lowest available rate is reached, repeat this rate until
+ * rate table is filled (16 entries), switching antenna each entry.
+ *
+ *
+ * ACCUMULATING HISTORY
+ *
+ * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
+ * uses two sets of frame Tx success history: One for the current/active
+ * modulation mode, and one for a speculative/search mode that is being
+ * attempted. If the speculative mode turns out to be more effective (i.e.
+ * actual transfer rate is better), then the driver continues to use the
+ * speculative mode as the new current active mode.
+ *
+ * Each history set contains, separately for each possible rate, data for a
+ * sliding window of the 62 most recent tx attempts at that rate. The data
+ * includes a shifting bitmap of success(1)/failure(0), and sums of successful
+ * and attempted frames, from which the driver can additionally calculate a
+ * success ratio (success / attempted) and number of failures
+ * (attempted - success), and control the size of the window (attempted).
+ * The driver uses the bit map to remove successes from the success sum, as
+ * the oldest tx attempts fall out of the window.
+ *
+ * When the 4965 device makes multiple tx attempts for a given frame, each
+ * attempt might be at a different rate, and have different modulation
+ * characteristics (e.g. antenna, fat channel, short guard interval), as set
+ * up in the rate scaling table in the Link Quality command. The driver must
+ * determine which rate table entry was used for each tx attempt, to determine
+ * which rate-specific history to update, and record only those attempts that
+ * match the modulation characteristics of the history set.
+ *
+ * When using block-ack (aggregation), all frames are transmitted at the same
+ * rate, since there is no per-attempt acknowledgment from the destination
+ * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * rate_n_flags field. After receiving a block-ack, the driver can update
+ * history for the entire block all at once.
+ *
+ *
+ * FINDING BEST STARTING RATE:
+ *
+ * When working with a selected initial modulation mode (see below), the
+ * driver attempts to find a best initial rate. The initial rate is the
+ * first entry in the Link Quality command's rate table.
+ *
+ * 1) Calculate actual throughput (success ratio * expected throughput, see
+ * table below) for current initial rate. Do this only if enough frames
+ * have been attempted to make the value meaningful: at least 6 failed
+ * tx attempts, or at least 8 successes. If not enough, don't try rate
+ * scaling yet.
+ *
+ * 2) Find available rates adjacent to current initial rate. Available means:
+ * a) supported by hardware &&
+ * b) supported by association &&
+ * c) within any constraints selected by user
+ *
+ * 3) Gather measured throughputs for adjacent rates. These might not have
+ * enough history to calculate a throughput. That's okay, we might try
+ * using one of them anyway!
+ *
+ * 4) Try decreasing rate if, for current rate:
+ * a) success ratio is < 15% ||
+ * b) lower adjacent rate has better measured throughput ||
+ * c) higher adjacent rate has worse throughput, and lower is unmeasured
+ *
+ * As a sanity check, if decrease was determined above, leave rate
+ * unchanged if:
+ * a) lower rate unavailable
+ * b) success ratio at current rate > 85% (very good)
+ * c) current measured throughput is better than expected throughput
+ * of lower rate (under perfect 100% tx conditions, see table below)
+ *
+ * 5) Try increasing rate if, for current rate:
+ * a) success ratio is < 15% ||
+ * b) both adjacent rates' throughputs are unmeasured (try it!) ||
+ * c) higher adjacent rate has better measured throughput ||
+ * d) lower adjacent rate has worse throughput, and higher is unmeasured
+ *
+ * As a sanity check, if increase was determined above, leave rate
+ * unchanged if:
+ * a) success ratio at current rate < 70%. This is not particularly
+ * good performance; higher rate is sure to have poorer success.
+ *
+ * 6) Re-evaluate the rate after each tx frame. If working with block-
+ * acknowledge, history and statistics may be calculated for the entire
+ * block (including prior history that fits within the history windows),
+ * before re-evaluation.
+ *
+ * FINDING BEST STARTING MODULATION MODE:
+ *
+ * After working with a modulation mode for a "while" (and doing rate scaling),
+ * the driver searches for a new initial mode in an attempt to improve
+ * throughput. The "while" is measured by numbers of attempted frames:
+ *
+ * For legacy mode, search for new mode after:
+ * 480 successful frames, or 160 failed frames
+ * For high-throughput modes (SISO or MIMO), search for new mode after:
+ * 4500 successful frames, or 400 failed frames
+ *
+ * Mode switch possibilities are (3 for each mode):
+ *
+ * For legacy:
+ * Change antenna, try SISO (if HT association), try MIMO (if HT association)
+ * For SISO:
+ * Change antenna, try MIMO, try shortened guard interval (SGI)
+ * For MIMO:
+ * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
+ *
+ * When trying a new mode, use the same bit rate as the old/current mode when
+ * trying antenna switches and shortened guard interval. When switching to
+ * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
+ * for which the expected throughput (under perfect conditions) is about the
+ * same or slightly better than the actual measured throughput delivered by
+ * the old/current mode.
+ *
+ * Actual throughput can be estimated by multiplying the expected throughput
+ * by the success ratio (successful / attempted tx frames). Frame size is
+ * not considered in this calculation; it assumes that frame size will average
+ * out to be fairly consistent over several samples. The following are
+ * metric values for expected throughput assuming 100% success ratio.
+ * Only G band has support for CCK rates:
+ *
+ * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
+ *
+ * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
+ * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
+ * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
+ * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
+ * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
+ * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
+ * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
+ * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
+ * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
+ * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
+ *
+ * After the new mode has been tried for a short while (minimum of 6 failed
+ * frames or 8 successful frames), compare success ratio and actual throughput
+ * estimate of the new mode with the old. If either is better with the new
+ * mode, continue to use the new mode.
+ *
+ * Continue comparing modes until all 3 possibilities have been tried.
+ * If moving from legacy to HT, try all 3 possibilities from the new HT
+ * mode. After trying all 3, a best mode is found. Continue to use this mode
+ * for the longer "while" described above (e.g. 480 successful frames for
+ * legacy), and then repeat the search process.
+ *
+ */
+struct iwl_link_quality_cmd {
+
+ /* Index of destination/recipient station in uCode's station table */
+ u8 sta_id;
+ u8 reserved1;
+ __le16 control; /* not used */
+ struct iwl_link_qual_general_params general_params;
+ struct iwl_link_qual_agg_params agg_params;
+
+ /*
+ * Rate info; when using rate-scaling, Tx command's initial_rate_index
+ * specifies 1st Tx rate attempted, via index into this table.
+ * 4965 devices work their way through the table when retrying Tx.
+ */
+ struct {
+ __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
+ } rs_table[LINK_QUAL_MAX_RETRY_NUM];
+ __le32 reserved2;
+} __packed;
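+
+/*
+ * Illustrative sketch, not code from the original driver: filling rs_table[]
+ * for a *legacy* initial rate, following step 2) of "FILLING THE RATE TABLE"
+ * above.  The caller passes the available legacy rates in decreasing order as
+ * plain rate_n_flags values without antenna bits; the antenna bit position
+ * used by EXAMPLE_RATE_WITH_ANT is a placeholder assumption, as is the
+ * iwl_example_ helper name.
+ */
+#define EXAMPLE_RATE_WITH_ANT(r, ant_msk) ((r) | ((u32)(ant_msk) << 14))
+
+static inline void
+iwl_example_fill_legacy_rs_table(struct iwl_link_quality_cmd *lq,
+ const u32 *rates_desc, int num_rates, int initial)
+{
+ u32 ant = LINK_QUAL_ANT_A_MSK;
+ int idx = initial;
+ int i;
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ lq->rs_table[i].rate_n_flags =
+ cpu_to_le32(EXAMPLE_RATE_WITH_ANT(rates_desc[idx], ant));
+
+ /* step down to the next lower rate until the lowest is reached,
+ * then repeat the lowest rate for the remaining entries */
+ if (idx < num_rates - 1)
+ idx++;
+
+ /* switch antenna selection on every entry, per step 2c/2d */
+ ant = (ant == LINK_QUAL_ANT_A_MSK) ?
+ LINK_QUAL_ANT_B_MSK : LINK_QUAL_ANT_A_MSK;
+ }
+}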
+
+/*
+ * BT configuration enable flags:
+ * bit 0 - 1: BT channel announcement enabled
+ * 0: disable
+ * bit 1 - 1: priority of BT device enabled
+ * 0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY BIT(1)
+
+#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
+
+#define BT_LEAD_TIME_DEF (0x1E)
+
+#define BT_MAX_KILL_DEF (0x5)
+
+/*
+ * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ *
+ * 3945 and 4965 devices support hardware handshake with Bluetooth device on
+ * same platform. Bluetooth device alerts wireless device when it will Tx;
+ * wireless device can delay or kill its own Tx to accommodate.
+ */
+struct iwl_bt_cmd {
+ u8 flags;
+ u8 lead_time;
+ u8 max_kill;
+ u8 reserved;
+ __le32 kill_ack_mask;
+ __le32 kill_cts_mask;
+} __packed;
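+
+/*
+ * Illustrative sketch, not part of the original file: building a BT
+ * coexistence command from the defaults defined above, with no ACK/CTS
+ * kill masks.  The iwl_example_ name is hypothetical.
+ */
+static inline void iwl_example_fill_bt_config(struct iwl_bt_cmd *bt)
+{
+ bt->flags = BT_COEX_ENABLE; /* channel announce + priority handshake */
+ bt->lead_time = BT_LEAD_TIME_DEF;
+ bt->max_kill = BT_MAX_KILL_DEF;
+ bt->reserved = 0;
+ /* don't kill any queued ACK/CTS frames for BT in this example */
+ bt->kill_ack_mask = cpu_to_le32(0);
+ bt->kill_cts_mask = cpu_to_le32(0);
+}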
+
+
+/******************************************************************************
+ * (6)
+ * Spectrum Management (802.11h) Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/*
+ * Spectrum Management
+ */
+#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
+ RXON_FILTER_CTL2HOST_MSK | \
+ RXON_FILTER_ACCEPT_GRP_MSK | \
+ RXON_FILTER_DIS_DECRYPT_MSK | \
+ RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
+ RXON_FILTER_ASSOC_MSK | \
+ RXON_FILTER_BCON_AWARE_MSK)
+
+struct iwl_measure_channel {
+ __le32 duration; /* measurement duration in extended beacon
+ * format */
+ u8 channel; /* channel to measure */
+ u8 type; /* see enum iwl_measure_type */
+ __le16 reserved;
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ */
+struct iwl_spectrum_cmd {
+ __le16 len; /* number of bytes starting from token */
+ u8 token; /* token id */
+ u8 id; /* measurement id -- 0 or 1 */
+ u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
+ u8 periodic; /* 1 = periodic */
+ __le16 path_loss_timeout;
+ __le32 start_time; /* start time in extended beacon format */
+ __le32 reserved2;
+ __le32 flags; /* rxon flags */
+ __le32 filter_flags; /* rxon filter flags */
+ __le16 channel_count; /* minimum 1, maximum 10 */
+ __le16 reserved3;
+ struct iwl_measure_channel channels[10];
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ */
+struct iwl_spectrum_resp {
+ u8 token;
+ u8 id; /* id of the prior command replaced, or 0xff */
+ __le16 status; /* 0 - command will be handled
+ * 1 - cannot handle (conflicts with another
+ * measurement) */
+} __packed;
+
+enum iwl_measurement_state {
+ IWL_MEASUREMENT_START = 0,
+ IWL_MEASUREMENT_STOP = 1,
+};
+
+enum iwl_measurement_status {
+ IWL_MEASUREMENT_OK = 0,
+ IWL_MEASUREMENT_CONCURRENT = 1,
+ IWL_MEASUREMENT_CSA_CONFLICT = 2,
+ IWL_MEASUREMENT_TGH_CONFLICT = 3,
+ /* 4-5 reserved */
+ IWL_MEASUREMENT_STOPPED = 6,
+ IWL_MEASUREMENT_TIMEOUT = 7,
+ IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+};
+
+#define NUM_ELEMENTS_IN_HISTOGRAM 8
+
+struct iwl_measurement_histogram {
+ __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
+ __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct iwl_measurement_cca_counters {
+ __le32 ofdm;
+ __le32 cck;
+} __packed;
+
+enum iwl_measure_type {
+ IWL_MEASURE_BASIC = (1 << 0),
+ IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IWL_MEASURE_FRAME = (1 << 4),
+ /* bits 5:6 are reserved */
+ IWL_MEASURE_IDLE = (1 << 7),
+};
+
+/*
+ * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ */
+struct iwl_spectrum_notification {
+ u8 id; /* measurement id -- 0 or 1 */
+ u8 token;
+ u8 channel_index; /* index in measurement channel list */
+ u8 state; /* 0 - start, 1 - stop */
+ __le32 start_time; /* lower 32-bits of TSF */
+ u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
+ u8 channel;
+ u8 type; /* see enum iwl_measure_type */
+ u8 reserved1;
+ /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
+ * valid if applicable for measurement type requested. */
+ __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
+ __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
+ __le32 cca_time; /* channel load time in usecs */
+ u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
+ * unidentified */
+ u8 reserved2[3];
+ struct iwl_measurement_histogram histogram;
+ __le32 stop_time; /* lower 32-bits of TSF */
+ __le32 status; /* see iwl_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ * bit 0 - '0' Driver does not allow power management
+ * '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ * bit 1 - '0' Don't send sleep notification
+ * '1' Send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM
+ * bit 2 - '0' PM has to wake up every DTIM
+ * '1' PM may sleep over DTIM until the listen interval.
+ *
+ * PCI power managed
+ * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ * '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ * bit 4 - '1' Put radio to sleep when receiving frame for others
+ *
+ * Force sleep Modes
+ * bit 31/30- '00' use both mac/xtal sleeps
+ * '01' force Mac sleep
+ * '10' force xtal sleep
+ * '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period, then
+ * uCode assumes sleep over DTIM is allowed and we don't need to wake up
+ * for every DTIM.
+ */
+#define IWL_POWER_VEC_SIZE 5
+
+#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
+#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
+#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
+#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
+#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
+#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
+#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
+
+struct iwl3945_powertable_cmd {
+ __le16 flags;
+ u8 reserved[2];
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+} __packed;
+
+struct iwl_powertable_cmd {
+ __le16 flags;
+ u8 keep_alive_seconds; /* 3945 reserved */
+ u8 debug_flags; /* 3945 reserved */
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 keep_alive_beacons;
+} __packed;
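+
+/*
+ * Illustrative sketch, not part of the original file: setting up the power
+ * table command flags for driver-allowed PM with sleep over DTIM, per the
+ * bit descriptions above.  The ~100 ms rx/tx timeouts are placeholder
+ * values, and the iwl_example_ name is hypothetical; sleep_interval[] is
+ * left to caller policy.
+ */
+static inline void
+iwl_example_fill_powertable(struct iwl_powertable_cmd *cmd, bool pci_pm)
+{
+ /* allow power management and let uCode sleep over DTIM */
+ cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
+ IWL_POWER_SLEEP_OVER_DTIM_MSK;
+ if (pci_pm)
+ cmd->flags |= IWL_POWER_PCI_PM_MSK;
+
+ cmd->keep_alive_seconds = 0; /* reserved on 3945 */
+ cmd->debug_flags = 0;
+ cmd->rx_data_timeout = cpu_to_le32(100 * 1024); /* ~100 ms, placeholder */
+ cmd->tx_data_timeout = cpu_to_le32(100 * 1024);
+}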
+
+/*
+ * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * all devices identical.
+ */
+struct iwl_sleep_notification {
+ u8 pm_sleep_mode;
+ u8 pm_wakeup_src;
+ __le16 reserved;
+ __le32 sleep_time;
+ __le32 tsf_low;
+ __le32 bcon_timer;
+} __packed;
+
+/* Sleep states. all devices identical. */
+enum {
+ IWL_PM_NO_SLEEP = 0,
+ IWL_PM_SLP_MAC = 1,
+ IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+ IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+ IWL_PM_SLP_PHY = 4,
+ IWL_PM_SLP_REPENT = 5,
+ IWL_PM_WAKEUP_BY_TIMER = 6,
+ IWL_PM_WAKEUP_BY_DRIVER = 7,
+ IWL_PM_WAKEUP_BY_RFKILL = 8,
+ /* 3 reserved */
+ IWL_PM_NUM_OF_MODES = 12,
+};
+
+/*
+ * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ */
+struct iwl_card_state_notif {
+ __le32 flags;
+} __packed;
+
+#define HW_CARD_DISABLED 0x01
+#define SW_CARD_DISABLED 0x02
+#define CT_CARD_DISABLED 0x04
+#define RXON_CARD_DISABLED 0x10
+
+struct iwl_ct_kill_config {
+ __le32 reserved;
+ __le32 critical_temperature_M;
+ __le32 critical_temperature_R;
+} __packed;
+
+/******************************************************************************
+ * (8)
+ * Scan Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
+#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ *
+ * One for each channel in the scan list.
+ * Each channel can independently select:
+ * 1) SSID for directed active scans
+ * 2) Txpower setting (for rate specified within Tx command)
+ * 3) How long to stay on-channel (behavior may be modified by quiet_time,
+ * quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1) If using passive_dwell (i.e. passive_dwell != 0):
+ * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2) quiet_time <= active_dwell
+ * 3) If restricting off-channel time (i.e. max_out_time !=0):
+ * passive_dwell < max_out_time
+ * active_dwell < max_out_time
+ */
+struct iwl3945_scan_channel {
+ /*
+ * type is defined as:
+ * 0:0 1 = active, 0 = passive
+ * 1:4 SSID direct bit map; if a bit is set, then corresponding
+ * SSID IE is transmitted in probe request.
+ * 5:7 reserved
+ */
+ u8 type;
+ u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
+ struct iwl3945_tx_power tpc;
+ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
+ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes u8 type */
+#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+
+struct iwl_scan_channel {
+ /*
+ * type is defined as:
+ * 0:0 1 = active, 0 = passive
+ * 1:20 SSID direct bit map; if a bit is set, then corresponding
+ * SSID IE is transmitted in probe request.
+ * 21:31 reserved
+ */
+ __le32 type;
+ __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+ u8 tx_gain; /* gain for analog radio */
+ u8 dsp_atten; /* gain for DSP */
+ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
+ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes __le32 type */
+#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
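+
+/*
+ * Illustrative sketch, not part of the original file: filling one scan
+ * channel entry in a way that respects the dwell-time constraints described
+ * for struct iwl_scan_channel above.  The 30/110 TU dwell times and zeroed
+ * gain fields are placeholder assumptions, and the iwl_example_ name is
+ * hypothetical.
+ */
+static inline void
+iwl_example_fill_scan_channel(struct iwl_scan_channel *ch, u16 channel,
+ bool active, u32 n_probes)
+{
+ ch->type = active ? SCAN_CHANNEL_TYPE_ACTIVE : SCAN_CHANNEL_TYPE_PASSIVE;
+ /* direct-probe SSID bits 1..n, per the "type" layout above */
+ if (active && n_probes)
+ ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ ch->channel = cpu_to_le16(channel);
+ /* real gain values come from hardware-specific txpower code */
+ ch->tx_gain = 0;
+ ch->dsp_atten = 0;
+ /* keep active_dwell <= passive_dwell, per the constraints above */
+ ch->active_dwell = cpu_to_le16(30);
+ ch->passive_dwell = cpu_to_le16(110);
+}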
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 (4) entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+ u8 id;
+ u8 len;
+ u8 ssid[32];
+} __packed;
+
+#define PROBE_OPTION_MAX_3945 4
+#define PROBE_OPTION_MAX 20
+#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
+#define IWL_GOOD_CRC_TH_DISABLED 0
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
+#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_CMD_SIZE 4096
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (command)
+ *
+ * The hardware scan command is very powerful; the driver can set it up to
+ * maintain (relatively) normal network traffic while doing a scan in the
+ * background. The max_out_time and suspend_time control the ratio of how
+ * long the device stays on an associated network channel ("service channel")
+ * vs. how long it's away from the service channel, i.e. tuned to other channels
+ * for scanning.
+ *
+ * max_out_time is the max time off-channel (in usec), and suspend_time
+ * is how long (in "extended beacon" format) that the scan is "suspended"
+ * after returning to the service channel. That is, suspend_time is the
+ * time that we stay on the service channel, doing normal work, between
+ * scan segments. The driver may set these parameters differently to support
+ * scanning when associated vs. not associated, and light vs. heavy traffic
+ * loads when associated.
+ *
+ * After receiving this command, the device's scan engine does the following:
+ *
+ * 1) Sends SCAN_START notification to driver
+ * 2) Checks to see if it has time to do scan for one channel
+ * 3) Sends NULL packet, with power-save (PS) bit set to 1,
+ * to tell AP that we're going off-channel
+ * 4) Tunes to first channel in scan list, does active or passive scan
+ * 5) Sends SCAN_RESULT notification to driver
+ * 6) Checks to see if it has time to do scan on *next* channel in list
+ * 7) Repeats 4-6 until it no longer has time to scan the next channel
+ * before max_out_time expires
+ * 8) Returns to service channel
+ * 9) Sends NULL packet with PS=0 to tell AP that we're back
+ * 10) Stays on service channel until suspend_time expires
+ * 11) Repeats entire process 2-10 until list is complete
+ * 12) Sends SCAN_COMPLETE notification
+ *
+ * For fast, efficient scans, the scan command also has support for staying on
+ * a channel for just a short time, if doing active scanning and getting no
+ * responses to the transmitted probe request. This time is controlled by
+ * quiet_time, and the number of received packets below which a channel is
+ * considered "quiet" is controlled by quiet_plcp_threshold.
+ *
+ * For active scanning on channels that have regulatory restrictions against
+ * blindly transmitting, the scan can listen before transmitting, to make sure
+ * that there is already legitimate activity on the channel. If enough
+ * packets are cleanly received on the channel (controlled by good_CRC_th,
+ * typical value 1), the scan engine starts transmitting probe requests.
+ *
+ * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
+ *
+ * To avoid uCode errors, see timing restrictions described under
+ * struct iwl_scan_channel.
+ */
+
+struct iwl3945_scan_cmd {
+ __le16 len;
+ u8 reserved0;
+ u8 channel_count; /* # channels in channel list */
+ __le16 quiet_time; /* dwell only this # millisecs on quiet channel
+ * (only for active scan) */
+ __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
+ __le16 good_CRC_th; /* passive -> active promotion threshold */
+ __le16 reserved1;
+ __le32 max_out_time; /* max usec to be away from associated (service)
+ * channel */
+ __le32 suspend_time; /* pause scan this long (in "extended beacon
+ * format") when returning to service channel:
+ * 3945; 31:24 # beacons, 19:0 additional usec,
+ * 4965; 31:22 # beacons, 21:0 additional usec.
+ */
+ __le32 flags; /* RXON_FLG_* */
+ __le32 filter_flags; /* RXON_FILTER_* */
+
+ /* For active scans (set to all-0s for passive scans).
+ * Does not include payload. Must specify Tx rate; no rate scaling. */
+ struct iwl3945_tx_cmd tx_cmd;
+
+ /* For directed active scans (set to all-0s otherwise) */
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwl3945_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ u8 data[0];
+} __packed;
+
+struct iwl_scan_cmd {
+ __le16 len;
+ u8 reserved0;
+ u8 channel_count; /* # channels in channel list */
+ __le16 quiet_time; /* dwell only this # millisecs on quiet channel
+ * (only for active scan) */
+ __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
+ __le16 good_CRC_th; /* passive -> active promotion threshold */
+ __le16 rx_chain; /* RXON_RX_CHAIN_* */
+ __le32 max_out_time; /* max usec to be away from associated (service)
+ * channel */
+ __le32 suspend_time; /* pause scan this long (in "extended beacon
+ * format") when returning to service chnl:
+ * 3945; 31:24 # beacons, 19:0 additional usec,
+ * 4965; 31:22 # beacons, 21:0 additional usec.
+ */
+ __le32 flags; /* RXON_FLG_* */
+ __le32 filter_flags; /* RXON_FILTER_* */
+
+ /* For active scans (set to all-0s for passive scans).
+ * Does not include payload. Must specify Tx rate; no rate scaling. */
+ struct iwl_tx_cmd tx_cmd;
+
+ /* For directed active scans (set to all-0s otherwise) */
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwl_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ u8 data[0];
+} __packed;
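+
+/*
+ * Illustrative sketch, not part of the original file: laying out the
+ * variable-length tail of REPLY_SCAN_CMD as described above -- probe request
+ * frame first, channel list immediately after.  It assumes the probe request
+ * frame (probe_len bytes) has already been written into data[], and reuses
+ * the hypothetical iwl_example_fill_scan_channel() sketch from earlier.
+ */
+static inline u16
+iwl_example_build_scan_tail(struct iwl_scan_cmd *scan, u16 probe_len,
+ const u16 *channel_list, u8 n_channels, u32 n_probes)
+{
+ /* channel list starts right after the probe request frame in data[] */
+ struct iwl_scan_channel *chan =
+ (struct iwl_scan_channel *)(scan->data + probe_len);
+ int i;
+
+ for (i = 0; i < n_channels; i++)
+ iwl_example_fill_scan_channel(&chan[i], channel_list[i],
+ true, n_probes);
+
+ scan->channel_count = n_channels;
+
+ /* total command length: fixed part + probe frame + channel list */
+ scan->len = cpu_to_le16(sizeof(*scan) + probe_len +
+ n_channels * sizeof(struct iwl_scan_channel));
+ return le16_to_cpu(scan->len);
+}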
+
+/* If the scan can be aborted, driver is notified via a complete notification
+ * carrying abort status. */
+#define CAN_ABORT_STATUS cpu_to_le32(0x1)
+/* complete notification statuses */
+#define ABORT_STATUS 0x2
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (response)
+ */
+struct iwl_scanreq_notification {
+ __le32 status; /* 1: okay, 2: cannot fulfill request */
+} __packed;
+
+/*
+ * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ */
+struct iwl_scanstart_notification {
+ __le32 tsf_low;
+ __le32 tsf_high;
+ __le32 beacon_timer;
+ u8 channel;
+ u8 band;
+ u8 reserved[2];
+ __le32 status;
+} __packed;
+
+#define SCAN_OWNER_STATUS 0x1
+#define MEASURE_OWNER_STATUS 0x2
+
+#define IWL_PROBE_STATUS_OK 0
+#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
+#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
+
+#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+/*
+ * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ */
+struct iwl_scanresults_notification {
+ u8 channel;
+ u8 band;
+ u8 probe_status;
+ u8 num_probe_not_sent; /* not enough time to send */
+ __le32 tsf_low;
+ __le32 tsf_high;
+ __le32 statistics[NUMBER_OF_STATISTICS];
+} __packed;
+
+/*
+ * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ */
+struct iwl_scancomplete_notification {
+ u8 scanned_channels;
+ u8 status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+
+/******************************************************************************
+ * (9)
+ * IBSS/AP Commands and Notifications:
+ *
+ *****************************************************************************/
+
+enum iwl_ibss_manager {
+ IWL_NOT_IBSS_MANAGER = 0,
+ IWL_IBSS_MANAGER = 1,
+};
+
+/*
+ * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ */
+
+struct iwl3945_beacon_notif {
+ struct iwl3945_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
+struct iwl4965_beacon_notif {
+ struct iwl4965_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
+/*
+ * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ */
+
+struct iwl3945_tx_beacon_cmd {
+ struct iwl3945_tx_cmd tx;
+ __le16 tim_idx;
+ u8 tim_size;
+ u8 reserved1;
+ struct ieee80211_hdr frame[0]; /* beacon frame */
+} __packed;
+
+struct iwl_tx_beacon_cmd {
+ struct iwl_tx_cmd tx;
+ __le16 tim_idx;
+ u8 tim_size;
+ u8 reserved1;
+ struct ieee80211_hdr frame[0]; /* beacon frame */
+} __packed;
+
+/******************************************************************************
+ * (10)
+ * Statistics Commands and Notifications:
+ *
+ *****************************************************************************/
+
+#define IWL_TEMP_CONVERT 260
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/* Used for passing to driver number of successes and failures per rate */
+struct rate_histogram {
+ union {
+ __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+ __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+ __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+ } success;
+ union {
+ __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+ __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+ __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+ } failed;
+} __packed;
+
+/* statistics command response */
+
+struct iwl39_statistics_rx_phy {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+} __packed;
+
+struct iwl39_statistics_rx_non_phy {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+} __packed;
+
+struct iwl39_statistics_rx {
+ struct iwl39_statistics_rx_phy ofdm;
+ struct iwl39_statistics_rx_phy cck;
+ struct iwl39_statistics_rx_non_phy general;
+} __packed;
+
+struct iwl39_statistics_tx {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+} __packed;
+
+struct statistics_dbg {
+ __le32 burst_check;
+ __le32 burst_count;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved[3];
+} __packed;
+
+struct iwl39_statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+} __packed;
+
+struct iwl39_statistics_general {
+ __le32 temperature;
+ struct statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct iwl39_statistics_div div;
+} __packed;
+
+struct statistics_rx_phy {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+ __le32 sent_ba_rsp_cnt;
+ __le32 dsp_self_kill;
+ __le32 mh_format_err;
+ __le32 re_acq_main_rssi_sum;
+ __le32 reserved3;
+} __packed;
+
+struct statistics_rx_ht_phy {
+ __le32 plcp_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 crc32_err;
+ __le32 mh_format_err;
+ __le32 agg_crc32_good;
+ __le32 agg_mpdu_cnt;
+ __le32 agg_cnt;
+ __le32 unsupport_mcs;
+} __packed;
+
+#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
+
+struct statistics_rx_non_phy {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ __le32 channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ __le32 num_missed_bcon; /* number of missed beacons */
+ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
+ __le32 interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 beacon_rssi_c;
+ __le32 beacon_energy_a;
+ __le32 beacon_energy_b;
+ __le32 beacon_energy_c;
+} __packed;
+
+struct statistics_rx {
+ struct statistics_rx_phy ofdm;
+ struct statistics_rx_phy cck;
+ struct statistics_rx_non_phy general;
+ struct statistics_rx_ht_phy ofdm_ht;
+} __packed;
+
+/**
+ * struct statistics_tx_power - current tx power
+ *
+ * @ant_a: current tx power on chain a in 1/2 dB step
+ * @ant_b: current tx power on chain b in 1/2 dB step
+ * @ant_c: current tx power on chain c in 1/2 dB step
+ */
+struct statistics_tx_power {
+ u8 ant_a;
+ u8 ant_b;
+ u8 ant_c;
+ u8 reserved;
+} __packed;
+
+struct statistics_tx_non_phy_agg {
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 frame_not_ready;
+ __le32 underrun;
+ __le32 bt_prio_kill;
+ __le32 rx_ba_rsp_cnt;
+} __packed;
+
+struct statistics_tx {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_next_frame_mismatch_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+ struct statistics_tx_non_phy_agg agg;
+
+ __le32 reserved1;
+} __packed;
+
+
+struct statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+ __le32 reserved1;
+ __le32 reserved2;
+} __packed;
+
+struct statistics_general_common {
+ __le32 temperature; /* radio temperature */
+ struct statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct statistics_div div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+} __packed;
+
+struct statistics_general {
+ struct statistics_general_common common;
+ __le32 reserved2;
+ __le32 reserved3;
+} __packed;
+
+#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
+#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
+#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
+
+/*
+ * REPLY_STATISTICS_CMD = 0x9c,
+ * all devices identical.
+ *
+ * This command triggers an immediate response containing uCode statistics.
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ *
+ * If the CLEAR_STATS configuration flag is set, uCode will clear its
+ * internal copy of the statistics (counters) after issuing the response.
+ * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ *
+ * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
+ * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
+ * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ */
+#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
+struct iwl_statistics_cmd {
+ __le32 configuration_flags; /* IWL_STATS_CONF_* */
+} __packed;
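+
+/*
+ * Illustrative sketch, not part of the original file: requesting an
+ * immediate statistics dump, optionally clearing uCode's internal counters
+ * as described above.  The iwl_example_ name is hypothetical.
+ */
+static inline void
+iwl_example_fill_statistics_cmd(struct iwl_statistics_cmd *cmd, bool clear)
+{
+ cmd->configuration_flags = clear ? IWL_STATS_CONF_CLEAR_STATS :
+ cpu_to_le32(0);
+}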
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+
+struct iwl3945_notif_statistics {
+ __le32 flag;
+ struct iwl39_statistics_rx rx;
+ struct iwl39_statistics_tx tx;
+ struct iwl39_statistics_general general;
+} __packed;
+
+struct iwl_notif_statistics {
+ __le32 flag;
+ struct statistics_rx rx;
+ struct statistics_tx tx;
+ struct statistics_general general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many.  The notification carries all the
+ * beacon information: the total number of missed beacons, the number of
+ * consecutive missed beacons, the number of beacons received, and the number
+ * of beacons expected to be received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio in
+ * order to bring the radio/PHY back to a working state; this has no relation
+ * to when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+ __le32 consecutive_missed_beacons;
+ __le32 total_missed_becons;
+ __le32 num_expected_beacons;
+ __le32 num_recvd_beacons;
+} __packed;
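+
+/*
+ * Illustrative sketch, not part of the original file: deciding whether to
+ * run sensitivity calibration from this notification, using the driver's
+ * own threshold (IWL_MISSED_BEACON_THRESHOLD_DEF by default) as described
+ * above.  The iwl_example_ name is hypothetical.
+ */
+static inline bool
+iwl_example_needs_recalib(const struct iwl_missed_beacon_notif *mb,
+ u32 missed_beacon_threshold)
+{
+ return le32_to_cpu(mb->consecutive_missed_beacons) >
+ missed_beacon_threshold;
+}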
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot. Driver must calibrate only:
+ *
+ * 1) Tx power (depends on temperature), described elsewhere
+ * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3) Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms". False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting). Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * received beacon. These provide information to the driver to analyze the
+ * sensitivity. Don't analyze statistics that come in from scanning, or any
+ * other non-associated-network source. Pertinent statistics include:
+ *
+ * From "general" statistics (struct statistics_rx_non_phy):
+ *
+ * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
+ * Measure of energy of desired signal. Used for establishing a level
+ * below which the device does not detect signals.
+ *
+ * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
+ * Measure of background noise in silent period after beacon.
+ *
+ * channel_load
+ * uSecs of actual Rx time during beacon period (varies according to
+ * how much time was spent transmitting).
+ *
+ * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ *
+ * false_alarm_cnt
+ * Signal locks abandoned early (before phy-level header).
+ *
+ * plcp_err
+ * Signal locks abandoned late (during phy-level header).
+ *
+ * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
+ * beacon to beacon, i.e. each value is an accumulation of all errors
+ * before and including the latest beacon. Values will wrap around to 0
+ * after counting up to 2^32 - 1. Driver must differentiate vs.
+ * previous beacon's values to determine # false alarms in the current
+ * beacon period.
+ *
+ * Total number of false alarms = false_alarms + plcp_errs
+ *
+ * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for OFDM are at or close to settings for
+ * maximum sensitivity):
+ *
+ * START / MIN / MAX
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
+ *
+ * If actual rate of OFDM false alarms (+ plcp_errors) is too high
+ * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
+ * by *adding* 1 to all 4 of the table entries above, up to the max for
+ * each entry. Conversely, if false alarm rate is too low (less than 5
+ * for each 204.8 msecs listening), *subtract* 1 from each entry to
+ * increase sensitivity.
+ *
+ * For CCK sensitivity, keep track of the following:
+ *
+ * 1). 20-beacon history of maximum background noise, indicated by
+ * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
+ * 3 receivers. For any given beacon, the "silence reference" is
+ * the maximum of last 60 samples (20 beacons * 3 receivers).
+ *
+ * 2). 10-beacon history of strongest signal level, as indicated
+ * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
+ * i.e. the strength of the signal through the best receiver at the
+ * moment. These measurements are "upside down", with lower values
+ * for stronger signals, so max energy will be *minimum* value.
+ *
+ * Then for any given beacon, the driver must determine the *weakest*
+ * of the strongest signals; this is the minimum level that needs to be
+ * successfully detected, when using the best receiver at the moment.
+ * "Max cck energy" is the maximum (higher value means lower energy!)
+ * of the last 10 minima. Once this is determined, driver must add
+ * a little margin by adding "6" to it.
+ *
+ * 3). Number of consecutive beacon periods with too few false alarms.
+ * Reset this to 0 at the first beacon period that falls within the
+ * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
+ *
+ * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for CCK are at maximum sensitivity):
+ *
+ * START / MIN / MAX
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
+ * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
+ *
+ * If actual rate of CCK false alarms (+ plcp_errors) is too high
+ * (greater than 50 for each 204.8 msecs listening), method for reducing
+ * sensitivity is:
+ *
+ * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * up to max 400.
+ *
+ * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ * sensitivity has been reduced a significant amount; bring it up to
+ * a moderate 161. Otherwise, *add* 3, up to max 200.
+ *
+ * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ * sensitivity has been reduced only a moderate or small amount;
+ * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ * down to min 0. Otherwise (if gain has been significantly reduced),
+ * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *
+ * b) Save a snapshot of the "silence reference".
+ *
+ * If actual rate of CCK false alarms (+ plcp_errors) is too low
+ * (less than 5 for each 204.8 msecs listening), method for increasing
+ * sensitivity is used only if:
+ *
+ * 1a) Previous beacon did not have too many false alarms
+ * 1b) AND difference between previous "silence reference" and current
+ * "silence reference" (prev - current) is 2 or more,
+ * OR 2) 100 or more consecutive beacon periods have had rate of
+ * less than 5 false alarms per 204.8 milliseconds rx time.
+ *
+ * Method for increasing sensitivity:
+ *
+ * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ * down to min 125.
+ *
+ * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * down to min 200.
+ *
+ * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *
+ * If actual rate of CCK false alarms (+ plcp_errors) is within good range
+ * (between 5 and 50 for each 204.8 msecs listening):
+ *
+ * 1) Save a snapshot of the silence reference.
+ *
+ * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
+ * give some extra margin to energy threshold by *subtracting* 8
+ * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *
+ * For all cases (too few, too many, good range), make sure that the CCK
+ * detection threshold (energy) is below the energy level for robust
+ * detection over the past 10 beacon periods, the "Max cck energy".
+ * Lower values mean higher energy; this means making sure that the value
+ * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *
+ */
+
+/*
+ * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
+ */
+#define HD_TABLE_SIZE (11) /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
+#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
+#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
+
+/* Control field in struct iwl_sensitivity_cmd */
+#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
+#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
+
+/**
+ * struct iwl_sensitivity_cmd
+ * @control: (1) updates working table, (0) updates default table
+ * @table: energy threshold values, use HD_* as index into table
+ *
+ * Always use "1" in "control" to update uCode's working table and DSP.
+ */
+struct iwl_sensitivity_cmd {
+ __le16 control; /* always use "1" */
+ __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
+} __packed;
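+
+/*
+ * Illustrative sketch, not the driver's actual calibration code: nudging the
+ * four OFDM auto-correlation entries of SENSITIVITY_CMD by +/-1 per beacon
+ * period based on the false alarm rate, as described in the SENSITIVITY_CMD
+ * comment above.  false_alarms is (false_alarm_cnt + plcp_err) over the last
+ * 204.8 ms of rx time; the min/max limits come from the START/MIN/MAX table
+ * above.  The iwl_example_ name is hypothetical.
+ */
+static inline void
+iwl_example_adjust_ofdm_sensitivity(struct iwl_sensitivity_cmd *cmd,
+ u32 false_alarms)
+{
+ static const int idx[] = {
+ HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX,
+ HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX,
+ HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX,
+ HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX,
+ };
+ static const u16 min[] = { 85, 170, 105, 220 };
+ static const u16 max[] = { 120, 210, 140, 270 };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u16 val = le16_to_cpu(cmd->table[idx[i]]);
+
+ if (false_alarms > 50 && val < max[i])
+ val++; /* too many false alarms: reduce sensitivity */
+ else if (false_alarms < 5 && val > min[i])
+ val--; /* too few false alarms: increase sensitivity */
+
+ cmd->table[idx[i]] = cpu_to_le16(val);
+ }
+
+ /* always update uCode's working table, per the kernel-doc above */
+ cmd->control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+}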
+
+
+/**
+ * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ *
+ * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
+ *
+ * After the first association, driver should accumulate signal and noise
+ * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
+ * beacons from the associated network (don't collect statistics that come
+ * in from scanning, or any other non-network source).
+ *
+ * DISCONNECTED ANTENNA:
+ *
+ * Driver should determine which antennas are actually connected, by comparing
+ * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
+ * following values over 20 beacons, one accumulator for each of the chains
+ * a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the strongest signal from among a/b/c. Compare the other two to the
+ * strongest. If any signal is more than 15 dB (times 20, unless you
+ * divide the accumulated values by 20) below the strongest, the driver
+ * considers that antenna to be disconnected, and should not try to use that
+ * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
+ * driver should declare the stronger one as connected, and attempt to use it
+ * (A and B are the only 2 Tx chains!).
+ *
+ *
+ * RX BALANCE:
+ *
+ * Driver should balance the 3 receivers (but just the ones that are connected
+ * to antennas, see above) for gain, by comparing the average signal levels
+ * detected during the silence after each beacon (background noise).
+ * Accumulate (add) the following values over 20 beacons, one accumulator for
+ * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the weakest background noise level from among a/b/c. This Rx chain
+ * will be the reference, with 0 gain adjustment.  Attenuate the other chains
+ * by finding the noise difference:
+ *
+ * (accum_noise[i] - accum_noise[reference]) / 30
+ *
+ * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
+ * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
+ * and set bit 2 to indicate "reduce gain". The value for the reference
+ * (weakest) chain should be "0".
+ *
+ * diff_gain_[abc] bit fields:
+ * 2: (1) reduce gain, (0) increase gain
+ * 1-0: amount of gain, units of 1.5 dB
+ */
+
+/* Phy calibration command for series */
+/* The default calibrate table size if not specified by firmware */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+enum {
+ IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+ IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+};
+
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
+
+struct iwl_calib_hdr {
+ u8 op_code;
+ u8 first_group;
+ u8 groups_num;
+ u8 data_valid;
+} __packed;
+
+/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct iwl_calib_diff_gain_cmd {
+ struct iwl_calib_hdr hdr;
+ s8 diff_gain_a; /* see above */
+ s8 diff_gain_b;
+ s8 diff_gain_c;
+ u8 reserved1;
+} __packed;
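+
+/*
+ * Illustrative sketch, not the actual calibration code: turning 20-beacon
+ * accumulated silence noise into diff_gain_[abc] values, following the
+ * "RX BALANCE" description above.  accum_noise[] holds per-chain sums of
+ * beacon_silence_rssi_[abc] & 0x0FF over 20 beacons, and all three chains
+ * are assumed connected; the iwl_example_ name is hypothetical.
+ */
+static inline void
+iwl_example_fill_diff_gain(struct iwl_calib_diff_gain_cmd *cmd,
+ const u32 accum_noise[3])
+{
+ u32 min_noise = accum_noise[0];
+ s8 gain[3];
+ int i;
+
+ for (i = 1; i < 3; i++)
+ if (accum_noise[i] < min_noise)
+ min_noise = accum_noise[i];
+
+ for (i = 0; i < 3; i++) {
+ u32 delta = (accum_noise[i] - min_noise) / 30; /* 1.5 dB steps */
+
+ if (delta > 3)
+ delta = 3; /* limit adjustment to 0-4.5 dB */
+ /* bit 2 set = reduce gain; the reference chain stays at 0 */
+ gain[i] = delta ? (s8)(delta | BIT(2)) : 0;
+ }
+
+ cmd->hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd->diff_gain_a = gain[0];
+ cmd->diff_gain_b = gain[1];
+ cmd->diff_gain_c = gain[2];
+}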
+
+/******************************************************************************
+ * (12)
+ * Miscellaneous Commands:
+ *
+ *****************************************************************************/
+
+/*
+ * LEDs Command & Response
+ * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ *
+ * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
+ * this command turns it on or off, or sets up a periodic blinking cycle.
+ */
+struct iwl_led_cmd {
+ __le32 interval; /* "interval" in uSec */
+ u8 id; /* 1: Activity, 2: Link, 3: Tech */
+ u8 off; /* # intervals off while blinking;
+ * "0", with >0 "on" value, turns LED on */
+ u8 on; /* # intervals on while blinking;
+ * "0", regardless of "off", turns LED off */
+ u8 reserved;
+} __packed;
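+
+/*
+ * Illustrative sketch, not part of the original file: setting up a periodic
+ * blink on the Link LED, per the field descriptions above.  The choice of
+ * LED id and the iwl_example_ name are assumptions.
+ */
+static inline void
+iwl_example_blink_led(struct iwl_led_cmd *led, u32 interval_us, u8 on, u8 off)
+{
+ led->interval = cpu_to_le32(interval_us);
+ led->id = 2; /* Link LED, per the id values above */
+ led->on = on; /* # intervals on; 0 turns the LED off */
+ led->off = off; /* # intervals off; 0 with on > 0 keeps it solid on */
+ led->reserved = 0;
+}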
+
+
+/******************************************************************************
+ * (13)
+ * Union of all expected notifications/responses:
+ *
+ *****************************************************************************/
+
+struct iwl_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-14: Reserved
+ * 13-00: RX frame size
+ */
+ __le32 len_n_flags;
+ struct iwl_cmd_header hdr;
+ union {
+ struct iwl3945_rx_frame rx_frame;
+ struct iwl3945_tx_resp tx_resp;
+ struct iwl3945_beacon_notif beacon_status;
+
+ struct iwl_alive_resp alive_frame;
+ struct iwl_spectrum_notification spectrum_notif;
+ struct iwl_csa_notification csa_notif;
+ struct iwl_error_resp err_resp;
+ struct iwl_card_state_notif card_state_notif;
+ struct iwl_add_sta_resp add_sta;
+ struct iwl_rem_sta_resp rem_sta;
+ struct iwl_sleep_notification sleep_notif;
+ struct iwl_spectrum_resp spectrum;
+ struct iwl_notif_statistics stats;
+ struct iwl_compressed_ba_resp compressed_ba;
+ struct iwl_missed_beacon_notif missed_beacon;
+ __le32 status;
+ u8 raw[0];
+ } u;
+} __packed;
+
+#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 000000000000..d418b647be80
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2674 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-power.h"
+#include "iwl-sta.h"
+#include "iwl-helpers.h"
+
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line in
+ * the PCIx).
+ * If bt_coex_active is set to false, uCode will ignore the BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to a WiFi/BT
+ * co-existence problem.  The typical symptoms are:
+ * Able to scan and find all the available APs
+ * Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
+
+u32 iwlegacy_debug_level;
+EXPORT_SYMBOL(iwlegacy_debug_level);
+
+const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(iwlegacy_bcast_addr);
+
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
+{
+ struct iwl_priv *priv;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
+ cfg->ops->ieee80211_ops);
+ if (hw == NULL) {
+ pr_err("%s: Can not allocate network device\n",
+ cfg->name);
+ goto out;
+ }
+
+ priv = hw->priv;
+ priv->hw = hw;
+
+out:
+ return hw;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ u16 max_bit_rate = 0;
+ u8 rx_chains_num = priv->hw_params.rx_chains_num;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+
+ ht_info->cap = 0;
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+ ht_info->ht_supported = true;
+
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ if (priv->hw_params.ht40_channel & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+ ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains_num >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains_num >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains_num;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains_num != rx_chains_num) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+
+/**
+ * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
+ */
+int iwl_legacy_init_geos(struct iwl_priv *priv)
+{
+ struct iwl_channel_info *ch;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *channels;
+ struct ieee80211_channel *geo_ch;
+ struct ieee80211_rate *rates;
+ int i = 0;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+ priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ return 0;
+ }
+
+ channels = kzalloc(sizeof(struct ieee80211_channel) *
+ priv->channel_count, GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
+ GFP_KERNEL);
+ if (!rates) {
+ kfree(channels);
+ return -ENOMEM;
+ }
+
+ /* 5.2GHz channels start after the 2.4GHz channels */
+ sband = &priv->bands[IEEE80211_BAND_5GHZ];
+ sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
+ /* just OFDM */
+ sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
+ sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
+
+ if (priv->cfg->sku & IWL_SKU_N)
+ iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+ IEEE80211_BAND_5GHZ);
+
+ sband = &priv->bands[IEEE80211_BAND_2GHZ];
+ sband->channels = channels;
+ /* OFDM & CCK */
+ sband->bitrates = rates;
+ sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
+
+ if (priv->cfg->sku & IWL_SKU_N)
+ iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+ IEEE80211_BAND_2GHZ);
+
+ priv->ieee_channels = channels;
+ priv->ieee_rates = rates;
+
+ for (i = 0; i < priv->channel_count; i++) {
+ ch = &priv->channel_info[i];
+
+ if (!iwl_legacy_is_channel_valid(ch))
+ continue;
+
+ if (iwl_legacy_is_channel_a_band(ch))
+ sband = &priv->bands[IEEE80211_BAND_5GHZ];
+ else
+ sband = &priv->bands[IEEE80211_BAND_2GHZ];
+
+ geo_ch = &sband->channels[sband->n_channels++];
+
+ geo_ch->center_freq =
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
+ geo_ch->max_power = ch->max_power_avg;
+ geo_ch->max_antenna_gain = 0xff;
+ geo_ch->hw_value = ch->channel;
+
+ if (iwl_legacy_is_channel_valid(ch)) {
+ if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+ geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+ geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch->flags & EEPROM_CHANNEL_RADAR)
+ geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+ geo_ch->flags |= ch->ht40_extension_channel;
+
+ if (ch->max_power_avg > priv->tx_power_device_lmt)
+ priv->tx_power_device_lmt = ch->max_power_avg;
+ } else {
+ geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
+ ch->channel, geo_ch->center_freq,
+ iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
+ geo_ch->flags & IEEE80211_CHAN_DISABLED ?
+ "restricted" : "valid",
+ geo_ch->flags);
+ }
+
+ if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
+ priv->cfg->sku & IWL_SKU_A) {
+ IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
+ "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+ priv->pci_dev->device,
+ priv->pci_dev->subsystem_device);
+ priv->cfg->sku &= ~IWL_SKU_A;
+ }
+
+ IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+ priv->bands[IEEE80211_BAND_2GHZ].n_channels,
+ priv->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_geos);
+
+/*
+ * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
+ */
+void iwl_legacy_free_geos(struct iwl_priv *priv)
+{
+ kfree(priv->ieee_channels);
+ kfree(priv->ieee_rates);
+ clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_free_geos);
+
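+/*
+ * Check whether the requested HT40 extension channel (above or below the
+ * control channel) is allowed for the given channel.
+ */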
+static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u16 channel, u8 extension_chan_offset)
+{
+ const struct iwl_channel_info *ch_info;
+
+ ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+ if (!iwl_legacy_is_channel_valid(ch_info))
+ return false;
+
+ if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+ return !(ch_info->ht40_extension_channel &
+ IEEE80211_CHAN_NO_HT40PLUS);
+ else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ return !(ch_info->ht40_extension_channel &
+ IEEE80211_CHAN_NO_HT40MINUS);
+
+ return false;
+}
+
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+	/*
+	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+	 * the bit will not be set in the pure 40 MHz case.
+	 */
+ if (ht_cap && !ht_cap->ht_supported)
+ return false;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ if (priv->disable_ht40)
+ return false;
+#endif
+
+ return iwl_legacy_is_channel_extension(priv, priv->band,
+ le16_to_cpu(ctx->staging.channel),
+ ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
+
+static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+ u16 new_val;
+ u16 beacon_factor;
+
+ /*
+ * If mac80211 hasn't given us a beacon interval, program
+ * the default into the device.
+ */
+ if (!beacon_val)
+ return DEFAULT_BEACON_INTERVAL;
+
+ /*
+ * If the beacon interval we obtained from the peer
+ * is too large, we'll have to wake up more often
+ * (and in IBSS case, we'll beacon too much)
+ *
+ * For example, if max_beacon_val is 4096, and the
+ * requested beacon interval is 7000, we'll have to
+ * use 3500 to be able to wake up on the beacons.
+ *
+ * This could badly influence beacon detection stats.
+ */
+
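+	/* e.g. with max_beacon_val 4096 and beacon_val 7000:
+	 * factor = (7000 + 4096) / 4096 = 2, new_val = 7000 / 2 = 3500 */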
+ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+ new_val = beacon_val / beacon_factor;
+
+ if (!new_val)
+ new_val = max_beacon_val;
+
+ return new_val;
+}
+
+int
+iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ u64 tsf;
+ s32 interval_tm, rem;
+ struct ieee80211_conf *conf = NULL;
+ u16 beacon_int;
+ struct ieee80211_vif *vif = ctx->vif;
+
+ conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+ lockdep_assert_held(&priv->mutex);
+
+ memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+ ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+ ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+ beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+ /*
+ * TODO: For IBSS we need to get atim_window from mac80211,
+ * for now just always use 0
+ */
+ ctx->timing.atim_window = 0;
+
+ beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
+ priv->hw_params.max_beacon_itrvl * TIME_UNIT);
+ ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
+ interval_tm = beacon_int * TIME_UNIT;
+ rem = do_div(tsf, interval_tm);
+ ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+ ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+ IWL_DEBUG_ASSOC(priv,
+ "beacon interval %d beacon timer %d beacon tim %d\n",
+ le16_to_cpu(ctx->timing.beacon_interval),
+ le32_to_cpu(ctx->timing.beacon_init_val),
+ le16_to_cpu(ctx->timing.atim_window));
+
+ return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+ sizeof(ctx->timing), &ctx->timing);
+}
+EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
+
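+/*
+ * Enable or disable hardware decryption by clearing or setting the
+ * RXON_FILTER_DIS_DECRYPT_MSK bit in the staging RXON filter flags.
+ */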
+void
+iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ int hw_decrypt)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+ if (hw_decrypt)
+ rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+ else
+ rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
+
+/* check that the RXON structure is valid */
+int
+iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+ bool error = false;
+
+ if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+ if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+ IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+ IWL_WARN(priv, "check 2.4G: wrong radar\n");
+ error = true;
+ }
+ } else {
+ if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+ IWL_WARN(priv, "check 5.2G: not short slot!\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_CCK_MSK) {
+ IWL_WARN(priv, "check 5.2G: CCK!\n");
+ error = true;
+ }
+ }
+ if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+ IWL_WARN(priv, "mac/bssid mcast!\n");
+ error = true;
+ }
+
+ /* make sure basic rates 6Mbps and 1Mbps are supported */
+ if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+ (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+ IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+ error = true;
+ }
+
+ if (le16_to_cpu(rxon->assoc_id) > 2007) {
+ IWL_WARN(priv, "aid > 2007\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+ == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+ IWL_WARN(priv, "CCK and short slot\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+ == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+		IWL_WARN(priv, "CCK and auto detect\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_TGG_PROTECT_MSK)) ==
+ RXON_FLG_TGG_PROTECT_MSK) {
+ IWL_WARN(priv, "TGg but no auto-detect\n");
+ error = true;
+ }
+
+ if (error)
+ IWL_WARN(priv, "Tuning to channel %d\n",
+ le16_to_cpu(rxon->channel));
+
+ if (error) {
+ IWL_ERR(priv, "Invalid RXON\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
+
+/**
+ * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @priv: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
+ const struct iwl_legacy_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+ if ((cond)) { \
+ IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
+ return 1; \
+ }
+
+#define CHK_NEQ(c1, c2) \
+ if ((c1) != (c2)) { \
+ IWL_DEBUG_INFO(priv, "need full RXON - " \
+ #c1 " != " #c2 " - %d != %d\n", \
+ (c1), (c2)); \
+ return 1; \
+ }
+
+ /* These items are only settable from the full RXON command */
+ CHK(!iwl_legacy_is_associated_ctx(ctx));
+ CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+ CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+ CHK(compare_ether_addr(staging->wlap_bssid_addr,
+ active->wlap_bssid_addr));
+ CHK_NEQ(staging->dev_type, active->dev_type);
+ CHK_NEQ(staging->channel, active->channel);
+ CHK_NEQ(staging->air_propagation, active->air_propagation);
+ CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+ active->ofdm_ht_single_stream_basic_rates);
+ CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+ active->ofdm_ht_dual_stream_basic_rates);
+ CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+ /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+ * be updated with the RXON_ASSOC command -- however only some
+ * flag transitions are allowed using RXON_ASSOC */
+
+ /* Check if we are not switching bands */
+ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+ active->flags & RXON_FLG_BAND_24G_MSK);
+
+ /* Check if we are switching association toggle */
+ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+ active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ return IWL_RATE_1M_PLCP;
+ else
+ return IWL_RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
+
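+/*
+ * Translate the current HT configuration (protection mode, 20/40 MHz
+ * channel mode and control-channel location) into staging RXON flags
+ * for a single context.
+ */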
+static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+ struct iwl_ht_config *ht_conf,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+ if (!ctx->ht.enabled) {
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+ RXON_FLG_HT40_PROT_MSK |
+ RXON_FLG_HT_PROT_MSK);
+ return;
+ }
+
+ rxon->flags |= cpu_to_le32(ctx->ht.protection <<
+ RXON_FLG_HT_OPERATING_MODE_POS);
+
+	/* Set up channel bandwidth:
+	 * 20 MHz only, 20/40 mixed or pure 40 if HT40 is allowed */
+	/* clear the HT channel mode before setting the new mode */
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
+ /* pure ht40 */
+ if (ctx->ht.protection ==
+ IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |=
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |=
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IWL_ERR(priv,
+ "invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+ }
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
+ "extension channel offset 0x%x\n",
+ le32_to_cpu(rxon->flags), ctx->ht.protection,
+ ctx->ht.extension_chan_offset);
+}
+
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+ struct iwl_rxon_context *ctx;
+
+ for_each_context(priv, ctx)
+ _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+ enum ieee80211_band band)
+{
+ const struct iwl_channel_info *ch_info;
+ int i;
+ u8 channel = 0;
+ u8 min, max;
+ struct iwl_rxon_context *ctx;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ min = 14;
+ max = priv->channel_count;
+ } else {
+ min = 0;
+ max = 14;
+ }
+
+ for (i = min; i < max; i++) {
+ bool busy = false;
+
+ for_each_context(priv, ctx) {
+ busy = priv->channel_info[i].channel ==
+ le16_to_cpu(ctx->staging.channel);
+ if (busy)
+ break;
+ }
+
+ if (busy)
+ continue;
+
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+ if (iwl_legacy_is_channel_valid(ch_info))
+ break;
+ }
+
+ return channel;
+}
+EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
+
+/**
+ * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE: Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+ struct iwl_rxon_context *ctx)
+{
+ enum ieee80211_band band = ch->band;
+ u16 channel = ch->hw_value;
+
+ if ((le16_to_cpu(ctx->staging.channel) == channel) &&
+ (priv->band == band))
+ return 0;
+
+ ctx->staging.channel = cpu_to_le16(channel);
+ if (band == IEEE80211_BAND_5GHZ)
+ ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+ priv->band = band;
+
+ IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
+
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ if (band == IEEE80211_BAND_5GHZ) {
+ ctx->staging.flags &=
+ ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
+ | RXON_FLG_CCK_MSK);
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ } else {
+ /* Copied from iwl_post_associate() */
+ if (vif && vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ const struct iwl_channel_info *ch_info;
+
+ memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+ if (!ctx->vif) {
+ ctx->staging.dev_type = ctx->unused_devtype;
+ } else
+ switch (ctx->vif->type) {
+
+ case NL80211_IFTYPE_STATION:
+ ctx->staging.dev_type = ctx->station_devtype;
+ ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ case NL80211_IFTYPE_ADHOC:
+ ctx->staging.dev_type = ctx->ibss_devtype;
+ ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+ RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ default:
+ IWL_ERR(priv, "Unsupported interface type %d\n",
+ ctx->vif->type);
+ break;
+ }
+
+#if 0
+ /* TODO: Figure out when short_preamble would be set and cache from
+ * that */
+ if (!hw_to_local(priv->hw)->short_preamble)
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band,
+ le16_to_cpu(ctx->active.channel));
+
+ if (!ch_info)
+ ch_info = &priv->channel_info[0];
+
+ ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ priv->band = ch_info->band;
+
+ iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
+
+ ctx->staging.ofdm_basic_rates =
+ (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ ctx->staging.cck_basic_rates =
+ (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ /* clear both MIX and PURE40 mode flag */
+ ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+ RXON_FLG_CHANNEL_MODE_PURE_40);
+ if (ctx->vif)
+ memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+ ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
+
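+/*
+ * Rebuild the active rate bitmap from the rates mac80211 advertises for
+ * the current band, and reset each context's CCK/OFDM basic rates.
+ */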
+void iwl_legacy_set_rate(struct iwl_priv *priv)
+{
+ const struct ieee80211_supported_band *hw = NULL;
+ struct ieee80211_rate *rate;
+ struct iwl_rxon_context *ctx;
+ int i;
+
+ hw = iwl_get_hw_mode(priv, priv->band);
+ if (!hw) {
+ IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
+ return;
+ }
+
+ priv->active_rate = 0;
+
+ for (i = 0; i < hw->n_bitrates; i++) {
+ rate = &(hw->bitrates[i]);
+ if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
+ priv->active_rate |= (1 << rate->hw_value);
+ }
+
+ IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.cck_basic_rates =
+ (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ ctx->staging.ofdm_basic_rates =
+ (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_set_rate);
+
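+/*
+ * Called when a channel switch completes (or fails): notify mac80211 and
+ * clear the switch_in_progress flag under the mutex.
+ */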
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->switch_rxon.switch_in_progress) {
+ ieee80211_chswitch_done(ctx->vif, is_success);
+ mutex_lock(&priv->mutex);
+ priv->switch_rxon.switch_in_progress = false;
+ mutex_unlock(&priv->mutex);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_chswitch_done);
+
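+/*
+ * Handle a channel switch announcement (CSA) notification from uCode:
+ * on success update the active and staging RXON channel, then report
+ * the result via iwl_legacy_chswitch_done().
+ */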
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (priv->switch_rxon.switch_in_progress) {
+ if (!le32_to_cpu(csa->status) &&
+ (csa->channel == priv->switch_rxon.channel)) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_legacy_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_legacy_chswitch_done(priv, false);
+ }
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_rx_csa);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+ IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+ IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
+ le16_to_cpu(rxon->channel));
+ IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+ IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
+ le32_to_cpu(rxon->filter_flags));
+ IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
+ IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
+ rxon->ofdm_basic_rates);
+ IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
+ rxon->cck_basic_rates);
+ IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
+ IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+ IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
+ le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
+#endif
+/**
+ * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
+{
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+
+ /* Cancel currently queued command. */
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+
+ IWL_ERR(priv, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
+ priv->cfg->ops->lib->dump_nic_error_log(priv);
+ if (priv->cfg->ops->lib->dump_fh)
+ priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
+ iwl_legacy_print_rx_config_cmd(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
+#endif
+
+ wake_up_interruptible(&priv->wait_command_queue);
+
+ /* Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit */
+ clear_bit(STATUS_READY, &priv->status);
+
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+ "Restarting adapter due to uCode error.\n");
+
+ if (priv->cfg->mod_params->restart_fw)
+ queue_work(priv->workqueue, &priv->restart);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
+
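+/*
+ * Tell the device to stop bus-mastering DMA and wait (up to 100 usec)
+ * for it to acknowledge via CSR_RESET_REG_FLAG_MASTER_DISABLED.
+ */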
+static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(priv, "stop master\n");
+
+ return ret;
+}
+
+void iwl_legacy_apm_stop(struct iwl_priv *priv)
+{
+ IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
+
+ /* Stop device's DMA activity */
+ iwl_legacy_apm_stop_master(priv);
+
+ /* Reset the entire device */
+ iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(iwl_legacy_apm_stop);
+
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int iwl_legacy_apm_init(struct iwl_priv *priv)
+{
+ int ret = 0;
+ u16 lctl;
+
+ IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
+ CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+	 * NOTE: This is a no-op for 3945 (non-existent bit)
+ */
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ if (priv->cfg->base_params->set_l0s) {
+ lctl = iwl_legacy_pcie_link_ctl(priv);
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ iwl_legacy_set_bit(priv, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ iwl_legacy_clear_bit(priv, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+ }
+ }
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (priv->cfg->base_params->pll_cfg_val)
+ iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
+ priv->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_legacy_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (priv->cfg->base_params->use_bsm)
+ iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+ else
+ iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_apm_init);
+
+
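+/*
+ * Set the user TX power limit. The request is range-checked against the
+ * device limit and may be deferred while scanning or while a RXON change
+ * is pending, unless "force" is set.
+ */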
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
+{
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!priv->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
+
+ if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d below lower limit %d.\n",
+ tx_power,
+ IWL4965_TX_POWER_TARGET_POWER_MIN);
+ return -EINVAL;
+ }
+
+ if (tx_power > priv->tx_power_device_lmt) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d above upper limit %d.\n",
+ tx_power, priv->tx_power_device_lmt);
+ return -EINVAL;
+ }
+
+ if (!iwl_legacy_is_ready_rf(priv))
+ return -EIO;
+
+	/* scan complete and commit_rxon use the tx_power_next value;
+	 * it always needs to be updated with the newest request */
+ priv->tx_power_next = tx_power;
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+ return 0;
+ }
+
+ prev_tx_power = priv->tx_power_user_lmt;
+ priv->tx_power_user_lmt = tx_power;
+
+ ret = priv->cfg->ops->lib->send_tx_power(priv);
+
+	/* if setting tx_power fails, restore the original tx power */
+ if (ret) {
+ priv->tx_power_user_lmt = prev_tx_power;
+ priv->tx_power_next = prev_tx_power;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_set_tx_power);
+
+void iwl_legacy_send_bt_config(struct iwl_priv *priv)
+{
+ struct iwl_bt_cmd bt_cmd = {
+ .lead_time = BT_LEAD_TIME_DEF,
+ .max_kill = BT_MAX_KILL_DEF,
+ .kill_ack_mask = 0,
+ .kill_cts_mask = 0,
+ };
+
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ IWL_DEBUG_INFO(priv, "BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+ if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(struct iwl_bt_cmd), &bt_cmd))
+ IWL_ERR(priv, "failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(iwl_legacy_send_bt_config);
+
+int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+ struct iwl_statistics_cmd statistics_cmd = {
+ .configuration_flags =
+ clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+ };
+
+ if (flags & CMD_ASYNC)
+ return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+ sizeof(struct iwl_statistics_cmd),
+ &statistics_cmd, NULL);
+ else
+ return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+ sizeof(struct iwl_statistics_cmd),
+ &statistics_cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
+
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
+
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+ "notification for %s:\n", len,
+ iwl_legacy_get_cmd_string(pkt->hdr.cmd));
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
+
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
+{
+ memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
+}
+
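+/*
+ * mac80211 conf_tx callback: copy the EDCA parameters (cw_min, cw_max,
+ * AIFSN, TXOP) for the given AC into every context's default QoS setup.
+ */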
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx;
+ unsigned long flags;
+ int q;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for_each_context(priv, ctx) {
+ ctx->qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ ctx->qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
+
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
+
+static int
+iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ iwl_legacy_connection_init_rx_config(priv, ctx);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ return iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static int iwl_legacy_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ priv->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = iwl_legacy_set_mode(priv, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ return 0;
+}
+
+int
+iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *tmp, *ctx = NULL;
+ int err;
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ vif->type, vif->addr);
+
+ mutex_lock(&priv->mutex);
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_WARN(priv, "Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_context(priv, tmp) {
+ u32 possible_modes =
+ tmp->interface_modes | tmp->exclusive_interface_modes;
+
+ if (tmp->vif) {
+ /* check if this busy context is exclusive */
+ if (tmp->exclusive_interface_modes &
+ BIT(tmp->vif->type)) {
+ err = -EINVAL;
+ goto out;
+ }
+ continue;
+ }
+
+ if (!(possible_modes & BIT(vif->type)))
+ continue;
+
+		/* found a possibly usable context without an interface */
+ ctx = tmp;
+ break;
+ }
+
+ if (!ctx) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = ctx;
+ ctx->vif = vif;
+
+ err = iwl_legacy_setup_interface(priv, ctx);
+ if (!err)
+ goto out;
+
+ ctx->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
+
+static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->scan_vif == vif) {
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+ iwl_legacy_force_scan_end(priv);
+ }
+
+ if (!mode_change) {
+ iwl_legacy_set_mode(priv, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+}
+
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->mutex);
+
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
+
+ iwl_legacy_teardown_interface(priv, vif, false);
+
+ memset(priv->bssid, 0, ETH_ALEN);
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
+
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
+{
+ if (!priv->txq)
+ priv->txq = kzalloc(
+ sizeof(struct iwl_tx_queue) *
+ priv->cfg->base_params->num_of_queues,
+ GFP_KERNEL);
+ if (!priv->txq) {
+ IWL_ERR(priv, "Not enough memory for txq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
+
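+/* Free the TX queue array allocated by iwl_legacy_alloc_txq_mem() */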
+void iwl_legacy_txq_mem(struct iwl_priv *priv)
+{
+ kfree(priv->txq);
+ priv->txq = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_mem);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+
+#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
+
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+ priv->tx_traffic_idx = 0;
+ priv->rx_traffic_idx = 0;
+ if (priv->tx_traffic)
+ memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+ if (priv->rx_traffic)
+ memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+}
+
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+ u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
+
+ if (iwlegacy_debug_level & IWL_DL_TX) {
+ if (!priv->tx_traffic) {
+ priv->tx_traffic =
+ kzalloc(traffic_size, GFP_KERNEL);
+ if (!priv->tx_traffic)
+ return -ENOMEM;
+ }
+ }
+ if (iwlegacy_debug_level & IWL_DL_RX) {
+ if (!priv->rx_traffic) {
+ priv->rx_traffic =
+ kzalloc(traffic_size, GFP_KERNEL);
+ if (!priv->rx_traffic)
+ return -ENOMEM;
+ }
+ }
+ iwl_legacy_reset_traffic_log(priv);
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
+
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+ kfree(priv->tx_traffic);
+ priv->tx_traffic = NULL;
+
+ kfree(priv->rx_traffic);
+ priv->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
+
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
+ return;
+
+ if (!priv->tx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+ ? IWL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((priv->tx_traffic +
+ (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+ header, len);
+ priv->tx_traffic_idx =
+ (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
+
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
+ return;
+
+ if (!priv->rx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+ ? IWL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((priv->rx_traffic +
+ (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+ header, len);
+ priv->rx_traffic_idx =
+ (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
+
+const char *iwl_legacy_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(MANAGEMENT_ASSOC_REQ);
+ IWL_CMD(MANAGEMENT_ASSOC_RESP);
+ IWL_CMD(MANAGEMENT_REASSOC_REQ);
+ IWL_CMD(MANAGEMENT_REASSOC_RESP);
+ IWL_CMD(MANAGEMENT_PROBE_REQ);
+ IWL_CMD(MANAGEMENT_PROBE_RESP);
+ IWL_CMD(MANAGEMENT_BEACON);
+ IWL_CMD(MANAGEMENT_ATIM);
+ IWL_CMD(MANAGEMENT_DISASSOC);
+ IWL_CMD(MANAGEMENT_AUTH);
+ IWL_CMD(MANAGEMENT_DEAUTH);
+ IWL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+const char *iwl_legacy_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CONTROL_BACK_REQ);
+ IWL_CMD(CONTROL_BACK);
+ IWL_CMD(CONTROL_PSPOLL);
+ IWL_CMD(CONTROL_RTS);
+ IWL_CMD(CONTROL_CTS);
+ IWL_CMD(CONTROL_ACK);
+ IWL_CMD(CONTROL_CFEND);
+ IWL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
+{
+ memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, the
+ * iwl_legacy_update_stats function will
+ * record all the MGMT, CTRL and DATA packets for both the TX and RX paths.
+ * Use debugfs to display the tx/rx statistics.
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
+ * information will be recorded, but DATA packets will still be counted
+ * because iwl_led.c needs to control the LED blinking based on the
+ * number of tx and rx data packets.
+ */
+void
+iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &priv->tx_stats;
+ else
+ stats = &priv->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_update_stats);
+#endif
+
+static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!iwl_legacy_is_any_associated(priv)) {
+ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+ return;
+ }
+	/*
+	 * There is no easy and better way to force-reset the radio;
+	 * the only known method is switching channels, which forces the
+	 * radio to be reset and re-tuned.
+	 * Use an internal short scan (single channel) operation to
+	 * achieve this objective.
+	 * The driver should reset the radio when a number of consecutive
+	 * missed beacons, or any other uCode error condition, is detected.
+	 */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_legacy_internal_short_hw_scan(priv);
+}
+
+
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
+{
+ struct iwl_force_reset *force_reset;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+ if (mode >= IWL_MAX_FORCE_RESET) {
+ IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+ return -EINVAL;
+ }
+ force_reset = &priv->force_reset[mode];
+ force_reset->reset_request_count++;
+ if (!external) {
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ IWL_DEBUG_INFO(priv, "force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+ switch (mode) {
+ case IWL_RF_RESET:
+ _iwl_legacy_force_rf_reset(priv);
+ break;
+ case IWL_FW_RESET:
+		/*
+		 * If the request is external (e.g. from debugfs),
+		 * always perform it, regardless of the module
+		 * parameter setting.
+		 * If the request is internal (uCode error or a driver-
+		 * detected failure), the fw_restart module parameter
+		 * needs to be checked before reloading the firmware.
+		 */
+ if (!external && !priv->cfg->mod_params->restart_fw) {
+ IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
+ "module parameter setting\n");
+ break;
+ }
+ IWL_ERR(priv, "On demand firmware reload\n");
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+ wake_up_interruptible(&priv->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(STATUS_READY, &priv->status);
+ queue_work(priv->workqueue, &priv->restart);
+ break;
+ }
+ return 0;
+}
+
+int
+iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+ struct iwl_rxon_context *tmp;
+ u32 interface_modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&priv->mutex);
+
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (ctx->exclusive_interface_modes & BIT(newtype)) {
+ for_each_context(priv, tmp) {
+ if (ctx == tmp)
+ continue;
+
+ if (!tmp->vif)
+ continue;
+
+ /*
+ * The current mode switch would be exclusive, but
+ * another context is active ... refuse the switch.
+ */
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* success */
+ iwl_legacy_teardown_interface(priv, vif, true);
+ vif->type = newtype;
+ err = iwl_legacy_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+ out:
+ mutex_unlock(&priv->mutex);
+ return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+ struct iwl_tx_queue *txq = &priv->txq[cnt];
+ struct iwl_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout = txq->time_stamp +
+ msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
+ q->id, priv->cfg->base_params->wd_timeout);
+ ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
+ return (ret == -EAGAIN) ? 0 : 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we will
+ * discover a hung queue between timeout and 1.25*timeout.
+ */
+#define IWL_WD_TICK(timeout) ((timeout) / 4)
+
+/*
+ * Watchdog timer callback: we check each tx queue for being stuck; if one
+ * is hung we reset the firmware. If everything is fine, just rearm the timer.
+ */
+void iwl_legacy_bg_watchdog(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+ int cnt;
+ unsigned long timeout;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ timeout = priv->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (iwl_legacy_is_any_associated(priv)) {
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == priv->cmd_queue)
+ continue;
+ if (iwl_legacy_check_stuck_queue(priv, cnt))
+ return;
+ }
+ }
+
+ mod_timer(&priv->watchdog, jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
+
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
+{
+ unsigned int timeout = priv->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&priv->watchdog,
+ jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
+
+/*
+ * extended beacon time format
+ * time in usec will be changed into a 32-bit value in extended:internal format
+ * the extended part is the beacon counts
+ * the internal part is the time in usec within one beacon interval
+ */
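+/*
+ * For example, assuming TIME_UNIT is 1024 usec (one TU): with a 100 TU
+ * beacon interval (102400 usec) and usec = 250000, the extended part is
+ * 2 beacons and the internal part is 45200 usec (provided neither value
+ * is truncated by the bit masks).
+ */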
+u32
+iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+ u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot = (usec / interval) &
+ (iwl_legacy_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits) >>
+ priv->hw_params.beacon_time_tsf_bits);
+ rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+
+ return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
+
+/* base is usually what we get from uCode with each received frame;
+ * it is the same as the HW timer counter counting down
+ */
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+ u32 addon, u32 beacon_interval)
+{
+ u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+ u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits)) +
+ (addon & iwl_legacy_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << priv->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int iwl_legacy_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+	/*
+	 * This function is called when the system goes into the suspend state.
+	 * mac80211 will call iwl_mac_stop() from its suspend function first,
+	 * but since iwl_mac_stop() has no knowledge of who the caller is,
+	 * it will not call apm_ops.stop() to stop the DMA operation.
+	 * Call apm_ops.stop here to make sure the DMA is stopped.
+	 */
+ iwl_legacy_apm_stop(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_suspend);
+
+int iwl_legacy_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ bool hw_rfkill = false;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ iwl_legacy_enable_interrupts(priv);
+
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_resume);
+
+const struct dev_pm_ops iwl_legacy_pm_ops = {
+ .suspend = iwl_legacy_pci_suspend,
+ .resume = iwl_legacy_pci_resume,
+ .freeze = iwl_legacy_pci_suspend,
+ .thaw = iwl_legacy_pci_resume,
+ .poweroff = iwl_legacy_pci_suspend,
+ .restore = iwl_legacy_pci_resume,
+};
+EXPORT_SYMBOL(iwl_legacy_pm_ops);
+
+#endif /* CONFIG_PM */
+
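+/*
+ * Push the current default QoS parameters to uCode for an active context,
+ * setting the EDCA-update and TGN flags as appropriate.
+ */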
+static void
+iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active,
+ ctx->qos_data.def_qos_parm.qos_flags);
+
+ iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
+ sizeof(struct iwl_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * iwl_legacy_mac_config - mac80211 config callback
+ */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ struct iwl_rxon_context *ctx;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+ bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+ if (WARN_ON(!priv->cfg->ops->legacy))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+ channel->hw_value, changed);
+
+ if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+ test_bit(STATUS_SCANNING, &priv->status))) {
+ scan_active = 1;
+ IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ for_each_context(priv, ctx)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+	/* during scanning mac80211 will delay channel setting until
+	 * the scan finishes, with changed = 0
+	 */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = channel->hw_value;
+ ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for_each_context(priv, ctx) {
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed[ctx->ctxid] = true;
+ }
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in iwl_ht_conf
+ */
+ ctx->ht.protection =
+ IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ /* if we are switching from HT to 2.4 GHz, clear flags
+ * from any HT-related info since 2.4 GHz does not
+ * support HT */
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_legacy_set_rxon_channel(priv, channel, ctx);
+ iwl_legacy_set_rxon_ht(priv, ht_conf);
+
+ iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+ ctx->vif);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (priv->cfg->ops->legacy->update_bcast_stations)
+ ret =
+ priv->cfg->ops->legacy->update_bcast_stations(priv);
+
+ set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ iwl_legacy_set_rate(priv);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = iwl_legacy_power_update_mode(priv, false);
+ if (ret)
+ IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+ priv->tx_power_user_lmt, conf->power_level);
+
+ iwl_legacy_set_tx_power(priv, conf->power_level, false);
+ }
+
+ if (!iwl_legacy_is_ready(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ for_each_context(priv, ctx) {
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ iwl_legacy_commit_rxon(priv, ctx);
+ else
+ IWL_DEBUG_INFO(priv,
+ "Not re-sending same RXON configuration.\n");
+ if (ht_changed[ctx->ctxid])
+ iwl_legacy_update_qos(priv, ctx);
+ }
+
+out:
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_config);
+
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+ /* IBSS can only be the IWL_RXON_CTX_BSS context */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ if (WARN_ON(!priv->cfg->ops->legacy))
+ return;
+
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* new association: get rid of the IBSS beacon skb */
+ if (priv->beacon_skb)
+ dev_kfree_skb(priv->beacon_skb);
+
+ priv->beacon_skb = NULL;
+
+ priv->timestamp = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_legacy_scan_cancel_timeout(priv, 100);
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ /* we are restarting the association process,
+ * so clear the RXON_FILTER_ASSOC_MSK bit
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ iwl_legacy_set_rate(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
+
+static void iwl_legacy_ht_conf(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_ASSOC(priv, "enter:\n");
+
+ if (!ctx->ht.enabled)
+ return;
+
+ ctx->ht.protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present =
+ !!(bss_conf->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ ht_conf->single_chain_sufficient = false;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int maxstreams;
+
+ maxstreams = (ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+ >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if ((ht_cap->mcs.rx_mask[1] == 0) &&
+ (ht_cap->mcs.rx_mask[2] == 0))
+ ht_conf->single_chain_sufficient = true;
+ if (maxstreams <= 1)
+ ht_conf->single_chain_sufficient = true;
+ } else {
+ /*
+ * If anything, this can only happen through a race
+ * when the AP disconnects us while we're still
+ * setting up the connection; in that case mac80211
+ * will soon tell us about it.
+ */
+ ht_conf->single_chain_sufficient = true;
+ }
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_conf->single_chain_sufficient = true;
+ break;
+ default:
+ break;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "leave\n");
+}
+
+static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ctx->staging.assoc_id = 0;
+ iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!priv->beacon_ctx) {
+ IWL_ERR(priv, "update beacon but no beacon context!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->beacon_skb)
+ dev_kfree_skb(priv->beacon_skb);
+
+ priv->beacon_skb = skb;
+
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ priv->timestamp = le64_to_cpu(timestamp);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return;
+ }
+
+ priv->cfg->ops->legacy->post_associate(priv);
+}
+
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+ int ret;
+
+ if (WARN_ON(!priv->cfg->ops->legacy))
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
+
+ if (!iwl_legacy_is_alive(priv))
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ if (changes & BSS_CHANGED_QOS) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ctx->qos_data.qos_active = bss_conf->qos;
+ iwl_legacy_update_qos(priv, ctx);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ /*
+ * the add_interface code must make sure we only ever
+ * have a single interface that could be beaconing at
+ * any time.
+ */
+ if (vif->bss_conf.enable_beacon)
+ priv->beacon_ctx = ctx;
+ else
+ priv->beacon_ctx = NULL;
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
+ if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
+ IWL_WARN(priv,
+ "Aborted scan still in progress after 100ms\n");
+ IWL_DEBUG_MAC80211(priv,
+ "leaving - scan abort failed.\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+ memcpy(ctx->staging.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ ctx->staging.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ }
+
+ }
+
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
+ iwl_legacy_beacon_update(hw, vif);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ IWL_DEBUG_MAC80211(priv,
+ "ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ (priv->band != IEEE80211_BAND_5GHZ))
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from iwl_legacy_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ ctx->staging.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ iwl_legacy_ht_conf(priv, vif);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
+ if (bss_conf->assoc) {
+ priv->timestamp = bss_conf->timestamp;
+
+ if (!iwl_legacy_is_rfkill(priv))
+ priv->cfg->ops->legacy->post_associate(priv);
+ } else
+ iwl_legacy_set_no_assoc(priv, vif);
+ }
+
+ if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
+ IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
+ changes);
+ ret = iwl_legacy_send_rxon_assoc(priv, ctx);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&ctx->active,
+ &ctx->staging,
+ sizeof(struct iwl_legacy_rxon_cmd));
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(ctx->staging.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ priv->cfg->ops->legacy->config_ap(priv);
+ } else
+ iwl_legacy_set_no_assoc(priv, vif);
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
+ bss_conf->ibss_joined);
+ if (ret)
+ IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 inta_fh;
+ unsigned long flags;
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta && !inta_fh) {
+ IWL_DEBUG_ISR(priv,
+ "Ignore interrupt, inta == 0, inta_fh == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+ inta, inta_mask, inta_fh);
+
+ inta &= ~CSR_INT_BIT_SCD;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta || inta_fh))
+ tasklet_schedule(&priv->irq_tasklet);
+
+unplugged:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status))
+ iwl_legacy_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_legacy_isr);
+
+/*
+ * iwl_legacy_tx_cmd_protection: Set RTS/CTS. This function is shared
+ * by 3945 and 4965 only.
+ */
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
+{
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
+ } else if (info->control.rates[0].flags &
+ IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 000000000000..f03b463e4378
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,646 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_core_h__
+#define __iwl_legacy_core_h__
+
+/************************
+ * forward declarations *
+ ************************/
+struct iwl_host_cmd;
+struct iwl_cmd;
+
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+#define TIME_UNIT 1024
+
+#define IWL_SKU_G 0x1
+#define IWL_SKU_A 0x2
+#define IWL_SKU_N 0x8
+
+#define IWL_CMD(x) case x: return #x
+
+struct iwl_hcmd_ops {
+ int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+ int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+ void (*set_rxon_chain)(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+};
+
+struct iwl_hcmd_utils_ops {
+ u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
+ u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
+ u8 *data);
+ int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
+ void (*post_scan)(struct iwl_priv *priv);
+};
+
+struct iwl_apm_ops {
+ int (*init)(struct iwl_priv *priv);
+ void (*config)(struct iwl_priv *priv);
+};
+
+struct iwl_debugfs_ops {
+ ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+};
+
+struct iwl_temp_ops {
+ void (*temperature)(struct iwl_priv *priv);
+};
+
+struct iwl_lib_ops {
+ /* set hw dependent parameters */
+ int (*set_hw_params)(struct iwl_priv *priv);
+ /* Handling TX */
+ void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+ int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+ int (*txq_init)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+ /* setup Rx handler */
+ void (*rx_handler_setup)(struct iwl_priv *priv);
+ /* alive notification after init uCode load */
+ void (*init_alive_start)(struct iwl_priv *priv);
+ /* check validity of rtc data address */
+ int (*is_valid_rtc_data_addr)(u32 addr);
+ /* 1st ucode load */
+ int (*load_ucode)(struct iwl_priv *priv);
+ int (*dump_nic_event_log)(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+ void (*dump_nic_error_log)(struct iwl_priv *priv);
+ int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
+ int (*set_channel_switch)(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+ struct iwl_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct iwl_priv *priv);
+ void (*update_chain_flags)(struct iwl_priv *priv);
+
+ /* eeprom operations (as defined in iwl-eeprom.h) */
+ struct iwl_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct iwl_temp_ops temp_ops;
+ /* check for plcp health */
+ bool (*check_plcp_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+
+ struct iwl_debugfs_ops debugfs_ops;
+
+};
+
+struct iwl_led_ops {
+ int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
+};
+
+struct iwl_legacy_ops {
+ void (*post_associate)(struct iwl_priv *priv);
+ void (*config_ap)(struct iwl_priv *priv);
+ /* station management */
+ int (*update_bcast_stations)(struct iwl_priv *priv);
+ int (*manage_ibss_station)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+};
+
+struct iwl_ops {
+ const struct iwl_lib_ops *lib;
+ const struct iwl_hcmd_ops *hcmd;
+ const struct iwl_hcmd_utils_ops *utils;
+ const struct iwl_led_ops *led;
+ const struct iwl_nic_ops *nic;
+ const struct iwl_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct iwl_mod_params {
+ int sw_crypto; /* def: 0 = using hardware encryption */
+ int disable_hw_scan; /* def: 0 = use h/w scan */
+ int num_of_queues; /* def: HW dependent */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
+ int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int antenna; /* def: 0 = both antennas (use diversity) */
+ int restart_fw; /* def: 1 = restart firmware */
+};
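+
+/*
+ * Usage sketch (not part of this header): the individual device drivers
+ * expose these fields as module parameters, roughly along the lines below.
+ * The iwl_dummy_mod_params name is hypothetical.
+ *
+ *	static struct iwl_mod_params iwl_dummy_mod_params = {
+ *		.amsdu_size_8K = 1,
+ *		.restart_fw = 1,
+ *	};
+ *
+ *	module_param_named(swcrypto, iwl_dummy_mod_params.sw_crypto,
+ *			   int, S_IRUGO);
+ *	MODULE_PARM_DESC(swcrypto, "use software crypto (default 0)");
+ */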
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ * to the deviation, to achieve the desired LED frequency.
+ * The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @plcp_delta_threshold: PLCP error rate threshold used to trigger
+ * radio tuning when the received PLCP error rate is too high
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode in Kelvin
+ * @max_event_log_size: size of the event log buffer used for uCode event logging
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ */
+struct iwl_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues;/* def: HW dependent */
+ /* for iwl_legacy_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ u8 plcp_delta_threshold;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ u32 max_event_log_size;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+};
+
+/**
+ * struct iwl_cfg
+ * @fw_name_pre: Firmware filename prefix. The API version and extension
+ * (.ucode) will be appended to this prefix before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antennas for the scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @iwl_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
+ * Driver interacts with Firmware API version >= 2.
+ * } else {
+ * Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * iwl_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct iwl_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ unsigned int sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct iwl_ops *ops;
+ /* module based parameters which can be set from modprobe cmd */
+ const struct iwl_mod_params *mod_params;
+ /* params not likely to change within a device family */
+ struct iwl_base_params *base_params;
+ /* params likely to change within a device family */
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+ enum iwl_led_mode led_mode;
+};
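+
+/*
+ * Illustrative sketch only (not part of the original driver): a device
+ * family built on this layer would describe itself roughly like this.
+ * All of the iwl_dummy_* names and numeric values below are hypothetical,
+ * and the mandatory ops pointers are omitted for brevity.
+ *
+ *	static struct iwl_base_params iwl_dummy_base_params = {
+ *		.eeprom_size = 1024,
+ *		.num_of_queues = 16,
+ *		.led_compensation = 64,
+ *		.wd_timeout = 2000,
+ *	};
+ *
+ *	static struct iwl_cfg iwl_dummy_cfg = {
+ *		.name = "Dummy Wireless Adapter",
+ *		.fw_name_pre = "iwlwifi-dummy-",
+ *		.ucode_api_max = 2,
+ *		.ucode_api_min = 1,
+ *		.sku = IWL_SKU_A | IWL_SKU_G,
+ *		.mod_params = &iwl_dummy_mod_params,
+ *		.base_params = &iwl_dummy_base_params,
+ *	};
+ *
+ * With these values the firmware loader would first request
+ * "iwlwifi-dummy-2.ucode" (fw_name_pre plus ucode_api_max plus ".ucode")
+ * and fall back toward ucode_api_min if that file is not present.
+ */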
+
+/***************************
+ * L i b *
+ ***************************/
+
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ int hw_decrypt);
+int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
+ struct ieee80211_channel *ch,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+ enum ieee80211_band band);
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+ struct iwl_ht_config *ht_conf);
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_set_rate(struct iwl_priv *priv);
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats);
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
+int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
+void iwl_legacy_txq_mem(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header);
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header);
+const char *iwl_legacy_get_mgmt_string(int cmd);
+const char *iwl_legacy_get_ctrl_string(int cmd);
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
+void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
+ u16 len);
+#else
+static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+ return 0;
+}
+static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
+ __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
+void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_rx_queue *q);
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
+void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+/* Handlers */
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void iwl_legacy_init_scan_params(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_legacy_force_scan_end(struct iwl_priv *priv);
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
+u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
+ struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ie, int ie_len, int left);
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
+u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u8 n_probes);
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
+
+/* For faster active scanning, the scan will move to the next channel if fewer
+ * than PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending a probe request. This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable the "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
+#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
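+
+/* Illustrative sketch (not from the original driver) of how these values are
+ * meant to be used: the scan request sent to the device carries a quiet time
+ * and a quiet PLCP threshold. The structure and field names below are
+ * hypothetical placeholders for the real scan command layout.
+ *
+ *	struct iwl_dummy_scan_cmd {
+ *		__le16 quiet_time;
+ *		__le16 quiet_plcp_th;
+ *	};
+ *
+ *	static void iwl_dummy_fill_quiet(struct iwl_dummy_scan_cmd *scan)
+ *	{
+ *		scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+ *		scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ *	}
+ */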
+
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
+
+/*****************************************************
+ * S e n d i n g H o s t C o m m a n d s *
+ *****************************************************/
+
+const char *iwl_legacy_get_cmd_string(u8 cmd);
+int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
+ struct iwl_host_cmd *cmd);
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+ u16 len, const void *data);
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
+ const void *data,
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt));
+
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+
+
+/*****************************************************
+ * PCI *
+ *****************************************************/
+
+static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
+ pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+void iwl_legacy_bg_watchdog(unsigned long data);
+u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+ u32 usec, u32 beacon_interval);
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+ u32 addon, u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int iwl_legacy_pci_suspend(struct device *device);
+int iwl_legacy_pci_resume(struct device *device);
+extern const struct dev_pm_ops iwl_legacy_pm_ops;
+
+#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IWL_LEGACY_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
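+
+/*
+ * Usage sketch (not part of this header): a device driver built on this
+ * layer plugs the shared PM callbacks into its pci_driver. The iwl_dummy_*
+ * names are hypothetical.
+ *
+ *	static struct pci_driver iwl_dummy_driver = {
+ *		.name = "iwl_dummy",
+ *		.id_table = iwl_dummy_hw_card_ids,
+ *		.probe = iwl_dummy_pci_probe,
+ *		.remove = __devexit_p(iwl_dummy_pci_remove),
+ *		.driver.pm = IWL_LEGACY_PM_OPS,
+ *	};
+ *
+ * When CONFIG_PM is not set, IWL_LEGACY_PM_OPS evaluates to NULL and the
+ * PCI core simply skips the PM callbacks.
+ */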
+
+/*****************************************************
+* Error Handling Debugging
+******************************************************/
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+#else
+static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+}
+#endif
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
+
+/*****************************************************
+* GEOS
+******************************************************/
+int iwl_legacy_init_geos(struct iwl_priv *priv);
+void iwl_legacy_free_geos(struct iwl_priv *priv);
+
+/*************** DRIVER STATUS FUNCTIONS *****/
+
+#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
+#define STATUS_INT_ENABLED 2
+#define STATUS_RF_KILL_HW 3
+#define STATUS_CT_KILL 4
+#define STATUS_INIT 5
+#define STATUS_ALIVE 6
+#define STATUS_READY 7
+#define STATUS_TEMPERATURE 8
+#define STATUS_GEO_CONFIGURED 9
+#define STATUS_EXIT_PENDING 10
+#define STATUS_STATISTICS 12
+#define STATUS_SCANNING 13
+#define STATUS_SCAN_ABORTING 14
+#define STATUS_SCAN_HW 15
+#define STATUS_POWER_PMI 16
+#define STATUS_FW_ERROR 17
+
+
+static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(STATUS_READY, &priv->status) &&
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
+ !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_legacy_is_init(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_INIT, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
+{
+ return iwl_legacy_is_rfkill_hw(priv);
+}
+
+static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
+{
+
+ if (iwl_legacy_is_rfkill(priv))
+ return 0;
+
+ return iwl_legacy_is_ready(priv);
+}
+
+extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
+extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
+ u8 flags, bool clear);
+void iwl_legacy_apm_stop(struct iwl_priv *priv);
+int iwl_legacy_apm_init(struct iwl_priv *priv);
+
+int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
+}
+static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
+}
+static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
+ struct iwl_priv *priv, enum ieee80211_band band)
+{
+ return priv->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes);
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data);
+
+#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 000000000000..668a9616c269
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_csr_h__
+#define __iwl_legacy_csr_h__
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_legacy_write_direct32() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE: Device does need to be awake in order to read this memory
+ * via CSR_EEPROM register
+ */
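+/* For instance (illustrative only), reading the hardware revision needs
+ * nothing more than a plain CSR read, with no "grab nic access" handshake:
+ *
+ *	u32 hw_rev = iwl_read32(priv, CSR_HW_REV);
+ *
+ * The HBUS registers described at the end of this file are different and
+ * must go through the direct-access helpers instead.
+ */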
+#define CSR_BASE (0x000)
+
+#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
+#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_GP_CNTRL (CSR_BASE+0x024)
+
+/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-8: Reserved
+ * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
+ * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
+ * 1-0: "Dash" (-) value, as in A-1, etc.
+ *
+ * NOTE: Revision step affects calculation of CCK txpower for 4965.
+ * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
+ */
+#define CSR_HW_REV (CSR_BASE+0x028)
+
+/*
+ * EEPROM memory reads
+ *
+ * NOTE: Device must be awake, initialized via apm_ops.init(),
+ * in order to read.
+ */
+#define CSR_EEPROM_REG (CSR_BASE+0x02c)
+#define CSR_EEPROM_GP (CSR_BASE+0x030)
+
+#define CSR_GIO_REG (CSR_BASE+0x03C)
+#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
+#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
+#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
+#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
+#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
+
+#define CSR_LED_REG (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
+
+/* Analog phase-lock-loop configuration */
+#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
+
+/*
+ * CSR Hardware Revision Workaround Register. Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
+ * See also CSR_HW_REV register.
+ * Bit fields:
+ * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
+ * 1-0: "Dash" (-) value, as in C-1, etc.
+ */
+#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
+
+/* Bits for CSR_HW_IF_CONFIG_REG */
+#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
+#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
+#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
+
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+
+#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
+ CSR_INT_BIT_HW_ERR | \
+ CSR_INT_BIT_FH_TX | \
+ CSR_INT_BIT_SW_ERR | \
+ CSR_INT_BIT_RF_KILL | \
+ CSR_INT_BIT_SW_RX | \
+ CSR_INT_BIT_WAKEUP | \
+ CSR_INT_BIT_ALIVE)
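+
+/* Illustrative ack sequence (not lifted verbatim from the driver): after
+ * reading the pending bits, writing the same bits back as "1"s clears
+ * exactly those interrupt sources.
+ *
+ *	u32 inta = iwl_read32(priv, CSR_INT);
+ *	u32 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ *	iwl_write32(priv, CSR_INT, inta);
+ *	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+ */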
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+
+#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
+ CSR39_FH_INT_BIT_RX_CHNL2 | \
+ CSR_FH_INT_BIT_RX_CHNL1 | \
+ CSR_FH_INT_BIT_RX_CHNL0)
+
+
+#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
+ CSR_FH_INT_BIT_TX_CHNL1 | \
+ CSR_FH_INT_BIT_TX_CHNL0)
+
+#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
+ CSR_FH_INT_BIT_RX_CHNL1 | \
+ CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
+ CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
+#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
+#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
+
+/* RESET */
+#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
+#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
+#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
+#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24: POWER_SAVE_TYPE
+ * Indicates current power-saving mode:
+ * 000 -- No power saving
+ * 001 -- MAC power-down
+ * 010 -- PHY (radio) power-down
+ * 011 -- Error
+ * 9-6: SYS_CONFIG
+ * Indicates current system configuration, reflecting pins on chip
+ * as forced high/low by device circuit board.
+ * 4: GOING_TO_SLEEP
+ * Indicates MAC is entering a power-saving sleep power-down.
+ * Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that 4965 or 3945 has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ */
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
+#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
+#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
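+
+/* Illustrative sketch of the MAC_ACCESS_REQ handshake described above (not
+ * taken verbatim from the driver; iwl_dummy_poll_bit is a hypothetical
+ * helper that polls a register until the masked bits reach the given value):
+ *
+ *	static int iwl_dummy_grab_nic_access(struct iwl_priv *priv)
+ *	{
+ *		iwl_write32(priv, CSR_GP_CNTRL,
+ *			    iwl_read32(priv, CSR_GP_CNTRL) |
+ *			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ *
+ *		return iwl_dummy_poll_bit(priv, CSR_GP_CNTRL,
+ *			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ *			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ *			CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP);
+ *	}
+ *
+ * The host clears MAC_ACCESS_REQ again once it is done with the
+ * device-internal registers.
+ */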
+
+
+/* EEPROM REG */
+#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
+#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
+#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
+#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
+
+/* EEPROM GP */
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
+
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
+
+
+/* CSR GIO */
+#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ * 4: UCODE_DISABLE
+ * Host sets this to request permanent halt of uCode, same as
+ * sending CARD_STATE command with "halt" bit set.
+ * 3: CT_KILL_EXIT
+ * Host sets this to request exit from CT_KILL state, i.e. host thinks
+ * device temperature is low enough to continue normal operation.
+ * 2: CMD_BLOCKED
+ * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ * to release uCode to clear all Tx and command queues, enter
+ * unassociated mode, and power down.
+ * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
+ * 1: SW_BIT_RFKILL
+ * Host sets this when issuing CARD_STATE command to request
+ * device sleep.
+ * 0: MAC_SLEEP
+ * uCode sets this when preparing a power-saving power-down.
+ * uCode resets this when power-up is complete and SRAM is sane.
+ * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
+ * and must restore this data after powering back up.
+ * MAC_SLEEP is the best indication that restore is complete.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ */
+#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
+#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
+#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
+#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
+
+/* LED */
+#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define CSR_LED_REG_TRUN_ON (0x78)
+#define CSR_LED_REG_TRUN_OFF (0x38)
+
+/* ANA_PLL */
+#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
+
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define HBUS_BASE (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job. Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ * 0-31: memory address within device
+ */
+#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
+#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
+#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
+#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
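+
+/* Illustrative sketch of the indirect read pattern described above (not
+ * from the driver; it assumes MAC access has already been granted and uses
+ * a hypothetical helper name):
+ *
+ *	static u32 iwl_dummy_read_targ_mem(struct iwl_priv *priv, u32 addr)
+ *	{
+ *		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+ *		return iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ *	}
+ *
+ * Because the address auto-increments, repeated reads of HBUS_TARG_MEM_RDAT
+ * return successive dwords.
+ */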
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.). First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ * 0-15: register address (offset) within device
+ * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
+#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
+#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
+#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ * 0-7: queue write index
+ * 11-8: queue selector
+ */
+#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
+
+#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 000000000000..ae13112701bf
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_debug_h__
+#define __iwl_legacy_debug_h__
+
+struct iwl_priv;
+extern u32 iwlegacy_debug_level;
+
+#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
+#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
+#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
+#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
+
+#define iwl_print_hex_error(priv, p, len) \
+do { \
+ print_hex_dump(KERN_ERR, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define IWL_DEBUG(__priv, level, fmt, args...) \
+do { \
+ if (iwl_legacy_get_debug_level(__priv) & (level)) \
+ dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
+do { \
+ if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
+ dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define iwl_print_hex_dump(priv, level, p, len) \
+do { \
+ if (iwl_legacy_get_debug_level(priv) & level) \
+ print_hex_dump(KERN_DEBUG, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#else
+#define IWL_DEBUG(__priv, level, fmt, args...)
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
+static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
+ const void *p, u32 len)
+{}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
+#else
+static inline int
+iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+ return 0;
+}
+static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IWL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
+ * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ * /sys/module/iwl4965/parameters/debug
+ * /sys/module/iwl3945/parameters/debug
+ * /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
+ */
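+/*
+ * Hypothetical example (IWL_DL_FOO/IWL_DEBUG_FOO are placeholders, not
+ * defined below): a new classification and its helper macro would be
+ *
+ *	#define IWL_DL_FOO		(1 << n)	(pick an unused bit)
+ *	#define IWL_DEBUG_FOO(p, f, a...)	IWL_DEBUG(p, IWL_DL_FOO, f, ## a)
+ *
+ * and a call site would then read IWL_DEBUG_FOO(priv, "fmt", args).
+ */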
+
+/* 0x0000000F - 0x00000001 */
+#define IWL_DL_INFO (1 << 0)
+#define IWL_DL_MAC80211 (1 << 1)
+#define IWL_DL_HCMD (1 << 2)
+#define IWL_DL_STATE (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_MACDUMP (1 << 4)
+#define IWL_DL_HCMD_DUMP (1 << 5)
+#define IWL_DL_EEPROM (1 << 6)
+#define IWL_DL_RADIO (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IWL_DL_POWER (1 << 8)
+#define IWL_DL_TEMP (1 << 9)
+#define IWL_DL_NOTIF (1 << 10)
+#define IWL_DL_SCAN (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IWL_DL_ASSOC (1 << 12)
+#define IWL_DL_DROP (1 << 13)
+#define IWL_DL_TXPOWER (1 << 14)
+#define IWL_DL_AP (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IWL_DL_FW (1 << 16)
+#define IWL_DL_RF_KILL (1 << 17)
+#define IWL_DL_FW_ERRORS (1 << 18)
+#define IWL_DL_LED (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IWL_DL_RATE (1 << 20)
+#define IWL_DL_CALIB (1 << 21)
+#define IWL_DL_WEP (1 << 22)
+#define IWL_DL_TX (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IWL_DL_RX (1 << 24)
+#define IWL_DL_ISR (1 << 25)
+#define IWL_DL_HT (1 << 26)
+#define IWL_DL_IO (1 << 27)
+/* 0xF0000000 - 0x10000000 */
+#define IWL_DL_11H (1 << 28)
+#define IWL_DL_STATS (1 << 29)
+#define IWL_DL_TX_REPLY (1 << 30)
+#define IWL_DL_QOS (1 << 31)
+
+#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
+#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
+#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
+#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
+#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
+#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
+#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
+#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
+#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
+#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
+#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
+#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
+#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
+#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
+#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
+#define IWL_DEBUG_ASSOC(p, f, a...) \
+ IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
+#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 000000000000..2d32438b4cb8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1467 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* creation and removal of debugfs files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_legacy_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
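+/*
+ * Note: these helpers expect the enclosing function to have a local 'priv'
+ * pointer and an 'err:' label to jump to on failure, as in
+ * iwl_legacy_dbgfs_register() below, e.g.:
+ *
+ *	DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ *	DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
+ */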
+
+/* file operations */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+
+static int
+iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
+ .read = iwl_legacy_dbgfs_##name##_read, \
+ .open = iwl_legacy_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
+ .write = iwl_legacy_dbgfs_##name##_write, \
+ .open = iwl_legacy_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
+ .write = iwl_legacy_dbgfs_##name##_write, \
+ .read = iwl_legacy_dbgfs_##name##_read, \
+ .open = iwl_legacy_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
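+/*
+ * For example, DEBUGFS_READ_WRITE_FILE_OPS(sram) below declares
+ * iwl_legacy_dbgfs_sram_read()/_write() and defines
+ * iwl_legacy_dbgfs_sram_ops, which DEBUGFS_ADD_FILE(sram, ...) later
+ * registers with debugfs.
+ */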
+
+static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz = 100 +
+ sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_mgmt_string(cnt),
+ priv->tx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_ctrl_string(cnt),
+ priv->tx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ priv->tx_stats.data_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ priv->tx_stats.data_bytes);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &clear_flag) != 1)
+ return -EFAULT;
+ iwl_legacy_clear_traffic_stats(priv);
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz = 100 +
+ sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_mgmt_string(cnt),
+ priv->rx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_ctrl_string(cnt),
+ priv->rx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ priv->rx_stats.data_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ priv->rx_stats.data_bytes);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u32 val;
+ char *buf;
+ ssize_t ret;
+ int i;
+ int pos = 0;
+ struct iwl_priv *priv = file->private_data;
+ size_t bufsz;
+
+ /* default is to dump the entire data segment */
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
+ if (priv->ucode_type == UCODE_INIT)
+ priv->dbgfs_sram_len = priv->ucode_init_data.len;
+ else
+ priv->dbgfs_sram_len = priv->ucode_data.len;
+ }
+ bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+ priv->dbgfs_sram_len);
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ priv->dbgfs_sram_offset);
+ for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+ val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
+ priv->dbgfs_sram_len - i);
+ if (i < 4) {
+ switch (i) {
+ case 1:
+ val &= BYTE1_MASK;
+ break;
+ case 2:
+ val &= BYTE2_MASK;
+ break;
+ case 3:
+ val &= BYTE3_MASK;
+ break;
+ }
+ }
+ if (!(i % 16))
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = len;
+ } else {
+ priv->dbgfs_sram_offset = 0;
+ priv->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_station_entry *station;
+ int max_sta = priv->hw_params.max_stations;
+ char *buf;
+ int i, j, pos = 0;
+ ssize_t ret;
+ /* Add 30 for initial string */
+ const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+ priv->num_stations);
+
+ for (i = 0; i < max_sta; i++) {
+ station = &priv->stations[i];
+ if (!station->used)
+ continue;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n",
+ i, station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " - waitforba");
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ ssize_t ret;
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0, ofs = 0, buf_size = 0;
+ const u8 *ptr;
+ char *buf;
+ u16 eeprom_ver;
+ size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ buf_size = 4 * eeprom_len + 256;
+
+ if (eeprom_len % 16) {
+ IWL_ERR(priv, "NVM size is not multiple of 16.\n");
+ return -ENODATA;
+ }
+
+ ptr = priv->eeprom;
+ if (!ptr) {
+ IWL_ERR(priv, "Invalid EEPROM memory\n");
+ return -ENOMEM;
+ }
+
+ /* 4 characters for byte 0xYY */
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+ eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+ pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
+ "version: 0x%x\n", eeprom_ver);
+ for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
+ buf_size - pos, 0);
+ pos += strlen(buf + pos);
+ if (buf_size - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+ priv, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ u32 event_log_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &event_log_flag) != 1)
+ return -EFAULT;
+ if (event_log_flag == 1)
+ priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+ NULL, false);
+
+ return count;
+}
+
+
+
+static ssize_t
+iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct ieee80211_channel *channels = NULL;
+ const struct ieee80211_supported_band *supp_band = NULL;
+ int pos = 0, i, bufsz = PAGE_SIZE;
+ char *buf;
+ ssize_t ret;
+
+ if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 2.4GHz band 802.11bg):\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ || (channels[i].flags &
+ IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].flags &
+ IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ || (channels[i].flags &
+ IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].flags &
+ IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
+ test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
+ test_bit(STATUS_INT_ENABLED, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
+ test_bit(STATUS_CT_KILL, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
+ test_bit(STATUS_INIT, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
+ test_bit(STATUS_ALIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
+ test_bit(STATUS_READY, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
+ test_bit(STATUS_TEMPERATURE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
+ test_bit(STATUS_EXIT_PENDING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
+ test_bit(STATUS_STATISTICS, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
+ test_bit(STATUS_SCANNING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
+ test_bit(STATUS_SCAN_ABORTING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
+ test_bit(STATUS_SCAN_HW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
+ test_bit(STATUS_POWER_PMI, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
+ test_bit(STATUS_FW_ERROR, &priv->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ priv->isr_stats.hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ priv->isr_stats.sw);
+ if (priv->isr_stats.sw || priv->isr_stats.hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ priv->isr_stats.err_code);
+ }
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ priv->isr_stats.sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ priv->isr_stats.alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ priv->isr_stats.rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ priv->isr_stats.ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ priv->isr_stats.wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n",
+ priv->isr_stats.rx);
+ for (cnt = 0; cnt < REPLY_MAX; cnt++) {
+ if (priv->isr_stats.rx_handlers[cnt] > 0)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ iwl_legacy_get_cmd_string(cnt),
+ priv->isr_stats.rx_handlers[cnt]);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ priv->isr_stats.tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ priv->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ iwl_legacy_clear_isr_stats(priv);
+
+ return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_rxon_context *ctx;
+ int pos = 0, i;
+ char buf[256 * NUM_IWL_RXON_CTX];
+ const size_t bufsz = sizeof(buf);
+
+ for_each_context(priv, ctx) {
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+ ctx->ctxid);
+ for (i = 0; i < AC_NUM; i++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tcw_min\tcw_max\taifsn\ttxop\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+ ctx->qos_data.def_qos_parm.ac[i].cw_min,
+ ctx->qos_data.def_qos_parm.ac[i].cw_max,
+ ctx->qos_data.def_qos_parm.ac[i].aifsn,
+ ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &ht40) != 1)
+ return -EFAULT;
+ if (!iwl_legacy_is_any_associated(priv))
+ priv->disable_ht40 = ht40 ? true : false;
+ else {
+ IWL_ERR(priv, "Sta associated with AP - "
+ "Change to 40MHz channel support is not allowed\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[100];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "11n 40MHz Mode: %s\n",
+ priv->disable_ht40 ? "Disabled" : "Enabled");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0, ofs = 0;
+ int cnt = 0, entry;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ char *buf;
+ int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+ (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ const u8 *ptr;
+ ssize_t ret;
+
+ if (!priv->txq) {
+ IWL_ERR(priv, "txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate buffer\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "q[%d]: read_ptr: %u, write_ptr: %u\n",
+ cnt, q->read_ptr, q->write_ptr);
+ }
+ if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
+ ptr = priv->tx_traffic;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "read: %u, write: %u\n",
+ rxq->read, rxq->write);
+
+ if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
+ ptr = priv->rx_traffic;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &traffic_log) != 1)
+ return -EFAULT;
+ if (traffic_log == 0)
+ iwl_legacy_reset_traffic_log(priv);
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz = sizeof(char) * 64 *
+ priv->cfg->base_params->num_of_queues;
+
+ if (!priv->txq) {
+ IWL_ERR(priv, "txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n",
+ cnt, q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, priv->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&priv->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+ user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+ user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+ user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
+ ssize_t ret;
+ struct iwl_sensitivity_data *data;
+
+ data = &priv->sensitivity_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+ data->auto_corr_ofdm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "auto_corr_ofdm_mrc:\t\t %u\n",
+ data->auto_corr_ofdm_mrc);
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+ data->auto_corr_ofdm_x1);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+ data->auto_corr_ofdm_mrc_x1);
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+ data->auto_corr_cck);
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+ data->auto_corr_cck_mrc);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+ data->last_bad_plcp_cnt_ofdm);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+ data->last_fa_cnt_ofdm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_cck:\t\t %u\n",
+ data->last_bad_plcp_cnt_cck);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+ data->last_fa_cnt_cck);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+ data->nrg_curr_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+ data->nrg_prev_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+ for (cnt = 0; cnt < 10; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_value[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+ for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_silence_rssi[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+ data->nrg_silence_ref);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+ data->nrg_energy_idx);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+ data->nrg_silence_idx);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+ data->nrg_th_cck);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "nrg_auto_corr_silence_diff:\t %u\n",
+ data->nrg_auto_corr_silence_diff);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+ data->num_in_cck_no_fa);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+ data->nrg_th_ofdm);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+
+static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
+ ssize_t ret;
+ struct iwl_chain_noise_data *data;
+
+ data = &priv->chain_noise_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+ data->active_chains);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+ data->chain_noise_a);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+ data->chain_noise_b);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+ data->chain_noise_c);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+ data->chain_signal_a);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+ data->chain_signal_b);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+ data->chain_signal_c);
+ pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+ data->beacon_count);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->disconn_array[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->delta_gain_code[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+ data->radio_write);
+ pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+ data->state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[60];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ u32 pwrsave_status;
+
+ pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+ pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+ (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+ (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+ (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+ "error");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &clear) != 1)
+ return -EFAULT;
+
+ /* make request to uCode to retrieve statistics information */
+ mutex_lock(&priv->mutex);
+ iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
+ mutex_unlock(&priv->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[128];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+ priv->event_log.ucode_trace ? "On" : "Off");
+ pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+ priv->event_log.non_wraps_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+ priv->event_log.wraps_once_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+ priv->event_log.wraps_more_count);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &trace) != 1)
+ return -EFAULT;
+
+ if (trace) {
+ priv->event_log.ucode_trace = true;
+ /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ } else {
+ priv->event_log.ucode_trace = false;
+ del_timer_sync(&priv->ucode_trace);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_fh) {
+ ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+ priv->missed_beacon_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+ priv->missed_beacon_threshold =
+ IWL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ priv->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+ priv->cfg->base_params->plcp_delta_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &plcp) != 1)
+ return -EINVAL;
+ if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+ priv->cfg->base_params->plcp_delta_threshold =
+ IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
+ else
+ priv->cfg->base_params->plcp_delta_threshold = plcp;
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int i, pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct iwl_force_reset *force_reset;
+
+ for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
+ force_reset = &priv->force_reset[i];
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Force reset method %d\n", i);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\treset duration: %lu\n",
+ force_reset->reset_duration);
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int reset, ret;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &reset) != 1)
+ return -EINVAL;
+ switch (reset) {
+ case IWL_RF_RESET:
+ case IWL_FW_RESET:
+ ret = iwl_legacy_force_reset(priv, reset, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret ? ret : count;
+}
+
+static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &timeout) != 1)
+ return -EINVAL;
+ if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
+ timeout = IWL_DEF_WD_TIMEOUT;
+
+ priv->cfg->base_params->wd_timeout = timeout;
+ iwl_legacy_setup_watchdog(priv);
+ return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_statistics);
+DEBUGFS_READ_FILE_OPS(tx_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+ struct dentry *phyd = priv->hw->wiphy->debugfsdir;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ priv->debugfs_dir = dir_drv;
+
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
+ goto err;
+
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (priv->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (priv->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ if (priv->cfg->base_params->ucode_tracing)
+ DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (priv->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &priv->disable_sens_cal);
+ if (priv->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &priv->disable_chain_noise_cal);
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
+ &priv->disable_tx_power_cal);
+ return 0;
+
+err:
+ IWL_ERR(priv, "Can't create the debugfs directory\n");
+ iwl_legacy_dbgfs_unregister(priv);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+ if (!priv->debugfs_dir)
+ return;
+
+ debugfs_remove_recursive(priv->debugfs_dir);
+ priv->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 000000000000..9ee849d669f3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-dev.h) for driver implementation definitions.
+ * Please use iwl-commands.h for uCode API definitions.
+ * Please use iwl-4965-hw.h for hardware-related definitions.
+ */
+
+#ifndef __iwl_legacy_dev_h__
+#define __iwl_legacy_dev_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+#include "iwl-debug.h"
+#include "iwl-4965-hw.h"
+#include "iwl-3945-hw.h"
+#include "iwl-led.h"
+#include "iwl-power.h"
+#include "iwl-legacy-rs.h"
+
+struct iwl_tx_queue;
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon statistics being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
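+/*
+ * Illustrative sketch (assumption, not driver code): the RTS rule above
+ * reduces to
+ *
+ *	use_rts = (rts_threshold == 0) ||
+ *		  (rts_threshold <= MAX_MSDU_SIZE && mpdu_len > rts_threshold);
+ */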
+
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct iwl_device_cmd;
+
+struct iwl_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct iwl_host_cmd *source;
+ /*
+ * only for ASYNC commands
+ * (which is somewhat stupid -- look at iwl-sta.c, for instance,
+ * which duplicates a bunch of code because the callback isn't
+ * invoked for SYNC commands; if it were, and its result were
+ * passed through, it would be simpler...)
+ */
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt);
+
+ /* The CMD_SIZE_HUGE flag bit indicates that the command
+ * structure is stored at the end of the shared queue memory. */
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct iwl_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* first empty entry (index) host_w */
+ int read_ptr; /* last used entry (index) host_r */
+ /* used for monitoring and recovering a stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_window; /* safe queue window */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+} __packed;
+
+/* One for each TFD */
+struct iwl_tx_info {
+ struct sk_buff *skb;
+ struct iwl_rxon_context *ctx;
+};
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write index
+ * @sched_retry: indicates that the queue is used for HT aggregation (HT AGG)
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and the required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_tx_queue {
+ struct iwl_queue q;
+ void *tfds;
+ struct iwl_device_cmd **cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_tx_info *txb;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+};
+
+#define IWL_NUM_SCAN_RATES (2)
+
+struct iwl4965_channel_tgd_info {
+ u8 type;
+ s8 max_power;
+};
+
+struct iwl4965_channel_tgh_info {
+ s64 last_radar_time;
+};
+
+#define IWL4965_MAX_RATE (33)
+
+struct iwl3945_clip_group {
+ /* maximum power level to prevent clipping for each rate, derived by
+ * us from this band's saturation power in EEPROM */
+ const s8 clip_powers[IWL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power index must also be set. */
+struct iwl3945_channel_power_info {
+ struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
+	s8 power_table_index;	/* actual (compensated) index into gain table */
+ s8 base_power_index; /* gain index for power at factory temp. */
+ s8 requested_power; /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct iwl3945_scan_power_info {
+ struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
+	s8 power_table_index;	/* actual (compensated) index into gain table */
+ s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ * with one another!
+ */
+struct iwl_channel_info {
+ struct iwl4965_channel_tgd_info tgd;
+ struct iwl4965_channel_tgh_info tgh;
+ struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
+ struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
+ * HT40 channel */
+
+ u8 channel; /* channel number */
+ u8 flags; /* flags copied from EEPROM */
+ s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
+ s8 min_power; /* always 0 */
+ s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
+
+ u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
+ u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
+ enum ieee80211_band band;
+
+ /* HT40 channel info */
+ s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ u8 ht40_flags; /* flags copied from EEPROM */
+ u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+ /* Radio/DSP gain settings for each "normal" data Tx rate.
+ * These include, in addition to RF and DSP gain, a few fields for
+ * remembering/modifying gain settings (indexes). */
+ struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
+
+ /* Radio/DSP gain settings for each scan rate, for directed scans. */
+ struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
+};
+
+#define IWL_TX_FIFO_BK 0 /* shared */
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2 /* shared */
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_UNUSED -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IWL_MIN_NUM_QUEUES 10
+
+#define IWL_DEFAULT_CMD_QUEUE_NUM 4
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct iwl_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct iwl_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
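+/*
+ * IEEE80211_SCTL_SEQ masks the 12-bit sequence number (bits 15:4) of the
+ * sequence-control field, so e.g. SEQ_TO_SN(0x1234) == 0x123 and
+ * MAX_SN == 0xfff.
+ */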
+
+enum {
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
+ CMD_SIZE_HUGE = (1 << 0),
+ CMD_ASYNC = (1 << 1),
+ CMD_WANT_SKB = (1 << 2),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct iwl_device_cmd {
+ struct iwl_cmd_header hdr; /* uCode API */
+ union {
+ u32 flags;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ struct iwl_tx_cmd tx;
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
+
+
+struct iwl_host_cmd {
+ const void *data;
+ unsigned long reply_page;
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt);
+ u32 flags;
+ u16 len;
+ u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct iwl_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+#define IWL_SUPPORTED_RATES_IE_LEN 8
+
+#define MAX_TID_COUNT 9
+
+#define IWL_INVALID_RATE 0xFF
+#define IWL_INVALID_VALUE -1
+
+/**
+ * struct iwl_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
+ * @bitmap: one bit for each frame pending ACK in the Tx window
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If REPLY_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct iwl_ht_agg {
+ u16 txq_id;
+ u16 frame_count;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+#define IWL_AGG_OFF 0
+#define IWL_AGG_ON 1
+#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IWL_EMPTYING_HW_QUEUE_DELBA 3
+ u8 state;
+};
+
+
+struct iwl_tid_data {
+ u16 seq_number; /* 4965 only */
+ u16 tfds_in_queue;
+ struct iwl_ht_agg agg;
+};
+
+struct iwl_hw_key {
+ u32 cipher;
+ int keylen;
+ u8 keyidx;
+ u8 key[32];
+};
+
+union iwl_ht_rate_supp {
+ u16 rates;
+ struct {
+ u8 siso_rate;
+ u8 mimo_rate;
+ };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
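+/*
+ * Factor n above corresponds to a maximum Rx A-MPDU length of 8 KiB << n,
+ * matching the _8K ... _64K names (per 802.11n, 2^(13 + n) - 1 octets).
+ */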
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN (0x1)
+
+struct iwl_ht_config {
+ bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct iwl_qos_info {
+ int qos_active;
+ struct iwl_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct iwl_station_entry {
+ struct iwl_legacy_addsta_cmd sta;
+ struct iwl_tid_data tid[MAX_TID_COUNT];
+ u8 used, ctxid;
+ struct iwl_hw_key keyinfo;
+ struct iwl_link_quality_cmd *lq;
+};
+
+struct iwl_station_priv_common {
+ struct iwl_rxon_context *ctx;
+ u8 sta_id;
+};
+
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct iwl_station_priv {
+ struct iwl_station_priv_common common;
+ struct iwl_lq_sta lq_sta;
+ atomic_t pending_frames;
+ bool client;
+ bool asleep;
+};
+
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+ struct iwl_rxon_context *ctx;
+ u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ void *v_addr; /* access by driver */
+ dma_addr_t p_addr; /* access by card's busmaster DMA */
+ u32 len; /* bytes */
+};
+
+/* uCode file layout */
+struct iwl_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+};
+
+struct iwl4965_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct iwl_sensitivity_ranges {
+ u16 min_nrg_cck;
+ u16 max_nrg_cck;
+
+ u16 nrg_th_cck;
+ u16 nrg_th_ofdm;
+
+ u16 auto_corr_min_ofdm;
+ u16 auto_corr_min_ofdm_mrc;
+ u16 auto_corr_min_ofdm_x1;
+ u16 auto_corr_min_ofdm_mrc_x1;
+
+ u16 auto_corr_max_ofdm;
+ u16 auto_corr_max_ofdm_mrc;
+ u16 auto_corr_max_ofdm_x1;
+ u16 auto_corr_max_ofdm_mrc_x1;
+
+ u16 auto_corr_max_cck;
+ u16 auto_corr_max_cck_mrc;
+ u16 auto_corr_min_cck;
+ u16 auto_corr_min_cck_mrc;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
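+/*
+ * Temperatures are handled as plain integers (see iwl_priv->temperature),
+ * hence the integer offset of 273; e.g. KELVIN_TO_CELSIUS(300) == 27.
+ */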
+
+
+/**
+ * struct iwl_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations:
+ * @ht40_channel: bitmap of bands where 40MHz channels are possible:
+ *	BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: max_inst/data/bsm_size, for uCode use
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
+ */
+struct iwl_hw_params {
+ u8 max_txq_num;
+ u8 dma_chnl_num;
+ u16 scd_bc_tbls_size;
+ u32 tfd_size;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 max_rxq_size;
+ u16 max_rxq_log;
+ u32 rx_page_order;
+ u32 rx_wrt_ptr_reg;
+ u8 max_stations;
+ u8 ht40_channel;
+ u8 max_beacon_itrvl; /* in 1024 ms */
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 max_bsm_size;
+ u32 ct_kill_threshold; /* value in hw-dependent units */
+ u16 beacon_time_tsf_bits;
+ const struct iwl_sensitivity_ranges *sens;
+};
+
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE: The implementations of these functions are not hardware specific,
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * iwl_ <-- Is part of iwlwifi
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * iwl4965_bg_ <-- Called from work queue context
+ * iwl4965_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
+extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
+extern int iwl_legacy_queue_space(const struct iwl_queue *q);
+static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+ (i >= q->read_ptr && i < q->write_ptr) :
+ !(i < q->read_ptr && i >= q->write_ptr);
+}
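+/*
+ * Example: with read_ptr == 5 and write_ptr == 10, indices 5..9 are in use;
+ * once the write pointer has wrapped past the end of the queue
+ * (write_ptr < read_ptr), the "used" range itself wraps around.
+ */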
+
+
+static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
+ int is_huge)
+{
+	/*
+	 * Init calibration results and scan commands require a buffer
+	 * larger than TFD_MAX_PAYLOAD_SIZE; they use the big buffer at
+	 * the end of the command array.
+	 */
+ if (is_huge)
+ return q->n_window; /* must be power of 2 */
+
+ /* Otherwise, use normal size buffers */
+ return index & (q->n_window - 1);
+}
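+/*
+ * Example: with q->n_window == TFD_CMD_SLOTS (32), index 33 maps to slot
+ * 33 & 31 == 1, while a huge command always uses the extra slot at index
+ * q->n_window, past the normal command array.
+ */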
+
+
+struct iwl_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+#define IWL_OPERATION_MODE_AUTO 0
+#define IWL_OPERATION_MODE_HT_ONLY 1
+#define IWL_OPERATION_MODE_MIXED 2
+#define IWL_OPERATION_MODE_20MHZ 3
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE 0xFFFF
+#define IWL4965_CAL_NUM_BEACONS 20
+#define IWL_CAL_NUM_BEACONS 16
+#define MAXIMUM_ALLOWED_PATHLOSS 15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM 50
+#define MIN_FA_OFDM 5
+#define MAX_FA_CCK 50
+#define MIN_FA_CCK 5
+
+#define AUTO_CORR_STEP_OFDM 1
+
+#define AUTO_CORR_STEP_CCK 3
+#define AUTO_CORR_MAX_TH_CCK 160
+
+#define NRG_DIFF 2
+#define NRG_STEP_CCK 2
+#define NRG_MARGIN 8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
+
+#define CHAIN_A 0
+#define CHAIN_B 1
+#define CHAIN_C 2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER 0xFF00
+#define IN_BAND_FILTER 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L 20
+#define NUM_RX_CHAINS 3
+
+enum iwl4965_false_alarm_state {
+ IWL_FA_TOO_MANY = 0,
+ IWL_FA_TOO_FEW = 1,
+ IWL_FA_GOOD_RANGE = 2,
+};
+
+enum iwl4965_chain_noise_state {
+ IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
+ IWL_CHAIN_NOISE_ACCUMULATE,
+ IWL_CHAIN_NOISE_CALIBRATED,
+ IWL_CHAIN_NOISE_DONE,
+};
+
+enum iwl4965_calib_enabled_state {
+ IWL_CALIB_DISABLED = 0, /* must be 0 */
+ IWL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum iwl_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum iwl_calib {
+ IWL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+ void *buf;
+ size_t buf_len;
+};
+
+enum ucode_type {
+ UCODE_NONE = 0,
+ UCODE_INIT,
+ UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct iwl_sensitivity_data {
+ u32 auto_corr_ofdm;
+ u32 auto_corr_ofdm_mrc;
+ u32 auto_corr_ofdm_x1;
+ u32 auto_corr_ofdm_mrc_x1;
+ u32 auto_corr_cck;
+ u32 auto_corr_cck_mrc;
+
+ u32 last_bad_plcp_cnt_ofdm;
+ u32 last_fa_cnt_ofdm;
+ u32 last_bad_plcp_cnt_cck;
+ u32 last_fa_cnt_cck;
+
+ u32 nrg_curr_state;
+ u32 nrg_prev_state;
+ u32 nrg_value[10];
+ u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+ u32 nrg_silence_ref;
+ u32 nrg_energy_idx;
+ u32 nrg_silence_idx;
+ u32 nrg_th_cck;
+ s32 nrg_auto_corr_silence_diff;
+ u32 num_in_cck_no_fa;
+ u32 nrg_th_ofdm;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct iwl_chain_noise_data {
+ u32 active_chains;
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_signal_a;
+ u32 chain_signal_b;
+ u32 chain_signal_c;
+ u16 beacon_count;
+ u8 disconn_array[NUM_RX_CHAINS];
+ u8 delta_gain_code[NUM_RX_CHAINS];
+ u8 radio_write;
+ u8 state;
+};
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+#define IWL_TRAFFIC_ENTRIES (256)
+#define IWL_TRAFFIC_ENTRY_SIZE (64)
+
+enum {
+ MEASUREMENT_READY = (1 << 0),
+ MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt statistics */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 rx_handlers[REPLY_MAX];
+ u32 tx;
+ u32 unhandled;
+};
+
+/* management statistics */
+enum iwl_mgmt_stats {
+ MANAGEMENT_ASSOC_REQ = 0,
+ MANAGEMENT_ASSOC_RESP,
+ MANAGEMENT_REASSOC_REQ,
+ MANAGEMENT_REASSOC_RESP,
+ MANAGEMENT_PROBE_REQ,
+ MANAGEMENT_PROBE_RESP,
+ MANAGEMENT_BEACON,
+ MANAGEMENT_ATIM,
+ MANAGEMENT_DISASSOC,
+ MANAGEMENT_AUTH,
+ MANAGEMENT_DEAUTH,
+ MANAGEMENT_ACTION,
+ MANAGEMENT_MAX,
+};
+/* control statistics */
+enum iwl_ctrl_stats {
+ CONTROL_BACK_REQ = 0,
+ CONTROL_BACK,
+ CONTROL_PSPOLL,
+ CONTROL_RTS,
+ CONTROL_CTS,
+ CONTROL_ACK,
+ CONTROL_CFEND,
+ CONTROL_CFENDACK,
+ CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ u32 mgmt[MANAGEMENT_MAX];
+ u32 ctrl[CONTROL_MAX];
+ u32 data_cnt;
+ u64 data_bytes;
+#endif
+};
+
+/*
+ * iwl_switch_rxon: "channel switch" structure
+ *
+ * @ switch_in_progress: channel switch in progress
+ * @ channel: new channel
+ */
+struct iwl_switch_rxon {
+ bool switch_in_progress;
+ __le16 channel;
+};
+
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry: the entry just before the next one that uCode would fill
+ * @non_wraps_count: count of uCode event dumps with no wrap detected
+ * @wraps_once_count: count of uCode event dumps with a single wrap detected
+ * @wraps_more_count: count of uCode event dumps with more than one wrap
+ *	detected
+ */
+struct iwl_event_log {
+ bool ucode_trace;
+ u32 num_wraps;
+ u32 next_entry;
+ int non_wraps_count;
+ int wraps_once_count;
+ int wraps_more_count;
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs. It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
+
+#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
+#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_DEF_WD_TIMEOUT (2000)
+#define IWL_LONG_WD_TIMEOUT (10000)
+#define IWL_MAX_WD_TIMEOUT (120000)
+
+enum iwl_reset {
+ IWL_RF_RESET = 0,
+ IWL_FW_RESET,
+ IWL_MAX_FORCE_RESET,
+};
+
+struct iwl_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IWL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IWL4965_EXT_BEACON_TIME_POS 22
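+/*
+ * A combined beacon time value is thus (extended << POS) | interval, e.g.
+ * on 4965 an extended count of 2 with an interval of 100 encodes as
+ * (2 << 22) | 100.
+ */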
+
+enum iwl_rxon_context_id {
+ IWL_RXON_CTX_BSS,
+
+ NUM_IWL_RXON_CTX
+};
+
+struct iwl_rxon_context {
+ struct ieee80211_vif *vif;
+
+ const u8 *ac_to_fifo;
+ const u8 *ac_to_queue;
+ u8 mcast_queue;
+
+	/*
+	 * We could use the vif pointer to indicate whether the context is
+	 * active, but the context must also stay active while it is being
+	 * disabled, after the vif has already been removed for type setting.
+	 */
+ bool always_active, is_active;
+
+ bool ht_need_multiple_chains;
+
+ enum iwl_rxon_context_id ctxid;
+
+ u32 interface_modes, exclusive_interface_modes;
+ u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct iwl_legacy_rxon_cmd active;
+ struct iwl_legacy_rxon_cmd staging;
+
+ struct iwl_rxon_time_cmd timing;
+
+ struct iwl_qos_info qos_data;
+
+ u8 bcast_sta_id, ap_sta_id;
+
+ u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+ u8 qos_cmd;
+ u8 wep_key_cmd;
+
+ struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
+ u8 key_mapping_keys;
+
+ __le32 station_flags;
+
+ struct {
+ bool non_gf_sta_present;
+ u8 protection;
+ bool enabled, is_40mhz;
+ u8 extension_chan_offset;
+ } ht;
+};
+
+struct iwl_priv {
+
+ /* ieee device used by generic ieee processing code */
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *ieee_channels;
+ struct ieee80211_rate *ieee_rates;
+ struct iwl_cfg *cfg;
+
+ /* temporary frame storage list */
+ struct list_head free_frames;
+ int frames_count;
+
+ enum ieee80211_band band;
+ int alloc_rxb_page;
+
+ void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* spectrum measurement report caching */
+ struct iwl_spectrum_notification measure_report;
+ u8 measurement_status;
+
+ /* ucode beacon time */
+ u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* track IBSS manager (last beacon) status */
+ u32 ibss_manager;
+
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
+
+ /* force reset */
+ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+
+	/* we allocate an array of iwl_channel_info for the NIC's valid
+	 * channels.  Access is via channel # using an indirect index array */
+ struct iwl_channel_info *channel_info; /* channel info array */
+ u8 channel_count; /* # of channels */
+
+ /* thermal calibration */
+ s32 temperature; /* degrees Kelvin */
+ s32 last_temperature;
+
+ /* init calibration results */
+ struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+
+ /* Scan related variables */
+ unsigned long scan_start;
+ unsigned long scan_start_tsf;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
+ struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
+ bool is_internal_short_scan;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+ /* spinlock */
+ spinlock_t lock; /* protect general shared data */
+ spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
+ struct mutex mutex;
+ struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
+
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+ u32 hw_rev;
+ u32 hw_wa_rev;
+ u8 rev_id;
+
+ /* microcode/device supports multiple contexts */
+ u8 valid_contexts;
+
+ /* command queue number */
+ u8 cmd_queue;
+
+ /* max number of station keys */
+ u8 sta_key_max_num;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[1];
+
+	/* uCode images, saved so they can be reloaded in case of failure */
+ int fw_index; /* firmware we're trying to load */
+ u32 ucode_ver; /* version of ucode, copy of
+ iwl_ucode.ver */
+ struct fw_desc ucode_code; /* runtime inst */
+ struct fw_desc ucode_data; /* runtime data original */
+ struct fw_desc ucode_data_backup; /* runtime data save/restore */
+ struct fw_desc ucode_init; /* initialization inst */
+ struct fw_desc ucode_init_data; /* initialization data */
+ struct fw_desc ucode_boot; /* bootstrap inst */
+ enum ucode_type ucode_type;
+ u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
+
+ struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
+
+ struct iwl_switch_rxon switch_rxon;
+
+ /* 1st responses from initialize and runtime uCode images.
+ * _4965's initialize alive response contains some calibration data. */
+ struct iwl_init_alive_resp card_alive_init;
+ struct iwl_alive_resp card_alive;
+
+ u16 active_rate;
+
+ u8 start_calib;
+ struct iwl_sensitivity_data sensitivity_data;
+ struct iwl_chain_noise_data chain_noise_data;
+ __le16 sensitivity_tbl[HD_TABLE_SIZE];
+
+ struct iwl_ht_config current_ht_config;
+
+ /* Rate scaling data */
+ u8 retry_rate;
+
+ wait_queue_head_t wait_command_queue;
+
+ int activity_timer_active;
+
+ /* Rx and Tx DMA processing queues */
+ struct iwl_rx_queue rxq;
+ struct iwl_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+ struct iwl_dma_ptr kw; /* keep warm address */
+ struct iwl_dma_ptr scd_bc_tbls;
+
+ u32 scd_base_addr; /* scheduler sram base address */
+
+ unsigned long status;
+
+ /* counts mgmt, ctl, and data packets */
+ struct traffic_stats tx_stats;
+ struct traffic_stats rx_stats;
+
+ /* counts interrupts */
+ struct isr_statistics isr_stats;
+
+ struct iwl_power_mgr power_data;
+
+ /* context information */
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
+ spinlock_t sta_lock;
+ int num_stations;
+ struct iwl_station_entry stations[IWL_STATION_COUNT];
+ unsigned long ucode_key_table;
+
+ /* queue refcounts */
+#define IWL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
+ /* Indication if ieee80211_ops->open has been called */
+ u8 is_open;
+
+ u8 mac80211_registered;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ struct iwl_eeprom_calib_info *calib_info;
+
+ enum nl80211_iftype iw_mode;
+
+ /* Last Rx'd beacon timestamp */
+ u64 timestamp;
+
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ struct iwl3945_notif_statistics accum_statistics;
+ struct iwl3945_notif_statistics delta_statistics;
+ struct iwl3945_notif_statistics max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet statistics */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct iwl3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+ struct {
+			/*
+			 * number of TIDs with aggregation (AGG) enabled;
+			 * 0 means no aggregation
+			 */
+ u8 agg_tids_count;
+
+ struct iwl_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+			/*
+			 * The chain noise reset and gain commands are the
+			 * two extra calibration commands that follow the
+			 * standard phy calibration commands.
+			 */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ struct iwl_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
+#endif
+
+ } _4965;
+#endif
+ };
+
+ struct iwl_hw_params hw_params;
+
+ u32 inta_mask;
+
+ struct workqueue_struct *workqueue;
+
+ struct work_struct restart;
+ struct work_struct scan_completed;
+ struct work_struct rx_replenish;
+ struct work_struct abort_scan;
+
+ struct iwl_rxon_context *beacon_ctx;
+ struct sk_buff *beacon_skb;
+
+ struct work_struct start_internal_scan;
+ struct work_struct tx_flush;
+
+ struct tasklet_struct irq_tasklet;
+
+ struct delayed_work init_alive_start;
+ struct delayed_work alive_start;
+ struct delayed_work scan_check;
+
+ /* TX Power */
+ s8 tx_power_user_lmt;
+ s8 tx_power_device_lmt;
+ s8 tx_power_next;
+
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ /* debugging info */
+ u32 debug_level; /* per device debugging will override global
+ iwlegacy_debug_level if set */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ /* debugfs */
+ u16 tx_traffic_idx;
+ u16 rx_traffic_idx;
+ u8 *tx_traffic;
+ u8 *rx_traffic;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+ struct work_struct txpower_work;
+ u32 disable_sens_cal;
+ u32 disable_chain_noise_cal;
+ u32 disable_tx_power_cal;
+ struct work_struct run_time_calib_work;
+ struct timer_list statistics_periodic;
+ struct timer_list ucode_trace;
+ struct timer_list watchdog;
+ bool hw_ready;
+
+ struct iwl_event_log event_log;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
+}; /*iwl_priv */
+
+static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
+{
+ set_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
+{
+ clear_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+/*
+ * iwl_legacy_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. If set, that
+ * debug level is used; otherwise the global debug level, which can be set
+ * via module parameter, is used.
+ */
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+ if (priv->debug_level)
+ return priv->debug_level;
+ else
+ return iwlegacy_debug_level;
+}
+#else
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+ return iwlegacy_debug_level;
+}
+#endif
+
+
+static inline struct ieee80211_hdr *
+iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
+ int txq_id, int idx)
+{
+ if (priv->txq[txq_id].txb[idx].skb)
+ return (struct ieee80211_hdr *)priv->txq[txq_id].
+ txb[idx].skb->data;
+ return NULL;
+}
+
+static inline struct iwl_rxon_context *
+iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx) \
+ for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
+ ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
+ if (priv->valid_contexts & BIT(ctx->ctxid))
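+/*
+ * Typical use (iwl_do_something() is just a placeholder):
+ *
+ *	struct iwl_rxon_context *ctx;
+ *
+ *	for_each_context(priv, ctx)
+ *		iwl_do_something(priv, ctx);
+ *
+ * Only contexts marked valid in priv->valid_contexts are visited.
+ */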
+
+static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid)
+{
+ return (priv->contexts[ctxid].active.filter_flags &
+ RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
+{
+ return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
+}
+
+static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
+{
+ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
+{
+ if (ch_info == NULL)
+ return 0;
+ return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
+{
+ return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
+{
+ return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
+{
+ return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline void
+__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
+{
+ __free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+
+static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+ free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#include "iwl-dev.h"
+
+#define CREATE_TRACE_POINTS
+#include "iwl-devtrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 000000000000..9612aa0f6ec4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,270 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_LEGACY_DEVICE_TRACE
+
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
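+/*
+ * With tracing disabled (or when checked by sparse), TRACE_EVENT above is
+ * redefined so that every trace_iwlwifi_legacy_*() call site becomes an
+ * empty inline stub and compiles away.
+ */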
+
+
+#define PRIV_ENTRY __field(struct iwl_priv *, priv)
+#define PRIV_ASSIGN (__entry->priv = priv)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_io
+
+TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
+ TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_ARGS(priv, offs, val),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
+ __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
+ TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+ TP_ARGS(priv, offs, val),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, offs)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
+ __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
+ TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_ARGS(priv, offs, val),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
+ __entry->offs, __entry->val)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_ucode
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
+ TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+ __entry->priv, __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
+ TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
+ TP_ARGS(priv, hcmd, len, flags),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __dynamic_array(u8, hcmd, len)
+ __field(u32, flags)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ memcpy(__get_dynamic_array(hcmd), hcmd, len);
+ __entry->flags = flags;
+ ),
+ TP_printk("[%p] hcmd %#.2x (%ssync)",
+ __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
+ __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_rx,
+ TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+ TP_ARGS(priv, rxbuf, len),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __dynamic_array(u8, rxbuf, len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+ ),
+ TP_printk("[%p] RX cmd %#.2x",
+ __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_tx,
+ TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+ void *buf0, size_t buf0_len,
+ void *buf1, size_t buf1_len),
+ TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(size_t, framelen)
+ __dynamic_array(u8, tfd, tfdlen)
+
+ /*
+ * Do not insert between or below these items,
+ * we want to keep the frame together (except
+ * for the possible padding).
+ */
+ __dynamic_array(u8, buf0, buf0_len)
+ __dynamic_array(u8, buf1, buf1_len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->framelen = buf0_len + buf1_len;
+ memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+ memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+ memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ ),
+ TP_printk("[%p] TX %.2x (%zu bytes)",
+ __entry->priv,
+ ((u8 *)__get_dynamic_array(buf0))[0],
+ __entry->framelen)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
+ TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
+ u32 data1, u32 data2, u32 line, u32 blink1,
+ u32 blink2, u32 ilink1, u32 ilink2),
+ TP_ARGS(priv, desc, time, data1, data2, line,
+ blink1, blink2, ilink1, ilink2),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, desc)
+ __field(u32, time)
+ __field(u32, data1)
+ __field(u32, data2)
+ __field(u32, line)
+ __field(u32, blink1)
+ __field(u32, blink2)
+ __field(u32, ilink1)
+ __field(u32, ilink2)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->desc = desc;
+ __entry->time = time;
+ __entry->data1 = data1;
+ __entry->data2 = data2;
+ __entry->line = line;
+ __entry->blink1 = blink1;
+ __entry->blink2 = blink2;
+ __entry->ilink1 = ilink1;
+ __entry->ilink2 = ilink2;
+ ),
+ TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+ "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
+ __entry->priv, __entry->desc, __entry->time, __entry->data1,
+ __entry->data2, __entry->line, __entry->blink1,
+ __entry->blink2, __entry->ilink1, __entry->ilink2)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 000000000000..04c5648027df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-io.h"
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The iwlegacy_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into priv->channel_info_24/52 and priv->channel_map_24/52
+ *
+ * channel_map_24/52 provides the index in the channel_info array for a
+ * given channel.  Two separate maps are needed because channel numbers
+ * overlap between the 2.4GHz and 5.2GHz spectrum, as seen in band_1 and
+ * band_2.
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware. This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel. There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 iwlegacy_eeprom_band_1[14] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
+{
+ u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ int ret = 0;
+
+ IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+ switch (gp) {
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ break;
+ default:
+		IWL_ERR(priv, "bad EEPROM signature, "
+			"EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
+const u8
+*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+{
+ BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
+ return &priv->eeprom[offset];
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
+
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+{
+ if (!priv->eeprom)
+ return 0;
+ return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
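+/*
+ * iwl_legacy_eeprom_query16() assembles a little-endian 16-bit value from two
+ * adjacent EEPROM bytes; e.g. bytes 0x34 and 0x12 at offset and offset + 1
+ * yield 0x1234.
+ */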
+
+/**
+ * iwl_legacy_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into priv->eeprom
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int iwl_legacy_eeprom_init(struct iwl_priv *priv)
+{
+ __le16 *e;
+ u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+
+ /* allocate eeprom */
+ sz = priv->cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
+ priv->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!priv->eeprom) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ e = (__le16 *)priv->eeprom;
+
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ ret = iwl_legacy_eeprom_verify_signature(priv);
+ if (ret < 0) {
+ IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
+ if (ret < 0) {
+ IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _iwl_legacy_write32(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
+ addr);
+ goto done;
+ }
+ r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+
+ IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
+ "EEPROM",
+ iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
+
+ ret = 0;
+done:
+ priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
+err:
+ if (ret)
+ iwl_legacy_eeprom_free(priv);
+ /* Reset chip to save power until we load uCode during "up". */
+ iwl_legacy_apm_stop(priv);
+alloc_err:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_init);
+
+void iwl_legacy_eeprom_free(struct iwl_priv *priv)
+{
+ kfree(priv->eeprom);
+ priv->eeprom = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_free);
+
+static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
+ int eep_band, int *eeprom_ch_count,
+ const struct iwl_eeprom_channel **eeprom_ch_info,
+ const u8 **eeprom_ch_index)
+{
+ u32 offset = priv->cfg->ops->lib->
+ eeprom_ops.regulatory_bands[eep_band - 1];
+ switch (eep_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_7;
+ break;
+ default:
+ BUG();
+ return;
+ }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+/**
+ * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
+ enum ieee80211_band band, u16 channel,
+ const struct iwl_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct iwl_channel_info *ch_info;
+
+ ch_info = (struct iwl_channel_info *)
+ iwl_legacy_get_channel_info(priv, band, channel);
+
+ if (!iwl_legacy_is_channel_valid(ch_info))
+ return -1;
+
+ IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n",
+ ch_info->channel,
+ iwl_legacy_is_channel_a_band(ch_info) ?
+ "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS),
+ CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR),
+ CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS),
+ eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
+ && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
+ "" : "not ");
+
+ ch_info->ht40_eeprom = *eeprom_ch;
+ ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &=
+ ~clear_ht40_extension_channel;
+
+ return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+
+/**
+ * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
+ */
+int iwl_legacy_init_channel_map(struct iwl_priv *priv)
+{
+ int eeprom_ch_count = 0;
+ const u8 *eeprom_ch_index = NULL;
+ const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
+ int band, ch;
+ struct iwl_channel_info *ch_info;
+
+ if (priv->channel_count) {
+ IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
+ return 0;
+ }
+
+ IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
+
+ priv->channel_count =
+ ARRAY_SIZE(iwlegacy_eeprom_band_1) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_2) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_3) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_4) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_5);
+
+ IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
+ priv->channel_count);
+
+ priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
+ priv->channel_count, GFP_KERNEL);
+ if (!priv->channel_info) {
+ IWL_ERR(priv, "Could not allocate channel_info\n");
+ priv->channel_count = 0;
+ return -ENOMEM;
+ }
+
+ ch_info = priv->channel_info;
+
+ /* Loop through the 5 EEPROM bands adding them in order to the
+ * channel map we maintain (which contains additional information beyond
+ * what is in the EEPROM) */
+ for (band = 1; band <= 5; band++) {
+
+ iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_index);
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ ch_info->channel = eeprom_ch_index[ch];
+ ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+
+ /* permanently store EEPROM's channel regulatory flags
+ * and max power in channel info database. */
+ ch_info->eeprom = eeprom_ch_info[ch];
+
+ /* Copy the run-time flags so they are there even on
+ * invalid channels */
+ ch_info->flags = eeprom_ch_info[ch].flags;
+ /* First write that ht40 is not enabled, and then enable
+ * one by one */
+ ch_info->ht40_extension_channel =
+ IEEE80211_CHAN_NO_HT40;
+
+ if (!(iwl_legacy_is_channel_valid(ch_info))) {
+ IWL_DEBUG_EEPROM(priv,
+ "Ch. %d Flags %x [%sGHz] - "
+ "No traffic\n",
+ ch_info->channel,
+ ch_info->flags,
+ iwl_legacy_is_channel_a_band(ch_info) ?
+ "5.2" : "2.4");
+ ch_info++;
+ continue;
+ }
+
+ /* Initialize regulatory-based run-time data */
+ ch_info->max_power_avg = ch_info->curr_txpow =
+ eeprom_ch_info[ch].max_power_avg;
+ ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+ ch_info->min_power = 0;
+
+ IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
+ "%s%s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n",
+ ch_info->channel,
+ iwl_legacy_is_channel_a_band(ch_info) ?
+ "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch].flags,
+ eeprom_ch_info[ch].max_power_avg,
+ ((eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_IBSS)
+ && !(eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_RADAR))
+ ? "" : "not ");
+
+ /* Set the tx_power_user_lmt to the highest power
+ * supported by any channel */
+ if (eeprom_ch_info[ch].max_power_avg >
+ priv->tx_power_user_lmt)
+ priv->tx_power_user_lmt =
+ eeprom_ch_info[ch].max_power_avg;
+
+ ch_info++;
+ }
+ }
+
+ /* Check if we do have HT40 channels */
+ if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return 0;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_index);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband =
+ (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ /* Set up driver's info for lower half */
+ iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+ eeprom_ch_index[ch],
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+ eeprom_ch_index[ch] + 4,
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_channel_map);
+
+/*
+ * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
+ */
+void iwl_legacy_free_channel_map(struct iwl_priv *priv)
+{
+ kfree(priv->channel_info);
+ priv->channel_count = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_free_channel_map);
+
+/**
+ * iwl_legacy_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct
+iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
+ enum ieee80211_band band, u16 channel)
+{
+ int i;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel == channel)
+ return &priv->channel_info[i];
+ }
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (channel >= 1 && channel <= 14)
+ return &priv->channel_info[channel - 1];
+ break;
+ default:
+ BUG();
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 000000000000..c59c81002022
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_eeprom_h__
+#define __iwl_legacy_eeprom_h__
+
+#include <net/mac80211.h>
+
+struct iwl_priv;
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum of 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
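+
+/*
+ * Illustrative sketch (not part of this patch): reading one 16-bit EEPROM
+ * word with the procedure described above.  CSR_EEPROM_REG,
+ * CSR_EEPROM_REG_READ_VALID_MSK, iwl_write32()/iwl_read32() and udelay()
+ * are assumed from the usual CSR/IO/delay headers; error handling and the
+ * EEPROM semaphore are omitted.
+ */
+#if 0 /* example only */
+static u16 iwl_example_eeprom_read_word(struct iwl_priv *priv, u16 byte_addr)
+{
+ int t;
+ u32 r;
+
+ /* request the (even) byte address; data is returned in bits 31-16 */
+ iwl_write32(priv, CSR_EEPROM_REG, (u32)byte_addr << 1);
+
+ /* poll every 10 uSec, up to IWL_EEPROM_ACCESS_TIMEOUT uSec in total */
+ for (t = 0; t < IWL_EEPROM_ACCESS_TIMEOUT; t += 10) {
+ r = iwl_read32(priv, CSR_EEPROM_REG);
+ if (r & CSR_EEPROM_REG_READ_VALID_MSK)
+ return r >> 16;
+ udelay(10);
+ }
+ return 0; /* timed out */
+}
+#endif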
+
+
+/*
+ * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ * It only indicates that 20 MHz channel use is supported; HT40 channel
+ * usage is indicated by a separate set of regulatory flags for each
+ * HT40 channel pair.
+ *
+ * NOTE: Using a channel inappropriately will result in a uCode error!
+ */
+#define IWL_NUM_TX_CALIB_GROUPS 5
+enum {
+ EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
+ EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+ /* Bit 2 Reserved */
+ EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
+ EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
+ EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+ /* Bit 6 Reserved (was Narrow Channel) */
+ EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
+};
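+
+/*
+ * Illustrative helper (not part of this patch): the usability rule described
+ * above expressed as a flag check -- IBSS/AP operation needs VALID, IBSS and
+ * ACTIVE set while RADAR must be clear.
+ */
+static inline int iwl_example_eeprom_channel_allows_ibss(u8 flags)
+{
+ return (flags & EEPROM_CHANNEL_VALID) &&
+ (flags & EEPROM_CHANNEL_IBSS) &&
+ (flags & EEPROM_CHANNEL_ACTIVE) &&
+ !(flags & EEPROM_CHANNEL_RADAR);
+}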
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct iwl_eeprom_channel {
+ u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
+ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION (5)
+#define EEPROM_4965_EEPROM_VERSION (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
+#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 iwlegacy_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna). EEPROM contains:
+ *
+ * 1) Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2) Gain table index used to achieve the target measurement power.
+ * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
+ *
+ * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4) RF power amplifier detector level measurement (not used).
+ */
+struct iwl_eeprom_calib_measure {
+ u8 temperature; /* Device temperature (Celsius) */
+ u8 gain_idx; /* Index into gain table */
+ u8 actual_pow; /* Measured RF output power, half-dBm */
+ s8 pa_det; /* Power amp detector level (not used) */
+} __packed;
+
+
+/*
+ * measurement set for one channel. EEPROM contains:
+ *
+ * 1) Channel number measured
+ *
+ * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
+ * (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct iwl_eeprom_calib_ch_info {
+ u8 ch_num;
+ struct iwl_eeprom_calib_measure
+ measurements[EEPROM_TX_POWER_TX_CHAINS]
+ [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1) First and last channels within range of the subband. "0" values
+ * indicate that this sample set is not being used.
+ *
+ * 2) Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct iwl_eeprom_calib_subband_info {
+ u8 ch_from; /* channel number of lowest channel in subband */
+ u8 ch_to; /* channel number of highest channel in subband */
+ struct iwl_eeprom_calib_ch_info ch1;
+ struct iwl_eeprom_calib_ch_info ch2;
+} __packed;
+
+
+/*
+ * txpower calibration info. EEPROM contains:
+ *
+ * 1) Factory-measured saturation power levels (maximum levels at which
+ * tx power amplifier can output a signal without too much distortion).
+ * There is one level for 2.4 GHz band and one for 5 GHz band. These
+ * values apply to all channels within each of the bands.
+ *
+ * 2) Factory-measured power supply voltage level. This is assumed to be
+ * constant (i.e. same value applies to all channels/bands) while the
+ * factory measurements are being made.
+ *
+ * 3) Up to 8 sets of factory-measured txpower calibration values.
+ * These are for different frequency ranges, since txpower gain
+ * characteristics of the analog radio circuitry vary with frequency.
+ *
+ * Not all sets need to be filled with data;
+ * struct iwl_eeprom_calib_subband_info contains range of channels
+ * (0 if unused) for each set of data.
+ */
+struct iwl_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+ __le16 voltage; /* signed */
+ struct iwl_eeprom_calib_subband_info
+ band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
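+
+/*
+ * Illustrative helper (not part of this patch): decoding the 16-bit
+ * EEPROM_RADIO_CONFIG word with the masks above.  The caller is assumed to
+ * have fetched the word, e.g. with iwl_legacy_eeprom_query16() declared at
+ * the end of this header.
+ */
+static inline void iwl_example_decode_radio_cfg(u16 radio_cfg, u8 *rf_type,
+ u8 *tx_ant, u8 *rx_ant)
+{
+ *rf_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg); /* bits 0-1 */
+ *tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); /* bits 8-11 */
+ *rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); /* bits 12-15 */
+}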
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
+#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE: The RXON command uses 20 MHz channel numbers to specify the
+ * control channel to which to tune. RXON also specifies whether the
+ * control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
+
+struct iwl_eeprom_ops {
+ const u32 regulatory_bands[7];
+ int (*acquire_semaphore) (struct iwl_priv *priv);
+ void (*release_semaphore) (struct iwl_priv *priv);
+};
+
+
+int iwl_legacy_eeprom_init(struct iwl_priv *priv);
+void iwl_legacy_eeprom_free(struct iwl_priv *priv);
+const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset);
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+int iwl_legacy_init_channel_map(struct iwl_priv *priv);
+void iwl_legacy_free_channel_map(struct iwl_priv *priv);
+const struct iwl_channel_info *iwl_legacy_get_channel_info(
+ const struct iwl_priv *priv,
+ enum ieee80211_band band, u16 channel);
+
+#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 000000000000..4e20c7e5c883
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_fh_h__
+#define __iwl_legacy_fh_h__
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH_MEM_LOWER_BOUND (0x1000)
+#define FH_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
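+
+/*
+ * Illustrative sketch (not part of this patch): programming the keep-warm
+ * buffer address.  @kw_dma is assumed to be the DMA address of a 4 KByte,
+ * 4K-aligned buffer from dma_alloc_coherent(); iwl_legacy_write_direct32()
+ * is assumed from iwl-io.h.  Physical address bits [35:4] go into register
+ * bits [31:0], hence the >> 4.
+ */
+#if 0 /* example only */
+static void iwl_example_set_kw_buffer(struct iwl_priv *priv, dma_addr_t kw_dma)
+{
+ iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, kw_dma >> 4);
+}
+#endif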
+
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct iwl_tfd). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
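+
+/*
+ * Illustrative sketch (not part of this patch): programming the TFD circular
+ * buffer base for Tx queue @txq_id.  @cb_dma is assumed to be a 256-byte
+ * aligned DMA address; iwl_legacy_write_direct32() is assumed from iwl-io.h.
+ * Physical address bits [35:8] land in register bits [27:0], hence the >> 8.
+ */
+#if 0 /* example only */
+static void iwl_example_set_tfd_cb(struct iwl_priv *priv, int txq_id,
+ dma_addr_t cb_dma)
+{
+ iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), cb_dma >> 8);
+}
+#endif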
+
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the index of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver; see struct iwl4965_shared, val0):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" index register,
+ * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" index corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" index has advanced past 1! See below).
+ * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the index of the latest filled RBD. The driver must
+ * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third index, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index. For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
+#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
+#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
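+
+/*
+ * Illustrative sketch (not part of this patch): advertising newly prepared
+ * RBs to the device.  Per the note above the write index is advanced in
+ * multiples of 8, so it is rounded down first.  iwl_legacy_write_direct32()
+ * is assumed from iwl-io.h.
+ */
+#if 0 /* example only */
+static void iwl_example_rx_queue_update_wptr(struct iwl_priv *priv, u32 write)
+{
+ u32 write_actual = write & ~0x7; /* multiple of 8 */
+
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, write_actual);
+}
+#endif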
+
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
+ * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
+#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
+
+#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
+
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
+#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
+
+#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define RX_RB_TIMEOUT (0x10)
+
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
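+
+/*
+ * Illustrative sketch (not part of this patch): the "normal operation" Rx
+ * config value described above -- DMA enabled, interrupt to host, 4K RBs,
+ * 256 RBDs (2^8) and the typical 0x10 RB timeout.
+ * iwl_legacy_write_direct32() is assumed from iwl-io.h.
+ */
+#if 0 /* example only */
+static void iwl_example_rx_config(struct iwl_priv *priv)
+{
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
+}
+#endif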
+
+#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
+#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
+
+#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
+#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (FH_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
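+
+/*
+ * Illustrative sketch (not part of this patch): stopping Rx DMA as described
+ * above -- clear the channel 0 config register, then poll the shared status
+ * register until the idle bit is set.  iwl_legacy_write_direct32() and
+ * iwl_poll_direct_bit() are assumed from iwl-io.h.
+ */
+#if 0 /* example only */
+static int iwl_example_rx_stop(struct iwl_priv *priv)
+{
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+#endif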
+
+#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
+#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
+#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
+#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM (7)
+#define FH50_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
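+
+/*
+ * Illustrative sketch (not part of this patch): bringing up one Tx DMA
+ * channel with the two enable bits called out above, all other bits 0.
+ * iwl_legacy_write_direct32() is assumed from iwl-io.h.
+ */
+#if 0 /* example only */
+static void iwl_example_tx_chnl_enable(struct iwl_priv *priv, int chnl)
+{
+ iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+}
+#endif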
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
+#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with a previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
+#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
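+
+/*
+ * Illustrative sketch (not part of this patch): after writing 0 to a
+ * channel's FH_TCSR_CHNL_TX_CONFIG_REG, wait for that channel's idle bit
+ * in FH_TSSR_TX_STATUS_REG.  iwl_poll_direct_bit() is assumed from iwl-io.h.
+ */
+#if 0 /* example only */
+static int iwl_example_tx_chnl_wait_idle(struct iwl_priv *priv, int chnl)
+{
+ return iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
+}
+#endif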
+
+/* Tx service channels */
+#define FH_SRVC_CHNL (9)
+#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
+#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
+/**
+ * struct iwl_rb_status - receive buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * to which the last frame was written
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwl_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_nam;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IWL_NUM_OF_TBS 20
+
+static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+/**
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one Tx buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer;
+ * every even-numbered entry is unaligned on a 16-bit boundary
+ * @hi_n_len 0-3 [35:32] portion of dma
+ * 4-15 length of the tx buffer
+ */
+struct iwl_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
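+
+/*
+ * Illustrative sketch (not part of this patch): filling one TB entry.  The
+ * low 32 address bits go into @lo; address bits [35:32] and the length share
+ * @hi_n_len as described above.  cpu_to_le32()/cpu_to_le16() are assumed
+ * from the usual kernel byte-order headers.
+ */
+#if 0 /* example only */
+static void iwl_example_tfd_tb_set(struct iwl_tfd_tb *tb, dma_addr_t addr,
+ u16 len)
+{
+ u16 hi_n_len = len << 4; /* length in bits 4-15 */
+
+ tb->lo = cpu_to_le32((u32)addr);
+ hi_n_len |= iwl_legacy_get_dma_hi_addr(addr); /* addr[35:32] in bits 0-3 */
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+}
+#endif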
+
+/**
+ * struct iwl_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct iwl_tfd {
+ u8 __reserved1[3];
+ u8 num_tbs;
+ struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+ __le32 __pad;
+} __packed;
+
+/* Keep Warm Size */
+#define IWL_KW_SIZE 0x1000 /* 4k */
+
+#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 000000000000..9d721cbda5bb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-core.h"
+
+
+const char *iwl_legacy_get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IWL_CMD(REPLY_ALIVE);
+ IWL_CMD(REPLY_ERROR);
+ IWL_CMD(REPLY_RXON);
+ IWL_CMD(REPLY_RXON_ASSOC);
+ IWL_CMD(REPLY_QOS_PARAM);
+ IWL_CMD(REPLY_RXON_TIMING);
+ IWL_CMD(REPLY_ADD_STA);
+ IWL_CMD(REPLY_REMOVE_STA);
+ IWL_CMD(REPLY_WEPKEY);
+ IWL_CMD(REPLY_3945_RX);
+ IWL_CMD(REPLY_TX);
+ IWL_CMD(REPLY_RATE_SCALE);
+ IWL_CMD(REPLY_LEDS_CMD);
+ IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+ IWL_CMD(REPLY_CHANNEL_SWITCH);
+ IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+ IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+ IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+ IWL_CMD(POWER_TABLE_CMD);
+ IWL_CMD(PM_SLEEP_NOTIFICATION);
+ IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+ IWL_CMD(REPLY_SCAN_CMD);
+ IWL_CMD(REPLY_SCAN_ABORT_CMD);
+ IWL_CMD(SCAN_START_NOTIFICATION);
+ IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+ IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+ IWL_CMD(BEACON_NOTIFICATION);
+ IWL_CMD(REPLY_TX_BEACON);
+ IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+ IWL_CMD(REPLY_BT_CONFIG);
+ IWL_CMD(REPLY_STATISTICS_CMD);
+ IWL_CMD(STATISTICS_NOTIFICATION);
+ IWL_CMD(CARD_STATE_NOTIFICATION);
+ IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+ IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+ IWL_CMD(SENSITIVITY_CMD);
+ IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+ IWL_CMD(REPLY_RX_PHY_CMD);
+ IWL_CMD(REPLY_RX_MPDU_CMD);
+ IWL_CMD(REPLY_RX);
+ IWL_CMD(REPLY_COMPRESSED_BA);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ switch (cmd->hdr.cmd) {
+ case REPLY_TX_LINK_QUALITY_CMD:
+ case SENSITIVITY_CMD:
+ IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ }
+#endif
+}
+
+static int
+iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+ BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = iwl_legacy_generic_cmd_callback;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EBUSY;
+
+ ret = iwl_legacy_enqueue_hcmd(priv, cmd);
+ if (ret < 0) {
+ IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_legacy_get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ BUG_ON(cmd->flags & CMD_ASYNC);
+
+ /* A synchronous command can not have a callback set. */
+ BUG_ON(cmd->callback);
+
+ IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ mutex_lock(&priv->sync_cmd_mutex);
+
+ set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+
+ cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_legacy_get_cmd_string(cmd->id), ret);
+ goto out;
+ }
+
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
+ IWL_ERR(priv,
+ "Error sending %s: time out after %dms.\n",
+ iwl_legacy_get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv,
+ "Clearing HCMD_ACTIVE for command %s\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
+ IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+ IWL_ERR(priv, "Command %s failed: FW Error\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ ret = 0;
+ goto out;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+ ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ iwl_legacy_free_pages(priv, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+out:
+ mutex_unlock(&priv->sync_cmd_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
+
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return iwl_legacy_send_cmd_async(priv, cmd);
+
+ return iwl_legacy_send_cmd_sync(priv, cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd);
+
+int
+iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ return iwl_legacy_send_cmd_sync(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
+
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
+ u8 id, u16 len, const void *data,
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt))
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ cmd.flags |= CMD_ASYNC;
+ cmd.callback = callback;
+
+ return iwl_legacy_send_cmd_async(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 000000000000..02132e755831
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_helpers_h__
+#define __iwl_legacy_helpers_h__
+
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
+
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+
+static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
+ struct ieee80211_hw *hw)
+{
+ return &hw->conf;
+}
+
+/**
+ * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
+{
+ return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
+{
+ return --index & (n_bd - 1);
+}
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
+ struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(&pci_dev->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
+ struct fw_desc *desc)
+{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
+ return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
+
+static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (test_and_clear_bit(hwq, priv->queue_stopped))
+ if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(priv->hw, ac);
+}
+
+static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (!test_and_set_bit(hwq, priv->queue_stopped))
+ if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(priv->hw, ac);
+}
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
+{
+ clear_bit(STATUS_INT_ENABLED, &priv->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(priv, CSR_INT, 0xffffffff);
+ iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
+ IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
+}
+
+static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
+{
+ IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
+ set_bit(STATUS_INT_ENABLED, &priv->status);
+ iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
+#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 000000000000..5cc5d342914f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_io_h__
+#define __iwl_legacy_io_h__
+
+#include <linux/io.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-devtrace.h"
+
+/*
+ * IO, register, and NIC memory access functions
+ *
+ * NOTE on naming convention and macro usage for these
+ *
+ * A single _ prefix before an access function means that no state
+ * check or debug information is printed when that function is called.
+ *
+ * A double __ prefix before an access function means that state is checked
+ * and the current file name and line number are printed in addition
+ * to any other debug output.
+ *
+ * The non-prefixed name is the #define that maps the caller into a
+ * #define that provides __FILE__ and __LINE__ to the double
+ * prefix version.
+ *
+ * If you wish to call the function without any debug or state checking,
+ * you should use the single _ prefix version (as is used by dependent IO
+ * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
+ * _iwl_legacy_read32.)
+ *
+ * These declarations are *extremely* useful in quickly isolating code deltas
+ * which result in misconfiguration of the hardware I/O. In combination with
+ * git-bisect and the IO debug level you can quickly determine the specific
+ * commit which breaks the IO sequence to the hardware.
+ *
+ */
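+
+/*
+ * Illustrative example (not part of this patch): with
+ * CONFIG_IWLWIFI_LEGACY_DEBUG set, a call such as
+ *
+ * iwl_write32(priv, CSR_INT_MASK, 0);
+ *
+ * expands to __iwl_legacy_write32(__FILE__, __LINE__, priv, CSR_INT_MASK, 0),
+ * which logs the access and then calls _iwl_legacy_write32().  Without the
+ * debug option it maps straight to _iwl_legacy_write32().  CSR_INT_MASK is
+ * assumed to come from iwl-csr.h.
+ */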
+
+static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+{
+ trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
+ iowrite8(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
+ u32 ofs, u8 val)
+{
+ IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
+ _iwl_legacy_write8(priv, ofs, val);
+}
+#define iwl_write8(priv, ofs, val) \
+ __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
+#endif
+
+
+static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+{
+ trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
+ iowrite32(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
+ u32 ofs, u32 val)
+{
+ IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
+ _iwl_legacy_write32(priv, ofs, val);
+}
+#define iwl_write32(priv, ofs, val) \
+ __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
+#endif
+
+static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
+{
+ u32 val = ioread32(priv->hw_base + ofs);
+ trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
+ return val;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32
+__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
+{
+ IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
+ return _iwl_legacy_read32(priv, ofs);
+}
+#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
+#else
+#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
+#endif
+
+#define IWL_POLL_INTERVAL 10 /* microseconds */
+static inline int
+_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
+ struct iwl_priv *priv, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
+ IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
+ addr, bits, mask,
+ unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
+ return ret;
+}
+#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
+ __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
+ bits, mask, timeout)
+#else
+#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
+#endif
+
+static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_set_bit(const char *f, u32 l,
+ struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ u32 val = _iwl_legacy_read32(priv, reg) | mask;
+ IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
+ mask, val);
+ _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _iwl_legacy_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline void
+_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_clear_bit(const char *f, u32 l,
+ struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
+ IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
+ _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _iwl_legacy_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ /* this bit wakes up the NIC */
+ _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ */
+ ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (ret < 0) {
+ val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
+ IWL_ERR(priv,
+ "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ _iwl_legacy_write32(priv, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
+ struct iwl_priv *priv)
+{
+ IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
+ return _iwl_legacy_grab_nic_access(priv);
+}
+#define iwl_grab_nic_access(priv) \
+ __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_grab_nic_access(priv) \
+ _iwl_legacy_grab_nic_access(priv)
+#endif
+
+static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
+{
+ _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
+ struct iwl_priv *priv)
+{
+
+ IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
+ _iwl_legacy_release_nic_access(priv);
+}
+#define iwl_release_nic_access(priv) \
+ __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_release_nic_access(priv) \
+ _iwl_legacy_release_nic_access(priv)
+#endif
+
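+/*
+ * Typical usage pattern (illustrative only): hold reg_lock and the MAC
+ * access request across a burst of direct register accesses so the device
+ * cannot go back to sleep between them:
+ *
+ *	spin_lock_irqsave(&priv->reg_lock, flags);
+ *	if (!iwl_grab_nic_access(priv)) {
+ *		val = _iwl_legacy_read_direct32(priv, reg);
+ *		...
+ *		iwl_release_nic_access(priv);
+ *	}
+ *	spin_unlock_irqrestore(&priv->reg_lock, flags);
+ *
+ * The wrappers below (iwl_legacy_read_direct32(), iwl_legacy_write_direct32()
+ * and the PRPH helpers) follow this sequence.
+ */
+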
+static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ return _iwl_legacy_read32(priv, reg);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
+ struct iwl_priv *priv, u32 reg)
+{
+ u32 value = _iwl_legacy_read_direct32(priv, reg);
+ IWL_DEBUG_IO(priv,
+ "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
+ f, l);
+ return value;
+}
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+}
+
+#else
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ value = _iwl_legacy_read_direct32(priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+
+}
+#endif
+
+static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
+ u32 reg, u32 value)
+{
+ _iwl_legacy_write32(priv, reg, value);
+}
+static inline void
+iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_direct32(priv, reg, value);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
+ u32 reg, u32 len, u32 *values)
+{
+ u32 count = sizeof(u32);
+
+ if ((priv != NULL) && (values != NULL)) {
+ for (; 0 < len; len -= count, reg += count, values++)
+ iwl_legacy_write_direct32(priv, reg, *values);
+ }
+}
+
+static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
+ u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
+ struct iwl_priv *priv,
+ u32 addr, u32 mask, int timeout)
+{
+ int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
+
+ if (unlikely(ret == -ETIMEDOUT))
+ IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
+ "timedout - %s %d\n", addr, mask, f, l);
+ else
+ IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
+ "- %s %d\n", addr, mask, ret, f, l);
+ return ret;
+}
+#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
+__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
+#else
+#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
+#endif
+
+static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ rmb();
+ return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
+}
+static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ val = _iwl_legacy_read_prph(priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return val;
+}
+
+static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
+ u32 addr, u32 val)
+{
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
+ ((addr & 0x0000FFFF) | (3 << 24)));
+ wmb();
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_prph(priv, addr, val);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
+_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
+
+static inline void
+iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ _iwl_legacy_set_bits_prph(priv, reg, mask);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
+_iwl_legacy_write_prph(priv, reg, \
+ ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
+
+static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+ u32 bits, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
+ *priv, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ val = _iwl_legacy_read_prph(priv, reg);
+ _iwl_legacy_write_prph(priv, reg, (val & ~mask));
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
+{
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+ rmb();
+ value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+}
+
+static inline void
+iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void
+iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
+ u32 len, u32 *values)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ for (; 0 < len; len -= sizeof(u32), values++)
+ _iwl_legacy_write_direct32(priv,
+ HBUS_TARG_MEM_WDAT, *values);
+
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 000000000000..15eb8b707157
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,188 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* default: 0 (IWL_LED_DEFAULT) - use the device's default LED mode */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+ "1=On(RF On)/Off(RF Off), 2=blinking");
+
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+ { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+ { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+ { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+ { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+ { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+ { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+ { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+ { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+ { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+ { .throughput = 300 * 1024 - 1, .blink_time = 50 },
+};
+
+/*
+ * Adjust the LED blink rate to compensate for the MAC clock deviation
+ * of each HW type.
+ * LED blink rate analysis showed an average deviation of 0% on 3945 and
+ * 5% on 4965 HW.
+ * The LED on/off time must be compensated per HW according to this
+ * deviation to achieve the desired LED blink frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ * compensation = (100 - averageDeviation) * 64 / 100
+ * NewBlinkTime = (compensation * BlinkTime) / 64
+ */
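+/*
+ * Worked example of the formula above (illustrative): with the 5%
+ * deviation measured on 4965, compensation = (100 - 5) * 64 / 100 = 60,
+ * so a nominal 260 ms blink time becomes (260 * 60) >> 6 = 243 ms.
+ */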
+static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
+ u8 time, u16 compensation)
+{
+ if (!compensation) {
+ IWL_ERR(priv, "undefined blink compensation: "
+ "use pre-defined blinking time\n");
+ return time;
+ }
+
+ return (u8)((time * compensation) >> 6);
+}
+
+/* Set led pattern command */
+static int iwl_legacy_led_cmd(struct iwl_priv *priv,
+ unsigned long on,
+ unsigned long off)
+{
+ struct iwl_led_cmd led_cmd = {
+ .id = IWL_LED_LINK,
+ .interval = IWL_DEF_LED_INTRVL
+ };
+ int ret;
+
+ if (!test_bit(STATUS_READY, &priv->status))
+ return -EBUSY;
+
+ if (priv->blink_on == on && priv->blink_off == off)
+ return 0;
+
+ IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
+ priv->cfg->base_params->led_compensation);
+ led_cmd.on = iwl_legacy_blink_compensation(priv, on,
+ priv->cfg->base_params->led_compensation);
+ led_cmd.off = iwl_legacy_blink_compensation(priv, off,
+ priv->cfg->base_params->led_compensation);
+
+ ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+ if (!ret) {
+ priv->blink_on = on;
+ priv->blink_off = off;
+ }
+ return ret;
+}
+
+static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+ unsigned long on = 0;
+
+ if (brightness > 0)
+ on = IWL_LED_SOLID;
+
+ iwl_legacy_led_cmd(priv, on, 0);
+}
+
+static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+
+ return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
+}
+
+void iwl_legacy_leds_init(struct iwl_priv *priv)
+{
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IWL_LED_DEFAULT)
+ mode = priv->cfg->led_mode;
+
+ priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(priv->hw->wiphy));
+ priv->led.brightness_set = iwl_legacy_led_brightness_set;
+ priv->led.blink_set = iwl_legacy_led_blink_set;
+ priv->led.max_brightness = 1;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IWL_LED_BLINK:
+ priv->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(priv->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ iwl_blink, ARRAY_SIZE(iwl_blink));
+ break;
+ case IWL_LED_RF_STATE:
+ priv->led.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+ break;
+ }
+
+ ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+ if (ret) {
+ kfree(priv->led.name);
+ return;
+ }
+
+ priv->led_registered = true;
+}
+EXPORT_SYMBOL(iwl_legacy_leds_init);
+
+void iwl_legacy_leds_exit(struct iwl_priv *priv)
+{
+ if (!priv->led_registered)
+ return;
+
+ led_classdev_unregister(&priv->led);
+ kfree(priv->led.name);
+}
+EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 000000000000..f0791f70f79d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_leds_h__
+#define __iwl_legacy_leds_h__
+
+
+struct iwl_priv;
+
+#define IWL_LED_SOLID 11
+#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IWL_LED_ACTIVITY (0<<1)
+#define IWL_LED_LINK (1<<1)
+
+/*
+ * LED mode
+ * IWL_LED_DEFAULT: use device default
+ * IWL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IWL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+ IWL_LED_DEFAULT,
+ IWL_LED_RF_STATE,
+ IWL_LED_BLINK,
+};
+
+void iwl_legacy_leds_init(struct iwl_priv *priv);
+void iwl_legacy_leds_exit(struct iwl_priv *priv);
+
+#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 000000000000..38647e481eb0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_rs_h__
+#define __iwl_legacy_rs_h__
+
+struct iwl_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+struct iwl3945_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_index; /* index in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+};
+
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+ */
+enum {
+ IWL_RATE_1M_INDEX = 0,
+ IWL_RATE_2M_INDEX,
+ IWL_RATE_5M_INDEX,
+ IWL_RATE_11M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX,
+ IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX,
+ IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX,
+ IWL_RATE_54M_INDEX,
+ IWL_RATE_60M_INDEX,
+ IWL_RATE_COUNT,
+ IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
+ IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+enum {
+ IWL_RATE_6M_INDEX_TABLE = 0,
+ IWL_RATE_9M_INDEX_TABLE,
+ IWL_RATE_12M_INDEX_TABLE,
+ IWL_RATE_18M_INDEX_TABLE,
+ IWL_RATE_24M_INDEX_TABLE,
+ IWL_RATE_36M_INDEX_TABLE,
+ IWL_RATE_48M_INDEX_TABLE,
+ IWL_RATE_54M_INDEX_TABLE,
+ IWL_RATE_1M_INDEX_TABLE,
+ IWL_RATE_2M_INDEX_TABLE,
+ IWL_RATE_5M_INDEX_TABLE,
+ IWL_RATE_11M_INDEX_TABLE,
+ IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+enum {
+ IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
+ IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+ IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
+#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
+#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
+#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
+#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
+#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
+#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
+#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
+#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
+#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
+#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
+#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+ IWL_RATE_6M_PLCP = 13,
+ IWL_RATE_9M_PLCP = 15,
+ IWL_RATE_12M_PLCP = 5,
+ IWL_RATE_18M_PLCP = 7,
+ IWL_RATE_24M_PLCP = 9,
+ IWL_RATE_36M_PLCP = 11,
+ IWL_RATE_48M_PLCP = 1,
+ IWL_RATE_54M_PLCP = 3,
+ IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
+ IWL_RATE_1M_PLCP = 10,
+ IWL_RATE_2M_PLCP = 20,
+ IWL_RATE_5M_PLCP = 55,
+ IWL_RATE_11M_PLCP = 110,
+ /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ IWL_RATE_SISO_6M_PLCP = 0,
+ IWL_RATE_SISO_12M_PLCP = 1,
+ IWL_RATE_SISO_18M_PLCP = 2,
+ IWL_RATE_SISO_24M_PLCP = 3,
+ IWL_RATE_SISO_36M_PLCP = 4,
+ IWL_RATE_SISO_48M_PLCP = 5,
+ IWL_RATE_SISO_54M_PLCP = 6,
+ IWL_RATE_SISO_60M_PLCP = 7,
+ IWL_RATE_MIMO2_6M_PLCP = 0x8,
+ IWL_RATE_MIMO2_12M_PLCP = 0x9,
+ IWL_RATE_MIMO2_18M_PLCP = 0xa,
+ IWL_RATE_MIMO2_24M_PLCP = 0xb,
+ IWL_RATE_MIMO2_36M_PLCP = 0xc,
+ IWL_RATE_MIMO2_48M_PLCP = 0xd,
+ IWL_RATE_MIMO2_54M_PLCP = 0xe,
+ IWL_RATE_MIMO2_60M_PLCP = 0xf,
+ IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ IWL_RATE_6M_IEEE = 12,
+ IWL_RATE_9M_IEEE = 18,
+ IWL_RATE_12M_IEEE = 24,
+ IWL_RATE_18M_IEEE = 36,
+ IWL_RATE_24M_IEEE = 48,
+ IWL_RATE_36M_IEEE = 72,
+ IWL_RATE_48M_IEEE = 96,
+ IWL_RATE_54M_IEEE = 108,
+ IWL_RATE_60M_IEEE = 120,
+ IWL_RATE_1M_IEEE = 2,
+ IWL_RATE_2M_IEEE = 4,
+ IWL_RATE_5M_IEEE = 11,
+ IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_CCK_BASIC_RATES_MASK \
+ (IWL_RATE_1M_MASK | \
+ IWL_RATE_2M_MASK)
+
+#define IWL_CCK_RATES_MASK \
+ (IWL_CCK_BASIC_RATES_MASK | \
+ IWL_RATE_5M_MASK | \
+ IWL_RATE_11M_MASK)
+
+#define IWL_OFDM_BASIC_RATES_MASK \
+ (IWL_RATE_6M_MASK | \
+ IWL_RATE_12M_MASK | \
+ IWL_RATE_24M_MASK)
+
+#define IWL_OFDM_RATES_MASK \
+ (IWL_OFDM_BASIC_RATES_MASK | \
+ IWL_RATE_9M_MASK | \
+ IWL_RATE_18M_MASK | \
+ IWL_RATE_36M_MASK | \
+ IWL_RATE_48M_MASK | \
+ IWL_RATE_54M_MASK)
+
+#define IWL_BASIC_RATES_MASK \
+ (IWL_OFDM_BASIC_RATES_MASK | \
+ IWL_CCK_BASIC_RATES_MASK)
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
+
+#define IWL_INVALID_VALUE -1
+
+#define IWL_MIN_RSSI_VAL -100
+#define IWL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT 160
+#define IWL_LEGACY_SUCCESS_LIMIT 480
+#define IWL_LEGACY_TABLE_COUNT 160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_NONE_LEGACY_TABLE_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO 12800 /* 100% */
+#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
+#define IWL_RATE_HIGH_TH 10880 /* 85% */
+#define IWL_RATE_INCREASE_TH 6400 /* 50% */
+#define IWL_RATE_DECREASE_TH 1920 /* 15% */
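+
+/*
+ * Illustrative reading of the thresholds above: the ratio is scaled so
+ * that 128 == 1%, hence 12800 == 100%. For example, 54 ACKed frames out
+ * of 62 attempts gives roughly 128 * 100 * 54 / 62 = 11148, which is
+ * above IWL_RATE_HIGH_TH (85%) and so counts as a good success ratio.
+ */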
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1 0
+#define IWL_LEGACY_SWITCH_ANTENNA2 1
+#define IWL_LEGACY_SWITCH_SISO 2
+#define IWL_LEGACY_SWITCH_MIMO2_AB 3
+#define IWL_LEGACY_SWITCH_MIMO2_AC 4
+#define IWL_LEGACY_SWITCH_MIMO2_BC 5
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1 0
+#define IWL_SISO_SWITCH_ANTENNA2 1
+#define IWL_SISO_SWITCH_MIMO2_AB 2
+#define IWL_SISO_SWITCH_MIMO2_AC 3
+#define IWL_SISO_SWITCH_MIMO2_BC 4
+#define IWL_SISO_SWITCH_GI 5
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1 0
+#define IWL_MIMO2_SWITCH_ANTENNA2 1
+#define IWL_MIMO2_SWITCH_SISO_A 2
+#define IWL_MIMO2_SWITCH_SISO_B 3
+#define IWL_MIMO2_SWITCH_SISO_C 4
+#define IWL_MIMO2_SWITCH_GI 5
+
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+
+#define IWL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD 0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /*mS */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* mS */
+#define TID_MAX_LOAD_COUNT 8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
+
+extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+
+enum iwl_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define ANT_NONE 0x0
+#define ANT_A BIT(0)
+#define ANT_B BIT(1)
+#define ANT_AB (ANT_A | ANT_B)
+#define ANT_C BIT(2)
+#define ANT_AC (ANT_A | ANT_C)
+#define ANT_BC (ANT_B | ANT_C)
+#define ANT_ABC (ANT_AB | ANT_C)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+ enum iwl_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 is_dup; /* 1 = duplicated data streams */
+ u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+ unsigned long time_stamp; /* age of the oldest statistics */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+ u8 queue_count; /* number of queues that have
+ * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+ u8 active_tbl; /* index of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ u8 is_dup;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct iwl_link_quality_cmd lq;
+ struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct iwl_priv *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+};
+
+static inline u8 iwl4965_num_of_ant(u8 mask)
+{
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
+}
+
+static inline u8 iwl4965_first_antenna(u8 mask)
+{
+ if (mask & ANT_A)
+ return ANT_A;
+ if (mask & ANT_B)
+ return ANT_B;
+ return ANT_C;
+}
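+
+/*
+ * For example (illustrative): iwl4965_num_of_ant(ANT_AB) == 2 and
+ * iwl4965_first_antenna(ANT_BC) == ANT_B; an empty mask falls through to
+ * ANT_C in iwl4965_first_antenna(), so callers are expected to pass a
+ * non-empty antenna mask.
+ */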
+
+
+/**
+ * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+
+/**
+ * iwl_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand alone module. The driver can call
+ * iwl_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem. This should be performed prior to calling
+ * ieee80211_register_hw
+ *
+ */
+extern int iwl4965_rate_control_register(void);
+extern int iwl3945_rate_control_register(void);
+
+/**
+ * iwl_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void iwl4965_rate_control_unregister(void);
+extern void iwl3945_rate_control_unregister(void);
+
+#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 000000000000..903ef0d6d6cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-commands.h"
+#include "iwl-debug.h"
+#include "iwl-power.h"
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct iwl_power_vec_entry {
+ struct iwl_powertable_cmd cmd;
+ u8 no_dtim; /* number of skip dtim */
+};
+
+static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
+ struct iwl_powertable_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (priv->power_data.pci_pm)
+ cmd->flags |= IWL_POWER_PCI_PM_MSK;
+
+ IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
+}
+
+static int
+iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
+{
+ IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
+ IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
+ IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(priv,
+ "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+ le32_to_cpu(cmd->sleep_interval[0]),
+ le32_to_cpu(cmd->sleep_interval[1]),
+ le32_to_cpu(cmd->sleep_interval[2]),
+ le32_to_cpu(cmd->sleep_interval[3]),
+ le32_to_cpu(cmd->sleep_interval[4]));
+
+ return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
+ sizeof(struct iwl_powertable_cmd), cmd);
+}
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+ bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+ priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
+
+ if (!iwl_legacy_is_ready_rf(priv))
+ return -EIO;
+
+ /* scan complete uses sleep_cmd_next, so keep it updated */
+ memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+ IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(STATUS_POWER_PMI, &priv->status);
+
+ ret = iwl_legacy_set_power(priv, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(STATUS_POWER_PMI, &priv->status);
+
+ if (priv->cfg->ops->lib->update_chain_flags && update_chains)
+ priv->cfg->ops->lib->update_chain_flags(priv);
+ else if (priv->cfg->ops->lib->update_chain_flags)
+ IWL_DEBUG_POWER(priv,
+ "Cannot update the power, chain noise "
+ "calibration running: %d\n",
+ priv->chain_noise_data.state);
+
+ memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IWL_ERR(priv, "set power fail, ret = %d", ret);
+
+ return ret;
+}
+
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
+{
+ struct iwl_powertable_cmd cmd;
+
+ iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
+ return iwl_legacy_power_set_mode(priv, &cmd, force);
+}
+EXPORT_SYMBOL(iwl_legacy_power_update_mode);
+
+/* initialize to default */
+void iwl_legacy_power_initialize(struct iwl_priv *priv)
+{
+ u16 lctl = iwl_legacy_pcie_link_ctl(priv);
+
+ priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+ priv->power_data.debug_sleep_level_override = -1;
+
+ memset(&priv->power_data.sleep_cmd, 0,
+ sizeof(priv->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 000000000000..d30b36acdc4a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#ifndef __iwl_legacy_power_setting_h__
+#define __iwl_legacy_power_setting_h__
+
+#include "iwl-commands.h"
+
+enum iwl_power_level {
+ IWL_POWER_INDEX_1,
+ IWL_POWER_INDEX_2,
+ IWL_POWER_INDEX_3,
+ IWL_POWER_INDEX_4,
+ IWL_POWER_INDEX_5,
+ IWL_POWER_NUM
+};
+
+struct iwl_power_mgr {
+ struct iwl_powertable_cmd sleep_cmd;
+ struct iwl_powertable_cmd sleep_cmd_next;
+ int debug_sleep_level_override;
+ bool pci_pm;
+};
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+ bool force);
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
+void iwl_legacy_power_initialize(struct iwl_priv *priv);
+
+#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 000000000000..30a493003ab0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_prph_h__
+#define __iwl_legacy_prph_h__
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via HBUS_TARG_PRPH_* registers.
+ */
+#define PRPH_BASE (0x00000)
+#define PRPH_END (0xFFFFF)
+
+/* APMG (power management) constants */
+#define APMG_BASE (PRPH_BASE + 0x3000)
+#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
+#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
+#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
+#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
+#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
+#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
+#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
+#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
+#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
+#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
+
+#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
+#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
+#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
+
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bits 8:5 */
+#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
+
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+
+/**
+ * BSM (Bootstrap State Machine)
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down when the embedded control
+ * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
+ *
+ * When powering back up after sleeps (or during initial uCode load), the BSM
+ * internally loads the short bootstrap program from the special SRAM into the
+ * embedded processor's instruction SRAM, and starts the processor so it runs
+ * the bootstrap program.
+ *
+ * This bootstrap program loads (via PCI busmaster DMA) instructions and data
+ * images for a uCode program from host DRAM locations. The host driver
+ * indicates DRAM locations and sizes for instruction and data images via the
+ * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
+ * the new program starts automatically.
+ *
+ * The uCode used for open-source drivers includes two programs:
+ *
+ * 1) Initialization -- performs hardware calibration and sets up some
+ * internal data, then notifies host via "initialize alive" notification
+ * (struct iwl_init_alive_resp) that it has completed all of its work.
+ * After signal from host, it then loads and starts the runtime program.
+ * The initialization program must be used when initially setting up the
+ * NIC after loading the driver.
+ *
+ * 2) Runtime/Protocol -- performs all normal runtime operations. This
+ * notifies host via "alive" notification (struct iwl_alive_resp) that it
+ * is ready to be used.
+ *
+ * When initializing the NIC, the host driver does the following procedure:
+ *
+ * 1) Load bootstrap program (instructions only, no data image for bootstrap)
+ * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
+ *
+ * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
+ * images in host DRAM.
+ *
+ * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
+ * BSM_WR_MEM_SRC_REG = 0
+ * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
+ * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
+ *
+ * 4) Load bootstrap into instruction SRAM:
+ * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
+ *
+ * 5) Wait for load completion:
+ * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
+ *
+ * 6) Enable future boot loads whenever NIC's power management triggers it:
+ * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
+ *
+ * 7) Start the NIC by removing all reset bits:
+ * CSR_RESET = 0
+ *
+ * The bootstrap uCode (already in instruction SRAM) loads initialization
+ * uCode. Initialization uCode performs data initialization, sends
+ * "initialize alive" notification to host, and waits for a signal from
+ * host to load runtime code.
+ *
+ * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
+ * images in host DRAM. The last register loaded must be the instruction
+ * byte count register ("1" in MSbit tells initialization uCode to load
+ * the runtime uCode):
+ * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
+ *
+ * 5) Wait for "alive" notification, then issue normal runtime commands.
+ *
+ * Data caching during power-downs:
+ *
+ * Just before the embedded controller powers down (e.g for automatic
+ * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
+ * a current snapshot of the embedded processor's data SRAM into host DRAM.
+ * This caches the data while the embedded processor's memory is powered down.
+ * Location and size are controlled by BSM_DRAM_DATA_* registers.
+ *
+ * NOTE: Instruction SRAM does not need to be saved, since that doesn't
+ * change during operation; the original image (from uCode distribution
+ * file) can be used for reload.
+ *
+ * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
+ * at the BSM_DRAM_* registers, which now point to the runtime instruction
+ * image and the cached (modified) runtime data (*not* the initialization
+ * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
+ * uCode from where it left off before the power-down.
+ *
+ * NOTE: Initialization uCode does *not* run as part of the save/restore
+ * procedure.
+ *
+ * This save/restore method is mostly for autonomous power management during
+ * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
+ * RFKILL should use complete restarts (with total re-initialization) of uCode,
+ * allowing total shutdown (including BSM memory).
+ *
+ * Note that, during normal operation, the host DRAM that held the initial
+ * startup data for the runtime code is now being used as a backup data cache
+ * for modified data! If you need to completely re-initialize the NIC, make
+ * sure that you use the runtime data image from the uCode distribution file,
+ * not the modified/saved runtime data. You may want to store a separate
+ * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
+ */
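+
+/*
+ * Simplified sketch of steps 3) - 6) above (illustrative; register and
+ * bound names follow the description, and the real load routine lives in
+ * the 3945/4965 setup code using the PRPH accessors from iwl-io.h):
+ *
+ *	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0);
+ *	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
+ *	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_len / sizeof(u32));
+ *	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+ *	... poll BSM_WR_CTRL_REG until BSM_WR_CTRL_REG_BIT_START clears ...
+ *	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+ */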
+
+/* BSM bit fields */
+#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
+#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
+
+/* BSM addresses */
+#define BSM_BASE (PRPH_BASE + 0x3400)
+#define BSM_END (PRPH_BASE + 0x3800)
+
+#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
+
+/*
+ * Pointers and size regs for bootstrap load and data SRAM save/restore.
+ * NOTE: 3945 pointers use bits 31:0 of DRAM address.
+ * 4965 pointers use bits 35:4 of DRAM address.
+ */
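+/*
+ * E.g. (illustrative): on 4965 a 36-bit DRAM address such as 0x123456780
+ * is programmed as 0x12345678, i.e. the value written is addr >> 4.
+ */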
+#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
+#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
+#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
+#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
+
+/*
+ * BSM special memory, stays powered on during power-save sleeps.
+ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
+ */
+#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
+#define BSM_SRAM_SIZE (1024) /* bytes */
+
+
+/* 3945 Tx scheduler registers */
+#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
+#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
+#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
+#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
+#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
+#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
+#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
+#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM. It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device. A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
+ * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- Commands (e.g. RXON, etc.)
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
+ * 7 -- not used by driver (device-internal only)
+ *
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1) Scheduler-Ack, in which the scheduler automatically supports a
+ * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * contains TFDs for a unique combination of Recipient Address (RA)
+ * and Traffic Identifier (TID), that is, traffic of a given
+ * Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ * In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ * each frame within the BA window, including whether it's been transmitted,
+ * and whether it's been acknowledged by the receiving station. The device
+ * automatically processes block-acks received from the receiving STA,
+ * and reschedules un-acked frames to be retransmitted (successful
+ * Tx completion may end up being out-of-order).
+ *
+ * The driver must maintain the queue's Byte Count table in host DRAM
+ * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ * This mode does not support fragmentation.
+ *
+ * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ * The device may automatically retry Tx, but will retry only one frame
+ * at a time, until receiving ACK from receiving station, or reaching
+ * retry limit and giving up.
+ *
+ * The command queue (#4/#9) must use this mode!
+ * This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1) Scheduler registers
+ * 2) Shared scheduler database in internal 4965 SRAM
+ * 3) Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
+ * (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!):
+ */
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
+ * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ */
+#define SCD_WIN_SIZE 64
+#define SCD_FRAME_LIMIT 64
+
+/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
+#define IWL49_SCD_START_OFFSET 0xa02c00
+
+/*
+ * 4965 tells driver SRAM address for internal scheduler structs via this reg.
+ * Value is valid only after "Alive" response from uCode.
+ */
+#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
+
+/*
+ * Driver may need to update queue-empty bits after changing queue's
+ * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * scheduler is not tracking what's happening).
+ * Bit fields:
+ * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
+ * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
+ * NOTE: This register is not used by Linux driver.
+ */
+#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
+
+/*
+ * Physical base address of array of byte count (BC) circular buffers (CBs).
+ * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
+ * This register points to BC CB for queue 0, must be on 1024-byte boundary.
+ * Others are spaced by 1024 bytes.
+ * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
+ * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * Bit fields:
+ * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
+ */
+#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
+
+/*
+ * Enables any/all Tx DMA/FIFO channels.
+ * Scheduler generates requests for only the active channels.
+ * Set this to 0xff to enable all 8 channels (normal usage).
+ * Bit fields:
+ * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
+ */
+#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
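+
+/*
+ * Usage sketch (illustrative only): after the "Alive" response, a driver
+ * typically latches the scheduler SRAM base, points the scheduler at the
+ * byte count tables in DRAM, and enables the Tx DMA channels. The
+ * iwl_legacy_read_prph()/iwl_legacy_write_prph() helpers and the
+ * scd_base_addr/scd_bc_tbls fields are assumed names.
+ *
+ *   priv->scd_base_addr =
+ *            iwl_legacy_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
+ *   iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
+ *                         priv->scd_bc_tbls.dma >> 10);
+ *   iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, 0xff);
+ */
+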
+/*
+ * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Initialized and updated by driver as new TFDs are added to queue.
+ * NOTE: If using Block Ack, index must correspond to frame's
+ * Start Sequence Number; index = (SSN & 0xff)
+ * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
+ */
+#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+
+/*
+ * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
+ * For FIFO mode, index indicates next frame to transmit.
+ * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Initialized by driver, updated by scheduler.
+ */
+#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+
+/*
+ * Select which queues work in chain mode (1) vs. not (0).
+ * Use chain mode to build chains of aggregated frames.
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
+ * NOTE: If driver sets up a queue for chain mode, it should also set up
+ * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
+ */
+#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
+
+/*
+ * Select which queues interrupt driver when scheduler increments
+ * a queue's read pointer (index).
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
+ * NOTE: This functionality is apparently a no-op; driver relies on interrupts
+ * from Rx queue to read Tx command responses and update Tx queues.
+ */
+#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
+
+/*
+ * Queue search status registers. One for each queue.
+ * Sets up queue mode and assigns queue to Tx DMA channel.
+ * Bit fields:
+ * 19-10: Write mask/enable bits for bits 0-9
+ * 9: Driver should init to "0"
+ * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
+ * Driver should init to "1" for aggregation mode, or "0" otherwise.
+ * 7-6: Driver should init to "0"
+ * 5: Window Size Left; indicates whether scheduler can request
+ * another TFD, based on window size, etc. Driver should init
+ * this bit to "1" for aggregation mode, or "0" for non-agg.
+ * 4-1: Tx FIFO to use (range 0-7).
+ * 0: Queue is active (1), not active (0).
+ * Other bits should be written as "0"
+ *
+ * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
+ * via SCD_QUEUECHAIN_SEL.
+ */
+#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
+ (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+
+/* Bit field positions */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
+
+/* Write masks */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
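+
+/*
+ * Usage sketch (illustrative only; iwl_legacy_write_prph() is an assumed
+ * helper): activating queue "txq_id" on FIFO "fifo", with "scd_retry"
+ * non-zero for Scheduler-ACK (aggregation) mode, combines the fields above:
+ *
+ *   iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ *           (1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ *           (fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ *           (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ *           (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ *           IWL49_SCD_QUEUE_STTS_REG_MSK);
+ */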
+
+/**
+ * 4965 internal SRAM structures for scheduler, shared with driver ...
+ *
+ * Driver should clear and initialize the following areas after receiving
+ * "Alive" response from 4965 uCode, i.e. after initial
+ * uCode load, or after a uCode load done for error recovery:
+ *
+ * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
+ * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
+ * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
+ *
+ * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
+ * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
+ * All OFFSET values must be added to this base address.
+ */
+
+/*
+ * Queue context. One 8-byte entry for each of 16 queues.
+ *
+ * Driver should clear this entire area (size 0x80) to 0 after receiving
+ * "Alive" notification from uCode. Additionally, driver should init
+ * each queue's entry as follows:
+ *
+ * LS Dword bit fields:
+ * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
+ *
+ * MS Dword bit fields:
+ * 16-22: Frame limit. Driver should init to 10 (0xa).
+ *
+ * Driver should init all other bits to 0.
+ *
+ * Init must be done after driver receives "Alive" response from 4965 uCode,
+ * and when setting up queue for aggregation.
+ */
+#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
+#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+ (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
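+
+/*
+ * Usage sketch (illustrative only): when setting a queue up for aggregation,
+ * a driver typically writes the two context dwords for that queue via the
+ * assumed SRAM write helper iwl_legacy_write_targ_mem(), relative to the
+ * scheduler base read from IWL49_SCD_SRAM_BASE_ADDR (scd_base_addr below):
+ *
+ *   iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ *           IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ *           (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ *           IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+ *
+ *   iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ *           IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ *           (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ *           IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+ */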
+
+/*
+ * Tx Status Bitmap
+ *
+ * Driver should clear this entire area (size 0x100) to 0 after receiving
+ * "Alive" notification from uCode. Area is used only by device itself;
+ * no other support (besides clearing) is required from driver.
+ */
+#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
+
+/*
+ * RAxTID to queue translation mapping.
+ *
+ * When queue is in Scheduler-ACK mode, frames placed in that queue must be
+ * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
+ * one QOS priority level destined for one station (for this wireless link,
+ * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
+ * mode, the device ignores the mapping value.
+ *
+ * Bit fields, for each 16-bit map:
+ * 15-9: Reserved, set to 0
+ * 8-4: Index into device's station table for recipient station
+ * 3-0: Traffic ID (tid), range 0-15
+ *
+ * Driver should clear this entire area (size 32 bytes) to 0 after receiving
+ * "Alive" notification from uCode. To update a 16-bit map value, driver
+ * must read a dword-aligned value from device SRAM, replace the 16-bit map
+ * value of interest, and write the dword value back into device SRAM.
+ */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
+
+/* Find translation table dword to read/write for given queue */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+
+#define IWL_SCD_TXFIFO_POS_TID (0)
+#define IWL_SCD_TXFIFO_POS_RA (4)
+#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
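+
+/*
+ * Usage sketch (illustrative only): per the read-modify-write rule above, a
+ * driver maps RAxTID "ra_tid" to queue "txq_id" roughly as follows, using
+ * the assumed SRAM helpers iwl_legacy_read_targ_mem() and
+ * iwl_legacy_write_targ_mem():
+ *
+ *   u32 addr = priv->scd_base_addr +
+ *              IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+ *   u32 dw = iwl_legacy_read_targ_mem(priv, addr);
+ *   u16 map = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+ *
+ *   if (txq_id & 0x1)
+ *           dw = (map << 16) | (dw & 0x0000FFFF);
+ *   else
+ *           dw = map | (dw & 0xFFFF0000);
+ *   iwl_legacy_write_targ_mem(priv, addr, dw);
+ */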
+
+/*********************** END TX SCHEDULER *************************************/
+
+#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 000000000000..654cf233a384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,302 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_legacy_rx_queue_alloc() Allocates rx_free
+ * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls iwl_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
+ */
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
+
+/**
+ * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_rx_queue *q)
+{
+ unsigned long flags;
+ u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Rx queue requesting wakeup,"
+ " GP1 = 0x%x\n", reg);
+ iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+ q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+ q->write_actual);
+ }
+
+ q->need_update = 0;
+
+ exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
+
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+ /* Set up indexes so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ rxq->need_update = 0;
+ return 0;
+
+err_rb:
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+err_bd:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
+
+
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
+
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+ if (iwl_legacy_is_any_associated(priv)) {
+ if (priv->cfg->ops->lib->check_plcp_health) {
+ if (!priv->cfg->ops->lib->check_plcp_health(
+ priv, pkt)) {
+ /*
+ * high plcp error detected
+ * reset Radio
+ */
+ iwl_legacy_force_reset(priv,
+ IWL_RF_RESET, false);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
+ RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode has got a bad phase 1 Key, pushes the packet.
+ * Decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is done in-place, drop it */
+ IWL_DEBUG_RX(priv, "Packet destroyed\n");
+ return -1;
+ }
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 000000000000..60f597f796ca
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,625 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req. This should be set long enough to hear probe responses
+ * from more than one AP. */
+#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
+#define IWL_ACTIVE_DWELL_TIME_52 (20)
+
+#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
+#define IWL_PASSIVE_DWELL_TIME_52 (10)
+#define IWL_PASSIVE_DWELL_BASE (100)
+#define IWL_CHANNEL_TUNE_TIME 5
+
+static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
+{
+ int ret;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_ABORT_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+
+ /* Exit instantly with error when device is not ready
+ * to receive scan abort command or it does not perform
+ * hardware scan currently */
+ if (!test_bit(STATUS_READY, &priv->status) ||
+ !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
+ !test_bit(STATUS_SCAN_HW, &priv->status) ||
+ test_bit(STATUS_FW_ERROR, &priv->status) ||
+ test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EIO;
+
+ ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ if (pkt->u.status != CAN_ABORT_STATUS) {
+ /* The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan, which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan is
+ * completed. */
+ IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
+ ret = -EIO;
+ }
+
+ iwl_legacy_free_pages(priv, cmd.reply_page);
+ return ret;
+}
+
+static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
+{
+ /* check if scan was requested from mac80211 */
+ if (priv->scan_request) {
+ IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
+ ieee80211_scan_completed(priv->hw, aborted);
+ }
+
+ priv->is_internal_short_scan = false;
+ priv->scan_vif = NULL;
+ priv->scan_request = NULL;
+}
+
+void iwl_legacy_force_scan_end(struct iwl_priv *priv)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
+ return;
+ }
+
+ IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
+ clear_bit(STATUS_SCANNING, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ iwl_legacy_complete_scan(priv, true);
+}
+
+static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
+ return;
+ }
+
+ if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
+ return;
+ }
+
+ ret = iwl_legacy_send_scan_abort(priv);
+ if (ret) {
+ IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
+ iwl_legacy_force_scan_end(priv);
+ } else
+ IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
+}
+
+/**
+ * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
+ */
+int iwl_legacy_scan_cancel(struct iwl_priv *priv)
+{
+ IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
+ queue_work(priv->workqueue, &priv->abort_scan);
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel);
+
+/**
+ * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+ lockdep_assert_held(&priv->mutex);
+
+ IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
+
+ iwl_legacy_do_scan_abort(priv);
+
+ while (time_before_eq(jiffies, timeout)) {
+ if (!test_bit(STATUS_SCAN_HW, &priv->status))
+ break;
+ msleep(20);
+ }
+
+ return test_bit(STATUS_SCAN_HW, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
+
+/* Service response to REPLY_SCAN_CMD (0x80) */
+static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scanreq_notification *notif =
+ (struct iwl_scanreq_notification *)pkt->u.raw;
+
+ IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service SCAN_START_NOTIFICATION (0x82) */
+static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scanstart_notification *notif =
+ (struct iwl_scanstart_notification *)pkt->u.raw;
+ priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+ IWL_DEBUG_SCAN(priv, "Scan start: "
+ "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
+ notif->channel,
+ notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low),
+ notif->status, notif->beacon_timer);
+}
+
+/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
+static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scanresults_notification *notif =
+ (struct iwl_scanresults_notification *)pkt->u.raw;
+
+ IWL_DEBUG_SCAN(priv, "Scan ch.res: "
+ "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d "
+ "elapsed=%lu usec\n",
+ notif->channel,
+ notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low),
+ le32_to_cpu(notif->statistics[0]),
+ le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
+#endif
+}
+
+/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
+static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+ IWL_DEBUG_SCAN(priv,
+ "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+ scan_notif->scanned_channels,
+ scan_notif->tsf_low,
+ scan_notif->tsf_high, scan_notif->status);
+
+ /* The HW is no longer scanning */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+
+ IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
+ (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ jiffies_to_msecs(jiffies - priv->scan_start));
+
+ queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
+{
+ /* scan handlers */
+ priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
+ priv->rx_handlers[SCAN_START_NOTIFICATION] =
+ iwl_legacy_rx_scan_start_notif;
+ priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
+ iwl_legacy_rx_scan_results_notif;
+ priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
+ iwl_legacy_rx_scan_complete_notif;
+}
+EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
+
+inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u8 n_probes)
+{
+ if (band == IEEE80211_BAND_5GHZ)
+ return IWL_ACTIVE_DWELL_TIME_52 +
+ IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+ else
+ return IWL_ACTIVE_DWELL_TIME_24 +
+ IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
+
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_rxon_context *ctx;
+ u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
+
+ if (iwl_legacy_is_any_associated(priv)) {
+ /*
+ * If we're associated, we clamp the maximum passive
+ * dwell time to be 98% of the smallest beacon interval
+ * (minus 2 * channel tune time)
+ */
+ for_each_context(priv, ctx) {
+ u16 value;
+
+ if (!iwl_legacy_is_associated_ctx(ctx))
+ continue;
+ value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
+ value = IWL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ passive = min(value, passive);
+ }
+ }
+
+ return passive;
+}
+EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
+
+void iwl_legacy_init_scan_params(struct iwl_priv *priv)
+{
+ u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+ if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
+ priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+ if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
+ priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(iwl_legacy_init_scan_params);
+
+static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool internal,
+ enum ieee80211_band band)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work(&priv->scan_check);
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_WARN(priv, "Request scan called when driver not ready.\n");
+ return -EIO;
+ }
+
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ IWL_DEBUG_SCAN(priv,
+ "Multiple concurrent scan requests in parallel.\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
+ return -EBUSY;
+ }
+
+ IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
+ internal ? "internal short " : "");
+
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = internal;
+ priv->scan_start = jiffies;
+ priv->scan_band = band;
+
+ ret = priv->cfg->ops->utils->request_scan(priv, vif);
+ if (ret) {
+ clear_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
+ return ret;
+ }
+
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
+ IWL_SCAN_CHECK_WATCHDOG);
+
+ return 0;
+}
+
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ if (test_bit(STATUS_SCANNING, &priv->status) &&
+ !priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ /* mac80211 will only ask for one band at a time */
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+
+ /*
+ * If an internal scan is in progress, just set
+ * up the scan_request as per above.
+ */
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+ ret = 0;
+ } else
+ ret = iwl_legacy_scan_initiate(priv, vif, false,
+ req->channels[0]->band);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
+
+/*
+ * Internal short scan; this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF related problems.
+ */
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
+{
+ queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
+{
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, start_internal_scan);
+
+ IWL_DEBUG_SCAN(priv, "Start internal scan\n");
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+ goto unlock;
+ }
+
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ goto unlock;
+ }
+
+ if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
+ IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
+ unlock:
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_check(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, scan_check.work);
+
+ IWL_DEBUG_SCAN(priv, "Scan check work\n");
+
+ /* Since we got here, the firmware has not finished the scan and
+ * is most likely in bad shape, so don't bother sending an abort
+ * command; just force scan complete to mac80211 */
+ mutex_lock(&priv->mutex);
+ iwl_legacy_force_scan_end(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+/**
+ * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16)len;
+}
+EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
+
+static void iwl_legacy_bg_abort_scan(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
+
+ IWL_DEBUG_SCAN(priv, "Abort scan work\n");
+
+ /* We keep the scan_check work queued in case the firmware does not
+ * report back the scan completed notification */
+ mutex_lock(&priv->mutex);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_completed(struct work_struct *work)
+{
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, scan_completed);
+ bool aborted;
+
+ IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
+ priv->is_internal_short_scan ? "internal short " : "");
+
+ cancel_delayed_work(&priv->scan_check);
+
+ mutex_lock(&priv->mutex);
+
+ aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ if (aborted)
+ IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+ goto out_settings;
+ }
+
+ if (priv->is_internal_short_scan && !aborted) {
+ int err;
+
+ /* Check if mac80211 requested scan during our internal scan */
+ if (priv->scan_request == NULL)
+ goto out_complete;
+
+ /* If so request a new scan */
+ err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
+ priv->scan_request->channels[0]->band);
+ if (err) {
+ IWL_DEBUG_SCAN(priv,
+ "failed to initiate pending scan: %d\n", err);
+ aborted = true;
+ goto out_complete;
+ }
+
+ goto out;
+ }
+
+out_complete:
+ iwl_legacy_complete_scan(priv, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!iwl_legacy_is_ready_rf(priv))
+ goto out;
+
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
+ false);
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+
+ priv->cfg->ops->utils->post_scan(priv);
+
+out:
+ mutex_unlock(&priv->mutex);
+}
+
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
+{
+ INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
+ INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan,
+ iwl_legacy_bg_start_internal_scan);
+ INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
+
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
+{
+ cancel_work_sync(&priv->start_internal_scan);
+ cancel_work_sync(&priv->abort_scan);
+ cancel_work_sync(&priv->scan_completed);
+
+ if (cancel_delayed_work_sync(&priv->scan_check)) {
+ mutex_lock(&priv->mutex);
+ iwl_legacy_force_scan_end(priv);
+ mutex_unlock(&priv->mutex);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..9f70a4723103
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_spectrum_h__
+#define __iwl_legacy_spectrum_h__
+enum { /* ieee80211_basic_report.map */
+ IEEE80211_BASIC_MAP_BSS = (1 << 0),
+ IEEE80211_BASIC_MAP_OFDM = (1 << 1),
+ IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
+ IEEE80211_BASIC_MAP_RADAR = (1 << 3),
+ IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
+ /* Bits 5-7 are reserved */
+
+};
+struct ieee80211_basic_report {
+ u8 channel;
+ __le64 start_time;
+ __le16 duration;
+ u8 map;
+} __packed;
+
+enum { /* ieee80211_measurement_request.mode */
+ /* Bit 0 is reserved */
+ IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
+ IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
+ IEEE80211_MEASUREMENT_REPORT = (1 << 3),
+ /* Bits 4-7 are reserved */
+};
+
+enum {
+ IEEE80211_REPORT_BASIC = 0, /* required */
+ IEEE80211_REPORT_CCA = 1, /* optional */
+ IEEE80211_REPORT_RPI = 2, /* optional */
+ /* 3-255 reserved */
+};
+
+struct ieee80211_measurement_params {
+ u8 channel;
+ __le64 start_time;
+ __le16 duration;
+} __packed;
+
+struct ieee80211_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+struct ieee80211_measurement_request {
+ struct ieee80211_info_element ie;
+ u8 token;
+ u8 mode;
+ u8 type;
+ struct ieee80211_measurement_params params[0];
+} __packed;
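+
+/*
+ * Usage sketch (illustrative only; the element ID and the 802.11h length
+ * rule stated here are assumptions): a basic measurement request carrying
+ * one params entry can be sized and filled like this, with ie.len covering
+ * everything after the length byte (token, mode, type, params):
+ *
+ *   size_t len = sizeof(struct ieee80211_measurement_request) +
+ *                sizeof(struct ieee80211_measurement_params);
+ *   struct ieee80211_measurement_request *req = kzalloc(len, GFP_KERNEL);
+ *
+ *   req->ie.id = WLAN_EID_MEASURE_REQUEST;
+ *   req->ie.len = len - sizeof(req->ie);
+ *   req->mode = IEEE80211_MEASUREMENT_ENABLE | IEEE80211_MEASUREMENT_REQUEST;
+ *   req->type = IEEE80211_REPORT_BASIC;
+ *   req->params[0].channel = 6;
+ *   req->params[0].duration = cpu_to_le16(10);
+ */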
+
+struct ieee80211_measurement_report {
+ struct ieee80211_info_element ie;
+ u8 token;
+ u8 mode;
+ u8 type;
+ union {
+ struct ieee80211_basic_report basic[0];
+ } u;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 000000000000..47c9da3834ea
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/lockdep.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+
+/* priv->sta_lock must be held */
+static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+ IWL_ERR(priv,
+ "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present"
+ " in uCode (according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_legacy_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ return ret;
+ }
+
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+ iwl_legacy_sta_ucode_activate(priv, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv,
+ "Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
+ break;
+ }
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return ret;
+}
+
+static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_legacy_addsta_cmd *addsta =
+ (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
+
+ iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
+
+}
+
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+ struct iwl_legacy_addsta_cmd *sta, u8 flags)
+{
+ struct iwl_rx_packet *pkt = NULL;
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ADD_STA,
+ .flags = flags,
+ .data = data,
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
+
+ if (flags & CMD_ASYNC)
+ cmd.callback = iwl_legacy_add_sta_callback;
+ else {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = iwl_legacy_send_cmd(priv, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+
+ if (ret == 0) {
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
+ }
+ iwl_legacy_free_pages(priv, cmd.reply_page);
+
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_add_sta);
+
+static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ "dynamic" : "disabled");
+
+ sta_flags = priv->stations[index].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ priv->stations[index].sta.station_flags = sta_flags;
+ done:
+ return;
+}
+
+/**
+ * iwl_legacy_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct iwl_station_entry *station;
+ int i;
+ u8 sta_id = IWL_INVALID_STATION;
+ u16 rate;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
+ if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!priv->stations[i].used &&
+ sta_id == IWL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv,
+ "STA %d already in process of being added.\n",
+ sta_id);
+ return sta_id;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ return sta_id;
+ }
+
+ station = &priv->stations[sta_id];
+ station->used = IWL_STA_DRIVER_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+ sta_id, addr);
+ priv->num_stations++;
+
+ /* Set up the REPLY_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct iwl_station_priv_common *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
+
+ /* 3945 only */
+ rate = (priv->band == IEEE80211_BAND_5GHZ) ?
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+ return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_legacy_add_station_common -
+ */
+int
+iwl_legacy_add_station_common(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv,
+ "STA %d already in process of being added.\n",
+ sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[sta_id].sta.sta.addr);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_add_station_common);
+
+/**
+ * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+ /* Ucode must be active and driver must be non active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+ IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
+ const u8 *addr, int sta_id,
+ bool temporary)
+{
+ struct iwl_rx_packet *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct iwl_rem_sta_cmd rm_sta_cmd;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_REMOVE_STA,
+ .len = sizeof(struct iwl_rem_sta_cmd),
+ .flags = CMD_SYNC,
+ .data = &rm_sta_cmd,
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = iwl_legacy_send_cmd(priv, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ iwl_legacy_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock,
+ flags_spin);
+ }
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
+ }
+ }
+ iwl_legacy_free_pages(priv, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * iwl_legacy_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ unsigned long flags;
+
+ if (!iwl_legacy_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ BUG_ON(priv->num_stations < 0);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
+
+/**
+ * iwl_legacy_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv,
+ "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv,
+ "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
+
+/**
+ * iwl_legacy_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in ucode,
+ * are restored.
+ *
+ * Function sleeps.
+ */
+void
+iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!iwl_legacy_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &priv->stations[i].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ send_lq = false;
+ if (priv->stations[i].lq) {
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].used &=
+ ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq,
+ CMD_SYNC, true);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations"
+ " .... no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations"
+ " .... complete.\n");
+}
+EXPORT_SYMBOL(iwl_legacy_restore_stations);
+
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &priv->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
+
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ BUG_ON(priv->num_stations < 0);
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+ IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+ IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+ lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+ i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that an HT rate has been in use and we
+ * lose connectivity with the AP; mac80211 will then first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+ RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * iwl_legacy_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we clear the
+ * state indicating that station creation is in progress.
+ */
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct iwl_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ iwl_legacy_dump_lq_cmd(priv, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+ if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
+ ret = iwl_legacy_send_cmd(priv, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete,"
+ " clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
+
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 000000000000..67bd75fe01a1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_sta_h__
+#define __iwl_legacy_sta_h__
+
+#include "iwl-dev.h"
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
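Editorial aside (not part of the patch): a minimal user-space sketch of how bit flags like the station state bits above are set, combined and cleared; the names and bit positions below are stand-ins chosen for illustration only, not the driver's definitions.

#include <stdio.h>

#define STA_DRIVER_ACTIVE (1u << 0) /* stand-in for IWL_STA_DRIVER_ACTIVE */
#define STA_UCODE_ACTIVE  (1u << 1) /* stand-in for IWL_STA_UCODE_ACTIVE  */

int main(void)
{
	unsigned int used = 0;

	used |= STA_DRIVER_ACTIVE;              /* driver allocates the entry   */
	used |= STA_UCODE_ACTIVE;               /* add-station command acked    */

	/* station is usable only when both driver and ucode consider it active */
	if ((used & (STA_DRIVER_ACTIVE | STA_UCODE_ACTIVE)) ==
	    (STA_DRIVER_ACTIVE | STA_UCODE_ACTIVE))
		printf("station active in driver and ucode\n");

	used &= ~STA_UCODE_ACTIVE;              /* e.g. after unassociated RXON */
	printf("ucode active: %s\n", (used & STA_UCODE_ACTIVE) ? "yes" : "no");
	return 0;
}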
+
+void iwl_legacy_restore_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+ struct iwl_legacy_addsta_cmd *sta, u8 flags);
+int iwl_legacy_add_station_common(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_legacy_remove_station(struct iwl_priv *priv,
+ const u8 sta_id,
+ const u8 *addr);
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 iwl_legacy_prep_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta);
+
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq,
+ u8 flags, bool init);
+
+/**
+ * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that, in the case
+ * we're coming here from a hardware restart, mac80211 will be
+ * able to reconfigure stations -- if we're getting here in the
+ * normal down flow then the stations will already be cleared.
+ */
+static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rxon_context *ctx;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+
+ priv->ucode_key_table = 0;
+
+ for_each_context(priv, ctx) {
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
+ struct iwl_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = iwl_legacy_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
+#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 000000000000..a227773cb384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,660 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/**
+ * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
+ */
+void
+iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ /* if we're trying to save power */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Tx queue %d requesting wakeup,"
+ " GP1 = 0x%x\n", txq_id, reg);
+ iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ iwl_write32(priv, HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+ txq->need_update = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
+
+/**
+ * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
+
+/**
+ * iwl_legacy_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ iwl_legacy_tx_queue_unmap(priv, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, priv->hw_params.tfd_size *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
+
+/**
+ * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+ bool huge = false;
+ int i;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->read_ptr != q->write_ptr) {
+ /* we have no way to tell if it is a huge cmd ATM */
+ i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_SIZE_HUGE)
+ huge = true;
+ else
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+
+ if (huge) {
+ i = q->n_window;
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
+
+/**
+ * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ iwl_legacy_cmd_queue_unmap(priv);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queues, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
+ * > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in iwl-4965-hw.h.
+ ***************************************************/
+
+int iwl_legacy_queue_space(const struct iwl_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_window;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(iwl_legacy_queue_space);
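Editorial sketch (not part of the patch) of the free-space rule described in the theory-of-operation comment above: a circular buffer with separate read and write indexes keeps two entries in reserve so a full queue is never mistaken for an empty one. The sizes are hypothetical and the arithmetic is a simplified version of what iwl_legacy_queue_space() computes.

#include <stdio.h>

/* simplified free-space calculation for a ring of n_entries descriptors */
static int ring_space(int read_ptr, int write_ptr, int n_entries)
{
	int s = read_ptr - write_ptr;

	if (read_ptr <= write_ptr)
		s += n_entries;         /* write index is at or ahead of read */
	s -= 2;                         /* reserve: distinguish empty from full */
	return s < 0 ? 0 : s;
}

int main(void)
{
	printf("empty ring:  %d free\n", ring_space(0, 0, 256));   /* 254 */
	printf("nearly full: %d free\n", ring_space(10, 8, 256));  /*   0 */
	return 0;
}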
+
+
+/**
+ * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
+ int count, int slots_num, u32 id)
+{
+ q->n_bd = count;
+ q->n_window = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
+ * and iwl_legacy_queue_dec_wrap are broken. */
+ BUG_ON(!is_power_of_2(count));
+
+ /* slots_num must be power-of-two size, otherwise
+ * iwl_legacy_get_cmd_index is broken. */
+ BUG_ON(!is_power_of_2(slots_num));
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
+
+/**
+ * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq, u32 id)
+{
+ struct device *dev = &priv->pci_dev->dev;
+ size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (id != priv->cmd_queue) {
+ txq->txb = kzalloc(sizeof(txq->txb[0]) *
+ TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
+ if (!txq->txb) {
+ IWL_ERR(priv, "kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+ GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = id;
+
+ return 0;
+
+ error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int i, len;
+ int ret;
+ int actual_slots = slots_num;
+
+ /*
+ * Alloc buffer array for commands (Tx or other types of commands).
+ * For the command queue (#4/#9), allocate command space + one big
+ * command for scan, since the scan command is very large; the system will
+ * not have two scans at the same time, so only one is needed.
+ * For normal Tx queues (all other queues), no super-size command
+ * space is needed.
+ */
+ if (txq_id == priv->cmd_queue)
+ actual_slots++;
+
+ txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+ GFP_KERNEL);
+ txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+ GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct iwl_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len = IWL_MAX_CMD_SIZE;
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto err;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
+ if (ret)
+ goto err;
+
+ txq->need_update = 0;
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ iwl_legacy_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ iwl_legacy_queue_init(priv, &txq->q,
+ TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ priv->cfg->ops->lib->txq_init(priv, txq);
+
+ return 0;
+err:
+ for (i = 0; i < actual_slots; i++)
+ kfree(txq->cmd[i]);
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
+
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == priv->cmd_queue)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ iwl_legacy_queue_init(priv, &txq->q,
+ TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * iwl_legacy_enqueue_hcmd - enqueue a uCode command
+ * @priv: device private data pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate that the operation
+ * failed. On success, it returns the index (> 0) of the command in the
+ * command queue.
+ */
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ int len;
+ u32 idx;
+ u16 fix_size;
+
+ cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+
+ /* If any of the command structures end up being larger than
+ * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
+ * we will need to increase the size of the TFD entries.
+ * Also, check that the command buffer does not exceed the size
+ * of device_cmd and max_cmd_size. */
+ BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+ !(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
+
+ if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
+ IWL_WARN(priv, "Not sending command - %s KILL\n",
+ iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ IWL_ERR(priv, "No space in command queue\n");
+ IWL_ERR(priv, "Restarting adapter due to queue full\n");
+ queue_work(priv->workqueue, &priv->restart);
+ return -ENOSPC;
+ }
+
+ spin_lock_irqsave(&priv->hcmd_lock, flags);
+
+ /* If this is a huge cmd, mark the huge flag also on the meta.flags
+ * of the _original_ cmd. This is used for DMA mapping clean up.
+ */
+ if (cmd->flags & CMD_SIZE_HUGE) {
+ idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
+ txq->meta[idx].flags = CMD_SIZE_HUGE;
+ }
+
+ idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
+
+ out_cmd->hdr.cmd = cmd->id;
+ memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+ /* At this point, the out_cmd now has all of the incoming cmd
+ * information */
+
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+ len = sizeof(struct iwl_device_cmd);
+ if (idx == TFD_CMD_SLOTS)
+ len = IWL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ switch (out_cmd->hdr.cmd) {
+ case REPLY_TX_LINK_QUALITY_CMD:
+ case SENSITIVITY_CMD:
+ IWL_DEBUG_HC_DUMP(priv,
+ "Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, priv->cmd_queue);
+ break;
+ default:
+ IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, priv->cmd_queue);
+ }
+#endif
+ txq->need_update = 1;
+
+ if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+
+ phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+ fix_size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
+
+ trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
+ fix_size, cmd->flags);
+
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, fix_size, 1,
+ U32_PAD(cmd->len));
+
+ /* Increment and update queue's write index */
+ q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
+
+ spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+ return idx;
+}
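Editorial sketch (not part of the patch) of the kind of packing done above by QUEUE_TO_SEQ/INDEX_TO_SEQ and undone by SEQ_TO_QUEUE/SEQ_TO_INDEX when the reply comes back. The bit layout below is a hypothetical example for illustration, not the driver's actual field definition.

#include <stdio.h>
#include <stdint.h>

/* hypothetical layout: low 8 bits = ring index, next 5 bits = queue id */
static uint16_t to_seq(unsigned int queue, unsigned int index)
{
	return (uint16_t)(((queue & 0x1f) << 8) | (index & 0xff));
}

int main(void)
{
	uint16_t seq = to_seq(4, 113);           /* command queue 4, slot 113 */

	printf("queue %u, index %u\n",
	       (seq >> 8) & 0x1f, seq & 0xff);   /* decodes back to 4, 113 */
	return 0;
}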
+
+/**
+ * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space becomes available.
+ * If there is enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
+ int idx, int cmd_idx)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ idx, q->n_bd, q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ queue_work(priv->workqueue, &priv->restart);
+ }
+
+ }
+}
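Editorial sketch (not part of the patch) of the reclaim loop pattern used above: the read index is walked forward, wrapping at the ring size, until it moves past the completed entry. The helper below wraps with a modulo for clarity, whereas the driver relies on power-of-two ring sizes.

#include <stdio.h>

static int inc_wrap(int idx, int n_entries)
{
	return (idx + 1) % n_entries;
}

/* advance read_ptr until it has passed completed_idx, counting freed entries */
static int reclaim(int read_ptr, int completed_idx, int n_entries)
{
	int freed = 0;

	for (completed_idx = inc_wrap(completed_idx, n_entries);
	     read_ptr != completed_idx;
	     read_ptr = inc_wrap(read_ptr, n_entries))
		freed++;

	return freed;
}

int main(void)
{
	/* read index 250, command completed at index 2, 256-entry ring */
	printf("reclaimed %d entries\n", reclaim(250, 2, 256));   /* 9 */
	return 0;
}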
+
+/**
+ * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index;
+ bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+
+ /* If a Tx command is being handled and it isn't in the actual
+ * command queue then a command routing bug has been introduced
+ * in the queue management code. */
+ if (WARN(txq_id != priv->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, priv->cmd_queue, sequence,
+ priv->txq[priv->cmd_queue].q.read_ptr,
+ priv->txq[priv->cmd_queue].q.write_ptr)) {
+ iwl_print_hex_error(priv, pkt, 32);
+ return;
+ }
+
+ /* If this is a huge cmd, clear the huge flag on the meta.flags
+ * of the _original_ cmd, so that iwl_legacy_cmd_queue_free won't unmap
+ * the DMA buffer for the scan (huge) command.
+ */
+ if (huge) {
+ cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
+ txq->meta[cmd_index].flags = 0;
+ }
+ cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
+ cmd = txq->cmd[cmd_index];
+ meta = &txq->meta[cmd_index];
+
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(priv, cmd, pkt);
+
+ iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd));
+ wake_up_interruptible(&priv->wait_command_queue);
+ }
+ meta->flags = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 371abbf60eac..ab87e1b73529 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -61,7 +61,6 @@
#include "iwl-helpers.h"
#include "iwl-dev.h"
#include "iwl-spectrum.h"
-#include "iwl-legacy.h"
/*
* module name, copyright, version, etc.
@@ -70,7 +69,7 @@
#define DRV_DESCRIPTION \
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define VD "d"
#else
#define VD
@@ -82,7 +81,7 @@
* this was configurable.
*/
#define DRV_VERSION IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -164,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
priv->stations[sta_id].sta.key.key_offset =
- iwl_get_free_ucode_key_index(priv);
+ iwl_legacy_get_free_ucode_key_index(priv);
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
@@ -177,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
- ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ ret = iwl_legacy_send_add_sta(priv,
+ &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -201,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
unsigned long flags;
- struct iwl_addsta_cmd sta_cmd;
+ struct iwl_legacy_addsta_cmd sta_cmd;
spin_lock_irqsave(&priv->sta_lock, flags);
memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -210,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -318,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
int left)
{
- if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
+ if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
return 0;
if (priv->beacon_skb->len > left)
@@ -344,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
return -ENOMEM;
}
- rate = iwl_rate_get_lowest_plcp(priv,
+ rate = iwl_legacy_get_lowest_plcp(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
- rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
&frame->u.cmd[0]);
iwl3945_free_frame(priv, frame);
@@ -443,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
- priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
+ iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
if (ieee80211_is_mgmt(fc)) {
@@ -485,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
+ if (iwl_legacy_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock;
}
@@ -500,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
fc = hdr->frame_control;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
if (ieee80211_is_auth(fc))
IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
else if (ieee80211_is_assoc_req(fc))
@@ -514,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(
+ sta_id = iwl_legacy_sta_id_or_broadcast(
priv, &priv->contexts[IWL_RXON_CTX_BSS],
info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -536,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
- if ((iwl_queue_space(q) < q->high_mark))
+ if ((iwl_legacy_queue_space(q) < q->high_mark))
goto drop;
spin_lock_irqsave(&priv->lock, flags);
- idx = get_cmd_index(q, q->write_ptr, 0);
+ idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -582,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
- iwl_dbg_log_tx_data_frame(priv, len, hdr);
- iwl_update_stats(priv, true, fc, len);
+ iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+ iwl_legacy_update_stats(priv, true, fc, len);
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
@@ -642,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
+ q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
- if ((iwl_queue_space(q) < q->high_mark)
+ if ((iwl_legacy_queue_space(q) < q->high_mark)
&& priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
}
- iwl_stop_queue(priv, txq);
+ iwl_legacy_stop_queue(priv, txq);
}
return 0;
@@ -683,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
int duration = le16_to_cpu(params->duration);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
- add_time = iwl_usecs_to_beacons(priv,
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
+ add_time = iwl_legacy_usecs_to_beacons(priv,
le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(ctx->timing.beacon_interval));
@@ -697,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
cmd.len = sizeof(spectrum);
spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
spectrum.start_time =
- iwl_add_beacon_time(priv,
+ iwl_legacy_add_beacon_time(priv,
priv->_3945.last_beacon_time, add_time,
le16_to_cpu(ctx->timing.beacon_interval));
else
@@ -712,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
spectrum.flags |= RXON_FLG_BAND_24G_MSK |
RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
- rc = iwl_send_cmd_sync(priv, &cmd);
+ rc = iwl_legacy_send_cmd_sync(priv, &cmd);
if (rc)
return rc;
@@ -739,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
break;
}
- iwl_free_pages(priv, cmd.reply_page);
+ iwl_legacy_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -783,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
#endif
IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
}
-static void iwl3945_bg_beacon_update(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, beacon_update);
- struct sk_buff *beacon;
-
- /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
- beacon = ieee80211_beacon_get(priv->hw,
- priv->contexts[IWL_RXON_CTX_BSS].vif);
-
- if (!beacon) {
- IWL_ERR(priv, "update beacon failed\n");
- return;
- }
-
- mutex_lock(&priv->mutex);
- /* new beacon skb is allocated every time; dispose previous.*/
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = beacon;
- mutex_unlock(&priv->mutex);
-
- iwl3945_send_beacon_cmd(priv);
-}
-
static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
u8 rate = beacon->beacon_notify_hdr.rate;
IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -835,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
- if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
- (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
- queue_work(priv->workqueue, &priv->beacon_update);
}
/* Handle notification from uCode that card's power state is changing
@@ -862,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
clear_bit(STATUS_RF_KILL_HW, &priv->status);
- iwl_scan_cancel(priv);
+ iwl_legacy_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
test_bit(STATUS_RF_KILL_HW, &priv->status)))
@@ -885,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
{
priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
- priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+ priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
+ iwl_legacy_rx_spectrum_measure_notif;
+ priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_rx_pm_debug_statistics_notif;
+ iwl_legacy_rx_pm_debug_statistics_notif;
priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
/*
@@ -902,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
- iwl_setup_rx_scan_handlers(priv);
+ iwl_legacy_setup_rx_scan_handlers(priv);
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
/* Set up hardware specific Rx handlers */
@@ -1003,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
@@ -1029,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
}
}
@@ -1123,7 +1094,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1170,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
}
@@ -1275,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv, pkt, len);
+ trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -1292,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
* rx_handlers table. See iwl3945_setup_rx_handlers() */
if (priv->rx_handlers[pkt->hdr.cmd]) {
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
- get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv,
"r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, get_cmd_string(pkt->hdr.cmd),
+ r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
@@ -1312,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
if (reclaim) {
/* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_send_cmd()
+ * and fire off the (possibly) blocking iwl_legacy_send_cmd()
* as we reclaim the driver command queue */
if (rxb->page)
- iwl_tx_cmd_complete(priv, rxb);
+ iwl_legacy_tx_cmd_complete(priv, rxb);
else
IWL_WARN(priv, "Claim null rxb?\n");
}
@@ -1357,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
}
/* call this function to flush any scheduled tasklet */
-static inline void iwl_synchronize_irq(struct iwl_priv *priv)
+static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
{
/* wait to make sure we flush pending tasklet*/
synchronize_irq(priv->pci_dev->irq);
tasklet_kill(&priv->irq_tasklet);
}
-static const char *desc_lookup(int i)
+static const char *iwl3945_desc_lookup(int i)
{
switch (i) {
case 1:
@@ -1401,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
}
- count = iwl_read_targ_mem(priv, base);
+ count = iwl_legacy_read_targ_mem(priv, base);
if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -1414,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
for (i = ERROR_START_OFFSET;
i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
i += ERROR_ELEM_SIZE) {
- desc = iwl_read_targ_mem(priv, base + i);
+ desc = iwl_legacy_read_targ_mem(priv, base + i);
time =
- iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
blink1 =
- iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
blink2 =
- iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
ilink1 =
- iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
ilink2 =
- iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
data1 =
- iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
IWL_ERR(priv,
"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
- desc_lookup(desc), desc, time, blink1, blink2,
+ iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
ilink1, ilink2, data1);
- trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
+ trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
0, blink1, blink2, ilink1, ilink2);
}
}
@@ -1471,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
iwl_grab_nic_access(priv);
/* Set starting address; reads will auto-increment */
- _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
rmb();
/* "time" is actually "data" for mode 0 (no timestamp).
* place event id # at far right for easier visual parsing. */
for (i = 0; i < num_events; i++) {
- ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
if (bufsz) {
@@ -1487,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
time, ev);
} else {
IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_dev_ucode_event(priv, 0,
+ trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
time, ev);
}
} else {
- data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ data = _iwl_legacy_read_direct32(priv,
+ HBUS_TARG_MEM_RDAT);
if (bufsz) {
pos += scnprintf(*buf + pos, bufsz - pos,
"%010u:0x%08x:%04u\n",
@@ -1499,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
} else {
IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time,
+ trace_iwlwifi_legacy_dev_ucode_event(priv, time,
data, ev);
}
}
@@ -1570,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
}
/* event log header */
- capacity = iwl_read_targ_mem(priv, base);
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ capacity = iwl_legacy_read_targ_mem(priv, base);
+ mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
if (capacity > priv->cfg->base_params->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
@@ -1595,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
return pos;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -1607,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
size);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
if (display) {
if (full_log)
bufsz = capacity * 48;
@@ -1617,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
if (!*buf)
return -ENOMEM;
}
- if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+ if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
* start at the oldest entry,
* i.e the next one that uCode would fill.
@@ -1647,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
u32 inta_mask;
#endif
@@ -1665,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
/* just for debug */
inta_mask = iwl_read32(priv, CSR_INT_MASK);
IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -1690,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
IWL_ERR(priv, "Hardware error detected. Restarting.\n");
/* Tell the device to stop sending interrupts */
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
priv->isr_stats.hw++;
- iwl_irq_handle_error(priv);
+ iwl_legacy_irq_handle_error(priv);
handled |= CSR_INT_BIT_HW_ERR;
return;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
if (inta & CSR_INT_BIT_SCD) {
IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
@@ -1724,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
IWL_ERR(priv, "Microcode SW error detected. "
"Restarting 0x%X.\n", inta);
priv->isr_stats.sw++;
- iwl_irq_handle_error(priv);
+ iwl_legacy_irq_handle_error(priv);
handled |= CSR_INT_BIT_SW_ERR;
}
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
- iwl_txq_update_write_ptr(priv, &priv->txq[0]);
- iwl_txq_update_write_ptr(priv, &priv->txq[1]);
- iwl_txq_update_write_ptr(priv, &priv->txq[2]);
- iwl_txq_update_write_ptr(priv, &priv->txq[3]);
- iwl_txq_update_write_ptr(priv, &priv->txq[4]);
- iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+ iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
priv->isr_stats.wakeup++;
handled |= CSR_INT_BIT_WAKEUP;
@@ -1757,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
priv->isr_stats.tx++;
iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- iwl_write_direct32(priv, FH39_TCSR_CREDIT
+ iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
(FH39_SRVC_CHNL), 0x0);
handled |= CSR_INT_BIT_FH_TX;
}
@@ -1776,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
+ iwl_legacy_enable_interrupts(priv);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
inta = iwl_read32(priv, CSR_INT);
inta_mask = iwl_read32(priv, CSR_INT_MASK);
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -1806,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
return added;
}
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
- channel = iwl_get_single_channel_number(priv, band);
+ channel = iwl_legacy_get_single_channel_number(priv, band);
if (channel) {
scan_ch->channel = channel;
@@ -1849,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
if (!sband)
return 0;
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
@@ -1863,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
scan_ch->channel = chan->hw_value;
- ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- scan_ch->channel);
+ ch_info = iwl_legacy_get_channel_info(priv, band,
+ scan_ch->channel);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv,
+ "Channel %d is INVALID for this band.\n",
+ scan_ch->channel);
continue;
}
@@ -1875,7 +1849,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
/* If passive, set up for auto-switch
* and use long active_dwell time.
*/
- if (!is_active || is_channel_passive(ch_info) ||
+ if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
(chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
scan_ch->type = 0; /* passive */
if (IWL_UCODE_API(priv->ucode_ver) == 1)
@@ -1955,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
{
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
/**
@@ -1976,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
IWL39_RTC_INST_LOWER_BOUND);
errcnt = 0;
@@ -1984,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "uCode INST section is invalid at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -2023,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
i + IWL39_RTC_INST_LOWER_BOUND);
- val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
#if 0 /* Enable this if you want to see details */
IWL_ERR(priv, "uCode INST section is invalid at "
@@ -2101,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
#define IWL3945_UCODE_GET(item) \
static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
{ \
- return le32_to_cpu(ucode->u.v1.item); \
+ return le32_to_cpu(ucode->v1.item); \
}
static u32 iwl3945_ucode_get_header_size(u32 api_ver)
@@ -2111,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver)
static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
{
- return (u8 *) ucode->u.v1.data;
+ return (u8 *) ucode->v1.data;
}
IWL3945_UCODE_GET(inst_size);
@@ -2286,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
priv->ucode_code.len = inst_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
priv->ucode_data.len = data_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
priv->ucode_data_backup.len = data_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
!priv->ucode_data_backup.v_addr)
@@ -2301,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Initialization instructions and data */
if (init_size && init_data_size) {
priv->ucode_init.len = init_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
priv->ucode_init_data.len = init_data_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
goto err_pci_alloc;
@@ -2313,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Bootstrap (instructions only, no data) */
if (boot_size) {
priv->ucode_boot.len = boot_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
if (!priv->ucode_boot.v_addr)
goto err_pci_alloc;
@@ -2400,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
pdata = priv->ucode_data_backup.p_addr;
/* Tell bootstrap uCode where to find image to load */
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
priv->ucode_data.len);
/* Inst byte count must be last to set up, bit 31 signals uCode
* that all new ptr/size info is in place */
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -2488,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
goto restart;
}
- rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
+ rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
if (rfkill & 0x1) {
@@ -2510,18 +2484,18 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
set_bit(STATUS_ALIVE, &priv->status);
/* Enable watchdog to monitor the driver tx queues */
- iwl_setup_watchdog(priv);
+ iwl_legacy_setup_watchdog(priv);
- if (iwl_is_rfkill(priv))
+ if (iwl_legacy_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = IWL_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK_3945;
- iwl_power_update_mode(priv, true);
+ iwl_legacy_power_update_mode(priv, true);
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
struct iwl3945_rxon_cmd *active_rxon =
(struct iwl3945_rxon_cmd *)(&ctx->active);
@@ -2529,21 +2503,20 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, ctx);
+ iwl_legacy_connection_init_rx_config(priv, ctx);
}
/* Configure Bluetooth device coexistence support */
- priv->cfg->ops->hcmd->send_bt_config(priv);
+ iwl_legacy_send_bt_config(priv);
+
+ set_bit(STATUS_READY, &priv->status);
/* Configure the adapter for unassociated operation */
iwl3945_commit_rxon(priv, ctx);
iwl3945_reg_txpower_periodic(priv);
- iwl_leds_init(priv);
-
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
return;
@@ -2561,7 +2534,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
- iwl_scan_cancel_timeout(priv, 200);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -2570,9 +2543,9 @@ static void __iwl3945_down(struct iwl_priv *priv)
del_timer_sync(&priv->watchdog);
/* Station information will now be cleared in device */
- iwl_clear_ucode_stations(priv, NULL);
- iwl_dealloc_bcast_stations(priv);
- iwl_clear_driver_stations(priv);
+ iwl_legacy_clear_ucode_stations(priv, NULL);
+ iwl_legacy_dealloc_bcast_stations(priv);
+ iwl_legacy_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2587,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv)
/* tell the device to stop sending interrupts */
spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
+ iwl3945_synchronize_irq(priv);
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
/* If we have not previously called iwl3945_init() then
* clear all bits but the RF Kill bits and return */
- if (!iwl_is_init(priv)) {
+ if (!iwl_legacy_is_init(priv)) {
priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2621,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv)
iwl3945_hw_rxq_stop(priv);
/* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
/* Stop the device, and put it in low power state */
- iwl_apm_stop(priv);
+ iwl_legacy_apm_stop(priv);
exit:
memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2656,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
u8 sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
+ sta_id = iwl_legacy_prep_station(priv, ctx,
+ iwlegacy_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -2714,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
/* clear (again), then enable host interrupts */
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
+ iwl_legacy_enable_interrupts(priv);
/* really make sure rfkill handshake bits are cleared */
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2856,21 +2830,18 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
- unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
if (priv->is_internal_short_scan)
interval = 0;
else
interval = vif->bss_conf.beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -2947,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (!priv->is_internal_short_scan) {
scan->tx_cmd.len = cpu_to_le16(
- iwl_fill_probe_req(priv,
+ iwl_legacy_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
@@ -2956,9 +2927,9 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
} else {
/* use bcast addr, will not be transmitted but must be valid */
scan->tx_cmd.len = cpu_to_le16(
- iwl_fill_probe_req(priv,
+ iwl_legacy_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
- iwl_bcast_addr, NULL, 0,
+ iwlegacy_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
}
/* select Rx antennas */
@@ -2986,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_send_cmd_sync(priv, &cmd);
+ ret = iwl_legacy_send_cmd_sync(priv, &cmd);
if (ret)
clear_bit(STATUS_SCAN_HW, &priv->status);
return ret;
@@ -3054,25 +3025,20 @@ void iwl3945_post_associate(struct iwl_priv *priv)
if (!ctx->vif || !priv->is_open)
return;
- if (ctx->vif->type == NL80211_IFTYPE_AP) {
- IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
- return;
- }
-
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- iwl_scan_cancel_timeout(priv, 200);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
- conf = ieee80211_get_hw_conf(priv->hw);
+ conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwl3945_commit_rxon(priv, ctx);
- rc = iwl_send_rxon_timing(priv, ctx);
+ rc = iwl_legacy_send_rxon_timing(priv, ctx);
if (rc)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
@@ -3170,8 +3136,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
* no need to poll the killswitch state anymore */
cancel_delayed_work(&priv->_3945.rfkill_poll);
- iwl_led_start(priv);
-
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
@@ -3206,7 +3170,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
@@ -3219,7 +3183,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
IWL_DEBUG_MAC80211(priv, "leave\n");
- return NETDEV_TX_OK;
}
void iwl3945_config_ap(struct iwl_priv *priv)
@@ -3232,14 +3195,14 @@ void iwl3945_config_ap(struct iwl_priv *priv)
return;
/* The following should be done only at AP bring up */
- if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
+ if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
/* RXON - unassoc (to set timing command) */
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwl3945_commit_rxon(priv, ctx);
/* RXON Timing */
- rc = iwl_send_rxon_timing(priv, ctx);
+ rc = iwl_legacy_send_rxon_timing(priv, ctx);
if (rc)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
@@ -3266,10 +3229,6 @@ void iwl3945_config_ap(struct iwl_priv *priv)
iwl3945_commit_rxon(priv, ctx);
}
iwl3945_send_beacon_cmd(priv);
-
- /* FIXME - we need to add code here to detect a totally new
- * configuration, reset the AP, unassoc, rxon timing, assoc,
- * clear sta table, add BCAST sta... */
}
static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3289,17 +3248,25 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
if (!static_key) {
- sta_id = iwl_sta_id_or_broadcast(
+ sta_id = iwl_legacy_sta_id_or_broadcast(
priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
}
mutex_lock(&priv->mutex);
- iwl_scan_cancel_timeout(priv, 100);
+ iwl_legacy_scan_cancel_timeout(priv, 100);
switch (cmd) {
case SET_KEY:
@@ -3344,7 +3311,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
sta_priv->common.sta_id = IWL_INVALID_STATION;
- ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
+ ret = iwl_legacy_add_station_common(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS],
sta->addr, is_ap, sta, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
@@ -3405,7 +3373,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
/*
* Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
+ * default flags setup in iwl_legacy_connection_init_rx_config()
* since we currently do not support programming multicast
* filters into the device.
*/
@@ -3420,7 +3388,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
*
*****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
* The following adds a new attribute to the sysfs representation
@@ -3433,13 +3401,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
* level that is used instead of the global debug level if it (the per
* device debug level) is set.
*/
-static ssize_t show_debug_level(struct device *d,
+static ssize_t iwl3945_show_debug_level(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
+ return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
}
-static ssize_t store_debug_level(struct device *d,
+static ssize_t iwl3945_store_debug_level(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3452,7 +3420,7 @@ static ssize_t store_debug_level(struct device *d,
IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
else {
priv->debug_level = val;
- if (iwl_alloc_traffic_mem(priv))
+ if (iwl_legacy_alloc_traffic_mem(priv))
IWL_ERR(priv,
"Not enough memory to generate traffic log\n");
}
@@ -3460,31 +3428,31 @@ static ssize_t store_debug_level(struct device *d,
}
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- show_debug_level, store_debug_level);
+ iwl3945_show_debug_level, iwl3945_store_debug_level);
-#endif /* CONFIG_IWLWIFI_DEBUG */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-static ssize_t show_temperature(struct device *d,
+static ssize_t iwl3945_show_temperature(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
}
-static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
+static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-static ssize_t show_tx_power(struct device *d,
+static ssize_t iwl3945_show_tx_power(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}
-static ssize_t store_tx_power(struct device *d,
+static ssize_t iwl3945_store_tx_power(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3501,9 +3469,9 @@ static ssize_t store_tx_power(struct device *d,
return count;
}
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-static ssize_t show_flags(struct device *d,
+static ssize_t iwl3945_show_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3512,7 +3480,7 @@ static ssize_t show_flags(struct device *d,
return sprintf(buf, "0x%04X\n", ctx->active.flags);
}
-static ssize_t store_flags(struct device *d,
+static ssize_t iwl3945_store_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3523,7 +3491,7 @@ static ssize_t store_flags(struct device *d,
mutex_lock(&priv->mutex);
if (le32_to_cpu(ctx->staging.flags) != flags) {
/* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
+ if (iwl_legacy_scan_cancel_timeout(priv, 100))
IWL_WARN(priv, "Could not cancel scan.\n");
else {
IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
@@ -3537,9 +3505,9 @@ static ssize_t store_flags(struct device *d,
return count;
}
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-static ssize_t show_filter_flags(struct device *d,
+static ssize_t iwl3945_show_filter_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3549,7 +3517,7 @@ static ssize_t show_filter_flags(struct device *d,
le32_to_cpu(ctx->active.filter_flags));
}
-static ssize_t store_filter_flags(struct device *d,
+static ssize_t iwl3945_store_filter_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3560,7 +3528,7 @@ static ssize_t store_filter_flags(struct device *d,
mutex_lock(&priv->mutex);
if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
/* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
+ if (iwl_legacy_scan_cancel_timeout(priv, 100))
IWL_WARN(priv, "Could not cancel scan.\n");
else {
IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
@@ -3575,10 +3543,10 @@ static ssize_t store_filter_flags(struct device *d,
return count;
}
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
- store_filter_flags);
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
+ iwl3945_store_filter_flags);
-static ssize_t show_measurement(struct device *d,
+static ssize_t iwl3945_show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3610,7 +3578,7 @@ static ssize_t show_measurement(struct device *d,
return len;
}
-static ssize_t store_measurement(struct device *d,
+static ssize_t iwl3945_store_measurement(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3647,9 +3615,9 @@ static ssize_t store_measurement(struct device *d,
}
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
- show_measurement, store_measurement);
+ iwl3945_show_measurement, iwl3945_store_measurement);
-static ssize_t store_retry_rate(struct device *d,
+static ssize_t iwl3945_store_retry_rate(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3662,38 +3630,38 @@ static ssize_t store_retry_rate(struct device *d,
return count;
}
-static ssize_t show_retry_rate(struct device *d,
+static ssize_t iwl3945_show_retry_rate(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d", priv->retry_rate);
}
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
- store_retry_rate);
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
+ iwl3945_store_retry_rate);
-static ssize_t show_channels(struct device *d,
+static ssize_t iwl3945_show_channels(struct device *d,
struct device_attribute *attr, char *buf)
{
/* all this shit doesn't belong into sysfs anyway */
return 0;
}
-static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
+static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-static ssize_t show_antenna(struct device *d,
+static ssize_t iwl3945_show_antenna(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
}
-static ssize_t store_antenna(struct device *d,
+static ssize_t iwl3945_store_antenna(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3718,20 +3686,20 @@ static ssize_t store_antenna(struct device *d,
return count;
}
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-static ssize_t show_status(struct device *d,
+static ssize_t iwl3945_show_status(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "0x%08x\n", (int)priv->status);
}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-static ssize_t dump_error_log(struct device *d,
+static ssize_t iwl3945_dump_error_log(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3744,7 +3712,7 @@ static ssize_t dump_error_log(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
/*****************************************************************************
*
@@ -3760,18 +3728,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
- iwl_setup_scan_deferred_work(priv);
+ iwl_legacy_setup_scan_deferred_work(priv);
iwl3945_hw_setup_deferred_work(priv);
init_timer(&priv->watchdog);
priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_bg_watchdog;
+ priv->watchdog.function = iwl_legacy_bg_watchdog;
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl3945_irq_tasklet, (unsigned long)priv);
@@ -3783,9 +3750,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->alive_start);
- cancel_work_sync(&priv->beacon_update);
- iwl_cancel_scan_deferred_work(priv);
+ iwl_legacy_cancel_scan_deferred_work(priv);
}
static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3799,7 +3765,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_status.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
&dev_attr_debug_level.attr,
#endif
NULL
@@ -3814,19 +3780,19 @@ struct ieee80211_ops iwl3945_hw_ops = {
.tx = iwl3945_mac_tx,
.start = iwl3945_mac_start,
.stop = iwl3945_mac_stop,
- .add_interface = iwl_mac_add_interface,
- .remove_interface = iwl_mac_remove_interface,
- .change_interface = iwl_mac_change_interface,
+ .add_interface = iwl_legacy_mac_add_interface,
+ .remove_interface = iwl_legacy_mac_remove_interface,
+ .change_interface = iwl_legacy_mac_change_interface,
.config = iwl_legacy_mac_config,
.configure_filter = iwl3945_configure_filter,
.set_key = iwl3945_mac_set_key,
- .conf_tx = iwl_mac_conf_tx,
+ .conf_tx = iwl_legacy_mac_conf_tx,
.reset_tsf = iwl_legacy_mac_reset_tsf,
.bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .hw_scan = iwl_mac_hw_scan,
+ .hw_scan = iwl_legacy_mac_hw_scan,
.sta_add = iwl3945_mac_sta_add,
- .sta_remove = iwl_mac_sta_remove,
- .tx_last_beacon = iwl_mac_tx_last_beacon,
+ .sta_remove = iwl_legacy_mac_sta_remove,
+ .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3868,7 +3834,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
ret = -EINVAL;
goto err;
}
- ret = iwl_init_channel_map(priv);
+ ret = iwl_legacy_init_channel_map(priv);
if (ret) {
IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
goto err;
@@ -3880,7 +3846,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
goto err_free_channel_map;
}
- ret = iwlcore_init_geos(priv);
+ ret = iwl_legacy_init_geos(priv);
if (ret) {
IWL_ERR(priv, "initializing geos failed: %d\n", ret);
goto err_free_channel_map;
@@ -3890,7 +3856,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
return 0;
err_free_channel_map:
- iwl_free_channel_map(priv);
+ iwl_legacy_free_channel_map(priv);
err:
return ret;
}
@@ -3910,15 +3876,12 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SPECTRUM_MGMT;
- if (!priv->cfg->base_params->broken_powersave)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
hw->wiphy->interface_modes =
priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
@@ -3935,6 +3898,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
+ iwl_legacy_leds_init(priv);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3960,7 +3925,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
/* mac80211 allocates memory for this device instance, including
* space for this driver's private structure */
- hw = iwl_alloc_all(cfg);
+ hw = iwl_legacy_alloc_all(cfg);
if (hw == NULL) {
pr_err("Can not allocate network device\n");
err = -ENOMEM;
@@ -4000,13 +3965,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
iwl3945_hw_ops.hw_scan = NULL;
}
-
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
priv->pci_dev = pdev;
priv->inta_mask = CSR_INI_SET_MASK;
- if (iwl_alloc_traffic_mem(priv))
+ if (iwl_legacy_alloc_traffic_mem(priv))
IWL_ERR(priv, "Not enough memory to generate traffic log\n");
/***************************
@@ -4070,7 +4034,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* ********************/
/* Read the EEPROM */
- err = iwl_eeprom_init(priv);
+ err = iwl_legacy_eeprom_init(priv);
if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
goto out_iounmap;
@@ -4107,12 +4071,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* ********************/
spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
pci_enable_msi(priv->pci_dev);
- err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
+ err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
IRQF_SHARED, DRV_NAME, priv);
if (err) {
IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4125,24 +4089,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
goto out_release_irq;
}
- iwl_set_rxon_channel(priv,
+ iwl_legacy_set_rxon_channel(priv,
&priv->bands[IEEE80211_BAND_2GHZ].channels[5],
&priv->contexts[IWL_RXON_CTX_BSS]);
iwl3945_setup_deferred_work(priv);
iwl3945_setup_rx_handlers(priv);
- iwl_power_initialize(priv);
+ iwl_legacy_power_initialize(priv);
/*********************************
* 8. Setup and Register mac80211
* *******************************/
- iwl_enable_interrupts(priv);
+ iwl_legacy_enable_interrupts(priv);
err = iwl3945_setup_mac(priv);
if (err)
goto out_remove_sysfs;
- err = iwl_dbgfs_register(priv, DRV_NAME);
+ err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
if (err)
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
@@ -4160,12 +4124,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
free_irq(priv->pci_dev->irq, priv);
out_disable_msi:
pci_disable_msi(priv->pci_dev);
- iwlcore_free_geos(priv);
- iwl_free_channel_map(priv);
+ iwl_legacy_free_geos(priv);
+ iwl_legacy_free_channel_map(priv);
out_unset_hw_params:
iwl3945_unset_hw_params(priv);
out_eeprom_free:
- iwl_eeprom_free(priv);
+ iwl_legacy_eeprom_free(priv);
out_iounmap:
pci_iounmap(pdev, priv->hw_base);
out_pci_release_regions:
@@ -4174,7 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
- iwl_free_traffic_mem(priv);
+ iwl_legacy_free_traffic_mem(priv);
ieee80211_free_hw(priv->hw);
out:
return err;
@@ -4190,10 +4154,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
- iwl_dbgfs_unregister(priv);
+ iwl_legacy_dbgfs_unregister(priv);
set_bit(STATUS_EXIT_PENDING, &priv->status);
+ iwl_legacy_leds_exit(priv);
+
if (priv->mac80211_registered) {
ieee80211_unregister_hw(priv->hw);
priv->mac80211_registered = 0;
@@ -4208,16 +4174,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
* paths to avoid running iwl_down() at all before leaving driver.
* This (inexpensive) call *makes sure* device is reset.
*/
- iwl_apm_stop(priv);
+ iwl_legacy_apm_stop(priv);
/* make sure we flush any pending irq or
* tasklet for the driver
*/
spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
+ iwl3945_synchronize_irq(priv);
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
@@ -4239,7 +4205,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
* until now... */
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
- iwl_free_traffic_mem(priv);
+ iwl_legacy_free_traffic_mem(priv);
free_irq(pdev->irq, priv);
pci_disable_msi(pdev);
@@ -4249,8 +4215,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- iwl_free_channel_map(priv);
- iwlcore_free_geos(priv);
+ iwl_legacy_free_channel_map(priv);
+ iwl_legacy_free_geos(priv);
kfree(priv->scan_cmd);
if (priv->beacon_skb)
dev_kfree_skb(priv->beacon_skb);
@@ -4270,7 +4236,7 @@ static struct pci_driver iwl3945_driver = {
.id_table = iwl3945_hw_card_ids,
.probe = iwl3945_pci_probe,
.remove = __devexit_p(iwl3945_pci_remove),
- .driver.pm = IWL_PM_OPS,
+ .driver.pm = IWL_LEGACY_PM_OPS,
};
static int __init iwl3945_init(void)
@@ -4311,17 +4277,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto,
- "using software crypto (default 1 [software])\n");
-#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
+ "using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
- int, S_IRUGO);
+ int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan,
- "disable hardware scanning (default 0) (deprecated)");
-module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
+ "disable hardware scanning (default 0) (deprecated)");
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
module_exit(iwl3945_exit);
module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 000000000000..91b3d8b9d7a5
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3632 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl4965"
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-sta.h"
+#include "iwl-4965-calib.h"
+#include "iwl-4965.h"
+#include "iwl-4965-led.h"
+
+
+/******************************************************************************
+ *
+ * module boiler plate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void iwl4965_update_chain_flags(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain) {
+ for_each_context(priv, ctx) {
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ if (ctx->active.rx_chain != ctx->staging.rx_chain)
+ iwl_legacy_commit_rxon(priv, ctx);
+ }
+ }
+}
+
+static void iwl4965_clear_free_frames(struct iwl_priv *priv)
+{
+ struct list_head *element;
+
+ IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
+ priv->frames_count);
+
+ while (!list_empty(&priv->free_frames)) {
+ element = priv->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct iwl_frame, list));
+ priv->frames_count--;
+ }
+
+ if (priv->frames_count) {
+ IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
+ priv->frames_count);
+ priv->frames_count = 0;
+ }
+}
+
+static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
+{
+ struct iwl_frame *frame;
+ struct list_head *element;
+ if (list_empty(&priv->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IWL_ERR(priv, "Could not allocate frame!\n");
+ return NULL;
+ }
+
+ priv->frames_count++;
+ return frame;
+ }
+
+ element = priv->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct iwl_frame, list);
+}
+
+static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &priv->free_frames);
+}
+
+static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ int left)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ if (!priv->beacon_skb)
+ return 0;
+
+ if (priv->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
+
+ return priv->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
+ struct iwl_tx_beacon_cmd *tx_beacon_cmd,
+ u8 *beacon, u32 frame_size)
+{
+ u16 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /*
+ * The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx+1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+ tx_beacon_cmd->tim_size = beacon[tim_idx+1];
+ } else
+ IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
+ struct iwl_frame *frame)
+{
+ struct iwl_tx_beacon_cmd *tx_beacon_cmd;
+ u32 frame_size;
+ u32 rate_flags;
+ u32 rate;
+ /*
+ * We have to set up the TX command, the TX Beacon command, and the
+ * beacon contents.
+ */
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!priv->beacon_ctx) {
+ IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
+ return 0;
+ }
+
+ /* Initialize memory */
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ /* Set up TX beacon contents */
+ frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+ if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+ return 0;
+ if (!frame_size)
+ return 0;
+
+ /* Set up TX command fields */
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+ tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+ TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
+
+ /* Set up TX beacon command fields */
+ iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
+ frame_size);
+
+ /* Set up packet rate and flags */
+ rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
+ priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+ priv->hw_params.valid_tx_ant);
+ rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+ if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+ tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
+ rate_flags);
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
+{
+ struct iwl_frame *frame;
+ unsigned int frame_size;
+ int rc;
+
+ frame = iwl4965_get_free_frame(priv);
+ if (!frame) {
+ IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
+ if (!frame_size) {
+ IWL_ERR(priv, "Error configuring the beacon command\n");
+ iwl4965_free_frame(priv, frame);
+ return -EINVAL;
+ }
+
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+ &frame->u.cmd[0]);
+
+ iwl4965_free_frame(priv, frame);
+
+ return rc;
+}
+
+static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+ return addr;
+}
+
+static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+ struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
+ struct iwl_tfd *tfd;
+ struct pci_dev *dev = priv->pci_dev;
+ int index = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[index];
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev,
+ dma_unmap_addr(&txq->meta[index], mapping),
+ dma_unmap_len(&txq->meta[index], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
+ iwl4965_tfd_tb_get_len(tfd, i),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len,
+ u8 reset, u8 pad)
+{
+ struct iwl_queue *q;
+ struct iwl_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = (struct iwl_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(priv, "Error can not send more than %d chunks\n",
+ IWL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IWL_TX_DMA_MASK))
+ IWL_ERR(priv, "Unaligned address = %llx\n",
+ (unsigned long long)addr);
+
+ iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ /* Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+ "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type,
+ palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+ memcpy(&priv->card_alive_init,
+ &pkt->u.alive_frame,
+ sizeof(struct iwl_init_alive_resp));
+ pwork = &priv->init_alive_start;
+ } else {
+ IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+ memcpy(&priv->card_alive, &pkt->u.alive_frame,
+ sizeof(struct iwl_alive_resp));
+ pwork = &priv->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(priv->workqueue, pwork,
+ msecs_to_jiffies(5));
+ else
+ IWL_WARN(priv, "uCode did not respond OK.\n");
+}
+
+/**
+ * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
+ *
+ * This callback is provided in order to send a statistics request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
+ * was received. We need to ensure we receive the statistics in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void iwl4965_bg_statistics_periodic(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ /* don't send host command if rf-kill is on */
+ if (!iwl_legacy_is_ready_rf(priv))
+ return;
+
+ iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
+}
+
+
+static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+ u32 start_idx, u32 num_events,
+ u32 mode)
+{
+ u32 i;
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (mode == 0)
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+ else
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return;
+ }
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /*
+ * "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing.
+ */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+ 0, time, ev);
+ } else {
+ data = _iwl_legacy_read_direct32(priv,
+ HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+ time, data, ev);
+ }
+ }
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
+{
+ u32 capacity; /* event log capacity in # entries */
+ u32 base; /* SRAM byte address of event log header */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ capacity = iwl_legacy_read_targ_mem(priv, base);
+ num_wraps = iwl_legacy_read_targ_mem(priv,
+ base + (2 * sizeof(u32)));
+ mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ next_entry = iwl_legacy_read_targ_mem(priv,
+ base + (3 * sizeof(u32)));
+ } else
+ return;
+
+ if (num_wraps == priv->event_log.num_wraps) {
+ iwl4965_print_cont_event_trace(priv,
+ base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ mode);
+ priv->event_log.non_wraps_count++;
+ } else {
+ if ((num_wraps - priv->event_log.num_wraps) > 1)
+ priv->event_log.wraps_more_count++;
+ else
+ priv->event_log.wraps_once_count++;
+ trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
+ num_wraps - priv->event_log.num_wraps,
+ next_entry, priv->event_log.next_entry);
+ if (next_entry < priv->event_log.next_entry) {
+ iwl4965_print_cont_event_trace(priv, base,
+ priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ mode);
+
+ iwl4965_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ } else {
+ iwl4965_print_cont_event_trace(priv, base,
+ next_entry, capacity - next_entry,
+ mode);
+
+ iwl4965_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ }
+ }
+ priv->event_log.num_wraps = num_wraps;
+ priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl4965_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually set to execute every
+ * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
+ * This function performs the continuous uCode event logging operation,
+ * if enabled.
+ */
+static void iwl4965_bg_ucode_trace(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->event_log.ucode_trace) {
+ iwl4965_continuous_event_trace(priv);
+ /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ }
+}
+
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl4965_beacon_notif *beacon =
+ (struct iwl4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
+ "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ IWL_DEBUG_POWER(priv, "Stop all queues\n");
+
+ if (priv->mac80211_registered)
+ ieee80211_stop_queues(priv->hw);
+
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ spin_lock_irqsave(&priv->reg_lock, flags);
+ if (!iwl_grab_nic_access(priv))
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = priv->status;
+
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+ CT_CARD_DISABLED)) {
+
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ }
+
+ if (flags & CT_CARD_DISABLED)
+ iwl4965_perform_ct_kill_task(priv);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ if (!(flags & RXON_CARD_DISABLED))
+ iwl_legacy_scan_cancel(priv);
+
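+ /* Notify mac80211 only if the HW rfkill state actually changed;
+ * otherwise wake anyone waiting on the command queue */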
+ if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+ else
+ wake_up_interruptible(&priv->wait_command_queue);
+}
+
+/**
+ * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Set up the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware-specific files so they can set up
+ * any hardware-specific handlers as well.
+ */
+static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
+{
+ priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
+ priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+ priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_legacy_rx_spectrum_measure_notif;
+ priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
+ priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+ iwl_legacy_rx_pm_debug_statistics_notif;
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * statistics request from the host and the periodic statistics
+ * notifications (after received beacons) from the uCode.
+ */
+ priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
+ priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
+
+ iwl_legacy_setup_rx_scan_handlers(priv);
+
+ /* status change handler */
+ priv->rx_handlers[CARD_STATE_NOTIFICATION] =
+ iwl4965_rx_card_state_notif;
+
+ priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
+ iwl4965_rx_missed_beacon_notif;
+ /* Rx handlers */
+ priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
+ priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
+ /* block ack */
+ priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
+ /* Set up hardware specific Rx handlers */
+ priv->cfg->ops->lib->rx_handler_setup(priv);
+}
+
+/**
+ * iwl4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void iwl4965_rx_handle(struct iwl_priv *priv)
+{
+ struct iwl_rx_mem_buffer *rxb;
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
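+ /* count starts at 8 so that, when fill_rx is set, the Rx queue is
+ * replenished on the first handled frame and then after every 8 frames */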
+
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+
+ /* calculate the total number of frames that need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(priv->pci_dev, rxb->page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * uCode should set the SEQ_RX_FRAME bit if uCode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+ (pkt->hdr.cmd != REPLY_RX) &&
+ (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+ (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+ (pkt->hdr.cmd != REPLY_TX);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * rx_handlers table. See iwl4965_setup_rx_handlers() */
+ if (priv->rx_handlers[pkt->hdr.cmd]) {
+ IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+ i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
+ priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+ priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+ } else {
+ /* No handling needed */
+ IWL_DEBUG_RX(priv,
+ "r %d i %d No handler needed for %s, 0x%02x\n",
+ r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking iwl_legacy_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ iwl_legacy_tx_cmd_complete(priv, rxb);
+ else
+ IWL_WARN(priv, "Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+ 0, PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
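+ /* Advance the read index, wrapping within the circular Rx queue */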
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so the uCode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ iwl4965_rx_replenish_now(priv);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ iwl4965_rx_replenish_now(priv);
+ else
+ iwl4965_rx_queue_restock(priv);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
+{
+ /* wait to make sure we flush the pending tasklet */
+ synchronize_irq(priv->pci_dev->irq);
+ tasklet_kill(&priv->irq_tasklet);
+}
+
+static void iwl4965_irq_tasklet(struct iwl_priv *priv)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = iwl_read32(priv, CSR_INT);
+ iwl_write32(priv, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
+ /* just for debug */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK);
+ IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+ inta, inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR49_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR49_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(priv, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_legacy_disable_interrupts(priv);
+
+ priv->isr_stats.hw++;
+ iwl_legacy_irq_handle_error(priv);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
+ "the frame/frames.\n");
+ priv->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+ priv->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ priv->isr_stats.rfkill++;
+
+ /* The driver loads the uCode only when setting the interface up,
+ * and it allows loading the uCode even if the radio is killed.
+ * Hence, update the kill-switch state here; the rfkill handler
+ * will take care of restarting if needed.
+ */
+ if (!test_bit(STATUS_ALIVE, &priv->status)) {
+ if (hw_rf_kill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(priv, "Microcode CT kill error detected.\n");
+ priv->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(priv, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ priv->isr_stats.sw++;
+ iwl_legacy_irq_handle_error(priv);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /*
+ * uCode wakes up after power-down sleep.
+ * Tell device about any new tx or host commands enqueued,
+ * and about any Rx buffers made available while asleep.
+ */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+ iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+ for (i = 0; i < priv->hw_params.max_txq_num; i++)
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
+ priv->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notifications), and other
+ * notifications from the uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ iwl4965_rx_handle(priv);
+ priv->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
+ priv->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ priv->ucode_write_complete = 1;
+ wake_up_interruptible(&priv->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ priv->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(priv->inta_mask)) {
+ IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~priv->inta_mask);
+ IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status))
+ iwl_legacy_enable_interrupts(priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+ inta = iwl_read32(priv, CSR_INT);
+ inta_mask = iwl_read32(priv, CSR_INT_MASK);
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv,
+ "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
+static ssize_t iwl4965_show_debug_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
+}
+static ssize_t iwl4965_store_debug_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
+ else {
+ priv->debug_level = val;
+ if (iwl_legacy_alloc_traffic_mem(priv))
+ IWL_ERR(priv,
+ "Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ iwl4965_show_debug_level, iwl4965_store_debug_level);
+
+
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+
+static ssize_t iwl4965_show_temperature(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", priv->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
+
+static ssize_t iwl4965_show_tx_power(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+
+ if (!iwl_legacy_is_ready_rf(priv))
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
+}
+
+static ssize_t iwl4965_store_tx_power(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ IWL_INFO(priv, "%s is not in decimal form.\n", buf);
+ else {
+ ret = iwl_legacy_set_tx_power(priv, val, false);
+ if (ret)
+ IWL_ERR(priv, "failed setting tx power (0x%d).\n",
+ ret);
+ else
+ ret = count;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
+ iwl4965_show_tx_power, iwl4965_store_tx_power);
+
+static struct attribute *iwl_sysfs_entries[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group iwl_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = iwl_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
+{
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+}
+
+static void iwl4965_nic_start(struct iwl_priv *priv)
+{
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(priv, CSR_RESET, 0);
+}
+
+static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
+ void *context);
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+ u32 max_probe_length);
+
+static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
+{
+ const char *name_pre = priv->cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+ priv->fw_index = priv->cfg->ucode_api_max;
+ sprintf(tag, "%d", priv->fw_index);
+ } else {
+ priv->fw_index--;
+ sprintf(tag, "%d", priv->fw_index);
+ }
+
+ if (priv->fw_index < priv->cfg->ucode_api_min) {
+ IWL_ERR(priv, "no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
+ priv->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
+ &priv->pci_dev->dev, GFP_KERNEL, priv,
+ iwl4965_ucode_callback);
+}
+
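+/* Pointers into the raw firmware image, and the size of each uCode section */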
+struct iwl4965_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int iwl4965_load_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwl4965_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+ pieces->init_data_size =
+ le32_to_cpu(ucode->v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+ src = ucode->v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size != hdr_size + pieces->inst_size +
+ pieces->data_size + pieces->init_size +
+ pieces->init_data_size + pieces->boot_size) {
+
+ IWL_ERR(priv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+/**
+ * iwl4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct iwl_priv *priv = context;
+ struct iwl_ucode_header *ucode;
+ int err;
+ struct iwl4965_firmware_pieces pieces;
+ const unsigned int api_max = priv->cfg->ucode_api_max;
+ const unsigned int api_min = priv->cfg->ucode_api_min;
+ u32 api_ver;
+
+ u32 max_probe_length = 200;
+ u32 standard_phy_calibration_size =
+ IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (priv->fw_index <= priv->cfg->ucode_api_max)
+ IWL_ERR(priv,
+ "request for firmware file '%s' failed.\n",
+ priv->firmware_name);
+ goto try_again;
+ }
+
+ IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
+ priv->firmware_name, ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IWL_ERR(priv, "File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+ err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ if (api_ver < api_min || api_ver > api_max) {
+ IWL_ERR(priv,
+ "Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n",
+ api_max, api_ver);
+ goto try_again;
+ }
+
+ if (api_ver != api_max)
+ IWL_ERR(priv,
+ "Firmware has old API version. Expected v%u, "
+ "got v%u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n",
+ api_max, api_ver);
+
+ IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver));
+
+ snprintf(priv->hw->wiphy->fw_version,
+ sizeof(priv->hw->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver));
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+ priv->ucode_ver);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+ pieces.inst_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+ pieces.data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+ pieces.init_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+ pieces.init_data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+ pieces.boot_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+ IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ priv->ucode_code.len = pieces.inst_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+
+ priv->ucode_data.len = pieces.data_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+
+ priv->ucode_data_backup.len = pieces.data_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+
+ if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+ !priv->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ priv->ucode_init.len = pieces.init_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+
+ priv->ucode_init_data.len = pieces.init_data_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+
+ if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (pieces.boot_size) {
+ priv->ucode_boot.len = pieces.boot_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+
+ if (!priv->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+ IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
+
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in iwl_up()
+ */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
+ IWL_DEBUG_INFO(priv,
+ "Copying (but not loading) init instr len %Zd\n",
+ pieces.init_size);
+ memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
+ }
+
+ /* Initialization data */
+ if (pieces.init_data_size) {
+ IWL_DEBUG_INFO(priv,
+ "Copying (but not loading) init data len %Zd\n",
+ pieces.init_data_size);
+ memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
+ }
+
+ /* Bootstrap instructions */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+ /*
+ * figure out the offset of the chain noise reset and gain commands
+ * based on the size of the standard phy calibration commands table
+ */
+ priv->_4965.phy_calib_chain_noise_reset_cmd =
+ standard_phy_calibration_size;
+ priv->_4965.phy_calib_chain_noise_gain_cmd =
+ standard_phy_calibration_size + 1;
+
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwl4965_mac_setup_register(priv, max_probe_length);
+ if (err)
+ goto out_unbind;
+
+ err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv,
+ "failed to create debugfs files. Ignoring error: %d\n", err);
+
+ err = sysfs_create_group(&priv->pci_dev->dev.kobj,
+ &iwl_attribute_group);
+ if (err) {
+ IWL_ERR(priv, "failed to create sysfs device attributes\n");
+ goto out_unbind;
+ }
+
+ /* We have our copies now, allow the OS to release its copy */
+ release_firmware(ucode_raw);
+ complete(&priv->_4965.firmware_loading_complete);
+ return;
+
+ try_again:
+ /* try next, if any */
+ if (iwl4965_request_firmware(priv, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
+
+ err_pci_alloc:
+ IWL_ERR(priv, "failed to allocate pci memory\n");
+ iwl4965_dealloc_ucode_pci(priv);
+ out_unbind:
+ complete(&priv->_4965.firmware_loading_complete);
+ device_release_driver(&priv->pci_dev->dev);
+ release_firmware(ucode_raw);
+}
+
+static const char * const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STABLE",
+ "FH_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT"
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
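+/* Error descriptions looked up by numeric error code; the last entry is the fallback */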
+static struct { char *name; u8 num; } advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *iwl4965_desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
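+/* uCode error log layout in SRAM: a u32 record count at the base address,
+ * followed by the fields read at fixed word offsets below */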
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
+
+ if (priv->ucode_type == UCODE_INIT) {
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ } else {
+ base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+ }
+
+ if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IWL_ERR(priv,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return;
+ }
+
+ count = iwl_legacy_read_targ_mem(priv, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+ IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+ priv->status, count);
+ }
+
+ desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
+ priv->isr_stats.err_code = desc;
+ pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
+ blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
+ blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
+ ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
+ ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
+ data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
+ data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
+ line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
+ time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
+ hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
+
+ trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
+ time, data1, data2, line,
+ blink1, blink2, ilink1, ilink2);
+
+ IWL_ERR(priv, "Desc Time "
+ "data1 data2 line\n");
+ IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+ iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
+ IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+ pc, blink1, blink2, ilink1, ilink2, hcmd);
+}
+
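+/* Event log data follows a four-u32 header: capacity, mode, num_wraps, next_entry */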
+#define EVENT_START_OFFSET (4 * sizeof(u32))
+
+/**
+ * iwl4965_print_event_log - Dump error event log to syslog
+ *
+ */
+static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ u32 i;
+ u32 base; /* SRAM byte address of event log header */
+ u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (num_events == 0)
+ return pos;
+
+ if (priv->ucode_type == UCODE_INIT) {
+ base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+ } else {
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ }
+
+ if (mode == 0)
+ event_size = 2 * sizeof(u32);
+ else
+ event_size = 3 * sizeof(u32);
+
+ ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /* "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing. */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ /* data, ev */
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
+ } else {
+ data = _iwl_legacy_read_direct32(priv,
+ HBUS_TARG_MEM_RDAT);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ trace_iwlwifi_legacy_dev_ucode_event(priv, time,
+ data, ev);
+ }
+ }
+ }
+
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
+}
+
+/**
+ * iwl4965_print_last_event_logs - Dump the newest event log entries to syslog
+ */
+static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ /*
+ * display the newest DEFAULT_LOG_ENTRIES entries,
+ * i.e. the entries just before the next one that uCode would fill.
+ */
+ if (num_wraps) {
+ if (next_entry < size) {
+ pos = iwl4965_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl4965_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
+ } else
+ pos = iwl4965_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ } else {
+ if (next_entry < size) {
+ pos = iwl4965_print_event_log(priv, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl4965_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
+ }
+ return pos;
+}
+
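+/* Number of newest event log entries dumped when neither full_log
+ * nor the IWL_DL_FW_ERRORS debug flag is set */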
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
+{
+ u32 base; /* SRAM byte address of event log header */
+ u32 capacity; /* event log capacity in # entries */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+ u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
+
+ if (priv->ucode_type == UCODE_INIT) {
+ base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+ } else {
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ }
+
+ if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IWL_ERR(priv,
+ "Invalid event log pointer 0x%08X for %s uCode\n",
+ base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return -EINVAL;
+ }
+
+ /* event log header */
+ capacity = iwl_legacy_read_targ_mem(priv, base);
+ mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
+ size = num_wraps ? capacity : next_entry;
+
+ /* bail out if nothing in log */
+ if (size == 0) {
+ IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+ return pos;
+ }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+ IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+ size);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
+ if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+ /*
+ * if uCode has wrapped back to top of log,
+ * start at the oldest entry,
+ * i.e. the next one that uCode would fill.
+ */
+ if (num_wraps)
+ pos = iwl4965_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
+ /* (then/else) start at top of log */
+ pos = iwl4965_print_event_log(priv, 0,
+ next_entry, mode, pos, buf, bufsz);
+ } else
+ pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#else
+ pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#endif
+ return pos;
+}
+
+static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
+{
+ struct iwl_ct_kill_config cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ cmd.critical_temperature_R =
+ cpu_to_le32(priv->hw_params.ct_kill_threshold);
+
+ ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
+ else
+ IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
+ "succeeded, "
+ "critical temperature is %d\n",
+ priv->hw_params.ct_kill_threshold);
+}
+
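+/* Default Tx queue to hardware FIFO mapping: the four EDCA ACs,
+ * the command FIFO, and two unused slots */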
+static const s8 default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWL49_CMD_FIFO_NUM,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+};
+
+static int iwl4965_alive_notify(struct iwl_priv *priv)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Clear 4965's internal Tx Scheduler data base */
+ priv->scd_base_addr = iwl_legacy_read_prph(priv,
+ IWL49_SCD_SRAM_BASE_ADDR);
+ a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+ iwl_legacy_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+ iwl_legacy_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr +
+ IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+ iwl_legacy_write_targ_mem(priv, a, 0);
+
+ /* Tell 4965 where to find Tx byte count tables */
+ iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
+ priv->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channels */
+ for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
+ iwl_legacy_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+ iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Disable chain mode for all queues */
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
+
+ /* Initialize each Tx queue (including the command queue) */
+ for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+
+ /* TFD circular buffer read/write indexes */
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+ /* Max Tx Window size for Scheduler-ACK mode */
+ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+ (SCD_WIN_SIZE <<
+ IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ /* Frame limit */
+ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ }
+ iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
+ (1 << priv->hw_params.max_txq_num) - 1);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
+
+ iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 first to enable all the queues */
+ priv->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+
+ iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/**
+ * iwl4965_alive_start - called after REPLY_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by iwl_init_alive_start()).
+ */
+static void iwl4965_alive_start(struct iwl_priv *priv)
+{
+ int ret = 0;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+ if (priv->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Alive failed.\n");
+ goto restart;
+ }
+
+ /* The initialization uCode has loaded the runtime uCode ... verify the
+ * instruction image. This is a paranoid check, because we would not have
+ * gotten the "runtime" alive if the code weren't properly loaded. */
+ if (iwl4965_verify_ucode(priv)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ ret = iwl4965_alive_notify(priv);
+ if (ret) {
+ IWL_WARN(priv,
+ "Could not complete ALIVE transition [ntf]: %d\n", ret);
+ goto restart;
+ }
+
+
+ /* After the ALIVE response, we can send host commands to the uCode */
+ set_bit(STATUS_ALIVE, &priv->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ iwl_legacy_setup_watchdog(priv);
+
+ if (iwl_legacy_is_rfkill(priv))
+ return;
+
+ ieee80211_wake_queues(priv->hw);
+
+ priv->active_rate = IWL_RATES_MASK;
+
+ if (iwl_legacy_is_associated_ctx(ctx)) {
+ struct iwl_legacy_rxon_cmd *active_rxon =
+ (struct iwl_legacy_rxon_cmd *)&ctx->active;
+ /* apply any changes in staging */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ struct iwl_rxon_context *tmp;
+ /* Initialize our rx_config data */
+ for_each_context(priv, tmp)
+ iwl_legacy_connection_init_rx_config(priv, tmp);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+ /* Configure bluetooth coexistence if enabled */
+ iwl_legacy_send_bt_config(priv);
+
+ iwl4965_reset_run_time_calib(priv);
+
+ set_bit(STATUS_READY, &priv->status);
+
+ /* Configure the adapter for unassociated operation */
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ /* At this point, the NIC is initialized and operational */
+ iwl4965_rf_kill_ct_config(priv);
+
+ IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+ wake_up_interruptible(&priv->wait_command_queue);
+
+ iwl_legacy_power_update_mode(priv, true);
+ IWL_DEBUG_INFO(priv, "Updated power mode\n");
+
+ return;
+
+ restart:
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
+
+static void __iwl4965_down(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+
+ exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ /* Stop the TX queues' watchdog. The STATUS_EXIT_PENDING bit must be set
+ * to prevent the timer from being rearmed. */
+ del_timer_sync(&priv->watchdog);
+
+ iwl_legacy_clear_ucode_stations(priv, NULL);
+ iwl_legacy_dealloc_bcast_stations(priv);
+ iwl_legacy_clear_driver_stations(priv);
+
+ /* Unblock any waiting calls */
+ wake_up_interruptible_all(&priv->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_legacy_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ iwl4965_synchronize_irq(priv);
+
+ if (priv->mac80211_registered)
+ ieee80211_stop_queues(priv->hw);
+
+ /* If we have not previously called iwl_init() then
+ * clear all bits but the RF Kill bit and return */
+ if (!iwl_legacy_is_init(priv)) {
+ priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+ STATUS_RF_KILL_HW |
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+ STATUS_GEO_CONFIGURED |
+ test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+ STATUS_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+ STATUS_RF_KILL_HW |
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+ STATUS_GEO_CONFIGURED |
+ test_bit(STATUS_FW_ERROR, &priv->status) <<
+ STATUS_FW_ERROR |
+ test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+ STATUS_EXIT_PENDING;
+
+ iwl4965_txq_ctx_stop(priv);
+ iwl4965_rxq_stop(priv);
+
+ /* Power-down device's busmaster DMA clocks */
+ iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_legacy_apm_stop(priv);
+
+ exit:
+ memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
+
+ dev_kfree_skb(priv->beacon_skb);
+ priv->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ iwl4965_clear_free_frames(priv);
+}
+
+static void iwl4965_down(struct iwl_priv *priv)
+{
+ mutex_lock(&priv->mutex);
+ __iwl4965_down(priv);
+ mutex_unlock(&priv->mutex);
+
+ iwl4965_cancel_deferred_work(priv);
+}
+
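+/* Poll timeout for the NIC-ready handshake below, in the units taken
+ * by iwl_poll_bit (assumed to be microseconds) */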
+#define HW_READY_TIMEOUT (50)
+
+static int iwl4965_set_hw_ready(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+ if (ret != -ETIMEDOUT)
+ priv->hw_ready = true;
+ else
+ priv->hw_ready = false;
+
+ IWL_DEBUG_INFO(priv, "hardware %s\n",
+ (priv->hw_ready == 1) ? "ready" : "not ready");
+ return ret;
+}
+
+static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
+
+ ret = iwl4965_set_hw_ready(priv);
+ if (priv->hw_ready)
+ return ret;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ /* HW should be ready by now, check again. */
+ if (ret != -ETIMEDOUT)
+ iwl4965_set_hw_ready(priv);
+
+ return ret;
+}
+
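+/* Number of bootstrap uCode load attempts in __iwl4965_up() before giving up */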
+#define MAX_HW_RESTARTS 5
+
+static int __iwl4965_up(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int i;
+ int ret;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERR(priv, "ucode not available for device bringup\n");
+ return -EIO;
+ }
+
+ for_each_context(priv, ctx) {
+ ret = iwl4965_alloc_bcast_station(priv, ctx);
+ if (ret) {
+ iwl_legacy_dealloc_bcast_stations(priv);
+ return ret;
+ }
+ }
+
+ iwl4965_prepare_card_hw(priv);
+
+ if (!priv->hw_ready) {
+ IWL_WARN(priv, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(priv,
+ CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ if (iwl_legacy_is_rfkill(priv)) {
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+
+ iwl_legacy_enable_interrupts(priv);
+ IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
+ return 0;
+ }
+
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+
+ /* must be initialised before iwl_hw_nic_init */
+ priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+
+ ret = iwl4965_hw_nic_init(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_legacy_enable_interrupts(priv);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
+ priv->ucode_data.len);
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ ret = priv->cfg->ops->lib->load_ucode(priv);
+
+ if (ret) {
+ IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
+ ret);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ iwl4965_nic_start(priv);
+
+ IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(STATUS_EXIT_PENDING, &priv->status);
+ __iwl4965_down(priv);
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ /* tried to restart and configure the device for as long as our
+ * patience could withstand */
+ IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_init_alive_start(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, init_alive_start.work);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ priv->cfg->ops->lib->init_alive_start(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_alive_start(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, alive_start.work);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ iwl4965_alive_start(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ run_time_calib_work);
+
+ mutex_lock(&priv->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ if (priv->start_calib) {
+ iwl4965_chain_noise_calibration(priv,
+ (void *)&priv->_4965.statistics);
+ iwl4965_sensitivity_calibration(priv,
+ (void *)&priv->_4965.statistics);
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_restart(struct work_struct *data)
+{
+ struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+ struct iwl_rxon_context *ctx;
+
+ mutex_lock(&priv->mutex);
+ for_each_context(priv, ctx)
+ ctx->vif = NULL;
+ priv->is_open = 0;
+
+ __iwl4965_down(priv);
+
+ mutex_unlock(&priv->mutex);
+ iwl4965_cancel_deferred_work(priv);
+ ieee80211_restart_hw(priv->hw);
+ } else {
+ iwl4965_down(priv);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl4965_up(priv);
+ mutex_unlock(&priv->mutex);
+ }
+}
+
+static void iwl4965_bg_rx_replenish(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, rx_replenish);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ iwl4965_rx_replenish(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
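+/* How long (in jiffies) iwl4965_mac_start() waits for STATUS_READY
+ * after bringing the NIC up */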
+#define UCODE_READY_TIMEOUT (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+ u32 max_probe_length)
+{
+ int ret;
+ struct ieee80211_hw *hw = priv->hw;
+ struct iwl_rxon_context *ctx;
+
+ hw->rate_control_algorithm = "iwl-4965-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (priv->cfg->sku & IWL_SKU_N)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+ for_each_context(priv, ctx) {
+ hw->wiphy->interface_modes |= ctx->interface_modes;
+ hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+ }
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+ * For now, disable PS by default because it affects
+ * RX performance significantly.
+ */
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->bands[IEEE80211_BAND_2GHZ];
+ if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &priv->bands[IEEE80211_BAND_5GHZ];
+
+ iwl_legacy_leds_init(priv);
+
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ priv->mac80211_registered = 1;
+
+ return 0;
+}
+
+
+int iwl4965_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&priv->mutex);
+ ret = __iwl4965_up(priv);
+ mutex_unlock(&priv->mutex);
+
+ if (ret)
+ return ret;
+
+ if (iwl_legacy_is_rfkill(priv))
+ goto out;
+
+ IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+ /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ test_bit(STATUS_READY, &priv->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(STATUS_READY, &priv->status)) {
+ IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ return -ETIMEDOUT;
+ }
+ }
+
+ iwl4965_led_enable(priv);
+
+out:
+ priv->is_open = 1;
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+void iwl4965_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!priv->is_open)
+ return;
+
+ priv->is_open = 0;
+
+ iwl4965_down(priv);
+
+ flush_workqueue(priv->workqueue);
+
+ /* enable interrupts again in order to receive rfkill changes */
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_legacy_enable_interrupts(priv);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MACDUMP(priv, "enter\n");
+
+ IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (iwl4965_tx_skb(priv, skb))
+ dev_kfree_skb_any(skb);
+
+ IWL_DEBUG_MACDUMP(priv, "leave\n");
+}
+
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
+ iv32, phase1key);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+ bool is_default_wep_key = false;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (priv->cfg->mod_params->sw_crypto) {
+ IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
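+ /* With no station given (group key), this resolves to the broadcast station id */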
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+ iwl_legacy_scan_cancel_timeout(priv, 100);
+
+ /*
+ * If we are getting a WEP group key and we have not received any key
+ * mapping so far, we are in legacy WEP mode (group key only); otherwise
+ * we are in 1X mode.
+ * In legacy WEP mode, we use a different host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ (key->hw_key_idx == HW_KEY_DEFAULT);
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key)
+ ret = iwl4965_set_default_wep_key(priv,
+ vif_priv->ctx, key);
+ else
+ ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
+ key, sta_id);
+
+ IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = iwl4965_remove_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl4965_remove_dynamic_key(priv, ctx,
+ key, sta_id);
+
+ IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret = -EINVAL;
+
+ IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+ sta->addr, tid);
+
+ if (!(priv->cfg->sku & IWL_SKU_N))
+ return -EACCES;
+
+ mutex_lock(&priv->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ IWL_DEBUG_HT(priv, "start Rx\n");
+ ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ IWL_DEBUG_HT(priv, "stop Rx\n");
+ ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ IWL_DEBUG_HT(priv, "start Tx\n");
+ ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
+ if (ret == 0) {
+ priv->_4965.agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+ priv->_4965.agg_tids_count);
+ }
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT(priv, "stop Tx\n");
+ ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
+ priv->_4965.agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+ priv->_4965.agg_tids_count);
+ }
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
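+ /* On a station interface, the peer station being added is our AP */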
+ int ret;
+ u8 sta_id;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+
+ ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
+ is_ap, sta, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl4965_rs_rate_init(priv, sta, sta_id);
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u16 ch;
+ unsigned long flags = 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (iwl_legacy_is_rfkill(priv))
+ goto out_exit;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status))
+ goto out_exit;
+
+ if (!iwl_legacy_is_associated_ctx(ctx))
+ goto out_exit;
+
+ /* channel switch in progress */
+ if (priv->switch_rxon.switch_in_progress)
+ goto out_exit;
+
+ mutex_lock(&priv->mutex);
+ if (priv->cfg->ops->lib->set_channel_switch) {
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) != ch) {
+ ch_info = iwl_legacy_get_channel_info(priv,
+ channel->band,
+ ch);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_legacy_set_rxon_channel(priv, channel, ctx);
+ iwl_legacy_set_rxon_ht(priv, ht_conf);
+ iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+ ctx->vif);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_legacy_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ if (priv->cfg->ops->lib->set_channel_switch(priv,
+ ch_switch))
+ priv->switch_rxon.switch_in_progress = false;
+ }
+ }
+out:
+ mutex_unlock(&priv->mutex);
+out_exit:
+ if (!priv->switch_rxon.switch_in_progress)
+ ieee80211_chswitch_done(ctx->vif, false);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct iwl_rxon_context *ctx;
+
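+/* Set or clear each RXON filter bit according to the requested mac80211 filter flags */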
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_legacy_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_txpower_work(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ txpower_work);
+
+ /* If a scan happened to start before we got here
+ * then just return; the statistics notification will
+ * kick off another scheduled work to compensate for
+ * any temperature delta we missed here. */
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ /* Regardless of whether we are associated, we must reconfigure the
+ * TX power since frames can be sent on non-radar channels while
+ * not associated */
+ priv->cfg->ops->lib->send_tx_power(priv);
+
+ /* Update last_temperature to keep is_calib_needed from running
+ * when it isn't needed... */
+ priv->last_temperature = priv->temperature;
+
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
+{
+ priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&priv->wait_command_queue);
+
+ INIT_WORK(&priv->restart, iwl4965_bg_restart);
+ INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
+ INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
+ INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
+ INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
+
+ iwl_legacy_setup_scan_deferred_work(priv);
+
+ INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
+
+ init_timer(&priv->statistics_periodic);
+ priv->statistics_periodic.data = (unsigned long)priv;
+ priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
+
+ init_timer(&priv->ucode_trace);
+ priv->ucode_trace.data = (unsigned long)priv;
+ priv->ucode_trace.function = iwl4965_bg_ucode_trace;
+
+ init_timer(&priv->watchdog);
+ priv->watchdog.data = (unsigned long)priv;
+ priv->watchdog.function = iwl_legacy_bg_watchdog;
+
+ tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ iwl4965_irq_tasklet, (unsigned long)priv);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
+{
+ cancel_work_sync(&priv->txpower_work);
+ cancel_delayed_work_sync(&priv->init_alive_start);
+ cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->run_time_calib_work);
+
+ iwl_legacy_cancel_scan_deferred_work(priv);
+
+ del_timer_sync(&priv->statistics_periodic);
+ del_timer_sync(&priv->ucode_trace);
+}
+
+static void iwl4965_init_hw_rates(struct iwl_priv *priv,
+ struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
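+ /* .ieee is in 500 kbps units; mac80211 bitrates are in 100 kbps units */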
+ rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on indexes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
+ 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/*
+ * Acquire priv->lock before calling this function!
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
+{
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+
+ /* Find out whether to activate Tx queue */
+ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+ /* Set up and activate */
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ IWL49_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+
+static int iwl4965_init_drv(struct iwl_priv *priv)
+{
+ int ret;
+
+ spin_lock_init(&priv->sta_lock);
+ spin_lock_init(&priv->hcmd_lock);
+
+ INIT_LIST_HEAD(&priv->free_frames);
+
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->sync_cmd_mutex);
+
+ priv->ieee_channels = NULL;
+ priv->ieee_rates = NULL;
+ priv->band = IEEE80211_BAND_2GHZ;
+
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+ priv->_4965.agg_tids_count = 0;
+
+ /* initialize force reset */
+ priv->force_reset[IWL_RF_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_RF_RESET;
+ priv->force_reset[IWL_FW_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ /* Choose which receivers/antennas to use */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
+
+ iwl_legacy_init_scan_params(priv);
+
+ /* Set the tx_power_user_lmt to the lowest power level
+ * this value will get overwritten by channel max power avg
+ * from eeprom */
+ priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
+ priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
+
+ ret = iwl_legacy_init_channel_map(priv);
+ if (ret) {
+ IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = iwl_legacy_init_geos(priv);
+ if (ret) {
+ IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ iwl4965_init_hw_rates(priv, priv->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ iwl_legacy_free_channel_map(priv);
+err:
+ return ret;
+}
+
+static void iwl4965_uninit_drv(struct iwl_priv *priv)
+{
+ iwl4965_calib_free_results(priv);
+ iwl_legacy_free_geos(priv);
+ iwl_legacy_free_channel_map(priv);
+ kfree(priv->scan_cmd);
+}
+
+static void iwl4965_hw_detect(struct iwl_priv *priv)
+{
+ priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
+ priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
+ pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+ IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
+}
+
+static int iwl4965_set_hw_params(struct iwl_priv *priv)
+{
+ priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
+ else
+ priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
+
+ priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
+
+ if (priv->cfg->mod_params->disable_11n)
+ priv->cfg->sku &= ~IWL_SKU_N;
+
+ /* Device-specific setup */
+ return priv->cfg->ops->lib->set_hw_params(priv);
+}
+
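+/* Map mac80211 access categories (VO, VI, BE, BK) to device TX FIFOs */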
+static const u8 iwl4965_bss_ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static const u8 iwl4965_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+
+static int
+iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0, i;
+ struct iwl_priv *priv;
+ struct ieee80211_hw *hw;
+ struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ unsigned long flags;
+ u16 pci_cmd;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+
+ hw = iwl_legacy_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+ priv = hw->priv;
+ /* At this point both hw and priv are allocated. */
+
+ /*
+ * The default context is always valid,
+ * more may be discovered when firmware
+ * is loaded.
+ */
+ priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+
+ for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+ priv->contexts[i].ctxid = i;
+
+ priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+ priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+ priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
+ priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
+ priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+ BIT(NL80211_IFTYPE_ADHOC);
+ priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+ BIT(NL80211_IFTYPE_STATION);
+ priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+ priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+ priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+ priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+ priv->cfg = cfg;
+ priv->pci_dev = pdev;
+ priv->inta_mask = CSR_INI_SET_MASK;
+
+ if (iwl_legacy_alloc_traffic_mem(priv))
+ IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+ /**************************
+ * 2. Initializing PCI bus
+ **************************/
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
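+ /* Try 36-bit DMA addressing first, fall back to 32-bit if unsupported */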
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ IWL_WARN(priv, "No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ pci_set_drvdata(pdev, priv);
+
+
+ /***********************
+ * 3. Read REV register
+ ***********************/
+ priv->hw_base = pci_iomap(pdev, 0, 0);
+ if (!priv->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
+ IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
+
+ /* These spin locks are used in apm_ops.init and EEPROM access,
+ * so initialize them now.
+ */
+ spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ iwl4965_hw_detect(priv);
+ IWL_INFO(priv, "Detected %s, REV=0x%X\n",
+ priv->cfg->name, priv->hw_rev);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ iwl4965_prepare_card_hw(priv);
+ if (!priv->hw_ready) {
+ IWL_WARN(priv, "Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
+ /*****************
+ * 4. Read EEPROM
+ *****************/
+ /* Read the EEPROM */
+ err = iwl_legacy_eeprom_init(priv);
+ if (err) {
+ IWL_ERR(priv, "Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ err = iwl4965_eeprom_check_version(priv);
+ if (err)
+ goto out_free_eeprom;
+
+ /* extract MAC Address */
+ iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
+ IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
+ priv->hw->wiphy->addresses = priv->addresses;
+ priv->hw->wiphy->n_addresses = 1;
+
+ /************************
+ * 5. Setup HW constants
+ ************************/
+ if (iwl4965_set_hw_params(priv)) {
+ IWL_ERR(priv, "failed to set hw parameters\n");
+ goto out_free_eeprom;
+ }
+
+ /*******************
+ * 6. Setup priv
+ *******************/
+
+ err = iwl4965_init_drv(priv);
+ if (err)
+ goto out_free_eeprom;
+ /* At this point both hw and priv are initialized. */
+
+ /********************
+ * 7. Setup services
+ ********************/
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_legacy_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ pci_enable_msi(priv->pci_dev);
+
+ err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
+ IRQF_SHARED, DRV_NAME, priv);
+ if (err) {
+ IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ iwl4965_setup_deferred_work(priv);
+ iwl4965_setup_rx_handlers(priv);
+
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
+
+ /* enable interrupts if needed: hw bug w/a */
+ pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
+ }
+
+ iwl_legacy_enable_interrupts(priv);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+
+ iwl_legacy_power_initialize(priv);
+
+ init_completion(&priv->_4965.firmware_loading_complete);
+
+ err = iwl4965_request_firmware(priv, true);
+ if (err)
+ goto out_destroy_workqueue;
+
+ return 0;
+
+ out_destroy_workqueue:
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+ free_irq(priv->pci_dev->irq, priv);
+ out_disable_msi:
+ pci_disable_msi(priv->pci_dev);
+ iwl4965_uninit_drv(priv);
+ out_free_eeprom:
+ iwl_legacy_eeprom_free(priv);
+ out_iounmap:
+ pci_iounmap(pdev, priv->hw_base);
+ out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ out_pci_disable_device:
+ pci_disable_device(pdev);
+ out_ieee80211_free_hw:
+ iwl_legacy_free_traffic_mem(priv);
+ ieee80211_free_hw(priv->hw);
+ out:
+ return err;
+}
+
+static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
+{
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!priv)
+ return;
+
+ wait_for_completion(&priv->_4965.firmware_loading_complete);
+
+ IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+
+ iwl_legacy_dbgfs_unregister(priv);
+ sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
+
+ /* The ieee80211_unregister_hw call will cause iwl_mac_stop to be
+ * called, which in turn runs iwl4965_down. Since we are removing the
+ * device, we need to set the STATUS_EXIT_PENDING bit.
+ */
+ set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ iwl_legacy_leds_exit(priv);
+
+ if (priv->mac80211_registered) {
+ ieee80211_unregister_hw(priv->hw);
+ priv->mac80211_registered = 0;
+ } else {
+ iwl4965_down(priv);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with iwl4965_down(), but there are paths to
+ * run iwl4965_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running iwl4965_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ iwl_legacy_apm_stop(priv);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_legacy_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl4965_synchronize_irq(priv);
+
+ iwl4965_dealloc_ucode_pci(priv);
+
+ if (priv->rxq.bd)
+ iwl4965_rx_queue_free(priv, &priv->rxq);
+ iwl4965_hw_txq_ctx_free(priv);
+
+ iwl_legacy_eeprom_free(priv);
+
+ /* netif_stop_queue(dev); */
+ flush_workqueue(priv->workqueue);
+
+ /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
+ * priv->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+ iwl_legacy_free_traffic_mem(priv);
+
+ free_irq(priv->pci_dev->irq, priv);
+ pci_disable_msi(priv->pci_dev);
+ pci_iounmap(pdev, priv->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ iwl4965_uninit_drv(priv);
+
+ dev_kfree_skb(priv->beacon_skb);
+
+ ieee80211_free_hw(priv->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under priv->lock and mac access
+ */
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+ iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
+#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
+ {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
+ {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
+#endif /* CONFIG_IWL4965 */
+
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
+
+static struct pci_driver iwl4965_driver = {
+ .name = DRV_NAME,
+ .id_table = iwl4965_hw_card_ids,
+ .probe = iwl4965_pci_probe,
+ .remove = __devexit_p(iwl4965_pci_remove),
+ .driver.pm = IWL_LEGACY_PM_OPS,
+};
+
+static int __init iwl4965_init(void)
+{
+ int ret;
+
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = iwl4965_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&iwl4965_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ iwl4965_rate_control_unregister();
+ return ret;
+}
+
+static void __exit iwl4965_exit(void)
+{
+ pci_unregister_driver(&iwl4965_driver);
+ iwl4965_rate_control_unregister();
+}
+
+module_exit(iwl4965_exit);
+module_init(iwl4965_init);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ed424574160e..17d555f2215a 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,52 @@
-config IWLWIFI
- tristate "Intel Wireless Wifi"
+config IWLAGN
+ tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
depends on PCI && MAC80211
select FW_LOADER
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
+ ---help---
+ Select to build the driver supporting the:
+
+ Intel Wireless WiFi Link Next-Gen AGN
+
+ This option enables support for use with the following hardware:
+ Intel Wireless WiFi Link 6250AGN Adapter
+ Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
+ Intel WiFi Link 1000BGN
+ Intel Wireless WiFi 5150AGN
+ Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
+ Intel 6005 Series Wi-Fi Adapters
+ Intel 6030 Series Wi-Fi Adapters
+ Intel Wireless WiFi Link 6150BGN 2 Adapter
+ Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
+ Intel 2000 Series Wi-Fi Adapters
+
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ In order to use this driver, you will need a microcode (uCode)
+ image for it. You can obtain the microcode from:
+
+ <http://intellinuxwireless.org/>.
+
+ The microcode is typically installed in /lib/firmware. You can
+ look in the hotplug script /etc/hotplug/firmware.agent to
+ determine which directory FIRMWARE_DIR is set to when the script
+ runs.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The
+ module will be called iwlagn.
menu "Debugging Options"
- depends on IWLWIFI
+ depends on IWLAGN
config IWLWIFI_DEBUG
- bool "Enable full debugging output in iwlagn and iwl3945 drivers"
- depends on IWLWIFI
+ bool "Enable full debugging output in the iwlagn driver"
+ depends on IWLAGN
---help---
This option will enable debug tracing output for the iwlwifi drivers
@@ -33,7 +71,7 @@ config IWLWIFI_DEBUG
config IWLWIFI_DEBUGFS
bool "iwlagn debugfs support"
- depends on IWLWIFI && MAC80211_DEBUGFS
+ depends on IWLAGN && MAC80211_DEBUGFS
---help---
Enable creation of debugfs files for the iwlwifi drivers. This
is a low-impact option that allows getting insight into the
@@ -41,13 +79,13 @@ config IWLWIFI_DEBUGFS
config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
bool "Experimental uCode support"
- depends on IWLWIFI && IWLWIFI_DEBUG
+ depends on IWLAGN && IWLWIFI_DEBUG
---help---
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
- depends on IWLWIFI
+ depends on IWLAGN
depends on EVENT_TRACING
help
Say Y here to trace all commands, including TX frames and IO
@@ -64,73 +102,19 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLAGN
- tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
- depends on IWLWIFI
- ---help---
- Select to build the driver supporting the:
-
- Intel Wireless WiFi Link Next-Gen AGN
-
- This driver uses the kernel's mac80211 subsystem.
-
- In order to use this driver, you will need a microcode (uCode)
- image for it. You can obtain the microcode from:
-
- <http://intellinuxwireless.org/>.
-
- The microcode is typically installed in /lib/firmware. You can
- look in the hotplug script /etc/hotplug/firmware.agent to
- determine which directory FIRMWARE_DIR is set to when the script
- runs.
-
- If you want to compile the driver as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwlagn.
-
-
-config IWL4965
- bool "Intel Wireless WiFi 4965AGN"
- depends on IWLAGN
- ---help---
- This option enables support for Intel Wireless WiFi Link 4965AGN
-
-config IWL5000
- bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
+config IWL_P2P
+ bool "iwlwifi experimental P2P support"
depends on IWLAGN
- ---help---
- This option enables support for use with the following hardware:
- Intel Wireless WiFi Link 6250AGN Adapter
- Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
- Intel WiFi Link 1000BGN
- Intel Wireless WiFi 5150AGN
- Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
- Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
- Intel WIreless WiFi Link 6050BGN Gen 2 Adapter
- Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
-
-config IWL3945
- tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
- depends on IWLWIFI
- ---help---
- Select to build the driver supporting the:
-
- Intel PRO/Wireless 3945ABG/BG Network Connection
-
- This driver uses the kernel's mac80211 subsystem.
-
- In order to use this driver, you will need a microcode (uCode)
- image for it. You can obtain the microcode from:
+ help
+ This option enables experimental P2P support for some devices,
+ based on microcode support. Since P2P support is still under
+ development, this option may enable it now for some devices that
+ later turn out not to support it due to microcode restrictions.
- <http://intellinuxwireless.org/>.
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
- The microcode is typically installed in /lib/firmware. You can
- look in the hotplug script /etc/hotplug/firmware.agent to
- determine which directory FIRMWARE_DIR is set to when the script
- runs.
+ Say Y only if you want to experiment with P2P.
- If you want to compile the driver as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 93380f97835f..9d6ee836426c 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,35 +1,23 @@
-obj-$(CONFIG_IWLWIFI) += iwlcore.o
-iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
-iwlcore-objs += iwl-scan.o iwl-led.o
-iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
-iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
-iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-
-# If 3945 is selected only, iwl-legacy.o will be added
-# to iwlcore-m above, but it needs to be built in.
-iwlcore-objs += $(iwlcore-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
-
# AGN
obj-$(CONFIG_IWLAGN) += iwlagn.o
iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
-iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
+iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o
iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
-iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
-iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
-iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
-iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
+iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwlagn-objs += iwl-scan.o iwl-led.o
+iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
+iwlagn-objs += iwl-5000.o
+iwlagn-objs += iwl-6000.o
+iwlagn-objs += iwl-1000.o
+iwlagn-objs += iwl-2000.o
+
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-# 3945
-obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
+CFLAGS_iwl-devtrace.o := -I$(src)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ba78bc8a259f..e8e1c2dc8659 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -232,8 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644
index 000000000000..d7b6126408c9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -0,0 +1,560 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-sta.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-6000-hw.h"
+#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 5
+#define IWL2000_UCODE_API_MAX 5
+#define IWL200_UCODE_API_MAX 5
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL200_UCODE_API_MIN 5
+
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
+
+#define IWL200_FW_PRE "iwlwifi-200-"
+#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
+#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+ /* want Celsius */
+ priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+ priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ if (priv->cfg->iq_invert)
+ iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+
+}
+
+static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 80,
+ .auto_corr_min_ofdm_mrc = 128,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 192,
+
+ .auto_corr_max_ofdm = 145,
+ .auto_corr_max_ofdm_mrc = 232,
+ .auto_corr_max_ofdm_x1 = 110,
+ .auto_corr_max_ofdm_mrc_x1 = 232,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 175,
+ .auto_corr_min_cck_mrc = 160,
+ .auto_corr_max_cck_mrc = 310,
+ .nrg_th_cck = 97,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
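+ /* Honor the queues_num module parameter when it falls in the valid range */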
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->base_params->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ iwl2000_set_ct_threshold(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl2000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+ if (priv->cfg->need_dc_calib)
+ priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+ if (priv->cfg->need_temp_offset_calib)
+ priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ /*
+ * MULTI-FIXME
+ * See iwl_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl6000_channel_switch_cmd cmd;
+ const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ struct iwl_host_cmd hcmd = {
+ .id = REPLY_CHANNEL_SWITCH,
+ .len = sizeof(cmd),
+ .flags = CMD_SYNC,
+ .data = &cmd,
+ };
+
+ cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+ ch = ch_switch->channel->hw_value;
+ IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+ if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+ beacon_interval)) {
+ switch_count -= (priv->ucode_beacon_time -
+ tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+ cmd.switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = is_channel_radar(ch_info);
+ else {
+ IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+ priv->switch_rxon.channel = cmd.channel;
+ priv->switch_rxon.switch_in_progress = true;
+
+ return iwl_send_cmd_sync(priv, &hcmd);
+}
+
+static struct iwl_lib_ops iwl2000_lib = {
+ .set_hw_params = iwl2000_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
+ .txq_init = iwl_hw_tx_queue_init,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_bt_setup_deferred_work,
+ .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
+ .update_chain_flags = iwl_update_chain_flags,
+ .set_channel_switch = iwl2030_hw_channel_switch,
+ .apm_ops = {
+ .init = iwl_apm_init,
+ .config = iwl2000_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
+ .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+ .release_semaphore = iwlcore_eeprom_release_semaphore,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
+ .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ },
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
+ .temp_ops = {
+ .temperature = iwlagn_temperature,
+ },
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ .bt_stats_read = iwl_ucode_bt_stats_read,
+ .reply_tx_error = iwl_reply_tx_error_read,
+ },
+ .txfifo_flush = iwlagn_txfifo_flush,
+ .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+ .tt_ops = {
+ .lower_power_detection = iwl_tt_is_low_power_state,
+ .tt_power_mode = iwl_tt_current_power_mode,
+ .ct_kill_check = iwl_check_for_ct_kill,
+ }
+};
+
+static const struct iwl_ops iwl2000_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl2030_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_bt_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl200_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl230_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_bt_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static struct iwl_base_params iwl2000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
+};
+
+
+static struct iwl_base_params iwl2030_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
+};
+
+static struct iwl_ht_params iwl2000_ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static struct iwl_bt_params iwl2030_bt_params = {
+ .bt_statistics = true,
+ /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+ .advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+ .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_sco_disable = true,
+ .bt_session_2 = true,
+};
+
+#define IWL_DEVICE_2000 \
+ .fw_name_pre = IWL2000_FW_PRE, \
+ .ucode_api_max = IWL2000_UCODE_API_MAX, \
+ .ucode_api_min = IWL2000_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl2000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2000_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .iq_invert = true \
+
+struct iwl_cfg iwl2000_2bgn_cfg = {
+ .name = "2000 Series 2x2 BGN",
+ IWL_DEVICE_2000,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2000_2bg_cfg = {
+ .name = "2000 Series 2x2 BG",
+ IWL_DEVICE_2000,
+};
+
+#define IWL_DEVICE_2030 \
+ .fw_name_pre = IWL2030_FW_PRE, \
+ .ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl2030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .iq_invert = true \
+
+struct iwl_cfg iwl2030_2bgn_cfg = {
+ .name = "2000 Series 2x2 BGN/BT",
+ IWL_DEVICE_2030,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2030_2bg_cfg = {
+ .name = "2000 Series 2x2 BG/BT",
+ IWL_DEVICE_2030,
+};
+
+#define IWL_DEVICE_6035 \
+ .fw_name_pre = IWL2030_FW_PRE, \
+ .ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \
+ .ops = &iwl2030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+struct iwl_cfg iwl6035_2agn_cfg = {
+ .name = "2000 Series 2x2 AGN/BT",
+ IWL_DEVICE_6035,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl6035_2abg_cfg = {
+ .name = "2000 Series 2x2 ABG/BT",
+ IWL_DEVICE_6035,
+};
+
+struct iwl_cfg iwl6035_2bg_cfg = {
+ .name = "2000 Series 2x2 BG/BT",
+ IWL_DEVICE_6035,
+};
+
+#define IWL_DEVICE_200 \
+ .fw_name_pre = IWL200_FW_PRE, \
+ .ucode_api_max = IWL200_UCODE_API_MAX, \
+ .ucode_api_min = IWL200_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl200_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2000_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .rx_with_siso_diversity = true \
+
+struct iwl_cfg iwl200_bg_cfg = {
+ .name = "200 Series 1x1 BG",
+ IWL_DEVICE_200,
+};
+
+struct iwl_cfg iwl200_bgn_cfg = {
+ .name = "200 Series 1x1 BGN",
+ IWL_DEVICE_200,
+ .ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_230 \
+ .fw_name_pre = IWL200_FW_PRE, \
+ .ucode_api_max = IWL200_UCODE_API_MAX, \
+ .ucode_api_min = IWL200_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl230_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .rx_with_siso_diversity = true \
+
+struct iwl_cfg iwl230_bg_cfg = {
+ .name = "200 Series 1x1 BG/BT",
+ IWL_DEVICE_230,
+};
+
+struct iwl_cfg iwl230_bgn_cfg = {
+ .name = "200 Series 1x1 BGN/BT",
+ IWL_DEVICE_230,
+ .ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6b1386..3ea31b659d1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
/* Lowest firmware API version supported */
@@ -402,8 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -471,8 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index ef36aff1bb43..a745b01c0ec1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -67,13 +67,13 @@
#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
-#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
-#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
-#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
-#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
-#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
-#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
-static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
+static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -343,8 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -354,7 +352,7 @@ static struct iwl_lib_ops iwl6000_lib = {
}
};
-static struct iwl_lib_ops iwl6000g2b_lib = {
+static struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -415,8 +413,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -430,8 +426,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
.additional_nic_config = &iwl6050_additional_nic_config,
};
-static struct iwl_nic_ops iwl6050g2_nic_ops = {
- .additional_nic_config = &iwl6050g2_additional_nic_config,
+static struct iwl_nic_ops iwl6150_nic_ops = {
+ .additional_nic_config = &iwl6150_additional_nic_config,
};
static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +447,17 @@ static const struct iwl_ops iwl6050_ops = {
.ieee80211_ops = &iwlagn_hw_ops,
};
-static const struct iwl_ops iwl6050g2_ops = {
+static const struct iwl_ops iwl6150_ops = {
.lib = &iwl6000_lib,
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
- .nic = &iwl6050g2_nic_ops,
+ .nic = &iwl6150_nic_ops,
.ieee80211_ops = &iwlagn_hw_ops,
};
-static const struct iwl_ops iwl6000g2b_ops = {
- .lib = &iwl6000g2b_lib,
+static const struct iwl_ops iwl6030_ops = {
+ .lib = &iwl6030_lib,
.hcmd = &iwlagn_bt_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
@@ -479,7 +475,6 @@ static struct iwl_base_params iwl6000_base_params = {
.shadow_ram_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -503,7 +498,6 @@ static struct iwl_base_params iwl6050_base_params = {
.shadow_ram_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -526,7 +520,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
.shadow_ram_support = true,
.led_compensation = 57,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -555,11 +548,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
};
#define IWL_DEVICE_6005 \
- .fw_name_pre = IWL6000G2A_FW_PRE, \
+ .fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
+ .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.ops = &iwl6000_ops, \
.mod_params = &iwlagn_mod_params, \
.base_params = &iwl6000_g2_base_params, \
@@ -584,12 +577,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
};
#define IWL_DEVICE_6030 \
- .fw_name_pre = IWL6000G2B_FW_PRE, \
+ .fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
- .ops = &iwl6000g2b_ops, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .ops = &iwl6030_ops, \
.mod_params = &iwlagn_mod_params, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
@@ -708,9 +701,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
- .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
- .ops = &iwl6050g2_ops,
+ .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
+ .ops = &iwl6150_ops,
.mod_params = &iwlagn_mod_params,
.base_params = &iwl6050_base_params,
.ht_params = &iwl6000_ht_params,
@@ -736,5 +729,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index d16bb5ede014..9006293e740c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
}
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
rx.general.common);
ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
rx.general.common);
} else {
@@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
stat_band24 = !!(((struct iwl_bt_notif_statistics *)
stat_resp)->flag &
STATISTICS_REPLY_FLG_BAND_24G_MSK);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index a6dbd8983dac..b500aaae53ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
int p = 0;
u32 flag;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
else
flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
ofdm = &priv->_agn.statistics_bt.rx.ofdm;
cck = &priv->_agn.statistics_bt.rx.cck;
general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
tx = &priv->_agn.statistics_bt.tx;
accum_tx = &priv->_agn.accum_statistics_bt.tx;
delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
general = &priv->_agn.statistics_bt.general.common;
dbg = &priv->_agn.statistics_bt.general.common.dbg;
div = &priv->_agn.statistics_bt.general.common.div;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 366340f3fb0f..41543ad4cb84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (ctx_bss->vif && ctx_pan->vif) {
+ if (priv->_agn.hw_roc_channel) {
+ /* both contexts must be used for this to happen */
+ slot1 = priv->_agn.hw_roc_duration;
+ slot0 = IWL_MIN_SLOT_TIME;
+ } else if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->vif->bss_conf.beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
@@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
- slot0 = dtim * bcnint * 3 - 20;
- slot1 = 20;
+ slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
} else if (!ctx_pan->vif->bss_conf.idle &&
!ctx_pan->vif->bss_conf.assoc) {
- slot1 = bcnint * 3 - 20;
- slot0 = 20;
+ slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot0 = IWL_MIN_SLOT_TIME;
}
} else if (ctx_pan->vif) {
slot0 = 0;
@@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- slot0 = slot1 * 3 - 20;
- slot1 = 20;
+ slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 1a24946bc203..c1190d965614 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
}
/* Set led register on */
-static int iwl_led_on_reg(struct iwl_priv *priv)
+void iwlagn_led_enable(struct iwl_priv *priv)
{
- IWL_DEBUG_LED(priv, "led on\n");
iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
- return 0;
-}
-
-/* Set led register off */
-static int iwl_led_off_reg(struct iwl_priv *priv)
-{
- IWL_DEBUG_LED(priv, "LED Reg off\n");
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
- return 0;
}
const struct iwl_led_ops iwlagn_led_ops = {
.cmd = iwl_send_led_cmd,
- .on = iwl_led_on_reg,
- .off = iwl_led_off_reg,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index a594e4fdc6b8..96f323dc5dd6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -28,5 +28,6 @@
#define __iwl_agn_led_h__
extern const struct iwl_led_ops iwlagn_led_ops;
+void iwlagn_led_enable(struct iwl_priv *priv);
#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 3dee87e8f55d..fd142bee9189 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
iwlagn_rx_calib_complete;
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+ /* set up notification wait support */
+ spin_lock_init(&priv->_agn.notif_wait_lock);
+ INIT_LIST_HEAD(&priv->_agn.notif_waits);
+ init_waitqueue_head(&priv->_agn.notif_waitq);
}
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -604,6 +609,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
struct iwl_mod_params iwlagn_mod_params = {
.amsdu_size_8K = 1,
.restart_fw = 1,
+ .plcp_check = true,
/* the rest are 0 by default */
};
@@ -1157,17 +1163,18 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
rx_status.rate_idx =
iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
/* TSF isn't reliable. In order to allow smooth user experience,
* this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_TSFT;*/
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
@@ -1389,15 +1396,12 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
- unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
if (priv->is_internal_short_scan)
interval = 0;
else
interval = vif->bss_conf.beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1801,26 +1805,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = {
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
- struct iwlagn_bt_cmd bt_cmd = {
+ struct iwl_basic_bt_cmd basic = {
.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
};
+ struct iwl6000_bt_cmd bt_cmd_6000;
+ struct iwl2000_bt_cmd bt_cmd_2000;
+ int ret;
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
- sizeof(bt_cmd.bt3_lookup_table));
-
- if (priv->cfg->bt_params)
- bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost;
- else
- bt_cmd.prio_boost = 0;
- bt_cmd.kill_ack_mask = priv->kill_ack_mask;
- bt_cmd.kill_cts_mask = priv->kill_cts_mask;
+ sizeof(basic.bt3_lookup_table));
+
+ if (priv->cfg->bt_params) {
+ if (priv->cfg->bt_params->bt_session_2) {
+ bt_cmd_2000.prio_boost = cpu_to_le32(
+ priv->cfg->bt_params->bt_prio_boost);
+ bt_cmd_2000.tx_prio_boost = 0;
+ bt_cmd_2000.rx_prio_boost = 0;
+ } else {
+ bt_cmd_6000.prio_boost =
+ priv->cfg->bt_params->bt_prio_boost;
+ bt_cmd_6000.tx_prio_boost = 0;
+ bt_cmd_6000.rx_prio_boost = 0;
+ }
+ } else {
+ IWL_ERR(priv, "failed to construct BT Coex Config\n");
+ return;
+ }
- bt_cmd.valid = priv->bt_valid;
- bt_cmd.tx_prio_boost = 0;
- bt_cmd.rx_prio_boost = 0;
+ basic.kill_ack_mask = priv->kill_ack_mask;
+ basic.kill_cts_mask = priv->kill_cts_mask;
+ basic.valid = priv->bt_valid;
/*
* Configure BT coex mode to "no coexistence" when the
@@ -1829,49 +1846,45 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
* IBSS mode (no proper uCode support for coex then).
*/
if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
- bt_cmd.flags = 0;
+ basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
} else {
- bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
+ basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->bt_sco_disable)
- bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+ basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
if (priv->bt_ch_announce)
- bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
- IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
+ basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
+ IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
}
- priv->bt_enable_flag = bt_cmd.flags;
+ priv->bt_enable_flag = basic.flags;
if (priv->bt_full_concurrent)
- memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
+ memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
sizeof(iwlagn_concurrent_lookup));
else
- memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
+ memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
sizeof(iwlagn_def_3w_lookup));
IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
- bt_cmd.flags ? "active" : "disabled",
+ basic.flags ? "active" : "disabled",
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
- if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
+ if (priv->cfg->bt_params->bt_session_2) {
+ memcpy(&bt_cmd_2000.basic, &basic,
+ sizeof(basic));
+ ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(bt_cmd_2000), &bt_cmd_2000);
+ } else {
+ memcpy(&bt_cmd_6000.basic, &basic,
+ sizeof(basic));
+ ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(bt_cmd_6000), &bt_cmd_6000);
+ }
+ if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
- /*
- * When we are doing a restart, need to also reconfigure BT
- * SCO to the device. If not doing a restart, bt_sco_active
- * will always be false, so there's no need to have an extra
- * variable to check for it.
- */
- if (priv->bt_sco_active) {
- struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
-
- if (priv->bt_sco_active)
- sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
- if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
- sizeof(sco_cmd), &sco_cmd))
- IWL_ERR(priv, "failed to send BT SCO command\n");
- }
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
@@ -1881,6 +1894,11 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
struct iwl_rxon_context *ctx;
int smps_request = -1;
+ if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+ /* bt coex disabled */
+ return;
+ }
+
/*
* Note: bt_traffic_load can be overridden by scan complete and
* coex profile notifications. Ignore that since only bad consequence
@@ -1991,12 +2009,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6DISCOVERABLE_POS);
- IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
- "0x%X, Connectable = 0x%X",
+ IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
+ "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
- (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
- BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
+ (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
+ BT_UART_MSG_FRAME7PAGE_POS,
+ (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
+ BT_UART_MSG_FRAME7INQUIRY_POS,
(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
@@ -2032,9 +2052,13 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
- struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
+ if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+ /* bt coex disabled */
+ return;
+ }
+
IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
@@ -2063,15 +2087,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
- if (priv->bt_sco_active !=
- (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
- priv->bt_sco_active = uart_msg->frame3 &
- BT_UART_MSG_FRAME3SCOESCO_MSK;
- if (priv->bt_sco_active)
- sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
- iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
- sizeof(sco_cmd), &sco_cmd, NULL);
- }
}
iwlagn_set_kill_msk(priv, uart_msg);
@@ -2389,3 +2404,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
}
return 0;
}
+
+/* notification wait support */
+void iwlagn_init_notification_wait(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ void (*fn)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt),
+ u8 cmd)
+{
+ wait_entry->fn = fn;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_add(&wait_entry->list, &priv->_agn.notif_waits);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
+
+signed long iwlagn_wait_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(priv->_agn.notif_waitq,
+ &wait_entry->triggered,
+ timeout);
+
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+
+ return ret;
+}
+
+void iwlagn_remove_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 75fcd30a7c13..d03b4734c892 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
};
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
+ {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+ {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+ {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+ {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+ {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+ {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+ {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
u8 ant_toggle_cnt = 0;
u8 use_ht_possible = 1;
u8 valid_tx_ant = 0;
+ struct iwl_station_priv *sta_priv =
+ container_of(lq_sta, struct iwl_station_priv, lq_sta);
struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
@@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
repeat_rate--;
}
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
lq_cmd->agg_params.agg_time_limit =
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 75e50d33ecb3..184828c72b31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -213,6 +213,7 @@ enum {
IWL_CCK_BASIC_RATES_MASK)
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
#define IWL_INVALID_VALUE -1
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 6d140bd53291..dfdbea6e8f99 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_rxon_cmd *send)
{
+ struct iwl_notification_wait disable_wait;
__le32 old_filter = send->filter_flags;
u8 old_dev_type = send->dev_type;
int ret;
+ iwlagn_init_notification_wait(priv, &disable_wait, NULL,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE);
+
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
@@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags = old_filter;
send->dev_type = old_dev_type;
- if (ret)
+ if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-
- /* FIXME: WAIT FOR PAN DISABLE */
- msleep(300);
+ iwlagn_remove_notification(priv, &disable_wait);
+ } else {
+ signed long wait_res;
+
+ wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ if (wait_res == 0) {
+ IWL_ERR(priv, "Timed out waiting for PAN disable\n");
+ ret = -EIO;
+ }
+ }
return ret;
}
@@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
+ struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
+
+ iwl_set_rxon_channel(priv, chan, ctx);
+ iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+ ctx->staging.filter_flags |=
+ RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ new_assoc = true;
+
+ if (memcmp(&ctx->staging, &ctx->active,
+ sizeof(ctx->staging)) == 0)
+ return 0;
+ }
+
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames.
*
- * FIXME: which RXON requires a tune? Can we optimise this out in
- * some cases?
+ * We are expected to set the TX power here whenever the channel changes.
*/
- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+ ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
if (ret) {
IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
return ret;
@@ -444,6 +471,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
struct iwl_rxon_context *tmp;
struct ieee80211_sta *sta;
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
lockdep_assert_held(&priv->mutex);
@@ -452,23 +480,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
case NL80211_IFTYPE_STATION:
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- need_multiple = true;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- need_multiple = false;
- if (maxstreams <= 1)
- need_multiple = false;
- } else {
+ if (!sta) {
/*
* If at all, this can only happen through a race
* when the AP disconnects us while we're still
@@ -476,7 +488,46 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
* will soon tell us about that.
*/
need_multiple = false;
+ rcu_read_unlock();
+ break;
+ }
+
+ ht_cap = &sta->ht_cap;
+
+ need_multiple = true;
+
+ /*
+ * If the peer advertises no support for receiving 2 and 3
+ * stream MCS rates, it can't be transmitting them either.
+ */
+ if (ht_cap->mcs.rx_mask[1] == 0 &&
+ ht_cap->mcs.rx_mask[2] == 0) {
+ need_multiple = false;
+ } else if (!(ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_DEFINED)) {
+ /* If it can't TX MCS at all ... */
+ need_multiple = false;
+ } else if (ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_RX_DIFF) {
+ int maxstreams;
+
+ /*
+ * But if it can receive them, it might still not
+ * be able to transmit them, which is what we need
+ * to check here -- so check the number of streams
+ * it advertises for TX (if different from RX).
+ */
+
+ maxstreams = (ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
+ maxstreams >>=
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if (maxstreams <= 1)
+ need_multiple = false;
}
+
rcu_read_unlock();
break;
case NL80211_IFTYPE_ADHOC:
@@ -546,12 +597,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
- iwl_led_associate(priv);
priv->timestamp = bss_conf->timestamp;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_led_disassociate(priv);
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 24a11b8f73bc..a709d05c5868 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
unsigned long flags;
bool is_agg = false;
- if (info->control.vif)
+ /*
+ * If the frame needs to go out off-channel, then
+ * we'll have put the PAN context to that channel,
+ * so make the frame go out there.
+ */
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ else if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
spin_lock_irqsave(&priv->lock, flags);
@@ -940,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
*/
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
- int ch;
+ int ch, txq_id;
unsigned long flags;
/* Turn off all Tx DMA fifos */
@@ -959,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!priv->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == priv->cmd_queue)
+ iwl_cmd_queue_unmap(priv);
+ else
+ iwl_tx_queue_unmap(priv, txq_id);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 24dabcd2a36c..d807e5e2b718 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
{
int ret = 0;
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
/* initialize uCode was loaded... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c1cfd9952e52..f189bbe78fa6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -59,6 +59,7 @@
#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
+#include "iwl-agn-led.h"
/******************************************************************************
@@ -85,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
static int iwlagn_ant_coupling;
static bool iwlagn_bt_ch_announce = 1;
@@ -461,8 +461,21 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
if (palive->is_valid == UCODE_VALID_OK)
queue_delayed_work(priv->workqueue, pwork,
msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
+ else {
+ IWL_WARN(priv, "%s uCode did not respond OK.\n",
+ (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
+ "init" : "runtime");
+ /*
+ * If we fail to load the init uCode,
+ * try to load it again.
+ * We should not get into this situation, but if it
+ * does happen, we must not move on to the "runtime"
+ * uCode without properly calibrating the device.
+ */
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE)
+ priv->ucode_type = UCODE_NONE;
+ queue_work(priv->workqueue, &priv->restart);
+ }
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -699,18 +712,18 @@ static void iwl_bg_ucode_trace(unsigned long data)
}
}
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
+ struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
+ u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le32_to_cpu(beacon->ibss_mgr_status),
le32_to_cpu(beacon->high_tsf),
@@ -813,7 +826,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
/*
* The same handler is used for both the REPLY to a discrete
@@ -846,7 +859,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
* the appropriate handlers, including command responses,
* frame-received notifications, and other notifications.
*/
-void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_priv *priv)
{
struct iwl_rx_mem_buffer *rxb;
struct iwl_rx_packet *pkt;
@@ -910,6 +923,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
(pkt->hdr.cmd != REPLY_TX);
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ if (!list_empty(&priv->_agn.notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(&priv->_agn.notif_wait_lock);
+ list_for_each_entry(w, &priv->_agn.notif_waits, list) {
+ if (w->cmd == pkt->hdr.cmd) {
+ w->triggered = true;
+ if (w->fn)
+ w->fn(priv, pkt);
+ }
+ }
+ spin_unlock(&priv->_agn.notif_wait_lock);
+
+ wake_up_all(&priv->_agn.notif_waitq);
+ }
+
/* Based on type of command response or notification,
* handle those that need handling via function in
* rx_handlers table. See iwl_setup_rx_handlers() */
@@ -1379,66 +1413,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
iwl_enable_rfkill_int(priv);
}
-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
-#define ACK_CNT_RATIO (50)
-#define BA_TIMEOUT_CNT (5)
-#define BA_TIMEOUT_MAX (16)
-
-/**
- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
- *
- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
- * operation state.
- */
-bool iwl_good_ack_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- int actual_ack_cnt_delta, expected_ack_cnt_delta;
- int ba_timeout_delta;
-
- actual_ack_cnt_delta =
- le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
- le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
- expected_ack_cnt_delta =
- le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
- le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
- ba_timeout_delta =
- le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
- le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
- if ((priv->_agn.agg_tids_count > 0) &&
- (expected_ack_cnt_delta > 0) &&
- (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
- < ACK_CNT_RATIO) &&
- (ba_timeout_delta > BA_TIMEOUT_CNT)) {
- IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
- " expected_ack_cnt = %d\n",
- actual_ack_cnt_delta, expected_ack_cnt_delta);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /*
- * This is ifdef'ed on DEBUGFS because otherwise the
- * statistics aren't available. If DEBUGFS is set but
- * DEBUG is not, these will just compile out.
- */
- IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
- priv->_agn.delta_statistics.tx.rx_detected_cnt);
- IWL_DEBUG_RADIO(priv,
- "ack_or_ba_timeout_collision delta = %d\n",
- priv->_agn.delta_statistics.tx.
- ack_or_ba_timeout_collision);
-#endif
- IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
- ba_timeout_delta);
- if (!actual_ack_cnt_delta &&
- (ba_timeout_delta >= BA_TIMEOUT_MAX))
- rc = false;
- }
- return rc;
-}
-
-
/*****************************************************************************
*
* sysfs attributes
@@ -2632,13 +2606,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
/* Initialize uCode has loaded Runtime uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "runtime" alive if code weren't properly loaded. */
@@ -2710,9 +2677,11 @@ static void iwl_alive_start(struct iwl_priv *priv)
priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
}
- if (priv->cfg->bt_params &&
- !priv->cfg->bt_params->advanced_bt_coexist) {
- /* Configure Bluetooth device coexistence support */
+ if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
+ !priv->cfg->bt_params->advanced_bt_coexist)) {
+ /*
+ * default is 2-wire BT coexistence support
+ */
priv->cfg->ops->hcmd->send_bt_config(priv);
}
@@ -2726,8 +2695,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
/* At this point, the NIC is initialized and operational */
iwl_rf_kill_ct_config(priv);
- iwl_leds_init(priv);
-
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
wake_up_interruptible(&priv->wait_command_queue);
@@ -2769,7 +2736,6 @@ static void __iwl_down(struct iwl_priv *priv)
priv->cfg->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
- priv->bt_sco_active = false;
priv->bt_full_concurrent = false;
priv->bt_ci_compliance = 0;
@@ -3063,8 +3029,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
}
if (priv->start_calib) {
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
iwl_chain_noise_calibration(priv,
(void *)&priv->_agn.statistics_bt);
iwl_sensitivity_calibration(priv,
@@ -3089,7 +3054,7 @@ static void iwl_bg_restart(struct work_struct *data)
if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
struct iwl_rxon_context *ctx;
- bool bt_sco, bt_full_concurrent;
+ bool bt_full_concurrent;
u8 bt_ci_compliance;
u8 bt_load;
u8 bt_status;
@@ -3108,7 +3073,6 @@ static void iwl_bg_restart(struct work_struct *data)
* re-configure the hw when we reconfigure the BT
* command.
*/
- bt_sco = priv->bt_sco_active;
bt_full_concurrent = priv->bt_full_concurrent;
bt_ci_compliance = priv->bt_ci_compliance;
bt_load = priv->bt_traffic_load;
@@ -3116,7 +3080,6 @@ static void iwl_bg_restart(struct work_struct *data)
__iwl_down(priv);
- priv->bt_sco_active = bt_sco;
priv->bt_full_concurrent = bt_full_concurrent;
priv->bt_ci_compliance = bt_ci_compliance;
priv->bt_traffic_load = bt_load;
@@ -3178,6 +3141,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
if (!priv->cfg->base_params->broken_powersave)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
@@ -3194,8 +3159,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
}
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
/*
* For now, disable PS by default because it affects
@@ -3219,6 +3187,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
+ iwl_leds_init(priv);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3263,7 +3233,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
}
}
- iwl_led_start(priv);
+ iwlagn_led_enable(priv);
out:
priv->is_open = 1;
@@ -3294,7 +3264,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
@@ -3307,7 +3277,6 @@ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
IWL_DEBUG_MACDUMP(priv, "leave\n");
- return NETDEV_TX_OK;
}
void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3345,6 +3314,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
@@ -3399,10 +3376,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct iwl_priv *priv = hw->priv;
int ret = -EINVAL;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -3457,11 +3436,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
}
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /*
+ * If the limit is 0, it wasn't initialised yet, so
+ * use the default. We can do that since we take the
+ * minimum below, and we don't want to go above our
+ * default due to hardware restrictions.
+ */
+ if (sta_priv->max_agg_bufsize == 0)
+ sta_priv->max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ sta_priv->max_agg_bufsize =
+ min(sta_priv->max_agg_bufsize, buf_size);
+
if (priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation) {
- struct iwl_station_priv *sta_priv =
- (void *) sta->drv_priv;
-
/*
* switch to RTS/CTS if it is the preferred protection
* method for HT traffic
@@ -3469,9 +3465,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
sta_priv->lq_sta.lq.general_params.flags |=
LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
+
+ sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize;
+
+ iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
ret = 0;
break;
}
@@ -3709,6 +3709,95 @@ done:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static void iwlagn_disable_roc(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_set_rxon_channel(priv, chan, ctx);
+ iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+
+ priv->_agn.hw_roc_channel = NULL;
+
+ iwlcore_commit_rxon(priv, ctx);
+
+ ctx->is_active = false;
+}
+
+static void iwlagn_bg_roc_done(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ _agn.hw_roc_work.work);
+
+ mutex_lock(&priv->mutex);
+ ieee80211_remain_on_channel_expired(priv->hw);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ int duration)
+{
+ struct iwl_priv *priv = hw->priv;
+ int err = 0;
+
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+ BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
+ test_bit(STATUS_SCAN_HW, &priv->status)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
+ priv->_agn.hw_roc_channel = channel;
+ priv->_agn.hw_roc_chantype = channel_type;
+ priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
+ iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+ queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
+ msecs_to_jiffies(duration + 20));
+
+ msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
+ ieee80211_ready_on_channel(priv->hw);
+
+ out:
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
+
+ mutex_lock(&priv->mutex);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
/*****************************************************************************
*
* driver setup and teardown
@@ -3730,6 +3819,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
+ INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
iwl_setup_scan_deferred_work(priv);
@@ -3876,7 +3966,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
kfree(priv->scan_cmd);
}
-#ifdef CONFIG_IWL5000
struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
.start = iwlagn_mac_start,
@@ -3898,14 +3987,15 @@ struct ieee80211_ops iwlagn_hw_ops = {
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwl_mac_tx_last_beacon,
+ .remain_on_channel = iwl_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
};
-#endif
static void iwl_hw_detect(struct iwl_priv *priv)
{
priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
- pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+ priv->rev_id = priv->pci_dev->revision;
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
@@ -3967,12 +4057,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cfg->mod_params->disable_hw_scan) {
dev_printk(KERN_DEBUG, &(pdev->dev),
"sw scan support is deprecated\n");
-#ifdef CONFIG_IWL5000
iwlagn_hw_ops.hw_scan = NULL;
-#endif
-#ifdef CONFIG_IWL4965
- iwl4965_hw_ops.hw_scan = NULL;
-#endif
}
hw = iwl_alloc_all(cfg);
@@ -4025,6 +4110,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_IWL_P2P
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+#endif
priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4272,6 +4361,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
* we need to set STATUS_EXIT_PENDING bit.
*/
set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ iwl_leds_exit(priv);
+
if (priv->mac80211_registered) {
ieee80211_unregister_hw(priv->hw);
priv->mac80211_registered = 0;
@@ -4344,12 +4436,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
-#ifdef CONFIG_IWL4965
- {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
- {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
-#endif /* CONFIG_IWL4965 */
-#ifdef CONFIG_IWL5000
-/* 5100 Series WiFi */
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4492,7 +4578,48 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
{IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
-#endif /* CONFIG_IWL5000 */
+/* 2x00 Series */
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+
+/* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+/* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
+
+/* 200 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
+
+/* 230 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
{0}
};
@@ -4592,3 +4719,9 @@ MODULE_PARM_DESC(antenna_coupling,
module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
MODULE_PARM_DESC(bt_ch_inhibition,
"Disable BT channel inhibition (default: enable)");
+
+module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
+MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
+
+module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
+MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index da303585f801..b5a169be48e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg;
extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl200_bg_cfg;
+extern struct iwl_cfg iwl200_bgn_cfg;
+extern struct iwl_cfg iwl230_bg_cfg;
+extern struct iwl_cfg iwl230_bgn_cfg;
extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -110,8 +121,6 @@ void iwl_disable_ict(struct iwl_priv *priv);
int iwl_alloc_isr_ict(struct iwl_priv *priv);
void iwl_free_isr_ict(struct iwl_priv *priv);
irqreturn_t iwl_isr_ict(int irq, void *data);
-bool iwl_good_ack_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
/* tx queue */
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -185,7 +194,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_handle(struct iwl_priv *priv);
/* tx */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -238,8 +246,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* rx */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
-bool iwl_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -330,8 +336,23 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
+/* notification wait support */
+void __acquires(wait_entry)
+iwlagn_init_notification_wait(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ void (*fn)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt),
+ u8 cmd);
+signed long __releases(wait_entry)
+iwlagn_wait_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwlagn_remove_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry);
+
/* mac80211 handlers (for 4965) */
-int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwlagn_mac_start(struct ieee80211_hw *hw);
void iwlagn_mac_stop(struct ieee80211_hw *hw);
void iwlagn_configure_filter(struct ieee80211_hw *hw,
@@ -349,7 +370,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f893d4a6aa87..03cfb74da2bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -178,7 +178,6 @@ enum {
REPLY_BT_COEX_PRIO_TABLE = 0xcc,
REPLY_BT_COEX_PROT_ENV = 0xcd,
REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
- REPLY_BT_COEX_SCO = 0xcf,
/* PAN commands */
REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
REPLY_MAX = 0xff
};
@@ -2477,7 +2477,7 @@ struct iwl_bt_cmd {
IWLAGN_BT_VALID_BT4_TIMES | \
IWLAGN_BT_VALID_3W_LUT)
-struct iwlagn_bt_cmd {
+struct iwl_basic_bt_cmd {
u8 flags;
u8 ledtime; /* unused */
u8 max_kill;
@@ -2490,6 +2490,10 @@ struct iwlagn_bt_cmd {
__le32 bt3_lookup_table[12];
__le16 bt4_decision_time; /* unused */
__le16 valid;
+};
+
+struct iwl6000_bt_cmd {
+ struct iwl_basic_bt_cmd basic;
u8 prio_boost;
/*
* set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
@@ -2499,6 +2503,18 @@ struct iwlagn_bt_cmd {
__le16 rx_prio_boost; /* SW boost of WiFi rx priority */
};
+struct iwl2000_bt_cmd {
+ struct iwl_basic_bt_cmd basic;
+ __le32 prio_boost;
+ /*
+ * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
+ * if configure the following patterns
+ */
+ u8 reserved;
+ u8 tx_prio_boost; /* SW boost of WiFi tx priority */
+ __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
+};
+
#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
struct iwlagn_bt_sco_cmd {
@@ -3082,6 +3098,13 @@ struct iwl4965_beacon_notif {
__le32 ibss_mgr_status;
} __packed;
+struct iwlagn_beacon_notif {
+ struct iwlagn_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
/*
* REPLY_TX_BEACON = 0x91 (command, has simple generic response)
*/
@@ -4143,6 +4166,10 @@ enum iwl_bt_coex_profile_traffic_load {
*/
};
+#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1
+#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2
+
+/* BT UART message - Share Part (BT -> WiFi) */
#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
(0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
@@ -4227,9 +4254,12 @@ enum iwl_bt_coex_profile_traffic_load {
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
(0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \
- (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
+#define BT_UART_MSG_FRAME7PAGE_POS (3)
+#define BT_UART_MSG_FRAME7PAGE_MSK \
+ (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
+#define BT_UART_MSG_FRAME7INQUIRY_POS (4)
+#define BT_UART_MSG_FRAME7INQUIRY_MSK \
+ (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
(0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
@@ -4237,6 +4267,83 @@ enum iwl_bt_coex_profile_traffic_load {
#define BT_UART_MSG_FRAME7RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
+/* BT Session Activity 2 UART message (BT -> WiFi) */
+#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5)
+#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
+#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6)
+#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
+
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
+ (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
+#define BT_UART_MSG_2_FRAME2RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME2RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0)
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \
+ (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
+#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5)
+#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
+#define BT_UART_MSG_2_FRAME3RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME3RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0)
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \
+ (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
+#define BT_UART_MSG_2_FRAME4RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME4RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0)
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \
+ (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
+#define BT_UART_MSG_2_FRAME5RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME5RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
+ (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
+#define BT_UART_MSG_2_FRAME6RFU_POS (5)
+#define BT_UART_MSG_2_FRAME6RFU_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
+#define BT_UART_MSG_2_FRAME6RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME6RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
+ (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
+#define BT_UART_MSG_2_FRAME7RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME7RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
+
struct iwl_bt_uart_msg {
u8 header;
@@ -4369,6 +4476,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
+/*
+ * Minimum slot time in TU
+ */
+#define IWL_MIN_SLOT_TIME 20
+
/**
* struct iwl_wipan_slot
* @width: Time in TU
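
The BT UART message definitions above all follow the same POS/MSK convention: the mask selects the bits in place and the position right-aligns them. A minimal user-space sketch of decoding one such field, using the FRAME7 page/inquiry bits introduced by this patch (the sample byte value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Same convention as the driver: MSK selects bits in place, POS right-aligns them. */
#define BT_UART_MSG_FRAME7PAGE_POS     (3)
#define BT_UART_MSG_FRAME7PAGE_MSK     (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
#define BT_UART_MSG_FRAME7INQUIRY_POS  (4)
#define BT_UART_MSG_FRAME7INQUIRY_MSK  (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)

/* Extract a right-aligned field value from a raw frame byte. */
static uint8_t field(uint8_t raw, uint8_t msk, uint8_t pos)
{
        return (raw & msk) >> pos;
}

int main(void)
{
        uint8_t frame7 = 0x18;  /* made-up sample: page and inquiry bits set */

        printf("page: %u\n", field(frame7, BT_UART_MSG_FRAME7PAGE_MSK,
                                    BT_UART_MSG_FRAME7PAGE_POS));
        printf("inquiry: %u\n", field(frame7, BT_UART_MSG_FRAME7INQUIRY_MSK,
                                      BT_UART_MSG_FRAME7INQUIRY_POS));
        return 0;
}
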
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index efbde1f1a8bf..4bd342060254 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -43,11 +43,6 @@
#include "iwl-helpers.h"
-MODULE_DESCRIPTION("iwl core");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
/*
* set bt_coex_active to true, uCode will do kill/defer
* every time the priority line is asserted (BT is sending signals on the
@@ -65,15 +60,12 @@ MODULE_LICENSE("GPL");
* default: bt_coex_active = true (BT_COEX_ENABLE)
*/
bool bt_coex_active = true;
-EXPORT_SYMBOL_GPL(bt_coex_active);
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
u32 iwl_debug_level;
-EXPORT_SYMBOL(iwl_debug_level);
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwl_bcast_addr);
/* This function both allocates and initializes hw and priv. */
@@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
out:
return hw;
}
-EXPORT_SYMBOL(iwl_alloc_all);
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -219,15 +210,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if (!is_channel_valid(ch))
continue;
- if (is_channel_a_band(ch))
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- else
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
+ sband = &priv->bands[ch->band];
geo_ch = &sband->channels[sband->n_channels++];
geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel);
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
geo_ch->max_power = ch->max_power_avg;
geo_ch->max_antenna_gain = 0xff;
geo_ch->hw_value = ch->channel;
@@ -275,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
return 0;
}
-EXPORT_SYMBOL(iwlcore_init_geos);
/*
* iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -286,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
kfree(priv->ieee_rates);
clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
-EXPORT_SYMBOL(iwlcore_free_geos);
static bool iwl_is_channel_extension(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -331,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
le16_to_cpu(ctx->staging.channel),
ctx->ht.extension_chan_offset);
}
-EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
@@ -432,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
sizeof(ctx->timing), &ctx->timing);
}
-EXPORT_SYMBOL(iwl_send_rxon_timing);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt)
@@ -445,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}
-EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
/* validate RXON structure is valid */
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -518,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
return 0;
}
-EXPORT_SYMBOL(iwl_check_rxon_cmd);
/**
* iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -582,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
return 0;
}
-EXPORT_SYMBOL(iwl_full_rxon_required);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
@@ -596,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
else
return IWL_RATE_6M_PLCP;
}
-EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
struct iwl_ht_config *ht_conf,
@@ -673,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
for_each_context(priv, ctx)
_iwl_set_rxon_ht(priv, ht_conf, ctx);
}
-EXPORT_SYMBOL(iwl_set_rxon_ht);
/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -714,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
return channel;
}
-EXPORT_SYMBOL(iwl_get_single_channel_number);
/**
* iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -745,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
return 0;
}
-EXPORT_SYMBOL(iwl_set_rxon_channel);
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -769,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
}
}
-EXPORT_SYMBOL(iwl_set_flags_for_band);
/*
* initialize rxon structure with default values from eeprom
@@ -841,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
-EXPORT_SYMBOL(iwl_connection_init_rx_config);
void iwl_set_rate(struct iwl_priv *priv)
{
@@ -874,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
}
-EXPORT_SYMBOL(iwl_set_rate);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
@@ -894,7 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
mutex_unlock(&priv->mutex);
}
}
-EXPORT_SYMBOL(iwl_chswitch_done);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
@@ -922,7 +895,6 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
}
}
}
-EXPORT_SYMBOL(iwl_rx_csa);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -944,13 +916,15 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
-EXPORT_SYMBOL(iwl_print_rx_config_cmd);
#endif
/**
* iwl_irq_handle_error - called for HW or SW error interrupt from card
*/
void iwl_irq_handle_error(struct iwl_priv *priv)
{
+ unsigned int reload_msec;
+ unsigned long reload_jiffies;
+
/* Set the FW error flag -- cleared on iwl_down */
set_bit(STATUS_FW_ERROR, &priv->status);
@@ -994,6 +968,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
* commands by clearing the INIT status bit */
clear_bit(STATUS_READY, &priv->status);
+ /*
+ * If the firmware keeps reloading, it indicates something is
+ * seriously wrong and the firmware is unable to recover from it.
+ * Instead of retrying endlessly, which would fill the syslog and
+ * hang the system, just stop restarting.
+ */
+ reload_jiffies = jiffies;
+ reload_msec = jiffies_to_msecs((long) reload_jiffies -
+ (long) priv->reload_jiffies);
+ priv->reload_jiffies = reload_jiffies;
+ if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
+ priv->reload_count++;
+ if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
+ IWL_ERR(priv, "BUG_ON, Stop restarting\n");
+ return;
+ }
+ } else
+ priv->reload_count = 0;
+
if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
"Restarting adapter due to uCode error.\n");
@@ -1002,7 +995,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->restart);
}
}
-EXPORT_SYMBOL(iwl_irq_handle_error);
static int iwl_apm_stop_master(struct iwl_priv *priv)
{
@@ -1039,7 +1031,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
*/
iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-EXPORT_SYMBOL(iwl_apm_stop);
/*
@@ -1154,13 +1145,14 @@ int iwl_apm_init(struct iwl_priv *priv)
out:
return ret;
}
-EXPORT_SYMBOL(iwl_apm_init);
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
int ret;
s8 prev_tx_power;
+ bool defer;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
lockdep_assert_held(&priv->mutex);
@@ -1188,10 +1180,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
if (!iwl_is_ready_rf(priv))
return -EIO;
- /* scan complete use tx_power_next, need to be updated */
+ /* scan complete and commit_rxon both use the tx_power_next value;
+ * it always needs to be updated with the newest request */
priv->tx_power_next = tx_power;
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
- IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
return 0;
}
@@ -1207,7 +1204,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
}
return ret;
}
-EXPORT_SYMBOL(iwl_set_tx_power);
void iwl_send_bt_config(struct iwl_priv *priv)
{
@@ -1231,7 +1227,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
-EXPORT_SYMBOL(iwl_send_bt_config);
int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
{
@@ -1249,7 +1244,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
-EXPORT_SYMBOL(iwl_send_statistics_request);
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -1261,7 +1255,6 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
-EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -1273,7 +1266,6 @@ void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
get_cmd_string(pkt->hdr.cmd));
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
-EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
void iwl_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -1288,7 +1280,6 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
le32_to_cpu(pkt->u.err_resp.error_info));
}
-EXPORT_SYMBOL(iwl_rx_reply_error);
void iwl_clear_isr_stats(struct iwl_priv *priv)
{
@@ -1340,7 +1331,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
-EXPORT_SYMBOL(iwl_mac_conf_tx);
int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
@@ -1348,7 +1338,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
return priv->ibss_manager == IWL_IBSS_MANAGER;
}
-EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
@@ -1403,9 +1392,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *tmp, *ctx = NULL;
int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
+ viftype, vif->addr);
mutex_lock(&priv->mutex);
@@ -1429,7 +1419,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
continue;
}
- if (!(possible_modes & BIT(vif->type)))
+ if (!(possible_modes & BIT(viftype)))
continue;
/* have maybe usable context w/o interface */
@@ -1457,7 +1447,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
}
-EXPORT_SYMBOL(iwl_mac_add_interface);
static void iwl_teardown_interface(struct iwl_priv *priv,
struct ieee80211_vif *vif,
@@ -1510,7 +1499,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-EXPORT_SYMBOL(iwl_mac_remove_interface);
int iwl_alloc_txq_mem(struct iwl_priv *priv)
{
@@ -1525,14 +1513,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
}
return 0;
}
-EXPORT_SYMBOL(iwl_alloc_txq_mem);
void iwl_free_txq_mem(struct iwl_priv *priv)
{
kfree(priv->txq);
priv->txq = NULL;
}
-EXPORT_SYMBOL(iwl_free_txq_mem);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1571,7 +1557,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
iwl_reset_traffic_log(priv);
return 0;
}
-EXPORT_SYMBOL(iwl_alloc_traffic_mem);
void iwl_free_traffic_mem(struct iwl_priv *priv)
{
@@ -1581,7 +1566,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
kfree(priv->rx_traffic);
priv->rx_traffic = NULL;
}
-EXPORT_SYMBOL(iwl_free_traffic_mem);
void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header)
@@ -1606,7 +1590,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
}
}
-EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header)
@@ -1631,7 +1614,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
}
}
-EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
const char *get_mgmt_string(int cmd)
{
@@ -1675,7 +1657,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
{
memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
- priv->led_tpt = 0;
}
/*
@@ -1768,9 +1749,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
stats->data_cnt++;
stats->data_bytes += len;
}
- iwl_leds_background(priv);
}
-EXPORT_SYMBOL(iwl_update_stats);
#endif
static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -1909,7 +1888,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&priv->mutex);
return err;
}
-EXPORT_SYMBOL(iwl_mac_change_interface);
/*
* On every watchdog tick we check (latest) time stamp. If it does not
@@ -1981,7 +1959,6 @@ void iwl_bg_watchdog(unsigned long data)
mod_timer(&priv->watchdog, jiffies +
msecs_to_jiffies(IWL_WD_TICK(timeout)));
}
-EXPORT_SYMBOL(iwl_bg_watchdog);
void iwl_setup_watchdog(struct iwl_priv *priv)
{
@@ -1993,7 +1970,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
else
del_timer(&priv->watchdog);
}
-EXPORT_SYMBOL(iwl_setup_watchdog);
/*
* extended beacon time format
@@ -2019,7 +1995,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
-EXPORT_SYMBOL(iwl_usecs_to_beacons);
/* base is usually what we get from ucode with each received frame,
* the same as HW timer counter counting down
@@ -2047,7 +2022,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
return cpu_to_le32(res);
}
-EXPORT_SYMBOL(iwl_add_beacon_time);
#ifdef CONFIG_PM
@@ -2067,7 +2041,6 @@ int iwl_pci_suspend(struct device *device)
return 0;
}
-EXPORT_SYMBOL(iwl_pci_suspend);
int iwl_pci_resume(struct device *device)
{
@@ -2096,7 +2069,6 @@ int iwl_pci_resume(struct device *device)
return 0;
}
-EXPORT_SYMBOL(iwl_pci_resume);
const struct dev_pm_ops iwl_pm_ops = {
.suspend = iwl_pci_suspend,
@@ -2106,6 +2078,5 @@ const struct dev_pm_ops iwl_pm_ops = {
.poweroff = iwl_pci_suspend,
.restore = iwl_pci_resume,
};
-EXPORT_SYMBOL(iwl_pm_ops);
#endif /* CONFIG_PM */
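
The reload-throttling logic added to iwl_irq_handle_error() above can be read in isolation: consecutive restarts arriving within IWL_MIN_RELOAD_DURATION (1000 ms) bump a counter, and once IWL_MAX_CONTINUE_RELOAD_CNT (4) is reached the restart is abandoned. Below is a stand-alone, illustrative sketch of that policy; the struct and function names are not the driver's, and millisecond timestamps are fed in by hand instead of being derived from jiffies.

#include <stdio.h>
#include <stdbool.h>

#define IWL_MIN_RELOAD_DURATION         1000    /* ms */
#define IWL_MAX_CONTINUE_RELOAD_CNT     4

struct reload_state {
        unsigned long last_ms;  /* timestamp of the previous error, in ms */
        int count;              /* consecutive "fast" reloads seen so far */
};

/* Return true if the driver should still attempt a restart at time now_ms. */
static bool reload_allowed(struct reload_state *s, unsigned long now_ms)
{
        unsigned long delta = now_ms - s->last_ms;

        s->last_ms = now_ms;
        if (delta <= IWL_MIN_RELOAD_DURATION) {
                if (++s->count >= IWL_MAX_CONTINUE_RELOAD_CNT)
                        return false;   /* give up: firmware cannot recover */
        } else {
                s->count = 0;           /* enough quiet time, reset the counter */
        }
        return true;
}

int main(void)
{
        struct reload_state s = { 0, 0 };
        /* errors arriving 100 ms apart: restarts are refused once the limit is hit */
        unsigned long t[] = { 1000, 1100, 1200, 1300, 1400 };

        for (int i = 0; i < 5; i++)
                printf("error at %lums -> restart %s\n", t[i],
                       reload_allowed(&s, t[i]) ? "allowed" : "refused");
        return 0;
}
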
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a3474376fdbc..d47f3a87fce4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -210,12 +210,7 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
- /* check for plcp health */
- bool (*check_plcp_health)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
- /* check for ack health */
- bool (*check_ack_health)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
+
int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
@@ -227,8 +222,6 @@ struct iwl_lib_ops {
struct iwl_led_ops {
int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
- int (*on)(struct iwl_priv *priv);
- int (*off)(struct iwl_priv *priv);
};
/* NIC specific ops */
@@ -263,6 +256,8 @@ struct iwl_mod_params {
int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
int antenna; /* def: 0 = both antennas (use diversity) */
int restart_fw; /* def: 1 = restart firmware */
+ bool plcp_check; /* def: true = enable plcp health check */
+ bool ack_check; /* def: false = disable ack health check */
};
/*
@@ -307,7 +302,6 @@ struct iwl_base_params {
u16 led_compensation;
const bool broken_powersave;
int chain_noise_num_beacons;
- const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
const bool support_wimax_coexist;
@@ -342,6 +336,7 @@ struct iwl_bt_params {
u8 ampdu_factor;
u8 ampdu_density;
bool bt_sco_disable;
+ bool bt_session_2;
};
/*
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -366,6 +361,7 @@ struct iwl_ht_params {
* @adv_pm: advance power management
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -415,6 +411,7 @@ struct iwl_cfg {
const bool adv_pm;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
+ const bool iq_invert;
};
/***************************
@@ -494,18 +491,6 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
__le16 fc, u16 len)
{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
- if (ieee80211_is_data(fc)) {
- /* data */
- stats->data_bytes += len;
- }
- iwl_leds_background(priv);
}
#endif
/*****************************************************
@@ -522,6 +507,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
* RX
******************************************************/
void iwl_cmd_queue_free(struct iwl_priv *priv);
+void iwl_cmd_queue_unmap(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
@@ -530,8 +516,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* Handlers */
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
-void iwl_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
@@ -546,6 +530,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
void iwl_setup_watchdog(struct iwl_priv *priv);
/*****************************************************
* TX power
@@ -755,6 +740,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
return priv->hw->wiphy->bands[band];
}
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+ return priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist;
+}
+
+static inline bool iwl_bt_statistics(struct iwl_priv *priv)
+{
+ return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
+}
+
extern bool bt_coex_active;
extern bool bt_siso_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b80bf7dff55b..f52bc040bcbf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -290,7 +290,7 @@
/* HW REV */
-#define CSR_HW_REV_TYPE_MSK (0x00000F0)
+#define CSR_HW_REV_TYPE_MSK (0x00001F0)
#define CSR_HW_REV_TYPE_3945 (0x00000D0)
#define CSR_HW_REV_TYPE_4965 (0x0000000)
#define CSR_HW_REV_TYPE_5300 (0x0000020)
@@ -300,9 +300,15 @@
#define CSR_HW_REV_TYPE_1000 (0x0000060)
#define CSR_HW_REV_TYPE_6x00 (0x0000070)
#define CSR_HW_REV_TYPE_6x50 (0x0000080)
-#define CSR_HW_REV_TYPE_6x50g2 (0x0000084)
-#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
-#define CSR_HW_REV_TYPE_NONE (0x00000F0)
+#define CSR_HW_REV_TYPE_6150 (0x0000084)
+#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00 (0x0000100)
+#define CSR_HW_REV_TYPE_200 (0x0000110)
+#define CSR_HW_REV_TYPE_230 (0x0000120)
+#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
@@ -376,6 +382,8 @@
#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
+#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
+
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 6fe80b5e7a15..8842411f1cf3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
return ret;
}
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
static ssize_t iwl_dbgfs_sram_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- u32 val;
+ u32 val = 0;
char *buf;
ssize_t ret;
- int i;
+ int i = 0;
+ bool device_format = false;
+ int offset = 0;
+ int len = 0;
int pos = 0;
+ int sram;
struct iwl_priv *priv = file->private_data;
size_t bufsz;
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
else
priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+ len = priv->dbgfs_sram_len;
+
+ if (len == -4) {
+ device_format = true;
+ len = 4;
+ }
+
+ bufsz = 50 + len * 4;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
+ len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
+
+ /* adjust sram address since reads are only on even u32 boundaries */
+ offset = priv->dbgfs_sram_offset & 0x3;
+ sram = priv->dbgfs_sram_offset & ~0x3;
+
+ /* read the first u32 from sram */
+ val = iwl_read_targ_mem(priv, sram);
+
+ for (; len; len--) {
+ /* put the address at the start of every line */
+ if (i == 0)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%08X: ", sram + offset);
+
+ if (device_format)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%02x", (val >> (8 * (3 - offset))) & 0xff);
+ else
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%02x ", (val >> (8 * offset)) & 0xff);
+
+ /* if all bytes processed, read the next u32 from sram */
+ if (++offset == 4) {
+ sram += 4;
+ offset = 0;
+ val = iwl_read_targ_mem(priv, sram);
}
- if (!(i % 16))
+
+ /* put in extra spaces and split lines for human readability */
+ if (++i == 16) {
+ i = 0;
pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ } else if (!(i & 7)) {
+ pos += scnprintf(buf + pos, bufsz - pos, " ");
+ } else if (!(i & 3)) {
+ pos += scnprintf(buf + pos, bufsz - pos, " ");
+ }
}
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ if (i)
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
priv->dbgfs_sram_offset = offset;
priv->dbgfs_sram_len = len;
+ } else if (sscanf(buf, "%x", &offset) == 1) {
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = -4;
} else {
priv->dbgfs_sram_offset = 0;
priv->dbgfs_sram_len = 0;
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[256];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "allow blinking: %s\n",
- (priv->allow_blinking) ? "True" : "False");
- if (priv->allow_blinking) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "Led blinking rate: %u\n",
- priv->last_blink_rate);
- pos += scnprintf(buf + pos, bufsz - pos,
- "Last blink time: %lu\n",
- priv->last_blink_time);
- }
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_FILE_OPS(led);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
"last traffic notif: %d\n",
priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
- "sco_active: %d, kill_ack_mask: %x, "
- "kill_cts_mask: %x\n",
- priv->bt_ch_announce, priv->bt_sco_active,
- priv->kill_ack_mask, priv->kill_cts_mask);
+ "kill_ack_mask: %x, kill_cts_mask: %x\n",
+ priv->bt_ch_announce, priv->kill_ack_mask,
+ priv->kill_cts_mask);
pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
switch (priv->bt_traffic_load) {
@@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
if (!priv->cfg->base_params->broken_powersave) {
DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
S_IWUSR | S_IRUSR);
@@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
if (priv->cfg->base_params->ucode_tracing)
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
- if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
if (priv->cfg->base_params->sensitivity_calib_by_driver)
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
@@ -1783,7 +1788,6 @@ err:
iwl_dbgfs_unregister(priv);
return -ENOMEM;
}
-EXPORT_SYMBOL(iwl_dbgfs_register);
/**
* Remove the debugfs files and directories
@@ -1797,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
debugfs_remove_recursive(priv->debugfs_dir);
priv->debugfs_dir = NULL;
}
-EXPORT_SYMBOL(iwl_dbgfs_unregister);
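
The rewritten sram_read handler above walks the requested window one byte at a time, refetching a u32 from SRAM every four bytes and breaking the output into 16-byte lines with extra spacing every 4 and 8 bytes. The following user-space sketch mirrors that formatting loop with the SRAM reads replaced by a local byte array; the data, addresses, and helper names are made up, and a little-endian host is assumed so that the byte-shift arithmetic matches the array contents.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Fake "SRAM": little-endian u32 reads out of a byte array. */
static uint8_t sram_img[64];

static uint32_t read_targ_mem(uint32_t addr)
{
        uint32_t val;
        memcpy(&val, &sram_img[addr], sizeof(val));
        return val;
}

/* Dump `len` bytes starting at (possibly unaligned) `addr`, 16 per line. */
static void dump_sram(uint32_t addr, int len)
{
        int offset = addr & 0x3;        /* byte position inside the first u32 */
        uint32_t sram = addr & ~0x3;    /* aligned address actually read */
        uint32_t val = read_targ_mem(sram);
        int i = 0;

        for (; len; len--) {
                if (i == 0)
                        printf("%08X: ", sram + offset);
                printf("%02x ", (val >> (8 * offset)) & 0xff);
                if (++offset == 4) {    /* used up this u32, fetch the next one */
                        sram += 4;
                        offset = 0;
                        val = read_targ_mem(sram);
                }
                if (++i == 16) {        /* 16 bytes per output line */
                        i = 0;
                        printf("\n");
                } else if (!(i & 7)) {  /* wider gap mid-line */
                        printf("  ");
                } else if (!(i & 3)) {
                        printf(" ");
                }
        }
        if (i)
                printf("\n");
}

int main(void)
{
        for (int i = 0; i < 64; i++)
                sram_img[i] = (uint8_t)i;       /* made-up contents */
        dump_sram(0x2, 40);                     /* unaligned start, 40 bytes */
        return 0;
}
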
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 8dda67850af4..58165c769cf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -34,6 +34,8 @@
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
#include <net/ieee80211_radiotap.h>
#include "iwl-eeprom.h"
@@ -41,14 +43,14 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
#include "iwl-agn-tt.h"
+#define U32_PAD(n) ((4-(n))&0x3)
+
struct iwl_tx_queue;
/* CT-KILL constants */
@@ -136,7 +138,7 @@ struct iwl_queue {
* space more than this */
int high_mark; /* high watermark, stop queue if free
* space less than this */
-} __packed;
+};
/* One for each TFD */
struct iwl_tx_info {
@@ -507,6 +509,7 @@ struct iwl_station_priv {
atomic_t pending_frames;
bool client;
bool asleep;
+ u8 max_agg_bufsize;
};
/**
@@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics {
u32 unknown;
};
-#ifdef CONFIG_IWLWIFI_DEBUGFS
/* management statistics */
enum iwl_mgmt_stats {
MANAGEMENT_ASSOC_REQ = 0,
@@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats {
};
struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 mgmt[MANAGEMENT_MAX];
u32 ctrl[CONTROL_MAX];
u32 data_cnt;
u64 data_bytes;
-};
-#else
-struct traffic_stats {
- u64 data_bytes;
-};
#endif
+};
/*
* iwl_switch_rxon: "channel switch" structure
@@ -1111,6 +1110,11 @@ struct iwl_event_log {
/* BT Antenna Coupling Threshold (dB) */
#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
+/* Firmware reload counter and Timestamp */
+#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */
+#define IWL_MAX_CONTINUE_RELOAD_CNT 4
+
+
enum iwl_reset {
IWL_RF_RESET = 0,
IWL_FW_RESET,
@@ -1139,6 +1143,33 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly, to wait for a
+ * notification declare it on the stack, and call
+ * iwlagn_init_notification_wait() with appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and to wait for that then
+ * call iwlagn_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
+
+ u8 cmd;
+ bool triggered;
+};
+
enum iwl_rxon_context_id {
IWL_RXON_CTX_BSS,
IWL_RXON_CTX_PAN,
@@ -1236,6 +1267,10 @@ struct iwl_priv {
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+ /* firmware reload counter and timestamp */
+ unsigned long reload_jiffies;
+ int reload_count;
+
/* we allocate array of iwl_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
struct iwl_channel_info *channel_info; /* channel info array */
@@ -1310,11 +1345,6 @@ struct iwl_priv {
struct iwl_init_alive_resp card_alive_init;
struct iwl_alive_resp card_alive;
- unsigned long last_blink_time;
- u8 last_blink_rate;
- u8 allow_blinking;
- u64 led_tpt;
-
u16 active_rate;
u8 start_calib;
@@ -1463,6 +1493,17 @@ struct iwl_priv {
struct iwl_bt_notif_statistics delta_statistics_bt;
struct iwl_bt_notif_statistics max_delta_bt;
#endif
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ /* remain-on-channel offload support */
+ struct ieee80211_channel *hw_roc_channel;
+ struct delayed_work hw_roc_work;
+ enum nl80211_channel_type hw_roc_chantype;
+ int hw_roc_duration;
} _agn;
#endif
};
@@ -1472,7 +1513,6 @@ struct iwl_priv {
u8 bt_status;
u8 bt_traffic_load, last_bt_traffic_load;
bool bt_ch_announce;
- bool bt_sco_active;
bool bt_full_concurrent;
bool bt_ant_couple_ok;
__le32 kill_ack_mask;
@@ -1547,6 +1587,10 @@ struct iwl_priv {
bool hw_ready;
struct iwl_event_log event_log;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
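
The notification-wait members added to struct iwl_priv implement a simple one-shot pattern: the caller puts an entry on a list, kicks off some uCode activity, and sleeps until the RX path marks the entry as triggered (or a timeout expires). The sketch below is only a rough user-space analogue of that pattern built on pthreads; it is not the driver's API, the names are illustrative, and it omits the list of entries and the timeout handling that the real code has.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* One-shot wait entry: a loose analogue of struct iwl_notification_wait. */
struct notif_wait {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool triggered;
        unsigned char cmd;      /* command ID the waiter cares about */
};

static void notif_init(struct notif_wait *w, unsigned char cmd)
{
        pthread_mutex_init(&w->lock, NULL);
        pthread_cond_init(&w->cond, NULL);
        w->triggered = false;
        w->cmd = cmd;
}

/* Called by the "RX path" when a packet with a matching cmd arrives. */
static void notif_trigger(struct notif_wait *w, unsigned char cmd)
{
        pthread_mutex_lock(&w->lock);
        if (cmd == w->cmd) {
                w->triggered = true;
                pthread_cond_signal(&w->cond);
        }
        pthread_mutex_unlock(&w->lock);
}

/* Called by the waiter after it has kicked off the uCode activity. */
static void notif_wait_for(struct notif_wait *w)
{
        pthread_mutex_lock(&w->lock);
        while (!w->triggered)
                pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);
}

static void *rx_thread(void *arg)
{
        usleep(100 * 1000);             /* pretend the uCode takes a while */
        notif_trigger(arg, 0xbd);       /* e.g. REPLY_WIPAN_DEACTIVATION_COMPLETE */
        return NULL;
}

int main(void)
{
        struct notif_wait w;
        pthread_t rx;

        notif_init(&w, 0xbd);           /* declare + init before triggering work */
        pthread_create(&rx, NULL, rx_thread, &w);
        notif_wait_for(&w);             /* blocks until the RX path signals us */
        printf("notification received\n");
        pthread_join(rx, NULL);
        return 0;
}
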
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 358cfd7e5af1..833194a2c639 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
return &priv->eeprom[offset];
}
-EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
static int iwl_init_otp_access(struct iwl_priv *priv)
{
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
{
return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
}
-EXPORT_SYMBOL(iwl_eeprom_query_addr);
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
{
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
return 0;
return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
}
-EXPORT_SYMBOL(iwl_eeprom_query16);
/**
* iwl_eeprom_init - read EEPROM contents
@@ -509,14 +506,12 @@ err:
alloc_err:
return ret;
}
-EXPORT_SYMBOL(iwl_eeprom_init);
void iwl_eeprom_free(struct iwl_priv *priv)
{
kfree(priv->eeprom);
priv->eeprom = NULL;
}
-EXPORT_SYMBOL(iwl_eeprom_free);
static void iwl_init_band_reference(const struct iwl_priv *priv,
int eep_band, int *eeprom_ch_count,
@@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
return 0;
}
-EXPORT_SYMBOL(iwl_init_channel_map);
/*
* iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
kfree(priv->channel_info);
priv->channel_count = 0;
}
-EXPORT_SYMBOL(iwl_free_channel_map);
/**
* iwl_get_channel_info - Find driver's private channel info
@@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
return NULL;
}
-EXPORT_SYMBOL(iwl_get_channel_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 9e6f31355eee..98aa8af01192 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
-/* 6x50g2 Specific */
-#define EEPROM_6050G2_TX_POWER_VERSION (6)
-#define EEPROM_6050G2_EEPROM_VERSION (0x553)
+/* 6150 Specific */
+#define EEPROM_6150_TX_POWER_VERSION (6)
+#define EEPROM_6150_EEPROM_VERSION (0x553)
+
+/* 6x05 Specific */
+#define EEPROM_6005_TX_POWER_VERSION (6)
+#define EEPROM_6005_EEPROM_VERSION (0x709)
+
+/* 6x30 Specific */
+#define EEPROM_6030_TX_POWER_VERSION (6)
+#define EEPROM_6030_EEPROM_VERSION (0x709)
+
+/* 2x00 Specific */
+#define EEPROM_2000_TX_POWER_VERSION (6)
+#define EEPROM_2000_EEPROM_VERSION (0x805)
+
+/* 6x35 Specific */
+#define EEPROM_6035_TX_POWER_VERSION (6)
+#define EEPROM_6035_EEPROM_VERSION (0x753)
-/* 6x00g2 Specific */
-#define EEPROM_6000G2_TX_POWER_VERSION (6)
-#define EEPROM_6000G2_EEPROM_VERSION (0x709)
/* OTP */
/* lower blocks contain EEPROM image and calibration data */
@@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
/* 2.4 GHz */
extern const u8 iwl_eeprom_band_1[14];
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index c373b53babea..02499f684683 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -108,12 +108,12 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WIPAN_WEPKEY);
IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+ IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
default:
return "UNKNOWN";
}
}
-EXPORT_SYMBOL(get_cmd_string);
#define HOST_COMPLETE_TIMEOUT (HZ / 2)
@@ -252,7 +252,6 @@ out:
mutex_unlock(&priv->sync_cmd_mutex);
return ret;
}
-EXPORT_SYMBOL(iwl_send_cmd_sync);
int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
@@ -261,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return iwl_send_cmd_sync(priv, cmd);
}
-EXPORT_SYMBOL(iwl_send_cmd);
int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
{
@@ -273,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
return iwl_send_cmd_sync(priv, &cmd);
}
-EXPORT_SYMBOL(iwl_send_cmd_pdu);
int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
u8 id, u16 len, const void *data,
@@ -292,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
return iwl_send_cmd_async(priv, &cmd);
}
-EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46ccdf406e8e..d7f2a0bb32c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
"1=On(RF On)/Off(RF Off), 2=blinking");
-static const struct {
- u16 tpt; /* Mb/s */
- u8 on_time;
- u8 off_time;
-} blink_tbl[] =
-{
- {300, 25, 25},
- {200, 40, 40},
- {100, 55, 55},
- {70, 65, 65},
- {50, 75, 75},
- {20, 85, 85},
- {10, 95, 95},
- {5, 110, 110},
- {1, 130, 130},
- {0, 167, 167},
- /* SOLID_ON */
- {-1, IWL_LED_SOLID, 0}
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+ { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+ { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+ { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+ { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+ { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+ { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+ { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+ { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+ { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+ { .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
-#define IWL_1MB_RATE (128 * 1024)
-#define IWL_LED_THRESHOLD (16)
-#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
-#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
-
/*
* Adjust led blink rate to compensate on a MAC Clock difference on every HW
* Led blink rate analysis showed an average deviation of 0% on 3945,
@@ -97,133 +85,102 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
}
/* Set led pattern command */
-static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
+static int iwl_led_cmd(struct iwl_priv *priv,
+ unsigned long on,
+ unsigned long off)
{
struct iwl_led_cmd led_cmd = {
.id = IWL_LED_LINK,
.interval = IWL_DEF_LED_INTRVL
};
+ int ret;
+
+ if (!test_bit(STATUS_READY, &priv->status))
+ return -EBUSY;
- BUG_ON(idx > IWL_MAX_BLINK_TBL);
+ if (priv->blink_on == on && priv->blink_off == off)
+ return 0;
- IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
+ IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
priv->cfg->base_params->led_compensation);
- led_cmd.on =
- iwl_blink_compensation(priv, blink_tbl[idx].on_time,
+ led_cmd.on = iwl_blink_compensation(priv, on,
priv->cfg->base_params->led_compensation);
- led_cmd.off =
- iwl_blink_compensation(priv, blink_tbl[idx].off_time,
+ led_cmd.off = iwl_blink_compensation(priv, off,
priv->cfg->base_params->led_compensation);
- return priv->cfg->ops->led->cmd(priv, &led_cmd);
+ ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+ if (!ret) {
+ priv->blink_on = on;
+ priv->blink_off = off;
+ }
+ return ret;
}
-int iwl_led_start(struct iwl_priv *priv)
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
- return priv->cfg->ops->led->on(priv);
-}
-EXPORT_SYMBOL(iwl_led_start);
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+ unsigned long on = 0;
-int iwl_led_associate(struct iwl_priv *priv)
-{
- IWL_DEBUG_LED(priv, "Associated\n");
- if (priv->cfg->led_mode == IWL_LED_BLINK)
- priv->allow_blinking = 1;
- priv->last_blink_time = jiffies;
+ if (brightness > 0)
+ on = IWL_LED_SOLID;
- return 0;
+ iwl_led_cmd(priv, on, 0);
}
-EXPORT_SYMBOL(iwl_led_associate);
-int iwl_led_disassociate(struct iwl_priv *priv)
+static int iwl_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
{
- priv->allow_blinking = 0;
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
- return 0;
+ return iwl_led_cmd(priv, *delay_on, *delay_off);
}
-EXPORT_SYMBOL(iwl_led_disassociate);
-/*
- * calculate blink rate according to last second Tx/Rx activities
- */
-static int iwl_get_blink_rate(struct iwl_priv *priv)
-{
- int i;
- /* count both tx and rx traffic to be able to
- * handle traffic in either direction
- */
- u64 current_tpt = priv->tx_stats.data_bytes +
- priv->rx_stats.data_bytes;
- s64 tpt = current_tpt - priv->led_tpt;
-
- if (tpt < 0) /* wraparound */
- tpt = -tpt;
-
- IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
- (long long)tpt,
- (unsigned long long)current_tpt);
- priv->led_tpt = current_tpt;
-
- if (!priv->allow_blinking)
- i = IWL_MAX_BLINK_TBL;
- else
- for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
- if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
- break;
-
- IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
- return i;
-}
-
-/*
- * this function called from handler. Since setting Led command can
- * happen very frequent we postpone led command to be called from
- * REPLY handler so we know ucode is up
- */
-void iwl_leds_background(struct iwl_priv *priv)
+void iwl_leds_init(struct iwl_priv *priv)
{
- u8 blink_idx;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- priv->last_blink_time = 0;
- return;
- }
- if (iwl_is_rfkill(priv)) {
- priv->last_blink_time = 0;
- return;
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IWL_LED_DEFAULT)
+ mode = priv->cfg->led_mode;
+
+ priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(priv->hw->wiphy));
+ priv->led.brightness_set = iwl_led_brightness_set;
+ priv->led.blink_set = iwl_led_blink_set;
+ priv->led.max_brightness = 1;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IWL_LED_BLINK:
+ priv->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(priv->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ iwl_blink, ARRAY_SIZE(iwl_blink));
+ break;
+ case IWL_LED_RF_STATE:
+ priv->led.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+ break;
}
- if (!priv->allow_blinking) {
- priv->last_blink_time = 0;
- if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
- priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
- iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
- }
+ ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+ if (ret) {
+ kfree(priv->led.name);
return;
}
- if (!priv->last_blink_time ||
- !time_after(jiffies, priv->last_blink_time +
- msecs_to_jiffies(1000)))
- return;
-
- blink_idx = iwl_get_blink_rate(priv);
-
- /* call only if blink rate change */
- if (blink_idx != priv->last_blink_rate)
- iwl_led_pattern(priv, blink_idx);
- priv->last_blink_time = jiffies;
- priv->last_blink_rate = blink_idx;
+ priv->led_registered = true;
}
-EXPORT_SYMBOL(iwl_leds_background);
-void iwl_leds_init(struct iwl_priv *priv)
+void iwl_leds_exit(struct iwl_priv *priv)
{
- priv->last_blink_rate = 0;
- priv->last_blink_time = 0;
- priv->allow_blinking = 0;
- if (led_mode != IWL_LED_DEFAULT &&
- led_mode != priv->cfg->led_mode)
- priv->cfg->led_mode = led_mode;
+ if (!priv->led_registered)
+ return;
+
+ led_classdev_unregister(&priv->led);
+ kfree(priv->led.name);
}
-EXPORT_SYMBOL(iwl_leds_init);
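
The new iwl_blink[] table maps throughput thresholds (in whatever units mac80211's throughput LED trigger compares against) to a blink period in milliseconds: higher throughput, faster blink. The selection itself happens inside mac80211 once the trigger is registered; the loop below is only an illustrative sketch of how such a table is scanned for the highest threshold the measured throughput exceeds, with the values copied from the table above and made-up sample measurements.

#include <stdio.h>

struct tpt_blink {
        int throughput;         /* threshold, same units as the measured value */
        int blink_time;         /* blink period in ms */
};

/* Mirrors the iwl_blink[] table added above (values copied from the diff). */
static const struct tpt_blink blink_tbl[] = {
        { 0 * 1024 - 1, 334 },
        { 1 * 1024 - 1, 260 },
        { 5 * 1024 - 1, 220 },
        { 10 * 1024 - 1, 190 },
        { 20 * 1024 - 1, 170 },
        { 50 * 1024 - 1, 150 },
        { 70 * 1024 - 1, 130 },
        { 100 * 1024 - 1, 110 },
        { 200 * 1024 - 1, 80 },
        { 300 * 1024 - 1, 50 },
};

/* Pick the blink period for the highest threshold the throughput exceeds. */
static int blink_time_for(int tpt)
{
        int t = blink_tbl[0].blink_time;

        for (unsigned i = 0; i < sizeof(blink_tbl) / sizeof(blink_tbl[0]); i++)
                if (tpt > blink_tbl[i].throughput)
                        t = blink_tbl[i].blink_time;
        return t;
}

int main(void)
{
        int samples[] = { 0, 3000, 80000, 400000 };     /* made-up measurements */

        for (int i = 0; i < 4; i++)
                printf("tpt=%d -> blink %d ms\n", samples[i],
                       blink_time_for(samples[i]));
        return 0;
}
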
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 9079b33486ef..101eef12b3bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -31,23 +31,14 @@
struct iwl_priv;
#define IWL_LED_SOLID 11
-#define IWL_LED_NAME_LEN 31
#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
-enum led_type {
- IWL_LED_TRG_TX,
- IWL_LED_TRG_RX,
- IWL_LED_TRG_ASSOC,
- IWL_LED_TRG_RADIO,
- IWL_LED_TRG_MAX,
-};
-
/*
* LED mode
- * IWL_LED_DEFAULT: use system default
+ * IWL_LED_DEFAULT: use device default
* IWL_LED_RF_STATE: turn LED on/off based on RF state
* LED ON = RF ON
* LED OFF = RF OFF
@@ -60,9 +51,6 @@ enum iwl_led_mode {
};
void iwl_leds_init(struct iwl_priv *priv);
-void iwl_leds_background(struct iwl_priv *priv);
-int iwl_led_start(struct iwl_priv *priv);
-int iwl_led_associate(struct iwl_priv *priv);
-int iwl_led_disassociate(struct iwl_priv *priv);
+void iwl_leds_exit(struct iwl_priv *priv);
#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
deleted file mode 100644
index bb1a742a98a0..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ /dev/null
@@ -1,662 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-legacy.h"
-
-static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
-
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active,
- ctx->qos_data.def_qos_parm.qos_flags);
-
- iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
- sizeof(struct iwl_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = conf->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct iwl_rxon_context *ctx;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
- bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- channel->hw_value, changed);
-
- if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
- test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_SMPS |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- /* mac80211 uses static for non-HT which is what we want */
- priv->current_ht_config.smps = conf->smps_mode;
-
- /*
- * Recalculate chain counts.
- *
- * If monitor mode is enabled then mac80211 will
- * set up the SM PS mode to OFF if an HT channel is
- * configured.
- */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* during scanning mac80211 will delay channel setting until
- * scan finish with changed = 0
- */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = channel->hw_value;
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- /* Configure HT40 channels */
- if (ctx->ht.enabled != conf_is_ht(conf)) {
- ctx->ht.enabled = conf_is_ht(conf);
- ht_changed[ctx->ctxid] = true;
- }
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- /*
- * Default to no protection. Protection mode will
- * later be set from BSS config in iwl_ht_conf
- */
- ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
- /* if we are switching from ht to 2.4 clear flags
- * from any ht related info since 2.4 does not
- * support ht */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_set_rxon_channel(priv, channel, ctx);
- iwl_set_rxon_ht(priv, ht_conf);
-
- iwl_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->cfg->ops->legacy->update_bcast_stations)
- ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_set_rate(priv);
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_IDLE)) {
- ret = iwl_power_update_mode(priv, false);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_set_tx_power(priv, conf->power_level, false);
- }
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- for_each_context(priv, ctx) {
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- iwlcore_commit_rxon(priv, ctx);
- else
- IWL_DEBUG_INFO(priv,
- "Not re-sending same RXON configuration.\n");
- if (ht_changed[ctx->ctxid])
- iwl_update_qos(priv, ctx);
- }
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- /* IBSS can only be the IWL_RXON_CTX_BSS context */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* new association get rid of ibss beacon skb */
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = NULL;
-
- priv->timestamp = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_scan_cancel_timeout(priv, 100);
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* we are restarting association process
- * clear RXON_FILTER_ASSOC_MSK bit
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
-
- iwl_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_ht_conf(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_ASSOC(priv, "enter:\n");
-
- if (!ctx->ht.enabled)
- return;
-
- ctx->ht.protection =
- bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
- !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- ht_conf->single_chain_sufficient = false;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- ht_conf->single_chain_sufficient = true;
- if (maxstreams <= 1)
- ht_conf->single_chain_sufficient = true;
- } else {
- /*
- * If at all, this can only happen through a race
- * when the AP disconnects us while we're still
- * setting up the connection, in that case mac80211
- * will soon tell us about that.
- */
- ht_conf->single_chain_sufficient = true;
- }
- rcu_read_unlock();
- break;
- case NL80211_IFTYPE_ADHOC:
- ht_conf->single_chain_sufficient = true;
- break;
- default:
- break;
- }
-
- IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_set_no_assoc(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- iwl_led_disassociate(priv);
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * sent
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- iwlcore_commit_rxon(priv, ctx);
-}
-
-static void iwlcore_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
- if (!skb)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "update beacon but no beacon context!\n");
- dev_kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = skb;
-
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return;
- }
-
- priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- int ret;
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- if (!iwl_is_alive(priv))
- return;
-
- mutex_lock(&priv->mutex);
-
- if (changes & BSS_CHANGED_QOS) {
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- iwl_update_qos(priv, ctx);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
- if (vif->bss_conf.enable_beacon)
- priv->beacon_ctx = ctx;
- else
- priv->beacon_ctx = NULL;
- }
-
- if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = ieee80211_beacon_get(hw, vif);
- }
-
- if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
- iwl_send_rxon_timing(priv, ctx);
-
- if (changes & BSS_CHANGED_BSSID) {
- IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
- /*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
- */
- if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* currently needed in a few places */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- }
-
- }
-
- /*
- * This needs to be after setting the BSSID in case
- * mac80211 decides to do both changes at once because
- * it will invoke post_associate.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
- iwlcore_beacon_update(hw, vif);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
- }
-
- if (changes & BSS_CHANGED_BASIC_RATES) {
- /* XXX use this information
- *
- * To do that, remove code from iwl_set_rate() and put something
- * like this here:
- *
- if (A-band)
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates;
- else
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
- bss_conf->basic_rates & 0xF;
- */
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_ht_conf(priv, vif);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
-
- iwl_led_associate(priv);
-
- if (!iwl_is_rfkill(priv))
- priv->cfg->ops->legacy->post_associate(priv);
- } else
- iwl_set_no_assoc(priv, vif);
- }
-
- if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
- IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
- changes);
- ret = iwl_send_rxon_assoc(priv, ctx);
- if (!ret) {
- /* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active,
- &ctx->staging,
- sizeof(struct iwl_rxon_cmd));
- }
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwl_led_associate(priv);
- priv->cfg->ops->legacy->config_ap(priv);
- } else
- iwl_set_no_assoc(priv, vif);
- }
-
- if (changes & BSS_CHANGED_IBSS) {
- ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
- if (ret)
- IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
- bss_conf->bssid);
- }
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_isr_legacy(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 inta_fh;
- unsigned long flags;
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta && !inta_fh) {
- IWL_DEBUG_ISR(priv,
- "Ignore interrupt, inta == 0, inta_fh == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
-
- inta &= ~CSR_INT_BIT_SCD;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta || inta_fh))
- tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_isr_legacy);
-
-/*
- * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
- * function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- *tx_flags |= TX_CMD_FLG_RTS_MSK;
- *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- if (!ieee80211_is_mgmt(fc))
- return;
-
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- break;
- }
- } else if (info->control.rates[0].flags &
- IEEE80211_TX_RC_USE_CTS_PROTECT) {
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 1eec18d909d8..576795e2c75b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (iwl_advanced_bt_coexist(priv)) {
if (!priv->cfg->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
@@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (iwl_advanced_bt_coexist(priv)) {
if (!priv->cfg->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
@@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->cfg->base_params->broken_powersave)
iwl_power_sleep_cam_cmd(priv, cmd);
- else if (priv->cfg->base_params->supports_idle &&
- priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+ else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
priv->cfg->ops->lib->tt_ops.tt_power_mode &&
@@ -428,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
return ret;
}
-EXPORT_SYMBOL(iwl_power_set_mode);
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
@@ -437,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
iwl_power_build_cmd(priv, &cmd);
return iwl_power_set_mode(priv, &cmd, force);
}
-EXPORT_SYMBOL(iwl_power_update_mode);
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
@@ -451,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
memset(&priv->power_data.sleep_cmd, 0,
sizeof(priv->power_data.sleep_cmd));
}
-EXPORT_SYMBOL(iwl_power_initialize);
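The hunks above fold the open-coded "bt_params && bt_params->advanced_bt_coexist" test into a single iwl_advanced_bt_coexist() call. The helper's definition is not part of this diff; the standalone sketch below shows the shape it presumably has, with stand-in struct definitions so it compiles on its own.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the relevant parts of the driver's config structures. */
struct iwl_bt_params { bool advanced_bt_coexist; };
struct iwl_cfg       { const struct iwl_bt_params *bt_params; };
struct iwl_priv      { const struct iwl_cfg *cfg; };

/* Presumed shape of the helper the patch switches to: it folds the
 * NULL check and the flag test that the old call sites open-coded. */
static inline bool iwl_advanced_bt_coexist(const struct iwl_priv *priv)
{
	return priv->cfg->bt_params &&
	       priv->cfg->bt_params->advanced_bt_coexist;
}

int main(void)
{
	struct iwl_bt_params bt = { .advanced_bt_coexist = true };
	struct iwl_cfg cfg_bt = { .bt_params = &bt };
	struct iwl_cfg cfg_plain = { .bt_params = NULL };
	struct iwl_priv with_bt = { .cfg = &cfg_bt };
	struct iwl_priv without = { .cfg = &cfg_plain };

	printf("%d %d\n", iwl_advanced_bt_coexist(&with_bt),
	       iwl_advanced_bt_coexist(&without));
	return 0;
}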
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 87a6fd84d4d2..566e2d979ce3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -37,6 +37,7 @@
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
+#include "iwl-agn-calib.h"
/************************** RX-FUNCTIONS ****************************/
/*
* Rx theory of operation
@@ -118,7 +119,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
s = 0;
return s;
}
-EXPORT_SYMBOL(iwl_rx_queue_space);
/**
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -170,7 +170,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
@@ -211,8 +210,6 @@ err_rb:
err_bd:
return -ENOMEM;
}
-EXPORT_SYMBOL(iwl_rx_queue_alloc);
-
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -229,40 +226,397 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
}
-EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
-void iwl_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is low and the aggregated BA timeout retry count
+ * exceeds BA_TIMEOUT_MAX, reload the firmware and bring the system back to
+ * its normal operation state.
+ */
+static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
- if (iwl_is_any_associated(priv)) {
- if (priv->cfg->ops->lib->check_ack_health) {
- if (!priv->cfg->ops->lib->check_ack_health(
- priv, pkt)) {
- /*
- * low ack count detected
- * restart Firmware
- */
- IWL_ERR(priv, "low ack count detected, "
- "restart firmware\n");
- if (!iwl_force_reset(priv, IWL_FW_RESET, false))
- return;
- }
+ int actual_delta, expected_delta, ba_timeout_delta;
+ struct statistics_tx *cur, *old;
+
+ if (priv->_agn.agg_tids_count)
+ return true;
+
+ if (iwl_bt_statistics(priv)) {
+ cur = &pkt->u.stats_bt.tx;
+ old = &priv->_agn.statistics_bt.tx;
+ } else {
+ cur = &pkt->u.stats.tx;
+ old = &priv->_agn.statistics.tx;
+ }
+
+ actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
+ le32_to_cpu(old->actual_ack_cnt);
+ expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
+ le32_to_cpu(old->expected_ack_cnt);
+
+ /* Values should not be negative, but we do not trust the firmware */
+ if (actual_delta <= 0 || expected_delta <= 0)
+ return true;
+
+ ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
+ le32_to_cpu(old->agg.ba_timeout);
+
+ if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
+ ba_timeout_delta > BA_TIMEOUT_CNT) {
+ IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
+ actual_delta, expected_delta, ba_timeout_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /*
+ * This is ifdef'ed on DEBUGFS because otherwise the
+ * statistics aren't available. If DEBUGFS is set but
+ * DEBUG is not, these will just compile out.
+ */
+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
+ priv->_agn.delta_statistics.tx.rx_detected_cnt);
+ IWL_DEBUG_RADIO(priv,
+ "ack_or_ba_timeout_collision delta %d\n",
+ priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
+#endif
+
+ if (ba_timeout_delta >= BA_TIMEOUT_MAX)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * iwl_good_plcp_health - checks for plcp errors.
+ *
+ * When the plcp error rate exceeds the threshold, reset the radio
+ * to improve throughput.
+ */
+static bool iwl_good_plcp_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
+
+ if (priv->cfg->base_params->plcp_delta_threshold ==
+ IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+ IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+ return rc;
+ }
+
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ struct statistics_rx_phy *ofdm;
+ struct statistics_rx_ht_phy *ofdm_ht;
+
+ if (iwl_bt_statistics(priv)) {
+ ofdm = &pkt->u.stats_bt.rx.ofdm;
+ ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
+ combined_plcp_delta =
+ (le32_to_cpu(ofdm->plcp_err) -
+ le32_to_cpu(priv->_agn.statistics_bt.
+ rx.ofdm.plcp_err)) +
+ (le32_to_cpu(ofdm_ht->plcp_err) -
+ le32_to_cpu(priv->_agn.statistics_bt.
+ rx.ofdm_ht.plcp_err));
+ } else {
+ ofdm = &pkt->u.stats.rx.ofdm;
+ ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
+ combined_plcp_delta =
+ (le32_to_cpu(ofdm->plcp_err) -
+ le32_to_cpu(priv->_agn.statistics.
+ rx.ofdm.plcp_err)) +
+ (le32_to_cpu(ofdm_ht->plcp_err) -
+ le32_to_cpu(priv->_agn.statistics.
+ rx.ofdm_ht.plcp_err));
}
- if (priv->cfg->ops->lib->check_plcp_health) {
- if (!priv->cfg->ops->lib->check_plcp_health(
- priv, pkt)) {
- /*
- * high plcp error detected
- * reset Radio
- */
- iwl_force_reset(priv, IWL_RF_RESET, false);
- }
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->base_params->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold,
+ * the following data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * Received ofdm_ht.plcp_err,
+ * Current ofdm_ht.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %u, %u, %u, %d, %u mSecs\n",
+ priv->cfg->base_params->plcp_delta_threshold,
+ le32_to_cpu(ofdm->plcp_err),
+ le32_to_cpu(ofdm->plcp_err),
+ le32_to_cpu(ofdm_ht->plcp_err),
+ le32_to_cpu(ofdm_ht->plcp_err),
+ combined_plcp_delta, plcp_msec);
+
+ rc = false;
}
}
+ return rc;
+}
+
+static void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
+{
+ const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ !iwl_is_any_associated(priv))
+ return;
+
+ if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
+ IWL_ERR(priv, "low ack count detected, restart firmware\n");
+ if (!iwl_force_reset(priv, IWL_FW_RESET, false))
+ return;
+ }
+
+ if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt))
+ iwl_force_reset(priv, IWL_RF_RESET, false);
+}
+
+/* Calculate the noise level, based on measurements during network silence
+ * just before the beacon arrives. This measurement can only be done when we
+ * know exactly when to expect beacons, i.e. only while associated. */
+static void iwl_rx_calc_noise(struct iwl_priv *priv)
+{
+ struct statistics_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ if (iwl_bt_statistics(priv))
+ rx_info = &(priv->_agn.statistics_bt.rx.general.common);
+ else
+ rx_info = &(priv->_agn.statistics.rx.general);
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+ IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+ bcn_silence_a, bcn_silence_b, bcn_silence_c,
+ last_rx_noise);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ * Based on the assumption that all statistics counters are DWORDs.
+ * FIXME: this function is for debugging only and does not handle
+ * counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+ __le32 *stats)
+{
+ int i, size;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+ struct statistics_general_common *general, *accum_general;
+ struct statistics_tx *tx, *accum_tx;
+
+ if (iwl_bt_statistics(priv)) {
+ prev_stats = (__le32 *)&priv->_agn.statistics_bt;
+ accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
+ size = sizeof(struct iwl_bt_notif_statistics);
+ general = &priv->_agn.statistics_bt.general.common;
+ accum_general = &priv->_agn.accum_statistics_bt.general.common;
+ tx = &priv->_agn.statistics_bt.tx;
+ accum_tx = &priv->_agn.accum_statistics_bt.tx;
+ delta = (u32 *)&priv->_agn.delta_statistics_bt;
+ max_delta = (u32 *)&priv->_agn.max_delta_bt;
+ } else {
+ prev_stats = (__le32 *)&priv->_agn.statistics;
+ accum_stats = (u32 *)&priv->_agn.accum_statistics;
+ size = sizeof(struct iwl_notif_statistics);
+ general = &priv->_agn.statistics.general.common;
+ accum_general = &priv->_agn.accum_statistics.general.common;
+ tx = &priv->_agn.statistics.tx;
+ accum_tx = &priv->_agn.accum_statistics.tx;
+ delta = (u32 *)&priv->_agn.delta_statistics;
+ max_delta = (u32 *)&priv->_agn.max_delta;
+ }
+ for (i = sizeof(__le32); i < size;
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
+ le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative statistics for "no-counter" type statistics */
+ accum_general->temperature = general->temperature;
+ accum_general->temperature_m = general->temperature_m;
+ accum_general->ttl_timestamp = general->ttl_timestamp;
+ accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
+ accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
+ accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ if (iwl_bt_statistics(priv)) {
+ IWL_DEBUG_RX(priv,
+ "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct iwl_bt_notif_statistics),
+ le32_to_cpu(pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_agn.statistics_bt.general.common.temperature !=
+ pkt->u.stats_bt.general.common.temperature) ||
+ ((priv->_agn.statistics_bt.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats_bt.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+#endif
+
+ } else {
+ IWL_DEBUG_RX(priv,
+ "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct iwl_notif_statistics),
+ le32_to_cpu(pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_agn.statistics.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((priv->_agn.statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+
+ }
+
+ iwl_recover_from_statistics(priv, pkt);
+
+ if (iwl_bt_statistics(priv))
+ memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
+ sizeof(priv->_agn.statistics_bt));
+ else
+ memcpy(&priv->_agn.statistics, &pkt->u.stats,
+ sizeof(priv->_agn.statistics));
+
+ set_bit(STATUS_STATISTICS, &priv->status);
+
+ /* Reschedule the statistics timer to occur in
+ * REG_RECALIB_PERIOD seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&priv->statistics_periodic, jiffies +
+ msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+ (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+ iwl_rx_calc_noise(priv);
+ queue_work(priv->workqueue, &priv->run_time_calib_work);
+ }
+ if (priv->cfg->ops->lib->temp_ops.temperature && change)
+ priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+void iwl_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memset(&priv->_agn.accum_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.accum_statistics_bt, 0,
+ sizeof(struct iwl_bt_notif_statistics));
+ memset(&priv->_agn.delta_statistics_bt, 0,
+ sizeof(struct iwl_bt_notif_statistics));
+ memset(&priv->_agn.max_delta_bt, 0,
+ sizeof(struct iwl_bt_notif_statistics));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwl_rx_statistics(priv, rxb);
+}
+
+void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
+ IWL_DEBUG_CALIB(priv,
+ "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(STATUS_SCANNING, &priv->status))
+ iwl_init_sensitivity(priv);
+ }
}
-EXPORT_SYMBOL(iwl_recover_from_statistics);
/*
* returns non-zero if packet should be dropped
@@ -315,4 +669,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
}
return 0;
}
-EXPORT_SYMBOL(iwl_set_decrypted_flag);
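The new recovery path in iwl-rx.c above decides on a firmware reload from two counters: the ACK count ratio and the aggregated BA timeout delta. The following standalone sketch reduces that decision to plain C for illustration; the tx_counters struct is a stand-in, not the driver's statistics layout, and the constants mirror the ones defined in the hunk.

#include <stdbool.h>
#include <stdio.h>

#define ACK_CNT_RATIO  50   /* percent, as in the patch */
#define BA_TIMEOUT_CNT 5
#define BA_TIMEOUT_MAX 16

/* Stand-in for the relevant counters from statistics_tx. */
struct tx_counters {
	unsigned int actual_ack_cnt;
	unsigned int expected_ack_cnt;
	unsigned int agg_ba_timeout;
};

/* Returns false when the link looks unhealthy enough to warrant a
 * firmware reload, mirroring the decision in iwl_good_ack_health(). */
static bool ack_health_ok(const struct tx_counters *cur,
			  const struct tx_counters *old)
{
	int actual   = (int)(cur->actual_ack_cnt   - old->actual_ack_cnt);
	int expected = (int)(cur->expected_ack_cnt - old->expected_ack_cnt);
	int ba_to    = (int)(cur->agg_ba_timeout   - old->agg_ba_timeout);

	/* Distrust the firmware: ignore non-positive deltas. */
	if (actual <= 0 || expected <= 0)
		return true;

	if ((actual * 100 / expected) < ACK_CNT_RATIO && ba_to > BA_TIMEOUT_CNT)
		return ba_to < BA_TIMEOUT_MAX;

	return true;
}

int main(void)
{
	struct tx_counters old = { 1000, 1200, 3 };
	struct tx_counters cur = { 1100, 1500, 20 };  /* 100/300 = 33% < 50% */

	printf("ack health ok: %s\n", ack_health_ok(&cur, &old) ? "yes" : "no");
	return 0;
}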
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12d9363d0afe..faa6d34cb658 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->abort_scan);
return 0;
}
-EXPORT_SYMBOL(iwl_scan_cancel);
/**
* iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
return test_bit(STATUS_SCAN_HW, &priv->status);
}
-EXPORT_SYMBOL(iwl_scan_cancel_timeout);
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -257,8 +255,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
queue_work(priv->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ iwl_advanced_bt_coexist(priv) &&
priv->bt_status != scan_notif->bt_status) {
if (scan_notif->bt_status) {
/* BT on */
@@ -289,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
iwl_rx_scan_complete_notif;
}
-EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -302,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
return IWL_ACTIVE_DWELL_TIME_24 +
IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
-EXPORT_SYMBOL(iwl_get_active_dwell_time);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -334,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
return passive;
}
-EXPORT_SYMBOL(iwl_get_passive_dwell_time);
void iwl_init_scan_params(struct iwl_priv *priv)
{
@@ -344,7 +338,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
-EXPORT_SYMBOL(iwl_init_scan_params);
static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
struct ieee80211_vif *vif,
@@ -440,7 +433,6 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL(iwl_mac_hw_scan);
/*
* internal short scan, this function should only been called while associated.
@@ -537,7 +529,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
return (u16)len;
}
-EXPORT_SYMBOL(iwl_fill_probe_req);
static void iwl_bg_abort_scan(struct work_struct *work)
{
@@ -622,7 +613,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
}
-EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
{
@@ -636,4 +626,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
mutex_unlock(&priv->mutex);
}
}
-EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 49493d176515..bc90a12408a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
return ret;
}
-EXPORT_SYMBOL(iwl_send_add_sta);
static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return sta_id;
}
-EXPORT_SYMBOL_GPL(iwl_prep_station);
#define STA_WAIT_TIMEOUT (HZ/2)
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
*sta_id_r = sta_id;
return ret;
}
-EXPORT_SYMBOL(iwl_add_station_common);
/**
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -513,7 +510,6 @@ out_err:
spin_unlock_irqrestore(&priv->sta_lock, flags);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(iwl_remove_station);
/**
* iwl_clear_ucode_stations - clear ucode station table bits
@@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
if (!cleared)
IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
}
-EXPORT_SYMBOL(iwl_clear_ucode_stations);
/**
* iwl_restore_stations() - Restore driver known stations to device
@@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
}
-EXPORT_SYMBOL(iwl_restore_stations);
void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
@@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
priv->stations[sta_id].sta.sta.addr, ret);
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
-EXPORT_SYMBOL(iwl_reprogram_ap_sta);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
{
@@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
return WEP_INVALID_OFFSET;
}
-EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
{
@@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
#ifdef CONFIG_IWLWIFI_DEBUG
static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
}
return ret;
}
-EXPORT_SYMBOL(iwl_send_lq_cmd);
int iwl_mac_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
return ret;
}
-EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 073b6ce6141c..277c9175dcf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -84,7 +84,23 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
}
txq->need_update = 0;
}
-EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
/**
* iwl_tx_queue_free - Deallocate DMA queue.
@@ -97,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
struct device *dev = &priv->pci_dev->dev;
int i;
- if (q->n_bd == 0)
- return;
-
- /* first, empty all BD's */
- for (; q->write_ptr != q->read_ptr;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ iwl_tx_queue_unmap(priv, txq_id);
/* De-alloc array of command/tx buffers */
for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -131,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
-EXPORT_SYMBOL(iwl_tx_queue_free);
/**
- * iwl_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
*/
-void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
struct iwl_queue *q = &txq->q;
- struct device *dev = &priv->pci_dev->dev;
int i;
bool huge = false;
if (q->n_bd == 0)
return;
- for (; q->read_ptr != q->write_ptr;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ while (q->read_ptr != q->write_ptr) {
/* we have no way to tell if it is a huge cmd ATM */
i = get_cmd_index(q, q->read_ptr, 0);
- if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+ if (txq->meta[i].flags & CMD_SIZE_HUGE)
huge = true;
- continue;
- }
+ else
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
+
if (huge) {
i = q->n_window;
pci_unmap_single(priv->pci_dev,
@@ -174,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL);
}
+}
+
+/**
+ * iwl_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_cmd_queue_free(struct iwl_priv *priv)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ iwl_cmd_queue_unmap(priv);
/* De-alloc array of command/tx buffers */
for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -193,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
-EXPORT_SYMBOL(iwl_cmd_queue_free);
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
@@ -233,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q)
s = 0;
return s;
}
-EXPORT_SYMBOL(iwl_queue_space);
/**
@@ -384,7 +401,6 @@ out_free_arrays:
return -ENOMEM;
}
-EXPORT_SYMBOL(iwl_tx_queue_init);
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id)
@@ -404,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
/* Tell device where to find queue */
priv->cfg->ops->lib->txq_init(priv, txq);
}
-EXPORT_SYMBOL(iwl_tx_queue_reset);
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -641,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
}
meta->flags = 0;
}
-EXPORT_SYMBOL(iwl_tx_cmd_complete);
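The iwl-tx.c changes above split the DMA unmap loops out of the free paths so they can run on their own; both drain the ring by advancing a wrapping read pointer toward the write pointer. A minimal standalone illustration of that drain loop follows (the ring struct and inc_wrap() are simplified stand-ins for the driver's iwl_queue helpers, and the mask-based wrap assumes a power-of-two ring size).

#include <stdio.h>

#define N_BD 8  /* ring size; assumed power of two for the mask below */

struct ring {
	int read_ptr;
	int write_ptr;
	int n_bd;
};

/* Simplified analogue of iwl_queue_inc_wrap(). */
static int inc_wrap(int idx, int n_bd)
{
	return (idx + 1) & (n_bd - 1);
}

/* Analogue of iwl_tx_queue_unmap(): walk every outstanding entry and
 * release it, leaving the ring itself allocated for reuse. */
static void ring_unmap(struct ring *q)
{
	if (q->n_bd == 0)
		return;

	while (q->write_ptr != q->read_ptr) {
		printf("releasing slot %d\n", q->read_ptr);  /* free TFD here */
		q->read_ptr = inc_wrap(q->read_ptr, q->n_bd);
	}
}

int main(void)
{
	struct ring q = { .read_ptr = 6, .write_ptr = 2, .n_bd = N_BD };

	ring_unmap(&q);  /* drains slots 6, 7, 0, 1 */
	return 0;
}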
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 5a4982271e96..ed57e4402800 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
return -EINVAL;
}
- freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ freq = ieee80211_channel_to_frequency(umac_bss->channel,
+ band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a944893ae3ca..9a57cf6a488f 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
switch (le32_to_cpu(complete->status)) {
case UMAC_ASSOC_COMPLETE_SUCCESS:
chan = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(complete->channel));
+ ieee80211_channel_to_frequency(complete->channel,
+ complete->band == UMAC_BAND_2GHZ ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ));
if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
/* Associated to a unallowed channel, disassociate. */
__iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
goto err;
}
- freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
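The iwm and libertas hunks in this area all add a band argument to ieee80211_channel_to_frequency(), since a bare channel number does not identify a frequency on its own. The sketch below is an illustrative, simplified version of that mapping, not the mac80211 implementation.

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Rough illustration of why the band argument is needed: the same
 * channel number maps to different frequencies depending on the band.
 * (Simplified; the real ieee80211_channel_to_frequency() handles more
 * special cases than channel 14.) */
static int channel_to_frequency(int chan, enum band band)
{
	if (band == BAND_2GHZ)
		return chan == 14 ? 2484 : 2407 + chan * 5;
	return 5000 + chan * 5;
}

int main(void)
{
	printf("chan 6 @2GHz  -> %d MHz\n", channel_to_frequency(6, BAND_2GHZ));
	printf("chan 36 @5GHz -> %d MHz\n", channel_to_frequency(36, BAND_5GHZ));
	return 0;
}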
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 698a1f7694ed..30ef0351bfc4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
/* No channel, no luck */
if (chan_no != -1) {
struct wiphy *wiphy = priv->wdev->wiphy;
- int freq = ieee80211_channel_to_frequency(chan_no);
+ int freq = ieee80211_channel_to_frequency(chan_no,
+ IEEE80211_BAND_2GHZ);
struct ieee80211_channel *channel =
ieee80211_get_channel(wiphy, freq);
@@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
lbs_deb_enter(LBS_DEB_CFG80211);
survey->channel = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(priv->channel));
+ ieee80211_channel_to_frequency(priv->channel,
+ IEEE80211_BAND_2GHZ));
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 78c4da150a74..7e8a658b7670 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
if (priv->current_addr[0] == 0xff)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
- memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
- if (priv->mesh_dev)
- memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
+ if (!priv->copied_hwaddr) {
+ memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
+ if (priv->mesh_dev)
+ memcpy(priv->mesh_dev->dev_addr,
+ priv->current_addr, ETH_ALEN);
+ priv->copied_hwaddr = 1;
+ }
out:
lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 18dd9a02c459..bc461eb39660 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -90,6 +90,7 @@ struct lbs_private {
void *card;
u8 fw_ready;
u8 surpriseremoved;
+ u8 setup_fw_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
u32 fwcapinfo;
u16 regioncode;
u8 current_addr[ETH_ALEN];
+ u8 copied_hwaddr;
/* Command download */
u8 dnld_sent;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 00600239a053..f6c2cd665f49 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -20,10 +20,8 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
-#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
@@ -34,6 +32,12 @@
#include "dev.h"
#include "if_spi.h"
+struct if_spi_packet {
+ struct list_head list;
+ u16 blen;
+ u8 buffer[0] __attribute__((aligned(4)));
+};
+
struct if_spi_card {
struct spi_device *spi;
struct lbs_private *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
unsigned long spu_reg_delay;
/* Handles all SPI communication (except for FW load) */
- struct task_struct *spi_thread;
- int run_thread;
-
- /* Used to wake up the spi_thread */
- struct semaphore spi_ready;
- struct semaphore spi_thread_terminated;
+ struct workqueue_struct *workqueue;
+ struct work_struct packet_work;
u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
+
+ /* A buffer of incoming packets from libertas core.
+ * Since we can't sleep in hw_host_to_card, we have to buffer
+ * them. */
+ struct list_head cmd_packet_list;
+ struct list_head data_packet_list;
+
+ /* Protects cmd_packet_list and data_packet_list */
+ spinlock_t buffer_lock;
};
static void free_if_spi_card(struct if_spi_card *card)
{
+ struct list_head *cursor, *next;
+ struct if_spi_packet *packet;
+
+ list_for_each_safe(cursor, next, &card->cmd_packet_list) {
+ packet = container_of(cursor, struct if_spi_packet, list);
+ list_del(&packet->list);
+ kfree(packet);
+ }
+ list_for_each_safe(cursor, next, &card->data_packet_list) {
+ packet = container_of(cursor, struct if_spi_packet, list);
+ list_del(&packet->list);
+ kfree(packet);
+ }
spi_set_drvdata(card->spi, NULL);
kfree(card);
}
@@ -622,7 +644,7 @@ out:
/*
* SPI Transfer Thread
*
- * The SPI thread handles all SPI transfers, so there is no need for a lock.
+ * The SPI worker handles all SPI transfers, so there is no need for a lock.
*/
/* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
return err;
}
+/* Move data or a command from the host to the card. */
+static void if_spi_h2c(struct if_spi_card *card,
+ struct if_spi_packet *packet, int type)
+{
+ int err = 0;
+ u16 int_type, port_reg;
+
+ switch (type) {
+ case MVMS_DAT:
+ int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
+ port_reg = IF_SPI_DATA_RDWRPORT_REG;
+ break;
+ case MVMS_CMD:
+ int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
+ port_reg = IF_SPI_CMD_RDWRPORT_REG;
+ break;
+ default:
+ lbs_pr_err("can't transfer buffer of type %d\n", type);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Write the data to the card */
+ err = spu_write(card, port_reg, packet->buffer, packet->blen);
+ if (err)
+ goto out;
+
+out:
+ kfree(packet);
+
+ if (err)
+ lbs_pr_err("%s: error %d\n", __func__, err);
+}
+
/* Inform the host about a card event */
static void if_spi_e2h(struct if_spi_card *card)
{
@@ -766,71 +822,88 @@ out:
lbs_pr_err("%s: error %d\n", __func__, err);
}
-static int lbs_spi_thread(void *data)
+static void if_spi_host_to_card_worker(struct work_struct *work)
{
int err;
- struct if_spi_card *card = data;
+ struct if_spi_card *card;
u16 hiStatus;
+ unsigned long flags;
+ struct if_spi_packet *packet;
- while (1) {
- /* Wait to be woken up by one of two things. First, our ISR
- * could tell us that something happened on the WLAN.
- * Secondly, libertas could call hw_host_to_card with more
- * data, which we might be able to send.
- */
- do {
- err = down_interruptible(&card->spi_ready);
- if (!card->run_thread) {
- up(&card->spi_thread_terminated);
- do_exit(0);
- }
- } while (err == -EINTR);
+ card = container_of(work, struct if_spi_card, packet_work);
- /* Read the host interrupt status register to see what we
- * can do. */
- err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
- &hiStatus);
- if (err) {
- lbs_pr_err("I/O error\n");
+ lbs_deb_enter(LBS_DEB_SPI);
+
+ /* Read the host interrupt status register to see what we
+ * can do. */
+ err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
+ &hiStatus);
+ if (err) {
+ lbs_pr_err("I/O error\n");
+ goto err;
+ }
+
+ if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
+ err = if_spi_c2h_cmd(card);
+ if (err)
goto err;
- }
+ }
+ if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
+ err = if_spi_c2h_data(card);
+ if (err)
+ goto err;
+ }
- if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
- err = if_spi_c2h_cmd(card);
- if (err)
- goto err;
- }
- if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
- err = if_spi_c2h_data(card);
- if (err)
- goto err;
+ /* workaround: in PS mode, the card does not set the Command
+ * Download Ready bit, but it sets TX Download Ready. */
+ if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
+ (card->priv->psstate != PS_STATE_FULL_POWER &&
+ (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
+ /* This means two things. First of all,
+ * if there was a previous command sent, the card has
+ * successfully received it.
+ * Secondly, it is now ready to download another
+ * command.
+ */
+ lbs_host_to_card_done(card->priv);
+
+ /* Do we have any command packets from the host to
+ * send? */
+ packet = NULL;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ if (!list_empty(&card->cmd_packet_list)) {
+ packet = (struct if_spi_packet *)(card->
+ cmd_packet_list.next);
+ list_del(&packet->list);
}
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
- /* workaround: in PS mode, the card does not set the Command
- * Download Ready bit, but it sets TX Download Ready. */
- if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
- (card->priv->psstate != PS_STATE_FULL_POWER &&
- (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
- lbs_host_to_card_done(card->priv);
+ if (packet)
+ if_spi_h2c(card, packet, MVMS_CMD);
+ }
+ if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
+ /* Do we have any data packets from the host to
+ * send? */
+ packet = NULL;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ if (!list_empty(&card->data_packet_list)) {
+ packet = (struct if_spi_packet *)(card->
+ data_packet_list.next);
+ list_del(&packet->list);
}
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
- if (hiStatus & IF_SPI_HIST_CARD_EVENT)
- if_spi_e2h(card);
+ if (packet)
+ if_spi_h2c(card, packet, MVMS_DAT);
+ }
+ if (hiStatus & IF_SPI_HIST_CARD_EVENT)
+ if_spi_e2h(card);
err:
- if (err)
- lbs_pr_err("%s: got error %d\n", __func__, err);
- }
-}
+ if (err)
+ lbs_pr_err("%s: got error %d\n", __func__, err);
-/* Block until lbs_spi_thread thread has terminated */
-static void if_spi_terminate_spi_thread(struct if_spi_card *card)
-{
- /* It would be nice to use kthread_stop here, but that function
- * can't wake threads waiting for a semaphore. */
- card->run_thread = 0;
- up(&card->spi_ready);
- down(&card->spi_thread_terminated);
+ lbs_deb_leave(LBS_DEB_SPI);
}
/*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
u8 type, u8 *buf, u16 nb)
{
int err = 0;
+ unsigned long flags;
struct if_spi_card *card = priv->card;
+ struct if_spi_packet *packet;
+ u16 blen;
lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
- nb = ALIGN(nb, 4);
+ if (nb == 0) {
+ lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
+ err = -EINVAL;
+ goto out;
+ }
+ blen = ALIGN(nb, 4);
+ packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
+ if (!packet) {
+ err = -ENOMEM;
+ goto out;
+ }
+ packet->blen = blen;
+ memcpy(packet->buffer, buf, nb);
+ memset(packet->buffer + nb, 0, blen - nb);
switch (type) {
case MVMS_CMD:
- err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
+ priv->dnld_sent = DNLD_CMD_SENT;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ list_add_tail(&packet->list, &card->cmd_packet_list);
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
case MVMS_DAT:
- err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
+ priv->dnld_sent = DNLD_DATA_SENT;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ list_add_tail(&packet->list, &card->data_packet_list);
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
default:
lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
break;
}
+ /* Queue spi xfer work */
+ queue_work(card->workqueue, &card->packet_work);
+out:
lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
return err;
}
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
* Host Interrupts
*
* Service incoming interrupts from the WLAN device. We can't sleep here, so
- * don't try to talk on the SPI bus, just wake up the SPI thread.
+ * don't try to talk on the SPI bus, just queue the SPI xfer work.
*/
static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
{
struct if_spi_card *card = dev_id;
- up(&card->spi_ready);
+ queue_work(card->workqueue, &card->packet_work);
+
return IRQ_HANDLED;
}
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
* SPI callbacks
*/
-static int __devinit if_spi_probe(struct spi_device *spi)
+static int if_spi_init_card(struct if_spi_card *card)
{
- struct if_spi_card *card;
- struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
- int err = 0, i;
+ struct spi_device *spi = card->spi;
+ int err, i;
u32 scratch;
- struct sched_param param = { .sched_priority = 1 };
const struct firmware *helper = NULL;
const struct firmware *mainfw = NULL;
lbs_deb_enter(LBS_DEB_SPI);
- if (!pdata) {
- err = -EINVAL;
- goto out;
- }
-
- if (pdata->setup) {
- err = pdata->setup(spi);
- if (err)
- goto out;
- }
-
- /* Allocate card structure to represent this specific device */
- card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
- if (!card) {
- err = -ENOMEM;
- goto out;
- }
- spi_set_drvdata(spi, card);
- card->pdata = pdata;
- card->spi = spi;
- card->prev_xfer_time = jiffies;
-
- sema_init(&card->spi_ready, 0);
- sema_init(&card->spi_thread_terminated, 0);
-
- /* Initialize the SPI Interface Unit */
- err = spu_init(card, pdata->use_dummy_writes);
+ err = spu_init(card, card->pdata->use_dummy_writes);
if (err)
- goto free_card;
+ goto out;
err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
if (err)
- goto free_card;
+ goto out;
- /* Firmware load */
err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
if (err)
- goto free_card;
+ goto out;
if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
lbs_deb_spi("Firmware is already loaded for "
"Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
lbs_pr_err("Unsupported chip_id: 0x%02x\n",
card->card_id);
err = -ENODEV;
- goto free_card;
+ goto out;
}
err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
&mainfw);
if (err) {
lbs_pr_err("failed to find firmware (%d)\n", err);
- goto free_card;
+ goto out;
}
lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,15 +1035,68 @@ static int __devinit if_spi_probe(struct spi_device *spi)
spi->max_speed_hz);
err = if_spi_prog_helper_firmware(card, helper);
if (err)
- goto free_card;
+ goto out;
err = if_spi_prog_main_firmware(card, mainfw);
if (err)
- goto free_card;
+ goto out;
lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
}
err = spu_set_interrupt_mode(card, 0, 1);
if (err)
+ goto out;
+
+out:
+ if (helper)
+ release_firmware(helper);
+ if (mainfw)
+ release_firmware(mainfw);
+
+ lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
+
+ return err;
+}
+
+static int __devinit if_spi_probe(struct spi_device *spi)
+{
+ struct if_spi_card *card;
+ struct lbs_private *priv = NULL;
+ struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ int err = 0;
+
+ lbs_deb_enter(LBS_DEB_SPI);
+
+ if (!pdata) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (pdata->setup) {
+ err = pdata->setup(spi);
+ if (err)
+ goto out;
+ }
+
+ /* Allocate card structure to represent this specific device */
+ card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto teardown;
+ }
+ spi_set_drvdata(spi, card);
+ card->pdata = pdata;
+ card->spi = spi;
+ card->prev_xfer_time = jiffies;
+
+ INIT_LIST_HEAD(&card->cmd_packet_list);
+ INIT_LIST_HEAD(&card->data_packet_list);
+ spin_lock_init(&card->buffer_lock);
+
+ /* Initialize the SPI Interface Unit */
+
+ /* Firmware load */
+ err = if_spi_init_card(card);
+ if (err)
goto free_card;
/* Register our card with libertas.
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->run_thread = 1;
- card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread");
- if (IS_ERR(card->spi_thread)) {
- card->run_thread = 0;
- err = PTR_ERR(card->spi_thread);
- lbs_pr_err("error creating SPI thread: err=%d\n", err);
- goto remove_card;
- }
- if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
- lbs_pr_err("Error setting scheduler, using default.\n");
+ card->workqueue = create_workqueue("libertas_spi");
+ INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
err = request_irq(spi->irq, if_spi_host_interrupt,
IRQF_TRIGGER_FALLING, "libertas_spi", card);
if (err) {
lbs_pr_err("can't get host irq line-- request_irq failed\n");
- goto terminate_thread;
+ goto terminate_workqueue;
}
- /* poke the IRQ handler so that we don't miss the first interrupt */
- up(&card->spi_ready);
-
/* Start the card.
* This will call register_netdev, and we'll start
* getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
release_irq:
free_irq(spi->irq, card);
-terminate_thread:
- if_spi_terminate_spi_thread(card);
-remove_card:
+terminate_workqueue:
+ flush_workqueue(card->workqueue);
+ destroy_workqueue(card->workqueue);
lbs_remove_card(priv); /* will call free_netdev */
free_card:
free_if_spi_card(card);
+teardown:
+ if (pdata->teardown)
+ pdata->teardown(spi);
out:
- if (helper)
- release_firmware(helper);
- if (mainfw)
- release_firmware(mainfw);
-
lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
return err;
}
@@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
lbs_remove_card(priv); /* will call free_netdev */
free_irq(spi->irq, card);
- if_spi_terminate_spi_thread(card);
+ flush_workqueue(card->workqueue);
+ destroy_workqueue(card->workqueue);
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
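
The if_spi rework above drops the dedicated SPI thread (a semaphore woken from the hard interrupt) in favour of a workqueue: the interrupt handler only queues packet_work, and the probe/remove paths flush and destroy the queue. What follows is a minimal sketch of that pattern, not the driver's actual code; the names my_card, my_worker and my_irq_handler are placeholders, and only the standard kernel workqueue/IRQ APIs are used.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical per-device state mirroring the if_spi_card layout. */
struct my_card {
	struct workqueue_struct *workqueue;
	struct work_struct packet_work;
};

/* Bottom half: may sleep, so this is where the SPI bus is used. */
static void my_worker(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card, packet_work);

	/* ... exchange packets with the device over SPI ... */
	(void)card;
}

/* Top half: never sleeps, only defers the real work. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_card *card = dev_id;

	queue_work(card->workqueue, &card->packet_work);
	return IRQ_HANDLED;
}

static int my_setup(struct my_card *card)
{
	card->workqueue = create_workqueue("my_spi");
	if (!card->workqueue)
		return -ENOMEM;
	INIT_WORK(&card->packet_work, my_worker);
	return 0;
}

static void my_teardown(struct my_card *card)
{
	flush_workqueue(card->workqueue);
	destroy_workqueue(card->workqueue);
}
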
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 6836a6dd9853..ca8149cd5bd9 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
return 0;
}
+/**
+ * @brief This function gets the HW spec from the firmware and sets
+ * some basic parameters.
+ *
+ * @param priv A pointer to struct lbs_private structure
+ * @return 0 or -1
+ */
+static int lbs_setup_firmware(struct lbs_private *priv)
+{
+ int ret = -1;
+ s16 curlevel = 0, minlevel = 0, maxlevel = 0;
+
+ lbs_deb_enter(LBS_DEB_FW);
+
+ /* Read MAC address from firmware */
+ memset(priv->current_addr, 0xff, ETH_ALEN);
+ ret = lbs_update_hw_spec(priv);
+ if (ret)
+ goto done;
+
+ /* Read power levels if available */
+ ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
+ if (ret == 0) {
+ priv->txpower_cur = curlevel;
+ priv->txpower_min = minlevel;
+ priv->txpower_max = maxlevel;
+ }
+
+ /* Send cmd to FW to enable 11D function */
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
+ lbs_set_mac_control(priv);
+done:
+ lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+ return ret;
+}
+
int lbs_suspend(struct lbs_private *priv)
{
int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
lbs_pr_err("deep sleep activation failed: %d\n", ret);
}
- lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_resume);
-
-/**
- * @brief This function gets the HW spec from the firmware and sets
- * some basic parameters.
- *
- * @param priv A pointer to struct lbs_private structure
- * @return 0 or -1
- */
-static int lbs_setup_firmware(struct lbs_private *priv)
-{
- int ret = -1;
- s16 curlevel = 0, minlevel = 0, maxlevel = 0;
-
- lbs_deb_enter(LBS_DEB_FW);
-
- /* Read MAC address from firmware */
- memset(priv->current_addr, 0xff, ETH_ALEN);
- ret = lbs_update_hw_spec(priv);
- if (ret)
- goto done;
-
- /* Read power levels if available */
- ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
- if (ret == 0) {
- priv->txpower_cur = curlevel;
- priv->txpower_min = minlevel;
- priv->txpower_max = maxlevel;
- }
+ if (priv->setup_fw_on_resume)
+ ret = lbs_setup_firmware(priv);
- /* Send cmd to FW to enable 11D function */
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
-
- lbs_set_mac_control(priv);
-done:
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
+EXPORT_SYMBOL_GPL(lbs_resume);
/**
* This function handles the timeout of command sending.
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 9278b3c8ee30..d4005081f1df 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
lbtf_deb_leave(LBTF_DEB_MAIN);
}
-static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct lbtf_private *priv = hw->priv;
@@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* there are no buffered multicast frames to send
*/
ieee80211_stop_queues(priv->hw);
- return NETDEV_TX_OK;
}
static void lbtf_tx_work(struct work_struct *work)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 454f045ddff3..56f439d58013 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -541,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
}
-static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
@@ -551,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (skb->len < 10) {
/* Should not happen; just a sanity check for addr1 use */
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
ack = mac80211_hwsim_tx_frame(hw, skb);
@@ -571,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
txi->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(hw, skb);
- return NETDEV_TX_OK;
}
@@ -943,7 +942,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9ecf8407cb1b..df5959f36d0b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -232,6 +232,9 @@ struct mwl8k_priv {
struct completion firmware_loading_complete;
};
+#define MAX_WEP_KEY_LEN 13
+#define NUM_WEP_KEYS 4
+
/* Per interface specific private data */
struct mwl8k_vif {
struct list_head list;
@@ -242,8 +245,21 @@ struct mwl8k_vif {
/* Non AMPDU sequence number assigned by driver. */
u16 seqno;
+
+ /* Saved WEP keys */
+ struct {
+ u8 enabled;
+ u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
+ } wep_key_conf[NUM_WEP_KEYS];
+
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* A flag to indicate whether HW crypto is enabled for this BSSID */
+ bool is_hw_crypto_enabled;
};
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
+#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
struct mwl8k_sta {
/* Index into station database. Returned by UPDATE_STADB. */
@@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
+#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
MWL8K_CMDNAME(BSS_START);
MWL8K_CMDNAME(SET_NEW_STN);
+ MWL8K_CMDNAME(UPDATE_ENCRYPTION);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
skb_pull(skb, sizeof(*tr) - hdrlen);
}
-static inline void mwl8k_add_dma_header(struct sk_buff *skb)
+static void
+mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
{
struct ieee80211_hdr *wh;
int hdrlen;
+ int reqd_hdrlen;
struct mwl8k_dma_data *tr;
/*
@@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
wh = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_hdrlen(wh->frame_control);
- if (hdrlen != sizeof(*tr))
- skb_push(skb, sizeof(*tr) - hdrlen);
+ reqd_hdrlen = sizeof(*tr);
+
+ if (hdrlen != reqd_hdrlen)
+ skb_push(skb, reqd_hdrlen - hdrlen);
if (ieee80211_is_data_qos(wh->frame_control))
- hdrlen -= 2;
+ hdrlen -= IEEE80211_QOS_CTL_LEN;
tr = (struct mwl8k_dma_data *)skb->data;
if (wh != &tr->wh)
@@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
* payload". That is, everything except for the 802.11 header.
* This includes all crypto material including the MIC.
*/
- tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr));
+ tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
}
+static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *wh;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_key_conf *key_conf;
+ int data_pad;
+
+ wh = (struct ieee80211_hdr *)skb->data;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ key_conf = NULL;
+ if (ieee80211_is_data(wh->frame_control))
+ key_conf = tx_info->control.hw_key;
+
+ /*
+ * Make sure the packet header is in the DMA header format (4-address
+ * without QoS). The necessary crypto padding between the header and the
+ * payload has already been provided by mac80211, but mac80211 does not
+ * add tail padding when HW crypto is enabled.
+ *
+ * We have the following trailer padding requirements:
+ * - WEP: 4 trailer bytes (ICV)
+ * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
+ * - CCMP: 8 trailer bytes (MIC)
+ */
+ data_pad = 0;
+ if (key_conf != NULL) {
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ data_pad = 4;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ data_pad = 12;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ data_pad = 8;
+ break;
+ }
+ }
+ mwl8k_add_dma_header(skb, data_pad);
+}
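
The tail_pad passed to mwl8k_add_dma_header() reserves room in fwlen for the ICV/MIC that the hardware appends once it encrypts the frame: 4 bytes for WEP, 12 for TKIP (8-byte Michael MIC plus 4-byte ICV) and 8 for CCMP. A self-contained sketch of the same cipher-to-trailer mapping is shown below; the CIPHER_* constants are local stand-ins that mirror the kernel's WLAN_CIPHER_SUITE_* suite selectors.

#include <stdint.h>
#include <stdio.h>

/* IEEE 802.11 cipher suite selectors (00-0F-AC:n). */
#define CIPHER_WEP40  0x000FAC01u
#define CIPHER_TKIP   0x000FAC02u
#define CIPHER_CCMP   0x000FAC04u
#define CIPHER_WEP104 0x000FAC05u

/* Trailer bytes the hardware will append for each cipher. */
static int tx_tail_pad(uint32_t cipher)
{
	switch (cipher) {
	case CIPHER_WEP40:
	case CIPHER_WEP104:
		return 4;	/* ICV */
	case CIPHER_TKIP:
		return 12;	/* 8-byte MIC + 4-byte ICV */
	case CIPHER_CCMP:
		return 8;	/* MIC */
	default:
		return 0;	/* open / unknown cipher: no trailer */
	}
}

int main(void)
{
	printf("TKIP tail pad: %d bytes\n", tx_tail_pad(CIPHER_TKIP));
	return 0;
}
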
/*
* Packet reception for 88w8366 AP firmware.
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
+/* 8366 AP rx_status bits */
+#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
+#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
+#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
+
static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
status->band = IEEE80211_BAND_2GHZ;
}
- status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ status->freq = ieee80211_channel_to_frequency(rxd->channel,
+ status->band);
*qos = rxd->qos_control;
+ if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
+ (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
+ (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
return le16_to_cpu(rxd->pkt_len);
}
@@ -876,6 +954,11 @@ struct mwl8k_rxd_sta {
#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04
+/* ICV=0 or MIC=1 */
+#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08
+/* Key is uploaded only in failure case */
+#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30
static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
@@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
status->band = IEEE80211_BAND_2GHZ;
}
- status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ status->freq = ieee80211_channel_to_frequency(rxd->channel,
+ status->band);
*qos = rxd->qos_control;
+ if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
+ (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
+ status->flag |= RX_FLAG_MMIC_ERROR;
return le16_to_cpu(rxd->pkt_len);
}
@@ -1092,9 +1179,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &priv->finalize_join_worker);
}
+static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
+ u8 *bssid)
+{
+ struct mwl8k_vif *mwl8k_vif;
+
+ list_for_each_entry(mwl8k_vif,
+ vif_list, list) {
+ if (memcmp(bssid, mwl8k_vif->bssid,
+ ETH_ALEN) == 0)
+ return mwl8k_vif;
+ }
+
+ return NULL;
+}
+
static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = NULL;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int processed;
@@ -1104,6 +1207,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
void *rxd;
int pkt_len;
struct ieee80211_rx_status status;
+ struct ieee80211_hdr *wh;
__le16 qos;
skb = rxq->buf[rxq->head].skb;
@@ -1130,8 +1234,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rxq->rxd_count--;
- skb_put(skb, pkt_len);
- mwl8k_remove_dma_header(skb, qos);
+ wh = &((struct mwl8k_dma_data *)skb->data)->wh;
/*
* Check for a pending join operation. Save a
@@ -1141,6 +1244,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
if (mwl8k_capture_bssid(priv, (void *)skb->data))
mwl8k_save_beacon(hw, skb);
+ if (ieee80211_has_protected(wh->frame_control)) {
+
+ /* Check if hw crypto has been enabled for
+ * this bss. If yes, set the status flags
+ * accordingly
+ */
+ mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
+ wh->addr1);
+
+ if (mwl8k_vif != NULL &&
+ mwl8k_vif->is_hw_crypto_enabled == true) {
+ /*
+ * When an MMIC error is
+ * encountered by the firmware,
+ * the payload is dropped and
+ * only the 32-byte mwl8k
+ * firmware header is sent to
+ * the host.
+ *
+ * We need to add four bytes of
+ * key information, in which
+ * mac80211 expects keyidx set
+ * to 0 to trigger the MMIC
+ * failure countermeasures.
+ */
+ if (status.flag & RX_FLAG_MMIC_ERROR) {
+ struct mwl8k_dma_data *tr;
+ tr = (struct mwl8k_dma_data *)skb->data;
+ memset((void *)&(tr->data), 0, 4);
+ pkt_len += 4;
+ }
+
+ if (!ieee80211_is_auth(wh->frame_control))
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_MMIC_STRIPPED;
+ }
+ }
+
+ skb_put(skb, pkt_len);
+ mwl8k_remove_dma_header(skb, qos);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_irqsafe(hw, skb);
@@ -1392,6 +1535,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
+
+ /* Rate control is happening in the firmware.
+ * Ensure no tx rate is being reported.
+ */
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 1;
+
if (MWL8K_TXD_SUCCESS(status))
info->flags |= IEEE80211_TX_STAT_ACK;
@@ -1423,7 +1573,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
txq->txd = NULL;
}
-static int
+static void
mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
@@ -1443,7 +1593,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
else
qos = 0;
- mwl8k_add_dma_header(skb);
+ if (priv->ap_fw)
+ mwl8k_encapsulate_tx_frame(skb);
+ else
+ mwl8k_add_dma_header(skb, 0);
+
wh = &((struct mwl8k_dma_data *)skb->data)->wh;
tx_info = IEEE80211_SKB_CB(skb);
@@ -1481,7 +1635,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
wiphy_debug(hw->wiphy,
"failed to dma map skb, dropping TX frame.\n");
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
spin_lock_bh(&priv->tx_lock);
@@ -1518,8 +1672,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_tx_start(priv);
spin_unlock_bh(&priv->tx_lock);
-
- return NETDEV_TX_OK;
}
@@ -1974,8 +2126,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
+
+ /*
+ * The mac80211 stack has Q0 as the highest priority queue and Q3 as
+ * the lowest; the firmware has it the other way around, with Q3
+ * highest and Q0 lowest. Map mac80211's Q3 to the firmware's Q0 so
+ * that the priority is interpreted the right way in the firmware.
+ */
+ for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+ int j = MWL8K_TX_QUEUES - 1 - i;
+ cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
+ }
+
cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
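
The comment above is the key to the reversed indexing: mac80211 numbers queue 0 as the highest priority while this firmware treats queue 3 as highest, so the TX descriptor pointers here (and the EDCA parameters in mwl8k_conf_tx further down) are installed with the index mirrored. A minimal sketch of the reversal, assuming four queues:

#include <stdio.h>

#define NUM_TX_QUEUES 4

/* Map a mac80211 queue index (0 = highest priority) to the firmware
 * queue index (3 = highest priority). The mapping is its own inverse,
 * so the same helper works in both directions. */
static int fw_queue(int mac80211_queue)
{
	return NUM_TX_QUEUES - 1 - mac80211_queue;
}

int main(void)
{
	int q;

	for (q = 0; q < NUM_TX_QUEUES; q++)
		printf("mac80211 q%d -> firmware q%d\n", q, fw_queue(q));
	return 0;
}
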
@@ -3099,6 +3261,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
}
/*
+ * CMD_UPDATE_ENCRYPTION.
+ */
+
+#define MAX_ENCR_KEY_LENGTH 16
+#define MIC_KEY_LENGTH 8
+
+struct mwl8k_cmd_update_encryption {
+ struct mwl8k_cmd_pkt header;
+
+ __le32 action;
+ __le32 reserved;
+ __u8 mac_addr[6];
+ __u8 encr_type;
+
+} __attribute__((packed));
+
+struct mwl8k_cmd_set_key {
+ struct mwl8k_cmd_pkt header;
+
+ __le32 action;
+ __le32 reserved;
+ __le16 length;
+ __le16 key_type_id;
+ __le32 key_info;
+ __le32 key_id;
+ __le16 key_len;
+ __u8 key_material[MAX_ENCR_KEY_LENGTH];
+ __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
+ __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+ __le16 tkip_rsc_low;
+ __le32 tkip_rsc_high;
+ __le16 tkip_tsc_low;
+ __le32 tkip_tsc_high;
+ __u8 mac_addr[6];
+} __attribute__((packed));
+
+enum {
+ MWL8K_ENCR_ENABLE,
+ MWL8K_ENCR_SET_KEY,
+ MWL8K_ENCR_REMOVE_KEY,
+ MWL8K_ENCR_SET_GROUP_KEY,
+};
+
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8
+
+enum {
+ MWL8K_ALG_WEP,
+ MWL8K_ALG_TKIP,
+ MWL8K_ALG_CCMP,
+};
+
+#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004
+#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008
+#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040
+#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000
+#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000
+
+static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ u8 encr_type)
+{
+ struct mwl8k_cmd_update_encryption *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->encr_type = encr_type;
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->length = cpu_to_le16(sizeof(*cmd) -
+ offsetof(struct mwl8k_cmd_set_key, length));
+ cmd->key_id = cpu_to_le32(key->keyidx);
+ cmd->key_len = cpu_to_le16(key->keylen);
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
+ if (key->keyidx == 0)
+ cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
+
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
+ cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+ : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+ cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
+ | MWL8K_KEY_FLAG_TSC_VALID);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
+ cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+ : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct mwl8k_cmd_set_key *cmd;
+ int rc;
+ int keymlen;
+ u32 action;
+ u8 idx;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+ if (rc < 0)
+ goto done;
+
+ idx = key->keyidx;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ action = MWL8K_ENCR_SET_KEY;
+ else
+ action = MWL8K_ENCR_SET_GROUP_KEY;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (!mwl8k_vif->wep_key_conf[idx].enabled) {
+ memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
+ sizeof(*key) + key->keylen);
+ mwl8k_vif->wep_key_conf[idx].enabled = 1;
+ }
+
+ keymlen = 0;
+ action = MWL8K_ENCR_SET_KEY;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ keymlen = key->keylen;
+ break;
+ default:
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
+ memcpy(cmd->key_material, key->key, keymlen);
+ cmd->action = cpu_to_le32(action);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct mwl8k_cmd_set_key *cmd;
+ int rc;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+ if (rc < 0)
+ goto done;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
+
+ cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd_param,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int rc = 0;
+ u8 encr_type;
+ u8 *addr;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (sta == NULL)
+ addr = hw->wiphy->perm_addr;
+ else
+ addr = sta->addr;
+
+ if (cmd_param == SET_KEY) {
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
+ if (rc)
+ goto out;
+
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
+ encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
+ else
+ encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
+
+ rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
+ encr_type);
+ if (rc)
+ goto out;
+
+ mwl8k_vif->is_hw_crypto_enabled = true;
+
+ } else {
+ rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
+
+ if (rc)
+ goto out;
+
+ mwl8k_vif->is_hw_crypto_enabled = false;
+
+ }
+out:
+ return rc;
+}
+
+/*
* CMD_UPDATE_STADB.
*/
struct ewc_ht_info {
@@ -3310,22 +3740,19 @@ static void mwl8k_rx_poll(unsigned long data)
/*
* Core driver operations.
*/
-static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
int index = skb_get_queue_mapping(skb);
- int rc;
if (!priv->radio_on) {
wiphy_debug(hw->wiphy,
"dropped TX frame since radio disabled\n");
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
- rc = mwl8k_txq_xmit(hw, index, skb);
-
- return rc;
+ mwl8k_txq_xmit(hw, index, skb);
}
static int mwl8k_start(struct ieee80211_hw *hw)
@@ -3469,6 +3896,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
mwl8k_vif->vif = vif;
mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
+ memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
+ mwl8k_vif->is_hw_crypto_enabled = false;
/* Set the mac address. */
mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3528,9 +3957,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
goto out;
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
- if (!rc)
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+ if (rc)
+ wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+ if (rc)
+ wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
+
} else {
rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
if (rc)
@@ -3866,18 +4299,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
{
struct mwl8k_priv *priv = hw->priv;
int ret;
+ int i;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct ieee80211_key_conf *key;
if (!priv->ap_fw) {
ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
if (ret >= 0) {
MWL8K_STA(sta)->peer_id = ret;
- return 0;
+ ret = 0;
}
- return ret;
+ } else {
+ ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
}
- return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
+ if (mwl8k_vif->wep_key_conf[i].enabled)
+ mwl8k_set_key(hw, SET_KEY, vif, sta, key);
+ }
+ return ret;
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3894,12 +4336,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (!priv->wmm_enabled)
rc = mwl8k_cmd_set_wmm_mode(hw, 1);
- if (!rc)
- rc = mwl8k_cmd_set_edca_params(hw, queue,
+ if (!rc) {
+ int q = MWL8K_TX_QUEUES - 1 - queue;
+ rc = mwl8k_cmd_set_edca_params(hw, q,
params->cw_min,
params->cw_max,
params->aifs,
params->txop);
+ }
mwl8k_fw_unlock(hw);
}
@@ -3932,7 +4376,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -3955,6 +4400,7 @@ static const struct ieee80211_ops mwl8k_ops = {
.bss_info_changed = mwl8k_bss_info_changed,
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
+ .set_key = mwl8k_set_key,
.set_rts_threshold = mwl8k_set_rts_threshold,
.sta_add = mwl8k_sta_add,
.sta_remove = mwl8k_sta_remove,
@@ -4332,7 +4778,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
hw->queues = MWL8K_TX_QUEUES;
/* Set rssi values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM;
+ hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 86cb54c842e7..e99ca1c1e0d8 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
channel = ieee80211_get_channel(wiphy, freq);
+ if (!channel) {
+ printk(KERN_DEBUG "Invalid channel designation %04X(%04X)\n",
+ bss->a.channel, freq);
+ return; /* Then ignore it for now */
+ }
timestamp = 0;
capability = le16_to_cpu(bss->a.capabilities);
beacon_interval = le16_to_cpu(bss->a.beacon_interv);
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 35b09aa0529b..13d750da9301 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = {
{ .bitrate = 540, .hw_value = 11, },
};
+static struct p54_rssi_db_entry p54_rssi_default = {
+ /*
+ * The defaults are taken from usb-logs of the
+ * vendor driver. So, they should be safe to
+ * use in case we can't get a match from the
+ * rssi <-> dBm conversion database.
+ */
+ .mul = 130,
+ .add = -398,
+};
+
#define CHAN_HAS_CAL BIT(0)
#define CHAN_HAS_LIMIT BIT(1)
#define CHAN_HAS_CURVE BIT(2)
@@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq)
return -1;
}
+static int same_band(u16 freq, u16 freq2)
+{
+ return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
+}
+
static int p54_compare_channels(const void *_a,
const void *_b)
{
const struct p54_channel_entry *a = _a;
const struct p54_channel_entry *b = _b;
- return a->index - b->index;
+ return a->freq - b->freq;
+}
+
+static int p54_compare_rssichan(const void *_a,
+ const void *_b)
+{
+ const struct p54_rssi_db_entry *a = _a;
+ const struct p54_rssi_db_entry *b = _b;
+
+ return a->freq - b->freq;
}
static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
@@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev,
for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
(i < list->entries); i++) {
+ struct p54_channel_entry *chan = &list->channels[i];
- if (list->channels[i].band != band)
+ if (chan->band != band)
continue;
- if (list->channels[i].data != CHAN_HAS_ALL) {
- wiphy_err(dev->wiphy,
- "%s%s%s is/are missing for channel:%d [%d MHz].\n",
- (list->channels[i].data & CHAN_HAS_CAL ? "" :
+ if (chan->data != CHAN_HAS_ALL) {
+ wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
+ "channel:%d [%d MHz].\n",
+ (chan->data & CHAN_HAS_CAL ? "" :
" [iqauto calibration data]"),
- (list->channels[i].data & CHAN_HAS_LIMIT ? "" :
+ (chan->data & CHAN_HAS_LIMIT ? "" :
" [output power limits]"),
- (list->channels[i].data & CHAN_HAS_CURVE ? "" :
+ (chan->data & CHAN_HAS_CURVE ? "" :
" [curve data]"),
- list->channels[i].index, list->channels[i].freq);
+ chan->index, chan->freq);
continue;
}
- tmp->channels[j].band = list->channels[i].band;
- tmp->channels[j].center_freq = list->channels[i].freq;
+ tmp->channels[j].band = chan->band;
+ tmp->channels[j].center_freq = chan->freq;
j++;
}
@@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
}
}
- /* sort the list by the channel index */
+ /* sort the channel list by frequency */
sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
p54_compare_channels, NULL);
@@ -410,33 +436,121 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
"Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
-static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
- u16 type)
+static int p54_parse_rssical(struct ieee80211_hw *dev,
+ u8 *data, int len, u16 type)
{
struct p54_common *priv = dev->priv;
- int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0;
- int entry_size = sizeof(struct pda_rssi_cal_entry) + offset;
- int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
- int i;
+ struct p54_rssi_db_entry *entry;
+ size_t db_len, entries;
+ int offset = 0, i;
+
+ if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+ entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
+ if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
+ wiphy_err(dev->wiphy, "rssical size mismatch.\n");
+ goto err_data;
+ }
+ } else {
+ /*
+ * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
+ * have an empty two byte header.
+ */
+ if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
+ offset += 2;
- if (len != (entry_size * num_entries)) {
- wiphy_err(dev->wiphy,
- "unknown rssi calibration data packing type:(%x) len:%d.\n",
- type, len);
+ entries = (len - offset) /
+ sizeof(struct pda_rssi_cal_ext_entry);
- print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE,
- data, len);
+ if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
+ entries <= 0) {
+ wiphy_err(dev->wiphy, "invalid rssi database.\n");
+ goto err_data;
+ }
+ }
- wiphy_err(dev->wiphy, "please report this issue.\n");
- return;
+ db_len = sizeof(*entry) * entries;
+ priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
+ if (!priv->rssi_db)
+ return -ENOMEM;
+
+ priv->rssi_db->offset = 0;
+ priv->rssi_db->entries = entries;
+ priv->rssi_db->entry_size = sizeof(*entry);
+ priv->rssi_db->len = db_len;
+
+ entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
+ if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+ struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];
+
+ for (i = 0; i < entries; i++) {
+ entry[i].freq = le16_to_cpu(cal[i].freq);
+ entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+ entry[i].add = (s16) le16_to_cpu(cal[i].add);
+ }
+ } else {
+ struct pda_rssi_cal_entry *cal = (void *) &data[offset];
+
+ for (i = 0; i < entries; i++) {
+ u16 freq;
+ switch (i) {
+ case IEEE80211_BAND_2GHZ:
+ freq = 2437;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ freq = 5240;
+ break;
+ }
+
+ entry[i].freq = freq;
+ entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+ entry[i].add = (s16) le16_to_cpu(cal[i].add);
+ }
}
- for (i = 0; i < num_entries; i++) {
- struct pda_rssi_cal_entry *cal = data +
- (offset + i * entry_size);
- priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul);
- priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add);
+ /* sort the list by channel frequency */
+ sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
+ return 0;
+
+err_data:
+ wiphy_err(dev->wiphy,
+ "rssi calibration data packing type:(%x) len:%d.\n",
+ type, len);
+
+ print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);
+
+ wiphy_err(dev->wiphy, "please report this issue.\n");
+ return -EINVAL;
+}
+
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
+{
+ struct p54_rssi_db_entry *entry;
+ int i, found = -1;
+
+ if (!priv->rssi_db)
+ return &p54_rssi_default;
+
+ entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
+ for (i = 0; i < priv->rssi_db->entries; i++) {
+ if (!same_band(freq, entry[i].freq))
+ continue;
+
+ if (found == -1) {
+ found = i;
+ continue;
+ }
+
+ /* nearest match */
+ if (abs(freq - entry[i].freq) <
+ abs(freq - entry[found].freq)) {
+ found = i;
+ continue;
+ } else {
+ break;
+ }
}
+
+ return found < 0 ? &p54_rssi_default : &entry[found];
}
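
p54_rssi_find() walks the frequency-sorted calibration table, restricts candidates to the current band and keeps the entry whose frequency is closest to the requested channel; the selected mul/add pair later drives the rssi-to-dBm conversion in txrx.c, roughly ((rssi * mul) / 64 + add) / 4. Below is a stand-alone sketch of the same nearest-frequency lookup and conversion; the two-entry table and its values are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct rssi_entry {
	unsigned short freq;	/* MHz */
	short mul;
	short add;
};

/* Toy calibration table, sorted by frequency (values invented). */
static const struct rssi_entry db[] = {
	{ 2437, 130, -398 },
	{ 5240, 120, -380 },
};

static int same_band(unsigned short f1, unsigned short f2)
{
	return (f1 < 4000) == (f2 < 4000);	/* crude 2.4 vs 5 GHz split */
}

/* Pick the calibration entry nearest to freq within the same band. */
static const struct rssi_entry *rssi_find(unsigned short freq)
{
	const struct rssi_entry *best = NULL;
	size_t i;

	for (i = 0; i < sizeof(db) / sizeof(db[0]); i++) {
		if (!same_band(freq, db[i].freq))
			continue;
		if (!best || abs(freq - db[i].freq) < abs(freq - best->freq))
			best = &db[i];
	}
	return best ? best : &db[0];	/* fall back to a default entry */
}

static int rssi_to_dbm(const struct rssi_entry *e, int rssi)
{
	return ((rssi * e->mul) / 64 + e->add) / 4;
}

int main(void)
{
	const struct rssi_entry *e = rssi_find(2412);

	printf("rssi 60 on 2412 MHz -> %d dBm\n", rssi_to_dbm(e, 60));
	return 0;
}
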
static void p54_parse_default_country(struct ieee80211_hw *dev,
@@ -627,21 +741,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
case PDR_RSSI_LINEAR_APPROXIMATION:
case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
- p54_parse_rssical(dev, entry->data, data_len,
- le16_to_cpu(entry->code));
+ err = p54_parse_rssical(dev, entry->data, data_len,
+ le16_to_cpu(entry->code));
+ if (err)
+ goto err;
break;
- case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: {
- __le16 *src = (void *) entry->data;
- s16 *dst = (void *) &priv->rssical_db;
+ case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
+ struct pda_custom_wrapper *pda = (void *) entry->data;
+ __le16 *src;
+ u16 *dst;
int i;
- if (data_len != sizeof(priv->rssical_db)) {
- err = -EINVAL;
- goto err;
- }
- for (i = 0; i < sizeof(priv->rssical_db) /
- sizeof(*src); i++)
+ if (priv->rssi_db || data_len < sizeof(*pda))
+ break;
+
+ priv->rssi_db = p54_convert_db(pda, data_len);
+ if (!priv->rssi_db)
+ break;
+
+ src = (void *) priv->rssi_db->data;
+ dst = (void *) priv->rssi_db->data;
+
+ for (i = 0; i < priv->rssi_db->entries; i++)
*(dst++) = (s16) le16_to_cpu(*(src++));
+
}
break;
case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
@@ -717,6 +840,8 @@ good_eeprom:
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
}
+ priv->cur_rssi = &p54_rssi_default;
+
wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
dev->wiphy->perm_addr, priv->version,
p54_rf_chips[priv->rxhw]);
@@ -727,9 +852,11 @@ err:
kfree(priv->iq_autocal);
kfree(priv->output_limit);
kfree(priv->curve_data);
+ kfree(priv->rssi_db);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
priv->curve_data = NULL;
+ priv->rssi_db = NULL;
wiphy_err(dev->wiphy, "eeprom parse failed!\n");
return err;
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index 9051aef11249..afde72b84606 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -81,6 +81,12 @@ struct pda_pa_curve_data {
u8 data[0];
} __packed;
+struct pda_rssi_cal_ext_entry {
+ __le16 freq;
+ __le16 mul;
+ __le16 add;
+} __packed;
+
struct pda_rssi_cal_entry {
__le16 mul;
__le16 add;
@@ -179,6 +185,7 @@ struct pda_custom_wrapper {
/* used by our modificated eeprom image */
#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD
+#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 0xCAFF
#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF
#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 92b9b1f05fd5..2fab7d20ffc2 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
union p54_scan_body_union *body;
struct p54_scan_tail_rate *rate;
struct pda_rssi_cal_entry *rssi;
+ struct p54_rssi_db_entry *rssi_data;
unsigned int i;
void *entry;
- int band = priv->hw->conf.channel->band;
__le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq);
skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
@@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
}
rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
- rssi->mul = cpu_to_le16(priv->rssical_db[band].mul);
- rssi->add = cpu_to_le16(priv->rssical_db[band].add);
+ rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
+ rssi->mul = cpu_to_le16(rssi_data->mul);
+ rssi->add = cpu_to_le16(rssi_data->add);
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
/* Longbow frontend needs ever more */
rssi = (void *) skb_put(skb, sizeof(*rssi));
- rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn);
- rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2);
+ rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
+ rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
}
if (priv->fw_var >= 0x509) {
@@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
p54_tx(priv, skb);
+ priv->cur_rssi = rssi_data;
return 0;
err:
@@ -557,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv)
{
struct sk_buff *skb;
struct p54_edcf *edcf;
+ u8 rtd;
skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
@@ -573,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv)
edcf->sifs = 0x0a;
edcf->eofpad = 0x06;
}
+ /*
+ * calculate the extra round trip delay according to the
+ * formula from 802.11-2007 17.3.8.6.
+ */
+ rtd = 3 * priv->coverage_class;
+ edcf->slottime += rtd;
+ edcf->round_trip_delay = cpu_to_le16(rtd);
/* (see prism54/isl_oid.h for further details) */
edcf->frameburst = cpu_to_le16(0);
- edcf->round_trip_delay = cpu_to_le16(0);
edcf->flags = 0;
memset(edcf->mapping, 0, sizeof(edcf->mapping));
memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
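
The round trip delay added above follows the coverage class rules of 802.11-2007 (17.3.8.6, Table 7-27): every coverage class step adds 3 us of air propagation time, which is folded into the slot time and also reported to the firmware as round_trip_delay. A small sketch of the arithmetic, assuming a 9 us base slot time and classes clamped to the 0-31 range used by p54_set_coverage_class():

#include <stdio.h>

/* Slot time in microseconds for a given coverage class, using the
 * 3 us/step rule from 802.11-2007. The base slot time is an assumed
 * value (9 us short slot); classes outside 0..31 are clamped. */
static unsigned int slot_time_us(unsigned int base_slot, unsigned int coverage_class)
{
	if (coverage_class > 31)
		coverage_class = 31;
	return base_slot + 3 * coverage_class;
}

int main(void)
{
	printf("class 0:  %u us\n", slot_time_us(9, 0));
	printf("class 10: %u us\n", slot_time_us(9, 10));	/* +30 us */
	return 0;
}
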
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 04b63ec80fa4..eb581abc1079 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv);
void p54_unregister_leds(struct p54_common *priv);
/* xmit functions */
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
void p54_tx(struct p54_common *priv, struct sk_buff *skb);
@@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot,
/* eeprom */
int p54_download_eeprom(struct p54_common *priv, void *buf,
u16 offset, u16 len);
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
/* utility */
u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 622d27b6d8f2..356e6bb443a6 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv,
* to cancel the old beacon template by hand, instead the firmware
* will release the previous one through the feedback mechanism.
*/
- WARN_ON(p54_tx_80211(priv->hw, beacon));
+ p54_tx_80211(priv->hw, beacon);
priv->tsf_high32 = 0;
priv->tsf_low32 = 0;
@@ -524,6 +524,59 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
return 0;
}
+static unsigned int p54_flush_count(struct p54_common *priv)
+{
+ unsigned int total = 0, i;
+
+ BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));
+
+ /*
+ * Because the firmware has the sole control over any frames
+ * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
+ * don't really count as pending or active.
+ */
+ for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++)
+ total += priv->tx_stats[i].len;
+ return total;
+}
+
+static void p54_flush(struct ieee80211_hw *dev, bool drop)
+{
+ struct p54_common *priv = dev->priv;
+ unsigned int total, i;
+
+ /*
+ * Currently, it wouldn't really matter if we wait for one second
+ * or 15 minutes. But once someone gets around to completing the
+ * TODOs [cancel stuck frames / reset device] in p54_work, it will
+ * suddenly make sense to wait that long.
+ */
+ i = P54_STATISTICS_UPDATE * 2 / 20;
+
+ /*
+ * In this case no locking is required because as we speak the
+ * queues have already been stopped and no new frames can sneak
+ * up from behind.
+ */
+ while ((total = p54_flush_count(priv)) && i--) {
+ /* waste time */
+ msleep(20);
+ }
+
+ WARN(total, "tx flush timeout, unresponsive firmware");
+}
+
+static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
+{
+ struct p54_common *priv = dev->priv;
+
+ mutex_lock(&priv->conf_mutex);
+ /* support all coverage class values as in 802.11-2007 Table 7-27 */
+ priv->coverage_class = clamp_t(u8, coverage_class, 0, 31);
+ p54_set_edcf(priv);
+ mutex_unlock(&priv->conf_mutex);
+}
+
static const struct ieee80211_ops p54_ops = {
.tx = p54_tx_80211,
.start = p54_start,
@@ -536,11 +589,13 @@ static const struct ieee80211_ops p54_ops = {
.sta_remove = p54_sta_add_remove,
.set_key = p54_set_key,
.config = p54_config,
+ .flush = p54_flush,
.bss_info_changed = p54_bss_info_changed,
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
.get_stats = p54_get_stats,
.get_survey = p54_get_survey,
+ .set_coverage_class = p54_set_coverage_class,
};
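
p54_flush() above polls the driver's own per-queue counters until the firmware has drained every frame the host still owns, sleeping 20 ms between checks and giving up after roughly twice the statistics-update interval, at which point the WARN reports how many frames were left. A user-space sketch of the same poll-with-timeout shape, with an invented pending_frames() counter standing in for p54_flush_count():

#include <stdio.h>
#include <unistd.h>

/* Stand-in for the driver's per-queue accounting; here it simply
 * pretends the hardware drains one frame per call. */
static unsigned int pending = 5;

static unsigned int pending_frames(void)
{
	if (pending)
		pending--;
	return pending;
}

int main(void)
{
	unsigned int total;
	int retries = 50;	/* 50 * 20 ms = 1 s budget */

	while ((total = pending_frames()) && retries--)
		usleep(20 * 1000);	/* waste time, like msleep(20) */

	if (total)
		fprintf(stderr, "flush timeout, %u frames stuck\n", total);
	else
		printf("all frames flushed\n");
	return 0;
}
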
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
@@ -611,7 +666,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
{
- struct p54_common *priv = dev->priv;
+ struct p54_common __maybe_unused *priv = dev->priv;
int err;
err = ieee80211_register_hw(dev);
@@ -642,10 +697,12 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->iq_autocal);
kfree(priv->output_limit);
kfree(priv->curve_data);
+ kfree(priv->rssi_db);
kfree(priv->used_rxkeys);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
priv->curve_data = NULL;
+ priv->rssi_db = NULL;
priv->used_rxkeys = NULL;
ieee80211_free_hw(dev);
}
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 43a3b2ead81a..50730fc23fe5 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -116,7 +116,8 @@ struct p54_edcf_queue_param {
__le16 txop;
} __packed;
-struct p54_rssi_linear_approximation {
+struct p54_rssi_db_entry {
+ u16 freq;
s16 mul;
s16 add;
s16 longbow_unkn;
@@ -197,13 +198,14 @@ struct p54_common {
u8 rx_diversity_mask;
u8 tx_diversity_mask;
unsigned int output_power;
+ struct p54_rssi_db_entry *cur_rssi;
int noise;
/* calibration, output power limit and rssi<->dBm conversation data */
struct pda_iq_autocal_entry *iq_autocal;
unsigned int iq_autocal_len;
struct p54_cal_database *curve_data;
struct p54_cal_database *output_limit;
- struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
+ struct p54_cal_database *rssi_db;
struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
/* BBP/MAC state */
@@ -215,6 +217,7 @@ struct p54_common {
u32 tsf_low32, tsf_high32;
u32 basic_rate_mask;
u16 aid;
+ u8 coverage_class;
bool powersave_override;
__le32 beacon_req_id;
struct completion beacon_comp;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..0494d7b102d4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
u16 len;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
len = priv->common.rx_mtu;
}
+ dma_addr = le32_to_cpu(desc->host_addr);
+ pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
- pci_unmap_single(priv->pdev,
- le32_to_cpu(desc->host_addr),
- priv->common.rx_mtu + 32,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
- desc->host_addr = 0;
+ desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
+ pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
index d592cbd34d78..0b7bfb0adcf2 100644
--- a/drivers/net/wireless/p54/p54spi_eeprom.h
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = {
0x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */
0x08, 0x08, 0x08, 0x08,
-0x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */
- 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x0a, 0x00, 0xff, 0xca, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */
+ 0x01, 0x00, 0x0a, 0x00,
+ 0x00, 0x00, 0x0a, 0x00,
+ 0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
/* struct pda_custom_wrapper */
0x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
@@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = {
0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
0x02, 0x00, 0x00, 0x00, /* PDR_END */
- 0x67, 0x99,
+ 0xb6, 0x04,
};
#endif /* P54SPI_EEPROM_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7638c4..9b344a921e74 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f618b9623e5a..7834c26c2954 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb)
static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
{
- int band = priv->hw->conf.channel->band;
-
if (priv->rxhw != 5) {
- return ((rssi * priv->rssical_db[band].mul) / 64 +
- priv->rssical_db[band].add) / 4;
+ return ((rssi * priv->cur_rssi->mul) / 64 +
+ priv->cur_rssi->add) / 4;
} else {
/*
* TODO: find the correct formula
@@ -369,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
priv->tsf_low32 = tsf32;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
header_len += hdr->align[0];
@@ -698,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher)
}
}
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct p54_common *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -719,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
- if (!IS_QOS_QUEUE(queue)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
+ dev_kfree_skb_any(skb);
+ return;
}
padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
@@ -867,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
p54info->extra_len = extra_len;
p54_tx(priv, skb);
- return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2cce247..518542b4bf9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
__le32 mode;
int ret;
+ if (priv->device_type != RNDIS_BCM4320B)
+ return -ENOTSUPP;
+
netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
enabled ? "enabled" : "disabled",
timeout);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 6f383cd684b0..f630552427b7 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -97,6 +97,18 @@ config RT2800PCI_RT35XX
Support for these devices is non-functional at the moment and is
intended for testers and developers.
+config RT2800PCI_RT53XX
+ bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
+ ---help---
+ This adds support for rt53xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT5390
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
endif
config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 54ca49ad3472..2725f3c4442e 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -46,7 +46,7 @@
* These indirect registers work with busy bits,
* and we will try maximal REGISTER_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
+ * between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
* and we will print an error.
*/
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished before
+ * disabling the interrupts.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ }
}
static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2400pci_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2400pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1183,8 +1208,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -1289,57 +1312,71 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
}
}
-static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ unsigned long flags;
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 1 - Beacon timer expired interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /*
- * 2 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 3 - Atim ring transmit done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
- rt2400pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2400pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ u32 reg;
+ unsigned long flags;
/*
- * 4 - Priority ring transmit done interrupt.
+ * Handle all tx queues.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
- rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2400pci_txdone(rt2x00dev, QID_ATIM);
+ rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2400pci_txdone(rt2x00dev, QID_AC_VI);
/*
- * 5 - Tx ring transmit done interrupt.
+ * Enable all TXDONE interrupts again.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
- rt2400pci_txdone(rt2x00dev, QID_AC_VI);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
- return IRQ_HANDLED;
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
+
+static void rt2400pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2400pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
+ u32 reg, mask;
+ unsigned long flags;
/*
* Get the interrupt sources & save them to a local variable.
@@ -1354,14 +1391,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
+ mask = reg;
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- return IRQ_WAKE_THREAD;
+ if (rt2x00_get_field32(reg, CSR7_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ /*
+ * Mask out all txdone interrupts.
+ */
+ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+ }
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklets will re-enable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
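The change above replaces the threaded IRQ handler with a split design: the hard interrupt handler only acknowledges CSR7, schedules the matching tasklets and masks their sources in CSR8 under irqmask_lock, while each tasklet does the actual work and unmasks its source when done. A self-contained sketch of that shape, with made-up register offsets and a hypothetical device structure (not the driver code itself), looks like this:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define EX_INT_STATUS	0x00	/* hypothetical interrupt status register */
#define EX_INT_MASK	0x04	/* hypothetical interrupt mask register */
#define EX_RXDONE	0x00000001

struct ex_dev {
	void __iomem *regs;
	spinlock_t irqmask_lock;
	struct tasklet_struct rxdone_tasklet;
};

static void ex_rxdone_tasklet(unsigned long data)
{
	struct ex_dev *dev = (struct ex_dev *)data;
	unsigned long flags;

	/* ... drain the RX ring here ... */

	/* Work done: unmask RXDONE again (a set bit means masked). */
	spin_lock_irqsave(&dev->irqmask_lock, flags);
	writel(readl(dev->regs + EX_INT_MASK) & ~EX_RXDONE,
	       dev->regs + EX_INT_MASK);
	spin_unlock_irqrestore(&dev->irqmask_lock, flags);
}

static irqreturn_t ex_interrupt(int irq, void *dev_instance)
{
	struct ex_dev *dev = dev_instance;
	unsigned long flags;
	u32 pending = readl(dev->regs + EX_INT_STATUS);

	if (!pending)
		return IRQ_NONE;

	/* Acknowledge the sources we are about to handle. */
	writel(pending, dev->regs + EX_INT_STATUS);

	if (pending & EX_RXDONE) {
		tasklet_schedule(&dev->rxdone_tasklet);

		/* Mask RXDONE until the tasklet has finished. */
		spin_lock_irqsave(&dev->irqmask_lock, flags);
		writel(readl(dev->regs + EX_INT_MASK) | EX_RXDONE,
		       dev->regs + EX_INT_MASK);
		spin_unlock_irqrestore(&dev->irqmask_lock, flags);
	}

	return IRQ_HANDLED;
}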
/*
@@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
.irq_handler = rt2400pci_interrupt,
- .irq_handler_thread = rt2400pci_interrupt_thread,
+ .txstatus_tasklet = rt2400pci_txstatus_tasklet,
+ .tbtt_tasklet = rt2400pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2400pci_rxdone_tasklet,
.probe_hw = rt2400pci_probe_hw,
.initialize = rt2x00pci_initialize,
.uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a9ff26a27724..3ef1fb4185c0 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+ /*
+ * Wait for a possibly running tbtt tasklet to finish.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
* Only toggle the interrupt bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ }
}
static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2500pci_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2500pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1337,8 +1361,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -1422,58 +1444,71 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
}
}
-static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ unsigned long flags;
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 1 - Beacon timer expired interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /*
- * 2 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 3 - Atim ring transmit done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
- rt2500pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2500pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ u32 reg;
+ unsigned long flags;
/*
- * 4 - Priority ring transmit done interrupt.
+ * Handle all tx queues.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
- rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2500pci_txdone(rt2x00dev, QID_ATIM);
+ rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2500pci_txdone(rt2x00dev, QID_AC_VI);
/*
- * 5 - Tx ring transmit done interrupt.
+ * Enable all TXDONE interrupts again.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
- rt2500pci_txdone(rt2x00dev, QID_AC_VI);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- return IRQ_HANDLED;
+static void rt2500pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2500pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
+ u32 reg, mask;
+ unsigned long flags;
/*
* Get the interrupt sources & save them to a local variable.
@@ -1488,14 +1523,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
+ mask = reg;
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- return IRQ_WAKE_THREAD;
+ if (rt2x00_get_field32(reg, CSR7_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ /*
+ * Mask out all txdone interrupts.
+ */
+ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+ }
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklets will re-enable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
/*
@@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
.irq_handler = rt2500pci_interrupt,
- .irq_handler_thread = rt2500pci_interrupt_thread,
+ .txstatus_tasklet = rt2500pci_txstatus_tasklet,
+ .tbtt_tasklet = rt2500pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2500pci_rxdone_tasklet,
.probe_hw = rt2500pci_probe_hw,
.initialize = rt2x00pci_initialize,
.uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 6b3b1de46792..01f385d5846c 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
- rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
- rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
@@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2500usb_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4c55e8525cad..6f4a2432c021 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5390 2.4G 1T1R
*/
#define RF2820 0x0001
#define RF2850 0x0002
@@ -65,6 +66,7 @@
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3853 0x000d
+#define RF5390 0x5390
/*
* Chipset revisions.
@@ -77,6 +79,7 @@
#define REV_RT3071E 0x0211
#define REV_RT3090E 0x0211
#define REV_RT3390E 0x0211
+#define REV_RT5390F 0x0502
/*
* Signal information.
@@ -121,6 +124,13 @@
#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
/*
+ * AUX_CTRL: Aux/PCI-E related configuration
+ */
+#define AUX_CTRL 0x10c
+#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
+#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
+
+/*
* OPT_14: Unknown register used by rt3xxx devices.
*/
#define OPT_14_CSR 0x0114
@@ -270,6 +280,7 @@
/*
* GPIO_CTRL_CFG:
+ * GPIOD: GPIO direction, 0: Output, 1: Input
*/
#define GPIO_CTRL_CFG 0x0228
#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
@@ -280,7 +291,14 @@
#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200)
+#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400)
+#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800)
+#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000)
+#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000)
+#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000)
+#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000)
/*
* MCU_CMD_CFG
@@ -372,8 +390,12 @@
/*
* US_CYC_CNT
+ * BT_MODE_EN: Bluetooth mode enable
+ * CLOCK CYCLE: Number of clock cycles in 1us
+ * (PCI: 0x21, PCIe: 0x7d, USB: 0x1e)
*/
#define US_CYC_CNT 0x02a4
+#define US_CYC_CNT_BT_MODE_EN FIELD32(0x00000100)
#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
/*
@@ -442,7 +464,7 @@
*/
#define RF_CSR_CFG 0x0500
#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
-#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
+#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
@@ -1132,8 +1154,8 @@
* PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
* PROTECT_CTRL: Protection control frame type for CCK TX
* 0:none, 1:RTS/CTS, 2:CTS-to-self
- * PROTECT_NAV: TXOP protection type for CCK TX
- * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV
+ * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV
* TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
* TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
* TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
@@ -1145,7 +1167,8 @@
#define CCK_PROT_CFG 0x1364
#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define CCK_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1160,7 +1183,8 @@
#define OFDM_PROT_CFG 0x1368
#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1175,7 +1199,8 @@
#define MM20_PROT_CFG 0x136c
#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define MM20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1190,7 +1215,8 @@
#define MM40_PROT_CFG 0x1370
#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define MM40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1205,7 +1231,8 @@
#define GF20_PROT_CFG 0x1374
#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define GF20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1220,7 +1247,8 @@
#define GF40_PROT_CFG 0x1378
#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define GF40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1697,11 +1725,14 @@ struct mac_iveiv_entry {
*/
/*
- * BBP 1: TX Antenna & Power
- * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
- * 3 - increase tx power by 6dBm
+ * BBP 1: TX Antenna & Power Control
+ * POWER_CTRL:
+ * 0 - normal,
+ * 1 - drop tx power by 6dBm,
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
*/
-#define BBP1_TX_POWER FIELD8(0x07)
+#define BBP1_TX_POWER_CTRL FIELD8(0x07)
#define BBP1_TX_ANTENNA FIELD8(0x18)
/*
@@ -1715,6 +1746,13 @@ struct mac_iveiv_entry {
*/
#define BBP4_TX_BF FIELD8(0x01)
#define BBP4_BANDWIDTH FIELD8(0x18)
+#define BBP4_MAC_IF_CTRL FIELD8(0x40)
+
+/*
+ * BBP 109
+ */
+#define BBP109_TX0_POWER FIELD8(0x0f)
+#define BBP109_TX1_POWER FIELD8(0xf0)
/*
* BBP 138: Unknown
@@ -1725,6 +1763,11 @@ struct mac_iveiv_entry {
#define BBP138_TX_DAC2 FIELD8(0x40)
/*
+ * BBP 152: Rx Ant
+ */
+#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
+
+/*
* RFCSR registers
* The wordsize of the RFCSR is 8 bits.
*/
@@ -1733,12 +1776,18 @@ struct mac_iveiv_entry {
* RFCSR 1:
*/
#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
+#define RFCSR1_PLL_PD FIELD8(0x02)
#define RFCSR1_RX0_PD FIELD8(0x04)
#define RFCSR1_TX0_PD FIELD8(0x08)
#define RFCSR1_RX1_PD FIELD8(0x10)
#define RFCSR1_TX1_PD FIELD8(0x20)
/*
+ * RFCSR 2:
+ */
+#define RFCSR2_RESCAL_EN FIELD8(0x80)
+
+/*
* RFCSR 6:
*/
#define RFCSR6_R1 FIELD8(0x03)
@@ -1750,6 +1799,11 @@ struct mac_iveiv_entry {
#define RFCSR7_RF_TUNING FIELD8(0x01)
/*
+ * RFCSR 11:
+ */
+#define RFCSR11_R FIELD8(0x03)
+
+/*
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
@@ -1770,6 +1824,7 @@ struct mac_iveiv_entry {
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
#define RFCSR17_TX_LO1_EN FIELD8(0x08)
#define RFCSR17_R FIELD8(0x20)
+#define RFCSR17_CODE FIELD8(0x7f)
/*
* RFCSR 20:
@@ -1802,9 +1857,33 @@ struct mac_iveiv_entry {
/*
* RFCSR 30:
*/
+#define RFCSR30_TX_H20M FIELD8(0x02)
+#define RFCSR30_RX_H20M FIELD8(0x04)
+#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
/*
+ * RFCSR 31:
+ */
+#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
+#define RFCSR31_RX_H20M FIELD8(0x20)
+
+/*
+ * RFCSR 38:
+ */
+#define RFCSR38_RX_LO1_EN FIELD8(0x20)
+
+/*
+ * RFCSR 39:
+ */
+#define RFCSR39_RX_LO2_EN FIELD8(0x80)
+
+/*
+ * RFCSR 49:
+ */
+#define RFCSR49_TX FIELD8(0x3f)
+
+/*
* RF registers
*/
@@ -1837,6 +1916,11 @@ struct mac_iveiv_entry {
*/
/*
+ * Chip ID
+ */
+#define EEPROM_CHIP_ID 0x0000
+
+/*
* EEPROM Version
*/
#define EEPROM_VERSION 0x0001
@@ -1989,23 +2073,26 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
/*
- * EEPROM Maximum TX power values
+ * EEPROM EIRP maximum TX power values (unit: dBm)
*/
-#define EEPROM_MAX_TX_POWER 0x0027
-#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff)
-#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00)
+#define EEPROM_EIRP_MAX_TX_POWER 0x0027
+#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
+#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
/*
* EEPROM TX power delta: 20MHz and 40MHz use different power.
* This is the delta for 40MHz.
- * VALUE: Tx Power dalta value (MAX=4)
+ * VALUE: TX power delta value, MAX=4 (unit: dBm)
* TYPE: 1: Plus the delta value, 0: minus the delta value
- * TXPOWER: Enable:
+ * ENABLE: enable TX power compensation for 40MHz bandwidth
*/
#define EEPROM_TXPOWER_DELTA 0x0028
-#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
-#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
-#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_5G FIELD16(0x3f00)
+#define EEPROM_TXPOWER_DELTA_TYPE_5G FIELD16(0x4000)
+#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000)
/*
* EEPROM TXPOWER 802.11BG
@@ -2058,6 +2145,7 @@ struct mac_iveiv_entry {
#define MCU_LED_LED_POLARITY 0x54
#define MCU_RADAR 0x60
#define MCU_BOOT_SIGNAL 0x72
+#define MCU_ANT_SELECT 0x73
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
@@ -2202,4 +2290,9 @@ struct mac_iveiv_entry {
#define TXPOWER_A_TO_DEV(__txpower) \
clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
+/*
+ * Board's maximum TX power limitation
+ */
+#define EIRP_MAX_TX_POWER_LIMIT 0x50
+
#endif /* RT2800_H */
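Nearly all of the rt2800.h changes above are FIELD32()/FIELD16()/FIELD8() definitions, which describe a register field purely by its bit mask; the rt2x00_set_field32()/rt2x00_get_field32() accessors seen throughout the .c hunks then shift values in and out of that mask. The standalone C snippet below is only a rough illustration of that idea (it is not the rt2x00 implementation), using the widened RF_CSR_CFG_REGNUM mask from this patch as the example field:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for FIELD32(): a field is just its bit mask. */
struct field32 {
	uint32_t mask;
};
#define FIELD32(m)	((struct field32){ .mask = (m) })

/* The lowest set bit of the mask gives the shift of the field. */
static unsigned int field_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (mask && !(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

static void set_field32(uint32_t *reg, struct field32 f, uint32_t value)
{
	unsigned int shift = field_shift(f.mask);

	*reg = (*reg & ~f.mask) | ((value << shift) & f.mask);
}

static uint32_t get_field32(uint32_t reg, struct field32 f)
{
	return (reg & f.mask) >> field_shift(f.mask);
}

int main(void)
{
	/* e.g. the widened RF_CSR_CFG_REGNUM field: bits 8..13 */
	struct field32 regnum = FIELD32(0x00003f00);
	uint32_t reg = 0;

	set_field32(&reg, regnum, 0x2a);
	printf("reg = 0x%08x, regnum = %u\n", reg, get_field32(reg, regnum));
	return 0;
}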
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 54917a281398..3da78bf0ca26 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -400,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
if (rt2800_wait_csr_ready(rt2x00dev))
return -EBUSY;
- if (rt2x00_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev)) {
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
+ }
/*
* Disable DMA, will be reenabled later when enabling
@@ -773,13 +780,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
unsigned int padding_len;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ orig_reg = reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -810,7 +818,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
* Write entire beacon with TXWI and padding to register.
*/
padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
- skb_pad(entry->skb, padding_len);
+ if (padding_len && skb_pad(entry->skb, padding_len)) {
+ ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+ /* skb freed by skb_pad() on failure */
+ entry->skb = NULL;
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
+ return;
+ }
+
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
entry->skb->len + padding_len);
@@ -818,8 +833,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -831,8 +844,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
}
EXPORT_SYMBOL_GPL(rt2800_write_beacon);
-static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
- unsigned int beacon_base)
+static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
+ unsigned int beacon_base)
{
int i;
@@ -845,6 +858,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, beacon_base + i, 0);
}
+void rt2800_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
+ * Clear beacon.
+ */
+ rt2800_clear_beacon_register(rt2x00dev,
+ HW_BEACON_OFFSET(entry->entry_idx));
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -1005,7 +1045,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
memset(&wcid_entry, 0, sizeof(wcid_entry));
if (crypto->cmd == SET_KEY)
- memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+ memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
rt2800_register_multiwrite(rt2x00dev, offset,
&wcid_entry, sizeof(wcid_entry));
}
@@ -1155,29 +1195,11 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- */
- rt2800_clear_beacon(rt2x00dev,
- HW_BEACON_OFFSET(intf->beacon->entry_idx));
- /*
* Enable synchronisation.
*/
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
- (conf->sync == TSF_SYNC_ADHOC ||
- conf->sync == TSF_SYNC_AP_NONE));
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Enable pre tbtt interrupt for beaconing modes
- */
- rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
- (conf->sync == TSF_SYNC_AP_NONE));
- rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
}
if (flags & CONFIG_UPDATE_MAC) {
@@ -1361,10 +1383,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
+static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
+ enum antenna ant)
+{
+ u32 reg;
+ u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
+ u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
+
+ if (rt2x00_is_pci(rt2x00dev)) {
+ rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
+ rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
+ } else if (rt2x00_is_usb(rt2x00dev))
+ rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
+ eesk_pin, 0);
+
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+}
+
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
{
u8 r1;
u8 r3;
+ u16 eeprom;
rt2800_bbp_read(rt2x00dev, 1, &r1);
rt2800_bbp_read(rt2x00dev, 3, &r3);
@@ -1372,7 +1416,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
/*
* Configure the TX antenna.
*/
- switch ((int)ant->tx) {
+ switch (ant->tx_chain_num) {
case 1:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
break;
@@ -1387,8 +1431,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
/*
* Configure the RX antenna.
*/
- switch ((int)ant->rx) {
+ switch (ant->rx_chain_num) {
case 1:
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_eeprom_read(rt2x00dev,
+ EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_ANT_DIVERSITY))
+ rt2800_set_ant_diversity(rt2x00dev,
+ rt2x00dev->default_ant.rx);
+ }
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
break;
case 2:
@@ -1434,13 +1488,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
{
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- if (rt2x00dev->default_ant.tx == 1)
+ if (rt2x00dev->default_ant.tx_chain_num == 1)
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
- if (rt2x00dev->default_ant.rx == 1) {
+ if (rt2x00dev->default_ant.rx_chain_num == 1) {
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
- } else if (rt2x00dev->default_ant.rx == 2)
+ } else if (rt2x00dev->default_ant.rx_chain_num == 2)
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
if (rf->channel > 14) {
@@ -1526,6 +1580,99 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
+
+#define RT5390_POWER_BOUND 0x27
+#define RT5390_FREQ_OFFSET_BOUND 0x5f
+
+static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+ u16 eeprom;
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (info->default_power1 > RT5390_POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, RT5390_FREQ_OFFSET_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rf->channel <= 14) {
+ int idx = rf->channel-1;
+
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ /* r55/r59 value arrays for channels 1-14 */
+ static const char r55_bt_rev[] = {0x83, 0x83,
+ 0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
+ 0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
+ static const char r59_bt_rev[] = {0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
+
+ rt2800_rfcsr_write(rt2x00dev, 55, r55_bt_rev[idx]);
+ rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]);
+ } else {
+ static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
+ 0x88, 0x88, 0x86, 0x85, 0x84};
+
+ rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
+ }
+ } else {
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ static const char r55_nonbt_rev[] = {0x23, 0x23,
+ 0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
+ static const char r59_nonbt_rev[] = {0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
+
+ rt2800_rfcsr_write(rt2x00dev, 55, r55_nonbt_rev[idx]);
+ rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]);
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ static const char r59_non_bt[] = {0x8f, 0x8f,
+ 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
+ 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
+
+ rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]);
+ }
+ }
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+}
+
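In rt2800_config_channel_rf53xx() above, both the TX power and the frequency offset are simply clamped to an upper bound before being written. The two if/else blocks are roughly equivalent to the following (sketch only; assumes the input values are non-negative):

/* Equivalent form of the two bound checks above, for readability only. */
rt2x00_set_field8(&rfcsr, RFCSR49_TX,
		  min_t(u8, info->default_power1, RT5390_POWER_BOUND));

rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
		  min_t(u8, rt2x00dev->freq_offset, RT5390_FREQ_OFFSET_BOUND));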
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -1550,6 +1697,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_rf(rt2x00dev, RF3052) ||
rt2x00_rf(rt2x00dev, RF3320))
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
+ else if (rt2x00_rf(rt2x00dev, RF5390))
+ rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -1562,12 +1711,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 86, 0);
if (rf->channel <= 14) {
- if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
- rt2800_bbp_write(rt2x00dev, 82, 0x62);
- rt2800_bbp_write(rt2x00dev, 75, 0x46);
- } else {
- rt2800_bbp_write(rt2x00dev, 82, 0x84);
- rt2800_bbp_write(rt2x00dev, 75, 0x50);
+ if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 82, 0x84);
+ rt2800_bbp_write(rt2x00dev, 75, 0x50);
+ }
}
} else {
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@@ -1587,13 +1738,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
tx_pin = 0;
/* Turn on unused PA or LNA when not using 1T or 1R */
- if (rt2x00dev->default_ant.tx != 1) {
+ if (rt2x00dev->default_ant.tx_chain_num == 2) {
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
}
/* Turn on unused PA or LNA when not using 1T or 1R */
- if (rt2x00dev->default_ant.rx != 1) {
+ if (rt2x00dev->default_ant.rx_chain_num == 2) {
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
}
@@ -1637,30 +1788,116 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
}
+static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
+ enum ieee80211_band band)
+{
+ u16 eeprom;
+ u8 comp_en;
+ u8 comp_type;
+ int comp_value;
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+
+ if (eeprom == 0xffff)
+ return 0;
+
+ if (band == IEEE80211_BAND_2GHZ) {
+ comp_en = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_ENABLE_2G);
+ if (comp_en) {
+ comp_type = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_TYPE_2G);
+ comp_value = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_VALUE_2G);
+ if (!comp_type)
+ comp_value = -comp_value;
+ }
+ } else {
+ comp_en = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_ENABLE_5G);
+ if (comp_en) {
+ comp_type = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_TYPE_5G);
+ comp_value = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_VALUE_5G);
+ if (!comp_type)
+ comp_value = -comp_value;
+ }
+ }
+
+ return comp_value;
+}
+
+static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
+ int is_rate_b,
+ enum ieee80211_band band,
+ int power_level,
+ u8 txpower)
+{
+ u32 reg;
+ u16 eeprom;
+ u8 criterion;
+ u8 eirp_txpower;
+ u8 eirp_txpower_criterion;
+ u8 reg_limit;
+ int bw_comp = 0;
+
+ if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
+ return txpower;
+
+ if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+ bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+ if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
+ /*
+ * Check if the EIRP txpower exceeds the txpower limit.
+ * We use OFDM 6M as the criterion; its EIRP txpower
+ * is stored at EEPROM_EIRP_MAX_TX_POWER.
+ * 802.11b data rates need an additional 4dBm
+ * when calculating the EIRP txpower.
+ */
+ rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+ criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
+
+ rt2x00_eeprom_read(rt2x00dev,
+ EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+ if (band == IEEE80211_BAND_2GHZ)
+ eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+ EEPROM_EIRP_MAX_TX_POWER_2GHZ);
+ else
+ eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+ EEPROM_EIRP_MAX_TX_POWER_5GHZ);
+
+ eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
+ (is_rate_b ? 4 : 0) + bw_comp;
+
+ reg_limit = (eirp_txpower > power_level) ?
+ (eirp_txpower - power_level) : 0;
+ } else
+ reg_limit = 0;
+
+ return txpower + bw_comp - reg_limit;
+}
+
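As a quick sanity check of the arithmetic in rt2800_compesate_txpower(), here is one hypothetical set of inputs traced through the power-limit branch (all numbers invented for illustration, not taken from any real EEPROM):

/*
 * Hypothetical example:
 *   criterion              = 6   (TX_PWR_CFG_0_6MBS register value)
 *   eirp_txpower_criterion = 15  (EEPROM_EIRP_MAX_TX_POWER_*, dBm)
 *   txpower                = 8   (per-rate value from EEPROM_TXPOWER_BYRATE)
 *   bw_comp                = 2   (HT40 delta from EEPROM_TXPOWER_DELTA)
 *   power_level            = 14  (limit requested via mac80211, dBm)
 *
 * For a non-.11b rate:
 *   eirp_txpower = 15 + (8 - 6) + 0 + 2 = 19 dBm
 *   reg_limit    = 19 - 14 = 5
 *   returned     = 8 + 2 - 5 = 5   (register value actually programmed)
 *
 * A .11b rate would add the extra 4dBm to eirp_txpower, increasing
 * reg_limit and therefore lowering the returned value accordingly.
 */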
static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
- const int max_txpower)
+ struct ieee80211_conf *conf)
{
u8 txpower;
- u8 max_value = (u8)max_txpower;
u16 eeprom;
- int i;
+ int i, is_rate_b;
u32 reg;
u8 r1;
u32 offset;
+ enum ieee80211_band band = conf->channel->band;
+ int power_level = conf->power_level;
/*
- * set to normal tx power mode: +/- 0dBm
+ * Set to normal BBP TX power control mode: +/- 0dBm
*/
rt2800_bbp_read(rt2x00dev, 1, &r1);
- rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
rt2800_bbp_write(rt2x00dev, 1, r1);
-
- /*
- * The eeprom contains the tx power values for each rate. These
- * values map to 100% tx power. Each 16bit word contains four tx
- * power values and the order is the same as used in the TX_PWR_CFG
- * registers.
- */
offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
@@ -1674,73 +1911,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
&eeprom);
- /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
+ is_rate_b = i ? 0 : 1;
+ /*
+ * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
* TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
- /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
+ /*
+ * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
* TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE1);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
- /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS,
+ /*
+ * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
* TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE2);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
- /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
+ /*
+ * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
* TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE3);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
/* read the next four txpower values */
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
&eeprom);
- /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
+ is_rate_b = 0;
+ /*
+ * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
* TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
- /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
+ /*
+ * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
* TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE1);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
- /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
+ /*
+ * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
* TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE2);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
- /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
+ /*
+ * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
* TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE3);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
rt2800_register_write(rt2x00dev, offset, reg);
@@ -1799,11 +2062,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
/* Always recalculate LNA gain before changing configuration */
rt2800_config_lna_gain(rt2x00dev, libconf);
- if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+ if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
rt2800_config_channel(rt2x00dev, libconf->conf,
&libconf->rf, &libconf->channel);
+ rt2800_config_txpower(rt2x00dev, libconf->conf);
+ }
if (flags & IEEE80211_CONF_CHANGE_POWER)
- rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
+ rt2800_config_txpower(rt2x00dev, libconf->conf);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt2800_config_retry_limit(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -1832,7 +2097,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390))
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT5390))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1964,6 +2230,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2032,7 +2302,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2045,7 +2315,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2058,7 +2328,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2071,7 +2341,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2084,7 +2354,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2097,7 +2367,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2187,19 +2457,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
/*
* Clear all beacons
*/
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
+ rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -2335,15 +2609,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
- if (rt2800_is_305x_soc(rt2x00dev))
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_bbp_read(rt2x00dev, 4, &value);
+ rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
+ rt2800_bbp_write(rt2x00dev, 4, value);
+ }
+
+ if (rt2800_is_305x_soc(rt2x00dev) ||
+ rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x13);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
+ rt2800_bbp_write(rt2x00dev, 76, 0x28);
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
} else {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -2354,7 +2644,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -2366,35 +2657,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+ else
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ else if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
- rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ else
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
else
rt2800_bbp_write(rt2x00dev, 103, 0x00);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+
if (rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 106, 0x03);
+ else
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_read(rt2x00dev, 138, &value);
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -2406,6 +2724,41 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 138, value);
}
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ int ant, div_mode;
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ ant = (div_mode == 3) ? 1 : 0;
+
+ /* check if this is a Bluetooth combo card */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+ u32 reg;
+
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+ if (ant == 0)
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+ else if (ant == 1)
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+ }
+
+ rt2800_bbp_read(rt2x00dev, 152, &value);
+ if (ant == 0)
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+ else
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+ rt2800_bbp_write(rt2x00dev, 152, value);
+
+ /* Init frequency calibration */
+ rt2800_bbp_write(rt2x00dev, 142, 1);
+ rt2800_bbp_write(rt2x00dev, 143, 57);
+ }
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -2436,6 +2789,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
rt2800_bbp_write(rt2x00dev, 4, bbp);
+ rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
+ rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2491,18 +2848,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2x00_rt(rt2x00dev, RT5390) &&
!rt2800_is_305x_soc(rt2x00dev))
return 0;
/*
* Init RF calibration.
*/
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+ } else {
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ }
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
@@ -2510,7 +2877,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
@@ -2593,6 +2960,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
return 0;
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -2602,12 +3050,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
-
rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2619,6 +3067,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
}
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
} else if (rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2642,21 +3094,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
}
- /*
- * Set back to initial state
- */
- rt2800_bbp_write(rt2x00dev, 24, 0);
+ if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ /*
+ * Set back to initial state
+ */
+ rt2800_bbp_write(rt2x00dev, 24, 0);
- rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
- rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+ rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
- /*
- * set BBP back to BW20
- */
- rt2800_bbp_read(rt2x00dev, 4, &bbp);
- rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
- rt2800_bbp_write(rt2x00dev, 4, bbp);
+ /*
+ * Set BBP back to BW20
+ */
+ rt2800_bbp_read(rt2x00dev, 4, &bbp);
+ rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+ rt2800_bbp_write(rt2x00dev, 4, bbp);
+ }
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2668,24 +3122,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
- if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
- rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
- }
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
- rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
- rt2x00_get_field16(eeprom,
- EEPROM_TXMIXER_GAIN_BG_VAL));
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
+ rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+ }
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ rt2x00_get_field16(eeprom,
+ EEPROM_TXMIXER_GAIN_BG_VAL));
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ }
if (rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_bbp_read(rt2x00dev, 138, &bbp);
+ /* Turn off unused DAC1 and ADC1 to reduce power consumption */
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
@@ -2719,10 +3177,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
}
- if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+ if (rt2x00_rt(rt2x00dev, RT3070)) {
rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
- if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
else
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2732,6 +3189,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ }
+
return 0;
}
@@ -2810,10 +3281,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
/* Wait for DMA, ignore error */
@@ -2823,9 +3291,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
}
EXPORT_SYMBOL_GPL(rt2800_disable_radio);
@@ -2986,13 +3451,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
default_lna_gain);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
- if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
- rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
- if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
- rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
-
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -3009,10 +3467,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
- * Identify RF chipset.
+ * Identify RF chipset by EEPROM value
+ * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
+ * RT53xx: defined in "EEPROM_CHIP_ID" field
*/
- value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+ if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
+ else
+ value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@@ -3024,7 +3487,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
- !rt2x00_rt(rt2x00dev, RT3572)) {
+ !rt2x00_rt(rt2x00dev, RT3572) &&
+ !rt2x00_rt(rt2x00dev, RT5390)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -3038,7 +3502,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rf(rt2x00dev, RF3021) &&
!rt2x00_rf(rt2x00dev, RF3022) &&
!rt2x00_rf(rt2x00dev, RF3052) &&
- !rt2x00_rf(rt2x00dev, RF3320)) {
+ !rt2x00_rf(rt2x00dev, RF3320) &&
+ !rt2x00_rf(rt2x00dev, RF5390)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -3046,11 +3511,35 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Identify default antenna configuration.
*/
- rt2x00dev->default_ant.tx =
+ rt2x00dev->default_ant.tx_chain_num =
rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
- rt2x00dev->default_ant.rx =
+ rt2x00dev->default_ant.rx_chain_num =
rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ value = rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ switch (value) {
+ case 0:
+ case 1:
+ case 2:
+ rt2x00dev->default_ant.tx = ANTENNA_A;
+ rt2x00dev->default_ant.rx = ANTENNA_A;
+ break;
+ case 3:
+ rt2x00dev->default_ant.tx = ANTENNA_A;
+ rt2x00dev->default_ant.rx = ANTENNA_B;
+ break;
+ }
+ } else {
+ rt2x00dev->default_ant.tx = ANTENNA_A;
+ rt2x00dev->default_ant.rx = ANTENNA_A;
+ }
+
/*
* Read frequency offset and RF programming sequence.
*/
@@ -3084,6 +3573,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
#endif /* CONFIG_RT2X00_LIB_LEDS */
+ /*
+ * Check if the EIRP tx power limit feature is supported.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+ if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
+ EIRP_MAX_TX_POWER_LIMIT)
+ __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
@@ -3236,7 +3734,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power1;
char *default_power2;
unsigned int i;
- unsigned short max_power;
u16 eeprom;
/*
@@ -3303,7 +3800,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3320)) {
+ rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
} else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3361,26 +3859,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
- max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
for (i = 0; i < 14; i++) {
- info[i].max_power = max_power;
- info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
- info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
+ info[i].default_power1 = default_power1[i];
+ info[i].default_power2 = default_power2[i];
}
if (spec->num_channels > 14) {
- max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
for (i = 14; i < spec->num_channels; i++) {
- info[i].max_power = max_power;
- info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
- info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
+ info[i].default_power1 = default_power1[i];
+ info[i].default_power2 = default_power2[i];
}
}
@@ -3530,7 +4023,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
int ret = 0;
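
The rt2800lib.c hunks above lean heavily on the rt2x00 bit-field helpers (rt2x00_get_field16, rt2x00_set_field8 and friends) to pull values such as EEPROM_NIC_CONF1_ANT_DIVERSITY out of EEPROM words and registers. The following is a rough, standalone sketch of how such mask/shift accessors behave; the mask and shift values are invented for illustration, since the real field definitions live in the driver headers and are not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field: bits 3..4 of a 16-bit EEPROM word (real layout differs). */
#define ANT_DIVERSITY_MASK      0x0018u
#define ANT_DIVERSITY_SHIFT     3

static uint16_t get_field16(uint16_t word, uint16_t mask, int shift)
{
        return (uint16_t)((word & mask) >> shift);
}

static void set_field16(uint16_t *word, uint16_t mask, int shift, uint16_t val)
{
        *word = (uint16_t)((*word & ~mask) | ((val << shift) & mask));
}

int main(void)
{
        uint16_t eeprom = 0x0018;       /* both diversity bits set, i.e. mode 3 */
        unsigned int div_mode = get_field16(eeprom, ANT_DIVERSITY_MASK, ANT_DIVERSITY_SHIFT);
        int ant = (div_mode == 3) ? 1 : 0;      /* same decision as the RT5390 hunk */

        printf("div_mode=%u ant=%d\n", div_mode, ant);

        set_field16(&eeprom, ANT_DIVERSITY_MASK, ANT_DIVERSITY_SHIFT, 0);
        printf("eeprom after clearing the field: 0x%04x\n", (unsigned int)eeprom);
        return 0;
}

Built with any C compiler this prints "div_mode=3 ant=1", mirroring the antenna choice made for diversity mode 3 in the RT5390 init code above.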
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index e3c995a9dec4..0c92d86a36f4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+void rt2800_clear_beacon(struct queue_entry *entry);
extern const struct rt2x00debug rt2800_rt2x00debug;
@@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
u64 rt2800_get_tsf(struct ieee80211_hw *hw);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971a38af..38605e9fe427 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue)
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
+ /*
+ * Allow beacon tasklets to be scheduled for periodic
+ * beacon updates.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+ tasklet_enable(&rt2x00dev->pretbtt_tasklet);
+
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+ rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
break;
default:
break;
@@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+ rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+ /*
+ * Wait for tbtt tasklets to finish.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ tasklet_disable(&rt2x00dev->pretbtt_tasklet);
break;
default:
break;
@@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_ON) ||
- (state == STATE_RADIO_IRQ_ON_ISR);
+ int mask = (state == STATE_RADIO_IRQ_ON);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ /*
+ * Enable tasklets. The beacon related tasklets are
+ * enabled when the beacon queue is started.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
+ tasklet_enable(&rt2x00dev->autowake_tasklet);
}
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished before
+ * disabling the interrupts.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_disable(&rt2x00dev->autowake_tasklet);
+ }
}
static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -452,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -475,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
- u32 reg;
-
- rt2800_disable_radio(rt2x00dev);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
-
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+ if (rt2x00_is_soc(rt2x00dev)) {
+ rt2800_disable_radio(rt2x00dev);
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
+ }
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- /*
- * Always put the device to sleep (even when we intend to wakeup!)
- * if the device is booting and wasn't asleep it will return
- * failure when attempting to wakeup.
- */
- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
-
if (state == STATE_AWAKE) {
- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+ } else if (state == STATE_SLEEP) {
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
}
return 0;
@@ -538,9 +570,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2800pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -652,6 +682,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -726,45 +762,60 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
}
}
-static void rt2800pci_txstatus_tasklet(unsigned long data)
+static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- rt2800pci_txdone((struct rt2x00_dev *)data);
-}
-
-static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
-{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ unsigned long flags;
+ u32 reg;
/*
- * 1 - Pre TBTT interrupt.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
- rt2x00lib_pretbtt(rt2x00dev);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 1);
+ rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 2 - Beacondone interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
- rt2x00lib_beacondone(rt2x00dev);
+static void rt2800pci_txstatus_tasklet(unsigned long data)
+{
+ rt2800pci_txdone((struct rt2x00_dev *)data);
/*
- * 3 - Rx ring done interrupt.
+ * No need to enable the tx status interrupt here as we always
+ * leave it enabled to minimize the possibility of a tx status
+ * register overflow. See comment in interrupt handler.
*/
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
- rt2x00pci_rxdone(rt2x00dev);
+}
- /*
- * 4 - Auto wakeup interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
- rt2800pci_wakeup(rt2x00dev);
+static void rt2800pci_pretbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_pretbtt(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
+static void rt2800pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
- return IRQ_HANDLED;
+static void rt2800pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+
+static void rt2800pci_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2800pci_wakeup(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
}
static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
@@ -810,8 +861,8 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
- irqreturn_t ret = IRQ_HANDLED;
+ u32 reg, mask;
+ unsigned long flags;
/* Read status and ACK all interrupts */
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -823,38 +874,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
- rt2800pci_txstatus_interrupt(rt2x00dev);
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = ~reg;
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2800pci_txstatus_interrupt(rt2x00dev);
/*
- * All other interrupts are handled in the interrupt thread.
- * Store irqvalue for use in the interrupt thread.
+ * Never disable the TX_FIFO_STATUS interrupt.
*/
- rt2x00dev->irqvalue[0] = reg;
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ }
- /*
- * Disable interrupts, will be enabled again in the
- * interrupt thread.
- */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
- /*
- * Leave the TX_FIFO_STATUS interrupt enabled to not lose any
- * tx status reports.
- */
- rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- ret = IRQ_WAKE_THREAD;
- }
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- return ret;
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg &= mask;
+ rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
/*
@@ -969,8 +1026,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.irq_handler = rt2800pci_interrupt,
- .irq_handler_thread = rt2800pci_interrupt_thread,
- .txstatus_tasklet = rt2800pci_txstatus_tasklet,
+ .txstatus_tasklet = rt2800pci_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2800pci_rxdone_tasklet,
+ .autowake_tasklet = rt2800pci_autowake_tasklet,
.probe_hw = rt2800pci_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
@@ -990,6 +1050,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.write_tx_desc = rt2800pci_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
.fill_rxdone = rt2800pci_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
@@ -1065,12 +1126,17 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
+#ifdef CONFIG_RT2800PCI_RT53XX
+ { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
{ 0, }
};
#endif /* CONFIG_PCI */
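
The reworked rt2800pci_interrupt above acks INT_SOURCE_CSR, schedules one tasklet per pending event, and then clears exactly those bits in INT_MASK_CSR under irqmask_lock; each tasklet re-enables its own bit through rt2800pci_enable_interrupt once its work is drained, while TX_FIFO_STATUS stays enabled the whole time. Below is a minimal userspace model of that mask handshake, with plain integers standing in for the CSRs and a pthread mutex standing in for the irqmask spinlock; it only illustrates the ordering and is not kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define INT_RX_DONE     0x01u
#define INT_TBTT        0x02u

static uint32_t int_mask = INT_RX_DONE | INT_TBTT;      /* enabled sources */
static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hard-IRQ half: decide what to defer and disable those sources. */
static uint32_t isr(uint32_t source)
{
        uint32_t deferred = source & int_mask;

        pthread_mutex_lock(&mask_lock);
        int_mask &= ~deferred;          /* the tasklet turns these back on */
        pthread_mutex_unlock(&mask_lock);
        return deferred;
}

/* Tasklet half: handle the event, then re-enable just its own source. */
static void tasklet(uint32_t bit)
{
        /* ... drain the ring for this event ... */
        pthread_mutex_lock(&mask_lock);
        int_mask |= bit;
        pthread_mutex_unlock(&mask_lock);
}

int main(void)
{
        uint32_t pending = isr(INT_RX_DONE);

        printf("mask after isr: 0x%02x\n", (unsigned int)int_mask);     /* RX_DONE masked */
        if (pending & INT_RX_DONE)
                tasklet(INT_RX_DONE);
        printf("mask after tasklet: 0x%02x\n", (unsigned int)int_mask); /* restored */
        return 0;
}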
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a54ff4c..5d91561e0de7 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -486,6 +484,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -633,6 +637,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.write_tx_desc = rt2800usb_write_tx_desc,
.write_tx_data = rt2800usb_write_tx_data,
.write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 84aaf393da43..19453d23e90d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,6 +189,7 @@ struct rt2x00_chip {
#define RT3572 0x3572
#define RT3593 0x3593 /* PCIe */
#define RT3883 0x3883 /* WSOC */
+#define RT5390 0x5390 /* 2.4GHz */
u16 rf;
u16 rev;
@@ -225,6 +226,8 @@ struct channel_info {
struct antenna_setup {
enum antenna rx;
enum antenna tx;
+ u8 rx_chain_num;
+ u8 tx_chain_num;
};
/*
@@ -368,6 +371,7 @@ struct rt2x00_intf {
* dedicated beacon entry.
*/
struct queue_entry *beacon;
+ bool enable_beacon;
/*
* Actions that needed rescheduling.
@@ -511,14 +515,13 @@ struct rt2x00lib_ops {
irq_handler_t irq_handler;
/*
- * Threaded Interrupt handlers.
- */
- irq_handler_t irq_handler_thread;
-
- /*
* TX status tasklet handler.
*/
void (*txstatus_tasklet) (unsigned long data);
+ void (*pretbtt_tasklet) (unsigned long data);
+ void (*tbtt_tasklet) (unsigned long data);
+ void (*rxdone_tasklet) (unsigned long data);
+ void (*autowake_tasklet) (unsigned long data);
/*
* Device init handlers.
@@ -573,6 +576,7 @@ struct rt2x00lib_ops {
struct txentry_desc *txdesc);
void (*write_beacon) (struct queue_entry *entry,
struct txentry_desc *txdesc);
+ void (*clear_beacon) (struct queue_entry *entry);
int (*get_tx_data_len) (struct queue_entry *entry);
/*
@@ -664,6 +668,7 @@ enum rt2x00_flags {
*/
CONFIG_SUPPORT_HW_BUTTON,
CONFIG_SUPPORT_HW_CRYPTO,
+ CONFIG_SUPPORT_POWER_LIMIT,
DRIVER_SUPPORT_CONTROL_FILTERS,
DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
@@ -788,10 +793,12 @@ struct rt2x00_dev {
* - Open ap interface count.
* - Open sta interface count.
* - Association count.
+ * - Beaconing enabled count.
*/
unsigned int intf_ap_count;
unsigned int intf_sta_count;
unsigned int intf_associated;
+ unsigned int intf_beaconing;
/*
* Link quality
@@ -857,6 +864,13 @@ struct rt2x00_dev {
*/
struct ieee80211_low_level_stats low_level_stats;
+ /**
+ * Work queue for all work which should not be placed
+ * on the mac80211 workqueue (because of dependencies
+ * between various work structures).
+ */
+ struct workqueue_struct *workqueue;
+
/*
* Scheduled work.
* NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -887,12 +901,6 @@ struct rt2x00_dev {
const struct firmware *fw;
/*
- * Interrupt values, stored between interrupt service routine
- * and interrupt thread routine.
- */
- u32 irqvalue[2];
-
- /*
* FIFO for storing tx status reports between isr and tasklet.
*/
DECLARE_KFIFO_PTR(txstatus_fifo, u32);
@@ -901,6 +909,15 @@ struct rt2x00_dev {
* Tasklet for processing tx status reports (rt2800pci).
*/
struct tasklet_struct txstatus_tasklet;
+ struct tasklet_struct pretbtt_tasklet;
+ struct tasklet_struct tbtt_tasklet;
+ struct tasklet_struct rxdone_tasklet;
+ struct tasklet_struct autowake_tasklet;
+
+ /*
+ * Protect the interrupt mask register.
+ */
+ spinlock_t irqmask_lock;
};
/*
@@ -1168,7 +1185,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry);
/*
* mac80211 handlers.
*/
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9597a03242cc..9de9dbe94399 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
return;
if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
- rt2x00queue_update_beacon(rt2x00dev, vif, true);
+ rt2x00queue_update_beacon(rt2x00dev, vif);
}
static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
vif->type != NL80211_IFTYPE_WDS)
return;
- rt2x00queue_update_beacon(rt2x00dev, vif, true);
+ /*
+ * Update the beacon without locking. This is safe on PCI devices
+ * as they only update the beacon periodically here. This should
+ * never be called for USB devices.
+ */
+ WARN_ON(rt2x00_is_usb(rt2x00dev));
+ rt2x00queue_update_beacon_locked(rt2x00dev, vif);
}
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
return;
/* send buffered bc/mc frames out for every bssid */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_bc_buffer_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_bc_buffer_iter,
+ rt2x00dev);
/*
* Devices with pre tbtt interrupt don't need to update the beacon
* here as they will fetch the next beacon directly prior to
@@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
return;
/* fetch next beacon */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beaconupdate_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
@@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
return;
/* fetch next beacon */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beaconupdate_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
@@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
const int channel, const int tx_power,
const int value)
{
- entry->center_freq = ieee80211_channel_to_frequency(channel);
+ /* XXX: this assumption about the band is wrong for 802.11j */
+ entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ entry->center_freq = ieee80211_channel_to_frequency(channel,
+ entry->band);
entry->hw_value = value;
entry->max_power = tx_power;
entry->max_antenna_gain = 0xff;
@@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
GFP_KERNEL);
if (status)
return status;
+ }
- /* tasklet for processing the tx status reports. */
- if (rt2x00dev->ops->lib->txstatus_tasklet)
- tasklet_init(&rt2x00dev->txstatus_tasklet,
- rt2x00dev->ops->lib->txstatus_tasklet,
- (unsigned long)rt2x00dev);
-
+ /*
+ * Initialize tasklets if used by the driver. Tasklets are
+ * disabled until the interrupts are turned on. The driver
+ * has to handle that.
+ */
+#define RT2X00_TASKLET_INIT(taskletname) \
+ if (rt2x00dev->ops->lib->taskletname) { \
+ tasklet_init(&rt2x00dev->taskletname, \
+ rt2x00dev->ops->lib->taskletname, \
+ (unsigned long)rt2x00dev); \
+ tasklet_disable(&rt2x00dev->taskletname); \
}
+ RT2X00_TASKLET_INIT(txstatus_tasklet);
+ RT2X00_TASKLET_INIT(pretbtt_tasklet);
+ RT2X00_TASKLET_INIT(tbtt_tasklet);
+ RT2X00_TASKLET_INIT(rxdone_tasklet);
+ RT2X00_TASKLET_INIT(autowake_tasklet);
+
+#undef RT2X00_TASKLET_INIT
+
/*
* Register HW.
*/
@@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
int retval = -ENOMEM;
+ spin_lock_init(&rt2x00dev->irqmask_lock);
mutex_init(&rt2x00dev->csr_mutex);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
BIT(NL80211_IFTYPE_WDS);
/*
- * Initialize configuration work.
+ * Initialize work.
*/
+ rt2x00dev->workqueue =
+ alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
+ if (!rt2x00dev->workqueue) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
/*
@@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
+ destroy_workqueue(rt2x00dev->workqueue);
/*
* Free the tx status fifo.
@@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Kill the tx status tasklet.
*/
tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
/*
* Uninitialize device.
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index b7ad46ecaa1d..03d9579da681 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -69,7 +69,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
txdesc->mcs |= 0x08;
}
-
/*
* This frame is eligible for an AMPDU, however, don't aggregate
* frames that are intended to probe a specific tx rate.
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a105c500627b..2d94cbaf5f4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local);
/**
- * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
+ * rt2x00queue_update_beacon - Send new beacon from mac80211
+ * to hardware. Handles locking by itself (mutex).
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @vif: Interface for which the beacon should be updated.
- * @enable_beacon: Enable beaconing
*/
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon);
+ struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
+ * to hardware. Caller needs to ensure locking.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_clear_beacon - Clear beacon in hardware
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
/**
* rt2x00queue_index_inc - Index incrementation function
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index bfda60eaf4ef..c975b0a12e95 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
!test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
return;
- schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->watchdog_work, WATCHDOG_INTERVAL);
}
void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
rt2x00dev->ops->lib->watchdog(rt2x00dev);
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->watchdog_work,
+ WATCHDOG_INTERVAL);
}
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f3da051df39e..c2c35838c2f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
return retval;
}
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* either RTS or CTS-to-self frame and handles everything
* inside the hardware.
*/
- if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
- IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
- !rt2x00dev->ops->hw->set_rts_threshold) {
+ if (!rt2x00dev->ops->hw->set_rts_threshold &&
+ (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT))) {
if (rt2x00queue_available(queue) <= 1)
goto exit_fail;
@@ -155,12 +155,11 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (rt2x00queue_threshold(queue))
rt2x00queue_pause_queue(queue);
- return NETDEV_TX_OK;
+ return;
exit_fail:
ieee80211_stop_queue(rt2x00dev->hw, qid);
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
@@ -617,11 +616,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
/*
- * Update the beacon.
+ * Update the beacon. This is only required on USB devices. PCI
+ * devices fetch beacons periodically.
*/
- if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED))
- rt2x00queue_update_beacon(rt2x00dev, vif,
- bss_conf->enable_beacon);
+ if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+ rt2x00queue_update_beacon(rt2x00dev, vif);
+
+ /*
+ * Start/stop beaconing.
+ */
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+ rt2x00queue_clear_beacon(rt2x00dev, vif);
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
+
+ if (rt2x00dev->intf_beaconing == 0) {
+ /*
+ * Last beaconing interface disabled
+ * -> stop beacon queue.
+ */
+ mutex_lock(&intf->beacon_skb_mutex);
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+
+ } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+ rt2x00dev->intf_beaconing++;
+ intf->enable_beacon = true;
+
+ if (rt2x00dev->intf_beaconing == 1) {
+ /*
+ * First beaconing interface enabled
+ * -> start beacon queue.
+ */
+ mutex_lock(&intf->beacon_skb_mutex);
+ rt2x00queue_start_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+ }
+ }
/*
* When the association status has changed we must reset the link
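
The BSS_CHANGED_BEACON_ENABLED handling above keeps a per-device intf_beaconing count so that the beacon queue is started only when the first interface enables beaconing and stopped again when the last one disables it. The same first-user/last-user pattern in a self-contained sketch; every name here is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

static unsigned int beaconing_users;

static void start_beacon_queue(void) { puts("beacon queue started"); }
static void stop_beacon_queue(void)  { puts("beacon queue stopped"); }

/* Enable path: bump the count, touch the hardware only for user #1. */
static void intf_enable_beacon(bool *enabled)
{
        if (*enabled)
                return;
        *enabled = true;
        if (++beaconing_users == 1)
                start_beacon_queue();
}

/* Disable path: drop the count, stop the hardware when it reaches zero. */
static void intf_disable_beacon(bool *enabled)
{
        if (!*enabled)
                return;
        *enabled = false;
        if (--beaconing_users == 0)
                stop_beacon_queue();
}

int main(void)
{
        bool ap0 = false, ap1 = false;

        intf_enable_beacon(&ap0);       /* starts the queue */
        intf_enable_beacon(&ap1);       /* count only */
        intf_disable_beacon(&ap0);      /* count only */
        intf_disable_beacon(&ap1);      /* stops the queue */
        return 0;
}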
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index ace0b668c04e..4dd82b0b0520 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
/*
* Register interrupt handler.
*/
- status = request_threaded_irq(rt2x00dev->irq,
- rt2x00dev->ops->lib->irq_handler,
- rt2x00dev->ops->lib->irq_handler_thread,
- IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+ status = request_irq(rt2x00dev->irq,
+ rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index ca82b3a91697..bf9bba356280 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -365,13 +365,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame, except for a frame that has been injected
- * through a monitor interface. This latter is needed for testing a
- * monitor interface.
+ * to be inserted into the frame.
*/
- if ((ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control)) &&
- (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
@@ -566,13 +563,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
return 0;
}
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
{
struct rt2x00_intf *intf = vif_to_intf(vif);
- struct skb_frame_desc *skbdesc;
- struct txentry_desc txdesc;
if (unlikely(!intf->beacon))
return -ENOBUFS;
@@ -584,17 +578,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
rt2x00queue_free_skb(intf->beacon);
- if (!enable_beacon) {
- rt2x00queue_stop_queue(intf->beacon->queue);
- mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
- }
+ /*
+ * Clear beacon (single bssid devices don't need to clear the beacon
+ * since the beacon queue will get stopped anyway).
+ */
+ if (rt2x00dev->ops->lib->clear_beacon)
+ rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+ mutex_unlock(&intf->beacon_skb_mutex);
+
+ return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ struct skb_frame_desc *skbdesc;
+ struct txentry_desc txdesc;
+
+ if (unlikely(!intf->beacon))
+ return -ENOBUFS;
+
+ /*
+ * Clean up the beacon skb.
+ */
+ rt2x00queue_free_skb(intf->beacon);
intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
- if (!intf->beacon->skb) {
- mutex_unlock(&intf->beacon_skb_mutex);
+ if (!intf->beacon->skb)
return -ENOMEM;
- }
/*
* Copy all TX descriptor information into txdesc,
@@ -611,13 +624,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
skbdesc->entry = intf->beacon;
/*
- * Send beacon to hardware and enable beacon genaration..
+ * Send beacon to hardware.
*/
rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
+ return 0;
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ int ret;
+
+ mutex_lock(&intf->beacon_skb_mutex);
+ ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
+ return ret;
}
void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -885,7 +910,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
* The queue flush has failed...
*/
if (unlikely(!rt2x00queue_empty(queue)))
- WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
+ WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
/*
* Restore the queue to the previous status
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index e8259ae48ced..6f867eec49cc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -85,8 +85,6 @@ enum dev_state {
STATE_RADIO_OFF,
STATE_RADIO_IRQ_ON,
STATE_RADIO_IRQ_OFF,
- STATE_RADIO_IRQ_ON_ISR,
- STATE_RADIO_IRQ_OFF_ISR,
};
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1a9937d5aff6..fbe735f5b352 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* Schedule the delayed work for reading the TX status
* from the device.
*/
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
* Schedule the delayed work for reading the RX status
* from the device.
*/
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
@@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
* Schedule the completion handler manually, when this
* worker function runs, it should cleanup the queue.
*/
- ieee80211_queue_work(queue->rt2x00dev->hw, completion);
+ queue_work(queue->rt2x00dev->workqueue, completion);
/*
* Wait for a little while to give the driver
@@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
" invoke forced tx handler\n", queue->qid);
- ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
+ queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
}
void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 8de44dd401e0..927a4a3e0eeb 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- * For the Beacon base registers, we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
- */
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
-
- /*
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
+ tasklet_enable(&rt2x00dev->autowake_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_disable(&rt2x00dev->autowake_tasklet);
+ }
}
static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt61pci_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt61pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1962,13 +1978,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
unsigned int beacon_base;
unsigned int padding_len;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1986,7 +2003,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
* Write entire beacon with descriptor and padding to register.
*/
padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
- skb_pad(entry->skb, padding_len);
+ if (padding_len && skb_pad(entry->skb, padding_len)) {
+ ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+ /* skb freed by skb_pad() on failure */
+ entry->skb = NULL;
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+ return;
+ }
+
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
entry_priv->desc, TXINFO_SIZE);
@@ -2002,8 +2026,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
*/
rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2014,6 +2036,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
+static void rt61pci_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Clear beacon.
+ */
+ rt2x00pci_register_write(rt2x00dev,
+ HW_BEACON_OFFSET(entry->entry_idx), 0);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
/*
* RX control handlers
*/
@@ -2078,9 +2126,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
- * FIXME: Legacy driver indicates that the frame does
- * contain the Michael Mic. Unfortunately, in rt2x00
- * the MIC seems to be missing completely...
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
@@ -2211,61 +2258,80 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
-static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance)
+static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
- u32 reg_mcu = rt2x00dev->irqvalue[1];
+ unsigned long flags;
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 1 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
- /*
- * 2 - Tx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
- rt61pci_txdone(rt2x00dev);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 3 - Handle MCU command done.
- */
- if (reg_mcu)
- rt2x00pci_register_write(rt2x00dev,
- M2H_CMD_DONE_CSR, 0xffffffff);
+static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
+{
+ unsigned long flags;
+ u32 reg;
/*
- * 4 - MCU Autowakeup interrupt.
+ * Enable a single MCU interrupt. The interrupt mask register
+ * access needs locking.
*/
- if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
- rt61pci_wakeup(rt2x00dev);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 5 - Beacon done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
- return IRQ_HANDLED;
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
+
+static void rt61pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt61pci_txdone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
+}
+
+static void rt61pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
+}
+
+static void rt61pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
}
+static void rt61pci_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt61pci_wakeup(rt2x00dev);
+ rt2x00pci_register_write(rt2x00dev,
+ M2H_CMD_DONE_CSR, 0xffffffff);
+ rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
+}
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg_mcu;
- u32 reg;
+ u32 reg_mcu, mask_mcu;
+ u32 reg, mask;
+ unsigned long flags;
/*
 * Get the interrupt sources and save them to local variables.
@@ -2283,14 +2349,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
- rt2x00dev->irqvalue[1] = reg_mcu;
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = reg;
+ mask_mcu = reg_mcu;
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
- return IRQ_WAKE_THREAD;
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+
+ rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ reg |= mask_mcu;
+ rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
/*
@@ -2884,7 +2982,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.irq_handler = rt61pci_interrupt,
- .irq_handler_thread = rt61pci_interrupt_thread,
+ .txstatus_tasklet = rt61pci_txstatus_tasklet,
+ .tbtt_tasklet = rt61pci_tbtt_tasklet,
+ .rxdone_tasklet = rt61pci_rxdone_tasklet,
+ .autowake_tasklet = rt61pci_autowake_tasklet,
.probe_hw = rt61pci_probe_hw,
.get_firmware_name = rt61pci_get_firmware_name,
.check_firmware = rt61pci_check_firmware,
@@ -2903,6 +3004,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.stop_queue = rt61pci_stop_queue,
.write_tx_desc = rt61pci_write_tx_desc,
.write_beacon = rt61pci_write_beacon,
+ .clear_beacon = rt61pci_clear_beacon,
.fill_rxdone = rt61pci_fill_rxdone,
.config_shared_key = rt61pci_config_shared_key,
.config_pairwise_key = rt61pci_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 029be3c6c030..6e9981a1dd7f 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- * For the Beacon base registers we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
- */
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
-
- /*
* Enable synchronisation.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt73usb_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -1547,13 +1533,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
unsigned int padding_len;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1577,7 +1564,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
* Write entire beacon with descriptor and padding to register.
*/
padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
- skb_pad(entry->skb, padding_len);
+ if (padding_len && skb_pad(entry->skb, padding_len)) {
+ ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+ /* skb freed by skb_pad() on failure */
+ entry->skb = NULL;
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+ return;
+ }
+
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
entry->skb->len + padding_len);
@@ -1590,8 +1584,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
*/
rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1602,6 +1594,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
+static void rt73usb_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ unsigned int beacon_base;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Clear beacon.
+ */
+ beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
{
int length;
@@ -1698,9 +1717,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
- * FIXME: Legacy driver indicates that the frame does
- * contain the Michael Mic. Unfortunately, in rt2x00
- * the MIC seems to be missing completely...
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
@@ -2313,6 +2331,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.flush_queue = rt2x00usb_flush_queue,
.write_tx_desc = rt73usb_write_tx_desc,
.write_beacon = rt73usb_write_beacon,
+ .clear_beacon = rt73usb_clear_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 5851cbc1e957..80db5cabc9b9 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.freq = dev->conf.channel->center_freq;
rx_status.band = dev->conf.channel->band;
rx_status.mactime = le64_to_cpu(entry->tsft);
- rx_status.flag |= RX_FLAG_TSFT;
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags);
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
-
- return 0;
}
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work)
struct ieee80211_hw *dev = vif_priv->dev;
struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
- int err = 0;
/* don't overflow the tx ring */
if (ieee80211_queue_stopped(dev, 0))
@@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
/* TODO: use actual beacon queue */
skb_set_queue_mapping(skb, 0);
- err = rtl8180_tx(dev, skb);
- WARN_ON(err);
+ rtl8180_tx(dev, skb);
resched:
/*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 6b82cac37ee3..c5a5e788f25f 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb)
}
}
-static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct rtl8187_priv *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
flags = skb->len;
@@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
kfree_skb(skb);
}
usb_free_urb(urb);
-
- return NETDEV_TX_OK;
}
static void rtl8187_rx_cb(struct urb *urb)
@@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb)
rx_status.rate_idx = rate;
rx_status.freq = dev->conf.channel->center_freq;
rx_status.band = dev->conf.channel->band;
- rx_status.flag |= RX_FLAG_TSFT;
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7f6573f7f470..ce49e0ce7cad 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,15 +1,33 @@
config RTL8192CE
- tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter"
- depends on MAC80211 && EXPERIMENTAL
+ tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
+ depends on MAC80211 && PCI && EXPERIMENTAL
select FW_LOADER
select RTLWIFI
+ select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
wireless network adapters.
If you choose to build it as a module, it will be called rtl8192ce
+config RTL8192CU
+ tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
+ depends on MAC80211 && USB && EXPERIMENTAL
+ select FW_LOADER
+ select RTLWIFI
+ select RTL8192C_COMMON
+ ---help---
+ This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8192cu
+
config RTLWIFI
tristate
- depends on RTL8192CE
+ depends on RTL8192CE || RTL8192CU
+ default m
+
+config RTL8192C_COMMON
+ tristate
+ depends on RTL8192CE || RTL8192CU
default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 2a7a4384f8ee..9192fd583413 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -5,9 +5,19 @@ rtlwifi-objs := \
core.o \
debug.o \
efuse.o \
- pci.o \
ps.o \
rc.o \
- regd.o
+ regd.o \
+ usb.o
+rtl8192c_common-objs += \
+
+ifeq ($(CONFIG_PCI),y)
+rtlwifi-objs += pci.o
+endif
+
+obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
+obj-$(CONFIG_RTL8192CU) += rtl8192cu/
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index cf0b73e51fc2..bb0c781f4a1b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -144,7 +144,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_mask[1] = 0xFF;
ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+ ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
} else if (get_rf_type(rtlphy) == RF_1T1R) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
@@ -153,7 +153,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_mask[1] = 0x00;
ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+ ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
}
}
@@ -283,13 +283,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
rtlmac->hw = hw;
/* <2> rate control register */
- if (rtl_rate_control_register()) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("rtl: Unable to register rtl_rc,"
- "use default RC !!\n"));
- } else {
- hw->rate_control_algorithm = "rtl_rc";
- }
+ hw->rate_control_algorithm = "rtl_rc";
/*
* <3> init CRDA must come after init
@@ -325,8 +319,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
void rtl_deinit_core(struct ieee80211_hw *hw)
{
- /*RC*/
- rtl_rate_control_unregister();
}
void rtl_init_rx_config(struct ieee80211_hw *hw)
@@ -399,21 +391,21 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
u8 rate_flag = info->control.rates[0].flags;
/* Common Settings */
- tcb_desc->b_rts_stbc = false;
- tcb_desc->b_cts_enable = false;
+ tcb_desc->rts_stbc = false;
+ tcb_desc->cts_enable = false;
tcb_desc->rts_sc = 0;
- tcb_desc->b_rts_bw = false;
- tcb_desc->b_rts_use_shortpreamble = false;
- tcb_desc->b_rts_use_shortgi = false;
+ tcb_desc->rts_bw = false;
+ tcb_desc->rts_use_shortpreamble = false;
+ tcb_desc->rts_use_shortgi = false;
if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
/* Use CTS-to-SELF in protection mode. */
- tcb_desc->b_rts_enable = true;
- tcb_desc->b_cts_enable = true;
+ tcb_desc->rts_enable = true;
+ tcb_desc->cts_enable = true;
tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
} else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
/* Use RTS-CTS in protection mode. */
- tcb_desc->b_rts_enable = true;
+ tcb_desc->rts_enable = true;
tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
}
@@ -429,7 +421,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_STATION)
tcb_desc->ratr_index = 0;
else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
+ if (tcb_desc->multicast || tcb_desc->broadcast) {
tcb_desc->hw_rate =
rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
tcb_desc->use_driver_rate = 1;
@@ -439,7 +431,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
}
}
- if (rtlpriv->dm.b_useramask) {
+ if (rtlpriv->dm.useramask) {
/* TODO: we will differentiate adhoc and station in the future */
tcb_desc->mac_id = 0;
@@ -461,19 +453,19 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- tcb_desc->b_packet_bw = false;
+ tcb_desc->packet_bw = false;
if (!mac->bw_40 || !mac->ht_enable)
return;
- if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
+ if (tcb_desc->multicast || tcb_desc->broadcast)
return;
/* Use legacy rate, shall use 20MHz */
if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
return;
- tcb_desc->b_packet_bw = true;
+ tcb_desc->packet_bw = true;
}
static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
@@ -498,7 +490,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
struct ieee80211_rate *txrate;
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
@@ -545,9 +537,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
}
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
- tcb_desc->b_multicast = 1;
+ tcb_desc->multicast = 1;
else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
- tcb_desc->b_broadcast = 1;
+ tcb_desc->broadcast = 1;
_rtl_txrate_selectmode(hw, tcb_desc);
_rtl_query_bandwidth_mode(hw, tcb_desc);
@@ -570,7 +562,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
@@ -587,7 +579,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
u8 category;
@@ -632,7 +624,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
u16 ether_type;
u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
const struct iphdr *ip;
@@ -646,7 +638,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
SNAP_SIZE + PROTOC_TYPE_SIZE);
ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
- ether_type = ntohs(ether_type);
if (ETH_P_IP == ether_type) {
if (IPPROTO_UDP == ip->protocol) {
@@ -690,7 +681,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
return true;
- } else if (0x86DD == ether_type) {
+ } else if (ETH_P_IPV6 == ether_type) {
+ /* IPv6 */
return true;
}
@@ -777,10 +769,10 @@ void rtl_watchdog_wq_callback(void *data)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- bool b_busytraffic = false;
- bool b_higher_busytraffic = false;
- bool b_higher_busyrxtraffic = false;
- bool b_higher_busytxtraffic = false;
+ bool busytraffic = false;
+ bool higher_busytraffic = false;
+ bool higher_busyrxtraffic = false;
+ bool higher_busytxtraffic = false;
u8 idx = 0;
u32 rx_cnt_inp4eriod = 0;
@@ -788,7 +780,7 @@ void rtl_watchdog_wq_callback(void *data)
u32 aver_rx_cnt_inperiod = 0;
u32 aver_tx_cnt_inperiod = 0;
- bool benter_ps = false;
+ bool enter_ps = false;
if (is_hal_stop(rtlhal))
return;
@@ -832,29 +824,29 @@ void rtl_watchdog_wq_callback(void *data)
/* (2) check traffic busy */
if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
- b_busytraffic = true;
+ busytraffic = true;
/* Higher Tx/Rx data. */
if (aver_rx_cnt_inperiod > 4000 ||
aver_tx_cnt_inperiod > 4000) {
- b_higher_busytraffic = true;
+ higher_busytraffic = true;
/* Extremely high Rx data. */
if (aver_rx_cnt_inperiod > 5000)
- b_higher_busyrxtraffic = true;
+ higher_busyrxtraffic = true;
else
- b_higher_busytxtraffic = false;
+ higher_busytxtraffic = false;
}
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2))
- benter_ps = false;
+ enter_ps = false;
else
- benter_ps = true;
+ enter_ps = true;
/* LeisurePS only work in infra mode. */
- if (benter_ps)
+ if (enter_ps)
rtl_lps_enter(hw);
else
rtl_lps_leave(hw);
@@ -863,9 +855,9 @@ void rtl_watchdog_wq_callback(void *data)
rtlpriv->link_info.num_rx_inperiod = 0;
rtlpriv->link_info.num_tx_inperiod = 0;
- rtlpriv->link_info.b_busytraffic = b_busytraffic;
- rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
- rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
+ rtlpriv->link_info.busytraffic = busytraffic;
+ rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
+ rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
}
@@ -945,11 +937,16 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
static int __init rtl_core_module_init(void)
{
+ if (rtl_rate_control_register())
+		printk(KERN_ERR "rtlwifi: Unable to register rtl_rc, "
+		       "use default RC !!\n");
return 0;
}
static void __exit rtl_core_module_exit(void)
{
+ /*RC*/
+ rtl_rate_control_unregister();
}
module_init(rtl_core_module_init);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 3de5a14745f1..043045342bc7 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -30,6 +30,7 @@
#define __RTL_BASE_H__
#define RTL_DUMMY_OFFSET 0
+#define RTL_RX_DESC_SIZE 24
#define RTL_DUMMY_UNIT 8
#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
#define RTL_TX_DESC_SIZE 32
@@ -52,46 +53,22 @@
#define FRAME_OFFSET_SEQUENCE 22
#define FRAME_OFFSET_ADDRESS4 24
-#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
- WRITEEF2BYTE(_hdr, _val)
-#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
- WRITEEF1BYTE(_hdr, _val)
-#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
- SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
-#define SET_80211_HDR_TO_DS(_hdr, _val) \
- SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
#define SET_80211_PS_POLL_AID(_hdr, _val) \
- WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val)
+ (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
- CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val))
+ memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
#define SET_80211_PS_POLL_TA(_hdr, _val) \
- CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val))
+ memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN)
#define SET_80211_HDR_DURATION(_hdr, _val) \
- WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val)
+ (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
- CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
+ memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN)
#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
- CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val))
+ memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN)
#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
- CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
-#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
- WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
-
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
- WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
- WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
-#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
- WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
-#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
- READEF2BYTE(((u8 *)(__phdr)) + 34)
-#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
- WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
-#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
- SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
- (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+ memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN)
int rtl_init_core(struct ieee80211_hw *hw);
void rtl_deinit_core(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index d6a924a05654..e4f4aee8f298 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -82,7 +82,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
mutex_unlock(&rtlpriv->locks.conf_mutex);
}
-static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -97,11 +97,10 @@ static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->intf_ops->adapter_tx(hw, skb);
- return NETDEV_TX_OK;
+ return;
err_free:
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
static int rtl_op_add_interface(struct ieee80211_hw *hw,
@@ -434,9 +433,9 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
aci = _rtl_get_hal_qnum(queue);
mac->ac[aci].aifs = param->aifs;
- mac->ac[aci].cw_min = param->cw_min;
- mac->ac[aci].cw_max = param->cw_max;
- mac->ac[aci].tx_op = param->txop;
+ mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
+ mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
+ mac->ac[aci].tx_op = cpu_to_le16(param->txop);
memcpy(&mac->edca_param[aci], param, sizeof(*param));
rtlpriv->cfg->ops->set_qos(hw, aci);
return 0;
@@ -552,6 +551,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
("BSS_CHANGED_HT\n"));
+ rcu_read_lock();
sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
@@ -564,6 +564,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->current_ampdu_factor =
sta->ht_cap.ampdu_factor;
}
+ rcu_read_unlock();
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
(u8 *) (&mac->max_mss_density));
@@ -615,6 +616,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
else
mac->mode = WIRELESS_MODE_G;
+ rcu_read_lock();
sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
@@ -649,6 +651,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
*/
}
}
+ rcu_read_unlock();
/*mac80211 just give us CCK rates any time
*So we add G rate in basic rates when
@@ -666,7 +669,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *) (&basic_rates));
- if (rtlpriv->dm.b_useramask)
+ if (rtlpriv->dm.useramask)
rtlpriv->cfg->ops->update_rate_mask(hw, 0);
else
rtlpriv->cfg->ops->update_rate_table(hw);
@@ -681,7 +684,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
*/
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
- if (ppsc->b_fwctrl_lps) {
+ if (ppsc->fwctrl_lps) {
u8 mstatus = RT_MEDIA_CONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
@@ -689,7 +692,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
ppsc->report_linked = true;
}
} else {
- if (ppsc->b_fwctrl_lps) {
+ if (ppsc->fwctrl_lps) {
u8 mstatus = RT_MEDIA_DISCONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
@@ -748,7 +751,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -817,7 +821,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
/* fix fwlps issue */
rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
- if (rtlpriv->dm.b_useramask)
+ if (rtlpriv->dm.useramask)
rtlpriv->cfg->ops->update_rate_mask(hw, 0);
else
rtlpriv->cfg->ops->update_rate_table(hw);
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index 08bdec2ceda4..e4aa8687408c 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -105,6 +105,7 @@
#define COMP_MAC80211 BIT(26)
#define COMP_REGD BIT(27)
#define COMP_CHAN BIT(28)
+#define COMP_USB BIT(29)
/*--------------------------------------------------------------
Define the rt_print components
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 62876cd5c41a..4f92cba6810a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1169,21 +1169,3 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
return word_cnts;
}
-void efuse_reset_loader(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 tmp_u2b;
-
- tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
- rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
- (tmp_u2b & ~(BIT(12))));
- udelay(10000);
- rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
- (tmp_u2b | BIT(12)));
- udelay(10000);
-}
-
-bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype)
-{
- return true;
-}
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 2d39a4df181b..47774dd4c2a6 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -117,8 +117,5 @@ extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
-extern bool efuse_program_map(struct ieee80211_hw *hw,
- char *p_filename, u8 tabletype);
-extern void efuse_reset_loader(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 1758d4463247..9cd7703c2a30 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -50,7 +50,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
ppsc->reg_rfps_level = 0;
- ppsc->b_support_aspm = 0;
+ ppsc->support_aspm = 0;
/*Update PCI ASPM setting */
ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -115,29 +115,29 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
switch (rtlpci->const_support_pciaspm) {
case 0:{
/*Not support ASPM. */
- bool b_support_aspm = false;
- ppsc->b_support_aspm = b_support_aspm;
+ bool support_aspm = false;
+ ppsc->support_aspm = support_aspm;
break;
}
case 1:{
/*Support ASPM. */
- bool b_support_aspm = true;
- bool b_support_backdoor = true;
- ppsc->b_support_aspm = b_support_aspm;
+ bool support_aspm = true;
+ bool support_backdoor = true;
+ ppsc->support_aspm = support_aspm;
/*if(priv->oem_id == RT_CID_TOSHIBA &&
!priv->ndis_adapter.amd_l1_patch)
- b_support_backdoor = false; */
+ support_backdoor = false; */
- ppsc->b_support_backdoor = b_support_backdoor;
+ ppsc->support_backdoor = support_backdoor;
break;
}
case 2:
/*ASPM value set by chipset. */
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
- bool b_support_aspm = true;
- ppsc->b_support_aspm = b_support_aspm;
+ bool support_aspm = true;
+ ppsc->support_aspm = support_aspm;
}
break;
default:
@@ -476,9 +476,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
- le32_to_cpu(rtlpriv->cfg->ops->
+ rtlpriv->cfg->ops->
get_desc((u8 *) entry, true,
- HW_DESC_TXBUFF_ADDR)),
+ HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
@@ -557,7 +557,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
return;
} else {
struct ieee80211_hdr *hdr;
- u16 fc;
+ __le16 fc;
struct sk_buff *new_skb = NULL;
rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
@@ -583,9 +583,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
*/
hdr = (struct ieee80211_hdr *)(skb->data);
- fc = le16_to_cpu(hdr->frame_control);
+ fc = hdr->frame_control;
- if (!stats.b_crc) {
+ if (!stats.crc) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
@@ -666,7 +666,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
}
done:
- bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
+ bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
tmp_one = 1;
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
HW_DESC_RXBUFF_ADDR,
@@ -690,75 +690,6 @@ done:
}
-void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- int prio;
-
- for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
-
- while (skb_queue_len(&ring->queue)) {
- struct rtl_tx_desc *entry = &ring->desc[ring->idx];
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
- u8 own;
-
- /*
- *beacon packet will only use the first
- *descriptor defautly, and the own may not
- *be cleared by the hardware, and
- *beacon will free in prepare beacon
- */
- if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||
- prio == HCCA_QUEUE)
- break;
-
- own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,
- true,
- HW_DESC_OWN);
-
- if (own)
- break;
-
- skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- le32_to_cpu(rtlpriv->cfg->ops->
- get_desc((u8 *) entry,
- true,
- HW_DESC_TXBUFF_ADDR)),
- skb->len, PCI_DMA_TODEVICE);
-
- ring->idx = (ring->idx + 1) % ring->entries;
-
- info = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(info);
-
- info->flags |= IEEE80211_TX_STAT_ACK;
- /*info->status.rates[0].count = 1; */
-
- ieee80211_tx_status_irqsafe(hw, skb);
-
- if ((ring->entries - skb_queue_len(&ring->queue))
- == 2 && prio != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("more desc left, wake "
- "skb_queue@%d,ring->idx = %d,"
- "skb_queue_len = 0x%d\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue)));
-
- ieee80211_wake_queue(hw,
- skb_get_queue_mapping
- (skb));
- }
-
- skb = NULL;
- }
- }
-}
-
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *hw = dev_id;
@@ -959,17 +890,17 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
rtlhal->hw = hw;
rtlpci->pdev = pdev;
- ppsc->b_inactiveps = false;
- ppsc->b_leisure_ps = true;
- ppsc->b_fwctrl_lps = true;
- ppsc->b_reg_fwctrl_lps = 3;
+ ppsc->inactiveps = false;
+ ppsc->leisure_ps = true;
+ ppsc->fwctrl_lps = true;
+ ppsc->reg_fwctrl_lps = 3;
ppsc->reg_max_lps_awakeintvl = 5;
- if (ppsc->b_reg_fwctrl_lps == 1)
+ if (ppsc->reg_fwctrl_lps == 1)
ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
- else if (ppsc->b_reg_fwctrl_lps == 2)
+ else if (ppsc->reg_fwctrl_lps == 2)
ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
- else if (ppsc->b_reg_fwctrl_lps == 3)
+ else if (ppsc->reg_fwctrl_lps == 3)
ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
/*Tx/Rx related var */
@@ -1024,9 +955,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
("queue:%d, ring_addr:%p\n", prio, ring));
for (i = 0; i < entries; i++) {
- nextdescaddress = cpu_to_le32((u32) dma +
- ((i + 1) % entries) *
- sizeof(*ring));
+ nextdescaddress = (u32) dma + ((i + 1) % entries) *
+ sizeof(*ring);
rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
true, HW_DESC_TX_NEXTDESC_ADDR,
@@ -1090,7 +1020,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
- bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
+ bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
@@ -1121,9 +1051,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
struct sk_buff *skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
- le32_to_cpu(rtlpriv->cfg->
+ rtlpriv->cfg->
ops->get_desc((u8 *) entry, true,
- HW_DESC_TXBUFF_ADDR)),
+ HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
@@ -1255,11 +1185,11 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
__skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
- le32_to_cpu(rtlpriv->cfg->ops->
+ rtlpriv->cfg->ops->
get_desc((u8 *)
entry,
true,
- HW_DESC_TXBUFF_ADDR)),
+ HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
@@ -1273,7 +1203,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
return 0;
}
-unsigned int _rtl_mac_to_hwqueue(u16 fc,
+static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
unsigned int mac80211_queue_index)
{
unsigned int hw_queue_index;
@@ -1312,7 +1242,7 @@ out:
return hw_queue_index;
}
-int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1323,7 +1253,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
unsigned int queue_index, hw_queue;
unsigned long flags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
u8 *pda_addr = hdr->addr1;
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*ssn */
@@ -1429,7 +1359,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return 0;
}
-void rtl_pci_deinit(struct ieee80211_hw *hw)
+static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1444,7 +1374,7 @@ void rtl_pci_deinit(struct ieee80211_hw *hw)
}
-int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
@@ -1461,7 +1391,7 @@ int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
return 1;
}
-int rtl_pci_start(struct ieee80211_hw *hw)
+static int rtl_pci_start(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1496,7 +1426,7 @@ int rtl_pci_start(struct ieee80211_hw *hw)
return 0;
}
-void rtl_pci_stop(struct ieee80211_hw *hw)
+static void rtl_pci_stop(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1547,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
struct pci_dev *bridge_pdev = pdev->bus->self;
u16 venderid;
u16 deviceid;
- u8 revisionid;
u16 irqline;
u8 tmp;
venderid = pdev->vendor;
deviceid = pdev->device;
- pci_read_config_byte(pdev, 0x8, &revisionid);
pci_read_config_word(pdev, 0x3C, &irqline);
if (deviceid == RTL_PCI_8192_DID ||
@@ -1564,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
deviceid == RTL_PCI_8173_DID ||
deviceid == RTL_PCI_8172_DID ||
deviceid == RTL_PCI_8171_DID) {
- switch (revisionid) {
+ switch (pdev->revision) {
case RTL_PCI_REVISION_ID_8192PCIE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
("8192 PCI-E is found - "
@@ -1838,7 +1766,7 @@ fail3:
ieee80211_free_hw(hw);
if (rtlpriv->io.pci_mem_start != 0)
- pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+ pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
fail2:
pci_release_regions(pdev);
@@ -1888,7 +1816,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
}
if (rtlpriv->io.pci_mem_start != 0) {
- pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+ pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
}
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d36a66939958..0caa81429726 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -244,34 +244,34 @@ int rtl_pci_resume(struct pci_dev *pdev);
static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr);
+ return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- return readw((u8 *) rtlpriv->io.pci_mem_start + addr);
+ return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- return readl((u8 *) rtlpriv->io.pci_mem_start + addr);
+ return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
- writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+ writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
static inline void pci_write16_async(struct rtl_priv *rtlpriv,
u32 addr, u16 val)
{
- writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+ writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
static inline void pci_write32_async(struct rtl_priv *rtlpriv,
u32 addr, u32 val)
{
- writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+ writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index d2326c13449e..6b7e217b6b89 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -86,7 +86,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
- bool b_actionallowed = false;
+ bool actionallowed = false;
u16 rfwait_cnt = 0;
unsigned long flag;
@@ -139,13 +139,13 @@ no_protect:
ppsc->rfoff_reason &= (~changesource);
if ((changesource == RF_CHANGE_BY_HW) &&
- (ppsc->b_hwradiooff == true)) {
- ppsc->b_hwradiooff = false;
+ (ppsc->hwradiooff == true)) {
+ ppsc->hwradiooff = false;
}
if (!ppsc->rfoff_reason) {
ppsc->rfoff_reason = 0;
- b_actionallowed = true;
+ actionallowed = true;
}
break;
@@ -153,17 +153,17 @@ no_protect:
case ERFOFF:
if ((changesource == RF_CHANGE_BY_HW)
- && (ppsc->b_hwradiooff == false)) {
- ppsc->b_hwradiooff = true;
+ && (ppsc->hwradiooff == false)) {
+ ppsc->hwradiooff = true;
}
ppsc->rfoff_reason |= changesource;
- b_actionallowed = true;
+ actionallowed = true;
break;
case ERFSLEEP:
ppsc->rfoff_reason |= changesource;
- b_actionallowed = true;
+ actionallowed = true;
break;
default:
@@ -172,7 +172,7 @@ no_protect:
break;
}
- if (b_actionallowed)
+ if (actionallowed)
rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
if (!protect_or_not) {
@@ -181,7 +181,7 @@ no_protect:
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
}
- return b_actionallowed;
+ return actionallowed;
}
EXPORT_SYMBOL(rtl_ps_set_rf_state);
@@ -191,7 +191,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- ppsc->b_swrf_processing = true;
+ ppsc->swrf_processing = true;
if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
@@ -213,7 +213,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
}
}
- ppsc->b_swrf_processing = false;
+ ppsc->swrf_processing = false;
}
void rtl_ips_nic_off_wq_callback(void *data)
@@ -239,13 +239,13 @@ void rtl_ips_nic_off_wq_callback(void *data)
if (rtlpriv->sec.being_setkey)
return;
- if (ppsc->b_inactiveps) {
+ if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
/*
*Do not enter IPS in the following conditions:
*(1) RF is already OFF or Sleep
- *(2) b_swrf_processing (indicates the IPS is still under going)
+	 *(2) swrf_processing (indicates IPS processing is still in progress)
*(3) Connected (only disconnected can trigger IPS)
*(4) IBSS (send Beacon)
*(5) AP mode (send Beacon)
@@ -253,14 +253,14 @@ void rtl_ips_nic_off_wq_callback(void *data)
*/
if (rtstate == ERFON &&
- !ppsc->b_swrf_processing &&
+ !ppsc->swrf_processing &&
(mac->link_state == MAC80211_NOLINK) &&
!mac->act_scanning) {
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
("IPSEnter(): Turn off RF.\n"));
ppsc->inactive_pwrstate = ERFOFF;
- ppsc->b_in_powersavemode = true;
+ ppsc->in_powersavemode = true;
/*rtl_pci_reset_trx_ring(hw); */
_rtl_ps_inactive_ps(hw);
@@ -290,15 +290,15 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
- if (ppsc->b_inactiveps) {
+ if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
if (rtstate != ERFON &&
- !ppsc->b_swrf_processing &&
+ !ppsc->swrf_processing &&
ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
ppsc->inactive_pwrstate = ERFON;
- ppsc->b_in_powersavemode = false;
+ ppsc->in_powersavemode = false;
_rtl_ps_inactive_ps(hw);
}
@@ -370,9 +370,9 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
* mode and set RPWM to turn RF on.
*/
- if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) &&
+ if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) &&
ppsc->report_linked) {
- bool b_fw_current_inps;
+ bool fw_current_inps;
if (ppsc->dot11_psmode == EACTIVE) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
("FW LPS leave ps_mode:%x\n",
@@ -385,11 +385,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
(u8 *) (&fw_pwrmode));
- b_fw_current_inps = false;
+ fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
- (u8 *) (&b_fw_current_inps));
+ (u8 *) (&fw_current_inps));
} else {
if (rtl_get_fwlps_doze(hw)) {
@@ -398,10 +398,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
ppsc->fwctrl_psmode));
rpwm_val = 0x02; /* RF off */
- b_fw_current_inps = true;
+ fw_current_inps = true;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
- (u8 *) (&b_fw_current_inps));
+ (u8 *) (&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
(u8 *) (&ppsc->fwctrl_psmode));
@@ -425,13 +425,13 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned long flag;
- if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps))
+ if (!(ppsc->fwctrl_lps && ppsc->leisure_ps))
return;
if (rtlpriv->sec.being_setkey)
return;
- if (rtlpriv->link_info.b_busytraffic)
+ if (rtlpriv->link_info.busytraffic)
return;
/* Sleep 10s after getting linked, to let DHCP and the 4-way handshake complete. */
@@ -446,7 +446,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- if (ppsc->b_leisure_ps) {
+ if (ppsc->leisure_ps) {
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
if (ppsc->dot11_psmode == EACTIVE) {
@@ -470,7 +470,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) {
+ if (ppsc->fwctrl_lps && ppsc->leisure_ps) {
if (ppsc->dot11_psmode != EACTIVE) {
/*FIX ME */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
new file mode 100644
index 000000000000..aee42d7ae8a2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
@@ -0,0 +1,9 @@
+rtl8192c-common-objs := \
+ main.o \
+ dm_common.o \
+ fw_common.o \
+ phy_common.o
+
+obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
new file mode 100644
index 000000000000..bb023274414c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -0,0 +1,1398 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "dm_common.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
+ 0x7f8001fe,
+ 0x788001e2,
+ 0x71c001c7,
+ 0x6b8001ae,
+ 0x65400195,
+ 0x5fc0017f,
+ 0x5a400169,
+ 0x55400155,
+ 0x50800142,
+ 0x4c000130,
+ 0x47c0011f,
+ 0x43c0010f,
+ 0x40000100,
+ 0x3c8000f2,
+ 0x390000e4,
+ 0x35c000d7,
+ 0x32c000cb,
+ 0x300000c0,
+ 0x2d4000b5,
+ 0x2ac000ab,
+ 0x288000a2,
+ 0x26000098,
+ 0x24000090,
+ 0x22000088,
+ 0x20000080,
+ 0x1e400079,
+ 0x1c800072,
+ 0x1b00006c,
+ 0x19800066,
+ 0x18000060,
+ 0x16c0005b,
+ 0x15800056,
+ 0x14400051,
+ 0x1300004c,
+ 0x12000048,
+ 0x11000044,
+ 0x10000040,
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
+};
+
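+/* Reset the dynamic initial gain (DIG) state machine to its defaults. */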
+static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
+{
+ dm_digtable.dig_enable_flag = true;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ dm_digtable.cur_igvalue = 0x20;
+ dm_digtable.pre_igvalue = 0x0;
+ dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+ dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+ dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+ dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
+ dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+}
+
+static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long rssi_val_min = 0;
+
+ if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
+ (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
+ if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
+ rssi_val_min =
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
+ rtlpriv->dm.undecorated_smoothed_pwdb) ?
+ rtlpriv->dm.undecorated_smoothed_pwdb :
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ else
+ rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+ } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
+ dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+ rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+ } else if (dm_digtable.curmultista_connectstate ==
+ DIG_MULTISTA_CONNECT) {
+ rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ }
+
+ return (u8) rssi_val_min;
+}
+
+static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ u32 ret_value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+ falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
+ falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+ falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_cck_fail);
+
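+ /* reset the OFDM and CCK false alarm counters for the next interval */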
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+ "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
+}
+
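+/*
+ * Step the initial gain index from the false alarm count: few false alarms
+ * nudge the IGI down, many push it up, clamped to the FA upper/lower bounds
+ * (and forced to 0x32 when the count exceeds 10000).
+ */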
+static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value_igi = dm_digtable.cur_igvalue;
+
+ if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+ value_igi--;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
+ value_igi += 0;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
+ value_igi++;
+ else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
+ value_igi += 2;
+ if (value_igi > DM_DIG_FA_UPPER)
+ value_igi = DM_DIG_FA_UPPER;
+ else if (value_igi < DM_DIG_FA_LOWER)
+ value_igi = DM_DIG_FA_LOWER;
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+ value_igi = 0x32;
+
+ dm_digtable.cur_igvalue = value_igi;
+ rtl92c_dm_write_dig(hw);
+}
+
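+/*
+ * RSSI-driven initial gain: the target IGI is rssi_val_min + 10 minus a
+ * backoff value that shrinks when false alarms are high and grows when they
+ * are low, clamped to the configured rx gain range.
+ */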
+static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
+ if ((dm_digtable.backoff_val - 2) <
+ dm_digtable.backoff_val_range_min)
+ dm_digtable.backoff_val =
+ dm_digtable.backoff_val_range_min;
+ else
+ dm_digtable.backoff_val -= 2;
+ } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
+ if ((dm_digtable.backoff_val + 2) >
+ dm_digtable.backoff_val_range_max)
+ dm_digtable.backoff_val =
+ dm_digtable.backoff_val_range_max;
+ else
+ dm_digtable.backoff_val += 2;
+ }
+
+ if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
+ dm_digtable.rx_gain_range_max)
+ dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
+ else if ((dm_digtable.rssi_val_min + 10 -
+ dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
+ dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
+ else
+ dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
+ dm_digtable.backoff_val;
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("rssi_val_min = %x backoff_val %x\n",
+ dm_digtable.rssi_val_min, dm_digtable.backoff_val));
+
+ rtl92c_dm_write_dig(hw);
+}
+
+static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
+{
+ static u8 binitialized; /* initialized to false */
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ bool multi_sta = false;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ multi_sta = true;
+
+ if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
+ DIG_STA_DISCONNECT)) {
+ binitialized = false;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ return;
+ } else if (binitialized == false) {
+ binitialized = true;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+ dm_digtable.cur_igvalue = 0x20;
+ rtl92c_dm_write_dig(hw);
+ }
+
+ if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+ if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
+ (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
+
+ if (dm_digtable.dig_ext_port_stage ==
+ DIG_EXT_PORT_STAGE_2) {
+ dm_digtable.cur_igvalue = 0x20;
+ rtl92c_dm_write_dig(hw);
+ }
+
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
+ } else if (rssi_strength > dm_digtable.rssi_highthresh) {
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
+ rtl92c_dm_ctrl_initgain_by_fa(hw);
+ }
+ } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+ dm_digtable.cur_igvalue = 0x20;
+ rtl92c_dm_write_dig(hw);
+ }
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("curmultista_connectstate = "
+ "%x dig_ext_port_stage %x\n",
+ dm_digtable.curmultista_connectstate,
+ dm_digtable.dig_ext_port_stage));
+}
+
+static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("presta_connectstate = %x,"
+ " cursta_connectctate = %x\n",
+ dm_digtable.presta_connectstate,
+ dm_digtable.cursta_connectctate));
+
+ if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
+ || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
+ || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+
+ if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
+ dm_digtable.rssi_val_min =
+ rtl92c_dm_initial_gain_min_pwdb(hw);
+ rtl92c_dm_ctrl_initgain_by_rssi(hw);
+ }
+ } else {
+ dm_digtable.rssi_val_min = 0;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable.cur_igvalue = 0x20;
+ dm_digtable.pre_igvalue = 0;
+ rtl92c_dm_write_dig(hw);
+ }
+}
+
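+/*
+ * CCK packet-detection threshold: pick the low- or high-RSSI CCA stage with
+ * hysteresis (20/25), and in the low-RSSI stage further adjust the CCA byte
+ * according to the CCK false alarm count.
+ */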
+static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+ dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+
+ if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (dm_digtable.rssi_val_min <= 25)
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_LowRssi;
+ else
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_HighRssi;
+ } else {
+ if (dm_digtable.rssi_val_min <= 20)
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_LowRssi;
+ else
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_HighRssi;
+ }
+ } else {
+ dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+ }
+
+ if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
+ if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
+ dm_digtable.cur_cck_fa_state =
+ CCK_FA_STAGE_High;
+ else
+ dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
+
+ if (dm_digtable.pre_cck_fa_state !=
+ dm_digtable.cur_cck_fa_state) {
+ if (dm_digtable.cur_cck_fa_state ==
+ CCK_FA_STAGE_Low)
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+ 0x83);
+ else
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+ 0xcd);
+
+ dm_digtable.pre_cck_fa_state =
+ dm_digtable.cur_cck_fa_state;
+ }
+
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+ MASKBYTE2, 0xd7);
+ } else {
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+ MASKBYTE2, 0xd3);
+ }
+ dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
+ }
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
+}
+
+static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ if (mac->act_scanning == true)
+ return;
+
+ if ((mac->link_state > MAC80211_NOLINK) &&
+ (mac->link_state < MAC80211_LINKED))
+ dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
+ else if (mac->link_state >= MAC80211_LINKED)
+ dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
+ else
+ dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+
+ rtl92c_dm_initial_gain_sta(hw);
+ rtl92c_dm_initial_gain_multi_sta(hw);
+ rtl92c_dm_cck_packet_detection_thresh(hw);
+
+ dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
+
+}
+
+static void rtl92c_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.dm_initialgain_enable == false)
+ return;
+ if (dm_digtable.dig_enable_flag == false)
+ return;
+
+ rtl92c_dm_ctrl_initgain_by_twoport(hw);
+
+}
+
+static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dynamic_txpower_enable = false;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("cur_igvalue = 0x%x, "
+ "pre_igvalue = 0x%x, backoff_val = %d\n",
+ dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
+ dm_digtable.backoff_val));
+
+ if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+ dm_digtable.cur_igvalue);
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+ dm_digtable.cur_igvalue);
+
+ dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
+ }
+}
+EXPORT_SYMBOL(rtl92c_dm_write_dig);
+
+static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+ u8 h2c_parameter[3] = { 0 };
+
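+ /* Per-station PWDB scanning is not wired up here, so return early;
+ * the bookkeeping and the H2C RSSI report below are unreachable. */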
+ return;
+
+ if (tmpentry_max_pwdb != 0) {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
+ tmpentry_max_pwdb;
+ } else {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+ }
+
+ if (tmpentry_min_pwdb != 0xff) {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
+ tmpentry_min_pwdb;
+ } else {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+ }
+
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[0] = 0;
+
+ rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+}
+
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
+
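+/*
+ * EDCA turbo: when only best-effort traffic is flowing and frame bursting is
+ * allowed, bias REG_EDCA_BE_PARAM toward download (rx bytes > 4 * tx bytes)
+ * or upload; otherwise restore the default AC_BE parameters via set_hw_reg().
+ */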
+static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt;
+ u64 cur_rxok_cnt;
+ u32 edca_be_ul = 0x5ea42b;
+ u32 edca_be_dl = 0x5ea42b;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ goto dm_checkedcaturbo_exit;
+
+ if (mac->link_state != MAC80211_LINKED) {
+ rtlpriv->dm.current_turbo_edca = false;
+ return;
+ }
+
+ if (!mac->ht_enable) { /*FIX MERGE */
+ if (!(edca_be_ul & 0xffff0000))
+ edca_be_ul |= 0x005e0000;
+
+ if (!(edca_be_dl & 0xffff0000))
+ edca_be_dl |= 0x005e0000;
+ }
+
+ if ((!rtlpriv->dm.is_any_nonbepkts) &&
+ (!rtlpriv->dm.disable_framebursting)) {
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+ if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+ if (!rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv,
+ REG_EDCA_BE_PARAM,
+ edca_be_dl);
+ rtlpriv->dm.is_cur_rdlstate = true;
+ }
+ } else {
+ if (rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv,
+ REG_EDCA_BE_PARAM,
+ edca_be_ul);
+ rtlpriv->dm.is_cur_rdlstate = false;
+ }
+ }
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *) (&tmp));
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+ }
+
+dm_checkedcaturbo_exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
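+/*
+ * Thermal-meter based TX power tracking: read RF_T_METER, compare it with
+ * the EFUSE calibration value, and step the OFDM/CCK swing table indices by
+ * the temperature delta.  Sufficient drift also retriggers LC calibration
+ * (delta_lck > 1) and IQ calibration (delta_iqk > 3).
+ */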
+static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
+ *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 thermalvalue, delta, delta_lck, delta_iqk;
+ long ele_a, ele_d, temp_cck, val_x, value32;
+ long val_y, ele_c;
+ u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
+ int i;
+ bool is2t = IS_92C_SERIAL(rtlhal->version);
+ u8 txpwr_level[2] = {0, 0};
+ u8 ofdm_min_index = 6, rf;
+
+ rtlpriv->dm.txpower_trackingInit = true;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
+
+ thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter));
+
+ rtl92c_phy_ap_calibrate(hw, (thermalvalue -
+ rtlefuse->eeprom_thermalmeter));
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (thermalvalue) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD) & MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[0] = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Initial pathA ele_d reg0x%x = 0x%lx, "
+ "ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]));
+ break;
+ }
+ }
+
+ if (is2t) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD) & MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+ if (ele_d == (ofdmswing_table[i] &
+ MASKOFDM_D)) {
+ ofdm_index_old[1] = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial pathB ele_d reg0x%x = "
+ "0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]));
+ break;
+ }
+ }
+ }
+
+ temp_cck =
+ rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
+
+ for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+ if (rtlpriv->dm.cck_inch14) {
+ if (memcmp((void *)&temp_cck,
+ (void *)&cckswing_table_ch14[i][2],
+ 4) == 0) {
+ cck_index_old = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial reg0x%x = 0x%lx, "
+ "cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14));
+ break;
+ }
+ } else {
+ if (memcmp((void *)&temp_cck,
+ (void *)
+ &cckswing_table_ch1ch13[i][2],
+ 4) == 0) {
+ cck_index_old = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial reg0x%x = 0x%lx, "
+ "cck_index=0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14));
+ break;
+ }
+ }
+ }
+
+ if (!rtlpriv->dm.thermalvalue) {
+ rtlpriv->dm.thermalvalue =
+ rtlefuse->eeprom_thermalmeter;
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+ rtlpriv->dm.cck_index = cck_index_old;
+ }
+
+ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue) :
+ (rtlpriv->dm.thermalvalue - thermalvalue);
+
+ delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+ (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+
+ delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+ (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x delta 0x%x "
+ "delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk));
+
+ if (delta_lck > 1) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtl92c_phy_lc_calibrate(hw);
+ }
+
+ if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+ if (thermalvalue > rtlpriv->dm.thermalvalue) {
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] -= delta;
+ rtlpriv->dm.cck_index -= delta;
+ } else {
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] += delta;
+ rtlpriv->dm.cck_index += delta;
+ }
+
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, "
+ "OFDM_B_index=0x%x,"
+ "cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index));
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("temp OFDM_A_index=0x%x,"
+ "cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index));
+ }
+
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] =
+ rtlpriv->dm.ofdm_index[i]
+ + 1;
+ cck_index = rtlpriv->dm.cck_index + 1;
+ } else {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] =
+ rtlpriv->dm.ofdm_index[i];
+ cck_index = rtlpriv->dm.cck_index;
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (txpwr_level[i] >= 0 &&
+ txpwr_level[i] <= 26) {
+ if (thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ ofdm_index[i] -= 1;
+
+ else
+ ofdm_index[i] -= 2;
+ } else if (delta > 5 && thermalvalue <
+ rtlefuse->
+ eeprom_thermalmeter) {
+ ofdm_index[i] += 1;
+ }
+ } else if (txpwr_level[i] >= 27 &&
+ txpwr_level[i] <= 32
+ && thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ ofdm_index[i] -= 1;
+
+ else
+ ofdm_index[i] -= 2;
+ } else if (txpwr_level[i] >= 32 &&
+ txpwr_level[i] <= 38 &&
+ thermalvalue >
+ rtlefuse->eeprom_thermalmeter
+ && delta > 5) {
+ ofdm_index[i] -= 1;
+ }
+ }
+
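+ /* Note: the per-path loop above has finished, so i equals rf here;
+ * on 2T2R parts this reads txpwr_level[] one element past its end. */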
+ if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
+ if (thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ cck_index -= 1;
+
+ else
+ cck_index -= 2;
+ } else if (delta > 5 && thermalvalue <
+ rtlefuse->eeprom_thermalmeter) {
+ cck_index += 1;
+ }
+ } else if (txpwr_level[i] >= 27 &&
+ txpwr_level[i] <= 32 &&
+ thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ cck_index -= 1;
+
+ else
+ cck_index -= 2;
+ } else if (txpwr_level[i] >= 32 &&
+ txpwr_level[i] <= 38 &&
+ thermalvalue > rtlefuse->eeprom_thermalmeter
+ && delta > 5) {
+ cck_index -= 1;
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
+ ofdm_index[i] = OFDM_TABLE_SIZE - 1;
+
+ else if (ofdm_index[i] < ofdm_min_index)
+ ofdm_index[i] = ofdm_min_index;
+ }
+
+ if (cck_index > CCK_TABLE_SIZE - 1)
+ cck_index = CCK_TABLE_SIZE - 1;
+ else if (cck_index < 0)
+ cck_index = 0;
+
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("new OFDM_A_index=0x%x, "
+ "OFDM_B_index=0x%x,"
+ "cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index));
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("new OFDM_A_index=0x%x,"
+ "cck_index=0x%x\n",
+ ofdm_index[0], cck_index));
+ }
+ }
+
+ if (rtlpriv->dm.txpower_track_control && delta != 0) {
+ ele_d =
+ (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->reg_e94;
+ val_y = rtlphy->reg_e9c;
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) | ele_a;
+
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+ value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(31), value32);
+
+ value32 = ((val_y * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(29), value32);
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD,
+ ofdmswing_table[ofdm_index[0]]);
+
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+ 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(31) | BIT(29), 0x00);
+ }
+
+ if (!rtlpriv->dm.cck_inch14) {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch1ch13[cck_index]
+ [0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch1ch13[cck_index]
+ [1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch1ch13[cck_index]
+ [2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch1ch13[cck_index]
+ [3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch1ch13[cck_index]
+ [4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch1ch13[cck_index]
+ [5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch1ch13[cck_index]
+ [6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch1ch13[cck_index]
+ [7]);
+ } else {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch14[cck_index]
+ [0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch14[cck_index]
+ [1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch14[cck_index]
+ [2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch14[cck_index]
+ [3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch14[cck_index]
+ [4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch14[cck_index]
+ [5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch14[cck_index]
+ [6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch14[cck_index]
+ [7]);
+ }
+
+ if (is2t) {
+ ele_d = (ofdmswing_table[ofdm_index[1]] &
+ 0xFFC00000) >> 22;
+
+ val_x = rtlphy->reg_eb4;
+ val_y = rtlphy->reg_ebc;
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) &
+ 0x000003FF;
+
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) &
+ 0x00003FF;
+
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+ MASKH4BITS, value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(27), value32);
+
+ value32 = ((val_y * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(25), value32);
+ } else {
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD,
+ ofdmswing_table[ofdm_index
+ [1]]);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+ MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(27) | BIT(25), 0x00);
+ }
+
+ }
+ }
+
+ if (delta_iqk > 3) {
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ rtl92c_phy_iq_calibrate(hw, false);
+ }
+
+ if (rtlpriv->dm.txpower_track_control)
+ rtlpriv->dm.thermalvalue = thermalvalue;
+ }
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+
+}
+
+static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_trackingInit = false;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking));
+}
+
+static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+ rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
+}
+
+static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
+{
+ rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
+}
+
+static void rtl92c_dm_check_txpower_tracking_thermal_meter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger;
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
+ 0x60);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 92S Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking direct call!!\n"));
+ rtl92c_dm_txpower_tracking_directcall(hw);
+ tm_trigger = 0;
+ }
+}
+
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+ rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
+
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+ p_ra->ratr_state = DM_RATR_STA_INIT;
+ p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+
+}
+EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
+
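+/*
+ * Map the smoothed PWDB onto HIGH/MIDDLE/LOW rate-adaptive states using
+ * per-state hysteresis thresholds, and push a new rate mask through the
+ * update_rate_mask() callback whenever the state changes.
+ */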
+static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+ u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("<---- driver is going to unload\n"));
+ return;
+ }
+
+ if (!rtlpriv->dm.useramask) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("<---- driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED) {
+
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_HIGH:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra = 55;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 25;
+ break;
+ default:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ }
+
+ if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)high_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)low_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("RSSI = %ld\n",
+ rtlpriv->dm.undecorated_smoothed_pwdb));
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state));
+
+ rtlpriv->cfg->ops->update_rate_mask(hw,
+ p_ra->ratr_state);
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+ }
+ }
+}
+
+static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ dm_pstable.pre_ccastate = CCA_MAX;
+ dm_pstable.cur_ccasate = CCA_MAX;
+ dm_pstable.pre_rfstate = RF_MAX;
+ dm_pstable.cur_rfstate = RF_MAX;
+ dm_pstable.rssi_val_min = 0;
+}
+
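+/*
+ * Switch CCA between one and two receive paths with RSSI hysteresis: enter
+ * 1R CCA once the minimum RSSI reaches 35, fall back to 2R CCA when it
+ * drops to 30 or below.
+ */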
+static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (dm_pstable.rssi_val_min != 0) {
+ if (dm_pstable.pre_ccastate == CCA_2R) {
+ if (dm_pstable.rssi_val_min >= 35)
+ dm_pstable.cur_ccasate = CCA_1R;
+ else
+ dm_pstable.cur_ccasate = CCA_2R;
+ } else {
+ if (dm_pstable.rssi_val_min <= 30)
+ dm_pstable.cur_ccasate = CCA_2R;
+ else
+ dm_pstable.cur_ccasate = CCA_1R;
+ }
+ } else {
+ dm_pstable.cur_ccasate = CCA_MAX;
+ }
+
+ if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
+ if (dm_pstable.cur_ccasate == CCA_1R) {
+ if (get_rf_type(rtlphy) == RF_2T2R) {
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+ MASKBYTE0, 0x13);
+ rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+ MASKBYTE0, 0x23);
+ rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
+ }
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
+ 0x33);
+ rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
+ }
+ dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
+ }
+
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
+ (dm_pstable.cur_ccasate ==
+ 0) ? "1RCCA" : "2RCCA"));
+}
+
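+/*
+ * RF power saving: cache the normal-mode values of a few baseband registers
+ * on the first call, then switch between RF_SAVE and RF_NORMAL with RSSI
+ * hysteresis (>= 30 enters save mode, <= 25 restores the cached values),
+ * unless bforce_in_normal forces normal mode.
+ */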
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
+{
+ static u8 initialize;
+ static u32 reg_874, reg_c70, reg_85c, reg_a74;
+
+ if (initialize == 0) {
+ reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ MASKDWORD) & 0x1CC000) >> 14;
+
+ reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+ MASKDWORD) & BIT(3)) >> 3;
+
+ reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+ MASKDWORD) & 0xFF000000) >> 24;
+
+ reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+
+ initialize = 1;
+ }
+
+ if (!bforce_in_normal) {
+ if (dm_pstable.rssi_val_min != 0) {
+ if (dm_pstable.pre_rfstate == RF_NORMAL) {
+ if (dm_pstable.rssi_val_min >= 30)
+ dm_pstable.cur_rfstate = RF_SAVE;
+ else
+ dm_pstable.cur_rfstate = RF_NORMAL;
+ } else {
+ if (dm_pstable.rssi_val_min <= 25)
+ dm_pstable.cur_rfstate = RF_NORMAL;
+ else
+ dm_pstable.cur_rfstate = RF_SAVE;
+ }
+ } else {
+ dm_pstable.cur_rfstate = RF_MAX;
+ }
+ } else {
+ dm_pstable.cur_rfstate = RF_NORMAL;
+ }
+
+ if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
+ if (dm_pstable.cur_rfstate == RF_SAVE) {
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ 0x1C0000, 0x2);
+ rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
+ rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+ 0xFF000000, 0x63);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ 0xC000, 0x2);
+ rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
+ rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+ rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ 0x1CC000, reg_874);
+ rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
+ reg_c70);
+ rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
+ reg_85c);
+ rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+ rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+ }
+
+ dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
+ }
+}
+EXPORT_SYMBOL(rtl92c_dm_rf_saving);
+
+static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (((mac->link_state == MAC80211_NOLINK)) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ dm_pstable.rssi_val_min = 0;
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("Not connected to any\n"));
+ }
+
+ if (mac->link_state == MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ dm_pstable.rssi_val_min =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min));
+ } else {
+ dm_pstable.rssi_val_min =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min));
+ }
+ } else {
+ dm_pstable.rssi_val_min =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min));
+ }
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl92c_dm_1r_cca(hw);
+}
+
+void rtl92c_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl92c_dm_diginit(hw);
+ rtl92c_dm_init_dynamic_txpower(hw);
+ rtl92c_dm_init_edca_turbo(hw);
+ rtl92c_dm_init_rate_adaptive_mask(hw);
+ rtl92c_dm_initialize_txpower_tracking(hw);
+ rtl92c_dm_init_dynamic_bb_powersaving(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_init);
+
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&fw_current_inpsmode));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *) (&fw_ps_awake));
+
+ if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
+ fw_ps_awake)
+ && (!ppsc->rfchange_inprogress)) {
+ rtl92c_dm_pwdb_monitor(hw);
+ rtl92c_dm_dig(hw);
+ rtl92c_dm_false_alarm_counter_statistics(hw);
+ rtl92c_dm_dynamic_bb_powersaving(hw);
+ rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
+ rtl92c_dm_check_txpower_tracking(hw);
+ rtl92c_dm_refresh_rate_adaptive_mask(hw);
+ rtl92c_dm_check_edca_turbo(hw);
+
+ }
+}
+EXPORT_SYMBOL(rtl92c_dm_watchdog);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
new file mode 100644
index 000000000000..b9cbb0a3c03f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92COMMON_DM_H__
+#define __RTL92COMMON_DM_H__
+
+#include "../wifi.h"
+#include "../rtl8192ce/def.h"
+#include "../rtl8192ce/reg.h"
+#include "fw_common.h"
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 37
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_FA_UPPER 0x32
+#define DM_DIG_FA_LOWER 0x20
+#define DM_DIG_FA_TH0 0x20
+#define DM_DIG_FA_TH1 0x100
+#define DM_DIG_FA_TH2 0x200
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVal 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+
+struct ps_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+ long rssi_val_min;
+};
+
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+ u8 cursta_connectctate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+ u8 pre_cck_fa_state;
+ u8 cur_cck_fa_state;
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+};
+
+struct swat_t {
+ u8 failure_cnt;
+ u8 try_flag;
+ u8 stop_trying;
+ long pre_rssi;
+ long trying_threshold;
+ u8 cur_antenna;
+ u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+ CCK_PD_STAGE_LowRssi = 0,
+ CCK_PD_STAGE_HighRssi = 1,
+ CCK_FA_STAGE_Low = 2,
+ CCK_FA_STAGE_High = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+extern struct dig_t dm_digtable;
+void rtl92c_dm_init(struct ieee80211_hw *hw);
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 11dd22b987e7..5ef91374b230 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -31,10 +31,9 @@
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
-#include "reg.h"
-#include "def.h"
-#include "fw.h"
-#include "table.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "fw_common.h"
static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
{
@@ -133,17 +132,15 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- bool is_version_b;
u8 *bufferPtr = (u8 *) buffer;
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
- is_version_b = IS_CHIP_VER_B(version);
- if (is_version_b) {
+ if (IS_CHIP_VER_B(version)) {
u32 pageNums, remainSize;
u32 page, offset;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal))
_rtl92c_fill_dummy(bufferPtr, &size);
pageNums = size / FW_8192C_PAGE_SIZE;
@@ -231,14 +228,14 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
int err;
enum version_8192c version = rtlhal->version;
+ const struct firmware *firmware;
- const struct firmware *firmware = NULL;
-
+ printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n",
+ rtlpriv->cfg->fw_name);
err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
rtlpriv->io.dev);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to request firmware!\n"));
+ printk(KERN_ERR "rtl8192cu: Firmware loading failed\n");
return 1;
}
@@ -281,6 +278,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
return 0;
}
+EXPORT_SYMBOL(rtl92c_download_fw);
static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
{
@@ -318,12 +316,12 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- if (rtlhal->b_h2c_setinprogress) {
+ if (rtlhal->h2c_setinprogress) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
("H2C set in progress! Wait to set.."
"element_id(%d).\n", element_id));
- while (rtlhal->b_h2c_setinprogress) {
+ while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
@@ -339,7 +337,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
}
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
} else {
- rtlhal->b_h2c_setinprogress = true;
+ rtlhal->h2c_setinprogress = true;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
break;
}
@@ -495,7 +493,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- rtlhal->b_h2c_setinprogress = false;
+ rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
@@ -507,7 +505,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 tmp_cmdbuf[2];
- if (rtlhal->bfw_ready == false) {
+ if (rtlhal->fw_ready == false) {
RT_ASSERT(false, ("return H2C cmd because of Fw "
"download fail!!!\n"));
return;
@@ -519,6 +517,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
return;
}
+EXPORT_SYMBOL(rtl92c_fill_h2c_cmd);
void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
{
@@ -539,6 +538,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
}
}
+EXPORT_SYMBOL(rtl92c_firmware_selfreset);
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
{
@@ -559,39 +559,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
}
-
-static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
- struct sk_buff *skb)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring;
- struct rtl_tx_desc *pdesc;
- u8 own;
- unsigned long flags;
- struct sk_buff *pskb = NULL;
-
- ring = &rtlpci->tx_ring[BEACON_QUEUE];
-
- pskb = __skb_dequeue(&ring->queue);
- if (pskb)
- kfree_skb(pskb);
-
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
-
- pdesc = &ring->desc[0];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
-
- rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
-
- __skb_queue_tail(&ring->queue, skb);
-
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
- rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
-
- return true;
-}
+EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
#define BEACON_PG 0 /*->1*/
#define PSPOLL_PG 2
@@ -776,7 +744,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
memcpy((u8 *) skb_put(skb, totalpacketlen),
&reserved_page_packet, totalpacketlen);
- rtstatus = _rtl92c_cmd_send_packet(hw, skb);
+ rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb);
if (rtstatus)
b_dlok = true;
@@ -793,6 +761,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
("Set RSVD page location to Fw FAIL!!!!!!.\n"));
}
+EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
{
@@ -802,3 +771,4 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
}
+EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3db33bd14666..3db33bd14666 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
new file mode 100644
index 000000000000..2f624fc27499
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Georgia <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
new file mode 100644
index 000000000000..a70228278398
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -0,0 +1,2042 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "dm_common.h"
+#include "phy_common.h"
+
+/* Define macro to shorten lines */
+#define MCS_TXPWR mcs_txpwrlevel_origoffset
+
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 returnvalue, originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x)\n", regaddr,
+ bitmask));
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+ "Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue));
+
+ return returnvalue;
+
+}
+EXPORT_SYMBOL(rtl92c_phy_query_bb_reg);
+
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
+
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ RT_ASSERT(false, ("deprecated!\n"));
+ return 0;
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
+
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data)
+{
+ RT_ASSERT(false, ("deprecated!\n"));
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
+
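+/*
+ * Read an RF register through the 3-wire LSSI interface: program the read
+ * address into the HSSI parameter-2 register, strobe the LSSI read edge,
+ * then fetch the result from the PI or SI readback register depending on
+ * whether parallel-interface mode is enabled.
+ */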
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ offset &= 0x3f;
+ newoffset = offset;
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
+ return 0xFFFFFFFF;
+ }
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ mdelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ mdelay(1);
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong | BLSSIREADEDGE);
+ mdelay(1);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ BLSSIREADBACKDATA);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rflssi_readback,
+ retvalue));
+ return retvalue;
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
+
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
+ return;
+ }
+ offset &= 0x3f;
+ newoffset = offset;
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr));
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
+
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+
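+/* Restrict the baseband to a single transmit path (used for 1T2R parts). */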
+static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+ rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
+ rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+ rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
+ rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
+}
+bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtlpriv->cfg->ops->phy_rf6052_config(hw);
+}
+EXPORT_SYMBOL(rtl92c_phy_rf_config);
+
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ bool rtstatus;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+ rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ return false;
+ }
+ if (rtlphy->rf_type == RF_1T2R) {
+ _rtl92c_phy_bb_config_1t(hw);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+ }
+ if (rtlefuse->autoload_failflag == false) {
+ rtlphy->pwrgroup_cnt = 0;
+ rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ return false;
+ }
+ rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ return false;
+ }
+ rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200));
+ return true;
+}
+EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
+
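+/*
+ * Cache one per-rate TX power entry from the power-by-rate (PG) register
+ * table into MCS_TXPWR[pwrgroup_cnt][]; the group counter advances after the
+ * final (path B MCS15-MCS12) entry of a group has been stored.
+ */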
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask,
+ u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (regaddr == RTXAGC_A_RATE18_06) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
+ }
+ if (regaddr == RTXAGC_A_RATE54_24) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
+ }
+ if (regaddr == RTXAGC_A_CCK1_MCS32) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
+ }
+ if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
+ }
+ if (regaddr == RTXAGC_A_MCS03_MCS00) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
+ }
+ if (regaddr == RTXAGC_A_MCS07_MCS04) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
+ }
+ if (regaddr == RTXAGC_A_MCS11_MCS08) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
+ }
+ if (regaddr == RTXAGC_A_MCS15_MCS12) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
+ }
+ if (regaddr == RTXAGC_B_RATE18_06) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
+ }
+ if (regaddr == RTXAGC_B_RATE54_24) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
+ }
+
+ if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
+ }
+
+ if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS03_MCS00) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS07_MCS04) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS11_MCS08) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS15_MCS12) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));
+
+ rtlphy->pwrgroup_cnt++;
+ }
+}
+EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
+
+void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Default initial gain (c50=0x%x, "
+ "c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]));
+
+ rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR3, MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR2, MASKDWORD);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
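+/*
+ * Fill the per-RF-path phyreg_def table with the baseband register addresses
+ * used by the RF serial read/write helpers above.
+ */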
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
+ RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
+ RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
+ RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
+ RFPGA0_XCD_SWITCHCONTROL;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
+ ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
+ ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
+ ROFDM0_XCRXIQIMBANLANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
+ ROFDM0_XDRXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
+ ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
+ ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
+ ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
+ ROFDM0_XDTXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
+ RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
+ RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
+ RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
+ RFPGA0_XD_LSSIREADBACK;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
+ TRANSCEIVEA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
+ TRANSCEIVEB_HSPI_READBACK;
+}
+EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
+
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 txpwr_level;
+ long txpwr_dbm;
+
+ txpwr_level = rtlphy->cur_cck_txpwridx;
+ txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_B, txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
+ rtlefuse->legacy_ht_txpowerdiff;
+ if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_N_24G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
+ *powerlevel = txpwr_dbm;
+}
+
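+/* Look up the per-path CCK and HT40 OFDM power indexes for this channel. */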
+static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+ u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+
+ cckpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+ cckpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+ if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+ } else if (get_rf_type(rtlphy) == RF_2T2R) {
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+ }
+}
+
+static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
+ u8 channel, u8 *cckpowerlevel,
+ u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+ rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
+void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+ if (!rtlefuse->txpwr_fromeprom)
+ return;
+ _rtl92c_get_txpower_index(hw, channel,
+ &cckpowerlevel[0], &ofdmpowerlevel[0]);
+ _rtl92c_ccxpower_index_check(hw,
+ channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+ rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+ rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0],
+ channel);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_txpower_level);
+
+bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 idx;
+ u8 rf_path;
+
+ u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+ WIRELESS_MODE_B,
+ power_indbm);
+ u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+ WIRELESS_MODE_N_24G,
+ power_indbm);
+ if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
+ ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
+ else
+ ofdmtxpwridx = 0;
+ RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
+ ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx));
+ for (idx = 0; idx < 14; idx++) {
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
+ ofdmtxpwridx;
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
+ ofdmtxpwridx;
+ }
+ }
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
+
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
+{
+}
+EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
+
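+/*
+ * Convert a power in dBm to a TX power index; each index step is 0.5 dB and
+ * the per-mode offset mirrors _rtl92c_phy_txpwr_idx_to_dbm() below.
+ */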
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ long power_indbm)
+{
+ u8 txpwridx;
+ long offset;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ offset = -8;
+ break;
+ default:
+ offset = -8;
+ break;
+ }
+
+ if ((power_indbm - offset) > 0)
+ txpwridx = (u8) ((power_indbm - offset) * 2);
+ else
+ txpwridx = 0;
+
+ if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
+ txpwridx = MAX_TXPWR_IDX_NMODE_92S;
+
+ return txpwridx;
+}
+EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx);
+
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx)
+{
+ long offset;
+ long pwrout_dbm;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ offset = -8;
+ break;
+ default:
+ offset = -8;
+ break;
+ }
+ pwrout_dbm = txpwridx / 2 + offset;
+ return pwrout_dbm;
+}
+EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
+
+void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Unknown Scan Backup operation.\n"));
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
+
+void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_bw = rtlphy->current_chan_bw;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ rtlphy->set_bwmode_inprogress = true;
+ if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
+ rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
+ else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("FALSE driver sleep or unload\n"));
+ rtlphy->set_bwmode_inprogress = false;
+ rtlphy->current_chan_bw = tmp_bw;
+ }
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bw_mode);
+
+void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 delay;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d\n", rtlphy->current_channel));
+ if (is_hal_stop(rtlhal))
+ return;
+ do {
+ if (!rtlphy->sw_chnl_inprogress)
+ break;
+ if (!_rtl92c_phy_sw_chnl_step_by_step
+ (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
+ &rtlphy->sw_chnl_step, &delay)) {
+ if (delay > 0)
+ mdelay(delay);
+ else
+ continue;
+ } else
+ rtlphy->sw_chnl_inprogress = false;
+ break;
+ } while (true);
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
+
+u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+ RT_ASSERT((rtlphy->current_channel <= 14),
+ ("WIRELESS_MODE_G but channel>14"));
+ rtlphy->sw_chnl_inprogress = true;
+ rtlphy->sw_chnl_stage = 0;
+ rtlphy->sw_chnl_step = 0;
+ if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl92c_phy_sw_chnl_callback(hw);
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false, schedule workitem\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ } else {
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false, driver sleep or"
+ " unload\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
+
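+/*
+ * Execute one step of the software channel-switch sequence: stage 0 runs the
+ * pre-commands, stage 1 the RF-dependent commands, stage 2 the post-commands.
+ * Returns true once the final CMDID_END has been handled.
+ */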
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage, u8 *step,
+ u32 *delay)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+ u32 precommoncmdcnt;
+ struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+ u32 postcommoncmdcnt;
+ struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+ u32 rfdependcmdcnt;
+ struct swchnlcmd *currentcmd = NULL;
+ u8 rfpath;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+ precommoncmdcnt = 0;
+ _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT,
+ CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
+ _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+
+ postcommoncmdcnt = 0;
+
+ _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+
+ rfdependcmdcnt = 0;
+
+ RT_ASSERT((channel >= 1 && channel <= 14),
+ ("illegal channel for Zebra: %d\n", channel));
+
+ _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+ RF_CHNLBW, channel, 10);
+
+ _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
+ 0);
+
+ do {
+ switch (*stage) {
+ case 0:
+ currentcmd = &precommoncmd[*step];
+ break;
+ case 1:
+ currentcmd = &rfdependcmd[*step];
+ break;
+ case 2:
+ currentcmd = &postcommoncmd[*step];
+ break;
+ }
+
+ if (currentcmd->cmdid == CMDID_END) {
+ if ((*stage) == 2) {
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
+ }
+ }
+
+ switch (currentcmd->cmdid) {
+ case CMDID_SET_TXPOWEROWER_LEVEL:
+ rtl92c_phy_set_txpower_level(hw, channel);
+ break;
+ case CMDID_WRITEPORT_ULONG:
+ rtl_write_dword(rtlpriv, currentcmd->para1,
+ currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_USHORT:
+ rtl_write_word(rtlpriv, currentcmd->para1,
+ (u16) currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_UCHAR:
+ rtl_write_byte(rtlpriv, currentcmd->para1,
+ (u8) currentcmd->para2);
+ break;
+ case CMDID_RF_WRITEREG:
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] =
+ ((rtlphy->rfreg_chnlval[rfpath] &
+ 0xfffffc00) | currentcmd->para2);
+
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ currentcmd->para1,
+ RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not processed\n"));
+ break;
+ }
+
+ break;
+ } while (true);
+
+ (*delay) = currentcmd->msdelay;
+ (*step)++;
+ return false;
+}
+
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx, u32 cmdtablesz,
+ enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2, u32 msdelay)
+{
+ struct swchnlcmd *pcmd;
+
+ if (cmdtable == NULL) {
+ RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+ return false;
+ }
+
+ if (cmdtableidx >= cmdtablesz)
+ return false;
+
+ pcmd = cmdtable + cmdtableidx;
+ pcmd->cmdid = cmdid;
+ pcmd->para1 = para1;
+ pcmd->para2 = para2;
+ pcmd->msdelay = msdelay;
+ return true;
+}
+
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
+{
+ return true;
+}
+EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath);
+
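+/*
+ * One path A IQK attempt: trigger the one-shot calibration, wait, then check
+ * the 0xe94/0xe9c and 0xea4/0xeac results; bit 0 of the return value reports
+ * the first pair, bit 1 the second.
+ */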
+static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
+ config_pathb ? 0x28160202 : 0x28160502);
+
+ if (config_pathb) {
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
+ }
+
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+ reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+ if (!(reg_eac & BIT(28)) &&
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(reg_eac & BIT(27)) &&
+ (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+
+static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+ mdelay(IQK_DELAY_TIME);
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+ reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+ reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+ reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+ if (!(reg_eac & BIT(31)) &&
+ (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(reg_eac & BIT(30)) &&
+ (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+
+static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool btxonly)
+{
+ u32 oldval_0, x, tx0_a, reg;
+ long y, tx0_c;
+
+ if (final_candidate == 0xFF)
+ return;
+ else if (iqk_ok) {
+ oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD) >> 22) & 0x3FF;
+ x = result[final_candidate][0];
+ if ((x & 0x00000200) != 0)
+ x = x | 0xFFFFFC00;
+ tx0_a = (x * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+ ((x * oldval_0 >> 7) & 0x1));
+ y = result[final_candidate][1];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xFFFFFC00;
+ tx0_c = (y * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+ ((tx0_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+ (tx0_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+ ((y * oldval_0 >> 7) & 0x1));
+ if (btxonly)
+ return;
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+ }
+}
+
+static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool btxonly)
+{
+ u32 oldval_1, x, tx1_a, reg;
+ long y, tx1_c;
+
+ if (final_candidate == 0xFF)
+ return;
+ else if (iqk_ok) {
+ oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD) >> 22) & 0x3FF;
+ x = result[final_candidate][4];
+ if ((x & 0x00000200) != 0)
+ x = x | 0xFFFFFC00;
+ tx1_a = (x * oldval_1) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
+ ((x * oldval_1 >> 7) & 0x1));
+ y = result[final_candidate][5];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xFFFFFC00;
+ tx1_c = (y * oldval_1) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
+ ((tx1_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
+ (tx1_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
+ ((y * oldval_1 >> 7) & 0x1));
+ if (btxonly)
+ return;
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+ }
+}
+
+static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+
+static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+
+static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+
+static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+ rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
+ u32 *addareg, bool is_patha_on, bool is2t)
+{
+ u32 pathOn;
+ u32 i;
+
+ pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2t) {
+ pathOn = 0x0bdb25a0;
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+ } else {
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
+ }
+
+ for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
+}
+
+static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i],
+ (u8) (macbackup[i] & (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+
+static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+
+static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ u32 mode;
+
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+
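+/*
+ * Compare IQK result sets c1 and c2 entry by entry against MAX_TOLERANCE;
+ * fully matching sets return true, while partial matches copy the usable
+ * half of c1 into result[3] and return false.
+ */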
+static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
+ long result[][8], u8 c1, u8 c2)
+{
+ u32 i, j, diff, simularity_bitmap, bound;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 final_candidate[2] = { 0xFF, 0xFF };
+ bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ simularity_bitmap = 0;
+
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ?
+ (result[c1][i] - result[c2][i]) :
+ (result[c2][i] - result[c1][i]);
+
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !simularity_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ simularity_bitmap = simularity_bitmap |
+ (1 << i);
+ } else
+ simularity_bitmap =
+ simularity_bitmap | (1 << i);
+ }
+ }
+
+ if (simularity_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+ return bresult;
+ } else if (!(simularity_bitmap & 0x0F)) {
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else if (!(simularity_bitmap & 0xF0) && is2t) {
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else {
+ return false;
+ }
+}
+
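+/*
+ * One IQ-calibration pass: on the first pass (t == 0) back up the ADDA, MAC
+ * and BB state, switch to PI mode if needed, run the path A (and, on 2T
+ * parts, path B) IQK up to retrycount times, and on later passes restore the
+ * saved registers afterwards.
+ */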
+static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
+ long result[][8], u8 t, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u8 patha_ok, pathb_ok;
+ u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74,
+ 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4,
+ 0xed8, 0xedc, 0xee0, 0xeec
+ };
+
+ u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+
+ const u32 retrycount = 2;
+
+ u32 bbvalue;
+
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+
+ _rtl92c_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ }
+ _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
+ if (t == 0) {
+ rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ }
+ if (!rtlphy->rfpi_enable)
+ _rtl92c_phy_pi_mode_switch(hw, true);
+ if (t == 0) {
+ rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
+ rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
+ rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
+ }
+ rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+ if (is2t) {
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
+ }
+ _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
+ if (is2t)
+ rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
+ if (patha_ok == 0x03) {
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ } else if (i == (retrycount - 1) && patha_ok == 0x01)
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94,
+ MASKDWORD) & 0x3FF0000) >>
+ 16;
+ result[t][1] =
+ (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
+
+ }
+
+ if (is2t) {
+ _rtl92c_phy_path_a_standby(hw);
+ _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
+ for (i = 0; i < retrycount; i++) {
+ pathb_ok = _rtl92c_phy_path_b_iqk(hw);
+ if (pathb_ok == 0x03) {
+ result[t][4] = (rtl_get_bbreg(hw,
+ 0xeb4,
+ MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][5] =
+ (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][6] =
+ (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][7] =
+ (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+ result[t][4] = (rtl_get_bbreg(hw,
+ 0xeb4,
+ MASKDWORD) &
+ 0x3FF0000) >> 16;
+ }
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ }
+ }
+ rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
+ rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
+ rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+ if (is2t)
+ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+ if (t != 0) {
+ if (!rtlphy->rfpi_enable)
+ _rtl92c_phy_pi_mode_switch(hw, false);
+ _rtl92c_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ }
+}
+
+static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
+ char delta, bool is2t)
+{
+ /* This routine is deliberately dummied out for later fixes */
+#if 0
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ u32 reg_d[PATH_NUM];
+ u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
+
+ u32 bb_backup[APK_BB_REG_NUM];
+ u32 bb_reg[APK_BB_REG_NUM] = {
+ 0x904, 0xc04, 0x800, 0xc08, 0x874
+ };
+ u32 bb_ap_mode[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x00204000
+ };
+ u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x22204000
+ };
+
+ u32 afe_backup[APK_AFE_REG_NUM];
+ u32 afe_reg[APK_AFE_REG_NUM] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
+ 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
+ 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
+ 0xeec
+ };
+
+ u32 mac_backup[IQK_MAC_REG_NUM];
+ u32 mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+
+ u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
+ {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
+ };
+
+ u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
+ {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
+ };
+
+ u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
+ {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
+ };
+
+ u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
+ };
+
+ u32 afe_on_off[PATH_NUM] = {
+ 0x04db25a4, 0x0b1b25a4
+ };
+
+ u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
+
+ u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
+
+ u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
+
+ u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
+
+ const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
+ };
+
+ const u32 apk_normal_setting_value_1[13] = {
+ 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
+ 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
+ 0x12680000, 0x00880000, 0x00880000
+ };
+
+ const u32 apk_normal_setting_value_2[16] = {
+ 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
+ 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
+ 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
+ 0x00050006
+ };
+
+ u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
+
+ long bb_offset, delta_v, delta_offset;
+
+ if (!is2t)
+ pathbound = 1;
+
+ for (index = 0; index < PATH_NUM; index++) {
+ apk_offset[index] = apk_normal_offset[index];
+ apk_value[index] = apk_normal_value[index];
+ afe_on_off[index] = 0x6fdb25a4;
+ }
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ for (path = 0; path < pathbound; path++) {
+ apk_rf_init_value[path][index] =
+ apk_normal_rf_init_value[path][index];
+ apk_rf_value_0[path][index] =
+ apk_normal_rf_value_0[path][index];
+ }
+ bb_ap_mode[index] = bb_normal_ap_mode[index];
+
+ apkbound = 6;
+ }
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0)
+ continue;
+ bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
+ }
+
+ _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
+
+ _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
+
+ for (path = 0; path < pathbound; path++) {
+ if (path == RF90_PATH_A) {
+ offset = 0xb00;
+ for (index = 0; index < 11; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+
+ rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+ offset = 0xb68;
+ for (; index < 13; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+ offset = 0xb00;
+ for (index = 0; index < 16; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_2
+ [index]);
+
+ offset += 0x04;
+ }
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+ } else if (path == RF90_PATH_B) {
+ offset = 0xb70;
+ for (index = 0; index < 10; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+ rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
+ rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+ offset = 0xb68;
+ index = 11;
+ for (; index < 13; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+ offset = 0xb60;
+ for (index = 0; index < 16; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_2
+ [index]);
+
+ offset += 0x04;
+ }
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+ }
+
+ reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
+ 0xd, MASKDWORD);
+
+ for (index = 0; index < APK_AFE_REG_NUM; index++)
+ rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
+ afe_on_off[path]);
+
+ if (path == RF90_PATH_A) {
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0)
+ continue;
+ rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
+ bb_ap_mode[index]);
+ }
+ }
+
+ _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
+
+ if (path == 0) {
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
+ } else {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
+ 0x10000);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+ 0x1000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+ 0x20103);
+ }
+
+ delta_offset = ((delta + 14) / 2);
+ if (delta_offset < 0)
+ delta_offset = 0;
+ else if (delta_offset > 12)
+ delta_offset = 12;
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index != 1)
+ continue;
+
+ tmpreg = apk_rf_init_value[path][index];
+
+ if (!rtlefuse->apk_thermalmeterignore) {
+ bb_offset = (tmpreg & 0xF0000) >> 16;
+
+ if (!(tmpreg & BIT(15)))
+ bb_offset = -bb_offset;
+
+ delta_v =
+ apk_delta_mapping[index][delta_offset];
+
+ bb_offset += delta_v;
+
+ if (bb_offset < 0) {
+ tmpreg = tmpreg & (~BIT(15));
+ bb_offset = -bb_offset;
+ } else {
+ tmpreg = tmpreg | BIT(15);
+ }
+
+ tmpreg =
+ (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
+ }
+
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
+ MASKDWORD, 0x8992e);
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
+ MASKDWORD, apk_rf_value_0[path][index]);
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+ MASKDWORD, tmpreg);
+
+ i = 0;
+ do {
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
+ rtl_set_bbreg(hw, apk_offset[path],
+ MASKDWORD, apk_value[0]);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_APCalibrate() offset 0x%x "
+ "value 0x%x\n",
+ apk_offset[path],
+ rtl_get_bbreg(hw, apk_offset[path],
+ MASKDWORD)));
+
+ mdelay(3);
+
+ rtl_set_bbreg(hw, apk_offset[path],
+ MASKDWORD, apk_value[1]);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_APCalibrate() offset 0x%x "
+ "value 0x%x\n",
+ apk_offset[path],
+ rtl_get_bbreg(hw, apk_offset[path],
+ MASKDWORD)));
+
+ mdelay(20);
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+
+ if (path == RF90_PATH_A)
+ tmpreg = rtl_get_bbreg(hw, 0xbd8,
+ 0x03E00000);
+ else
+ tmpreg = rtl_get_bbreg(hw, 0xbd8,
+ 0xF8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_APCalibrate() offset "
+ "0xbd8[25:21] %x\n", tmpreg));
+
+ i++;
+
+ } while (tmpreg > apkbound && i < 4);
+
+ apk_result[path][index] = tmpreg;
+ }
+ }
+
+ _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0)
+ continue;
+ rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
+ }
+
+ _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
+
+ for (path = 0; path < pathbound; path++) {
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+ MASKDWORD, reg_d[path]);
+
+ if (path == RF90_PATH_B) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+ 0x1000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+ 0x20101);
+ }
+
+ if (apk_result[path][1] > 6)
+ apk_result[path][1] = 6;
+ }
+
+ for (path = 0; path < pathbound; path++) {
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
+ ((apk_result[path][1] << 15) |
+ (apk_result[path][1] << 10) |
+ (apk_result[path][1] << 5) |
+ apk_result[path][1]));
+
+ if (path == RF90_PATH_A)
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+ ((apk_result[path][1] << 15) |
+ (apk_result[path][1] << 10) |
+ (0x00 << 5) | 0x05));
+ else
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+ ((apk_result[path][1] << 15) |
+ (apk_result[path][1] << 10) |
+ (0x02 << 5) | 0x05));
+
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
+ ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
+ 0x08));
+
+ }
+
+ rtlphy->apk_done = true;
+#endif
+}
+
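+/* Route the radio to the main or auxiliary antenna connection. */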
+static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+ bool bmain, bool is2t)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (is_hal_stop(rtlhal)) {
+ rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
+ rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ }
+ if (is2t) {
+ if (bmain)
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x1);
+ else
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x2);
+ } else {
+ if (bmain)
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
+ else
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
+
+ }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ long result[4][8];
+ u8 i, final_candidate;
+ bool patha_ok, pathb_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+ reg_ecc, reg_tmp = 0;
+ bool is12simular, is13simular, is23simular;
+ bool start_conttx = false, singletone = false;
+ u32 iqk_bb_reg[10] = {
+ ROFDM0_XARXIQIMBALANCE,
+ ROFDM0_XBRXIQIMBALANCE,
+ ROFDM0_ECCATHRESHOLD,
+ ROFDM0_AGCRSSITABLE,
+ ROFDM0_XATXIQIMBALANCE,
+ ROFDM0_XBTXIQIMBALANCE,
+ ROFDM0_XCTXIQIMBALANCE,
+ ROFDM0_XCTXAFE,
+ ROFDM0_XDTXAFE,
+ ROFDM0_RXIQEXTANTA
+ };
+
+ if (recovery) {
+ _rtl92c_phy_reload_adda_registers(hw,
+ iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 10);
+ return;
+ }
+ if (start_conttx || singletone)
+ return;
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ for (i = 0; i < 3; i++) {
+ if (IS_92C_SERIAL(rtlhal->version))
+ _rtl92c_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl92c_phy_iq_calibrate(hw, result, i, false);
+ if (i == 1) {
+ is12simular = _rtl92c_phy_simularity_compare(hw,
+ result, 0,
+ 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+ if (i == 2) {
+ is13simular = _rtl92c_phy_simularity_compare(hw,
+ result, 0,
+ 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+ is23simular = _rtl92c_phy_simularity_compare(hw,
+ result, 1,
+ 2);
+ if (is23simular)
+ final_candidate = 1;
+ else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+ if (final_candidate != 0xff) {
+ rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
+ rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
+ reg_ea4 = result[final_candidate][2];
+ reg_eac = result[final_candidate][3];
+ rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
+ rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
+ reg_ec4 = result[final_candidate][6];
+ reg_ecc = result[final_candidate][7];
+ patha_ok = pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
+ rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
+ }
+ if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+ _rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ (reg_ea4 == 0));
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
+ _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok,
+ result,
+ final_candidate,
+ (reg_ec4 == 0));
+ }
+ _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 10);
+}
+EXPORT_SYMBOL(rtl92c_phy_iq_calibrate);
+
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool start_conttx = false, singletone = false;
+
+ if (start_conttx || singletone)
+ return;
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);
+ else
+ rtlpriv->cfg->ops->phy_lc_calibrate(hw, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
+
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlphy->apk_done)
+ return;
+ if (IS_92C_SERIAL(rtlhal->version))
+ _rtl92c_phy_ap_calibrate(hw, delta, true);
+ else
+ _rtl92c_phy_ap_calibrate(hw, delta, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_ap_calibrate);
+
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
+ else
+ _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch);
+
+bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool postprocessing = false;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress));
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Resume DM after scan.\n"));
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Pause DM before scan.\n"));
+ postprocessing = true;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not processed\n"));
+ break;
+ }
+ } while (false);
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl92c_phy_set_io(hw);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+ return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
+
+void rtl92c_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+ rtl92c_dm_write_dig(hw);
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+ dm_digtable.cur_igvalue = 0x17;
+ rtl92c_dm_write_dig(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not processed\n"));
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("<---(%#x)\n", rtlphy->current_io_type));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io);
+
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+EXPORT_SYMBOL(rtl92ce_phy_set_rf_on);
+
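+/*
+ * Put the RF side to sleep: pause TX, power down RF path A and poll the RF
+ * register up to five times; on timeout the blocks are re-enabled and a
+ * trace message is emitted.
+ */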
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+ u32 u4b_tmp;
+ u8 delay = 5;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ while (u4b_tmp != 0 && delay > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ delay--;
+ }
+ if (delay == 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("Switch RF timeout!\n"));
+ return;
+ }
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
new file mode 100644
index 000000000000..53ffb0981586
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -0,0 +1,246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_PHY_H__
+#define __RTL92C_PHY_H__
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define RF90_PATH_MAX 2
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL92C_MAX_PATH_NUM 2
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+
+extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+ long power_indbm);
+extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
+ u16 beaconinterval);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
+ u32 rfpath);
+extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ long power_indbm);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx, u32 cmdtablesz,
+ enum swchnlcmd_id cmdid, u32 para1,
+ u32 para2, u32 msdelay);
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage, u8 *step,
+ u32 *delay);
+
+#endif
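
The header above pairs each query/set prototype with a (regaddr, bitmask) argument list and also exposes _rtl92c_phy_calculate_bit_shift(). A minimal standalone sketch of that mask-and-shift access pattern, using stand-in register accessors rather than the driver's real MMIO helpers (the bitmask is assumed non-zero, e.g. MASKDWORD or a narrower field mask):

#include <stdint.h>

/* Stand-in register file so the sketch compiles on its own. */
static uint32_t fake_regs[0x1000];

static uint32_t reg_read(uint32_t addr)
{
	return fake_regs[addr & 0xfff];
}

static void reg_write(uint32_t addr, uint32_t val)
{
	fake_regs[addr & 0xfff] = val;
}

/* Index of the lowest set bit, which is what
 * _rtl92c_phy_calculate_bit_shift() is expected to return. */
static uint32_t calc_bit_shift(uint32_t bitmask)
{
	uint32_t shift;

	for (shift = 0; shift < 32; shift++)
		if (bitmask & (1u << shift))
			break;
	return shift;
}

/* query: return only the masked field, right-aligned. */
static uint32_t query_bb_reg(uint32_t regaddr, uint32_t bitmask)
{
	return (reg_read(regaddr) & bitmask) >> calc_bit_shift(bitmask);
}

/* set: rewrite only the masked field, leaving the other bits intact. */
static void set_bb_reg(uint32_t regaddr, uint32_t bitmask, uint32_t data)
{
	uint32_t orig = reg_read(regaddr);
	uint32_t shift = calc_bit_shift(bitmask);

	reg_write(regaddr, (orig & ~bitmask) | ((data << shift) & bitmask));
}

rtl92c_phy_query_bb_reg() and rtl92c_phy_set_bb_reg() presumably follow the same pattern on top of the real bus accessors, and the RF variants route the access through the radio path named by their rfpath argument.
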
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
index 0f0be7c763b8..c0cb0cfe7d37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
@@ -1,6 +1,5 @@
rtl8192ce-objs := \
dm.o \
- fw.o \
hw.o \
led.o \
phy.o \
@@ -10,3 +9,5 @@ rtl8192ce-objs := \
trx.o
obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
+
+ccflags-y += -D__CHECK_ENDIAN__
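
The added ccflags-y line defines __CHECK_ENDIAN__ for every object built in this directory. With kernels of this vintage, sparse only enforces its __le16/__le32 bitwise endianness checks when that macro is set, so a sparse run over the driver (for example make C=2 M=drivers/net/wireless/rtlwifi/rtl8192ce) will now warn about mixing CPU-order and little-endian values, such as the mac->ac[].cw_min fields that the hw.c changes below start converting with le16_to_cpu().
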
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 83cd64895292..2f577c8828fc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -121,11 +121,37 @@
#define CHIP_92C 0x01
#define CHIP_88C 0x00
+/* Vendor information is now encoded in the chip version definition,
+ * including UMC B-cut and RTL8723 chip info (a decode sketch follows
+ * the enum below).
+ *
+ * BIT 7 Reserved
+ * BIT 6 UMC B-cut
+ * BIT 5 Manufacturer (TSMC/UMC)
+ * BIT 4 TEST/NORMAL
+ * BIT 3 8723 version (A-cut/B-cut)
+ * BIT 2 8723 or not
+ * BIT 1 1T2R or not
+ * BIT 0 88C/92C
+ */
+
enum version_8192c {
VERSION_A_CHIP_92C = 0x01,
VERSION_A_CHIP_88C = 0x00,
VERSION_B_CHIP_92C = 0x11,
VERSION_B_CHIP_88C = 0x10,
+ VERSION_TEST_CHIP_88C = 0x00,
+ VERSION_TEST_CHIP_92C = 0x01,
+ VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
+ VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
+ VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
+ VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
+ VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
+ VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
+ VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
+ VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
+ VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
VERSION_UNKNOWN = 0x88,
};
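
The comment before this enum documents how the version byte packs vendor and cut information. Purely as an illustration, the masks below decode that layout; the names are invented for this sketch and are not the driver's own helpers:

#include <stdbool.h>
#include <stdint.h>

/* Bit layout taken from the comment above (BIT 7 is reserved). */
#define VER_92C			(1u << 0)	/* 0 = 88C, 1 = 92C */
#define VER_1T2R		(1u << 1)
#define VER_8723		(1u << 2)
#define VER_8723_B_CUT		(1u << 3)
#define VER_NORMAL		(1u << 4)	/* 0 = test chip */
#define VER_UMC			(1u << 5)	/* 0 = TSMC */
#define VER_UMC_B_CUT		(1u << 6)

static bool ver_is_92c(uint8_t v)         { return v & VER_92C; }
static bool ver_is_normal_chip(uint8_t v) { return v & VER_NORMAL; }
static bool ver_is_umc_b_cut(uint8_t v)   { return (v & VER_UMC) && (v & VER_UMC_B_CUT); }

With this layout, VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT (0x73) decodes as bits 0, 1, 4, 5 and 6: a normal-chip UMC B-cut 92C in a 1T2R configuration, which matches the enumerator name.
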
@@ -254,4 +280,122 @@ struct h2c_cmd_8192c {
u8 *p_cmdbuffer;
};
+static inline u8 _rtl92c_get_chnl_group(u8 chnl)
+{
+ u8 group = 0;
+
+ if (chnl < 3)
+ group = 0;
+ else if (chnl < 9)
+ group = 1;
+ else
+ group = 2;
+
+ return group;
+}
+
+/* NOTE: indices refer to the rtl8192c_rates table */
+static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
+ u8 desc_rate, bool first_ampdu)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int rate_idx = 0;
+
+ if (first_ampdu) {
+ if (false == isHT) {
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+ ("Rate %d is not support, set to "
+ "1M rate.\n", desc_rate));
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ rate_idx = 11;
+ }
+ return rate_idx;
+ }
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ /* TODO: how should MCS (HT) rates be mapped? */
+ /* NOTE: see __ieee80211_rx for reference */
+ default:
+ rate_idx = 11;
+ break;
+ }
+ return rate_idx;
+}
+
#endif
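
_rtl92c_rate_mapping() above maps the twelve legacy DESC92C_RATE* descriptor rates onto mac80211 rate indices 0-11 and currently collapses every HT/MCS descriptor rate to index 11 (see the TODO in the function). Because the two switch statements are identical, the same mapping can also be written as a table lookup. The sketch below only illustrates that shape: it simplifies the fallback for unknown rates to index 11 and assumes the kernel's ARRAY_SIZE() helper is available in this context.

/* Hypothetical table-driven form of the switch above; not part of
 * the driver.  MCS/HT rates still fall back to index 11 until a
 * real MCS mapping is added. */
static const u8 rtl92c_legacy_rate_idx[] = {
	[DESC92C_RATE1M]   = 0,
	[DESC92C_RATE2M]   = 1,
	[DESC92C_RATE5_5M] = 2,
	[DESC92C_RATE11M]  = 3,
	[DESC92C_RATE6M]   = 4,
	[DESC92C_RATE9M]   = 5,
	[DESC92C_RATE12M]  = 6,
	[DESC92C_RATE18M]  = 7,
	[DESC92C_RATE24M]  = 8,
	[DESC92C_RATE36M]  = 9,
	[DESC92C_RATE48M]  = 10,
	[DESC92C_RATE54M]  = 11,
};

static inline int rtl92c_rate_mapping_tbl(bool isHT, u8 desc_rate)
{
	if (isHT || desc_rate >= ARRAY_SIZE(rtl92c_legacy_rate_idx))
		return 11;
	return rtl92c_legacy_rate_idx[desc_rate];
}
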
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 62e7c64e087b..7d76504df4d1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -33,487 +33,15 @@
#include "def.h"
#include "phy.h"
#include "dm.h"
-#include "fw.h"
-struct dig_t dm_digtable;
-static struct ps_t dm_pstable;
-
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe,
- 0x788001e2,
- 0x71c001c7,
- 0x6b8001ae,
- 0x65400195,
- 0x5fc0017f,
- 0x5a400169,
- 0x55400155,
- 0x50800142,
- 0x4c000130,
- 0x47c0011f,
- 0x43c0010f,
- 0x40000100,
- 0x3c8000f2,
- 0x390000e4,
- 0x35c000d7,
- 0x32c000cb,
- 0x300000c0,
- 0x2d4000b5,
- 0x2ac000ab,
- 0x288000a2,
- 0x26000098,
- 0x24000090,
- 0x22000088,
- 0x20000080,
- 0x1e400079,
- 0x1c800072,
- 0x1b00006c,
- 0x19800066,
- 0x18000060,
- 0x16c0005b,
- 0x15800056,
- 0x14400051,
- 0x1300004c,
- 0x12000048,
- 0x11000044,
- 0x10000040,
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
-};
-
-static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
-{
- dm_digtable.dig_enable_flag = true;
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- dm_digtable.cur_igvalue = 0x20;
- dm_digtable.pre_igvalue = 0x0;
- dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
- dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
- dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
- dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_digtable.rx_gain_range_max = DM_DIG_MAX;
- dm_digtable.rx_gain_range_min = DM_DIG_MIN;
- dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
- dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
- dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
-}
-
-static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- long rssi_val_min = 0;
-
- if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
- (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
- if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
- rssi_val_min =
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
- rtlpriv->dm.undecorated_smoothed_pwdb) ?
- rtlpriv->dm.undecorated_smoothed_pwdb :
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
- else
- rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
- } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
- dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
- rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
- } else if (dm_digtable.curmultista_connectstate ==
- DIG_MULTISTA_CONNECT) {
- rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
- }
-
- return (u8) rssi_val_min;
-}
-
-static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
-{
- u32 ret_value;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
-
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
- falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
-
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
- falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
- falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
-
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
- falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
- falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
-
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
- falsealm_cnt->cnt_cck_fail = ret_value;
-
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
- falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
- falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail +
- falsealm_cnt->cnt_mcs_fail +
- falsealm_cnt->cnt_cck_fail);
-
- rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
- rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
- "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
-}
-
-static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value_igi = dm_digtable.cur_igvalue;
-
- if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
- value_igi--;
- else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
- value_igi += 0;
- else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
- value_igi++;
- else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
- value_igi += 2;
- if (value_igi > DM_DIG_FA_UPPER)
- value_igi = DM_DIG_FA_UPPER;
- else if (value_igi < DM_DIG_FA_LOWER)
- value_igi = DM_DIG_FA_LOWER;
- if (rtlpriv->falsealm_cnt.cnt_all > 10000)
- value_igi = 0x32;
-
- dm_digtable.cur_igvalue = value_igi;
- rtl92c_dm_write_dig(hw);
-}
-
-static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
- if ((dm_digtable.backoff_val - 2) <
- dm_digtable.backoff_val_range_min)
- dm_digtable.backoff_val =
- dm_digtable.backoff_val_range_min;
- else
- dm_digtable.backoff_val -= 2;
- } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
- if ((dm_digtable.backoff_val + 2) >
- dm_digtable.backoff_val_range_max)
- dm_digtable.backoff_val =
- dm_digtable.backoff_val_range_max;
- else
- dm_digtable.backoff_val += 2;
- }
-
- if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
- dm_digtable.rx_gain_range_max)
- dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
- else if ((dm_digtable.rssi_val_min + 10 -
- dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
- dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
- else
- dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
- dm_digtable.backoff_val;
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("rssi_val_min = %x backoff_val %x\n",
- dm_digtable.rssi_val_min, dm_digtable.backoff_val));
-
- rtl92c_dm_write_dig(hw);
-}
-
-static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
-{
- static u8 binitialized; /* initialized to false */
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
- bool b_multi_sta = false;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC)
- b_multi_sta = true;
-
- if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate !=
- DIG_STA_DISCONNECT)) {
- binitialized = false;
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- return;
- } else if (binitialized == false) {
- binitialized = true;
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
- dm_digtable.cur_igvalue = 0x20;
- rtl92c_dm_write_dig(hw);
- }
-
- if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
- if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
- (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
-
- if (dm_digtable.dig_ext_port_stage ==
- DIG_EXT_PORT_STAGE_2) {
- dm_digtable.cur_igvalue = 0x20;
- rtl92c_dm_write_dig(hw);
- }
-
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
- } else if (rssi_strength > dm_digtable.rssi_highthresh) {
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
- rtl92c_dm_ctrl_initgain_by_fa(hw);
- }
- } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
- dm_digtable.cur_igvalue = 0x20;
- rtl92c_dm_write_dig(hw);
- }
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("curmultista_connectstate = "
- "%x dig_ext_port_stage %x\n",
- dm_digtable.curmultista_connectstate,
- dm_digtable.dig_ext_port_stage));
-}
-
-static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("presta_connectstate = %x,"
- " cursta_connectctate = %x\n",
- dm_digtable.presta_connectstate,
- dm_digtable.cursta_connectctate));
-
- if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
- || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
- || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-
- if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
- dm_digtable.rssi_val_min =
- rtl92c_dm_initial_gain_min_pwdb(hw);
- rtl92c_dm_ctrl_initgain_by_rssi(hw);
- }
- } else {
- dm_digtable.rssi_val_min = 0;
- dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable.cur_igvalue = 0x20;
- dm_digtable.pre_igvalue = 0;
- rtl92c_dm_write_dig(hw);
- }
-}
-
-static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
- dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
-
- if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
- if (dm_digtable.rssi_val_min <= 25)
- dm_digtable.cur_cck_pd_state =
- CCK_PD_STAGE_LowRssi;
- else
- dm_digtable.cur_cck_pd_state =
- CCK_PD_STAGE_HighRssi;
- } else {
- if (dm_digtable.rssi_val_min <= 20)
- dm_digtable.cur_cck_pd_state =
- CCK_PD_STAGE_LowRssi;
- else
- dm_digtable.cur_cck_pd_state =
- CCK_PD_STAGE_HighRssi;
- }
- } else {
- dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
- }
-
- if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
- if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
- if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
- dm_digtable.cur_cck_fa_state =
- CCK_FA_STAGE_High;
- else
- dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
-
- if (dm_digtable.pre_cck_fa_state !=
- dm_digtable.cur_cck_fa_state) {
- if (dm_digtable.cur_cck_fa_state ==
- CCK_FA_STAGE_Low)
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
- 0x83);
- else
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
- 0xcd);
-
- dm_digtable.pre_cck_fa_state =
- dm_digtable.cur_cck_fa_state;
- }
-
- rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
-
- if (IS_92C_SERIAL(rtlhal->version))
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
- MASKBYTE2, 0xd7);
- } else {
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
- rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
-
- if (IS_92C_SERIAL(rtlhal->version))
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
- MASKBYTE2, 0xd3);
- }
- dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
- }
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
-}
-
-static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- if (mac->act_scanning == true)
- return;
-
- if ((mac->link_state > MAC80211_NOLINK) &&
- (mac->link_state < MAC80211_LINKED))
- dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
- else if (mac->link_state >= MAC80211_LINKED)
- dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
- else
- dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-
- rtl92c_dm_initial_gain_sta(hw);
- rtl92c_dm_initial_gain_multi_sta(hw);
- rtl92c_dm_cck_packet_detection_thresh(hw);
-
- dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
-
-}
-
-static void rtl92c_dm_dig(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->dm.b_dm_initialgain_enable == false)
- return;
- if (dm_digtable.dig_enable_flag == false)
- return;
-
- rtl92c_dm_ctrl_initgain_by_twoport(hw);
-
-}
-
-static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.bdynamic_txpower_enable = false;
-
- rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
- rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
-}
-
-static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undecorated_smoothed_pwdb;
- if (!rtlpriv->dm.bdynamic_txpower_enable)
+ if (!rtlpriv->dm.dynamic_txpower_enable)
return;
if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -583,891 +111,3 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
-
-void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("cur_igvalue = 0x%x, "
- "pre_igvalue = 0x%x, backoff_val = %d\n",
- dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
- dm_digtable.backoff_val));
-
- if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
- rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
- dm_digtable.cur_igvalue);
- rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
- dm_digtable.cur_igvalue);
-
- dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
- }
-}
-
-static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
- u8 h2c_parameter[3] = { 0 };
-
- return;
-
- if (tmpentry_max_pwdb != 0) {
- rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
- tmpentry_max_pwdb;
- } else {
- rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
- }
-
- if (tmpentry_min_pwdb != 0xff) {
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
- tmpentry_min_pwdb;
- } else {
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
- }
-
- h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
- h2c_parameter[0] = 0;
-
- rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
-}
-
-void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- rtlpriv->dm.bcurrent_turbo_edca = false;
- rtlpriv->dm.bis_any_nonbepkts = false;
- rtlpriv->dm.bis_cur_rdlstate = false;
-}
-
-static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- static u64 last_txok_cnt;
- static u64 last_rxok_cnt;
- u64 cur_txok_cnt;
- u64 cur_rxok_cnt;
- u32 edca_be_ul = 0x5ea42b;
- u32 edca_be_dl = 0x5ea42b;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC)
- goto dm_checkedcaturbo_exit;
-
- if (mac->link_state != MAC80211_LINKED) {
- rtlpriv->dm.bcurrent_turbo_edca = false;
- return;
- }
-
- if (!mac->ht_enable) { /*FIX MERGE */
- if (!(edca_be_ul & 0xffff0000))
- edca_be_ul |= 0x005e0000;
-
- if (!(edca_be_dl & 0xffff0000))
- edca_be_dl |= 0x005e0000;
- }
-
- if ((!rtlpriv->dm.bis_any_nonbepkts) &&
- (!rtlpriv->dm.b_disable_framebursting)) {
- cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
- cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
- if (cur_rxok_cnt > 4 * cur_txok_cnt) {
- if (!rtlpriv->dm.bis_cur_rdlstate ||
- !rtlpriv->dm.bcurrent_turbo_edca) {
- rtl_write_dword(rtlpriv,
- REG_EDCA_BE_PARAM,
- edca_be_dl);
- rtlpriv->dm.bis_cur_rdlstate = true;
- }
- } else {
- if (rtlpriv->dm.bis_cur_rdlstate ||
- !rtlpriv->dm.bcurrent_turbo_edca) {
- rtl_write_dword(rtlpriv,
- REG_EDCA_BE_PARAM,
- edca_be_ul);
- rtlpriv->dm.bis_cur_rdlstate = false;
- }
- }
- rtlpriv->dm.bcurrent_turbo_edca = true;
- } else {
- if (rtlpriv->dm.bcurrent_turbo_edca) {
- u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
- rtlpriv->dm.bcurrent_turbo_edca = false;
- }
- }
-
-dm_checkedcaturbo_exit:
- rtlpriv->dm.bis_any_nonbepkts = false;
- last_txok_cnt = rtlpriv->stats.txbytesunicast;
- last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
-}
-
-static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
- *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 thermalvalue, delta, delta_lck, delta_iqk;
- long ele_a, ele_d, temp_cck, val_x, value32;
- long val_y, ele_c;
- u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
- int i;
- bool is2t = IS_92C_SERIAL(rtlhal->version);
- u8 txpwr_level[2] = {0, 0};
- u8 ofdm_min_index = 6, rf;
-
- rtlpriv->dm.btxpower_trackingInit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
-
- thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
- "eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter));
-
- rtl92c_phy_ap_calibrate(hw, (thermalvalue -
- rtlefuse->eeprom_thermalmeter));
- if (is2t)
- rf = 2;
- else
- rf = 1;
-
- if (thermalvalue) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD) & MASKOFDM_D;
-
- for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
- if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[0] = (u8) i;
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Initial pathA ele_d reg0x%x = 0x%lx, "
- "ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]));
- break;
- }
- }
-
- if (is2t) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD) & MASKOFDM_D;
-
- for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
- if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[1] = (u8) i;
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- ("Initial pathB ele_d reg0x%x = "
- "0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]));
- break;
- }
- }
- }
-
- temp_cck =
- rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
-
- for (i = 0; i < CCK_TABLE_LENGTH; i++) {
- if (rtlpriv->dm.b_cck_inch14) {
- if (memcmp((void *)&temp_cck,
- (void *)&cckswing_table_ch14[i][2],
- 4) == 0) {
- cck_index_old = (u8) i;
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- ("Initial reg0x%x = 0x%lx, "
- "cck_index=0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.b_cck_inch14));
- break;
- }
- } else {
- if (memcmp((void *)&temp_cck,
- (void *)
- &cckswing_table_ch1ch13[i][2],
- 4) == 0) {
- cck_index_old = (u8) i;
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- ("Initial reg0x%x = 0x%lx, "
- "cck_index=0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.b_cck_inch14));
- break;
- }
- }
- }
-
- if (!rtlpriv->dm.thermalvalue) {
- rtlpriv->dm.thermalvalue =
- rtlefuse->eeprom_thermalmeter;
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
- }
-
- delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
- (thermalvalue - rtlpriv->dm.thermalvalue) :
- (rtlpriv->dm.thermalvalue - thermalvalue);
-
- delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
- (rtlpriv->dm.thermalvalue_lck - thermalvalue);
-
- delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
- (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
- "eeprom_thermalmeter 0x%x delta 0x%x "
- "delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk));
-
- if (delta_lck > 1) {
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtl92c_phy_lc_calibrate(hw);
- }
-
- if (delta > 0 && rtlpriv->dm.txpower_track_control) {
- if (thermalvalue > rtlpriv->dm.thermalvalue) {
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] -= delta;
- rtlpriv->dm.cck_index -= delta;
- } else {
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] += delta;
- rtlpriv->dm.cck_index += delta;
- }
-
- if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("temp OFDM_A_index=0x%x, "
- "OFDM_B_index=0x%x,"
- "cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index));
- } else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("temp OFDM_A_index=0x%x,"
- "cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index));
- }
-
- if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
- for (i = 0; i < rf; i++)
- ofdm_index[i] =
- rtlpriv->dm.ofdm_index[i]
- + 1;
- cck_index = rtlpriv->dm.cck_index + 1;
- } else {
- for (i = 0; i < rf; i++)
- ofdm_index[i] =
- rtlpriv->dm.ofdm_index[i];
- cck_index = rtlpriv->dm.cck_index;
- }
-
- for (i = 0; i < rf; i++) {
- if (txpwr_level[i] >= 0 &&
- txpwr_level[i] <= 26) {
- if (thermalvalue >
- rtlefuse->eeprom_thermalmeter) {
- if (delta < 5)
- ofdm_index[i] -= 1;
-
- else
- ofdm_index[i] -= 2;
- } else if (delta > 5 && thermalvalue <
- rtlefuse->
- eeprom_thermalmeter) {
- ofdm_index[i] += 1;
- }
- } else if (txpwr_level[i] >= 27 &&
- txpwr_level[i] <= 32
- && thermalvalue >
- rtlefuse->eeprom_thermalmeter) {
- if (delta < 5)
- ofdm_index[i] -= 1;
-
- else
- ofdm_index[i] -= 2;
- } else if (txpwr_level[i] >= 32 &&
- txpwr_level[i] <= 38 &&
- thermalvalue >
- rtlefuse->eeprom_thermalmeter
- && delta > 5) {
- ofdm_index[i] -= 1;
- }
- }
-
- if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
- if (thermalvalue >
- rtlefuse->eeprom_thermalmeter) {
- if (delta < 5)
- cck_index -= 1;
-
- else
- cck_index -= 2;
- } else if (delta > 5 && thermalvalue <
- rtlefuse->eeprom_thermalmeter) {
- cck_index += 1;
- }
- } else if (txpwr_level[i] >= 27 &&
- txpwr_level[i] <= 32 &&
- thermalvalue >
- rtlefuse->eeprom_thermalmeter) {
- if (delta < 5)
- cck_index -= 1;
-
- else
- cck_index -= 2;
- } else if (txpwr_level[i] >= 32 &&
- txpwr_level[i] <= 38 &&
- thermalvalue > rtlefuse->eeprom_thermalmeter
- && delta > 5) {
- cck_index -= 1;
- }
-
- for (i = 0; i < rf; i++) {
- if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
- ofdm_index[i] = OFDM_TABLE_SIZE - 1;
-
- else if (ofdm_index[i] < ofdm_min_index)
- ofdm_index[i] = ofdm_min_index;
- }
-
- if (cck_index > CCK_TABLE_SIZE - 1)
- cck_index = CCK_TABLE_SIZE - 1;
- else if (cck_index < 0)
- cck_index = 0;
-
- if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("new OFDM_A_index=0x%x, "
- "OFDM_B_index=0x%x,"
- "cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index));
- } else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("new OFDM_A_index=0x%x,"
- "cck_index=0x%x\n",
- ofdm_index[0], cck_index));
- }
- }
-
- if (rtlpriv->dm.txpower_track_control && delta != 0) {
- ele_d =
- (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
- val_x = rtlphy->reg_e94;
- val_y = rtlphy->reg_e9c;
-
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
-
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
-
- value32 = (ele_d << 22) |
- ((ele_c & 0x3F) << 16) | ele_a;
-
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD, value32);
-
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- value32);
-
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(31), value32);
-
- value32 = ((val_y * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(29), value32);
- } else {
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table[ofdm_index[0]]);
-
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(31) | BIT(29), 0x00);
- }
-
- if (!rtlpriv->dm.b_cck_inch14) {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch1ch13[cck_index]
- [0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch1ch13[cck_index]
- [1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch1ch13[cck_index]
- [2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch1ch13[cck_index]
- [3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch1ch13[cck_index]
- [4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch1ch13[cck_index]
- [5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch1ch13[cck_index]
- [6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch1ch13[cck_index]
- [7]);
- } else {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch14[cck_index]
- [0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch14[cck_index]
- [1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch14[cck_index]
- [2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch14[cck_index]
- [3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch14[cck_index]
- [4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch14[cck_index]
- [5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch14[cck_index]
- [6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch14[cck_index]
- [7]);
- }
-
- if (is2t) {
- ele_d = (ofdmswing_table[ofdm_index[1]] &
- 0xFFC00000) >> 22;
-
- val_x = rtlphy->reg_eb4;
- val_y = rtlphy->reg_ebc;
-
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) &
- 0x000003FF;
-
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) &
- 0x00003FF;
-
- value32 = (ele_d << 22) |
- ((ele_c & 0x3F) << 16) | ele_a;
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD, value32);
-
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, value32);
-
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(27), value32);
-
- value32 = ((val_y * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(25), value32);
- } else {
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table[ofdm_index
- [1]]);
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(27) | BIT(25), 0x00);
- }
-
- }
- }
-
- if (delta_iqk > 3) {
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtl92c_phy_iq_calibrate(hw, false);
- }
-
- if (rtlpriv->dm.txpower_track_control)
- rtlpriv->dm.thermalvalue = thermalvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
-
-}
-
-static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.btxpower_tracking = true;
- rtlpriv->dm.btxpower_trackingInit = false;
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("pMgntInfo->btxpower_tracking = %d\n",
- rtlpriv->dm.btxpower_tracking));
-}
-
-static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
-{
- rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
-}
-
-static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
-{
- rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
-}
-
-static void rtl92c_dm_check_txpower_tracking_thermal_meter(
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- static u8 tm_trigger;
-
- if (!rtlpriv->dm.btxpower_tracking)
- return;
-
- if (!tm_trigger) {
- rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
- 0x60);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Trigger 92S Thermal Meter!!\n"));
- tm_trigger = 1;
- return;
- } else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Schedule TxPowerTracking direct call!!\n"));
- rtl92c_dm_txpower_tracking_directcall(hw);
- tm_trigger = 0;
- }
-}
-
-void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
-{
- rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
-}
-
-void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rate_adaptive *p_ra = &(rtlpriv->ra);
-
- p_ra->ratr_state = DM_RATR_STA_INIT;
- p_ra->pre_ratr_state = DM_RATR_STA_INIT;
-
- if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
- rtlpriv->dm.b_useramask = true;
- else
- rtlpriv->dm.b_useramask = false;
-
-}
-
-static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rate_adaptive *p_ra = &(rtlpriv->ra);
- u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
-
- if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("<---- driver is going to unload\n"));
- return;
- }
-
- if (!rtlpriv->dm.b_useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("<---- driver does not control rate adaptive mask\n"));
- return;
- }
-
- if (mac->link_state == MAC80211_LINKED) {
-
- switch (p_ra->pre_ratr_state) {
- case DM_RATR_STA_HIGH:
- high_rssithresh_for_ra = 50;
- low_rssithresh_for_ra = 20;
- break;
- case DM_RATR_STA_MIDDLE:
- high_rssithresh_for_ra = 55;
- low_rssithresh_for_ra = 20;
- break;
- case DM_RATR_STA_LOW:
- high_rssithresh_for_ra = 50;
- low_rssithresh_for_ra = 25;
- break;
- default:
- high_rssithresh_for_ra = 50;
- low_rssithresh_for_ra = 20;
- break;
- }
-
- if (rtlpriv->dm.undecorated_smoothed_pwdb >
- (long)high_rssithresh_for_ra)
- p_ra->ratr_state = DM_RATR_STA_HIGH;
- else if (rtlpriv->dm.undecorated_smoothed_pwdb >
- (long)low_rssithresh_for_ra)
- p_ra->ratr_state = DM_RATR_STA_MIDDLE;
- else
- p_ra->ratr_state = DM_RATR_STA_LOW;
-
- if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("RSSI = %ld\n",
- rtlpriv->dm.undecorated_smoothed_pwdb));
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state));
-
- rtlpriv->cfg->ops->update_rate_mask(hw,
- p_ra->ratr_state);
-
- p_ra->pre_ratr_state = p_ra->ratr_state;
- }
- }
-}
-
-static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
-{
- dm_pstable.pre_ccastate = CCA_MAX;
- dm_pstable.cur_ccasate = CCA_MAX;
- dm_pstable.pre_rfstate = RF_MAX;
- dm_pstable.cur_rfstate = RF_MAX;
- dm_pstable.rssi_val_min = 0;
-}
-
-static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- if (dm_pstable.rssi_val_min != 0) {
- if (dm_pstable.pre_ccastate == CCA_2R) {
- if (dm_pstable.rssi_val_min >= 35)
- dm_pstable.cur_ccasate = CCA_1R;
- else
- dm_pstable.cur_ccasate = CCA_2R;
- } else {
- if (dm_pstable.rssi_val_min <= 30)
- dm_pstable.cur_ccasate = CCA_2R;
- else
- dm_pstable.cur_ccasate = CCA_1R;
- }
- } else {
- dm_pstable.cur_ccasate = CCA_MAX;
- }
-
- if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
- if (dm_pstable.cur_ccasate == CCA_1R) {
- if (get_rf_type(rtlphy) == RF_2T2R) {
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
- MASKBYTE0, 0x13);
- rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
- } else {
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
- MASKBYTE0, 0x23);
- rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
- }
- } else {
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
- 0x33);
- rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
- }
- dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
- }
-
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
- (dm_pstable.cur_ccasate ==
- 0) ? "1RCCA" : "2RCCA"));
-}
-
-void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
-{
- static u8 initialize;
- static u32 reg_874, reg_c70, reg_85c, reg_a74;
-
- if (initialize == 0) {
- reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- MASKDWORD) & 0x1CC000) >> 14;
-
- reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
- MASKDWORD) & BIT(3)) >> 3;
-
- reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
- MASKDWORD) & 0xFF000000) >> 24;
-
- reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
-
- initialize = 1;
- }
-
- if (!bforce_in_normal) {
- if (dm_pstable.rssi_val_min != 0) {
- if (dm_pstable.pre_rfstate == RF_NORMAL) {
- if (dm_pstable.rssi_val_min >= 30)
- dm_pstable.cur_rfstate = RF_SAVE;
- else
- dm_pstable.cur_rfstate = RF_NORMAL;
- } else {
- if (dm_pstable.rssi_val_min <= 25)
- dm_pstable.cur_rfstate = RF_NORMAL;
- else
- dm_pstable.cur_rfstate = RF_SAVE;
- }
- } else {
- dm_pstable.cur_rfstate = RF_MAX;
- }
- } else {
- dm_pstable.cur_rfstate = RF_NORMAL;
- }
-
- if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
- if (dm_pstable.cur_rfstate == RF_SAVE) {
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- 0x1C0000, 0x2);
- rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
- rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
- 0xFF000000, 0x63);
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- 0xC000, 0x2);
- rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
- rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
- rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
- } else {
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- 0x1CC000, reg_874);
- rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
- reg_c70);
- rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
- reg_85c);
- rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
- rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
- }
-
- dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
- }
-}
-
-static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (((mac->link_state == MAC80211_NOLINK)) &&
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
- dm_pstable.rssi_val_min = 0;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("Not connected to any\n"));
- }
-
- if (mac->link_state == MAC80211_LINKED) {
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- dm_pstable.rssi_val_min =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- dm_pstable.rssi_val_min));
- } else {
- dm_pstable.rssi_val_min =
- rtlpriv->dm.undecorated_smoothed_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- dm_pstable.rssi_val_min));
- }
- } else {
- dm_pstable.rssi_val_min =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- dm_pstable.rssi_val_min));
- }
-
- if (IS_92C_SERIAL(rtlhal->version))
- rtl92c_dm_1r_cca(hw);
-}
-
-void rtl92c_dm_init(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtl92c_dm_diginit(hw);
- rtl92c_dm_init_dynamic_txpower(hw);
- rtl92c_dm_init_edca_turbo(hw);
- rtl92c_dm_init_rate_adaptive_mask(hw);
- rtl92c_dm_initialize_txpower_tracking(hw);
- rtl92c_dm_init_dynamic_bb_powersaving(hw);
-}
-
-void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool b_fw_current_inpsmode = false;
- bool b_fw_ps_awake = true;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
- (u8 *) (&b_fw_current_inpsmode));
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
- (u8 *) (&b_fw_ps_awake));
-
- if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) &&
- b_fw_ps_awake)
- && (!ppsc->rfchange_inprogress)) {
- rtl92c_dm_pwdb_monitor(hw);
- rtl92c_dm_dig(hw);
- rtl92c_dm_false_alarm_counter_statistics(hw);
- rtl92c_dm_dynamic_bb_powersaving(hw);
- rtl92c_dm_dynamic_txpower(hw);
- rtl92c_dm_check_txpower_tracking(hw);
- rtl92c_dm_refresh_rate_adaptive_mask(hw);
- rtl92c_dm_check_edca_turbo(hw);
- }
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 463439e4074c..36302ebae4a3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -192,5 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 1c41a0c93506..05477f465a75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -37,7 +37,6 @@
#include "def.h"
#include "phy.h"
#include "dm.h"
-#include "fw.h"
#include "led.h"
#include "hw.h"
@@ -124,7 +123,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_FW_PSMODE_STATUS:
- *((bool *) (val)) = ppsc->b_fw_current_inpsmode;
+ *((bool *) (val)) = ppsc->fw_current_inpsmode;
break;
case HW_VAR_CORRECT_TSF:{
u64 tsf;
@@ -173,15 +172,15 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_BASIC_RATE:{
- u16 b_rate_cfg = ((u16 *) val)[0];
+ u16 rate_cfg = ((u16 *) val)[0];
u8 rate_index = 0;
- b_rate_cfg = b_rate_cfg & 0x15f;
- b_rate_cfg |= 0x01;
- rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
+ rate_cfg &= 0x15f;
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
rtl_write_byte(rtlpriv, REG_RRSR + 1,
- (b_rate_cfg >> 8)&0xff);
- while (b_rate_cfg > 0x1) {
- b_rate_cfg = (b_rate_cfg >> 1);
+ (rate_cfg >> 8)&0xff);
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
rate_index++;
}
rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
@@ -318,15 +317,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_AC_PARAM:{
u8 e_aci = *((u8 *) val);
- u32 u4b_ac_param = 0;
+ u32 u4b_ac_param;
+ u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+ u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+ u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
- u4b_ac_param |= (u32) mac->ac[e_aci].aifs;
- u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min
+ u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+ u4b_ac_param |= ((u32)cw_min
& 0xF) << AC_PARAM_ECW_MIN_OFFSET;
- u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max &
+ u4b_ac_param |= ((u32)cw_max &
0xF) << AC_PARAM_ECW_MAX_OFFSET;
- u4b_ac_param |= (u32) mac->ac[e_aci].tx_op
- << AC_PARAM_TXOP_LIMIT_OFFSET;
+ u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
("queue:%x, ac_param:%x\n", e_aci,
@@ -469,12 +470,12 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_FW_PSMODE_STATUS:
- ppsc->b_fw_current_inpsmode = *((bool *) val);
+ ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
u8 mstatus = (*(u8 *) val);
u8 tmp_regcr, tmp_reg422;
- bool b_recover = false;
+ bool recover = false;
if (mstatus == RT_MEDIA_CONNECT) {
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
@@ -491,7 +492,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_read_byte(rtlpriv,
REG_FWHW_TXQ_CTRL + 2);
if (tmp_reg422 & BIT(6))
- b_recover = true;
+ recover = true;
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
tmp_reg422 & (~BIT(6)));
@@ -500,7 +501,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
_rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
_rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
- if (b_recover) {
+ if (recover) {
rtl_write_byte(rtlpriv,
REG_FWHW_TXQ_CTRL + 2,
tmp_reg422);
@@ -868,7 +869,7 @@ static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, 0x350, 0x870c);
rtl_write_byte(rtlpriv, 0x352, 0x1);
- if (ppsc->b_support_backdoor)
+ if (ppsc->support_backdoor)
rtl_write_byte(rtlpriv, 0x349, 0x1b);
else
rtl_write_byte(rtlpriv, 0x349, 0x03);
@@ -940,15 +941,15 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
("Failed to download FW. Init HW "
"without FW now..\n"));
err = 1;
- rtlhal->bfw_ready = false;
+ rtlhal->fw_ready = false;
return err;
} else {
- rtlhal->bfw_ready = true;
+ rtlhal->fw_ready = true;
}
rtlhal->last_hmeboxnum = 0;
- rtl92c_phy_mac_config(hw);
- rtl92c_phy_bb_config(hw);
+ rtl92ce_phy_mac_config(hw);
+ rtl92ce_phy_bb_config(hw);
rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
rtl92c_phy_rf_config(hw);
rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
@@ -1170,21 +1171,20 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
u32 u4b_ac_param;
+ u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
+ u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
+ u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
rtl92c_dm_init_edca_turbo(hw);
-
u4b_ac_param = (u32) mac->ac[aci].aifs;
- u4b_ac_param |=
- ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
- u4b_ac_param |=
- ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
- u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET;
+ u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
+ u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
+ u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
- aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min,
- mac->ac[aci].cw_max, mac->ac[aci].tx_op));
+ aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
+ cw_max, tx_op));
switch (aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
@@ -1237,7 +1237,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
- if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready)
+ if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
rtl92c_firmware_selfreset(hw);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
@@ -1335,19 +1335,6 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
rtl92ce_enable_interrupt(hw);
}
-static u8 _rtl92c_get_chnl_group(u8 chnl)
-{
- u8 group;
-
- if (chnl < 3)
- group = 0;
- else if (chnl < 9)
- group = 1;
- else
- group = 2;
- return group;
-}
-
static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
bool autoload_fail,
u8 *hwinfo)
@@ -1568,7 +1555,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
- rtlefuse->b_apk_thermalmeterignore = true;
+ rtlefuse->apk_thermalmeterignore = true;
rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
@@ -1625,7 +1612,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- rtlefuse->b_txpwr_fromeprom = true;
+ rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -1668,7 +1655,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
switch (rtlhal->oem_id) {
case RT_CID_819x_HP:
- pcipriv->ledctl.bled_opendrain = true;
+ pcipriv->ledctl.led_opendrain = true;
break;
case RT_CID_819x_Lenovo:
case RT_CID_DEFAULT:
@@ -1693,10 +1680,10 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
rtlhal->version = _rtl92ce_read_chip_version(hw);
if (get_rf_type(rtlphy) == RF_1T1R)
- rtlpriv->dm.brfpath_rxenable[0] = true;
+ rtlpriv->dm.rfpath_rxenable[0] = true;
else
- rtlpriv->dm.brfpath_rxenable[0] =
- rtlpriv->dm.brfpath_rxenable[1] = true;
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
rtlhal->version));
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
@@ -1725,18 +1712,18 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u32 ratr_value = (u32) mac->basic_rates;
- u8 *p_mcsrate = mac->mcs;
+ u8 *mcsrate = mac->mcs;
u8 ratr_index = 0;
- u8 b_nmode = mac->ht_enable;
+ u8 nmode = mac->ht_enable;
u8 mimo_ps = 1;
u16 shortgi_rate;
u32 tmp_ratr_value;
- u8 b_curtxbw_40mhz = mac->bw_40;
- u8 b_curshortgi_40mhz = mac->sgi_40;
- u8 b_curshortgi_20mhz = mac->sgi_20;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
enum wireless_mode wirelessmode = mac->mode;
- ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12;
+ ratr_value |= ((*(u16 *) (mcsrate))) << 12;
switch (wirelessmode) {
case WIRELESS_MODE_B:
@@ -1750,7 +1737,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
break;
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
- b_nmode = 1;
+ nmode = 1;
if (mimo_ps == 0) {
ratr_value &= 0x0007F005;
} else {
@@ -1776,9 +1763,8 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
ratr_value &= 0x0FFFFFFF;
- if (b_nmode && ((b_curtxbw_40mhz &&
- b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
- b_curshortgi_20mhz))) {
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz &&
+ curshortgi_20mhz))) {
ratr_value |= 0x10000000;
tmp_ratr_value = (ratr_value >> 12);
@@ -1806,11 +1792,11 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
u32 ratr_bitmap = (u32) mac->basic_rates;
u8 *p_mcsrate = mac->mcs;
u8 ratr_index;
- u8 b_curtxbw_40mhz = mac->bw_40;
- u8 b_curshortgi_40mhz = mac->sgi_40;
- u8 b_curshortgi_20mhz = mac->sgi_20;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
enum wireless_mode wirelessmode = mac->mode;
- bool b_shortgi = false;
+ bool shortgi = false;
u8 rate_mask[5];
u8 macid = 0;
u8 mimops = 1;
@@ -1852,7 +1838,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
} else {
if (rtlphy->rf_type == RF_1T2R ||
rtlphy->rf_type == RF_1T1R) {
- if (b_curtxbw_40mhz) {
+ if (curtxbw_40mhz) {
if (rssi_level == 1)
ratr_bitmap &= 0x000f0000;
else if (rssi_level == 2)
@@ -1868,7 +1854,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
ratr_bitmap &= 0x000ff005;
}
} else {
- if (b_curtxbw_40mhz) {
+ if (curtxbw_40mhz) {
if (rssi_level == 1)
ratr_bitmap &= 0x0f0f0000;
else if (rssi_level == 2)
@@ -1886,13 +1872,13 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
}
}
- if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
- (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
if (macid == 0)
- b_shortgi = true;
+ shortgi = true;
else if (macid == 1)
- b_shortgi = false;
+ shortgi = false;
}
break;
default:
@@ -1906,9 +1892,9 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
}
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
("ratr_bitmap :%x\n", ratr_bitmap));
- *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
- (ratr_index << 28));
- rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28);
+ rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
"ratr_val:%x, %x:%x:%x:%x:%x\n",
ratr_index, ratr_bitmap,
@@ -1940,13 +1926,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
u8 u1tmp;
- bool b_actuallyset = false;
+ bool actuallyset = false;
unsigned long flag;
if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
return false;
- if (ppsc->b_swrf_processing)
+ if (ppsc->swrf_processing)
return false;
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
@@ -1972,24 +1958,24 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
- if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+ if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
("GPIOChangeRF - HW Radio ON, RF ON\n"));
e_rfpowerstate_toset = ERFON;
- ppsc->b_hwradiooff = false;
- b_actuallyset = true;
- } else if ((ppsc->b_hwradiooff == false)
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if ((ppsc->hwradiooff == false)
&& (e_rfpowerstate_toset == ERFOFF)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
e_rfpowerstate_toset = ERFOFF;
- ppsc->b_hwradiooff = true;
- b_actuallyset = true;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
}
- if (b_actuallyset) {
+ if (actuallyset) {
if (e_rfpowerstate_toset == ERFON) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
@@ -2028,7 +2014,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
}
*valid = 1;
- return !ppsc->b_hwradiooff;
+ return !ppsc->hwradiooff;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 305c819c8c78..a3dfdb635168 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -30,6 +30,8 @@
#ifndef __RTL92CE_HW_H__
#define __RTL92CE_HW_H__
+#define H2C_RA_MASK 6
+
void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -53,5 +55,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
index 78a0569208ea..7b1da8d7508f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -57,7 +57,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
("switch case not process\n"));
break;
}
- pled->b_ledon = true;
+ pled->ledon = true;
}
void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -76,7 +76,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.bled_opendrain == true)
+ if (pcipriv->ledctl.led_opendrain == true)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -92,7 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
("switch case not process\n"));
break;
}
- pled->b_ledon = false;
+ pled->ledon = false;
}
void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 45044117139a..d0541e8c6012 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -32,89 +32,13 @@
#include "../ps.h"
#include "reg.h"
#include "def.h"
+#include "hw.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "table.h"
-static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
-static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
-static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
-static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
-static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
-static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
- u32 cmdtableidx, u32 cmdtablesz,
- enum swchnlcmd_id cmdid, u32 para1,
- u32 para2, u32 msdelay);
-static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
- u8 channel, u8 *stage, u8 *step,
- u32 *delay);
-static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- long power_indbm);
-static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- u8 txpwridx);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 returnvalue, originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "bitmask(%#x)\n", regaddr,
- bitmask));
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
- returnvalue = (originalvalue & bitmask) >> bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
- "Addr[0x%x]=0x%x\n", bitmask,
- regaddr, originalvalue));
-
- return returnvalue;
-
-}
-
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask,
- data));
-
- if (bitmask != MASKDWORD) {
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
- data = ((originalvalue & (~bitmask)) | (data << bitshift));
- }
-
- rtl_write_dword(rtlpriv, regaddr, data);
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask,
- data));
-
-}
-
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -149,7 +73,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
return readback_value;
}
-void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath,
u32 regaddr, u32 bitmask, u32 data)
{
@@ -197,137 +121,25 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
bitmask, data, rfpath));
}
-static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset)
-{
- RT_ASSERT(false, ("deprecated!\n"));
- return 0;
-}
-
-static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data)
-{
- RT_ASSERT(false, ("deprecated!\n"));
-}
-
-static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- u32 newoffset;
- u32 tmplong, tmplong2;
- u8 rfpi_enable = 0;
- u32 retvalue;
-
- offset &= 0x3f;
- newoffset = offset;
- if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
- return 0xFFFFFFFF;
- }
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
- if (rfpath == RF90_PATH_A)
- tmplong2 = tmplong;
- else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
- tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
- (newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong & (~BLSSIREADEDGE));
- mdelay(1);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(1);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong | BLSSIREADEDGE);
- mdelay(1);
- if (rfpath == RF90_PATH_A)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
- BIT(8));
- else if (rfpath == RF90_PATH_B)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
- BIT(8));
- if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
- BLSSIREADBACKDATA);
- else
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
- BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rflssi_readback,
- retvalue));
- return retvalue;
-}
-
-static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data)
-{
- u32 data_and_addr;
- u32 newoffset;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
- return;
- }
- offset &= 0x3f;
- newoffset = offset;
- data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset,
- data_and_addr));
-}
-
-static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
-static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
-{
- rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
- rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
- rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
- rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
- rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
- rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
- rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
- rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
- rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
-}
-
-bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool is92c = IS_92C_SERIAL(rtlhal->version);
- bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw);
+ bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
if (is92c)
rtl_write_byte(rtlpriv, 0x14, 0x71);
return rtstatus;
}
-bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
{
bool rtstatus = true;
struct rtl_priv *rtlpriv = rtl_priv(hw);
u16 regval;
u32 regvaldw;
- u8 b_reg_hwparafile = 1;
+ u8 reg_hwparafile = 1;
_rtl92c_phy_init_bb_rf_register_definition(hw);
regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
@@ -342,56 +154,12 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
- if (b_reg_hwparafile == 1)
+ if (reg_hwparafile == 1)
rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
return rtstatus;
}
-bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
-{
- return rtl92c_phy_rf6052_config(hw);
-}
-
-static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- bool rtstatus;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
- rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
- BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
- return false;
- }
- if (rtlphy->rf_type == RF_1T2R) {
- _rtl92c_phy_bb_config_1t(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
- }
- if (rtlefuse->autoload_failflag == false) {
- rtlphy->pwrgroup_cnt = 0;
- rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
- BASEBAND_CONFIG_PHY_REG);
- }
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
- return false;
- }
- rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
- BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
- return false;
- }
- rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
- RFPGA0_XA_HSSIPARAMETER2,
- 0x200));
- return true;
-}
-
-static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 i;
@@ -408,11 +176,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
return true;
}
-void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
-{
-}
-
-static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
u8 configtype)
{
int i;
@@ -456,7 +220,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
phy_regarray_table[i],
phy_regarray_table[i + 1]));
}
- rtl92c_phy_config_bb_external_pa(hw);
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
@@ -472,175 +235,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
return true;
}
-static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask,
- u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- if (regaddr == RTXAGC_A_RATE18_06) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][0]));
- }
- if (regaddr == RTXAGC_A_RATE54_24) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][1]));
- }
- if (regaddr == RTXAGC_A_CCK1_MCS32) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][6]));
- }
- if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][7]));
- }
- if (regaddr == RTXAGC_A_MCS03_MCS00) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][2]));
- }
- if (regaddr == RTXAGC_A_MCS07_MCS04) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][3]));
- }
- if (regaddr == RTXAGC_A_MCS11_MCS08) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][4]));
- }
- if (regaddr == RTXAGC_A_MCS15_MCS12) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][5]));
- }
- if (regaddr == RTXAGC_B_RATE18_06) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][8]));
- }
- if (regaddr == RTXAGC_B_RATE54_24) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][9]));
- }
-
- if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][14]));
- }
-
- if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][15]));
- }
-
- if (regaddr == RTXAGC_B_MCS03_MCS00) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][10]));
- }
-
- if (regaddr == RTXAGC_B_MCS07_MCS04) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][11]));
- }
-
- if (regaddr == RTXAGC_B_MCS11_MCS08) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][12]));
- }
-
- if (regaddr == RTXAGC_B_MCS15_MCS12) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
- data;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][13]));
-
- rtlphy->pwrgroup_cnt++;
- }
-}
-
-static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
u8 configtype)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -679,13 +274,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
return true;
}
-static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
- enum radio_path rfpath)
-{
- return true;
-}
-
-bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
@@ -740,7 +329,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
udelay(1);
}
}
- _rtl92c_phy_config_rf_external_pa(hw, rfpath);
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
@@ -776,346 +364,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
return true;
}
-void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->default_initialgain[0] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[1] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[2] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[3] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Default initial gain (c50=0x%x, "
- "c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]));
-
- rtlphy->framesync = (u8) rtl_get_bbreg(hw,
- ROFDM0_RXDETECTOR3, MASKBYTE0);
- rtlphy->framesync_c34 = rtl_get_bbreg(hw,
- ROFDM0_RXDETECTOR2, MASKDWORD);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync));
-}
-
-static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
- RFPGA0_XA_LSSIPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
- RFPGA0_XB_LSSIPARAMETER;
-
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
- ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
- ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
- ROFDM0_XCRXIQIMBANLANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
- ROFDM0_XDRXIQIMBALANCE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
- ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
- ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
- ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
- ROFDM0_XDTXIQIMBALANCE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
- RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
- RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
- RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
- RFPGA0_XD_LSSIREADBACK;
-
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
- TRANSCEIVEA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
- TRANSCEIVEB_HSPI_READBACK;
-
-}
-
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 txpwr_level;
- long txpwr_dbm;
-
- txpwr_level = rtlphy->cur_cck_txpwridx;
- txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
- WIRELESS_MODE_B, txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
- rtlefuse->legacy_ht_txpowerdiff;
- if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
- WIRELESS_MODE_G,
- txpwr_level) > txpwr_dbm)
- txpwr_dbm =
- _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
- WIRELESS_MODE_N_24G,
- txpwr_level) > txpwr_dbm)
- txpwr_dbm =
- _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level);
- *powerlevel = txpwr_dbm;
-}
-
-static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
- u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 index = (channel - 1);
-
- cckpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
- cckpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
- if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
- ofdmpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
- ofdmpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
- } else if (get_rf_type(rtlphy) == RF_2T2R) {
- ofdmpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
- ofdmpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
- }
-}
-
-static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
- u8 channel, u8 *cckpowerlevel,
- u8 *ofdmpowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
- rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
-}
-
-void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 cckpowerlevel[2], ofdmpowerlevel[2];
-
- if (rtlefuse->b_txpwr_fromeprom == false)
- return;
- _rtl92c_get_txpower_index(hw, channel,
- &cckpowerlevel[0], &ofdmpowerlevel[0]);
- _rtl92c_ccxpower_index_check(hw,
- channel, &cckpowerlevel[0],
- &ofdmpowerlevel[0]);
- rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
- rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
-}
-
-bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 idx;
- u8 rf_path;
-
- u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
- WIRELESS_MODE_B,
- power_indbm);
- u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
- WIRELESS_MODE_N_24G,
- power_indbm);
- if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
- ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
- else
- ofdmtxpwridx = 0;
- RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
- ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
- power_indbm, ccktxpwridx, ofdmtxpwridx));
- for (idx = 0; idx < 14; idx++) {
- for (rf_path = 0; rf_path < 2; rf_path++) {
- rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
- rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
- ofdmtxpwridx;
- rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
- ofdmtxpwridx;
- }
- }
- rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
- return true;
-}
-
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
-{
-}
-
-static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- long power_indbm)
-{
- u8 txpwridx;
- long offset;
-
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- offset = -7;
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- offset = -8;
- break;
- default:
- offset = -8;
- break;
- }
-
- if ((power_indbm - offset) > 0)
- txpwridx = (u8) ((power_indbm - offset) * 2);
- else
- txpwridx = 0;
-
- if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
- txpwridx = MAX_TXPWR_IDX_NMODE_92S;
-
- return txpwridx;
-}
-
-static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode,
- u8 txpwridx)
-{
- long offset;
- long pwrout_dbm;
-
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- offset = -7;
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- offset = -8;
- break;
- default:
- offset = -8;
- break;
- }
- pwrout_dbm = txpwridx / 2 + offset;
- return pwrout_dbm;
-}
-
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Unknown Scan Backup operation.\n"));
- break;
- }
- }
-}
-
-void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1183,645 +432,7 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
}
-void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 tmp_bw = rtlphy->current_chan_bw;
-
- if (rtlphy->set_bwmode_inprogress)
- return;
- rtlphy->set_bwmode_inprogress = true;
- if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
- rtl92c_phy_set_bw_mode_callback(hw);
- else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("FALSE driver sleep or unload\n"));
- rtlphy->set_bwmode_inprogress = false;
- rtlphy->current_chan_bw = tmp_bw;
- }
-}
-
-void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u32 delay;
-
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("switch to channel%d\n", rtlphy->current_channel));
- if (is_hal_stop(rtlhal))
- return;
- do {
- if (!rtlphy->sw_chnl_inprogress)
- break;
- if (!_rtl92c_phy_sw_chnl_step_by_step
- (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
- &rtlphy->sw_chnl_step, &delay)) {
- if (delay > 0)
- mdelay(delay);
- else
- continue;
- } else
- rtlphy->sw_chnl_inprogress = false;
- break;
- } while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
-}
-
-u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (rtlphy->sw_chnl_inprogress)
- return 0;
- if (rtlphy->set_bwmode_inprogress)
- return 0;
- RT_ASSERT((rtlphy->current_channel <= 14),
- ("WIRELESS_MODE_G but channel>14"));
- rtlphy->sw_chnl_inprogress = true;
- rtlphy->sw_chnl_stage = 0;
- rtlphy->sw_chnl_step = 0;
- if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
- rtl92c_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- ("sw_chnl_inprogress false schdule workitem\n"));
- rtlphy->sw_chnl_inprogress = false;
- } else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- ("sw_chnl_inprogress false driver sleep or"
- " unload\n"));
- rtlphy->sw_chnl_inprogress = false;
- }
- return 1;
-}
-
-static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
- u8 channel, u8 *stage, u8 *step,
- u32 *delay)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
- u32 precommoncmdcnt;
- struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
- u32 postcommoncmdcnt;
- struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
- u32 rfdependcmdcnt;
- struct swchnlcmd *currentcmd = NULL;
- u8 rfpath;
- u8 num_total_rfpath = rtlphy->num_total_rfpath;
-
- precommoncmdcnt = 0;
- _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
- MAX_PRECMD_CNT,
- CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
- _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
- MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
-
- postcommoncmdcnt = 0;
-
- _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
- MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
-
- rfdependcmdcnt = 0;
-
- RT_ASSERT((channel >= 1 && channel <= 14),
- ("illegal channel for Zebra: %d\n", channel));
-
- _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
- MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
- RF_CHNLBW, channel, 10);
-
- _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
- MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
- 0);
-
- do {
- switch (*stage) {
- case 0:
- currentcmd = &precommoncmd[*step];
- break;
- case 1:
- currentcmd = &rfdependcmd[*step];
- break;
- case 2:
- currentcmd = &postcommoncmd[*step];
- break;
- }
-
- if (currentcmd->cmdid == CMDID_END) {
- if ((*stage) == 2) {
- return true;
- } else {
- (*stage)++;
- (*step) = 0;
- continue;
- }
- }
-
- switch (currentcmd->cmdid) {
- case CMDID_SET_TXPOWEROWER_LEVEL:
- rtl92c_phy_set_txpower_level(hw, channel);
- break;
- case CMDID_WRITEPORT_ULONG:
- rtl_write_dword(rtlpriv, currentcmd->para1,
- currentcmd->para2);
- break;
- case CMDID_WRITEPORT_USHORT:
- rtl_write_word(rtlpriv, currentcmd->para1,
- (u16) currentcmd->para2);
- break;
- case CMDID_WRITEPORT_UCHAR:
- rtl_write_byte(rtlpriv, currentcmd->para1,
- (u8) currentcmd->para2);
- break;
- case CMDID_RF_WRITEREG:
- for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
- rtlphy->rfreg_chnlval[rfpath] =
- ((rtlphy->rfreg_chnlval[rfpath] &
- 0xfffffc00) | currentcmd->para2);
-
- rtl_set_rfreg(hw, (enum radio_path)rfpath,
- currentcmd->para1,
- RFREG_OFFSET_MASK,
- rtlphy->rfreg_chnlval[rfpath]);
- }
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
- break;
- }
-
- break;
- } while (true);
-
- (*delay) = currentcmd->msdelay;
- (*step)++;
- return false;
-}
-
-static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
- u32 cmdtableidx, u32 cmdtablesz,
- enum swchnlcmd_id cmdid,
- u32 para1, u32 para2, u32 msdelay)
-{
- struct swchnlcmd *pcmd;
-
- if (cmdtable == NULL) {
- RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
- return false;
- }
-
- if (cmdtableidx >= cmdtablesz)
- return false;
-
- pcmd = cmdtable + cmdtableidx;
- pcmd->cmdid = cmdid;
- pcmd->para1 = para1;
- pcmd->para2 = para2;
- pcmd->msdelay = msdelay;
- return true;
-}
-
-bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
-{
- return true;
-}
-
-static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
-{
- u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
- u8 result = 0x00;
-
- rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
- rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
- rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
- rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
- config_pathb ? 0x28160202 : 0x28160502);
-
- if (config_pathb) {
- rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
- rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
- }
-
- rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
- rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
- rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
-
- mdelay(IQK_DELAY_TIME);
-
- reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
- reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
- reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
- reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
-
- if (!(reg_eac & BIT(28)) &&
- (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
- (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
- result |= 0x01;
- else
- return result;
-
- if (!(reg_eac & BIT(27)) &&
- (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
- (((reg_eac & 0x03FF0000) >> 16) != 0x36))
- result |= 0x02;
- return result;
-}
-
-static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
-{
- u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
- u8 result = 0x00;
-
- rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
- rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
- mdelay(IQK_DELAY_TIME);
- reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
- reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
- reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
- reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
- reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
- if (!(reg_eac & BIT(31)) &&
- (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
- (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
- result |= 0x01;
- else
- return result;
-
- if (!(reg_eac & BIT(30)) &&
- (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
- (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
- result |= 0x02;
- return result;
-}
-
-static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
- bool b_iqk_ok, long result[][8],
- u8 final_candidate, bool btxonly)
-{
- u32 oldval_0, x, tx0_a, reg;
- long y, tx0_c;
-
- if (final_candidate == 0xFF)
- return;
- else if (b_iqk_ok) {
- oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD) >> 22) & 0x3FF;
- x = result[final_candidate][0];
- if ((x & 0x00000200) != 0)
- x = x | 0xFFFFFC00;
- tx0_a = (x * oldval_0) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
- ((x * oldval_0 >> 7) & 0x1));
- y = result[final_candidate][1];
- if ((y & 0x00000200) != 0)
- y = y | 0xFFFFFC00;
- tx0_c = (y * oldval_0) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
- ((tx0_c & 0x3C0) >> 6));
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
- (tx0_c & 0x3F));
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
- ((y * oldval_0 >> 7) & 0x1));
- if (btxonly)
- return;
- reg = result[final_candidate][2];
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
- reg = result[final_candidate][3] & 0x3F;
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
- reg = (result[final_candidate][3] >> 6) & 0xF;
- rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
- }
-}
-
-static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
- bool b_iqk_ok, long result[][8],
- u8 final_candidate, bool btxonly)
-{
- u32 oldval_1, x, tx1_a, reg;
- long y, tx1_c;
-
- if (final_candidate == 0xFF)
- return;
- else if (b_iqk_ok) {
- oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD) >> 22) & 0x3FF;
- x = result[final_candidate][4];
- if ((x & 0x00000200) != 0)
- x = x | 0xFFFFFC00;
- tx1_a = (x * oldval_1) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
- ((x * oldval_1 >> 7) & 0x1));
- y = result[final_candidate][5];
- if ((y & 0x00000200) != 0)
- y = y | 0xFFFFFC00;
- tx1_c = (y * oldval_1) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
- ((tx1_c & 0x3C0) >> 6));
- rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
- (tx1_c & 0x3F));
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
- ((y * oldval_1 >> 7) & 0x1));
- if (btxonly)
- return;
- reg = result[final_candidate][6];
- rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
- reg = result[final_candidate][7] & 0x3F;
- rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
- reg = (result[final_candidate][7] >> 6) & 0xF;
- rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
- }
-}
-
-static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
- u32 *addareg, u32 *addabackup,
- u32 registernum)
-{
- u32 i;
-
- for (i = 0; i < registernum; i++)
- addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
-}
-
-static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
- macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
-static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
- u32 *addareg, u32 *addabackup,
- u32 regiesternum)
-{
- u32 i;
-
- for (i = 0; i < regiesternum; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
-}
-
-static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
- rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
-}
-
-static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
- u32 *addareg, bool is_patha_on, bool is2t)
-{
- u32 pathOn;
- u32 i;
-
- pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
- pathOn = 0x0bdb25a0;
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
- } else {
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
- }
-
- for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
-}
-
-static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- rtl_write_byte(rtlpriv, macreg[0], 0x3F);
-
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i],
- (u8) (macbackup[i] & (~BIT(3))));
- rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
-static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
-{
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-}
-
-static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
-{
- u32 mode;
-
- mode = pi_mode ? 0x01000100 : 0x01000000;
- rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
- rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
-}
-
-static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
- long result[][8], u8 c1, u8 c2)
-{
- u32 i, j, diff, simularity_bitmap, bound;
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- u8 final_candidate[2] = { 0xFF, 0xFF };
- bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
-
- if (is2t)
- bound = 8;
- else
- bound = 4;
-
- simularity_bitmap = 0;
-
- for (i = 0; i < bound; i++) {
- diff = (result[c1][i] > result[c2][i]) ?
- (result[c1][i] - result[c2][i]) :
- (result[c2][i] - result[c1][i]);
-
- if (diff > MAX_TOLERANCE) {
- if ((i == 2 || i == 6) && !simularity_bitmap) {
- if (result[c1][i] + result[c1][i + 1] == 0)
- final_candidate[(i / 4)] = c2;
- else if (result[c2][i] + result[c2][i + 1] == 0)
- final_candidate[(i / 4)] = c1;
- else
- simularity_bitmap = simularity_bitmap |
- (1 << i);
- } else
- simularity_bitmap =
- simularity_bitmap | (1 << i);
- }
- }
-
- if (simularity_bitmap == 0) {
- for (i = 0; i < (bound / 4); i++) {
- if (final_candidate[i] != 0xFF) {
- for (j = i * 4; j < (i + 1) * 4 - 2; j++)
- result[3][j] =
- result[final_candidate[i]][j];
- bresult = false;
- }
- }
- return bresult;
- } else if (!(simularity_bitmap & 0x0F)) {
- for (i = 0; i < 4; i++)
- result[3][i] = result[c1][i];
- return false;
- } else if (!(simularity_bitmap & 0xF0) && is2t) {
- for (i = 4; i < 8; i++)
- result[3][i] = result[c1][i];
- return false;
- } else {
- return false;
- }
-
-}
-
-static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
- long result[][8], u8 t, bool is2t)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u32 i;
- u8 patha_ok, pathb_ok;
- u32 adda_reg[IQK_ADDA_REG_NUM] = {
- 0x85c, 0xe6c, 0xe70, 0xe74,
- 0xe78, 0xe7c, 0xe80, 0xe84,
- 0xe88, 0xe8c, 0xed0, 0xed4,
- 0xed8, 0xedc, 0xee0, 0xeec
- };
-
- u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
- 0x522, 0x550, 0x551, 0x040
- };
-
- const u32 retrycount = 2;
-
- u32 bbvalue;
-
- if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
-
- _rtl92c_phy_save_adda_registers(hw, adda_reg,
- rtlphy->adda_backup, 16);
- _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
- }
- _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
- if (t == 0) {
- rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw,
- RFPGA0_XA_HSSIPARAMETER1,
- BIT(8));
- }
- if (!rtlphy->b_rfpi_enable)
- _rtl92c_phy_pi_mode_switch(hw, true);
- if (t == 0) {
- rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
- rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
- rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
- }
- rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
- rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
- rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
- if (is2t) {
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
- rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
- }
- _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
- rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
- if (is2t)
- rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
- rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
- rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
- for (i = 0; i < retrycount; i++) {
- patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
- if (patha_ok == 0x03) {
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
- 0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
- 0x3FF0000) >> 16;
- result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
- 0x3FF0000) >> 16;
- result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
- 0x3FF0000) >> 16;
- break;
- } else if (i == (retrycount - 1) && patha_ok == 0x01)
- result[t][0] = (rtl_get_bbreg(hw, 0xe94,
- MASKDWORD) & 0x3FF0000) >>
- 16;
- result[t][1] =
- (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
-
- }
-
- if (is2t) {
- _rtl92c_phy_path_a_standby(hw);
- _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
- for (i = 0; i < retrycount; i++) {
- pathb_ok = _rtl92c_phy_path_b_iqk(hw);
- if (pathb_ok == 0x03) {
- result[t][4] = (rtl_get_bbreg(hw,
- 0xeb4,
- MASKDWORD) &
- 0x3FF0000) >> 16;
- result[t][5] =
- (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
- 0x3FF0000) >> 16;
- result[t][6] =
- (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
- 0x3FF0000) >> 16;
- result[t][7] =
- (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
- 0x3FF0000) >> 16;
- break;
- } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
- result[t][4] = (rtl_get_bbreg(hw,
- 0xeb4,
- MASKDWORD) &
- 0x3FF0000) >> 16;
- }
- result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
- 0x3FF0000) >> 16;
- }
- }
- rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
- rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
- rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
- if (is2t)
- rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
- if (t != 0) {
- if (!rtlphy->b_rfpi_enable)
- _rtl92c_phy_pi_mode_switch(hw, false);
- _rtl92c_phy_reload_adda_registers(hw, adda_reg,
- rtlphy->adda_backup, 16);
- _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
- }
-}
-
-static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
u8 tmpreg;
u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
@@ -1866,666 +477,6 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
}
}
-static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
- char delta, bool is2t)
-{
- /* This routine is deliberately dummied out for later fixes */
-#if 0
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
- u32 reg_d[PATH_NUM];
- u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
-
- u32 bb_backup[APK_BB_REG_NUM];
- u32 bb_reg[APK_BB_REG_NUM] = {
- 0x904, 0xc04, 0x800, 0xc08, 0x874
- };
- u32 bb_ap_mode[APK_BB_REG_NUM] = {
- 0x00000020, 0x00a05430, 0x02040000,
- 0x000800e4, 0x00204000
- };
- u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
- 0x00000020, 0x00a05430, 0x02040000,
- 0x000800e4, 0x22204000
- };
-
- u32 afe_backup[APK_AFE_REG_NUM];
- u32 afe_reg[APK_AFE_REG_NUM] = {
- 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
- 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
- 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
- 0xeec
- };
-
- u32 mac_backup[IQK_MAC_REG_NUM];
- u32 mac_reg[IQK_MAC_REG_NUM] = {
- 0x522, 0x550, 0x551, 0x040
- };
-
- u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
- {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
- {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
- };
-
- u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
- {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
- {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
- };
-
- u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
- {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
- {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
- };
-
- u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
- {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
- {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
- };
-
- u32 afe_on_off[PATH_NUM] = {
- 0x04db25a4, 0x0b1b25a4
- };
-
- u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
-
- u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
-
- u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
-
- u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
-
- const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
- {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
- };
-
- const u32 apk_normal_setting_value_1[13] = {
- 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
- 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
- 0x12680000, 0x00880000, 0x00880000
- };
-
- const u32 apk_normal_setting_value_2[16] = {
- 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
- 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
- 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
- 0x00050006
- };
-
- const u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
-
- long bb_offset, delta_v, delta_offset;
-
- if (!is2t)
- pathbound = 1;
-
- for (index = 0; index < PATH_NUM; index++) {
- apk_offset[index] = apk_normal_offset[index];
- apk_value[index] = apk_normal_value[index];
- afe_on_off[index] = 0x6fdb25a4;
- }
-
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- for (path = 0; path < pathbound; path++) {
- apk_rf_init_value[path][index] =
- apk_normal_rf_init_value[path][index];
- apk_rf_value_0[path][index] =
- apk_normal_rf_value_0[path][index];
- }
- bb_ap_mode[index] = bb_normal_ap_mode[index];
-
- apkbound = 6;
- }
-
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index == 0)
- continue;
- bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
- }
-
- _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
-
- _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
-
- for (path = 0; path < pathbound; path++) {
- if (path == RF90_PATH_A) {
- offset = 0xb00;
- for (index = 0; index < 11; index++) {
- rtl_set_bbreg(hw, offset, MASKDWORD,
- apk_normal_setting_value_1
- [index]);
-
- offset += 0x04;
- }
-
- rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
-
- offset = 0xb68;
- for (; index < 13; index++) {
- rtl_set_bbreg(hw, offset, MASKDWORD,
- apk_normal_setting_value_1
- [index]);
-
- offset += 0x04;
- }
-
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
-
- offset = 0xb00;
- for (index = 0; index < 16; index++) {
- rtl_set_bbreg(hw, offset, MASKDWORD,
- apk_normal_setting_value_2
- [index]);
-
- offset += 0x04;
- }
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
- } else if (path == RF90_PATH_B) {
- offset = 0xb70;
- for (index = 0; index < 10; index++) {
- rtl_set_bbreg(hw, offset, MASKDWORD,
- apk_normal_setting_value_1
- [index]);
-
- offset += 0x04;
- }
- rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
- rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
-
- offset = 0xb68;
- index = 11;
- for (; index < 13; index++) {
- rtl_set_bbreg(hw, offset, MASKDWORD,
- apk_normal_setting_value_1
- [index]);
-
- offset += 0x04;
- }
-
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
-
- offset = 0xb60;
- for (index = 0; index < 16; index++) {
- rtl_set_bbreg(hw, offset, MASKDWORD,
- apk_normal_setting_value_2
- [index]);
-
- offset += 0x04;
- }
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
- }
-
- reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
- 0xd, MASKDWORD);
-
- for (index = 0; index < APK_AFE_REG_NUM; index++)
- rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
- afe_on_off[path]);
-
- if (path == RF90_PATH_A) {
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index == 0)
- continue;
- rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
- bb_ap_mode[index]);
- }
- }
-
- _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
-
- if (path == 0) {
- rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
- } else {
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
- 0x10000);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
- 0x1000f);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
- 0x20103);
- }
-
- delta_offset = ((delta + 14) / 2);
- if (delta_offset < 0)
- delta_offset = 0;
- else if (delta_offset > 12)
- delta_offset = 12;
-
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index != 1)
- continue;
-
- tmpreg = apk_rf_init_value[path][index];
-
- if (!rtlefuse->b_apk_thermalmeterignore) {
- bb_offset = (tmpreg & 0xF0000) >> 16;
-
- if (!(tmpreg & BIT(15)))
- bb_offset = -bb_offset;
-
- delta_v =
- apk_delta_mapping[index][delta_offset];
-
- bb_offset += delta_v;
-
- if (bb_offset < 0) {
- tmpreg = tmpreg & (~BIT(15));
- bb_offset = -bb_offset;
- } else {
- tmpreg = tmpreg | BIT(15);
- }
-
- tmpreg =
- (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
- }
-
- rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
- MASKDWORD, 0x8992e);
- rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
- MASKDWORD, apk_rf_value_0[path][index]);
- rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
- MASKDWORD, tmpreg);
-
- i = 0;
- do {
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
- rtl_set_bbreg(hw, apk_offset[path],
- MASKDWORD, apk_value[0]);
- RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("PHY_APCalibrate() offset 0x%x "
- "value 0x%x\n",
- apk_offset[path],
- rtl_get_bbreg(hw, apk_offset[path],
- MASKDWORD)));
-
- mdelay(3);
-
- rtl_set_bbreg(hw, apk_offset[path],
- MASKDWORD, apk_value[1]);
- RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("PHY_APCalibrate() offset 0x%x "
- "value 0x%x\n",
- apk_offset[path],
- rtl_get_bbreg(hw, apk_offset[path],
- MASKDWORD)));
-
- mdelay(20);
-
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-
- if (path == RF90_PATH_A)
- tmpreg = rtl_get_bbreg(hw, 0xbd8,
- 0x03E00000);
- else
- tmpreg = rtl_get_bbreg(hw, 0xbd8,
- 0xF8000000);
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("PHY_APCalibrate() offset "
- "0xbd8[25:21] %x\n", tmpreg));
-
- i++;
-
- } while (tmpreg > apkbound && i < 4);
-
- apk_result[path][index] = tmpreg;
- }
- }
-
- _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
-
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index == 0)
- continue;
- rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
- }
-
- _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
-
- for (path = 0; path < pathbound; path++) {
- rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
- MASKDWORD, reg_d[path]);
-
- if (path == RF90_PATH_B) {
- rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
- 0x1000f);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
- 0x20101);
- }
-
- if (apk_result[path][1] > 6)
- apk_result[path][1] = 6;
- }
-
- for (path = 0; path < pathbound; path++) {
- rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
- ((apk_result[path][1] << 15) |
- (apk_result[path][1] << 10) |
- (apk_result[path][1] << 5) |
- apk_result[path][1]));
-
- if (path == RF90_PATH_A)
- rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
- ((apk_result[path][1] << 15) |
- (apk_result[path][1] << 10) |
- (0x00 << 5) | 0x05));
- else
- rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
- ((apk_result[path][1] << 15) |
- (apk_result[path][1] << 10) |
- (0x02 << 5) | 0x05));
-
- rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
- ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
- 0x08));
-
- }
-
- rtlphy->b_apk_done = true;
-#endif
-}
-
-static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
- bool bmain, bool is2t)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (is_hal_stop(rtlhal)) {
- rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
- rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
- }
- if (is2t) {
- if (bmain)
- rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
- BIT(5) | BIT(6), 0x1);
- else
- rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
- BIT(5) | BIT(6), 0x2);
- } else {
- if (bmain)
- rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
- else
- rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
-
- }
-}
-
-#undef IQK_ADDA_REG_NUM
-#undef IQK_DELAY_TIME
-
-void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- long result[4][8];
- u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
- bool is12simular, is13simular, is23simular;
- bool b_start_conttx = false, b_singletone = false;
- u32 iqk_bb_reg[10] = {
- ROFDM0_XARXIQIMBALANCE,
- ROFDM0_XBRXIQIMBALANCE,
- ROFDM0_ECCATHRESHOLD,
- ROFDM0_AGCRSSITABLE,
- ROFDM0_XATXIQIMBALANCE,
- ROFDM0_XBTXIQIMBALANCE,
- ROFDM0_XCTXIQIMBALANCE,
- ROFDM0_XCTXAFE,
- ROFDM0_XDTXAFE,
- ROFDM0_RXIQEXTANTA
- };
-
- if (b_recovery) {
- _rtl92c_phy_reload_adda_registers(hw,
- iqk_bb_reg,
- rtlphy->iqk_bb_backup, 10);
- return;
- }
- if (b_start_conttx || b_singletone)
- return;
- for (i = 0; i < 8; i++) {
- result[0][i] = 0;
- result[1][i] = 0;
- result[2][i] = 0;
- result[3][i] = 0;
- }
- final_candidate = 0xff;
- b_patha_ok = false;
- b_pathb_ok = false;
- is12simular = false;
- is23simular = false;
- is13simular = false;
- for (i = 0; i < 3; i++) {
- if (IS_92C_SERIAL(rtlhal->version))
- _rtl92c_phy_iq_calibrate(hw, result, i, true);
- else
- _rtl92c_phy_iq_calibrate(hw, result, i, false);
- if (i == 1) {
- is12simular = _rtl92c_phy_simularity_compare(hw,
- result, 0,
- 1);
- if (is12simular) {
- final_candidate = 0;
- break;
- }
- }
- if (i == 2) {
- is13simular = _rtl92c_phy_simularity_compare(hw,
- result, 0,
- 2);
- if (is13simular) {
- final_candidate = 0;
- break;
- }
- is23simular = _rtl92c_phy_simularity_compare(hw,
- result, 1,
- 2);
- if (is23simular)
- final_candidate = 1;
- else {
- for (i = 0; i < 8; i++)
- reg_tmp += result[3][i];
-
- if (reg_tmp != 0)
- final_candidate = 3;
- else
- final_candidate = 0xFF;
- }
- }
- }
- for (i = 0; i < 4; i++) {
- reg_e94 = result[i][0];
- reg_e9c = result[i][1];
- reg_ea4 = result[i][2];
- reg_eac = result[i][3];
- reg_eb4 = result[i][4];
- reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
- }
- if (final_candidate != 0xff) {
- rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
- rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
- reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
- rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
- rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
- b_patha_ok = b_pathb_ok = true;
- } else {
- rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
- rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
- }
- if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
- _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result,
- final_candidate,
- (reg_ea4 == 0));
- if (IS_92C_SERIAL(rtlhal->version)) {
- if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
- _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok,
- result,
- final_candidate,
- (reg_ec4 == 0));
- }
- _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup, 10);
-}
-
-void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- bool b_start_conttx = false, b_singletone = false;
-
- if (b_start_conttx || b_singletone)
- return;
- if (IS_92C_SERIAL(rtlhal->version))
- _rtl92c_phy_lc_calibrate(hw, true);
- else
- _rtl92c_phy_lc_calibrate(hw, false);
-}
-
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (rtlphy->b_apk_done)
- return;
- if (IS_92C_SERIAL(rtlhal->version))
- _rtl92c_phy_ap_calibrate(hw, delta, true);
- else
- _rtl92c_phy_ap_calibrate(hw, delta, false);
-}
-
-void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
-{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
- if (IS_92C_SERIAL(rtlhal->version))
- _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
- else
- _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
-}
-
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- bool b_postprocessing = false;
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress));
- do {
- switch (iotype) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("[IO CMD] Resume DM after scan.\n"));
- b_postprocessing = true;
- break;
- case IO_CMD_PAUSE_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("[IO CMD] Pause DM before scan.\n"));
- b_postprocessing = true;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
- break;
- }
- } while (false);
- if (b_postprocessing && !rtlphy->set_io_inprogress) {
- rtlphy->set_io_inprogress = true;
- rtlphy->current_io_type = iotype;
- } else {
- return false;
- }
- rtl92c_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
- return true;
-}
-
-void rtl92c_phy_set_io(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress));
- switch (rtlphy->current_io_type) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
- rtl92c_dm_write_dig(hw);
- rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
- break;
- case IO_CMD_PAUSE_DM_BY_SCAN:
- rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
- dm_digtable.cur_igvalue = 0x17;
- rtl92c_dm_write_dig(hw);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
- break;
- }
- rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("<---(%#x)\n", rtlphy->current_io_type));
-}
-
-void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
-}
-
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
- u32 u4b_tmp;
- u8 delay = 5;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- while (u4b_tmp != 0 && delay > 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- delay--;
- }
- if (delay == 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Switch RF timeout !!!.\n"));
- return;
- }
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state)
{
@@ -2648,7 +599,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
jiffies_to_msecs(jiffies -
ppsc->last_awake_jiffies)));
ppsc->last_sleep_jiffies = jiffies;
- _rtl92ce_phy_set_rf_sleep(hw);
+ _rtl92c_phy_set_rf_sleep(hw);
break;
}
default:
@@ -2663,7 +614,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
return bresult;
}
-bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index ca4daee6e9a8..a37267e3fc22 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -57,8 +57,6 @@
#define IQK_MAC_REG_NUM 4
#define RF90_PATH_MAX 2
-#define CHANNEL_MAX_NUMBER 14
-#define CHANNEL_GROUP_MAX 3
#define CT_OFFSET_MAC_ADDR 0X16
@@ -78,9 +76,7 @@
#define CT_OFFSET_CUSTOMER_ID 0x7F
#define RTL92C_MAX_PATH_NUM 2
-#define CHANNEL_MAX_NUMBER 14
-#define CHANNEL_GROUP_MAX 3
-
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
enum swchnlcmd_id {
CMDID_END,
CMDID_SET_TXPOWEROWER_LEVEL,
@@ -195,11 +191,11 @@ extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr,
u32 bitmask);
-extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr,
u32 bitmask, u32 data);
extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
@@ -227,11 +223,32 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
u32 rfpath);
bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state);
-void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask,
+ u32 data);
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 875d51465225..b0868a613841 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -63,7 +63,15 @@
#define REG_LEDCFG3 0x004F
#define REG_FSIMR 0x0050
#define REG_FSISR 0x0054
-
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2 0x0060
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2 0x0062
+/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL 0x0068
#define REG_MCUFWDL 0x0080
#define REG_HMEBOX_EXT_0 0x0088
@@ -79,6 +87,7 @@
#define REG_PCIE_MIO_INTD 0x00E8
#define REG_HPON_FSM 0x00EC
#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only.*/
#define REG_CR 0x0100
#define REG_PBP 0x0104
@@ -209,6 +218,8 @@
#define REG_RDG_PIFS 0x0513
#define REG_SIFS_CTX 0x0514
#define REG_SIFS_TRX 0x0516
+#define REG_SIFS_CCK 0x0514
+#define REG_SIFS_OFDM 0x0516
#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
@@ -261,6 +272,10 @@
#define REG_MAC_SPEC_SIFS 0x063A
#define REG_RESP_SIFS_CCK 0x063C
#define REG_RESP_SIFS_OFDM 0x063E
+/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS 0x063C
+/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS 0x063E
#define REG_ACKTO 0x0640
#define REG_CTS2TO 0x0641
#define REG_EIFS 0x0642
@@ -641,9 +656,10 @@
#define STOPBE BIT(1)
#define STOPBK BIT(0)
-#define RCR_APPFCS BIT(31)
+#define RCR_APP_FCS BIT(31)
#define RCR_APP_MIC BIT(30)
#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYSTS BIT(28)
#define RCR_APP_PHYST_RXFF BIT(28)
#define RCR_APP_BA_SSN BIT(27)
#define RCR_ENMBID BIT(24)
@@ -759,6 +775,7 @@
#define BOOT_FROM_EEPROM BIT(4)
#define EEPROM_EN BIT(5)
+#define EEPROMSEL BOOT_FROM_EEPROM
#define AFE_BGEN BIT(0)
#define AFE_MBEN BIT(1)
@@ -876,6 +893,8 @@
#define BD_MAC2 BIT(9)
#define BD_MAC1 BIT(10)
#define IC_MACPHY_MODE BIT(11)
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
#define PAD_HWPD_IDN BIT(22)
#define TRP_VAUX_EN BIT(23)
#define TRP_BT_EN BIT(24)
@@ -883,6 +902,28 @@
#define BD_HCI_SEL BIT(26)
#define TYPE_ID BIT(27)
+/* REG_GPIO_OUTSTS (For RTL8723 only) */
+#define EFS_HCI_SEL (BIT(0)|BIT(1))
+#define PAD_HCI_SEL (BIT(2)|BIT(3))
+#define HCI_SEL (BIT(4)|BIT(5))
+#define PKG_SEL_HCI BIT(6)
+#define FEN_GPS BIT(7)
+#define FEN_BT BIT(8)
+#define FEN_WL BIT(9)
+#define FEN_PCI BIT(10)
+#define FEN_USB BIT(11)
+#define BTRF_HWPDN_N BIT(12)
+#define WLRF_HWPDN_N BIT(13)
+#define PDN_BT_N BIT(14)
+#define PDN_GPS_N BIT(15)
+#define BT_CTL_HWPDN BIT(16)
+#define GPS_CTL_HWPDN BIT(17)
+#define PPHY_SUSB BIT(20)
+#define UPHY_SUSB BIT(21)
+#define PCI_SUSEN BIT(22)
+#define USB_SUSEN BIT(23)
+#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
#define CHIP_VER_RTL_MASK 0xF000
#define CHIP_VER_RTL_SHIFT 12
@@ -1035,7 +1076,7 @@
#define _RARF_RC7(x) (((x) & 0x1F) << 16)
#define _RARF_RC8(x) (((x) & 0x1F) << 24)
-#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_TXOP_OFFSET 16
#define AC_PARAM_ECW_MAX_OFFSET 12
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0
@@ -1184,6 +1225,30 @@
#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+/* Enable GPIO[9] as WiFi HW PDn source */
+#define WL_HWPDN_EN BIT(0)
+/* WiFi HW PDn polarity control */
+#define WL_HWPDN_SL BIT(1)
+/* WiFi function enable */
+#define WL_FUNC_EN BIT(2)
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define WL_HWROF_EN BIT(3)
+/* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_EN BIT(16)
+/* BT HW PDn polarity control */
+#define BT_HWPDN_SL BIT(17)
+/* BT function enable */
+#define BT_FUNC_EN BIT(18)
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define BT_HWROF_EN BIT(19)
+/* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_EN BIT(20)
+/* GPS HW PDn polarity control */
+#define GPS_HWPDN_SL BIT(21)
+/* GPS function enable */
+#define GPS_FUNC_EN BIT(22)
+
#define RPMAC_RESET 0x100
#define RPMAC_TXSTART 0x104
#define RPMAC_TXLEGACYSIG 0x108
@@ -1496,7 +1561,7 @@
#define BTXHTSTBC 0x30
#define BTXHTADVANCECODING 0x40
#define BTXHTSHORTGI 0x80
-#define BTXHTNUMBERHT_LT F 0x300
+#define BTXHTNUMBERHT_LTF 0x300
#define BTXHTCRC8 0x3fc00
#define BCOUNTERRESET 0x10000
#define BNUMOFOFDMTX 0xffff
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index ffd8e04c4028..669b1168dbec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
}
}
-void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -410,7 +410,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
}
}
-void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel, u8 channel)
{
u32 writeVal[2], powerBase0[2], powerBase1[2];
@@ -430,7 +430,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
}
}
-bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -484,11 +484,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
switch (rfpath) {
case RF90_PATH_A:
- rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
+ rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
(enum radio_path) rfpath);
break;
case RF90_PATH_B:
- rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
+ rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
(enum radio_path) rfpath);
break;
case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index d3014f99bb7b..3aa520c1c171 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -40,5 +40,8 @@ extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel);
extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel, u8 channel);
-extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b366e8862929..b1cc4d44f534 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -37,6 +37,7 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
+#include "rf.h"
#include "sw.h"
#include "trx.h"
#include "led.h"
@@ -46,13 +47,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtlpriv->dm.b_dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.b_disable_framebursting = 0;;
+ rtlpriv->dm.disable_framebursting = 0;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
- rtlpci->receive_config = (RCR_APPFCS |
+ rtlpci->receive_config = (RCR_APP_FCS |
RCR_AMF |
RCR_ADF |
RCR_APP_MIC |
@@ -122,7 +123,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
.scan_operation_backup = rtl92c_phy_scan_operation_backup,
- .set_rf_power_state = rtl92c_phy_set_rf_power_state,
+ .set_rf_power_state = rtl92ce_phy_set_rf_power_state,
.led_control = rtl92ce_led_control,
.set_desc = rtl92ce_set_desc,
.get_desc = rtl92ce_get_desc,
@@ -133,8 +134,17 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.deinit_sw_leds = rtl92ce_deinit_sw_leds,
.get_bbreg = rtl92c_phy_query_bb_reg,
.set_bbreg = rtl92c_phy_set_bb_reg,
- .get_rfreg = rtl92c_phy_query_rf_reg,
- .set_rfreg = rtl92c_phy_set_rf_reg,
+ .get_rfreg = rtl92ce_phy_query_rf_reg,
+ .set_rfreg = rtl92ce_phy_set_rf_reg,
+ .cmd_send_packet = _rtl92c_cmd_send_packet,
+ .phy_rf6052_config = rtl92ce_phy_rf6052_config,
+ .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
+ .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
+ .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
+ .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
+ .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
+ .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
+ .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
};
static struct rtl_mod_params rtl92ce_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
index de1198c38d4e..36e657668c1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -33,5 +33,19 @@
int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
void rtl92c_init_var_map(struct ieee80211_hw *hw);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index bf5852f2d634..aa2b5815600f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -36,7 +36,7 @@
#include "trx.h"
#include "led.h"
-static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc,
+static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc,
unsigned int
skb_queue)
{
@@ -245,24 +245,24 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *pdesc,
struct rx_fwinfo_92c *p_drvinfo,
- bool bpacket_match_bssid,
- bool bpacket_toself,
- bool b_packet_beacon)
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct phy_sts_cck_8192s_t *cck_buf;
s8 rx_pwr_all, rx_pwr[4];
- u8 rf_rx_num, evm, pwdb_all;
+ u8 evm, pwdb_all, rf_rx_num = 0;
u8 i, max_spatial_stream;
- u32 rssi, total_rssi;
+ u32 rssi, total_rssi = 0;
bool is_cck_rate;
is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
- pstats->b_packet_matchbssid = bpacket_match_bssid;
- pstats->b_packet_toself = bpacket_toself;
- pstats->b_is_cck = is_cck_rate;
- pstats->b_packet_beacon = b_packet_beacon;
- pstats->b_is_cck = is_cck_rate;
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
pstats->rx_mimo_signalquality[0] = -1;
pstats->rx_mimo_signalquality[1] = -1;
@@ -315,7 +315,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_pwdb_all = pwdb_all;
pstats->recvsignalpower = rx_pwr_all;
- if (bpacket_match_bssid) {
+ if (packet_match_bssid) {
u8 sq;
if (pstats->rx_pwdb_all > 40)
sq = 100;
@@ -334,10 +334,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalquality[1] = -1;
}
} else {
- rtlpriv->dm.brfpath_rxenable[0] =
- rtlpriv->dm.brfpath_rxenable[1] = true;
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
- if (rtlpriv->dm.brfpath_rxenable[i])
+ if (rtlpriv->dm.rfpath_rxenable[i])
rf_rx_num++;
rx_pwr[i] =
@@ -347,7 +347,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
- if (bpacket_match_bssid)
+ if (packet_match_bssid)
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
@@ -366,7 +366,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
for (i = 0; i < max_spatial_stream; i++) {
evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
- if (bpacket_match_bssid) {
+ if (packet_match_bssid) {
if (i == 0)
pstats->signalquality =
(u8) (evm & 0xff);
@@ -393,7 +393,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
u8 rfpath;
u32 last_rssi, tmpval;
- if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+ if (pstats->packet_toself || pstats->packet_beacon) {
rtlpriv->stats.rssi_calculate_cnt++;
if (rtlpriv->stats.ui_rssi.total_num++ >=
@@ -421,7 +421,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
pstats->rssi = rtlpriv->stats.signal_strength;
}
- if (!pstats->b_is_cck && pstats->b_packet_toself) {
+ if (!pstats->is_cck && pstats->packet_toself) {
for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
rfpath++) {
@@ -463,7 +463,7 @@ static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
struct rtl_stats *pstats)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting;
+ int weighting = 0;
if (rtlpriv->stats.recv_signal_power == 0)
rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
@@ -493,7 +493,7 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
rtlpriv->dm.undecorated_smoothed_pwdb;
}
- if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+ if (pstats->packet_toself || pstats->packet_beacon) {
if (undecorated_smoothed_pwdb < 0)
undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
@@ -525,7 +525,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
u32 last_evm, n_spatialstream, tmpval;
if (pstats->signalquality != 0) {
- if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+ if (pstats->packet_toself || pstats->packet_beacon) {
if (rtlpriv->stats.ui_link_quality.total_num++ >=
PHY_LINKQUALITY_SLID_WIN_MAX) {
@@ -595,8 +595,8 @@ static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
struct rtl_stats *pcurrent_stats)
{
- if (!pcurrent_stats->b_packet_matchbssid &&
- !pcurrent_stats->b_packet_beacon)
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
return;
_rtl92ce_process_ui_rssi(hw, pcurrent_stats);
@@ -617,34 +617,36 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *tmp_buf;
u8 *praddr;
u8 *psaddr;
- u16 fc, type;
- bool b_packet_matchbssid, b_packet_toself, b_packet_beacon;
+ __le16 fc;
+ u16 type, c_fc;
+ bool packet_matchbssid, packet_toself, packet_beacon;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
- fc = le16_to_cpu(hdr->frame_control);
+ fc = hdr->frame_control;
+ c_fc = le16_to_cpu(fc);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
psaddr = hdr->addr2;
- b_packet_matchbssid =
+ packet_matchbssid =
((IEEE80211_FTYPE_CTL != type) &&
(!compare_ether_addr(mac->bssid,
- (fc & IEEE80211_FCTL_TODS) ?
- hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+ (c_fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
hdr->addr2 : hdr->addr3)) &&
- (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv));
+ (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
- b_packet_toself = b_packet_matchbssid &&
+ packet_toself = packet_matchbssid &&
(!compare_ether_addr(praddr, rtlefuse->dev_addr));
if (ieee80211_is_beacon(fc))
- b_packet_beacon = true;
+ packet_beacon = true;
_rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
- b_packet_matchbssid, b_packet_toself,
- b_packet_beacon);
+ packet_matchbssid, packet_toself,
+ packet_beacon);
_rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
}
@@ -662,14 +664,14 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
- stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
- stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
- stats->b_hwerror = (stats->b_crc | stats->b_icv);
+ stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
- stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
- stats->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+ stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
@@ -689,7 +691,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
if (GET_RX_DESC_RXHT(pdesc))
rx_status->flag |= RX_FLAG_HT;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
@@ -727,27 +729,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool b_defaultadapter = true;
-
- struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
-
+ bool defaultadapter = true;
+ struct ieee80211_sta *sta;
u8 *pdesc = (u8 *) pdesc_tx;
struct rtl_tcb_desc tcb_desc;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
u16 seq_number;
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
u8 rate_flag = info->control.rates[0].flags;
enum rtl_desc_qsel fw_qsel =
- _rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control),
- queue_index);
+ _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
- bool b_firstseg = ((hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+ bool firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
- bool b_lastseg = ((hdr->frame_control &
- cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+ bool lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
@@ -759,7 +758,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
- if (b_firstseg) {
+ if (firstseg) {
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
@@ -774,25 +773,25 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
}
SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable &&
+ SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable &&
!tcb_desc.
- b_cts_enable) ? 1 : 0));
+ cts_enable) ? 1 : 0));
SET_TX_DESC_HW_RTS_ENABLE(pdesc,
- ((tcb_desc.b_rts_enable
- || tcb_desc.b_cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0));
+ ((tcb_desc.rts_enable
+ || tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc,
((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
- (tcb_desc.b_rts_use_shortpreamble ? 1 : 0)
- : (tcb_desc.b_rts_use_shortgi ? 1 : 0)));
+ (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+ : (tcb_desc.rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
- if (tcb_desc.b_packet_bw) {
+ if (tcb_desc.packet_bw) {
SET_TX_DESC_DATA_BW(pdesc, 1);
SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
} else {
@@ -811,10 +810,13 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_LINIP(pdesc, 0);
SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
}
+ rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf =
@@ -854,14 +856,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
+ SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
- if (rtlpriv->dm.b_useramask) {
+ if (rtlpriv->dm.useramask) {
SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
} else {
@@ -869,16 +871,16 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
}
- if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps &&
- ppsc->b_fwctrl_lps) {
+ if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+ ppsc->fwctrl_lps) {
SET_TX_DESC_HWSEQ_EN(pdesc, 1);
SET_TX_DESC_PKT_ID(pdesc, 8);
- if (!b_defaultadapter)
+ if (!defaultadapter)
SET_TX_DESC_QOS(pdesc, 1);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
+ SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
@@ -889,8 +891,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
}
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool b_firstseg,
- bool b_lastseg, struct sk_buff *skb)
+ u8 *pdesc, bool firstseg,
+ bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -901,11 +903,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
- u16 fc = le16_to_cpu(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
- if (b_firstseg)
+ if (firstseg)
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
@@ -1029,3 +1031,36 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
BIT(0) << (hw_queue));
}
}
+
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ u8 own;
+ unsigned long flags;
+ struct sk_buff *pskb = NULL;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 53d0e0a5af5c..803adcc80c96 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -40,470 +40,494 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
+ ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+ BIT_LEN_MASK_32(__mask))
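A minimal host-side sketch of what the extraction macro above does (illustrative only, not part of the patch): on a little-endian host le32_to_cpu() is a no-op, and BIT_LEN_MASK_32 is given a local stand-in definition with the same meaning as the kernel helper. The field offsets used below mirror GET_TX_DESC_PKT_SIZE (bits [15:0]) and GET_TX_DESC_OFFSET (bits [23:16]) from this header.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers; assumes a little-endian host. */
#define le32_to_cpu(x)		(x)
#define BIT_LEN_MASK_32(len)	((uint32_t)(0xFFFFFFFF >> (32 - (len))))

#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
	((le32_to_cpu(*(((uint32_t *)(__pdesc)))) >> (__shift)) & \
	 BIT_LEN_MASK_32(__mask))

int main(void)
{
	/* First dword of a TX descriptor: pkt_size in bits [15:0],
	 * offset in bits [23:16]. */
	uint32_t desc0 = (32u << 16) | 1500u;

	printf("pkt_size = %u\n", (unsigned)SHIFT_AND_MASK_LE(&desc0, 0, 16));	/* 1500 */
	printf("offset   = %u\n", (unsigned)SHIFT_AND_MASK_LE(&desc0, 16, 8));	/* 32 */
	return 0;
}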
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
+ (*(__le32 *)(__pdesc) = \
+ (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
+ (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
+ (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
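A matching sketch for the setter (again illustrative, not part of the patch): cpu_to_le32()/le32_to_cpu() are treated as no-ops on a little-endian host, and BIT_OFFSET_LEN_MASK_32 gets a local stand-in. It clears the target bit field in the dword and ORs in the masked new value, which is exactly how the patched SET_TX_DESC_* macros use SET_BITS_OFFSET_LE.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers; assumes a little-endian host. */
#define cpu_to_le32(x)			(x)
#define le32_to_cpu(x)			(x)
#define BIT_LEN_MASK_32(len)		((uint32_t)(0xFFFFFFFF >> (32 - (len))))
#define BIT_OFFSET_LEN_MASK_32(off, len) (BIT_LEN_MASK_32(len) << (off))

#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)		\
	(*(uint32_t *)(__pdesc) =					\
	 cpu_to_le32((le32_to_cpu(*((uint32_t *)(__pdesc))) &		\
		      ~BIT_OFFSET_LEN_MASK_32((__shift), __len)) |	\
		     (((uint32_t)(__val) & BIT_LEN_MASK_32(__len)) << (__shift))))

int main(void)
{
	uint32_t desc0 = 0;

	/* Mirrors SET_TX_DESC_PKT_SIZE(desc, 1500) and SET_TX_DESC_OFFSET(desc, 32). */
	SET_BITS_OFFSET_LE(&desc0, 0, 16, 1500);
	SET_BITS_OFFSET_LE(&desc0, 16, 8, 32);

	printf("desc0 = 0x%08x\n", (unsigned)desc0);	/* 0x002005dc */
	return 0;
}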
+
+/* macros to read/write various fields in RX or TX descriptors */
+
#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+ SHIFT_AND_MASK_LE(__pdesc, 0, 16)
#define GET_TX_DESC_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+ SHIFT_AND_MASK_LE(__pdesc, 16, 8)
#define GET_TX_DESC_BMC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 24, 1)
#define GET_TX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 25, 1)
#define GET_TX_DESC_LAST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 26, 1)
#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 27, 1)
#define GET_TX_DESC_LINIP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 28, 1)
#define GET_TX_DESC_NO_ACM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 29, 1)
#define GET_TX_DESC_GF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 30, 1)
#define GET_TX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 31, 1)
#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
#define SET_TX_DESC_BK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
+ SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
#define GET_TX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+ SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
+ SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
#define GET_TX_DESC_PIFS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
#define GET_TX_DESC_RATE_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+ SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
+ SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
+ SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
#define GET_TX_DESC_RTS_RC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
+ SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
#define GET_TX_DESC_DATA_RC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
+ SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
+ SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
#define GET_TX_DESC_RAW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
#define GET_TX_DESC_CCX(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
+ SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
+ SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
#define GET_TX_DESC_TX_ANTL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
+ SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
+ SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
+ SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
+ SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
+ SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
+ SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
+ SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
#define GET_TX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
+ SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
#define GET_TX_DESC_PKT_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 28, 4)
+ SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
+ SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
#define GET_TX_DESC_RTS_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
+ SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
#define GET_TX_DESC_AP_DCFE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
#define GET_TX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
#define GET_TX_DESC_USE_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
#define GET_TX_DESC_CTS2SELF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
#define GET_TX_DESC_PORT_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
+ SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
#define GET_TX_DESC_TX_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
+ SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
#define GET_TX_DESC_DATA_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
#define GET_TX_DESC_RTS_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
+ SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
#define GET_TX_DESC_RTS_SC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
+ SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
#define GET_TX_DESC_RTS_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
+ SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
+ SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
#define GET_TX_DESC_TX_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
+ SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
+ SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
#define GET_TX_DESC_CCX_TAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
+ SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
+ SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
+ SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
+ SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
+ SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
+ SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
#define GET_TX_DESC_TXAGC_A(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
+ SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
#define GET_TX_DESC_TXAGC_B(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
+ SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
+ SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
+ SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
+ SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
+ SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
+ SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
+ SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+ SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val)
+ SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+ SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 16, 4)
+ SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 20, 4)
+ SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 24, 4)
+ SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 28, 4)
+ SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
+ SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
+ SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+ SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
+ SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
#define GET_RX_DESC_PKT_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+ SHIFT_AND_MASK_LE(__pdesc, 0, 14)
#define GET_RX_DESC_CRC32(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 14, 1)
#define GET_RX_DESC_ICV(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 15, 1)
#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+ SHIFT_AND_MASK_LE(__pdesc, 16, 4)
#define GET_RX_DESC_SECURITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+ SHIFT_AND_MASK_LE(__pdesc, 20, 3)
#define GET_RX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 23, 1)
#define GET_RX_DESC_SHIFT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+ SHIFT_AND_MASK_LE(__pdesc, 24, 2)
#define GET_RX_DESC_PHYST(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 26, 1)
#define GET_RX_DESC_SWDEC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 27, 1)
#define GET_RX_DESC_LS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 28, 1)
#define GET_RX_DESC_FS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 29, 1)
#define GET_RX_DESC_EOR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 30, 1)
#define GET_RX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+ SHIFT_AND_MASK_LE(__pdesc, 31, 1)
#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+ SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
#define GET_RX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+ SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
#define GET_RX_DESC_TID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 5, 4)
+ SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
#define GET_RX_DESC_HWRSVD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 9, 5)
+ SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
#define GET_RX_DESC_PAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
#define GET_RX_DESC_FAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
#define GET_RX_DESC_A1_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+ SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
#define GET_RX_DESC_A2_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
+ SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
#define GET_RX_DESC_PAM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
#define GET_RX_DESC_PWR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
#define GET_RX_DESC_MD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
#define GET_RX_DESC_MF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
#define GET_RX_DESC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+ SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
#define GET_RX_DESC_MC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
#define GET_RX_DESC_BC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+ SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
#define GET_RX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+ SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
#define GET_RX_DESC_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+ SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 16, 14)
+ SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
#define GET_RX_DESC_NEXT_IND(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 30, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
#define GET_RX_DESC_RSVD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 31, 1)
+ SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
#define GET_RX_DESC_RXMCS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
+ SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
#define GET_RX_DESC_RXHT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+ SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
#define GET_RX_DESC_SPLCP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
+ SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
#define GET_RX_DESC_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
+ SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
#define GET_RX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+ SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
#define GET_RX_DESC_HWPC_ERR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 14, 1)
+ SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
#define GET_RX_DESC_HWPC_IND(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 15, 1)
+ SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
#define GET_RX_DESC_IV0(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 16, 16)
+ SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
#define GET_RX_DESC_IV1(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
#define GET_RX_DESC_TSFL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+ SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+ SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+ SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
do { \
@@ -711,4 +735,6 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
struct sk_buff *skb);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
new file mode 100644
index 000000000000..ad2de6b839ef
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
@@ -0,0 +1,14 @@
+rtl8192cu-objs := \
+ dm.o \
+ hw.o \
+ led.o \
+ mac.o \
+ phy.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o
+
+obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
new file mode 100644
index 000000000000..c54940ea72fe
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/def.h"
+
+/*-------------------------------------------------------------------------
+ * Chip specific
+ *-------------------------------------------------------------------------*/
+#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
+#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
+#define NORMAL_CHIP BIT(4)
+#define CHIP_VENDOR_UMC BIT(5)
+#define CHIP_VENDOR_UMC_B_CUT BIT(6)
+
+#define IS_NORMAL_CHIP(version) \
+ (((version) & NORMAL_CHIP) ? true : false)
+
+#define IS_8723_SERIES(version) \
+ (((version) & CHIP_8723) ? true : false)
+
+#define IS_92C_1T2R(version) \
+ (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+
+#define IS_VENDOR_UMC(version) \
+ (((version) & CHIP_VENDOR_UMC) ? true : false)
+
+#define IS_VENDOR_UMC_A_CUT(version) \
+ (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
+ false : true) : false)
+
+#define IS_VENDOR_8723_A_CUT(version) \
+ (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
+ false : true) : false)
+
+#define CHIP_BONDING_92C_1T2R 0x1
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
new file mode 100644
index 000000000000..f311baee668d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb;
+
+ if (!rtlpriv->dm.dynamic_txpower_enable)
+ return;
+
+ if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+				("Not connected to any AP\n"));
+
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+
+ if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ } else if ((undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+ (undecorated_smoothed_pwdb >=
+ TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ } else if (undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_NORMAL\n"));
+ }
+
+ if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel));
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ }
+
+ rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
new file mode 100644
index 000000000000..7f966c666b5a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
new file mode 100644
index 000000000000..9444e76838cf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -0,0 +1,2504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../usb.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+
+static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+
+ rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH;
+ rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
+ if (IS_HIGHT_PA(rtlefuse->board_type)) {
+ rtlphy->hwparam_tables[PHY_REG_PG].length =
+ RTL8192CUPHY_REG_Array_PG_HPLength;
+ rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+ RTL8192CUPHY_REG_Array_PG_HP;
+ } else {
+ rtlphy->hwparam_tables[PHY_REG_PG].length =
+ RTL8192CUPHY_REG_ARRAY_PGLENGTH;
+ rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+ RTL8192CUPHY_REG_ARRAY_PG;
+ }
+ /* 2T */
+ rtlphy->hwparam_tables[PHY_REG_2T].length =
+ RTL8192CUPHY_REG_2TARRAY_LENGTH;
+ rtlphy->hwparam_tables[PHY_REG_2T].pdata =
+ RTL8192CUPHY_REG_2TARRAY;
+ rtlphy->hwparam_tables[RADIOA_2T].length =
+ RTL8192CURADIOA_2TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOA_2T].pdata =
+ RTL8192CURADIOA_2TARRAY;
+ rtlphy->hwparam_tables[RADIOB_2T].length =
+ RTL8192CURADIOB_2TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOB_2T].pdata =
+ RTL8192CU_RADIOB_2TARRAY;
+ rtlphy->hwparam_tables[AGCTAB_2T].length =
+ RTL8192CUAGCTAB_2TARRAYLENGTH;
+ rtlphy->hwparam_tables[AGCTAB_2T].pdata =
+ RTL8192CUAGCTAB_2TARRAY;
+ /* 1T */
+ if (IS_HIGHT_PA(rtlefuse->board_type)) {
+ rtlphy->hwparam_tables[PHY_REG_1T].length =
+ RTL8192CUPHY_REG_1T_HPArrayLength;
+ rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+ RTL8192CUPHY_REG_1T_HPArray;
+ rtlphy->hwparam_tables[RADIOA_1T].length =
+ RTL8192CURadioA_1T_HPArrayLength;
+ rtlphy->hwparam_tables[RADIOA_1T].pdata =
+ RTL8192CURadioA_1T_HPArray;
+ rtlphy->hwparam_tables[RADIOB_1T].length =
+ RTL8192CURADIOB_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOB_1T].pdata =
+ RTL8192CU_RADIOB_1TARRAY;
+ rtlphy->hwparam_tables[AGCTAB_1T].length =
+ RTL8192CUAGCTAB_1T_HPArrayLength;
+ rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+ Rtl8192CUAGCTAB_1T_HPArray;
+ } else {
+ rtlphy->hwparam_tables[PHY_REG_1T].length =
+ RTL8192CUPHY_REG_1TARRAY_LENGTH;
+ rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+ RTL8192CUPHY_REG_1TARRAY;
+ rtlphy->hwparam_tables[RADIOA_1T].length =
+ RTL8192CURADIOA_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOA_1T].pdata =
+ RTL8192CU_RADIOA_1TARRAY;
+ rtlphy->hwparam_tables[RADIOB_1T].length =
+ RTL8192CURADIOB_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOB_1T].pdata =
+ RTL8192CU_RADIOB_1TARRAY;
+ rtlphy->hwparam_tables[AGCTAB_1T].length =
+ RTL8192CUAGCTAB_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+ RTL8192CUAGCTAB_1TARRAY;
+ }
+}
+
+static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 rf_path, index, tempval;
+ u16 i;
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail) {
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i] =
+ hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+ hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
+ i];
+ } else {
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i] =
+ EEPROM_DEFAULT_TXPOWERLEVEL;
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+ EEPROM_DEFAULT_TXPOWERLEVEL;
+ }
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
+ else
+ tempval = EEPROM_DEFAULT_HT40_2SDIFF;
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+ (tempval & 0xf);
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+ ((tempval & 0xf0) >> 4);
+ }
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
+ i, rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i]));
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ [i]));
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
+ if ((rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+ > 0) {
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path]
+ [index] - rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ [index];
+ } else {
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
+ }
+ }
+ for (i = 0; i < 14; i++) {
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
+ "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail) {
+ rtlefuse->eeprom_pwrlimit_ht40[i] =
+ hwinfo[EEPROM_TXPWR_GROUP + i];
+ rtlefuse->eeprom_pwrlimit_ht20[i] =
+ hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
+ } else {
+ rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
+ rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
+ }
+ }
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+ if (rf_path == RF90_PATH_A) {
+ rtlefuse->pwrgroup_ht20[rf_path][i] =
+ (rtlefuse->eeprom_pwrlimit_ht20[index]
+ & 0xf);
+ rtlefuse->pwrgroup_ht40[rf_path][i] =
+ (rtlefuse->eeprom_pwrlimit_ht40[index]
+ & 0xf);
+ } else if (rf_path == RF90_PATH_B) {
+ rtlefuse->pwrgroup_ht20[rf_path][i] =
+ ((rtlefuse->eeprom_pwrlimit_ht20[index]
+ & 0xf0) >> 4);
+ rtlefuse->pwrgroup_ht40[rf_path][i] =
+ ((rtlefuse->eeprom_pwrlimit_ht40[index]
+ & 0xf0) >> 4);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht20[rf_path][i]));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht40[rf_path][i]));
+ }
+ }
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
+ else
+ tempval = EEPROM_DEFAULT_HT20_DIFF;
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
+ ((tempval >> 4) & 0xF);
+ if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
+ if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
+ index = _rtl92c_get_chnl_group((u8) i);
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
+ else
+ tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
+ ((tempval >> 4) & 0xF);
+ }
+ rtlefuse->legacy_ht_txpowerdiff =
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+ if (!autoload_fail)
+ rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
+ else
+ rtlefuse->eeprom_regulatory = 0;
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ if (!autoload_fail) {
+ rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
+ rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
+ } else {
+ rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
+ rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+ rtlefuse->eeprom_tssi[RF90_PATH_A],
+ rtlefuse->eeprom_tssi[RF90_PATH_B]));
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_THERMAL_METER];
+ else
+ tempval = EEPROM_DEFAULT_THERMALMETER;
+ rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
+ if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+ rtlefuse->eeprom_thermalmeter > 0x1c)
+ rtlefuse->eeprom_thermalmeter = 0x12;
+ if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
+ rtlefuse->apk_thermalmeterignore = true;
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+}
+
+static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boardType;
+
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ boardType = ((contents[EEPROM_RF_OPT1]) &
+ BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
+ } else {
+ boardType = contents[EEPROM_RF_OPT4];
+ boardType &= BOARD_TYPE_TEST_MASK;
+ }
+ rtlefuse->board_type = boardType;
+ if (IS_HIGHT_PA(rtlefuse->board_type))
+ rtlefuse->external_pa = 1;
+ printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);
+
+#ifdef CONFIG_ANTENNA_DIVERSITY
+ /* Antenna Diversity setting. */
+ if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
+ rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
+ else
+ rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
+
+ printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
+ rtl_efuse->antenna_cfg);
+#endif
+}
+
+#ifdef CONFIG_BT_COEXIST
+static void _update_bt_param(_adapter *padapter)
+{
+ struct btcoexist_priv *pbtpriv = &(padapter->halpriv.bt_coexist);
+ struct registry_priv *registry_par = &padapter->registrypriv;
+ if (2 != registry_par->bt_iso) {
+ /* 0:Low, 1:High, 2:From Efuse */
+ pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
+ }
+ if (registry_par->bt_sco == 1) {
+		/* 0:Idle, 1:Non-SCO, 2:SCO, 3:From Counter, 4:Busy,
+		 * 5:OtherBusy */
+ pbtpriv->BT_Service = BT_OtherAction;
+ } else if (registry_par->bt_sco == 2) {
+ pbtpriv->BT_Service = BT_SCO;
+ } else if (registry_par->bt_sco == 4) {
+ pbtpriv->BT_Service = BT_Busy;
+ } else if (registry_par->bt_sco == 5) {
+ pbtpriv->BT_Service = BT_OtherBusy;
+ } else {
+ pbtpriv->BT_Service = BT_Idle;
+ }
+ pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
+ pbtpriv->bCOBT = _TRUE;
+ pbtpriv->BtEdcaUL = 0;
+ pbtpriv->BtEdcaDL = 0;
+ pbtpriv->BtRssiState = 0xff;
+ pbtpriv->bInitSet = _FALSE;
+ pbtpriv->bBTBusyTraffic = _FALSE;
+ pbtpriv->bBTTrafficModeSet = _FALSE;
+ pbtpriv->bBTNonTrafficModeSet = _FALSE;
+ pbtpriv->CurrentState = 0;
+ pbtpriv->PreviousState = 0;
+	printk(KERN_INFO "rtl8192cu: BT Coexistence = %s\n",
+ (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
+ if (pbtpriv->BT_Coexist) {
+ if (pbtpriv->BT_Ant_Num == Ant_x2)
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "Ant_Num = Antx2\n");
+ else if (pbtpriv->BT_Ant_Num == Ant_x1)
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "Ant_Num = Antx1\n");
+ switch (pbtpriv->BT_CoexistType) {
+ case BT_2Wire:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_2Wire\n");
+ break;
+ case BT_ISSC_3Wire:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_ISSC_3Wire\n");
+ break;
+ case BT_Accel:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_Accel\n");
+ break;
+ case BT_CSR_BC4:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_CSR_BC4\n");
+ break;
+ case BT_CSR_BC8:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_CSR_BC8\n");
+ break;
+ case BT_RTL8756:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_RTL8756\n");
+ break;
+ default:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = Unknown\n");
+ break;
+ }
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n",
+ pbtpriv->BT_Ant_isolation);
+ switch (pbtpriv->BT_Service) {
+ case BT_OtherAction:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_OtherAction\n");
+ break;
+ case BT_SCO:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_SCO\n");
+ break;
+ case BT_Busy:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_Busy\n");
+ break;
+ case BT_OtherBusy:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_OtherBusy\n");
+ break;
+ default:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_Idle\n");
+ break;
+ }
+ printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
+ pbtpriv->BT_RadioSharedType);
+ }
+}
+
+#define GET_BT_COEXIST(priv) (&priv->bt_coexist)
+
+static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
+ u8 *contents,
+						u8 *contents,
+{
+ HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter);
+ bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
+ struct btcoexist_priv *pbtpriv = &pHalData->bt_coexist;
+ u8 rf_opt4;
+
+ _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
+ if (AutoloadFail) {
+ pbtpriv->BT_Coexist = _FALSE;
+ pbtpriv->BT_CoexistType = BT_2Wire;
+ pbtpriv->BT_Ant_Num = Ant_x2;
+ pbtpriv->BT_Ant_isolation = 0;
+ pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
+ return;
+ }
+ if (isNormal) {
+ if (pHalData->BoardType == BOARD_USB_COMBO)
+ pbtpriv->BT_Coexist = _TRUE;
+ else
+ pbtpriv->BT_Coexist = ((PROMContent[EEPROM_RF_OPT3] &
+ 0x20) >> 5); /* bit[5] */
+ rf_opt4 = PROMContent[EEPROM_RF_OPT4];
+ pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
+ pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
+ pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
+ pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
+ } else {
+ pbtpriv->BT_Coexist = (PROMContent[EEPROM_RF_OPT4] >> 4) ?
+ _TRUE : _FALSE;
+ }
+ _update_bt_param(Adapter);
+}
+#endif
+
+static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE] = {0};
+ u16 eeprom_id;
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+ memcpy((void *)hwinfo,
+ (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 ("RTL819X did not boot from EEPROM, check it!"));
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
+ hwinfo, HWSET_MAX_SIZE);
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != RTL8190_EEPROM_ID) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+ if (rtlefuse->autoload_failflag == true)
+ return;
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
+ _rtl92cu_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+ rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ (" VID = 0x%02x PID = 0x%02x\n",
+ rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
+ rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->txpwr_fromeprom = true;
+ rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case EEPROM_CID_DEFAULT:
+ if (rtlefuse->eeprom_did == 0x8176) {
+ if ((rtlefuse->eeprom_svid == 0x103C &&
+ rtlefuse->eeprom_smid == 0x1629))
+ rtlhal->oem_id = RT_CID_819x_HP;
+ else
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ default:
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ break;
+ }
+ }
+ _rtl92cu_read_board_type(hw, hwinfo);
+#ifdef CONFIG_BT_COEXIST
+ _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
+ rtlefuse->autoload_failflag);
+#endif
+}
+
+static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ switch (rtlhal->oem_id) {
+ case RT_CID_819x_HP:
+ usb_priv->ledctl.led_opendrain = true;
+ break;
+ case RT_CID_819x_Lenovo:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819x_Acer:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}
+
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ if (!IS_NORMAL_CHIP(rtlhal->version))
+ return;
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ?
+ EEPROM_93C46 : EEPROM_BOOT_EFUSE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
+		 (tmp_u1b & EEPROMSEL) ? "EEPROM" : "EFUSE"));
+ rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
+ (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
+ _rtl92cu_read_adapter_info(hw);
+ _rtl92cu_hal_customized_behavior(hw);
+ return;
+}
+
+static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int status = 0;
+ u16 value16;
+ u8 value8;
+ /* polling autoload done. */
+ u32 pollingCount = 0;
+
+ do {
+ if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Autoload Done!\n"));
+ break;
+ }
+ if (pollingCount++ > 100) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+				 ("Polling REG_APS_FSMCO[PFM_ALDN]"
+				  " timed out!\n"));
+ return -ENODEV;
+ }
+ } while (true);
+ /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+ /* Power on when re-enter from IPS/Radio off/card disable */
+ /* enable SPS into PWM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ udelay(100);
+ value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+ if (0 == (value8 & LDV12_EN)) {
+ value8 |= LDV12_EN;
+ rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
+ value8));
+ udelay(100);
+ value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
+ value8 &= ~ISO_MD2PP;
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
+ }
+ /* auto enable WLAN */
+ pollingCount = 0;
+ value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ value16 |= APFM_ONMAC;
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
+ do {
+ if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
+ printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
+ break;
+ }
+ if (pollingCount++ > 100) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+				 ("Polling REG_APS_FSMCO[APFM_ONMAC]"
+				  " timed out!\n"));
+ return -ENODEV;
+ }
+ } while (true);
+ /* Enable Radio ,GPIO ,and LED function */
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812);
+ /* release RF digital isolation */
+ value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+ value16 &= ~ISO_DIOR;
+ rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
+ /* Reconsider when to do this operation after asking HWSD. */
+ pollingCount = 0;
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
+ REG_APSD_CTRL) & ~BIT(6)));
+ do {
+ pollingCount++;
+ } while ((pollingCount < 200) &&
+ (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ value16 = rtl_read_word(rtlpriv, REG_CR);
+ value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+ PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC);
+ rtl_write_word(rtlpriv, REG_CR, value16);
+ return status;
+}
+
+static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
+ u32 outEPNum = (u32)out_ep_num;
+ u32 numHQ = 0;
+ u32 numLQ = 0;
+ u32 numNQ = 0;
+ u32 numPubQ;
+ u32 value32;
+ u8 value8;
+ u32 txQPageNum, txQPageUnit, txQRemainPage;
+
+ if (!wmm_enable) {
+ numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
+ CHIP_A_PAGE_NUM_PUBQ;
+ txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
+
+ txQPageUnit = txQPageNum/outEPNum;
+ txQRemainPage = txQPageNum % outEPNum;
+ if (queue_sel & TX_SELE_HQ)
+ numHQ = txQPageUnit;
+ if (queue_sel & TX_SELE_LQ)
+ numLQ = txQPageUnit;
+		/* The HIGH priority queue is always present in the two
+		 * out-ep configuration; assign the remaining pages to it. */
+ if ((outEPNum > 1) && (txQRemainPage))
+ numHQ += txQRemainPage;
+		/* NOTE: This step is done before writing REG_RQPN. */
+ if (isChipN) {
+ if (queue_sel & TX_SELE_NQ)
+ numNQ = txQPageUnit;
+ value8 = (u8)_NPQ(numNQ);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+ }
+ } else {
+		/* For WMM, the number of out-eps must be at least 2. */
+ numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
+ WMM_CHIP_A_PAGE_NUM_PUBQ;
+ if (queue_sel & TX_SELE_HQ) {
+ numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
+ WMM_CHIP_A_PAGE_NUM_HPQ;
+ }
+ if (queue_sel & TX_SELE_LQ) {
+ numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
+ WMM_CHIP_A_PAGE_NUM_LPQ;
+ }
+		/* NOTE: This step is done before writing REG_RQPN. */
+ if (isChipN) {
+ if (queue_sel & TX_SELE_NQ)
+ numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
+ value8 = (u8)_NPQ(numNQ);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+ }
+ }
+ /* TX DMA */
+ value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ rtl_write_dword(rtlpriv, REG_RQPN, value32);
+}
+
+static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 txpktbuf_bndy;
+ u8 value8;
+
+ if (!wmm_enable)
+ txpktbuf_bndy = TX_PAGE_BOUNDARY;
+ else /* for WMM */
+ txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
+ ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+ : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy);
+ rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
+ value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128);
+ rtl_write_byte(rtlpriv, REG_PBP, value8);
+}
+
+static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
+ u16 bkQ, u16 viQ, u16 voQ,
+ u16 mgtQ, u16 hiQ)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
+
+ value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+ _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+ _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
+}
+
+static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
+{
+ u16 uninitialized_var(value);
+
+ switch (queue_sel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+ _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
+ value, value);
+ printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
+{
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ u16 uninitialized_var(valueHi);
+ u16 uninitialized_var(valueLow);
+
+ switch (queue_sel) {
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valueHi = QUEUE_NORMAL;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_NORMAL;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ if (!wmm_enable) {
+ beQ = valueLow;
+ bkQ = valueLow;
+ viQ = valueHi;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+	} else { /* for WMM, CONFIG_OUT_EP_WIFI_MODE */
+ beQ = valueHi;
+ bkQ = valueLow;
+ viQ = valueLow;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ }
+ _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
+{
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!wmm_enable) { /* typical setting */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_LOW;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ } else { /* for WMM */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_NORMAL;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ }
+ _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Tx queue select :0x%02x..\n", queue_sel));
+}
+
+static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ switch (out_ep_num) {
+ case 1:
+ _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
+ queue_sel);
+ break;
+ case 2:
+ _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
+ queue_sel);
+ break;
+ case 3:
+ _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
+ queue_sel);
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+}
+
+static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+	u8 hq_sele = 0; /* default: map all endpoints to the low queue */
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (out_ep_num) {
+ case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
+ if (!wmm_enable) /* typical setting */
+ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
+ HQSEL_HIQ;
+ else /* for WMM */
+ hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
+ HQSEL_HIQ;
+ break;
+ case 1:
+ if (TX_SELE_LQ == queue_sel) {
+ /* map all endpoint to Low queue */
+ hq_sele = 0;
+ } else if (TX_SELE_HQ == queue_sel) {
+ /* map all endpoint to High queue */
+ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ |
+ HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ;
+ }
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+ rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Tx queue select :0x%02x..\n", hq_sele));
+}
+
+static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
+ queue_sel);
+ else
+ _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
+ queue_sel);
+}
+
+static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
+{
+}
+
+static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
+{
+ u16 value16;
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS |
+ RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+ RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+ rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+ /* Accept all multicast address */
+ rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF);
+ /* Accept all management frames */
+ value16 = 0xFFFF;
+ rtl92c_set_mgt_filter(hw, value16);
+ /* Reject all control frame - default value is 0 */
+ rtl92c_set_ctrl_filter(hw, 0x0);
+ /* Accept all data frames */
+ value16 = 0xFFFF;
+ rtl92c_set_data_filter(hw, value16);
+}
+
+static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ int err = 0;
+ u32 boundary = 0;
+ u8 wmm_enable = false; /* TODO */
+ u8 out_ep_nums = rtlusb->out_ep_nums;
+ u8 queue_sel = rtlusb->out_queue_sel;
+ err = _rtl92cu_init_power_on(hw);
+
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to init power on!\n"));
+ return err;
+ }
+ if (!wmm_enable) {
+ boundary = TX_PAGE_BOUNDARY;
+ } else { /* for WMM */
+ boundary = (IS_NORMAL_CHIP(rtlhal->version))
+ ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+ : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+ }
+ if (false == rtl92c_init_llt_table(hw, boundary)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to init LLT Table!\n"));
+ return -EINVAL;
+ }
+ _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
+ queue_sel);
+ _rtl92c_init_trx_buffer(hw, wmm_enable);
+ _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
+ queue_sel);
+ /* Get Rx PHY status in order to report RSSI and others. */
+ rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
+ rtl92c_init_interrupt(hw);
+ rtl92c_init_network_type(hw);
+ _rtl92cu_init_wmac_setting(hw);
+ rtl92c_init_adaptive_ctrl(hw);
+ rtl92c_init_edca(hw);
+ rtl92c_init_rate_fallback(hw);
+ rtl92c_init_retry_function(hw);
+ _rtl92cu_init_usb_aggregation(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
+ rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
+ rtl92c_init_beacon_parameters(hw, rtlhal->version);
+ rtl92c_init_ampdu_aggregation(hw);
+ rtl92c_init_beacon_max_error(hw, true);
+ return err;
+}
+
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value = 0x0;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm));
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("not open sw encryption\n"));
+ return;
+ }
+ sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TxUseDK;
+ sec_reg_value |= SCR_RxUseDK;
+ }
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The SECR-value %x\n", sec_reg_value));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+	/* Fix for MAC loopback mode failure. */
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ rtl_write_byte(rtlpriv, 0x15, 0xe9);
+ /* HW SEQ CTRL */
+	/* Set from 0x0 to 0xFF by tynli; enable HW SEQ NUM by default. */
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+	/* Fix for the USB interface interference issue */
+ rtl_write_byte(rtlpriv, 0xfe40, 0xe0);
+ rtl_write_byte(rtlpriv, 0xfe41, 0x8d);
+ rtl_write_byte(rtlpriv, 0xfe42, 0x80);
+ rtlusb->reg_bcn_ctrl_val = 0x18;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _InitPABias(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 pa_setting;
+
+	/* Fix for the PA current issue */
+ pa_setting = efuse_read_1byte(hw, 0x1FA);
+ if (!(pa_setting & BIT(0))) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406);
+ }
+ if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) &&
+ IS_92C_SERIAL(rtlhal->version)) {
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406);
+ }
+ if (!(pa_setting & BIT(4))) {
+ pa_setting = rtl_read_byte(rtlpriv, 0x16);
+ pa_setting &= 0x0F;
+ rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90);
+ }
+}
+
+static void _InitAntenna_Selection(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_ANTENNA_DIVERSITY
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (pHalData->AntDivCfg == 0)
+ return;
+
+ if (rtlphy->rf_type == RF_1T1R) {
+ rtl_write_dword(rtlpriv, REG_LEDCFG0,
+ rtl_read_dword(rtlpriv,
+ REG_LEDCFG0)|BIT(23));
+ rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
+ Antenna_A)
+ pHalData->CurAntenna = Antenna_A;
+ else
+ pHalData->CurAntenna = Antenna_B;
+ }
+#endif
+}
+
+static void _dump_registers(struct ieee80211_hw *hw)
+{
+}
+
+static void _update_mac_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
+ mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+ mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+ mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+}
+
+int rtl92cu_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ int err = 0;
+ static bool iqk_initialized;
+
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+ err = _rtl92cu_init_mac(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+ return err;
+ }
+ err = rtl92c_download_fw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Failed to download FW. Init HW without FW now..\n"));
+ err = 1;
+ rtlhal->fw_ready = false;
+ return err;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+ rtlhal->last_hmeboxnum = 0; /* h2c */
+ _rtl92cu_phy_param_tab_init(hw);
+ rtl92cu_phy_mac_config(hw);
+ rtl92cu_phy_bb_config(hw);
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ rtl92c_phy_rf_config(hw);
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+ !IS_92C_SERIAL(rtlhal->version)) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+ }
+ rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtl92cu_bb_block_on(hw);
+ rtl_cam_reset_all_entry(hw);
+ rtl92cu_enable_hw_security_config(hw);
+ ppsc->rfpwr_state = ERFON;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
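+ /* With the RF path switched on, run IQ calibration, TX power
+ * tracking and LC calibration. */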
+ if (ppsc->rfpwr_state == ERFON) {
+ rtl92c_phy_set_rfpath_switch(hw, 1);
+ if (iqk_initialized) {
+ rtl92c_phy_iq_calibrate(hw, false);
+ } else {
+ rtl92c_phy_iq_calibrate(hw, false);
+ iqk_initialized = true;
+ }
+ rtl92c_dm_check_txpower_tracking(hw);
+ rtl92c_phy_lc_calibrate(hw);
+ }
+ _rtl92cu_hw_configure(hw);
+ _InitPABias(hw);
+ _InitAntenna_Selection(hw);
+ _update_mac_setting(hw);
+ rtl92c_dm_init(hw);
+ _dump_registers(hw);
+ return err;
+}
+
+static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/**************************************
+a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue
+b. RF path 0 offset 0x00 = 0x00 disable RF
+c. APSD_CTRL 0x600[7:0] = 0x40
+d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
+e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
+***************************************/
+ u8 eRFPath = 0, value8 = 0;
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
+
+ value8 |= APSDOFF;
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
+ value8 = 0;
+ value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
+ value8 &= (~FEN_BB_GLB_RSTn);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
+}
+
+static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlhal->fw_version <= 0x20) {
+ /*****************************
+ f. MCUFWDL 0x80[7:0]=0 reset MCU ready status
+ g. SYS_FUNC_EN 0x02[10]= 0 reset MCU reg, (8051 reset)
+ h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC reg, DCORE
+ i. SYS_FUNC_EN 0x02[10]= 1 enable MCU reg, (8051 enable)
+ ******************************/
+ u16 valu16 = 0;
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 &
+ (~FEN_CPUEN))); /* reset MCU ,8051 */
+ valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF;
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+ (FEN_HWPDN|FEN_ELDR))); /* reset MAC */
+ valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+ FEN_CPUEN)); /* enable MCU ,8051 */
+ } else {
+ u8 retry_cnts = 0;
+
+ /* IF fw in RAM code, do reset */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ if (rtlhal->fw_ready) {
+ /* 8051 reset by self */
+ rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
+ while ((retry_cnts++ < 100) &&
+ (FEN_CPUEN & rtl_read_word(rtlpriv,
+ REG_SYS_FUNC_EN))) {
+ udelay(50);
+ }
+ if (retry_cnts >= 100) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("#####=> 8051 reset failed!.."
+ ".......................\n"););
+ /* if 8051 reset fail, reset MAC. */
+ rtl_write_byte(rtlpriv,
+ REG_SYS_FUNC_EN + 1,
+ 0x50);
+ udelay(100);
+ }
+ }
+ }
+ /* Reset MAC and Enable 8051 */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ }
+ if (bWithoutHWSM) {
+ /*****************************
+ Without HW auto state machine
+ g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
+ h.AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL
+ i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK
+ j.SYS_ISO_CTRL 0x00[7:0] = 0xF9 isolate digital to PON
+ ******************************/
+ rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
+ rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F);
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9);
+ }
+}
+
+static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*****************************
+k. SYS_FUNC_EN 0x03[7:0] = 0x44 disable ELDR function
+l. SYS_CLKR 0x08[15:0] = 0x3083 disable ELDR clock
+m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
+******************************/
+ rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
+}
+
+static void _DisableGPIO(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/***************************************
+j. GPIO_PIN_CTRL 0x44[31:0]=0x000
+k. Value = GPIO_PIN_CTRL[7:0]
+l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level
+m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
+n. LEDCFG 0x4C[15:0] = 0x8080
+***************************************/
+ u8 value8;
+ u16 value16;
+ u32 value32;
+
+ /* 1. Disable GPIO[7:0] */
+ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000);
+ value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
+ value8 = (u8) (value32&0x000000FF);
+ value32 |= ((value8<<8) | 0x00FF0000);
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32);
+ /* 2. Disable GPIO[10:8] */
+ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00);
+ value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F;
+ value8 = (u8) (value16&0x000F);
+ value16 |= ((value8<<4) | 0x0780);
+ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16);
+ /* 3. Disable LED0 & 1 */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
+}
+
+static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value16 = 0;
+ u8 value8 = 0;
+
+ if (bWithoutHWSM) {
+ /*****************************
+ n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
+ o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
+ r. When the driver calls disable, the ASIC will turn off the remaining
+ clocks automatically
+ ******************************/
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+ value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+ value8 &= (~LDV12_EN);
+ rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+ }
+
+/*****************************
+h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode
+i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
+******************************/
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+ value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
+}
+
+static void _CardDisableHWSM(struct ieee80211_hw *hw)
+{
+ /* ==== RF Off Sequence ==== */
+ _DisableRFAFEAndResetBB(hw);
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure1(hw, false);
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+ _DisableGPIO(hw);
+ /* ==== Disable analog sequence === */
+ _DisableAnalog(hw, false);
+}
+
+static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
+{
+ /*==== RF Off Sequence ==== */
+ _DisableRFAFEAndResetBB(hw);
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure1(hw, true);
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+ _DisableGPIO(hw);
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure2(hw);
+ /* ==== Disable analog sequence === */
+ _DisableAnalog(hw, true);
+}
+
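+/* A cached copy of REG_BCN_CTRL (reg_bcn_ctrl_val) is kept so that
+ * individual bits can be set or cleared without reading the register
+ * back from the device. */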
+static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtlusb->reg_bcn_ctrl_val |= set_bits;
+ rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 tmp1byte = 0;
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE,
+ rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6));
+ }
+}
+
+static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 tmp1byte = 0;
+
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE,
+ rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6)));
+ }
+}
+
+static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1));
+ else
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+}
+
+static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0);
+ else
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+}
+
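+/* Program the network-type field of the MSR register and enable or
+ * disable beacon-related functions according to the interface type. */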
+static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
+ bt_msr &= 0xfc;
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+ if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
+ NL80211_IFTYPE_STATION) {
+ _rtl92cu_stop_tx_beacon(hw);
+ _rtl92cu_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+ _rtl92cu_resume_tx_beacon(hw);
+ _rtl92cu_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
+ "STATUS:No such media status(%x).\n", type));
+ }
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Network type %d not support!\n", type));
+ goto error_out;
+ }
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0xfc) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+ return 0;
+error_out:
+ return 1;
+}
+
+void rtl92cu_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl92cu_set_media_status(hw, opmode);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ if (rtlusb->disableHWSM)
+ _CardDisableHWSM(hw);
+ else
+ _CardDisableWithoutHWSM(hw);
+}
+
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ /* dummy routine needed for callback from rtl_op_configure_filter() */
+}
+
+/*========================================================================== */
+
+static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 filterout_non_associated_bssid = false;
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ filterout_non_associated_bssid = true;
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_AP:
+ default:
+ break;
+ }
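+ /* The RCR CBSSID bits filter out frames whose BSSID does not match
+ * the associated BSSID; TSF updates from beacons are toggled through
+ * REG_BCN_CTRL BIT(4) accordingly. */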
+ if (filterout_non_associated_bssid == true) {
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *)(&reg_rcr));
+ /* enable update TSF */
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *)(&reg_rcr));
+ /* disable update TSF */
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ break;
+ }
+ } else {
+ reg_rcr |= (RCR_CBSSID);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
+ }
+ } else if (filterout_non_associated_bssid == false) {
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ } else {
+ reg_rcr &= (~RCR_CBSSID);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
+ }
+ }
+}
+
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ if (_rtl92cu_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+ _rtl92cu_set_check_bssid(hw, type);
+ return 0;
+}
+
+static void _InitBeaconParameters(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+ /* TODO: Remove these magic numbers */
+ rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
+ rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+ /* Change beacon AIFS to the largest number
+ * because the test chip does not perform contention before sending a beacon. */
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+ else
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
+ bool Linked)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00);
+ rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F);
+}
+
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval, atim_window;
+ u32 value32;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ _InitBeaconParameters(hw);
+ rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+ /*
+ * Force beacon frame transmission even after receiving beacon frame
+ * from other ad hoc STA
+ *
+ * Reset TSF Timer to zero, added by Roger. 2008.06.24
+ */
+ value32 = rtl_read_dword(rtlpriv, REG_TCR);
+ value32 &= ~TSFRST;
+ rtl_write_dword(rtlpriv, REG_TCR, value32);
+ value32 |= TSFRST;
+ rtl_write_dword(rtlpriv, REG_TCR, value32);
+ RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
+ ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+ value32));
+ /* TODO: Modify later (Find the right parameters)
+ * NOTE: Fix test chip's bug (about contention window's randomness) */
+ if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
+ (mac->opmode == NL80211_IFTYPE_AP)) {
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
+ }
+ _beacon_function_enable(hw, true, true);
+}
+
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+ ("beacon_interval:%d\n", bcn_interval));
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+}
+
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+}
+
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *)(val)) = mac->rx_conf;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfState;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *)(&rfState));
+ if (rfState == ERFOFF) {
+ *((bool *) (val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *) (val)) = false;
+ else
+ *((bool *) (val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *) (val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+ *((u64 *)(val)) = tsf;
+ break;
+ }
+ case HW_VAR_MGT_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+ break;
+ case HW_VAR_CTRL_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+ break;
+ case HW_VAR_DATA_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+}
+
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ enum wireless_mode wirelessmode = mac->mode;
+ u8 idx = 0;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_BASIC_RATE:{
+ u16 rate_cfg = ((u16 *) val)[0];
+ u8 rate_index = 0;
+
+ rate_cfg &= 0x15f;
+ /* TODO */
+ /* if (mac->current_network.vender == HT_IOT_PEER_CISCO
+ * && ((rate_cfg & 0x150) == 0)) {
+ * rate_cfg |= 0x010;
+ * } */
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (rate_cfg >> 8) & 0xff);
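+ /* Use the index of the highest configured basic rate as the
+ * initial RTS rate. */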
+ while (rate_cfg > 0x1) {
+ rate_cfg >>= 1;
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_SIFS:{
+ rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]);
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
+ rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SIFS\n"));
+ break;
+ }
+ case HW_VAR_SLOT_TIME:{
+ u8 e_aci;
+ u8 QOS_MODE = 1;
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ if (QOS_MODE) {
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *)(&e_aci));
+ } else {
+ u8 sifstime = 0;
+ u8 u1bAIFS;
+
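+ /* Non-QoS mode: program a single AIFS value (SIFS + 2 * slot
+ * time) into all four EDCA parameter registers. */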
+ if (IS_WIRELESS_MODE_A(wirelessmode) ||
+ IS_WIRELESS_MODE_N_24G(wirelessmode) ||
+ IS_WIRELESS_MODE_N_5G(wirelessmode))
+ sifstime = 16;
+ else
+ sifstime = 10;
+ u1bAIFS = sifstime + (2 * val[0]);
+ rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
+ u1bAIFS);
+ rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
+ u1bAIFS);
+ rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
+ u1bAIFS);
+ rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
+ u1bAIFS);
+ }
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE:{
+ u8 reg_tmp;
+ u8 short_preamble = (bool) (*(u8 *) val);
+ reg_tmp = 0;
+ if (short_preamble)
+ reg_tmp |= 0x80;
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+ break;
+ }
+ case HW_VAR_AMPDU_MIN_SPACE:{
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *((u8 *) val);
+ if (min_spacing_to_set <= 7) {
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case NO_ENCRYPTION:
+ case AESCCMP_ENCRYPTION:
+ sec_min_space = 0;
+ break;
+ case WEP40_ENCRYPTION:
+ case WEP104_ENCRYPTION:
+ case TKIP_ENCRYPTION:
+ sec_min_space = 6;
+ break;
+ default:
+ sec_min_space = 7;
+ break;
+ }
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+ mac->min_space_cfg = ((mac->min_space_cfg &
+ 0xf8) |
+ min_spacing_to_set);
+ *val = min_spacing_to_set;
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg));
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY:{
+ u8 density_to_set;
+
+ density_to_set = *((u8 *) val);
+ density_to_set &= 0x1f;
+ mac->min_space_cfg &= 0x07;
+ mac->min_space_cfg |= (density_to_set << 3);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg));
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR:{
+ u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 factor_toset;
+ u8 *p_regtoset = NULL;
+ u8 index = 0;
+
+ p_regtoset = regtoset_normal;
+ factor_toset = *((u8 *) val);
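+ /* Convert the A-MPDU factor into an aggregation length limit and
+ * clamp both nibbles of each REG_AGGLEN_LMT byte to it. */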
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
+ for (index = 0; index < 4; index++) {
+ if ((p_regtoset[index] & 0xf0) >
+ (factor_toset << 4))
+ p_regtoset[index] =
+ (p_regtoset[index] & 0x0f)
+ | (factor_toset << 4);
+ if ((p_regtoset[index] & 0x0f) >
+ factor_toset)
+ p_regtoset[index] =
+ (p_regtoset[index] & 0xf0)
+ | (factor_toset);
+ rtl_write_byte(rtlpriv,
+ (REG_AGGLEN_LMT + index),
+ p_regtoset[index]);
+ }
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset));
+ }
+ break;
+ }
+ case HW_VAR_AC_PARAM:{
+ u8 e_aci = *((u8 *) val);
+ u32 u4b_ac_param;
+ u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+ u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+ u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
+
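+ /* Pack AIFS, ECWmin, ECWmax and TXOP into the EDCA parameter
+ * register format. */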
+ u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+ u4b_ac_param |= (u32) ((cw_min & 0xF) <<
+ AC_PARAM_ECW_MIN_OFFSET);
+ u4b_ac_param |= (u32) ((cw_max & 0xF) <<
+ AC_PARAM_ECW_MAX_OFFSET);
+ u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("queue:%x, ac_param:%x\n", e_aci,
+ u4b_ac_param));
+ switch (e_aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
+ u4b_ac_param);
+ break;
+ case AC0_BE:
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ u4b_ac_param);
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
+ u4b_ac_param);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
+ u4b_ac_param);
+ break;
+ default:
+ RT_ASSERT(false, ("SetHwReg8185(): invalid"
+ " aci: %d !\n", e_aci));
+ break;
+ }
+ if (rtlusb->acm_method != eAcmWay2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+ break;
+ }
+ case HW_VAR_ACM_CTRL:{
+ u8 e_aci = *((u8 *) val);
+ union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
+ (&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= AcmHw_BeqEn;
+ break;
+ case AC2_VI:
+ acm_ctrl |= AcmHw_ViqEn;
+ break;
+ case AC3_VO:
+ acm_ctrl |= AcmHw_VoqEn;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm));
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~AcmHw_BeqEn);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~AcmHw_ViqEn);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~AcmHw_VoqEn);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+ ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl));
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:{
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+ mac->rx_conf = ((u32 *) (val))[0];
+ RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
+ ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT:{
+ u8 retry_limit = ((u8 *) (val))[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
+ "ETRY_LIMIT(0x%08x)\n", retry_limit));
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ break;
+ case HW_VAR_IO_CMD:
+ rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ break;
+ case HW_VAR_SET_RPWM:{
+ u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
+
+ if (rpwm_val & BIT(7))
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+ (*(u8 *)val));
+ else
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+ ((*(u8 *)val) | BIT(7)));
+ break;
+ }
+ case HW_VAR_H2C_FW_PWRMODE:{
+ u8 psmode = (*(u8 *) val);
+
+ if ((psmode != FW_PS_ACTIVE_MODE) &&
+ (!IS_92C_SERIAL(rtlhal->version)))
+ rtl92c_dm_rf_saving(hw, true);
+ rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *) val);
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT:{
+ u8 mstatus = (*(u8 *) val);
+ u8 tmp_reg422;
+ bool recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AID, NULL);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x03);
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ if (tmp_reg422 & BIT(6))
+ recover = true;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ rtl92c_set_fw_rsvdpagepkt(hw, 0);
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ if (recover)
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 | BIT(6));
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ }
+ rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_AID:{
+ u16 u2btmp;
+
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+ (u2btmp | mac->assoc_id));
+ break;
+ }
+ case HW_VAR_CORRECT_TSF:{
+ u8 btype_ibss = ((u8 *) (val))[0];
+
+ if (btype_ibss == true)
+ _rtl92cu_stop_tx_beacon(hw);
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
+ 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32)((mac->tsf >> 32) & 0xffffffff));
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ if (btype_ibss == true)
+ _rtl92cu_resume_tx_beacon(hw);
+ break;
+ }
+ case HW_VAR_MGT_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+ break;
+ case HW_VAR_CTRL_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+ break;
+ case HW_VAR_DATA_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+ "not process\n"));
+ break;
+ }
+}
+
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 ratr_value = (u32) mac->basic_rates;
+ u8 *mcsrate = mac->mcs;
+ u8 ratr_index = 0;
+ u8 nmode = mac->ht_enable;
+ u8 mimo_ps = 1;
+ u16 shortgi_rate = 0;
+ u32 tmp_ratr_value = 0;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ ratr_value |= ((*(u16 *) (mcsrate))) << 12;
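+ /* Mask the rate bitmap according to the wireless mode and the RF
+ * configuration. */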
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == 0) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+ if (curtxbw_40mhz)
+ ratr_mask |= 0x00000010;
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+ break;
+ }
+ ratr_value &= 0x0FFFFFFF;
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
+ REG_ARFR0)));
+}
+
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 ratr_bitmap = (u32) mac->basic_rates;
+ u8 *p_mcsrate = mac->mcs;
+ u8 ratr_index = 0;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
+ enum wireless_mode wirelessmode = mac->mode;
+ bool shortgi = false;
+ u8 rate_mask[5];
+ u8 macid = 0;
+ u8 mimops = 1;
+
+ ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_A;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ if (mimops == 0) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
+ }
+ }
+ }
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
+ ratr_bitmap));
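+ /* Bytes 0-3 of the H2C payload carry the rate bitmap with the rate
+ * index in the top nibble; byte 4 carries the macid, a short-GI flag
+ * (0x20) and what is presumably an update flag (0x80). */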
+ *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
+ ratr_index << 28);
+ rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+ "ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]));
+ rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+}
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *)&mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x0e0e;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp = 0;
+ bool actuallyset = false;
+ unsigned long flag = 0;
+ /* to do - usb autosuspend */
+ u8 usb_autosuspend = 0;
+
+ if (ppsc->swrf_processing)
+ return false;
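+ /* Serialize RF power-state changes via rf_ps_lock. */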
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ cur_rfstate = ppsc->rfpwr_state;
+ if (usb_autosuspend) {
+ /* to do................... */
+ } else {
+ if (ppsc->pwrdown_mode) {
+ u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
+ e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
+ ERFOFF : ERFON;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
+ } else {
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
+ rtl_read_byte(rtlpriv,
+ REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+ e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
+ ERFON : ERFOFF;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ ("GPIO_IN=%02x\n", u1tmp));
+ }
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
+ e_rfpowerstate_toset));
+ }
+ if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW "
+ "Radio ON, RF ON\n"));
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
+ ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW"
+ " Radio OFF\n"));
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
+ ("pHalData->bHwRadioOff and eRfPowerStateToSet do not"
+ " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet "
+ "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
+ }
+ if (actuallyset) {
+ ppsc->hwradiooff = 1;
+ if (e_rfpowerstate_toset == ERFON) {
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ else if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+ && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3))
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+ }
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ /* For the power-down module, we need to enable the register block
+ * control register at 0x1c, then set the power-down control bits
+ * of register 0x04 (BIT4 and BIT15) to 1.
+ */
+ if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
+ /* Enable register area 0x0-0xc. */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+ if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
+ /*
+ * We should configure HW PDn source for WiFi
+ * ONLY, and then our HW will be set in
+ * power-down mode if PDn source from all
+ * functions are configured.
+ */
+ u1tmp = rtl_read_byte(rtlpriv,
+ REG_MULTI_FUNC_CTRL);
+ rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
+ (u1tmp|WL_HWPDN_EN));
+ } else {
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
+ }
+ }
+ if (e_rfpowerstate_toset == ERFOFF) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+ }
+ } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
+ /* Enter D3 or ASPM after GPIO had been done. */
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ } else {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
new file mode 100644
index 000000000000..62af555bb61c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -0,0 +1,116 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_HW_H__
+#define __RTL92CU_HW_H__
+
+#define H2C_RA_MASK 6
+
+#define LLT_POLLING_LLT_THRESHOLD 20
+#define LLT_POLLING_READY_TIMEOUT_COUNT 100
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+
+#define RX_PAGE_SIZE_REG_VALUE PBP_128
+/* Note: The number of pages is divided equally among the queues
+ * other than the public queue. */
+#define TX_TOTAL_PAGE_NUMBER 0xF8
+#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
+
+
+#define CHIP_B_PAGE_NUM_PUBQ 0xE7
+
+/* For Test Chip Setting
+ * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
+#define CHIP_A_PAGE_NUM_PUBQ 0x7E
+
+
+/* For Chip A Setting */
+#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5
+#define WMM_CHIP_A_TX_PAGE_BOUNDARY \
+ (WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_A_PAGE_NUM_PUBQ 0xA3
+#define WMM_CHIP_A_PAGE_NUM_HPQ 0x29
+#define WMM_CHIP_A_PAGE_NUM_LPQ 0x29
+
+
+
+/* Note: For Chip B setting, modify later. */
+#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5
+#define WMM_CHIP_B_TX_PAGE_BOUNDARY \
+ (WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_B_PAGE_NUM_PUBQ 0xB0
+#define WMM_CHIP_B_PAGE_NUM_HPQ 0x29
+#define WMM_CHIP_B_PAGE_NUM_LPQ 0x1C
+#define WMM_CHIP_B_PAGE_NUM_NPQ 0x1C
+
+#define BOARD_TYPE_NORMAL_MASK 0xE0
+#define BOARD_TYPE_TEST_MASK 0x0F
+
+/* should be renamed and moved to another file */
+enum _BOARD_TYPE_8192CUSB {
+ BOARD_USB_DONGLE = 0, /* USB dongle */
+ BOARD_USB_High_PA = 1, /* USB dongle - high power PA */
+ BOARD_MINICARD = 2, /* Minicard */
+ BOARD_USB_SOLO = 3, /* USB solo-Slim module */
+ BOARD_USB_COMBO = 4, /* USB Combo-Slim module */
+};
+
+#define IS_HIGHT_PA(boardtype) \
+ ((boardtype == BOARD_USB_High_PA) ? true : false)
+
+#define RTL92C_DRIVER_INFO_SIZE 4
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw);
+int rtl92cu_hw_init(struct ieee80211_hw *hw);
+void rtl92cu_card_disable(struct ieee80211_hw *hw);
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+u8 _rtl92c_get_chnl_group(u8 chnl);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
new file mode 100644
index 000000000000..332c74348a69
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl92cu_init_led(struct ieee80211_hw *hw,
+ struct rtl_led *pled, enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->ledon = false;
+}
+
+static void _rtl92cu_deInit_led(struct rtl_led *pled)
+{
+}
+
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ rtl_write_byte(rtlpriv,
+ REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
+ break;
+ case LED_PIN_LED1:
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ pled->ledon = true;
+}
+
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (usbpriv->ledctl.led_opendrain == true)
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+ else
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+ break;
+ case LED_PIN_LED1:
+ ledcfg &= 0x0f;
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ pled->ledon = false;
+}
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+ _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+ _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
+ _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
+}
+
+static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+}
+
+void rtl92cu_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
+ ledaction));
+ _rtl92cu_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
new file mode 100644
index 000000000000..decaee4d1eb1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_LED_H__
+#define __RTL92CU_LED_H__
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
new file mode 100644
index 000000000000..f8514cba17b6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -0,0 +1,1144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+****************************************************************************/
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../cam.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+/* macro to shorten lines */
+
+#define LINK_Q ui_link_quality
+#define RX_EVM rx_evm_percentage
+#define RX_SIGQ rx_mimo_signalquality
+
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ enum version_8192c chip_version = VERSION_UNKNOWN;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+ if (value32 & TRP_VAUX_EN) {
+ chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
+ VERSION_TEST_CHIP_88C;
+ } else {
+ /* Normal mass production chip. */
+ chip_version = NORMAL_CHIP;
+ chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
+ chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
+ /* RTL8723 with BT function. */
+ chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
+ if (IS_VENDOR_UMC(chip_version))
+ chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
+ CHIP_VENDOR_UMC_B_CUT : 0);
+ if (IS_92C_SERIAL(chip_version)) {
+ value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
+ chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
+ CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
+ } else if (IS_8723_SERIES(chip_version)) {
+ value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
+ chip_version |= ((value32 & RF_RL_ID) ?
+ CHIP_8723_DRV_REV : 0);
+ }
+ }
+ rtlhal->version = (enum version_8192c)chip_version;
+ switch (rtlhal->version) {
+ case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_92C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_88C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_i"
+ "92C_1T2R_A_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+ "92C_A_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_88C_A_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_92C_1T2R_B_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_92C_B_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_88C_B_CUT.\n"));
+ break;
+ case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+ "_8723_1T1R_A_CUT.\n"));
+ break;
+ case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+ "_8723_1T1R_B_CUT.\n"));
+ break;
+ case VERSION_TEST_CHIP_92C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
+ break;
+ case VERSION_TEST_CHIP_88C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: ???????????????.\n"));
+ break;
+ }
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtlphy->rf_type =
+ (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
+ else
+ rtlphy->rf_type = RF_1T1R;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R"));
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+ rtlhal->version));
+}
+
+/**
+ * rtl92c_llt_write - LLT table write access
+ * @hw: mac80211 hardware context
+ * @address: LLT logical address.
+ * @data: LLT data content
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
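+ /* Issue the write through REG_LLT_INIT and poll until the operation
+ * completes or the retry threshold is exceeded. */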
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to polling write LLT done at"
+ " address %d! _LLT_OP_VALUE(%x)\n",
+ address, _LLT_OP_VALUE(value)));
+ status = false;
+ break;
+ }
+ } while (++count);
+ return status;
+}
+/**
+ * rtl92c_init_llt_table - initialize the LLT table
+ * @hw: mac80211 hardware context
+ * @boundary: first page of the reserved (ring buffer) region
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
+{
+ bool rst = true;
+ u32 i;
+
+ for (i = 0; i < (boundary - 1); i++) {
+ rst = rtl92c_llt_write(hw, i, i + 1);
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #1 fail\n", __func__);
+ return rst;
+ }
+ }
+ /* end of list */
+ rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #2 fail\n", __func__);
+ return rst;
+ }
+ /* Make the remaining pages into a ring buffer.
+ * This ring buffer is used as the beacon buffer if this MAC is
+ * configured for dual-MAC transfer; otherwise it is used as the
+ * local loopback buffer.
+ */
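+ /*
+ * Resulting LLT page chain (sketch, with B = boundary and LAST =
+ * LLT_LAST_ENTRY_OF_TX_PKT_BUFFER):
+ * TX packet buffer: 0 -> 1 -> ... -> B-1 -> 0xFF (end of list)
+ * Ring buffer: B -> B+1 -> ... -> LAST -> B (wraps back to B)
+ */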
+ for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
+ rst = rtl92c_llt_write(hw, i, (i + 1));
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #3 fail\n", __func__);
+ return rst;
+ }
+ }
+ /* Let last entry point to the start entry of ring buffer */
+ rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #4 fail\n", __func__);
+ return rst;
+ }
+ return rst;
+}
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
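+ /*
+ * CAM entry selection in the code below: default/WEP keys use the
+ * placeholder addresses in cam_const_addr[] with entry id equal to
+ * key_index, group keys use the broadcast address cam_const_broad,
+ * and pairwise keys go to CAM_PAIRWISE_KEY_POSITION with the peer
+ * MAC address.
+ */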
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("iillegal switch case\n"));
+ enc_algo = CAM_TKIP;
+ break;
+ }
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ key_index = PAIRWISE_KEYIDX;
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("delete one entry\n"));
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("add one entry\n"));
+ if (is_pairwise) {
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+ "Pairwiase Key content :",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->sec.
+ key_len[PAIRWISE_KEYIDX]);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set Pairwiase key\n"));
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.
+ key_buf[key_index]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ }
+ }
+}
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
+}
+
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
+ 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
+ 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+ } else {
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
+ 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
+ 0xFFFFFFFF);
+ rtlusb->irq_enabled = true;
+ }
+}
+
+void rtl92c_init_interrupt(struct ieee80211_hw *hw)
+{
+ rtl92c_enable_interrupt(hw);
+}
+
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal))
+ rtlpci->irq_enabled = false;
+ else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+ rtlusb->irq_enabled = false;
+}
+
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 u4b_ac_param;
+
+ rtl92c_dm_init_edca_turbo(hw);
+ u4b_ac_param = (u32) mac->ac[aci].aifs;
+ u4b_ac_param |=
+ ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
+ AC_PARAM_ECW_MIN_OFFSET;
+ u4b_ac_param |=
+ ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
+ AC_PARAM_ECW_MAX_OFFSET;
+ u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
+ AC_PARAM_TXOP_OFFSET;
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
+ ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
+ break;
+ case AC0_BE:
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
+ break;
+ default:
+ RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ break;
+ }
+}
+
+/*-------------------------------------------------------------------------
+ * HW MAC Address
+ *-------------------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
+{
+ u32 i;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ for (i = 0 ; i < ETH_ALEN ; i++)
+ rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
+ "%02X-%02X-%02X\n",
+ rtl_read_byte(rtlpriv, REG_MACID),
+ rtl_read_byte(rtlpriv, REG_MACID+1),
+ rtl_read_byte(rtlpriv, REG_MACID+2),
+ rtl_read_byte(rtlpriv, REG_MACID+3),
+ rtl_read_byte(rtlpriv, REG_MACID+4),
+ rtl_read_byte(rtlpriv, REG_MACID+5)));
+}
+
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
+}
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ u8 value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ value = NT_NO_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ value = NT_LINK_AD_HOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ value = NT_LINK_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ value = NT_AS_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Network type %d not support!\n", type));
+ return -EOPNOTSUPP;
+ }
+ rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+ return 0;
+}
+
+void rtl92c_init_network_type(struct ieee80211_hw *hw)
+{
+ rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
+}
+
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
+{
+ u16 value16;
+ u32 value32;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* Response Rate Set */
+ value32 = rtl_read_dword(rtlpriv, REG_RRSR);
+ value32 &= ~RATE_BITMAP_ALL;
+ value32 |= RATE_RRSR_CCK_ONLY_1M;
+ rtl_write_dword(rtlpriv, REG_RRSR, value32);
+ /* SIFS (used in NAV) */
+ value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+ rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16);
+ /* Retry Limit */
+ value16 = _LRL(0x30) | _SRL(0x30);
+ rtl_write_dword(rtlpriv, REG_RL, value16);
+}
+
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* Set Data Auto Rate Fallback Retry Count register. */
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
+ rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605);
+}
+
+static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+ u8 ctx_sifs)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
+ rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
+}
+
+static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+ u8 ctx_sifs)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
+ rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
+}
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+ u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
+{
+ /* sequence: VO, VI, BE, BK ==> the same as the 92C hardware design.
+ * Reference: enum nl80211_txq_q or the ieee80211_set_wmm_default function.
+ */
+ u32 value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ value = (u32)aifs;
+ value |= ((u32)cw_min & 0xF) << 8;
+ value |= ((u32)cw_max & 0xF) << 12;
+ value |= (u32)txop << 16;
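+ /*
+ * Resulting EDCA parameter register layout (from the shifts above):
+ * bits [31:16] TXOP, bits [15:12] ECWmax, bits [11:8] ECWmin,
+ * bits [7:0] AIFS. rtl92c_set_qos() builds the same value through the
+ * AC_PARAM_*_OFFSET constants.
+ */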
+ /* 92C hardware register sequence is the same as queue number. */
+ rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
+}
+
+void rtl92c_init_edca(struct ieee80211_hw *hw)
+{
+ u16 value16;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* disable EDCCA countdown to reduce collisions and retries */
+ value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+ value16 |= DIS_EDCA_CNT_DWN;
+ rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
+ /* Update SIFS timing (vendor reference value: pHalData->SifsTime = 0x0e0e0a0a) */
+ rtl92c_set_cck_sifs(hw, 0xa, 0xa);
+ rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
+ /* Set CCK/OFDM SIFS to be 10us. */
+ rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
+ rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
+ rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
+ /* TXOP */
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
+ /* PIFS */
+ rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+ /* AGGR BREAK TIME Register */
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
+ rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
+}
+
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+ /* init AMPDU aggregation number, tuned for Tx throughput */
+ rtl_write_word(rtlpriv, 0x4CA, 0x0708);
+}
+
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+}
+
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
+ rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+ rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+void rtl92c_init_retry_function(struct ieee80211_hw *hw)
+{
+ u8 value8;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
+ value8 |= EN_AMPDU_RTY_NEW;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
+ /* Set ACK timeout */
+ rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+}
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+ enum version_8192c version)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+ rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+ else
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
+}
+
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
+}
+
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+}
+
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
+}
+
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+}
+
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
+}
+
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+}
+
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
+}
+/*==============================================================*/
+
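+/*
+ * _rtl92c_query_rxpwrpercentage() converts an antenna power reading in dBm
+ * into a 0-100 percentage. Sample points (from the checks below):
+ * <= -100 dBm or >= 20 dBm -> 0 (treated as invalid),
+ * -50 dBm -> 50, 0 dBm up to 19 dBm -> 100.
+ */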
+static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return 100 + antpower;
+}
+
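+/*
+ * _rtl92c_evm_db_to_percentage() clamps the reported EVM to [-33, 0] dB and
+ * scales it by -3, so for example -20 dB -> 60 % and -33 dB -> 99, which is
+ * then rounded up to 100 %.
+ */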
+static u8 _rtl92c_evm_db_to_percentage(char value)
+{
+ char ret_val;
+
+ ret_val = value;
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
+ ret_val = 100;
+ return ret_val;
+}
+
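+/*
+ * _rtl92c_translate_todbm() halves the 0-100 signal strength index and
+ * offsets it by -95 dBm, e.g. an index of 60 maps to (61 >> 1) - 95 = -65 dBm.
+ */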
+static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
+static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+ long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+ return retsig;
+}
+
+static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct phy_sts_cck_8192s_t *cck_buf;
+ s8 rx_pwr_all = 0, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool in_powersavemode = false;
+ bool is_cck_rate;
+
+ is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->RX_SIGQ[0] = -1;
+ pstats->RX_SIGQ[1] = -1;
+ if (is_cck_rate) {
+ u8 report, cck_highpwr;
+ cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
+ if (!in_powersavemode)
+ cck_highpwr = rtlphy->cck_high_power;
+ else
+ cck_highpwr = false;
+ if (!cck_highpwr) {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = cck_buf->cck_agc_rpt & 0xc0;
+ report = report >> 6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = p_drvinfo->cfosho[0] & 0x60;
+ report = report >> 5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ }
+ }
+ pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (packet_match_bssid) {
+ u8 sq;
+ if (pstats->rx_pwdb_all > 40)
+ sq = 100;
+ else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+ pstats->signalquality = sq;
+ pstats->RX_SIGQ[0] = sq;
+ pstats->RX_SIGQ[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+ rx_pwr[i] =
+ ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+ rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+
+ if (packet_match_bssid)
+ pstats->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+ pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (GET_RX_DESC_RX_MCS(pdesc) &&
+ GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
+ GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+ if (packet_match_bssid) {
+ if (i == 0)
+ pstats->signalquality =
+ (u8) (evm & 0xff);
+ pstats->RX_SIGQ[i] =
+ (u8) (evm & 0xff);
+ }
+ }
+ }
+ if (is_cck_rate)
+ pstats->signalstrength =
+ (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+ else if (rf_rx_num != 0)
+ pstats->signalstrength =
+ (u8) (_rtl92c_signal_scale_mapping
+ (hw, total_rssi /= rf_rx_num));
+}
+
+static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+ u32 last_rssi, tmpval;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ rtlpriv->stats.rssi_calculate_cnt++;
+ if (rtlpriv->stats.ui_rssi.total_num++ >=
+ PHY_RSSI_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_rssi.total_num =
+ PHY_RSSI_SLID_WIN_MAX;
+ last_rssi =
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->
+ stats.ui_rssi.index];
+ rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+ }
+ rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
+ index++] = pstats->signalstrength;
+ if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+ rtlpriv->stats.ui_rssi.index = 0;
+ tmpval = rtlpriv->stats.ui_rssi.total_val /
+ rtlpriv->stats.ui_rssi.total_num;
+ rtlpriv->stats.signal_strength =
+ _rtl92c_translate_todbm(hw, (u8) tmpval);
+ pstats->rssi = rtlpriv->stats.signal_strength;
+ }
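+ /*
+ * Per-path RSSI below is smoothed with a simple IIR filter:
+ * new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR,
+ * with the result bumped by one when the new sample is higher than
+ * the running value.
+ */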
+ if (!pstats->is_cck && pstats->packet_toself) {
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
+ continue;
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstats->rx_mimo_signalstrength[rfpath];
+ }
+ if (pstats->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.
+ rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] +
+ 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.
+ rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+ }
+}
+
+static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+ if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power =
+ (rtlpriv->stats.recv_signal_power * 5 +
+ pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ return;
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ }
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (undecorated_smoothed_pwdb < 0)
+ undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
+ + 1;
+ } else {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->dm.undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb;
+ _rtl92c_update_rxsignalstatistics(hw, pstats);
+ }
+}
+
+static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_evm = 0, n_stream, tmpval;
+
+ if (pstats->signalquality != 0) {
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (rtlpriv->stats.LINK_Q.total_num++ >=
+ PHY_LINKQUALITY_SLID_WIN_MAX) {
+ rtlpriv->stats.LINK_Q.total_num =
+ PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm =
+ rtlpriv->stats.LINK_Q.elements
+ [rtlpriv->stats.LINK_Q.index];
+ rtlpriv->stats.LINK_Q.total_val -=
+ last_evm;
+ }
+ rtlpriv->stats.LINK_Q.total_val +=
+ pstats->signalquality;
+ rtlpriv->stats.LINK_Q.elements
+ [rtlpriv->stats.LINK_Q.index++] =
+ pstats->signalquality;
+ if (rtlpriv->stats.LINK_Q.index >=
+ PHY_LINKQUALITY_SLID_WIN_MAX)
+ rtlpriv->stats.LINK_Q.index = 0;
+ tmpval = rtlpriv->stats.LINK_Q.total_val /
+ rtlpriv->stats.LINK_Q.total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ for (n_stream = 0; n_stream < 2;
+ n_stream++) {
+ if (pstats->RX_SIGQ[n_stream] != -1) {
+ if (!rtlpriv->stats.RX_EVM[n_stream]) {
+ rtlpriv->stats.RX_EVM[n_stream]
+ = pstats->RX_SIGQ[n_stream];
+ }
+ rtlpriv->stats.RX_EVM[n_stream] =
+ ((rtlpriv->stats.RX_EVM
+ [n_stream] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->RX_SIGQ
+ [n_stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+ }
+ }
+}
+
+static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
+{
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
+ return;
+ _rtl92c_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92c_process_pwdb(hw, pcurrent_stats);
+ _rtl92c_process_LINK_Q(hw, pcurrent_stats);
+}
+
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ __le16 fc;
+ u16 type, cpu_fc;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
+
+ tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = hdr->frame_control;
+ cpu_fc = le16_to_cpu(fc);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ psaddr = hdr->addr2;
+ packet_matchbssid =
+ ((IEEE80211_FTYPE_CTL != type) &&
+ (!compare_ether_addr(mac->bssid,
+ (cpu_fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) &&
+ (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+
+ packet_toself = packet_matchbssid &&
+ (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ if (ieee80211_is_beacon(fc))
+ packet_beacon = true;
+ _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+ _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
new file mode 100644
index 000000000000..298fdb724aa5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -0,0 +1,180 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_MAC_H__
+#define __RTL92C_MAC_H__
+
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+#define DRIVER_EARLY_INT_TIME 0x05
+#define BCN_DMA_ATIME_INT_TIME 0x02
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw);
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary);
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
+
+
+/*---------------------------------------------------------------
+ * Hardware init functions
+ *---------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
+void rtl92c_init_interrupt(struct ieee80211_hw *hw);
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92c_init_network_type(struct ieee80211_hw *hw);
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw);
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw);
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+ u16 queue,
+ u16 txop,
+ u8 cw_min,
+ u8 cw_max,
+ u8 aifs);
+
+void rtl92c_init_edca(struct ieee80211_hw *hw);
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
+void rtl92c_init_retry_function(struct ieee80211_hw *hw);
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+ enum version_8192c version);
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
+
+/* For filter */
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
+
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
+
+#define RX_HAL_IS_CCK_RATE(_pdesc)\
+ (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
+ GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
+ GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
+ GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
+
+struct rx_fwinfo_92c {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct rx_desc_92c {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+ u32 macid:5; /* word 1 */
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+ u32 seq:12; /* word 2 */
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+ u32 rxmcs:6; /* word 3 */
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+ u32 iv1; /* word 4 */
+ u32 tsfl; /* word 5 */
+ u32 bufferaddress; /* word 6 */
+ u32 bufferaddress64; /* word 7 */
+} __packed;
+
+enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc,
+ unsigned int skb_queue);
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo);
+
+/*---------------------------------------------------------------
+ * Card disable functions
+ *---------------------------------------------------------------*/
+
+
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
new file mode 100644
index 000000000000..4e020e654e6b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask));
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath, regaddr);
+ } else {
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath, regaddr);
+ }
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value));
+ return readback_value;
+}
+
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 original_value, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ } else {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+}
+
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool is92c = IS_92C_SERIAL(rtlhal->version);
+
+ rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
+ if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
+ rtl_write_byte(rtlpriv, 0x14, 0x71);
+ return rtstatus;
+}
+
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 regval;
+ u8 b_reg_hwparafile = 1;
+
+ _rtl92c_phy_init_bb_rf_register_definition(hw);
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) |
+ BIT(0) | BIT(1));
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
+ FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ } else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
+ FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ }
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ if (b_reg_hwparafile == 1)
+ rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
+ return rtstatus;
+}
+
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+ arraylength = rtlphy->hwparam_tables[MAC_REG].length;
+ ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+ return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *phy_regarray_table;
+ u32 *agctab_array_table;
+ u16 phy_reg_arraylen, agctab_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length;
+ agctab_array_table = rtlphy->hwparam_tables[AGCTAB_2T].pdata;
+ phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length;
+ phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata;
+ } else {
+ agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length;
+ agctab_array_table = rtlphy->hwparam_tables[AGCTAB_1T].pdata;
+ phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length;
+ phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata;
+ }
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+ if (phy_regarray_table[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table[i] == 0xf9)
+ udelay(1);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+ phy_regarray_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The phy_regarray_table[0] is %x"
+ " Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]));
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
+ agctab_array_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The agctab_array_table[0] is "
+ "%x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+
+ rtlphy->pwrgroup_cnt = 0;
+ phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length;
+ phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+ if (phy_regarray_table_pg[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table_pg[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table_pg[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table_pg[i] == 0xf9)
+ udelay(1);
+ _rtl92c_store_pwrIndex_diffrate_offset(hw,
+ phy_regarray_table_pg[i],
+ phy_regarray_table_pg[i + 1],
+ phy_regarray_table_pg[i + 2]);
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+ int i;
+ u32 *radioa_array_table;
+ u32 *radiob_array_table;
+ u16 radioa_arraylen, radiob_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length;
+ radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
+ radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
+ radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+ } else {
+ radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
+ radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
+ radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
+ radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ if (radioa_array_table[i] == 0xfe)
+ mdelay(50);
+ else if (radioa_array_table[i] == 0xfd)
+ mdelay(5);
+ else if (radioa_array_table[i] == 0xfc)
+ mdelay(1);
+ else if (radioa_array_table[i] == 0xfb)
+ udelay(50);
+ else if (radioa_array_table[i] == 0xfa)
+ udelay(5);
+ else if (radioa_array_table[i] == 0xf9)
+ udelay(1);
+ else {
+ rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ if (radiob_array_table[i] == 0xfe) {
+ mdelay(50);
+ } else if (radiob_array_table[i] == 0xfd)
+ mdelay(5);
+ else if (radiob_array_table[i] == 0xfc)
+ mdelay(1);
+ else if (radiob_array_table[i] == 0xfb)
+ udelay(50);
+ else if (radiob_array_table[i] == 0xfa)
+ udelay(5);
+ else if (radiob_array_table[i] == 0xf9)
+ udelay(1);
+ else {
+ rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ return true;
+}
+
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz"))
+ if (is_hal_stop(rtlhal)) {
+ rtlphy->set_bwmode_inprogress = false;
+ return;
+ }
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ reg_prsr_rsc =
+ (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+ (mac->cur_40_prime_sc >> 1));
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
+ rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+ (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+ rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ mutex_lock(&rtlpriv->io.bb_mutex);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+ mutex_unlock(&rtlpriv->io.bb_mutex);
+}
+
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
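+ /*
+ * LC calibration outline, as implemented below: either pause all TX
+ * (REG_TXPAUSE) or temporarily park the active RF path(s), trigger
+ * the calibration by setting bit 15 (0x08000) in RF register 0x18,
+ * wait 100 ms for it to finish, then restore the original state.
+ */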
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
+ mdelay(100);
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+}
+
+bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ ppsc->set_rfpowerstate_inprogress = true;
+ switch (rfpwr_state) {
+ case ERFON:
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus;
+ u32 InitializeCount = 0;
+
+ do {
+ InitializeCount++;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic enable\n"));
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while ((rtstatus != true)
+ && (InitializeCount < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("Set ERFON sleeped:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies)));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl92ce_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ }
+ break;
+ case ERFOFF:
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0 ||
+ queue_id == BEACON_QUEUE) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] "
+ "=%d before doze!\n", (i + 1),
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\nERFOFF: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic disable\n"));
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies)));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl92c_phy_set_rf_sleep(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ ppsc->set_rfpowerstate_inprogress = false;
+ return bresult;
+}
+
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
new file mode 100644
index 000000000000..06299559ab68
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/phy.h"
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw);
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
new file mode 100644
index 000000000000..7f1be614c998
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
new file mode 100644
index 000000000000..1c79c226f145
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -0,0 +1,493 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | 0x0400);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", bandwidth));
+ break;
+ }
+}
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
+ if (rtlhal->interface == INTF_PCI) {
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+ } else {
+ if ((rtlefuse->eeprom_regulatory != 0) ||
+ (rtlefuse->external_pa))
+ turbo_scanoff = true;
+ }
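+	/*
+	 * While scanning, force the CCK TX AGC to the 0x3f maximum for every
+	 * rate unless "turbo scan" is switched off, in which case the
+	 * requested power levels are used (clamped to 0x20 on USB parts with
+	 * an external PA).
+	 */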
+ if (mac->act_scanning == true) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ if (rtlhal->interface == INTF_USB) {
+ if (tx_agc[idx1] > 0x20 &&
+ rtlefuse->external_pa)
+ tx_agc[idx1] = 0x20;
+ }
+ }
+ }
+ } else {
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1) {
+ tx_agc[RF90_PATH_A] = 0x10101010;
+ tx_agc[RF90_PATH_B] = 0x10101010;
+		} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			   TXHIGHPWRLEVEL_LEVEL2) {
+			tx_agc[RF90_PATH_A] = 0x00000000;
+			tx_agc[RF90_PATH_B] = 0x00000000;
+		} else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+ [0][6]) +
+ (rtlphy->mcs_txpwrlevel_origoffset
+ [0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+ [0][14]) +
+ (rtlphy->mcs_txpwrlevel_origoffset
+ [0][15] << 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+ }
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *) (&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK1_MCS32));
+
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+ if (mac->mode == WIRELESS_MODE_B)
+ tmpval = tmpval & 0xff00ffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK1_55_MCS32));
+}
+
+static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 powerBase0, powerBase1;
+ u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
+ u8 i, powerlevel[2];
+
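+	/*
+	 * Build one 32-bit power base per RF path: the OFDM base adds the
+	 * legacy/HT power difference from efuse, the MCS base adds the HT20
+	 * offset on 20 MHz channels; each byte of the word carries the same
+	 * per-path value.
+	 */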
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = ppowerlevel[i];
+ legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+ powerBase0 = powerlevel[i] + legacy_pwrdiff;
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+ (powerBase0 << 8) | powerBase0;
+ *(ofdmbase + i) = powerBase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ }
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+ ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+ powerlevel[i] += ht20_pwrdiff;
+ }
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1 << 24) |
+ (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+ *(mcsbase + i) = powerBase1;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ }
+}
+
+static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerBase0,
+ u32 *powerBase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup = 0, pwr_diff_limit[4];
+ u32 writeVal, customer_limit, rf;
+
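+	/*
+	 * For each RF path, derive the value to program from the power base
+	 * plus an offset chosen by the efuse "regulatory" setting (cases
+	 * below), then apply any dynamic high-power override.
+	 */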
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+ writeVal = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("RTK better performance, writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+ if (rtlphy->pwrgroup_cnt >= 3) {
+ if (channel <= 3)
+ chnlgroup = 0;
+ else if (channel >= 4 && channel <= 9)
+ chnlgroup = 1;
+ else if (channel > 9)
+ chnlgroup = 2;
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20)
+ chnlgroup++;
+ else
+ chnlgroup += 4;
+ }
+ writeVal = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index +
+ (rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, 20MHz, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 2:
+ writeVal = ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("Better regulatory, writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 3:
+ chnlgroup = 0;
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					("customer's limit, 40MHz rf(%c) = "
+ "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1]));
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 20MHz rf(%c) = "
+ "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1]));
+ }
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)]
+ & (0x7f << (i * 8))) >> (i * 8));
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20_40) {
+ if (pwr_diff_limit[i] >
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1])
+ pwr_diff_limit[i] = rtlefuse->
+ pwrgroup_ht40[rf]
+ [channel - 1];
+ } else {
+ if (pwr_diff_limit[i] >
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1])
+ pwr_diff_limit[i] =
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1];
+ }
+ }
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit));
+ writeVal = customer_limit + ((index < 2) ?
+ powerBase0[rf] : powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer, writeVal rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ default:
+ chnlgroup = 0;
+ writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerBase0[rf] : powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
+				"performance, writeVal rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ }
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1)
+ writeVal = 0x14141414;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL2)
+ writeVal = 0x00000000;
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeVal = writeVal;
+ *(p_outwriteval + rf) = writeVal;
+ }
+}
+
+static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pValue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
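+	/*
+	 * Each 32-bit value packs four per-rate power bytes; clamp each byte
+	 * to RF6052_MAX_TX_PWR before writing it to the path A/B TX AGC
+	 * register selected by 'index'.
+	 */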
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
+ (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Set 0x%x = %08x\n", regoffset, writeVal));
+ if (((get_rf_type(rtlphy) == RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS15_MCS12)) ||
+ ((get_rf_type(rtlphy) != RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS07_MCS04 ||
+ regoffset == RTXAGC_B_MCS07_MCS04))) {
+ writeVal = pwr_val[3];
+ if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_A_MCS07_MCS04)
+ regoffset = 0xc90;
+ if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+ (u8)writeVal);
+ }
+ }
+ }
+}
+
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel)
+{
+ u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u8 index = 0;
+
+ rtl92c_phy_get_power_base(hw, ppowerlevel,
+ channel, &powerBase0[0], &powerBase1[0]);
+ for (index = 0; index < 6; index++) {
+ _rtl92c_get_txpower_writeval_by_regulatory(hw,
+ channel, index,
+ &powerBase0[0],
+ &powerBase1[0],
+ &writeVal[0]);
+ _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ }
+}
+
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool rtstatus = true;
+ u8 b_reg_hwparafile = 1;
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+ if (b_reg_hwparafile == 1)
+ rtstatus = _rtl92c_phy_rf6052_config_parafile(hw);
+ return rtstatus;
+}
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+ bool rtstatus = true;
+ struct bb_reg_def *pphyreg;
+
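+	/*
+	 * For each active RF path: save the RF-environment bits, enable RF
+	 * environment control, zero the 3-wire address/data lengths, load
+	 * the radio registers from the header-file tables, then restore the
+	 * saved environment bits.
+	 */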
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV, u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16, u4_regvalue);
+ break;
+ }
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio[%d] Fail!!", rfpath));
+ goto phy_rf_cfg_fail;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ return rtstatus;
+phy_rf_cfg_fail:
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
new file mode 100644
index 000000000000..86c2728cfa00
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_RF_H__
+#define __RTL92CU_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+
+extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+extern void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+					       u8 *ppowerlevel);
+extern void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+						u8 *ppowerlevel, u8 channel);
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
new file mode 100644
index 000000000000..71244a38d49e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -0,0 +1,336 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../efuse.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "rf.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "hw.h"
+#include <linux/vmalloc.h>
+
+MODULE_AUTHOR("Georgia <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+
+static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
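+	/* reserve a 16 KB (0x4000) buffer for the rtl8192cufw.bin image */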
+ rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't alloc buffer for fw.\n"));
+ return 1;
+ }
+ return 0;
+}
+
+static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.pfirmware) {
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+}
+
+static struct rtl_hal_ops rtl8192cu_hal_ops = {
+ .init_sw_vars = rtl92cu_init_sw_vars,
+ .deinit_sw_vars = rtl92cu_deinit_sw_vars,
+ .read_chip_version = rtl92c_read_chip_version,
+ .read_eeprom_info = rtl92cu_read_eeprom_info,
+ .enable_interrupt = rtl92c_enable_interrupt,
+ .disable_interrupt = rtl92c_disable_interrupt,
+ .hw_init = rtl92cu_hw_init,
+ .hw_disable = rtl92cu_card_disable,
+ .set_network_type = rtl92cu_set_network_type,
+ .set_chk_bssid = rtl92cu_set_check_bssid,
+ .set_qos = rtl92c_set_qos,
+ .set_bcn_reg = rtl92cu_set_beacon_related_registers,
+ .set_bcn_intv = rtl92cu_set_beacon_interval,
+ .update_interrupt_mask = rtl92cu_update_interrupt_mask,
+ .get_hw_reg = rtl92cu_get_hw_reg,
+ .set_hw_reg = rtl92cu_set_hw_reg,
+ .update_rate_table = rtl92cu_update_hal_rate_table,
+ .update_rate_mask = rtl92cu_update_hal_rate_mask,
+ .fill_tx_desc = rtl92cu_tx_fill_desc,
+ .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
+ .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
+ .cmd_send_packet = rtl92cu_cmd_send_packet,
+ .query_rx_desc = rtl92cu_rx_query_desc,
+ .set_channel_access = rtl92cu_update_channel_access_setting,
+ .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl92c_phy_set_bw_mode,
+ .switch_channel = rtl92c_phy_sw_chnl,
+ .dm_watchdog = rtl92c_dm_watchdog,
+ .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
+ .led_control = rtl92cu_led_control,
+ .enable_hw_sec = rtl92cu_enable_hw_security_config,
+ .set_key = rtl92c_set_key,
+ .init_sw_leds = rtl92cu_init_sw_leds,
+ .deinit_sw_leds = rtl92cu_deinit_sw_leds,
+ .get_bbreg = rtl92c_phy_query_bb_reg,
+ .set_bbreg = rtl92c_phy_set_bb_reg,
+ .get_rfreg = rtl92cu_phy_query_rf_reg,
+ .set_rfreg = rtl92cu_phy_set_rf_reg,
+ .phy_rf6052_config = rtl92cu_phy_rf6052_config,
+ .phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
+ .phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
+ .config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
+ .config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
+ .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
+ .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
+ .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
+};
+
+static struct rtl_mod_params rtl92cu_mod_params = {
+ .sw_crypto = 0,
+};
+
+static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
+ /* rx */
+ .in_ep_num = RTL92C_USB_BULK_IN_NUM,
+ .rx_urb_num = RTL92C_NUM_RX_URBS,
+ .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
+ .usb_rx_hdl = rtl8192cu_rx_hdl,
+ .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
+ /* tx */
+ .usb_tx_cleanup = rtl8192c_tx_cleanup,
+ .usb_tx_post_hdl = rtl8192c_tx_post_hdl,
+ .usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
+ /* endpoint mapping */
+ .usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
+ .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
+};
+
+static struct rtl_hal_cfg rtl92cu_hal_cfg = {
+ .name = "rtl92c_usb",
+ .fw_name = "rtlwifi/rtl8192cufw.bin",
+ .ops = &rtl8192cu_hal_ops,
+ .mod_params = &rtl92cu_mod_params,
+ .usb_interface_cfg = &rtl92cu_interface_cfg,
+
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+ .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+ .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BDOK,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+#define USB_VENDER_ID_REALTEK 0x0bda
+
+/* 2010-10-19 DID_USB_V3.4 */
+static struct usb_device_id rtl8192c_usb_ids[] = {
+
+ /*=== Realtek demoboard ===*/
+ /* Default ID */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+
+ /****** 8188CU ********/
+ /* 8188CE-VAU USB minCard */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+ /* 8188cu 1*1 dongle */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+ /* 8188cu 1*1 dongle, (b/g mode only) */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ /* 8188cu Slim Solo */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+ /* 8188cu Slim Combo */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ /* 8188RU High-power USB Dongle */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+ /* 8188CE-VAU USB minCard (b/g mode only) */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ /* 8188 Combo for BC4 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+ /****** 8192CU ********/
+ /* 8191cu 1*2 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ /* 8192cu 2*2 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ /* 8192CE-VAU USB minCard */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+
+ /*=== Customer ID ===*/
+ /****** 8188CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+	/* HP - Lite-On, 8188CUS Slim Combo */
+ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+ {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
+ {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
+ {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
+ {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+ /* Russian customer -Azwave (8188CE-VAU b/g mode only) */
+ {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)},
+
+ /****** 8192CU ********/
+ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
+ {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+
+static struct usb_driver rtl8192cu_driver = {
+ .name = "rtl8192cu",
+ .probe = rtl_usb_probe,
+ .disconnect = rtl_usb_disconnect,
+ .id_table = rtl8192c_usb_ids,
+
+#ifdef CONFIG_PM
+ /* .suspend = rtl_usb_suspend, */
+ /* .resume = rtl_usb_resume, */
+ /* .reset_resume = rtl8192c_resume, */
+#endif /* CONFIG_PM */
+#ifdef CONFIG_AUTOSUSPEND
+ .supports_autosuspend = 1,
+#endif
+};
+
+static int __init rtl8192cu_init(void)
+{
+ return usb_register(&rtl8192cu_driver);
+}
+
+static void __exit rtl8192cu_exit(void)
+{
+ usb_deregister(&rtl8192cu_driver);
+}
+
+module_init(rtl8192cu_init);
+module_exit(rtl8192cu_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
new file mode 100644
index 000000000000..43b1177924ab
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -0,0 +1,53 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_SW_H__
+#define __RTL92CU_SW_H__
+
+#define EFUSE_MAX_SECTION 16
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *powerlevel);
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
new file mode 100644
index 000000000000..d57ef5e88a9e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -0,0 +1,1888 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+
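+/*
+ * The PHY_REG, RADIO and MAC tables below are flat lists of
+ * (register address, value) pairs that the *_with_headerfile configuration
+ * helpers walk when programming the baseband, radio and MAC registers.
+ */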
+u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x800, 0x80040002,
+ 0x804, 0x00000003,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x01000100,
+ 0x82c, 0x00390004,
+ 0x830, 0x27272727,
+ 0x834, 0x27272727,
+ 0x838, 0x27272727,
+ 0x83c, 0x27272727,
+ 0x840, 0x00010000,
+ 0x844, 0x00010000,
+ 0x848, 0x27272727,
+ 0x84c, 0x27272727,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x0c1b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x27272727,
+ 0x86c, 0x2b2b2b27,
+ 0x870, 0x07000700,
+ 0x874, 0x22184000,
+ 0x878, 0x08080808,
+ 0x87c, 0x00000000,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xcc0000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121313,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05633,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x6954341e,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x6954341e,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x0186115b,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b99612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x63db25a4,
+ 0xe70, 0x63db25a4,
+ 0xe74, 0x0c1b25a4,
+ 0xe78, 0x0c1b25a4,
+ 0xe7c, 0x0c1b25a4,
+ 0xe80, 0x0c1b25a4,
+ 0xe84, 0x63db25a4,
+ 0xe88, 0x0c1b25a4,
+ 0xe8c, 0x63db25a4,
+ 0xed0, 0x63db25a4,
+ 0xed4, 0x63db25a4,
+ 0xed8, 0x63db25a4,
+ 0xedc, 0x001b25a4,
+ 0xee0, 0x001b25a4,
+ 0xeec, 0x6fdb25a4,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x800, 0x80040000,
+ 0x804, 0x00000001,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x00000000,
+ 0x82c, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83c, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84c, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x001b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x00000000,
+ 0x86c, 0x32323200,
+ 0x870, 0x07000700,
+ 0x874, 0x22004000,
+ 0x878, 0x00000808,
+ 0x87c, 0x00000000,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xccc000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121111,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05611,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x6954341e,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x6954341e,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x018610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020401,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000008,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x631b25a0,
+ 0xe70, 0x631b25a0,
+ 0xe74, 0x081b25a0,
+ 0xe78, 0x081b25a0,
+ 0xe7c, 0x081b25a0,
+ 0xe80, 0x081b25a0,
+ 0xe84, 0x631b25a0,
+ 0xe88, 0x081b25a0,
+ 0xe8c, 0x631b25a0,
+ 0xed0, 0x631b25a0,
+ 0xed4, 0x631b25a0,
+ 0xed8, 0x631b25a0,
+ 0xedc, 0x001b25a0,
+ 0xee0, 0x001b25a0,
+ 0xeec, 0x6b1b25a0,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
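+/*
+ * Unlike the tables above, each entry here is an (address, bitmask, value)
+ * triple, consumed by _rtl92cu_phy_config_bb_with_pgheaderfile for the
+ * per-rate power-by-rate ("PG") programming.
+ */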
+u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = {
+ 0xe00, 0xffffffff, 0x07090c0c,
+ 0xe04, 0xffffffff, 0x01020405,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0b0c0c0e,
+ 0xe14, 0xffffffff, 0x01030506,
+ 0xe18, 0xffffffff, 0x0b0c0d0e,
+ 0xe1c, 0xffffffff, 0x01030509,
+ 0x830, 0xffffffff, 0x07090c0c,
+ 0x834, 0xffffffff, 0x01020405,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0b0c0d0e,
+ 0x848, 0xffffffff, 0x01030509,
+ 0x84c, 0xffffffff, 0x0b0c0d0e,
+ 0x868, 0xffffffff, 0x01030509,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb1,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00010255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000577c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb1,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+};
+
+u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb1,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00010255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000577c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287b3,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x0001429b,
+ 0x013, 0x00010299,
+ 0x013, 0x0000c29c,
+ 0x013, 0x000081a0,
+ 0x013, 0x000040ac,
+ 0x013, 0x00000020,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f405,
+ 0x015, 0x0004f405,
+ 0x015, 0x0008f405,
+ 0x015, 0x000cf405,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = {
+ 0x0,
+};
+
+u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43a, 0x00000000,
+ 0x43b, 0x00000001,
+ 0x43c, 0x00000004,
+ 0x43d, 0x00000005,
+ 0x43e, 0x00000006,
+ 0x43f, 0x00000007,
+ 0x440, 0x0000005d,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000f0,
+ 0x446, 0x0000000f,
+ 0x447, 0x00000000,
+ 0x458, 0x00000041,
+ 0x459, 0x000000a8,
+ 0x45a, 0x00000072,
+ 0x45b, 0x000000b9,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x462, 0x00000008,
+ 0x463, 0x00000003,
+ 0x4c8, 0x000000ff,
+ 0x4c9, 0x00000008,
+ 0x4cc, 0x000000ff,
+ 0x4cd, 0x000000ff,
+ 0x4ce, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000a2,
+ 0x502, 0x0000002f,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000a3,
+ 0x506, 0x0000005e,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002b,
+ 0x509, 0x000000a4,
+ 0x50a, 0x0000005e,
+ 0x50b, 0x00000000,
+ 0x50c, 0x0000004f,
+ 0x50d, 0x000000a4,
+ 0x50e, 0x00000000,
+ 0x50f, 0x00000000,
+ 0x512, 0x0000001c,
+ 0x514, 0x0000000a,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000a,
+ 0x517, 0x00000010,
+ 0x51a, 0x00000016,
+ 0x524, 0x0000000f,
+ 0x525, 0x0000004f,
+ 0x546, 0x00000040,
+ 0x547, 0x00000000,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55a, 0x00000002,
+ 0x55d, 0x000000ff,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000e,
+ 0x609, 0x0000002a,
+ 0x652, 0x00000020,
+ 0x63c, 0x0000000a,
+ 0x63d, 0x0000000e,
+ 0x63e, 0x0000000a,
+ 0x63f, 0x0000000e,
+ 0x66e, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70a, 0x00000065,
+ 0x70b, 0x00000087,
+};
+
+u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7a060001,
+ 0xc78, 0x79070001,
+ 0xc78, 0x78080001,
+ 0xc78, 0x77090001,
+ 0xc78, 0x760a0001,
+ 0xc78, 0x750b0001,
+ 0xc78, 0x740c0001,
+ 0xc78, 0x730d0001,
+ 0xc78, 0x720e0001,
+ 0xc78, 0x710f0001,
+ 0xc78, 0x70100001,
+ 0xc78, 0x6f110001,
+ 0xc78, 0x6e120001,
+ 0xc78, 0x6d130001,
+ 0xc78, 0x6c140001,
+ 0xc78, 0x6b150001,
+ 0xc78, 0x6a160001,
+ 0xc78, 0x69170001,
+ 0xc78, 0x68180001,
+ 0xc78, 0x67190001,
+ 0xc78, 0x661a0001,
+ 0xc78, 0x651b0001,
+ 0xc78, 0x641c0001,
+ 0xc78, 0x631d0001,
+ 0xc78, 0x621e0001,
+ 0xc78, 0x611f0001,
+ 0xc78, 0x60200001,
+ 0xc78, 0x49210001,
+ 0xc78, 0x48220001,
+ 0xc78, 0x47230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7a460001,
+ 0xc78, 0x79470001,
+ 0xc78, 0x78480001,
+ 0xc78, 0x77490001,
+ 0xc78, 0x764a0001,
+ 0xc78, 0x754b0001,
+ 0xc78, 0x744c0001,
+ 0xc78, 0x734d0001,
+ 0xc78, 0x724e0001,
+ 0xc78, 0x714f0001,
+ 0xc78, 0x70500001,
+ 0xc78, 0x6f510001,
+ 0xc78, 0x6e520001,
+ 0xc78, 0x6d530001,
+ 0xc78, 0x6c540001,
+ 0xc78, 0x6b550001,
+ 0xc78, 0x6a560001,
+ 0xc78, 0x69570001,
+ 0xc78, 0x68580001,
+ 0xc78, 0x67590001,
+ 0xc78, 0x665a0001,
+ 0xc78, 0x655b0001,
+ 0xc78, 0x645c0001,
+ 0xc78, 0x635d0001,
+ 0xc78, 0x625e0001,
+ 0xc78, 0x615f0001,
+ 0xc78, 0x60600001,
+ 0xc78, 0x49610001,
+ 0xc78, 0x48620001,
+ 0xc78, 0x47630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7a060001,
+ 0xc78, 0x79070001,
+ 0xc78, 0x78080001,
+ 0xc78, 0x77090001,
+ 0xc78, 0x760a0001,
+ 0xc78, 0x750b0001,
+ 0xc78, 0x740c0001,
+ 0xc78, 0x730d0001,
+ 0xc78, 0x720e0001,
+ 0xc78, 0x710f0001,
+ 0xc78, 0x70100001,
+ 0xc78, 0x6f110001,
+ 0xc78, 0x6e120001,
+ 0xc78, 0x6d130001,
+ 0xc78, 0x6c140001,
+ 0xc78, 0x6b150001,
+ 0xc78, 0x6a160001,
+ 0xc78, 0x69170001,
+ 0xc78, 0x68180001,
+ 0xc78, 0x67190001,
+ 0xc78, 0x661a0001,
+ 0xc78, 0x651b0001,
+ 0xc78, 0x641c0001,
+ 0xc78, 0x631d0001,
+ 0xc78, 0x621e0001,
+ 0xc78, 0x611f0001,
+ 0xc78, 0x60200001,
+ 0xc78, 0x49210001,
+ 0xc78, 0x48220001,
+ 0xc78, 0x47230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7a460001,
+ 0xc78, 0x79470001,
+ 0xc78, 0x78480001,
+ 0xc78, 0x77490001,
+ 0xc78, 0x764a0001,
+ 0xc78, 0x754b0001,
+ 0xc78, 0x744c0001,
+ 0xc78, 0x734d0001,
+ 0xc78, 0x724e0001,
+ 0xc78, 0x714f0001,
+ 0xc78, 0x70500001,
+ 0xc78, 0x6f510001,
+ 0xc78, 0x6e520001,
+ 0xc78, 0x6d530001,
+ 0xc78, 0x6c540001,
+ 0xc78, 0x6b550001,
+ 0xc78, 0x6a560001,
+ 0xc78, 0x69570001,
+ 0xc78, 0x68580001,
+ 0xc78, 0x67590001,
+ 0xc78, 0x665a0001,
+ 0xc78, 0x655b0001,
+ 0xc78, 0x645c0001,
+ 0xc78, 0x635d0001,
+ 0xc78, 0x625e0001,
+ 0xc78, 0x615f0001,
+ 0xc78, 0x60600001,
+ 0xc78, 0x49610001,
+ 0xc78, 0x48620001,
+ 0xc78, 0x47630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x040, 0x000c0004,
+ 0x800, 0x80040000,
+ 0x804, 0x00000001,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82c, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83c, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84c, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x001b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x00000000,
+ 0x86c, 0x20202000,
+ 0x870, 0x03000300,
+ 0x874, 0x22004000,
+ 0x878, 0x00000808,
+ 0x87c, 0x00ffc3f1,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xccc000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121111,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x15160000,
+ 0xa24, 0x070b0f12,
+ 0xa28, 0x00000104,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05611,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x6954342e,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x6954342f,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c46000d,
+ 0xc74, 0x018610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x24000090,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x24000090,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020401,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x24242424,
+ 0xe04, 0x24242424,
+ 0xe08, 0x03902024,
+ 0xe10, 0x24242424,
+ 0xe14, 0x24242424,
+ 0xe18, 0x24242424,
+ 0xe1c, 0x24242424,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000008,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x631b25a0,
+ 0xe70, 0x631b25a0,
+ 0xe74, 0x081b25a0,
+ 0xe78, 0x081b25a0,
+ 0xe7c, 0x081b25a0,
+ 0xe80, 0x081b25a0,
+ 0xe84, 0x631b25a0,
+ 0xe88, 0x081b25a0,
+ 0xe8c, 0x631b25a0,
+ 0xed0, 0x631b25a0,
+ 0xed4, 0x631b25a0,
+ 0xed8, 0x631b25a0,
+ 0xedc, 0x001b25a0,
+ 0xee0, 0x001b25a0,
+ 0xeec, 0x6b1b25a0,
+ 0xee8, 0x31555448,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
+ 0xe00, 0xffffffff, 0x06080808,
+ 0xe04, 0xffffffff, 0x00040406,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x04060608,
+ 0xe14, 0xffffffff, 0x00020204,
+ 0xe18, 0xffffffff, 0x04060608,
+ 0xe1c, 0xffffffff, 0x00020204,
+ 0x830, 0xffffffff, 0x06080808,
+ 0x834, 0xffffffff, 0x00040406,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x04060608,
+ 0x848, 0xffffffff, 0x00020204,
+ 0x84c, 0xffffffff, 0x04060608,
+ 0x868, 0xffffffff, 0x00020204,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb0,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e529,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00000255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x0000083c,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000977c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x000d8000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x00028fb4,
+ 0x013, 0x00024fa8,
+ 0x013, 0x000207a4,
+ 0x013, 0x0001c798,
+ 0x013, 0x000183a4,
+ 0x013, 0x00014398,
+ 0x013, 0x000101a4,
+ 0x013, 0x0000c198,
+ 0x013, 0x000080a4,
+ 0x013, 0x00004098,
+ 0x013, 0x00000000,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f405,
+ 0x015, 0x0004f405,
+ 0x015, 0x0008f405,
+ 0x015, 0x000cf405,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7b070001,
+ 0xc78, 0x7b080001,
+ 0xc78, 0x7a090001,
+ 0xc78, 0x790a0001,
+ 0xc78, 0x780b0001,
+ 0xc78, 0x770c0001,
+ 0xc78, 0x760d0001,
+ 0xc78, 0x750e0001,
+ 0xc78, 0x740f0001,
+ 0xc78, 0x73100001,
+ 0xc78, 0x72110001,
+ 0xc78, 0x71120001,
+ 0xc78, 0x70130001,
+ 0xc78, 0x6f140001,
+ 0xc78, 0x6e150001,
+ 0xc78, 0x6d160001,
+ 0xc78, 0x6c170001,
+ 0xc78, 0x6b180001,
+ 0xc78, 0x6a190001,
+ 0xc78, 0x691a0001,
+ 0xc78, 0x681b0001,
+ 0xc78, 0x671c0001,
+ 0xc78, 0x661d0001,
+ 0xc78, 0x651e0001,
+ 0xc78, 0x641f0001,
+ 0xc78, 0x63200001,
+ 0xc78, 0x62210001,
+ 0xc78, 0x61220001,
+ 0xc78, 0x60230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7b460001,
+ 0xc78, 0x7b470001,
+ 0xc78, 0x7b480001,
+ 0xc78, 0x7a490001,
+ 0xc78, 0x794a0001,
+ 0xc78, 0x784b0001,
+ 0xc78, 0x774c0001,
+ 0xc78, 0x764d0001,
+ 0xc78, 0x754e0001,
+ 0xc78, 0x744f0001,
+ 0xc78, 0x73500001,
+ 0xc78, 0x72510001,
+ 0xc78, 0x71520001,
+ 0xc78, 0x70530001,
+ 0xc78, 0x6f540001,
+ 0xc78, 0x6e550001,
+ 0xc78, 0x6d560001,
+ 0xc78, 0x6c570001,
+ 0xc78, 0x6b580001,
+ 0xc78, 0x6a590001,
+ 0xc78, 0x695a0001,
+ 0xc78, 0x685b0001,
+ 0xc78, 0x675c0001,
+ 0xc78, 0x665d0001,
+ 0xc78, 0x655e0001,
+ 0xc78, 0x645f0001,
+ 0xc78, 0x63600001,
+ 0xc78, 0x62610001,
+ 0xc78, 0x61620001,
+ 0xc78, 0x60630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
new file mode 100644
index 000000000000..c3d5cd826cfa
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
@@ -0,0 +1,71 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TABLE__H_
+#define __RTL92CU_TABLE__H_
+
+#include <linux/types.h>
+
+#define RTL8192CUPHY_REG_2TARRAY_LENGTH 374
+extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH];
+#define RTL8192CUPHY_REG_1TARRAY_LENGTH 374
+extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH];
+
+#define RTL8192CUPHY_REG_ARRAY_PGLENGTH 336
+extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH];
+
+#define RTL8192CURADIOA_2TARRAYLENGTH 282
+extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH];
+#define RTL8192CURADIOB_2TARRAYLENGTH 78
+extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH];
+#define RTL8192CURADIOA_1TARRAYLENGTH 282
+extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH];
+#define RTL8192CURADIOB_1TARRAYLENGTH 1
+extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH];
+
+#define RTL8192CUMAC_2T_ARRAYLENGTH 172
+extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH];
+
+#define RTL8192CUAGCTAB_2TARRAYLENGTH 320
+extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
+#define RTL8192CUAGCTAB_1TARRAYLENGTH 320
+extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];
+
+#define RTL8192CUPHY_REG_1T_HPArrayLength 378
+extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];
+
+#define RTL8192CUPHY_REG_Array_PG_HPLength 336
+extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];
+
+#define RTL8192CURadioA_1T_HPArrayLength 282
+extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
+#define RTL8192CUAGCTAB_1T_HPArrayLength 320
+extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
new file mode 100644
index 000000000000..d0b0d43b9a6d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -0,0 +1,687 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
+{
+ u8 ep_cfg, txqsele;
+ u8 ep_nums = 0;
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ rtlusb->out_queue_sel = 0;
+ ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
+ ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
+ switch (ep_cfg) {
+ case 0: /* 2 bulk OUT, 1 bulk IN */
+ case 3:
+ rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ;
+ ep_nums = 2;
+ break;
+ case 1: /* 1 bulk IN/OUT => map all endpoints to the Low queue */
+ case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoints to the High queue */
+ txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
+ if (txqsele & 0x0F) /* map all endpoints to the High queue */
+ rtlusb->out_queue_sel = TX_SELE_HQ;
+ else if (txqsele & 0xF0) /* map all endpoints to the Low queue */
+ rtlusb->out_queue_sel = TX_SELE_LQ;
+ ep_nums = 1;
+ break;
+ default:
+ break;
+ }
+ return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
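+/* Normal-chip variant: read REG_NORMAL_SIE_EP to learn which Tx queues
+ * (high/normal/low) are backed by a bulk-out endpoint and verify that the
+ * count matches the number recorded in rtlusb->out_ep_nums. */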
+static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
+{
+ u8 ep_cfg;
+ u8 ep_nums = 0;
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ rtlusb->out_queue_sel = 0;
+ /* Normal and High queue */
+ ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
+ if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+ rtlusb->out_queue_sel |= TX_SELE_HQ;
+ ep_nums++;
+ }
+ if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
+ rtlusb->out_queue_sel |= TX_SELE_NQ;
+ ep_nums++;
+ }
+ /* Low queue */
+ ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
+ if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+ rtlusb->out_queue_sel |= TX_SELE_LQ;
+ ep_nums++;
+ }
+ return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
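+/* The _*OutEpMapping() helpers below assign a USB bulk-out endpoint number
+ * (2, 3 or 5) to each software Tx queue, depending on how many out endpoints
+ * the chip exposes and on whether a WMM-oriented layout is requested. */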
+static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
+ bool bwificfg, struct rtl_ep_map *ep_map)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (bwificfg) { /* for WMM */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB Chip-B & WMM Setting.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ } else { /* typical setting */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB typical Setting.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 3;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ }
+}
+
+static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
+ struct rtl_ep_map *ep_map)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (bwificfg) { /* for WMM */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB 3EP Setting for WMM.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 5;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ } else { /* typical setting */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB 3EP Setting for typical.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 5;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 5;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ }
+}
+
+static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 2;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+}
+static int _out_ep_mapping(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ bool bIsChipN, bwificfg = false;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
+
+ bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
+ switch (rtlusb->out_ep_nums) {
+ case 2:
+ _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
+ break;
+ case 3:
+ /* Test chip doesn't support three out EPs. */
+ if (!bIsChipN) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ _ThreeOutEpMapping(hw, bIsChipN, ep_map);
+ break;
+ case 1:
+ _OneOutEpMapping(hw, ep_map);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+err_out:
+ return err;
+
+}
+/* endpoint mapping */
+int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ int error = 0;
+ if (likely(IS_NORMAL_CHIP(rtlhal->version)))
+ error = _ConfigVerNOutEP(hw);
+ else
+ error = _ConfigVerTOutEP(hw);
+ if (error)
+ goto err_out;
+ error = _out_ep_mapping(hw);
+ if (error)
+ goto err_out;
+err_out:
+ return error;
+}
+
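+/* Map a mac80211 software queue index (0 = VO, 1 = VI, 2 = BE, 3 = BK) and
+ * the frame type to the driver's hardware Tx queue. */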
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
+{
+ u16 hw_queue_index;
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ hw_queue_index = RTL_TXQ_BCN;
+ goto out;
+ }
+ if (ieee80211_is_mgmt(fc)) {
+ hw_queue_index = RTL_TXQ_MGT;
+ goto out;
+ }
+ switch (mac80211_queue_index) {
+ case 0:
+ hw_queue_index = RTL_TXQ_VO;
+ break;
+ case 1:
+ hw_queue_index = RTL_TXQ_VI;
+ break;
+ case 2:
+ hw_queue_index = RTL_TXQ_BE;
+ break;
+ case 3:
+ hw_queue_index = RTL_TXQ_BK;
+ break;
+ default:
+ hw_queue_index = RTL_TXQ_BE;
+ RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
+ mac80211_queue_index));
+ break;
+ }
+out:
+ return hw_queue_index;
+}
+
+static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
+ __le16 fc, u16 mac80211_queue_index)
+{
+ enum rtl_desc_qsel qsel;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ qsel = QSLT_BEACON;
+ goto out;
+ }
+ if (ieee80211_is_mgmt(fc)) {
+ qsel = QSLT_MGNT;
+ goto out;
+ }
+ switch (mac80211_queue_index) {
+ case 0: /* VO */
+ qsel = QSLT_VO;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("VO queue, set qsel = 0x%x\n", QSLT_VO));
+ break;
+ case 1: /* VI */
+ qsel = QSLT_VI;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("VI queue, set qsel = 0x%x\n", QSLT_VI));
+ break;
+ case 3: /* BK */
+ qsel = QSLT_BK;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("BK queue, set qsel = 0x%x\n", QSLT_BK));
+ break;
+ case 2: /* BE */
+ default:
+ qsel = QSLT_BE;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("BE queue, set qsel = 0x%x\n", QSLT_BE));
+ break;
+ }
+out:
+ return qsel;
+}
+
+/* =============================================================== */
+
+/*----------------------------------------------------------------------
+ *
+ * Rx handler
+ *
+ *---------------------------------------------------------------------- */
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *p_desc, struct sk_buff *skb)
+{
+ struct rx_fwinfo_92c *p_drvinfo;
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+ stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
+ stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+ && (GET_RX_DESC_FAGGR(pdesc) == 1));
+ stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+ if (GET_RX_DESC_CRC32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (!GET_RX_DESC_SWDEC(pdesc))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (GET_RX_DESC_BW(pdesc))
+ rx_status->flag |= RX_FLAG_40MHZ;
+ if (GET_RX_DESC_RX_HT(pdesc))
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ if (stats->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+ (bool)GET_RX_DESC_RX_HT(pdesc),
+ (u8)GET_RX_DESC_RX_MCS(pdesc),
+ (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(p_desc + RTL_RX_DESC_SIZE);
+ rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+ rx_status->signal = stats->rssi + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+}
+
+#define RTL_RX_DRV_INFO_UNIT 8
+
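+/* Parse the Rx descriptor at the head of a USB Rx buffer, fill in the
+ * mac80211 rx status, strip the descriptor and driver-info bytes, and hand
+ * the remaining 802.11 frame to mac80211 via ieee80211_rx_irqsafe(). */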
+static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status =
+ (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
+ u32 skb_len, pkt_len, drvinfo_len;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *rxdesc;
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+ struct rx_fwinfo_92c *p_drvinfo;
+ bool bv;
+ __le16 fc;
+ struct ieee80211_hdr *hdr;
+
+ memset(rx_status, 0, sizeof(*rx_status));
+ rxdesc = skb->data;
+ skb_len = skb->len;
+ drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
+ pkt_len = GET_RX_DESC_PKT_LEN(rxdesc);
+ /* TODO: Error recovery. drop this skb or something. */
+ WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
+ stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
+ stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
+ stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
+ stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
+ stats.hwerror = (stats.crc | stats.icv);
+ stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
+ stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
+ stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
+ stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
+ && (GET_RX_DESC_FAGGR(rxdesc) == 1));
+ stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
+ stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+ /* TODO: is center_freq changed when doing scan? */
+ /* TODO: Shall we add protection or just skip those two step? */
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+ if (GET_RX_DESC_CRC32(rxdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (!GET_RX_DESC_SWDEC(rxdesc))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (GET_RX_DESC_BW(rxdesc))
+ rx_status->flag |= RX_FLAG_40MHZ;
+ if (GET_RX_DESC_RX_HT(rxdesc))
+ rx_status->flag |= RX_FLAG_HT;
+ /* Data rate */
+ rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+ (bool)GET_RX_DESC_RX_HT(rxdesc),
+ (u8)GET_RX_DESC_RX_MCS(rxdesc),
+ (bool)GET_RX_DESC_PAGGR(rxdesc)
+ );
+ /* There is a phy status after this rx descriptor. */
+ if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
+ rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
+ (struct rx_desc_92c *)rxdesc, p_drvinfo);
+ }
+ skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+ bv = ieee80211_is_probe_resp(fc);
+ if (bv)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Got probe response frame.\n"));
+ if (ieee80211_is_beacon(fc))
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Got beacon frame.\n"));
+ if (ieee80211_is_data(fc))
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Frame: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
+ "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+ (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
+ (u32)hdr->addr1[5]));
+ /* rx_status points into the skb control buffer, so the status filled
+ * in above already travels with the skb. */
+ ieee80211_rx_irqsafe(hw, skb);
+}
+
+void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ _rtl_rx_process(hw, skb);
+}
+
+void rtl8192c_rx_segregate_hdl(
+ struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct sk_buff_head *skb_list)
+{
+}
+
+/*----------------------------------------------------------------------
+ *
+ * Tx handler
+ *
+ *---------------------------------------------------------------------- */
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+}
+
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list)
+{
+ return skb_dequeue(list);
+}
+
+/*======================================== trx ===============================*/
+
+static void _rtl_fill_usb_tx_desc(u8 *txdesc)
+{
+ SET_TX_DESC_OWN(txdesc, 1);
+ SET_TX_DESC_LAST_SEG(txdesc, 1);
+ SET_TX_DESC_FIRST_SEG(txdesc, 1);
+}
+/* Compute the Tx descriptor checksum that the hardware uses for error
+ * recovery. */
+static void _rtl_tx_desc_checksum(u8 *txdesc)
+{
+ u16 *ptr = (u16 *)txdesc;
+ u16 checksum = 0;
+ u32 index;
+
+ /* Clear first */
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
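+ /* XOR the first 16 16-bit words (32 bytes) of the descriptor; the
+ * checksum field itself was cleared above so it does not skew the
+ * result. */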
+ for (index = 0; index < 16; index++)
+ checksum = checksum ^ (*(ptr + index));
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+}
+
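+/* Build the Tx descriptor (RTL_TX_HEADER_SIZE bytes) that is pushed in front
+ * of the frame: rate, aggregation, RTS/CTS protection, bandwidth, security
+ * type and hardware queue are filled in from the tcb_desc computed by
+ * rtl_get_tcb_desc(). */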
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ unsigned int queue_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool defaultadapter = true;
+ struct ieee80211_sta *sta;
+ struct rtl_tcb_desc tcb_desc;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ u16 seq_number;
+ __le16 fc = hdr->frame_control;
+ u8 rate_flag = info->control.rates[0].flags;
+ u16 pktlen = skb->len;
+ enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
+ skb_get_queue_mapping(skb));
+ u8 *txdesc;
+
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
+ txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+ memset(txdesc, 0, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
+ SET_TX_DESC_LINIP(txdesc, 0);
+ SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
+ SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate);
+ if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
+ SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
+ if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(txdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
+ } else {
+ SET_TX_DESC_AGG_BREAK(txdesc, 1);
+ }
+ SET_TX_DESC_SEQ(txdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable &&
+ !tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable ||
+ tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
+ SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate);
+ SET_TX_DESC_RTS_BW(txdesc, 0);
+ SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc);
+ SET_TX_DESC_RTS_SHORT(txdesc,
+ ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
+ (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+ : (tcb_desc.rts_use_shortgi ? 1 : 0)));
+ if (mac->bw_40) {
+ if (tcb_desc.packet_bw) {
+ SET_TX_DESC_DATA_BW(txdesc, 1);
+ SET_TX_DESC_DATA_SC(txdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(txdesc, 0);
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
+ SET_TX_DESC_DATA_SC(txdesc,
+ mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(txdesc, 0);
+ SET_TX_DESC_DATA_SC(txdesc, 0);
+ }
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
+ }
+ rcu_read_unlock();
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
+ break;
+ }
+ }
+ SET_TX_DESC_PKT_ID(txdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(txdesc, 0);
+ SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0);
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("Enable RDG function.\n"));
+ SET_TX_DESC_RDG_ENABLE(txdesc, 1);
+ SET_TX_DESC_HTC(txdesc, 1);
+ }
+ }
+ if (rtlpriv->dm.useramask) {
+ SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index);
+ SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id);
+ } else {
+ SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index);
+ SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index);
+ }
+ if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+ ppsc->fwctrl_lps) {
+ SET_TX_DESC_HWSEQ_EN(txdesc, 1);
+ SET_TX_DESC_PKT_ID(txdesc, 8);
+ if (!defaultadapter)
+ SET_TX_DESC_QOS(txdesc, 1);
+ }
+ if (ieee80211_has_morefrags(fc))
+ SET_TX_DESC_MORE_FRAG(txdesc, 1);
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ SET_TX_DESC_BMC(txdesc, 1);
+ _rtl_fill_usb_tx_desc(txdesc);
+ _rtl_tx_desc_checksum(txdesc);
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
+}
+
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pDesc,
+ u32 buffer_len, bool bIsPsPoll)
+{
+ /* Clear all status */
+ memset(pDesc, 0, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
+ SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
+ SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
+ SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
+ SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+ /* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed
+ * to an erroneous value by the hardware. */
+ if (bIsPsPoll) {
+ SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
+ } else {
+ SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
+ SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */
+ }
+ SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
+ SET_TX_DESC_OWN(pDesc, 1);
+ SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
+ _rtl_tx_desc_checksum(pDesc);
+}
+
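+/* Build a minimal Tx descriptor for H2C command and beacon frames, using a
+ * fixed driver-selected 1M rate and the beacon hardware queue. */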
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool firstseg,
+ bool lastseg, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 fw_queue = QSLT_BEACON;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+
+ memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
+ if (firstseg)
+ SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ SET_TX_DESC_SEQ(pdesc, 0);
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+ SET_TX_DESC_RATE_ID(pdesc, 7);
+ SET_TX_DESC_MACID(pdesc, 0);
+ SET_TX_DESC_OWN(pdesc, 1);
+ SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, 0x20);
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_PKT_ID(pdesc, 8);
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
+ pdesc, RTL_TX_DESC_SIZE);
+}
+
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
new file mode 100644
index 000000000000..b396d46edbb7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -0,0 +1,430 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TRX_H__
+#define __RTL92CU_TRX_H__
+
+#define RTL92C_USB_BULK_IN_NUM 1
+#define RTL92C_NUM_RX_URBS 8
+#define RTL92C_NUM_TX_URBS 32
+
+#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+enum usb_rx_agg_mode {
+ USB_RX_AGG_DISABLE,
+ USB_RX_AGG_DMA,
+ USB_RX_AGG_USB,
+ USB_RX_AGG_DMA_USB
+};
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+#define RTL_USB_TX_AGG_NUM_DESC 5
+
+#define RTL_USB_RX_AGG_PAGE_NUM 4
+#define RTL_USB_RX_AGG_PAGE_TIMEOUT 3
+
+#define RTL_USB_RX_AGG_BLOCK_NUM 5
+#define RTL_USB_RX_AGG_BLOCK_TIMEOUT 3
+
+/*======================== rx status =========================================*/
+
+struct rx_drv_info_92c {
+ /*
+ * Driver info contains the PHY status and other variable-size info.
+ * The PHY status content is laid out as below.
+ */
+
+ /* DWORD 0 */
+ u8 gain_trsw[4];
+
+ /* DWORD 1 */
+ u8 pwdb_all;
+ u8 cfosho[4];
+
+ /* DWORD 2 */
+ u8 cfotail[4];
+
+ /* DWORD 3 */
+ s8 rxevm[2];
+ s8 rxsnr[4];
+
+ /* DWORD 4 */
+ u8 pdsnr[2];
+
+ /* DWORD 5 */
+ u8 csi_current[2];
+ u8 csi_target[2];
+
+ /* DWORD 6 */
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \
+ ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+ BIT_LEN_MASK_32(__bits))
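+/*
+ * For example, GET_RX_DESC_DRVINFO_SIZE() below expands to
+ * SHIFT_AND_MASK_LE(__rxdesc, 16, 4): the first le32 word of the descriptor
+ * is converted to host order, shifted right by 16 bits, and masked down to
+ * the 4-bit driver-info size field.
+ */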
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
+ (*(__le32 *)(__pdesc) = \
+ (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
+ (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
+ (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
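+/*
+ * For example, SET_TX_DESC_OWN(__txdesc, 1) below expands to
+ * SET_BITS_OFFSET_LE(__txdesc, 31, 1, 1): the first le32 word of the
+ * descriptor is converted to host order, bit 31 is cleared and then set to 1,
+ * and the word is converted back to little-endian before being stored.
+ */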
+
+/* macros to read various fields in RX descriptor */
+
+/* DWORD 0 */
+#define GET_RX_DESC_PKT_LEN(__rxdesc) \
+ SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
+#define GET_RX_DESC_CRC32(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
+#define GET_RX_DESC_ICV(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
+#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
+#define GET_RX_DESC_QOS(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
+#define GET_RX_DESC_PHY_STATUS(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
+#define GET_RX_DESC_LAST_SEG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
+#define GET_RX_DESC_FIRST_SEG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
+#define GET_RX_DESC_EOR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
+#define GET_RX_DESC_OWN(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
+
+/* DWORD 1 */
+#define GET_RX_DESC_MACID(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
+#define GET_RX_DESC_PAGGR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
+#define GET_RX_DESC_MORE_DATA(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
+#define GET_RX_DESC_MORE_FRAG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
+
+/* DWORD 2 */
+#define GET_RX_DESC_SEQ(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
+#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
+#define GET_RX_DESC_NEXT_IND(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
+
+/* DWORD 3 */
+#define GET_RX_DESC_RX_MCS(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
+#define GET_RX_DESC_RX_HT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
+#define GET_RX_DESC_AMSDU(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
+#define GET_RX_DESC_SPLCP(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
+#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
+#define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
+#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
+#define GET_RX_DESC_HWPC_ERR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
+
+/* DWORD 4 */
+#define GET_RX_DESC_IV1(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
+
+/* DWORD 5 */
+#define GET_RX_DESC_TSFL(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
+
+/*======================= tx desc ============================================*/
+
+/* macros to set various fields in TX descriptor */
+
+/* Dword 0 */
+#define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
+#define SET_TX_DESC_OFFSET(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
+#define SET_TX_DESC_BMC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
+#define SET_TX_DESC_HTC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
+#define SET_TX_DESC_LAST_SEG(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
+#define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
+#define SET_TX_DESC_LINIP(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
+#define SET_TX_DESC_NO_ACM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
+#define SET_TX_DESC_GF(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
+#define SET_TX_DESC_OWN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
+
+
+/* Dword 1 */
+#define SET_TX_DESC_MACID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
+#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
+#define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
+#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
+#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
+#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
+#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
+#define SET_TX_DESC_PIFS(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
+#define SET_TX_DESC_RATE_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
+#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
+#define SET_TX_DESC_SEC_TYPE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
+#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
+
+/* Dword 2 */
+#define SET_TX_DESC_RTS_RC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
+#define SET_TX_DESC_DATA_RC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
+#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
+#define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
+#define SET_TX_DESC_RAW(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
+#define SET_TX_DESC_CCX(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
+#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
+#define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
+#define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
+#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
+#define SET_TX_DESC_TX_ANTL(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
+#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
+
+/* Dword 3 */
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
+#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
+#define SET_TX_DESC_SEQ(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
+#define SET_TX_DESC_PKT_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
+
+/* Dword 4 */
+#define SET_TX_DESC_RTS_RATE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
+#define SET_TX_DESC_AP_DCFE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
+#define SET_TX_DESC_QOS(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
+#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
+#define SET_TX_DESC_USE_RATE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
+#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
+#define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
+#define SET_TX_DESC_CTS2SELF(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
+#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
+#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
+#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
+#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
+#define SET_TX_DESC_DATA_SC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
+#define SET_TX_DESC_DATA_STBC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
+#define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
+#define SET_TX_DESC_DATA_BW(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
+#define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
+#define SET_TX_DESC_RTS_BW(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
+#define SET_TX_DESC_RTS_SC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
+#define SET_TX_DESC_RTS_STBC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
+
+/* Dword 5 */
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
+#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
+
+/* Dword 6 */
+#define SET_TX_DESC_TXAGC_A(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
+#define SET_TX_DESC_TXAGC_B(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
+#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
+#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
+#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
+
+/* Dword 7 */
+#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
+#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
+
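+/* Illustrative use (not part of this patch): a fill-desc routine sets fields
+ * on a zeroed descriptor with these setters, e.g.
+ *   SET_TX_DESC_TX_RATE(txdesc, rate);
+ *   SET_TX_DESC_RTS_ENABLE(txdesc, 1);
+ * Each macro writes its field into the little-endian dword at the given byte
+ * offset via SET_BITS_OFFSET_LE.
+ */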
+
+int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *p_desc, struct sk_buff *skb);
+void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *,
+ struct sk_buff_head *);
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb);
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
+ struct sk_buff_head *);
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ unsigned int queue_index);
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pDesc,
+ u32 buffer_len, bool bIsPsPoll);
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool b_firstseg,
+ bool b_lastseg, struct sk_buff *skb);
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
new file mode 100644
index 000000000000..a4b2613d6a8c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -0,0 +1,1035 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+#include <linux/usb.h>
+#include "core.h"
+#include "wifi.h"
+#include "usb.h"
+#include "base.h"
+#include "ps.h"
+
+#define REALTEK_USB_VENQT_READ 0xC0
+#define REALTEK_USB_VENQT_WRITE 0x40
+#define REALTEK_USB_VENQT_CMD_REQ 0x05
+#define REALTEK_USB_VENQT_CMD_IDX 0x00
+
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
+
+static void usbctrl_async_callback(struct urb *urb)
+{
+ if (urb)
+ kfree(urb->context);
+}
+
+static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+ u16 value, u16 index, void *pdata,
+ u16 len)
+{
+ int rc;
+ unsigned int pipe;
+ u8 reqtype;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct rtl819x_async_write_data {
+ u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+ struct usb_ctrlrequest dr;
+ } *buf;
+
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+
+ buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ dr = &buf->dr;
+
+ dr->bRequestType = reqtype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(len);
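+ /* Note: callers must keep len <= REALTEK_USB_VENQT_MAX_BUF_SIZE, otherwise
+ * the copy below would overrun buf->data and clobber the setup packet. */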
+ memcpy(buf, pdata, len);
+ usb_fill_control_urb(urb, udev, pipe,
+ (unsigned char *)dr, buf, len,
+ usbctrl_async_callback, buf);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0)
+ kfree(buf);
+ usb_free_urb(urb);
+ return rc;
+}
+
+static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+ u16 value, u16 index, void *pdata,
+ u16 len)
+{
+ unsigned int pipe;
+ int status;
+ u8 reqtype;
+
+ pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+ reqtype = REALTEK_USB_VENQT_READ;
+
+ status = usb_control_msg(udev, pipe, request, reqtype, value, index,
+ pdata, len, 0); /* max. timeout */
+
+ if (status < 0)
+ printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
+ "status:0x%x value=0x%x\n", value, status,
+ *(u32 *)pdata);
+ return status;
+}
+
+static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+{
+ u8 request;
+ u16 wvalue;
+ u16 index;
+ u32 *data;
+ u32 ret;
+
+ data = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+ wvalue = (u16)addr;
+ _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+ ret = *data;
+ kfree(data);
+ return ret;
+}
+
+static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+}
+
+static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+}
+
+static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return _usb_read_sync(to_usb_device(dev), addr, 4);
+}
+
+static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
+ u16 len)
+{
+ u8 request;
+ u16 wvalue;
+ u16 index;
+ u32 data;
+
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+ wvalue = (u16)(addr&0x0000ffff);
+ data = val;
+ _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
+ len);
+}
+
+static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ _usb_write_async(to_usb_device(dev), addr, val, 1);
+}
+
+static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ _usb_write_async(to_usb_device(dev), addr, val, 2);
+}
+
+static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ _usb_write_async(to_usb_device(dev), addr, val, 4);
+}
+
+static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
+ u16 len, u8 *pdata)
+{
+ int status;
+ u8 request;
+ u16 wvalue;
+ u16 index;
+
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+ wvalue = (u16)addr;
+ if (read)
+ status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
+ index, pdata, len);
+ else
+ status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
+ index, pdata, len);
+ return status;
+}
+
+static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
+ pdata);
+}
+
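+/* Note: the write path goes through the async vendor request, so a zero
+ * return only means the control URB was queued, not that it completed. */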
+static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
+ pdata);
+}
+
+static void _rtl_usb_io_handler_init(struct device *dev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->io.dev = dev;
+ mutex_init(&rtlpriv->io.bb_mutex);
+ rtlpriv->io.write8_async = _usb_write8_async;
+ rtlpriv->io.write16_async = _usb_write16_async;
+ rtlpriv->io.write32_async = _usb_write32_async;
+ rtlpriv->io.writeN_async = _usb_writeN_async;
+ rtlpriv->io.read8_sync = _usb_read8_sync;
+ rtlpriv->io.read16_sync = _usb_read16_sync;
+ rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.readN_sync = _usb_readN_sync;
+}
+
+static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ mutex_destroy(&rtlpriv->io.bb_mutex);
+}
+
+/*
+ * Default aggregation handler: do nothing and just return the oldest skb.
+ */
+static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list)
+{
+ return skb_dequeue(list);
+}
+
+#define IS_HIGH_SPEED_USB(udev) \
+ ((USB_SPEED_HIGH == (udev)->speed) ? true : false)
+
+static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
+{
+ u32 i;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev)
+ ? USB_HIGH_SPEED_BULK_SIZE
+ : USB_FULL_SPEED_BULK_SIZE;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size));
+
+ for (i = 0; i < __RTL_TXQ_NUM; i++) {
+ u32 ep_num = rtlusb->ep_map.ep_mapping[i];
+ if (!ep_num) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Invalid endpoint map setting!\n"));
+ return -EINVAL;
+ }
+ }
+
+ rtlusb->usb_tx_post_hdl =
+ rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl;
+ rtlusb->usb_tx_cleanup =
+ rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup;
+ rtlusb->usb_tx_aggregate_hdl =
+ (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl)
+ ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl
+ : &_none_usb_tx_aggregate_hdl;
+
+ init_usb_anchor(&rtlusb->tx_submitted);
+ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+ skb_queue_head_init(&rtlusb->tx_skb_queue[i]);
+ init_usb_anchor(&rtlusb->tx_pending[i]);
+ }
+ return 0;
+}
+
+static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
+ rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
+ rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
+ rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
+ rtlusb->usb_rx_segregate_hdl =
+ rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
+
+ printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
+ rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
+ init_usb_anchor(&rtlusb->rx_submitted);
+ return 0;
+}
+
+static int _rtl_usb_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ int err;
+ u8 epidx;
+ struct usb_interface *usb_intf = rtlusb->intf;
+ u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints;
+
+ rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
+ for (epidx = 0; epidx < epnums; epidx++) {
+ struct usb_endpoint_descriptor *pep_desc;
+ pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
+
+ if (usb_endpoint_dir_in(pep_desc))
+ rtlusb->in_ep_nums++;
+ else if (usb_endpoint_dir_out(pep_desc))
+ rtlusb->out_ep_nums++;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
+ pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+ pep_desc->bInterval));
+ }
+ if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
+ return -EINVAL;
+
+ /* usb endpoint mapping */
+ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
+ rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
+ _rtl_usb_init_tx(hw);
+ _rtl_usb_init_rx(hw);
+ return err;
+}
+
+static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtlhal->hw = hw;
+ ppsc->inactiveps = false;
+ ppsc->leisure_ps = false;
+ ppsc->fwctrl_lps = false;
+ ppsc->reg_fwctrl_lps = 3;
+ ppsc->reg_max_lps_awakeintvl = 5;
+ ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* IBSS */
+ mac->beacon_interval = 100;
+
+ /* AMPDU */
+ mac->min_space_cfg = 0;
+ mac->max_mss_density = 0;
+
+ /* set sane AMPDU defaults */
+ mac->current_ampdu_density = 7;
+ mac->current_ampdu_factor = 3;
+
+ /* QOS */
+ rtlusb->acm_method = eAcmWay2_SW;
+
+ /* IRQ */
+ /* HIMR - turn all on */
+ rtlusb->irq_mask[0] = 0xFFFFFFFF;
+ /* HIMR_EX - turn all on */
+ rtlusb->irq_mask[1] = 0xFFFFFFFF;
+ rtlusb->disableHWSM = true;
+ return 0;
+}
+
+#define __RADIO_TAP_SIZE_RSV 32
+
+static void _rtl_rx_completed(struct urb *urb);
+
+static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
+ struct rtl_usb *rtlusb,
+ struct urb *urb,
+ gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
+ gfp_mask);
+ if (!skb) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to __dev_alloc_skb!!\n"))
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* reserve some space for mac80211's radiotap */
+ skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
+ usb_fill_bulk_urb(urb, rtlusb->udev,
+ usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
+ skb->data, min(skb_tailroom(skb),
+ (int)rtlusb->rx_max_size),
+ _rtl_rx_completed, skb);
+
+ _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
+ return skb;
+}
+
+#undef __RADIO_TAP_SIZE_RSV
+
+static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *rxdesc = skb->data;
+ struct ieee80211_hdr *hdr;
+ bool unicast = false;
+ __le16 fc;
+ struct ieee80211_rx_status rx_status = {0};
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ skb_pull(skb, RTL_RX_DESC_SIZE);
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+ skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+ if (!stats.crc) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ /*TODO*/;
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ /*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+ }
+}
+
+static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *rxdesc = skb->data;
+ struct ieee80211_hdr *hdr;
+ bool unicast = false;
+ __le16 fc;
+ struct ieee80211_rx_status rx_status = {0};
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ skb_pull(skb, RTL_RX_DESC_SIZE);
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+ skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+ if (!stats.crc) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ /*TODO*/;
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ /*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+ if (likely(rtl_action_proc(hw, skb, false))) {
+ struct sk_buff *uskb = NULL;
+ u8 *pdata;
+
+ uskb = dev_alloc_skb(skb->len + 128);
+ if (unlikely(!uskb)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+ sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ ieee80211_rx_irqsafe(hw, uskb);
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct sk_buff *_skb;
+ struct sk_buff_head rx_queue;
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ skb_queue_head_init(&rx_queue);
+ if (rtlusb->usb_rx_segregate_hdl)
+ rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
+ WARN_ON(skb_queue_empty(&rx_queue));
+ while (!skb_queue_empty(&rx_queue)) {
+ _skb = skb_dequeue(&rx_queue);
+ _rtl_usb_rx_process_agg(hw, _skb);
+ ieee80211_rx_irqsafe(hw, _skb);
+ }
+}
+
+static void _rtl_rx_completed(struct urb *_urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)_urb->context;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+ struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = 0;
+
+ if (unlikely(IS_USB_STOP(rtlusb)))
+ goto free;
+
+ if (likely(0 == _urb->status)) {
+ /* If this code were moved to work queue, would CPU
+ * utilization be improved? NOTE: We shall allocate another skb
+ * and reuse the original one.
+ */
+ skb_put(skb, _urb->actual_length);
+
+ if (likely(!rtlusb->usb_rx_segregate_hdl)) {
+ struct sk_buff *_skb;
+ _rtl_usb_rx_process_noagg(hw, skb);
+ _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
+ if (IS_ERR(_skb)) {
+ err = PTR_ERR(_skb);
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Can't allocate skb for bulk IN!\n"));
+ return;
+ }
+ skb = _skb;
+ } else {
+ /* TO DO */
+ _rtl_rx_pre_process(hw, skb);
+ printk(KERN_ERR "rtlwifi: rx agg not supported\n");
+ }
+ goto resubmit;
+ }
+
+ switch (_urb->status) {
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(_urb, &rtlusb->rx_submitted);
+ err = usb_submit_urb(_urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ usb_unanchor_urb(_urb);
+ goto free;
+ }
+ return;
+
+free:
+ dev_kfree_skb_irq(skb);
+}
+
+static int _rtl_usb_receive(struct ieee80211_hw *hw)
+{
+ struct urb *urb;
+ struct sk_buff *skb;
+ int err;
+ int i;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ WARN_ON(0 == rtlusb->rx_urb_num);
+ /* 1600 == 1514 + max WLAN header + rtk info */
+ WARN_ON(rtlusb->rx_max_size < 1600);
+
+ for (i = 0; i < rtlusb->rx_urb_num; i++) {
+ err = -ENOMEM;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to alloc URB!!\n"))
+ goto err_out;
+ }
+
+ skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
+ if (IS_ERR(skb)) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to prep_rx_urb!!\n"))
+ err = PTR_ERR(skb);
+ goto err_out;
+ }
+
+ usb_anchor_urb(urb, &rtlusb->rx_submitted);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err)
+ goto err_out;
+ usb_free_urb(urb);
+ }
+ return 0;
+
+err_out:
+ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+ return err;
+}
+
+static int rtl_usb_start(struct ieee80211_hw *hw)
+{
+ int err;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ err = rtlpriv->cfg->ops->hw_init(hw);
+ rtl_init_rx_config(hw);
+
+ /* Enable software */
+ SET_USB_START(rtlusb);
+ /* should be done after adapter start and interrupt enable. */
+ set_hal_start(rtlhal);
+
+ /* Start bulk IN */
+ _rtl_usb_receive(hw);
+
+ return err;
+}
+
+/*======================= tx =========================================*/
+static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+{
+ u32 i;
+ struct sk_buff *_skb;
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct ieee80211_tx_info *txinfo;
+
+ SET_USB_STOP(rtlusb);
+
+ /* clean up rx stuff. */
+ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+ /* clean up tx stuff */
+ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+ while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
+ rtlusb->usb_tx_cleanup(hw, _skb);
+ txinfo = IEEE80211_SKB_CB(_skb);
+ ieee80211_tx_info_clear_status(txinfo);
+ txinfo->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(hw, _skb);
+ }
+ usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
+ }
+ usb_kill_anchored_urbs(&rtlusb->tx_submitted);
+}
+
+/*
+ * We may add more members to struct rtl_usb later; do the deinit here.
+ */
+static void rtl_usb_deinit(struct ieee80211_hw *hw)
+{
+ rtl_usb_cleanup(hw);
+}
+
+static void rtl_usb_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ /* should be done after adapter start and interrupt enable. */
+ set_hal_stop(rtlhal);
+ /* Enable software */
+ SET_USB_STOP(rtlusb);
+ rtl_usb_deinit(hw);
+ rtlpriv->cfg->ops->hw_disable(hw);
+}
+
+static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
+{
+ int err;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ usb_anchor_urb(_urb, &rtlusb->tx_submitted);
+ err = usb_submit_urb(_urb, GFP_ATOMIC);
+ if (err < 0) {
+ struct sk_buff *skb;
+
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to submit urb.\n"));
+ usb_unanchor_urb(_urb);
+ skb = (struct sk_buff *)_urb->context;
+ kfree_skb(skb);
+ }
+ usb_free_urb(_urb);
+}
+
+static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct ieee80211_tx_info *txinfo;
+
+ rtlusb->usb_tx_post_hdl(hw, urb, skb);
+ skb_pull(skb, RTL_TX_HEADER_SIZE);
+ txinfo = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(txinfo);
+ txinfo->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (urb->status) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Urb has error status 0x%X\n", urb->status));
+ goto out;
+ }
+ /* TODO: statistics */
+out:
+ ieee80211_tx_status_irqsafe(hw, skb);
+ return urb->status;
+}
+
+static void _rtl_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+ struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+ int err;
+
+ if (unlikely(IS_USB_STOP(rtlusb)))
+ return;
+ err = _usb_tx_post(hw, urb, skb);
+ if (err) {
+ /* Ignore the error and keep issuing other urbs */
+ return;
+ }
+}
+
+static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
+ struct sk_buff *skb, u32 ep_num)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct urb *_urb;
+
+ WARN_ON(NULL == skb);
+ _urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!_urb) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Can't allocate URB for bulk out!\n"));
+ kfree_skb(skb);
+ return NULL;
+ }
+ _rtl_install_trx_info(rtlusb, skb, ep_num);
+ usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
+ ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
+ _urb->transfer_flags |= URB_ZERO_PACKET;
+ return _urb;
+}
+
+static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+ enum rtl_txq qnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ u32 ep_num;
+ struct urb *_urb = NULL;
+ struct sk_buff *_skb = NULL;
+ struct sk_buff_head *skb_list;
+ struct usb_anchor *urb_list;
+
+ WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
+ if (unlikely(IS_USB_STOP(rtlusb))) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("USB device is stopping...\n"));
+ kfree_skb(skb);
+ return;
+ }
+ ep_num = rtlusb->ep_map.ep_mapping[qnum];
+ skb_list = &rtlusb->tx_skb_queue[ep_num];
+ _skb = skb;
+ _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
+ if (unlikely(!_urb)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate urb. Drop skb!\n"));
+ return;
+ }
+ urb_list = &rtlusb->tx_pending[ep_num];
+ _rtl_submit_tx_urb(hw, _urb);
+}
+
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
+ u16 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_tx_desc *pdesc = NULL;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+ u8 *pda_addr = hdr->addr1;
+ /* ssn */
+ u8 *qc = NULL;
+ u8 tid = 0;
+ u16 seq_number = 0;
+
+ if (ieee80211_is_mgmt(fc))
+ rtl_tx_mgmt_proc(hw, skb);
+ rtl_action_proc(hw, skb, true);
+ if (is_multicast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesmulticast += skb->len;
+ else if (is_broadcast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesbroadcast += skb->len;
+ else
+ rtlpriv->stats.txbytesunicast += skb->len;
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+ IEEE80211_SCTL_SEQ) >> 4;
+ seq_number += 1;
+ seq_number <<= 4;
+ }
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+ hw_queue);
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ if (qc)
+ mac->tids[tid].seq_number = seq_number;
+ }
+ if (ieee80211_is_data(fc))
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+}
+
+static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+ u16 hw_queue;
+
+ if (unlikely(is_hal_stop(rtlhal)))
+ goto err_free;
+ hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
+ _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+ _rtl_usb_transmit(hw, skb, hw_queue);
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
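+ /* USB has no tx wait queue yet, so frames are never deferred here. */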
+ return false;
+}
+
+static struct rtl_intf_ops rtl_usb_ops = {
+ .adapter_start = rtl_usb_start,
+ .adapter_stop = rtl_usb_stop,
+ .adapter_tx = rtl_usb_tx,
+ .waitq_insert = rtl_usb_tx_chk_waitq_insert,
+};
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int err;
+ struct ieee80211_hw *hw = NULL;
+ struct rtl_priv *rtlpriv = NULL;
+ struct usb_device *udev;
+ struct rtl_usb_priv *usb_priv;
+
+ hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
+ sizeof(struct rtl_usb_priv), &rtl_ops);
+ if (!hw) {
+ RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
+ return -ENOMEM;
+ }
+ rtlpriv = hw->priv;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ udev = interface_to_usbdev(intf);
+ usb_get_dev(udev);
+ usb_priv = rtl_usbpriv(hw);
+ memset(usb_priv, 0, sizeof(*usb_priv));
+ usb_priv->dev.intf = intf;
+ usb_priv->dev.udev = udev;
+ usb_set_intfdata(intf, hw);
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_USB;
+ rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+ rtlpriv->intf_ops = &rtl_usb_ops;
+ rtl_dbgp_flag_init(hw);
+ /* Init IO handler */
+ _rtl_usb_io_handler_init(&udev->dev, hw);
+ rtlpriv->cfg->ops->read_chip_version(hw);
+ /*like read eeprom and so on */
+ rtlpriv->cfg->ops->read_eeprom_info(hw);
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't init_sw_vars.\n"));
+ goto error_out;
+ }
+ rtlpriv->cfg->ops->init_sw_leds(hw);
+ err = _rtl_usb_init(hw);
+ err = _rtl_usb_init_sw(hw);
+ /* Init mac80211 sw */
+ err = rtl_init_core(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate sw for mac80211.\n"));
+ goto error_out;
+ }
+
+ /*init rfkill */
+ /* rtl_init_rfkill(hw); */
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Can't register mac80211 hw.\n"));
+ goto error_out;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ return 0;
+error_out:
+ rtl_deinit_core(hw);
+ _rtl_usb_io_handler_release(hw);
+ ieee80211_free_hw(hw);
+ usb_put_dev(udev);
+ return -ENODEV;
+}
+EXPORT_SYMBOL(rtl_usb_probe);
+
+void rtl_usb_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ if (unlikely(!rtlpriv))
+ return;
+ /*ieee80211_unregister_hw will call ops_stop */
+ if (rtlmac->mac80211_registered == 1) {
+ ieee80211_unregister_hw(hw);
+ rtlmac->mac80211_registered = 0;
+ } else {
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+ }
+ /*deinit rfkill */
+ /* rtl_deinit_rfkill(hw); */
+ rtl_usb_deinit(hw);
+ rtl_deinit_core(hw);
+ rtlpriv->cfg->ops->deinit_sw_leds(hw);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ _rtl_usb_io_handler_release(hw);
+ usb_put_dev(rtlusb->udev);
+ usb_set_intfdata(intf, NULL);
+ ieee80211_free_hw(hw);
+}
+EXPORT_SYMBOL(rtl_usb_disconnect);
+
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+ return 0;
+}
+EXPORT_SYMBOL(rtl_usb_suspend);
+
+int rtl_usb_resume(struct usb_interface *pusb_intf)
+{
+ return 0;
+}
+EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
new file mode 100644
index 000000000000..abadfe918d30
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_USB_H__
+#define __RTL_USB_H__
+
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#define RTL_USB_DEVICE(vend, prod, cfg) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .driver_info = (kernel_ulong_t)&(cfg)
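+
+/* Typical use in a usb_device_id table (the IDs and cfg below are
+ * illustrative only, not part of this patch):
+ *   { RTL_USB_DEVICE(0x0bda, 0x8192, rtl92cu_hal_cfg) },
+ */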
+
+#define USB_HIGH_SPEED_BULK_SIZE 512
+#define USB_FULL_SPEED_BULK_SIZE 64
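+/* Standard USB bulk max-packet sizes; _rtl_usb_init_tx() in usb.c selects one
+ * of these according to the negotiated device speed. */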
+
+
+#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
+#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
+#define RTL_USB_MAX_TX_URBS_NUM 8
+
+enum rtl_txq {
+ /* These definitions shall be consistent with value
+ * returned by skb_get_queue_mapping
+ *------------------------------------*/
+ RTL_TXQ_BK,
+ RTL_TXQ_BE,
+ RTL_TXQ_VI,
+ RTL_TXQ_VO,
+ /*------------------------------------*/
+ RTL_TXQ_BCN,
+ RTL_TXQ_MGT,
+ RTL_TXQ_HI,
+
+ /* Must be last */
+ __RTL_TXQ_NUM,
+};
+
+struct rtl_ep_map {
+ u32 ep_mapping[__RTL_TXQ_NUM];
+};
+
+struct _trx_info {
+ struct rtl_usb *rtlusb;
+ u32 ep_num;
+};
+
+static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
+ struct sk_buff *skb,
+ u32 ep_num)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ info->rate_driver_data[0] = rtlusb;
+ info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
+}
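+
+/* The URB completion handlers in usb.c (_rtl_rx_completed, _rtl_tx_complete)
+ * read rate_driver_data[0] back from IEEE80211_SKB_CB() to recover the
+ * owning struct rtl_usb. */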
+
+
+/* Add suspend/resume later */
+enum rtl_usb_state {
+ USB_STATE_STOP = 0,
+ USB_STATE_START = 1,
+};
+
+#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state)
+#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state)
+#define SET_USB_STOP(rtlusb_ptr) \
+ do { \
+ (rtlusb_ptr)->state = USB_STATE_STOP; \
+ } while (0)
+
+#define SET_USB_START(rtlusb_ptr) \
+ do { \
+ (rtlusb_ptr)->state = USB_STATE_START; \
+ } while (0)
+
+struct rtl_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ enum rtl_usb_state state;
+
+ /* Bcn control register setting */
+ u32 reg_bcn_ctrl_val;
+ /* for 88/92cu card disable */
+ u8 disableHWSM;
+ /*QOS & EDCA */
+ enum acm_method acm_method;
+ /* irq . HIMR,HIMR_EX */
+ u32 irq_mask[2];
+ bool irq_enabled;
+
+ u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+
+ /* Tx */
+ u8 out_ep_nums;
+ u8 out_queue_sel;
+ struct rtl_ep_map ep_map;
+
+ u32 max_bulk_out_size;
+ u32 tx_submitted_urbs;
+ struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM];
+
+ struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM];
+ struct usb_anchor tx_submitted;
+
+ struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+ struct sk_buff_head *);
+ int (*usb_tx_post_hdl)(struct ieee80211_hw *,
+ struct urb *, struct sk_buff *);
+ void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+
+ /* Rx */
+ u8 in_ep_nums;
+ u32 in_ep; /* Bulk IN endpoint number */
+ u32 rx_max_size; /* Bulk IN max buffer size */
+ u32 rx_urb_num; /* How many Bulk INs are submitted to host. */
+ struct usb_anchor rx_submitted;
+ void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+ struct sk_buff_head *);
+ void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+};
+
+struct rtl_usb_priv {
+ struct rtl_usb dev;
+ struct rtl_led_ctl ledctl;
+};
+
+#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
+#define rtl_usbdev(usbpriv) (&((usbpriv)->dev))
+
+
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+void rtl_usb_disconnect(struct usb_interface *intf);
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
+int rtl_usb_resume(struct usb_interface *pusb_intf);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index d44d79613d2d..01226f8e70f9 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -34,6 +34,8 @@
#include <linux/firmware.h>
#include <linux/version.h>
#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
#include "debug.h"
@@ -82,6 +84,19 @@
#define MAC80211_3ADDR_LEN 24
#define MAC80211_4ADDR_LEN 30
+#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
+#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
+#define MAX_PG_GROUP 13
+#define CHANNEL_GROUP_MAX_2G 3
+#define CHANNEL_GROUP_IDX_5GL 3
+#define CHANNEL_GROUP_IDX_5GM 6
+#define CHANNEL_GROUP_IDX_5GH 9
+#define CHANNEL_GROUP_MAX_5G 9
+#define CHANNEL_MAX_NUMBER_2G 14
+#define AVG_THERMAL_NUM 8
+
+/* for early mode */
+#define EM_HDR_LEN 8
enum intf_type {
INTF_PCI = 0,
INTF_USB = 1,
@@ -113,11 +128,38 @@ enum hardware_type {
HARDWARE_TYPE_RTL8192CU,
HARDWARE_TYPE_RTL8192DE,
HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723E,
+ HARDWARE_TYPE_RTL8723U,
- /*keep it last*/
+ /* keep it last */
HARDWARE_TYPE_NUM
};
+#define IS_HARDWARE_TYPE_8192SU(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU)
+#define IS_HARDWARE_TYPE_8192SE(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+#define IS_HARDWARE_TYPE_8192CE(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
+#define IS_HARDWARE_TYPE_8192CU(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
+#define IS_HARDWARE_TYPE_8192DE(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+#define IS_HARDWARE_TYPE_8192DU(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU)
+#define IS_HARDWARE_TYPE_8723E(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E)
+#define IS_HARDWARE_TYPE_8723U(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
+#define IS_HARDWARE_TYPE_8192S(rtlhal) \
+(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal))
+#define IS_HARDWARE_TYPE_8192C(rtlhal) \
+(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal))
+#define IS_HARDWARE_TYPE_8192D(rtlhal) \
+(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
+#define IS_HARDWARE_TYPE_8723(rtlhal) \
+(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
+
enum scan_operation_backup_opt {
SCAN_OPT_BACKUP = 0,
SCAN_OPT_RESTORE,
@@ -315,6 +357,7 @@ enum rf_type {
RF_1T1R = 0,
RF_1T2R = 1,
RF_2T2R = 2,
+ RF_2T2R_GREEN = 3,
};
enum ht_channel_width {
@@ -359,6 +402,8 @@ enum rtl_var_map {
EFUSE_LOADER_CLK_EN,
EFUSE_ANA8M,
EFUSE_HWSET_MAX_SIZE,
+ EFUSE_MAX_SECTION_MAP,
+ EFUSE_REAL_CONTENT_SIZE,
/*CAM map */
RWCAM,
@@ -397,6 +442,7 @@ enum rtl_var_map {
RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
+ RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
RTL_IMR_TBDOK, /*Transmit Beacon OK interrupt */
RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
@@ -405,7 +451,8 @@ enum rtl_var_map {
RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
RTL_IMR_ROK, /*Receive DMA OK Interrupt */
- RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/
+ RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+ * RTL_IMR_TBDER) */
/*CCK Rates, TxHT = 0 */
RTL_RC_CCK_RATE1M,
@@ -481,6 +528,19 @@ enum acm_method {
eAcmWay2_SW = 2,
};
+enum macphy_mode {
+ SINGLEMAC_SINGLEPHY = 0,
+ DUALMAC_DUALPHY,
+ DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+ BAND_ON_2_4G = 0,
+ BAND_ON_5G,
+ BAND_ON_BOTH,
+ BANDMAX
+};
+
/*aci/aifsn Field.
Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
union aci_aifsn {
@@ -505,6 +565,17 @@ enum wireless_mode {
WIRELESS_MODE_N_5G = 0x20
};
+#define IS_WIRELESS_MODE_A(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_A)
+#define IS_WIRELESS_MODE_B(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_B)
+#define IS_WIRELESS_MODE_G(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_G)
+#define IS_WIRELESS_MODE_N_24G(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_N_24G)
+#define IS_WIRELESS_MODE_N_5G(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_N_5G)
+
enum ratr_table_mode {
RATR_INX_WIRELESS_NGB = 0,
RATR_INX_WIRELESS_NG = 1,
@@ -574,11 +645,11 @@ struct rtl_probe_rsp {
struct rtl_led {
void *hw;
enum rtl_led_pin ledpin;
- bool b_ledon;
+ bool ledon;
};
struct rtl_led_ctl {
- bool bled_opendrain;
+ bool led_opendrain;
struct rtl_led sw_led0;
struct rtl_led sw_led1;
};
@@ -603,6 +674,8 @@ struct false_alarm_statistics {
u32 cnt_rate_illegal;
u32 cnt_crc8_fail;
u32 cnt_mcs_fail;
+ u32 cnt_fast_fsync_fail;
+ u32 cnt_sb_search_fail;
u32 cnt_ofdm_fail;
u32 cnt_cck_fail;
u32 cnt_all;
@@ -690,6 +763,32 @@ struct rtl_rfkill {
bool rfkill_state; /*0 is off, 1 is on */
};
+#define IQK_MATRIX_REG_NUM 8
+#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
+struct iqk_matrix_regs {
+ bool b_iqk_done;
+ long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct phy_parameters {
+ u16 length;
+ u32 *pdata;
+};
+
+enum hw_param_tab_index {
+ PHY_REG_2T,
+ PHY_REG_1T,
+ PHY_REG_PG,
+ RADIOA_2T,
+ RADIOB_2T,
+ RADIOA_1T,
+ RADIOB_1T,
+ MAC_REG,
+ AGCTAB_2T,
+ AGCTAB_1T,
+ MAX_TAB
+};
+
struct rtl_phy {
struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
struct init_gain initgain_backup;
@@ -705,8 +804,9 @@ struct rtl_phy {
u8 current_channel;
u8 h2c_box_num;
u8 set_io_inprogress;
+ u8 lck_inprogress;
- /*record for power tracking*/
+ /* record for power tracking */
s32 reg_e94;
s32 reg_e9c;
s32 reg_ea4;
@@ -723,26 +823,32 @@ struct rtl_phy {
u32 iqk_mac_backup[IQK_MAC_REG_NUM];
u32 iqk_bb_backup[10];
- bool b_rfpi_enable;
+ /* Dual mac */
+ bool need_iqk;
+ struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+ bool rfpi_enable;
u8 pwrgroup_cnt;
- u8 bcck_high_power;
- /* 3 groups of pwr diff by rates*/
- u32 mcs_txpwrlevel_origoffset[4][16];
+ u8 cck_high_power;
+ /* MAX_PG_GROUP groups of pwr diff by rates */
+ u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
u8 default_initialgain[4];
- /*the current Tx power level*/
+ /* the current Tx power level */
u8 cur_cck_txpwridx;
u8 cur_ofdm24g_txpwridx;
u32 rfreg_chnlval[2];
- bool b_apk_done;
+ bool apk_done;
+ u32 reg_rf3c[2]; /* pathA / pathB */
- /*fsync*/
u8 framesync;
u32 framesync_c34;
u8 num_total_rfpath;
+ struct phy_parameters hwparam_tables[MAX_TAB];
+ u16 rf_pathmap;
};
#define MAX_TID_COUNT 9
@@ -768,6 +874,7 @@ struct rtl_tid_data {
struct rtl_priv;
struct rtl_io {
struct device *dev;
+ struct mutex bb_mutex;
/*PCI MEM map */
unsigned long pci_mem_end; /*shared mem end */
@@ -779,11 +886,14 @@ struct rtl_io {
void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
-
- u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
- u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
- u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
-
+ int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata);
+
+ u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata);
};
struct rtl_mac {
@@ -815,16 +925,24 @@ struct rtl_mac {
bool act_scanning;
u8 cnt_after_linked;
- /*RDG*/ bool rdg_en;
+ /* early mode */
+ /* skb wait queue */
+ struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+ u8 earlymode_threshold;
+
+ /*RDG*/
+ bool rdg_en;
- /*AP*/ u8 bssid[6];
- u8 mcs[16]; /*16 bytes mcs for HT rates.*/
- u32 basic_rates; /*b/g rates*/
+ /*AP*/
+ u8 bssid[6];
+ u32 vendor;
+ u8 mcs[16]; /* 16 bytes mcs for HT rates. */
+ u32 basic_rates; /* b/g rates */
u8 ht_enable;
u8 sgi_40;
u8 sgi_20;
u8 bw_40;
- u8 mode; /*wireless mode*/
+ u8 mode; /* wireless mode */
u8 slot_time;
u8 short_preamble;
u8 use_cts_protect;
@@ -835,9 +953,11 @@ struct rtl_mac {
u8 retry_long;
u16 assoc_id;
- /*IBSS*/ int beacon_interval;
+ /*IBSS*/
+ int beacon_interval;
- /*AMPDU*/ u8 min_space_cfg; /*For Min spacing configurations */
+ /*AMPDU*/
+ u8 min_space_cfg; /*For Min spacing configurations */
u8 max_mss_density;
u8 current_ampdu_factor;
u8 current_ampdu_density;
@@ -852,17 +972,54 @@ struct rtl_hal {
enum intf_type interface;
u16 hw_type; /*92c or 92d or 92s and so on */
+ u8 ic_class;
u8 oem_id;
- u8 version; /*version of chip */
+ u32 version; /*version of chip */
u8 state; /*stop 0, start 1 */
/*firmware */
+ u32 fwsize;
u8 *pfirmware;
- bool b_h2c_setinprogress;
+ u16 fw_version;
+ u16 fw_subversion;
+ bool h2c_setinprogress;
u8 last_hmeboxnum;
- bool bfw_ready;
+ bool fw_ready;
/*Reserve page start offset except beacon in TxQ. */
u8 fw_rsvdpage_startoffset;
+ u8 h2c_txcmd_seq;
+
+ /* FW Cmd IO related */
+ u16 fwcmd_iomap;
+ u32 fwcmd_ioparam;
+ bool set_fwcmd_inprogress;
+ u8 current_fwcmd_io;
+
+ /**/
+ bool driver_going2unload;
+
+ /*AMPDU init min space*/
+ u8 minspace_cfg; /*For Min spacing configurations */
+
+ /* Dual mac */
+ enum macphy_mode macphymode;
+ enum band_type current_bandtype; /* 0:2.4G, 1:5G */
+ enum band_type current_bandtypebackup;
+ enum band_type bandset;
+ /* dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceindex;
+ /* just for DualMac S3S4 */
+ u8 macphyctl_reg;
+ bool earlymode_enable;
+ /* Dual mac*/
+ bool during_mac0init_radiob;
+ bool during_mac1init_radioa;
+ bool reloadtxpowerindex;
+ /* True if IMR or IQK has been done for 2.4G during a scan */
+ bool load_imrandiqk_setting_for2g;
+
+ bool disable_amsdu_8k;
};
struct rtl_security {
@@ -887,48 +1044,61 @@ struct rtl_security {
};
struct rtl_dm {
- /*PHY status for DM */
+ /*PHY status for Dynamic Management */
long entry_min_undecoratedsmoothed_pwdb;
long undecorated_smoothed_pwdb; /*out dm */
long entry_max_undecoratedsmoothed_pwdb;
- bool b_dm_initialgain_enable;
- bool bdynamic_txpower_enable;
- bool bcurrent_turbo_edca;
- bool bis_any_nonbepkts; /*out dm */
- bool bis_cur_rdlstate;
- bool btxpower_trackingInit;
- bool b_disable_framebursting;
- bool b_cck_inch14;
- bool btxpower_tracking;
- bool b_useramask;
- bool brfpath_rxenable[4];
-
+ bool dm_initialgain_enable;
+ bool dynamic_txpower_enable;
+ bool current_turbo_edca;
+ bool is_any_nonbepkts; /*out dm */
+ bool is_cur_rdlstate;
+ bool txpower_trackingInit;
+ bool disable_framebursting;
+ bool cck_inch14;
+ bool txpower_tracking;
+ bool useramask;
+ bool rfpath_rxenable[4];
+ bool inform_fw_driverctrldm;
+ bool current_mrc_switch;
+ u8 txpowercount;
+
+ u8 thermalvalue_rxgain;
u8 thermalvalue_iqk;
u8 thermalvalue_lck;
u8 thermalvalue;
u8 last_dtp_lvl;
+ u8 thermalvalue_avg[AVG_THERMAL_NUM];
+ u8 thermalvalue_avg_index;
+ bool done_txpower;
u8 dynamic_txhighpower_lvl; /*Tx high power level */
- u8 dm_flag; /*Indicate if each dynamic mechanism's status. */
+ u8 dm_flag; /*Indicate each dynamic mechanism's status. */
u8 dm_type;
u8 txpower_track_control;
-
+ bool interrupt_migration;
+ bool disable_tx_int;
char ofdm_index[2];
char cck_index;
+ u8 power_index_backup[6];
};
-#define EFUSE_MAX_LOGICAL_SIZE 128
+#define EFUSE_MAX_LOGICAL_SIZE 256
struct rtl_efuse {
- bool bautoLoad_ok;
+ bool autoLoad_ok;
bool bootfromefuse;
u16 max_physical_size;
- u8 contents[EFUSE_MAX_LOGICAL_SIZE];
u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
u16 efuse_usedbytes;
u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+ bool efuse_re_pg_sec1flag;
+ u8 efuse_re_pg_data[8];
+#endif
u8 autoload_failflag;
+ u8 autoload_status;
short epromtype;
u16 eeprom_vid;
@@ -938,69 +1108,90 @@ struct rtl_efuse {
u8 eeprom_oemid;
u16 eeprom_channelplan;
u8 eeprom_version;
+ u8 board_type;
+ u8 external_pa;
u8 dev_addr[6];
- bool b_txpwr_fromeprom;
+ bool txpwr_fromeprom;
+ u8 eeprom_crystalcap;
u8 eeprom_tssi[2];
- u8 eeprom_pwrlimit_ht20[3];
- u8 eeprom_pwrlimit_ht40[3];
- u8 eeprom_chnlarea_txpwr_cck[2][3];
- u8 eeprom_chnlarea_txpwr_ht40_1s[2][3];
- u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3];
- u8 txpwrlevel_cck[2][14];
- u8 txpwrlevel_ht40_1s[2][14]; /*For HT 40MHZ pwr */
- u8 txpwrlevel_ht40_2s[2][14]; /*For HT 40MHZ pwr */
+ u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+ u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+ u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+ u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+ u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
+ u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
+
+ u8 internal_pa_5g[2]; /* pathA / pathB */
+ u8 eeprom_c9;
+ u8 eeprom_cc;
/*For power group */
- u8 pwrgroup_ht20[2][14];
- u8 pwrgroup_ht40[2][14];
-
- char txpwr_ht20diff[2][14]; /*HT 20<->40 Pwr diff */
- u8 txpwr_legacyhtdiff[2][14]; /*For HT<->legacy pwr diff */
+ u8 eeprom_pwrgroup[2][3];
+ u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+ char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
+ /*For HT<->legacy pwr diff*/
+ u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+ u8 txpwr_safetyflag; /* Band edge enable flag */
+ u16 eeprom_txpowerdiff;
+ u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
+ u8 antenna_txpwdiff[3];
u8 eeprom_regulatory;
u8 eeprom_thermalmeter;
- /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
- u8 thermalmeter[2];
+ u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
+ u16 tssi_13dbm;
+ u8 crystalcap; /* CrystalCap. */
+ u8 delta_iqk;
+ u8 delta_lck;
u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
- bool b_apk_thermalmeterignore;
+ bool apk_thermalmeterignore;
+
+ bool b1x1_recvcombine;
+ bool b1ss_support;
+
+ /*channel plan */
+ u8 channel_plan;
};
struct rtl_ps_ctl {
+ bool pwrdomain_protect;
bool set_rfpowerstate_inprogress;
- bool b_in_powersavemode;
+ bool in_powersavemode;
bool rfchange_inprogress;
- bool b_swrf_processing;
- bool b_hwradiooff;
-
- u32 last_sleep_jiffies;
- u32 last_awake_jiffies;
- u32 last_delaylps_stamp_jiffies;
+ bool swrf_processing;
+ bool hwradiooff;
/*
* just for PCIE ASPM
* If it supports ASPM, Offset[560h] = 0x40,
* otherwise Offset[560h] = 0x00.
* */
- bool b_support_aspm;
- bool b_support_backdoor;
+ bool support_aspm;
+ bool support_backdoor;
/*for LPS */
enum rt_psmode dot11_psmode; /*Power save mode configured. */
- bool b_leisure_ps;
- bool b_fwctrl_lps;
+ bool swctrl_lps;
+ bool leisure_ps;
+ bool fwctrl_lps;
u8 fwctrl_psmode;
/*For Fw control LPS mode */
- u8 b_reg_fwctrl_lps;
+ u8 reg_fwctrl_lps;
/*Record Fw PS mode status. */
- bool b_fw_current_inpsmode;
+ bool fw_current_inpsmode;
u8 reg_max_lps_awakeintvl;
bool report_linked;
/*for IPS */
- bool b_inactiveps;
+ bool inactiveps;
u32 rfoff_reason;
@@ -1011,8 +1202,26 @@ struct rtl_ps_ctl {
/*just for PCIE ASPM */
u8 const_amdpci_aspm;
+ bool pwrdown_mode;
+
enum rf_pwrstate inactive_pwrstate;
enum rf_pwrstate rfpwr_state; /*cur power state */
+
+ /* for SW LPS*/
+ bool sw_ps_enabled;
+ bool state;
+ bool state_inap;
+ bool multi_buffered;
+ u16 nullfunc_seq;
+ unsigned int dtim_counter;
+ unsigned int sleep_ms;
+ unsigned long last_sleep_jiffies;
+ unsigned long last_awake_jiffies;
+ unsigned long last_delaylps_stamp_jiffies;
+ unsigned long last_dtim;
+ unsigned long last_beacon;
+ unsigned long last_action;
+ unsigned long last_slept;
};
struct rtl_stats {
@@ -1038,10 +1247,10 @@ struct rtl_stats {
s32 recvsignalpower;
s8 rxpower; /*in dBm Translate from PWdB */
u8 signalstrength; /*in 0-100 index. */
- u16 b_hwerror:1;
- u16 b_crc:1;
- u16 b_icv:1;
- u16 b_shortpreamble:1;
+ u16 hwerror:1;
+ u16 crc:1;
+ u16 icv:1;
+ u16 shortpreamble:1;
u16 antenna:1;
u16 decrypted:1;
u16 wakeup:1;
@@ -1050,15 +1259,16 @@ struct rtl_stats {
u8 rx_drvinfo_size;
u8 rx_bufshift;
- bool b_isampdu;
+ bool isampdu;
+ bool isfirst_ampdu;
bool rx_is40Mhzpacket;
u32 rx_pwdb_all;
u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
s8 rx_mimo_signalquality[2];
- bool b_packet_matchbssid;
- bool b_is_cck;
- bool b_packet_toself;
- bool b_packet_beacon; /*for rssi */
+ bool packet_matchbssid;
+ bool is_cck;
+ bool packet_toself;
+ bool packet_beacon; /*for rssi */
char cck_adc_pwdb[4]; /*for rx path selection */
};
@@ -1069,23 +1279,23 @@ struct rt_link_detect {
u32 num_tx_inperiod;
u32 num_rx_inperiod;
- bool b_busytraffic;
- bool b_higher_busytraffic;
- bool b_higher_busyrxtraffic;
+ bool busytraffic;
+ bool higher_busytraffic;
+ bool higher_busyrxtraffic;
};
struct rtl_tcb_desc {
- u8 b_packet_bw:1;
- u8 b_multicast:1;
- u8 b_broadcast:1;
-
- u8 b_rts_stbc:1;
- u8 b_rts_enable:1;
- u8 b_cts_enable:1;
- u8 b_rts_use_shortpreamble:1;
- u8 b_rts_use_shortgi:1;
+ u8 packet_bw:1;
+ u8 multicast:1;
+ u8 broadcast:1;
+
+ u8 rts_stbc:1;
+ u8 rts_enable:1;
+ u8 cts_enable:1;
+ u8 rts_use_shortpreamble:1;
+ u8 rts_use_shortgi:1;
u8 rts_sc:1;
- u8 b_rts_bw:1;
+ u8 rts_bw:1;
u8 rts_rate;
u8 use_shortgi:1;
@@ -1096,20 +1306,34 @@ struct rtl_tcb_desc {
u8 ratr_index;
u8 mac_id;
u8 hw_rate;
+
+ u8 last_inipkt:1;
+ u8 cmd_or_init:1;
+ u8 queue_index;
+
+ /* early mode */
+ u8 empkt_num;
+ /* The max value by HW */
+ u32 empkt_len[5];
};
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
+ void (*read_chip_version)(struct ieee80211_hw *hw);
void (*read_eeprom_info) (struct ieee80211_hw *hw);
void (*interrupt_recognized) (struct ieee80211_hw *hw,
u32 *p_inta, u32 *p_intb);
int (*hw_init) (struct ieee80211_hw *hw);
void (*hw_disable) (struct ieee80211_hw *hw);
+ void (*hw_suspend) (struct ieee80211_hw *hw);
+ void (*hw_resume) (struct ieee80211_hw *hw);
void (*enable_interrupt) (struct ieee80211_hw *hw);
void (*disable_interrupt) (struct ieee80211_hw *hw);
int (*set_network_type) (struct ieee80211_hw *hw,
enum nl80211_iftype type);
+ void (*set_chk_bssid)(struct ieee80211_hw *hw,
+ bool check_bssid);
void (*set_bw_mode) (struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
u8(*switch_channel) (struct ieee80211_hw *hw);
@@ -1126,23 +1350,26 @@ struct rtl_hal_ops {
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
struct ieee80211_tx_info *info,
struct sk_buff *skb, unsigned int queue_index);
+ void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
+ u32 buffer_len, bool bIsPsPoll);
void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
- bool b_firstseg, bool b_lastseg,
+ bool firstseg, bool lastseg,
struct sk_buff *skb);
- bool(*query_rx_desc) (struct ieee80211_hw *hw,
+ bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb);
+ bool (*query_rx_desc) (struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
void (*set_channel_access) (struct ieee80211_hw *hw);
- bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
+ bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
void (*dm_watchdog) (struct ieee80211_hw *hw);
void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
- bool(*set_rf_power_state) (struct ieee80211_hw *hw,
+ bool (*set_rf_power_state) (struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state);
void (*led_control) (struct ieee80211_hw *hw,
enum led_ctl_mode ledaction);
void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
- u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+ u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
void (*enable_hw_sec) (struct ieee80211_hw *hw);
void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1150,22 +1377,36 @@ struct rtl_hal_ops {
bool is_wepkey, bool clear_all);
void (*init_sw_leds) (struct ieee80211_hw *hw);
void (*deinit_sw_leds) (struct ieee80211_hw *hw);
- u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+ u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
u32 data);
- u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
u32 regaddr, u32 bitmask);
void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
u32 regaddr, u32 bitmask, u32 data);
+ bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
+ void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
+ u8 *powerlevel);
+ void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+ bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
+ u8 configtype);
+ bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
+ u8 configtype);
+ void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
+ void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
+ void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
};
struct rtl_intf_ops {
/*com */
+ void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
int (*adapter_start) (struct ieee80211_hw *hw);
void (*adapter_stop) (struct ieee80211_hw *hw);
int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
int (*reset_trx_ring) (struct ieee80211_hw *hw);
+ bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
/*pci */
void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1179,11 +1420,36 @@ struct rtl_mod_params {
int sw_crypto;
};
+struct rtl_hal_usbint_cfg {
+ /* data - rx */
+ u32 in_ep_num;
+ u32 rx_urb_num;
+ u32 rx_max_size;
+
+ /* op - rx */
+ void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+ void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+ struct sk_buff_head *);
+
+ /* tx */
+ void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+ int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *,
+ struct sk_buff *);
+ struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+ struct sk_buff_head *);
+
+ /* endpoint mapping */
+ int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
+ u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+};
+
struct rtl_hal_cfg {
+ u8 bar_id;
char *name;
char *fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
+ struct rtl_hal_usbint_cfg *usb_interface_cfg;
/*this map used for some registers or vars
defined int HAL but used in MAIN */
@@ -1202,6 +1468,11 @@ struct rtl_locks {
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
spinlock_t lps_lock;
+ spinlock_t waitq_lock;
+ spinlock_t tx_urb_lock;
+
+ /*Dual mac*/
+ spinlock_t cck_and_rw_pagea_lock;
};
struct rtl_works {
@@ -1218,12 +1489,20 @@ struct rtl_works {
struct workqueue_struct *rtl_wq;
struct delayed_work watchdog_wq;
struct delayed_work ips_nic_off_wq;
+
+ /* For SW LPS */
+ struct delayed_work ps_work;
+ struct delayed_work ps_rfon_wq;
};
struct rtl_debug {
u32 dbgp_type[DBGP_TYPE_MAX];
u32 global_debuglevel;
u64 global_debugcomponents;
+
+ /* add for proc debug */
+ struct proc_dir_entry *proc_dir;
+ char proc_name[20];
};
struct rtl_priv {
@@ -1274,6 +1553,91 @@ struct rtl_priv {
#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
+
+/***************************************
+ Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+ ANT_X2 = 0,
+ ANT_X1 = 1,
+};
+
+enum bt_co_type {
+ BT_2WIRE = 0,
+ BT_ISSC_3WIRE = 1,
+ BT_ACCEL = 2,
+ BT_CSR_BC4 = 3,
+ BT_CSR_BC8 = 4,
+ BT_RTL8756 = 5,
+};
+
+enum bt_cur_state {
+ BT_OFF = 0,
+ BT_ON = 1,
+};
+
+enum bt_service_type {
+ BT_SCO = 0,
+ BT_A2DP = 1,
+ BT_HID = 2,
+ BT_HID_IDLE = 3,
+ BT_SCAN = 4,
+ BT_IDLE = 5,
+ BT_OTHER_ACTION = 6,
+ BT_BUSY = 7,
+ BT_OTHERBUSY = 8,
+ BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+ BT_RADIO_SHARED = 0,
+ BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+ /* EEPROM BT info. */
+ u8 eeprom_bt_coexist;
+ u8 eeprom_bt_type;
+ u8 eeprom_bt_ant_num;
+ u8 eeprom_bt_ant_isolation;
+ u8 eeprom_bt_radio_shared;
+
+ u8 bt_coexistence;
+ u8 bt_ant_num;
+ u8 bt_coexist_type;
+ u8 bt_state;
+ u8 bt_cur_state; /* 0:on, 1:off */
+ u8 bt_ant_isolation; /* 0:good, 1:bad */
+ u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
+ u8 bt_service;
+ u8 bt_radio_shared_type;
+ u8 bt_rfreg_origin_1e;
+ u8 bt_rfreg_origin_1f;
+ u8 bt_rssi_state;
+ u32 ratio_tx;
+ u32 ratio_pri;
+ u32 bt_edca_ul;
+ u32 bt_edca_dl;
+
+ bool b_init_set;
+ bool b_bt_busy_traffic;
+ bool b_bt_traffic_mode_set;
+ bool b_bt_non_traffic_mode_set;
+
+ bool b_fw_coexist_all_off;
+ bool b_sw_coexist_all_off;
+ u32 current_state;
+ u32 previous_state;
+ u8 bt_pre_rssi_state;
+
+ u8 b_reg_bt_iso;
+ u8 b_reg_bt_sco;
+
+};
+
+
/****************************************
mem access macro define start
Call endian free function when
@@ -1281,7 +1645,7 @@ struct rtl_priv {
2. Before write integer to IO.
3. After read integer from IO.
****************************************/
-/* Convert little data endian to host */
+/* Convert little-endian data to host ordering */
#define EF1BYTE(_val) \
((u8)(_val))
#define EF2BYTE(_val) \
@@ -1289,27 +1653,21 @@ struct rtl_priv {
#define EF4BYTE(_val) \
(le32_to_cpu(_val))
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
+/* Read le16 data from memory and convert to host ordering */
#define READEF2BYTE(_ptr) \
EF2BYTE(*((u16 *)(_ptr)))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*((u32 *)(_ptr)))
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val) \
- (*((u8 *)(_ptr))) = EF1BYTE(_val)
+/* Write le16 data to memory in host ordering */
#define WRITEEF2BYTE(_ptr, _val) \
(*((u16 *)(_ptr))) = EF2BYTE(_val)
-#define WRITEEF4BYTE(_ptr, _val) \
- (*((u32 *)(_ptr))) = EF4BYTE(_val)
-
-/*Example:
-BIT_LEN_MASK_32(0) => 0x00000000
-BIT_LEN_MASK_32(1) => 0x00000001
-BIT_LEN_MASK_32(2) => 0x00000003
-BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
#define BIT_LEN_MASK_32(__bitlen) \
(0xFFFFFFFF >> (32 - (__bitlen)))
#define BIT_LEN_MASK_16(__bitlen) \
@@ -1317,9 +1675,11 @@ BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
#define BIT_LEN_MASK_8(__bitlen) \
(0xFF >> (8 - (__bitlen)))
-/*Example:
-BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
-BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
(BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
@@ -1328,8 +1688,9 @@ BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
(BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
/*Description:
-Return 4-byte value in host byte ordering from
-4-byte pointer in little-endian system.*/
+ * Return 4-byte value in host byte ordering from
+ * 4-byte pointer in little-endian system.
+ */
#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
(EF4BYTE(*((u32 *)(__pstart))))
#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
@@ -1337,28 +1698,10 @@ Return 4-byte value in host byte ordering from
#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
(EF1BYTE(*((u8 *)(__pstart))))
-/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_8(__bitlen) \
- )
-
-/*Description:
-Mask subfield (continuous bits in little-endian) of 4-byte value
-and return the result in 4-byte value in host byte ordering.*/
+/* Description:
+ * Mask subfield (continuous bits in little-endian) of 4-byte value
+ * and return the result in 4-byte value in host byte ordering.
+ */
#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
( \
LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
@@ -1375,20 +1718,9 @@ and return the result in 4-byte value in host byte ordering.*/
(~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
)
-/*Description:
-Set subfield of little-endian 4-byte value to specified value. */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u32 *)(__pstart)) = EF4BYTE \
- ( \
- LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
- );
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u16 *)(__pstart)) = EF2BYTE \
- ( \
- LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
- );
+/* Description:
+ * Set subfield of little-endian 4-byte value to specified value.
+ */
#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
*((u8 *)(__pstart)) = EF1BYTE \
( \
@@ -1400,13 +1732,14 @@ Set subfield of little-endian 4-byte value to specified value. */
mem access macro define end
****************************************/
-#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
+#define byte(x, n) (((x) >> (8 * (n))) & 0xff) /* byte n (0 = LSB) of x */
+
#define RTL_WATCH_DOG_TIME 2000
#define MSECS(t) msecs_to_jiffies(t)
-#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
+#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
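SEQ_TO_SN()/SN_TO_SEQ() simply move the 12-bit sequence number in and out of the 16-bit sequence-control field, whose low four bits carry the fragment number. A small worked example, assuming the usual IEEE80211_SCTL_SEQ mask of 0xFFF0 from mac80211:

    u16 seq_ctrl = 0x1230;          /* fragment 0, sequence number 0x123 */
    u16 sn  = SEQ_TO_SN(seq_ctrl);  /* (0x1230 & 0xFFF0) >> 4 == 0x123 */
    u16 seq = SN_TO_SEQ(sn);        /* (0x123 << 4) & 0xFFF0 == 0x1230 */
    /* MAX_SN evaluates to 0xFFF, the largest 12-bit sequence number */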
@@ -1420,6 +1753,8 @@ Set subfield of little-endian 4-byte value to specified value. */
#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
/*Always enable ASPM and Clock Req in initialization.*/
#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
+/* Set PS_ASPM_LEVL regardless of RFOFF or SLEEP */
+#define RT_PS_LEVEL_ASPM BIT(7)
/*When LPS is on, disable 2R if no packet is received or transmitted.*/
#define RT_RF_LPS_DISALBE_2R BIT(30)
#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
@@ -1433,15 +1768,6 @@ Set subfield of little-endian 4-byte value to specified value. */
#define container_of_dwork_rtl(x, y, z) \
container_of(container_of(x, struct delayed_work, work), y, z)
-#define FILL_OCTET_STRING(_os, _octet, _len) \
- (_os).octet = (u8 *)(_octet); \
- (_os).length = (_len);
-
-#define CP_MACADDR(des, src) \
- ((des)[0] = (src)[0], (des)[1] = (src)[1],\
- (des)[2] = (src)[2], (des)[3] = (src)[3],\
- (des)[4] = (src)[4], (des)[5] = (src)[5])
-
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
return rtlpriv->io.read8_sync(rtlpriv, addr);
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index 64a0214cfb29..ef8370edace7 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -776,6 +776,31 @@ out:
return ret;
}
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+ u8 depth, enum wl1251_acx_low_rssi_type type)
+{
+ struct acx_low_rssi *rssi;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx low rssi");
+
+ rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
+ if (!rssi)
+ return -ENOMEM;
+
+ rssi->threshold = threshold;
+ rssi->weight = weight;
+ rssi->depth = depth;
+ rssi->type = type;
+
+ ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
+ if (ret < 0)
+ wl1251_warning("failed to set low rssi threshold: %d", ret);
+
+ kfree(rssi);
+ return ret;
+}
+
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
return ret;
}
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+ u8 max_consecutive)
+{
+ struct wl1251_acx_bet_enable *acx;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx bet enable");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enable = mode;
+ acx->max_consecutive = max_consecutive;
+
+ ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("wl1251 acx bet enable failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop)
{
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index efcc3aaca14f..c2ba100f9b1a 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
u8 pad[2];
} __packed;
+enum wl1251_acx_low_rssi_type {
+ /*
+ * The event is a "Level" indication which keeps triggering
+ * as long as the average RSSI is below the threshold.
+ */
+ WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
+
+ /*
+ * The event is an "Edge" indication which triggers
+ * only when the RSSI threshold is crossed from above.
+ */
+ WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
+};
+
+struct acx_low_rssi {
+ struct acx_header header;
+
+ /*
+ * The threshold (in dBm) below (or above after low rssi
+ * indication) which the firmware generates an interrupt to the
+ * host. This parameter is signed.
+ */
+ s8 threshold;
+
+ /*
+ * The weight of the current RSSI sample, before adding the new
+ * sample, that is used to calculate the average RSSI.
+ */
+ u8 weight;
+
+ /*
+ * The number of Beacons/Probe response frames that will be
+ * received before issuing the Low or Regained RSSI event.
+ */
+ u8 depth;
+
+ /*
+ * Configures how the Low RSSI Event is triggered. Refer to
+ * enum wl1251_acx_low_rssi_type for more.
+ */
+ u8 type;
+} __packed;
+
struct acx_beacon_filter_option {
struct acx_header header;
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __packed;
+enum wl1251_acx_bet_mode {
+ WL1251_ACX_BET_DISABLE = 0,
+ WL1251_ACX_BET_ENABLE = 1,
+};
+
+struct wl1251_acx_bet_enable {
+ struct acx_header header;
+
+ /*
+ * Specifies if beacon early termination procedure is enabled or
+ * disabled, see enum wl1251_acx_bet_mode.
+ */
+ u8 enable;
+
+ /*
+ * Specifies the maximum number of consecutive beacons that may be
+ * early terminated. After this number is reached at least one full
+ * beacon must be correctly received in FW before beacon ET
+ * resumes. Range 0 - 255.
+ */
+ u8 max_consecutive;
+
+ u8 padding[2];
+} __packed;
+
struct wl1251_acx_ac_cfg {
struct acx_header header;
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+ u8 depth, enum wl1251_acx_low_rssi_type type);
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
int wl1251_acx_cts_protect(struct wl1251 *wl,
enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+ u8 max_consecutive);
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop);
int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c
index 712372e50a87..dfc4579acb06 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
+ if (wl->vif && wl->rssi_thold) {
+ if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT,
+ "ROAMING_TRIGGER_LOW_RSSI_EVENT");
+ ieee80211_cqm_rssi_notify(wl->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ }
+
+ if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT,
+ "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
+ ieee80211_cqm_rssi_notify(wl->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 40372bac9482..12c9e635a6d6 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -375,7 +375,7 @@ out:
mutex_unlock(&wl->mutex);
}
-static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
unsigned long flags;
@@ -401,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
wl->tx_queue_stopped = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-
- return NETDEV_TX_OK;
}
static int wl1251_op_start(struct ieee80211_hw *hw)
@@ -502,6 +500,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
wl->psm = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+ wl->rssi_thold = 0;
wl->channel = WL1251_DEFAULT_CHANNEL;
wl1251_debugfs_reset(wl);
@@ -959,6 +958,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (changed & BSS_CHANGED_CQM) {
+ ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
+ WL1251_DEFAULT_LOW_RSSI_WEIGHT,
+ WL1251_DEFAULT_LOW_RSSI_DEPTH,
+ WL1251_ACX_LOW_RSSI_TYPE_EDGE);
+ if (ret < 0)
+ goto out;
+ wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ }
+
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1313,9 +1322,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_UAPSD;
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
@@ -1377,6 +1388,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->psm_requested = false;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+ wl->rssi_thold = 0;
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
wl->vif = NULL;
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 5ed47c8373d2..9ba23ede51bd 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -153,6 +153,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
if (ret < 0)
return ret;
+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
+ WL1251_DEFAULT_BET_CONSECUTIVE);
+ if (ret < 0)
+ return ret;
+
ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
@@ -170,6 +175,12 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
if (ret < 0)
return ret;
+ /* disable BET */
+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
+ WL1251_DEFAULT_BET_CONSECUTIVE);
+ if (ret < 0)
+ return ret;
+
/* disable beacon filtering */
ret = wl1251_acx_beacon_filter_opt(wl, false);
if (ret < 0)
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index efa53607d5c9..c1b3b3f03da2 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -78,9 +78,10 @@ static void wl1251_rx_status(struct wl1251 *wl,
*/
wl->noise = desc->rssi - desc->snr / 2;
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel,
+ status->band);
- status->flag |= RX_FLAG_TSFT;
+ status->flag |= RX_FLAG_MACTIME_MPDU;
if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ switch (desc->rate) {
+ /* skip 1 and 12 Mbps because they have same value 0x0a */
+ case RATE_2MBPS:
+ status->rate_idx = 1;
+ break;
+ case RATE_5_5MBPS:
+ status->rate_idx = 2;
+ break;
+ case RATE_11MBPS:
+ status->rate_idx = 3;
+ break;
+ case RATE_6MBPS:
+ status->rate_idx = 4;
+ break;
+ case RATE_9MBPS:
+ status->rate_idx = 5;
+ break;
+ case RATE_18MBPS:
+ status->rate_idx = 7;
+ break;
+ case RATE_24MBPS:
+ status->rate_idx = 8;
+ break;
+ case RATE_36MBPS:
+ status->rate_idx = 9;
+ break;
+ case RATE_48MBPS:
+ status->rate_idx = 10;
+ break;
+ case RATE_54MBPS:
+ status->rate_idx = 11;
+ break;
+ }
+
+ /* for 1 and 12 Mbps we have to check the modulation */
+ if (desc->rate == RATE_1MBPS) {
+ if (!(desc->mod_pre & OFDM_RATE_BIT))
+ /* CCK -> RATE_1MBPS */
+ status->rate_idx = 0;
+ else
+ /* OFDM -> RATE_12MBPS */
+ status->rate_idx = 6;
+ }
- /* FIXME: set status->rate_idx */
+ if (desc->mod_pre & SHORT_PREAMBLE_BIT)
+ status->flag |= RX_FLAG_SHORTPRE;
}
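The rate_idx values assigned above only make sense relative to the bitrate table the driver registers with mac80211; the mapping assumes the usual 2.4 GHz layout of four CCK rates followed by eight OFDM rates, which is why OFDM 12 Mbps lands at index 6. A sketch of that assumed ordering:

    /* Assumed index -> bitrate mapping in the registered band:
     *   0: 1M (CCK)    1: 2M     2: 5.5M    3: 11M
     *   4: 6M (OFDM)   5: 9M     6: 12M     7: 18M
     *   8: 24M         9: 36M   10: 48M    11: 54M
     * The hardware reports 1M CCK and 12M OFDM with the same rate code
     * (0x0a), so desc->mod_pre's OFDM bit is used to break the tie.
     */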
static void wl1251_rx_body(struct wl1251 *wl,
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/wl1251/tx.c
index 554b4f9a3d3e..28121c590a2b 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/wl1251/tx.c
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_debug(DEBUG_TX, "skb offset %d", offset);
/* check whether the current skb can be used */
- if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
- unsigned char *src = skb->data;
+ if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
+ struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
+ GFP_KERNEL);
+
+ if (unlikely(newskb == NULL)) {
+ wl1251_error("Can't allocate skb!");
+ return -EINVAL;
+ }
- /* align the buffer on a 4-byte boundary */
+ tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
+
+ dev_kfree_skb_any(skb);
+ wl->tx_frames[tx_hdr->id] = skb = newskb;
+
+ offset = (4 - (long)skb->data) & 0x03;
+ wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
+ }
+
+ /* align the buffer on a 4-byte boundary */
+ if (offset) {
+ unsigned char *src = skb->data;
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
tx_hdr = (struct tx_double_buffer_desc *) skb->data;
- } else {
- wl1251_info("No handler, fixme!");
- return -EINVAL;
}
}
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- int hdrlen, ret;
+ int hdrlen;
u8 *frame;
skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[result->id] = NULL;
-
- if (wl->tx_queue_stopped) {
- wl1251_debug(DEBUG_TX, "cb: queue was stopped");
-
- skb = skb_dequeue(&wl->tx_queue);
-
- /* The skb can be NULL because tx_work might have been
- scheduled before the queue was stopped making the
- queue empty */
-
- if (skb) {
- ret = wl1251_tx_frame(wl, skb);
- if (ret == -EBUSY) {
- /* firmware buffer is still full */
- wl1251_debug(DEBUG_TX, "cb: fw buffer "
- "still full");
- skb_queue_head(&wl->tx_queue, skb);
- return;
- } else if (ret < 0) {
- dev_kfree_skb(skb);
- return;
- }
- }
-
- wl1251_debug(DEBUG_TX, "cb: waking queues");
- ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
- }
}
/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
- int i, result_index, num_complete = 0;
+ int i, result_index, num_complete = 0, queue_len;
struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
unsigned long flags;
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
}
}
- if (wl->tx_queue_stopped
- &&
- skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
+ queue_len = skb_queue_len(&wl->tx_queue);
- /* firmware buffer has space, restart queues */
+ if ((num_complete > 0) && (queue_len > 0)) {
+ /* firmware buffer has space, reschedule tx_work */
+ wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
+
+ if (wl->tx_queue_stopped &&
+ queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+ /* tx_queue has space, restart queues */
wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw);
wl->tx_queue_stopped = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
-
}
/* Every completed frame needs to be acknowledged */
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index c0ce2c8b43b8..bb23cd522b22 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -370,6 +370,8 @@ struct wl1251 {
/* in dBm */
int power_level;
+ int rssi_thold;
+
struct wl1251_stats stats;
struct wl1251_debugfs debugfs;
@@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_DEFAULT_CHANNEL 0
+#define WL1251_DEFAULT_BET_CONSECUTIVE 10
+
#define CHIP_ID_1251_PG10 (0x7010101)
#define CHIP_ID_1251_PG11 (0x7020101)
#define CHIP_ID_1251_PG12 (0x7030101)
@@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_PART_WORK_REG_START REGISTERS_BASE
#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE
+#define WL1251_DEFAULT_LOW_RSSI_WEIGHT 10
+#define WL1251_DEFAULT_LOW_RSSI_DEPTH 10
+
#endif
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 0e65bce457d6..692ebff38fc8 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -54,7 +54,7 @@ config WL12XX_SDIO
config WL12XX_SDIO_TEST
tristate "TI wl12xx SDIO testing support"
- depends on WL12XX && MMC
+ depends on WL12XX && MMC && WL12XX_SDIO
default n
---help---
This module adds support for the SDIO bus testing with the
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index cc4068d2b4a8..3badc6bb7866 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
{
- struct acx_rate_policy *acx;
- struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ struct acx_sta_rate_policy *acx;
+ struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
int idx = 0;
int ret = 0;
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
+ wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
+ acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
+ acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
+
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
return ret;
}
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx)
+{
+ struct acx_ap_rate_policy *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ap rate policy");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
+
+ acx->rate_policy_idx = cpu_to_le32(idx);
+
+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of ap rate policy failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop)
{
@@ -915,9 +951,9 @@ out:
return ret;
}
-int wl1271_acx_mem_cfg(struct wl1271 *wl)
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
{
- struct wl1271_acx_config_memory *mem_conf;
+ struct wl1271_acx_ap_config_memory *mem_conf;
int ret;
wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -929,10 +965,10 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
}
/* memory config */
- mem_conf->num_stations = DEFAULT_NUM_STATIONS;
- mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
- mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
- mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
+ mem_conf->num_stations = wl->conf.mem.num_stations;
+ mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+ mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+ mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +983,45 @@ out:
return ret;
}
-int wl1271_acx_init_mem_config(struct wl1271 *wl)
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
{
+ struct wl1271_acx_sta_config_memory *mem_conf;
int ret;
- ret = wl1271_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
+ wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
+
+ mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
+ if (!mem_conf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* memory config */
+ mem_conf->num_stations = wl->conf.mem.num_stations;
+ mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+ mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+ mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
+ mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
+ mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
+ mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
+ mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
+ mem_conf->tx_min = wl->conf.mem.tx_min;
+
+ ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
+ sizeof(*mem_conf));
+ if (ret < 0) {
+ wl1271_warning("wl1271 mem config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(mem_conf);
+ return ret;
+}
+
+int wl1271_acx_init_mem_config(struct wl1271 *wl)
+{
+ int ret;
wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
GFP_KERNEL);
@@ -1233,6 +1301,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct wl1271_acx_ht_capabilities *acx;
u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret = 0;
+ u32 ht_capabilites = 0;
wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
@@ -1244,27 +1313,26 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
/* Allow HT Operation ? */
if (allow_ht_operation) {
- acx->ht_capabilites =
+ ht_capabilites =
WL1271_ACX_FW_CAP_HT_OPERATION;
if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
/* get data from A-MPDU parameters field */
acx->ampdu_max_length = ht_cap->ampdu_factor;
acx->ampdu_min_spacing = ht_cap->ampdu_density;
-
- memcpy(acx->mac_address, mac_address, ETH_ALEN);
- } else { /* HT operations are not allowed */
- acx->ht_capabilites = 0;
}
+ memcpy(acx->mac_address, mac_address, ETH_ALEN);
+ acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1309,6 +1377,91 @@ out:
return ret;
}
+/* Configure BA session initiator/receiver parameters setting in the FW. */
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy)
+{
+ struct wl1271_acx_ba_session_policy *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* ANY role */
+ acx->role_id = 0xff;
+ acx->tid = tid_index;
+ acx->enable = policy;
+ acx->ba_direction = direction;
+
+ switch (direction) {
+ case WLAN_BACK_INITIATOR:
+ acx->win_size = wl->conf.ht.tx_ba_win_size;
+ acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
+ break;
+ case WLAN_BACK_RECIPIENT:
+ acx->win_size = RX_BA_WIN_SIZE;
+ acx->inactivity_timeout = 0;
+ break;
+ default:
+ wl1271_error("Incorrect acx command id=%x\n", direction);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_cmd_configure(wl,
+ ACX_BA_SESSION_POLICY_CFG,
+ acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba session setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* setup BA session receiver setting in the FW. */
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable)
+{
+ struct wl1271_acx_ba_receiver_setup *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Single link for now */
+ acx->link_id = 1;
+ acx->tid = tid_index;
+ acx->enable = enable;
+ acx->win_size = 0;
+ acx->ssn = ssn;
+
+ ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba receiver session failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
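The two new ACX helpers split block-ack configuration into a per-TID policy (initiator or recipient) and a per-link receiver setup. A hedged sketch of how an ampdu_action handler might drive the receiver side — the call sites are not part of this hunk, so treat it as illustrative only:

    /* On IEEE80211_AMPDU_RX_START, hypothetically: */
    ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn, true);

    /* ...and on IEEE80211_AMPDU_RX_STOP: */
    ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);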
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
{
struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1487,82 @@ out:
kfree(tsf_info);
return ret;
}
+
+int wl1271_acx_max_tx_retry(struct wl1271 *wl)
+{
+ struct wl1271_acx_max_tx_retry *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx max tx retry");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
+
+ ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx max tx retry failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_config_ps(struct wl1271 *wl)
+{
+ struct wl1271_acx_config_ps *config_ps;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx config ps");
+
+ config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
+ if (!config_ps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
+ config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
+ config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+
+ ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
+ sizeof(*config_ps));
+
+ if (ret < 0) {
+ wl1271_warning("acx config ps failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(config_ps);
+ return ret;
+}
+
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+{
+ struct wl1271_acx_inconnection_sta *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx set inconnection sta %pM", addr);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ memcpy(acx->addr, addr, ETH_ALEN);
+
+ ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx set inconnaction sta failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 7bd8e4db4a71..dd19b01d807b 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -133,7 +133,6 @@ enum {
#define DEFAULT_UCAST_PRIORITY 0
#define DEFAULT_RX_Q_PRIORITY 0
-#define DEFAULT_NUM_STATIONS 1
#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */
#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */
#define TRACE_BUFFER_MAX_SIZE 256
@@ -747,13 +746,23 @@ struct acx_rate_class {
#define ACX_TX_BASIC_RATE 0
#define ACX_TX_AP_FULL_RATE 1
#define ACX_TX_RATE_POLICY_CNT 2
-struct acx_rate_policy {
+struct acx_sta_rate_policy {
struct acx_header header;
__le32 rate_class_cnt;
struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
} __packed;
+
+#define ACX_TX_AP_MODE_MGMT_RATE 4
+#define ACX_TX_AP_MODE_BCST_RATE 5
+struct acx_ap_rate_policy {
+ struct acx_header header;
+
+ __le32 rate_policy_idx;
+ struct acx_rate_class rate_policy;
+} __packed;
+
struct acx_ac_cfg {
struct acx_header header;
u8 ac;
@@ -787,12 +796,9 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __packed;
-#define ACX_RX_MEM_BLOCKS 70
-#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
-#define ACX_NUM_SSID_PROFILES 1
-struct wl1271_acx_config_memory {
+struct wl1271_acx_ap_config_memory {
struct acx_header header;
u8 rx_mem_block_num;
@@ -802,6 +808,20 @@ struct wl1271_acx_config_memory {
__le32 total_tx_descriptors;
} __packed;
+struct wl1271_acx_sta_config_memory {
+ struct acx_header header;
+
+ u8 rx_mem_block_num;
+ u8 tx_min_mem_block_num;
+ u8 num_stations;
+ u8 num_ssid_profiles;
+ __le32 total_tx_descriptors;
+ u8 dyn_mem_enable;
+ u8 tx_free_req;
+ u8 rx_free_req;
+ u8 tx_min;
+} __packed;
+
struct wl1271_acx_mem_map {
struct acx_header header;
@@ -1051,6 +1071,59 @@ struct wl1271_acx_ht_information {
u8 padding[3];
} __packed;
+#define RX_BA_WIN_SIZE 8
+
+struct wl1271_acx_ba_session_policy {
+ struct acx_header header;
+ /*
+ * Specifies role Id, Range 0-7, 0xFF means ANY role.
+ * Future use. For now this field is irrelevant
+ */
+ u8 role_id;
+ /*
+ * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
+ * Not applicable if Role Id is set to ANY.
+ */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /*
+ * As initiator inactivity timeout in time units(TU) of 1024us.
+ * As receiver reserved
+ */
+ u16 inactivity_timeout;
+
+ /* Initiator = 1/Receiver = 0 */
+ u8 ba_direction;
+
+ u8 padding[3];
+} __packed;
+
+struct wl1271_acx_ba_receiver_setup {
+ struct acx_header header;
+
+ /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ u8 padding[1];
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /* BA session starting sequence number. RANGE 0-FFF */
+ u16 ssn;
+} __packed;
+
struct wl1271_acx_fw_tsf_information {
struct acx_header header;
@@ -1062,6 +1135,33 @@ struct wl1271_acx_fw_tsf_information {
u8 padding[3];
} __packed;
+struct wl1271_acx_max_tx_retry {
+ struct acx_header header;
+
+ /*
+ * the number of frame transmission failures before
+ * issuing the aging event.
+ */
+ __le16 max_tx_retry;
+ u8 padding_1[2];
+} __packed;
+
+struct wl1271_acx_config_ps {
+ struct acx_header header;
+
+ u8 exit_retries;
+ u8 enter_retries;
+ u8 padding[2];
+ __le32 null_data_rate;
+} __packed;
+
+struct wl1271_acx_inconnection_sta {
+ struct acx_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 padding1[2];
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1113,22 +1213,24 @@ enum {
ACX_RSSI_SNR_WEIGHTS = 0x0052,
ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
- ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
- ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
+ ACX_BA_SESSION_POLICY_CFG = 0x0055,
+ ACX_BA_SESSION_RX_SETUP = 0x0056,
ACX_PEER_HT_CAP = 0x0057,
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
+ ACX_GEN_FW_CMD = 0x0070,
+ ACX_HOST_IF_CFG_BITMAP = 0x0071,
+ ACX_MAX_TX_FAILURE = 0x0072,
+ ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
ACX_PM_CONFIG = 0x1016,
-
- MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
-
- MAX_IE = 0xFFFF
+ ACX_CONFIG_PS = 0x1017,
+ ACX_CONFIG_HANGOVER = 0x1018,
};
@@ -1160,7 +1262,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx);
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop);
int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1168,7 +1272,8 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
int wl1271_acx_tx_config_options(struct wl1271 *wl);
-int wl1271_acx_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
@@ -1185,6 +1290,14 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
bool allow_ht_operation);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
u16 ht_operation_mode);
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy);
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl1271_acx_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 4df04f84d7f1..1ffbad67d2d8 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -28,6 +28,7 @@
#include "boot.h"
#include "io.h"
#include "event.h"
+#include "rx.h"
static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
[PART_DOWN] = {
@@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
+static void wl1271_parse_fw_ver(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+ &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+ &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+ &wl->chip.fw_ver[4]);
+
+ if (ret != 5) {
+ wl1271_warning("fw version incorrect value");
+ memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+ return;
+ }
+}
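wl1271_parse_fw_ver() expects five dot-separated numbers after a four-character prefix in the firmware version string; anything else zeroes chip.fw_ver and logs a warning. Illustrative only — the prefix and numbers below are assumptions, not values from real firmware:

    /* e.g. fw_ver_str == "Rev 6.1.0.50.350":
     *   fw_ver_str + 4 skips the "Rev " prefix and sscanf() fills
     *   fw_ver[0..4] with { 6, 1, 0, 50, 350 }.
     */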
+
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
struct wl1271_static_data static_data;
@@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
false);
- strncpy(wl->chip.fw_ver, static_data.fw_version,
- sizeof(wl->chip.fw_ver));
+ strncpy(wl->chip.fw_ver_str, static_data.fw_version,
+ sizeof(wl->chip.fw_ver_str));
/* make sure the string is NULL-terminated */
- wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
+ wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+ wl1271_parse_fw_ver(wl);
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
- if (wl->nvs->general_params.dual_mode_select)
+ /* for now 11a is unsupported in AP mode */
+ if (wl->bss_type != BSS_TYPE_AP_BSS &&
+ wl->nvs->general_params.dual_mode_select)
wl->enable_11a = true;
}
@@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
SOFT_GEMINI_SENSE_EVENT_ID;
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
+
ret = wl1271_event_unmask(wl);
if (ret < 0) {
wl1271_error("EVENT mask setting failed");
@@ -595,8 +619,7 @@ int wl1271_boot(struct wl1271 *wl)
wl1271_boot_enable_interrupts(wl);
/* set the wl1271 default filters */
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
wl1271_event_mbox_config(wl);
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 0106628aa5a2..97ffd7aa57a8 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -36,6 +36,7 @@
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
+#include "tx.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
@@ -221,7 +222,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
{
u32 events_vector, event;
unsigned long timeout;
@@ -230,7 +231,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
do {
if (time_after(jiffies, timeout)) {
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
+ (int)mask);
return -ETIMEDOUT;
}
@@ -248,6 +250,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ int ret;
+
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
+ if (ret != 0) {
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ return ret;
+ }
+
+ return 0;
+}
+
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
struct wl1271_cmd_join *join;
@@ -271,6 +286,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
join->bss_type = bss_type;
join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ join->supported_rate_set = cpu_to_le32(wl->rate_set);
if (wl->band == IEEE80211_BAND_5GHZ)
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +304,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
wl->tx_security_last_seq = 0;
wl->tx_security_seq = 0;
+ wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
+ join->basic_rate_set, join->supported_rate_set);
+
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd join");
@@ -439,7 +458,7 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -453,10 +472,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
}
ps_params->ps_mode = ps_mode;
- ps_params->send_null_data = send;
- ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
- ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
- ps_params->null_data_rate = cpu_to_le32(rates);
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params), 0);
@@ -490,8 +505,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
cmd->enabled_rates = cpu_to_le32(rates);
- cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
- cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
+ cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
cmd->index = index;
if (buf)
@@ -659,15 +674,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
/* llc layer */
memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
- tmpl.llc_type = htons(ETH_P_ARP);
+ tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
/* arp header */
arp_hdr = &tmpl.arp_hdr;
- arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
- arp_hdr->ar_pro = htons(ETH_P_IP);
+ arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
+ arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
arp_hdr->ar_hln = ETH_ALEN;
arp_hdr->ar_pln = 4;
- arp_hdr->ar_op = htons(ARPOP_REPLY);
+ arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +717,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
wl->basic_rate);
}
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +746,42 @@ out:
return ret;
}
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->key_id = id;
+ cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+ cmd->key_action = cpu_to_le16(KEY_SET_ID);
+ cmd->key_type = KEY_WEP;
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+
+ return ret;
+}
+
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +834,67 @@ out:
return ret;
}
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+ u8 lid_type;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ if (hlid == WL1271_AP_BROADCAST_HLID) {
+ if (key_type == KEY_WEP)
+ lid_type = WEP_DEFAULT_LID_TYPE;
+ else
+ lid_type = BROADCAST_LID_TYPE;
+ } else {
+ lid_type = UNICAST_LID_TYPE;
+ }
+
+ wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
+ " hlid: %d", (int)action, (int)id, (int)lid_type,
+ (int)key_type, (int)hlid);
+
+ cmd->lid_key_type = lid_type;
+ cmd->hlid = hlid;
+ cmd->key_action = cpu_to_le16(action);
+ cmd->key_size = key_size;
+ cmd->key_type = key_type;
+ cmd->key_id = id;
+ cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+ cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
+ if (key_type == KEY_TKIP) {
+ /*
+ * We get the key in the following form:
+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+ * but the target is expecting:
+ * TKIP - RX MIC - TX MIC
+ */
+ memcpy(cmd->key, key, 16);
+ memcpy(cmd->key + 16, key + 24, 8);
+ memcpy(cmd->key + 24, key + 16, 8);
+ } else {
+ memcpy(cmd->key, key, key_size);
+ }
+
+ wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("could not set ap keys");
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
int wl1271_cmd_disconnect(struct wl1271 *wl)
{
struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +957,180 @@ out_free:
out:
return ret;
}
+
+int wl1271_cmd_start_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd start bss");
+
+ /*
+ * FIXME: We currently do not support hidden SSID. The real SSID
+ * should be fetched from mac80211 first.
+ */
+ if (wl->ssid_len == 0) {
+ wl1271_warning("Hidden SSID currently not supported for AP");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
+
+ cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
+ cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->dtim_interval = bss_conf->dtim_period;
+ cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+ cmd->channel = wl->channel;
+ cmd->ssid_len = wl->ssid_len;
+ cmd->ssid_type = SSID_TYPE_PUBLIC;
+ memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
+
+ switch (wl->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = RADIO_BAND_5GHZ;
+ break;
+ default:
+ wl1271_warning("bss start - unknown band: %d", (int)wl->band);
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd start bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_stop_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd stop bss");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd stop bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+{
+ struct wl1271_cmd_add_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* currently we don't support UAPSD */
+ cmd->sp_len = 0;
+
+ memcpy(cmd->addr, sta->addr, ETH_ALEN);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->aid = sta->aid;
+ cmd->hlid = hlid;
+
+ /*
+ * FIXME: Does STA support QOS? We need to propagate this info from
+ * hostapd. Currently not that important since this is only used for
+ * sending the correct flavor of null-data packet in response to a
+ * trigger.
+ */
+ cmd->wmm = 0;
+
+ cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
+ sta->supp_rates[wl->band]));
+
+ wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+
+ ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd add sta");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+{
+ struct wl1271_cmd_remove_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = hlid;
+ /* We never send a deauth, mac80211 is in charge of this */
+ cmd->reason_opcode = 0;
+ cmd->send_deauth_flag = 0;
+
+ ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd remove sta");
+ goto out_free;
+ }
+
+ /*
+ * We are ok with a timeout here. The event is sometimes not sent
+ * due to a firmware bug.
+ */
+ wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 2a1d9db7ceb8..54c12e71417e 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
int wl1271_build_qos_null_data(struct wl1271 *wl);
int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, const u8 *addr,
- u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, const u8 *addr,
+ u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16);
int wl1271_cmd_disconnect(struct wl1271 *wl);
int wl1271_cmd_set_sta_state(struct wl1271 *wl);
+int wl1271_cmd_start_bss(struct wl1271 *wl);
+int wl1271_cmd_stop_bss(struct wl1271 *wl);
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -98,6 +106,12 @@ enum wl1271_commands {
CMD_STOP_PERIODIC_SCAN = 51,
CMD_SET_STA_STATE = 52,
+ /* AP mode commands */
+ CMD_BSS_START = 60,
+ CMD_BSS_STOP = 61,
+ CMD_ADD_STA = 62,
+ CMD_REMOVE_STA = 63,
+
NUM_COMMANDS,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -126,6 +140,14 @@ enum cmd_templ {
* For CTS-to-self (FastCTS) mechanism
* for BT/WLAN coexistence (SoftGemini). */
CMD_TEMPL_ARP_RSP,
+ CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+
+ /* AP-mode specific */
+ CMD_TEMPL_AP_BEACON = 13,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ CMD_TEMPL_AP_ARP_RSP,
+ CMD_TEMPL_DEAUTH_AP,
+
CMD_TEMPL_MAX = 0xff
};
@@ -195,6 +217,7 @@ struct wl1271_cmd_join {
* ACK or CTS frames).
*/
__le32 basic_rate_set;
+ __le32 supported_rate_set;
u8 dtim_interval;
/*
* bits 0-2: This bitwise field specifies the type
@@ -257,20 +280,11 @@ struct wl1271_cmd_ps_params {
struct wl1271_cmd_header header;
u8 ps_mode; /* STATION_* */
- u8 send_null_data; /* Do we have to send NULL data packet ? */
- u8 retries; /* Number of retires for the initial NULL data packet */
-
- /*
- * TUs during which the target stays awake after switching
- * to power save mode.
- */
- u8 hang_over_period;
- __le32 null_data_rate;
+ u8 padding[3];
} __packed;
/* HW encryption keys */
#define NUM_ACCESS_CATEGORIES_COPY 4
-#define MAX_KEY_SIZE 32
enum wl1271_cmd_key_action {
KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +303,7 @@ enum wl1271_cmd_key_type {
/* FIXME: Add description for key-types */
-struct wl1271_cmd_set_keys {
+struct wl1271_cmd_set_sta_keys {
struct wl1271_cmd_header header;
/* Ignored for default WEP key */
@@ -318,6 +332,57 @@ struct wl1271_cmd_set_keys {
__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
} __packed;
+enum wl1271_cmd_lid_key_type {
+ UNICAST_LID_TYPE = 0,
+ BROADCAST_LID_TYPE = 1,
+ WEP_DEFAULT_LID_TYPE = 2
+};
+
+struct wl1271_cmd_set_ap_keys {
+ struct wl1271_cmd_header header;
+
+ /*
+ * Indicates whether the HLID is a unicast key set
+ * or broadcast key set. A special value 0xFF is
+ * used to indicate that the HLID is on WEP-default
+	 * (multi-hlids); see wl1271_cmd_lid_key_type.
+ */
+ u8 hlid;
+
+ /*
+	 * In a WEP-default network (hlid == 0xFF), used to
+	 * indicate which network role (STA/IBSS/AP) should be
+	 * changed.
+ */
+ u8 lid_key_type;
+
+ /*
+ * Key ID - For TKIP and AES key types, this field
+ * indicates the value that should be inserted into
+ * the KeyID field of frames transmitted using this
+	 * key entry. For broadcast keys the index is used as a
+	 * marker for the TX/RX key.
+ * For WEP default network (HLID=0xFF), this field
+ * indicates the ID of the key to add or remove.
+ */
+ u8 key_id;
+ u8 reserved_1;
+
+ /* key_action_e */
+ __le16 key_action;
+
+ /* key size in bytes */
+ u8 key_size;
+
+ /* key_type_e */
+ u8 key_type;
+
+ /* This field holds the security key data to add to the STA table */
+ u8 key[MAX_KEY_SIZE];
+ __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __packed;
+
struct wl1271_cmd_test_header {
u8 id;
u8 padding[3];
@@ -412,4 +477,68 @@ struct wl1271_cmd_set_sta_state {
u8 padding[3];
} __packed;
+enum wl1271_ssid_type {
+ SSID_TYPE_PUBLIC = 0,
+ SSID_TYPE_HIDDEN = 1
+};
+
+struct wl1271_cmd_bss_start {
+ struct wl1271_cmd_header header;
+
+ /* wl1271_ssid_type */
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 padding_1[2];
+
+ /* Basic rate set */
+ __le32 basic_rate_set;
+ /* Aging period in seconds*/
+ __le16 aging_period;
+
+ /*
+ * This field specifies the time between target beacon
+ * transmission times (TBTTs), in time units (TUs).
+ * Valid values are 1 to 1024.
+ */
+ __le16 beacon_interval;
+ u8 bssid[ETH_ALEN];
+ u8 bss_index;
+ /* Radio band */
+ u8 band;
+ u8 channel;
+ /* The host link id for the AP's global queue */
+ u8 global_hlid;
+ /* The host link id for the AP's broadcast queue */
+ u8 broadcast_hlid;
+ /* DTIM count */
+ u8 dtim_interval;
+ /* Beacon expiry time in ms */
+ u8 beacon_expiry;
+ u8 padding_2[3];
+} __packed;
+
+struct wl1271_cmd_add_sta {
+ struct wl1271_cmd_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 hlid;
+ u8 aid;
+ u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 supported_rates;
+ u8 bss_index;
+ u8 sp_len;
+ u8 wmm;
+ u8 padding1;
+} __packed;
+
+struct wl1271_cmd_remove_sta {
+ struct wl1271_cmd_header header;
+
+ u8 hlid;
+ u8 reason_opcode;
+ u8 send_deauth_flag;
+ u8 padding1;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
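The hunks above split the key commands into STA and AP variants and introduce the
wl1271_cmd_set_ap_keys structure. As a rough, illustrative sketch only (not part of
the patch), a caller could populate that structure along these lines; the helper name
example_set_ap_key() is hypothetical, while the use of wl1271_cmd_send() with
CMD_SET_KEYS follows the pattern the driver already uses for other commands:

/*
 * Illustrative sketch, not from the patch: fill and send the new AP key
 * command. The lid_key_type mapping here is a simplification; the real
 * wl1271_cmd_set_ap_key() added in cmd.c is the authoritative version.
 */
static int example_set_ap_key(struct wl1271 *wl, u8 hlid, u8 key_id,
			      u8 key_type, const u8 *key, u8 key_size)
{
	struct wl1271_cmd_set_ap_keys *cmd;
	int ret;

	if (key_size > MAX_KEY_SIZE)
		return -EINVAL;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* 0xff selects the WEP-default (multi-HLID) case, see above */
	cmd->hlid = hlid;
	cmd->lid_key_type = (hlid == 0xff) ? WEP_DEFAULT_LID_TYPE :
					     UNICAST_LID_TYPE;
	cmd->key_id = key_id;
	cmd->key_action = cpu_to_le16(KEY_ADD_OR_REPLACE);
	cmd->key_type = key_type;
	cmd->key_size = key_size;
	memcpy(cmd->key, key, key_size);

	ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
	kfree(cmd);
	return ret;
}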
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index a16b3616e430..856a8a2fff4f 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -496,6 +496,26 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
+/*
+ * Rates supported for data packets when operating as an AP. Note the absence
+ * of the 22Mbps rate: the FW is limited to 12 rates, so one must be dropped,
+ * and the dropped rate is not mandatory under any operating mode.
+ */
+#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
+ CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
+ CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \
+ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
+ CONF_HW_BIT_RATE_54MBPS)
+
+/*
+ * Default rates for management traffic when operating in AP mode. This
+ * should be configured according to the basic rate set of the AP
+ */
+#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+
struct conf_tx_rate_class {
/*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
/*
* Configuration for rate classes for TX (currently only one
- * rate class supported.)
+ * rate class supported). Used in non-AP mode.
*/
- struct conf_tx_rate_class rc_conf;
+ struct conf_tx_rate_class sta_rc_conf;
/*
* Configuration for access categories for TX rate control.
@@ -647,6 +667,28 @@ struct conf_tx_settings {
struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
/*
+ * Configuration for rate classes in AP-mode. These rate classes
+ * are for the AC TX queues
+ */
+ struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
+
+ /*
+ * Management TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_mgmt_conf;
+
+ /*
+ * Broadcast TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_bcst_conf;
+
+ /*
+ * AP-mode - allow this number of TX retries to a station before an
+ * event is triggered from FW.
+ */
+ u16 ap_max_tx_retries;
+
+ /*
* Configuration for TID parameters.
*/
u8 tid_conf_count;
@@ -687,6 +729,12 @@ struct conf_tx_settings {
* Range: CONF_HW_BIT_RATE_* bit mask
*/
u32 basic_rate_5;
+
+ /*
+ * TX retry limits for templates
+ */
+ u8 tmpl_short_retry_limit;
+ u8 tmpl_long_retry_limit;
};
enum {
@@ -912,6 +960,14 @@ struct conf_conn_settings {
u8 psm_entry_retries;
/*
+ * Specifies the maximum number of times to try PSM exit if it fails
+ * (if sending the appropriate null-func message fails.)
+ *
+ * Range 0 - 255
+ */
+ u8 psm_exit_retries;
+
+ /*
* Specifies the maximum number of times to try transmit the PSM entry
* null-func frame for each PSM entry attempt
*
@@ -1036,30 +1092,30 @@ struct conf_scan_settings {
/*
* The minimum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 min_dwell_time_active;
+ u32 min_dwell_time_active;
/*
* The maximum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 max_dwell_time_active;
+ u32 max_dwell_time_active;
/*
- * The maximum time to wait on each channel for passive scans
+ * The minimum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 min_dwell_time_passive;
+ u32 min_dwell_time_passive;
/*
* The maximum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 max_dwell_time_passive;
+ u32 max_dwell_time_passive;
/*
* Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1146,51 @@ struct conf_rf_settings {
u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
};
+struct conf_ht_setting {
+ u16 tx_ba_win_size;
+ u16 inactivity_timeout;
+};
+
+struct conf_memory_settings {
+ /* Number of stations supported in IBSS mode */
+ u8 num_stations;
+
+ /* Number of ssid profiles used in IBSS mode */
+ u8 ssid_profiles;
+
+ /* Number of memory buffers allocated to rx pool */
+ u8 rx_block_num;
+
+ /* Minimum number of blocks allocated to tx pool */
+ u8 tx_min_block_num;
+
+ /* Disable/Enable dynamic memory */
+ u8 dynamic_memory;
+
+ /*
+ * Minimum required free tx memory blocks in order to assure optimum
+	 * performance
+ *
+ * Range: 0-120
+ */
+ u8 min_req_tx_blocks;
+
+ /*
+ * Minimum required free rx memory blocks in order to assure optimum
+	 * performance
+ *
+ * Range: 0-120
+ */
+ u8 min_req_rx_blocks;
+
+ /*
+ * Minimum number of mem blocks (free+used) guaranteed for TX
+ *
+ * Range: 0-120
+ */
+ u8 tx_min;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -1100,6 +1201,8 @@ struct conf_drv_settings {
struct conf_roam_trigger_settings roam_trigger;
struct conf_scan_settings scan;
struct conf_rf_settings rf;
+ struct conf_ht_setting ht;
+ struct conf_memory_settings mem;
};
#endif
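The new conf_ht_setting and conf_memory_settings blocks above are wired into the
default configuration in main.c and consumed by the wl1271_acx_sta_mem_cfg() /
wl1271_acx_ap_mem_cfg() calls added in init.c below. As an illustrative sketch only,
with a hypothetical ACX structure standing in for the real firmware command, the
mapping of the memory settings looks roughly like this:

/*
 * Illustrative sketch, not from the patch: example_acx_mem_cfg is a
 * hypothetical stand-in; only the conf_memory_settings fields on the
 * right-hand side come from the structure defined above.
 */
struct example_acx_mem_cfg {
	u8 num_stations;
	u8 ssid_profiles;
	u8 rx_mem_blocks;
	u8 tx_min_mem_blocks;
	u8 dynamic_memory;
};

static void example_fill_mem_cfg(struct wl1271 *wl,
				 struct example_acx_mem_cfg *cfg)
{
	struct conf_memory_settings *mem = &wl->conf.mem;

	cfg->num_stations      = mem->num_stations;
	cfg->ssid_profiles     = mem->ssid_profiles;
	cfg->rx_mem_blocks     = mem->rx_block_num;
	cfg->tx_min_mem_blocks = mem->tx_min_block_num;
	cfg->dynamic_memory    = mem->dynamic_memory;
}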
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index ec6077760157..bebfa28a171a 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
unsigned long value;
int ret;
- mutex_lock(&wl->mutex);
-
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len)) {
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
buf[len] = '\0';
ret = strict_strtoul(buf, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value in gpio_power");
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&wl->mutex);
+
if (value)
wl1271_power_on(wl);
else
wl1271_power_off(wl);
-out:
mutex_unlock(&wl->mutex);
return count;
}
@@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl)
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
{
int ret = 0;
struct dentry *entry, *stats;
- stats = debugfs_create_dir("fw-statistics", wl->rootdir);
+ stats = debugfs_create_dir("fw-statistics", rootdir);
if (!stats || IS_ERR(stats)) {
entry = stats;
goto err;
@@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
- DEBUGFS_ADD(tx_queue_len, wl->rootdir);
- DEBUGFS_ADD(retry_count, wl->rootdir);
- DEBUGFS_ADD(excessive_retries, wl->rootdir);
-
- DEBUGFS_ADD(gpio_power, wl->rootdir);
+ DEBUGFS_ADD(tx_queue_len, rootdir);
+ DEBUGFS_ADD(retry_count, rootdir);
+ DEBUGFS_ADD(excessive_retries, rootdir);
- entry = debugfs_create_x32("debug_level", 0600, wl->rootdir,
- &wl12xx_debug_level);
- if (!entry || IS_ERR(entry))
- goto err;
+ DEBUGFS_ADD(gpio_power, rootdir);
return 0;
@@ -419,7 +413,7 @@ err:
void wl1271_debugfs_reset(struct wl1271 *wl)
{
- if (!wl->rootdir)
+ if (!wl->stats.fw_stats)
return;
memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
int wl1271_debugfs_init(struct wl1271 *wl)
{
int ret;
+ struct dentry *rootdir;
- wl->rootdir = debugfs_create_dir(KBUILD_MODNAME,
- wl->hw->wiphy->debugfsdir);
+ rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wl->hw->wiphy->debugfsdir);
- if (IS_ERR(wl->rootdir)) {
- ret = PTR_ERR(wl->rootdir);
- wl->rootdir = NULL;
+ if (IS_ERR(rootdir)) {
+ ret = PTR_ERR(rootdir);
goto err;
}
@@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
- ret = wl1271_debugfs_add_files(wl);
+ ret = wl1271_debugfs_add_files(wl, rootdir);
if (ret < 0)
goto err_file;
@@ -462,8 +456,7 @@ err_file:
wl->stats.fw_stats = NULL;
err_fw:
- debugfs_remove_recursive(wl->rootdir);
- wl->rootdir = NULL;
+ debugfs_remove_recursive(rootdir);
err:
return ret;
@@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
{
kfree(wl->stats.fw_stats);
wl->stats.fw_stats = NULL;
-
- debugfs_remove_recursive(wl->rootdir);
- wl->rootdir = NULL;
-
}
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index f9146f5242fb..1b170c5cc595 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -135,20 +135,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
/* go to extremely low power mode */
wl1271_ps_elp_sleep(wl);
break;
- case EVENT_EXIT_POWER_SAVE_FAIL:
- wl1271_debug(DEBUG_PSM, "PSM exit failed");
-
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- wl->psm_entry_retry = 0;
- break;
- }
-
- /* make sure the firmware goes to active mode - the frame to
- be sent next will indicate to the AP, that we are active. */
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, false);
- break;
- case EVENT_EXIT_POWER_SAVE_SUCCESS:
default:
break;
}
@@ -186,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
int ret;
u32 vector;
bool beacon_loss = false;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
wl1271_event_mbox_dump(mbox);
@@ -218,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if (vector & BSS_LOSE_EVENT_ID) {
+ if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if (vector & PS_REPORT_EVENT_ID) {
+ if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
if (ret < 0)
return ret;
}
- if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
wl1271_event_pspoll_delivery_fail(wl);
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 6cce0143adb5..0e80886f3031 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -59,6 +59,7 @@ enum {
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
+ STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
@@ -74,8 +75,6 @@ enum {
enum {
EVENT_ENTER_POWER_SAVE_FAIL = 0,
EVENT_ENTER_POWER_SAVE_SUCCESS,
- EVENT_EXIT_POWER_SAVE_FAIL,
- EVENT_EXIT_POWER_SAVE_SUCCESS,
};
struct event_debug_report {
@@ -115,7 +114,12 @@ struct event_mailbox {
u8 scheduled_scan_status;
u8 ps_status;
- u8 reserved_5[29];
+ /* AP FW only */
+ u8 hlid_removed;
+ __le16 sta_aging_status;
+ __le16 sta_tx_retry_exceeded;
+
+ u8 reserved_5[24];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
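The event changes above add an AP-only event bit (STA_REMOVE_COMPLETE_EVENT_ID) and
new AP-only mailbox fields. As a hedged sketch only (not part of the patch), a handler
in event.c could consume them roughly as follows; the exact handling of hlid_removed
is an assumption for illustration:

/*
 * Illustrative sketch, not from the patch: react to the AP-mode
 * "station removed" completion event reported through the mailbox.
 */
static void example_handle_ap_events(struct wl1271 *wl, u32 vector,
				     struct event_mailbox *mbox)
{
	if (wl->bss_type != BSS_TYPE_AP_BSS)
		return;

	if (vector & STA_REMOVE_COMPLETE_EVENT_ID)
		/* the FW reports which host link id was torn down */
		wl1271_debug(DEBUG_EVENT, "STA remove complete, hlid %d",
			     mbox->hlid_removed);
}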
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 785a5304bfc4..6072fe457135 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -30,27 +30,9 @@
#include "acx.h"
#include "cmd.h"
#include "reg.h"
+#include "tx.h"
-static int wl1271_init_hwenc_config(struct wl1271 *wl)
-{
- int ret;
-
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0) {
- wl1271_warning("couldn't set feature config");
- return ret;
- }
-
- ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
- if (ret < 0) {
- wl1271_warning("couldn't set default key");
- return ret;
- }
-
- return 0;
-}
-
-int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_sta_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
return 0;
}
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+{
+ struct wl12xx_disconn_template *tmpl;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_DEAUTH);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+ tmpl, sizeof(*tmpl), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(tmpl);
+ return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_hdr_3addr *nullfunc;
+ int ret;
+
+ nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+ if (!nullfunc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* nullfunc->addr1 is filled by FW */
+
+ memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+ sizeof(*nullfunc), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(nullfunc);
+ return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr *qosnull;
+ int ret;
+
+ qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+ if (!qosnull) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* qosnull->addr1 is filled by FW */
+
+ memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+ sizeof(*qosnull), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(qosnull);
+ return ret;
+}
+
+static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+{
+ int ret;
+
+ /*
+ * Put very large empty placeholders for all templates. These
+ * reserve memory for later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ sizeof
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ sizeof
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+ sizeof
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
{
int ret;
@@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_service_period_timeout(wl);
if (ret < 0)
return ret;
@@ -213,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
return 0;
}
+static int wl1271_sta_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+
+ /* PS config */
+ ret = wl1271_acx_config_ps(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_sta_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_beacon_filter(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl1271_init_pta(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_rate_policies(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
+ if (ret < 0) {
+ wl1271_warning("couldn't set default key");
+ return ret;
+ }
+
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_ap_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+
+ /* Configure initial TX rate classes */
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_rc_conf[i], i);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_mgmt_conf,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_bcst_conf,
+ ACX_TX_AP_MODE_BCST_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_max_tx_retry(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_ap_init_deauth_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_qos_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wl1271_check_ba_support(struct wl1271 *wl)
+{
+	/* validate FW code version x.x.x.50-60.x */
+ if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
+ (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
+ wl->ba_support = true;
+ return;
+ }
+
+ wl->ba_support = false;
+}
+
+static int wl1271_set_ba_policies(struct wl1271 *wl)
+{
+ u8 tid_index;
+ int ret = 0;
+
+ /* Reset the BA RX indicators */
+ wl->ba_rx_bitmap = 0;
+
+	/* validate that the FW supports BA */
+ wl1271_check_ba_support(wl);
+
+ if (wl->ba_support)
+ /* 802.11n initiator BA session setting */
+ for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
+ ++tid_index) {
+ ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
+ tid_index, true);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
int wl1271_hw_init(struct wl1271 *wl)
{
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
int ret, i;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -227,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
+ /* Mode specific init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init(wl);
+ else
+ ret = wl1271_sta_hw_init(wl);
- /* Template settings */
- ret = wl1271_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -259,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure TX patch complete interrupt behavior */
ret = wl1271_acx_tx_config_options(wl);
if (ret < 0)
@@ -279,21 +561,11 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Energy detection */
ret = wl1271_init_energy_detection(wl);
if (ret < 0)
goto out_free_memmap;
- /* Beacons and boradcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Default fragmentation threshold */
ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
@@ -321,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
}
- /* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure HW encryption */
- ret = wl1271_init_hwenc_config(wl);
+ ret = wl1271_acx_feature_cfg(wl);
if (ret < 0)
goto out_free_memmap;
@@ -346,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* disable all keep-alive templates */
- for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
- ACX_KEEP_ALIVE_TPL_INVALID);
- if (ret < 0)
- goto out_free_memmap;
- }
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl);
+ else
+ ret = wl1271_sta_hw_init_post_mem(wl);
- /* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
goto out_free_memmap;
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 7762421f8602..3a8bd3f426d2 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,7 +27,7 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_sta_init_templates_config(struct wl1271 *wl);
int wl1271_init_phy_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 062247ef3ad2..947491a1d9cc 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
},
.tx = {
.tx_energy_detection = 0,
- .rc_conf = {
+ .sta_rc_conf = {
.enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
- .aflags = 0
+ .aflags = 0,
},
.ac_conf_count = 4,
.ac_conf = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
.tx_op_limit = 1504,
},
},
+ .ap_rc_conf = {
+ [0] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [1] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [2] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [3] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ },
+ .ap_mgmt_conf = {
+ .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_bcst_conf = {
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_max_tx_retries = 100,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
.tx_compl_threshold = 4,
.basic_rate = CONF_HW_BIT_RATE_1MBPS,
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
+ .tmpl_short_retry_limit = 10,
+ .tmpl_long_retry_limit = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = {
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 10,
.psm_entry_retries = 5,
+ .psm_exit_retries = 255,
.psm_entry_nullfunc_retries = 3,
.psm_entry_hangover_period = 1,
.keep_alive_interval = 55000,
@@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = {
.avg_weight_rssi_beacon = 20,
.avg_weight_rssi_data = 10,
.avg_weight_snr_beacon = 20,
- .avg_weight_snr_data = 10
+ .avg_weight_snr_data = 10,
},
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 30000,
- .max_dwell_time_passive = 60000,
+ .min_dwell_time_passive = 100000,
+ .max_dwell_time_passive = 100000,
.num_probe_reqs = 2,
},
.rf = {
@@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .ht = {
+ .tx_ba_win_size = 64,
+ .inactivity_timeout = 10000,
+ },
+ .mem = {
+ .num_stations = 1,
+ .ssid_profiles = 1,
+ .rx_block_num = 70,
+ .tx_min_block_num = 40,
+ .dynamic_memory = 0,
+ .min_req_tx_blocks = 104,
+ .min_req_rx_blocks = 22,
+ .tx_min = 27,
+ }
};
static void __wl1271_op_remove_interface(struct wl1271 *wl);
+static void wl1271_free_ap_keys(struct wl1271 *wl);
static void wl1271_device_release(struct device *dev)
@@ -393,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_init_templates_config(wl);
+ ret = wl1271_sta_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -425,6 +482,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_sta_mem_cfg(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Default fragmentation threshold */
ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
@@ -476,14 +537,71 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
+static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+{
+ bool fw_ps;
+
+ /* only regulate station links */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+
+ /*
+ * Wake up from high level PS if the STA is asleep with too little
+ * blocks in FW or if the STA is awake.
+ */
+ if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_end(wl, hlid);
+
+ /* Start high-level PS if the STA is asleep with enough blocks in FW */
+ else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_start(wl, hlid, true);
+}
+
+static void wl1271_irq_update_links_status(struct wl1271 *wl,
+ struct wl1271_fw_ap_status *status)
+{
+ u32 cur_fw_ps_map;
+ u8 hlid;
+
+ cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ if (wl->ap_fw_ps_map != cur_fw_ps_map) {
+ wl1271_debug(DEBUG_PSM,
+ "link ps prev 0x%x cur 0x%x changed 0x%x",
+ wl->ap_fw_ps_map, cur_fw_ps_map,
+ wl->ap_fw_ps_map ^ cur_fw_ps_map);
+
+ wl->ap_fw_ps_map = cur_fw_ps_map;
+ }
+
+ for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
+ u8 cnt = status->tx_lnk_free_blks[hlid] -
+ wl->links[hlid].prev_freed_blks;
+
+ wl->links[hlid].prev_freed_blks =
+ status->tx_lnk_free_blks[hlid];
+ wl->links[hlid].allocated_blks -= cnt;
+
+ wl1271_irq_ps_regulate_link(wl, hlid,
+ wl->links[hlid].allocated_blks);
+ }
+}
+
static void wl1271_fw_status(struct wl1271 *wl,
- struct wl1271_fw_status *status)
+ struct wl1271_fw_full_status *full_status)
{
+ struct wl1271_fw_common_status *status = &full_status->common;
struct timespec ts;
u32 total = 0;
int i;
- wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+ sizeof(struct wl1271_fw_ap_status), false);
+ else
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+ sizeof(struct wl1271_fw_sta_status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -507,6 +625,10 @@ static void wl1271_fw_status(struct wl1271 *wl,
if (total)
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
+ /* for AP update num of allocated TX blocks per link and ps status */
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_irq_update_links_status(wl, &full_status->ap);
+
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
@@ -542,7 +664,7 @@ static void wl1271_irq_work(struct work_struct *work)
loopcount--;
wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->intr);
+ intr = le32_to_cpu(wl->fw_status->common.intr);
if (!intr) {
wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -564,7 +686,7 @@ static void wl1271_irq_work(struct work_struct *work)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
/* check for tx results */
- if (wl->fw_status->tx_results_counter !=
+ if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
@@ -578,7 +700,7 @@ static void wl1271_irq_work(struct work_struct *work)
wl1271_tx_work_locked(wl);
}
- wl1271_rx(wl, wl->fw_status);
+ wl1271_rx(wl, &wl->fw_status->common);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -616,9 +738,26 @@ out:
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
+ const char *fw_name;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+ switch (wl->bss_type) {
+ case BSS_TYPE_AP_BSS:
+ fw_name = WL1271_AP_FW_NAME;
+ break;
+ case BSS_TYPE_IBSS:
+ case BSS_TYPE_STA_BSS:
+ fw_name = WL1271_FW_NAME;
+ break;
+ default:
+ wl1271_error("no compatible firmware for bss_type %d",
+ wl->bss_type);
+ return -EINVAL;
+ }
+
+ wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
+
+ ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +771,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
goto out;
}
+ vfree(wl->fw);
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
@@ -642,7 +782,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
}
memcpy(wl->fw, fw->data, wl->fw_len);
-
+ wl->fw_bss_type = wl->bss_type;
ret = 0;
out:
@@ -778,7 +918,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
goto out;
}
- if (wl->fw == NULL) {
+ /* Make sure the firmware type matches the BSS type */
+ if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
goto out;
@@ -811,6 +952,8 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
+ wl->bss_type = BSS_TYPE_STA_BSS;
+
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -827,7 +970,7 @@ int wl1271_plt_start(struct wl1271 *wl)
wl->state = WL1271_STATE_PLT;
wl1271_notice("firmware booted in PLT mode (%s)",
- wl->chip.fw_ver);
+ wl->chip.fw_ver_str);
goto out;
irq_disable:
@@ -854,12 +997,10 @@ out:
return ret;
}
-int wl1271_plt_stop(struct wl1271 *wl)
+int __wl1271_plt_stop(struct wl1271 *wl)
{
int ret = 0;
- mutex_lock(&wl->mutex);
-
wl1271_notice("power down");
if (wl->state != WL1271_STATE_PLT) {
@@ -875,56 +1016,55 @@ int wl1271_plt_stop(struct wl1271 *wl)
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
-out:
mutex_unlock(&wl->mutex);
-
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->recovery_work);
+ mutex_lock(&wl->mutex);
+out:
+ return ret;
+}
+int wl1271_plt_stop(struct wl1271 *wl)
+{
+ int ret;
+
+ mutex_lock(&wl->mutex);
+ ret = __wl1271_plt_stop(wl);
+ mutex_unlock(&wl->mutex);
return ret;
}
-static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = txinfo->control.sta;
unsigned long flags;
int q;
+ u8 hlid = 0;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count++;
/*
- * peek into the rates configured in the STA entry.
- * The rates set after connection stage, The first block only BG sets:
- * the compare is for bit 0-16 of sta_rate_set. The second block add
- * HT rates in case of HT supported.
+	 * The workqueue is slow to process the tx_queue, so we need to stop
+	 * the queue here; otherwise the queue will get too long.
*/
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (sta &&
- (sta->supp_rates[conf->channel->band] !=
- (wl->sta_rate_set & HW_BG_RATES_MASK))) {
- wl->sta_rate_set = sta->supp_rates[conf->channel->band];
- set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+ if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
+ ieee80211_stop_queues(wl->hw);
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
-#ifdef CONFIG_WL12XX_HT
- if (sta &&
- sta->ht_cap.ht_supported &&
- ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
- sta->ht_cap.mcs.rx_mask[0])) {
- /* Clean MCS bits before setting them */
- wl->sta_rate_set &= HW_BG_RATES_MASK;
- wl->sta_rate_set |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
- set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
- }
-#endif
- wl->tx_queue_count++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* queue the packet */
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ hlid = wl1271_tx_get_hlid(skb);
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+ } else {
+ skb_queue_tail(&wl->tx_queue[q], skb);
+ }
/*
* The chip specific setup must run before the first TX packet -
@@ -933,21 +1073,6 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
-
- /*
- * The workqueue is slow to process the tx_queue and we need stop
- * the queue here, otherwise the queue will get too long.
- */
- if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
- wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
-
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_stop_queues(wl->hw);
- set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- }
-
- return NETDEV_TX_OK;
}
static struct notifier_block wl1271_dev_notifier = {
@@ -967,6 +1092,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
*
* The MAC address is first known when the corresponding interface
* is added. That is where we will initialize the hardware.
+ *
+ * In addition, we currently have different firmwares for AP and managed
+ * operation. We will know which to boot according to interface type.
*/
return 0;
@@ -1006,6 +1134,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl->bss_type = BSS_TYPE_IBSS;
wl->set_bss_type = BSS_TYPE_STA_BSS;
break;
+ case NL80211_IFTYPE_AP:
+ wl->bss_type = BSS_TYPE_AP_BSS;
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -1061,11 +1192,11 @@ power_off:
wl->vif = vif;
wl->state = WL1271_STATE_ON;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
- strncpy(wiphy->fw_version, wl->chip.fw_ver,
+ strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
/*
@@ -1147,10 +1278,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->sta_rate_set = 0;
wl->flags = 0;
wl->vif = NULL;
wl->filters = 0;
+ wl1271_free_ap_keys(wl);
+ memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -1186,8 +1320,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
/* combine requested filters with current filter config */
filters = wl->filters | filters;
@@ -1322,25 +1455,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
wl->basic_rate_set = wl->conf.tx.basic_rate_5;
}
-static u32 wl1271_min_rate_get(struct wl1271 *wl)
-{
- int i;
- u32 rate = 0;
-
- if (!wl->basic_rate_set) {
- WARN_ON(1);
- wl->basic_rate_set = wl->conf.tx.basic_rate;
- }
-
- for (i = 0; !rate; i++) {
- if ((wl->basic_rate_set >> i) & 0x1)
- rate = 1 << i;
- }
-
- return rate;
-}
-
-static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
{
int ret;
@@ -1350,9 +1465,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_min_rate_get(wl);
- wl->sta_rate_set = 0;
- ret = wl1271_acx_rate_policies(wl);
+ wl->rate_set = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1495,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int channel, ret = 0;
+ bool is_ap;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
/*
* mac80211 will go to idle nearly immediately after transmitting some
@@ -1406,6 +1523,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
+ is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
@@ -1417,31 +1536,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
wl->band = conf->channel->band;
wl->channel = channel;
- /*
- * FIXME: the mac80211 should really provide a fixed rate
- * to use here. for now, just use the smallest possible rate
- * for the band as a fixed rate for association frames and
- * other control messages.
- */
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
-
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- wl1271_warning("rate policy for update channel "
- "failed %d", ret);
+ if (!is_ap) {
+ /*
+ * FIXME: the mac80211 should really provide a fixed
+ * rate to use here. for now, just use the smallest
+ * possible rate for the band as a fixed rate for
+ * association frames and other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
- if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- ret = wl1271_join(wl, false);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- wl1271_warning("cmd join to update channel "
+ wl1271_warning("rate policy for channel "
"failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join on channel "
+ "failed %d", ret);
+ }
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
+ ret = wl1271_sta_handle_idle(wl,
+ conf->flags & IEEE80211_CONF_IDLE);
if (ret < 0)
wl1271_warning("idle mode change failed %d", ret);
}
@@ -1548,7 +1670,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
struct wl1271 *wl = hw->priv;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
+ " total %x", changed, *total);
mutex_lock(&wl->mutex);
@@ -1562,15 +1685,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
-
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
/* determine, whether supported filter values have changed */
if (changed == 0)
@@ -1593,38 +1717,192 @@ out:
kfree(fp);
}
+static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_ap_key *ap_key;
+ int i;
+
+ wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
+
+ if (key_size > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ /*
+ * Find next free entry in ap_keys. Also check we are not replacing
+ * an existing key.
+ */
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ if (wl->recorded_ap_keys[i]->id == id) {
+ wl1271_warning("trying to record key replacement");
+ return -EINVAL;
+ }
+ }
+
+ if (i == MAX_NUM_KEYS)
+ return -EBUSY;
+
+ ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
+ if (!ap_key)
+ return -ENOMEM;
+
+ ap_key->id = id;
+ ap_key->key_type = key_type;
+ ap_key->key_size = key_size;
+ memcpy(ap_key->key, key, key_size);
+ ap_key->hlid = hlid;
+ ap_key->tx_seq_32 = tx_seq_32;
+ ap_key->tx_seq_16 = tx_seq_16;
+
+ wl->recorded_ap_keys[i] = ap_key;
+ return 0;
+}
+
+static void wl1271_free_ap_keys(struct wl1271 *wl)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ kfree(wl->recorded_ap_keys[i]);
+ wl->recorded_ap_keys[i] = NULL;
+ }
+}
+
+static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+{
+ int i, ret = 0;
+ struct wl1271_ap_key *key;
+ bool wep_key_added = false;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ key = wl->recorded_ap_keys[i];
+ ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ key->id, key->key_type,
+ key->key_size, key->key,
+ key->hlid, key->tx_seq_32,
+ key->tx_seq_16);
+ if (ret < 0)
+ goto out;
+
+ if (key->key_type == KEY_WEP)
+ wep_key_added = true;
+ }
+
+ if (wep_key_added) {
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ wl1271_free_ap_keys(wl);
+ return ret;
+}
+
+static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u32 tx_seq_32,
+ u16 tx_seq_16, struct ieee80211_sta *sta)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap) {
+ struct wl1271_station *wl_sta;
+ u8 hlid;
+
+ if (sta) {
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+ } else {
+ hlid = WL1271_AP_BROADCAST_HLID;
+ }
+
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ /*
+ * We do not support removing keys after AP shutdown.
+ * Pretend we do to make mac80211 happy.
+ */
+ if (action != KEY_ADD_OR_REPLACE)
+ return 0;
+
+ ret = wl1271_record_ap_key(wl, id,
+ key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ } else {
+ ret = wl1271_cmd_set_ap_key(wl, action,
+ id, key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ }
+
+ if (ret < 0)
+ return ret;
+ } else {
+ const u8 *addr;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ addr = sta ? sta->addr : bcast_addr;
+
+ if (is_zero_ether_addr(addr)) {
+			/* We don't support TX-only encryption */
+ return -EOPNOTSUPP;
+ }
+
+		/* The wl1271 does not allow removing unicast keys - they
+		   will be cleared automatically on the next CMD_JOIN. Ignore
+		   the request silently, as we don't want mac80211 to emit
+		   an error message. */
+ if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
+ return 0;
+
+ ret = wl1271_cmd_set_sta_key(wl, action,
+ id, key_type, key_size,
+ key, addr, tx_seq_32,
+ tx_seq_16);
+ if (ret < 0)
+ return ret;
+
+ /* the default WEP key needs to be configured at least once */
+ if (key_type == KEY_WEP) {
+ ret = wl1271_cmd_set_sta_default_wep_key(wl,
+ wl->default_key);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
- const u8 *addr;
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
u8 key_type;
- static const u8 bcast_addr[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
- addr = sta ? sta->addr : bcast_addr;
-
- wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
- wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+ wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
key_conf->cipher, key_conf->keyidx,
key_conf->keylen, key_conf->flags);
wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
- if (is_zero_ether_addr(addr)) {
- /* We dont support TX only encryption */
- ret = -EOPNOTSUPP;
- goto out;
- }
-
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1671,36 +1949,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, tx_seq_32, tx_seq_16);
+ ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ tx_seq_32, tx_seq_16, sta);
if (ret < 0) {
wl1271_error("Could not add or replace key");
goto out_sleep;
}
-
- /* the default WEP key needs to be configured at least once */
- if (key_type == KEY_WEP) {
- ret = wl1271_cmd_set_default_wep_key(wl,
- wl->default_key);
- if (ret < 0)
- goto out_sleep;
- }
break;
case DISABLE_KEY:
- /* The wl1271 does not allow to remove unicast keys - they
- will be cleared automatically on next CMD_JOIN. Ignore the
- request silently, as we dont want the mac80211 to emit
- an error message. */
- if (!is_broadcast_ether_addr(addr))
- break;
-
- ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, 0, 0);
+ ret = wl1271_set_key(wl, KEY_REMOVE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ 0, 0, sta);
if (ret < 0) {
wl1271_error("Could not remove key");
goto out_sleep;
@@ -1719,7 +1982,6 @@ out_sleep:
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
@@ -1821,7 +2083,7 @@ out:
return ret;
}
-static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
int offset)
{
u8 *ptr = skb->data + offset;
@@ -1831,89 +2093,213 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
if (ptr[0] == WLAN_EID_SSID) {
wl->ssid_len = ptr[1];
memcpy(wl->ssid, ptr+2, wl->ssid_len);
- return;
+ return 0;
}
ptr += (ptr[1] + 2);
}
+
wl1271_error("No SSID in IEs!\n");
+ return -ENOENT;
}
-static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1271_cmd_ps_mode mode;
- struct wl1271 *wl = hw->priv;
- struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
- bool do_join = false;
- bool set_assoc = false;
- int ret;
+ int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ else
+ ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ if (ret < 0) {
+ wl1271_warning("Set slot time failed %d", ret);
+ goto out;
+ }
+ }
- mutex_lock(&wl->mutex);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (bss_conf->use_short_preamble)
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ else
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ }
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (bss_conf->use_cts_prot)
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ else
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ if (ret < 0) {
+ wl1271_warning("Set ctsprotect failed %d", ret);
+ goto out;
+ }
+ }
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
+out:
+ return ret;
+}
+
+static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ int ret = 0;
- if ((changed & BSS_CHANGED_BEACON_INT) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+ if ((changed & BSS_CHANGED_BEACON_INT)) {
+ wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
wl->beacon_int = bss_conf->beacon_int;
- do_join = true;
}
- if ((changed & BSS_CHANGED_BEACON) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ if ((changed & BSS_CHANGED_BEACON)) {
+ struct ieee80211_hdr *hdr;
+ int ieoffset = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ u16 tmpl_id;
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+ if (!beacon)
+ goto out;
- if (beacon) {
- struct ieee80211_hdr *hdr;
- int ieoffset = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
+ wl1271_debug(DEBUG_MASTER, "beacon updated");
- wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+ tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+ CMD_TEMPL_BEACON;
+ ret = wl1271_cmd_template_set(wl, tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
+ CMD_TEMPL_PROBE_RESPONSE;
+ ret = wl1271_cmd_template_set(wl,
+ tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out;
+ }
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
- }
+out:
+ return ret;
+}
- hdr = (struct ieee80211_hdr *) beacon->data;
- hdr->frame_control = cpu_to_le16(
- IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_RESP);
+/* AP mode changes */
+static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ int ret = 0;
- ret = wl1271_cmd_template_set(wl,
- CMD_TEMPL_PROBE_RESPONSE,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
- dev_kfree_skb(beacon);
- if (ret < 0)
- goto out_sleep;
+ if ((changed & BSS_CHANGED_BASIC_RATES)) {
+ u32 rates = bss_conf->basic_rates;
+ struct conf_tx_rate_class mgmt_rc;
+
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
+ wl->basic_rate_set);
+
+ /* update the AP management rate policy with the new rates */
+ mgmt_rc.enabled_rates = wl->basic_rate_set;
+ mgmt_rc.long_retry_limit = 10;
+ mgmt_rc.short_retry_limit = 10;
+ mgmt_rc.aflags = 0;
+ ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0) {
+ wl1271_error("AP mgmt policy change failed %d", ret);
+ goto out;
+ }
+ }
- /* Need to update the SSID (for filtering etc) */
- do_join = true;
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+ if (bss_conf->enable_beacon) {
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_start_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "started AP");
+
+ ret = wl1271_ap_init_hwenc(wl);
+ if (ret < 0)
+ goto out;
+ }
+ } else {
+ if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_stop_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "stopped AP");
+ }
}
}
- if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+out:
+ return;
+}
+
+/* STA/IBSS mode changes */
+static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool do_join = false, set_assoc = false;
+ bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ u32 sta_rate_set = 0;
+ int ret;
+ struct ieee80211_sta *sta;
+ bool sta_exists = false;
+ struct ieee80211_sta_ht_cap sta_ht_cap;
+
+ if (is_ibss) {
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
+ changed);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
+ do_join = true;
+
+ /* Need to update the SSID (for filtering etc) */
+ if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+ do_join = true;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
@@ -1924,7 +2310,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
do_join = true;
}
- if (changed & BSS_CHANGED_CQM) {
+ if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
@@ -1942,24 +2328,70 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ if (!is_zero_ether_addr(wl->bssid)) {
ret = wl1271_cmd_build_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
ret = wl1271_build_qos_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* filter out all packets not from this BSSID */
wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
+ }
+ }
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ /* save the supp_rates of the ap */
+ sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
+ if (sta->ht_cap.ht_supported)
+ sta_rate_set |=
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+ sta_ht_cap = sta->ht_cap;
+ sta_exists = true;
+ }
+ rcu_read_unlock();
+
+ if (sta_exists) {
+ /* handle new association with HT and HT information change */
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+ true);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap true failed %d",
+ ret);
+ goto out;
+ }
+ ret = wl1271_acx_set_ht_information(wl,
+ bss_conf->ht_operation_mode);
+ if (ret < 0) {
+ wl1271_warning("Set ht information failed %d",
+ ret);
+ goto out;
+ }
+ }
+ /* handle new association without HT and disassociation */
+ else if (changed & BSS_CHANGED_ASSOC) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+ false);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap false failed %d",
+ ret);
+ goto out;
+ }
+ }
}
- if (changed & BSS_CHANGED_ASSOC) {
+ if ((changed & BSS_CHANGED_ASSOC)) {
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
@@ -1975,10 +2407,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
rates = bss_conf->basic_rates;
wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
rates);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ if (sta_rate_set)
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* with wl1271, we don't need to update the
@@ -1988,7 +2423,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
*/
ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* Get a template for hardware connection maintenance
@@ -2002,17 +2437,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* enable the connection monitoring feature */
ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* If we want to go in PSM but we're not there yet */
if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ enum wl1271_cmd_ps_mode mode;
+
mode = STATION_POWER_SAVE_MODE;
ret = wl1271_ps_set_mode(wl, mode,
wl->basic_rate,
true);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
} else {
/* use defaults when not associated */
@@ -2029,10 +2466,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* revert back to minimum rates for the current band */
wl1271_set_band_rate(wl);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* disable connection monitor features */
ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2477,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* Disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* restore the bssid filter and go to dummy bssid */
wl1271_unjoin(wl);
wl1271_dummy_join(wl);
}
-
- }
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
- else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
- if (ret < 0) {
- wl1271_warning("Set slot time failed %d", ret);
- goto out_sleep;
- }
- }
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
- else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
- }
-
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
- else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
- if (ret < 0) {
- wl1271_warning("Set ctsprotect failed %d", ret);
- goto out_sleep;
- }
}
- /*
- * Takes care of: New association with HT enable,
- * HT information change in beacon.
- */
- if (sta &&
- (changed & BSS_CHANGED_HT) &&
- (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
- if (ret < 0) {
- wl1271_warning("Set ht cap true failed %d", ret);
- goto out_sleep;
- }
- ret = wl1271_acx_set_ht_information(wl,
- bss_conf->ht_operation_mode);
- if (ret < 0) {
- wl1271_warning("Set ht information failed %d", ret);
- goto out_sleep;
- }
- }
- /*
- * Takes care of: New association without HT,
- * Disassociation.
- */
- else if (sta && (changed & BSS_CHANGED_ASSOC)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
- if (ret < 0) {
- wl1271_warning("Set ht cap false failed %d", ret);
- goto out_sleep;
- }
- }
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,76 +2504,128 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1271_cmd_build_arp_rsp(wl, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
- goto out_sleep;
+ goto out;
}
ret = wl1271_acx_arp_ip_filter(wl,
- (ACX_ARP_FILTER_ARP_FILTERING |
- ACX_ARP_FILTER_AUTO_ARP),
+ ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
if (do_join) {
ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
- goto out_sleep;
+ goto out;
}
}
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
out:
- mutex_unlock(&wl->mutex);
+ return;
}
-static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
{
struct wl1271 *wl = hw->priv;
- u8 ps_scheme;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
int ret;
- mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
+ (int)changed);
- wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+ mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- ret = -EAGAIN;
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- }
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
- /* the txop is confed in units of 32us by the mac80211, we need us */
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
- params->cw_min, params->cw_max,
- params->aifs, params->txop << 5);
- if (ret < 0)
- goto out_sleep;
+ if (is_ap)
+ wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
+ else
+ wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct wl1271 *wl = hw->priv;
+ u8 ps_scheme;
+ int ret = 0;
+
+ mutex_lock(&wl->mutex);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
if (params->uapsd)
ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
- CONF_CHANNEL_TYPE_EDCF,
- wl1271_tx_get_queue(queue),
- ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
- if (ret < 0)
- goto out_sleep;
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * If the state is off, the parameters will be recorded and
+ * configured on init. This happens in AP-mode.
+ */
+ struct conf_tx_ac_category *conf_ac =
+ &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
+ struct conf_tx_tid *conf_tid =
+ &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
+
+ conf_ac->ac = wl1271_tx_get_queue(queue);
+ conf_ac->cw_min = (u8)params->cw_min;
+ conf_ac->cw_max = params->cw_max;
+ conf_ac->aifsn = params->aifs;
+ conf_ac->tx_op_limit = params->txop << 5;
+
+ conf_tid->queue_id = wl1271_tx_get_queue(queue);
+ conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
+ conf_tid->tsid = wl1271_tx_get_queue(queue);
+ conf_tid->ps_scheme = ps_scheme;
+ conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
+ conf_tid->apsd_conf[0] = 0;
+ conf_tid->apsd_conf[1] = 0;
+ } else {
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * the txop is configured in units of 32us by mac80211;
+ * we need it in microseconds
+ */
+ ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop << 5);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ CONF_CHANNEL_TYPE_EDCF,
+ wl1271_tx_get_queue(queue),
+ ps_scheme, CONF_ACK_POLICY_LEGACY,
+ 0, 0);
+ if (ret < 0)
+ goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -2247,6 +2679,184 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static int wl1271_allocate_sta(struct wl1271 *wl,
+ struct ieee80211_sta *sta,
+ u8 *hlid)
+{
+ struct wl1271_station *wl_sta;
+ int id;
+
+ id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
+ if (id >= AP_MAX_STATIONS) {
+ wl1271_warning("could not allocate HLID - too much stations");
+ return -EBUSY;
+ }
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ __set_bit(id, wl->ap_hlid_map);
+ wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
+ *hlid = wl_sta->hlid;
+ memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
+ return 0;
+}
+
+static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+{
+ int id = hlid - WL1271_AP_STA_HLID_START;
+
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ return;
+
+ __clear_bit(id, wl->ap_hlid_map);
+ memset(wl->links[hlid].addr, 0, ETH_ALEN);
+ wl1271_tx_reset_link_queues(wl, hlid);
+ __clear_bit(hlid, &wl->ap_ps_map);
+ __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+}
+
+static int wl1271_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret = 0;
+ u8 hlid;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
+
+ ret = wl1271_allocate_sta(wl, sta, &hlid);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out_free_sta;
+
+ ret = wl1271_cmd_add_sta(wl, sta, hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out_free_sta:
+ if (ret < 0)
+ wl1271_free_sta(wl, hlid);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl1271_station *wl_sta;
+ int ret = 0, id;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+ wl1271_free_sta(wl, wl_sta->hlid);
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (wl->ba_support) {
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
+ true);
+ if (!ret)
+ wl->ba_rx_bitmap |= BIT(tid);
+ } else {
+ ret = -ENOTSUPP;
+ }
+ break;
+
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
+ if (!ret)
+ wl->ba_rx_bitmap &= ~BIT(tid);
+ break;
+
+ /*
+ * The BA initiator session is managed by the FW independently.
+ * Falling through here on purpose for all TX AMPDU actions.
+ */
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = -EINVAL;
+ break;
+
+ default:
+ wl1271_error("Incorrect ampdu action id=%x\n", action);
+ ret = -EINVAL;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
{ .bitrate = 10,
@@ -2305,6 +2915,7 @@ static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
+ { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
};
/* mapping to indexes for wl1271_rates */
@@ -2493,6 +3104,9 @@ static const struct ieee80211_ops wl1271_ops = {
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
+ .sta_add = wl1271_op_sta_add,
+ .sta_remove = wl1271_op_sta_remove,
+ .ampdu_action = wl1271_op_ampdu_action,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -2607,6 +3221,18 @@ int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
+ ret = wl1271_fetch_nvs(wl);
+ if (ret == 0) {
+ u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
+ wl->mac_addr[0] = nvs_ptr[11];
+ wl->mac_addr[1] = nvs_ptr[10];
+ wl->mac_addr[2] = nvs_ptr[6];
+ wl->mac_addr[3] = nvs_ptr[5];
+ wl->mac_addr[4] = nvs_ptr[4];
+ wl->mac_addr[5] = nvs_ptr[3];
+ }
+
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3255,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
void wl1271_unregister_hw(struct wl1271 *wl)
{
+ if (wl->state == WL1271_STATE_PLT)
+ __wl1271_plt_stop(wl);
+
unregister_netdevice_notifier(&wl1271_dev_notifier);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
@@ -2661,13 +3290,15 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_SUPPORTS_CQM_RSSI;
+ IEEE80211_HW_SUPPORTS_CQM_RSSI |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_AP_LINK_PS;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
wl->hw->wiphy->max_scan_ssids = 1;
/*
* Maximum length of elements in scanning probe request templates
@@ -2676,8 +3307,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
*/
wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
+ /*
+ * We keep local copies of the band structs because we need to
+ * modify them on a per-device basis.
+ */
+ memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ sizeof(wl1271_band_2ghz));
+ memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ sizeof(wl1271_band_5ghz));
+
+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &wl->bands[IEEE80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &wl->bands[IEEE80211_BAND_5GHZ];
wl->hw->queues = 4;
wl->hw->max_rates = 1;
@@ -2686,6 +3329,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ wl->hw->sta_data_size = sizeof(struct wl1271_station);
+
+ wl->hw->max_rx_aggregation_subframes = 8;
+
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2697,7 +3344,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
struct ieee80211_hw *hw;
struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
- int i, ret;
+ int i, j, ret;
unsigned int order;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -2725,6 +3372,10 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
for (i = 0; i < NUM_TX_QUEUES; i++)
skb_queue_head_init(&wl->tx_queue[i]);
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (j = 0; j < AP_MAX_LINKS; j++)
+ skb_queue_head_init(&wl->links[j].tx_queue[i]);
+
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->irq_work, wl1271_irq_work);
@@ -2735,19 +3386,24 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
+ wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
+ wl->fw_bss_type = MAX_BSS_TYPE;
+ wl->last_tx_hlid = 0;
+ wl->ap_ps_map = 0;
+ wl->ap_fw_ps_map = 0;
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2837,11 +3493,11 @@ int wl1271_free_hw(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wl1271_free_hw);
-u32 wl12xx_debug_level;
+u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
-module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE);
+module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 60a3738eadb0..5c347b1bd17f 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -24,6 +24,7 @@
#include "reg.h"
#include "ps.h"
#include "io.h"
+#include "tx.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -139,8 +140,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE,
- rates, send);
+ ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
@@ -163,8 +163,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
if (ret < 0)
return ret;
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE,
- rates, send);
+ ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
@@ -175,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
return ret;
}
+static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
+{
+ int i, filtered = 0;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /* filter all frames currently in the low level queues for this hlid */
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ info->status.rates[0].idx = -1;
+ ieee80211_tx_status(wl->hw, skb);
+ filtered++;
+ }
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count -= filtered;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ wl1271_handle_tx_low_watermark(wl);
+}
+
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+{
+ struct ieee80211_sta *sta;
+
+ if (test_bit(hlid, &wl->ap_ps_map))
+ return;
+
+ wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
+ "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
+ clean_queues);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ if (!sta) {
+ wl1271_error("could not find sta %pM for starting ps",
+ wl->links[hlid].addr);
+ rcu_read_unlock();
+ return;
+ }
+ ieee80211_sta_ps_transition_ni(sta, true);
+ rcu_read_unlock();
+
+ /* do we want to filter all frames from this link's queues? */
+ if (clean_queues)
+ wl1271_ps_filter_frames(wl, hlid);
+
+ __set_bit(hlid, &wl->ap_ps_map);
+}
+
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+{
+ struct ieee80211_sta *sta;
+
+ if (!test_bit(hlid, &wl->ap_ps_map))
+ return;
+
+ wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
+
+ __clear_bit(hlid, &wl->ap_ps_map);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ if (!sta) {
+ wl1271_error("could not find sta %pM for ending ps",
+ wl->links[hlid].addr);
+ goto end;
+ }
+
+ ieee80211_sta_ps_transition_ni(sta, false);
+end:
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 8415060f08e5..fc1f4c193593 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -32,5 +32,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
void wl1271_elp_work(struct work_struct *work);
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 682304c30b81..3d13d7a83ea1 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -29,14 +29,14 @@
#include "rx.h"
#include "io.h"
-static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
+static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
u32 drv_rx_counter)
{
return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
RX_MEM_BLOCK_MASK;
}
-static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
+static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
u32 drv_rx_counter)
{
return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
wl->noise = desc->rssi - (desc->snr >> 1);
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -92,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
- u16 *fc;
+ struct ieee80211_hdr *hdr;
u8 *buf;
u8 beacon = 0;
@@ -118,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
/* now we pull the descriptor out of the buffer */
skb_pull(skb, sizeof(*desc));
- fc = (u16 *)skb->data;
- if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_beacon(hdr->frame_control))
beacon = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
@@ -134,7 +134,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
return 0;
}
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
u32 buf_size;
@@ -198,6 +198,16 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
pkt_offset += pkt_length;
}
}
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
- cpu_to_le32(wl->rx_counter));
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+ } else {
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+ }
}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 3abb26fe0364..75fabf836491 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -30,10 +30,6 @@
#define WL1271_RX_MAX_RSSI -30
#define WL1271_RX_MIN_RSSI -95
-#define WL1271_RX_ALIGN_TO 4
-#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
- ~(WL1271_RX_ALIGN_TO - 1))
-
#define SHORT_PREAMBLE_BIT BIT(0)
#define OFDM_RATE_BIT BIT(6)
#define PBCC_RATE_BIT BIT(7)
@@ -86,8 +82,9 @@
/*
* RX Descriptor status
*
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
*/
#define WL1271_RX_DESC_STATUS_MASK 0x07
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
u8 snr;
__le32 timestamp;
u8 packet_class;
- u8 process_id;
+ union {
+ u8 process_id; /* STA FW */
+ u8 hlid; /* AP FW */
+ } __packed;
u8 pad_len;
u8 reserved;
} __packed;
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 93cbb8d5aba9..d5e874825069 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -345,3 +345,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 7145ea543783..0132dad756c4 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl)
spi_message_add_tail(&t, &m);
spi_sync(wl_to_spi(wl), &m);
+
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
@@ -494,4 +495,5 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index b44c75cd8c1e..ac60d577319f 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "wl12xx.h"
#include "io.h"
@@ -30,6 +31,23 @@
#include "ps.h"
#include "tx.h"
+static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap)
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+ else
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+
+ if (ret < 0)
+ return ret;
+
+ wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
+ return 0;
+}
+
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
int id;
@@ -52,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
}
}
+static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ /*
+ * add the station to the known list before transmitting the
+ * authentication response. this way it won't get de-authed by FW
+ * when transmitting too soon.
+ */
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (ieee80211_is_auth(hdr->frame_control))
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+}
+
+static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+{
+ bool fw_ps;
+ u8 tx_blks;
+
+ /* only regulate station links */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ tx_blks = wl->links[hlid].allocated_blks;
+
+ /*
+ * if in FW PS and there is enough data in FW we can put the link
+ * into high-level PS and clean out its TX queues.
+ */
+ if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_start(wl, hlid, true);
+}
+
+u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
+
+ if (control->control.sta) {
+ struct wl1271_station *wl_sta;
+
+ wl_sta = (struct wl1271_station *)
+ control->control.sta->drv_priv;
+ return wl_sta->hlid;
+ } else {
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return WL1271_AP_GLOBAL_HLID;
+ else
+ return WL1271_AP_BROADCAST_HLID;
+ }
+}
+
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset)
+ u32 buf_offset, u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -82,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
wl->tx_blocks_available -= total_blocks;
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->links[hlid].allocated_blks += total_blocks;
+
ret = 0;
wl1271_debug(DEBUG_TX,
@@ -95,11 +173,12 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
}
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control)
+ u32 extra, struct ieee80211_tx_info *control,
+ u8 hlid)
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
- int pad, ac;
+ int pad, ac, rate_idx;
s64 hosttime;
u16 tx_attr;
@@ -117,7 +196,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
getnstimeofday(&ts);
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+ else
+ desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
/* configure the tx attributes */
tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
@@ -125,25 +208,49 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
/* queue (we use same identifiers for tid's and ac's */
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = ac;
- desc->aid = TX_HW_DEFAULT_AID;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ desc->aid = hlid;
+
+ /* if the packets are destined for AP (have a STA entry)
+ send them with AP rate policies, otherwise use default
+ basic rates */
+ if (control->control.sta)
+ rate_idx = ACX_TX_AP_FULL_RATE;
+ else
+ rate_idx = ACX_TX_BASIC_RATE;
+ } else {
+ desc->hlid = hlid;
+ switch (hlid) {
+ case WL1271_AP_GLOBAL_HLID:
+ rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
+ break;
+ case WL1271_AP_BROADCAST_HLID:
+ rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ break;
+ default:
+ rate_idx = ac;
+ break;
+ }
+ }
+
+ tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
desc->reserved = 0;
/* align the length (and store in terms of words) */
- pad = WL1271_TX_ALIGN(skb->len);
+ pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
desc->length = cpu_to_le16(pad >> 2);
/* calculate number of padding bytes */
pad = pad - skb->len;
tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
- /* if the packets are destined for AP (have a STA entry) send them
- with AP rate policies, otherwise use default basic rates */
- if (control->control.sta)
- tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
-
desc->tx_attr = cpu_to_le16(tx_attr);
- wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
+ "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
+ le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time), desc->total_mem_blocks);
}
/* caller must hold wl->mutex */
@@ -153,8 +260,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
- u8 idx;
u32 total_len;
+ u8 hlid;
if (!skb)
return -EINVAL;
@@ -166,29 +273,43 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
extra = WL1271_TKIP_IV_SPACE;
if (info->control.hw_key) {
- idx = info->control.hw_key->hw_key_idx;
+ bool is_wep;
+ u8 idx = info->control.hw_key->hw_key_idx;
+ u32 cipher = info->control.hw_key->cipher;
+
+ is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104);
- /* FIXME: do we have to do this if we're not using WEP? */
- if (unlikely(wl->default_key != idx)) {
- ret = wl1271_cmd_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wl->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, idx);
if (ret < 0)
return ret;
wl->default_key = idx;
}
}
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset);
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ hlid = wl1271_tx_get_hlid(skb);
+ else
+ hlid = TX_HW_DEFAULT_AID;
+
+ ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
if (ret < 0)
return ret;
- wl1271_tx_fill_hdr(wl, skb, extra, info);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_regulate_link(wl, hlid);
+ }
+
+ wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
/*
* The length of each packet is stored in terms of words. Thus, we must
* pad the skb data to make sure its length is aligned.
* The number of padding bytes is computed and set in wl1271_tx_fill_hdr
*/
- total_len = WL1271_TX_ALIGN(skb->len);
+ total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
@@ -222,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
return enabled_rates;
}
-static void handle_tx_low_watermark(struct wl1271 *wl)
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
unsigned long flags;
@@ -236,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
}
}
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
{
struct sk_buff *skb = NULL;
unsigned long flags;
@@ -262,12 +383,69 @@ out:
return skb;
}
+static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+{
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ int i, h, start_hlid;
+
+ /* start from the link after the last one */
+ start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+
+ /* dequeue according to AC, round robin on each link */
+ for (i = 0; i < AP_MAX_LINKS; i++) {
+ h = (start_hlid + i) % AP_MAX_LINKS;
+
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+ if (skb)
+ goto out;
+ }
+
+out:
+ if (skb) {
+ wl->last_tx_hlid = h;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count--;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ } else {
+ wl->last_tx_hlid = 0;
+ }
+
+ return skb;
+}
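wl1271_ap_skb_dequeue() above serves the AP links round-robin, starting one past the link served last, and within each link drains the VO/VI/BE/BK queues in that priority order. A standalone sketch of just the link-selection step, with hypothetical names and queue depths:

/* Standalone sketch (hypothetical names): pick the next AP link to serve,
 * starting one past the link served last, as wl1271_ap_skb_dequeue() does. */
#include <stdio.h>

#define MAX_LINKS	7	/* global + broadcast + 5 stations */

static int pick_next_link(const int backlog[MAX_LINKS], int last)
{
	int i, h;

	for (i = 0; i < MAX_LINKS; i++) {
		h = (last + 1 + i) % MAX_LINKS;
		if (backlog[h] > 0)
			return h;	/* first link after 'last' with pending frames */
	}
	return -1;	/* nothing queued on any link */
}

int main(void)
{
	int backlog[MAX_LINKS] = { 0, 3, 0, 0, 2, 0, 0 };

	printf("%d %d\n", pick_next_link(backlog, 1), pick_next_link(backlog, 4));
	/* prints: 4 1 */
	return 0;
}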
+
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_ap_skb_dequeue(wl);
+
+ return wl1271_sta_skb_dequeue(wl);
+}
+
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- skb_queue_head(&wl->tx_queue[q], skb);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ u8 hlid = wl1271_tx_get_hlid(skb);
+ skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
+
+ /* make sure we dequeue the same packet next time */
+ wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
+ } else {
+ skb_queue_head(&wl->tx_queue[q], skb);
+ }
+
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -277,35 +455,13 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
{
struct sk_buff *skb;
bool woken_up = false;
- u32 sta_rates = 0;
u32 buf_offset = 0;
bool sent_packets = false;
int ret;
- /* check if the rates supported by the AP have changed */
- if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
- &wl->flags))) {
- unsigned long flags;
-
- spin_lock_irqsave(&wl->wl_lock, flags);
- sta_rates = wl->sta_rate_set;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- }
-
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- /* if rates have changed, re-configure the rate policy */
- if (unlikely(sta_rates)) {
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
- woken_up = true;
-
- wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
- wl1271_acx_rate_policies(wl);
- }
-
while ((skb = wl1271_skb_dequeue(wl))) {
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
@@ -352,7 +508,7 @@ out_ack:
if (sent_packets) {
/* interrupt the firmware with the new packets */
wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
- handle_tx_low_watermark(wl);
+ wl1271_handle_tx_low_watermark(wl);
}
out:
@@ -469,32 +625,76 @@ void wl1271_tx_complete(struct wl1271 *wl)
}
}
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
+{
+ struct sk_buff *skb;
+ int i, total = 0;
+ unsigned long flags;
+ struct ieee80211_tx_info *info;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status(wl->hw, skb);
+ total++;
+ }
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count -= total;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ wl1271_handle_tx_low_watermark(wl);
+}
+
/* caller must hold wl->mutex */
void wl1271_tx_reset(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
/* TX failure */
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
- ieee80211_tx_status(wl->hw, skb);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ for (i = 0; i < AP_MAX_LINKS; i++) {
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_blks = 0;
+ wl->links[i].prev_freed_blks = 0;
+ }
+
+ wl->last_tx_hlid = 0;
+ } else {
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
+ skb);
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status(wl->hw, skb);
+ }
}
}
+
wl->tx_queue_count = 0;
/*
* Make sure the driver is at a consistent state, in case this
* function is called from a context other than interface removal.
*/
- handle_tx_low_watermark(wl);
+ wl1271_handle_tx_low_watermark(wl);
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
wl1271_free_tx_id(wl, i);
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
ieee80211_tx_status(wl->hw, skb);
}
}
@@ -509,8 +709,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
while (!time_after(jiffies, timeout)) {
mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
- wl->tx_frames_cnt);
+ wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+ wl->tx_frames_cnt, wl->tx_queue_count);
if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
mutex_unlock(&wl->mutex);
return;
@@ -521,3 +721,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
wl1271_warning("Unable to flush all TX buffers, timed out.");
}
+
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
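wl1271_tx_min_rate_get() above scans basic_rate_set for its least-significant set bit. A standalone sketch of the same operation using the two's-complement trick; the helper name is hypothetical and not part of the patch:

/* Standalone sketch (hypothetical helper): isolate the lowest enabled rate
 * bit, equivalent to the scan loop in wl1271_tx_min_rate_get(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t min_rate_bit(uint32_t rate_set)
{
	/* x & -x keeps only the least-significant set bit; 0 stays 0 */
	return rate_set & (uint32_t)-rate_set;
}

int main(void)
{
	printf("0x%x\n", min_rate_bit(0x1b0));	/* prints 0x10 */
	return 0;
}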
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 903e5dc69b7a..02f07fa66e82 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -29,6 +29,7 @@
#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
+#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
/* The chipset reference driver states, that the "aid" value 1
* is for infra-BSS, but is still always used */
#define TX_HW_DEFAULT_AID 1
@@ -52,8 +53,6 @@
#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
#define WL1271_TX_ALIGN_TO 4
-#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
- ~(WL1271_TX_ALIGN_TO - 1))
#define WL1271_TKIP_IV_SPACE 4
struct wl1271_tx_hw_descr {
@@ -77,8 +76,12 @@ struct wl1271_tx_hw_descr {
u8 id;
/* The packet TID value (as User-Priority) */
u8 tid;
- /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
- u8 aid;
+ union {
+ /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
+ u8 aid;
+ /* AP - host link ID (HLID) */
+ u8 hlid;
+ } __packed;
u8 reserved;
} __packed;
@@ -146,5 +149,9 @@ void wl1271_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
+u8 wl1271_tx_get_hlid(struct sk_buff *skb);
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
#endif
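The tx.h hunk above drops the hand-rolled WL1271_TX_ALIGN() macro in favour of the kernel's ALIGN(). For a power-of-two boundary both reduce to the same arithmetic; a standalone sketch with a hypothetical helper name:

/* Standalone sketch (hypothetical name): round a length up to a power-of-two
 * boundary, as ALIGN(len, WL1271_TX_ALIGN_TO) does for the 4-byte TX alignment. */
#include <stdio.h>

static unsigned int align_up(unsigned int len, unsigned int a)
{
	/* a must be a power of two */
	return (len + a - 1) & ~(a - 1);
}

int main(void)
{
	printf("%u %u %u\n", align_up(60, 4), align_up(61, 4), align_up(64, 4));
	/* prints: 60 64 64 */
	return 0;
}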
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 9050dd9b62d2..338acc9f60b3 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
#define DRIVER_NAME "wl1271"
#define DRIVER_PREFIX DRIVER_NAME ": "
+/*
+ * FW versions that support 11n BA are
+ * marked x.x.x.50-60.x
+ */
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
+
enum {
DEBUG_NONE = 0,
DEBUG_IRQ = BIT(0),
@@ -57,6 +64,8 @@ enum {
DEBUG_SDIO = BIT(14),
DEBUG_FILTERS = BIT(15),
DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
DEBUG_ALL = ~0,
};
@@ -103,16 +112,27 @@ extern u32 wl12xx_debug_level;
true); \
} while (0)
-#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
CFG_BSSID_FILTER_EN | \
CFG_MC_FILTER_EN)
-#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
+#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_FW_NAME "wl1271-fw.bin"
+#define WL1271_DEFAULT_AP_RX_CONFIG 0
+
+#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
+ CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
+ CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
+ CFG_RX_ASSOC_EN)
+
+
+
+#define WL1271_FW_NAME "wl1271-fw-2.bin"
+#define WL1271_AP_FW_NAME "wl1271-fw-ap.bin"
+
#define WL1271_NVS_NAME "wl1271-nvs.bin"
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
@@ -129,6 +149,25 @@ extern u32 wl12xx_debug_level;
#define WL1271_DEFAULT_BEACON_INT 100
#define WL1271_DEFAULT_DTIM_PERIOD 1
+#define WL1271_AP_GLOBAL_HLID 0
+#define WL1271_AP_BROADCAST_HLID 1
+#define WL1271_AP_STA_HLID_START 2
+
+/*
+ * When in AP-mode, we allow (at least) this number of mem-blocks
+ * to be transmitted to FW for a STA in PS-mode. Only when packets are
+ * present in the FW buffers will it wake the sleeping STA. We want to put
+ * enough packets for the driver to transmit all of its buffered data before
+ * the STA goes to sleep again. But we don't want to take too many mem-blocks
+ * as it might hurt the throughput of active STAs.
+ * The number of blocks (18) is enough for 2 large packets.
+ */
+#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
+
+#define WL1271_AP_BSS_INDEX 0
+#define WL1271_AP_DEF_INACTIV_SEC 300
+#define WL1271_AP_DEF_BEACON_EXP 20
+
#define ACX_TX_DESCRIPTORS 32
#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +200,13 @@ struct wl1271_partition_set {
struct wl1271;
+#define WL12XX_NUM_FW_VER 5
+
/* FIXME: I'm not sure about this structure name */
struct wl1271_chip {
u32 id;
- char fw_ver[21];
+ char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+ unsigned int fw_ver[WL12XX_NUM_FW_VER];
};
struct wl1271_stats {
@@ -178,8 +220,13 @@ struct wl1271_stats {
#define NUM_TX_QUEUES 4
#define NUM_RX_PKT_DESC 8
-/* FW status registers */
-struct wl1271_fw_status {
+#define AP_MAX_STATIONS 5
+
+/* Broadcast and Global links + links to stations */
+#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
+
+/* FW status registers common for AP/STA */
+struct wl1271_fw_common_status {
__le32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
@@ -188,9 +235,43 @@ struct wl1271_fw_status {
__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
__le32 tx_released_blks[NUM_TX_QUEUES];
__le32 fw_localtime;
- __le32 padding[2];
} __packed;
+/* FW status registers for AP */
+struct wl1271_fw_ap_status {
+ struct wl1271_fw_common_status common;
+
+ /* Next fields valid only in AP FW */
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /* Number of freed MBs per HLID */
+ u8 tx_lnk_free_blks[AP_MAX_LINKS];
+ u8 padding_1[1];
+} __packed;
+
+/* FW status registers for STA */
+struct wl1271_fw_sta_status {
+ struct wl1271_fw_common_status common;
+
+ u8 tx_total;
+ u8 reserved1;
+ __le16 reserved2;
+} __packed;
+
+struct wl1271_fw_full_status {
+ union {
+ struct wl1271_fw_common_status common;
+ struct wl1271_fw_sta_status sta;
+ struct wl1271_fw_ap_status ap;
+ };
+} __packed;
+
+
struct wl1271_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
@@ -218,6 +299,48 @@ struct wl1271_if_operations {
void (*disable_irq)(struct wl1271 *wl);
};
+#define MAX_NUM_KEYS 14
+#define MAX_KEY_SIZE 32
+
+struct wl1271_ap_key {
+ u8 id;
+ u8 key_type;
+ u8 key_size;
+ u8 key[MAX_KEY_SIZE];
+ u8 hlid;
+ u32 tx_seq_32;
+ u16 tx_seq_16;
+};
+
+enum wl12xx_flags {
+ WL1271_FLAG_STA_ASSOCIATED,
+ WL1271_FLAG_JOINED,
+ WL1271_FLAG_GPIO_POWER,
+ WL1271_FLAG_TX_QUEUE_STOPPED,
+ WL1271_FLAG_IN_ELP,
+ WL1271_FLAG_PSM,
+ WL1271_FLAG_PSM_REQUESTED,
+ WL1271_FLAG_IRQ_PENDING,
+ WL1271_FLAG_IRQ_RUNNING,
+ WL1271_FLAG_IDLE,
+ WL1271_FLAG_IDLE_REQUESTED,
+ WL1271_FLAG_PSPOLL_FAILURE,
+ WL1271_FLAG_STA_STATE_SENT,
+ WL1271_FLAG_FW_TX_BUSY,
+ WL1271_FLAG_AP_STARTED
+};
+
+struct wl1271_link {
+ /* AP-mode - TX queue per AC in link */
+ struct sk_buff_head tx_queue[NUM_TX_QUEUES];
+
+ /* accounting for allocated / available TX blocks in FW */
+ u8 allocated_blks;
+ u8 prev_freed_blks;
+
+ u8 addr[ETH_ALEN];
+};
+
struct wl1271 {
struct platform_device *plat_dev;
struct ieee80211_hw *hw;
@@ -236,21 +359,6 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
-#define WL1271_FLAG_STA_RATES_CHANGED (0)
-#define WL1271_FLAG_STA_ASSOCIATED (1)
-#define WL1271_FLAG_JOINED (2)
-#define WL1271_FLAG_GPIO_POWER (3)
-#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
-#define WL1271_FLAG_IN_ELP (5)
-#define WL1271_FLAG_PSM (6)
-#define WL1271_FLAG_PSM_REQUESTED (7)
-#define WL1271_FLAG_IRQ_PENDING (8)
-#define WL1271_FLAG_IRQ_RUNNING (9)
-#define WL1271_FLAG_IDLE (10)
-#define WL1271_FLAG_IDLE_REQUESTED (11)
-#define WL1271_FLAG_PSPOLL_FAILURE (12)
-#define WL1271_FLAG_STA_STATE_SENT (13)
-#define WL1271_FLAG_FW_TX_BUSY (14)
unsigned long flags;
struct wl1271_partition_set part;
@@ -262,6 +370,7 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
+ u8 fw_bss_type;
struct wl1271_nvs_file *nvs;
size_t nvs_len;
@@ -343,7 +452,6 @@ struct wl1271 {
* bits 16-23 - 802.11n MCS index mask
* support only 1 stream, thus only 8 bits for the MCS rates (0-7).
*/
- u32 sta_rate_set;
u32 basic_rate_set;
u32 basic_rate;
u32 rate_set;
@@ -378,13 +486,12 @@ struct wl1271 {
int last_rssi_event;
struct wl1271_stats stats;
- struct dentry *rootdir;
__le32 buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl1271_fw_status *fw_status;
+ struct wl1271_fw_full_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
struct ieee80211_vif *vif;
@@ -400,6 +507,38 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
+
+ /* map for HLIDs of associated stations - when operating in AP mode */
+ unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
+
+ /* recorded keys for AP-mode - set here before AP startup */
+ struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
+
+ /* bands supported by this instance of wl12xx */
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* RX BA constraint value */
+ bool ba_support;
+ u8 ba_rx_bitmap;
+
+ /*
+ * AP-mode - links indexed by HLID. The global and broadcast links
+ * are always active.
+ */
+ struct wl1271_link links[AP_MAX_LINKS];
+
+ /* the hlid of the link where the last transmitted skb came from */
+ int last_tx_hlid;
+
+ /* AP-mode - a bitmap of links currently in PS mode according to FW */
+ u32 ap_fw_ps_map;
+
+ /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
+ unsigned long ap_ps_map;
+};
+
+struct wl1271_station {
+ u8 hlid;
};
int wl1271_plt_start(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index be21032f4dc1..67dcf8f28cd3 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -138,13 +138,13 @@ struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
u8 llc_hdr[sizeof(rfc1042_header)];
- u16 llc_type;
+ __be16 llc_type;
struct arphdr arp_hdr;
u8 sender_hw[ETH_ALEN];
- u32 sender_ip;
+ __be32 sender_ip;
u8 target_hw[ETH_ALEN];
- u32 target_ip;
+ __be32 target_ip;
} __packed;
@@ -160,4 +160,9 @@ struct wl12xx_probe_resp_template {
struct wl12xx_ie_country country;
} __packed;
+struct wl12xx_disconn_template {
+ struct ieee80211_header header;
+ __le16 disconn_reason;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 6a9b66051cf7..a73a305d3cba 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
{
int r;
int i;
- zd_addr_t *a16;
- u16 *v16;
+ zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
+ u16 v16[USB_MAX_IOREAD32_COUNT * 2];
unsigned int count16;
if (count > USB_MAX_IOREAD32_COUNT)
return -EINVAL;
- /* Allocate a single memory block for values and addresses. */
- count16 = 2*count;
- /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
- a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
- GFP_KERNEL);
- if (!a16) {
- dev_dbg_f(zd_chip_dev(chip),
- "error ENOMEM in allocation of a16\n");
- r = -ENOMEM;
- goto out;
- }
- v16 = (u16 *)(a16 + count16);
+ /* Use stack for values and addresses. */
+ count16 = 2 * count;
+ BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
+ BUG_ON(count16 * sizeof(u16) > sizeof(v16));
for (i = 0; i < count; i++) {
int j = 2*i;
@@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error: zd_ioread16v_locked. Error number %d\n", r);
- goto out;
+ return r;
}
for (i = 0; i < count; i++) {
@@ -147,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
values[i] = (v16[j] << 16) | v16[j+1];
}
-out:
- kfree((void *)a16);
- return r;
+ return 0;
}
-int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
- unsigned int count)
+static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
+ const struct zd_ioreq32 *ioreqs,
+ unsigned int count)
{
int i, j, r;
- struct zd_ioreq16 *ioreqs16;
+ struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
unsigned int count16;
+ /* Use stack for values and addresses. */
+
ZD_ASSERT(mutex_is_locked(&chip->mutex));
if (count == 0)
@@ -166,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
if (count > USB_MAX_IOWRITE32_COUNT)
return -EINVAL;
- /* Allocate a single memory block for values and addresses. */
- count16 = 2*count;
- ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
- if (!ioreqs16) {
- r = -ENOMEM;
- dev_dbg_f(zd_chip_dev(chip),
- "error %d in ioreqs16 allocation\n", r);
- goto out;
- }
+ count16 = 2 * count;
+ BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
for (i = 0; i < count; i++) {
j = 2*i;
@@ -185,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
ioreqs16[j+1].addr = ioreqs[i].addr;
}
- r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16);
+ r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
#ifdef DEBUG
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error %d in zd_usb_write16v\n", r);
}
#endif /* DEBUG */
-out:
- kfree(ioreqs16);
return r;
}
+int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+ unsigned int count)
+{
+ int r;
+
+ zd_usb_iowrite16v_async_start(&chip->usb);
+ r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
+ if (r) {
+ zd_usb_iowrite16v_async_end(&chip->usb, 0);
+ return r;
+ }
+ return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
+}
+
int zd_iowrite16a_locked(struct zd_chip *chip,
const struct zd_ioreq16 *ioreqs, unsigned int count)
{
@@ -204,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
unsigned int i, j, t, max;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ zd_usb_iowrite16v_async_start(&chip->usb);
+
for (i = 0; i < count; i += j + t) {
t = 0;
max = count-i;
@@ -216,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
}
}
- r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j);
+ r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
if (r) {
+ zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error zd_usb_iowrite16v. Error number %d\n",
r);
@@ -225,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
}
}
- return 0;
+ return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
/* Writes a variable number of 32 bit registers. The functions will split
@@ -238,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
int r;
unsigned int i, j, t, max;
+ zd_usb_iowrite16v_async_start(&chip->usb);
+
for (i = 0; i < count; i += j + t) {
t = 0;
max = count-i;
@@ -250,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
}
}
- r = _zd_iowrite32v_locked(chip, &ioreqs[i], j);
+ r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
if (r) {
+ zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error _zd_iowrite32v_locked."
" Error number %d\n", r);
@@ -259,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
}
}
- return 0;
+ return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
@@ -370,16 +374,12 @@ error:
return r;
}
-/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
- * CR_MAC_ADDR_P2 must be overwritten
- */
-int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
+ const struct zd_ioreq32 *in_reqs,
+ const char *type)
{
int r;
- struct zd_ioreq32 reqs[2] = {
- [0] = { .addr = CR_MAC_ADDR_P1 },
- [1] = { .addr = CR_MAC_ADDR_P2 },
- };
+ struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
if (mac_addr) {
reqs[0].value = (mac_addr[3] << 24)
@@ -388,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
| mac_addr[0];
reqs[1].value = (mac_addr[5] << 8)
| mac_addr[4];
- dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr);
+ dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
} else {
- dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n");
+ dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
}
mutex_lock(&chip->mutex);
@@ -399,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
return r;
}
+/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
+ * CR_MAC_ADDR_P2 must be overwritten
+ */
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+{
+ static const struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_MAC_ADDR_P1 },
+ [1] = { .addr = CR_MAC_ADDR_P2 },
+ };
+
+ return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
+}
+
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
+{
+ static const struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_BSSID_P1 },
+ [1] = { .addr = CR_BSSID_P2 },
+ };
+
+ return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
+}
+
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
{
int r;
@@ -849,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
{
struct zd_ioreq32 reqs[3];
+ u16 b_interval = s->beacon_interval & 0xffff;
- if (s->beacon_interval <= 5)
- s->beacon_interval = 5;
- if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
- s->pre_tbtt = s->beacon_interval - 1;
+ if (b_interval <= 5)
+ b_interval = 5;
+ if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
+ s->pre_tbtt = b_interval - 1;
if (s->atim_wnd_period >= s->pre_tbtt)
s->atim_wnd_period = s->pre_tbtt - 1;
@@ -862,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
reqs[1].addr = CR_PRE_TBTT;
reqs[1].value = s->pre_tbtt;
reqs[2].addr = CR_BCN_INTERVAL;
- reqs[2].value = s->beacon_interval;
+ reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
}
-static int set_beacon_interval(struct zd_chip *chip, u32 interval)
+static int set_beacon_interval(struct zd_chip *chip, u16 interval,
+ u8 dtim_period, int type)
{
int r;
struct aw_pt_bi s;
+ u32 b_interval, mode_flag;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
+
+ if (interval > 0) {
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ mode_flag = BCN_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ mode_flag = BCN_MODE_AP;
+ break;
+ default:
+ mode_flag = 0;
+ break;
+ }
+ } else {
+ dtim_period = 0;
+ mode_flag = 0;
+ }
+
+ b_interval = mode_flag | (dtim_period << 16) | interval;
+
+ r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
+ if (r)
+ return r;
r = get_aw_pt_bi(chip, &s);
if (r)
return r;
- s.beacon_interval = interval;
return set_aw_pt_bi(chip, &s);
}
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+ int type)
{
int r;
mutex_lock(&chip->mutex);
- r = set_beacon_interval(chip, interval);
+ r = set_beacon_interval(chip, interval, dtim_period, type);
mutex_unlock(&chip->mutex);
return r;
}
@@ -905,7 +955,7 @@ static int hw_init(struct zd_chip *chip)
if (r)
return r;
- return set_beacon_interval(chip, 100);
+ return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
}
static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1407,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
mutex_lock(&chip->mutex);
zd_usb_disable_int(&chip->usb);
mutex_unlock(&chip->mutex);
+
+ /* cancel pending interrupt work */
+ cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
}
int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1416,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
mutex_lock(&chip->mutex);
zd_usb_enable_tx(&chip->usb);
r = zd_usb_enable_rx(&chip->usb);
+ zd_tx_watchdog_enable(&chip->usb);
mutex_unlock(&chip->mutex);
return r;
}
@@ -1423,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
void zd_chip_disable_rxtx(struct zd_chip *chip)
{
mutex_lock(&chip->mutex);
+ zd_tx_watchdog_disable(&chip->usb);
zd_usb_disable_rx(&chip->usb);
zd_usb_disable_tx(&chip->usb);
mutex_unlock(&chip->mutex);
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index f8bbf7d302ae..14e4402a6111 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -546,6 +546,7 @@ enum {
#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
RX_FILTER_CFEND | RX_FILTER_CFACK)
+#define BCN_MODE_AP 0x1000000
#define BCN_MODE_IBSS 0x2000000
/* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
u8 zd_chip_get_channel(struct zd_chip *chip);
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
int zd_chip_switch_radio_on(struct zd_chip *chip);
int zd_chip_switch_radio_off(struct zd_chip *chip);
int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+ int type);
static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
{
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 6ac597ffd3b9..5463ca9ebc01 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t;
#ifdef DEBUG
# define ZD_ASSERT(x) \
do { \
- if (!(x)) { \
+ if (unlikely(!(x))) { \
pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
__FILE__, __LINE__, __stringify(x)); \
dump_stack(); \
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6107304cb94c..5037c8b2b415 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
static void housekeeping_disable(struct zd_mac *mac);
+static void beacon_init(struct zd_mac *mac);
+static void beacon_enable(struct zd_mac *mac);
+static void beacon_disable(struct zd_mac *mac);
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
+static int zd_mac_config_beacon(struct ieee80211_hw *hw,
+ struct sk_buff *beacon);
static int zd_reg2alpha2(u8 regdomain, char *alpha2)
{
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
}
+static int set_mac_and_bssid(struct zd_mac *mac)
+{
+ int r;
+
+ if (!mac->vif)
+ return -1;
+
+ r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
+ if (r)
+ return r;
+
+ /* After setting the MAC address, the vendor driver sets either the
+ * BSSID (AP mode) or the RX filter (other modes).
+ */
+ if (mac->type != NL80211_IFTYPE_AP)
+ return set_rx_filter(mac);
+ else
+ return zd_write_bssid(&mac->chip, mac->vif->addr);
+}
+
static int set_mc_hash(struct zd_mac *mac)
{
struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
return zd_chip_set_multicast_hash(&mac->chip, &hash);
}
-static int zd_op_start(struct ieee80211_hw *hw)
+int zd_op_start(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
goto disable_rxtx;
housekeeping_enable(mac);
+ beacon_enable(mac);
+ set_bit(ZD_DEVICE_RUNNING, &mac->flags);
return 0;
disable_rxtx:
zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
return r;
}
-static void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
struct sk_buff *skb;
struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
+ clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
/* The order here deliberately is a little different from the open()
* method, since we need to make sure there is no opportunity for RX
* frames to be processed by mac80211 after we have stopped it.
*/
zd_chip_disable_rxtx(chip);
+ beacon_disable(mac);
housekeeping_disable(mac);
flush_workqueue(zd_workqueue);
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
dev_kfree_skb_any(skb);
}
+int zd_restore_settings(struct zd_mac *mac)
+{
+ struct sk_buff *beacon;
+ struct zd_mc_hash multicast_hash;
+ unsigned int short_preamble;
+ int r, beacon_interval, beacon_period;
+ u8 channel;
+
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+
+ spin_lock_irq(&mac->lock);
+ multicast_hash = mac->multicast_hash;
+ short_preamble = mac->short_preamble;
+ beacon_interval = mac->beacon.interval;
+ beacon_period = mac->beacon.period;
+ channel = mac->channel;
+ spin_unlock_irq(&mac->lock);
+
+ r = set_mac_and_bssid(mac);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
+ return r;
+ }
+
+ r = zd_chip_set_channel(&mac->chip, channel);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
+ r);
+ return r;
+ }
+
+ set_rts_cts(mac, short_preamble);
+
+ r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac),
+ "zd_chip_set_multicast_hash failed, %d\n", r);
+ return r;
+ }
+
+ if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+ mac->type == NL80211_IFTYPE_ADHOC ||
+ mac->type == NL80211_IFTYPE_AP) {
+ if (mac->vif != NULL) {
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
+ }
+
+ zd_set_beacon_interval(&mac->chip, beacon_interval,
+ beacon_period, mac->type);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+ }
+
+ return 0;
+}
+
/**
* zd_mac_tx_status - reports tx status of a packet if required
* @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
{
struct zd_mac *mac = zd_hw_mac(hw);
- int r;
+ int r, ret, num_cmds, req_pos = 0;
u32 tmp, j = 0;
/* 4 more bytes for tail CRC */
u32 full_len = beacon->len + 4;
+ unsigned long end_jiffies, message_jiffies;
+ struct zd_ioreq32 *ioreqs;
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0);
+ /* Alloc memory for full beacon write at once. */
+ num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
+ ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
+ if (!ioreqs)
+ return -ENOMEM;
+
+ mutex_lock(&mac->chip.mutex);
+
+ r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
- r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+ goto out;
+ r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
+ goto release_sema;
+ end_jiffies = jiffies + HZ / 2; /*~500ms*/
+ message_jiffies = jiffies + HZ / 10; /*~100ms*/
while (tmp & 0x2) {
- r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+ r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
- if ((++j % 100) == 0) {
- printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n");
- if (j >= 500) {
- printk(KERN_ERR "Giving up beacon config.\n");
- return -ETIMEDOUT;
+ goto release_sema;
+ if (time_is_before_eq_jiffies(message_jiffies)) {
+ message_jiffies = jiffies + HZ / 10;
+ dev_err(zd_mac_dev(mac),
+ "CR_BCN_FIFO_SEMAPHORE not ready\n");
+ if (time_is_before_eq_jiffies(end_jiffies)) {
+ dev_err(zd_mac_dev(mac),
+ "Giving up beacon config.\n");
+ r = -ETIMEDOUT;
+ goto reset_device;
}
}
- msleep(1);
+ msleep(20);
}
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = full_len - 1;
+ req_pos++;
if (zd_chip_is_zd1211b(&mac->chip)) {
- r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_LENGTH;
+ ioreqs[req_pos].value = full_len - 1;
+ req_pos++;
}
for (j = 0 ; j < beacon->len; j++) {
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO,
- *((u8 *)(beacon->data + j)));
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
+ req_pos++;
}
for (j = 0; j < 4; j++) {
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = 0x0;
+ req_pos++;
}
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1);
- if (r < 0)
- return r;
+ BUG_ON(req_pos != num_cmds);
+
+ r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
+
+release_sema:
+ /*
+ * Try very hard to release device beacon semaphore, as otherwise
+ * device/driver can be left in unusable state.
+ */
+ end_jiffies = jiffies + HZ / 2; /*~500ms*/
+ ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+ while (ret < 0) {
+ if (time_is_before_eq_jiffies(end_jiffies)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(20);
+ ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+ }
+
+ if (ret < 0)
+ dev_err(zd_mac_dev(mac), "Could not release "
+ "CR_BCN_FIFO_SEMAPHORE!\n");
+ if (r < 0 || ret < 0) {
+ if (r >= 0)
+ r = ret;
+ goto out;
+ }
/* 802.11b/g 2.4G CCK 1Mb
* 802.11a, not yet implemented, uses different values (see GPL vendor
* driver)
*/
- return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 |
- (full_len << 19));
+ r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
+ CR_BCN_PLCP_CFG);
+out:
+ mutex_unlock(&mac->chip.mutex);
+ kfree(ioreqs);
+ return r;
+
+reset_device:
+ mutex_unlock(&mac->chip.mutex);
+ kfree(ioreqs);
+
+ /* semaphore stuck, reset device to avoid fw freeze later */
+ dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
+ "reseting device...");
+ usb_queue_reset_device(mac->chip.usb.intf);
+
+ return r;
}
static int fill_ctrlset(struct zd_mac *mac,
@@ -701,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac,
* control block of the skbuff will be initialized. If necessary the incoming
* mac80211 queues will be stopped.
*/
-static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -716,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
r = zd_usb_tx(&mac->chip.usb, skb);
if (r)
goto fail;
- return 0;
+ return;
fail:
dev_kfree_skb(skb);
- return 0;
}
/**
@@ -779,6 +927,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
mac->ack_pending = 1;
mac->ack_signal = stats->signal;
+
+ /* Prevent a tx-packet from remaining pending in AP mode */
+ if (mac->type == NL80211_IFTYPE_AP) {
+ skb = __skb_dequeue(q);
+ zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
+ mac->ack_pending = 0;
+ }
}
spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1037,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, vif->addr);
+ mac->vif = vif;
+
+ return set_mac_and_bssid(mac);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1054,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
- zd_set_beacon_interval(&mac->chip, 0);
+ mac->vif = NULL;
+ zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
zd_write_mac_addr(&mac->chip, NULL);
}
@@ -905,49 +1064,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_conf *conf = &hw->conf;
+ spin_lock_irq(&mac->lock);
+ mac->channel = conf->channel->hw_value;
+ spin_unlock_irq(&mac->lock);
+
return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
}
-static void zd_process_intr(struct work_struct *work)
+static void zd_beacon_done(struct zd_mac *mac)
{
- u16 int_status;
- struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
+ struct sk_buff *skb, *beacon;
- int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
- if (int_status & INT_CFG_NEXT_BCN)
- dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
- else
- dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
-
- zd_chip_enable_hwint(&mac->chip);
-}
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ return;
+ if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
+ return;
+ /*
+ * Send out buffered broad- and multicast frames.
+ */
+ while (!ieee80211_queue_stopped(mac->hw, 0)) {
+ skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
+ if (!skb)
+ break;
+ zd_op_tx(mac->hw, skb);
+ }
-static void set_multicast_hash_handler(struct work_struct *work)
-{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_multicast_hash_work);
- struct zd_mc_hash hash;
+ /*
+ * Fetch next beacon so that tim_count is updated.
+ */
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
spin_lock_irq(&mac->lock);
- hash = mac->multicast_hash;
+ mac->beacon.last_update = jiffies;
spin_unlock_irq(&mac->lock);
-
- zd_chip_set_multicast_hash(&mac->chip, &hash);
}
-static void set_rx_filter_handler(struct work_struct *work)
+static void zd_process_intr(struct work_struct *work)
{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_rx_filter_work);
- int r;
+ u16 int_status;
+ unsigned long flags;
+ struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
- dev_dbg_f(zd_mac_dev(mac), "\n");
- r = set_rx_filter(mac);
- if (r)
- dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r);
+ spin_lock_irqsave(&mac->lock, flags);
+ int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ if (int_status & INT_CFG_NEXT_BCN) {
+ /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
+ zd_beacon_done(mac);
+ } else {
+ dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
+ }
+
+ zd_chip_enable_hwint(&mac->chip);
}
+
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
@@ -979,6 +1156,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
};
struct zd_mac *mac = zd_hw_mac(hw);
unsigned long flags;
+ int r;
/* Only deal with supported flags */
changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1178,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
mac->multicast_hash = hash;
spin_unlock_irqrestore(&mac->lock, flags);
- /* XXX: these can be called here now, can sleep now! */
- queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+ zd_chip_set_multicast_hash(&mac->chip, &hash);
- if (changed_flags & FIF_CONTROL)
- queue_work(zd_workqueue, &mac->set_rx_filter_work);
+ if (changed_flags & FIF_CONTROL) {
+ r = set_rx_filter(mac);
+ if (r)
+ dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
+ }
/* no handling required for FIF_OTHER_BSS as we don't currently
* do BSSID filtering */
@@ -1016,20 +1196,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
* time. */
}
-static void set_rts_cts_work(struct work_struct *work)
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_rts_cts_work);
- unsigned long flags;
- unsigned int short_preamble;
-
mutex_lock(&mac->chip.mutex);
-
- spin_lock_irqsave(&mac->lock, flags);
- mac->updating_rts_rate = 0;
- short_preamble = mac->short_preamble;
- spin_unlock_irqrestore(&mac->lock, flags);
-
zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
mutex_unlock(&mac->chip.mutex);
}
@@ -1040,33 +1209,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changes)
{
struct zd_mac *mac = zd_hw_mac(hw);
- unsigned long flags;
int associated;
dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
if (mac->type == NL80211_IFTYPE_MESH_POINT ||
- mac->type == NL80211_IFTYPE_ADHOC) {
+ mac->type == NL80211_IFTYPE_ADHOC ||
+ mac->type == NL80211_IFTYPE_AP) {
associated = true;
if (changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
if (beacon) {
+ zd_chip_disable_hwint(&mac->chip);
zd_mac_config_beacon(hw, beacon);
+ zd_chip_enable_hwint(&mac->chip);
kfree_skb(beacon);
}
}
if (changes & BSS_CHANGED_BEACON_ENABLED) {
- u32 interval;
+ u16 interval = 0;
+ u8 period = 0;
- if (bss_conf->enable_beacon)
- interval = BCN_MODE_IBSS |
- bss_conf->beacon_int;
- else
- interval = 0;
+ if (bss_conf->enable_beacon) {
+ period = bss_conf->dtim_period;
+ interval = bss_conf->beacon_int;
+ }
- zd_set_beacon_interval(&mac->chip, interval);
+ spin_lock_irq(&mac->lock);
+ mac->beacon.period = period;
+ mac->beacon.interval = interval;
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ zd_set_beacon_interval(&mac->chip, interval, period,
+ mac->type);
}
} else
associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1256,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
/* TODO: do hardware bssid filtering */
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- spin_lock_irqsave(&mac->lock, flags);
+ spin_lock_irq(&mac->lock);
mac->short_preamble = bss_conf->use_short_preamble;
- if (!mac->updating_rts_rate) {
- mac->updating_rts_rate = 1;
- /* FIXME: should disable TX here, until work has
- * completed and RTS_CTS reg is updated */
- queue_work(zd_workqueue, &mac->set_rts_cts_work);
- }
- spin_unlock_irqrestore(&mac->lock, flags);
+ spin_unlock_irq(&mac->lock);
+
+ set_rts_cts(mac, bss_conf->use_short_preamble);
}
}
@@ -1138,12 +1312,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC;
+ IEEE80211_HW_SIGNAL_UNSPEC |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
hw->max_signal = 100;
hw->queues = 1;
@@ -1160,15 +1336,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
zd_chip_init(&mac->chip, hw, intf);
housekeeping_init(mac);
- INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
- INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
- INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
+ beacon_init(mac);
INIT_WORK(&mac->process_intr, zd_process_intr);
SET_IEEE80211_DEV(hw, &intf->dev);
return hw;
}
+#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
+
+static void beacon_watchdog_handler(struct work_struct *work)
+{
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, beacon.watchdog_work.work);
+ struct sk_buff *beacon;
+ unsigned long timeout;
+ int interval, period;
+
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ goto rearm;
+ if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
+ goto rearm;
+
+ spin_lock_irq(&mac->lock);
+ interval = mac->beacon.interval;
+ period = mac->beacon.period;
+ timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
+ spin_unlock_irq(&mac->lock);
+
+ if (interval > 0 && time_is_before_jiffies(timeout)) {
+ dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
+ "restarting. "
+ "(interval: %d, dtim: %d)\n",
+ interval, period);
+
+ zd_chip_disable_hwint(&mac->chip);
+
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
+
+ zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
+
+ zd_chip_enable_hwint(&mac->chip);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+ }
+
+rearm:
+ queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+ BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_init(struct zd_mac *mac)
+{
+ INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
+}
+
+static void beacon_enable(struct zd_mac *mac)
+{
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+
+ mac->beacon.last_update = jiffies;
+ queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+ BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_disable(struct zd_mac *mac)
+{
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+ cancel_delayed_work_sync(&mac->beacon.watchdog_work);
+}
+
#define LINK_LED_WORK_DELAY HZ
static void link_led_handler(struct work_struct *work)
@@ -1179,6 +1422,9 @@ static void link_led_handler(struct work_struct *work)
int is_associated;
int r;
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ goto requeue;
+
spin_lock_irq(&mac->lock);
is_associated = mac->associated;
spin_unlock_irq(&mac->lock);
@@ -1188,6 +1434,7 @@ static void link_led_handler(struct work_struct *work)
if (r)
dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
+requeue:
queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
LINK_LED_WORK_DELAY);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index a6d86b996c79..f8c93c3fe755 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -163,6 +163,17 @@ struct housekeeping {
struct delayed_work link_led_work;
};
+struct beacon {
+ struct delayed_work watchdog_work;
+ unsigned long last_update;
+ u16 interval;
+ u8 period;
+};
+
+enum zd_device_flags {
+ ZD_DEVICE_RUNNING,
+};
+
#define ZD_MAC_STATS_BUFFER_SIZE 16
#define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
spinlock_t lock;
spinlock_t intr_lock;
struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
struct housekeeping housekeeping;
- struct work_struct set_multicast_hash_work;
+ struct beacon beacon;
struct work_struct set_rts_cts_work;
- struct work_struct set_rx_filter_work;
struct work_struct process_intr;
struct zd_mc_hash multicast_hash;
u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
u8 regdomain;
u8 default_regdomain;
+ u8 channel;
int type;
int associated;
+ unsigned long flags;
struct sk_buff_head ack_wait_queue;
struct ieee80211_channel channels[14];
struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
/* Short preamble (used for RTS/CTS) */
unsigned int short_preamble:1;
- /* flags to indicate update in progress */
- unsigned int updating_rts_rate:1;
-
/* whether to pass frames with CRC errors to stack */
unsigned int pass_failed_fcs:1;
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
void zd_mac_tx_failed(struct urb *urb);
void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
+int zd_op_start(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw);
+int zd_restore_settings(struct zd_mac *mac);
+
#ifdef DEBUG
void zd_dump_rx_status(const struct rx_status *status);
#else
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 06041cb1c422..81e80489a052 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
+ spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
USB_MAX_EP_INT_BUFFER);
+ spin_unlock(&mac->lock);
schedule_work(&mac->process_intr);
} else if (intr->read_regs_enabled) {
intr->read_regs.length = len = urb->actual_length;
@@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
- goto kfree;
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
default:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
}
@@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
resubmit:
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
- dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb);
- goto kfree;
+ dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
+ urb, r);
+ /* TODO: add worker to reset intr->urb */
}
return;
-kfree:
- kfree(urb->transfer_buffer);
}
static inline int int_urb_interval(struct usb_device *udev)
@@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
int zd_usb_enable_int(struct zd_usb *usb)
{
int r;
- struct usb_device *udev;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
- void *transfer_buffer = NULL;
struct urb *urb;
dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
intr->urb = urb;
spin_unlock_irq(&intr->lock);
- /* TODO: make it a DMA buffer */
r = -ENOMEM;
- transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL);
- if (!transfer_buffer) {
+ intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ GFP_KERNEL, &intr->buffer_dma);
+ if (!intr->buffer) {
dev_dbg_f(zd_usb_dev(usb),
"couldn't allocate transfer_buffer\n");
goto error_set_urb_null;
}
- udev = zd_usb_to_usbdev(usb);
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
- transfer_buffer, USB_MAX_EP_INT_BUFFER,
+ intr->buffer, USB_MAX_EP_INT_BUFFER,
int_urb_complete, usb,
intr->interval);
+ urb->transfer_dma = intr->buffer_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
r = usb_submit_urb(urb, GFP_KERNEL);
@@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
return 0;
error:
- kfree(transfer_buffer);
+ usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ intr->buffer, intr->buffer_dma);
error_set_urb_null:
spin_lock_irq(&intr->lock);
intr->urb = NULL;
@@ -539,8 +543,11 @@ out:
void zd_usb_disable_int(struct zd_usb *usb)
{
unsigned long flags;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
struct urb *urb;
+ void *buffer;
+ dma_addr_t buffer_dma;
spin_lock_irqsave(&intr->lock, flags);
urb = intr->urb;
@@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
return;
}
intr->urb = NULL;
+ buffer = intr->buffer;
+ buffer_dma = intr->buffer_dma;
+ intr->buffer = NULL;
spin_unlock_irqrestore(&intr->lock, flags);
usb_kill_urb(urb);
dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
usb_free_urb(urb);
+
+ if (buffer)
+ usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ buffer, buffer_dma);
}
static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
static void rx_urb_complete(struct urb *urb)
{
+ int r;
struct zd_usb *usb;
struct zd_usb_rx *rx;
const u8 *buffer;
@@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
return;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
usb = urb->context;
rx = &usb->rx;
+ zd_usb_reset_rx_idle_timer(usb);
+
if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
}
resubmit:
- usb_submit_urb(urb, GFP_ATOMIC);
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
}
static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
usb_free_urb(urb);
}
-int zd_usb_enable_rx(struct zd_usb *usb)
+static int __zd_usb_enable_rx(struct zd_usb *usb)
{
int i, r;
struct zd_usb_rx *rx = &usb->rx;
@@ -742,7 +762,21 @@ error:
return r;
}
-void zd_usb_disable_rx(struct zd_usb *usb)
+int zd_usb_enable_rx(struct zd_usb *usb)
+{
+ int r;
+ struct zd_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ r = __zd_usb_enable_rx(usb);
+ mutex_unlock(&rx->setup_mutex);
+
+ zd_usb_reset_rx_idle_timer(usb);
+
+ return r;
+}
+
+static void __zd_usb_disable_rx(struct zd_usb *usb)
{
int i;
unsigned long flags;
@@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
spin_unlock_irqrestore(&rx->lock, flags);
}
+void zd_usb_disable_rx(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ __zd_usb_disable_rx(usb);
+ mutex_unlock(&rx->setup_mutex);
+
+ cancel_delayed_work_sync(&rx->idle_work);
+}
+
+static void zd_usb_reset_rx(struct zd_usb *usb)
+{
+ bool do_reset;
+ struct zd_usb_rx *rx = &usb->rx;
+ unsigned long flags;
+
+ mutex_lock(&rx->setup_mutex);
+
+ spin_lock_irqsave(&rx->lock, flags);
+ do_reset = rx->urbs != NULL;
+ spin_unlock_irqrestore(&rx->lock, flags);
+
+ if (do_reset) {
+ __zd_usb_disable_rx(usb);
+ __zd_usb_enable_rx(usb);
+ }
+
+ mutex_unlock(&rx->setup_mutex);
+
+ if (do_reset)
+ zd_usb_reset_rx_idle_timer(usb);
+}
+
/**
* zd_usb_disable_tx - disable transmission
* @usb: the zd1211rw-private USB structure
@@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
unsigned long flags;
- struct list_head *pos, *n;
+
+ atomic_set(&tx->enabled, 0);
+
+ /* kill all submitted tx-urbs */
+ usb_kill_anchored_urbs(&tx->submitted);
spin_lock_irqsave(&tx->lock, flags);
- list_for_each_safe(pos, n, &tx->free_urb_list) {
- list_del(pos);
- usb_free_urb(list_entry(pos, struct urb, urb_list));
- }
- tx->enabled = 0;
+ WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+ WARN_ON(tx->submitted_urbs != 0);
tx->submitted_urbs = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+
/* The stopped state is ignored, relying on ieee80211_wake_queues()
* in a potentionally following zd_usb_enable_tx().
*/
- spin_unlock_irqrestore(&tx->lock, flags);
}
/**
@@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
struct zd_usb_tx *tx = &usb->tx;
spin_lock_irqsave(&tx->lock, flags);
- tx->enabled = 1;
+ atomic_set(&tx->enabled, 1);
tx->submitted_urbs = 0;
ieee80211_wake_queues(zd_usb_to_hw(usb));
tx->stopped = 0;
spin_unlock_irqrestore(&tx->lock, flags);
}
-/**
- * alloc_tx_urb - provides an tx URB
- * @usb: a &struct zd_usb pointer
- *
- * Allocates a new URB. If possible takes the urb from the free list in
- * usb->tx.
- */
-static struct urb *alloc_tx_urb(struct zd_usb *usb)
-{
- struct zd_usb_tx *tx = &usb->tx;
- unsigned long flags;
- struct list_head *entry;
- struct urb *urb;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (list_empty(&tx->free_urb_list)) {
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- goto out;
- }
- entry = tx->free_urb_list.next;
- list_del(entry);
- urb = list_entry(entry, struct urb, urb_list);
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
- return urb;
-}
-
-/**
- * free_tx_urb - frees a used tx URB
- * @usb: a &struct zd_usb pointer
- * @urb: URB to be freed
- *
- * Frees the transmission URB, which means to put it on the free URB
- * list.
- */
-static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
-{
- struct zd_usb_tx *tx = &usb->tx;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!tx->enabled) {
- usb_free_urb(urb);
- goto out;
- }
- list_add(&urb->urb_list, &tx->free_urb_list);
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
static void tx_dec_submitted_urbs(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
@@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct zd_usb *usb;
+ struct zd_usb_tx *tx;
+
+ skb = (struct sk_buff *)urb->context;
+ info = IEEE80211_SKB_CB(skb);
+ /*
+ * grab 'usb' pointer before handing off the skb (since
+ * it might be freed by zd_mac_tx_to_dev or mac80211)
+ */
+ usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+ tx = &usb->tx;
switch (urb->status) {
case 0:
@@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
goto resubmit;
}
free_urb:
- skb = (struct sk_buff *)urb->context;
- /*
- * grab 'usb' pointer before handing off the skb (since
- * it might be freed by zd_mac_tx_to_dev or mac80211)
- */
- info = IEEE80211_SKB_CB(skb);
- usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+ skb_unlink(skb, &usb->tx.submitted_skbs);
zd_mac_tx_to_dev(skb, urb->status);
- free_tx_urb(usb, urb);
+ usb_free_urb(urb);
tx_dec_submitted_urbs(usb);
return;
resubmit:
+ usb_anchor_urb(urb, &tx->submitted);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
+ usb_unanchor_urb(urb);
dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
goto free_urb;
}
@@ -956,10 +982,17 @@ resubmit:
int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
{
int r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct urb *urb;
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!atomic_read(&tx->enabled)) {
+ r = -ENOENT;
+ goto out;
+ }
- urb = alloc_tx_urb(usb);
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
r = -ENOMEM;
goto out;
@@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
skb->data, skb->len, tx_urb_complete, skb);
+ info->rate_driver_data[1] = (void *)jiffies;
+ skb_queue_tail(&tx->submitted_skbs, skb);
+ usb_anchor_urb(urb, &tx->submitted);
+
r = usb_submit_urb(urb, GFP_ATOMIC);
- if (r)
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
+ usb_unanchor_urb(urb);
+ skb_unlink(skb, &tx->submitted_skbs);
goto error;
+ }
tx_inc_submitted_urbs(usb);
return 0;
error:
- free_tx_urb(usb, urb);
+ usb_free_urb(urb);
out:
return r;
}
+static bool zd_tx_timeout(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+ struct sk_buff_head *q = &tx->submitted_skbs;
+ struct sk_buff *skb, *skbnext;
+ struct ieee80211_tx_info *info;
+ unsigned long flags, trans_start;
+ bool have_timedout = false;
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb_queue_walk_safe(q, skb, skbnext) {
+ info = IEEE80211_SKB_CB(skb);
+ trans_start = (unsigned long)info->rate_driver_data[1];
+
+ if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
+ have_timedout = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return have_timedout;
+}
+
+static void zd_tx_watchdog_handler(struct work_struct *work)
+{
+ struct zd_usb *usb =
+ container_of(work, struct zd_usb, tx.watchdog_work.work);
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
+ goto out;
+ if (!zd_tx_timeout(usb))
+ goto out;
+
+ /* TX halted, try reset */
+ dev_warn(zd_usb_dev(usb), "TX-stall detected, reseting device...");
+
+ usb_queue_reset_device(usb->intf);
+
+ /* reset will stop this worker, don't rearm */
+ return;
+out:
+ queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+ ZD_TX_WATCHDOG_INTERVAL);
+}
+
+void zd_tx_watchdog_enable(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!tx->watchdog_enabled) {
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+ queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+ ZD_TX_WATCHDOG_INTERVAL);
+ tx->watchdog_enabled = 1;
+ }
+}
+
+void zd_tx_watchdog_disable(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (tx->watchdog_enabled) {
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+ tx->watchdog_enabled = 0;
+ cancel_delayed_work_sync(&tx->watchdog_work);
+ }
+}
+
+static void zd_rx_idle_timer_handler(struct work_struct *work)
+{
+ struct zd_usb *usb =
+ container_of(work, struct zd_usb, rx.idle_work.work);
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ return;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ /* 30 seconds since last rx, reset rx */
+ zd_usb_reset_rx(usb);
+}
+
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+
+ cancel_delayed_work(&rx->idle_work);
+ queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+}
+
static inline void init_usb_interrupt(struct zd_usb *usb)
{
struct zd_usb_interrupt *intr = &usb->intr;
@@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
spin_lock_init(&rx->lock);
+ mutex_init(&rx->setup_mutex);
if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
rx->usb_packet_size = 512;
} else {
rx->usb_packet_size = 64;
}
ZD_ASSERT(rx->fragment_length == 0);
+ INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
}
static inline void init_usb_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
spin_lock_init(&tx->lock);
- tx->enabled = 0;
+ atomic_set(&tx->enabled, 0);
tx->stopped = 0;
- INIT_LIST_HEAD(&tx->free_urb_list);
+ skb_queue_head_init(&tx->submitted_skbs);
+ init_usb_anchor(&tx->submitted);
tx->submitted_urbs = 0;
+ tx->watchdog_enabled = 0;
+ INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
}
void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1017,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
memset(usb, 0, sizeof(*usb));
usb->intf = usb_get_intf(intf);
usb_set_intfdata(usb->intf, hw);
+ init_usb_anchor(&usb->submitted_cmds);
init_usb_interrupt(usb);
init_usb_tx(usb);
init_usb_rx(usb);
@@ -1240,6 +1380,7 @@ static void disconnect(struct usb_interface *intf)
ieee80211_unregister_hw(hw);
/* Just in case something has gone wrong! */
+ zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
zd_usb_disable_int(usb);
@@ -1255,11 +1396,92 @@ static void disconnect(struct usb_interface *intf)
dev_dbg(&intf->dev, "disconnected\n");
}
+static void zd_usb_resume(struct zd_usb *usb)
+{
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+ int r;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ r = zd_op_start(zd_usb_to_hw(usb));
+ if (r < 0) {
+ dev_warn(zd_usb_dev(usb), "Device resume failed "
+ "with error code %d. Retrying...\n", r);
+ if (usb->was_running)
+ set_bit(ZD_DEVICE_RUNNING, &mac->flags);
+ usb_queue_reset_device(usb->intf);
+ return;
+ }
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+ r = zd_restore_settings(mac);
+ if (r < 0) {
+ dev_dbg(zd_usb_dev(usb),
+ "failed to restore settings, %d\n", r);
+ return;
+ }
+ }
+}
+
+static void zd_usb_stop(struct zd_usb *usb)
+{
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ zd_op_stop(zd_usb_to_hw(usb));
+
+ zd_usb_disable_tx(usb);
+ zd_usb_disable_rx(usb);
+ zd_usb_disable_int(usb);
+
+ usb->initialized = 0;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct zd_mac *mac;
+ struct zd_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = zd_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
+ zd_usb_stop(usb);
+
+ mutex_lock(&mac->chip.mutex);
+ return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct zd_mac *mac;
+ struct zd_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = zd_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ mutex_unlock(&mac->chip.mutex);
+
+ if (usb->was_running)
+ zd_usb_resume(usb);
+ return 0;
+}
+
static struct usb_driver driver = {
.name = KBUILD_MODNAME,
.id_table = usb_ids,
.probe = probe,
.disconnect = disconnect,
+ .pre_reset = pre_reset,
+ .post_reset = post_reset,
};
struct workqueue_struct *zd_workqueue;
@@ -1393,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
return -EWOULDBLOCK;
}
if (!usb_int_enabled(usb)) {
- dev_dbg_f(zd_usb_dev(usb),
+ dev_dbg_f(zd_usb_dev(usb),
"error: usb interrupt not enabled\n");
return -EWOULDBLOCK;
}
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
+ sizeof(__le16) > sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
+
req->id = cpu_to_le16(USB_REQ_READ_REGS);
for (i = 0; i < count; i++)
req->addr[i] = cpu_to_le16((u16)addresses[i]);
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb);
- r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg(). Error number %d\n", r);
+ "error in usb_interrupt_msg(). Error number %d\n", r);
goto error;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n"
+ dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1424,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
}
timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(50));
if (!timeout) {
disable_read_regs_int(usb);
dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1434,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
r = get_results(usb, values, req, count);
error:
- kfree(req);
return r;
}
-int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
- unsigned int count)
+static void iowrite16v_urb_complete(struct urb *urb)
+{
+ struct zd_usb *usb = urb->context;
+
+ if (urb->status && !usb->cmd_error)
+ usb->cmd_error = urb->status;
+}
+
+static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
+{
+ int r = 0;
+ struct urb *urb = usb->urb_async_waiting;
+
+ if (!urb)
+ return 0;
+
+ usb->urb_async_waiting = NULL;
+
+ if (!last)
+ urb->transfer_flags |= URB_NO_INTERRUPT;
+
+ usb_anchor_urb(urb, &usb->submitted_cmds);
+ r = usb_submit_urb(urb, GFP_KERNEL);
+ if (r) {
+ usb_unanchor_urb(urb);
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in usb_submit_urb(). Error number %d\n", r);
+ goto error;
+ }
+
+ /* fall-through with r == 0 */
+error:
+ usb_free_urb(urb);
+ return r;
+}
+
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
+{
+ ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
+ ZD_ASSERT(usb->urb_async_waiting == NULL);
+ ZD_ASSERT(!usb->in_async);
+
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+
+ usb->in_async = 1;
+ usb->cmd_error = 0;
+ usb->urb_async_waiting = NULL;
+}
+
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
+{
+ int r;
+
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ ZD_ASSERT(usb->in_async);
+
+ /* Submit last iowrite16v URB */
+ r = zd_submit_waiting_urb(usb, true);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in zd_submit_waiting_usb(). "
+ "Error number %d\n", r);
+
+ usb_kill_anchored_urbs(&usb->submitted_cmds);
+ goto error;
+ }
+
+ if (timeout)
+ timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
+ timeout);
+ if (!timeout) {
+ usb_kill_anchored_urbs(&usb->submitted_cmds);
+ if (usb->cmd_error == -ENOENT) {
+ dev_dbg_f(zd_usb_dev(usb), "timed out");
+ r = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ r = usb->cmd_error;
+error:
+ usb->in_async = 0;
+ return r;
+}
+
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count)
{
int r;
struct usb_device *udev;
struct usb_req_write_regs *req = NULL;
- int i, req_len, actual_req_len;
+ int i, req_len;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ ZD_ASSERT(usb->in_async);
if (count == 0)
return 0;
@@ -1460,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
return -EWOULDBLOCK;
}
+ udev = zd_usb_to_usbdev(usb);
+
+ ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
+ if (!ep)
+ return -ENOENT;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
req_len = sizeof(struct usb_req_write_regs) +
count * sizeof(struct reg_data);
req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ if (!req) {
+ r = -ENOMEM;
+ goto error;
+ }
req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
for (i = 0; i < count; i++) {
@@ -1473,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
rw->value = cpu_to_le16(ioreqs[i].value);
}
- udev = zd_usb_to_usbdev(usb);
- r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, iowrite16v_urb_complete, usb,
+ ep->desc.bInterval);
+ urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+
+ /* Submit previous URB */
+ r = zd_submit_waiting_urb(usb, false);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg(). Error number %d\n", r);
- goto error;
- }
- if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg()"
- " req_len %d != actual_req_len %d\n",
- req_len, actual_req_len);
- r = -EIO;
+ "error in zd_submit_waiting_usb(). "
+ "Error number %d\n", r);
goto error;
}
- /* FALL-THROUGH with r == 0 */
+ /* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs
+ * of the current batch except for the very last.
+ */
+ usb->urb_async_waiting = urb;
+ return 0;
error:
- kfree(req);
+ usb_free_urb(urb);
return r;
}
+int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count)
+{
+ int r;
+
+ zd_usb_iowrite16v_async_start(usb);
+ r = zd_usb_iowrite16v_async(usb, ioreqs, count);
+ if (r) {
+ zd_usb_iowrite16v_async_end(usb, 0);
+ return r;
+ }
+ return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
+}
+
int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
{
int r;
@@ -1537,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error %d: Couldn't read CR203\n", r);
- goto out;
+ return r;
}
bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
+ USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
+ sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
req->id = cpu_to_le16(USB_REQ_WRITE_RF);
/* 1: 3683a, but not used in ZYDAS driver */
@@ -1559,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
}
udev = zd_usb_to_usbdev(usb);
- r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg(). Error number %d\n", r);
+ "error in usb_interrupt_msg(). Error number %d\n", r);
goto out;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()"
+ dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1576,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
/* FALL-THROUGH with r == 0 */
out:
- kfree(req);
return r;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 1b1655cb7cb4..b3df2c8116cc 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -32,6 +32,10 @@
#define ZD_USB_TX_HIGH 5
#define ZD_USB_TX_LOW 2
+#define ZD_TX_TIMEOUT (HZ * 5)
+#define ZD_TX_WATCHDOG_INTERVAL round_jiffies_relative(HZ)
+#define ZD_RX_IDLE_INTERVAL round_jiffies_relative(30 * HZ)
+
enum devicetype {
DEVICE_ZD1211 = 0,
DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
struct read_regs_int read_regs;
spinlock_t lock;
struct urb *urb;
+ void *buffer;
+ dma_addr_t buffer_dma;
int interval;
u8 read_regs_enabled:1;
};
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
struct zd_usb_rx {
spinlock_t lock;
- u8 fragment[2*USB_MAX_RX_SIZE];
+ struct mutex setup_mutex;
+ struct delayed_work idle_work;
+ u8 fragment[2 * USB_MAX_RX_SIZE];
unsigned int fragment_length;
unsigned int usb_packet_size;
struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
/**
* struct zd_usb_tx - structure used for transmitting frames
+ * @enabled: atomic enabled flag, indicates whether tx is enabled
* @lock: lock for transmission
- * @free_urb_list: list of free URBs, contains all the URBs, which can be used
+ * @submitted: anchor for URBs sent to device
* @submitted_urbs: atomic integer that counts the URBs having sent to the
* device, which haven't been completed
- * @enabled: enabled flag, indicates whether tx is enabled
* @stopped: indicates whether higher level tx queues are stopped
*/
struct zd_usb_tx {
+ atomic_t enabled;
spinlock_t lock;
- struct list_head free_urb_list;
+ struct delayed_work watchdog_work;
+ struct sk_buff_head submitted_skbs;
+ struct usb_anchor submitted;
int submitted_urbs;
- int enabled;
- int stopped;
+ u8 stopped:1, watchdog_enabled:1;
};
/* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,11 @@ struct zd_usb {
struct zd_usb_rx rx;
struct zd_usb_tx tx;
struct usb_interface *intf;
- u8 is_zd1211b:1, initialized:1;
+ struct usb_anchor submitted_cmds;
+ struct urb *urb_async_waiting;
+ int cmd_error;
+ u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
+ u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1;
};
#define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb);
int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
+void zd_tx_watchdog_enable(struct zd_usb *usb);
+void zd_tx_watchdog_disable(struct zd_usb *usb);
+
int zd_usb_enable_int(struct zd_usb *usb);
void zd_usb_disable_int(struct zd_usb *usb);
int zd_usb_enable_rx(struct zd_usb *usb);
void zd_usb_disable_rx(struct zd_usb *usb);
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
+
void zd_usb_enable_tx(struct zd_usb *usb);
void zd_usb_disable_tx(struct zd_usb *usb);
@@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
}
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout);
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count);
int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
unsigned int count);
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index cad66ce1640b..2642af4ee491 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -1101,8 +1101,7 @@ static struct net_device_ops xemaclite_netdev_ops;
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
*/
-static int __devinit xemaclite_of_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -1288,7 +1287,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
-static struct of_platform_driver xemaclite_of_driver = {
+static struct platform_driver xemaclite_of_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1306,7 +1305,7 @@ static struct of_platform_driver xemaclite_of_driver = {
static int __init xemaclite_init(void)
{
/* No kernel boot options used, we just need to register the driver */
- return of_register_platform_driver(&xemaclite_of_driver);
+ return platform_driver_register(&xemaclite_of_driver);
}
/**
@@ -1314,7 +1313,7 @@ static int __init xemaclite_init(void)
*/
static void __exit xemaclite_cleanup(void)
{
- of_unregister_platform_driver(&xemaclite_of_driver);
+ platform_driver_unregister(&xemaclite_of_driver);
}
module_init(xemaclite_init);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd492754..ea1580085347 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig NFC_DEVICES
- bool "NFC devices"
+ bool "Near Field Communication (NFC) devices"
default n
---help---
You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae647264dd6..724f65d8f9e4 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
struct pn544_info {
struct miscdevice miscdev;
struct i2c_client *i2c_dev;
- struct regulator_bulk_data regs[2];
+ struct regulator_bulk_data regs[3];
enum pn544_state state;
wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
static const char reg_vdd_io[] = "Vdd_IO";
static const char reg_vbat[] = "VBat";
+static const char reg_vsim[] = "VSim";
/* sysfs interface */
static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
info->regs[0].supply = reg_vdd_io;
info->regs[1].supply = reg_vbat;
+ info->regs[2].supply = reg_vsim;
r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
info->regs);
if (r < 0)
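With the third supply added to the bulk array, the driver can keep switching all supplies as a group through the regulator bulk helpers; a sketch of the usual pattern, assuming the regulator_bulk_get() above succeeded (not part of this patch):

	r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
	if (r < 0)
		dev_err(&client->dev, "failed to enable regulators\n");
	/* ... device in use ... */
	regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);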
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3c6e100a3ad0..d06a6374ed6c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -69,4 +69,10 @@ config OF_MDIO
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
+config OF_PCI
+ def_tristate PCI
+ depends on PCI && (PPC || MICROBLAZE || X86)
+ help
+ OpenFirmware PCI bus accessors
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 3ab21a0a4907..f7861ed2f287 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
+obj-$(CONFIG_OF_PCI) += of_pci.o
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 45d86530799f..62b4b32ac887 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -47,40 +47,6 @@ void of_dev_put(struct platform_device *dev)
}
EXPORT_SYMBOL(of_dev_put);
-static ssize_t devspec_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *ofdev;
-
- ofdev = to_platform_device(dev);
- return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
-}
-
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *ofdev;
-
- ofdev = to_platform_device(dev);
- return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
-}
-
-static ssize_t modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
- buf[len] = '\n';
- buf[len+1] = 0;
- return len+1;
-}
-
-struct device_attribute of_platform_device_attrs[] = {
- __ATTR_RO(devspec),
- __ATTR_RO(name),
- __ATTR_RO(modalias),
- __ATTR_NULL
-};
-
int of_device_add(struct platform_device *ofdev)
{
BUG_ON(ofdev->dev.of_node == NULL);
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
new file mode 100644
index 000000000000..ac1ec54e4fd5
--- /dev/null
+++ b/drivers/of/of_pci.c
@@ -0,0 +1,92 @@
+#include <linux/kernel.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <asm/prom.h>
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using OF tree walking.
+ */
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ __be32 lspec_be;
+ __be32 laddr[3];
+ u8 pin;
+ int rc;
+
+ /* Check if we have a device node; if yes, fall back to standard
+ * device tree parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn) {
+ rc = of_irq_map_one(dn, 0, out_irq);
+ if (!rc)
+ return rc;
+ }
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+ * interrupt spec. We assume #interrupt-cells is 1, which is standard
+ * for PCI. If yours differs, don't use this routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+
+ /* No node for host bridge? Give up. */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else {
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+ }
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the Linux device to be used for
+ * resolution. Note that we use the Linux bus number, which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't
+ * include the bus number as part of the matching.
+ * You should still be careful about that if you intend to rely
+ * on this function (i.e. if your firmware doesn't create device
+ * nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node;
+ * do standard swizzling and try again
+ */
+ lspec = pci_swizzle_interrupt_pin(pdev, lspec);
+ pdev = ppdev;
+ }
+
+ lspec_be = cpu_to_be32(lspec);
+ laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
+ laddr[1] = laddr[2] = cpu_to_be32(0);
+ return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
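A hedged sketch of how a host-bridge or arch map_irq hook might consume the result, assuming the struct of_irq layout of this kernel (controller node, specifier array and size); the function name is illustrative, not part of this patch:

	static int example_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
	{
		struct of_irq oirq;

		if (of_irq_map_pci(dev, &oirq))
			return 0;	/* no interrupt route found */

		return irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}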
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0a50f6..4d87b5dc9284 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
(p)->unique_id = of_pdt_unique_id++; \
} while (0)
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->path_component_name;
+ int len, ourlen, plen;
+ char *n;
+
+ dp->path_component_name = build_path_component(dp);
+
+ plen = strlen(dp->parent->full_name);
+ ourlen = strlen(dp->path_component_name);
+ len = ourlen + plen + 2;
+
+ n = prom_early_alloc(len);
+ strcpy(n, dp->parent->full_name);
+ if (!of_node_is_root(dp->parent)) {
+ strcpy(n + plen, "/");
+ plen++;
+ }
+ strcpy(n + plen, dp->path_component_name);
+
+ return n;
}
-#else
+#else /* CONFIG_SPARC */
static inline void of_pdt_incr_unique_id(void *p) { }
static inline void irq_trans_init(struct device_node *dp) { }
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->name;
+ static int failsafe_id = 0; /* for generating unique names on failure */
+ char *buf;
+ int len;
+
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+ goto failsafe;
+
+ buf = prom_early_alloc(len + 1);
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+ goto failsafe;
+ return buf;
+
+ failsafe:
+ buf = prom_early_alloc(strlen(dp->parent->full_name) +
+ strlen(dp->name) + 16);
+ sprintf(buf, "%s/%s@unknown%i",
+ of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+ dp->name, failsafe_id++);
+ pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+ return buf;
}
#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
return buf;
}
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
- char *res, *buf = NULL;
- int len;
-
- if (!of_pdt_prom_ops->pkg2path)
- return NULL;
-
- if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
- return NULL;
- buf = prom_early_alloc(len + 1);
- if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
- pr_err("%s: package-to-path failed\n", __func__);
- return NULL;
- }
-
- res = strrchr(buf, '/');
- if (!res) {
- pr_err("%s: couldn't find / in %s\n", __func__, buf);
- return NULL;
- }
- return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
- char *buf;
-
- buf = of_pdt_try_pkg2path(node);
- if (!buf)
- buf = of_pdt_get_one_property(node, "name");
-
- return buf;
-}
-
static struct device_node * __init of_pdt_create_node(phandle node,
struct device_node *parent)
{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
kref_init(&dp->kref);
- dp->name = of_pdt_build_name(node);
+ dp->name = of_pdt_get_one_property(node, "name");
dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
return dp;
}
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
- int len, ourlen, plen;
- char *n;
-
- plen = strlen(dp->parent->full_name);
- ourlen = strlen(of_pdt_node_name(dp));
- len = ourlen + plen + 2;
-
- n = prom_early_alloc(len);
- strcpy(n, dp->parent->full_name);
- if (!of_node_is_root(dp->parent)) {
- strcpy(n + plen, "/");
- plen++;
- }
- strcpy(n + plen, of_pdt_node_name(dp));
-
- return n;
-}
-
static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
phandle node,
struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
*(*nextp) = dp;
*nextp = &dp->allnext;
-#if defined(CONFIG_SPARC)
- dp->path_component_name = build_path_component(dp);
-#endif
dp->full_name = of_pdt_build_full_name(dp);
dp->child = of_pdt_build_tree(dp,
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index c01cd1ac7617..1ce4c45c4ab2 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -42,471 +42,10 @@ struct platform_device *of_find_device_by_node(struct device_node *np)
}
EXPORT_SYMBOL(of_find_device_by_node);
-static int platform_driver_probe_shim(struct platform_device *pdev)
-{
- struct platform_driver *pdrv;
- struct of_platform_driver *ofpdrv;
- const struct of_device_id *match;
-
- pdrv = container_of(pdev->dev.driver, struct platform_driver, driver);
- ofpdrv = container_of(pdrv, struct of_platform_driver, platform_driver);
-
- /* There is an unlikely chance that an of_platform driver might match
- * on a non-OF platform device. If so, then of_match_device() will
- * come up empty. Return -EINVAL in this case so other drivers get
- * the chance to bind. */
- match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
- return match ? ofpdrv->probe(pdev, match) : -EINVAL;
-}
-
-static void platform_driver_shutdown_shim(struct platform_device *pdev)
-{
- struct platform_driver *pdrv;
- struct of_platform_driver *ofpdrv;
-
- pdrv = container_of(pdev->dev.driver, struct platform_driver, driver);
- ofpdrv = container_of(pdrv, struct of_platform_driver, platform_driver);
- ofpdrv->shutdown(pdev);
-}
-
-/**
- * of_register_platform_driver
- */
-int of_register_platform_driver(struct of_platform_driver *drv)
-{
- char *of_name;
-
- /* setup of_platform_driver to platform_driver adaptors */
- drv->platform_driver.driver = drv->driver;
-
- /* Prefix the driver name with 'of:' to avoid namespace collisions
- * and bogus matches. There are some drivers in the tree that
- * register both an of_platform_driver and a platform_driver with
- * the same name. This is a temporary measure until they are all
- * cleaned up --gcl July 29, 2010 */
- of_name = kmalloc(strlen(drv->driver.name) + 5, GFP_KERNEL);
- if (!of_name)
- return -ENOMEM;
- sprintf(of_name, "of:%s", drv->driver.name);
- drv->platform_driver.driver.name = of_name;
-
- if (drv->probe)
- drv->platform_driver.probe = platform_driver_probe_shim;
- drv->platform_driver.remove = drv->remove;
- if (drv->shutdown)
- drv->platform_driver.shutdown = platform_driver_shutdown_shim;
- drv->platform_driver.suspend = drv->suspend;
- drv->platform_driver.resume = drv->resume;
-
- return platform_driver_register(&drv->platform_driver);
-}
-EXPORT_SYMBOL(of_register_platform_driver);
-
-void of_unregister_platform_driver(struct of_platform_driver *drv)
-{
- platform_driver_unregister(&drv->platform_driver);
- kfree(drv->platform_driver.driver.name);
- drv->platform_driver.driver.name = NULL;
-}
-EXPORT_SYMBOL(of_unregister_platform_driver);
-
#if defined(CONFIG_PPC_DCR)
#include <asm/dcr.h>
#endif
-extern struct device_attribute of_platform_device_attrs[];
-
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
-{
- const struct of_device_id *matches = drv->of_match_table;
-
- if (!matches)
- return 0;
-
- return of_match_device(matches, dev) != NULL;
-}
-
-static int of_platform_device_probe(struct device *dev)
-{
- int error = -ENODEV;
- struct of_platform_driver *drv;
- struct platform_device *of_dev;
- const struct of_device_id *match;
-
- drv = to_of_platform_driver(dev->driver);
- of_dev = to_platform_device(dev);
-
- if (!drv->probe)
- return error;
-
- of_dev_get(of_dev);
-
- match = of_match_device(drv->driver.of_match_table, dev);
- if (match)
- error = drv->probe(of_dev, match);
- if (error)
- of_dev_put(of_dev);
-
- return error;
-}
-
-static int of_platform_device_remove(struct device *dev)
-{
- struct platform_device *of_dev = to_platform_device(dev);
- struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
-
- if (dev->driver && drv->remove)
- drv->remove(of_dev);
- return 0;
-}
-
-static void of_platform_device_shutdown(struct device *dev)
-{
- struct platform_device *of_dev = to_platform_device(dev);
- struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
-
- if (dev->driver && drv->shutdown)
- drv->shutdown(of_dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
- struct platform_device *of_dev = to_platform_device(dev);
- struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->suspend)
- ret = drv->suspend(of_dev, mesg);
- return ret;
-}
-
-static int of_platform_legacy_resume(struct device *dev)
-{
- struct platform_device *of_dev = to_platform_device(dev);
- struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->resume)
- ret = drv->resume(of_dev);
- return ret;
-}
-
-static int of_platform_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-static void of_platform_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
-#ifdef CONFIG_SUSPEND
-
-static int of_platform_pm_suspend(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend)
- ret = drv->pm->suspend(dev);
- } else {
- ret = of_platform_legacy_suspend(dev, PMSG_SUSPEND);
- }
-
- return ret;
-}
-
-static int of_platform_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_resume(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume)
- ret = drv->pm->resume(dev);
- } else {
- ret = of_platform_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_SUSPEND */
-
-#define of_platform_pm_suspend NULL
-#define of_platform_pm_resume NULL
-#define of_platform_pm_suspend_noirq NULL
-#define of_platform_pm_resume_noirq NULL
-
-#endif /* !CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATION
-
-static int of_platform_pm_freeze(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze)
- ret = drv->pm->freeze(dev);
- } else {
- ret = of_platform_legacy_suspend(dev, PMSG_FREEZE);
- }
-
- return ret;
-}
-
-static int of_platform_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_thaw(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw)
- ret = drv->pm->thaw(dev);
- } else {
- ret = of_platform_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_poweroff(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff)
- ret = drv->pm->poweroff(dev);
- } else {
- ret = of_platform_legacy_suspend(dev, PMSG_HIBERNATE);
- }
-
- return ret;
-}
-
-static int of_platform_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_restore(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore)
- ret = drv->pm->restore(dev);
- } else {
- ret = of_platform_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int of_platform_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_HIBERNATION */
-
-#define of_platform_pm_freeze NULL
-#define of_platform_pm_thaw NULL
-#define of_platform_pm_poweroff NULL
-#define of_platform_pm_restore NULL
-#define of_platform_pm_freeze_noirq NULL
-#define of_platform_pm_thaw_noirq NULL
-#define of_platform_pm_poweroff_noirq NULL
-#define of_platform_pm_restore_noirq NULL
-
-#endif /* !CONFIG_HIBERNATION */
-
-static struct dev_pm_ops of_platform_dev_pm_ops = {
- .prepare = of_platform_pm_prepare,
- .complete = of_platform_pm_complete,
- .suspend = of_platform_pm_suspend,
- .resume = of_platform_pm_resume,
- .freeze = of_platform_pm_freeze,
- .thaw = of_platform_pm_thaw,
- .poweroff = of_platform_pm_poweroff,
- .restore = of_platform_pm_restore,
- .suspend_noirq = of_platform_pm_suspend_noirq,
- .resume_noirq = of_platform_pm_resume_noirq,
- .freeze_noirq = of_platform_pm_freeze_noirq,
- .thaw_noirq = of_platform_pm_thaw_noirq,
- .poweroff_noirq = of_platform_pm_poweroff_noirq,
- .restore_noirq = of_platform_pm_restore_noirq,
-};
-
-#define OF_PLATFORM_PM_OPS_PTR (&of_platform_dev_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define OF_PLATFORM_PM_OPS_PTR NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
-int of_bus_type_init(struct bus_type *bus, const char *name)
-{
- bus->name = name;
- bus->match = of_platform_bus_match;
- bus->probe = of_platform_device_probe;
- bus->remove = of_platform_device_remove;
- bus->shutdown = of_platform_device_shutdown;
- bus->dev_attrs = of_platform_device_attrs;
- bus->pm = OF_PLATFORM_PM_OPS_PTR;
- return bus_register(bus);
-}
-
-int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
-{
- /*
- * Temporary: of_platform_bus used to be distinct from the platform
- * bus. It isn't anymore, and so drivers on the platform bus need
- * to be registered in a special way.
- *
- * After all of_platform_bus_type drivers are converted to
- * platform_drivers, this exception can be removed.
- */
- if (bus == &platform_bus_type)
- return of_register_platform_driver(drv);
-
- /* register with core */
- drv->driver.bus = bus;
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL(of_register_driver);
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
- if (drv->driver.bus == &platform_bus_type)
- of_unregister_platform_driver(drv);
- else
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(of_unregister_driver);
-
#if !defined(CONFIG_SPARC)
/*
* The following routines scan a subtree and registers a device for
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 59f55441e075..b8ef8ddcc292 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -258,8 +258,10 @@ op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
*/
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
- unsigned long backtrace, int is_kernel, unsigned long event)
+ unsigned long backtrace, int is_kernel, unsigned long event,
+ struct task_struct *task)
{
+ struct task_struct *tsk = task ? task : current;
cpu_buf->sample_received++;
if (pc == ESCAPE_CODE) {
@@ -267,7 +269,7 @@ log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
return 0;
}
- if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+ if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
goto fail;
if (op_add_sample(cpu_buf, pc, event))
@@ -292,7 +294,8 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel)
+ unsigned long event, int is_kernel,
+ struct task_struct *task)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
unsigned long backtrace = oprofile_backtrace_depth;
@@ -301,7 +304,7 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
* if log_sample() fail we can't backtrace since we lost the
* source of this event
*/
- if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+ if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
/* failed */
return;
@@ -313,10 +316,17 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
oprofile_end_trace(cpu_buf);
}
+void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
+ unsigned long event, int is_kernel,
+ struct task_struct *task)
+{
+ __oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
+}
+
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+ __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
@@ -332,7 +342,7 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
pc = ESCAPE_CODE; /* as this causes an early return. */
}
- __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+ __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
/*
@@ -403,7 +413,7 @@ int oprofile_write_commit(struct op_entry *entry)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
- log_sample(cpu_buf, pc, 0, is_kernel, event);
+ log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}
void oprofile_add_trace(unsigned long pc)
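The new oprofile_add_ext_hw_sample() entry point lets a hardware-sampling driver attribute a deferred sample to the task that owned it rather than to current. A sketch of a caller; the function name and surrounding buffer handling are assumptions, not part of this patch:

	static void example_deliver_sample(unsigned long pc, struct pt_regs *regs,
					   int is_kernel, struct task_struct *tsk)
	{
		/* 0 here is the profiling event (counter) number for the sample */
		oprofile_add_ext_hw_sample(pc, regs, 0, is_kernel, tsk);
	}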
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 010725117dbb..3ef44624f510 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,7 +97,7 @@ static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
-int __init oprofile_timer_init(struct oprofile_operations *ops)
+int oprofile_timer_init(struct oprofile_operations *ops)
{
int rc;
@@ -113,7 +113,7 @@ int __init oprofile_timer_init(struct oprofile_operations *ops)
return 0;
}
-void __exit oprofile_timer_exit(void)
+void oprofile_timer_exit(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 9383063d2b16..bcd5d54b7d4d 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -296,25 +296,25 @@ static struct pci_port_ops dino_port_ops = {
.outl = dino_out32
};
-static void dino_mask_irq(unsigned int irq)
+static void dino_mask_irq(struct irq_data *d)
{
- struct dino_device *dino_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
+ struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
+ DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
-static void dino_unmask_irq(unsigned int irq)
+static void dino_unmask_irq(struct irq_data *d)
{
- struct dino_device *dino_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
+ struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
+ DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -346,9 +346,9 @@ static void dino_unmask_irq(unsigned int irq)
}
static struct irq_chip dino_interrupt_type = {
- .name = "GSC-PCI",
- .unmask = dino_unmask_irq,
- .mask = dino_mask_irq,
+ .name = "GSC-PCI",
+ .irq_unmask = dino_unmask_irq,
+ .irq_mask = dino_mask_irq,
};
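This is the pattern repeated across the parisc conversions below: the irq_chip callbacks now take a struct irq_data and recover their per-chip cookie through irq_data_get_irq_chip_data() instead of indexing by raw irq number. A generic sketch (struct example_chip and its register layout are illustrative, not from this patch; the matching .irq_unmask callback is symmetric):

	static void example_mask_irq(struct irq_data *d)
	{
		struct example_chip *chip = irq_data_get_irq_chip_data(d);
		int local_irq = d->irq - chip->first_irq;

		chip->imr &= ~(1 << local_irq);
		__raw_writel(chip->imr, chip->base_addr + EXAMPLE_IMR);
	}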
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index e860038b0b84..deeec32a5803 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -144,8 +144,9 @@ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */
/* called by free irq */
-static void eisa_mask_irq(unsigned int irq)
+static void eisa_mask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
unsigned long flags;
EISA_DBG("disable irq %d\n", irq);
@@ -164,8 +165,9 @@ static void eisa_mask_irq(unsigned int irq)
}
/* called by request irq */
-static void eisa_unmask_irq(unsigned int irq)
+static void eisa_unmask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
@@ -183,9 +185,9 @@ static void eisa_unmask_irq(unsigned int irq)
}
static struct irq_chip eisa_interrupt_type = {
- .name = "EISA",
- .unmask = eisa_unmask_irq,
- .mask = eisa_mask_irq,
+ .name = "EISA",
+ .irq_unmask = eisa_unmask_irq,
+ .irq_mask = eisa_mask_irq,
};
static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 772b1939ac21..ef31080cf591 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -105,13 +105,13 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
return NO_IRQ;
}
-static void gsc_asic_mask_irq(unsigned int irq)
+static void gsc_asic_mask_irq(struct irq_data *d)
{
- struct gsc_asic *irq_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
+ struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
u32 imr;
- DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
+ DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
irq_dev->name, imr);
/* Disable the IRQ line by clearing the bit in the IMR */
@@ -120,13 +120,13 @@ static void gsc_asic_mask_irq(unsigned int irq)
gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}
-static void gsc_asic_unmask_irq(unsigned int irq)
+static void gsc_asic_unmask_irq(struct irq_data *d)
{
- struct gsc_asic *irq_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
+ struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
u32 imr;
- DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
+ DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
irq_dev->name, imr);
/* Enable the IRQ line by setting the bit in the IMR */
@@ -140,9 +140,9 @@ static void gsc_asic_unmask_irq(unsigned int irq)
}
static struct irq_chip gsc_asic_interrupt_type = {
- .name = "GSC-ASIC",
- .unmask = gsc_asic_unmask_irq,
- .mask = gsc_asic_mask_irq,
+ .name = "GSC-ASIC",
+ .irq_unmask = gsc_asic_unmask_irq,
+ .irq_mask = gsc_asic_mask_irq,
};
int gsc_assign_irq(struct irq_chip *type, void *data)
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 0327894bf235..95930d016235 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -615,10 +615,10 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
}
-static void iosapic_mask_irq(unsigned int irq)
+static void iosapic_mask_irq(struct irq_data *d)
{
unsigned long flags;
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -628,9 +628,9 @@ static void iosapic_mask_irq(unsigned int irq)
spin_unlock_irqrestore(&iosapic_lock, flags);
}
-static void iosapic_unmask_irq(unsigned int irq)
+static void iosapic_unmask_irq(struct irq_data *d)
{
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1;
/* data is initialized by fixup_irq */
@@ -666,34 +666,34 @@ printk("\n");
* enables their IRQ. It can lead to "interesting" race conditions
* in the driver initialization sequence.
*/
- DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
+ DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", d->irq,
vi->eoi_addr, vi->eoi_data);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
}
-static void iosapic_eoi_irq(unsigned int irq)
+static void iosapic_eoi_irq(struct irq_data *d)
{
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
- cpu_eoi_irq(irq);
+ cpu_eoi_irq(d);
}
#ifdef CONFIG_SMP
-static int iosapic_set_affinity_irq(unsigned int irq,
- const struct cpumask *dest)
+static int iosapic_set_affinity_irq(struct irq_data *d,
+ const struct cpumask *dest, bool force)
{
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1, dummy_d0;
unsigned long flags;
int dest_cpu;
- dest_cpu = cpu_check_affinity(irq, dest);
+ dest_cpu = cpu_check_affinity(d, dest);
if (dest_cpu < 0)
return -1;
- cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
- vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
+ cpumask_copy(d->affinity, cpumask_of(dest_cpu));
+ vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags);
/* d1 contains the destination CPU, so only want to set that
@@ -708,13 +708,13 @@ static int iosapic_set_affinity_irq(unsigned int irq,
#endif
static struct irq_chip iosapic_interrupt_type = {
- .name = "IO-SAPIC-level",
- .unmask = iosapic_unmask_irq,
- .mask = iosapic_mask_irq,
- .ack = cpu_ack_irq,
- .eoi = iosapic_eoi_irq,
+ .name = "IO-SAPIC-level",
+ .irq_unmask = iosapic_unmask_irq,
+ .irq_mask = iosapic_mask_irq,
+ .irq_ack = cpu_ack_irq,
+ .irq_eoi = iosapic_eoi_irq,
#ifdef CONFIG_SMP
- .set_affinity = iosapic_set_affinity_irq,
+ .irq_set_affinity = iosapic_set_affinity_irq,
#endif
};
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 28241532c0fd..a4d8ff66a639 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -286,8 +286,9 @@ superio_init(struct pci_dev *pcidev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
-static void superio_mask_irq(unsigned int irq)
+static void superio_mask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -303,8 +304,9 @@ static void superio_mask_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static void superio_unmask_irq(unsigned int irq)
+static void superio_unmask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -320,9 +322,9 @@ static void superio_unmask_irq(unsigned int irq)
}
static struct irq_chip superio_interrupt_type = {
- .name = SUPERIO,
- .unmask = superio_unmask_irq,
- .mask = superio_mask_irq,
+ .name = SUPERIO,
+ .irq_unmask = superio_unmask_irq,
+ .irq_mask = superio_mask_irq,
};
#ifdef DEBUG_SUPERIO_INIT
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 55ba118f1cf1..910c5a26e347 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -286,7 +286,7 @@ static struct parport_operations parport_sunbpp_ops =
.owner = THIS_MODULE,
};
-static int __devinit bpp_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit bpp_probe(struct platform_device *op)
{
struct parport_operations *ops;
struct bpp_regs __iomem *regs;
@@ -381,7 +381,7 @@ static const struct of_device_id bpp_match[] = {
MODULE_DEVICE_TABLE(of, bpp_match);
-static struct of_platform_driver bpp_sbus_driver = {
+static struct platform_driver bpp_sbus_driver = {
.driver = {
.name = "bpp",
.owner = THIS_MODULE,
@@ -393,12 +393,12 @@ static struct of_platform_driver bpp_sbus_driver = {
static int __init parport_sunbpp_init(void)
{
- return of_register_platform_driver(&bpp_sbus_driver);
+ return platform_driver_register(&bpp_sbus_driver);
}
static void __exit parport_sunbpp_exit(void)
{
- of_unregister_platform_driver(&bpp_sbus_driver);
+ platform_driver_unregister(&bpp_sbus_driver);
}
MODULE_AUTHOR("Derrick J Brashear");
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 98e6fdf34d30..77cf813ba264 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o
obj-$(CONFIG_X86) += setup-bus.o
obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
obj-$(CONFIG_PARISC) += setup-bus.o
obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
obj-$(CONFIG_PPC) += setup-bus.o
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4789f8e8bf7a..35463ddf10a1 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3627,9 +3627,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
- free_pgtable_page(dmar_domain->pgd);
dmar_domain->pgd = (struct dma_pte *)
phys_to_virt(dma_pte_addr(pte));
+ free_pgtable_page(pte);
}
dmar_domain->agaw--;
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 6fe0772e0e7d..7c3b18e78cee 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -293,19 +293,11 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
}
if (enable) {
- if (!dev->wakeup.run_wake_count++) {
- acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
- acpi_enable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number);
- }
- } else if (dev->wakeup.run_wake_count > 0) {
- if (!--dev->wakeup.run_wake_count) {
- acpi_disable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number);
- acpi_disable_wakeup_device_power(dev);
- }
+ acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
+ acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
} else {
- error = -EALREADY;
+ acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
+ acpi_disable_wakeup_device_power(dev);
}
return error;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 88246dd46452..d86ea8b01137 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -431,7 +431,7 @@ static void pci_device_shutdown(struct device *dev)
pci_msix_shutdown(pci_dev);
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
/* Auxiliary functions used for system resume and run-time resume. */
@@ -1059,7 +1059,7 @@ static int pci_pm_runtime_idle(struct device *dev)
#endif /* !CONFIG_PM_RUNTIME */
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index ea25e5bfcf23..c85438a367d5 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1088,7 +1088,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
attr->write = write_vpd_attr;
retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
if (retval) {
- kfree(dev->vpd->attr);
+ kfree(attr);
return retval;
}
dev->vpd->attr = attr;
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 80c11d131499..3eb77080366a 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -35,13 +35,6 @@
PCI_ERR_UNC_UNX_COMP| \
PCI_ERR_UNC_MALF_TLP)
-struct header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
-};
-
#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
@@ -59,7 +52,7 @@ struct aer_err_info {
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
- struct header_log_regs tlp; /* TLP Header */
+ struct aer_header_log_regs tlp; /* TLP Header */
};
struct aer_err_source {
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 9d3e4c8d0184..b07a42e0b350 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
+#include <linux/cper.h>
#include "aerdrv.h"
@@ -57,86 +58,44 @@
(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
AER_TRANSACTION_LAYER_ERROR)
-#define AER_PR(info, pdev, fmt, args...) \
- printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \
- KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \
- dev_name(&pdev->dev), ## args)
-
/*
* AER error strings
*/
-static char *aer_error_severity_string[] = {
+static const char *aer_error_severity_string[] = {
"Uncorrected (Non-Fatal)",
"Uncorrected (Fatal)",
"Corrected"
};
-static char *aer_error_layer[] = {
+static const char *aer_error_layer[] = {
"Physical Layer",
"Data Link Layer",
"Transaction Layer"
};
-static char *aer_correctable_error_string[] = {
- "Receiver Error ", /* Bit Position 0 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "Bad TLP ", /* Bit Position 6 */
- "Bad DLLP ", /* Bit Position 7 */
- "RELAY_NUM Rollover ", /* Bit Position 8 */
- NULL,
- NULL,
- NULL,
- "Replay Timer Timeout ", /* Bit Position 12 */
- "Advisory Non-Fatal ", /* Bit Position 13 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+
+static const char *aer_correctable_error_string[] = {
+ "Receiver Error", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
+ "Bad TLP", /* Bit Position 6 */
+ "Bad DLLP", /* Bit Position 7 */
+ "RELAY_NUM Rollover", /* Bit Position 8 */
NULL,
NULL,
NULL,
+ "Replay Timer Timeout", /* Bit Position 12 */
+ "Advisory Non-Fatal", /* Bit Position 13 */
};
-static char *aer_uncorrectable_error_string[] = {
- NULL,
- NULL,
- NULL,
- NULL,
- "Data Link Protocol ", /* Bit Position 4 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "Poisoned TLP ", /* Bit Position 12 */
- "Flow Control Protocol ", /* Bit Position 13 */
- "Completion Timeout ", /* Bit Position 14 */
- "Completer Abort ", /* Bit Position 15 */
- "Unexpected Completion ", /* Bit Position 16 */
- "Receiver Overflow ", /* Bit Position 17 */
- "Malformed TLP ", /* Bit Position 18 */
- "ECRC ", /* Bit Position 19 */
- "Unsupported Request ", /* Bit Position 20 */
+static const char *aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
NULL,
+ "Data Link Protocol", /* Bit Position 4 */
NULL,
NULL,
NULL,
@@ -144,19 +103,29 @@ static char *aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
+ "Poisoned TLP", /* Bit Position 12 */
+ "Flow Control Protocol", /* Bit Position 13 */
+ "Completion Timeout", /* Bit Position 14 */
+ "Completer Abort", /* Bit Position 15 */
+ "Unexpected Completion", /* Bit Position 16 */
+ "Receiver Overflow", /* Bit Position 17 */
+ "Malformed TLP", /* Bit Position 18 */
+ "ECRC", /* Bit Position 19 */
+ "Unsupported Request", /* Bit Position 20 */
};
-static char *aer_agent_string[] = {
+static const char *aer_agent_string[] = {
"Receiver ID",
"Requester ID",
"Completer ID",
"Transmitter ID"
};
-static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
+static void __aer_print_error(const char *prefix,
+ struct aer_err_info *info)
{
int i, status;
- char *errmsg = NULL;
+ const char *errmsg = NULL;
status = (info->status & ~info->mask);
@@ -165,15 +134,17 @@ static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
continue;
if (info->severity == AER_CORRECTABLE)
- errmsg = aer_correctable_error_string[i];
+ errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
+ aer_correctable_error_string[i] : NULL;
else
- errmsg = aer_uncorrectable_error_string[i];
+ errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
+ aer_uncorrectable_error_string[i] : NULL;
if (errmsg)
- AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg,
+ printk("%s"" [%2d] %-22s%s\n", prefix, i, errmsg,
info->first_error == i ? " (First)" : "");
else
- AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i,
+ printk("%s"" [%2d] Unknown Error Bit%s\n", prefix, i,
info->first_error == i ? " (First)" : "");
}
}
@@ -181,11 +152,15 @@ static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
int id = ((dev->bus->number << 8) | dev->devfn);
+ char prefix[44];
+
+ snprintf(prefix, sizeof(prefix), "%s%s %s: ",
+ (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR,
+ dev_driver_string(&dev->dev), dev_name(&dev->dev));
if (info->status == 0) {
- AER_PR(info, dev,
- "PCIe Bus Error: severity=%s, type=Unaccessible, "
- "id=%04x(Unregistered Agent ID)\n",
+ printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, "
+ "id=%04x(Unregistered Agent ID)\n", prefix,
aer_error_severity_string[info->severity], id);
} else {
int layer, agent;
@@ -193,23 +168,22 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- AER_PR(info, dev,
- "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
- aer_error_severity_string[info->severity],
+ printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ prefix, aer_error_severity_string[info->severity],
aer_error_layer[layer], id, aer_agent_string[agent]);
- AER_PR(info, dev,
- " device [%04x:%04x] error status/mask=%08x/%08x\n",
- dev->vendor, dev->device, info->status, info->mask);
+ printk("%s"" device [%04x:%04x] error status/mask=%08x/%08x\n",
+ prefix, dev->vendor, dev->device,
+ info->status, info->mask);
- __aer_print_error(info, dev);
+ __aer_print_error(prefix, info);
if (info->tlp_header_valid) {
unsigned char *tlp = (unsigned char *) &info->tlp;
- AER_PR(info, dev, " TLP Header:"
+ printk("%s"" TLP Header:"
" %02x%02x%02x%02x %02x%02x%02x%02x"
" %02x%02x%02x%02x %02x%02x%02x%02x\n",
- *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+ prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
*(tlp + 11), *(tlp + 10), *(tlp + 9),
*(tlp + 8), *(tlp + 15), *(tlp + 14),
@@ -218,8 +192,8 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
}
if (info->id && info->error_dev_num > 1 && info->id == id)
- AER_PR(info, dev,
- " Error of this Agent(%04x) is reported first\n", id);
+ printk("%s"" Error of this Agent(%04x) is reported first\n",
+ prefix, id);
}
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -228,3 +202,61 @@ void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
info->multi_error_valid ? "Multiple " : "",
aer_error_severity_string[info->severity], info->id);
}
+
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+static int cper_severity_to_aer(int cper_severity)
+{
+ switch (cper_severity) {
+ case CPER_SEV_RECOVERABLE:
+ return AER_NONFATAL;
+ case CPER_SEV_FATAL:
+ return AER_FATAL;
+ default:
+ return AER_CORRECTABLE;
+ }
+}
+
+void cper_print_aer(const char *prefix, int cper_severity,
+ struct aer_capability_regs *aer)
+{
+ int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+ u32 status, mask;
+ const char **status_strs;
+
+ aer_severity = cper_severity_to_aer(cper_severity);
+ if (aer_severity == AER_CORRECTABLE) {
+ status = aer->cor_status;
+ mask = aer->cor_mask;
+ status_strs = aer_correctable_error_string;
+ status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
+ } else {
+ status = aer->uncor_status;
+ mask = aer->uncor_mask;
+ status_strs = aer_uncorrectable_error_string;
+ status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
+ tlp_header_valid = status & AER_LOG_TLP_MASKS;
+ }
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+ printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n",
+ prefix, status, mask);
+ cper_print_bits(prefix, status, status_strs, status_strs_size);
+ printk("%s""aer_layer=%s, aer_agent=%s\n", prefix,
+ aer_error_layer[layer], aer_agent_string[agent]);
+ if (aer_severity != AER_CORRECTABLE)
+ printk("%s""aer_uncor_severity: 0x%08x\n",
+ prefix, aer->uncor_severity);
+ if (tlp_header_valid) {
+ const unsigned char *tlp;
+ tlp = (const unsigned char *)&aer->header_log;
+ printk("%s""aer_tlp_header:"
+ " %02x%02x%02x%02x %02x%02x%02x%02x"
+ " %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+ *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
+ *(tlp + 11), *(tlp + 10), *(tlp + 9),
+ *(tlp + 8), *(tlp + 15), *(tlp + 14),
+ *(tlp + 13), *(tlp + 12));
+ }
+}
+#endif
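cper_print_aer() gives firmware-first (APEI) error handling a way to decode a raw AER register block carried in a CPER record with the same strings used for native AER reporting. A hedged caller sketch; the pcie section pointer, prefix and cper_severity are assumed to come from the APEI/GHES caller and are not part of this patch:

	if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
		cper_print_aer(prefix, cper_severity,
			       (struct aer_capability_regs *)pcie->aer_info);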
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c84900da3c59..44cbbbaa499d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -764,6 +764,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
if (pci_find_bus(pci_domain_nr(bus), max+1))
goto out;
child = pci_add_new_bus(bus, dev, ++max);
+ if (!child)
+ goto out;
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->secondary) << 8)
@@ -777,7 +779,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
buses &= ~0xff000000;
buses |= CARDBUS_LATENCY_TIMER << 24;
}
-
+
/*
* We need to blast all three values with a single write.
*/
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 53a786fd0d40..ff01bfb3cc29 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2618,58 +2618,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
#endif /* CONFIG_PCI_MSI */
-#ifdef CONFIG_PCI_IOV
-
-/*
- * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the
- * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the
- * old Flash Memory Space.
- */
-static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
-{
- int pos, flags;
- u32 bar, start, size;
-
- if (PAGE_SIZE > 0x10000)
- return;
-
- flags = pci_resource_flags(dev, 0);
- if ((flags & PCI_BASE_ADDRESS_SPACE) !=
- PCI_BASE_ADDRESS_SPACE_MEMORY ||
- (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) !=
- PCI_BASE_ADDRESS_MEM_TYPE_32)
- return;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
- if (!pos)
- return;
-
- pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar);
- if (bar & PCI_BASE_ADDRESS_MEM_MASK)
- return;
-
- start = pci_resource_start(dev, 1);
- size = pci_resource_len(dev, 1);
- if (!start || size != 0x400000 || start & (size - 1))
- return;
-
- pci_resource_flags(dev, 1) = 0;
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start);
- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2);
-
- dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n");
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
-
-#endif /* CONFIG_PCI_IOV */
-
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 3a5a6fcc0ead..492b7d807fe8 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = {
#ifdef CONFIG_PCI_MSI
static int pci_frontend_enable_msix(struct pci_dev *dev,
- int **vector, int nvec)
+ int vector[], int nvec)
{
int err;
int i;
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
if (likely(!err)) {
if (likely(!op.value)) {
/* we get the result */
- for (i = 0; i < nvec; i++)
- *(*vector+i) = op.msix_entries[i].vector;
- return 0;
+ for (i = 0; i < nvec; i++) {
+ if (op.msix_entries[i].vector <= 0) {
+ dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+ i, op.msix_entries[i].vector);
+ err = -EINVAL;
+ vector[i] = -1;
+ continue;
+ }
+ vector[i] = op.msix_entries[i].vector;
+ }
} else {
printk(KERN_DEBUG "enable msix get value %x\n",
op.value);
- return op.value;
}
} else {
dev_err(&dev->dev, "enable msix get err %x\n", err);
- return err;
}
+ return err;
}
static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
}
-static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
{
int err;
struct xen_pci_op op = {
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
err = do_pci_op(pdev, &op);
if (likely(!err)) {
- *(*vector) = op.value;
+ vector[0] = op.value;
+ if (op.value <= 0) {
+ dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+ op.value);
+ err = -EINVAL;
+ vector[0] = -1;
+ }
} else {
dev_err(&dev->dev, "pci frontend enable msi failed for dev "
"%x:%x\n", op.bus, op.devfn);
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev)
pcifront_free_roots(pdev);
- /*For PCIE_AER error handling job*/
- flush_scheduled_work();
+ cancel_work_sync(&pdev->op_work);
if (pdev->irq >= 0)
unbind_from_irqhandler(pdev->irq, pdev);
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 546d3024b6f0..6defd4a8168e 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -181,8 +181,7 @@ static struct pccard_operations electra_cf_ops = {
.set_mem_map = electra_cf_set_mem_map,
};
-static int __devinit electra_cf_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit electra_cf_probe(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -356,7 +355,7 @@ static const struct of_device_id electra_cf_match[] = {
};
MODULE_DEVICE_TABLE(of, electra_cf_match);
-static struct of_platform_driver electra_cf_driver = {
+static struct platform_driver electra_cf_driver = {
.driver = {
.name = (char *)driver_name,
.owner = THIS_MODULE,
@@ -368,13 +367,13 @@ static struct of_platform_driver electra_cf_driver = {
static int __init electra_cf_init(void)
{
- return of_register_platform_driver(&electra_cf_driver);
+ return platform_driver_register(&electra_cf_driver);
}
module_init(electra_cf_init);
static void __exit electra_cf_exit(void)
{
- of_unregister_platform_driver(&electra_cf_driver);
+ platform_driver_unregister(&electra_cf_driver);
}
module_exit(electra_cf_exit);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 0db482771fb5..271a590a5f3c 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1148,8 +1148,7 @@ static struct pccard_operations m8xx_services = {
.set_mem_map = m8xx_set_mem_map,
};
-static int __init m8xx_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __init m8xx_probe(struct platform_device *ofdev)
{
struct pcmcia_win *w;
unsigned int i, m, hwirq;
@@ -1295,7 +1294,7 @@ static const struct of_device_id m8xx_pcmcia_match[] = {
MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
-static struct of_platform_driver m8xx_pcmcia_driver = {
+static struct platform_driver m8xx_pcmcia_driver = {
.driver = {
.name = driver_name,
.owner = THIS_MODULE,
@@ -1307,12 +1306,12 @@ static struct of_platform_driver m8xx_pcmcia_driver = {
static int __init m8xx_init(void)
{
- return of_register_platform_driver(&m8xx_pcmcia_driver);
+ return platform_driver_register(&m8xx_pcmcia_driver);
}
static void __exit m8xx_exit(void)
{
- of_unregister_platform_driver(&m8xx_pcmcia_driver);
+ platform_driver_unregister(&m8xx_pcmcia_driver);
}
module_init(m8xx_init);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3ed55..42fbf1a75576 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
flags |= CONF_ENABLE_IOCARD;
if (flags & CONF_ENABLE_IOCARD)
s->socket.flags |= SS_IOCARD;
+ if (flags & CONF_ENABLE_ZVCARD)
+ s->socket.flags |= SS_ZVCARD | SS_IOCARD;
if (flags & CONF_ENABLE_SPKR) {
s->socket.flags |= SS_SPKR_ENA;
status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c8c715..2c540542b5af 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
{
struct pcmcia_low_level *ops = dev->platform_data;
/*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea87b8f9..b609b45469ed 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8fb42bd..25afe637c657 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev);
ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
pxa2xx_drv_pcmcia_add_one);
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 906e1fb421e7..b039d6a93f17 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -151,6 +151,24 @@ config TC1100_WMI
This is a driver for the WMI extensions (wireless and bluetooth power
control) of the HP Compaq TC1100 tablet.
+config HP_ACCEL
+ tristate "HP laptop accelerometer"
+ depends on INPUT && ACPI
+ select SENSORS_LIS3LV02D
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This driver provides support for the "Mobile Data Protection System 3D"
+ or "3D DriveGuard" feature of HP laptops. On such systems the driver
+ should load automatically (via ACPI alias).
+
+ Support for an LED indicating disk protection will be provided as
+ hp::hddprotect. For more information on the feature, refer to
+ Documentation/hwmon/lis3lv02d.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp_accel.
+
config HP_WMI
tristate "HP WMI extras"
depends on ACPI_WMI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 951b5163c7e1..a9877711b3f1 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
+obj-$(CONFIG_HP_ACCEL) += hp_accel.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 38b34a73866a..ad3d099bf5c1 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -39,7 +39,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <linux/dmi.h>
#include <acpi/acpi_drivers.h>
diff --git a/drivers/hwmon/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 3d21fa2b97cd..16b233fac8f7 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -35,11 +35,11 @@
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/leds.h>
+#include <linux/atomic.h>
#include <acpi/acpi_drivers.h>
-#include <asm/atomic.h>
-#include "lis3lv02d.h"
+#include "../../misc/lis3lv02d/lis3lv02d.h"
-#define DRIVER_NAME "lis3lv02d"
+#define DRIVER_NAME "hp_accel"
#define ACPI_MDPS_CLASS "accelerometer"
/* Delayed LEDs infrastructure ------------------------------------ */
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 2343bb36e563..c1372ed9d2e9 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -133,7 +133,6 @@ static int ebook_switch_add(struct acpi_device *device)
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
- device->wakeup.run_wake_count++;
device_set_wakeup_enable(&device->dev, true);
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 61bf5d724139..52a462fc6b84 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -117,10 +117,24 @@ config BATTERY_BQ20Z75
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
+ help
+ Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
+
+config BATTERY_BQ27X00_I2C
+ bool "BQ27200/BQ27500 support"
+ depends on BATTERY_BQ27x00
depends on I2C
+ default y
help
Say Y here to enable support for batteries with BQ27x00 (I2C) chips.
+config BATTERY_BQ27X00_PLATFORM
+ bool "BQ27000 support"
+ depends on BATTERY_BQ27x00
+ default y
+ help
+ Say Y here to enable support for batteries with BQ27000 (HDQ) chips.
+
config BATTERY_DA9030
tristate "DA9030 battery driver"
depends on PMIC_DA903X
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 492da27e1a47..8018e8cc7a74 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -25,6 +25,10 @@
#include <linux/power_supply.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <linux/power/bq20z75.h>
enum {
REG_MANUFACTURER_DATA,
@@ -38,11 +42,22 @@ enum {
REG_CYCLE_COUNT,
REG_SERIAL_NUMBER,
REG_REMAINING_CAPACITY,
+ REG_REMAINING_CAPACITY_CHARGE,
REG_FULL_CHARGE_CAPACITY,
+ REG_FULL_CHARGE_CAPACITY_CHARGE,
REG_DESIGN_CAPACITY,
+ REG_DESIGN_CAPACITY_CHARGE,
REG_DESIGN_VOLTAGE,
};
+/* Battery Mode defines */
+#define BATTERY_MODE_OFFSET 0x03
+#define BATTERY_MODE_MASK 0x8000
+enum bq20z75_battery_mode {
+ BATTERY_MODE_AMPS,
+ BATTERY_MODE_WATTS
+};
+
/* manufacturer access defines */
#define MANUFACTURER_ACCESS_STATUS 0x0006
#define MANUFACTURER_ACCESS_SLEEP 0x0011
@@ -78,8 +93,12 @@ static const struct bq20z75_device_data {
BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
[REG_REMAINING_CAPACITY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
+ [REG_REMAINING_CAPACITY_CHARGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
[REG_FULL_CHARGE_CAPACITY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
+ [REG_FULL_CHARGE_CAPACITY_CHARGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
[REG_TIME_TO_EMPTY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0,
65535),
@@ -93,6 +112,9 @@ static const struct bq20z75_device_data {
[REG_DESIGN_CAPACITY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0,
65535),
+ [REG_DESIGN_CAPACITY_CHARGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0,
+ 65535),
[REG_DESIGN_VOLTAGE] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0,
65535),
@@ -117,39 +139,72 @@ static enum power_supply_property bq20z75_properties[] = {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
};
struct bq20z75_info {
- struct i2c_client *client;
- struct power_supply power_supply;
+ struct i2c_client *client;
+ struct power_supply power_supply;
+ struct bq20z75_platform_data *pdata;
+ bool is_present;
+ bool gpio_detect;
+ bool enable_detection;
+ int irq;
};
static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
{
- s32 ret;
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ s32 ret = 0;
+ int retries = 1;
+
+ if (bq20z75_device->pdata)
+ retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+
+ while (retries > 0) {
+ ret = i2c_smbus_read_word_data(client, address);
+ if (ret >= 0)
+ break;
+ retries--;
+ }
- ret = i2c_smbus_read_word_data(client, address);
if (ret < 0) {
- dev_err(&client->dev,
+ dev_warn(&client->dev,
"%s: i2c read at address 0x%x failed\n",
__func__, address);
return ret;
}
+
return le16_to_cpu(ret);
}
static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
u16 value)
{
- s32 ret;
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ s32 ret = 0;
+ int retries = 1;
+
+ if (bq20z75_device->pdata)
+ retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+
+ while (retries > 0) {
+ ret = i2c_smbus_write_word_data(client, address,
+ le16_to_cpu(value));
+ if (ret >= 0)
+ break;
+ retries--;
+ }
- ret = i2c_smbus_write_word_data(client, address, le16_to_cpu(value));
if (ret < 0) {
- dev_err(&client->dev,
+ dev_warn(&client->dev,
"%s: i2c write to address 0x%x failed\n",
__func__, address);
return ret;
}
+
return 0;
}
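Both hunks above wrap the SMBus transfer in a bounded retry loop sized from the platform data's i2c_retry_count, so a transient NAK from the gas gauge no longer fails a property read outright. A minimal, self-contained sketch of the same bounded-retry pattern (fake_read_word and its failure schedule are purely illustrative stand-ins for i2c_smbus_read_word_data):

#include <stdio.h>

/* Illustrative stand-in for i2c_smbus_read_word_data(): fails twice, then succeeds. */
static int fake_read_word(int *fails_left)
{
        if ((*fails_left)-- > 0)
                return -5;              /* pretend -EIO */
        return 0x1234;                  /* pretend register contents */
}

static int read_word_with_retries(int i2c_retry_count)
{
        int retries = i2c_retry_count + 1;      /* mirrors pdata->i2c_retry_count + 1 */
        int fails_left = 2;                     /* stub: first two attempts fail */
        int ret = -5;

        while (retries > 0) {
                ret = fake_read_word(&fails_left);
                if (ret >= 0)
                        break;
                retries--;
        }
        return ret;
}

int main(void)
{
        printf("retry_count=0 -> %d\n", read_word_with_retries(0));    /* -5: gave up */
        printf("retry_count=2 -> %d\n", read_word_with_retries(2));    /* 4660: third attempt succeeded */
        return 0;
}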
@@ -158,6 +213,18 @@ static int bq20z75_get_battery_presence_and_health(
union power_supply_propval *val)
{
s32 ret;
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+
+ if (psp == POWER_SUPPLY_PROP_PRESENT &&
+ bq20z75_device->gpio_detect) {
+ ret = gpio_get_value(
+ bq20z75_device->pdata->battery_detect);
+ if (ret == bq20z75_device->pdata->battery_detect_present)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ return ret;
+ }
/* Write to ManufacturerAccess with
* ManufacturerAccess command and then
@@ -171,8 +238,11 @@ static int bq20z75_get_battery_presence_and_health(
ret = bq20z75_read_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr);
- if (ret < 0)
+ if (ret < 0) {
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ val->intval = 0; /* battery removed */
return ret;
+ }
if (ret < bq20z75_data[REG_MANUFACTURER_DATA].min_value ||
ret > bq20z75_data[REG_MANUFACTURER_DATA].max_value) {
@@ -260,6 +330,9 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval *= BASE_UNIT_CONVERSION;
break;
@@ -281,11 +354,44 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
}
}
+static enum bq20z75_battery_mode
+bq20z75_set_battery_mode(struct i2c_client *client,
+ enum bq20z75_battery_mode mode)
+{
+ int ret, original_val;
+
+ original_val = bq20z75_read_word_data(client, BATTERY_MODE_OFFSET);
+ if (original_val < 0)
+ return original_val;
+
+ if ((original_val & BATTERY_MODE_MASK) == mode)
+ return mode;
+
+ if (mode == BATTERY_MODE_AMPS)
+ ret = original_val & ~BATTERY_MODE_MASK;
+ else
+ ret = original_val | BATTERY_MODE_MASK;
+
+ ret = bq20z75_write_word_data(client, BATTERY_MODE_OFFSET, ret);
+ if (ret < 0)
+ return ret;
+
+ return original_val & BATTERY_MODE_MASK;
+}
+
static int bq20z75_get_battery_capacity(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
s32 ret;
+ enum bq20z75_battery_mode mode = BATTERY_MODE_WATTS;
+
+ if (power_supply_is_amp_property(psp))
+ mode = BATTERY_MODE_AMPS;
+
+ mode = bq20z75_set_battery_mode(client, mode);
+ if (mode < 0)
+ return mode;
ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr);
if (ret < 0)
@@ -298,6 +404,10 @@ static int bq20z75_get_battery_capacity(struct i2c_client *client,
} else
val->intval = ret;
+ ret = bq20z75_set_battery_mode(client, mode);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -318,12 +428,25 @@ static int bq20z75_get_battery_serial_number(struct i2c_client *client,
return 0;
}
+static int bq20z75_get_property_index(struct i2c_client *client,
+ enum power_supply_property psp)
+{
+ int count;
+ for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++)
+ if (psp == bq20z75_data[count].psp)
+ return count;
+
+ dev_warn(&client->dev,
+ "%s: Invalid Property - %d\n", __func__, psp);
+
+ return -EINVAL;
+}
+
static int bq20z75_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int count;
- int ret;
+ int ret = 0;
struct bq20z75_info *bq20z75_device = container_of(psy,
struct bq20z75_info, power_supply);
struct i2c_client *client = bq20z75_device->client;
@@ -332,8 +455,6 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
ret = bq20z75_get_battery_presence_and_health(client, psp, val);
- if (ret)
- return ret;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
@@ -343,22 +464,19 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ENERGY_NOW:
case POWER_SUPPLY_PROP_ENERGY_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CAPACITY:
- for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
- if (psp == bq20z75_data[count].psp)
- break;
- }
-
- ret = bq20z75_get_battery_capacity(client, count, psp, val);
- if (ret)
- return ret;
+ ret = bq20z75_get_property_index(client, psp);
+ if (ret < 0)
+ break;
+ ret = bq20z75_get_battery_capacity(client, ret, psp, val);
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = bq20z75_get_battery_serial_number(client, val);
- if (ret)
- return ret;
break;
case POWER_SUPPLY_PROP_STATUS:
@@ -369,15 +487,11 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
- if (psp == bq20z75_data[count].psp)
- break;
- }
-
- ret = bq20z75_get_battery_property(client, count, psp, val);
- if (ret)
- return ret;
+ ret = bq20z75_get_property_index(client, psp);
+ if (ret < 0)
+ break;
+ ret = bq20z75_get_battery_property(client, ret, psp, val);
break;
default:
@@ -386,26 +500,51 @@ static int bq20z75_get_property(struct power_supply *psy,
return -EINVAL;
}
- /* Convert units to match requirements for power supply class */
- bq20z75_unit_adjustment(client, psp, val);
+ if (!bq20z75_device->enable_detection)
+ goto done;
+
+ if (!bq20z75_device->gpio_detect &&
+ bq20z75_device->is_present != (ret >= 0)) {
+ bq20z75_device->is_present = (ret >= 0);
+ power_supply_changed(&bq20z75_device->power_supply);
+ }
+
+done:
+ if (!ret) {
+ /* Convert units to match requirements for power supply class */
+ bq20z75_unit_adjustment(client, psp, val);
+ }
dev_dbg(&client->dev,
"%s: property = %d, value = %d\n", __func__, psp, val->intval);
- return 0;
+ return ret;
+}
+
+static irqreturn_t bq20z75_irq(int irq, void *devid)
+{
+ struct power_supply *battery = devid;
+
+ power_supply_changed(battery);
+
+ return IRQ_HANDLED;
}
-static int bq20z75_probe(struct i2c_client *client,
+static int __devinit bq20z75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bq20z75_info *bq20z75_device;
+ struct bq20z75_platform_data *pdata = client->dev.platform_data;
int rc;
+ int irq;
bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL);
if (!bq20z75_device)
return -ENOMEM;
bq20z75_device->client = client;
+ bq20z75_device->enable_detection = false;
+ bq20z75_device->gpio_detect = false;
bq20z75_device->power_supply.name = "battery";
bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
bq20z75_device->power_supply.properties = bq20z75_properties;
@@ -413,26 +552,86 @@ static int bq20z75_probe(struct i2c_client *client,
ARRAY_SIZE(bq20z75_properties);
bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ if (pdata) {
+ bq20z75_device->gpio_detect =
+ gpio_is_valid(pdata->battery_detect);
+ bq20z75_device->pdata = pdata;
+ }
+
i2c_set_clientdata(client, bq20z75_device);
+ if (!bq20z75_device->gpio_detect)
+ goto skip_gpio;
+
+ rc = gpio_request(pdata->battery_detect, dev_name(&client->dev));
+ if (rc) {
+ dev_warn(&client->dev, "Failed to request gpio: %d\n", rc);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ rc = gpio_direction_input(pdata->battery_detect);
+ if (rc) {
+ dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc);
+ gpio_free(pdata->battery_detect);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ irq = gpio_to_irq(pdata->battery_detect);
+ if (irq <= 0) {
+ dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq);
+ gpio_free(pdata->battery_detect);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ rc = request_irq(irq, bq20z75_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(&client->dev), &bq20z75_device->power_supply);
+ if (rc) {
+ dev_warn(&client->dev, "Failed to request irq: %d\n", rc);
+ gpio_free(pdata->battery_detect);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ bq20z75_device->irq = irq;
+
+skip_gpio:
+
rc = power_supply_register(&client->dev, &bq20z75_device->power_supply);
if (rc) {
dev_err(&client->dev,
"%s: Failed to register power supply\n", __func__);
- kfree(bq20z75_device);
- return rc;
+ goto exit_psupply;
}
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
return 0;
+
+exit_psupply:
+ if (bq20z75_device->irq)
+ free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
+ if (bq20z75_device->gpio_detect)
+ gpio_free(pdata->battery_detect);
+
+ kfree(bq20z75_device);
+
+ return rc;
}
-static int bq20z75_remove(struct i2c_client *client)
+static int __devexit bq20z75_remove(struct i2c_client *client)
{
struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ if (bq20z75_device->irq)
+ free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
+ if (bq20z75_device->gpio_detect)
+ gpio_free(bq20z75_device->pdata->battery_detect);
+
power_supply_unregister(&bq20z75_device->power_supply);
kfree(bq20z75_device);
bq20z75_device = NULL;
@@ -465,10 +664,11 @@ static const struct i2c_device_id bq20z75_id[] = {
{ "bq20z75", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, bq20z75_id);
static struct i2c_driver bq20z75_battery_driver = {
.probe = bq20z75_probe,
- .remove = bq20z75_remove,
+ .remove = __devexit_p(bq20z75_remove),
.suspend = bq20z75_suspend,
.resume = bq20z75_resume,
.id_table = bq20z75_id,
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index eff0273d4030..59e68dbd028b 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
+ * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
@@ -15,6 +16,13 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
+
+/*
+ * Datasheets:
+ * http://focus.ti.com/docs/prod/folders/print/bq27000.html
+ * http://focus.ti.com/docs/prod/folders/print/bq27500.html
+ */
+
#include <linux/module.h>
#include <linux/param.h>
#include <linux/jiffies.h>
@@ -27,7 +35,9 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
-#define DRIVER_VERSION "1.1.0"
+#include <linux/power/bq27x00_battery.h>
+
+#define DRIVER_VERSION "1.2.0"
#define BQ27x00_REG_TEMP 0x06
#define BQ27x00_REG_VOLT 0x08
@@ -36,36 +46,59 @@
#define BQ27x00_REG_TTE 0x16
#define BQ27x00_REG_TTF 0x18
#define BQ27x00_REG_TTECP 0x26
+#define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */
+#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
+#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
+#define BQ27x00_REG_AE 0x22 /* Available energy */
#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
+#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
#define BQ27000_FLAG_CHGS BIT(7)
+#define BQ27000_FLAG_FC BIT(5)
-#define BQ27500_REG_SOC 0x2c
+#define BQ27500_REG_SOC 0x2C
+#define BQ27500_REG_DCAP 0x3C /* Design capacity */
#define BQ27500_FLAG_DSC BIT(0)
#define BQ27500_FLAG_FC BIT(9)
-/* If the system has several batteries we need a different name for each
- * of them...
- */
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_mutex);
+#define BQ27000_RS 20 /* Sense resistor */
struct bq27x00_device_info;
struct bq27x00_access_methods {
- int (*read)(u8 reg, int *rt_value, int b_single,
- struct bq27x00_device_info *di);
+ int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
};
enum bq27x00_chip { BQ27000, BQ27500 };
+struct bq27x00_reg_cache {
+ int temperature;
+ int time_to_empty;
+ int time_to_empty_avg;
+ int time_to_full;
+ int charge_full;
+ int charge_counter;
+ int capacity;
+ int flags;
+
+ int current_now;
+};
+
struct bq27x00_device_info {
struct device *dev;
int id;
- struct bq27x00_access_methods *bus;
- struct power_supply bat;
enum bq27x00_chip chip;
- struct i2c_client *client;
+ struct bq27x00_reg_cache cache;
+ int charge_design_full;
+
+ unsigned long last_update;
+ struct delayed_work work;
+
+ struct power_supply bat;
+
+ struct bq27x00_access_methods bus;
+
+ struct mutex lock;
};
static enum power_supply_property bq27x00_battery_props[] = {
@@ -78,164 +111,328 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
};
+static unsigned int poll_interval = 360;
+module_param(poll_interval, uint, 0644);
+MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \
+ "0 disables polling");
+
/*
* Common code for BQ27x00 devices
*/
-static int bq27x00_read(u8 reg, int *rt_value, int b_single,
- struct bq27x00_device_info *di)
+static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
+ bool single)
{
- return di->bus->read(reg, rt_value, b_single, di);
+ return di->bus.read(di, reg, single);
}
/*
- * Return the battery temperature in tenths of degree Celsius
+ * Return the battery Relative State-of-Charge
* Or < 0 if something fails.
*/
-static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
+static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
{
- int ret;
- int temp = 0;
+ int rsoc;
- ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading temperature\n");
- return ret;
+ if (di->chip == BQ27500)
+ rsoc = bq27x00_read(di, BQ27500_REG_SOC, false);
+ else
+ rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
+
+ if (rsoc < 0)
+ dev_err(di->dev, "error reading relative State-of-Charge\n");
+
+ return rsoc;
+}
+
+/*
+ * Return a battery charge value in µAh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
+{
+ int charge;
+
+ charge = bq27x00_read(di, reg, false);
+ if (charge < 0) {
+ dev_err(di->dev, "error reading nominal available capacity\n");
+ return charge;
}
if (di->chip == BQ27500)
- return temp - 2731;
+ charge *= 1000;
else
- return ((temp >> 2) - 273) * 10;
+ charge = charge * 3570 / BQ27000_RS;
+
+ return charge;
}
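For the bq27500 the capacity registers already read in mAh, so the driver only scales to µAh; for the bq27000 the charge * 3570 / BQ27000_RS step is consistent with a 3.57 µVh register LSB divided by a sense resistor given in milliohms (that reading of the constants is an interpretation, not something the patch states). A quick numeric check of both branches:

#include <stdio.h>

#define BQ27000_RS 20                   /* sense resistor value used by the driver */

int main(void)
{
        int raw = 1000;                 /* hypothetical capacity register reading */

        /* Same scaling as bq27x00_battery_read_charge() above. */
        printf("bq27000: raw %d -> %d uAh\n", raw, raw * 3570 / BQ27000_RS);   /* 178500 */
        printf("bq27500: raw %d -> %d uAh\n", raw, raw * 1000);                /* 1000000 */
        return 0;
}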
/*
- * Return the battery Voltage in milivolts
+ * Return the battery Nominal available capacity in µAh
* Or < 0 if something fails.
*/
-static int bq27x00_battery_voltage(struct bq27x00_device_info *di)
+static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di)
{
- int ret;
- int volt = 0;
+ return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC);
+}
- ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading voltage\n");
- return ret;
+/*
+ * Return the battery Last measured discharge in µAh
+ * Or < 0 if something fails.
+ */
+static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di)
+{
+ return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD);
+}
+
+/*
+ * Return the battery Initial last measured discharge in µAh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
+{
+ int ilmd;
+
+ if (di->chip == BQ27500)
+ ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
+ else
+ ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
+
+ if (ilmd < 0) {
+ dev_err(di->dev, "error reading initial last measured discharge\n");
+ return ilmd;
}
- return volt * 1000;
+ if (di->chip == BQ27500)
+ ilmd *= 1000;
+ else
+ ilmd = ilmd * 256 * 3570 / BQ27000_RS;
+
+ return ilmd;
}
/*
- * Return the battery average current
- * Note that current can be negative signed as well
- * Or 0 if something fails.
+ * Return the battery Cycle count total
+ * Or < 0 if something fails.
*/
-static int bq27x00_battery_current(struct bq27x00_device_info *di)
+static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di)
{
- int ret;
- int curr = 0;
- int flags = 0;
+ int cyct;
- ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading current\n");
- return 0;
+ cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false);
+ if (cyct < 0)
+ dev_err(di->dev, "error reading cycle count total\n");
+
+ return cyct;
+}
+
+/*
+ * Read a time register.
+ * Return < 0 if something fails.
+ */
+static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
+{
+ int tval;
+
+ tval = bq27x00_read(di, reg, false);
+ if (tval < 0) {
+ dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
+ return tval;
}
- if (di->chip == BQ27500) {
- /* bq27500 returns signed value */
- curr = (int)(s16)curr;
- } else {
- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
- if (ret < 0) {
- dev_err(di->dev, "error reading flags\n");
- return 0;
- }
- if (flags & BQ27000_FLAG_CHGS) {
- dev_dbg(di->dev, "negative current!\n");
- curr = -curr;
- }
+ if (tval == 65535)
+ return -ENODATA;
+
+ return tval * 60;
+}
+
+static void bq27x00_update(struct bq27x00_device_info *di)
+{
+ struct bq27x00_reg_cache cache = {0, };
+ bool is_bq27500 = di->chip == BQ27500;
+
+ cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
+ if (cache.flags >= 0) {
+ cache.capacity = bq27x00_battery_read_rsoc(di);
+ cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
+ cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
+ cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
+ cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
+ cache.charge_full = bq27x00_battery_read_lmd(di);
+ cache.charge_counter = bq27x00_battery_read_cyct(di);
+
+ if (!is_bq27500)
+ cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
+
+ /* We only have to read charge design full once */
+ if (di->charge_design_full <= 0)
+ di->charge_design_full = bq27x00_battery_read_ilmd(di);
+ }
+
+ /* Ignore current_now which is a snapshot of the current battery state
+ * and is likely to be different even between two consecutive reads */
+ if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
+ di->cache = cache;
+ power_supply_changed(&di->bat);
}
- return curr * 1000;
+ di->last_update = jiffies;
+}
+
+static void bq27x00_battery_poll(struct work_struct *work)
+{
+ struct bq27x00_device_info *di =
+ container_of(work, struct bq27x00_device_info, work.work);
+
+ bq27x00_update(di);
+
+ if (poll_interval > 0) {
+ /* The timer does not have to be accurate. */
+ set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+ schedule_delayed_work(&di->work, poll_interval * HZ);
+ }
}
+
/*
- * Return the battery Relative State-of-Charge
+ * Return the battery temperature in tenths of degree Celsius
* Or < 0 if something fails.
*/
-static int bq27x00_battery_rsoc(struct bq27x00_device_info *di)
+static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
{
- int ret;
- int rsoc = 0;
+ if (di->cache.temperature < 0)
+ return di->cache.temperature;
if (di->chip == BQ27500)
- ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di);
+ val->intval = di->cache.temperature - 2731;
else
- ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di);
- if (ret) {
- dev_err(di->dev, "error reading relative State-of-Charge\n");
- return ret;
+ val->intval = ((di->cache.temperature * 5) - 5463) / 2;
+
+ return 0;
+}
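The two branches above map a 0.1 K register step (bq27500) and a 0.25 K step (bq27000, implied by the * 5 / 2 arithmetic) onto tenths of a degree Celsius. A short worked check with hypothetical readings near 300 K:

#include <stdio.h>

int main(void)
{
        int raw500 = 3000;              /* hypothetical bq27500 reading, 0.1 K units */
        int raw000 = 1200;              /* hypothetical bq27000 reading, 0.25 K units */

        /* Same arithmetic as the driver; results are tenths of a degree Celsius. */
        printf("bq27500: %d -> %d (26.9 C)\n", raw500, raw500 - 2731);
        printf("bq27000: %d -> %d (26.8 C)\n", raw000, ((raw000 * 5) - 5463) / 2);
        return 0;
}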
+
+/*
+ * Return the battery average current in µA
+ * Note that current can be negative signed as well
+ * Or 0 if something fails.
+ */
+static int bq27x00_battery_current(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int curr;
+
+ if (di->chip == BQ27500)
+ curr = bq27x00_read(di, BQ27x00_REG_AI, false);
+ else
+ curr = di->cache.current_now;
+
+ if (curr < 0)
+ return curr;
+
+ if (di->chip == BQ27500) {
+ /* bq27500 returns signed value */
+ val->intval = (int)((s16)curr) * 1000;
+ } else {
+ if (di->cache.flags & BQ27000_FLAG_CHGS) {
+ dev_dbg(di->dev, "negative current!\n");
+ curr = -curr;
+ }
+
+ val->intval = curr * 3570 / BQ27000_RS;
}
- return rsoc;
+ return 0;
}
static int bq27x00_battery_status(struct bq27x00_device_info *di,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
- int flags = 0;
int status;
- int ret;
-
- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
- if (ret < 0) {
- dev_err(di->dev, "error reading flags\n");
- return ret;
- }
if (di->chip == BQ27500) {
- if (flags & BQ27500_FLAG_FC)
+ if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
- else if (flags & BQ27500_FLAG_DSC)
+ else if (di->cache.flags & BQ27500_FLAG_DSC)
status = POWER_SUPPLY_STATUS_DISCHARGING;
else
status = POWER_SUPPLY_STATUS_CHARGING;
} else {
- if (flags & BQ27000_FLAG_CHGS)
+ if (di->cache.flags & BQ27000_FLAG_FC)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_CHGS)
status = POWER_SUPPLY_STATUS_CHARGING;
+ else if (power_supply_am_i_supplied(&di->bat))
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
status = POWER_SUPPLY_STATUS_DISCHARGING;
}
val->intval = status;
+
return 0;
}
/*
- * Read a time register.
- * Return < 0 if something fails.
+ * Return the battery Voltage in millivolts
+ * Or < 0 if something fails.
*/
-static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg,
- union power_supply_propval *val)
+static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
{
- int tval = 0;
- int ret;
+ int volt;
- ret = bq27x00_read(reg, &tval, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading register %02x\n", reg);
- return ret;
+ volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
+ if (volt < 0)
+ return volt;
+
+ val->intval = volt * 1000;
+
+ return 0;
+}
+
+/*
+ * Return the battery Available energy in µWh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_energy(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int ae;
+
+ ae = bq27x00_read(di, BQ27x00_REG_AE, false);
+ if (ae < 0) {
+ dev_err(di->dev, "error reading available energy\n");
+ return ae;
}
- if (tval == 65535)
- return -ENODATA;
+ if (di->chip == BQ27500)
+ ae *= 1000;
+ else
+ ae = ae * 29200 / BQ27000_RS;
+
+ val->intval = ae;
+
+ return 0;
+}
+
+
+static int bq27x00_simple_value(int value,
+ union power_supply_propval *val)
+{
+ if (value < 0)
+ return value;
+
+ val->intval = value;
- val->intval = tval * 60;
return 0;
}
@@ -249,33 +446,61 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
int ret = 0;
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
+ mutex_lock(&di->lock);
+ if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
+ cancel_delayed_work_sync(&di->work);
+ bq27x00_battery_poll(&di->work.work);
+ }
+ mutex_unlock(&di->lock);
+
+ if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
+ return -ENODEV;
+
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
ret = bq27x00_battery_status(di, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq27x00_battery_voltage(di, val);
+ break;
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = bq27x00_battery_voltage(di);
- if (psp == POWER_SUPPLY_PROP_PRESENT)
- val->intval = val->intval <= 0 ? 0 : 1;
+ val->intval = di->cache.flags < 0 ? 0 : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = bq27x00_battery_current(di);
+ ret = bq27x00_battery_current(di, val);
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = bq27x00_battery_rsoc(di);
+ ret = bq27x00_simple_value(di->cache.capacity, val);
break;
case POWER_SUPPLY_PROP_TEMP:
- val->intval = bq27x00_battery_temperature(di);
+ ret = bq27x00_battery_temperature(di, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
- ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val);
+ ret = bq27x00_simple_value(di->cache.time_to_empty, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
- ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val);
+ ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
- ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val);
+ ret = bq27x00_simple_value(di->cache.time_to_full, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = bq27x00_simple_value(di->cache.charge_full, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = bq27x00_simple_value(di->charge_design_full, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ ret = bq27x00_simple_value(di->cache.charge_counter, val);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ ret = bq27x00_battery_energy(di, val);
break;
default:
return -EINVAL;
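Property reads above are served from di->cache; a synchronous refresh is forced only when the cache is more than five seconds old, otherwise the delayed work keeps it current at poll_interval. A minimal sketch of that staleness check, using wall-clock time in place of jiffies:

#include <stdio.h>
#include <time.h>

#define CACHE_MAX_AGE 5                 /* seconds, mirrors the 5 * HZ window above */

struct cache {
        time_t last_update;
        int capacity;
};

/* Stand-in for bq27x00_update(): refresh the cached values. */
static void update(struct cache *c)
{
        c->capacity = 87;               /* hypothetical fresh reading */
        c->last_update = time(NULL);
}

static int get_capacity(struct cache *c)
{
        if (time(NULL) - c->last_update > CACHE_MAX_AGE)
                update(c);              /* cache too old: refresh before answering */
        return c->capacity;
}

int main(void)
{
        struct cache c = { .last_update = 0, .capacity = -1 };

        printf("capacity = %d\n", get_capacity(&c));    /* first call triggers a refresh */
        return 0;
}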
@@ -284,56 +509,91 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
return ret;
}
-static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
+static void bq27x00_external_power_changed(struct power_supply *psy)
{
+ struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
+
+ cancel_delayed_work_sync(&di->work);
+ schedule_delayed_work(&di->work, 0);
+}
+
+static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
+{
+ int ret;
+
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat.properties = bq27x00_battery_props;
di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props);
di->bat.get_property = bq27x00_battery_get_property;
- di->bat.external_power_changed = NULL;
+ di->bat.external_power_changed = bq27x00_external_power_changed;
+
+ INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll);
+ mutex_init(&di->lock);
+
+ ret = power_supply_register(di->dev, &di->bat);
+ if (ret) {
+ dev_err(di->dev, "failed to register battery: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+
+ bq27x00_update(di);
+
+ return 0;
}
-/*
- * i2c specific code
+static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
+{
+ cancel_delayed_work_sync(&di->work);
+
+ power_supply_unregister(&di->bat);
+
+ mutex_destroy(&di->lock);
+}
+
+
+/* i2c specific code */
+#ifdef CONFIG_BATTERY_BQ27X00_I2C
+
+/* If the system has several batteries we need a different name for each
+ * of them...
*/
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
-static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single,
- struct bq27x00_device_info *di)
+static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single)
{
- struct i2c_client *client = di->client;
- struct i2c_msg msg[1];
+ struct i2c_client *client = to_i2c_client(di->dev);
+ struct i2c_msg msg[2];
unsigned char data[2];
- int err;
+ int ret;
if (!client->adapter)
return -ENODEV;
- msg->addr = client->addr;
- msg->flags = 0;
- msg->len = 1;
- msg->buf = data;
-
- data[0] = reg;
- err = i2c_transfer(client->adapter, msg, 1);
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = &reg;
+ msg[0].len = sizeof(reg);
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+ if (single)
+ msg[1].len = 1;
+ else
+ msg[1].len = 2;
- if (err >= 0) {
- if (!b_single)
- msg->len = 2;
- else
- msg->len = 1;
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0)
+ return ret;
- msg->flags = I2C_M_RD;
- err = i2c_transfer(client->adapter, msg, 1);
- if (err >= 0) {
- if (!b_single)
- *rt_value = get_unaligned_le16(data);
- else
- *rt_value = data[0];
+ if (!single)
+ ret = get_unaligned_le16(data);
+ else
+ ret = data[0];
- return 0;
- }
- }
- return err;
+ return ret;
}
static int bq27x00_battery_probe(struct i2c_client *client,
@@ -341,7 +601,6 @@ static int bq27x00_battery_probe(struct i2c_client *client,
{
char *name;
struct bq27x00_device_info *di;
- struct bq27x00_access_methods *bus;
int num;
int retval = 0;
@@ -368,38 +627,20 @@ static int bq27x00_battery_probe(struct i2c_client *client,
retval = -ENOMEM;
goto batt_failed_2;
}
+
di->id = num;
+ di->dev = &client->dev;
di->chip = id->driver_data;
+ di->bat.name = name;
+ di->bus.read = &bq27x00_read_i2c;
- bus = kzalloc(sizeof(*bus), GFP_KERNEL);
- if (!bus) {
- dev_err(&client->dev, "failed to allocate access method "
- "data\n");
- retval = -ENOMEM;
+ if (bq27x00_powersupply_init(di))
goto batt_failed_3;
- }
i2c_set_clientdata(client, di);
- di->dev = &client->dev;
- di->bat.name = name;
- bus->read = &bq27x00_read_i2c;
- di->bus = bus;
- di->client = client;
-
- bq27x00_powersupply_init(di);
-
- retval = power_supply_register(&client->dev, &di->bat);
- if (retval) {
- dev_err(&client->dev, "failed to register battery\n");
- goto batt_failed_4;
- }
-
- dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
return 0;
-batt_failed_4:
- kfree(bus);
batt_failed_3:
kfree(di);
batt_failed_2:
@@ -416,9 +657,8 @@ static int bq27x00_battery_remove(struct i2c_client *client)
{
struct bq27x00_device_info *di = i2c_get_clientdata(client);
- power_supply_unregister(&di->bat);
+ bq27x00_powersupply_unregister(di);
- kfree(di->bus);
kfree(di->bat.name);
mutex_lock(&battery_mutex);
@@ -430,15 +670,12 @@ static int bq27x00_battery_remove(struct i2c_client *client)
return 0;
}
-/*
- * Module stuff
- */
-
static const struct i2c_device_id bq27x00_id[] = {
{ "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
{ "bq27500", BQ27500 },
{},
};
+MODULE_DEVICE_TABLE(i2c, bq27x00_id);
static struct i2c_driver bq27x00_battery_driver = {
.driver = {
@@ -449,13 +686,164 @@ static struct i2c_driver bq27x00_battery_driver = {
.id_table = bq27x00_id,
};
+static inline int bq27x00_battery_i2c_init(void)
+{
+ int ret = i2c_add_driver(&bq27x00_battery_driver);
+ if (ret)
+ printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n");
+
+ return ret;
+}
+
+static inline void bq27x00_battery_i2c_exit(void)
+{
+ i2c_del_driver(&bq27x00_battery_driver);
+}
+
+#else
+
+static inline int bq27x00_battery_i2c_init(void) { return 0; }
+static inline void bq27x00_battery_i2c_exit(void) {};
+
+#endif
+
+/* platform specific code */
+#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
+
+static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg,
+ bool single)
+{
+ struct device *dev = di->dev;
+ struct bq27000_platform_data *pdata = dev->platform_data;
+ unsigned int timeout = 3;
+ int upper, lower;
+ int temp;
+
+ if (!single) {
+ /* Make sure the value has not changed in between reading the
+ * lower and the upper part */
+ upper = pdata->read(dev, reg + 1);
+ do {
+ temp = upper;
+ if (upper < 0)
+ return upper;
+
+ lower = pdata->read(dev, reg);
+ if (lower < 0)
+ return lower;
+
+ upper = pdata->read(dev, reg + 1);
+ } while (temp != upper && --timeout);
+
+ if (timeout == 0)
+ return -EIO;
+
+ return (upper << 8) | lower;
+ }
+
+ return pdata->read(dev, reg);
+}
+
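Because HDQ only transfers one byte per call, the helper above re-reads the high byte until two consecutive reads agree, so a 16-bit value cannot be torn by a carry between the low and high halves. A self-contained sketch of the same stabilisation loop against a simulated register that keeps counting between accesses:

#include <stdio.h>

/* Simulated 16-bit counter that advances on every byte access, so a naive
 * low/high read pair can tear across the 0x12FF -> 0x1300 carry. */
static unsigned int counter = 0x12FF;

static int read_byte(int high)
{
        int val = high ? ((counter >> 8) & 0xff) : (counter & 0xff);
        counter++;                      /* register keeps changing underneath us */
        return val;
}

static int read_word_stable(void)
{
        int timeout = 3, upper, lower, prev;

        upper = read_byte(1);
        do {
                prev = upper;
                lower = read_byte(0);
                upper = read_byte(1);   /* re-read the high byte until it settles */
        } while (prev != upper && --timeout);

        if (timeout == 0)
                return -1;              /* never stabilised */
        return (upper << 8) | lower;
}

int main(void)
{
        printf("word = 0x%04x\n", read_word_stable());  /* consistent halves, e.g. 0x1302 */
        return 0;
}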
+static int __devinit bq27000_battery_probe(struct platform_device *pdev)
+{
+ struct bq27x00_device_info *di;
+ struct bq27000_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data supplied\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->read) {
+ dev_err(&pdev->dev, "no hdq read callback supplied\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "failed to allocate device info data\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ di->dev = &pdev->dev;
+ di->chip = BQ27000;
+
+ di->bat.name = pdata->name ?: dev_name(&pdev->dev);
+ di->bus.read = &bq27000_read_platform;
+
+ ret = bq27x00_powersupply_init(di);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return ret;
+}
+
+static int __devexit bq27000_battery_remove(struct platform_device *pdev)
+{
+ struct bq27x00_device_info *di = platform_get_drvdata(pdev);
+
+ bq27x00_powersupply_unregister(di);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static struct platform_driver bq27000_battery_driver = {
+ .probe = bq27000_battery_probe,
+ .remove = __devexit_p(bq27000_battery_remove),
+ .driver = {
+ .name = "bq27000-battery",
+ .owner = THIS_MODULE,
+ },
+};
+
+static inline int bq27x00_battery_platform_init(void)
+{
+ int ret = platform_driver_register(&bq27000_battery_driver);
+ if (ret)
+ printk(KERN_ERR "Unable to register BQ27000 platform driver\n");
+
+ return ret;
+}
+
+static inline void bq27x00_battery_platform_exit(void)
+{
+ platform_driver_unregister(&bq27000_battery_driver);
+}
+
+#else
+
+static inline int bq27x00_battery_platform_init(void) { return 0; }
+static inline void bq27x00_battery_platform_exit(void) {};
+
+#endif
+
+/*
+ * Module stuff
+ */
+
static int __init bq27x00_battery_init(void)
{
int ret;
- ret = i2c_add_driver(&bq27x00_battery_driver);
+ ret = bq27x00_battery_i2c_init();
if (ret)
- printk(KERN_ERR "Unable to register BQ27x00 driver\n");
+ return ret;
+
+ ret = bq27x00_battery_platform_init();
+ if (ret)
+ bq27x00_battery_i2c_exit();
return ret;
}
@@ -463,7 +851,8 @@ module_init(bq27x00_battery_init);
static void __exit bq27x00_battery_exit(void)
{
- i2c_del_driver(&bq27x00_battery_driver);
+ bq27x00_battery_platform_exit();
+ bq27x00_battery_i2c_exit();
}
module_exit(bq27x00_battery_exit);
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 6957e8af6449..4d2dc4fa2888 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -393,6 +393,7 @@ static const struct i2c_device_id ds278x_id[] = {
{"ds2786", DS2786},
{},
};
+MODULE_DEVICE_TABLE(i2c, ds278x_id);
static struct i2c_driver ds278x_battery_driver = {
.driver = {
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 02414db6a94c..763f894ed188 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -39,7 +39,7 @@ struct jz_battery {
int irq;
int charge_irq;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
int status;
long voltage;
@@ -258,7 +258,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jz_battery->cell = pdev->dev.platform_data;
+ jz_battery->cell = mfd_get_cell(pdev);
jz_battery->irq = platform_get_irq(pdev, 0);
if (jz_battery->irq < 0) {
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 970f7335d3a7..329b46b2327d 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -171,6 +171,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
dev_set_drvdata(dev, psy);
psy->dev = dev;
+ INIT_WORK(&psy->changed_work, power_supply_changed_work);
+
rc = kobject_set_name(&dev->kobj, "%s", psy->name);
if (rc)
goto kobject_set_name_failed;
@@ -179,8 +181,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto device_add_failed;
- INIT_WORK(&psy->changed_work, power_supply_changed_work);
-
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c
index 031a554837f7..da25eb94e5c6 100644
--- a/drivers/power/power_supply_leds.c
+++ b/drivers/power/power_supply_leds.c
@@ -21,6 +21,8 @@
static void power_supply_update_bat_leds(struct power_supply *psy)
{
union power_supply_propval status;
+ unsigned long delay_on = 0;
+ unsigned long delay_off = 0;
if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
return;
@@ -32,16 +34,22 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
+ led_trigger_event(psy->charging_blink_full_solid_trig,
+ LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_FULL);
led_trigger_event(psy->full_trig, LED_OFF);
+ led_trigger_blink(psy->charging_blink_full_solid_trig,
+ &delay_on, &delay_off);
break;
default:
led_trigger_event(psy->charging_full_trig, LED_OFF);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_OFF);
+ led_trigger_event(psy->charging_blink_full_solid_trig,
+ LED_OFF);
break;
}
}
@@ -64,15 +72,24 @@ static int power_supply_create_bat_triggers(struct power_supply *psy)
if (!psy->full_trig_name)
goto full_failed;
+ psy->charging_blink_full_solid_trig_name = kasprintf(GFP_KERNEL,
+ "%s-charging-blink-full-solid", psy->name);
+ if (!psy->charging_blink_full_solid_trig_name)
+ goto charging_blink_full_solid_failed;
+
led_trigger_register_simple(psy->charging_full_trig_name,
&psy->charging_full_trig);
led_trigger_register_simple(psy->charging_trig_name,
&psy->charging_trig);
led_trigger_register_simple(psy->full_trig_name,
&psy->full_trig);
+ led_trigger_register_simple(psy->charging_blink_full_solid_trig_name,
+ &psy->charging_blink_full_solid_trig);
goto success;
+charging_blink_full_solid_failed:
+ kfree(psy->full_trig_name);
full_failed:
kfree(psy->charging_trig_name);
charging_failed:
@@ -88,6 +105,8 @@ static void power_supply_remove_bat_triggers(struct power_supply *psy)
led_trigger_unregister_simple(psy->charging_full_trig);
led_trigger_unregister_simple(psy->charging_trig);
led_trigger_unregister_simple(psy->full_trig);
+ led_trigger_unregister_simple(psy->charging_blink_full_solid_trig);
+ kfree(psy->charging_blink_full_solid_trig_name);
kfree(psy->full_trig_name);
kfree(psy->charging_trig_name);
kfree(psy->charging_full_trig_name);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index cd1f90754a3a..605514afc29f 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -270,7 +270,7 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
attr = &power_supply_attrs[psy->properties[j]];
ret = power_supply_show_property(dev, attr, prop_buf);
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ENODATA) {
/* When a battery is absent, we expect -ENODEV. Don't abort;
send the uevent with at least the PRESENT=0 property */
ret = 0;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index ff1f42398a2e..92c16e1677bd 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -71,8 +71,11 @@ struct twl4030_bci {
struct power_supply usb;
struct otg_transceiver *transceiver;
struct notifier_block otg_nb;
+ struct work_struct work;
int irq_chg;
int irq_bci;
+
+ unsigned long event;
};
/*
@@ -258,14 +261,11 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
-static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
- void *priv)
+static void twl4030_bci_usb_work(struct work_struct *data)
{
- struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb);
+ struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work);
- dev_dbg(bci->dev, "OTG notify %lu\n", val);
-
- switch (val) {
+ switch (bci->event) {
case USB_EVENT_VBUS:
case USB_EVENT_CHARGER:
twl4030_charger_enable_usb(bci, true);
@@ -274,6 +274,17 @@ static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
twl4030_charger_enable_usb(bci, false);
break;
}
+}
+
+static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb);
+
+ dev_dbg(bci->dev, "OTG notify %lu\n", val);
+
+ bci->event = val;
+ schedule_work(&bci->work);
return NOTIFY_OK;
}
@@ -466,6 +477,8 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
goto fail_bci_irq;
}
+ INIT_WORK(&bci->work, twl4030_bci_usb_work);
+
bci->transceiver = otg_get_transceiver();
if (bci->transceiver != NULL) {
bci->otg_nb.notifier_call = twl4030_bci_usb_ncb;
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index e5ed52d71937..81304c0bb9d3 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -293,6 +293,7 @@ static const struct i2c_device_id z2_batt_id[] = {
{ "aer915", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, z2_batt_id);
static struct i2c_driver z2_batt_driver = {
.driver = {
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43f7519..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
{
unsigned long flags;
int captured = 0;
- struct pps_ktime ts_real;
+ struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b41853a877..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
- size = 0x200000;
+ size = RIO_MAINT_SPACE_SZ;
- if (off > size)
+ if (off >= size)
return 0;
if (off + count > size) {
size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
loff_t init_off = off;
u8 *data = (u8 *) buf;
- if (off > 0x200000)
+ if (off >= RIO_MAINT_SPACE_SZ)
return 0;
- if (off + count > 0x200000) {
- size = 0x200000 - off;
+ if (off + count > RIO_MAINT_SPACE_SZ) {
+ size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
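Changing the first test from 'off > size' to 'off >= size' makes an offset exactly at the end of the maintenance window read as end-of-file instead of attempting an access one byte past it, and the second test truncates requests that start inside the window but overrun it. A standalone illustration of the clamp (0x200000 stands in for RIO_MAINT_SPACE_SZ, which the patch substitutes for that literal):

#include <stdio.h>

#define MAINT_SPACE_SZ 0x200000         /* stand-in for RIO_MAINT_SPACE_SZ */

static size_t clamp_request(size_t off, size_t count)
{
        if (off >= MAINT_SPACE_SZ)
                return 0;                       /* at or past the end: nothing to do */
        if (off + count > MAINT_SPACE_SZ)
                count = MAINT_SPACE_SZ - off;   /* truncate to the window */
        return count;
}

int main(void)
{
        printf("%zu\n", clamp_request(0x200000, 16));   /* 0: end of space */
        printf("%zu\n", clamp_request(0x1ffff0, 64));   /* 16: truncated */
        printf("%zu\n", clamp_request(0x1000, 64));     /* 64: untouched */
        return 0;
}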
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
- .size = 0x200000,
+ .size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index ed6feaf9398d..2dec589a8908 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/core.h>
/* LDO registers and some handy masking definitions for AB3100 */
#define AB3100_LDO_A 0x40
@@ -576,7 +577,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
- struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
+ struct ab3100_platform_data *plfdata = mfd_get_data(pdev);
int err = 0;
u8 data;
int i;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9fa20957847d..a2dc6223e8d2 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2565,8 +2565,11 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
init_data->consumer_supplies[i].dev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(dev, "Failed to set supply %s\n",
+ init_data->consumer_supplies[i].supply);
goto unset_supplies;
+ }
}
list_add(&rdev->list, &regulator_list);
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 3e5d0c3b4e53..23249cb0a8bd 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -15,6 +15,7 @@
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -336,8 +337,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
- struct mc13783_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_regulator_init_data *init_data;
int i, ret;
@@ -381,8 +381,7 @@ err:
static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13783_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
int i;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1b8f7398a4a8..6f15168e5ed4 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -15,6 +15,7 @@
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -520,8 +521,7 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
struct mc13xxx_regulator_init_data *init_data;
int i, ret;
u32 val;
@@ -595,8 +595,7 @@ err_free:
static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
int i;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b950d4..2bb5de1f2421 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
- BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+ BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index bd332cf1cc3f..6a292852a358 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -475,6 +475,13 @@ static struct regulator_ops twlfixed_ops = {
.get_status = twlreg_get_status,
};
+static struct regulator_ops twl6030_fixed_resource = {
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
+ .get_status = twlreg_get_status,
+};
+
/*----------------------------------------------------------------------*/
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
@@ -538,6 +545,20 @@ static struct regulator_ops twlfixed_ops = {
}, \
}
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .ops = &twl6030_fixed_resource, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
/*
* We list regulators here if systems need some level of
* software control over them after boot.
@@ -577,7 +598,8 @@ static struct twlreg_info twl_regs[] = {
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21)
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
};
static int __devinit twlreg_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4bde91..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_IDLE;
default:
BUG();
+ return -EINVAL;
}
}
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e4c926..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
- .alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c7ff8df347e7..159b95e4b420 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -37,6 +37,8 @@
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -1123,6 +1125,47 @@ static struct pnp_driver cmos_pnp_driver = {
#endif /* CONFIG_PNP */
+#ifdef CONFIG_OF
+static const struct of_device_id of_cmos_match[] = {
+ {
+ .compatible = "motorola,mc146818",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_cmos_match);
+
+static __init void cmos_of_init(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct rtc_time time;
+ int ret;
+ const __be32 *val;
+
+ if (!node)
+ return;
+
+ val = of_get_property(node, "ctrl-reg", NULL);
+ if (val)
+ CMOS_WRITE(be32_to_cpup(val), RTC_CONTROL);
+
+ val = of_get_property(node, "freq-reg", NULL);
+ if (val)
+ CMOS_WRITE(be32_to_cpup(val), RTC_FREQ_SELECT);
+
+ get_rtc_time(&time);
+ ret = rtc_valid_tm(&time);
+ if (ret) {
+ struct rtc_time def_time = {
+ .tm_year = 1,
+ .tm_mday = 1,
+ };
+ set_rtc_time(&def_time);
+ }
+}
+#else
+static inline void cmos_of_init(struct platform_device *pdev) {}
+#define of_cmos_match NULL
+#endif
/*----------------------------------------------------------------*/
/* Platform setup should have set up an RTC device, when PNP is
@@ -1131,6 +1174,7 @@ static struct pnp_driver cmos_pnp_driver = {
static int __init cmos_platform_probe(struct platform_device *pdev)
{
+ cmos_of_init(pdev);
cmos_wake_setup(&pdev->dev);
return cmos_do_probe(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_IO, 0),
@@ -1162,6 +1206,7 @@ static struct platform_driver cmos_platform_driver = {
#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
#endif
+ .of_match_table = of_cmos_match,
}
};
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
/*
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
time->tm_hour = bcd2bin(hour);
}
- time->tm_wday = bcd2bin(week);
+ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+ time->tm_wday = bcd2bin(week) - 1;
time->tm_mday = bcd2bin(day);
- time->tm_mon = bcd2bin(month & 0x7F);
+ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+ time->tm_mon = bcd2bin(month & 0x7F) - 1;
if (century)
add_century = 100;
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[0] = bin2bcd(time->tm_sec);
buf[1] = bin2bcd(time->tm_min);
buf[2] = bin2bcd(time->tm_hour);
- buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+ buf[3] = bin2bcd(time->tm_wday + 1);
buf[4] = bin2bcd(time->tm_mday); /* Date */
- buf[5] = bin2bcd(time->tm_mon);
+ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+ buf[5] = bin2bcd(time->tm_mon + 1);
if (time->tm_year >= 100) {
buf[5] |= 0x80;
buf[6] = bin2bcd(time->tm_year - 100);
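The two ds3232 hunks above apply the same off-by-one conversion in opposite directions: the chip stores the weekday as 1-7 and the month as 1-12, while struct rtc_time uses 0-6 and 0-11. Condensed here for reference only (bcd2bin/bin2bcd come from <linux/bcd.h>; this sketch is not part of the patch):

	/* chip registers -> struct rtc_time (read path) */
	time->tm_wday = bcd2bin(week) - 1;          /* 1..7  -> 0..6  */
	time->tm_mon  = bcd2bin(month & 0x7F) - 1;  /* 1..12 -> 0..11 */

	/* struct rtc_time -> chip registers (write path) */
	buf[3] = bin2bcd(time->tm_wday + 1);        /* 0..6  -> 1..7  */
	buf[5] = bin2bcd(time->tm_mon + 1);         /* 0..11 -> 1..12 */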
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index dfcdf0901d21..2b952c654b14 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -268,8 +268,7 @@ static const struct rtc_class_ops mpc5121_rtc_ops = {
.update_irq_enable = mpc5121_rtc_update_irq_enable,
};
-static int __devinit mpc5121_rtc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc5121_rtc_probe(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc;
int err = 0;
@@ -364,7 +363,7 @@ static struct of_device_id mpc5121_rtc_match[] __devinitdata = {
{},
};
-static struct of_platform_driver mpc5121_rtc_driver = {
+static struct platform_driver mpc5121_rtc_driver = {
.driver = {
.name = "mpc5121-rtc",
.owner = THIS_MODULE,
@@ -376,13 +375,13 @@ static struct of_platform_driver mpc5121_rtc_driver = {
static int __init mpc5121_rtc_init(void)
{
- return of_register_platform_driver(&mpc5121_rtc_driver);
+ return platform_driver_register(&mpc5121_rtc_driver);
}
module_init(mpc5121_rtc_init);
static void __exit mpc5121_rtc_exit(void)
{
- of_unregister_platform_driver(&mpc5121_rtc_driver);
+ platform_driver_unregister(&mpc5121_rtc_driver);
}
module_exit(mpc5121_rtc_exit);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 1db62db8469d..8d2cf6027120 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -62,6 +62,17 @@ static inline int is_intr(u8 rtc_intr)
return rtc_intr & RTC_IRQMASK;
}
+static inline unsigned char vrtc_is_updating(void)
+{
+ unsigned char uip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ uip = (vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return uip;
+}
+
/*
* rtc_time's year contains the increment over 1900, but vRTC's YEAR
* register can't be programmed to value larger than 0x64, so vRTC
@@ -76,7 +87,7 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
{
unsigned long flags;
- if (rtc_is_updating())
+ if (vrtc_is_updating())
mdelay(20);
spin_lock_irqsave(&rtc_lock, flags);
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index bbdb2f02798a..baff724cd40f 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -103,7 +103,7 @@ static const struct rtc_class_ops pl030_ops = {
.set_alarm = pl030_set_alarm,
};
-static int pl030_probe(struct amba_device *dev, struct amba_id *id)
+static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index b7a6690e5b35..6568f4b37e19 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -358,7 +358,7 @@ static int pl031_remove(struct amba_device *adev)
return 0;
}
-static int pl031_probe(struct amba_device *adev, struct amba_id *id)
+static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct pl031_local *ldata;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d05563..379d8592bc6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
- { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
@@ -2648,6 +2648,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
+ len_to_track_end = 0;
/*
* A tidaw can address 4k of memory, but must not cross page boundaries
* We can let the block layer handle this by setting
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14fa5dd..1f6a4d894e73 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
/*
* Parameter parsing functions.
*/
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e412b5e..5ad44daef73b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp;
- int ct, perm;
+ unsigned int ct;
+ int perm;
argp = (void __user *)arg;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 430f875006f2..f3147542e52e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -630,11 +630,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
irb = (struct irb *)&S390_lowcore.irb;
do {
kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
- /*
- * Non I/O-subchannel thin interrupts are processed differently
- */
- if (tpi_info->adapter_IO == 1 &&
- tpi_info->int_type == IO_INTERRUPT_TYPE) {
+ if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
continue;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index bf7f80f5a330..7a9032d01fb8 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -105,8 +105,6 @@ struct subchannel {
struct schib_config config;
} __attribute__ ((aligned(8)));
-#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
-
#define to_subchannel(n) container_of(n, struct subchannel, dev)
extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f47a714538db..c5d763ed406e 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -741,7 +741,6 @@ struct qeth_card {
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
- int use_hard_stop;
int read_or_write_problem;
struct qeth_osn_info osn_info;
struct qeth_discipline discipline;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 019ae58ab913..f3d98ac16e9f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -302,12 +302,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
- QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n",
- ipa_name, com, QETH_CARD_IFNAME(card),
- rc, qeth_get_ipa_msg(rc));
+ QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
+ "x%X \"%s\"\n",
+ ipa_name, com, dev_name(&card->gdev->dev),
+ QETH_CARD_IFNAME(card), rc,
+ qeth_get_ipa_msg(rc));
else
- QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n",
- ipa_name, com, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
+ ipa_name, com, dev_name(&card->gdev->dev),
+ QETH_CARD_IFNAME(card));
}
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -1083,7 +1086,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->data.state = CH_STATE_DOWN;
card->state = CARD_STATE_DOWN;
card->lan_online = 0;
- card->use_hard_stop = 0;
card->read_or_write_problem = 0;
card->dev = NULL;
spin_lock_init(&card->vlanlock);
@@ -1732,20 +1734,22 @@ int qeth_send_control_data(struct qeth_card *card, int len,
};
}
+ if (reply->rc == -EIO)
+ goto error;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
time_err:
+ reply->rc = -ETIME;
spin_lock_irqsave(&reply->card->lock, flags);
list_del_init(&reply->list);
spin_unlock_irqrestore(&reply->card->lock, flags);
- reply->rc = -ETIME;
atomic_inc(&reply->received);
+error:
atomic_set(&card->write.irq_pending, 0);
qeth_release_buffer(iob->channel, iob);
card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
- wake_up(&reply->wait_q);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -2490,45 +2494,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-static int qeth_send_startstoplan(struct qeth_card *card,
- enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
-{
- int rc;
- struct qeth_cmd_buffer *iob;
-
- iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-
- return rc;
-}
-
int qeth_send_startlan(struct qeth_card *card)
{
int rc;
+ struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "strtlan");
- rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_startlan);
-int qeth_send_stoplan(struct qeth_card *card)
-{
- int rc = 0;
-
- /*
- * TODO: according to the IPA format document page 14,
- * TCP/IP (we!) never issue a STOPLAN
- * is this right ?!?
- */
- QETH_DBF_TEXT(SETUP, 2, "stoplan");
-
- rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qeth_send_stoplan);
-
int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ada0fe782373..6fbaacb21943 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -202,17 +202,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
kfree(mc);
}
-static void qeth_l2_del_all_mc(struct qeth_card *card)
+static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
{
struct qeth_mc_mac *mc, *tmp;
spin_lock_bh(&card->mclock);
list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
- if (mc->is_vmac)
- qeth_l2_send_setdelmac(card, mc->mc_addr,
+ if (del) {
+ if (mc->is_vmac)
+ qeth_l2_send_setdelmac(card, mc->mc_addr,
IPA_CMD_DELVMAC, NULL);
- else
- qeth_l2_send_delgroupmac(card, mc->mc_addr);
+ else
+ qeth_l2_send_delgroupmac(card, mc->mc_addr);
+ }
list_del(&mc->list);
kfree(mc);
}
@@ -288,18 +290,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
qeth_l2_send_setdelvlan_cb, NULL);
}
-static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
+static void qeth_l2_process_vlans(struct qeth_card *card)
{
struct qeth_vlan_vid *id;
QETH_CARD_TEXT(card, 3, "L2prcvln");
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
- if (clear)
- qeth_l2_send_setdelvlan(card, id->vid,
- IPA_CMD_DELVLAN);
- else
- qeth_l2_send_setdelvlan(card, id->vid,
- IPA_CMD_SETVLAN);
+ qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
}
spin_unlock_bh(&card->vlanlock);
}
@@ -379,19 +376,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
dev_close(card->dev);
rtnl_unlock();
}
- if (!card->use_hard_stop ||
- recovery_mode) {
- __u8 *mac = &card->dev->dev_addr[0];
- rc = qeth_l2_send_delmac(card, mac);
- QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
- }
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_process_vlans(card, 1);
- if (!card->use_hard_stop ||
- recovery_mode)
- qeth_l2_del_all_mc(card);
+ qeth_l2_del_all_mc(card, 0);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
@@ -405,7 +394,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
- card->use_hard_stop = 0;
return rc;
}
@@ -705,7 +693,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
- qeth_l2_del_all_mc(card);
+ qeth_l2_del_all_mc(card, 1);
spin_lock_bh(&card->mclock);
netdev_for_each_mc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 0);
@@ -907,10 +895,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (cgdev->state == CCWGROUP_ONLINE) {
- card->use_hard_stop = 1;
+ if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
- }
if (card->dev) {
unregister_netdev(card->dev);
@@ -1040,7 +1026,7 @@ contin:
if (card->info.type != QETH_CARD_TYPE_OSN &&
card->info.type != QETH_CARD_TYPE_OSM)
- qeth_l2_process_vlans(card, 0);
+ qeth_l2_process_vlans(card);
netif_tx_disable(card->dev);
@@ -1076,7 +1062,6 @@ contin:
return 0;
out_remove:
- card->use_hard_stop = 1;
qeth_l2_stop_card(card, 0);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
@@ -1144,7 +1129,6 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- card->use_hard_stop = 1;
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
if (!rc)
@@ -1191,7 +1175,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
if (card->state == CARD_STATE_UP) {
- card->use_hard_stop = 1;
__qeth_l2_set_offline(card->gdev, 1);
} else
__qeth_l2_set_offline(card->gdev, 0);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d09b0c44fc3d..6a9cc58321a0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -510,8 +510,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
kfree(tbd_list);
}
-static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
- int recover)
+static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *addr, *tmp;
unsigned long flags;
@@ -530,11 +529,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
addr = list_entry(card->ip_list.next,
struct qeth_ipaddr, entry);
list_del_init(&addr->entry);
- if (clean) {
- spin_unlock_irqrestore(&card->ip_lock, flags);
- qeth_l3_deregister_addr_entry(card, addr);
- spin_lock_irqsave(&card->ip_lock, flags);
- }
if (!recover || addr->is_multicast) {
kfree(addr);
continue;
@@ -1611,29 +1605,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
return 0;
}
-static int qeth_l3_put_unique_id(struct qeth_card *card)
-{
-
- int rc = 0;
- struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "puniqeid");
-
- if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
- UNIQUE_ID_NOT_BY_CARD)
- return -1;
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
- QETH_PROT_IPV6);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
- card->info.unique_id;
- memcpy(&cmd->data.create_destroy_addr.unique_id[0],
- card->dev->dev_addr, OSA_ADDR_LEN);
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
- return rc;
-}
-
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -2324,25 +2295,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
dev_close(card->dev);
rtnl_unlock();
}
- if (!card->use_hard_stop) {
- rc = qeth_send_stoplan(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- }
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
+ qeth_l3_clear_ip_list(card, 1);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
if (card->state == CARD_STATE_HARDSETUP) {
- if (!card->use_hard_stop &&
- (card->info.type != QETH_CARD_TYPE_IQD)) {
- rc = qeth_l3_put_unique_id(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- }
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
qeth_clear_working_pool_list(card);
@@ -2352,7 +2312,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
- card->use_hard_stop = 0;
return rc;
}
@@ -3483,17 +3442,15 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (cgdev->state == CCWGROUP_ONLINE) {
- card->use_hard_stop = 1;
+ if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
- }
if (card->dev) {
unregister_netdev(card->dev);
card->dev = NULL;
}
- qeth_l3_clear_ip_list(card, 0, 0);
+ qeth_l3_clear_ip_list(card, 0);
qeth_l3_clear_ipato_list(card);
return;
}
@@ -3594,7 +3551,6 @@ contin:
mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
- card->use_hard_stop = 1;
qeth_l3_stop_card(card, 0);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
@@ -3663,7 +3619,6 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- card->use_hard_stop = 1;
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
if (!rc)
@@ -3684,7 +3639,6 @@ static int qeth_l3_recover(void *ptr)
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- qeth_l3_clear_ip_list(card, 0, 0);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
}
@@ -3700,7 +3654,6 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
if (card->state == CARD_STATE_UP) {
- card->use_hard_stop = 1;
__qeth_l3_set_offline(card->gdev, 1);
} else
__qeth_l3_set_offline(card->gdev, 0);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 51c666fb67a4..645b0fcbb370 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,36 +122,21 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
- zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
- sizeof(struct zfcp_fc_gpn_ft_req));
- if (!zfcp_data.gpn_ft_cache)
- goto out;
-
- zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
- sizeof(struct fsf_qtcb));
- if (!zfcp_data.qtcb_cache)
+ zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+ sizeof(struct fsf_qtcb));
+ if (!zfcp_fsf_qtcb_cache)
goto out_qtcb_cache;
- zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
- sizeof(struct fsf_status_read_buffer));
- if (!zfcp_data.sr_buffer_cache)
- goto out_sr_cache;
+ zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+ sizeof(struct zfcp_fc_req));
+ if (!zfcp_fc_req_cache)
+ goto out_fc_cache;
- zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
- sizeof(struct zfcp_fc_gid_pn));
- if (!zfcp_data.gid_pn_cache)
- goto out_gid_cache;
-
- zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
- sizeof(struct zfcp_fc_els_adisc));
- if (!zfcp_data.adisc_cache)
- goto out_adisc_cache;
-
- zfcp_data.scsi_transport_template =
+ zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
- if (!zfcp_data.scsi_transport_template)
+ if (!zfcp_scsi_transport_template)
goto out_transport;
- scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
+ scsi_transport_reserve_device(zfcp_scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
@@ -175,18 +160,12 @@ static int __init zfcp_module_init(void)
out_ccw_register:
misc_deregister(&zfcp_cfdc_misc);
out_misc:
- fc_release_transport(zfcp_data.scsi_transport_template);
+ fc_release_transport(zfcp_scsi_transport_template);
out_transport:
- kmem_cache_destroy(zfcp_data.adisc_cache);
-out_adisc_cache:
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
-out_gid_cache:
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
- kmem_cache_destroy(zfcp_data.qtcb_cache);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
-out:
return retval;
}
@@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void)
{
ccw_driver_unregister(&zfcp_ccw_driver);
misc_deregister(&zfcp_cfdc_misc);
- fc_release_transport(zfcp_data.scsi_transport_template);
- kmem_cache_destroy(zfcp_data.adisc_cache);
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
- kmem_cache_destroy(zfcp_data.qtcb_cache);
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+ fc_release_transport(zfcp_scsi_transport_template);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
module_exit(zfcp_module_exit);
@@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
return -ENOMEM;
adapter->pool.qtcb_pool =
- mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+ mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
- adapter->pool.status_read_data =
- mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
- zfcp_data.sr_buffer_cache);
- if (!adapter->pool.status_read_data)
+ BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+ adapter->pool.sr_data =
+ mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+ if (!adapter->pool.sr_data)
return -ENOMEM;
adapter->pool.gid_pn =
- mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
+ mempool_create_slab_pool(1, zfcp_fc_req_cache);
if (!adapter->pool.gid_pn)
return -ENOMEM;
@@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req);
- if (adapter->pool.status_read_data)
- mempool_destroy(adapter->pool.status_read_data);
+ if (adapter->pool.sr_data)
+ mempool_destroy(adapter->pool.sr_data);
if (adapter->pool.gid_pn)
mempool_destroy(adapter->pool.gid_pn);
}
@@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+ INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
- if (!zfcp_adapter_scsi_register(adapter))
+ if (!zfcp_scsi_adapter_register(adapter))
return adapter;
failed:
@@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
+ cancel_work_sync(&adapter->ns_up_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
- zfcp_adapter_scsi_unregister(adapter);
+ zfcp_scsi_adapter_unregister(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
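In the hunks above the status-read buffers switch from a dedicated slab cache to a page pool, and the added BUILD_BUG_ON guarantees that one buffer fits in a page. A condensed sketch of the resulting alloc/free pairing (mirroring the zfcp_fsf.c hunks further down; error handling omitted, not part of the patch itself):

	mempool_t *pool = mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);

	struct page *page = mempool_alloc(pool, GFP_ATOMIC);
	struct fsf_status_read_buffer *sr_buf = page_address(page);
	memset(sr_buf, 0, sizeof(*sr_buf));
	/* ... hand sr_buf to the status-read request ... */
	mempool_free(virt_to_page(sr_buf), pool);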
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9ae1d0a6f627..527ba48eea57 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -89,7 +89,6 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_LUN_READONLY 0x00000008
/* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
@@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
mempool_t *scsi_req;
mempool_t *scsi_abort;
mempool_t *status_read_req;
- mempool_t *status_read_data;
+ mempool_t *sr_data;
mempool_t *gid_pn;
mempool_t *qtcb_pool;
};
@@ -190,6 +189,7 @@ struct zfcp_adapter {
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ struct work_struct ns_up_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
@@ -314,15 +314,4 @@ struct zfcp_fsf_req {
void (*handler)(struct zfcp_fsf_req *);
};
-/* driver data */
-struct zfcp_data {
- struct scsi_host_template scsi_host_template;
- struct scsi_transport_template *scsi_transport_template;
- struct kmem_cache *gpn_ft_cache;
- struct kmem_cache *qtcb_cache;
- struct kmem_cache *sr_buffer_cache;
- struct kmem_cache *gid_pn_cache;
- struct kmem_cache *adisc_cache;
-};
-
#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e003e306f870..e1b4f800e226 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- if (mempool_resize(act->adapter->pool.status_read_data,
+ if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num, GFP_KERNEL))
return ZFCP_ERP_FAILED;
@@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
queue_work(adapter->work_queue, &adapter->scan_work);
+ queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
+
kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6e325284fbe7..03627cfd81cd 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
enum fc_host_event_code event_code, u32);
extern void zfcp_fc_post_event(struct work_struct *);
@@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
@@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
struct qdio_buffer *);
/* zfcp_scsi.c */
-extern struct zfcp_data zfcp_data;
-extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 30cf91a787a3..297e6b71ce9c 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,11 +11,14 @@
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
+struct kmem_cache *zfcp_fc_req_cache;
+
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
@@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_fc_ns_gid_pn_eval(void *data)
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
- struct zfcp_fc_gid_pn *gid_pn = data;
- struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
- struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
- struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
- struct zfcp_port *port = gid_pn->port;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
- if (ct->status)
+ if (ct_els->status)
return;
- if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
+ if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
return;
- /* paranoia */
- if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
- return;
/* looks like a valid d_id */
- port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+ ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
static void zfcp_fc_complete(void *data)
@@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
complete(data);
}
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+ ct_hdr->ct_rev = FC_CT_REV;
+ ct_hdr->ct_fs_type = FC_FST_DIR;
+ ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ ct_hdr->ct_cmd = cmd;
+ ct_hdr->ct_mr_size = mr_size / 4;
+}
+
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
- struct zfcp_fc_gid_pn *gid_pn)
+ struct zfcp_fc_req *fc_req)
{
struct zfcp_adapter *adapter = port->adapter;
DECLARE_COMPLETION_ONSTACK(completion);
+ struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
- gid_pn->port = port;
- gid_pn->ct.handler = zfcp_fc_complete;
- gid_pn->ct.handler_data = &completion;
- gid_pn->ct.req = &gid_pn->sg_req;
- gid_pn->ct.resp = &gid_pn->sg_resp;
- sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
- sizeof(struct zfcp_fc_gid_pn_req));
- sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
- sizeof(struct zfcp_fc_gid_pn_resp));
-
- /* setup nameserver request */
- gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
- gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
- gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
- gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
-
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.handler = zfcp_fc_complete;
+ fc_req->ct_els.handler_data = &completion;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+ sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+ zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+ FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+ gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
- zfcp_fc_ns_gid_pn_eval(gid_pn);
+ zfcp_fc_ns_gid_pn_eval(fc_req);
}
return ret;
}
/**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
- struct zfcp_fc_gid_pn *gid_pn;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
- gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
- if (!gid_pn)
+ fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- memset(gid_pn, 0, sizeof(*gid_pn));
+ memset(fc_req, 0, sizeof(*fc_req));
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
- ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
+ ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
- mempool_free(gid_pn, adapter->pool.gid_pn);
+ mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
@@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
static void zfcp_fc_adisc_handler(void *data)
{
- struct zfcp_fc_els_adisc *adisc = data;
- struct zfcp_port *port = adisc->els.port;
- struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
+ struct zfcp_fc_req *fc_req = data;
+ struct zfcp_port *port = fc_req->ct_els.port;
+ struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
- if (adisc->els.status) {
+ if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1");
@@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
- struct zfcp_fc_els_adisc *adisc;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
int ret;
- adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
- if (!adisc)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- adisc->els.port = port;
- adisc->els.req = &adisc->req;
- adisc->els.resp = &adisc->resp;
- sg_init_one(adisc->els.req, &adisc->adisc_req,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
sizeof(struct fc_els_adisc));
- sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+ sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
sizeof(struct fc_els_adisc));
- adisc->els.handler = zfcp_fc_adisc_handler;
- adisc->els.handler_data = adisc;
+ fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+ fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
- adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
- adisc->adisc_req.adisc_cmd = ELS_ADISC;
- hton24(adisc->adisc_req.adisc_port_id,
- fc_host_port_id(adapter->scsi_host));
+ fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+ fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
return ret;
}
@@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
-static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
- struct scatterlist *sg = &gpn_ft->sg_req;
-
- kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
- zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
-
- kfree(gpn_ft);
-}
+ struct zfcp_fc_req *fc_req;
-static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
-{
- struct zfcp_fc_gpn_ft *gpn_ft;
- struct zfcp_fc_gpn_ft_req *req;
-
- gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
- if (!gpn_ft)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
return NULL;
- req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
- if (!req) {
- kfree(gpn_ft);
- gpn_ft = NULL;
- goto out;
+ if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+ return NULL;
}
- sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
- if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
- zfcp_free_sg_env(gpn_ft, buf_num);
- gpn_ft = NULL;
- }
-out:
- return gpn_ft;
-}
+ sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+ sizeof(struct zfcp_fc_gpn_ft_req));
+ return fc_req;
+}
-static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_bytes)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
- /* prepare CT IU for GPN_FT */
- req->ct_hdr.ct_rev = FC_CT_REV;
- req->ct_hdr.ct_fs_type = FC_FST_DIR;
- req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- req->ct_hdr.ct_options = 0;
- req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
- req->ct_hdr.ct_mr_size = max_bytes / 4;
- req->gpn_ft.fn_domain_id_scope = 0;
- req->gpn_ft.fn_area_id_scope = 0;
+ zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
- /* prepare zfcp_send_ct */
- ct->handler = zfcp_fc_complete;
- ct->handler_data = &completion;
- ct->req = &gpn_ft->sg_req;
- ct->resp = gpn_ft->sg_resp;
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
@@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
list_move_tail(&port->list, lh);
}
-static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_entries)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct scatterlist *sg = gpn_ft->sg_resp;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct scatterlist *sg = &fc_req->sg_rsp;
struct fc_ct_hdr *hdr = sg_virt(sg);
struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
@@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
u32 d_id;
int ret = 0, x, last = 0;
- if (ct->status)
+ if (ct_els->status)
return -EIO;
if (hdr->ct_cmd != FC_FS_ACC) {
@@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
scan_work);
int ret, i;
- struct zfcp_fc_gpn_ft *gpn_ft;
+ struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
@@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
- gpn_ft = zfcp_alloc_sg_env(buf_num);
- if (!gpn_ft)
+ fc_req = zfcp_alloc_sg_env(buf_num);
+ if (!fc_req)
goto out;
for (i = 0; i < 3; i++) {
- ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
+ ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
- ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
+ ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
- zfcp_free_sg_env(gpn_ft, buf_num);
+ zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
}
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ char devno[] = "DEVNO:";
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+ struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+ int ret;
+
+ zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+ sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+ sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&completion);
+ if (ct_els->status)
+ return ct_els->status;
+
+ if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+ !(strstr(gspn_rsp->gspn.fp_name, devno)))
+ snprintf(fc_host_symbolic_name(adapter->scsi_host),
+ FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+ gspn_rsp->gspn.fp_name, devno,
+ dev_name(&adapter->ccw_device->dev),
+ init_utsname()->nodename);
+ else
+ strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+ return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+ struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+ int ret, len;
+
+ zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+ len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+ FC_SYMBOLIC_NAME_SIZE);
+ rspn_req->rspn.fr_name_len = len;
+
+ sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+ sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (!ret)
+ wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ ns_up_work);
+ int ret;
+ struct zfcp_fc_req *fc_req;
+
+ if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+ fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ return;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
+ return;
+
+ ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+ if (ret)
+ goto out_free;
+
+ ret = zfcp_fc_gspn(adapter, fc_req);
+ if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ goto out_ds_put;
+
+ memset(fc_req, 0, sizeof(*fc_req));
+ zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b464ae01086c..4561f3bf7300 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -64,33 +64,16 @@ struct zfcp_fc_gid_pn_req {
} __packed;
/**
- * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN response
*/
-struct zfcp_fc_gid_pn_resp {
+struct zfcp_fc_gid_pn_rsp {
struct fc_ct_hdr ct_hdr;
struct fc_gid_pn_resp gid_pn;
} __packed;
/**
- * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatterlist entry for request data
- * @sg_resp: scatterlist entry for response data
- * @gid_pn_req: GID_PN request data
- * @gid_pn_resp: GID_PN response data
- */
-struct zfcp_fc_gid_pn {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp;
- struct zfcp_fc_gid_pn_req gid_pn_req;
- struct zfcp_fc_gid_pn_resp gid_pn_resp;
- struct zfcp_port *port;
-};
-
-/**
* struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
* @ct_hdr: FC GS common transport header
* @gpn_ft: GPN_FT request
@@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
} __packed;
/**
- * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
* @ct_hdr: FC GS common transport header
- * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ * @gspn: GSPN_ID request
*/
-struct zfcp_fc_gpn_ft_resp {
+struct zfcp_fc_gspn_req {
struct fc_ct_hdr ct_hdr;
- struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+ struct fc_gid_pn_resp gspn;
} __packed;
/**
- * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatter list entry for gpn_ft request
- * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
*/
-struct zfcp_fc_gpn_ft {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
-};
+struct zfcp_fc_gspn_rsp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gspn_resp gspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
/**
- * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
- * @els: data required for issuing els fsf command
- * @req: scatterlist entry for ELS ADISC request
- * @resp: scatterlist entry for ELS ADISC response
- * @adisc_req: ELS ADISC request data
- * @adisc_resp: ELS ADISC response data
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
*/
-struct zfcp_fc_els_adisc {
- struct zfcp_fsf_ct_els els;
- struct scatterlist req;
- struct scatterlist resp;
- struct fc_els_adisc adisc_req;
- struct fc_els_adisc adisc_resp;
+struct zfcp_fc_rspn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_rspn rspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+ struct zfcp_fsf_ct_els ct_els;
+ struct scatterlist sg_req;
+ struct scatterlist sg_rsp;
+ union {
+ struct {
+ struct fc_els_adisc req;
+ struct fc_els_adisc rsp;
+ } adisc;
+ struct {
+ struct zfcp_fc_gid_pn_req req;
+ struct zfcp_fc_gid_pn_rsp rsp;
+ } gid_pn;
+ struct {
+ struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+ struct zfcp_fc_gpn_ft_req req;
+ } gpn_ft;
+ struct {
+ struct zfcp_fc_gspn_req req;
+ struct zfcp_fc_gspn_rsp rsp;
+ } gspn;
+ struct {
+ struct zfcp_fc_rspn_req req;
+ struct fc_ct_hdr rsp;
+ } rspn;
+ } u;
};
/**
@@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm: task management flags to setup task management command
*/
static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+ u8 tm_flags)
{
char tag[2];
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+ if (unlikely(tm_flags)) {
+ fcp->fc_tm_flags = tm_flags;
+ return;
+ }
+
if (scsi_populate_tag_msg(scsi, tag)) {
switch (tag[0]) {
case MSG_ORDERED_TAG:
@@ -226,19 +247,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
}
/**
- * zfcp_fc_fcp_tm - setup FCP command as task management command
- * @fcp: fcp_cmnd to setup
- * @dev: scsi_device where to send the task management command
- * @tm: task management flags to setup tm command
- */
-static inline
-void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
-{
- int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
- fcp->fc_tm_flags |= tm_flags;
-}
-
-/**
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate
* @scsi: SCSI command where to update status and sense buffer
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 60ff9d172c79..a0e05ef65924 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -18,6 +18,8 @@
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
@@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
}
if (likely(req->qtcb))
- kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
+ kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
@@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
zfcp_dbf_hba_fsf_uss("fssrh_1", req);
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
return;
}
@@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break;
}
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
@@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
if (likely(pool))
qtcb = mempool_alloc(pool, GFP_ATOMIC);
else
- qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
+ qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
if (unlikely(!qtcb))
return NULL;
@@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
+ struct page *page;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
- sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
- if (!sr_buf) {
+ page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+ if (!page) {
retval = -ENOMEM;
goto failed_buf;
}
+ sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
@@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
failed_req_send:
req->data = NULL;
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req);
@@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
if (scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
@@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
}
- req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
req->data = scmnd;
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
@@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ddb5800823a9..2a4991d6d4d5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
return SUCCESS;
}
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "zfcp",
+ .queuecommand = zfcp_scsi_queuecommand,
+ .eh_abort_handler = zfcp_scsi_eh_abort_handler,
+ .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+ .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+ .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
+ .slave_alloc = zfcp_scsi_slave_alloc,
+ .slave_configure = zfcp_scsi_slave_configure,
+ .slave_destroy = zfcp_scsi_slave_destroy,
+ .change_queue_depth = zfcp_scsi_change_queue_depth,
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+ .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+ .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+ .shost_attrs = zfcp_sysfs_shost_attrs,
+ .sdev_attrs = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
@@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
- adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+ adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
@@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
- adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+ adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
return 0;
}
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
@@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
-
- return;
}
static struct fc_host_statistics*
@@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
+ .show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
-
-struct zfcp_data zfcp_data = {
- .scsi_host_template = {
- .name = "zfcp",
- .module = THIS_MODULE,
- .proc_name = "zfcp",
- .change_queue_depth = zfcp_scsi_change_queue_depth,
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
- .queuecommand = zfcp_scsi_queuecommand,
- .eh_abort_handler = zfcp_scsi_eh_abort_handler,
- .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
- .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .can_queue = 4096,
- .this_id = -1,
- .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
- .cmd_per_lun = 1,
- .use_clustering = 1,
- .sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
- .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .shost_attrs = zfcp_sysfs_shost_attrs,
- },
-};
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 614a5e114a19..5f94d22c491e 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -361,8 +361,7 @@ fail:
extern int bbc_envctrl_init(struct bbc_i2c_bus *bp);
extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp);
-static int __devinit bbc_i2c_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit bbc_i2c_probe(struct platform_device *op)
{
struct bbc_i2c_bus *bp;
int err, index = 0;
@@ -413,7 +412,7 @@ static const struct of_device_id bbc_i2c_match[] = {
};
MODULE_DEVICE_TABLE(of, bbc_i2c_match);
-static struct of_platform_driver bbc_i2c_driver = {
+static struct platform_driver bbc_i2c_driver = {
.driver = {
.name = "bbc_i2c",
.owner = THIS_MODULE,
@@ -425,12 +424,12 @@ static struct of_platform_driver bbc_i2c_driver = {
static int __init bbc_i2c_init(void)
{
- return of_register_platform_driver(&bbc_i2c_driver);
+ return platform_driver_register(&bbc_i2c_driver);
}
static void __exit bbc_i2c_exit(void)
{
- of_unregister_platform_driver(&bbc_i2c_driver);
+ platform_driver_unregister(&bbc_i2c_driver);
}
module_init(bbc_i2c_init);
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 55f71ea9c418..740da4465447 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -171,8 +171,7 @@ static struct miscdevice d7s_miscdev = {
.fops = &d7s_fops
};
-static int __devinit d7s_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit d7s_probe(struct platform_device *op)
{
struct device_node *opts;
int err = -EINVAL;
@@ -266,7 +265,7 @@ static const struct of_device_id d7s_match[] = {
};
MODULE_DEVICE_TABLE(of, d7s_match);
-static struct of_platform_driver d7s_driver = {
+static struct platform_driver d7s_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -278,12 +277,12 @@ static struct of_platform_driver d7s_driver = {
static int __init d7s_init(void)
{
- return of_register_platform_driver(&d7s_driver);
+ return platform_driver_register(&d7s_driver);
}
static void __exit d7s_exit(void)
{
- of_unregister_platform_driver(&d7s_driver);
+ platform_driver_unregister(&d7s_driver);
}
module_init(d7s_init);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 8ce414e39489..be7b4e56154f 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1028,8 +1028,7 @@ static int kenvctrld(void *__unused)
return 0;
}
-static int __devinit envctrl_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit envctrl_probe(struct platform_device *op)
{
struct device_node *dp;
int index, err;
@@ -1129,7 +1128,7 @@ static const struct of_device_id envctrl_match[] = {
};
MODULE_DEVICE_TABLE(of, envctrl_match);
-static struct of_platform_driver envctrl_driver = {
+static struct platform_driver envctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1141,12 +1140,12 @@ static struct of_platform_driver envctrl_driver = {
static int __init envctrl_init(void)
{
- return of_register_platform_driver(&envctrl_driver);
+ return platform_driver_register(&envctrl_driver);
}
static void __exit envctrl_exit(void)
{
- of_unregister_platform_driver(&envctrl_driver);
+ platform_driver_unregister(&envctrl_driver);
}
module_init(envctrl_init);
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 2b4b4b613c48..73dd4e7afaaa 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -160,8 +160,7 @@ static const struct file_operations flash_fops = {
static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
-static int __devinit flash_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit flash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct device_node *parent;
@@ -207,7 +206,7 @@ static const struct of_device_id flash_match[] = {
};
MODULE_DEVICE_TABLE(of, flash_match);
-static struct of_platform_driver flash_driver = {
+static struct platform_driver flash_driver = {
.driver = {
.name = "flash",
.owner = THIS_MODULE,
@@ -219,12 +218,12 @@ static struct of_platform_driver flash_driver = {
static int __init flash_init(void)
{
- return of_register_platform_driver(&flash_driver);
+ return platform_driver_register(&flash_driver);
}
static void __exit flash_cleanup(void)
{
- of_unregister_platform_driver(&flash_driver);
+ platform_driver_unregister(&flash_driver);
}
module_init(flash_init);
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 1b345be5cc02..ebce9639a26a 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -348,8 +348,7 @@ static void uctrl_get_external_status(struct uctrl_driver *driver)
}
-static int __devinit uctrl_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit uctrl_probe(struct platform_device *op)
{
struct uctrl_driver *p;
int err = -ENOMEM;
@@ -425,7 +424,7 @@ static const struct of_device_id uctrl_match[] = {
};
MODULE_DEVICE_TABLE(of, uctrl_match);
-static struct of_platform_driver uctrl_driver = {
+static struct platform_driver uctrl_driver = {
.driver = {
.name = "uctrl",
.owner = THIS_MODULE,
@@ -438,12 +437,12 @@ static struct of_platform_driver uctrl_driver = {
static int __init uctrl_init(void)
{
- return of_register_platform_driver(&uctrl_driver);
+ return platform_driver_register(&uctrl_driver);
}
static void __exit uctrl_exit(void)
{
- of_unregister_platform_driver(&uctrl_driver);
+ platform_driver_unregister(&uctrl_driver);
}
module_init(uctrl_init);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8616496ffc02..4a1f029c4fe9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS
source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
config SGIWD93_SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2e9a87e8e7d8..7ad0b8a79ae8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
@@ -165,7 +166,7 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_mod-y += scsi_trace.o
-scsi_mod-$(CONFIG_PM_OPS) += scsi_pm.o
+scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 9a5629f94f95..e7cd2fcbe036 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
- cancel_delayed_work(&hostdata->coroutine);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&hostdata->coroutine);
}
/**
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index be5558ab84ea..95ee50385188 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -672,7 +672,7 @@ struct scb_data {
/************************ Target Mode Definitions *****************************/
/*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index e4e651cca3e4..17444bc18bca 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -618,7 +618,7 @@ struct scb_data {
/************************ Target Mode Definitions *****************************/
/*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 3f5a542a7793..e021b4812d58 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -4780,7 +4780,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
SLIST_INIT(&scb_data->sg_maps);
/* Allocate SCB resources */
- scb_data->scbarray = (struct scb *)kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
+ scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
if (scb_data->scbarray == NULL)
return (ENOMEM);
memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 984bd527c6c9..da7b9887ec48 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
@@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index eaaa8813067d..868cc5590145 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
/**
- * beiscsi_conn_get_param - get the iscsi parameter
- * @cls_conn: pointer to iscsi cls conn
+ * beiscsi_ep_get_param - get the iscsi parameter
+ * @ep: pointer to iscsi ep
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns iscsi parameter
*/
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
- struct beiscsi_endpoint *beiscsi_ep;
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
- beiscsi_ep = beiscsi_conn->ep;
- if (!beiscsi_ep) {
- SE_DEBUG(DBG_LVL_1,
- "In beiscsi_conn_get_param , no beiscsi_ep\n");
- return -ENODEV;
- }
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8950a702b9f4..9c532797c29e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading);
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf);
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 79cefbe31367..24e20ba9633c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4277,7 +4277,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
phba->shost->host_no);
- phba->wq = create_workqueue(phba->wq_name);
+ phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
"Failed to allocate work queue\n");
@@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
.set_param = beiscsi_set_param,
- .get_conn_param = beiscsi_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = beiscsi_get_host_param,
.start_conn = beiscsi_conn_start,
@@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.alloc_pdu = beiscsi_alloc_pdu,
.parse_pdu_itt = beiscsi_parse_pdu,
.get_stats = beiscsi_conn_get_stats,
+ .get_ep_param = beiscsi_ep_get_param,
.ep_connect = beiscsi_ep_connect,
.ep_poll = beiscsi_ep_poll,
.ep_disconnect = beiscsi_ep_disconnect,
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
new file mode 100644
index 000000000000..69d031d98469
--- /dev/null
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -0,0 +1,1080 @@
+#ifndef __57XX_FCOE_HSI_LINUX_LE__
+#define __57XX_FCOE_HSI_LINUX_LE__
+
+/*
+ * common data for all protocols
+ */
+struct b577xx_doorbell_hdr {
+ u8 header;
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ */
+struct fcoe_abts_rsp_union {
+ u32 r_ctl;
+ u32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size
+ */
+struct fcoe_bd_ctx {
+ u32 buf_addr_hi;
+ u32 buf_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 buf_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len;
+ u16 rsrv0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rsrv1;
+ u16 flags;
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+ u16 rsrv1;
+#endif
+};
+
+
+struct fcoe_cleanup_flow_info {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 task_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 task_id;
+ u16 reserved1;
+#endif
+ u32 reserved2[7];
+};
+
+
+struct fcoe_fcp_cmd_payload {
+ u32 opaque[8];
+};
+
+struct fcoe_fc_hdr {
+#if defined(__BIG_ENDIAN)
+ u8 cs_ctl;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 r_ctl;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 r_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 seq_id;
+ u8 df_ctl;
+ u16 seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 type;
+ u8 f_ctl[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 f_ctl[3];
+ u8 type;
+#endif
+ u32 parameters;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#endif
+};
+
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ u32 reserved0[2];
+};
+
+union fcoe_cmd_flow_info {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcoe_fc_frame mp_fc_frame;
+};
+
+
+
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ u32 fcp_resid;
+#if defined(__BIG_ENDIAN)
+ u16 retry_delay_timer;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u8 scsi_status_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u16 retry_delay_timer;
+#endif
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+};
+
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+
+struct fcoe_fcp_xfr_rdy_payload {
+ u32 burst_len;
+ u32 data_ro;
+};
+
+struct fcoe_read_flow_info {
+ struct fcoe_fc_hdr fc_data_in_hdr;
+ u32 reserved[2];
+};
+
+struct fcoe_write_flow_info {
+ struct fcoe_fc_hdr fc_data_out_hdr;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+};
+
+union fcoe_rsp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+};
+
+/*
+ * 32 bytes used for general purposes
+ */
+union fcoe_general_task_ctx {
+ union fcoe_cmd_flow_info cmd_info;
+ struct fcoe_read_flow_info read_info;
+ struct fcoe_write_flow_info write_info;
+ union fcoe_rsp_flow_info rsp_info;
+ struct fcoe_cleanup_flow_info cleanup_info;
+ u32 comp_info[8];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters
+ */
+union fcoe_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE
+ */
+struct fcoe_kcqe {
+ u32 fcoe_conn_id;
+ u32 completion_status;
+ u32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE KWQE header
+ */
+struct fcoe_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 1
+ */
+struct fcoe_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 num_tasks;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 task_list_pbl_addr_lo;
+ u32 task_list_pbl_addr_hi;
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rq_num_wqes;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 rq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_wqes;
+ u16 rq_buffer_log_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_buffer_log_size;
+ u16 cq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+ u8 num_sessions_log;
+ u16 mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 2
+ */
+struct fcoe_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 hash_tbl_pbl_addr_lo;
+ u32 hash_tbl_pbl_addr_hi;
+ u32 t2_hash_tbl_addr_lo;
+ u32 t2_hash_tbl_addr_hi;
+ u32 t2_ptr_hash_tbl_addr_lo;
+ u32 t2_ptr_hash_tbl_addr_hi;
+ u32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3
+ */
+struct fcoe_kwqe_init3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 error_bit_map_lo;
+ u32 error_bit_map_hi;
+#if defined(__BIG_ENDIAN)
+ u8 reserved21[3];
+ u8 cached_session_enable;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_session_enable;
+ u8 reserved21[3];
+#endif
+ u32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1
+ */
+struct fcoe_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 fcoe_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 sq_addr_lo;
+ u32 sq_addr_hi;
+ u32 rq_pbl_addr_lo;
+ u32 rq_pbl_addr_hi;
+ u32 rq_first_pbe_addr_lo;
+ u32 rq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 reserved0;
+#endif
+};
+
+/*
+ * FCoE connection offload request 2
+ */
+struct fcoe_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 cq_addr_lo;
+ u32 cq_addr_hi;
+ u32 xferq_addr_lo;
+ u32 xferq_addr_hi;
+ u32 conn_db_addr_lo;
+ u32 conn_db_addr_hi;
+ u32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3
+ */
+struct fcoe_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+#endif
+ u32 reserved;
+ u32 confq_first_pbe_addr_lo;
+ u32 confq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 tx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rx_open_seqs_exch_c3;
+ u8 rx_max_conc_seqs_c3;
+ u16 rx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+#endif
+};
+
+/*
+ * FCoE connection offload request 4
+ */
+struct fcoe_kwqe_conn_offload4 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u8 reserved2;
+ u8 e_d_tov_timer_val;
+#elif defined(__LITTLE_ENDIAN)
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u8 dst_mac_addr_hi16[2];
+#endif
+ u8 dst_mac_addr_lo32[4];
+ u32 lcq_addr_lo;
+ u32 lcq_addr_hi;
+ u32 confq_pbl_base_addr_lo;
+ u32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request
+ */
+struct fcoe_kwqe_conn_enable_disable {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+#endif
+ u8 dst_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dst_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u16 reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_flag;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved3;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 reserved3;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request
+ */
+struct fcoe_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request
+ */
+struct fcoe_kwqe_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request
+ */
+struct fcoe_kwqe_stat {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 stat_params_addr_lo;
+ u32 stat_params_addr_hi;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
+
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 cur_sge_idx;
+ u16 cur_sge_off;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+#endif
+};
+
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+struct fcoe_seq_ctx {
+#if defined(__BIG_ENDIAN)
+ u16 low_seq_cnt;
+ struct fcoe_s_stat_ctx s_stat;
+ u8 seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ u16 low_seq_cnt;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 err_seq_cnt;
+ u16 high_seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 high_seq_cnt;
+ u16 err_seq_cnt;
+#endif
+ u32 low_exp_ro;
+ u32 high_exp_ro;
+};
+
+
+struct fcoe_single_sge_ctx {
+ struct regpair cur_buf_addr;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 cur_buf_rem;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_buf_rem;
+ u16 reserved0;
+#endif
+};
+
+union fcoe_sgl_ctx {
+ struct fcoe_single_sge_ctx single_sge;
+ struct fcoe_mul_sges_ctx mul_sges;
+};
+
+
+
+/*
+ * FCoE SQ element
+ */
+struct fcoe_sqe {
+ u16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+struct fcoe_task_ctx_entry_tx_only {
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry_txwr_rxrd {
+#if defined(__BIG_ENDIAN)
+ u16 verify_tx_seq;
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u16 verify_tx_seq;
+#endif
+};
+
+/*
+ * Common section. Both TX and RX processing might write and read from it in
+ * different flows
+ */
+struct fcoe_task_ctx_entry_tx_rx_cmn {
+ u32 data_2_trns;
+ union fcoe_general_task_ctx general;
+#if defined(__BIG_ENDIAN)
+ u16 tx_low_seq_cnt;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u8 tx_seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_seq_id;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u16 tx_low_seq_cnt;
+#endif
+ u32 common_flags;
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+};
+
+struct fcoe_task_ctx_entry_rxwr_txrd {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+ u16 rx_id;
+#endif
+};
+
+struct fcoe_task_ctx_entry_rx_only {
+ struct fcoe_seq_ctx seq_ctx;
+ struct fcoe_seq_ctx ooo_seq_ctx;
+ u32 rsrv3;
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry {
+ struct fcoe_task_ctx_entry_tx_only tx_wr_only;
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+ struct fcoe_task_ctx_entry_rx_only rx_wr_only;
+ u32 reserved[4];
+};
+
+
+/*
+ * FCoE XFRQ element
+ */
+struct fcoe_xfrqe {
+ u16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE CONFQ element
+ */
+struct fcoe_confqe {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#endif
+ u32 param;
+};
+
+
+/*
+ * FCoE connection database
+ */
+struct fcoe_conn_db {
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 rsrv0;
+#endif
+ u32 rsrv1;
+ struct regpair cq_arm;
+};
+
+
+/*
+ * FCoE CQ element
+ */
+struct fcoe_cqe {
+ u16 wqe;
+#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
+#define FCOE_CQE_CQE_INFO_SHIFT 0
+#define FCOE_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE error/warning reporting entry
+ */
+struct fcoe_err_report_entry {
+ u32 err_warn_bitmap_lo;
+ u32 err_warn_bitmap_hi;
+ u32 tx_buf_off;
+ u32 rx_buf_off;
+ struct fcoe_fc_hdr fc_hdr;
+};
+
+
+/*
+ * FCoE hash table entry (32 bytes)
+ */
+struct fcoe_hash_table_entry {
+#if defined(__BIG_ENDIAN)
+ u8 d_id_0;
+ u8 s_id_2;
+ u8 s_id_1;
+ u8 s_id_0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id_0;
+ u8 s_id_1;
+ u8 s_id_2;
+ u8 d_id_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 dst_mac_addr_hi;
+ u8 d_id_2;
+ u8 d_id_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id_1;
+ u8 d_id_2;
+ u16 dst_mac_addr_hi;
+#endif
+ u32 dst_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 vlan_id;
+ u16 src_mac_addr_hi;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_mac_addr_hi;
+ u16 vlan_id;
+#endif
+ u32 src_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ u8 vlan_flag;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vlan_flag;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+ u32 field_id;
+#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
+#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
+#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
+#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
+};
+
+/*
+ * FCoE pending work request CQE
+ */
+struct fcoe_pend_wq_cqe {
+ u16 wqe;
+#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
+#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
+#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+ u32 fcoe_ver_cnt;
+ u32 fcoe_rx_pkt_cnt;
+ u32 fcoe_rx_byte_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ u32 fc_crc_cnt;
+ u32 eofa_del_cnt;
+ u32 miss_frame_cnt;
+ u32 seq_timeout_cnt;
+ u32 drop_seq_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+ u32 fcp_rx_pkt_cnt;
+ u32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ u32 fcoe_tx_pkt_cnt;
+ u32 fcoe_tx_byte_cnt;
+ u32 fcp_tx_pkt_cnt;
+ u32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+};
+
+
+/*
+ * FCoE t2 hash table entry (64 bytes)
+ */
+struct fcoe_t2_hash_table_entry {
+ struct fcoe_hash_table_entry data;
+ struct regpair next;
+ struct regpair reserved0[3];
+};
+
+/*
+ * FCoE unsolicited CQE
+ */
+struct fcoe_unsolicited_cqe {
+ u16 wqe;
+#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
+#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
+#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
+#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
new file mode 100644
index 000000000000..6a38080e35ed
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -0,0 +1,11 @@
+config SCSI_BNX2X_FCOE
+ tristate "Broadcom NetXtreme II FCoE support"
+ depends on PCI
+ select NETDEVICES
+ select NETDEV_1000
+ select LIBFC
+ select LIBFCOE
+ select CNIC
+ ---help---
+ This driver supports FCoE offload for the Broadcom NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
new file mode 100644
index 000000000000..a92695a25176
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
+
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
new file mode 100644
index 000000000000..df2fc09ba479
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -0,0 +1,511 @@
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
+/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_encode.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/fc_frame.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "57xx_hsi_bnx2fc.h"
+#include "bnx2fc_debug.h"
+#include "../../net/cnic_if.h"
+#include "bnx2fc_constants.h"
+
+#define BNX2FC_NAME "bnx2fc"
+#define BNX2FC_VERSION "1.0.0"
+
+#define PFX "bnx2fc: "
+
+#define BNX2X_DOORBELL_PCI_BAR 2
+
+#define BNX2FC_MAX_BD_LEN 0xffff
+#define BNX2FC_BD_SPLIT_SZ 0x8000
+#define BNX2FC_MAX_BDS_PER_CMD 256
+
+#define BNX2FC_SQ_WQES_MAX 256
+
+#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
+#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
+#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
+
+#define BNX2FC_RQ_WQES_MAX 16
+#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
+
+#define BNX2FC_NUM_MAX_SESS 128
+#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+
+#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
+#define BNX2FC_MIN_PAYLOAD 256
+#define BNX2FC_MAX_PAYLOAD 2048
+
+#define BNX2FC_RQ_BUF_SZ 256
+#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
+
+#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
+#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
+#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
+#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
+#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
+#define BNX2FC_5771X_DB_PAGE_SIZE 128
+
+#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_TASK_SIZE 128
+#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
+#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
+
+#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
+#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
+
+#define BNX2FC_MAX_SEQS 255
+
+#define BNX2FC_READ (1 << 1)
+#define BNX2FC_WRITE (1 << 0)
+
+#define BNX2FC_MIN_XID 0
+#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
+#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
+#define FCOE_MAX_XID \
+ (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_LUN 0xFFFF
+#define BNX2FC_MAX_FCP_TGT 256
+#define BNX2FC_MAX_CMD_LEN 16
+
+#define BNX2FC_TM_TIMEOUT 60 /* secs */
+#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
+
+#define BNX2FC_WAIT_CNT 120
+#define BNX2FC_FW_TIMEOUT (3 * HZ)
+
+#define PORT_MAX 2
+
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+
+/* FC FCP Status */
+#define FC_GOOD 0
+
+#define BNX2FC_RNID_HBA 0x7
+
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
+
+extern struct workqueue_struct *bnx2fc_wq;
+
+struct bnx2fc_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t fp_work_lock;
+};
+
+
+struct bnx2fc_hba {
+ struct list_head link;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+ unsigned long reg_with_cnic;
+ #define BNX2FC_CNIC_REGISTERED 1
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ spinlock_t hba_lock;
+ struct mutex hba_mutex;
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_READY 3
+ u32 flags;
+ unsigned long init_done;
+ #define BNX2FC_FW_INIT_DONE 0
+ #define BNX2FC_CTLR_INIT_DONE 1
+ #define BNX2FC_CREATE_DONE 2
+ struct fcoe_ctlr ctlr;
+ u8 vlan_enabled;
+ int vlan_id;
+ u32 next_conn_id;
+ struct fcoe_task_ctx_entry **task_ctx;
+ dma_addr_t *task_ctx_dma;
+ struct regpair *task_ctx_bd_tbl;
+ dma_addr_t task_ctx_bd_dma;
+
+ int hash_tbl_segment_count;
+ void **hash_tbl_segments;
+ void *hash_tbl_pbl;
+ dma_addr_t hash_tbl_pbl_dma;
+ struct fcoe_t2_hash_table_entry *t2_hash_tbl;
+ dma_addr_t t2_hash_tbl_dma;
+ char *t2_hash_tbl_ptr;
+ dma_addr_t t2_hash_tbl_ptr_dma;
+
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ struct fcoe_statistics_params *stats_buffer;
+ dma_addr_t stats_buf_dma;
+
+ /*
+ * PCI related info.
+ */
+ u16 pci_did;
+ u16 pci_vid;
+ u16 pci_sdid;
+ u16 pci_svid;
+ u16 pci_func;
+ u16 pci_devno;
+
+ struct task_struct *l2_thread;
+
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
+
+ /* destroy handling */
+ struct timer_list destroy_timer;
+ wait_queue_head_t destroy_wait;
+
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+ int num_ofld_sess;
+
+ /* statistics */
+ struct completion stat_req_done;
+};
+
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
+
+struct bnx2fc_cmd_mgr {
+ struct bnx2fc_hba *hba;
+ u16 next_idx;
+ struct list_head *free_list;
+ spinlock_t *free_list_lock;
+ struct io_bdt **io_bdt_pool;
+ struct bnx2fc_cmd **cmds;
+};
+
+struct bnx2fc_rport {
+ struct fcoe_port *port;
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+ u32 fcoe_conn_id;
+ u32 context_id;
+ u32 sid;
+
+ unsigned long flags;
+#define BNX2FC_FLAG_SESSION_READY 0x1
+#define BNX2FC_FLAG_OFFLOADED 0x2
+#define BNX2FC_FLAG_DISABLED 0x3
+#define BNX2FC_FLAG_DESTROYED 0x4
+#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
+#define BNX2FC_FLAG_DESTROY_CMPL 0x6
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
+#define BNX2FC_FLAG_EXPL_LOGO 0x9
+
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+
+ struct fcoe_sqe *sq;
+ dma_addr_t sq_dma;
+ u16 sq_prod_idx;
+ u8 sq_curr_toggle_bit;
+ u32 sq_mem_size;
+
+ struct fcoe_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_cons_idx;
+ u8 cq_curr_toggle_bit;
+ u32 cq_mem_size;
+
+ void *rq;
+ dma_addr_t rq_dma;
+ u32 rq_prod_idx;
+ u32 rq_cons_idx;
+ u32 rq_mem_size;
+
+ void *rq_pbl;
+ dma_addr_t rq_pbl_dma;
+ u32 rq_pbl_size;
+
+ struct fcoe_xfrqe *xferq;
+ dma_addr_t xferq_dma;
+ u32 xferq_mem_size;
+
+ struct fcoe_confqe *confq;
+ dma_addr_t confq_dma;
+ u32 confq_mem_size;
+
+ void *confq_pbl;
+ dma_addr_t confq_pbl_dma;
+ u32 confq_pbl_size;
+
+ struct fcoe_conn_db *conn_db;
+ dma_addr_t conn_db_dma;
+ u32 conn_db_mem_size;
+
+ struct fcoe_sqe *lcq;
+ dma_addr_t lcq_dma;
+ u32 lcq_mem_size;
+
+ void *ofld_req[4];
+ dma_addr_t ofld_req_dma[4];
+ void *enbl_req;
+ dma_addr_t enbl_req_dma;
+
+ spinlock_t tgt_lock;
+ spinlock_t cq_lock;
+ atomic_t num_active_ios;
+ u32 flush_in_prog;
+ unsigned long work_time_slice;
+ unsigned long timestamp;
+ struct list_head free_task_list;
+ struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
+ atomic_t pi;
+ atomic_t ci;
+ struct list_head active_cmd_queue;
+ struct list_head els_queue;
+ struct list_head io_retire_queue;
+ struct list_head active_tm_queue;
+
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+
+ struct timer_list upld_timer;
+ wait_queue_head_t upld_wait;
+};
+
+struct bnx2fc_mp_req {
+ u8 tm_flags;
+
+ u32 req_len;
+ void *req_buf;
+ dma_addr_t req_buf_dma;
+ struct fcoe_bd_ctx *mp_req_bd;
+ dma_addr_t mp_req_bd_dma;
+ struct fc_frame_header req_fc_hdr;
+
+ u32 resp_len;
+ void *resp_buf;
+ dma_addr_t resp_buf_dma;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ dma_addr_t mp_resp_bd_dma;
+ struct fc_frame_header resp_fc_hdr;
+};
+
+struct bnx2fc_els_cb_arg {
+ struct bnx2fc_cmd *aborted_io_req;
+ struct bnx2fc_cmd *io_req;
+ u16 l2_oxid;
+};
+
+/* bnx2fc command structure */
+struct bnx2fc_cmd {
+ struct list_head link;
+ u8 on_active_queue;
+ u8 on_tmf_queue;
+ u8 cmd_type;
+#define BNX2FC_SCSI_CMD 1
+#define BNX2FC_TASK_MGMT_CMD 2
+#define BNX2FC_ABTS 3
+#define BNX2FC_ELS 4
+#define BNX2FC_CLEANUP 5
+ u8 io_req_flags;
+ struct kref refcount;
+ struct fcoe_port *port;
+ struct bnx2fc_rport *tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct bnx2fc_mp_req mp_req;
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct delayed_work timeout_work; /* timer for ULP timeouts */
+ struct completion tm_done;
+ int wait_for_comp;
+ u16 xid;
+ struct fcoe_task_ctx_entry *task;
+ struct io_bdt *bd_tbl;
+ struct fcp_rsp *rsp;
+ size_t data_xfer_len;
+ unsigned long req_flags;
+#define BNX2FC_FLAG_ISSUE_RRQ 0x1
+#define BNX2FC_FLAG_ISSUE_ABTS 0x2
+#define BNX2FC_FLAG_ABTS_DONE 0x3
+#define BNX2FC_FLAG_TM_COMPL 0x4
+#define BNX2FC_FLAG_TM_TIMEOUT 0x5
+#define BNX2FC_FLAG_IO_CLEANUP 0x6
+#define BNX2FC_FLAG_RETIRE_OXID 0x7
+#define BNX2FC_FLAG_EH_ABORT 0x8
+#define BNX2FC_FLAG_IO_COMPL 0x9
+#define BNX2FC_FLAG_ELS_DONE 0xa
+#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+ u32 fcp_resid;
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+ u8 cdb_status; /* SCSI IO status */
+ u8 fcp_status; /* FCP IO status */
+ u8 fcp_rsp_code;
+ u8 scsi_comp_flags;
+};
+
+struct io_bdt {
+ struct bnx2fc_cmd *io_req;
+ struct fcoe_bd_ctx *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+struct bnx2fc_work {
+ struct list_head list;
+ struct bnx2fc_rport *tgt;
+ u16 wqe;
+};
+struct bnx2fc_unsol_els {
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct work_struct unsol_els_work;
+};
+
+
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
+void bnx2fc_cmd_release(struct kref *ref);
+int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe);
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid);
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec);
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid);
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rport,
+ enum fc_rport_event event);
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd);
+
+
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout);
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id);
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid);
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
new file mode 100644
index 000000000000..fe7769173c43
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -0,0 +1,206 @@
+#ifndef __BNX2FC_CONSTANTS_H_
+#define __BNX2FC_CONSTANTS_H_
+
+/**
+ * This file defines HSI constants for the FCoE flows
+ */
+
+/* KWQ/KCQ FCoE layer code */
+#define FCOE_KWQE_LAYER_CODE (7)
+
+/* KWQ (kernel work queue) request op codes */
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+/* KCQ (kernel completion queue) response op codes */
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
+
+/* Unsolicited CQE type */
+#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
+#define FCOE_ERROR_DETECTION_CQE_TYPE 1
+#define FCOE_WARNING_DETECTION_CQE_TYPE 2
+
+/* Task context constants */
+/* Set by the driver after it initializes the task, in case timer services are required */
+#define FCOE_TASK_TX_STATE_INIT 0
+/* In case timer services are required, the state shall be updated by Xstorm
+ * after it starts processing the task. In case no timer facilities are
+ * required, the driver initializes the state to this value */
+#define FCOE_TASK_TX_STATE_NORMAL 1
+/* Task is under abort procedure. Updated in order to stop processing of
+ * pending WQEs on this task */
+#define FCOE_TASK_TX_STATE_ABORT 2
+/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
+#define FCOE_TASK_TX_STATE_ERROR 3
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_TX_STATE_WARNING 4
+/* For completed unsolicited task */
+#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
+/* For exchange cleanup request task */
+#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
+/* For sequence cleanup request task */
+#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
+/* Mark task as aborted and indicate that ABTS was not transmitted */
+#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
+/* Mark task as aborted and indicate that ABTS was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
+/* For completion of the ABTS task. */
+#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
+/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
+ */
+#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
+/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
+
+#define FCOE_TASK_RX_STATE_NORMAL 0
+#define FCOE_TASK_RX_STATE_COMPLETED 1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define FCOE_TASK_RX_STATE_INTER_COMP 2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_RX_STATE_WARNING 3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define FCOE_TASK_RX_STATE_ERROR 4
+/* ABTS ACC arrived; wait for local completion to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
+/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
+/* Special completion indication in case the task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
+/* Special completion indication in case the task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
+/* Special completion indication (in the task that requested the exchange
+ * cleanup) in case the cleaned task is non-valid. */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
+/* Special completion indication (in the task that requested the sequence
+ * cleanup) in case the cleaned task was already returned to normal. */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
+/* Exchange cleanup arrived; wait until the xfer is handled to finally
+ * complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
+/* Xfer handled, wait for exchange cleanup to finally complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
+
+#define FCOE_TASK_TYPE_WRITE 0
+#define FCOE_TASK_TYPE_READ 1
+#define FCOE_TASK_TYPE_MIDPATH 2
+#define FCOE_TASK_TYPE_UNSOLICITED 3
+#define FCOE_TASK_TYPE_ABTS 4
+#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
+#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
+
+#define FCOE_TASK_DEV_TYPE_DISK 0
+#define FCOE_TASK_DEV_TYPE_TAPE 1
+
+#define FCOE_TASK_CLASS_TYPE_3 0
+#define FCOE_TASK_CLASS_TYPE_2 1
+
+/* Everest FCoE connection type */
+#define B577XX_FCOE_CONNECTION_TYPE 4
+
+/* Error codes for Error Reporting in fast path flows */
+/* XFER error codes */
+#define FCOE_ERROR_CODE_XFER_OOO_RO 0
+#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
+#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
+#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
+#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
+#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
+#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
+#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
+#define FCOE_ERROR_CODE_XFER_FCTL 8
+
+/* FCP RSP error codes */
+#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
+#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
+#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
+#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
+#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
+#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
+#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
+#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
+
+/* FCP DATA error codes */
+#define FCOE_ERROR_CODE_DATA_OOO_RO 20
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
+#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
+#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
+#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
+#define FCOE_ERROR_CODE_DATA_FCTL 28
+
+/* Middle path error codes */
+#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
+#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
+#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
+#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
+#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
+
+/* ABTS error codes */
+#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
+#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
+#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
+
+/* Common error codes */
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
+#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
+#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
+#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
+#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
+#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
+#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
+#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
+
+/* Unsolicited Rx error codes */
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
+#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
+
+#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
+#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
+#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
+
+/* Timer error codes */
+#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
+#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
+
+
+#endif /* __BNX2FC_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
new file mode 100644
index 000000000000..7f6aff68cc53
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -0,0 +1,70 @@
+#ifndef __BNX2FC_DEBUG__
+#define __BNX2FC_DEBUG__
+
+/* Log level bit mask */
+#define LOG_IO 0x01 /* SCSI cmd error, cleanup */
+#define LOG_TGT 0x02 /* Session setup, cleanup, etc. */
+#define LOG_HBA 0x04 /* lport events, link, MTU, etc. */
+#define LOG_ELS 0x08 /* ELS logs */
+#define LOG_MISC 0x10 /* FCoE L2 frame related logs */
+#define LOG_ALL 0xff /* Log all messages */
+
+extern unsigned int bnx2fc_debug_level;
+
+#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
+ do { \
+ if (unlikely(bnx2fc_debug_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+ } while (0)
+
+#define BNX2FC_ELS_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_ELS, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_MISC_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_MISC, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
+ do { \
+ if (!io_req || !io_req->port || !io_req->port->lport || \
+ !io_req->port->lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ shost_printk(KERN_ALERT, \
+ (io_req)->port->lport->host, \
+ PFX "xid:0x%x " fmt, \
+ (io_req)->xid, ##arg)); \
+ } while (0)
+
+#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
+ do { \
+ if (!tgt || !tgt->port || !tgt->port->lport || \
+ !tgt->port->lport->host || !tgt->rport) \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ shost_printk(KERN_ALERT, \
+ (tgt)->port->lport->host, \
+ PFX "port:%x " fmt, \
+ (tgt)->rport->port_id, ##arg)); \
+ } while (0)
+
+
+#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
+ do { \
+ if (!lport || !lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ shost_printk(KERN_ALERT, lport->host, \
+ PFX fmt, ##arg)); \
+ } while (0)
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
new file mode 100644
index 000000000000..7a11a255157f
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -0,0 +1,515 @@
+/*
+ * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains helper routines that handle ELS requests
+ * and responses.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
+
+static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req;
+ struct bnx2fc_cmd *rrq_req;
+ int rc = 0;
+
+ BUG_ON(!cb_arg);
+ rrq_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BUG_ON(!orig_io_req);
+ BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
+ orig_io_req->xid, rrq_req->xid);
+
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
+ /*
+ * The ELS request timed out. Clean up the IO with the FW,
+ * drop the completion and remove it from the active_cmd_queue.
+ */
+ BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
+ rrq_req->xid);
+
+ if (rrq_req->on_active_queue) {
+ list_del_init(&rrq_req->link);
+ rrq_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(rrq_req);
+ BUG_ON(rc);
+ }
+ }
+ kfree(cb_arg);
+}
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
+{
+
+ struct fc_els_rrq rrq;
+ struct bnx2fc_rport *tgt = aborted_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ unsigned long start = jiffies;
+ int rc;
+
+ BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
+ aborted_io_req->xid);
+ memset(&rrq, 0, sizeof(rrq));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
+ rc = -ENOMEM;
+ goto rrq_err;
+ }
+
+ cb_arg->aborted_io_req = aborted_io_req;
+
+ rrq.rrq_cmd = ELS_RRQ;
+ hton24(rrq.rrq_s_id, sid);
+ rrq.rrq_ox_id = htons(aborted_io_req->xid);
+ rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+
+retry_rrq:
+ rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
+ bnx2fc_rrq_compl, cb_arg,
+ r_a_tov);
+ if (rc == -ENOMEM) {
+ if (time_after(jiffies, start + (10 * HZ))) {
+ BNX2FC_ELS_DBG("rrq Failed\n");
+ rc = FAILED;
+ goto rrq_err;
+ }
+ msleep(20);
+ goto retry_rrq;
+ }
+rrq_err:
+ if (rc) {
+ BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
+ aborted_io_req->xid);
+ kfree(cb_arg);
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ }
+ return rc;
+}
+
+static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u16 l2_oxid;
+ int frame_len;
+ int rc = 0;
+
+ l2_oxid = cb_arg->l2_oxid;
+ BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
+
+ els_req = cb_arg->io_req;
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
+ /*
+ * The ELS request timed out. Clean up the IO with the FW and
+ * drop the completion; libfc will handle the ELS timeout.
+ */
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(els_req);
+ BUG_ON(rc);
+ }
+ goto free_arg;
+ }
+
+ tgt = els_req->tgt;
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "Unable to alloc mp buf\n");
+ goto free_arg;
+ }
+ hdr_len = sizeof(*fc_hdr);
+ if (hdr_len + resp_len > PAGE_SIZE) {
+ printk(KERN_ERR PFX "l2_els_compl: resp len is "
+ "beyond page size\n");
+ goto free_buf;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+ frame_len = hdr_len + resp_len;
+
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
+
+free_buf:
+ kfree(buf);
+free_arg:
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ /* adisc is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ /* logo is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_rls *rls;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ rls = fc_frame_payload_get(fp, sizeof(*rls));
+ /* rls is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int rc = 0;
+ int task_idx, index;
+ u32 did, sid;
+ u16 xid;
+
+ rc = fc_remote_port_chkready(rport);
+ if (rc) {
+ printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
+ (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
+ printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
+ if (!els_req) {
+ rc = -ENOMEM;
+ goto els_err;
+ }
+
+ els_req->sc_cmd = NULL;
+ els_req->port = port;
+ els_req->tgt = tgt;
+ els_req->cb_func = cb_func;
+ cb_arg->io_req = els_req;
+ els_req->cb_arg = cb_arg;
+
+ mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
+ rc = bnx2fc_init_mp_req(els_req);
+ if (rc == FAILED) {
+ printk(KERN_ALERT PFX "ELS MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -ENOMEM;
+ goto els_err;
+ } else {
+ /* rc SUCCESS */
+ rc = 0;
+ }
+
+ /* Set the data_xfer_len to the size of ELS payload */
+ mp_req->req_len = data_len;
+ els_req->data_xfer_len = mp_req->req_len;
+
+ /* Fill ELS Payload */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+ memcpy(mp_req->req_buf, data, data_len);
+ } else {
+ printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ goto els_err;
+
+ /* Fill FC header */
+ fc_hdr = &(mp_req->req_fc_hdr);
+
+ did = tgt->rport->port_id;
+ sid = tgt->sid;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ /* Obtain exchange id */
+ xid = els_req->xid;
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(els_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_els.. session not ready\n");
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EINVAL;
+ }
+
+ if (timer_msec)
+ bnx2fc_cmd_timer_set(els_req, timer_msec);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ els_req->on_active_queue = 1;
+ list_add_tail(&els_req->link, &tgt->els_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+els_err:
+ return rc;
+}
+
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ u64 *hdr;
+ u64 *temp_hdr;
+
+ BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
+ "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &els_req->req_flags)) {
+ BNX2FC_ELS_DBG("Timer context finished processing this "
+ "els - 0x%x\n", els_req->xid);
+ /* This IO doesn't receive a cleanup completion */
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ return;
+ }
+
+ /* Cancel the timeout_work, as we received the response */
+ if (cancel_delayed_work(&els_req->timeout_work))
+ kref_put(&els_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ }
+
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ /* Parse ELS response */
+ if ((els_req->cb_func) && (els_req->cb_arg)) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+}
+
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ u8 *mac;
+ struct fc_frame_header *fh;
+ u8 op;
+
+ if (IS_ERR(fp))
+ goto done;
+
+ mac = fr_cb(fp)->granted_mac;
+ if (is_zero_ether_addr(mac)) {
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_ELS) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
+ "fh_type != FC_TYPE_ELS\n");
+ fc_frame_free(fp);
+ return;
+ }
+ op = fc_frame_payload_op(fp);
+ if (lport->vport) {
+ if (op == ELS_LS_RJT) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
+ fc_vport_terminate(lport->vport);
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ fip->update_mac(lport, mac);
+done:
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ static u8 zero_mac[ETH_ALEN] = { 0 };
+
+ if (!IS_ERR(fp))
+ fip->update_mac(lport, zero_mac);
+ fc_lport_logo_resp(seq, fp, lport);
+}
+
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (op) {
+ case ELS_FLOGI:
+ case ELS_FDISC:
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
+ fip, timeout);
+ case ELS_LOGO:
+ /* only hook onto fabric logouts, not port logouts */
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ break;
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
+ fip, timeout);
+ }
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
new file mode 100644
index 000000000000..e476e8753079
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -0,0 +1,2535 @@
+/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that interacts with libfc, libfcoe,
+ * cnic modules to create FCoE instances, send/receive non-offloaded
+ * FIP/FCoE packets, listen to link events etc.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static struct list_head adapter_list;
+static u32 adapter_count;
+static DEFINE_MUTEX(bnx2fc_dev_lock);
+DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+#define DRV_MODULE_NAME "bnx2fc"
+#define DRV_MODULE_VERSION BNX2FC_VERSION
+#define DRV_MODULE_RELDATE "Jan 25, 2011"
+
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNX2FC_MAX_QUEUE_DEPTH 256
+#define BNX2FC_MIN_QUEUE_DEPTH 32
+#define FCOE_WORD_TO_BYTE 4
+
+static struct scsi_transport_template *bnx2fc_transport_template;
+static struct scsi_transport_template *bnx2fc_vport_xport_template;
+
+struct workqueue_struct *bnx2fc_wq;
+
+/* bnx2fc needs only one instance of the fcoe_percpu_s structure.
+ * The I/O threads are per CPU, but there is only one L2 thread.
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
+
+static struct cnic_ulp_ops bnx2fc_cnic_cb;
+static struct libfc_function_template bnx2fc_libfc_fcn_templ;
+static struct scsi_host_template bnx2fc_shost_template;
+static struct fc_function_template bnx2fc_transport_function;
+static struct fc_function_template bnx2fc_vport_xport_function;
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static int bnx2fc_destroy(struct net_device *net_device);
+static int bnx2fc_enable(struct net_device *netdev);
+static int bnx2fc_disable(struct net_device *netdev);
+
+static void bnx2fc_recv_frame(struct sk_buff *skb);
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
+static int bnx2fc_net_config(struct fc_lport *lp);
+static int bnx2fc_lport_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv);
+static void bnx2fc_destroy_work(struct work_struct *work);
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport);
+static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static int __init bnx2fc_mod_init(void);
+static void __exit bnx2fc_mod_exit(void);
+
+unsigned int bnx2fc_debug_level;
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2fc_cpu_notifier = {
+ .notifier_call = bnx2fc_cpu_callback,
+};
+
+static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
+{
+ struct fcoe_percpu_s *bg;
+ struct fcoe_rcv_info *fr;
+ struct sk_buff_head *list;
+ struct sk_buff *skb, *next;
+ struct sk_buff *head;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ list = &bg->fcoe_rx_list;
+ head = list->next;
+ for (skb = head; skb != (struct sk_buff *)list;
+ skb = next) {
+ next = skb->next;
+ fr = fcoe_dev_from_skb(skb);
+ if (fr->fr_dev == lp) {
+ __skb_unlink(skb, list);
+ kfree_skb(skb);
+ }
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+}
+
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ int rc;
+ spin_lock(&bnx2fc_global_lock);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+ spin_unlock(&bnx2fc_global_lock);
+
+ return rc;
+}
+
+static void bnx2fc_abort_io(struct fc_lport *lport)
+{
+ /*
+ * This function is a no-op for bnx2fc, but we do
+ * not want to leave it as NULL either, as libfc
+ * can call the default function which is
+ * fc_fcp_abort_io.
+ */
+}
+
+static void bnx2fc_cleanup(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ int i;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&hba->hba_mutex);
+ spin_lock_bh(&hba->hba_lock);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if (tgt) {
+ /* Cleanup IOs belonging to requested vport */
+ if (tgt->port == port) {
+ spin_unlock_bh(&hba->hba_lock);
+ BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
+ bnx2fc_flush_active_ios(tgt);
+ spin_lock_bh(&hba->hba_lock);
+ }
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ mutex_unlock(&hba->hba_mutex);
+}
+
+static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
+ struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_frame_header *fh;
+ int rc = 0;
+
+ fh = fc_frame_header_get(fp);
+ BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
+ "r_ctl = 0x%x\n", rdata->ids.port_id,
+ ntohs(fh->fh_ox_id), fh->fh_r_ctl);
+ if ((fh->fh_type == FC_TYPE_ELS) &&
+ (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_ADISC:
+ rc = bnx2fc_send_adisc(tgt, fp);
+ break;
+ case ELS_LOGO:
+ rc = bnx2fc_send_logo(tgt, fp);
+ break;
+ case ELS_RLS:
+ rc = bnx2fc_send_rls(tgt, fp);
+ break;
+ default:
+ break;
+ }
+ } else if ((fh->fh_type == FC_TYPE_BLS) &&
+ (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
+ BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
+ else {
+ BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
+ "rctl 0x%x thru non-offload path\n",
+ fh->fh_type, fh->fh_r_ctl);
+ return -ENODEV;
+ }
+ if (rc)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+/**
+ * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
+ *
+ * @lport: the associated local port
+ * @fp: the fc_frame to be transmitted
+ */
+static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_frame_header *fh;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct fcoe_hdr *hp;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_dev_stats *stats;
+ u8 sof, eof;
+ u32 crc;
+ unsigned int hlen, tlen, elen;
+ int wlen, rc = 0;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ hba = port->priv;
+
+ fh = fc_frame_header_get(fp);
+
+ skb = fp_skb(fp);
+ if (!lport->link_up) {
+ BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (!hba->ctlr.sel_fcf) {
+ BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+ return 0;
+ }
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ /*
+ * Snoop the frame header to check if the frame is for
+ * an offloaded session
+ */
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both the hba mutex and the hba lock. At least the hba mutex or
+ * the hba lock must be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+ if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ /* This frame is for offloaded session */
+ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
+ "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
+ spin_unlock_bh(&hba->hba_lock);
+ rc = bnx2fc_xmit_l2_frame(tgt, fp);
+ if (rc != -ENODEV) {
+ kfree_skb(skb);
+ return rc;
+ }
+ } else {
+ spin_unlock_bh(&hba->hba_lock);
+ }
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+
+ /* copy port crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ cp = NULL;
+ }
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+ skb->dev = hba->netdev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (hba->ctlr.map_dest)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+
+ if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lport->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+
+ /* update tx stats */
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(lport, skb);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
+ *
+ * @skb: the receive socket buffer
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * This function receives the packet, builds an FC frame and passes it up
+ */
+static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_hba *hba;
+ struct fc_frame_header *fh;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+ unsigned short oxid;
+
+ hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
+ lport = hba->ctlr.lp;
+
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+ goto err;
+ }
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lport;
+ fr->ptype = ptype;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+
+ __skb_queue_tail(&bg->fcoe_rx_list, skb);
+ if (bg->fcoe_rx_list.qlen == 1)
+ wake_up_process(bg->thread);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int bnx2fc_l2_rcv_thread(void *arg)
+{
+ struct fcoe_percpu_s *bg = arg;
+ struct sk_buff *skb;
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ bnx2fc_recv_frame(skb);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+
+static void bnx2fc_recv_frame(struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ struct fc_lport *vn_port;
+ struct fcoe_port *port;
+ u8 *mac = NULL;
+ u8 *dest_mac = NULL;
+ struct fcoe_hdr *hp;
+
+ fr = fcoe_dev_from_skb(skb);
+ lport = fr->fr_dev;
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "Invalid lport struct\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+ mac = eth_hdr(skb)->h_source;
+ dest_mac = eth_hdr(skb)->h_dest;
+
+ /* Pull the header */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (vn_port) {
+ port = lport_priv(vn_port);
+ if (compare_ether_addr(port->data_src_addr, dest_mac)
+ != 0) {
+ BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ }
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ /* Drop FCP data. We don't handle it in the L2 path */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fh->fh_type == FC_TYPE_ELS) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LOGO:
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ /* drop non-FIP LOGO */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ break;
+ }
+ }
+ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING PFX "dropping frame with "
+ "CRC error\n");
+ stats->InvalidCRCCount++;
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ put_cpu();
+ fc_exch_recv(lport, fp);
+}
+
+/**
+ * bnx2fc_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg: ptr to bnx2fc_percpu_info structure
+ */
+int bnx2fc_percpu_io_thread(void *arg)
+{
+ struct bnx2fc_percpu_s *p = arg;
+ struct bnx2fc_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&p->fp_work_lock);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->fp_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_lock_bh(&p->fp_work_lock);
+ }
+ spin_unlock_bh(&p->fp_work_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *bnx2fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_statistics_params *fw_stats;
+ int rc = 0;
+
+ fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
+ if (!fw_stats)
+ return NULL;
+
+ bnx2fc_stats = fc_get_host_stats(shost);
+
+ init_completion(&hba->stat_req_done);
+ if (bnx2fc_send_stat_req(hba))
+ return bnx2fc_stats;
+ rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+ if (!rc) {
+ BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+ return bnx2fc_stats;
+ }
+ bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+ bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
+ bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
+ bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
+ bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+
+ bnx2fc_stats->dumped_frames = 0;
+ bnx2fc_stats->lip_count = 0;
+ bnx2fc_stats->nos_count = 0;
+ bnx2fc_stats->loss_of_sync_count = 0;
+ bnx2fc_stats->loss_of_signal_count = 0;
+ bnx2fc_stats->prim_seq_protocol_err_count = 0;
+
+ return bnx2fc_stats;
+}
+
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct Scsi_Host *shost = lport->host;
+ int rc = 0;
+
+ shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
+ shost->max_lun = BNX2FC_MAX_LUN;
+ shost->max_id = BNX2FC_MAX_FCP_TGT;
+ shost->max_channel = 0;
+ if (lport->vport)
+ shost->transportt = bnx2fc_vport_xport_template;
+ else
+ shost->transportt = bnx2fc_transport_template;
+
+ /* Add the new host to SCSI-ml */
+ rc = scsi_add_host(lport->host, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on scsi_add_host\n");
+ return rc;
+ }
+ if (!lport->vport)
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+ sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
+ BNX2FC_NAME, BNX2FC_VERSION,
+ hba->netdev->name);
+
+ return 0;
+}
+
+static int bnx2fc_mfs_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ u32 mfs;
+ u32 max_mfs;
+
+ mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
+ BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
+ if (mfs > max_mfs)
+ mfs = max_mfs;
+
+ /* Adjust mfs to be a multiple of 256 bytes */
+ mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
+ BNX2FC_MIN_PAYLOAD);
+ mfs = mfs + sizeof(struct fc_frame_header);
+
+ BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
+ if (fc_set_mfs(lport, mfs))
+ return -EINVAL;
+ return 0;
+}
+static void bnx2fc_link_speed_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+
+ if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+ if (ecmd.speed == SPEED_1000)
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ if (ecmd.speed == SPEED_10000)
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ }
+ return;
+}
+static int bnx2fc_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *dev = hba->phys_dev;
+ int rc = 0;
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else {
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * bnx2fc_get_link_state - get network link state
+ *
+ * @hba: adapter instance pointer
+ *
+ * Updates the adapter state flag based on the netdev state
+ */
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+static int bnx2fc_net_config(struct fc_lport *lport)
+{
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ u64 wwnn, wwpn;
+
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ /* require support for get_pauseparam ethtool op. */
+ if (!hba->phys_dev->ethtool_ops ||
+ !hba->phys_dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (bnx2fc_mfs_update(lport))
+ return -EINVAL;
+
+ skb_queue_head_init(&port->fcoe_pending_queue);
+ port->fcoe_pending_queue_active = 0;
+ setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+
+ bnx2fc_link_speed_update(lport);
+
+ if (!lport->vport) {
+ wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+ BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
+ fc_set_wwnn(lport, wwnn);
+
+ wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+ BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
+ fc_set_wwpn(lport, wwpn);
+ }
+
+ return 0;
+}
+
+static void bnx2fc_destroy_timer(unsigned long data)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
+ "Destroy compl not received!!\n");
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+}
+
+/**
+ * bnx2fc_indicate_netevent - Generic netdev event handler
+ *
+ * @context: adapter structure pointer
+ * @event: event type
+ *
+ * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
+ * NETDEV_CHANGEMTU events
+ */
+static void bnx2fc_indicate_netevent(void *context, unsigned long event)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fc_lport *lport = hba->ctlr.lp;
+ struct fc_lport *vport;
+ u32 link_possible = 1;
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
+ hba->netdev->name, event);
+ return;
+ }
+
+ /*
+ * ASSUMPTION:
+ * indicate_netevent cannot be called from cnic unless bnx2fc
+ * does register_device
+ */
+ BUG_ON(!lport);
+
+ BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
+ hba->netdev->name, event);
+
+ switch (event) {
+ case NETDEV_UP:
+ BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
+ hba->adapter_state);
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ printk(KERN_ERR "indicate_netevent: "\
+ "adapter is not UP!!\n");
+ /* fall thru to update mfs if MTU has changed */
+ case NETDEV_CHANGEMTU:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
+ bnx2fc_mfs_update(lport);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ bnx2fc_mfs_update(vport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+
+ case NETDEV_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port down\n");
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port going down\n");
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_CHANGE:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
+ return;
+ }
+
+ bnx2fc_link_speed_update(lport);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ } else {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
+ if (fcoe_ctlr_link_down(&hba->ctlr)) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_HBA_DBG(lport, "indicate_netevent "
+ "num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 1;
+ BNX2FC_HBA_DBG(lport, "waiting for uploads to "
+ "compl proc = %s\n",
+ current->comm);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 0;
+
+ if (signal_pending(current))
+ flush_signals(current);
+ }
+ }
+}
+
+static int bnx2fc_libfc_config(struct fc_lport *lport)
+{
+
+ /* Set the function pointers set by bnx2fc driver */
+ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
+ sizeof(struct libfc_function_template));
+ fc_elsct_init(lport);
+ fc_exch_init(lport);
+ fc_rport_init(lport);
+ fc_disc_init(lport);
+ return 0;
+}
+
+static int bnx2fc_em_config(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
+ FCOE_MAX_XID, NULL)) {
+ printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+ BNX2FC_MAX_XID);
+
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+ fc_exch_mgr_free(lport);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int bnx2fc_lport_config(struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = 3;
+ lport->max_rport_retry_count = 3;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+
+ /* REVISIT: enable when supporting tape devices
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ */
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
+ lport->does_npiv = 1;
+
+ memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
+ lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
+
+ /* alloc stats structure */
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish fc_lport configuration */
+ fc_lport_config(lport);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_fip_recv - handle a received FIP frame.
+ *
+ * @skb: the received skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct bnx2fc_hba *hba;
+ hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
+ fcoe_ctlr_recv(&hba->ctlr, skb);
+ return 0;
+}
+
+/**
+ * bnx2fc_update_src_mac - Update the FCoE data source MAC address
+ *
+ * @lport: The local port
+ * @addr: Unicast MAC address to set as the data source address
+ *
+ * Stores the granted MAC address as the port's FCoE data source
+ * address; it is used as the source MAC for subsequent FCoE
+ * traffic on this port.
+ */
+static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct fcoe_port *port = lport_priv(lport);
+
+ memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * bnx2fc_get_src_mac - return the ethernet source address for an lport
+ *
+ * @lport: libfc port
+ */
+static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
+{
+ struct fcoe_port *port;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ return port->data_src_addr;
+}
+
+/**
+ * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
+ *
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = bnx2fc_from_ctlr(fip)->netdev;
+ dev_queue_xmit(skb);
+}
+
+static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(n_port);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct fc_lport *vn_port;
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "vn ports cannot be created on"
+ "this hba\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+ vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ if (IS_ERR(vn_port)) {
+ printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
+ netdev->name);
+ return -EIO;
+ }
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_lport_init(vn_port);
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+ return 0;
+}
+
+static int bnx2fc_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+ struct fcoe_port *port = lport_priv(vn_port);
+
+ mutex_lock(&n_port->lp_mutex);
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+}
+
+static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+ return 0;
+}
+
+
+static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+{
+ struct net_device *netdev = hba->netdev;
+ struct net_device *physdev = hba->phys_dev;
+ struct netdev_hw_addr *ha;
+ int sel_san_mac = 0;
+
+ /* Bonding devices are not supported */
+ if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+ (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+ (netdev->priv_flags & IFF_MASTER_8023AD)) {
+ return -EOPNOTSUPP;
+ }
+
+ /* setup Source MAC Address */
+ rcu_read_lock();
+ for_each_dev_addr(physdev, ha) {
+ BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
+ ha->type);
+ printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
+ ha->addr[1], ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
+
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(ha->addr))) {
+ memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ sel_san_mac = 1;
+ BNX2FC_MISC_DBG("Found SAN MAC\n");
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sel_san_mac)
+ return -ENODEV;
+
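+ /*
+ * Register receive handlers for the FIP and FCoE ethertypes on this
+ * netdev so the L2 path hands FIP control frames and FCoE data
+ * frames to this driver.
+ */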
+ hba->fip_packet_type.func = bnx2fc_fip_recv;
+ hba->fip_packet_type.type = htons(ETH_P_FIP);
+ hba->fip_packet_type.dev = netdev;
+ dev_add_pack(&hba->fip_packet_type);
+
+ hba->fcoe_packet_type.func = bnx2fc_rcv;
+ hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ hba->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&hba->fcoe_packet_type);
+
+ return 0;
+}
+
+static int bnx2fc_attach_transport(void)
+{
+ bnx2fc_transport_template =
+ fc_attach_transport(&bnx2fc_transport_function);
+
+ if (bnx2fc_transport_template == NULL) {
+ printk(KERN_ERR PFX "Failed to attach FC transport\n");
+ return -ENODEV;
+ }
+
+ bnx2fc_vport_xport_template =
+ fc_attach_transport(&bnx2fc_vport_xport_function);
+ if (bnx2fc_vport_xport_template == NULL) {
+ printk(KERN_ERR PFX
+ "Failed to attach FC transport for vport\n");
+ fc_release_transport(bnx2fc_transport_template);
+ bnx2fc_transport_template = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void bnx2fc_release_transport(void)
+{
+ fc_release_transport(bnx2fc_transport_template);
+ fc_release_transport(bnx2fc_vport_xport_template);
+ bnx2fc_transport_template = NULL;
+ bnx2fc_vport_xport_template = NULL;
+}
+
+static void bnx2fc_interface_release(struct kref *kref)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+
+ hba = container_of(kref, struct bnx2fc_hba, kref);
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n");
+
+ netdev = hba->netdev;
+ phys_dev = hba->phys_dev;
+
+ /* tear-down FIP controller */
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
+ fcoe_ctlr_destroy(&hba->ctlr);
+
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ dev_put(netdev);
+ module_put(THIS_MODULE);
+}
+
+static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+{
+ kref_get(&hba->kref);
+}
+
+static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+{
+ kref_put(&hba->kref, bnx2fc_interface_release);
+}
+
+static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+{
+ bnx2fc_unbind_pcidev(hba);
+ kfree(hba);
+}
+
+/**
+ * bnx2fc_interface_create - create a new fcoe hba instance
+ *
+ * @cnic: pointer to cnic device
+ *
+ * Allocates the bnx2fc_hba structure for the given cnic device and
+ * binds it to the underlying PCI device.
+ */
+static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+{
+ struct bnx2fc_hba *hba;
+ int rc;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR PFX "Unable to allocate hba structure\n");
+ return NULL;
+ }
+ spin_lock_init(&hba->hba_lock);
+ mutex_init(&hba->hba_mutex);
+
+ hba->cnic = cnic;
+ rc = bnx2fc_bind_pcidev(hba);
+ if (rc)
+ goto bind_err;
+ hba->phys_dev = cnic->netdev;
+ /* will get overwritten after we do vlan discovery */
+ hba->netdev = hba->phys_dev;
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ init_waitqueue_head(&hba->destroy_wait);
+
+ return hba;
+bind_err:
+ printk(KERN_ERR PFX "create_interface: bind error\n");
+ kfree(hba);
+ return NULL;
+}
+
+static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
+ enum fip_state fip_mode)
+{
+ int rc = 0;
+ struct net_device *netdev = hba->netdev;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+
+ dev_hold(netdev);
+ kref_init(&hba->kref);
+
+ hba->flags = 0;
+
+ /* Initialize FIP */
+ memset(fip, 0, sizeof(*fip));
+ fcoe_ctlr_init(fip, fip_mode);
+ hba->ctlr.send = bnx2fc_fip_send;
+ hba->ctlr.update_mac = bnx2fc_update_src_mac;
+ hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
+
+ rc = bnx2fc_netdev_setup(hba);
+ if (rc)
+ goto setup_err;
+
+ hba->next_conn_id = 0;
+
+ memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
+ hba->num_ofld_sess = 0;
+
+ return 0;
+
+setup_err:
+ fcoe_ctlr_destroy(&hba->ctlr);
+ dev_put(netdev);
+ bnx2fc_interface_put(hba);
+ return rc;
+}
+
+/**
+ * bnx2fc_if_create - Create FCoE instance on a given interface
+ *
+ * @hba: FCoE interface to create a local port on
+ * @parent: Device pointer to be the parent in sysfs for the SCSI host
+ * @npiv: Indicates whether this port is an NPIV vport
+ *
+ * Creates a fc_lport instance and a Scsi_Host instance and configures them.
+ *
+ * Returns: Allocated fc_lport or an error pointer
+ */
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv)
+{
+ struct fc_lport *lport = NULL;
+ struct fcoe_port *port;
+ struct Scsi_Host *shost;
+ struct fc_vport *vport = dev_to_vport(parent);
+ int rc = 0;
+
+ /* Allocate Scsi_Host structure */
+ if (!npiv) {
+ lport = libfc_host_alloc(&bnx2fc_shost_template,
+ sizeof(struct fcoe_port));
+ } else {
+ lport = libfc_vport_create(vport,
+ sizeof(struct fcoe_port));
+ }
+
+ if (!lport) {
+ printk(KERN_ERR PFX "could not allocate scsi host structure\n");
+ return NULL;
+ }
+ shost = lport->host;
+ port = lport_priv(lport);
+ port->lport = lport;
+ port->priv = hba;
+ INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+ if (rc)
+ goto lp_config_err;
+
+ if (npiv) {
+ vport = dev_to_vport(parent);
+ printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
+ vport->node_name, vport->port_name);
+ fc_set_wwnn(lport, vport->node_name);
+ fc_set_wwpn(lport, vport->port_name);
+ }
+ /* Configure netdev and networking properties of the lport */
+ rc = bnx2fc_net_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
+ goto lp_config_err;
+ }
+
+ rc = bnx2fc_shost_config(lport, parent);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ hba->netdev->name);
+ goto lp_config_err;
+ }
+
+ /* Initialize the libfc library */
+ rc = bnx2fc_libfc_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ goto shost_err;
+ }
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+
+ /* Allocate exchange manager */
+ if (!npiv) {
+ rc = bnx2fc_em_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
+ goto shost_err;
+ }
+ }
+
+ bnx2fc_interface_get(hba);
+ return lport;
+
+shost_err:
+ scsi_remove_host(shost);
+lp_config_err:
+ scsi_host_put(lport->host);
+ return NULL;
+}
+
+static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+{
+ /* Don't listen for Ethernet packets anymore */
+ __dev_remove_pack(&hba->fcoe_packet_type);
+ __dev_remove_pack(&hba->fip_packet_type);
+ synchronize_net();
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ bnx2fc_interface_put(hba);
+
+ /* Free queued packets for the receive thread */
+ bnx2fc_clean_rx_queue(lport);
+
+ /* Detach from scsi-ml */
+ fc_remove_host(lport->host);
+ scsi_remove_host(lport->host);
+
+ /*
+ * Note that only the physical lport will have the exchange manager.
+ * For vports, this call is a no-op.
+ */
+ fc_exch_mgr_free(lport);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(lport);
+
+ /* Release Scsi_Host */
+ scsi_host_put(lport->host);
+}
+
+/**
+ * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
+ *
+ * @netdev: The net_device the FCoE interface is on
+ *
+ * Called from the fcoe transport.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_destroy(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba = NULL;
+ struct net_device *phys_dev;
+ int rc = 0;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ mutex_lock(&bnx2fc_dev_lock);
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+#endif
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
+ goto netdev_err;
+ }
+
+ bnx2fc_netdev_cleanup(hba);
+
+ bnx2fc_stop(hba);
+
+ bnx2fc_if_destroy(hba->ctlr.lp);
+
+ destroy_workqueue(hba->timer_work_queue);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ bnx2fc_fw_destroy(hba);
+
+ clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+netdev_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+static void bnx2fc_destroy_work(struct work_struct *work)
+{
+ struct fcoe_port *port;
+ struct fc_lport *lport;
+
+ port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+
+ BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+
+ bnx2fc_port_shutdown(lport);
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_if_destroy(lport);
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+}
+
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ bnx2fc_free_fw_resc(hba);
+ bnx2fc_free_task_ctx(hba);
+}
+
+/**
+ * bnx2fc_bind_adapter_devices - allocate task context and firmware resources
+ *
+ * @hba: Adapter instance
+ */
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ if (bnx2fc_setup_task_ctx(hba))
+ goto mem_err;
+
+ if (bnx2fc_setup_fw_resc(hba))
+ goto mem_err;
+
+ return 0;
+mem_err:
+ bnx2fc_unbind_adapter_devices(hba);
+ return -ENOMEM;
+}
+
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
+{
+ struct cnic_dev *cnic;
+
+ if (!hba->cnic) {
+ printk(KERN_ERR PFX "cnic is NULL\n");
+ return -ENODEV;
+ }
+ cnic = hba->cnic;
+ hba->pcidev = cnic->pcidev;
+ if (hba->pcidev)
+ pci_dev_get(hba->pcidev);
+
+ return 0;
+}
+
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
+{
+ if (hba->pcidev)
+ pci_dev_put(hba->pcidev);
+ hba->pcidev = NULL;
+}
+
+
+
+/**
+ * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ * initiates the firmware handshake to enable/initialize on-chip FCoE
+ * components.
+ * This bnx2fc - cnic interface API callback is used after the following
+ * conditions are met:
+ * a) the underlying network interface is up (marked by the NETDEV_UP
+ * event from netdev)
+ * b) the bnx2fc adapter structure is registered.
+ */
+static void bnx2fc_ulp_start(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct fc_lport *lport = hba->ctlr.lp;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ goto start_disc;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+ bnx2fc_fw_init(hba);
+
+start_disc:
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ BNX2FC_MISC_DBG("bnx2fc started.\n");
+
+ /* Kick off Fabric discovery*/
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(hba);
+ }
+}
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport)
+{
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ fc_fabric_logoff(lport);
+ fc_lport_destroy(lport);
+}
+
+static void bnx2fc_stop(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ struct fc_lport *vport;
+
+ BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
+ hba->init_done);
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
+ test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ lport = hba->ctlr.lp;
+ bnx2fc_port_shutdown(lport);
+ BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
+ "offloaded sessions\n",
+ hba->num_ofld_sess);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(lport);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+ }
+}
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
+{
+#define BNX2FC_INIT_POLL_TIME (1000 / HZ)
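+ /*
+ * Poll in 1000/HZ millisecond steps; with HZ iterations in the loop
+ * below, this bounds the wait for ADAPTER_STATE_UP to roughly one
+ * second.
+ */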
+ int rc = -1;
+ int i = HZ;
+
+ rc = bnx2fc_bind_adapter_devices(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnx2fc_send_fw_fcoe_init_msg(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
+ goto err_unbind;
+ }
+
+ /*
+ * Wait until the adapter init message is complete, and adapter
+ * state is UP.
+ */
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ msleep(BNX2FC_INIT_POLL_TIME);
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
+ printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
+ "Ignoring...\n",
+ hba->cnic->netdev->name);
+ rc = -1;
+ goto err_unbind;
+ }
+
+
+ /* Mark HBA to indicate that the FW INIT is done */
+ set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+ return 0;
+
+err_unbind:
+ bnx2fc_unbind_adapter_devices(hba);
+err_out:
+ return rc;
+}
+
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
+{
+ if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
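+ /*
+ * Arm a timeout so the wait below is bounded even if the
+ * firmware never signals destroy completion (the timer
+ * handler is expected to wake destroy_wait).
+ */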
+ init_timer(&hba->destroy_timer);
+ hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
+ jiffies;
+ hba->destroy_timer.function = bnx2fc_destroy_timer;
+ hba->destroy_timer.data = (unsigned long)hba;
+ add_timer(&hba->destroy_timer);
+ wait_event_interruptible(hba->destroy_wait,
+ (hba->flags &
+ BNX2FC_FLAG_DESTROY_CMPL));
+ /* This should never happen */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&hba->destroy_timer);
+ }
+ bnx2fc_unbind_adapter_devices(hba);
+ }
+}
+
+/**
+ * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * Driver checks if the adapter is already in shutdown mode; if not, it
+ * starts the shutdown process.
+ */
+static void bnx2fc_ulp_stop(void *handle)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+
+ printk(KERN_ERR "ULP_STOP\n");
+
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_stop(hba);
+ bnx2fc_fw_destroy(hba);
+ mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ int wait_cnt = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* Kick off FIP/FLOGI */
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "Init not done yet\n");
+ return;
+ }
+
+ lport = hba->ctlr.lp;
+ BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
+
+ if (!bnx2fc_link_ok(lport)) {
+ BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ }
+
+ /* wait for the FCF to be selected before issuing FLOGI */
+ while (!hba->ctlr.sel_fcf) {
+ msleep(250);
+ /* give up after 3 secs */
+ if (++wait_cnt > 12)
+ break;
+ }
+ fc_lport_init(lport);
+ fc_fabric_login(lport);
+}
+
+
+/**
+ * bnx2fc_ulp_init - Initialize an adapter instance
+ *
+ * @dev: cnic device handle
+ * Called from cnic_register_driver() context to initialize all
+ * enumerated cnic devices. This routine allocates adapter structure
+ * and other device specific resources.
+ */
+static void bnx2fc_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+ int rc = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* bnx2fc works only when bnx2x is loaded */
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
+ " flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ /* Configure FCoE interface */
+ hba = bnx2fc_interface_create(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "hba initialization failed\n");
+ return;
+ }
+
+ /* Add HBA to the adapter list */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_add_tail(&hba->link, &adapter_list);
+ adapter_count++;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+ rc = dev->register_device(dev, CNIC_ULP_FCOE,
+ (void *) hba);
+ if (rc)
+ printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+ else
+ set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+static int bnx2fc_disable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+ } else {
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(hba->ctlr.lp);
+ }
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+
+static int bnx2fc_enable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
+ } else if (!bnx2fc_link_ok(hba->ctlr.lp))
+ fcoe_ctlr_link_up(&hba->ctlr);
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_create - Create bnx2fc FCoE interface
+ *
+ * @netdev: The net_device (VLAN device) to create the FCoE interface on
+ * @fip_mode: The FIP mode of this interface
+ *
+ * Called from the fcoe transport.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct fc_lport *lport;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+ int vlan_id;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
+ if (fip_mode != FIP_MODE_FABRIC) {
+ printk(KERN_ERR "fip mode not FABRIC\n");
+ return -EIO;
+ }
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR "trying for rtnl_lock\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto mod_err;
+ }
+#endif
+
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto mod_err;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ phys_dev = vlan_dev_real_dev(netdev);
+ vlan_id = vlan_dev_vlan_id(netdev);
+ } else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ rc = bnx2fc_fw_init(hba);
+ if (rc)
+ goto netdev_err;
+ }
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ rc = -EEXIST;
+ goto netdev_err;
+ }
+
+ /* update netdev with vlan netdev */
+ hba->netdev = netdev;
+ hba->vlan_id = vlan_id;
+ hba->vlan_enabled = 1;
+
+ rc = bnx2fc_interface_setup(hba, fip_mode);
+ if (rc) {
+ printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+ goto ifput_err;
+ }
+
+ hba->timer_work_queue =
+ create_singlethread_workqueue("bnx2fc_timer_wq");
+ if (!hba->timer_work_queue) {
+ printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
+ rc = -EINVAL;
+ goto ifput_err;
+ }
+
+ lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+ if (!lport) {
+ printk(KERN_ERR PFX "Failed to create interface (%s)\n",
+ netdev->name);
+ bnx2fc_netdev_cleanup(hba);
+ rc = -EINVAL;
+ goto if_create_err;
+ }
+
+ lport->boot_time = jiffies;
+
+ /* Make this master N_port */
+ hba->ctlr.lp = lport;
+
+ set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+ printk(KERN_ERR PFX "create: START DISC\n");
+ bnx2fc_start_disc(hba);
+ /*
+ * Release from kref_init in bnx2fc_interface_setup, on success
+ * lport should be holding a reference taken in bnx2fc_if_create
+ */
+ bnx2fc_interface_put(hba);
+ /* put netdev that was held while calling dev_get_by_name */
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return 0;
+
+if_create_err:
+ destroy_workqueue(hba->timer_work_queue);
+ifput_err:
+ bnx2fc_interface_put(hba);
+netdev_err:
+ module_put(THIS_MODULE);
+mod_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ *
+ * @cnic: Pointer to cnic device instance
+ *
+ **/
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->cnic == cnic)
+ return hba;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->phys_dev == phys_dev)
+ return hba;
+ }
+ printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+ return NULL;
+}
+
+/**
+ * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev: cnic device handle
+ */
+static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+ hba = bnx2fc_find_hba_for_cnic(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n",
+ dev);
+ mutex_unlock(&bnx2fc_dev_lock);
+ return;
+ }
+
+ list_del_init(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ /* destroy not called yet; clean up the interface now */
+ bnx2fc_netdev_cleanup(hba);
+ bnx2fc_if_destroy(hba->ctlr.lp);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+}
+
+/**
+ * bnx2fc_fcoe_reset - Resets the fcoe
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ fc_lport_reset(lport);
+ return 0;
+}
+
+
+static bool bnx2fc_match(struct net_device *netdev)
+{
+ mutex_lock(&bnx2fc_dev_lock);
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ struct net_device *phys_dev = vlan_dev_real_dev(netdev);
+
+ if (bnx2fc_hba_lookup(phys_dev)) {
+ mutex_unlock(&bnx2fc_dev_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+ return false;
+}
+
+
+static struct fcoe_transport bnx2fc_transport = {
+ .name = {"bnx2fc"},
+ .attached = false,
+ .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .match = bnx2fc_match,
+ .create = bnx2fc_create,
+ .destroy = bnx2fc_destroy,
+ .enable = bnx2fc_enable,
+ .disable = bnx2fc_disable,
+};
+
+/**
+ * bnx2fc_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2fc_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2fc_percpu, cpu);
+
+ thread = kthread_create(bnx2fc_percpu_io_thread,
+ (void *)p,
+ "bnx2fc_thread/%d", cpu);
+ /* bind thread to the cpu */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2fc_work *work, *tmp;
+
+ BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&p->fp_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+
+ /* Free all work in the list */
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->fp_work_lock);
+
+ if (thread)
+ kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ int rc = 0;
+ unsigned int cpu = 0;
+ struct bnx2fc_percpu_s *p;
+
+ printk(KERN_INFO PFX "%s", version);
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&bnx2fc_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&adapter_list);
+ mutex_init(&bnx2fc_dev_lock);
+ adapter_count = 0;
+
+ /* Attach FC transport template */
+ rc = bnx2fc_attach_transport();
+ if (rc)
+ goto detach_ft;
+
+ bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ if (!bnx2fc_wq) {
+ rc = -ENOMEM;
+ goto release_bt;
+ }
+
+ bg = &bnx2fc_global;
+ skb_queue_head_init(&bg->fcoe_rx_list);
+ l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
+ if (IS_ERR(l2_thread)) {
+ rc = PTR_ERR(l2_thread);
+ goto free_wq;
+ }
+ wake_up_process(l2_thread);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ bg->thread = l2_thread;
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->fp_work_lock);
+ }
+
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_create(cpu);
+ }
+
+ /* Register CPU hotplug notifier so Rx threads follow CPUs going on/offline */
+ register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(bnx2fc_wq);
+release_bt:
+ bnx2fc_release_transport();
+detach_ft:
+ fcoe_transport_detach(&bnx2fc_transport);
+out:
+ return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+ LIST_HEAD(to_be_deleted);
+ struct bnx2fc_hba *hba, *next;
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ struct sk_buff *skb;
+ unsigned int cpu = 0;
+
+ /*
+ * NOTE: Since cnic's register_driver routine takes rtnl_lock,
+ * rtnl_lock has higher precedence than bnx2fc_dev_lock.
+ * unregister_device() cannot be called with bnx2fc_dev_lock
+ * held.
+ */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_splice(&adapter_list, &to_be_deleted);
+ INIT_LIST_HEAD(&adapter_list);
+ adapter_count = 0;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Unregister with cnic */
+ list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
+ list_del_init(&hba->link);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
+ hba, atomic_read(&hba->kref.refcount));
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
+ &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+ }
+ cnic_unregister_driver(CNIC_ULP_FCOE);
+
+ /* Destroy global thread */
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ l2_thread = bg->thread;
+ bg->thread = NULL;
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ if (l2_thread)
+ kthread_stop(l2_thread);
+
+ unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ /* Destroy per cpu threads */
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_destroy(cpu);
+ }
+
+ destroy_workqueue(bnx2fc_wq);
+ /*
+ * detach from scsi transport
+ * must happen after all destroys are done
+ */
+ bnx2fc_release_transport();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&bnx2fc_transport);
+}
+
+module_init(bnx2fc_mod_init);
+module_exit(bnx2fc_mod_exit);
+
+static struct fc_function_template bnx2fc_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = bnx2fc_get_host_stats,
+
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .vport_create = bnx2fc_vport_create,
+ .vport_delete = bnx2fc_vport_destroy,
+ .vport_disable = bnx2fc_vport_disable,
+};
+
+static struct fc_function_template bnx2fc_vport_xport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+ .terminate_rport_io = fc_rport_terminate_io,
+};
+
+/**
+ * scsi_host_template structure used while registering with SCSI-ml
+ */
+static struct scsi_host_template bnx2fc_shost_template = {
+ .module = THIS_MODULE,
+ .name = "Broadcom Offload FCoE Initiator",
+ .queuecommand = bnx2fc_queuecommand,
+ .eh_abort_handler = bnx2fc_eh_abort, /* abts */
+ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
+ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = fc_change_queue_depth,
+ .change_queue_type = fc_change_queue_type,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
+ .max_sectors = 512,
+};
+
+static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+ .frame_send = bnx2fc_xmit,
+ .elsct_send = bnx2fc_elsct_send,
+ .fcp_abort_io = bnx2fc_abort_io,
+ .fcp_cleanup = bnx2fc_cleanup,
+ .rport_event_callback = bnx2fc_rport_event_handler,
+};
+
+/**
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ * structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+ .owner = THIS_MODULE,
+ .cnic_init = bnx2fc_ulp_init,
+ .cnic_exit = bnx2fc_ulp_exit,
+ .cnic_start = bnx2fc_ulp_start,
+ .cnic_stop = bnx2fc_ulp_stop,
+ .indicate_kcqes = bnx2fc_indicate_kcqe,
+ .indicate_netevent = bnx2fc_indicate_netevent,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000000..4f4096836742
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,1868 @@
+/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code for the low level functions that interact
+ * with the 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *conn_destroy);
+
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_stat stat_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+ stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+ stat_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+ stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba: adapter structure pointer
+ *
+ * Send down the FCoE firmware init KWQEs which initiate the initial handshake
+ * with the f/w.
+ *
+ */
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_init1 fcoe_init1;
+ struct fcoe_kwqe_init2 fcoe_init2;
+ struct fcoe_kwqe_init3 fcoe_init3;
+ struct kwqe *kwqe_arr[3];
+ int num_kwqes = 3;
+ int rc = 0;
+
+ if (!hba->cnic) {
+ printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+ return -ENODEV;
+ }
+
+ /* fill init1 KWQE */
+ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
+ fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
+ fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+ fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
+ fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
+ fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
+ fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
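+ /* 64-bit DMA addresses are handed to the firmware as lo/hi 32-bit halves */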
+ fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
+ fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
+ fcoe_init1.task_list_pbl_addr_hi =
+ (u32) ((u64) hba->task_ctx_bd_dma >> 32);
+ fcoe_init1.mtu = hba->netdev->mtu;
+
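+ /* The firmware expects the page size as a log2 value, hence PAGE_SHIFT */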
+ fcoe_init1.flags = (PAGE_SHIFT <<
+ FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
+
+ fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
+
+ /* fill init2 KWQE */
+ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
+ fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
+ fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
+ fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
+ ((u64) hba->hash_tbl_pbl_dma >> 32);
+
+ fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
+ fcoe_init2.t2_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_dma >> 32);
+
+ fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
+ fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
+
+ fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
+
+ /* fill init3 KWQE */
+ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
+ fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
+ fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ fcoe_init3.error_bit_map_lo = 0xffffffff;
+ fcoe_init3.error_bit_map_hi = 0xffffffff;
+
+
+ kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
+ kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
+ kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_destroy fcoe_destroy;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = -1;
+
+ /* fill destroy KWQE */
+ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
+ fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
+ fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_hba *hba = port->priv;
+ struct kwqe *kwqe_arr[4];
+ struct fcoe_kwqe_conn_offload1 ofld_req1;
+ struct fcoe_kwqe_conn_offload2 ofld_req2;
+ struct fcoe_kwqe_conn_offload3 ofld_req3;
+ struct fcoe_kwqe_conn_offload4 ofld_req4;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 4;
+ u32 port_id;
+ int rc = 0;
+ u16 conn_id;
+
+ /* Initialize offload request 1 structure */
+ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
+
+ ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+
+ conn_id = (u16)tgt->fcoe_conn_id;
+ ofld_req1.fcoe_conn_id = conn_id;
+
+
+ ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
+ ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
+
+ ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
+ ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
+
+ ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
+ ofld_req1.rq_first_pbe_addr_hi =
+ (u32)((u64) tgt->rq_dma >> 32);
+
+ ofld_req1.rq_prod = 0x8000;
+
+ /* Initialize offload request 2 structure */
+ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
+
+ ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
+
+ ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
+ ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
+
+ ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
+ ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
+
+ ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
+ ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
+
+ /* Initialize offload request 3 structure */
+ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
+
+ ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
+ ofld_req3.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
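+ /*
+ * Tag offloaded traffic with the interface VLAN; priority 3 is the
+ * 802.1p priority conventionally used for FCoE.
+ */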
+ ofld_req3.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
+ ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id == 0) {
+ BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Store the s_id of the initiator for further reference. This will
+ * be used during disable/destroy processing on link down, since the
+ * port_id is reset to 0 when the lport is reset.
+ */
+ tgt->sid = port_id;
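+ /* FC IDs are 24 bits wide; split into three bytes, LSB first */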
+ ofld_req3.s_id[0] = (port_id & 0x000000FF);
+ ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ ofld_req3.d_id[0] = (port_id & 0x000000FF);
+ ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ ofld_req3.tx_total_conc_seqs = rdata->max_seq;
+
+ ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
+ ofld_req3.rx_max_fc_pay_len = lport->mfs;
+
+ ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_open_seqs_exch_c3 = 1;
+
+ ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
+ ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
+
+ /* set mul_n_port_ids supported flag to 0, until it is supported */
+ ofld_req3.flags = 0;
+ /*
+ ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
+ */
+ /* Info from PLOGI response */
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
+
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+
+ /* vlan flag */
+ ofld_req3.flags |= (hba->vlan_enabled <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
+
+ /* C2_VALID and ACK flags are not set as they are not supported */
+
+
+ /* Initialize offload request 4 structure */
+ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
+ ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
+ ofld_req4.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
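+ /*
+ * libfc keeps e_d_tov in milliseconds; the firmware timer field
+ * appears to use 20ms units, hence the division by 20.
+ */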
+ ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
+
+
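+ /*
+ * MAC addresses are passed to the firmware split into a 32-bit low
+ * part and a 16-bit high part, with the bytes reversed.
+ */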
+ ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
+ ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
+ ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
+
+ ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
+ ofld_req4.confq_pbl_base_addr_hi =
+ (u32)((u64) tgt->confq_pbl_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ kwqe_arr[2] = (struct kwqe *) &ofld_req3;
+ kwqe_arr[3] = (struct kwqe *) &ofld_req4;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct kwqe *kwqe_arr[2];
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable enbl_req;
+ struct fc_lport *lport = port->lport;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&enbl_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
+ enbl_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id != tgt->sid) {
+ printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
+ "sid = 0x%x\n", port_id, tgt->sid);
+ port_id = tgt->sid;
+ }
+ enbl_req.s_id[0] = (port_id & 0x000000FF);
+ enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ enbl_req.d_id[0] = (port_id & 0x000000FF);
+ enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ enbl_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ enbl_req.vlan_flag = hba->vlan_enabled;
+ enbl_req.context_id = tgt->context_id;
+ enbl_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &enbl_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_disable_req - initiates FCoE Session disable
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable disable_req;
+ struct kwqe *kwqe_arr[2];
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&disable_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
+ disable_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = tgt->sid;
+ disable_req.s_id[0] = (port_id & 0x000000FF);
+ disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+
+ port_id = rport->port_id;
+ disable_req.d_id[0] = (port_id & 0x000000FF);
+ disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ disable_req.context_id = tgt->context_id;
+ disable_req.conn_id = tgt->fcoe_conn_id;
+ disable_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ disable_req.vlan_tag |=
+ 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ disable_req.vlan_flag = hba->vlan_enabled;
+
+ kwqe_arr[0] = (struct kwqe *) &disable_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ struct fcoe_kwqe_conn_destroy destroy_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
+ destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
+ destroy_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ destroy_req.context_id = tgt->context_id;
+ destroy_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &destroy_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+static void bnx2fc_unsol_els_work(struct work_struct *work)
+{
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+
+ unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
+ lport = unsol_els->lport;
+ fp = unsol_els->fp;
+ fc_exch_recv(lport, fp);
+ kfree(unsol_els);
+}
+
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid)
+{
+ struct fcoe_port *port = tgt->port;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_frame_header *fh;
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+ u32 payload_len;
+ u32 crc;
+ u8 op;
+
+
+ unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
+ if (!unsol_els) {
+ BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
+ l2_oxid, frame_len);
+
+ payload_len = frame_len - sizeof(struct fc_frame_header);
+
+ fp = fc_frame_alloc(lport, payload_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ return;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, frame_len);
+
+ if (l2_oxid != FC_XID_UNKNOWN)
+ fh->fh_ox_id = htons(l2_oxid);
+
+ skb = fp_skb(fp);
+
+ if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
+ (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
+
+ if (fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ if ((op == ELS_TEST) || (op == ELS_ESTC) ||
+ (op == ELS_FAN) || (op == ELS_CSU)) {
+ /*
+ * No need to reply for these
+ * ELS requests
+ */
+ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
+ kfree_skb(skb);
+ return;
+ }
+ }
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+ unsol_els->lport = lport;
+ unsol_els->fp = fp;
+ INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
+ queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
+ } else {
+ BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
+ kfree_skb(skb);
+ }
+}
+
+static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ u8 num_rq;
+ struct fcoe_err_report_entry *err_entry;
+ unsigned char *rq_data;
+ unsigned char *buf = NULL, *buf1;
+ int i;
+ u16 xid;
+ u32 frame_len, len;
+ struct bnx2fc_cmd *io_req = NULL;
+ struct fcoe_task_ctx_entry *task, *task_page;
+ struct bnx2fc_hba *hba = tgt->port->priv;
+ int task_idx, index;
+ int rc = 0;
+
+
+ BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
+ switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
+ case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
+ frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
+ FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
+
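+ /* Round up to the number of RQ buffers spanned by this frame */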
+ num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+
+ rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+ if (rq_data) {
+ buf = rq_data;
+ } else {
+ buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
+ GFP_ATOMIC);
+
+ if (!buf1) {
+ BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
+ break;
+ }
+
+ for (i = 0; i < num_rq; i++) {
+ rq_data = (unsigned char *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ len = BNX2FC_RQ_BUF_SZ;
+ memcpy(buf1, rq_data, len);
+ buf1 += len;
+ }
+ }
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
+ FC_XID_UNKNOWN);
+
+ if (buf != rq_data)
+ kfree(buf);
+ bnx2fc_return_rqe(tgt, num_rq);
+ break;
+
+ case FCOE_ERROR_DETECTION_CQE_TYPE:
+ /*
+ * In case of an error reporting CQE, a single RQ entry
+ * is consumed.
+ */
+ spin_lock_bh(&tgt->tgt_lock);
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = err_entry->fc_hdr.ox_id;
+ BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+
+ if (xid > BNX2FC_MAX_XID) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
+ xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
+ "progress.. ignore unsol err\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ /*
+ * If ABTS is already in progress, and FW error is
+ * received after that, do not cancel the timeout_work
+ * and let the error recovery continue by explicitly
+ * logging out the target, when the ABTS eventually
+ * times out.
+ */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
+ "failed. issue cleanup\n");
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+ } else
+ printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+ "in ABTS processing\n", xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ case FCOE_WARNING_DETECTION_CQE_TYPE:
+ /*
+ * In case of a warning reporting CQE, a single RQ entry
+ * is consumed.
+ */
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+ BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+ break;
+ }
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd *io_req;
+ int task_idx, index;
+ u16 xid;
+ u8 cmd_type;
+ u8 rx_state = 0;
+ u8 num_rq;
+
+ spin_lock_bh(&tgt->tgt_lock);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= BNX2FC_MAX_TASKS) {
+ printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
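+ /* Number of RQ entries consumed by this completion, from the task context */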
+ num_rq = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+ if (io_req == NULL) {
+ printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+
+ /* Timestamp IO completion time */
+ cmd_type = io_req->cmd_type;
+
+ /* optimized completion path */
+ if (cmd_type == BNX2FC_SCSI_CMD) {
+ rx_state = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ }
+
+ /* Process other IO completion types */
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state - %d\n",
+ rx_state);
+ break;
+
+ case BNX2FC_TASK_MGMT_CMD:
+ BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+ bnx2fc_process_tm_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_ABTS:
+ /*
+ * ABTS request received by firmware. ABTS response
+ * will be delivered to the task belonging to the IO
+ * that was aborted
+ */
+ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ case BNX2FC_ELS:
+ BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
+ bnx2fc_process_els_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
+ break;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct bnx2fc_work *work;
+ work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
+ if (!work)
+ return NULL;
+
+ INIT_LIST_HEAD(&work->list);
+ work->tgt = tgt;
+ work->wqe = wqe;
+ return work;
+}
+
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_cqe *cq;
+ u32 cq_cons;
+ struct fcoe_cqe *cqe;
+ u16 wqe;
+ bool more_cqes_found = false;
+
+ /*
+ * cq_lock is a low contention lock used to protect
+ * the CQ data structure from being freed up during
+ * the upload operation
+ */
+ spin_lock_bh(&tgt->cq_lock);
+
+ if (!tgt->cq) {
+ printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+ }
+ cq = tgt->cq;
+ cq_cons = tgt->cq_cons_idx;
+ cqe = &cq[cq_cons];
+
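+ /*
+ * Process the CQ in two passes: drain all entries that match the
+ * current toggle bit, re-arm the CQ, then scan once more to pick
+ * up any entries that arrived while the CQ was being re-armed.
+ */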
+ do {
+ more_cqes_found ^= true;
+
+ while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+ (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+
+ /* new entry on the cq */
+ if (wqe & FCOE_CQE_CQE_TYPE) {
+ /* Unsolicited event notification */
+ bnx2fc_process_unsol_compl(tgt, wqe);
+ } else {
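+ /*
+ * Solicited completion: queue a work item to the per-CPU
+ * I/O thread selected from the wqe, or fall back to inline
+ * processing if no thread is available.
+ */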
+ struct bnx2fc_work *work = NULL;
+ struct bnx2fc_percpu_s *fps = NULL;
+ unsigned int cpu = wqe % num_possible_cpus();
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (unlikely(!fps->iothread))
+ goto unlock;
+
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work)
+ list_add_tail(&work->list,
+ &fps->work_list);
+unlock:
+ spin_unlock_bh(&fps->fp_work_lock);
+
+ /* Pending work request completion */
+ if (fps->iothread && work)
+ wake_up_process(fps->iothread);
+ else
+ bnx2fc_process_cq_compl(tgt, wqe);
+ }
+ cqe++;
+ tgt->cq_cons_idx++;
+
+ if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+ tgt->cq_cons_idx = 0;
+ cqe = cq;
+ tgt->cq_curr_toggle_bit =
+ 1 - tgt->cq_curr_toggle_bit;
+ }
+ }
+ /* Re-arm CQ */
+ if (more_cqes_found) {
+ tgt->conn_db->cq_arm.lo = -1;
+ wmb();
+ }
+ } while (more_cqes_found);
+
+ /*
+ * Commit tgt->cq_cons_idx change to the memory
+ * spin_lock implies full memory barrier, no need to smp_wmb
+ */
+
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+}
+
+/**
+ * bnx2fc_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
+ *
+ * Fast path event notification handler
+ */
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe)
+{
+ u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
+ struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
+
+ if (!tgt) {
+ printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+ return;
+ }
+
+ bnx2fc_process_new_cqes(tgt);
+}
+
+/**
+ * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session offload completion, enable the session if offload is
+ * successful.
+ */
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ struct fcoe_port *port;
+ u32 conn_id;
+ u32 context_id;
+ int rc;
+
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
+ return;
+ }
+ BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+ port = tgt->port;
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ goto ofld_cmpl_err;
+ }
+ /*
+ * cnic has allocated a context_id for this session; use this
+ * while enabling the session.
+ */
+ tgt->context_id = context_id;
+ if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status ==
+ FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
+ printk(KERN_ERR PFX "unable to allocate FCoE context "
+ "resources\n");
+ set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
+ }
+ goto ofld_cmpl_err;
+ } else {
+
+ /* now enable the session */
+ rc = bnx2fc_send_session_enable_req(port, tgt);
+ if (rc) {
+ printk(KERN_ALERT PFX "enable session failed\n");
+ goto ofld_cmpl_err;
+ }
+ }
+ return;
+ofld_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+/**
+ * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session enable completion, mark the rport as ready
+ */
+
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+ u32 context_id;
+
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+
+ /*
+ * context_id should be the same for this target during offload
+ * and enable
+ */
+ if (tgt->context_id != context_id) {
+ printk(KERN_ALERT PFX "context id mis-match\n");
+ return;
+ }
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ goto enbl_cmpl_err;
+ }
+ if (ofld_kcqe->completion_status) {
+ goto enbl_cmpl_err;
+ } else {
+ /* enable successful - rport ready for issuing IOs */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+ }
+ return;
+
+enbl_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *disable_kcqe)
+{
+
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = disable_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
+
+ if (disable_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+ disable_kcqe->completion_status);
+ return;
+ } else {
+ /* disable successful */
+ BNX2FC_TGT_DBG(tgt, "disable successful\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = destroy_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+ if (destroy_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+ destroy_kcqe->completion_status);
+ return;
+ } else {
+ /* destroy successful */
+ BNX2FC_TGT_DBG(tgt, "upload successful\n");
+ clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+ switch (err_code) {
+ case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+ printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+ printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+ printk(KERN_ERR PFX "init_failure due to NIC error\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+ }
+}
+
+/**
+ * bnx2fc_indicate_kcqe - process KCQE
+ *
+ * @context: adapter structure pointer
+ * @kcq: array of kcqe pointers
+ * @num_cqe: Number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ int i = 0;
+ struct fcoe_kcqe *kcqe = NULL;
+
+ while (i < num_cqe) {
+ kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+ switch (kcqe->op_code) {
+ case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+ bnx2fc_fastpath_notification(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+ bnx2fc_process_ofld_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_ENABLE_CONN:
+ bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_INIT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+ bnx2fc_init_failure(hba,
+ kcqe->completion_status);
+ } else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2fc_get_link_state(hba);
+ printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+ (u8)hba->pcidev->bus->number);
+ }
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+ printk(KERN_ERR PFX "DESTROY failed\n");
+ } else {
+ printk(KERN_ERR PFX "DESTROY success\n");
+ }
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+ break;
+
+ case FCOE_KCQE_OPCODE_DISABLE_CONN:
+ bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_CONN:
+ bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_STAT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+ printk(KERN_ERR PFX "STAT failed\n");
+ complete(&hba->stat_req_done);
+ break;
+
+ case FCOE_KCQE_OPCODE_FCOE_ERROR:
+ /* fall thru */
+ default:
+ printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+ kcqe->op_code);
+ }
+ }
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+ struct fcoe_sqe *sqe;
+
+ sqe = &tgt->sq[tgt->sq_prod_idx];
+
+ /* Fill SQ WQE */
+ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
+ sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
+
+ /* Advance SQ Prod Idx */
+ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
+ tgt->sq_prod_idx = 0;
+ tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
+ }
+}
+
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_doorbell_set_prod ev_doorbell;
+ u32 msg;
+
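+ /* Make sure the SQ WQE writes are visible before ringing the doorbell */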
+ wmb();
+
+ memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
+ ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+
+ ev_doorbell.prod = tgt->sq_prod_idx |
+ (tgt->sq_curr_toggle_bit << 15);
+ ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ msg = *((u32 *)&ev_doorbell);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+
+ mmiowb();
+
+}
+
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
+{
+ u32 context_id = tgt->context_id;
+ struct fcoe_port *port = tgt->port;
+ u32 reg_off;
+ resource_size_t reg_base;
+ struct bnx2fc_hba *hba = port->priv;
+
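+ /* Map the 4-byte per-connection doorbell register within the doorbell BAR */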
+ reg_base = pci_resource_start(hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
+ (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!tgt->ctx_base)
+ return -ENOMEM;
+ return 0;
+}
+
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
+
+ if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
+ return NULL;
+
+ tgt->rq_cons_idx += num_items;
+
+ if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
+ tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
+
+ return buf;
+}
+
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ /* return the rq buffer */
+ u32 next_prod_idx = tgt->rq_prod_idx + num_items;
+ if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
+ /* Wrap around RQ */
+ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
+ }
+ tgt->rq_prod_idx = next_prod_idx;
+ tgt->conn_db->rq_prod = tgt->rq_prod_idx;
+}
+
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid)
+{
+ u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u32 context_id = tgt->context_id;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+ /* Common */
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.general.cleanup_info.task_id = orig_xid;
+
+
+}
+
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_frame_header *fc_hdr;
+ u8 task_type = 0;
+ u64 *hdr;
+ u64 temp_hdr[3];
+ u32 context_id;
+
+
+ /* Obtain task_type */
+ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
+ (io_req->cmd_type == BNX2FC_ELS)) {
+ task_type = FCOE_TASK_TYPE_MIDPATH;
+ } else if (io_req->cmd_type == BNX2FC_ABTS) {
+ task_type = FCOE_TASK_TYPE_ABTS;
+ }
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
+ io_req->cmd_type, task_type);
+
+ /* Tx only */
+ if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+ (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_req_bd_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_req_bd_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
+ (unsigned long long)mp_req->mp_req_bd_dma);
+ }
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Rx Write Tx Read */
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
+ fc_hdr->fh_rx_id = htons(0xffff);
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
+ }
+
+ /* Fill FC Header into middle path buffer */
+ hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_resp_bd_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ }
+}
+
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ u8 task_type;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct io_bdt *bd_tbl = io_req->bd_tbl;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u64 *fcp_cmnd;
+ u64 tmp_fcp_cmnd[4];
+ u32 context_id;
+ int cnt, i;
+ int bd_count;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx only */
+ if (task_type == FCOE_TASK_TYPE_WRITE) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+
+ /*Tx Write Rx Read */
+ /* Init state to NORMAL */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Set initiative ownership */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+
+ /* Set initial seq counter */
+ task->cmn.tx_low_seq_cnt = 1;
+
+ /* Set state to "waiting for the first packet" */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+
+ /* Fill FCP_CMND IU */
+ fcp_cmnd = (u64 *)
+ task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+ /* swap fcp_cmnd */
+ cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
+
+ for (i = 0; i < cnt; i++) {
+ *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ fcp_cmnd++;
+ }
+
+ /* Rx Write Tx Read */
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_READ) {
+
+ bd_count = bd_tbl->bd_valid;
+ if (bd_count == 1) {
+
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+ task->tx_wr_rx_rd.init_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+ } else {
+
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+ }
+}
+
+/**
+ * bnx2fc_setup_task_ctx - allocate and map task context
+ *
+ * @hba: pointer to adapter structure
+ *
+ * allocate memory for task context, and associated BD table to be used
+ * by firmware
+ *
+ */
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
+{
+ int rc = 0;
+ struct regpair *task_ctx_bdt;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Allocate the task context BD table. One page of the BD table
+ * can map 256 buffers, and each buffer contains 32 task context
+ * entries, so a single page supports up to 8192 task context
+ * entries.
+ */
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
+ if (!hba->task_ctx_bd_tbl) {
+ printk(KERN_ERR PFX "unable to allocate task context BDT\n");
+ rc = -1;
+ goto out;
+ }
+ memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
+
+ /*
+ * Allocate task_ctx which is an array of pointers pointing to
+ * a page containing 32 task contexts
+ */
+ hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+ GFP_KERNEL);
+ if (!hba->task_ctx) {
+ printk(KERN_ERR PFX "unable to allocate task context array\n");
+ rc = -1;
+ goto out1;
+ }
+
+ /*
+ * Allocate task_ctx_dma which is an array of dma addresses
+ */
+ hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+ sizeof(dma_addr_t)), GFP_KERNEL);
+ if (!hba->task_ctx_dma) {
+ printk(KERN_ERR PFX "unable to alloc context mapping array\n");
+ rc = -1;
+ goto out2;
+ }
+
+ task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
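+ /* Each BD table regpair holds the DMA address of one task context page */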
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
+ if (!hba->task_ctx[i]) {
+ printk(KERN_ERR PFX "unable to alloc task context\n");
+ rc = -1;
+ goto out3;
+ }
+ memset(hba->task_ctx[i], 0, PAGE_SIZE);
+ addr = (u64)hba->task_ctx_dma[i];
+ task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
+ task_ctx_bdt->lo = cpu_to_le32((u32)addr);
+ task_ctx_bdt++;
+ }
+ return 0;
+
+out3:
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i], hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+out2:
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+out1:
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+out:
+ return rc;
+}
+
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
+{
+ int i;
+
+ if (hba->task_ctx_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl,
+ hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+ }
+
+ if (hba->task_ctx) {
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i],
+ hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+}
+
+static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int segment_count;
+ int hash_table_size;
+ u32 *pbl;
+
+ segment_count = hba->hash_tbl_segment_count;
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
+
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+
+ }
+
+ if (hba->hash_tbl_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->hash_tbl_pbl,
+ hba->hash_tbl_pbl_dma);
+ hba->hash_tbl_pbl = NULL;
+ }
+}
+
+static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int hash_table_size;
+ int segment_count;
+ int segment_array_size;
+ int dma_segment_array_size;
+ dma_addr_t *dma_segment_array;
+ u32 *pbl;
+
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
+ segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
+ hba->hash_tbl_segment_count = segment_count;
+
+ segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
+ hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
+ if (!hba->hash_tbl_segments) {
+ printk(KERN_ERR PFX "hash table pointers alloc failed\n");
+ return -ENOMEM;
+ }
+ dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
+ dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+ if (!dma_segment_array) {
+ printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < segment_count; ++i) {
+ hba->hash_tbl_segments[i] =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
+ if (!hba->hash_tbl_segments[i]) {
+ printk(KERN_ERR PFX "hash segment alloc failed\n");
+ while (--i >= 0) {
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ hba->hash_tbl_segments[i] = NULL;
+ }
+ kfree(dma_segment_array);
+ return -ENOMEM;
+ }
+ memset(hba->hash_tbl_segments[i], 0,
+ BNX2FC_HASH_TBL_CHUNK_SIZE);
+ }
+
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
+ if (!hba->hash_tbl_pbl) {
+ printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+ kfree(dma_segment_array);
+ return -ENOMEM;
+ }
+ memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+
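+ /* Write the lo/hi DMA address of each hash table segment into the PBL */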
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ u64 paddr = dma_segment_array[i];
+ *pbl = cpu_to_le32((u32) paddr);
+ ++pbl;
+ *pbl = cpu_to_le32((u32) (paddr >> 32));
+ ++pbl;
+ }
+ pbl = hba->hash_tbl_pbl;
+ i = 0;
+ while (*pbl && *(pbl + 1)) {
+ u32 lo;
+ u32 hi;
+ lo = *pbl;
+ ++pbl;
+ hi = *pbl;
+ ++pbl;
+ ++i;
+ }
+ kfree(dma_segment_array);
+ return 0;
+}
+
+/**
+ * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
+ *
+ * @hba: Pointer to adapter structure
+ *
+ */
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
+{
+ u64 addr;
+ u32 mem_size;
+ int i;
+
+ if (bnx2fc_allocate_hash_table(hba))
+ return -ENOMEM;
+
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl_ptr) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
+
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl, 0x00, mem_size);
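+ /* Chain each T2 hash table entry to the next one via its DMA address */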
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ addr = (unsigned long) hba->t2_hash_tbl_dma +
+ ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
+ hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
+ hba->t2_hash_tbl[i].next.hi = addr >> 32;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE, &hba->dummy_buf_dma,
+ GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
+ if (!hba->stats_buffer) {
+ printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+ return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+ u32 mem_size;
+
+ if (hba->stats_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->stats_buffer, hba->stats_buf_dma);
+ hba->stats_buffer = NULL;
+ }
+
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+
+ if (hba->t2_hash_tbl_ptr) {
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl_ptr,
+ hba->t2_hash_tbl_ptr_dma);
+ hba->t2_hash_tbl_ptr = NULL;
+ }
+
+ if (hba->t2_hash_tbl) {
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+ hba->t2_hash_tbl = NULL;
+ }
+ bnx2fc_free_hash_table(hba);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000000..0f1dd23730db
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,1833 @@
+/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec)
+{
+ struct bnx2fc_hba *hba = io_req->port->priv;
+
+ if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+ struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+ timeout_work.work);
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+ u8 cmd_type = io_req->cmd_type;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ int logo_issued;
+ int rc;
+
+ BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
+ "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+ clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ /*
+ * Ideally we should hold the io_req until the RRQ completes,
+ * and release the io_req from the timeout hold.
+ */
+ spin_unlock_bh(&tgt->tgt_lock);
+ bnx2fc_send_rrq(io_req);
+ return;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+ goto done;
+ }
+
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+ complete(&io_req->tm_done);
+ } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /* Handle internally generated ABTS timeout */
+ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /* Handle IO timeout */
+ BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed before "
+ "timer expiry\n");
+ goto done;
+ }
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == SUCCESS)
+ goto done;
+ /*
+ * Explicitly logo the target if
+ * abts initiation fails
+ */
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ } else {
+ BNX2FC_IO_DBG(io_req, "IO already in "
+ "ABTS processing\n");
+ }
+ }
+ break;
+ case BNX2FC_ELS:
+
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo (els)\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /*
+ * Handle ELS timeout.
+ * tgt_lock is used to sync compl path and timeout
+ * path. If els compl path is processing this IO, we
+ * have nothing to do here, just release the timer hold
+ */
+ BNX2FC_IO_DBG(io_req, "ELS timed out\n");
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &io_req->req_flags))
+ goto done;
+
+ /* Indicate the cb_func that this ELS is timed out */
+ set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+ }
+ break;
+ default:
+ printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
+ cmd_type);
+ break;
+ }
+
+done:
+ /* release the cmd that was held when timer was set */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
+{
+ /* Called with host lock held */
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ /*
+ * active_cmd_queue may have other command types as well,
+ * and during flush operation, we want to error back only
+ * scsi commands.
+ */
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ return;
+
+ BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
+ if (!sc_cmd) {
+ printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
+ "IO(0x%x) already cleaned up\n",
+ io_req->xid);
+ return;
+ }
+ sc_cmd->result = err_code << 16;
+
+ BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
+ sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
+ sc_cmd->allowed);
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+}
+
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid)
+{
+ struct bnx2fc_cmd_mgr *cmgr;
+ struct io_bdt *bdt_info;
+ struct bnx2fc_cmd *io_req;
+ size_t len;
+ u32 mem_size;
+ u16 xid;
+ int i;
+ int num_ios;
+ size_t bd_tbl_sz;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+ printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
+ "and max_xid 0x%x\n", min_xid, max_xid);
+ return NULL;
+ }
+ BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
+
+ num_ios = max_xid - min_xid + 1;
+ len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
+ len += sizeof(struct bnx2fc_cmd_mgr);
+
+ cmgr = kzalloc(len, GFP_KERNEL);
+ if (!cmgr) {
+ printk(KERN_ERR PFX "failed to alloc cmgr\n");
+ return NULL;
+ }
+
+ cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list) {
+ printk(KERN_ERR PFX "failed to alloc free_list\n");
+ goto mem_err;
+ }
+
+ cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list_lock) {
+ printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ goto mem_err;
+ }
+
+ cmgr->hba = hba;
+ cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
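+ /* One free list and lock per possible CPU to reduce allocation contention */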
+ for (i = 0; i < num_possible_cpus(); i++) {
+ INIT_LIST_HEAD(&cmgr->free_list[i]);
+ spin_lock_init(&cmgr->free_list_lock[i]);
+ }
+
+ /* Pre-allocated pool of bnx2fc_cmds */
+ xid = BNX2FC_MIN_XID;
+ for (i = 0; i < num_ios; i++) {
+ io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
+
+ if (!io_req) {
+ printk(KERN_ERR PFX "failed to alloc io_req\n");
+ goto mem_err;
+ }
+
+ INIT_LIST_HEAD(&io_req->link);
+ INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
+
+ io_req->xid = xid++;
+ if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
+ printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
+ io_req->xid);
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[io_req->xid % num_possible_cpus()]);
+ io_req++;
+ }
+
+ /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+ mem_size = num_ios * sizeof(struct io_bdt *);
+ cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool) {
+ printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+ goto mem_err;
+ }
+
+ mem_size = sizeof(struct io_bdt);
+ for (i = 0; i < num_ios; i++) {
+ cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool[i]) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "io_bdt_pool[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ /* Allocate and map fcoe_bdt_ctx structures */
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ bd_tbl_sz,
+ &bdt_info->bd_tbl_dma,
+ GFP_KERNEL);
+ if (!bdt_info->bd_tbl) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "bdt_tbl[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ return cmgr;
+
+mem_err:
+ bnx2fc_cmd_mgr_free(cmgr);
+ return NULL;
+}
+
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
+{
+ struct io_bdt *bdt_info;
+ struct bnx2fc_hba *hba = cmgr->hba;
+ size_t bd_tbl_sz;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = BNX2FC_MAX_XID;
+ int num_ios;
+ int i;
+
+ num_ios = max_xid - min_xid + 1;
+
+ /* Free fcoe_bdt_ctx structures */
+ if (!cmgr->io_bdt_pool)
+ goto free_cmd_pool;
+
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ if (bdt_info->bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
+ bdt_info->bd_tbl,
+ bdt_info->bd_tbl_dma);
+ bdt_info->bd_tbl = NULL;
+ }
+ }
+
+ /* Destroy io_bdt pool */
+ for (i = 0; i < num_ios; i++) {
+ kfree(cmgr->io_bdt_pool[i]);
+ cmgr->io_bdt_pool[i] = NULL;
+ }
+
+ kfree(cmgr->io_bdt_pool);
+ cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+ kfree(cmgr->free_list_lock);
+
+ /* Destroy cmd pool */
+ if (!cmgr->free_list)
+ goto free_cmgr;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ struct list_head *list;
+ struct list_head *tmp;
+
+ list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
+ struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+ list_del(&io_req->link);
+ kfree(io_req);
+ }
+ }
+ kfree(cmgr->free_list);
+free_cmgr:
+ /* Free command manager itself */
+ kfree(cmgr);
+}
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = tgt->max_sqes;
+ switch (type) {
+ case BNX2FC_TASK_MGMT_CMD:
+ max_sqes = BNX2FC_TM_MAX_SQES;
+ break;
+ case BNX2FC_ELS:
+ max_sqes = BNX2FC_ELS_MAX_SQES;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
+ "ios(%d):sqes(%d)\n",
+ tgt->num_active_ios.counter, tgt->max_sqes);
+ if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+ printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+ io_req->cmd_type = type;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = BNX2FC_SCSI_MAX_SQES;
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+void bnx2fc_cmd_release(struct kref *ref)
+{
+ struct bnx2fc_cmd *io_req = container_of(ref,
+ struct bnx2fc_cmd, refcount);
+ struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ bnx2fc_free_mp_resc(io_req);
+ cmd_mgr->cmds[io_req->xid] = NULL;
+ /* Delete IO from retire queue */
+ list_del_init(&io_req->link);
+ /* Add it to the free list */
+ list_add(&io_req->link,
+ &cmd_mgr->free_list[smp_processor_id()]);
+ atomic_dec(&io_req->tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+}
+
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ size_t sz = sizeof(struct fcoe_bd_ctx);
+
+ /* clear tm flags */
+ mp_req->tm_flags = 0;
+ if (mp_req->mp_req_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_req_bd,
+ mp_req->mp_req_bd_dma);
+ mp_req->mp_req_bd = NULL;
+ }
+ if (mp_req->mp_resp_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_resp_bd,
+ mp_req->mp_resp_bd_dma);
+ mp_req->mp_resp_bd = NULL;
+ }
+ if (mp_req->req_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf,
+ mp_req->req_buf_dma);
+ mp_req->req_buf = NULL;
+ }
+ if (mp_req->resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf,
+ mp_req->resp_buf_dma);
+ mp_req->resp_buf = NULL;
+ }
+}
+
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fcoe_bd_ctx *mp_req_bd;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ dma_addr_t addr;
+ size_t sz;
+
+ mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+ memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ mp_req->req_len = sizeof(struct fcp_cmnd);
+ io_req->data_xfer_len = mp_req->req_len;
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->req_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->req_buf) {
+ printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->resp_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->resp_buf) {
+ printk(KERN_ERR PFX "unable to alloc MP resp buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ memset(mp_req->req_buf, 0, PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+ /* Allocate and map mp_req_bd and mp_resp_bd */
+ sz = sizeof(struct fcoe_bd_ctx);
+ mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_req_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_req_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_resp_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_resp_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ /* Fill bd table */
+ addr = mp_req->req_buf_dma;
+ mp_req_bd = mp_req->mp_req_bd;
+ mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->flags = 0;
+
+ /*
+ * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd
+ * entry in the bd table
+ */
+ mp_resp_bd = mp_req->mp_resp_bd;
+ addr = mp_req->resp_buf_dma;
+ mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->flags = 0;
+
+ return SUCCESS;
+}
+
+static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fcoe_port *port;
+ struct bnx2fc_hba *hba;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ struct bnx2fc_mp_req *tm_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct Scsi_Host *host = sc_cmd->device->host;
+ struct fc_frame_header *fc_hdr;
+ struct fcp_cmnd *fcp_cmnd;
+ int task_idx, index;
+ int rc = SUCCESS;
+ u16 xid;
+ u32 sid, did;
+ unsigned long start = jiffies;
+
+ lport = shost_priv(host);
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "device_reset: link is not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+retry_tmf:
+ io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
+ if (!io_req) {
+ if (time_after(jiffies, start + HZ)) {
+ printk(KERN_ERR PFX "tmf: Failed TMF\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ msleep(20);
+ goto retry_tmf;
+ }
+ /* Initialize rest of io_req fields */
+ io_req->sc_cmd = sc_cmd;
+ io_req->port = port;
+ io_req->tgt = tgt;
+
+ tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+
+ rc = bnx2fc_init_mp_req(io_req);
+ if (rc == FAILED) {
+ printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ goto tmf_err;
+ }
+
+ /* Set TM flags */
+ io_req->io_req_flags = 0;
+ tm_req->tm_flags = tm_flags;
+
+ /* Fill FCP_CMND */
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+ fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+ memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
+ fcp_cmnd->fc_dl = 0;
+
+ /* Fill FC header */
+ fc_hdr = &(tm_req->req_fc_hdr);
+ sid = tgt->sid;
+ did = rport->port_id;
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ /* Obtain exchange id */
+ xid = io_req->xid;
+
+ BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(io_req, task);
+
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ /* Obtain free SQ entry */
+ spin_lock_bh(&tgt->tgt_lock);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_tm_queue */
+ io_req->on_tmf_queue = 1;
+ list_add_tail(&io_req->link, &tgt->active_tm_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_TM_TIMEOUT * HZ);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ io_req->wait_for_comp = 0;
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!rc) {
+ printk(KERN_ERR PFX "task mgmt command failed...\n");
+ rc = FAILED;
+ } else {
+ printk(KERN_ERR PFX "task mgmt command success...\n");
+ rc = SUCCESS;
+ }
+tmf_err:
+ return rc;
+}
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *abts_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fc_frame_header *fc_hdr;
+ struct bnx2fc_mp_req *abts_req;
+ int task_idx, index;
+ u32 sid, did;
+ u16 xid;
+ int rc = SUCCESS;
+ u32 r_a_tov = rdata->r_a_tov;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+ if (!abts_io_req) {
+ printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ abts_io_req->sc_cmd = NULL;
+ abts_io_req->port = port;
+ abts_io_req->tgt = tgt;
+ abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+ abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+ memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ /* Fill FC header */
+ fc_hdr = &(abts_req->req_fc_hdr);
+
+ /* Obtain oxid and rxid for the original exchange to be aborted */
+ fc_hdr->fh_ox_id = htons(io_req->xid);
+ fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+
+ sid = tgt->sid;
+ did = rport->port_id;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+ FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ xid = abts_io_req->xid;
+ BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(abts_io_req, task);
+
+ /*
+ * The ABTS task is a temporary task that will be cleaned up
+ * irrespective of the ABTS response. We need to start the timer
+ * for the original exchange, as the CQE is posted for the original
+ * IO request.
+ *
+ * The timer for ABTS is started only when it is originated by a
+ * TM request. For an ABTS issued as part of a ULP timeout,
+ * scsi-ml maintains the timers.
+ */
+
+ /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+ bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+ return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *cleanup_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int task_idx, index;
+ u16 xid, orig_xid;
+ int rc = 0;
+
+ /* ASSUMPTION: called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+ if (!cleanup_io_req) {
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
+ rc = -1;
+ goto cleanup_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ cleanup_io_req->sc_cmd = NULL;
+ cleanup_io_req->port = port;
+ cleanup_io_req->tgt = tgt;
+ cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = cleanup_io_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ orig_xid = io_req->xid;
+
+ BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
+ bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+ return rc;
+}
+
+/**
+ * bnx2fc_eh_target_reset - Reset a target
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+/**
+ * bnx2fc_eh_abort - eh_abort_handler API to abort an outstanding
+ * SCSI command
+ *
+ * @sc_cmd: SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_cmd *io_req;
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt;
+ int rc = FAILED;
+
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ lport = shost_priv(sc_cmd->device->host);
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+ return rc;
+ }
+
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ if (!io_req) {
+ /* Command might have just completed */
+ printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+
+ /* Hold IO request across abort processing */
+ kref_get(&io_req->refcount);
+
+ BUG_ON(tgt != io_req->tgt);
+
+ /* Remove the io_req from the active_q. */
+ /*
+ * Task Mgmt functions (LUN RESET & TGT RESET) will not
+ * issue an ABTS on this particular IO req, as the
+ * io_req is no longer in the active_q.
+ */
+ if (tgt->flush_in_prog) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "flush in progress\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ if (io_req->on_active_queue == 0) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "not on active_q\n", io_req->xid);
+ /*
+ * This condition can happen only due to a FW bug,
+ * where we do not receive a cleanup response from
+ * the FW. Handle this case gracefully by failing
+ * the IO request back to SCSI-ml.
+ */
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ /*
+ * Only eh_abort processing will remove the IO from
+ * active_cmd_q before processing the request. This is
+ * done to avoid race conditions between IOs aborted
+ * as part of task management completion and eh_abort
+ * processing.
+ */
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ rc = bnx2fc_initiate_abts(io_req);
+ } else {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "already in abts processing\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ if (rc == FAILED) {
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+ /* Let the scsi-ml try to recover this command */
+ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
+ io_req->xid);
+ rc = FAILED;
+ } else {
+ /*
+ * We come here even when there was a race condition
+ * between timeout and abts completion, and abts
+ * completion happens just in time.
+ */
+ BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+ rc = SUCCESS;
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+
+ /* release the reference taken in eh_abort */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+}
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ u32 r_ctl;
+ u32 r_a_tov = FC_DEF_R_A_TOV;
+ u8 issue_rrq = 0;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+
+ BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x "
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->xid,
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+ " this io\n");
+ return;
+ }
+
+ /* Do not issue RRQ as this IO is already cleaned up */
+ if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags))
+ goto io_compl;
+
+ /*
+ * For an ABTS issued due to the SCSI eh_abort_handler, timeout
+ * values are maintained by scsi-ml itself. Cancel the timeout
+ * in case the ABTS was issued as part of a task management
+ * function or due to a FW error.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+ switch (r_ctl) {
+ case FC_RCTL_BA_ACC:
+ /*
+ * Don't release this cmd yet. It will be released
+ * after we get the RRQ response.
+ */
+ BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+ issue_rrq = 1;
+ break;
+
+ case FC_RCTL_BA_RJT:
+ BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+ break;
+ default:
+ printk(KERN_ERR PFX "Unknown ABTS response\n");
+ break;
+ }
+
+ if (issue_rrq) {
+ BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+ set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+ set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+ if (io_req->wait_for_comp) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ } else {
+ /*
+ * We end up here when the ABTS is issued
+ * in an asynchronous context, i.e. as part
+ * of task management completion, when a
+ * FW error is received, or when the ABTS
+ * is issued because the IO has timed
+ * out.
+ */
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ }
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int tm_lun = sc_cmd->device->lun;
+ int rc = 0;
+ int lun;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+ /*
+ * Walk through the active_cmd_queue and abort each IO
+ * that matches the LUN that was reset.
+ */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ lun = cmd->sc_cmd->device->lun;
+ if (lun == tm_lun) {
+ /* Initiate ABTS on this cmd */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release);
+ /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+ /* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+ } else
+ printk(KERN_ERR PFX "lun_rst: abts already in"
+ " progress for this IO 0x%x\n",
+ cmd->xid);
+ }
+ }
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int rc = 0;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+ /*
+ * Walk through the active_cmd_queue and abort every IO
+ * outstanding to the target that was reset.
+ */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ /* Initiate ABTS */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+ /* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+
+ } else
+ printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+ " for this IO 0x%x\n", cmd->xid);
+ }
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *tm_req;
+ struct fc_frame_header *fc_hdr;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ u64 *hdr;
+ u64 *temp_hdr;
+ void *rsp_buf;
+
+ /* Called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+ if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+ else {
+ /* TM has already timed out and we got
+ * a delayed completion. Ignore completion
+ * processing.
+ */
+ return;
+ }
+
+ tm_req = &(io_req->mp_req);
+ fc_hdr = &(tm_req->resp_fc_hdr);
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ rsp_buf = tm_req->resp_buf;
+
+ if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ bnx2fc_parse_fcp_rsp(io_req,
+ (struct fcoe_fcp_rsp_payload *)
+ rsp_buf, num_rq);
+ if (io_req->fcp_rsp_code == 0) {
+ /* TM successful */
+ if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
+ bnx2fc_lun_reset_cmpl(io_req);
+ else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
+ bnx2fc_tgt_reset_cmpl(io_req);
+ }
+ } else {
+ printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
+ fc_hdr->fh_r_ctl);
+ }
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+ return;
+ }
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+
+ default:
+ BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+
+ sc_cmd = io_req->sc_cmd;
+ io_req->sc_cmd = NULL;
+
+ /* check if the io_req exists in tgt's tmf_q */
+ if (io_req->on_tmf_queue) {
+
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ } else {
+
+ printk(KERN_ALERT PFX "Command not on the tmf_queue!\n");
+ return;
+ }
+
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp) {
+ BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
+ complete(&io_req->tm_done);
+ }
+}
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+ while (sg_len) {
+ if (sg_len >= BNX2FC_BD_SPLIT_SZ)
+ frag_size = BNX2FC_BD_SPLIT_SZ;
+ else
+ frag_size = sg_len;
+ bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
+ bd[bd_index + sg_frags].buf_len = (u16)frag_size;
+ bd[bd_index + sg_frags].flags = 0;
+
+ addr += (u64) frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+
+}
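For illustration only, and not part of this patch: a minimal user-space sketch of the BD-splitting arithmetic in bnx2fc_split_bd() above. The 32 KiB split size and every name here are assumed placeholders, not the driver's BNX2FC_BD_SPLIT_SZ or its types.

/* Sketch: counts how many BD fragments one oversized scatterlist element
 * would produce, mirroring the while loop in bnx2fc_split_bd(). */
#include <stdio.h>

#define EXAMPLE_BD_SPLIT_SZ (32 * 1024)	/* placeholder split size */

static int example_split_count(int sg_len)
{
	int frags = 0;

	while (sg_len) {
		int frag = (sg_len >= EXAMPLE_BD_SPLIT_SZ) ?
			   EXAMPLE_BD_SPLIT_SZ : sg_len;
		sg_len -= frag;
		frags++;
	}
	return frags;
}

int main(void)
{
	/* A 100 KiB element yields 3 full fragments plus a 4 KiB tail = 4 BDs. */
	printf("%d\n", example_split_count(100 * 1024));
	return 0;
}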
+
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int sg_count = 0;
+ int bd_count = 0;
+ int sg_frags;
+ unsigned int sg_len;
+ u64 addr;
+ int i;
+
+ sg_count = scsi_dma_map(sc);
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ if (sg_len > BNX2FC_MAX_BD_LEN) {
+ sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
+ bd_count);
+ } else {
+
+ sg_frags = 1;
+ bd[bd_count].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buf_addr_hi = addr >> 32;
+ bd[bd_count].buf_len = (u16)sg_len;
+ bd[bd_count].flags = 0;
+ }
+ bd_count += sg_frags;
+ byte_count += sg_len;
+ }
+ if (byte_count != scsi_bufflen(sc))
+ printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
+ "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
+ io_req->xid);
+ return bd_count;
+}
+
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int bd_count;
+
+ if (scsi_sg_count(sc))
+ bd_count = bnx2fc_map_sg(io_req);
+ else {
+ bd_count = 0;
+ bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+ bd[0].buf_len = bd[0].flags = 0;
+ }
+ io_req->bd_tbl->bd_valid = bd_count;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+
+ if (io_req->bd_tbl->bd_valid && sc) {
+ scsi_dma_unmap(sc);
+ io_req->bd_tbl->bd_valid = 0;
+ }
+}
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ char tag[2];
+
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+ int_to_scsilun(sc_cmd->device->lun,
+ (struct scsi_lun *) fcp_cmnd->fc_lun);
+
+
+ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_flags = io_req->io_req_flags;
+
+ if (scsi_populate_tag_msg(sc_cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
+ break;
+ default:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+ break;
+ }
+ } else {
+ fcp_cmnd->fc_pri_ta = 0;
+ }
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+ u32 rq_buff_len = 0;
+ int i;
+ unsigned char *rq_data;
+ unsigned char *dummy;
+ int fcp_sns_len = 0;
+ int fcp_rsp_len = 0;
+
+ io_req->fcp_status = FC_GOOD;
+ io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+ io_req->scsi_comp_flags = rsp_flags;
+ CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+ fcp_rsp->scsi_status_code;
+
+ /* Fetch fcp_rsp_info and fcp_sns_info if available */
+ if (num_rq) {
+
+ /*
+ * We do not anticipate num_rq > 1, as the Linux-defined
+ * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
+ * a single 256-byte RQ buffer is large enough to hold this.
+ */
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+ fcp_rsp_len = rq_buff_len
+ = fcp_rsp->fcp_rsp_len;
+ }
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+ fcp_sns_len = fcp_rsp->fcp_sns_len;
+ rq_buff_len += fcp_rsp->fcp_sns_len;
+ }
+
+ io_req->fcp_rsp_len = fcp_rsp_len;
+ io_req->fcp_sns_len = fcp_sns_len;
+
+ if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+ /* Invalid sense length. */
+ printk(KERN_ALERT PFX "invalid sns length %d\n",
+ rq_buff_len);
+ /* reset rq_buff_len */
+ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
+ }
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ /* fetch fcp_rsp_code */
+ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+ /* Only for task management function */
+ io_req->fcp_rsp_code = rq_data[3];
+ printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+ io_req->fcp_rsp_code);
+ }
+
+ /* fetch sense data */
+ rq_data += fcp_rsp_len;
+
+ if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+ printk(KERN_ERR PFX "Truncating sense buffer\n");
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+ memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+ }
+}
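A quick sizing check of the num_rq comment in bnx2fc_parse_fcp_rsp(), for illustration only; the 96-, 8- and 256-byte figures are taken from that comment rather than verified against bnx2fc.h.

/* Sketch: 96 bytes of sense data + 8 bytes of FCP_RSP_INFO = 104 bytes,
 * which fits in one 256-byte RQ buffer, so num_rq > 1 is not expected. */
#include <assert.h>

int main(void)
{
	int sense_buffersize = 96;	/* SCSI_SENSE_BUFFERSIZE, per the comment */
	int fcp_rsp_info_len = 8;	/* FCP_RSP_INFO worst case, per the comment */
	int rq_buf_sz = 256;		/* single RQ buffer size, per the comment */

	assert(sense_buffersize + fcp_rsp_info_len <= rq_buf_sz);
	return 0;
}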
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host: The Scsi_Host the command was issued to
+ * @sc_cmd: struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ int rc = 0;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ /*
+ * Session is not offloaded yet. Let SCSI-ml retry
+ * the command.
+ */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+
+ io_req = bnx2fc_cmd_alloc(tgt);
+ if (!io_req) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+ io_req->sc_cmd = sc_cmd;
+
+ if (bnx2fc_post_io_req(tgt, io_req)) {
+ printk(KERN_ERR PFX "Unable to post io_req\n");
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+exit_qcmd:
+ return rc;
+}
+
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ struct fcoe_fcp_rsp_payload *fcp_rsp;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct Scsi_Host *host;
+
+
+ /* scsi_cmd_cmpl is called with tgt lock held */
+
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ /* we will not receive ABTS response for this IO */
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing "
+ "this scsi cmd\n");
+ }
+
+ /* Cancel the timeout_work, as we received IO completion */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ sc_cmd = io_req->sc_cmd;
+ if (sc_cmd == NULL) {
+ printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
+ return;
+ }
+
+ /* Fetch fcp_rsp from task context and perform cmd completion */
+ fcp_rsp = (struct fcoe_fcp_rsp_payload *)
+ &(task->cmn.general.rsp_info.fcp_rsp.payload);
+
+ /* parse fcp_rsp and obtain sense data from RQ if available */
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+
+ host = sc_cmd->device->host;
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ return;
+ }
+ io_req->sc_cmd = NULL;
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ } else {
+ /* This should not happen, but the IO could have been pulled
+ * off the queue by bnx2fc_flush_active_ios(), or during a race
+ * between command abort and (late) completion.
+ */
+ BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
+ if (io_req->wait_for_comp)
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ }
+
+ bnx2fc_unmap_sg_list(io_req);
+
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
+ " fcp_resid = 0x%x\n",
+ io_req->cdb_status, io_req->fcp_resid);
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+ default:
+ printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_lport *lport = port->lport;
+ struct fcoe_dev_stats *stats;
+ int task_idx, index;
+ u16 xid;
+
+ /* Initialize rest of io_req fields */
+ io_req->cmd_type = BNX2FC_SCSI_CMD;
+ io_req->port = port;
+ io_req->tgt = tgt;
+ io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_req->io_req_flags = BNX2FC_READ;
+ stats->InputRequests++;
+ stats->InputBytes += io_req->data_xfer_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_req->io_req_flags = BNX2FC_WRITE;
+ stats->OutputRequests++;
+ stats->OutputBytes += io_req->data_xfer_len;
+ } else {
+ io_req->io_req_flags = 0;
+ stats->ControlRequests++;
+ }
+ put_cpu();
+
+ xid = io_req->xid;
+
+ /* Build buffer descriptor list for firmware from sg list */
+ bnx2fc_build_bd_list_from_sg(io_req);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_task(io_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (tgt->flush_in_prog) {
+ printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "Session not ready...post_io\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ /* Time IO req */
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_cmd_queue */
+
+ io_req->on_active_queue = 1;
+ /* add io_req to the tail of the tgt's active_cmd_queue */
+ list_add_tail(&io_req->link, &tgt->active_cmd_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
new file mode 100644
index 000000000000..7ea93af60260
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -0,0 +1,844 @@
+/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+ /* fake upload completion */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+ /* NOTE: This function should never be called, as
+ * offload should never time out
+ */
+ /*
+ * If the timer has expired, this session is dead.
+ * Clear the offloaded flag and log out of this device.
+ * Since the OFFLOADED flag is cleared, this case
+ * will be considered an offload error and the
+ * port will be logged off; the conn_id and session
+ * resources are freed up in bnx2fc_offload_session.
+ */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+ int rval;
+ int i = 0;
+
+ /* Initialize bnx2fc_rport */
+ /* NOTE: tgt is already bzero'd */
+ rval = bnx2fc_init_tgt(tgt, port, rdata);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate conn id for "
+ "port_id (%6x)\n", rport->port_id);
+ goto ofld_err;
+ }
+
+ /* Allocate session resources */
+ rval = bnx2fc_alloc_session_resc(hba, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate resources\n");
+ goto ofld_err;
+ }
+
+ /*
+ * Initialize FCoE session offload process.
+ * Upon completion of offload process add
+ * rport to list of rports
+ */
+retry_ofld:
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_ofld_req(port, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "ofld_req failed\n");
+ goto ofld_err;
+ }
+
+ /*
+ * Wait for the session to be offloaded and enabled. 3 seconds
+ * should be ample time for this process to complete.
+ */
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->ofld_timer);
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
+ &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
+ "retry ofld..%d\n", i++);
+ msleep_interruptible(1000);
+ if (i > 3) {
+ i = 0;
+ goto ofld_err;
+ }
+ goto retry_ofld;
+ }
+ goto ofld_err;
+ }
+ if (bnx2fc_map_doorbell(tgt)) {
+ printk(KERN_ERR PFX "map doorbell failed - no mem\n");
+ /* upload will take care of cleaning up sess resc */
+ lport->tt.rport_logoff(rdata);
+ }
+ return;
+
+ofld_err:
+ /* couldn't offload the session. log off from this rport */
+ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ lport->tt.rport_logoff(rdata);
+ /* Free session resources */
+ bnx2fc_free_session_resc(hba, tgt);
+ if (tgt->fcoe_conn_id != -1)
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+}
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_cmd *io_req;
+ struct list_head *list;
+ struct list_head *tmp;
+ int rc;
+ int i = 0;
+ BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
+ tgt->num_active_ios.counter);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 1;
+
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "cleaned up\n");
+ complete(&io_req->tm_done);
+ }
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ }
+
+ set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->els_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+
+ BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+
+ BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+
+ BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
+ i = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+ /* wait for active_ios to go to 0 */
+ while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
+ msleep(25);
+ if (tgt->num_active_ios.counter != 0)
+ printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
+ " active_ios = %d\n",
+ tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_upload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+ tgt->num_active_ios.counter);
+
+ /*
+ * Called with hba->hba_mutex held.
+ * This is a blocking call
+ */
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_disable_req(port, tgt);
+
+ /*
+ * Wait for the upload to complete. 3 seconds
+ * should be sufficient time for this process to complete.
+ */
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ /*
+ * Traverse through the active_q and tmf_q and clean up
+ * the IOs on these lists.
+ */
+ BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+ tgt->flags);
+ bnx2fc_flush_active_ios(tgt);
+
+ /* Issue destroy KWQE */
+ if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_destroy_req(hba, tgt);
+
+ /* wait for destroy to complete */
+ setup_timer(&tgt->upld_timer,
+ bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+ printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+ BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+ tgt->flags);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ } else
+ printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+ " not sent to FW\n");
+
+ /* Free session resources */
+ spin_lock_bh(&tgt->cq_lock);
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ spin_unlock_bh(&tgt->cq_lock);
+}
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata)
+{
+
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+
+ tgt->rport = rport;
+ tgt->rdata = rdata;
+ tgt->port = port;
+
+ if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+ BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+ tgt->fcoe_conn_id = -1;
+ return -1;
+ }
+
+ tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+ if (tgt->fcoe_conn_id == -1)
+ return -1;
+
+ BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+ tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+ tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+ tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+
+ /* Initialize the toggle bit */
+ tgt->sq_curr_toggle_bit = 1;
+ tgt->cq_curr_toggle_bit = 1;
+ tgt->sq_prod_idx = 0;
+ tgt->cq_cons_idx = 0;
+ tgt->rq_prod_idx = 0x8000;
+ tgt->rq_cons_idx = 0;
+ atomic_set(&tgt->num_active_ios, 0);
+
+ tgt->work_time_slice = 2;
+
+ spin_lock_init(&tgt->tgt_lock);
+ spin_lock_init(&tgt->cq_lock);
+
+ /* Initialize active_cmd_queue list */
+ INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+ /* Initialize IO retire queue */
+ INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+ INIT_LIST_HEAD(&tgt->els_queue);
+
+ /* Initialize active_tm_queue list */
+ INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+ init_waitqueue_head(&tgt->ofld_wait);
+ init_waitqueue_head(&tgt->upld_wait);
+
+ return 0;
+}
+
+/**
+ * This event callback is called after successful completion of libfc-
+ * initiated target login. bnx2fc can proceed with initiating the session
+ * establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = rdata->rport;
+ struct fc_rport_libfc_priv *rp;
+ struct bnx2fc_rport *tgt;
+ u32 port_id;
+
+ BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+ event, rdata->ids.port_id);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (!rport) {
+ printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+ break;
+ }
+
+ rp = rport->dd_data;
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ /*
+ * bnx2fc_rport structure doesn't exist for
+ * directory server.
+ * We should not come here, as lport will
+ * take care of fabric login
+ */
+ printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+ rdata->ids.port_id);
+ break;
+ }
+
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ BNX2FC_HBA_DBG(lport, "not FCP type target."
+ " not offloading\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+ " not offloading\n");
+ break;
+ }
+
+ /*
+ * The offload process is protected with the hba mutex.
+ * Use the same mutex lock for the upload process too.
+ */
+ mutex_lock(&hba->hba_mutex);
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ /* This can happen when ADISC finds the same target */
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+ mutex_unlock(&hba->hba_mutex);
+ return;
+ }
+
+ /*
+ * Offload the session. This is a blocking call, and will
+ * wait until the session is offloaded.
+ */
+ bnx2fc_offload_session(port, tgt, rdata);
+
+ BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ /*
+ * Session is offloaded and enabled. Map
+ * doorbell register for this target
+ */
+ BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+ /* This counter is protected with hba mutex */
+ hba->num_ofld_sess++;
+
+ set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ } else {
+ /*
+ * Offload or enable would have failed.
+ * In offload/enable completion path, the
+ * rport would have already been removed
+ */
+ BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+ "offloaded flag not set\n");
+ }
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ port_id = rdata->ids.port_id;
+ if (port_id == FC_FID_DIR_SERV)
+ break;
+
+ if (!rport) {
+ printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+ port_id);
+ break;
+ }
+ rp = rport->dd_data;
+ mutex_lock(&hba->hba_mutex);
+ /*
+ * Perform session upload. Note that rdata->peers is already
+ * removed from disc->rports list before we get this event.
+ */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ }
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+ bnx2fc_upload_session(port, tgt);
+ hba->num_ofld_sess--;
+ BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ /*
+ * Try to wake up the linkdown wait thread. If num_ofld_sess
+ * is 0, the waiting thread wakes up.
+ */
+ if ((hba->wait_for_link_down) &&
+ (hba->num_ofld_sess == 0)) {
+ wake_up_interruptible(&hba->shutdown_wait);
+ }
+ if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+ printk(KERN_ERR PFX "Relogin to the tgt\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_login(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ mutex_unlock(&hba->hba_mutex);
+
+ break;
+
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port: fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ struct fc_rport_priv *rdata;
+ int i;
+
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if ((tgt) && (tgt->port == port)) {
+ rdata = tgt->rdata;
+ if (rdata->ids.port_id == port_id) {
+ if (rdata->rp_state != RPORT_ST_DELETE) {
+ BNX2FC_TGT_DBG(tgt, "rport "
+ "obtained\n");
+ return tgt;
+ } else {
+ printk(KERN_ERR PFX "rport 0x%x "
+ "is in DELETED state\n",
+ rdata->ids.port_id);
+ return NULL;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba: pointer to adapter structure
+ * @tgt: pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ u32 conn_id, next;
+
+ /* called with hba mutex held */
+
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both the hba mutex and the hba lock. At least the hba mutex
+ * or the hba lock needs to be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ next = hba->next_conn_id;
+ conn_id = hba->next_conn_id++;
+ if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
+ hba->next_conn_id = 0;
+
+ while (hba->tgt_ofld_list[conn_id] != NULL) {
+ conn_id++;
+ if (conn_id == BNX2FC_NUM_MAX_SESS)
+ conn_id = 0;
+
+ if (conn_id == next) {
+ /* No free conn_ids are available */
+ spin_unlock_bh(&hba->hba_lock);
+ return -1;
+ }
+ }
+ hba->tgt_ofld_list[conn_id] = tgt;
+ tgt->fcoe_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+ return conn_id;
+}
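A stripped-down sketch of the circular-scan allocation pattern used by bnx2fc_alloc_conn_id() above, for illustration only; the table size and all names are placeholders rather than driver symbols, and the real code holds hba_lock around the scan.

/* Sketch: scan forward from the last hint until a free slot is found,
 * wrapping at the end of the table and giving up after one full pass. */
#include <stddef.h>

#define EXAMPLE_MAX_SESS 128		/* placeholder table size */

static void *example_tbl[EXAMPLE_MAX_SESS];
static unsigned int example_next_id;

static int example_alloc_id(void *owner)
{
	unsigned int next = example_next_id;
	unsigned int id = example_next_id++;

	if (example_next_id == EXAMPLE_MAX_SESS)
		example_next_id = 0;

	while (example_tbl[id] != NULL) {
		id++;
		if (id == EXAMPLE_MAX_SESS)
			id = 0;
		if (id == next)
			return -1;	/* one full pass, no free slot */
	}
	example_tbl[id] = owner;
	return (int)id;
}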
+
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
+{
+ /* called with hba mutex held */
+ spin_lock_bh(&hba->hba_lock);
+ hba->tgt_ofld_list[conn_id] = NULL;
+ hba->next_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+}
+
+/**
+ * bnx2fc_alloc_session_resc - Allocate qp resources for the session
+ *
+ */
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ dma_addr_t page;
+ int num_pages;
+ u32 *pbl;
+
+ /* Allocate and map SQ */
+ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
+ if (!tgt->sq) {
+ printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+ tgt->sq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->sq, 0, tgt->sq_mem_size);
+
+ /* Allocate and map CQ */
+ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
+ if (!tgt->cq) {
+ printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+ tgt->cq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->cq, 0, tgt->cq_mem_size);
+
+ /* Allocate and map RQ and RQ PBL */
+ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
+ if (!tgt->rq) {
+ printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+ tgt->rq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->rq, 0, tgt->rq_mem_size);
+
+ tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
+ if (!tgt->rq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+ tgt->rq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
+ num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ page = tgt->rq_dma;
+ pbl = (u32 *)tgt->rq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map XFERQ */
+ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ &tgt->xferq_dma, GFP_KERNEL);
+ if (!tgt->xferq) {
+ printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+ tgt->xferq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->xferq, 0, tgt->xferq_mem_size);
+
+ /* Allocate and map CONFQ & CONFQ PBL */
+ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ &tgt->confq_dma, GFP_KERNEL);
+ if (!tgt->confq) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+ tgt->confq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->confq, 0, tgt->confq_mem_size);
+
+ tgt->confq_pbl_size =
+ (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->confq_pbl_size =
+ (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
+ if (!tgt->confq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+ tgt->confq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+ num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ page = tgt->confq_dma;
+ pbl = (u32 *)tgt->confq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map ConnDB */
+ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
+ if (!tgt->conn_db) {
+ printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+ tgt->conn_db_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+
+ /* Allocate and map LCQ */
+ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
+
+ if (!tgt->lcq) {
+ printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+ tgt->lcq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+ /* Arm CQ */
+ tgt->conn_db->cq_arm.lo = -1;
+ tgt->conn_db->rq_prod = 0x8000;
+
+ return 0;
+
+mem_alloc_failure:
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ return -ENOMEM;
+}
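For illustration only: the RQ and CONFQ page base lists populated above hold one 64-bit DMA address per queue page, written as two 32-bit words with the low half first. A minimal sketch of that layout, with assumed names:

/* Sketch: fill a page base list the same way the PBL loops above do. */
#include <stdint.h>

static void example_fill_pbl(uint32_t *pbl, uint64_t first_page_dma,
			     unsigned int num_pages, unsigned int page_size)
{
	uint64_t page = first_page_dma;

	while (num_pages--) {
		*pbl++ = (uint32_t)page;		/* low 32 bits of the page address  */
		*pbl++ = (uint32_t)(page >> 32);	/* high 32 bits of the page address */
		page += page_size;
	}
}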
+
+/**
+ * bnx2fc_free_session_resc - free qp resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+ if (tgt->ctx_base) {
+ iounmap(tgt->ctx_base);
+ tgt->ctx_base = NULL;
+ }
+ /* Free LCQ */
+ if (tgt->lcq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ tgt->lcq, tgt->lcq_dma);
+ tgt->lcq = NULL;
+ }
+ /* Free connDB */
+ if (tgt->conn_db) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+ tgt->conn_db, tgt->conn_db_dma);
+ tgt->conn_db = NULL;
+ }
+ /* Free confq and confq pbl */
+ if (tgt->confq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+ tgt->confq_pbl, tgt->confq_pbl_dma);
+ tgt->confq_pbl = NULL;
+ }
+ if (tgt->confq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ tgt->confq, tgt->confq_dma);
+ tgt->confq = NULL;
+ }
+ /* Free XFERQ */
+ if (tgt->xferq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ tgt->xferq, tgt->xferq_dma);
+ tgt->xferq = NULL;
+ }
+ /* Free RQ PBL and RQ */
+ if (tgt->rq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ tgt->rq_pbl, tgt->rq_pbl_dma);
+ tgt->rq_pbl = NULL;
+ }
+ if (tgt->rq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ tgt->rq, tgt->rq_dma);
+ tgt->rq = NULL;
+ }
+ /* Free CQ */
+ if (tgt->cq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ tgt->cq, tgt->cq_dma);
+ tgt->cq = NULL;
+ }
+ /* Free SQ */
+ if (tgt->sq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ tgt->sq, tgt->sq_dma);
+ tgt->sq = NULL;
+ }
+}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index e1ca5fe7e6bb..cfd59023227b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -360,7 +360,7 @@ struct bnx2i_hba {
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_INIT_FAILED 31
unsigned int mtu_supported;
- #define BNX2I_MAX_MTU_SUPPORTED 1500
+ #define BNX2I_MAX_MTU_SUPPORTED 9000
struct Scsi_Host *shost;
@@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd);
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 96505e3ab986..1da34c019b8a 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -445,6 +445,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
}
/**
+ * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
+ * @conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Text request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_text_request *text_wqe;
+ struct iscsi_text *text_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ text_hdr = (struct iscsi_text *)mtask->hdr;
+ text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
+
+ memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
+
+ text_wqe->op_code = text_hdr->opcode;
+ text_wqe->op_attr = text_hdr->flags;
+ text_wqe->data_length = ntoh24(text_hdr->dlength);
+ text_wqe->itt = mtask->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
+ text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
+
+ text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+
+ text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ text_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ text_wqe->resp_buffer = dword;
+ text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ text_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ text_wqe->num_bds = 1;
+ text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
@@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+
+ memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
+
nopout_wqe->op_code = nopout_hdr->opcode;
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_hdr->lun[0];
+ u32 tmp = nopout_wqe->lun[0];
/* 57710 requires LUN field to be swapped */
- nopout_hdr->lun[0] = nopout_hdr->lun[1];
- nopout_hdr->lun[1] = tmp;
+ nopout_wqe->lun[0] = nopout_wqe->lun[1];
+ nopout_wqe->lun[1] = tmp;
}
nopout_wqe->itt = ((u16)task->itt |
@@ -1425,6 +1478,68 @@ done:
return 0;
}
+
+/**
+ * bnx2i_process_text_resp - this function handles iscsi text response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process an iSCSI Text Response CQE and complete it to the open-iscsi user daemon
+ */
+static int bnx2i_process_text_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_text_response *text;
+ struct iscsi_text_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ text = (struct bnx2i_text_response *) cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = text->op_code;
+ resp_hdr->flags = text->response_flags;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, text->data_length);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->ttt = cpu_to_be32(text->ttt);
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
+ pld_len = text->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
+ pld_len;
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
+ if (pad_len) {
+ int i = 0;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr -
+ bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+
/**
* bnx2i_process_tmf_resp - this function handles iscsi TMF response
* @session: iscsi session pointer
@@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
bnx2i_process_tmf_resp(session, bnx2i_conn,
qp->cq_cons_qe);
break;
+ case ISCSI_OP_TEXT_RSP:
+ bnx2i_process_text_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
case ISCSI_OP_LOGOUT_RSP:
bnx2i_process_logout_resp(session, bnx2i_conn,
qp->cq_cons_qe);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 72a7b2d4a439..1d24a2819736 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.6.2.2"
-#define DRV_MODULE_RELDATE "Nov 23, 2010"
+#define DRV_MODULE_VERSION "2.6.2.3"
+#define DRV_MODULE_RELDATE "Dec 31, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -29,7 +29,7 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
"Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
" iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
else
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
@@ -161,6 +163,21 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
+ if (!hba->cnic->max_iscsi_conn) {
+ printk(KERN_ALERT "bnx2i: dev %s does not support "
+ "iSCSI\n", hba->netdev->name);
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ mutex_lock(&bnx2i_dev_lock);
+ list_del_init(&hba->link);
+ adapter_count--;
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ mutex_unlock(&bnx2i_dev_lock);
+ bnx2i_free_hba(hba);
+ }
+ return;
+ }
bnx2i_send_fw_iscsi_init_msg(hba);
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f0dce26593eb..1809f9ccc4ce 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
case ISCSI_OP_SCSI_TMFUNC:
rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
break;
+ case ISCSI_OP_TEXT:
+ rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
+ break;
default:
iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
"send_gen: unsupported op 0x%x\n",
@@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
/**
- * bnx2i_conn_get_param - return iscsi connection parameter to caller
- * @cls_conn: pointer to iscsi cls conn
+ * bnx2i_ep_get_param - return iscsi ep parameter to caller
+ * @ep: pointer to iscsi endpoint
* @param: parameter type identifier
* @buf: buffer pointer
*
- * returns iSCSI connection parameters
+ * returns iSCSI ep parameters
*/
-static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct bnx2i_conn *bnx2i_conn = conn->dd_data;
- int len = 0;
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ struct bnx2i_hba *hba = bnx2i_ep->hba;
+ int len = -ENOTCONN;
- if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
- goto out;
+ if (!hba)
+ return -ENOTCONN;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%hu\n",
- bnx2i_conn->ep->cm_sk->dst_port);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
+ mutex_unlock(&hba->net_dev_lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%pI4\n",
- &bnx2i_conn->ep->cm_sk->dst_ip);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
+ mutex_unlock(&hba->net_dev_lock);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
-out:
+
return len;
}
@@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
cnic_dev_10g = 1;
switch (bnx2i_ep->state) {
- case EP_STATE_CONNECT_FAILED:
case EP_STATE_CLEANUP_FAILED:
case EP_STATE_OFLD_FAILED:
case EP_STATE_DISCONN_TIMEDOUT:
ret = 0;
break;
case EP_STATE_CONNECT_START:
+ case EP_STATE_CONNECT_FAILED:
case EP_STATE_CONNECT_COMPL:
case EP_STATE_ULP_UPDATE_START:
case EP_STATE_ULP_UPDATE_COMPL:
@@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.name = "bnx2i",
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
CAP_MULTI_R2T | CAP_DATADGST |
- CAP_DATA_PATH_OFFLOAD,
+ CAP_DATA_PATH_OFFLOAD |
+ CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH |
ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN |
@@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.bind_conn = bnx2i_conn_bind,
.destroy_conn = bnx2i_conn_destroy,
.set_param = iscsi_set_param,
- .get_conn_param = bnx2i_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = bnx2i_host_get_param,
.start_conn = bnx2i_conn_start,
@@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.xmit_task = bnx2i_task_xmit,
.get_stats = bnx2i_conn_get_stats,
/* TCP connect - disconnect - option-2 interface calls */
+ .get_ep_param = bnx2i_ep_get_param,
.ep_connect = bnx2i_ep_connect,
.ep_poll = bnx2i_ep_poll,
.ep_disconnect = bnx2i_ep_disconnect,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index a129a170b47b..fc2cdb62f53b 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* owner and name should be set already */
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
csk, idx, npods, gl);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
- /* hold on to the skb until we clear the ddp mapping */
- skb_get(skb);
+ if (!skb)
+ return -ENOMEM;
ulp_mem_io_set_hdr(skb, pm_addr);
cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
@@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
cdev, idx, npods, tag);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
if (!skb) {
- pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
+ pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
tag, idx, i, npods);
continue;
}
- ddp->gl_skb[idx] = NULL;
- memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
ulp_mem_io_set_hdr(skb, pm_addr);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(cdev->lldev, skb);
}
}
-static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++, idx++)
- if (ddp->gl_skb[idx]) {
- kfree_skb(ddp->gl_skb[idx]);
- ddp->gl_skb[idx] = NULL;
- }
-}
-
-static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
- int cnt, gfp_t gfp)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++) {
- struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
- PPOD_SIZE, 0, gfp);
- if (skb)
- ddp->gl_skb[idx + i] = skb;
- else {
- ddp_free_gl_skb(ddp, idx, i);
- return -ENOMEM;
- }
- }
- return 0;
-}
-
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
unsigned int tid, int pg_idx, bool reply)
{
@@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
}
tdev->ulp_iscsi = ddp;
- cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
- cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
index 5f5e3394b594..20593fd69d8f 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -24,10 +24,21 @@
extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
-#define cxgb3i_get_private_ipv4addr(ndev) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
-#define cxgb3i_set_private_ipv4addr(ndev, addr) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
+static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
+{
+ return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
+}
+
+static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
+ unsigned int addr)
+{
+ struct port_info *pi = (struct port_info *)netdev_priv(ndev);
+
+ pi->iscsic.flags = addr ? 1 : 0;
+ pi->iscsi_ipv4addr = addr;
+ if (addr)
+ memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
+}
struct cpl_iscsi_hdr_norss {
union opcode_tid ot;
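
The cxgb3i.h change above replaces the private-IPv4-address macros with typed static inline accessors (the setter now also latches a flag and copies the MAC address). A small userspace sketch of the same macro-to-inline pattern, with invented names:

#include <stdio.h>

/* Userspace stand-in for the per-port private data touched above. */
struct demo_port {
	unsigned int iscsi_ipv4addr;
};

/* Old style: a macro such as
 *   #define get_ipv4addr(p) ((p)->iscsi_ipv4addr)
 * New style, as in the patch above: typed static inline accessors, which
 * gain argument type checking and can carry extra logic. */
static inline unsigned int demo_get_ipv4addr(const struct demo_port *p)
{
	return p->iscsi_ipv4addr;
}

static inline void demo_set_ipv4addr(struct demo_port *p, unsigned int addr)
{
	p->iscsi_ipv4addr = addr;
}

int main(void)
{
	struct demo_port port = { 0 };

	demo_set_ipv4addr(&port, 0x0a000001);	/* 10.0.0.1 in host order */
	printf("0x%08x\n", demo_get_ipv4addr(&port));
	return 0;
}
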
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 8c04fada710b..f3a4cd7cf782 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.name = DRV_MODULE_NAME,
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
cxgbi_ddp_page_size_factor(pgsz_factor);
cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
- cdev->csk_ddp_free_gl_skb = NULL;
- cdev->csk_ddp_alloc_gl_skb = NULL;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d2ad3d676724..6c0c4bb4e9e8 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -470,7 +470,7 @@ static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
}
};
- if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL))
return NULL;
return rt;
@@ -543,6 +543,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->dst = dst;
csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
csk->daddr.sin_port = daddr->sin_port;
+ csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = rt->rt_src;
return csk;
@@ -1277,12 +1278,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
return idx;
}
- if (cdev->csk_ddp_alloc_gl_skb) {
- err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
- if (err < 0)
- goto unmark_entries;
- }
-
tag = cxgbi_ddp_tag_base(tformat, sw_tag);
tag |= idx << PPOD_IDX_SHIFT;
@@ -1293,11 +1288,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
hdr.page_offset = htonl(gl->offset);
err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
- if (err < 0) {
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
+ if (err < 0)
goto unmark_entries;
- }
ddp->idx_last = idx;
log_debug(1 << CXGBI_DBG_DDP,
@@ -1363,8 +1355,6 @@ static void ddp_destroy(struct kref *kref)
>> PPOD_PAGES_SHIFT;
pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
kfree(gl);
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, i, npods);
i += npods;
} else
i++;
@@ -1407,8 +1397,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev,
return -ENOMEM;
}
ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
- ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
- ppmax * sizeof(struct cxgbi_gather_list *));
cdev->ddp = ddp;
spin_lock_init(&ddp->map_lock);
@@ -1908,13 +1896,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
- u8 submode = 0;
+ if (hcrc || dcrc) {
+ u8 submode = 0;
- if (hcrc)
- submode |= 1;
- if (dcrc)
- submode |= 2;
- cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ if (hcrc)
+ submode |= 1;
+ if (dcrc)
+ submode |= 2;
+ cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ } else
+ cxgbi_skcb_ulp_mode(skb) = 0;
}
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
@@ -2210,32 +2201,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf)
{
- struct iscsi_conn *iconn = cls_conn->dd_data;
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_sock *csk;
int len;
log_debug(1 << CXGBI_DBG_ISCSI,
- "cls_conn 0x%p, param %d.\n", cls_conn, param);
+ "cls_conn 0x%p, param %d.\n", ep, param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%hu\n", iconn->portal_port);
- spin_unlock_bh(&iconn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%s\n", iconn->portal_address);
- spin_unlock_bh(&iconn->session->lock);
- break;
+ if (!cep)
+ return -ENOTCONN;
+
+ csk = cep->csk;
+ if (!csk)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &csk->daddr, param, buf);
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
-EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
+EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
@@ -2302,11 +2295,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
cxgbi_conn_max_xmit_dlength(conn);
cxgbi_conn_max_recv_dlength(conn);
- spin_lock_bh(&conn->session->lock);
- sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
- conn->portal_port = ntohs(csk->daddr.sin_port);
- spin_unlock_bh(&conn->session->lock);
-
log_debug(1 << CXGBI_DBG_ISCSI,
"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
cls_session, cls_conn, ep, cconn, csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index c57d59db000c..0a20fd5f7102 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -131,7 +131,6 @@ struct cxgbi_ddp_info {
unsigned int rsvd_tag_mask;
spinlock_t map_lock;
struct cxgbi_gather_list **gl_map;
- struct sk_buff **gl_skb;
};
#define DDP_PGIDX_MAX 4
@@ -536,8 +535,6 @@ struct cxgbi_device {
struct cxgbi_ddp_info *ddp;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
- void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
- int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
unsigned int, unsigned int,
struct cxgbi_gather_list *);
@@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
struct iscsi_cls_conn *, u64, int);
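
With the conn_param to ep_param conversion above, the address and port are now formatted from the socket's sockaddr (note the new sin_family assignment in libcxgbi.c) rather than from cached portal fields. A userspace sketch of formatting an address/port pair from a sockaddr_storage; this is an illustration, not the libiscsi helper, and handles IPv4 only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Format the address and port held in a sockaddr_storage, loosely in the
 * spirit of what the endpoint param callbacks above now return. */
static int format_addr(const struct sockaddr_storage *ss, char *buf, size_t len)
{
	if (ss->ss_family == AF_INET) {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)ss;
		char ip[INET_ADDRSTRLEN];

		inet_ntop(AF_INET, &sin->sin_addr, ip, sizeof(ip));
		return snprintf(buf, len, "%s:%hu", ip, ntohs(sin->sin_port));
	}
	return -1;	/* only IPv4 handled in this sketch */
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
	char out[64];

	memset(&ss, 0, sizeof(ss));
	sin->sin_family = AF_INET;		/* as now set on csk->daddr */
	sin->sin_port = htons(3260);		/* default iSCSI port */
	inet_pton(AF_INET, "192.168.0.10", &sin->sin_addr);

	if (format_addr(&ss, out, sizeof(out)) > 0)
		printf("%s\n", out);		/* 192.168.0.10:3260 */
	return 0;
}
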
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index b837c5b3c8f9..564e6ecd17c2 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -25,16 +25,9 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
-struct scsi_dh_devinfo_list {
- struct list_head node;
- char vendor[9];
- char model[17];
- struct scsi_device_handler *handler;
-};
-
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static LIST_HEAD(scsi_dh_dev_list);
+static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-
-static struct scsi_device_handler *
-scsi_dh_cache_lookup(struct scsi_device *sdev)
+static struct scsi_device_handler *get_device_handler_by_idx(int idx)
{
- struct scsi_dh_devinfo_list *tmp;
- struct scsi_device_handler *found_dh = NULL;
+ struct scsi_device_handler *tmp, *found = NULL;
spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
- if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
- !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
- found_dh = tmp->handler;
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (tmp->idx == idx) {
+ found = tmp;
break;
}
}
spin_unlock(&list_lock);
-
- return found_dh;
-}
-
-static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
- struct scsi_device *sdev)
-{
- int i, found = 0;
-
- for(i = 0; scsi_dh->devlist[i].vendor; i++) {
- if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
- strlen(scsi_dh->devlist[i].vendor)) &&
- !strncmp(sdev->model, scsi_dh->devlist[i].model,
- strlen(scsi_dh->devlist[i].model))) {
- found = 1;
- break;
- }
- }
return found;
}
@@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
struct scsi_device_handler *found_dh = NULL;
- struct scsi_dh_devinfo_list *tmp;
+ int idx;
- found_dh = scsi_dh_cache_lookup(sdev);
- if (found_dh)
- return found_dh;
+ idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
+ SCSI_DEVINFO_DH);
+ found_dh = get_device_handler_by_idx(idx);
- if (scsi_dh) {
- if (scsi_dh_handler_lookup(scsi_dh, sdev))
- found_dh = scsi_dh;
- } else {
- struct scsi_device_handler *tmp_dh;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
- if (scsi_dh_handler_lookup(tmp_dh, sdev))
- found_dh = tmp_dh;
- }
- spin_unlock(&list_lock);
- }
-
- if (found_dh) { /* If device is found, add it to the cache */
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (tmp) {
- strncpy(tmp->vendor, sdev->vendor, 8);
- strncpy(tmp->model, sdev->model, 16);
- tmp->vendor[8] = '\0';
- tmp->model[16] = '\0';
- tmp->handler = found_dh;
- spin_lock(&list_lock);
- list_add(&tmp->node, &scsi_dh_dev_list);
- spin_unlock(&list_lock);
- } else {
- found_dh = NULL;
- }
- }
+ if (scsi_dh && found_dh != scsi_dh)
+ found_dh = NULL;
return found_dh;
}
@@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
+ int i;
+
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
+ scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
+
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_add_keyed(0,
+ scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ NULL,
+ scsi_dh->idx,
+ SCSI_DEVINFO_DH);
+ }
+
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- struct scsi_dh_devinfo_list *tmp, *pos;
+ int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ SCSI_DEVINFO_DH);
+ }
+
spin_lock(&list_lock);
list_del(&scsi_dh->list);
- list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
- if (pos->handler == scsi_dh) {
- list_del(&pos->node);
- kfree(pos);
- }
- }
spin_unlock(&list_lock);
printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
@@ -576,6 +533,10 @@ static int __init scsi_dh_init(void)
{
int r;
+ r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
+ if (r)
+ return r;
+
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6b729324b8d3..7cae0bc85390 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error)
{
struct alua_dh_data *h = req->end_io_data;
struct scsi_sense_hdr sense_hdr;
- unsigned err = SCSI_DH_IO;
+ unsigned err = SCSI_DH_OK;
if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE)
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
+ err = SCSI_DH_IO;
goto done;
+ }
- if (err == SCSI_DH_IO && h->senselen > 0) {
+ if (h->senselen > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
if (!err) {
@@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error)
print_alua_state(h->state));
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -303,7 +306,6 @@ done:
static unsigned submit_stpg(struct alua_dh_data *h)
{
struct request *rq;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
int stpg_len = 8;
struct scsi_device *sdev = h->sdev;
@@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h)
rq->end_io_data = h;
blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
- return err;
+ return SCSI_DH_OK;
}
/*
@@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
{"NETAPP", "LUN"},
+ {"NETAPP", "LUN C-Mode"},
{"AIX", "NVDISK"},
+ {"Promise", "VTrak"},
{NULL, NULL}
};
@@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err = SCSI_DH_OK;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6faf472f7537..48441f6908a4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e3916641e627..b479f1eef968 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error)
}
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int ret;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ + sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
HP_SW_NAME);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5be3ae15cb71..f468b05cfaac 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
static struct request *rdac_failover_get(struct scsi_device *sdev,
- struct rdac_dh_data *h)
+ struct rdac_dh_data *h, struct list_head *list)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
+ struct rdac_queue_data *qdata;
+ u8 *lun_table;
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
@@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
rdac_pg->subpage_code = 0x1;
rdac_pg->page_len[0] = 0x01;
rdac_pg->page_len[1] = 0x28;
+ lun_table = rdac_pg->lun_table;
} else {
struct rdac_pg_legacy *rdac_pg;
@@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
rdac_pg->page_len = 0x68;
+ lun_table = rdac_pg->lun_table;
}
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
common->rdac_options = RDAC_FORCED_QUIESENCE;
+ list_for_each_entry(qdata, list, entry) {
+ lun_table[qdata->h->lun] = 0x81;
+ }
+
/* get request for block layer packet command */
rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
if (!rq)
@@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work)
int err, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
- u8 *lun_table;
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
- if (ctlr->use_ms10)
- lun_table = ctlr->mode_select.expanded.lun_table;
- else
- lun_table = ctlr->mode_select.legacy.lun_table;
-
retry:
err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev, h);
+ rq = rdac_failover_get(sdev, h, &list);
if (!rq)
goto done;
- list_for_each_entry(qdata, &list, entry) {
- lun_table[qdata->h->lun] = 0x81;
- }
-
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
@@ -800,7 +798,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
int err;
char array_name[ARRAY_LABEL_LEN];
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
@@ -906,4 +904,5 @@ module_exit(rdac_exit);
MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
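
The scsi_dh rework above drops the private vendor/model cache and keys handler lookup off the shared devinfo list built from each handler's devlist[] table. A minimal userspace sketch of the underlying prefix match against such a {vendor, model} table (table contents here are just examples):

#include <stdio.h>
#include <string.h>

struct dh_devlist {
	const char *vendor;
	const char *model;
};

/* Example table in the style of alua_dev_list above; entries are
 * illustrative, the real tables live in each handler. */
static const struct dh_devlist devlist[] = {
	{"NETAPP",  "LUN"},
	{"Promise", "VTrak"},
	{NULL, NULL}
};

/* Prefix-match a device's INQUIRY vendor/model strings against the table,
 * roughly the matching rule the keyed devinfo lookup encodes. */
static int devlist_match(const char *vendor, const char *model)
{
	int i;

	for (i = 0; devlist[i].vendor; i++)
		if (!strncmp(vendor, devlist[i].vendor,
			     strlen(devlist[i].vendor)) &&
		    !strncmp(model, devlist[i].model,
			     strlen(devlist[i].model)))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", devlist_match("NETAPP  ", "LUN C-Mode      ")); /* 1 */
	printf("%d\n", devlist_match("FOO     ", "BAR             ")); /* 0 */
	return 0;
}
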
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index 950f27615c76..f6d37d0271f7 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
+
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9f9600b67001..94fb4802e580 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
+#include <linux/workqueue.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
DEFINE_MUTEX(fcoe_config_mutex);
+static struct workqueue_struct *fcoe_wq;
+
/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);
@@ -72,7 +75,6 @@ static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
-static void fcoe_clean_pending_queue(struct fc_lport *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
@@ -80,7 +82,6 @@ static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
-static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
@@ -101,10 +102,11 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
-static int fcoe_create(const char *, struct kernel_param *);
-static int fcoe_destroy(const char *, struct kernel_param *);
-static int fcoe_enable(const char *, struct kernel_param *);
-static int fcoe_disable(const char *, struct kernel_param *);
+static bool fcoe_match(struct net_device *netdev);
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
+static int fcoe_destroy(struct net_device *netdev);
+static int fcoe_enable(struct net_device *netdev);
+static int fcoe_disable(struct net_device *netdev);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -117,24 +119,6 @@ static void fcoe_recv_frame(struct sk_buff *skb);
static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
-__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
-module_param_call(create_vn2vn, fcoe_create, NULL,
- (void *)FIP_MODE_VN2VN, S_IWUSR);
-__MODULE_PARM_TYPE(create_vn2vn, "string");
-MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
- "on an Ethernet interface");
-module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
-module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(enable, "string");
-MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
-module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(disable, "string");
-MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -145,8 +129,8 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
-static struct scsi_transport_template *fcoe_transport_template;
-static struct scsi_transport_template *fcoe_vport_transport_template;
+static struct scsi_transport_template *fcoe_nport_scsi_transport;
+static struct scsi_transport_template *fcoe_vport_scsi_transport;
static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
@@ -163,7 +147,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_transport_function = {
+struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -203,7 +187,7 @@ struct fc_function_template fcoe_transport_function = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_transport_function = {
+struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -356,10 +340,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
struct fcoe_interface *fcoe;
int err;
+ if (!try_module_get(THIS_MODULE)) {
+ FCOE_NETDEV_DBG(netdev,
+ "Could not get a reference to the module\n");
+ fcoe = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
- return NULL;
+ fcoe = ERR_PTR(-ENOMEM);
+ goto out_nomod;
}
dev_hold(netdev);
@@ -378,9 +370,15 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
- return NULL;
+ fcoe = ERR_PTR(err);
+ goto out_nomod;
}
+ goto out;
+
+out_nomod:
+ module_put(THIS_MODULE);
+out:
return fcoe;
}
@@ -442,6 +440,7 @@ static void fcoe_interface_release(struct kref *kref)
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
+ module_put(THIS_MODULE);
}
/**
@@ -505,7 +504,7 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
@@ -561,17 +560,6 @@ static int fcoe_lport_config(struct fc_lport *lport)
}
/**
- * fcoe_queue_timer() - The fcoe queue timer
- * @lport: The local port
- *
- * Calls fcoe_check_wait_queue on timeout
- */
-static void fcoe_queue_timer(ulong lport)
-{
- fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
-}
-
-/**
* fcoe_get_wwn() - Get the world wide name from LLD if it supports it
* @netdev: the associated net device
* @wwn: the output WWN
@@ -650,7 +638,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
/*
* Determine max frame size based on underlying device and optional
@@ -708,9 +696,9 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
if (lport->vport)
- lport->host->transportt = fcoe_vport_transport_template;
+ lport->host->transportt = fcoe_vport_scsi_transport;
else
- lport->host->transportt = fcoe_transport_template;
+ lport->host->transportt = fcoe_nport_scsi_transport;
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lport->host, dev);
@@ -760,7 +748,7 @@ bool fcoe_oem_match(struct fc_frame *fp)
static inline int fcoe_em_config(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_interface *oldfcoe = NULL;
struct net_device *old_real_dev, *cur_real_dev;
u16 min_xid = FCOE_MIN_XID;
@@ -844,7 +832,7 @@ skip_oem:
static void fcoe_if_destroy(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
@@ -886,7 +874,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Release the Scsi_Host */
scsi_host_put(lport->host);
- module_put(THIS_MODULE);
}
/**
@@ -941,8 +928,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
struct net_device *netdev = fcoe->netdev;
- struct fc_lport *lport = NULL;
+ struct fc_lport *lport, *n_port;
struct fcoe_port *port;
+ struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -952,13 +940,11 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
FCOE_NETDEV_DBG(netdev, "Create Interface\n");
- if (!npiv) {
- lport = libfc_host_alloc(&fcoe_shost_template,
- sizeof(struct fcoe_port));
- } else {
- lport = libfc_vport_create(vport,
- sizeof(struct fcoe_port));
- }
+ if (!npiv)
+ lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
+ else
+ lport = libfc_vport_create(vport, sizeof(*port));
+
if (!lport) {
FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
rc = -ENOMEM;
@@ -966,7 +952,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
port = lport_priv(lport);
port->lport = lport;
- port->fcoe = fcoe;
+ port->priv = fcoe;
+ port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
+ port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
/* configure a fc_lport including the exchange manager */
@@ -1009,24 +997,27 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
- if (!npiv) {
- /*
- * fcoe_em_alloc() and fcoe_hostlist_add() both
- * need to be atomic with respect to other changes to the
- * hostlist since fcoe_em_alloc() looks for an existing EM
- * instance on host list updated by fcoe_hostlist_add().
- *
- * This is currently handled through the fcoe_config_mutex
- * begin held.
- */
-
+ /*
+ * fcoe_em_alloc() and fcoe_hostlist_add() both
+ * need to be atomic with respect to other changes to the
+ * hostlist since fcoe_em_alloc() looks for an existing EM
+ * instance on host list updated by fcoe_hostlist_add().
+ *
+ * This is currently handled through the fcoe_config_mutex
+ * being held.
+ */
+ if (!npiv)
/* lport exch manager allocation */
rc = fcoe_em_config(lport);
- if (rc) {
- FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
- "for the interface\n");
- goto out_lp_destroy;
- }
+ else {
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ rc = fc_exch_mgr_list_clone(n_port, lport);
+ }
+
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
+ goto out_lp_destroy;
}
fcoe_interface_get(fcoe);
@@ -1050,11 +1041,12 @@ out:
static int __init fcoe_if_init(void)
{
/* attach to scsi transport */
- fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
- fcoe_vport_transport_template =
- fc_attach_transport(&fcoe_vport_transport_function);
+ fcoe_nport_scsi_transport =
+ fc_attach_transport(&fcoe_nport_fc_functions);
+ fcoe_vport_scsi_transport =
+ fc_attach_transport(&fcoe_vport_fc_functions);
- if (!fcoe_transport_template) {
+ if (!fcoe_nport_scsi_transport) {
printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
return -ENODEV;
}
@@ -1071,10 +1063,10 @@ static int __init fcoe_if_init(void)
*/
int __exit fcoe_if_exit(void)
{
- fc_release_transport(fcoe_transport_template);
- fc_release_transport(fcoe_vport_transport_template);
- fcoe_transport_template = NULL;
- fcoe_vport_transport_template = NULL;
+ fc_release_transport(fcoe_nport_scsi_transport);
+ fc_release_transport(fcoe_vport_scsi_transport);
+ fcoe_nport_scsi_transport = NULL;
+ fcoe_vport_scsi_transport = NULL;
return 0;
}
@@ -1361,108 +1353,22 @@ err2:
}
/**
- * fcoe_start_io() - Start FCoE I/O
- * @skb: The packet to be transmitted
- *
- * This routine is called from the net device to start transmitting
- * FCoE packets.
- *
- * Returns: 0 for success
- */
-static inline int fcoe_start_io(struct sk_buff *skb)
-{
- struct sk_buff *nskb;
- int rc;
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- rc = dev_queue_xmit(nskb);
- if (rc != 0)
- return rc;
- kfree_skb(skb);
- return 0;
-}
-
-/**
- * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
* @skb: The packet to be transmitted
* @tlen: The total length of the trailer
*
- * This routine allocates a page for frame trailers. The page is re-used if
- * there is enough room left on it for the current trailer. If there isn't
- * enough buffer left a new page is allocated for the trailer. Reference to
- * the page from this function as well as the skbs using the page fragments
- * ensure that the page is freed at the appropriate time.
- *
* Returns: 0 for success
*/
-static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
- struct page *page;
+ int rc;
fps = &get_cpu_var(fcoe_percpu);
- page = fps->crc_eof_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (!page) {
- put_cpu_var(fcoe_percpu);
- return -ENOMEM;
- }
- fps->crc_eof_page = page;
- fps->crc_eof_offset = 0;
- }
-
- get_page(page);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
- fps->crc_eof_offset, tlen);
- skb->len += tlen;
- skb->data_len += tlen;
- skb->truesize += tlen;
- fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
-
- if (fps->crc_eof_offset >= PAGE_SIZE) {
- fps->crc_eof_page = NULL;
- fps->crc_eof_offset = 0;
- put_page(page);
- }
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
put_cpu_var(fcoe_percpu);
- return 0;
-}
-/**
- * fcoe_fc_crc() - Calculates the CRC for a given frame
- * @fp: The frame to be checksumed
- *
- * This uses crc32() routine to calculate the CRC for a frame
- *
- * Return: The 32 bit CRC value
- */
-u32 fcoe_fc_crc(struct fc_frame *fp)
-{
- struct sk_buff *skb = fp_skb(fp);
- struct skb_frag_struct *frag;
- unsigned char *data;
- unsigned long off, len, clen;
- u32 crc;
- unsigned i;
-
- crc = crc32(~0, skb->data, skb_headlen(skb));
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- off = frag->page_offset;
- len = frag->size;
- while (len > 0) {
- clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
- data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
- KM_SKB_DATA_SOFTIRQ);
- crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
- kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
- off += clen;
- len -= clen;
- }
- }
- return crc;
+ return rc;
}
/**
@@ -1485,7 +1391,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int tlen; /* trailer length */
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1526,7 +1432,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* copy port crc and eof to the skb buff */
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag;
- if (fcoe_get_paged_crc_eof(skb, tlen)) {
+ if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
kfree_skb(skb);
return -ENOMEM;
}
@@ -1606,6 +1512,56 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
}
/**
+ * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ *
+ * Return: 0 on passing filtering checks
+ */
+static inline int fcoe_filter_frames(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fcoe_interface *fcoe;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb = (struct sk_buff *)fp;
+ struct fcoe_dev_stats *stats;
+
+ /*
+ * We only check the CRC here if no offload has already verified it.
+ * Solicited FCP data is passed through unchecked, since the FCP
+ * layer checks it during the copy.
+ */
+ if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ else
+ fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
+ return 0;
+
+ fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
+ if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
+ return -EINVAL;
+ }
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
+ le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ return 0;
+ }
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ return -EINVAL;
+}
+
+/**
* fcoe_recv_frame() - process a single received frame
* @skb: frame to process
*/
@@ -1615,7 +1571,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fcoe_dev_stats *stats;
- struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_port *port;
@@ -1646,7 +1601,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
* was done in fcoe_rcv already.
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- fh = (struct fc_frame_header *) skb_transport_header(skb);
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
@@ -1679,35 +1633,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
if (pskb_trim(skb, fr_len))
goto drop;
- /*
- * We only check CRC if no offload is available and if it is
- * it's solicited data, in which case, the FCP layer would
- * check it during the copy.
- */
- if (lport->crc_offload &&
- skb->ip_summed == CHECKSUM_UNNECESSARY)
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
- else
- fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
-
- fh = fc_frame_header_get(fp);
- if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
- fh->fh_type != FC_TYPE_FCP) &&
- (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
- printk(KERN_WARNING "fcoe: dropping "
- "frame with CRC error\n");
- stats->InvalidCRCCount++;
- goto drop;
- }
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ if (!fcoe_filter_frames(lport, fp)) {
+ put_cpu();
+ fc_exch_recv(lport, fp);
+ return;
}
- put_cpu();
- fc_exch_recv(lport, fp);
- return;
-
drop:
stats->ErrorFrames++;
put_cpu();
@@ -1746,64 +1676,6 @@ int fcoe_percpu_receive_thread(void *arg)
}
/**
- * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
- * @lport: The local port whose backlog is to be cleared
- *
- * This empties the wait_queue, dequeues the head of the wait_queue queue
- * and calls fcoe_start_io() for each packet. If all skb have been
- * transmitted it returns the qlen. If an error occurs it restores
- * wait_queue (to try again later) and returns -1.
- *
- * The wait_queue is used when the skb transmit fails. The failed skb
- * will go in the wait_queue which will be emptied by the timer function or
- * by the next skb transmit.
- */
-static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
-{
- struct fcoe_port *port = lport_priv(lport);
- int rc;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (skb)
- __skb_queue_tail(&port->fcoe_pending_queue, skb);
-
- if (port->fcoe_pending_queue_active)
- goto out;
- port->fcoe_pending_queue_active = 1;
-
- while (port->fcoe_pending_queue.qlen) {
- /* keep qlen > 0 until fcoe_start_io succeeds */
- port->fcoe_pending_queue.qlen++;
- skb = __skb_dequeue(&port->fcoe_pending_queue);
-
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- rc = fcoe_start_io(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (rc) {
- __skb_queue_head(&port->fcoe_pending_queue, skb);
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- break;
- }
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- }
-
- if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
- lport->qfull = 0;
- if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
- mod_timer(&port->timer, jiffies + 2);
- port->fcoe_pending_queue_active = 0;
-out:
- if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- lport->qfull = 1;
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- return;
-}
-
-/**
* fcoe_dev_setup() - Setup the link change notification interface
*/
static void fcoe_dev_setup(void)
@@ -1874,7 +1746,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
case NETDEV_FEAT_CHANGE:
@@ -1900,39 +1772,16 @@ out:
}
/**
- * fcoe_if_to_netdev() - Parse a name buffer to get a net device
- * @buffer: The name of the net device
- *
- * Returns: NULL or a ptr to net_device
- */
-static struct net_device *fcoe_if_to_netdev(const char *buffer)
-{
- char *cp;
- char ifname[IFNAMSIZ + 2];
-
- if (buffer) {
- strlcpy(ifname, buffer, IFNAMSIZ);
- cp = ifname + strlen(ifname);
- while (--cp >= ifname && *cp == '\n')
- *cp = '\0';
- return dev_get_by_name(&init_net, ifname);
- }
- return NULL;
-}
-
-/**
* fcoe_disable() - Disables a FCoE interface
- * @buffer: The name of the Ethernet interface to be disabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device of the Ethernet interface to be disabled
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+static int fcoe_disable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -1948,16 +1797,9 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
}
#endif
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -1969,7 +1811,6 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
} else
rc = -ENODEV;
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -1977,17 +1818,15 @@ out_nodev:
/**
* fcoe_enable() - Enables a FCoE interface
- * @buffer: The name of the Ethernet interface to be enabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device of the Ethernet interface to be enabled
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+static int fcoe_enable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2002,17 +1841,9 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -2023,7 +1854,6 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
else if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2031,17 +1861,15 @@ out_nodev:
/**
* fcoe_destroy() - Destroy a FCoE interface
- * @buffer: The name of the Ethernet interface to be destroyed
- * @kp: The associated kernel parameter
+ * @netdev: The net_device of the Ethernet interface to be destroyed
*
- * Called from sysfs.
+ * Called from fcoe transport
*
* Returns: 0 for success
*/
-static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+static int fcoe_destroy(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2056,32 +1884,21 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
rtnl_unlock();
rc = -ENODEV;
- goto out_putdev;
+ goto out_nodev;
}
fcoe_interface_cleanup(fcoe);
list_del(&fcoe->list);
/* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(fcoe->ctlr.lp);
-
-out_putdev:
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2104,27 +1921,39 @@ static void fcoe_destroy_work(struct work_struct *work)
}
/**
+ * fcoe_match() - Check if the FCoE is supported on the given netdevice
+ * @netdev: The net_device of the Ethernet interface to check
+ *
+ * Called from fcoe transport.
+ *
+ * Returns: always returns true as this is the default FCoE transport,
+ * i.e., it supports all netdevs.
+ */
+static bool fcoe_match(struct net_device *netdev)
+{
+ return true;
+}
+
+/**
* fcoe_create() - Create a fcoe interface
- * @buffer: The name of the Ethernet interface to create on
- * @kp: The associated kernel param
+ * @netdev: The net_device of the Ethernet interface to create the FCoE instance on
+ * @fip_mode: The FIP mode for this creation
*
- * Called from sysfs.
+ * Called from fcoe transport
*
* Returns: 0 for success
*/
-static int fcoe_create(const char *buffer, struct kernel_param *kp)
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
- enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
int rc;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
- struct net_device *netdev;
mutex_lock(&fcoe_config_mutex);
if (!rtnl_trylock()) {
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
#ifdef CONFIG_FCOE_MODULE
@@ -2135,31 +1964,20 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
*/
if (THIS_MODULE->state != MODULE_STATE_LIVE) {
rc = -ENODEV;
- goto out_nomod;
- }
-#endif
-
- if (!try_module_get(THIS_MODULE)) {
- rc = -EINVAL;
- goto out_nomod;
- }
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
goto out_nodev;
}
+#endif
/* look for existing lport */
if (fcoe_hostlist_lookup(netdev)) {
rc = -EEXIST;
- goto out_putdev;
+ goto out_nodev;
}
fcoe = fcoe_interface_create(netdev, fip_mode);
- if (!fcoe) {
- rc = -ENOMEM;
- goto out_putdev;
+ if (IS_ERR(fcoe)) {
+ rc = PTR_ERR(fcoe);
+ goto out_nodev;
}
lport = fcoe_if_create(fcoe, &netdev->dev, 0);
@@ -2188,18 +2006,13 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
* should be holding a reference taken in fcoe_if_create().
*/
fcoe_interface_put(fcoe);
- dev_put(netdev);
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
out_free:
fcoe_interface_put(fcoe);
-out_putdev:
- dev_put(netdev);
out_nodev:
- module_put(THIS_MODULE);
-out_nomod:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2214,8 +2027,7 @@ out_nomod:
*/
int fcoe_link_speed_update(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -2246,8 +2058,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
*/
int fcoe_link_ok(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
if (netif_oper_up(netdev))
return 0;
@@ -2311,24 +2122,6 @@ void fcoe_percpu_clean(struct fc_lport *lport)
}
/**
- * fcoe_clean_pending_queue() - Dequeue a skb and free it
- * @lport: The local port to dequeue a skb on
- */
-void fcoe_clean_pending_queue(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct sk_buff *skb;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- kfree_skb(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- }
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
-}
-
-/**
* fcoe_reset() - Reset a local port
* @shost: The SCSI host associated with the local port to be reset
*
@@ -2337,7 +2130,13 @@ void fcoe_clean_pending_queue(struct fc_lport *lport)
int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
- fc_lport_reset(lport);
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
return 0;
}
@@ -2395,12 +2194,24 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
if (!fcoe) {
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
list_add_tail(&fcoe->list, &fcoe_hostlist);
}
return 0;
}
+
+static struct fcoe_transport fcoe_sw_transport = {
+ .name = {FCOE_TRANSPORT_DEFAULT},
+ .attached = false,
+ .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
+ .match = fcoe_match,
+ .create = fcoe_create,
+ .destroy = fcoe_destroy,
+ .enable = fcoe_enable,
+ .disable = fcoe_disable,
+};
+
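
The ops table above is the new glue between fcoe.ko and libfcoe's transport layer: instead of parsing its own module parameters, fcoe now hands libfcoe a set of callbacks keyed on the netdev. A minimal sketch of another transport doing the same, using only the callback signatures visible in this patch (all my_* names are hypothetical, and the bool return type of match is assumed):

    static bool my_match(struct net_device *netdev)
    {
            /* Claim the device; a real transport would check for its own HW. */
            return true;
    }

    static int my_create(struct net_device *netdev, enum fip_state fip_mode)
    {
            /* Allocate an interface and lport here, as fcoe_create() does. */
            return -EOPNOTSUPP;
    }

    static int my_destroy(struct net_device *netdev) { return 0; }
    static int my_enable(struct net_device *netdev)  { return 0; }
    static int my_disable(struct net_device *netdev) { return 0; }

    static struct fcoe_transport my_transport = {
            .name     = {"my_fcoe"},
            .attached = false,
            .list     = LIST_HEAD_INIT(my_transport.list),
            .match    = my_match,
            .create   = my_create,
            .destroy  = my_destroy,
            .enable   = my_enable,
            .disable  = my_disable,
    };

    /* fcoe_transport_attach(&my_transport) at init,
     * fcoe_transport_detach(&my_transport) at exit. */
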
/**
* fcoe_init() - Initialize fcoe.ko
*
@@ -2412,6 +2223,18 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
+ fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ if (!fcoe_wq)
+ return -ENOMEM;
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&fcoe_sw_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ return rc;
+ }
+
mutex_lock(&fcoe_config_mutex);
for_each_possible_cpu(cpu) {
@@ -2442,6 +2265,7 @@ out_free:
fcoe_percpu_thread_destroy(cpu);
}
mutex_unlock(&fcoe_config_mutex);
+ destroy_workqueue(fcoe_wq);
return rc;
}
module_init(fcoe_init);
@@ -2467,7 +2291,7 @@ static void __exit fcoe_exit(void)
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2478,16 +2302,21 @@ static void __exit fcoe_exit(void)
mutex_unlock(&fcoe_config_mutex);
- /* flush any asyncronous interface destroys,
- * this should happen after the netdev notifier is unregistered */
- flush_scheduled_work();
- /* That will flush out all the N_Ports on the hostlist, but now we
- * may have NPIV VN_Ports scheduled for destruction */
- flush_scheduled_work();
+ /*
+ * destroy_work items may be chained, but destroy_workqueue()
+ * can take care of them. Just kill the fcoe_wq.
+ */
+ destroy_workqueue(fcoe_wq);
- /* detach from scsi transport
- * must happen after all destroys are done, therefor after the flush */
+ /*
+ * Detaching from the scsi transport must happen after all
+ * destroys are done on the fcoe_wq. destroy_workqueue will
+ * ensure the fcoe_wq is flushed.
+ */
fcoe_if_exit();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&fcoe_sw_transport);
}
module_exit(fcoe_exit);
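
The private fcoe_wq matters most at unload time: a destroy_work item may queue further destroy_work items (NPIV VN_Ports), and destroy_workqueue() drains all of them, where the old double flush_scheduled_work() could only approximate that. A hedged sketch of the same pattern in isolation (the my_* names and the some_port struct are illustrative only):

    static struct workqueue_struct *my_wq;          /* like fcoe_wq */

    static int __init my_init(void)
    {
            my_wq = alloc_workqueue("my_wq", 0, 0);
            return my_wq ? 0 : -ENOMEM;
    }

    static void my_schedule_destroy(struct some_port *port)
    {
            /* queue on the private workqueue instead of schedule_work() */
            queue_work(my_wq, &port->destroy_work);
    }

    static void __exit my_exit(void)
    {
            /*
             * destroy_workqueue() waits for queued work to finish, including
             * work items queued by other work items on the same queue.
             */
            destroy_workqueue(my_wq);
    }
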
@@ -2559,7 +2388,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -2592,7 +2421,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fcoe_port *port = lport_priv(n_port);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
@@ -2632,7 +2461,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
return 0;
}
@@ -2736,7 +2565,7 @@ static void fcoe_set_port_id(struct fc_lport *lport,
u32 port_id, struct fc_frame *fp)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index c69b2c56c2d1..408a6fd78fb4 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -24,7 +24,7 @@
#include <linux/kthread.h>
#define FCOE_MAX_QUEUE_DEPTH 256
-#define FCOE_LOW_QUEUE_DEPTH 32
+#define FCOE_MIN_QUEUE_DEPTH 32
#define FCOE_WORD_TO_BYTE 4
@@ -40,12 +40,6 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-/*
- * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
- * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
- */
-#define FCOE_MTU 2158
-
unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
@@ -71,21 +65,6 @@ do { \
netdev->name, ##args);)
/**
- * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
- * @thread: The thread context
- * @fcoe_rx_list: The queue of pending packets to process
- * @page: The memory page for calculating frame trailer CRCs
- * @crc_eof_offset: The offset into the CRC page pointing to available
- * memory for a new trailer
- */
-struct fcoe_percpu_s {
- struct task_struct *thread;
- struct sk_buff_head fcoe_rx_list;
- struct page *crc_eof_page;
- int crc_eof_offset;
-};
-
-/**
* struct fcoe_interface - A FCoE interface
* @list: Handle for a list of FCoE interfaces
* @netdev: The associated net device
@@ -108,30 +87,6 @@ struct fcoe_interface {
struct kref kref;
};
-/**
- * struct fcoe_port - The FCoE private structure
- * @fcoe: The associated fcoe interface
- * @lport: The associated local port
- * @fcoe_pending_queue: The pending Rx queue of skbs
- * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @timer: The queue timer
- * @destroy_work: Handle for work context
- * (to prevent RTNL deadlocks)
- * @data_srt_addr: Source address for data
- *
- * An instance of this structure is to be allocated along with the
- * Scsi_Host and libfc fc_lport structures.
- */
-struct fcoe_port {
- struct fcoe_interface *fcoe;
- struct fc_lport *lport;
- struct sk_buff_head fcoe_pending_queue;
- u8 fcoe_pending_queue_active;
- struct timer_list timer;
- struct work_struct destroy_work;
- u8 data_src_addr[ETH_ALEN];
-};
-
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
/**
@@ -140,7 +95,8 @@ struct fcoe_port {
*/
static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
{
- return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
+ return ((struct fcoe_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 625c6be25396..c93f007e702f 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -44,9 +44,7 @@
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
-MODULE_LICENSE("GPL v2");
+#include "libfcoe.h"
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
@@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
-unsigned int libfcoe_debug_logging;
-module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-
-#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
-do { \
- if (unlikely(libfcoe_debug_logging & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
-} while (0)
-
-#define LIBFCOE_DBG(fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
-
-#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
-
-static const char *fcoe_ctlr_states[] = {
+static const char * const fcoe_ctlr_states[] = {
[FIP_ST_DISABLED] = "DISABLED",
[FIP_ST_LINK_WAIT] = "LINK_WAIT",
[FIP_ST_AUTO] = "AUTO",
@@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_size_desc size;
- } __attribute__((packed)) desc;
- } __attribute__((packed)) *sol;
+ } __packed desc;
+ } __packed * sol;
u32 fcoe_size;
skb = dev_alloc_skb(sizeof(*sol));
@@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac;
- } __attribute__((packed)) *kal;
+ } __packed * kal;
struct fip_vn_desc *vn;
u32 len;
struct fc_lport *lp;
@@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
struct ethhdr eth;
struct fip_header fip;
struct fip_encaps encaps;
- } __attribute__((packed)) *cap;
+ } __packed * cap;
struct fc_frame_header *fh;
struct fip_mac_desc *mac;
struct fcoe_fcf *fcf;
@@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_vn_desc vn;
- } __attribute__((packed)) *frame;
+ } __packed * frame;
struct fip_fc4_feat *ff;
struct fip_size_desc *size;
u32 fcp_feat;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
new file mode 100644
index 000000000000..258684101bfd
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -0,0 +1,770 @@
+/*
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+static int fcoe_transport_create(const char *, struct kernel_param *);
+static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+static int fcoe_transport_enable(const char *, struct kernel_param *);
+static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
+
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(ft_mutex);
+static LIST_HEAD(fcoe_netdevs);
+static DEFINE_MUTEX(fn_mutex);
+
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
+__MODULE_PARM_TYPE(show, "string");
+MODULE_PARM_DESC(show, " Show attached FCoE transports");
+
+module_param_call(create, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_FABRIC, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
+
+module_param_call(create_vn2vn, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_VN2VN, S_IWUSR);
+__MODULE_PARM_TYPE(create_vn2vn, "string");
+MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
+ "on an Ethernet interface");
+
+module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+
+module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+
+module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
+
+/* notification function for packets from net device */
+static struct notifier_block libfcoe_notifier = {
+ .notifier_call = libfcoe_device_notification,
+};
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksummed
+ *
+ * This uses the crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+ struct sk_buff *skb = fp_skb(fp);
+ struct skb_frag_struct *frag;
+ unsigned char *data;
+ unsigned long off, len, clen;
+ u32 crc;
+ unsigned i;
+
+ crc = crc32(~0, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ off = frag->page_offset;
+ len = frag->size;
+ while (len > 0) {
+ clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+ data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+ KM_SKB_DATA_SOFTIRQ);
+ crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+ kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+ off += clen;
+ len -= clen;
+ }
+ }
+ return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
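
The loop above relies on crc32() being chainable: seeding once with ~0 and then feeding the linear area followed by each page fragment produces the same value as a single pass over the whole frame. A hedged, fragment-free illustration (the hdr/payload buffers are hypothetical):

    u32 crc;

    crc = crc32(~0, hdr, hdr_len);              /* linear skb area    */
    crc = crc32(crc, payload, payload_len);     /* one paged fragment */
    /* crc now equals crc32(~0, hdr||payload, hdr_len + payload_len) */
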
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine hands a clone of the skb to the net device for transmission.
+ * The original skb is freed only when the transmit succeeds, so the caller
+ * still owns it and can requeue it on failure.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ rc = dev_queue_xmit(nskb);
+ if (rc != 0)
+ return rc;
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
+
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue
+ * @lport: The local port to dequeue a skb on
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ kfree_skb(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ }
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ *
+ * This empties the wait_queue by dequeuing skbs from its head and calling
+ * fcoe_start_io() on each. If a transmit fails, the skb is put back at the
+ * head of the queue and retried later.
+ *
+ * The wait_queue is used when the skb transmit fails. The failed skb
+ * will go in the wait_queue which will be emptied by the timer function or
+ * by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ int rc;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (skb)
+ __skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+ if (port->fcoe_pending_queue_active)
+ goto out;
+ port->fcoe_pending_queue_active = 1;
+
+ while (port->fcoe_pending_queue.qlen) {
+ /* keep qlen > 0 until fcoe_start_io succeeds */
+ port->fcoe_pending_queue.qlen++;
+ skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (rc) {
+ __skb_queue_head(&port->fcoe_pending_queue, skb);
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ break;
+ }
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ }
+
+ if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+ lport->qfull = 0;
+ if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+ mod_timer(&port->timer, jiffies + 2);
+ port->fcoe_pending_queue_active = 0;
+out:
+ if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+ lport->qfull = 1;
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
+
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+ fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
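
fcoe_queue_timer() only does something once a port's timer points at it; a hedged sketch of how a transport could arm it with the timer API of this era, using the fcoe_port fields visible in this patch (my_port_timer_init is hypothetical):

    static void my_port_timer_init(struct fc_lport *lport)
    {
            struct fcoe_port *port = lport_priv(lport);

            setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
            /* fcoe_check_wait_queue() re-arms it while a backlog remains. */
            mod_timer(&port->timer, jiffies + 2);
    }
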
+
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps: The fcoe context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough room left, a new page is allocated for the trailer. The references
+ * held by this function and by the skbs using the page fragments ensure
+ * that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps)
+{
+ struct page *page;
+
+ page = fps->crc_eof_page;
+ if (!page) {
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+
+ fps->crc_eof_page = page;
+ fps->crc_eof_offset = 0;
+ }
+
+ get_page(page);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+ fps->crc_eof_offset, tlen);
+ skb->len += tlen;
+ skb->data_len += tlen;
+ skb->truesize += tlen;
+ fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+ if (fps->crc_eof_offset >= PAGE_SIZE) {
+ fps->crc_eof_page = NULL;
+ fps->crc_eof_offset = 0;
+ put_page(page);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
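
For scale, a back-of-envelope on the recycling above, assuming a 4 KiB page and an 8-byte struct fcoe_crc_eof (sizes assumed, not stated in this patch):

    /*
     *   PAGE_SIZE / sizeof(struct fcoe_crc_eof) = 4096 / 8 = 512
     *
     * so one CRC page serves roughly 512 frames before crc_eof_offset
     * reaches PAGE_SIZE and the function drops its own page reference.
     */
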
+
+/**
+ * fcoe_transport_lookup - find an fcoe transport that matches a netdev
+ * @netdev: The netdev to match against the attached transports
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+
+ list_for_each_entry(ft, &fcoe_transports, list)
+ if (ft->match && ft->match(netdev))
+ return ft;
+ return NULL;
+}
+
+/**
+ * fcoe_transport_attach - Attaches an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_attach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
+ ft->name);
+ rc = -EEXIST;
+ goto out_attach;
+ }
+
+ /* Add default transport to the tail */
+ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
+ list_add(&ft->list, &fcoe_transports);
+ else
+ list_add_tail(&ft->list, &fcoe_transports);
+
+ ft->attached = true;
+ LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_attach);
+
+/**
+ * fcoe_transport_detach - Detaches an FCoE transport
+ * @ft: The fcoe transport to be detached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_detach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (!ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
+ ft->name);
+ rc = -ENODEV;
+ goto out_attach;
+ }
+
+ list_del(&ft->list);
+ ft->attached = false;
+ LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+
+}
+EXPORT_SYMBOL(fcoe_transport_detach);
+
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
+{
+ int i, j;
+ struct fcoe_transport *ft = NULL;
+
+ i = j = sprintf(buffer, "Attached FCoE transports:");
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list) {
+ i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
+ if (i >= PAGE_SIZE)
+ break;
+ }
+ mutex_unlock(&ft_mutex);
+ if (i == j)
+ i += snprintf(&buffer[i], IFNAMSIZ, "none");
+ return i;
+}
+
+static int __init fcoe_transport_init(void)
+{
+ register_netdevice_notifier(&libfcoe_notifier);
+ return 0;
+}
+
+static int __exit fcoe_transport_exit(void)
+{
+ struct fcoe_transport *ft;
+
+ unregister_netdevice_notifier(&libfcoe_notifier);
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list)
+ printk(KERN_ERR "FCoE transport %s is still attached!\n",
+ ft->name);
+ mutex_unlock(&ft_mutex);
+ return 0;
+}
+
+
+static int fcoe_add_netdev_mapping(struct net_device *netdev,
+ struct fcoe_transport *ft)
+{
+ struct fcoe_netdev_mapping *nm;
+
+ nm = kmalloc(sizeof(*nm), GFP_KERNEL);
+ if (!nm) {
+ printk(KERN_ERR "Unable to allocate netdev_mapping\n");
+ return -ENOMEM;
+ }
+
+ nm->netdev = netdev;
+ nm->ft = ft;
+
+ mutex_lock(&fn_mutex);
+ list_add(&nm->list, &fcoe_netdevs);
+ mutex_unlock(&fn_mutex);
+ return 0;
+}
+
+
+static void fcoe_del_netdev_mapping(struct net_device *netdev)
+{
+ struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+ if (nm->netdev == netdev) {
+ list_del(&nm->list);
+ kfree(nm);
+ mutex_unlock(&fn_mutex);
+ return;
+ }
+ }
+ mutex_unlock(&fn_mutex);
+}
+
+
+/**
+ * fcoe_netdev_map_lookup - find the fcoe transport on which an fcoe instance
+ * was created for a given netdev
+ * @netdev: The net device to look up
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_netdev_mapping *nm;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry(nm, &fcoe_netdevs, list) {
+ if (netdev == nm->netdev) {
+ ft = nm->ft;
+ mutex_unlock(&fn_mutex);
+ return ft;
+ }
+ }
+
+ mutex_unlock(&fn_mutex);
+ return NULL;
+}
+
+/**
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
+ *
+ * Returns: NULL or a ptr to net_device
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+ char *cp;
+ char ifname[IFNAMSIZ + 2];
+
+ if (buffer) {
+ strlcpy(ifname, buffer, IFNAMSIZ);
+ cp = ifname + strlen(ifname);
+ while (--cp >= ifname && *cp == '\n')
+ *cp = '\0';
+ return dev_get_by_name(&init_net, ifname);
+ }
+ return NULL;
+}
+
+/**
+ * libfcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event: The type of event
+ * @ptr: The net device that the event was on
+ *
+ * This function is called on net device events; on NETDEV_UNREGISTER it
+ * drops the netdev-to-transport mapping for the departing device.
+ *
+ * Returns: NOTIFY_OK
+ */
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct net_device *netdev = ptr;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
+ netdev->name);
+ fcoe_del_netdev_mapping(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+
+/**
+ * fcoe_transport_create() - Create a fcoe interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp: The associated kernel param
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's create function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ rc = fcoe_add_netdev_mapping(netdev, ft);
+ if (rc) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
+ if (rc)
+ fcoe_del_netdev_mapping(netdev);
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
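
From userspace these hooks are reached through the module parameters declared at the top of this file; a minimal C sketch of driving the create hook (the sysfs path follows the usual /sys/module/<name>/parameters layout and is assumed here rather than taken from the patch):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/module/libfcoe/parameters/create";
            const char *ifname = "eth0\n";      /* trailing '\n' is stripped */
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, ifname, strlen(ifname)) < 0) {
                    close(fd);
                    return 1;
            }
            return close(fd) ? 1 : 0;
    }
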
+
+/**
+ * fcoe_transport_destroy() - Destroy a FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's destroy function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->disable ? ft->disable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->enable ? ft->enable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * libfcoe_init() - Initialization routine for libfcoe.ko
+ */
+static int __init libfcoe_init(void)
+{
+ fcoe_transport_init();
+
+ return 0;
+}
+module_init(libfcoe_init);
+
+/**
+ * libfcoe_exit() - Tear down libfcoe.ko
+ */
+static void __exit libfcoe_exit(void)
+{
+ fcoe_transport_exit();
+}
+module_exit(libfcoe_exit);
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
new file mode 100644
index 000000000000..6af5fc3a17d8
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -0,0 +1,31 @@
+#ifndef _FCOE_LIBFCOE_H_
+#define _FCOE_LIBFCOE_H_
+
+extern unsigned int libfcoe_debug_logging;
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ printk(KERN_INFO "host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
+
+#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
+ printk(KERN_INFO "%s: " fmt, \
+ __func__, ##args);)
+
+#endif /* _FCOE_LIBFCOE_H_ */
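
The new LIBFCOE_TRANSPORT_LOGGING level is bit 0x04 of the debug_logging bit mask, so transport messages can be switched on at runtime by OR-ing that bit into the writable debug_logging module parameter. A hedged example of a gated call site:

    /* Emitted only when (libfcoe_debug_logging & LIBFCOE_TRANSPORT_LOGGING). */
    LIBFCOE_TRANSPORT_DBG("no transport claimed %s\n", netdev->name);
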
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 92f185081e62..671cde9d4060 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -37,7 +37,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.145"
+#define DRV_VERSION "1.5.0.1"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index db710148d156..b576be734e2e 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_dev),
+ sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 12deffccb8da..c30591f84a06 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -74,6 +74,10 @@ static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
"Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+ "Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
@@ -155,6 +159,10 @@ static ssize_t unique_id_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf);
+static ssize_t host_show_commands_outstanding(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
@@ -173,6 +181,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
@@ -180,6 +192,10 @@ static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+ host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+ host_show_transport_mode, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
@@ -191,6 +207,8 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_rescan,
&dev_attr_firmware_revision,
+ &dev_attr_commands_outstanding,
+ &dev_attr_transport_mode,
NULL,
};
@@ -291,17 +309,38 @@ static ssize_t host_show_firmware_revision(struct device *dev,
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
+static ssize_t host_show_commands_outstanding(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ctlr_info *h = shost_to_hba(shost);
+
+ return snprintf(buf, 20, "%d\n", h->commands_outstanding);
+}
+
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%s\n",
+ h->transMethod & CFGTBL_Trans_Performant ?
+ "performant" : "simple");
+}
+
/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct hlist_head *list, struct CommandList *c)
+static inline void addQ(struct list_head *list, struct CommandList *c)
{
- hlist_add_head(&c->list, list);
+ list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h)
{
u32 a;
- if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h);
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
@@ -325,7 +364,7 @@ static inline u32 next_command(struct ctlr_info *h)
*/
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
- if (likely(h->transMethod == CFGTBL_Trans_Performant))
+ if (likely(h->transMethod & CFGTBL_Trans_Performant))
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
@@ -344,9 +383,9 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
static inline void removeQ(struct CommandList *c)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
+ if (WARN_ON(list_empty(&c->list)))
return;
- hlist_del_init(&c->list);
+ list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
@@ -1130,6 +1169,10 @@ static void complete_scsi_command(struct CommandList *cp,
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
break;
+ case CMD_UNABORTABLE:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "Command unabortable\n");
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1160,7 +1203,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[PERF_MODE_INT];
+ sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
@@ -1295,6 +1338,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
case CMD_TIMEOUT:
dev_warn(d, "cp %p timed out\n", cp);
break;
+ case CMD_UNABORTABLE:
+ dev_warn(d, "Command unabortable\n");
+ break;
default:
dev_warn(d, "cp %p returned unknown status %x\n", cp,
ei->CommandStatus);
@@ -1595,6 +1641,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
return 0;
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
@@ -1609,8 +1657,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
return 0;
}
- memset(scsi3addr, 0, 8);
- scsi3addr[3] = target;
if (hpsa_update_device_info(h, scsi3addr, this_device))
return 0;
(*nmsa2xxx_enclosures)++;
@@ -2199,7 +2245,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
c->cmdindex = i;
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2237,7 +2283,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
}
memset(c->err_info, 0, sizeof(*c->err_info));
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2267,7 +2313,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
pci_free_consistent(h->pdev, sizeof(*c->err_info),
c->err_info, (dma_addr_t) temp64.val);
pci_free_consistent(h->pdev, sizeof(*c),
- c, (dma_addr_t) c->busaddr);
+ c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT
@@ -2281,6 +2327,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2317,6 +2364,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2433,15 +2481,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -EFAULT;
- }
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf,
+ iocommand.buf_size)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+ } else {
+ memset(buff, 0, iocommand.buf_size);
}
- } else
- memset(buff, 0, iocommand.buf_size);
+ }
c = cmd_special_alloc(h);
if (c == NULL) {
kfree(buff);
@@ -2487,8 +2537,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
cmd_special_free(h, c);
return -EFAULT;
}
-
- if (iocommand.Request.Type.Direction == XFER_READ) {
+ if (iocommand.Request.Type.Direction == XFER_READ &&
+ iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
@@ -2581,14 +2631,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
-
- if (ioc->buf_size > 0) {
- c->Header.SGList = sg_used;
- c->Header.SGTotal = sg_used;
- } else {
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
+ c->Header.SGList = c->Header.SGTotal = sg_used;
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
c->Header.Tag.lower = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
@@ -2605,7 +2648,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ if (sg_used)
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
@@ -2614,7 +2658,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EFAULT;
goto cleanup1;
}
- if (ioc->Request.Type.Direction == XFER_READ) {
+ if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
@@ -2810,8 +2854,8 @@ static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, struct CommandList, list);
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
@@ -2867,20 +2911,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
static inline u32 hpsa_tag_contains_index(u32 tag)
{
-#define DIRECT_LOOKUP_BIT 0x10
return tag & DIRECT_LOOKUP_BIT;
}
static inline u32 hpsa_tag_to_index(u32 tag)
{
-#define DIRECT_LOOKUP_SHIFT 5
return tag >> DIRECT_LOOKUP_SHIFT;
}
-static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
-#define HPSA_ERROR_BITS 0x03
- return tag & ~HPSA_ERROR_BITS;
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return tag & ~HPSA_SIMPLE_ERROR_BITS;
+ return tag & ~HPSA_PERF_ERROR_BITS;
}
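
The two masks differ only in how many low-order status bits the controller reuses; a worked example with the constants from hpsa_cmd.h further down (DIRECT_LOOKUP_SHIFT is 5):

    /* tag = 0x12b
     *   performant: HPSA_PERF_ERROR_BITS   = (1 << 5) - 1 = 0x1f
     *               0x12b & ~0x1f = 0x120
     *   simple:     HPSA_SIMPLE_ERROR_BITS = 0x03
     *               0x12b & ~0x03 = 0x128
     */
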
/* process completion of an indexed ("direct lookup") command */
@@ -2904,10 +2950,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
{
u32 tag;
struct CommandList *c = NULL;
- struct hlist_node *tmp;
- tag = hpsa_tag_discard_error_bits(raw_tag);
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ tag = hpsa_tag_discard_error_bits(h, raw_tag);
+ list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
finish_cmd(c, raw_tag);
return next_command(h);
@@ -2957,7 +3002,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
@@ -3023,7 +3071,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if (hpsa_tag_discard_error_bits(tag) == paddr32)
+ if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
@@ -3055,38 +3103,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
#define hpsa_noop(p) hpsa_message(p, 3, 0)
-static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
- int pos;
- u16 control = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSI_FLAGS_ENABLE);
- }
- }
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSIX_FLAGS_ENABLE);
- }
- }
-
- return 0;
-}
-
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void * __iomem vaddr, bool use_doorbell)
{
@@ -3142,17 +3158,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
- u16 saved_config_space[32];
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support, active_transport;
- int rc, i;
+ int rc;
struct CfgTable __iomem *cfgtable;
bool use_doorbell;
u32 board_id;
+ u16 command_register;
/* For controllers as old as the P600, this is very nearly
* the same thing as
@@ -3162,14 +3178,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
- * but we can't use these nice canned kernel routines on
- * kexec, because they also check the MSI/MSI-X state in PCI
- * configuration space and do the wrong thing when it is
- * set/cleared. Also, the pci_save/restore_state functions
- * violate the ordering requirements for restoring the
- * configuration space from the CCISS document (see the
- * comment below). So we roll our own ....
- *
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
@@ -3182,13 +3190,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* likely not be happy. Just forbid resetting this conjoined mess.
* The 640x isn't really supported by hpsa anyway.
*/
- hpsa_lookup_board_id(pdev, &board_id);
+ rc = hpsa_lookup_board_id(pdev, &board_id);
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Not resetting device.\n");
+ return -ENODEV;
+ }
if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
return -ENOTSUPP;
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
-
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ /* Turn the board off. This is so that later pci_restore_state()
+ * won't turn the board on before the rest of config space is ready.
+ */
+ pci_disable_device(pdev);
+ pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
@@ -3214,46 +3230,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- /* The doorbell reset seems to cause lockups on some Smart
- * Arrays (e.g. P410, P410i, maybe others). Until this is
- * fixed or at least isolated, avoid the doorbell reset.
- */
- use_doorbell = 0;
-
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
- /* Restore the PCI configuration space. The Open CISS
- * Specification says, "Restore the PCI Configuration
- * Registers, offsets 00h through 60h. It is important to
- * restore the command register, 16-bits at offset 04h,
- * last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i.
- */
- for (i = 0; i < 32; i++) {
- if (i == 2 || i == 3)
- continue;
- pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ goto unmap_cfgtable;
}
- wmb();
- pci_write_config_word(pdev, 4, saved_config_space[2]);
+ pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
+ /* Wait for board to become not ready, then ready. */
+ dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become not ready\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become ready\n");
+ goto unmap_cfgtable;
+ }
+ dev_info(&pdev->dev, "board ready.\n");
+
/* Controller should be in simple mode at this point. If it's not,
* It means we're on one of those controllers which doesn't support
* the doorbell reset method and on which the PCI power management reset
* method doesn't work (P800, for example.)
- * In those cases, pretend the reset worked and hope for the best.
+ * In those cases, don't try to proceed, as it generally doesn't work.
*/
active_transport = readl(&cfgtable->TransportActive);
if (active_transport & PERFORMANT_MODE) {
dev_warn(&pdev->dev, "Unable to successfully reset controller,"
- " proceeding anyway.\n");
- rc = -ENOTSUPP;
+ " Ignoring controller.\n");
+ rc = -ENODEV;
}
unmap_cfgtable:
@@ -3386,7 +3403,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[PERF_MODE_INT] = h->pdev->irq;
+ h->intr[h->intr_mode] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -3438,18 +3455,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
{
- int i;
+ int i, iterations;
u32 scratchpad;
+ if (wait_for_ready)
+ iterations = HPSA_BOARD_READY_ITERATIONS;
+ else
+ iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
- for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
- scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == HPSA_FIRMWARE_READY)
- return 0;
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != HPSA_FIRMWARE_READY)
+ return 0;
+ }
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
}
- dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
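
With the constants added to hpsa.h later in this patch, the two wait_for_ready polarities translate into very different polling budgets:

    /* ready:     HPSA_BOARD_READY_WAIT_SECS     = 120 s / 100 ms = 1200 polls
     * not ready: HPSA_BOARD_NOT_READY_WAIT_SECS =  10 s / 100 ms =  100 polls
     */
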
@@ -3497,6 +3524,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in memory limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
@@ -3571,16 +3603,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
+ u32 doorbell_value;
+ unsigned long flags;
/* under certain very rare conditions, this can take awhile.
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & CFGTBL_ChangeReq))
break;
/* delay and try again */
- msleep(10);
+ usleep_range(10000, 20000);
}
}
@@ -3603,6 +3640,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
"unable to get board into simple mode\n");
return -ENODEV;
}
+ h->transMethod = CFGTBL_Trans_Simple;
return 0;
}
@@ -3641,7 +3679,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
err = -ENOMEM;
goto err_out_free_res;
}
- err = hpsa_wait_for_board_ready(h);
+ err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = hpsa_find_cfgtables(h);
@@ -3710,8 +3748,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
return 0; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
- if (hpsa_reset_msi(pdev))
- return -ENODEV;
/* Now try to get the controller to respond to a no-op */
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
@@ -3749,8 +3785,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->pdev = pdev;
h->busy_initializing = 1;
- INIT_HLIST_HEAD(&h->cmpQ);
- INIT_HLIST_HEAD(&h->reqQ);
+ h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+ INIT_LIST_HEAD(&h->cmpQ);
+ INIT_LIST_HEAD(&h->reqQ);
+ spin_lock_init(&h->lock);
+ spin_lock_init(&h->scan_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
@@ -3777,20 +3816,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (h->msix_vector || h->msi_vector)
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
IRQF_DISABLED, h->devname, h);
else
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
IRQF_DISABLED, h->devname, h);
if (rc) {
dev_err(&pdev->dev, "unable to get irq %d for %s\n",
- h->intr[PERF_MODE_INT], h->devname);
+ h->intr[h->intr_mode], h->devname);
goto clean2;
}
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
- h->intr[PERF_MODE_INT], dac ? "" : " not");
+ h->intr[h->intr_mode], dac ? "" : " not");
h->cmd_pool_bits =
kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3810,8 +3849,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
}
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
- spin_lock_init(&h->lock);
- spin_lock_init(&h->scan_lock);
init_waitqueue_head(&h->scan_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
@@ -3843,7 +3880,7 @@ clean4:
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
h->busy_initializing = 0;
@@ -3887,7 +3924,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
@@ -3989,7 +4026,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+ u32 use_short_tags)
{
int i;
unsigned long register_value;
@@ -4037,7 +4075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant,
+ writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
@@ -4047,12 +4085,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
" performant mode\n");
return;
}
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ if (hpsa_simple_mode)
+ return;
+
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
@@ -4072,11 +4116,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|| (h->blockFetchTable == NULL))
goto clean_up;
- hpsa_enter_performant_mode(h);
-
- /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
+ hpsa_enter_performant_mode(h,
+ trans_support & CFGTBL_Trans_use_short_tags);
return;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 19586e189f0f..621a1530054a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -72,11 +72,12 @@ struct ctlr_info {
unsigned int intr[4];
unsigned int msix_vector;
unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
/* queue and queue Info */
- struct hlist_head reqQ;
- struct hlist_head cmpQ;
+ struct list_head reqQ;
+ struct list_head cmpQ;
unsigned int Qdepth;
unsigned int maxQsinceinit;
unsigned int maxSG;
@@ -154,12 +155,16 @@ struct ctlr_info {
* HPSA_BOARD_READY_ITERATIONS are derived from those.
*/
#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_BOARD_NOT_READY_ITERATIONS \
+ ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
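(A quick worked value from the constants above: the new macro evaluates to (10 * 1000) / 100 = 100 poll iterations, versus 1200 iterations for the existing HPSA_BOARD_READY_ITERATIONS.)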
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index f5c4c3cc0530..18464900e761 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -104,6 +104,7 @@
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -265,6 +266,7 @@ struct ErrorInfo {
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
#define HPSA_ERROR_BIT 0x02
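(With DIRECT_LOOKUP_SHIFT at 5, the new mask works out to ~((1 << 5) - 1) = ~0x1f; applied to a tag it keeps the command index stored in the upper bits and strips the low five flag bits, which is where DIRECT_LOOKUP_BIT (0x10) and HPSA_ERROR_BIT (0x02) live.)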
struct ctlr_info; /* defined in hpsa.h */
@@ -291,7 +293,7 @@ struct CommandList {
struct ctlr_info *h;
int cmd_type;
long cmdindex;
- struct hlist_node list;
+ struct list_head list;
struct request *rq;
struct completion *waiting;
void *scsi_cmd;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9c5c8be72231..0621238fac4a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
ipr_clear_res_target(res);
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
- } else if (!res->sdev) {
+ } else if (!res->sdev || res->del_from_ml) {
res->add_to_ml = 1;
if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q);
@@ -3104,7 +3104,10 @@ restart:
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
@@ -6219,11 +6222,10 @@ static struct ata_port_operations ipr_sata_ops = {
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
- .pio_mask = 0x10, /* pio4 */
- .mwdma_mask = 0x07,
- .udma_mask = 0x7f, /* udma0-6 */
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+ .pio_mask = ATA_PIO4_ONLY,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
.port_ops = &ipr_sata_ops
};
@@ -8865,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- flush_scheduled_work();
+ flush_work_sync(&ioa_cfg->work_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index fec47de72535..a860452a8f71 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
iscsi_sw_tcp_release_conn(conn);
}
-static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
- char *buf, int *port,
- int (*getname)(struct socket *,
- struct sockaddr *,
- int *addrlen))
-{
- struct sockaddr_storage *addr;
- struct sockaddr_in6 *sin6;
- struct sockaddr_in *sin;
- int rc = 0, len;
-
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- if (getname(sock, (struct sockaddr *) addr, &len)) {
- rc = -ENODEV;
- goto free_addr;
- }
-
- switch (addr->ss_family) {
- case AF_INET:
- sin = (struct sockaddr_in *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
- *port = be16_to_cpu(sin->sin_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- case AF_INET6:
- sin6 = (struct sockaddr_in6 *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI6", &sin6->sin6_addr);
- *port = be16_to_cpu(sin6->sin6_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- }
-free_addr:
- kfree(addr);
- return rc;
-}
-
static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
"sockfd_lookup failed %d\n", err);
return -EEXIST;
}
- /*
- * copy these values now because if we drop the session
- * userspace may still want to query the values since we will
- * be using them for the reconnect
- */
- err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
- &conn->portal_port, kernel_getpeername);
- if (err)
- goto free_socket;
-
- err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
- &ihost->local_port, kernel_getsockname);
- if (err)
- goto free_socket;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
goto free_socket;
+ spin_lock_bh(&session->lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
+ spin_unlock_bh(&session->lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- int len;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sockaddr_in6 addr;
+ int rc, len;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%hu\n", conn->portal_port);
- spin_unlock_bh(&conn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%s\n", conn->portal_address);
+ if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ spin_unlock_bh(&conn->session->lock);
+ return -ENOTCONN;
+ }
+ rc = kernel_getpeername(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
spin_unlock_bh(&conn->session->lock);
- break;
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
- return len;
+ return 0;
+}
+
+static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+ struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct sockaddr_in6 addr;
+ int rc, len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ spin_lock_bh(&session->lock);
+ conn = session->leadconn;
+ if (!conn) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+ tcp_conn = conn->dd_data;
+
+ tcp_sw_conn = tcp_conn->dd_data;
+ if (!tcp_sw_conn->sock) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ spin_unlock_bh(&session->lock);
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return 0;
}
static void
@@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
+ struct iscsi_sw_tcp_host *tcp_sw_host;
struct Scsi_Host *shost;
if (ep) {
@@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
return NULL;
}
- shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
+ sizeof(struct iscsi_sw_tcp_host), 1);
if (!shost)
return NULL;
shost->transportt = iscsi_sw_tcp_scsi_transport;
@@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
shost->can_queue = session->scsi_cmds_max;
if (iscsi_tcp_r2tpool_alloc(session))
@@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_sw_tcp_conn_stop,
/* iscsi host params */
- .get_host_param = iscsi_host_get_param,
+ .get_host_param = iscsi_sw_tcp_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 94644bad0ed7..666fe09378fa 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn {
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
+struct iscsi_sw_tcp_host {
+ struct iscsi_session *session;
+};
+
struct iscsi_sw_tcp_hdrbuf {
struct iscsi_hdr hdrbuf;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d21367d3305f..28231badd9e6 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
-struct workqueue_struct *fc_exch_workqueue;
+static struct workqueue_struct *fc_exch_workqueue;
/*
* Structure and function definitions for managing Fibre Channel Exchanges
@@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+/*
+ * Set the response handler for the exchange associated with a sequence.
+ */
+static void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp = resp;
+ ep->arg = arg;
+ spin_unlock_bh(&ep->ex_lock);
+}
+
/**
* fc_seq_exch_abort() - Abort an exchange and sequence
* @req_sp: The sequence to be aborted
@@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work)
if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
- if (!rc) {
- /* delete the exchange if it's already being aborted */
- fc_exch_delete(ep);
- return;
- }
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done;
}
@@ -1266,6 +1279,8 @@ free:
* @fp: The request frame
*
* On success, the sequence pointer will be returned and also in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
*/
static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -1283,6 +1298,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
}
/**
+ * fc_seq_release() - Release the hold
+ * @sp: The sequence.
+ */
+static void fc_seq_release(struct fc_seq *sp)
+{
+ fc_exch_release(fc_seq_exch(sp));
+}
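A rough usage sketch of the new sequence hooks, as wired up via the templates in fc_exch_init() below; this is illustrative only, and my_resp/my_arg are hypothetical placeholders:

	struct fc_seq *sp;

	sp = lport->tt.seq_assign(lport, fp);	/* takes a hold on the exchange */
	if (sp) {
		lport->tt.seq_set_resp(sp, my_resp, my_arg);
		/* ... handle the frame ... */
		lport->tt.seq_release(sp);	/* drop the hold from seq_assign */
	}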
+
+/**
* fc_exch_recv_req() - Handler for an incoming request
* @lport: The local port that received the request
* @mp: The EM that the exchange is on
@@ -2151,6 +2175,7 @@ err:
fc_exch_mgr_del(ema);
return -ENOMEM;
}
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
/**
* fc_exch_mgr_alloc() - Allocate an exchange manager
@@ -2254,16 +2279,45 @@ void fc_exch_mgr_free(struct fc_lport *lport)
EXPORT_SYMBOL(fc_exch_mgr_free);
/**
+ * fc_find_ema() - Look up the appropriate Exchange Manager Anchor for a
+ * frame, based on its xid
+ * @f_ctl: The f_ctl field from the received frame header
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+ struct fc_lport *lport,
+ struct fc_frame_header *fh)
+{
+ struct fc_exch_mgr_anchor *ema;
+ u16 xid;
+
+ if (f_ctl & FC_FC_EX_CTX)
+ xid = ntohs(fh->fh_ox_id);
+ else {
+ xid = ntohs(fh->fh_rx_id);
+ if (xid == FC_XID_UNKNOWN)
+ return list_entry(lport->ema_list.prev,
+ typeof(*ema), ema_list);
+ }
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if ((xid >= ema->mp->min_xid) &&
+ (xid <= ema->mp->max_xid))
+ return ema;
+ }
+ return NULL;
+}
+/**
* fc_exch_recv() - Handler for received frames
* @lport: The local port the frame was received on
- * @fp: The received frame
+ * @fp: The received frame
*/
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch_mgr_anchor *ema;
- u32 f_ctl, found = 0;
- u16 oxid;
+ u32 f_ctl;
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
@@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
}
f_ctl = ntoh24(fh->fh_f_ctl);
- oxid = ntohs(fh->fh_ox_id);
- if (f_ctl & FC_FC_EX_CTX) {
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if ((oxid >= ema->mp->min_xid) &&
- (oxid <= ema->mp->max_xid)) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- FC_LPORT_DBG(lport, "Received response for out "
- "of range oxid:%hx\n", oxid);
- fc_frame_free(fp);
- return;
- }
- } else
- ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
+ ema = fc_find_ema(f_ctl, lport, fh);
+ if (!ema) {
+ FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
+ "f_ctl <0x%x>, xid <0x%x>\n",
+ f_ctl,
+ (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) :
+ ntohs(fh->fh_rx_id));
+ fc_frame_free(fp);
+ return;
+ }
/*
* If frame is marked invalid, just drop it.
@@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_start_next)
lport->tt.seq_start_next = fc_seq_start_next;
+ if (!lport->tt.seq_set_resp)
+ lport->tt.seq_set_resp = fc_seq_set_resp;
+
if (!lport->tt.exch_seq_send)
lport->tt.exch_seq_send = fc_exch_seq_send;
@@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_assign)
lport->tt.seq_assign = fc_seq_assign;
+ if (!lport->tt.seq_release)
+ lport->tt.seq_release = fc_seq_release;
+
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
@@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init);
/**
* fc_setup_exch_mgr() - Setup an exchange manager
*/
-int fc_setup_exch_mgr()
+int fc_setup_exch_mgr(void)
{
fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
0, SLAB_HWCACHE_ALIGN, NULL);
@@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr()
/**
* fc_destroy_exch_mgr() - Destroy an exchange manager
*/
-void fc_destroy_exch_mgr()
+void fc_destroy_exch_mgr(void)
{
destroy_workqueue(fc_exch_workqueue);
kmem_cache_destroy(fc_em_cachep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5962d1a5a674..b1b03af158bf 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -42,7 +42,7 @@
#include "fc_libfc.h"
-struct kmem_cache *scsi_pkt_cachep;
+static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE 0 /* cmd is free */
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
if (fsp) {
memset(fsp, 0, sizeof(*fsp));
fsp->lp = lport;
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
INIT_LIST_HEAD(&fsp->list);
@@ -1201,6 +1202,7 @@ unlock:
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
int rc = FAILED;
+ unsigned long ticks_left;
if (fc_fcp_send_abort(fsp))
return FAILED;
@@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
fsp->wait_for_comp = 1;
spin_unlock_bh(&fsp->scsi_pkt_lock);
- rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+ ticks_left = wait_for_completion_timeout(&fsp->tm_done,
+ FC_SCSI_TM_TOV);
spin_lock_bh(&fsp->scsi_pkt_lock);
fsp->wait_for_comp = 0;
- if (!rc) {
+ if (!ticks_left) {
FC_FCP_DBG(fsp, "target abort cmd failed\n");
- rc = FAILED;
} else if (fsp->state & FC_SRB_ABORTED) {
FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
@@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
*
* scsi-eh will escalate for when either happens.
*/
- goto out;
+ return;
}
if (fc_fcp_lock_pkt(fsp))
@@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
/**
* fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
* @cmd: The scsi_cmnd to be executed
- * @done: The callback function to be called when the scsi_cmnd is complete
*
- * This is the i/o strategy routine, called by the SCSI layer. This routine
- * is called with the host_lock held.
+ * This is the i/o strategy routine, called by the SCSI layer.
*/
-static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
{
- struct fc_lport *lport;
+ struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
struct fc_rport_libfc_priv *rpriv;
@@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
int rc = 0;
struct fcoe_dev_stats *stats;
- lport = shost_priv(sc_cmd->device->host);
-
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
return 0;
}
- spin_unlock_irq(lport->host->host_lock);
if (!*(struct fc_remote_port **)rport->dd_data) {
/*
@@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
goto out;
}
@@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* build the libfc request pkt
*/
fsp->cmd = sc_cmd; /* save the cmd */
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
- fsp->xfer_ddp = FC_XID_UNKNOWN;
- sc_cmd->scsi_done = done;
/*
* set up the transfer length
@@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
- spin_lock_irq(lport->host->host_lock);
return rc;
}
-
-DEF_SCSI_QCMD(fc_queuecommand)
EXPORT_SYMBOL(fc_queuecommand);
/**
@@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
* the sc passed in is not setup for execution like when sent
* through the queuecommand callout.
*/
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
/*
@@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
}
EXPORT_SYMBOL(fc_fcp_destroy);
-int fc_setup_fcp()
+int fc_setup_fcp(void)
{
int rc = 0;
@@ -2261,7 +2252,7 @@ int fc_setup_fcp()
return rc;
}
-void fc_destroy_fcp()
+void fc_destroy_fcp(void)
{
if (scsi_pkt_cachep)
kmem_cache_destroy(scsi_pkt_cachep);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 6a48c28e4420..b7735129f1f3 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -35,6 +35,27 @@ unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+ BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+ [0] = &fc_rport_t0_prov,
+ [FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+ [FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
/**
* libfc_init() - Initialize libfc.ko
*/
@@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify "service_params" of the specified lport
+ * if a service provider (target provider) is registered with libfc
+ * for the specified FC-4 type
+ * @lport: Local port whose service_params need to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+ struct fc4_prov *prov_entry;
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ BUG_ON(!lport);
+ prov_entry = fc_passive_prov[type];
+ if (type == FC_TYPE_FCP) {
+ if (prov_entry && prov_entry->recv)
+ lport->service_params |= FCP_SPPF_TARG_FCN;
+ }
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+ struct fc_lport *lport;
+
+ mutex_lock(&fc_prov_mutex);
+ list_for_each_entry(lport, &fc_local_ports, lport_list)
+ notify(lport, arg);
+ mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ struct fc4_prov **prov_entry;
+ int ret = 0;
+
+ if (type >= FC_FC4_PROV_SIZE)
+ return -EINVAL;
+ mutex_lock(&fc_prov_mutex);
+ prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+ if (*prov_entry)
+ ret = -EBUSY;
+ else
+ *prov_entry = prov;
+ mutex_unlock(&fc_prov_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ mutex_lock(&fc_prov_mutex);
+ if (prov->recv)
+ rcu_assign_pointer(fc_passive_prov[type], NULL);
+ else
+ rcu_assign_pointer(fc_active_prov[type], NULL);
+ mutex_unlock(&fc_prov_mutex);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_add_tail(&lport->lport_list, &fc_local_ports);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_ADD, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_del(&lport->lport_list);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_DEL, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
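A minimal registration sketch for a would-be FC-4 target provider using the hooks added above; this is illustrative only, the example_* names are hypothetical, and only the fc4_prov fields exercised elsewhere in this series are shown:

	static struct fc4_prov example_fcp_targ_prov = {
		.module	= THIS_MODULE,
		.prli	= example_prli,		/* hypothetical PRLI page handler */
		.prlo	= example_prlo,		/* hypothetical PRLO notification */
		.recv	= example_recv,		/* hypothetical FCP frame receiver */
	};

	static int __init example_init(void)
	{
		/* .recv is set, so this goes in the passive (receiving) table */
		return fc_fc4_register_provider(FC_TYPE_FCP, &example_fcp_targ_prov);
	}

	static void __exit example_exit(void)
	{
		fc_fc4_deregister_provider(FC_TYPE_FCP, &example_fcp_targ_prov);
	}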
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index eea0c3541b71..fedc819d70c0 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -94,6 +94,17 @@ extern unsigned int fc_debug_logging;
(lport)->host->host_no, ##args))
/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
+extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
+extern struct mutex fc_prov_mutex; /* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
+
+/*
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
@@ -112,6 +123,9 @@ void fc_destroy_fcp(void);
* Internal libfc functions
*/
const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
/*
* Copies a buffer into an sg list
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c5a10f94f845..8c08b210001d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport)
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_fc4_del_lport(lport);
return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
@@ -849,7 +850,7 @@ out:
}
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
* @lport: The local port that received the request
* @fp: The request frame
*
@@ -859,9 +860,9 @@ out:
* Locking Note: This function should not be called with the lport
 * lock held because it will grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+ struct fc_frame *fp)
{
- struct fc_frame_header *fh = fc_frame_header_get(fp);
void (*recv)(struct fc_lport *, struct fc_frame *);
mutex_lock(&lport->lp_mutex);
@@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
*/
if (!lport->link_up)
fc_frame_free(fp);
- else if (fh->fh_type == FC_TYPE_ELS &&
- fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+ else {
/*
* Check opcode.
*/
@@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
}
recv(lport, fp);
- } else {
- FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
- fr_eof(fp));
- fc_frame_free(fp);
}
mutex_unlock(&lport->lp_mutex);
}
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out)
+{
+ return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+ .prli = fc_lport_els_prli,
+ .recv = fc_lport_recv_els_req,
+};
+
+/**
+ * fc_lport_recv_req() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it may grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = fr_seq(fp);
+ struct fc4_prov *prov;
+
+ /*
+ * Use RCU read lock and module_lock to be sure module doesn't
+ * deregister and get unloaded while we're calling it.
+ * try_module_get() is inlined and accepts a NULL parameter.
+ * Only ELSes and FCP target ops should come through here.
+ * The locking is unfortunate, and a better scheme is being sought.
+ */
+
+ rcu_read_lock();
+ if (fh->fh_type >= FC_FC4_PROV_SIZE)
+ goto drop;
+ prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+ if (!prov || !try_module_get(prov->module))
+ goto drop;
+ rcu_read_unlock();
+ prov->recv(lport, fp);
+ module_put(prov->module);
+ return;
+drop:
+ rcu_read_unlock();
+ FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+ fc_frame_free(fp);
+ lport->tt.exch_done(sp);
+}
+
/**
* fc_lport_reset() - Reset a local port
* @lport: The local port which should be reset
@@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
*/
int fc_lport_config(struct fc_lport *lport)
{
+ INIT_LIST_HEAD(&lport->ema_list);
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
mutex_init(&lport->lp_mutex);
@@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport)
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+ fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
return 0;
}
@@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ fc_fc4_add_lport(lport);
return 0;
}
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index dd2b43bb1c70..f33b897e4784 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
vn_port = libfc_host_alloc(shost->hostt, privsize);
if (!vn_port)
- goto err_out;
- if (fc_exch_mgr_list_clone(n_port, vn_port))
- goto err_put;
+ return vn_port;
vn_port->vport = vport;
vport->dd_data = vn_port;
@@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
mutex_unlock(&n_port->lp_mutex);
return vn_port;
-
-err_put:
- scsi_host_put(vn_port->host);
-err_out:
- return NULL;
}
EXPORT_SYMBOL(libfc_vport_create);
@@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
return lport;
}
+EXPORT_SYMBOL(fc_vport_id_lookup);
/*
* When setting the link state of vports during an lport state change, it's
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a7175adab32d..49e1ccca09d5 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -58,7 +58,7 @@
#include "fc_libfc.h"
-struct workqueue_struct *rport_event_queue;
+static struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_flogi(struct fc_rport_priv *);
static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
INIT_WORK(&rdata->event_work, fc_rport_work);
- if (port_id != FC_FID_DIR_SERV)
+ if (port_id != FC_FID_DIR_SERV) {
+ rdata->lld_event_callback = lport->tt.rport_event_callback;
list_add_rcu(&rdata->peers, &lport->disc.rports);
+ }
return rdata;
}
@@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work)
struct fc_rport_operations *rport_ops;
struct fc_rport_identifiers ids;
struct fc_rport *rport;
+ struct fc4_prov *prov;
+ u8 type;
mutex_lock(&rdata->rp_mutex);
event = rdata->event;
@@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
kref_put(&rdata->kref, lport->tt.rport_destroy);
break;
case RPORT_EV_FAILED:
case RPORT_EV_LOGO:
case RPORT_EV_STOP:
+ if (rdata->prli_count) {
+ mutex_lock(&fc_prov_mutex);
+ for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+ prov = fc_passive_prov[type];
+ if (prov && prov->prlo)
+ prov->prlo(rdata);
+ }
+ mutex_unlock(&fc_prov_mutex);
+ }
port_id = rdata->ids.port_id;
mutex_unlock(&rdata->rp_mutex);
@@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
cancel_delayed_work_sync(&rdata->retry_work);
/*
@@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
- return fc_rport_error(rdata, fp);
+ goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
@@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
return;
}
- return fc_rport_error(rdata, fp);
+out:
+ fc_rport_error(rdata, fp);
}
/**
@@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
+ /* save plogi response sp_features for further reference */
+ rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
if (lport->point_to_multipoint)
fc_rport_login_complete(rdata, fp);
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
@@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
+ struct fc_els_spp temp_spp;
+ struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
@@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
pp->spp.spp_flags);
+ rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
fc_rport_error(rdata, fp);
@@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
+ if (fcp_parm & FCP_SPPF_CONF_COMPL)
+ rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_els_spp spp;
} *pp;
struct fc_frame *fp;
+ struct fc4_prov *prov;
/*
* If the rport is one of the well known addresses
@@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
- if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
- fc_rport_prli_resp, rdata,
- 2 * lport->r_a_tov))
+ fc_prli_fill(lport, fp);
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+ }
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+ fc_host_port_id(lport->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov))
fc_rport_error_retry(rdata, NULL);
else
kref_get(&rdata->kref);
@@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
+ enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
- u32 fcp_parm;
- u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ struct fc4_prov *prov;
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
pp->prli.prli_len = htons(len);
len -= sizeof(struct fc_els_prli);
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
/*
* Go through all the service parameter pages and build
* response. If plen indicates longer SPP than standard,
* use that. The entire response has been pre-cleared above.
*/
spp = &pp->spp;
+ mutex_lock(&fc_prov_mutex);
while (len >= plen) {
+ rdata->spp_type = rspp->spp_type;
spp->spp_type = rspp->spp_type;
spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
-
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params = htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
+ resp = 0;
+
+ if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_active_prov[rspp->spp_type];
+ if (prov)
+ resp = prov->prli(rdata, plen, rspp, spp);
+ prov = fc_passive_prov[rspp->spp_type];
+ if (prov) {
+ passive = prov->prli(rdata, plen, rspp, spp);
+ if (!resp || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ }
+ }
+ if (!resp) {
+ if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ resp |= FC_SPP_RESP_CONF;
+ else
+ resp |= FC_SPP_RESP_INVL;
}
spp->spp_flags |= resp;
len -= plen;
rspp = (struct fc_els_spp *)((char *)rspp + plen);
spp = (struct fc_els_spp *)((char *)spp + plen);
}
+ mutex_unlock(&fc_prov_mutex);
/*
* Send LS_ACC. If this fails, the originator should retry.
@@ -1887,9 +1933,82 @@ int fc_rport_init(struct fc_lport *lport)
EXPORT_SYMBOL(fc_rport_init);
/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the value for the response code to be placed in spp_flags,
+ * or 0 if not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 fcp_parm;
+
+ fcp_parm = ntohl(rspp->spp_params);
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+
+ if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+ return 0;
+
+ spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+ /*
+ * OR in our service parameters with other providers (target), if any.
+ */
+ fcp_parm = ntohl(spp->spp_params);
+ spp->spp_params = htonl(fcp_parm | lport->service_params);
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+ .prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ return FC_SPP_RESP_INVL;
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+ .prli = fc_rport_t0_prli,
+};
+
+/**
* fc_setup_rport() - Initialize the rport_event_queue
*/
-int fc_setup_rport()
+int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
@@ -1900,7 +2019,7 @@ int fc_setup_rport()
/**
* fc_destroy_rport() - Destroy the rport_event_queue
*/
-void fc_destroy_rport()
+void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da8b61543ee4..0c550d5b9133 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_session_get_param);
+int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf)
+{
+ struct sockaddr_in6 *sin6 = NULL;
+ struct sockaddr_in *sin = NULL;
+ int len;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (sin)
+ len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ break;
+ case ISCSI_PARAM_CONN_PORT:
+ if (sin)
+ len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ else
+ len = sprintf(buf, "%hu\n",
+ be16_to_cpu(sin6->sin6_port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
+
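A compact usage sketch mirroring the iscsi_tcp changes earlier in this series (local variable names are illustrative):

	struct sockaddr_in6 addr;
	int len;

	if (!kernel_getpeername(sock, (struct sockaddr *)&addr, &len))
		return iscsi_conn_get_addr_param((struct sockaddr_storage *)&addr,
						 ISCSI_PARAM_CONN_ADDRESS, buf);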
int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
@@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ihost->initiatorname);
break;
- case ISCSI_HOST_PARAM_IPADDRESS:
- len = sprintf(buf, "%s\n", ihost->local_address);
- break;
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 18f33cd54411..9dafe64e7c7a 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP
Allows sas hosts to receive SMP frames. Selecting this
option builds an SMP interpreter into libsas. Say
N here if you want to save the few kb this consumes.
-
-config SCSI_SAS_LIBSAS_DEBUG
- bool "Compile the SAS Domain Transport Attributes in debug mode"
- default y
- depends on SCSI_SAS_LIBSAS
- help
- Compiles the SAS Layer in debug mode. In debug mode, the
- SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 1ad1323c60fa..566a10024598 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -21,10 +21,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
- EXTRA_CFLAGS += -DSAS_DEBUG
-endif
-
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
libsas-y += sas_init.o \
sas_phy.o \
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e1a395b438ee..d2b2d1474c28 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_SG_ERR:
return AC_ERR_INVALID;
- case SAM_STAT_CHECK_CONDITION:
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
__func__, ts->stat);
return AC_ERR_OTHER;
+ case SAM_STAT_CHECK_CONDITION:
case SAS_ABORTED_TASK:
return AC_ERR_DEV;
@@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task)
sas_ha = dev->port->ha;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
- } else if (stat->stat != SAM_STAT_GOOD) {
+ } else {
ac = sas_to_ata_err(stat);
if (ac) {
SAS_DPRINTK("%s: SAS error %x\n", __func__,
@@ -238,37 +240,43 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
return true;
}
-static void sas_ata_phy_reset(struct ata_port *ap)
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
+ struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int res = TMF_RESP_FUNC_FAILED;
+ int ret = 0;
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
- if (res != TMF_RESP_FUNC_COMPLETE)
+ if (res != TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
+ ret = -EAGAIN;
+ }
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
SAS_DPRINTK("%s: Found ATA device.\n", __func__);
- ap->link.device[0].class = ATA_DEV_ATA;
+ *class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
- ap->link.device[0].class = ATA_DEV_ATAPI;
+ *class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
__func__,
dev->sata_dev.command_set);
- ap->link.device[0].class = ATA_DEV_UNKNOWN;
+ *class = ATA_DEV_UNKNOWN;
break;
}
ap->cbl = ATA_CBL_SATA;
+ return ret;
}
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
@@ -299,57 +307,12 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
}
}
-static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
- u32 val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- dev->sata_dev.sstatus = val;
- break;
- case SCR_CONTROL:
- dev->sata_dev.scontrol = val;
- break;
- case SCR_ERROR:
- dev->sata_dev.serror = val;
- break;
- case SCR_ACTIVE:
- dev->sata_dev.ap->link.sactive = val;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
- u32 *val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- *val = dev->sata_dev.sstatus;
- return 0;
- case SCR_CONTROL:
- *val = dev->sata_dev.scontrol;
- return 0;
- case SCR_ERROR:
- *val = dev->sata_dev.serror;
- return 0;
- case SCR_ACTIVE:
- *val = dev->sata_dev.ap->link.sactive;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static struct ata_port_operations sas_sata_ops = {
- .phy_reset = sas_ata_phy_reset,
+ .prereset = ata_std_prereset,
+ .softreset = NULL,
+ .hardreset = sas_ata_hard_reset,
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
.qc_prep = ata_noop_qc_prep,
@@ -357,15 +320,12 @@ static struct ata_port_operations sas_sata_ops = {
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
- .scr_read = sas_ata_scr_read,
- .scr_write = sas_ata_scr_write
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
- .pio_mask = 0x1f, /* PIO0-4 */
- .mwdma_mask = 0x07, /* MWDMA0-2 */
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sas_sata_ops
};
@@ -781,3 +741,68 @@ int sas_discover_sata(struct domain_device *dev)
return res;
}
+
+void sas_ata_strategy_handler(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ struct ata_port *ap = ddev->sata_dev.ap;
+
+ if (!dev_is_sata(ddev))
+ continue;
+
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
+ ata_scsi_port_error_handler(shost, ap);
+ }
+}
+
+int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
+ enum blk_eh_timer_return *rtn)
+{
+ struct domain_device *ddev = cmd_to_domain_dev(cmd);
+
+ if (!dev_is_sata(ddev) || task)
+ return 0;
+
+ /* we're a sata device with no task, so this must be a libata
+ * eh timeout. Ideally should hook into libata timeout
+ * handling, but there's no point, it just wants to activate
+ * the eh thread */
+ *rtn = BLK_EH_NOT_HANDLED;
+ return 1;
+}
+
+int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+ struct list_head *done_q)
+{
+ int rtn = 0;
+ struct scsi_cmnd *cmd, *n;
+ struct ata_port *ap;
+
+ do {
+ LIST_HEAD(sata_q);
+
+ ap = NULL;
+
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+ struct domain_device *ddev = cmd_to_domain_dev(cmd);
+
+ if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
+ continue;
+ if (ap && ap != ddev->sata_dev.ap)
+ continue;
+ ap = ddev->sata_dev.ap;
+ rtn = 1;
+ list_move(&cmd->eh_entry, &sata_q);
+ }
+
+ if (!list_empty(&sata_q)) {
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
+ ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+ }
+ } while (ap);
+
+ return rtn;
+}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index c17c25030f1c..fc460933575c 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,8 +24,6 @@
#include "sas_dump.h"
-#ifdef SAS_DEBUG
-
static const char *sas_hae_str[] = {
[0] = "HAE_RESET",
};
@@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port)
SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
}
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 47b45d4f5258..800e4c69093f 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -24,19 +24,7 @@
#include "sas_internal.h"
-#ifdef SAS_DEBUG
-
void sas_dprint_porte(int phyid, enum port_event pe);
void sas_dprint_phye(int phyid, enum phy_event pe);
void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
void sas_dump_port(struct asd_sas_port *port);
-
-#else /* SAS_DEBUG */
-
-static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
-static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
-static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
- enum ha_event he) { }
-static inline void sas_dump_port(struct asd_sas_port *port) { }
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 505ffe358293..f3f693b772ac 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
* dev to host FIS as described in section G.5 of
* sas-2 r 04b */
dr = &((struct smp_resp *)disc_resp)->disc;
+ if (memcmp(dev->sas_addr, dr->attached_sas_addr,
+ SAS_ADDR_SIZE) == 0) {
+ sas_printk("Found loopback topology, just ignore it!\n");
+ return 0;
+ }
if (!(dr->attached_dev_type == 0 &&
dr->attached_sata_dev))
break;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 0001374bd6b2..8b538bd1ff2b 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -33,11 +33,7 @@
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#ifdef SAS_DEBUG
-#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#else
-#define SAS_DPRINTK(fmt, ...)
-#endif
+#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9a7aaf5f1311..bece759c51ec 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -663,11 +663,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
- if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
- scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
+ if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q))
+ if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
+ scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
+ /* now link into libata eh --- if we have any ata devices */
+ sas_ata_strategy_handler(shost);
+
scsi_eh_flush_done_q(&ha->eh_done_q);
+
SAS_DPRINTK("--- Exit %s\n", __func__);
return;
}
@@ -676,6 +681,11 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
+ enum blk_eh_timer_return rtn;
+
+ if (sas_ata_timed_out(cmd, task, &rtn))
+ return rtn;
+
if (!task) {
cmd->request->timeout /= 2;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 746dd3d7a092..b64c6da870d3 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -325,6 +325,7 @@ struct lpfc_vport {
#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -348,6 +349,8 @@ struct lpfc_vport {
uint32_t fc_myDID; /* fibre channel S_ID */
uint32_t fc_prevDID; /* previous fibre channel S_ID */
+ struct lpfc_name fabric_portname;
+ struct lpfc_name fabric_nodename;
int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
@@ -372,6 +375,7 @@ struct lpfc_vport {
#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */
#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
@@ -382,6 +386,7 @@ struct lpfc_vport {
struct timer_list fc_fdmitmo;
struct timer_list els_tmofunc;
+ struct timer_list delayed_disc_tmo;
int unreg_vpi_cmpl;
@@ -548,6 +553,8 @@ struct lpfc_hba {
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_BG_ENABLED 0x20
#define LPFC_SLI3_DSS_ENABLED 0x40
+#define LPFC_SLI4_PERFH_ENABLED 0x80
+#define LPFC_SLI4_PHWQ_ENABLED 0x100
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -655,7 +662,7 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
-
+ uint32_t cfg_enable_dss;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@@ -792,6 +799,10 @@ struct lpfc_hba {
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_que_info;
#endif
/* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3512abb8a587..e7c020df12fa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int status = 0;
int cnt = 0;
int i;
+ int rc;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
}
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl, type);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
*
* Notes:
* Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
*
* Returns:
* lpfc_do_offline() return code if not zero
@@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
int status = 0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EIO;
@@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba)
return status;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int status=0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -1279,6 +1294,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+ (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+ (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+ "" : "Not ");
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
int stat1=0, stat2=0;
unsigned int i, j, cnt=count;
u8 wwpn[8];
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"0463 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat1);
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
+ rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (stat2)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
static int lpfc_poll = 0;
-module_param(lpfc_poll, int, 0);
+module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 0 - none,"
" 1 - poll with interrupts enabled"
@@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
int lpfc_sli_mode = 0;
-module_param(lpfc_sli_mode, int, 0);
+module_param(lpfc_sli_mode, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
int lpfc_enable_npiv = 1;
-module_param(lpfc_enable_npiv, int, 0);
+module_param(lpfc_enable_npiv, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
int lpfc_enable_rrq;
-module_param(lpfc_enable_rrq, int, 0);
+module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
lpfc_param_init(enable_rrq, 0, 0, 1);
@@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, 1);
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_iocb_cnt,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
lpfc_param_show(iocb_cnt);
@@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
-module_param(lpfc_devloss_tmo, int, 0);
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
@@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
# Default value of this parameter is 1.
*/
static int lpfc_restrict_login = 1;
-module_param(lpfc_restrict_login, int, 0);
+module_param(lpfc_restrict_login, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
@@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
static int lpfc_topology = 0;
-module_param(lpfc_topology, int, 0);
+module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
@@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_link_speed = 0;
-module_param(lpfc_link_speed, int, 0);
+module_param(lpfc_link_speed, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
@@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, 1);
+module_param(lpfc_aer_support, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
lpfc_param_show(aer_support)
@@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
# The value is set in milliseconds.
*/
static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, 0);
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
"Use command completion time to control queue depth");
lpfc_vport_param_show(max_scsicmpl_time);
@@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
*/
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
-module_param(lpfc_prot_mask, uint, 0);
+module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
/*
@@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, 0);
+module_param(lpfc_prot_guard, byte, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to a non-zero value, initial NPort discovery is
+ * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * The driver always delays NPort discovery for subsequent FLOGI/FDISC completions
+ * when Clean Address bit is cleared in FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+ "Delay NPort discovery when Clean Address bit is cleared. "
+ "Allowed values: 0,1.");
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
@@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
+ &dev_attr_lpfc_dss,
NULL,
};
@@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+ phba->cfg_enable_dss = 1;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 17fde522c84a..3d40023f4804 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_sli4_params(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
@@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
-void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c004fa9a681e..d9edfd90d7ff 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1738,6 +1738,55 @@ fdmi_cmd_exit:
return 1;
}
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: Context object of the timer.
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ * handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts NPort discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
void
lpfc_fdmi_tmo(unsigned long ptr)
{
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a80d938fafc9..a753581509d6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -57,8 +57,8 @@
* # mount -t debugfs none /sys/kernel/debug
*
* The lpfc debugfs directory hierarchy is:
- * lpfc/lpfcX/vportY
- * where X is the lpfc hba unique_id
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
* where Y is the vport VPI on that hba
*
* Debugging services available per vport:
@@ -82,52 +82,34 @@
* the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
-module_param(lpfc_debugfs_enable, int, 0);
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_disc_trc;
-module_param(lpfc_debugfs_max_disc_trc, int, 0);
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
"Set debugfs discovery trace depth");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_slow_ring_trc;
-module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
static int lpfc_debugfs_mask_disc_trc;
-module_param(lpfc_debugfs_mask_disc_trc, int, 0);
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
"Set debugfs discovery trace mask");
#include <linux/debugfs.h>
-/* size of output line, for discovery_trace and slow_ring_trace */
-#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
-
-/* nodelist output buffer size */
-#define LPFC_NODELIST_SIZE 8192
-#define LPFC_NODELIST_ENTRY_SIZE 120
-
-/* dumpHBASlim output buffer size */
-#define LPFC_DUMPHBASLIM_SIZE 4096
-
-/* dumpHostSlim output buffer size */
-#define LPFC_DUMPHOSTSLIM_SIZE 4096
-
-/* hbqinfo output buffer size */
-#define LPFC_HBQINFO_SIZE 8192
-
-struct lpfc_debug {
- char *buffer;
- int len;
-};
-
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
+/* iDiag */
+static struct lpfc_idiag idiag;
+
/**
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
* @vport: The vport to gather the log info from.
@@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
return nbytes;
}
-
-
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
* @inode: The inode pointer that contains a vport pointer.
@@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
+
return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
debug->len);
}
@@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
return 0;
}
+/*
+ * iDiag debugfs file access methods
+ */
+
+/*
+ * iDiag PCI config space register access methods:
+ *
+ * Read, write, read-modify-write (set bits), and read-modify-write (clear
+ * bits) access to the PCI config space registers of SLI4 PCI functions is
+ * provided. In the proper SLI4 PCI function's debugfs iDiag directory,
+ *
+ * /sys/kernel/debug/lpfc/fn<#>/iDiag
+ *
+ * the access is through the debugfs entry pciCfg:
+ *
+ * 1. For PCI config space register read access, there are two read methods:
+ * A) read a single PCI config space register in the size of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through
+ * the 4K extended PCI config space.
+ *
+ * A) Read a single PCI config space register consists of two steps:
+ *
+ * Step-1: Set up PCI config space register read command, the command
+ * syntax is,
+ *
+ * echo 1 <where> <count> > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, <where> is the
+ * offset from the beginning of the device's PCI config space to read from,
+ * and <count> is the size of PCI config space register data to read back,
+ * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits
+ * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes).
+ *
+ * Step-2: Perform the debugfs read operation to execute the idiag command
+ * set up in Step-1,
+ *
+ * cat pciCfg
+ *
+ * Examples:
+ * To read PCI device's vendor-id and device-id from PCI config space,
+ *
+ * echo 1 0 4 > pciCfg
+ * cat pciCfg
+ *
+ * To read PCI device's current command register from config space,
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * B) Browse through the entire 4K extended PCI config space also consists
+ * of two steps:
+ *
+ * Step-1: Set up PCI config space register browsing command, the command
+ * syntax is,
+ *
+ * echo 1 0 4096 > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, 0 must be used
+ * as the offset for PCI config space register browse, and 4096 must be
+ * used as the count for PCI config space register browse.
+ *
+ * Step-2: Repeatedly issue the debugfs read operation to browse through
+ * the entire PCI config space registers:
+ *
+ * cat pciCfg
+ * cat pciCfg
+ * cat pciCfg
+ * ...
+ *
+ * When browsing reaches the end of the 4K PCI config space, the browse
+ * method wraps around and starts reading from the beginning again.
+ *
+ * 2. For PCI config space register write access, it supports a single PCI
+ * config space register write in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 2 <where> <count> <value> > pciCfg
+ *
+ * where, 2 is the iDiag command for PCI config space write, <where> is
+ * the offset from the beginning of the device's PCI config space to write
+ * into, <count> is the size of data to write into the PCI config space,
+ * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits
+ * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value>
+ * is the data to be written into the PCI config space register at the
+ * offset.
+ *
+ * Examples:
+ * To disable PCI device's interrupt assertion,
+ *
+ * 1) Read in device's PCI config space register command field <cmd>:
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>:
+ *
+ * <cmd> = <cmd> | (1 << 10)
+ *
+ * 3) Write the modified command back:
+ *
+ * echo 2 4 2 <cmd> > pciCfg
+ *
+ * 3. For PCI config space register set bits access, it supports a single PCI
+ * config space register set bits in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 3 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 3 is the iDiag command for PCI config space set bits, <where> is
+ * the offset from the beginning of the device's PCI config space to set
+ * bits into, <count> is the size of the bitmask to set into the PCI config
+ * space, it will be 1 for setting a byte (8 bits), 2 for setting a word
+ * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and
+ * <bitmask> is the bitmask, indicating the bits to be set into the PCI
+ * config space register at the offset. The logic performed to the content
+ * of the PCI config space register, regval, is,
+ *
+ * regval |= <bitmask>
+ *
+ * 4. For PCI config space register clear bits access, it supports a single
+ * PCI config space register clear bits in the size of a byte (8 bits),
+ * a word (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 4 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 4 is the iDiag command for PCI config space clear bits, <where>
+ * is the offset from the beginning of the device's PCI config space to
+ * clear bits from, <count> is the size of the bitmask to set into the PCI
+ * config space, it will be 1 for setting a byte (8 bits), 2 for setting
+ * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4
+ * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared
+ * from the PCI config space register at the offset. The logic performed
+ * to the content of the PCI config space register, regval, is,
+ *
+ * regval &= ~<bitmask>
+ *
+ * Note, for all single register read, write, set bits, or clear bits access,
+ * the offset (<where>) must be aligned with the size of the data:
+ *
+ * For data size of byte (8 bits), the offset must be aligned to the byte
+ * boundary; for data size of word (16 bits), the offset must be aligned
+ * to the word boundary; while for data size of dword (32 bits), the offset
+ * must be aligned to the dword boundary. Otherwise, the interface will
+ * return the error:
+ *
+ * "-bash: echo: write error: Invalid argument".
+ *
+ * For example:
+ *
+ * echo 1 2 4 > pciCfg
+ * -bash: echo: write error: Invalid argument
+ *
+ * Note also, the numbers in all read, write, set bits, and clear bits PCI
+ * config space register command fields can be
+ * either decimal or hex.
+ *
+ * For example,
+ * echo 1 0 4096 > pciCfg
+ *
+ * will be the same as
+ * echo 1 0 0x1000 > pciCfg
+ *
+ * And,
+ * echo 2 155 1 10 > pciCfg
+ *
+ * will be
+ * echo 2 0x9b 1 0xa > pciCfg
+ */
+
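A minimal user-space sketch of the interrupt-disable example above, assuming
debugfs is mounted and the function directory is fn0 (the path is an example;
the parse format follows the "%03x: %04x" output produced by the pciCfg read):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/lpfc/fn0/iDiag/pciCfg";
            unsigned int off, cmd;
            char line[64];
            FILE *f;

            /* Step 1: request a 16-bit read of the PCI command register. */
            f = fopen(path, "w");
            if (!f)
                    return 1;
            fprintf(f, "1 4 2\n");
            fclose(f);

            /* Step 2: read back "004: xxxx" and parse the register value. */
            f = fopen(path, "r");
            if (!f)
                    return 1;
            if (!fgets(line, sizeof(line), f) ||
                sscanf(line, "%x: %x", &off, &cmd) != 2) {
                    fclose(f);
                    return 1;
            }
            fclose(f);

            /* Step 3: set bit 10 (Interrupt Disable) and write it back. */
            cmd |= 1u << 10;
            f = fopen(path, "w");
            if (!f)
                    return 1;
            fprintf(f, "2 4 2 0x%x\n", cmd);
            fclose(f);
            return 0;
    }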
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from the debugfs user space buffer and parses it
+ * to extract the idiag command and its arguments. White space between the
+ * fields is used as the parsing separator.
+ *
+ * This routine returns 0 when successful; in error conditions it returns a
+ * proper error code back to user space.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+ struct lpfc_idiag_cmd *idiag_cmd)
+{
+ char mybuf[64];
+ char *pbuf, *step_str;
+ int bsize, i;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+ memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+ bsize = min(nbytes, (sizeof(mybuf)-1));
+
+ if (copy_from_user(mybuf, buf, bsize))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+ step_str = strsep(&pbuf, "\t ");
+
+ /* The opcode must be present */
+ if (!step_str)
+ return -EINVAL;
+
+ idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+ if (idiag_cmd->opcode == 0)
+ return -EINVAL;
+
+ for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+ step_str = strsep(&pbuf, "\t ");
+ if (!step_str)
+ return 0;
+ idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, allocates
+ * the idiag debug tracking structure for the file operation, and stores a
+ * pointer to it in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+ debug->buffer = NULL;
+ file->private_data = debug;
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation; it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also resets the fields in the idiag command struct when
+ * the command is not a continuous browse of the data structure.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Read PCI config register, if not read all, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_RD) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD))
+ if ((idiag.cmd.data[1] == sizeof(uint8_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint16_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint32_t)))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Write PCI config register, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_WR) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies it to user @buf. Depending on the PCI config
+ * space read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits), or browses through all
+ * registers of the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+ int where, count;
+ char *pbuffer;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ } else
+ return 0;
+
+ /* Read single PCI config space register */
+ switch (count) {
+ case SIZE_U8: /* byte (8 bits) */
+ pci_read_config_byte(pdev, where, &u8val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %02x\n", where, u8val);
+ break;
+ case SIZE_U16: /* word (16 bits) */
+ pci_read_config_word(pdev, where, &u16val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %04x\n", where, u16val);
+ break;
+ case SIZE_U32: /* double word (32 bits) */
+ pci_read_config_dword(pdev, where, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %08x\n", where, u32val);
+ break;
+ case LPFC_PCI_CFG_SIZE: /* browse all */
+ goto pcicfg_browse;
+ break;
+ default:
+ /* illegal count */
+ len = 0;
+ break;
+ }
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+ /* Browse all PCI config space registers */
+ offset_label = idiag.offset.last_rd;
+ offset = offset_label;
+
+ /* Read PCI config space */
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: ", offset_label);
+ while (index > 0) {
+ pci_read_config_dword(pdev, offset, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%08x ", u32val);
+ offset += sizeof(uint32_t);
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n%03x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci cfg read */
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for the PCI config space read or write command
+ * accordingly. In the case of PCI config space read command, it sets up
+ * the command in the idiag command struct for the debugfs read operation.
+ * In the case of PCI config space write operation, it executes the write
+ * operation into the PCI config space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t where, value, count;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc)
+ return rc;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ /* Read command from PCI config space, set up command fields */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ if (count == LPFC_PCI_CFG_SIZE) {
+ if (where != 0)
+ goto error_out;
+ } else if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ }
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ /* Write command to PCI config space, read-modify-write */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ value = idiag.cmd.data[2];
+ /* Sanity checks */
+ if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_byte(pdev, where,
+ (uint8_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val |= (uint8_t)value;
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val &= (uint8_t)(~value);
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_word(pdev, where,
+ (uint16_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val |= (uint16_t)value;
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val &= (uint16_t)(~value);
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_dword(pdev, where, value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val |= value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val &= ~value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ }
+ } else
+ /* All other opcodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba SLI4 PCI function queue information,
+ * and copies it to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int len = 0, fcp_qidx;
+ char *pbuffer;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ /* Get slow-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path EQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.sp_eq->queue_id,
+ phba->sli4_hba.sp_eq->entry_count,
+ phba->sli4_hba.sp_eq->host_index,
+ phba->sli4_hba.sp_eq->hba_index);
+
+ /* Get fast-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path EQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
+ phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.mbx_cq->entry_count,
+ phba->sli4_hba.mbx_cq->host_index,
+ phba->sli4_hba.mbx_cq->hba_index);
+
+ /* Get slow-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.els_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.els_cq->entry_count,
+ phba->sli4_hba.els_cq->host_index,
+ phba->sli4_hba.els_cq->hba_index);
+
+ /* Get fast-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path CQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox MQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], MQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_wq->entry_count,
+ phba->sli4_hba.mbx_wq->host_index,
+ phba->sli4_hba.mbx_wq->hba_index);
+
+ /* Get slow-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path WQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.els_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_wq->entry_count,
+ phba->sli4_hba.els_wq->host_index,
+ phba->sli4_hba.els_wq->hba_index);
+
+ /* Get fast-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path WQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get receive queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path RQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.hdr_rq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RHQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.hdr_rq->queue_id,
+ phba->sli4_hba.hdr_rq->entry_count,
+ phba->sli4_hba.hdr_rq->host_index,
+ phba->sli4_hba.hdr_rq->hba_index);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RDQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.dat_rq->queue_id,
+ phba->sli4_hba.dat_rq->entry_count,
+ phba->sli4_hba.dat_rq->host_index,
+ phba->sli4_hba.dat_rq->hba_index);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
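As a usage note (the path is illustrative), once the queInfo entry is created a
single "cat /sys/kernel/debug/lpfc/fn0/iDiag/queInfo" dumps one snapshot of the
EQ, CQ, MQ, WQ and RQ IDs, entry counts, and host/port indexes formatted by the
routine above.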
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_pcicfg_read,
+ .write = lpfc_idiag_pcicfg_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .read = lpfc_idiag_queinfo_read,
+ .release = lpfc_idiag_release,
+};
+
#endif
/**
@@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
- /* Setup lpfcX directory for specific HBA */
- snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
+ /* Setup funcX directory for specific HBA PCI function */
+ snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
@@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/* Setup dumpHBASlim */
- snprintf(name, sizeof(name), "dumpHBASlim");
- phba->debug_dumpHBASlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHBASlim);
- if (!phba->debug_dumpHBASlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0413 Cannot create debugfs dumpHBASlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHBASlim");
+ phba->debug_dumpHBASlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHBASlim);
+ if (!phba->debug_dumpHBASlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0413 Cannot create debugfs "
+ "dumpHBASlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHBASlim = NULL;
/* Setup dumpHostSlim */
- snprintf(name, sizeof(name), "dumpHostSlim");
- phba->debug_dumpHostSlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHostSlim);
- if (!phba->debug_dumpHostSlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0414 Cannot create debugfs dumpHostSlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHostSlim");
+ phba->debug_dumpHostSlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHostSlim);
+ if (!phba->debug_dumpHostSlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0414 Cannot create debugfs "
+ "dumpHostSlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHostSlim = NULL;
/* Setup dumpData */
snprintf(name, sizeof(name), "dumpData");
@@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
-
-
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
-
snprintf(name, sizeof(name), "slow_ring_trace");
phba->debug_slow_ring_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -1434,6 +2214,53 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
"0409 Cant create debugfs nodelist\n");
goto debug_failed;
}
+
+ /*
+ * iDiag debugfs root entry points for SLI4 device only
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ goto debug_failed;
+
+ snprintf(name, sizeof(name), "iDiag");
+ if (!phba->idiag_root) {
+ phba->idiag_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!phba->idiag_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2922 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ /* Initialize iDiag data structure */
+ memset(&idiag, 0, sizeof(idiag));
+ }
+
+ /* iDiag read PCI config space */
+ snprintf(name, sizeof(name), "pciCfg");
+ if (!phba->idiag_pci_cfg) {
+ phba->idiag_pci_cfg =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+ if (!phba->idiag_pci_cfg) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2923 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
+ /* iDiag get PCI function queue information */
+ snprintf(name, sizeof(name), "queInfo");
+ if (!phba->idiag_que_info) {
+ phba->idiag_que_info =
+ debugfs_create_file(name, S_IFREG|S_IRUGO,
+ phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+ if (!phba->idiag_que_info) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2924 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
debug_failed:
return;
#endif
@@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
phba->debug_slow_ring_trc = NULL;
}
+ /*
+ * iDiag release
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_que_info) {
+ /* iDiag queInfo */
+ debugfs_remove(phba->idiag_que_info);
+ phba->idiag_que_info = NULL;
+ }
+ if (phba->idiag_pci_cfg) {
+ /* iDiag pciCfg */
+ debugfs_remove(phba->idiag_pci_cfg);
+ phba->idiag_pci_cfg = NULL;
+ }
+
+ /* Finally remove the iDiag debugfs root */
+ if (phba->idiag_root) {
+ /* iDiag root */
+ debugfs_remove(phba->idiag_root);
+ phba->idiag_root = NULL;
+ }
+ }
+
if (phba->hba_debugfs_root) {
- debugfs_remove(phba->hba_debugfs_root); /* lpfcX */
+ debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 03c7313a1012..91b9a9427cda 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -22,6 +22,44 @@
#define _H_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/* rdPciConf output buffer size */
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+/* queue info output buffer size */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 2048
+
+#define SIZE_U8 sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+ char *i_private;
+ char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+ char *buffer;
+ int len;
+};
+
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;
@@ -30,6 +68,26 @@ struct lpfc_debugfs_trc {
uint32_t seq_cnt;
unsigned long jif;
};
+
+struct lpfc_idiag_offset {
+ uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 4
+struct lpfc_idiag_cmd {
+ uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+ uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+ uint32_t active;
+ struct lpfc_idiag_cmd cmd;
+ struct lpfc_idiag_offset offset;
+};
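For orientation, a hedged sketch of how the browse command "echo 1 0 4096 >
pciCfg" would populate this structure once parsed by lpfc_idiag_cmd_get()
(field roles are taken from the pciCfg handlers above):

    struct lpfc_idiag_cmd example = {
            .opcode = LPFC_IDIAG_CMD_PCICFG_RD, /* "1": PCI config space read */
            .data   = { 0, 4096, 0, 0 },        /* where = 0, count = full 4K */
    };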
#endif
/* Mask for discovery_trace */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c62d567cc845..8e28edf9801e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -485,6 +485,59 @@ fail:
}
/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from FLOGI/FDISC completion handler functions.
+ * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
+ * Fabric nodename is changed in the completion service parameters, otherwise
+ * it returns 0. This function also sets a flag in the vport data structure
+ * to delay N_Port discovery after the FLOGI/FDISC completion if the Clean
+ * Address bit in the FLOGI/FDISC response is cleared and the FCID, Fabric
+ * portname or Fabric nodename is changed in the completion service parameters.
+ *
+ * Return code
+ * 0 - FCID, Fabric Nodename and Fabric portname are not changed.
+ * 1 - FCID or Fabric Nodename or Fabric portname is changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+ struct serv_parm *sp)
+{
+ uint8_t fabric_param_changed = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if ((vport->fc_prevDID != vport->fc_myDID) ||
+ memcmp(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name)) ||
+ memcmp(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name)))
+ fabric_param_changed = 1;
+
+ /*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ *
+ * If fabric parameter is changed and clean address bit is
+ * cleared, delay NPort discovery if
+ * - vport->fc_prevDID != 0 (not initial discovery) OR
+ * - lpfc_delay_discovery module parameter is set.
+ */
+ if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+ (vport->fc_prevDID || lpfc_delay_discovery)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return fabric_param_changed;
+}
+
+
+/**
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
+ uint8_t fabric_param_changed;
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_FABRIC;
@@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
+
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
@@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
@@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
+ struct lpfcMboxq *mbox;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_CMPL_LOGO);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ phba->pport->fc_myDID = 0;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+ }
return;
}
@@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_FDISC:
- lpfc_issue_els_fdisc(vport, ndlp, retry);
+ if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
return;
@@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
+ break;
case IOSTAT_REMOTE_STOP:
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* This IO was aborted by the target; we don't
+ * know the rxid and, because we did not send the
+ * ABTS, we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ cmdiocb->sli4_xritag, 0, 0);
+ }
break;
-
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
@@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
uint8_t *pcmd;
struct RRQ *rrq;
uint16_t rxid;
+ uint16_t xri;
struct lpfc_node_rrq *prrq;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
- rxid = bf_get(rrq_oxid, rrq);
+ rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+ rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
" x%x x%x\n",
- bf_get(rrq_did, rrq),
- bf_get(rrq_oxid, rrq),
+ be32_to_cpu(bf_get(rrq_did, rrq)),
+ be16_to_cpu(bf_get(rrq_oxid, rrq)),
rxid,
iocb->iotag, iocb->iocb.ulpContext);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
- prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+ if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+ xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
+ else
+ xri = rxid;
+ prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
if (prrq)
- lpfc_clr_rrq_active(phba, rxid, prrq);
+ lpfc_clr_rrq_active(phba, xri, prrq);
return;
}
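
The hunk above picks which exchange ID to look up: when the RRQ's D_ID is this port's own FCID, the local side originated the exchange and the OXID names it; otherwise the RXID does. Below is a small sketch of that selection, assuming already byte-swapped fields; the struct layout is illustrative, not the driver's RRQ payload.

#include <stdint.h>
#include <stdio.h>

/* Illustrative, CPU-endian view of the relevant RRQ payload fields. */
struct rrq_fields {
    uint32_t did;     /* destination of the original exchange */
    uint16_t oxid;    /* originator exchange id */
    uint16_t rxid;    /* responder exchange id */
};

/* Choose the XRI to clear: our own exchange id if we were the originator. */
static uint16_t rrq_xri_to_clear(uint32_t my_fcid, const struct rrq_fields *rrq)
{
    return (my_fcid == rrq->did) ? rrq->oxid : rrq->rxid;
}

int main(void)
{
    struct rrq_fields rrq = { .did = 0x010200, .oxid = 0x1234, .rxid = 0x5678 };
    printf("clear xri 0x%x\n", rrq_xri_to_clear(0x010200, &rrq)); /* 0x1234 */
    printf("clear xri 0x%x\n", rrq_xri_to_clear(0x999999, &rrq)); /* 0x5678 */
    return 0;
}
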
@@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (vport->load_flag & FC_UNLOADING)
goto dropit;
+ /* If NPort discovery is delayed drop incoming ELS */
+ if ((vport->fc_flag & FC_DISC_DELAYED) &&
+ (cmd != ELS_CMD_PLOGI))
+ goto dropit;
+
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
@@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
lpfc_send_els_event(vport, ndlp, payload);
+
+ /* If Nport discovery is delayed, reject PLOGIs */
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+ }
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6596,6 +6698,21 @@ void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * If the lpfc_delay_discovery parameter is set, the clean address
+ * bit is cleared and the FC fabric parameters have changed, delay FC
+ * NPort discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ spin_unlock_irq(shost->host_lock);
+ mod_timer(&vport->delayed_disc_tmo,
+ jiffies + HZ * phba->fc_ratov);
+ return;
+ }
+ spin_unlock_irq(shost->host_lock);
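
When FC_DISC_DELAYED is set, the block above re-arms the per-vport timer for one R_A_TOV instead of starting name-server discovery immediately. A user-space sketch of the same deadline arithmetic follows; jiffies + HZ * fc_ratov is modelled with plain seconds, and this is not the kernel timer API.

#include <stdio.h>
#include <time.h>

/* Model of the deadline the driver programs with mod_timer():
 * expiry = jiffies + HZ * fc_ratov  ==  "fc_ratov seconds from now". */
static time_t disc_deadline(time_t now, unsigned int fc_ratov)
{
    return now + fc_ratov;
}

int main(void)
{
    unsigned int fc_ratov = 10;              /* illustrative R_A_TOV in seconds */
    time_t now = time(NULL);
    time_t expiry = disc_deadline(now, fc_ratov);

    printf("discovery postponed for %ld seconds\n", (long)(expiry - now));
    return 0;
}
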
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
@@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *next_np;
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ uint8_t fabric_param_changed;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
@@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ sp = prsp->virt + sizeof(uint32_t);
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
@@ -7582,6 +7709,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ sglq_entry->ndlp = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
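
The new routine above walks the aborted-ELS sgl list under both the hba lock and the list's own lock and simply drops the node back-pointers that belong to the vport being torn down. Here is a minimal pthread sketch of that "walk under lock, NULL the owner pointer" pattern, using a generic list and a single lock rather than the driver's structures.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct owner { int id; };

struct entry {
    struct entry *next;
    struct owner *owner;        /* back-pointer that must not dangle */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Drop every reference to 'dead' so later completions cannot dereference
 * an owner that has been freed. The entries themselves stay on the list. */
static void drop_owner_refs(struct entry *head, struct owner *dead)
{
    pthread_mutex_lock(&list_lock);
    for (struct entry *e = head; e; e = e->next)
        if (e->owner == dead)
            e->owner = NULL;
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    struct owner vport = { .id = 1 };
    struct entry b = { .next = NULL, .owner = NULL };
    struct entry a = { .next = &b,   .owner = &vport };

    drop_owner_refs(&a, &vport);
    printf("a.owner=%p b.owner=%p\n", (void *)a.owner, (void *)b.owner);
    return 0;
}
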
+
+/**
* lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the els xri abort wcqe structure.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb015960dbc9..154c715fb3af 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
+ if (work_port_events & WORKER_DELAYED_DISC_TMO)
+ lpfc_delayed_disc_timeout_handler(vport);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
+ /* Stop delayed Nport discovery */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&vport->delayed_disc_tmo);
}
int
@@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_cleanup_vports_rrqs(vport);
+ lpfc_cleanup_vports_rrqs(vport, NULL);
/*
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
@@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_nlp_put(ndlp);
return;
}
@@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
-
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp = __lpfc_findnode_did(vport, did);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
return ndlp;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 96ed3ba6ba95..94ae37c5111a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -341,6 +341,12 @@ struct csp {
uint8_t bbCreditMsb;
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
@@ -3198,7 +3204,10 @@ typedef struct {
#define IOERR_SLER_RRQ_RJT_ERR 0x4C
#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
#define IOERR_SLER_ABTS_ERR 0x4E
-
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3
#define IOERR_DRVR_MASK 0x100
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
#define IOERR_SLI_BRESET 0x102
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 94c1aa1136de..c7178d60c7bf 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -778,6 +778,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
};
struct lpfc_mbx_supp_pages {
@@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages {
#define LPFC_SLI4_PARAMETERS 2
};
-struct lpfc_mbx_sli4_params {
+struct lpfc_mbx_pc_sli4_params {
uint32_t word1;
#define qs_SHIFT 0
#define qs_MASK 0x00000001
@@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params {
uint32_t rsvd_13_63[51];
};
+struct lpfc_sli4_parameters {
+ uint32_t word0;
+#define cfg_prot_type_SHIFT 0
+#define cfg_prot_type_MASK 0x000000FF
+#define cfg_prot_type_WORD word0
+ uint32_t word1;
+#define cfg_ft_SHIFT 0
+#define cfg_ft_MASK 0x00000001
+#define cfg_ft_WORD word1
+#define cfg_sli_rev_SHIFT 4
+#define cfg_sli_rev_MASK 0x0000000f
+#define cfg_sli_rev_WORD word1
+#define cfg_sli_family_SHIFT 8
+#define cfg_sli_family_MASK 0x0000000f
+#define cfg_sli_family_WORD word1
+#define cfg_if_type_SHIFT 12
+#define cfg_if_type_MASK 0x0000000f
+#define cfg_if_type_WORD word1
+#define cfg_sli_hint_1_SHIFT 16
+#define cfg_sli_hint_1_MASK 0x000000ff
+#define cfg_sli_hint_1_WORD word1
+#define cfg_sli_hint_2_SHIFT 24
+#define cfg_sli_hint_2_MASK 0x0000001f
+#define cfg_sli_hint_2_WORD word1
+ uint32_t word2;
+ uint32_t word3;
+ uint32_t word4;
+#define cfg_cqv_SHIFT 14
+#define cfg_cqv_MASK 0x00000003
+#define cfg_cqv_WORD word4
+ uint32_t word5;
+ uint32_t word6;
+#define cfg_mqv_SHIFT 14
+#define cfg_mqv_MASK 0x00000003
+#define cfg_mqv_WORD word6
+ uint32_t word7;
+ uint32_t word8;
+#define cfg_wqv_SHIFT 14
+#define cfg_wqv_MASK 0x00000003
+#define cfg_wqv_WORD word8
+ uint32_t word9;
+ uint32_t word10;
+#define cfg_rqv_SHIFT 14
+#define cfg_rqv_MASK 0x00000003
+#define cfg_rqv_WORD word10
+ uint32_t word11;
+#define cfg_rq_db_window_SHIFT 28
+#define cfg_rq_db_window_MASK 0x0000000f
+#define cfg_rq_db_window_WORD word11
+ uint32_t word12;
+#define cfg_fcoe_SHIFT 0
+#define cfg_fcoe_MASK 0x00000001
+#define cfg_fcoe_WORD word12
+#define cfg_phwq_SHIFT 15
+#define cfg_phwq_MASK 0x00000001
+#define cfg_phwq_WORD word12
+#define cfg_loopbk_scope_SHIFT 28
+#define cfg_loopbk_scope_MASK 0x0000000f
+#define cfg_loopbk_scope_WORD word12
+ uint32_t sge_supp_len;
+ uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT 0
+#define cfg_sgl_page_cnt_MASK 0x0000000f
+#define cfg_sgl_page_cnt_WORD word14
+#define cfg_sgl_page_size_SHIFT 8
+#define cfg_sgl_page_size_MASK 0x000000ff
+#define cfg_sgl_page_size_WORD word14
+#define cfg_sgl_pp_align_SHIFT 16
+#define cfg_sgl_pp_align_MASK 0x000000ff
+#define cfg_sgl_pp_align_WORD word14
+ uint32_t word15;
+ uint32_t word16;
+ uint32_t word17;
+ uint32_t word18;
+ uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+ struct mbox_header header;
+ struct lpfc_sli4_parameters sli4_parameters;
+};
+
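
Each cfg_* field in the new struct lpfc_sli4_parameters is described by a SHIFT/MASK/WORD macro triplet that generic get/set helpers combine at the use site. The standalone sketch below shows how such helpers consume the triplets; the helper names mirror the driver's bf_get/bf_set convention but are reimplemented here, so treat them as illustrative.

#include <stdint.h>
#include <stdio.h>

struct params_word1 { uint32_t word1; };

/* Field descriptions: bit position, width mask, and the word holding them. */
#define cfg_sli_rev_SHIFT      4
#define cfg_sli_rev_MASK       0x0000000f
#define cfg_sli_rev_WORD       word1
#define cfg_if_type_SHIFT      12
#define cfg_if_type_MASK       0x0000000f
#define cfg_if_type_WORD       word1

/* Generic accessors built from the triplets, in the bf_get/bf_set style. */
#define bf_get(name, ptr) \
    (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
    ((ptr)->name##_WORD = ((ptr)->name##_WORD & \
        ~(name##_MASK << name##_SHIFT)) | \
        (((value) & name##_MASK) << name##_SHIFT))

int main(void)
{
    struct params_word1 p = { 0 };

    bf_set(cfg_sli_rev, &p, 4);
    bf_set(cfg_if_type, &p, 2);
    printf("word1=0x%08x sli_rev=%u if_type=%u\n",
           p.word1, bf_get(cfg_sli_rev, &p), bf_get(cfg_if_type, &p));
    return 0;
}
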
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2103,7 +2192,8 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_sli4_params sli4_params;
+ struct lpfc_mbx_pc_sli4_params sli4_params;
+ struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
struct lpfc_mbx_nop nop;
} un;
};
@@ -2381,6 +2471,10 @@ struct wqe_common {
#define wqe_wqes_SHIFT 15
#define wqe_wqes_MASK 0x00000001
#define wqe_wqes_WORD word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT 1
+#define wqe_wqid_MASK 0x0000007f
+#define wqe_wqid_WORD word10
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
@@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe {
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_iread64_wqe {
@@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe {
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_icmnd64_wqe {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6d0b36aa3389..35665cfb5689 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|| ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
&& !(phba->lmt & LMT_16Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->cfg_link_speed);
@@ -945,17 +945,13 @@ static void
lpfc_rrq_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
- uint32_t tmo_posted;
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
- if (!tmo_posted)
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!tmo_posted)
- lpfc_worker_wake_up(phba);
+ lpfc_worker_wake_up(phba);
}
/**
@@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
/* Wait for any activity on ndlps to settle */
msleep(10);
}
+ lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
@@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
del_timer_sync(&vport->els_tmofunc);
del_timer_sync(&vport->fc_fdmitmo);
+ del_timer_sync(&vport->delayed_disc_tmo);
lpfc_can_disctmo(vport);
return;
}
@@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
del_timer_sync(&phba->hb_tmofunc);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ del_timer_sync(&phba->rrq_tmr);
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ }
phba->hb_outstanding = 0;
switch (phba->pci_dev_grp) {
@@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
+
+ init_timer(&vport->delayed_disc_tmo);
+ vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+ vport->delayed_disc_tmo.data = (unsigned long)vport;
+
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}
- /* Get the Supported Pages. It is always available. */
+ /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_bsmbx;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
+ if (!rc) {
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ if (rc) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
}
}
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ /*
+ * Get sli4 parameters that override parameters from Port capabilities.
+ * If this call fails it is not a critical error so continue loading.
+ */
+ lpfc_get_sli4_parameters(phba, mboxq);
mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_bsmbx;
- }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mqe = &mboxq->u.mqe;
/* Read the port's SLI4 Parameters port capabilities */
- lpfc_sli4_params(mboxq);
+ lpfc_pc_sli4_params(mboxq);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
@@ -7854,6 +7862,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe = &mboxq->u.mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ int length;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+ if (unlikely(rc))
+ return rc;
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+ sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+ sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+ sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+ mbx_sli4_parameters);
+ sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+ mbx_sli4_parameters);
+ if (bf_get(cfg_phwq, mbx_sli4_parameters))
+ phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+ sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+ sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+ sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+ mbx_sli4_parameters);
+ sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+ mbx_sli4_parameters);
+ return 0;
+}
+
+/**
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 23403c650207..dba32dfdb59b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
+ if (phba->cfg_enable_dss)
+ mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
@@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* @mbox: pointer to lpfc mbox command.
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
- * @length: Length of the sli4 config mailbox command.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
*
* This routine sets up the header fields of SLI4 specific mailbox command
* for sending IOCTL command.
@@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
if (emb) {
/* Set up main header fields */
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
- sli4_config->header.cfg_mhdr.payload_length =
- LPFC_MBX_CMD_HDR_LENGTH + length;
+ sli4_config->header.cfg_mhdr.payload_length = length;
/* Set up sub-header fields following main header */
bf_set(lpfc_mbox_hdr_opcode,
&sli4_config->header.cfg_shdr.request, opcode);
bf_set(lpfc_mbox_hdr_subsystem,
&sli4_config->header.cfg_shdr.request, subsystem);
- sli4_config->header.cfg_shdr.request.request_length = length;
+ sli4_config->header.cfg_shdr.request.request_length =
+ length - LPFC_MBX_CMD_HDR_LENGTH;
return length;
}
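
In the hunk above, the payload length is now stored exactly as the caller passes it and the embedded request length is derived by subtracting the command-header size, instead of adding the header on top. A toy before/after of that arithmetic, using made-up sizes (the real sizes come from the SLI4 structures), looks like this:

#include <stdio.h>

#define CMD_HDR_LEN 16u              /* stand-in for LPFC_MBX_CMD_HDR_LENGTH */

int main(void)
{
    /* Callers now pass: length = sizeof(full command) - sizeof(main cfg header). */
    unsigned int length = 64;        /* made-up: sub-header + body */

    /* Accounting as in the removed lines: */
    unsigned int old_payload = CMD_HDR_LEN + length;
    unsigned int old_request = length;

    /* Accounting as in the added lines: */
    unsigned int new_payload = length;
    unsigned int new_request = length - CMD_HDR_LEN;

    printf("old: payload=%u request=%u\n", old_payload, old_request);
    printf("new: payload=%u request=%u\n", new_payload, new_request);
    return 0;
}
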
@@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
@@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox)
}
/**
- * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
- * mailbox command.
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
* retrieve the particular SLI4 features supported by the port.
**/
void
-lpfc_sli4_params(struct lpfcMboxq *mbox)
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
{
- struct lpfc_mbx_sli4_params *sli4_params;
+ struct lpfc_mbx_pc_sli4_params *sli4_params;
memset(mbox, 0, sizeof(*mbox));
sli4_params = &mbox->u.mqe.un.sli4_params;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d85a7423a694..52b35159fc35 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /* no need to reg_login if we are already in one of these states */
+ /*
+ * Need to unreg_login if we are already in one of these states and
+ * change to NPR state. This will block the port until after the ACC
+ * completes and the reg_login is issued and completed.
+ */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
- return 1;
+ lpfc_unreg_rpi(vport, ndlp);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
if ((vport->fc_flag & FC_PT2PT) &&
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c97751c95d77..bf34178b80bf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -609,6 +609,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
}
/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->rdata && psb->rdata->pnode
+ && psb->rdata->pnode->vport == vport)
+ psb->rdata = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
@@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
psb->status = IOSTAT_SUCCESS;
spin_unlock(
&phba->sli4_hba.abts_scsi_buf_list_lock);
- ndlp = psb->rdata->pnode;
+ if (psb->rdata && psb->rdata->pnode)
+ ndlp = psb->rdata->pnode;
+ else
+ ndlp = NULL;
+
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (ndlp)
@@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- struct lpfc_scsi_buf *lpfc_cmd = NULL;
- struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
- struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+ struct lpfc_scsi_buf *lpfc_cmd;
unsigned long iflag = 0;
int found = 0;
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
- while (!found && lpfc_cmd) {
+ list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
+ list) {
if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_cmd->cur_iocbq.sli4_xritag)) {
- lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd,
- struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
- iflag);
- if (lpfc_cmd == start_lpfc_cmd) {
- lpfc_cmd = NULL;
- break;
- } else
- continue;
- }
+ lpfc_cmd->cur_iocbq.sli4_xritag))
+ continue;
+ list_del(&lpfc_cmd->list);
found = 1;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
lpfc_cmd->prot_seg_cnt = 0;
+ break;
}
- return lpfc_cmd;
+ spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+ if (!found)
+ return NULL;
+ else
+ return lpfc_cmd;
}
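
The rewritten allocator above no longer pops and re-pushes buffers; it scans the free list in place, skips any buffer whose XRI still has an RRQ outstanding for the target node, and unlinks the first usable one while still holding the lock. Below is a user-space sketch of that selection loop; a singly linked list and a stub predicate stand in for the driver's structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf {
    struct buf *next;
    unsigned int xri;
};

/* Stub for lpfc_test_rrq_active(): pretend odd XRIs still have an RRQ open. */
static bool rrq_active(unsigned int xri)
{
    return xri & 1;
}

/* Unlink and return the first buffer whose XRI is usable, or NULL. */
static struct buf *get_buf(struct buf **head)
{
    for (struct buf **pp = head; *pp; pp = &(*pp)->next) {
        if (rrq_active((*pp)->xri))
            continue;               /* leave it on the list, try the next one */
        struct buf *found = *pp;
        *pp = found->next;          /* list_del() equivalent */
        found->next = NULL;
        return found;
    }
    return NULL;
}

int main(void)
{
    struct buf c = { NULL, 7 }, b = { &c, 4 }, a = { &b, 3 };
    struct buf *head = &a;
    struct buf *got = get_buf(&head);

    printf("got xri %u, list head now xri %u\n", got->xri, head->xri);
    return 0;
}
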
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *first_data_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg;
+ struct ulp_bde64 *bde;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl += 1;
-
+ first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
@@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_offset += dma_len;
sgl++;
}
+ /* setup the performance hint (first data BDE) if enabled */
+ if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ bde->addrLow = first_data_sgl->addr_lo;
+ bde->addrHigh = first_data_sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(first_data_sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ }
} else {
sgl += 1;
/* clear the last flag in the fcp_rsp map entry */
@@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
+ case IOSTAT_REMOTE_STOP:
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ break;
+ }
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
@@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
}
-
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
@@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
"on unprotected cmd\n");
}
}
-
+ if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+ && (phba->sli_rev == LPFC_SLI_REV4)
+ && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ /* This IO was aborted by the target; we don't
+ * know the rxid and, because we did not send the
+ * ABTS, we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, pnode,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ 0, 0);
+ }
/* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
@@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
SAM_STAT_BUSY);
- } else {
+ } else
cmd->result = ScsiResult(DID_OK, 0);
- }
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
goto out_fail_command;
}
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
- goto out_host_busy;
+ goto out_tgt_busy;
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
+ out_tgt_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
out_fail_command:
done(cmnd);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a359d2b873ce..2ee0374a9908 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
-
+ if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
/* Update the host index before invoking device */
@@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
+ uint32_t did = 0;
+
+
+ if (!ndlp)
+ return -EINVAL;
+
+ if (!phba->cfg_enable_rrq)
+ return -EINVAL;
+
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ goto out;
+ }
+ did = ndlp->nlp_DID;
/*
* set the active bit even if there is no mem available.
*/
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (!ndlp)
- return -EINVAL;
+
+ if (NLP_CHK_FREE_REQ(ndlp))
+ goto out;
+
+ if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ goto out;
+
if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
- return -EINVAL;
+ goto out;
+
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
@@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
empty = list_empty(&phba->active_rrq_list);
- if (phba->cfg_enable_rrq && send_rrq)
- /*
- * We need the xri before we can add this to the
- * phba active rrq list.
- */
- rrq->send_rrq = send_rrq;
- else
- rrq->send_rrq = 0;
+ rrq->send_rrq = send_rrq;
list_add_tail(&rrq->list, &phba->active_rrq_list);
if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
return 0;
}
- return -ENOMEM;
+out:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2921 Can't set rrq active xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, did, send_rrq);
+ return -EINVAL;
}
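
Marking an XRI's RRQ active hinges on the atomic test-and-set on the node's xri_bitmap: if the bit was already set, the routine now bails out through the logging path added above. Here is a user-space sketch of that bitmap bookkeeping, using a plain (non-atomic) stand-in for the kernel's test_and_set_bit().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XRI_BITMAP_WORDS 8           /* room for 8 * 32 adjusted XRIs */

/* Non-atomic stand-in for test_and_set_bit(): returns the previous value. */
static bool test_and_set_bit32(unsigned int nr, uint32_t *map)
{
    uint32_t mask = 1u << (nr % 32);
    bool old = map[nr / 32] & mask;
    map[nr / 32] |= mask;
    return old;
}

int main(void)
{
    uint32_t xri_bitmap[XRI_BITMAP_WORDS] = { 0 };
    unsigned int xri = 100, xri_base = 64;
    unsigned int adj_xri = xri - xri_base;   /* same adjustment as the driver */

    if (test_and_set_bit32(adj_xri, xri_bitmap))
        printf("xri %u already had an active RRQ\n", xri);
    else
        printf("xri %u marked active\n", xri);

    /* A second attempt on the same XRI now reports the bit as already set. */
    printf("second attempt: %d\n", test_and_set_bit32(adj_xri, xri_bitmap));
    return 0;
}
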
/**
- * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @xritag: xri used in this exchange.
* @rrq: The RRQ to be cleared.
*
- * This function is called with hbalock held. This function
**/
-static void
-__lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+ uint16_t xritag,
+ struct lpfc_node_rrq *rrq)
{
uint16_t adj_xri;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
- ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+ if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
/* The target DID could have been swapped (cable swap)
* we should use the ndlp from the findnode if it is
* available.
*/
- if (!ndlp)
+ if ((!ndlp) && rrq->ndlp)
ndlp = rrq->ndlp;
+ if (!ndlp)
+ goto out;
+
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
}
+out:
mempool_free(rrq, phba->rrq_pool);
}
@@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(send_rrq);
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov + 1);
list_for_each_entry_safe(rrq, nextrrq,
- &phba->active_rrq_list, list) {
- if (time_after(jiffies, rrq->rrq_stop_time)) {
- list_del(&rrq->list);
- if (!rrq->send_rrq)
- /* this call will free the rrq */
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- else {
- /* if we send the rrq then the completion handler
- * will clear the bit in the xribitmap.
- */
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (lpfc_send_rrq(phba, rrq)) {
- lpfc_clr_rrq_active(phba, rrq->xritag,
- rrq);
- }
- spin_lock_irqsave(&phba->hbalock, iflags);
- }
- } else if (time_before(rrq->rrq_stop_time, next_time))
+ &phba->active_rrq_list, list) {
+ if (time_after(jiffies, rrq->rrq_stop_time))
+ list_move(&rrq->list, &send_rrq);
+ else if (time_before(rrq->rrq_stop_time, next_time))
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
+ list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+ list_del(&rrq->list);
+ if (!rrq->send_rrq)
+ /* this call will free the rrq */
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ else if (lpfc_send_rrq(phba, rrq)) {
+ /* if we send the rrq then the completion handler
+ * will clear the bit in the xribitmap.
+ */
+ lpfc_clr_rrq_active(phba, rrq->xritag,
+ rrq);
+ }
+ }
}
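
The timeout handler above was restructured so that expired RRQs are moved onto a private list while the hba lock is held and are only then sent or cleared with the lock dropped; the old version had to release and retake the lock inside the walk. Below is a minimal pthread sketch of the same splice-then-process pattern, with generic nodes rather than the driver's RRQ objects.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int expired; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *active;          /* shared list, protected by 'lock' */

static void handle_expired(void)
{
    struct node *expired_list = NULL, **tail = &expired_list;

    /* Phase 1: under the lock, only move expired entries to a local list. */
    pthread_mutex_lock(&lock);
    for (struct node **pp = &active; *pp; ) {
        if ((*pp)->expired) {
            struct node *n = *pp;
            *pp = n->next;           /* unlink from the shared list */
            n->next = NULL;
            *tail = n;               /* append to the private list */
            tail = &n->next;
        } else {
            pp = &(*pp)->next;
        }
    }
    pthread_mutex_unlock(&lock);

    /* Phase 2: lock dropped, so the slow work (sending RRQs) can block. */
    for (struct node *n = expired_list; n; n = n->next)
        printf("processing expired node %p\n", (void *)n);
}

int main(void)
{
    struct node b = { NULL, 1 }, a = { &b, 0 };
    active = &a;
    handle_expired();
    printf("still active: %p\n", (void *)active);
    return 0;
}
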
/**
@@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
/**
* lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
* @vport: Pointer to vport context object.
- *
- * Remove all active RRQs for this vport from the phba->active_rrq_list and
- * clear the rrq.
+ * @ndlp: Pointer to the lpfc_nodelist structure.
+ *
+ * If ndlp is NULL, remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If ndlp is not NULL, only remove the rrqs for this vport and this ndlp.
**/
void
-lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_node_rrq *rrq;
struct lpfc_node_rrq *nextrrq;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
- if (rrq->vport == vport) {
- list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- }
+ if (!ndlp) {
+ lpfc_sli4_vport_delete_els_xri_aborted(vport);
+ lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+ if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_move(&rrq->list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+ list_del(&rrq->list);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ }
}
/**
@@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov * 2);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ list_splice_init(&phba->active_rrq_list, &rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
}
/**
- * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @ndlp: Targets nodelist pointer for this exchange.
* @xritag the xri in the bitmap to test.
@@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
* returns 0 = rrq not active for this xri
* 1 = rrq is valid for this xri.
**/
-static int
-__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
uint16_t adj_xri;
@@ -802,52 +836,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
/**
- * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @xritag: xri used in this exchange.
- * @rrq: The RRQ to be cleared.
- *
- * This function is takes the hbalock.
- **/
-void
-lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
-{
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- __lpfc_clr_rrq_active(phba, xritag, rrq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return;
-}
-
-
-
-/**
- * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
- *
- * This function takes the hbalock.
- * returns 0 = rrq not active for this xri
- * 1 = rrq is valid for this xri.
- **/
-int
-lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- uint16_t xritag)
-{
- int ret;
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return ret;
-}
-
-/**
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
@@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
return NULL;
adj_xri = sglq->sli4_xritag -
phba->sli4_hba.max_cfg_param.xri_base;
- if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+ if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
*/
@@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else {
sglq->state = SGL_FREED;
sglq->ndlp = NULL;
- list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
@@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0378 No support for fcpi mode.\n");
ftr_rsp++;
}
-
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
/*
* If the port cannot support the host's requested features
* then turn off the global config parameters to disable the
@@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
if (rc)
@@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->type = type;
cq->subtype = subtype;
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ cq->assoc_qid = eq->queue_id;
cq->host_index = 0;
cq->hba_index = 0;
@@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
goto out;
}
mq->type = LPFC_MQ;
+ mq->assoc_qid = cq->queue_id;
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
@@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
goto out;
}
wq->type = LPFC_WQ;
+ wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
@@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
hrq->type = LPFC_HRQ;
+ hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
@@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
drq->type = LPFC_DRQ;
+ drq->assoc_qid = cq->queue_id;
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
@@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_rq_destroy) -
- sizeof(struct mbox_header));
+ sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
@@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
sizeof(struct lpfc_mbx_post_sgl_pages) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
&mbox->u.mqe.un.post_sgl_pages;
@@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
hdr_tmpl, rpi_page->page_count);
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c7217d579e0f..595056b89608 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -125,9 +125,9 @@ struct lpfc_queue {
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t queue_id; /* Queue ID assigned by the hardware */
+ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
-
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
union sli4_qe qe[1]; /* array to index entries (must be last) */
@@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint8_t cqv;
+ uint8_t mqv;
+ uint8_t wqv;
+ uint8_t rqv;
};
/* SLI4 HBA data structure entries */
@@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 386cf92de492..0a4d376dbca5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.20"
+#define LPFC_DRIVER_VERSION "8.3.21"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6b8d2952e32f..30ba5440c67a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
long timeout;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
@@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9aa048525eb2..c212694a9714 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1412,7 +1412,7 @@ megaraid_isr_memmapped(int irq, void *devp)
* @nstatus - number of completed commands
* @status - status of the last command completed
*
- * Complete the comamnds and call the scsi mid-layer callback hooks.
+ * Complete the commands and call the scsi mid-layer callback hooks.
*/
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
@@ -4296,7 +4296,7 @@ mega_support_cluster(adapter_t *adapter)
* @adapter - pointer to our soft state
* @dma_handle - DMA address of the buffer
*
- * Issue internal comamnds while interrupts are available.
+ * Issue internal commands while interrupts are available.
* We only issue direct mailbox commands from within the driver. ioctl()
* interface using these routines can issue passthru commands.
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 1b5e375732c0..635b228c3ead 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.29-rc1"
-#define MEGASAS_RELDATE "Dec. 7, 2010"
-#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010"
+#define MEGASAS_VERSION "00.00.05.34-rc1"
+#define MEGASAS_RELDATE "Feb. 24, 2011"
+#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
/*
* Device IDs
@@ -723,6 +723,7 @@ struct megasas_ctrl_info {
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
#define MEGASAS_FW_BUSY 1
@@ -1477,4 +1478,7 @@ struct megasas_mgmt_info {
int max_index;
};
+#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5d6d07bd1cd0..bbd10c81fd9c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,12 +18,13 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.29-rc1
+ * Version : v00.00.05.34-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
* Sumant Patro
* Bo Yang
+ * Adam Radford <linuxraid@lsi.com>
*
* Send feedback to: <megaraidlinux@lsi.com>
*
@@ -134,7 +135,11 @@ spinlock_t poll_aen_lock;
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
-
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
@@ -554,6 +559,8 @@ static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
u32 status;
+ u32 mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
@@ -564,6 +571,15 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
}
/*
+ * Check if the firmware is in a fault state
+ */
+ if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
* Clear the interrupt by writing back the same value
*/
writel(status, &regs->outbound_intr_status);
@@ -573,7 +589,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
*/
readl(&regs->outbound_intr_status);
- return 1;
+ return mfiStatus;
}
/**
@@ -597,17 +613,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_skinny - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_skinny(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_skinny - For controller reset check
* @regs: MFI register set
*/
@@ -625,7 +630,7 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
.disable_intr = megasas_disable_intr_skinny,
.clear_intr = megasas_clear_intr_skinny,
.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
- .adp_reset = megasas_adp_reset_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
.check_reset = megasas_check_reset_skinny,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -740,20 +745,28 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
{
u32 retry = 0 ;
u32 HostDiag;
+ u32 *seq_offset = &reg_set->seq_offset;
+ u32 *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
- writel(0, &reg_set->seq_offset);
- writel(4, &reg_set->seq_offset);
- writel(0xb, &reg_set->seq_offset);
- writel(2, &reg_set->seq_offset);
- writel(7, &reg_set->seq_offset);
- writel(0xd, &reg_set->seq_offset);
msleep(1000);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -764,14 +777,14 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
- writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
ssleep(10);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -877,7 +890,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
* @instance: Adapter soft state
* @cmd_to_abort: Previously issued cmd to be aborted
*
- * MFI firmware can abort previously issued AEN comamnd (automatic event
+ * MFI firmware can abort previously issued AEN command (automatic event
* notification). The megasas_issue_blocked_abort_cmd() issues such abort
* cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -2503,7 +2516,9 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
if ((mfiStatus = instance->instancet->clear_intr(
instance->reg_set)
) == 0) {
- return IRQ_NONE;
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msi_flag)
+ return IRQ_NONE;
}
instance->mfiStatus = mfiStatus;
@@ -2611,7 +2626,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
case MFI_STATE_FAULT:
printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
- return -ENODEV;
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -3424,7 +3441,6 @@ fail_reply_queue:
megasas_free_cmds(instance);
fail_alloc_cmds:
- iounmap(instance->reg_set);
return 1;
}
@@ -3494,7 +3510,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
- return -ENODEV;
+ goto fail_init_adapter;
printk(KERN_ERR "megasas: INIT adapter done\n");
@@ -3543,7 +3559,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* Setup tasklet for cmd completion
*/
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
/* Initialize the cmd completion timer */
@@ -3553,6 +3569,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
+fail_init_adapter:
fail_ready_state:
iounmap(instance->reg_set);
@@ -3820,6 +3837,10 @@ static int megasas_io_attach(struct megasas_instance *instance)
instance->max_fw_cmds - MEGASAS_INT_CMDS;
host->this_id = instance->init_id;
host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
/*
* Check if the module parameter value for max_sectors can be used
*/
@@ -3899,9 +3920,26 @@ fail_set_dma_mask:
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int rval;
+ int rval, pos;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos),
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ msi_control_reg(pos),
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
/*
* Announce PCI information
@@ -4039,12 +4077,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /*
- * Initialize MFI Firmware
- */
- if (megasas_init_fw(instance))
- goto fail_init_mfi;
-
/* Try to enable MSI-X */
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
@@ -4054,6 +4086,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->msi_flag = 1;
/*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ /*
* Register IRQ
*/
if (request_irq(instance->msi_flag ? instance->msixentry.vector :
@@ -4105,24 +4143,23 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->instancet->disable_intr(instance->reg_set);
free_irq(instance->msi_flag ? instance->msixentry.vector :
instance->pdev->irq, instance);
+fail_irq:
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
if (instance->msi_flag)
pci_disable_msix(instance->pdev);
-
- fail_irq:
- fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
- if (instance->producer) {
+ if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
- megasas_release_mfi(instance);
- } else {
- megasas_release_fusion(instance);
- }
if (instance->consumer)
pci_free_consistent(pdev, sizeof(u32), instance->consumer,
instance->consumer_h);
@@ -4242,9 +4279,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
/* cancel the delayed work if this work still in queue */
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4297,6 +4333,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
/*
* Initialize MFI Firmware
*/
@@ -4333,10 +4373,6 @@ megasas_resume(struct pci_dev *pdev)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Register IRQ
*/
@@ -4417,9 +4453,8 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4611,6 +4646,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* For each user buffer, create a mirror buffer and copy in
*/
for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
ioc->sgl[i].iov_len,
&buf_handle, GFP_KERNEL);
@@ -5177,6 +5215,7 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
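
Editor's note — a minimal user-space sketch (not part of the patch) of the interrupt classification that the reworked megasas_clear_intr_skinny() performs in the hunks above: a faulted firmware is reported as a state change, everything else as a normal reply. The MFI_* values below are assumed stand-ins for the driver's defines, not quoted from megaraid_sas.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stand-ins for the driver's defines (values are placeholders). */
    #define MFI_STATE_MASK                        0xF0000000u
    #define MFI_STATE_FAULT                       0xF0000000u
    #define MFI_INTR_FLAG_REPLY_MESSAGE           0x00000001u
    #define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE   0x00000002u

    /* Classify an interrupt the way the reworked clear_intr path does:
     * a faulted firmware is flagged as a state change, anything else
     * as an ordinary reply message. */
    static unsigned int classify_intr(unsigned int fw_status)
    {
            if ((fw_status & MFI_STATE_MASK) == MFI_STATE_FAULT)
                    return MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
            return MFI_INTR_FLAG_REPLY_MESSAGE;
    }

    int main(void)
    {
            printf("%#x\n", classify_intr(0xF0000123u)); /* faulted firmware */
            printf("%#x\n", classify_intr(0xB0000000u)); /* operational */
            return 0;
    }

As the megasas_deplete_reply_queue() hunk above shows, a zero return from clear_intr is then treated as a spurious interrupt only when MSI-X is not in use.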
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c1e09d5a6196..905c1e9317b6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -82,6 +82,10 @@ u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
@@ -984,13 +988,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
return 0;
-fail_alloc_cmds:
-fail_alloc_mfi_cmds:
fail_map_info:
if (i == 1)
dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
return 1;
}
@@ -1432,8 +1438,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
/* Check if this is a system PD I/O */
- if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
- (instance->pd_list[pd_index].driveType == TYPE_DISK)) {
+ if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1456,7 +1461,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
io_request->RaidContext.VirtualDiskTgtId = device_id;
- io_request->LUN[0] = scmd->device->lun;
+ io_request->LUN[1] = scmd->device->lun;
io_request->DataLength = scsi_bufflen(scmd);
}
@@ -1480,7 +1485,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scp);
/* Zero out some fields so they don't get reused */
- io_request->LUN[0] = 0;
+ io_request->LUN[1] = 0;
io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
io_request->EEDPFlags = 0;
@@ -1744,7 +1749,7 @@ complete_cmd_fusion(struct megasas_instance *instance)
wmb();
writel(fusion->last_reply_idx,
&instance->reg_set->reply_post_host_index);
-
+ megasas_check_and_restore_queue_depth(instance);
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8be75e65f763..a3e60385787f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.16
+ * mpi2.h Version: 02.00.17
*
* Version History
* ---------------
@@ -63,6 +63,7 @@
* function codes, 0xF0 to 0xFF.
* 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
* Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -88,7 +89,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_UNIT (0x11)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index d76a65847603..f5b9c766e28f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.15
+ * mpi2_cnfg.h Version: 02.00.16
*
* Version History
* ---------------
@@ -125,6 +125,8 @@
* define.
* Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
* Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
* --------------------------------------------------------------------------
*/
@@ -745,8 +747,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
-#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002)
-#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
/* IO Unit Page 3 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
deleted file mode 100644
index b1e88f26b748..000000000000
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ /dev/null
@@ -1,384 +0,0 @@
- ==============================
- Fusion-MPT MPI 2.0 Header File Change History
- ==============================
-
- Copyright (c) 2000-2010 LSI Corporation.
-
- ---------------------------------------
- Header Set Release Version: 02.00.14
- Header Set Release Date: 10-28-09
- ---------------------------------------
-
- Filename Current version Prior version
- ---------- --------------- -------------
- mpi2.h 02.00.14 02.00.13
- mpi2_cnfg.h 02.00.13 02.00.12
- mpi2_init.h 02.00.08 02.00.07
- mpi2_ioc.h 02.00.13 02.00.12
- mpi2_raid.h 02.00.04 02.00.04
- mpi2_sas.h 02.00.03 02.00.02
- mpi2_targ.h 02.00.03 02.00.03
- mpi2_tool.h 02.00.04 02.00.04
- mpi2_type.h 02.00.00 02.00.00
- mpi2_ra.h 02.00.00 02.00.00
- mpi2_hbd.h 02.00.00
- mpi2_history.txt 02.00.14 02.00.13
-
-
- * Date Version Description
- * -------- -------- ------------------------------------------------------
-
-mpi2.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
- * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
- * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved ReplyPostHostIndex register to offset 0x6C of the
- * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
- * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
- * Added union of request descriptors.
- * Added union of reply descriptors.
- * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added define for MPI2_VERSION_02_00.
- * Fixed the size of the FunctionDependent5 field in the
- * MPI2_DEFAULT_REPLY structure.
- * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
- * Removed the MPI-defined Fault Codes and extended the
- * product specific codes up to 0xEFFF.
- * Added a sixth key value for the WriteSequence register
- * and changed the flush value to 0x0.
- * Added message function codes for Diagnostic Buffer Post
- * and Diagnsotic Release.
- * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
- * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
- * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
- * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added #defines for marking a reply descriptor as unused.
- * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved LUN field defines from mpi2_init.h.
- * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
- * In all request and reply descriptors, replaced VF_ID
- * field with MSIxIndex field.
- * Removed DevHandle field from
- * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
- * bytes reserved.
- * Added RAID Accelerator functionality.
- * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added MSI-x index mask and shift for Reply Post Host
- * Index register.
- * Added function code for Host Based Discovery Action.
- * --------------------------------------------------------------------------
-
-mpi2_cnfg.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
- * Added Manufacturing Page 11.
- * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
- * define.
- * 06-26-07 02.00.02 Adding generic structure for product-specific
- * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
- * Rework of BIOS Page 2 configuration page.
- * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
- * forms.
- * Added configuration pages IOC Page 8 and Driver
- * Persistent Mapping Page 0.
- * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
- * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
- * RAID Physical Disk Pages 0 and 1, RAID Configuration
- * Page 0).
- * Added new value for AccessStatus field of SAS Device
- * Page 0 (_SATA_NEEDS_INITIALIZATION).
- * 10-31-07 02.00.04 Added missing SEPDevHandle field to
- * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
- * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
- * NVDATA.
- * Modified IOC Page 7 to use masks and added field for
- * SASBroadcastPrimitiveMasks.
- * Added MPI2_CONFIG_PAGE_BIOS_4.
- * Added MPI2_CONFIG_PAGE_LOG_0.
- * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
- * Added SAS Device IDs.
- * Updated Integrated RAID configuration pages including
- * Manufacturing Page 4, IOC Page 6, and RAID Configuration
- * Page 0.
- * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
- * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
- * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
- * Added missing MaxNumRoutedSasAddresses field to
- * MPI2_CONFIG_PAGE_EXPANDER_0.
- * Added SAS Port Page 0.
- * Modified structure layout for
- * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
- * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
- * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
- * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
- * to 0x000000FF.
- * Added two new values for the Physical Disk Coercion Size
- * bits in the Flags field of Manufacturing Page 4.
- * Added product-specific Manufacturing pages 16 to 31.
- * Modified Flags bits for controlling write cache on SATA
- * drives in IO Unit Page 1.
- * Added new bit to AdditionalControlFlags of SAS IO Unit
- * Page 1 to control Invalid Topology Correction.
- * Added SupportedPhysDisks field to RAID Volume Page 1 and
- * added related defines.
- * Added additional defines for RAID Volume Page 0
- * VolumeStatusFlags field.
- * Modified meaning of RAID Volume Page 0 VolumeSettings
- * define for auto-configure of hot-swap drives.
- * Added PhysDiskAttributes field (and related defines) to
- * RAID Physical Disk Page 0.
- * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
- * Added three new DiscoveryStatus bits for SAS IO Unit
- * Page 0 and SAS Expander Page 0.
- * Removed multiplexing information from SAS IO Unit pages.
- * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
- * Removed Zone Address Resolved bit from PhyInfo and from
- * Expander Page 0 Flags field.
- * Added two new AccessStatus values to SAS Device Page 0
- * for indicating routing problems. Added 3 reserved words
- * to this page.
- * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
- * Inserted missing reserved field into structure for IOC
- * Page 6.
- * Added more pending task bits to RAID Volume Page 0
- * VolumeStatusFlags defines.
- * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
- * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
- * and SAS Expander Page 0 to flag a downstream initiator
- * when in simplified routing mode.
- * Removed SATA Init Failure defines for DiscoveryStatus
- * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
- * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
- * Added PortGroups, DmaGroup, and ControlGroup fields to
- * SAS Device Page 0.
- * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
- * Unit Page 6.
- * Added expander reduced functionality data to SAS
- * Expander Page 0.
- * Added SAS PHY Page 2 and SAS PHY Page 3.
- * 07-30-09 02.00.12 Added IO Unit Page 7.
- * Added new device ids.
- * Added SAS IO Unit Page 5.
- * Added partial and slumber power management capable flags
- * to SAS Device Page 0 Flags field.
- * Added PhyInfo defines for power condition.
- * Added Ethernet configuration pages.
- * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
- * Added SAS PHY Page 4 structure and defines.
- * --------------------------------------------------------------------------
-
-mpi2_init.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
- * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
- * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
- * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
- * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
- * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
- * Control field Task Attribute flags.
- * Moved LUN field defines to mpi2.h becasue they are
- * common to many structures.
- * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
- * Query Asynchronous Event.
- * Defined two new bits in the SlotStatus field of the SCSI
- * Enclosure Processor Request and Reply.
- * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
- * both SCSI IO Error Reply and SCSI Task Management Reply.
- * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
- * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
- * --------------------------------------------------------------------------
-
-mpi2_ioc.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
- * MaxTargets.
- * Added TotalImageSize field to FWDownload Request.
- * Added reserved words to FWUpload Request.
- * 06-26-07 02.00.02 Added IR Configuration Change List Event.
- * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
- * request and replaced it with
- * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
- * Replaced the MinReplyQueueDepth field of the IOCFacts
- * reply with MaxReplyDescriptorPostQueueDepth.
- * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
- * depth for the Reply Descriptor Post Queue.
- * Added SASAddress field to Initiator Device Table
- * Overflow Event data.
- * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
- * for SAS Initiator Device Status Change Event data.
- * Modified Reason Code defines for SAS Topology Change
- * List Event data, including adding a bit for PHY Vacant
- * status, and adding a mask for the Reason Code.
- * Added define for
- * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
- * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
- * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
- * the IOCFacts Reply.
- * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Moved MPI2_VERSION_UNION to mpi2.h.
- * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
- * instead of enables, and added SASBroadcastPrimitiveMasks
- * field.
- * Added Log Entry Added Event and related structure.
- * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
- * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
- * Added MaxVolumes and MaxPersistentEntries fields to
- * IOCFacts reply.
- * Added ProtocalFlags and IOCCapabilities fields to
- * MPI2_FW_IMAGE_HEADER.
- * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
- * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
- * a U16 (from a U32).
- * Removed extra 's' from EventMasks name.
- * 06-27-08 02.00.08 Fixed an offset in a comment.
- * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
- * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
- * renamed MinReplyFrameSize to ReplyFrameSize.
- * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
- * Added two new RAIDOperation values for Integrated RAID
- * Operations Status Event data.
- * Added four new IR Configuration Change List Event data
- * ReasonCode values.
- * Added two new ReasonCode defines for SAS Device Status
- * Change Event data.
- * Added three new DiscoveryStatus bits for the SAS
- * Discovery event data.
- * Added Multiplexing Status Change bit to the PhyStatus
- * field of the SAS Topology Change List event data.
- * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
- * BootFlags are now product-specific.
- * Added defines for the indivdual signature bytes
- * for MPI2_INIT_IMAGE_FOOTER.
- * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
- * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
- * define.
- * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
- * define.
- * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
- * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
- * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
- * Added two new reason codes for SAS Device Status Change
- * Event.
- * Added new event: SAS PHY Counter.
- * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
- * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Added new product id family for 2208.
- * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
- * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
- * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
- * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
- * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
- * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
- * Added Host Based Discovery Phy Event data.
- * Added defines for ProductID Product field
- * (MPI2_FW_HEADER_PID_).
- * Modified values for SAS ProductID Family
- * (MPI2_FW_HEADER_PID_FAMILY_).
- * --------------------------------------------------------------------------
-
-mpi2_raid.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
- * including the Actions and ActionData.
- * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
- * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
- * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
- * can be sized by the build environment.
- * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
- * VolumeCreationFlags and marked the old one as obsolete.
- * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
- * --------------------------------------------------------------------------
-
-mpi2_sas.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
- * Control Request.
- * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
- * Request.
- * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
- * to MPI2_SGE_IO_UNION since it supports chained SGLs.
- * 05-12-10 02.00.04 Modified some comments.
- * --------------------------------------------------------------------------
-
-mpi2_targ.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
- * BufferPostFlags field of CommandBufferPostBase Request.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
- * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
- * Target Status Send Request only takes a single SGE for
- * response data.
- * --------------------------------------------------------------------------
-
-mpi2_tool.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
- * structures and defines.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
- * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
- * and reply messages.
- * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
- * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
- * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
- * --------------------------------------------------------------------------
-
-mpi2_type.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * --------------------------------------------------------------------------
-
-mpi2_ra.h
- * 05-06-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-mpi2_hbd.h
- * 10-28-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-
-mpi2_history.txt Parts list history
-
-Filename 02.00.14 02.00.13 02.00.12
----------- -------- -------- --------
-mpi2.h 02.00.14 02.00.13 02.00.12
-mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
-mpi2_init.h 02.00.08 02.00.07 02.00.07
-mpi2_ioc.h 02.00.13 02.00.12 02.00.11
-mpi2_raid.h 02.00.04 02.00.04 02.00.03
-mpi2_sas.h 02.00.03 02.00.02 02.00.02
-mpi2_targ.h 02.00.03 02.00.03 02.00.03
-mpi2_tool.h 02.00.04 02.00.04 02.00.03
-mpi2_type.h 02.00.00 02.00.00 02.00.00
-mpi2_ra.h 02.00.00 02.00.00 02.00.00
-mpi2_hbd.h 02.00.00
-
-Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
-mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
-mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
-mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
-mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
-mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
-mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
-Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
-mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 608f6d6e6fca..fdffde1ebc0f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.04
+ * mpi2_sas.h Version: 02.00.05
*
* Version History
* ---------------
@@ -21,6 +21,7 @@
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
* 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
* --------------------------------------------------------------------------
*/
@@ -163,7 +164,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
- MPI2_SGE_IO_UNION SGL; /* 0x20 */
+ MPI2_SGE_IO_UNION SGL; /* 0x30 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
@@ -246,6 +247,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 5c6e3a67bb94..2a4bceda364b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.05
+ * mpi2_tool.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,8 @@
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
* --------------------------------------------------------------------------
*/
@@ -354,6 +356,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
/* count of the number of buffer types */
#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
/****************************************************************************
* Diagnostic Buffer Post reply
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9ead0399808a..e8a6f1cf1e4b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -752,20 +752,19 @@ static u8
_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx = 0xFF;
-
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- i = smid - ioc->hi_priority_smid;
- cb_idx = ioc->hpr_lookup[i].cb_idx;
- } else if (smid <= ioc->hba_queue_depth) {
- i = smid - ioc->internal_smid;
- cb_idx = ioc->internal_lookup[i].cb_idx;
- }
- } else {
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
i = smid - 1;
cb_idx = ioc->scsi_lookup[i].cb_idx;
- }
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
return cb_idx;
}
@@ -1430,7 +1429,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
unsigned long flags;
- struct request_tracker *request;
+ struct scsiio_tracker *request;
u16 smid;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1442,7 +1441,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
}
request = list_entry(ioc->free_list.next,
- struct request_tracker, tracker_list);
+ struct scsiio_tracker, tracker_list);
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
@@ -1496,48 +1495,47 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
struct chain_tracker *chain_req, *next;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- /* hi-priority */
- i = smid - ioc->hi_priority_smid;
- ioc->hpr_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->hpr_lookup[i].tracker_list,
- &ioc->hpr_free_list);
- } else {
- /* internal queue */
- i = smid - ioc->internal_smid;
- ioc->internal_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->internal_lookup[i].tracker_list,
- &ioc->internal_free_list);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
}
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return;
- }
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->free_chain_list);
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
}
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
}
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- /*
- * See _wait_for_commands_to_complete() call with regards to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
- }
}
/**
@@ -1725,6 +1723,31 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
}
/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
+
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RMS2LL080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS2LL040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ break;
+ }
+ }
+}
+
+/**
* _base_display_ioc_capabilities - Disply IOC's capabilities.
* @ioc: per adapter object
*
@@ -1754,6 +1777,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
ioc->bios_pg3.BiosVersion & 0x000000FF);
_base_display_dell_branding(ioc);
+ _base_display_intel_branding(ioc);
printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
@@ -2252,9 +2276,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
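
Editor's note — the _base_get_cb_idx()/mpt2sas_base_free_smid() rework above flattens the nested smid checks into one ascending range ladder: SCSI I/O first, then hi-priority, then internal, then invalid. A self-contained sketch of that dispatch (not part of the patch), with hypothetical queue boundaries standing in for the ioc->hi_priority_smid, ioc->internal_smid and ioc->hba_queue_depth fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical queue boundaries; the real values come from the IOC facts. */
    enum { HI_PRIORITY_SMID = 1001, INTERNAL_SMID = 1009, HBA_QUEUE_DEPTH = 1013 };

    /* Mirror of the flattened lookup order: scsiio, hi-priority, internal, invalid. */
    static const char *smid_pool(uint16_t smid)
    {
            if (smid < HI_PRIORITY_SMID)
                    return "scsiio";
            else if (smid < INTERNAL_SMID)
                    return "hi-priority";
            else if (smid <= HBA_QUEUE_DEPTH)
                    return "internal";
            return "invalid";
    }

    int main(void)
    {
            uint16_t samples[] = { 1, 1000, 1001, 1008, 1009, 1013, 1014 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("smid %4u -> %s\n", (unsigned)samples[i],
                           smid_pool(samples[i]));
            return 0;
    }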
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 283568c6fb04..a3f8aa9baea4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "07.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 07
+#define MPT2SAS_DRIVER_VERSION "08.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -101,7 +101,8 @@
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
-#define MPT_MAX_CALLBACKS 16
+#define MPT_MAX_CALLBACKS 16
+
#define CAN_SLEEP 1
#define NO_SLEEP 0
@@ -154,6 +155,20 @@
#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22
/*
+ * Intel HBA branding
+ */
+#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
+ "Intel Integrated RAID Module RMS2LL080"
+#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
+ "Intel Integrated RAID Module RMS2LL040"
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
+#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+
+/*
* per target private data
*/
#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
@@ -431,14 +446,14 @@ struct chain_tracker {
};
/**
- * struct request_tracker - firmware request tracker
+ * struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
* @scmd: scsi request pointer
* @cb_idx: callback index
* @chain_list: list of chains associated to this IO
* @tracker_list: list of free request (ioc->free_list)
*/
-struct request_tracker {
+struct scsiio_tracker {
u16 smid;
struct scsi_cmnd *scmd;
u8 cb_idx;
@@ -447,6 +462,19 @@ struct request_tracker {
};
/**
+ * struct request_tracker - misc mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
* struct _tr_list - target reset list
* @handle: device handle
* @state: state machine
@@ -709,7 +737,7 @@ struct MPT2SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct request_tracker *scsi_lookup;
+ struct scsiio_tracker *scsi_lookup;
ulong scsi_lookup_pages;
spinlock_t scsi_lookup_lock;
struct list_head free_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5ded3db6e316..6ceb7759bfe5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -6975,7 +6975,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
u32 device_state;
mpt2sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index b37c8a3c1bb0..86afb13f1e79 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1005,11 +1005,23 @@ int osd_req_read_sg(struct osd_request *or,
const struct osd_sg_entry *sglist, unsigned numentries)
{
u64 len;
- int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+ u64 off;
+ int ret;
- if (ret)
- return ret;
- osd_req_read(or, obj, 0, bio, len);
+ if (numentries > 1) {
+ off = 0;
+ ret = _add_sg_continuation_descriptor(or, sglist, numentries,
+ &len);
+ if (ret)
+ return ret;
+ } else {
+ /* Optimize the case of single segment, read_sg is a
+ * bidi operation.
+ */
+ len = sglist->len;
+ off = sglist->offset;
+ }
+ osd_req_read(or, obj, off, bio, len);
return 0;
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 54de1d1af1a7..521e2182d45b 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -1484,7 +1484,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst
int dbg = debugging;
#endif
- if ((buffer = (unsigned char *)vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+ if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
return (-EIO);
printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
@@ -2296,7 +2296,7 @@ static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRp
if (STp->raw) return 0;
if (STp->header_cache == NULL) {
- if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+ if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
return (-ENOMEM);
}
@@ -2484,7 +2484,7 @@ static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request **
name, ppos, update_frame_cntr);
#endif
if (STp->header_cache == NULL) {
- if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+ if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
return 0;
}
@@ -5851,9 +5851,7 @@ static int osst_probe(struct device *dev)
/* if this is the first attach, build the infrastructure */
write_lock(&os_scsi_tapes_lock);
if (os_scsi_tapes == NULL) {
- os_scsi_tapes =
- (struct osst_tape **)kmalloc(osst_max_dev * sizeof(struct osst_tape *),
- GFP_ATOMIC);
+ os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC);
if (os_scsi_tapes == NULL) {
write_unlock(&os_scsi_tapes_lock);
printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index d8db0137c0c7..18b6c55cd08c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1382,53 +1382,50 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
return MPI_IO_STATUS_BUSY;
}
-static void pm8001_work_queue(struct work_struct *work)
+static void pm8001_work_fn(struct work_struct *work)
{
- struct delayed_work *dw = container_of(work, struct delayed_work, work);
- struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
+ struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
struct pm8001_device *pm8001_dev;
- struct domain_device *dev;
+ struct domain_device *dev;
- switch (wq->handler) {
+ switch (pw->handler) {
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_IN_ERROR:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_NON_OPERATIONAL:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
}
- list_del(&wq->entry);
- kfree(wq);
+ kfree(pw);
}
static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
int handler)
{
- struct pm8001_wq *wq;
+ struct pm8001_work *pw;
int ret = 0;
- wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
- if (wq) {
- wq->pm8001_ha = pm8001_ha;
- wq->data = data;
- wq->handler = handler;
- INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
- list_add_tail(&wq->entry, &pm8001_ha->wq_list);
- schedule_delayed_work(&wq->work_q, 0);
+ pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
+ if (pw) {
+ pw->pm8001_ha = pm8001_ha;
+ pw->data = data;
+ pw->handler = handler;
+ INIT_WORK(&pw->work, pm8001_work_fn);
+ queue_work(pm8001_wq, &pw->work);
} else
ret = -ENOMEM;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index b95285f3383f..002360da01e3 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -51,6 +51,8 @@ static int pm8001_id;
LIST_HEAD(hba_list);
+struct workqueue_struct *pm8001_wq;
+
/**
* The main structure which LLDD must register for scsi core.
*/
@@ -134,7 +136,6 @@ static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
int i;
- struct pm8001_wq *wq;
if (!pm8001_ha)
return;
@@ -150,8 +151,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
if (pm8001_ha->shost)
scsi_host_put(pm8001_ha->shost);
- list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
- cancel_delayed_work(&wq->work_q);
+ flush_workqueue(pm8001_wq);
kfree(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -381,7 +381,6 @@ pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- INIT_LIST_HEAD(&pm8001_ha->wq_list);
pm8001_ha->logging_level = 0x01;
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
@@ -758,7 +757,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
int i , pos;
u32 device_state;
pm8001_ha = sha->lldd_ha;
- flush_scheduled_work();
+ flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
@@ -870,17 +869,26 @@ static struct pci_driver pm8001_pci_driver = {
*/
static int __init pm8001_init(void)
{
- int rc;
+ int rc = -ENOMEM;
+
+ pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+ if (!pm8001_wq)
+ goto err;
+
pm8001_id = 0;
pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
if (!pm8001_stt)
- return -ENOMEM;
+ goto err_wq;
rc = pci_register_driver(&pm8001_pci_driver);
if (rc)
- goto err_out;
+ goto err_tp;
return 0;
-err_out:
+
+err_tp:
sas_release_transport(pm8001_stt);
+err_wq:
+ destroy_workqueue(pm8001_wq);
+err:
return rc;
}
@@ -888,6 +896,7 @@ static void __exit pm8001_exit(void)
{
pci_unregister_driver(&pm8001_pci_driver);
sas_release_transport(pm8001_stt);
+ destroy_workqueue(pm8001_wq);
}
module_init(pm8001_init);
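
Editor's note — pm8001_init() above now allocates the dedicated workqueue first and unwinds through err_tp/err_wq/err in strict reverse order of acquisition. A minimal stand-alone sketch of that goto-based unwind (every helper below is hypothetical, standing in for alloc_workqueue(), sas_domain_attach_transport() and pci_register_driver()):

    #include <stdio.h>

    static int alloc_wq(void)     { return 1; }
    static int attach_tp(void)    { return 1; }
    static int register_drv(void) { return 0; /* pretend this step fails */ }
    static void release_tp(void)  { puts("release transport"); }
    static void destroy_wq(void)  { puts("destroy workqueue"); }

    /* Resources are torn down in reverse order of acquisition. */
    int main(void)
    {
            if (!alloc_wq())
                    goto err;
            if (!attach_tp())
                    goto err_wq;
            if (!register_drv())
                    goto err_tp;
            return 0;
    err_tp:
            release_tp();
    err_wq:
            destroy_wq();
    err:
            return -1;
    }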
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 7f064f9ca828..bdb6b27dedd6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
@@ -379,18 +380,16 @@ struct pm8001_hba_info {
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet;
#endif
- struct list_head wq_list;
u32 logging_level;
u32 fw_status;
const struct firmware *fw_image;
};
-struct pm8001_wq {
- struct delayed_work work_q;
+struct pm8001_work {
+ struct work_struct work;
struct pm8001_hba_info *pm8001_ha;
void *data;
int handler;
- struct list_head entry;
};
struct pm8001_fw_image_header {
@@ -460,6 +459,9 @@ struct fw_control_ex {
void *param3;
};
+/* pm8001 workqueue */
+extern struct workqueue_struct *pm8001_wq;
+
/******************** function prototype *********************/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 321cf3ae8630..bcf858e88c64 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5454,7 +5454,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
- flush_scheduled_work();
+ flush_work_sync(&pinstance->worker_q);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ccfc8e78be21..6c51c0a35b9e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,13 +2402,13 @@ struct qla_hw_data {
volatile struct {
uint32_t mbox_int :1;
uint32_t mbox_busy :1;
-
uint32_t disable_risc_code_load :1;
uint32_t enable_64bit_addressing :1;
uint32_t enable_lip_reset :1;
uint32_t enable_target_reset :1;
uint32_t enable_lip_full_login :1;
uint32_t enable_led_scheme :1;
+
uint32_t msi_enabled :1;
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
@@ -2417,6 +2417,7 @@ struct qla_hw_data {
uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
+
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
@@ -2424,9 +2425,11 @@ struct qla_hw_data {
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
- uint32_t fw_hung :1;
- uint32_t quiesce_owner:1;
+ uint32_t isp82xx_fw_hung:1;
+
+ uint32_t quiesce_owner:1;
uint32_t thermal_supported:1;
+ uint32_t isp82xx_reset_hdlr_active:1;
/* 26 bits */
} flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 89e900adb679..d48326ee3f61 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -565,6 +565,7 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(srb_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4c083928c2fb..74a91b6dfc68 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,8 +121,11 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
- DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
- vha->host_no, routine, ms_pkt->entry_status));
+ DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
+ "(%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, ms_pkt->entry_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -136,8 +139,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request:\n", vha->host_no,
- routine));
+ "rejected request on port_id: %02x%02x%02x\n",
+ vha->host_no, routine,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
DEBUG2_3(qla2x00_dump_buffer(
(uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr)));
@@ -147,8 +152,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
break;
default:
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x).\n", vha->host_no, routine,
- comp_status));
+ "status (%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, comp_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
break;
}
}
@@ -1965,7 +1972,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
"scsi(%ld): GFF_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
- "GPN_ID") != QLA_SUCCESS) {
+ "GFF_ID") != QLA_SUCCESS) {
DEBUG2_3(printk(KERN_INFO
"scsi(%ld): GFF_ID IOCB status had a "
"failure status code\n", vha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d9479c3fe5f8..8575808dbae0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1967,7 +1967,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
} else {
/* Mailbox cmd failed. Timeout on min_wait. */
if (time_after_eq(jiffies, mtime) ||
- (IS_QLA82XX(ha) && ha->flags.fw_hung))
+ ha->flags.isp82xx_fw_hung)
break;
}
@@ -3945,8 +3945,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
unsigned long flags;
+ fc_port_t *fcport;
- vha->flags.online = 0;
+ /* For ISP82XX, driver waits for completion of the commands.
+ * online flag should be set.
+ */
+ if (!IS_QLA82XX(ha))
+ vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
@@ -3954,7 +3959,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
- /* Chip reset does not apply to 82XX */
+ /* For ISP82XX, reset_chip is just disabling interrupts.
+ * Driver waits for the completion of the commands.
+ * the interrupts need to be enabled.
+ */
if (!IS_QLA82XX(ha))
ha->isp_ops->reset_chip(vha);
@@ -3980,14 +3988,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
LOOP_DOWN_TIME);
}
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!ha->flags.eeh_busy) {
/* Make sure for ISP 82XX IO DMA is complete */
if (IS_QLA82XX(ha)) {
- if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
- }
+ qla82xx_chip_reset_cleanup(vha);
+
+ /* Done waiting for pending commands.
+ * Reset the online flag.
+ */
+ vha->flags.online = 0;
}
/* Requeue all commands in outstanding command list. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 4c1ba6263eb3..d78d5896fc33 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -328,6 +328,7 @@ qla2x00_start_scsi(srb_t *sp)
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -406,7 +407,22 @@ qla2x00_start_scsi(srb_t *sp)
cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
/* Update tagged queuing modifier */
- cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_HEAD_TAG);
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_ORDERED_TAG);
+ break;
+ default:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
+ }
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -971,6 +987,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t fcp_cmnd_len;
struct fcp_cmnd *fcp_cmnd;
dma_addr_t crc_ctx_dma;
+ char tag[2];
cmd = sp->cmd;
@@ -1068,9 +1085,27 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
- fcp_cmnd->task_attribute = 0;
fcp_cmnd->task_management = 0;
+ /*
+ * Update tagged queuing modifier if using command tag queuing
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+ fcp_cmnd->task_attribute = 0;
+ break;
+ }
+ } else {
+ fcp_cmnd->task_attribute = 0;
+ }
+
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
@@ -1177,6 +1212,7 @@ qla24xx_start_scsi(srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -1260,6 +1296,18 @@ qla24xx_start_scsi(srb_t *sp)
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
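The three start_scsi paths touched above (qla2x00, the CRC path and qla24xx) all repeat the same mapping from the SCSI tagged-queuing message to an FCP task attribute, defaulting to a simple tag. Below is a minimal standalone sketch of that mapping; the numeric values for the tag message codes and the FCP task attributes are assumptions taken from the usual SCSI/FCP definitions, not from this patch.

#include <stdint.h>

/* Tag message codes as defined by SAM/SPI (assumed values). */
#define SIMPLE_QUEUE_TAG	0x20
#define HEAD_OF_QUEUE_TAG	0x21
#define ORDERED_QUEUE_TAG	0x22

/* FCP_CMND task attribute field (assumed values). */
#define TSK_SIMPLE		0
#define TSK_HEAD_OF_QUEUE	1
#define TSK_ORDERED		2

/* Map the first byte of the tag message to an FCP task attribute,
 * falling back to a simple tag exactly as the hunks above do. */
static uint8_t tag_to_task_attribute(uint8_t tag_msg)
{
	switch (tag_msg) {
	case HEAD_OF_QUEUE_TAG:
		return TSK_HEAD_OF_QUEUE;
	case ORDERED_QUEUE_TAG:
		return TSK_ORDERED;
	default:
		return TSK_SIMPLE;
	}
}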
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e473e9fb363c..7a7c0ecfe7dd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -71,6 +71,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ if (ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ rval = QLA_FUNCTION_FAILED;
+ goto premature_exit;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -83,13 +90,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
- /* Setting Link-Down error */
- mcp->mb[0] = MBS_LINK_DOWN_ERROR;
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
- }
-
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
+ if (ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -2462,22 +2462,19 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
"-- completion status (%x).\n", __func__,
vha->host_no, le16_to_cpu(sts->comp_status)));
rval = QLA_FUNCTION_FAILED;
- } else if (!(le16_to_cpu(sts->scsi_status) &
- SS_RESPONSE_INFO_LEN_VALID)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- no response info (%x).\n", __func__, vha->host_no,
- le16_to_cpu(sts->scsi_status)));
- rval = QLA_FUNCTION_FAILED;
- } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- not enough response info (%d).\n", __func__,
- vha->host_no, le32_to_cpu(sts->rsp_data_len)));
- rval = QLA_FUNCTION_FAILED;
- } else if (sts->data[3]) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- response (%x).\n", __func__,
- vha->host_no, sts->data[3]));
- rval = QLA_FUNCTION_FAILED;
+ } else if (le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID) {
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent "
+ "data length -- not enough response info (%d).\n",
+ __func__, vha->host_no,
+ le32_to_cpu(sts->rsp_data_len)));
+ } else if (sts->data[3]) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- response (%x).\n", __func__,
+ vha->host_no, sts->data[3]));
+ rval = QLA_FUNCTION_FAILED;
+ }
}
/* Issue marker IOCB. */
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index fdb96a3584a5..76ec876e6b21 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,7 @@
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
+#include <scsi/scsi_tcq.h>
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
@@ -2547,7 +2548,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = dsd_list_len;
+ cmd_pkt->fcp_data_dseg_len = dsd_list_len;
} else {
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -2620,6 +2621,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -2770,6 +2772,22 @@ sufficient_dsds:
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
@@ -2835,6 +2853,20 @@ sufficient_dsds:
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
@@ -3457,46 +3489,28 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
}
-static void
+int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
- uint32_t fw_heartbeat_counter, halt_status;
- struct qla_hw_data *ha = vha->hw;
+ uint32_t fw_heartbeat_counter;
+ int status = 0;
- fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
+ QLA82XX_PEG_ALIVE_COUNTER);
/* all 0xff, assume AER/EEH in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff)
- return;
+ return status;
if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
vha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
if (vha->seconds_since_last_heartbeat == 2) {
vha->seconds_since_last_heartbeat = 0;
- halt_status = qla82xx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
- if (halt_status & HALT_STATUS_UNRECOVERABLE) {
- set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
- } else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s - detect abort needed\n",
- vha->host_no, __func__);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- }
- qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ status = 1;
}
} else
vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
/*
@@ -3557,6 +3571,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
break;
case QLA82XX_DEV_NEED_RESET:
qla82xx_need_reset_handler(vha);
+ dev_init_timeout = jiffies +
+ (ha->nx_dev_init_timeout * HZ);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
@@ -3596,30 +3612,18 @@ exit:
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
- uint32_t dev_state;
+ uint32_t dev_state, halt_status;
struct qla_hw_data *ha = vha->hw;
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-
/* don't poll if reset is going on */
- if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
- if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ if (!ha->flags.isp82xx_reset_hdlr_active) {
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
- "%s(): Adapter reset needed!\n", __func__);
+ "%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
DEBUG(qla_printk(KERN_INFO, ha,
@@ -3629,6 +3633,31 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
qla82xx_check_fw_alive(vha);
+ if (qla82xx_check_fw_alive(vha)) {
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ DEBUG2(qla_printk(KERN_ERR, ha,
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+ }
}
}
}
@@ -3663,6 +3692,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
"Exiting.\n", __func__, vha->host_no);
return QLA_SUCCESS;
}
+ ha->flags.isp82xx_reset_hdlr_active = 1;
qla82xx_idc_lock(ha);
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
@@ -3683,7 +3713,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_idc_unlock(ha);
if (rval == QLA_SUCCESS) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.isp82xx_reset_hdlr_active = 0;
qla82xx_restart_isp(vha);
}
@@ -3791,3 +3822,71 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
return status;
}
+
+void
+qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
+{
+ int i;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only
+ */
+ if (!ha->flags.isp82xx_fw_hung) {
+ for (i = 0; i < 2; i++) {
+ msleep(1000);
+ if (qla82xx_check_fw_alive(vha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ complete(&ha->mbx_intr_comp);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Abort all commands gracefully if fw NOT hung */
+ if (!ha->flags.isp82xx_fw_hung) {
+ int cnt, que;
+ srb_t *sp;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command failed in %s\n",
+ vha->host_no, __func__);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command success in %s\n",
+ vha->host_no, __func__);
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for pending cmds (physical and virtual) to complete */
+ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
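qla82xx_check_fw_alive() is reduced above to a pure heartbeat check that returns non-zero once the PEG alive counter stops advancing, and its callers (the watchdog and the new qla82xx_chip_reset_cleanup()) decide how to react to a hang. A minimal standalone sketch of that polling pattern follows; the struct and field names are illustrative, not the driver's.

#include <stdint.h>

/* Hypothetical per-host heartbeat state. */
struct fw_heartbeat {
	uint32_t last_counter;
	unsigned int stale_polls;
};

/* Returns 1 when the counter has not moved for two consecutive polls
 * (the driver polls about once per second), 0 otherwise.  A reading of
 * all ones is treated as "bus error in progress" and ignored, matching
 * the hunk above. */
static int fw_heartbeat_check(struct fw_heartbeat *hb, uint32_t counter)
{
	if (counter == 0xffffffff)
		return 0;

	if (counter == hb->last_counter) {
		if (++hb->stale_polls >= 2) {
			hb->stale_polls = 0;
			hb->last_counter = counter;
			return 1;
		}
	} else {
		hb->stale_polls = 0;
	}

	hb->last_counter = counter;
	return 0;
}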
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f27724d76cf6..75a966c94860 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -349,7 +349,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
"Can't create request queue\n");
goto fail;
}
- ha->wq = create_workqueue("qla2xxx_wq");
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
vha->req = ha->req_q_map[req];
options |= BIT_1;
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
@@ -506,7 +506,7 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -520,14 +520,13 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->cmd = cmd;
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
- cmd->scsi_done = done;
sp->ctx = NULL;
return sp;
}
static int
-qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -537,7 +536,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
srb_t *sp;
int rval;
- spin_unlock_irq(vha->host->host_lock);
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
@@ -569,40 +567,32 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
if (!sp)
- goto qc24_host_busy_lock;
+ goto qc24_host_busy;
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS)
goto qc24_host_busy_free_sp;
- spin_lock_irq(vha->host->host_lock);
-
return 0;
qc24_host_busy_free_sp:
qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
-qc24_host_busy_lock:
- spin_lock_irq(vha->host->host_lock);
+qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc24_target_busy:
- spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- spin_lock_irq(vha->host->host_lock);
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla2xxx_queuecommand)
-
-
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -821,17 +811,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
- int ret = SUCCESS;
+ int ret;
unsigned int id, lun;
unsigned long flags;
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- fc_block_scsi_eh(cmd);
-
if (!CMD_SP(cmd))
return SUCCESS;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = SUCCESS;
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -940,11 +933,13 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
- fc_block_scsi_eh(cmd);
-
if (!fcport)
return FAILED;
+ err = fc_block_scsi_eh(cmd);
+ if (err != 0)
+ return err;
+
qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
vha->host_no, cmd->device->id, cmd->device->lun, name);
@@ -1018,14 +1013,17 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
int ret = FAILED;
unsigned int id, lun;
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, vha->hw,
"scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
@@ -1078,14 +1076,17 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
unsigned int id, lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
@@ -3805,7 +3806,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
ha->flags.eeh_busy = 1;
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
- ha->flags.fw_hung = 1;
+ ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
@@ -3945,7 +3946,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
qla82xx_idc_unlock(ha);
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
/* Clear driver state register */
@@ -3958,7 +3959,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
"This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
qla82xx_set_drv_active(base_vha);
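The qla_os.c hunks above convert the driver to the lock-less ->queuecommand() convention: the DEF_SCSI_QCMD() wrapper is dropped, the host_lock juggling disappears, and the done callback is no longer passed in because the midlayer now fills in cmd->scsi_done before calling the driver. A bare-bones sketch of what such a queuecommand looks like is below (kernel-style C, types from scsi/scsi_cmnd.h; the example_* names are placeholders, not this driver's functions).

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Placeholder for the driver's own hardware submission routine. */
static int example_hw_queue(struct scsi_cmnd *cmd);

static int example_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	/* host_lock is not held on entry and does not need to be
	 * re-taken on any exit path; that is the point of the change. */
	if (!cmd->device->hostdata) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);	/* set by the midlayer before the call */
		return 0;
	}

	if (example_hw_queue(cmd))
		return SCSI_MLQUEUE_HOST_BUSY;

	return 0;
}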
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cf0075a2d0c2..3a260c3f055a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.05-k0"
+#define QLA2XXX_VERSION "8.03.07.00"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 6ffbe9727dff..03e028e6e809 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1027,7 +1027,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
((ddb_entry->default_time2wait +
4) * HZ);
- DEBUG2(printk("scsi%ld: ddb [%d] initate"
+ DEBUG2(printk("scsi%ld: ddb [%d] initiate"
" RELOGIN after %d seconds\n",
ha->host_no,
ddb_entry->fw_ddb_index,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 3fc1d256636f..967836ef5ab2 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -812,7 +812,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
);
start_dpc++;
DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
- "initate relogin after"
+ "initiate relogin after"
" %d seconds\n",
ha->host_no, ddb_entry->bus,
ddb_entry->target,
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 664c9572d0c9..e2d45c91b8e8 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1292,15 +1292,19 @@ static struct scsi_host_template qpti_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-static int __devinit qpti_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit qpti_sbus_probe(struct platform_device *op)
{
- struct scsi_host_template *tpnt = match->data;
+ struct scsi_host_template *tpnt;
struct device_node *dp = op->dev.of_node;
struct Scsi_Host *host;
struct qlogicpti *qpti;
static int nqptis;
const char *fcode;
+ if (!op->dev.of_match)
+ return -EINVAL;
+ tpnt = op->dev.of_match->data;
+
/* Sometimes Antares cards come up not completely
* setup, and we get a report of a zero IRQ.
*/
@@ -1457,7 +1461,7 @@ static const struct of_device_id qpti_match[] = {
};
MODULE_DEVICE_TABLE(of, qpti_match);
-static struct of_platform_driver qpti_sbus_driver = {
+static struct platform_driver qpti_sbus_driver = {
.driver = {
.name = "qpti",
.owner = THIS_MODULE,
@@ -1469,12 +1473,12 @@ static struct of_platform_driver qpti_sbus_driver = {
static int __init qpti_init(void)
{
- return of_register_platform_driver(&qpti_sbus_driver);
+ return platform_driver_register(&qpti_sbus_driver);
}
static void __exit qpti_exit(void)
{
- of_unregister_platform_driver(&qpti_sbus_driver);
+ platform_driver_unregister(&qpti_sbus_driver);
}
MODULE_DESCRIPTION("QlogicISP SBUS driver");
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 43fad4c09beb..82e9e5c0476e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -382,6 +382,91 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @key: specify list to use
+ *
+ * Description:
+ * Remove and destroy one dev_info entry for @vendor, @model
+ * in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+ struct scsi_dev_info_list *devinfo, *found = NULL;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
+ if (devinfo->compatible) {
+ /*
+ * Behave like the older version of get_device_flags.
+ */
+ size_t max;
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ max = 8; /* max length of vendor */
+ while ((max > 0) && *vendor == ' ') {
+ max--;
+ vendor++;
+ }
+ /*
+ * XXX removing the following strlen() would be
+ * good, using it means that for an entry not in
+ * the list, we scan every byte of every vendor
+ * listed in scsi_static_device_list[], and never match
+ * a single one (and still have to compare at
+ * least the first byte of each vendor).
+ */
+ if (memcmp(devinfo->vendor, vendor,
+ min(max, strlen(devinfo->vendor))))
+ continue;
+ /*
+ * Skip spaces again.
+ */
+ max = 16; /* max length of model */
+ while ((max > 0) && *model == ' ') {
+ max--;
+ model++;
+ }
+ if (memcmp(devinfo->model, model,
+ min(max, strlen(devinfo->model))))
+ continue;
+ found = devinfo;
+ } else {
+ if (!memcmp(devinfo->vendor, vendor,
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
+ found = devinfo;
+ }
+ if (found)
+ break;
+ }
+
+ if (found) {
+ list_del(&found->dev_info_list);
+ kfree(found);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
+
+/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
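The new scsi_dev_info_list_del_keyed() is the counterpart of scsi_dev_info_list_add_keyed(), and together with the SCSI_DEVINFO_DH key added to scsi_priv.h further down it lets a consumer keep a private vendor/model list whose entries come and go at runtime. A hedged sketch of that usage is below; the list name, vendor/model strings and flag value are illustrative only, and the prototypes are the ones exported via scsi_priv.h in this series.

#include "scsi_priv.h"	/* scsi_dev_info_* prototypes, SCSI_DEVINFO_DH */

static int example_devinfo_setup(void)
{
	int err;

	/* Create a private, keyed device-info list once at init time. */
	err = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "example-dh");
	if (err)
		return err;

	/* Entries can now be added and removed as devices are claimed and
	 * released; flags of 0 means only the key match matters here. */
	err = scsi_dev_info_list_add_keyed(0, "VENDOR", "MODEL", NULL,
					   0, SCSI_DEVINFO_DH);
	if (err)
		scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
	return err;
}

static void example_devinfo_teardown(void)
{
	scsi_dev_info_list_del_keyed("VENDOR", "MODEL", SCSI_DEVINFO_DH);
	scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}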
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 45c75649b9e0..991de3c15cfc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,7 +223,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
* @scmd: Cmd to have sense checked.
*
* Return value:
- * SUCCESS or FAILED or NEEDS_RETRY
+ * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR
*
* Notes:
* When a deferred error is detected the current command has
@@ -326,17 +326,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
*/
return SUCCESS;
- /* these three are not supported */
+ /* these are not supported */
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
- return SUCCESS;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ return TARGET_ERROR;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- return SUCCESS;
+ return TARGET_ERROR;
}
return NEEDS_RETRY;
@@ -344,11 +346,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- return SUCCESS;
+ return TARGET_ERROR;
case ILLEGAL_REQUEST:
- case BLANK_CHECK:
- case DATA_PROTECT:
default:
return SUCCESS;
}
@@ -787,6 +787,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
case SUCCESS:
case NEEDS_RETRY:
case FAILED:
+ case TARGET_ERROR:
break;
case ADD_TO_MLQUEUE:
rtn = NEEDS_RETRY;
@@ -1469,6 +1470,14 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
+ else if (rtn == TARGET_ERROR) {
+ /*
+ * Need to modify host byte to signal a
+ * permanent target failure
+ */
+ scmd->result |= (DID_TARGET_FAILURE << 16);
+ rtn = SUCCESS;
+ }
/* if rtn == FAILED, we have no sense information;
* returning FAILED will wake the error handler thread
* to collect the sense and redo the decide
@@ -1486,6 +1495,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
+ scmd->result |= (DID_NEXUS_FAILURE << 16);
return SUCCESS; /* causes immediate i/o error */
default:
return FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..8d4ef8efa3cd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_release_buffers);
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+ int error = 0;
+
+ switch(host_byte(result)) {
+ case DID_TRANSPORT_FAILFAST:
+ error = -ENOLINK;
+ break;
+ case DID_TARGET_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EREMOTEIO;
+ break;
+ case DID_NEXUS_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EBADE;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+
+ return error;
+}
+
/*
* Function: scsi_io_completion()
*
@@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
req->sense_len = len;
}
if (!sense_deferred)
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
}
req->resid_len = scsi_get_resid(cmd);
@@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
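scsi_io_completion() now derives the block-layer error from the host byte instead of hard-coding -EIO, so the DID_TARGET_FAILURE and DID_NEXUS_FAILURE dispositions set in scsi_error.c above surface to upper layers as -EREMOTEIO and -EBADE. For reference, the host byte lives in bits 16-23 of cmd->result; a standalone model of the lookup is below, with the DID_* codes written out as assumed numeric values (the real helper additionally rewrites the host byte back to DID_OK in the target/nexus cases so the failure is not double-reported).

#include <errno.h>

/* Assumed host-byte codes from include/scsi/scsi.h; only the ones the
 * hunk above distinguishes are listed. */
#define DID_TRANSPORT_FAILFAST	0x0f
#define DID_TARGET_FAILURE	0x10
#define DID_NEXUS_FAILURE	0x11

/* cmd->result layout: status | msg << 8 | host << 16 | driver << 24 */
static inline int host_byte(int result)
{
	return (result >> 16) & 0xff;
}

/* Standalone model of __scsi_error_from_host_byte(): pick the errno the
 * request will complete with. */
static int errno_from_host_byte(int result)
{
	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		return -ENOLINK;
	case DID_TARGET_FAILURE:
		return -EREMOTEIO;
	case DID_NEXUS_FAILURE:
		return -EBADE;
	default:
		return -EIO;
	}
}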
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index b4056d14f812..2a588955423a 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,6 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
+ SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
@@ -56,6 +57,7 @@ extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
int flags, int key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
@@ -146,7 +148,7 @@ static inline void scsi_netlink_exit(void) {}
#endif
/* scsi_pm.c */
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
extern const struct dev_pm_ops scsi_bus_pm_ops;
#endif
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 490ce213204e..e44ff64233fd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -383,7 +383,7 @@ struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.pm = &scsi_bus_pm_ops,
#endif
};
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index c399be979921..f67282058ba1 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -629,7 +629,7 @@ static int __init scsi_tgt_init(void)
if (!scsi_tgt_cmd_cache)
return -ENOMEM;
- scsi_tgtd = create_workqueue("scsi_tgtd");
+ scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1);
if (!scsi_tgtd) {
err = -ENOMEM;
goto free_kmemcache;
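Both workqueue users in this series move from create_workqueue() to alloc_workqueue(): the qla2xxx one asks for WQ_MEM_RECLAIM because it sits in the I/O path and must keep making forward progress under memory pressure, while scsi_tgtd simply caps concurrency at one in-flight work item. A minimal sketch of the newer call, with a placeholder queue name, is:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_wq_init(void)
{
	/* One work item at a time; WQ_MEM_RECLAIM guarantees a rescuer
	 * thread so queued work can still run during memory reclaim. */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 1);
	return example_wq ? 0 : -ENOMEM;
}

static void example_wq_exit(void)
{
	if (example_wq)
		destroy_workqueue(example_wq);
}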
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f905ecb5704d..b4218390941e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -954,6 +954,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
if (dd_size)
conn->dd_data = &conn[1];
+ mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
conn->transport = transport;
conn->cid = cid;
@@ -975,7 +976,6 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
- conn->active = 1;
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
@@ -1001,7 +1001,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
unsigned long flags;
spin_lock_irqsave(&connlock, flags);
- conn->active = 0;
list_del(&conn->conn_list);
spin_unlock_irqrestore(&connlock, flags);
@@ -1430,6 +1429,29 @@ release_host:
return err;
}
+static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ u64 ep_handle)
+{
+ struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep;
+
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+ ep = iscsi_lookup_endpoint(ep_handle);
+ if (!ep)
+ return -EINVAL;
+ conn = ep->conn;
+ if (conn) {
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = NULL;
+ mutex_unlock(&conn->ep_mutex);
+ }
+
+ transport->ep_disconnect(ep);
+ return 0;
+}
+
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
struct iscsi_uevent *ev, int msg_type)
@@ -1454,14 +1476,8 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->u.ep_poll.timeout_ms);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
- if (!transport->ep_disconnect)
- return -EINVAL;
-
- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
- if (!ep)
- return -EINVAL;
-
- transport->ep_disconnect(ep);
+ rc = iscsi_if_ep_disconnect(transport,
+ ev->u.ep_disconnect.ep_handle);
break;
}
return rc;
@@ -1609,12 +1625,31 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
session = iscsi_session_lookup(ev->u.b_conn.sid);
conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
- if (session && conn)
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- else
+ if (conn && conn->ep)
+ iscsi_if_ep_disconnect(transport, conn->ep->id);
+
+ if (!session || !conn) {
err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = ep;
+ mutex_unlock(&conn->ep_mutex);
+ } else
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn "
+ "binding\n");
break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
@@ -1747,13 +1782,48 @@ iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
-iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
-iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+#define iscsi_conn_ep_attr_show(param) \
+static ssize_t show_conn_ep_param_##param(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
+ struct iscsi_transport *t = conn->transport; \
+ struct iscsi_endpoint *ep; \
+ ssize_t rc; \
+ \
+ /* \
+ * Need to make sure ep_disconnect does not free the LLD's \
+ * interconnect resources while we are trying to read them. \
+ */ \
+ mutex_lock(&conn->ep_mutex); \
+ ep = conn->ep; \
+ if (!ep && t->ep_connect) { \
+ mutex_unlock(&conn->ep_mutex); \
+ return -ENOTCONN; \
+ } \
+ \
+ if (ep) \
+ rc = t->get_ep_param(ep, param, buf); \
+ else \
+ rc = t->get_conn_param(conn, param, buf); \
+ mutex_unlock(&conn->ep_mutex); \
+ return rc; \
+}
+
+#define iscsi_conn_ep_attr(field, param) \
+ iscsi_conn_ep_attr_show(param) \
+static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
+ show_conn_ep_param_##param, NULL);
+
+iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
+
/*
* iSCSI session attrs
*/
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 193b37ba1834..676fe9ac7f61 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -562,7 +562,7 @@ fail:
return err;
}
-static int __devinit esp_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit esp_sbus_probe(struct platform_device *op)
{
struct device_node *dma_node = NULL;
struct device_node *dp = op->dev.of_node;
@@ -632,7 +632,7 @@ static const struct of_device_id esp_match[] = {
};
MODULE_DEVICE_TABLE(of, esp_match);
-static struct of_platform_driver esp_sbus_driver = {
+static struct platform_driver esp_sbus_driver = {
.driver = {
.name = "esp",
.owner = THIS_MODULE,
@@ -644,12 +644,12 @@ static struct of_platform_driver esp_sbus_driver = {
static int __init sunesp_init(void)
{
- return of_register_platform_driver(&esp_sbus_driver);
+ return platform_driver_register(&esp_sbus_driver);
}
static void __exit sunesp_exit(void)
{
- of_unregister_platform_driver(&esp_sbus_driver);
+ platform_driver_unregister(&esp_sbus_driver);
}
MODULE_DESCRIPTION("Sun ESP SCSI driver");
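qlogicpti, sun_esp and the mpc5xxx SPI drivers in this series all get the same mechanical conversion from of_platform_driver to platform_driver: the probe loses its const struct of_device_id *match argument (the drivers above read op->dev.of_match when they still need the match data), and of_register/unregister_platform_driver() become platform_driver_register/unregister(). The registration boilerplate after the conversion looks roughly like the sketch below; the driver name and compatible string are placeholders.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *op)
{
	/* Match data, when needed, is fetched from the device rather than
	 * arriving as a second probe argument. */
	if (!op->dev.of_match)
		return -EINVAL;
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "example,device" },
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.owner = THIS_MODULE,
		.of_match_table = example_match,
	},
	.probe = example_probe,
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);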
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bb233a9cbad2..cdeb01f45bc2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -53,6 +53,12 @@ if SPI_MASTER
comment "SPI Master Controller Drivers"
+config SPI_ALTERA
+ tristate "Altera SPI Controller"
+ select SPI_BITBANG
+ help
+ This is the driver for the Altera SPI Controller.
+
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
depends on ATH79 && GENERIC_GPIO
@@ -231,6 +237,13 @@ config SPI_FSL_ESPI
From MPC8536, 85xx platform uses the controller, and all P10xx,
P20xx, P30xx,P40xx, P50xx uses this controller.
+config SPI_OC_TINY
+ tristate "OpenCores tiny SPI"
+ depends on GENERIC_GPIO
+ select SPI_BITBANG
+ help
+ This is the driver for OpenCores tiny SPI master controller.
+
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
depends on ARCH_OMAP1
@@ -330,6 +343,12 @@ config SPI_SH_MSIOF
help
SPI driver for SuperH MSIOF blocks.
+config SPI_SH
+ tristate "SuperH SPI controller"
+ depends on SUPERH
+ help
+ SPI driver for SuperH SPI blocks.
+
config SPI_SH_SCI
tristate "SuperH SCI SPI controller"
depends on SUPERH
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 86d1b5f9bbd9..8b5a315045a6 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -9,6 +9,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
# SPI master controller drivers (bus)
+obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o
+obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
@@ -46,6 +48,7 @@ obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
+obj-$(CONFIG_SPI_SH) += spi_sh.o
obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 71a1219a995d..5c2b092a915e 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -329,15 +329,16 @@ struct vendor_data {
/**
* struct pl022 - This is the private SSP driver data structure
* @adev: AMBA device model hookup
- * @vendor: Vendor data for the IP block
- * @phybase: The physical memory where the SSP device resides
- * @virtbase: The virtual memory where the SSP is mapped
+ * @vendor: vendor data for the IP block
+ * @phybase: the physical memory where the SSP device resides
+ * @virtbase: the virtual memory where the SSP is mapped
+ * @clk: outgoing clock "SPICLK" for the SPI bus
* @master: SPI framework hookup
* @master_info: controller-specific data from machine setup
- * @regs: SSP controller register's virtual address
- * @pump_messages: Work struct for scheduling work to the workqueue
- * @lock: spinlock to syncronise access to driver data
* @workqueue: a workqueue on which any spi_message request is queued
+ * @pump_messages: work struct for scheduling work to the workqueue
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
* @busy: workqueue is busy
* @running: workqueue is running
* @pump_transfers: Tasklet used in Interrupt Transfer mode
@@ -348,8 +349,14 @@ struct vendor_data {
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
* @rx_end: end position in RX buffer to be written
- * @readingtype: the type of read currently going on
- * @writingtype: the type or write currently going on
+ * @read: the type of read currently going on
+ * @write: the type of write currently going on
+ * @exp_fifo_level: expected FIFO level
+ * @dma_rx_channel: optional channel for RX DMA
+ * @dma_tx_channel: optional channel for TX DMA
+ * @sgt_rx: scattertable for the RX transfer
+ * @sgt_tx: scattertable for the TX transfer
+ * @dummypage: a dummy page used for driving data on the bus with DMA
*/
struct pl022 {
struct amba_device *adev;
@@ -397,8 +404,8 @@ struct pl022 {
* @cpsr: Value of Clock prescale register
* @n_bytes: how many bytes(power of 2) reqd for a given data width of client
* @enable_dma: Whether to enable DMA or not
- * @write: function ptr to be used to write when doing xfer for this chip
* @read: function ptr to be used to read when doing xfer for this chip
+ * @write: function ptr to be used to write when doing xfer for this chip
* @cs_control: chip select callback provided by chip
* @xfer_type: polling/interrupt/DMA
*
@@ -508,9 +515,10 @@ static void giveback(struct pl022 *pl022)
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clocks! */
+ /* This message is completed, so let's turn off the clocks & power */
clk_disable(pl022->clk);
amba_pclk_disable(pl022->adev);
+ amba_vcore_disable(pl022->adev);
}
/**
@@ -917,7 +925,6 @@ static int configure_dma(struct pl022 *pl022)
struct dma_chan *txchan = pl022->dma_tx_channel;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
- dma_cookie_t cookie;
/* Check that the channels are available */
if (!rxchan || !txchan)
@@ -962,10 +969,8 @@ static int configure_dma(struct pl022 *pl022)
tx_conf.dst_addr_width = rx_conf.src_addr_width;
BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
- rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
- (unsigned long) &rx_conf);
- txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
- (unsigned long) &tx_conf);
+ dmaengine_slave_config(rxchan, &rx_conf);
+ dmaengine_slave_config(txchan, &tx_conf);
/* Create sglists for the transfers */
pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
@@ -1018,23 +1023,17 @@ static int configure_dma(struct pl022 *pl022)
rxdesc->callback_param = pl022;
/* Submit and fire RX and TX with TX last so we're ready to read! */
- cookie = rxdesc->tx_submit(rxdesc);
- if (dma_submit_error(cookie))
- goto err_submit_rx;
- cookie = txdesc->tx_submit(txdesc);
- if (dma_submit_error(cookie))
- goto err_submit_tx;
- rxchan->device->device_issue_pending(rxchan);
- txchan->device->device_issue_pending(txchan);
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(rxchan);
+ dma_async_issue_pending(txchan);
return 0;
-err_submit_tx:
-err_submit_rx:
err_txdesc:
- txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(txchan);
err_rxdesc:
- rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(rxchan);
dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
@@ -1101,8 +1100,8 @@ static void terminate_dma(struct pl022 *pl022)
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
- rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
- txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(rxchan);
+ dmaengine_terminate_all(txchan);
unmap_free_dma_scatter(pl022);
}
@@ -1482,9 +1481,11 @@ static void pump_messages(struct work_struct *work)
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
/*
- * We enable the clocks here, then the clocks will be disabled when
- * giveback() is called in each method (poll/interrupt/DMA)
+ * We enable the core voltage and clocks here, then the clocks
+ * and core will be disabled when giveback() is called in each method
+ * (poll/interrupt/DMA)
*/
+ amba_vcore_enable(pl022->adev);
amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
restore_state(pl022);
@@ -1910,8 +1911,6 @@ static int pl022_setup(struct spi_device *spi)
&& ((pl022->master_info)->enable_dma)) {
chip->enable_dma = true;
dev_dbg(&spi->dev, "DMA mode set in controller state\n");
- if (status < 0)
- goto err_config_params;
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
SSP_DMACR_MASK_RXDMAE, 0);
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
@@ -2021,7 +2020,7 @@ static void pl022_cleanup(struct spi_device *spi)
static int __devinit
-pl022_probe(struct amba_device *adev, struct amba_id *id)
+pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
@@ -2130,8 +2129,12 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
goto err_spi_register;
}
dev_dbg(dev, "probe succeded\n");
- /* Disable the silicon block pclk and clock it when needed */
+ /*
+ * Disable the silicon block pclk and any voltage domain and just
+ * power it up and clock it when it's needed
+ */
amba_pclk_disable(adev);
+ amba_vcore_disable(adev);
return 0;
err_spi_register:
@@ -2196,9 +2199,11 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
return status;
}
+ amba_vcore_enable(adev);
amba_pclk_enable(adev);
load_ssp_default_config(pl022);
amba_pclk_disable(adev);
+ amba_vcore_disable(adev);
dev_dbg(&adev->dev, "suspended\n");
return 0;
}
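The pl022 DMA path is also switched from the raw device_control()/tx_submit() callbacks to the dmaengine inline helpers, which keeps the configure/submit/issue/terminate sequence readable. The general shape of that sequence with the helpers is sketched below; chan, cfg and desc come from the driver's own setup, and desc is assumed to have been prepared (device_prep_slave_sg) after the channel was configured.

#include <linux/dmaengine.h>

static int example_issue(struct dma_chan *chan,
			 struct dma_slave_config *cfg,
			 struct dma_async_tx_descriptor *desc)
{
	int ret;

	ret = dmaengine_slave_config(chan, cfg);   /* was device_control(DMA_SLAVE_CONFIG) */
	if (ret)
		return ret;

	dmaengine_submit(desc);			   /* was desc->tx_submit(desc) */
	dma_async_issue_pending(chan);		   /* was device_issue_pending() */
	return 0;
}

static void example_abort(struct dma_chan *chan)
{
	dmaengine_terminate_all(chan);		   /* was device_control(DMA_TERMINATE_ALL) */
}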
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 6beab99bf95b..166a879fd9e8 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -790,7 +790,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
struct resource *r, *mem;
resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
- resource_size_t dma_eventq = SPI_NO_RESOURCE;
int i = 0, ret = 0;
u32 spipc0;
@@ -878,17 +877,13 @@ static int davinci_spi_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (r)
dma_tx_chan = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
- if (r)
- dma_eventq = r->start;
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
if (dma_rx_chan != SPI_NO_RESOURCE &&
- dma_tx_chan != SPI_NO_RESOURCE &&
- dma_eventq != SPI_NO_RESOURCE) {
+ dma_tx_chan != SPI_NO_RESOURCE) {
dspi->dma.rx_channel = dma_rx_chan;
dspi->dma.tx_channel = dma_tx_chan;
- dspi->dma.eventq = dma_eventq;
+ dspi->dma.eventq = pdata->dma_event_q;
ret = davinci_spi_request_dma(dspi);
if (ret)
@@ -897,7 +892,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "DMA: supported\n");
dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
"event queue: %d\n", dma_rx_chan, dma_tx_chan,
- dma_eventq);
+ pdata->dma_event_q);
}
dspi->get_rx = davinci_spi_rx_buf_u8;
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c
index 77d9e7ee8b27..6a5b4238fb6b 100644
--- a/drivers/spi/mpc512x_psc_spi.c
+++ b/drivers/spi/mpc512x_psc_spi.c
@@ -507,8 +507,7 @@ static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
return 0;
}
-static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -551,7 +550,7 @@ static struct of_device_id mpc512x_psc_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
-static struct of_platform_driver mpc512x_psc_spi_of_driver = {
+static struct platform_driver mpc512x_psc_spi_of_driver = {
.probe = mpc512x_psc_spi_of_probe,
.remove = __devexit_p(mpc512x_psc_spi_of_remove),
.driver = {
@@ -563,13 +562,13 @@ static struct of_platform_driver mpc512x_psc_spi_of_driver = {
static int __init mpc512x_psc_spi_init(void)
{
- return of_register_platform_driver(&mpc512x_psc_spi_of_driver);
+ return platform_driver_register(&mpc512x_psc_spi_of_driver);
}
module_init(mpc512x_psc_spi_init);
static void __exit mpc512x_psc_spi_exit(void)
{
- of_unregister_platform_driver(&mpc512x_psc_spi_of_driver);
+ platform_driver_unregister(&mpc512x_psc_spi_of_driver);
}
module_exit(mpc512x_psc_spi_exit);
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 8a904c1c8485..e30baf0852ac 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -450,8 +450,7 @@ free_master:
return ret;
}
-static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -503,7 +502,7 @@ static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
-static struct of_platform_driver mpc52xx_psc_spi_of_driver = {
+static struct platform_driver mpc52xx_psc_spi_of_driver = {
.probe = mpc52xx_psc_spi_of_probe,
.remove = __devexit_p(mpc52xx_psc_spi_of_remove),
.driver = {
@@ -515,13 +514,13 @@ static struct of_platform_driver mpc52xx_psc_spi_of_driver = {
static int __init mpc52xx_psc_spi_init(void)
{
- return of_register_platform_driver(&mpc52xx_psc_spi_of_driver);
+ return platform_driver_register(&mpc52xx_psc_spi_of_driver);
}
module_init(mpc52xx_psc_spi_init);
static void __exit mpc52xx_psc_spi_exit(void)
{
- of_unregister_platform_driver(&mpc52xx_psc_spi_of_driver);
+ platform_driver_unregister(&mpc52xx_psc_spi_of_driver);
}
module_exit(mpc52xx_psc_spi_exit);
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index 84439f655601..015a974bed72 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -390,8 +390,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
/*
* OF Platform Bus Binding
*/
-static int __devinit mpc52xx_spi_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
@@ -556,7 +555,7 @@ static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
};
MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
-static struct of_platform_driver mpc52xx_spi_of_driver = {
+static struct platform_driver mpc52xx_spi_of_driver = {
.driver = {
.name = "mpc52xx-spi",
.owner = THIS_MODULE,
@@ -568,13 +567,13 @@ static struct of_platform_driver mpc52xx_spi_of_driver = {
static int __init mpc52xx_spi_init(void)
{
- return of_register_platform_driver(&mpc52xx_spi_of_driver);
+ return platform_driver_register(&mpc52xx_spi_of_driver);
}
module_init(mpc52xx_spi_init);
static void __exit mpc52xx_spi_exit(void)
{
- of_unregister_platform_driver(&mpc52xx_spi_of_driver);
+ platform_driver_unregister(&mpc52xx_spi_of_driver);
}
module_exit(mpc52xx_spi_exit);
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index abb1ffbf3d20..c196d5643d17 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrjölä <juha.yrjola@nokia.com>
+ * Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -46,7 +47,6 @@
#define OMAP2_MCSPI_MAX_CTRL 4
#define OMAP2_MCSPI_REVISION 0x00
-#define OMAP2_MCSPI_SYSCONFIG 0x10
#define OMAP2_MCSPI_SYSSTATUS 0x14
#define OMAP2_MCSPI_IRQSTATUS 0x18
#define OMAP2_MCSPI_IRQENABLE 0x1c
@@ -63,13 +63,6 @@
/* per-register bitmasks: */
-#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4)
-#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2)
-#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0)
-#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1)
-
-#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
-
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
@@ -122,13 +115,12 @@ struct omap2_mcspi {
spinlock_t lock;
struct list_head msg_queue;
struct spi_master *master;
- struct clk *ick;
- struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
unsigned long phys;
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
};
struct omap2_mcspi_cs {
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs {
* corresponding registers are modified.
*/
struct omap2_mcspi_regs {
- u32 sysconfig;
u32 modulctrl;
u32 wakeupenable;
struct list_head cs;
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);
-
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
@@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
- clk_disable(mcspi->ick);
- clk_disable(mcspi->fck);
+ pm_runtime_put_sync(mcspi->dev);
}
static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
- if (clk_enable(mcspi->ick))
- return -ENODEV;
- if (clk_enable(mcspi->fck))
- return -ENODEV;
-
- omap2_mcspi_restore_ctx(mcspi);
-
- return 0;
+ return pm_runtime_get_sync(mcspi->dev);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -651,6 +631,17 @@ out:
return count - c;
}
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+ u32 div;
+
+ for (div = 0; div < 15; div++)
+ if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+ return div;
+
+ return 15;
+}
+
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
@@ -673,12 +664,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
if (t && t->speed_hz)
speed_hz = t->speed_hz;
- if (speed_hz) {
- while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
- > speed_hz)
- div++;
- } else
- div = 15;
+ speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+ div = omap2_mcspi_calc_divisor(speed_hz);
l = mcspi_cached_chconf0(spi);
@@ -715,7 +702,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
- OMAP2_MCSPI_MAX_FREQ / (1 << div),
+ OMAP2_MCSPI_MAX_FREQ >> div,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
@@ -819,8 +806,9 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return ret;
}
- if (omap2_mcspi_enable_clocks(mcspi))
- return -ENODEV;
+ ret = omap2_mcspi_enable_clocks(mcspi);
+ if (ret < 0)
+ return ret;
ret = omap2_mcspi_setup_transfer(spi, NULL);
omap2_mcspi_disable_clocks(mcspi);
@@ -863,10 +851,11 @@ static void omap2_mcspi_work(struct work_struct *work)
struct omap2_mcspi *mcspi;
mcspi = container_of(work, struct omap2_mcspi, work);
- spin_lock_irq(&mcspi->lock);
- if (omap2_mcspi_enable_clocks(mcspi))
- goto out;
+ if (omap2_mcspi_enable_clocks(mcspi) < 0)
+ return;
+
+ spin_lock_irq(&mcspi->lock);
/* We only enable one channel at a time -- the one whose message is
* at the head of the queue -- although this controller would gladly
@@ -979,10 +968,9 @@ static void omap2_mcspi_work(struct work_struct *work)
spin_lock_irq(&mcspi->lock);
}
- omap2_mcspi_disable_clocks(mcspi);
-
-out:
spin_unlock_irq(&mcspi->lock);
+
+ omap2_mcspi_disable_clocks(mcspi);
}
static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
@@ -1015,10 +1003,10 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
t->bits_per_word);
return -EINVAL;
}
- if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
- dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ/(1<<16));
+ if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
+ dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
+ t->speed_hz,
+ OMAP2_MCSPI_MAX_FREQ >> 15);
return -EINVAL;
}
@@ -1058,25 +1046,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
u32 tmp;
+ int ret = 0;
- if (omap2_mcspi_enable_clocks(mcspi))
- return -1;
-
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
- OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
- do {
- tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
- } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
-
- tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
- OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
- OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
- omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;
+ ret = omap2_mcspi_enable_clocks(mcspi);
+ if (ret < 0)
+ return ret;
tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
@@ -1087,91 +1065,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
return 0;
}
-static u8 __initdata spi1_rxdma_id [] = {
- OMAP24XX_DMA_SPI1_RX0,
- OMAP24XX_DMA_SPI1_RX1,
- OMAP24XX_DMA_SPI1_RX2,
- OMAP24XX_DMA_SPI1_RX3,
-};
-
-static u8 __initdata spi1_txdma_id [] = {
- OMAP24XX_DMA_SPI1_TX0,
- OMAP24XX_DMA_SPI1_TX1,
- OMAP24XX_DMA_SPI1_TX2,
- OMAP24XX_DMA_SPI1_TX3,
-};
-
-static u8 __initdata spi2_rxdma_id[] = {
- OMAP24XX_DMA_SPI2_RX0,
- OMAP24XX_DMA_SPI2_RX1,
-};
-
-static u8 __initdata spi2_txdma_id[] = {
- OMAP24XX_DMA_SPI2_TX0,
- OMAP24XX_DMA_SPI2_TX1,
-};
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
- || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi3_rxdma_id[] = {
- OMAP24XX_DMA_SPI3_RX0,
- OMAP24XX_DMA_SPI3_RX1,
-};
+static int omap_mcspi_runtime_resume(struct device *dev)
+{
+ struct omap2_mcspi *mcspi;
+ struct spi_master *master;
-static u8 __initdata spi3_txdma_id[] = {
- OMAP24XX_DMA_SPI3_TX0,
- OMAP24XX_DMA_SPI3_TX1,
-};
-#endif
+ master = dev_get_drvdata(dev);
+ mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_restore_ctx(mcspi);
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi4_rxdma_id[] = {
- OMAP34XX_DMA_SPI4_RX0,
-};
+ return 0;
+}
-static u8 __initdata spi4_txdma_id[] = {
- OMAP34XX_DMA_SPI4_TX0,
-};
-#endif
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
+ struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
- const u8 *rxdma_id, *txdma_id;
- unsigned num_chipselect;
-
- switch (pdev->id) {
- case 1:
- rxdma_id = spi1_rxdma_id;
- txdma_id = spi1_txdma_id;
- num_chipselect = 4;
- break;
- case 2:
- rxdma_id = spi2_rxdma_id;
- txdma_id = spi2_txdma_id;
- num_chipselect = 2;
- break;
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
- || defined(CONFIG_ARCH_OMAP4)
- case 3:
- rxdma_id = spi3_rxdma_id;
- txdma_id = spi3_txdma_id;
- num_chipselect = 2;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
- case 4:
- rxdma_id = spi4_rxdma_id;
- txdma_id = spi4_txdma_id;
- num_chipselect = 1;
- break;
-#endif
- default:
- return -EINVAL;
- }
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1188,7 +1101,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
master->setup = omap2_mcspi_setup;
master->transfer = omap2_mcspi_transfer;
master->cleanup = omap2_mcspi_cleanup;
- master->num_chipselect = num_chipselect;
+ master->num_chipselect = pdata->num_cs;
dev_set_drvdata(&pdev->dev, master);
@@ -1206,49 +1119,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
goto err1;
}
+ r->start += pdata->regs_offset;
+ r->end += pdata->regs_offset;
mcspi->phys = r->start;
mcspi->base = ioremap(r->start, r->end - r->start + 1);
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto err1aa;
+ goto err2;
}
+ mcspi->dev = &pdev->dev;
INIT_WORK(&mcspi->work, omap2_mcspi_work);
spin_lock_init(&mcspi->lock);
INIT_LIST_HEAD(&mcspi->msg_queue);
INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
- mcspi->ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(mcspi->ick)) {
- dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
- status = PTR_ERR(mcspi->ick);
- goto err1a;
- }
- mcspi->fck = clk_get(&pdev->dev, "fck");
- if (IS_ERR(mcspi->fck)) {
- dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
- status = PTR_ERR(mcspi->fck);
- goto err2;
- }
-
mcspi->dma_channels = kcalloc(master->num_chipselect,
sizeof(struct omap2_mcspi_dma),
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto err3;
+ goto err2;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ char dma_ch_name[14];
+ struct resource *dma_res;
+
+ sprintf(dma_ch_name, "rx%d", i);
+ dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ dma_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
+ status = -ENODEV;
+ break;
+ }
- for (i = 0; i < num_chipselect; i++) {
mcspi->dma_channels[i].dma_rx_channel = -1;
- mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+ mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
+ sprintf(dma_ch_name, "tx%d", i);
+ dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ dma_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
+ status = -ENODEV;
+ break;
+ }
+
mcspi->dma_channels[i].dma_tx_channel = -1;
- mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+ mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
- if (omap2_mcspi_reset(mcspi) < 0)
- goto err4;
+ pm_runtime_enable(&pdev->dev);
+
+ if (status || omap2_mcspi_master_setup(mcspi) < 0)
+ goto err3;
status = spi_register_master(master);
if (status < 0)
@@ -1257,17 +1183,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
return status;
err4:
- kfree(mcspi->dma_channels);
+ spi_master_put(master);
err3:
- clk_put(mcspi->fck);
+ kfree(mcspi->dma_channels);
err2:
- clk_put(mcspi->ick);
-err1a:
- iounmap(mcspi->base);
-err1aa:
release_mem_region(r->start, (r->end - r->start) + 1);
+ iounmap(mcspi->base);
err1:
- spi_master_put(master);
return status;
}
@@ -1283,9 +1205,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
dma_channels = mcspi->dma_channels;
- clk_put(mcspi->fck);
- clk_put(mcspi->ick);
-
+ omap2_mcspi_disable_clocks(mcspi);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, (r->end - r->start) + 1);
@@ -1336,6 +1256,7 @@ static int omap2_mcspi_resume(struct device *dev)
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
.resume = omap2_mcspi_resume,
+ .runtime_resume = omap_mcspi_runtime_resume,
};
static struct platform_driver omap2_mcspi_driver = {
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 95928833855b..a429b01d0285 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1557,9 +1557,7 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ssp = ssp;
master->dev.parent = &pdev->dev;
-#ifdef CONFIG_OF
master->dev.of_node = pdev->dev.of_node;
-#endif
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 19752b09e155..378e504f89eb 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -89,9 +89,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
goto err_nomem;
pdev->dev.parent = &dev->dev;
-#ifdef CONFIG_OF
pdev->dev.of_node = dev->dev.of_node;
-#endif
ssp = &spi_info->ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = ioremap(phys_beg, phys_len);
diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi_altera.c
new file mode 100644
index 000000000000..4813a63ce6fb
--- /dev/null
+++ b/drivers/spi/spi_altera.c
@@ -0,0 +1,339 @@
+/*
+ * Altera SPI driver
+ *
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_altera"
+
+#define ALTERA_SPI_RXDATA 0
+#define ALTERA_SPI_TXDATA 4
+#define ALTERA_SPI_STATUS 8
+#define ALTERA_SPI_CONTROL 12
+#define ALTERA_SPI_SLAVE_SEL 20
+
+#define ALTERA_SPI_STATUS_ROE_MSK 0x8
+#define ALTERA_SPI_STATUS_TOE_MSK 0x10
+#define ALTERA_SPI_STATUS_TMT_MSK 0x20
+#define ALTERA_SPI_STATUS_TRDY_MSK 0x40
+#define ALTERA_SPI_STATUS_RRDY_MSK 0x80
+#define ALTERA_SPI_STATUS_E_MSK 0x100
+
+#define ALTERA_SPI_CONTROL_IROE_MSK 0x8
+#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10
+#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40
+#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80
+#define ALTERA_SPI_CONTROL_IE_MSK 0x100
+#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
+
+struct altera_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ int len;
+ int count;
+ int bytes_per_word;
+ unsigned long imr;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+};
+
+static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void altera_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
+ if (spi->mode & SPI_CS_HIGH) {
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ writel(1 << spi->chip_select,
+ hw->base + ALTERA_SPI_SLAVE_SEL);
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ writel(0, hw->base + ALTERA_SPI_SLAVE_SEL);
+ break;
+ }
+ } else {
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ writel(1 << spi->chip_select,
+ hw->base + ALTERA_SPI_SLAVE_SEL);
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+ }
+ }
+}
+
+static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ return 0;
+}
+
+static int altera_spi_setup(struct spi_device *spi)
+{
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct altera_spi *hw, int count)
+{
+ if (hw->tx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ return hw->tx[count];
+ case 2:
+ return (hw->tx[count * 2]
+ | (hw->tx[count * 2 + 1] << 8));
+ }
+ }
+ return 0;
+}
+
+static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->count = 0;
+ hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8;
+ hw->len = t->len / hw->bytes_per_word;
+
+ if (hw->irq >= 0) {
+ /* enable receive interrupt */
+ hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+
+ /* send the first byte */
+ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
+
+ wait_for_completion(&hw->done);
+ /* disable receive interrupt */
+ hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ } else {
+ /* send the first byte */
+ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
+
+ while (1) {
+ unsigned int rxd;
+
+ while (!(readl(hw->base + ALTERA_SPI_STATUS) &
+ ALTERA_SPI_STATUS_RRDY_MSK))
+ cpu_relax();
+
+ rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ }
+ }
+
+ hw->count++;
+
+ if (hw->count < hw->len)
+ writel(hw_txbyte(hw, hw->count),
+ hw->base + ALTERA_SPI_TXDATA);
+ else
+ break;
+ }
+
+ }
+
+ return hw->count * hw->bytes_per_word;
+}
+
+static irqreturn_t altera_spi_irq(int irq, void *dev)
+{
+ struct altera_spi *hw = dev;
+ unsigned int rxd;
+
+ rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ }
+ }
+
+ hw->count++;
+
+ if (hw->count < hw->len)
+ writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA);
+ else
+ complete(&hw->done);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit altera_spi_probe(struct platform_device *pdev)
+{
+ struct altera_spi_platform_data *platp = pdev->dev.platform_data;
+ struct altera_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CS_HIGH;
+ master->setup = altera_spi_setup;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = spi_master_get(master);
+ if (!hw->bitbang.master)
+ return err;
+ hw->bitbang.setup_transfer = altera_spi_setupxfer;
+ hw->bitbang.chipselect = altera_spi_chipsel;
+ hw->bitbang.txrx_bufs = altera_spi_txrx;
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit_busy;
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name))
+ goto exit_busy;
+ hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hw->base)
+ goto exit_busy;
+ /* program defaults into the registers */
+ hw->imr = 0; /* disable spi interrupts */
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */
+ if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK)
+ readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+ /* find platform data */
+ if (!platp)
+ hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+
+exit_busy:
+ err = -EBUSY;
+exit:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return err;
+}
+
+static int __devexit altera_spi_remove(struct platform_device *dev)
+{
+ struct altera_spi *hw = platform_get_drvdata(dev);
+ struct spi_master *master = hw->bitbang.master;
+
+ spi_bitbang_stop(&hw->bitbang);
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id altera_spi_match[] = {
+ { .compatible = "ALTR,spi-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_spi_match);
+#else /* CONFIG_OF */
+#define altera_spi_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver altera_spi_driver = {
+ .probe = altera_spi_probe,
+ .remove = __devexit_p(altera_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ .of_match_table = altera_spi_match,
+ },
+};
+
+static int __init altera_spi_init(void)
+{
+ return platform_driver_register(&altera_spi_driver);
+}
+module_init(altera_spi_init);
+
+static void __exit altera_spi_exit(void)
+{
+ platform_driver_unregister(&altera_spi_driver);
+}
+module_exit(altera_spi_exit);
+
+MODULE_DESCRIPTION("Altera SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 3f223511127b..a28462486df8 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -425,6 +425,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
int n_bytes = drv_data->n_bytes;
+ int loop = 0;
 /* wait until the transfer is finished. */
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
@@ -435,10 +436,15 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
/* last read */
if (drv_data->rx) {
dev_dbg(&drv_data->pdev->dev, "last read\n");
- if (n_bytes == 2)
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
- else if (n_bytes == 1)
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes / 2; loop++)
+ *buf++ = read_RDBR(drv_data);
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes; loop++)
+ *buf++ = read_RDBR(drv_data);
+ }
drv_data->rx += n_bytes;
}
@@ -458,29 +464,53 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (drv_data->rx && drv_data->tx) {
/* duplex */
dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
- if (drv_data->n_bytes == 2) {
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- } else if (drv_data->n_bytes == 1) {
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->rx;
+ u16 *buf2 = (u16 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf2++);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ u8 *buf2 = (u8 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf2++);
+ }
}
} else if (drv_data->rx) {
/* read */
dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
- if (drv_data->n_bytes == 2)
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
- else if (drv_data->n_bytes == 1)
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
- write_TDBR(drv_data, chip->idle_tx_val);
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, chip->idle_tx_val);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, chip->idle_tx_val);
+ }
+ }
} else if (drv_data->tx) {
/* write */
dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
- bfin_spi_dummy_read(drv_data);
- if (drv_data->n_bytes == 2)
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- else if (drv_data->n_bytes == 1)
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf++);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf++);
+ }
+ }
}
if (drv_data->tx)
@@ -623,6 +653,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
message->state = bfin_spi_next_transfer(drv_data);
/* Schedule next transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
+ return;
}
if (transfer->tx_buf != NULL) {
@@ -651,16 +682,16 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* Bits per word setup */
bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
- if (bits_per_word == 8) {
- drv_data->n_bytes = 1;
- drv_data->len = transfer->len;
- cr_width = 0;
- drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
- } else if (bits_per_word == 16) {
- drv_data->n_bytes = 2;
+ if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+ drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
cr_width = BIT_CTL_WORDSIZE;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
+ } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+ drv_data->n_bytes = bits_per_word/8;
+ drv_data->len = transfer->len;
+ cr_width = 0;
+ drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
} else {
dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n");
message->status = -EINVAL;
@@ -815,10 +846,19 @@ static void bfin_spi_pump_transfers(unsigned long data)
if (drv_data->tx == NULL)
write_TDBR(drv_data, chip->idle_tx_val);
else {
- if (bits_per_word == 8)
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
- else
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ int loop;
+ if (bits_per_word % 16 == 0) {
+ u16 *buf = (u16 *)drv_data->tx;
+ for (loop = 0; loop < bits_per_word / 16;
+ loop++) {
+ write_TDBR(drv_data, *buf++);
+ }
+ } else if (bits_per_word % 8 == 0) {
+ u8 *buf = (u8 *)drv_data->tx;
+ for (loop = 0; loop < bits_per_word / 8; loop++)
+ write_TDBR(drv_data, *buf++);
+ }
+
drv_data->tx += drv_data->n_bytes;
}
@@ -1031,7 +1071,7 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->ctl_reg &= bfin_ctl_reg;
}
- if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ if (spi->bits_per_word % 8) {
dev_err(&spi->dev, "%d bits_per_word is not supported\n",
spi->bits_per_word);
goto error;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 8b55724d5f39..14a63f6010d1 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -259,10 +259,6 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
-
- setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -300,11 +296,7 @@ static void bitbang_work(struct work_struct *work)
/* init (-1) or override (1) transfer params */
if (do_setup != 0) {
- if (!setup_transfer) {
- status = -ENOPROTOOPT;
- break;
- }
- status = setup_transfer(spi, t);
+ status = bitbang->setup_transfer(spi, t);
if (status < 0)
break;
if (do_setup == -1)
@@ -465,6 +457,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
}
} else if (!bitbang->master->setup)
return -EINVAL;
+ if (bitbang->master->transfer == spi_bitbang_transfer &&
+ !bitbang->setup_transfer)
+ return -EINVAL;
/* this task is the only thing to touch the SPI bits */
bitbang->busy = 0;
diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c
index a99e2333b949..900e921ab80e 100644
--- a/drivers/spi/spi_fsl_espi.c
+++ b/drivers/spi/spi_fsl_espi.c
@@ -685,8 +685,7 @@ static int of_fsl_espi_get_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_espi_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit of_fsl_espi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -695,7 +694,7 @@ static int __devinit of_fsl_espi_probe(struct platform_device *ofdev,
struct resource irq;
int ret = -ENOMEM;
- ret = of_mpc8xxx_spi_probe(ofdev, ofid);
+ ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
return ret;
@@ -736,7 +735,7 @@ static const struct of_device_id of_fsl_espi_match[] = {
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
-static struct of_platform_driver fsl_espi_driver = {
+static struct platform_driver fsl_espi_driver = {
.driver = {
.name = "fsl_espi",
.owner = THIS_MODULE,
@@ -748,13 +747,13 @@ static struct of_platform_driver fsl_espi_driver = {
static int __init fsl_espi_init(void)
{
- return of_register_platform_driver(&fsl_espi_driver);
+ return platform_driver_register(&fsl_espi_driver);
}
module_init(fsl_espi_init);
static void __exit fsl_espi_exit(void)
{
- of_unregister_platform_driver(&fsl_espi_driver);
+ platform_driver_unregister(&fsl_espi_driver);
}
module_exit(fsl_espi_exit);
diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c
index 5cd741fdb5c3..ff59f42ae990 100644
--- a/drivers/spi/spi_fsl_lib.c
+++ b/drivers/spi/spi_fsl_lib.c
@@ -189,8 +189,7 @@ int __devexit mpc8xxx_spi_remove(struct device *dev)
return 0;
}
-int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h
index 281e060977cd..cbe881b9ea76 100644
--- a/drivers/spi/spi_fsl_lib.h
+++ b/drivers/spi/spi_fsl_lib.h
@@ -118,7 +118,6 @@ extern const char *mpc8xxx_spi_strmode(unsigned int flags);
extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq);
extern int mpc8xxx_spi_remove(struct device *dev);
-extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid);
+extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev);
#endif /* __SPI_FSL_LIB_H__ */
diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi_fsl_spi.c
index 7ca52d3ae8f8..7963c9b49566 100644
--- a/drivers/spi/spi_fsl_spi.c
+++ b/drivers/spi/spi_fsl_spi.c
@@ -1042,8 +1042,7 @@ static int of_fsl_spi_free_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_spi_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -1052,7 +1051,7 @@ static int __devinit of_fsl_spi_probe(struct platform_device *ofdev,
struct resource irq;
int ret = -ENOMEM;
- ret = of_mpc8xxx_spi_probe(ofdev, ofid);
+ ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
return ret;
@@ -1100,7 +1099,7 @@ static const struct of_device_id of_fsl_spi_match[] = {
};
MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
-static struct of_platform_driver of_fsl_spi_driver = {
+static struct platform_driver of_fsl_spi_driver = {
.driver = {
.name = "fsl_spi",
.owner = THIS_MODULE,
@@ -1177,13 +1176,13 @@ static void __exit legacy_driver_unregister(void) {}
static int __init fsl_spi_init(void)
{
legacy_driver_register();
- return of_register_platform_driver(&of_fsl_spi_driver);
+ return platform_driver_register(&of_fsl_spi_driver);
}
module_init(fsl_spi_init);
static void __exit fsl_spi_exit(void)
{
- of_unregister_platform_driver(&of_fsl_spi_driver);
+ platform_driver_unregister(&of_fsl_spi_driver);
legacy_driver_unregister();
}
module_exit(fsl_spi_exit);
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 1cf9d5faabf4..69d6dba67c19 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -174,7 +174,7 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
#define SPI_IMX2_3_CTRL 0x08
#define SPI_IMX2_3_CTRL_ENABLE (1 << 0)
#define SPI_IMX2_3_CTRL_XCH (1 << 2)
-#define SPI_IMX2_3_CTRL_MODE(cs) (1 << ((cs) + 4))
+#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4)
#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8
#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12
#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18)
@@ -253,8 +253,14 @@ static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
{
u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0;
- /* set master mode */
- ctrl |= SPI_IMX2_3_CTRL_MODE(config->cs);
+ /*
+ * The hardware seems to have a race condition when changing modes. The
+ * current assumption is that the selection of the channel arrives
+ * earlier in the hardware than the mode bits when they are written at
+ * the same time.
+ * So set master mode for all channels as we do not support slave mode.
+ */
+ ctrl |= SPI_IMX2_3_CTRL_MODE_MASK;
/* set clock speed */
ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz);
diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi_oc_tiny.c
new file mode 100644
index 000000000000..f1bde66cea19
--- /dev/null
+++ b/drivers/spi/spi_oc_tiny.c
@@ -0,0 +1,425 @@
+/*
+ * OpenCores tiny SPI master driver
+ *
+ * http://opencores.org/project,tiny_spi
+ *
+ * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_oc_tiny.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_oc_tiny"
+
+#define TINY_SPI_RXDATA 0
+#define TINY_SPI_TXDATA 4
+#define TINY_SPI_STATUS 8
+#define TINY_SPI_CONTROL 12
+#define TINY_SPI_BAUD 16
+
+#define TINY_SPI_STATUS_TXE 0x1
+#define TINY_SPI_STATUS_TXR 0x2
+
+struct tiny_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ unsigned int freq;
+ unsigned int baudwidth;
+ unsigned int baud;
+ unsigned int speed_hz;
+ unsigned int mode;
+ unsigned int len;
+ unsigned int txc, rxc;
+ const u8 *txp;
+ u8 *rxp;
+ unsigned int gpio_cs_count;
+ int *gpio_cs;
+};
+
+static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
+}
+
+static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (hw->gpio_cs_count) {
+ gpio_set_value(hw->gpio_cs[spi->chip_select],
+ (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ }
+}
+
+static int tiny_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ unsigned int baud = hw->baud;
+
+ if (t) {
+ if (t->speed_hz && t->speed_hz != hw->speed_hz)
+ baud = tiny_spi_baud(spi, t->speed_hz);
+ }
+ writel(baud, hw->base + TINY_SPI_BAUD);
+ writel(hw->mode, hw->base + TINY_SPI_CONTROL);
+ return 0;
+}
+
+static int tiny_spi_setup(struct spi_device *spi)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (spi->max_speed_hz != hw->speed_hz) {
+ hw->speed_hz = spi->max_speed_hz;
+ hw->baud = tiny_spi_baud(spi, hw->speed_hz);
+ }
+ hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
+ return 0;
+}
+
+static inline void tiny_spi_wait_txr(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXR))
+ cpu_relax();
+}
+
+static inline void tiny_spi_wait_txe(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXE))
+ cpu_relax();
+}
+
+static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int i;
+
+ if (hw->irq >= 0) {
+		/* use interrupt-driven data transfer */
+ hw->len = t->len;
+ hw->txp = t->tx_buf;
+ hw->rxp = t->rx_buf;
+ hw->txc = 0;
+ hw->rxc = 0;
+
+ /* send the first byte */
+ if (t->len > 1) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS);
+ }
+
+ wait_for_completion(&hw->done);
+ } else if (txp && rxp) {
+ /* we need to tighten the transfer loop */
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ u8 rx, tx = *txp++;
+ tiny_spi_wait_txr(hw);
+ rx = readb(hw->base + TINY_SPI_TXDATA);
+ writeb(tx, hw->base + TINY_SPI_TXDATA);
+ *rxp++ = rx;
+ }
+ tiny_spi_wait_txr(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ }
+ tiny_spi_wait_txe(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ } else if (rxp) {
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(0,
+ hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ u8 rx;
+ tiny_spi_wait_txr(hw);
+ rx = readb(hw->base + TINY_SPI_TXDATA);
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ *rxp++ = rx;
+ }
+ tiny_spi_wait_txr(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ }
+ tiny_spi_wait_txe(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ } else if (txp) {
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ u8 tx = *txp++;
+ tiny_spi_wait_txr(hw);
+ writeb(tx, hw->base + TINY_SPI_TXDATA);
+ }
+ }
+ tiny_spi_wait_txe(hw);
+ } else {
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ tiny_spi_wait_txr(hw);
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ }
+ }
+ tiny_spi_wait_txe(hw);
+ }
+ return t->len;
+}
+
+static irqreturn_t tiny_spi_irq(int irq, void *dev)
+{
+ struct tiny_spi *hw = dev;
+
+ writeb(0, hw->base + TINY_SPI_STATUS);
+ if (hw->rxc + 1 == hw->len) {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ hw->rxc++;
+ complete(&hw->done);
+ } else {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ hw->rxc++;
+ if (hw->txc < hw->len) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR,
+ hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(TINY_SPI_STATUS_TXE,
+ hw->base + TINY_SPI_STATUS);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+#include <linux/of_gpio.h>
+
+static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int i;
+ const __be32 *val;
+ int len;
+
+ if (!np)
+ return 0;
+ hw->gpio_cs_count = of_gpio_count(np);
+ if (hw->gpio_cs_count) {
+ hw->gpio_cs = devm_kzalloc(&pdev->dev,
+ hw->gpio_cs_count * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!hw->gpio_cs)
+ return -ENOMEM;
+ }
+ for (i = 0; i < hw->gpio_cs_count; i++) {
+ hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
+ if (hw->gpio_cs[i] < 0)
+ return -ENODEV;
+ }
+ hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+ val = of_get_property(pdev->dev.of_node,
+ "clock-frequency", &len);
+ if (val && len >= sizeof(__be32))
+ hw->freq = be32_to_cpup(val);
+ val = of_get_property(pdev->dev.of_node, "baud-width", &len);
+ if (val && len >= sizeof(__be32))
+ hw->baudwidth = be32_to_cpup(val);
+ return 0;
+}
+#else /* !CONFIG_OF */
+static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int __devinit tiny_spi_probe(struct platform_device *pdev)
+{
+ struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
+ struct tiny_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ unsigned int i;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->num_chipselect = 255;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tiny_spi_setup;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = spi_master_get(master);
+ if (!hw->bitbang.master)
+ return err;
+ hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
+ hw->bitbang.chipselect = tiny_spi_chipselect;
+ hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit_busy;
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name))
+ goto exit_busy;
+ hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hw->base)
+ goto exit_busy;
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+ /* find platform data */
+ if (platp) {
+ hw->gpio_cs_count = platp->gpio_cs_count;
+ hw->gpio_cs = platp->gpio_cs;
+ if (platp->gpio_cs_count && !platp->gpio_cs)
+ goto exit_busy;
+ hw->freq = platp->freq;
+ hw->baudwidth = platp->baudwidth;
+ } else {
+ err = tiny_spi_of_probe(pdev);
+ if (err)
+ goto exit;
+ }
+ for (i = 0; i < hw->gpio_cs_count; i++) {
+ err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
+ if (err)
+ goto exit_gpio;
+ gpio_direction_output(hw->gpio_cs[i], 1);
+ }
+ hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count);
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+
+exit_gpio:
+ while (i-- > 0)
+ gpio_free(hw->gpio_cs[i]);
+exit_busy:
+ err = -EBUSY;
+exit:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return err;
+}
+
+static int __devexit tiny_spi_remove(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct spi_master *master = hw->bitbang.master;
+ unsigned int i;
+
+ spi_bitbang_stop(&hw->bitbang);
+ for (i = 0; i < hw->gpio_cs_count; i++)
+ gpio_free(hw->gpio_cs[i]);
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tiny_spi_match[] = {
+ { .compatible = "opencores,tiny-spi-rtlsvn2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tiny_spi_match);
+#else /* CONFIG_OF */
+#define tiny_spi_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver tiny_spi_driver = {
+ .probe = tiny_spi_probe,
+ .remove = __devexit_p(tiny_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ .of_match_table = tiny_spi_match,
+ },
+};
+
+static int __init tiny_spi_init(void)
+{
+ return platform_driver_register(&tiny_spi_driver);
+}
+module_init(tiny_spi_init);
+
+static void __exit tiny_spi_exit(void)
+{
+ platform_driver_unregister(&tiny_spi_driver);
+}
+module_exit(tiny_spi_exit);
+
+MODULE_DESCRIPTION("OpenCores tiny SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 80e172d3e72a..2a298c029194 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -390,8 +390,7 @@ static void free_gpios(struct ppc4xx_spi *hw)
/*
* platform_device layer stuff...
*/
-static int __init spi_ppc4xx_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __init spi_ppc4xx_of_probe(struct platform_device *op)
{
struct ppc4xx_spi *hw;
struct spi_master *master;
@@ -586,7 +585,7 @@ static const struct of_device_id spi_ppc4xx_of_match[] = {
MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
-static struct of_platform_driver spi_ppc4xx_of_driver = {
+static struct platform_driver spi_ppc4xx_of_driver = {
.probe = spi_ppc4xx_of_probe,
.remove = __exit_p(spi_ppc4xx_of_remove),
.driver = {
@@ -598,13 +597,13 @@ static struct of_platform_driver spi_ppc4xx_of_driver = {
static int __init spi_ppc4xx_init(void)
{
- return of_register_platform_driver(&spi_ppc4xx_of_driver);
+ return platform_driver_register(&spi_ppc4xx_of_driver);
}
module_init(spi_ppc4xx_init);
static void __exit spi_ppc4xx_exit(void)
{
- of_unregister_platform_driver(&spi_ppc4xx_of_driver);
+ platform_driver_unregister(&spi_ppc4xx_of_driver);
}
module_exit(spi_ppc4xx_exit);
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi_sh.c
new file mode 100644
index 000000000000..869a07d375d6
--- /dev/null
+++ b/drivers/spi/spi_sh.c
@@ -0,0 +1,543 @@
+/*
+ * SH SPI bus driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ *
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+
+#define SPI_SH_TBR 0x00
+#define SPI_SH_RBR 0x00
+#define SPI_SH_CR1 0x08
+#define SPI_SH_CR2 0x10
+#define SPI_SH_CR3 0x18
+#define SPI_SH_CR4 0x20
+#define SPI_SH_CR5 0x28
+
+/* CR1 */
+#define SPI_SH_TBE 0x80
+#define SPI_SH_TBF 0x40
+#define SPI_SH_RBE 0x20
+#define SPI_SH_RBF 0x10
+#define SPI_SH_PFONRD 0x08
+#define SPI_SH_SSDB 0x04
+#define SPI_SH_SSD 0x02
+#define SPI_SH_SSA 0x01
+
+/* CR2 */
+#define SPI_SH_RSTF 0x80
+#define SPI_SH_LOOPBK 0x40
+#define SPI_SH_CPOL 0x20
+#define SPI_SH_CPHA 0x10
+#define SPI_SH_L1M0 0x08
+
+/* CR3 */
+#define SPI_SH_MAX_BYTE 0xFF
+
+/* CR4 */
+#define SPI_SH_TBEI 0x80
+#define SPI_SH_TBFI 0x40
+#define SPI_SH_RBEI 0x20
+#define SPI_SH_RBFI 0x10
+#define SPI_SH_WPABRT 0x04
+#define SPI_SH_SSS 0x01
+
+/* CR8 */
+#define SPI_SH_P1L0 0x80
+#define SPI_SH_PP1L0 0x40
+#define SPI_SH_MUXI 0x20
+#define SPI_SH_MUXIRQ 0x10
+
+#define SPI_SH_FIFO_SIZE 32
+#define SPI_SH_SEND_TIMEOUT (3 * HZ)
+#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3)
+
+#undef DEBUG
+
+struct spi_sh_data {
+ void __iomem *addr;
+ int irq;
+ struct spi_master *master;
+ struct list_head queue;
+ struct workqueue_struct *workqueue;
+ struct work_struct ws;
+ unsigned long cr1;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
+static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
+ unsigned long offset)
+{
+ writel(data, ss->addr + offset);
+}
+
+static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
+{
+ return readl(ss->addr + offset);
+}
+
+static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp |= val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp &= ~val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void clear_fifo(struct spi_sh_data *ss)
+{
+ spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+ spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+}
+
+static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i, retval = 0;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ unsigned long tmp;
+ long ret;
+
+ if (t->len)
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ data = (unsigned char *)t->tx_buf;
+ while (remain > 0) {
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len &&
+ !(spi_sh_read(ss, SPI_SH_CR4) &
+ SPI_SH_WPABRT) &&
+ !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
+ i++)
+ spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);
+
+ if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
+ /* Abort SPI operation */
+ spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
+ retval = -EIO;
+ break;
+ }
+
+ cur_len = i;
+
+ remain -= cur_len;
+ data += cur_len;
+
+ if (remain > 0) {
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+
+ if (list_is_last(&t->transfer_list, &mesg->transfers)) {
+ tmp = spi_sh_read(ss, SPI_SH_CR1);
+ tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
+ spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && (ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return retval;
+}
+
+static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ unsigned long tmp;
+ long ret;
+
+ if (t->len > SPI_SH_MAX_BYTE)
+ spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
+ else
+ spi_sh_write(ss, t->len, SPI_SH_CR3);
+
+ tmp = spi_sh_read(ss, SPI_SH_CR1);
+ tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
+ spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ spi_sh_wait_write_buffer_empty(ss);
+
+ data = (unsigned char *)t->rx_buf;
+ while (remain > 0) {
+ if (remain >= SPI_SH_FIFO_SIZE) {
+ ss->cr1 &= ~SPI_SH_RBF;
+ spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_RBF,
+ SPI_SH_RECEIVE_TIMEOUT);
+ if (ret == 0 &&
+ spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len; i++) {
+ if (spi_sh_wait_receive_buffer(ss))
+ break;
+ data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
+ }
+
+ remain -= cur_len;
+ data += cur_len;
+ }
+
+ /* deassert CS when SPI is receiving. */
+ if (t->len > SPI_SH_MAX_BYTE) {
+ clear_fifo(ss);
+ spi_sh_write(ss, 1, SPI_SH_CR3);
+ } else {
+ spi_sh_write(ss, 0, SPI_SH_CR3);
+ }
+
+ return 0;
+}
+
+static void spi_sh_work(struct work_struct *work)
+{
+ struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
+ struct spi_message *mesg;
+ struct spi_transfer *t;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s: enter\n", __func__);
+
+ spin_lock_irqsave(&ss->lock, flags);
+ while (!list_empty(&ss->queue)) {
+ mesg = list_entry(ss->queue.next, struct spi_message, queue);
+ list_del_init(&mesg->queue);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+ list_for_each_entry(t, &mesg->transfers, transfer_list) {
+ pr_debug("tx_buf = %p, rx_buf = %p\n",
+ t->tx_buf, t->rx_buf);
+ pr_debug("len = %d, delay_usecs = %d\n",
+ t->len, t->delay_usecs);
+
+ if (t->tx_buf) {
+ ret = spi_sh_send(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ if (t->rx_buf) {
+ ret = spi_sh_receive(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ mesg->actual_length += t->len;
+ }
+ spin_lock_irqsave(&ss->lock, flags);
+
+ mesg->status = 0;
+ mesg->complete(mesg->context);
+ }
+
+ clear_fifo(ss);
+ spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
+ udelay(100);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+
+ clear_fifo(ss);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return;
+
+ error:
+ mesg->status = ret;
+ mesg->complete(mesg->context);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+ clear_fifo(ss);
+
+}
+
+static int spi_sh_setup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ pr_debug("%s: enter\n", __func__);
+
+	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
+ spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */
+ spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */
+
+ clear_fifo(ss);
+
+ /* 1/8 clock */
+ spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
+ udelay(10);
+
+ return 0;
+}
+
+static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ pr_debug("%s: enter\n", __func__);
+ pr_debug("\tmode = %02x\n", spi->mode);
+
+ spin_lock_irqsave(&ss->lock, flags);
+
+ mesg->actual_length = 0;
+ mesg->status = -EINPROGRESS;
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ list_add_tail(&mesg->queue, &ss->queue);
+ queue_work(ss->workqueue, &ss->ws);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return 0;
+}
+
+static void spi_sh_cleanup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+}
+
+static irqreturn_t spi_sh_irq(int irq, void *_ss)
+{
+ struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
+ unsigned long cr1;
+
+ cr1 = spi_sh_read(ss, SPI_SH_CR1);
+ if (cr1 & SPI_SH_TBE)
+ ss->cr1 |= SPI_SH_TBE;
+ if (cr1 & SPI_SH_TBF)
+ ss->cr1 |= SPI_SH_TBF;
+ if (cr1 & SPI_SH_RBE)
+ ss->cr1 |= SPI_SH_RBE;
+ if (cr1 & SPI_SH_RBF)
+ ss->cr1 |= SPI_SH_RBF;
+
+ if (ss->cr1) {
+ spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
+ wake_up(&ss->wait);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devexit spi_sh_remove(struct platform_device *pdev)
+{
+ struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
+
+ destroy_workqueue(ss->workqueue);
+ free_irq(ss->irq, ss);
+ iounmap(ss->addr);
+ spi_master_put(ss->master);
+
+ return 0;
+}
+
+static int __devinit spi_sh_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct spi_sh_data *ss;
+ int ret, irq;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ ss = spi_master_get_devdata(master);
+ dev_set_drvdata(&pdev->dev, ss);
+
+ ss->irq = irq;
+ ss->master = master;
+ ss->addr = ioremap(res->start, resource_size(res));
+ if (ss->addr == NULL) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ ret = -ENOMEM;
+ goto error1;
+ }
+ INIT_LIST_HEAD(&ss->queue);
+ spin_lock_init(&ss->lock);
+ INIT_WORK(&ss->ws, spi_sh_work);
+ init_waitqueue_head(&ss->wait);
+ ss->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (ss->workqueue == NULL) {
+ dev_err(&pdev->dev, "create workqueue error\n");
+ ret = -EBUSY;
+ goto error2;
+ }
+
+ ret = request_irq(irq, spi_sh_irq, IRQF_DISABLED, "spi_sh", ss);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ goto error3;
+ }
+
+ master->num_chipselect = 2;
+ master->bus_num = pdev->id;
+ master->setup = spi_sh_setup;
+ master->transfer = spi_sh_transfer;
+ master->cleanup = spi_sh_cleanup;
+
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ printk(KERN_ERR "spi_register_master error.\n");
+ goto error4;
+ }
+
+ return 0;
+
+ error4:
+ free_irq(irq, ss);
+ error3:
+ destroy_workqueue(ss->workqueue);
+ error2:
+ iounmap(ss->addr);
+ error1:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_driver spi_sh_driver = {
+ .probe = spi_sh_probe,
+ .remove = __devexit_p(spi_sh_remove),
+ .driver = {
+ .name = "sh_spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init spi_sh_init(void)
+{
+ return platform_driver_register(&spi_sh_driver);
+}
+module_init(spi_sh_init);
+
+static void __exit spi_sh_exit(void)
+{
+ platform_driver_unregister(&spi_sh_driver);
+}
+module_exit(spi_sh_exit);
+
+MODULE_DESCRIPTION("SH SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:sh_spi");
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 2c665fceaac7..e00d94b22250 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -9,22 +9,22 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/completion.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio.h>
-#include <linux/bitmap.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
+#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/spi/sh_msiof.h>
#include <asm/unaligned.h>
@@ -67,7 +67,7 @@ struct sh_msiof_spi_priv {
#define STR_TEOF (1 << 23)
#define STR_REOF (1 << 7)
-static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
+static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
case TSCR:
@@ -79,7 +79,7 @@ static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
}
static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
- unsigned long value)
+ u32 value)
{
switch (reg_offs) {
case TSCR:
@@ -93,10 +93,10 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
}
static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
- unsigned long clr, unsigned long set)
+ u32 clr, u32 set)
{
- unsigned long mask = clr | set;
- unsigned long data;
+ u32 mask = clr | set;
+ u32 data;
int k;
data = sh_msiof_read(p, CTR);
@@ -166,10 +166,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
- int cpol, int cpha,
- int tx_hi_z, int lsb_first)
+ u32 cpol, u32 cpha,
+ u32 tx_hi_z, u32 lsb_first)
{
- unsigned long tmp;
+ u32 tmp;
int edge;
/*
@@ -187,7 +187,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= cpol << 30; /* TSCKIZ */
tmp |= cpol << 28; /* RSCKIZ */
- edge = cpol ? cpha : !cpha;
+ edge = cpol ^ !cpha;
tmp |= edge << 27; /* TEDG */
tmp |= edge << 26; /* REDG */
@@ -197,11 +197,9 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
- int bits, int words)
+ u32 bits, u32 words)
{
- unsigned long dr2;
-
- dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+ u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
if (tx_buf)
sh_msiof_write(p, TMDR2, dr2);
@@ -222,7 +220,7 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned char *buf_8 = tx_buf;
+ const u8 *buf_8 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -232,7 +230,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned short *buf_16 = tx_buf;
+ const u16 *buf_16 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -242,7 +240,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned short *buf_16 = tx_buf;
+ const u16 *buf_16 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -252,7 +250,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned int *buf_32 = tx_buf;
+ const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -262,17 +260,37 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned int *buf_32 = tx_buf;
+ const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
}
+static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+}
+
+static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+}
+
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned char *buf_8 = rx_buf;
+ u8 *buf_8 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -282,7 +300,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned short *buf_16 = rx_buf;
+ u16 *buf_16 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -292,7 +310,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned short *buf_16 = rx_buf;
+ u16 *buf_16 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -302,7 +320,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned int *buf_32 = rx_buf;
+ u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -312,19 +330,40 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned int *buf_32 = rx_buf;
+ u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
}
+static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+}
+
+static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+}
+
static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
{
int bits;
bits = t ? t->bits_per_word : 0;
- bits = bits ? bits : spi->bits_per_word;
+ if (!bits)
+ bits = spi->bits_per_word;
return bits;
}
@@ -334,7 +373,8 @@ static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
unsigned long hz;
hz = t ? t->speed_hz : 0;
- hz = hz ? hz : spi->max_speed_hz;
+ if (!hz)
+ hz = spi->max_speed_hz;
return hz;
}
@@ -468,9 +508,17 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
int bytes_done;
int words;
int n;
+ bool swab;
bits = sh_msiof_spi_bits(spi, t);
+ if (bits <= 8 && t->len > 15 && !(t->len & 3)) {
+ bits = 32;
+ swab = true;
+ } else {
+ swab = false;
+ }
+
/* setup bytes per word and fifo read/write functions */
if (bits <= 8) {
bytes_per_word = 1;
@@ -487,6 +535,17 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
rx_fifo = sh_msiof_spi_read_fifo_16u;
else
rx_fifo = sh_msiof_spi_read_fifo_16;
+ } else if (swab) {
+ bytes_per_word = 4;
+ if ((unsigned long)t->tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_s32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_s32;
+
+ if ((unsigned long)t->rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_s32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_s32;
} else {
bytes_per_word = 4;
if ((unsigned long)t->tx_buf & 0x03)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 603428213d21..d9fd86211365 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -30,6 +30,7 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/compat.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
@@ -471,6 +472,16 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return retval;
}
+#ifdef CONFIG_COMPAT
+static long
+spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define spidev_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
static int spidev_open(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
@@ -543,6 +554,7 @@ static const struct file_operations spidev_fops = {
.write = spidev_write,
.read = spidev_read,
.unlocked_ioctl = spidev_ioctl,
+ .compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
.llseek = no_llseek,
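The new compat_ioctl hook simply converts the 32-bit user pointer before handing the request to spidev_ioctl(), so the usual spidev userspace sequence also works from a 32-bit process on a 64-bit kernel. A hedged sketch of such a caller follows; the device node, clock rate and message bytes are made up for illustration:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	int main(void)
	{
		unsigned char tx[4] = { 0x9f, 0x00, 0x00, 0x00 };	/* arbitrary example bytes */
		unsigned char rx[4] = { 0 };
		struct spi_ioc_transfer tr;
		int fd = open("/dev/spidev0.0", O_RDWR);	/* example device node */

		if (fd < 0) {
			perror("open spidev");
			return 1;
		}
		memset(&tr, 0, sizeof(tr));
		tr.tx_buf = (unsigned long)tx;	/* __u64 buffer fields keep the struct layout identical for 32- and 64-bit callers */
		tr.rx_buf = (unsigned long)rx;
		tr.len = sizeof(tx);
		tr.speed_hz = 1000000;
		tr.bits_per_word = 8;

		if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 0)
			perror("SPI_IOC_MESSAGE");
		close(fd);
		return 0;
	}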
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 7adaef62a991..c69c6f2c2c5c 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
@@ -351,14 +352,12 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
static const struct of_device_id xilinx_spi_of_match[] = {
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-#endif
struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
@@ -394,9 +393,7 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
master->bus_num = bus_num;
master->num_chipselect = num_cs;
-#ifdef CONFIG_OF
master->dev.of_node = dev->of_node;
-#endif
xspi->mem = *mem;
xspi->irq = irq;
@@ -474,7 +471,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
struct spi_master *master;
u8 i;
- pdata = dev->dev.platform_data;
+ pdata = mfd_get_data(dev);
if (pdata) {
num_cs = pdata->num_chipselect;
little_endian = pdata->little_endian;
@@ -539,9 +536,7 @@ static struct platform_driver xilinx_spi_driver = {
.driver = {
.name = XILINX_SPI_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_OF
.of_match_table = xilinx_spi_of_match,
-#endif
},
};
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 3918d2cc5856..e05ba6eefc7e 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -1192,10 +1192,10 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
}
EXPORT_SYMBOL(ssb_device_enable);
-/* Wait for a bit in a register to get set or unset.
+/* Wait for a bitmask in a register to get set or cleared.
* timeout is in units of ten-microseconds */
-static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
- int timeout, int set)
+static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
+ int timeout, int set)
{
int i;
u32 val;
@@ -1203,7 +1203,7 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
for (i = 0; i < timeout; i++) {
val = ssb_read32(dev, reg);
if (set) {
- if (val & bitmask)
+ if ((val & bitmask) == bitmask)
return 0;
} else {
if (!(val & bitmask))
@@ -1220,20 +1220,38 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags)
{
- u32 reject;
+ u32 reject, val;
if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET)
return;
reject = ssb_tmslow_reject_bitmask(dev);
- ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
- ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1);
- ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
- ssb_write32(dev, SSB_TMSLOW,
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- reject | SSB_TMSLOW_RESET |
- core_specific_flags);
- ssb_flush_tmslow(dev);
+
+ if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_CLOCK) {
+ ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
+ ssb_wait_bits(dev, SSB_TMSLOW, reject, 1000, 1);
+ ssb_wait_bits(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
+
+ if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
+ val = ssb_read32(dev, SSB_IMSTATE);
+ val |= SSB_IMSTATE_REJECT;
+ ssb_write32(dev, SSB_IMSTATE, val);
+ ssb_wait_bits(dev, SSB_IMSTATE, SSB_IMSTATE_BUSY, 1000,
+ 0);
+ }
+
+ ssb_write32(dev, SSB_TMSLOW,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ reject | SSB_TMSLOW_RESET |
+ core_specific_flags);
+ ssb_flush_tmslow(dev);
+
+ if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
+ val = ssb_read32(dev, SSB_IMSTATE);
+ val &= ~SSB_IMSTATE_REJECT;
+ ssb_write32(dev, SSB_IMSTATE, val);
+ }
+ }
ssb_write32(dev, SSB_TMSLOW,
reject | SSB_TMSLOW_RESET |
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 158449e55044..a467b20baac8 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -468,10 +468,14 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
+ SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
+ SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
} else {
SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
+ SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
+ SPEX(boardflags2_hi, SSB_SPROM5_BFL2HI, 0xFFFF, 0);
}
SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
SSB_SPROM4_ANTAVAIL_A_SHIFT);
@@ -641,7 +645,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
break;
default:
ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
- " revision %d detected. Will extract"
+ " revision %d detected. Will extract"
" v1\n", out->revision);
out->revision = 1;
sprom_extract_r123(out, in);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5c8fcfc42c3e..4a88e69bb9bb 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -51,6 +51,8 @@ source "drivers/staging/cx25821/Kconfig"
source "drivers/staging/tm6000/Kconfig"
+source "drivers/staging/cxd2099/Kconfig"
+
source "drivers/staging/dabusb/Kconfig"
source "drivers/staging/se401/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index d53886317826..80ee49ab59de 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_TM6000) += tm6000/
+obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_USB_DABUSB) += dabusb/
obj-$(CONFIG_USB_VICAM) += usbvideo/
obj-$(CONFIG_USB_SE401) += se401/
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 991463f4a7f4..9b7b71c294b8 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -2313,7 +2313,9 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
notif_bss_info->frame_len =
offsetof(struct ieee80211_mgmt,
u.beacon.variable) + wl_get_ielen(wl);
- freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel,
+ band->band);
+
channel = ieee80211_get_channel(wiphy, freq);
WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index cd8392badff0..6363077468f1 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -104,9 +104,6 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
static void wl_release_fw(struct wl_info *wl);
/* local prototypes */
-static int wl_start(struct sk_buff *skb, struct wl_info *wl);
-static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
- struct sk_buff *skb);
static void wl_dpc(unsigned long data);
MODULE_AUTHOR("Broadcom Corporation");
@@ -135,7 +132,6 @@ module_param(phymsglevel, int, 0);
#define HW_TO_WL(hw) (hw->priv)
#define WL_TO_HW(wl) (wl->pub->ieee_hw)
-static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
static int wl_ops_start(struct ieee80211_hw *hw);
static void wl_ops_stop(struct ieee80211_hw *hw);
static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -173,20 +169,18 @@ static int wl_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- int status;
struct wl_info *wl = hw->priv;
WL_LOCK(wl);
if (!wl->pub->up) {
WL_ERROR("ops->tx called while down\n");
- status = -ENETDOWN;
+ kfree_skb(skb);
goto done;
}
- status = wl_start(skb, wl);
+ wlc_sendpkt_mac80211(wl->wlc, skb, hw);
done:
WL_UNLOCK(wl);
- return status;
}
static int wl_ops_start(struct ieee80211_hw *hw)
@@ -1325,22 +1319,6 @@ void wl_free(struct wl_info *wl)
osl_detach(osh);
}
-/* transmit a packet */
-static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
-{
- if (!wl)
- return -ENETDOWN;
-
- return wl_start_int(wl, WL_TO_HW(wl), skb);
-}
-
-static int BCMFASTPATH
-wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- wlc_sendpkt_mac80211(wl->wlc, skb, hw);
- return NETDEV_TX_OK;
-}
-
void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
int prio)
{
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index e37e8058e2b8..aa12d1a65184 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -6818,11 +6818,14 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
ratespec_t rspec;
unsigned char *plcp;
+#if 0
+ /* Clearly, this is bogus -- reading the TSF now is wrong */
wlc_read_tsf(wlc, &tsf_l, &tsf_h); /* mactime */
rx_status->mactime = tsf_h;
rx_status->mactime <<= 32;
rx_status->mactime |= tsf_l;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU; /* clearly wrong */
+#endif
channel = WLC_CHAN_CHANNEL(rxh->RxChan);
diff --git a/drivers/staging/cxd2099/Kconfig b/drivers/staging/cxd2099/Kconfig
new file mode 100644
index 000000000000..9d638c30735d
--- /dev/null
+++ b/drivers/staging/cxd2099/Kconfig
@@ -0,0 +1,11 @@
+config DVB_CXD2099
+ tristate "CXD2099AR Common Interface driver"
+ depends on DVB_CORE && PCI && I2C && DVB_NGENE
+ ---help---
+ Support for the CI module found on cineS2 DVB-S2, supported by
+ the Micronas PCIe device driver (ngene).
+
+ For now, data is passed through '/dev/dvb/adapterX/sec0':
+ - Encrypted data must be written to 'sec0'.
+ - Decrypted data can be read from 'sec0'.
+ - Set up the CAM using device 'ca0'.
diff --git a/drivers/staging/cxd2099/Makefile b/drivers/staging/cxd2099/Makefile
new file mode 100644
index 000000000000..72b14558c119
--- /dev/null
+++ b/drivers/staging/cxd2099/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_DVB_CXD2099) += cxd2099.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
diff --git a/drivers/staging/cxd2099/TODO b/drivers/staging/cxd2099/TODO
new file mode 100644
index 000000000000..375bb6f8ee2c
--- /dev/null
+++ b/drivers/staging/cxd2099/TODO
@@ -0,0 +1,12 @@
+For now, data is passed through '/dev/dvb/adapterX/sec0':
+ - Encrypted data must be written to 'sec0'.
+ - Decrypted data can be read from 'sec0'.
+ - Set up the CAM using device 'ca0'.
+
+But this is wrong. There are some discussions about the proper way of
+doing it, as seen at:
+ http://www.mail-archive.com/linux-media@vger.kernel.org/msg22196.html
+
+Until there is a proper fix for it, the driver should be kept in staging.
+
+Patches should be submitted to: linux-media@vger.kernel.org.
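To make the interim data path above concrete, a minimal userspace sketch might look like the following; only the 'sec0' read/write flow comes from the TODO, while the adapter number, buffer sizes and error handling are placeholders (the CAM itself would still be set up through 'ca0'):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char scrambled[188] = { 0 };	/* one TS packet, normally taken from the demux */
		unsigned char clear[188];
		ssize_t n;
		int fd = open("/dev/dvb/adapter0/sec0", O_RDWR);	/* adapter0 is an assumption */

		if (fd < 0) {
			perror("open sec0");
			return 1;
		}
		/* encrypted data goes in ... */
		if (write(fd, scrambled, sizeof(scrambled)) < 0)
			perror("write sec0");
		/* ... decrypted data comes back out */
		n = read(fd, clear, sizeof(clear));
		if (n > 0)
			printf("read %zd descrambled bytes\n", n);
		close(fd);
		return 0;
	}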
diff --git a/drivers/staging/cxd2099/cxd2099.c b/drivers/staging/cxd2099/cxd2099.c
new file mode 100644
index 000000000000..b49186c74eb3
--- /dev/null
+++ b/drivers/staging/cxd2099/cxd2099.c
@@ -0,0 +1,574 @@
+/*
+ * cxd2099.c: Driver for the CXD2099AR Common Interface Controller
+ *
+ * Copyright (C) 2010 DigitalDevices UG
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include "cxd2099.h"
+
+#define MAX_BUFFER_SIZE 248
+
+struct cxd {
+ struct dvb_ca_en50221 en;
+
+ struct i2c_adapter *i2c;
+ u8 adr;
+ u8 regs[0x23];
+ u8 lastaddress;
+ u8 clk_reg_f;
+ u8 clk_reg_b;
+ int mode;
+ u32 bitrate;
+ int ready;
+ int dr;
+ int slot_stat;
+
+ u8 amem[1024];
+ int amem_read;
+
+ int cammode;
+ struct mutex lock;
+};
+
+static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
+ u8 reg, u8 data)
+{
+ u8 m[2] = {reg, data};
+ struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = m, .len = 2};
+
+ if (i2c_transfer(adapter, &msg, 1) != 1) {
+ printk(KERN_ERR "Failed to write to I2C register %02x@%02x!\n",
+ reg, adr);
+ return -1;
+ }
+ return 0;
+}
+
+static int i2c_write(struct i2c_adapter *adapter, u8 adr,
+ u8 *data, u8 len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
+
+ if (i2c_transfer(adapter, &msg, 1) != 1) {
+ printk(KERN_ERR "Failed to write to I2C!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
+ u8 reg, u8 *val)
+{
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = &reg, .len = 1 },
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+
+ if (i2c_transfer(adapter, msgs, 2) != 2) {
+ printk(KERN_ERR "error in i2c_read_reg\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int i2c_read(struct i2c_adapter *adapter, u8 adr,
+ u8 reg, u8 *data, u8 n)
+{
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = &reg, .len = 1 },
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = n } };
+
+ if (i2c_transfer(adapter, msgs, 2) != 2) {
+ printk(KERN_ERR "error in i2c_read\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
+{
+ int status;
+
+ status = i2c_write_reg(ci->i2c, ci->adr, 0, adr);
+ if (!status) {
+ ci->lastaddress = adr;
+ status = i2c_read(ci->i2c, ci->adr, 1, data, n);
+ }
+ return status;
+}
+
+static int read_reg(struct cxd *ci, u8 reg, u8 *val)
+{
+ return read_block(ci, reg, val, 1);
+}
+
+
+static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
+{
+ int status;
+ u8 addr[3] = { 2, address&0xff, address>>8 };
+
+ status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ if (!status)
+ status = i2c_read(ci->i2c, ci->adr, 3, data, n);
+ return status;
+}
+
+static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
+{
+ int status;
+ u8 addr[3] = { 2, address&0xff, address>>8 };
+
+ status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ if (!status) {
+ u8 buf[256] = {3};
+ memcpy(buf+1, data, n);
+ status = i2c_write(ci->i2c, ci->adr, buf, n+1);
+ }
+ return status;
+}
+
+static int read_io(struct cxd *ci, u16 address, u8 *val)
+{
+ int status;
+ u8 addr[3] = { 2, address&0xff, address>>8 };
+
+ status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ if (!status)
+ status = i2c_read(ci->i2c, ci->adr, 3, val, 1);
+ return status;
+}
+
+static int write_io(struct cxd *ci, u16 address, u8 val)
+{
+ int status;
+ u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 buf[2] = { 3, val };
+
+ status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ if (!status)
+ status = i2c_write(ci->i2c, ci->adr, buf, 2);
+
+ return status;
+}
+
+
+static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
+{
+ int status;
+
+ status = i2c_write_reg(ci->i2c, ci->adr, 0, reg);
+ if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
+ status = i2c_read_reg(ci->i2c, ci->adr, 1, &ci->regs[reg]);
+ ci->regs[reg] = (ci->regs[reg]&(~mask))|val;
+ if (!status) {
+ ci->lastaddress = reg;
+ status = i2c_write_reg(ci->i2c, ci->adr, 1, ci->regs[reg]);
+ }
+ if (reg == 0x20)
+ ci->regs[reg] &= 0x7f;
+ return status;
+}
+
+static int write_reg(struct cxd *ci, u8 reg, u8 val)
+{
+ return write_regm(ci, reg, val, 0xff);
+}
+
+#ifdef BUFFER_MODE
+static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
+{
+ int status;
+ u8 buf[256] = {1};
+
+ status = i2c_write_reg(ci->i2c, ci->adr, 0, adr);
+ if (!status) {
+ ci->lastaddress = adr;
+ memcpy(buf+1, data, n);
+ status = i2c_write(ci->i2c, ci->adr, buf, n+1);
+ }
+ return status;
+}
+#endif
+
+static void set_mode(struct cxd *ci, int mode)
+{
+ if (mode == ci->mode)
+ return;
+
+ switch (mode) {
+ case 0x00: /* IO mem */
+ write_regm(ci, 0x06, 0x00, 0x07);
+ break;
+ case 0x01: /* ATT mem */
+ write_regm(ci, 0x06, 0x02, 0x07);
+ break;
+ default:
+ break;
+ }
+ ci->mode = mode;
+}
+
+static void cam_mode(struct cxd *ci, int mode)
+{
+ if (mode == ci->cammode)
+ return;
+
+ switch (mode) {
+ case 0x00:
+ write_regm(ci, 0x20, 0x80, 0x80);
+ break;
+ case 0x01:
+ printk(KERN_INFO "enable cam buffer mode\n");
+ /* write_reg(ci, 0x0d, 0x00); */
+ /* write_reg(ci, 0x0e, 0x01); */
+ write_regm(ci, 0x08, 0x40, 0x40);
+ /* read_reg(ci, 0x12, &dummy); */
+ write_regm(ci, 0x08, 0x80, 0x80);
+ break;
+ default:
+ break;
+ }
+ ci->cammode = mode;
+}
+
+
+
+#define CHK_ERROR(s) if ((status = s)) break
+
+static int init(struct cxd *ci)
+{
+ int status;
+
+ mutex_lock(&ci->lock);
+ ci->mode = -1;
+ do {
+ CHK_ERROR(write_reg(ci, 0x00, 0x00));
+ CHK_ERROR(write_reg(ci, 0x01, 0x00));
+ CHK_ERROR(write_reg(ci, 0x02, 0x10));
+ CHK_ERROR(write_reg(ci, 0x03, 0x00));
+ CHK_ERROR(write_reg(ci, 0x05, 0xFF));
+ CHK_ERROR(write_reg(ci, 0x06, 0x1F));
+ CHK_ERROR(write_reg(ci, 0x07, 0x1F));
+ CHK_ERROR(write_reg(ci, 0x08, 0x28));
+ CHK_ERROR(write_reg(ci, 0x14, 0x20));
+
+ CHK_ERROR(write_reg(ci, 0x09, 0x4D)); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ CHK_ERROR(write_reg(ci, 0x0A, 0xA7)); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
+
+ /* Sync detector */
+ CHK_ERROR(write_reg(ci, 0x0B, 0x33));
+ CHK_ERROR(write_reg(ci, 0x0C, 0x33));
+
+ CHK_ERROR(write_regm(ci, 0x14, 0x00, 0x0F));
+ CHK_ERROR(write_reg(ci, 0x15, ci->clk_reg_b));
+ CHK_ERROR(write_regm(ci, 0x16, 0x00, 0x0F));
+ CHK_ERROR(write_reg(ci, 0x17, ci->clk_reg_f));
+
+ CHK_ERROR(write_reg(ci, 0x20, 0x28)); /* Integer Divider, Falling Edge, Internal Sync, */
+ CHK_ERROR(write_reg(ci, 0x21, 0x00)); /* MCLKI = TICLK/8 */
+ CHK_ERROR(write_reg(ci, 0x22, 0x07)); /* MCLKI = TICLK/8 */
+
+
+ CHK_ERROR(write_regm(ci, 0x20, 0x80, 0x80)); /* Reset CAM state machine */
+
+ CHK_ERROR(write_regm(ci, 0x03, 0x02, 0x02)); /* Enable IREQA Interrupt */
+ CHK_ERROR(write_reg(ci, 0x01, 0x04)); /* Enable CD Interrupt */
+ CHK_ERROR(write_reg(ci, 0x00, 0x31)); /* Enable TS1,Hot Swap,Slot A */
+ CHK_ERROR(write_regm(ci, 0x09, 0x08, 0x08)); /* Put TS in bypass */
+ ci->cammode = -1;
+#ifdef BUFFER_MODE
+ cam_mode(ci, 0);
+#endif
+ } while (0);
+ mutex_unlock(&ci->lock);
+
+ return 0;
+}
+
+
+static int read_attribute_mem(struct dvb_ca_en50221 *ca,
+ int slot, int address)
+{
+ struct cxd *ci = ca->data;
+ u8 val;
+ mutex_lock(&ci->lock);
+ set_mode(ci, 1);
+ read_pccard(ci, address, &val, 1);
+ mutex_unlock(&ci->lock);
+ return val;
+}
+
+
+static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
+ int address, u8 value)
+{
+ struct cxd *ci = ca->data;
+
+ mutex_lock(&ci->lock);
+ set_mode(ci, 1);
+ write_pccard(ci, address, &value, 1);
+ mutex_unlock(&ci->lock);
+ return 0;
+}
+
+static int read_cam_control(struct dvb_ca_en50221 *ca,
+ int slot, u8 address)
+{
+ struct cxd *ci = ca->data;
+ u8 val;
+
+ mutex_lock(&ci->lock);
+ set_mode(ci, 0);
+ read_io(ci, address, &val);
+ mutex_unlock(&ci->lock);
+ return val;
+}
+
+static int write_cam_control(struct dvb_ca_en50221 *ca, int slot,
+ u8 address, u8 value)
+{
+ struct cxd *ci = ca->data;
+
+ mutex_lock(&ci->lock);
+ set_mode(ci, 0);
+ write_io(ci, address, value);
+ mutex_unlock(&ci->lock);
+ return 0;
+}
+
+static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct cxd *ci = ca->data;
+
+ mutex_lock(&ci->lock);
+ cam_mode(ci, 0);
+ write_reg(ci, 0x00, 0x21);
+ write_reg(ci, 0x06, 0x1F);
+ write_reg(ci, 0x00, 0x31);
+ write_regm(ci, 0x20, 0x80, 0x80);
+ write_reg(ci, 0x03, 0x02);
+ ci->ready = 0;
+ ci->mode = -1;
+ {
+ int i;
+ for (i = 0; i < 100; i++) {
+ msleep(10);
+ if (ci->ready)
+ break;
+ }
+ }
+ mutex_unlock(&ci->lock);
+ /* msleep(500); */
+ return 0;
+}
+
+static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct cxd *ci = ca->data;
+
+ printk(KERN_INFO "slot_shutdown\n");
+ mutex_lock(&ci->lock);
+ /* write_regm(ci, 0x09, 0x08, 0x08); */
+ write_regm(ci, 0x20, 0x80, 0x80);
+ write_regm(ci, 0x06, 0x07, 0x07);
+ ci->mode = -1;
+ mutex_unlock(&ci->lock);
+ return 0; /* shutdown(ci); */
+}
+
+static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct cxd *ci = ca->data;
+
+ mutex_lock(&ci->lock);
+ write_regm(ci, 0x09, 0x00, 0x08);
+ set_mode(ci, 0);
+#ifdef BUFFER_MODE
+ cam_mode(ci, 1);
+#endif
+ mutex_unlock(&ci->lock);
+ return 0;
+}
+
+
+static int campoll(struct cxd *ci)
+{
+ u8 istat;
+
+ read_reg(ci, 0x04, &istat);
+ if (!istat)
+ return 0;
+ write_reg(ci, 0x05, istat);
+
+ if (istat&0x40) {
+ ci->dr = 1;
+ printk(KERN_INFO "DR\n");
+ }
+ if (istat&0x20)
+ printk(KERN_INFO "WC\n");
+
+ if (istat&2) {
+ u8 slotstat;
+
+ read_reg(ci, 0x01, &slotstat);
+ if (!(2&slotstat)) {
+ if (!ci->slot_stat) {
+ ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
+ write_regm(ci, 0x03, 0x08, 0x08);
+ }
+
+ } else {
+ if (ci->slot_stat) {
+ ci->slot_stat = 0;
+ write_regm(ci, 0x03, 0x00, 0x08);
+ printk(KERN_INFO "NO CAM\n");
+ ci->ready = 0;
+ }
+ }
+ if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
+ ci->ready = 1;
+ ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
+ printk(KERN_INFO "READY\n");
+ }
+ }
+ return 0;
+}
+
+
+static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
+{
+ struct cxd *ci = ca->data;
+ u8 slotstat;
+
+ mutex_lock(&ci->lock);
+ campoll(ci);
+ read_reg(ci, 0x01, &slotstat);
+ mutex_unlock(&ci->lock);
+
+ return ci->slot_stat;
+}
+
+#ifdef BUFFER_MODE
+static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
+{
+ struct cxd *ci = ca->data;
+ u8 msb, lsb;
+ u16 len;
+
+ mutex_lock(&ci->lock);
+ campoll(ci);
+ mutex_unlock(&ci->lock);
+
+ printk(KERN_INFO "read_data\n");
+ if (!ci->dr)
+ return 0;
+
+ mutex_lock(&ci->lock);
+ read_reg(ci, 0x0f, &msb);
+ read_reg(ci, 0x10, &lsb);
+ len = (msb<<8)|lsb;
+ read_block(ci, 0x12, ebuf, len);
+ ci->dr = 0;
+ mutex_unlock(&ci->lock);
+
+ return len;
+}
+
+static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
+{
+ struct cxd *ci = ca->data;
+
+ mutex_lock(&ci->lock);
+ printk(KERN_INFO "write_data %d\n", ecount);
+ write_reg(ci, 0x0d, ecount>>8);
+ write_reg(ci, 0x0e, ecount&0xff);
+ write_block(ci, 0x11, ebuf, ecount);
+ mutex_unlock(&ci->lock);
+ return ecount;
+}
+#endif
+
+static struct dvb_ca_en50221 en_templ = {
+ .read_attribute_mem = read_attribute_mem,
+ .write_attribute_mem = write_attribute_mem,
+ .read_cam_control = read_cam_control,
+ .write_cam_control = write_cam_control,
+ .slot_reset = slot_reset,
+ .slot_shutdown = slot_shutdown,
+ .slot_ts_enable = slot_ts_enable,
+ .poll_slot_status = poll_slot_status,
+#ifdef BUFFER_MODE
+ .read_data = read_data,
+ .write_data = write_data,
+#endif
+
+};
+
+struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv,
+ struct i2c_adapter *i2c)
+{
+ struct cxd *ci = NULL;
+ u32 bitrate = 62000000;
+ u8 val;
+
+ if (i2c_read_reg(i2c, adr, 0, &val) < 0) {
+ printk(KERN_ERR "No CXD2099 detected at %02x\n", adr);
+ return NULL;
+ }
+
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+ if (!ci)
+ return NULL;
+
+ mutex_init(&ci->lock);
+ ci->i2c = i2c;
+ ci->adr = adr;
+ ci->lastaddress = 0xff;
+ ci->clk_reg_b = 0x4a;
+ ci->clk_reg_f = 0x1b;
+ ci->bitrate = bitrate;
+
+ memcpy(&ci->en, &en_templ, sizeof(en_templ));
+ ci->en.data = ci;
+ init(ci);
+ printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->adr);
+ return &ci->en;
+}
+EXPORT_SYMBOL(cxd2099_attach);
+
+MODULE_DESCRIPTION("cxd2099");
+MODULE_AUTHOR("Ralph Metzler <rjkm@metzlerbros.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/cxd2099/cxd2099.h b/drivers/staging/cxd2099/cxd2099.h
new file mode 100644
index 000000000000..bed54ff3e30b
--- /dev/null
+++ b/drivers/staging/cxd2099/cxd2099.h
@@ -0,0 +1,41 @@
+/*
+ * cxd2099.h: Driver for the CXD2099AR Common Interface Controller
+ *
+ * Copyright (C) 2010 DigitalDevices UG
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef _CXD2099_H_
+#define _CXD2099_H_
+
+#include <dvb_ca_en50221.h>
+
+#if defined(CONFIG_DVB_CXD2099) || \
+ (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
+struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c);
+#else
+static inline struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
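For context, a bridge driver using this header would attach the CI core and hand it to the dvb-core EN50221 layer roughly as sketched below; the I2C address, function name and adapter pointers are assumptions, not the actual ngene glue:

	#include "cxd2099.h"

	/* hypothetical bridge-side hookup, assuming the bridge already owns a
	 * struct dvb_adapter and the I2C bus the CXD2099 sits on */
	static int bridge_attach_ci(struct dvb_adapter *adapter, struct i2c_adapter *i2c)
	{
		struct dvb_ca_en50221 *ca;

		ca = cxd2099_attach(0x40, NULL, i2c);	/* 0x40: assumed I2C address */
		if (!ca)
			return -ENODEV;

		/* register a single CI slot with the EN50221 core */
		return dvb_ca_en50221_init(adapter, ca, 0, 1);
	}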
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 2163d60c2eaf..3724e1e67ec2 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -118,13 +118,14 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
*total_flags = new_flags;
}
-static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct wbsoft_priv *priv = dev->priv;
if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) {
priv->sMlmeFrame.wNumTxMMPDUDiscarded++;
- return NETDEV_TX_BUSY;
+ kfree_skb(skb);
+ return;
}
priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
@@ -140,8 +141,6 @@ static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
*/
Mds_Tx(priv);
-
- return NETDEV_TX_OK;
}
static int wbsoft_start(struct ieee80211_hw *dev)
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 4bbe8208b241..74d84f7fbb3d 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -38,7 +38,6 @@
#include <target/target_core_base.h>
#include <target/target_core_device.h>
-#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 158cecbec718..4a109835e420 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -282,6 +282,9 @@ int core_tmr_lun_reset(
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
+ } else {
+ if (atomic_read(&task->task_execute_queue) != 0)
+ transport_remove_task_from_execute_queue(task, dev);
}
__transport_stop_task_timer(task, &flags);
@@ -301,6 +304,7 @@ int core_tmr_lun_reset(
DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
+ atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
@@ -310,6 +314,7 @@ int core_tmr_lun_reset(
}
DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+ atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 236e22d8cfae..4bbf6c147f89 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1207,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
*
*
*/
-static void transport_remove_task_from_execute_queue(
+void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
@@ -5549,7 +5549,8 @@ static void transport_generic_wait_for_tasks(
atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
}
- if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+ if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
+ atomic_read(&T_TASK(cmd)->t_transport_aborted))
goto remove;
atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
@@ -5956,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev)
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
+ } else {
+ if (atomic_read(&task->task_execute_queue) != 0)
+ transport_remove_task_from_execute_queue(task, dev);
}
__transport_stop_task_timer(task, &flags);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba3ca23..bf7c687519ef 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
menuconfig THERMAL
tristate "Generic Thermal sysfs driver"
- depends on NET
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c79280..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
static unsigned int thermal_event_seqnum;
-static struct genl_family thermal_event_genl_family = {
- .id = GENL_ID_GENERATE,
- .name = THERMAL_GENL_FAMILY_NAME,
- .version = THERMAL_GENL_VERSION,
- .maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
- .name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_unregister);
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = THERMAL_GENL_FAMILY_NAME,
+ .version = THERMAL_GENL_VERSION,
+ .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+ .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
int generate_netlink_event(u32 orig, enum events event)
{
struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
return result;
}
+static void genetlink_exit(void)
+{
+ genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
static int __init thermal_init(void)
{
int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
return result;
}
-static void genetlink_exit(void)
-{
- genl_unregister_family(&thermal_event_genl_family);
-}
-
static void __exit thermal_exit(void)
{
class_unregister(&thermal_class);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index bedc6c1b6fa5..af4e6505780f 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -292,7 +292,7 @@ struct hvcs_struct {
/*
* Any variable below the kref is valid before a tty is connected and
* stays valid after the tty is disconnected. These shouldn't be
- * whacked until the koject refcount reaches zero though some entries
+ * whacked until the kobject refcount reaches zero though some entries
* may be changed via sysfs initiatives.
*/
struct kref kref; /* ref count & hvcs_struct lifetime */
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 2b8334601c8b..532cdd6fa6fc 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -537,8 +537,8 @@ config SERIAL_S3C6400
config SERIAL_S5PV210
tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_S5PV310)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_S5PV310)
+ depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_EXYNOS4210)
+ select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210)
default y
help
Serial port support for Samsung's S5P family of SoCs
@@ -1110,29 +1110,6 @@ config SERIAL_PMACZILOG_CONSOLE
on your (Power)Mac as the console, you can do so by answering
Y to this option.
-config SERIAL_LH7A40X
- tristate "Sharp LH7A40X embedded UART support"
- depends on ARM && ARCH_LH7A40X
- select SERIAL_CORE
- help
- This enables support for the three on-board UARTs of the
- Sharp LH7A40X series CPUs. Choose Y or M.
-
-config SERIAL_LH7A40X_CONSOLE
- bool "Support for console on Sharp LH7A40X serial port"
- depends on SERIAL_LH7A40X=y
- select SERIAL_CORE_CONSOLE
- help
- Say Y here if you wish to use one of the serial ports as the
- system console--the system console is the device which
- receives all kernel messages and warnings and which allows
- logins in single user mode.
-
- Even if you say Y here, the currently visible framebuffer console
- (/dev/tty0) will still be used as the default system console, but
- you can alter that using a kernel command line, for example
- "console=ttyAM1".
-
config SERIAL_CPM
tristate "CPM SCC/SMC serial port support"
depends on CPM2 || 8xx
@@ -1596,4 +1573,19 @@ config SERIAL_PCH_UART
This driver is for the PCH (Platform Controller Hub) UART of the Intel EG20T,
which is an IOH (Input/Output Hub) for x86 embedded processors.
When PCH_DMA is enabled, this PCH UART works in DMA mode.
+
+config SERIAL_MXS_AUART
+ depends on ARCH_MXS
+ tristate "MXS AUART support"
+ select SERIAL_CORE
+ help
+ This driver supports the MXS Application UART (AUART) port.
+
+config SERIAL_MXS_AUART_CONSOLE
+ bool "MXS AUART console support"
+ depends on SERIAL_MXS_AUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an MXS AUART port to be the system console.
+
endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8ea92e9c73b0..cf4a43411d36 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_SERIAL_68328) += 68328serial.o
obj-$(CONFIG_SERIAL_68360) += 68360serial.o
obj-$(CONFIG_SERIAL_MCF) += mcf.o
obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
-obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_ZS) += zs.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
@@ -92,3 +91,4 @@ obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
+obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index f9b49b5ff5e1..a20927fc3e1a 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -465,12 +465,23 @@ static int __devexit altera_jtaguart_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id altera_jtaguart_match[] = {
+ { .compatible = "ALTR,juart-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_jtaguart_match);
+#else
+#define altera_jtaguart_match NULL
+#endif /* CONFIG_OF */
+
static struct platform_driver altera_jtaguart_platform_driver = {
.probe = altera_jtaguart_probe,
.remove = __devexit_p(altera_jtaguart_remove),
.driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = altera_jtaguart_match,
},
};
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 721216292a50..1803c37d58ab 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -24,6 +24,7 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/io.h>
#include <linux/altera_uart.h>
@@ -511,6 +512,29 @@ static struct uart_driver altera_uart_driver = {
.cons = ALTERA_UART_CONSOLE,
};
+#ifdef CONFIG_OF
+static int altera_uart_get_of_uartclk(struct platform_device *pdev,
+ struct uart_port *port)
+{
+ int len;
+ const __be32 *clk;
+
+ clk = of_get_property(pdev->dev.of_node, "clock-frequency", &len);
+ if (!clk || len < sizeof(__be32))
+ return -ENODEV;
+
+ port->uartclk = be32_to_cpup(clk);
+
+ return 0;
+}
+#else
+static int altera_uart_get_of_uartclk(struct platform_device *pdev,
+ struct uart_port *port)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
static int __devinit altera_uart_probe(struct platform_device *pdev)
{
struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
@@ -518,6 +542,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
struct resource *res_mem;
struct resource *res_irq;
int i = pdev->id;
+ int ret;
/* -1 emphasizes that the platform must have one port, no .N suffix */
if (i == -1)
@@ -542,6 +567,15 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
else if (platp->irq)
port->irq = platp->irq;
+ /* Check platform data first so we can override device node data */
+ if (platp)
+ port->uartclk = platp->uartclk;
+ else {
+ ret = altera_uart_get_of_uartclk(pdev, port);
+ if (ret)
+ return ret;
+ }
+
port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
if (!port->membase)
return -ENOMEM;
@@ -549,7 +583,6 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
port->line = i;
port->type = PORT_ALTERA_UART;
port->iotype = SERIAL_IO_MEM;
- port->uartclk = platp->uartclk;
port->ops = &altera_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->private_data = platp;
@@ -567,13 +600,23 @@ static int __devexit altera_uart_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id altera_uart_match[] = {
+ { .compatible = "ALTR,uart-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_uart_match);
+#else
+#define altera_uart_match NULL
+#endif /* CONFIG_OF */
+
static struct platform_driver altera_uart_platform_driver = {
.probe = altera_uart_probe,
.remove = __devexit_p(altera_uart_remove),
.driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .pm = NULL,
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = altera_uart_match,
},
};
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..d742dd2c525c 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -676,7 +676,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl010_probe(struct amba_device *dev, struct amba_id *id)
+static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
void __iomem *base;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..0dae46f91021 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1349,7 +1349,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl011_probe(struct amba_device *dev, struct amba_id *id)
+static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 095a5d562618..1ab999b04ef3 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -553,8 +553,7 @@ static struct uart_driver grlib_apbuart_driver = {
/* OF Platform Driver */
/* ======================================================================== */
-static int __devinit apbuart_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit apbuart_probe(struct platform_device *op)
{
int i = -1;
struct uart_port *port = NULL;
@@ -587,7 +586,7 @@ static struct of_device_id __initdata apbuart_match[] = {
{},
};
-static struct of_platform_driver grlib_apbuart_of_driver = {
+static struct platform_driver grlib_apbuart_of_driver = {
.probe = apbuart_probe,
.driver = {
.owner = THIS_MODULE,
@@ -676,10 +675,10 @@ static int __init grlib_apbuart_init(void)
return ret;
}
- ret = of_register_platform_driver(&grlib_apbuart_of_driver);
+ ret = platform_driver_register(&grlib_apbuart_of_driver);
if (ret) {
printk(KERN_ERR
- "%s: of_register_platform_driver failed (%i)\n",
+ "%s: platform_driver_register failed (%i)\n",
__FILE__, ret);
uart_unregister_driver(&grlib_apbuart_driver);
return ret;
@@ -697,7 +696,7 @@ static void __exit grlib_apbuart_exit(void)
&grlib_apbuart_ports[i]);
uart_unregister_driver(&grlib_apbuart_driver);
- of_unregister_platform_driver(&grlib_apbuart_of_driver);
+ platform_driver_unregister(&grlib_apbuart_of_driver);
}
module_init(grlib_apbuart_init);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 8692ff98fc07..a9a6a5fd169e 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1359,8 +1359,7 @@ static struct uart_driver cpm_reg = {
static int probe_index;
-static int __devinit cpm_uart_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit cpm_uart_probe(struct platform_device *ofdev)
{
int index = probe_index++;
struct uart_cpm_port *pinfo = &cpm_uart_ports[index];
@@ -1405,7 +1404,7 @@ static struct of_device_id cpm_uart_match[] = {
{}
};
-static struct of_platform_driver cpm_uart_driver = {
+static struct platform_driver cpm_uart_driver = {
.driver = {
.name = "cpm_uart",
.owner = THIS_MODULE,
@@ -1421,7 +1420,7 @@ static int __init cpm_uart_init(void)
if (ret)
return ret;
- ret = of_register_platform_driver(&cpm_uart_driver);
+ ret = platform_driver_register(&cpm_uart_driver);
if (ret)
uart_unregister_driver(&cpm_reg);
@@ -1430,7 +1429,7 @@ static int __init cpm_uart_init(void)
static void __exit cpm_uart_exit(void)
{
- of_unregister_platform_driver(&cpm_uart_driver);
+ platform_driver_unregister(&cpm_uart_driver);
uart_unregister_driver(&cpm_reg);
}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 25a8bc565f40..87e7e6c876d4 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -131,7 +131,7 @@ static void kgdboc_unregister_kbd(void)
static int kgdboc_option_setup(char *opt)
{
- if (strlen(opt) > MAX_CONFIG_LEN) {
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdboc: config string too long\n");
return -ENOSPC;
}
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 126ec7f568ec..a0bcd8a3758d 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1302,8 +1302,7 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
{},
};
-static int __devinit
-mpc52xx_uart_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit mpc52xx_uart_of_probe(struct platform_device *op)
{
int idx = -1;
unsigned int uartclk;
@@ -1311,8 +1310,6 @@ mpc52xx_uart_of_probe(struct platform_device *op, const struct of_device_id *mat
struct resource res;
int ret;
- dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p, match=%p)\n", op, match);
-
/* Check validity & presence */
for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
if (mpc52xx_uart_nodes[idx] == op->dev.of_node)
@@ -1453,7 +1450,7 @@ mpc52xx_uart_of_enumerate(void)
MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
-static struct of_platform_driver mpc52xx_uart_of_driver = {
+static struct platform_driver mpc52xx_uart_of_driver = {
.probe = mpc52xx_uart_of_probe,
.remove = mpc52xx_uart_of_remove,
#ifdef CONFIG_PM
@@ -1497,9 +1494,9 @@ mpc52xx_uart_init(void)
return ret;
}
- ret = of_register_platform_driver(&mpc52xx_uart_of_driver);
+ ret = platform_driver_register(&mpc52xx_uart_of_driver);
if (ret) {
- printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n",
+ printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
__FILE__, ret);
uart_unregister_driver(&mpc52xx_uart_driver);
return ret;
@@ -1514,7 +1511,7 @@ mpc52xx_uart_exit(void)
if (psc_ops->fifoc_uninit)
psc_ops->fifoc_uninit();
- of_unregister_platform_driver(&mpc52xx_uart_of_driver);
+ platform_driver_unregister(&mpc52xx_uart_of_driver);
uart_unregister_driver(&mpc52xx_uart_driver);
}
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 8e43a7b69e64..bfee9b4c6661 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -31,6 +32,7 @@
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include "msm_serial.h"
@@ -38,9 +40,20 @@ struct msm_port {
struct uart_port uart;
char name[16];
struct clk *clk;
+ struct clk *pclk;
unsigned int imr;
+ unsigned int *gsbi_base;
+ int is_uartdm;
+ unsigned int old_snap_state;
};
+static inline void wait_for_xmitr(struct uart_port *port, int bits)
+{
+ if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY))
+ while ((msm_read(port, UART_ISR) & bits) != bits)
+ cpu_relax();
+}
+
static void msm_stop_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
@@ -73,6 +86,61 @@ static void msm_enable_ms(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
}
+static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int sr;
+ int count = 0;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ if (misr & UART_IMR_RXSTALE) {
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
+ msm_port->old_snap_state;
+ msm_port->old_snap_state = 0;
+ } else {
+ count = 4 * (msm_read(port, UART_RFWR));
+ msm_port->old_snap_state += count;
+ }
+
+ /* TODO: Precise error reporting */
+
+ port->icount.rx += count;
+
+ while (count > 0) {
+ unsigned int c;
+
+ sr = msm_read(port, UART_SR);
+ if ((sr & UART_SR_RX_READY) == 0) {
+ msm_port->old_snap_state -= count;
+ break;
+ }
+ c = msm_read(port, UARTDM_RF);
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR)
+ port->icount.frame++;
+
+ /* TODO: handle sysrq */
+ tty_insert_flip_string(tty, (char *) &c,
+ (count > 4) ? 4 : count);
+ count -= 4;
+ }
+
+ tty_flip_buffer_push(tty);
+ if (misr & (UART_IMR_RXSTALE))
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+}
+
static void handle_rx(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
@@ -121,6 +189,12 @@ static void handle_rx(struct uart_port *port)
tty_flip_buffer_push(tty);
}
+static void reset_dm_count(struct uart_port *port)
+{
+ wait_for_xmitr(port, UART_ISR_TX_READY);
+ msm_write(port, 1, UARTDM_NCF_TX);
+}
+
static void handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -128,11 +202,18 @@ static void handle_tx(struct uart_port *port)
int sent_tx;
if (port->x_char) {
- msm_write(port, port->x_char, UART_TF);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ msm_write(port, port->x_char,
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
port->icount.tx++;
port->x_char = 0;
}
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
if (uart_circ_empty(xmit)) {
/* disable tx interrupts */
@@ -140,8 +221,11 @@ static void handle_tx(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
break;
}
+ msm_write(port, xmit->buf[xmit->tail],
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
- msm_write(port, xmit->buf[xmit->tail], UART_TF);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -169,8 +253,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
- handle_rx(port);
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (msm_port->is_uartdm)
+ handle_rx_dm(port, misr);
+ else
+ handle_rx(port);
+ }
if (misr & UART_IMR_TXLEV)
handle_tx(port);
if (misr & UART_IMR_DELTA_CTS)
@@ -192,10 +280,21 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
}
-static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+
+static void msm_reset(struct uart_port *port)
{
- unsigned int mr;
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
mr = msm_read(port, UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
@@ -219,6 +318,7 @@ static void msm_break_ctl(struct uart_port *port, int break_ctl)
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
{
unsigned int baud_code, rxstale, watermark;
+ struct msm_port *msm_port = UART_TO_MSM(port);
switch (baud) {
case 300:
@@ -273,6 +373,9 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
break;
}
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+
msm_write(port, baud_code, UART_CSR);
/* RX stale watermark */
@@ -288,25 +391,23 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
/* set TX watermark */
msm_write(port, 10, UART_TFWR);
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
return baud;
}
-static void msm_reset(struct uart_port *port)
-{
- /* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
-}
static void msm_init_clock(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
msm_serial_set_mnd_regs(port);
}
@@ -347,15 +448,31 @@ static int msm_startup(struct uart_port *port)
msm_write(port, data, UART_IPR);
}
- msm_reset(port);
+ data = 0;
+ if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_reset(port);
+ data = UART_CR_TX_ENABLE;
+ }
+
+ data |= UART_CR_RX_ENABLE;
+ msm_write(port, data, UART_CR); /* enable TX & RX */
- msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+ /* Make sure IPR is not 0 to start with */
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_IPR_STALE_LSB, UART_IPR);
/* turn on RX and CTS interrupts */
msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
UART_IMR_CURRENT_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ if (msm_port->is_uartdm) {
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
+ msm_write(port, msm_port->imr, UART_IMR);
return 0;
}
@@ -384,7 +501,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
baud = msm_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
-
+
/* calculate parity */
mr = msm_read(port, UART_MR2);
mr &= ~UART_MR2_PARITY_MODE;
@@ -454,48 +571,105 @@ static const char *msm_type(struct uart_port *port)
static void msm_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *resource;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
resource_size_t size;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
+ uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!uart_resource))
return;
- size = resource->end - resource->start + 1;
+ size = resource_size(uart_resource);
release_mem_region(port->mapbase, size);
iounmap(port->membase);
port->membase = NULL;
+
+ if (msm_port->gsbi_base) {
+ iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
+ GSBI_CONTROL);
+
+ gsbi_resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "gsbi_resource");
+
+ if (unlikely(!gsbi_resource))
+ return;
+
+ size = resource_size(gsbi_resource);
+ release_mem_region(gsbi_resource->start, size);
+ iounmap(msm_port->gsbi_base);
+ msm_port->gsbi_base = NULL;
+ }
}
static int msm_request_port(struct uart_port *port)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *resource;
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
resource_size_t size;
+ int ret;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
+ uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
+ if (unlikely(!uart_resource))
return -ENXIO;
- size = resource->end - resource->start + 1;
- if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ size = resource_size(uart_resource);
+
+ if (!request_mem_region(port->mapbase, size, "msm_serial"))
return -EBUSY;
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
- release_mem_region(port->mapbase, size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gsbi_resource");
+ /* Is this a GSBI-based port? */
+ if (gsbi_resource) {
+ size = resource_size(gsbi_resource);
+
+ if (!request_mem_region(gsbi_resource->start, size,
+ "msm_serial")) {
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
+ if (!msm_port->gsbi_base) {
+ ret = -EBUSY;
+ goto fail_release_gsbi;
+ }
}
return 0;
+
+fail_release_gsbi:
+ release_mem_region(gsbi_resource->start, size);
+fail_release_port:
+ release_mem_region(port->mapbase, size);
+ return ret;
}
static void msm_config_port(struct uart_port *port, int flags)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int ret;
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MSM;
- msm_request_port(port);
+ ret = msm_request_port(port);
+ if (ret)
+ return;
}
+
+ if (msm_port->is_uartdm)
+ iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base +
+ GSBI_CONTROL);
}
static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -515,9 +689,13 @@ static void msm_power(struct uart_port *port, unsigned int state,
switch (state) {
case 0:
clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
break;
case 3:
clk_disable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_disable(msm_port->pclk);
break;
default:
printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
@@ -550,7 +728,7 @@ static struct msm_port msm_uart_ports[] = {
.iotype = UPIO_MEM,
.ops = &msm_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
- .fifosize = 512,
+ .fifosize = 64,
.line = 0,
},
},
@@ -559,7 +737,7 @@ static struct msm_port msm_uart_ports[] = {
.iotype = UPIO_MEM,
.ops = &msm_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
- .fifosize = 512,
+ .fifosize = 64,
.line = 1,
},
},
@@ -585,9 +763,14 @@ static inline struct uart_port *get_port_from_line(unsigned int line)
static void msm_console_putchar(struct uart_port *port, int c)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
;
- msm_write(port, c, UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
}
static void msm_console_write(struct console *co, const char *s,
@@ -609,12 +792,14 @@ static void msm_console_write(struct console *co, const char *s,
static int __init msm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
+ struct msm_port *msm_port;
int baud, flow, bits, parity;
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
if (unlikely(!port->membase))
return -ENXIO;
@@ -638,6 +823,11 @@ static int __init msm_console_setup(struct console *co, char *options)
msm_reset(port);
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
return uart_set_options(port, co, baud, parity, bits, flow);
@@ -685,14 +875,32 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- msm_port->clk = clk_get(&pdev->dev, "uart_clk");
- if (IS_ERR(msm_port->clk))
- return PTR_ERR(msm_port->clk);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource"))
+ msm_port->is_uartdm = 1;
+ else
+ msm_port->is_uartdm = 0;
+
+ if (msm_port->is_uartdm) {
+ msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
+ msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
+ } else {
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ msm_port->pclk = ERR_PTR(-ENOENT);
+ }
+
+ if (unlikely(IS_ERR(msm_port->clk) || (IS_ERR(msm_port->pclk) &&
+ msm_port->is_uartdm)))
+ return PTR_ERR(msm_port->clk);
+
+ if (msm_port->is_uartdm)
+ clk_set_rate(msm_port->clk, 7372800);
+
port->uartclk = clk_get_rate(msm_port->clk);
printk(KERN_INFO "uartclk = %d\n", port->uartclk);
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
if (unlikely(!resource))
return -ENXIO;
port->mapbase = resource->start;
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index f6ca9ca79e98..9b8dc5d0d855 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -54,6 +55,7 @@
#define UART_CSR_300 0x22
#define UART_TF 0x000C
+#define UARTDM_TF 0x0070
#define UART_CR 0x0010
#define UART_CR_CMD_NULL (0 << 4)
@@ -64,14 +66,17 @@
#define UART_CR_CMD_START_BREAK (5 << 4)
#define UART_CR_CMD_STOP_BREAK (6 << 4)
#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
#define UART_CR_CMD_PACKET_MODE (9 << 4)
#define UART_CR_CMD_MODE_RESET (12 << 4)
#define UART_CR_CMD_SET_RFR (13 << 4)
#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
#define UART_CR_TX_DISABLE (1 << 3)
-#define UART_CR_TX_ENABLE (1 << 3)
-#define UART_CR_RX_DISABLE (1 << 3)
-#define UART_CR_RX_ENABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
#define UART_IMR 0x0014
#define UART_IMR_TXLEV (1 << 0)
@@ -110,9 +115,20 @@
#define UART_SR_RX_FULL (1 << 1)
#define UART_SR_RX_READY (1 << 0)
-#define UART_RF 0x000C
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
+#define UART_RF 0x000C
+#define UARTDM_RF 0x0070
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+#define UART_ISR_TX_READY (1 << 7)
+
+#define GSBI_CONTROL 0x0
+#define GSBI_PROTOCOL_CODE 0x30
+#define GSBI_PROTOCOL_UART 0x40
+#define GSBI_PROTOCOL_IDLE 0x0
+
+#define UARTDM_DMRX 0x34
+#define UARTDM_NCF_TX 0x40
+#define UARTDM_RX_TOTAL_SNAP 0x38
#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
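With the probe changes above, msm_serial locates its register windows by resource name and, for GSBI-based UARTDM ports, expects the "gsbi_uart_clk" and "gsbi_pclk" clocks. A hypothetical board-file sketch of such a platform device follows; the base addresses, the device name and the omitted IRQ resource are placeholders, not values taken from this patch:

static struct resource msm_uartdm_resources[] = {
	{
		.name	= "uart_resource",
		.start	= 0x19c40000,			/* placeholder UART base */
		.end	= 0x19c40000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "gsbi_resource",
		.start	= 0x19c00000,			/* placeholder GSBI base */
		.end	= 0x19c00000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	/* IRQ resource omitted for brevity */
};

static struct platform_device msm_uartdm_device = {
	.name		= "msm_serial",			/* assumed to match the driver name */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(msm_uartdm_resources),
	.resource	= msm_uartdm_resources,
};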
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
new file mode 100644
index 000000000000..7e02c9c344fe
--- /dev/null
+++ b/drivers/tty/serial/mxs-auart.c
@@ -0,0 +1,798 @@
+/*
+ * Freescale STMP37XX/STMP378X Application UART driver
+ *
+ * Author: dmitry pervushin <dimka@embeddedalley.com>
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#define MXS_AUART_PORTS 5
+
+#define AUART_CTRL0 0x00000000
+#define AUART_CTRL0_SET 0x00000004
+#define AUART_CTRL0_CLR 0x00000008
+#define AUART_CTRL0_TOG 0x0000000c
+#define AUART_CTRL1 0x00000010
+#define AUART_CTRL1_SET 0x00000014
+#define AUART_CTRL1_CLR 0x00000018
+#define AUART_CTRL1_TOG 0x0000001c
+#define AUART_CTRL2 0x00000020
+#define AUART_CTRL2_SET 0x00000024
+#define AUART_CTRL2_CLR 0x00000028
+#define AUART_CTRL2_TOG 0x0000002c
+#define AUART_LINECTRL 0x00000030
+#define AUART_LINECTRL_SET 0x00000034
+#define AUART_LINECTRL_CLR 0x00000038
+#define AUART_LINECTRL_TOG 0x0000003c
+#define AUART_LINECTRL2 0x00000040
+#define AUART_LINECTRL2_SET 0x00000044
+#define AUART_LINECTRL2_CLR 0x00000048
+#define AUART_LINECTRL2_TOG 0x0000004c
+#define AUART_INTR 0x00000050
+#define AUART_INTR_SET 0x00000054
+#define AUART_INTR_CLR 0x00000058
+#define AUART_INTR_TOG 0x0000005c
+#define AUART_DATA 0x00000060
+#define AUART_STAT 0x00000070
+#define AUART_DEBUG 0x00000080
+#define AUART_VERSION 0x00000090
+#define AUART_AUTOBAUD 0x000000a0
+
+#define AUART_CTRL0_SFTRST (1 << 31)
+#define AUART_CTRL0_CLKGATE (1 << 30)
+
+#define AUART_CTRL2_CTSEN (1 << 15)
+#define AUART_CTRL2_RTS (1 << 11)
+#define AUART_CTRL2_RXE (1 << 9)
+#define AUART_CTRL2_TXE (1 << 8)
+#define AUART_CTRL2_UARTEN (1 << 0)
+
+#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
+#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
+#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
+#define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8
+#define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00
+#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8)
+#define AUART_LINECTRL_WLEN_MASK 0x00000060
+#define AUART_LINECTRL_WLEN(v) (((v) & 0x3) << 5)
+#define AUART_LINECTRL_FEN (1 << 4)
+#define AUART_LINECTRL_STP2 (1 << 3)
+#define AUART_LINECTRL_EPS (1 << 2)
+#define AUART_LINECTRL_PEN (1 << 1)
+#define AUART_LINECTRL_BRK (1 << 0)
+
+#define AUART_INTR_RTIEN (1 << 22)
+#define AUART_INTR_TXIEN (1 << 21)
+#define AUART_INTR_RXIEN (1 << 20)
+#define AUART_INTR_CTSMIEN (1 << 17)
+#define AUART_INTR_RTIS (1 << 6)
+#define AUART_INTR_TXIS (1 << 5)
+#define AUART_INTR_RXIS (1 << 4)
+#define AUART_INTR_CTSMIS (1 << 1)
+
+#define AUART_STAT_BUSY (1 << 29)
+#define AUART_STAT_CTS (1 << 28)
+#define AUART_STAT_TXFE (1 << 27)
+#define AUART_STAT_TXFF (1 << 25)
+#define AUART_STAT_RXFE (1 << 24)
+#define AUART_STAT_OERR (1 << 19)
+#define AUART_STAT_BERR (1 << 18)
+#define AUART_STAT_PERR (1 << 17)
+#define AUART_STAT_FERR (1 << 16)
+
+static struct uart_driver auart_driver;
+
+struct mxs_auart_port {
+ struct uart_port port;
+
+ unsigned int flags;
+ unsigned int ctrl;
+
+ unsigned int irq;
+
+ struct clk *clk;
+ struct device *dev;
+};
+
+static void mxs_auart_stop_tx(struct uart_port *u);
+
+#define to_auart_port(u) container_of(u, struct mxs_auart_port, port)
+
+static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
+{
+ struct circ_buf *xmit = &s->port.state->xmit;
+
+ while (!(readl(s->port.membase + AUART_STAT) &
+ AUART_STAT_TXFF)) {
+ if (s->port.x_char) {
+ s->port.icount.tx++;
+ writel(s->port.x_char,
+ s->port.membase + AUART_DATA);
+ s->port.x_char = 0;
+ continue;
+ }
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
+ s->port.icount.tx++;
+ writel(xmit->buf[xmit->tail],
+ s->port.membase + AUART_DATA);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+ } else
+ break;
+ }
+ if (uart_circ_empty(&(s->port.state->xmit)))
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_CLR);
+ else
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_SET);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
+}
+
+static void mxs_auart_rx_char(struct mxs_auart_port *s)
+{
+ int flag;
+ u32 stat;
+ u8 c;
+
+ c = readl(s->port.membase + AUART_DATA);
+ stat = readl(s->port.membase + AUART_STAT);
+
+ flag = TTY_NORMAL;
+ s->port.icount.rx++;
+
+ if (stat & AUART_STAT_BERR) {
+ s->port.icount.brk++;
+ if (uart_handle_break(&s->port))
+ goto out;
+ } else if (stat & AUART_STAT_PERR) {
+ s->port.icount.parity++;
+ } else if (stat & AUART_STAT_FERR) {
+ s->port.icount.frame++;
+ }
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ stat &= s->port.read_status_mask;
+
+ if (stat & AUART_STAT_BERR) {
+ flag = TTY_BREAK;
+ } else if (stat & AUART_STAT_PERR)
+ flag = TTY_PARITY;
+ else if (stat & AUART_STAT_FERR)
+ flag = TTY_FRAME;
+
+ if (stat & AUART_STAT_OERR)
+ s->port.icount.overrun++;
+
+ if (uart_handle_sysrq_char(&s->port, c))
+ goto out;
+
+ uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag);
+out:
+ writel(stat, s->port.membase + AUART_STAT);
+}
+
+static void mxs_auart_rx_chars(struct mxs_auart_port *s)
+{
+ struct tty_struct *tty = s->port.state->port.tty;
+ u32 stat = 0;
+
+ for (;;) {
+ stat = readl(s->port.membase + AUART_STAT);
+ if (stat & AUART_STAT_RXFE)
+ break;
+ mxs_auart_rx_char(s);
+ }
+
+ writel(stat, s->port.membase + AUART_STAT);
+ tty_flip_buffer_push(tty);
+}
+
+static int mxs_auart_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static int mxs_auart_verify_port(struct uart_port *u,
+ struct serial_struct *ser)
+{
+ if (u->type != PORT_UNKNOWN && u->type != PORT_IMX)
+ return -EINVAL;
+ return 0;
+}
+
+static void mxs_auart_config_port(struct uart_port *u, int flags)
+{
+}
+
+static const char *mxs_auart_type(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ return dev_name(s->dev);
+}
+
+static void mxs_auart_release_port(struct uart_port *u)
+{
+}
+
+static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ u32 ctrl = readl(u->membase + AUART_CTRL2);
+
+ ctrl &= ~AUART_CTRL2_RTS;
+ if (mctrl & TIOCM_RTS)
+ ctrl |= AUART_CTRL2_RTS;
+ s->ctrl = mctrl;
+ writel(ctrl, u->membase + AUART_CTRL2);
+}
+
+static u32 mxs_auart_get_mctrl(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+ u32 stat = readl(u->membase + AUART_STAT);
+ int ctrl2 = readl(u->membase + AUART_CTRL2);
+ u32 mctrl = s->ctrl;
+
+ mctrl &= ~TIOCM_CTS;
+ if (stat & AUART_STAT_CTS)
+ mctrl |= TIOCM_CTS;
+
+ if (ctrl2 & AUART_CTRL2_RTS)
+ mctrl |= TIOCM_RTS;
+
+ return mctrl;
+}
+
+static void mxs_auart_settermios(struct uart_port *u,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ u32 bm, ctrl, ctrl2, div;
+ unsigned int cflag, baud;
+
+ cflag = termios->c_cflag;
+
+ ctrl = AUART_LINECTRL_FEN;
+ ctrl2 = readl(u->membase + AUART_CTRL2);
+
+ /* byte size */
+ switch (cflag & CSIZE) {
+ case CS5:
+ bm = 0;
+ break;
+ case CS6:
+ bm = 1;
+ break;
+ case CS7:
+ bm = 2;
+ break;
+ case CS8:
+ bm = 3;
+ break;
+ default:
+ return;
+ }
+
+ ctrl |= AUART_LINECTRL_WLEN(bm);
+
+ /* parity */
+ if (cflag & PARENB) {
+ ctrl |= AUART_LINECTRL_PEN;
+ if ((cflag & PARODD) == 0)
+ ctrl |= AUART_LINECTRL_EPS;
+ }
+
+ u->read_status_mask = 0;
+
+ if (termios->c_iflag & INPCK)
+ u->read_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ u->read_status_mask |= AUART_STAT_BERR;
+
+ /*
+ * Characters to ignore
+ */
+ u->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & IGNBRK) {
+ u->ignore_status_mask |= AUART_STAT_BERR;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_OERR;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if (cflag & CREAD)
+ ctrl2 |= AUART_CTRL2_RXE;
+ else
+ ctrl2 &= ~AUART_CTRL2_RXE;
+
+ /* figure out the stop bits requested */
+ if (cflag & CSTOPB)
+ ctrl |= AUART_LINECTRL_STP2;
+
+ /* figure out the hardware flow control settings */
+ if (cflag & CRTSCTS)
+ ctrl2 |= AUART_CTRL2_CTSEN;
+ else
+ ctrl2 &= ~AUART_CTRL2_CTSEN;
+
+ /* set baud rate */
+ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
+ div = u->uartclk * 32 / baud;
+ ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
+ ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
+
+ writel(ctrl, u->membase + AUART_LINECTRL);
+ writel(ctrl2, u->membase + AUART_CTRL2);
+}
+
+static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+{
+ u32 istatus, istat;
+ struct mxs_auart_port *s = context;
+ u32 stat = readl(s->port.membase + AUART_STAT);
+
+ istatus = istat = readl(s->port.membase + AUART_INTR);
+
+ if (istat & AUART_INTR_CTSMIS) {
+ uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+ writel(AUART_INTR_CTSMIS,
+ s->port.membase + AUART_INTR_CLR);
+ istat &= ~AUART_INTR_CTSMIS;
+ }
+
+ if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) {
+ mxs_auart_rx_chars(s);
+ istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS);
+ }
+
+ if (istat & AUART_INTR_TXIS) {
+ mxs_auart_tx_chars(s);
+ istat &= ~AUART_INTR_TXIS;
+ }
+
+ writel(istatus & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static void mxs_auart_reset(struct uart_port *u)
+{
+ int i;
+ unsigned int reg;
+
+ writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_CLR);
+
+ for (i = 0; i < 10000; i++) {
+ reg = readl(u->membase + AUART_CTRL0);
+ if (!(reg & AUART_CTRL0_SFTRST))
+ break;
+ udelay(3);
+ }
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+}
+
+static int mxs_auart_startup(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ clk_enable(s->clk);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR);
+
+ /*
+ * Enable the FIFO so all four bytes of a DMA word are written to
+ * the output (otherwise only the LSB is written, i.e. 1 in 4 bytes)
+ */
+ writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET);
+
+ return 0;
+}
+
+static void mxs_auart_shutdown(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR_CLR);
+
+ clk_disable(s->clk);
+}
+
+static unsigned int mxs_auart_tx_empty(struct uart_port *u)
+{
+ if (readl(u->membase + AUART_STAT) & AUART_STAT_TXFE)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void mxs_auart_start_tx(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ /* enable transmitter */
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_SET);
+
+ mxs_auart_tx_chars(s);
+}
+
+static void mxs_auart_stop_tx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_stop_rx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_RXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_break_ctl(struct uart_port *u, int ctl)
+{
+ if (ctl)
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_SET);
+ else
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_CLR);
+}
+
+static void mxs_auart_enable_ms(struct uart_port *port)
+{
+ /* just empty */
+}
+
+static struct uart_ops mxs_auart_ops = {
+ .tx_empty = mxs_auart_tx_empty,
+ .start_tx = mxs_auart_start_tx,
+ .stop_tx = mxs_auart_stop_tx,
+ .stop_rx = mxs_auart_stop_rx,
+ .enable_ms = mxs_auart_enable_ms,
+ .break_ctl = mxs_auart_break_ctl,
+ .set_mctrl = mxs_auart_set_mctrl,
+ .get_mctrl = mxs_auart_get_mctrl,
+ .startup = mxs_auart_startup,
+ .shutdown = mxs_auart_shutdown,
+ .set_termios = mxs_auart_settermios,
+ .type = mxs_auart_type,
+ .release_port = mxs_auart_release_port,
+ .request_port = mxs_auart_request_port,
+ .config_port = mxs_auart_config_port,
+ .verify_port = mxs_auart_verify_port,
+};
+
+static struct mxs_auart_port *auart_port[MXS_AUART_PORTS];
+
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+static void mxs_auart_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int to = 1000;
+
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_TXFF) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(ch, port->membase + AUART_DATA);
+}
+
+static void
+auart_console_write(struct console *co, const char *str, unsigned int count)
+{
+ struct mxs_auart_port *s;
+ struct uart_port *port;
+ unsigned int old_ctrl0, old_ctrl2;
+ unsigned int to = 1000;
+
+ if (co->index > MXS_AUART_PORTS || co->index < 0)
+ return;
+
+ s = auart_port[co->index];
+ port = &s->port;
+
+ clk_enable(s->clk);
+
+ /* First save the CR then disable the interrupts */
+ old_ctrl2 = readl(port->membase + AUART_CTRL2);
+ old_ctrl0 = readl(port->membase + AUART_CTRL0);
+
+ writel(AUART_CTRL0_CLKGATE,
+ port->membase + AUART_CTRL0_CLR);
+ writel(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE,
+ port->membase + AUART_CTRL2_SET);
+
+ uart_console_write(port, str, count, mxs_auart_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the saved control registers
+ */
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+
+ clk_disable(s->clk);
+}
+
+static void __init
+auart_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ unsigned int lcr_h, quot;
+
+ if (!(readl(port->membase + AUART_CTRL2) & AUART_CTRL2_UARTEN))
+ return;
+
+ lcr_h = readl(port->membase + AUART_LINECTRL);
+
+ *parity = 'n';
+ if (lcr_h & AUART_LINECTRL_PEN) {
+ if (lcr_h & AUART_LINECTRL_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(2))
+ *bits = 7;
+ else
+ *bits = 8;
+
+ quot = ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVINT_MASK))
+ >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6);
+ quot |= ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVFRAC_MASK))
+ >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
+ if (quot == 0)
+ quot = 1;
+
+ *baud = (port->uartclk << 2) / quot;
+}
+
+static int __init
+auart_console_setup(struct console *co, char *options)
+{
+ struct mxs_auart_port *s;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port))
+ co->index = 0;
+ s = auart_port[co->index];
+ if (!s)
+ return -ENODEV;
+
+ clk_enable(s->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ auart_console_get_options(&s->port, &baud, &parity, &bits);
+
+ ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
+
+ clk_disable(s->clk);
+
+ return ret;
+}
+
+static struct console auart_console = {
+ .name = "ttyAPP",
+ .write = auart_console_write,
+ .device = uart_console_device,
+ .setup = auart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &auart_driver,
+};
+#endif
+
+static struct uart_driver auart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyAPP",
+ .dev_name = "ttyAPP",
+ .major = 0,
+ .minor = 0,
+ .nr = MXS_AUART_PORTS,
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+ .cons = &auart_console,
+#endif
+};
+
+static int __devinit mxs_auart_probe(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s;
+ u32 version;
+ int ret = 0;
+ struct resource *r;
+
+ s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ s->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(s->clk)) {
+ ret = PTR_ERR(s->clk);
+ goto out_free;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -ENXIO;
+ goto out_free_clk;
+ }
+
+ s->port.mapbase = r->start;
+ s->port.membase = ioremap(r->start, resource_size(r));
+ s->port.ops = &mxs_auart_ops;
+ s->port.iotype = UPIO_MEM;
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ s->port.fifosize = 16;
+ s->port.uartclk = clk_get_rate(s->clk);
+ s->port.type = PORT_IMX;
+ s->port.dev = s->dev = get_device(&pdev->dev);
+
+ s->flags = 0;
+ s->ctrl = 0;
+
+ s->irq = platform_get_irq(pdev, 0);
+ s->port.irq = s->irq;
+ ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s);
+ if (ret)
+ goto out_free_clk;
+
+ platform_set_drvdata(pdev, s);
+
+ auart_port[pdev->id] = s;
+
+ mxs_auart_reset(&s->port);
+
+ ret = uart_add_one_port(&auart_driver, &s->port);
+ if (ret)
+ goto out_free_irq;
+
+ version = readl(s->port.membase + AUART_VERSION);
+ dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
+ (version >> 24) & 0xff,
+ (version >> 16) & 0xff, version & 0xffff);
+
+ return 0;
+
+out_free_irq:
+ auart_port[pdev->id] = NULL;
+ free_irq(s->irq, s);
+out_free_clk:
+ clk_put(s->clk);
+out_free:
+ kfree(s);
+out:
+ return ret;
+}
+
+static int __devexit mxs_auart_remove(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&auart_driver, &s->port);
+
+ auart_port[pdev->id] = NULL;
+
+ clk_put(s->clk);
+ free_irq(s->irq, s);
+ kfree(s);
+
+ return 0;
+}
+
+static struct platform_driver mxs_auart_driver = {
+ .probe = mxs_auart_probe,
+ .remove = __devexit_p(mxs_auart_remove),
+ .driver = {
+ .name = "mxs-auart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mxs_auart_init(void)
+{
+ int r;
+
+ r = uart_register_driver(&auart_driver);
+ if (r)
+ goto out;
+
+ r = platform_driver_register(&mxs_auart_driver);
+ if (r)
+ goto out_err;
+
+ return 0;
+out_err:
+ uart_unregister_driver(&auart_driver);
+out:
+ return r;
+}
+
+static void __exit mxs_auart_exit(void)
+{
+ platform_driver_unregister(&mxs_auart_driver);
+ uart_unregister_driver(&auart_driver);
+}
+
+module_init(mxs_auart_init);
+module_exit(mxs_auart_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Freescale MXS application uart driver");
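A worked example (illustrative only, not part of the patch) of how mxs_auart_settermios() above splits the baud divisor: the AUART takes uartclk * 32 / baud as a fixed-point value whose low 6 bits go into BAUD_DIVFRAC and whose remaining bits go into BAUD_DIVINT. The 24 MHz reference clock is an assumption made for the example.

#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 24000000;	/* assumed reference clock */
	unsigned int baud = 115200;
	unsigned int div = uartclk * 32 / baud;	/* 6666 */

	/* low 6 bits -> DIVFRAC, the remaining bits -> DIVINT */
	printf("DIVINT=%u DIVFRAC=%u\n", div >> 6, div & 0x3f);
	/* prints: DIVINT=104 DIVFRAC=10 */
	return 0;
}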
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5c7abe4c94dd..1a43197138cf 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -80,14 +80,16 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
/*
* Try to register a serial port
*/
-static int __devinit of_platform_serial_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
{
struct of_serial_info *info;
struct uart_port port;
int port_type;
int ret;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+
if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
return -EBUSY;
@@ -95,7 +97,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev,
if (info == NULL)
return -ENOMEM;
- port_type = (unsigned long)id->data;
+ port_type = (unsigned long)ofdev->dev.of_match->data;
ret = of_platform_serial_setup(ofdev, port_type, &port);
if (ret)
goto out;
@@ -174,7 +176,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
{ /* end of list */ },
};
-static struct of_platform_driver of_platform_serial_driver = {
+static struct platform_driver of_platform_serial_driver = {
.driver = {
.name = "of_serial",
.owner = THIS_MODULE,
@@ -186,13 +188,13 @@ static struct of_platform_driver of_platform_serial_driver = {
static int __init of_platform_serial_init(void)
{
- return of_register_platform_driver(&of_platform_serial_driver);
+ return platform_driver_register(&of_platform_serial_driver);
}
module_init(of_platform_serial_init);
static void __exit of_platform_serial_exit(void)
{
- return of_unregister_platform_driver(&of_platform_serial_driver);
+ return platform_driver_unregister(&of_platform_serial_driver);
};
module_exit(of_platform_serial_exit);
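Since of_platform_serial_probe() now reads the port type from ofdev->dev.of_match->data rather than a passed-in of_device_id, each entry in of_platform_serial_table carries its PORT_* constant as match data. The table contents are not shown in this hunk; an entry would look roughly like the following sketch, where the compatible string and constant are examples rather than values quoted from the patch:

static struct of_device_id __devinitdata of_platform_serial_table[] = {
	{ .compatible = "ns16550", .data = (void *)PORT_16550, },
	{ /* end of list */ },
};

For a device node matching that entry, port_type = (unsigned long)ofdev->dev.of_match->data then resolves to PORT_16550.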
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 70a61458ec42..8be9faad0eb1 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -15,7 +15,6 @@
*Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/serial_reg.h>
-#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..1ef4df9bf7e4 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/tty/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c
deleted file mode 100644
index ea744707c4d6..000000000000
--- a/drivers/tty/serial/serial_lh7a40x.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/* drivers/serial/serial_lh7a40x.c
- *
- * Copyright (C) 2004 Coastal Environmental Systems
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-
-/* Driver for Sharp LH7A40X embedded serial ports
- *
- * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
- * Based on drivers/serial/amba.c, by Deep Blue Solutions Ltd.
- *
- * ---
- *
- * This driver supports the embedded UARTs of the Sharp LH7A40X series
- * CPUs. While similar to the 16550 and other UART chips, there is
- * nothing close to register compatibility. Moreover, some of the
- * modem control lines are not available, either in the chip or they
- * are lacking in the board-level implementation.
- *
- * - Use of SIRDIS
- * For simplicity, we disable the IR functions of any UART whenever
- * we enable it.
- *
- */
-
-
-#if defined(CONFIG_SERIAL_LH7A40X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#define DEV_MAJOR 204
-#define DEV_MINOR 16
-#define DEV_NR 3
-
-#define ISR_LOOP_LIMIT 256
-
-#define UR(p,o) _UR ((p)->membase, o)
-#define _UR(b,o) (*((volatile unsigned int*)(((unsigned char*) b) + (o))))
-#define BIT_CLR(p,o,m) UR(p,o) = UR(p,o) & (~(unsigned int)m)
-#define BIT_SET(p,o,m) UR(p,o) = UR(p,o) | ( (unsigned int)m)
-
-#define UART_REG_SIZE 32
-
-#define UART_R_DATA (0x00)
-#define UART_R_FCON (0x04)
-#define UART_R_BRCON (0x08)
-#define UART_R_CON (0x0c)
-#define UART_R_STATUS (0x10)
-#define UART_R_RAWISR (0x14)
-#define UART_R_INTEN (0x18)
-#define UART_R_ISR (0x1c)
-
-#define UARTEN (0x01) /* UART enable */
-#define SIRDIS (0x02) /* Serial IR disable (UART1 only) */
-
-#define RxEmpty (0x10)
-#define TxEmpty (0x80)
-#define TxFull (0x20)
-#define nRxRdy RxEmpty
-#define nTxRdy TxFull
-#define TxBusy (0x08)
-
-#define RxBreak (0x0800)
-#define RxOverrunError (0x0400)
-#define RxParityError (0x0200)
-#define RxFramingError (0x0100)
-#define RxError (RxBreak | RxOverrunError | RxParityError | RxFramingError)
-
-#define DCD (0x04)
-#define DSR (0x02)
-#define CTS (0x01)
-
-#define RxInt (0x01)
-#define TxInt (0x02)
-#define ModemInt (0x04)
-#define RxTimeoutInt (0x08)
-
-#define MSEOI (0x10)
-
-#define WLEN_8 (0x60)
-#define WLEN_7 (0x40)
-#define WLEN_6 (0x20)
-#define WLEN_5 (0x00)
-#define WLEN (0x60) /* Mask for all word-length bits */
-#define STP2 (0x08)
-#define PEN (0x02) /* Parity Enable */
-#define EPS (0x04) /* Even Parity Set */
-#define FEN (0x10) /* FIFO Enable */
-#define BRK (0x01) /* Send Break */
-
-
-struct uart_port_lh7a40x {
- struct uart_port port;
- unsigned int statusPrev; /* Most recently read modem status */
-};
-
-static void lh7a40xuart_stop_tx (struct uart_port* port)
-{
- BIT_CLR (port, UART_R_INTEN, TxInt);
-}
-
-static void lh7a40xuart_start_tx (struct uart_port* port)
-{
- BIT_SET (port, UART_R_INTEN, TxInt);
-
- /* *** FIXME: do I need to check for startup of the
- transmitter? The old driver did, but AMBA
- doesn't . */
-}
-
-static void lh7a40xuart_stop_rx (struct uart_port* port)
-{
- BIT_SET (port, UART_R_INTEN, RxTimeoutInt | RxInt);
-}
-
-static void lh7a40xuart_enable_ms (struct uart_port* port)
-{
- BIT_SET (port, UART_R_INTEN, ModemInt);
-}
-
-static void lh7a40xuart_rx_chars (struct uart_port* port)
-{
- struct tty_struct* tty = port->state->port.tty;
- int cbRxMax = 256; /* (Gross) limit on receive */
- unsigned int data; /* Received data and status */
- unsigned int flag;
-
- while (!(UR (port, UART_R_STATUS) & nRxRdy) && --cbRxMax) {
- data = UR (port, UART_R_DATA);
- flag = TTY_NORMAL;
- ++port->icount.rx;
-
- if (unlikely(data & RxError)) {
- if (data & RxBreak) {
- data &= ~(RxFramingError | RxParityError);
- ++port->icount.brk;
- if (uart_handle_break (port))
- continue;
- }
- else if (data & RxParityError)
- ++port->icount.parity;
- else if (data & RxFramingError)
- ++port->icount.frame;
- if (data & RxOverrunError)
- ++port->icount.overrun;
-
- /* Mask by termios, leave Rx'd byte */
- data &= port->read_status_mask | 0xff;
-
- if (data & RxBreak)
- flag = TTY_BREAK;
- else if (data & RxParityError)
- flag = TTY_PARITY;
- else if (data & RxFramingError)
- flag = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char (port, (unsigned char) data))
- continue;
-
- uart_insert_char(port, data, RxOverrunError, data, flag);
- }
- tty_flip_buffer_push (tty);
- return;
-}
-
-static void lh7a40xuart_tx_chars (struct uart_port* port)
-{
- struct circ_buf* xmit = &port->state->xmit;
- int cbTxMax = port->fifosize;
-
- if (port->x_char) {
- UR (port, UART_R_DATA) = port->x_char;
- ++port->icount.tx;
- port->x_char = 0;
- return;
- }
- if (uart_circ_empty (xmit) || uart_tx_stopped (port)) {
- lh7a40xuart_stop_tx (port);
- return;
- }
-
- /* Unlike the AMBA UART, the lh7a40x UART does not guarantee
- that at least half of the FIFO is empty. Instead, we check
- status for every character. Using the AMBA method causes
- the transmitter to drop characters. */
-
- do {
- UR (port, UART_R_DATA) = xmit->buf[xmit->tail];
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- ++port->icount.tx;
- if (uart_circ_empty(xmit))
- break;
- } while (!(UR (port, UART_R_STATUS) & nTxRdy)
- && cbTxMax--);
-
- if (uart_circ_chars_pending (xmit) < WAKEUP_CHARS)
- uart_write_wakeup (port);
-
- if (uart_circ_empty (xmit))
- lh7a40xuart_stop_tx (port);
-}
-
-static void lh7a40xuart_modem_status (struct uart_port* port)
-{
- unsigned int status = UR (port, UART_R_STATUS);
- unsigned int delta
- = status ^ ((struct uart_port_lh7a40x*) port)->statusPrev;
-
- BIT_SET (port, UART_R_RAWISR, MSEOI); /* Clear modem status intr */
-
- if (!delta) /* Only happens if we missed 2 transitions */
- return;
-
- ((struct uart_port_lh7a40x*) port)->statusPrev = status;
-
- if (delta & DCD)
- uart_handle_dcd_change (port, status & DCD);
-
- if (delta & DSR)
- ++port->icount.dsr;
-
- if (delta & CTS)
- uart_handle_cts_change (port, status & CTS);
-
- wake_up_interruptible (&port->state->port.delta_msr_wait);
-}
-
-static irqreturn_t lh7a40xuart_int (int irq, void* dev_id)
-{
- struct uart_port* port = dev_id;
- unsigned int cLoopLimit = ISR_LOOP_LIMIT;
- unsigned int isr = UR (port, UART_R_ISR);
-
-
- do {
- if (isr & (RxInt | RxTimeoutInt))
- lh7a40xuart_rx_chars(port);
- if (isr & ModemInt)
- lh7a40xuart_modem_status (port);
- if (isr & TxInt)
- lh7a40xuart_tx_chars (port);
-
- if (--cLoopLimit == 0)
- break;
-
- isr = UR (port, UART_R_ISR);
- } while (isr & (RxInt | TxInt | RxTimeoutInt));
-
- return IRQ_HANDLED;
-}
-
-static unsigned int lh7a40xuart_tx_empty (struct uart_port* port)
-{
- return (UR (port, UART_R_STATUS) & TxEmpty) ? TIOCSER_TEMT : 0;
-}
-
-static unsigned int lh7a40xuart_get_mctrl (struct uart_port* port)
-{
- unsigned int result = 0;
- unsigned int status = UR (port, UART_R_STATUS);
-
- if (status & DCD)
- result |= TIOCM_CAR;
- if (status & DSR)
- result |= TIOCM_DSR;
- if (status & CTS)
- result |= TIOCM_CTS;
-
- return result;
-}
-
-static void lh7a40xuart_set_mctrl (struct uart_port* port, unsigned int mctrl)
-{
- /* None of the ports supports DTR. UART1 supports RTS through GPIO. */
- /* Note, kernel appears to be setting DTR and RTS on console. */
-
- /* *** FIXME: this deserves more work. There's some work in
- tracing all of the IO pins. */
-#if 0
- if( port->mapbase == UART1_PHYS) {
- gpioRegs_t *gpio = (gpioRegs_t *)IO_ADDRESS(GPIO_PHYS);
-
- if (mctrl & TIOCM_RTS)
- gpio->pbdr &= ~GPIOB_UART1_RTS;
- else
- gpio->pbdr |= GPIOB_UART1_RTS;
- }
-#endif
-}
-
-static void lh7a40xuart_break_ctl (struct uart_port* port, int break_state)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- if (break_state == -1)
- BIT_SET (port, UART_R_FCON, BRK); /* Assert break */
- else
- BIT_CLR (port, UART_R_FCON, BRK); /* Deassert break */
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static int lh7a40xuart_startup (struct uart_port* port)
-{
- int retval;
-
- retval = request_irq (port->irq, lh7a40xuart_int, 0,
- "serial_lh7a40x", port);
- if (retval)
- return retval;
-
- /* Initial modem control-line settings */
- ((struct uart_port_lh7a40x*) port)->statusPrev
- = UR (port, UART_R_STATUS);
-
- /* There is presently no configuration option to enable IR.
- Thus, we always disable it. */
-
- BIT_SET (port, UART_R_CON, UARTEN | SIRDIS);
- BIT_SET (port, UART_R_INTEN, RxTimeoutInt | RxInt);
-
- return 0;
-}
-
-static void lh7a40xuart_shutdown (struct uart_port* port)
-{
- free_irq (port->irq, port);
- BIT_CLR (port, UART_R_FCON, BRK | FEN);
- BIT_CLR (port, UART_R_CON, UARTEN);
-}
-
-static void lh7a40xuart_set_termios (struct uart_port* port,
- struct ktermios* termios,
- struct ktermios* old)
-{
- unsigned int con;
- unsigned int inten;
- unsigned int fcon;
- unsigned long flags;
- unsigned int baud;
- unsigned int quot;
-
- baud = uart_get_baud_rate (port, termios, old, 8, port->uartclk/16);
- quot = uart_get_divisor (port, baud); /* -1 performed elsewhere */
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- fcon = WLEN_5;
- break;
- case CS6:
- fcon = WLEN_6;
- break;
- case CS7:
- fcon = WLEN_7;
- break;
- case CS8:
- default:
- fcon = WLEN_8;
- break;
- }
- if (termios->c_cflag & CSTOPB)
- fcon |= STP2;
- if (termios->c_cflag & PARENB) {
- fcon |= PEN;
- if (!(termios->c_cflag & PARODD))
- fcon |= EPS;
- }
- if (port->fifosize > 1)
- fcon |= FEN;
-
- spin_lock_irqsave (&port->lock, flags);
-
- uart_update_timeout (port, termios->c_cflag, baud);
-
- port->read_status_mask = RxOverrunError;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= RxFramingError | RxParityError;
- if (termios->c_iflag & (BRKINT | PARMRK))
- port->read_status_mask |= RxBreak;
-
- /* Figure mask for status we ignore */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= RxFramingError | RxParityError;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= RxBreak;
- /* Ignore overrun when ignorning parity */
- /* *** FIXME: is this in the right place? */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= RxOverrunError;
- }
-
- /* Ignore all receive errors when receive disabled */
- if ((termios->c_cflag & CREAD) == 0)
- port->ignore_status_mask |= RxError;
-
- con = UR (port, UART_R_CON);
- inten = (UR (port, UART_R_INTEN) & ~ModemInt);
-
- if (UART_ENABLE_MS (port, termios->c_cflag))
- inten |= ModemInt;
-
- BIT_CLR (port, UART_R_CON, UARTEN); /* Disable UART */
- UR (port, UART_R_INTEN) = 0; /* Disable interrupts */
- UR (port, UART_R_BRCON) = quot - 1; /* Set baud rate divisor */
- UR (port, UART_R_FCON) = fcon; /* Set FIFO and frame ctrl */
- UR (port, UART_R_INTEN) = inten; /* Enable interrupts */
- UR (port, UART_R_CON) = con; /* Restore UART mode */
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char* lh7a40xuart_type (struct uart_port* port)
-{
- return port->type == PORT_LH7A40X ? "LH7A40X" : NULL;
-}
-
-static void lh7a40xuart_release_port (struct uart_port* port)
-{
- release_mem_region (port->mapbase, UART_REG_SIZE);
-}
-
-static int lh7a40xuart_request_port (struct uart_port* port)
-{
- return request_mem_region (port->mapbase, UART_REG_SIZE,
- "serial_lh7a40x") != NULL
- ? 0 : -EBUSY;
-}
-
-static void lh7a40xuart_config_port (struct uart_port* port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = PORT_LH7A40X;
- lh7a40xuart_request_port (port);
- }
-}
-
-static int lh7a40xuart_verify_port (struct uart_port* port,
- struct serial_struct* ser)
-{
- int ret = 0;
-
- if (ser->type != PORT_UNKNOWN && ser->type != PORT_LH7A40X)
- ret = -EINVAL;
- if (ser->irq < 0 || ser->irq >= nr_irqs)
- ret = -EINVAL;
- if (ser->baud_base < 9600) /* *** FIXME: is this true? */
- ret = -EINVAL;
- return ret;
-}
-
-static struct uart_ops lh7a40x_uart_ops = {
- .tx_empty = lh7a40xuart_tx_empty,
- .set_mctrl = lh7a40xuart_set_mctrl,
- .get_mctrl = lh7a40xuart_get_mctrl,
- .stop_tx = lh7a40xuart_stop_tx,
- .start_tx = lh7a40xuart_start_tx,
- .stop_rx = lh7a40xuart_stop_rx,
- .enable_ms = lh7a40xuart_enable_ms,
- .break_ctl = lh7a40xuart_break_ctl,
- .startup = lh7a40xuart_startup,
- .shutdown = lh7a40xuart_shutdown,
- .set_termios = lh7a40xuart_set_termios,
- .type = lh7a40xuart_type,
- .release_port = lh7a40xuart_release_port,
- .request_port = lh7a40xuart_request_port,
- .config_port = lh7a40xuart_config_port,
- .verify_port = lh7a40xuart_verify_port,
-};
-
-static struct uart_port_lh7a40x lh7a40x_ports[DEV_NR] = {
- {
- .port = {
- .membase = (void*) io_p2v (UART1_PHYS),
- .mapbase = UART1_PHYS,
- .iotype = UPIO_MEM,
- .irq = IRQ_UART1INTR,
- .uartclk = 14745600/2,
- .fifosize = 16,
- .ops = &lh7a40x_uart_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- },
- },
- {
- .port = {
- .membase = (void*) io_p2v (UART2_PHYS),
- .mapbase = UART2_PHYS,
- .iotype = UPIO_MEM,
- .irq = IRQ_UART2INTR,
- .uartclk = 14745600/2,
- .fifosize = 16,
- .ops = &lh7a40x_uart_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- },
- },
- {
- .port = {
- .membase = (void*) io_p2v (UART3_PHYS),
- .mapbase = UART3_PHYS,
- .iotype = UPIO_MEM,
- .irq = IRQ_UART3INTR,
- .uartclk = 14745600/2,
- .fifosize = 16,
- .ops = &lh7a40x_uart_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 2,
- },
- },
-};
-
-#ifndef CONFIG_SERIAL_LH7A40X_CONSOLE
-# define LH7A40X_CONSOLE NULL
-#else
-# define LH7A40X_CONSOLE &lh7a40x_console
-
-static void lh7a40xuart_console_putchar(struct uart_port *port, int ch)
-{
- while (UR(port, UART_R_STATUS) & nTxRdy)
- ;
- UR(port, UART_R_DATA) = ch;
-}
-
-static void lh7a40xuart_console_write (struct console* co,
- const char* s,
- unsigned int count)
-{
- struct uart_port* port = &lh7a40x_ports[co->index].port;
- unsigned int con = UR (port, UART_R_CON);
- unsigned int inten = UR (port, UART_R_INTEN);
-
-
- UR (port, UART_R_INTEN) = 0; /* Disable all interrupts */
- BIT_SET (port, UART_R_CON, UARTEN | SIRDIS); /* Enable UART */
-
- uart_console_write(port, s, count, lh7a40xuart_console_putchar);
-
- /* Wait until all characters are sent */
- while (UR (port, UART_R_STATUS) & TxBusy)
- ;
-
- /* Restore control and interrupt mask */
- UR (port, UART_R_CON) = con;
- UR (port, UART_R_INTEN) = inten;
-}
-
-static void __init lh7a40xuart_console_get_options (struct uart_port* port,
- int* baud,
- int* parity,
- int* bits)
-{
- if (UR (port, UART_R_CON) & UARTEN) {
- unsigned int fcon = UR (port, UART_R_FCON);
- unsigned int quot = UR (port, UART_R_BRCON) + 1;
-
- switch (fcon & (PEN | EPS)) {
- default: *parity = 'n'; break;
- case PEN: *parity = 'o'; break;
- case PEN | EPS: *parity = 'e'; break;
- }
-
- switch (fcon & WLEN) {
- default:
- case WLEN_8: *bits = 8; break;
- case WLEN_7: *bits = 7; break;
- case WLEN_6: *bits = 6; break;
- case WLEN_5: *bits = 5; break;
- }
-
- *baud = port->uartclk/(16*quot);
- }
-}
-
-static int __init lh7a40xuart_console_setup (struct console* co, char* options)
-{
- struct uart_port* port;
- int baud = 38400;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (co->index >= DEV_NR) /* Bounds check on device number */
- co->index = 0;
- port = &lh7a40x_ports[co->index].port;
-
- if (options)
- uart_parse_options (options, &baud, &parity, &bits, &flow);
- else
- lh7a40xuart_console_get_options (port, &baud, &parity, &bits);
-
- return uart_set_options (port, co, baud, parity, bits, flow);
-}
-
-static struct uart_driver lh7a40x_reg;
-static struct console lh7a40x_console = {
- .name = "ttyAM",
- .write = lh7a40xuart_console_write,
- .device = uart_console_device,
- .setup = lh7a40xuart_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &lh7a40x_reg,
-};
-
-static int __init lh7a40xuart_console_init(void)
-{
- register_console (&lh7a40x_console);
- return 0;
-}
-
-console_initcall (lh7a40xuart_console_init);
-
-#endif
-
-static struct uart_driver lh7a40x_reg = {
- .owner = THIS_MODULE,
- .driver_name = "ttyAM",
- .dev_name = "ttyAM",
- .major = DEV_MAJOR,
- .minor = DEV_MINOR,
- .nr = DEV_NR,
- .cons = LH7A40X_CONSOLE,
-};
-
-static int __init lh7a40xuart_init(void)
-{
- int ret;
-
- printk (KERN_INFO "serial: LH7A40X serial driver\n");
-
- ret = uart_register_driver (&lh7a40x_reg);
-
- if (ret == 0) {
- int i;
-
- for (i = 0; i < DEV_NR; i++) {
- /* UART3, when used, requires GPIO pin reallocation */
- if (lh7a40x_ports[i].port.mapbase == UART3_PHYS)
- GPIO_PINMUX |= 1<<3;
- uart_add_one_port (&lh7a40x_reg,
- &lh7a40x_ports[i].port);
- }
- }
- return ret;
-}
-
-static void __exit lh7a40xuart_exit(void)
-{
- int i;
-
- for (i = 0; i < DEV_NR; i++)
- uart_remove_one_port (&lh7a40x_reg, &lh7a40x_ports[i].port);
-
- uart_unregister_driver (&lh7a40x_reg);
-}
-
-module_init (lh7a40xuart_init);
-module_exit (lh7a40xuart_exit);
-
-MODULE_AUTHOR ("Marc Singer");
-MODULE_DESCRIPTION ("Sharp LH7A40X serial port driver");
-MODULE_LICENSE ("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 92c91c83edde..0257fd5ede52 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,7 +47,6 @@
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/list.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -65,11 +64,8 @@
struct sci_port {
struct uart_port port;
- /* Port type */
- unsigned int type;
-
- /* Port IRQs: ERI, RXI, TXI, BRI (optional) */
- unsigned int irqs[SCIx_NR_IRQS];
+ /* Platform configuration */
+ struct plat_sci_port *cfg;
/* Port enable callback */
void (*enable)(struct uart_port *port);
@@ -81,26 +77,15 @@ struct sci_port {
struct timer_list break_timer;
int break_flag;
- /* SCSCR initialization */
- unsigned int scscr;
-
- /* SCBRR calculation algo */
- unsigned int scbrr_algo_id;
-
/* Interface clock */
struct clk *iclk;
/* Function clock */
struct clk *fclk;
- struct list_head node;
-
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct device *dma_dev;
- unsigned int slave_tx;
- unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
@@ -117,16 +102,14 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
-};
-struct sh_sci_priv {
- spinlock_t lock;
- struct list_head ports;
- struct notifier_block clk_nb;
+ struct notifier_block freq_transition;
};
/* Function prototypes */
+static void sci_start_tx(struct uart_port *port);
static void sci_stop_tx(struct uart_port *port);
+static void sci_start_rx(struct uart_port *port);
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -142,12 +125,6 @@ to_sci_port(struct uart_port *uart)
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
#ifdef CONFIG_CONSOLE_POLL
-static inline void handle_error(struct uart_port *port)
-{
- /* Clear error flags */
- sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
-}
-
static int sci_poll_get_char(struct uart_port *port)
{
unsigned short status;
@@ -156,7 +133,7 @@ static int sci_poll_get_char(struct uart_port *port)
do {
status = sci_in(port, SCxSR);
if (status & SCxSR_ERRORS(port)) {
- handle_error(port);
+ sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
continue;
}
break;
@@ -475,7 +452,7 @@ static void sci_transmit_chars(struct uart_port *port)
/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
-static inline void sci_receive_chars(struct uart_port *port)
+static void sci_receive_chars(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
struct tty_struct *tty = port->state->port.tty;
@@ -566,18 +543,20 @@ static inline void sci_receive_chars(struct uart_port *port)
}
#define SCI_BREAK_JIFFIES (HZ/20)
-/* The sci generates interrupts during the break,
+
+/*
+ * The sci generates interrupts during the break,
* 1 per millisecond or so during the break period, for 9600 baud.
* So dont bother disabling interrupts.
* But dont want more than 1 break event.
* Use a kernel timer to periodically poll the rx line until
* the break is finished.
*/
-static void sci_schedule_break_timer(struct sci_port *port)
+static inline void sci_schedule_break_timer(struct sci_port *port)
{
- port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES;
- add_timer(&port->break_timer);
+ mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
+
/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
@@ -594,7 +573,7 @@ static void sci_break_timer(unsigned long data)
port->break_flag = 0;
}
-static inline int sci_handle_errors(struct uart_port *port)
+static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
@@ -650,7 +629,7 @@ static inline int sci_handle_errors(struct uart_port *port)
return copied;
}
-static inline int sci_handle_fifo_overrun(struct uart_port *port)
+static int sci_handle_fifo_overrun(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
int copied = 0;
@@ -671,7 +650,7 @@ static inline int sci_handle_fifo_overrun(struct uart_port *port)
return copied;
}
-static inline int sci_handle_breaks(struct uart_port *port)
+static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
@@ -794,7 +773,7 @@ static inline unsigned long port_rx_irq_mask(struct uart_port *port)
* it's unset, it's logically inferred that there's no point in
* testing for it.
*/
- return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE);
+ return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
@@ -839,17 +818,18 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
static int sci_notifier(struct notifier_block *self,
unsigned long phase, void *p)
{
- struct sh_sci_priv *priv = container_of(self,
- struct sh_sci_priv, clk_nb);
struct sci_port *sci_port;
unsigned long flags;
+ sci_port = container_of(self, struct sci_port, freq_transition);
+
if ((phase == CPUFREQ_POSTCHANGE) ||
(phase == CPUFREQ_RESUMECHANGE)) {
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- spin_unlock_irqrestore(&priv->lock, flags);
+ struct uart_port *port = &sci_port->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->uartclk = clk_get_rate(sci_port->iclk);
+ spin_unlock_irqrestore(&port->lock, flags);
}
return NOTIFY_OK;
@@ -882,21 +862,21 @@ static int sci_request_irq(struct sci_port *port)
const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
"SCI Transmit Data Empty", "SCI Break" };
- if (port->irqs[0] == port->irqs[1]) {
- if (unlikely(!port->irqs[0]))
+ if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
+ if (unlikely(!port->cfg->irqs[0]))
return -ENODEV;
- if (request_irq(port->irqs[0], sci_mpxed_interrupt,
+ if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
IRQF_DISABLED, "sci", port)) {
dev_err(port->port.dev, "Can't allocate IRQ\n");
return -ENODEV;
}
} else {
for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- if (unlikely(!port->irqs[i]))
+ if (unlikely(!port->cfg->irqs[i]))
continue;
- if (request_irq(port->irqs[i], handlers[i],
+ if (request_irq(port->cfg->irqs[i], handlers[i],
IRQF_DISABLED, desc[i], port)) {
dev_err(port->port.dev, "Can't allocate IRQ\n");
return -ENODEV;
@@ -911,14 +891,14 @@ static void sci_free_irq(struct sci_port *port)
{
int i;
- if (port->irqs[0] == port->irqs[1])
- free_irq(port->irqs[0], port);
+ if (port->cfg->irqs[0] == port->cfg->irqs[1])
+ free_irq(port->cfg->irqs[0], port);
else {
- for (i = 0; i < ARRAY_SIZE(port->irqs); i++) {
- if (!port->irqs[i])
+ for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
+ if (!port->cfg->irqs[i])
continue;
- free_irq(port->irqs[i], port);
+ free_irq(port->cfg->irqs[i], port);
}
}
}
@@ -1037,9 +1017,6 @@ static void sci_dma_rx_complete(void *arg)
schedule_work(&s->work_rx);
}
-static void sci_start_rx(struct uart_port *port);
-static void sci_start_tx(struct uart_port *port);
-
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
struct dma_chan *chan = s->chan_rx;
@@ -1325,7 +1302,7 @@ static void rx_timer_fn(unsigned long arg)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
scr &= ~0x4000;
- enable_irq(s->irqs[1]);
+ enable_irq(s->cfg->irqs[1]);
}
sci_out(port, SCSCR, scr | SCSCR_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1341,9 +1318,9 @@ static void sci_request_dma(struct uart_port *port)
int nent;
dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
- port->line, s->dma_dev);
+ port->line, s->cfg->dma_dev);
- if (!s->dma_dev)
+ if (!s->cfg->dma_dev)
return;
dma_cap_zero(mask);
@@ -1352,8 +1329,8 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_tx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
- param->slave_id = s->slave_tx;
- param->dma_dev = s->dma_dev;
+ param->slave_id = s->cfg->dma_slave_tx;
+ param->dma_dev = s->cfg->dma_dev;
s->cookie_tx = -EINVAL;
chan = dma_request_channel(mask, filter, param);
@@ -1381,8 +1358,8 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_rx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
- param->slave_id = s->slave_rx;
- param->dma_dev = s->dma_dev;
+ param->slave_id = s->cfg->dma_slave_rx;
+ param->dma_dev = s->cfg->dma_dev;
chan = dma_request_channel(mask, filter, param);
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
@@ -1427,7 +1404,7 @@ static void sci_free_dma(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- if (!s->dma_dev)
+ if (!s->cfg->dma_dev)
return;
if (s->chan_tx)
@@ -1435,21 +1412,32 @@ static void sci_free_dma(struct uart_port *port)
if (s->chan_rx)
sci_rx_dma_release(s, false);
}
+#else
+static inline void sci_request_dma(struct uart_port *port)
+{
+}
+
+static inline void sci_free_dma(struct uart_port *port)
+{
+}
#endif
static int sci_startup(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
+ int ret;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
if (s->enable)
s->enable(port);
- sci_request_irq(s);
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ ret = sci_request_irq(s);
+ if (unlikely(ret < 0))
+ return ret;
+
sci_request_dma(port);
-#endif
+
sci_start_tx(port);
sci_start_rx(port);
@@ -1464,9 +1452,8 @@ static void sci_shutdown(struct uart_port *port)
sci_stop_rx(port);
sci_stop_tx(port);
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
+
sci_free_dma(port);
-#endif
sci_free_irq(s);
if (s->disable)
@@ -1491,6 +1478,7 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
/* Warn, but use a safe default */
WARN_ON(1);
+
return ((freq + 16 * bps) / (32 * bps) - 1);
}
@@ -1514,7 +1502,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
if (likely(baud && port->uartclk))
- t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);
+ t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
do {
status = sci_in(port, SCxSR);
@@ -1526,6 +1514,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
+
if ((termios->c_cflag & CSIZE) == CS7)
smr_val |= 0x40;
if (termios->c_cflag & PARENB)
@@ -1540,7 +1529,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSMR, smr_val);
dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
- s->scscr);
+ s->cfg->scscr);
if (t > 0) {
if (t >= 256) {
@@ -1556,7 +1545,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_init_pins(port, termios->c_cflag);
sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
- sci_out(port, SCSCR, s->scscr);
+ sci_out(port, SCSCR, s->cfg->scscr);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
@@ -1602,31 +1591,33 @@ static const char *sci_type(struct uart_port *port)
return NULL;
}
-static void sci_release_port(struct uart_port *port)
+static inline unsigned long sci_port_size(struct uart_port *port)
{
- /* Nothing here yet .. */
-}
-
-static int sci_request_port(struct uart_port *port)
-{
- /* Nothing here yet .. */
- return 0;
+ /*
+ * Pick an arbitrary size that encapsulates all of the base
+ * registers by default. This can be optimized later, or derived
+ * from platform resource data at such a time that ports begin to
+ * behave more erratically.
+ */
+ return 64;
}
-static void sci_config_port(struct uart_port *port, int flags)
+static int sci_remap_port(struct uart_port *port)
{
- struct sci_port *s = to_sci_port(port);
-
- port->type = s->type;
+ unsigned long size = sci_port_size(port);
+ /*
+ * Nothing to do if there's already an established membase.
+ */
if (port->membase)
- return;
+ return 0;
if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, 0x40);
-
- if (IS_ERR(port->membase))
+ port->membase = ioremap_nocache(port->mapbase, size);
+ if (unlikely(!port->membase)) {
dev_err(port->dev, "can't remap port#%d\n", port->line);
+ return -ENXIO;
+ }
} else {
/*
* For the simple (and majority of) cases where we don't
@@ -1635,13 +1626,54 @@ static void sci_config_port(struct uart_port *port, int flags)
*/
port->membase = (void __iomem *)port->mapbase;
}
+
+ return 0;
+}
+
+static void sci_release_port(struct uart_port *port)
+{
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, sci_port_size(port));
+}
+
+static int sci_request_port(struct uart_port *port)
+{
+ unsigned long size = sci_port_size(port);
+ struct resource *res;
+ int ret;
+
+ res = request_mem_region(port->mapbase, size, dev_name(port->dev));
+ if (unlikely(res == NULL))
+ return -EBUSY;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0)) {
+ release_resource(res);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sci_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ struct sci_port *sport = to_sci_port(port);
+
+ port->type = sport->cfg->type;
+ sci_request_port(port);
+ }
}
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
struct sci_port *s = to_sci_port(port);
- if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
+ if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
return -EINVAL;
if (ser->baud_base < 2400)
/* No paper tape reader for Mitch.. */
@@ -1726,36 +1758,29 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->break_timer.function = sci_break_timer;
init_timer(&sci_port->break_timer);
- port->mapbase = p->mapbase;
- port->membase = p->membase;
+ sci_port->cfg = p;
- port->irq = p->irqs[SCIx_TXI_IRQ];
+ port->mapbase = p->mapbase;
+ port->type = p->type;
port->flags = p->flags;
- sci_port->type = port->type = p->type;
- sci_port->scscr = p->scscr;
- sci_port->scbrr_algo_id = p->scbrr_algo_id;
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
- sci_port->dma_dev = p->dma_dev;
- sci_port->slave_tx = p->dma_slave_tx;
- sci_port->slave_rx = p->dma_slave_rx;
+ /*
+ * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * for the multi-IRQ ports, which is where we are primarily
+ * concerned with the shutdown path synchronization.
+ *
+ * For the muxed case there's nothing more to do.
+ */
+ port->irq = p->irqs[SCIx_TXI_IRQ];
- dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__,
- p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
-#endif
+ if (p->dma_dev)
+ dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
+ p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
- memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-static struct tty_driver *serial_console_device(struct console *co, int *index)
-{
- struct uart_driver *p = &sci_uart_driver;
- *index = co->index;
- return p->tty_driver;
-}
-
static void serial_console_putchar(struct uart_port *port, int ch)
{
sci_poll_put_char(port, ch);
@@ -1768,8 +1793,8 @@ static void serial_console_putchar(struct uart_port *port, int ch)
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
- struct uart_port *port = co->data;
- struct sci_port *sci_port = to_sci_port(port);
+ struct sci_port *sci_port = &sci_ports[co->index];
+ struct uart_port *port = &sci_port->port;
unsigned short bits;
if (sci_port->enable)
@@ -1797,32 +1822,17 @@ static int __devinit serial_console_setup(struct console *co, char *options)
int ret;
/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- if (co->index >= SCI_NPORTS)
- co->index = 0;
-
- if (co->data) {
- port = co->data;
- sci_port = to_sci_port(port);
- } else {
- sci_port = &sci_ports[co->index];
- port = &sci_port->port;
- co->data = port;
- }
-
- /*
- * Also need to check port->type, we don't actually have any
- * UPIO_PORT ports, but uart_report_port() handily misreports
- * it anyways if we don't have a port available by the time this is
- * called.
+ * Refuse to handle any bogus ports.
*/
- if (!port->type)
+ if (co->index < 0 || co->index >= SCI_NPORTS)
return -ENODEV;
- sci_config_port(port, 0);
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0))
+ return ret;
if (sci_port->enable)
sci_port->enable(port);
@@ -1842,11 +1852,12 @@ static int __devinit serial_console_setup(struct console *co, char *options)
static struct console serial_console = {
.name = "ttySC",
- .device = serial_console_device,
+ .device = uart_console_device,
.write = serial_console_write,
.setup = serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
+ .data = &sci_uart_driver,
};
static int __init sci_console_init(void)
@@ -1856,14 +1867,39 @@ static int __init sci_console_init(void)
}
console_initcall(sci_console_init);
-static struct sci_port early_serial_port;
static struct console early_serial_console = {
.name = "early_ttySC",
.write = serial_console_write,
.flags = CON_PRINTBUFFER,
+ .index = -1,
};
+
static char early_serial_buf[32];
+static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ struct plat_sci_port *cfg = pdev->dev.platform_data;
+
+ if (early_serial_console.data)
+ return -EEXIST;
+
+ early_serial_console.index = pdev->id;
+
+ sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
+
+ serial_console_setup(&early_serial_console, early_serial_buf);
+
+ if (!strstr(early_serial_buf, "keep"))
+ early_serial_console.flags |= CON_BOOT;
+
+ register_console(&early_serial_console);
+ return 0;
+}
+#else
+static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
@@ -1885,24 +1921,18 @@ static struct uart_driver sci_uart_driver = {
.cons = SCI_CONSOLE,
};
-
static int sci_remove(struct platform_device *dev)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *port = platform_get_drvdata(dev);
- cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_unregister_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node) {
- uart_remove_one_port(&sci_uart_driver, &p->port);
- clk_put(p->iclk);
- clk_put(p->fclk);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_remove_one_port(&sci_uart_driver, &port->port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
- kfree(priv);
return 0;
}
@@ -1911,8 +1941,6 @@ static int __devinit sci_probe_single(struct platform_device *dev,
struct plat_sci_port *p,
struct sci_port *sciport)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
- unsigned long flags;
int ret;
/* Sanity check */
@@ -1929,68 +1957,35 @@ static int __devinit sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
- ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&sciport->node);
-
- spin_lock_irqsave(&priv->lock, flags);
- list_add(&sciport->node, &priv->ports);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
+ return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
-/*
- * Register a set of serial devices attached to a platform device. The
- * list is terminated with a zero flags entry, which means we expect
- * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need
- * remapping (such as sh64) should also set UPF_IOREMAP.
- */
static int __devinit sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p = dev->dev.platform_data;
- struct sh_sci_priv *priv;
- int i, ret = -EINVAL;
+ struct sci_port *sp = &sci_ports[dev->id];
+ int ret;
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
- if (is_early_platform_device(dev)) {
- if (dev->id == -1)
- return -ENOTSUPP;
- early_serial_console.index = dev->id;
- early_serial_console.data = &early_serial_port.port;
- sci_init_single(NULL, &early_serial_port, dev->id, p);
- serial_console_setup(&early_serial_console, early_serial_buf);
- if (!strstr(early_serial_buf, "keep"))
- early_serial_console.flags |= CON_BOOT;
- register_console(&early_serial_console);
- return 0;
- }
-#endif
+ /*
+ * If we've come here via earlyprintk initialization, head off to
+ * the special early probe. We don't have sufficient device state
+ * to make it beyond this yet.
+ */
+ if (is_early_platform_device(dev))
+ return sci_probe_earlyprintk(dev);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ platform_set_drvdata(dev, sp);
- INIT_LIST_HEAD(&priv->ports);
- spin_lock_init(&priv->lock);
- platform_set_drvdata(dev, priv);
+ ret = sci_probe_single(dev, dev->id, p, sp);
+ if (ret)
+ goto err_unreg;
- priv->clk_nb.notifier_call = sci_notifier;
- cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ sp->freq_transition.notifier_call = sci_notifier;
- if (dev->id != -1) {
- ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
- if (ret)
- goto err_unreg;
- } else {
- for (i = 0; p && p->flags != 0; p++, i++) {
- ret = sci_probe_single(dev, i, p, &sci_ports[i]);
- if (ret)
- goto err_unreg;
- }
- }
+ ret = cpufreq_register_notifier(&sp->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (unlikely(ret < 0))
+ goto err_unreg;
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
@@ -2005,28 +2000,20 @@ err_unreg:
static int sci_suspend(struct device *dev)
{
- struct sh_sci_priv *priv = dev_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *sport = dev_get_drvdata(dev);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
- uart_suspend_port(&sci_uart_driver, &p->port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (sport)
+ uart_suspend_port(&sci_uart_driver, &sport->port);
return 0;
}
static int sci_resume(struct device *dev)
{
- struct sh_sci_priv *priv = dev_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *sport = dev_get_drvdata(dev);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
- uart_resume_port(&sci_uart_driver, &p->port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (sport)
+ uart_resume_port(&sci_uart_driver, &sport->port);
return 0;
}
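
Not part of the patch itself: for readers following the new sci_request_port()/sci_release_port() pair introduced above, here is a minimal stand-alone sketch of the same claim/map/unmap pairing, assuming the fixed 64-byte window that sci_port_size() returns. The "example_*" names and the "example-uart" resource name are hypothetical, not from the driver.

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_claim_window(resource_size_t mapbase)
{
	void __iomem *membase;

	/* Reserve the physical register window against other drivers. */
	if (!request_mem_region(mapbase, 64, "example-uart"))
		return NULL;

	/* Map it uncached into kernel virtual address space. */
	membase = ioremap_nocache(mapbase, 64);
	if (!membase) {
		release_mem_region(mapbase, 64);
		return NULL;
	}

	return membase;
}

static void example_release_window(resource_size_t mapbase,
				   void __iomem *membase)
{
	/* Undo in reverse order: unmap, then drop the reservation. */
	iounmap(membase);
	release_mem_region(mapbase, 64);
}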
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b223d6cbf33a..5fefed53fa42 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -54,9 +54,6 @@
# define PBCR 0xa4050102
#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
-# define SCSPTR1 0xffe10010 /* 16 bit SCIF */
-# define SCSPTR2 0xffe20010 /* 16 bit SCIF */
-# define SCSPTR3 0xffe30010 /* 16 bit SCIF */
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
# define PADR 0xA4050120
# define PSDR 0xA405013e
@@ -69,77 +66,42 @@
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
# define SCSPTR0 0xa4050160
-# define SCSPTR1 0xa405013e
-# define SCSPTR2 0xa4050160
-# define SCSPTR3 0xa405013e
-# define SCSPTR4 0xa4050128
-# define SCSPTR5 0xa4050128
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# define SCIF_PTR2_OFFS 0x0000020
-# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_H8S2678)
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
# define SCSPTR0 0xfe4b0020
-# define SCSPTR1 0xfe4b0020
-# define SCSPTR2 0xfe4b0020
# define SCIF_ORER 0x0001
-# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCSPTR1 0xffe08024 /* 16 bit SCIF */
-# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
# define SCSPTR0 0xff923020 /* 16 bit SCIF */
-# define SCSPTR1 0xff924020 /* 16 bit SCIF */
-# define SCSPTR2 0xff925020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
-# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */
-# define SCSPTR2 0xffec0024 /* 16 bit SCIF */
-# define SCSPTR3 0xffed0024 /* 16 bit SCIF */
-# define SCSPTR4 0xffee0024 /* 16 bit SCIF */
-# define SCSPTR5 0xffef0024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
defined(CONFIG_CPU_SUBTYPE_SH7203) || \
defined(CONFIG_CPU_SUBTYPE_SH7206) || \
defined(CONFIG_CPU_SUBTYPE_SH7263)
# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
-# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */
-# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
-# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
-# if defined(CONFIG_CPU_SUBTYPE_SH7201)
-# define SCSPTR4 0xfffeA020 /* 16 bit SCIF */
-# define SCSPTR5 0xfffeA820 /* 16 bit SCIF */
-# define SCSPTR6 0xfffeB020 /* 16 bit SCIF */
-# define SCSPTR7 0xfffeB820 /* 16 bit SCIF */
-# endif
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
-# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
-# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
-# define SCSPTR1 0xffc40020 /* 16 bit SCIF */
-# define SCSPTR2 0xffc50020 /* 16 bit SCIF */
-# define SCSPTR3 0xffc60020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#else
# error CPU subtype not defined
@@ -411,7 +373,6 @@ SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
-SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16)
SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index c9014868297d..c0b7246d7339 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -519,7 +519,7 @@ static struct console sunhv_console = {
.data = &sunhv_reg,
};
-static int __devinit hv_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit hv_probe(struct platform_device *op)
{
struct uart_port *port;
unsigned long minor;
@@ -629,7 +629,7 @@ static const struct of_device_id hv_match[] = {
};
MODULE_DEVICE_TABLE(of, hv_match);
-static struct of_platform_driver hv_driver = {
+static struct platform_driver hv_driver = {
.driver = {
.name = "hv",
.owner = THIS_MODULE,
@@ -644,12 +644,12 @@ static int __init sunhv_init(void)
if (tlb_type != hypervisor)
return -ENODEV;
- return of_register_platform_driver(&hv_driver);
+ return platform_driver_register(&hv_driver);
}
static void __exit sunhv_exit(void)
{
- of_unregister_platform_driver(&hv_driver);
+ platform_driver_unregister(&hv_driver);
}
module_init(sunhv_init);
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5b246b18f42f..b5fa2a57b9da 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -1006,7 +1006,7 @@ static int __devinit sunsab_init_one(struct uart_sunsab_port *up,
return 0;
}
-static int __devinit sab_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit sab_probe(struct platform_device *op)
{
static int inst;
struct uart_sunsab_port *up;
@@ -1092,7 +1092,7 @@ static const struct of_device_id sab_match[] = {
};
MODULE_DEVICE_TABLE(of, sab_match);
-static struct of_platform_driver sab_driver = {
+static struct platform_driver sab_driver = {
.driver = {
.name = "sab",
.owner = THIS_MODULE,
@@ -1130,12 +1130,12 @@ static int __init sunsab_init(void)
}
}
- return of_register_platform_driver(&sab_driver);
+ return platform_driver_register(&sab_driver);
}
static void __exit sunsab_exit(void)
{
- of_unregister_platform_driver(&sab_driver);
+ platform_driver_unregister(&sab_driver);
if (sunsab_reg.nr) {
sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr);
}
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 551ebfe3ccbb..92aa54550e84 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1406,7 +1406,7 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
return SU_PORT_PORT;
}
-static int __devinit su_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit su_probe(struct platform_device *op)
{
static int inst;
struct device_node *dp = op->dev.of_node;
@@ -1543,7 +1543,7 @@ static const struct of_device_id su_match[] = {
};
MODULE_DEVICE_TABLE(of, su_match);
-static struct of_platform_driver su_driver = {
+static struct platform_driver su_driver = {
.driver = {
.name = "su",
.owner = THIS_MODULE,
@@ -1586,7 +1586,7 @@ static int __init sunsu_init(void)
return err;
}
- err = of_register_platform_driver(&su_driver);
+ err = platform_driver_register(&su_driver);
if (err && num_uart)
sunserial_unregister_minors(&sunsu_reg, num_uart);
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c1967ac1c07f..99ff9abf57ce 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1399,7 +1399,7 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
static int zilog_irq = -1;
-static int __devinit zs_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit zs_probe(struct platform_device *op)
{
static int kbm_inst, uart_inst;
int inst;
@@ -1540,7 +1540,7 @@ static const struct of_device_id zs_match[] = {
};
MODULE_DEVICE_TABLE(of, zs_match);
-static struct of_platform_driver zs_driver = {
+static struct platform_driver zs_driver = {
.driver = {
.name = "zs",
.owner = THIS_MODULE,
@@ -1576,7 +1576,7 @@ static int __init sunzilog_init(void)
goto out_free_tables;
}
- err = of_register_platform_driver(&zs_driver);
+ err = platform_driver_register(&zs_driver);
if (err)
goto out_unregister_uart;
@@ -1604,7 +1604,7 @@ out:
return err;
out_unregister_driver:
- of_unregister_platform_driver(&zs_driver);
+ platform_driver_unregister(&zs_driver);
out_unregister_uart:
if (num_sunzilog) {
@@ -1619,7 +1619,7 @@ out_free_tables:
static void __exit sunzilog_exit(void)
{
- of_unregister_platform_driver(&zs_driver);
+ platform_driver_unregister(&zs_driver);
if (zilog_irq != -1) {
struct uart_sunzilog_port *up = sunzilog_irq_chain;
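
Not part of the patch itself: the sunhv/sunsab/sunsu/sunzilog hunks above (and the uartlite, ucc_uart and fsl_qe_udc ones below) all follow the same of_platform_driver to platform_driver conversion: the probe callback loses its "const struct of_device_id *match" second argument, the match table is wired up through .driver.of_match_table, and of_register_platform_driver()/of_unregister_platform_driver() become platform_driver_register()/platform_driver_unregister(). A minimal sketch of the converted shape, with hypothetical "example" names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-uart", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_match);

/* Note: no "const struct of_device_id *match" second argument any more. */
static int __devinit example_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example-uart",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,
	},
	.probe = example_probe,
};

static int __init example_init(void)
{
	/* formerly of_register_platform_driver(&example_driver) */
	return platform_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	/* formerly of_unregister_platform_driver(&example_driver) */
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");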
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d2fce865b731..8af1ed83a4c0 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -19,22 +19,11 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <asm/io.h>
-#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-/* Match table for of_platform binding */
-static struct of_device_id ulite_of_match[] __devinitdata = {
- { .compatible = "xlnx,opb-uartlite-1.00.b", },
- { .compatible = "xlnx,xps-uartlite-1.00.a", },
- {}
-};
-MODULE_DEVICE_TABLE(of, ulite_of_match);
-
-#endif
-
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
@@ -571,9 +560,29 @@ static int __devexit ulite_release(struct device *dev)
* Platform bus binding
*/
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static struct of_device_id ulite_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-uartlite-1.00.b", },
+ { .compatible = "xlnx,xps-uartlite-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ulite_of_match);
+#else /* CONFIG_OF */
+#define ulite_of_match NULL
+#endif /* CONFIG_OF */
+
static int __devinit ulite_probe(struct platform_device *pdev)
{
struct resource *res, *res2;
+ int id = pdev->id;
+#ifdef CONFIG_OF
+ const __be32 *prop;
+
+ prop = of_get_property(pdev->dev.of_node, "port-number", NULL);
+ if (prop)
+ id = be32_to_cpup(prop);
+#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -583,7 +592,7 @@ static int __devinit ulite_probe(struct platform_device *pdev)
if (!res2)
return -ENODEV;
- return ulite_assign(&pdev->dev, pdev->id, res->start, res2->start);
+ return ulite_assign(&pdev->dev, id, res->start, res2->start);
}
static int __devexit ulite_remove(struct platform_device *pdev)
@@ -595,72 +604,15 @@ static int __devexit ulite_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:uartlite");
static struct platform_driver ulite_platform_driver = {
- .probe = ulite_probe,
- .remove = __devexit_p(ulite_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "uartlite",
- },
-};
-
-/* ---------------------------------------------------------------------
- * OF bus bindings
- */
-#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
-static int __devinit
-ulite_of_probe(struct platform_device *op, const struct of_device_id *match)
-{
- struct resource res;
- const unsigned int *id;
- int irq, rc;
-
- dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match);
-
- rc = of_address_to_resource(op->dev.of_node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
- }
-
- irq = irq_of_parse_and_map(op->dev.of_node, 0);
-
- id = of_get_property(op->dev.of_node, "port-number", NULL);
-
- return ulite_assign(&op->dev, id ? *id : -1, res.start, irq);
-}
-
-static int __devexit ulite_of_remove(struct platform_device *op)
-{
- return ulite_release(&op->dev);
-}
-
-static struct of_platform_driver ulite_of_driver = {
- .probe = ulite_of_probe,
- .remove = __devexit_p(ulite_of_remove),
+ .probe = ulite_probe,
+ .remove = __devexit_p(ulite_remove),
.driver = {
- .name = "uartlite",
.owner = THIS_MODULE,
+ .name = "uartlite",
.of_match_table = ulite_of_match,
},
};
-/* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __init ulite_of_register(void)
-{
- pr_debug("uartlite: calling of_register_platform_driver()\n");
- return of_register_platform_driver(&ulite_of_driver);
-}
-
-static inline void __exit ulite_of_unregister(void)
-{
- of_unregister_platform_driver(&ulite_of_driver);
-}
-#else /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */
-/* Appropriate config not enabled; do nothing helpers */
-static inline int __init ulite_of_register(void) { return 0; }
-static inline void __exit ulite_of_unregister(void) { }
-#endif /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */
-
/* ---------------------------------------------------------------------
* Module setup/teardown
*/
@@ -674,10 +626,6 @@ int __init ulite_init(void)
if (ret)
goto err_uart;
- ret = ulite_of_register();
- if (ret)
- goto err_of;
-
pr_debug("uartlite: calling platform_driver_register()\n");
ret = platform_driver_register(&ulite_platform_driver);
if (ret)
@@ -686,8 +634,6 @@ int __init ulite_init(void)
return 0;
err_plat:
- ulite_of_unregister();
-err_of:
uart_unregister_driver(&ulite_uart_driver);
err_uart:
printk(KERN_ERR "registering uartlite driver failed: err=%i", ret);
@@ -697,7 +643,6 @@ err_uart:
void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- ulite_of_unregister();
uart_unregister_driver(&ulite_uart_driver);
}
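
Not part of the patch itself: the merged ulite_probe() above now derives the line number from an optional "port-number" device-tree property, falling back to pdev->id. The same lookup, pulled out into a hypothetical helper purely for illustration:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/byteorder.h>

static int example_port_id(struct platform_device *pdev)
{
	int id = pdev->id;
#ifdef CONFIG_OF
	const __be32 *prop;

	/* An explicit "port-number" property overrides the platform id. */
	prop = of_get_property(pdev->dev.of_node, "port-number", NULL);
	if (prop)
		id = be32_to_cpup(prop);
#endif
	return id;
}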
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3f4848e2174a..ff51dae1df0c 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1194,8 +1194,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
release_firmware(fw);
}
-static int ucc_uart_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int ucc_uart_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
const unsigned int *iprop; /* Integer OF properties */
@@ -1485,7 +1484,7 @@ static struct of_device_id ucc_uart_match[] = {
};
MODULE_DEVICE_TABLE(of, ucc_uart_match);
-static struct of_platform_driver ucc_uart_of_driver = {
+static struct platform_driver ucc_uart_of_driver = {
.driver = {
.name = "ucc_uart",
.owner = THIS_MODULE,
@@ -1510,7 +1509,7 @@ static int __init ucc_uart_init(void)
return ret;
}
- ret = of_register_platform_driver(&ucc_uart_of_driver);
+ ret = platform_driver_register(&ucc_uart_of_driver);
if (ret)
printk(KERN_ERR
"ucc-uart: could not register platform driver\n");
@@ -1523,7 +1522,7 @@ static void __exit ucc_uart_exit(void)
printk(KERN_INFO
"Freescale QUICC Engine UART device driver unloading\n");
- of_unregister_platform_driver(&ucc_uart_of_driver);
+ platform_driver_unregister(&ucc_uart_of_driver);
uart_unregister_driver(&ucc_uart_driver);
}
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index fceea5e4e02f..41b6e51188e4 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -31,7 +31,6 @@ config USB_ARCH_HAS_OHCI
# ARM:
default y if SA1111
default y if ARCH_OMAP
- default y if ARCH_LH7A404
default y if ARCH_S3C2410
default y if PXA27x
default y if PXA3xx
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f71e8e307e0f..64a035ba2eab 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -335,7 +335,7 @@ void usb_hcd_pci_shutdown(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
#ifdef CONFIG_PPC_PMAC
static void powermac_set_asic(struct pci_dev *pci_dev, int enable)
@@ -580,4 +580,4 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
};
EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c6826e43..c03c65b63b9d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -616,7 +616,7 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
}
/*
- * Disable a port and mark a logical connnect-change event, so that some
+ * Disable a port and mark a logical connect-change event, so that some
* time later khubd will disconnect() any existing usb_device on the port
* and will re-enumerate if there actually is a device attached.
*/
@@ -1465,6 +1465,7 @@ void usb_set_device_state(struct usb_device *udev,
enum usb_device_state new_state)
{
unsigned long flags;
+ int wakeup = -1;
spin_lock_irqsave(&device_state_lock, flags);
if (udev->state == USB_STATE_NOTATTACHED)
@@ -1479,11 +1480,10 @@ void usb_set_device_state(struct usb_device *udev,
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_set_wakeup_capable(&udev->dev,
- (udev->actconfig->desc.bmAttributes
- & USB_CONFIG_ATT_WAKEUP));
+ wakeup = udev->actconfig->desc.bmAttributes
+ & USB_CONFIG_ATT_WAKEUP;
else
- device_set_wakeup_capable(&udev->dev, 0);
+ wakeup = 0;
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
@@ -1495,6 +1495,8 @@ void usb_set_device_state(struct usb_device *udev,
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
+ if (wakeup >= 0)
+ device_set_wakeup_capable(&udev->dev, wakeup);
}
EXPORT_SYMBOL_GPL(usb_set_device_state);
@@ -2681,17 +2683,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if (!udev->config && oldspeed == USB_SPEED_SUPER) {
- /* Don't reset USB 3.0 devices during an initial setup */
- usb_set_device_state(udev, USB_STATE_DEFAULT);
- } else {
- /* Reset the device; full speed may morph to high speed */
- /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
- retval = hub_port_reset(hub, port1, udev, delay);
- if (retval < 0) /* error or disconnect */
- goto fail;
- /* success, speed is known */
- }
+ /* Reset the device; full speed may morph to high speed */
+ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ retval = hub_port_reset(hub, port1, udev, delay);
+ if (retval < 0) /* error or disconnect */
+ goto fail;
+ /* success, speed is known */
+
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
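
Not part of the patch itself: the usb_set_device_state() hunk above records the wakeup decision in a local "wakeup" variable while device_state_lock is held, and only calls device_set_wakeup_capable() after dropping the lock. A minimal sketch of that decide-under-the-lock, act-after-the-unlock pattern; example_apply_wakeup() is a hypothetical stand-in for the driver-core call, and the state values are arbitrary:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_state_lock);
static int example_state;

static void example_apply_wakeup(int wakeup)
{
	/* stands in for device_set_wakeup_capable(&udev->dev, wakeup) */
}

static void example_set_state(int new_state, int configured_wakeup)
{
	unsigned long flags;
	int wakeup = -1;	/* -1: leave the wakeup setting untouched */

	spin_lock_irqsave(&example_state_lock, flags);
	example_state = new_state;
	if (new_state > 0)
		wakeup = configured_wakeup;	/* decide while locked */
	spin_unlock_irqrestore(&example_state_lock, flags);

	if (wakeup >= 0)			/* act outside the lock */
		example_apply_wakeup(wakeup);
}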
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c595432d6f..81ce6a8e1d94 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index d50099675f28..e8cbbdac7a15 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -176,18 +176,6 @@ config USB_FSL_USB2
default USB_GADGET
select USB_GADGET_SELECTED
-config USB_GADGET_LH7A40X
- boolean "LH7A40X"
- depends on ARCH_LH7A40X
- help
- This driver provides USB Device Controller driver for LH7A40x
-
-config USB_LH7A40X
- tristate
- depends on USB_GADGET_LH7A40X
- default USB_GADGET
- select USB_GADGET_SELECTED
-
config USB_GADGET_OMAP
boolean "OMAP USB Device Controller"
depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 55f5e8ae5924..a2f7f9a4f4a8 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
obj-$(CONFIG_USB_IMX) += imx_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
obj-$(CONFIG_USB_OMAP) += omap_udc.o
-obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
obj-$(CONFIG_USB_AT91) += at91_udc.o
obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a058745..5e1495097ec3 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (unlikely(!skb))
break;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
- req->actual);
- page = NULL;
- if (req->actual < req->length) { /* Last fragment */
+ if (skb->len == 0) { /* First fragment */
skb->protocol = htons(ETH_P_PHONET);
skb_reset_mac_header(skb);
- pskb_pull(skb, 1);
+ /* Can't use pskb_pull() on page in IRQ */
+ memcpy(skb_put(skb, 1), page_address(page), 1);
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb->len == 0, req->actual);
+ page = NULL;
+
+ if (req->actual < req->length) { /* Last fragment */
skb->dev = dev;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 792d5ef40137..aee7e3c53c38 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2523,8 +2523,7 @@ static void qe_udc_release(struct device *dev)
}
/* Driver probe functions */
-static int __devinit qe_udc_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit qe_udc_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct qe_ep *ep;
@@ -2532,6 +2531,9 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev,
unsigned int i;
const void *prop;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+
prop = of_get_property(np, "mode", NULL);
if (!prop || strcmp(prop, "peripheral"))
return -ENODEV;
@@ -2543,7 +2545,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev,
return -ENOMEM;
}
- udc_controller->soc_type = (unsigned long)match->data;
+ udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data;
udc_controller->usb_regs = of_iomap(np, 0);
if (!udc_controller->usb_regs) {
ret = -ENOMEM;
@@ -2768,7 +2770,7 @@ static const struct of_device_id qe_udc_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, qe_udc_match);
-static struct of_platform_driver udc_driver = {
+static struct platform_driver udc_driver = {
.driver = {
.name = (char *)driver_name,
.owner = THIS_MODULE,
@@ -2786,12 +2788,12 @@ static int __init qe_udc_init(void)
{
printk(KERN_INFO "%s: %s, %s\n", driver_name, driver_desc,
DRIVER_VERSION);
- return of_register_platform_driver(&udc_driver);
+ return platform_driver_register(&udc_driver);
}
static void __exit qe_udc_exit(void)
{
- of_unregister_platform_driver(&udc_driver);
+ platform_driver_unregister(&udc_driver);
}
module_init(qe_udc_init);
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5c2720d64ffa..e896f6359dfe 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -45,12 +45,6 @@
#define gadget_is_goku(g) 0
#endif
-#ifdef CONFIG_USB_GADGET_LH7A40X
-#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
-#else
-#define gadget_is_lh7a40x(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_OMAP
#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
#else
@@ -181,8 +175,6 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x06;
else if (gadget_is_omap(gadget))
return 0x08;
- else if (gadget_is_lh7a40x(gadget))
- return 0x09;
else if (gadget_is_pxa27x(gadget))
return 0x11;
else if (gadget_is_s3c2410(gadget))
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
deleted file mode 100644
index 6b58bd8ce623..000000000000
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ /dev/null
@@ -1,2152 +0,0 @@
-/*
- * linux/drivers/usb/gadget/lh7a40x_udc.c
- * Sharp LH7A40x on-chip full speed USB device controllers
- *
- * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID
- * Copyright (C) 2004 Bo Henriksen, Nordic ID
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "lh7a40x_udc.h"
-
-//#define DEBUG printk
-//#define DEBUG_EP0 printk
-//#define DEBUG_SETUP printk
-
-#ifndef DEBUG_EP0
-# define DEBUG_EP0(fmt,args...)
-#endif
-#ifndef DEBUG_SETUP
-# define DEBUG_SETUP(fmt,args...)
-#endif
-#ifndef DEBUG
-# define NO_STATES
-# define DEBUG(fmt,args...)
-#endif
-
-#define DRIVER_DESC "LH7A40x USB Device Controller"
-#define DRIVER_VERSION __DATE__
-
-#ifndef _BIT /* FIXME - what happended to _BIT in 2.6.7bk18? */
-#define _BIT(x) (1<<(x))
-#endif
-
-struct lh7a40x_udc *the_controller;
-
-static const char driver_name[] = "lh7a40x_udc";
-static const char driver_desc[] = DRIVER_DESC;
-static const char ep0name[] = "ep0-control";
-
-/*
- Local definintions.
-*/
-
-#ifndef NO_STATES
-static char *state_names[] = {
- "WAIT_FOR_SETUP",
- "DATA_STATE_XMIT",
- "DATA_STATE_NEED_ZLP",
- "WAIT_FOR_OUT_STATUS",
- "DATA_STATE_RECV"
-};
-#endif
-
-/*
- Local declarations.
-*/
-static int lh7a40x_ep_enable(struct usb_ep *ep,
- const struct usb_endpoint_descriptor *);
-static int lh7a40x_ep_disable(struct usb_ep *ep);
-static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t);
-static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
-static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t);
-static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
-static int lh7a40x_set_halt(struct usb_ep *ep, int);
-static int lh7a40x_fifo_status(struct usb_ep *ep);
-static void lh7a40x_fifo_flush(struct usb_ep *ep);
-static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep);
-static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr);
-
-static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req,
- int status);
-static void pio_irq_enable(int bEndpointAddress);
-static void pio_irq_disable(int bEndpointAddress);
-static void stop_activity(struct lh7a40x_udc *dev,
- struct usb_gadget_driver *driver);
-static void flush(struct lh7a40x_ep *ep);
-static void udc_enable(struct lh7a40x_udc *dev);
-static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address);
-
-static struct usb_ep_ops lh7a40x_ep_ops = {
- .enable = lh7a40x_ep_enable,
- .disable = lh7a40x_ep_disable,
-
- .alloc_request = lh7a40x_alloc_request,
- .free_request = lh7a40x_free_request,
-
- .queue = lh7a40x_queue,
- .dequeue = lh7a40x_dequeue,
-
- .set_halt = lh7a40x_set_halt,
- .fifo_status = lh7a40x_fifo_status,
- .fifo_flush = lh7a40x_fifo_flush,
-};
-
-/* Inline code */
-
-static __inline__ int write_packet(struct lh7a40x_ep *ep,
- struct lh7a40x_request *req, int max)
-{
- u8 *buf;
- int length, count;
- volatile u32 *fifo = (volatile u32 *)ep->fifo;
-
- buf = req->req.buf + req->req.actual;
- prefetch(buf);
-
- length = req->req.length - req->req.actual;
- length = min(length, max);
- req->req.actual += length;
-
- DEBUG("Write %d (max %d), fifo %p\n", length, max, fifo);
-
- count = length;
- while (count--) {
- *fifo = *buf++;
- }
-
- return length;
-}
-
-static __inline__ void usb_set_index(u32 ep)
-{
- *(volatile u32 *)io_p2v(USB_INDEX) = ep;
-}
-
-static __inline__ u32 usb_read(u32 port)
-{
- return *(volatile u32 *)io_p2v(port);
-}
-
-static __inline__ void usb_write(u32 val, u32 port)
-{
- *(volatile u32 *)io_p2v(port) = val;
-}
-
-static __inline__ void usb_set(u32 val, u32 port)
-{
- volatile u32 *ioport = (volatile u32 *)io_p2v(port);
- u32 after = (*ioport) | val;
- *ioport = after;
-}
-
-static __inline__ void usb_clear(u32 val, u32 port)
-{
- volatile u32 *ioport = (volatile u32 *)io_p2v(port);
- u32 after = (*ioport) & ~val;
- *ioport = after;
-}
-
-/*-------------------------------------------------------------------------*/
-
-#define GPIO_PORTC_DR (0x80000E08)
-#define GPIO_PORTC_DDR (0x80000E18)
-#define GPIO_PORTC_PDR (0x80000E70)
-
-/* get port C pin data register */
-#define get_portc_pdr(bit) ((usb_read(GPIO_PORTC_PDR) & _BIT(bit)) != 0)
-/* get port C data direction register */
-#define get_portc_ddr(bit) ((usb_read(GPIO_PORTC_DDR) & _BIT(bit)) != 0)
-/* set port C data register */
-#define set_portc_dr(bit, val) (val ? usb_set(_BIT(bit), GPIO_PORTC_DR) : usb_clear(_BIT(bit), GPIO_PORTC_DR))
-/* set port C data direction register */
-#define set_portc_ddr(bit, val) (val ? usb_set(_BIT(bit), GPIO_PORTC_DDR) : usb_clear(_BIT(bit), GPIO_PORTC_DDR))
-
-/*
- * LPD7A404 GPIO's:
- * Port C bit 1 = USB Port 1 Power Enable
- * Port C bit 2 = USB Port 1 Data Carrier Detect
- */
-#define is_usb_connected() get_portc_pdr(2)
-
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-
-static const char proc_node_name[] = "driver/udc";
-
-static int
-udc_proc_read(char *page, char **start, off_t off, int count,
- int *eof, void *_dev)
-{
- char *buf = page;
- struct lh7a40x_udc *dev = _dev;
- char *next = buf;
- unsigned size = count;
- unsigned long flags;
- int t;
-
- if (off != 0)
- return 0;
-
- local_irq_save(flags);
-
- /* basic device status */
- t = scnprintf(next, size,
- DRIVER_DESC "\n"
- "%s version: %s\n"
- "Gadget driver: %s\n"
- "Host: %s\n\n",
- driver_name, DRIVER_VERSION,
- dev->driver ? dev->driver->driver.name : "(none)",
- is_usb_connected()? "full speed" : "disconnected");
- size -= t;
- next += t;
-
- t = scnprintf(next, size,
- "GPIO:\n"
- " Port C bit 1: %d, dir %d\n"
- " Port C bit 2: %d, dir %d\n\n",
- get_portc_pdr(1), get_portc_ddr(1),
- get_portc_pdr(2), get_portc_ddr(2)
- );
- size -= t;
- next += t;
-
- t = scnprintf(next, size,
- "DCP pullup: %d\n\n",
- (usb_read(USB_PM) & PM_USB_DCP) != 0);
- size -= t;
- next += t;
-
- local_irq_restore(flags);
- *eof = 1;
- return count - size;
-}
-
-#define create_proc_files() create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
-#define remove_proc_files() remove_proc_entry(proc_node_name, NULL)
-
-#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
-
-#define create_proc_files() do {} while (0)
-#define remove_proc_files() do {} while (0)
-
-#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-
-/*
- * udc_disable - disable USB device controller
- */
-static void udc_disable(struct lh7a40x_udc *dev)
-{
- DEBUG("%s, %p\n", __func__, dev);
-
- udc_set_address(dev, 0);
-
- /* Disable interrupts */
- usb_write(0, USB_IN_INT_EN);
- usb_write(0, USB_OUT_INT_EN);
- usb_write(0, USB_INT_EN);
-
- /* Disable the USB */
- usb_write(0, USB_PM);
-
-#ifdef CONFIG_ARCH_LH7A404
- /* Disable USB power */
- set_portc_dr(1, 0);
-#endif
-
- /* if hardware supports it, disconnect from usb */
- /* make_usb_disappear(); */
-
- dev->ep0state = WAIT_FOR_SETUP;
- dev->gadget.speed = USB_SPEED_UNKNOWN;
- dev->usb_address = 0;
-}
-
-/*
- * udc_reinit - initialize software state
- */
-static void udc_reinit(struct lh7a40x_udc *dev)
-{
- u32 i;
-
- DEBUG("%s, %p\n", __func__, dev);
-
- /* device/ep0 records init */
- INIT_LIST_HEAD(&dev->gadget.ep_list);
- INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
- dev->ep0state = WAIT_FOR_SETUP;
-
- /* basic endpoint records init */
- for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
- struct lh7a40x_ep *ep = &dev->ep[i];
-
- if (i != 0)
- list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
-
- ep->desc = 0;
- ep->stopped = 0;
- INIT_LIST_HEAD(&ep->queue);
- ep->pio_irqs = 0;
- }
-
- /* the rest was statically initialized, and is read-only */
-}
-
-#define BYTES2MAXP(x) (x / 8)
-#define MAXP2BYTES(x) (x * 8)
-
-/* until it's enabled, this UDC should be completely invisible
- * to any USB host.
- */
-static void udc_enable(struct lh7a40x_udc *dev)
-{
- int ep;
-
- DEBUG("%s, %p\n", __func__, dev);
-
- dev->gadget.speed = USB_SPEED_UNKNOWN;
-
-#ifdef CONFIG_ARCH_LH7A404
- /* Set Port C bit 1 & 2 as output */
- set_portc_ddr(1, 1);
- set_portc_ddr(2, 1);
-
- /* Enable USB power */
- set_portc_dr(1, 0);
-#endif
-
- /*
- * C.f Chapter 18.1.3.1 Initializing the USB
- */
-
- /* Disable the USB */
- usb_clear(PM_USB_ENABLE, USB_PM);
-
- /* Reset APB & I/O sides of the USB */
- usb_set(USB_RESET_APB | USB_RESET_IO, USB_RESET);
- mdelay(5);
- usb_clear(USB_RESET_APB | USB_RESET_IO, USB_RESET);
-
- /* Set MAXP values for each */
- for (ep = 0; ep < UDC_MAX_ENDPOINTS; ep++) {
- struct lh7a40x_ep *ep_reg = &dev->ep[ep];
- u32 csr;
-
- usb_set_index(ep);
-
- switch (ep_reg->ep_type) {
- case ep_bulk_in:
- case ep_interrupt:
- usb_clear(USB_IN_CSR2_USB_DMA_EN | USB_IN_CSR2_AUTO_SET,
- ep_reg->csr2);
- /* Fall through */
- case ep_control:
- usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)),
- USB_IN_MAXP);
- break;
- case ep_bulk_out:
- usb_clear(USB_OUT_CSR2_USB_DMA_EN |
- USB_OUT_CSR2_AUTO_CLR, ep_reg->csr2);
- usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)),
- USB_OUT_MAXP);
- break;
- }
-
- /* Read & Write CSR1, just in case */
- csr = usb_read(ep_reg->csr1);
- usb_write(csr, ep_reg->csr1);
-
- flush(ep_reg);
- }
-
- /* Disable interrupts */
- usb_write(0, USB_IN_INT_EN);
- usb_write(0, USB_OUT_INT_EN);
- usb_write(0, USB_INT_EN);
-
- /* Enable interrupts */
- usb_set(USB_IN_INT_EP0, USB_IN_INT_EN);
- usb_set(USB_INT_RESET_INT | USB_INT_RESUME_INT, USB_INT_EN);
- /* Dont enable rest of the interrupts */
- /* usb_set(USB_IN_INT_EP3 | USB_IN_INT_EP1 | USB_IN_INT_EP0, USB_IN_INT_EN);
- usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN); */
-
- /* Enable SUSPEND */
- usb_set(PM_ENABLE_SUSPEND, USB_PM);
-
- /* Enable the USB */
- usb_set(PM_USB_ENABLE, USB_PM);
-
-#ifdef CONFIG_ARCH_LH7A404
- /* NOTE: DOES NOT WORK! */
- /* Let host detect UDC:
- * Software must write a 0 to the PMR:DCP_CTRL bit to turn this
- * transistor on and pull the USBDP pin HIGH.
- */
- /* usb_clear(PM_USB_DCP, USB_PM);
- usb_set(PM_USB_DCP, USB_PM); */
-#endif
-}
-
-/*
- Register entry point for the peripheral controller driver.
-*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
-{
- struct lh7a40x_udc *dev = the_controller;
- int retval;
-
- DEBUG("%s: %s\n", __func__, driver->driver.name);
-
- if (!driver
- || driver->speed != USB_SPEED_FULL
- || !bind
- || !driver->disconnect
- || !driver->setup)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- if (dev->driver)
- return -EBUSY;
-
- /* first hook up the driver ... */
- dev->driver = driver;
- dev->gadget.dev.driver = &driver->driver;
-
- device_add(&dev->gadget.dev);
- retval = bind(&dev->gadget);
- if (retval) {
- printk(KERN_WARNING "%s: bind to driver %s --> error %d\n",
- dev->gadget.name, driver->driver.name, retval);
- device_del(&dev->gadget.dev);
-
- dev->driver = 0;
- dev->gadget.dev.driver = 0;
- return retval;
- }
-
- /* ... then enable host detection and ep0; and we're ready
- * for set_configuration as well as eventual disconnect.
- * NOTE: this shouldn't power up until later.
- */
- printk(KERN_WARNING "%s: registered gadget driver '%s'\n",
- dev->gadget.name, driver->driver.name);
-
- udc_enable(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
-/*
- Unregister entry point for the peripheral controller driver.
-*/
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
- struct lh7a40x_udc *dev = the_controller;
- unsigned long flags;
-
- if (!dev)
- return -ENODEV;
- if (!driver || driver != dev->driver || !driver->unbind)
- return -EINVAL;
-
- spin_lock_irqsave(&dev->lock, flags);
- dev->driver = 0;
- stop_activity(dev, driver);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- driver->unbind(&dev->gadget);
- dev->gadget.dev.driver = NULL;
- device_del(&dev->gadget.dev);
-
- udc_disable(dev);
-
- DEBUG("unregistered gadget driver '%s'\n", driver->driver.name);
- return 0;
-}
-
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
-/*-------------------------------------------------------------------------*/
-
-/** Write request to FIFO (max write == maxp size)
- * Return: 0 = still running, 1 = completed, negative = errno
- * NOTE: INDEX register must be set for EP
- */
-static int write_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
-{
- u32 max;
- u32 csr;
-
- max = le16_to_cpu(ep->desc->wMaxPacketSize);
-
- csr = usb_read(ep->csr1);
- DEBUG("CSR: %x %d\n", csr, csr & USB_IN_CSR1_FIFO_NOT_EMPTY);
-
- if (!(csr & USB_IN_CSR1_FIFO_NOT_EMPTY)) {
- unsigned count;
- int is_last, is_short;
-
- count = write_packet(ep, req, max);
- usb_set(USB_IN_CSR1_IN_PKT_RDY, ep->csr1);
-
- /* last packet is usually short (or a zlp) */
- if (unlikely(count != max))
- is_last = is_short = 1;
- else {
- if (likely(req->req.length != req->req.actual)
- || req->req.zero)
- is_last = 0;
- else
- is_last = 1;
- /* interrupt/iso maxpacket may not fill the fifo */
- is_short = unlikely(max < ep_maxpacket(ep));
- }
-
- DEBUG("%s: wrote %s %d bytes%s%s %d left %p\n", __func__,
- ep->ep.name, count,
- is_last ? "/L" : "", is_short ? "/S" : "",
- req->req.length - req->req.actual, req);
-
- /* requests complete when all IN data is in the FIFO */
- if (is_last) {
- done(ep, req, 0);
- if (list_empty(&ep->queue)) {
- pio_irq_disable(ep_index(ep));
- }
- return 1;
- }
- } else {
- DEBUG("Hmm.. %d ep FIFO is not empty!\n", ep_index(ep));
- }
-
- return 0;
-}
-
-/** Read to request from FIFO (max read == bytes in fifo)
- * Return: 0 = still running, 1 = completed, negative = errno
- * NOTE: INDEX register must be set for EP
- */
-static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
-{
- u32 csr;
- u8 *buf;
- unsigned bufferspace, count, is_short;
- volatile u32 *fifo = (volatile u32 *)ep->fifo;
-
- /* make sure there's a packet in the FIFO. */
- csr = usb_read(ep->csr1);
- if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
- DEBUG("%s: Packet NOT ready!\n", __func__);
- return -EINVAL;
- }
-
- buf = req->req.buf + req->req.actual;
- prefetchw(buf);
- bufferspace = req->req.length - req->req.actual;
-
- /* read all bytes from this packet */
- count = usb_read(USB_OUT_FIFO_WC1);
- req->req.actual += min(count, bufferspace);
-
- is_short = (count < ep->ep.maxpacket);
- DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
- ep->ep.name, csr, count,
- is_short ? "/S" : "", req, req->req.actual, req->req.length);
-
- while (likely(count-- != 0)) {
- u8 byte = (u8) (*fifo & 0xff);
-
- if (unlikely(bufferspace == 0)) {
- /* this happens when the driver's buffer
- * is smaller than what the host sent.
- * discard the extra data.
- */
- if (req->req.status != -EOVERFLOW)
- printk(KERN_WARNING "%s overflow %d\n",
- ep->ep.name, count);
- req->req.status = -EOVERFLOW;
- } else {
- *buf++ = byte;
- bufferspace--;
- }
- }
-
- usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);
-
- /* completion */
- if (is_short || req->req.actual == req->req.length) {
- done(ep, req, 0);
- usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
-
- if (list_empty(&ep->queue))
- pio_irq_disable(ep_index(ep));
- return 1;
- }
-
- /* finished that packet. the next one may be waiting... */
- return 0;
-}
-
-/*
- * done - retire a request; caller blocked irqs
- * INDEX register is preserved to keep same
- */
-static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req, int status)
-{
- unsigned int stopped = ep->stopped;
- u32 index;
-
- DEBUG("%s, %p\n", __func__, ep);
- list_del_init(&req->queue);
-
- if (likely(req->req.status == -EINPROGRESS))
- req->req.status = status;
- else
- status = req->req.status;
-
- if (status && status != -ESHUTDOWN)
- DEBUG("complete %s req %p stat %d len %u/%u\n",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
-
- /* don't modify queue heads during completion callback */
- ep->stopped = 1;
- /* Read current index (completion may modify it) */
- index = usb_read(USB_INDEX);
-
- spin_unlock(&ep->dev->lock);
- req->req.complete(&ep->ep, &req->req);
- spin_lock(&ep->dev->lock);
-
- /* Restore index */
- usb_set_index(index);
- ep->stopped = stopped;
-}
-
-/** Enable EP interrupt */
-static void pio_irq_enable(int ep)
-{
- DEBUG("%s: %d\n", __func__, ep);
-
- switch (ep) {
- case 1:
- usb_set(USB_IN_INT_EP1, USB_IN_INT_EN);
- break;
- case 2:
- usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN);
- break;
- case 3:
- usb_set(USB_IN_INT_EP3, USB_IN_INT_EN);
- break;
- default:
- DEBUG("Unknown endpoint: %d\n", ep);
- break;
- }
-}
-
-/** Disable EP interrupt */
-static void pio_irq_disable(int ep)
-{
- DEBUG("%s: %d\n", __func__, ep);
-
- switch (ep) {
- case 1:
- usb_clear(USB_IN_INT_EP1, USB_IN_INT_EN);
- break;
- case 2:
- usb_clear(USB_OUT_INT_EP2, USB_OUT_INT_EN);
- break;
- case 3:
- usb_clear(USB_IN_INT_EP3, USB_IN_INT_EN);
- break;
- default:
- DEBUG("Unknown endpoint: %d\n", ep);
- break;
- }
-}
-
-/*
- * nuke - dequeue ALL requests
- */
-void nuke(struct lh7a40x_ep *ep, int status)
-{
- struct lh7a40x_request *req;
-
- DEBUG("%s, %p\n", __func__, ep);
-
- /* Flush FIFO */
- flush(ep);
-
- /* called with irqs blocked */
- while (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
- done(ep, req, status);
- }
-
- /* Disable IRQ if EP is enabled (has descriptor) */
- if (ep->desc)
- pio_irq_disable(ep_index(ep));
-}
-
-/*
-void nuke_all(struct lh7a40x_udc *dev)
-{
- int n;
- for(n=0; n<UDC_MAX_ENDPOINTS; n++) {
- struct lh7a40x_ep *ep = &dev->ep[n];
- usb_set_index(n);
- nuke(ep, 0);
- }
-}*/
-
-/*
-static void flush_all(struct lh7a40x_udc *dev)
-{
- int n;
- for (n = 0; n < UDC_MAX_ENDPOINTS; n++)
- {
- struct lh7a40x_ep *ep = &dev->ep[n];
- flush(ep);
- }
-}
-*/
-
-/** Flush EP
- * NOTE: INDEX register must be set before this call
- */
-static void flush(struct lh7a40x_ep *ep)
-{
- DEBUG("%s, %p\n", __func__, ep);
-
- switch (ep->ep_type) {
- case ep_control:
- /* check, by implication c.f. 15.1.2.11 */
- break;
-
- case ep_bulk_in:
- case ep_interrupt:
- /* if(csr & USB_IN_CSR1_IN_PKT_RDY) */
- usb_set(USB_IN_CSR1_FIFO_FLUSH, ep->csr1);
- break;
-
- case ep_bulk_out:
- /* if(csr & USB_OUT_CSR1_OUT_PKT_RDY) */
- usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
- break;
- }
-}
-
-/**
- * lh7a40x_in_epn - handle IN interrupt
- */
-static void lh7a40x_in_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
-{
- u32 csr;
- struct lh7a40x_ep *ep = &dev->ep[ep_idx];
- struct lh7a40x_request *req;
-
- usb_set_index(ep_idx);
-
- csr = usb_read(ep->csr1);
- DEBUG("%s: %d, csr %x\n", __func__, ep_idx, csr);
-
- if (csr & USB_IN_CSR1_SENT_STALL) {
- DEBUG("USB_IN_CSR1_SENT_STALL\n");
- usb_set(USB_IN_CSR1_SENT_STALL /*|USB_IN_CSR1_SEND_STALL */ ,
- ep->csr1);
- return;
- }
-
- if (!ep->desc) {
- DEBUG("%s: NO EP DESC\n", __func__);
- return;
- }
-
- if (list_empty(&ep->queue))
- req = 0;
- else
- req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
-
- DEBUG("req: %p\n", req);
-
- if (!req)
- return;
-
- write_fifo(ep, req);
-}
-
-/* ********************************************************************************************* */
-/* Bulk OUT (recv)
- */
-
-static void lh7a40x_out_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
-{
- struct lh7a40x_ep *ep = &dev->ep[ep_idx];
- struct lh7a40x_request *req;
-
- DEBUG("%s: %d\n", __func__, ep_idx);
-
- usb_set_index(ep_idx);
-
- if (ep->desc) {
- u32 csr;
- csr = usb_read(ep->csr1);
-
- while ((csr =
- usb_read(ep->
- csr1)) & (USB_OUT_CSR1_OUT_PKT_RDY |
- USB_OUT_CSR1_SENT_STALL)) {
- DEBUG("%s: %x\n", __func__, csr);
-
- if (csr & USB_OUT_CSR1_SENT_STALL) {
- DEBUG("%s: stall sent, flush fifo\n",
- __func__);
- /* usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1); */
- flush(ep);
- } else if (csr & USB_OUT_CSR1_OUT_PKT_RDY) {
- if (list_empty(&ep->queue))
- req = 0;
- else
- req =
- list_entry(ep->queue.next,
- struct lh7a40x_request,
- queue);
-
- if (!req) {
- printk(KERN_WARNING
- "%s: NULL REQ %d\n",
- __func__, ep_idx);
- flush(ep);
- break;
- } else {
- read_fifo(ep, req);
- }
- }
-
- }
-
- } else {
- /* Throw packet away.. */
- printk(KERN_WARNING "%s: No descriptor?!?\n", __func__);
- flush(ep);
- }
-}
-
-static void stop_activity(struct lh7a40x_udc *dev,
- struct usb_gadget_driver *driver)
-{
- int i;
-
- /* don't disconnect drivers more than once */
- if (dev->gadget.speed == USB_SPEED_UNKNOWN)
- driver = 0;
- dev->gadget.speed = USB_SPEED_UNKNOWN;
-
- /* prevent new request submissions, kill any outstanding requests */
- for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
- struct lh7a40x_ep *ep = &dev->ep[i];
- ep->stopped = 1;
-
- usb_set_index(i);
- nuke(ep, -ESHUTDOWN);
- }
-
- /* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
- driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
-
- /* re-init driver-visible data structures */
- udc_reinit(dev);
-}
-
-/** Handle USB RESET interrupt
- */
-static void lh7a40x_reset_intr(struct lh7a40x_udc *dev)
-{
-#if 0 /* def CONFIG_ARCH_LH7A404 */
- /* Does not work always... */
-
- DEBUG("%s: %d\n", __func__, dev->usb_address);
-
- if (!dev->usb_address) {
- /*usb_set(USB_RESET_IO, USB_RESET);
- mdelay(5);
- usb_clear(USB_RESET_IO, USB_RESET); */
- return;
- }
- /* Put the USB controller into reset. */
- usb_set(USB_RESET_IO, USB_RESET);
-
- /* Set Device ID to 0 */
- udc_set_address(dev, 0);
-
- /* Let PLL2 settle down */
- mdelay(5);
-
- /* Release the USB controller from reset */
- usb_clear(USB_RESET_IO, USB_RESET);
-
- /* Re-enable UDC */
- udc_enable(dev);
-
-#endif
- dev->gadget.speed = USB_SPEED_FULL;
-}
-
-/*
- * lh7a40x usb client interrupt handler.
- */
-static irqreturn_t lh7a40x_udc_irq(int irq, void *_dev)
-{
- struct lh7a40x_udc *dev = _dev;
-
- DEBUG("\n\n");
-
- spin_lock(&dev->lock);
-
- for (;;) {
- u32 intr_in = usb_read(USB_IN_INT);
- u32 intr_out = usb_read(USB_OUT_INT);
- u32 intr_int = usb_read(USB_INT);
-
- /* Test also against enable bits.. (lh7a40x errata).. Sigh.. */
- u32 in_en = usb_read(USB_IN_INT_EN);
- u32 out_en = usb_read(USB_OUT_INT_EN);
-
- if (!intr_out && !intr_in && !intr_int)
- break;
-
- DEBUG("%s (on state %s)\n", __func__,
- state_names[dev->ep0state]);
- DEBUG("intr_out = %x\n", intr_out);
- DEBUG("intr_in = %x\n", intr_in);
- DEBUG("intr_int = %x\n", intr_int);
-
- if (intr_in) {
- usb_write(intr_in, USB_IN_INT);
-
- if ((intr_in & USB_IN_INT_EP1)
- && (in_en & USB_IN_INT_EP1)) {
- DEBUG("USB_IN_INT_EP1\n");
- lh7a40x_in_epn(dev, 1, intr_in);
- }
- if ((intr_in & USB_IN_INT_EP3)
- && (in_en & USB_IN_INT_EP3)) {
- DEBUG("USB_IN_INT_EP3\n");
- lh7a40x_in_epn(dev, 3, intr_in);
- }
- if (intr_in & USB_IN_INT_EP0) {
- DEBUG("USB_IN_INT_EP0 (control)\n");
- lh7a40x_handle_ep0(dev, intr_in);
- }
- }
-
- if (intr_out) {
- usb_write(intr_out, USB_OUT_INT);
-
- if ((intr_out & USB_OUT_INT_EP2)
- && (out_en & USB_OUT_INT_EP2)) {
- DEBUG("USB_OUT_INT_EP2\n");
- lh7a40x_out_epn(dev, 2, intr_out);
- }
- }
-
- if (intr_int) {
- usb_write(intr_int, USB_INT);
-
- if (intr_int & USB_INT_RESET_INT) {
- lh7a40x_reset_intr(dev);
- }
-
- if (intr_int & USB_INT_RESUME_INT) {
- DEBUG("USB resume\n");
-
- if (dev->gadget.speed != USB_SPEED_UNKNOWN
- && dev->driver
- && dev->driver->resume
- && is_usb_connected()) {
- dev->driver->resume(&dev->gadget);
- }
- }
-
- if (intr_int & USB_INT_SUSPEND_INT) {
- DEBUG("USB suspend%s\n",
- is_usb_connected()? "" : "+disconnect");
- if (!is_usb_connected()) {
- stop_activity(dev, dev->driver);
- } else if (dev->gadget.speed !=
- USB_SPEED_UNKNOWN && dev->driver
- && dev->driver->suspend) {
- dev->driver->suspend(&dev->gadget);
- }
- }
-
- }
- }
-
- spin_unlock(&dev->lock);
-
- return IRQ_HANDLED;
-}
-
-static int lh7a40x_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct lh7a40x_ep *ep;
- struct lh7a40x_udc *dev;
- unsigned long flags;
-
- DEBUG("%s, %p\n", __func__, _ep);
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (!_ep || !desc || ep->desc || _ep->name == ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT
- || ep->bEndpointAddress != desc->bEndpointAddress
- || ep_maxpacket(ep) < le16_to_cpu(desc->wMaxPacketSize)) {
- DEBUG("%s, bad ep or descriptor\n", __func__);
- return -EINVAL;
- }
-
- /* xfer types must match, except that interrupt ~= bulk */
- if (ep->bmAttributes != desc->bmAttributes
- && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
- && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
- DEBUG("%s, %s type mismatch\n", __func__, _ep->name);
- return -EINVAL;
- }
-
- /* hardware _could_ do smaller, but driver doesn't */
- if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
- && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(ep))
- || !desc->wMaxPacketSize) {
- DEBUG("%s, bad %s maxpacket\n", __func__, _ep->name);
- return -ERANGE;
- }
-
- dev = ep->dev;
- if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
- DEBUG("%s, bogus device state\n", __func__);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&ep->dev->lock, flags);
-
- ep->stopped = 0;
- ep->desc = desc;
- ep->pio_irqs = 0;
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
-
- spin_unlock_irqrestore(&ep->dev->lock, flags);
-
- /* Reset halt state (does flush) */
- lh7a40x_set_halt(_ep, 0);
-
- DEBUG("%s: enabled %s\n", __func__, _ep->name);
- return 0;
-}
-
-/** Disable EP
- * NOTE: Sets INDEX register
- */
-static int lh7a40x_ep_disable(struct usb_ep *_ep)
-{
- struct lh7a40x_ep *ep;
- unsigned long flags;
-
- DEBUG("%s, %p\n", __func__, _ep);
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (!_ep || !ep->desc) {
- DEBUG("%s, %s not enabled\n", __func__,
- _ep ? ep->ep.name : NULL);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ep->dev->lock, flags);
-
- usb_set_index(ep_index(ep));
-
- /* Nuke all pending requests (does flush) */
- nuke(ep, -ESHUTDOWN);
-
- /* Disable ep IRQ */
- pio_irq_disable(ep_index(ep));
-
- ep->desc = 0;
- ep->stopped = 1;
-
- spin_unlock_irqrestore(&ep->dev->lock, flags);
-
- DEBUG("%s: disabled %s\n", __func__, _ep->name);
- return 0;
-}
-
-static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
- gfp_t gfp_flags)
-{
- struct lh7a40x_request *req;
-
- DEBUG("%s, %p\n", __func__, ep);
-
- req = kzalloc(sizeof(*req), gfp_flags);
- if (!req)
- return 0;
-
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
-{
- struct lh7a40x_request *req;
-
- DEBUG("%s, %p\n", __func__, ep);
-
- req = container_of(_req, struct lh7a40x_request, req);
- WARN_ON(!list_empty(&req->queue));
- kfree(req);
-}
-
-/** Queue one request
- * Kickstart transfer if needed
- * NOTE: Sets INDEX register
- */
-static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct lh7a40x_request *req;
- struct lh7a40x_ep *ep;
- struct lh7a40x_udc *dev;
- unsigned long flags;
-
- DEBUG("\n\n\n%s, %p\n", __func__, _ep);
-
- req = container_of(_req, struct lh7a40x_request, req);
- if (unlikely
- (!_req || !_req->complete || !_req->buf
- || !list_empty(&req->queue))) {
- DEBUG("%s, bad params\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
- DEBUG("%s, bad ep\n", __func__);
- return -EINVAL;
- }
-
- dev = ep->dev;
- if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
- DEBUG("%s, bogus device state %p\n", __func__, dev->driver);
- return -ESHUTDOWN;
- }
-
- DEBUG("%s queue req %p, len %d buf %p\n", _ep->name, _req, _req->length,
- _req->buf);
-
- spin_lock_irqsave(&dev->lock, flags);
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- /* kickstart this i/o queue? */
- DEBUG("Add to %d Q %d %d\n", ep_index(ep), list_empty(&ep->queue),
- ep->stopped);
- if (list_empty(&ep->queue) && likely(!ep->stopped)) {
- u32 csr;
-
- if (unlikely(ep_index(ep) == 0)) {
- /* EP0 */
- list_add_tail(&req->queue, &ep->queue);
- lh7a40x_ep0_kick(dev, ep);
- req = 0;
- } else if (ep_is_in(ep)) {
- /* EP1 & EP3 */
- usb_set_index(ep_index(ep));
- csr = usb_read(ep->csr1);
- pio_irq_enable(ep_index(ep));
- if ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY) == 0) {
- if (write_fifo(ep, req) == 1)
- req = 0;
- }
- } else {
- /* EP2 */
- usb_set_index(ep_index(ep));
- csr = usb_read(ep->csr1);
- pio_irq_enable(ep_index(ep));
- if (!(csr & USB_OUT_CSR1_FIFO_FULL)) {
- if (read_fifo(ep, req) == 1)
- req = 0;
- }
- }
- }
-
- /* pio or dma irq handler advances the queue. */
- if (likely(req != 0))
- list_add_tail(&req->queue, &ep->queue);
-
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return 0;
-}
-
-/* dequeue JUST ONE request */
-static int lh7a40x_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct lh7a40x_ep *ep;
- struct lh7a40x_request *req;
- unsigned long flags;
-
- DEBUG("%s, %p\n", __func__, _ep);
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (!_ep || ep->ep.name == ep0name)
- return -EINVAL;
-
- spin_lock_irqsave(&ep->dev->lock, flags);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req)
- break;
- }
- if (&req->req != _req) {
- spin_unlock_irqrestore(&ep->dev->lock, flags);
- return -EINVAL;
- }
-
- done(ep, req, -ECONNRESET);
-
- spin_unlock_irqrestore(&ep->dev->lock, flags);
- return 0;
-}
-
-/** Halt specific EP
- * Return 0 if success
- * NOTE: Sets INDEX register to EP !
- */
-static int lh7a40x_set_halt(struct usb_ep *_ep, int value)
-{
- struct lh7a40x_ep *ep;
- unsigned long flags;
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
- DEBUG("%s, bad ep\n", __func__);
- return -EINVAL;
- }
-
- usb_set_index(ep_index(ep));
-
- DEBUG("%s, ep %d, val %d\n", __func__, ep_index(ep), value);
-
- spin_lock_irqsave(&ep->dev->lock, flags);
-
- if (ep_index(ep) == 0) {
- /* EP0 */
- usb_set(EP0_SEND_STALL, ep->csr1);
- } else if (ep_is_in(ep)) {
- u32 csr = usb_read(ep->csr1);
- if (value && ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY)
- || !list_empty(&ep->queue))) {
- /*
- * Attempts to halt IN endpoints will fail (returning -EAGAIN)
- * if any transfer requests are still queued, or if the controller
- * FIFO still holds bytes that the host hasn't collected.
- */
- spin_unlock_irqrestore(&ep->dev->lock, flags);
- DEBUG
- ("Attempt to halt IN endpoint failed (returning -EAGAIN) %d %d\n",
- (csr & USB_IN_CSR1_FIFO_NOT_EMPTY),
- !list_empty(&ep->queue));
- return -EAGAIN;
- }
- flush(ep);
- if (value)
- usb_set(USB_IN_CSR1_SEND_STALL, ep->csr1);
- else {
- usb_clear(USB_IN_CSR1_SEND_STALL, ep->csr1);
- usb_set(USB_IN_CSR1_CLR_DATA_TOGGLE, ep->csr1);
- }
-
- } else {
-
- flush(ep);
- if (value)
- usb_set(USB_OUT_CSR1_SEND_STALL, ep->csr1);
- else {
- usb_clear(USB_OUT_CSR1_SEND_STALL, ep->csr1);
- usb_set(USB_OUT_CSR1_CLR_DATA_REG, ep->csr1);
- }
- }
-
- if (value) {
- ep->stopped = 1;
- } else {
- ep->stopped = 0;
- }
-
- spin_unlock_irqrestore(&ep->dev->lock, flags);
-
- DEBUG("%s %s halted\n", _ep->name, value == 0 ? "NOT" : "IS");
-
- return 0;
-}
-
-/** Return bytes in EP FIFO
- * NOTE: Sets INDEX register to EP
- */
-static int lh7a40x_fifo_status(struct usb_ep *_ep)
-{
- u32 csr;
- int count = 0;
- struct lh7a40x_ep *ep;
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (!_ep) {
- DEBUG("%s, bad ep\n", __func__);
- return -ENODEV;
- }
-
- DEBUG("%s, %d\n", __func__, ep_index(ep));
-
- /* LPD can't report unclaimed bytes from IN fifos */
- if (ep_is_in(ep))
- return -EOPNOTSUPP;
-
- usb_set_index(ep_index(ep));
-
- csr = usb_read(ep->csr1);
- if (ep->dev->gadget.speed != USB_SPEED_UNKNOWN ||
- csr & USB_OUT_CSR1_OUT_PKT_RDY) {
- count = usb_read(USB_OUT_FIFO_WC1);
- }
-
- return count;
-}
-
-/** Flush EP FIFO
- * NOTE: Sets INDEX register to EP
- */
-static void lh7a40x_fifo_flush(struct usb_ep *_ep)
-{
- struct lh7a40x_ep *ep;
-
- ep = container_of(_ep, struct lh7a40x_ep, ep);
- if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
- DEBUG("%s, bad ep\n", __func__);
- return;
- }
-
- usb_set_index(ep_index(ep));
- flush(ep);
-}
-
-/****************************************************************/
-/* End Point 0 related functions */
-/****************************************************************/
-
-/* return: 0 = still running, 1 = completed, negative = errno */
-static int write_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
-{
- u32 max;
- unsigned count;
- int is_last;
-
- max = ep_maxpacket(ep);
-
- DEBUG_EP0("%s\n", __func__);
-
- count = write_packet(ep, req, max);
-
- /* last packet is usually short (or a zlp) */
- if (unlikely(count != max))
- is_last = 1;
- else {
- if (likely(req->req.length != req->req.actual) || req->req.zero)
- is_last = 0;
- else
- is_last = 1;
- }
-
- DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __func__,
- ep->ep.name, count,
- is_last ? "/L" : "", req->req.length - req->req.actual, req);
-
- /* requests complete when all IN data is in the FIFO */
- if (is_last) {
- done(ep, req, 0);
- return 1;
- }
-
- return 0;
-}
-
-static __inline__ int lh7a40x_fifo_read(struct lh7a40x_ep *ep,
- unsigned char *cp, int max)
-{
- int bytes;
- int count = usb_read(USB_OUT_FIFO_WC1);
- volatile u32 *fifo = (volatile u32 *)ep->fifo;
-
- if (count > max)
- count = max;
- bytes = count;
- while (count--)
- *cp++ = *fifo & 0xFF;
- return bytes;
-}
-
-static __inline__ void lh7a40x_fifo_write(struct lh7a40x_ep *ep,
- unsigned char *cp, int count)
-{
- volatile u32 *fifo = (volatile u32 *)ep->fifo;
- DEBUG_EP0("fifo_write: %d %d\n", ep_index(ep), count);
- while (count--)
- *fifo = *cp++;
-}
-
-static int read_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
-{
- u32 csr;
- u8 *buf;
- unsigned bufferspace, count, is_short;
- volatile u32 *fifo = (volatile u32 *)ep->fifo;
-
- DEBUG_EP0("%s\n", __func__);
-
- csr = usb_read(USB_EP0_CSR);
- if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY))
- return 0;
-
- buf = req->req.buf + req->req.actual;
- prefetchw(buf);
- bufferspace = req->req.length - req->req.actual;
-
- /* read all bytes from this packet */
- if (likely(csr & EP0_OUT_PKT_RDY)) {
- count = usb_read(USB_OUT_FIFO_WC1);
- req->req.actual += min(count, bufferspace);
- } else /* zlp */
- count = 0;
-
- is_short = (count < ep->ep.maxpacket);
- DEBUG_EP0("read %s %02x, %d bytes%s req %p %d/%d\n",
- ep->ep.name, csr, count,
- is_short ? "/S" : "", req, req->req.actual, req->req.length);
-
- while (likely(count-- != 0)) {
- u8 byte = (u8) (*fifo & 0xff);
-
- if (unlikely(bufferspace == 0)) {
- /* this happens when the driver's buffer
- * is smaller than what the host sent.
- * discard the extra data.
- */
- if (req->req.status != -EOVERFLOW)
- DEBUG_EP0("%s overflow %d\n", ep->ep.name,
- count);
- req->req.status = -EOVERFLOW;
- } else {
- *buf++ = byte;
- bufferspace--;
- }
- }
-
- /* completion */
- if (is_short || req->req.actual == req->req.length) {
- done(ep, req, 0);
- return 1;
- }
-
- /* finished that packet. the next one may be waiting... */
- return 0;
-}
-
-/**
- * udc_set_address - set the USB address for this device
- * @address:
- *
- * Called from control endpoint function after it decodes a set address setup packet.
- */
-static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address)
-{
- DEBUG_EP0("%s: %d\n", __func__, address);
- /* c.f. 15.1.2.2 Table 15-4 address will be used after DATA_END is set */
- dev->usb_address = address;
- usb_set((address & USB_FA_FUNCTION_ADDR), USB_FA);
- usb_set(USB_FA_ADDR_UPDATE | (address & USB_FA_FUNCTION_ADDR), USB_FA);
- /* usb_read(USB_FA); */
-}
-
-/*
- * DATA_STATE_RECV (OUT_PKT_RDY)
- * - if error
- * set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
- * - else
- * set EP0_CLR_OUT bit
- if last set EP0_DATA_END bit
- */
-static void lh7a40x_ep0_out(struct lh7a40x_udc *dev, u32 csr)
-{
- struct lh7a40x_request *req;
- struct lh7a40x_ep *ep = &dev->ep[0];
- int ret;
-
- DEBUG_EP0("%s: %x\n", __func__, csr);
-
- if (list_empty(&ep->queue))
- req = 0;
- else
- req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
-
- if (req) {
-
- if (req->req.length == 0) {
- DEBUG_EP0("ZERO LENGTH OUT!\n");
- usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
- dev->ep0state = WAIT_FOR_SETUP;
- return;
- }
- ret = read_fifo_ep0(ep, req);
- if (ret) {
- /* Done! */
- DEBUG_EP0("%s: finished, waiting for status\n",
- __func__);
-
- usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
- dev->ep0state = WAIT_FOR_SETUP;
- } else {
- /* Not done yet.. */
- DEBUG_EP0("%s: not finished\n", __func__);
- usb_set(EP0_CLR_OUT, USB_EP0_CSR);
- }
- } else {
- DEBUG_EP0("NO REQ??!\n");
- }
-}
-
-/*
- * DATA_STATE_XMIT
- */
-static int lh7a40x_ep0_in(struct lh7a40x_udc *dev, u32 csr)
-{
- struct lh7a40x_request *req;
- struct lh7a40x_ep *ep = &dev->ep[0];
- int ret, need_zlp = 0;
-
- DEBUG_EP0("%s: %x\n", __func__, csr);
-
- if (list_empty(&ep->queue))
- req = 0;
- else
- req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
-
- if (!req) {
- DEBUG_EP0("%s: NULL REQ\n", __func__);
- return 0;
- }
-
- if (req->req.length == 0) {
-
- usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
- dev->ep0state = WAIT_FOR_SETUP;
- return 1;
- }
-
- if (req->req.length - req->req.actual == EP0_PACKETSIZE) {
- /* Next write will end with the packet size, */
- /* so we need Zero-length-packet */
- need_zlp = 1;
- }
-
- ret = write_fifo_ep0(ep, req);
-
- if (ret == 1 && !need_zlp) {
- /* Last packet */
- DEBUG_EP0("%s: finished, waiting for status\n", __func__);
-
- usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
- dev->ep0state = WAIT_FOR_SETUP;
- } else {
- DEBUG_EP0("%s: not finished\n", __func__);
- usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
- }
-
- if (need_zlp) {
- DEBUG_EP0("%s: Need ZLP!\n", __func__);
- usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
- dev->ep0state = DATA_STATE_NEED_ZLP;
- }
-
- return 1;
-}
-
-static int lh7a40x_handle_get_status(struct lh7a40x_udc *dev,
- struct usb_ctrlrequest *ctrl)
-{
- struct lh7a40x_ep *ep0 = &dev->ep[0];
- struct lh7a40x_ep *qep;
- int reqtype = (ctrl->bRequestType & USB_RECIP_MASK);
- u16 val = 0;
-
- if (reqtype == USB_RECIP_INTERFACE) {
- /* This is not supported.
- * And according to the USB spec, this one does nothing..
- * Just return 0
- */
- DEBUG_SETUP("GET_STATUS: USB_RECIP_INTERFACE\n");
- } else if (reqtype == USB_RECIP_DEVICE) {
- DEBUG_SETUP("GET_STATUS: USB_RECIP_DEVICE\n");
- val |= (1 << 0); /* Self powered */
- /*val |= (1<<1); *//* Remote wakeup */
- } else if (reqtype == USB_RECIP_ENDPOINT) {
- int ep_num = (ctrl->wIndex & ~USB_DIR_IN);
-
- DEBUG_SETUP
- ("GET_STATUS: USB_RECIP_ENDPOINT (%d), ctrl->wLength = %d\n",
- ep_num, ctrl->wLength);
-
- if (ctrl->wLength > 2 || ep_num > 3)
- return -EOPNOTSUPP;
-
- qep = &dev->ep[ep_num];
- if (ep_is_in(qep) != ((ctrl->wIndex & USB_DIR_IN) ? 1 : 0)
- && ep_index(qep) != 0) {
- return -EOPNOTSUPP;
- }
-
- usb_set_index(ep_index(qep));
-
- /* Return status on next IN token */
- switch (qep->ep_type) {
- case ep_control:
- val =
- (usb_read(qep->csr1) & EP0_SEND_STALL) ==
- EP0_SEND_STALL;
- break;
- case ep_bulk_in:
- case ep_interrupt:
- val =
- (usb_read(qep->csr1) & USB_IN_CSR1_SEND_STALL) ==
- USB_IN_CSR1_SEND_STALL;
- break;
- case ep_bulk_out:
- val =
- (usb_read(qep->csr1) & USB_OUT_CSR1_SEND_STALL) ==
- USB_OUT_CSR1_SEND_STALL;
- break;
- }
-
- /* Back to EP0 index */
- usb_set_index(0);
-
- DEBUG_SETUP("GET_STATUS, ep: %d (%x), val = %d\n", ep_num,
- ctrl->wIndex, val);
- } else {
- DEBUG_SETUP("Unknown REQ TYPE: %d\n", reqtype);
- return -EOPNOTSUPP;
- }
-
- /* Clear "out packet ready" */
- usb_set((EP0_CLR_OUT), USB_EP0_CSR);
- /* Put status to FIFO */
- lh7a40x_fifo_write(ep0, (u8 *) & val, sizeof(val));
- /* Issue "In packet ready" */
- usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
-
- return 0;
-}
-
-/*
- * WAIT_FOR_SETUP (OUT_PKT_RDY)
- * - read data packet from EP0 FIFO
- * - decode command
- * - if error
- * set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
- * - else
- * set EP0_CLR_OUT | EP0_DATA_END bits
- */
-static void lh7a40x_ep0_setup(struct lh7a40x_udc *dev, u32 csr)
-{
- struct lh7a40x_ep *ep = &dev->ep[0];
- struct usb_ctrlrequest ctrl;
- int i, bytes, is_in;
-
- DEBUG_SETUP("%s: %x\n", __func__, csr);
-
- /* Nuke all previous transfers */
- nuke(ep, -EPROTO);
-
- /* read control req from fifo (8 bytes) */
- bytes = lh7a40x_fifo_read(ep, (unsigned char *)&ctrl, 8);
-
- DEBUG_SETUP("Read CTRL REQ %d bytes\n", bytes);
- DEBUG_SETUP("CTRL.bRequestType = %d (is_in %d)\n", ctrl.bRequestType,
- ctrl.bRequestType == USB_DIR_IN);
- DEBUG_SETUP("CTRL.bRequest = %d\n", ctrl.bRequest);
- DEBUG_SETUP("CTRL.wLength = %d\n", ctrl.wLength);
- DEBUG_SETUP("CTRL.wValue = %d (%d)\n", ctrl.wValue, ctrl.wValue >> 8);
- DEBUG_SETUP("CTRL.wIndex = %d\n", ctrl.wIndex);
-
- /* Set direction of EP0 */
- if (likely(ctrl.bRequestType & USB_DIR_IN)) {
- ep->bEndpointAddress |= USB_DIR_IN;
- is_in = 1;
- } else {
- ep->bEndpointAddress &= ~USB_DIR_IN;
- is_in = 0;
- }
-
- dev->req_pending = 1;
-
- /* Handle some SETUP packets ourselves */
- switch (ctrl.bRequest) {
- case USB_REQ_SET_ADDRESS:
- if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
- break;
-
- DEBUG_SETUP("USB_REQ_SET_ADDRESS (%d)\n", ctrl.wValue);
- udc_set_address(dev, ctrl.wValue);
- usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
- return;
-
- case USB_REQ_GET_STATUS:{
- if (lh7a40x_handle_get_status(dev, &ctrl) == 0)
- return;
-
- case USB_REQ_CLEAR_FEATURE:
- case USB_REQ_SET_FEATURE:
- if (ctrl.bRequestType == USB_RECIP_ENDPOINT) {
- struct lh7a40x_ep *qep;
- int ep_num = (ctrl.wIndex & 0x0f);
-
- /* Support only HALT feature */
- if (ctrl.wValue != 0 || ctrl.wLength != 0
- || ep_num > 3 || ep_num < 1)
- break;
-
- qep = &dev->ep[ep_num];
- spin_unlock(&dev->lock);
- if (ctrl.bRequest == USB_REQ_SET_FEATURE) {
- DEBUG_SETUP("SET_FEATURE (%d)\n",
- ep_num);
- lh7a40x_set_halt(&qep->ep, 1);
- } else {
- DEBUG_SETUP("CLR_FEATURE (%d)\n",
- ep_num);
- lh7a40x_set_halt(&qep->ep, 0);
- }
- spin_lock(&dev->lock);
- usb_set_index(0);
-
- /* Reply with a ZLP on next IN token */
- usb_set((EP0_CLR_OUT | EP0_DATA_END),
- USB_EP0_CSR);
- return;
- }
- break;
- }
-
- default:
- break;
- }
-
- if (likely(dev->driver)) {
- /* device-2-host (IN) or no data setup command, process immediately */
- spin_unlock(&dev->lock);
- i = dev->driver->setup(&dev->gadget, &ctrl);
- spin_lock(&dev->lock);
-
- if (i < 0) {
- /* setup processing failed, force stall */
- DEBUG_SETUP
- (" --> ERROR: gadget setup FAILED (stalling), setup returned %d\n",
- i);
- usb_set_index(0);
- usb_set((EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL),
- USB_EP0_CSR);
-
- /* ep->stopped = 1; */
- dev->ep0state = WAIT_FOR_SETUP;
- }
- }
-}
-
-/*
- * DATA_STATE_NEED_ZLP
- */
-static void lh7a40x_ep0_in_zlp(struct lh7a40x_udc *dev, u32 csr)
-{
- DEBUG_EP0("%s: %x\n", __func__, csr);
-
- /* c.f. Table 15-14 */
- usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
- dev->ep0state = WAIT_FOR_SETUP;
-}
-
-/*
- * handle ep0 interrupt
- */
-static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr)
-{
- struct lh7a40x_ep *ep = &dev->ep[0];
- u32 csr;
-
- /* Set index 0 */
- usb_set_index(0);
- csr = usb_read(USB_EP0_CSR);
-
- DEBUG_EP0("%s: csr = %x\n", __func__, csr);
-
- /*
- * For overview of what we should be doing see c.f. Chapter 18.1.2.4
- * We will follow that outline here modified by our own global state
- * indication which provides hints as to what we think should be
- * happening..
- */
-
- /*
- * if SENT_STALL is set
- * - clear the SENT_STALL bit
- */
- if (csr & EP0_SENT_STALL) {
- DEBUG_EP0("%s: EP0_SENT_STALL is set: %x\n", __func__, csr);
- usb_clear((EP0_SENT_STALL | EP0_SEND_STALL), USB_EP0_CSR);
- nuke(ep, -ECONNABORTED);
- dev->ep0state = WAIT_FOR_SETUP;
- return;
- }
-
- /*
- * if a transfer is in progress && IN_PKT_RDY and OUT_PKT_RDY are clear
- * - fill EP0 FIFO
- * - if last packet
- * - set IN_PKT_RDY | DATA_END
- * - else
- * set IN_PKT_RDY
- */
- if (!(csr & (EP0_IN_PKT_RDY | EP0_OUT_PKT_RDY))) {
- DEBUG_EP0("%s: IN_PKT_RDY and OUT_PKT_RDY are clear\n",
- __func__);
-
- switch (dev->ep0state) {
- case DATA_STATE_XMIT:
- DEBUG_EP0("continue with DATA_STATE_XMIT\n");
- lh7a40x_ep0_in(dev, csr);
- return;
- case DATA_STATE_NEED_ZLP:
- DEBUG_EP0("continue with DATA_STATE_NEED_ZLP\n");
- lh7a40x_ep0_in_zlp(dev, csr);
- return;
- default:
- /* Stall? */
- DEBUG_EP0("Odd state!! state = %s\n",
- state_names[dev->ep0state]);
- dev->ep0state = WAIT_FOR_SETUP;
- /* nuke(ep, 0); */
- /* usb_set(EP0_SEND_STALL, ep->csr1); */
- break;
- }
- }
-
- /*
- * if SETUP_END is set
- * - abort the last transfer
- * - set SERVICED_SETUP_END_BIT
- */
- if (csr & EP0_SETUP_END) {
- DEBUG_EP0("%s: EP0_SETUP_END is set: %x\n", __func__, csr);
-
- usb_set(EP0_CLR_SETUP_END, USB_EP0_CSR);
-
- nuke(ep, 0);
- dev->ep0state = WAIT_FOR_SETUP;
- }
-
- /*
- * if EP0_OUT_PKT_RDY is set
- * - read data packet from EP0 FIFO
- * - decode command
- * - if error
- * set SERVICED_OUT_PKT_RDY | DATA_END bits | SEND_STALL
- * - else
- * set SERVICED_OUT_PKT_RDY | DATA_END bits
- */
- if (csr & EP0_OUT_PKT_RDY) {
-
- DEBUG_EP0("%s: EP0_OUT_PKT_RDY is set: %x\n", __func__,
- csr);
-
- switch (dev->ep0state) {
- case WAIT_FOR_SETUP:
- DEBUG_EP0("WAIT_FOR_SETUP\n");
- lh7a40x_ep0_setup(dev, csr);
- break;
-
- case DATA_STATE_RECV:
- DEBUG_EP0("DATA_STATE_RECV\n");
- lh7a40x_ep0_out(dev, csr);
- break;
-
- default:
- /* send stall? */
- DEBUG_EP0("strange state!! 2. send stall? state = %d\n",
- dev->ep0state);
- break;
- }
- }
-}
-
-static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep)
-{
- u32 csr;
-
- usb_set_index(0);
- csr = usb_read(USB_EP0_CSR);
-
- DEBUG_EP0("%s: %x\n", __func__, csr);
-
- /* Clear "out packet ready" */
- usb_set(EP0_CLR_OUT, USB_EP0_CSR);
-
- if (ep_is_in(ep)) {
- dev->ep0state = DATA_STATE_XMIT;
- lh7a40x_ep0_in(dev, csr);
- } else {
- dev->ep0state = DATA_STATE_RECV;
- lh7a40x_ep0_out(dev, csr);
- }
-}
-
-/* ---------------------------------------------------------------------------
- * device-scoped parts of the api to the usb controller hardware
- * ---------------------------------------------------------------------------
- */
-
-static int lh7a40x_udc_get_frame(struct usb_gadget *_gadget)
-{
- u32 frame1 = usb_read(USB_FRM_NUM1); /* Least significant 8 bits */
- u32 frame2 = usb_read(USB_FRM_NUM2); /* Most significant 3 bits */
- DEBUG("%s, %p\n", __func__, _gadget);
- return ((frame2 & 0x07) << 8) | (frame1 & 0xff);
-}
-
-static int lh7a40x_udc_wakeup(struct usb_gadget *_gadget)
-{
- /* host may not have enabled remote wakeup */
- /*if ((UDCCS0 & UDCCS0_DRWF) == 0)
- return -EHOSTUNREACH;
- udc_set_mask_UDCCR(UDCCR_RSM); */
- return -ENOTSUPP;
-}
-
-static const struct usb_gadget_ops lh7a40x_udc_ops = {
- .get_frame = lh7a40x_udc_get_frame,
- .wakeup = lh7a40x_udc_wakeup,
- /* current versions must always be self-powered */
-};
-
-static void nop_release(struct device *dev)
-{
- DEBUG("%s %s\n", __func__, dev_name(dev));
-}
-
-static struct lh7a40x_udc memory = {
- .usb_address = 0,
-
- .gadget = {
- .ops = &lh7a40x_udc_ops,
- .ep0 = &memory.ep[0].ep,
- .name = driver_name,
- .dev = {
- .init_name = "gadget",
- .release = nop_release,
- },
- },
-
- /* control endpoint */
- .ep[0] = {
- .ep = {
- .name = ep0name,
- .ops = &lh7a40x_ep_ops,
- .maxpacket = EP0_PACKETSIZE,
- },
- .dev = &memory,
-
- .bEndpointAddress = 0,
- .bmAttributes = 0,
-
- .ep_type = ep_control,
- .fifo = io_p2v(USB_EP0_FIFO),
- .csr1 = USB_EP0_CSR,
- .csr2 = USB_EP0_CSR,
- },
-
- /* first group of endpoints */
- .ep[1] = {
- .ep = {
- .name = "ep1in-bulk",
- .ops = &lh7a40x_ep_ops,
- .maxpacket = 64,
- },
- .dev = &memory,
-
- .bEndpointAddress = USB_DIR_IN | 1,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-
- .ep_type = ep_bulk_in,
- .fifo = io_p2v(USB_EP1_FIFO),
- .csr1 = USB_IN_CSR1,
- .csr2 = USB_IN_CSR2,
- },
-
- .ep[2] = {
- .ep = {
- .name = "ep2out-bulk",
- .ops = &lh7a40x_ep_ops,
- .maxpacket = 64,
- },
- .dev = &memory,
-
- .bEndpointAddress = 2,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-
- .ep_type = ep_bulk_out,
- .fifo = io_p2v(USB_EP2_FIFO),
- .csr1 = USB_OUT_CSR1,
- .csr2 = USB_OUT_CSR2,
- },
-
- .ep[3] = {
- .ep = {
- .name = "ep3in-int",
- .ops = &lh7a40x_ep_ops,
- .maxpacket = 64,
- },
- .dev = &memory,
-
- .bEndpointAddress = USB_DIR_IN | 3,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
-
- .ep_type = ep_interrupt,
- .fifo = io_p2v(USB_EP3_FIFO),
- .csr1 = USB_IN_CSR1,
- .csr2 = USB_IN_CSR2,
- },
-};
-
-/*
- * probe - binds to the platform device
- */
-static int lh7a40x_udc_probe(struct platform_device *pdev)
-{
- struct lh7a40x_udc *dev = &memory;
- int retval;
-
- DEBUG("%s: %p\n", __func__, pdev);
-
- spin_lock_init(&dev->lock);
- dev->dev = &pdev->dev;
-
- device_initialize(&dev->gadget.dev);
- dev->gadget.dev.parent = &pdev->dev;
-
- the_controller = dev;
- platform_set_drvdata(pdev, dev);
-
- udc_disable(dev);
- udc_reinit(dev);
-
- /* irq setup after old hardware state is cleaned up */
- retval =
- request_irq(IRQ_USBINTR, lh7a40x_udc_irq, IRQF_DISABLED, driver_name,
- dev);
- if (retval != 0) {
- DEBUG(KERN_ERR "%s: can't get irq %i, err %d\n", driver_name,
- IRQ_USBINTR, retval);
- return -EBUSY;
- }
-
- create_proc_files();
-
- return retval;
-}
-
-static int lh7a40x_udc_remove(struct platform_device *pdev)
-{
- struct lh7a40x_udc *dev = platform_get_drvdata(pdev);
-
- DEBUG("%s: %p\n", __func__, pdev);
-
- if (dev->driver)
- return -EBUSY;
-
- udc_disable(dev);
- remove_proc_files();
-
- free_irq(IRQ_USBINTR, dev);
-
- platform_set_drvdata(pdev, 0);
-
- the_controller = 0;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static struct platform_driver udc_driver = {
- .probe = lh7a40x_udc_probe,
- .remove = lh7a40x_udc_remove,
- /* FIXME power management support */
- /* .suspend = ... disable UDC */
- /* .resume = ... re-enable UDC */
- .driver = {
- .name = (char *)driver_name,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init udc_init(void)
-{
- DEBUG("%s: %s version %s\n", __func__, driver_name, DRIVER_VERSION);
- return platform_driver_register(&udc_driver);
-}
-
-static void __exit udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-
-module_init(udc_init);
-module_exit(udc_exit);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Mikko Lahteenmaki, Bo Henriksen");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:lh7a40x_udc");
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
deleted file mode 100644
index ca861203a301..000000000000
--- a/drivers/usb/gadget/lh7a40x_udc.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * linux/drivers/usb/gadget/lh7a40x_udc.h
- * Sharp LH7A40x on-chip full speed USB device controllers
- *
- * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID
- * Copyright (C) 2004 Bo Henriksen, Nordic ID
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __LH7A40X_H_
-#define __LH7A40X_H_
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/byteorder.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-#include <mach/hardware.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-/*
- * Memory map
- */
-
-#define USB_FA 0x80000200 // function address register
-#define USB_PM 0x80000204 // power management register
-
-#define USB_IN_INT 0x80000208 // IN interrupt register bank (EP0-EP3)
-#define USB_OUT_INT 0x80000210 // OUT interrupt register bank (EP2)
-#define USB_INT 0x80000218 // interrupt register bank
-
-#define USB_IN_INT_EN 0x8000021C // IN interrupt enable register bank
-#define USB_OUT_INT_EN 0x80000224 // OUT interrupt enable register bank
-#define USB_INT_EN 0x8000022C // USB interrupt enable register bank
-
-#define USB_FRM_NUM1 0x80000230 // Frame number1 register
-#define USB_FRM_NUM2 0x80000234 // Frame number2 register
-#define USB_INDEX 0x80000238 // index register
-
-#define USB_IN_MAXP 0x80000240 // IN MAXP register
-#define USB_IN_CSR1 0x80000244 // IN CSR1 register/EP0 CSR register
-#define USB_EP0_CSR 0x80000244 // IN CSR1 register/EP0 CSR register
-#define USB_IN_CSR2 0x80000248 // IN CSR2 register
-#define USB_OUT_MAXP 0x8000024C // OUT MAXP register
-
-#define USB_OUT_CSR1 0x80000250 // OUT CSR1 register
-#define USB_OUT_CSR2 0x80000254 // OUT CSR2 register
-#define USB_OUT_FIFO_WC1 0x80000258 // OUT FIFO write count1 register
-#define USB_OUT_FIFO_WC2 0x8000025C // OUT FIFO write count2 register
-
-#define USB_RESET 0x8000044C // USB reset register
-
-#define USB_EP0_FIFO 0x80000280
-#define USB_EP1_FIFO 0x80000284
-#define USB_EP2_FIFO 0x80000288
-#define USB_EP3_FIFO 0x8000028c
-
-/*
- * USB reset register
- */
-#define USB_RESET_APB (1<<1) //resets USB APB control side WRITE
-#define USB_RESET_IO (1<<0) //resets USB IO side WRITE
-
-/*
- * USB function address register
- */
-#define USB_FA_ADDR_UPDATE (1<<7)
-#define USB_FA_FUNCTION_ADDR (0x7F)
-
-/*
- * Power Management register
- */
-#define PM_USB_DCP (1<<5)
-#define PM_USB_ENABLE (1<<4)
-#define PM_USB_RESET (1<<3)
-#define PM_UC_RESUME (1<<2)
-#define PM_SUSPEND_MODE (1<<1)
-#define PM_ENABLE_SUSPEND (1<<0)
-
-/*
- * IN interrupt register
- */
-#define USB_IN_INT_EP3 (1<<3)
-#define USB_IN_INT_EP1 (1<<1)
-#define USB_IN_INT_EP0 (1<<0)
-
-/*
- * OUT interrupt register
- */
-#define USB_OUT_INT_EP2 (1<<2)
-
-/*
- * USB interrupt register
- */
-#define USB_INT_RESET_INT (1<<2)
-#define USB_INT_RESUME_INT (1<<1)
-#define USB_INT_SUSPEND_INT (1<<0)
-
-/*
- * USB interrupt enable register
- */
-#define USB_INT_EN_USB_RESET_INTER (1<<2)
-#define USB_INT_EN_RESUME_INTER (1<<1)
-#define USB_INT_EN_SUSPEND_INTER (1<<0)
-
-/*
- * INCSR1 register
- */
-#define USB_IN_CSR1_CLR_DATA_TOGGLE (1<<6)
-#define USB_IN_CSR1_SENT_STALL (1<<5)
-#define USB_IN_CSR1_SEND_STALL (1<<4)
-#define USB_IN_CSR1_FIFO_FLUSH (1<<3)
-#define USB_IN_CSR1_FIFO_NOT_EMPTY (1<<1)
-#define USB_IN_CSR1_IN_PKT_RDY (1<<0)
-
-/*
- * INCSR2 register
- */
-#define USB_IN_CSR2_AUTO_SET (1<<7)
-#define USB_IN_CSR2_USB_DMA_EN (1<<4)
-
-/*
- * OUT CSR1 register
- */
-#define USB_OUT_CSR1_CLR_DATA_REG (1<<7)
-#define USB_OUT_CSR1_SENT_STALL (1<<6)
-#define USB_OUT_CSR1_SEND_STALL (1<<5)
-#define USB_OUT_CSR1_FIFO_FLUSH (1<<4)
-#define USB_OUT_CSR1_FIFO_FULL (1<<1)
-#define USB_OUT_CSR1_OUT_PKT_RDY (1<<0)
-
-/*
- * OUT CSR2 register
- */
-#define USB_OUT_CSR2_AUTO_CLR (1<<7)
-#define USB_OUT_CSR2_USB_DMA_EN (1<<4)
-
-/*
- * EP0 CSR
- */
-#define EP0_CLR_SETUP_END (1<<7) /* Clear "Setup Ends" Bit (w) */
-#define EP0_CLR_OUT (1<<6) /* Clear "Out packet ready" Bit (w) */
-#define EP0_SEND_STALL (1<<5) /* Send STALL Handshake (rw) */
-#define EP0_SETUP_END (1<<4) /* Setup Ends (r) */
-
-#define EP0_DATA_END (1<<3) /* Data end (rw) */
-#define EP0_SENT_STALL (1<<2) /* Sent Stall Handshake (r) */
-#define EP0_IN_PKT_RDY (1<<1) /* In packet ready (rw) */
-#define EP0_OUT_PKT_RDY (1<<0) /* Out packet ready (r) */
-
-/* general CSR */
-#define OUT_PKT_RDY (1<<0)
-#define IN_PKT_RDY (1<<0)
-
-/*
- * IN/OUT MAXP register
- */
-#define USB_OUT_MAXP_MAXP (0xF)
-#define USB_IN_MAXP_MAXP (0xF)
-
-// Max packet size
-//#define EP0_PACKETSIZE 0x10
-#define EP0_PACKETSIZE 0x8
-#define EP0_MAXPACKETSIZE 0x10
-
-#define UDC_MAX_ENDPOINTS 4
-
-#define WAIT_FOR_SETUP 0
-#define DATA_STATE_XMIT 1
-#define DATA_STATE_NEED_ZLP 2
-#define WAIT_FOR_OUT_STATUS 3
-#define DATA_STATE_RECV 4
-
-/* ********************************************************************************************* */
-/* IO
- */
-
-typedef enum ep_type {
- ep_control, ep_bulk_in, ep_bulk_out, ep_interrupt
-} ep_type_t;
-
-struct lh7a40x_ep {
- struct usb_ep ep;
- struct lh7a40x_udc *dev;
-
- const struct usb_endpoint_descriptor *desc;
- struct list_head queue;
- unsigned long pio_irqs;
-
- u8 stopped;
- u8 bEndpointAddress;
- u8 bmAttributes;
-
- ep_type_t ep_type;
- u32 fifo;
- u32 csr1;
- u32 csr2;
-};
-
-struct lh7a40x_request {
- struct usb_request req;
- struct list_head queue;
-};
-
-struct lh7a40x_udc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct device *dev;
- spinlock_t lock;
-
- int ep0state;
- struct lh7a40x_ep ep[UDC_MAX_ENDPOINTS];
-
- unsigned char usb_address;
-
- unsigned req_pending:1, req_std:1, req_config:1;
-};
-
-extern struct lh7a40x_udc *the_controller;
-
-#define ep_is_in(EP) (((EP)->bEndpointAddress&USB_DIR_IN)==USB_DIR_IN)
-#define ep_index(EP) ((EP)->bEndpointAddress&0xF)
-#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
-
-#endif
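The register map above is banked: one set of CSR/MAXP/FIFO-count addresses serves all four endpoints, and USB_INDEX selects whose registers are currently visible, which is why so many routines in the .c file carry a "Sets INDEX register" note. A hypothetical helper makes the access pattern explicit:

/* illustrative only, not part of the original driver */
static u32 ep_read_csr1(struct lh7a40x_ep *ep)
{
	usb_set_index(ep_index(ep));	/* latch the endpoint number into USB_INDEX */
	return usb_read(ep->csr1);	/* the shared address now shows this EP's CSR1 */
}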
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 74dcf49bd015..c4de2d75a3d7 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1306,24 +1306,24 @@ static int __init ehci_hcd_init(void)
#endif
#ifdef OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean3;
#endif
#ifdef XILINX_OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean4;
#endif
return retval;
#ifdef XILINX_OF_PLATFORM_DRIVER
- /* of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); */
+ /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
clean4:
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
@@ -1351,10 +1351,10 @@ module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
#ifdef XILINX_OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
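These ehci-hcd.c hunks are part of the tree-wide retirement of the of_platform bus: OF-matched HCD glue is now registered as an ordinary platform driver, so the init/exit paths simply call platform_driver_register()/platform_driver_unregister(). A sketch of the converted registration pair, with illustrative names (the of_device_id table is presumably wired up through .driver.of_match_table, which these hunks do not show):

static struct platform_driver example_hcd_of_driver = {
	.probe	= example_hcd_of_probe,
	.remove	= example_hcd_of_remove,
	.driver	= {
		.name		= "example-hcd",
		.owner		= THIS_MODULE,
		.of_match_table	= example_hcd_of_match,	/* assumed; same table as before */
	},
};

static int __init example_hcd_init(void)
{
	/* was: of_register_platform_driver(&example_hcd_of_driver) */
	return platform_driver_register(&example_hcd_of_driver);
}

static void __exit example_hcd_exit(void)
{
	/* was: of_unregister_platform_driver(&example_hcd_of_driver) */
	platform_driver_unregister(&example_hcd_of_driver);
}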
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c8e360d7d975..25c8c10bb689 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -203,11 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
mdelay(10);
}
- /* setup specific usb hw */
- ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
- if (ret < 0)
- goto err_init;
-
ehci = hcd_to_ehci(hcd);
/* EHCI registers start at offset 0x100 */
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ba52be473027..1f09f253697e 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -105,8 +105,7 @@ ppc44x_enable_bmt(struct device_node *dn)
}
-static int __devinit
-ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ehci_hcd_ppc_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -255,14 +254,12 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op)
}
-static int ehci_hcd_ppc_of_shutdown(struct platform_device *op)
+static void ehci_hcd_ppc_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
-
- return 0;
}
@@ -275,7 +272,7 @@ static const struct of_device_id ehci_hcd_ppc_of_match[] = {
MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
-static struct of_platform_driver ehci_hcd_ppc_of_driver = {
+static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
.remove = ehci_hcd_ppc_of_remove,
.shutdown = ehci_hcd_ppc_of_shutdown,
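The ehci-ppc-of.c conversion also shows the two callback prototype changes that come with it: probe() no longer receives a const struct of_device_id * (DT data is reached through the device itself), and .shutdown now returns void. Roughly, under the new prototypes (example names, minimal bodies):

static int __devinit example_of_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;	/* no separate match argument */

	if (!dn)
		return -ENODEV;
	return 0;
}

static void example_of_shutdown(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	if (hcd->driver->shutdown)
		hcd->driver->shutdown(hcd);	/* nothing to return any more */
}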
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36fdf0b..effc58d7af8b 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
/**
* ehci_xilinx_of_setup - Initialize the device for ehci_reset()
@@ -142,15 +143,13 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
/**
* ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
* @op: pointer to the platform_device bound to the host controller
- * @match: pointer to of_device_id structure, not used
*
* This function requests resources and sets up appropriate properties for the
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
*/
-static int __devinit
-ehci_hcd_xilinx_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -288,7 +287,7 @@ static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
-static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
+static struct platform_driver ehci_hcd_xilinx_of_driver = {
.probe = ehci_hcd_xilinx_of_probe,
.remove = ehci_hcd_xilinx_of_remove,
.shutdown = ehci_hcd_xilinx_of_shutdown,
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 12fd184226f2..b84ff7e51896 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -561,8 +561,7 @@ static const struct hc_driver fhci_driver = {
.hub_control = fhci_hub_control,
};
-static int __devinit of_fhci_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit of_fhci_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *node = dev->of_node;
@@ -812,7 +811,7 @@ static const struct of_device_id of_fhci_match[] = {
};
MODULE_DEVICE_TABLE(of, of_fhci_match);
-static struct of_platform_driver of_fhci_driver = {
+static struct platform_driver of_fhci_driver = {
.driver = {
.name = "fsl,usb-fhci",
.owner = THIS_MODULE,
@@ -824,13 +823,13 @@ static struct of_platform_driver of_fhci_driver = {
static int __init fhci_module_init(void)
{
- return of_register_platform_driver(&of_fhci_driver);
+ return platform_driver_register(&of_fhci_driver);
}
module_init(fhci_module_init);
static void __exit fhci_module_exit(void)
{
- of_unregister_platform_driver(&of_fhci_driver);
+ platform_driver_unregister(&of_fhci_driver);
}
module_exit(fhci_module_exit);
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 3b28dbfca058..7ee30056f373 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -27,8 +27,7 @@
#endif
#ifdef CONFIG_PPC_OF
-static int of_isp1760_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int of_isp1760_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct device_node *dp = dev->dev.of_node;
@@ -119,7 +118,7 @@ static const struct of_device_id of_isp1760_match[] = {
};
MODULE_DEVICE_TABLE(of, of_isp1760_match);
-static struct of_platform_driver isp1760_of_driver = {
+static struct platform_driver isp1760_of_driver = {
.driver = {
.name = "nxp-isp1760",
.owner = THIS_MODULE,
@@ -398,7 +397,7 @@ static int __init isp1760_init(void)
if (!ret)
any_ret = 0;
#ifdef CONFIG_PPC_OF
- ret = of_register_platform_driver(&isp1760_of_driver);
+ ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
#endif
@@ -418,7 +417,7 @@ static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
#ifdef CONFIG_PPC_OF
- of_unregister_platform_driver(&isp1760_of_driver);
+ platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
pci_unregister_driver(&isp1761_pci_driver);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 759a12ff8048..2be257efb01a 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1023,11 +1023,6 @@ MODULE_LICENSE ("GPL");
#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
#endif
-#ifdef CONFIG_ARCH_LH7A404
-#include "ohci-lh7a404.c"
-#define PLATFORM_DRIVER ohci_hcd_lh7a404_driver
-#endif
-
#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
#include "ohci-pxa27x.c"
#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
@@ -1180,7 +1175,7 @@ static int __init ohci_hcd_mod_init(void)
#endif
#ifdef OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto error_of_platform;
#endif
@@ -1239,7 +1234,7 @@ static int __init ohci_hcd_mod_init(void)
error_sa1111:
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
error_of_platform:
#endif
#ifdef PLATFORM_DRIVER
@@ -1287,7 +1282,7 @@ static void __exit ohci_hcd_mod_exit(void)
sa1111_driver_unregister(&SA1111_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
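ohci-hcd.c assembles SoC support by #including each bus-glue file and defining a macro that names its driver, so dropping LH7A404 amounts to deleting its block; the remaining hunks are the same of_platform conversion as in ehci-hcd.c. The shape of such a glue block, with hypothetical names:

#ifdef CONFIG_ARCH_EXAMPLE_SOC
#include "ohci-example.c"		/* glue compiled into the core file */
#define PLATFORM_DRIVER		ohci_hcd_example_driver	/* picked up by the init/exit ladders */
#endif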
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
deleted file mode 100644
index 18d39f0463ee..000000000000
--- a/drivers/usb/host/ohci-lh7a404.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for Sharp LH7A404
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <linux/signal.h>
-
-#include <mach/hardware.h>
-
-
-extern int usb_disabled(void);
-
-/*-------------------------------------------------------------------------*/
-
-static void lh7a404_start_hc(struct platform_device *dev)
-{
- printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
- __FILE__);
-
- /*
- * Now, carefully enable the USB clock, and take
- * the USB host controller out of reset.
- */
- CSC_PWRCNT |= CSC_PWRCNT_USBH_EN; /* Enable clock */
- udelay(1000);
- USBH_CMDSTATUS = OHCI_HCR;
-
- printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
-}
-
-static void lh7a404_stop_hc(struct platform_device *dev)
-{
- printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
- __FILE__);
-
- CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-
-/**
- * usb_hcd_lh7a404_probe - initialize LH7A404-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- *
- */
-int usb_hcd_lh7a404_probe (const struct hc_driver *driver,
- struct platform_device *dev)
-{
- int retval;
- struct usb_hcd *hcd;
-
- if (dev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(driver, &dev->dev, "lh7a404");
- if (!hcd)
- return -ENOMEM;
- hcd->rsrc_start = dev->resource[0].start;
- hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed");
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- retval = -ENOMEM;
- goto err2;
- }
-
- lh7a404_start_hc(dev);
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
- if (retval == 0)
- return retval;
-
- lh7a404_stop_hc(dev);
- iounmap(hcd->regs);
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- err1:
- usb_put_hcd(hcd);
- return retval;
-}
-
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
-/**
- * usb_hcd_lh7a404_remove - shutdown processing for LH7A404-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_lh7a404_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- *
- */
-void usb_hcd_lh7a404_remove (struct usb_hcd *hcd, struct platform_device *dev)
-{
- usb_remove_hcd(hcd);
- lh7a404_stop_hc(dev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int __devinit
-ohci_lh7a404_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- ohci_dbg (ohci, "ohci_lh7a404_start, ohci:%p", ohci);
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_lh7a404_hc_driver = {
- .description = hcd_name,
- .product_desc = "LH7A404 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_lh7a404_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_lh7a404_drv_probe(struct platform_device *pdev)
-{
- int ret;
-
- pr_debug ("In ohci_hcd_lh7a404_drv_probe");
-
- if (usb_disabled())
- return -ENODEV;
-
- ret = usb_hcd_lh7a404_probe(&ohci_lh7a404_hc_driver, pdev);
- return ret;
-}
-
-static int ohci_hcd_lh7a404_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_lh7a404_remove(hcd, pdev);
- return 0;
-}
- /*TBD*/
-/*static int ohci_hcd_lh7a404_drv_suspend(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
- return 0;
-}
-static int ohci_hcd_lh7a404_drv_resume(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
-
- return 0;
-}
-*/
-
-static struct platform_driver ohci_hcd_lh7a404_driver = {
- .probe = ohci_hcd_lh7a404_drv_probe,
- .remove = ohci_hcd_lh7a404_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- /*.suspend = ohci_hcd_lh7a404_drv_suspend, */
- /*.resume = ohci_hcd_lh7a404_drv_resume, */
- .driver = {
- .name = "lh7a404-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:lh7a404-ohci");
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index b2c2dbf08766..1ca1821320f4 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -80,8 +80,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
};
-static int __devinit
-ohci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ohci_hcd_ppc_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -201,14 +200,12 @@ static int ohci_hcd_ppc_of_remove(struct platform_device *op)
return 0;
}
-static int ohci_hcd_ppc_of_shutdown(struct platform_device *op)
+static void ohci_hcd_ppc_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
-
- return 0;
}
@@ -243,7 +240,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
#endif
-static struct of_platform_driver ohci_hcd_ppc_of_driver = {
+static struct platform_driver ohci_hcd_ppc_of_driver = {
.probe = ohci_hcd_ppc_of_probe,
.remove = ohci_hcd_ppc_of_remove,
.shutdown = ohci_hcd_ppc_of_shutdown,
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 8dabe8e31d8c..3558491dd87d 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -185,7 +185,7 @@ static struct platform_driver ohci_hcd_tmio_driver;
static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -274,7 +274,7 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
usb_remove_hcd(hcd);
tmio_stop_hc(dev);
@@ -293,7 +293,7 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
@@ -326,7 +326,7 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
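The ohci-tmio.c hunks replace open-coded casts of dev->dev.platform_data with the mfd_get_cell() accessor, which hands back the MFD cell as a const pointer. The call pattern in isolation (the enable hook shown here is how the tmio glue uses the cell elsewhere in this file, not in these hunks):

static int example_mfd_probe(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);	/* was: dev->dev.platform_data */

	if (cell->enable)
		return cell->enable(dev);	/* cell callbacks take the platform device */
	return 0;
}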
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 51facb985c84..04812b42fe6f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -575,18 +575,8 @@ static inline void _ohci_writel (const struct ohci_hcd *ohci,
#endif
}
-#ifdef CONFIG_ARCH_LH7A404
-/* Marc Singer: at the time this code was written, the LH7A404
- * had a problem reading the USB host registers. This
- * implementation of the ohci_readl function performs the read
- * twice as a work-around.
- */
-#define ohci_readl(o,r) (_ohci_readl(o,r),_ohci_readl(o,r))
-#define ohci_writel(o,v,r) _ohci_writel(o,v,r)
-#else
#define ohci_readl(o,r) _ohci_readl(o,r)
#define ohci_writel(o,v,r) _ohci_writel(o,v,r)
-#endif
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cee867829ec9..4f65b14e5e08 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -471,7 +471,7 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
/*
* Store the current frame number in uhci->frame_number if the controller
- * is runnning. Expand from 11 bits (of which we use only 10) to a
+ * is running. Expand from 11 bits (of which we use only 10) to a
* full-sized integer.
*
* Like many other parts of the driver, this code relies on being polled
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
}
}
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
- void *addr;
+ struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+ void __iomem *addr;
u32 temp;
u64 temp_64;
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
}
}
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
- xhci->dba = (void *) xhci->cap_regs + val;
+ xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = (void *) xhci->run_regs->ir_set;
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
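
The TRB-math hunks above replace the shift-based masks with TRB_MAX_BUFF_SIZE - 1 and add an extra masking step so that a transfer starting exactly on a 64KB boundary begins with running_total == 0. A hedged worked sketch of the counting pattern these helpers share (not part of the patch; TRB_MAX_BUFF_SIZE is taken to be 0x10000, the value the removed TRB_MAX_BUFF_SHIFT expressed, and the helper name is invented):

        /* illustrative only: count how many TRBs a contiguous buffer needs */
        static unsigned int example_count_trbs(u64 addr, unsigned int len)
        {
                unsigned int num_trbs = 0;
                unsigned int running_total;

                /* bytes left before the next 64KB boundary */
                running_total = TRB_MAX_BUFF_SIZE -
                                (addr & (TRB_MAX_BUFF_SIZE - 1));
                /* a 64KB-aligned start yields 0 here, so its full first
                 * chunk is counted by the loop below instead */
                running_total &= TRB_MAX_BUFF_SIZE - 1;
                if (running_total != 0)
                        num_trbs++;             /* partial first chunk */
                while (running_total < len) {
                        num_trbs++;             /* one TRB per further chunk */
                        running_total += TRB_MAX_BUFF_SIZE;
                }
                /* e.g. addr = 0x1f000, len = 0x18000 gives 3 TRBs:
                 * 0x1000 + 0x10000 + 0x7000 = 0x18000 */
                return num_trbs;
        }
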
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
{
unsigned long flags;
int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
* returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1047d6..e550f35eff0f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1530,7 +1530,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
defined(CONFIG_ARCH_U5500)
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
+ hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d74a8113ae74..3fb617ef9e72 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -212,8 +212,8 @@ enum musb_g_ep0_state {
* directly with the "flat" model, or after setting up an index register.
*/
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
+ || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
|| defined(CONFIG_ARCH_OMAP4)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
@@ -358,7 +358,7 @@ struct musb_csr_regs {
struct musb_context_registers {
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
defined(CONFIG_ARCH_OMAP4)
u32 otg_sysconfig, otg_forcestandby;
#endif
@@ -488,6 +488,15 @@ struct musb {
unsigned set_address:1;
unsigned test_mode:1;
unsigned softconnect:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+#endif
+
/*
* FIXME: Remove this flag.
*
@@ -501,14 +510,6 @@ struct musb {
*/
unsigned double_buffer_not_ok:1 __deprecated;
- u8 address;
- u8 test_mode_nr;
- u16 ackpend; /* ep0 */
- enum musb_g_ep0_state ep0_state;
- struct usb_gadget g; /* the gadget */
- struct usb_gadget_driver *gadget_driver; /* its driver */
-#endif
-
struct musb_hdrc_config *config;
#ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 21056c924c74..320fd4afb93f 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -31,7 +31,7 @@
*
*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430)
#include "omap2430.h"
#endif
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f12333fc41..bc8badd16897 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
static int omap2430_musb_exit(struct musb *musb)
{
+ del_timer_sync(&musb_idle_timer);
omap2430_low_level_exit(musb);
otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index e00fa1b22ecd..8c6fdef61d1c 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1510,7 +1510,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
/*-------------------------------------------------------------------------*/
-static int __init
+static int __devinit
isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
int status;
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8a49e4..0457813eebee 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a485c3..9c014e2ecd68 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
__func__, status, endpoint);
} else {
tty = tty_port_tty_get(&port->port);
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
+ if (tty) {
+ if (urb->actual_length) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ } else
+ dbg("%s: empty read urb received", __func__);
+ tty_kref_put(tty);
+ }
/* Resubmit urb so we continue receiving */
if (status != -ESHUTDOWN) {
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89b7f39..1c11959a7d58 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
#include "visor.h"
/*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
dbg("%s", __func__);
+ /*
+ * some Samsung Android phones in modem mode have the same ID
+ * as SPH-I500, but they are ACM devices, so don't bind to them
+ */
+ if (id->idVendor == SAMSUNG_VENDOR_ID &&
+ id->idProduct == SAMSUNG_SPH_I500_ID &&
+ serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+ serial->dev->descriptor.bDeviceSubClass ==
+ USB_CDC_SUBCLASS_ACM)
+ return -ENODEV;
+
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index a68ad7aa0b59..785772e66ed0 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -156,7 +156,7 @@ int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
/*
- * Return the hub's desciptor
+ * Return the hub's descriptor
*
* NOTE: almost cut and paste from ehci-hub.c
*
diff --git a/drivers/vbus/Kconfig b/drivers/vbus/Kconfig
new file mode 100644
index 000000000000..f51cba10913e
--- /dev/null
+++ b/drivers/vbus/Kconfig
@@ -0,0 +1,25 @@
+#
+# Virtual-Bus (VBus) driver configuration
+#
+
+config VBUS_PROXY
+ bool "Virtual-Bus support"
+ select SHM_SIGNAL
+ select IOQ
+ default n
+ help
+ Adds support for virtual-bus model drivers in a guest, allowing them to
+ connect to host-side virtual-bus resources. If you are using this kernel
+ in a virtualization solution which implements virtual-bus devices
+ on the backend, say Y. If unsure, say N.
+
+config VBUS_PCIBRIDGE
+ bool "PCI to Virtual-Bus bridge"
+ depends on PCI
+ depends on VBUS_PROXY
+ select IOQ
+ default n
+ help
+ Provides a way to bridge host-side vbus devices via a PCI-BRIDGE
+ object. If you are running virtualization with vbus devices on the
+ host, and the vbus is exposed via PCI, say Y. Otherwise, say N.
diff --git a/drivers/vbus/Makefile b/drivers/vbus/Makefile
new file mode 100644
index 000000000000..944b7f1fec90
--- /dev/null
+++ b/drivers/vbus/Makefile
@@ -0,0 +1,6 @@
+
+vbus-proxy-objs += bus-proxy.o
+obj-$(CONFIG_VBUS_PROXY) += vbus-proxy.o
+
+vbus-pcibridge-objs += pci-bridge.o
+obj-$(CONFIG_VBUS_PCIBRIDGE) += vbus-pcibridge.o
diff --git a/drivers/vbus/bus-proxy.c b/drivers/vbus/bus-proxy.c
new file mode 100644
index 000000000000..ae11f679d34e
--- /dev/null
+++ b/drivers/vbus/bus-proxy.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vbus_driver.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+
+#define VBUS_PROXY_NAME "vbus-proxy"
+
+static struct vbus_device_proxy *to_dev(struct device *_dev)
+{
+ return _dev ? container_of(_dev, struct vbus_device_proxy, dev) : NULL;
+}
+
+static struct vbus_driver *to_drv(struct device_driver *_drv)
+{
+ return container_of(_drv, struct vbus_driver, drv);
+}
+
+/*
+ * This function is invoked whenever a new driver and/or device is added
+ * to check if there is a match
+ */
+static int vbus_dev_proxy_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ struct vbus_driver *drv = to_drv(_drv);
+
+ return !strcmp(dev->type, drv->type);
+}
+
+static int vbus_dev_proxy_uevent(struct device *_dev, struct kobj_uevent_env *env)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+
+ if (add_uevent_var(env, "MODALIAS=vbus-proxy:%s", dev->type))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * This function is invoked after the bus infrastructure has already made a
+ * match. The device will contain a reference to the paired driver which
+ * we will extract.
+ */
+static int vbus_dev_proxy_probe(struct device *_dev)
+{
+ int ret = 0;
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ struct vbus_driver *drv = to_drv(_dev->driver);
+
+ if (drv->ops->probe)
+ ret = drv->ops->probe(dev);
+
+ return ret;
+}
+
+static struct bus_type vbus_proxy = {
+ .name = VBUS_PROXY_NAME,
+ .match = vbus_dev_proxy_match,
+ .uevent = vbus_dev_proxy_uevent,
+};
+
+static struct device vbus_proxy_rootdev = {
+ .parent = NULL,
+ .init_name = VBUS_PROXY_NAME,
+};
+
+static int __init vbus_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vbus_proxy);
+ BUG_ON(ret < 0);
+
+ ret = device_register(&vbus_proxy_rootdev);
+ BUG_ON(ret < 0);
+
+ return 0;
+}
+
+postcore_initcall(vbus_init);
+
+static void device_release(struct device *dev)
+{
+ struct vbus_device_proxy *_dev;
+
+ _dev = container_of(dev, struct vbus_device_proxy, dev);
+
+ _dev->ops->release(_dev);
+}
+
+static ssize_t _show_modalias(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "vbus-proxy:%s\n", to_dev(dev)->type);
+}
+static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, _show_modalias, NULL);
+
+int vbus_device_proxy_register(struct vbus_device_proxy *new)
+{
+ int ret;
+
+ new->dev.parent = &vbus_proxy_rootdev;
+ new->dev.bus = &vbus_proxy;
+ new->dev.release = &device_release;
+
+ ret = device_register(&new->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = device_create_file(&new->dev, &dev_attr_modalias);
+ if (ret < 0) {
+ device_unregister(&new->dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_register);
+
+void vbus_device_proxy_unregister(struct vbus_device_proxy *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_unregister);
+
+static int match_device_id(struct device *_dev, void *data)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ u64 id = *(u64 *)data;
+
+ return dev->id == id;
+}
+
+struct vbus_device_proxy *vbus_device_proxy_find(u64 id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&vbus_proxy, NULL, &id, &match_device_id);
+
+ return to_dev(dev);
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_find);
+
+int vbus_driver_register(struct vbus_driver *new)
+{
+ new->drv.bus = &vbus_proxy;
+ new->drv.name = new->type;
+ new->drv.owner = new->owner;
+ new->drv.probe = vbus_dev_proxy_probe;
+
+ return driver_register(&new->drv);
+}
+EXPORT_SYMBOL_GPL(vbus_driver_register);
+
+void vbus_driver_unregister(struct vbus_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(vbus_driver_unregister);
+
+/*
+ *---------------------------------
+ * driver-side IOQ helper
+ *---------------------------------
+ */
+static void
+vbus_driver_ioq_release(struct ioq *ioq)
+{
+ kfree(ioq->head_desc);
+ kfree(ioq);
+}
+
+static struct ioq_ops vbus_driver_ioq_ops = {
+ .release = vbus_driver_ioq_release,
+};
+
+
+int vbus_driver_ioq_alloc(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio, size_t count, struct ioq **ioq)
+{
+ struct ioq *_ioq;
+ struct ioq_ring_head *head = NULL;
+ struct shm_signal *signal = NULL;
+ size_t len = IOQ_HEAD_DESC_SIZE(count);
+ int ret = -ENOMEM;
+
+ _ioq = kzalloc(sizeof(*_ioq), GFP_KERNEL);
+ if (!_ioq)
+ goto error;
+
+ head = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!head)
+ goto error;
+
+ head->magic = IOQ_RING_MAGIC;
+ head->ver = IOQ_RING_VER;
+ head->count = cpu_to_le32(count);
+
+ ret = dev->ops->shm(dev, name, id, prio, head, len,
+ &head->signal, &signal, 0);
+ if (ret < 0)
+ goto error;
+
+ ioq_init(_ioq,
+ &vbus_driver_ioq_ops,
+ ioq_locality_north,
+ head,
+ signal,
+ count);
+
+ *ioq = _ioq;
+
+ return 0;
+
+ error:
+ kfree(_ioq);
+ kfree(head);
+
+ if (signal)
+ shm_signal_put(signal);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vbus_driver_ioq_alloc);
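
With bus-proxy.c in place, a client driver binds to the vbus-proxy bus by type string and talks to its backend through the proxy ops. A hedged sketch of such a client (assumptions: the vbus_driver_ops struct name and the header path come from vbus_driver.h, which is not shown in this hunk; the "virtual-ethernet" type and the ABI version passed to open() are invented for illustration):

        #include <linux/module.h>
        #include <linux/vbus_driver.h>          /* assumed header path */

        static int example_probe(struct vbus_device_proxy *vdev)
        {
                struct ioq *ioq;
                int ret;

                /* negotiate with the backend; "2" is an assumed ABI version */
                ret = vdev->ops->open(vdev, 2, 0);
                if (ret < 0)
                        return ret;

                /* one 64-entry ring shared with the host, via the helper above */
                return vbus_driver_ioq_alloc(vdev, "example-ring", 0, 0, 64, &ioq);
        }

        static struct vbus_driver_ops example_ops = {   /* struct name assumed */
                .probe  = example_probe,
        };

        static struct vbus_driver example_driver = {
                .type   = "virtual-ethernet",           /* invented type string */
                .owner  = THIS_MODULE,
                .ops    = &example_ops,
        };

        static int __init example_init(void)
        {
                return vbus_driver_register(&example_driver);
        }
        module_init(example_init);
        MODULE_LICENSE("GPL");
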
diff --git a/drivers/vbus/pci-bridge.c b/drivers/vbus/pci-bridge.c
new file mode 100644
index 000000000000..36de7c48891c
--- /dev/null
+++ b/drivers/vbus/pci-bridge.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (C) 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/ioq.h>
+#include <linux/interrupt.h>
+#include <linux/vbus_driver.h>
+#include <linux/vbus_pci.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+#define VBUS_PCI_NAME "pci-to-vbus-bridge"
+
+struct vbus_pci {
+ spinlock_t lock;
+ struct pci_dev *dev;
+ struct ioq eventq;
+ struct vbus_pci_event *ring;
+ struct vbus_pci_regs *regs;
+ struct vbus_pci_signals *signals;
+ int irq;
+ bool enabled;
+ struct {
+ struct dentry *fs;
+ int events;
+ int qnotify;
+ int qinject;
+ int notify;
+ int inject;
+ int bridgecalls;
+ int buscalls;
+ } stats;
+};
+
+static struct vbus_pci vbus_pci;
+
+struct vbus_pci_device {
+ char type[VBUS_MAX_DEVTYPE_LEN];
+ u64 handle;
+ struct list_head shms;
+ struct vbus_device_proxy vdev;
+ struct work_struct drop;
+};
+
+static DEFINE_PER_CPU(struct vbus_pci_fastcall_desc, vbus_pci_percpu_fastcall)
+____cacheline_aligned;
+
+/*
+ * -------------------
+ * common routines
+ * -------------------
+ */
+
+static int
+vbus_pci_bridgecall(unsigned long nr, void *data, unsigned long len)
+{
+ struct vbus_pci_call_desc params = {
+ .vector = nr,
+ .len = len,
+ .datap = __pa(data),
+ };
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vbus_pci.lock, flags);
+
+ memcpy_toio(&vbus_pci.regs->bridgecall, &params, sizeof(params));
+ ret = ioread32(&vbus_pci.regs->bridgecall);
+
+ spin_unlock_irqrestore(&vbus_pci.lock, flags);
+
+ vbus_pci.stats.bridgecalls++;
+
+ return ret;
+}
+
+static int
+vbus_pci_buscall(unsigned long nr, void *data, unsigned long len)
+{
+ struct vbus_pci_fastcall_desc *params;
+ int ret;
+
+ preempt_disable();
+
+ params = &get_cpu_var(vbus_pci_percpu_fastcall);
+
+ params->call.vector = nr;
+ params->call.len = len;
+ params->call.datap = __pa(data);
+
+ iowrite32(smp_processor_id(), &vbus_pci.signals->fastcall);
+
+ ret = params->result;
+
+ preempt_enable();
+
+ vbus_pci.stats.buscalls++;
+
+ return ret;
+}
+
+static struct vbus_pci_device *
+to_dev(struct vbus_device_proxy *vdev)
+{
+ return container_of(vdev, struct vbus_pci_device, vdev);
+}
+
+static void
+_signal_init(struct shm_signal *signal, struct shm_signal_desc *desc,
+ struct shm_signal_ops *ops)
+{
+ desc->magic = SHM_SIGNAL_MAGIC;
+ desc->ver = SHM_SIGNAL_VER;
+
+ shm_signal_init(signal, shm_locality_north, ops, desc);
+}
+
+/*
+ * -------------------
+ * _signal
+ * -------------------
+ */
+
+struct _signal {
+ char name[64];
+ struct vbus_pci *pcivbus;
+ struct shm_signal signal;
+ u32 handle;
+ struct rb_node node;
+ struct list_head list;
+ int irq;
+ struct irq_desc *desc;
+};
+
+static struct _signal *
+to_signal(struct shm_signal *signal)
+{
+ return container_of(signal, struct _signal, signal);
+}
+
+static int
+_signal_inject(struct shm_signal *signal)
+{
+ struct _signal *_signal = to_signal(signal);
+
+ vbus_pci.stats.inject++;
+ iowrite32(_signal->handle, &vbus_pci.signals->shmsignal);
+
+ return 0;
+}
+
+static void
+_signal_release(struct shm_signal *signal)
+{
+ struct _signal *_signal = to_signal(signal);
+
+ kfree(_signal);
+}
+
+static struct shm_signal_ops _signal_ops = {
+ .inject = _signal_inject,
+ .release = _signal_release,
+};
+
+static void shmsignal_disconnect(struct _signal *_signal);
+
+/*
+ * -------------------
+ * vbus_device_proxy routines
+ * -------------------
+ */
+
+static int
+vbus_pci_device_open(struct vbus_device_proxy *vdev, int version, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct vbus_pci_deviceopen params;
+ int ret;
+
+ if (dev->handle)
+ return -EINVAL;
+
+ params.devid = vdev->id;
+ params.version = version;
+
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVOPEN,
+ &params, sizeof(params));
+ if (ret < 0)
+ return ret;
+
+ dev->handle = params.handle;
+
+ return 0;
+}
+
+static int
+vbus_pci_device_close(struct vbus_device_proxy *vdev, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ unsigned long iflags;
+ int ret;
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+
+ while (!list_empty(&dev->shms)) {
+ struct _signal *_signal;
+
+ _signal = list_first_entry(&dev->shms, struct _signal, list);
+
+ list_del(&_signal->list);
+ shmsignal_disconnect(_signal);
+
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+ shm_signal_put(&_signal->signal);
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+ }
+
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+
+ /*
+ * The DEVICECLOSE will implicitly close all of the shm on the
+ * host-side, so there is no need to do an explicit per-shm
+ * hypercall
+ */
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVCLOSE,
+ &dev->handle, sizeof(dev->handle));
+
+ if (ret < 0)
+ printk(KERN_ERR "VBUS-PCI: Error closing device %s/%lld: %d\n",
+ vdev->type, vdev->id, ret);
+
+ dev->handle = 0;
+
+ return 0;
+}
+
+/*
+ * -------------------
+ * shmsignal interrupt routines
+ * -------------------
+ */
+
+/* We abstract these routines so that we can drop in irqchip later */
+
+static void
+shmsignal_wakeup(struct _signal *_signal)
+{
+ _shm_signal_wakeup(&_signal->signal);
+}
+
+static int
+shmsignal_connect(struct _signal *_signal)
+{
+ return 0;
+}
+
+static void
+shmsignal_disconnect(struct _signal *_signal)
+{
+
+}
+
+static int
+vbus_pci_device_shm(struct vbus_device_proxy *vdev, const char *name,
+ int id, int prio,
+ void *ptr, size_t len,
+ struct shm_signal_desc *sdesc, struct shm_signal **signal,
+ int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct _signal *_signal = NULL;
+ struct vbus_pci_deviceshm params;
+ unsigned long iflags;
+ int ret;
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ params.devh = dev->handle;
+ params.id = id;
+ params.flags = flags;
+ params.datap = (u64)__pa(ptr);
+ params.len = len;
+
+ if (signal) {
+ /*
+ * The signal descriptor must be embedded within the
+ * provided ptr
+ */
+ if (!sdesc
+ || (len < sizeof(*sdesc))
+ || ((void *)sdesc < ptr)
+ || ((void *)sdesc > (ptr + len - sizeof(*sdesc))))
+ return -EINVAL;
+
+ _signal = kzalloc(sizeof(*_signal), GFP_KERNEL);
+ if (!_signal)
+ return -ENOMEM;
+
+ _signal_init(&_signal->signal, sdesc, &_signal_ops);
+
+ /*
+ * take another reference for the host. This is dropped
+ * by a SHMCLOSE event
+ */
+ shm_signal_get(&_signal->signal);
+
+ params.signal.offset = (u64)(unsigned long)sdesc -
+ (u64)(unsigned long)ptr;
+ params.signal.prio = prio;
+ params.signal.cookie = (u64)(unsigned long)_signal;
+
+ } else
+ params.signal.offset = -1; /* yes, this is a u32, but it's ok */
+
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVSHM,
+ &params, sizeof(params));
+ if (ret < 0)
+ goto fail;
+
+ if (signal) {
+
+ BUG_ON(ret < 0);
+
+ _signal->handle = ret;
+
+ if (!name)
+ snprintf(_signal->name, sizeof(_signal->name),
+ "dev%lld-id%d", vdev->id, id);
+ else
+ snprintf(_signal->name, sizeof(_signal->name),
+ "%s", name);
+
+ shmsignal_connect(_signal);
+
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+ list_add_tail(&_signal->list, &dev->shms);
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+
+ shm_signal_get(&_signal->signal);
+ *signal = &_signal->signal;
+ }
+
+ return 0;
+
+fail:
+ if (_signal) {
+ /*
+ * We held two references above, so we need to drop
+ * both of them
+ */
+ shm_signal_put(&_signal->signal);
+ shm_signal_put(&_signal->signal);
+ }
+
+ return ret;
+}
+
+static int
+vbus_pci_device_call(struct vbus_device_proxy *vdev, u32 func, void *data,
+ size_t len, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct vbus_pci_devicecall params = {
+ .devh = dev->handle,
+ .func = func,
+ .datap = (u64)__pa(data),
+ .len = len,
+ .flags = flags,
+ };
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ return vbus_pci_buscall(VBUS_PCI_HC_DEVCALL, &params, sizeof(params));
+}
+
+static void
+vbus_pci_device_release(struct vbus_device_proxy *vdev)
+{
+ struct vbus_pci_device *_dev = to_dev(vdev);
+
+ vbus_pci_device_close(vdev, 0);
+
+ kfree(_dev);
+}
+
+static struct vbus_device_proxy_ops vbus_pci_device_ops = {
+ .open = vbus_pci_device_open,
+ .close = vbus_pci_device_close,
+ .shm = vbus_pci_device_shm,
+ .call = vbus_pci_device_call,
+ .release = vbus_pci_device_release,
+};
+
+/*
+ * -------------------
+ * vbus events
+ * -------------------
+ */
+
+struct deferred_devadd_event {
+ struct work_struct work;
+ struct vbus_pci_add_event event;
+};
+
+static void deferred_devdrop(struct work_struct *work);
+
+static void
+deferred_devadd(struct work_struct *work)
+{
+ struct deferred_devadd_event *_event;
+ struct vbus_pci_device *new;
+ int ret;
+
+ _event = container_of(work, struct deferred_devadd_event, work);
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ printk(KERN_ERR "VBUS_PCI: Out of memory on add_event\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&new->shms);
+
+ memcpy(new->type, _event->event.type, VBUS_MAX_DEVTYPE_LEN);
+ new->vdev.type = new->type;
+ new->vdev.id = _event->event.id;
+ new->vdev.ops = &vbus_pci_device_ops;
+
+ dev_set_name(&new->vdev.dev, "%lld", _event->event.id);
+
+ INIT_WORK(&new->drop, deferred_devdrop);
+
+ ret = vbus_device_proxy_register(&new->vdev);
+ if (ret < 0)
+ panic("failed to register device %lld(%s): %d\n",
+ new->vdev.id, new->type, ret);
+
+ kfree(_event);
+}
+
+static void
+deferred_devdrop(struct work_struct *work)
+{
+ struct vbus_pci_device *dev;
+
+ dev = container_of(work, struct vbus_pci_device, drop);
+ vbus_device_proxy_unregister(&dev->vdev);
+}
+
+static void
+event_devadd(struct vbus_pci_add_event *event)
+{
+ struct deferred_devadd_event *_event;
+
+ _event = kzalloc(sizeof(*_event), GFP_ATOMIC);
+ if (!_event) {
+ printk(KERN_ERR \
+ "VBUS_PCI: Out of ATOMIC memory on add_event\n");
+ return;
+ }
+
+ INIT_WORK(&_event->work, deferred_devadd);
+ memcpy(&_event->event, event, sizeof(*event));
+
+ schedule_work(&_event->work);
+}
+
+static void
+event_devdrop(struct vbus_pci_handle_event *event)
+{
+ struct vbus_device_proxy *dev = vbus_device_proxy_find(event->handle);
+
+ if (!dev) {
+ printk(KERN_WARNING "VBUS-PCI: devdrop failed: %lld\n",
+ event->handle);
+ return;
+ }
+
+ schedule_work(&to_dev(dev)->drop);
+}
+
+static void
+event_shmsignal(struct vbus_pci_handle_event *event)
+{
+ struct _signal *_signal = (struct _signal *)(unsigned long)event->handle;
+
+ vbus_pci.stats.notify++;
+
+ shmsignal_wakeup(_signal);
+}
+
+static void
+event_shmclose(struct vbus_pci_handle_event *event)
+{
+ struct _signal *_signal = (struct _signal *)(unsigned long)event->handle;
+
+ /*
+ * This reference was taken during the DEVICESHM call
+ */
+ shm_signal_put(&_signal->signal);
+}
+
+/*
+ * -------------------
+ * eventq routines
+ * -------------------
+ */
+
+static struct ioq_notifier eventq_notifier;
+
+static int __devinit
+eventq_init(int qlen)
+{
+ struct ioq_iterator iter;
+ int ret;
+ int i;
+
+ vbus_pci.ring = kzalloc(sizeof(struct vbus_pci_event) * qlen,
+ GFP_KERNEL);
+ if (!vbus_pci.ring)
+ return -ENOMEM;
+
+ /*
+ * We want to iterate on the "valid" index. By default the iterator
+ * will not "autoupdate" which means it will not hypercall the host
+ * with our changes. This is good, because we are really just
+ * initializing stuff here anyway. Note that you can always manually
+ * signal the host with ioq_signal() if the autoupdate feature is not
+ * used.
+ */
+ ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Seek to the tail of the valid index (which should be our first
+ * item since the queue is brand-new)
+ */
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty vbus_event and mark it
+ * valid
+ */
+ for (i = 0; i < qlen; i++) {
+ struct vbus_pci_event *event = &vbus_pci.ring[i];
+ size_t len = sizeof(*event);
+ struct ioq_ring_desc *desc = iter.desc;
+
+ BUG_ON(iter.desc->valid);
+
+ desc->cookie = (u64)(unsigned long)event;
+ desc->ptr = cpu_to_le64(__pa(event));
+ desc->len = cpu_to_le64(len); /* total length */
+ desc->valid = 1;
+
+ /*
+ * This push operation will simultaneously advance the
+ * valid-tail index and increment our position in the queue
+ * by one.
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ vbus_pci.eventq.notifier = &eventq_notifier;
+
+ /*
+ * And finally, ensure that we can receive notification
+ */
+ ioq_notify_enable(&vbus_pci.eventq, 0);
+
+ return 0;
+}
+
+/* Invoked whenever the hypervisor ioq_signal()s our eventq */
+static void
+eventq_wakeup(struct ioq_notifier *notifier)
+{
+ struct ioq_iterator iter;
+ int ret;
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side.
+ *
+ * FIXME: This in theory could run indefinitely if the host keeps
+ * feeding us events since there is nothing like a NAPI budget. We
+ * might need to address that
+ */
+ while (!iter.desc->sown) {
+ struct ioq_ring_desc *desc = iter.desc;
+ struct vbus_pci_event *event;
+
+ event = (struct vbus_pci_event *)(unsigned long)desc->cookie;
+
+ switch (event->eventid) {
+ case VBUS_PCI_EVENT_DEVADD:
+ event_devadd(&event->data.add);
+ break;
+ case VBUS_PCI_EVENT_DEVDROP:
+ event_devdrop(&event->data.handle);
+ break;
+ case VBUS_PCI_EVENT_SHMSIGNAL:
+ event_shmsignal(&event->data.handle);
+ break;
+ case VBUS_PCI_EVENT_SHMCLOSE:
+ event_shmclose(&event->data.handle);
+ break;
+ default:
+ printk(KERN_WARNING "VBUS_PCI: Unexpected event %d\n",
+ event->eventid);
+ break;
+ }
+
+ memset(event, 0, sizeof(*event));
+
+ /* Advance the in-use head */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ vbus_pci.stats.events++;
+ }
+
+ /* And let the south side know that we changed the queue */
+ ioq_signal(&vbus_pci.eventq, 0);
+}
+
+static struct ioq_notifier eventq_notifier = {
+ .signal = &eventq_wakeup,
+};
+
+/* Injected whenever the host issues an ioq_signal() on the eventq */
+static irqreturn_t
+eventq_intr(int irq, void *dev)
+{
+ vbus_pci.stats.qnotify++;
+ _shm_signal_wakeup(vbus_pci.eventq.signal);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------
+ */
+
+static int
+eventq_signal_inject(struct shm_signal *signal)
+{
+ vbus_pci.stats.qinject++;
+
+ /* The eventq uses the special-case handle=0 */
+ iowrite32(0, &vbus_pci.signals->eventq);
+
+ return 0;
+}
+
+static void
+eventq_signal_release(struct shm_signal *signal)
+{
+ kfree(signal);
+}
+
+static struct shm_signal_ops eventq_signal_ops = {
+ .inject = eventq_signal_inject,
+ .release = eventq_signal_release,
+};
+
+/*
+ * -------------------
+ */
+
+static void
+eventq_ioq_release(struct ioq *ioq)
+{
+ /* released as part of the vbus_pci object */
+}
+
+static struct ioq_ops eventq_ioq_ops = {
+ .release = eventq_ioq_release,
+};
+
+/*
+ * -------------------
+ */
+
+static void
+vbus_pci_release(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ if (vbus_pci.stats.fs)
+ debugfs_remove(vbus_pci.stats.fs);
+#endif
+
+ if (vbus_pci.irq > 0)
+ free_irq(vbus_pci.irq, NULL);
+
+ if (vbus_pci.signals)
+ pci_iounmap(vbus_pci.dev, (void *)vbus_pci.signals);
+
+ if (vbus_pci.regs)
+ pci_iounmap(vbus_pci.dev, (void *)vbus_pci.regs);
+
+ pci_release_regions(vbus_pci.dev);
+ pci_disable_device(vbus_pci.dev);
+
+ kfree(vbus_pci.eventq.head_desc);
+ kfree(vbus_pci.ring);
+
+ vbus_pci.enabled = false;
+}
+
+static int __devinit
+vbus_pci_open(void)
+{
+ struct vbus_pci_bridge_negotiate params = {
+ .magic = VBUS_PCI_ABI_MAGIC,
+ .version = VBUS_PCI_HC_VERSION,
+ .capabilities = 0,
+ };
+
+ return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_NEGOTIATE,
+ &params, sizeof(params));
+}
+
+#define QLEN 1024
+
+static int __devinit
+vbus_pci_eventq_register(void)
+{
+ struct vbus_pci_busreg params = {
+ .count = 1,
+ .eventq = {
+ {
+ .count = QLEN,
+ .ring = (u64)__pa(vbus_pci.eventq.head_desc),
+ .data = (u64)__pa(vbus_pci.ring),
+ },
+ },
+ };
+
+ return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_QREG,
+ &params, sizeof(params));
+}
+
+static int __devinit
+_ioq_init(size_t ringsize, struct ioq *ioq, struct ioq_ops *ops)
+{
+ struct shm_signal *signal = NULL;
+ struct ioq_ring_head *head = NULL;
+ size_t len = IOQ_HEAD_DESC_SIZE(ringsize);
+
+ head = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!head)
+ return -ENOMEM;
+
+ signal = kzalloc(sizeof(*signal), GFP_KERNEL);
+ if (!signal) {
+ kfree(head);
+ return -ENOMEM;
+ }
+
+ head->magic = IOQ_RING_MAGIC;
+ head->ver = IOQ_RING_VER;
+ head->count = cpu_to_le32(ringsize);
+
+ _signal_init(signal, &head->signal, &eventq_signal_ops);
+
+ ioq_init(ioq, ops, ioq_locality_north, head, signal, ringsize);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _debugfs_seq_show(struct seq_file *m, void *p)
+{
+#define P(F) \
+ seq_printf(m, " .%-30s: %d\n", #F, (int)vbus_pci.stats.F)
+
+ P(events);
+ P(qnotify);
+ P(qinject);
+ P(notify);
+ P(inject);
+ P(bridgecalls);
+ P(buscalls);
+
+#undef P
+
+ return 0;
+}
+
+static int _debugfs_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _debugfs_seq_show, inode->i_private);
+}
+
+static const struct file_operations stat_fops = {
+ .open = _debugfs_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+#endif
+
+static int __devinit
+vbus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+ int cpu;
+
+ if (vbus_pci.enabled)
+ return -EEXIST; /* we only support one bridge per kernel */
+
+ if (pdev->revision != VBUS_PCI_ABI_VERSION) {
+ printk(KERN_DEBUG "VBUS_PCI: expected ABI version %d, got %d\n",
+ VBUS_PCI_ABI_VERSION,
+ pdev->revision);
+ return -ENODEV;
+ }
+
+ vbus_pci.dev = pdev;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, VBUS_PCI_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Could not init BARs: %d\n", ret);
+ goto out_fail;
+ }
+
+ vbus_pci.regs = pci_iomap(pdev, 0, sizeof(struct vbus_pci_regs));
+ if (!vbus_pci.regs) {
+ printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
+ goto out_fail;
+ }
+
+ vbus_pci.signals = pci_iomap(pdev, 1, sizeof(struct vbus_pci_signals));
+ if (!vbus_pci.signals) {
+ printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
+ goto out_fail;
+ }
+
+ ret = vbus_pci_open();
+ if (ret < 0) {
+ printk(KERN_DEBUG "VBUS_PCI: Could not register with host: %d\n",
+ ret);
+ goto out_fail;
+ }
+
+ /*
+ * Allocate an IOQ to use for host-2-guest event notification
+ */
+ ret = _ioq_init(QLEN, &vbus_pci.eventq, &eventq_ioq_ops);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not init eventq: %d\n", ret);
+ goto out_fail;
+ }
+
+ ret = eventq_init(QLEN);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not setup ring: %d\n", ret);
+ goto out_fail;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not enable MSI: %d\n", ret);
+ goto out_fail;
+ }
+
+ vbus_pci.irq = pdev->irq;
+
+ ret = request_irq(pdev->irq, eventq_intr, 0, "vbus", NULL);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Failed to register IRQ %d\n: %d",
+ pdev->irq, ret);
+ goto out_fail;
+ }
+
+ /*
+ * Add one fastcall vector per cpu so that we can do lockless
+ * hypercalls
+ */
+ for_each_possible_cpu(cpu) {
+ struct vbus_pci_fastcall_desc *desc =
+ &per_cpu(vbus_pci_percpu_fastcall, cpu);
+ struct vbus_pci_call_desc params = {
+ .vector = cpu,
+ .len = sizeof(*desc),
+ .datap = __pa(desc),
+ };
+
+ ret = vbus_pci_bridgecall(VBUS_PCI_BRIDGE_FASTCALL_ADD,
+ &params, sizeof(params));
+ if (ret < 0) {
+ printk(KERN_ERR \
+ "VBUS_PCI: Failed to register cpu:%d\n: %d",
+ cpu, ret);
+ goto out_fail;
+ }
+ }
+
+ /*
+ * Finally register our queue on the host to start receiving events
+ */
+ ret = vbus_pci_eventq_register();
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Could not register with host: %d\n",
+ ret);
+ goto out_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ vbus_pci.stats.fs = debugfs_create_file(VBUS_PCI_NAME, S_IRUGO,
+ NULL, NULL, &stat_fops);
+ if (IS_ERR(vbus_pci.stats.fs)) {
+ ret = PTR_ERR(vbus_pci.stats.fs);
+ printk(KERN_ERR "VBUS_PCI: error creating stats-fs: %d\n", ret);
+ goto out_fail;
+ }
+#endif
+
+ vbus_pci.enabled = true;
+
+ printk(KERN_INFO "Virtual-Bus: Copyright (c) 2009, " \
+ "Gregory Haskins <ghaskins@novell.com>\n");
+
+ return 0;
+
+ out_fail:
+ vbus_pci_release();
+
+ return ret;
+}
+
+static void __devexit
+vbus_pci_remove(struct pci_dev *pdev)
+{
+ vbus_pci_release();
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vbus_pci_tbl) = {
+ { PCI_DEVICE(0x11da, 0x2000) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, vbus_pci_tbl);
+
+static struct pci_driver vbus_pci_driver = {
+ .name = VBUS_PCI_NAME,
+ .id_table = vbus_pci_tbl,
+ .probe = vbus_pci_probe,
+ .remove = vbus_pci_remove,
+};
+
+static int __init
+vbus_pci_init(void)
+{
+ memset(&vbus_pci, 0, sizeof(vbus_pci));
+ spin_lock_init(&vbus_pci.lock);
+
+ return pci_register_driver(&vbus_pci_driver);
+}
+
+static void __exit
+vbus_pci_exit(void)
+{
+ pci_unregister_driver(&vbus_pci_driver);
+}
+
+module_init(vbus_pci_init);
+module_exit(vbus_pci_exit);
+
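
One rule enforced by vbus_pci_device_shm() above is easy to miss: the shm_signal_desc passed to ops->shm() must be embedded inside the very buffer being shared, otherwise the call returns -EINVAL. A hedged sketch of a conforming caller (struct, function and header names are invented or assumed for illustration):

        #include <linux/slab.h>
        #include <linux/vbus_driver.h>          /* assumed header path */

        struct example_shm {
                u32 data[64];
                struct shm_signal_desc sig;     /* embedded at the tail, as required */
        };

        static int example_share(struct vbus_device_proxy *vdev, int id)
        {
                struct example_shm *shm;
                struct shm_signal *signal;

                shm = kzalloc(sizeof(*shm), GFP_KERNEL | GFP_DMA);
                if (!shm)
                        return -ENOMEM;

                /* name == NULL lets the bridge generate a "devN-idM" name */
                return vdev->ops->shm(vdev, NULL, id, 0, shm, sizeof(*shm),
                                      &shm->sig, &signal, 0);
        }
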
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6bafb51bb437..eaa5ed7de8d9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -322,69 +322,6 @@ config FB_ARMCLCD
here and read <file:Documentation/kbuild/modules.txt>. The module
will be called amba-clcd.
-choice
-
- depends on FB_ARMCLCD && (ARCH_LH7A40X || ARCH_LH7952X)
- prompt "LCD Panel"
- default FB_ARMCLCD_SHARP_LQ035Q7DB02
-
-config FB_ARMCLCD_SHARP_LQ035Q7DB02_HRTFT
- bool "LogicPD LCD 3.5\" QVGA w/HRTFT IC"
- help
- This is an implementation of the Sharp LQ035Q7DB02, a 3.5"
- color QVGA, HRTFT panel. The LogicPD device includes
- an integrated HRTFT controller IC.
- The native resolution is 240x320.
-
-config FB_ARMCLCD_SHARP_LQ057Q3DC02
- bool "LogicPD LCD 5.7\" QVGA"
- help
- This is an implementation of the Sharp LQ057Q3DC02, a 5.7"
- color QVGA, TFT panel. The LogicPD device includes an
- The native resolution is 320x240.
-
-config FB_ARMCLCD_SHARP_LQ64D343
- bool "LogicPD LCD 6.4\" VGA"
- help
- This is an implementation of the Sharp LQ64D343, a 6.4"
- color VGA, TFT panel. The LogicPD device includes an
- The native resolution is 640x480.
-
-config FB_ARMCLCD_SHARP_LQ10D368
- bool "LogicPD LCD 10.4\" VGA"
- help
- This is an implementation of the Sharp LQ10D368, a 10.4"
- color VGA, TFT panel. The LogicPD device includes an
- The native resolution is 640x480.
-
-
-config FB_ARMCLCD_SHARP_LQ121S1DG41
- bool "LogicPD LCD 12.1\" SVGA"
- help
- This is an implementation of the Sharp LQ121S1DG41, a 12.1"
- color SVGA, TFT panel. The LogicPD device includes an
- The native resolution is 800x600.
-
- This panel requires a clock rate may be an integer fraction
- of the base LCDCLK frequency. The driver will select the
- highest frequency available that is lower than the maximum
- allowed. The panel may flicker if the clock rate is
- slower than the recommended minimum.
-
-config FB_ARMCLCD_AUO_A070VW01_WIDE
- bool "AU Optronics A070VW01 LCD 7.0\" WIDE"
- help
- This is an implementation of the AU Optronics, a 7.0"
- WIDE Color. The native resolution is 234x480.
-
-config FB_ARMCLCD_HITACHI
- bool "Hitachi Wide Screen 800x480"
- help
- This is an implementation of the Hitachi 800x480.
-
-endchoice
-
-
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
@@ -439,6 +376,24 @@ config FB_CYBER2000
Say Y if you have a NetWinder or a graphics card containing this
device, otherwise say N.
+config FB_CYBER2000_DDC
+ bool "DDC for CyberPro support"
+ depends on FB_CYBER2000
+ select FB_DDC
+ default y
+ help
+ Say Y here if you want DDC support for your CyberPro graphics
+ card. This is only I2C bus support; the driver does not use EDID.
+
+config FB_CYBER2000_I2C
+ bool "CyberPro 2000/2010/5000 I2C support"
+ depends on FB_CYBER2000 && I2C && ARCH_NETWINDER
+ select I2C_ALGOBIT
+ help
+ Enable support for the I2C video decoder interface on the
+ Integraphics CyberPro 20x0 and 5000 VGA chips. This is used
+ on the Netwinder machines for the SAA7111 video capture.
+
config FB_APOLLO
bool
depends on (FB = y) && APOLLO
@@ -2365,6 +2320,26 @@ config FB_JZ4740
help
Framebuffer support for the JZ4740 SoC.
+config FB_MXS
+ tristate "MXS LCD framebuffer support"
+ depends on FB && ARCH_MXS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Framebuffer support for the MXS SoC.
+
+config FB_PUV3_UNIGFX
+ tristate "PKUnity v3 Unigfx framebuffer support"
+ depends on FB && UNICORE32 && ARCH_PUV3
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ help
+ Choose this option if you want to use the Unigfx device as a
+ framebuffer device, without PCI and AGP support.
+
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8c8fabdff9d0..2ea44b6625fe 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -139,6 +139,7 @@ obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
@@ -153,6 +154,7 @@ obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
+obj-$(CONFIG_FB_MXS) += mxsfb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 1c2c68356ea7..5fc983c5b92c 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -120,8 +120,23 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
static int
clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
{
+ u32 caps;
int ret = 0;
+ if (fb->panel->caps && fb->board->caps)
+ caps = fb->panel->caps & fb->board->caps;
+ else {
+ /* Old way of specifying what can be used */
+ caps = fb->panel->cntl & CNTL_BGR ?
+ CLCD_CAP_BGR : CLCD_CAP_RGB;
+ /* But mask out 444 modes as they weren't supported */
+ caps &= ~CLCD_CAP_444;
+ }
+
+ /* Only TFT panels can do RGB888/BGR888 */
+ if (!(fb->panel->cntl & CNTL_LCDTFT))
+ caps &= ~CLCD_CAP_888;
+
memset(&var->transp, 0, sizeof(var->transp));
var->red.msb_right = 0;
@@ -133,6 +148,13 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
case 2:
case 4:
case 8:
+ /* If we can't do 5551, reject */
+ caps &= CLCD_CAP_5551;
+ if (!caps) {
+ ret = -EINVAL;
+ break;
+ }
+
var->red.length = var->bits_per_pixel;
var->red.offset = 0;
var->green.length = var->bits_per_pixel;
@@ -140,23 +162,61 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
var->blue.length = var->bits_per_pixel;
var->blue.offset = 0;
break;
+
case 16:
- var->red.length = 5;
- var->blue.length = 5;
+ /* If we can't do 444, 5551 or 565, reject */
+ if (!(caps & (CLCD_CAP_444 | CLCD_CAP_5551 | CLCD_CAP_565))) {
+ ret = -EINVAL;
+ break;
+ }
+
/*
- * Green length can be 5 or 6 depending whether
- * we're operating in RGB555 or RGB565 mode.
+ * Green length can be 4, 5 or 6 depending whether
+ * we're operating in 444, 5551 or 565 mode.
*/
- if (var->green.length != 5 && var->green.length != 6)
- var->green.length = 6;
+ if (var->green.length == 4 && caps & CLCD_CAP_444)
+ caps &= CLCD_CAP_444;
+ if (var->green.length == 5 && caps & CLCD_CAP_5551)
+ caps &= CLCD_CAP_5551;
+ else if (var->green.length == 6 && caps & CLCD_CAP_565)
+ caps &= CLCD_CAP_565;
+ else {
+ /*
+ * PL110 officially only supports RGB555,
+ * but may be wired up to allow RGB565.
+ */
+ if (caps & CLCD_CAP_565) {
+ var->green.length = 6;
+ caps &= CLCD_CAP_565;
+ } else if (caps & CLCD_CAP_5551) {
+ var->green.length = 5;
+ caps &= CLCD_CAP_5551;
+ } else {
+ var->green.length = 4;
+ caps &= CLCD_CAP_444;
+ }
+ }
+
+ if (var->green.length >= 5) {
+ var->red.length = 5;
+ var->blue.length = 5;
+ } else {
+ var->red.length = 4;
+ var->blue.length = 4;
+ }
break;
case 32:
- if (fb->panel->cntl & CNTL_LCDTFT) {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
+ /* If we can't do 888, reject */
+ caps &= CLCD_CAP_888;
+ if (!caps) {
+ ret = -EINVAL;
break;
}
+
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ break;
default:
ret = -EINVAL;
break;
@@ -168,7 +228,20 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
* the bitfield length defined above.
*/
if (ret == 0 && var->bits_per_pixel >= 16) {
- if (fb->panel->cntl & CNTL_BGR) {
+ bool bgr, rgb;
+
+ bgr = caps & CLCD_CAP_BGR && var->blue.offset == 0;
+ rgb = caps & CLCD_CAP_RGB && var->red.offset == 0;
+
+ if (!bgr && !rgb)
+ /*
+ * The requested format was not possible, try just
+ * our capabilities. One of BGR or RGB must be
+ * supported.
+ */
+ bgr = caps & CLCD_CAP_BGR;
+
+ if (bgr) {
var->blue.offset = 0;
var->green.offset = var->blue.offset + var->blue.length;
var->red.offset = var->green.offset + var->green.length;
@@ -443,8 +516,8 @@ static int clcdfb_register(struct clcd_fb *fb)
fb_set_var(&fb->fb, &fb->fb.var);
- printk(KERN_INFO "CLCD: %s hardware, %s display\n",
- fb->board->name, fb->panel->mode.name);
+ dev_info(&fb->dev->dev, "%s hardware, %s display\n",
+ fb->board->name, fb->panel->mode.name);
ret = register_framebuffer(&fb->fb);
if (ret == 0)
@@ -461,7 +534,7 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
-static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
+static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev->dev.platform_data;
struct clcd_fb *fb;
@@ -486,6 +559,10 @@ static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
fb->dev = dev;
fb->board = board;
+ dev_info(&fb->dev->dev, "PL%03x rev%u at 0x%08llx\n",
+ amba_part(dev), amba_rev(dev),
+ (unsigned long long)dev->res.start);
+
ret = fb->board->setup(fb);
if (ret)
goto free_fb;
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 4dc13467281d..7ba74cd4be61 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -273,7 +273,7 @@ static int __devinit bw2_do_default_mode(struct bw2_par *par,
return 0;
}
-static int __devinit bw2_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit bw2_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -375,7 +375,7 @@ static const struct of_device_id bw2_match[] = {
};
MODULE_DEVICE_TABLE(of, bw2_match);
-static struct of_platform_driver bw2_driver = {
+static struct platform_driver bw2_driver = {
.driver = {
.name = "bw2",
.owner = THIS_MODULE,
@@ -390,12 +390,12 @@ static int __init bw2_init(void)
if (fb_get_options("bw2fb", NULL))
return -ENODEV;
- return of_register_platform_driver(&bw2_driver);
+ return platform_driver_register(&bw2_driver);
}
static void __exit bw2_exit(void)
{
- of_unregister_platform_driver(&bw2_driver);
+ platform_driver_unregister(&bw2_driver);
}
module_init(bw2_init);
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index 24249535ac86..e2c85b0db632 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -463,7 +463,7 @@ static void cg14_unmap_regs(struct platform_device *op, struct fb_info *info,
info->screen_base, info->fix.smem_len);
}
-static int __devinit cg14_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit cg14_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -595,7 +595,7 @@ static const struct of_device_id cg14_match[] = {
};
MODULE_DEVICE_TABLE(of, cg14_match);
-static struct of_platform_driver cg14_driver = {
+static struct platform_driver cg14_driver = {
.driver = {
.name = "cg14",
.owner = THIS_MODULE,
@@ -610,12 +610,12 @@ static int __init cg14_init(void)
if (fb_get_options("cg14fb", NULL))
return -ENODEV;
- return of_register_platform_driver(&cg14_driver);
+ return platform_driver_register(&cg14_driver);
}
static void __exit cg14_exit(void)
{
- of_unregister_platform_driver(&cg14_driver);
+ platform_driver_unregister(&cg14_driver);
}
module_init(cg14_init);
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 09c0c3c42482..f927a7b1a8d4 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -346,8 +346,7 @@ static int __devinit cg3_do_default_mode(struct cg3_par *par)
return 0;
}
-static int __devinit cg3_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit cg3_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -462,7 +461,7 @@ static const struct of_device_id cg3_match[] = {
};
MODULE_DEVICE_TABLE(of, cg3_match);
-static struct of_platform_driver cg3_driver = {
+static struct platform_driver cg3_driver = {
.driver = {
.name = "cg3",
.owner = THIS_MODULE,
@@ -477,12 +476,12 @@ static int __init cg3_init(void)
if (fb_get_options("cg3fb", NULL))
return -ENODEV;
- return of_register_platform_driver(&cg3_driver);
+ return platform_driver_register(&cg3_driver);
}
static void __exit cg3_exit(void)
{
- of_unregister_platform_driver(&cg3_driver);
+ platform_driver_unregister(&cg3_driver);
}
module_init(cg3_init);
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 2b5a97058b08..4ffad90bde42 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -737,8 +737,7 @@ static void cg6_unmap_regs(struct platform_device *op, struct fb_info *info,
info->fix.smem_len);
}
-static int __devinit cg6_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit cg6_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -855,7 +854,7 @@ static const struct of_device_id cg6_match[] = {
};
MODULE_DEVICE_TABLE(of, cg6_match);
-static struct of_platform_driver cg6_driver = {
+static struct platform_driver cg6_driver = {
.driver = {
.name = "cg6",
.owner = THIS_MODULE,
@@ -870,12 +869,12 @@ static int __init cg6_init(void)
if (fb_get_options("cg6fb", NULL))
return -ENODEV;
- return of_register_platform_driver(&cg6_driver);
+ return platform_driver_register(&cg6_driver);
}
static void __exit cg6_exit(void)
{
- of_unregister_platform_driver(&cg6_driver);
+ platform_driver_unregister(&cg6_driver);
}
module_init(cg6_init);
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 0c1afd13ddd3..850380795b05 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -47,6 +47,8 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -61,10 +63,10 @@ struct cfb_info {
struct fb_info fb;
struct display_switch *dispsw;
struct display *display;
- struct pci_dev *dev;
unsigned char __iomem *region;
unsigned char __iomem *regs;
u_int id;
+ u_int irq;
int func_use_count;
u_long ref_ps;
@@ -88,6 +90,19 @@ struct cfb_info {
u_char ramdac_powerdown;
u32 pseudo_palette[16];
+
+ spinlock_t reg_b0_lock;
+
+#ifdef CONFIG_FB_CYBER2000_DDC
+ bool ddc_registered;
+ struct i2c_adapter ddc_adapter;
+ struct i2c_algo_bit_data ddc_algo;
+#endif
+
+#ifdef CONFIG_FB_CYBER2000_I2C
+ struct i2c_adapter i2c_adapter;
+ struct i2c_algo_bit_data i2c_algo;
+#endif
};
static char *default_font = "Acorn8x8";
@@ -494,6 +509,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
cyber2000_attrw(0x14, 0x00, cfb);
/* PLL registers */
+ spin_lock(&cfb->reg_b0_lock);
cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb);
cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb);
cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb);
@@ -501,6 +517,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
cyber2000_grphw(0x90, 0x01, cfb);
cyber2000_grphw(0xb9, 0x80, cfb);
cyber2000_grphw(0xb9, 0x00, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
cfb->ramdac_ctrl = hw->ramdac;
cyber2000fb_write_ramdac_ctrl(cfb);
@@ -681,9 +698,9 @@ cyber2000fb_decode_clock(struct par_info *hw, struct cfb_info *cfb,
* pll_ps_calc = best_div1 / (ref_ps * best_mult)
*/
best_diff = 0x7fffffff;
- best_mult = 32;
- best_div1 = 255;
- for (t_div1 = 32; t_div1 > 1; t_div1 -= 1) {
+ best_mult = 2;
+ best_div1 = 32;
+ for (t_div1 = 2; t_div1 < 32; t_div1 += 1) {
u_int rr, t_mult, t_pll_ps;
int diff;
@@ -1105,24 +1122,22 @@ void cyber2000fb_disable_extregs(struct cfb_info *cfb)
}
EXPORT_SYMBOL(cyber2000fb_disable_extregs);
-void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var)
-{
- memcpy(var, &cfb->fb.var, sizeof(struct fb_var_screeninfo));
-}
-EXPORT_SYMBOL(cyber2000fb_get_fb_var);
-
/*
* Attach a capture/tv driver to the core CyberX0X0 driver.
*/
int cyber2000fb_attach(struct cyberpro_info *info, int idx)
{
if (int_cfb_info != NULL) {
- info->dev = int_cfb_info->dev;
+ info->dev = int_cfb_info->fb.device;
+#ifdef CONFIG_FB_CYBER2000_I2C
+ info->i2c = &int_cfb_info->i2c_adapter;
+#else
+ info->i2c = NULL;
+#endif
info->regs = int_cfb_info->regs;
+ info->irq = int_cfb_info->irq;
info->fb = int_cfb_info->fb.screen_base;
info->fb_size = int_cfb_info->fb.fix.smem_len;
- info->enable_extregs = cyber2000fb_enable_extregs;
- info->disable_extregs = cyber2000fb_disable_extregs;
info->info = int_cfb_info;
strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
@@ -1141,6 +1156,183 @@ void cyber2000fb_detach(int idx)
}
EXPORT_SYMBOL(cyber2000fb_detach);
+#ifdef CONFIG_FB_CYBER2000_DDC
+
+#define DDC_REG 0xb0
+#define DDC_SCL_OUT (1 << 0)
+#define DDC_SDA_OUT (1 << 4)
+#define DDC_SCL_IN (1 << 2)
+#define DDC_SDA_IN (1 << 6)
+
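+/*
+ * A brief note on locking (inferred from the code below): DDC bit-banging
+ * goes through extended register 0xb0, and the enable/disable helpers hold
+ * the shared reg_b0_lock around each access so the PLL setup and I2C code
+ * cannot race with it.
+ */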
+static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
+{
+ spin_lock(&cfb->reg_b0_lock);
+ cyber2000fb_writew(0x1bf, 0x3ce, cfb);
+}
+
+static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
+{
+ cyber2000fb_writew(0x0bf, 0x3ce, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
+}
+
+static void cyber2000fb_ddc_setscl(void *data, int val)
+{
+ struct cfb_info *cfb = data;
+ unsigned char reg;
+
+ cyber2000fb_enable_ddc(cfb);
+ reg = cyber2000_grphr(DDC_REG, cfb);
+ if (!val) /* bit is inverted */
+ reg |= DDC_SCL_OUT;
+ else
+ reg &= ~DDC_SCL_OUT;
+ cyber2000_grphw(DDC_REG, reg, cfb);
+ cyber2000fb_disable_ddc(cfb);
+}
+
+static void cyber2000fb_ddc_setsda(void *data, int val)
+{
+ struct cfb_info *cfb = data;
+ unsigned char reg;
+
+ cyber2000fb_enable_ddc(cfb);
+ reg = cyber2000_grphr(DDC_REG, cfb);
+ if (!val) /* bit is inverted */
+ reg |= DDC_SDA_OUT;
+ else
+ reg &= ~DDC_SDA_OUT;
+ cyber2000_grphw(DDC_REG, reg, cfb);
+ cyber2000fb_disable_ddc(cfb);
+}
+
+static int cyber2000fb_ddc_getscl(void *data)
+{
+ struct cfb_info *cfb = data;
+ int retval;
+
+ cyber2000fb_enable_ddc(cfb);
+ retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SCL_IN);
+ cyber2000fb_disable_ddc(cfb);
+
+ return retval;
+}
+
+static int cyber2000fb_ddc_getsda(void *data)
+{
+ struct cfb_info *cfb = data;
+ int retval;
+
+ cyber2000fb_enable_ddc(cfb);
+ retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SDA_IN);
+ cyber2000fb_disable_ddc(cfb);
+
+ return retval;
+}
+
+static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
+{
+ strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ sizeof(cfb->ddc_adapter.name));
+ cfb->ddc_adapter.owner = THIS_MODULE;
+ cfb->ddc_adapter.class = I2C_CLASS_DDC;
+ cfb->ddc_adapter.algo_data = &cfb->ddc_algo;
+ cfb->ddc_adapter.dev.parent = cfb->fb.device;
+ cfb->ddc_algo.setsda = cyber2000fb_ddc_setsda;
+ cfb->ddc_algo.setscl = cyber2000fb_ddc_setscl;
+ cfb->ddc_algo.getsda = cyber2000fb_ddc_getsda;
+ cfb->ddc_algo.getscl = cyber2000fb_ddc_getscl;
+ cfb->ddc_algo.udelay = 10;
+ cfb->ddc_algo.timeout = 20;
+ cfb->ddc_algo.data = cfb;
+
+ i2c_set_adapdata(&cfb->ddc_adapter, cfb);
+
+ return i2c_bit_add_bus(&cfb->ddc_adapter);
+}
+#endif /* CONFIG_FB_CYBER2000_DDC */
+
+#ifdef CONFIG_FB_CYBER2000_I2C
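+/*
+ * Note on the setters below: each one rewrites EXT_LATCH2 keeping only the
+ * other line's enable bit, so SDA and SCL can be toggled independently
+ * while reg_b0_lock is held.
+ */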
+static void cyber2000fb_i2c_setsda(void *data, int state)
+{
+ struct cfb_info *cfb = data;
+ unsigned int latch2;
+
+ spin_lock(&cfb->reg_b0_lock);
+ latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
+ latch2 &= EXT_LATCH2_I2C_CLKEN;
+ if (state)
+ latch2 |= EXT_LATCH2_I2C_DATEN;
+ cyber2000_grphw(EXT_LATCH2, latch2, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
+}
+
+static void cyber2000fb_i2c_setscl(void *data, int state)
+{
+ struct cfb_info *cfb = data;
+ unsigned int latch2;
+
+ spin_lock(&cfb->reg_b0_lock);
+ latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
+ latch2 &= EXT_LATCH2_I2C_DATEN;
+ if (state)
+ latch2 |= EXT_LATCH2_I2C_CLKEN;
+ cyber2000_grphw(EXT_LATCH2, latch2, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
+}
+
+static int cyber2000fb_i2c_getsda(void *data)
+{
+ struct cfb_info *cfb = data;
+ int ret;
+
+ spin_lock(&cfb->reg_b0_lock);
+ ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_DAT);
+ spin_unlock(&cfb->reg_b0_lock);
+
+ return ret;
+}
+
+static int cyber2000fb_i2c_getscl(void *data)
+{
+ struct cfb_info *cfb = data;
+ int ret;
+
+ spin_lock(&cfb->reg_b0_lock);
+ ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_CLK);
+ spin_unlock(&cfb->reg_b0_lock);
+
+ return ret;
+}
+
+static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb)
+{
+ strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ sizeof(cfb->i2c_adapter.name));
+ cfb->i2c_adapter.owner = THIS_MODULE;
+ cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
+ cfb->i2c_adapter.dev.parent = cfb->fb.device;
+ cfb->i2c_algo.setsda = cyber2000fb_i2c_setsda;
+ cfb->i2c_algo.setscl = cyber2000fb_i2c_setscl;
+ cfb->i2c_algo.getsda = cyber2000fb_i2c_getsda;
+ cfb->i2c_algo.getscl = cyber2000fb_i2c_getscl;
+ cfb->i2c_algo.udelay = 5;
+ cfb->i2c_algo.timeout = msecs_to_jiffies(100);
+ cfb->i2c_algo.data = cfb;
+
+ return i2c_bit_add_bus(&cfb->i2c_adapter);
+}
+
+static void cyber2000fb_i2c_unregister(struct cfb_info *cfb)
+{
+ i2c_del_adapter(&cfb->i2c_adapter);
+}
+#else
+#define cyber2000fb_i2c_register(cfb) (0)
+#define cyber2000fb_i2c_unregister(cfb) do { } while (0)
+#endif
+
/*
* These parameters give
* 640x480, hsync 31.5kHz, vsync 60Hz
@@ -1275,6 +1467,8 @@ static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id,
cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
cfb->fb.pseudo_palette = cfb->pseudo_palette;
+ spin_lock_init(&cfb->reg_b0_lock);
+
fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0);
return cfb;
@@ -1369,6 +1563,11 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.fix.mmio_len = MMIO_SIZE;
cfb->fb.screen_base = cfb->region;
+#ifdef CONFIG_FB_CYBER2000_DDC
+ if (cyber2000fb_setup_ddc_bus(cfb) == 0)
+ cfb->ddc_registered = true;
+#endif
+
err = -EINVAL;
if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0,
&cyber2000fb_default_mode, 8)) {
@@ -1401,14 +1600,32 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.var.xres, cfb->fb.var.yres,
h_sync / 1000, h_sync % 1000, v_sync);
- if (cfb->dev)
- cfb->fb.device = &cfb->dev->dev;
+ err = cyber2000fb_i2c_register(cfb);
+ if (err)
+ goto failed;
+
err = register_framebuffer(&cfb->fb);
+ if (err)
+ cyber2000fb_i2c_unregister(cfb);
failed:
+#ifdef CONFIG_FB_CYBER2000_DDC
+ if (err && cfb->ddc_registered)
+ i2c_del_adapter(&cfb->ddc_adapter);
+#endif
return err;
}
+static void __devexit cyberpro_common_remove(struct cfb_info *cfb)
+{
+ unregister_framebuffer(&cfb->fb);
+#ifdef CONFIG_FB_CYBER2000_DDC
+ if (cfb->ddc_registered)
+ i2c_del_adapter(&cfb->ddc_adapter);
+#endif
+ cyber2000fb_i2c_unregister(cfb);
+}
+
static void cyberpro_common_resume(struct cfb_info *cfb)
{
cyberpro_init_hw(cfb);
@@ -1442,12 +1659,13 @@ static int __devinit cyberpro_vl_probe(void)
if (!cfb)
goto failed_release;
- cfb->dev = NULL;
+ cfb->irq = -1;
cfb->region = ioremap(FB_START, FB_SIZE);
if (!cfb->region)
goto failed_ioremap;
cfb->regs = cfb->region + MMIO_OFFSET;
+ cfb->fb.device = NULL;
cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET;
cfb->fb.fix.smem_start = FB_START;
@@ -1585,12 +1803,13 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto failed_regions;
- cfb->dev = dev;
+ cfb->irq = dev->irq;
cfb->region = pci_ioremap_bar(dev, 0);
if (!cfb->region)
goto failed_ioremap;
cfb->regs = cfb->region + MMIO_OFFSET;
+ cfb->fb.device = &dev->dev;
cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET;
cfb->fb.fix.smem_start = pci_resource_start(dev, 0);
@@ -1648,15 +1867,7 @@ static void __devexit cyberpro_pci_remove(struct pci_dev *dev)
struct cfb_info *cfb = pci_get_drvdata(dev);
if (cfb) {
- /*
- * If unregister_framebuffer fails, then
- * we will be leaving hooks that could cause
- * oopsen laying around.
- */
- if (unregister_framebuffer(&cfb->fb))
- printk(KERN_WARNING "%s: danger Will Robinson, "
- "danger danger! Oopsen imminent!\n",
- cfb->fb.fix.id);
+ cyberpro_common_remove(cfb);
iounmap(cfb->region);
cyberpro_free_fb_info(cfb);
diff --git a/drivers/video/cyber2000fb.h b/drivers/video/cyber2000fb.h
index de4fc43e51c1..bad69102e774 100644
--- a/drivers/video/cyber2000fb.h
+++ b/drivers/video/cyber2000fb.h
@@ -464,12 +464,14 @@ static void debug_printf(char *fmt, ...)
struct cfb_info;
struct cyberpro_info {
- struct pci_dev *dev;
+ struct device *dev;
+ struct i2c_adapter *i2c;
unsigned char __iomem *regs;
char __iomem *fb;
char dev_name[32];
unsigned int fb_size;
unsigned int chip_id;
+ unsigned int irq;
/*
* The following is a pointer to be passed into the
@@ -478,15 +480,6 @@ struct cyberpro_info {
* is within this structure.
*/
struct cfb_info *info;
-
- /*
- * Use these to enable the BM or TV registers. In an SMP
- * environment, these two function pointers should only be
- * called from the module_init() or module_exit()
- * functions.
- */
- void (*enable_extregs)(struct cfb_info *);
- void (*disable_extregs)(struct cfb_info *);
};
#define ID_IGA_1682 0
@@ -494,8 +487,6 @@ struct cyberpro_info {
#define ID_CYBERPRO_2010 2
#define ID_CYBERPRO_5000 3
-struct fb_var_screeninfo;
-
/*
* Note! Writing to the Cyber20x0 registers from an interrupt
* routine is definitely a bad idea atm.
@@ -504,4 +495,3 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx);
void cyber2000fb_detach(int idx);
void cyber2000fb_enable_extregs(struct cfb_info *cfb);
void cyber2000fb_disable_extregs(struct cfb_info *cfb);
-void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var);
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
new file mode 100644
index 000000000000..dbd2dc4745d1
--- /dev/null
+++ b/drivers/video/fb-puv3.c
@@ -0,0 +1,846 @@
+/*
+ * Frame Buffer Driver for PKUnity-v3 Unigfx
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <asm/sizes.h>
+#include <mach/hardware.h>
+
+/* Platform_data reserved for unifb registers. */
+#define UNIFB_REGS_NUM 10
+/* RAM reserved for the frame buffer. */
+#define UNIFB_MEMSIZE (SZ_4M) /* 4 MB for 1024*768*32b */
+
+/*
+ * Because UNIGFX does not have EDID,
+ * all the modes are organized as follows.
+ */
+static const struct fb_videomode unifb_modes[] = {
+ /* 0 640x480-60 VESA */
+ { "640x480@60", 60, 640, 480, 25175000, 48, 16, 34, 10, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 1 640x480-75 VESA */
+ { "640x480@75", 75, 640, 480, 31500000, 120, 16, 18, 1, 64, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 2 800x600-60 VESA */
+ { "800x600@60", 60, 800, 600, 40000000, 88, 40, 26, 1, 128, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 3 800x600-75 VESA */
+ { "800x600@75", 75, 800, 600, 49500000, 160, 16, 23, 1, 80, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 4 1024x768-60 VESA */
+ { "1024x768@60", 60, 1024, 768, 65000000, 160, 24, 34, 3, 136, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 5 1024x768-75 VESA */
+ { "1024x768@75", 75, 1024, 768, 78750000, 176, 16, 30, 1, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 6 1280x960-60 VESA */
+ { "1280x960@60", 60, 1280, 960, 108000000, 312, 96, 38, 1, 112, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 7 1440x900-60 VESA */
+ { "1440x900@60", 60, 1440, 900, 106500000, 232, 80, 30, 3, 152, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 8 FIXME 9 1024x600-60 VESA UNTESTED */
+ { "1024x600@60", 60, 1024, 600, 50650000, 160, 24, 26, 1, 136, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 9 FIXME 10 1024x600-75 VESA UNTESTED */
+ { "1024x600@75", 75, 1024, 600, 61500000, 176, 16, 23, 1, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 10 FIXME 11 1366x768-60 VESA UNTESTED */
+ { "1366x768@60", 60, 1366, 768, 85500000, 256, 58, 18, 1, 112, 3,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+};
+
+static struct fb_var_screeninfo unifb_default = {
+ .xres = 640,
+ .yres = 480,
+ .xres_virtual = 640,
+ .yres_virtual = 480,
+ .bits_per_pixel = 16,
+ .red = { 11, 5, 0 },
+ .green = { 5, 6, 0 },
+ .blue = { 0, 5, 0 },
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .pixclock = 25175000,
+ .left_margin = 48,
+ .right_margin = 16,
+ .upper_margin = 33,
+ .lower_margin = 10,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static struct fb_fix_screeninfo unifb_fix = {
+ .id = "UNIGFX FB",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .xpanstep = 1,
+ .ypanstep = 1,
+ .ywrapstep = 1,
+ .accel = FB_ACCEL_NONE,
+};
+
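+/*
+ * Busy-wait until the UniGFX graphics engine signals idle (bit 24 of
+ * UGE_COMMAND), warning if the polling limit is reached.
+ */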
+static void unifb_sync(struct fb_info *info)
+{
+ /* TODO: maybe this can be replaced by an interrupt */
+ int cnt;
+
+ for (cnt = 0; cnt < 0x10000000; cnt++) {
+ if (readl(UGE_COMMAND) & 0x1000000)
+ return;
+ }
+
+ if (cnt > 0x8000000)
+ dev_warn(info->device, "Warning: UniGFX GE time out ...\n");
+}
+
+static void unifb_prim_fillrect(struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ int awidth = region->width;
+ int aheight = region->height;
+ int m_iBpp = info->var.bits_per_pixel;
+ int screen_width = info->var.xres;
+ int src_sel = 1; /* from fg_color */
+ int pat_sel = 1;
+ int src_x0 = 0;
+ int dst_x0 = region->dx;
+ int src_y0 = 0;
+ int dst_y0 = region->dy;
+ int rop_alpha_sel = 0;
+ int rop_alpha_code = 0xCC;
+ int x_dir = 1;
+ int y_dir = 1;
+ int alpha_r = 0;
+ int alpha_sel = 0;
+ int dst_pitch = screen_width * (m_iBpp / 8);
+ int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
+ int src_pitch = screen_width * (m_iBpp / 8);
+ int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
+ unsigned int command = 0;
+ int clip_region = 0;
+ int clip_en = 0;
+ int tp_en = 0;
+ int fg_color = 0;
+ int bottom = info->var.yres - 1;
+ int right = info->var.xres - 1;
+ int top = 0;
+
+ bottom = (bottom << 16) | right;
+ command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16)
+ | (x_dir << 20) | (y_dir << 21) | (command << 24)
+ | (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
+ src_pitch = (dst_pitch << 16) | src_pitch;
+ awidth = awidth | (aheight << 16);
+ alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff)
+ | (alpha_sel << 16);
+ src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
+ dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
+ fg_color = region->color;
+
+ unifb_sync(info);
+
+ writel(((u32 *)(info->pseudo_palette))[fg_color], UGE_FCOLOR);
+ writel(0, UGE_BCOLOR);
+ writel(src_pitch, UGE_PITCH);
+ writel(src_offset, UGE_SRCSTART);
+ writel(dst_offset, UGE_DSTSTART);
+ writel(awidth, UGE_WIDHEIGHT);
+ writel(top, UGE_CLIP0);
+ writel(bottom, UGE_CLIP1);
+ writel(alpha_r, UGE_ROPALPHA);
+ writel(src_x0, UGE_SRCXY);
+ writel(dst_x0, UGE_DSTXY);
+ writel(command, UGE_COMMAND);
+}
+
+static void unifb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ struct fb_fillrect modded;
+ int vxres, vyres;
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ sys_fillrect(info, region);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+ if (!modded.width || !modded.height ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ unifb_prim_fillrect(info, &modded);
+}
+
+static void unifb_prim_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ int awidth = area->width;
+ int aheight = area->height;
+ int m_iBpp = info->var.bits_per_pixel;
+ int screen_width = info->var.xres;
+ int src_sel = 2; /* from mem */
+ int pat_sel = 0;
+ int src_x0 = area->sx;
+ int dst_x0 = area->dx;
+ int src_y0 = area->sy;
+ int dst_y0 = area->dy;
+
+ int rop_alpha_sel = 0;
+ int rop_alpha_code = 0xCC;
+ int x_dir = 1;
+ int y_dir = 1;
+
+ int alpha_r = 0;
+ int alpha_sel = 0;
+ int dst_pitch = screen_width * (m_iBpp / 8);
+ int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
+ int src_pitch = screen_width * (m_iBpp / 8);
+ int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
+ unsigned int command = 0;
+ int clip_region = 0;
+ int clip_en = 1;
+ int tp_en = 0;
+ int top = 0;
+ int bottom = info->var.yres;
+ int right = info->var.xres;
+ int fg_color = 0;
+ int bg_color = 0;
+
+ if (src_x0 < 0)
+ src_x0 = 0;
+ if (src_y0 < 0)
+ src_y0 = 0;
+
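+ /*
+ * When the destination lies at or below the source, copy bottom-up so
+ * overlapping rows are not overwritten before they are read.
+ */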
+ if (src_y0 - dst_y0 > 0) {
+ y_dir = 1;
+ } else {
+ y_dir = 0;
+ src_offset = (src_y0 + aheight) * src_pitch +
+ src_x0 * (m_iBpp / 8);
+ dst_offset = (dst_y0 + aheight) * dst_pitch +
+ dst_x0 * (m_iBpp / 8);
+ src_y0 += aheight;
+ dst_y0 += aheight;
+ }
+
+ command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16) |
+ (x_dir << 20) | (y_dir << 21) | (command << 24) |
+ (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
+ src_pitch = (dst_pitch << 16) | src_pitch;
+ awidth = awidth | (aheight << 16);
+ alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff) |
+ (alpha_sel << 16);
+ src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
+ dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
+ bottom = (bottom << 16) | right;
+
+ unifb_sync(info);
+
+ writel(src_pitch, UGE_PITCH);
+ writel(src_offset, UGE_SRCSTART);
+ writel(dst_offset, UGE_DSTSTART);
+ writel(awidth, UGE_WIDHEIGHT);
+ writel(top, UGE_CLIP0);
+ writel(bottom, UGE_CLIP1);
+ writel(bg_color, UGE_BCOLOR);
+ writel(fg_color, UGE_FCOLOR);
+ writel(alpha_r, UGE_ROPALPHA);
+ writel(src_x0, UGE_SRCXY);
+ writel(dst_x0, UGE_DSTXY);
+ writel(command, UGE_COMMAND);
+}
+
+static void unifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct fb_copyarea modded;
+ u32 vxres, vyres;
+ modded.sx = area->sx;
+ modded.sy = area->sy;
+ modded.dx = area->dx;
+ modded.dy = area->dy;
+ modded.width = area->width;
+ modded.height = area->height;
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ sys_copyarea(info, area);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ if (!modded.width || !modded.height ||
+ modded.sx >= vxres || modded.sy >= vyres ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.sx + modded.width > vxres)
+ modded.width = vxres - modded.sx;
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.sy + modded.height > vyres)
+ modded.height = vyres - modded.sy;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ unifb_prim_copyarea(info, &modded);
+}
+
+static void unifb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ sys_imageblit(info, image);
+}
+
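+/*
+ * Length of one scanline in bytes: xres_virtual * bpp bits, rounded up
+ * to a 32-bit boundary and converted from bits to bytes.
+ */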
+static u_long get_line_length(int xres_virtual, int bpp)
+{
+ u_long length;
+
+ length = xres_virtual * bpp;
+ length = (length + 31) & ~31;
+ length >>= 3;
+ return length;
+}
+
+/*
+ * Setting the video mode has been split into two parts.
+ * First part, xxxfb_check_var, must not write anything
+ * to hardware, it should only verify and adjust var.
+ * This means it doesn't alter par but it does use hardware
+ * data from it to check this var.
+ */
+static int unifb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ u_long line_length;
+
+ /*
+ * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
+ * as FB_VMODE_SMOOTH_XPAN is only used internally
+ */
+
+ if (var->vmode & FB_VMODE_CONUPDATE) {
+ var->vmode |= FB_VMODE_YWRAP;
+ var->xoffset = info->var.xoffset;
+ var->yoffset = info->var.yoffset;
+ }
+
+ /*
+ * Some very basic checks
+ */
+ if (!var->xres)
+ var->xres = 1;
+ if (!var->yres)
+ var->yres = 1;
+ if (var->xres > var->xres_virtual)
+ var->xres_virtual = var->xres;
+ if (var->yres > var->yres_virtual)
+ var->yres_virtual = var->yres;
+ if (var->bits_per_pixel <= 1)
+ var->bits_per_pixel = 1;
+ else if (var->bits_per_pixel <= 8)
+ var->bits_per_pixel = 8;
+ else if (var->bits_per_pixel <= 16)
+ var->bits_per_pixel = 16;
+ else if (var->bits_per_pixel <= 24)
+ var->bits_per_pixel = 24;
+ else if (var->bits_per_pixel <= 32)
+ var->bits_per_pixel = 32;
+ else
+ return -EINVAL;
+
+ if (var->xres_virtual < var->xoffset + var->xres)
+ var->xres_virtual = var->xoffset + var->xres;
+ if (var->yres_virtual < var->yoffset + var->yres)
+ var->yres_virtual = var->yoffset + var->yres;
+
+ /*
+ * Memory limit
+ */
+ line_length =
+ get_line_length(var->xres_virtual, var->bits_per_pixel);
+ if (line_length * var->yres_virtual > UNIFB_MEMSIZE)
+ return -ENOMEM;
+
+ /*
+ * Now that we have checked it, we alter var. The reason is that the
+ * video mode passed in might not work as-is, but slight changes to it
+ * might. This way we let the user know what is acceptable.
+ */
+ switch (var->bits_per_pixel) {
+ case 1:
+ case 8:
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 0;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 16: /* RGBA 5551 */
+ if (var->transp.length) {
+ var->red.offset = 0;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 5;
+ var->blue.offset = 10;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ } else { /* RGB 565 */
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ }
+ break;
+ case 24: /* RGB 888 */
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 16;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 32: /* RGBA 8888 */
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ break;
+ }
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+
+ return 0;
+}
+
+/*
+ * This routine actually sets the video mode. It's in here where we set
+ * the hardware state, info->par and fix, which can be affected by the
+ * change in par. For this driver it doesn't do much.
+ */
+static int unifb_set_par(struct fb_info *info)
+{
+ int hTotal, vTotal, hSyncStart, hSyncEnd, vSyncStart, vSyncEnd;
+ int format;
+
+#ifdef CONFIG_PUV3_PM
+ struct clk *clk_vga;
+ u32 pixclk = 0;
+ int i;
+
+ for (i = 0; i <= 10; i++) {
+ if (info->var.xres == unifb_modes[i].xres
+ && info->var.yres == unifb_modes[i].yres
+ && info->var.upper_margin == unifb_modes[i].upper_margin
+ && info->var.lower_margin == unifb_modes[i].lower_margin
+ && info->var.left_margin == unifb_modes[i].left_margin
+ && info->var.right_margin == unifb_modes[i].right_margin
+ && info->var.hsync_len == unifb_modes[i].hsync_len
+ && info->var.vsync_len == unifb_modes[i].vsync_len) {
+ pixclk = unifb_modes[i].pixclock;
+ break;
+ }
+ }
+
+ /* set clock rate */
+ clk_vga = clk_get(info->device, "VGA_CLK");
+ if (clk_vga == ERR_PTR(-ENOENT))
+ return -ENOENT;
+
+ if (pixclk != 0) {
+ if (clk_set_rate(clk_vga, pixclk)) { /* set clock failed */
+ info->fix = unifb_fix;
+ info->var = unifb_default;
+ if (clk_set_rate(clk_vga, unifb_default.pixclock))
+ return -EINVAL;
+ }
+ }
+#endif
+
+ info->fix.line_length = get_line_length(info->var.xres_virtual,
+ info->var.bits_per_pixel);
+
+ hSyncStart = info->var.xres + info->var.right_margin;
+ hSyncEnd = hSyncStart + info->var.hsync_len;
+ hTotal = hSyncEnd + info->var.left_margin;
+
+ vSyncStart = info->var.yres + info->var.lower_margin;
+ vSyncEnd = vSyncStart + info->var.vsync_len;
+ vTotal = vSyncEnd + info->var.upper_margin;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = UDE_CFG_DST8;
+ break;
+ case 16:
+ format = UDE_CFG_DST16;
+ break;
+ case 24:
+ format = UDE_CFG_DST24;
+ break;
+ case 32:
+ format = UDE_CFG_DST32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(PKUNITY_UNIGFX_MMAP_BASE, UDE_FSA);
+ writel(info->var.yres, UDE_LS);
+ writel(get_line_length(info->var.xres,
+ info->var.bits_per_pixel) >> 3, UDE_PS);
+ /* >> 3 as required by the hardware. */
+ writel((hTotal << 16) | (info->var.xres), UDE_HAT);
+ writel(((hTotal - 1) << 16) | (info->var.xres - 1), UDE_HBT);
+ writel(((hSyncEnd - 1) << 16) | (hSyncStart - 1), UDE_HST);
+ writel((vTotal << 16) | (info->var.yres), UDE_VAT);
+ writel(((vTotal - 1) << 16) | (info->var.yres - 1), UDE_VBT);
+ writel(((vSyncEnd - 1) << 16) | (vSyncStart - 1), UDE_VST);
+ writel(UDE_CFG_GDEN_ENABLE | UDE_CFG_TIMEUP_ENABLE
+ | format | 0xC0000001, UDE_CFG);
+
+ return 0;
+}
+
+/*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
+ */
+static int unifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ if (regno >= 256) /* no. of hw registers */
+ return 1;
+
+ /* grayscale works only partially under directcolor */
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue =
+ (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
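+/*
+ * Scale a 16-bit colour component (0..0xffff) down to 'width' bits,
+ * rounding to the nearest value rather than truncating.
+ */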
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ red = CNVT_TOHW(red, info->var.red.length);
+ green = CNVT_TOHW(green, info->var.green.length);
+ blue = CNVT_TOHW(blue, info->var.blue.length);
+ transp = CNVT_TOHW(transp, info->var.transp.length);
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */
+ green = CNVT_TOHW(green, 8);
+ blue = CNVT_TOHW(blue, 8);
+ /* hey, there is a bug in transp handling... */
+ transp = CNVT_TOHW(transp, 8);
+ break;
+ }
+#undef CNVT_TOHW
+ /* Truecolor has hardware independent palette */
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 v;
+
+ if (regno >= 16)
+ return 1;
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ break;
+ case 16:
+ case 24:
+ case 32:
+ ((u32 *) (info->pseudo_palette))[regno] = v;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ */
+static int unifb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (var->vmode & FB_VMODE_YWRAP) {
+ if (var->yoffset < 0
+ || var->yoffset >= info->var.yres_virtual
+ || var->xoffset)
+ return -EINVAL;
+ } else {
+ if (var->xoffset + var->xres > info->var.xres_virtual ||
+ var->yoffset + var->yres > info->var.yres_virtual)
+ return -EINVAL;
+ }
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+ return 0;
+}
+
+int unifb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pos = info->fix.smem_start + offset;
+
+ if (offset + size > info->fix.smem_len)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, pos >> PAGE_SHIFT, size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
+ return 0;
+
+}
+
+static struct fb_ops unifb_ops = {
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_check_var = unifb_check_var,
+ .fb_set_par = unifb_set_par,
+ .fb_setcolreg = unifb_setcolreg,
+ .fb_pan_display = unifb_pan_display,
+ .fb_fillrect = unifb_fillrect,
+ .fb_copyarea = unifb_copyarea,
+ .fb_imageblit = unifb_imageblit,
+ .fb_mmap = unifb_mmap,
+};
+
+/*
+ * Initialisation
+ */
+static int unifb_probe(struct platform_device *dev)
+{
+ struct fb_info *info;
+ u32 unifb_regs[UNIFB_REGS_NUM];
+ int retval = -ENOMEM;
+ struct resource *iomem, *mapmem;
+
+ info = framebuffer_alloc(sizeof(u32)*256, &dev->dev);
+ if (!info)
+ goto err;
+
+ info->screen_base = (char __iomem *)KUSER_UNIGFX_BASE;
+ info->fbops = &unifb_ops;
+
+ retval = fb_find_mode(&info->var, info, NULL,
+ unifb_modes, 10, &unifb_modes[0], 16);
+
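+ /*
+ * fb_find_mode() returns 0 if no usable mode was found and 4 if it fell
+ * back to an arbitrary valid mode; in both cases use the driver default.
+ */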
+ if (!retval || (retval == 4))
+ info->var = unifb_default;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ unifb_fix.mmio_start = iomem->start;
+
+ mapmem = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ unifb_fix.smem_start = mapmem->start;
+ unifb_fix.smem_len = UNIFB_MEMSIZE;
+
+ info->fix = unifb_fix;
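+ /* Reuse the extra space from framebuffer_alloc() as the pseudo palette. */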
+ info->pseudo_palette = info->par;
+ info->par = NULL;
+ info->flags = FBINFO_FLAG_DEFAULT;
+#ifdef FB_ACCEL_PUV3_UNIGFX
+ info->fix.accel = FB_ACCEL_PUV3_UNIGFX;
+#endif
+
+ retval = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (retval < 0)
+ goto err1;
+
+ retval = register_framebuffer(info);
+ if (retval < 0)
+ goto err2;
+ platform_set_drvdata(dev, info);
+ platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
+
+ printk(KERN_INFO
+ "fb%d: Virtual frame buffer device, using %dM of video memory\n",
+ info->node, UNIFB_MEMSIZE >> 20);
+ return 0;
+err2:
+ fb_dealloc_cmap(&info->cmap);
+err1:
+ framebuffer_release(info);
+err:
+ return retval;
+}
+
+static int unifb_remove(struct platform_device *dev)
+{
+ struct fb_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int unifb_resume(struct platform_device *dev)
+{
+ int rc = 0;
+ u32 *unifb_regs = dev->dev.platform_data;
+
+ if (dev->dev.power.power_state.event == PM_EVENT_ON)
+ return 0;
+
+ console_lock();
+
+ if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ writel(unifb_regs[0], UDE_FSA);
+ writel(unifb_regs[1], UDE_LS);
+ writel(unifb_regs[2], UDE_PS);
+ writel(unifb_regs[3], UDE_HAT);
+ writel(unifb_regs[4], UDE_HBT);
+ writel(unifb_regs[5], UDE_HST);
+ writel(unifb_regs[6], UDE_VAT);
+ writel(unifb_regs[7], UDE_VBT);
+ writel(unifb_regs[8], UDE_VST);
+ writel(unifb_regs[9], UDE_CFG);
+ }
+ dev->dev.power.power_state = PMSG_ON;
+
+ console_unlock();
+
+ return rc;
+}
+
+static int unifb_suspend(struct platform_device *dev, pm_message_t mesg)
+{
+ u32 *unifb_regs = dev->dev.platform_data;
+
+ unifb_regs[0] = readl(UDE_FSA);
+ unifb_regs[1] = readl(UDE_LS);
+ unifb_regs[2] = readl(UDE_PS);
+ unifb_regs[3] = readl(UDE_HAT);
+ unifb_regs[4] = readl(UDE_HBT);
+ unifb_regs[5] = readl(UDE_HST);
+ unifb_regs[6] = readl(UDE_VAT);
+ unifb_regs[7] = readl(UDE_VBT);
+ unifb_regs[8] = readl(UDE_VST);
+ unifb_regs[9] = readl(UDE_CFG);
+
+ if (mesg.event == dev->dev.power.power_state.event)
+ return 0;
+
+ switch (mesg.event) {
+ case PM_EVENT_FREEZE: /* about to take snapshot */
+ case PM_EVENT_PRETHAW: /* before restoring snapshot */
+ goto done;
+ }
+
+ console_lock();
+
+ /* do nothing... */
+
+ console_unlock();
+
+done:
+ dev->dev.power.power_state = mesg;
+
+ return 0;
+}
+#else
+#define unifb_resume NULL
+#define unifb_suspend NULL
+#endif
+
+static struct platform_driver unifb_driver = {
+ .probe = unifb_probe,
+ .remove = unifb_remove,
+ .resume = unifb_resume,
+ .suspend = unifb_suspend,
+ .driver = {
+ .name = "PKUnity-v3-UNIGFX",
+ },
+};
+
+static int __init unifb_init(void)
+{
+#ifndef MODULE
+ if (fb_get_options("unifb", NULL))
+ return -ENODEV;
+#endif
+
+ return platform_driver_register(&unifb_driver);
+}
+
+module_init(unifb_init);
+
+static void __exit unifb_exit(void)
+{
+ platform_driver_unregister(&unifb_driver);
+}
+
+module_exit(unifb_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 6739b2af3bc0..910c5e6f6702 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -893,8 +893,7 @@ static void ffb_init_fix(struct fb_info *info)
info->fix.accel = FB_ACCEL_SUN_CREATOR;
}
-static int __devinit ffb_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit ffb_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct ffb_fbc __iomem *fbc;
@@ -1052,7 +1051,7 @@ static const struct of_device_id ffb_match[] = {
};
MODULE_DEVICE_TABLE(of, ffb_match);
-static struct of_platform_driver ffb_driver = {
+static struct platform_driver ffb_driver = {
.driver = {
.name = "ffb",
.owner = THIS_MODULE,
@@ -1067,12 +1066,12 @@ static int __init ffb_init(void)
if (fb_get_options("ffb", NULL))
return -ENODEV;
- return of_register_platform_driver(&ffb_driver);
+ return platform_driver_register(&ffb_driver);
}
static void __exit ffb_exit(void)
{
- of_unregister_platform_driver(&ffb_driver);
+ platform_driver_unregister(&ffb_driver);
}
module_init(ffb_init);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 8bbbf08fa3ce..9048f87fa8c1 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1487,8 +1487,7 @@ static ssize_t show_monitor(struct device *device,
return diu_ops.show_monitor_port(machine_data->monitor_port, buf);
}
-static int __devinit fsl_diu_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fsl_diu_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct mfb_info *mfbi;
@@ -1735,7 +1734,7 @@ static struct of_device_id fsl_diu_match[] = {
};
MODULE_DEVICE_TABLE(of, fsl_diu_match);
-static struct of_platform_driver fsl_diu_driver = {
+static struct platform_driver fsl_diu_driver = {
.driver = {
.name = "fsl_diu",
.owner = THIS_MODULE,
@@ -1797,7 +1796,7 @@ static int __init fsl_diu_init(void)
if (!coherence_data)
return -ENOMEM;
#endif
- ret = of_register_platform_driver(&fsl_diu_driver);
+ ret = platform_driver_register(&fsl_diu_driver);
if (ret) {
printk(KERN_ERR
"fsl-diu: failed to register platform driver\n");
@@ -1811,7 +1810,7 @@ static int __init fsl_diu_init(void)
static void __exit fsl_diu_exit(void)
{
- of_unregister_platform_driver(&fsl_diu_driver);
+ platform_driver_unregister(&fsl_diu_driver);
#if defined(CONFIG_NOT_COHERENT_CACHE)
vfree(coherence_data);
#endif
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index b599e5e36ced..9e946e2c1da9 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -547,8 +547,7 @@ static void leo_unmap_regs(struct platform_device *op, struct fb_info *info,
of_iounmap(&op->resource[0], info->screen_base, 0x800000);
}
-static int __devinit leo_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit leo_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -662,7 +661,7 @@ static const struct of_device_id leo_match[] = {
};
MODULE_DEVICE_TABLE(of, leo_match);
-static struct of_platform_driver leo_driver = {
+static struct platform_driver leo_driver = {
.driver = {
.name = "leo",
.owner = THIS_MODULE,
@@ -677,12 +676,12 @@ static int __init leo_init(void)
if (fb_get_options("leofb", NULL))
return -ENODEV;
- return of_register_platform_driver(&leo_driver);
+ return platform_driver_register(&leo_driver);
}
static void __exit leo_exit(void)
{
- of_unregister_platform_driver(&leo_driver);
+ platform_driver_unregister(&leo_driver);
}
module_init(leo_init);
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index b1c4374cf940..c76e663a6cd4 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -550,8 +550,7 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
return 0;
}
-static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct device *dev = &ofdev->dev;
@@ -717,7 +716,7 @@ static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = {
{ /* end */ }
};
-static struct of_platform_driver of_platform_mb862xxfb_driver = {
+static struct platform_driver of_platform_mb862xxfb_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -1038,7 +1037,7 @@ static int __devinit mb862xxfb_init(void)
int ret = -ENODEV;
#if defined(CONFIG_FB_MB862XX_LIME)
- ret = of_register_platform_driver(&of_platform_mb862xxfb_driver);
+ ret = platform_driver_register(&of_platform_mb862xxfb_driver);
#endif
#if defined(CONFIG_FB_MB862XX_PCI_GDC)
ret = pci_register_driver(&mb862xxfb_pci_driver);
@@ -1049,7 +1048,7 @@ static int __devinit mb862xxfb_init(void)
static void __exit mb862xxfb_exit(void)
{
#if defined(CONFIG_FB_MB862XX_LIME)
- of_unregister_platform_driver(&of_platform_mb862xxfb_driver);
+ platform_driver_unregister(&of_platform_mb862xxfb_driver);
#endif
#if defined(CONFIG_FB_MB862XX_PCI_GDC)
pci_unregister_driver(&mb862xxfb_pci_driver);
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index debe5933fd2e..5436aeb94456 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -81,7 +81,6 @@ struct msmfb_info {
spinlock_t update_lock;
struct mutex panel_init_lock;
wait_queue_head_t frame_wq;
- struct workqueue_struct *resume_workqueue;
struct work_struct resume_work;
struct msmfb_callback dma_callback;
struct msmfb_callback vsync_callback;
@@ -111,7 +110,7 @@ static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback)
if (msmfb->sleeping == UPDATING &&
msmfb->frame_done == msmfb->update_frame) {
DLOG(SUSPEND_RESUME, "full update completed\n");
- queue_work(msmfb->resume_workqueue, &msmfb->resume_work);
+ schedule_work(&msmfb->resume_work);
}
spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
wake_up(&msmfb->frame_wq);
@@ -559,12 +558,6 @@ static int msmfb_probe(struct platform_device *pdev)
spin_lock_init(&msmfb->update_lock);
mutex_init(&msmfb->panel_init_lock);
init_waitqueue_head(&msmfb->frame_wq);
- msmfb->resume_workqueue = create_workqueue("panel_on");
- if (msmfb->resume_workqueue == NULL) {
- printk(KERN_ERR "failed to create panel_on workqueue\n");
- ret = -ENOMEM;
- goto error_create_workqueue;
- }
INIT_WORK(&msmfb->resume_work, power_on_panel);
msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres,
GFP_KERNEL);
@@ -589,8 +582,6 @@ static int msmfb_probe(struct platform_device *pdev)
return 0;
error_register_framebuffer:
- destroy_workqueue(msmfb->resume_workqueue);
-error_create_workqueue:
iounmap(fb->screen_base);
error_setup_fbmem:
framebuffer_release(msmfb->fb);
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
new file mode 100644
index 000000000000..a3b1179b606b
--- /dev/null
+++ b/drivers/video/mxsfb.c
@@ -0,0 +1,914 @@
+/*
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ *
+ * This code is based on:
+ * Author: Vitaly Wool <vital@embeddedalley.com>
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "mxsfb"
+
+/**
+ * @file
+ * @brief LCDIF driver for i.MX23 and i.MX28
+ *
+ * The LCDIF supports four modes of operation:
+ * - MPU interface (to drive smart displays) -> not supported yet
+ * - VSYNC interface (like MPU interface plus Vsync) -> not supported yet
+ * - Dotclock interface (to drive LC displays with RGB data and sync signals)
+ * - DVI (to drive ITU-R BT656) -> not supported yet
+ *
+ * This driver depends on a correct setup of the pins used for this purpose
+ * (platform specific).
+ *
+ * For the developer: don't forget to set the data bus width to the display
+ * in the imx_fb_videomode structure, otherwise you will end up with ugly
+ * colours. If you are fighting jitter you can vary the clock delay. This is
+ * a feature of the i.MX28; it can be varied between 2 ns and 8 ns in 2 ns
+ * steps. Give the required value in the imx_fb_videomode structure.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <mach/mxsfb.h>
+
+#define REG_SET 4
+#define REG_CLR 8
+
+#define LCDC_CTRL 0x00
+#define LCDC_CTRL1 0x10
+#define LCDC_V4_CTRL2 0x20
+#define LCDC_V3_TRANSFER_COUNT 0x20
+#define LCDC_V4_TRANSFER_COUNT 0x30
+#define LCDC_V4_CUR_BUF 0x40
+#define LCDC_V4_NEXT_BUF 0x50
+#define LCDC_V3_CUR_BUF 0x30
+#define LCDC_V3_NEXT_BUF 0x40
+#define LCDC_TIMING 0x60
+#define LCDC_VDCTRL0 0x70
+#define LCDC_VDCTRL1 0x80
+#define LCDC_VDCTRL2 0x90
+#define LCDC_VDCTRL3 0xa0
+#define LCDC_VDCTRL4 0xb0
+#define LCDC_DVICTRL0 0xc0
+#define LCDC_DVICTRL1 0xd0
+#define LCDC_DVICTRL2 0xe0
+#define LCDC_DVICTRL3 0xf0
+#define LCDC_DVICTRL4 0x100
+#define LCDC_V4_DATA 0x180
+#define LCDC_V3_DATA 0x1b0
+#define LCDC_V4_DEBUG0 0x1d0
+#define LCDC_V3_DEBUG0 0x1f0
+
+#define CTRL_SFTRST (1 << 31)
+#define CTRL_CLKGATE (1 << 30)
+#define CTRL_BYPASS_COUNT (1 << 19)
+#define CTRL_VSYNC_MODE (1 << 18)
+#define CTRL_DOTCLK_MODE (1 << 17)
+#define CTRL_DATA_SELECT (1 << 16)
+#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
+#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
+#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
+#define CTRL_MASTER (1 << 5)
+#define CTRL_DF16 (1 << 3)
+#define CTRL_DF18 (1 << 2)
+#define CTRL_DF24 (1 << 1)
+#define CTRL_RUN (1 << 0)
+
+#define CTRL1_FIFO_CLEAR (1 << 21)
+#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+
+#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
+#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
+
+
+#define VDCTRL0_ENABLE_PRESENT (1 << 28)
+#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
+#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
+#define VDCTRL0_DOTCLK_ACTIVE_HIGH (1 << 25)
+#define VDCTRL0_POL_ACTIVE_HIGH (1 << 24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
+#define VDCTRL0_HALF_LINE (1 << 19)
+#define VDCTRL0_HALF_LINE_MODE (1 << 18)
+#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+
+#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+
+#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
+#define VDCTRL3_VSYNC_ONLY (1 << 28)
+#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
+#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
+#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+
+#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
+#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
+#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
+#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
+
+#define DEBUG0_HSYNC (1 << 26)
+#define DEBUG0_VSYNC (1 << 25)
+
+#define MIN_XRES 120
+#define MIN_YRES 120
+
+#define RED 0
+#define GREEN 1
+#define BLUE 2
+#define TRANSP 3
+
+enum mxsfb_devtype {
+ MXSFB_V3,
+ MXSFB_V4,
+};
+
+/* CPU dependent register offsets */
+struct mxsfb_devdata {
+ unsigned transfer_count;
+ unsigned cur_buf;
+ unsigned next_buf;
+ unsigned debug0;
+ unsigned hs_wdth_mask;
+ unsigned hs_wdth_shift;
+ unsigned ipversion;
+};
+
+struct mxsfb_info {
+ struct fb_info fb_info;
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *base; /* registers */
+ unsigned allocated_size;
+ int enabled;
+ unsigned ld_intf_width;
+ unsigned dotclk_delay;
+ const struct mxsfb_devdata *devdata;
+ int mapped;
+};
+
+#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
+#define mxsfb_is_v4(host) (host->devdata->ipversion == 4)
+
+static const struct mxsfb_devdata mxsfb_devdata[] = {
+ [MXSFB_V3] = {
+ .transfer_count = LCDC_V3_TRANSFER_COUNT,
+ .cur_buf = LCDC_V3_CUR_BUF,
+ .next_buf = LCDC_V3_NEXT_BUF,
+ .debug0 = LCDC_V3_DEBUG0,
+ .hs_wdth_mask = 0xff,
+ .hs_wdth_shift = 24,
+ .ipversion = 3,
+ },
+ [MXSFB_V4] = {
+ .transfer_count = LCDC_V4_TRANSFER_COUNT,
+ .cur_buf = LCDC_V4_CUR_BUF,
+ .next_buf = LCDC_V4_NEXT_BUF,
+ .debug0 = LCDC_V4_DEBUG0,
+ .hs_wdth_mask = 0x3fff,
+ .hs_wdth_shift = 18,
+ .ipversion = 4,
+ },
+};
+
+#define to_imxfb_host(x) (container_of(x, struct mxsfb_info, fb_info))
+
+/* mask and shift depends on architecture */
+static inline u32 set_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
+{
+ return (val & host->devdata->hs_wdth_mask) <<
+ host->devdata->hs_wdth_shift;
+}
+
+static inline u32 get_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
+{
+ return (val >> host->devdata->hs_wdth_shift) &
+ host->devdata->hs_wdth_mask;
+}
+
+static const struct fb_bitfield def_rgb565[] = {
+ [RED] = {
+ .offset = 11,
+ .length = 5,
+ },
+ [GREEN] = {
+ .offset = 5,
+ .length = 6,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 5,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static const struct fb_bitfield def_rgb666[] = {
+ [RED] = {
+ .offset = 16,
+ .length = 6,
+ },
+ [GREEN] = {
+ .offset = 8,
+ .length = 6,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 6,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static const struct fb_bitfield def_rgb888[] = {
+ [RED] = {
+ .offset = 16,
+ .length = 8,
+ },
+ [GREEN] = {
+ .offset = 8,
+ .length = 8,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 8,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
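+/*
+ * Pack a 16-bit colour channel into its pixel-word position according to
+ * the offset and length described by the bitfield.
+ */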
+static inline unsigned chan_to_field(unsigned chan, struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
+
+static int mxsfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ const struct fb_bitfield *rgb = NULL;
+
+ if (var->xres < MIN_XRES)
+ var->xres = MIN_XRES;
+ if (var->yres < MIN_YRES)
+ var->yres = MIN_YRES;
+
+ var->xres_virtual = var->xres;
+
+ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ /* always expect RGB 565 */
+ rgb = def_rgb565;
+ break;
+ case 32:
+ switch (host->ld_intf_width) {
+ case STMLCDIF_8BIT:
+ pr_debug("Unsupported LCD bus width mapping\n");
+ break;
+ case STMLCDIF_16BIT:
+ case STMLCDIF_18BIT:
+ /* 24 bit to 18 bit mapping */
+ rgb = def_rgb666;
+ break;
+ case STMLCDIF_24BIT:
+ /* real 24 bit */
+ rgb = def_rgb888;
+ break;
+ }
+ break;
+ default:
+ pr_debug("Unsupported colour depth: %u\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the RGB parameters for this display
+ * from the machine specific parameters.
+ */
+ var->red = rgb[RED];
+ var->green = rgb[GREEN];
+ var->blue = rgb[BLUE];
+ var->transp = rgb[TRANSP];
+
+ return 0;
+}
+
+static void mxsfb_enable_controller(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ u32 reg;
+
+ dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+ clk_enable(host->clk);
+ clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
+
+ /* if it was disabled, re-enable the mode again */
+ writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
+
+ /* enable the SYNC signals first, then the DMA engine */
+ reg = readl(host->base + LCDC_VDCTRL4);
+ reg |= VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, host->base + LCDC_VDCTRL4);
+
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
+
+ host->enabled = 1;
+}
+
+static void mxsfb_disable_controller(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ unsigned loop;
+ u32 reg;
+
+ dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+ /*
+ * Even if we disable the controller here, it will still continue
+ * until its FIFOs are running out of data
+ */
+ writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_CLR);
+
+ loop = 1000;
+ while (loop) {
+ reg = readl(host->base + LCDC_CTRL);
+ if (!(reg & CTRL_RUN))
+ break;
+ loop--;
+ }
+
+ writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
+
+ clk_disable(host->clk);
+
+ host->enabled = 0;
+}
+
+static int mxsfb_set_par(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ u32 ctrl, vdctrl0, vdctrl4;
+ int line_size, fb_size;
+ int reenable = 0;
+
+ line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
+ fb_size = fb_info->var.yres_virtual * line_size;
+
+ if (fb_size > fb_info->fix.smem_len)
+ return -ENOMEM;
+
+ fb_info->fix.line_length = line_size;
+
+ /*
+ * It seems you can't re-program the controller while it is still running.
+ * This may lead to shifted pictures (FIFO issue?).
+ * So first stop the controller and drain its FIFOs.
+ */
+ if (host->enabled) {
+ reenable = 1;
+ mxsfb_disable_controller(fb_info);
+ }
+
+ /* clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
+ CTRL_SET_BUS_WIDTH(host->ld_intf_width);
+
+ switch (fb_info->var.bits_per_pixel) {
+ case 16:
+ dev_dbg(&host->pdev->dev, "Setting up RGB565 mode\n");
+ ctrl |= CTRL_SET_WORD_LENGTH(0);
+ writel(CTRL1_SET_BYTE_PACKAGING(0xf), host->base + LCDC_CTRL1);
+ break;
+ case 32:
+ dev_dbg(&host->pdev->dev, "Setting up RGB888/666 mode\n");
+ ctrl |= CTRL_SET_WORD_LENGTH(3);
+ switch (host->ld_intf_width) {
+ case STMLCDIF_8BIT:
+ dev_dbg(&host->pdev->dev,
+ "Unsupported LCD bus width mapping\n");
+ return -EINVAL;
+ case STMLCDIF_16BIT:
+ case STMLCDIF_18BIT:
+ /* 24 bit to 18 bit mapping */
+ ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
+ * each colour component
+ */
+ break;
+ case STMLCDIF_24BIT:
+ /* real 24 bit */
+ break;
+ }
+ /* do not use packed pixels: store one pixel per 32-bit word instead */
+ writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
+ break;
+ default:
+ dev_dbg(&host->pdev->dev, "Unhandled color depth of %u\n",
+ fb_info->var.bits_per_pixel);
+ return -EINVAL;
+ }
+
+ writel(ctrl, host->base + LCDC_CTRL);
+
+ writel(TRANSFER_COUNT_SET_VCOUNT(fb_info->var.yres) |
+ TRANSFER_COUNT_SET_HCOUNT(fb_info->var.xres),
+ host->base + host->devdata->transfer_count);
+
+ vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* always in DOTCLOCK mode */
+ VDCTRL0_VSYNC_PERIOD_UNIT |
+ VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+ VDCTRL0_SET_VSYNC_PULSE_WIDTH(fb_info->var.vsync_len);
+ if (fb_info->var.sync & FB_SYNC_HOR_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_POL_ACTIVE_HIGH;
+
+ writel(vdctrl0, host->base + LCDC_VDCTRL0);
+
+ /* frame length in lines */
+ writel(fb_info->var.upper_margin + fb_info->var.vsync_len +
+ fb_info->var.lower_margin + fb_info->var.yres,
+ host->base + LCDC_VDCTRL1);
+
+ /* line length in units of clocks or pixels */
+ writel(set_hsync_pulse_width(host, fb_info->var.hsync_len) |
+ VDCTRL2_SET_HSYNC_PERIOD(fb_info->var.left_margin +
+ fb_info->var.hsync_len + fb_info->var.right_margin +
+ fb_info->var.xres),
+ host->base + LCDC_VDCTRL2);
+
+ writel(SET_HOR_WAIT_CNT(fb_info->var.left_margin +
+ fb_info->var.hsync_len) |
+ SET_VERT_WAIT_CNT(fb_info->var.upper_margin +
+ fb_info->var.vsync_len),
+ host->base + LCDC_VDCTRL3);
+
+ vdctrl4 = SET_DOTCLK_H_VALID_DATA_CNT(fb_info->var.xres);
+ if (mxsfb_is_v4(host))
+ vdctrl4 |= VDCTRL4_SET_DOTCLK_DLY(host->dotclk_delay);
+ writel(vdctrl4, host->base + LCDC_VDCTRL4);
+
+ writel(fb_info->fix.smem_start +
+ fb_info->fix.line_length * fb_info->var.yoffset,
+ host->base + host->devdata->next_buf);
+
+ if (reenable)
+ mxsfb_enable_controller(fb_info);
+
+ return 0;
+}
+
+static int mxsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *fb_info)
+{
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * If greyscale is true, then we convert the RGB value
+ * to greyscale no matter what visual we are using.
+ */
+ if (fb_info->var.grayscale)
+ red = green = blue = (19595 * red + 38470 * green +
+ 7471 * blue) >> 16;
+
+ switch (fb_info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ /*
+ * True colour: encode the RGB value according to the
+ * RGB bitfield information.
+ */
+ if (regno < 16) {
+ u32 *pal = fb_info->pseudo_palette;
+
+ val = chan_to_field(red, &fb_info->var.red);
+ val |= chan_to_field(green, &fb_info->var.green);
+ val |= chan_to_field(blue, &fb_info->var.blue);
+
+ pal[regno] = val;
+ ret = 0;
+ }
+ break;
+
+ case FB_VISUAL_STATIC_PSEUDOCOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ break;
+ }
+
+ return ret;
+}
+
+static int mxsfb_blank(int blank, struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+ switch (blank) {
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ if (host->enabled)
+ mxsfb_disable_controller(fb_info);
+ break;
+
+ case FB_BLANK_UNBLANK:
+ if (!host->enabled)
+ mxsfb_enable_controller(fb_info);
+ break;
+ }
+ return 0;
+}
+
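+/*
+ * Only vertical panning is supported; the controller picks up the new
+ * buffer address on the next frame.
+ */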
+static int mxsfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ unsigned offset;
+
+ if (var->xoffset != 0)
+ return -EINVAL;
+
+ offset = fb_info->fix.line_length * var->yoffset;
+
+ /* update on next VSYNC */
+ writel(fb_info->fix.smem_start + offset,
+ host->base + host->devdata->next_buf);
+
+ return 0;
+}
+
+static struct fb_ops mxsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = mxsfb_check_var,
+ .fb_set_par = mxsfb_set_par,
+ .fb_setcolreg = mxsfb_setcolreg,
+ .fb_blank = mxsfb_blank,
+ .fb_pan_display = mxsfb_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
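+/*
+ * Reconstruct the video mode from the registers the controller is
+ * currently running with (e.g. as set up by a bootloader splash screen),
+ * so the display can be taken over without reprogramming it.
+ */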
+static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ unsigned line_count;
+ unsigned period;
+ unsigned long pa, fbsize;
+ int bits_per_pixel, ofs;
+ u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
+ struct fb_videomode vmode;
+
+ /* Only restore the mode when the controller is running */
+ ctrl = readl(host->base + LCDC_CTRL);
+ if (!(ctrl & CTRL_RUN))
+ return -EINVAL;
+
+ vdctrl0 = readl(host->base + LCDC_VDCTRL0);
+ vdctrl2 = readl(host->base + LCDC_VDCTRL2);
+ vdctrl3 = readl(host->base + LCDC_VDCTRL3);
+ vdctrl4 = readl(host->base + LCDC_VDCTRL4);
+
+ transfer_count = readl(host->base + host->devdata->transfer_count);
+
+ vmode.xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
+ vmode.yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
+
+ switch (CTRL_GET_WORD_LENGTH(ctrl)) {
+ case 0:
+ bits_per_pixel = 16;
+ break;
+ case 3:
+ bits_per_pixel = 32;
+ break;
+ case 1:
+ default:
+ return -EINVAL;
+ }
+
+ fb_info->var.bits_per_pixel = bits_per_pixel;
+
+ vmode.pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
+ vmode.hsync_len = get_hsync_pulse_width(host, vdctrl2);
+ vmode.left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode.hsync_len;
+ vmode.right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) - vmode.hsync_len -
+ vmode.left_margin - vmode.xres;
+ vmode.vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
+ period = readl(host->base + LCDC_VDCTRL1);
+ vmode.upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode.vsync_len;
+ vmode.lower_margin = period - vmode.vsync_len - vmode.upper_margin - vmode.yres;
+
+ vmode.vmode = FB_VMODE_NONINTERLACED;
+
+ vmode.sync = 0;
+ if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
+ vmode.sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
+ vmode.sync |= FB_SYNC_VERT_HIGH_ACT;
+
+ pr_debug("Reconstructed video mode:\n");
+ pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
+ vmode.xres, vmode.yres,
+ vmode.hsync_len, vmode.left_margin, vmode.right_margin,
+ vmode.vsync_len, vmode.upper_margin, vmode.lower_margin);
+ pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode.pixclock));
+
+ fb_add_videomode(&vmode, &fb_info->modelist);
+
+ host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
+ host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
+
+ fb_info->fix.line_length = vmode.xres * (bits_per_pixel >> 3);
+
+ pa = readl(host->base + host->devdata->cur_buf);
+ fbsize = fb_info->fix.line_length * vmode.yres;
+ if (pa < fb_info->fix.smem_start)
+ return -EINVAL;
+ if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
+ return -EINVAL;
+ ofs = pa - fb_info->fix.smem_start;
+ if (ofs) {
+ memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
+ writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
+ }
+
+ line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
+ fb_info->fix.ypanstep = 1;
+
+ clk_enable(host->clk);
+ host->enabled = 1;
+
+ return 0;
+}
+
+static int __devinit mxsfb_init_fbinfo(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ struct fb_var_screeninfo *var = &fb_info->var;
+ struct mxsfb_platform_data *pdata = host->pdev->dev.platform_data;
+ dma_addr_t fb_phys;
+ void *fb_virt;
+ unsigned fb_size = pdata->fb_size;
+
+ fb_info->fbops = &mxsfb_ops;
+ fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
+ strlcpy(fb_info->fix.id, "mxs", sizeof(fb_info->fix.id));
+ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fb_info->fix.ypanstep = 1;
+ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+ fb_info->fix.accel = FB_ACCEL_NONE;
+
+ var->bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+ var->nonstd = 0;
+ var->activate = FB_ACTIVATE_NOW;
+ var->accel_flags = 0;
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ host->dotclk_delay = pdata->dotclk_delay;
+ host->ld_intf_width = pdata->ld_intf_width;
+
+ /* Memory allocation for framebuffer */
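+ /*
+ * Either map a fixed region handed over via platform data (fb_phys) or
+ * fall back to allocating contiguous pages ourselves.
+ */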
+ if (pdata->fb_phys) {
+ if (!fb_size)
+ return -EINVAL;
+
+ fb_phys = pdata->fb_phys;
+
+ if (!request_mem_region(fb_phys, fb_size, host->pdev->name))
+ return -ENOMEM;
+
+ fb_virt = ioremap(fb_phys, fb_size);
+ if (!fb_virt) {
+ release_mem_region(fb_phys, fb_size);
+ return -ENOMEM;
+ }
+ host->mapped = 1;
+ } else {
+ if (!fb_size)
+ fb_size = SZ_2M; /* default */
+ fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+ if (!fb_virt)
+ return -ENOMEM;
+
+ fb_phys = virt_to_phys(fb_virt);
+ }
+
+ fb_info->fix.smem_start = fb_phys;
+ fb_info->screen_base = fb_virt;
+ fb_info->screen_size = fb_info->fix.smem_len = fb_size;
+
+ if (mxsfb_restore_mode(host))
+ memset(fb_virt, 0, fb_size);
+
+ return 0;
+}
+
+static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+
+ if (host->mapped) {
+ iounmap(fb_info->screen_base);
+ release_mem_region(fb_info->fix.smem_start,
+ fb_info->screen_size);
+ } else {
+ free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
+ }
+}
+
+static int __devinit mxsfb_probe(struct platform_device *pdev)
+{
+ struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct mxsfb_info *host;
+ struct fb_info *fb_info;
+ struct fb_modelist *modelist;
+ int i, ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platformdata. Giving up\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory IO resource\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
+ if (!fb_info) {
+ dev_err(&pdev->dev, "Failed to allocate fbdev\n");
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ host = to_imxfb_host(fb_info);
+
+ host->base = ioremap(res->start, resource_size(res));
+ if (!host->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto error_ioremap;
+ }
+
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+
+ host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+
+ host->clk = clk_get(&host->pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error_getclock;
+ }
+
+ fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ if (!fb_info->pseudo_palette) {
+ ret = -ENOMEM;
+ goto error_pseudo_palette;
+ }
+
+ INIT_LIST_HEAD(&fb_info->modelist);
+
+ ret = mxsfb_init_fbinfo(host);
+ if (ret != 0)
+ goto error_init_fb;
+
+ for (i = 0; i < pdata->mode_count; i++)
+ fb_add_videomode(&pdata->mode_list[i], &fb_info->modelist);
+
+ modelist = list_first_entry(&fb_info->modelist,
+ struct fb_modelist, list);
+ fb_videomode_to_var(&fb_info->var, &modelist->mode);
+
+ /* init the color fields */
+ mxsfb_check_var(&fb_info->var, fb_info);
+
+ platform_set_drvdata(pdev, fb_info);
+
+ ret = register_framebuffer(fb_info);
+ if (ret != 0) {
+ dev_err(&pdev->dev,"Failed to register framebuffer\n");
+ goto error_register;
+ }
+
+ if (!host->enabled) {
+ writel(0, host->base + LCDC_CTRL);
+ mxsfb_set_par(fb_info);
+ mxsfb_enable_controller(fb_info);
+ }
+
+ return 0;
+
+error_register:
+ if (host->enabled)
+ clk_disable(host->clk);
+ fb_destroy_modelist(&fb_info->modelist);
+error_init_fb:
+ kfree(fb_info->pseudo_palette);
+error_pseudo_palette:
+ clk_put(host->clk);
+error_getclock:
+ iounmap(host->base);
+error_ioremap:
+ framebuffer_release(fb_info);
+error_alloc_info:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+static int __devexit mxsfb_remove(struct platform_device *pdev)
+{
+ struct fb_info *fb_info = platform_get_drvdata(pdev);
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (host->enabled)
+ mxsfb_disable_controller(fb_info);
+
+ unregister_framebuffer(fb_info);
+ kfree(fb_info->pseudo_palette);
+ mxsfb_free_videomem(host);
+ iounmap(host->base);
+ clk_put(host->clk);
+
+ framebuffer_release(fb_info);
+ release_mem_region(res->start, resource_size(res));
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id mxsfb_devtype[] = {
+ {
+ .name = "imx23-fb",
+ .driver_data = MXSFB_V3,
+ }, {
+ .name = "imx28-fb",
+ .driver_data = MXSFB_V4,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static struct platform_driver mxsfb_driver = {
+ .probe = mxsfb_probe,
+ .remove = __devexit_p(mxsfb_remove),
+ .id_table = mxsfb_devtype,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mxsfb_init(void)
+{
+ return platform_driver_register(&mxsfb_driver);
+}
+
+static void __exit mxsfb_exit(void)
+{
+ platform_driver_unregister(&mxsfb_driver);
+}
+
+module_init(mxsfb_init);
+module_exit(mxsfb_exit);
+
+MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 07eb30ee59c8..4a9b9ff59467 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -156,6 +156,31 @@ static struct panel_config generic_dpi_panels[] = {
.power_off_delay = 0,
.name = "toppoly_tdo35s",
},
+
+ /* Samsung LTE430WQ-F0C */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 9200,
+
+ .hfp = 8,
+ .hsw = 41,
+ .hbp = 45 - 41,
+
+ .vfp = 4,
+ .vsw = 10,
+ .vbp = 12 - 10,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "samsung_lte430wq_f0c",
+ },
};
struct panel_drv_data {
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 43b64403eaa4..db01473c3307 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,8 +1,8 @@
menuconfig OMAP2_DSS
- tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ tristate "OMAP2+ Display Subsystem support (EXPERIMENTAL)"
+ depends on ARCH_OMAP2PLUS
help
- OMAP2/3 Display Subsystem support.
+ OMAP2+ Display Subsystem support.
if OMAP2_DSS
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 8e89f6049280..266cd3cbf4df 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -34,332 +34,26 @@
#include <linux/regulator/consumer.h>
#include <plat/display.h>
-#include <plat/clock.h>
#include "dss.h"
#include "dss_features.h"
static struct {
struct platform_device *pdev;
- int ctx_id;
-
- struct clk *dss_ick;
- struct clk *dss1_fck;
- struct clk *dss2_fck;
- struct clk *dss_54m_fck;
- struct clk *dss_96m_fck;
- unsigned num_clks_enabled;
struct regulator *vdds_dsi_reg;
struct regulator *vdds_sdi_reg;
- struct regulator *vdda_dac_reg;
} core;
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
static char *def_disp_name;
module_param_named(def_disp, def_disp_name, charp, 0);
-MODULE_PARM_DESC(def_disp_name, "default display name");
+MODULE_PARM_DESC(def_disp, "default display name");
#ifdef DEBUG
unsigned int dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
- int r;
-
- if (!pdata->get_last_off_on_transaction_id)
- return 0;
- r = pdata->get_last_off_on_transaction_id(&core.pdev->dev);
- if (r < 0) {
- dev_err(&core.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != core.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- core.ctx_id, id);
- core.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- core.dss_ick,
- core.dss1_fck,
- core.dss2_fck,
- core.dss_54m_fck,
- core.dss_96m_fck
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", core.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%-15s\t%lu\t%d\n",
- clocks[i]->name,
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
-}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&core.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
-
- core.dss_ick = NULL;
- core.dss1_fck = NULL;
- core.dss2_fck = NULL;
- core.dss_54m_fck = NULL;
- core.dss_96m_fck = NULL;
-
- r = dss_get_clock(&core.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss1_fck, "dss1_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss2_fck, "dss2_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss_54m_fck, "tv_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss_96m_fck, "video_fck");
- if (r)
- goto err;
-
- return 0;
-
-err:
- if (core.dss_ick)
- clk_put(core.dss_ick);
- if (core.dss1_fck)
- clk_put(core.dss1_fck);
- if (core.dss2_fck)
- clk_put(core.dss2_fck);
- if (core.dss_54m_fck)
- clk_put(core.dss_54m_fck);
- if (core.dss_96m_fck)
- clk_put(core.dss_96m_fck);
-
- return r;
-}
-
-static void dss_put_clocks(void)
-{
- if (core.dss_96m_fck)
- clk_put(core.dss_96m_fck);
- clk_put(core.dss_54m_fck);
- clk_put(core.dss1_fck);
- clk_put(core.dss2_fck);
- clk_put(core.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(core.dss_ick);
- case DSS_CLK_FCK1:
- return clk_get_rate(core.dss1_fck);
- case DSS_CLK_FCK2:
- return clk_get_rate(core.dss2_fck);
- case DSS_CLK_54M:
- return clk_get_rate(core.dss_54m_fck);
- case DSS_CLK_96M:
- return clk_get_rate(core.dss_96m_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK1)
- ++num_clks;
- if (clks & DSS_CLK_FCK2)
- ++num_clks;
- if (clks & DSS_CLK_54M)
- ++num_clks;
- if (clks & DSS_CLK_96M)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(core.dss_ick);
- if (clks & DSS_CLK_FCK1)
- clk_enable(core.dss1_fck);
- if (clks & DSS_CLK_FCK2)
- clk_enable(core.dss2_fck);
- if (clks & DSS_CLK_54M)
- clk_enable(core.dss_54m_fck);
- if (clks & DSS_CLK_96M)
- clk_enable(core.dss_96m_fck);
-
- core.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = core.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
-}
-
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(core.dss_ick);
- if (clks & DSS_CLK_FCK1)
- clk_disable(core.dss1_fck);
- if (clks & DSS_CLK_FCK2)
- clk_disable(core.dss2_fck);
- if (clks & DSS_CLK_54M)
- clk_disable(core.dss_54m_fck);
- if (clks & DSS_CLK_96M)
- clk_disable(core.dss_96m_fck);
-
- core.num_clks_enabled -= num_clks;
-}
-
-void dss_clk_disable(enum dss_clock clks)
-{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(core.num_clks_enabled < num_clks);
-
- if (core.num_clks_enabled == num_clks)
- save_all_ctx();
- }
-
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_enable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_disable(clks);
-}
-
/* REGULATORS */
struct regulator *dss_get_vdds_dsi(void)
@@ -390,32 +84,7 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
-struct regulator *dss_get_vdda_dac(void)
-{
- struct regulator *reg;
-
- if (core.vdda_dac_reg != NULL)
- return core.vdda_dac_reg;
-
- reg = regulator_get(&core.pdev->dev, "vdda_dac");
- if (!IS_ERR(reg))
- core.vdda_dac_reg = reg;
-
- return reg;
-}
-
-/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-static void dss_debug_dump_clocks(struct seq_file *s)
-{
- core_dump_clocks(s);
- dss_dump_clocks(s);
- dispc_dump_clocks(s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
-}
-
static int dss_debug_show(struct seq_file *s, void *unused)
{
void (*func)(struct seq_file *) = s->private;
@@ -508,30 +177,18 @@ static int omap_dss_probe(struct platform_device *pdev)
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
- r = dss_get_clocks();
- if (r)
- goto err_clocks;
-
- dss_clk_enable_all_no_ctx();
-
- core.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", core.ctx_id);
-
-#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
- /* DISPC_CONTROL */
- if (omap_readl(0x48050440) & 1) /* LCD enabled? */
- skip_init = 1;
-#endif
-
- r = dss_init(skip_init);
+ r = dss_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize DSS\n");
+ DSSERR("Failed to initialize DSS platform driver\n");
goto err_dss;
}
- r = rfbi_init();
+ /* keep clocks enabled to prevent context saves/restores during init */
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+
+ r = rfbi_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize rfbi\n");
+ DSSERR("Failed to initialize rfbi platform driver\n");
goto err_rfbi;
}
@@ -541,18 +198,23 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_dpi;
}
- r = dispc_init();
+ r = dispc_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize dispc\n");
+ DSSERR("Failed to initialize dispc platform driver\n");
goto err_dispc;
}
- r = venc_init(pdev);
+ r = venc_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize venc\n");
+ DSSERR("Failed to initialize venc platform driver\n");
goto err_venc;
}
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ /* DISPC_CONTROL */
+ if (omap_readl(0x48050440) & 1) /* LCD enabled? */
+ skip_init = 1;
+#endif
if (cpu_is_omap34xx()) {
r = sdi_init(skip_init);
if (r) {
@@ -560,9 +222,9 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_sdi;
}
- r = dsi_init(pdev);
+ r = dsi_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize DSI\n");
+ DSSERR("Failed to initialize DSI platform driver\n");
goto err_dsi;
}
}
@@ -589,7 +251,7 @@ static int omap_dss_probe(struct platform_device *pdev)
pdata->default_device = dssdev;
}
- dss_clk_disable_all();
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
@@ -597,24 +259,21 @@ err_register:
dss_uninitialize_debugfs();
err_debugfs:
if (cpu_is_omap34xx())
- dsi_exit();
+ dsi_uninit_platform_driver();
err_dsi:
if (cpu_is_omap34xx())
sdi_exit();
err_sdi:
- venc_exit();
+ venc_uninit_platform_driver();
err_venc:
- dispc_exit();
+ dispc_uninit_platform_driver();
err_dispc:
dpi_exit();
err_dpi:
- rfbi_exit();
+ rfbi_uninit_platform_driver();
err_rfbi:
- dss_exit();
+ dss_uninit_platform_driver();
err_dss:
- dss_clk_disable_all_no_ctx();
- dss_put_clocks();
-err_clocks:
return r;
}
@@ -623,61 +282,19 @@ static int omap_dss_remove(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int i;
- int c;
dss_uninitialize_debugfs();
- venc_exit();
- dispc_exit();
+ venc_uninit_platform_driver();
+ dispc_uninit_platform_driver();
dpi_exit();
- rfbi_exit();
+ rfbi_uninit_platform_driver();
if (cpu_is_omap34xx()) {
- dsi_exit();
+ dsi_uninit_platform_driver();
sdi_exit();
}
- dss_exit();
-
- /* these should be removed at some point */
- c = core.dss_ick->usecount;
- if (c > 0) {
- DSSERR("warning: dss_ick usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss_ick);
- }
-
- c = core.dss1_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss1_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss1_fck);
- }
-
- c = core.dss2_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss2_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss2_fck);
- }
-
- c = core.dss_54m_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss_54m_fck);
- }
-
- if (core.dss_96m_fck) {
- c = core.dss_96m_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss_96m_fck usecount %d, disabling\n",
- c);
- while (c-- > 0)
- clk_disable(core.dss_96m_fck);
- }
- }
-
- dss_put_clocks();
+ dss_uninit_platform_driver();
dss_uninit_overlays(pdev);
dss_uninit_overlay_managers(pdev);
@@ -715,7 +332,7 @@ static struct platform_driver omap_dss_driver = {
.suspend = omap_dss_suspend,
.resume = omap_dss_resume,
.driver = {
- .name = "omapdss",
+ .name = "omap_display",
.owner = THIS_MODULE,
},
};
@@ -965,11 +582,6 @@ static void __exit omap_dss_exit(void)
core.vdds_sdi_reg = NULL;
}
- if (core.vdda_dac_reg != NULL) {
- regulator_put(core.vdda_dac_reg);
- core.vdda_dac_reg = NULL;
- }
-
platform_driver_unregister(&omap_dss_driver);
omap_dss_bus_unregister();
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 9f8c69f16e61..762ce6beeba7 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -42,8 +43,6 @@
#include "dss_features.h"
/* DISPC */
-#define DISPC_BASE 0x48050400
-
#define DISPC_SZ_REGS SZ_4K
struct dispc_reg { u16 idx; };
@@ -178,7 +177,9 @@ struct dispc_irq_stats {
};
static struct {
+ struct platform_device *pdev;
void __iomem *base;
+ int irq;
u32 fifo_size[3];
@@ -552,9 +553,9 @@ void dispc_restore_context(void)
static inline void enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
bool dispc_go_busy(enum omap_channel channel)
@@ -2312,7 +2313,7 @@ unsigned long dispc_fclk_rate(void)
unsigned long r = 0;
if (dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK)
- r = dss_clk_get_rate(DSS_CLK_FCK1);
+ r = dss_clk_get_rate(DSS_CLK_FCK);
else
#ifdef CONFIG_OMAP2_DSS_DSI
r = dsi_get_dsi1_pll_rate();
@@ -2440,7 +2441,7 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2597,7 +2598,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_VID_PRELOAD(0));
DUMPREG(DISPC_VID_PRELOAD(1));
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -2866,10 +2867,10 @@ static void print_irq_status(u32 status)
* but we presume they are on because we got an IRQ. However,
* an irq handler may turn the clocks off, so we may not have
* clock later in the function. */
-void dispc_irq_handler(void)
+static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
{
int i;
- u32 irqstatus;
+ u32 irqstatus, irqenable;
u32 handledirqs = 0;
u32 unhandled_errors;
struct omap_dispc_isr_data *isr_data;
@@ -2878,6 +2879,13 @@ void dispc_irq_handler(void)
spin_lock(&dispc.irq_lock);
irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+ irqenable = dispc_read_reg(DISPC_IRQENABLE);
+
+ /* IRQ is not for us */
+ if (!(irqstatus & irqenable)) {
+ spin_unlock(&dispc.irq_lock);
+ return IRQ_NONE;
+ }
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock(&dispc.irq_stats_lock);
@@ -2929,6 +2937,8 @@ void dispc_irq_handler(void)
}
spin_unlock(&dispc.irq_lock);
+
+ return IRQ_HANDLED;
}
static void dispc_error_worker(struct work_struct *work)
@@ -3269,47 +3279,6 @@ static void _omap_dispc_initial_config(void)
dispc_read_plane_fifo_sizes();
}
-int dispc_init(void)
-{
- u32 rev;
-
- spin_lock_init(&dispc.irq_lock);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock_init(&dispc.irq_stats_lock);
- dispc.irq_stats.last_reset = jiffies;
-#endif
-
- INIT_WORK(&dispc.error_work, dispc_error_worker);
-
- dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
- if (!dispc.base) {
- DSSERR("can't ioremap DISPC\n");
- return -ENOMEM;
- }
-
- enable_clocks(1);
-
- _omap_dispc_initial_config();
-
- _omap_dispc_initialize_irq();
-
- dispc_save_context();
-
- rev = dispc_read_reg(DISPC_REVISION);
- printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- enable_clocks(0);
-
- return 0;
-}
-
-void dispc_exit(void)
-{
- iounmap(dispc.base);
-}
-
int dispc_enable_plane(enum omap_plane plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -3359,3 +3328,94 @@ int dispc_setup_plane(enum omap_plane plane,
return r;
}
+
+/* DISPC HW IP initialisation */
+static int omap_dispchw_probe(struct platform_device *pdev)
+{
+ u32 rev;
+ int r = 0;
+ struct resource *dispc_mem;
+
+ dispc.pdev = pdev;
+
+ spin_lock_init(&dispc.irq_lock);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc.irq_stats_lock);
+ dispc.irq_stats.last_reset = jiffies;
+#endif
+
+ INIT_WORK(&dispc.error_work, dispc_error_worker);
+
+ dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
+ if (!dispc_mem) {
+ DSSERR("can't get IORESOURCE_MEM DISPC\n");
+ r = -EINVAL;
+ goto fail0;
+ }
+ dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
+ if (!dispc.base) {
+ DSSERR("can't ioremap DISPC\n");
+ r = -ENOMEM;
+ goto fail0;
+ }
+ dispc.irq = platform_get_irq(dispc.pdev, 0);
+ if (dispc.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ r = -ENODEV;
+ goto fail1;
+ }
+
+ r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
+ "OMAP DISPC", dispc.pdev);
+ if (r < 0) {
+ DSSERR("request_irq failed\n");
+ goto fail1;
+ }
+
+ enable_clocks(1);
+
+ _omap_dispc_initial_config();
+
+ _omap_dispc_initialize_irq();
+
+ dispc_save_context();
+
+ rev = dispc_read_reg(DISPC_REVISION);
+ dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ enable_clocks(0);
+
+ return 0;
+fail1:
+ iounmap(dispc.base);
+fail0:
+ return r;
+}
+
+static int omap_dispchw_remove(struct platform_device *pdev)
+{
+ free_irq(dispc.irq, dispc.pdev);
+ iounmap(dispc.base);
+ return 0;
+}
+
+static struct platform_driver omap_dispchw_driver = {
+ .probe = omap_dispchw_probe,
+ .remove = omap_dispchw_remove,
+ .driver = {
+ .name = "omap_dispc",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dispc_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dispchw_driver);
+}
+
+void dispc_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dispchw_driver);
+}
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 75fb0a515430..026702b921e5 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -107,7 +107,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -137,7 +137,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dispc_set_lcd_timings(dssdev->manager->id, t);
err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return r;
}
@@ -173,14 +173,14 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
r = dpi_basic_init(dssdev);
if (r)
goto err2;
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_clk_enable(DSS_CLK_FCK2);
+ dss_clk_enable(DSS_CLK_SYSCK);
r = dsi_pll_init(dssdev, 0, 1);
if (r)
goto err3;
@@ -199,10 +199,10 @@ err4:
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
dsi_pll_uninit();
err3:
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
#endif
err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
err1:
@@ -219,10 +219,10 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
dsi_pll_uninit();
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
#endif
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -303,22 +303,27 @@ int dpi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
- return 0;
-}
+ if (cpu_is_omap34xx() && dpi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
-int dpi_init(struct platform_device *pdev)
-{
- if (cpu_is_omap34xx()) {
- dpi.vdds_dsi_reg = dss_get_vdds_dsi();
- if (IS_ERR(dpi.vdds_dsi_reg)) {
+ vdds_dsi = dss_get_vdds_dsi();
+
+ if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(dpi.vdds_dsi_reg);
+ return PTR_ERR(vdds_dsi);
}
+
+ dpi.vdds_dsi_reg = vdds_dsi;
}
return 0;
}
+int dpi_init(struct platform_device *pdev)
+{
+ return 0;
+}
+
void dpi_exit(void)
{
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index ddf3a0560822..2444404c6179 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -42,8 +42,6 @@
/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE
-#define DSI_BASE 0x4804FC00
-
struct dsi_reg { u16 idx; };
#define DSI_REG(idx) ((const struct dsi_reg) { idx })
@@ -222,7 +220,9 @@ struct dsi_irq_stats {
static struct
{
+ struct platform_device *pdev;
void __iomem *base;
+ int irq;
struct dsi_clock_info current_cinfo;
@@ -481,13 +481,17 @@ static void print_irq_status_cio(u32 status)
static int debug_irq;
/* called from dss */
-void dsi_irq_handler(void)
+static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
u32 irqstatus, vcstatus, ciostatus;
int i;
irqstatus = dsi_read_reg(DSI_IRQSTATUS);
+ /* IRQ is not for us */
+ if (!irqstatus)
+ return IRQ_NONE;
+
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock(&dsi.irq_stats_lock);
dsi.irq_stats.irq_count++;
@@ -565,9 +569,9 @@ void dsi_irq_handler(void)
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_unlock(&dsi.irq_stats_lock);
#endif
+ return IRQ_HANDLED;
}
-
static void _dsi_initialize_irq(void)
{
u32 l;
@@ -641,18 +645,18 @@ static void dsi_vc_disable_bta_irq(int channel)
static inline void enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
static inline void dsi_enable_pll_clock(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_FCK2);
+ dss_clk_enable(DSS_CLK_SYSCK);
else
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
if (enable && dsi.pll_locked) {
if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
@@ -728,7 +732,7 @@ static unsigned long dsi_fclk_rate(void)
if (dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK) {
/* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
- r = dss_clk_get_rate(DSS_CLK_FCK1);
+ r = dss_clk_get_rate(DSS_CLK_FCK);
} else {
/* DSI FCLK source is DSI2_PLL_FCLK */
r = dsi_get_dsi2_pll_rate();
@@ -808,7 +812,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
return -EINVAL;
if (cinfo->use_dss2_fck) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+ cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
/* XXX it is unclear if highfreq should be used
* with DSS2_FCK source also */
cinfo->highfreq = 0;
@@ -854,7 +858,7 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
int match = 0;
unsigned long dss_clk_fck2;
- dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
+ dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_SYSCK);
if (req_pck == dsi.cache_req_pck &&
dsi.cache_cinfo.clkin == dss_clk_fck2) {
@@ -1306,7 +1310,7 @@ void dsi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
@@ -1378,7 +1382,7 @@ void dsi_dump_regs(struct seq_file *s)
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -3223,6 +3227,19 @@ int dsi_init_display(struct omap_dss_device *dssdev)
dsi.vc[0].dssdev = dssdev;
dsi.vc[1].dssdev = dssdev;
+ if (dsi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
+
+ vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi");
+
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dsi.vdds_dsi_reg = vdds_dsi;
+ }
+
return 0;
}
@@ -3238,10 +3255,11 @@ void dsi_wait_dsi2_pll_active(void)
DSSERR("DSI2 PLL clock not active\n");
}
-int dsi_init(struct platform_device *pdev)
+static int dsi_init(struct platform_device *pdev)
{
u32 rev;
int r;
+ struct resource *dsi_mem;
spin_lock_init(&dsi.errors_lock);
dsi.errors = 0;
@@ -3268,24 +3286,36 @@ int dsi_init(struct platform_device *pdev)
dsi.te_timer.function = dsi_te_timeout;
dsi.te_timer.data = 0;
#endif
- dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
+ dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0);
+ if (!dsi_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ r = -EINVAL;
+ goto err1;
+ }
+ dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi.base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
goto err1;
}
+ dsi.irq = platform_get_irq(dsi.pdev, 0);
+ if (dsi.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ r = -ENODEV;
+ goto err2;
+ }
- dsi.vdds_dsi_reg = dss_get_vdds_dsi();
- if (IS_ERR(dsi.vdds_dsi_reg)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- r = PTR_ERR(dsi.vdds_dsi_reg);
+ r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED,
+ "OMAP DSI1", dsi.pdev);
+ if (r < 0) {
+ DSSERR("request_irq failed\n");
goto err2;
}
enable_clocks(1);
rev = dsi_read_reg(DSI_REVISION);
- printk(KERN_INFO "OMAP DSI rev %d.%d\n",
+ dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
enable_clocks(0);
@@ -3298,8 +3328,14 @@ err1:
return r;
}
-void dsi_exit(void)
+static void dsi_exit(void)
{
+ if (dsi.vdds_dsi_reg != NULL) {
+ regulator_put(dsi.vdds_dsi_reg);
+ dsi.vdds_dsi_reg = NULL;
+ }
+
+ free_irq(dsi.irq, dsi.pdev);
iounmap(dsi.base);
destroy_workqueue(dsi.workqueue);
@@ -3307,3 +3343,41 @@ void dsi_exit(void)
DSSDBG("omap_dsi_exit\n");
}
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *pdev)
+{
+ int r;
+ dsi.pdev = pdev;
+ r = dsi_init(pdev);
+ if (r) {
+ DSSERR("Failed to initialize DSI\n");
+ goto err_dsi;
+ }
+err_dsi:
+ return r;
+}
+
+static int omap_dsi1hw_remove(struct platform_device *pdev)
+{
+ dsi_exit();
+ return 0;
+}
+
+static struct platform_driver omap_dsi1hw_driver = {
+ .probe = omap_dsi1hw_probe,
+ .remove = omap_dsi1hw_remove,
+ .driver = {
+ .name = "omap_dsi1",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dsi_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dsi1hw_driver);
+}
+
+void dsi_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dsi1hw_driver);
+}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 77c3621c9171..5a93e6678213 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -26,15 +26,13 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <plat/display.h>
+#include <plat/clock.h>
#include "dss.h"
-#define DSS_BASE 0x48050000
-
#define DSS_SZ_REGS SZ_512
struct dss_reg {
@@ -59,9 +57,17 @@ struct dss_reg {
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
static struct {
+ struct platform_device *pdev;
void __iomem *base;
+ int ctx_id;
struct clk *dpll4_m4_ck;
+ struct clk *dss_ick;
+ struct clk *dss_fck;
+ struct clk *dss_sys_clk;
+ struct clk *dss_tv_fck;
+ struct clk *dss_video_fck;
+ unsigned num_clks_enabled;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -74,6 +80,11 @@ static struct {
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
+static void dss_clk_enable_all_no_ctx(void);
+static void dss_clk_disable_all_no_ctx(void);
+static void dss_clk_enable_no_ctx(enum dss_clock clks);
+static void dss_clk_disable_no_ctx(enum dss_clock clks);
+
static int _omap_dss_wait_reset(void);
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
@@ -214,7 +225,7 @@ void dss_dump_clocks(struct seq_file *s)
unsigned long dpll4_ck_rate;
unsigned long dpll4_m4_ck_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
@@ -227,21 +238,21 @@ void dss_dump_clocks(struct seq_file *s)
seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
dpll4_ck_rate,
dpll4_ck_rate / dpll4_m4_ck_rate,
- dss_clk_get_rate(DSS_CLK_FCK1));
+ dss_clk_get_rate(DSS_CLK_FCK));
else
seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
dpll4_ck_rate,
dpll4_ck_rate / dpll4_m4_ck_rate,
- dss_clk_get_rate(DSS_CLK_FCK1));
+ dss_clk_get_rate(DSS_CLK_FCK));
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
@@ -252,7 +263,7 @@ void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_PLL_CONTROL);
DUMPREG(DSS_SDI_STATUS);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -337,7 +348,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
if (cpu_is_omap34xx()) {
unsigned long prate;
@@ -378,7 +389,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
prate = dss_get_dpll4_rate();
- fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ fck = dss_clk_get_rate(DSS_CLK_FCK);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -405,7 +416,7 @@ retry:
if (cpu_is_omap24xx()) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ fck = dss_clk_get_rate(DSS_CLK_FCK);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -482,31 +493,6 @@ found:
return 0;
}
-
-
-static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
-{
- dispc_irq_handler();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
-{
- u32 irqstatus;
-
- irqstatus = dss_read_reg(DSS_IRQSTATUS);
-
- if (irqstatus & (1<<0)) /* DISPC_IRQ */
- dispc_irq_handler();
-#ifdef CONFIG_OMAP2_DSS_DSI
- if (irqstatus & (1<<1)) /* DSI_IRQ */
- dsi_irq_handler();
-#endif
-
- return IRQ_HANDLED;
-}
-
static int _omap_dss_wait_reset(void)
{
int t = 0;
@@ -549,12 +535,19 @@ void dss_set_dac_pwrdn_bgz(bool enable)
REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
}
-int dss_init(bool skip_init)
+static int dss_init(bool skip_init)
{
int r;
u32 rev;
+ struct resource *dss_mem;
- dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
+ dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+ if (!dss_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSS\n");
+ r = -EINVAL;
+ goto fail0;
+ }
+ dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
if (!dss.base) {
DSSERR("can't ioremap DSS\n");
r = -ENOMEM;
@@ -590,23 +583,12 @@ int dss_init(bool skip_init)
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
- r = request_irq(INT_24XX_DSS_IRQ,
- cpu_is_omap24xx()
- ? dss_irq_handler_omap2
- : dss_irq_handler_omap3,
- 0, "OMAP DSS", NULL);
-
- if (r < 0) {
- DSSERR("omap2 dss: request_irq failed\n");
- goto fail1;
- }
-
if (cpu_is_omap34xx()) {
dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
if (IS_ERR(dss.dpll4_m4_ck)) {
DSSERR("Failed to get dpll4_m4_ck\n");
r = PTR_ERR(dss.dpll4_m4_ck);
- goto fail2;
+ goto fail1;
}
}
@@ -621,21 +603,386 @@ int dss_init(bool skip_init)
return 0;
-fail2:
- free_irq(INT_24XX_DSS_IRQ, NULL);
fail1:
iounmap(dss.base);
fail0:
return r;
}
-void dss_exit(void)
+static void dss_exit(void)
{
if (cpu_is_omap34xx())
clk_put(dss.dpll4_m4_ck);
- free_irq(INT_24XX_DSS_IRQ, NULL);
-
iounmap(dss.base);
}
+/* CONTEXT */
+static int dss_get_ctx_id(void)
+{
+ struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
+ int r;
+
+ if (!pdata->board_data->get_last_off_on_transaction_id)
+ return 0;
+ r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
+ if (r < 0) {
+ dev_err(&dss.pdev->dev, "getting transaction ID failed, "
+ "will force context restore\n");
+ r = -1;
+ }
+ return r;
+}
+
+int dss_need_ctx_restore(void)
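+/*
+ * Returns non-zero when the off/on transaction ID has changed since the
+ * last check, i.e. the DSS register context may have been lost and needs
+ * to be restored.
+ */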
+{
+ int id = dss_get_ctx_id();
+
+ if (id < 0 || id != dss.ctx_id) {
+ DSSDBG("ctx id %d -> id %d\n",
+ dss.ctx_id, id);
+ dss.ctx_id = id;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void save_all_ctx(void)
+{
+ DSSDBG("save context\n");
+
+ dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
+
+ dss_save_context();
+ dispc_save_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_save_context();
+#endif
+
+ dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
+}
+
+static void restore_all_ctx(void)
+{
+ DSSDBG("restore context\n");
+
+ dss_clk_enable_all_no_ctx();
+
+ dss_restore_context();
+ dispc_restore_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_restore_context();
+#endif
+
+ dss_clk_disable_all_no_ctx();
+}
+
+static int dss_get_clock(struct clk **clock, const char *clk_name)
+{
+ struct clk *clk;
+
+ clk = clk_get(&dss.pdev->dev, clk_name);
+
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock %s", clk_name);
+ return PTR_ERR(clk);
+ }
+
+ *clock = clk;
+
+ DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
+
+ return 0;
+}
+
+static int dss_get_clocks(void)
+{
+ int r;
+
+ dss.dss_ick = NULL;
+ dss.dss_fck = NULL;
+ dss.dss_sys_clk = NULL;
+ dss.dss_tv_fck = NULL;
+ dss.dss_video_fck = NULL;
+
+ r = dss_get_clock(&dss.dss_ick, "ick");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&dss.dss_fck, "fck");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&dss.dss_video_fck, "video_clk");
+ if (r)
+ goto err;
+
+ return 0;
+
+err:
+ if (dss.dss_ick)
+ clk_put(dss.dss_ick);
+ if (dss.dss_fck)
+ clk_put(dss.dss_fck);
+ if (dss.dss_sys_clk)
+ clk_put(dss.dss_sys_clk);
+ if (dss.dss_tv_fck)
+ clk_put(dss.dss_tv_fck);
+ if (dss.dss_video_fck)
+ clk_put(dss.dss_video_fck);
+
+ return r;
+}
+
+static void dss_put_clocks(void)
+{
+ if (dss.dss_video_fck)
+ clk_put(dss.dss_video_fck);
+ clk_put(dss.dss_tv_fck);
+ clk_put(dss.dss_fck);
+ clk_put(dss.dss_sys_clk);
+ clk_put(dss.dss_ick);
+}
+
+unsigned long dss_clk_get_rate(enum dss_clock clk)
+{
+ switch (clk) {
+ case DSS_CLK_ICK:
+ return clk_get_rate(dss.dss_ick);
+ case DSS_CLK_FCK:
+ return clk_get_rate(dss.dss_fck);
+ case DSS_CLK_SYSCK:
+ return clk_get_rate(dss.dss_sys_clk);
+ case DSS_CLK_TVFCK:
+ return clk_get_rate(dss.dss_tv_fck);
+ case DSS_CLK_VIDFCK:
+ return clk_get_rate(dss.dss_video_fck);
+ }
+
+ BUG();
+ return 0;
+}
+
+static unsigned count_clk_bits(enum dss_clock clks)
+{
+ unsigned num_clks = 0;
+
+ if (clks & DSS_CLK_ICK)
+ ++num_clks;
+ if (clks & DSS_CLK_FCK)
+ ++num_clks;
+ if (clks & DSS_CLK_SYSCK)
+ ++num_clks;
+ if (clks & DSS_CLK_TVFCK)
+ ++num_clks;
+ if (clks & DSS_CLK_VIDFCK)
+ ++num_clks;
+
+ return num_clks;
+}
+
+static void dss_clk_enable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_enable(dss.dss_ick);
+ if (clks & DSS_CLK_FCK)
+ clk_enable(dss.dss_fck);
+ if (clks & DSS_CLK_SYSCK)
+ clk_enable(dss.dss_sys_clk);
+ if (clks & DSS_CLK_TVFCK)
+ clk_enable(dss.dss_tv_fck);
+ if (clks & DSS_CLK_VIDFCK)
+ clk_enable(dss.dss_video_fck);
+
+ dss.num_clks_enabled += num_clks;
+}
+
+void dss_clk_enable(enum dss_clock clks)
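+/*
+ * When the first clock is being enabled the DSS may have been powered
+ * down in the meantime, so check whether the register context needs to
+ * be restored.
+ */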
+{
+ bool check_ctx = dss.num_clks_enabled == 0;
+
+ dss_clk_enable_no_ctx(clks);
+
+ if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
+ restore_all_ctx();
+}
+
+static void dss_clk_disable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_disable(dss.dss_ick);
+ if (clks & DSS_CLK_FCK)
+ clk_disable(dss.dss_fck);
+ if (clks & DSS_CLK_SYSCK)
+ clk_disable(dss.dss_sys_clk);
+ if (clks & DSS_CLK_TVFCK)
+ clk_disable(dss.dss_tv_fck);
+ if (clks & DSS_CLK_VIDFCK)
+ clk_disable(dss.dss_video_fck);
+
+ dss.num_clks_enabled -= num_clks;
+}
+
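+/*
+ * When the last enabled clock is about to be disabled, save the register
+ * context so it can be restored by a later dss_clk_enable().
+ */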
+void dss_clk_disable(enum dss_clock clks)
+{
+ if (cpu_is_omap34xx()) {
+ unsigned num_clks = count_clk_bits(clks);
+
+ BUG_ON(dss.num_clks_enabled < num_clks);
+
+ if (dss.num_clks_enabled == num_clks)
+ save_all_ctx();
+ }
+
+ dss_clk_disable_no_ctx(clks);
+}
+
+static void dss_clk_enable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_VIDFCK;
+ dss_clk_enable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_VIDFCK;
+ dss_clk_disable_no_ctx(clks);
+}
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+/* CLOCKS */
+static void core_dump_clocks(struct seq_file *s)
+{
+ int i;
+ struct clk *clocks[5] = {
+ dss.dss_ick,
+ dss.dss_fck,
+ dss.dss_sys_clk,
+ dss.dss_tv_fck,
+ dss.dss_video_fck
+ };
+
+ seq_printf(s, "- CORE -\n");
+
+ seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
+
+ for (i = 0; i < 5; i++) {
+ if (!clocks[i])
+ continue;
+ seq_printf(s, "%-15s\t%lu\t%d\n",
+ clocks[i]->name,
+ clk_get_rate(clocks[i]),
+ clocks[i]->usecount);
+ }
+}
+#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
+
+/* DEBUGFS */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s)
+{
+ core_dump_clocks(s);
+ dss_dump_clocks(s);
+ dispc_dump_clocks(s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+}
+#endif
+
+
+/* DSS HW IP initialisation */
+static int omap_dsshw_probe(struct platform_device *pdev)
+{
+ int r;
+ int skip_init = 0;
+
+ dss.pdev = pdev;
+
+ r = dss_get_clocks();
+ if (r)
+ goto err_clocks;
+
+ dss_clk_enable_all_no_ctx();
+
+ dss.ctx_id = dss_get_ctx_id();
+ DSSDBG("initial ctx id %u\n", dss.ctx_id);
+
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ /* DISPC_CONTROL */
+ if (omap_readl(0x48050440) & 1) /* LCD enabled? */
+ skip_init = 1;
+#endif
+
+ r = dss_init(skip_init);
+ if (r) {
+ DSSERR("Failed to initialize DSS\n");
+ goto err_dss;
+ }
+
+ dss_clk_disable_all_no_ctx();
+ return 0;
+
+err_dss:
+ dss_clk_disable_all_no_ctx();
+ dss_put_clocks();
+err_clocks:
+ return r;
+}
+
+static int omap_dsshw_remove(struct platform_device *pdev)
+{
+
+ dss_exit();
+
+ /*
+ * As part of hwmod changes, DSS is not the only controller of dss
+ * clocks; hwmod framework itself will also enable clocks during hwmod
+ * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
+ * need to disable clocks if their usecounts > 1.
+ */
+ WARN_ON(dss.num_clks_enabled > 0);
+
+ dss_put_clocks();
+ return 0;
+}
+
+static struct platform_driver omap_dsshw_driver = {
+ .probe = omap_dsshw_probe,
+ .remove = omap_dsshw_remove,
+ .driver = {
+ .name = "omap_dss",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dss_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dsshw_driver);
+}
+
+void dss_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dsshw_driver);
+}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index b394951120ac..4b02e079f20e 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -112,11 +112,11 @@ enum omap_parallel_interface_mode {
};
enum dss_clock {
- DSS_CLK_ICK = 1 << 0,
- DSS_CLK_FCK1 = 1 << 1,
- DSS_CLK_FCK2 = 1 << 2,
- DSS_CLK_54M = 1 << 3,
- DSS_CLK_96M = 1 << 4,
+ DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
+ DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
+ DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
+ DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
+ DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
};
enum dss_clk_source {
@@ -169,15 +169,9 @@ struct seq_file;
struct platform_device;
/* core */
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
-void dss_dump_clocks(struct seq_file *s);
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
-struct regulator *dss_get_vdda_dac(void);
/* display */
int dss_suspend_all_devices(void);
@@ -214,13 +208,21 @@ void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
/* DSS */
-int dss_init(bool skip_init);
-void dss_exit(void);
+int dss_init_platform_driver(void);
+void dss_uninit_platform_driver(void);
void dss_save_context(void);
void dss_restore_context(void);
+void dss_clk_enable(enum dss_clock clks);
+void dss_clk_disable(enum dss_clock clks);
+unsigned long dss_clk_get_rate(enum dss_clock clk);
+int dss_need_ctx_restore(void);
+void dss_dump_clocks(struct seq_file *s);
void dss_dump_regs(struct seq_file *s);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s);
+#endif
void dss_sdi_init(u8 datapairs);
int dss_sdi_enable(void);
@@ -259,8 +261,8 @@ static inline void sdi_exit(void)
/* DSI */
#ifdef CONFIG_OMAP2_DSS_DSI
-int dsi_init(struct platform_device *pdev);
-void dsi_exit(void);
+int dsi_init_platform_driver(void);
+void dsi_uninit_platform_driver(void);
void dsi_dump_clocks(struct seq_file *s);
void dsi_dump_irqs(struct seq_file *s);
@@ -285,11 +287,11 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
void dsi_wait_dsi1_pll_active(void);
void dsi_wait_dsi2_pll_active(void);
#else
-static inline int dsi_init(struct platform_device *pdev)
+static inline int dsi_init_platform_driver(void)
{
return 0;
}
-static inline void dsi_exit(void)
+static inline void dsi_uninit_platform_driver(void)
{
}
static inline void dsi_wait_dsi1_pll_active(void)
@@ -316,8 +318,8 @@ static inline void dpi_exit(void)
#endif
/* DISPC */
-int dispc_init(void);
-void dispc_exit(void);
+int dispc_init_platform_driver(void);
+void dispc_uninit_platform_driver(void);
void dispc_dump_clocks(struct seq_file *s);
void dispc_dump_irqs(struct seq_file *s);
void dispc_dump_regs(struct seq_file *s);
@@ -409,24 +411,24 @@ int dispc_get_clock_div(enum omap_channel channel,
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init(struct platform_device *pdev);
-void venc_exit(void);
+int venc_init_platform_driver(void);
+void venc_uninit_platform_driver(void);
void venc_dump_regs(struct seq_file *s);
int venc_init_display(struct omap_dss_device *display);
#else
-static inline int venc_init(struct platform_device *pdev)
+static inline int venc_init_platform_driver(void)
{
return 0;
}
-static inline void venc_exit(void)
+static inline void venc_uninit_platform_driver(void)
{
}
#endif
/* RFBI */
#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init(void);
-void rfbi_exit(void);
+int rfbi_init_platform_driver(void);
+void rfbi_uninit_platform_driver(void);
void rfbi_dump_regs(struct seq_file *s);
int rfbi_configure(int rfbi_module, int bpp, int lines);
@@ -437,11 +439,11 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
#else
-static inline int rfbi_init(void)
+static inline int rfbi_init_platform_driver(void)
{
return 0;
}
-static inline void rfbi_exit(void)
+static inline void rfbi_uninit_platform_driver(void)
{
}
#endif
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 172d4e697309..1f53bf20b6cb 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -1394,7 +1394,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1407,7 +1407,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
spin_unlock_irqrestore(&dss_cache.lock, flags);
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 456efef03c20..996e9a4f6771 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -490,7 +490,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: on manual update display, in auto update mode, a bug happens
* here. When an overlay is first enabled on LCD, then it's disabled,
* and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT
@@ -499,7 +499,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* but I don't understand how or why. */
msleep(40);
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 10a2ffe02882..9e0f196a9839 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -36,8 +36,6 @@
#include <plat/display.h>
#include "dss.h"
-#define RFBI_BASE 0x48050800
-
struct rfbi_reg { u16 idx; };
#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
@@ -100,6 +98,7 @@ static int rfbi_convert_timings(struct rfbi_timings *t);
static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
static struct {
+ struct platform_device *pdev;
void __iomem *base;
unsigned long l4_khz;
@@ -142,9 +141,9 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
static void rfbi_enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
void omap_rfbi_write_command(const void *buf, u32 len)
@@ -497,7 +496,7 @@ unsigned long rfbi_get_max_tx_rate(void)
};
l4_rate = rfbi.l4_khz / 1000;
- dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
+ dss1_rate = dss_clk_get_rate(DSS_CLK_FCK) / 1000000;
for (i = 0; i < ARRAY_SIZE(ftab); i++) {
/* Use a window instead of an exact match, to account
@@ -922,7 +921,7 @@ void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -953,54 +952,10 @@ void rfbi_dump_regs(struct seq_file *s)
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
-int rfbi_init(void)
-{
- u32 rev;
- u32 l;
-
- spin_lock_init(&rfbi.cmd_lock);
-
- init_completion(&rfbi.cmd_done);
- atomic_set(&rfbi.cmd_fifo_full, 0);
- atomic_set(&rfbi.cmd_pending, 0);
-
- rfbi.base = ioremap(RFBI_BASE, SZ_256);
- if (!rfbi.base) {
- DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
- }
-
- rfbi_enable_clocks(1);
-
- msleep(10);
-
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
-
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
-
- rev = rfbi_read_reg(RFBI_REVISION);
- printk(KERN_INFO "OMAP RFBI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- rfbi_enable_clocks(0);
-
- return 0;
-}
-
-void rfbi_exit(void)
-{
- DSSDBG("rfbi_exit\n");
-
- iounmap(rfbi.base);
-}
-
int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -1056,3 +1011,74 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
return 0;
}
+
+/* RFBI HW IP initialisation */
+static int omap_rfbihw_probe(struct platform_device *pdev)
+{
+ u32 rev;
+ u32 l;
+ struct resource *rfbi_mem;
+
+ rfbi.pdev = pdev;
+
+ spin_lock_init(&rfbi.cmd_lock);
+
+ init_completion(&rfbi.cmd_done);
+ atomic_set(&rfbi.cmd_fifo_full, 0);
+ atomic_set(&rfbi.cmd_pending, 0);
+
+ rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
+ if (!rfbi_mem) {
+ DSSERR("can't get IORESOURCE_MEM RFBI\n");
+ return -EINVAL;
+ }
+ rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
+ if (!rfbi.base) {
+ DSSERR("can't ioremap RFBI\n");
+ return -ENOMEM;
+ }
+
+ rfbi_enable_clocks(1);
+
+ msleep(10);
+
+ rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+
+ /* Enable autoidle and smart-idle */
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+ l |= (1 << 0) | (2 << 3);
+ rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+ rev = rfbi_read_reg(RFBI_REVISION);
+ dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static int omap_rfbihw_remove(struct platform_device *pdev)
+{
+ iounmap(rfbi.base);
+ return 0;
+}
+
+static struct platform_driver omap_rfbihw_driver = {
+ .probe = omap_rfbihw_probe,
+ .remove = omap_rfbihw_remove,
+ .driver = {
+ .name = "omap_rfbi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int rfbi_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_rfbihw_driver);
+}
+
+void rfbi_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_rfbihw_driver);
+}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index b64adf7dfc88..9f10a0d9e760 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -70,7 +70,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
/* In case of skip_init sdi_init has already enabled the clocks */
if (!sdi.skip_init)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
sdi_basic_init(dssdev);
@@ -130,7 +130,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
return 0;
err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
regulator_disable(sdi.vdds_sdi_reg);
err1:
omap_dss_stop_device(dssdev);
@@ -145,7 +145,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
regulator_disable(sdi.vdds_sdi_reg);
@@ -157,6 +157,19 @@ int sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
+ if (sdi.vdds_sdi_reg == NULL) {
+ struct regulator *vdds_sdi;
+
+ vdds_sdi = dss_get_vdds_sdi();
+
+ if (IS_ERR(vdds_sdi)) {
+ DSSERR("can't get VDDS_SDI regulator\n");
+ return PTR_ERR(vdds_sdi);
+ }
+
+ sdi.vdds_sdi_reg = vdds_sdi;
+ }
+
return 0;
}
@@ -165,17 +178,12 @@ int sdi_init(bool skip_init)
/* we store this for first display enable, then clear it */
sdi.skip_init = skip_init;
- sdi.vdds_sdi_reg = dss_get_vdds_sdi();
- if (IS_ERR(sdi.vdds_sdi_reg)) {
- DSSERR("can't get VDDS_SDI regulator\n");
- return PTR_ERR(sdi.vdds_sdi_reg);
- }
/*
* Enable clocks already here, otherwise there would be a toggle
* of them until sdi_display_enable is called.
*/
if (skip_init)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index eff35050e28a..9cfcc364882f 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -39,8 +39,6 @@
#include "dss.h"
-#define VENC_BASE 0x48050C00
-
/* Venc registers */
#define VENC_REV_ID 0x00
#define VENC_STATUS 0x04
@@ -289,6 +287,7 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
EXPORT_SYMBOL(omap_dss_ntsc_timings);
static struct {
+ struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
u32 wss_data;
@@ -381,11 +380,11 @@ static void venc_reset(void)
static void venc_enable_clocks(int enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
- DSS_CLK_96M);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
+ DSS_CLK_VIDFCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
- DSS_CLK_96M);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
+ DSS_CLK_VIDFCK);
}
static const struct venc_config *venc_timings_to_config(
@@ -641,50 +640,23 @@ static struct omap_dss_driver venc_driver = {
};
/* driver end */
-
-
-int venc_init(struct platform_device *pdev)
+int venc_init_display(struct omap_dss_device *dssdev)
{
- u8 rev_id;
+ DSSDBG("init_display\n");
- mutex_init(&venc.venc_lock);
+ if (venc.vdda_dac_reg == NULL) {
+ struct regulator *vdda_dac;
- venc.wss_data = 0;
+ vdda_dac = regulator_get(&venc.pdev->dev, "vdda_dac");
- venc.base = ioremap(VENC_BASE, SZ_1K);
- if (!venc.base) {
- DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
- }
+ if (IS_ERR(vdda_dac)) {
+ DSSERR("can't get VDDA_DAC regulator\n");
+ return PTR_ERR(vdda_dac);
+ }
- venc.vdda_dac_reg = dss_get_vdda_dac();
- if (IS_ERR(venc.vdda_dac_reg)) {
- iounmap(venc.base);
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(venc.vdda_dac_reg);
+ venc.vdda_dac_reg = vdda_dac;
}
- venc_enable_clocks(1);
-
- rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
- printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
-
- venc_enable_clocks(0);
-
- return omap_dss_register_driver(&venc_driver);
-}
-
-void venc_exit(void)
-{
- omap_dss_unregister_driver(&venc_driver);
-
- iounmap(venc.base);
-}
-
-int venc_init_display(struct omap_dss_device *dssdev)
-{
- DSSDBG("init_display\n");
-
return 0;
}
@@ -740,3 +712,67 @@ void venc_dump_regs(struct seq_file *s)
#undef DUMPREG
}
+
+/* VENC HW IP initialisation */
+static int omap_venchw_probe(struct platform_device *pdev)
+{
+ u8 rev_id;
+ struct resource *venc_mem;
+
+ venc.pdev = pdev;
+
+ mutex_init(&venc.venc_lock);
+
+ venc.wss_data = 0;
+
+ venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
+ if (!venc_mem) {
+ DSSERR("can't get IORESOURCE_MEM VENC\n");
+ return -EINVAL;
+ }
+ venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
+ if (!venc.base) {
+ DSSERR("can't ioremap VENC\n");
+ return -ENOMEM;
+ }
+
+ venc_enable_clocks(1);
+
+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
+
+ venc_enable_clocks(0);
+
+ return omap_dss_register_driver(&venc_driver);
+}
+
+static int omap_venchw_remove(struct platform_device *pdev)
+{
+ if (venc.vdda_dac_reg != NULL) {
+ regulator_put(venc.vdda_dac_reg);
+ venc.vdda_dac_reg = NULL;
+ }
+ omap_dss_unregister_driver(&venc_driver);
+
+ iounmap(venc.base);
+ return 0;
+}
+
+static struct platform_driver omap_venchw_driver = {
+ .probe = omap_venchw_probe,
+ .remove = omap_venchw_remove,
+ .driver = {
+ .name = "omap_venc",
+ .owner = THIS_MODULE,
+ },
+};
+
+int venc_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_venchw_driver);
+}
+
+void venc_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_venchw_driver);
+}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 65149b22cf37..aa33386c81ff 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,5 +1,5 @@
menuconfig FB_OMAP2
- tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
+ tristate "OMAP2+ frame buffer support (EXPERIMENTAL)"
depends on FB && OMAP2_DSS
select OMAP2_VRAM
@@ -8,10 +8,10 @@ menuconfig FB_OMAP2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
- Frame buffer driver for OMAP2/3 based boards.
+ Frame buffer driver for OMAP2+ based boards.
config FB_OMAP2_DEBUG_SUPPORT
- bool "Debug support for OMAP2/3 FB"
+ bool "Debug support for OMAP2+ FB"
default y
depends on FB_OMAP2
help
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 4fdab8e9c496..505ec6672049 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2090,7 +2090,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
{
int r;
u8 bpp;
- struct omap_video_timings timings;
+ struct omap_video_timings timings, temp_timings;
r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
if (r)
@@ -2100,14 +2100,23 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
++fbdev->num_bpp_overrides;
- if (!display->driver->check_timings || !display->driver->set_timings)
- return -EINVAL;
+ if (display->driver->check_timings) {
+ r = display->driver->check_timings(display, &timings);
+ if (r)
+ return r;
+ } else {
+ /* If check_timings is not present, compare x_res and y_res */
+ if (display->driver->get_timings) {
+ display->driver->get_timings(display, &temp_timings);
- r = display->driver->check_timings(display, &timings);
- if (r)
- return r;
+ if (temp_timings.x_res != timings.x_res ||
+ temp_timings.y_res != timings.y_res)
+ return -EINVAL;
+ }
+ }
- display->driver->set_timings(display, &timings);
+ if (display->driver->set_timings)
+ display->driver->set_timings(display, &timings);
return 0;
}
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index b6c3fc2db632..d57cc58c5168 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -249,7 +249,7 @@ static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_no
info->fix.accel = FB_ACCEL_SUN_CGTHREE;
}
-static int __devinit p9100_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit p9100_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -352,7 +352,7 @@ static const struct of_device_id p9100_match[] = {
};
MODULE_DEVICE_TABLE(of, p9100_match);
-static struct of_platform_driver p9100_driver = {
+static struct platform_driver p9100_driver = {
.driver = {
.name = "p9100",
.owner = THIS_MODULE,
@@ -367,12 +367,12 @@ static int __init p9100_init(void)
if (fb_get_options("p9100fb", NULL))
return -ENODEV;
- return of_register_platform_driver(&p9100_driver);
+ return platform_driver_register(&p9100_driver);
}
static void __exit p9100_exit(void)
{
- of_unregister_platform_driver(&p9100_driver);
+ platform_driver_unregister(&p9100_driver);
}
module_init(p9100_init);
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index a50e1977b316..ef532d9d3c99 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -533,8 +533,7 @@ static int __init platinumfb_setup(char *options)
#define invalidate_cache(addr)
#endif
-static int __devinit platinumfb_probe(struct platform_device* odev,
- const struct of_device_id *match)
+static int __devinit platinumfb_probe(struct platform_device* odev)
{
struct device_node *dp = odev->dev.of_node;
struct fb_info *info;
@@ -677,7 +676,7 @@ static struct of_device_id platinumfb_match[] =
{},
};
-static struct of_platform_driver platinum_driver =
+static struct platform_driver platinum_driver =
{
.driver = {
.name = "platinumfb",
@@ -697,14 +696,14 @@ static int __init platinumfb_init(void)
return -ENODEV;
platinumfb_setup(option);
#endif
- of_register_platform_driver(&platinum_driver);
+ platform_driver_register(&platinum_driver);
return 0;
}
static void __exit platinumfb_exit(void)
{
- of_unregister_platform_driver(&platinum_driver);
+ platform_driver_unregister(&platinum_driver);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index 5dbe06af2226..b7f27acaf817 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -111,8 +111,7 @@ static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
return 0;
}
-static int __devinit gfb_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit gfb_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -198,7 +197,7 @@ static const struct of_device_id gfb_match[] = {
};
MODULE_DEVICE_TABLE(of, ffb_match);
-static struct of_platform_driver gfb_driver = {
+static struct platform_driver gfb_driver = {
.probe = gfb_probe,
.remove = __devexit_p(gfb_remove),
.driver = {
@@ -213,12 +212,12 @@ static int __init gfb_init(void)
if (fb_get_options("gfb", NULL))
return -ENODEV;
- return of_register_platform_driver(&gfb_driver);
+ return platform_driver_register(&gfb_driver);
}
static void __exit gfb_exit(void)
{
- of_unregister_platform_driver(&gfb_driver);
+ platform_driver_unregister(&gfb_driver);
}
module_init(gfb_init);
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 77ad27955cf0..855b71993f61 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -362,8 +362,7 @@ static void tcx_unmap_regs(struct platform_device *op, struct fb_info *info,
info->screen_base, info->fix.smem_len);
}
-static int __devinit tcx_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit tcx_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -511,7 +510,7 @@ static const struct of_device_id tcx_match[] = {
};
MODULE_DEVICE_TABLE(of, tcx_match);
-static struct of_platform_driver tcx_driver = {
+static struct platform_driver tcx_driver = {
.driver = {
.name = "tcx",
.owner = THIS_MODULE,
@@ -526,12 +525,12 @@ static int __init tcx_init(void)
if (fb_get_options("tcxfb", NULL))
return -ENODEV;
- return of_register_platform_driver(&tcx_driver);
+ return platform_driver_register(&tcx_driver);
}
static void __exit tcx_exit(void)
{
- of_unregister_platform_driver(&tcx_driver);
+ platform_driver_unregister(&tcx_driver);
}
module_init(tcx_init);
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index dfef88c803d4..9710bf8caeae 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,8 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
*/
static int tmiofb_hw_stop(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
@@ -268,7 +267,7 @@ static int tmiofb_hw_stop(struct platform_device *dev)
*/
static int tmiofb_hw_init(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
const struct resource *nlcr = &cell->resources[0];
@@ -312,8 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
*/
static void tmiofb_hw_mode(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct fb_videomode *mode = info->mode;
struct tmiofb_par *par = info->par;
@@ -559,9 +557,8 @@ static int tmiofb_ioctl(struct fb_info *fbi,
static struct fb_videomode *
tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct mfd_cell *cell =
- info->device->platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data =
+ mfd_get_data(to_platform_device(info->device));
struct fb_videomode *best = NULL;
int i;
@@ -581,9 +578,8 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
- struct mfd_cell *cell =
- info->device->platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data =
+ mfd_get_data(to_platform_device(info->device));
mode = tmiofb_find_mode(info, var);
if (!mode || var->bits_per_pixel > 16)
@@ -683,8 +679,8 @@ static struct fb_ops tmiofb_ops = {
static int __devinit tmiofb_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -811,7 +807,7 @@ err_ioremap_ccr:
static int __devexit tmiofb_remove(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
int irq = platform_get_irq(dev, 0);
struct tmiofb_par *par;
@@ -941,7 +937,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
#ifdef CONFIG_FB_TMIO_ACCELL
struct tmiofb_par *par = info->par;
#endif
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int retval = 0;
console_lock();
@@ -973,7 +969,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
static int tmiofb_resume(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int retval = 0;
console_lock();
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 68bd23476c64..77dea015ff69 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -404,8 +404,7 @@ static int xilinxfb_release(struct device *dev)
* OF bus binding
*/
-static int __devinit
-xilinxfb_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit xilinxfb_of_probe(struct platform_device *op)
{
const u32 *prop;
u32 *p;
@@ -418,8 +417,6 @@ xilinxfb_of_probe(struct platform_device *op, const struct of_device_id *match)
/* Copy with the default pdata (not a ptr reference!) */
pdata = xilinx_fb_default_pdata;
- dev_dbg(&op->dev, "xilinxfb_of_probe(%p, %p)\n", op, match);
-
/* Allocate the driver data region */
drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
if (!drvdata) {
@@ -505,7 +502,7 @@ static struct of_device_id xilinxfb_of_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
-static struct of_platform_driver xilinxfb_of_driver = {
+static struct platform_driver xilinxfb_of_driver = {
.probe = xilinxfb_of_probe,
.remove = __devexit_p(xilinxfb_of_remove),
.driver = {
@@ -523,13 +520,13 @@ static struct of_platform_driver xilinxfb_of_driver = {
static int __init
xilinxfb_init(void)
{
- return of_register_platform_driver(&xilinxfb_of_driver);
+ return platform_driver_register(&xilinxfb_of_driver);
}
static void __exit
xilinxfb_cleanup(void)
{
- of_unregister_platform_driver(&xilinxfb_of_driver);
+ platform_driver_unregister(&xilinxfb_of_driver);
}
module_init(xilinxfb_init);
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 80b3b123dd7f..7c608c5ccf84 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
- depends on ARCH_OMAP2430 || ARCH_OMAP3
+ depends on SOC_OMAP2430 || ARCH_OMAP3
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 6b85e7fefa43..22fc726feb9b 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -216,7 +216,7 @@ static int ds1wm_find_divisor(int gclk)
static void ds1wm_up(struct ds1wm_data *ds1wm_data)
{
int divisor;
- struct ds1wm_driver_data *plat = ds1wm_data->cell->driver_data;
+ struct ds1wm_driver_data *plat = mfd_get_data(ds1wm_data->pdev);
if (ds1wm_data->cell->enable)
ds1wm_data->cell->enable(ds1wm_data->pdev);
@@ -336,7 +336,7 @@ static int ds1wm_probe(struct platform_device *pdev)
if (!pdev)
return -ENODEV;
- cell = pdev->dev.platform_data;
+ cell = mfd_get_cell(pdev);
if (!cell)
return -ENODEV;
@@ -356,7 +356,7 @@ static int ds1wm_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err0;
}
- plat = cell->driver_data;
+ plat = mfd_get_data(pdev);
/* calculate bus shift from mem resource */
ds1wm_data->bus_shift = resource_size(res) >> 3;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 31649b7b672f..b69d71482554 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -533,6 +533,16 @@ config I6300ESB_WDT
To compile this driver as a module, choose M here: the
module will be called i6300esb.
+config INTEL_SCU_WATCHDOG
+ bool "Intel SCU Watchdog for Mobile Platforms"
+ depends on WATCHDOG
+ depends on INTEL_SCU_IPC
+ ---help---
+ Hardware driver for the watchdog timer built into the Intel SCU
+ for Intel Mobile Platforms.
+
+ To compile this driver as a module, choose M here.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
@@ -580,7 +590,7 @@ config IT87_WDT
depends on X86 && EXPERIMENTAL
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
- IT8712, IT8716, IT8718, IT8720, IT8726, IT8712 Super I/O chips.
+ IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 Super I/O chips.
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -589,18 +599,20 @@ config IT87_WDT
be called it87_wdt.
config HP_WATCHDOG
- tristate "HP Proliant iLO2+ Hardware Watchdog Timer"
+ tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on X86
+ default m
help
A software monitoring watchdog and NMI sourcing driver. This driver
will detect lockups and provide a stack trace. This is a driver that
- will only load on a HP ProLiant system with a minimum of iLO2 support.
+ will only load on an HP ProLiant system with a minimum of iLO2 support.
To compile this driver as a module, choose M here: the module will be
called hpwdt.
config HPWDT_NMI_DECODING
bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on HP_WATCHDOG
+ default y
help
When an NMI occurs this feature will make the necessary BIOS calls to
log the cause of the NMI.
@@ -903,6 +915,12 @@ config INDYDOG
timer expired and no process has written to /dev/watchdog during
that time.
+config JZ4740_WDT
+ tristate "Ingenic jz4740 SoC hardware watchdog"
+ depends on MACH_JZ4740
+ help
+ Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
+
config WDT_MTX1
tristate "MTX-1 Hardware Watchdog"
depends on MIPS_MTX1
@@ -1083,14 +1101,6 @@ config SH_WDT
To compile this driver as a module, choose M here: the
module will be called shwdt.
-config SH_WDT_MMAP
- bool "Allow mmap of SH WDT"
- default n
- depends on SH_WDT
- help
- If you say Y here, user applications will be able to mmap the
- WDT/CPG registers.
-
# SPARC Architecture
# SPARC64 Architecture
@@ -1119,6 +1129,16 @@ config WATCHDOG_RIO
# XTENSA Architecture
+# Xen Architecture
+
+config XEN_WDT
+ tristate "Xen Watchdog support"
+ depends on XEN
+ help
+ Say Y here to support the hypervisor watchdog capability provided
+ by Xen 4.0 and newer. The watchdog timeout period is normally one
+ minute but can be changed with a boot-time parameter.
+
#
# ISA-based Watchdog Cards
#
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 20e44c4782b3..d520bf9c3355 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
# M32R Architecture
@@ -114,6 +115,7 @@ obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
@@ -148,6 +150,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
# XTENSA Architecture
+# Xen
+obj-$(CONFIG_XEN_WDT) += xen_wdt.o
+
# Architecture Independent
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fa4d36033552..f16dcbd475fb 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id ali_pci_tbl[] __used = {
+static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
@@ -362,12 +362,12 @@ static int __init ali_find_watchdog(void)
*/
static const struct file_operations ali_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
- .open = ali_open,
- .release = ali_release,
+ .open = ali_open,
+ .release = ali_release,
};
static struct miscdevice ali_miscdev = {
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 4b7a2b4138ed..46f4b85b46de 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
+static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 5f245522397b..bd44417c84d4 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -150,8 +150,8 @@ static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
}
static const struct watchdog_info bcm47xx_wdt_info = {
- .identity = DRV_NAME,
- .options = WDIOF_SETTIMEOUT |
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9042a95fc98c..b9fa9b71583a 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -63,7 +63,7 @@ static DEFINE_SPINLOCK(bfin_wdt_spinlock);
/**
* bfin_wdt_keepalive - Keep the Userspace Watchdog Alive
*
- * The Userspace watchdog got a KeepAlive: schedule the next timeout.
+ * The Userspace watchdog got a KeepAlive: schedule the next timeout.
*/
static int bfin_wdt_keepalive(void)
{
@@ -337,7 +337,7 @@ static int bfin_wdt_resume(struct platform_device *pdev)
static const struct file_operations bfin_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .write = bfin_wdt_write,
+ .write = bfin_wdt_write,
.unlocked_ioctl = bfin_wdt_ioctl,
.open = bfin_wdt_open,
.release = bfin_wdt_release,
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 7e7ec9c35b6a..337265b47305 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -4,7 +4,7 @@
* Author: Matthew McClintock
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2005, 2008, 2010 Freescale Semiconductor Inc.
+ * Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -221,9 +221,8 @@ static int booke_wdt_open(struct inode *inode, struct file *file)
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
on_each_cpu(__booke_wdt_enable, NULL, 0);
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_debug("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
}
spin_unlock(&booke_wdt_lock);
@@ -240,6 +239,7 @@ static int booke_wdt_release(struct inode *inode, struct file *file)
*/
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
+ pr_debug("booke_wdt: watchdog disabled\n");
#endif
clear_bit(0, &wdt_is_active);
@@ -271,21 +271,20 @@ static int __init booke_wdt_init(void)
{
int ret = 0;
- printk(KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n");
+ pr_info("booke_wdt: powerpc book-e watchdog driver loaded\n");
ident.firmware_version = cur_cpu_spec->pvr_value;
ret = misc_register(&booke_wdt_miscdev);
if (ret) {
- printk(KERN_CRIT "Cannot register miscdev on minor=%d: %d\n",
- WATCHDOG_MINOR, ret);
+ pr_err("booke_wdt: cannot register device (minor=%u, ret=%i)\n",
+ WATCHDOG_MINOR, ret);
return ret;
}
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 1) {
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_info("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
on_each_cpu(__booke_wdt_enable, NULL, 0);
}
spin_unlock(&booke_wdt_lock);
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index eca855a55c0d..1e013e8457b7 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -5,10 +5,10 @@
* interface and Solaris-compatible ioctls as best it is
* able.
*
- * NOTE: CP1400 systems appear to have a defective intr_mask
- * register on the PLD, preventing the disabling of
- * timer interrupts. We use a timer to periodically
- * reset 'stopped' watchdogs on affected platforms.
+ * NOTE: CP1400 systems appear to have a defective intr_mask
+ * register on the PLD, preventing the disabling of
+ * timer interrupts. We use a timer to periodically
+ * reset 'stopped' watchdogs on affected platforms.
*
* Copyright (c) 2000 Eric Brower (ebrower@usa.net)
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -107,13 +107,13 @@ static struct cpwd *cpwd_device;
* -------------------
* |- counter val -|
* -------------------
- * dcntr - Current 16-bit downcounter value.
- * When downcounter reaches '0' watchdog expires.
- * Reading this register resets downcounter with
- * 'limit' value.
- * limit - 16-bit countdown value in 1/10th second increments.
- * Writing this register begins countdown with input value.
- * Reading from this register does not affect counter.
+ * dcntr - Current 16-bit downcounter value.
+ * When downcounter reaches '0' watchdog expires.
+ * Reading this register resets downcounter with
+ * 'limit' value.
+ * limit - 16-bit countdown value in 1/10th second increments.
+ * Writing this register begins countdown with input value.
+ * Reading from this register does not affect counter.
* NOTES: After watchdog reset, dcntr and limit contain '1'
*
* status register (byte access):
@@ -123,7 +123,7 @@ static struct cpwd *cpwd_device;
* |- UNUSED -| EXP | RUN |
* ---------------------------
* status- Bit 0 - Watchdog is running
- * Bit 1 - Watchdog has expired
+ * Bit 1 - Watchdog has expired
*
*** PLD register block definition (struct wd_pld_regblk)
*
@@ -197,7 +197,7 @@ static u8 cpwd_readb(void __iomem *addr)
* Because of the CP1400 defect this should only be
 * called during initialization or by wd_[start|stop]timer()
*
- * index - sub-device index, or -1 for 'all'
+ * index - sub-device index, or -1 for 'all'
* enable - non-zero to enable interrupts, zero to disable
*/
static void cpwd_toggleintr(struct cpwd *p, int index, int enable)
@@ -317,13 +317,13 @@ static int cpwd_getstatus(struct cpwd *p, int index)
} else {
/* Fudge WD_EXPIRED status for defective CP1400--
* IF timer is running
- * AND brokenstop is set
- * AND an interrupt has been serviced
+ * AND brokenstop is set
+ * AND an interrupt has been serviced
* we are WD_EXPIRED.
*
* IF timer is running
- * AND brokenstop is set
- * AND no interrupt has been serviced
+ * AND brokenstop is set
+ * AND no interrupt has been serviced
* we are WD_FREERUN.
*/
if (p->broken &&
@@ -528,8 +528,7 @@ static const struct file_operations cpwd_fops = {
.llseek = no_llseek,
};
-static int __devinit cpwd_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit cpwd_probe(struct platform_device *op)
{
struct device_node *options;
const char *str_prop;
@@ -614,7 +613,7 @@ static int __devinit cpwd_probe(struct platform_device *op,
if (p->broken) {
init_timer(&cpwd_timer);
- cpwd_timer.function = cpwd_brokentimer;
+ cpwd_timer.function = cpwd_brokentimer;
cpwd_timer.data = (unsigned long) p;
cpwd_timer.expires = WD_BTIMEOUT;
@@ -646,7 +645,7 @@ static int __devexit cpwd_remove(struct platform_device *op)
struct cpwd *p = dev_get_drvdata(&op->dev);
int i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < WD_NUMDEVS; i++) {
misc_deregister(&p->devs[i].misc);
if (!p->enabled) {
@@ -678,7 +677,7 @@ static const struct of_device_id cpwd_match[] = {
};
MODULE_DEVICE_TABLE(of, cpwd_match);
-static struct of_platform_driver cpwd_driver = {
+static struct platform_driver cpwd_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -690,12 +689,12 @@ static struct of_platform_driver cpwd_driver = {
static int __init cpwd_init(void)
{
- return of_register_platform_driver(&cpwd_driver);
+ return platform_driver_register(&cpwd_driver);
}
static void __exit cpwd_exit(void)
{
- of_unregister_platform_driver(&cpwd_driver);
+ platform_driver_unregister(&cpwd_driver);
}
module_init(cpwd_init);
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 3f3dc093ad68..f1d1da662fbe 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -201,7 +201,7 @@ static void eurwdt_ping(void)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (count) {
+ if (count) {
if (!nowayout) {
size_t i;
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index f6bd6f10fcec..29a7cd4b90c8 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -261,8 +261,7 @@ static struct miscdevice gef_wdt_miscdev = {
};
-static int __devinit gef_wdt_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit gef_wdt_probe(struct platform_device *dev)
{
int timeout = 10;
u32 freq;
@@ -303,7 +302,7 @@ static const struct of_device_id gef_wdt_ids[] = {
{},
};
-static struct of_platform_driver gef_wdt_driver = {
+static struct platform_driver gef_wdt_driver = {
.driver = {
.name = "gef_wdt",
.owner = THIS_MODULE,
@@ -315,12 +314,12 @@ static struct of_platform_driver gef_wdt_driver = {
static int __init gef_wdt_init(void)
{
printk(KERN_INFO "GE watchdog driver\n");
- return of_register_platform_driver(&gef_wdt_driver);
+ return platform_driver_register(&gef_wdt_driver);
}
static void __exit gef_wdt_exit(void)
{
- of_unregister_platform_driver(&gef_wdt_driver);
+ platform_driver_unregister(&gef_wdt_driver);
}
module_init(gef_wdt_init);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 24b966d5061a..d54d4bab7fad 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -52,7 +52,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
-static struct pci_device_id hpwdt_devices[] = {
+static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
{0}, /* terminate list */
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index bb9750a03942..db45091ef434 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = {
/*
* Data for PCI driver interface
*/
-static struct pci_device_id esb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
{ 0, }, /* End of list */
};
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 2c6c2b4ad8bf..35a0d12dad73 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -247,7 +247,7 @@ static struct {
{NULL, 0}
};
-#define ITCO_PCI_DEVICE(dev, data) \
+#define ITCO_PCI_DEVICE(dev, data) \
.vendor = PCI_VENDOR_ID_INTEL, \
.device = dev, \
.subvendor = PCI_ANY_ID, \
@@ -262,7 +262,7 @@ static struct {
* pci_driver, because the I/O Controller Hub has also other
* functions that probably will be registered by other drivers.
*/
-static struct pci_device_id iTCO_wdt_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
new file mode 100644
index 000000000000..919bdd16136f
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -0,0 +1,572 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/fs.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/sfi.h>
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/mrst.h>
+
+#include "intel_scu_watchdog.h"
+
+/* Bounds the number of times we will retry loading the timer count. */
+/* This retry is a workaround for a silicon bug. */
+#define MAX_RETRY 16
+
+#define IPC_SET_WATCHDOG_TIMER 0xF8
+
+static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
+module_param(timer_margin, int, 0);
+MODULE_PARM_DESC(timer_margin,
+ "Watchdog timer margin"
+ "Time between interrupt and resetting the system"
+ "The range is from 1 to 160"
+ "This is the time for all keep alives to arrive");
+
+static int timer_set = DEFAULT_TIME;
+module_param(timer_set, int, 0);
+MODULE_PARM_DESC(timer_set,
+ "Default Watchdog timer setting"
+ "Complete cycle time"
+ "The range is from 1 to 170"
+ "This is the time for all keep alives to arrive");
+
+/* After watchdog device is closed, check force_boot. If:
+ * force_boot == 0, then force boot on next watchdog interrupt after close,
+ * force_boot == 1, then force boot immediately when device is closed.
+ */
+static int force_boot;
+module_param(force_boot, int, 0);
+MODULE_PARM_DESC(force_boot,
+ "A value of 1 means that the driver will reboot"
+ "the system immediately if the /dev/watchdog device is closed"
+ "A value of 0 means that when /dev/watchdog device is closed"
+ "the watchdog timer will be refreshed for one more interval"
+ "of length: timer_set. At the end of this interval, the"
+ "watchdog timer will reset the system."
+ );
+
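+/* Usage example: with the driver built in, booting with
+ * "intel_scu_watchdog.force_boot=1" on the kernel command line makes an
+ * unexpected close of /dev/watchdog reboot the system immediately, rather
+ * than after one more timer_set interval.
+ */
+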
+/* there is only one device in the system now; this can be made into
+ * an array in the future if we have more than one device */
+
+static struct intel_scu_watchdog_dev watchdog_device;
+
+/* Forces restart, if force_reboot is set */
+static void watchdog_fire(void)
+{
+ if (force_boot) {
+ printk(KERN_CRIT PFX "Initiating system reboot.\n");
+ emergency_restart();
+ printk(KERN_CRIT PFX "Reboot didn't ?????\n");
+ }
+
+ else {
+ printk(KERN_CRIT PFX "Immediate Reboot Disabled\n");
+ printk(KERN_CRIT PFX
+ "System will reset when watchdog timer times out!\n");
+ }
+}
+
+static int check_timer_margin(int new_margin)
+{
+ if ((new_margin < MIN_TIME_CYCLE) ||
+ (new_margin > MAX_TIME - timer_set)) {
+ pr_debug("Watchdog timer: value of new_margin %d is out of the range %d to %d\n",
+ new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * IPC operations
+ */
+static int watchdog_set_ipc(int soft_threshold, int threshold)
+{
+ u32 *ipc_wbuf;
+ u8 cbuf[16] = { '\0' };
+ int ipc_ret = 0;
+
+ ipc_wbuf = (u32 *)&cbuf;
+ ipc_wbuf[0] = soft_threshold;
+ ipc_wbuf[1] = threshold;
+
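+ /*
+ * ipc_wbuf aliases cbuf: word 0 carries the soft (pre-timeout) threshold
+ * and word 1 the hard reset threshold; both are sent to the SCU in a
+ * single IPC_SET_WATCHDOG_TIMER command below.
+ */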
+ ipc_ret = intel_scu_ipc_command(
+ IPC_SET_WATCHDOG_TIMER,
+ 0,
+ ipc_wbuf,
+ 2,
+ NULL,
+ 0);
+
+ if (ipc_ret != 0)
+ pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
+
+ return ipc_ret;
+}
+
+/*
+ * Intel_SCU operations
+ */
+
+/* timer interrupt handler */
+static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
+{
+ int int_status;
+ int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
+
+ pr_debug("Watchdog timer: irq, int_status: %x\n", int_status);
+
+ if (int_status != 0)
+ return IRQ_NONE;
+
+ /* has the timer been started? If not, then this is spurious */
+ if (watchdog_device.timer_started == 0) {
+ pr_debug("Watchdog timer: spurious interrupt received\n");
+ return IRQ_HANDLED;
+ }
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the threshold */
+ iowrite32(watchdog_device.threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return IRQ_HANDLED;
+}
+
+static int intel_scu_keepalive(void)
+{
+
+ /* read eoi register - clears interrupt */
+ ioread32(watchdog_device.timer_clear_interrupt_addr);
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the soft_threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return 0;
+}
+
+static int intel_scu_stop(void)
+{
+ iowrite32(0, watchdog_device.timer_control_addr);
+ return 0;
+}
+
+static int intel_scu_set_heartbeat(u32 t)
+{
+ int ipc_ret;
+ int retry_count;
+ u32 soft_value;
+ u32 hw_pre_value;
+ u32 hw_value;
+
+ watchdog_device.timer_set = t;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+ pr_debug("Watchdog timer: set_heartbeat: timer freq is %d\n",
+ watchdog_device.timer_tbl_ptr->freq_hz);
+ pr_debug("Watchdog timer: set_heartbeat: timer_set is %x (hex)\n",
+ watchdog_device.timer_set);
+ pr_debug("Watchdog timer: set_hearbeat: timer_margin is %x (hex)\n",
+ timer_margin);
+ pr_debug("Watchdog timer: set_heartbeat: threshold is %x (hex)\n",
+ watchdog_device.threshold);
+ pr_debug("Watchdog timer: set_heartbeat: soft_threshold is %x (hex)\n",
+ watchdog_device.soft_threshold);
+
+ /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
+ /* watchdog timing come out right. */
+ watchdog_device.threshold =
+ watchdog_device.threshold / FREQ_ADJUSTMENT;
+ watchdog_device.soft_threshold =
+ watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* send the threshold and soft_threshold via IPC to the processor */
+ ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
+ watchdog_device.threshold);
+
+ if (ipc_ret != 0) {
+ /* Make sure the watchdog timer is stopped */
+ intel_scu_stop();
+ return ipc_ret;
+ }
+
+ /* Soft Threshold set loop. Early versions of silicon did */
+ /* not always set this count correctly. This loop checks */
+ /* the value and retries if it was not set correctly. */
+
+ retry_count = 0;
+ soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
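+ /* Only the upper 16 bits of the loaded count are checked against the
+ * requested soft threshold in the retry loop below. */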
+ do {
+
+ /* Make sure timer is stopped */
+ intel_scu_stop();
+
+ if (MAX_RETRY < retry_count++) {
+ /* Unable to set timer value */
+ pr_err("Watchdog timer: Unable to set timer\n");
+ return -ENODEV;
+ }
+
+ /* set the timer to the soft threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* read count value before starting timer */
+ hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_pre_value = hw_pre_value & 0xFFFF0000;
+
+ /* Start the timer */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ /* read the value the time loaded into its count reg */
+ hw_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_value = hw_value & 0xFFFF0000;
+
+
+ } while (soft_value != hw_value);
+
+ watchdog_device.timer_started = 1;
+
+ return 0;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+
+ /* Set flag to indicate that watchdog device is open */
+ if (test_and_set_bit(0, &watchdog_device.driver_open))
+ return -EBUSY;
+
+ /* Check for reopen of driver. Reopens are not allowed */
+ if (watchdog_device.driver_closed)
+ return -EPERM;
+
+ return nonseekable_open(inode, file);
+}
+
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+ /*
+ * This watchdog should not be closed, after the timer
+ * is started with the WDIPC_SETTIMEOUT ioctl
+ * If force_boot is set watchdog_fire() will cause an
+ * immediate reset. If force_boot is not set, the watchdog
+ * timer is refreshed for one more interval. At the end
+ * of that interval, the watchdog timer will reset the system.
+ */
+
+ if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
+ pr_debug("Watchdog timer: intel_scu_release, without open\n");
+ return -ENOTTY;
+ }
+
+ if (!watchdog_device.timer_started) {
+ /* Just close, since timer has not been started */
+ pr_debug("Watchdog timer: closed, without starting timer\n");
+ return 0;
+ }
+
+ printk(KERN_CRIT PFX
+ "Unexpected close of /dev/watchdog!\n");
+
+ /* Since the timer was started, prevent future reopens */
+ watchdog_device.driver_closed = 1;
+
+ /* Refresh the timer for one more interval */
+ intel_scu_keepalive();
+
+ /* Reboot system (if force_boot is set) */
+ watchdog_fire();
+
+ /* We should only reach this point if force_boot is not set */
+ return 0;
+}
+
+static ssize_t intel_scu_write(struct file *file,
+ char const *data,
+ size_t len,
+ loff_t *ppos)
+{
+
+ if (watchdog_device.timer_started)
+ /* Watchdog already started, keep it alive */
+ intel_scu_keepalive();
+ else
+ /* Start watchdog with timer value set by init */
+ intel_scu_set_heartbeat(watchdog_device.timer_set);
+
+ return len;
+}
+
+static long intel_scu_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ u32 __user *p = argp;
+ u32 new_margin;
+
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT
+ | WDIOF_KEEPALIVEPING,
+ .firmware_version = 0, /* @todo Get from SCU via
+ ipc_get_scu_fw_version()? */
+ .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp,
+ &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_KEEPALIVE:
+ intel_scu_keepalive();
+
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, p))
+ return -EFAULT;
+
+ if (check_timer_margin(new_margin))
+ return -EINVAL;
+
+ if (intel_scu_set_heartbeat(new_margin))
+ return -EINVAL;
+ return 0;
+ case WDIOC_GETTIMEOUT:
+ return put_user(watchdog_device.soft_threshold, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * Notifier for system down
+ */
+static int intel_scu_notify_sys(struct notifier_block *this,
+ unsigned long code,
+ void *another_unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ /* Turn off the watchdog timer. */
+ intel_scu_stop();
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+static const struct file_operations intel_scu_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = intel_scu_write,
+ .unlocked_ioctl = intel_scu_ioctl,
+ .open = intel_scu_open,
+ .release = intel_scu_release,
+};
+
+static int __init intel_scu_watchdog_init(void)
+{
+ int ret;
+ u32 __iomem *tmp_addr;
+
+ /*
+ * We don't really need to check this, as the SFI timer lookup will fail
+ * anyway, but checking here lets us exit with a clearer reason and no noise.
+ *
+ * If it isn't an Intel MID device, then it doesn't have this watchdog.
+ */
+ if (!mrst_identify_cpu())
+ return -ENODEV;
+
+ /* Check boot parameters to verify that their initial values */
+ /* are in range. */
+ /* Check value of timer_set boot parameter */
+ if ((timer_set < MIN_TIME_CYCLE) ||
+ (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
+ pr_err("Watchdog timer: value of timer_set %x (hex) "
+ "is out of range from %x to %x (hex)\n",
+ timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
+ return -EINVAL;
+ }
+
+ /* Check value of timer_margin boot parameter */
+ if (check_timer_margin(timer_margin))
+ return -EINVAL;
+
+ watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
+
+ if (watchdog_device.timer_tbl_ptr == NULL) {
+ pr_debug("Watchdog timer - Intel SCU watchdog: timer is not available\n");
+ return -ENODEV;
+ }
+ /* make sure the timer exists */
+ if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
+ pr_debug("Watchdog timer - Intel SCU watchdog - timer %d does not have valid physical memory\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ if (watchdog_device.timer_tbl_ptr->irq == 0) {
+ pr_debug("Watchdog timer: timer %d invalid irq\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ 20);
+
+ if (tmp_addr == NULL) {
+ pr_debug("Watchdog timer: timer unable to ioremap\n");
+ return -ENOMEM;
+ }
+
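+ /* The timer's 32-bit registers follow each other in the mapped region */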
+ watchdog_device.timer_load_count_addr = tmp_addr++;
+ watchdog_device.timer_current_value_addr = tmp_addr++;
+ watchdog_device.timer_control_addr = tmp_addr++;
+ watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
+ watchdog_device.timer_interrupt_status_addr = tmp_addr++;
+
+ /* Set the default time values in device structure */
+
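+ /*
+ * soft_threshold is the tick count until the warning interrupt fires;
+ * threshold is the additional ticks from the warning to the reset.
+ */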
+ watchdog_device.timer_set = timer_set;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+
+ watchdog_device.intel_scu_notifier.notifier_call =
+ intel_scu_notify_sys;
+
+ ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ if (ret) {
+ pr_err("Watchdog timer: cannot register notifier %d)\n", ret);
+ goto register_reboot_error;
+ }
+
+ watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+ watchdog_device.miscdev.name = "watchdog";
+ watchdog_device.miscdev.fops = &intel_scu_fops;
+
+ ret = misc_register(&watchdog_device.miscdev);
+ if (ret) {
+ pr_err("Watchdog timer: cannot register miscdev %d err =%d\n",
+ WATCHDOG_MINOR, ret);
+ goto misc_register_error;
+ }
+
+ ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
+ watchdog_timer_interrupt,
+ IRQF_SHARED, "watchdog",
+ &watchdog_device.timer_load_count_addr);
+ if (ret) {
+ pr_err("Watchdog timer: error requesting irq %d\n", ret);
+ goto request_irq_error;
+ }
+ /* Make sure timer is disabled before returning */
+ intel_scu_stop();
+ return 0;
+
+/* error cleanup */
+
+request_irq_error:
+ misc_deregister(&watchdog_device.miscdev);
+misc_register_error:
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+register_reboot_error:
+ intel_scu_stop();
+ iounmap(watchdog_device.timer_load_count_addr);
+ return ret;
+}
+
+static void __exit intel_scu_watchdog_exit(void)
+{
+
+ misc_deregister(&watchdog_device.miscdev);
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ /* disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+ iounmap(watchdog_device.timer_load_count_addr);
+}
+
+late_initcall(intel_scu_watchdog_init);
+module_exit(intel_scu_watchdog_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog.h b/drivers/watchdog/intel_scu_watchdog.h
new file mode 100644
index 000000000000..d2b074a82db6
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.h
@@ -0,0 +1,66 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#define PFX "Intel_SCU: "
+#define WDT_VER "0.3"
+
+/* minimum time between interrupts */
+#define MIN_TIME_CYCLE 1
+
+/* Time from warning to reboot is 2 seconds */
+#define DEFAULT_SOFT_TO_HARD_MARGIN 2
+
+#define MAX_TIME 170
+
+#define DEFAULT_TIME 5
+
+#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
+
+/* Adjustment to clock tick frequency to make timing come out right */
+#define FREQ_ADJUSTMENT 8
+
+struct intel_scu_watchdog_dev {
+ ulong driver_open;
+ ulong driver_closed;
+ u32 timer_started;
+ u32 timer_set;
+ u32 threshold;
+ u32 soft_threshold;
+ u32 __iomem *timer_load_count_addr;
+ u32 __iomem *timer_current_value_addr;
+ u32 __iomem *timer_control_addr;
+ u32 __iomem *timer_clear_interrupt_addr;
+ u32 __iomem *timer_interrupt_status_addr;
+ struct sfi_timer_table_entry *timer_tbl_ptr;
+ struct notifier_block intel_scu_notifier;
+ struct miscdevice miscdev;
+};
+
+extern int sfi_mtimer_num;
+
+/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
+#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index b32c6c045b1a..6143f52ba6b8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -69,7 +69,7 @@ static unsigned short address;
#define IT8712F_DEVID 0x8712
#define LDN_GPIO 0x07 /* GPIO and Watch Dog Timer */
-#define LDN_GAME 0x09 /* Game Port */
+#define LDN_GAME 0x09 /* Game Port */
#define WDT_CONTROL 0x71 /* WDT Register: Control */
#define WDT_CONFIG 0x72 /* WDT Register: Configuration */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index dad29245a6a7..b1bc72f9a209 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,7 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8702, IT8712, IT8716, IT8718, IT8720 and IT8726.
+ * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721 and IT8726.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,7 +45,7 @@
#include <asm/system.h>
-#define WATCHDOG_VERSION "1.13"
+#define WATCHDOG_VERSION "1.14"
#define WATCHDOG_NAME "IT87 WDT"
#define PFX WATCHDOG_NAME ": "
#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
@@ -54,7 +54,7 @@
/* Defaults for Module Parameter */
#define DEFAULT_NOGAMEPORT 0
#define DEFAULT_EXCLUSIVE 1
-#define DEFAULT_TIMEOUT 60
+#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
#define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT
@@ -70,9 +70,9 @@
/* Configuration Registers and Functions */
#define LDNREG 0x07
#define CHIPID 0x20
-#define CHIPREV 0x22
+#define CHIPREV 0x22
#define ACTREG 0x30
-#define BASEREG 0x60
+#define BASEREG 0x60
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
@@ -82,10 +82,11 @@
#define IT8716_ID 0x8716
#define IT8718_ID 0x8718
#define IT8720_ID 0x8720
+#define IT8721_ID 0x8721
#define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */
/* GPIO Configuration Registers LDN=0x07 */
-#define WDTCTRL 0x71
+#define WDTCTRL 0x71
#define WDTCFG 0x72
#define WDTVALLSB 0x73
#define WDTVALMSB 0x74
@@ -94,7 +95,7 @@
#define WDT_CIRINT 0x80
#define WDT_MOUSEINT 0x40
#define WDT_KYBINT 0x20
-#define WDT_GAMEPORT 0x10 /* not in it8718, it8720 */
+#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721 */
#define WDT_FORCE 0x02
#define WDT_ZERO 0x01
@@ -102,11 +103,11 @@
#define WDT_TOV1 0x80
#define WDT_KRST 0x40
#define WDT_TOVE 0x20
-#define WDT_PWROK 0x10
+#define WDT_PWROK 0x10 /* not in it8721 */
#define WDT_INT_MASK 0x0f
/* CIR Configuration Register LDN=0x0a */
-#define CIR_ILS 0x70
+#define CIR_ILS 0x70
/* The default Base address is not always available, we use this */
#define CIR_BASE 0x0208
@@ -134,7 +135,7 @@
#define WDTS_USE_GP 4
#define WDTS_EXPECTED 5
-static unsigned int base, gpact, ciract, max_units;
+static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
static DEFINE_SPINLOCK(spinlock);
@@ -215,7 +216,7 @@ static inline void superio_outw(int val, int reg)
/* Internal function, should be called after superio_select(GPIO) */
static void wdt_update_timeout(void)
{
- unsigned char cfg = WDT_KRST | WDT_PWROK;
+ unsigned char cfg = WDT_KRST;
int tm = timeout;
if (testmode)
@@ -226,6 +227,9 @@ static void wdt_update_timeout(void)
else
tm /= 60;
+ if (chip_type != IT8721_ID)
+ cfg |= WDT_PWROK;
+
superio_outb(cfg, WDTCFG);
superio_outb(tm, WDTVALLSB);
if (max_units > 255)
@@ -555,7 +559,6 @@ static int __init it87_wdt_init(void)
{
int rc = 0;
int try_gameport = !nogameport;
- u16 chip_type;
u8 chip_rev;
unsigned long flags;
@@ -581,6 +584,7 @@ static int __init it87_wdt_init(void)
break;
case IT8718_ID:
case IT8720_ID:
+ case IT8721_ID:
max_units = 65535;
try_gameport = 0;
break;
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
new file mode 100644
index 000000000000..684ba01fb540
--- /dev/null
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
+ * JZ4740 Watchdog driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <asm/mach-jz4740/timer.h>
+
+#define JZ_REG_WDT_TIMER_DATA 0x0
+#define JZ_REG_WDT_COUNTER_ENABLE 0x4
+#define JZ_REG_WDT_TIMER_COUNTER 0x8
+#define JZ_REG_WDT_TIMER_CONTROL 0xC
+
+#define JZ_WDT_CLOCK_PCLK 0x1
+#define JZ_WDT_CLOCK_RTC 0x2
+#define JZ_WDT_CLOCK_EXT 0x4
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+#define JZ_WDT_CLOCK_DIV_SHIFT 3
+
+#define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT)
+
+#define DEFAULT_HEARTBEAT 5
+#define MAX_HEARTBEAT 2048
+
+static struct {
+ void __iomem *base;
+ struct resource *mem;
+ struct clk *rtc_clk;
+ unsigned long status;
+} jz4740_wdt;
+
+static int heartbeat = DEFAULT_HEARTBEAT;
+
+
+static void jz4740_wdt_service(void)
+{
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+}
+
+static void jz4740_wdt_set_heartbeat(int new_heartbeat)
+{
+ unsigned int rtc_clk_rate;
+ unsigned int timeout_value;
+ unsigned short clock_div = JZ_WDT_CLOCK_DIV_1;
+
+ heartbeat = new_heartbeat;
+
+ rtc_clk_rate = clk_get_rate(jz4740_wdt.rtc_clk);
+
+ timeout_value = rtc_clk_rate * heartbeat;
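+ /*
+ * The counter register is 16 bits wide; each divider step quadruples
+ * the prescaler, so shift the value right by two bits until it fits.
+ */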
+ while (timeout_value > 0xffff) {
+ if (clock_div == JZ_WDT_CLOCK_DIV_1024) {
+ /* Requested timeout too high;
+ * use highest possible value. */
+ timeout_value = 0xffff;
+ break;
+ }
+ timeout_value >>= 2;
+ clock_div += (1 << JZ_WDT_CLOCK_DIV_SHIFT);
+ }
+
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+ writew(clock_div, jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writew((u16)timeout_value, jz4740_wdt.base + JZ_REG_WDT_TIMER_DATA);
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+ writew(clock_div | JZ_WDT_CLOCK_RTC,
+ jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writeb(0x1, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static void jz4740_wdt_enable(void)
+{
+ jz4740_timer_enable_watchdog();
+ jz4740_wdt_set_heartbeat(heartbeat);
+}
+
+static void jz4740_wdt_disable(void)
+{
+ jz4740_timer_disable_watchdog();
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static int jz4740_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &jz4740_wdt.status))
+ return -EBUSY;
+
+ jz4740_wdt_enable();
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t jz4740_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ }
+ jz4740_wdt_service();
+ }
+
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "jz4740 Watchdog",
+};
+
+static long jz4740_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int heartbeat_seconds;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ jz4740_wdt_service();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(heartbeat_seconds, (int __user *)arg))
+ return -EFAULT;
+
+ jz4740_wdt_set_heartbeat(heartbeat_seconds);
+ return 0;
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, (int *)arg);
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int jz4740_wdt_release(struct inode *inode, struct file *file)
+{
+ jz4740_wdt_service();
+
+ if (test_and_clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status))
+ jz4740_wdt_disable();
+
+ clear_bit(WDT_IN_USE, &jz4740_wdt.status);
+ return 0;
+}
+
+static const struct file_operations jz4740_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = jz4740_wdt_write,
+ .unlocked_ioctl = jz4740_wdt_ioctl,
+ .open = jz4740_wdt_open,
+ .release = jz4740_wdt_release,
+};
+
+static struct miscdevice jz4740_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &jz4740_wdt_fops,
+};
+
+static int __devinit jz4740_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0, size;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "failed to get memory region resource\n");
+ return -ENXIO;
+ }
+
+ size = resource_size(res);
+ jz4740_wdt.mem = request_mem_region(res->start, size, pdev->name);
+ if (jz4740_wdt.mem == NULL) {
+ dev_err(dev, "failed to get memory region\n");
+ return -EBUSY;
+ }
+
+ jz4740_wdt.base = ioremap_nocache(res->start, size);
+ if (jz4740_wdt.base == NULL) {
+ dev_err(dev, "failed to map memory region\n");
+ ret = -EBUSY;
+ goto err_release_region;
+ }
+
+ jz4740_wdt.rtc_clk = clk_get(NULL, "rtc");
+ if (IS_ERR(jz4740_wdt.rtc_clk)) {
+ dev_err(dev, "cannot find RTC clock\n");
+ ret = PTR_ERR(jz4740_wdt.rtc_clk);
+ goto err_iounmap;
+ }
+
+ ret = misc_register(&jz4740_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(dev, "cannot register misc device\n");
+ goto err_disable_clk;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_put(jz4740_wdt.rtc_clk);
+err_iounmap:
+ iounmap(jz4740_wdt.base);
+err_release_region:
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ return ret;
+}
+
+
+static int __devexit jz4740_wdt_remove(struct platform_device *pdev)
+{
+ jz4740_wdt_disable();
+ misc_deregister(&jz4740_wdt_miscdev);
+ clk_put(jz4740_wdt.rtc_clk);
+
+ iounmap(jz4740_wdt.base);
+ jz4740_wdt.base = NULL;
+
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ jz4740_wdt.mem = NULL;
+
+ return 0;
+}
+
+
+static struct platform_driver jz4740_wdt_driver = {
+ .probe = jz4740_wdt_probe,
+ .remove = __devexit_p(jz4740_wdt_remove),
+ .driver = {
+ .name = "jz4740-wdt",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static int __init jz4740_wdt_init(void)
+{
+ return platform_driver_register(&jz4740_wdt_driver);
+}
+module_init(jz4740_wdt_init);
+
+static void __exit jz4740_wdt_exit(void)
+{
+ platform_driver_unregister(&jz4740_wdt_driver);
+}
+module_exit(jz4740_wdt_exit);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("jz4740 Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:jz4740-wdt");
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 928035069396..1332b838cc58 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -54,7 +54,7 @@
/* indexes */ /* size */
#define ZFL_VERSION 0x02 /* 16 */
-#define CONTROL 0x10 /* 16 */
+#define CONTROL 0x10 /* 16 */
#define STATUS 0x12 /* 8 */
#define COUNTER_1 0x0C /* 16 */
#define COUNTER_2 0x0E /* 8 */
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 3053ff05ca41..7a82ce5a6337 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -41,7 +41,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
* to ping the watchdog.
*/
#define MAX6369_WDSET (7 << 0)
-#define MAX6369_WDI (1 << 3)
+#define MAX6369_WDI (1 << 3)
static DEFINE_SPINLOCK(io_lock);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 8fa213cdb499..6709d723e017 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -2,9 +2,9 @@
* mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface
*
* Authors: Dave Updegraff <dave@cray.org>
- * Kumar Gala <galak@kernel.crashing.org>
- * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
- * ..and from sc520_wdt
+ * Kumar Gala <galak@kernel.crashing.org>
+ * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
+ * ..and from sc520_wdt
* Copyright (c) 2008 MontaVista Software, Inc.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
@@ -185,15 +185,18 @@ static struct miscdevice mpc8xxx_wdt_miscdev = {
.fops = &mpc8xxx_wdt_fops,
};
-static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev)
{
int ret;
struct device_node *np = ofdev->dev.of_node;
- struct mpc8xxx_wdt_type *wdt_type = match->data;
+ struct mpc8xxx_wdt_type *wdt_type;
u32 freq = fsl_get_sys_freq();
bool enabled;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+ wdt_type = ofdev->dev.of_match->data;
+
if (!freq || freq == -1)
return -EINVAL;
@@ -272,7 +275,7 @@ static const struct of_device_id mpc8xxx_wdt_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);
-static struct of_platform_driver mpc8xxx_wdt_driver = {
+static struct platform_driver mpc8xxx_wdt_driver = {
.probe = mpc8xxx_wdt_probe,
.remove = __devexit_p(mpc8xxx_wdt_remove),
.driver = {
@@ -308,13 +311,13 @@ module_init(mpc8xxx_wdt_init_late);
static int __init mpc8xxx_wdt_init(void)
{
- return of_register_platform_driver(&mpc8xxx_wdt_driver);
+ return platform_driver_register(&mpc8xxx_wdt_driver);
}
arch_initcall(mpc8xxx_wdt_init);
static void __exit mpc8xxx_wdt_exit(void)
{
- of_unregister_platform_driver(&mpc8xxx_wdt_driver);
+ platform_driver_unregister(&mpc8xxx_wdt_driver);
}
module_exit(mpc8xxx_wdt_exit);
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index b8ec7aca3c8e..2b4af222b5f2 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -172,7 +172,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (wdt->expect_close == 42)
mpcore_wdt_stop(wdt);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 08e8a6ab74e1..5ec5ac1f7878 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -190,19 +190,19 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
}
static const struct file_operations mtx1_wdt_fops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
- .open = mtx1_wdt_open,
- .write = mtx1_wdt_write,
- .release = mtx1_wdt_release,
+ .open = mtx1_wdt_open,
+ .write = mtx1_wdt_write,
+ .release = mtx1_wdt_release,
};
static struct miscdevice mtx1_wdt_misc = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mtx1_wdt_fops,
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mtx1_wdt_fops,
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 1a50aa7079bf..267377a5a83e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h
index fc02ec6a0386..09b774cf75b9 100644
--- a/drivers/watchdog/omap_wdt.h
+++ b/drivers/watchdog/omap_wdt.h
@@ -44,7 +44,7 @@
* months before firing. These limits work without scaling,
* with the 60 second default assumed by most tools and docs.
*/
-#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
+#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
#define TIMER_MARGIN_DEFAULT 60 /* 60 secs */
#define TIMER_MARGIN_MIN 1
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 3a56bc360924..139d773300c6 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -514,7 +514,7 @@ static struct miscdevice pc87413_miscdev = {
/* -- Module init functions -------------------------------------*/
/**
- * pc87413_init: module's "constructor"
+ * pc87413_init: module's "constructor"
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 64374d636f09..b8d14f88f0b5 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -817,7 +817,7 @@ static void __devexit pcipcwd_card_exit(struct pci_dev *pdev)
cards_found--;
}
-static struct pci_device_id pcipcwd_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index bf5b97c546eb..c7cf4cbf8ab3 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for PNX4008 board
*
* Authors: Dmitry Chigirev <source@mvista.com>,
- * Vitaly Wool <vitalywool@gmail.com>
+ * Vitaly Wool <vitalywool@gmail.com>
* Based on sa1100 driver,
* Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
*
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 3939e53f5f98..d8e725082fdc 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/mfd/rdc321x.h>
+#include <linux/mfd/core.h>
#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -231,7 +232,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
struct resource *r;
struct rdc321x_wdt_pdata *pdata;
- pdata = platform_get_drvdata(pdev);
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 3faee1ae64bd..109b533896b7 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -172,8 +172,7 @@ static struct miscdevice riowd_miscdev = {
.fops = &riowd_fops
};
-static int __devinit riowd_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit riowd_probe(struct platform_device *op)
{
struct riowd *p;
int err = -EINVAL;
@@ -238,7 +237,7 @@ static const struct of_device_id riowd_match[] = {
};
MODULE_DEVICE_TABLE(of, riowd_match);
-static struct of_platform_driver riowd_driver = {
+static struct platform_driver riowd_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -250,12 +249,12 @@ static struct of_platform_driver riowd_driver = {
static int __init riowd_init(void)
{
- return of_register_platform_driver(&riowd_driver);
+ return platform_driver_register(&riowd_driver);
}
static void __exit riowd_exit(void)
{
- of_unregister_platform_driver(&riowd_driver);
+ platform_driver_unregister(&riowd_driver);
}
module_init(riowd_init);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index ae53662c29bc..25b39bf35925 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -224,7 +224,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42)
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 68e2e2d6f73d..514ec23050f7 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -114,7 +114,7 @@ static char expect_close;
* C | 6.5s 65s 650s 1300s
* D | 7s 70s 700s 1400s
* E | 7.5s 75s 750s 1500s
- * F | 8s 80s 800s 1600s
+ * F | 8s 80s 800s 1600s
*
* Another way to say the same things is:
* For N=1, Timeout = (M+1) * 0.5s
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 28f1214457bd..3066a5127ca8 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -220,7 +220,7 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. "
"Note that there is no way to probe for this device -- "
- "so only use it if you are *sure* you are runnning on this specific "
+ "so only use it if you are *sure* you are running on this specific "
"SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index c7d67e9a7465..38322401f392 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(wdt_lock);
#define IFACE_ON_COMMAND 1
#define REBOOT_COMMAND 2
-#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
+#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
static void wdt_send_data(unsigned char command, unsigned char data)
{
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 0461858e07d0..b61ab1c54293 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
/* Check if Logical Device Register is currently active */
- if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0)
+ if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n");
/* Get the base address of the runtime registers */
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 6fc74065abee..4e3e7eb5919c 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -1,9 +1,9 @@
/*
- * drivers/char/watchdog/shwdt.c
+ * drivers/watchdog/shwdt.c
*
* Watchdog driver for integrated watchdog in the SuperH processors.
*
- * Copyright (C) 2001, 2002, 2003 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (C) 2001 - 2010 Paul Mundt <lethal@linux-sh.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
@@ -28,11 +29,12 @@
#include <linux/ioport.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/watchdog.h>
-#define PFX "shwdt: "
+#define DRV_NAME "sh-wdt"
/*
* Default clock division ratio is 5.25 msecs. For an additional table of
@@ -62,37 +64,36 @@
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
-
#define next_ping_period(cks) msecs_to_jiffies(cks - 4)
-static void sh_wdt_ping(unsigned long data);
-
-static unsigned long shwdt_is_open;
static const struct watchdog_info sh_wdt_info;
-static char shwdt_expect_close;
-static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0);
-static unsigned long next_heartbeat;
+static struct platform_device *sh_wdt_dev;
static DEFINE_SPINLOCK(shwdt_lock);
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
-
static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned long next_heartbeat;
-/**
- * sh_wdt_start - Start the Watchdog
- *
- * Starts the watchdog.
- */
-static void sh_wdt_start(void)
+struct sh_wdt {
+ void __iomem *base;
+ struct device *dev;
+
+ struct timer_list timer;
+
+ unsigned long enabled;
+ char expect_close;
+};
+
+static void sh_wdt_start(struct sh_wdt *wdt)
{
- __u8 csr;
unsigned long flags;
+ u8 csr;
spin_lock_irqsave(&shwdt_lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
- mod_timer(&timer, next_ping_period(clock_division_ratio));
+ mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
csr = sh_wdt_read_csr();
csr |= WTCSR_WT | clock_division_ratio;
@@ -114,15 +115,6 @@ static void sh_wdt_start(void)
sh_wdt_write_csr(csr);
#ifdef CONFIG_CPU_SH2
- /*
- * Whoever came up with the RSTCSR semantics must've been smoking
- * some of the good stuff, since in addition to the WTCSR/WTCNT write
- * brain-damage, it's managed to fuck things up one step further..
- *
- * If we need to clear the WOVF bit, the upper byte has to be 0xa5..
- * but if we want to touch RSTE or RSTS, the upper byte has to be
- * 0x5a..
- */
csr = sh_wdt_read_rstcsr();
csr &= ~RSTCSR_RSTS;
sh_wdt_write_rstcsr(csr);
@@ -130,30 +122,23 @@ static void sh_wdt_start(void)
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_stop - Stop the Watchdog
- * Stops the watchdog.
- */
-static void sh_wdt_stop(void)
+static void sh_wdt_stop(struct sh_wdt *wdt)
{
- __u8 csr;
unsigned long flags;
+ u8 csr;
spin_lock_irqsave(&shwdt_lock, flags);
- del_timer(&timer);
+ del_timer(&wdt->timer);
csr = sh_wdt_read_csr();
csr &= ~WTCSR_TME;
sh_wdt_write_csr(csr);
+
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_keepalive - Keep the Userspace Watchdog Alive
- * The Userspace watchdog got a KeepAlive: schedule the next heartbeat.
- */
-static inline void sh_wdt_keepalive(void)
+static inline void sh_wdt_keepalive(struct sh_wdt *wdt)
{
unsigned long flags;
@@ -162,10 +147,6 @@ static inline void sh_wdt_keepalive(void)
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_set_heartbeat - Set the Userspace Watchdog heartbeat
- * Set the Userspace Watchdog heartbeat
- */
static int sh_wdt_set_heartbeat(int t)
{
unsigned long flags;
@@ -179,19 +160,14 @@ static int sh_wdt_set_heartbeat(int t)
return 0;
}
-/**
- * sh_wdt_ping - Ping the Watchdog
- * @data: Unused
- *
- * Clears overflow bit, resets timer counter.
- */
static void sh_wdt_ping(unsigned long data)
{
+ struct sh_wdt *wdt = (struct sh_wdt *)data;
unsigned long flags;
spin_lock_irqsave(&shwdt_lock, flags);
if (time_before(jiffies, next_heartbeat)) {
- __u8 csr;
+ u8 csr;
csr = sh_wdt_read_csr();
csr &= ~WTCSR_IOVF;
@@ -199,148 +175,76 @@ static void sh_wdt_ping(unsigned long data)
sh_wdt_write_cnt(0);
- mod_timer(&timer, next_ping_period(clock_division_ratio));
+ mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
} else
- printk(KERN_WARNING PFX "Heartbeat lost! Will not ping "
- "the watchdog\n");
+ dev_warn(wdt->dev, "Heartbeat lost! Will not ping "
+ "the watchdog\n");
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_open - Open the Device
- * @inode: inode of device
- * @file: file handle of device
- *
- * Watchdog device is opened and started.
- */
static int sh_wdt_open(struct inode *inode, struct file *file)
{
- if (test_and_set_bit(0, &shwdt_is_open))
+ struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
+
+ if (test_and_set_bit(0, &wdt->enabled))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
- sh_wdt_start();
+ file->private_data = wdt;
+
+ sh_wdt_start(wdt);
return nonseekable_open(inode, file);
}
-/**
- * sh_wdt_close - Close the Device
- * @inode: inode of device
- * @file: file handle of device
- *
- * Watchdog device is closed and stopped.
- */
static int sh_wdt_close(struct inode *inode, struct file *file)
{
- if (shwdt_expect_close == 42) {
- sh_wdt_stop();
+ struct sh_wdt *wdt = file->private_data;
+
+ if (wdt->expect_close == 42) {
+ sh_wdt_stop(wdt);
} else {
- printk(KERN_CRIT PFX "Unexpected close, not "
- "stopping watchdog!\n");
- sh_wdt_keepalive();
+ dev_crit(wdt->dev, "Unexpected close, not "
+ "stopping watchdog!\n");
+ sh_wdt_keepalive(wdt);
}
- clear_bit(0, &shwdt_is_open);
- shwdt_expect_close = 0;
+ clear_bit(0, &wdt->enabled);
+ wdt->expect_close = 0;
return 0;
}
-/**
- * sh_wdt_write - Write to Device
- * @file: file handle of device
- * @buf: buffer to write
- * @count: length of buffer
- * @ppos: offset
- *
- * Pings the watchdog on write.
- */
static ssize_t sh_wdt_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
+ struct sh_wdt *wdt = file->private_data;
+
if (count) {
if (!nowayout) {
size_t i;
- shwdt_expect_close = 0;
+ wdt->expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
- shwdt_expect_close = 42;
+ wdt->expect_close = 42;
}
}
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
}
return count;
}
-/**
- * sh_wdt_mmap - map WDT/CPG registers into userspace
- * @file: file structure for the device
- * @vma: VMA to map the registers into
- *
- * A simple mmap() implementation for the corner cases where the counter
- * needs to be mapped in userspace directly. Due to the relatively small
- * size of the area, neighbouring registers not necessarily tied to the
- * CPG will also be accessible through the register page, so this remains
- * configurable for users that really know what they're doing.
- *
- * Additionaly, the register page maps in the CPG register base relative
- * to the nearest page-aligned boundary, which requires that userspace do
- * the appropriate CPU subtype math for calculating the page offset for
- * the counter value.
- */
-static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int ret = -ENOSYS;
-
-#ifdef CONFIG_SH_WDT_MMAP
- unsigned long addr;
-
- /* Only support the simple cases where we map in a register page. */
- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
- return -EINVAL;
-
- /*
- * Pick WTCNT as the start, it's usually the first register after the
- * FRQCR, and neither one are generally page-aligned out of the box.
- */
- addr = WTCNT & ~(PAGE_SIZE - 1);
-
- vma->vm_flags |= VM_IO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot)) {
- printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n",
- __func__);
- return -EAGAIN;
- }
-
- ret = 0;
-#endif
-
- return ret;
-}
-
-/**
- * sh_wdt_ioctl - Query Device
- * @file: file handle of device
- * @cmd: watchdog command
- * @arg: argument
- *
- * Query basic information from the device or ping it, as outlined by the
- * watchdog API.
- */
static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct sh_wdt *wdt = file->private_data;
int new_heartbeat;
int options, retval = -EINVAL;
@@ -356,18 +260,18 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
- sh_wdt_stop();
+ sh_wdt_stop(wdt);
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
- sh_wdt_start();
+ sh_wdt_start(wdt);
retval = 0;
}
return retval;
case WDIOC_KEEPALIVE:
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_heartbeat, (int *)arg))
@@ -376,7 +280,7 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
if (sh_wdt_set_heartbeat(new_heartbeat))
return -EINVAL;
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, (int *)arg);
@@ -386,20 +290,13 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
return 0;
}
-/**
- * sh_wdt_notify_sys - Notifier Handler
- * @this: notifier block
- * @code: notifier event
- * @unused: unused
- *
- * Handles specific events, such as turning off the watchdog during a
- * shutdown event.
- */
static int sh_wdt_notify_sys(struct notifier_block *this,
unsigned long code, void *unused)
{
+ struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
+
if (code == SYS_DOWN || code == SYS_HALT)
- sh_wdt_stop();
+ sh_wdt_stop(wdt);
return NOTIFY_DONE;
}
@@ -411,7 +308,6 @@ static const struct file_operations sh_wdt_fops = {
.unlocked_ioctl = sh_wdt_ioctl,
.open = sh_wdt_open,
.release = sh_wdt_close,
- .mmap = sh_wdt_mmap,
};
static const struct watchdog_info sh_wdt_info = {
@@ -431,66 +327,148 @@ static struct miscdevice sh_wdt_miscdev = {
.fops = &sh_wdt_fops,
};
-/**
- * sh_wdt_init - Initialize module
- * Registers the device and notifier handler. Actual device
- * initialization is handled by sh_wdt_open().
- */
-static int __init sh_wdt_init(void)
+static int __devinit sh_wdt_probe(struct platform_device *pdev)
{
+ struct sh_wdt *wdt;
+ struct resource *res;
int rc;
- if (clock_division_ratio < 0x5 || clock_division_ratio > 0x7) {
- clock_division_ratio = WTCSR_CKS_4096;
- printk(KERN_INFO PFX
- "clock_division_ratio value must be 0x5<=x<=0x7, using %d\n",
- clock_division_ratio);
+ /*
+ * As this driver only covers the global watchdog case, reject
+ * any attempts to register per-CPU watchdogs.
+ */
+ if (pdev->id != -1)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res))
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME))
+ return -EBUSY;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL);
+ if (unlikely(!wdt)) {
+ rc = -ENOMEM;
+ goto out_release;
}
- rc = sh_wdt_set_heartbeat(heartbeat);
- if (unlikely(rc)) {
- heartbeat = WATCHDOG_HEARTBEAT;
- printk(KERN_INFO PFX
- "heartbeat value must be 1<=x<=3600, using %d\n",
- heartbeat);
+ wdt->dev = &pdev->dev;
+
+ wdt->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (unlikely(!wdt->base)) {
+ rc = -ENXIO;
+ goto out_err;
}
rc = register_reboot_notifier(&sh_wdt_notifier);
if (unlikely(rc)) {
- printk(KERN_ERR PFX
+ dev_err(&pdev->dev,
"Can't register reboot notifier (err=%d)\n", rc);
- return rc;
+ goto out_unmap;
}
+ sh_wdt_miscdev.parent = wdt->dev;
+
rc = misc_register(&sh_wdt_miscdev);
if (unlikely(rc)) {
- printk(KERN_ERR PFX
+ dev_err(&pdev->dev,
"Can't register miscdev on minor=%d (err=%d)\n",
sh_wdt_miscdev.minor, rc);
- unregister_reboot_notifier(&sh_wdt_notifier);
- return rc;
+ goto out_unreg;
}
- printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
- heartbeat, nowayout);
+ init_timer(&wdt->timer);
+ wdt->timer.function = sh_wdt_ping;
+ wdt->timer.data = (unsigned long)wdt;
+ wdt->timer.expires = next_ping_period(clock_division_ratio);
+
+ platform_set_drvdata(pdev, wdt);
+ sh_wdt_dev = pdev;
+
+ dev_info(&pdev->dev, "initialized.\n");
return 0;
+
+out_unreg:
+ unregister_reboot_notifier(&sh_wdt_notifier);
+out_unmap:
+ devm_iounmap(&pdev->dev, wdt->base);
+out_err:
+ devm_kfree(&pdev->dev, wdt);
+out_release:
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+
+ return rc;
}
-/**
- * sh_wdt_exit - Deinitialize module
- * Unregisters the device and notifier handler. Actual device
- * deinitialization is handled by sh_wdt_close().
- */
-static void __exit sh_wdt_exit(void)
+static int __devexit sh_wdt_remove(struct platform_device *pdev)
{
+ struct sh_wdt *wdt = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, NULL);
+
misc_deregister(&sh_wdt_miscdev);
+
+ sh_wdt_dev = NULL;
+
unregister_reboot_notifier(&sh_wdt_notifier);
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+ devm_iounmap(&pdev->dev, wdt->base);
+ devm_kfree(&pdev->dev, wdt);
+
+ return 0;
}
+static struct platform_driver sh_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = sh_wdt_probe,
+ .remove = __devexit_p(sh_wdt_remove),
+};
+
+static int __init sh_wdt_init(void)
+{
+ int rc;
+
+ if (unlikely(clock_division_ratio < 0x5 ||
+ clock_division_ratio > 0x7)) {
+ clock_division_ratio = WTCSR_CKS_4096;
+
+ pr_info("%s: divisor must be 0x5<=x<=0x7, using %d\n",
+ DRV_NAME, clock_division_ratio);
+ }
+
+ rc = sh_wdt_set_heartbeat(heartbeat);
+ if (unlikely(rc)) {
+ heartbeat = WATCHDOG_HEARTBEAT;
+
+ pr_info("%s: heartbeat value must be 1<=x<=3600, using %d\n",
+ DRV_NAME, heartbeat);
+ }
+
+ pr_info("%s: configured with heartbeat=%d sec (nowayout=%d)\n",
+ DRV_NAME, heartbeat, nowayout);
+
+ return platform_driver_register(&sh_wdt_driver);
+}
+
+static void __exit sh_wdt_exit(void)
+{
+ platform_driver_unregister(&sh_wdt_driver);
+}
+module_init(sh_wdt_init);
+module_exit(sh_wdt_exit);
+
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("SuperH watchdog driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
@@ -507,6 +485,3 @@ module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-module_init(sh_wdt_init);
-module_exit(sh_wdt_exit);
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 8a1f0bc3e271..df88cfa05f35 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -434,11 +434,11 @@ static long wb_smsc_wdt_ioctl(struct file *file,
} uarg;
static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
+ .options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 0,
- .identity = "SMsC 37B787 Watchdog",
+ .identity = "SMsC 37B787 Watchdog",
};
uarg.i = (int __user *)arg;
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 833f49f43d43..100b114e3c3c 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -151,7 +151,7 @@ static int softdog_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
softdog_stop();
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 808372883e88..1bc493848ed4 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -259,7 +259,7 @@ static struct miscdevice sp5100_tco_miscdev = {
* register a pci_driver, because someone else might
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id sp5100_tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 9127eda2145b..0a0efe713bc8 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -278,7 +278,7 @@ static struct miscdevice sp805_wdt_miscdev = {
};
static int __devinit
-sp805_wdt_probe(struct amba_device *adev, struct amba_id *id)
+sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 18cdeb4c4258..5a90a4a871dd 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -68,7 +68,7 @@ struct platform_device *ts72xx_wdt_pdev;
* to control register):
* value description
* -------------------------
- * 0x00 watchdog disabled
+ * 0x00 watchdog disabled
* 0x01 250ms
* 0x02 500ms
* 0x03 1s
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index a6c12dec91a1..be9c4d839e15 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -87,10 +87,10 @@ static int w83697ug_select_wd_register(void)
outb_p(0x87, WDT_EFER); /* Enter extended function mode */
outb_p(0x87, WDT_EFER); /* Again according to manual */
- outb(0x20, WDT_EFER); /* check chip version */
+ outb(0x20, WDT_EFER); /* check chip version */
version = inb(WDT_EFDR);
- if (version == 0x68) { /* W83697UG */
+ if (version == 0x68) { /* W83697UG */
printk(KERN_INFO PFX "Watchdog chip version 0x%02x = "
"W83697UG/UF found at 0x%04x\n", version, wdt_io);
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void)
outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
outb_p(0x30, WDT_EFER); /* select CR30 */
c = inb_p(WDT_EFDR);
- outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
+ outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
return 0;
}
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 552a4381e78f..bb03e151a1d0 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -581,7 +581,7 @@ static void __exit wdt_exit(void)
}
/**
- * wdt_init:
+ * wdt_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 5c2521fc836c..a2f01c9f5c34 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -281,7 +281,7 @@ static int wdt977_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
wdt977_stop();
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 6130c88fa5ac..172dad6c7693 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -31,7 +31,7 @@
* Jeff Garzik : PCI cleanups
* Tigran Aivazian : Restructured wdtpci_init_one() to handle
* failures
- * Joel Becker : Added WDIOC_GET/SETTIMEOUT
+ * Joel Becker : Added WDIOC_GET/SETTIMEOUT
* Zwane Mwaikambo : Magic char closing, locking changes,
* cleanups
* Matt Domsch : nowayout module option
@@ -727,7 +727,7 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id wdtpci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
@@ -764,7 +764,7 @@ static void __exit wdtpci_cleanup(void)
/**
- * wdtpci_init:
+ * wdtpci_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
new file mode 100644
index 000000000000..49bd9d395562
--- /dev/null
+++ b/drivers/watchdog/xen_wdt.c
@@ -0,0 +1,359 @@
+/*
+ * Xen Watchdog Driver
+ *
+ * (c) Copyright 2010 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DRV_NAME "wdt"
+#define DRV_VERSION "0.01"
+#define PFX DRV_NAME ": "
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <xen/xen.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/sched.h>
+
+static struct platform_device *platform_device;
+static DEFINE_SPINLOCK(wdt_lock);
+static struct sched_watchdog wdt;
+static __kernel_time_t wdt_expires;
+static bool is_active, expect_release;
+
+#define WATCHDOG_TIMEOUT 60 /* in seconds */
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+ "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
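+/* Record the current timeout and return when it will expire (monotonic seconds) */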
+static inline __kernel_time_t set_timeout(void)
+{
+ wdt.timeout = timeout;
+ return ktime_to_timespec(ktime_get()).tv_sec + timeout;
+}
+
+static int xen_wdt_start(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (!wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -EBUSY;
+ if (err > 0) {
+ wdt.id = err;
+ wdt_expires = expires;
+ err = 0;
+ } else
+ BUG_ON(!err);
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_stop(void)
+{
+ int err = 0;
+
+ spin_lock(&wdt_lock);
+
+ wdt.timeout = 0;
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ if (!err)
+ wdt.id = 0;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_kick(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -ENXIO;
+ if (!err)
+ wdt_expires = expires;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_open(struct inode *inode, struct file *file)
+{
+ int err;
+
+ /* /dev/watchdog can only be opened once */
+ if (xchg(&is_active, true))
+ return -EBUSY;
+
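+ /* if a previous open left the watchdog running, just re-arm it */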
+ err = xen_wdt_start();
+ if (err == -EBUSY)
+ err = xen_wdt_kick();
+ return err ?: nonseekable_open(inode, file);
+}
+
+static int xen_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_release)
+ xen_wdt_stop();
+ else {
+ printk(KERN_CRIT PFX
+ "unexpected close, not stopping watchdog!\n");
+ xen_wdt_kick();
+ }
+ is_active = false;
+ expect_release = false;
+ return 0;
+}
+
+static ssize_t xen_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* in case it was set long ago */
+ expect_release = false;
+
+ /* scan to see whether or not we got the magic
+ character */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = true;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ xen_wdt_kick();
+ }
+ return len;
+}
+
+static long xen_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_timeout;
+ int __user *argp = (void __user *)arg;
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, argp);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, argp))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD)
+ retval = xen_wdt_stop();
+ if (new_options & WDIOS_ENABLECARD) {
+ retval = xen_wdt_start();
+ if (retval == -EBUSY)
+ retval = xen_wdt_kick();
+ }
+ return retval;
+
+ case WDIOC_KEEPALIVE:
+ xen_wdt_kick();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, argp))
+ return -EFAULT;
+ if (!new_timeout)
+ return -EINVAL;
+ timeout = new_timeout;
+ xen_wdt_kick();
+ /* fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, argp);
+
+ case WDIOC_GETTIMELEFT:
+ retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec;
+ return put_user(retval, argp);
+ }
+
+ return -ENOTTY;
+}
+
+static const struct file_operations xen_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = xen_wdt_write,
+ .unlocked_ioctl = xen_wdt_ioctl,
+ .open = xen_wdt_open,
+ .release = xen_wdt_release,
+};
+
+static struct miscdevice xen_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &xen_wdt_fops,
+};
+
+static int __devinit xen_wdt_probe(struct platform_device *dev)
+{
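+ /* probe for hypervisor support: a bogus id yields -EINVAL when the op exists */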
+ struct sched_watchdog wd = { .id = ~0 };
+ int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
+
+ switch (ret) {
+ case -EINVAL:
+ if (!timeout) {
+ timeout = WATCHDOG_TIMEOUT;
+ printk(KERN_INFO PFX
+ "timeout value invalid, using %d\n", timeout);
+ }
+
+ ret = misc_register(&xen_wdt_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (%d)\n",
+ WATCHDOG_MINOR, ret);
+ break;
+ }
+
+ printk(KERN_INFO PFX
+ "initialized (timeout=%ds, nowayout=%d)\n",
+ timeout, nowayout);
+ break;
+
+ case -ENOSYS:
+ printk(KERN_INFO PFX "not supported\n");
+ ret = -ENODEV;
+ break;
+
+ default:
+ printk(KERN_INFO PFX "bogus return value %d\n", ret);
+ break;
+ }
+
+ return ret;
+}
+
+static int __devexit xen_wdt_remove(struct platform_device *dev)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ xen_wdt_stop();
+
+ misc_deregister(&xen_wdt_miscdev);
+
+ return 0;
+}
+
+static void xen_wdt_shutdown(struct platform_device *dev)
+{
+ xen_wdt_stop();
+}
+
+static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return xen_wdt_stop();
+}
+
+static int xen_wdt_resume(struct platform_device *dev)
+{
+ return xen_wdt_start();
+}
+
+static struct platform_driver xen_wdt_driver = {
+ .probe = xen_wdt_probe,
+ .remove = __devexit_p(xen_wdt_remove),
+ .shutdown = xen_wdt_shutdown,
+ .suspend = xen_wdt_suspend,
+ .resume = xen_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+};
+
+static int __init xen_wdt_init_module(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION);
+
+ err = platform_driver_register(&xen_wdt_driver);
+ if (err)
+ return err;
+
+ platform_device = platform_device_register_simple(DRV_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(platform_device)) {
+ err = PTR_ERR(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ }
+
+ return err;
+}
+
+static void __exit xen_wdt_cleanup_module(void)
+{
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ printk(KERN_INFO PFX "module unloaded\n");
+}
+
+module_init(xen_wdt_init_module);
+module_exit(xen_wdt_cleanup_module);
+
+MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>");
+MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 07bec09d1dad..a59638b37c1a 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -76,10 +76,20 @@ config XEN_XENBUS_FRONTEND
config XEN_GNTDEV
tristate "userspace grant access device driver"
depends on XEN
+ default m
select MMU_NOTIFIER
help
Allows userspace processes to use grants.
+config XEN_GRANT_DEV_ALLOC
+ tristate "User-space grant reference allocator driver"
+ depends on XEN
+ default m
+ help
+ Allows userspace processes to create pages with access granted
+ to other domains. This can be used to implement frontend drivers
+ or as part of an inter-domain shared memory channel.
+
config XEN_PLATFORM_PCI
tristate "xen platform pci device driver"
depends on XEN_PVHVM && PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 5088cc2e6fe2..9585a1da52c6 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
+obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
@@ -18,5 +19,6 @@ obj-$(CONFIG_XEN_DOM0) += pci.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
+xen-gntalloc-y := gntalloc.o
xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 43f9f02c7db0..718050ace08f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -232,7 +232,7 @@ static int increase_reservation(unsigned long nr_pages)
set_phys_to_machine(pfn, frame_list[i]);
/* Link back into the page tables if not highmem. */
- if (pfn < max_low_pfn) {
+ if (!xen_hvm_domain() && pfn < max_low_pfn) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -280,7 +280,7 @@ static int decrease_reservation(unsigned long nr_pages)
scrub_page(page);
- if (!PageHighMem(page)) {
+ if (!xen_hvm_domain() && !PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = mfn_to_pfn(frame_list[i]);
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
balloon_append(pfn_to_page(pfn));
}
@@ -392,15 +392,19 @@ static struct notifier_block xenstore_notifier;
static int __init balloon_init(void)
{
- unsigned long pfn, extra_pfn_end;
+ unsigned long pfn, nr_pages, extra_pfn_end;
struct page *page;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
pr_info("xen_balloon: Initialising balloon driver.\n");
- balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+ if (xen_pv_domain())
+ nr_pages = xen_start_info->nr_pages;
+ else
+ nr_pages = max_pfn;
+ balloon_stats.current_pages = min(nr_pages, max_pfn);
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 74681478100a..0dd0af30d6a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -114,7 +114,7 @@ struct cpu_evtchn_s {
static __initdata struct cpu_evtchn_s init_evtchn_mask = {
.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
-static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
+static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
- cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
/* By default all event channels notify CPU#0. */
for_each_irq_desc(i, desc) {
- cpumask_copy(desc->affinity, cpumask_of(0));
+ cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
}
#endif
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
put_cpu();
}
-static int get_nr_hw_irqs(void)
+static int xen_allocate_irq_dynamic(void)
{
- int ret = 1;
+ int first = 0;
+ int irq;
#ifdef CONFIG_X86_IO_APIC
- ret = get_nr_irqs_gsi();
+ /*
+ * For an HVM guest or domain 0, which sees "real" (emulated or
+ * actual, respectively) GSIs, we allocate dynamic IRQs
+ * e.g. those corresponding to event channels or MSIs
+ * etc. from the range above those "real" GSIs to avoid
+ * collisions.
+ */
+ if (xen_initial_domain() || xen_hvm_domain())
+ first = get_nr_irqs_gsi();
#endif
- return ret;
-}
+retry:
+ irq = irq_alloc_desc_from(first, -1);
-static int find_unbound_pirq(int type)
-{
- int rc, i;
- struct physdev_get_free_pirq op_get_free_pirq;
- op_get_free_pirq.type = type;
+ if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
+ printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
+ first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
+ goto retry;
+ }
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
- if (!rc)
- return op_get_free_pirq.pirq;
+ if (irq < 0)
+ panic("No available IRQ to bind to: increase nr_irqs!\n");
- for (i = 0; i < nr_irqs; i++) {
- if (pirq_to_irq[i] < 0)
- return i;
- }
- return -1;
+ return irq;
}
-static int find_unbound_irq(void)
+static int xen_allocate_irq_gsi(unsigned gsi)
{
- struct irq_data *data;
- int irq, res;
- int bottom = get_nr_hw_irqs();
- int top = nr_irqs-1;
-
- if (bottom == nr_irqs)
- goto no_irqs;
+ int irq;
- /* This loop starts from the top of IRQ space and goes down.
- * We need this b/c if we have a PCI device in a Xen PV guest
- * we do not have an IO-APIC (though the backend might have them)
- * mapped in. To not have a collision of physical IRQs with the Xen
- * event channels start at the top of the IRQ space for virtual IRQs.
+ /*
+ * A PV guest has no concept of a GSI (since it has no ACPI
+ * nor access to/knowledge of the physical APICs). Therefore
+ * all IRQs are dynamically allocated from the entire IRQ
+ * space.
*/
- for (irq = top; irq > bottom; irq--) {
- data = irq_get_irq_data(irq);
- /* only 15->0 have init'd desc; handle irq > 16 */
- if (!data)
- break;
- if (data->chip == &no_irq_chip)
- break;
- if (data->chip != &xen_dynamic_chip)
- continue;
- if (irq_info[irq].type == IRQT_UNBOUND)
- return irq;
- }
+ if (xen_pv_domain() && !xen_initial_domain())
+ return xen_allocate_irq_dynamic();
- if (irq == bottom)
- goto no_irqs;
+ /* Legacy IRQ descriptors are already allocated by the arch. */
+ if (gsi < NR_IRQS_LEGACY)
+ return gsi;
- res = irq_alloc_desc_at(irq, -1);
-
- if (WARN_ON(res != irq))
- return -1;
+ irq = irq_alloc_desc_at(gsi, -1);
+ if (irq < 0)
+ panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
return irq;
-
-no_irqs:
- panic("No available IRQ to bind to: increase nr_irqs!\n");
}
-static bool identity_mapped_irq(unsigned irq)
+static void xen_free_irq(unsigned irq)
{
- /* identity map all the hardware irqs */
- return irq < get_nr_hw_irqs();
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq < NR_IRQS_LEGACY)
+ return;
+
+ irq_free_desc(irq);
}
static void pirq_unmask_notify(int irq)
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
return desc && desc->action == NULL;
}
-static unsigned int startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(unsigned int irq)
{
struct evtchn_bind_pirq bind_pirq;
struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
return 0;
}
-static void shutdown_pirq(unsigned int irq)
+static unsigned int startup_pirq(struct irq_data *data)
+{
+ return __startup_pirq(data->irq);
+}
+
+static void shutdown_pirq(struct irq_data *data)
{
struct evtchn_close close;
+ unsigned int irq = data->irq;
struct irq_info *info = info_for_irq(irq);
int evtchn = evtchn_from_irq(irq);
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
info->evtchn = 0;
}
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
{
- startup_pirq(irq);
+ startup_pirq(data);
}
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
{
}
-static void ack_pirq(unsigned int irq)
+static void ack_pirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
- move_native_irq(irq);
+ move_native_irq(data->irq);
if (VALID_EVTCHN(evtchn)) {
mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
}
}
-static void end_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (WARN_ON(!desc))
- return;
-
- if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
- (IRQ_DISABLED|IRQ_PENDING)) {
- shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
- }
-}
-
static int find_irq_by_gsi(unsigned gsi)
{
int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
goto out; /* XXX need refcount? */
}
- /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
- * we are using the !xen_initial_domain() to drop in the function.*/
- if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
- xen_pv_domain())) {
- irq = gsi;
- irq_alloc_desc_at(irq, -1);
- } else
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_gsi(gsi);
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
* this in the priv domain. */
if (xen_initial_domain() &&
HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
- irq_free_desc(irq);
+ xen_free_irq(irq);
irq = -ENOSPC;
goto out;
}
@@ -674,87 +644,46 @@ out:
}
#ifdef CONFIG_PCI_MSI
-#include <linux/msi.h>
-#include "../pci/msi.h"
-
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
- spin_lock(&irq_mapping_update_lock);
-
- if (alloc & XEN_ALLOC_IRQ) {
- *irq = find_unbound_irq();
- if (*irq == -1)
- goto out;
- }
-
- if (alloc & XEN_ALLOC_PIRQ) {
- *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
- if (*pirq == -1)
- goto out;
- }
+ int rc;
+ struct physdev_get_free_pirq op_get_free_pirq;
- set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
- handle_level_irq, name);
+ op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
- irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
- pirq_to_irq[*pirq] = *irq;
+ WARN_ONCE(rc == -ENOSYS,
+ "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
-out:
- spin_unlock(&irq_mapping_update_lock);
+ return rc ? -1 : op_get_free_pirq.pirq;
}
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ int pirq, int vector, const char *name)
{
- int irq = -1;
- struct physdev_map_pirq map_irq;
- int rc;
- int pos;
- u32 table_offset, bir;
-
- memset(&map_irq, 0, sizeof(map_irq));
- map_irq.domid = DOMID_SELF;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
- map_irq.index = -1;
- map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
- map_irq.devfn = dev->devfn;
-
- if (type == PCI_CAP_ID_MSIX) {
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-
- map_irq.table_base = pci_resource_start(dev, bir);
- map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
- }
+ int irq, ret;
spin_lock(&irq_mapping_update_lock);
- irq = find_unbound_irq();
-
+ irq = xen_allocate_irq_dynamic();
if (irq == -1)
goto out;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- if (rc) {
- printk(KERN_WARNING "xen map irq failed %d\n", rc);
-
- irq_free_desc(irq);
-
- irq = -1;
- goto out;
- }
- irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
-
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq,
- (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
+ handle_level_irq, name);
+ irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
+ pirq_to_irq[pirq] = irq;
+ ret = set_irq_msi(irq, msidesc);
+ if (ret < 0)
+ goto error_irq;
out:
spin_unlock(&irq_mapping_update_lock);
return irq;
+error_irq:
+ spin_unlock(&irq_mapping_update_lock);
+ xen_free_irq(irq);
+ return -1;
}
#endif
@@ -779,11 +708,12 @@ int xen_destroy_irq(int irq)
printk(KERN_WARNING "unmap irq failed %d\n", rc);
goto out;
}
- pirq_to_irq[info->u.pirq.pirq] = -1;
}
+ pirq_to_irq[info->u.pirq.pirq] = -1;
+
irq_info[irq] = mk_unbound_info();
- irq_free_desc(irq);
+ xen_free_irq(irq);
out:
spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +744,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = evtchn_to_irq[evtchn];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event");
@@ -839,7 +769,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
irq = per_cpu(ipi_to_irq, cpu)[ipi];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
if (irq < 0)
goto out;
@@ -864,6 +794,21 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
return irq;
}
+static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ unsigned int remote_port)
+{
+ struct evtchn_bind_interdomain bind_interdomain;
+ int err;
+
+ bind_interdomain.remote_dom = remote_domain;
+ bind_interdomain.remote_port = remote_port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
+
+ return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
+}
+
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
@@ -875,7 +820,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = per_cpu(virq_to_irq, cpu)[virq];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
@@ -934,7 +879,7 @@ static void unbind_from_irq(unsigned int irq)
if (irq_info[irq].type != IRQT_UNBOUND) {
irq_info[irq] = mk_unbound_info();
- irq_free_desc(irq);
+ xen_free_irq(irq);
}
spin_unlock(&irq_mapping_update_lock);
@@ -959,6 +904,29 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
+int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+ unsigned int remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+{
+ int irq, retval;
+
+ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
+ if (irq < 0)
+ return irq;
+
+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ if (retval != 0) {
+ unbind_from_irq(irq);
+ return retval;
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
+
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
@@ -990,7 +958,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
if (irq < 0)
return irq;
- irqflags |= IRQF_NO_SUSPEND;
+ irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -1234,11 +1202,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
return 0;
}
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+ bool force)
{
unsigned tcpu = cpumask_first(dest);
- return rebind_irq_to_cpu(irq, tcpu);
+ return rebind_irq_to_cpu(data->irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1226,35 @@ int resend_irq_on_evtchn(unsigned int irq)
return 1;
}
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
}
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
}
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
- move_masked_irq(irq);
+ move_masked_irq(data->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
}
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
struct shared_info *sh = HYPERVISOR_shared_info;
int ret = 0;
@@ -1334,7 +1303,7 @@ static void restore_cpu_pirqs(void)
printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
- startup_pirq(irq);
+ __startup_pirq(irq);
}
}
@@ -1445,7 +1414,6 @@ void xen_poll_irq(int irq)
void xen_irq_resume(void)
{
unsigned int cpu, irq, evtchn;
- struct irq_desc *desc;
init_evtchn_cpu_bindings();
@@ -1465,66 +1433,48 @@ void xen_irq_resume(void)
restore_cpu_ipis(cpu);
}
- /*
- * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
- * are not handled by the IRQ core.
- */
- for_each_irq_desc(irq, desc) {
- if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
- continue;
- if (desc->status & IRQ_DISABLED)
- continue;
-
- evtchn = evtchn_from_irq(irq);
- if (evtchn == -1)
- continue;
-
- unmask_evtchn(evtchn);
- }
-
restore_cpu_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
- .name = "xen-dyn",
+ .name = "xen-dyn",
- .disable = disable_dynirq,
- .mask = disable_dynirq,
- .unmask = enable_dynirq,
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
- .eoi = ack_dynirq,
- .set_affinity = set_affinity_irq,
- .retrigger = retrigger_dynirq,
+ .irq_eoi = ack_dynirq,
+ .irq_set_affinity = set_affinity_irq,
+ .irq_retrigger = retrigger_dynirq,
};
static struct irq_chip xen_pirq_chip __read_mostly = {
- .name = "xen-pirq",
+ .name = "xen-pirq",
- .startup = startup_pirq,
- .shutdown = shutdown_pirq,
+ .irq_startup = startup_pirq,
+ .irq_shutdown = shutdown_pirq,
- .enable = enable_pirq,
- .unmask = enable_pirq,
+ .irq_enable = enable_pirq,
+ .irq_unmask = enable_pirq,
- .disable = disable_pirq,
- .mask = disable_pirq,
+ .irq_disable = disable_pirq,
+ .irq_mask = disable_pirq,
- .ack = ack_pirq,
- .end = end_pirq,
+ .irq_ack = ack_pirq,
- .set_affinity = set_affinity_irq,
+ .irq_set_affinity = set_affinity_irq,
- .retrigger = retrigger_dynirq,
+ .irq_retrigger = retrigger_dynirq,
};
static struct irq_chip xen_percpu_chip __read_mostly = {
- .name = "xen-percpu",
+ .name = "xen-percpu",
- .disable = disable_dynirq,
- .mask = disable_dynirq,
- .unmask = enable_dynirq,
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
- .ack = ack_dynirq,
+ .irq_ack = ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
new file mode 100644
index 000000000000..a7ffdfe19fc9
--- /dev/null
+++ b/drivers/xen/gntalloc.c
@@ -0,0 +1,545 @@
+/******************************************************************************
+ * gntalloc.c
+ *
+ * Device for creating grant references (in user-space) that may be shared
+ * with other domains.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This driver exists to allow userspace programs in Linux to allocate kernel
+ * memory that will later be shared with another domain. Without this device,
+ * Linux userspace programs cannot create grant references.
+ *
+ * How this stuff works:
+ * X -> granting a page to Y
+ * Y -> mapping the grant from X
+ *
+ * 1. X uses the gntalloc device to allocate a page of kernel memory, P.
+ * 2. X creates an entry in the grant table that says domid(Y) can access P.
+ * This is done without a hypercall unless the grant table needs expansion.
+ * 3. X gives the grant reference identifier, GREF, to Y.
+ * 4. Y maps the page, either directly into kernel memory for use in a backend
+ * driver, or via the gntdev device to map into the address space of an
+ * application running in Y. This is the first point at which Xen does any
+ * tracking of the page.
+ * 5. A program in X mmap()s a segment of the gntalloc device that corresponds
+ * to the shared page, and can now communicate with Y over the shared page.
+ *
+ *
+ * NOTE TO USERSPACE LIBRARIES:
+ * The grant allocation and mmap()ing are, naturally, two separate operations.
+ * You set up the sharing by calling the create ioctl() and then the mmap().
+ * Teardown requires munmap() and either close() or ioctl().
+ *
+ * WARNING: Since Xen does not allow a guest to forcibly end the use of a grant
+ * reference, this device can be used to consume kernel memory by leaving grant
+ * references mapped by another domain when an application exits. Therefore,
+ * there is a global limit on the number of pages that can be allocated. When
+ * all references to the page are unmapped, it will be freed during the next
+ * grant operation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+#include <xen/gntalloc.h>
+#include <xen/events.h>
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
+ "the gntalloc device");
+
+static LIST_HEAD(gref_list);
+static DEFINE_SPINLOCK(gref_lock);
+static int gref_size;
+
+struct notify_info {
+ uint16_t pgoff:12; /* Bits 0-11: Offset of the byte to clear */
+ uint16_t flags:2; /* Bits 12-13: Unmap notification flags */
+ int event; /* Port (event channel) to notify */
+};
+
+/* Metadata on a grant reference. */
+struct gntalloc_gref {
+ struct list_head next_gref; /* list entry gref_list */
+ struct list_head next_file; /* list entry file->list, if open */
+ struct page *page; /* The shared page */
+ uint64_t file_index; /* File offset for mmap() */
+ unsigned int users; /* Use count - when zero, waiting on Xen */
+ grant_ref_t gref_id; /* The grant reference number */
+ struct notify_info notify; /* Unmap notification */
+};
+
+struct gntalloc_file_private_data {
+ struct list_head list;
+ uint64_t index;
+};
+
+static void __del_gref(struct gntalloc_gref *gref);
+
+static void do_cleanup(void)
+{
+ struct gntalloc_gref *gref, *n;
+ list_for_each_entry_safe(gref, n, &gref_list, next_gref) {
+ if (!gref->users)
+ __del_gref(gref);
+ }
+}
+
+static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
+ uint32_t *gref_ids, struct gntalloc_file_private_data *priv)
+{
+ int i, rc, readonly;
+ LIST_HEAD(queue_gref);
+ LIST_HEAD(queue_file);
+ struct gntalloc_gref *gref;
+
+ readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
+ rc = -ENOMEM;
+ for (i = 0; i < op->count; i++) {
+ gref = kzalloc(sizeof(*gref), GFP_KERNEL);
+ if (!gref)
+ goto undo;
+ list_add_tail(&gref->next_gref, &queue_gref);
+ list_add_tail(&gref->next_file, &queue_file);
+ gref->users = 1;
+ gref->file_index = op->index + i * PAGE_SIZE;
+ gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!gref->page)
+ goto undo;
+
+ /* Grant foreign access to the page. */
+ gref->gref_id = gnttab_grant_foreign_access(op->domid,
+ pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+ if (gref->gref_id < 0) {
+ rc = gref->gref_id;
+ goto undo;
+ }
+ gref_ids[i] = gref->gref_id;
+ }
+
+ /* Add to gref lists. */
+ spin_lock(&gref_lock);
+ list_splice_tail(&queue_gref, &gref_list);
+ list_splice_tail(&queue_file, &priv->list);
+ spin_unlock(&gref_lock);
+
+ return 0;
+
+undo:
+ spin_lock(&gref_lock);
+ gref_size -= (op->count - i);
+
+ list_for_each_entry(gref, &queue_file, next_file) {
+ /* __del_gref does not remove from queue_file */
+ __del_gref(gref);
+ }
+
+ /* It's possible for the target domain to map the just-allocated grant
+ * references by blindly guessing their IDs; if this is done, then
+ * __del_gref will leave them in the queue_gref list. They need to be
+ * added to the global list so that we can free them when they are no
+ * longer referenced.
+ */
+ if (unlikely(!list_empty(&queue_gref)))
+ list_splice_tail(&queue_gref, &gref_list);
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static void __del_gref(struct gntalloc_gref *gref)
+{
+ if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ uint8_t *tmp = kmap(gref->page);
+ tmp[gref->notify.pgoff] = 0;
+ kunmap(gref->page);
+ }
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ notify_remote_via_evtchn(gref->notify.event);
+
+ gref->notify.flags = 0;
+
+ if (gref->gref_id > 0) {
+ if (gnttab_query_foreign_access(gref->gref_id))
+ return;
+
+ if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
+ return;
+ }
+
+ gref_size--;
+ list_del(&gref->next_gref);
+
+ if (gref->page)
+ __free_page(gref->page);
+
+ kfree(gref);
+}
+
+/* finds contiguous grant references in a file, returns the first */
+static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv,
+ uint64_t index, uint32_t count)
+{
+ struct gntalloc_gref *rv = NULL, *gref;
+ list_for_each_entry(gref, &priv->list, next_file) {
+ if (gref->file_index == index && !rv)
+ rv = gref;
+ if (rv) {
+ if (gref->file_index != index)
+ return NULL;
+ index += PAGE_SIZE;
+ count--;
+ if (count == 0)
+ return rv;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * -------------------------------------
+ * File operations.
+ * -------------------------------------
+ */
+static int gntalloc_open(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_nomem;
+ INIT_LIST_HEAD(&priv->list);
+
+ filp->private_data = priv;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ return 0;
+
+out_nomem:
+ return -ENOMEM;
+}
+
+static int gntalloc_release(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ spin_lock(&gref_lock);
+ while (!list_empty(&priv->list)) {
+ gref = list_entry(priv->list.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ }
+ kfree(priv);
+ spin_unlock(&gref_lock);
+
+ return 0;
+}
+
+static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
+ struct ioctl_gntalloc_alloc_gref __user *arg)
+{
+ int rc = 0;
+ struct ioctl_gntalloc_alloc_gref op;
+ uint32_t *gref_ids;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+ if (!gref_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&gref_lock);
+ /* Clean up pages that have zero (local) users but are still mapped
+ * by remote domains. Since those pages count towards the limit that we
+ * are about to enforce, removing them here is a good idea.
+ */
+ do_cleanup();
+ if (gref_size + op.count > limit) {
+ spin_unlock(&gref_lock);
+ rc = -ENOSPC;
+ goto out_free;
+ }
+ gref_size += op.count;
+ op.index = priv->index;
+ priv->index += op.count * PAGE_SIZE;
+ spin_unlock(&gref_lock);
+
+ rc = add_grefs(&op, gref_ids, priv);
+ if (rc < 0)
+ goto out_free;
+
+ /* Once we finish add_grefs, it is unsafe to touch the new reference,
+ * since it is possible for a concurrent ioctl to remove it (by guessing
+ * its index). If the userspace application doesn't provide valid memory
+ * to write the IDs to, then it will need to close the file in order to
+ * release - which it will do by segfaulting when it tries to access the
+ * IDs to close them.
+ */
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ if (copy_to_user(arg->gref_ids, gref_ids,
+ sizeof(gref_ids[0]) * op.count)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+out_free:
+ kfree(gref_ids);
+out:
+ return rc;
+}
+
+static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ int i, rc = 0;
+ struct ioctl_gntalloc_dealloc_gref op;
+ struct gntalloc_gref *gref, *n;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto dealloc_grant_out;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, op.index, op.count);
+ if (gref) {
+ /* Remove from the file list only, and decrease reference count.
+ * The later call to do_cleanup() will remove from gref_list and
+ * free the memory if the pages aren't mapped anywhere.
+ */
+ for (i = 0; i < op.count; i++) {
+ n = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ gref = n;
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ do_cleanup();
+
+ spin_unlock(&gref_lock);
+dealloc_grant_out:
+ return rc;
+}
+
+static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ struct ioctl_gntalloc_unmap_notify op;
+ struct gntalloc_gref *gref;
+ uint64_t index;
+ int pgoff;
+ int rc;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ index = op.index & ~(PAGE_SIZE - 1);
+ pgoff = op.index & (PAGE_SIZE - 1);
+
+ spin_lock(&gref_lock);
+
+ gref = find_grefs(priv, index, 1);
+ if (!gref) {
+ rc = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ gref->notify.flags = op.action;
+ gref->notify.pgoff = pgoff;
+ gref->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+
+ switch (cmd) {
+ case IOCTL_GNTALLOC_ALLOC_GREF:
+ return gntalloc_ioctl_alloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_DEALLOC_GREF:
+ return gntalloc_ioctl_dealloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY:
+ return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static void gntalloc_vma_close(struct vm_area_struct *vma)
+{
+ struct gntalloc_gref *gref = vma->vm_private_data;
+ if (!gref)
+ return;
+
+ spin_lock(&gref_lock);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ spin_unlock(&gref_lock);
+}
+
+static struct vm_operations_struct gntalloc_vmops = {
+ .close = gntalloc_vma_close,
+};
+
+static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+ int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rv, i;
+
+ pr_debug("%s: priv %p, page %lu+%d\n", __func__,
+ priv, vma->vm_pgoff, count);
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
+ if (gref == NULL) {
+ rv = -ENOENT;
+ pr_debug("%s: Could not find grant reference",
+ __func__);
+ goto out_unlock;
+ }
+
+ vma->vm_private_data = gref;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP;
+
+ vma->vm_ops = &gntalloc_vmops;
+
+ for (i = 0; i < count; i++) {
+ gref->users++;
+ rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ gref->page);
+ if (rv)
+ goto out_unlock;
+
+ gref = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ }
+ rv = 0;
+
+out_unlock:
+ spin_unlock(&gref_lock);
+ return rv;
+}
+
+static const struct file_operations gntalloc_fops = {
+ .owner = THIS_MODULE,
+ .open = gntalloc_open,
+ .release = gntalloc_release,
+ .unlocked_ioctl = gntalloc_ioctl,
+ .mmap = gntalloc_mmap
+};
+
+/*
+ * -------------------------------------
+ * Module creation/destruction.
+ * -------------------------------------
+ */
+static struct miscdevice gntalloc_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/gntalloc",
+ .fops = &gntalloc_fops,
+};
+
+static int __init gntalloc_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&gntalloc_miscdev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register misc gntalloc device\n");
+ return err;
+ }
+
+ pr_debug("Created grant allocation device at %d,%d\n",
+ MISC_MAJOR, gntalloc_miscdev.minor);
+
+ return 0;
+}
+
+static void __exit gntalloc_exit(void)
+{
+ misc_deregister(&gntalloc_miscdev);
+}
+
+module_init(gntalloc_init);
+module_exit(gntalloc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, "
+ "Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("User-space grant reference allocator driver");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1e31cdcdae1e..d43ff3072c99 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -32,10 +32,12 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/gntdev.h>
+#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
@@ -45,35 +47,46 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
-static int limit = 1024;
+static int limit = 1024*1024;
module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
- "once by a gntdev instance");
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
+ "the gntdev device");
+
+static atomic_t pages_mapped = ATOMIC_INIT(0);
+
+static int use_ptemod;
struct gntdev_priv {
struct list_head maps;
- uint32_t used;
- uint32_t limit;
/* lock protects maps from concurrent changes */
spinlock_t lock;
struct mm_struct *mm;
struct mmu_notifier mn;
};
+struct unmap_notify {
+ int flags;
+ /* Address relative to the start of the grant_map */
+ int addr;
+ int event;
+};
+
struct grant_map {
struct list_head next;
- struct gntdev_priv *priv;
struct vm_area_struct *vma;
int index;
int count;
int flags;
- int is_mapped;
+ atomic_t users;
+ struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct page **pages;
};
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+
/* ------------------------------------------------------------------ */
static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -82,9 +95,7 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#ifdef DEBUG
struct grant_map *map;
- pr_debug("maps list (priv %p, usage %d/%d)\n",
- priv, priv->used, priv->limit);
-
+ pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
pr_debug(" index %2d, count %2d %s\n",
map->index, map->count,
@@ -115,14 +126,13 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (add->pages[i] == NULL)
goto err;
+ add->map_ops[i].handle = -1;
+ add->unmap_ops[i].handle = -1;
}
add->index = 0;
add->count = count;
- add->priv = priv;
-
- if (add->count + priv->used > priv->limit)
- goto err;
+ atomic_set(&add->users, 1);
return add;
@@ -154,7 +164,6 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
list_add_tail(&add->next, &priv->maps);
done:
- priv->used += add->count;
gntdev_print_maps(priv, "[new]", add->index);
}
@@ -166,57 +175,57 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
continue;
- if (map->count != count)
- continue;
- return map;
- }
- return NULL;
-}
-
-static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
- unsigned long vaddr)
-{
- struct grant_map *map;
-
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (vaddr < map->vma->vm_start)
- continue;
- if (vaddr >= map->vma->vm_end)
+ if (count && map->count != count)
continue;
return map;
}
return NULL;
}
-static int gntdev_del_map(struct grant_map *map)
+static void gntdev_put_map(struct grant_map *map)
{
int i;
- if (map->vma)
- return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
+ if (!map)
+ return;
- map->priv->used -= map->count;
- list_del(&map->next);
- return 0;
-}
+ if (!atomic_dec_and_test(&map->users))
+ return;
-static void gntdev_free_map(struct grant_map *map)
-{
- int i;
+ atomic_sub(map->count, &pages_mapped);
- if (!map)
- return;
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+ notify_remote_via_evtchn(map->notify.event);
+ }
+
+ if (map->pages) {
+ if (!use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
- if (map->pages)
for (i = 0; i < map->count; i++) {
- if (map->pages[i])
+ uint32_t check, *tmp;
+ if (!map->pages[i])
+ continue;
+ /* XXX When unmapping in an HVM domain, Xen will
+ * sometimes end up mapping the GFN to an invalid MFN.
+ * In this case, writes will be discarded and reads will
+ * return all 0xFF bytes. Leak these unusable GFNs
+ * until Xen supports fixing their p2m mapping.
+ *
+ * Confirmed present in Xen 4.1-RC3 with HVM source
+ */
+ tmp = kmap(map->pages[i]);
+ *tmp = 0xdeaddead;
+ mb();
+ check = *tmp;
+ kunmap(map->pages[i]);
+ if (check == 0xdeaddead)
__free_page(map->pages[i]);
+ else
+ pr_debug("Discard page %d=%ld\n", i,
+ page_to_pfn(map->pages[i]));
}
+ }
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -231,24 +240,39 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
{
struct grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
- gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
+ gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
- gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
- 0 /* handle */);
+ gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
+ -1 /* handle */);
return 0;
}
static int map_grant_pages(struct grant_map *map)
{
int i, err = 0;
+ phys_addr_t addr;
+
+ if (!use_ptemod) {
+ /* Note: it could already be mapped */
+ if (map->map_ops[0].handle != -1)
+ return 0;
+ for (i = 0; i < map->count; i++) {
+ addr = (phys_addr_t)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ gnttab_set_unmap_op(&map->unmap_ops[i], addr,
+ map->flags, -1 /* handle */);
+ }
+ }
pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, map->pages, map->count);
@@ -258,28 +282,81 @@ static int map_grant_pages(struct grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status)
err = -EINVAL;
- map->unmap_ops[i].handle = map->map_ops[i].handle;
+ else {
+ BUG_ON(map->map_ops[i].handle == -1);
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ pr_debug("map handle=%d\n", map->map_ops[i].handle);
+ }
}
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
- pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ int pgno = (map->notify.addr >> PAGE_SHIFT);
+ if (pgno >= offset && pgno < offset + pages && use_ptemod) {
+ void __user *tmp = (void __user *)
+ map->vma->vm_start + map->notify.addr;
+ err = copy_to_user(tmp, &err, 1);
+ if (err)
+ return err;
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ } else if (pgno >= offset && pgno < offset + pages) {
+ uint8_t *tmp = kmap(map->pages[pgno]);
+ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+ kunmap(map->pages[pgno]);
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ }
+ }
+
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
if (err)
return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
err = -EINVAL;
- map->unmap_ops[offset+i].handle = 0;
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
}
return err;
}
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+ int range, err = 0;
+
+ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+
+ /* It is possible the requested range will have a "hole" where we
+ * already unmapped some of the grants. Only unmap valid ranges.
+ */
+ while (pages && !err) {
+ while (pages && map->unmap_ops[offset].handle == -1) {
+ offset++;
+ pages--;
+ }
+ range = 0;
+ while (range < pages) {
+ if (map->unmap_ops[offset+range].handle == -1) {
+ range--;
+ break;
+ }
+ range++;
+ }
+ err = __unmap_grant_pages(map, offset, range);
+ offset += range;
+ pages -= range;
+ }
+
+ return err;
+}
+
/* ------------------------------------------------------------------ */
static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -287,22 +364,13 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("close %p\n", vma);
- map->is_mapped = 0;
map->vma = NULL;
vma->vm_private_data = NULL;
-}
-
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
- vmf->virtual_address, vmf->pgoff);
- vmf->flags = VM_FAULT_ERROR;
- return 0;
+ gntdev_put_map(map);
}
static struct vm_operations_struct gntdev_vmops = {
.close = gntdev_vma_close,
- .fault = gntdev_vma_fault,
};
/* ------------------------------------------------------------------ */
@@ -320,8 +388,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
list_for_each_entry(map, &priv->maps, next) {
if (!map->vma)
continue;
- if (!map->is_mapped)
- continue;
if (map->vma->vm_start >= end)
continue;
if (map->vma->vm_end <= start)
@@ -386,16 +452,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
spin_lock_init(&priv->lock);
- priv->limit = limit;
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
+ if (use_ptemod) {
+ priv->mm = get_task_mm(current);
+ if (!priv->mm) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ priv->mn.ops = &gntdev_mmu_ops;
+ ret = mmu_notifier_register(&priv->mn, priv->mm);
+ mmput(priv->mm);
}
- priv->mn.ops = &gntdev_mmu_ops;
- ret = mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
if (ret) {
kfree(priv);
@@ -412,21 +479,19 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct grant_map *map;
- int err;
pr_debug("priv %p\n", priv);
spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
- err = gntdev_del_map(map);
- if (WARN_ON(err))
- gntdev_free_map(map);
-
+ list_del(&map->next);
+ gntdev_put_map(map);
}
spin_unlock(&priv->lock);
- mmu_notifier_unregister(&priv->mn, priv->mm);
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);
kfree(priv);
return 0;
}
@@ -443,16 +508,21 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
pr_debug("priv %p, add %d\n", priv, op.count);
if (unlikely(op.count <= 0))
return -EINVAL;
- if (unlikely(op.count > priv->limit))
- return -EINVAL;
err = -ENOMEM;
map = gntdev_alloc_map(priv, op.count);
if (!map)
return err;
+
+ if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ pr_debug("can't map: over limit\n");
+ gntdev_put_map(map);
+ return err;
+ }
+
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
+ gntdev_put_map(map);
return err;
}
@@ -461,13 +531,9 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
op.index = map->index << PAGE_SHIFT;
spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
- }
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ return -EFAULT;
+
return 0;
}
@@ -484,11 +550,12 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
- if (map)
- err = gntdev_del_map(map);
+ if (map) {
+ list_del(&map->next);
+ gntdev_put_map(map);
+ err = 0;
+ }
spin_unlock(&priv->lock);
- if (!err)
- gntdev_free_map(map);
return err;
}
@@ -496,43 +563,66 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
struct ioctl_gntdev_get_offset_for_vaddr op;
+ struct vm_area_struct *vma;
struct grant_map *map;
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- spin_lock(&priv->lock);
- map = gntdev_find_map_vaddr(priv, op.vaddr);
- if (map == NULL ||
- map->vma->vm_start != op.vaddr) {
- spin_unlock(&priv->lock);
+ vma = find_vma(current->mm, op.vaddr);
+ if (!vma || vma->vm_ops != &gntdev_vmops)
return -EINVAL;
- }
+
+ map = vma->vm_private_data;
+ if (!map)
+ return -EINVAL;
+
op.offset = map->index << PAGE_SHIFT;
op.count = map->count;
- spin_unlock(&priv->lock);
if (copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
return 0;
}
-static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
- struct ioctl_gntdev_set_max_grants __user *u)
+static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
- struct ioctl_gntdev_set_max_grants op;
+ struct ioctl_gntdev_unmap_notify op;
+ struct grant_map *map;
+ int rc;
- if (copy_from_user(&op, u, sizeof(op)) != 0)
+ if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
- pr_debug("priv %p, limit %d\n", priv, op.count);
- if (op.count > limit)
- return -E2BIG;
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
+ return -EINVAL;
spin_lock(&priv->lock);
- priv->limit = op.count;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ uint64_t begin = map->index << PAGE_SHIFT;
+ uint64_t end = (map->index + map->count) << PAGE_SHIFT;
+ if (op.index >= begin && op.index < end)
+ goto found;
+ }
+ rc = -ENOENT;
+ goto unlock_out;
+
+ found:
+ if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
+ (map->flags & GNTMAP_readonly)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ map->notify.flags = op.action;
+ map->notify.addr = op.index - (map->index << PAGE_SHIFT);
+ map->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
spin_unlock(&priv->lock);
- return 0;
+ return rc;
}
static long gntdev_ioctl(struct file *flip,
@@ -551,8 +641,8 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
- case IOCTL_GNTDEV_SET_MAX_GRANTS:
- return gntdev_ioctl_set_max_grants(priv, ptr);
+ case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
+ return gntdev_ioctl_notify(priv, ptr);
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
@@ -568,7 +658,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
struct grant_map *map;
- int err = -EINVAL;
+ int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -580,47 +670,70 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (map->vma)
+ if (use_ptemod && map->vma)
goto unlock_out;
- if (priv->mm != vma->vm_mm) {
+ if (use_ptemod && priv->mm != vma->vm_mm) {
printk(KERN_WARNING "Huh? Other mm?\n");
goto unlock_out;
}
+ atomic_inc(&map->users);
+
vma->vm_ops = &gntdev_vmops;
vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
vma->vm_private_data = map;
- map->vma = vma;
- map->flags = GNTMAP_host_map | GNTMAP_application_map;
- if (!(vma->vm_flags & VM_WRITE))
- map->flags |= GNTMAP_readonly;
+ if (use_ptemod)
+ map->vma = vma;
+
+ if (map->flags) {
+ if ((vma->vm_flags & VM_WRITE) &&
+ (map->flags & GNTMAP_readonly))
+ return -EINVAL;
+ } else {
+ map->flags = GNTMAP_host_map;
+ if (!(vma->vm_flags & VM_WRITE))
+ map->flags |= GNTMAP_readonly;
+ }
spin_unlock(&priv->lock);
- err = apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- find_grant_ptes, map);
- if (err) {
- printk(KERN_WARNING "find_grant_ptes() failure.\n");
- return err;
+ if (use_ptemod) {
+ err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ find_grant_ptes, map);
+ if (err) {
+ printk(KERN_WARNING "find_grant_ptes() failure.\n");
+ goto out_put_map;
+ }
}
err = map_grant_pages(map);
- if (err) {
- printk(KERN_WARNING "map_grant_pages() failure.\n");
- return err;
- }
+ if (err)
+ goto out_put_map;
- map->is_mapped = 1;
+ if (!use_ptemod) {
+ for (i = 0; i < count; i++) {
+ err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
+ map->pages[i]);
+ if (err)
+ goto out_put_map;
+ }
+ }
return 0;
unlock_out:
spin_unlock(&priv->lock);
return err;
+
+out_put_map:
+ if (use_ptemod)
+ map->vma = NULL;
+ gntdev_put_map(map);
+ return err;
}
static const struct file_operations gntdev_fops = {
@@ -646,6 +759,8 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
+ use_ptemod = xen_pv_domain();
+
err = misc_register(&gntdev_miscdev);
if (err != 0) {
printk(KERN_ERR "Could not register gntdev device\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 9ef54ebc1194..9428ced04807 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -458,6 +458,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
/* m2p override only supported for GNTMAP_contains_pte mappings */
if (!(map_ops[i].flags & GNTMAP_contains_pte))
@@ -483,6 +486,9 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i]);
if (ret)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 24177272bcb8..ebb292859b59 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -34,42 +34,38 @@ enum shutdown_state {
/* Ignore multiple shutdown requests. */
static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
-#ifdef CONFIG_PM_SLEEP
-static int xen_hvm_suspend(void *data)
-{
- int err;
- struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
- int *cancelled = data;
-
- BUG_ON(!irqs_disabled());
-
- err = sysdev_suspend(PMSG_SUSPEND);
- if (err) {
- printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
- err);
- return err;
- }
-
- *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+struct suspend_info {
+ int cancelled;
+ unsigned long arg; /* extra hypercall argument */
+ void (*pre)(void);
+ void (*post)(int cancelled);
+};
- xen_hvm_post_suspend(*cancelled);
+static void xen_hvm_post_suspend(int cancelled)
+{
+ xen_arch_hvm_post_suspend(cancelled);
gnttab_resume();
+}
- if (!*cancelled) {
- xen_irq_resume();
- xen_console_resume();
- xen_timer_resume();
- }
-
- sysdev_resume();
+static void xen_pre_suspend(void)
+{
+ xen_mm_pin_all();
+ gnttab_suspend();
+ xen_arch_pre_suspend();
+}
- return 0;
+static void xen_post_suspend(int cancelled)
+{
+ xen_arch_post_suspend(cancelled);
+ gnttab_resume();
+ xen_mm_unpin_all();
}
+#ifdef CONFIG_PM_SLEEP
static int xen_suspend(void *data)
{
+ struct suspend_info *si = data;
int err;
- int *cancelled = data;
BUG_ON(!irqs_disabled());
@@ -80,22 +76,20 @@ static int xen_suspend(void *data)
return err;
}
- xen_mm_pin_all();
- gnttab_suspend();
- xen_pre_suspend();
+ if (si->pre)
+ si->pre();
/*
* This hypercall returns 1 if suspend was cancelled
* or the domain was merely checkpointed, and 0 if it
* is resuming in a new domain.
*/
- *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+ si->cancelled = HYPERVISOR_suspend(si->arg);
- xen_post_suspend(*cancelled);
- gnttab_resume();
- xen_mm_unpin_all();
+ if (si->post)
+ si->post(si->cancelled);
- if (!*cancelled) {
+ if (!si->cancelled) {
xen_irq_resume();
xen_console_resume();
xen_timer_resume();
@@ -109,7 +103,7 @@ static int xen_suspend(void *data)
static void do_suspend(void)
{
int err;
- int cancelled = 1;
+ struct suspend_info si;
shutting_down = SHUTDOWN_SUSPEND;
@@ -139,20 +133,29 @@ static void do_suspend(void)
goto out_resume;
}
- if (xen_hvm_domain())
- err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0));
- else
- err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
+ si.cancelled = 1;
+
+ if (xen_hvm_domain()) {
+ si.arg = 0UL;
+ si.pre = NULL;
+ si.post = &xen_hvm_post_suspend;
+ } else {
+ si.arg = virt_to_mfn(xen_start_info);
+ si.pre = &xen_pre_suspend;
+ si.post = &xen_post_suspend;
+ }
+
+ err = stop_machine(xen_suspend, &si, cpumask_of(0));
dpm_resume_noirq(PMSG_RESUME);
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
- cancelled = 1;
+ si.cancelled = 1;
}
out_resume:
- if (!cancelled) {
+ if (!si.cancelled) {
xen_arch_resume();
xs_resume();
} else
@@ -172,12 +175,39 @@ out:
}
#endif /* CONFIG_PM_SLEEP */
+struct shutdown_handler {
+ const char *command;
+ void (*cb)(void);
+};
+
+static void do_poweroff(void)
+{
+ shutting_down = SHUTDOWN_POWEROFF;
+ orderly_poweroff(false);
+}
+
+static void do_reboot(void)
+{
+ shutting_down = SHUTDOWN_POWEROFF; /* ? */
+ ctrl_alt_del();
+}
+
static void shutdown_handler(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
char *str;
struct xenbus_transaction xbt;
int err;
+ static struct shutdown_handler handlers[] = {
+ { "poweroff", do_poweroff },
+ { "halt", do_poweroff },
+ { "reboot", do_reboot },
+#ifdef CONFIG_PM_SLEEP
+ { "suspend", do_suspend },
+#endif
+ {NULL, NULL},
+ };
+ static struct shutdown_handler *handler;
if (shutting_down != SHUTDOWN_INVALID)
return;
@@ -194,7 +224,14 @@ static void shutdown_handler(struct xenbus_watch *watch,
return;
}
- xenbus_write(xbt, "control", "shutdown", "");
+ for (handler = &handlers[0]; handler->command; handler++) {
+ if (strcmp(str, handler->command) == 0)
+ break;
+ }
+
+ /* Only acknowledge commands which we are prepared to handle. */
+ if (handler->cb)
+ xenbus_write(xbt, "control", "shutdown", "");
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN) {
@@ -202,17 +239,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
goto again;
}
- if (strcmp(str, "poweroff") == 0 ||
- strcmp(str, "halt") == 0) {
- shutting_down = SHUTDOWN_POWEROFF;
- orderly_poweroff(false);
- } else if (strcmp(str, "reboot") == 0) {
- shutting_down = SHUTDOWN_POWEROFF; /* ? */
- ctrl_alt_del();
-#ifdef CONFIG_PM_SLEEP
- } else if (strcmp(str, "suspend") == 0) {
- do_suspend();
-#endif
+ if (handler->cb) {
+ handler->cb();
} else {
printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
shutting_down = SHUTDOWN_INVALID;
@@ -291,27 +319,18 @@ static int shutdown_event(struct notifier_block *notifier,
return NOTIFY_DONE;
}
-static int __init __setup_shutdown_event(void)
-{
- /* Delay initialization in the PV on HVM case */
- if (xen_hvm_domain())
- return 0;
-
- if (!xen_pv_domain())
- return -ENODEV;
-
- return xen_setup_shutdown_event();
-}
-
int xen_setup_shutdown_event(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = shutdown_event
};
+
+ if (!xen_domain())
+ return -ENODEV;
register_xenstore_notifier(&xenstore_notifier);
return 0;
}
EXPORT_SYMBOL_GPL(xen_setup_shutdown_event);
-subsys_initcall(__setup_shutdown_event);
+subsys_initcall(xen_setup_shutdown_event);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index afbe041f42c5..319dd0a94d51 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -156,9 +156,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
if (ret)
goto out;
xenbus_probe(NULL);
- ret = xen_setup_shutdown_event();
- if (ret)
- goto out;
return 0;
out:
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 02a2cf616318..515455296378 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -21,8 +21,8 @@
#include <linux/posix_acl_xattr.h>
#include "xattr.h"
#include "acl.h"
-#include "v9fs_vfs.h"
#include "v9fs.h"
+#include "v9fs_vfs.h"
static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
{
@@ -59,7 +59,8 @@ int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
- if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
+ if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) ||
+ ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) {
set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL);
set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
return 0;
@@ -71,11 +72,15 @@ int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
if (!IS_ERR(dacl) && !IS_ERR(pacl)) {
set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
set_cached_acl(inode, ACL_TYPE_ACCESS, pacl);
- posix_acl_release(dacl);
- posix_acl_release(pacl);
} else
retval = -EIO;
+ if (!IS_ERR(dacl))
+ posix_acl_release(dacl);
+
+ if (!IS_ERR(pacl))
+ posix_acl_release(pacl);
+
return retval;
}
@@ -100,9 +105,10 @@ int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags)
return -ECHILD;
v9ses = v9fs_inode2v9ses(inode);
- if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
+ if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) ||
+ ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) {
/*
- * On access = client mode get the acl
+ * On access = client and acl = on mode, get the acl
* values from the server
*/
return 0;
@@ -128,6 +134,10 @@ static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
struct inode *inode = dentry->d_inode;
set_cached_acl(inode, type, acl);
+
+ if (!acl)
+ return 0;
+
/* Set a setxattr request to server */
size = posix_acl_xattr_size(acl->a_count);
buffer = kmalloc(size, GFP_KERNEL);
@@ -177,10 +187,8 @@ int v9fs_acl_chmod(struct dentry *dentry)
int v9fs_set_create_acl(struct dentry *dentry,
struct posix_acl *dpacl, struct posix_acl *pacl)
{
- if (dpacl)
- v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, dpacl);
- if (pacl)
- v9fs_set_acl(dentry, ACL_TYPE_ACCESS, pacl);
+ v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, dpacl);
+ v9fs_set_acl(dentry, ACL_TYPE_ACCESS, pacl);
posix_acl_release(dpacl);
posix_acl_release(pacl);
return 0;
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 0dbe0d139ac2..5b335c5086a1 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -33,67 +33,11 @@
#define CACHETAG_LEN 11
-struct kmem_cache *vcookie_cache;
-
struct fscache_netfs v9fs_cache_netfs = {
.name = "9p",
.version = 0,
};
-static void init_once(void *foo)
-{
- struct v9fs_cookie *vcookie = (struct v9fs_cookie *) foo;
- vcookie->fscache = NULL;
- vcookie->qid = NULL;
- inode_init_once(&vcookie->inode);
-}
-
-/**
- * v9fs_init_vcookiecache - initialize a cache for vcookies to maintain
- * vcookie to inode mapping
- *
- * Returns 0 on success.
- */
-
-static int v9fs_init_vcookiecache(void)
-{
- vcookie_cache = kmem_cache_create("vcookie_cache",
- sizeof(struct v9fs_cookie),
- 0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD),
- init_once);
- if (!vcookie_cache)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * v9fs_destroy_vcookiecache - destroy the cache of vcookies
- *
- */
-
-static void v9fs_destroy_vcookiecache(void)
-{
- kmem_cache_destroy(vcookie_cache);
-}
-
-int __v9fs_cache_register(void)
-{
- int ret;
- ret = v9fs_init_vcookiecache();
- if (ret < 0)
- return ret;
-
- return fscache_register_netfs(&v9fs_cache_netfs);
-}
-
-void __v9fs_cache_unregister(void)
-{
- v9fs_destroy_vcookiecache();
- fscache_unregister_netfs(&v9fs_cache_netfs);
-}
-
/**
* v9fs_random_cachetag - Generate a random tag to be associated
* with a new cache session.
@@ -133,9 +77,9 @@ static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
}
const struct fscache_cookie_def v9fs_cache_session_index_def = {
- .name = "9P.session",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
- .get_key = v9fs_cache_session_get_key,
+ .name = "9P.session",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = v9fs_cache_session_get_key,
};
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
@@ -163,33 +107,33 @@ void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
- const struct v9fs_cookie *vcookie = cookie_netfs_data;
- memcpy(buffer, &vcookie->qid->path, sizeof(vcookie->qid->path));
-
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &vcookie->inode,
- vcookie->qid->path);
- return sizeof(vcookie->qid->path);
+ const struct v9fs_inode *v9inode = cookie_netfs_data;
+ memcpy(buffer, &v9inode->fscache_key->path,
+ sizeof(v9inode->fscache_key->path));
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode,
+ v9inode->fscache_key->path);
+ return sizeof(v9inode->fscache_key->path);
}
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
uint64_t *size)
{
- const struct v9fs_cookie *vcookie = cookie_netfs_data;
- *size = i_size_read(&vcookie->inode);
+ const struct v9fs_inode *v9inode = cookie_netfs_data;
+ *size = i_size_read(&v9inode->vfs_inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &vcookie->inode,
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &v9inode->vfs_inode,
*size);
}
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t buflen)
{
- const struct v9fs_cookie *vcookie = cookie_netfs_data;
- memcpy(buffer, &vcookie->qid->version, sizeof(vcookie->qid->version));
-
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &vcookie->inode,
- vcookie->qid->version);
- return sizeof(vcookie->qid->version);
+ const struct v9fs_inode *v9inode = cookie_netfs_data;
+ memcpy(buffer, &v9inode->fscache_key->version,
+ sizeof(v9inode->fscache_key->version));
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode,
+ v9inode->fscache_key->version);
+ return sizeof(v9inode->fscache_key->version);
}
static enum
@@ -197,13 +141,13 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
const void *buffer,
uint16_t buflen)
{
- const struct v9fs_cookie *vcookie = cookie_netfs_data;
+ const struct v9fs_inode *v9inode = cookie_netfs_data;
- if (buflen != sizeof(vcookie->qid->version))
+ if (buflen != sizeof(v9inode->fscache_key->version))
return FSCACHE_CHECKAUX_OBSOLETE;
- if (memcmp(buffer, &vcookie->qid->version,
- sizeof(vcookie->qid->version)))
+ if (memcmp(buffer, &v9inode->fscache_key->version,
+ sizeof(v9inode->fscache_key->version)))
return FSCACHE_CHECKAUX_OBSOLETE;
return FSCACHE_CHECKAUX_OKAY;
@@ -211,7 +155,7 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
- struct v9fs_cookie *vcookie = cookie_netfs_data;
+ struct v9fs_inode *v9inode = cookie_netfs_data;
struct pagevec pvec;
pgoff_t first;
int loop, nr_pages;
@@ -220,7 +164,7 @@ static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
first = 0;
for (;;) {
- nr_pages = pagevec_lookup(&pvec, vcookie->inode.i_mapping,
+ nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
first,
PAGEVEC_SIZE - pagevec_count(&pvec));
if (!nr_pages)
@@ -249,115 +193,114 @@ const struct fscache_cookie_def v9fs_cache_inode_index_def = {
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
- struct v9fs_cookie *vcookie;
+ struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
if (!S_ISREG(inode->i_mode))
return;
- vcookie = v9fs_inode2cookie(inode);
- if (vcookie->fscache)
+ v9inode = V9FS_I(inode);
+ if (v9inode->fscache)
return;
v9ses = v9fs_inode2v9ses(inode);
- vcookie->fscache = fscache_acquire_cookie(v9ses->fscache,
+ v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
- vcookie);
+ v9inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode,
- vcookie->fscache);
+ v9inode->fscache);
}
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
- if (!vcookie->fscache)
+ if (!v9inode->fscache)
return;
P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode,
- vcookie->fscache);
+ v9inode->fscache);
- fscache_relinquish_cookie(vcookie->fscache, 0);
- vcookie->fscache = NULL;
+ fscache_relinquish_cookie(v9inode->fscache, 0);
+ v9inode->fscache = NULL;
}
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
- if (!vcookie->fscache)
+ if (!v9inode->fscache)
return;
P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode,
- vcookie->fscache);
+ v9inode->fscache);
- fscache_relinquish_cookie(vcookie->fscache, 1);
- vcookie->fscache = NULL;
+ fscache_relinquish_cookie(v9inode->fscache, 1);
+ v9inode->fscache = NULL;
}
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_fid *fid;
- if (!vcookie->fscache)
+ if (!v9inode->fscache)
return;
- spin_lock(&vcookie->lock);
+ spin_lock(&v9inode->fscache_lock);
fid = filp->private_data;
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
v9fs_cache_inode_flush_cookie(inode);
else
v9fs_cache_inode_get_cookie(inode);
- spin_unlock(&vcookie->lock);
+ spin_unlock(&v9inode->fscache_lock);
}
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
struct v9fs_session_info *v9ses;
struct fscache_cookie *old;
- if (!vcookie->fscache)
+ if (!v9inode->fscache)
return;
- old = vcookie->fscache;
+ old = v9inode->fscache;
- spin_lock(&vcookie->lock);
- fscache_relinquish_cookie(vcookie->fscache, 1);
+ spin_lock(&v9inode->fscache_lock);
+ fscache_relinquish_cookie(v9inode->fscache, 1);
v9ses = v9fs_inode2v9ses(inode);
- vcookie->fscache = fscache_acquire_cookie(v9ses->fscache,
+ v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
- vcookie);
-
+ v9inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p",
- inode, old, vcookie->fscache);
+ inode, old, v9inode->fscache);
- spin_unlock(&vcookie->lock);
+ spin_unlock(&v9inode->fscache_lock);
}
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
struct inode *inode = page->mapping->host;
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
- BUG_ON(!vcookie->fscache);
+ BUG_ON(!v9inode->fscache);
- return fscache_maybe_release_page(vcookie->fscache, page, gfp);
+ return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}
void __v9fs_fscache_invalidate_page(struct page *page)
{
struct inode *inode = page->mapping->host;
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
- BUG_ON(!vcookie->fscache);
+ BUG_ON(!v9inode->fscache);
if (PageFsCache(page)) {
- fscache_wait_on_page_write(vcookie->fscache, page);
+ fscache_wait_on_page_write(v9inode->fscache, page);
BUG_ON(!PageLocked(page));
- fscache_uncache_page(vcookie->fscache, page);
+ fscache_uncache_page(v9inode->fscache, page);
}
}
@@ -380,13 +323,13 @@ static void v9fs_vfs_readpage_complete(struct page *page, void *data,
int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
int ret;
- const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
- if (!vcookie->fscache)
+ if (!v9inode->fscache)
return -ENOBUFS;
- ret = fscache_read_or_alloc_page(vcookie->fscache,
+ ret = fscache_read_or_alloc_page(v9inode->fscache,
page,
v9fs_vfs_readpage_complete,
NULL,
@@ -418,13 +361,13 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
unsigned *nr_pages)
{
int ret;
- const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages);
- if (!vcookie->fscache)
+ if (!v9inode->fscache)
return -ENOBUFS;
- ret = fscache_read_or_alloc_pages(vcookie->fscache,
+ ret = fscache_read_or_alloc_pages(v9inode->fscache,
mapping, pages, nr_pages,
v9fs_vfs_readpage_complete,
NULL,
@@ -453,11 +396,22 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
int ret;
- const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
- ret = fscache_write_page(vcookie->fscache, page, GFP_KERNEL);
+ ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
P9_DPRINTK(P9_DEBUG_FSC, "ret = %d", ret);
if (ret != 0)
v9fs_uncache_page(inode, page);
}
+
+/*
+ * wait for a page to complete writing to the cache
+ */
+void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
+{
+ const struct v9fs_inode *v9inode = V9FS_I(inode);
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+ if (PageFsCache(page))
+ fscache_wait_on_page_write(v9inode->fscache, page);
+}
diff --git a/fs/9p/cache.h b/fs/9p/cache.h
index a94192bfaee8..049507a5b01c 100644
--- a/fs/9p/cache.h
+++ b/fs/9p/cache.h
@@ -25,20 +25,6 @@
#include <linux/fscache.h>
#include <linux/spinlock.h>
-extern struct kmem_cache *vcookie_cache;
-
-struct v9fs_cookie {
- spinlock_t lock;
- struct inode inode;
- struct fscache_cookie *fscache;
- struct p9_qid *qid;
-};
-
-static inline struct v9fs_cookie *v9fs_inode2cookie(const struct inode *inode)
-{
- return container_of(inode, struct v9fs_cookie, inode);
-}
-
extern struct fscache_netfs v9fs_cache_netfs;
extern const struct fscache_cookie_def v9fs_cache_session_index_def;
extern const struct fscache_cookie_def v9fs_cache_inode_index_def;
@@ -64,23 +50,8 @@ extern int __v9fs_readpages_from_fscache(struct inode *inode,
struct list_head *pages,
unsigned *nr_pages);
extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
-
-
-/**
- * v9fs_cache_register - Register v9fs file system with the cache
- */
-static inline int v9fs_cache_register(void)
-{
- return __v9fs_cache_register();
-}
-
-/**
- * v9fs_cache_unregister - Unregister v9fs from the cache
- */
-static inline void v9fs_cache_unregister(void)
-{
- __v9fs_cache_unregister();
-}
+extern void __v9fs_fscache_wait_on_page_write(struct inode *inode,
+ struct page *page);
static inline int v9fs_fscache_release_page(struct page *page,
gfp_t gfp)
@@ -117,28 +88,27 @@ static inline void v9fs_readpage_to_fscache(struct inode *inode,
static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
{
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
- fscache_uncache_page(vcookie->fscache, page);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ fscache_uncache_page(v9inode->fscache, page);
BUG_ON(PageFsCache(page));
}
-static inline void v9fs_vcookie_set_qid(struct inode *inode,
+static inline void v9fs_fscache_set_key(struct inode *inode,
struct p9_qid *qid)
{
- struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
- spin_lock(&vcookie->lock);
- vcookie->qid = qid;
- spin_unlock(&vcookie->lock);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ spin_lock(&v9inode->fscache_lock);
+ v9inode->fscache_key = qid;
+ spin_unlock(&v9inode->fscache_lock);
}
-#else /* CONFIG_9P_FSCACHE */
-
-static inline int v9fs_cache_register(void)
+static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
+ struct page *page)
{
- return 1;
+ return __v9fs_fscache_wait_on_page_write(inode, page);
}
-static inline void v9fs_cache_unregister(void) {}
+#else /* CONFIG_9P_FSCACHE */
static inline int v9fs_fscache_release_page(struct page *page,
gfp_t gfp) {
@@ -168,9 +138,11 @@ static inline void v9fs_readpage_to_fscache(struct inode *inode,
static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
{}
-static inline void v9fs_vcookie_set_qid(struct inode *inode,
- struct p9_qid *qid)
-{}
+static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
+ struct page *page)
+{
+ return;
+}
#endif /* CONFIG_9P_FSCACHE */
#endif /* _9P_CACHE_H */
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index b00223c99d70..cd63e002d826 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -125,46 +125,17 @@ err_out:
return -ENOMEM;
}
-/**
- * v9fs_fid_lookup - lookup for a fid, try to walk if not found
- * @dentry: dentry to look for fid in
- *
- * Look for a fid in the specified dentry for the current user.
- * If no fid is found, try to create one walking from a fid from the parent
- * dentry (if it has one), or the root dentry. If the user haven't accessed
- * the fs yet, attach now and walk from the root.
- */
-
-struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
+static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
+ uid_t uid, int any)
{
- int i, n, l, clone, any, access;
- u32 uid;
- struct p9_fid *fid, *old_fid = NULL;
struct dentry *ds;
- struct v9fs_session_info *v9ses;
char **wnames, *uname;
+ int i, n, l, clone, access;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid, *old_fid = NULL;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
access = v9ses->flags & V9FS_ACCESS_MASK;
- switch (access) {
- case V9FS_ACCESS_SINGLE:
- case V9FS_ACCESS_USER:
- case V9FS_ACCESS_CLIENT:
- uid = current_fsuid();
- any = 0;
- break;
-
- case V9FS_ACCESS_ANY:
- uid = v9ses->uid;
- any = 1;
- break;
-
- default:
- uid = ~0;
- any = 0;
- break;
- }
-
fid = v9fs_fid_find(dentry, uid, any);
if (fid)
return fid;
@@ -250,6 +221,45 @@ err_out:
return fid;
}
+/**
+ * v9fs_fid_lookup - lookup for a fid, try to walk if not found
+ * @dentry: dentry to look for fid in
+ *
+ * Look for a fid in the specified dentry for the current user.
+ * If no fid is found, try to create one walking from a fid from the parent
+ * dentry (if it has one), or the root dentry. If the user hasn't accessed
+ * the fs yet, attach now and walk from the root.
+ */
+
+struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
+{
+ uid_t uid;
+ int any, access;
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ access = v9ses->flags & V9FS_ACCESS_MASK;
+ switch (access) {
+ case V9FS_ACCESS_SINGLE:
+ case V9FS_ACCESS_USER:
+ case V9FS_ACCESS_CLIENT:
+ uid = current_fsuid();
+ any = 0;
+ break;
+
+ case V9FS_ACCESS_ANY:
+ uid = v9ses->uid;
+ any = 1;
+ break;
+
+ default:
+ uid = ~0;
+ any = 0;
+ break;
+ }
+ return v9fs_fid_lookup_with_uid(dentry, uid, any);
+}
+
struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
{
struct p9_fid *fid, *ret;
@@ -261,3 +271,39 @@ struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
ret = p9_client_walk(fid, 0, NULL, 1);
return ret;
}
+
+static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
+{
+ struct p9_fid *fid, *ret;
+
+ fid = v9fs_fid_lookup_with_uid(dentry, uid, 0);
+ if (IS_ERR(fid))
+ return fid;
+
+ ret = p9_client_walk(fid, 0, NULL, 1);
+ return ret;
+}
+
+struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
+{
+ int err;
+ struct p9_fid *fid;
+
+ fid = v9fs_fid_clone_with_uid(dentry, 0);
+ if (IS_ERR(fid))
+ goto error_out;
+ /*
+ * writeback fid will only be used to write back the
+ * dirty pages. We always request the open fid in read-write
+ * mode so that a partial page write, which results in a page
+ * read, can work.
+ */
+ err = p9_client_open(fid, O_RDWR);
+ if (err < 0) {
+ p9_client_clunk(fid);
+ fid = ERR_PTR(err);
+ goto error_out;
+ }
+error_out:
+ return fid;
+}
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index c3bbd6af996d..bb0b6e7f58fc 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -19,7 +19,8 @@
* Boston, MA 02111-1301 USA
*
*/
-
+#ifndef FS_9P_FID_H
+#define FS_9P_FID_H
#include <linux/list.h>
/**
@@ -45,3 +46,5 @@ struct v9fs_dentry {
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
+struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
+#endif
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 2f77cd33ba83..c82b017f51f3 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -39,6 +39,7 @@
static DEFINE_SPINLOCK(v9fs_sessionlist_lock);
static LIST_HEAD(v9fs_sessionlist);
+struct kmem_cache *v9fs_inode_cache;
/*
* Option Parsing (code inspired by NFS code)
@@ -55,7 +56,7 @@ enum {
/* Cache options */
Opt_cache_loose, Opt_fscache,
/* Access options */
- Opt_access,
+ Opt_access, Opt_posixacl,
/* Error token */
Opt_err
};
@@ -73,6 +74,7 @@ static const match_table_t tokens = {
{Opt_fscache, "fscache"},
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
+ {Opt_posixacl, "posixacl"},
{Opt_err, NULL}
};
@@ -194,15 +196,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
else if (strcmp(s, "any") == 0)
v9ses->flags |= V9FS_ACCESS_ANY;
else if (strcmp(s, "client") == 0) {
-#ifdef CONFIG_9P_FS_POSIX_ACL
v9ses->flags |= V9FS_ACCESS_CLIENT;
-#else
- P9_DPRINTK(P9_DEBUG_ERROR,
- "access=client option not supported\n");
- kfree(s);
- ret = -EINVAL;
- goto free_and_return;
-#endif
} else {
v9ses->flags |= V9FS_ACCESS_SINGLE;
v9ses->uid = simple_strtoul(s, &e, 10);
@@ -212,6 +206,16 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
kfree(s);
break;
+ case Opt_posixacl:
+#ifdef CONFIG_9P_FS_POSIX_ACL
+ v9ses->flags |= V9FS_POSIX_ACL;
+#else
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "Not defined CONFIG_9P_FS_POSIX_ACL. "
+ "Ignoring posixacl option\n");
+#endif
+ break;
+
default:
continue;
}
@@ -260,19 +264,12 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
list_add(&v9ses->slist, &v9fs_sessionlist);
spin_unlock(&v9fs_sessionlist_lock);
- v9ses->flags = V9FS_ACCESS_USER;
strcpy(v9ses->uname, V9FS_DEFUSER);
strcpy(v9ses->aname, V9FS_DEFANAME);
v9ses->uid = ~0;
v9ses->dfltuid = V9FS_DEFUID;
v9ses->dfltgid = V9FS_DEFGID;
- rc = v9fs_parse_options(v9ses, data);
- if (rc < 0) {
- retval = rc;
- goto error;
- }
-
v9ses->clnt = p9_client_create(dev_name, data);
if (IS_ERR(v9ses->clnt)) {
retval = PTR_ERR(v9ses->clnt);
@@ -281,10 +278,20 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
goto error;
}
- if (p9_is_proto_dotl(v9ses->clnt))
+ v9ses->flags = V9FS_ACCESS_USER;
+
+ if (p9_is_proto_dotl(v9ses->clnt)) {
+ v9ses->flags = V9FS_ACCESS_CLIENT;
v9ses->flags |= V9FS_PROTO_2000L;
- else if (p9_is_proto_dotu(v9ses->clnt))
+ } else if (p9_is_proto_dotu(v9ses->clnt)) {
v9ses->flags |= V9FS_PROTO_2000U;
+ }
+
+ rc = v9fs_parse_options(v9ses, data);
+ if (rc < 0) {
+ retval = rc;
+ goto error;
+ }
v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
@@ -306,6 +313,14 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags |= V9FS_ACCESS_ANY;
v9ses->uid = ~0;
}
+ if (!v9fs_proto_dotl(v9ses) ||
+ !((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
+ /*
+ * We support ACL checks on the client only if the protocol is
+ * 9P2000.L and access is V9FS_ACCESS_CLIENT.
+ */
+ v9ses->flags &= ~V9FS_ACL_MASK;
+ }
fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, ~0,
v9ses->aname);
@@ -467,6 +482,63 @@ static void v9fs_sysfs_cleanup(void)
kobject_put(v9fs_kobj);
}
+static void v9fs_inode_init_once(void *foo)
+{
+ struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
+#ifdef CONFIG_9P_FSCACHE
+ v9inode->fscache = NULL;
+ v9inode->fscache_key = NULL;
+#endif
+ inode_init_once(&v9inode->vfs_inode);
+}
+
+/**
+ * v9fs_init_inode_cache - initialize a cache for 9P
+ * Returns 0 on success.
+ */
+static int v9fs_init_inode_cache(void)
+{
+ v9fs_inode_cache = kmem_cache_create("v9fs_inode_cache",
+ sizeof(struct v9fs_inode),
+ 0, (SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD),
+ v9fs_inode_init_once);
+ if (!v9fs_inode_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * v9fs_destroy_inode_cache - destroy the cache of 9P inode
+ *
+ */
+static void v9fs_destroy_inode_cache(void)
+{
+ kmem_cache_destroy(v9fs_inode_cache);
+}
+
+static int v9fs_cache_register(void)
+{
+ int ret;
+ ret = v9fs_init_inode_cache();
+ if (ret < 0)
+ return ret;
+#ifdef CONFIG_9P_FSCACHE
+ return fscache_register_netfs(&v9fs_cache_netfs);
+#else
+ return ret;
+#endif
+}
+
+static void v9fs_cache_unregister(void)
+{
+ v9fs_destroy_inode_cache();
+#ifdef CONFIG_9P_FSCACHE
+ fscache_unregister_netfs(&v9fs_cache_netfs);
+#endif
+}
+
/**
* init_v9fs - Initialize module
*
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index c4b5d8864f0d..bd8496db135b 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -20,6 +20,9 @@
* Boston, MA 02111-1301 USA
*
*/
+#ifndef FS_9P_V9FS_H
+#define FS_9P_V9FS_H
+
#include <linux/backing-dev.h>
/**
@@ -28,8 +31,10 @@
* @V9FS_PROTO_2000L: whether or not to use 9P2000.l extensions
* @V9FS_ACCESS_SINGLE: only the mounting user can access the hierarchy
* @V9FS_ACCESS_USER: a new attach will be issued for every user (default)
+ * @V9FS_ACCESS_CLIENT: Just like user, but the access check is performed on the client.
* @V9FS_ACCESS_ANY: use a single attach for all users
* @V9FS_ACCESS_MASK: bit mask of different ACCESS options
+ * @V9FS_POSIX_ACL: POSIX ACLs are enforced
*
* Session flags reflect options selected by users at mount time
*/
@@ -37,13 +42,15 @@
V9FS_ACCESS_USER | \
V9FS_ACCESS_CLIENT)
#define V9FS_ACCESS_MASK V9FS_ACCESS_ANY
+#define V9FS_ACL_MASK V9FS_POSIX_ACL
enum p9_session_flags {
V9FS_PROTO_2000U = 0x01,
V9FS_PROTO_2000L = 0x02,
V9FS_ACCESS_SINGLE = 0x04,
V9FS_ACCESS_USER = 0x08,
- V9FS_ACCESS_CLIENT = 0x10
+ V9FS_ACCESS_CLIENT = 0x10,
+ V9FS_POSIX_ACL = 0x20
};
/* possible values of ->cache */
@@ -109,8 +116,28 @@ struct v9fs_session_info {
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
struct rw_semaphore rename_sem;
+ struct p9_fid *root_fid; /* Used for file system sync */
+};
+
+/* cache_validity flags */
+#define V9FS_INO_INVALID_ATTR 0x01
+
+struct v9fs_inode {
+#ifdef CONFIG_9P_FSCACHE
+ spinlock_t fscache_lock;
+ struct fscache_cookie *fscache;
+ struct p9_qid *fscache_key;
+#endif
+ unsigned int cache_validity;
+ struct p9_fid *writeback_fid;
+ struct inode vfs_inode;
};
+static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
+{
+ return container_of(inode, struct v9fs_inode, vfs_inode);
+}
+
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
char *);
extern void v9fs_session_close(struct v9fs_session_info *v9ses);
@@ -124,16 +151,15 @@ extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
void *p);
-extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
- struct p9_fid *fid,
- struct super_block *sb);
-
+extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
+ struct p9_fid *fid,
+ struct super_block *sb);
extern const struct inode_operations v9fs_dir_inode_operations_dotl;
extern const struct inode_operations v9fs_file_inode_operations_dotl;
extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
-extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
- struct p9_fid *fid,
- struct super_block *sb);
+extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
+ struct p9_fid *fid,
+ struct super_block *sb);
/* other default globals */
#define V9FS_PORT 564
@@ -158,7 +184,7 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
}
/**
- * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * v9fs_get_inode_from_fid - Helper routine to populate an inode by
* issuing a attribute request
* @v9ses: session information
* @fid: fid to issue attribute request for
@@ -166,11 +192,12 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
*
*/
static inline struct inode *
-v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
+v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
{
if (v9fs_proto_dotl(v9ses))
- return v9fs_inode_dotl(v9ses, fid, sb);
+ return v9fs_inode_from_fid_dotl(v9ses, fid, sb);
else
- return v9fs_inode(v9ses, fid, sb);
+ return v9fs_inode_from_fid(v9ses, fid, sb);
}
+#endif
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index b789f8e597ec..4014160903a9 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -20,6 +20,8 @@
* Boston, MA 02111-1301 USA
*
*/
+#ifndef FS_9P_V9FS_VFS_H
+#define FS_9P_V9FS_VFS_H
/* plan9 semantics are that created files are implicitly opened.
* But linux semantics are that you call create, then open.
@@ -36,6 +38,7 @@
* unlink calls remove, which is an implicit clunk. So we have to track
* that kind of thing so that we don't try to clunk a dead fid.
*/
+#define P9_LOCK_TIMEOUT (30*HZ)
extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
@@ -45,13 +48,15 @@ extern const struct file_operations v9fs_dir_operations;
extern const struct file_operations v9fs_dir_operations_dotl;
extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
+extern const struct file_operations v9fs_cached_file_operations;
+extern const struct file_operations v9fs_cached_file_operations_dotl;
+extern struct kmem_cache *v9fs_inode_cache;
-#ifdef CONFIG_9P_FSCACHE
struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_destroy_inode(struct inode *inode);
-#endif
-
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
+int v9fs_init_inode(struct v9fs_session_info *v9ses,
+ struct inode *inode, int mode);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -62,8 +67,19 @@ void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);
ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64);
void v9fs_blank_wstat(struct p9_wstat *wstat);
int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
int v9fs_file_fsync_dotl(struct file *filp, int datasync);
-
-#define P9_LOCK_TIMEOUT (30*HZ)
+ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *,
+ const char __user *, size_t, loff_t *, int);
+int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
+int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode);
+static inline void v9fs_invalidate_inode_attr(struct inode *inode)
+{
+ struct v9fs_inode *v9inode;
+ v9inode = V9FS_I(inode);
+ v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
+ return;
+}
+#endif
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index b7f2a8e3863e..2524e4cbb8ea 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -39,16 +39,16 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
+#include "fid.h"
/**
- * v9fs_vfs_readpage - read an entire page in from 9P
+ * v9fs_fid_readpage - read an entire page in from 9P
*
- * @filp: file being read
+ * @fid: fid being read
* @page: structure to page
*
*/
-
-static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
int retval;
loff_t offset;
@@ -67,7 +67,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
buffer = kmap(page);
offset = page_offset(page);
- retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
+ retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
if (retval < 0) {
v9fs_uncache_page(inode, page);
goto done;
@@ -87,6 +87,19 @@ done:
}
/**
+ * v9fs_vfs_readpage - read an entire page in from 9P
+ *
+ * @filp: file being read
+ * @page: structure to page
+ *
+ */
+
+static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+{
+ return v9fs_fid_readpage(filp->private_data, page);
+}
+
+/**
* v9fs_vfs_readpages - read a set of pages from 9P
*
* @filp: file being read
@@ -124,7 +137,6 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
{
if (PagePrivate(page))
return 0;
-
return v9fs_fscache_release_page(page, gfp);
}
@@ -137,20 +149,89 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
+ /*
+ * If called with zero offset, we should release
+ * the private state associated with the page
+ */
if (offset == 0)
v9fs_fscache_invalidate_page(page);
}
+static int v9fs_vfs_writepage_locked(struct page *page)
+{
+ char *buffer;
+ int retval, len;
+ loff_t offset, size;
+ mm_segment_t old_fs;
+ struct v9fs_inode *v9inode;
+ struct inode *inode = page->mapping->host;
+
+ v9inode = V9FS_I(inode);
+ size = i_size_read(inode);
+ if (page->index == size >> PAGE_CACHE_SHIFT)
+ len = size & ~PAGE_CACHE_MASK;
+ else
+ len = PAGE_CACHE_SIZE;
+
+ set_page_writeback(page);
+
+ buffer = kmap(page);
+ offset = page_offset(page);
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* We should have writeback_fid always set */
+ BUG_ON(!v9inode->writeback_fid);
+
+ retval = v9fs_file_write_internal(inode,
+ v9inode->writeback_fid,
+ (__force const char __user *)buffer,
+ len, &offset, 0);
+ if (retval > 0)
+ retval = 0;
+
+ set_fs(old_fs);
+ kunmap(page);
+ end_page_writeback(page);
+ return retval;
+}
+
+static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ int retval;
+
+ retval = v9fs_vfs_writepage_locked(page);
+ if (retval < 0) {
+ if (retval == -EAGAIN) {
+ redirty_page_for_writepage(wbc, page);
+ retval = 0;
+ } else {
+ SetPageError(page);
+ mapping_set_error(page->mapping, retval);
+ }
+ } else
+ retval = 0;
+
+ unlock_page(page);
+ return retval;
+}
+
/**
* v9fs_launder_page - Writeback a dirty page
- * Since the writes go directly to the server, we simply return a 0
- * here to indicate success.
- *
* Returns 0 on success.
*/
static int v9fs_launder_page(struct page *page)
{
+ int retval;
+ struct inode *inode = page->mapping->host;
+
+ v9fs_fscache_wait_on_page_write(inode, page);
+ if (clear_page_dirty_for_io(page)) {
+ retval = v9fs_vfs_writepage_locked(page);
+ if (retval)
+ return retval;
+ }
return 0;
}
@@ -173,9 +254,15 @@ static int v9fs_launder_page(struct page *page)
* with an error.
*
*/
-ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
- loff_t pos, unsigned long nr_segs)
+static ssize_t
+v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+ loff_t pos, unsigned long nr_segs)
{
+ /*
+ * FIXME
+ * Now that we do caching with cache mode enabled, we need
+ * to support direct IO
+ */
P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
"off/no(%lld/%lu) EINVAL\n",
iocb->ki_filp->f_path.dentry->d_name.name,
@@ -183,11 +270,84 @@ ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
return -EINVAL;
}
+
+static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ int retval = 0;
+ struct page *page;
+ struct v9fs_inode *v9inode;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct inode *inode = mapping->host;
+
+ v9inode = V9FS_I(inode);
+start:
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ BUG_ON(!v9inode->writeback_fid);
+ if (PageUptodate(page))
+ goto out;
+
+ if (len == PAGE_CACHE_SIZE)
+ goto out;
+
+ retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
+ page_cache_release(page);
+ if (!retval)
+ goto start;
+out:
+ *pagep = page;
+ return retval;
+}
+
+static int v9fs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ loff_t last_pos = pos + copied;
+ struct inode *inode = page->mapping->host;
+
+ if (unlikely(copied < len)) {
+ /*
+ * zero out the rest of the area
+ */
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+
+ zero_user(page, from + copied, len - copied);
+ flush_dcache_page(page);
+ }
+
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ /*
+ * No need to use i_size_read() here, the i_size
+ * cannot change under us because we hold the i_mutex.
+ */
+ if (last_pos > inode->i_size) {
+ inode_add_bytes(inode, last_pos - inode->i_size);
+ i_size_write(inode, last_pos);
+ }
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+
+ return copied;
+}
+
+
const struct address_space_operations v9fs_addr_operations = {
- .readpage = v9fs_vfs_readpage,
- .readpages = v9fs_vfs_readpages,
- .releasepage = v9fs_release_page,
- .invalidatepage = v9fs_invalidate_page,
- .launder_page = v9fs_launder_page,
- .direct_IO = v9fs_direct_IO,
+ .readpage = v9fs_vfs_readpage,
+ .readpages = v9fs_vfs_readpages,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .writepage = v9fs_vfs_writepage,
+ .write_begin = v9fs_write_begin,
+ .write_end = v9fs_write_end,
+ .releasepage = v9fs_release_page,
+ .invalidatepage = v9fs_invalidate_page,
+ .launder_page = v9fs_launder_page,
+ .direct_IO = v9fs_direct_IO,
};
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 233b7d4ffe5e..b6a3b9f7fe4d 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -63,20 +63,15 @@ static int v9fs_dentry_delete(const struct dentry *dentry)
* v9fs_cached_dentry_delete - called when dentry refcount equals 0
* @dentry: dentry in question
*
- * Only return 1 if our inode is invalid. Only non-synthetic files
- * (ones without mtime == 0) should be calling this function.
- *
*/
-
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
- dentry);
+ P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+ dentry->d_name.name, dentry);
- if(!inode)
+ /* Don't cache negative dentries */
+ if (!dentry->d_inode)
return 1;
-
return 0;
}
@@ -105,7 +100,41 @@ static void v9fs_dentry_release(struct dentry *dentry)
}
}
+static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct p9_fid *fid;
+ struct inode *inode;
+ struct v9fs_inode *v9inode;
+
+ if (nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ inode = dentry->d_inode;
+ if (!inode)
+ goto out_valid;
+
+ v9inode = V9FS_I(inode);
+ if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ int retval;
+ struct v9fs_session_info *v9ses;
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ v9ses = v9fs_inode2v9ses(inode);
+ if (v9fs_proto_dotl(v9ses))
+ retval = v9fs_refresh_inode_dotl(fid, inode);
+ else
+ retval = v9fs_refresh_inode(fid, inode);
+ if (retval <= 0)
+ return retval;
+ }
+out_valid:
+ return 1;
+}
+
const struct dentry_operations v9fs_cached_dentry_operations = {
+ .d_revalidate = v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
};
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index b84ebe8cefed..9c2bdda5cd9d 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -295,7 +295,6 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
P9_DPRINTK(P9_DEBUG_VFS,
"v9fs_dir_release: inode: %p filp: %p fid: %d\n",
inode, filp, fid ? fid->fid : -1);
- filemap_write_and_wait(inode->i_mapping);
if (fid)
p9_client_clunk(fid);
return 0;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 240c30674396..78bcb97c3425 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -44,8 +44,7 @@
#include "fid.h"
#include "cache.h"
-static const struct file_operations v9fs_cached_file_operations;
-static const struct file_operations v9fs_cached_file_operations_dotl;
+static const struct vm_operations_struct v9fs_file_vm_ops;
/**
* v9fs_file_open - open a file (or directory)
@@ -57,11 +56,13 @@ static const struct file_operations v9fs_cached_file_operations_dotl;
int v9fs_file_open(struct inode *inode, struct file *file)
{
int err;
+ struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
int omode;
P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
+ v9inode = V9FS_I(inode);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
omode = file->f_flags;
@@ -89,20 +90,30 @@ int v9fs_file_open(struct inode *inode, struct file *file)
}
file->private_data = fid;
- if ((fid->qid.version) && (v9ses->cache)) {
- P9_DPRINTK(P9_DEBUG_VFS, "cached");
- /* enable cached file options */
- if(file->f_op == &v9fs_file_operations)
- file->f_op = &v9fs_cached_file_operations;
- else if (file->f_op == &v9fs_file_operations_dotl)
- file->f_op = &v9fs_cached_file_operations_dotl;
-
+ if (v9ses->cache && !v9inode->writeback_fid) {
+ /*
+ * clone a fid and add it to writeback_fid
+ * we do it during open time instead of
+ * page dirty time via write_begin/page_mkwrite
+ * because we want the write-after-unlink use case
+ * to work.
+ */
+ fid = v9fs_writeback_fid(file->f_path.dentry);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto out_error;
+ }
+ v9inode->writeback_fid = (void *) fid;
+ }
#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, file);
#endif
- }
-
return 0;
+out_error:
+ p9_client_clunk(file->private_data);
+ file->private_data = NULL;
+ return err;
}
/**
@@ -335,25 +346,22 @@ out_err:
}
/**
- * v9fs_file_readn - read from a file
- * @filp: file pointer to read
+ * v9fs_fid_readn - read from a fid
+ * @fid: fid to read
* @data: data buffer to read data into
* @udata: user data buffer to read data into
* @count: size of buffer
* @offset: offset at which to read data
*
*/
-
ssize_t
-v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
+v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
u64 offset)
{
int n, total, size;
- struct p9_fid *fid = filp->private_data;
P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
- (long long unsigned) offset, count);
-
+ (long long unsigned) offset, count);
n = 0;
total = 0;
size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
@@ -379,6 +387,22 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
}
/**
+ * v9fs_file_readn - read from a file
+ * @filp: file pointer to read
+ * @data: data buffer to read data into
+ * @udata: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+ssize_t
+v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
+ u64 offset)
+{
+ return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
+}
+
+/**
* v9fs_file_read - read from a file
* @filp: file pointer to read
* @udata: user data buffer to read data into
@@ -410,45 +434,22 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
return ret;
}
-/**
- * v9fs_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
- *
- */
-
-static ssize_t
-v9fs_file_write(struct file *filp, const char __user * data,
- size_t count, loff_t * offset)
+ssize_t
+v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
+ const char __user *data, size_t count,
+ loff_t *offset, int invalidate)
{
- ssize_t retval;
- size_t total = 0;
int n;
- struct p9_fid *fid;
+ loff_t i_size;
+ size_t total = 0;
struct p9_client *clnt;
- struct inode *inode = filp->f_path.dentry->d_inode;
loff_t origin = *offset;
unsigned long pg_start, pg_end;
P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
(int)count, (int)*offset);
- fid = filp->private_data;
clnt = fid->clnt;
-
- retval = generic_write_checks(filp, &origin, &count, 0);
- if (retval)
- goto out;
-
- retval = -EINVAL;
- if ((ssize_t) count < 0)
- goto out;
- retval = 0;
- if (!count)
- goto out;
-
do {
n = p9_client_write(fid, NULL, data+total, origin+total, count);
if (n <= 0)
@@ -457,25 +458,60 @@ v9fs_file_write(struct file *filp, const char __user * data,
total += n;
} while (count > 0);
- if (total > 0) {
+ if (invalidate && (total > 0)) {
pg_start = origin >> PAGE_CACHE_SHIFT;
pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
if (inode->i_mapping && inode->i_mapping->nrpages)
invalidate_inode_pages2_range(inode->i_mapping,
pg_start, pg_end);
*offset += total;
- i_size_write(inode, i_size_read(inode) + total);
- inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+ i_size = i_size_read(inode);
+ if (*offset > i_size) {
+ inode_add_bytes(inode, *offset - i_size);
+ i_size_write(inode, *offset);
+ }
}
-
if (n < 0)
- retval = n;
- else
- retval = total;
+ return n;
+
+ return total;
+}
+
+/**
+ * v9fs_file_write - write to a file
+ * @filp: file pointer to write
+ * @data: data buffer to write data from
+ * @count: size of buffer
+ * @offset: offset at which to write data
+ *
+ */
+static ssize_t
+v9fs_file_write(struct file *filp, const char __user * data,
+ size_t count, loff_t *offset)
+{
+ ssize_t retval = 0;
+ loff_t origin = *offset;
+
+
+ retval = generic_write_checks(filp, &origin, &count, 0);
+ if (retval)
+ goto out;
+
+ retval = -EINVAL;
+ if ((ssize_t) count < 0)
+ goto out;
+ retval = 0;
+ if (!count)
+ goto out;
+
+ return v9fs_file_write_internal(filp->f_path.dentry->d_inode,
+ filp->private_data,
+ data, count, offset, 1);
out:
return retval;
}
+
static int v9fs_file_fsync(struct file *filp, int datasync)
{
struct p9_fid *fid;
@@ -505,28 +541,182 @@ int v9fs_file_fsync_dotl(struct file *filp, int datasync)
return retval;
}
-static const struct file_operations v9fs_cached_file_operations = {
+static int
+v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int retval;
+
+ retval = generic_file_mmap(file, vma);
+ if (!retval)
+ vma->vm_ops = &v9fs_file_vm_ops;
+
+ return retval;
+}
+
+static int
+v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct v9fs_inode *v9inode;
+ struct page *page = vmf->page;
+ struct file *filp = vma->vm_file;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+
+
+ P9_DPRINTK(P9_DEBUG_VFS, "page %p fid %lx\n",
+ page, (unsigned long)filp->private_data);
+
+ v9inode = V9FS_I(inode);
+ /* make sure the cache has finished storing the page */
+ v9fs_fscache_wait_on_page_write(inode, page);
+ BUG_ON(!v9inode->writeback_fid);
+ lock_page(page);
+ if (page->mapping != inode->i_mapping)
+ goto out_unlock;
+
+ return VM_FAULT_LOCKED;
+out_unlock:
+ unlock_page(page);
+ return VM_FAULT_NOPAGE;
+}
+
+static ssize_t
+v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
+ loff_t *offsetp)
+{
+ loff_t size, offset;
+ struct inode *inode;
+ struct address_space *mapping;
+
+ offset = *offsetp;
+ mapping = filp->f_mapping;
+ inode = mapping->host;
+ if (!count)
+ return 0;
+ size = i_size_read(inode);
+ if (offset < size)
+ filemap_write_and_wait_range(mapping, offset,
+ offset + count - 1);
+
+ return v9fs_file_read(filp, udata, count, offsetp);
+}
+
+/**
+ * v9fs_cached_file_read - read from a file
+ * @filp: file pointer to read
+ * @udata: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+static ssize_t
+v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
+ loff_t *offset)
+{
+ if (filp->f_flags & O_DIRECT)
+ return v9fs_direct_read(filp, data, count, offset);
+ return do_sync_read(filp, data, count, offset);
+}
+
+static ssize_t
+v9fs_direct_write(struct file *filp, const char __user * data,
+ size_t count, loff_t *offsetp)
+{
+ loff_t offset;
+ ssize_t retval;
+ struct inode *inode;
+ struct address_space *mapping;
+
+ offset = *offsetp;
+ mapping = filp->f_mapping;
+ inode = mapping->host;
+ if (!count)
+ return 0;
+
+ mutex_lock(&inode->i_mutex);
+ retval = filemap_write_and_wait_range(mapping, offset,
+ offset + count - 1);
+ if (retval)
+ goto err_out;
+ /*
+ * After a write we want buffered reads to be sure to go to disk to get
+ * the new data. We invalidate clean cached pages from the region we're
+ * about to write. We do this *before* the write so that if we fail
+ * here we fall back to buffered write
+ */
+ if (mapping->nrpages) {
+ pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+
+ retval = invalidate_inode_pages2_range(mapping,
+ pg_start, pg_end);
+ /*
+ * If a page cannot be invalidated, fall back
+ * to buffered write.
+ */
+ if (retval) {
+ if (retval == -EBUSY)
+ goto buff_write;
+ goto err_out;
+ }
+ }
+ retval = v9fs_file_write(filp, data, count, offsetp);
+err_out:
+ mutex_unlock(&inode->i_mutex);
+ return retval;
+
+buff_write:
+ mutex_unlock(&inode->i_mutex);
+ return do_sync_write(filp, data, count, offsetp);
+}
+
+/**
+ * v9fs_cached_file_write - write to a file
+ * @filp: file pointer to write
+ * @data: data buffer to write data from
+ * @count: size of buffer
+ * @offset: offset at which to write data
+ *
+ */
+static ssize_t
+v9fs_cached_file_write(struct file *filp, const char __user * data,
+ size_t count, loff_t *offset)
+{
+
+ if (filp->f_flags & O_DIRECT)
+ return v9fs_direct_write(filp, data, count, offset);
+ return do_sync_write(filp, data, count, offset);
+}
+
+static const struct vm_operations_struct v9fs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = v9fs_vm_page_mkwrite,
+};
+
+
+const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
- .read = do_sync_read,
+ .read = v9fs_cached_file_read,
+ .write = v9fs_cached_file_write,
.aio_read = generic_file_aio_read,
- .write = v9fs_file_write,
+ .aio_write = generic_file_aio_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
- .mmap = generic_file_readonly_mmap,
+ .mmap = v9fs_file_mmap,
.fsync = v9fs_file_fsync,
};
-static const struct file_operations v9fs_cached_file_operations_dotl = {
+const struct file_operations v9fs_cached_file_operations_dotl = {
.llseek = generic_file_llseek,
- .read = do_sync_read,
+ .read = v9fs_cached_file_read,
+ .write = v9fs_cached_file_write,
.aio_read = generic_file_aio_read,
- .write = v9fs_file_write,
+ .aio_write = generic_file_aio_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
- .mmap = generic_file_readonly_mmap,
+ .mmap = v9fs_file_mmap,
.fsync = v9fs_file_fsync_dotl,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index b76a40bdf4c2..8a2c232f708a 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -203,26 +203,25 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
wstat->extension = NULL;
}
-#ifdef CONFIG_9P_FSCACHE
/**
* v9fs_alloc_inode - helper function to allocate an inode
- * This callback is executed before setting up the inode so that we
- * can associate a vcookie with each inode.
*
*/
-
struct inode *v9fs_alloc_inode(struct super_block *sb)
{
- struct v9fs_cookie *vcookie;
- vcookie = (struct v9fs_cookie *)kmem_cache_alloc(vcookie_cache,
- GFP_KERNEL);
- if (!vcookie)
+ struct v9fs_inode *v9inode;
+ v9inode = (struct v9fs_inode *)kmem_cache_alloc(v9fs_inode_cache,
+ GFP_KERNEL);
+ if (!v9inode)
return NULL;
-
- vcookie->fscache = NULL;
- vcookie->qid = NULL;
- spin_lock_init(&vcookie->lock);
- return &vcookie->inode;
+#ifdef CONFIG_9P_FSCACHE
+ v9inode->fscache = NULL;
+ v9inode->fscache_key = NULL;
+ spin_lock_init(&v9inode->fscache_lock);
+#endif
+ v9inode->writeback_fid = NULL;
+ v9inode->cache_validity = 0;
+ return &v9inode->vfs_inode;
}
/**
@@ -234,35 +233,18 @@ static void v9fs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
- kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode));
+ kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
}
void v9fs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, v9fs_i_callback);
}
-#endif
-/**
- * v9fs_get_inode - helper function to setup an inode
- * @sb: superblock
- * @mode: mode to setup inode with
- *
- */
-
-struct inode *v9fs_get_inode(struct super_block *sb, int mode)
+int v9fs_init_inode(struct v9fs_session_info *v9ses,
+ struct inode *inode, int mode)
{
- int err;
- struct inode *inode;
- struct v9fs_session_info *v9ses = sb->s_fs_info;
-
- P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
-
- inode = new_inode(sb);
- if (!inode) {
- P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
- return ERR_PTR(-ENOMEM);
- }
+ int err = 0;
inode_init_owner(inode, NULL, mode);
inode->i_blocks = 0;
@@ -292,14 +274,20 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
case S_IFREG:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
- inode->i_fop = &v9fs_file_operations_dotl;
+ if (v9ses->cache)
+ inode->i_fop =
+ &v9fs_cached_file_operations_dotl;
+ else
+ inode->i_fop = &v9fs_file_operations_dotl;
} else {
inode->i_op = &v9fs_file_inode_operations;
- inode->i_fop = &v9fs_file_operations;
+ if (v9ses->cache)
+ inode->i_fop = &v9fs_cached_file_operations;
+ else
+ inode->i_fop = &v9fs_file_operations;
}
break;
-
case S_IFLNK:
if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) {
P9_DPRINTK(P9_DEBUG_ERROR, "extended modes used with "
@@ -335,12 +323,37 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
err = -EINVAL;
goto error;
}
+error:
+ return err;
- return inode;
+}
-error:
- iput(inode);
- return ERR_PTR(err);
+/**
+ * v9fs_get_inode - helper function to setup an inode
+ * @sb: superblock
+ * @mode: mode to setup inode with
+ *
+ */
+
+struct inode *v9fs_get_inode(struct super_block *sb, int mode)
+{
+ int err;
+ struct inode *inode;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
+
+ inode = new_inode(sb);
+ if (!inode) {
+ P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ err = v9fs_init_inode(v9ses, inode, mode);
+ if (err) {
+ iput(inode);
+ return ERR_PTR(err);
+ }
+ return inode;
}
/*
@@ -403,6 +416,8 @@ error:
*/
void v9fs_evict_inode(struct inode *inode)
{
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+
truncate_inode_pages(inode->i_mapping, 0);
end_writeback(inode);
filemap_fdatawrite(inode->i_mapping);
@@ -410,41 +425,67 @@ void v9fs_evict_inode(struct inode *inode)
#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_put_cookie(inode);
#endif
+ /* clunk the fid stashed in writeback_fid */
+ if (v9inode->writeback_fid) {
+ p9_client_clunk(v9inode->writeback_fid);
+ v9inode->writeback_fid = NULL;
+ }
}
-struct inode *
-v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
+static struct inode *v9fs_qid_iget(struct super_block *sb,
+ struct p9_qid *qid,
+ struct p9_wstat *st)
{
- int err, umode;
- struct inode *ret = NULL;
- struct p9_wstat *st;
-
- st = p9_client_stat(fid);
- if (IS_ERR(st))
- return ERR_CAST(st);
+ int retval, umode;
+ unsigned long i_ino;
+ struct inode *inode;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
+ i_ino = v9fs_qid2ino(qid);
+ inode = iget_locked(sb, i_ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+ /*
+ * initialize the inode with the stat info
+ * FIXME!! we may need support for stale inodes
+ * later.
+ */
umode = p9mode2unixmode(v9ses, st->mode);
- ret = v9fs_get_inode(sb, umode);
- if (IS_ERR(ret)) {
- err = PTR_ERR(ret);
+ retval = v9fs_init_inode(v9ses, inode, umode);
+ if (retval)
goto error;
- }
-
- v9fs_stat2inode(st, ret, sb);
- ret->i_ino = v9fs_qid2ino(&st->qid);
+ v9fs_stat2inode(st, inode, sb);
#ifdef CONFIG_9P_FSCACHE
- v9fs_vcookie_set_qid(ret, &st->qid);
- v9fs_cache_inode_get_cookie(ret);
+ v9fs_fscache_set_key(inode, &st->qid);
+ v9fs_cache_inode_get_cookie(inode);
#endif
- p9stat_free(st);
- kfree(st);
- return ret;
+ unlock_new_inode(inode);
+ return inode;
error:
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(retval);
+
+}
+
+struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
+{
+ struct p9_wstat *st;
+ struct inode *inode = NULL;
+
+ st = p9_client_stat(fid);
+ if (IS_ERR(st))
+ return ERR_CAST(st);
+
+ inode = v9fs_qid_iget(sb, &st->qid, st);
p9stat_free(st);
kfree(st);
- return ERR_PTR(err);
+ return inode;
}
/**
@@ -458,8 +499,8 @@ error:
static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
{
int retval;
- struct inode *file_inode;
struct p9_fid *v9fid;
+ struct inode *file_inode;
P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %d\n", dir, file,
rmdir);
@@ -470,8 +511,20 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
return PTR_ERR(v9fid);
retval = p9_client_remove(v9fid);
- if (!retval)
- drop_nlink(file_inode);
+ if (!retval) {
+ /*
+ * directories on unlink should have zero
+ * link count
+ */
+ if (rmdir) {
+ clear_nlink(file_inode);
+ drop_nlink(dir);
+ } else
+ drop_nlink(file_inode);
+
+ v9fs_invalidate_inode_attr(file_inode);
+ v9fs_invalidate_inode_attr(dir);
+ }
return retval;
}
@@ -531,7 +584,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
}
/* instantiate inode and assign the unopened fid to the dentry */
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@@ -570,9 +623,10 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
int err;
u32 perm;
int flags;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid;
struct file *filp;
+ struct v9fs_inode *v9inode;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid, *inode_fid;
err = 0;
fid = NULL;
@@ -592,8 +646,25 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
goto error;
}
+ v9fs_invalidate_inode_attr(dir);
/* if we are opening a file, assign the open fid to the file */
if (nd && nd->flags & LOOKUP_OPEN) {
+ v9inode = V9FS_I(dentry->d_inode);
+ if (v9ses->cache && !v9inode->writeback_fid) {
+ /*
+ * clone a fid and add it to writeback_fid
+ * we do it during open time instead of
+ * page dirty time via write_begin/page_mkwrite
+ * because we want the write-after-unlink use case
+ * to work.
+ */
+ inode_fid = v9fs_writeback_fid(dentry);
+ if (IS_ERR(inode_fid)) {
+ err = PTR_ERR(inode_fid);
+ goto error;
+ }
+ v9inode->writeback_fid = (void *) inode_fid;
+ }
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
@@ -601,6 +672,10 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
}
filp->private_data = fid;
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache)
+ v9fs_cache_inode_set_cookie(dentry->d_inode, filp);
+#endif
} else
p9_client_clunk(fid);
@@ -625,8 +700,8 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
int err;
u32 perm;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid;
+ struct v9fs_session_info *v9ses;
P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
@@ -636,6 +711,9 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
fid = NULL;
+ } else {
+ inc_nlink(dir);
+ v9fs_invalidate_inode_attr(dir);
}
if (fid)
@@ -687,7 +765,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(result);
}
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
inode = NULL;
@@ -747,17 +825,19 @@ int
v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
+ int retval;
struct inode *old_inode;
+ struct inode *new_inode;
struct v9fs_session_info *v9ses;
struct p9_fid *oldfid;
struct p9_fid *olddirfid;
struct p9_fid *newdirfid;
struct p9_wstat wstat;
- int retval;
P9_DPRINTK(P9_DEBUG_VFS, "\n");
retval = 0;
old_inode = old_dentry->d_inode;
+ new_inode = new_dentry->d_inode;
v9ses = v9fs_inode2v9ses(old_inode);
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid))
@@ -798,9 +878,30 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = p9_client_wstat(oldfid, &wstat);
clunk_newdir:
- if (!retval)
+ if (!retval) {
+ if (new_inode) {
+ if (S_ISDIR(new_inode->i_mode))
+ clear_nlink(new_inode);
+ else
+ drop_nlink(new_inode);
+ /*
+ * Work around vfs rename rehash bug with
+ * FS_RENAME_DOES_D_MOVE
+ */
+ v9fs_invalidate_inode_attr(new_inode);
+ }
+ if (S_ISDIR(old_inode->i_mode)) {
+ if (!new_inode)
+ inc_nlink(new_dir);
+ drop_nlink(old_dir);
+ }
+ v9fs_invalidate_inode_attr(old_inode);
+ v9fs_invalidate_inode_attr(old_dir);
+ v9fs_invalidate_inode_attr(new_dir);
+
/* successful rename */
d_move(old_dentry, new_dentry);
+ }
up_write(&v9ses->rename_sem);
p9_client_clunk(newdirfid);
@@ -831,9 +932,10 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- return simple_getattr(mnt, dentry, stat);
-
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ generic_fillattr(dentry->d_inode, stat);
+ return 0;
+ }
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -891,17 +993,20 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (iattr->ia_valid & ATTR_GID)
wstat.n_gid = iattr->ia_gid;
}
-
- retval = p9_client_wstat(fid, &wstat);
- if (retval < 0)
- return retval;
-
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(dentry->d_inode)) {
retval = vmtruncate(dentry->d_inode, iattr->ia_size);
if (retval)
return retval;
}
+ /* Write all dirty data */
+ if (S_ISREG(dentry->d_inode->i_mode))
+ filemap_write_and_wait(dentry->d_inode->i_mapping);
+
+ retval = p9_client_wstat(fid, &wstat);
+ if (retval < 0)
+ return retval;
+ v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
@@ -924,6 +1029,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
char tag_name[14];
unsigned int i_nlink;
struct v9fs_session_info *v9ses = sb->s_fs_info;
+ struct v9fs_inode *v9inode = V9FS_I(inode);
inode->i_nlink = 1;
@@ -983,6 +1089,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
/* not real number of blocks, but 512 byte ones ... */
inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+ v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
/**
@@ -1115,8 +1222,8 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
int mode, const char *extension)
{
u32 perm;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid;
+ struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(dir);
if (!v9fs_proto_dotu(v9ses)) {
@@ -1130,6 +1237,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
if (IS_ERR(fid))
return PTR_ERR(fid);
+ v9fs_invalidate_inode_attr(dir);
p9_client_clunk(fid);
return 0;
}
@@ -1166,8 +1274,8 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int retval;
- struct p9_fid *oldfid;
char *name;
+ struct p9_fid *oldfid;
P9_DPRINTK(P9_DEBUG_VFS,
" %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
@@ -1186,7 +1294,10 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
sprintf(name, "%d\n", oldfid->fid);
retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name);
__putname(name);
-
+ if (!retval) {
+ v9fs_refresh_inode(oldfid, old_dentry->d_inode);
+ v9fs_invalidate_inode_attr(dir);
+ }
clunk_fid:
p9_client_clunk(oldfid);
return retval;
@@ -1237,6 +1348,32 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
return retval;
}
+int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
+{
+ loff_t i_size;
+ struct p9_wstat *st;
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ st = p9_client_stat(fid);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ spin_lock(&inode->i_lock);
+ /*
+ * We don't want to refresh inode->i_size,
+ * because we may have cached data
+ */
+ i_size = inode->i_size;
+ v9fs_stat2inode(st, inode, inode->i_sb);
+ if (v9ses->cache)
+ inode->i_size = i_size;
+ spin_unlock(&inode->i_lock);
+ p9stat_free(st);
+ kfree(st);
+ return 0;
+}
+
static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index fe3ffa9aace4..67c138e94feb 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -86,40 +86,63 @@ static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
return dentry;
}
+static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ struct p9_qid *qid,
+ struct p9_fid *fid,
+ struct p9_stat_dotl *st)
+{
+ int retval;
+ unsigned long i_ino;
+ struct inode *inode;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
+
+ i_ino = v9fs_qid2ino(qid);
+ inode = iget_locked(sb, i_ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+ /*
+ * initialize the inode with the stat info
+ * FIXME!! we may need support for stale inodes
+ * later.
+ */
+ retval = v9fs_init_inode(v9ses, inode, st->st_mode);
+ if (retval)
+ goto error;
+
+ v9fs_stat2inode_dotl(st, inode);
+#ifdef CONFIG_9P_FSCACHE
+ v9fs_fscache_set_key(inode, &st->qid);
+ v9fs_cache_inode_get_cookie(inode);
+#endif
+ retval = v9fs_get_acl(inode, fid);
+ if (retval)
+ goto error;
+
+ unlock_new_inode(inode);
+ return inode;
+error:
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(retval);
+
+}
+
struct inode *
-v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
+v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
{
- struct inode *ret = NULL;
- int err;
struct p9_stat_dotl *st;
+ struct inode *inode = NULL;
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
if (IS_ERR(st))
return ERR_CAST(st);
- ret = v9fs_get_inode(sb, st->st_mode);
- if (IS_ERR(ret)) {
- err = PTR_ERR(ret);
- goto error;
- }
-
- v9fs_stat2inode_dotl(st, ret);
- ret->i_ino = v9fs_qid2ino(&st->qid);
-#ifdef CONFIG_9P_FSCACHE
- v9fs_vcookie_set_qid(ret, &st->qid);
- v9fs_cache_inode_get_cookie(ret);
-#endif
- err = v9fs_get_acl(ret, fid);
- if (err) {
- iput(ret);
- goto error;
- }
- kfree(st);
- return ret;
-error:
+ inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st);
kfree(st);
- return ERR_PTR(err);
+ return inode;
}
/**
@@ -136,16 +159,17 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
struct nameidata *nd)
{
int err = 0;
- char *name = NULL;
gid_t gid;
int flags;
mode_t mode;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid = NULL;
- struct p9_fid *dfid, *ofid;
+ char *name = NULL;
struct file *filp;
struct p9_qid qid;
struct inode *inode;
+ struct p9_fid *fid = NULL;
+ struct v9fs_inode *v9inode;
+ struct p9_fid *dfid, *ofid, *inode_fid;
+ struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
v9ses = v9fs_inode2v9ses(dir);
@@ -196,6 +220,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
err);
goto error;
}
+ v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
fid = p9_client_walk(dfid, 1, &name, 1);
@@ -205,7 +230,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
fid = NULL;
goto error;
}
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@@ -219,6 +244,22 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, dacl, pacl);
+ v9inode = V9FS_I(inode);
+ if (v9ses->cache && !v9inode->writeback_fid) {
+ /*
+ * clone a fid and add it to writeback_fid
+ * we do it during open time instead of
+ * page dirty time via write_begin/page_mkwrite
+ * because we want the write-after-unlink use case
+ * to work.
+ */
+ inode_fid = v9fs_writeback_fid(dentry);
+ if (IS_ERR(inode_fid)) {
+ err = PTR_ERR(inode_fid);
+ goto error;
+ }
+ v9inode->writeback_fid = (void *) inode_fid;
+ }
/* Since we are opening a file, assign the open fid to the file */
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
@@ -226,6 +267,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
return PTR_ERR(filp);
}
filp->private_data = ofid;
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache)
+ v9fs_cache_inode_set_cookie(inode, filp);
+#endif
return 0;
error:
@@ -300,7 +345,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
goto error;
}
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -327,7 +372,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, dacl, pacl);
-
+ inc_nlink(dir);
+ v9fs_invalidate_inode_attr(dir);
error:
if (fid)
p9_client_clunk(fid);
@@ -346,9 +392,10 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- return simple_getattr(mnt, dentry, stat);
-
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ generic_fillattr(dentry->d_inode, stat);
+ return 0;
+ }
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -406,16 +453,20 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
if (IS_ERR(fid))
return PTR_ERR(fid);
- retval = p9_client_setattr(fid, &p9attr);
- if (retval < 0)
- return retval;
-
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(dentry->d_inode)) {
retval = vmtruncate(dentry->d_inode, iattr->ia_size);
if (retval)
return retval;
}
+ /* Write all dirty data */
+ if (S_ISREG(dentry->d_inode->i_mode))
+ filemap_write_and_wait(dentry->d_inode->i_mapping);
+
+ retval = p9_client_setattr(fid, &p9attr);
+ if (retval < 0)
+ return retval;
+ v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
@@ -439,6 +490,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
{
+ struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
inode->i_atime.tv_sec = stat->st_atime_sec;
@@ -497,20 +549,21 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
* because the inode structure does not have fields for them.
*/
+ v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
static int
v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct v9fs_session_info *v9ses;
- struct p9_fid *dfid;
- struct p9_fid *fid = NULL;
- struct inode *inode;
- struct p9_qid qid;
- char *name;
int err;
gid_t gid;
+ char *name;
+ struct p9_qid qid;
+ struct inode *inode;
+ struct p9_fid *dfid;
+ struct p9_fid *fid = NULL;
+ struct v9fs_session_info *v9ses;
name = (char *) dentry->d_name.name;
P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
@@ -534,6 +587,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
goto error;
}
+ v9fs_invalidate_inode_attr(dir);
if (v9ses->cache) {
/* Now walk from the parent so we can get an unopened fid. */
fid = p9_client_walk(dfid, 1, &name, 1);
@@ -546,7 +600,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
}
/* instantiate inode and assign the unopened fid to dentry */
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -588,10 +642,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int err;
- struct p9_fid *dfid, *oldfid;
char *name;
- struct v9fs_session_info *v9ses;
struct dentry *dir_dentry;
+ struct p9_fid *dfid, *oldfid;
+ struct v9fs_session_info *v9ses;
P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
dir->i_ino, old_dentry->d_name.name,
@@ -616,29 +670,17 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
return err;
}
+ v9fs_invalidate_inode_attr(dir);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
/* Get the latest stat info from server. */
struct p9_fid *fid;
- struct p9_stat_dotl *st;
-
fid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
- st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
- if (IS_ERR(st))
- return PTR_ERR(st);
-
- v9fs_stat2inode_dotl(st, old_dentry->d_inode);
-
- kfree(st);
- } else {
- /* Caching disabled. No need to get upto date stat info.
- * This dentry will be released immediately. So, just hold the
- * inode
- */
- ihold(old_dentry->d_inode);
+ v9fs_refresh_inode_dotl(fid, old_dentry->d_inode);
}
+ ihold(old_dentry->d_inode);
d_instantiate(dentry, old_dentry->d_inode);
return err;
@@ -657,12 +699,12 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
dev_t rdev)
{
int err;
+ gid_t gid;
char *name;
mode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
- gid_t gid;
struct p9_qid qid;
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
@@ -699,6 +741,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
if (err < 0)
goto error;
+ v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
fid = p9_client_walk(dfid, 1, &name, 1);
@@ -710,7 +753,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
goto error;
}
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -782,6 +825,31 @@ ndset:
return NULL;
}
+int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
+{
+ loff_t i_size;
+ struct p9_stat_dotl *st;
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ spin_lock(&inode->i_lock);
+ /*
+ * We don't want to refresh inode->i_size,
+ * because we may have cached data
+ */
+ i_size = inode->i_size;
+ v9fs_stat2inode_dotl(st, inode);
+ if (v9ses->cache)
+ inode->i_size = i_size;
+ spin_unlock(&inode->i_lock);
+ kfree(st);
+ return 0;
+}
+
const struct inode_operations v9fs_dir_inode_operations_dotl = {
.create = v9fs_vfs_create_dotl,
.lookup = v9fs_vfs_lookup,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index dbaabe3b8131..09fd08d1606f 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -86,12 +86,15 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
} else
sb->s_op = &v9fs_super_ops;
sb->s_bdi = &v9ses->bdi;
+ if (v9ses->cache)
+ sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
- sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
- MS_NOATIME;
+ sb->s_flags = flags | MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+ if (!v9ses->cache)
+ sb->s_flags |= MS_SYNCHRONOUS;
#ifdef CONFIG_9P_FS_POSIX_ACL
- if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)
+ if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
sb->s_flags |= MS_POSIXACL;
#endif
@@ -151,7 +154,6 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(inode);
goto release_sb;
}
-
root = d_alloc_root(inode);
if (!root) {
iput(inode);
@@ -166,7 +168,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(st);
goto release_sb;
}
-
+ root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
v9fs_stat2inode_dotl(st, root->d_inode);
kfree(st);
} else {
@@ -183,10 +185,21 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
}
+ v9fs_fid_add(root, fid);
retval = v9fs_get_acl(inode, fid);
if (retval)
goto release_sb;
- v9fs_fid_add(root, fid);
+ /*
+ * Add the root fid to session info. This is used
+ * for file system sync. We want a cloned fid here
+ * so that we can do a sync_filesystem after a
+ * shrink_dcache_for_umount
+ */
+ v9ses->root_fid = v9fs_fid_clone(root);
+ if (IS_ERR(v9ses->root_fid)) {
+ retval = PTR_ERR(v9ses->root_fid);
+ goto release_sb;
+ }
P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
return dget(sb->s_root);
@@ -197,15 +210,11 @@ close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return ERR_PTR(retval);
-
release_sb:
/*
- * we will do the session_close and root dentry release
- * in the below call. But we need to clunk fid, because we haven't
- * attached the fid to dentry so it won't get clunked
- * automatically.
+ * we will do the session_close and root dentry
+ * release in the below call.
*/
- p9_client_clunk(fid);
deactivate_locked_super(sb);
return ERR_PTR(retval);
}
@@ -223,7 +232,7 @@ static void v9fs_kill_super(struct super_block *s)
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
kill_anon_super(s);
-
+ p9_client_clunk(v9ses->root_fid);
v9fs_session_cancel(v9ses);
v9fs_session_close(v9ses);
kfree(v9ses);
@@ -276,11 +285,31 @@ done:
return res;
}
+static int v9fs_sync_fs(struct super_block *sb, int wait)
+{
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
+ return p9_client_sync_fs(v9ses->root_fid);
+}
+
+static int v9fs_drop_inode(struct inode *inode)
+{
+ struct v9fs_session_info *v9ses;
+ v9ses = v9fs_inode2v9ses(inode);
+ if (v9ses->cache)
+ return generic_drop_inode(inode);
+ /*
+ * in case of non-cached mode, always drop the
+ * inode because we want the inode attributes
+ * to always match those on the server.
+ */
+ return 1;
+}
+
static const struct super_operations v9fs_super_ops = {
-#ifdef CONFIG_9P_FSCACHE
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
-#endif
.statfs = simple_statfs,
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
@@ -288,11 +317,11 @@ static const struct super_operations v9fs_super_ops = {
};
static const struct super_operations v9fs_super_ops_dotl = {
-#ifdef CONFIG_9P_FSCACHE
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
-#endif
+ .sync_fs = v9fs_sync_fs,
.statfs = v9fs_statfs,
+ .drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
@@ -303,5 +332,5 @@ struct file_system_type v9fs_fs_type = {
.mount = v9fs_mount,
.kill_sb = v9fs_kill_super,
.owner = THIS_MODULE,
- .fs_flags = FS_RENAME_DOES_D_MOVE,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT,
};
diff --git a/fs/Kconfig b/fs/Kconfig
index 3db9caa57edc..efb7d4ec6fcf 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -187,6 +187,7 @@ source "fs/omfs/Kconfig"
source "fs/hpfs/Kconfig"
source "fs/qnx4/Kconfig"
source "fs/romfs/Kconfig"
+source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index a7f7cef0c0c8..db71a5b21a4f 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -121,3 +121,4 @@ obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_EXOFS_FS) += exofs/
obj-$(CONFIG_CEPH_FS) += ceph/
+obj-$(CONFIG_PSTORE) += pstore/
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb1d3b5..789b3afb3423 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
candidate->first = candidate->last = index;
candidate->offset_first = from;
candidate->to_last = to;
+ INIT_LIST_HEAD(&candidate->link);
candidate->usage = 1;
candidate->state = AFS_WBACK_PENDING;
init_waitqueue_head(&candidate->waitq);
diff --git a/fs/aio.c b/fs/aio.c
index fc557a3be0a9..7f54f43b8f7c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -85,7 +85,7 @@ static int __init aio_setup(void)
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
- aio_wq = create_workqueue("aio");
+ aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
BUG_ON(!aio_wq || !abe_pool);
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
-#define get_ioctx(kioctx) do { \
- BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
- atomic_inc(&(kioctx)->users); \
-} while (0)
-#define put_ioctx(kioctx) do { \
- BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
- if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
- __put_ioctx(kioctx); \
-} while (0)
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+ BUG_ON(atomic_read(&kioctx->users) <= 0);
+ atomic_inc(&kioctx->users);
+}
+
+static inline int try_get_ioctx(struct kioctx *kioctx)
+{
+ return atomic_inc_not_zero(&kioctx->users);
+}
+
+static inline void put_ioctx(struct kioctx *kioctx)
+{
+ BUG_ON(atomic_read(&kioctx->users) <= 0);
+ if (unlikely(atomic_dec_and_test(&kioctx->users)))
+ __put_ioctx(kioctx);
+}
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
@@ -569,7 +577,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
- queue_work(aio_wq, &fput_work);
+ schedule_work(&fput_work);
} else {
req->ki_filp = NULL;
really_put_req(ctx, req);
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
rcu_read_lock();
hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
- if (ctx->user_id == ctx_id && !ctx->dead) {
- get_ioctx(ctx);
+ /*
+ * RCU protects us against accessing freed memory but
+ * we have to be careful not to get a reference when the
+ * reference count already dropped to 0 (ctx->dead test
+ * is unreliable because of races).
+ */
+ if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
ret = ctx;
break;
}
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
goto out_put_req;
spin_lock_irq(&ctx->ctx_lock);
+ /*
+ * We could have raced with io_destroy() and are currently holding a
+ * reference to ctx which should be destroyed. We cannot submit IO
+ * since ctx gets freed as soon as io_submit() puts its reference. The
+ * check here is reliable: io_destroy() sets ctx->dead before waiting
+ * for outstanding IO and the barrier between these two is realized by
+ * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
+ * increment ctx->reqs_active before checking for ctx->dead and the
+ * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+ * don't see ctx->dead set here, io_destroy() waits for our IO to
+ * finish.
+ */
+ if (ctx->dead) {
+ spin_unlock_irq(&ctx->ctx_lock);
+ ret = -EINVAL;
+ goto out_put_req;
+ }
aio_run_iocb(req);
if (!list_empty(&ctx->run_list)) {
/* drain the run list */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4fb8a3431531..889287019599 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
if (ret)
goto out_del;
+ /*
+ * bdev could be deleted beneath us which would implicitly destroy
+ * the holder directory. Hold on to it.
+ */
+ kobject_get(bdev->bd_part->holder_dir);
list_add(&holder->list, &bdev->bd_holder_disks);
goto out_unlock;
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
del_symlink(bdev->bd_part->holder_dir,
&disk_to_dev(disk)->kobj);
+ kobject_put(bdev->bd_part->holder_dir);
list_del_init(&holder->list);
kfree(holder);
}
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
* flush_disk - invalidates all buffer-cache entries on a disk
*
* @bdev: struct block device to be flushed
+ * @kill_dirty: flag to guide handling of dirty inodes
*
* Invalidates all buffer-cache entries on a disk. It should be called
* when a disk has been changed -- either by a media change or online
* resize.
*/
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
- if (__invalidate_device(bdev)) {
+ if (__invalidate_device(bdev, kill_dirty)) {
char name[BDEVNAME_SIZE] = "";
if (bdev->bd_disk)
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
"%s: detected capacity change from %lld to %lld\n",
name, bdev_size, disk_size);
i_size_write(bdev->bd_inode, disk_size);
- flush_disk(bdev);
+ flush_disk(bdev, false);
}
}
EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return 0;
- flush_disk(bdev);
+ flush_disk(bdev, true);
if (bdops->revalidate_disk)
bdops->revalidate_disk(bdev->bd_disk);
return 1;
@@ -1600,7 +1607,7 @@ fail:
}
EXPORT_SYMBOL(lookup_bdev);
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
struct super_block *sb = get_super(bdev);
int res = 0;
@@ -1613,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
* hold).
*/
shrink_dcache_sb(sb);
- res = invalidate_inodes(sb);
+ res = invalidate_inodes(sb, kill_dirty);
drop_super(sb);
}
invalidate_bdev(bdev);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2c98b3af6052..6f820fa23df4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1254,6 +1254,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_SPACE_CACHE (1 << 12)
#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
u64 start, u64 end);
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 type);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1aa8d607bc7..100b07f021b4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2493,7 +2493,7 @@ int close_ctree(struct btrfs_root *root)
* ERROR state on disk.
*
* 2. when btrfs flips readonly just in btrfs_commit_super,
- * and in such case, btrfs cannnot write sb via btrfs_commit_super,
+ * and in such case, btrfs cannot write sb via btrfs_commit_super,
* and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
* btrfs will cleanup all FS resources first and write sb then.
*/
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f3c96fc01439..588ff9849873 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5376,7 +5376,7 @@ again:
num_bytes, data, 1);
goto again;
}
- if (ret == -ENOSPC) {
+ if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, data);
@@ -8065,6 +8065,13 @@ out:
return ret;
}
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 type)
+{
+ u64 alloc_flags = get_alloc_profile(root, type);
+ return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
/*
* helper to account the unused space of all the readonly block group in the
* list. takes mirrors into account.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 92ac5192c518..fd3f172e94e6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
*/
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end, u64 max_bytes,
- unsigned long bits)
+ unsigned long bits, int contig)
{
struct rb_node *node;
struct extent_state *state;
u64 cur_start = *start;
u64 total_bytes = 0;
+ u64 last = 0;
int found = 0;
if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
state = rb_entry(node, struct extent_state, rb_node);
if (state->start > search_end)
break;
- if (state->end >= cur_start && (state->state & bits)) {
+ if (contig && found && state->start > last + 1)
+ break;
+ if (state->end >= cur_start && (state->state & bits) == bits) {
total_bytes += min(search_end, state->end) + 1 -
max(cur_start, state->start);
if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
*start = state->start;
found = 1;
}
+ last = state->end;
+ } else if (contig && found) {
+ break;
}
node = rb_next(node);
if (!node)
@@ -2912,6 +2918,46 @@ out:
return sector;
}
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+ u64 offset,
+ u64 last,
+ get_extent_t *get_extent)
+{
+ u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+ struct extent_map *em;
+ u64 len;
+
+ if (offset >= last)
+ return NULL;
+
+ while(1) {
+ len = last - offset;
+ if (len == 0)
+ break;
+ len = (len + sectorsize - 1) & ~(sectorsize - 1);
+ em = get_extent(inode, NULL, 0, offset, len, 0);
+ if (!em || IS_ERR(em))
+ return em;
+
+ /* if this isn't a hole return it */
+ if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+ em->block_start != EXTENT_MAP_HOLE) {
+ return em;
+ }
+
+ /* this is a hole, advance to the next extent */
+ offset = extent_map_end(em);
+ free_extent_map(em);
+ if (offset >= last)
+ break;
+ }
+ return NULL;
+}
+
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u32 flags = 0;
u32 found_type;
u64 last;
+ u64 last_for_get_extent = 0;
u64 disko = 0;
+ u64 isize = i_size_read(inode);
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
struct btrfs_file_extent_item *item;
int end = 0;
- u64 em_start = 0, em_len = 0;
+ u64 em_start = 0;
+ u64 em_len = 0;
+ u64 em_end = 0;
unsigned long emflags;
- int hole = 0;
if (len == 0)
return -EINVAL;
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
+ /*
+ * lookup the last file extent. We're not using i_size here
+ * because there might be preallocation past i_size
+ */
ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
path, inode->i_ino, -1, 0);
if (ret < 0) {
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
- /* No extents, just return */
+ /* No extents, but there might be delalloc bits */
if (found_key.objectid != inode->i_ino ||
found_type != BTRFS_EXTENT_DATA_KEY) {
- btrfs_free_path(path);
- return 0;
+ /* have to trust i_size as the end */
+ last = (u64)-1;
+ last_for_get_extent = isize;
+ } else {
+ /*
+ * remember the start of the last extent. There are a
+ * bunch of different factors that go into the length of the
+ * extent, so its much less complex to remember where it started
+ */
+ last = found_key.offset;
+ last_for_get_extent = last + 1;
}
- last = found_key.offset;
btrfs_free_path(path);
+ /*
+ * we might have some extents allocated but more delalloc past those
+ * extents. so, we trust isize unless the start of the last extent is
+ * beyond isize
+ */
+ if (last < isize) {
+ last = (u64)-1;
+ last_for_get_extent = isize;
+ }
+
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
&cached_state, GFP_NOFS);
- em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+ em = get_extent_skip_holes(inode, off, last_for_get_extent,
+ get_extent);
if (!em)
goto out;
if (IS_ERR(em)) {
@@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
while (!end) {
- hole = 0;
- off = em->start + em->len;
+ off = extent_map_end(em);
if (off >= max)
end = 1;
- if (em->block_start == EXTENT_MAP_HOLE) {
- hole = 1;
- goto next;
- }
-
em_start = em->start;
em_len = em->len;
-
+ em_end = extent_map_end(em);
+ emflags = em->flags;
disko = 0;
flags = 0;
@@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
flags |= FIEMAP_EXTENT_ENCODED;
-next:
- emflags = em->flags;
free_extent_map(em);
em = NULL;
- if (!end) {
- em = get_extent(inode, NULL, 0, off, max - off, 0);
- if (!em)
- goto out;
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out;
- }
- emflags = em->flags;
- }
-
- if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+ if ((em_start >= last) || em_len == (u64)-1 ||
+ (last == (u64)-1 && isize <= em_end)) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
- if (em_start == last) {
+ /* now scan forward to see if this is really the last extent. */
+ em = get_extent_skip_holes(inode, off, last_for_get_extent,
+ get_extent);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+ if (!em) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
-
- if (!hole) {
- ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
- em_len, flags);
- if (ret)
- goto out_free;
- }
+ ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+ em_len, flags);
+ if (ret)
+ goto out_free;
}
out_free:
free_extent_map(em);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7083cfafd061..9318dfefd59c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -191,7 +191,7 @@ void extent_io_exit(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
- u64 max_bytes, unsigned long bits);
+ u64 max_bytes, unsigned long bits, int contig);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb9bd7832b6d..a45785ab197b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -90,13 +90,14 @@ static noinline int cow_file_range(struct inode *inode,
unsigned long *nr_written, int unlock);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir)
+ struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int err;
err = btrfs_init_acl(trans, inode, dir);
if (!err)
- err = btrfs_xattr_security_init(trans, inode, dir);
+ err = btrfs_xattr_security_init(trans, inode, dir, qstr);
return err;
}
@@ -1913,7 +1914,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
private = 0;
if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
- (u64)-1, 1, EXTENT_DIRTY)) {
+ (u64)-1, 1, EXTENT_DIRTY, 0)) {
ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
start, &private_failure);
if (ret == 0) {
@@ -4704,7 +4705,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(trans, inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4765,7 +4766,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(trans, inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4893,7 +4894,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
drop_on_err = 1;
- err = btrfs_init_inode_security(trans, inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_fail;
@@ -5280,6 +5281,128 @@ out:
return em;
}
+struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
+ size_t pg_offset, u64 start, u64 len,
+ int create)
+{
+ struct extent_map *em;
+ struct extent_map *hole_em = NULL;
+ u64 range_start = start;
+ u64 end;
+ u64 found;
+ u64 found_end;
+ int err = 0;
+
+ em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+ if (IS_ERR(em))
+ return em;
+ if (em) {
+ /*
+ * if our em maps to a hole, there might
+ * actually be delalloc bytes behind it
+ */
+ if (em->block_start != EXTENT_MAP_HOLE)
+ return em;
+ else
+ hole_em = em;
+ }
+
+ /* check to see if we've wrapped (len == -1 or similar) */
+ end = start + len;
+ if (end < start)
+ end = (u64)-1;
+ else
+ end -= 1;
+
+ em = NULL;
+
+ /* ok, we didn't find anything, let's look for delalloc */
+ found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
+ end, len, EXTENT_DELALLOC, 1);
+ found_end = range_start + found;
+ if (found_end < range_start)
+ found_end = (u64)-1;
+
+ /*
+ * we didn't find anything useful, return
+ * the original results from get_extent()
+ */
+ if (range_start > end || found_end <= start) {
+ em = hole_em;
+ hole_em = NULL;
+ goto out;
+ }
+
+ /* adjust the range_start to make sure it doesn't
+ * go backwards from the start they passed in
+ */
+ range_start = max(start,range_start);
+ found = found_end - range_start;
+
+ if (found > 0) {
+ u64 hole_start = start;
+ u64 hole_len = len;
+
+ em = alloc_extent_map(GFP_NOFS);
+ if (!em) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /*
+ * when btrfs_get_extent can't find anything it
+ * returns one huge hole
+ *
+ * make sure what it found really fits our range, and
+ * adjust to make sure it is based on the start from
+ * the caller
+ */
+ if (hole_em) {
+ u64 calc_end = extent_map_end(hole_em);
+
+ if (calc_end <= start || (hole_em->start > end)) {
+ free_extent_map(hole_em);
+ hole_em = NULL;
+ } else {
+ hole_start = max(hole_em->start, start);
+ hole_len = calc_end - hole_start;
+ }
+ }
+ em->bdev = NULL;
+ if (hole_em && range_start > hole_start) {
+ /* our hole starts before our delalloc, so we
+ * have to return just the parts of the hole
+ * that go until the delalloc starts
+ */
+ em->len = min(hole_len,
+ range_start - hole_start);
+ em->start = hole_start;
+ em->orig_start = hole_start;
+ /*
+ * don't adjust block start at all,
+ * it is fixed at EXTENT_MAP_HOLE
+ */
+ em->block_start = hole_em->block_start;
+ em->block_len = hole_len;
+ } else {
+ em->start = range_start;
+ em->len = found;
+ em->orig_start = range_start;
+ em->block_start = EXTENT_MAP_DELALLOC;
+ em->block_len = found;
+ }
+ } else if (hole_em) {
+ return hole_em;
+ }
+out:
+
+ free_extent_map(hole_em);
+ if (err) {
+ free_extent_map(em);
+ return ERR_PTR(err);
+ }
+ return em;
+}
+
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
@@ -6102,7 +6225,7 @@ out:
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
- return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
+ return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
int btrfs_readpage(struct file *file, struct page *page)
@@ -6982,7 +7105,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(trans, inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index be2d4f6aaa5e..5fdb2abc4fa7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
- if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC)
+ if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
return -EINVAL;
if (flags & ~BTRFS_SUBVOL_RDONLY)
return -EOPNOTSUPP;
+ if (!is_owner_or_cap(inode))
+ return -EACCES;
+
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
goto out_reset;
}
- ret = btrfs_update_root(trans, root,
+ ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
btrfs_commit_transaction(trans, root);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index cc9b450399df..a178f5ebea78 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
unsigned long tot_out;
unsigned long tot_len;
char *buf;
+ bool may_late_unmap, need_unmap;
data_in = kmap(pages_in[0]);
tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
tot_in += in_len;
working_bytes = in_len;
+ may_late_unmap = need_unmap = false;
/* fast path: avoid using the working buffer */
if (in_page_bytes_left >= in_len) {
buf = data_in + in_offset;
bytes = in_len;
+ may_late_unmap = true;
goto cont;
}
@@ -329,14 +332,17 @@ cont:
if (working_bytes == 0 && tot_in >= tot_len)
break;
- kunmap(pages_in[page_in_index]);
- page_in_index++;
- if (page_in_index >= total_pages_in) {
+ if (page_in_index + 1 >= total_pages_in) {
ret = -1;
- data_in = NULL;
goto done;
}
- data_in = kmap(pages_in[page_in_index]);
+
+ if (may_late_unmap)
+ need_unmap = true;
+ else
+ kunmap(pages_in[page_in_index]);
+
+ data_in = kmap(pages_in[++page_in_index]);
in_page_bytes_left = PAGE_CACHE_SIZE;
in_offset = 0;
@@ -346,6 +352,8 @@ cont:
out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
&out_len);
+ if (need_unmap)
+ kunmap(pages_in[page_in_index - 1]);
if (ret != LZO_E_OK) {
printk(KERN_WARNING "btrfs decompress failed\n");
ret = -1;
@@ -363,8 +371,7 @@ cont:
break;
}
done:
- if (data_in)
- kunmap(pages_in[page_in_index]);
+ kunmap(pages_in[page_in_index]);
return ret;
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0825e4ed9447..31ade5802ae8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
u32 item_size;
int ret;
int err = 0;
+ int progress = 0;
path = btrfs_alloc_path();
if (!path)
@@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
while (1) {
+ progress++;
trans = btrfs_start_transaction(rc->extent_root, 0);
BUG_ON(IS_ERR(trans));
-
+restart:
if (update_backref_cache(trans, &rc->backref_cache)) {
btrfs_end_transaction(trans, rc->extent_root);
continue;
@@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
}
}
+ if (trans && progress && err == -ENOSPC) {
+ ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+ rc->block_group->flags);
+ if (ret == 0) {
+ err = 0;
+ progress = 0;
+ goto restart;
+ }
+ }
btrfs_release_path(rc->extent_root, path);
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a004008f7d28..d39a9895d932 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -155,7 +155,8 @@ enum {
Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
- Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err,
+ Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
+ Opt_enospc_debug, Opt_err,
};
static match_table_t tokens = {
@@ -184,6 +185,7 @@ static match_table_t tokens = {
{Opt_space_cache, "space_cache"},
{Opt_clear_cache, "clear_cache"},
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+ {Opt_enospc_debug, "enospc_debug"},
{Opt_err, NULL},
};
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_user_subvol_rm_allowed:
btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
break;
+ case Opt_enospc_debug:
+ btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+ break;
case Opt_err:
printk(KERN_INFO "btrfs: unrecognized mount option "
"'%s'\n", p);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index af7dbca15276..dd13eb81ee40 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
ret = btrfs_shrink_device(device, 0);
if (ret)
- goto error_brelse;
+ goto error_undo;
ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
if (ret)
- goto error_brelse;
+ goto error_undo;
device->in_fs_metadata = 0;
@@ -1416,6 +1416,13 @@ out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
return ret;
+error_undo:
+ if (device->writeable) {
+ list_add(&device->dev_alloc_list,
+ &root->fs_info->fs_devices->alloc_list);
+ root->fs_info->fs_devices->rw_devices++;
+ }
+ goto error_brelse;
}
/*
@@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
- device->mode = 0;
+ device->mode = FMODE_EXCL;
set_blocksize(device->bdev, 4096);
if (seeding_dev) {
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a5776531dc2b..d779cefcfd7d 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -370,7 +370,8 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
}
int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir)
+ struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int err;
size_t len;
@@ -378,7 +379,8 @@ int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
char *suffix;
char *name;
- err = security_inode_init_security(inode, dir, &suffix, &value, &len);
+ err = security_inode_init_security(inode, dir, qstr, &suffix, &value,
+ &len);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 7a43fd640bbb..b3cc8039134b 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -37,6 +37,7 @@ extern int btrfs_setxattr(struct dentry *dentry, const char *name,
extern int btrfs_removexattr(struct dentry *dentry, const char *name);
extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir);
+ struct inode *inode, struct inode *dir,
+ const struct qstr *qstr);
#endif /* __XATTR__ */
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 42c7fafc8bfe..3f458310e287 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
@@ -275,6 +274,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
bool preemptive)
{
struct dentry *grave, *trap;
+ struct path path, path_to_graveyard;
char nbuffer[8 + 8 + 1];
int ret;
@@ -287,10 +287,18 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
/* non-directories can just be unlinked */
if (!S_ISDIR(rep->d_inode->i_mode)) {
_debug("unlink stale object");
- ret = vfs_unlink(dir->d_inode, rep);
- if (preemptive)
- cachefiles_mark_object_buried(cache, rep);
+ path.mnt = cache->mnt;
+ path.dentry = dir;
+ ret = security_path_unlink(&path, rep);
+ if (ret < 0) {
+ cachefiles_io_error(cache, "Unlink security error");
+ } else {
+ ret = vfs_unlink(dir->d_inode, rep);
+
+ if (preemptive)
+ cachefiles_mark_object_buried(cache, rep);
+ }
mutex_unlock(&dir->d_inode->i_mutex);
@@ -379,12 +387,23 @@ try_again:
}
/* attempt the rename */
- ret = vfs_rename(dir->d_inode, rep, cache->graveyard->d_inode, grave);
- if (ret != 0 && ret != -ENOMEM)
- cachefiles_io_error(cache, "Rename failed with error %d", ret);
+ path.mnt = cache->mnt;
+ path.dentry = dir;
+ path_to_graveyard.mnt = cache->mnt;
+ path_to_graveyard.dentry = cache->graveyard;
+ ret = security_path_rename(&path, rep, &path_to_graveyard, grave);
+ if (ret < 0) {
+ cachefiles_io_error(cache, "Rename security error %d", ret);
+ } else {
+ ret = vfs_rename(dir->d_inode, rep,
+ cache->graveyard->d_inode, grave);
+ if (ret != 0 && ret != -ENOMEM)
+ cachefiles_io_error(cache,
+ "Rename failed with error %d", ret);
- if (preemptive)
- cachefiles_mark_object_buried(cache, rep);
+ if (preemptive)
+ cachefiles_mark_object_buried(cache, rep);
+ }
unlock_rename(cache->graveyard, dir);
dput(grave);
@@ -448,6 +467,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
{
struct cachefiles_cache *cache;
struct dentry *dir, *next = NULL;
+ struct path path;
unsigned long start;
const char *name;
int ret, nlen;
@@ -458,6 +478,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
cache = container_of(parent->fscache.cache,
struct cachefiles_cache, cache);
+ path.mnt = cache->mnt;
ASSERT(parent->dentry);
ASSERT(parent->dentry->d_inode);
@@ -511,6 +532,10 @@ lookup_again:
if (ret < 0)
goto create_error;
+ path.dentry = dir;
+ ret = security_path_mkdir(&path, next, 0);
+ if (ret < 0)
+ goto create_error;
start = jiffies;
ret = vfs_mkdir(dir->d_inode, next, 0);
cachefiles_hist(cachefiles_mkdir_histogram, start);
@@ -536,6 +561,10 @@ lookup_again:
if (ret < 0)
goto create_error;
+ path.dentry = dir;
+ ret = security_path_mknod(&path, next, S_IFREG, 0);
+ if (ret < 0)
+ goto create_error;
start = jiffies;
ret = vfs_create(dir->d_inode, next, S_IFREG, NULL);
cachefiles_hist(cachefiles_create_histogram, start);
@@ -692,6 +721,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
{
struct dentry *subdir;
unsigned long start;
+ struct path path;
int ret;
_enter(",,%s", dirname);
@@ -719,6 +749,11 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
_debug("attempt mkdir");
+ path.mnt = cache->mnt;
+ path.dentry = dir;
+ ret = security_path_mkdir(&path, subdir, 0700);
+ if (ret < 0)
+ goto mkdir_error;
ret = vfs_mkdir(dir->d_inode, subdir, 0700);
if (ret < 0)
goto mkdir_error;
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index e18b183b47e1..6e050686e9b3 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/slab.h>
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 0bc68de8edd7..f0aef787a102 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -60,6 +60,7 @@ int ceph_init_dentry(struct dentry *dentry)
}
di->dentry = dentry;
di->lease_session = NULL;
+ di->parent_inode = igrab(dentry->d_parent->d_inode);
dentry->d_fsdata = di;
dentry->d_time = jiffies;
ceph_dentry_lru_add(dentry);
@@ -1033,7 +1034,7 @@ static void ceph_dentry_release(struct dentry *dentry)
u64 snapid = CEPH_NOSNAP;
if (!IS_ROOT(dentry)) {
- parent_inode = dentry->d_parent->d_inode;
+ parent_inode = di->parent_inode;
if (parent_inode)
snapid = ceph_snap(parent_inode);
}
@@ -1058,6 +1059,8 @@ static void ceph_dentry_release(struct dentry *dentry)
kmem_cache_free(ceph_dentry_cachep, di);
dentry->d_fsdata = NULL;
}
+ if (parent_inode)
+ iput(parent_inode);
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 39c243acd062..f40b9139e437 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (lastinode)
iput(lastinode);
- dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
- list_for_each_entry(child, &realm->children, child_item)
- queue_realm_cap_snaps(child);
+ list_for_each_entry(child, &realm->children, child_item) {
+ dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
+ realm, realm->ino, child, child->ino);
+ list_del_init(&child->dirty_item);
+ list_add(&child->dirty_item, &realm->dirty_item);
+ }
+ list_del_init(&realm->dirty_item);
dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
}
@@ -683,7 +687,9 @@ more:
* queue cap snaps _after_ we've built the new snap contexts,
* so that i_head_snapc can be set appropriately.
*/
- list_for_each_entry(realm, &dirty_realms, dirty_item) {
+ while (!list_empty(&dirty_realms)) {
+ realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
+ dirty_item);
queue_realm_cap_snaps(realm);
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 20b907d76ae2..88fcaa21b801 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -207,6 +207,7 @@ struct ceph_dentry_info {
struct dentry *dentry;
u64 time;
u64 offset;
+ struct inode *parent_inode;
};
struct ceph_inode_xattrs_info {
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 7cb0f7f847e4..7c584be27fdf 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -152,16 +152,28 @@ config CIFS_ACL
Allows fetching the CIFS/NTFS ACL from the server. The DACL blob
is handed over to the application/caller.
-config CIFS_EXPERIMENTAL
- bool "CIFS Experimental Features (EXPERIMENTAL)"
+config CIFS_SMB2
+ bool "SMB2 network file system support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && INET && BROKEN
+ select NLS
+ select KEYS
+ select FSCACHE
+ select DNS_RESOLVER
+
+ help
+ This enables experimental support for the SMB2 (Server Message Block
+ version 2) protocol. The SMB2 protocol is the successor to the
+ popular CIFS and SMB network file sharing protocols. SMB2 is the
+ native file sharing mechanism for recent versions of Windows
+ operating systems (since Vista). SMB2 enablement will eventually
+ give users better performance, security and features than are
+ possible with cifs. Note that smb2 mount options are also simpler
+ (compared to cifs) due to protocol improvements.
+
+ Unless you are a developer or tester, say N.
+
+config CIFS_NFSD_EXPORT
+ bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
help
- Enables cifs features under testing. These features are
- experimental and currently include DFS support and directory
- change notification ie fcntl(F_DNOTIFY), as well as the upcall
- mechanism which will be used for Kerberos session negotiation
- and uid remapping. Some of these features also may depend on
- setting a value of 1 to the pseudo-file /proc/fs/cifs/Experimental
- (which is disabled by default). See the file fs/cifs/README
- for more details. If unsure, say N.
-
+ Allows an NFS server to export a CIFS-mounted share (nfsd over cifs)
diff --git a/fs/cifs/README b/fs/cifs/README
index fe1683590828..4a3ca0e5ca24 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to
support and want to map the uid and gid fields
to values supplied at mount (rather than the
actual values), then set this to zero. (default 1)
-Experimental When set to 1 used to enable certain experimental
- features (currently enables multipage writes
- when signing is enabled, the multipage write
- performance enhancement was disabled when
- signing turned on in case buffer was modified
- just before it was sent, also this flag will
- be used to use the new experimental directory change
- notification code). When set to 2 enables
- an additional experimental feature, "raw ntlmssp"
- session establishment support (which allows
- specifying "sec=ntlmssp" on mount). The Linux cifs
- module will use ntlmv2 authentication encapsulated
- in "raw ntlmssp" (not using SPNEGO) when
- "sec=ntlmssp" is specified on mount.
- This support also requires building cifs with
- the CONFIG_CIFS_EXPERIMENTAL configuration flag.
These experimental features and tracing can be enabled by changing flags in
/proc/fs/cifs (after the cifs module has been installed or built into the
@@ -720,18 +704,6 @@ the start of smb requests and responses can be enabled via:
echo 1 > /proc/fs/cifs/traceSMB
-Two other experimental features are under development. To test these
-requires enabling CONFIG_CIFS_EXPERIMENTAL
-
- cifsacl support needed to retrieve approximated mode bits based on
- the contents on the CIFS ACL.
-
- lease support: cifs will check the oplock state before calling into
- the vfs to see if we can grant a lease on a file.
-
- DNOTIFY fcntl: needed for support of directory change
- notification and perhaps later for file leases)
-
Per share (per client mount) statistics are available in /proc/fs/cifs/Stats
if the kernel was configured with cifs statistics enabled. The statistics
represent the number of successful (ie non-zero return code from the server)
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 355abcdcda98..2c68abc58d6f 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -1,5 +1,7 @@
Version 1.53 May 20, 2008
+Put in a warning for NTLMv2 becoming the default
+
A Partial List of Missing Features
==================================
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e654dfd092c3..29b20ae945db 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -146,7 +146,7 @@ static char *extract_sharename(const char *treename)
static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
- const struct cifsTconInfo *tcon = cookie_netfs_data;
+ const struct cifs_tcon *tcon = cookie_netfs_data;
char *sharename;
uint16_t len;
@@ -173,7 +173,7 @@ cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
struct cifs_fscache_super_auxdata auxdata;
- const struct cifsTconInfo *tcon = cookie_netfs_data;
+ const struct cifs_tcon *tcon = cookie_netfs_data;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.resource_id = tcon->resource_id;
@@ -192,7 +192,7 @@ fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
uint16_t datalen)
{
struct cifs_fscache_super_auxdata auxdata;
- const struct cifsTconInfo *tcon = cookie_netfs_data;
+ const struct cifs_tcon *tcon = cookie_netfs_data;
if (datalen != sizeof(auxdata))
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 65829d32128c..d925cf67723f 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -110,8 +110,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct list_head *tmp1, *tmp2, *tmp3;
struct mid_q_entry *mid_entry;
struct TCP_Server_Info *server;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
int i, j;
__u32 dev_type;
@@ -152,7 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
tcp_ses_list);
i++;
list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifsSesInfo,
+ ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
@@ -171,7 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "TCP status: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
server->tcpStatus, server->srv_count,
- server->secMode,
+ server->sec_mode,
atomic_read(&server->inFlight));
#ifdef CONFIG_CIFS_STATS2
@@ -183,7 +183,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_puts(m, "\n\tShares:");
j = 0;
list_for_each(tmp3, &ses->tcon_list) {
- tcon = list_entry(tmp3, struct cifsTconInfo,
+ tcon = list_entry(tmp3, struct cifs_tcon,
tcon_list);
++j;
dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
@@ -249,6 +249,55 @@ static const struct file_operations cifs_debug_data_proc_fops = {
};
#ifdef CONFIG_CIFS_STATS
+
+#ifdef CONFIG_CIFS_SMB2
+static void smb2_clear_stats(struct cifs_tcon *tcon)
+{
+ int i;
+
+ atomic_set(&tcon->num_smbs_sent, 0);
+ for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
+ atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+ atomic_set(&tcon->stats.smb2_stats.smb2_com_fail[i], 0);
+ }
+}
+#endif /* CONFIG_CIFS_SMB2 */
+
+static void clear_cifs_stats(struct cifs_tcon *tcon)
+{
+ atomic_set(&tcon->num_smbs_sent, 0);
+
+#ifdef CONFIG_CIFS_SMB2
+ if (tcon->ses->server->is_smb2) {
+ smb2_clear_stats(tcon);
+ return;
+ }
+#endif /* CONFIG_CIFS_SMB2 */
+
+ /* cifs specific statistics, not applicable to smb2 sessions */
+ atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
+}
+
static ssize_t cifs_stats_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
@@ -256,8 +305,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
int rc;
struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
rc = get_user(c, buffer);
if (rc)
@@ -273,31 +322,13 @@ static ssize_t cifs_stats_proc_write(struct file *file,
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifsSesInfo,
+ ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
list_for_each(tmp3, &ses->tcon_list) {
tcon = list_entry(tmp3,
- struct cifsTconInfo,
+ struct cifs_tcon,
tcon_list);
- atomic_set(&tcon->num_smbs_sent, 0);
- atomic_set(&tcon->num_writes, 0);
- atomic_set(&tcon->num_reads, 0);
- atomic_set(&tcon->num_oplock_brks, 0);
- atomic_set(&tcon->num_opens, 0);
- atomic_set(&tcon->num_posixopens, 0);
- atomic_set(&tcon->num_posixmkdirs, 0);
- atomic_set(&tcon->num_closes, 0);
- atomic_set(&tcon->num_deletes, 0);
- atomic_set(&tcon->num_mkdirs, 0);
- atomic_set(&tcon->num_rmdirs, 0);
- atomic_set(&tcon->num_renames, 0);
- atomic_set(&tcon->num_t2renames, 0);
- atomic_set(&tcon->num_ffirst, 0);
- atomic_set(&tcon->num_fnext, 0);
- atomic_set(&tcon->num_fclose, 0);
- atomic_set(&tcon->num_hardlinks, 0);
- atomic_set(&tcon->num_symlinks, 0);
- atomic_set(&tcon->num_locks, 0);
+ clear_cifs_stats(tcon);
}
}
}
@@ -307,13 +338,51 @@ static ssize_t cifs_stats_proc_write(struct file *file,
return count;
}
+static void cifs_stats_print(struct seq_file *m, struct cifs_tcon *tcon)
+{
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+ seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
+ atomic_read(&tcon->num_smbs_sent),
+ atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
+ seq_printf(m, "\nReads: %d Bytes: %lld",
+ atomic_read(&tcon->stats.cifs_stats.num_reads),
+ (long long)(tcon->bytes_read));
+ seq_printf(m, "\nWrites: %d Bytes: %lld",
+ atomic_read(&tcon->stats.cifs_stats.num_writes),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nFlushes: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_flushes));
+ seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_locks),
+ atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
+ atomic_read(&tcon->stats.cifs_stats.num_symlinks));
+ seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_opens),
+ atomic_read(&tcon->stats.cifs_stats.num_closes),
+ atomic_read(&tcon->stats.cifs_stats.num_deletes));
+ seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_posixopens),
+ atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
+ seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
+ atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
+ seq_printf(m, "\nRenames: %d T2 Renames %d",
+ atomic_read(&tcon->stats.cifs_stats.num_renames),
+ atomic_read(&tcon->stats.cifs_stats.num_t2renames));
+ seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
+ atomic_read(&tcon->stats.cifs_stats.num_ffirst),
+ atomic_read(&tcon->stats.cifs_stats.num_fnext),
+ atomic_read(&tcon->stats.cifs_stats.num_fclose));
+}
+
static int cifs_stats_proc_show(struct seq_file *m, void *v)
{
int i;
struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
seq_printf(m,
"Resources in use\nCIFS Session: %d\n",
@@ -346,52 +415,15 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifsSesInfo,
+ ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
list_for_each(tmp3, &ses->tcon_list) {
tcon = list_entry(tmp3,
- struct cifsTconInfo,
+ struct cifs_tcon,
tcon_list);
i++;
seq_printf(m, "\n%d) %s", i, tcon->treeName);
- if (tcon->need_reconnect)
- seq_puts(m, "\tDISCONNECTED ");
- seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
- atomic_read(&tcon->num_smbs_sent),
- atomic_read(&tcon->num_oplock_brks));
- seq_printf(m, "\nReads: %d Bytes: %lld",
- atomic_read(&tcon->num_reads),
- (long long)(tcon->bytes_read));
- seq_printf(m, "\nWrites: %d Bytes: %lld",
- atomic_read(&tcon->num_writes),
- (long long)(tcon->bytes_written));
- seq_printf(m, "\nFlushes: %d",
- atomic_read(&tcon->num_flushes));
- seq_printf(m, "\nLocks: %d HardLinks: %d "
- "Symlinks: %d",
- atomic_read(&tcon->num_locks),
- atomic_read(&tcon->num_hardlinks),
- atomic_read(&tcon->num_symlinks));
- seq_printf(m, "\nOpens: %d Closes: %d "
- "Deletes: %d",
- atomic_read(&tcon->num_opens),
- atomic_read(&tcon->num_closes),
- atomic_read(&tcon->num_deletes));
- seq_printf(m, "\nPosix Opens: %d "
- "Posix Mkdirs: %d",
- atomic_read(&tcon->num_posixopens),
- atomic_read(&tcon->num_posixmkdirs));
- seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
- atomic_read(&tcon->num_mkdirs),
- atomic_read(&tcon->num_rmdirs));
- seq_printf(m, "\nRenames: %d T2 Renames %d",
- atomic_read(&tcon->num_renames),
- atomic_read(&tcon->num_t2renames));
- seq_printf(m, "\nFindFirst: %d FNext %d "
- "FClose %d",
- atomic_read(&tcon->num_ffirst),
- atomic_read(&tcon->num_fnext),
- atomic_read(&tcon->num_fclose));
+ cifs_stats_print(m, tcon);
}
}
}
@@ -423,7 +455,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
static const struct file_operations cifs_multiuser_mount_proc_fops;
static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
static const struct file_operations cifs_linux_ext_proc_fops;
void
@@ -441,8 +472,6 @@ cifs_proc_init(void)
proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
- proc_create("Experimental", 0, proc_fs_cifs,
- &cifs_experimental_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +498,6 @@ cifs_proc_clean(void)
remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
- remove_proc_entry("Experimental", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
remove_proc_entry("fs/cifs", NULL);
}
@@ -550,45 +578,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
.write = cifs_oplock_proc_write,
};
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", experimEnabled);
- return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- experimEnabled = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- experimEnabled = 1;
- else if (c == '2')
- experimEnabled = 2;
-
- return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_experimental_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_experimental_proc_write,
-};
-
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 8942b28cf807..fec074389c11 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) International Business Machines Corp., 2000,2002
+ * Copyright (c) International Business Machines Corp., 2000,2011
* Modified by Steve French (sfrench@us.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-#define CIFS_DEBUG /* BB temporary */
+#define CIFS_DEBUG /* remove to disable cifserror logging */
#ifndef _H_CIFS_DEBUG
#define _H_CIFS_DEBUG
@@ -28,6 +28,9 @@ void cifs_dump_mem(char *label, void *data, int length);
#define DBG2 2
void cifs_dump_detail(struct smb_hdr *);
void cifs_dump_mids(struct TCP_Server_Info *);
+#ifdef CONFIG_CIFS_SMB2
+void smb2_dump_detail(struct smb2_hdr *);
+#endif /* CONFIG_CIFS_SMB2 */
#else
#define DBG2 0
#endif
@@ -37,12 +40,15 @@ void dump_smb(struct smb_hdr *, int);
#define CIFS_RC 0x02
#define CIFS_TIMER 0x04
+#ifdef CONFIG_CIFS_SMB2
+void dump_smb2(struct smb2_hdr *, int);
+#endif /* CONFIG_CIFS_SMB2 */
+
/*
* debug ON
* --------
*/
#ifdef CIFS_DEBUG
-
/* information message: e.g., configuration, major event */
extern int cifsFYI;
#define cifsfyi(fmt, arg...) \
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 0a265ad9e426..e5826f867728 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -272,7 +272,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
struct dfs_info3_param *referrals = NULL;
unsigned int num_referrals = 0;
struct cifs_sb_info *cifs_sb;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
char *full_path;
int xid, i;
int rc;
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index ac51cd2d33ae..1ef54abd0714 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifs_fs_sb.h
*
- * Copyright (c) International Business Machines Corp., 2002,2004
+ * Copyright (c) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -24,10 +24,12 @@
#define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */
#define CIFS_MOUNT_SET_UID 2 /* set current's euid in create etc. */
+/* SERVER_INUM is always set for SMB2 */
#define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */
#define CIFS_MOUNT_DIRECT_IO 8 /* do not write nor read through page cache */
#define CIFS_MOUNT_NO_XATTR 0x10 /* if set - disable xattr support */
#define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames */
+/* The following two are not supported for SMB2 */
#define CIFS_MOUNT_POSIX_PATHS 0x40 /* Negotiate posix pathnames if possible*/
#define CIFS_MOUNT_UNX_EMUL 0x80 /* Network compat with SFUnix emulation */
#define CIFS_MOUNT_NO_BRL 0x100 /* No sending byte range locks to srv */
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4dfba8283165..2272fd5fe5b7 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -95,7 +95,7 @@ struct key_type cifs_spnego_key_type = {
/* get a key struct with a SPNEGO security blob, suitable for session setup */
struct key *
-cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
+cifs_get_spnego_key(struct cifs_ses *sesInfo)
{
struct TCP_Server_Info *server = sesInfo->server;
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
- USER_KEY_LEN + strlen(sesInfo->userName) +
+ USER_KEY_LEN + strlen(sesInfo->user_name) +
PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
dp = description + strlen(description);
- sprintf(dp, ";user=%s", sesInfo->userName);
+ sprintf(dp, ";user=%s", sesInfo->user_name);
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index e4041ec4d712..31bef9ee078b 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -41,7 +41,7 @@ struct cifs_spnego_msg {
#ifdef __KERNEL__
extern struct key_type cifs_spnego_key_type;
-extern struct key *cifs_get_spnego_key(struct cifsSesInfo *sesInfo);
+extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo);
#endif /* KERNEL */
#endif /* _CIFS_SPNEGO_H */
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index beeebf194234..555e8c58edb6 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -592,7 +592,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
int oplock = 0;
int xid, rc;
__u16 fid;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -660,7 +660,7 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
int oplock = 0;
int xid, rc;
__u16 fid;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index c4ae7d036563..025e9438ce97 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsacl.h
*
- * Copyright (c) International Business Machines Corp., 2007
+ * Copyright (c) International Business Machines Corp., 2007,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a51585f9852b..42ed85ed988f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -226,7 +226,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
}
/* first calculate 24 bytes ntlm response and then 16 byte session key */
-int setup_ntlm_response(struct cifsSesInfo *ses)
+int setup_ntlm_response(struct cifs_ses *ses)
{
int rc = 0;
unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
@@ -309,7 +309,7 @@ void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
* Allocate domain name which gets freed when session struct is deallocated.
*/
static int
-build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int dlen;
unsigned int wlen;
@@ -397,7 +397,7 @@ build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
* about target string i.e. for some, just user name might suffice.
*/
static int
-find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int attrsize;
unsigned int type;
@@ -442,7 +442,7 @@ find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
return 0;
}
-static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
+static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -469,15 +469,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
return rc;
}
- /* convert ses->userName to unicode and uppercase */
- len = strlen(ses->userName);
+ /* convert ses->user_name to unicode and uppercase */
+ len = strlen(ses->user_name);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
rc = -ENOMEM;
goto calc_exit_2;
}
- len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+ len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
UniStrupr(user);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
@@ -524,7 +524,7 @@ calc_exit_2:
}
static int
-CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
+CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
{
int rc;
unsigned int offset = CIFS_SESS_KEY_SIZE + 8;
@@ -560,7 +560,7 @@ CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
int
-setup_ntlmv2_rsp(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
int rc;
int baselen;
@@ -646,7 +646,7 @@ setup_ntlmv2_rsp_ret:
}
int
-calc_seckey(struct cifsSesInfo *ses)
+calc_seckey(struct cifs_ses *ses)
{
int rc;
struct crypto_blkcipher *tfm_arc4;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f2970136d17d..120a9cf37275 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -53,7 +53,6 @@ int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
@@ -82,6 +81,10 @@ module_param(echo_retries, ushort, 0644);
MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
"reconnecting server. Default: 5. 0 means "
"never reconnect.");
+bool sign_zero_copy; /* globals init to false automatically */
+module_param(sign_zero_copy, bool, 0644);
+MODULE_PARM_DESC(sign_zero_copy, "Don't copy pages on write with signing "
+ "enabled. Default: N");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
@@ -163,7 +166,7 @@ cifs_read_super(struct super_block *sb, void *data,
sb->s_bdi = &cifs_sb->bdi;
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
- inode = cifs_root_iget(sb, ROOT_I);
+ inode = cifs_root_iget(sb);
if (IS_ERR(inode)) {
rc = PTR_ERR(inode);
@@ -184,12 +187,12 @@ cifs_read_super(struct super_block *sb, void *data,
else
sb->s_d_op = &cifs_dentry_ops;
-#ifdef CONFIG_CIFS_EXPERIMENTAL
+#ifdef CIFS_NFSD_EXPORT
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cFYI(1, "export ops supported");
sb->s_export_op = &cifs_export_ops;
}
-#endif /* EXPERIMENTAL */
+#endif /* CIFS_NFSD_EXPORT */
return 0;
@@ -248,7 +251,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
int rc = -EOPNOTSUPP;
int xid;
@@ -401,7 +404,7 @@ static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
@@ -409,8 +412,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_printf(s, ",multiuser");
- else if (tcon->ses->userName)
- seq_printf(s, ",username=%s", tcon->ses->userName);
+ else if (tcon->ses->user_name)
+ seq_printf(s, ",username=%s", tcon->ses->user_name);
if (tcon->ses->domainName)
seq_printf(s, ",domain=%s", tcon->ses->domainName);
@@ -495,7 +498,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
static void cifs_umount_begin(struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
if (cifs_sb == NULL)
return;
@@ -981,10 +984,10 @@ init_cifs(void)
int rc = 0;
cifs_proc_init();
INIT_LIST_HEAD(&cifs_tcp_ses_list);
-#ifdef CONFIG_CIFS_EXPERIMENTAL
+#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
INIT_LIST_HEAD(&GlobalDnotifyReqList);
INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
-#endif
+#endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
/*
* Initialize Global counters
*/
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4a3330235d55..371d021815b1 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -47,7 +47,7 @@ extern void cifs_sb_deactive(struct super_block *sb);
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
-extern struct inode *cifs_root_iget(struct super_block *, unsigned long);
+extern struct inode *cifs_root_iget(struct super_block *);
extern int cifs_create(struct inode *, struct dentry *, int,
struct nameidata *);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
@@ -123,9 +123,9 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_CIFS_EXPERIMENTAL
+#ifdef CIFS_NFSD_EXPORT
extern const struct export_operations cifs_export_ops;
-#endif /* EXPERIMENTAL */
+#endif /* CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "1.70"
+#define CIFS_VERSION "1.71"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 17afb0fbcaed..0c443eb64b44 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsglob.h
*
- * Copyright (C) International Business Machines Corp., 2002,2008
+ * Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
@@ -27,6 +27,9 @@
#include "cifsacl.h"
#include <crypto/internal/hash.h>
#include <linux/scatterlist.h>
+#ifdef CONFIG_CIFS_SMB2
+#include "smb2glob.h"
+#endif /* CONFIG_CIFS_SMB2 */
/*
* The sizes of various internal tables and strings
@@ -38,9 +41,8 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null
- termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
+#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
#define CIFS_MIN_RCV_POOL 4
@@ -179,7 +181,7 @@ struct TCP_Server_Info {
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
- char secMode;
+ __u16 sec_mode;
bool session_estab; /* mark when very first sess is established */
u16 dialect; /* dialect index that server chose */
enum securityEnum secType;
@@ -211,6 +213,7 @@ struct TCP_Server_Info {
bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
+ bool is_smb2; /* smb2 not cifs protocol negotiated */
struct delayed_work echo; /* echo ping workqueue job */
#ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
@@ -219,6 +222,18 @@ struct TCP_Server_Info {
atomic_t inSend; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
#endif
+#ifdef CONFIG_CIFS_SMB2
+ wait_queue_head_t read_q; /* used by readpages */
+ atomic_t active_readpage_req; /* used by readpages */
+ atomic_t resp_rdy; /* used by readpages and demultiplex */
+ __le16 smb21_minor_version; /* SMB2.0 implemented, but 2.1 recognized */
+ struct task_struct *observe;
+/* char smb2_crypt_key[SMB2_CRYPTO_KEY_SIZE]; BB can we use cifs key? */
+ __u64 current_smb2_mid; /* multiplex id - rotating counter */
+ __u8 speed; /* helps us identify if this is a slow link */
+ unsigned int max_read;
+ unsigned int max_write;
+#endif /* CONFIG_CIFS_SMB2 */
};
/*
@@ -254,7 +269,7 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
/*
* Session structure. One of these for each uid session with a particular host
*/
-struct cifsSesInfo {
+struct cifs_ses {
struct list_head smb_ses_list;
struct list_head tcon_list;
struct mutex session_mutex;
@@ -274,7 +289,8 @@ struct cifsSesInfo {
int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
- char userName[MAX_USERNAME_SIZE + 1];
+ char *user_name; /* must not be null except during init of
+ session; filled in after mount option parsing */
char *domainName;
char *password;
struct session_key auth_key;
@@ -289,15 +305,16 @@ struct cifsSesInfo {
which do not negotiate NTLM or POSIX dialects, but instead
negotiate one of the older LANMAN dialects */
#define CIFS_SES_LANMAN 8
+#define CIFS_SES_SMB2 16
/*
* there is one of these for each connection to a resource on a particular
* session
*/
-struct cifsTconInfo {
+struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head openFileList;
- struct cifsSesInfo *ses; /* pointer to session associated with */
+ struct cifs_ses *ses; /* pointer to session associated with */
char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
char *password; /* for share-level security */
@@ -306,27 +323,37 @@ struct cifsTconInfo {
enum statusEnum tidStatus;
#ifdef CONFIG_CIFS_STATS
atomic_t num_smbs_sent;
- atomic_t num_writes;
- atomic_t num_reads;
- atomic_t num_flushes;
- atomic_t num_oplock_brks;
- atomic_t num_opens;
- atomic_t num_closes;
- atomic_t num_deletes;
- atomic_t num_mkdirs;
- atomic_t num_posixopens;
- atomic_t num_posixmkdirs;
- atomic_t num_rmdirs;
- atomic_t num_renames;
- atomic_t num_t2renames;
- atomic_t num_ffirst;
- atomic_t num_fnext;
- atomic_t num_fclose;
- atomic_t num_hardlinks;
- atomic_t num_symlinks;
- atomic_t num_locks;
- atomic_t num_acl_get;
- atomic_t num_acl_set;
+ union {
+ struct {
+ atomic_t num_writes;
+ atomic_t num_reads;
+ atomic_t num_flushes;
+ atomic_t num_oplock_brks;
+ atomic_t num_opens;
+ atomic_t num_closes;
+ atomic_t num_deletes;
+ atomic_t num_mkdirs;
+ atomic_t num_posixopens;
+ atomic_t num_posixmkdirs;
+ atomic_t num_rmdirs;
+ atomic_t num_renames;
+ atomic_t num_t2renames;
+ atomic_t num_ffirst;
+ atomic_t num_fnext;
+ atomic_t num_fclose;
+ atomic_t num_hardlinks;
+ atomic_t num_symlinks;
+ atomic_t num_locks;
+ atomic_t num_acl_get;
+ atomic_t num_acl_set;
+ } cifs_stats;
+#ifdef CONFIG_CIFS_SMB2
+ struct {
+ atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+ atomic_t smb2_com_fail[NUMBER_OF_SMB2_COMMANDS];
+ } smb2_stats;
+#endif /* CONFIG_CIFS_SMB2 */
+ } stats;
#ifdef CONFIG_CIFS_STATS2
unsigned long long time_writes;
unsigned long long time_reads;
@@ -357,6 +384,13 @@ struct cifsTconInfo {
bool local_lease:1; /* check leases (only) on local system not remote */
bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool need_reconnect:1; /* connection reset, tid now invalid */
+ bool bad_network_name:1;
+#ifdef CONFIG_CIFS_SMB2
+ __u32 capabilities;
+ __u32 maximal_access;
+ __u32 vol_serial_number;
+ __le64 vol_create_time;
+#endif /* CONFIG_CIFS_SMB2 */
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
struct fscache_cookie *fscache; /* cookie for share */
@@ -379,12 +413,12 @@ struct tcon_link {
#define TCON_LINK_IN_TREE 2
unsigned long tl_time;
atomic_t tl_count;
- struct cifsTconInfo *tl_tcon;
+ struct cifs_tcon *tl_tcon;
};
extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
-static inline struct cifsTconInfo *
+static inline struct cifs_tcon *
tlink_tcon(struct tcon_link *tlink)
{
return tlink->tl_tcon;
@@ -401,7 +435,7 @@ cifs_get_tlink(struct tcon_link *tlink)
}
/* This function is always expected to succeed */
-extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
+extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
/*
* This info hangs off the cifsFileInfo structure, pointed to by llist.
@@ -511,7 +545,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
#ifdef CONFIG_CIFS_STATS
#define cifs_stats_inc atomic_inc
-static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
+static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
unsigned int bytes)
{
if (bytes) {
@@ -521,7 +555,7 @@ static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
}
}
-static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,
+static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
unsigned int bytes)
{
spin_lock(&tcon->stat_lock);
@@ -569,13 +603,6 @@ struct mid_q_entry {
bool multiEnd:1; /* both received */
};
-struct oplock_q_entry {
- struct list_head qhead;
- struct inode *pinode;
- struct cifsTconInfo *tcon;
- __u16 netfid;
-};
-
/* for pending dnotify requests */
struct dir_notify_req {
struct list_head lhead;
@@ -625,6 +652,7 @@ struct cifs_fattr {
struct timespec cf_atime;
struct timespec cf_mtime;
struct timespec cf_ctime;
+ u32 ea_size;
};
static inline void free_dfs_info_param(struct dfs_info3_param *param)
@@ -655,6 +683,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define MID_RESPONSE_RECEIVED 4
#define MID_RETRY_NEEDED 8 /* session closed while this request out */
#define MID_RESPONSE_MALFORMED 0x10
+#define MID_NO_RESPONSE_NEEDED 0x20
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
@@ -663,6 +692,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define CIFS_IOVEC 4 /* array of response buffers */
/* Type of Request to SendReceive2 */
+#define CIFS_STD_OP 0
#define CIFS_BLOCKING_OP 1 /* operation can block */
#define CIFS_ASYNC_OP 2 /* do not wait for response */
#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */
@@ -713,13 +743,6 @@ require use of the stronger protocol */
#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2)
#define CIFSSEC_MAX (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2)
#define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_LANMAN | CIFSSEC_MAY_PLNTXT | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
-/*
- *****************************************************************
- * All constants go here
- *****************************************************************
- */
-
-#define UID_HASH (16)
/*
* Note that ONE module should define _DECLARE_GLOBALS_HERE to cause the
@@ -780,10 +803,12 @@ GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
*/
GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
+#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
/* Outstanding dir notify requests */
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
/* DirNotify response queue */
GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;
+#endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
/*
* Global transaction id (XID) information
@@ -817,7 +842,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
have the uid/password or Kerberos credential
or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
@@ -827,6 +851,7 @@ GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
+GLOBAL_EXTERN bool sign_zero_copy; /* don't copy written pages with signing */
/* reconnect after this many failed echo attempts */
GLOBAL_EXTERN unsigned short echo_retries;
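
The cifsglob.h hunk above folds the per-tcon counters into a union keyed by the negotiated protocol, which is why clear_cifs_stats() in cifs_debug.c earlier in this patch checks server->is_smb2 before deciding which arm to reset. A small userspace sketch of that layout and dispatch (C11 atomics stand in for the kernel's atomic_t; the command count is an assumed value, not taken from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SMB2_COMMANDS 19	/* assumed size, for the sketch only */

struct tcon_stats {
	atomic_int num_smbs_sent;	/* shared by both protocols */
	union {
		struct {
			atomic_int num_reads;
			atomic_int num_writes;
		} cifs_stats;
		struct {
			atomic_int sent[SMB2_COMMANDS];
			atomic_int fail[SMB2_COMMANDS];
		} smb2_stats;
	} stats;			/* only one arm is live per tcon */
};

static void clear_stats(struct tcon_stats *t, bool is_smb2)
{
	atomic_store(&t->num_smbs_sent, 0);
	if (is_smb2) {			/* reset only the arm that is in use */
		for (int i = 0; i < SMB2_COMMANDS; i++) {
			atomic_store(&t->stats.smb2_stats.sent[i], 0);
			atomic_store(&t->stats.smb2_stats.fail[i], 0);
		}
		return;
	}
	atomic_store(&t->stats.cifs_stats.num_reads, 0);
	atomic_store(&t->stats.cifs_stats.num_writes, 0);
}

int main(void)
{
	struct tcon_stats t = { 0 };

	atomic_store(&t.stats.cifs_stats.num_reads, 7);
	clear_stats(&t, false);
	printf("reads after clear: %d\n",
	       atomic_load(&t.stats.cifs_stats.num_reads));
	return 0;
}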
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 8096f27ad9a8..ecef17eace9c 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -55,7 +55,7 @@ do { \
} while (0)
extern char *build_path_from_dentry(struct dentry *);
extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
- struct cifsTconInfo *tcon);
+ struct cifs_tcon *tcon);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref,
@@ -67,17 +67,17 @@ extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
extern int cifs_call_async(struct TCP_Server_Info *server,
struct smb_hdr *in_buf, mid_callback_t *callback,
void *cbdata);
-extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
+extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
int * /* bytes returned */ , const int long_op);
-extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
+extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, int flags);
-extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
+extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */ , const int flags);
extern int SendReceiveBlockingLock(const unsigned int xid,
- struct cifsTconInfo *ptcon,
+ struct cifs_tcon *ptcon,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
@@ -99,12 +99,12 @@ extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
const unsigned short int port);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
- const struct cifsTconInfo *, int /* length of
+ const struct cifs_tcon *, int /* length of
fixed section (word count) in two byte units */);
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
- struct cifsSesInfo *ses,
+ struct cifs_ses *ses,
void **request_buf);
-extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
@@ -152,94 +152,94 @@ void cifs_proc_init(void);
void cifs_proc_clean(void);
extern int cifs_negotiate_protocol(unsigned int xid,
- struct cifsSesInfo *ses);
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+ struct cifs_ses *ses);
+extern int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info);
-extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
+extern int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses);
-extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
- const char *tree, struct cifsTconInfo *tcon,
+extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+ const char *tree, struct cifs_tcon *tcon,
const struct nls_table *);
-extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName, const struct nls_table *nls_codepage,
__u16 *searchHandle, struct cifs_search_info *psrch_inf,
int map, const char dirsep);
-extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
__u16 searchHandle, struct cifs_search_info *psrch_inf);
-extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
+extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
const __u16 search_handle);
-extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData);
-extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *findData,
int legacy /* whether to use old info level */,
const struct nls_table *nls_codepage, int remap);
-extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
+extern int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *findData,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
extern int CIFSSMBUnixQPathInfo(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+extern int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
const unsigned char *searchName,
struct dfs_info3_param **target_nodes,
unsigned int *number_of_nodes_in_array,
const struct nls_table *nls_codepage, int remap);
-extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
const char *old_path,
const struct nls_table *nls_codepage,
unsigned int *pnum_referrals,
struct dfs_info3_param **preferrals,
int remap);
-extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
struct super_block *sb, struct smb_vol *vol);
-extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon,
+extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon,
__u64 cap);
extern int CIFSSMBQFSAttributeInfo(const int xid,
- struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon);
+extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid,
__u32 pid_of_opener);
-extern int CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
bool delete_file, __u16 fid, __u32 pid_of_opener);
#if 0
-extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon,
char *fileName, __u16 dos_attributes,
const struct nls_table *nls_codepage);
#endif /* possibly unneeded function */
-extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon,
const char *fileName, __u64 size,
bool setAllocationSizeFlag,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon,
__u64 size, __u16 fileHandle, __u32 opener_pid,
bool AllocSizeFlag);
@@ -253,121 +253,122 @@ struct cifs_unix_set_info_args {
dev_t device;
};
-extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener);
-extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *pTcon,
char *fileName,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
const char *newName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon,
const char *name, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon,
const char *name, __u16 type,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon,
const char *name,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
int netfid, const char *target_name,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSCreateHardLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSUnixCreateHardLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSUnixCreateSymLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage);
extern int CIFSSMBUnixQuerySymLink(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const unsigned char *searchName, char **syminfo,
const struct nls_table *nls_codepage);
+#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
extern int CIFSSMBQueryReparseLinkInfo(const int xid,
- struct cifsTconInfo *tcon,
+ struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage);
-
-extern int CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+#endif /* temporarily unused until cifs_symlink is fixed */
+extern int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
+extern int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon,
u32 posix_flags, __u64 mode, __u16 *netfid,
FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBClose(const int xid, struct cifs_tcon *tcon,
const int smb_file_id);
-extern int CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon,
const int smb_file_id);
-extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRead(const int xid, struct cifs_tcon *tcon,
const int netfid, unsigned int count,
const __u64 lseek, unsigned int *nbytes, char **buf,
int *return_buf_type);
-extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBWrite(const int xid, struct cifs_tcon *tcon,
const int netfid, const unsigned int count,
const __u64 lseek, unsigned int *nbytes,
const char *buf, const char __user *ubuf,
const int long_op);
-extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBWrite2(const int xid, struct cifs_tcon *tcon,
const int netfid, const unsigned int count,
const __u64 offset, unsigned int *nbytes,
struct kvec *iov, const int nvec, const int long_op);
-extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, __u64 *inode_number,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
const struct nls_table *cp, int mapChars);
-extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 netfid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
const bool waitFlag, const __u8 oplock_level);
-extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, struct file_lock *,
const __u16 lock_type, const bool waitFlag);
-extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
extern int CIFSSMBEcho(struct TCP_Server_Info *server);
-extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
+extern int CIFSSMBLogoff(const int xid, struct cifs_ses *ses);
-extern struct cifsSesInfo *sesInfoAlloc(void);
-extern void sesInfoFree(struct cifsSesInfo *);
-extern struct cifsTconInfo *tconInfoAlloc(void);
-extern void tconInfoFree(struct cifsTconInfo *);
+extern struct cifs_ses *sesInfoAlloc(void);
+extern void sesInfoFree(struct cifs_ses *);
+extern struct cifs_tcon *tconInfoAlloc(void);
+extern void tconInfoFree(struct cifs_tcon *);
extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
@@ -376,49 +377,51 @@ extern int cifs_verify_signature(struct smb_hdr *,
struct TCP_Server_Info *server,
__u32 expected_sequence_number);
extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
-extern int setup_ntlm_response(struct cifsSesInfo *);
-extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *);
+extern int setup_ntlm_response(struct cifs_ses *);
+extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
-extern int calc_seckey(struct cifsSesInfo *);
+extern int calc_seckey(struct cifs_ses *);
#ifdef CONFIG_CIFS_WEAK_PW_HASH
extern void calc_lanman_hash(const char *password, const char *cryptkey,
bool encrypt, char *lnm_session_key);
#endif /* CIFS_WEAK_PW_HASH */
+#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
+extern int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
+ const int notify_subdirs, const __u16 netfid,
+ __u32 filter, struct file *file, int multishot,
+ const struct nls_table *nls_codepage);
+#endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
extern int CIFSSMBCopy(int xid,
- struct cifsTconInfo *source_tcon,
+ struct cifs_tcon *source_tcon,
const char *fromName,
const __u16 target_tid,
const char *toName, const int flags,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
- const int notify_subdirs, const __u16 netfid,
- __u32 filter, struct file *file, int multishot,
- const struct nls_table *nls_codepage);
-extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
const unsigned char *ea_name, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
const char *fileName, const char *ea_name,
const void *ea_value, const __u16 ea_value_len,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
-extern int CIFSSMBSetCIFSACL(const int, struct cifsTconInfo *, __u16,
+extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
struct cifs_ntsd *, __u32);
-extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 904aa47e3515..6488a521cada 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -84,7 +84,7 @@ static struct {
/* Mark as invalid all open files on tree connections, since they
were closed when the session to the server was lost */
-static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
+static void mark_open_files_invalid(struct cifs_tcon *pTcon)
{
struct cifsFileInfo *open_file = NULL;
struct list_head *tmp;
@@ -104,10 +104,10 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
/* reconnect the socket, tcon, and smb session if needed */
static int
-cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
+cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
{
int rc = 0;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_codepage;
@@ -226,7 +226,7 @@ out:
SMB information in the SMB header. If the return code is zero, this
function must have filled in request_buf pointer */
static int
-small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf)
{
int rc;
@@ -252,7 +252,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
int
small_smb_init_no_tc(const int smb_command, const int wct,
- struct cifsSesInfo *ses, void **request_buf)
+ struct cifs_ses *ses, void **request_buf)
{
int rc;
struct smb_hdr *buffer;
@@ -278,7 +278,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
/* If the return code is zero, this function must fill in request_buf pointer */
static int
-__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
*request_buf = cifs_buf_get();
@@ -304,7 +304,7 @@ __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* If the return code is zero, this function must fill in request_buf pointer */
static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
int rc;
@@ -317,7 +317,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
}
static int
-smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
if (tcon->ses->need_reconnect || tcon->need_reconnect)
@@ -358,7 +358,7 @@ vt2_err:
}
int
-CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
+CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
{
NEGOTIATE_REQ *pSMB;
NEGOTIATE_RSP *pSMBr;
@@ -442,7 +442,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
- server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
+ server->sec_mode = le16_to_cpu(rsp->SecurityMode);
server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
@@ -496,7 +496,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
memcpy(ses->server->cryptkey, rsp->EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
- } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
rc = -EIO; /* need cryptkey unless plain text */
goto neg_err_exit;
}
@@ -518,11 +518,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
}
/* else wct == 17 NTLM */
- server->secMode = pSMBr->SecurityMode;
- if ((server->secMode & SECMODE_USER) == 0)
+ server->sec_mode = pSMBr->SecurityMode;
+ if ((server->sec_mode & SECMODE_USER) == 0)
cFYI(1, "share mode security");
- if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+ if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
@@ -541,10 +541,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->secType = RawNTLMSSP;
else if (secFlags & CIFSSEC_MAY_LANMAN)
server->secType = LANMAN;
-/* #ifdef CONFIG_CIFS_EXPERIMENTAL
- else if (secFlags & CIFSSEC_MAY_PLNTXT)
- server->secType = ??
-#endif */
else {
rc = -EOPNOTSUPP;
cERROR(1, "Invalid security type");
@@ -569,7 +565,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
&& (pSMBr->EncryptionKeyLength == 0)) {
/* decode security blob */
- } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
rc = -EIO; /* no crypt key only if plain text pwd */
goto neg_err_exit;
}
@@ -630,27 +626,27 @@ signing_check:
/* MUST_SIGN already includes the MAY_SIGN FLAG
so if this is zero it means that signing is disabled */
cFYI(1, "Signing disabled");
- if (server->secMode & SECMODE_SIGN_REQUIRED) {
+ if (server->sec_mode & SECMODE_SIGN_REQUIRED) {
cERROR(1, "Server requires "
"packet signing to be enabled in "
"/proc/fs/cifs/SecurityFlags.");
rc = -EOPNOTSUPP;
}
- server->secMode &=
+ server->sec_mode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
/* signing required */
cFYI(1, "Must sign - secFlags 0x%x", secFlags);
- if ((server->secMode &
+ if ((server->sec_mode &
(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
cERROR(1, "signing required but server lacks support");
rc = -EOPNOTSUPP;
} else
- server->secMode |= SECMODE_SIGN_REQUIRED;
+ server->sec_mode |= SECMODE_SIGN_REQUIRED;
} else {
/* signing optional ie CIFSSEC_MAY_SIGN */
- if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
- server->secMode &=
+ if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0)
+ server->sec_mode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
}
@@ -662,7 +658,7 @@ neg_err_exit:
}
int
-CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
{
struct smb_hdr *smb_buffer;
int rc = 0;
@@ -747,7 +743,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
}
int
-CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
+CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
{
LOGOFF_ANDX_REQ *pSMB;
int rc = 0;
@@ -774,7 +770,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
pSMB->hdr.Mid = GetNextMid(ses->server);
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -794,7 +790,7 @@ session_already_dead:
}
int
-CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
__u16 type, const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -860,7 +856,7 @@ PsxDelete:
cFYI(1, "Posix delete returned %d", rc);
cifs_buf_release(pSMB);
- cifs_stats_inc(&tcon->num_deletes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
if (rc == -EAGAIN)
goto PsxDelete;
@@ -869,7 +865,7 @@ PsxDelete:
}
int
-CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_FILE_REQ *pSMB = NULL;
@@ -902,7 +898,7 @@ DelFileRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_deletes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
if (rc)
cFYI(1, "Error in RMFile = %d", rc);
@@ -914,7 +910,7 @@ DelFileRetry:
}
int
-CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
+CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_DIRECTORY_REQ *pSMB = NULL;
@@ -946,7 +942,7 @@ RmDirRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_rmdirs);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_rmdirs);
if (rc)
cFYI(1, "Error in RMDir = %d", rc);
@@ -957,7 +953,7 @@ RmDirRetry:
}
int
-CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
const char *name, const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
@@ -989,7 +985,7 @@ MkDirRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_mkdirs);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_mkdirs);
if (rc)
cFYI(1, "Error in Mkdir = %d", rc);
@@ -1000,7 +996,7 @@ MkDirRetry:
}
int
-CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
+CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
__u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap)
@@ -1111,9 +1107,9 @@ psx_create_err:
cifs_buf_release(pSMB);
if (posix_flags & SMB_O_DIRECTORY)
- cifs_stats_inc(&tcon->num_posixmkdirs);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_posixmkdirs);
else
- cifs_stats_inc(&tcon->num_posixopens);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_posixopens);
if (rc == -EAGAIN)
goto PsxCreat;
@@ -1166,7 +1162,7 @@ access_flags_to_smbopen_mode(const int access_flags)
}
int
-SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
+SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1234,7 +1230,7 @@ OldOpenRetry:
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_opens);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
} else {
@@ -1273,7 +1269,7 @@ OldOpenRetry:
}
int
-CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1347,7 +1343,7 @@ openRetry:
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_opens);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
} else {
@@ -1375,7 +1371,7 @@ openRetry:
}
int
-CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
+CIFSSMBRead(const int xid, struct cifs_tcon *tcon, const int netfid,
const unsigned int count, const __u64 lseek, unsigned int *nbytes,
char **buf, int *pbuf_type)
{
@@ -1429,7 +1425,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
&resp_buf_type, CIFS_LOG_ERROR);
- cifs_stats_inc(&tcon->num_reads);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
cERROR(1, "Send error in read = %d", rc);
@@ -1480,7 +1476,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
int
-CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBWrite(const int xid, struct cifs_tcon *tcon,
const int netfid, const unsigned int count,
const __u64 offset, unsigned int *nbytes, const char *buf,
const char __user *ubuf, const int long_op)
@@ -1573,7 +1569,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, long_op);
- cifs_stats_inc(&tcon->num_writes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
if (rc) {
cFYI(1, "Send error in write = %d", rc);
} else {
@@ -1599,7 +1595,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBWrite2(const int xid, struct cifs_tcon *tcon,
const int netfid, const unsigned int count,
const __u64 offset, unsigned int *nbytes, struct kvec *iov,
int n_vec, const int long_op)
@@ -1665,7 +1661,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
long_op);
- cifs_stats_inc(&tcon->num_writes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
if (rc) {
cFYI(1, "Send error Write2 = %d", rc);
} else if (resp_buf_type == 0) {
@@ -1700,7 +1696,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
int
-CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
@@ -1760,7 +1756,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
timeout);
/* SMB buffer freed by function above */
}
- cifs_stats_inc(&tcon->num_locks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
if (rc)
cFYI(1, "Send error in Lock = %d", rc);
@@ -1770,7 +1766,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag, const __u64 len,
struct file_lock *pLockData, const __u16 lock_type,
const bool waitFlag)
@@ -1908,7 +1904,7 @@ plk_err_exit:
int
-CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
CLOSE_REQ *pSMB = NULL;
@@ -1925,7 +1921,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
pSMB->LastWriteTime = 0xFFFFFFFF;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
- cifs_stats_inc(&tcon->num_closes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_closes);
if (rc) {
if (rc != -EINTR) {
/* EINTR is expected when user ctl-c to kill app */
@@ -1941,7 +1937,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
}
int
-CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
FLUSH_REQ *pSMB = NULL;
@@ -1954,7 +1950,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
pSMB->FileID = (__u16) smb_file_id;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
- cifs_stats_inc(&tcon->num_flushes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes);
if (rc)
cERROR(1, "Send error in Flush = %d", rc);
@@ -1962,7 +1958,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
}
int
-CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2017,7 +2013,7 @@ renameRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_renames);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_renames);
if (rc)
cFYI(1, "Send error in rename = %d", rc);
@@ -2029,7 +2025,7 @@ renameRetry:
return rc;
}
-int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
int netfid, const char *target_name,
const struct nls_table *nls_codepage, int remap)
{
@@ -2096,7 +2092,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&pTcon->num_t2renames);
+ cifs_stats_inc(&pTcon->stats.cifs_stats.num_t2renames);
if (rc)
cFYI(1, "Send error in Rename (by file handle) = %d", rc);
@@ -2109,7 +2105,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
}
int
-CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
+CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
const __u16 target_tid, const char *toName, const int flags,
const struct nls_table *nls_codepage, int remap)
{
@@ -2177,7 +2173,7 @@ copyRetry:
}
int
-CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage)
{
@@ -2253,7 +2249,7 @@ createSymLinkRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_symlinks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_symlinks);
if (rc)
cFYI(1, "Send error in SetPathInfo create symlink = %d", rc);
@@ -2266,7 +2262,7 @@ createSymLinkRetry:
}
int
-CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2339,7 +2335,7 @@ createHardLinkRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_hardlinks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
if (rc)
cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc);
@@ -2351,7 +2347,7 @@ createHardLinkRetry:
}
int
-CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2411,7 +2407,7 @@ winCreateHardLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_hardlinks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
if (rc)
cFYI(1, "Send error in hard link (NT rename) = %d", rc);
@@ -2423,7 +2419,7 @@ winCreateHardLinkRetry:
}
int
-CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage)
{
@@ -2516,9 +2512,19 @@ querySymLinkRetry:
return rc;
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
+#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
+/*
+ * Recent Windows versions now create symlinks more frequently and they
+ * use the "reparse point" mechanism below. We can of course do symlinks
+ * nicely to Samba and other servers which support the CIFS Unix
+ * Extensions, and we can also optionally do SFU symlinks and "client
+ * only" "MF" symlinks, but for recent Windows we really need to
+ * reenable the code below and fix the cifs_symlink callers to handle
+ * this. In the interim this code has been moved to its own config
+ * option so it is not compiled in by default until the callers are
+ * fixed up and it is better tested.
+ */
int
-CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage)
@@ -2618,7 +2624,7 @@ qreparse_out:
return rc;
}
-#endif /* CIFS_EXPERIMENTAL */
+#endif /* CIFS_SYMLINK_EXPERIMENTAL */ /* BB temporarily unused */
#ifdef CONFIG_CIFS_POSIX
@@ -2756,7 +2762,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
}
int
-CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap)
@@ -2819,7 +2825,7 @@ queryAclRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_acl_get);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
if (rc) {
cFYI(1, "Send error in Query POSIX ACL = %d", rc);
} else {
@@ -2844,7 +2850,7 @@ queryAclRetry:
}
int
-CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen,
const int acl_type,
@@ -2924,7 +2930,7 @@ setACLerrorExit:
/* BB fix tabs in this function FIXME BB */
int
-CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
+CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
{
int rc = 0;
@@ -3017,7 +3023,7 @@ GetExtAttrOut:
*/
static int
smb_init_nttransact(const __u16 sub_command, const int setup_count,
- const int parm_len, struct cifsTconInfo *tcon,
+ const int parm_len, struct cifs_tcon *tcon,
void **ret_buf)
{
int rc;
@@ -3099,7 +3105,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
-CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd **acl_inf, __u32 *pbuflen)
{
int rc = 0;
@@ -3130,7 +3136,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
0);
- cifs_stats_inc(&tcon->num_acl_get);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
if (rc) {
cFYI(1, "Send error in QuerySecDesc = %d", rc);
} else { /* decode response */
@@ -3191,7 +3197,7 @@ qsec_out:
}
int
-CIFSSMBSetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd *pntsd, __u32 acllen)
{
__u16 byte_count, param_count, data_count, param_offset, data_offset;
@@ -3258,7 +3264,7 @@ setCifsAclRetry:
/* Legacy Query Path Information call for lookup to old servers such
as Win9x/WinME */
-int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
+int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *pFinfo,
const struct nls_table *nls_codepage, int remap)
@@ -3326,7 +3332,7 @@ QInfRetry:
}
int
-CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
@@ -3393,7 +3399,7 @@ QFileInfoRetry:
}
int
-CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *pFindData,
int legacy /* old style infolevel */,
@@ -3494,7 +3500,7 @@ QPathInfoRetry:
}
int
-CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
@@ -3563,7 +3569,7 @@ UnixQFileInfoRetry:
}
int
-CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap)
@@ -3649,7 +3655,7 @@ UnixQPathInfoRetry:
/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
-CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName,
const struct nls_table *nls_codepage,
__u16 *pnetfid,
@@ -3736,7 +3742,7 @@ findFirstRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_ffirst);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst);
if (rc) {/* BB add logic to retry regular search if Unix search
rejected unexpectedly by server */
@@ -3797,7 +3803,7 @@ findFirstRetry:
return rc;
}
-int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
__u16 searchHandle, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
@@ -3865,7 +3871,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_fnext);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext);
if (rc) {
if (rc == -EBADF) {
psrch_inf->endOfSearch = true;
@@ -3935,7 +3941,7 @@ FNext2_err_exit:
}
int
-CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
+CIFSFindClose(const int xid, struct cifs_tcon *tcon,
const __u16 searchHandle)
{
int rc = 0;
@@ -3957,7 +3963,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
if (rc)
cERROR(1, "Send error in FindClose = %d", rc);
- cifs_stats_inc(&tcon->num_fclose);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_fclose);
/* Since session is dead, search handle closed on server already */
if (rc == -EAGAIN)
@@ -3967,7 +3973,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
__u64 *inode_number,
const struct nls_table *nls_codepage, int remap)
@@ -4169,7 +4175,7 @@ parse_DFS_referrals_exit:
}
int
-CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
const unsigned char *searchName,
struct dfs_info3_param **target_nodes,
unsigned int *num_of_nodes,
@@ -4218,7 +4224,7 @@ getDFSRetry:
}
if (ses->server) {
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
@@ -4283,7 +4289,7 @@ GetDFSRefExit:
/* Query File System Info such as free space to old servers such as Win 9x */
int
-SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
+SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
{
/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4362,7 +4368,7 @@ oldQFSInfoRetry:
}
int
-CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
+CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
{
/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4441,7 +4447,7 @@ QFSInfoRetry:
}
int
-CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4511,7 +4517,7 @@ QFSAttributeRetry:
}
int
-CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4581,7 +4587,7 @@ QFSDeviceRetry:
}
int
-CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4651,7 +4657,7 @@ QFSUnixRetry:
}
int
-CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
+CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap)
{
/* level 0x200 SMB_SET_CIFS_UNIX_INFO */
TRANSACTION2_SETFSI_REQ *pSMB = NULL;
@@ -4725,7 +4731,7 @@ SETFSUnixRetry:
int
-CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData)
{
/* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */
@@ -4818,7 +4824,7 @@ QFSPosixRetry:
in Samba which this routine can run into */
int
-CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName,
__u64 size, bool SetAllocation,
const struct nls_table *nls_codepage, int remap)
{
@@ -4907,7 +4913,7 @@ SetEOFRetry:
}
int
-CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
+CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
__u16 fid, __u32 pid_of_opener, bool SetAllocation)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -4989,7 +4995,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
time and resort to the original setpathinfo level which takes the ancient
DOS time format with 2 second granularity */
int
-CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5051,7 +5057,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
bool delete_file, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5107,7 +5113,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage, int remap)
{
@@ -5191,7 +5197,7 @@ SetTimesRetry:
handling it anyway and NT4 was what we thought it would be needed for
Do not delete it until we prove whether needed for Win9x though */
int
-CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName,
__u16 dos_attrs, const struct nls_table *nls_codepage)
{
SETATTR_REQ *pSMB = NULL;
@@ -5279,7 +5285,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
}
int
-CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener)
{
@@ -5342,7 +5348,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
}
int
-CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage, int remap)
{
@@ -5418,79 +5424,6 @@ setPermsRetry:
return rc;
}
-int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
- const int notify_subdirs, const __u16 netfid,
- __u32 filter, struct file *pfile, int multishot,
- const struct nls_table *nls_codepage)
-{
- int rc = 0;
- struct smb_com_transaction_change_notify_req *pSMB = NULL;
- struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL;
- struct dir_notify_req *dnotify_req;
- int bytes_returned;
-
- cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid);
- rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- pSMB->TotalParameterCount = 0 ;
- pSMB->TotalDataCount = 0;
- pSMB->MaxParameterCount = cpu_to_le32(2);
- /* BB find exact data count max from sess structure BB */
- pSMB->MaxDataCount = 0; /* same in little endian or be */
-/* BB VERIFY verify which is correct for above BB */
- pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
- MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
-
- pSMB->MaxSetupCount = 4;
- pSMB->Reserved = 0;
- pSMB->ParameterOffset = 0;
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 4; /* single byte does not need le conversion */
- pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- if (notify_subdirs)
- pSMB->WatchTree = 1; /* one byte - no le conversion needed */
- pSMB->Reserved2 = 0;
- pSMB->CompletionFilter = cpu_to_le32(filter);
- pSMB->Fid = netfid; /* file handle always le */
- pSMB->ByteCount = 0;
-
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *)pSMBr, &bytes_returned,
- CIFS_ASYNC_OP);
- if (rc) {
- cFYI(1, "Error in Notify = %d", rc);
- } else {
- /* Add file to outstanding requests */
- /* BB change to kmem cache alloc */
- dnotify_req = kmalloc(
- sizeof(struct dir_notify_req),
- GFP_KERNEL);
- if (dnotify_req) {
- dnotify_req->Pid = pSMB->hdr.Pid;
- dnotify_req->PidHigh = pSMB->hdr.PidHigh;
- dnotify_req->Mid = pSMB->hdr.Mid;
- dnotify_req->Tid = pSMB->hdr.Tid;
- dnotify_req->Uid = pSMB->hdr.Uid;
- dnotify_req->netfid = netfid;
- dnotify_req->pfile = pfile;
- dnotify_req->filter = filter;
- dnotify_req->multishot = multishot;
- spin_lock(&GlobalMid_Lock);
- list_add_tail(&dnotify_req->lhead,
- &GlobalDnotifyReqList);
- spin_unlock(&GlobalMid_Lock);
- } else
- rc = -ENOMEM;
- }
- cifs_buf_release(pSMB);
- return rc;
-}
-
#ifdef CONFIG_CIFS_XATTR
/*
* Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
@@ -5502,7 +5435,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
* the data isn't copied to it, but the length is returned.
*/
ssize_t
-CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *ea_name,
char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
@@ -5683,7 +5616,7 @@ QAllEAsOut:
}
int
-CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName,
const char *ea_name, const void *ea_value,
const __u16 ea_value_len, const struct nls_table *nls_codepage,
int remap)
@@ -5787,5 +5720,99 @@ SetEARetry:
return rc;
}
-
#endif
+
+#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* BB unused temporarily */
+/*
+ * Years ago the kernel added a "dnotify" function for the Samba server,
+ * to allow network clients (such as Windows) to automatically display
+ * updated lists of files in directory listings when files are added by
+ * one user while another user has the same directory open on their
+ * desktop. The Linux cifs kernel client hooked into the kernel side of
+ * this interface for the same reason, but ironically, when the VFS moved
+ * from "dnotify" to "inotify" it became harder to plug in Linux network
+ * file system clients (the most obvious use case for notify interfaces
+ * is when multiple users can update the contents of the same directory -
+ * exactly what network file systems can do), although the server (Samba)
+ * could still use it. For the short term we leave the worker function
+ * ifdeffed out (below) until inotify is fixed in the VFS to make it
+ * easier to plug in network file system clients. If inotify turns out to
+ * be permanently incompatible for network fs clients, we could instead
+ * expose this functionality by adding a future cifs (and smb2) notify
+ * ioctl.
+ */
+int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
+ const int notify_subdirs, const __u16 netfid,
+ __u32 filter, struct file *pfile, int multishot,
+ const struct nls_table *nls_codepage)
+{
+ int rc = 0;
+ struct smb_com_transaction_change_notify_req *pSMB = NULL;
+ struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL;
+ struct dir_notify_req *dnotify_req;
+ int bytes_returned;
+
+ cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid);
+ rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ pSMB->TotalParameterCount = 0 ;
+ pSMB->TotalDataCount = 0;
+ pSMB->MaxParameterCount = cpu_to_le32(2);
+ /* BB find exact data count max from sess structure BB */
+ pSMB->MaxDataCount = 0; /* same in little endian or be */
+/* BB VERIFY verify which is correct for above BB */
+ pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
+ MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+
+ pSMB->MaxSetupCount = 4;
+ pSMB->Reserved = 0;
+ pSMB->ParameterOffset = 0;
+ pSMB->DataCount = 0;
+ pSMB->DataOffset = 0;
+ pSMB->SetupCount = 4; /* single byte does not need le conversion */
+ pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
+ pSMB->ParameterCount = pSMB->TotalParameterCount;
+ if (notify_subdirs)
+ pSMB->WatchTree = 1; /* one byte - no le conversion needed */
+ pSMB->Reserved2 = 0;
+ pSMB->CompletionFilter = cpu_to_le32(filter);
+ pSMB->Fid = netfid; /* file handle always le */
+ pSMB->ByteCount = 0;
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *)pSMBr, &bytes_returned,
+ CIFS_ASYNC_OP);
+ if (rc) {
+ cFYI(1, "Error in Notify = %d", rc);
+ } else {
+ /* Add file to outstanding requests */
+ /* BB change to kmem cache alloc */
+ dnotify_req = kmalloc(
+ sizeof(struct dir_notify_req),
+ GFP_KERNEL);
+ if (dnotify_req) {
+ dnotify_req->Pid = pSMB->hdr.Pid;
+ dnotify_req->PidHigh = pSMB->hdr.PidHigh;
+ dnotify_req->Mid = pSMB->hdr.Mid;
+ dnotify_req->Tid = pSMB->hdr.Tid;
+ dnotify_req->Uid = pSMB->hdr.Uid;
+ dnotify_req->netfid = netfid;
+ dnotify_req->pfile = pfile;
+ dnotify_req->filter = filter;
+ dnotify_req->multishot = multishot;
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&dnotify_req->lhead,
+ &GlobalDnotifyReqList);
+ spin_unlock(&GlobalMid_Lock);
+ } else
+ rc = -ENOMEM;
+ }
+ cifs_buf_release(pSMB);
+ return rc;
+}
+#endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8d6c17ab593d..f07d60f8486b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
- * Copyright (C) International Business Machines Corp., 2002,2009
+ * Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -102,6 +102,7 @@ struct smb_vol {
bool fsc:1; /* enable fscache */
bool mfsymlinks:1; /* use Minshall+French Symlinks */
bool multiuser:1;
+ bool use_smb2:1; /* force smb2 use on mount instead of cifs */
unsigned int rsize;
unsigned int wsize;
bool sockopt_tcp_nodelay:1;
@@ -134,8 +135,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
{
int rc = 0;
struct list_head *tmp, *tmp2;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
struct mid_q_entry *mid_entry;
spin_lock(&GlobalMid_Lock);
@@ -156,11 +157,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
ses->need_reconnect = true;
ses->ipc_tid = 0;
list_for_each(tmp2, &ses->tcon_list) {
- tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list);
+ tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
tcon->need_reconnect = true;
}
}
@@ -722,7 +723,7 @@ multi_t2_fnd:
sock_release(csocket);
server->ssocket = NULL;
}
- /* buffer usuallly freed in free_mid - need to free it here on exit */
+ /* buffer usually freed in free_mid - need to free it here on exit */
cifs_buf_release(bigbuf);
if (smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(smallbuf);
@@ -881,7 +882,8 @@ cifs_parse_mount_options(char *options, const char *devname,
/* null user, ie anonymous, authentication */
vol->nullauth = 1;
}
- if (strnlen(value, 200) < 200) {
+ if (strnlen(value, MAX_USERNAME_SIZE) <
+ MAX_USERNAME_SIZE) {
vol->username = value;
} else {
printk(KERN_WARNING "CIFS: username too long\n");
@@ -1023,6 +1025,22 @@ cifs_parse_mount_options(char *options, const char *devname,
cERROR(1, "bad security option: %s", value);
return 1;
}
+ } else if (strnicmp(data, "vers", 3) == 0) {
+ if (!value || !*value) {
+ cERROR(1, "no protocol version specified"
+ " after vers= mount option");
+ } else if ((strnicmp(value, "cifs", 4) == 0) ||
+ (strnicmp(value, "1", 1) == 0)) {
+ /* this is the default */
+ continue;
+ } else if ((strnicmp(value, "smb2", 4) == 0) ||
+ (strnicmp(value, "2", 1) == 0)) {
+#ifdef CONFIG_CIFS_SMB2
+ vol->use_smb2 = true;
+#else
+ cERROR(1, "smb2 support not enabled");
+#endif /* CONFIG_CIFS_SMB2 */
+ }
} else if ((strnicmp(data, "unc", 3) == 0)
|| (strnicmp(data, "target", 6) == 0)
|| (strnicmp(data, "path", 4) == 0)) {
@@ -1574,10 +1592,10 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
/* now check if signing mode is acceptable */
if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
- (server->secMode & SECMODE_SIGN_REQUIRED))
+ (server->sec_mode & SECMODE_SIGN_REQUIRED))
return false;
else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
- (server->secMode &
+ (server->sec_mode &
(SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
return false;
@@ -1598,6 +1616,14 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
(struct sockaddr *)&vol->srcaddr))
continue;
+#ifdef CONFIG_CIFS_SMB2
+ if ((server->is_smb2 == true) && (vol->use_smb2 == false))
+ continue;
+
+ if ((server->is_smb2 == false) && (vol->use_smb2 == true))
+ continue;
+#endif /* CONFIG_CIFS_SMB2 */
+
if (!match_port(server, addr))
continue;
@@ -1709,6 +1735,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
+ /* BB should we set this unconditionally now, especially for SMB2 */
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
atomic_set(&tcp_ses->inFlight, 0);
init_waitqueue_head(&tcp_ses->response_q);
@@ -1752,6 +1779,11 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
goto out_err_crypto_release;
}
+#ifdef CONFIG_CIFS_SMB2
+ if (volume_info->use_smb2)
+ tcp_ses->is_smb2 = true;
+#endif /* CONFIG_CIFS_SMB2 */
+
/*
* since we're in a cifs function already, we know that
* this will succeed. No need for try_module_get().
@@ -1794,10 +1826,10 @@ out_err:
return ERR_PTR(rc);
}
-static struct cifsSesInfo *
+static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
@@ -1808,7 +1840,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
break;
default:
/* anything else takes username/password */
- if (strncmp(ses->userName, vol->username,
+ if (ses->user_name == NULL)
+ continue;
+ if (strncmp(ses->user_name, vol->username,
MAX_USERNAME_SIZE))
continue;
if (strlen(vol->username) != 0 &&
@@ -1827,7 +1861,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
}
static void
-cifs_put_smb_ses(struct cifsSesInfo *ses)
+cifs_put_smb_ses(struct cifs_ses *ses)
{
int xid;
struct TCP_Server_Info *server = ses->server;
@@ -1851,11 +1885,11 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
cifs_put_tcp_session(server);
}
-static struct cifsSesInfo *
+static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM, xid;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
@@ -1906,9 +1940,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
else
sprintf(ses->serverName, "%pI4", &addr->sin_addr);
- if (volume_info->username)
- strncpy(ses->userName, volume_info->username,
- MAX_USERNAME_SIZE);
+ if (volume_info->username) {
+ ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+ if (!ses->user_name)
+ goto get_ses_fail;
+ }
/* volume_info->password freed at unmount */
if (volume_info->password) {
@@ -1947,15 +1983,15 @@ get_ses_fail:
return ERR_PTR(rc);
}
-static struct cifsTconInfo *
-cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
+static struct cifs_tcon *
+cifs_find_tcon(struct cifs_ses *ses, const char *unc)
{
struct list_head *tmp;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
- tcon = list_entry(tmp, struct cifsTconInfo, tcon_list);
+ tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
if (tcon->tidStatus == CifsExiting)
continue;
if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
@@ -1970,10 +2006,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
}
static void
-cifs_put_tcon(struct cifsTconInfo *tcon)
+cifs_put_tcon(struct cifs_tcon *tcon)
{
int xid;
- struct cifsSesInfo *ses = tcon->ses;
+ struct cifs_ses *ses = tcon->ses;
cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
@@ -1994,11 +2030,11 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
cifs_put_smb_ses(ses);
}
-static struct cifsTconInfo *
-cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
+static struct cifs_tcon *
+cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
int rc, xid;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
tcon = cifs_find_tcon(ses, volume_info->UNC);
if (tcon) {
@@ -2088,7 +2124,7 @@ cifs_put_tlink(struct tcon_link *tlink)
}
int
-get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
struct dfs_info3_param **preferrals, int remap)
{
@@ -2387,7 +2423,7 @@ ip_connect(struct TCP_Server_Info *server)
return generic_ip_connect(server);
}
-void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
struct super_block *sb, struct smb_vol *vol_info)
{
/* if we are reconnecting then should we check to see if
@@ -2628,7 +2664,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
}
static int
-is_path_accessible(int xid, struct cifsTconInfo *tcon,
+is_path_accessible(int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path)
{
int rc;
@@ -2702,8 +2738,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
int rc;
int xid;
struct smb_vol *volume_info;
- struct cifsSesInfo *pSesInfo;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *pSesInfo;
+ struct cifs_tcon *tcon;
struct TCP_Server_Info *srvTcp;
char *full_path;
char *mount_data = mount_data_global;
@@ -2955,8 +2991,8 @@ out:
}
int
-CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
- const char *tree, struct cifsTconInfo *tcon,
+CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+ const char *tree, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
@@ -2988,7 +3024,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if ((ses->server->secMode) & SECMODE_USER) {
+ if ((ses->server->sec_mode) & SECMODE_USER) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
@@ -3005,7 +3041,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptkey,
- ses->server->secMode &
+ ses->server->sec_mode &
SECMODE_PW_ENCRYPT ? true : false,
bcc_ptr);
else
@@ -3021,7 +3057,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
}
}
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -3141,7 +3177,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
return 0;
}
-int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
+int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
@@ -3171,7 +3207,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
}
-int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info)
{
int rc = 0;
@@ -3183,7 +3219,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
ses->capabilities &= (~CAP_UNIX);
cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
- server->secMode, server->capabilities, server->timeAdj);
+ server->sec_mode, server->capabilities, server->timeAdj);
rc = CIFS_SessSetup(xid, ses, nls_info);
if (rc) {
@@ -3215,14 +3251,16 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
return rc;
}
-static struct cifsTconInfo *
+static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
{
- struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb);
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon = NULL;
+ struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon = NULL;
struct smb_vol *vol_info;
- char username[MAX_USERNAME_SIZE + 1];
+ char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
+ /* We used to have this as MAX_USERNAME which is */
+ /* way too big now (256 instead of 32) */
vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
if (vol_info == NULL) {
@@ -3251,7 +3289,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
if (IS_ERR(ses)) {
- tcon = (struct cifsTconInfo *)ses;
+ tcon = (struct cifs_tcon *)ses;
cifs_put_tcp_session(master_tcon->ses->server);
goto out;
}
@@ -3276,7 +3314,7 @@ cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
return cifs_sb->master_tlink;
}
-struct cifsTconInfo *
+struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd5f22918c33..ab74179681c9 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -55,7 +55,7 @@ build_path_from_dentry(struct dentry *direntry)
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
if (direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
@@ -152,7 +152,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
__u16 fileHandle;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
char *full_path = NULL;
FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
@@ -356,7 +356,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
int oplock = 0;
@@ -486,7 +486,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 993f82045bf6..55d87ac52000 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -45,7 +45,7 @@
#include "cifs_debug.h"
#include "cifsfs.h"
-#ifdef CONFIG_CIFS_EXPERIMENTAL
+#ifdef CIFS_NFSD_EXPORT
static struct dentry *cifs_get_parent(struct dentry *dentry)
{
/* BB need to add code here eventually to enable export via NFSD */
@@ -63,5 +63,5 @@ const struct export_operations cifs_export_ops = {
.encode_fs = */
};
-#endif /* EXPERIMENTAL */
+#endif /* CIFS_NFSD_EXPORT */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e964b1cd5dd0..9293ff2e7590 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -114,7 +114,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fattr fattr;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
cFYI(1, "posix open %s", full_path);
@@ -168,7 +168,7 @@ posix_open_ret:
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
- struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
+ struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
__u16 *pnetfid, int xid)
{
int rc;
@@ -285,7 +285,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
struct inode *inode = cifs_file->dentry->d_inode;
- struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsLockInfo *li, *tmp;
@@ -343,7 +343,7 @@ int cifs_open(struct inode *inode, struct file *file)
int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct cifsFileInfo *pCifsFile = NULL;
char *full_path = NULL;
@@ -457,7 +457,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsInodeInfo *pCifsInode;
struct inode *inode;
char *full_path = NULL;
@@ -594,7 +594,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
xid = GetXid();
if (pCFileStruct) {
- struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
+ struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
cFYI(1, "Freeing private data in close dir");
spin_lock(&cifs_file_list_lock);
@@ -651,7 +651,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
__u64 length;
bool wait_flag = false;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u16 netfid;
__u8 lockType = LOCKING_ANDX_LARGE_FILES;
bool posix_locking = 0;
@@ -863,7 +863,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
unsigned int bytes_written = 0;
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
int xid;
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
@@ -952,7 +952,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
unsigned int bytes_written = 0;
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
@@ -979,8 +979,8 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
if (rc != 0)
break;
}
- if (experimEnabled || (pTcon->ses->server &&
- ((pTcon->ses->server->secMode &
+ if (sign_zero_copy || (pTcon->ses->server &&
+ ((pTcon->ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
== 0))) {
struct kvec iov[2];
@@ -1207,7 +1207,7 @@ static int cifs_writepages(struct address_space *mapping,
int nr_pages;
__u64 offset = 0;
struct cifsFileInfo *open_file;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
struct page *page;
struct pagevec pvec;
@@ -1240,7 +1240,7 @@ static int cifs_writepages(struct address_space *mapping,
}
tcon = tlink_tcon(open_file->tlink);
- if (!experimEnabled && tcon->ses->server->secMode &
+ if (!sign_zero_copy && tcon->ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
cifsFileInfo_put(open_file);
kfree(iov);
@@ -1527,7 +1527,7 @@ int cifs_strict_fsync(struct file *file, int datasync)
{
int xid;
int rc = 0;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1552,7 +1552,7 @@ int cifs_fsync(struct file *file, int datasync)
{
int xid;
int rc = 0;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct cifsFileInfo *smbfile = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1671,7 +1671,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
struct iov_iter it;
struct inode *inode;
struct cifsFileInfo *open_file;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifs_sb_info *cifs_sb;
int xid, rc;
@@ -1826,7 +1826,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
size_t len, cur_len;
int iov_offset = 0;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsFileInfo *open_file;
struct smb_com_read_rsp *pSMBr;
char *read_data;
@@ -1947,7 +1947,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
unsigned int total_read;
unsigned int current_read_size;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
int xid;
char *current_offset;
struct cifsFileInfo *open_file;
@@ -2096,7 +2096,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
loff_t offset;
struct page *page;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
unsigned int bytes_read = 0;
unsigned int read_size, i;
char *smb_read_data = NULL;
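The write-path hunks above gate zero-copy (kvec based) writes on whether SMB signing is in effect. A minimal sketch of that check, assuming the sign_zero_copy flag and sec_mode field used above (server_signing_active and can_use_zero_copy_write are illustrative helpers, not code from the patch):

static inline bool server_signing_active(struct TCP_Server_Info *server)
{
	return server->sec_mode &
	       (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED);
}

static bool can_use_zero_copy_write(struct cifs_tcon *tcon)
{
	/* mirrors the condition in cifs_write(): allowed when the module
	 * permits signed zero-copy, or when signing was not negotiated */
	if (sign_zero_copy)
		return true;
	return tcon->ses->server && !server_signing_active(tcon->ses->server);
}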
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 297a43d0ff7f..d368a47ba5eb 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -40,7 +40,7 @@ void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
server->fscache = NULL;
}
-void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
+void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
{
struct TCP_Server_Info *server = tcon->ses->server;
@@ -51,7 +51,7 @@ void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
server->fscache, tcon->fscache);
}
-void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon)
+void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
{
cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
fscache_relinquish_cookie(tcon->fscache, 0);
@@ -62,7 +62,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
{
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
if (cifsi->fscache)
return;
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 31b88ec2341e..63539323e0b9 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -40,8 +40,8 @@ extern void cifs_fscache_unregister(void);
*/
extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *);
-extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *);
+extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
+extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
extern void cifs_fscache_release_inode_cookie(struct inode *);
extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
@@ -99,9 +99,9 @@ static inline void
cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
static inline void
cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
-static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {}
+static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {}
static inline void
-cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {}
+cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 8852470b4fbb..41e5651b1701 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -295,7 +295,7 @@ int cifs_get_file_info_unix(struct file *filp)
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
- struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
xid = GetXid();
rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -318,7 +318,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
int rc;
FILE_UNIX_BASIC_INFO find_data;
struct cifs_fattr fattr;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -373,7 +373,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
int oplock = 0;
__u16 netfid;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
char buf[24];
unsigned int bytes_read;
char *pbuf;
@@ -468,7 +468,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
char ea_value[4];
__u32 mode;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -502,7 +502,7 @@ static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
memset(fattr, 0, sizeof(*fattr));
fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
@@ -553,7 +553,7 @@ int cifs_get_file_info(struct file *filp)
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
- struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
xid = GetXid();
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -590,7 +590,7 @@ int cifs_get_inode_info(struct inode **pinode,
struct super_block *sb, int xid, const __u16 *pfid)
{
int rc = 0, tmprc;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
char *buf = NULL;
@@ -736,7 +736,7 @@ static const struct inode_operations cifs_ipc_inode_ops = {
};
char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
- struct cifsTconInfo *tcon)
+ struct cifs_tcon *tcon)
{
int pplen = cifs_sb->prepathlen;
int dfsplen;
@@ -878,14 +878,14 @@ retry_iget5_locked:
}
/* gets root inode */
-struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
+struct inode *cifs_root_iget(struct super_block *sb)
{
int xid;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct inode *inode = NULL;
long rc;
char *full_path;
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
full_path = cifs_build_path_to_root(cifs_sb, tcon);
if (full_path == NULL)
@@ -943,7 +943,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
FILE_BASIC_INFO info_buf;
if (attrs == NULL)
@@ -1061,7 +1061,7 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u32 dosattr, origattr;
FILE_BASIC_INFO *info_buf = NULL;
@@ -1179,7 +1179,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
struct super_block *sb = dir->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
struct iattr *attrs = NULL;
__u32 dosattr = 0, origattr = 0;
@@ -1277,7 +1277,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
struct cifs_fattr fattr;
@@ -1455,7 +1455,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
@@ -1512,7 +1512,7 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
__u16 srcfid;
int oplock, rc;
@@ -1564,7 +1564,7 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
char *toName = NULL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
int xid, rc, tmprc;
@@ -1769,7 +1769,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
- struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
int err = cifs_revalidate_dentry(dentry);
if (!err) {
@@ -1831,7 +1831,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *pTcon = NULL;
+ struct cifs_tcon *pTcon = NULL;
/*
* To avoid spurious oplock breaks from server, in the case of
@@ -1920,7 +1920,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
@@ -2206,7 +2206,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
struct inode *inode = direntry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
if (pTcon->unix_ext)
return cifs_setattr_unix(direntry, attrs);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 0c98672d0122..4221b5e48a42 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,7 +38,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
struct cifs_sb_info *cifs_sb;
#ifdef CONFIG_CIFS_POSIX
struct cifsFileInfo *pSMBFile = filep->private_data;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
__u64 ExtAttrBits = 0;
__u64 ExtAttrMask = 0;
__u64 caps;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e8804d373404..2904baa1ecb2 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -175,7 +175,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
}
static int
-CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -219,7 +219,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
}
static int
-CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSQueryMFSymLink(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage, int remap)
{
@@ -291,7 +291,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
int oplock = 0;
__u16 netfid = 0;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
u8 *buf;
char *pbuf;
unsigned int bytes_read = 0;
@@ -370,7 +370,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
char *toName = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsInodeInfo *cifsInode;
tlink = cifs_sb_tlink(cifs_sb);
@@ -445,7 +445,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
char *target_path = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *tcon;
+ struct cifs_tcon *tcon;
xid = GetXid();
@@ -518,7 +518,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
int xid;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 2a930a752a78..718268fde042 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -67,12 +67,12 @@ _FreeXid(unsigned int xid)
spin_unlock(&GlobalMid_Lock);
}
-struct cifsSesInfo *
+struct cifs_ses *
sesInfoAlloc(void)
{
- struct cifsSesInfo *ret_buf;
+ struct cifs_ses *ret_buf;
- ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL);
+ ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&sesInfoAllocCount);
ret_buf->status = CifsNew;
@@ -85,7 +85,7 @@ sesInfoAlloc(void)
}
void
-sesInfoFree(struct cifsSesInfo *buf_to_free)
+sesInfoFree(struct cifs_ses *buf_to_free)
{
if (buf_to_free == NULL) {
cFYI(1, "Null buffer passed to sesInfoFree");
@@ -100,15 +100,16 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
memset(buf_to_free->password, 0, strlen(buf_to_free->password));
kfree(buf_to_free->password);
}
+ kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kfree(buf_to_free);
}
-struct cifsTconInfo *
+struct cifs_tcon *
tconInfoAlloc(void)
{
- struct cifsTconInfo *ret_buf;
- ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL);
+ struct cifs_tcon *ret_buf;
+ ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&tconInfoAllocCount);
ret_buf->tidStatus = CifsNew;
@@ -123,7 +124,7 @@ tconInfoAlloc(void)
}
void
-tconInfoFree(struct cifsTconInfo *buf_to_free)
+tconInfoFree(struct cifs_tcon *buf_to_free)
{
if (buf_to_free == NULL) {
cFYI(1, "Null buffer passed to tconInfoFree");
@@ -294,11 +295,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
- const struct cifsTconInfo *treeCon, int word_count
+ const struct cifs_tcon *treeCon, int word_count
/* length of fixed section (word count) in two byte units */)
{
struct list_head *temp_item;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
char *temp = (char *) buffer;
memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -360,7 +361,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
"did not match tcon uid");
spin_lock(&cifs_tcp_ses_lock);
list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
- ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
+ ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
if (ses->linux_uid == current_fsuid()) {
if (ses->server == treeCon->ses->server) {
cFYI(1, "found matching uid substitute right smb_uid");
@@ -381,7 +382,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
if (treeCon->nocase)
buffer->Flags |= SMBFLG_CASELESS;
if ((treeCon->ses) && (treeCon->ses->server))
- if (treeCon->ses->server->secMode &
+ if (treeCon->ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
@@ -508,8 +509,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
struct list_head *tmp, *tmp1, *tmp2;
- struct cifsSesInfo *ses;
- struct cifsTconInfo *tcon;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
struct cifsInodeInfo *pCifsInode;
struct cifsFileInfo *netfile;
@@ -567,13 +568,13 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &srv->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
list_for_each(tmp1, &ses->tcon_list) {
- tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list);
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
if (tcon->tid != buf->Tid)
continue;
- cifs_stats_inc(&tcon->num_oplock_brks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
spin_lock(&cifs_file_list_lock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
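is_valid_oplock_break() above walks the per-server session list and each session's tcon list to find the tree connection whose tid matches the incoming break. The same lookup written with list_for_each_entry(), purely as an illustration of the idiom (find_tcon_by_tid is not a function from the patch, and a real caller would also need to take a reference before dropping the lock):

static struct cifs_tcon *find_tcon_by_tid(struct TCP_Server_Info *srv, __u16 tid)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != tid)
				continue;
			spin_unlock(&cifs_tcp_ses_lock);
			return tcon;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}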
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 8d9189f64477..79f641eeda30 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
{
int rc, alen, slen;
const char *pct;
- char *endp, scope_id[13];
+ char scope_id[13];
struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
memcpy(scope_id, pct + 1, slen);
scope_id[slen] = '\0';
- s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
- if (endp != scope_id + slen)
- return 0;
+ rc = strict_strtoul(scope_id, 0,
+ (unsigned long *)&s6->sin6_scope_id);
+ rc = (rc == 0) ? 1 : 0;
}
return rc;
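The netmisc.c hunk above switches the IPv6 scope-id parse from simple_strtoul() to strict_strtoul(), so any trailing junk after the digits now makes the conversion fail. A hedged, userspace-style sketch of the same behaviour (parse_v6_with_scope is illustrative, not a kernel function), useful for seeing what the '%' split does:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static int parse_v6_with_scope(const char *src, struct sockaddr_in6 *s6)
{
	char addr[INET6_ADDRSTRLEN], scope_id[13];
	const char *pct = strchr(src, '%');
	size_t alen = pct ? (size_t)(pct - src) : strlen(src);

	if (alen >= sizeof(addr))
		return 0;
	memcpy(addr, src, alen);
	addr[alen] = '\0';
	if (inet_pton(AF_INET6, addr, &s6->sin6_addr) != 1)
		return 0;

	if (pct) {
		char *endp;
		size_t slen = strlen(pct + 1);

		if (slen == 0 || slen >= sizeof(scope_id))
			return 0;
		memcpy(scope_id, pct + 1, slen + 1);
		s6->sin6_scope_id = (uint32_t)strtoul(scope_id, &endp, 0);
		if (*endp != '\0')	/* trailing junk: reject, like strict_strtoul */
			return 0;
	}
	s6->sin6_family = AF_INET6;
	return 1;	/* cifs_convert_address convention: 1 = success */
}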
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f8e4cd2a7912..6751e745bbc6 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -195,7 +195,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
int len;
int oplock = 0;
int rc;
- struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb);
+ struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
char *tmpbuffer;
rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
@@ -223,7 +223,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
struct cifsFileInfo *cifsFile;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
struct tcon_link *tlink = NULL;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
if (file->private_data == NULL) {
tlink = cifs_sb_tlink(cifs_sb);
@@ -496,7 +496,7 @@ static int cifs_save_resume_key(const char *current_entry,
assume that they are located in the findfirst return buffer.*/
/* We start counting in the buffer with entry 2 and increment for every
entry (do not increment for . or .. entry) */
-static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
+static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
int rc = 0;
@@ -764,7 +764,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
{
int rc = 0;
int xid, i;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct cifsFileInfo *cifsFile = NULL;
char *current_entry;
int num_to_fill = 0;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 1adc9625a344..4270b0236ee7 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -37,13 +37,13 @@
* the socket has been reestablished (so we know whether to use vc 0).
* Called while holding the cifs_tcp_ses_lock, so do not block
*/
-static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
+static bool is_first_ses_reconnect(struct cifs_ses *ses)
{
struct list_head *tmp;
- struct cifsSesInfo *tmp_ses;
+ struct cifs_ses *tmp_ses;
list_for_each(tmp, &ses->server->smb_ses_list) {
- tmp_ses = list_entry(tmp, struct cifsSesInfo,
+ tmp_ses = list_entry(tmp, struct cifs_ses,
smb_ses_list);
if (tmp_ses->need_reconnect == false)
return false;
@@ -61,11 +61,11 @@ static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
* any vc but zero (some servers reset the connection on vcnum zero)
*
*/
-static __le16 get_next_vcnum(struct cifsSesInfo *ses)
+static __le16 get_next_vcnum(struct cifs_ses *ses)
{
__u16 vcnum = 0;
struct list_head *tmp;
- struct cifsSesInfo *tmp_ses;
+ struct cifs_ses *tmp_ses;
__u16 max_vcs = ses->server->max_vcs;
__u16 i;
int free_vc_found = 0;
@@ -87,7 +87,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
free_vc_found = 1;
list_for_each(tmp, &ses->server->smb_ses_list) {
- tmp_ses = list_entry(tmp, struct cifsSesInfo,
+ tmp_ses = list_entry(tmp, struct cifs_ses,
smb_ses_list);
if (tmp_ses->vcnum == i) {
free_vc_found = 0;
@@ -114,7 +114,7 @@ get_vc_num_exit:
return cpu_to_le16(vcnum);
}
-static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
{
__u32 capabilities = 0;
@@ -136,7 +136,7 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -181,7 +181,7 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
*pbcc_area = bcc_ptr;
}
-static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
+static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
@@ -204,7 +204,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
}
-static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
bcc_ptr++;
} */
/* copy user */
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
} else {
- bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
@@ -236,7 +236,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
*pbcc_area = bcc_ptr;
}
-static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
- if (ses->userName == NULL) {
- /* BB what about null user mounts - check that we do this BB */
- } else {
- strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
- }
- bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+ if (ses->user_name != NULL)
+ strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+ /* else null user mount */
+
+ bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
@@ -277,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
}
static void
-decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int len;
@@ -324,7 +323,7 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
}
static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
- struct cifsSesInfo *ses,
+ struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -378,7 +377,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
}
static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
- struct cifsSesInfo *ses)
+ struct cifs_ses *ses)
{
unsigned int tioffset; /* challenge message target info area */
unsigned int tilen; /* challenge message target info area length */
@@ -425,7 +424,7 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
/* We do not malloc the blob, it is passed in pbuffer, because
it is fixed size, and small, making this approach cleaner */
static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
- struct cifsSesInfo *ses)
+ struct cifs_ses *ses)
{
NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
__u32 flags;
@@ -438,7 +437,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab)
@@ -463,7 +462,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
This function returns the length of the data in the blob */
static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
u16 *buflen,
- struct cifsSesInfo *ses,
+ struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc;
@@ -478,10 +477,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- if (ses->server->secMode &
+ if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
+ if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
tmp += len;
}
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
} else {
int len;
- len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+ len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -565,7 +564,7 @@ setup_ntlmv2_ret:
}
int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -656,13 +655,13 @@ ssetup_ntlmssp_authenticate:
if (type == LANMAN) {
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- char lnm_session_key[CIFS_SESS_KEY_SIZE];
+ char lnm_session_key[CIFS_AUTH_RESP_SIZE];
pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
/* no capabilities flags in old lanman negotiation */
- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
/* Calculate hash with password and copy into bcc_ptr.
* Encryption Key (stored as in cryptkey) gets used if the
@@ -671,12 +670,12 @@ ssetup_ntlmssp_authenticate:
*/
calc_lanman_hash(ses->password, ses->server->cryptkey,
- ses->server->secMode & SECMODE_PW_ENCRYPT ?
+ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
true : false, lnm_session_key);
ses->flags |= CIFS_SES_LANMAN;
- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
- bcc_ptr += CIFS_SESS_KEY_SIZE;
+ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
/* can not sign if LANMAN negotiated so no need
to calculate signing key? but what if server
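In ascii_ssetup_strings() above the user-name copy is simplified, but strnlen() still runs unconditionally and so assumes ses->user_name is non-NULL on this path. A null-safe variant of the copy, purely as a sketch (copy_user_name is an illustrative helper; MAX_USERNAME_SIZE is the constant used above):

static char *copy_user_name(char *bcc_ptr, const char *user_name)
{
	if (user_name) {
		strncpy(bcc_ptr, user_name, MAX_USERNAME_SIZE);
		bcc_ptr += strnlen(user_name, MAX_USERNAME_SIZE);
	}
	/* null user mount: emit only the terminator */
	*bcc_ptr = 0;
	return bcc_ptr + 1;	/* account for null termination */
}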
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
new file mode 100644
index 000000000000..28f9025443c5
--- /dev/null
+++ b/fs/cifs/smb2glob.h
@@ -0,0 +1,258 @@
+/*
+ * fs/smb2/smb2glob.h
+ *
+ * Definitions for various global variables and structures
+ *
+ * Copyright (C) International Business Machines Corp., 2002,2011
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Jeremy Allison (jra@samba.org)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ */
+#ifndef _SMB2_GLOB_H
+#define _SMB2_GLOB_H
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/workqueue.h>
+#include "smb2pdu.h"
+
+/*
+ * The sizes of various internal tables and strings
+ */
+
+#define SMB2_MIN_RCV_POOL 4
+
+
+/*
+ *****************************************************************
+ * Except the SMB2 PDUs themselves all the
+ * globally interesting structs should go here
+ *****************************************************************
+ */
+
+#define SMB2_OBSERVE_CHECK_TIME 20
+#define SMB2_OBSERVE_DELETE_TIME 45
+
+/* Values for tcon->speed set by echo command */
+#define SMB2_ECHO_FAST 1 /* Less than 3 jiffies, 12ms */
+#define SMB2_ECHO_OK 2 /* Less than 15 jiffies, 60ms */
+#define SMB2_ECHO_SLOW 3 /* Less than 125 jiffies, 1/2 sec */
+#define SMB2_ECHO_VERY_SLOW 4 /* Less than 1000 jiffies, 4 seconds */
+#define SMB2_ECHO_TIMEOUT 5 /* Else Too long, server is sick ... */
+
+/*
+ * This info hangs off the smb2_file structure, pointed to by llist.
+ * This is used to track byte stream locks on the file
+ */
+struct smb2_lock {
+ struct list_head llist; /* pointer to next smb2_lock */
+ __u64 offset;
+ __u64 length;
+ __u32 flags;
+};
+
+/*
+ * One of these for each open instance of a file
+ */
+struct smb2_search {
+ loff_t index_of_last_entry;
+ __u16 entries_in_buf;
+ __u32 resume_key;
+ char *ntwrk_buf_start;
+ char *srch_entries_start;
+ char *last_entry;
+ char *presume_name;
+ unsigned int resume_name_len;
+ bool search_end:1;
+ bool empty_dir:1;
+ bool small_buf:1; /* so we know which buf_release function to call */
+};
+
+struct smb2_file {
+ struct list_head tlist; /* pointer to next fid owned by tcon */
+ struct list_head flist; /* next fid (file instance) for this inode */
+ unsigned int uid; /* allows finding which FileInfo structure */
+ __u32 pid; /* process id who opened file */
+ u64 persist_fid;
+ u64 volatile_fid;
+ /* BB add lock scope info here if needed */ ;
+ /* lock scope id (0 if none) */
+ struct dentry *dentry; /* needed for writepage and oplock break */
+ struct mutex lock_mutex;
+ struct list_head llist; /* list of byte range locks we have. */
+ unsigned int f_flags;
+ bool close_pend:1; /* file is marked to close */
+ bool invalid_handle:1; /* file closed via session abend */
+ bool message_mode:1; /* for pipes: message vs byte mode */
+ bool is_dir:1; /* Open directory rather than file */
+ bool oplock_break_cancelled:1;
+ atomic_t count; /* reference count */
+ atomic_t wrt_pending; /* handle in use - defer close */
+ struct work_struct oplock_break; /* work for oplock breaks */
+ struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+ struct smb2_search srch_inf;
+};
+
+/*
+ * One of these for each file inode
+ */
+
+struct smb2_inode {
+ struct list_head lock_list;
+ /* BB add in lists for dirty pages i.e. write caching info for oplock */
+ struct list_head open_file_list;
+ int write_behind_rc;
+ __u32 dos_attrs; /* e.g. DOS archive bit, sparse, compressed, system */
+ __u32 ea_size;
+ unsigned long time; /* jiffies of last update/check of inode */
+ u64 server_eof; /* current file size on server */
+ bool can_cache_read:1; /* read oplock */
+ bool can_cache_all:1; /* read and writebehind oplock */
+ bool can_defer_close:1; /* batch oplock: as above but can defer close */
+ bool oplock_pending:1;
+ bool delete_pending:1; /* DELETE_ON_CLOSE is set */
+ u64 uniqueid; /* server inode number */
+ struct fscache_cookie *fscache;
+ struct inode vfs_inode;
+};
+
+static inline struct smb2_inode *
+SMB2_I(struct inode *inode)
+{
+ return container_of(inode, struct smb2_inode, vfs_inode);
+}
+
+
+static inline void smb2_file_get(struct smb2_file *smb2_file)
+{
+ atomic_inc(&smb2_file->count);
+}
+
+/* Release a reference on the file private data */
+/* BB see file.c cifsFileInfo_put MUSTFIX BB */
+static inline void smb2_file_put(struct smb2_file *smb2_file)
+{
+ if (atomic_dec_and_test(&smb2_file->count)) {
+ iput(smb2_file->dentry->d_inode);
+ kfree(smb2_file);
+ }
+}
+
+/* Represents a read request in page sized blocks.
+ * Used by smb2_readpages().*/
+struct page_req {
+ struct list_head req_node; /* list of page_req's */
+ struct list_head *page_list_index; /* list of pages */
+ struct address_space *mapping;
+ int num_pages;
+ int read_size;
+ int start_page_index; /* first expected index */
+ int end_page_index; /* last expected index */
+ struct mid_q_entry *midq; /* queue structure for demultiplex */
+};
+
+/* one of these for every pending SMB2 request to the server */
+struct smb2_mid_entry {
+ struct list_head qhead; /* mids waiting on reply from this server */
+ __u64 mid; /* multiplex id(s) */
+ __u16 pid; /* process id */
+ __u32 sequence_number; /* for signing */ /* BB check if needed */
+ unsigned long when_alloc; /* when mid was created */
+#ifdef CONFIG_CIFS_STATS2
+ unsigned long when_sent; /* time when smb send finished */
+ unsigned long when_received; /* when demux complete (taken off wire) */
+#endif
+ struct task_struct *tsk; /* task waiting for response */
+ struct smb2_hdr *resp_buf; /* response buffer */
+ char **pagebuf_list; /* response buffer */
+ int num_pages;
+ int mid_state; /* wish this were enum but can not pass to wait_event */
+ __le16 command; /* smb command code */
+ bool async_resp_rcvd:1; /* if server has responded with interim resp */
+ bool large_buf:1; /* if valid response, is pointer to large buf */
+ bool is_kmap_buf:1;
+/* bool multi_rsp:1; BB do we have to account for something in SMB2 like
+ we saw multiple trans2 responses for one request (possible in CIFS) */
+ /* Async things */
+ __u64 *mid_list; /* multiplex id(s) */
+ int *mid_state_list;
+ short int *large_buf_list;
+ unsigned int num_mid;
+ unsigned int act_num_mid;
+ unsigned int num_received;
+ unsigned int cur_id;
+ struct smb2_hdr **resp_buf_list; /* response buffer */
+ __le16 *command_list;
+ bool async:1;
+ bool complex_mid:1; /* complex entry - consists of several messages */
+ int result;
+ unsigned long last_rsp_time;
+ int (*callback)(struct smb2_mid_entry * , void *);
+ void *callback_data;
+};
+
+
+#define SMB2SEC_DEF (SMB2SEC_MAY_SIGN | SMB2SEC_MAY_NTLM | SMB2SEC_MAY_NTLMV2)
+#define SMB2SEC_MAX (SMB2SEC_MUST_SIGN | SMB2SEC_MUST_NTLMV2)
+#define SMB2SEC_AUTH_MASK (SMB2SEC_MAY_NTLM | SMB2SEC_MAY_NTLMV2 | \
+ SMB2SEC_MAY_PLNTXT | SMB2SEC_MAY_KRB5)
+/*
+ *****************************************************************
+ * Constants go here
+ *****************************************************************
+ */
+
+
+/* BB since num credits could be negotiated higher, override with max_pending */
+#define SMB2_MAX_REQ 256
+
+/* Identifiers for functions that use the open, operation, close pattern
+ * in inode.c:open_op_close() */
+#define SMB2_OP_SET_DELETE 1
+#define SMB2_OP_SET_INFO 2
+#define SMB2_OP_QUERY_INFO 3
+#define SMB2_OP_QUERY_DIR 4
+#define SMB2_OP_MKDIR 5
+#define SMB2_OP_RENAME 6
+
+/* Used when constructing chained read requests. */
+#define CHAINED_REQUEST 1
+#define START_OF_CHAIN 2
+#define END_OF_CHAIN 4
+#define RELATED_REQUEST 8
+
+/*
+ * The externs for various global variables are put here. The actual declarations
+ * of these should mostly be placed at the front of smb2fs.c to be easy to spot.
+ */
+
+
+/*
+ * Global counters, updated atomically
+ */
+
+/* Various Debug counters */
+extern atomic_t smb2_buf_alloc_count; /* current number allocated */
+#ifdef CONFIG_SMB2_STATS2
+extern atomic_t smb2_total_buf_alloc; /* total allocated over all time */
+extern atomic_t smb2_total_smbuf_alloc;
+#endif
+extern atomic_t smb2_smbuf_alloc_count;
+extern atomic_t smb2_mid_count;
+
+/* Misc globals */
+extern unsigned int SMB2_max_buf_size; /* max size not including hdr */
+extern unsigned int smb2_min_rcv; /* min size of big ntwrk buf pool */
+extern unsigned int smb2_min_small; /* min size of small buf pool */
+extern unsigned int smb2_max_pending; /* MAX requests at once to server*/
+
+#endif /* _SMB2_GLOB_H */
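smb2glob.h above introduces smb2_file_get()/smb2_file_put() reference counting on the open-file structure. A hedged usage sketch of the pair, pinning the file across asynchronous oplock-break work (queue_oplock_work and the use of system_wq are assumptions for the illustration, not code from the patch):

static void queue_oplock_work(struct smb2_file *smb2_file)
{
	smb2_file_get(smb2_file);		/* pin across the async work */
	if (!queue_work(system_wq, &smb2_file->oplock_break))
		smb2_file_put(smb2_file);	/* already queued: drop our ref */
}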
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
new file mode 100644
index 000000000000..03f69e62e0e0
--- /dev/null
+++ b/fs/cifs/smb2pdu.h
@@ -0,0 +1,996 @@
+/*
+ * fs/smb2/smb2pdu.h
+ *
+ * Copyright (c) International Business Machines Corp., 2009, 2010
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _SMB2PDU_H
+#define _SMB2PDU_H
+
+#include <net/sock.h>
+
+#define SMB2_PORT 445
+#define RFC1001_PORT 139
+
+/*
+ * Note that, due to trying to use names similar to the protocol specifications,
+ * there are many mixed case field names in the structures below. Although
+ * this does not match typical Linux kernel style, it is necessary to be
+ * able to match against the protocol specification.
+ *
+ * SMB2 commands
+ * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
+ * (i.e. no useful data other than the SMB error code itself) and are marked as such.
+ * Knowing this helps avoid response buffer allocations and copies in some cases
+ */
+
+/* List is sent on wire as little endian */
+#define SMB2_NEGOTIATE cpu_to_le16(0x0000)
+#define SMB2_SESSION_SETUP cpu_to_le16(0x0001)
+#define SMB2_LOGOFF cpu_to_le16(0x0002) /* trivial request/resp */
+#define SMB2_TREE_CONNECT cpu_to_le16(0x0003)
+#define SMB2_TREE_DISCONNECT cpu_to_le16(0x0004) /* trivial req/resp */
+#define SMB2_CREATE cpu_to_le16(0x0005)
+#define SMB2_CLOSE cpu_to_le16(0x0006)
+#define SMB2_FLUSH cpu_to_le16(0x0007) /* trivial resp */
+#define SMB2_READ cpu_to_le16(0x0008)
+#define SMB2_WRITE cpu_to_le16(0x0009)
+#define SMB2_LOCK cpu_to_le16(0x000A)
+#define SMB2_IOCTL cpu_to_le16(0x000B)
+#define SMB2_CANCEL cpu_to_le16(0x000C)
+#define SMB2_ECHO cpu_to_le16(0x000D)
+#define SMB2_QUERY_DIRECTORY cpu_to_le16(0x000E)
+#define SMB2_CHANGE_NOTIFY cpu_to_le16(0x000F)
+#define SMB2_QUERY_INFO cpu_to_le16(0x0010)
+#define SMB2_SET_INFO cpu_to_le16(0x0011)
+#define SMB2_OPLOCK_BREAK cpu_to_le16(0x0012)
+
+/* Same List of commands in host endian */
+#define SMB2NEGOTIATE 0x0000
+#define SMB2SESSION_SETUP 0x0001
+#define SMB2LOGOFF 0x0002 /* trivial request/resp */
+#define SMB2TREE_CONNECT 0x0003
+#define SMB2TREE_DISCONNECT 0x0004 /* trivial req/resp */
+#define SMB2CREATE 0x0005
+#define SMB2CLOSE 0x0006
+#define SMB2FLUSH 0x0007 /* trivial resp */
+#define SMB2READ 0x0008
+#define SMB2WRITE 0x0009
+#define SMB2LOCK 0x000A
+#define SMB2IOCTL 0x000B
+#define SMB2CANCEL 0x000C
+#define SMB2ECHO 0x000D
+#define SMB2QUERY_DIRECTORY 0x000E
+#define SMB2CHANGE_NOTIFY 0x000F
+#define SMB2QUERY_INFO 0x0010
+#define SMB2SET_INFO 0x0011
+#define SMB2OPLOCK_BREAK 0x0012
+
+#define NUMBER_OF_SMB2_COMMANDS 0x0013
+
+/* BB FIXME - analyze following three length fields BB */
+#define MAX_SMB2_SMALL_BUFFER_SIZE 460 /* big enough for most */
+#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+#define SMB2_SMALL_PATH 112 /* allows for one fewer than (460-120)/3 */
+
+/* File Attributes */
+
+#define FILE_ATTRIBUTE_READONLY 0x00000001
+#define FILE_ATTRIBUTE_HIDDEN 0x00000002
+#define FILE_ATTRIBUTE_SYSTEM 0x00000004
+#define FILE_ATTRIBUTE_DIRECTORY 0x00000010
+#define FILE_ATTRIBUTE_ARCHIVE 0x00000020
+#define FILE_ATTRIBUTE_NORMAL 0x00000080
+#define FILE_ATTRIBUTE_TEMPORARY 0x00000100
+#define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200
+#define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400
+#define FILE_ATTRIBUTE_COMPRESSED 0x00000800
+#define FILE_ATTRIBUTE_OFFLINE 0x00001000
+#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000
+#define FILE_ATTRIBUTE_ENCRYPTED 0x00004000
+
+/*
+ * SMB2 flag definitions
+ */
+
+#define SMB2FLG_RESPONSE 0x0001 /* this PDU is a response from server */
+
+/*
+ * SMB2 Header Definition
+ *
+ * "MBZ" : Must be Zero
+ * "BB" : BugBug, Something to check/review/analyze later
+ * "PDU" : "Protocol Data Unit" (ie a network "frame")
+ *
+ */
+struct smb2_hdr {
+ __be32 smb2_buf_length; /* big endian on wire */
+ /* length is only two or three bytes - with
+ one or two byte type preceding it that MBZ */
+ __u8 ProtocolId[4]; /* 0xFE 'S' 'M' 'B' */
+ __le16 StructureSize; /* 64 */
+ __le16 CreditCharge; /* MBZ */
+ __le32 Status; /* Error from server */
+ __le16 Command;
+ __le16 CreditRequest; /* CreditResponse */
+ __le32 Flags;
+ __le32 NextCommand;
+ __u64 MessageId; /* opaque - so can stay little endian */
+ __le32 ProcessId;
+ __u32 TreeId; /* opaque - so do not make little endian */
+ __u64 SessionId; /* opaque - so do not make little endian */
+ __u8 Signature[16];
+ /* Usually followed by __le16 StructureSize of request or response
+ structure which follows */
+} __attribute__((packed));
+
+struct smb2_async_hdr {
+ __be32 smb2_buf_length; /* big endian on wire */
+ /* length is only two or three bytes - with
+ one or two byte type preceding it that MBZ */
+ __u8 ProtocolId[4]; /* 0xFE 'S' 'M' 'B' */
+ __le16 StructureSize; /* 64 */
+ __le16 CreditCharge; /* MBZ */
+ __le32 Status; /* Error from server */
+ __le16 Command;
+ __le16 CreditResponse;
+ __le32 Flags;
+ __le32 NextCommand;
+ __u64 MessageId; /* opaque - so can stay little endian */
+ __u64 AsyncId;
+ __u64 SessionId; /* opaque - so do not make little endian */
+ __u8 Signature[16];
+ /* Usually followed by __le16 StructureSize of request or response
+ structure which follows */
+} __attribute__((packed));
+
+struct smb2_pdu {
+ struct smb2_hdr hdr;
+ __le16 StructureSize2; /* size of wct area (varies, request specific) */
+} __attribute__((packed));
+
+/*
+ * SMB2 flag definitions
+ */
+#define SMB2_FLAGS_SERVER_TO_REDIR cpu_to_le32(0x00000001) /* Response */
+#define SMB2_FLAGS_ASYNC_COMMAND cpu_to_le32(0x00000002)
+#define SMB2_FLAGS_RELATED_OPERATIONS cpu_to_le32(0x00000004)
+#define SMB2_FLAGS_SIGNED cpu_to_le32(0x00000008)
+#define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000)
+
+/*
+ * Definitions for SMB2 Protocol Data Units (network frames)
+ *
+ * See MS-SMB2.PDF specification for protocol details.
+ * The Naming convention is the lower case version of the SMB2
+ * command code name for the struct. Note that structures must be packed.
+ *
+ */
+struct smb2_err_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize;
+ __le16 Reserved; /* MBZ */
+ __le32 ByteCount; /* even if zero, at least one byte follows */
+ __u8 ErrorData[1]; /* variable length */
+} __attribute__((packed));
+
+/* Symlink error response for error STATUS_STOPPED_ON_SYMLINK
+ Following is the ErrorData structure */
+struct symlink_err_data {
+ __le32 SymLinkLength;
+ __le32 SymLinkErrorTag;
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __le16 Reserved; /* MBZ */
+ __le16 SubstituteNameOffset;
+ __le16 SubstituteNameLength;
+ __le16 PrintNameOffset;
+ __le16 PrintNameLength;
+ __le32 Flags;
+ __u8 PathBuffer[0]; /* variable length */
+} __attribute__((packed));
+
+struct smb2_negotiate_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 36 */
+ __le16 DialectCount;
+ __le16 SecurityMode;
+ __le16 Reserved; /* MBZ */
+ __le32 Capabilities;
+ __u8 ClientGUID[16]; /* MBZ */
+ __le64 ClientStartTime; /* MBZ */
+ __le16 Dialects[2]; /* variable length */ /* Must include 0x0202 */
+} __attribute__((packed));
+/* SecurityMode flags */
+#define SMB2_NEGOTIATE_SIGNING_ENABLED 0x0001
+#define SMB2_NEGOTIATE_SIGNING_REQUIRED 0x0002
+/* Capabilities flags */
+#define SMB2_GLOBAL_CAP_DFS 0x00000001
+#define SMB2_GLOBAL_CAP_LEASING 0x00000002 /* Resp only New to SMB2.1 */
+#define SMB2_GLOBAL_CAP_LARGE_MTU 0X00000004 /* Resp only New to SMB2.1 */
+
+struct smb2_negotiate_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 65 */
+ __le16 SecurityMode;
+ __le16 DialectRevision; /* Should be 0x0202 */
+ __le16 Reserved; /* MBZ */
+ __u8 ServerGUID[16];
+ __le32 Capabilities;
+ __le32 MaxTransactSize;
+ __le32 MaxReadSize;
+ __le32 MaxWriteSize;
+ __le64 SystemTime; /* MBZ */
+ __le64 ServerStartTime;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+ __le32 Reserved2; /* may be any value, Ignore */
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+} __attribute__((packed));
+
+struct sess_setup_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 25 */
+ __u8 VcNumber;
+ __u8 SecurityMode;
+ __le32 Capabilities;
+ __le32 Channel;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+ __le64 PreviousSessionId;
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+} __attribute__((packed));
+
+/* Currently defined SessionFlags */
+#define SMB2_SESSION_FLAG_IS_GUEST 0x0001
+#define SMB2_SESSION_FLAG_IS_NULL 0x0002
+struct sess_setup_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 SessionFlags;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+} __attribute__((packed));
+
+struct logoff_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __attribute__((packed));
+
+struct logoff_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __attribute__((packed));
+
+struct tree_connect_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 Reserved;
+ __le16 PathOffset;
+ __le16 PathLength;
+ __u8 Buffer[1]; /* variable length */
+} __attribute__((packed));
+
+struct tree_connect_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 16 */
+ __u8 ShareType; /* see below */
+ __u8 Reserved;
+ __le32 ShareFlags; /* see below */
+ __le32 Capabilities; /* see below */
+ __le32 MaximalAccess;
+} __attribute__((packed));
+
+/* Possible ShareType values */
+#define SMB2_SHARE_TYPE_DISK 0x01
+#define SMB2_SHARE_TYPE_PIPE 0x02
+#define SMB2_SHARE_TYPE_PRINT 0x03
+
+/* Possible ShareFlags - exactly one and only one of the first 4 caching flags
+ must be set (any of the remaining, SHI1005, flags may be set individually
+ or in combination) */
+#define SMB2_SHAREFLAG_MANUAL_CACHING 0x00000000
+#define SMB2_SHAREFLAG_AUTO_CACHING 0x00000010
+#define SMB2_SHAREFLAG_VDO_CACHING 0x00000020
+#define SMB2_SHAREFLAG_NO_CACHING 0x00000030
+#define SHI1005_FLAGS_DFS 0x00000001
+#define SHI1005_FLAGS_DFS_ROOT 0x00000002
+#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS 0x00000100
+#define SHI1005_FLAGS_FORCE_SHARED_DELETE 0x00000200
+#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING 0x00000400
+#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM 0x00000800
+#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK 0x00001000
+#define SHI1005_FLAGS_ENABLE_HASH 0x00002000
+
+/* Possible share capabilities */
+#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008)
+
+struct tree_disconnect_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __attribute__((packed));
+
+struct tree_disconnect_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __attribute__((packed));
+
+/* Oplock levels */
+#define SMB2_OPLOCK_LEVEL_NONE 0x00
+#define SMB2_OPLOCK_LEVEL_II 0x01
+#define SMB2_OPLOCK_LEVEL_EXCLUSIVE 0x08
+#define SMB2_OPLOCK_LEVEL_BATCH 0x09
+#define SMB2_OPLOCK_LEVEL_LEASE 0xFF
+
+/* Desired Access Flags */
+#define FILE_READ_DATA_LE cpu_to_le32(0x00000001)
+#define FILE_WRITE_DATA_LE cpu_to_le32(0x00000002)
+#define FILE_APPEND_DATA_LE cpu_to_le32(0x00000004)
+#define FILE_READ_EA_LE cpu_to_le32(0x00000008)
+#define FILE_WRITE_EA_LE cpu_to_le32(0x00000010)
+#define FILE_EXECUTE_LE cpu_to_le32(0x00000020)
+#define FILE_READ_ATTRIBUTES_LE cpu_to_le32(0x00000080)
+#define FILE_WRITE_ATTRIBUTES_LE cpu_to_le32(0x00000100)
+#define FILE_DELETE_LE cpu_to_le32(0x00010000)
+#define FILE_READ_CONTROL_LE cpu_to_le32(0x00020000)
+#define FILE_WRITE_DAC_LE cpu_to_le32(0x00040000)
+#define FILE_WRITE_OWNER_LE cpu_to_le32(0x00080000)
+#define FILE_SYNCHRONIZE_LE cpu_to_le32(0x00100000)
+#define FILE_ACCESS_SYSTEM_SECURITY_LE cpu_to_le32(0x01000000)
+#define FILE_MAXIMAL_ACCESS_LE cpu_to_le32(0x02000000)
+#define FILE_GENERIC_ALL_LE cpu_to_le32(0x10000000)
+#define FILE_GENERIC_EXECUTE_LE cpu_to_le32(0x20000000)
+#define FILE_GENERIC_WRITE_LE cpu_to_le32(0x40000000)
+#define FILE_GENERIC_READ_LE cpu_to_le32(0x80000000)
+
+/* ShareAccess Flags */
+#define FILE_SHARE_READ_LE cpu_to_le32(0x00000001)
+#define FILE_SHARE_WRITE_LE cpu_to_le32(0x00000002)
+#define FILE_SHARE_DELETE_LE cpu_to_le32(0x00000004)
+#define FILE_SHARE_ALL_LE cpu_to_le32(0x00000007)
+
+/* CreateDisposition Flags */
+#define FILE_SUPERSEDE_LE cpu_to_le32(0x00000000)
+#define FILE_OPEN_LE cpu_to_le32(0x00000001)
+#define FILE_CREATE_LE cpu_to_le32(0x00000002)
+#define FILE_OPEN_IF_LE cpu_to_le32(0x00000003)
+#define FILE_OVERWRITE_LE cpu_to_le32(0x00000004)
+#define FILE_OVERWRITE_IF_LE cpu_to_le32(0x00000005)
+
+/* CreateOptions Flags */
+#define FILE_DIRECTORY_FILE_LE cpu_to_le32(0x00000001)
+#define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
+#define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
+#define FILE_NO_INTERMEDIATE_BUFFERRING_LE cpu_to_le32(0x00000008)
+#define FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010)
+#define FILE_SYNCHRONOUS_IO_NON_ALERT_LE cpu_to_le32(0x00000020)
+#define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
+#define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
+#define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
+#define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
+#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
+#define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
+#define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
+#define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
+#define FILE_RESERVE_OPFILTER_LE cpu_to_le32(0x00100000)
+#define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
+#define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
+#define FILE_OPEN_FOR_FREE_SPACE_QUERY_LE cpu_to_le32(0x00800000)
+
+#define FILE_READ_RIGHTS_LE (FILE_READ_DATA_LE | FILE_READ_EA_LE \
+ | FILE_READ_ATTRIBUTES_LE)
+#define FILE_WRITE_RIGHTS_LE (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE \
+ | FILE_WRITE_EA_LE | FILE_WRITE_ATTRIBUTES_LE)
+#define FILE_EXEC_RIGHTS_LE (FILE_EXECUTE_LE)
+
+/* Impersonation Levels */
+#define IL_ANONYMOUS cpu_to_le32(0x00000000)
+#define IL_IDENTIFICATION cpu_to_le32(0x00000001)
+#define IL_IMPERSONATION cpu_to_le32(0x00000002)
+#define IL_DELEGATE cpu_to_le32(0x00000003)
+
+/* Create Context Values */
+#define SMB2_CREATE_EA_BUFFER "ExtA" /* extended attributes */
+#define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
+#define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
+#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
+#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
+#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
+#define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
+#define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
+#define SMB2_CREATE_REQUEST_LEASE "RqLs"
+
+struct create_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 57 */
+ __u8 SecurityFlags;
+ __u8 RequestedOplockLevel;
+ __le32 ImpersonationLevel;
+ __le64 SmbCreateFlags;
+ __le64 Reserved;
+ __le32 DesiredAccess;
+ __le32 FileAttributes;
+ __le32 ShareAccess;
+ __le32 CreateDisposition;
+ __le32 CreateOptions;
+ __le16 NameOffset;
+ __le16 NameLength;
+ __le32 CreateContextsOffset;
+ __le32 CreateContextsLength;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct create_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 89 */
+ __u8 OplockLevel;
+ __u8 Reserved;
+ __le32 CreateAction;
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 AllocationSize;
+ __le64 EndofFile;
+ __le32 FileAttributes;
+ __le32 Reserved2;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __le32 CreateContextsOffset;
+ __le32 CreateContextsLength;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+/* Currently defined values for close flags */
+#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
+struct close_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 24 */
+ __le16 Flags;
+ __le32 Reserved;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+} __attribute__((packed));
+
+struct close_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* 60 */
+ __le16 Flags;
+ __le32 Reserved;
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
+ __le64 EndOfFile;
+ __le32 Attributes;
+} __attribute__((packed));
+
+struct flush_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 24 */
+ __le16 Reserved1;
+ __le32 Reserved2;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+} __attribute__((packed));
+
+struct flush_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize;
+ __le16 Reserved;
+} __attribute__((packed));
+
+struct read_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 49 */
+ __u8 Padding; /* offset from start of SMB2 header to place read */
+ __u8 Reserved;
+ __le32 Length;
+ __le64 Offset;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __le32 MinimumCount;
+ __le32 Channel; /* Reserved MBZ */
+ __le32 RemainingBytes;
+ __le16 ReadChannelInfoOffset; /* Reserved MBZ */
+ __le16 ReadChannelInfoLength; /* Reserved MBZ */
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct read_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 17 */
+ __u8 DataOffset;
+ __u8 Reserved;
+ __le32 DataLength;
+ __le32 DataRemaining;
+ __u32 Reserved2;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+/* For write request Flags field below the following flag is defined: */
+#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001
+
+struct write_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 49 */
+ __le16 DataOffset; /* offset from start of SMB2 header to write data */
+ __le32 Length;
+ __le64 Offset;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __le32 Channel; /* Reserved MBZ */
+ __le32 RemainingBytes;
+ __le16 WriteChannelInfoOffset; /* Reserved MBZ */
+ __le16 WriteChannelInfoLength; /* Reserved MBZ */
+ __le32 Flags;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct write_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 17 */
+ __u8 DataOffset;
+ __u8 Reserved;
+ __le32 DataLength;
+ __le32 DataRemaining;
+ __u32 Reserved2;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+#define SMB2_LOCKFLAG_SHARED_LOCK 0x0001
+#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK 0x0002
+#define SMB2_LOCKFLAG_UNLOCK 0x0004
+#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
+
+struct smb2_lock_element {
+ __le64 Offset;
+ __le64 Length;
+ __le16 Flags;
+ __le16 Reserved;
+} __attribute__((packed));
+
+#define LOCKING_ANDX_OPLOCK_RELEASE 0x02
+
+struct lock_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 48 */
+ __le16 LockCount;
+ __le32 Reserved;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ /* Followed by at least one */
+ struct smb2_lock_element locks[1];
+} __attribute__((packed));
+
+struct lock_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __attribute__((packed));
+
+struct echo_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __u16 Reserved;
+} __attribute__((packed));
+
+struct echo_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __u16 Reserved;
+} __attribute__((packed));
+
+/* search (query_directory) Flags field */
+#define SMB2_RESTART_SCANS 0x01
+#define SMB2_RETURN_SINGLE_ENTRY 0x02
+#define SMB2_INDEX_SPECIFIED 0x04
+#define SMB2_REOPEN 0x10
+
+struct query_directory_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 33 */
+ __u8 FileInformationClass;
+ __u8 Flags;
+ __le32 FileIndex;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __le16 FileNameOffset;
+ __le16 FileNameLength;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct query_directory_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 OutputBufferOffset;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+/* Possible InfoType values */
+#define SMB2_O_INFO_FILE 0x01
+#define SMB2_O_INFO_FILESYSTEM 0x02
+#define SMB2_O_INFO_SECURITY 0x03
+#define SMB2_O_INFO_QUOTA 0x04
+
+struct query_info_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 41 */
+ __u8 InfoType;
+ __u8 FileInfoClass;
+ __le32 OutputBufferLength;
+ __le16 InputBufferOffset;
+ __u16 Reserved;
+ __le32 InputBufferLength;
+ __le32 AdditionalInformation;
+ __le32 Flags;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct query_info_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 OutputBufferOffset;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct set_info_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 33 */
+ __u8 InfoType;
+ __u8 FileInfoClass;
+ __le32 BufferLength;
+ __le16 BufferOffset;
+ __u16 Reserved;
+ __le32 AdditionalInformation;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct set_info_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 2 */
+} __attribute__((packed));
+
+struct ioctl_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 57 */
+ __le16 Reserved;
+ __le32 Ctlcode;
+ __le64 Fileid[2]; /* persistent and volatile */
+ __le32 Inputoffset;
+ __le32 Inputcount;
+ __le32 Maxinputresp;
+ __le32 Outputoffset;
+ __le32 Outputcount;
+ __le32 Maxoutputresp;
+ __le32 Flags;
+ __le32 Reserved2;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+struct ioctl_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 49 */
+ __le16 Reserved;
+ __le32 Ctlcode;
+ __le64 Fileid[2]; /* persistent and volatile */
+ __le32 Inputoffset;
+ __le32 Inputcount;
+ __le32 Outputoffset;
+ __le32 Outputcount;
+ __le32 Flags;
+ __le32 Reserved2;
+ __u8 Buffer[1];
+} __attribute__((packed));
+
+/*****************************************************************
+ * All constants go here
+ *****************************************************************
+ */
+
+/*
+ * Starting value for maximum SMB size negotiation
+ */
+#define SMB2_MAX_MSGSIZE (4*4096)
+
+
+#define SMB2_NETWORK_OPSYS "SMB2 VFS Client for Linux"
+
+/*
+ * PDU infolevel structure definitions
+ * BB consider moving to a different header
+ */
+
+/* File System Information Classes */
+#define FS_VOLUME_INFORMATION 1 /* Query */
+#define FS_LABEL_INFORMATION 2 /* Set */
+#define FS_SIZE_INFORMATION 3 /* Query */
+#define FS_DEVICE_INFORMATION 4 /* Query */
+#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
+#define FS_CONTROL_INFORMATION 6 /* Query, Set */
+#define FS_FULL_SIZE_INFORMATION 7 /* Query */
+#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
+#define FS_DRIVER_PATH_INFORMATION 9 /* Query */
+
+struct fs_full_size_info {
+ __le64 TotalAllocationUnits;
+ __le64 CallerAvailableAllocationUnits;
+ __le64 ActualAvailableAllocationUnits;
+ __le32 SectorsPerAllocationUnit;
+ __le32 BytesPerSector;
+};
+
+struct fs_volume_info {
+ __le64 VolumeCreationTime;
+ __le32 VolumeSerialNumber;
+ __le32 VolumeLabelLength;
+ __u8 SupportsObjects; /* 1 = supports object oriented fs objects */
+ __u8 Reserved;
+ /* Variable Length VolumeLabel follows */
+};
+
+struct fs_device_info {
+ __le32 DeviceType; /* see below */
+ __le32 DeviceCharacteristics;
+} __attribute__((packed)); /* device info level 0x104 */
+
+/* DeviceType Flags (remember to endian convert them) */
+#define FILE_DEVICE_CD_ROM 0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
+#define FILE_DEVICE_DFS 0x00000006
+#define FILE_DEVICE_DISK 0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
+#define FILE_DEVICE_FILE_SYSTEM 0x00000009
+#define FILE_DEVICE_NAMED_PIPE 0x00000011
+#define FILE_DEVICE_NETWORK 0x00000012
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+#define FILE_DEVICE_NULL 0x00000015
+#define FILE_DEVICE_PARALLEL_PORT 0x00000016
+#define FILE_DEVICE_PRINTER 0x00000018
+#define FILE_DEVICE_SERIAL_PORT 0x0000001b
+#define FILE_DEVICE_STREAMS 0x0000001e
+#define FILE_DEVICE_TAPE 0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
+#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
+#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
+
+#define MAX_FS_NAME 20 /* twice the size of the longest name ("FAT32") is plenty */
+/* this field is for debugging/informational purposes anyway, so could be
+truncated if extremely long fs names were later created */
+struct fs_attribute_info {
+ __le32 Attributes;
+ __le32 MaximumComponentNameLength;
+ __le32 FileSystemNameLength;
+ char FileSystemName[MAX_FS_NAME]; /* not null terminated */
+} __attribute__((packed));
+
+/* Attributes (remember to endian convert them) */
+#define FILE_CASE_SENSITIVE_SEARCH 0x00000001
+#define FILE_CASE_PRESERVED_NAMES 0x00000002
+#define FILE_UNICODE_ON_DISK 0x00000004
+#define FILE_PERSISTENT_ACLS 0x00000008
+#define FILE_FILE_COMPRESSION 0x00000010
+#define FILE_VOLUME_QUOTAS 0x00000020
+#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
+#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
+#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
+#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
+#define FILE_SUPPORTS_ENCRYPTION 0x00020000
+#define FILE_NAMED_STREAMS 0x00040000
+#define FILE_READ_ONLY_VOLUME 0x00080000
+#define FILE_SEQUENTIAL_WRITE_ONCE 0x00100000
+#define FILE_SUPPORTS_TRANSACTIONS 0x00200000
+#define FILE_SUPPORTS_HARD_LINKS 0x00400000
+#define FILE_SUPPORTS_EXTENDED_ATTRS 0x00800000
+#define FILE_SUPPORTS_OPEN_BY_FILE_ID 0x01000000
+#define FILE_SUPPORTS_USN_JOURNAL 0x02000000
+
+/* partial list of QUERY INFO levels */
+#define FILE_DIRECTORY_INFORMATION 1
+#define FILE_FULL_DIRECTORY_INFORMATION 2
+#define FILE_BOTH_DIRECTORY_INFORMATION 3
+#define FILE_BASIC_INFORMATION 4
+#define FILE_STANDARD_INFORMATION 5
+#define FILE_INTERNAL_INFORMATION 6
+#define FILE_EA_INFORMATION 7
+#define FILE_ACCESS_INFORMATION 8
+#define FILE_NAME_INFORMATION 9
+#define FILE_RENAME_INFORMATION 10
+#define FILE_LINK_INFORMATION 11
+#define FILE_NAMES_INFORMATION 12
+#define FILE_DISPOSITION_INFORMATION 13
+#define FILE_POSITION_INFORMATION 14
+#define FILE_FULL_EA_INFORMATION 15
+#define FILE_MODE_INFORMATION 16
+#define FILE_ALIGNMENT_INFORMATION 17
+#define FILE_ALL_INFORMATION 18
+#define FILE_ALLOCATION_INFORMATION 19
+#define FILE_END_OF_FILE_INFORMATION 20
+#define FILE_ALTERNATE_NAME_INFORMATION 21
+#define FILE_STREAM_INFORMATION 22
+#define FILE_PIPE_INFORMATION 23
+#define FILE_PIPE_LOCAL_INFORMATION 24
+#define FILE_PIPE_REMOTE_INFORMATION 25
+#define FILE_MAILSLOT_QUERY_INFORMATION 26
+#define FILE_MAILSLOT_SET_INFORMATION 27
+#define FILE_COMPRESSION_INFORMATION 28
+#define FILE_OBJECT_ID_INFORMATION 29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION 31
+#define FILE_QUOTA_INFORMATION 32
+#define FILE_REPARSE_POINT_INFORMATION 33
+#define FILE_NETWORK_OPEN_INFORMATION 34
+#define FILE_ATTRIBUTE_TAG_INFORMATION 35
+#define FILE_TRACKING_INFORMATION 36
+#define FILEID_BOTH_DIRECTORY_INFORMATION 37
+#define FILEID_FULL_DIRECTORY_INFORMATION 38
+#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+#define FILE_SHORT_NAME_INFORMATION 40
+#define FILE_SFIO_RESERVE_INFORMATION 44
+#define FILE_SFIO_VOLUME_INFORMATION 45
+#define FILE_HARD_LINK_INFORMATION 46
+#define FILE_NORMALIZED_NAME_INFORMATION 48
+#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+#define FILE_STANDARD_LINK_INFORMATION 54
+
+struct file_both_directory_info {
+ __le32 next_entry_offset;
+ __u32 file_index;
+ __le64 creation_time;
+ __le64 last_access_time;
+ __le64 last_write_time;
+ __le64 change_time;
+ __le64 end_of_file;
+ __le64 allocation_size;
+ __le32 file_attribute;
+ __le32 filename_length;
+ __le32 easize;
+ __u8 shortname_length;
+ __u8 reserved1;
+ __u8 short_name[24];
+ __u16 reserved2;
+ __le64 file_id;
+ char filename[0];
+} __attribute__((packed));
+
+struct file_full_directory_info {
+ __le32 next_entry_offset;
+ __u32 file_index;
+ __le64 creation_time;
+ __le64 last_access_time;
+ __le64 last_write_time;
+ __le64 change_time;
+ __le64 end_of_file;
+ __le64 allocation_size;
+ __le32 file_attribute;
+ __le32 filename_length;
+ __le32 easize;
+ __u32 reserved2;
+ __le64 file_id;
+ char filename[0];
+} __attribute__((packed));
+
+typedef struct { /* data block encoding of response to level 5 query */
+ __le64 AllocationSize;
+ __le64 EndOfFile; /* size ie offset to first free byte in file */
+ __le32 NumberOfLinks; /* hard links */
+ __u8 DeletePending;
+ __u8 Directory;
+ __u16 Pad2;
+} __attribute__((packed)) FILE_STANDARD_INFO; /* level 5 QPathInfo */
+
+struct FileRenameInformation { /* encoding of request for level 10 */
+ __u8 ReplaceIfExists; /* 1 = replace existing file with new */
+ /* 0 = fail if target file already exists */
+ __u8 Reserved[7];
+ __u64 RootDirectory; /* MBZ for network operations (so says the spec, but why?) */
+ __le32 FileNameLength;
+ char FileName[0]; /* New name to be assigned */
+} __attribute__((packed)); /* level 10 set */
+
+struct FileLinkInformation { /* encoding of request for level 11 */
+ __u8 ReplaceIfExists; /* 1 = replace existing link with new */
+ /* 0 = fail if link already exists */
+ __u8 Reserved[7];
+ __u64 RootDirectory; /* MBZ for network operations (so says the spec, but why?) */
+ __le32 FileNameLength;
+ char FileName[0]; /* Name to be assigned to new link */
+} __attribute__((packed)); /* level 11 set */
+
+
+/* This level 18, although the struct has the same name, is different from cifs
+ level 0x107. Level 0x107 has an extra u64 between AccessFlags and
+ CurrentByteOffset */
+typedef struct { /* data block encoding of response to level 18 */
+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 Attributes;
+ __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
+ __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
+ __le64 EndOfFile; /* size ie offset to first free byte in file */
+ __le32 NumberOfLinks; /* hard links */
+ __u8 DeletePending;
+ __u8 Directory;
+ __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
+ __le64 IndexNumber;
+ __le32 EASize;
+ __le32 AccessFlags;
+ __le64 CurrentByteOffset;
+ __le32 Mode;
+ __le32 AlignmentRequirement;
+ __le32 FileNameLength;
+ char FileName[1];
+} __attribute__((packed)) FILE_ALL_INFO_SMB2; /* level 18 Query */
+
+typedef struct { /* data block encoding of request to level 15 */
+ __le64 NextEntryOffset;
+ __u8 EaNameLength;
+ char EaName[1];
+} __attribute__((packed)) FILE_GET_EA_INFO; /* level 15 Query */
+
+typedef struct { /* data block encoding of response to level 15 */
+ __le32 NextEntryOffset;
+ __u8 Flags;
+ __u8 EaNameLength;
+ __le16 EaValueLength;
+ char EaName[1];
+ char EaValue[1];
+} __attribute__((packed)) FILE_FULL_EA_INFO; /* level 15 Query */
+
+/* DFS Flags */ /* BB check if these changed in SMB2 from old transact2
+ get DFS referral flags BB */
+#define DFSREF_REFERRAL_SERVER 0x00000001 /* all targets are DFS roots */
+#define DFSREF_STORAGE_SERVER 0x00000002 /* no further ref requests needed */
+#define DFSREF_TARGET_FAILBACK 0x00000004 /* only for DFS referral version 4 */
+
+/* BB note that various other infolevels are defined in cifspdu.h, although most
+ are obsolete now (for SMB2) or unneeded; some may be useful to move here */
+
+#define SMB2_IOCTL_IS_FSCTL 0x1
+#define SYMLINK_FLAG_RELATIVE 0x1
+#define MS_REPARSE_TAG 0xA000000C
+
+struct symlink_reparse_data_buf {
+ __le32 reparse_tag;
+ __le16 reparse_datalength;
+ __le16 reserved1;
+ __le16 sub_nameoffset;
+ __le16 sub_namelength;
+ __le16 print_nameoffset;
+ __le16 print_namelength;
+ __le32 flags;
+ char pathbuffer[1];
+} __attribute__((packed));
+
+#endif /* _SMB2PDU_H */
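Note: the request and response layouts above all end in a one-byte Buffer[1] placeholder; on the wire the variable-length payload begins where Buffer sits, so a PDU is sized from the fixed portion plus the payload rather than from sizeof() of the struct. A minimal sketch of that calculation, illustrative only and not part of the patch (the helper name is hypothetical; offsetof comes from <linux/stddef.h>):

	/* Sketch: bytes needed for a write_req carrying data_len bytes of payload. */
	static inline size_t smb2_write_req_wire_len(size_t data_len)
	{
		/* Fixed part up to the Buffer[1] placeholder, plus the actual data. */
		return offsetof(struct write_req, Buffer) + data_len;
	}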
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 46d8756f2b24..29dd7ed12d23 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -302,7 +302,7 @@ static int wait_for_free_request(struct TCP_Server_Info *server,
return 0;
}
-static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
+static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
if (ses->server->tcpStatus == CifsExiting) {
@@ -360,7 +360,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
return rc;
/* enable signing if server requires it */
- if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
mutex_lock(&server->srv_mutex);
@@ -414,7 +414,7 @@ out_err:
*
*/
int
-SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, int flags)
{
int rc;
@@ -509,7 +509,7 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
}
int
-SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
const int flags)
{
@@ -636,7 +636,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
dump_smb(midQ->resp_buf, 80);
/* convert the length into a more usable form */
if ((receive_len > 24) &&
- (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
+ (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(midQ->resp_buf,
ses->server,
@@ -674,7 +674,7 @@ out:
}
int
-SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned, const int long_op)
{
@@ -789,7 +789,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
dump_smb(out_buf, 92);
/* convert the length into a more usable form */
if ((receive_len > 24) &&
- (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
+ (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
ses->server,
@@ -827,12 +827,12 @@ out:
blocking lock to return. */
static int
-send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
+send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_hdr *in_buf,
struct smb_hdr *out_buf)
{
int bytes_returned;
- struct cifsSesInfo *ses = tcon->ses;
+ struct cifs_ses *ses = tcon->ses;
LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
/* We just modify the current in_buf to change
@@ -849,7 +849,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
}
int
-SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
+SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned)
{
@@ -857,7 +857,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
int rstart = 0;
unsigned int receive_len;
struct mid_q_entry *midQ;
- struct cifsSesInfo *ses;
+ struct cifs_ses *ses;
if (tcon == NULL || tcon->ses == NULL) {
cERROR(1, "Null smb session");
@@ -1001,7 +1001,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
dump_smb(out_buf, 92);
/* convert the length into a more usable form */
if ((receive_len > 24) &&
- (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
+ (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
ses->server,
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index eae2a1491608..2ed33a221930 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -49,7 +49,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path = NULL;
@@ -109,7 +109,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
@@ -220,7 +220,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
@@ -352,7 +352,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifsTconInfo *pTcon;
+ struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
diff --git a/fs/dcache.c b/fs/dcache.c
index 2a6bd9a4ae97..79802bd790e4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1781,7 +1781,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
* false-negative result. d_lookup() protects against concurrent
* renames using rename_lock seqlock.
*
- * See Documentation/vfs/dcache-locking.txt for more details.
+ * See Documentation/filesystems/path-lookup.txt for more details.
*/
hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
struct inode *i;
@@ -1901,7 +1901,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
* false-negative result. d_lookup() protects against concurrent
* renames using rename_lock seqlock.
*
- * See Documentation/vfs/dcache-locking.txt for more details.
+ * See Documentation/filesystems/path-lookup.txt for more details.
*/
rcu_read_lock();
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b044705eedd4..dcb5577cde1d 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -645,11 +645,11 @@ static int dio_send_cur_page(struct dio *dio)
/*
* See whether this new request is contiguous with the old.
*
- * Btrfs cannot handl having logically non-contiguous requests
- * submitted. For exmple if you have
+ * Btrfs cannot handle having logically non-contiguous requests
+ * submitted. For example if you have
*
* Logical: [0-4095][HOLE][8192-12287]
- * Phyiscal: [0-4095] [4096-8181]
+ * Physical: [0-4095] [4096-8191]
*
* We cannot submit those pages together as one BIO. So if our
* current logical offset in the file does not equal what would
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 6fc4f319b550..534c1d46e69e 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct dentry *lower_dentry;
struct vfsmount *lower_mnt;
- struct dentry *dentry_save;
- struct vfsmount *vfsmount_save;
+ struct dentry *dentry_save = NULL;
+ struct vfsmount *vfsmount_save = NULL;
int rc = 1;
- if (nd->flags & LOOKUP_RCU)
+ if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
goto out;
- dentry_save = nd->path.dentry;
- vfsmount_save = nd->path.mnt;
- nd->path.dentry = lower_dentry;
- nd->path.mnt = lower_mnt;
+ if (nd) {
+ dentry_save = nd->path.dentry;
+ vfsmount_save = nd->path.mnt;
+ nd->path.dentry = lower_dentry;
+ nd->path.mnt = lower_mnt;
+ }
rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
- nd->path.dentry = dentry_save;
- nd->path.mnt = vfsmount_save;
+ if (nd) {
+ nd->path.dentry = dentry_save;
+ nd->path.mnt = vfsmount_save;
+ }
if (dentry->d_inode) {
struct inode *lower_inode =
ecryptfs_inode_to_lower(dentry->d_inode);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index dbc84ed96336..e00753496e3e 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry,
u32 flags);
int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
struct dentry *lower_dentry,
- struct inode *ecryptfs_dir_inode,
- struct nameidata *ecryptfs_nd);
+ struct inode *ecryptfs_dir_inode);
int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
size_t *decrypted_name_size,
struct dentry *ecryptfs_dentry,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 81e10e6a9443..7d1050e254f9 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
const struct file_operations ecryptfs_dir_fops = {
.readdir = ecryptfs_readdir,
+ .read = generic_read_dir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index bd33f87a1907..b592938a84bc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
unsigned int flags_save;
int rc;
- dentry_save = nd->path.dentry;
- vfsmount_save = nd->path.mnt;
- flags_save = nd->flags;
- nd->path.dentry = lower_dentry;
- nd->path.mnt = lower_mnt;
- nd->flags &= ~LOOKUP_OPEN;
+ if (nd) {
+ dentry_save = nd->path.dentry;
+ vfsmount_save = nd->path.mnt;
+ flags_save = nd->flags;
+ nd->path.dentry = lower_dentry;
+ nd->path.mnt = lower_mnt;
+ nd->flags &= ~LOOKUP_OPEN;
+ }
rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
- nd->path.dentry = dentry_save;
- nd->path.mnt = vfsmount_save;
- nd->flags = flags_save;
+ if (nd) {
+ nd->path.dentry = dentry_save;
+ nd->path.mnt = vfsmount_save;
+ nd->flags = flags_save;
+ }
return rc;
}
@@ -241,8 +245,7 @@ out:
*/
int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
struct dentry *lower_dentry,
- struct inode *ecryptfs_dir_inode,
- struct nameidata *ecryptfs_nd)
+ struct inode *ecryptfs_dir_inode)
{
struct dentry *lower_dir_dentry;
struct vfsmount *lower_mnt;
@@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
goto out;
if (special_file(lower_inode->i_mode))
goto out;
- if (!ecryptfs_nd)
- goto out;
/* Released in this function */
page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
if (!page_virt) {
@@ -349,75 +350,6 @@ out:
}
/**
- * ecryptfs_new_lower_dentry
- * @name: The name of the new dentry.
- * @lower_dir_dentry: Parent directory of the new dentry.
- * @nd: nameidata from last lookup.
- *
- * Create a new dentry or get it from lower parent dir.
- */
-static struct dentry *
-ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
- struct nameidata *nd)
-{
- struct dentry *new_dentry;
- struct dentry *tmp;
- struct inode *lower_dir_inode;
-
- lower_dir_inode = lower_dir_dentry->d_inode;
-
- tmp = d_alloc(lower_dir_dentry, name);
- if (!tmp)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&lower_dir_inode->i_mutex);
- new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd);
- mutex_unlock(&lower_dir_inode->i_mutex);
-
- if (!new_dentry)
- new_dentry = tmp;
- else
- dput(tmp);
-
- return new_dentry;
-}
-
-
-/**
- * ecryptfs_lookup_one_lower
- * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
- * @lower_dir_dentry: lower parent directory
- * @name: lower file name
- *
- * Get the lower dentry from vfs. If lower dentry does not exist yet,
- * create it.
- */
-static struct dentry *
-ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
- struct dentry *lower_dir_dentry, struct qstr *name)
-{
- struct nameidata nd;
- struct vfsmount *lower_mnt;
- int err;
-
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
- ecryptfs_dentry->d_parent));
- err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
- mntput(lower_mnt);
-
- if (!err) {
- /* we dont need the mount */
- mntput(nd.path.mnt);
- return nd.path.dentry;
- }
- if (err != -ENOENT)
- return ERR_PTR(err);
-
- /* create a new lower dentry */
- return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd);
-}
-
-/**
* ecryptfs_lookup
* @ecryptfs_dir_inode: The eCryptfs directory inode
* @ecryptfs_dentry: The eCryptfs dentry that we are looking up
@@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
size_t encrypted_and_encoded_name_size;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
struct dentry *lower_dir_dentry, *lower_dentry;
- struct qstr lower_name;
int rc = 0;
if ((ecryptfs_dentry->d_name.len == 1
@@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
goto out_d_drop;
}
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
- lower_name.name = ecryptfs_dentry->d_name.name;
- lower_name.len = ecryptfs_dentry->d_name.len;
- lower_name.hash = ecryptfs_dentry->d_name.hash;
- if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
- rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
- lower_dir_dentry->d_inode, &lower_name);
- if (rc < 0)
- goto out_d_drop;
- }
- lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
- lower_dir_dentry, &lower_name);
+ mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+ lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
+ lower_dir_dentry,
+ ecryptfs_dentry->d_name.len);
+ mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
goto out_d_drop;
@@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
"filename; rc = [%d]\n", __func__, rc);
goto out_d_drop;
}
- lower_name.name = encrypted_and_encoded_name;
- lower_name.len = encrypted_and_encoded_name_size;
- lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
- if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
- rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
- lower_dir_dentry->d_inode, &lower_name);
- if (rc < 0)
- goto out_d_drop;
- }
- lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
- lower_dir_dentry, &lower_name);
+ mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+ lower_dentry = lookup_one_len(encrypted_and_encoded_name,
+ lower_dir_dentry,
+ encrypted_and_encoded_name_size);
+ mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
goto out_d_drop;
}
lookup_and_interpose:
rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry,
- ecryptfs_dir_inode,
- ecryptfs_nd);
+ ecryptfs_dir_inode);
goto out;
out_d_drop:
d_drop(ecryptfs_dentry);
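Note: lookup_one_len() must be called with the parent directory's i_mutex held, which is why both lookup sites above bracket the call with lower_dir_dentry->d_inode->i_mutex; the removed vfs_path_lookup()-based helpers handled that locking themselves.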
@@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
ecryptfs_dentry_to_lower(dentry), &lower_stat);
if (!rc) {
+ fsstack_copy_attr_all(dentry->d_inode,
+ ecryptfs_inode_to_lower(dentry->d_inode));
generic_fillattr(dentry->d_inode, stat);
stat->blocks = lower_stat.blocks;
}
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e0194b3e14d6..d9a591773919 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get);
* @ctx: [in] Pointer to eventfd context.
*
* The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget()).
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
*/
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
@@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
* eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
* @ctx: [in] Pointer to eventfd context.
* @wait: [in] Wait queue to be removed.
- * @cnt: [out] Pointer to the 64bit conter value.
+ * @cnt: [out] Pointer to the 64-bit counter value.
*
- * Returns zero if successful, or the following error codes:
+ * Returns %0 if successful, or the following error codes:
*
* -EAGAIN : The operation would have blocked.
*
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
* eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
* @ctx: [in] Pointer to eventfd context.
* @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64bit conter value.
+ * @cnt: [out] Pointer to the 64-bit counter value.
*
- * Returns zero if successful, or the following error codes:
+ * Returns %0 if successful, or the following error codes:
*
- * -EAGAIN : The operation would have blocked but @no_wait was nonzero.
+ * -EAGAIN : The operation would have blocked but @no_wait was non-zero.
* -ERESTARTSYS : A signal interrupted the wait operation.
*
* If @no_wait is zero, the function might sleep until the eventfd internal
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 267d0ada4541..ff12f7ac73ef 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -62,7 +62,14 @@
* This mutex is acquired by ep_free() during the epoll file
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
- * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
+ * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * going to.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epmutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
@@ -145,11 +152,11 @@ struct epitem {
/*
* This structure is stored inside the "private_data" member of the file
- * structure and rapresent the main data sructure for the eventpoll
+ * structure and represents the main data structure for the eventpoll
* interface.
*/
struct eventpoll {
- /* Protect the this structure access */
+ /* Protect the access to this structure */
spinlock_t lock;
/*
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
*/
static DEFINE_MUTEX(epmutex);
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
@@ -783,7 +793,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
/*
* This is the callback that is passed to the wait queue wakeup
- * machanism. It is called by the stored file descriptors when they
+ * mechanism. It is called by the stored file descriptors when they
* have events to report.
*/
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
@@ -814,9 +824,9 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
goto out_unlock;
/*
- * If we are trasfering events to userspace, we can hold no locks
+ * If we are transferring events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
- * semantics). All the events that happens during that period of time are
+ * semantics). All the events that happen during that period of time are
* chained in ep->ovflist and requeued later on.
*/
if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
@@ -1198,6 +1208,62 @@ retry:
return res;
}
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ * API, to verify that adding an epoll file inside another
+ * epoll structure does not violate the constraints in
+ * terms of closed loops or too deep chains (which can
+ * result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file to be currently checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ * data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ * structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+ int error = 0;
+ struct file *file = priv;
+ struct eventpoll *ep = file->private_data;
+ struct rb_node *rbp;
+ struct epitem *epi;
+
+ mutex_lock(&ep->mtx);
+ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
+ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, epi->ffd.file,
+ epi->ffd.file->private_data, current);
+ if (error != 0)
+ break;
+ }
+ }
+ mutex_unlock(&ep->mtx);
+
+ return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ * inside another epoll file (represented by @ep) does not create
+ * closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ * structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, file, ep, current);
+}
+
/*
* Open an eventpoll file descriptor.
*/
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
+ int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
*/
ep = file->private_data;
+ /*
+ * When we insert an epoll file descriptor inside another epoll file
+ * descriptor, there is the chance of creating closed loops, which are
+ * better handled here than in more critical paths.
+ *
+ * We hold epmutex across the loop check and the insert in this case, in
+ * order to prevent two separate inserts from racing and each doing the
+ * insert "at the same time" such that ep_loop_check passes on both
+ * before either one does the insert, thereby creating a cycle.
+ */
+ if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+ mutex_lock(&epmutex);
+ did_lock_epmutex = 1;
+ error = -ELOOP;
+ if (ep_loop_check(ep, tfile) != 0)
+ goto error_tgt_fput;
+ }
+
+
mutex_lock(&ep->mtx);
/*
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
mutex_unlock(&ep->mtx);
error_tgt_fput:
+ if (unlikely(did_lock_epmutex))
+ mutex_unlock(&epmutex);
+
fput(tfile);
error_fput:
fput(file);
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void)
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
+ /*
+ * Initialize the structure used to perform epoll file descriptor
+ * inclusion loops checks.
+ */
+ ep_nested_calls_init(&poll_loop_ncalls);
+
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
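With the loop check above in place, EPOLL_CTL_ADD refuses to build a cycle of epoll descriptors and fails with ELOOP. A small user-space sketch of the new behaviour, illustrative only and not part of the patch:

	#include <sys/epoll.h>
	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		int ep1 = epoll_create1(0);
		int ep2 = epoll_create1(0);
		struct epoll_event ev = { .events = EPOLLIN };

		/* Nesting ep2 inside ep1 is fine. */
		epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev);

		/* Adding ep1 back into ep2 would close the loop; it now fails. */
		if (epoll_ctl(ep2, EPOLL_CTL_ADD, ep1, &ev) == -1 && errno == ELOOP)
			printf("cycle rejected with ELOOP\n");
		return 0;
	}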
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index dcc941d82d67..132721a04a1f 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -261,9 +261,7 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct page *page = exofs_get_page(inode, n);
if (IS_ERR(page)) {
- EXOFS_ERR("ERROR: "
- "bad page in #%lu",
- inode->i_ino);
+ EXOFS_ERR("ERROR: bad page in #%lu", inode->i_ino);
filp->f_pos += PAGE_CACHE_SIZE - offset;
return PTR_ERR(page);
}
@@ -282,8 +280,7 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
EXOFS_DIR_REC_LEN(1);
for (; (char *)de <= limit; de = exofs_next_entry(de)) {
if (de->rec_len == 0) {
- EXOFS_ERR("ERROR: "
- "zero-length directory entry");
+ EXOFS_ERR("ERROR: zero-length directory entry");
exofs_put_page(page);
return -EIO;
}
@@ -342,9 +339,8 @@ struct exofs_dir_entry *exofs_find_entry(struct inode *dir,
kaddr += exofs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
- EXOFS_ERR(
- "ERROR: exofs_find_entry: "
- "zero-length directory entry");
+ EXOFS_ERR("ERROR: exofs_find_entry: "
+ "zero-length directory entry");
exofs_put_page(page);
goto out;
}
@@ -491,7 +487,8 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
exofs_put_page(page);
}
- EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=%p", dentry, inode);
+ EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=%lu",
+ dentry, inode->i_ino);
return -EINVAL;
got_it:
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 2dc925fa1010..99fcb9126a97 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -256,6 +256,8 @@ static inline int exofs_oi_read(struct exofs_i_info *oi,
}
/* inode.c */
+unsigned exofs_max_io_pages(struct exofs_layout *layout,
+ unsigned expected_pages);
int exofs_setattr(struct dentry *, struct iattr *);
int exofs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index b905c79b4f0a..4c0d6bac9143 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -48,11 +48,6 @@ static int exofs_file_fsync(struct file *filp, int datasync)
struct inode *inode = filp->f_mapping->host;
struct super_block *sb;
- if (!(inode->i_state & I_DIRTY))
- return 0;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- return 0;
-
ret = sync_inode_metadata(inode, 1);
/* This is a good place to write the sb */
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index a7555238c41a..7f36e2871ba6 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -43,6 +43,17 @@ enum { BIO_MAX_PAGES_KMALLOC =
PAGE_SIZE / sizeof(struct page *),
};
+unsigned exofs_max_io_pages(struct exofs_layout *layout,
+ unsigned expected_pages)
+{
+ unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+
+ /* TODO: easily support bio chaining */
+ pages = min_t(unsigned, pages,
+ layout->group_width * BIO_MAX_PAGES_KMALLOC);
+ return pages;
+}
+
struct page_collect {
struct exofs_sb_info *sbi;
struct inode *inode;
@@ -97,8 +108,7 @@ static void _pcol_reset(struct page_collect *pcol)
static int pcol_try_alloc(struct page_collect *pcol)
{
- unsigned pages = min_t(unsigned, pcol->expected_pages,
- MAX_PAGES_KMALLOC);
+ unsigned pages;
if (!pcol->ios) { /* First time allocate io_state */
int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
@@ -108,8 +118,7 @@ static int pcol_try_alloc(struct page_collect *pcol)
}
/* TODO: easily support bio chaining */
- pages = min_t(unsigned, pages,
- pcol->sbi->layout.group_width * BIO_MAX_PAGES_KMALLOC);
+ pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
for (; pages; pages >>= 1) {
pcol->pages = kmalloc(pages * sizeof(struct page *),
@@ -350,8 +359,8 @@ static int readpage_strip(void *data, struct page *page)
if (!pcol->read_4_write)
unlock_page(page);
- EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
- " splitting\n", inode->i_ino, page->index);
+ EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx read_4_write=%d index=0x%lx end_index=0x%lx"
+ " splitting\n", inode->i_ino, len, pcol->read_4_write, page->index, end_index);
return read_exec(pcol);
}
@@ -722,11 +731,28 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
/* read modify write */
if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+ loff_t i_size = i_size_read(mapping->host);
+ pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ size_t rlen;
+
+ if (page->index < end_index)
+ rlen = PAGE_CACHE_SIZE;
+ else if (page->index == end_index)
+ rlen = i_size & ~PAGE_CACHE_MASK;
+ else
+ rlen = 0;
+
+ if (!rlen) {
+ clear_highpage(page);
+ SetPageUptodate(page);
+ goto out;
+ }
+
ret = _readpage(page, true);
if (ret) {
/*SetPageError was done by _readpage. Is it ok?*/
unlock_page(page);
- EXOFS_DBGMSG("__readpage_filler failed\n");
+ EXOFS_DBGMSG("__readpage failed\n");
}
}
out:
@@ -1030,6 +1056,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1130,6 +1157,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
sbi = sb->s_fs_info;
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
sb->s_dirt = 1;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
@@ -1271,7 +1299,8 @@ out:
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+ /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
+ return exofs_update_inode(inode, 1);
}
/*
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 8c6c4669b381..a50b7aaa1fbf 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -390,6 +390,23 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
return 0;
}
+static unsigned __ra_pages(struct exofs_layout *layout)
+{
+ const unsigned _MIN_RA = 32; /* min 128K read-ahead */
+ unsigned ra_pages = layout->group_width * layout->stripe_unit /
+ PAGE_SIZE;
+ unsigned max_io_pages = exofs_max_io_pages(layout, ~0);
+
+ ra_pages *= 2; /* two stripes */
+ if (ra_pages < _MIN_RA)
+ ra_pages = roundup(_MIN_RA, ra_pages / 2);
+
+ if (ra_pages > max_io_pages)
+ ra_pages = max_io_pages;
+
+ return ra_pages;
+}
+
/* @odi is valid only as long as @fscb_dev is valid */
static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
struct osd_dev_info *odi)
@@ -623,6 +640,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
}
/* set up operation vectors */
+ sbi->bdi.ra_pages = __ra_pages(&sbi->layout);
sb->s_bdi = &sbi->bdi;
sb->s_fs_info = sbi;
sb->s_op = &exofs_sops;
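As a rough worked example for __ra_pages() above (illustrative numbers, not taken from the patch): with 4 KiB pages, a group_width of 4 and a 64 KiB stripe_unit, the starting value is 4 * 64 KiB / 4 KiB = 64 pages; doubling for two stripes gives 128 pages (512 KiB), which already clears the 32-page (128 KiB) floor and is then clamped to exofs_max_io_pages().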
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 6346a2acf326..1b48c3370872 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -110,7 +110,7 @@ extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *, int);
/* ialloc.c */
-extern struct inode * ext2_new_inode (struct inode *, int);
+extern struct inode * ext2_new_inode (struct inode *, int, const struct qstr *);
extern void ext2_free_inode (struct inode *);
extern unsigned long ext2_count_free_inodes (struct super_block *);
extern void ext2_check_inodes_bitmap (struct super_block *);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index ad70479aabff..ee9ed31948e1 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -429,7 +429,8 @@ found:
return group;
}
-struct inode *ext2_new_inode(struct inode *dir, int mode)
+struct inode *ext2_new_inode(struct inode *dir, int mode,
+ const struct qstr *qstr)
{
struct super_block *sb;
struct buffer_head *bitmap_bh = NULL;
@@ -585,7 +586,7 @@ got:
if (err)
goto fail_free_drop;
- err = ext2_init_security(inode,dir);
+ err = ext2_init_security(inode, dir, qstr);
if (err)
goto fail_free_drop;
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 2e1d8341d827..ed5c5d496ee9 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -104,7 +104,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, st
dquot_initialize(dir);
- inode = ext2_new_inode(dir, mode);
+ inode = ext2_new_inode(dir, mode, &dentry->d_name);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -133,7 +133,7 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_
dquot_initialize(dir);
- inode = ext2_new_inode (dir, mode);
+ inode = ext2_new_inode (dir, mode, &dentry->d_name);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
@@ -159,7 +159,7 @@ static int ext2_symlink (struct inode * dir, struct dentry * dentry,
dquot_initialize(dir);
- inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO);
+ inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO, &dentry->d_name);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out;
@@ -230,7 +230,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
inode_inc_link_count(dir);
- inode = ext2_new_inode (dir, S_IFDIR | mode);
+ inode = ext2_new_inode(dir, S_IFDIR | mode, &dentry->d_name);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_dir;
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
- inode_inc_link_count(old_inode);
ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
new_inode->i_ctime = CURRENT_TIME_SEC;
if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
if (new_dir->i_nlink >= EXT2_LINK_MAX)
goto out_dir;
}
- inode_inc_link_count(old_inode);
err = ext2_add_link(new_dentry, old_inode);
- if (err) {
- inode_dec_link_count(old_inode);
+ if (err)
goto out_dir;
- }
if (dir_de)
inode_inc_link_count(new_dir);
}
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
- * inode_dec_link_count() will mark the inode dirty.
*/
old_inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(old_inode);
ext2_delete_entry (old_de, old_page);
- inode_dec_link_count(old_inode);
if (dir_de) {
if (old_dir != new_dir)
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index a1a1c2184616..5e41cccff762 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -116,9 +116,11 @@ exit_ext2_xattr(void)
# endif /* CONFIG_EXT2_FS_XATTR */
#ifdef CONFIG_EXT2_FS_SECURITY
-extern int ext2_init_security(struct inode *inode, struct inode *dir);
+extern int ext2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr);
#else
-static inline int ext2_init_security(struct inode *inode, struct inode *dir)
+static inline int ext2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
return 0;
}
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 3004e15d5da5..5d979b4347b0 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -47,14 +47,15 @@ ext2_xattr_security_set(struct dentry *dentry, const char *name,
}
int
-ext2_init_security(struct inode *inode, struct inode *dir)
+ext2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int err;
size_t len;
void *value;
char *name;
- err = security_inode_init_security(inode, dir, &name, &value, &len);
+ err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 045995c8ce5a..db1906b4e39c 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1991,6 +1991,7 @@ ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
spin_unlock(sb_bgl_lock(sbi, group));
percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+ free_blocks -= next - start;
/* Do not issue a TRIM on extents smaller than minblocks */
if ((next - start) < minblocks)
goto free_extent;
@@ -2040,7 +2041,7 @@ free_extent:
cond_resched();
/* No more suitable extents */
- if ((free_blocks - count) < minblocks)
+ if (free_blocks < minblocks)
break;
}
@@ -2090,7 +2091,8 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
int ret = 0;
- start = range->start >> sb->s_blocksize_bits;
+ start = (range->start >> sb->s_blocksize_bits) +
+ le32_to_cpu(es->s_first_data_block);
len = range->len >> sb->s_blocksize_bits;
minlen = range->minlen >> sb->s_blocksize_bits;
trimmed = 0;
@@ -2099,10 +2101,6 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
return -EINVAL;
if (start >= max_blks)
goto out;
- if (start < le32_to_cpu(es->s_first_data_block)) {
- len -= le32_to_cpu(es->s_first_data_block) - start;
- start = le32_to_cpu(es->s_first_data_block);
- }
if (start + len > max_blks)
len = max_blks - start;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 9724aef22460..bfc2dc43681d 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -404,7 +404,8 @@ static int find_group_other(struct super_block *sb, struct inode *parent)
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
-struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
+struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
+ const struct qstr *qstr, int mode)
{
struct super_block *sb;
struct buffer_head *bitmap_bh = NULL;
@@ -589,7 +590,7 @@ got:
if (err)
goto fail_free_drop;
- err = ext3_init_security(handle,inode, dir);
+ err = ext3_init_security(handle, inode, dir, qstr);
if (err)
goto fail_free_drop;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index b27ba71810ec..9dba3bd69d9a 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1710,7 +1710,7 @@ retry:
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode = ext3_new_inode (handle, dir, mode);
+ inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext3_file_inode_operations;
@@ -1746,7 +1746,7 @@ retry:
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode = ext3_new_inode (handle, dir, mode);
+ inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
@@ -1784,7 +1784,7 @@ retry:
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
+ inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFDIR | mode);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
@@ -2206,7 +2206,7 @@ retry:
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
+ inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFLNK|S_IRWXUGO);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 85c8cc8f2473..0d62f29f213a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb,
return;
}
+ /* Check if feature set allows readwrite operations */
+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
+ ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
+ "unknown ROCOMPAT features");
+ return;
+ }
+
if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
index 377fe7201169..2be4f69bfa64 100644
--- a/fs/ext3/xattr.h
+++ b/fs/ext3/xattr.h
@@ -128,10 +128,10 @@ exit_ext3_xattr(void)
#ifdef CONFIG_EXT3_FS_SECURITY
extern int ext3_init_security(handle_t *handle, struct inode *inode,
- struct inode *dir);
+ struct inode *dir, const struct qstr *qstr);
#else
static inline int ext3_init_security(handle_t *handle, struct inode *inode,
- struct inode *dir)
+ struct inode *dir, const struct qstr *qstr)
{
return 0;
}
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index 03a99bfc59f9..b8d9f83aa5c5 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -49,14 +49,15 @@ ext3_xattr_security_set(struct dentry *dentry, const char *name,
}
int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir)
+ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int err;
size_t len;
void *value;
char *name;
- err = security_inode_init_security(inode, dir, &name, &value, &len);
+ err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index ccce8a7e94ed..9375e7c2790f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -131,7 +131,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
* fragmenting the file system's free space. Maybe we
* should have some hueristics or some way to allow
* userspace to pass a hint to file system,
- * especiially if the latter case turns out to be
+ * especially if the latter case turns out to be
* common.
*/
ex = path[depth].p_ext;
@@ -1034,7 +1034,7 @@ cleanup:
for (i = 0; i < depth; i++) {
if (!ablocks[i])
continue;
- ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
+ ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
}
}
@@ -2059,7 +2059,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
if (err)
return err;
ext_debug("index is empty, remove it, free block %llu\n", leaf);
- ext4_free_blocks(handle, inode, 0, leaf, 1,
+ ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return err;
}
@@ -2156,7 +2156,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
num = le32_to_cpu(ex->ee_block) + ee_len - from;
start = ext4_ext_pblock(ex) + ee_len - num;
ext_debug("free last %u blocks starting %llu\n", num, start);
- ext4_free_blocks(handle, inode, 0, start, num, flags);
+ ext4_free_blocks(handle, inode, NULL, start, num, flags);
} else if (from == le32_to_cpu(ex->ee_block)
&& to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
@@ -2844,7 +2844,7 @@ fix_extent_len:
* ext4_get_blocks_dio_write() when DIO to write
* to an uninitialized extent.
*
- * Writing to an uninitized extent may result in splitting the uninitialized
+ * Writing to an uninitialized extent may result in splitting the uninitialized
* extent into multiple /initialized uninitialized extents (up to three)
* There are three possibilities:
* a> There is no split required: Entire extent should be uninitialized
@@ -3485,7 +3485,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
- ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex),
+ ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
ext4_ext_get_actual_len(&newex), 0);
goto out2;
}
@@ -3775,6 +3775,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
}
return ret > 0 ? ret2 : ret;
}
+
/*
* Callback function called for each extent to gather FIEMAP information.
*/
@@ -3782,38 +3783,162 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
struct ext4_ext_cache *newex, struct ext4_extent *ex,
void *data)
{
- struct fiemap_extent_info *fieinfo = data;
- unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
__u64 logical;
__u64 physical;
__u64 length;
+ loff_t size;
__u32 flags = 0;
- int error;
+ int ret = 0;
+ struct fiemap_extent_info *fieinfo = data;
+ unsigned char blksize_bits;
- logical = (__u64)newex->ec_block << blksize_bits;
+ blksize_bits = inode->i_sb->s_blocksize_bits;
+ logical = (__u64)newex->ec_block << blksize_bits;
if (newex->ec_start == 0) {
- pgoff_t offset;
- struct page *page;
+ /*
+ * No extent in extent-tree contains block @newex->ec_start,
+ * then the block may lie in 1) a hole or 2) a delayed-extent.
+ *
+ * Holes or delayed-extents are processed as follows.
+ * 1. lookup dirty pages with specified range in pagecache.
+ * If no page is found, then there is no delayed-extent and
+ * return with EXT_CONTINUE.
+ * 2. find the 1st mapped buffer,
+ * 3. check if the mapped buffer is both in the request range
+ * and a delayed buffer. If not, there is no delayed-extent,
+ * then return.
+ * 4. a delayed-extent is found, the extent will be collected.
+ */
+ ext4_lblk_t end = 0;
+ pgoff_t last_offset;
+ pgoff_t offset;
+ pgoff_t index;
+ struct page **pages = NULL;
struct buffer_head *bh = NULL;
+ struct buffer_head *head = NULL;
+ unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
+
+ pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (pages == NULL)
+ return -ENOMEM;
offset = logical >> PAGE_SHIFT;
- page = find_get_page(inode->i_mapping, offset);
- if (!page || !page_has_buffers(page))
- return EXT_CONTINUE;
+repeat:
+ last_offset = offset;
+ head = NULL;
+ ret = find_get_pages_tag(inode->i_mapping, &offset,
+ PAGECACHE_TAG_DIRTY, nr_pages, pages);
+
+ if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
+ /* First time, try to find a mapped buffer. */
+ if (ret == 0) {
+out:
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ /* just a hole. */
+ kfree(pages);
+ return EXT_CONTINUE;
+ }
- bh = page_buffers(page);
+ /* Try to find the 1st mapped buffer. */
+ end = ((__u64)pages[0]->index << PAGE_SHIFT) >>
+ blksize_bits;
+ if (!page_has_buffers(pages[0]))
+ goto out;
+ head = page_buffers(pages[0]);
+ if (!head)
+ goto out;
- if (!bh)
- return EXT_CONTINUE;
+ bh = head;
+ do {
+ if (buffer_mapped(bh)) {
+ /* get the 1st mapped buffer. */
+ if (end > newex->ec_block +
+ newex->ec_len)
+ /* The buffer is out of
+ * the request range.
+ */
+ goto out;
+ goto found_mapped_buffer;
+ }
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
- if (buffer_delay(bh)) {
- flags |= FIEMAP_EXTENT_DELALLOC;
- page_cache_release(page);
+ /* No mapped buffer found. */
+ goto out;
} else {
- page_cache_release(page);
- return EXT_CONTINUE;
+ /* Find contiguous delayed buffers. */
+ if (ret > 0 && pages[0]->index == last_offset)
+ head = page_buffers(pages[0]);
+ bh = head;
+ }
+
+found_mapped_buffer:
+ if (bh != NULL && buffer_delay(bh)) {
+ /* 1st or contiguous delayed buffer found. */
+ if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
+ /*
+ * 1st delayed buffer found, record
+ * the start of extent.
+ */
+ flags |= FIEMAP_EXTENT_DELALLOC;
+ newex->ec_block = end;
+ logical = (__u64)end << blksize_bits;
+ }
+ /* Find contiguous delayed buffers. */
+ do {
+ if (!buffer_delay(bh))
+ goto found_delayed_extent;
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
+
+ for (index = 1; index < ret; index++) {
+ if (!page_has_buffers(pages[index])) {
+ bh = NULL;
+ break;
+ }
+ head = page_buffers(pages[index]);
+ if (!head) {
+ bh = NULL;
+ break;
+ }
+ if (pages[index]->index !=
+ pages[0]->index + index) {
+ /* Blocks are not contiguous. */
+ bh = NULL;
+ break;
+ }
+ bh = head;
+ do {
+ if (!buffer_delay(bh))
+ /* Delayed-extent ends. */
+ goto found_delayed_extent;
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
+ }
+ } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
+ /* a hole found. */
+ goto out;
+
+found_delayed_extent:
+ newex->ec_len = min(end - newex->ec_block,
+ (ext4_lblk_t)EXT_INIT_MAX_LEN);
+ if (ret == nr_pages && bh != NULL &&
+ newex->ec_len < EXT_INIT_MAX_LEN &&
+ buffer_delay(bh)) {
+ /* No extent collected yet; keep scanning. */
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ goto repeat;
}
+
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ kfree(pages);
}
physical = (__u64)newex->ec_start << blksize_bits;
@@ -3822,32 +3947,16 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
if (ex && ext4_ext_is_uninitialized(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
- /*
- * If this extent reaches EXT_MAX_BLOCK, it must be last.
- *
- * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
- * this also indicates no more allocated blocks.
- *
- * XXX this might miss a single-block extent at EXT_MAX_BLOCK
- */
- if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
- newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
- loff_t size = i_size_read(inode);
- loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
-
+ size = i_size_read(inode);
+ if (logical + length >= size)
flags |= FIEMAP_EXTENT_LAST;
- if ((flags & FIEMAP_EXTENT_DELALLOC) &&
- logical+length > size)
- length = (size - logical + bs - 1) & ~(bs-1);
- }
- error = fiemap_fill_next_extent(fieinfo, logical, physical,
+ ret = fiemap_fill_next_extent(fieinfo, logical, physical,
length, flags);
- if (error < 0)
- return error;
- if (error == 1)
+ if (ret < 0)
+ return ret;
+ if (ret == 1)
return EXT_BREAK;
-
return EXT_CONTINUE;
}
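
The reworked fiemap callback above reports delayed-allocation ranges by scanning dirty pagecache pages, and flags an extent FIEMAP_EXTENT_LAST once logical + length reaches i_size. The result is visible from userspace through the FIEMAP ioctl; the following is a minimal probe of that interface (standard Linux UAPI headers assumed; a sketch, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/fiemap.h>

    /* Minimal FIEMAP probe: print each extent of a file with its
     * delalloc / unwritten / last flags. */
    int main(int argc, char **argv)
    {
        struct fiemap *fm;
        unsigned int i;
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
        if (!fm)
            return 1;
        fm->fm_start = 0;
        fm->fm_length = FIEMAP_MAX_OFFSET;  /* whole file */
        fm->fm_flags = 0;                   /* no FIEMAP_FLAG_SYNC: delalloc stays visible */
        fm->fm_extent_count = 32;           /* room for 32 extents */
        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
            perror("FS_IOC_FIEMAP");
            return 1;
        }
        for (i = 0; i < fm->fm_mapped_extents; i++) {
            struct fiemap_extent *fe = &fm->fm_extents[i];

            printf("logical %llu physical %llu len %llu%s%s%s\n",
                   (unsigned long long)fe->fe_logical,
                   (unsigned long long)fe->fe_physical,
                   (unsigned long long)fe->fe_length,
                   (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ? " delalloc" : "",
                   (fe->fe_flags & FIEMAP_EXTENT_UNWRITTEN) ? " unwritten" : "",
                   (fe->fe_flags & FIEMAP_EXTENT_LAST) ? " last" : "");
        }
        free(fm);
        close(fd);
        return 0;
    }
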
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index eb9097aec6f0..f6610106fbbf 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -649,7 +649,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
*group = parent_group + flex_size;
if (*group > ngroups)
*group = 0;
- return find_group_orlov(sb, parent, group, mode, 0);
+ return find_group_orlov(sb, parent, group, mode, NULL);
}
/*
@@ -1042,7 +1042,7 @@ got:
if (err)
goto fail_free_drop;
- err = ext4_init_security(handle, inode, dir);
+ err = ext4_init_security(handle, inode, dir, qstr);
if (err)
goto fail_free_drop;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9f7f9e49914f..67e7a3caf9ed 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -173,7 +173,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
BUG_ON(EXT4_JOURNAL(inode) == NULL);
jbd_debug(2, "restarting handle %p\n", handle);
up_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
+ ret = ext4_journal_restart(handle, nblocks);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
@@ -720,7 +720,7 @@ allocated:
return ret;
failed_out:
for (i = 0; i < index; i++)
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
return ret;
}
@@ -823,20 +823,20 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
return err;
failed:
/* Allocation failed, free what we already allocated */
- ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
for (i = 1; i <= n ; i++) {
/*
* branch[i].bh is newly allocated, so there is no
* need to revoke the block, which is why we don't
* need to set EXT4_FREE_BLOCKS_METADATA.
*/
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
EXT4_FREE_BLOCKS_FORGET);
}
for (i = n+1; i < indirect_blks; i++)
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
- ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
return err;
}
@@ -924,7 +924,7 @@ err_out:
ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
EXT4_FREE_BLOCKS_FORGET);
}
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
blks, 0);
return err;
@@ -2060,7 +2060,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
- int commit_write = 0, redirty_page = 0;
+ int commit_write = 0, skip_page = 0;
struct page *page = pvec.pages[i];
index = page->index;
@@ -2086,14 +2086,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
* If the page does not have buffers (for
* whatever reason), try to create them using
* __block_write_begin. If this fails,
- * redirty the page and move on.
+ * skip the page and move on.
*/
if (!page_has_buffers(page)) {
if (__block_write_begin(page, 0, len,
noalloc_get_block_write)) {
- redirty_page:
- redirty_page_for_writepage(mpd->wbc,
- page);
+ skip_page:
unlock_page(page);
continue;
}
@@ -2104,7 +2102,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
block_start = 0;
do {
if (!bh)
- goto redirty_page;
+ goto skip_page;
if (map && (cur_logical >= map->m_lblk) &&
(cur_logical <= (map->m_lblk +
(map->m_len - 1)))) {
@@ -2120,22 +2118,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_unwritten(bh);
}
- /* redirty page if block allocation undone */
+ /* skip page if block allocation undone */
if (buffer_delay(bh) || buffer_unwritten(bh))
- redirty_page = 1;
+ skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
cur_logical++;
pblock++;
} while (bh != page_bufs);
- if (redirty_page)
- goto redirty_page;
+ if (skip_page)
+ goto skip_page;
if (commit_write)
/* mark the buffer_heads as dirty & uptodate */
block_commit_write(page, 0, len);
+ clear_page_dirty_for_io(page);
/*
* Delalloc doesn't support data journalling,
* but eventually maybe we'll lift this
@@ -2165,8 +2164,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
return ret;
}
-static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
- sector_t logical, long blk_cnt)
+static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
int nr_pages, i;
pgoff_t index, end;
@@ -2174,9 +2172,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
- index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- end = (logical + blk_cnt - 1) >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
@@ -2279,9 +2276,8 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
err = blks;
/*
* If get block returns EAGAIN or ENOSPC and there
- * appears to be free blocks we will call
- * ext4_writepage() for all of the pages which will
- * just redirty the pages.
+ * appears to be free blocks we will just let
+ * mpage_da_submit_io() unlock all of the pages.
*/
if (err == -EAGAIN)
goto submit_io;
@@ -2312,8 +2308,10 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
ext4_print_free_blocks(mpd->inode);
}
/* invalidate all the pages */
- ext4_da_block_invalidatepages(mpd, next,
- mpd->b_size >> mpd->inode->i_blkbits);
+ ext4_da_block_invalidatepages(mpd);
+
+ /* Mark this page range as having been completed */
+ mpd->io_done = 1;
return;
}
BUG_ON(blks == 0);
@@ -2438,102 +2436,6 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
}
/*
- * __mpage_da_writepage - finds extent of pages and blocks
- *
- * @page: page to consider
- * @wbc: not used, we just follow rules
- * @data: context
- *
- * The function finds extents of pages and scan them for all blocks.
- */
-static int __mpage_da_writepage(struct page *page,
- struct writeback_control *wbc,
- struct mpage_da_data *mpd)
-{
- struct inode *inode = mpd->inode;
- struct buffer_head *bh, *head;
- sector_t logical;
-
- /*
- * Can we merge this page to current extent?
- */
- if (mpd->next_page != page->index) {
- /*
- * Nope, we can't. So, we map non-allocated blocks
- * and start IO on them
- */
- if (mpd->next_page != mpd->first_page) {
- mpage_da_map_and_submit(mpd);
- /*
- * skip rest of the page in the page_vec
- */
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return MPAGE_DA_EXTENT_TAIL;
- }
-
- /*
- * Start next extent of pages ...
- */
- mpd->first_page = page->index;
-
- /*
- * ... and blocks
- */
- mpd->b_size = 0;
- mpd->b_state = 0;
- mpd->b_blocknr = 0;
- }
-
- mpd->next_page = page->index + 1;
- logical = (sector_t) page->index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
- if (!page_has_buffers(page)) {
- mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
- (1 << BH_Dirty) | (1 << BH_Uptodate));
- if (mpd->io_done)
- return MPAGE_DA_EXTENT_TAIL;
- } else {
- /*
- * Page with regular buffer heads, just add all dirty ones
- */
- head = page_buffers(page);
- bh = head;
- do {
- BUG_ON(buffer_locked(bh));
- /*
- * We need to try to allocate
- * unmapped blocks in the same page.
- * Otherwise we won't make progress
- * with the page in ext4_writepage
- */
- if (ext4_bh_delay_or_unwritten(NULL, bh)) {
- mpage_add_bh_to_extent(mpd, logical,
- bh->b_size,
- bh->b_state);
- if (mpd->io_done)
- return MPAGE_DA_EXTENT_TAIL;
- } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
- /*
- * mapped dirty buffer. We need to update
- * the b_state because we look at
- * b_state in mpage_da_map_blocks. We don't
- * update b_size because if we find an
- * unmapped buffer_head later we need to
- * use the b_state flag of that buffer_head.
- */
- if (mpd->b_size == 0)
- mpd->b_state = bh->b_state & BH_FLAGS;
- }
- logical++;
- } while ((bh = bh->b_this_page) != head);
- }
-
- return 0;
-}
-
-/*
* This is a special get_blocks_t callback which is used by
* ext4_da_write_begin(). It will either return mapped block or
* reserve space for a single block.
@@ -2811,27 +2713,27 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
/*
* write_cache_pages_da - walk the list of dirty pages of the given
- * address space and call the callback function (which usually writes
- * the pages).
- *
- * This is a forked version of write_cache_pages(). Differences:
- * Range cyclic is ignored.
- * no_nrwrite_index_update is always presumed true
+ * address space and accumulate pages that need writing, and call
+ * mpage_da_map_and_submit to map a single contiguous memory region
+ * and then write them.
*/
static int write_cache_pages_da(struct address_space *mapping,
struct writeback_control *wbc,
struct mpage_da_data *mpd,
pgoff_t *done_index)
{
- int ret = 0;
- int done = 0;
- struct pagevec pvec;
- unsigned nr_pages;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
- long nr_to_write = wbc->nr_to_write;
- int tag;
-
+ struct buffer_head *bh, *head;
+ struct inode *inode = mapping->host;
+ struct pagevec pvec;
+ unsigned int nr_pages;
+ sector_t logical;
+ pgoff_t index, end;
+ long nr_to_write = wbc->nr_to_write;
+ int i, tag, ret = 0;
+
+ memset(mpd, 0, sizeof(struct mpage_da_data));
+ mpd->wbc = wbc;
+ mpd->inode = inode;
pagevec_init(&pvec, 0);
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
@@ -2842,13 +2744,11 @@ static int write_cache_pages_da(struct address_space *mapping,
tag = PAGECACHE_TAG_DIRTY;
*done_index = index;
- while (!done && (index <= end)) {
- int i;
-
+ while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
- break;
+ return 0;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2860,60 +2760,100 @@ static int write_cache_pages_da(struct address_space *mapping,
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
- if (page->index > end) {
- done = 1;
- break;
- }
+ if (page->index > end)
+ goto out;
*done_index = page->index + 1;
+ /*
+ * If we can't merge this page, and we have
+ * accumulated a contiguous region, write it
+ */
+ if ((mpd->next_page != page->index) &&
+ (mpd->next_page != mpd->first_page)) {
+ mpage_da_map_and_submit(mpd);
+ goto ret_extent_tail;
+ }
+
lock_page(page);
/*
- * Page truncated or invalidated. We can freely skip it
- * then, even for data integrity operations: the page
- * has disappeared concurrently, so there could be no
- * real expectation of this data interity operation
- * even if there is now a new, dirty page at the same
- * pagecache address.
+ * If the page is no longer dirty, or its
+ * mapping no longer corresponds to inode we
+ * are writing (which means it has been
+ * truncated or invalidated), or the page is
+ * already under writeback and we are not
+ * doing a data integrity writeback, skip the page
*/
- if (unlikely(page->mapping != mapping)) {
-continue_unlock:
+ if (!PageDirty(page) ||
+ (PageWriteback(page) &&
+ (wbc->sync_mode == WB_SYNC_NONE)) ||
+ unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
-
- if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- else
- goto continue_unlock;
- }
+ if (PageWriteback(page))
+ wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
- if (!clear_page_dirty_for_io(page))
- goto continue_unlock;
- ret = __mpage_da_writepage(page, wbc, mpd);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
- done = 1;
- break;
- }
+ if (mpd->next_page != page->index)
+ mpd->first_page = page->index;
+ mpd->next_page = page->index + 1;
+ logical = (sector_t) page->index <<
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+ if (!page_has_buffers(page)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ PAGE_CACHE_SIZE,
+ (1 << BH_Dirty) | (1 << BH_Uptodate));
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else {
+ /*
+ * Page with regular buffer heads,
+ * just add all dirty ones
+ */
+ head = page_buffers(page);
+ bh = head;
+ do {
+ BUG_ON(buffer_locked(bh));
+ /*
+ * We need to try to allocate
+ * unmapped blocks in the same page.
+ * Otherwise we won't make progress
+ * with the page in ext4_writepage
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ bh->b_size,
+ bh->b_state);
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+ /*
+ * mapped dirty buffer. We need
+ * to update the b_state
+ * because we look at b_state
+ * in mpage_da_map_blocks. We
+ * don't update b_size because
+ * if we find an unmapped
+ * buffer_head later we need to
+ * use the b_state flag of that
+ * buffer_head.
+ */
+ if (mpd->b_size == 0)
+ mpd->b_state = bh->b_state & BH_FLAGS;
+ }
+ logical++;
+ } while ((bh = bh->b_this_page) != head);
}
if (nr_to_write > 0) {
nr_to_write--;
if (nr_to_write == 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
+ wbc->sync_mode == WB_SYNC_NONE)
/*
* We stop writing back only if we are
* not doing integrity sync. In case of
@@ -2924,14 +2864,18 @@ continue_unlock:
* pages, but have not synced all of the
* old dirty pages.
*/
- done = 1;
- break;
- }
+ goto out;
}
}
pagevec_release(&pvec);
cond_resched();
}
+ return 0;
+ret_extent_tail:
+ ret = MPAGE_DA_EXTENT_TAIL;
+out:
+ pagevec_release(&pvec);
+ cond_resched();
return ret;
}
@@ -2945,7 +2889,6 @@ static int ext4_da_writepages(struct address_space *mapping,
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
int pages_written = 0;
- long pages_skipped;
unsigned int max_pages;
int range_cyclic, cycled = 1, io_done = 0;
int needed_blocks, ret = 0;
@@ -3028,11 +2971,6 @@ static int ext4_da_writepages(struct address_space *mapping,
wbc->nr_to_write = desired_nr_to_write;
}
- mpd.wbc = wbc;
- mpd.inode = mapping->host;
-
- pages_skipped = wbc->pages_skipped;
-
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
@@ -3059,22 +2997,10 @@ retry:
}
/*
- * Now call __mpage_da_writepage to find the next
+ * Now call write_cache_pages_da() to find the next
* contiguous region of logical blocks that need
- * blocks to be allocated by ext4. We don't actually
- * submit the blocks for I/O here, even though
- * write_cache_pages thinks it will, and will set the
- * pages as clean for write before calling
- * __mpage_da_writepage().
+ * blocks to be allocated by ext4 and submit them.
*/
- mpd.b_size = 0;
- mpd.b_state = 0;
- mpd.b_blocknr = 0;
- mpd.first_page = 0;
- mpd.next_page = 0;
- mpd.io_done = 0;
- mpd.pages_written = 0;
- mpd.retval = 0;
ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
/*
* If we have a contiguous extent of pages and we
@@ -3096,7 +3022,6 @@ retry:
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
- wbc->pages_skipped = pages_skipped;
ret = 0;
} else if (ret == MPAGE_DA_EXTENT_TAIL) {
/*
@@ -3104,7 +3029,6 @@ retry:
* rest of the pages
*/
pages_written += mpd.pages_written;
- wbc->pages_skipped = pages_skipped;
ret = 0;
io_done = 1;
} else if (wbc->nr_to_write)
@@ -3122,11 +3046,6 @@ retry:
wbc->range_end = mapping->writeback_index - 1;
goto retry;
}
- if (pages_skipped != wbc->pages_skipped)
- ext4_msg(inode->i_sb, KERN_CRIT,
- "This should not happen leaving %s "
- "with nr_to_write = %ld ret = %d",
- __func__, wbc->nr_to_write, ret);
/* Update index */
wbc->range_cyclic = range_cyclic;
@@ -4228,7 +4147,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
for (p = first; p < last; p++)
*p = 0;
- ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
+ ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
return 0;
}
@@ -4416,7 +4335,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
* transaction where the data blocks are
* actually freed.
*/
- ext4_free_blocks(handle, inode, 0, nr, 1,
+ ext4_free_blocks(handle, inode, NULL, nr, 1,
EXT4_FREE_BLOCKS_METADATA|
EXT4_FREE_BLOCKS_FORGET);
@@ -4875,7 +4794,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
return inode;
ei = EXT4_I(inode);
- iloc.bh = 0;
+ iloc.bh = NULL;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
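
With the rework above, write_cache_pages_da() both collects dirty pages and submits them: contiguous dirty pages are merged into one region (first_page..next_page) and handed to mpage_da_map_and_submit() as soon as a page cannot be merged. A standalone sketch of that accumulate-then-submit walk over a sorted list of dirty page indices (invented names, not the kernel loop):

    #include <stdio.h>
    #include <stddef.h>

    /* Group ordered dirty page indices into contiguous runs and "submit"
     * each run in one go. */
    static void map_and_submit(unsigned long first, unsigned long next)
    {
        printf("map+submit pages [%lu, %lu)\n", first, next);
    }

    int main(void)
    {
        unsigned long dirty[] = { 3, 4, 5, 9, 10, 14 };   /* dirty page indices */
        size_t i, n = sizeof(dirty) / sizeof(dirty[0]);
        unsigned long first_page = dirty[0], next_page = dirty[0];

        for (i = 0; i < n; i++) {
            if (dirty[i] != next_page) {        /* cannot merge: flush the run */
                map_and_submit(first_page, next_page);
                first_page = dirty[i];
            }
            next_page = dirty[i] + 1;
        }
        map_and_submit(first_page, next_page);  /* final run */
        return 0;
    }
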
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index eb3bc2fe647e..c052c9f0f3a1 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -334,16 +334,22 @@ mext_out:
case FITRIM:
{
struct super_block *sb = inode->i_sb;
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&range, (struct fstrim_range *)arg,
sizeof(range)))
return -EFAULT;
+ range.minlen = max((unsigned int)range.minlen,
+ q->limits.discard_granularity);
ret = ext4_trim_fs(sb, &range);
if (ret < 0)
return ret;
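
The FITRIM handler above now refuses devices without discard support and raises range.minlen to the queue's discard granularity before trimming. From userspace the call is a single ioctl on any descriptor within the filesystem, the same interface fstrim(8) uses; a hedged sketch (requires CAP_SYS_ADMIN, assumes FITRIM and struct fstrim_range from linux/fs.h):

    #include <stdio.h>
    #include <limits.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Ask the filesystem to discard free extents of at least 1 MiB; the
     * kernel may still raise minlen to the device's discard granularity. */
    int main(int argc, char **argv)
    {
        struct fstrim_range range;
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        range.start = 0;
        range.len = ULLONG_MAX;       /* trim free space across the whole fs */
        range.minlen = 1024 * 1024;   /* skip free extents smaller than 1 MiB */
        if (ioctl(fd, FITRIM, &range) < 0) {
            perror("FITRIM");         /* EOPNOTSUPP if the device lacks discard */
            close(fd);
            return 1;
        }
        printf("%llu bytes trimmed\n", (unsigned long long)range.len);
        close(fd);
        return 0;
    }
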
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index d1fe09aea73d..2f6f0dd08fca 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -432,9 +432,10 @@ static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
}
/* at order 0 we see each particular block */
- *max = 1 << (e4b->bd_blkbits + 3);
- if (order == 0)
+ if (order == 0) {
+ *max = 1 << (e4b->bd_blkbits + 3);
return EXT4_MB_BITMAP(e4b);
+ }
bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
@@ -616,7 +617,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
grp = ext4_get_group_info(sb, e4b->bd_group);
- buddy = mb_find_buddy(e4b, 0, &max);
list_for_each(cur, &grp->bb_prealloc_list) {
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
@@ -635,7 +635,12 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
#define mb_check_buddy(e4b)
#endif
-/* FIXME!! need more doc */
+/*
+ * Divide the blocks starting at @first, of length @len, into
+ * smaller chunks whose sizes are powers of 2 blocks.
+ * Clear the bits in the bitmap covered by the blocks of those chunks,
+ * then increase bb_counters[] for the corresponding chunk size.
+ */
static void ext4_mb_mark_free_simple(struct super_block *sb,
void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
struct ext4_group_info *grp)
@@ -3208,7 +3213,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
cur_distance = abs(goal_block - cpa->pa_pstart);
new_distance = abs(goal_block - pa->pa_pstart);
- if (cur_distance < new_distance)
+ if (cur_distance <= new_distance)
return cpa;
/* drop the previous reference */
@@ -3907,7 +3912,8 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
struct super_block *sb = ac->ac_sb;
ext4_group_t ngroups, i;
- if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ if (!mb_enable_debug ||
+ (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
return;
printk(KERN_ERR "EXT4-fs: Can't allocate:"
@@ -4753,7 +4759,8 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
* bitmap. Then issue a TRIM command on this extent and free the extent in
* the group buddy bitmap. This is done until whole group is scanned.
*/
-ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
+static ext4_grpblk_t
+ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
{
void *bitmap;
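
The new comment on ext4_mb_mark_free_simple() describes a buddy-style decomposition: the free range is cut into power-of-two chunks, each aligned to its own size, and one bb_counters[] slot is credited per chunk order. A standalone illustration of that splitting rule (a sketch, not the kernel routine):

    #include <stdio.h>

    /* Split [first, first + len) into power-of-two chunks, each aligned
     * to its own size. */
    static void mark_free_simple(unsigned int first, unsigned int len)
    {
        while (len) {
            unsigned int chunk = 1, order = 0;

            /* Grow the chunk while it stays within the range and aligned. */
            while ((chunk << 1) <= len && (first % (chunk << 1)) == 0) {
                chunk <<= 1;
                order++;
            }
            printf("chunk at %u, len %u (order %u)\n", first, chunk, order);
            first += chunk;
            len -= chunk;
        }
    }

    int main(void)
    {
        mark_free_simple(6, 13);   /* 13 free blocks at block 6 -> 2 + 8 + 2 + 1 */
        return 0;
    }
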
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b619322c76f0..22bd4d7f289b 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -169,7 +169,7 @@ struct ext4_allocation_context {
/* original request */
struct ext4_free_extent ac_o_ex;
- /* goal request (after normalization) */
+ /* goal request (normalized ac_o_ex) */
struct ext4_free_extent ac_g_ex;
/* the best found extent */
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b0a126f23c20..d1bafa57f483 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -263,7 +263,7 @@ static int free_dind_blocks(handle_t *handle,
for (i = 0; i < max_entries; i++) {
if (tmp_idata[i]) {
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0,
+ ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(tmp_idata[i]), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -271,7 +271,7 @@ static int free_dind_blocks(handle_t *handle,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1,
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
return 0;
@@ -302,7 +302,7 @@ static int free_tind_blocks(handle_t *handle,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1,
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
return 0;
@@ -315,7 +315,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
/* ei->i_data[EXT4_IND_BLOCK] */
if (i_data[0]) {
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0,
+ ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(i_data[0]), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -428,7 +428,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, block, 1,
+ ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return retval;
}
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 955cc309142f..0cfd03e19d7d 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -259,6 +259,11 @@ static void ext4_end_bio(struct bio *bio, int error)
bi_sector >> (inode->i_blkbits - 9));
}
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ return;
+ }
+
/* Add the io_end to per-inode completed io list*/
spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
@@ -279,9 +284,9 @@ void ext4_io_submit(struct ext4_io_submit *io)
BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
bio_put(io->io_bio);
}
- io->io_bio = 0;
+ io->io_bio = NULL;
io->io_op = 0;
- io->io_end = 0;
+ io->io_end = NULL;
}
static int io_submit_init(struct ext4_io_submit *io,
@@ -381,8 +386,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- ClearPageError(page);
io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
if (!io_page) {
@@ -393,6 +396,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
io_page->p_page = page;
atomic_set(&io_page->p_count, 1);
get_page(page);
+ set_page_writeback(page);
+ ClearPageError(page);
for (bh = head = page_buffers(page), block_start = 0;
bh != head || !block_start;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3ecc6e45d2f9..66fec4ee76fa 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -230,7 +230,7 @@ static int setup_new_group_blocks(struct super_block *sb,
}
/* Zero out all of the reserved backup group descriptor table blocks */
- ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
block, sbi->s_itb_per_group);
err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
GFP_NOFS);
@@ -248,7 +248,7 @@ static int setup_new_group_blocks(struct super_block *sb,
/* Zero out all of the inode table blocks */
block = input->inode_table;
- ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
block, sbi->s_itb_per_group);
err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
if (err)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index f6a318f836b2..4804762d1527 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -54,9 +54,9 @@
static struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;
-struct ext4_lazy_init *ext4_li_info;
-struct mutex ext4_li_mtx;
-struct ext4_features *ext4_feat;
+static struct ext4_lazy_init *ext4_li_info;
+static struct mutex ext4_li_mtx;
+static struct ext4_features *ext4_feat;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
@@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data);
+static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
@@ -997,13 +998,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, OLDALLOC))
seq_puts(seq, ",oldalloc");
#ifdef CONFIG_EXT4_FS_XATTR
- if (test_opt(sb, XATTR_USER) &&
- !(def_mount_opts & EXT4_DEFM_XATTR_USER))
+ if (test_opt(sb, XATTR_USER))
seq_puts(seq, ",user_xattr");
- if (!test_opt(sb, XATTR_USER) &&
- (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
+ if (!test_opt(sb, XATTR_USER))
seq_puts(seq, ",nouser_xattr");
- }
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
@@ -1041,8 +1039,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
!(def_mount_opts & EXT4_DEFM_NODELALLOC))
seq_puts(seq, ",nodelalloc");
- if (test_opt(sb, MBLK_IO_SUBMIT))
- seq_puts(seq, ",mblk_io_submit");
+ if (!test_opt(sb, MBLK_IO_SUBMIT))
+ seq_puts(seq, ",nomblk_io_submit");
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
@@ -1451,7 +1449,7 @@ static int parse_options(char *options, struct super_block *sb,
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
- args[0].to = args[0].from = 0;
+ args[0].to = args[0].from = NULL;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1771,7 +1769,7 @@ set_qf_format:
return 0;
if (option < 0 || option > (1 << 30))
return 0;
- if (!is_power_of_2(option)) {
+ if (option && !is_power_of_2(option)) {
ext4_msg(sb, KERN_ERR,
"EXT4-fs: inode_readahead_blks"
" must be a power of 2");
@@ -2120,6 +2118,13 @@ static void ext4_orphan_cleanup(struct super_block *sb,
return;
}
+ /* Check if feature set would not allow a r/w mount */
+ if (!ext4_feature_set_ok(sb, 0)) {
+ ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
+ "unknown ROCOMPAT features");
+ return;
+ }
+
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
@@ -2412,7 +2417,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
if (parse_strtoul(buf, 0x40000000, &t))
return -EINVAL;
- if (!is_power_of_2(t))
+ if (t && !is_power_of_2(t))
return -EINVAL;
sbi->s_inode_readahead_blks = t;
@@ -3095,14 +3100,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sb, NO_UID32);
+ /* xattr user namespace & acls are now defaulted on */
#ifdef CONFIG_EXT4_FS_XATTR
- if (def_mount_opts & EXT4_DEFM_XATTR_USER)
- set_opt(sb, XATTR_USER);
+ set_opt(sb, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
- if (def_mount_opts & EXT4_DEFM_ACL)
- set_opt(sb, POSIX_ACL);
+ set_opt(sb, POSIX_ACL);
#endif
+ set_opt(sb, MBLK_IO_SUBMIT);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3509,7 +3514,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
no_journal:
- EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
+ /*
+ * The maximum number of concurrent works can be high and
+ * concurrency isn't really necessary. Limit it to 1.
+ */
+ EXT4_SB(sb)->dio_unwritten_wq =
+ alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1);
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
goto failed_mount_wq;
@@ -3524,17 +3534,16 @@ no_journal:
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
+ root = NULL;
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- iput(root);
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");
- iput(root);
ret = -ENOMEM;
goto failed_mount4;
}
@@ -3650,6 +3659,8 @@ cantfind_ext4:
goto failed_mount;
failed_mount4:
+ iput(root);
+ sb->s_root = NULL;
ext4_msg(sb, KERN_ERR, "mount failed");
destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
failed_mount_wq:
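
The two is_power_of_2() fixes above share one rationale: 0 is a legal inode_readahead_blks value (it disables readahead) but is_power_of_2(0) is false, so the test must be "non-zero and not a power of two". A minimal sketch of the corrected validation (standalone, not the kernel helper):

    #include <stdio.h>
    #include <stdbool.h>

    /* Accept 0 (readahead disabled) or any power of two up to 2^30. */
    static bool valid_readahead_blks(unsigned long v)
    {
        if (v > (1UL << 30))
            return false;
        return v == 0 || (v & (v - 1)) == 0;  /* zero, or a power of two */
    }

    int main(void)
    {
        unsigned long tests[] = { 0, 1, 32, 48, 1UL << 30 };
        unsigned int i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
            printf("%lu -> %s\n", tests[i],
                   valid_readahead_blks(tests[i]) ? "ok" : "rejected");
        return 0;
    }
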
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fc32176eee39..f4c03af05d69 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -833,7 +833,7 @@ inserted:
new_bh = sb_getblk(sb, block);
if (!new_bh) {
getblk_failed:
- ext4_free_blocks(handle, inode, 0, block, 1,
+ ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA);
error = -EIO;
goto cleanup;
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 1ef16520b950..25b7387ff183 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -145,10 +145,10 @@ ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
#ifdef CONFIG_EXT4_FS_SECURITY
extern int ext4_init_security(handle_t *handle, struct inode *inode,
- struct inode *dir);
+ struct inode *dir, const struct qstr *qstr);
#else
static inline int ext4_init_security(handle_t *handle, struct inode *inode,
- struct inode *dir)
+ struct inode *dir, const struct qstr *qstr)
{
return 0;
}
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 9b21268e121c..007c3bfbf094 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -49,14 +49,15 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
}
int
-ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir)
+ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int err;
size_t len;
void *value;
char *name;
- err = security_inode_init_security(inode, dir, &name, &value, &len);
+ err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
diff --git a/fs/file_table.c b/fs/file_table.c
index eb36b6b17e26..cbeec70ee310 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -190,7 +190,8 @@ struct file *alloc_file(struct path *path, fmode_t mode,
file_take_write(file);
WARN_ON(mnt_clone_write(path->mnt));
}
- ima_counts_get(file);
+ if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ i_readcount_inc(path->dentry->d_inode);
return file;
}
EXPORT_SYMBOL(alloc_file);
@@ -251,6 +252,8 @@ static void __fput(struct file *file)
fops_put(file->f_op);
put_pid(file->f_owner.pid);
file_sb_list_del(file);
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ i_readcount_dec(inode);
if (file->f_mode & FMODE_WRITE)
drop_file_write_access(file);
file->f_path.dentry = NULL;
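
alloc_file() and __fput() above now keep a per-inode count of read-only opens: incremented when a file is opened with FMODE_READ alone, decremented on the final put. A standalone sketch of that pairing with C11 atomics (toy names and values chosen to match the FMODE bits, not the VFS code):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { FMODE_READ = 1, FMODE_WRITE = 2 };

    struct toy_inode {
        atomic_int i_readcount;   /* read-only openers of this inode */
    };

    static void toy_open(struct toy_inode *inode, int mode)
    {
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
            atomic_fetch_add(&inode->i_readcount, 1);
    }

    static void toy_release(struct toy_inode *inode, int mode)
    {
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
            atomic_fetch_sub(&inode->i_readcount, 1);
    }

    int main(void)
    {
        struct toy_inode ino;

        atomic_init(&ino.i_readcount, 0);
        toy_open(&ino, FMODE_READ);                 /* counted */
        toy_open(&ino, FMODE_READ | FMODE_WRITE);   /* not counted */
        printf("readers: %d\n", atomic_load(&ino.i_readcount));
        toy_release(&ino, FMODE_READ);
        printf("readers: %d\n", atomic_load(&ino.i_readcount));
        return 0;
    }
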
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 3e87cce5837d..b6cca47f7b07 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -305,7 +305,7 @@ static void cuse_gendev_release(struct device *dev)
static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
struct cuse_conn *cc = fc_to_cc(fc);
- struct cuse_init_out *arg = &req->misc.cuse_init_out;
+ struct cuse_init_out *arg = req->out.args[0].value;
struct page *page = req->pages[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
@@ -384,6 +384,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
+ kfree(arg);
__free_page(page);
return;
@@ -405,6 +406,7 @@ static int cuse_send_init(struct cuse_conn *cc)
struct page *page;
struct fuse_conn *fc = &cc->fc;
struct cuse_init_in *arg;
+ void *outarg;
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
@@ -419,6 +421,10 @@ static int cuse_send_init(struct cuse_conn *cc)
if (!page)
goto err_put_req;
+ outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
+ if (!outarg)
+ goto err_free_page;
+
arg = &req->misc.cuse_init_in;
arg->major = FUSE_KERNEL_VERSION;
arg->minor = FUSE_KERNEL_MINOR_VERSION;
@@ -429,7 +435,7 @@ static int cuse_send_init(struct cuse_conn *cc)
req->in.args[0].value = arg;
req->out.numargs = 2;
req->out.args[0].size = sizeof(struct cuse_init_out);
- req->out.args[0].value = &req->misc.cuse_init_out;
+ req->out.args[0].value = outarg;
req->out.args[1].size = CUSE_INIT_INFO_MAX;
req->out.argvar = 1;
req->out.argpages = 1;
@@ -440,6 +446,8 @@ static int cuse_send_init(struct cuse_conn *cc)
return 0;
+err_free_page:
+ __free_page(page);
err_put_req:
fuse_put_request(fc, req);
err:
@@ -458,7 +466,7 @@ static void cuse_fc_release(struct fuse_conn *fc)
* @file: file struct being opened
*
* Userland CUSE server can create a CUSE device by opening /dev/cuse
- * and replying to the initilaization request kernel sends. This
+ * and replying to the initialization request kernel sends. This
* function is responsible for handling CUSE device initialization.
* Because the fd opened by this function is used during
* initialization, this function only creates cuse_conn and sends
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bfed8447ed80..83543b5ff941 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
if (err)
return err;
- if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
- return 0;
+ if (attr->ia_valid & ATTR_OPEN) {
+ if (fc->atomic_o_trunc)
+ return 0;
+ file = NULL;
+ }
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 95da1bc1c826..9e0832dbb1e3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
+static void fuse_release_async(struct work_struct *work)
+{
+ struct fuse_req *req;
+ struct fuse_conn *fc;
+ struct path path;
+
+ req = container_of(work, struct fuse_req, misc.release.work);
+ path = req->misc.release.path;
+ fc = get_fuse_conn(path.dentry->d_inode);
+
+ fuse_put_request(fc, req);
+ path_put(&path);
+}
+
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- path_put(&req->misc.release.path);
+ if (fc->destroy_req) {
+ /*
+ * If this is a fuseblk mount, then it's possible that
+ * releasing the path will result in releasing the
+ * super block and sending the DESTROY request. If
+ * the server is single threaded, this would hang.
+ * For this reason do the path_put() in a separate
+ * thread.
+ */
+ atomic_inc(&req->count);
+ INIT_WORK(&req->misc.release.work, fuse_release_async);
+ schedule_work(&req->misc.release.work);
+ } else {
+ path_put(&req->misc.release.path);
+ }
}
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_file_put(struct fuse_file *ff, bool sync)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- req->end = fuse_release_end;
- fuse_request_send_background(ff->fc, req);
+ if (sync) {
+ fuse_request_send(ff->fc, req);
+ path_put(&req->misc.release.path);
+ fuse_put_request(ff->fc, req);
+ } else {
+ req->end = fuse_release_end;
+ fuse_request_send_background(ff->fc, req);
+ }
kfree(ff);
}
}
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
+ *
+ * Make the release synchronous if this is a fuseblk mount;
+ * synchronous RELEASE is allowed (and desirable) in this case
+ * because the server can be trusted not to screw up.
*/
- fuse_file_put(ff);
+ fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
page_cache_release(page);
}
if (req->ff)
- fuse_file_put(req->ff);
+ fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
__free_page(req->pages[0]);
- fuse_file_put(req->ff);
+ fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
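
The release path above avoids a deadlock on fuseblk mounts: dropping the last path reference may release the super block and send DESTROY, which a single-threaded server cannot answer while it is still handling RELEASE, so the put is either done synchronously when safe or pushed to a work item. A userspace sketch of the same hand-off using a worker thread (invented names; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct release_work {
        char *path;               /* stand-in for the struct path being put */
    };

    /* Runs the heavyweight teardown outside the thread that dropped the
     * last reference. */
    static void *release_worker(void *arg)
    {
        struct release_work *w = arg;

        printf("worker: putting %s\n", w->path);   /* the deferred path_put() */
        free(w->path);
        free(w);
        return NULL;
    }

    int main(void)
    {
        struct release_work *w = malloc(sizeof(*w));
        pthread_t t;

        if (!w)
            return 1;
        w->path = strdup("/mnt/fuseblk/file");
        if (!w->path || pthread_create(&t, NULL, release_worker, w) != 0)
            return 1;
        /* The caller can return right away; the worker finishes the put. */
        pthread_join(t, NULL);
        return 0;
    }
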
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ae5744a2f9e9..b788becada76 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
+#include <linux/workqueue.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,13 +263,15 @@ struct fuse_req {
/** Data for asynchronous requests */
union {
struct {
- struct fuse_release_in in;
+ union {
+ struct fuse_release_in in;
+ struct work_struct work;
+ };
struct path path;
} release;
struct fuse_init_in init_in;
struct fuse_init_out init_out;
struct cuse_init_in cuse_init_in;
- struct cuse_init_out cuse_init_out;
struct {
struct fuse_read_in in;
u64 attr_ver;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 7118f1a780a9..cbc07155b1a0 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -80,8 +80,11 @@ int gfs2_check_acl(struct inode *inode, int mask, unsigned int flags)
struct posix_acl *acl;
int error;
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
+ if (flags & IPERM_FLAG_RCU) {
+ if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+ return -ECHILD;
+ return -EAGAIN;
+ }
acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
if (IS_ERR(acl))
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 3c4039d5eef1..ef3dc4b9fae2 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -21,6 +21,7 @@
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
+#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
@@ -757,7 +758,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrp_list rlist;
u64 bn, bstart;
- u32 blen;
+ u32 blen, btotal;
__be64 *p;
unsigned int rg_blocks = 0;
int metadata;
@@ -839,6 +840,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
bstart = 0;
blen = 0;
+ btotal = 0;
for (p = top; p < bottom; p++) {
if (!*p)
@@ -851,9 +853,11 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
else {
if (bstart) {
if (metadata)
- gfs2_free_meta(ip, bstart, blen);
+ __gfs2_free_meta(ip, bstart, blen);
else
- gfs2_free_data(ip, bstart, blen);
+ __gfs2_free_data(ip, bstart, blen);
+
+ btotal += blen;
}
bstart = bn;
@@ -865,11 +869,17 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
}
if (bstart) {
if (metadata)
- gfs2_free_meta(ip, bstart, blen);
+ __gfs2_free_meta(ip, bstart, blen);
else
- gfs2_free_data(ip, bstart, blen);
+ __gfs2_free_data(ip, bstart, blen);
+
+ btotal += blen;
}
+ gfs2_statfs_change(sdp, 0, +btotal, 0);
+ gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
+ ip->i_inode.i_gid);
+
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 7cfdcb913363..216ad2774a62 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -448,15 +448,20 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
- if (!(file->f_flags & O_NOATIME)) {
+ if (!(file->f_flags & O_NOATIME) &&
+ !IS_NOATIME(&ip->i_inode)) {
struct gfs2_holder i_gh;
int error;
- gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
error = gfs2_glock_nq(&i_gh);
- file_accessed(file);
- if (error == 0)
- gfs2_glock_dq_uninit(&i_gh);
+ if (error == 0) {
+ file_accessed(file);
+ gfs2_glock_dq(&i_gh);
+ }
+ gfs2_holder_uninit(&i_gh);
+ if (error)
+ return error;
}
vma->vm_ops = &gfs2_vm_ops;
vma->vm_flags |= VM_CAN_NONLINEAR;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7cd9a5a68d59..bc36aef058ff 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -26,6 +26,9 @@
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
+#include <linux/bit_spinlock.h>
#include "gfs2.h"
#include "incore.h"
@@ -41,10 +44,6 @@
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
-struct gfs2_gl_hash_bucket {
- struct hlist_head hb_list;
-};
-
struct gfs2_glock_iter {
int hash; /* hash bucket index */
struct gfs2_sbd *sdp; /* incore superblock */
@@ -54,7 +53,6 @@ struct gfs2_glock_iter {
typedef void (*glock_examiner) (struct gfs2_glock * gl);
-static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
@@ -70,57 +68,9 @@ static DEFINE_SPINLOCK(lru_lock);
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
-static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
static struct dentry *gfs2_root;
-/*
- * Despite what you might think, the numbers below are not arbitrary :-)
- * They are taken from the ipv4 routing hash code, which is well tested
- * and thus should be nearly optimal. Later on we might tweek the numbers
- * but for now this should be fine.
- *
- * The reason for putting the locks in a separate array from the list heads
- * is that we can have fewer locks than list heads and save memory. We use
- * the same hash function for both, but with a different hash mask.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
- defined(CONFIG_PROVE_LOCKING)
-
-#ifdef CONFIG_LOCKDEP
-# define GL_HASH_LOCK_SZ 256
-#else
-# if NR_CPUS >= 32
-# define GL_HASH_LOCK_SZ 4096
-# elif NR_CPUS >= 16
-# define GL_HASH_LOCK_SZ 2048
-# elif NR_CPUS >= 8
-# define GL_HASH_LOCK_SZ 1024
-# elif NR_CPUS >= 4
-# define GL_HASH_LOCK_SZ 512
-# else
-# define GL_HASH_LOCK_SZ 256
-# endif
-#endif
-
-/* We never want more locks than chains */
-#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
-# undef GL_HASH_LOCK_SZ
-# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
-#endif
-
-static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
-
-static inline rwlock_t *gl_lock_addr(unsigned int x)
-{
- return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
-}
-#else /* not SMP, so no spinlocks required */
-static inline rwlock_t *gl_lock_addr(unsigned int x)
-{
- return NULL;
-}
-#endif
-
/**
* gl_hash() - Turn glock number into hash bucket number
* @lock: The glock number
@@ -141,25 +91,30 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
return h;
}
-/**
- * glock_free() - Perform a few checks and then release struct gfs2_glock
- * @gl: The glock to release
- *
- * Also calls lock module to release its internal structure for this glock.
- *
- */
+static inline void spin_lock_bucket(unsigned int hash)
+{
+ struct hlist_bl_head *bl = &gl_hash_table[hash];
+ bit_spin_lock(0, (unsigned long *)bl);
+}
+
+static inline void spin_unlock_bucket(unsigned int hash)
+{
+ struct hlist_bl_head *bl = &gl_hash_table[hash];
+ __bit_spin_unlock(0, (unsigned long *)bl);
+}
-static void glock_free(struct gfs2_glock *gl)
+void gfs2_glock_free(struct rcu_head *rcu)
{
+ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
struct gfs2_sbd *sdp = gl->gl_sbd;
- struct address_space *mapping = gfs2_glock2aspace(gl);
- struct kmem_cache *cachep = gfs2_glock_cachep;
- GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
- trace_gfs2_glock_put(gl);
- if (mapping)
- cachep = gfs2_glock_aspace_cachep;
- sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+ else
+ kmem_cache_free(gfs2_glock_cachep, gl);
+
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
}
/**
@@ -185,34 +140,49 @@ static int demote_ok(const struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
+ /* assert_spin_locked(&gl->gl_spin); */
+
if (gl->gl_state == LM_ST_UNLOCKED)
return 0;
- if (!list_empty(&gl->gl_holders))
+ if (test_bit(GLF_LFLUSH, &gl->gl_flags))
+ return 0;
+ if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
+ !list_empty(&gl->gl_holders))
return 0;
if (glops->go_demote_ok)
return glops->go_demote_ok(gl);
return 1;
}
+
/**
- * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
+ * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
* @gl: the glock
*
+ * If the glock is demotable, then we add it (or move it) to the end
+ * of the glock LRU list.
*/
-static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
- int may_reclaim;
- may_reclaim = (demote_ok(gl) &&
- (atomic_read(&gl->gl_ref) == 1 ||
- (gl->gl_name.ln_type == LM_TYPE_INODE &&
- atomic_read(&gl->gl_ref) <= 2)));
- spin_lock(&lru_lock);
- if (list_empty(&gl->gl_lru) && may_reclaim) {
+ if (demote_ok(gl)) {
+ spin_lock(&lru_lock);
+
+ if (!list_empty(&gl->gl_lru))
+ list_del_init(&gl->gl_lru);
+ else
+ atomic_inc(&lru_count);
+
list_add_tail(&gl->gl_lru, &lru_list);
- atomic_inc(&lru_count);
+ spin_unlock(&lru_lock);
}
- spin_unlock(&lru_lock);
+}
+
+void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+{
+ spin_lock(&gl->gl_spin);
+ __gfs2_glock_schedule_for_reclaim(gl);
+ spin_unlock(&gl->gl_spin);
}
/**
@@ -227,7 +197,6 @@ void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
if (atomic_dec_and_test(&gl->gl_ref))
GLOCK_BUG_ON(gl, 1);
- gfs2_glock_schedule_for_reclaim(gl);
}
/**
@@ -236,30 +205,26 @@ void gfs2_glock_put_nolock(struct gfs2_glock *gl)
*
*/
-int gfs2_glock_put(struct gfs2_glock *gl)
+void gfs2_glock_put(struct gfs2_glock *gl)
{
- int rv = 0;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
- write_lock(gl_lock_addr(gl->gl_hash));
- if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
- hlist_del(&gl->gl_list);
+ if (atomic_dec_and_test(&gl->gl_ref)) {
+ spin_lock_bucket(gl->gl_hash);
+ hlist_bl_del_rcu(&gl->gl_list);
+ spin_unlock_bucket(gl->gl_hash);
+ spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
}
spin_unlock(&lru_lock);
- write_unlock(gl_lock_addr(gl->gl_hash));
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
- glock_free(gl);
- rv = 1;
- goto out;
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+ trace_gfs2_glock_put(gl);
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
- spin_lock(&gl->gl_spin);
- gfs2_glock_schedule_for_reclaim(gl);
- spin_unlock(&gl->gl_spin);
- write_unlock(gl_lock_addr(gl->gl_hash));
-out:
- return rv;
}
/**
@@ -275,17 +240,15 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
const struct lm_lockname *name)
{
struct gfs2_glock *gl;
- struct hlist_node *h;
+ struct hlist_bl_node *h;
- hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
+ hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
if (!lm_name_equal(&gl->gl_name, name))
continue;
if (gl->gl_sbd != sdp)
continue;
-
- atomic_inc(&gl->gl_ref);
-
- return gl;
+ if (atomic_inc_not_zero(&gl->gl_ref))
+ return gl;
}
return NULL;
@@ -743,10 +706,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
struct gfs2_glock *gl, *tmp;
unsigned int hash = gl_hash(sdp, &name);
struct address_space *mapping;
+ struct kmem_cache *cachep;
- read_lock(gl_lock_addr(hash));
+ rcu_read_lock();
gl = search_bucket(hash, sdp, &name);
- read_unlock(gl_lock_addr(hash));
+ rcu_read_unlock();
*glp = gl;
if (gl)
@@ -755,9 +719,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
return -ENOENT;
if (glops->go_flags & GLOF_ASPACE)
- gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+ cachep = gfs2_glock_aspace_cachep;
else
- gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+ cachep = gfs2_glock_cachep;
+ gl = kmem_cache_alloc(cachep, GFP_KERNEL);
if (!gl)
return -ENOMEM;
@@ -790,15 +755,15 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping->writeback_index = 0;
}
- write_lock(gl_lock_addr(hash));
+ spin_lock_bucket(hash);
tmp = search_bucket(hash, sdp, &name);
if (tmp) {
- write_unlock(gl_lock_addr(hash));
- glock_free(gl);
+ spin_unlock_bucket(hash);
+ kmem_cache_free(cachep, gl);
gl = tmp;
} else {
- hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
- write_unlock(gl_lock_addr(hash));
+ hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
+ spin_unlock_bucket(hash);
}
*glp = gl;
@@ -1007,13 +972,13 @@ fail:
insert_pt = &gh2->gh_list;
}
set_bit(GLF_QUEUED, &gl->gl_flags);
+ trace_gfs2_glock_queue(gh, 1);
if (likely(insert_pt == NULL)) {
list_add_tail(&gh->gh_list, &gl->gl_holders);
if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
goto do_cancel;
return;
}
- trace_gfs2_glock_queue(gh, 1);
list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
@@ -1113,6 +1078,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
!test_bit(GLF_DEMOTE, &gl->gl_flags))
fast_path = 1;
}
+ __gfs2_glock_schedule_for_reclaim(gl);
trace_gfs2_glock_queue(gh, 0);
spin_unlock(&gl->gl_spin);
if (likely(fast_path))
@@ -1440,42 +1406,30 @@ static struct shrinker glock_shrinker = {
* @sdp: the filesystem
* @bucket: the bucket
*
- * Returns: 1 if the bucket has entries
*/
-static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
+static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
unsigned int hash)
{
- struct gfs2_glock *gl, *prev = NULL;
- int has_entries = 0;
- struct hlist_head *head = &gl_hash_table[hash].hb_list;
+ struct gfs2_glock *gl;
+ struct hlist_bl_head *head = &gl_hash_table[hash];
+ struct hlist_bl_node *pos;
- read_lock(gl_lock_addr(hash));
- /* Can't use hlist_for_each_entry - don't want prefetch here */
- if (hlist_empty(head))
- goto out;
- gl = list_entry(head->first, struct gfs2_glock, gl_list);
- while(1) {
- if (!sdp || gl->gl_sbd == sdp) {
- gfs2_glock_hold(gl);
- read_unlock(gl_lock_addr(hash));
- if (prev)
- gfs2_glock_put(prev);
- prev = gl;
+ rcu_read_lock();
+ hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
+ if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
examiner(gl);
- has_entries = 1;
- read_lock(gl_lock_addr(hash));
- }
- if (gl->gl_list.next == NULL)
- break;
- gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
}
-out:
- read_unlock(gl_lock_addr(hash));
- if (prev)
- gfs2_glock_put(prev);
+ rcu_read_unlock();
cond_resched();
- return has_entries;
+}
+
+static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
+{
+ unsigned x;
+
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(examiner, sdp, x);
}
@@ -1529,10 +1483,21 @@ static void clear_glock(struct gfs2_glock *gl)
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
- unsigned x;
+ glock_hash_walk(thaw_glock, sdp);
+}
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- examine_bucket(thaw_glock, sdp, x);
+static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+{
+ int ret;
+ spin_lock(&gl->gl_spin);
+ ret = __dump_glock(seq, gl);
+ spin_unlock(&gl->gl_spin);
+ return ret;
+}
+
+static void dump_glock_func(struct gfs2_glock *gl)
+{
+ dump_glock(NULL, gl);
}
/**
@@ -1545,13 +1510,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
- unsigned int x;
-
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- examine_bucket(clear_glock, sdp, x);
+ glock_hash_walk(clear_glock, sdp);
flush_workqueue(glock_workqueue);
wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
- gfs2_dump_lockstate(sdp);
+ glock_hash_walk(dump_glock_func, sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
@@ -1717,66 +1679,15 @@ out:
return error;
}
-static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
-{
- int ret;
- spin_lock(&gl->gl_spin);
- ret = __dump_glock(seq, gl);
- spin_unlock(&gl->gl_spin);
- return ret;
-}
-/**
- * gfs2_dump_lockstate - print out the current lockstate
- * @sdp: the filesystem
- * @ub: the buffer to copy the information into
- *
- * If @ub is NULL, dump the lockstate to the console.
- *
- */
-
-static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
-{
- struct gfs2_glock *gl;
- struct hlist_node *h;
- unsigned int x;
- int error = 0;
-
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-
- read_lock(gl_lock_addr(x));
-
- hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
- if (gl->gl_sbd != sdp)
- continue;
-
- error = dump_glock(NULL, gl);
- if (error)
- break;
- }
-
- read_unlock(gl_lock_addr(x));
-
- if (error)
- break;
- }
-
-
- return error;
-}
int __init gfs2_glock_init(void)
{
unsigned i;
for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
- INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
- }
-#ifdef GL_HASH_LOCK_SZ
- for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
- rwlock_init(&gl_hash_locks[i]);
+ INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
}
-#endif
glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
@@ -1802,62 +1713,54 @@ void gfs2_glock_exit(void)
destroy_workqueue(gfs2_delete_workqueue);
}
+static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
+{
+ return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
+ struct gfs2_glock, gl_list);
+}
+
+static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
+{
+ return hlist_bl_entry(rcu_dereference_raw(gl->gl_list.next),
+ struct gfs2_glock, gl_list);
+}
+
static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
struct gfs2_glock *gl;
-restart:
- read_lock(gl_lock_addr(gi->hash));
- gl = gi->gl;
- if (gl) {
- gi->gl = hlist_entry(gl->gl_list.next,
- struct gfs2_glock, gl_list);
- } else {
- gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
- struct gfs2_glock, gl_list);
- }
- if (gi->gl)
- gfs2_glock_hold(gi->gl);
- read_unlock(gl_lock_addr(gi->hash));
- if (gl)
- gfs2_glock_put(gl);
- while (gi->gl == NULL) {
- gi->hash++;
- if (gi->hash >= GFS2_GL_HASH_SIZE)
- return 1;
- read_lock(gl_lock_addr(gi->hash));
- gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
- struct gfs2_glock, gl_list);
- if (gi->gl)
- gfs2_glock_hold(gi->gl);
- read_unlock(gl_lock_addr(gi->hash));
- }
-
- if (gi->sdp != gi->gl->gl_sbd)
- goto restart;
+ do {
+ gl = gi->gl;
+ if (gl) {
+ gi->gl = glock_hash_next(gl);
+ } else {
+ gi->gl = glock_hash_chain(gi->hash);
+ }
+ while (gi->gl == NULL) {
+ gi->hash++;
+ if (gi->hash >= GFS2_GL_HASH_SIZE) {
+ rcu_read_unlock();
+ return 1;
+ }
+ gi->gl = glock_hash_chain(gi->hash);
+ }
+ /* Skip entries for other sb and dead entries */
+ } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
return 0;
}
-static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
-{
- if (gi->gl)
- gfs2_glock_put(gi->gl);
- gi->gl = NULL;
-}
-
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
struct gfs2_glock_iter *gi = seq->private;
loff_t n = *pos;
gi->hash = 0;
+ rcu_read_lock();
do {
- if (gfs2_glock_iter_next(gi)) {
- gfs2_glock_iter_free(gi);
+ if (gfs2_glock_iter_next(gi))
return NULL;
- }
} while (n--);
return gi->gl;
@@ -1870,10 +1773,8 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
(*pos)++;
- if (gfs2_glock_iter_next(gi)) {
- gfs2_glock_iter_free(gi);
+ if (gfs2_glock_iter_next(gi))
return NULL;
- }
return gi->gl;
}
@@ -1881,7 +1782,10 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
struct gfs2_glock_iter *gi = seq->private;
- gfs2_glock_iter_free(gi);
+
+ if (gi->gl)
+ rcu_read_unlock();
+ gi->gl = NULL;
}
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 691851ceb615..afa8bfea5647 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -118,7 +118,7 @@ struct lm_lockops {
int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
void (*lm_unmount) (struct gfs2_sbd *sdp);
void (*lm_withdraw) (struct gfs2_sbd *sdp);
- void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
+ void (*lm_put_lock) (struct gfs2_glock *gl);
int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
unsigned int flags);
void (*lm_cancel) (struct gfs2_glock *gl);
@@ -174,7 +174,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp,
int create, struct gfs2_glock **glp);
void gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put_nolock(struct gfs2_glock *gl);
-int gfs2_glock_put(struct gfs2_glock *gl);
+void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
struct gfs2_holder *gh);
void gfs2_holder_reinit(unsigned int state, unsigned flags,
@@ -223,25 +223,22 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
return error;
}
-/* Lock Value Block functions */
-
-int gfs2_lvb_hold(struct gfs2_glock *gl);
-void gfs2_lvb_unhold(struct gfs2_glock *gl);
-
-void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
-void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
-void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
-void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
-void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-
-int __init gfs2_glock_init(void);
-void gfs2_glock_exit(void);
-
-int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
-void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
-int gfs2_register_debugfs(void);
-void gfs2_unregister_debugfs(void);
+extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
+extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
+extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+extern void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
+extern void gfs2_glock_free(struct rcu_head *rcu);
+
+extern int __init gfs2_glock_init(void);
+extern void gfs2_glock_exit(void);
+
+extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
+extern int gfs2_register_debugfs(void);
+extern void gfs2_unregister_debugfs(void);
extern const struct lm_lockops gfs2_dlm_ops;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 263561bf1a50..ac5fac948f87 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -206,8 +206,17 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct gfs2_holder *gh;
+
if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
return 0;
+
+ if (!list_empty(&gl->gl_holders)) {
+ gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+ if (gh->gh_list.next != &gl->gl_holders)
+ return 0;
+ }
+
return 1;
}
@@ -272,19 +281,6 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
}
/**
- * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
-{
- const struct address_space *mapping = (const struct address_space *)(gl + 1);
- return !mapping->nrpages;
-}
-
-/**
* rgrp_go_lock - operation done after an rgrp lock is locked by
* a first holder on this node.
* @gl: the glock
@@ -410,7 +406,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_xmote_th = rgrp_go_sync,
.go_inval = rgrp_go_inval,
- .go_demote_ok = rgrp_go_demote_ok,
.go_lock = rgrp_go_lock,
.go_unlock = rgrp_go_unlock,
.go_dump = gfs2_rgrp_dump,
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a79790c06275..720c1e66b343 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -15,6 +15,8 @@
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -201,7 +203,7 @@ enum {
};
struct gfs2_glock {
- struct hlist_node gl_list;
+ struct hlist_bl_node gl_list;
unsigned long gl_flags; /* GLF_... */
struct lm_lockname gl_name;
atomic_t gl_ref;
@@ -234,6 +236,7 @@ struct gfs2_glock {
atomic_t gl_ail_count;
struct delayed_work gl_work;
struct work_struct gl_delete;
+ struct rcu_head gl_rcu;
};
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 7aa7d4f8984a..97d54a28776a 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -763,14 +763,15 @@ fail:
return error;
}
-static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
+static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
+ const struct qstr *qstr)
{
int err;
size_t len;
void *value;
char *name;
- err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
+ err = security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
&name, &value, &len);
if (err) {
@@ -854,7 +855,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
if (error)
goto fail_gunlock2;
- error = gfs2_security_init(dip, GFS2_I(inode));
+ error = gfs2_security_init(dip, GFS2_I(inode), name);
if (error)
goto fail_gunlock2;
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 6e493aee28f8..c80485cb6f25 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -22,7 +22,6 @@ static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
- struct gfs2_sbd *sdp = gl->gl_sbd;
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -31,12 +30,7 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
- kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- else
- kmem_cache_free(gfs2_glock_cachep, gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ call_rcu(&gl->gl_rcu, gfs2_glock_free);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
ret |= LM_OUT_CANCELED;
@@ -164,16 +158,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}
-static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+static void gdlm_put_lock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
- kmem_cache_free(cachep, gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ call_rcu(&gl->gl_rcu, gfs2_glock_free);
return;
}
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index bf33f822058d..11a73efa8261 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -91,7 +91,8 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
}
bd->bd_ail = ai;
list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
- clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+ if (test_and_clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags))
+ gfs2_glock_schedule_for_reclaim(bd->bd_gl);
trace_gfs2_pin(bd, 0);
gfs2_log_unlock(sdp);
unlock_buffer(bh);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 85ba027d1c4d..888a5f5a1a58 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -14,6 +14,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfs2_ondisk.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
#include <asm/atomic.h>
#include "gfs2.h"
@@ -45,7 +47,7 @@ static void gfs2_init_glock_once(void *foo)
{
struct gfs2_glock *gl = foo;
- INIT_HLIST_NODE(&gl->gl_list);
+ INIT_HLIST_BL_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_lru);
@@ -59,14 +61,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
struct address_space *mapping = (struct address_space *)(gl + 1);
gfs2_init_glock_once(gl);
- memset(mapping, 0, sizeof(*mapping));
- INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
- spin_lock_init(&mapping->tree_lock);
- spin_lock_init(&mapping->i_mmap_lock);
- INIT_LIST_HEAD(&mapping->private_list);
- spin_lock_init(&mapping->private_lock);
- INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
- INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+ address_space_init_once(mapping);
}
/**
@@ -198,6 +193,8 @@ static void __exit exit_gfs2_fs(void)
unregister_filesystem(&gfs2meta_fs_type);
destroy_workqueue(gfs_recovery_wq);
+ rcu_barrier();
+
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 777927ce6f79..a39c103ba499 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -928,12 +928,9 @@ static const match_table_t nolock_tokens = {
{ Opt_err, NULL },
};
-static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+static void nolock_put_lock(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- kmem_cache_free(cachep, gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ call_rcu(&gl->gl_rcu, gfs2_glock_free);
}
static const struct lm_lockops nolock_ops = {
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index d8b26ac2e20b..09e436a50723 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -1026,9 +1026,9 @@ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
/**
* gfs2_permission -
- * @inode:
- * @mask:
- * @nd: passed from Linux VFS, ignored by us
+ * @inode: The inode
+ * @mask: The mask to be tested
+ * @flags: Indicates whether this is an RCU path walk or not
*
* This may be called from the VFS directly, or from within GFS2 with the
* inode locked, so we look to see if the glock is already locked and only
@@ -1044,11 +1044,11 @@ int gfs2_permission(struct inode *inode, int mask, unsigned int flags)
int error;
int unlock = 0;
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
ip = GFS2_I(inode);
if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return error;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a689901963de..6ec964c31dc6 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1587,6 +1587,8 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
offset = qd2offset(qd);
alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
+ if (gfs2_is_stuffed(ip))
+ alloc_required = 1;
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (al == NULL)
@@ -1600,7 +1602,9 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
blocks += gfs2_rg_blocks(al);
}
- error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
+ /* Some quotas span block boundaries and can update two blocks, so add
+    an extra block to the transaction to handle such quotas */
+ error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
if (error)
goto out_release;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 7293ea27020c..cf930cd9664a 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1602,7 +1602,7 @@ rgrp_error:
*
*/
-void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
+void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
@@ -1617,7 +1617,21 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
gfs2_trans_add_rg(rgd);
+}
+/**
+ * gfs2_free_data - free a contiguous run of data block(s)
+ * @ip: the inode these blocks are being freed from
+ * @bstart: first block of a run of contiguous blocks
+ * @blen: the length of the block run
+ *
+ */
+
+void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+
+ __gfs2_free_data(ip, bstart, blen);
gfs2_statfs_change(sdp, 0, +blen, 0);
gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
@@ -1630,7 +1644,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
*
*/
-void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
+void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
@@ -1645,10 +1659,24 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
gfs2_trans_add_rg(rgd);
+ gfs2_meta_wipe(ip, bstart, blen);
+}
+/**
+ * gfs2_free_meta - free a contiguous run of metadata block(s)
+ * @ip: the inode these blocks are being freed from
+ * @bstart: first block of a run of contiguous blocks
+ * @blen: the length of the block run
+ *
+ */
+
+void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+
+ __gfs2_free_meta(ip, bstart, blen);
gfs2_statfs_change(sdp, 0, +blen, 0);
gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
- gfs2_meta_wipe(ip, bstart, blen);
}
void gfs2_unlink_di(struct inode *inode)
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 50c2bb04369c..a80e3034ac47 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -52,7 +52,9 @@ extern int gfs2_ri_update(struct gfs2_inode *ip);
extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
+extern void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
extern void gfs2_unlink_di(struct inode *inode);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index b1991a2a08e0..b9c1a4b5ba89 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -209,6 +209,7 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res = -EIO;
u32 ablock, dblock, mask;
+ sector_t sector;
int was_dirty = 0;
int shift;
@@ -255,10 +256,12 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
done:
dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
inode->i_ino, (long long)iblock, dblock);
+
mask = (1 << sbi->fs_shift) - 1;
- map_bh(bh_result, sb,
- (dblock << sbi->fs_shift) + sbi->blockoffset +
- (iblock & mask));
+ sector = ((sector_t)dblock << sbi->fs_shift) +
+ sbi->blockoffset + (iblock & mask);
+ map_bh(bh_result, sb, sector);
+
if (create) {
set_buffer_new(bh_result);
hip->phys_size += sb->s_blocksize;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index b49b55584c84..0813af083576 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi->rsrc_clump_blocks)
sbi->rsrc_clump_blocks = 1;
+ err = generic_check_addressable(sbi->alloc_blksz_shift,
+ sbi->total_blocks);
+ if (err) {
+ printk(KERN_ERR "hfs: filesystem size too large.\n");
+ goto out_free_vhdr;
+ }
+
/* Set up operations so we can load metadata */
sb->s_op = &hfsplus_sops;
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY;
}
+ err = -EINVAL;
+
/* Load metadata objects (B*Trees) */
sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
if (!sbi->ext_tree) {
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index a3f0bfcc881e..a32998f29f0b 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -142,7 +142,11 @@ int hfsplus_uni2asc(struct super_block *sb,
/* search for single decomposed char */
if (likely(compose))
ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0);
- if (ce1 && (cc = ce1[0])) {
+ if (ce1)
+ cc = ce1[0];
+ else
+ cc = 0;
+ if (cc) {
/* start of a possibly decomposed Hangul char */
if (cc != 0xffff)
goto done;
@@ -209,7 +213,8 @@ int hfsplus_uni2asc(struct super_block *sb,
i++;
ce2 = ce1;
}
- if ((cc = ce2[0])) {
+ cc = ce2[0];
+ if (cc) {
ip += i;
ustrlen -= i;
goto done;
@@ -301,7 +306,11 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
size = asc2unichar(sb, astr, len, &c);
- if (decompose && (dstr = decompose_unichar(c, &dsize))) {
+ if (decompose)
+ dstr = decompose_unichar(c, &dsize);
+ else
+ dstr = NULL;
+ if (dstr) {
if (outlen + dsize > HFSPLUS_MAX_STRLEN)
break;
do {
@@ -346,15 +355,23 @@ int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode,
astr += size;
len -= size;
- if (decompose && (dstr = decompose_unichar(c, &dsize))) {
+ if (decompose)
+ dstr = decompose_unichar(c, &dsize);
+ else
+ dstr = NULL;
+ if (dstr) {
do {
c2 = *dstr++;
- if (!casefold || (c2 = case_fold(c2)))
+ if (casefold)
+ c2 = case_fold(c2);
+ if (!casefold || c2)
hash = partial_name_hash(c2, hash);
} while (--dsize > 0);
} else {
c2 = c;
- if (!casefold || (c2 = case_fold(c2)))
+ if (casefold)
+ c2 = case_fold(c2);
+ if (!casefold || c2)
hash = partial_name_hash(c2, hash);
}
}
@@ -422,12 +439,14 @@ int hfsplus_compare_dentry(const struct dentry *parent,
c1 = *dstr1;
c2 = *dstr2;
if (casefold) {
- if (!(c1 = case_fold(c1))) {
+ c1 = case_fold(c1);
+ if (!c1) {
dstr1++;
dsize1--;
continue;
}
- if (!(c2 = case_fold(c2))) {
+ c2 = case_fold(c2);
+ if (!c2) {
dstr2++;
dsize2--;
continue;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 3031d81f5f0f..c946d3da095d 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -138,10 +138,6 @@ int hfsplus_read_wrapper(struct super_block *sb)
if (hfsplus_get_last_session(sb, &part_start, &part_size))
goto out;
- if ((u64)part_start + part_size > 0x100000000ULL) {
- pr_err("hfs: volumes larger than 2TB are not supported yet\n");
- goto out;
- }
error = -ENOMEM;
sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
@@ -169,8 +165,9 @@ reread:
if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
goto out_free_backup_vhdr;
wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
- part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
- part_size = wd.embed_count * wd.ablk_size;
+ part_start += (sector_t)wd.ablk_start +
+ (sector_t)wd.embed_start * wd.ablk_size;
+ part_size = (sector_t)wd.embed_count * wd.ablk_size;
goto reread;
default:
/*
diff --git a/fs/inode.c b/fs/inode.c
index da85e56378f3..0647d80accf6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, i_callback);
}
+void address_space_init_once(struct address_space *mapping)
+{
+ memset(mapping, 0, sizeof(*mapping));
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ spin_lock_init(&mapping->tree_lock);
+ spin_lock_init(&mapping->i_mmap_lock);
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+ mutex_init(&mapping->unmap_mutex);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
/*
* These are initializations that only need to be done
* once, because the fields are idempotent across use
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->i_devices);
INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
- INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
- spin_lock_init(&inode->i_data.tree_lock);
- spin_lock_init(&inode->i_data.i_mmap_lock);
- INIT_LIST_HEAD(&inode->i_data.private_list);
- spin_lock_init(&inode->i_data.private_lock);
- INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
- INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+ address_space_init_once(&inode->i_data);
i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb)
/**
* invalidate_inodes - attempt to free all inodes on a superblock
* @sb: superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
*
* Attempts to free all inodes for a given superblock. If there were any
* busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too, otherwise treat
+ * them as busy.
*/
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
int busy = 0;
struct inode *inode, *next;
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
continue;
+ if (inode->i_state & I_DIRTY && !kill_dirty) {
+ busy = 1;
+ continue;
+ }
if (atomic_read(&inode->i_count)) {
busy = 1;
continue;
diff --git a/fs/internal.h b/fs/internal.h
index 0663568b1247..9b976b57d7fe 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *);
*/
extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *);
-extern int invalidate_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index da1b5e4ffce1..eb11601f2e00 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -839,7 +839,7 @@ journal_t * journal_init_inode (struct inode *inode)
err = journal_bmap(journal, 0, &blocknr);
/* If that failed, give up */
if (err) {
- printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
+ printk(KERN_ERR "%s: Cannot locate journal superblock\n",
__func__);
goto out_err;
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 97e73469b2c4..90407b8fece7 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -991,7 +991,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
err = jbd2_journal_bmap(journal, 0, &blocknr);
/* If that failed, give up */
if (err) {
- printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
+ printk(KERN_ERR "%s: Cannot locate journal superblock\n",
__func__);
goto out_err;
}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 92978658ed18..82faddd1f321 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -215,8 +215,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
no chance of AB-BA deadlock involving its f->sem). */
mutex_unlock(&f->sem);
- ret = jffs2_do_create(c, dir_f, f, ri,
- dentry->d_name.name, dentry->d_name.len);
+ ret = jffs2_do_create(c, dir_f, f, ri, &dentry->d_name);
if (ret)
goto fail;
@@ -386,7 +385,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
jffs2_complete_reservation(c);
- ret = jffs2_init_security(inode, dir_i);
+ ret = jffs2_init_security(inode, dir_i, &dentry->d_name);
if (ret)
goto fail;
@@ -530,7 +529,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
jffs2_complete_reservation(c);
- ret = jffs2_init_security(inode, dir_i);
+ ret = jffs2_init_security(inode, dir_i, &dentry->d_name);
if (ret)
goto fail;
@@ -703,7 +702,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
jffs2_complete_reservation(c);
- ret = jffs2_init_security(inode, dir_i);
+ ret = jffs2_init_security(inode, dir_i, &dentry->d_name);
if (ret)
goto fail;
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 5a53d9bdb2b5..e4619b00f7c5 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -401,7 +401,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, unsigned char *buf,
uint32_t offset, uint32_t writelen, uint32_t *retlen);
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f,
- struct jffs2_raw_inode *ri, const char *name, int namelen);
+ struct jffs2_raw_inode *ri, const struct qstr *qstr);
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name,
int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino,
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 239f51216a68..cfeb7164b085 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -23,14 +23,15 @@
#include "nodelist.h"
/* ---- Initial Security Label Attachment -------------- */
-int jffs2_init_security(struct inode *inode, struct inode *dir)
+int jffs2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int rc;
size_t len;
void *value;
char *name;
- rc = security_inode_init_security(inode, dir, &name, &value, &len);
+ rc = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
if (rc) {
if (rc == -EOPNOTSUPP)
return 0;
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index c819eb0e982d..30d175b6d290 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -424,7 +424,9 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
return ret;
}
-int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen)
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
+ struct jffs2_inode_info *f, struct jffs2_raw_inode *ri,
+ const struct qstr *qstr)
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dnode *fn;
@@ -466,15 +468,15 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode);
+ ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode, qstr);
if (ret)
return ret;
ret = jffs2_init_acl_post(&f->vfs_inode);
if (ret)
return ret;
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(qstr->len));
if (ret) {
/* Eep. */
@@ -493,19 +495,19 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
- rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + qstr->len);
rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
rd->pino = cpu_to_je32(dir_f->inocache->ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = ri->ino;
rd->mctime = ri->ctime;
- rd->nsize = namelen;
+ rd->nsize = qstr->len;
rd->type = DT_REG;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
- rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+ rd->name_crc = cpu_to_je32(crc32(0, qstr->name, qstr->len));
- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL);
jffs2_free_raw_dirent(rd);
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index cf4f5759b42b..7be4beb306f3 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -121,10 +121,11 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
#endif /* CONFIG_JFFS2_FS_XATTR */
#ifdef CONFIG_JFFS2_FS_SECURITY
-extern int jffs2_init_security(struct inode *inode, struct inode *dir);
+extern int jffs2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr);
extern const struct xattr_handler jffs2_security_xattr_handler;
#else
-#define jffs2_init_security(inode,dir) (0)
+#define jffs2_init_security(inode,dir,qstr) (0)
#endif /* CONFIG_JFFS2_FS_SECURITY */
#endif /* _JFFS2_FS_XATTR_H_ */
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
index 88b6cc535bf2..e9e100fd7c09 100644
--- a/fs/jfs/jfs_xattr.h
+++ b/fs/jfs/jfs_xattr.h
@@ -62,10 +62,11 @@ extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
extern int jfs_removexattr(struct dentry *, const char *);
#ifdef CONFIG_JFS_SECURITY
-extern int jfs_init_security(tid_t, struct inode *, struct inode *);
+extern int jfs_init_security(tid_t, struct inode *, struct inode *,
+ const struct qstr *);
#else
static inline int jfs_init_security(tid_t tid, struct inode *inode,
- struct inode *dir)
+ struct inode *dir, const struct qstr *qstr)
{
return 0;
}
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 81ead850ddb6..2a37d2fdd707 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -115,7 +115,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
if (rc)
goto out3;
- rc = jfs_init_security(tid, ip, dip);
+ rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
if (rc) {
txAbort(tid, 0);
goto out3;
@@ -253,7 +253,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
if (rc)
goto out3;
- rc = jfs_init_security(tid, ip, dip);
+ rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
if (rc) {
txAbort(tid, 0);
goto out3;
@@ -932,7 +932,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
- rc = jfs_init_security(tid, ip, dip);
+ rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
if (rc)
goto out3;
@@ -1395,7 +1395,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
if (rc)
goto out3;
- rc = jfs_init_security(tid, ip, dir);
+ rc = jfs_init_security(tid, ip, dir, &dentry->d_name);
if (rc) {
txAbort(tid, 0);
goto out3;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 2d7f165d0f1d..3fa4c32272df 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -1091,7 +1091,8 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
}
#ifdef CONFIG_JFS_SECURITY
-int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir)
+int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
{
int rc;
size_t len;
@@ -1099,7 +1100,8 @@ int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir)
char *suffix;
char *name;
- rc = security_inode_init_security(inode, dir, &suffix, &value, &len);
+ rc = security_inode_init_security(inode, dir, qstr, &suffix, &value,
+ &len);
if (rc) {
if (rc == -EOPNOTSUPP)
return 0;
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b0b95371696..d1edf26025dc 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
*/
br_write_lock(vfsmount_lock);
if (mnt_get_count(mnt) != 2) {
- br_write_lock(vfsmount_lock);
+ br_write_unlock(vfsmount_lock);
return -EBUSY;
}
br_write_unlock(vfsmount_lock);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1cc600e77bb4..caa33188f637 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1513,7 +1513,7 @@ static int nfsiod_start(void)
{
struct workqueue_struct *wq;
dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_RESCUER, 0);
+ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
if (wq == NULL)
return -ENOMEM;
nfsiod_workqueue = wq;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index da1d9701f8e4..4c189bad22a1 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -19,7 +19,6 @@
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
-#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/jhash.h>
@@ -906,7 +905,6 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
nfsdstats.io_read += host_err;
*count = host_err;
err = 0;
- fsnotify_access(file);
} else
err = nfserrno(host_err);
return err;
@@ -1008,7 +1006,6 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
goto out_nfserr;
*cnt = host_err;
nfsdstats.io_write += host_err;
- fsnotify_modify(file);
/* clear setuid/setgid flag after write */
if (inode->i_mode & (S_ISUID | S_ISGID))
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d7fd696e595c..0a0a66d98cce 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -521,8 +521,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
group_offset, bitmap))
printk(KERN_WARNING "%s: entry number %llu already freed\n",
__func__, (unsigned long long)req->pr_entry_nr);
-
- nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+ else
+ nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
kunmap(req->pr_bitmap_bh->b_page);
kunmap(req->pr_desc_bh->b_page);
@@ -558,8 +558,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
group_offset, bitmap))
printk(KERN_WARNING "%s: entry number %llu already freed\n",
__func__, (unsigned long long)req->pr_entry_nr);
-
- nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+ else
+ nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
kunmap(req->pr_bitmap_bh->b_page);
kunmap(req->pr_desc_bh->b_page);
@@ -665,7 +665,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
for (j = i, n = 0;
(j < nitems) && nilfs_palloc_group_is_in(inode, group,
entry_nrs[j]);
- j++, n++) {
+ j++) {
nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
if (!nilfs_clear_bit_atomic(
nilfs_mdt_bgl_lock(inode, group),
@@ -674,6 +674,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
"%s: entry number %llu already freed\n",
__func__,
(unsigned long long)entry_nrs[j]);
+ } else {
+ n++;
}
}
nilfs_palloc_group_desc_add_entries(inode, group, desc, n);
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 3ee67c67cc52..85447a2fab33 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -425,17 +425,6 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
/*
* Internal use only
*/
-
-void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n)
-{
- inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
-}
-
-void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
-{
- inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
-}
-
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
const struct buffer_head *bh)
{
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index bde1c0aa2e15..40d9f453d31c 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -240,9 +240,6 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
-void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
-void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
-
/* Assume that bmap semaphore is locked. */
static inline int nilfs_bmap_dirty(const struct nilfs_bmap *bmap)
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 388e9e8f5286..85f7baa15f5d 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,11 +35,6 @@
#include "btnode.h"
-void nilfs_btnode_cache_init_once(struct address_space *btnc)
-{
- nilfs_mapping_init_once(btnc);
-}
-
static const struct address_space_operations def_btnode_aops = {
.sync_page = block_sync_page,
};
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 79037494f1e0..1b8ebd888c28 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
struct buffer_head *newbh;
};
-void nilfs_btnode_cache_init_once(struct address_space *);
void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 300c2bc00c3f..d451ae0e0bf3 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1174,7 +1174,7 @@ static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
if (ret < 0)
goto out;
nilfs_btree_commit_insert(btree, path, level, key, ptr);
- nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
+ nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
out:
nilfs_btree_free_path(path);
@@ -1511,7 +1511,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
if (ret < 0)
goto out;
nilfs_btree_commit_delete(btree, path, level, dat);
- nilfs_bmap_sub_blocks(btree, stats.bs_nblocks);
+ nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks);
out:
nilfs_btree_free_path(path);
@@ -1776,7 +1776,7 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
return ret;
nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
di, ni, bh);
- nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
+ nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
return 0;
}
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 9d45773b79e6..3a1923943b14 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -440,7 +440,6 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
}
/*
@@ -531,7 +530,6 @@ got_it:
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, page->mapping, from, to);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
@@ -579,7 +577,6 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
dir->inode = 0;
nilfs_commit_chunk(page, mapping, from, to);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-/* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
out:
nilfs_put_page(page);
return err;
@@ -684,7 +681,7 @@ const struct file_operations nilfs_dir_operations = {
.readdir = nilfs_readdir,
.unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = nilfs_ioctl,
+ .compat_ioctl = nilfs_compat_ioctl,
#endif /* CONFIG_COMPAT */
.fsync = nilfs_sync_file,
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 324d80c57518..82f4865e86dd 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -146,7 +146,7 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
- nilfs_bmap_add_blocks(bmap, 1);
+ nilfs_inode_add_blocks(bmap->b_inode, 1);
}
return ret;
}
@@ -168,7 +168,7 @@ static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
if (!ret) {
nilfs_bmap_commit_end_ptr(bmap, &req, dat);
nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
- nilfs_bmap_sub_blocks(bmap, 1);
+ nilfs_inode_sub_blocks(bmap->b_inode, 1);
}
return ret;
}
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 2f560c9fb808..7a5e4ab15c6e 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -142,7 +142,7 @@ const struct file_operations nilfs_file_operations = {
.aio_write = generic_file_aio_write,
.unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = nilfs_ioctl,
+ .compat_ioctl = nilfs_compat_ioctl,
#endif /* CONFIG_COMPAT */
.mmap = nilfs_file_mmap,
.open = generic_file_open,
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2fd440d8d6b8..22a816ba3621 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -41,6 +41,24 @@ struct nilfs_iget_args {
int for_gc;
};
+void nilfs_inode_add_blocks(struct inode *inode, int n)
+{
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+ inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
+ if (root)
+ atomic_add(n, &root->blocks_count);
+}
+
+void nilfs_inode_sub_blocks(struct inode *inode, int n)
+{
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+ inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
+ if (root)
+ atomic_sub(n, &root->blocks_count);
+}
+
/**
* nilfs_get_block() - get a file block on the filesystem (callback function)
* @inode - inode struct of the target file
@@ -315,11 +333,8 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
/* No lock is needed; iget() ensures it. */
}
- ii->i_flags = NILFS_I(dir)->i_flags;
- if (S_ISLNK(mode))
- ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
- if (!S_ISDIR(mode))
- ii->i_flags &= ~NILFS_DIRSYNC_FL;
+ ii->i_flags = nilfs_mask_flags(
+ mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
/* ii->i_file_acl = 0; */
/* ii->i_dir_acl = 0; */
@@ -359,17 +374,15 @@ void nilfs_set_inode_flags(struct inode *inode)
inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
S_DIRSYNC);
- if (flags & NILFS_SYNC_FL)
+ if (flags & FS_SYNC_FL)
inode->i_flags |= S_SYNC;
- if (flags & NILFS_APPEND_FL)
+ if (flags & FS_APPEND_FL)
inode->i_flags |= S_APPEND;
- if (flags & NILFS_IMMUTABLE_FL)
+ if (flags & FS_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
-#ifndef NILFS_ATIME_DISABLE
- if (flags & NILFS_NOATIME_FL)
-#endif
+ if (flags & FS_NOATIME_FL)
inode->i_flags |= S_NOATIME;
- if (flags & NILFS_DIRSYNC_FL)
+ if (flags & FS_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
@@ -707,6 +720,7 @@ void nilfs_evict_inode(struct inode *inode)
struct nilfs_transaction_info ti;
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
+ int ret;
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
if (inode->i_data.nrpages)
@@ -725,8 +739,9 @@ void nilfs_evict_inode(struct inode *inode)
nilfs_mark_inode_dirty(inode);
end_writeback(inode);
- nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
- atomic_dec(&ii->i_root->inodes_count);
+ ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
+ if (!ret)
+ atomic_dec(&ii->i_root->inodes_count);
nilfs_clear_inode(inode);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 496738963fdb..5471eed5eccb 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -26,7 +26,9 @@
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
+#include <linux/compat.h> /* compat_ptr() */
#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
+#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
@@ -97,6 +99,70 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
return ret;
}
+static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp)
+{
+ unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE;
+
+ return put_user(flags, (int __user *)argp);
+}
+
+static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
+ void __user *argp)
+{
+ struct nilfs_transaction_info ti;
+ unsigned int flags, oldflags;
+ int ret;
+
+ if (!is_owner_or_cap(inode))
+ return -EACCES;
+
+ if (get_user(flags, (int __user *)argp))
+ return -EFAULT;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ flags = nilfs_mask_flags(inode->i_mode, flags);
+
+ mutex_lock(&inode->i_mutex);
+
+ oldflags = NILFS_I(inode)->i_flags;
+
+ /*
+ * The IMMUTABLE and APPEND_ONLY flags can only be changed by the
+ * relevant capability.
+ */
+ ret = -EPERM;
+ if (((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ goto out;
+
+ ret = nilfs_transaction_begin(inode->i_sb, &ti, 0);
+ if (ret)
+ goto out;
+
+ NILFS_I(inode)->i_flags = (oldflags & ~FS_FL_USER_MODIFIABLE) |
+ (flags & FS_FL_USER_MODIFIABLE);
+
+ nilfs_set_inode_flags(inode);
+ inode->i_ctime = CURRENT_TIME;
+ if (IS_SYNC(inode))
+ nilfs_set_transaction_flag(NILFS_TI_SYNC);
+
+ nilfs_mark_inode_dirty(inode);
+ ret = nilfs_transaction_commit(inode->i_sb);
+out:
+ mutex_unlock(&inode->i_mutex);
+ mnt_drop_write(filp->f_path.mnt);
+ return ret;
+}
+
+static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
+{
+ return put_user(inode->i_generation, (int __user *)argp);
+}
+
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
@@ -666,6 +732,12 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ return nilfs_ioctl_getflags(inode, argp);
+ case FS_IOC_SETFLAGS:
+ return nilfs_ioctl_setflags(inode, filp, argp);
+ case FS_IOC_GETVERSION:
+ return nilfs_ioctl_getversion(inode, argp);
case NILFS_IOCTL_CHANGE_CPMODE:
return nilfs_ioctl_change_cpmode(inode, filp, cmd, argp);
case NILFS_IOCTL_DELETE_CHECKPOINT:
@@ -696,3 +768,23 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENOTTY;
}
}
+
+#ifdef CONFIG_COMPAT
+long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return nilfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 6a0e2a189f60..a0babd2bff6a 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct backing_dev_info *bdi = inode->i_sb->s_bdi;
INIT_LIST_HEAD(&shadow->frozen_buffers);
- nilfs_mapping_init_once(&shadow->frozen_data);
+ address_space_init_once(&shadow->frozen_data);
nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
- nilfs_mapping_init_once(&shadow->frozen_btnodes);
+ address_space_init_once(&shadow->frozen_btnodes);
nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
mi->mi_shadow = shadow;
return 0;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 777e8fd04304..03ba4d88083f 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -212,6 +212,23 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
#define NILFS_ATIME_DISABLE
+/* Flags that should be inherited by new inodes from their parent. */
+#define NILFS_FL_INHERITED \
+ (FS_SECRM_FL | FS_UNRM_FL | FS_COMPR_FL | FS_SYNC_FL | \
+ FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL |\
+ FS_COMPRBLK_FL | FS_NOCOMP_FL | FS_NOTAIL_FL | FS_DIRSYNC_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & ~(FS_DIRSYNC_FL | FS_TOPDIR_FL);
+ else
+ return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
+}
+
/* dir.c */
extern int nilfs_add_link(struct dentry *, struct inode *);
extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
@@ -229,10 +246,13 @@ extern int nilfs_sync_file(struct file *, int);
/* ioctl.c */
long nilfs_ioctl(struct file *, unsigned int, unsigned long);
+long nilfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, struct nilfs_argv *,
void **);
/* inode.c */
+void nilfs_inode_add_blocks(struct inode *inode, int n);
+void nilfs_inode_sub_blocks(struct inode *inode, int n);
extern struct inode *nilfs_new_inode(struct inode *, int);
extern void nilfs_free_inode(struct inode *);
extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0c432416cfef..a585b35fd6bc 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
return nc;
}
-void nilfs_mapping_init_once(struct address_space *mapping)
-{
- memset(mapping, 0, sizeof(*mapping));
- INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
- spin_lock_init(&mapping->tree_lock);
- INIT_LIST_HEAD(&mapping->private_list);
- spin_lock_init(&mapping->private_lock);
-
- spin_lock_init(&mapping->i_mmap_lock);
- INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
- INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-}
-
void nilfs_mapping_init(struct address_space *mapping,
struct backing_dev_info *bdi,
const struct address_space_operations *aops)
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 622df27cd891..2a00953ebd5f 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init_once(struct address_space *mapping);
void nilfs_mapping_init(struct address_space *mapping,
struct backing_dev_info *bdi,
const struct address_space_operations *aops);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 55ebae5c7f39..2de9f636792a 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
nilfs_segctor_map_segsum_entry(
sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
- if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
+ if (NILFS_I(inode)->i_root &&
+ !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
/* skip finfo */
}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 58fd707174e1..1673b3d99842 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj)
#ifdef CONFIG_NILFS_XATTR
init_rwsem(&ii->xattr_sem);
#endif
- nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+ address_space_init_once(&ii->i_btnode_cache);
ii->i_bmap = &ii->i_bmap_data;
inode_init_once(&ii->vfs_inode);
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ad4ac607cf57..9098909d5cef 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -475,10 +475,13 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
return -EIO;
}
printk(KERN_WARNING
- "NILFS warning: unable to read primary superblock\n");
- } else if (!sbp[1])
+ "NILFS warning: unable to read primary superblock "
+ "(blocksize = %d)\n", blocksize);
+ } else if (!sbp[1]) {
printk(KERN_WARNING
- "NILFS warning: unable to read secondary superblock\n");
+ "NILFS warning: unable to read secondary superblock "
+ "(blocksize = %d)\n", blocksize);
+ }
/*
* Compare two super blocks and set 1 in swp if the secondary
@@ -505,7 +508,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
if (!valid[!swp])
printk(KERN_WARNING "NILFS warning: broken superblock. "
- "using spare superblock.\n");
+ "using spare superblock (blocksize = %d).\n", blocksize);
if (swp)
nilfs_swap_super_block(nilfs);
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 3344bdd5506e..89ec7e0c15bc 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -31,7 +31,6 @@ int dir_notify_enable __read_mostly = 1;
static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;
-static DEFINE_MUTEX(dnotify_mark_mutex);
/*
* dnotify will attach one of these to each inode (i_fsnotify_marks) which
@@ -183,7 +182,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
return;
dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
- mutex_lock(&dnotify_mark_mutex);
+ mutex_lock(&dnotify_group->mutex);
spin_lock(&fsn_mark->lock);
prev = &dn_mark->dn;
@@ -199,11 +198,11 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
spin_unlock(&fsn_mark->lock);
- /* nothing else could have found us thanks to the dnotify_mark_mutex */
+ /* nothing else could have found us thanks to the dnotify_group mutex */
if (dn_mark->dn == NULL)
fsnotify_destroy_mark(fsn_mark);
- mutex_unlock(&dnotify_mark_mutex);
+ mutex_unlock(&dnotify_group->mutex);
fsnotify_put_mark(fsn_mark);
}
@@ -326,7 +325,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
new_dn_mark->dn = NULL;
/* this is needed to prevent the fcntl/close race described below */
- mutex_lock(&dnotify_mark_mutex);
+ mutex_lock(&dnotify_group->mutex);
/* add the new_fsn_mark or find an old one. */
fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
@@ -348,8 +347,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* if (f != filp) means that we lost a race and another task/thread
* actually closed the fd we are still playing with before we grabbed
- * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the
- * only time we clean up the marks we need to get our mark off
+ * the dnotify_group mutex and fsn_mark->lock. Since closing the fd is
+ * the only time we clean up the marks we need to get our mark off
* the list. */
if (f != filp) {
/* if we added ourselves, shoot ourselves, it's possible that
@@ -387,7 +386,7 @@ out:
if (destroy)
fsnotify_destroy_mark(fsn_mark);
- mutex_unlock(&dnotify_mark_mutex);
+ mutex_unlock(&dnotify_group->mutex);
fsnotify_put_mark(fsn_mark);
out_err:
if (new_fsn_mark)
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index f35794b97e8e..c2ba86a972e1 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -184,13 +184,6 @@ static bool fanotify_should_send_event(struct fsnotify_group *group,
marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
} else if (inode_mark) {
- /*
- * if the event is for a child and this inode doesn't care about
- * events on the child, don't send it!
- */
- if ((event_mask & FS_EVENT_ON_CHILD) &&
- !(inode_mark->mask & FS_EVENT_ON_CHILD))
- return false;
marks_mask = inode_mark->mask;
marks_ignored_mask = inode_mark->ignored_mask;
} else if (vfsmnt_mark) {
@@ -204,10 +197,21 @@ static bool fanotify_should_send_event(struct fsnotify_group *group,
(marks_ignored_mask & FS_ISDIR))
return false;
- if (event_mask & marks_mask & ~marks_ignored_mask)
- return true;
+ /*
+ * if the event is for a child and this inode doesn't care about
+ * events on the child, don't send it!
+ */
+ if ((event_mask & FS_EVENT_ON_CHILD) &&
+ !(marks_mask & FS_EVENT_ON_CHILD))
+ return false;
- return false;
+ /*
+ * It might seem logical to check:
+ * if (event_mask & marks_mask & ~marks_ignored_mask)
+ * return true;
+ * but we know this was true from the caller so just return true.
+ */
+ return true;
}
static void fanotify_free_group_priv(struct fsnotify_group *group)
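A compact way to read the reordered checks above: directory events hit the ignored-mask test first, and the child-event filter now keys off the combined marks_mask instead of the inode mark alone. A standalone C sketch of that decision order follows; the flag values are placeholders, not the real fsnotify constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration only; the real constants live in
 * include/linux/fsnotify_backend.h. */
#define FS_ISDIR          0x40000000u
#define FS_EVENT_ON_CHILD 0x08000000u

static bool should_send(uint32_t event_mask, uint32_t marks_mask,
			uint32_t marks_ignored_mask)
{
	/* events on directories are dropped when a mark ignores them */
	if ((event_mask & FS_ISDIR) && (marks_ignored_mask & FS_ISDIR))
		return false;

	/* child events are dropped unless some mark asked for them */
	if ((event_mask & FS_EVENT_ON_CHILD) &&
	    !(marks_mask & FS_EVENT_ON_CHILD))
		return false;

	/* the caller already verified event_mask overlaps the marks */
	return true;
}

int main(void)
{
	/* child event, but no mark requested child events: prints 0 */
	printf("%d\n", should_send(FS_EVENT_ON_CHILD | 0x1, 0x1, 0));
	return 0;
}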
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 8b61220cffc5..b147bdf781c0 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -62,6 +62,7 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
struct dentry *dentry;
struct vfsmount *mnt;
struct file *new_file;
+ unsigned int flags;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -83,12 +84,22 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
mnt = mntget(event->path.mnt);
/* it's possible this event was an overflow event. in that case dentry and mnt
* are NULL; That's fine, just don't call dentry open */
- if (dentry && mnt)
- new_file = dentry_open(dentry, mnt,
- group->fanotify_data.f_flags | FMODE_NONOTIFY,
- current_cred());
- else
+ if (dentry && mnt) {
+ flags = group->fanotify_data.f_flags;
+ new_file = dentry_open(dentry, mnt, flags, current_cred());
+ /*
+ * Attempt a fallback to read-only access if a writable open was not
+ * possible, so the listener at least gets something.
+ */
+ if (IS_ERR(new_file) && group->fanotify_data.readonly_fallback) {
+ flags &= ~O_ACCMODE;
+ flags |= O_RDONLY;
+ new_file = dentry_open(dentry, mnt, flags,
+ current_cred());
+ }
+ } else {
new_file = ERR_PTR(-EOVERFLOW);
+ }
if (IS_ERR(new_file)) {
/*
* we still send an event even if we can't open the file. this
@@ -208,14 +219,6 @@ static int prepare_for_access_response(struct fsnotify_group *group,
re->fd = fd;
mutex_lock(&group->fanotify_data.access_mutex);
-
- if (atomic_read(&group->fanotify_data.bypass_perm)) {
- mutex_unlock(&group->fanotify_data.access_mutex);
- kmem_cache_free(fanotify_response_event_cache, re);
- event->response = FAN_ALLOW;
- return 0;
- }
-
list_add_tail(&re->list, &group->fanotify_data.access_list);
mutex_unlock(&group->fanotify_data.access_mutex);
@@ -516,6 +519,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
unsigned int flags)
{
__u32 oldmask;
+ int destroy_mark;
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
@@ -525,9 +529,10 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
oldmask = fsn_mark->ignored_mask;
fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
}
+ destroy_mark = (!fsn_mark->mask && !fsn_mark->ignored_mask);
spin_unlock(&fsn_mark->lock);
- if (!(oldmask & ~mask))
+ if (destroy_mark)
fsnotify_destroy_mark(fsn_mark);
return mask & oldmask;
@@ -539,17 +544,23 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
+ int ret;
+ mutex_lock(&group->mutex);
+ ret = -ENOENT;
fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
if (!fsn_mark)
- return -ENOENT;
+ goto err;
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
fsnotify_put_mark(fsn_mark);
if (removed & mnt->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
+ ret = 0;
+err:
+ mutex_unlock(&group->mutex);
- return 0;
+ return ret;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
@@ -558,18 +569,24 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
+ int ret;
+ mutex_lock(&group->mutex);
+ ret = -ENOENT;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
- return -ENOENT;
+ goto err;
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
/* matches the fsnotify_find_inode_mark() */
fsnotify_put_mark(fsn_mark);
if (removed & inode->i_fsnotify_mask)
fsnotify_recalc_inode_mask(inode);
+ ret = 0;
+err:
+ mutex_unlock(&group->mutex);
- return 0;
+ return ret;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
@@ -605,28 +622,35 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark;
__u32 added;
- int ret = 0;
+ int ret;
+ mutex_lock(&group->mutex);
fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
if (!fsn_mark) {
- if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
- return -ENOSPC;
+ ret = -ENOSPC;
+ if (atomic_read(&group->num_marks) >
+ group->fanotify_data.max_marks)
+ goto err;
+ ret = -ENOMEM;
fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
if (!fsn_mark)
- return -ENOMEM;
+ goto err;
fsnotify_init_mark(fsn_mark, fanotify_free_mark);
ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
if (ret)
- goto err;
+ goto err2;
}
added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
if (added & ~mnt->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
-err:
+ ret = 0;
+err2:
fsnotify_put_mark(fsn_mark);
+err:
+ mutex_unlock(&group->mutex);
return ret;
}
@@ -636,7 +660,7 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark;
__u32 added;
- int ret = 0;
+ int ret;
pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
@@ -650,26 +674,33 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
(atomic_read(&inode->i_writecount) > 0))
return 0;
+ mutex_lock(&group->mutex);
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark) {
- if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
- return -ENOSPC;
+ ret = -ENOSPC;
+ if (atomic_read(&group->num_marks) >
+ group->fanotify_data.max_marks)
+ goto err;
+ ret = -ENOMEM;
fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
if (!fsn_mark)
- return -ENOMEM;
+ goto err;
fsnotify_init_mark(fsn_mark, fanotify_free_mark);
ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
if (ret)
- goto err;
+ goto err2;
}
added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
if (added & ~inode->i_fsnotify_mask)
fsnotify_recalc_inode_mask(inode);
-err:
+ ret = 0;
+err2:
fsnotify_put_mark(fsn_mark);
+err:
+ mutex_unlock(&group->mutex);
return ret;
}
@@ -711,7 +742,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.user = user;
atomic_inc(&user->fanotify_listeners);
- group->fanotify_data.f_flags = event_f_flags;
+ group->fanotify_data.f_flags = event_f_flags | FMODE_NONOTIFY;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
mutex_init(&group->fanotify_data.access_mutex);
init_waitqueue_head(&group->fanotify_data.access_waitq);
@@ -751,6 +782,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
}
+ fd = -EINVAL;
+ if (flags & FAN_READONLY_FALLBACK) {
+ if ((event_f_flags & O_ACCMODE) == O_RDWR)
+ group->fanotify_data.readonly_fallback = true;
+ else
+ goto out_put_group;
+ }
+
fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
if (fd < 0)
goto out_put_group;
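A rough user-space sketch of the read-only fallback requested above. FAN_READONLY_FALLBACK is introduced by this patch and is not a mainline ABI constant, the numeric value below is a placeholder, and a libc that already exposes fanotify_init() is assumed.

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

#ifndef FAN_READONLY_FALLBACK
#define FAN_READONLY_FALLBACK 0x00000100	/* placeholder value */
#endif

int main(void)
{
	/*
	 * Ask for O_RDWR event fds; the kernel falls back to O_RDONLY when
	 * the file cannot be opened for writing (e.g. a read-only mount).
	 * Per the patch, the flag is only valid together with O_RDWR.
	 */
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_READONLY_FALLBACK, O_RDWR);

	if (fd < 0)
		perror("fanotify_init");
	return fd < 0;
}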
@@ -876,7 +915,7 @@ SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
/*
- * fanotify_user_setup - Our initialization function. Note that we cannnot return
+ * fanotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
diff --git a/fs/notify/group.c b/fs/notify/group.c
index d309f38449cb..cc341d33f5c8 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -90,6 +90,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
*/
atomic_set(&group->num_marks, 1);
+ mutex_init(&group->mutex);
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 4cd5d5d78f9f..bd46e7c8a0ef 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -841,7 +841,7 @@ out:
}
/*
- * inotify_user_setup - Our initialization function. Note that we cannnot return
+ * inotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 325185e514bb..28b64eb03e33 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -103,17 +103,6 @@ static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
-void fsnotify_get_mark(struct fsnotify_mark *mark)
-{
- atomic_inc(&mark->refcnt);
-}
-
-void fsnotify_put_mark(struct fsnotify_mark *mark)
-{
- if (atomic_dec_and_test(&mark->refcnt))
- mark->free_mark(mark);
-}
-
/*
* Any time a mark is getting freed we end up here.
* The caller had better be holding a reference to this mark so we don't actually
@@ -217,7 +206,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
struct vfsmount *mnt, int allow_dups)
{
- int ret = 0;
+ int ret;
BUG_ON(inode && mnt);
BUG_ON(!inode && !mnt);
@@ -232,23 +221,20 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
spin_lock(&group->mark_lock);
mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
-
mark->group = group;
list_add(&mark->g_list, &group->marks_list);
- atomic_inc(&group->num_marks);
fsnotify_get_mark(mark); /* for i_list and g_list */
+ atomic_inc(&group->num_marks);
- if (inode) {
+ ret = 0;
+ if (inode)
ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
- if (ret)
- goto err;
- } else if (mnt) {
+ else if (mnt)
ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
- if (ret)
- goto err;
- } else {
+ else
BUG();
- }
+ if (ret)
+ goto err;
spin_unlock(&group->mark_lock);
@@ -260,7 +246,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
if (inode)
__fsnotify_update_child_dentry_flags(inode);
- return ret;
+ return 0;
err:
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
list_del_init(&mark->g_list);
@@ -346,6 +332,10 @@ static int fsnotify_mark_destroy(void *ignored)
synchronize_srcu(&fsnotify_mark_srcu);
+ /*
+ * at this point we cannot be found via the i_list or g_list so
+ * drop that reference.
+ */
list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
list_del_init(&mark->destroy_list);
fsnotify_put_mark(mark);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index d417b3f9b0c7..8e4bb529e4de 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -322,21 +322,23 @@ static int ocfs2_check_dir_entry(struct inode * dir,
const char *error_msg = NULL;
const int rlen = le16_to_cpu(de->rec_len);
- if (rlen < OCFS2_DIR_REC_LEN(1))
+ if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
+ else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
- else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
+ else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ else if (unlikely(
+ ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
error_msg = "directory entry across blocks";
- if (error_msg != NULL)
+ if (unlikely(error_msg != NULL))
mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
"offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
de->name_len);
+
return error_msg == NULL ? 1 : 0;
}
@@ -354,7 +356,7 @@ static inline int ocfs2_match(int len,
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+static inline int ocfs2_search_dirblock(struct buffer_head *bh,
struct inode *dir,
const char *name, int namelen,
unsigned long offset,
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7e38a072d720..99805d578ab5 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -926,9 +926,10 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
}
static int dlm_match_regions(struct dlm_ctxt *dlm,
- struct dlm_query_region *qr)
+ struct dlm_query_region *qr,
+ char *local, int locallen)
{
- char *local = NULL, *remote = qr->qr_regions;
+ char *remote = qr->qr_regions;
char *l, *r;
int localnr, i, j, foundit;
int status = 0;
@@ -957,13 +958,8 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
r += O2HB_MAX_REGION_NAME_LEN;
}
- local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC);
- if (!local) {
- status = -ENOMEM;
- goto bail;
- }
-
- localnr = o2hb_get_all_regions(local, O2NM_MAX_REGIONS);
+ localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
+ localnr = o2hb_get_all_regions(local, (u8)localnr);
/* compare local regions with remote */
l = local;
@@ -1012,8 +1008,6 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
}
bail:
- kfree(local);
-
return status;
}
@@ -1075,6 +1069,7 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
{
struct dlm_query_region *qr;
struct dlm_ctxt *dlm = NULL;
+ char *local = NULL;
int status = 0;
int locked = 0;
@@ -1083,6 +1078,13 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
qr->qr_domain);
+ /* buffer used in dlm_match_regions() */
+ local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
+ if (!local) {
+ status = -ENOMEM;
+ goto bail;
+ }
+
status = -EINVAL;
spin_lock(&dlm_domain_lock);
@@ -1112,13 +1114,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
goto bail;
}
- status = dlm_match_regions(dlm, qr);
+ status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
bail:
if (locked)
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
+ kfree(local);
+
return status;
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e8d94d722ecb..7484bb9a3438 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -64,7 +64,7 @@ struct ocfs2_mask_waiter {
unsigned long mw_mask;
unsigned long mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
- unsigned long long mw_lock_start;
+ ktime_t mw_lock_start;
#endif
};
@@ -435,44 +435,41 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
- res->l_lock_num_prmode = 0;
- res->l_lock_num_prmode_failed = 0;
- res->l_lock_total_prmode = 0;
- res->l_lock_max_prmode = 0;
- res->l_lock_num_exmode = 0;
- res->l_lock_num_exmode_failed = 0;
- res->l_lock_total_exmode = 0;
- res->l_lock_max_exmode = 0;
res->l_lock_refresh = 0;
+ memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
+ memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}
static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
struct ocfs2_mask_waiter *mw, int ret)
{
- unsigned long long *num, *sum;
- unsigned int *max, *failed;
- struct timespec ts = current_kernel_time();
- unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
-
- if (level == LKM_PRMODE) {
- num = &res->l_lock_num_prmode;
- sum = &res->l_lock_total_prmode;
- max = &res->l_lock_max_prmode;
- failed = &res->l_lock_num_prmode_failed;
- } else if (level == LKM_EXMODE) {
- num = &res->l_lock_num_exmode;
- sum = &res->l_lock_total_exmode;
- max = &res->l_lock_max_exmode;
- failed = &res->l_lock_num_exmode_failed;
- } else
+ u32 usec;
+ ktime_t kt;
+ struct ocfs2_lock_stats *stats;
+
+ if (level == LKM_PRMODE)
+ stats = &res->l_lock_prmode;
+ else if (level == LKM_EXMODE)
+ stats = &res->l_lock_exmode;
+ else
return;
- (*num)++;
- (*sum) += time;
- if (time > *max)
- *max = time;
+ kt = ktime_sub(ktime_get(), mw->mw_lock_start);
+ usec = ktime_to_us(kt);
+
+ stats->ls_gets++;
+ stats->ls_total += ktime_to_ns(kt);
+ /* overflow */
+ if (unlikely(stats->ls_gets == 0)) {
+ stats->ls_gets++;
+ stats->ls_total = ktime_to_ns(kt);
+ }
+
+ if (stats->ls_max < usec)
+ stats->ls_max = usec;
+
if (ret)
- (*failed)++;
+ stats->ls_fail++;
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
@@ -482,8 +479,7 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
- struct timespec ts = current_kernel_time();
- mw->mw_lock_start = timespec_to_ns(&ts);
+ mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
@@ -2869,8 +2865,15 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
return iter;
}
-/* So that debugfs.ocfs2 can determine which format is being used */
-#define OCFS2_DLM_DEBUG_STR_VERSION 2
+/*
+ * Version is used by debugfs.ocfs2 to determine the format being used
+ *
+ * New in version 2
+ * - Lock stats printed
+ * New in version 3
+ * - Max time in lock stats is in usecs (instead of nsecs)
+ */
+#define OCFS2_DLM_DEBUG_STR_VERSION 3
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
@@ -2912,18 +2915,18 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
seq_printf(m, "0x%x\t", lvb[i]);
#ifdef CONFIG_OCFS2_FS_STATS
-# define lock_num_prmode(_l) (_l)->l_lock_num_prmode
-# define lock_num_exmode(_l) (_l)->l_lock_num_exmode
-# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
-# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
-# define lock_total_prmode(_l) (_l)->l_lock_total_prmode
-# define lock_total_exmode(_l) (_l)->l_lock_total_exmode
-# define lock_max_prmode(_l) (_l)->l_lock_max_prmode
-# define lock_max_exmode(_l) (_l)->l_lock_max_exmode
-# define lock_refresh(_l) (_l)->l_lock_refresh
+# define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
+# define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
+# define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
+# define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
+# define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
+# define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
+# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
+# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
+# define lock_refresh(_l) ((_l)->l_lock_refresh)
#else
-# define lock_num_prmode(_l) (0ULL)
-# define lock_num_exmode(_l) (0ULL)
+# define lock_num_prmode(_l) (0)
+# define lock_num_exmode(_l) (0)
# define lock_num_prmode_failed(_l) (0)
# define lock_num_exmode_failed(_l) (0)
# define lock_total_prmode(_l) (0ULL)
@@ -2933,8 +2936,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_refresh(_l) (0)
#endif
/* The following seq_print was added in version 2 of this output */
- seq_printf(m, "%llu\t"
- "%llu\t"
+ seq_printf(m, "%u\t"
+ "%u\t"
"%u\t"
"%u\t"
"%llu\t"
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 7a4868196152..731cf4670b97 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -46,6 +46,22 @@ static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq,
#define o2info_set_request_error(a, b) \
__o2info_set_request_error((struct ocfs2_info_request *)&(a), b)
+static inline void __o2info_set_request_filled(struct ocfs2_info_request *req)
+{
+ req->ir_flags |= OCFS2_INFO_FL_FILLED;
+}
+
+#define o2info_set_request_filled(a) \
+ __o2info_set_request_filled((struct ocfs2_info_request *)&(a))
+
+static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req)
+{
+ req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
+}
+
+#define o2info_clear_request_filled(a) \
+ __o2info_clear_request_filled((struct ocfs2_info_request *)&(a))
+
static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
int status;
@@ -139,7 +155,8 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
goto bail;
oib.ib_blocksize = inode->i_sb->s_blocksize;
- oib.ib_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oib);
if (o2info_to_user(oib, req))
goto bail;
@@ -163,7 +180,8 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
goto bail;
oic.ic_clustersize = osb->s_clustersize;
- oic.ic_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oic);
if (o2info_to_user(oic, req))
goto bail;
@@ -187,7 +205,8 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
goto bail;
oim.im_max_slots = osb->max_slots;
- oim.im_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oim);
if (o2info_to_user(oim, req))
goto bail;
@@ -211,7 +230,8 @@ int ocfs2_info_handle_label(struct inode *inode,
goto bail;
memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
- oil.il_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oil);
if (o2info_to_user(oil, req))
goto bail;
@@ -235,7 +255,8 @@ int ocfs2_info_handle_uuid(struct inode *inode,
goto bail;
memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
- oiu.iu_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oiu);
if (o2info_to_user(oiu, req))
goto bail;
@@ -261,7 +282,8 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
oif.if_compat_features = osb->s_feature_compat;
oif.if_incompat_features = osb->s_feature_incompat;
oif.if_ro_compat_features = osb->s_feature_ro_compat;
- oif.if_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oif);
if (o2info_to_user(oif, req))
goto bail;
@@ -286,7 +308,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
oij.ij_journal_size = osb->journal->j_inode->i_size;
- oij.ij_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+ o2info_set_request_filled(oij);
if (o2info_to_user(oij, req))
goto bail;
@@ -308,7 +330,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
if (o2info_from_user(oir, req))
goto bail;
- oir.ir_flags &= ~OCFS2_INFO_FL_FILLED;
+ o2info_clear_request_filled(oir);
if (o2info_to_user(oir, req))
goto bail;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 43e56b97f9c0..6180da1e37e6 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
ocfs2_quota_trans_credits(sb);
}
-/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
- * bitmap block for the new bit) dx_root update for free list */
-#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
+/* data block for new dir/symlink, allocation of directory block, dx_root
+ * update for free list */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
{
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 849fb4a2e814..d6c25d76b537 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -293,7 +293,7 @@ static int ocfs2_mknod(struct inode *dir,
}
/* get security xattr */
- status = ocfs2_init_security_get(inode, dir, &si);
+ status = ocfs2_init_security_get(inode, dir, &dentry->d_name, &si);
if (status) {
if (status == -EOPNOTSUPP)
si.enable = 0;
@@ -1665,7 +1665,7 @@ static int ocfs2_symlink(struct inode *dir,
}
/* get security xattr */
- status = ocfs2_init_security_get(inode, dir, &si);
+ status = ocfs2_init_security_get(inode, dir, &dentry->d_name, &si);
if (status) {
if (status == -EOPNOTSUPP)
si.enable = 0;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 51cd6898e7f1..4e3d3c1363f3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -147,6 +147,17 @@ struct ocfs2_lock_res_ops;
typedef void (*ocfs2_lock_callback)(int status, unsigned long data);
+#ifdef CONFIG_OCFS2_FS_STATS
+struct ocfs2_lock_stats {
+ u64 ls_total; /* Total wait in NSEC */
+ u32 ls_gets; /* Num acquires */
+ u32 ls_fail; /* Num failed acquires */
+
+ /* Storing max wait in usecs saves 24 bytes per inode */
+ u32 ls_max; /* Max wait in USEC */
+};
+#endif
+
struct ocfs2_lock_res {
void *l_priv;
struct ocfs2_lock_res_ops *l_ops;
@@ -182,15 +193,9 @@ struct ocfs2_lock_res {
struct list_head l_debug_list;
#ifdef CONFIG_OCFS2_FS_STATS
- unsigned long long l_lock_num_prmode; /* PR acquires */
- unsigned long long l_lock_num_exmode; /* EX acquires */
- unsigned int l_lock_num_prmode_failed; /* Failed PR gets */
- unsigned int l_lock_num_exmode_failed; /* Failed EX gets */
- unsigned long long l_lock_total_prmode; /* Tot wait for PR */
- unsigned long long l_lock_total_exmode; /* Tot wait for EX */
- unsigned int l_lock_max_prmode; /* Max wait for PR */
- unsigned int l_lock_max_exmode; /* Max wait for EX */
- unsigned int l_lock_refresh; /* Disk refreshes */
+ struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */
+ u32 l_lock_refresh; /* Disk refreshes */
+ struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map l_lockdep_map;
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 196fcb52d95d..d5ab56cbe5c5 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -114,7 +114,4 @@ int ocfs2_local_write_dquot(struct dquot *dquot);
extern const struct dquot_operations ocfs2_quota_operations;
extern struct quota_format_type ocfs2_quota_format;
-int ocfs2_quota_setup(void);
-void ocfs2_quota_shutdown(void);
-
#endif /* _OCFS2_QUOTA_H */
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 4607923eb24c..a73f64166481 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -63,8 +63,6 @@
* write to gf
*/
-static struct workqueue_struct *ocfs2_quota_wq = NULL;
-
static void qsync_work_fn(struct work_struct *work);
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
@@ -400,8 +398,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
OCFS2_QBLK_RESERVED_SPACE;
oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
- queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
- msecs_to_jiffies(oinfo->dqi_syncms));
+ schedule_delayed_work(&oinfo->dqi_sync_work,
+ msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
mlog_exit(status);
@@ -635,8 +633,8 @@ static void qsync_work_fn(struct work_struct *work)
struct super_block *sb = oinfo->dqi_gqinode->i_sb;
dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
- queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
- msecs_to_jiffies(oinfo->dqi_syncms));
+ schedule_delayed_work(&oinfo->dqi_sync_work,
+ msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
@@ -923,20 +921,3 @@ const struct dquot_operations ocfs2_quota_operations = {
.alloc_dquot = ocfs2_alloc_dquot,
.destroy_dquot = ocfs2_destroy_dquot,
};
-
-int ocfs2_quota_setup(void)
-{
- ocfs2_quota_wq = create_workqueue("o2quot");
- if (!ocfs2_quota_wq)
- return -ENOMEM;
- return 0;
-}
-
-void ocfs2_quota_shutdown(void)
-{
- if (ocfs2_quota_wq) {
- flush_workqueue(ocfs2_quota_wq);
- destroy_workqueue(ocfs2_quota_wq);
- ocfs2_quota_wq = NULL;
- }
-}
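With the private o2quot workqueue removed, the periodic quota sync simply re-arms itself on the shared system workqueue. A minimal sketch of that self-rearming delayed-work pattern, using a hypothetical sync_state in place of the real per-type quota info:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical state standing in for ocfs2's per-type quota info. */
struct sync_state {
	struct delayed_work work;
	unsigned int interval_ms;	/* analogous to dqi_syncms */
};

static void sync_work_fn(struct work_struct *work)
{
	struct sync_state *s = container_of(to_delayed_work(work),
					    struct sync_state, work);

	/* ... perform the periodic sync here ... */

	/* re-arm on the shared system workqueue */
	schedule_delayed_work(&s->work, msecs_to_jiffies(s->interval_ms));
}

static void sync_state_start(struct sync_state *s)
{
	INIT_DELAYED_WORK(&s->work, sync_work_fn);
	schedule_delayed_work(&s->work, msecs_to_jiffies(s->interval_ms));
}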
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b5f9160e93e9..35798b88042d 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
u32 num_clusters, unsigned int e_flags)
{
int ret, delete, index, credits = 0;
- u32 new_bit, new_len;
+ u32 new_bit, new_len, orig_num_clusters;
unsigned int set_len;
struct ocfs2_super *osb = OCFS2_SB(sb);
handle_t *handle;
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
goto out;
}
+ orig_num_clusters = num_clusters;
+
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
* in write-back mode.
*/
if (context->get_clusters == ocfs2_di_get_clusters) {
- ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+ ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+ orig_num_clusters);
if (ret)
mlog_errno(ret);
}
@@ -4325,7 +4328,8 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
/* If the security isn't preserved, we need to re-initialize them. */
if (!preserve) {
- error = ocfs2_init_security_and_acl(dir, new_orphan_inode);
+ error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
+ &new_dentry->d_name);
if (error)
mlog_errno(error);
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 38f986d2447e..236ed1bdca2c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
struct mount_options *mopt,
int is_remount)
{
- int status;
+ int status, user_stack = 0;
char *p;
u32 tmp;
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
memcpy(mopt->cluster_stack, args[0].from,
OCFS2_STACK_LABEL_LEN);
mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+ /*
+ * Open code the memcmp here as we don't have
+ * an osb to pass to
+ * ocfs2_userspace_stack().
+ */
+ if (memcmp(mopt->cluster_stack,
+ OCFS2_CLASSIC_CLUSTER_STACK,
+ OCFS2_STACK_LABEL_LEN))
+ user_stack = 1;
break;
case Opt_inode64:
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
}
}
- /* Ensure only one heartbeat mode */
- tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
- OCFS2_MOUNT_HB_NONE);
- if (hweight32(tmp) != 1) {
- mlog(ML_ERROR, "Invalid heartbeat mount options\n");
- status = 0;
- goto bail;
+ if (user_stack == 0) {
+ /* Ensure only one heartbeat mode */
+ tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+ OCFS2_MOUNT_HB_GLOBAL |
+ OCFS2_MOUNT_HB_NONE);
+ if (hweight32(tmp) != 1) {
+ mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+ status = 0;
+ goto bail;
+ }
}
status = 1;
@@ -1645,16 +1657,11 @@ static int __init ocfs2_init(void)
mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n");
}
- status = ocfs2_quota_setup();
- if (status)
- goto leave;
-
ocfs2_set_locking_protocol();
status = register_quota_format(&ocfs2_quota_format);
leave:
if (status < 0) {
- ocfs2_quota_shutdown();
ocfs2_free_mem_caches();
exit_ocfs2_uptodate_cache();
}
@@ -1671,8 +1678,6 @@ static void __exit ocfs2_exit(void)
{
mlog_entry_void();
- ocfs2_quota_shutdown();
-
if (ocfs2_wq) {
flush_workqueue(ocfs2_wq);
destroy_workqueue(ocfs2_wq);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 67cd43914641..6bb602486c6b 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7185,7 +7185,8 @@ out:
* must not hold any lock except i_mutex.
*/
int ocfs2_init_security_and_acl(struct inode *dir,
- struct inode *inode)
+ struct inode *inode,
+ const struct qstr *qstr)
{
int ret = 0;
struct buffer_head *dir_bh = NULL;
@@ -7193,7 +7194,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
.enable = 1,
};
- ret = ocfs2_init_security_get(inode, dir, &si);
+ ret = ocfs2_init_security_get(inode, dir, qstr, &si);
if (!ret) {
ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
si.name, si.value, si.value_len,
@@ -7261,13 +7262,14 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
int ocfs2_init_security_get(struct inode *inode,
struct inode *dir,
+ const struct qstr *qstr,
struct ocfs2_security_xattr_info *si)
{
/* check whether ocfs2 support feature xattr */
if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
return -EOPNOTSUPP;
- return security_inode_init_security(inode, dir, &si->name, &si->value,
- &si->value_len);
+ return security_inode_init_security(inode, dir, qstr, &si->name,
+ &si->value, &si->value_len);
}
int ocfs2_init_security_set(handle_t *handle,
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index aa64bb37a65b..d63cfb72316b 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -57,6 +57,7 @@ int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
struct ocfs2_dinode *di);
int ocfs2_xattr_remove(struct inode *, struct buffer_head *);
int ocfs2_init_security_get(struct inode *, struct inode *,
+ const struct qstr *,
struct ocfs2_security_xattr_info *);
int ocfs2_init_security_set(handle_t *, struct inode *,
struct buffer_head *,
@@ -94,5 +95,6 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
struct buffer_head *new_bh,
bool preserve_security);
int ocfs2_init_security_and_acl(struct inode *dir,
- struct inode *inode);
+ struct inode *inode,
+ const struct qstr *qstr);
#endif /* OCFS2_XATTR_H */
diff --git a/fs/open.c b/fs/open.c
index 5a2c6ebc22b5..49b91ed9588d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -693,7 +693,8 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
if (error)
goto cleanup_all;
}
- ima_counts_get(f);
+ if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ i_readcount_inc(inode);
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 789c625c7aa5..b10e3540d5b7 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
}
vm->vblk_size = get_unaligned_be32(data + 0x08);
+ if (vm->vblk_size == 0) {
+ ldm_error ("Illegal VBLK size");
+ return false;
+ }
+
vm->vblk_offset = get_unaligned_be32(data + 0x0C);
vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index d9396a4fc7ff..927cbd115e53 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
return;
root = of_find_node_by_path("/");
if (root == NULL) {
- printk(KERN_ERR "/proc/device-tree: can't find root\n");
+ pr_debug("/proc/device-tree: can't find root\n");
return;
}
proc_device_tree_add_node(root, proc_device_tree);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 09a1f92a34ef..fb707e018a81 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -32,7 +32,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
ei->sysctl_entry = table;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
- inode->i_flags |= S_PRIVATE; /* tell selinux to ignore this inode */
inode->i_mode = table->mode;
if (!table->child) {
inode->i_mode |= S_IFREG;
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
new file mode 100644
index 000000000000..867d0ac026ce
--- /dev/null
+++ b/fs/pstore/Kconfig
@@ -0,0 +1,13 @@
+config PSTORE
+ bool "Persistant store support"
+ default n
+ help
+ This option enables generic access to platform level
+ persistent storage via "pstore" filesystem that can
+ be mounted as /dev/pstore. Only useful if you have
+ a platform level driver that registers with pstore to
+ provide the data, so you probably should just go say "Y"
+ (or "M") to a platform specific persistent store driver
+ (e.g. ACPI_APEI on X86) which will select this for you.
+ If you don't have a platform persistent store driver,
+ say N.
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile
new file mode 100644
index 000000000000..760f4bce7d1d
--- /dev/null
+++ b/fs/pstore/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux pstorefs routines.
+#
+
+obj-y += pstore.o
+
+pstore-objs += inode.o platform.o
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
new file mode 100644
index 000000000000..549d245d0b42
--- /dev/null
+++ b/fs/pstore/inode.c
@@ -0,0 +1,285 @@
+/*
+ * Persistent Storage - ramfs parts.
+ *
+ * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/mount.h>
+#include <linux/ramfs.h>
+#include <linux/sched.h>
+#include <linux/magic.h>
+#include <linux/pstore.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "internal.h"
+
+#define PSTORE_NAMELEN 64
+
+struct pstore_private {
+ u64 id;
+ int (*erase)(u64);
+};
+
+#define pstore_get_inode ramfs_get_inode
+
+/*
+ * When a file is unlinked from our file system we call the
+ * platform driver to erase the record from persistent store.
+ */
+static int pstore_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct pstore_private *p = dentry->d_inode->i_private;
+
+ p->erase(p->id);
+ kfree(p);
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct inode_operations pstore_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .unlink = pstore_unlink,
+};
+
+static const struct super_operations pstore_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .show_options = generic_show_options,
+};
+
+static struct super_block *pstore_sb;
+static struct vfsmount *pstore_mnt;
+
+int pstore_is_mounted(void)
+{
+ return pstore_mnt != NULL;
+}
+
+/*
+ * Set up a file structure as if we had opened this file and
+ * write our data to it.
+ */
+static int pstore_writefile(struct inode *inode, struct dentry *dentry,
+ char *data, size_t size)
+{
+ struct file f;
+ ssize_t n;
+ mm_segment_t old_fs = get_fs();
+
+ memset(&f, '0', sizeof f);
+ f.f_mapping = inode->i_mapping;
+ f.f_path.dentry = dentry;
+ f.f_path.mnt = pstore_mnt;
+ f.f_pos = 0;
+ f.f_op = inode->i_fop;
+ set_fs(KERNEL_DS);
+ n = do_sync_write(&f, data, size, &f.f_pos);
+ set_fs(old_fs);
+
+ fsnotify_modify(&f);
+
+ return n == size;
+}
+
+/*
+ * Make a regular file in the root directory of our file system.
+ * Load it up with "size" bytes of data from "buf".
+ * Set the mtime & ctime to the date that this record was originally stored.
+ */
+int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
+ char *data, size_t size,
+ struct timespec time, int (*erase)(u64))
+{
+ struct dentry *root = pstore_sb->s_root;
+ struct dentry *dentry;
+ struct inode *inode;
+ int rc;
+ char name[PSTORE_NAMELEN];
+ struct pstore_private *private;
+
+ rc = -ENOMEM;
+ inode = pstore_get_inode(pstore_sb, root->d_inode, S_IFREG | 0444, 0);
+ if (!inode)
+ goto fail;
+ inode->i_uid = inode->i_gid = 0;
+ private = kmalloc(sizeof *private, GFP_KERNEL);
+ if (!private)
+ goto fail_alloc;
+ private->id = id;
+ private->erase = erase;
+
+ switch (type) {
+ case PSTORE_TYPE_DMESG:
+ sprintf(name, "dmesg-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_MCE:
+ sprintf(name, "mce-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_UNKNOWN:
+ sprintf(name, "unknown-%s-%lld", psname, id);
+ break;
+ default:
+ sprintf(name, "type%d-%s-%lld", type, psname, id);
+ break;
+ }
+
+ mutex_lock(&root->d_inode->i_mutex);
+
+ rc = -ENOSPC;
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ goto fail_lockedalloc;
+
+ d_add(dentry, inode);
+
+ mutex_unlock(&root->d_inode->i_mutex);
+
+ if (!pstore_writefile(inode, dentry, data, size))
+ goto fail_write;
+
+ inode->i_private = private;
+
+ if (time.tv_sec)
+ inode->i_mtime = inode->i_ctime = time;
+
+ return 0;
+
+fail_write:
+ kfree(private);
+ inode->i_nlink--;
+ mutex_lock(&root->d_inode->i_mutex);
+ d_delete(dentry);
+ dput(dentry);
+ mutex_unlock(&root->d_inode->i_mutex);
+ goto fail;
+
+fail_lockedalloc:
+ mutex_unlock(&root->d_inode->i_mutex);
+ kfree(private);
+fail_alloc:
+ iput(inode);
+
+fail:
+ return rc;
+}
+
+int pstore_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode = NULL;
+ struct dentry *root;
+ int err;
+
+ save_mount_options(sb, data);
+
+ pstore_sb = sb;
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = PSTOREFS_MAGIC;
+ sb->s_op = &pstore_ops;
+ sb->s_time_gran = 1;
+
+ inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ if (!inode) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ /* override ramfs "dir" options so we catch unlink(2) */
+ inode->i_op = &pstore_dir_inode_operations;
+
+ root = d_alloc_root(inode);
+ sb->s_root = root;
+ if (!root) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ pstore_get_records();
+
+ return 0;
+fail:
+ iput(inode);
+ return err;
+}
+
+static int pstore_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ struct dentry *root;
+
+ root = mount_nodev(fs_type, flags, data, pstore_fill_super);
+ if (IS_ERR(root))
+ return -ENOMEM;
+
+ mnt->mnt_root = root;
+ mnt->mnt_sb = root->d_sb;
+ pstore_mnt = mnt;
+
+ return 0;
+}
+
+static void pstore_kill_sb(struct super_block *sb)
+{
+ kill_litter_super(sb);
+ pstore_sb = NULL;
+ pstore_mnt = NULL;
+}
+
+static struct file_system_type pstore_fs_type = {
+ .name = "pstore",
+ .get_sb = pstore_get_sb,
+ .kill_sb = pstore_kill_sb,
+};
+
+static int __init init_pstore_fs(void)
+{
+ int rc = 0;
+ struct kobject *pstorefs_kobj;
+
+ pstorefs_kobj = kobject_create_and_add("pstore", fs_kobj);
+ if (!pstorefs_kobj) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = sysfs_create_file(pstorefs_kobj, &pstore_kmsg_bytes_attr.attr);
+ if (rc)
+ goto done1;
+
+ rc = register_filesystem(&pstore_fs_type);
+ if (rc == 0)
+ goto done;
+
+ sysfs_remove_file(pstorefs_kobj, &pstore_kmsg_bytes_attr.attr);
+done1:
+ kobject_put(pstorefs_kobj);
+done:
+ return rc;
+}
+module_init(init_pstore_fs)
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
new file mode 100644
index 000000000000..76c26d2fab29
--- /dev/null
+++ b/fs/pstore/internal.h
@@ -0,0 +1,7 @@
+extern void pstore_get_records(void);
+extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
+ char *data, size_t size,
+ struct timespec time, int (*erase)(u64));
+extern int pstore_is_mounted(void);
+
+extern struct kobj_attribute pstore_kmsg_bytes_attr;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
new file mode 100644
index 000000000000..705fdf8abf6e
--- /dev/null
+++ b/fs/pstore/platform.c
@@ -0,0 +1,202 @@
+/*
+ * Persistent Storage - platform driver interface parts.
+ *
+ * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmsg_dump.h>
+#include <linux/module.h>
+#include <linux/pstore.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "internal.h"
+
+/*
+ * pstore_lock just protects "psinfo" during
+ * calls to pstore_register()
+ */
+static DEFINE_SPINLOCK(pstore_lock);
+static struct pstore_info *psinfo;
+
+/* How much of the console log to snapshot. /sys/fs/pstore/kmsg_bytes */
+static unsigned long kmsg_bytes = 10240;
+
+static ssize_t b_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%lu\n", kmsg_bytes);
+}
+
+static ssize_t b_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ return (sscanf(buf, "%lu", &kmsg_bytes) > 0) ? count : 0;
+}
+
+struct kobj_attribute pstore_kmsg_bytes_attr =
+ __ATTR(kmsg_bytes, S_IRUGO | S_IWUSR, b_show, b_store);
+
+/* Tag each group of saved records with a sequence number */
+static int oopscount;
+
+/*
+ * callback from kmsg_dump. (s2,l2) has the most recently
+ * written bytes, older bytes are in (s1,l1). Save as much
+ * as we can from the end of the buffer.
+ */
+static void pstore_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason,
+ const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2)
+{
+ unsigned long s1_start, s2_start;
+ unsigned long l1_cpy, l2_cpy;
+ unsigned long size, total = 0;
+ char *dst;
+ u64 id;
+ int hsize, part = 1;
+
+ mutex_lock(&psinfo->buf_mutex);
+ oopscount++;
+ while (total < kmsg_bytes) {
+ dst = psinfo->buf;
+ hsize = sprintf(dst, "Oops#%d Part%d\n", oopscount, part++);
+ size = psinfo->bufsize - hsize;
+ dst += hsize;
+
+ l2_cpy = min(l2, size);
+ l1_cpy = min(l1, size - l2_cpy);
+
+ if (l1_cpy + l2_cpy == 0)
+ break;
+
+ s2_start = l2 - l2_cpy;
+ s1_start = l1 - l1_cpy;
+
+ memcpy(dst, s1 + s1_start, l1_cpy);
+ memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+
+ id = psinfo->write(PSTORE_TYPE_DMESG, hsize + l1_cpy + l2_cpy);
+ if (pstore_is_mounted())
+ pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id,
+ psinfo->buf, hsize + l1_cpy + l2_cpy,
+ CURRENT_TIME, psinfo->erase);
+ l1 -= l1_cpy;
+ l2 -= l2_cpy;
+ total += l1_cpy + l2_cpy;
+ }
+ mutex_unlock(&psinfo->buf_mutex);
+}
+
+static struct kmsg_dumper pstore_dumper = {
+ .dump = pstore_dump,
+};
+
+/*
+ * platform specific persistent storage driver registers with
+ * us here. If pstore is already mounted, call the platform
+ * read function right away to populate the file system. If not
+ * then the pstore mount code will call us later to fill out
+ * the file system.
+ *
+ * Register with kmsg_dump to save last part of console log on panic.
+ */
+int pstore_register(struct pstore_info *psi)
+{
+ struct module *owner = psi->owner;
+
+ spin_lock(&pstore_lock);
+ if (psinfo) {
+ spin_unlock(&pstore_lock);
+ return -EBUSY;
+ }
+ psinfo = psi;
+ spin_unlock(&pstore_lock);
+
+ if (owner && !try_module_get(owner)) {
+ psinfo = NULL;
+ return -EINVAL;
+ }
+
+ if (pstore_is_mounted())
+ pstore_get_records();
+
+ kmsg_dump_register(&pstore_dumper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pstore_register);
+
+/*
+ * Read all the records from the persistent store. Create and
+ * populate files in our filesystem.
+ */
+void pstore_get_records(void)
+{
+ struct pstore_info *psi = psinfo;
+ size_t size;
+ u64 id;
+ enum pstore_type_id type;
+ struct timespec time;
+ int failed = 0;
+
+ if (!psi)
+ return;
+
+ mutex_lock(&psinfo->buf_mutex);
+ while ((size = psi->read(&id, &type, &time)) > 0) {
+ if (pstore_mkfile(type, psi->name, id, psi->buf, size,
+ time, psi->erase))
+ failed++;
+ }
+ mutex_unlock(&psinfo->buf_mutex);
+
+ if (failed)
+ printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
+ failed, psi->name);
+}
+
+/*
+ * Call platform driver to write a record to the
+ * persistent store.
+ */
+int pstore_write(enum pstore_type_id type, char *buf, size_t size)
+{
+ u64 id;
+
+ if (!psinfo)
+ return -ENODEV;
+
+ if (size > psinfo->bufsize)
+ return -EFBIG;
+
+ mutex_lock(&psinfo->buf_mutex);
+ memcpy(psinfo->buf, buf, size);
+ id = psinfo->write(type, size);
+ if (pstore_is_mounted())
+ pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id, psinfo->buf,
+ size, CURRENT_TIME, psinfo->erase);
+ mutex_unlock(&psinfo->buf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pstore_write);
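A rough sketch of how a platform back end would plug into the interface above. The struct pstore_info field names and callback signatures are inferred from platform.c and internal.h in this patch, so treat them as assumptions rather than a stable API.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pstore.h>
#include <linux/time.h>

#define DEMO_BUFSIZE	1024

static char demo_buf[DEMO_BUFSIZE];

/* Return the size of the next stored record, 0 when none are left. */
static ssize_t demo_read(u64 *id, enum pstore_type_id *type,
			 struct timespec *time)
{
	return 0;			/* nothing stored in this sketch */
}

/* Persist psinfo->buf (already filled by the pstore core), return an id. */
static u64 demo_write(enum pstore_type_id type, size_t size)
{
	return 0;
}

static int demo_erase(u64 id)
{
	return 0;
}

static struct pstore_info demo_pstore_info = {
	.owner		= THIS_MODULE,
	.name		= "demo",
	.buf		= demo_buf,
	.bufsize	= DEMO_BUFSIZE,
	.read		= demo_read,
	.write		= demo_write,
	.erase		= demo_erase,
};

static int __init demo_pstore_init(void)
{
	mutex_init(&demo_pstore_info.buf_mutex);
	return pstore_register(&demo_pstore_info);
}
module_init(demo_pstore_init);

MODULE_LICENSE("GPL");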
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 65444d29406b..f1ab3604db5a 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -112,7 +112,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
if (!info->dqi_priv) {
printk(KERN_WARNING
"Not enough memory for quota information structure.\n");
- return -1;
+ return -ENOMEM;
}
qinfo = info->dqi_priv;
if (version == 0) {
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 3eea859e6990..c77514bd5776 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2876,7 +2876,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
reiserfs_mounted_fs_count++;
if (reiserfs_mounted_fs_count <= 1) {
reiserfs_write_unlock(sb);
- commit_wq = create_workqueue("reiserfs");
+ commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);
reiserfs_write_lock(sb);
}
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index ba5f51ec3458..d5b22ed06779 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -593,7 +593,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode,
new_inode_init(inode, dir, mode);
jbegin_count += reiserfs_cache_default_acl(dir);
- retval = reiserfs_security_init(dir, inode, &security);
+ retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
if (retval < 0) {
drop_new_inode(inode);
return retval;
@@ -667,7 +667,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
new_inode_init(inode, dir, mode);
jbegin_count += reiserfs_cache_default_acl(dir);
- retval = reiserfs_security_init(dir, inode, &security);
+ retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
if (retval < 0) {
drop_new_inode(inode);
return retval;
@@ -747,7 +747,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
new_inode_init(inode, dir, mode);
jbegin_count += reiserfs_cache_default_acl(dir);
- retval = reiserfs_security_init(dir, inode, &security);
+ retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
if (retval < 0) {
drop_new_inode(inode);
return retval;
@@ -1032,7 +1032,8 @@ static int reiserfs_symlink(struct inode *parent_dir,
}
new_inode_init(inode, parent_dir, mode);
- retval = reiserfs_security_init(parent_dir, inode, &security);
+ retval = reiserfs_security_init(parent_dir, inode, &dentry->d_name,
+ &security);
if (retval < 0) {
drop_new_inode(inode);
return retval;
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 237c6928d3c6..ef66c18a9332 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -54,6 +54,7 @@ static size_t security_list(struct dentry *dentry, char *list, size_t list_len,
* of blocks needed for the transaction. If successful, reiserfs_security
* must be released using reiserfs_security_free when the caller is done. */
int reiserfs_security_init(struct inode *dir, struct inode *inode,
+ const struct qstr *qstr,
struct reiserfs_security_handle *sec)
{
int blocks = 0;
@@ -65,7 +66,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
if (IS_PRIVATE(dir))
return 0;
- error = security_inode_init_security(inode, dir, &sec->name,
+ error = security_inode_init_security(inode, dir, qstr, &sec->name,
&sec->value, &sec->length);
if (error) {
if (error == -EOPNOTSUPP)
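The ocfs2 and reiserfs hunks above follow the same recipe: thread the new entry's name (a struct qstr, typically &dentry->d_name) from the namei entry point down to security_inode_init_security(). A compressed, hypothetical sketch of that calling convention:

#include <linux/fs.h>
#include <linux/security.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, not taken from either filesystem: it only shows the
 * updated argument order, with the new name's qstr passed through so the
 * LSM can consult it when labeling the inode.
 */
static int example_init_security(struct inode *dir, struct inode *inode,
				 const struct qstr *qstr)
{
	char *name = NULL;
	void *value = NULL;
	size_t len = 0;
	int err;

	err = security_inode_init_security(inode, dir, qstr,
					   &name, &value, &len);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;	/* no LSM xattr to set */
		return err;
	}

	/* ... store (name, value, len) as the "security.*" xattr here ... */

	/* the real filesystems defer this to their *_security_free helpers */
	kfree(name);
	kfree(value);
	return 0;
}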
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 02429d81ca33..b148fbc80f8d 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -48,6 +48,56 @@
#include <linux/slab.h>
#include "ubifs.h"
+/*
+ * nothing_to_commit - check if there is nothing to commit.
+ * @c: UBIFS file-system description object
+ *
+ * This is a helper function which checks if there is anything to commit. It is
+ * used as an optimization to avoid starting the commit if it is not really
+ * necessary. Indeed, the commit operation always assumes flash I/O (e.g.,
+ * writing the commit start node to the log), and it is better to avoid doing
+ * this unnecessarily. E.g., 'ubifs_sync_fs()' runs the commit, but if there is
+ * nothing to commit, it is best to avoid any flash I/O.
+ *
+ * This function has to be called with @c->commit_sem locked for writing -
+ * this function does not take LPT/TNC locks because the @c->commit_sem
+ * guarantees that we have exclusive access to the TNC and LPT data structures.
+ *
+ * This function returns %1 if there is nothing to commit and %0 otherwise.
+ */
+static int nothing_to_commit(struct ubifs_info *c)
+{
+ /*
+ * During mounting or remounting from R/O mode to R/W mode we may
+ * commit for various recovery-related reasons.
+ */
+ if (c->mounting || c->remounting_rw)
+ return 0;
+
+ /*
+ * If the root TNC node is dirty, we definitely have something to
+ * commit.
+ */
+ if (c->zroot.znode && test_bit(DIRTY_ZNODE, &c->zroot.znode->flags))
+ return 0;
+
+ /*
+ * Even though the TNC is clean, the LPT tree may have dirty nodes. For
+ * example, this may happen if the budgeting subsystem invoked GC to
+ * make some free space, and the GC found an LEB with only dirty and
+ * free space. In this case GC would just change the lprops of this
+ * LEB (by turning all space into free space) and unmap it.
+ */
+ if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags))
+ return 0;
+
+ ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
+ ubifs_assert(c->dirty_pn_cnt == 0);
+ ubifs_assert(c->dirty_nn_cnt == 0);
+
+ return 1;
+}
+
/**
* do_commit - commit the journal.
* @c: UBIFS file-system description object
@@ -70,6 +120,12 @@ static int do_commit(struct ubifs_info *c)
goto out_up;
}
+ if (nothing_to_commit(c)) {
+ up_write(&c->commit_sem);
+ err = 0;
+ goto out_cancel;
+ }
+
/* Sync all write buffers (necessary for recovery) */
for (i = 0; i < c->jhead_cnt; i++) {
err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
@@ -162,12 +218,12 @@ static int do_commit(struct ubifs_info *c)
if (err)
goto out;
+out_cancel:
spin_lock(&c->cs_lock);
c->cmt_state = COMMIT_RESTING;
wake_up(&c->cmt_wq);
dbg_cmt("commit end");
spin_unlock(&c->cs_lock);
-
return 0;
out_up:
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 0bee4dbffc31..bcb1acb79263 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2813,19 +2813,19 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
}
fname = "dump_lprops";
- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
d->dfs_dump_lprops = dent;
fname = "dump_budg";
- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
d->dfs_dump_budg = dent;
fname = "dump_tnc";
- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
d->dfs_dump_tnc = dent;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index d82173182eeb..d1fe56203a1d 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -88,8 +88,12 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
* This function may skip data nodes CRC checking if @c->no_chk_data_crc is
* true, which is controlled by corresponding UBIFS mount option. However, if
* @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
- * checked. Similarly, if @c->always_chk_crc is true, @c->no_chk_data_crc is
- * ignored and CRC is checked.
+ * checked. Similarly, if @c->mounting or @c->remounting_rw is true (we are
+ * mounting or re-mounting to R/W mode), @c->no_chk_data_crc is ignored and CRC
+ * is checked. This is because during mounting or re-mounting from R/O mode to
+ * R/W mode we may read journal nodes (when replaying the journal or doing the
+ * recovery) and the journal nodes may potentially be corrupted, so checking is
+ * required.
*
* This function returns zero in case of success and %-EUCLEAN in case of bad
* CRC or magic.
@@ -131,8 +135,8 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
node_len > c->ranges[type].max_len)
goto out_len;
- if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc &&
- c->no_chk_data_crc)
+ if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
+ !c->remounting_rw && c->no_chk_data_crc)
return 0;
crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
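The comment above describes the new CRC-checking policy in terms of the @c->mounting and @c->remounting_rw flags introduced by this series. A minimal sketch of that predicate, for illustration only (the helper name is hypothetical; the fields and UBIFS_DATA_NODE are the ones used in the hunk above):

/* Illustrative sketch, not part of the patch. */
static inline int may_skip_data_crc(const struct ubifs_info *c,
				    int must_chk_crc, int type)
{
	/*
	 * CRC checking may be skipped only for data nodes, only when the
	 * no_chk_data_crc mount option is set, never when the caller insists
	 * on checking, and never while mounting or re-mounting to R/W mode.
	 */
	return type == UBIFS_DATA_NODE && !must_chk_crc &&
	       c->no_chk_data_crc && !c->mounting && !c->remounting_rw;
}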
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 77e9b874b6c2..e2714f8f05ff 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -28,6 +28,23 @@
* UBIFS always cleans away all remnants of an unclean un-mount, so that
* errors do not accumulate. However UBIFS defers recovery if it is mounted
* read-only, and the flash is not modified in that case.
+ *
+ * The general UBIFS approach to recovery is that it recovers from
+ * corruptions which could be caused by power cuts, but it refuses to recover
+ * from corruptions caused by anything else. UBIFS tries to distinguish
+ * between these two causes of corruption, silently recovering in the former
+ * case and loudly complaining in the latter case.
+ *
+ * UBIFS writes only to erased LEBs, so it writes only to the flash space
+ * containing only 0xFFs. UBIFS also always writes strictly from the beginning
+ * of the LEB to the end. And UBIFS assumes that the underlying flash media
+ * writes in @c->min_io_unit bytes at a time.
+ *
+ * Hence, if UBIFS finds a corrupted node at offset X, it expects only the min.
+ * I/O unit corresponding to offset X to contain corrupted data; all the
+ * following min. I/O units have to contain empty space (all 0xFFs). If this is
+ * not true, the corruption cannot be the result of a power cut, and UBIFS
+ * refuses to mount.
*/
#include <linux/crc32.h>
@@ -671,10 +688,14 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
} else {
int corruption = first_non_ff(buf, len);
+ /*
+ * See header comment for this file for more
+ * explanations about the reasons we have this check.
+ */
ubifs_err("corrupt empty space LEB %d:%d, corruption "
"starts at %d", lnum, offs, corruption);
/* Make sure we dump interesting non-0xFF data */
- offs = corruption;
+ offs += corruption;
buf += corruption;
goto corrupted;
}
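The header comment added above states the invariant the recovery code relies on: after a power cut only the min. I/O unit holding the corrupted node may contain garbage, and everything after it must still be erased flash (0xFF). A minimal sketch of such a check, assuming hypothetical buffer/offset parameters and the kernel's ALIGN() helper:

/* Illustrative sketch, not the UBIFS implementation. */
static int looks_like_power_cut(const u8 *buf, int len, int corrupt_offs,
				int min_io_size)
{
	/* Skip to the end of the min. I/O unit containing the corruption */
	int i = ALIGN(corrupt_offs + 1, min_io_size);

	/* All following bytes must still be erased flash (0xFF) */
	for (; i < len; i++)
		if (buf[i] != 0xFF)
			return 0;
	return 1;
}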
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 3e1ee57dbeaa..36216b46f772 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -328,7 +328,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
if (!quiet)
ubifs_err("empty space starts at non-aligned offset %d",
offs);
- goto corrupted;;
+ goto corrupted;
}
ubifs_end_scan(c, sleb, lnum, offs);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 6e11c2975dcf..d203c99faa1f 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1202,11 +1202,7 @@ static int mount_ubifs(struct ubifs_info *c)
if (c->bulk_read == 1)
bu_init(c);
- /*
- * We have to check all CRCs, even for data nodes, when we mount the FS
- * (specifically, when we are replaying).
- */
- c->always_chk_crc = 1;
+ c->mounting = 1;
err = ubifs_read_superblock(c);
if (err)
@@ -1382,7 +1378,7 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_infos;
- c->always_chk_crc = 0;
+ c->mounting = 0;
ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
c->vi.ubi_num, c->vi.vol_id, c->vi.name);
@@ -1543,7 +1539,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
mutex_lock(&c->umount_mutex);
dbg_save_space_info(c);
c->remounting_rw = 1;
- c->always_chk_crc = 1;
err = check_free_space(c);
if (err)
@@ -1650,7 +1645,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
dbg_gen("re-mounted read-write");
c->ro_mount = 0;
c->remounting_rw = 0;
- c->always_chk_crc = 0;
err = dbg_check_space_info(c);
mutex_unlock(&c->umount_mutex);
return err;
@@ -1667,7 +1661,6 @@ out:
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
c->remounting_rw = 0;
- c->always_chk_crc = 0;
mutex_unlock(&c->umount_mutex);
return err;
}
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index ad9cf0133622..de485979ca39 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -447,8 +447,11 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
*
* Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
* is true (it is controlled by corresponding mount option). However, if
- * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and CRC is always
- * checked.
+ * @c->mounting or @c->remounting_rw is true (we are mounting or re-mounting to
+ * R/W mode), @c->no_chk_data_crc is ignored and CRC is checked. This is
+ * because during mounting or re-mounting from R/O mode to R/W mode we may read
+ * journal nodes (when replaying the journal or doing the recovery) and the
+ * journal nodes may potentially be corrupted, so checking is required.
*/
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
int len, int lnum, int offs)
@@ -476,7 +479,8 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
if (node_len != len)
return 0;
- if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc)
+ if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
+ !c->remounting_rw)
return 1;
crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 381d6b207a52..d1823541f987 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1166,22 +1166,21 @@ struct ubifs_debug_info;
* @rp_uid: reserved pool user ID
* @rp_gid: reserved pool group ID
*
- * @empty: if the UBI device is empty
+ * @empty: %1 if the UBI device is empty
+ * @need_recovery: %1 if the file-system needs recovery
+ * @replaying: %1 during journal replay
+ * @mounting: %1 while mounting
+ * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
* @replay_tree: temporary tree used during journal replay
* @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay
* @cs_sqnum: sequence number of first node in the log (commit start node)
* @replay_sqnum: sequence number of node currently being replayed
- * @need_recovery: file-system needs recovery
- * @replaying: set to %1 during journal replay
* @unclean_leb_list: LEBs to recover when re-mounting R/O mounted FS to R/W
* mode
* @rcvrd_mst_node: recovered master node to write when re-mounting R/O mounted
* FS to R/W mode
* @size_tree: inode size information for recovery
- * @remounting_rw: set while re-mounting from R/O mode to R/W mode
- * @always_chk_crc: always check CRCs (while mounting and remounting to R/W
- * mode)
* @mount_opts: UBIFS-specific mount options
*
* @dbg: debugging-related information
@@ -1402,19 +1401,19 @@ struct ubifs_info {
gid_t rp_gid;
/* The below fields are used only during mounting and re-mounting */
- int empty;
+ unsigned int empty:1;
+ unsigned int need_recovery:1;
+ unsigned int replaying:1;
+ unsigned int mounting:1;
+ unsigned int remounting_rw:1;
struct rb_root replay_tree;
struct list_head replay_list;
struct list_head replay_buds;
unsigned long long cs_sqnum;
unsigned long long replay_sqnum;
- int need_recovery;
- int replaying;
struct list_head unclean_leb_list;
struct ubifs_mst_node *rcvrd_mst_node;
struct rb_root size_tree;
- int remounting_rw;
- int always_chk_crc;
struct ubifs_mount_opts mount_opts;
#ifdef CONFIG_UBIFS_FS_DEBUG
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 306ee39ef2c3..8994dd041660 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -31,7 +31,7 @@
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
- ext2_find_next_bit(addr, size, offset)
+ ext2_find_next_bit((unsigned long *)(addr), size, offset)
static int read_block_bitmap(struct super_block *sb,
struct udf_bitmap *bitmap, unsigned int block,
@@ -297,7 +297,7 @@ repeat:
break;
}
} else {
- bit = udf_find_next_one_bit((char *)bh->b_data,
+ bit = udf_find_next_one_bit(bh->b_data,
sb->s_blocksize << 3,
group_start << 3);
if (bit < sb->s_blocksize << 3)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 89c78486cbbe..f391a2adc699 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -123,8 +123,8 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (inode->i_sb->s_blocksize <
(udf_file_entry_alloc_offset(inode) +
pos + count)) {
- udf_expand_file_adinicb(inode, pos + count, &err);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ err = udf_expand_file_adinicb(inode);
+ if (err) {
udf_debug("udf_expand_adinicb: err=%d\n", err);
up_write(&iinfo->i_data_sem);
return err;
@@ -237,7 +237,7 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = udf_setsize(inode, attr->ia_size);
if (error)
return error;
}
@@ -249,5 +249,4 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
const struct inode_operations udf_file_inode_operations = {
.setattr = udf_setattr,
- .truncate = udf_truncate,
};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c6a2e782b97b..ccc814321414 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -73,14 +73,12 @@ void udf_evict_inode(struct inode *inode)
struct udf_inode_info *iinfo = UDF_I(inode);
int want_delete = 0;
- truncate_inode_pages(&inode->i_data, 0);
-
if (!inode->i_nlink && !is_bad_inode(inode)) {
want_delete = 1;
- inode->i_size = 0;
- udf_truncate(inode);
+ udf_setsize(inode, 0);
udf_update_inode(inode, IS_SYNC(inode));
- }
+ } else
+ truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode);
end_writeback(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
@@ -117,9 +115,18 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
+ struct inode *inode = mapping->host;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ loff_t isize = inode->i_size;
+
+ if (pos + len > isize) {
+ truncate_pagecache(inode, pos + len, isize);
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ down_write(&iinfo->i_data_sem);
+ udf_truncate_extents(inode);
+ up_write(&iinfo->i_data_sem);
+ }
+ }
}
return ret;
@@ -139,30 +146,31 @@ const struct address_space_operations udf_aops = {
.bmap = udf_bmap,
};
-void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
+int udf_expand_file_adinicb(struct inode *inode)
{
struct page *page;
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
+ int err;
struct writeback_control udf_wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
};
- /* from now on we have normal address_space methods */
- inode->i_data.a_ops = &udf_aops;
-
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
mark_inode_dirty(inode);
- return;
+ return 0;
}
- page = grab_cache_page(inode->i_mapping, 0);
- BUG_ON(!PageLocked(page));
+ page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
if (!PageUptodate(page)) {
kaddr = kmap(page);
@@ -181,11 +189,24 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
-
- inode->i_data.a_ops->writepage(page, &udf_wbc);
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
+ err = inode->i_data.a_ops->writepage(page, &udf_wbc);
+ if (err) {
+ /* Restore everything back so that we don't lose data... */
+ lock_page(page);
+ kaddr = kmap(page);
+ memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
+ inode->i_size);
+ kunmap(page);
+ unlock_page(page);
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ inode->i_data.a_ops = &udf_adinicb_aops;
+ }
page_cache_release(page);
-
mark_inode_dirty(inode);
+
+ return err;
}
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
@@ -348,8 +369,10 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
}
/* Extend the file by 'blocks' blocks, return the number of extents added */
-int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
- struct kernel_long_ad *last_ext, sector_t blocks)
+static int udf_do_extend_file(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+ sector_t blocks)
{
sector_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
@@ -357,6 +380,7 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
struct kernel_lb_addr prealloc_loc = {};
int prealloc_len = 0;
struct udf_inode_info *iinfo;
+ int err;
/* The previous extent is fake and we should not extend by anything
* - there's nothing to do... */
@@ -422,26 +446,29 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
/* Create enough extents to cover the whole hole */
while (blocks > add) {
blocks -= add;
- if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
- last_ext->extLength, 1) == -1)
- return -1;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+ return err;
count++;
}
if (blocks) {
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(blocks << sb->s_blocksize_bits);
- if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
- last_ext->extLength, 1) == -1)
- return -1;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+ return err;
count++;
}
out:
/* Do we have some preallocated blocks saved? */
if (prealloc_len) {
- if (udf_add_aext(inode, last_pos, &prealloc_loc,
- prealloc_len, 1) == -1)
- return -1;
+ err = udf_add_aext(inode, last_pos, &prealloc_loc,
+ prealloc_len, 1);
+ if (err)
+ return err;
last_ext->extLocation = prealloc_loc;
last_ext->extLength = prealloc_len;
count++;
@@ -453,11 +480,68 @@ out:
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
last_pos->offset -= sizeof(struct long_ad);
else
- return -1;
+ return -EIO;
return count;
}
+static int udf_extend_file(struct inode *inode, loff_t newsize)
+{
+
+ struct extent_position epos;
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
+ int8_t etype;
+ struct super_block *sb = inode->i_sb;
+ sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct kernel_long_ad extent;
+ int err;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+ else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ adsize = sizeof(struct long_ad);
+ else
+ BUG();
+
+ etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+
+ /* File has extent covering the new size (could happen when extending
+ * inside a block)? */
+ if (etype != -1)
+ return 0;
+ if (newsize & (sb->s_blocksize - 1))
+ offset++;
+ /* Extended file just to the boundary of the last file block? */
+ if (offset == 0)
+ return 0;
+
+ /* Truncate is extending the file by 'offset' blocks */
+ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+ /* File has no extents at all or has empty last
+ * indirect extent! Create a fake extent... */
+ extent.extLocation.logicalBlockNum = 0;
+ extent.extLocation.partitionReferenceNum = 0;
+ extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
+ } else {
+ epos.offset -= adsize;
+ etype = udf_next_aext(inode, &epos, &extent.extLocation,
+ &extent.extLength, 0);
+ extent.extLength |= etype << 30;
+ }
+ err = udf_do_extend_file(inode, &epos, &extent, offset);
+ if (err < 0)
+ goto out;
+ err = 0;
+ iinfo->i_lenExtents = newsize;
+out:
+ brelse(epos.bh);
+ return err;
+}
+
static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
int *err, sector_t *phys, int *new)
{
@@ -540,7 +624,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
elen = EXT_RECORDED_ALLOCATED |
((elen + inode->i_sb->s_blocksize - 1) &
~(inode->i_sb->s_blocksize - 1));
- etype = udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
+ udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
}
brelse(prev_epos.bh);
brelse(cur_epos.bh);
@@ -564,19 +648,17 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
memset(&laarr[0].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
- /* Will udf_extend_file() create real extent from
+ /* Will udf_do_extend_file() create real extent from
a fake one? */
startnum = (offset > 0);
}
/* Create extents for the hole between EOF and offset */
- ret = udf_extend_file(inode, &prev_epos, laarr, offset);
- if (ret == -1) {
+ ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
+ if (ret < 0) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
- /* We don't really know the error here so we just make
- * something up */
- *err = -ENOSPC;
+ *err = ret;
return NULL;
}
c = 0;
@@ -1005,52 +1087,66 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
return NULL;
}
-void udf_truncate(struct inode *inode)
+int udf_setsize(struct inode *inode, loff_t newsize)
{
- int offset;
int err;
struct udf_inode_info *iinfo;
+ int bsize = 1 << inode->i_blkbits;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
- return;
+ return -EINVAL;
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
- return;
+ return -EPERM;
iinfo = UDF_I(inode);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ if (newsize > inode->i_size) {
down_write(&iinfo->i_data_sem);
- if (inode->i_sb->s_blocksize <
- (udf_file_entry_alloc_offset(inode) +
- inode->i_size)) {
- udf_expand_file_adinicb(inode, inode->i_size, &err);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
- inode->i_size = iinfo->i_lenAlloc;
- up_write(&iinfo->i_data_sem);
- return;
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ if (bsize <
+ (udf_file_entry_alloc_offset(inode) + newsize)) {
+ err = udf_expand_file_adinicb(inode);
+ if (err) {
+ up_write(&iinfo->i_data_sem);
+ return err;
+ }
} else
- udf_truncate_extents(inode);
- } else {
- offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
- memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
- 0x00, inode->i_sb->s_blocksize -
- offset - udf_file_entry_alloc_offset(inode));
- iinfo->i_lenAlloc = inode->i_size;
+ iinfo->i_lenAlloc = newsize;
+ }
+ err = udf_extend_file(inode, newsize);
+ if (err) {
+ up_write(&iinfo->i_data_sem);
+ return err;
}
+ truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
} else {
- block_truncate_page(inode->i_mapping, inode->i_size,
- udf_get_block);
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ down_write(&iinfo->i_data_sem);
+ memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
+ 0x00, bsize - newsize -
+ udf_file_entry_alloc_offset(inode));
+ iinfo->i_lenAlloc = newsize;
+ truncate_setsize(inode, newsize);
+ up_write(&iinfo->i_data_sem);
+ goto update_time;
+ }
+ err = block_truncate_page(inode->i_mapping, newsize,
+ udf_get_block);
+ if (err)
+ return err;
down_write(&iinfo->i_data_sem);
+ truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
-
+update_time:
inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
+ return 0;
}
static void __udf_read_inode(struct inode *inode)
@@ -1637,14 +1733,13 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
return NULL;
}
-int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
- struct kernel_lb_addr *eloc, uint32_t elen, int inc)
+int udf_add_aext(struct inode *inode, struct extent_position *epos,
+ struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
struct short_ad *sad = NULL;
struct long_ad *lad = NULL;
struct allocExtDesc *aed;
- int8_t etype;
uint8_t *ptr;
struct udf_inode_info *iinfo = UDF_I(inode);
@@ -1660,7 +1755,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
- return -1;
+ return -EIO;
if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
unsigned char *sptr, *dptr;
@@ -1672,12 +1767,12 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
obloc.partitionReferenceNum,
obloc.logicalBlockNum, &err);
if (!epos->block.logicalBlockNum)
- return -1;
+ return -ENOSPC;
nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
&epos->block,
0));
if (!nbh)
- return -1;
+ return -EIO;
lock_buffer(nbh);
memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(nbh);
@@ -1746,7 +1841,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
epos->bh = nbh;
}
- etype = udf_write_aext(inode, epos, eloc, elen, inc);
+ udf_write_aext(inode, epos, eloc, elen, inc);
if (!epos->bh) {
iinfo->i_lenAlloc += adsize;
@@ -1764,11 +1859,11 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
mark_buffer_dirty_inode(epos->bh, inode);
}
- return etype;
+ return 0;
}
-int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
- struct kernel_lb_addr *eloc, uint32_t elen, int inc)
+void udf_write_aext(struct inode *inode, struct extent_position *epos,
+ struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
uint8_t *ptr;
@@ -1798,7 +1893,7 @@ int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
adsize = sizeof(struct long_ad);
break;
default:
- return -1;
+ return;
}
if (epos->bh) {
@@ -1817,8 +1912,6 @@ int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
if (inc)
epos->offset += adsize;
-
- return (elen >> 30);
}
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 225527cdc885..8424308db4b4 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -197,6 +197,11 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
mark_buffer_dirty_inode(epos->bh, inode);
}
+/*
+ * Truncate extents of inode to inode->i_size. This function can be used only
+ * for making the file shorter. For making the file longer, udf_extend_file()
+ * has to be used.
+ */
void udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
@@ -219,96 +224,65 @@ void udf_truncate_extents(struct inode *inode)
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
byte_offset = (offset << sb->s_blocksize_bits) +
(inode->i_size & (sb->s_blocksize - 1));
- if (etype != -1) {
- epos.offset -= adsize;
- extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
- epos.offset += adsize;
- if (byte_offset)
- lenalloc = epos.offset;
- else
- lenalloc = epos.offset - adsize;
-
- if (!epos.bh)
- lenalloc -= udf_file_entry_alloc_offset(inode);
- else
- lenalloc -= sizeof(struct allocExtDesc);
-
- while ((etype = udf_current_aext(inode, &epos, &eloc,
- &elen, 0)) != -1) {
- if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
- udf_write_aext(inode, &epos, &neloc, nelen, 0);
- if (indirect_ext_len) {
- /* We managed to free all extents in the
- * indirect extent - free it too */
- BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, &epos.block,
- 0, indirect_ext_len);
- } else if (!epos.bh) {
- iinfo->i_lenAlloc = lenalloc;
- mark_inode_dirty(inode);
- } else
- udf_update_alloc_ext_desc(inode,
- &epos, lenalloc);
- brelse(epos.bh);
- epos.offset = sizeof(struct allocExtDesc);
- epos.block = eloc;
- epos.bh = udf_tread(sb,
- udf_get_lb_pblock(sb, &eloc, 0));
- if (elen)
- indirect_ext_len =
- (elen + sb->s_blocksize - 1) >>
- sb->s_blocksize_bits;
- else
- indirect_ext_len = 1;
- } else {
- extent_trunc(inode, &epos, &eloc, etype,
- elen, 0);
- epos.offset += adsize;
- }
- }
+ if (etype == -1) {
+ /* We should extend the file? */
+ WARN_ON(byte_offset);
+ return;
+ }
+ epos.offset -= adsize;
+ extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
+ epos.offset += adsize;
+ if (byte_offset)
+ lenalloc = epos.offset;
+ else
+ lenalloc = epos.offset - adsize;
- if (indirect_ext_len) {
- BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, &epos.block, 0,
- indirect_ext_len);
- } else if (!epos.bh) {
- iinfo->i_lenAlloc = lenalloc;
- mark_inode_dirty(inode);
- } else
- udf_update_alloc_ext_desc(inode, &epos, lenalloc);
- } else if (inode->i_size) {
- if (byte_offset) {
- struct kernel_long_ad extent;
+ if (!epos.bh)
+ lenalloc -= udf_file_entry_alloc_offset(inode);
+ else
+ lenalloc -= sizeof(struct allocExtDesc);
- /*
- * OK, there is not extent covering inode->i_size and
- * no extent above inode->i_size => truncate is
- * extending the file by 'offset' blocks.
- */
- if ((!epos.bh &&
- epos.offset ==
- udf_file_entry_alloc_offset(inode)) ||
- (epos.bh && epos.offset ==
- sizeof(struct allocExtDesc))) {
- /* File has no extents at all or has empty last
- * indirect extent! Create a fake extent... */
- extent.extLocation.logicalBlockNum = 0;
- extent.extLocation.partitionReferenceNum = 0;
- extent.extLength =
- EXT_NOT_RECORDED_NOT_ALLOCATED;
- } else {
- epos.offset -= adsize;
- etype = udf_next_aext(inode, &epos,
- &extent.extLocation,
- &extent.extLength, 0);
- extent.extLength |= etype << 30;
- }
- udf_extend_file(inode, &epos, &extent,
- offset +
- ((inode->i_size &
- (sb->s_blocksize - 1)) != 0));
+ while ((etype = udf_current_aext(inode, &epos, &eloc,
+ &elen, 0)) != -1) {
+ if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ udf_write_aext(inode, &epos, &neloc, nelen, 0);
+ if (indirect_ext_len) {
+ /* We managed to free all extents in the
+ * indirect extent - free it too */
+ BUG_ON(!epos.bh);
+ udf_free_blocks(sb, inode, &epos.block,
+ 0, indirect_ext_len);
+ } else if (!epos.bh) {
+ iinfo->i_lenAlloc = lenalloc;
+ mark_inode_dirty(inode);
+ } else
+ udf_update_alloc_ext_desc(inode,
+ &epos, lenalloc);
+ brelse(epos.bh);
+ epos.offset = sizeof(struct allocExtDesc);
+ epos.block = eloc;
+ epos.bh = udf_tread(sb,
+ udf_get_lb_pblock(sb, &eloc, 0));
+ if (elen)
+ indirect_ext_len =
+ (elen + sb->s_blocksize - 1) >>
+ sb->s_blocksize_bits;
+ else
+ indirect_ext_len = 1;
+ } else {
+ extent_trunc(inode, &epos, &eloc, etype, elen, 0);
+ epos.offset += adsize;
}
}
+
+ if (indirect_ext_len) {
+ BUG_ON(!epos.bh);
+ udf_free_blocks(sb, inode, &epos.block, 0, indirect_ext_len);
+ } else if (!epos.bh) {
+ iinfo->i_lenAlloc = lenalloc;
+ mark_inode_dirty(inode);
+ } else
+ udf_update_alloc_ext_desc(inode, &epos, lenalloc);
iinfo->i_lenExtents = inode->i_size;
brelse(epos.bh);
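Per the new comment, udf_truncate_extents() now handles only shrinking, while growing goes through udf_extend_file(); udf_setsize() in inode.c is the dispatcher. A condensed sketch of that dispatch, simplified from the udf_setsize() hunk above (locking, the in-ICB case and block_truncate_page() are omitted):

/* Simplified sketch of the size-change dispatch, for illustration only. */
static int udf_setsize_sketch(struct inode *inode, loff_t newsize)
{
	int err = 0;

	if (newsize > inode->i_size) {
		/* Growing: record extents up to the new size first */
		err = udf_extend_file(inode, newsize);
		if (!err)
			truncate_setsize(inode, newsize);
	} else {
		/* Shrinking: update i_size, then drop surplus extents */
		truncate_setsize(inode, newsize);
		udf_truncate_extents(inode);
	}
	return err;
}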
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index eba48209f9f3..dbd52d4b5eed 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -136,22 +136,20 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
/* inode.c */
extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern void udf_expand_file_adinicb(struct inode *, int, int *);
+extern int udf_expand_file_adinicb(struct inode *);
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
-extern void udf_truncate(struct inode *);
+extern int udf_setsize(struct inode *, loff_t);
extern void udf_read_inode(struct inode *);
extern void udf_evict_inode(struct inode *);
extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
extern long udf_block_map(struct inode *, sector_t);
-extern int udf_extend_file(struct inode *, struct extent_position *,
- struct kernel_long_ad *, sector_t);
extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, sector_t *);
-extern int8_t udf_add_aext(struct inode *, struct extent_position *,
+extern int udf_add_aext(struct inode *, struct extent_position *,
+ struct kernel_lb_addr *, uint32_t, int);
+extern void udf_write_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_write_aext(struct inode *, struct extent_position *,
- struct kernel_lb_addr *, uint32_t, int);
extern int8_t udf_delete_aext(struct inode *, struct extent_position,
struct kernel_lb_addr, uint32_t);
extern int8_t udf_next_aext(struct inode *, struct extent_position *,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ac1c7e8378dd..f83a4c830a65 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -2022,11 +2022,12 @@ xfs_buf_init(void)
if (!xfslogd_workqueue)
goto out_free_buf_zone;
- xfsdatad_workqueue = create_workqueue("xfsdatad");
+ xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- xfsconvertd_workqueue = create_workqueue("xfsconvertd");
+ xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
+ WQ_MEM_RECLAIM, 1);
if (!xfsconvertd_workqueue)
goto out_destroy_xfsdatad_workqueue;
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
index 05201ae719e5..d61611c88012 100644
--- a/fs/xfs/linux-2.6/xfs_discard.c
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -152,6 +152,8 @@ xfs_ioc_trim(
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
+ if (!blk_queue_discard(q))
+ return -XFS_ERROR(EOPNOTSUPP);
if (copy_from_user(&range, urange, sizeof(range)))
return -XFS_ERROR(EFAULT);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index f5e2a19e0f8e..0ca0e3c024d7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
xfs_mount_t *mp,
void __user *arg)
{
- xfs_fsop_geom_v1_t fsgeo;
+ xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
+ error = xfs_fs_geometry(mp, &fsgeo, 3);
if (error)
return -error;
- if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+ /*
+ * Caller should have passed an argument of type
+ * xfs_fsop_geom_v1_t. This is a proper subset of the
+ * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
+ */
+ if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
return -XFS_ERROR(EFAULT);
return 0;
}
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index bd5727852fd6..9ff7fc603d2f 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -102,7 +102,8 @@ xfs_mark_inode_dirty(
STATIC int
xfs_init_security(
struct inode *inode,
- struct inode *dir)
+ struct inode *dir,
+ const struct qstr *qstr)
{
struct xfs_inode *ip = XFS_I(inode);
size_t length;
@@ -110,7 +111,7 @@ xfs_init_security(
unsigned char *name;
int error;
- error = security_inode_init_security(inode, dir, (char **)&name,
+ error = security_inode_init_security(inode, dir, qstr, (char **)&name,
&value, &length);
if (error) {
if (error == -EOPNOTSUPP)
@@ -194,7 +195,7 @@ xfs_vn_mknod(
inode = VFS_I(ip);
- error = xfs_init_security(inode, dir);
+ error = xfs_init_security(inode, dir, &dentry->d_name);
if (unlikely(error))
goto out_cleanup_inode;
@@ -367,7 +368,7 @@ xfs_vn_symlink(
inode = VFS_I(cip);
- error = xfs_init_security(inode, dir);
+ error = xfs_init_security(inode, dir, &dentry->d_name);
if (unlikely(error))
goto out_cleanup_inode;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9731898083ae..7ec1fb8c1316 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -189,6 +189,7 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_BARRIER;
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+ mp->m_flags |= XFS_MOUNT_DELAYLOG;
/*
* These can be overridden by the mount option parsing.
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 206a2815ced6..f517963aec07 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1230,13 +1230,6 @@ xfs_qm_qino_alloc(
}
/*
- * Keep an extra reference to this quota inode. This inode is
- * locked exclusively and joined to the transaction already.
- */
- ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
- IHOLD(*ip);
-
- /*
* Make the changes in the superblock, and log those too.
* sbfields arg may contain fields other than *QUOTINO;
* VERSIONNUM for example.
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index dc3afd7739ff..e7b441db0530 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2365,6 +2365,13 @@ xfs_bmap_rtalloc(
*/
if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
+
+ /*
+ * Lock out other modifications to the RT bitmap inode.
+ */
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+
/*
* If it's an allocation to an empty file at offset 0,
* pick an extent that will space things out in the rt area.
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index cec89dd5d7d2..85668efb3e3e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -53,6 +53,9 @@ xfs_fs_geometry(
xfs_fsop_geom_t *geo,
int new_version)
{
+
+ memset(geo, 0, sizeof(*geo));
+
geo->blocksize = mp->m_sb.sb_blocksize;
geo->rtextsize = mp->m_sb.sb_rextsize;
geo->agblocks = mp->m_sb.sb_agblocks;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index be7cf625421f..c39278b6c871 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1016,8 +1016,8 @@ xfs_ialloc(
* This is because we're setting fields here we need
* to prevent others from looking at until we're done.
*/
- error = xfs_trans_iget(tp->t_mountp, tp, ino,
- XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
+ error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
+ XFS_ILOCK_EXCL, &ip);
if (error)
return error;
ASSERT(ip != NULL);
@@ -1166,6 +1166,7 @@ xfs_ialloc(
/*
* Log the new values stuffed into the inode.
*/
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, flags);
/* now that we have an i_mode we can setup inode ops and unlock */
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 5c95fa8ec11d..f753200cef8d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -409,28 +409,35 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
/*
* Flags for lockdep annotations.
*
- * XFS_I[O]LOCK_PARENT - for operations that require locking two inodes
- * (ie directory operations that require locking a directory inode and
- * an entry inode). The first inode gets locked with this flag so it
- * gets a lockdep subclass of 1 and the second lock will have a lockdep
- * subclass of 0.
+ * XFS_LOCK_PARENT - for directory operations that require locking a
+ * parent directory inode and a child entry inode. The parent gets locked
+ * with this flag so it gets a lockdep subclass of 1 and the child entry
+ * lock will have a lockdep subclass of 0.
+ *
+ * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
+ * inodes do not participate in the normal lock order, and thus have their
+ * own subclasses.
*
* XFS_LOCK_INUMORDER - for locking several inodes at the same time
* with xfs_lock_inodes(). This flag is used as the starting subclass
* and each subsequent lock acquired will increment the subclass by one.
- * So the first lock acquired will have a lockdep subclass of 2, the
- * second lock will have a lockdep subclass of 3, and so on. It is
+ * So the first lock acquired will have a lockdep subclass of 4, the
+ * second lock will have a lockdep subclass of 5, and so on. It is
* the responsibility of the class builder to shift this to the correct
* portion of the lock_mode lockdep mask.
*/
#define XFS_LOCK_PARENT 1
-#define XFS_LOCK_INUMORDER 2
+#define XFS_LOCK_RTBITMAP 2
+#define XFS_LOCK_RTSUM 3
+#define XFS_LOCK_INUMORDER 4
#define XFS_IOLOCK_SHIFT 16
#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
#define XFS_ILOCK_SHIFT 24
#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
#define XFS_IOLOCK_DEP_MASK 0x00ff0000
#define XFS_ILOCK_DEP_MASK 0xff000000
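To make the subclass scheme above concrete, here is a hypothetical helper (illustration only, using just the macros from this header plus the existing XFS_ILOCK_EXCL flag) showing how an inode's position in a multi-inode lock sequence is folded into the ILOCK lockdep bits:

/* Illustrative only: build ILOCK flags carrying a lockdep subclass. */
static inline uint xfs_ilock_flags_for_nth_inode(int n)
{
	uint subclass = XFS_LOCK_INUMORDER + n;	/* 4, 5, 6, ... */

	return XFS_ILOCK_EXCL |
	       ((subclass << XFS_ILOCK_SHIFT) & XFS_ILOCK_DEP_MASK);
}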
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index edfa178bafb6..4aff56395732 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -309,7 +309,7 @@ xfs_mru_cache_init(void)
if (!xfs_mru_elem_zone)
goto out;
- xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
+ xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1);
if (!xfs_mru_reap_wq)
goto out_destroy_mru_elem_zone;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 12a191385310..fbff89344bad 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -76,7 +76,7 @@ xfs_growfs_rt_alloc(
xfs_mount_t *mp, /* file system mount point */
xfs_extlen_t oblocks, /* old count of blocks */
xfs_extlen_t nblocks, /* new count of blocks */
- xfs_ino_t ino) /* inode number (bitmap/summary) */
+ xfs_inode_t *ip) /* inode (bitmap/summary) */
{
xfs_fileoff_t bno; /* block number in file */
xfs_buf_t *bp; /* temporary buffer for zeroing */
@@ -86,7 +86,6 @@ xfs_growfs_rt_alloc(
xfs_fsblock_t firstblock; /* first block allocated in xaction */
xfs_bmap_free_t flist; /* list of freed blocks */
xfs_fsblock_t fsbno; /* filesystem block for bno */
- xfs_inode_t *ip; /* pointer to incore inode */
xfs_bmbt_irec_t map; /* block map output */
int nmap; /* number of block maps */
int resblks; /* space reservation */
@@ -112,9 +111,9 @@ xfs_growfs_rt_alloc(
/*
* Lock the inode.
*/
- if ((error = xfs_trans_iget(mp, tp, ino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
+
xfs_bmap_init(&flist, &firstblock);
/*
* Allocate blocks to the bitmap file.
@@ -155,9 +154,8 @@ xfs_growfs_rt_alloc(
/*
* Lock the bitmap inode.
*/
- if ((error = xfs_trans_iget(mp, tp, ino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
/*
* Get a buffer for the block.
*/
@@ -1854,7 +1852,6 @@ xfs_growfs_rt(
xfs_rtblock_t bmbno; /* bitmap block number */
xfs_buf_t *bp; /* temporary buffer */
int error; /* error return value */
- xfs_inode_t *ip; /* bitmap inode, used as lock */
xfs_mount_t *nmp; /* new (fake) mount structure */
xfs_drfsbno_t nrblocks; /* new number of realtime blocks */
xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */
@@ -1918,11 +1915,11 @@ xfs_growfs_rt(
/*
* Allocate space to the bitmap and summary files, as necessary.
*/
- if ((error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks,
- mp->m_sb.sb_rbmino)))
+ error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, mp->m_rbmip);
+ if (error)
return error;
- if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks,
- mp->m_sb.sb_rsumino)))
+ error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip);
+ if (error)
return error;
/*
* Allocate a new (fake) mount/sb.
@@ -1972,10 +1969,8 @@ xfs_growfs_rt(
/*
* Lock out other callers by grabbing the bitmap inode lock.
*/
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
- ASSERT(ip == mp->m_rbmip);
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
/*
* Update the bitmap inode's size.
*/
@@ -1986,10 +1981,8 @@ xfs_growfs_rt(
/*
* Get the summary inode into the transaction.
*/
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
- ASSERT(ip == mp->m_rsumip);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
/*
* Update the summary inode's size.
*/
@@ -2075,15 +2068,15 @@ xfs_rtallocate_extent(
xfs_extlen_t prod, /* extent product factor */
xfs_rtblock_t *rtblock) /* out: start block allocated */
{
+ xfs_mount_t *mp = tp->t_mountp;
int error; /* error value */
- xfs_inode_t *ip; /* inode for bitmap file */
- xfs_mount_t *mp; /* file system mount structure */
xfs_rtblock_t r; /* result allocated block */
xfs_fsblock_t sb; /* summary file block number */
xfs_buf_t *sumbp; /* summary file block buffer */
+ ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
ASSERT(minlen > 0 && minlen <= maxlen);
- mp = tp->t_mountp;
+
/*
* If prod is set then figure out what to do to minlen and maxlen.
*/
@@ -2099,12 +2092,7 @@ xfs_rtallocate_extent(
return 0;
}
}
- /*
- * Lock out other callers by grabbing the bitmap inode lock.
- */
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- return error;
+
sumbp = NULL;
/*
* Allocate by size, or near another block, or exactly at some block.
@@ -2123,11 +2111,12 @@ xfs_rtallocate_extent(
len, &sumbp, &sb, prod, &r);
break;
default:
+ error = EIO;
ASSERT(0);
}
- if (error) {
+ if (error)
return error;
- }
+
/*
* If it worked, update the superblock.
*/
@@ -2155,7 +2144,6 @@ xfs_rtfree_extent(
xfs_extlen_t len) /* length of extent freed */
{
int error; /* error value */
- xfs_inode_t *ip; /* bitmap file inode */
xfs_mount_t *mp; /* file system mount structure */
xfs_fsblock_t sb; /* summary file block number */
xfs_buf_t *sumbp; /* summary file block buffer */
@@ -2164,9 +2152,9 @@ xfs_rtfree_extent(
/*
* Synchronize by locking the bitmap inode.
*/
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- return error;
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+
#if defined(__KERNEL__) && defined(DEBUG)
/*
* Check to see that this whole range is currently allocated.
@@ -2199,10 +2187,10 @@ xfs_rtfree_extent(
*/
if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
mp->m_sb.sb_rextents) {
- if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
- ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
- *(__uint64_t *)&ip->i_d.di_atime = 0;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+ mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+ *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+ xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
}
return 0;
}
@@ -2306,20 +2294,16 @@ xfs_rtpick_extent(
xfs_rtblock_t *pick) /* result rt extent */
{
xfs_rtblock_t b; /* result block */
- int error; /* error return value */
- xfs_inode_t *ip; /* bitmap incore inode */
int log2; /* log of sequence number */
__uint64_t resid; /* residual after log removed */
__uint64_t seq; /* sequence number of file creation */
__uint64_t *seqp; /* pointer to seqno in inode */
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- return error;
- ASSERT(ip == mp->m_rbmip);
- seqp = (__uint64_t *)&ip->i_d.di_atime;
- if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
- ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+ ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+
+ seqp = (__uint64_t *)&mp->m_rbmip->i_d.di_atime;
+ if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
+ mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
*seqp = 0;
}
seq = *seqp;
@@ -2335,7 +2319,7 @@ xfs_rtpick_extent(
b = mp->m_sb.sb_rextents - len;
}
*seqp = seq + 1;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
*pick = b;
return 0;
}
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 56861d5daaef..ccd3adf640ee 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -173,17 +173,9 @@ xfs_extlen_t
xfs_get_extsz_hint(
struct xfs_inode *ip)
{
- xfs_extlen_t extsz;
-
- if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
- extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
- ? ip->i_d.di_extsize
- : ip->i_mount->m_sb.sb_rextsize;
- ASSERT(extsz);
- } else {
- extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
- ? ip->i_d.di_extsize : 0;
- }
-
- return extsz;
+ if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
+ return ip->i_d.di_extsize;
+ if (XFS_IS_REALTIME_INODE(ip))
+ return ip->i_mount->m_sb.sb_rextsize;
+ return 0;
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index c2042b736b81..06a9759b6352 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -469,8 +469,6 @@ void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
-int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
- xfs_ino_t , uint, uint, struct xfs_inode **);
void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index ccb34532768b..16084d8ea231 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -44,28 +44,6 @@ xfs_trans_inode_broot_debug(
#endif
/*
- * Get an inode and join it to the transaction.
- */
-int
-xfs_trans_iget(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_ino_t ino,
- uint flags,
- uint lock_flags,
- xfs_inode_t **ipp)
-{
- int error;
-
- error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
- if (!error && tp) {
- xfs_trans_ijoin(tp, *ipp);
- (*ipp)->i_itemp->ili_lock_flags = lock_flags;
- }
- return error;
-}
-
-/*
* Add a locked inode to the transaction.
*
* The inode must be locked, and it cannot be associated with any transaction.
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index d8e6f8cd6f0c..258d4f98eb9b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1310,7 +1310,7 @@ xfs_create(
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
- goto std_return;
+ return error;
if (is_dir) {
rdev = 0;
@@ -1390,12 +1390,6 @@ xfs_create(
}
/*
- * At this point, we've gotten a newly allocated inode.
- * It is locked (and joined to the transaction).
- */
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- /*
* Now we join the directory inode to the transaction. We do not do it
* earlier because xfs_dir_ialloc might commit the previous transaction
* (and release all the locks). An error from here on will result in
@@ -1440,22 +1434,13 @@ xfs_create(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
- /*
- * xfs_trans_commit normally decrements the vnode ref count
- * when it unlocks the inode. Since we want to return the
- * vnode to the caller, we bump the vnode ref count now.
- */
- IHOLD(ip);
-
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
- goto out_abort_rele;
+ goto out_bmap_cancel;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- if (error) {
- IRELE(ip);
- goto out_dqrele;
- }
+ if (error)
+ goto out_release_inode;
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -1469,27 +1454,21 @@ xfs_create(
cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
- out_dqrele:
+ out_release_inode:
+ /*
+ * Wait until after the current transaction is aborted to
+ * release the inode. This prevents recursive transactions
+ * and deadlocks from xfs_inactive.
+ */
+ if (ip)
+ IRELE(ip);
+
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- std_return:
return error;
-
- out_abort_rele:
- /*
- * Wait until after the current transaction is aborted to
- * release the inode. This prevents recursive transactions
- * and deadlocks from xfs_inactive.
- */
- xfs_bmap_cancel(&free_list);
- cancel_flags |= XFS_TRANS_ABORT;
- xfs_trans_cancel(tp, cancel_flags);
- IRELE(ip);
- unlock_dp_on_error = B_FALSE;
- goto out_dqrele;
}
#ifdef DEBUG
@@ -2114,9 +2093,8 @@ xfs_symlink(
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
&first_block, resblks, mval, &nmaps,
&free_list);
- if (error) {
- goto error1;
- }
+ if (error)
+ goto error2;
if (resblks)
resblks -= fs_blocks;
@@ -2148,7 +2126,7 @@ xfs_symlink(
error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
&first_block, &free_list, resblks);
if (error)
- goto error1;
+ goto error2;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -2161,13 +2139,6 @@ xfs_symlink(
xfs_trans_set_sync(tp);
}
- /*
- * xfs_trans_commit normally decrements the vnode ref count
- * when it unlocks the inode. Since we want to return the
- * vnode to the caller, we bump the vnode ref count now.
- */
- IHOLD(ip);
-
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error2;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 78ca429929f7..3a10ef5914eb 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -250,7 +250,6 @@ struct acpi_device_wakeup {
struct acpi_handle_list resources;
struct acpi_device_wakeup_flags flags;
int prepare_count;
- int run_wake_count;
};
/* Device */
@@ -381,7 +380,7 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
int acpi_pm_device_sleep_state(struct device *, int *);
#else
static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index c4dbb132d902..e67b523a50e1 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -30,10 +30,11 @@ int apei_hest_parse(apei_hest_func_t func, void *data);
int erst_write(const struct cper_record_header *record);
ssize_t erst_get_record_count(void);
-int erst_get_next_record_id(u64 *record_id);
+int erst_get_record_id_begin(int *pos);
+int erst_get_record_id_next(int *pos, u64 *record_id);
+void erst_get_record_id_end(void);
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen);
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen);
int erst_clear(u64 record_id);
#endif
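The erst_get_record_id_begin/next/end trio above replaces the single-cursor erst_get_next_record_id() with an explicit iteration context. A hedged usage sketch follows; it assumes iteration stops when the returned id equals the APEI_ERST_INVALID_RECORD_ID sentinel, and error handling is trimmed:

/* Usage sketch only; the termination convention is an assumption here. */
static void erst_dump_record_ids(void)
{
	u64 record_id;
	int rc, pos;

	rc = erst_get_record_id_begin(&pos);
	if (rc)
		return;
	for (;;) {
		rc = erst_get_record_id_next(&pos, &record_id);
		if (rc || record_id == APEI_ERST_INVALID_RECORD_ID)
			break;
		pr_info("ERST record id: 0x%llx\n",
			(unsigned long long)record_id);
	}
	erst_get_record_id_end();
}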
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 2bcc5c7c22a6..61e03dd7939e 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -30,6 +30,9 @@ typedef u64 cputime64_t;
#define cputime64_to_jiffies64(__ct) (__ct)
#define jiffies64_to_cputime64(__jif) (__jif)
#define cputime_to_cputime64(__ct) ((u64) __ct)
+#define cputime64_gt(__a, __b) ((__a) > (__b))
+
+#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
/*
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index 28cc03bf19e6..a1331ce50445 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -108,4 +108,6 @@
#define ERFKILL 132 /* Operation not possible due to RF-kill */
+#define EHWPOISON 133 /* Memory page has hardware error */
+
#endif
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 000000000000..51abba9ea7ad
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-generic/ftrace.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h; the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 4644c9a7f724..e0ffa3ddb02a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -94,6 +94,10 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *) 0)
+#endif
+
/*****************************************************************************/
/*
* traditional input/output functions
@@ -101,32 +105,32 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
static inline u8 inb(unsigned long addr)
{
- return readb((volatile void __iomem *) addr);
+ return readb(addr + PCI_IOBASE);
}
static inline u16 inw(unsigned long addr)
{
- return readw((volatile void __iomem *) addr);
+ return readw(addr + PCI_IOBASE);
}
static inline u32 inl(unsigned long addr)
{
- return readl((volatile void __iomem *) addr);
+ return readl(addr + PCI_IOBASE);
}
static inline void outb(u8 b, unsigned long addr)
{
- writeb(b, (volatile void __iomem *) addr);
+ writeb(b, addr + PCI_IOBASE);
}
static inline void outw(u16 b, unsigned long addr)
{
- writew(b, (volatile void __iomem *) addr);
+ writew(b, addr + PCI_IOBASE);
}
static inline void outl(u32 b, unsigned long addr)
{
- writel(b, (volatile void __iomem *) addr);
+ writel(b, addr + PCI_IOBASE);
}
#define inb_p(addr) inb(addr)
@@ -213,32 +217,32 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
static inline void readsl(const void __iomem *addr, void *buf, int len)
{
- insl((unsigned long)addr, buf, len);
+ insl(addr - PCI_IOBASE, buf, len);
}
static inline void readsw(const void __iomem *addr, void *buf, int len)
{
- insw((unsigned long)addr, buf, len);
+ insw(addr - PCI_IOBASE, buf, len);
}
static inline void readsb(const void __iomem *addr, void *buf, int len)
{
- insb((unsigned long)addr, buf, len);
+ insb(addr - PCI_IOBASE, buf, len);
}
static inline void writesl(const void __iomem *addr, const void *buf, int len)
{
- outsl((unsigned long)addr, buf, len);
+ outsl(addr - PCI_IOBASE, buf, len);
}
static inline void writesw(const void __iomem *addr, const void *buf, int len)
{
- outsw((unsigned long)addr, buf, len);
+ outsw(addr - PCI_IOBASE, buf, len);
}
static inline void writesb(const void __iomem *addr, const void *buf, int len)
{
- outsb((unsigned long)addr, buf, len);
+ outsb(addr - PCI_IOBASE, buf, len);
}
#ifndef CONFIG_GENERIC_IOMAP
@@ -269,8 +273,9 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
-
-#define IO_SPACE_LIMIT 0xffffffff
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
#ifdef __KERNEL__
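With the change above, the generic port accessors are plain MMIO accesses at PCI_IOBASE plus the port number, so an architecture whose I/O-port window is memory-mapped only needs to provide PCI_IOBASE (and optionally its own IO_SPACE_LIMIT) before pulling in the generic header. A hypothetical arch header might look like:

/* Hypothetical <asm/io.h> for an arch with a fixed I/O-port window. */
#ifndef __MYARCH_IO_H
#define __MYARCH_IO_H

#define PCI_IOBASE	((void __iomem *)0xfe000000UL)
#define IO_SPACE_LIMIT	0xffff

#include <asm-generic/io.h>

#endif /* __MYARCH_IO_H */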
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 31b6188df221..b4bfe338ea0e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,6 +4,8 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
+#include <linux/mm_types.h>
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
new file mode 100644
index 000000000000..ea5d4ef81061
--- /dev/null
+++ b/include/asm-generic/sizes.h
@@ -0,0 +1,47 @@
+/*
+ * linux/include/asm-generic/sizes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_SIZES_H__
+#define __ASM_GENERIC_SIZES_H__
+
+#define SZ_1 0x00000001
+#define SZ_2 0x00000002
+#define SZ_4 0x00000004
+#define SZ_8 0x00000008
+#define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
+#define SZ_256 0x00000100
+#define SZ_512 0x00000200
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif /* __ASM_GENERIC_SIZES_H__ */
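
The new header simply centralises the SZ_* constants that several ARM platforms used to duplicate; platform code can then describe regions symbolically, for example (values made up for illustration):

#include <asm-generic/sizes.h>

#define BOARD_SDRAM_BASE 0x80000000
#define BOARD_SDRAM_SIZE SZ_64M /* 0x04000000 */
#define BOARD_SRAM_SIZE (SZ_128K + SZ_32K)
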
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b218b8513d04..ac68c999b6c2 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -288,14 +288,16 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*
* Return 0 on exception, a value greater than N if too long
*/
-#ifndef strnlen_user
+#ifndef __strnlen_user
+#define __strnlen_user strnlen
+#endif
+
static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
- return strlen((void * __force)src) + 1;
+ return __strnlen_user(src, n);
}
-#endif
static inline long strlen_user(const char __user *src)
{
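
The effect of the hunk above is that strnlen_user() is now bounded by n and delegates to a __strnlen_user hook, which defaults to plain strnlen. An architecture with a fault-safe implementation could plug its own in before including the generic header; a sketch with invented names:

/* Hypothetical arch/foo/include/asm/uaccess.h -- illustrative only */
extern long foo_strnlen_user(const char __user *s, long n);
#define __strnlen_user foo_strnlen_user

#include <asm-generic/uaccess.h>
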
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
index 8b9c3c960aeb..35638c34700f 100644
--- a/include/asm-generic/user.h
+++ b/include/asm-generic/user.h
@@ -1,8 +1,8 @@
#ifndef __ASM_GENERIC_USER_H
#define __ASM_GENERIC_USER_H
/*
- * This file may define a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on new architectures.
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
*/
#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fe77e3395b40..22d3342d6af4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
* HEAD_TEXT_SECTION
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
- * PERCPU(PAGE_SIZE)
+ * PERCPU(CACHELINE_SIZE, PAGE_SIZE)
* __init_end = .;
*
* _stext = .;
@@ -683,13 +683,18 @@
/**
* PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
* @vaddr: explicit base address (optional)
* @phdr: destination PHDR (optional)
*
- * Macro which expands to output section for percpu area. If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address. If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
*
* @phdr defines the output PHDR to use if not blank. Be warned that
* output PHDR is sticky. If @phdr is specified, the next output
@@ -700,7 +705,7 @@
* If there is no need to put the percpu section at a predetermined
* address, use PERCPU().
*/
-#define PERCPU_VADDR(vaddr, phdr) \
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
@@ -708,7 +713,9 @@
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
*(.data..percpu..readmostly) \
+ . = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
@@ -717,18 +724,18 @@
/**
* PERCPU - define output section for percpu area, simple version
+ * @cacheline: cacheline size
* @align: required alignment
*
- * Align to @align and outputs output section for percpu area. This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to @align and outputs output section for percpu area. This macro
+ * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
* __per_cpu_start will be identical.
*
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
*/
-#define PERCPU(align) \
+#define PERCPU(cacheline, align) \
. = ALIGN(align); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
@@ -736,7 +743,9 @@
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
*(.data..percpu..readmostly) \
+ . = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
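
Callers of PERCPU() and PERCPU_VADDR() now pass the cacheline size used to pad the readmostly and normal percpu subsections apart. In an architecture's vmlinux.lds.S the call would then look roughly like the following sketch (the choice of L1_CACHE_BYTES is only an assumption of this example):

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/page.h>

SECTIONS
{
	/* ... text and init sections ... */
	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
	/* ... */
}
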
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index ffec177f3481..3a60ac889520 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -2,7 +2,6 @@ header-y += drm.h
header-y += drm_mode.h
header-y += drm_sarea.h
header-y += i810_drm.h
-header-y += i830_drm.h
header-y += i915_drm.h
header-y += mga_drm.h
header-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index e5f70617dec5..8598cc94e169 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -701,6 +701,10 @@ struct drm_gem_open {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fe29aadb129d..540880e7d3ee 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -145,7 +145,10 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
-#define DRIVER_USE_PLATFORM_DEVICE 0x4000
+
+#define DRIVER_BUS_PCI 0x1
+#define DRIVER_BUS_PLATFORM 0x2
+#define DRIVER_BUS_USB 0x3
/***********************************************************************/
/** \name Begin the DRM... */
@@ -698,6 +701,19 @@ struct drm_master {
#define DRM_SCANOUTPOS_INVBL (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+struct drm_bus {
+ int bus_type;
+ int (*get_irq)(struct drm_device *dev);
+ const char *(*get_name)(struct drm_device *dev);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+ int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+ struct drm_unique *unique);
+ int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+ /* hooks that are for PCI */
+ int (*agp_init)(struct drm_device *dev);
+
+};
+
/**
* DRM driver structure. This structure represents the common code for
* a family of cards. There will be one drm_device for each card present

@@ -880,6 +896,17 @@ struct drm_driver {
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
+ /* dumb alloc support */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
/* Driver private ops for this object */
struct vm_operations_struct *gem_vm_ops;
@@ -895,8 +922,13 @@ struct drm_driver {
struct drm_ioctl_desc *ioctls;
int num_ioctls;
struct file_operations fops;
- struct pci_driver pci_driver;
- struct platform_device *platform_device;
+ union {
+ struct pci_driver *pci;
+ struct platform_device *platform_device;
+ struct usb_driver *usb;
+ } kdriver;
+ struct drm_bus *bus;
+
/* List of devices hanging off this driver */
struct list_head device_list;
};
@@ -1099,9 +1131,10 @@ struct drm_device {
#endif
struct platform_device *platformdev; /**< Platform device structure */
+ struct usb_device *usbdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
- int num_crtcs; /**< Number of CRTCs on this device */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
void *mm_private;
struct address_space *dev_mapping;
@@ -1136,28 +1169,9 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
static inline int drm_dev_to_irq(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return platform_get_irq(dev->platformdev, 0);
- else
- return dev->pdev->irq;
+ return dev->driver->bus->get_irq(dev);
}
-static inline int drm_get_pci_domain(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
-
-#ifndef __alpha__
- /* For historical reasons, drm_get_pci_domain() is busticated
- * on most archs and has to remain so for userspace interface
- * < 1.4, except on alpha which was right from the beginning
- */
- if (dev->if_version < 0x10004)
- return 0;
-#endif /* __alpha__ */
-
- return pci_domain_nr(dev->pdev->bus);
-}
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
@@ -1211,8 +1225,6 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
/*@{*/
/* Driver support (drm_drv.h) */
-extern int drm_init(struct drm_driver *driver);
-extern void drm_exit(struct drm_driver *driver);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
@@ -1422,11 +1434,7 @@ extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
-extern int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-extern int drm_get_platform_dev(struct platform_device *pdev,
- struct drm_driver *driver);
+
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;
@@ -1544,6 +1552,7 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
@@ -1616,11 +1625,21 @@ static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
return NULL;
}
-static __inline__ int drm_device_is_agp(struct drm_device *dev)
+static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
+}
+
+#include "drm_mem_util.h"
+
+extern int drm_fill_in_dev(struct drm_device *dev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver);
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+/*@}*/
+/* PCI section */
+static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
+{
if (dev->driver->device_is_agp != NULL) {
int err = (*dev->driver->device_is_agp) (dev);
@@ -1632,35 +1651,26 @@ static __inline__ int drm_device_is_agp(struct drm_device *dev)
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}
-static __inline__ int drm_device_is_pcie(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
- else
- return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
-}
-static __inline__ void drm_core_dropmap(struct drm_local_map *map)
+static __inline__ int drm_pci_device_is_pcie(struct drm_device *dev)
{
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
-#include "drm_mem_util.h"
-
-static inline void *drm_get_device(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return dev->platformdev;
- else
- return dev->pdev;
-}
-extern int drm_platform_init(struct drm_driver *driver);
-extern int drm_pci_init(struct drm_driver *driver);
-extern int drm_fill_in_dev(struct drm_device *dev,
+extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
-/*@}*/
+
+
+/* platform section */
+extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
+extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
+
+extern int drm_get_platform_dev(struct platform_device *pdev,
+ struct drm_driver *driver);
#endif /* __KERNEL__ */
#endif
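
With drm_init()/drm_exit() gone, a driver now registers through its bus-specific helper and hands over both its drm_driver and the bus driver object. A PCI driver's module glue, sketched with made-up names (not taken from any in-tree driver), would look roughly like:

static struct pci_driver foo_pci_driver = {
	.name = "foo",
	.id_table = foo_pci_ids, /* assumed defined elsewhere */
};

static struct drm_driver foo_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* fops, ioctls, dumb_create/dumb_map_offset/dumb_destroy, ... */
};

static int __init foo_init(void)
{
	return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
}

static void __exit foo_exit(void)
{
	drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
}

module_init(foo_init);
module_exit(foo_exit);
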
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 801be59f4f15..60edf9be31e5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -659,7 +659,7 @@ extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
@@ -685,8 +685,8 @@ extern void drm_mode_validate_size(struct drm_device *dev,
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
-extern int drm_mode_hsync(struct drm_display_mode *mode);
-extern int drm_mode_vrefresh(struct drm_display_mode *mode);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
extern void drm_mode_connector_list_update(struct drm_connector *connector);
@@ -798,4 +798,11 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
+
+extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 0af087a4d3b3..3650d5d011ee 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -45,14 +45,10 @@ struct drm_hash_item {
};
struct drm_open_hash {
- unsigned int size;
- unsigned int order;
- unsigned int fill;
struct hlist_head *table;
- int use_vmalloc;
+ u8 order;
};
-
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index e39177778601..b1e7809e5e15 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -42,23 +42,25 @@
#endif
struct drm_mm_node {
- struct list_head free_stack;
struct list_head node_list;
- unsigned free : 1;
+ struct list_head hole_stack;
+ unsigned hole_follows : 1;
unsigned scanned_block : 1;
unsigned scanned_prev_free : 1;
unsigned scanned_next_free : 1;
+ unsigned scanned_preceeds_hole : 1;
+ unsigned allocated : 1;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
};
struct drm_mm {
- /* List of free memory blocks, most recently freed ordered. */
- struct list_head free_stack;
- /* List of all memory nodes, ordered according to the (increasing) start
- * address of the memory node. */
- struct list_head node_list;
+ /* List of all memory nodes that immediately precede a free hole. */
+ struct list_head hole_stack;
+ /* head_node.node_list is the list of all memory nodes, ordered
+ * according to the (increasing) start address of the memory node. */
+ struct drm_mm_node head_node;
struct list_head unused_nodes;
int num_unused;
spinlock_t unused_lock;
@@ -70,8 +72,28 @@ struct drm_mm {
unsigned scanned_blocks;
unsigned long scan_start;
unsigned long scan_end;
+ struct drm_mm_node *prev_scanned_node;
};
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+static inline bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return mm->hole_stack.next;
+}
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+ &(mm)->head_node.node_list, \
+ node_list);
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+ for (entry = (mm)->prev_scanned_node, \
+ next = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL; \
+ entry != NULL; entry = next, \
+ next = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL) \
/*
* Basic range manager support (drm_mm.c)
*/
@@ -118,7 +140,15 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 1);
}
+extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end);
extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
@@ -134,11 +164,6 @@ extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
- unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
- unsigned long size, int atomic);
extern int drm_mm_pre_get(struct drm_mm *mm);
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
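
Besides the reworked free-hole tracking, this hunk adds an interface where the caller embeds the drm_mm_node in its own object instead of receiving one from drm_mm_search_free()/drm_mm_get_block(). A sketch of that usage (object and function names are invented):

struct foo_buffer {
	struct drm_mm_node node;
	/* ... */
};

static int foo_buffer_bind(struct drm_mm *mm, struct foo_buffer *buf,
			   unsigned long size)
{
	/* finds a suitable hole and marks the embedded node allocated */
	return drm_mm_insert_node(mm, &buf->node, size, PAGE_SIZE);
}

static void foo_buffer_unbind(struct foo_buffer *buf)
{
	if (drm_mm_node_allocated(&buf->node))
		drm_mm_remove_node(&buf->node);
}
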
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 0fc7397c8f1f..ae6b7a3dbec7 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -344,4 +344,33 @@ struct drm_mode_crtc_page_flip {
__u64 user_data;
};
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ uint32_t height;
+ uint32_t width;
+ uint32_t bpp;
+ uint32_t flags;
+ /* handle, pitch, size will be returned */
+ uint32_t handle;
+ uint32_t pitch;
+ uint64_t size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ uint32_t handle;
+};
+
#endif
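
Together with the DRM_IOCTL_MODE_*_DUMB numbers added above, these structures give userspace a driver-independent way to obtain a CPU-mappable scanout buffer. An illustrative userspace sketch (not part of the patch; header paths as installed may differ and error handling is omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Create a 1024x768, 32 bpp dumb buffer on drm_fd and map it. */
static void *create_and_map_dumb(int drm_fd, uint64_t *size, uint32_t *pitch)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;

	memset(&create, 0, sizeof(create));
	create.width = 1024;
	create.height = 768;
	create.bpp = 32;
	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	*size = create.size;
	*pitch = create.pitch; /* stride to use when drawing */
	/* map.offset is a fake mmap offset into the DRM fd */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, map.offset);
}
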
diff --git a/include/drm/drm_usb.h b/include/drm/drm_usb.h
new file mode 100644
index 000000000000..33506c11da8b
--- /dev/null
+++ b/include/drm/drm_usb.h
@@ -0,0 +1,15 @@
+#ifndef DRM_USB_H
+#define DRM_USB_H
+
+#include <drmP.h>
+
+#include <linux/usb.h>
+
+extern int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver);
+extern void drm_usb_exit(struct drm_driver *driver, struct usb_driver *udriver);
+
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver);
+
+#endif
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
deleted file mode 100644
index 61315c29b8f3..000000000000
--- a/include/drm/i830_drm.h
+++ /dev/null
@@ -1,342 +0,0 @@
-#ifndef _I830_DRM_H_
-#define _I830_DRM_H_
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- *
- * KW: Actually, you can't ever change them because doing so would
- * break backwards compatibility.
- */
-
-#ifndef _I830_DEFINES_
-#define _I830_DEFINES_
-
-#define I830_DMA_BUF_ORDER 12
-#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
-#define I830_DMA_BUF_NR 256
-#define I830_NR_SAREA_CLIPRECTS 8
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define I830_NR_TEX_REGIONS 64
-#define I830_LOG_MIN_TEX_REGION_SIZE 16
-
-/* KW: These aren't correct but someone set them to two and then
- * released the module. Now we can't change them as doing so would
- * break backwards compatibility.
- */
-#define I830_TEXTURE_COUNT 2
-#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
-
-#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
-
-#define I830_UPLOAD_CTX 0x1
-#define I830_UPLOAD_BUFFERS 0x2
-#define I830_UPLOAD_CLIPRECTS 0x4
-#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
-#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
-#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
-#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
-#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
-#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
-#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
-#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
-#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
-#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
-#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
-#define I830_UPLOAD_TEX0 0x10000
-#define I830_UPLOAD_TEX1 0x20000
-#define I830_UPLOAD_TEX2 0x40000
-#define I830_UPLOAD_TEX3 0x80000
-#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
-#define I830_UPLOAD_TEX_MASK 0xf0000
-#define I830_UPLOAD_TEXBLEND0 0x100000
-#define I830_UPLOAD_TEXBLEND1 0x200000
-#define I830_UPLOAD_TEXBLEND2 0x400000
-#define I830_UPLOAD_TEXBLEND3 0x800000
-#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
-#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
-#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
-#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
-#define I830_UPLOAD_STIPPLE 0x8000000
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-/* Destbuffer state
- * - backbuffer linear offset and pitch -- invarient in the current dri
- * - zbuffer linear offset and pitch -- also invarient
- * - drawing origin in back and depth buffers.
- *
- * Keep the depth/back buffer state here to accommodate private buffers
- * in the future.
- */
-
-#define I830_DESTREG_CBUFADDR 0
-#define I830_DESTREG_DBUFADDR 1
-#define I830_DESTREG_DV0 2
-#define I830_DESTREG_DV1 3
-#define I830_DESTREG_SENABLE 4
-#define I830_DESTREG_SR0 5
-#define I830_DESTREG_SR1 6
-#define I830_DESTREG_SR2 7
-#define I830_DESTREG_DR0 8
-#define I830_DESTREG_DR1 9
-#define I830_DESTREG_DR2 10
-#define I830_DESTREG_DR3 11
-#define I830_DESTREG_DR4 12
-#define I830_DEST_SETUP_SIZE 13
-
-/* Context state
- */
-#define I830_CTXREG_STATE1 0
-#define I830_CTXREG_STATE2 1
-#define I830_CTXREG_STATE3 2
-#define I830_CTXREG_STATE4 3
-#define I830_CTXREG_STATE5 4
-#define I830_CTXREG_IALPHAB 5
-#define I830_CTXREG_STENCILTST 6
-#define I830_CTXREG_ENABLES_1 7
-#define I830_CTXREG_ENABLES_2 8
-#define I830_CTXREG_AA 9
-#define I830_CTXREG_FOGCOLOR 10
-#define I830_CTXREG_BLENDCOLR0 11
-#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
-#define I830_CTXREG_VF 13
-#define I830_CTXREG_VF2 14
-#define I830_CTXREG_MCSB0 15
-#define I830_CTXREG_MCSB1 16
-#define I830_CTX_SETUP_SIZE 17
-
-/* 1.3: Stipple state
- */
-#define I830_STPREG_ST0 0
-#define I830_STPREG_ST1 1
-#define I830_STP_SETUP_SIZE 2
-
-/* Texture state (per tex unit)
- */
-
-#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
-#define I830_TEXREG_MI1 1
-#define I830_TEXREG_MI2 2
-#define I830_TEXREG_MI3 3
-#define I830_TEXREG_MI4 4
-#define I830_TEXREG_MI5 5
-#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
-#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
-#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
-#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
-#define I830_TEX_SETUP_SIZE 10
-
-#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
-#define I830_TEXREG_TM0S0 1
-#define I830_TEXREG_TM0S1 2
-#define I830_TEXREG_TM0S2 3
-#define I830_TEXREG_TM0S3 4
-#define I830_TEXREG_TM0S4 5
-#define I830_TEXREG_NOP0 6 /* noop */
-#define I830_TEXREG_NOP1 7 /* noop */
-#define I830_TEXREG_NOP2 8 /* noop */
-#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
-#define __I830_TEX_SETUP_SIZE 10
-
-#define I830_FRONT 0x1
-#define I830_BACK 0x2
-#define I830_DEPTH 0x4
-
-#endif /* _I830_DEFINES_ */
-
-typedef struct _drm_i830_init {
- enum {
- I830_INIT_DMA = 0x01,
- I830_CLEANUP_DMA = 0x02
- } func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
- unsigned int back_pitch;
- unsigned int depth_pitch;
- unsigned int cpp;
-} drm_i830_init_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_i830_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char in_use; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_i830_tex_region_t;
-
-typedef struct _drm_i830_sarea {
- unsigned int ContextState[I830_CTX_SETUP_SIZE];
- unsigned int BufferState[I830_DEST_SETUP_SIZE];
- unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
- unsigned int Palette[2][256];
- unsigned int dirty;
-
- unsigned int nbox;
- struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
-
- /* Maintain an LRU of contiguous regions of texture space. If
- * you think you own a region of texture memory, and it has an
- * age different to the one you set, then you are mistaken and
- * it has been stolen by another client. If global texAge
- * hasn't changed, there is no need to walk the list.
- *
- * These regions can be used as a proxy for the fine-grained
- * texture information of other clients - by maintaining them
- * in the same lru which is used to age their own textures,
- * clients have an approximate lru for the whole of global
- * texture space, and can make informed decisions as to which
- * areas to kick out. There is no need to choose whether to
- * kick out your own texture or someone else's - simply eject
- * them all in LRU order.
- */
-
- drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
- /* Last elt is sentinal */
- int texAge; /* last time texture was uploaded */
- int last_enqueue; /* last time a buffer was enqueued */
- int last_dispatch; /* age of the most recently dispatched buffer */
- int last_quiescent; /* */
- int ctxOwner; /* last context to upload state */
-
- int vertex_prim;
-
- int pf_enabled; /* is pageflipping allowed? */
- int pf_active;
- int pf_current_page; /* which buffer is being displayed? */
-
- int perf_boxes; /* performance boxes to be displayed */
-
- /* Here's the state for texunits 2,3:
- */
- unsigned int TexState2[I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed2;
-
- unsigned int TexState3[I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed3;
-
- unsigned int StippleState[I830_STP_SETUP_SIZE];
-} drm_i830_sarea_t;
-
-/* Flags for perf_boxes
- */
-#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
-#define I830_BOX_FLIP 0x2 /* populated by kernel */
-#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
-#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
-#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
-
-/* I830 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_I830_INIT 0x00
-#define DRM_I830_VERTEX 0x01
-#define DRM_I830_CLEAR 0x02
-#define DRM_I830_FLUSH 0x03
-#define DRM_I830_GETAGE 0x04
-#define DRM_I830_GETBUF 0x05
-#define DRM_I830_SWAP 0x06
-#define DRM_I830_COPY 0x07
-#define DRM_I830_DOCOPY 0x08
-#define DRM_I830_FLIP 0x09
-#define DRM_I830_IRQ_EMIT 0x0a
-#define DRM_I830_IRQ_WAIT 0x0b
-#define DRM_I830_GETPARAM 0x0c
-#define DRM_I830_SETPARAM 0x0d
-
-#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
-#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
-
-typedef struct _drm_i830_clear {
- int clear_color;
- int clear_depth;
- int flags;
- unsigned int clear_colormask;
- unsigned int clear_depthmask;
-} drm_i830_clear_t;
-
-/* These may be placeholders if we have more cliprects than
- * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
- * false, indicating that the buffer will be dispatched again with a
- * new set of cliprects.
- */
-typedef struct _drm_i830_vertex {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int discard; /* client is finished with the buffer? */
-} drm_i830_vertex_t;
-
-typedef struct _drm_i830_copy_t {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- void __user *address; /* Address to copy from */
-} drm_i830_copy_t;
-
-typedef struct drm_i830_dma {
- void __user *virtual;
- int request_idx;
- int request_size;
- int granted;
-} drm_i830_dma_t;
-
-/* 1.3: Userspace can request & wait on irq's:
- */
-typedef struct drm_i830_irq_emit {
- int __user *irq_seq;
-} drm_i830_irq_emit_t;
-
-typedef struct drm_i830_irq_wait {
- int irq_seq;
-} drm_i830_irq_wait_t;
-
-/* 1.3: New ioctl to query kernel params:
- */
-#define I830_PARAM_IRQ_ACTIVE 1
-
-typedef struct drm_i830_getparam {
- int param;
- int __user *value;
-} drm_i830_getparam_t;
-
-/* 1.3: New ioctl to set kernel params:
- */
-#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
-
-typedef struct drm_i830_setparam {
- int param;
- int value;
-} drm_i830_setparam_t;
-
-#endif /* _I830_DRM_H_ */
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index e2cfe80f6fca..5edd3a76fffa 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -94,6 +94,7 @@ struct drm_nouveau_setparam {
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP 0x00000001
#define NOUVEAU_GEM_TILE_32BPP 0x00000002
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index e5c607a02d57..3dec41cf8342 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -908,6 +908,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_WANT_HYPERZ 0x07
#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
+#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
struct drm_radeon_info {
uint32_t request;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1da8af6ac884..efed0820d9fa 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -50,13 +50,15 @@ struct ttm_backend_func {
* @pages: Array of pointers to ttm pages.
* @dummy_read_page: Page to be used instead of NULL pages in the
* array @pages.
+ * @dma_addrs: Array of DMA (bus) address of the ttm pages.
*
* Populate the backend with ttm pages. Depending on the backend,
* it may or may not copy the @pages array.
*/
int (*populate) (struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page);
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs);
/**
* struct ttm_backend_func member clear
*
@@ -149,6 +151,7 @@ enum ttm_caching_state {
* @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
+ * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -173,6 +176,7 @@ struct ttm_tt {
tt_unbound,
tt_unpopulated,
} state;
+ dma_addr_t *dma_address;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 116821448c38..8062890f725e 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -36,11 +36,13 @@
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state for the page.
* @count: number of pages to allocate.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/
int ttm_get_pages(struct list_head *pages,
int flags,
enum ttm_caching_state cstate,
- unsigned count);
+ unsigned count,
+ dma_addr_t *dma_address);
/**
* Put linked list of pages to pool.
*
@@ -49,11 +51,13 @@ int ttm_get_pages(struct list_head *pages,
* count.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/
void ttm_put_pages(struct list_head *pages,
unsigned page_count,
int flags,
- enum ttm_caching_state cstate);
+ enum ttm_caching_state cstate,
+ dma_addr_t *dma_address);
/**
* Initialize pool allocator.
*/
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b0ada6f37dd6..525a565c2f7e 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -196,6 +196,7 @@ header-y += inet_diag.h
header-y += inotify.h
header-y += input.h
header-y += ioctl.h
+header-y += ioq.h
header-y += ip.h
header-y += ip6_tunnel.h
header-y += ip_vs.h
@@ -330,6 +331,7 @@ header-y += serial_core.h
header-y += serial_reg.h
header-y += serio.h
header-y += shm.h
+header-y += shm_signal.h
header-y += signal.h
header-y += signalfd.h
header-y += snmp.h
@@ -370,6 +372,8 @@ header-y += unistd.h
header-y += usbdevice_fs.h
header-y += utime.h
header-y += utsname.h
+header-y += vbus_pci.h
+header-y += venet.h
header-y += veth.h
header-y += vhost.h
header-y += videodev2.h
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
index 7180013a4a3a..4afd7102459d 100644
--- a/include/linux/acpi_io.h
+++ b/include/linux/acpi_io.h
@@ -10,7 +10,6 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
return ioremap_cache(phys, size);
}
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
#endif
diff --git a/include/linux/aer.h b/include/linux/aer.h
index f7df1eefc107..8414de22a779 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,28 @@
#ifndef _AER_H_
#define _AER_H_
+struct aer_header_log_regs {
+ unsigned int dw0;
+ unsigned int dw1;
+ unsigned int dw2;
+ unsigned int dw3;
+};
+
+struct aer_capability_regs {
+ u32 header;
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ struct aer_header_log_regs header_log;
+ u32 root_command;
+ u32 root_status;
+ u16 cor_err_source;
+ u16 uncor_err_source;
+};
+
#if defined(CONFIG_PCIEAER)
/* pci-e port driver needs this function to enable aer */
extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
@@ -27,5 +49,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
#endif
+extern void cper_print_aer(const char *prefix, int cper_severity,
+ struct aer_capability_regs *aer);
#endif //_AER_H_
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 9e7f259346e1..fcbbe71a3cc1 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -43,12 +43,12 @@ struct amba_id {
struct amba_driver {
struct device_driver drv;
- int (*probe)(struct amba_device *, struct amba_id *);
+ int (*probe)(struct amba_device *, const struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
int (*suspend)(struct amba_device *, pm_message_t);
int (*resume)(struct amba_device *);
- struct amba_id *id_table;
+ const struct amba_id *id_table;
};
enum amba_vendor {
@@ -56,6 +56,10 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
};
+extern struct bus_type amba_bustype;
+
+#define to_amba_device(d) container_of(d, struct amba_device, dev)
+
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
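
The probe prototype and id_table are constified here, so existing drivers only need the matching const qualifiers; a minimal driver skeleton after this change (names and periphid are invented):

static const struct amba_id foo_ids[] = {
	{ .id = 0x00041abc, .mask = 0x000fffff }, /* made-up periphid */
	{ 0, 0 },
};

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* id now points into the driver's const table */
	return 0;
}

static struct amba_driver foo_driver = {
	.drv = { .name = "foo" },
	.id_table = foo_ids,
	.probe = foo_probe,
	/* .remove, .shutdown, ... */
};
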
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index be33b3affc8a..24d26efd1432 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -53,6 +53,7 @@
#define CNTL_LCDBPP8 (3 << 1)
#define CNTL_LCDBPP16 (4 << 1)
#define CNTL_LCDBPP16_565 (6 << 1)
+#define CNTL_LCDBPP16_444 (7 << 1)
#define CNTL_LCDBPP24 (5 << 1)
#define CNTL_LCDBW (1 << 4)
#define CNTL_LCDTFT (1 << 5)
@@ -66,6 +67,32 @@
#define CNTL_LDMAFIFOTIME (1 << 15)
#define CNTL_WATERMARK (1 << 16)
+enum {
+ /* individual formats */
+ CLCD_CAP_RGB444 = (1 << 0),
+ CLCD_CAP_RGB5551 = (1 << 1),
+ CLCD_CAP_RGB565 = (1 << 2),
+ CLCD_CAP_RGB888 = (1 << 3),
+ CLCD_CAP_BGR444 = (1 << 4),
+ CLCD_CAP_BGR5551 = (1 << 5),
+ CLCD_CAP_BGR565 = (1 << 6),
+ CLCD_CAP_BGR888 = (1 << 7),
+
+ /* connection layouts */
+ CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
+ CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
+ CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
+ CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
+
+ /* red/blue ordering */
+ CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
+ CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
+ CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
+ CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
+
+ CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
+};
+
struct clcd_panel {
struct fb_videomode mode;
signed short width; /* width in mm */
@@ -73,6 +100,7 @@ struct clcd_panel {
u32 tim2;
u32 tim3;
u32 cntl;
+ u32 caps;
unsigned int bpp:8,
fixedtimings:1,
grayscale:1;
@@ -97,6 +125,11 @@ struct clcd_board {
const char *name;
/*
+ * Optional. Hardware capability flags.
+ */
+ u32 caps;
+
+ /*
* Optional. Check whether the var structure is acceptable
* for this display.
*/
@@ -155,34 +188,35 @@ struct clcd_fb {
static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
{
+ struct fb_var_screeninfo *var = &fb->fb.var;
u32 val, cpl;
/*
* Program the CLCD controller registers and start the CLCD
*/
- val = ((fb->fb.var.xres / 16) - 1) << 2;
- val |= (fb->fb.var.hsync_len - 1) << 8;
- val |= (fb->fb.var.right_margin - 1) << 16;
- val |= (fb->fb.var.left_margin - 1) << 24;
+ val = ((var->xres / 16) - 1) << 2;
+ val |= (var->hsync_len - 1) << 8;
+ val |= (var->right_margin - 1) << 16;
+ val |= (var->left_margin - 1) << 24;
regs->tim0 = val;
- val = fb->fb.var.yres;
+ val = var->yres;
if (fb->panel->cntl & CNTL_LCDDUAL)
val /= 2;
val -= 1;
- val |= (fb->fb.var.vsync_len - 1) << 10;
- val |= fb->fb.var.lower_margin << 16;
- val |= fb->fb.var.upper_margin << 24;
+ val |= (var->vsync_len - 1) << 10;
+ val |= var->lower_margin << 16;
+ val |= var->upper_margin << 24;
regs->tim1 = val;
val = fb->panel->tim2;
- val |= fb->fb.var.sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
- val |= fb->fb.var.sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
+ val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
+ val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
- cpl = fb->fb.var.xres_virtual;
+ cpl = var->xres_virtual;
if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
/* / 1 */;
- else if (!fb->fb.var.grayscale) /* STN color */
+ else if (!var->grayscale) /* STN color */
cpl = cpl * 8 / 3;
else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
cpl /= 8;
@@ -194,10 +228,22 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
regs->tim3 = fb->panel->tim3;
val = fb->panel->cntl;
- if (fb->fb.var.grayscale)
+ if (var->grayscale)
val |= CNTL_LCDBW;
- switch (fb->fb.var.bits_per_pixel) {
+ if (fb->panel->caps && fb->board->caps &&
+ var->bits_per_pixel >= 16) {
+ /*
+ * if board and panel supply capabilities, we can support
+ * changing BGR/RGB depending on supplied parameters
+ */
+ if (var->red.offset == 0)
+ val &= ~CNTL_BGR;
+ else
+ val |= CNTL_BGR;
+ }
+
+ switch (var->bits_per_pixel) {
case 1:
val |= CNTL_LCDBPP1;
break;
@@ -212,15 +258,17 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
break;
case 16:
/*
- * PL110 cannot choose between 5551 and 565 modes in
- * its control register
+ * PL110 cannot choose between 5551 and 565 modes in its
+ * control register. It is possible to use 565 with
+ * custom external wiring.
*/
- if ((fb->dev->periphid & 0x000fffff) == 0x00041110)
+ if (amba_part(fb->dev) == 0x110 ||
+ var->green.length == 5)
val |= CNTL_LCDBPP16;
- else if (fb->fb.var.green.length == 5)
- val |= CNTL_LCDBPP16;
- else
+ else if (var->green.length == 6)
val |= CNTL_LCDBPP16_565;
+ else
+ val |= CNTL_LCDBPP16_444;
break;
case 32:
val |= CNTL_LCDBPP24;
@@ -228,7 +276,7 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
}
regs->cntl = val;
- regs->pixclock = fb->fb.var.pixclock;
+ regs->pixclock = var->pixclock;
}
static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index f4ee9acc9721..f60227088b7b 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -6,6 +6,9 @@
#include <linux/mmc/host.h>
+/* Just some dummy forwarding */
+struct dma_chan;
+
/**
* struct mmci_platform_data - platform configuration for the MMCI
* (also known as PL180) block.
@@ -27,6 +30,17 @@
* @cd_invert: true if the gpio_cd pin value is active low
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
+ * @dma_filter: function used to select an appropriate RX and TX
+ * DMA channel to be used for DMA, if and only if you're deploying the
+ * generic DMA engine
+ * @dma_rx_param: parameter passed to the DMA allocation
+ * filter in order to select an appropriate RX channel. If
+ * there is a bidirectional RX+TX channel, then just specify
+ * this and leave dma_tx_param set to NULL
+ * @dma_tx_param: parameter passed to the DMA allocation
+ * filter in order to select an appropriate TX channel. If this
+ * is NULL the driver will attempt to use the RX channel as a
+ * bidirectional channel
*/
struct mmci_platform_data {
unsigned int f_max;
@@ -38,6 +52,9 @@ struct mmci_platform_data {
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ void *dma_rx_param;
+ void *dma_tx_param;
};
#endif
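
A board that wants MMCI to use dmaengine supplies the filter and channel cookies through this platform data. Everything below other than the mmci_platform_data fields is a made-up example (it assumes <linux/dmaengine.h> for struct dma_chan):

static int board_mmci_rx_cookie; /* matched against chan->private; invented */

static bool board_mmci_dma_filter(struct dma_chan *chan, void *param)
{
	/* platform-specific test, e.g. match a cookie stored by the
	 * DMA controller driver in chan->private */
	return chan->private == param;
}

static struct mmci_platform_data board_mmci_data = {
	.dma_filter = board_mmci_dma_filter,
	.dma_rx_param = &board_mmci_rx_cookie,
	/* dma_tx_param left NULL: the RX channel is used in both directions */
};
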
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 359df0487690..9d339eb27881 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -103,6 +103,8 @@
#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */
#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */
#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
+#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
+#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index daf8c480c786..109226994d75 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -142,6 +142,7 @@ extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
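
BITMAP_FIRST_WORD_MASK() is the counterpart of the existing last-word mask: it selects the bits of a range that fall in the range's first word. For a range that fits inside a single word the two masks combine directly, which is how lib/bitmap.c uses them; a small illustration (not taken from the patch):

/* Clear bits [start, start + nbits) when the whole range lies in *word,
 * i.e. start and start + nbits both fall within one BITS_PER_LONG word. */
static void clear_range_in_word(unsigned long *word,
				unsigned int start, unsigned int nbits)
{
	unsigned long mask = BITMAP_FIRST_WORD_MASK(start) &
			     BITMAP_LAST_WORD_MASK(start + nbits);

	*word &= ~mask; /* e.g. start=4, nbits=8 clears bits 4..11 */
}
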
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 46ad5197537a..dddedfc0af81 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -148,6 +148,7 @@ enum rq_flag_bits {
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_FLUSH, /* request for cache flush */
+ __REQ_FLUSH_SEQ, /* request for flush sequence */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
@@ -188,6 +189,7 @@ enum rq_flag_bits {
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_FLUSH (1 << __REQ_FLUSH)
+#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d18ff34670a..e3ee74fc5903 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,11 +108,17 @@ struct request {
/*
* Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it.
+ * more they have to dynamically allocate it. Flush requests are
+ * never put on the IO scheduler. So let the flush fields share
+ * space with the three elevator_private pointers.
*/
- void *elevator_private;
- void *elevator_private2;
- void *elevator_private3;
+ union {
+ void *elevator_private[3];
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ } flush;
+ };
struct gendisk *rq_disk;
struct hd_struct *part;
@@ -363,11 +369,12 @@ struct request_queue
* for flush operations
*/
unsigned int flush_flags;
- unsigned int flush_seq;
- int flush_err;
+ unsigned int flush_pending_idx:1;
+ unsigned int flush_running_idx:1;
+ unsigned long flush_pending_since;
+ struct list_head flush_queue[2];
+ struct list_head flush_data_in_flight;
struct request flush_rq;
- struct request *orig_flush_rq;
- struct list_head pending_flushes;
struct mutex sysfs_lock;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ce104e33cd22..e654fa239916 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -474,7 +474,8 @@ struct cgroup_subsys {
struct cgroup *old_cgrp, struct task_struct *tsk,
bool threadgroup);
void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
- void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
+ void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup *old_cgrp, struct task_struct *task);
int (*populate)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
@@ -626,6 +627,7 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
#else /* !CONFIG_CGROUPS */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ccefff02b6cb..cdbfcb8780ec 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -65,4 +65,8 @@ SUBSYS(net_cls)
SUBSYS(blkio)
#endif
+#ifdef CONFIG_CGROUP_PERF
+SUBSYS(perf)
+#endif
+
/* */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 3104aaff5dd0..372a25839fd1 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -388,5 +388,7 @@ struct cper_sec_pcie {
#pragma pack()
u64 cper_next_record_id(void);
+void cper_print_bits(const char *prefix, unsigned int bits,
+ const char *strs[], unsigned int strs_size);
#endif
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644
index 000000000000..473771a528c0
--- /dev/null
+++ b/include/linux/cpu_rmap.h
@@ -0,0 +1,73 @@
+/*
+ * cpu_rmap.h: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ * based on affinity masks
+ */
+struct cpu_rmap {
+ u16 size, used;
+ void **obj;
+ struct {
+ u16 index;
+ u16 dist;
+ } near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+
+/**
+ * free_cpu_rmap - free CPU affinity reverse-map
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ */
+static inline void free_cpu_rmap(struct cpu_rmap *rmap)
+{
+ kfree(rmap);
+}
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->obj[rmap->near[cpu].index];
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+ return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif
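
The typical consumer of this new API is a multiqueue network driver that wants to pick the RX queue closest to the CPU handling a flow. The sketch below wraps the cpu_rmap calls declared above in invented driver structures:

static int foo_setup_irq_rmap(struct foo_nic *nic)
{
	unsigned int i;
	int rc;

	nic->rx_cpu_rmap = alloc_irq_cpu_rmap(nic->num_rx_queues);
	if (!nic->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < nic->num_rx_queues; i++) {
		rc = irq_cpu_rmap_add(nic->rx_cpu_rmap, nic->rxq[i].irq);
		if (rc) {
			free_irq_cpu_rmap(nic->rx_cpu_rmap);
			return rc;
		}
	}
	return 0;
}

/* hot path: steer work to the queue nearest the current CPU */
static u16 foo_pick_rx_queue(struct foo_nic *nic)
{
	return cpu_rmap_lookup_index(nic->rx_cpu_rmap, raw_smp_processor_id());
}
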
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 68cd248f6d3e..4c5b26e0cc48 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -25,6 +25,11 @@
/* IEEE 802.1Qaz std supported values */
#define IEEE_8021QAZ_MAX_TCS 8
+#define IEEE_8021QAZ_TSA_STRICT 0
+#define IEEE_8021QAZ_TSA_CB_SHABER 1
+#define IEEE_8021QAZ_TSA_ETS 2
+#define IEEE_8021QAZ_TSA_VENDOR 255
+
/* This structure contains the IEEE 802.1Qaz ETS managed object
*
* @willing: willing bit in ETS configuration TLV
@@ -101,8 +106,8 @@ struct ieee_pfc {
*/
struct dcb_app {
__u8 selector;
- __u32 protocol;
__u8 priority;
+ __u16 protocol;
};
struct dcbmsg {
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 010e2d87ed75..d638e85dc501 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -279,8 +279,6 @@ enum dccp_state {
DCCP_MAX_STATES
};
-#define DCCP_STATE_MASK 0x1f
-
enum {
DCCPF_OPEN = TCPF_ESTABLISHED,
DCCPF_REQUESTING = TCPF_SYN_SENT,
diff --git a/include/linux/device.h b/include/linux/device.h
index 1bf5cf0b4513..144ec135875f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -128,9 +128,7 @@ struct device_driver {
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
-#if defined(CONFIG_OF)
const struct of_device_id *of_match_table;
-#endif
int (*probe) (struct device *dev);
int (*remove) (struct device *dev);
@@ -422,6 +420,7 @@ struct device {
void *platform_data; /* Platform specific data, device
core doesn't touch it */
struct dev_pm_info power;
+ struct dev_power_domain *pwr_domain;
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
@@ -441,9 +440,9 @@ struct device {
override */
/* arch specific additions */
struct dev_archdata archdata;
-#ifdef CONFIG_OF
- struct device_node *of_node;
-#endif
+
+ struct device_node *of_node; /* associated device tree node */
+ const struct of_device_id *of_match; /* matching of_device_id from driver */
dev_t devt; /* dev_t, creates the sysfs "dev" */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 78bbf47bbb96..3708455ee6c3 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 19
-#define DM_VERSION_PATCHLEVEL 1
-#define DM_VERSION_EXTRA "-ioctl (2011-01-07)"
+#define DM_VERSION_MINOR 20
+#define DM_VERSION_PATCHLEVEL 0
+#define DM_VERSION_EXTRA "-ioctl (2011-02-02)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -328,4 +328,10 @@ enum {
*/
#define DM_UUID_FLAG (1 << 14) /* In */
+/*
+ * If set, all buffers are wiped after use. Use when sending
+ * or requesting sensitive data such as an encryption key.
+ */
+#define DM_SECURE_DATA_FLAG (1 << 15) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index c8aad713a046..deec66b37180 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -16,9 +16,12 @@
/**
* struct dw_dma_platform_data - Controller configuration parameters
* @nr_channels: Number of channels supported by hardware (max 8)
+ * @is_private: The device channels should be marked as private and not for
+ * use by the general purpose DMA channel allocator.
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
+ bool is_private;
};
/**
@@ -52,6 +55,8 @@ struct dw_dma_slave {
enum dw_dma_slave_width reg_width;
u32 cfg_hi;
u32 cfg_lo;
+ int src_master;
+ int dst_master;
};
/* Platform-configurable bits in CFG_HI */
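
Platform code describing a dw_dmac instance can now mark all channels private (reserved for slave transfers) and tell each slave which AHB masters to use; the values below are illustrative only:

static struct dw_dma_platform_data board_dw_dma_data = {
	.nr_channels = 8,
	.is_private = true, /* keep channels away from the generic allocator */
};

static struct dw_dma_slave board_spi_tx_slave = {
	.src_master = 0, /* fetch data through AHB master 0 */
	.dst_master = 1, /* write to the peripheral via master 1 */
	/* reg_width, cfg_hi/cfg_lo as required by the peripheral */
};
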
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4d857973d2c9..39b68edb388d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -167,6 +167,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
#define ELEVATOR_INSERT_REQUEUE 4
+#define ELEVATOR_INSERT_FLUSH 5
/*
* return values from elevator_may_queue_fn
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 1908929204a9..aac3e2eeb4fd 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -251,6 +251,7 @@ enum ethtool_stringset {
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
ETH_SS_NTUPLE_FILTERS,
+ ETH_SS_FEATURES,
};
/* for passing string sets for data tagging */
@@ -523,6 +524,92 @@ struct ethtool_flash {
char data[ETHTOOL_FLASH_MAX_FILENAME];
};
+/* for returning and changing feature sets */
+
+/**
+ * struct ethtool_get_features_block - block with state of 32 features
+ * @available: mask of changeable features
+ * @requested: mask of features requested to be enabled if possible
+ * @active: mask of currently enabled features
+ * @never_changed: mask of features not changeable for any device
+ */
+struct ethtool_get_features_block {
+ __u32 available;
+ __u32 requested;
+ __u32 active;
+ __u32 never_changed;
+};
+
+/**
+ * struct ethtool_gfeatures - command to get state of device's features
+ * @cmd: command number = %ETHTOOL_GFEATURES
+ * @size: in: number of elements in the features[] array;
+ * out: number of elements in features[] needed to hold all features
+ * @features: state of features
+ */
+struct ethtool_gfeatures {
+ __u32 cmd;
+ __u32 size;
+ struct ethtool_get_features_block features[0];
+};
+
+/**
+ * struct ethtool_set_features_block - block with request for 32 features
+ * @valid: mask of features to be changed
+ * @requested: values of features to be changed
+ */
+struct ethtool_set_features_block {
+ __u32 valid;
+ __u32 requested;
+};
+
+/**
+ * struct ethtool_sfeatures - command to request change in device's features
+ * @cmd: command number = %ETHTOOL_SFEATURES
+ * @size: array size of the features[] array
+ * @features: feature change masks
+ */
+struct ethtool_sfeatures {
+ __u32 cmd;
+ __u32 size;
+ struct ethtool_set_features_block features[0];
+};
+
+/*
+ * %ETHTOOL_SFEATURES changes features present in features[].valid to the
+ * values of corresponding bits in features[].requested. Bits in .requested
+ * not set in .valid or not changeable are ignored.
+ *
+ * Returns %EINVAL when .valid contains undefined or never-changeable bits
+ * or size is not equal to required number of features words (32-bit blocks).
+ * Returns >= 0 if request was completed; bits set in the value mean:
+ * %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not
+ * changeable (not present in %ETHTOOL_GFEATURES' features[].available);
+ * those bits were ignored.
+ * %ETHTOOL_F_WISH - some or all changes requested were recorded but the
+ * resulting state of bits masked by .valid is not equal to .requested.
+ * Probably there are other device-specific constraints on some features
+ * in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered
+ * here as though ignored bits were cleared.
+ * %ETHTOOL_F_COMPAT - some or all changes requested were made by calling
+ * compatibility functions. Requested offload state cannot be properly
+ * managed by kernel.
+ *
+ * The meaning of bits in the masks is obtained via %ETHTOOL_GSSET_INFO (number of
+ * bits in the arrays - always a multiple of 32) and %ETHTOOL_GSTRINGS commands
+ * for the ETH_SS_FEATURES string set. The first entry in the table corresponds to the
+ * least significant bit in the features[0] fields. Empty strings mark undefined features.
+ */
+enum ethtool_sfeatures_retval_bits {
+ ETHTOOL_F_UNSUPPORTED__BIT,
+ ETHTOOL_F_WISH__BIT,
+ ETHTOOL_F_COMPAT__BIT,
+};
+
+#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT)
+#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT)
+#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT)
+
#ifdef __KERNEL__
#include <linux/rculist.h>
@@ -543,7 +630,6 @@ struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_rx_csum(struct net_device *dev);
u32 ethtool_op_get_tx_csum(struct net_device *dev);
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
@@ -744,6 +830,9 @@ struct ethtool_ops {
#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */
#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */
+#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */
+#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */
+
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
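Editor's sketch (not part of this patch): the feature blocks are variable-length, so a caller allocates the header plus however many 32-bit blocks it expects and passes the whole thing through SIOCETHTOOL. Interface name and block count below are examples.

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
	        struct {
	                struct ethtool_gfeatures hdr;
	                struct ethtool_get_features_block blocks[2];
	        } gf = { .hdr = { .cmd = ETHTOOL_GFEATURES, .size = 2 } };
	        struct ifreq ifr = { 0 };
	        int fd = socket(AF_INET, SOCK_DGRAM, 0);

	        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	        ifr.ifr_data = (void *)&gf;

	        if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
	                printf("features[0]: active 0x%08x of available 0x%08x\n",
	                       gf.blocks[0].active, gf.blocks[0].available);
	        return 0;
	}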
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 65990ef612f5..6043c64c207a 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -884,7 +884,8 @@ extern int ext3fs_dirhash(const char *name, int len, struct
dx_hash_info *hinfo);
/* ialloc.c */
-extern struct inode * ext3_new_inode (handle_t *, struct inode *, int);
+extern struct inode * ext3_new_inode (handle_t *, struct inode *,
+ const struct qstr *, int);
extern void ext3_free_inode (handle_t *, struct inode *);
extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
extern unsigned long ext3_count_free_inodes (struct super_block *);
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 6c6133f76e16..b5fac2ba4a07 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -36,9 +36,12 @@
#define FAN_UNLIMITED_QUEUE 0x00000010
#define FAN_UNLIMITED_MARKS 0x00000020
+/* Attempt read-only open if read-write failed. */
+#define FAN_READONLY_FALLBACK 0x00000040
+
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_ALL_CLASS_BITS | FAN_UNLIMITED_QUEUE |\
- FAN_UNLIMITED_MARKS)
+ FAN_UNLIMITED_MARKS | FAN_READONLY_FALLBACK)
/* flags used for fanotify_modify_mark() */
#define FAN_MARK_ADD 0x00000001
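Editor's sketch (not part of this patch): the new flag is consumed at fanotify_init() time. This assumes glibc's fanotify_init()/fanotify_mark() wrappers and kernel headers carrying FAN_READONLY_FALLBACK; error handling is trimmed.

	#include <fcntl.h>
	#include <sys/fanotify.h>

	/* Ask fanotify to retry with O_RDONLY when the O_RDWR open of a
	 * monitored object fails (sketch only). */
	int init_listener(void)
	{
	        int fd = fanotify_init(FAN_CLASS_CONTENT | FAN_READONLY_FALLBACK,
	                               O_RDWR);

	        if (fd >= 0)
	                fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
	                              FAN_OPEN_PERM, AT_FDCWD, "/");
	        return fd;
	}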
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 68ba85a00c06..b2a36391d2a1 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -152,6 +152,8 @@
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */
+#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */
+
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
unsigned long smem_start; /* Start of frame buffer mem */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 9a3f5f9383f6..b6d21d5a11a2 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -89,7 +89,7 @@ struct fw_card {
int current_tlabel;
u64 tlabel_mask;
struct list_head transaction_list;
- unsigned long reset_jiffies;
+ u64 reset_jiffies;
u32 split_timeout_hi;
u32 split_timeout_lo;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bd3215940c37..af5bd7a629e5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -649,6 +649,7 @@ struct address_space {
spinlock_t private_lock; /* for use by the address_space */
struct list_head private_list; /* ditto */
struct address_space *assoc_mapping; /* ditto */
+ struct mutex unmap_mutex; /* to protect unmapping */
} __attribute__((aligned(sizeof(long))));
/*
* On most architectures that alignment is already the case; but
@@ -797,8 +798,7 @@ struct inode {
#endif
#ifdef CONFIG_IMA
- /* protected by i_lock */
- unsigned int i_readcount; /* struct files open RO */
+ atomic_t i_readcount; /* struct files open RO */
#endif
atomic_t i_writecount;
#ifdef CONFIG_SECURITY
@@ -2139,7 +2139,7 @@ extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
extern int invalidate_partition(struct gendisk *, int);
#endif
unsigned long invalidate_mapping_pages(struct address_space *mapping,
@@ -2199,6 +2199,26 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
}
+#ifdef CONFIG_IMA
+static inline void i_readcount_dec(struct inode *inode)
+{
+ BUG_ON(!atomic_read(&inode->i_readcount));
+ atomic_dec(&inode->i_readcount);
+}
+static inline void i_readcount_inc(struct inode *inode)
+{
+ atomic_inc(&inode->i_readcount);
+}
+#else
+static inline void i_readcount_dec(struct inode *inode)
+{
+ return;
+}
+static inline void i_readcount_inc(struct inode *inode)
+{
+ return;
+}
+#endif
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);
@@ -2225,6 +2245,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
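Editor's sketch (illustrative, not the actual fs/ call sites): with i_readcount now an atomic_t, IMA no longer needs i_lock to maintain it; the helpers are simply paired around the lifetime of a read-only open.

	#include <linux/fs.h>

	/* Called once a file has been opened read-only. */
	static void track_ro_open(struct file *file)
	{
	        struct inode *inode = file->f_path.dentry->d_inode;

	        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
	                i_readcount_inc(inode);
	}

	/* Called from the matching __fput()/release path. */
	static void track_ro_close(struct file *file)
	{
	        struct inode *inode = file->f_path.dentry->d_inode;

	        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
	                i_readcount_dec(inode);
	}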
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 69ad89b50489..8aa4731c9f6f 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -125,6 +125,7 @@ struct fsnotify_group {
const struct fsnotify_ops *ops; /* how this group handles things */
+ struct mutex mutex;
/* needed to send notification to userspace */
struct mutex notification_mutex; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
@@ -168,6 +169,7 @@ struct fsnotify_group {
wait_queue_head_t access_waitq;
atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
+ bool readonly_fallback;
int f_flags;
unsigned int max_marks;
struct user_struct *user;
@@ -415,8 +417,6 @@ extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
/* run all the marks in a group, and flag them to be freed */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
-extern void fsnotify_get_mark(struct fsnotify_mark *mark);
-extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
@@ -430,6 +430,16 @@ extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_ev
extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
struct fsnotify_event *new_event);
+static inline void fsnotify_get_mark(struct fsnotify_mark *mark)
+{
+ atomic_inc(&mark->refcnt);
+}
+
+static inline void fsnotify_put_mark(struct fsnotify_mark *mark)
+{
+ if (atomic_dec_and_test(&mark->refcnt))
+ mark->free_mark(mark);
+}
#else
static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
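Editor's sketch (illustrative only): making fsnotify_get_mark()/fsnotify_put_mark() inline keeps the refcounting cheap for backends; usage stays plain kref-style pinning.

	#include <linux/fsnotify_backend.h>

	/* Pin a mark across a region where the group's locks are dropped. */
	static void example_use_mark(struct fsnotify_mark *mark)
	{
	        fsnotify_get_mark(mark);        /* take a reference */
	        /* ... work that may sleep or drop locks ... */
	        fsnotify_put_mark(mark);        /* drop it; frees on last put */
	}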
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dcd6a7c3a435..ca29e03c1fac 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
static inline int task_curr_ret_stack(struct task_struct *t)
{
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 9869ef3674ac..400b008d4e54 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -1,8 +1,28 @@
+#ifndef GENALLOC_H
+#define GENALLOC_H
/*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool, a lock still has to
+ * be taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in an NMI handler. So code that uses
+ * the allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
@@ -13,7 +33,7 @@
* General purpose special memory pool descriptor.
*/
struct gen_pool {
- rwlock_t lock;
+ spinlock_t lock;
struct list_head chunks; /* list of chunks in this pool */
int min_alloc_order; /* minimum allocation order */
};
@@ -22,15 +42,29 @@ struct gen_pool {
* General purpose special memory pool chunk descriptor.
*/
struct gen_pool_chunk {
- spinlock_t lock;
struct list_head next_chunk; /* next chunk in pool */
+ atomic_t avail;
unsigned long start_addr; /* starting address of memory chunk */
unsigned long end_addr; /* ending address of memory chunk */
unsigned long bits[0]; /* bitmap for allocating memory chunk */
};
+/**
+ * gen_pool_for_each_chunk - iterate over chunks of generic memory pool
+ * @chunk: the struct gen_pool_chunk * to use as a loop cursor
+ * @pool: the generic memory pool
+ *
+ * Not lockless; proper mutual exclusion is needed to use this macro
+ * together with other gen_pool functions simultaneously.
+ */
+#define gen_pool_for_each_chunk(chunk, pool) \
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
+
extern struct gen_pool *gen_pool_create(int, int);
extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int);
extern void gen_pool_destroy(struct gen_pool *);
extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern size_t gen_pool_avail(struct gen_pool *);
+extern size_t gen_pool_size(struct gen_pool *);
+#endif /* GENALLOC_H */
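Editor's sketch (illustrative user, not part of this patch): the pool API itself is unchanged by the locking rework; a typical client still creates a pool, adds backing memory at init time (which still takes the lock), and then allocates from it, optionally consulting the new gen_pool_avail()/gen_pool_size() helpers.

	#include <linux/kernel.h>
	#include <linux/genalloc.h>

	static struct gen_pool *sram_pool;

	static int sram_pool_init(unsigned long sram_virt, size_t sram_size)
	{
	        /* 32-byte minimum allocation granularity, no NUMA preference. */
	        sram_pool = gen_pool_create(5, -1);
	        if (!sram_pool)
	                return -ENOMEM;

	        if (gen_pool_add(sram_pool, sram_virt, sram_size, -1)) {
	                gen_pool_destroy(sram_pool);
	                return -ENOMEM;
	        }

	        pr_info("sram pool: %zu of %zu bytes free\n",
	                gen_pool_avail(sram_pool), gen_pool_size(sram_pool));
	        return 0;
	}

	static unsigned long sram_alloc(size_t len)
	{
	        return gen_pool_alloc(sram_pool, len);  /* returns 0 on failure */
	}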
diff --git a/drivers/hid/hid-roccat.h b/include/linux/hid-roccat.h
index 5784281d613f..24e1ca01f9a0 100644
--- a/drivers/hid/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -15,18 +15,15 @@
#include <linux/hid.h>
#include <linux/types.h>
-#if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE)
-int roccat_connect(struct class *klass, struct hid_device *hid);
+#define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int)
+
+#ifdef __KERNEL__
+
+int roccat_connect(struct class *klass, struct hid_device *hid,
+ int report_size);
void roccat_disconnect(int minor);
-int roccat_report_event(int minor, u8 const *data, int len);
-#else
-static inline int roccat_connect(struct class *klass,
- struct hid_device *hid) { return -1; }
-static inline void roccat_disconnect(int minor) {}
-static inline int roccat_report_event(int minor, u8 const *data, int len)
-{
- return 0;
-}
+int roccat_report_event(int minor, u8 const *data);
+
#endif
#endif
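Editor's sketch (not part of this patch): since roccat_report_event() no longer carries a length, userspace learns the fixed report size of a device's chardev via the new ioctl before reading. The /dev node name is only an example.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hid-roccat.h>

	int main(void)
	{
	        unsigned char buf[256];
	        int size, fd = open("/dev/roccat0", O_RDONLY);

	        if (fd < 0 || ioctl(fd, ROCCATIOCGREPSIZE, &size) < 0 ||
	            size <= 0 || size > (int)sizeof(buf))
	                return 1;

	        /* Every read() now returns exactly one report of 'size' bytes. */
	        while (read(fd, buf, size) == size)
	                printf("report received, first byte 0x%02x\n", buf[0]);

	        return 0;
	}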
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d91c25e253c8..bb29bb1dbd2f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -504,6 +504,9 @@ struct hid_device { /* device report descriptor */
struct hid_usage *, __s32);
void (*hiddev_report_event) (struct hid_device *, struct hid_report *);
+ /* handler for raw input (Get_Report) data, used by hidraw */
+ int (*hid_get_raw_report) (struct hid_device *, unsigned char, __u8 *, size_t, unsigned char);
+
/* handler for raw output data, used by hidraw */
int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char);
@@ -638,7 +641,7 @@ struct hid_driver {
struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max);
void (*feature_mapping)(struct hid_device *hdev,
- struct hid_input *hidinput, struct hid_field *field,
+ struct hid_field *field,
struct hid_usage *usage);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index dd8d69269176..4b88e697c4e9 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -35,6 +35,9 @@ struct hidraw_devinfo {
#define HIDIOCGRAWINFO _IOR('H', 0x03, struct hidraw_devinfo)
#define HIDIOCGRAWNAME(len) _IOC(_IOC_READ, 'H', 0x04, len)
#define HIDIOCGRAWPHYS(len) _IOC(_IOC_READ, 'H', 0x05, len)
+/* The first byte of SFEATURE and GFEATURE is the report number */
+#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
+#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
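Editor's sketch (not part of this patch): the first byte of the buffer selects the report number (0 if the device uses unnumbered reports); the rest is the feature payload. The report layout below is made up.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/hidraw.h>

	int main(void)
	{
	        unsigned char buf[17];
	        int fd = open("/dev/hidraw0", O_RDWR);

	        if (fd < 0)
	                return 1;

	        /* Send feature report 0x09 with a 16-byte payload. */
	        memset(buf, 0, sizeof(buf));
	        buf[0] = 0x09;          /* report number */
	        buf[1] = 0xff;          /* example payload byte */
	        ioctl(fd, HIDIOCSFEATURE(sizeof(buf)), buf);

	        /* Read the same feature report back. */
	        buf[0] = 0x09;
	        ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf);
	        return 0;
	}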
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
new file mode 100644
index 000000000000..8390efc457eb
--- /dev/null
+++ b/include/linux/hwspinlock.h
@@ -0,0 +1,292 @@
+/*
+ * Hardware spinlock public header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_HWSPINLOCK_H
+#define __LINUX_HWSPINLOCK_H
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+/* hwspinlock mode argument */
+#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
+#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+
+struct hwspinlock;
+
+#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
+
+int hwspin_lock_register(struct hwspinlock *lock);
+struct hwspinlock *hwspin_lock_unregister(unsigned int id);
+struct hwspinlock *hwspin_lock_request(void);
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
+int hwspin_lock_free(struct hwspinlock *hwlock);
+int hwspin_lock_get_id(struct hwspinlock *hwlock);
+int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
+ unsigned long *);
+int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
+void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+
+#else /* !CONFIG_HWSPINLOCK */
+
+/*
+ * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
+ * enabled. We prefer to silently succeed in this case, and let the
+ * code path gets compiled away. This way, if CONFIG_HWSPINLOCK is not
+ * required on a given setup, users of the API will still build and work.
+ *
+ * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
+ * we _do_ want users to fail (no point in registering hwspinlock instances if
+ * the framework is not available).
+ *
+ * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
+ * users. Others, which care, can still check this with IS_ERR.
+ */
+static inline struct hwspinlock *hwspin_lock_request(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+ return 0;
+}
+
+static inline
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
+ int mode, unsigned long *flags)
+{
+ return 0;
+}
+
+static inline
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ return 0;
+}
+
+static inline
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+}
+
+static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+ return 0;
+}
+
+static inline int hwspin_lock_register(struct hwspinlock *hwlock)
+{
+ return -ENODEV;
+}
+
+static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_HWSPINLOCK */
+
+/**
+ * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
+ * @hwlock: an hwspinlock which we want to trylock
+ * @flags: a pointer to where the caller's interrupt state will be saved at
+ *
+ * This function attempts to lock the underlying hwspinlock, and will
+ * immediately fail if the hwspinlock is already locked.
+ *
+ * Upon a successful return from this function, preemption and local
+ * interrupts are disabled (previous interrupts state is saved at @flags),
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline
+int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock the underlying hwspinlock, and will
+ * immediately fail if the hwspinlock is already locked.
+ *
+ * Upon a successful return from this function, preemption and local
+ * interrupts are disabled, so the caller must not sleep, and is advised
+ * to release the hwspinlock as soon as possible.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible. This is required in order to minimize remote cores
+ * polling on the hardware interconnect.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, 0, NULL);
+}
+
+/**
+ * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ * @flags: a pointer to where the caller's interrupt state will be saved at
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption and local interrupts
+ * are disabled (plus previous interrupt state is saved), so the caller must
+ * not sleep, and is advised to release the hwspinlock as soon as possible.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
+ unsigned int to, unsigned long *flags)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption and local interrupts
+ * are disabled so the caller must not sleep, and is advised to release the
+ * hwspinlock as soon as possible.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, 0, NULL);
+}
+
+/**
+ * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @flags: previous caller's interrupt state to restore
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * restore the previous state of the local interrupts. It should be used
+ * to undo, e.g., hwspin_trylock_irqsave().
+ *
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ */
+static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
+ unsigned long *flags)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * enable local interrupts. Should be used to undo hwspin_lock_irq().
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
+ * calling this function: it is a bug to call unlock on a @hwlock that is
+ * already unlocked.
+ */
+static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_unlock() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock and enable preemption
+ * back.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, 0, NULL);
+}
+
+#endif /* __LINUX_HWSPINLOCK_H */
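Editor's sketch (illustrative client, not part of this patch; IDs and the timeout are examples): a typical user requests a lock, takes it with a timeout while interrupts are disabled, touches the shared resource, and releases it.

	#include <linux/err.h>
	#include <linux/hwspinlock.h>

	#define SHARED_RES_TIMEOUT_MS	100

	static int poke_shared_resource(void)
	{
	        struct hwspinlock *hwlock;
	        unsigned long flags;
	        int ret;

	        hwlock = hwspin_lock_request();   /* or _request_specific(id) */
	        if (IS_ERR_OR_NULL(hwlock))
	                return hwlock ? PTR_ERR(hwlock) : -EBUSY;

	        ret = hwspin_lock_timeout_irqsave(hwlock, SHARED_RES_TIMEOUT_MS,
	                                          &flags);
	        if (!ret) {
	                /* ... access memory shared with the remote processor ... */
	                hwspin_unlock_irqrestore(hwlock, &flags);
	        }

	        hwspin_lock_free(hwlock);
	        return ret;
	}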
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
deleted file mode 100644
index 4bef5c557160..000000000000
--- a/include/linux/i2c-id.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* ------------------------------------------------------------------------- */
-/* */
-/* i2c-id.h - identifier values for i2c drivers and adapters */
-/* */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-1999 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
-/* ------------------------------------------------------------------------- */
-
-#ifndef LINUX_I2C_ID_H
-#define LINUX_I2C_ID_H
-
-/* Please note that I2C driver IDs are optional. They are only needed if a
- legacy chip driver needs to identify a bus or a bus driver needs to
- identify a legacy client. If you don't need them, just don't set them. */
-
-/*
- * ---- Adapter types ----------------------------------------------------
- */
-
-/* --- Bit algorithm adapters */
-#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */
-
-#endif /* LINUX_I2C_ID_H */
diff --git a/include/linux/i2c-tegra.h b/include/linux/i2c-tegra.h
new file mode 100644
index 000000000000..9c85da49857a
--- /dev/null
+++ b/include/linux/i2c-tegra.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_I2C_TEGRA_H
+#define _LINUX_I2C_TEGRA_H
+
+struct tegra_i2c_platform_data {
+ unsigned long bus_clk_rate;
+};
+
+#endif /* _LINUX_I2C_TEGRA_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 903576df88dc..cd2f75b742ec 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/module.h>
-#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
@@ -105,8 +104,8 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @attach_adapter: Callback for bus addition (for legacy drivers)
- * @detach_adapter: Callback for bus removal (for legacy drivers)
+ * @attach_adapter: Callback for bus addition (deprecated)
+ * @detach_adapter: Callback for bus removal (deprecated)
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
@@ -144,11 +143,11 @@ struct i2c_driver {
unsigned int class;
/* Notifies the driver that a new bus has appeared or is about to be
- * removed. You should avoid using this if you can, it will probably
- * be removed in a near future.
+ * removed. You should avoid using this; it will be removed in the
+ * near future.
*/
- int (*attach_adapter)(struct i2c_adapter *);
- int (*detach_adapter)(struct i2c_adapter *);
+ int (*attach_adapter)(struct i2c_adapter *) __deprecated;
+ int (*detach_adapter)(struct i2c_adapter *) __deprecated;
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
@@ -258,9 +257,7 @@ struct i2c_board_info {
unsigned short addr;
void *platform_data;
struct dev_archdata *archdata;
-#ifdef CONFIG_OF
struct device_node *of_node;
-#endif
int irq;
};
@@ -398,6 +395,8 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
return NULL;
}
+int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
+
/* Adapter locking functions, exported for shared pin cases */
void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
@@ -449,7 +448,7 @@ extern void i2c_release_client(struct i2c_client *client);
extern void i2c_clients_command(struct i2c_adapter *adap,
unsigned int cmd, void *arg);
-extern struct i2c_adapter *i2c_get_adapter(int id);
+extern struct i2c_adapter *i2c_get_adapter(int nr);
extern void i2c_put_adapter(struct i2c_adapter *adap);
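Editor's sketch (illustrative only): the new i2c_for_each_dev() applies a callback to every device registered on the i2c bus type; i2c-dev is the intended user, but the shape is generic.

	#include <linux/device.h>
	#include <linux/i2c.h>

	static int count_clients(struct device *dev, void *data)
	{
	        int *count = data;

	        /* Only count real client devices, skip adapters. */
	        if (i2c_verify_client(dev))
	                (*count)++;
	        return 0;
	}

	static int how_many_i2c_clients(void)
	{
	        int count = 0;

	        i2c_for_each_dev(&count, count_clients);
	        return count;
	}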
diff --git a/include/linux/i2c/ads1015.h b/include/linux/i2c/ads1015.h
new file mode 100644
index 000000000000..8541c6acfafd
--- /dev/null
+++ b/include/linux/i2c/ads1015.h
@@ -0,0 +1,28 @@
+/*
+ * Platform Data for ADS1015 12-bit 4-input ADC
+ * (C) Copyright 2010
+ * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_ADS1015_H
+#define LINUX_ADS1015_H
+
+struct ads1015_platform_data {
+ unsigned int exported_channels;
+};
+
+#endif /* LINUX_ADS1015_H */
diff --git a/include/linux/i2c/qt602240_ts.h b/include/linux/i2c/atmel_mxt_ts.h
index c5033e101094..f027f7a63511 100644
--- a/include/linux/i2c/qt602240_ts.h
+++ b/include/linux/i2c/atmel_mxt_ts.h
@@ -1,5 +1,5 @@
/*
- * AT42QT602240/ATMXT224 Touchscreen driver
+ * Atmel maXTouch Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
@@ -10,21 +10,26 @@
* option) any later version.
*/
-#ifndef __LINUX_QT602240_TS_H
-#define __LINUX_QT602240_TS_H
+#ifndef __LINUX_ATMEL_MXT_TS_H
+#define __LINUX_ATMEL_MXT_TS_H
+
+#include <linux/types.h>
/* Orient */
-#define QT602240_NORMAL 0x0
-#define QT602240_DIAGONAL 0x1
-#define QT602240_HORIZONTAL_FLIP 0x2
-#define QT602240_ROTATED_90_COUNTER 0x3
-#define QT602240_VERTICAL_FLIP 0x4
-#define QT602240_ROTATED_90 0x5
-#define QT602240_ROTATED_180 0x6
-#define QT602240_DIAGONAL_COUNTER 0x7
+#define MXT_NORMAL 0x0
+#define MXT_DIAGONAL 0x1
+#define MXT_HORIZONTAL_FLIP 0x2
+#define MXT_ROTATED_90_COUNTER 0x3
+#define MXT_VERTICAL_FLIP 0x4
+#define MXT_ROTATED_90 0x5
+#define MXT_ROTATED_180 0x6
+#define MXT_DIAGONAL_COUNTER 0x7
+
+/* The platform data for the Atmel maXTouch touchscreen driver */
+struct mxt_platform_data {
+ const u8 *config;
+ size_t config_length;
-/* The platform data for the AT42QT602240/ATMXT224 touchscreen driver */
-struct qt602240_platform_data {
unsigned int x_line;
unsigned int y_line;
unsigned int x_size;
@@ -33,6 +38,7 @@ struct qt602240_platform_data {
unsigned int threshold;
unsigned int voltage;
unsigned char orient;
+ unsigned long irqflags;
};
-#endif /* __LINUX_QT602240_TS_H */
+#endif /* __LINUX_ATMEL_MXT_TS_H */
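Editor's sketch (board values are placeholders, not from this patch): board code hands the renamed driver its geometry, optional config blob, and the new irqflags through platform data attached to an i2c_board_info.

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/i2c.h>
	#include <linux/i2c/atmel_mxt_ts.h>

	static struct mxt_platform_data example_mxt_pdata = {
	        .config        = NULL,          /* keep controller defaults */
	        .config_length = 0,
	        .x_line        = 18,
	        .y_line        = 12,
	        .x_size        = 800,
	        .y_size        = 480,
	        .threshold     = 0x28,
	        .voltage       = 2800000,
	        .orient        = MXT_DIAGONAL,
	        .irqflags      = IRQF_TRIGGER_FALLING,
	};

	static struct i2c_board_info example_mxt_board_info __initdata = {
	        I2C_BOARD_INFO("atmel_mxt_ts", 0x4a),
	        .platform_data = &example_mxt_pdata,
	        /* .irq is filled in from the board's GPIO in real board code */
	};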
diff --git a/include/linux/i2c/max6639.h b/include/linux/i2c/max6639.h
new file mode 100644
index 000000000000..6011c42034da
--- /dev/null
+++ b/include/linux/i2c/max6639.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_MAX6639_H
+#define _LINUX_MAX6639_H
+
+#include <linux/types.h>
+
+/* platform data for the MAX6639 temperature sensor and fan control */
+
+struct max6639_platform_data {
+ bool pwm_polarity; /* Polarity low (0) or high (1, default) */
+ int ppr; /* Pulses per rotation 1..4 (default == 2) */
+ int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
+};
+
+#endif /* _LINUX_MAX6639_H */
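Editor's sketch (bus number and address are made up): board code passes the fan-controller defaults through i2c_board_info platform data.

	#include <linux/init.h>
	#include <linux/i2c.h>
	#include <linux/i2c/max6639.h>

	static struct max6639_platform_data example_max6639_pdata = {
	        .pwm_polarity = 1,      /* active high (default) */
	        .ppr          = 2,      /* two pulses per rotation */
	        .rpm_range    = 4000,
	};

	static struct i2c_board_info example_max6639_info __initdata = {
	        I2C_BOARD_INFO("max6639", 0x2c),
	        .platform_data = &example_max6639_pdata,
	};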
diff --git a/include/linux/i2c/mcs.h b/include/linux/i2c/mcs.h
index 725ae7c313ff..61bb18a4fd3c 100644
--- a/include/linux/i2c/mcs.h
+++ b/include/linux/i2c/mcs.h
@@ -18,6 +18,7 @@
#define MCS_KEY_CODE(v) ((v) & 0xffff)
struct mcs_platform_data {
+ void (*poweron)(bool);
void (*cfg_pin)(void);
/* touchscreen */
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h
new file mode 100644
index 000000000000..69280db02c41
--- /dev/null
+++ b/include/linux/i2c/pmbus.h
@@ -0,0 +1,45 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PMBUS_H_
+#define _PMBUS_H_
+
+/* flags */
+
+/*
+ * PMBUS_SKIP_STATUS_CHECK
+ *
+ * During register detection, skip checking the status register for
+ * communication or command errors.
+ *
+ * Some PMBus chips respond with valid data when trying to read an unsupported
+ * register. For such chips, checking the status register is mandatory when
+ * trying to determine if a chip register exists or not.
+ * Other PMBus chips don't support the STATUS_CML register, or report
+ * communication errors for no explicable reason. For such chips, checking
+ * the status register must be disabled.
+ */
+#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+
+struct pmbus_platform_data {
+ u32 flags; /* Device specific flags */
+};
+
+#endif /* _PMBUS_H_ */
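Editor's sketch (made-up address; registration call shown only in a comment): board code that knows its PMBus chip misreports CML status passes the flag through platform data on the i2c_board_info.

	#include <linux/init.h>
	#include <linux/i2c.h>
	#include <linux/i2c/pmbus.h>

	static struct pmbus_platform_data example_pmbus_pdata = {
	        .flags = PMBUS_SKIP_STATUS_CHECK,
	};

	static struct i2c_board_info example_pmbus_board_info __initdata = {
	        I2C_BOARD_INFO("pmbus", 0x58),
	        .platform_data = &example_pmbus_pdata,
	};

	/* Registered from board init, e.g.:
	 *	i2c_register_board_info(1, &example_pmbus_board_info, 1);
	 */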
diff --git a/arch/arm/plat-pxa/include/plat/i2c.h b/include/linux/i2c/pxa-i2c.h
index 1a9f65e6ec0f..1a9f65e6ec0f 100644
--- a/arch/arm/plat-pxa/include/plat/i2c.h
+++ b/include/linux/i2c/pxa-i2c.h
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 61b9609e55f2..7086e1e04fe9 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -698,6 +698,7 @@ struct twl4030_platform_data {
struct regulator_init_data *vana;
struct regulator_init_data *vcxio;
struct regulator_init_data *vusb;
+ struct regulator_init_data *clk32kg;
};
/*----------------------------------------------------------------------*/
@@ -777,5 +778,6 @@ static inline int twl4030charger_usb_en(int enable) { return 0; }
/* INTERNAL LDOs */
#define TWL6030_REG_VRTC 47
+#define TWL6030_REG_CLK32KG 48
#endif /* End of __TWL4030_H */
diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
new file mode 100644
index 000000000000..6427d298fbfc
--- /dev/null
+++ b/include/linux/i2c/twl4030-madc.h
@@ -0,0 +1,141 @@
+/*
+ * twl4030_madc.h - Header for TWL4030 MADC
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _TWL4030_MADC_H
+#define _TWL4030_MADC_H
+
+struct twl4030_madc_conversion_method {
+ u8 sel;
+ u8 avg;
+ u8 rbase;
+ u8 ctrl;
+};
+
+#define TWL4030_MADC_MAX_CHANNELS 16
+
+
+/*
+ * struct twl4030_madc_request - MADC request packet for channel conversion
+ * @channels: 16 bit bitmap for individual channels
+ * @do_avg: sample the input channel for 4 consecutive cycles
+ * @method: RT, SW1, SW2
+ * @type: Polling or interrupt based method
+ */
+
+struct twl4030_madc_request {
+ unsigned long channels;
+ u16 do_avg;
+ u16 method;
+ u16 type;
+ bool active;
+ bool result_pending;
+ int rbuf[TWL4030_MADC_MAX_CHANNELS];
+ void (*func_cb)(int len, int channels, int *buf);
+};
+
+enum conversion_methods {
+ TWL4030_MADC_RT,
+ TWL4030_MADC_SW1,
+ TWL4030_MADC_SW2,
+ TWL4030_MADC_NUM_METHODS
+};
+
+enum sample_type {
+ TWL4030_MADC_WAIT,
+ TWL4030_MADC_IRQ_ONESHOT,
+ TWL4030_MADC_IRQ_REARM
+};
+
+#define TWL4030_MADC_CTRL1 0x00
+#define TWL4030_MADC_CTRL2 0x01
+
+#define TWL4030_MADC_RTSELECT_LSB 0x02
+#define TWL4030_MADC_SW1SELECT_LSB 0x06
+#define TWL4030_MADC_SW2SELECT_LSB 0x0A
+
+#define TWL4030_MADC_RTAVERAGE_LSB 0x04
+#define TWL4030_MADC_SW1AVERAGE_LSB 0x08
+#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C
+
+#define TWL4030_MADC_CTRL_SW1 0x12
+#define TWL4030_MADC_CTRL_SW2 0x13
+
+#define TWL4030_MADC_RTCH0_LSB 0x17
+#define TWL4030_MADC_GPCH0_LSB 0x37
+
+#define TWL4030_MADC_MADCON (1 << 0) /* MADC power on */
+#define TWL4030_MADC_BUSY (1 << 0) /* MADC busy */
+/* MADC conversion completion */
+#define TWL4030_MADC_EOC_SW (1 << 1)
+/* MADC SWx start conversion */
+#define TWL4030_MADC_SW_START (1 << 5)
+#define TWL4030_MADC_ADCIN0 (1 << 0)
+#define TWL4030_MADC_ADCIN1 (1 << 1)
+#define TWL4030_MADC_ADCIN2 (1 << 2)
+#define TWL4030_MADC_ADCIN3 (1 << 3)
+#define TWL4030_MADC_ADCIN4 (1 << 4)
+#define TWL4030_MADC_ADCIN5 (1 << 5)
+#define TWL4030_MADC_ADCIN6 (1 << 6)
+#define TWL4030_MADC_ADCIN7 (1 << 7)
+#define TWL4030_MADC_ADCIN8 (1 << 8)
+#define TWL4030_MADC_ADCIN9 (1 << 9)
+#define TWL4030_MADC_ADCIN10 (1 << 10)
+#define TWL4030_MADC_ADCIN11 (1 << 11)
+#define TWL4030_MADC_ADCIN12 (1 << 12)
+#define TWL4030_MADC_ADCIN13 (1 << 13)
+#define TWL4030_MADC_ADCIN14 (1 << 14)
+#define TWL4030_MADC_ADCIN15 (1 << 15)
+
+/* Fixed channels */
+#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1
+#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8
+#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9
+#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10
+#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11
+#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12
+
+/* Step size and prescaler ratio */
+#define TEMP_STEP_SIZE 147
+#define TEMP_PSR_R 100
+#define CURR_STEP_SIZE 147
+#define CURR_PSR_R1 44
+#define CURR_PSR_R2 88
+
+#define TWL4030_BCI_BCICTL1 0x23
+#define TWL4030_BCI_CGAIN 0x020
+#define TWL4030_BCI_MESBAT (1 << 1)
+#define TWL4030_BCI_TYPEN (1 << 4)
+#define TWL4030_BCI_ITHEN (1 << 3)
+
+#define REG_BCICTL2 0x024
+#define TWL4030_BCI_ITHSENS 0x007
+
+struct twl4030_madc_user_parms {
+ int channel;
+ int average;
+ int status;
+ u16 result;
+};
+
+int twl4030_madc_conversion(struct twl4030_madc_request *conv);
+int twl4030_get_madc_conversion(int channel_no);
+#endif
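Editor's sketch (channel choice is an example; raw result scaling is board specific): a consumer fills a twl4030_madc_request and either blocks for the result or supplies a callback. The sketch below reads the battery-voltage channel synchronously.

	#include <linux/i2c/twl4030-madc.h>

	static int read_vbat_raw(void)
	{
	        struct twl4030_madc_request req = {
	                .channels = TWL4030_MADC_VBAT,   /* bit 12 */
	                .do_avg   = 1,                   /* average 4 samples */
	                .method   = TWL4030_MADC_SW2,
	                .type     = TWL4030_MADC_WAIT,   /* block until done */
	                .func_cb  = NULL,
	        };
	        int ret;

	        ret = twl4030_madc_conversion(&req);
	        if (ret < 0)
	                return ret;

	        return req.rbuf[12];    /* raw result for channel 12 */
	}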
diff --git a/include/linux/if.h b/include/linux/if.h
index 123959927745..3bc63e6a02f7 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -71,11 +71,10 @@
* release skb->dst
*/
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
-#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
-#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
-#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
-#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
-#define IFF_OVS_DATAPATH 0x10000 /* device used as Open vSwitch
+#define IFF_DISABLE_NETPOLL 0x1000 /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT 0x2000 /* device used as macvlan port */
+#define IFF_BRIDGE_PORT 0x4000 /* device used as bridge port */
+#define IFF_OVS_DATAPATH 0x8000 /* device used as Open vSwitch
* datapath port */
#define IF_GET_IFACE 0x0001 /* for querying only */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 6485d2a89bec..f4a2e6b1b864 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -135,6 +135,7 @@ enum {
IFLA_VF_PORTS,
IFLA_PORT_SELF,
IFLA_AF_SPEC,
+ IFLA_GROUP, /* Group the device belongs to */
__IFLA_MAX
};
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 975837e7d6c0..09e6e62f9953 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -20,7 +20,6 @@ extern void ima_inode_free(struct inode *inode);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern void ima_counts_get(struct file *file);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -53,10 +52,5 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
-static inline void ima_counts_get(struct file *file)
-{
- return;
-}
-
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ae8fdc54e0c0..5f8146695b7f 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -144,6 +144,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
struct in_ifaddr {
+ struct hlist_node hash;
struct in_ifaddr *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h
index 5e3dddf8f562..ce0b72464eb8 100644
--- a/include/linux/input-polldev.h
+++ b/include/linux/input-polldev.h
@@ -22,12 +22,12 @@
* @poll: driver-supplied method that polls the device and posts
* input events (mandatory).
* @poll_interval: specifies how often the poll() method should be called.
- * Defaults to 500 msec unless overriden when registering the device.
+ * Defaults to 500 msec unless overridden when registering the device.
* @poll_interval_max: specifies upper bound for the poll interval.
* Defaults to the initial value of @poll_interval.
* @poll_interval_min: specifies lower bound for the poll interval.
* Defaults to 0.
- * @input: input device structire associated with the polled device.
+ * @input: input device structure associated with the polled device.
* Must be properly initialized by the driver (id, name, phys, bits).
*
* Polled input device provides a skeleton for supporting simple input
diff --git a/include/linux/input.h b/include/linux/input.h
index 050504260259..f3a7794a18c4 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1161,8 +1161,6 @@ struct ff_effect {
* sparse keymaps. If not supplied default mechanism will be used.
* The method is being called while holding event_lock and thus must
* not sleep
- * @getkeycode_new: transition method
- * @setkeycode_new: transition method
* @ff: force feedback structure associated with the device if device
* supports force feedback effects
* @repeat_key: stores key code of the last key pressed; used to implement
@@ -1241,14 +1239,10 @@ struct input_dev {
void *keycode;
int (*setkeycode)(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode);
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode);
int (*getkeycode)(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode);
- int (*setkeycode_new)(struct input_dev *dev,
- const struct input_keymap_entry *ke,
- unsigned int *old_keycode);
- int (*getkeycode_new)(struct input_dev *dev,
- struct input_keymap_entry *ke);
+ struct input_keymap_entry *ke);
struct ff_device *ff;
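Editor's sketch (hypothetical driver, assuming a simple array keymap indexed by scancode): drivers now implement the input_keymap_entry based methods directly, since the *_new transition names are gone.

	#include <linux/input.h>

	#define EXAMPLE_KEYMAP_SIZE	64

	static unsigned short example_keymap[EXAMPLE_KEYMAP_SIZE];

	static int example_getkeycode(struct input_dev *dev,
	                              struct input_keymap_entry *ke)
	{
	        unsigned int scancode;

	        if (input_scancode_to_scalar(ke, &scancode) ||
	            scancode >= EXAMPLE_KEYMAP_SIZE)
	                return -EINVAL;

	        ke->keycode = example_keymap[scancode];
	        return 0;
	}

	static int example_setkeycode(struct input_dev *dev,
	                              const struct input_keymap_entry *ke,
	                              unsigned int *old_keycode)
	{
	        unsigned int scancode;

	        if (input_scancode_to_scalar(ke, &scancode) ||
	            scancode >= EXAMPLE_KEYMAP_SIZE)
	                return -EINVAL;

	        *old_keycode = example_keymap[scancode];
	        example_keymap[scancode] = ke->keycode;
	        return 0;
	}

	/* Wired up at probe time:
	 *	dev->getkeycode = example_getkeycode;
	 *	dev->setkeycode = example_setkeycode;
	 */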
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..59b72ca1c5d1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
@@ -55,7 +57,8 @@
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -67,22 +70,10 @@
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
+#define IRQF_FORCE_RESUME 0x00008000
+#define IRQF_NO_THREAD 0x00010000
-#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED - handler thread died
- * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY - irq thread is requested to adjust affinity
- */
-enum {
- IRQTF_RUNTHREAD,
- IRQTF_DIED,
- IRQTF_WARNED,
- IRQTF_AFFINITY,
-};
+#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
/*
* These values can be returned by request_any_context_irq() and
@@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @thread_fn: interupt handler function for threaded interrupts
* @thread: thread pointer for threaded interrupts
* @thread_flags: flags related to @thread
+ * @thread_mask: bitmask for keeping track of @thread activity
*/
struct irqaction {
irq_handler_t handler;
@@ -120,6 +112,7 @@ struct irqaction {
irq_handler_t thread_fn;
struct task_struct *thread;
unsigned long thread_flags;
+ unsigned long thread_mask;
const char *name;
struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
@@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq: Interrupt to which notification applies
+ * @kref: Reference count, for internal use
+ * @work: Work item, for internal use
+ * @notify: Function to be called on change. This will be
+ * called in process context.
+ * @release: Function to be called on release. This will be
+ * called in process context. Once registered, the
+ * structure must only be freed when this function is
+ * called or later.
+ */
+struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+ flush_scheduled_work();
+}
+
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
static inline int irq_set_affinity_hint(unsigned int irq,
- const struct cpumask *m)
+ const struct cpumask *m)
{
return -EINVAL;
}
@@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
}
/* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_wake(unsigned int irq, unsigned int on)
+{
+ return irq_set_irq_wake(irq, on);
+}
+#endif
static inline int enable_irq_wake(unsigned int irq)
{
- return set_irq_wake(irq, 1);
+ return irq_set_irq_wake(irq, 1);
}
static inline int disable_irq_wake(unsigned int irq)
{
- return set_irq_wake(irq, 0);
+ return irq_set_irq_wake(irq, 0);
}
#else /* !CONFIG_GENERIC_HARDIRQS */
@@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
}
#endif /* CONFIG_GENERIC_HARDIRQS */
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads (0)
+#endif
+
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
@@ -426,6 +463,13 @@ extern void raise_softirq(unsigned int nr);
*/
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+ return this_cpu_read(ksoftirqd);
+}
+
/* Try to send a softirq to a remote cpu. If this cannot be done, the
* work will be queued to the local cpu.
*/
@@ -645,6 +689,7 @@ static inline void init_irq_proc(void)
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
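Editor's sketch (hypothetical driver structure and IRQ): a driver that keeps per-CPU state aligned with an IRQ's affinity registers a notifier; both callbacks run in process context via the embedded work item.

	#include <linux/kernel.h>
	#include <linux/interrupt.h>
	#include <linux/kref.h>

	struct example_queue {
	        struct irq_affinity_notify affinity_notify;
	        unsigned int irq;
	};

	static void example_affinity_changed(struct irq_affinity_notify *notify,
	                                     const cpumask_t *mask)
	{
	        struct example_queue *q =
	                container_of(notify, struct example_queue, affinity_notify);

	        /* Re-steer per-CPU resources to the new mask (driver specific). */
	        (void)q;
	}

	static void example_affinity_release(struct kref *ref)
	{
	        /* The notify structure is embedded, nothing to free here. */
	}

	static int example_register(struct example_queue *q)
	{
	        q->affinity_notify.notify = example_affinity_changed;
	        q->affinity_notify.release = example_affinity_release;
	        return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
	}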
diff --git a/include/linux/ioq.h b/include/linux/ioq.h
new file mode 100644
index 000000000000..7c6d6cad83c7
--- /dev/null
+++ b/include/linux/ioq.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * IOQ is a generic shared-memory, lockless queue mechanism. It can be used
+ * in a variety of ways, though its intended purpose is to become the
+ * asynchronous communication path for virtual-bus drivers.
+ *
+ * The following is a list of key design points:
+ *
+ * #) All shared memory is always allocated explicitly on one side of the
+ * link. This would typically be the guest side in a VM/VMM scenario.
+ * #) Each IOQ has the concept of "north" and "south" locales, where
+ * north denotes the memory-owner side (e.g. guest).
+ * #) An IOQ is manipulated using an iterator idiom.
+ * #) Provides a bi-directional signaling/notification infrastructure on
+ * a per-queue basis, which includes an event mitigation strategy
+ * to reduce boundary switching.
+ * #) The signaling path is abstracted so that various technologies and
+ * topologies can define their own specific implementation while sharing
+ * the basic structures and code.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IOQ_H
+#define _LINUX_IOQ_H
+
+#include <linux/types.h>
+#include <linux/shm_signal.h>
+
+/*
+ *---------
+ * The following structures represent data that is shared across boundaries
+ * which may be quite disparate from one another (e.g. Windows vs Linux,
+ * 32 vs 64 bit, etc). Therefore, care has been taken to make sure they
+ * present data in a manner that is independent of the environment.
+ *-----------
+ */
+struct ioq_ring_desc {
+ __u64 cookie; /* for arbitrary use by north-side */
+ __le64 ptr;
+ __le64 len;
+ __u8 valid;
+ __u8 sown; /* South owned = 1, North owned = 0 */
+};
+
+#define IOQ_RING_MAGIC cpu_to_le32(0x47fa2fe4)
+#define IOQ_RING_VER cpu_to_le32(4)
+
+struct ioq_ring_idx {
+ __le32 head; /* 0 based index to head of ptr array */
+ __le32 tail; /* 0 based index to tail of ptr array */
+ __u8 full;
+};
+
+enum ioq_locality {
+ ioq_locality_north,
+ ioq_locality_south,
+};
+
+struct ioq_ring_head {
+ __le32 magic;
+ __le32 ver;
+ struct shm_signal_desc signal;
+ struct ioq_ring_idx idx[2];
+ __le32 count;
+ struct ioq_ring_desc ring[1]; /* "count" elements will be allocated */
+};
+
+#define IOQ_HEAD_DESC_SIZE(count) \
+ (sizeof(struct ioq_ring_head) + sizeof(struct ioq_ring_desc) * (count - 1))
+
+/* --- END SHARED STRUCTURES --- */
+
+#ifdef __KERNEL__
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+
+enum ioq_idx_type {
+ ioq_idxtype_valid,
+ ioq_idxtype_inuse,
+ ioq_idxtype_both,
+ ioq_idxtype_invalid,
+};
+
+enum ioq_seek_type {
+ ioq_seek_tail,
+ ioq_seek_next,
+ ioq_seek_head,
+ ioq_seek_set
+};
+
+struct ioq_iterator {
+ struct ioq *ioq;
+ struct ioq_ring_idx *idx;
+ u32 pos;
+ struct ioq_ring_desc *desc;
+ bool update;
+ bool dualidx;
+ bool flipowner;
+};
+
+struct ioq_notifier {
+ void (*signal)(struct ioq_notifier *);
+};
+
+struct ioq_ops {
+ void (*release)(struct ioq *ioq);
+};
+
+struct ioq {
+ struct ioq_ops *ops;
+
+ struct kref kref;
+ enum ioq_locality locale;
+ struct ioq_ring_head *head_desc;
+ struct ioq_ring_desc *ring;
+ struct shm_signal *signal;
+ wait_queue_head_t wq;
+ struct ioq_notifier *notifier;
+ size_t count;
+ struct shm_signal_notifier shm_notifier;
+};
+
+#define IOQ_ITER_AUTOUPDATE (1 << 0)
+#define IOQ_ITER_NOFLIPOWNER (1 << 1)
+
+/**
+ * ioq_init() - initialize an IOQ
+ * @ioq: IOQ context
+ *
+ * Initializes IOQ context before first use
+ *
+ **/
+void ioq_init(struct ioq *ioq,
+ struct ioq_ops *ops,
+ enum ioq_locality locale,
+ struct ioq_ring_head *head,
+ struct shm_signal *signal,
+ size_t count);
+
+/**
+ * ioq_get() - acquire an IOQ context reference
+ * @ioq: IOQ context
+ *
+ **/
+static inline struct ioq *ioq_get(struct ioq *ioq)
+{
+ kref_get(&ioq->kref);
+
+ return ioq;
+}
+
+static inline void _ioq_kref_release(struct kref *kref)
+{
+ struct ioq *ioq = container_of(kref, struct ioq, kref);
+
+ shm_signal_put(ioq->signal);
+ ioq->ops->release(ioq);
+}
+
+/**
+ * ioq_put() - release an IOQ context reference
+ * @ioq: IOQ context
+ *
+ **/
+static inline void ioq_put(struct ioq *ioq)
+{
+ kref_put(&ioq->kref, _ioq_kref_release);
+}
+
+/**
+ * ioq_notify_enable() - enables local notifications on an IOQ
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Enables/unmasks the registered ioq_notifier (if applicable) and waitq to
+ * receive wakeups whenever the remote side performs an ioq_signal() operation.
+ * A notification will be dispatched immediately if any pending signals have
+ * already been issued prior to invoking this call.
+ *
+ * This is synonymous with unmasking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_notify_enable(struct ioq *ioq, int flags)
+{
+ return shm_signal_enable(ioq->signal, 0);
+}
+
+/**
+ * ioq_notify_disable() - disable local notifications on an IOQ
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Disables/masks the registered ioq_notifier (if applicable) and waitq
+ * from receiving any further notifications. Any subsequent calls to
+ * ioq_signal() by the remote side will update the ring as dirty, but
+ * will not traverse the locale boundary and will not invoke the notifier
+ * callback or wakeup the waitq. Signals delivered while masked will
+ * be deferred until ioq_notify_enable() is invoked.
+ *
+ * This is synonymous with masking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_notify_disable(struct ioq *ioq, int flags)
+{
+ return shm_signal_disable(ioq->signal, 0);
+}
+
+/**
+ * ioq_signal() - notify the remote side about ring changes
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Marks the ring state as "dirty" and, if enabled, will traverse
+ * a locale boundary to invoke a remote notification. The remote
+ * side controls whether the notification should be delivered via
+ * the ioq_notify_enable/disable() interface.
+ *
+ * The specifics of how to traverse a locale boundary are abstracted
+ * by the ioq_ops->signal() interface and provided by a particular
+ * implementation. However, typically going north to south would be
+ * something like a syscall/hypercall, and going south to north would be
+ * something like a posix-signal/guest-interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_signal(struct ioq *ioq, int flags)
+{
+ return shm_signal_inject(ioq->signal, 0);
+}
+
+/**
+ * ioq_count() - counts the number of outstanding descriptors in an index
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) >=0: # of descriptors outstanding in the index
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_count(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_remain() - counts the number of remaining descriptors in an index
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * This is the converse of ioq_count(). This function returns the number
+ * of "free" descriptors left in a particular index
+ *
+ * Returns:
+ * (*) >=0: # of descriptors remaining in the index
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_remain(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_size() - counts the maximum number of descriptors in a ring
+ * @ioq: IOQ context
+ *
+ * This function returns the maximum number of descriptors supported in
+ * a ring, regardless of their current state (free or inuse).
+ *
+ * Returns:
+ * (*) >=0: total # of descriptors in the ring
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_size(struct ioq *ioq);
+
+/**
+ * ioq_full() - determines if a specific index is "full"
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) 0: index is not full
+ * (*) 1: index is full
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_full(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_empty() - determines if a specific index is "empty"
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) 0: index is not empty
+ * (*) 1: index is empty
+ * (*) <0 = ERRNO
+ *
+ **/
+static inline int ioq_empty(struct ioq *ioq, enum ioq_idx_type type)
+{
+ return !ioq_count(ioq, type);
+}
+
+/**
+ * ioq_iter_init() - initialize an iterator for IOQ descriptor traversal
+ * @ioq: IOQ context to iterate on
+ * @iter: Iterator context to init (usually from stack)
+ * @type: Specifies the index type to iterate against
+ * (*) valid: iterate against the "valid" index
+ * (*) inuse: iterate against the "inuse" index
+ * (*) both: iterate against both indexes simultaneously
+ * @flags: Bitfield with 0 or more bits set to alter behavior
+ * (*) autoupdate: automatically signal the remote side
+ * whenever the iterator pushes/pops to a new desc
+ * (*) noflipowner: do not flip the ownership bit during
+ * a push/pop operation
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
+ enum ioq_idx_type type, int flags);
+
+/**
+ * ioq_iter_seek() - seek to a specific location in the IOQ ring
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @type: Specifies the type of seek operation
+ * (*) tail: seek to the absolute tail, offset is ignored
+ * (*) next: seek to the relative next, offset is ignored
+ * (*) head: seek to the absolute head, offset is ignored
+ * (*) set: seek to the absolute offset
+ * @offset: Offset for ioq_seek_set operations
+ * @flags: Reserved for future use, must be 0
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
+ long offset, int flags);
+
+/**
+ * ioq_iter_push() - push the tail pointer forward
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @flags: Reserved for future use, must be 0
+ *
+ * This function will simultaneously advance the tail ptr in the current
+ * index (valid/inuse, as specified in the ioq_iter_init) as well as
+ * perform a seek(next) operation. This effectively "pushes" a new pointer
+ * onto the tail of the index.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_push(struct ioq_iterator *iter, int flags);
+
+/**
+ * ioq_iter_pop() - pop the head pointer from the ring
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @flags: Reserved for future use, must be 0
+ *
+ * This function will simultaneously advance the head ptr in the current
+ * index (valid/inuse, as specified in the ioq_iter_init) as well as
+ * perform a seek(next) operation. This effectively "pops" a pointer
+ * from the head of the index.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_pop(struct ioq_iterator *iter, int flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_IOQ_H */
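
For illustration, a minimal north-side producer sketch built only from the
declarations above; the ring is assumed to have been set up with ioq_init(),
and the concrete ioq_ops, shm_signal plumbing and error recovery are omitted.

/* Hypothetical helper, not part of the header above: post one buffer and
 * notify the south side. */
static int example_ioq_post(struct ioq *ioq, u64 ptr, u64 len)
{
	struct ioq_iterator iter;
	int ret;

	/* iterate the "inuse" index; default flags flip ownership on push */
	ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
	if (ret < 0)
		return ret;

	/* append at the tail of the index */
	ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
	if (ret < 0)
		return ret;

	/* fill the descriptor the iterator currently points at */
	iter.desc->ptr   = cpu_to_le64(ptr);
	iter.desc->len   = cpu_to_le64(len);
	iter.desc->valid = 1;

	/* advance the tail (push) and let the remote side know */
	ret = ioq_iter_push(&iter, 0);
	if (ret < 0)
		return ret;

	return ioq_signal(ioq, 0);
}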
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 5f43a3b2e3ad..4deb3834d62c 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -89,6 +89,14 @@
#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */
#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */
+#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
+ IP_VS_CONN_F_NOOUTPUT | \
+ IP_VS_CONN_F_INACTIVE | \
+ IP_VS_CONN_F_SEQ_MASK | \
+ IP_VS_CONN_F_NO_CPORT | \
+ IP_VS_CONN_F_TEMPLATE \
+ )
+
/* Flags that are not sent to backup server start from bit 16 */
#define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 80fcb53057bc..ff62d0145b8f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -29,61 +29,104 @@
#include <asm/irq_regs.h>
struct irq_desc;
+struct irq_data;
typedef void (*irq_flow_handler_t)(unsigned int irq,
struct irq_desc *desc);
-
+typedef void (*irq_preflow_handler_t)(struct irq_data *data);
/*
* IRQ line status.
*
- * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
+ * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
+ *
+ * IRQ_TYPE_NONE - default, unspecified type
+ * IRQ_TYPE_EDGE_RISING - rising edge triggered
+ * IRQ_TYPE_EDGE_FALLING - falling edge triggered
+ * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
+ * IRQ_TYPE_LEVEL_HIGH - high level triggered
+ * IRQ_TYPE_LEVEL_LOW - low level triggered
+ * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
+ * IRQ_TYPE_SENSE_MASK - Mask for all the above bits
+ * IRQ_TYPE_PROBE - Special flag for probing in progress
+ *
+ * Bits which can be modified via irq_set/clear/modify_status_flags()
+ * IRQ_LEVEL - Interrupt is level type. Will be also
+ * updated in the code when the above trigger
+ * bits are modified via set_irq_type()
+ * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect
+ * it from affinity setting
+ * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing
+ * IRQ_NOREQUEST - Interrupt cannot be requested via
+ * request_irq()
+ * IRQ_NOAUTOEN - Interrupt is not automatically enabled in
+ * request/setup_irq()
+ * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
+ * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
+ * IRQ_NESTED_THREAD - Interrupt nests into another thread
+ *
+ * Deprecated bits. They are kept updated as long as
+ * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits
+ * are internal state of the core code and if you really need to access
+ * them then talk to the genirq maintainer instead of hacking
+ * something weird.
*
- * IRQ types
*/
-#define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */
-#define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */
-#define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */
-#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
-#define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */
-#define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */
-#define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */
-#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */
-
-/* Internal flags */
-#define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */
-#define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! */
-#define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */
-#define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */
-#define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL 0x00004000 /* IRQ level triggered */
-#define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */
-#define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */
-#define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */
-#define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */
-#define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */
-#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
-#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
-#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
-#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
-#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
-#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
-#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
-#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
+enum {
+ IRQ_TYPE_NONE = 0x00000000,
+ IRQ_TYPE_EDGE_RISING = 0x00000001,
+ IRQ_TYPE_EDGE_FALLING = 0x00000002,
+ IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
+ IRQ_TYPE_LEVEL_HIGH = 0x00000004,
+ IRQ_TYPE_LEVEL_LOW = 0x00000008,
+ IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
+ IRQ_TYPE_SENSE_MASK = 0x0000000f,
+
+ IRQ_TYPE_PROBE = 0x00000010,
+
+ IRQ_LEVEL = (1 << 8),
+ IRQ_PER_CPU = (1 << 9),
+ IRQ_NOPROBE = (1 << 10),
+ IRQ_NOREQUEST = (1 << 11),
+ IRQ_NOAUTOEN = (1 << 12),
+ IRQ_NO_BALANCING = (1 << 13),
+ IRQ_MOVE_PCNTXT = (1 << 14),
+ IRQ_NESTED_THREAD = (1 << 15),
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+ IRQ_INPROGRESS = (1 << 16),
+ IRQ_REPLAY = (1 << 17),
+ IRQ_WAITING = (1 << 18),
+ IRQ_DISABLED = (1 << 19),
+ IRQ_PENDING = (1 << 20),
+ IRQ_MASKED = (1 << 21),
+ IRQ_MOVE_PENDING = (1 << 22),
+ IRQ_AFFINITY_SET = (1 << 23),
+ IRQ_WAKEUP = (1 << 24),
+#endif
+};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD)
-#ifdef CONFIG_IRQ_PER_CPU
-# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
-# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-#else
-# define CHECK_IRQ_PER_CPU(var) 0
-# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
-#endif
+#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status)
+{
+ return status & IRQ_PER_CPU;
+}
+
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
+ */
+enum {
+ IRQ_SET_MASK_OK = 0,
+ IRQ_SET_MASK_OK_NOCOPY,
+};
struct msi_desc;
@@ -91,6 +134,8 @@ struct msi_desc;
* struct irq_data - per irq and irq chip data passed down to chip functions
* @irq: interrupt number
* @node: node index useful for balancing
+ * @state_use_accessors: status information for irq chip functions.
+ * Use accessor functions to deal with it
* @chip: low level interrupt hardware access
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
@@ -105,6 +150,7 @@ struct msi_desc;
struct irq_data {
unsigned int irq;
unsigned int node;
+ unsigned int state_use_accessors;
struct irq_chip *chip;
void *handler_data;
void *chip_data;
@@ -114,6 +160,80 @@ struct irq_data {
#endif
};
+/*
+ * Bit masks for irq_data.state
+ *
+ * IRQD_TRIGGER_MASK - Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
+ * IRQD_NO_BALANCING - Balancing disabled for this IRQ
+ * IRQD_PER_CPU - Interrupt is per cpu
+ * IRQD_AFFINITY_SET - Interrupt affinity was set
+ * IRQD_LEVEL - Interrupt is level triggered
+ * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
+ * from suspend
+ * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
+ * context
+ */
+enum {
+ IRQD_TRIGGER_MASK = 0xf,
+ IRQD_SETAFFINITY_PENDING = (1 << 8),
+ IRQD_NO_BALANCING = (1 << 10),
+ IRQD_PER_CPU = (1 << 11),
+ IRQD_AFFINITY_SET = (1 << 12),
+ IRQD_LEVEL = (1 << 13),
+ IRQD_WAKEUP_STATE = (1 << 14),
+ IRQD_MOVE_PCNTXT = (1 << 15),
+};
+
+static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+}
+
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+ return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
+static inline u32 irqd_get_trigger_type(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_TRIGGER_MASK;
+}
+
+/*
+ * Must only be called inside irq_chip.irq_set_type() functions.
+ */
+static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
+{
+ d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
+ d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+}
+
+static inline bool irqd_is_level_type(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_LEVEL;
+}
+
+static inline bool irqd_is_wakeup_set(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_WAKEUP_STATE;
+}
+
+static inline bool irqd_can_move_in_process_context(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+}
+
/**
* struct irq_chip - hardware interrupt chip descriptor
*
@@ -150,6 +270,7 @@ struct irq_data {
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
* @irq_bus_lock: function to lock access to slow bus (i2c) chips
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
+ * @flags: chip specific flags
*
* @release: release function solely used by UML
*/
@@ -196,12 +317,25 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+ unsigned long flags;
+
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
void (*release)(unsigned int irq, void *dev_id);
#endif
};
+/*
+ * irq_chip specific flags
+ *
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ */
+enum {
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+ IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+};
+
/* This include will go away once we isolated irq_desc usage to core code */
#include <linux/irqdesc.h>
@@ -218,7 +352,7 @@ struct irq_chip {
# define ARCH_IRQ_INIT_FLAGS 0
#endif
-#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
+#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
@@ -229,9 +363,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void move_native_irq(int irq);
void move_masked_irq(int irq);
+void irq_move_irq(struct irq_data *data);
+void irq_move_masked_irq(struct irq_data *data);
#else
static inline void move_native_irq(int irq) { }
static inline void move_masked_irq(int irq) { }
+static inline void irq_move_irq(struct irq_data *data) { }
+static inline void irq_move_masked_irq(struct irq_data *data) { }
#endif
extern int no_irq_affinity;
@@ -267,23 +405,23 @@ extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handle);
-extern void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
+static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle)
+{
+ irq_set_chip_and_handler_name(irq, chip, handle, NULL);
+}
+
extern void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name);
-/*
- * Set a highlevel flow handler for a given IRQ:
- */
static inline void
-set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
- __set_irq_handler(irq, handle, 0, NULL);
+ __irq_set_handler(irq, handle, 0, NULL);
}
/*
@@ -292,14 +430,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
* IRQ_NOREQUEST and IRQ_NOPROBE)
*/
static inline void
-set_irq_chained_handler(unsigned int irq,
- irq_flow_handler_t handle)
+irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
- __set_irq_handler(irq, handle, 1, NULL);
+ __irq_set_handler(irq, handle, 1, NULL);
}
-extern void set_irq_nested_thread(unsigned int irq, int nest);
-
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -312,16 +447,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
irq_modify_status(irq, clr, 0);
}
-static inline void set_irq_noprobe(unsigned int irq)
+static inline void irq_set_noprobe(unsigned int irq)
{
irq_modify_status(irq, 0, IRQ_NOPROBE);
}
-static inline void set_irq_probe(unsigned int irq)
+static inline void irq_set_probe(unsigned int irq)
{
irq_modify_status(irq, IRQ_NOPROBE, 0);
}
+static inline void irq_set_nested_thread(unsigned int irq, bool nest)
+{
+ if (nest)
+ irq_set_status_flags(irq, IRQ_NESTED_THREAD);
+ else
+ irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
+}
+
/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
@@ -338,14 +481,14 @@ static inline void dynamic_irq_init(unsigned int irq)
}
/* Set/get chip/data for an IRQ: */
-extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
-extern int set_irq_data(unsigned int irq, void *data);
-extern int set_irq_chip_data(unsigned int irq, void *data);
-extern int set_irq_type(unsigned int irq, unsigned int type);
-extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_handler_data(unsigned int irq, void *data);
+extern int irq_set_chip_data(unsigned int irq, void *data);
+extern int irq_set_irq_type(unsigned int irq, unsigned int type);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);
-static inline struct irq_chip *get_irq_chip(unsigned int irq)
+static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->chip : NULL;
@@ -356,7 +499,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
return d->chip;
}
-static inline void *get_irq_chip_data(unsigned int irq)
+static inline void *irq_get_chip_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->chip_data : NULL;
@@ -367,18 +510,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
return d->chip_data;
}
-static inline void *get_irq_data(unsigned int irq)
+static inline void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->handler_data : NULL;
}
-static inline void *irq_data_get_irq_data(struct irq_data *d)
+static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
return d->handler_data;
}
-static inline struct msi_desc *get_irq_msi(unsigned int irq)
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->msi_desc : NULL;
@@ -389,6 +532,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
return d->msi_desc;
}
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+{
+ return irq_set_chip(irq, chip);
+}
+static inline int set_irq_data(unsigned int irq, void *data)
+{
+ return irq_set_handler_data(irq, data);
+}
+static inline int set_irq_chip_data(unsigned int irq, void *data)
+{
+ return irq_set_chip_data(irq, data);
+}
+static inline int set_irq_type(unsigned int irq, unsigned int type)
+{
+ return irq_set_irq_type(irq, type);
+}
+static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+ return irq_set_msi_desc(irq, entry);
+}
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
+{
+ return irq_get_chip(irq);
+}
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+ return irq_get_chip_data(irq);
+}
+static inline void *get_irq_data(unsigned int irq)
+{
+ return irq_get_handler_data(irq);
+}
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_handler_data(d);
+}
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
+{
+ return irq_get_msi_desc(irq);
+}
+static inline void set_irq_noprobe(unsigned int irq)
+{
+ irq_set_noprobe(irq);
+}
+static inline void set_irq_probe(unsigned int irq)
+{
+ irq_set_probe(irq);
+}
+static inline void set_irq_nested_thread(unsigned int irq, int nest)
+{
+ irq_set_nested_thread(irq, nest);
+}
+static inline void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle, const char *name)
+{
+ irq_set_chip_and_handler_name(irq, chip, handle, name);
+}
+static inline void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle)
+{
+ irq_set_chip_and_handler(irq, chip, handle);
+}
+static inline void
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+ const char *name)
+{
+ __irq_set_handler(irq, handle, is_chained, name);
+}
+static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+ irq_set_handler(irq, handle);
+}
+static inline void
+set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+ irq_set_chained_handler(irq, handle);
+}
+#endif
+
int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);
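
As a rough illustration of the renamed irq_* interfaces and the new irq_data
accessors above, a hypothetical demultiplexing driver might now look like the
sketch below; the chip, handlers and register accesses are placeholders and
not taken from any real driver.

/* chip callback: record the trigger type via the accessor, which is only
 * legal from within an irq_set_type() implementation */
static int example_irq_set_type(struct irq_data *d, unsigned int type)
{
	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* ... program the controller's trigger registers here ... */

	irqd_set_trigger_type(d, type);
	return 0;
}

/* flow handler for the parent line that demultiplexes child interrupts */
static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	/* ... read the status register and generic_handle_irq() children ... */
}

static void example_setup_irqs(struct irq_chip *chip, void *chip_data,
			       unsigned int child_irq, unsigned int parent_irq)
{
	/* formerly set_irq_chip_and_handler()/set_irq_chip_data()/set_irq_type() */
	irq_set_chip_and_handler(child_irq, chip, handle_level_irq);
	irq_set_chip_data(child_irq, chip_data);
	irq_set_irq_type(child_irq, IRQ_TYPE_LEVEL_HIGH);
	irq_set_noprobe(child_irq);

	/* formerly set_irq_chained_handler() */
	irq_set_chained_handler(parent_irq, example_demux_handler);
}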
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c1a95b7b58de..9eb9cd313052 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@
* For now it's included from <linux/irq.h>
*/
+struct irq_affinity_notify;
struct proc_dir_entry;
struct timer_rand_state;
/**
@@ -18,13 +19,16 @@ struct timer_rand_state;
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @action: the irq action chain
* @status: status information
+ * @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
+ * @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
+ * @threads_oneshot: bitfield to handle shared oneshot threads
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
* @dir: /proc/irq/ procfs entry
@@ -45,6 +49,7 @@ struct irq_desc {
struct {
unsigned int irq;
unsigned int node;
+ unsigned int pad_do_not_even_think_about_it;
struct irq_chip *chip;
void *handler_data;
void *chip_data;
@@ -59,9 +64,16 @@ struct irq_desc {
struct timer_rand_state *timer_rand_state;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+ irq_preflow_handler_t preflow_handler;
+#endif
struct irqaction *action; /* IRQ action list */
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+ unsigned int status_use_accessors;
+#else
unsigned int status; /* IRQ status */
-
+#endif
+ unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
@@ -70,10 +82,12 @@ struct irq_desc {
raw_spinlock_t lock;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
+ struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
#endif
#endif
+ unsigned long threads_oneshot;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PROC_FS
@@ -95,10 +109,46 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
#ifdef CONFIG_GENERIC_HARDIRQS
-#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
-#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
-#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
-#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
+static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
+{
+ return desc->irq_data.chip;
+}
+
+static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
+{
+ return desc->irq_data.chip_data;
+}
+
+static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
+{
+ return desc->irq_data.handler_data;
+}
+
+static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
+{
+ return desc->irq_data.msi_desc;
+}
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
+{
+ return irq_desc_get_chip(desc);
+}
+static inline void *get_irq_desc_data(struct irq_desc *desc)
+{
+ return irq_desc_get_handler_data(desc);
+}
+
+static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
+{
+ return irq_desc_get_chip_data(desc);
+}
+
+static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
+{
+ return irq_desc_get_msi_desc(desc);
+}
+#endif
/*
* Architectures call this to let the generic IRQ layer
@@ -123,6 +173,7 @@ static inline int irq_has_action(unsigned int irq)
return desc->action != NULL;
}
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
static inline int irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;
@@ -130,6 +181,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
desc = irq_to_desc(irq);
return desc->status & IRQ_NO_BALANCING_MASK;
}
+#endif
/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
@@ -140,6 +192,17 @@ static inline void __set_irq_handler_unlocked(int irq,
desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
+
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void
+__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->preflow_handler = handler;
+}
+#endif
#endif
#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 6811f4bfc6e7..922aa313c9f9 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
+extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);
#define TIMESTAMP_SIZE 30
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index ce0775aa64c3..7ff16f7d3ed4 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -64,7 +64,7 @@ struct kthread_work {
};
#define KTHREAD_WORKER_INIT(worker) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b5021db21858..ab428552af8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
#define KVM_REQ_DEACTIVATE_FPU 10
#define KVM_REQ_EVENT 11
#define KVM_REQ_APF_HALT 12
+#define KVM_REQ_NMI 13
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
@@ -98,23 +99,31 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+enum {
+ OUTSIDE_GUEST_MODE,
+ IN_GUEST_MODE,
+ EXITING_GUEST_MODE
+};
+
struct kvm_vcpu {
struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier preempt_notifier;
#endif
+ int cpu;
int vcpu_id;
- struct mutex mutex;
- int cpu;
- atomic_t guest_mode;
- struct kvm_run *run;
+ int srcu_idx;
+ int mode;
unsigned long requests;
unsigned long guest_debug;
- int srcu_idx;
+
+ struct mutex mutex;
+ struct kvm_run *run;
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
wait_queue_head_t wq;
+ struct pid *pid;
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
@@ -140,6 +149,11 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch;
};
+static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
+{
+ return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
+}
+
/*
* Some of the bitops functions do not support too long bitmaps.
* This number must be determined not to exceed such limits.
@@ -212,7 +226,6 @@ struct kvm_memslots {
struct kvm {
spinlock_t mmu_lock;
- raw_spinlock_t requests_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots;
@@ -223,6 +236,7 @@ struct kvm {
#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
+ int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus *buses[KVM_NR_BUSES];
@@ -719,11 +733,6 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req, &vcpu->requests);
}
-static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
-{
- return test_and_set_bit(req, &vcpu->requests);
-}
-
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
if (test_bit(req, &vcpu->requests)) {
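
Following the kvm_host.h changes above, a hedged sketch of how the new
vcpu->mode tri-state and the cmpxchg helper could be consumed when kicking a
vCPU; the barrier placement mirrors the usual request/kick pattern and the
IPI call is simplified for illustration.

static void example_request_nmi(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_NMI, vcpu);

	/* pair with the barrier the entry path issues before reading requests */
	smp_mb();

	/* only one kicker wins the transition out of IN_GUEST_MODE */
	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
		smp_send_reschedule(vcpu->cpu);
}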
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 0f19df9e37b0..ffd5c3d91193 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -145,6 +145,9 @@ extern void led_trigger_register_simple(const char *name,
extern void led_trigger_unregister_simple(struct led_trigger *trigger);
extern void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event);
+extern void led_trigger_blink(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
#else
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c9c5d7ad1a2b..71333aa39532 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -179,10 +179,6 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
- ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
- ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
- ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
- ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
@@ -198,7 +194,6 @@ enum {
ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
ATA_FLAG_AN = (1 << 18), /* controller supports AN */
ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
- ATA_FLAG_LPM = (1 << 20), /* driver can handle LPM */
ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
* management */
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
@@ -1050,6 +1045,8 @@ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth, int reason);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
+extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
diff --git a/include/linux/llist.h b/include/linux/llist.h
new file mode 100644
index 000000000000..f875604e67f9
--- /dev/null
+++ b/include/linux/llist.h
@@ -0,0 +1,98 @@
+#ifndef LLIST_H
+#define LLIST_H
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * If there are multiple producers and multiple consumers, llist_add
+ * can be used in producers and llist_del_all can be used in
+ * consumers. They can work simultaneously without a lock. But
+ * llist_del_first cannot be used here, because llist_del_first depends
+ * on list->first->next not changing while list->first stays unchanged
+ * during its operation, and an llist_del_first, llist_add, llist_add
+ * sequence in another consumer may violate that.
+ *
+ * If there are multiple producers and one consumer, llist_add can be
+ * used in producers and llist_del_all or llist_del_first can be used
+ * in the consumer.
+ *
+ * The list entries deleted via llist_del_all can be traversed with
+ * traversal functions such as llist_for_each(). But the list
+ * entries cannot be traversed safely before they are deleted from the list.
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have an NMI-safe cmpxchg implementation, the
+ * list can NOT be used in an NMI handler. So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ */
+
+struct llist_head {
+ struct llist_node *first;
+};
+
+struct llist_node {
+ struct llist_node *next;
+};
+
+#define LLIST_HEAD_INIT(name) { NULL }
+#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+
+/**
+ * init_llist_head - initialize lock-less list head
+ * @list: the head for your lock-less list
+ */
+static inline void init_llist_head(struct llist_head *list)
+{
+ list->first = NULL;
+}
+
+/**
+ * llist_entry - get the struct of this entry
+ * @ptr: the &struct llist_node pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the llist_node within the struct.
+ */
+#define llist_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * llist_for_each - iterate over some deleted entries of a lock-less list
+ * @pos: the &struct llist_node to use as a loop cursor
+ * @node: the first entry of deleted list entries
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being deleted from list, so start with an entry
+ * instead of list head.
+ */
+#define llist_for_each(pos, node) \
+ for (pos = (node); pos; pos = pos->next)
+
+/**
+ * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @node: the first entry of deleted list entries.
+ * @member: the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head.
+ */
+#define llist_for_each_entry(pos, node, member) \
+ for (pos = llist_entry((node), typeof(*pos), member); \
+ &pos->member != NULL; \
+ pos = llist_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * llist_empty - tests whether a lock-less list is empty
+ * @head: the list to test
+ */
+static inline int llist_empty(const struct llist_head *head)
+{
+ return head->first == NULL;
+}
+
+void llist_add(struct llist_node *new, struct llist_head *head);
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head);
+struct llist_node *llist_del_first(struct llist_head *head);
+struct llist_node *llist_del_all(struct llist_head *head);
+#endif /* LLIST_H */
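
A minimal multi-producer/single-consumer sketch of the interface above; the
item type, its payload and the consumer's processing are invented for
illustration.

#include <linux/kernel.h>
#include <linux/llist.h>

struct example_item {
	struct llist_node node;	/* first member, so container_of is trivial */
	int payload;
};

static LLIST_HEAD(example_list);

/* producer context: may run concurrently on several CPUs */
static void example_produce(struct example_item *item)
{
	llist_add(&item->node, &example_list);
}

/* single consumer: grab everything queued so far in one atomic operation */
static void example_consume(void)
{
	struct llist_node *entries = llist_del_all(&example_list);
	struct example_item *item;

	/* entries are safe to traverse once removed from the list */
	llist_for_each_entry(item, entries, node)
		pr_info("payload=%d\n", item->payload);
}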
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 62730ea2b56e..6cfe344f9559 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -27,6 +27,7 @@
#define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6
#define ANON_INODE_FS_MAGIC 0x09041934
+#define PSTOREFS_MAGIC 0x6165676C
#define MINIX_SUPER_MAGIC 0x137F /* original minix fs */
#define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */
diff --git a/include/linux/mfd/ab8500-gpadc.h b/include/linux/mfd/ab8500-gpadc.h
new file mode 100644
index 000000000000..9f6cc26bc734
--- /dev/null
+++ b/include/linux/mfd/ab8500-gpadc.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson SA
+ * Licensed under GPLv2.
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#ifndef _AB8500_GPADC_H
+#define _AB8500_GPADC_H
+
+/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2) */
+#define BAT_CTRL 0x01
+#define BTEMP_BALL 0x02
+#define MAIN_CHARGER_V 0x03
+#define ACC_DETECT1 0x04
+#define ACC_DETECT2 0x05
+#define ADC_AUX1 0x06
+#define ADC_AUX2 0x07
+#define MAIN_BAT_V 0x08
+#define VBUS_V 0x09
+#define MAIN_CHARGER_C 0x0A
+#define USB_CHARGER_C 0x0B
+#define BK_BAT_V 0x0C
+#define DIE_TEMP 0x0D
+
+int ab8500_gpadc_convert(u8 input);
+
+#endif /* _AB8500_GPADC_H */
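
For example, reading the main battery voltage channel through the interface
above might look like the snippet below; how the raw value is scaled to
millivolts is driver policy and not shown.

static int example_read_vbat_raw(void)
{
	/* MAIN_BAT_V selects the main battery voltage input (ADCSwSel) */
	int raw = ab8500_gpadc_convert(MAIN_BAT_V);

	if (raw < 0)
		return raw;	/* conversion error */

	return raw;		/* unscaled ADC reading */
}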
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index 37f56b7c4c15..56f8dea72152 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -111,8 +111,8 @@
* @dev: parent device
* @lock: read/write operations lock
* @irq_lock: genirq bus lock
- * @revision: chip revision
* @irq: irq line
+ * @chip_id: chip revision id
* @write: register write
* @read: register read
* @rx_buf: rx buf for SPI
@@ -124,7 +124,7 @@ struct ab8500 {
struct device *dev;
struct mutex lock;
struct mutex irq_lock;
- int revision;
+
int irq_base;
int irq;
u8 chip_id;
diff --git a/include/linux/mfd/ab8500/sysctrl.h b/include/linux/mfd/ab8500/sysctrl.h
new file mode 100644
index 000000000000..10da0291f8f8
--- /dev/null
+++ b/include/linux/mfd/ab8500/sysctrl.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __AB8500_SYSCTRL_H
+#define __AB8500_SYSCTRL_H
+
+#include <linux/bitops.h>
+
+#ifdef CONFIG_AB8500_CORE
+
+int ab8500_sysctrl_read(u16 reg, u8 *value);
+int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value);
+
+#else
+
+static inline int ab8500_sysctrl_read(u16 reg, u8 *value)
+{
+ return 0;
+}
+
+static inline int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+{
+ return 0;
+}
+
+#endif /* CONFIG_AB8500_CORE */
+
+static inline int ab8500_sysctrl_set(u16 reg, u8 bits)
+{
+ return ab8500_sysctrl_write(reg, bits, bits);
+}
+
+static inline int ab8500_sysctrl_clear(u16 reg, u8 bits)
+{
+ return ab8500_sysctrl_write(reg, bits, 0);
+}
+
+/* Registers */
+#define AB8500_TURNONSTATUS 0x100
+#define AB8500_RESETSTATUS 0x101
+#define AB8500_PONKEY1PRESSSTATUS 0x102
+#define AB8500_SYSCLKREQSTATUS 0x142
+#define AB8500_STW4500CTRL1 0x180
+#define AB8500_STW4500CTRL2 0x181
+#define AB8500_STW4500CTRL3 0x200
+#define AB8500_MAINWDOGCTRL 0x201
+#define AB8500_MAINWDOGTIMER 0x202
+#define AB8500_LOWBAT 0x203
+#define AB8500_BATTOK 0x204
+#define AB8500_SYSCLKTIMER 0x205
+#define AB8500_SMPSCLKCTRL 0x206
+#define AB8500_SMPSCLKSEL1 0x207
+#define AB8500_SMPSCLKSEL2 0x208
+#define AB8500_SMPSCLKSEL3 0x209
+#define AB8500_SYSULPCLKCONF 0x20A
+#define AB8500_SYSULPCLKCTRL1 0x20B
+#define AB8500_SYSCLKCTRL 0x20C
+#define AB8500_SYSCLKREQ1VALID 0x20D
+#define AB8500_SYSTEMCTRLSUP 0x20F
+#define AB8500_SYSCLKREQ1RFCLKBUF 0x210
+#define AB8500_SYSCLKREQ2RFCLKBUF 0x211
+#define AB8500_SYSCLKREQ3RFCLKBUF 0x212
+#define AB8500_SYSCLKREQ4RFCLKBUF 0x213
+#define AB8500_SYSCLKREQ5RFCLKBUF 0x214
+#define AB8500_SYSCLKREQ6RFCLKBUF 0x215
+#define AB8500_SYSCLKREQ7RFCLKBUF 0x216
+#define AB8500_SYSCLKREQ8RFCLKBUF 0x217
+#define AB8500_DITHERCLKCTRL 0x220
+#define AB8500_SWATCTRL 0x230
+#define AB8500_HIQCLKCTRL 0x232
+#define AB8500_VSIMSYSCLKCTRL 0x233
+
+/* Bits */
+#define AB8500_TURNONSTATUS_PORNVBAT BIT(0)
+#define AB8500_TURNONSTATUS_PONKEY1DBF BIT(1)
+#define AB8500_TURNONSTATUS_PONKEY2DBF BIT(2)
+#define AB8500_TURNONSTATUS_RTCALARM BIT(3)
+#define AB8500_TURNONSTATUS_MAINCHDET BIT(4)
+#define AB8500_TURNONSTATUS_VBUSDET BIT(5)
+#define AB8500_TURNONSTATUS_USBIDDETECT BIT(6)
+
+#define AB8500_RESETSTATUS_RESETN4500NSTATUS BIT(0)
+#define AB8500_RESETSTATUS_SWRESETN4500NSTATUS BIT(2)
+
+#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_MASK 0x7F
+#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_SHIFT 0
+
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ1STATUS BIT(0)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ2STATUS BIT(1)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ3STATUS BIT(2)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ4STATUS BIT(3)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ5STATUS BIT(4)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ6STATUS BIT(5)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ7STATUS BIT(6)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ8STATUS BIT(7)
+
+#define AB8500_STW4500CTRL1_SWOFF BIT(0)
+#define AB8500_STW4500CTRL1_SWRESET4500N BIT(1)
+#define AB8500_STW4500CTRL1_THDB8500SWOFF BIT(2)
+
+#define AB8500_STW4500CTRL2_RESETNVAUX1VALID BIT(0)
+#define AB8500_STW4500CTRL2_RESETNVAUX2VALID BIT(1)
+#define AB8500_STW4500CTRL2_RESETNVAUX3VALID BIT(2)
+#define AB8500_STW4500CTRL2_RESETNVMODVALID BIT(3)
+#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY1VALID BIT(4)
+#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY2VALID BIT(5)
+#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY3VALID BIT(6)
+#define AB8500_STW4500CTRL2_RESETNVSMPS1VALID BIT(7)
+
+#define AB8500_STW4500CTRL3_CLK32KOUT2DIS BIT(0)
+#define AB8500_STW4500CTRL3_RESETAUDN BIT(1)
+#define AB8500_STW4500CTRL3_RESETDENCN BIT(2)
+#define AB8500_STW4500CTRL3_THSDENA BIT(3)
+
+#define AB8500_MAINWDOGCTRL_MAINWDOGENA BIT(0)
+#define AB8500_MAINWDOGCTRL_MAINWDOGKICK BIT(1)
+#define AB8500_MAINWDOGCTRL_WDEXPTURNONVALID BIT(4)
+
+#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_MASK 0x7F
+#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_SHIFT 0
+
+#define AB8500_LOWBAT_LOWBATENA BIT(0)
+#define AB8500_LOWBAT_LOWBAT_MASK 0x7E
+#define AB8500_LOWBAT_LOWBAT_SHIFT 1
+
+#define AB8500_BATTOK_BATTOKSEL0THF_MASK 0x0F
+#define AB8500_BATTOK_BATTOKSEL0THF_SHIFT 0
+#define AB8500_BATTOK_BATTOKSEL1THF_MASK 0xF0
+#define AB8500_BATTOK_BATTOKSEL1THF_SHIFT 4
+
+#define AB8500_SYSCLKTIMER_SYSCLKTIMER_MASK 0x0F
+#define AB8500_SYSCLKTIMER_SYSCLKTIMER_SHIFT 0
+#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_MASK 0xF0
+#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_SHIFT 4
+
+#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_MASK 0x03
+#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_SHIFT 0
+#define AB8500_SMPSCLKCTRL_3M2CLKINTENA BIT(2)
+
+#define AB8500_SMPSCLKSEL1_VARMCLKSEL_MASK 0x07
+#define AB8500_SMPSCLKSEL1_VARMCLKSEL_SHIFT 0
+#define AB8500_SMPSCLKSEL1_VAPECLKSEL_MASK 0x38
+#define AB8500_SMPSCLKSEL1_VAPECLKSEL_SHIFT 3
+
+#define AB8500_SMPSCLKSEL2_VMODCLKSEL_MASK 0x07
+#define AB8500_SMPSCLKSEL2_VMODCLKSEL_SHIFT 0
+#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_MASK 0x38
+#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_SHIFT 3
+
+#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_MASK 0x07
+#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_SHIFT 0
+#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_MASK 0x38
+#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_SHIFT 3
+
+#define AB8500_SYSULPCLKCONF_ULPCLKCONF_MASK 0x03
+#define AB8500_SYSULPCLKCONF_ULPCLKCONF_SHIFT 0
+#define AB8500_SYSULPCLKCONF_CLK27MHZSTRE BIT(2)
+#define AB8500_SYSULPCLKCONF_TVOUTCLKDELN BIT(3)
+#define AB8500_SYSULPCLKCONF_TVOUTCLKINV BIT(4)
+#define AB8500_SYSULPCLKCONF_ULPCLKSTRE BIT(5)
+#define AB8500_SYSULPCLKCONF_CLK27MHZBUFENA BIT(6)
+#define AB8500_SYSULPCLKCONF_CLK27MHZPDENA BIT(7)
+
+#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK 0x03
+#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT 0
+#define AB8500_SYSULPCLKCTRL1_ULPCLKREQ BIT(2)
+#define AB8500_SYSULPCLKCTRL1_4500SYSCLKREQ BIT(3)
+#define AB8500_SYSULPCLKCTRL1_AUDIOCLKENA BIT(4)
+#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ BIT(5)
+#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ BIT(6)
+#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ BIT(7)
+
+#define AB8500_SYSCLKCTRL_TVOUTPLLENA BIT(0)
+#define AB8500_SYSCLKCTRL_TVOUTCLKENA BIT(1)
+#define AB8500_SYSCLKCTRL_USBCLKENA BIT(2)
+
+#define AB8500_SYSCLKREQ1VALID_SYSCLKREQ1VALID BIT(0)
+#define AB8500_SYSCLKREQ1VALID_ULPCLKREQ1VALID BIT(1)
+#define AB8500_SYSCLKREQ1VALID_USBSYSCLKREQ1VALID BIT(2)
+
+#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_MASK 0x03
+#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_SHIFT 0
+#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_MASK 0x0C
+#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_SHIFT 2
+#define AB8500_SYSTEMCTRLSUP_INTDB8500NOD BIT(4)
+
+#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF4 BIT(4)
+
+#define AB8500_DITHERCLKCTRL_VARMDITHERENA BIT(0)
+#define AB8500_DITHERCLKCTRL_VSMPS3DITHERENA BIT(1)
+#define AB8500_DITHERCLKCTRL_VSMPS1DITHERENA BIT(2)
+#define AB8500_DITHERCLKCTRL_VSMPS2DITHERENA BIT(3)
+#define AB8500_DITHERCLKCTRL_VMODDITHERENA BIT(4)
+#define AB8500_DITHERCLKCTRL_VAPEDITHERENA BIT(5)
+#define AB8500_DITHERCLKCTRL_DITHERDEL_MASK 0xC0
+#define AB8500_DITHERCLKCTRL_DITHERDEL_SHIFT 6
+
+#define AB8500_SWATCTRL_UPDATERF BIT(0)
+#define AB8500_SWATCTRL_SWATENABLE BIT(1)
+#define AB8500_SWATCTRL_RFOFFTIMER_MASK 0x1C
+#define AB8500_SWATCTRL_RFOFFTIMER_SHIFT 2
+#define AB8500_SWATCTRL_SWATBIT5 BIT(6)
+
+#define AB8500_HIQCLKCTRL_SYSCLKREQ1HIQENAVALID BIT(0)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ2HIQENAVALID BIT(1)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ3HIQENAVALID BIT(2)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ4HIQENAVALID BIT(3)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ5HIQENAVALID BIT(4)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ6HIQENAVALID BIT(5)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ7HIQENAVALID BIT(6)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ8HIQENAVALID BIT(7)
+
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ1VALID BIT(0)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ2VALID BIT(1)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ3VALID BIT(2)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ4VALID BIT(3)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ5VALID BIT(4)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ6VALID BIT(5)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ7VALID BIT(6)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ8VALID BIT(7)
+
+#endif /* __AB8500_SYSCTRL_H */
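
As a small usage sketch, the set/clear helpers above could be used to kick
the main watchdog; whether the watchdog must also be enabled first is
platform policy and not shown here.

static int example_kick_main_watchdog(void)
{
	/* ab8500_sysctrl_set() writes the kick bit under the same mask */
	return ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
				  AB8500_MAINWDOGCTRL_MAINWDOGKICK);
}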
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 67bd6f7ecf32..7d9b6ae1c203 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -186,7 +186,6 @@ struct abx500_init_settings {
struct ab3550_platform_data {
struct {unsigned int base; unsigned int count; } irq;
void *dev_data[AB3550_NUM_DEVICES];
- size_t dev_data_sz[AB3550_NUM_DEVICES];
struct abx500_init_settings *init_settings;
unsigned int init_settings_sz;
};
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 835996e167e1..ed9970412cc2 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -25,22 +25,20 @@ struct mfd_cell {
const char *name;
int id;
+ /* refcounting for multiple drivers to use a single cell */
+ atomic_t *usage_count;
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
+
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
- /* driver-specific data for MFD-aware "cell" drivers */
- void *driver_data;
-
- /* platform_data can be used to either pass data to "generic"
- driver or as a hook to mfd_cell for the "cell" drivers */
- void *platform_data;
- size_t data_size;
+ /* mfd_data can be used to pass data to client drivers */
+ void *mfd_data;
/*
- * This resources can be specified relatively to the parent device.
- * For accessing device you should use resources from device
+ * These resources can be specified relative to the parent device.
+ * For accessing hardware you should use resources from the platform dev
*/
int num_resources;
const struct resource *resources;
@@ -55,11 +53,47 @@ struct mfd_cell {
bool pm_runtime_no_callbacks;
};
+/*
+ * Convenience functions for clients using shared cells. Refcounting
+ * happens automatically, with the cell's enable/disable callbacks
+ * being called only when a device is first enabled or, on disable, when
+ * no other clients are still using it.
+ */
+extern int mfd_shared_cell_enable(struct platform_device *pdev);
+extern int mfd_shared_cell_disable(struct platform_device *pdev);
+
+/*
+ * Given a platform device that's been created by mfd_add_devices(), fetch
+ * the mfd_cell that created it.
+ */
+static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+/*
+ * Given a platform device that's been created by mfd_add_devices(), fetch
+ * the .mfd_data entry from the mfd_cell that created it.
+ */
+static inline void *mfd_get_data(struct platform_device *pdev)
+{
+ return mfd_get_cell(pdev)->mfd_data;
+}
+
extern int mfd_add_devices(struct device *parent, int id,
- const struct mfd_cell *cells, int n_devs,
+ struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base);
extern void mfd_remove_devices(struct device *parent);
+/*
+ * For MFD drivers with clients sharing access to resources, these create
+ * multiple platform devices per cell. Contention must still be
+ * handled by the drivers (i.e., with the enable/disable hooks).
+ */
+extern int mfd_shared_platform_driver_register(struct platform_driver *drv,
+ const char *cellname);
+extern void mfd_shared_platform_driver_unregister(struct platform_driver *drv);
+
#endif
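
To illustrate the new accessors, a hypothetical sub-device driver could fetch
its cell data as sketched below; the example_pdata type is invented, and real
drivers define their own.

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

struct example_pdata {
	int		irq;
	void __iomem	*regs;
};

static int example_probe(struct platform_device *pdev)
{
	/* mfd_get_data() returns whatever the parent put in .mfd_data */
	struct example_pdata *pdata = mfd_get_data(pdev);
	int ret;

	if (!pdata)
		return -ENODEV;

	/* shared cells are refcounted; enable runs only for the first user */
	ret = mfd_shared_cell_enable(pdev);
	if (ret)
		return ret;

	/* ... set up the sub-device using pdata->irq / pdata->regs ... */
	return 0;
}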
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index a1d391b40e68..c064beaaccb7 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -146,8 +146,7 @@ struct mc13xxx_platform_data {
#define MC13XXX_USE_LED (1 << 5)
unsigned int flags;
- int num_regulators;
- struct mc13xxx_regulator_init_data *regulators;
+ struct mc13xxx_regulator_platform_data regulators;
struct mc13xxx_leds_platform_data *leds;
};
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index fd322aca33ba..afe4db49402d 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -80,7 +80,8 @@ struct wm831x_touch_pdata {
int isel; /** Current for pen down (uA) */
int rpu; /** Pen down sensitivity resistor divider */
int pressure; /** Report pressure (boolean) */
- int data_irq; /** Touch data ready IRQ */
+ unsigned int data_irq; /** Touch data ready IRQ */
+ unsigned int pd_irq; /** Touch pendown detect IRQ */
};
enum wm831x_watchdog_action {
@@ -103,11 +104,17 @@ struct wm831x_watchdog_pdata {
#define WM831X_MAX_ISINK 2
struct wm831x_pdata {
+ /** Used to distinguish multiple WM831x chips */
+ int wm831x_num;
+
/** Called before subdevices are set up */
int (*pre_init)(struct wm831x *wm831x);
/** Called after subdevices are set up */
int (*post_init)(struct wm831x *wm831x);
+ /** Put the /IRQ line into CMOS mode */
+ bool irq_cmos;
+
int irq_base;
int gpio_base;
struct wm831x_backlight_pdata *backlight;
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 3fd36845ca45..ef4f0b6083a3 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -71,6 +71,7 @@ struct wm8994 {
u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
/* Used over suspend/resume */
+ bool suspended;
u16 ldo_regs[WM8994_NUM_LDO_REGS];
u16 gpio_regs[WM8994_NUM_GPIO_REGS];
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 9eab263658be..466b1c777aff 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -103,13 +103,21 @@ struct wm8994_pdata {
unsigned int lineout1fb:1;
unsigned int lineout2fb:1;
- /* Microphone biases: 0=0.9*AVDD1 1=0.65*AVVD1 */
+ /* IRQ for microphone detection if brought out directly as a
+ * signal.
+ */
+ int micdet_irq;
+
+ /* WM8994 microphone biases: 0=0.9*AVDD1 1=0.65*AVDD1 */
unsigned int micbias1_lvl:1;
unsigned int micbias2_lvl:1;
- /* Jack detect threashold levels, see datasheet for values */
+ /* WM8994 jack detect threshold levels, see datasheet for values */
unsigned int jd_scthr:2;
unsigned int jd_thr:2;
+
+ /* WM8958 microphone bias configuration */
+ int micbias[2];
};
#endif
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index be072faec6f0..f3ee84284670 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -63,6 +63,8 @@
#define WM8994_MICBIAS 0x3A
#define WM8994_LDO_1 0x3B
#define WM8994_LDO_2 0x3C
+#define WM8958_MICBIAS1 0x3D
+#define WM8958_MICBIAS2 0x3E
#define WM8994_CHARGE_PUMP_1 0x4C
#define WM8958_CHARGE_PUMP_2 0x4D
#define WM8994_CLASS_W_1 0x51
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
new file mode 100644
index 000000000000..dd8da342a991
--- /dev/null
+++ b/include/linux/micrel_phy.h
@@ -0,0 +1,16 @@
+#ifndef _MICREL_PHY_H
+#define _MICREL_PHY_H
+
+#define MICREL_PHY_ID_MASK 0x00fffff0
+
+#define PHY_ID_KSZ9021 0x00221611
+#define PHY_ID_KS8737 0x00221720
+#define PHY_ID_KS8041 0x00221510
+#define PHY_ID_KS8051 0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
+#define PHY_ID_KS8001 0x0022161A
+
+/* struct phy_device dev_flags definitions */
+#define MICREL_PHY_50MHZ_CLK 0x00000001
+
+#endif /* _MICREL_PHY_H */
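The new header only supplies IDs and a mask; a short sketch of the intended comparison (the helper name is illustrative, and only the constants come from this file):

#include <linux/phy.h>
#include <linux/micrel_phy.h>

/* Does this phy_device look like a KSZ9021? */
static bool foo_is_ksz9021(struct phy_device *phydev)
{
	return (phydev->phy_id & MICREL_PHY_ID_MASK) ==
	       (PHY_ID_KSZ9021 & MICREL_PHY_ID_MASK);
}

Board code can likewise set MICREL_PHY_50MHZ_CLK in the phy_device's dev_flags to request the 50 MHz clock mode.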
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f6385fc17ad4..78219887308e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -965,6 +965,10 @@ static inline int handle_mm_fault(struct mm_struct *mm,
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, unsigned int foll_flags,
+ struct page **pages, struct vm_area_struct **vmas,
+ int *nonblocking);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas);
@@ -1309,8 +1313,6 @@ int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
u64 __init find_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit);
-void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
- u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
@@ -1530,6 +1532,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_MLOCK 0x40 /* mark page as mlocked */
#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
+#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
@@ -1622,14 +1625,6 @@ extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
extern int soft_offline_page(struct page *page, int flags);
-#ifdef CONFIG_MEMORY_FAILURE
-int is_hwpoison_address(unsigned long addr);
-#else
-static inline int is_hwpoison_address(unsigned long addr)
-{
- return 0;
-}
-#endif
extern void dump_page(struct page *page);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 8ce082781ccb..adb4888248be 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -54,6 +54,9 @@ struct mmc_ext_csd {
unsigned int sec_trim_mult; /* Secure trim multiplier */
unsigned int sec_erase_mult; /* Secure erase multiplier */
unsigned int trim_timeout; /* In milliseconds */
+ bool enhanced_area_en; /* enable bit */
+ unsigned long long enhanced_area_offset; /* Units: Byte */
+ unsigned int enhanced_area_size; /* Units: KB */
};
struct sd_scr {
@@ -121,6 +124,7 @@ struct mmc_card {
/* for byte mode */
#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
/* (missing CIA registers) */
+#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
@@ -148,6 +152,8 @@ struct mmc_card {
struct dentry *debugfs_root;
};
+void mmc_fixup_device(struct mmc_card *dev);
+
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 16b0261763ed..3f22c201ee3a 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -165,14 +165,12 @@ struct dw_mci_dma_ops {
};
/* IP Quirks/flags. */
-/* No special quirks or flags to cater for */
-#define DW_MCI_QUIRK_NONE 0
/* DTO fix for command transmission with IDMAC configured */
-#define DW_MCI_QUIRK_IDMAC_DTO 1
+#define DW_MCI_QUIRK_IDMAC_DTO BIT(0)
/* delay needed between retries on some 2.11a implementations */
-#define DW_MCI_QUIRK_RETRY_DELAY 2
+#define DW_MCI_QUIRK_RETRY_DELAY BIT(1)
/* High Speed Capable - Supports HS cards (upto 50MHz) */
-#define DW_MCI_QUIRK_HIGHSPEED 4
+#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
struct dma_pdata;
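Expressing the quirks with BIT() makes the bit-flag intent explicit, so several of them can be OR'ed into one mask and tested independently. A sketch, assuming the platform data carries a quirks bitmask as dw_mmc's board data does:

#include <linux/mmc/dw_mmc.h>

/* Board code sketch: combine quirks into a single mask. */
static struct dw_mci_board foo_dw_mci_pdata = {
	.quirks = DW_MCI_QUIRK_IDMAC_DTO | DW_MCI_QUIRK_HIGHSPEED,
};

The driver side can then test each flag on its own, e.g. delaying retries only when the quirks mask contains DW_MCI_QUIRK_RETRY_DELAY.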
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 612301f85d14..264ba5451e3b 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -253,6 +253,8 @@ struct _mmc_csd {
* EXT_CSD fields
*/
+#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
+#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
#define EXT_CSD_BUS_WIDTH 183 /* R/W */
@@ -262,6 +264,7 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE 196 /* RO */
#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */
#define EXT_CSD_S_A_TIMEOUT 217 /* RO */
+#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */
#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */
#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */
#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 02ecb0189b1d..e56f835274c9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -472,7 +472,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
#ifdef CONFIG_NUMA
/*
- * The NUMA zonelists are doubled becausse we need zonelists that restrict the
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
* allocations to a single node for GFP_THISNODE.
*
* [0] : Zonelist with fallback
diff --git a/include/linux/module.h b/include/linux/module.h
index 9bdf27c7615b..65cc6cc73ca8 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -62,7 +62,10 @@ struct module_version_attribute {
struct module_attribute mattr;
const char *module_name;
const char *version;
-};
+} __attribute__ ((__aligned__(sizeof(void *))));
+
+extern ssize_t __modver_version_show(struct module_attribute *,
+ struct module *, char *);
struct module_kobject
{
@@ -172,12 +175,7 @@ extern struct module __this_module;
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#else
#define MODULE_VERSION(_version) \
- extern ssize_t __modver_version_show(struct module_attribute *, \
- struct module *, char *); \
- static struct module_version_attribute __modver_version_attr \
- __used \
- __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
- = { \
+ static struct module_version_attribute ___modver_attr = { \
.mattr = { \
.attr = { \
.name = "version", \
@@ -187,7 +185,10 @@ extern struct module __this_module;
}, \
.module_name = KBUILD_MODNAME, \
.version = _version, \
- }
+ }; \
+ static const struct module_version_attribute \
+ __used __attribute__ ((__section__ ("__modver"))) \
+ * __moduleparam_const __modver_attr = &___modver_attr
#endif
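The rework only changes how the version attribute is emitted into the "__modver" section; usage from a module author's point of view is unchanged:

/* Unchanged usage: */
MODULE_LICENSE("GPL");
MODULE_VERSION("1.2.3");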
/* Optional firmware file (or files) needed by the module
@@ -367,34 +368,35 @@ struct module
struct module_notes_attrs *notes_attrs;
#endif
+ /* The command line arguments (may be mangled). People like
+ keeping pointers to this stuff */
+ char *args;
+
#ifdef CONFIG_SMP
/* Per-cpu data. */
void __percpu *percpu;
unsigned int percpu_size;
#endif
- /* The command line arguments (may be mangled). People like
- keeping pointers to this stuff */
- char *args;
#ifdef CONFIG_TRACEPOINTS
- struct tracepoint * const *tracepoints_ptrs;
unsigned int num_tracepoints;
+ struct tracepoint * const *tracepoints_ptrs;
#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
#endif
#ifdef CONFIG_TRACING
- const char **trace_bprintk_fmt_start;
unsigned int num_trace_bprintk_fmt;
+ const char **trace_bprintk_fmt_start;
#endif
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call **trace_events;
unsigned int num_trace_events;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
- unsigned long *ftrace_callsites;
unsigned int num_ftrace_callsites;
+ unsigned long *ftrace_callsites;
#endif
#ifdef CONFIG_MODULE_UNLOAD
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 07b41951e3fa..ddaae98c53f9 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -67,9 +67,9 @@ struct kparam_string {
struct kparam_array
{
unsigned int max;
+ unsigned int elemsize;
unsigned int *num;
const struct kernel_param_ops *ops;
- unsigned int elemsize;
void *elem;
};
@@ -371,8 +371,9 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
*/
#define module_param_array_named(name, array, type, nump, perm) \
static const struct kparam_array __param_arr_##name \
- = { ARRAY_SIZE(array), nump, &param_ops_##type, \
- sizeof(array[0]), array }; \
+ = { .max = ARRAY_SIZE(array), .num = nump, \
+ .ops = &param_ops_##type, \
+ .elemsize = sizeof(array[0]), .elem = array }; \
__module_param_call(MODULE_PARAM_PREFIX, name, \
&param_array_ops, \
.arr = &__param_arr_##name, \
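The switch to designated initializers follows the field reordering in struct kparam_array above; invocation of the macro is unchanged. A typical use, for reference (the module and parameter names are illustrative):

static int foo_ports[4];
static unsigned int foo_nports;
module_param_array_named(ports, foo_ports, int, &foo_nports, 0444);
MODULE_PARM_DESC(ports, "List of port numbers (illustrative example)");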
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index cd6f3b431195..d60130f88eed 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -168,6 +168,7 @@
#define ONENAND_SYS_CFG1_INT (1 << 6)
#define ONENAND_SYS_CFG1_IOBE (1 << 5)
#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4)
+#define ONENAND_SYS_CFG1_VHF (1 << 3)
#define ONENAND_SYS_CFG1_HF (1 << 2)
#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1)
diff --git a/include/linux/net.h b/include/linux/net.h
index 16faa130088c..94de83c0f877 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -118,6 +118,7 @@ enum sock_shutdown_cmd {
};
struct socket_wq {
+ /* Note: wait MUST be first field of socket_wq */
wait_queue_head_t wait;
struct fasync_struct *fasync_list;
struct rcu_head rcu;
@@ -142,7 +143,7 @@ struct socket {
unsigned long flags;
- struct socket_wq *wq;
+ struct socket_wq __rcu *wq;
struct file *file;
struct sock *sk;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d971346b0340..ffe56c16df8a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -75,6 +75,9 @@ struct wireless_dev;
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP 0
+
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
@@ -551,14 +554,16 @@ struct rps_map {
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
*/
struct rps_dev_flow {
u16 cpu;
- u16 fill;
+ u16 filter;
unsigned int last_qtail;
};
+#define RPS_NO_FILTER 0xffff
/*
* The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +613,11 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ u32 flow_id, u16 filter_id);
+#endif
+
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
struct rps_map __rcu *rps_map;
@@ -643,6 +653,14 @@ struct xps_dev_maps {
(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
+#define TC_MAX_QUEUE 16
+#define TC_BITMASK 15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+ u16 count;
+ u16 offset;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -753,6 +771,38 @@ struct xps_dev_maps {
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ * Called to setup 'tc' number of traffic classes in the net device. This
+ * is always called from the stack with the rtnl lock held and netif tx
+ * queues stopped. This allows the netdevice to perform queue management
+ * safely.
+ *
+ * RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ * u16 rxq_index, u32 flow_id);
+ * Set hardware filter for RFS. rxq_index is the target queue index;
+ * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ * Return the filter ID on success, or a negative error code.
+ *
+ * Slave management functions (for bridge, bonding, etc). User should
+ * call netdev_set_master() to set dev->master properly.
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ * Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ * Called to release previously enslaved netdev.
+ *
+ * Feature/offload setting functions.
+ * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * Called to update device configuration to new features. Passed
+ * feature set might be less than what was returned by ndo_fix_features().
+ * Must return >0 or -errno if it changed dev->features itself.
+ *
*/
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
@@ -811,6 +861,7 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -825,6 +876,20 @@ struct net_device_ops {
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
+#ifdef CONFIG_RFS_ACCEL
+ int (*ndo_rx_flow_steer)(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 rxq_index,
+ u32 flow_id);
+#endif
+ int (*ndo_add_slave)(struct net_device *dev,
+ struct net_device *slave_dev);
+ int (*ndo_del_slave)(struct net_device *dev,
+ struct net_device *slave_dev);
+ u32 (*ndo_fix_features)(struct net_device *dev,
+ u32 features);
+ int (*ndo_set_features)(struct net_device *dev,
+ u32 features);
};
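A hedged sketch of a driver wiring up the new feature hooks; the driver name and the hardware programming step are hypothetical, only the ops and NETIF_F_* bits come from this patch:

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* Example constraint: this hardware cannot checksum without SG. */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_IP_CSUM;
	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* program the (hypothetical) RX-checksum enable bit here */
	}
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	/* ... the usual hooks ... */
	.ndo_fix_features	= foo_fix_features,
	.ndo_set_features	= foo_set_features,
};

In probe, such a driver would also advertise the user-toggleable bits via dev->hw_features (e.g. NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM) before registering the netdev.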
/*
@@ -876,8 +941,18 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
- /* Net device features */
- unsigned long features;
+ /* currently active device features */
+ u32 features;
+ /* user-changeable features */
+ u32 hw_features;
+ /* user-requested features */
+ u32 wanted_features;
+ /* VLAN feature mask */
+ u32 vlan_features;
+
+ /* Net device feature bits; if you change something,
+ * also update netdev_features_strings[] in ethtool.c */
+
#define NETIF_F_SG 1 /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
@@ -902,6 +977,7 @@ struct net_device {
#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
+#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT 16
@@ -913,6 +989,12 @@ struct net_device {
#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
+ /* Features valid for ethtool to change */
+ /* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE (NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_ETHTOOL_BITS (0x3f3fffff & ~NETIF_F_NEVER_CHANGE)
+
/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
NETIF_F_TSO6 | NETIF_F_UFO)
@@ -923,6 +1005,12 @@ struct net_device {
#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+
/*
* If one device supports one of these features, then enable them
* for all in netdev_increment_features.
@@ -931,6 +1019,9 @@ struct net_device {
NETIF_F_SG | NETIF_F_HIGHDMA | \
NETIF_F_FRAGLIST)
+ /* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+
/* Interface index. Unique device identifier */
int ifindex;
int iflink;
@@ -1039,6 +1130,13 @@ struct net_device {
/* Number of RX queues currently active in device */
unsigned int real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+ /* CPU reverse-mapping for RX completion interrupts, indexed
+ * by RX queue number. Assigned by driver. This must only be
+ * set if the ndo_rx_flow_steer operation is defined. */
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
#endif
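A hedged sketch of how a multiqueue driver might populate rx_cpu_rmap, assuming the cpu_rmap helpers (alloc_irq_cpu_rmap(), irq_cpu_rmap_add(), free_irq_cpu_rmap()) that accompany this RFS-acceleration work; it only builds when CONFIG_RFS_ACCEL is set, and the per-queue IRQ array is an assumption of the example:

#include <linux/cpu_rmap.h>
#include <linux/netdevice.h>

static int foo_setup_rfs_rmap(struct net_device *dev,
			      const int *rx_irq, int nr_rx_queues)
{
	int i, err;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(nr_rx_queues);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < nr_rx_queues; i++) {
		err = irq_cpu_rmap_add(dev->rx_cpu_rmap, rx_irq[i]);
		if (err) {
			free_irq_cpu_rmap(dev->rx_cpu_rmap);
			dev->rx_cpu_rmap = NULL;
			return err;
		}
	}
	return 0;
}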
rx_handler_func_t __rcu *rx_handler;
@@ -1132,9 +1230,6 @@ struct net_device {
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
- /* VLAN feature mask */
- unsigned long vlan_features;
-
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
@@ -1143,6 +1238,9 @@ struct net_device {
/* Data Center Bridging netlink ops */
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/* max exchange id for FCoE LRO by ddp */
@@ -1153,12 +1251,66 @@ struct net_device {
/* phy device may attach itself for hardware timestamping */
struct phy_device *phydev;
+
+ /* group the device belongs to */
+ int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
#define NETDEV_ALIGN 32
static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+ return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+ return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+ dev->num_tc = 0;
+ memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+ memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->tc_to_txq[tc].count = count;
+ dev->tc_to_txq[tc].offset = offset;
+ return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+ if (num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ dev->num_tc = num_tc;
+ return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+ return dev->num_tc;
+}
+
+static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
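The traffic-class helpers added in the hunk above are meant to be driven from ndo_setup_tc(); a minimal sketch for a device that splits its TX queues evenly across the requested classes (the even split and the 1:1 priority mapping are assumptions for illustration):

static int foo_setup_tc(struct net_device *dev, u8 num_tc)
{
	u16 q_per_tc;
	u8 tc;

	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	if (netdev_set_num_tc(dev, num_tc))
		return -EINVAL;

	q_per_tc = dev->real_num_tx_queues / num_tc;
	for (tc = 0; tc < num_tc; tc++) {
		netdev_set_tc_queue(dev, tc, q_per_tc, tc * q_per_tc);
		/* Illustrative 1:1 priority-to-class mapping. */
		netdev_set_prio_tc_map(dev, tc, tc);
	}
	return 0;
}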
@@ -1300,7 +1452,7 @@ struct packet_type {
struct packet_type *,
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
@@ -1345,7 +1497,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev)
struct net *net;
net = dev_net(dev);
- lh = rcu_dereference(dev->dev_list.next);
+ lh = rcu_dereference(list_next_rcu(&dev->dev_list));
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
@@ -1355,6 +1507,13 @@ static inline struct net_device *first_net_device(struct net *net)
net_device_entry(net->dev_base_head.next);
}
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+ struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
@@ -1844,6 +2003,7 @@ extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
+extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
@@ -2267,8 +2427,10 @@ extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
+extern int netdev_set_bond_master(struct net_device *dev,
+ struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
@@ -2295,22 +2457,26 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
extern void linkwatch_run_queue(void);
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
- unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+static inline u32 netdev_get_wanted_features(struct net_device *dev)
+{
+ return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
+void netdev_update_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
{
int feature = gso_type << NETIF_F_GSO_SHIFT;
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
@@ -2328,15 +2494,9 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
-extern int __skb_bond_should_drop(struct sk_buff *skb,
- struct net_device *master);
-
-static inline int skb_bond_should_drop(struct sk_buff *skb,
- struct net_device *master)
+static inline int netif_is_bond_slave(struct net_device *dev)
{
- if (master)
- return __skb_bond_should_drop(skb, master);
- return 0;
+ return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -2351,6 +2511,8 @@ static inline int dev_ethtool_get_settings(struct net_device *dev,
static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
+ if (dev->hw_features & NETIF_F_RXCSUM)
+ return !!(dev->features & NETIF_F_RXCSUM);
if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
return 0;
return dev->ethtool_ops->get_rx_csum(dev);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 1893837b3966..eeec00abb664 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -24,16 +24,20 @@
#define NF_MAX_VERDICT NF_STOP
/* we overload the higher bits for encoding auxiliary data such as the queue
- * number. Not nice, but better than additional function arguments. */
-#define NF_VERDICT_MASK 0x0000ffff
-#define NF_VERDICT_BITS 16
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
+
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
#define NF_VERDICT_QMASK 0xffff0000
#define NF_VERDICT_QBITS 16
-#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
-#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
/* only for userspace compatibility */
#ifndef __KERNEL__
@@ -41,6 +45,9 @@
<= 0x2000 is used for protocol-flags. */
#define NFC_UNKNOWN 0x4000
#define NFC_ALTERED 0x8000
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
#endif
enum nf_inet_hooks {
@@ -72,6 +79,10 @@ union nf_inet_addr {
#ifdef __KERNEL__
#ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+ return -(verdict >> NF_VERDICT_QBITS);
+}
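With this encoding the errno travels in the upper 16 bits of the verdict; a short sketch of the round trip (the hook function below is simplified, not the real nf_hookfn signature):

/* Hook side: drop the packet and record why. */
static unsigned int foo_verdict(struct sk_buff *skb)
{
	return NF_DROP_ERR(-EHOSTUNREACH);	/* errno encoded above NF_DROP */
}

/* Core side: (verdict & NF_VERDICT_MASK) == NF_DROP holds, and
 * NF_DROP_GETERR(verdict) recovers -EHOSTUNREACH. */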
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
@@ -267,7 +278,7 @@ struct nf_afinfo {
int route_key_size;
};
-extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
return rcu_dereference(nf_afinfo[family]);
@@ -357,9 +368,9 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 9d40effe7ca7..15e83bf3dd58 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -1,3 +1,5 @@
+header-y += ipset/
+
header-y += nf_conntrack_common.h
header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
@@ -9,6 +11,7 @@ header-y += nfnetlink_conntrack.h
header-y += nfnetlink_log.h
header-y += nfnetlink_queue.h
header-y += x_tables.h
+header-y += xt_AUDIT.h
header-y += xt_CHECKSUM.h
header-y += xt_CLASSIFY.h
header-y += xt_CONNMARK.h
@@ -34,6 +37,7 @@ header-y += xt_connmark.h
header-y += xt_conntrack.h
header-y += xt_cpu.h
header-y += xt_dccp.h
+header-y += xt_devgroup.h
header-y += xt_dscp.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
@@ -54,7 +58,9 @@ header-y += xt_quota.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
+header-y += xt_set.h
header-y += xt_sctp.h
+header-y += xt_socket.h
header-y += xt_state.h
header-y += xt_statistic.h
header-y += xt_string.h
diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild
new file mode 100644
index 000000000000..601fe71d34d5
--- /dev/null
+++ b/include/linux/netfilter/ipset/Kbuild
@@ -0,0 +1,4 @@
+header-y += ip_set.h
+header-y += ip_set_bitmap.h
+header-y += ip_set_hash.h
+header-y += ip_set_list.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644
index 000000000000..ec333d83f3b4
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -0,0 +1,452 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The protocol version */
+#define IPSET_PROTOCOL 6
+
+/* The max length of strings including NUL: set and type identifiers */
+#define IPSET_MAXNAMELEN 32
+
+/* Message types and commands */
+enum ipset_cmd {
+ IPSET_CMD_NONE,
+ IPSET_CMD_PROTOCOL, /* 1: Return protocol version */
+ IPSET_CMD_CREATE, /* 2: Create a new (empty) set */
+ IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */
+ IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */
+ IPSET_CMD_RENAME, /* 5: Rename a set */
+ IPSET_CMD_SWAP, /* 6: Swap two sets */
+ IPSET_CMD_LIST, /* 7: List sets */
+ IPSET_CMD_SAVE, /* 8: Save sets */
+ IPSET_CMD_ADD, /* 9: Add an element to a set */
+ IPSET_CMD_DEL, /* 10: Delete an element from a set */
+ IPSET_CMD_TEST, /* 11: Test an element in a set */
+ IPSET_CMD_HEADER, /* 12: Get set header data only */
+ IPSET_CMD_TYPE, /* 13: Get set type */
+ IPSET_MSG_MAX, /* Netlink message commands */
+
+ /* Commands in userspace: */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
+ IPSET_CMD_HELP, /* 15: Get help */
+ IPSET_CMD_VERSION, /* 16: Get program version */
+ IPSET_CMD_QUIT, /* 17: Quit from interactive mode */
+
+ IPSET_CMD_MAX,
+
+ IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+};
+
+/* Attributes at command level */
+enum {
+ IPSET_ATTR_UNSPEC,
+ IPSET_ATTR_PROTOCOL, /* 1: Protocol version */
+ IPSET_ATTR_SETNAME, /* 2: Name of the set */
+ IPSET_ATTR_TYPENAME, /* 3: Typename */
+ IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */
+ IPSET_ATTR_REVISION, /* 4: Settype revision */
+ IPSET_ATTR_FAMILY, /* 5: Settype family */
+ IPSET_ATTR_FLAGS, /* 6: Flags at command level */
+ IPSET_ATTR_DATA, /* 7: Nested attributes */
+ IPSET_ATTR_ADT, /* 8: Multiple data containers */
+ IPSET_ATTR_LINENO, /* 9: Restore lineno */
+ IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
+ IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+ __IPSET_ATTR_CMD_MAX,
+};
+#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1)
+
+/* CADT specific attributes */
+enum {
+ IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1,
+ IPSET_ATTR_IP_FROM = IPSET_ATTR_IP,
+ IPSET_ATTR_IP_TO, /* 2 */
+ IPSET_ATTR_CIDR, /* 3 */
+ IPSET_ATTR_PORT, /* 4 */
+ IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT,
+ IPSET_ATTR_PORT_TO, /* 5 */
+ IPSET_ATTR_TIMEOUT, /* 6 */
+ IPSET_ATTR_PROTO, /* 7 */
+ IPSET_ATTR_CADT_FLAGS, /* 8 */
+ IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */
+ /* Reserve empty slots */
+ IPSET_ATTR_CADT_MAX = 16,
+ /* Create-only specific attributes */
+ IPSET_ATTR_GC,
+ IPSET_ATTR_HASHSIZE,
+ IPSET_ATTR_MAXELEM,
+ IPSET_ATTR_NETMASK,
+ IPSET_ATTR_PROBES,
+ IPSET_ATTR_RESIZE,
+ IPSET_ATTR_SIZE,
+ /* Kernel-only */
+ IPSET_ATTR_ELEMENTS,
+ IPSET_ATTR_REFERENCES,
+ IPSET_ATTR_MEMSIZE,
+
+ __IPSET_ATTR_CREATE_MAX,
+};
+#define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1)
+
+/* ADT specific attributes */
+enum {
+ IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1,
+ IPSET_ATTR_NAME,
+ IPSET_ATTR_NAMEREF,
+ IPSET_ATTR_IP2,
+ IPSET_ATTR_CIDR2,
+ __IPSET_ATTR_ADT_MAX,
+};
+#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
+
+/* IP specific attributes */
+enum {
+ IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1,
+ IPSET_ATTR_IPADDR_IPV6,
+ __IPSET_ATTR_IPADDR_MAX,
+};
+#define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1)
+
+/* Error codes */
+enum ipset_errno {
+ IPSET_ERR_PRIVATE = 4096,
+ IPSET_ERR_PROTOCOL,
+ IPSET_ERR_FIND_TYPE,
+ IPSET_ERR_MAX_SETS,
+ IPSET_ERR_BUSY,
+ IPSET_ERR_EXIST_SETNAME2,
+ IPSET_ERR_TYPE_MISMATCH,
+ IPSET_ERR_EXIST,
+ IPSET_ERR_INVALID_CIDR,
+ IPSET_ERR_INVALID_NETMASK,
+ IPSET_ERR_INVALID_FAMILY,
+ IPSET_ERR_TIMEOUT,
+ IPSET_ERR_REFERENCED,
+ IPSET_ERR_IPADDR_IPV4,
+ IPSET_ERR_IPADDR_IPV6,
+
+ /* Type specific error codes */
+ IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+/* Flags at command level */
+enum ipset_cmd_flags {
+ IPSET_FLAG_BIT_EXIST = 0,
+ IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST),
+};
+
+/* Flags at CADT attribute level */
+enum ipset_cadt_flags {
+ IPSET_FLAG_BIT_BEFORE = 0,
+ IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE),
+};
+
+/* Commands with settype-specific attributes */
+enum ipset_adt {
+ IPSET_ADD,
+ IPSET_DEL,
+ IPSET_TEST,
+ IPSET_ADT_MAX,
+ IPSET_CREATE = IPSET_ADT_MAX,
+ IPSET_CADT_MAX,
+};
+
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
+/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
+ * and IPSET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef u16 ip_set_id_t;
+
+#define IPSET_INVALID_ID 65535
+
+enum ip_set_dim {
+ IPSET_DIM_ZERO = 0,
+ IPSET_DIM_ONE,
+ IPSET_DIM_TWO,
+ IPSET_DIM_THREE,
+ /* Max dimension in elements.
+ * If changed, new revision of iptables match/target is required.
+ */
+ IPSET_DIM_MAX = 6,
+};
+
+/* Option flags for kernel operations */
+enum ip_set_kopt {
+ IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO),
+ IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
+ IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
+ IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+};
+
+/* Set features */
+enum ip_set_feature {
+ IPSET_TYPE_IP_FLAG = 0,
+ IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+ IPSET_TYPE_PORT_FLAG = 1,
+ IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+ IPSET_TYPE_MAC_FLAG = 2,
+ IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+ IPSET_TYPE_IP2_FLAG = 3,
+ IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+ IPSET_TYPE_NAME_FLAG = 4,
+ IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+ /* Strictly speaking not a feature, but a flag for dumping:
+ * this settype must be dumped last */
+ IPSET_DUMP_LAST_FLAG = 7,
+ IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+struct ip_set;
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+ /* Kernelspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+
+ /* Userspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags);
+
+ /* Low level add/del/test functions */
+ ipset_adtfn adt[IPSET_ADT_MAX];
+
+ /* When adding entries and set is full, try to resize the set */
+ int (*resize)(struct ip_set *set, bool retried);
+ /* Destroy the set */
+ void (*destroy)(struct ip_set *set);
+ /* Flush the elements */
+ void (*flush)(struct ip_set *set);
+ /* Expire entries before listing */
+ void (*expire)(struct ip_set *set);
+ /* List set header data */
+ int (*head)(struct ip_set *set, struct sk_buff *skb);
+ /* List elements */
+ int (*list)(const struct ip_set *set, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+ /* Return true if "b" set is the same as "a"
+ * according to the create set parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+ struct list_head list;
+
+ /* Typename */
+ char name[IPSET_MAXNAMELEN];
+ /* Protocol version */
+ u8 protocol;
+ /* Set features to control swapping */
+ u8 features;
+ /* Set type dimension */
+ u8 dimension;
+ /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+ u8 family;
+ /* Type revision */
+ u8 revision;
+
+ /* Create set */
+ int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+
+ /* Attribute policies */
+ const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+ const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+/* A generic IP set */
+struct ip_set {
+ /* The name of the set */
+ char name[IPSET_MAXNAMELEN];
+ /* Lock protecting the set data */
+ rwlock_t lock;
+ /* References to the set */
+ atomic_t ref;
+ /* The core set type */
+ struct ip_set_type *type;
+ /* The type variant doing the real job */
+ const struct ip_set_type_variant *variant;
+ /* The actual INET family of the set */
+ u8 family;
+ /* The type specific data */
+ void *data;
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(ip_set_id_t index);
+extern const char * ip_set_name_byindex(ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
+extern void ip_set_nfnl_put(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+
+/* Utility functions */
+extern void * ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+ __be32 ip;
+ int ret = ip_set_get_ipaddr4(nla, &ip);
+
+ if (ret)
+ return ret;
+ *ipaddr = ntohl(ip);
+ return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+ return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+ return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+ return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+ return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+ return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
+#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
+
+#define NLA_PUT_IPADDR4(skb, type, ipaddr) \
+do { \
+ struct nlattr *__nested = ipset_nest_start(skb, type); \
+ \
+ if (!__nested) \
+ goto nla_put_failure; \
+ NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \
+ ipset_nest_end(skb, __nested); \
+} while (0)
+
+#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \
+do { \
+ struct nlattr *__nested = ipset_nest_start(skb, type); \
+ \
+ if (!__nested) \
+ goto nla_put_failure; \
+ NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \
+ sizeof(struct in6_addr), ipaddrptr); \
+ ipset_nest_end(skb, __nested); \
+} while (0)
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+ return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+ *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+ memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+ sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
+/* Interface to iptables/ip6tables */
+
+#define SO_IP_SET 83
+
+union ip_set_name_index {
+ char name[IPSET_MAXNAMELEN];
+ ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
+struct ip_set_req_get_set {
+ unsigned op;
+ unsigned version;
+ union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
+struct ip_set_req_version {
+ unsigned op;
+ unsigned version;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H */
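For orientation, a heavily hedged sketch of the kernel-side API from the point of view of an iptables match (close in spirit to the xt_set match built on this header; the function names and the fixed source-address dimension are illustrative):

#include <linux/netfilter.h>
#include <linux/netfilter/ipset/ip_set.h>

static ip_set_id_t foo_index;

/* Resolve the set by name once, at checkentry time. */
static int foo_checkentry(const char *setname)
{
	foo_index = ip_set_nfnl_get(setname);
	return foo_index == IPSET_INVALID_ID ? -ENOENT : 0;
}

/* Per-packet test against the set, matching on the source address. */
static bool foo_match(const struct sk_buff *skb)
{
	return ip_set_test(foo_index, skb, NFPROTO_IPV4,
			   IPSET_DIM_ONE, IPSET_DIM_ONE_SRC) > 0;
}

static void foo_destroy(void)
{
	ip_set_nfnl_put(foo_index);
}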
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
new file mode 100644
index 000000000000..ec9d9bea1e37
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -0,0 +1,1074 @@
+#ifndef _IP_SET_AHASH_H
+#define _IP_SET_AHASH_H
+
+#include <linux/rcupdate.h>
+#include <linux/jhash.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+/* Hashing which uses arrays to resolve clashing. The hash table is resized
+ * (doubled) when searching becomes too long.
+ * Internally jhash is used with the assumption that the size of the
+ * stored data is a multiple of sizeof(u32). If storage supports timeout,
+ * the timeout field must be the last one in the data structure - that field
+ * is ignored when computing the hash key.
+ *
+ * Readers and resizing
+ *
+ * Resizing can be triggered by userspace command only, and those
+ * are serialized by the nfnl mutex. During resizing the set is
+ * read-locked, so the only possible concurrent operations are
+ * the kernel side readers. Those must be protected by proper RCU locking.
+ */
+
+/* Number of elements to store in an initial array block */
+#define AHASH_INIT_SIZE 4
+/* Max number of elements to store in an array block */
+#define AHASH_MAX_SIZE (3*4)
+
+/* A hash bucket */
+struct hbucket {
+ void *value; /* the array of the values */
+ u8 size; /* size of the array */
+ u8 pos; /* position of the first free entry */
+};
+
+/* The hash table: the table size stored here in order to make resizing easy */
+struct htable {
+ u8 htable_bits; /* size of hash table == 2^htable_bits */
+ struct hbucket bucket[0]; /* hashtable buckets */
+};
+
+#define hbucket(h, i) &((h)->bucket[i])
+
+/* Book-keeping of the prefixes added to the set */
+struct ip_set_hash_nets {
+ u8 cidr; /* the different cidr values in the set */
+ u32 nets; /* number of elements per cidr */
+};
+
+/* The generic ip_set hash structure */
+struct ip_set_hash {
+ struct htable *table; /* the hash table */
+ u32 maxelem; /* max elements in the hash */
+ u32 elements; /* current element (vs timeout) */
+ u32 initval; /* random jhash init value */
+ u32 timeout; /* timeout value, if enabled */
+ struct timer_list gc; /* garbage collection when timeout enabled */
+#ifdef IP_SET_HASH_WITH_NETMASK
+ u8 netmask; /* netmask value for subnets to store */
+#endif
+#ifdef IP_SET_HASH_WITH_NETS
+ struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
+#endif
+};
+
+/* Compute htable_bits from the user input parameter hashsize */
+static u8
+htable_bits(u32 hashsize)
+{
+ /* Assume that hashsize == 2^htable_bits */
+ u8 bits = fls(hashsize - 1);
+ if (jhash_size(bits) != hashsize)
+ /* Round up to the first 2^n value */
+ bits = fls(hashsize);
+
+ return bits;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
+
+/* Network cidr size book keeping when the hash stores different
+ * sized networks */
+static void
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+ u8 i;
+
+ ++h->nets[cidr-1].nets;
+
+ pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+ if (h->nets[cidr-1].nets > 1)
+ return;
+
+ /* New cidr size */
+ for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
+ /* Add in increasing prefix order, so larger cidr first */
+ if (h->nets[i].cidr < cidr)
+ swap(h->nets[i].cidr, cidr);
+ }
+ if (i < host_mask)
+ h->nets[i].cidr = cidr;
+}
+
+static void
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+ u8 i;
+
+ --h->nets[cidr-1].nets;
+
+ pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+ if (h->nets[cidr-1].nets != 0)
+ return;
+
+ /* All entries with this cidr size deleted, so cleanup h->cidr[] */
+ for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
+ if (h->nets[i].cidr == cidr)
+ h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+ }
+ h->nets[i - 1].cidr = 0;
+}
+#endif
+
+/* Destroy the hashtable part of the set */
+static void
+ahash_destroy(struct htable *t)
+{
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size)
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+
+ ip_set_free(t);
+}
+
+/* Calculate the actual memory size of the set data */
+static size_t
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+{
+ u32 i;
+ struct htable *t = h->table;
+ size_t memsize = sizeof(*h)
+ + sizeof(*t)
+#ifdef IP_SET_HASH_WITH_NETS
+ + sizeof(struct ip_set_hash_nets) * host_mask
+#endif
+ + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++)
+ memsize += t->bucket[i].size * dsize;
+
+ return memsize;
+}
+
+/* Flush a hash type of set: destroy all elements */
+static void
+ip_set_hash_flush(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size) {
+ n->size = n->pos = 0;
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+ }
+#ifdef IP_SET_HASH_WITH_NETS
+ memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
+ * SET_HOST_MASK(set->family));
+#endif
+ h->elements = 0;
+}
+
+/* Destroy a hash type of set */
+static void
+ip_set_hash_destroy(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+
+ if (with_timeout(h->timeout))
+ del_timer_sync(&h->gc);
+
+ ahash_destroy(h->table);
+ kfree(h);
+
+ set->data = NULL;
+}
+
+#define HKEY(data, initval, htable_bits) \
+(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
+ & jhash_mask(htable_bits))
+
+#endif /* _IP_SET_AHASH_H */
+
+#define CONCAT(a, b, c) a##b##c
+#define TOKEN(a, b, c) CONCAT(a, b, c)
+
+/* Type/family dependent function prototypes */
+
+#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal)
+#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull)
+#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy)
+#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out)
+#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask)
+#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
+#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
+
+#define type_pf_elem TOKEN(TYPE, PF, _elem)
+#define type_pf_telem TOKEN(TYPE, PF, _telem)
+#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout)
+#define type_pf_data_expired TOKEN(TYPE, PF, _data_expired)
+#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
+
+#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add)
+#define type_pf_add TOKEN(TYPE, PF, _add)
+#define type_pf_del TOKEN(TYPE, PF, _del)
+#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs)
+#define type_pf_test TOKEN(TYPE, PF, _test)
+
+#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd)
+#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem)
+#define type_pf_expire TOKEN(TYPE, PF, _expire)
+#define type_pf_tadd TOKEN(TYPE, PF, _tadd)
+#define type_pf_tdel TOKEN(TYPE, PF, _tdel)
+#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs)
+#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest)
+
+#define type_pf_resize TOKEN(TYPE, PF, _resize)
+#define type_pf_tresize TOKEN(TYPE, PF, _tresize)
+#define type_pf_flush ip_set_hash_flush
+#define type_pf_destroy ip_set_hash_destroy
+#define type_pf_head TOKEN(TYPE, PF, _head)
+#define type_pf_list TOKEN(TYPE, PF, _list)
+#define type_pf_tlist TOKEN(TYPE, PF, _tlist)
+#define type_pf_same_set TOKEN(TYPE, PF, _same_set)
+#define type_pf_kadt TOKEN(TYPE, PF, _kadt)
+#define type_pf_uadt TOKEN(TYPE, PF, _uadt)
+#define type_pf_gc TOKEN(TYPE, PF, _gc)
+#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init)
+#define type_pf_variant TOKEN(TYPE, PF, _variant)
+#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant)
+
+/* Flavour without timeout */
+
+/* Get the ith element from the array block n */
+#define ahash_data(n, i) \
+ ((struct type_pf_elem *)((n)->value) + (i))
+
+/* Add an element to the hash table when resizing the set:
+ * we spare the maintenance of the internal counters. */
+static int
+type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value)
+{
+ if (n->pos >= n->size) {
+ void *tmp;
+
+ if (n->size >= AHASH_MAX_SIZE)
+ /* Trigger rehashing */
+ return -EAGAIN;
+
+ tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_elem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+ if (n->size) {
+ memcpy(tmp, n->value,
+ sizeof(struct type_pf_elem) * n->size);
+ kfree(n->value);
+ }
+ n->value = tmp;
+ n->size += AHASH_INIT_SIZE;
+ }
+ type_pf_data_copy(ahash_data(n, n->pos++), value);
+ return 0;
+}
+
+/* Resize a hash: create a new hash table with double the hash size
+ * and insert the elements into it. Repeat until we succeed or
+ * fail due to memory pressure. */
+static int
+type_pf_resize(struct ip_set *set, bool retried)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t, *orig = h->table;
+ u8 htable_bits = orig->htable_bits;
+ const struct type_pf_elem *data;
+ struct hbucket *n, *m;
+ u32 i, j;
+ int ret;
+
+retry:
+ ret = 0;
+ htable_bits++;
+ pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+ set->name, orig->htable_bits, htable_bits, orig);
+ if (!htable_bits)
+ /* In case we have plenty of memory :-) */
+ return -IPSET_ERR_HASH_FULL;
+ t = ip_set_alloc(sizeof(*t)
+ + jhash_size(htable_bits) * sizeof(struct hbucket));
+ if (!t)
+ return -ENOMEM;
+ t->htable_bits = htable_bits;
+
+ read_lock_bh(&set->lock);
+ for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+ n = hbucket(orig, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_data(n, j);
+ m = hbucket(t, HKEY(data, h->initval, htable_bits));
+ ret = type_pf_elem_add(m, data);
+ if (ret < 0) {
+ read_unlock_bh(&set->lock);
+ ahash_destroy(t);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+ }
+ }
+ }
+
+ rcu_assign_pointer(h->table, t);
+ read_unlock_bh(&set->lock);
+
+ /* Give time to other readers of the set */
+ synchronize_rcu_bh();
+
+ pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
+ orig->htable_bits, orig, t->htable_bits, t);
+ ahash_destroy(orig);
+
+ return 0;
+}
+
+/* Add an element to a hash and update the internal counters when succeeded,
+ * otherwise report the proper error code. */
+static int
+type_pf_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i, ret = 0;
+ u32 key;
+
+ if (h->elements >= h->maxelem)
+ return -IPSET_ERR_HASH_FULL;
+
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++)
+ if (type_pf_data_equal(ahash_data(n, i), d)) {
+ ret = -IPSET_ERR_EXIST;
+ goto out;
+ }
+
+ ret = type_pf_elem_add(n, value);
+ if (ret != 0)
+ goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ h->elements++;
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+/* Delete an element from the hash: swap it with the last element
+ * and free up space if possible.
+ */
+static int
+type_pf_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i;
+ struct type_pf_elem *data;
+ u32 key;
+
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (!type_pf_data_equal(data, d))
+ continue;
+ if (i != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data, ahash_data(n, n->pos - 1));
+
+ n->pos--;
+ h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, d->cidr, HOST_MASK);
+#endif
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_elem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return 0;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_elem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ return 0;
+ }
+
+ return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+/* Special test function which takes into account the different network
+ * sizes added to the set */
+static int
+type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct hbucket *n;
+ const struct type_pf_elem *data;
+ int i, j = 0;
+ u32 key;
+ u8 host_mask = SET_HOST_MASK(set->family);
+
+ pr_debug("test by nets\n");
+ for (; j < host_mask && h->nets[j].cidr; j++) {
+ type_pf_data_netmask(d, h->nets[j].cidr);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (type_pf_data_equal(data, d))
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/* Test whether the element is added to the set */
+static int
+type_pf_test(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *d = value;
+ struct hbucket *n;
+ const struct type_pf_elem *data;
+ int i;
+ u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ /* If we test an IP address and not a network address,
+ * try all possible network sizes */
+ if (d->cidr == SET_HOST_MASK(set->family))
+ return type_pf_test_cidrs(set, d, timeout);
+#endif
+
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (type_pf_data_equal(data, d))
+ return 1;
+ }
+ return 0;
+}
+
+/* Reply a HEADER request: fill out the header part of the set */
+static int
+type_pf_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct ip_set_hash *h = set->data;
+ struct nlattr *nested;
+ size_t memsize;
+
+ read_lock_bh(&set->lock);
+ memsize = ahash_memsize(h, with_timeout(h->timeout)
+ ? sizeof(struct type_pf_telem)
+ : sizeof(struct type_pf_elem),
+ set->family == AF_INET ? 32 : 128);
+ read_unlock_bh(&set->lock);
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
+ htonl(jhash_size(h->table->htable_bits)));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+#ifdef IP_SET_HASH_WITH_NETMASK
+ if (h->netmask != HOST_MASK)
+ NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+#endif
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
+ if (with_timeout(h->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+/* Reply a LIST/SAVE request: dump the elements of the specified set */
+static int
+type_pf_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct ip_set_hash *h = set->data;
+ const struct htable *t = h->table;
+ struct nlattr *atd, *nested;
+ const struct hbucket *n;
+ const struct type_pf_elem *data;
+ u32 first = cb->args[2];
+ /* We assume that one hash bucket fits into one page */

+ void *incomplete;
+ int i;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ pr_debug("list hash set %s\n", set->name);
+ for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+ incomplete = skb_tail_pointer(skb);
+ n = hbucket(t, cb->args[2]);
+ pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ pr_debug("list hash %lu hbucket %p i %u, data %p\n",
+ cb->args[2], n, i, data);
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (cb->args[2] == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ if (type_pf_data_list(skb, data))
+ goto nla_put_failure;
+ ipset_nest_end(skb, nested);
+ }
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, incomplete);
+ ipset_nest_end(skb, atd);
+ if (unlikely(first == cb->args[2])) {
+ pr_warning("Can't list set %s: one bucket does not fit into "
+ "a message. Please report it!\n", set->name);
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+static int
+type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags);
+
+static const struct ip_set_type_variant type_pf_variant = {
+ .kadt = type_pf_kadt,
+ .uadt = type_pf_uadt,
+ .adt = {
+ [IPSET_ADD] = type_pf_add,
+ [IPSET_DEL] = type_pf_del,
+ [IPSET_TEST] = type_pf_test,
+ },
+ .destroy = type_pf_destroy,
+ .flush = type_pf_flush,
+ .head = type_pf_head,
+ .list = type_pf_list,
+ .resize = type_pf_resize,
+ .same_set = type_pf_same_set,
+};
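
The variant structure just filled in is a plain table of function pointers: the generic ip_set core calls through it without knowing whether the set is the timeout flavour or not, and the right table is chosen once at set creation. A minimal, self-contained sketch of the same dispatch pattern:

#include <stdio.h>

struct set_ops {
	int (*add)(int v);
	int (*del)(int v);
	int (*test)(int v);
};

static int plain_add(int v)  { printf("add %d\n", v);  return 0; }
static int plain_del(int v)  { printf("del %d\n", v);  return 0; }
static int plain_test(int v) { printf("test %d\n", v); return 1; }

static const struct set_ops plain_variant = {
	.add  = plain_add,
	.del  = plain_del,
	.test = plain_test,
};

int main(void)
{
	const struct set_ops *ops = &plain_variant;	/* chosen at create time */

	ops->add(1);
	return ops->test(1) ? 0 : 1;
}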
+
+/* Flavour with timeout support */
+
+#define ahash_tdata(n, i) \
+ (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
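
A note on the macro just defined: with timeouts every stored element is a struct type_pf_telem, i.e. the plain element followed by a timeout word, so indexing a bucket must use the larger stride; that is the only difference between ahash_tdata() and ahash_data(). A hedged illustration with invented element types:

#include <stdint.h>

struct elem  { uint32_t ip; };
struct telem { struct elem e; uint32_t timeout; };	/* base + timeout */

static struct elem *nth_plain(void *value, int i)
{
	return (struct elem *)value + i;
}

static struct elem *nth_timeout(void *value, int i)
{
	return &(((struct telem *)value) + i)->e;	/* bigger stride */
}

int main(void)
{
	struct telem arr[2] = { { { 1 }, 100 }, { { 2 }, 200 } };

	(void)nth_plain;
	return nth_timeout(arr, 1)->ip == 2 ? 0 : 1;	/* 0 on success */
}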
+
+static inline u32
+type_pf_data_timeout(const struct type_pf_elem *data)
+{
+ const struct type_pf_telem *tdata =
+ (const struct type_pf_telem *) data;
+
+ return tdata->timeout;
+}
+
+static inline bool
+type_pf_data_expired(const struct type_pf_elem *data)
+{
+ const struct type_pf_telem *tdata =
+ (const struct type_pf_telem *) data;
+
+ return ip_set_timeout_expired(tdata->timeout);
+}
+
+static inline void
+type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
+{
+ struct type_pf_telem *tdata = (struct type_pf_telem *) data;
+
+ tdata->timeout = ip_set_timeout_set(timeout);
+}
+
+static int
+type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
+ u32 timeout)
+{
+ struct type_pf_elem *data;
+
+ if (n->pos >= n->size) {
+ void *tmp;
+
+ if (n->size >= AHASH_MAX_SIZE)
+ /* Trigger rehashing */
+ return -EAGAIN;
+
+ tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+ if (n->size) {
+ memcpy(tmp, n->value,
+ sizeof(struct type_pf_telem) * n->size);
+ kfree(n->value);
+ }
+ n->value = tmp;
+ n->size += AHASH_INIT_SIZE;
+ }
+ data = ahash_tdata(n, n->pos++);
+ type_pf_data_copy(data, value);
+ type_pf_data_timeout_set(data, timeout);
+ return 0;
+}
+
+/* Delete expired elements from the hashtable */
+static void
+type_pf_expire(struct ip_set_hash *h)
+{
+ struct htable *t = h->table;
+ struct hbucket *n;
+ struct type_pf_elem *data;
+ u32 i;
+ int j;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_tdata(n, j);
+ if (type_pf_data_expired(data)) {
+ pr_debug("expired %u/%u\n", i, j);
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, data->cidr, HOST_MASK);
+#endif
+ if (j != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data,
+ ahash_tdata(n, n->pos - 1));
+ n->pos--;
+ h->elements--;
+ }
+ }
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ /* Still try to delete expired elements */
+ continue;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_telem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ }
+}
+
+static int
+type_pf_tresize(struct ip_set *set, bool retried)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t, *orig = h->table;
+ u8 htable_bits = orig->htable_bits;
+ const struct type_pf_elem *data;
+ struct hbucket *n, *m;
+ u32 i, j;
+ int ret;
+
+ /* Try to cleanup once */
+ if (!retried) {
+ i = h->elements;
+ write_lock_bh(&set->lock);
+ type_pf_expire(set->data);
+ write_unlock_bh(&set->lock);
+ if (h->elements < i)
+ return 0;
+ }
+
+retry:
+ ret = 0;
+ htable_bits++;
+ if (!htable_bits)
+ /* In case we have plenty of memory :-) */
+ return -IPSET_ERR_HASH_FULL;
+ t = ip_set_alloc(sizeof(*t)
+ + jhash_size(htable_bits) * sizeof(struct hbucket));
+ if (!t)
+ return -ENOMEM;
+ t->htable_bits = htable_bits;
+
+ read_lock_bh(&set->lock);
+ for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+ n = hbucket(orig, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_tdata(n, j);
+ m = hbucket(t, HKEY(data, h->initval, htable_bits));
+ ret = type_pf_elem_tadd(m, data,
+ type_pf_data_timeout(data));
+ if (ret < 0) {
+ read_unlock_bh(&set->lock);
+ ahash_destroy(t);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+ }
+ }
+ }
+
+ rcu_assign_pointer(h->table, t);
+ read_unlock_bh(&set->lock);
+
+ /* Give time to other readers of the set */
+ synchronize_rcu_bh();
+
+ ahash_destroy(orig);
+
+ return 0;
+}
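
The resize loop above follows one pattern: build a table with one more bit of index space, rehash every element into it, and if a bucket still overflows (-EAGAIN) throw the half-built table away and retry with yet another bit; it only gives up when htable_bits would wrap. A standalone sketch of that control flow, with rehash_all() as a caller-supplied placeholder:

#include <stdlib.h>

struct table {
	unsigned int bits;
	void **buckets;			/* 2^bits bucket pointers */
};

struct table *grow(const struct table *orig,
		   int (*rehash_all)(const struct table *src,
				     struct table *dst))
{
	unsigned int bits = orig->bits;
	struct table *t;

retry:
	bits++;
	t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;
	t->bits = bits;
	t->buckets = calloc(1UL << bits, sizeof(*t->buckets));
	if (!t->buckets) {
		free(t);
		return NULL;
	}
	if (rehash_all(orig, t) < 0) {	/* e.g. a bucket overflowed */
		free(t->buckets);
		free(t);
		goto retry;		/* real code also caps the bit count */
	}
	return t;
}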
+
+static int
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ struct type_pf_elem *data;
+ int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+ u32 key;
+
+ if (h->elements >= h->maxelem)
+ /* FIXME: when set is full, we slow down here */
+ type_pf_expire(h);
+ if (h->elements >= h->maxelem)
+ return -IPSET_ERR_HASH_FULL;
+
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d)) {
+ if (type_pf_data_expired(data))
+ j = i;
+ else {
+ ret = -IPSET_ERR_EXIST;
+ goto out;
+ }
+ } else if (j == AHASH_MAX_SIZE + 1 &&
+ type_pf_data_expired(data))
+ j = i;
+ }
+ if (j != AHASH_MAX_SIZE + 1) {
+ data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, data->cidr, HOST_MASK);
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ type_pf_data_copy(data, d);
+ type_pf_data_timeout_set(data, timeout);
+ goto out;
+ }
+ ret = type_pf_elem_tadd(n, d, timeout);
+ if (ret != 0)
+ goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ h->elements++;
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+static int
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i, ret = 0;
+ struct type_pf_elem *data;
+ u32 key;
+
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (!type_pf_data_equal(data, d))
+ continue;
+ if (type_pf_data_expired(data))
+ ret = -IPSET_ERR_EXIST;
+ if (i != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
+
+ n->pos--;
+ h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, d->cidr, HOST_MASK);
+#endif
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return 0;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_telem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ return 0;
+ }
+
+ return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+static int
+type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *data;
+ struct hbucket *n;
+ int i, j = 0;
+ u32 key;
+ u8 host_mask = SET_HOST_MASK(set->family);
+
+ for (; j < host_mask && h->nets[j].cidr; j++) {
+ type_pf_data_netmask(d, h->nets[j].cidr);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d))
+ return !type_pf_data_expired(data);
+ }
+ }
+ return 0;
+}
+#endif
+
+static int
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *data, *d = value;
+ struct hbucket *n;
+ int i;
+ u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ if (d->cidr == SET_HOST_MASK(set->family))
+ return type_pf_ttest_cidrs(set, d, timeout);
+#endif
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d))
+ return !type_pf_data_expired(data);
+ }
+ return 0;
+}
+
+static int
+type_pf_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct ip_set_hash *h = set->data;
+ const struct htable *t = h->table;
+ struct nlattr *atd, *nested;
+ const struct hbucket *n;
+ const struct type_pf_elem *data;
+ u32 first = cb->args[2];
+ /* We assume that one hash bucket fits into one page */
+ void *incomplete;
+ int i;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+ incomplete = skb_tail_pointer(skb);
+ n = hbucket(t, cb->args[2]);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ pr_debug("list %p %u\n", n, i);
+ if (type_pf_data_expired(data))
+ continue;
+ pr_debug("do list %p %u\n", n, i);
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (cb->args[2] == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ if (type_pf_data_tlist(skb, data))
+ goto nla_put_failure;
+ ipset_nest_end(skb, nested);
+ }
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, incomplete);
+ ipset_nest_end(skb, atd);
+ if (unlikely(first == cb->args[2])) {
+ pr_warning("Can't list set %s: one bucket does not fit into "
+ "a message. Please report it!\n", set->name);
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static const struct ip_set_type_variant type_pf_tvariant = {
+ .kadt = type_pf_kadt,
+ .uadt = type_pf_uadt,
+ .adt = {
+ [IPSET_ADD] = type_pf_tadd,
+ [IPSET_DEL] = type_pf_tdel,
+ [IPSET_TEST] = type_pf_ttest,
+ },
+ .destroy = type_pf_destroy,
+ .flush = type_pf_flush,
+ .head = type_pf_head,
+ .list = type_pf_tlist,
+ .resize = type_pf_tresize,
+ .same_set = type_pf_same_set,
+};
+
+static void
+type_pf_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct ip_set_hash *h = set->data;
+
+ pr_debug("called\n");
+ write_lock_bh(&set->lock);
+ type_pf_expire(h);
+ write_unlock_bh(&set->lock);
+
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ add_timer(&h->gc);
+}
+
+static void
+type_pf_gc_init(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+
+ init_timer(&h->gc);
+ h->gc.data = (unsigned long) set;
+ h->gc.function = type_pf_gc;
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ add_timer(&h->gc);
+ pr_debug("gc initialized, run in every %u\n",
+ IPSET_GC_PERIOD(h->timeout));
+}
+
+#undef type_pf_data_equal
+#undef type_pf_data_isnull
+#undef type_pf_data_copy
+#undef type_pf_data_zero_out
+#undef type_pf_data_list
+#undef type_pf_data_tlist
+
+#undef type_pf_elem
+#undef type_pf_telem
+#undef type_pf_data_timeout
+#undef type_pf_data_expired
+#undef type_pf_data_netmask
+#undef type_pf_data_timeout_set
+
+#undef type_pf_elem_add
+#undef type_pf_add
+#undef type_pf_del
+#undef type_pf_test_cidrs
+#undef type_pf_test
+
+#undef type_pf_elem_tadd
+#undef type_pf_expire
+#undef type_pf_tadd
+#undef type_pf_tdel
+#undef type_pf_ttest_cidrs
+#undef type_pf_ttest
+
+#undef type_pf_resize
+#undef type_pf_tresize
+#undef type_pf_flush
+#undef type_pf_destroy
+#undef type_pf_head
+#undef type_pf_list
+#undef type_pf_tlist
+#undef type_pf_same_set
+#undef type_pf_kadt
+#undef type_pf_uadt
+#undef type_pf_gc
+#undef type_pf_gc_init
+#undef type_pf_variant
+#undef type_pf_tvariant
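
The long #undef block that closes the file is the tell-tale of an include-as-template header: the including module defines a handful of macros, the generic body token-pastes them into real identifiers, and everything is undefined afterwards so the body can be included again with different names. A compact, self-contained illustration of the trick (file and macro names invented here):

#include <stdio.h>

/* The "generic body": real identifiers are produced by token pasting. */
#define CONCAT2(a, b)	a##_##b
#define CONCAT(a, b)	CONCAT2(a, b)
#define FN(name)	CONCAT(PREFIX, name)

#define PREFIX	intstack
#define TYPE	int

static int FN(push)(TYPE *stack, int top, TYPE v)
{
	stack[top] = v;
	return top + 1;
}

/* Undefine everything so the body could be reused with new names,
 * exactly like the #undef block ending the header above. */
#undef PREFIX
#undef TYPE
#undef FN
#undef CONCAT
#undef CONCAT2

int main(void)
{
	int s[4], top = 0;

	top = intstack_push(s, top, 7);
	printf("%d %d\n", top, s[0]);		/* 1 7 */
	return 0;
}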
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644
index 000000000000..61a9e8746c83
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -0,0 +1,31 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+/* Bitmap type specific error codes */
+enum {
+ /* The element is out of the range of the set */
+ IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC,
+ /* The range exceeds the size limit of the set type */
+ IPSET_ERR_BITMAP_RANGE_SIZE,
+};
+
+#ifdef __KERNEL__
+#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
+
+/* Common functions */
+
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+ u32 mask = 0xFFFFFFFE;
+
+ *bits = 32;
+ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_BITMAP_H */
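
A worked example of range_to_mask() from the header above, compiled as ordinary userspace C: for the range 192.168.1.0 - 192.168.1.255 it finds the /24 netmask 0xFFFFFF00. Addresses are handled as host-order 32-bit integers here, as the bitmap types do after ntohl().

#include <stdio.h>
#include <stdint.h>

static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
{
	uint32_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	uint8_t bits;
	uint32_t mask = range_to_mask(0xC0A80100, 0xC0A801FF, &bits);

	/* prints: mask 0xFFFFFF00, prefix /24 */
	printf("mask 0x%08X, prefix /%u\n",
	       (unsigned int)mask, (unsigned int)bits);
	return 0;
}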
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644
index 000000000000..3882a81a3b3c
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -0,0 +1,21 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+ __be16 *port);
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644
index 000000000000..b86f15c04524
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -0,0 +1,26 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+/* Hash type specific error codes */
+enum {
+ /* Hash is full */
+ IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC,
+ /* Null-valued element */
+ IPSET_ERR_HASH_ELEM,
+ /* Invalid protocol */
+ IPSET_ERR_INVALID_PROTO,
+ /* Protocol missing but must be specified */
+ IPSET_ERR_MISSING_PROTO,
+};
+
+#ifdef __KERNEL__
+
+#define IPSET_DEFAULT_HASHSIZE 1024
+#define IPSET_MIMINAL_HASHSIZE 64
+#define IPSET_DEFAULT_MAXELEM 65536
+#define IPSET_DEFAULT_PROBES 4
+#define IPSET_DEFAULT_RESIZE 100
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_HASH_H */
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644
index 000000000000..40a63f302613
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -0,0 +1,27 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+/* List type specific error codes */
+enum {
+ /* Set name to be added/deleted/tested does not exist. */
+ IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC,
+ /* list:set type is not permitted to add */
+ IPSET_ERR_LOOP,
+ /* Missing reference set */
+ IPSET_ERR_BEFORE,
+ /* Reference set does not exist */
+ IPSET_ERR_NAMEREF,
+ /* Set is full */
+ IPSET_ERR_LIST_FULL,
+ /* Reference set is not added to the set */
+ IPSET_ERR_REF_EXIST,
+};
+
+#ifdef __KERNEL__
+
+#define IP_SET_LIST_DEFAULT_SIZE 8
+#define IP_SET_LIST_MIN_SIZE 4
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644
index 000000000000..9f30c5f2ec1c
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -0,0 +1,127 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+ ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
+
+/* Set is defined without timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT UINT_MAX
+
+#define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+ unsigned int timeout = ip_set_get_h32(tb);
+
+ /* Userspace supplied TIMEOUT parameter: adjust crazy size */
+ return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+#ifdef IP_SET_BITMAP_TIMEOUT
+
+/* Bitmap specific timeout constants and macros for the entries */
+
+/* Bitmap entry is unset */
+#define IPSET_ELEM_UNSET 0
+/* Bitmap entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT (UINT_MAX/2)
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_UNSET &&
+ (timeout == IPSET_ELEM_PERMANENT ||
+ time_after(timeout, jiffies));
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_UNSET &&
+ timeout != IPSET_ELEM_PERMANENT &&
+ time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+ unsigned long t;
+
+ if (!timeout)
+ return IPSET_ELEM_PERMANENT;
+
+ t = timeout * HZ + jiffies;
+ if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
+ /* Bingo! */
+ t++;
+
+ return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+
+#else
+
+/* Hash specific timeout constants and macros for the entries */
+
+/* Hash entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT 0
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ||
+ time_after(timeout, jiffies);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_PERMANENT &&
+ time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+ unsigned long t;
+
+ if (!timeout)
+ return IPSET_ELEM_PERMANENT;
+
+ t = timeout * HZ + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
+ /* Bingo! :-) */
+ t++;
+
+ return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+#endif /* ! IP_SET_BITMAP_TIMEOUT */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_SET_TIMEOUT_H */
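
To summarise the header just added: a user-supplied timeout of 0 is stored as the "permanent" marker, anything else becomes an absolute jiffies deadline, and the garbage collector runs every timeout/3 seconds, clamped between 1 and IPSET_GC_TIME (180 s). A userspace analogue using wall-clock seconds in place of jiffies:

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

#define ELEM_PERMANENT	0UL
#define GC_TIME		(3 * 60)

static unsigned long timeout_set(unsigned int timeout)
{
	unsigned long t;

	if (!timeout)
		return ELEM_PERMANENT;
	t = (unsigned long)time(NULL) + timeout;
	if (t == ELEM_PERMANENT)	/* avoid colliding with the marker */
		t++;
	return t;
}

static bool timeout_expired(unsigned long t)
{
	return t != ELEM_PERMANENT && t < (unsigned long)time(NULL);
}

static unsigned int gc_period(unsigned int timeout)
{
	unsigned int third = timeout / 3;

	return third ? (third < GC_TIME ? third : GC_TIME) : 1;
}

int main(void)
{
	unsigned long t = timeout_set(600);

	/* prints: expired now? 0, gc every 180 s */
	printf("expired now? %d, gc every %u s\n",
	       timeout_expired(t), gc_period(600));
	return 0;
}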
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644
index 000000000000..0e1fb50da562
--- /dev/null
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -0,0 +1,35 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h>
+
+/* Prefixlen maps, by Jan Engelhardt */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+ return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+ return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+ return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+ return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+#endif /*_PFXLEN_H */
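
The pfxlen.h accessors above simply index precomputed mask tables by prefix length. The sketch below builds the IPv4 half of such a table in userspace; the exact contents and byte order of the kernel's own maps are not reproduced, the host mask is shown here as the complement of the netmask for illustration only.

#include <stdio.h>
#include <stdint.h>

static uint32_t netmask_map[33];	/* index = prefix length 0..32 */

static void build_map(void)
{
	int pfx;

	for (pfx = 0; pfx <= 32; pfx++)
		netmask_map[pfx] = pfx ? 0xFFFFFFFFu << (32 - pfx) : 0;
}

int main(void)
{
	build_map();
	/* prints: /24 -> 0xFFFFFF00, host mask 0x000000FF */
	printf("/24 -> 0x%08X, host mask 0x%08X\n",
	       (unsigned int)netmask_map[24],
	       (unsigned int)~netmask_map[24]);
	return 0;
}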
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644
index 000000000000..064bc63a5346
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 361d6b5630ee..2b11fc1a86be 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -47,7 +47,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_QUEUE 3
#define NFNL_SUBSYS_ULOG 4
#define NFNL_SUBSYS_OSF 5
-#define NFNL_SUBSYS_COUNT 6
+#define NFNL_SUBSYS_IPSET 6
+#define NFNL_SUBSYS_COUNT 7
#ifdef __KERNEL__
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 19711e3ffd42..debf1aefd753 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -42,6 +42,7 @@ enum ctattr_type {
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
+ CTA_TIMESTAMP,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -127,6 +128,14 @@ enum ctattr_counters {
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
+enum ctattr_tstamp {
+ CTA_TIMESTAMP_UNSPEC,
+ CTA_TIMESTAMP_START,
+ CTA_TIMESTAMP_STOP,
+ __CTA_TIMESTAMP_MAX
+};
+#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
+
enum ctattr_nat {
CTA_NAT_UNSPEC,
CTA_NAT_MINIP,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 6712e713b299..37219525ff6f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -611,8 +611,9 @@ struct _compat_xt_align {
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
extern void xt_compat_flush_offsets(u_int8_t af);
+extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h
new file mode 100644
index 000000000000..38751d2ea52b
--- /dev/null
+++ b/include/linux/netfilter/xt_AUDIT.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for iptables xt_AUDIT target
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _XT_AUDIT_TARGET_H
+#define _XT_AUDIT_TARGET_H
+
+#include <linux/types.h>
+
+enum {
+ XT_AUDIT_TYPE_ACCEPT = 0,
+ XT_AUDIT_TYPE_DROP,
+ XT_AUDIT_TYPE_REJECT,
+ __XT_AUDIT_TYPE_MAX,
+};
+
+#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)
+
+struct xt_audit_info {
+ __u8 type; /* XT_AUDIT_TYPE_* */
+};
+
+#endif /* _XT_AUDIT_TARGET_H */
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index 1b564106891d..b56e76811c04 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -1,14 +1,16 @@
#ifndef _XT_CT_H
#define _XT_CT_H
+#include <linux/types.h>
+
#define XT_CT_NOTRACK 0x1
struct xt_ct_target_info {
- u_int16_t flags;
- u_int16_t zone;
- u_int32_t ct_events;
- u_int32_t exp_events;
- char helper[16];
+ __u16 flags;
+ __u16 zone;
+ __u32 ct_events;
+ __u32 exp_events;
+ char helper[16];
/* Used internally by the kernel */
struct nf_conn *ct __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
index 2584f4a777de..9eafdbbb401c 100644
--- a/include/linux/netfilter/xt_NFQUEUE.h
+++ b/include/linux/netfilter/xt_NFQUEUE.h
@@ -20,4 +20,10 @@ struct xt_NFQ_info_v1 {
__u16 queues_total;
};
+struct xt_NFQ_info_v2 {
+ __u16 queuenum;
+ __u16 queues_total;
+ __u16 bypass;
+};
+
#endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_TCPOPTSTRIP.h b/include/linux/netfilter/xt_TCPOPTSTRIP.h
index 2db543214ff5..7157318499c2 100644
--- a/include/linux/netfilter/xt_TCPOPTSTRIP.h
+++ b/include/linux/netfilter/xt_TCPOPTSTRIP.h
@@ -1,13 +1,15 @@
#ifndef _XT_TCPOPTSTRIP_H
#define _XT_TCPOPTSTRIP_H
+#include <linux/types.h>
+
#define tcpoptstrip_set_bit(bmap, idx) \
(bmap[(idx) >> 5] |= 1U << (idx & 31))
#define tcpoptstrip_test_bit(bmap, idx) \
(((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0)
struct xt_tcpoptstrip_target_info {
- u_int32_t strip_bmap[8];
+ __u32 strip_bmap[8];
};
#endif /* _XT_TCPOPTSTRIP_H */
diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h
index 3f3d69361289..902043c2073f 100644
--- a/include/linux/netfilter/xt_TPROXY.h
+++ b/include/linux/netfilter/xt_TPROXY.h
@@ -1,19 +1,21 @@
#ifndef _XT_TPROXY_H
#define _XT_TPROXY_H
+#include <linux/types.h>
+
/* TPROXY target is capable of marking the packet to perform
* redirection. We can get rid of that whenever we get support for
* mutliple targets in the same rule. */
struct xt_tproxy_target_info {
- u_int32_t mark_mask;
- u_int32_t mark_value;
+ __u32 mark_mask;
+ __u32 mark_value;
__be32 laddr;
__be16 lport;
};
struct xt_tproxy_target_info_v1 {
- u_int32_t mark_mask;
- u_int32_t mark_value;
+ __u32 mark_mask;
+ __u32 mark_value;
union nf_inet_addr laddr;
__be16 lport;
};
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h
index 886682656f09..9b883c8fbf54 100644
--- a/include/linux/netfilter/xt_cluster.h
+++ b/include/linux/netfilter/xt_cluster.h
@@ -1,15 +1,17 @@
#ifndef _XT_CLUSTER_MATCH_H
#define _XT_CLUSTER_MATCH_H
+#include <linux/types.h>
+
enum xt_cluster_flags {
XT_CLUSTER_F_INV = (1 << 0)
};
struct xt_cluster_match_info {
- u_int32_t total_nodes;
- u_int32_t node_mask;
- u_int32_t hash_seed;
- u_int32_t flags;
+ __u32 total_nodes;
+ __u32 node_mask;
+ __u32 hash_seed;
+ __u32 flags;
};
#define XT_CLUSTER_NODES_MAX 32
diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h
index eacfedc6b5d0..0ea5e79f5bd7 100644
--- a/include/linux/netfilter/xt_comment.h
+++ b/include/linux/netfilter/xt_comment.h
@@ -4,7 +4,7 @@
#define XT_MAX_COMMENT_LEN 256
struct xt_comment_info {
- unsigned char comment[XT_MAX_COMMENT_LEN];
+ char comment[XT_MAX_COMMENT_LEN];
};
#endif /* XT_COMMENT_H */
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index 7e3284bcbd2b..0ca66e97acbc 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -1,8 +1,15 @@
#ifndef _XT_CONNLIMIT_H
#define _XT_CONNLIMIT_H
+#include <linux/types.h>
+
struct xt_connlimit_data;
+enum {
+ XT_CONNLIMIT_INVERT = 1 << 0,
+ XT_CONNLIMIT_DADDR = 1 << 1,
+};
+
struct xt_connlimit_info {
union {
union nf_inet_addr mask;
@@ -13,7 +20,14 @@ struct xt_connlimit_info {
};
#endif
};
- unsigned int limit, inverse;
+ unsigned int limit;
+ union {
+ /* revision 0 */
+ unsigned int inverse;
+
+ /* revision 1 */
+ __u32 flags;
+ };
/* Used internally by the kernel */
struct xt_connlimit_data *data __attribute__((aligned(8)));
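
The xt_connlimit change above keeps binary compatibility by overlaying the old inverse field and the new flags word in an anonymous union: revision 0 userspace keeps writing inverse, revision 1 writes flags, and both land in the same 32-bit slot (with XT_CONNLIMIT_INVERT deliberately equal to 1 so old boolean values still mean "invert"). A standalone illustration of the layout trick, compiled as C11 or with GNU extensions:

#include <stdio.h>

struct info {
	unsigned int limit;
	union {
		unsigned int inverse;	/* revision 0 layout */
		unsigned int flags;	/* revision 1 layout */
	};
};

int main(void)
{
	struct info i = { .limit = 10 };

	i.inverse = 1;				/* written by old tools */
	printf("flags seen by new code: %u\n", i.flags);	/* 1 */
	return 0;
}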
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 54f47a2f6152..74b904d8f99c 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -58,4 +58,19 @@ struct xt_conntrack_mtinfo2 {
__u16 state_mask, status_mask;
};
+struct xt_conntrack_mtinfo3 {
+ union nf_inet_addr origsrc_addr, origsrc_mask;
+ union nf_inet_addr origdst_addr, origdst_mask;
+ union nf_inet_addr replsrc_addr, replsrc_mask;
+ union nf_inet_addr repldst_addr, repldst_mask;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
+ __u16 origsrc_port, origdst_port;
+ __u16 replsrc_port, repldst_port;
+ __u16 match_flags, invert_flags;
+ __u16 state_mask, status_mask;
+ __u16 origsrc_port_high, origdst_port_high;
+ __u16 replsrc_port_high, repldst_port_high;
+};
+
#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h
new file mode 100644
index 000000000000..1babde0ec900
--- /dev/null
+++ b/include/linux/netfilter/xt_devgroup.h
@@ -0,0 +1,21 @@
+#ifndef _XT_DEVGROUP_H
+#define _XT_DEVGROUP_H
+
+#include <linux/types.h>
+
+enum xt_devgroup_flags {
+ XT_DEVGROUP_MATCH_SRC = 0x1,
+ XT_DEVGROUP_INVERT_SRC = 0x2,
+ XT_DEVGROUP_MATCH_DST = 0x4,
+ XT_DEVGROUP_INVERT_DST = 0x8,
+};
+
+struct xt_devgroup_info {
+ __u32 flags;
+ __u32 src_group;
+ __u32 src_mask;
+ __u32 dst_group;
+ __u32 dst_mask;
+};
+
+#endif /* _XT_DEVGROUP_H */
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index b0d28c659ab7..ca6e03e47a17 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -1,6 +1,8 @@
#ifndef _XT_QUOTA_H
#define _XT_QUOTA_H
+#include <linux/types.h>
+
enum xt_quota_flags {
XT_QUOTA_INVERT = 0x1,
};
@@ -9,9 +11,9 @@ enum xt_quota_flags {
struct xt_quota_priv;
struct xt_quota_info {
- u_int32_t flags;
- u_int32_t pad;
- aligned_u64 quota;
+ __u32 flags;
+ __u32 pad;
+ aligned_u64 quota;
/* Used internally by the kernel */
struct xt_quota_priv *master;
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
new file mode 100644
index 000000000000..081f1ded2842
--- /dev/null
+++ b/include/linux/netfilter/xt_set.h
@@ -0,0 +1,56 @@
+#ifndef _XT_SET_H
+#define _XT_SET_H
+
+#include <linux/types.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+/*
+ * Option flags for kernel operations (xt_set_info_v0)
+ */
+#define IPSET_SRC 0x01 /* Source match/add */
+#define IPSET_DST 0x02 /* Destination match/add */
+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
+
+struct xt_set_info_v0 {
+ ip_set_id_t index;
+ union {
+ __u32 flags[IPSET_DIM_MAX + 1];
+ struct {
+ __u32 __flags[IPSET_DIM_MAX];
+ __u8 dim;
+ __u8 flags;
+ } compat;
+ } u;
+};
+
+/* match and target infos */
+struct xt_set_info_match_v0 {
+ struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+ struct xt_set_info_v0 add_set;
+ struct xt_set_info_v0 del_set;
+};
+
+/* Revision 1: current interface to netfilter/iptables */
+
+struct xt_set_info {
+ ip_set_id_t index;
+ __u8 dim;
+ __u8 flags;
+};
+
+/* match and target infos */
+struct xt_set_info_match {
+ struct xt_set_info match_set;
+};
+
+struct xt_set_info_target {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+};
+
+#endif /*_XT_SET_H*/
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
index 6f475b8ff34b..26d7217bd4f1 100644
--- a/include/linux/netfilter/xt_socket.h
+++ b/include/linux/netfilter/xt_socket.h
@@ -1,6 +1,8 @@
#ifndef _XT_SOCKET_H
#define _XT_SOCKET_H
+#include <linux/types.h>
+
enum {
XT_SOCKET_TRANSPARENT = 1 << 0,
};
diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h
index 14b6df412c9f..7c37fac576c4 100644
--- a/include/linux/netfilter/xt_time.h
+++ b/include/linux/netfilter/xt_time.h
@@ -1,14 +1,16 @@
#ifndef _XT_TIME_H
#define _XT_TIME_H 1
+#include <linux/types.h>
+
struct xt_time_info {
- u_int32_t date_start;
- u_int32_t date_stop;
- u_int32_t daytime_start;
- u_int32_t daytime_stop;
- u_int32_t monthdays_match;
- u_int8_t weekdays_match;
- u_int8_t flags;
+ __u32 date_start;
+ __u32 date_stop;
+ __u32 daytime_start;
+ __u32 daytime_stop;
+ __u32 monthdays_match;
+ __u8 weekdays_match;
+ __u8 flags;
};
enum {
diff --git a/include/linux/netfilter/xt_u32.h b/include/linux/netfilter/xt_u32.h
index 9947f56cdbdd..04d1bfea03c2 100644
--- a/include/linux/netfilter/xt_u32.h
+++ b/include/linux/netfilter/xt_u32.h
@@ -1,6 +1,8 @@
#ifndef _XT_U32_H
#define _XT_U32_H 1
+#include <linux/types.h>
+
enum xt_u32_ops {
XT_U32_AND,
XT_U32_LEFTSH,
@@ -9,13 +11,13 @@ enum xt_u32_ops {
};
struct xt_u32_location_element {
- u_int32_t number;
- u_int8_t nextop;
+ __u32 number;
+ __u8 nextop;
};
struct xt_u32_value_element {
- u_int32_t min;
- u_int32_t max;
+ __u32 min;
+ __u32 max;
};
/*
@@ -27,14 +29,14 @@ struct xt_u32_value_element {
struct xt_u32_test {
struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
- u_int8_t nnums;
- u_int8_t nvalues;
+ __u8 nnums;
+ __u8 nvalues;
};
struct xt_u32 {
struct xt_u32_test tests[XT_U32_MAXSIZE+1];
- u_int8_t ntests;
- u_int8_t invert;
+ __u8 ntests;
+ __u8 invert;
};
#endif /* _XT_U32_H */
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
index c73ef0b18bdc..be5be1577a56 100644
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_802_3_H
#define __LINUX_BRIDGE_EBT_802_3_H
+#include <linux/types.h>
+
#define EBT_802_3_SAP 0x01
#define EBT_802_3_TYPE 0x02
@@ -24,24 +26,24 @@
/* ui has one byte ctrl, ni has two */
struct hdr_ui {
- uint8_t dsap;
- uint8_t ssap;
- uint8_t ctrl;
- uint8_t orig[3];
+ __u8 dsap;
+ __u8 ssap;
+ __u8 ctrl;
+ __u8 orig[3];
__be16 type;
};
struct hdr_ni {
- uint8_t dsap;
- uint8_t ssap;
+ __u8 dsap;
+ __u8 ssap;
__be16 ctrl;
- uint8_t orig[3];
+ __u8 orig[3];
__be16 type;
};
struct ebt_802_3_hdr {
- uint8_t daddr[6];
- uint8_t saddr[6];
+ __u8 daddr[6];
+ __u8 saddr[6];
__be16 len;
union {
struct hdr_ui ui;
@@ -59,10 +61,10 @@ static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
#endif
struct ebt_802_3_info {
- uint8_t sap;
+ __u8 sap;
__be16 type;
- uint8_t bitmask;
- uint8_t invflags;
+ __u8 bitmask;
+ __u8 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h
index 0009558609a7..bd4e3ad0b706 100644
--- a/include/linux/netfilter_bridge/ebt_among.h
+++ b/include/linux/netfilter_bridge/ebt_among.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_AMONG_H
#define __LINUX_BRIDGE_EBT_AMONG_H
+#include <linux/types.h>
+
#define EBT_AMONG_DST 0x01
#define EBT_AMONG_SRC 0x02
@@ -30,7 +32,7 @@
*/
struct ebt_mac_wormhash_tuple {
- uint32_t cmp[2];
+ __u32 cmp[2];
__be32 ip;
};
diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h
index cbf4843b6b0f..522f3e427f49 100644
--- a/include/linux/netfilter_bridge/ebt_arp.h
+++ b/include/linux/netfilter_bridge/ebt_arp.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_ARP_H
#define __LINUX_BRIDGE_EBT_ARP_H
+#include <linux/types.h>
+
#define EBT_ARP_OPCODE 0x01
#define EBT_ARP_HTYPE 0x02
#define EBT_ARP_PTYPE 0x04
@@ -27,8 +29,8 @@ struct ebt_arp_info
unsigned char smmsk[ETH_ALEN];
unsigned char dmaddr[ETH_ALEN];
unsigned char dmmsk[ETH_ALEN];
- uint8_t bitmask;
- uint8_t invflags;
+ __u8 bitmask;
+ __u8 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index 6a708fb92241..c4bbc41b0ea4 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -15,6 +15,8 @@
#ifndef __LINUX_BRIDGE_EBT_IP_H
#define __LINUX_BRIDGE_EBT_IP_H
+#include <linux/types.h>
+
#define EBT_IP_SOURCE 0x01
#define EBT_IP_DEST 0x02
#define EBT_IP_TOS 0x04
@@ -31,12 +33,12 @@ struct ebt_ip_info {
__be32 daddr;
__be32 smsk;
__be32 dmsk;
- uint8_t tos;
- uint8_t protocol;
- uint8_t bitmask;
- uint8_t invflags;
- uint16_t sport[2];
- uint16_t dport[2];
+ __u8 tos;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+ __u16 sport[2];
+ __u16 dport[2];
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
index e5de98701519..42b889682721 100644
--- a/include/linux/netfilter_bridge/ebt_ip6.h
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -12,14 +12,19 @@
#ifndef __LINUX_BRIDGE_EBT_IP6_H
#define __LINUX_BRIDGE_EBT_IP6_H
+#include <linux/types.h>
+
#define EBT_IP6_SOURCE 0x01
#define EBT_IP6_DEST 0x02
#define EBT_IP6_TCLASS 0x04
#define EBT_IP6_PROTO 0x08
#define EBT_IP6_SPORT 0x10
#define EBT_IP6_DPORT 0x20
+#define EBT_IP6_ICMP6 0x40
+
#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
- EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+ EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
+ EBT_IP6_ICMP6)
#define EBT_IP6_MATCH "ip6"
/* the same values are used for the invflags */
@@ -28,12 +33,18 @@ struct ebt_ip6_info {
struct in6_addr daddr;
struct in6_addr smsk;
struct in6_addr dmsk;
- uint8_t tclass;
- uint8_t protocol;
- uint8_t bitmask;
- uint8_t invflags;
- uint16_t sport[2];
- uint16_t dport[2];
+ __u8 tclass;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+ union {
+ __u16 sport[2];
+ __u8 icmpv6_type[2];
+ };
+ union {
+ __u16 dport[2];
+ __u8 icmpv6_code[2];
+ };
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_limit.h b/include/linux/netfilter_bridge/ebt_limit.h
index 4bf76b751676..66d80b30ba0e 100644
--- a/include/linux/netfilter_bridge/ebt_limit.h
+++ b/include/linux/netfilter_bridge/ebt_limit.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_LIMIT_H
#define __LINUX_BRIDGE_EBT_LIMIT_H
+#include <linux/types.h>
+
#define EBT_LIMIT_MATCH "limit"
/* timings are in milliseconds. */
@@ -10,13 +12,13 @@
seconds, or one every 59 hours. */
struct ebt_limit_info {
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
+ __u32 avg; /* Average secs between packets * scale */
+ __u32 burst; /* Period multiplier for upper limit. */
/* Used internally by the kernel */
unsigned long prev;
- u_int32_t credit;
- u_int32_t credit_cap, cost;
+ __u32 credit;
+ __u32 credit_cap, cost;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index cc2cdfb764bc..7e7f1d1fe494 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_LOG_H
#define __LINUX_BRIDGE_EBT_LOG_H
+#include <linux/types.h>
+
#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
#define EBT_LOG_ARP 0x02
#define EBT_LOG_NFLOG 0x04
@@ -10,9 +12,9 @@
#define EBT_LOG_WATCHER "log"
struct ebt_log_info {
- uint8_t loglevel;
- uint8_t prefix[EBT_LOG_PREFIX_SIZE];
- uint32_t bitmask;
+ __u8 loglevel;
+ __u8 prefix[EBT_LOG_PREFIX_SIZE];
+ __u32 bitmask;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_mark_m.h b/include/linux/netfilter_bridge/ebt_mark_m.h
index 9ceb10ec0ed6..410f9e5a71d4 100644
--- a/include/linux/netfilter_bridge/ebt_mark_m.h
+++ b/include/linux/netfilter_bridge/ebt_mark_m.h
@@ -1,13 +1,15 @@
#ifndef __LINUX_BRIDGE_EBT_MARK_M_H
#define __LINUX_BRIDGE_EBT_MARK_M_H
+#include <linux/types.h>
+
#define EBT_MARK_AND 0x01
#define EBT_MARK_OR 0x02
#define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
struct ebt_mark_m_info {
unsigned long mark, mask;
- uint8_t invert;
- uint8_t bitmask;
+ __u8 invert;
+ __u8 bitmask;
};
#define EBT_MARK_MATCH "mark_m"
diff --git a/include/linux/netfilter_bridge/ebt_nflog.h b/include/linux/netfilter_bridge/ebt_nflog.h
index 052817849b83..df829fce9125 100644
--- a/include/linux/netfilter_bridge/ebt_nflog.h
+++ b/include/linux/netfilter_bridge/ebt_nflog.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_NFLOG_H
#define __LINUX_BRIDGE_EBT_NFLOG_H
+#include <linux/types.h>
+
#define EBT_NFLOG_MASK 0x0
#define EBT_NFLOG_PREFIX_SIZE 64
@@ -10,11 +12,11 @@
#define EBT_NFLOG_DEFAULT_THRESHOLD 1
struct ebt_nflog_info {
- u_int32_t len;
- u_int16_t group;
- u_int16_t threshold;
- u_int16_t flags;
- u_int16_t pad;
+ __u32 len;
+ __u16 group;
+ __u16 threshold;
+ __u16 flags;
+ __u16 pad;
char prefix[EBT_NFLOG_PREFIX_SIZE];
};
diff --git a/include/linux/netfilter_bridge/ebt_pkttype.h b/include/linux/netfilter_bridge/ebt_pkttype.h
index 51a799840931..c241badcd036 100644
--- a/include/linux/netfilter_bridge/ebt_pkttype.h
+++ b/include/linux/netfilter_bridge/ebt_pkttype.h
@@ -1,9 +1,11 @@
#ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
#define __LINUX_BRIDGE_EBT_PKTTYPE_H
+#include <linux/types.h>
+
struct ebt_pkttype_info {
- uint8_t pkt_type;
- uint8_t invert;
+ __u8 pkt_type;
+ __u8 invert;
};
#define EBT_PKTTYPE_MATCH "pkttype"
diff --git a/include/linux/netfilter_bridge/ebt_stp.h b/include/linux/netfilter_bridge/ebt_stp.h
index e503a0aa2728..1025b9f5fb7d 100644
--- a/include/linux/netfilter_bridge/ebt_stp.h
+++ b/include/linux/netfilter_bridge/ebt_stp.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_STP_H
#define __LINUX_BRIDGE_EBT_STP_H
+#include <linux/types.h>
+
#define EBT_STP_TYPE 0x0001
#define EBT_STP_FLAGS 0x0002
@@ -21,24 +23,24 @@
#define EBT_STP_MATCH "stp"
struct ebt_stp_config_info {
- uint8_t flags;
- uint16_t root_priol, root_priou;
+ __u8 flags;
+ __u16 root_priol, root_priou;
char root_addr[6], root_addrmsk[6];
- uint32_t root_costl, root_costu;
- uint16_t sender_priol, sender_priou;
+ __u32 root_costl, root_costu;
+ __u16 sender_priol, sender_priou;
char sender_addr[6], sender_addrmsk[6];
- uint16_t portl, portu;
- uint16_t msg_agel, msg_ageu;
- uint16_t max_agel, max_ageu;
- uint16_t hello_timel, hello_timeu;
- uint16_t forward_delayl, forward_delayu;
+ __u16 portl, portu;
+ __u16 msg_agel, msg_ageu;
+ __u16 max_agel, max_ageu;
+ __u16 hello_timel, hello_timeu;
+ __u16 forward_delayl, forward_delayu;
};
struct ebt_stp_info {
- uint8_t type;
+ __u8 type;
struct ebt_stp_config_info config;
- uint16_t bitmask;
- uint16_t invflags;
+ __u16 bitmask;
+ __u16 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ulog.h b/include/linux/netfilter_bridge/ebt_ulog.h
index b677e2671541..89a6becb5269 100644
--- a/include/linux/netfilter_bridge/ebt_ulog.h
+++ b/include/linux/netfilter_bridge/ebt_ulog.h
@@ -1,6 +1,8 @@
#ifndef _EBT_ULOG_H
#define _EBT_ULOG_H
+#include <linux/types.h>
+
#define EBT_ULOG_DEFAULT_NLGROUP 0
#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
@@ -10,7 +12,7 @@
#define EBT_ULOG_VERSION 1
struct ebt_ulog_info {
- uint32_t nlgroup;
+ __u32 nlgroup;
unsigned int cprange;
unsigned int qthreshold;
char prefix[EBT_ULOG_PREFIX_LEN];
diff --git a/include/linux/netfilter_bridge/ebt_vlan.h b/include/linux/netfilter_bridge/ebt_vlan.h
index 1d98be4031e7..967d1d5cf98d 100644
--- a/include/linux/netfilter_bridge/ebt_vlan.h
+++ b/include/linux/netfilter_bridge/ebt_vlan.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_VLAN_H
#define __LINUX_BRIDGE_EBT_VLAN_H
+#include <linux/types.h>
+
#define EBT_VLAN_ID 0x01
#define EBT_VLAN_PRIO 0x02
#define EBT_VLAN_ENCAP 0x04
@@ -8,12 +10,12 @@
#define EBT_VLAN_MATCH "vlan"
struct ebt_vlan_info {
- uint16_t id; /* VLAN ID {1-4095} */
- uint8_t prio; /* VLAN User Priority {0-7} */
+ __u16 id; /* VLAN ID {1-4095} */
+ __u8 prio; /* VLAN User Priority {0-7} */
__be16 encap; /* VLAN Encapsulated frame code {0-65535} */
- uint8_t bitmask; /* Args bitmask bit 1=1 - ID arg,
+ __u8 bitmask; /* Args bitmask bit 1=1 - ID arg,
bit 2=1 User-Priority arg, bit 3=1 encap*/
- uint8_t invflags; /* Inverse bitmask bit 1=1 - inversed ID arg,
+ __u8 invflags; /* Inverse bitmask bit 1=1 - inversed ID arg,
bit 2=1 - inversed Pirority arg */
};
diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index e5a3687c8a72..c6a204c97047 100644
--- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -1,6 +1,8 @@
#ifndef _IPT_CLUSTERIP_H_target
#define _IPT_CLUSTERIP_H_target
+#include <linux/types.h>
+
enum clusterip_hashmode {
CLUSTERIP_HASHMODE_SIP = 0,
CLUSTERIP_HASHMODE_SIP_SPT,
@@ -17,15 +19,15 @@ struct clusterip_config;
struct ipt_clusterip_tgt_info {
- u_int32_t flags;
+ __u32 flags;
/* only relevant for new ones */
- u_int8_t clustermac[6];
- u_int16_t num_total_nodes;
- u_int16_t num_local_nodes;
- u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
- u_int32_t hash_mode;
- u_int32_t hash_initval;
+ __u8 clustermac[6];
+ __u16 num_total_nodes;
+ __u16 num_local_nodes;
+ __u16 local_nodes[CLUSTERIP_MAX_NODES];
+ __u32 hash_mode;
+ __u32 hash_initval;
/* Used internally by the kernel */
struct clusterip_config *config;
diff --git a/include/linux/netfilter_ipv4/ipt_ECN.h b/include/linux/netfilter_ipv4/ipt_ECN.h
index 7ca45918ab8e..bb88d5315a4d 100644
--- a/include/linux/netfilter_ipv4/ipt_ECN.h
+++ b/include/linux/netfilter_ipv4/ipt_ECN.h
@@ -8,6 +8,8 @@
*/
#ifndef _IPT_ECN_TARGET_H
#define _IPT_ECN_TARGET_H
+
+#include <linux/types.h>
#include <linux/netfilter/xt_DSCP.h>
#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
@@ -19,11 +21,11 @@
#define IPT_ECN_OP_MASK 0xce
struct ipt_ECN_info {
- u_int8_t operation; /* bitset of operations */
- u_int8_t ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */
+ __u8 operation; /* bitset of operations */
+ __u8 ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */
union {
struct {
- u_int8_t ece:1, cwr:1; /* TCP ECT bits */
+ __u8 ece:1, cwr:1; /* TCP ECT bits */
} tcp;
} proto;
};
diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h
index 2529660c5b38..5bca78267afd 100644
--- a/include/linux/netfilter_ipv4/ipt_SAME.h
+++ b/include/linux/netfilter_ipv4/ipt_SAME.h
@@ -1,15 +1,17 @@
#ifndef _IPT_SAME_H
#define _IPT_SAME_H
+#include <linux/types.h>
+
#define IPT_SAME_MAX_RANGE 10
#define IPT_SAME_NODST 0x01
struct ipt_same_info {
unsigned char info;
- u_int32_t rangesize;
- u_int32_t ipnum;
- u_int32_t *iparray;
+ __u32 rangesize;
+ __u32 ipnum;
+ __u32 *iparray;
/* hangs off end. */
struct nf_nat_range range[IPT_SAME_MAX_RANGE];
diff --git a/include/linux/netfilter_ipv4/ipt_TTL.h b/include/linux/netfilter_ipv4/ipt_TTL.h
index ee6611edc112..f6ac169d92f9 100644
--- a/include/linux/netfilter_ipv4/ipt_TTL.h
+++ b/include/linux/netfilter_ipv4/ipt_TTL.h
@@ -4,6 +4,8 @@
#ifndef _IPT_TTL_H
#define _IPT_TTL_H
+#include <linux/types.h>
+
enum {
IPT_TTL_SET = 0,
IPT_TTL_INC,
@@ -13,8 +15,8 @@ enum {
#define IPT_TTL_MAXMODE IPT_TTL_DEC
struct ipt_TTL_info {
- u_int8_t mode;
- u_int8_t ttl;
+ __u8 mode;
+ __u8 ttl;
};
diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h
index 446de6aef983..0da42237c8da 100644
--- a/include/linux/netfilter_ipv4/ipt_addrtype.h
+++ b/include/linux/netfilter_ipv4/ipt_addrtype.h
@@ -1,6 +1,8 @@
#ifndef _IPT_ADDRTYPE_H
#define _IPT_ADDRTYPE_H
+#include <linux/types.h>
+
enum {
IPT_ADDRTYPE_INVERT_SOURCE = 0x0001,
IPT_ADDRTYPE_INVERT_DEST = 0x0002,
@@ -9,17 +11,17 @@ enum {
};
struct ipt_addrtype_info_v1 {
- u_int16_t source; /* source-type mask */
- u_int16_t dest; /* dest-type mask */
- u_int32_t flags;
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 flags;
};
/* revision 0 */
struct ipt_addrtype_info {
- u_int16_t source; /* source-type mask */
- u_int16_t dest; /* dest-type mask */
- u_int32_t invert_source;
- u_int32_t invert_dest;
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 invert_source;
+ __u32 invert_dest;
};
#endif
diff --git a/include/linux/netfilter_ipv4/ipt_ah.h b/include/linux/netfilter_ipv4/ipt_ah.h
index 2e555b4d05e3..4e02bb0119e3 100644
--- a/include/linux/netfilter_ipv4/ipt_ah.h
+++ b/include/linux/netfilter_ipv4/ipt_ah.h
@@ -1,9 +1,11 @@
#ifndef _IPT_AH_H
#define _IPT_AH_H
+#include <linux/types.h>
+
struct ipt_ah {
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u8 invflags; /* Inverse flags */
};
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index 9945baa4ccd7..eabf95fb7d3e 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -8,6 +8,8 @@
*/
#ifndef _IPT_ECN_H
#define _IPT_ECN_H
+
+#include <linux/types.h>
#include <linux/netfilter/xt_dscp.h>
#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
@@ -20,12 +22,12 @@
/* match info */
struct ipt_ecn_info {
- u_int8_t operation;
- u_int8_t invert;
- u_int8_t ip_ect;
+ __u8 operation;
+ __u8 invert;
+ __u8 ip_ect;
union {
struct {
- u_int8_t ect;
+ __u8 ect;
} tcp;
} proto;
};
diff --git a/include/linux/netfilter_ipv4/ipt_ttl.h b/include/linux/netfilter_ipv4/ipt_ttl.h
index ee24fd86a3aa..37bee4442486 100644
--- a/include/linux/netfilter_ipv4/ipt_ttl.h
+++ b/include/linux/netfilter_ipv4/ipt_ttl.h
@@ -4,6 +4,8 @@
#ifndef _IPT_TTL_H
#define _IPT_TTL_H
+#include <linux/types.h>
+
enum {
IPT_TTL_EQ = 0, /* equals */
IPT_TTL_NE, /* not equals */
@@ -13,8 +15,8 @@ enum {
struct ipt_ttl_info {
- u_int8_t mode;
- u_int8_t ttl;
+ __u8 mode;
+ __u8 ttl;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_HL.h b/include/linux/netfilter_ipv6/ip6t_HL.h
index afb7813d45ab..ebd8ead1bb63 100644
--- a/include/linux/netfilter_ipv6/ip6t_HL.h
+++ b/include/linux/netfilter_ipv6/ip6t_HL.h
@@ -5,6 +5,8 @@
#ifndef _IP6T_HL_H
#define _IP6T_HL_H
+#include <linux/types.h>
+
enum {
IP6T_HL_SET = 0,
IP6T_HL_INC,
@@ -14,8 +16,8 @@ enum {
#define IP6T_HL_MAXMODE IP6T_HL_DEC
struct ip6t_HL_info {
- u_int8_t mode;
- u_int8_t hop_limit;
+ __u8 mode;
+ __u8 hop_limit;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_REJECT.h b/include/linux/netfilter_ipv6/ip6t_REJECT.h
index 6be6504162bb..205ed62e4605 100644
--- a/include/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -1,6 +1,8 @@
#ifndef _IP6T_REJECT_H
#define _IP6T_REJECT_H
+#include <linux/types.h>
+
enum ip6t_reject_with {
IP6T_ICMP6_NO_ROUTE,
IP6T_ICMP6_ADM_PROHIBITED,
@@ -12,7 +14,7 @@ enum ip6t_reject_with {
};
struct ip6t_reject_info {
- u_int32_t with; /* reject type */
+ __u32 with; /* reject type */
};
#endif /*_IP6T_REJECT_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h
index 17a745cfb2c7..5da2b65cb3ad 100644
--- a/include/linux/netfilter_ipv6/ip6t_ah.h
+++ b/include/linux/netfilter_ipv6/ip6t_ah.h
@@ -1,11 +1,13 @@
#ifndef _IP6T_AH_H
#define _IP6T_AH_H
+#include <linux/types.h>
+
struct ip6t_ah {
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t hdrres; /* Test of the Reserved Filed */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u32 hdrlen; /* Header Length */
+ __u8 hdrres; /* Test of the Reserved Field */
+ __u8 invflags; /* Inverse flags */
};
#define IP6T_AH_SPI 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h
index 3724d0850920..b47f61b9e082 100644
--- a/include/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/linux/netfilter_ipv6/ip6t_frag.h
@@ -1,11 +1,13 @@
#ifndef _IP6T_FRAG_H
#define _IP6T_FRAG_H
+#include <linux/types.h>
+
struct ip6t_frag {
- u_int32_t ids[2]; /* Security Parameter Index */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
+ __u32 ids[2]; /* Security Parameter Index */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
};
#define IP6T_FRAG_IDS 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_hl.h b/include/linux/netfilter_ipv6/ip6t_hl.h
index 5ef91b8319a8..6e76dbc6c19a 100644
--- a/include/linux/netfilter_ipv6/ip6t_hl.h
+++ b/include/linux/netfilter_ipv6/ip6t_hl.h
@@ -5,6 +5,8 @@
#ifndef _IP6T_HL_H
#define _IP6T_HL_H
+#include <linux/types.h>
+
enum {
IP6T_HL_EQ = 0, /* equals */
IP6T_HL_NE, /* not equals */
@@ -14,8 +16,8 @@ enum {
struct ip6t_hl_info {
- u_int8_t mode;
- u_int8_t hop_limit;
+ __u8 mode;
+ __u8 hop_limit;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_ipv6header.h b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
index 01dfd445596a..efae3a20c214 100644
--- a/include/linux/netfilter_ipv6/ip6t_ipv6header.h
+++ b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
@@ -8,10 +8,12 @@ on whether they contain certain headers */
#ifndef __IPV6HEADER_H
#define __IPV6HEADER_H
+#include <linux/types.h>
+
struct ip6t_ipv6header_info {
- u_int8_t matchflags;
- u_int8_t invflags;
- u_int8_t modeflag;
+ __u8 matchflags;
+ __u8 invflags;
+ __u8 modeflag;
};
#define MASK_HOPOPTS 128
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
index 18549bca2d1f..a7729a5025cd 100644
--- a/include/linux/netfilter_ipv6/ip6t_mh.h
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -1,10 +1,12 @@
#ifndef _IP6T_MH_H
#define _IP6T_MH_H
+#include <linux/types.h>
+
/* MH matching stuff */
struct ip6t_mh {
- u_int8_t types[2]; /* MH type range */
- u_int8_t invflags; /* Inverse flags */
+ __u8 types[2]; /* MH type range */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "invflags" field in struct ip6t_mh. */
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h
index 62d89bcd9f9c..17d419a811fd 100644
--- a/include/linux/netfilter_ipv6/ip6t_opts.h
+++ b/include/linux/netfilter_ipv6/ip6t_opts.h
@@ -1,14 +1,16 @@
#ifndef _IP6T_OPTS_H
#define _IP6T_OPTS_H
+#include <linux/types.h>
+
#define IP6T_OPTS_OPTSNR 16
struct ip6t_opts {
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
- u_int16_t opts[IP6T_OPTS_OPTSNR]; /* opts */
- u_int8_t optsnr; /* Nr of OPts */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
+ __u16 opts[IP6T_OPTS_OPTSNR]; /* opts */
+ __u8 optsnr; /* Nr of OPts */
};
#define IP6T_OPTS_LEN 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h
index ab91bfd2cd00..7605a5ff81cd 100644
--- a/include/linux/netfilter_ipv6/ip6t_rt.h
+++ b/include/linux/netfilter_ipv6/ip6t_rt.h
@@ -1,18 +1,19 @@
#ifndef _IP6T_RT_H
#define _IP6T_RT_H
+#include <linux/types.h>
/*#include <linux/in6.h>*/
#define IP6T_RT_HOPS 16
struct ip6t_rt {
- u_int32_t rt_type; /* Routing Type */
- u_int32_t segsleft[2]; /* Segments Left */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
+ __u32 rt_type; /* Routing Type */
+ __u32 segsleft[2]; /* Segments Left */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
struct in6_addr addrs[IP6T_RT_HOPS]; /* Hops */
- u_int8_t addrnr; /* Nr of Addresses */
+ __u8 addrnr; /* Nr of Addresses */
};
#define IP6T_RT_TYP 0x01
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 227e49dd5720..cc5bbe9428c7 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -41,26 +41,6 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-/*
- * Inode flags stored in nilfs_inode and on-memory nilfs inode
- *
- * We define these flags based on ext2-fs because of the
- * compatibility reason; to avoid problems in chattr(1)
- */
-#define NILFS_SECRM_FL 0x00000001 /* Secure deletion */
-#define NILFS_UNRM_FL 0x00000002 /* Undelete */
-#define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */
-#define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */
-#define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */
-#define NILFS_NODUMP_FL 0x00000040 /* do not dump file */
-#define NILFS_NOATIME_FL 0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
-#define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */
-
-#define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-#define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
#define NILFS_INODE_BMAP_SIZE 7
/**
@@ -346,17 +326,21 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
+#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
if (len == NILFS_MAX_REC_LEN)
return 1 << 16;
+#endif
return len;
}
static inline __le16 nilfs_rec_len_to_disk(unsigned len)
{
+#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(NILFS_MAX_REC_LEN);
else if (len > (1 << 16))
BUG();
+#endif
return cpu_to_le16(len);
}
@@ -525,7 +509,7 @@ struct nilfs_checkpoint {
__le64 cp_create;
__le64 cp_nblk_inc;
__le64 cp_inodes_count;
- __le64 cp_blocks_count; /* Reserved (might be deleted) */
+ __le64 cp_blocks_count;
/* Do not change the byte offset of ifile inode.
To keep the compatibility of the disk format,
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 821ffb954f14..30022189104d 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -1243,6 +1243,8 @@ enum nl80211_rate_info {
* @NL80211_STA_INFO_LLID: the station's mesh LLID
* @NL80211_STA_INFO_PLID: the station's mesh PLID
* @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested
+ * attribute, like NL80211_STA_INFO_TX_BITRATE.
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -1261,6 +1263,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_TX_RETRIES,
NL80211_STA_INFO_TX_FAILED,
NL80211_STA_INFO_SIGNAL_AVG,
+ NL80211_STA_INFO_RX_BITRATE,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
diff --git a/include/linux/of.h b/include/linux/of.h
index cad7cf0ab278..bfc0ed1b0ced 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -23,8 +23,6 @@
#include <asm/byteorder.h>
-#ifdef CONFIG_OF
-
typedef u32 phandle;
typedef u32 ihandle;
@@ -65,11 +63,18 @@ struct device_node {
#endif
};
+#ifdef CONFIG_OF
+
/* Pointer for first entry in chain of all nodes. */
extern struct device_node *allnodes;
extern struct device_node *of_chosen;
extern rwlock_t devtree_lock;
+static inline bool of_have_populated_dt(void)
+{
+ return allnodes != NULL;
+}
+
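+/*
+ * Illustrative only (not part of this patch): a hypothetical initcall showing
+ * how the new of_have_populated_dt() helper can guard device-tree-only setup.
+ * With CONFIG_OF disabled the stub added further down always returns false,
+ * so the branch compiles away.
+ */
+static int __init my_board_init(void)
+{
+	if (!of_have_populated_dt())
+		return 0;	/* booted without a populated device tree */
+
+	/* ... walk nodes, e.g. via of_find_node_by_path("/soc") ... */
+	return 0;
+}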
static inline bool of_node_is_root(const struct device_node *node)
{
return node && (node->parent == NULL);
@@ -103,7 +108,7 @@ extern void of_node_put(struct device_node *node);
#endif
/*
- * OF address retreival & translation
+ * OF address retrieval & translation
*/
/* Helper to read a big number; size is in cells (not bytes) */
@@ -222,5 +227,12 @@ extern void of_attach_node(struct device_node *);
extern void of_detach_node(struct device_node *);
#endif
+#else
+
+static inline bool of_have_populated_dt(void)
+{
+ return false;
+}
+
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 975d347079d9..8bfe6c1d4365 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -18,10 +18,11 @@ extern void of_device_make_bus_id(struct device *dev);
* @drv: the device_driver structure to test
* @dev: the device structure to match against
*/
-static inline int of_driver_match_device(const struct device *dev,
+static inline int of_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
- return of_match_device(drv->of_match_table, dev) != NULL;
+ dev->of_match = of_match_device(drv->of_match_table, dev);
+ return dev->of_match != NULL;
}
extern struct platform_device *of_dev_get(struct platform_device *dev);
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
new file mode 100644
index 000000000000..85a27b650d76
--- /dev/null
+++ b/include/linux/of_pci.h
@@ -0,0 +1,9 @@
+#ifndef __OF_PCI_H
+#define __OF_PCI_H
+
+#include <linux/pci.h>
+
+struct pci_dev;
+struct of_irq;
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+#endif
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index a68716ad38ce..17c7e21c0bd7 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -23,13 +23,7 @@
* of_platform_driver - Legacy of-aware driver for platform devices.
*
* An of_platform_driver driver is attached to a basic platform_device on
- * ether the "platform bus" (platform_bus_type), or the ibm ebus
- * (ibmebus_bus_type).
- *
- * of_platform_driver is being phased out when used with the platform_bus_type,
- * and regular platform_drivers should be used instead. When the transition
- * is complete, only ibmebus will be using this structure, and the
- * platform_driver member of this structure will be removed.
+ * the ibm ebus (ibmebus_bus_type).
*/
struct of_platform_driver
{
@@ -42,26 +36,16 @@ struct of_platform_driver
int (*shutdown)(struct platform_device* dev);
struct device_driver driver;
- struct platform_driver platform_driver;
};
#define to_of_platform_driver(drv) \
container_of(drv,struct of_platform_driver, driver)
-extern int of_register_driver(struct of_platform_driver *drv,
- struct bus_type *bus);
-extern void of_unregister_driver(struct of_platform_driver *drv);
-
/* Platform drivers register/unregister */
-extern int of_register_platform_driver(struct of_platform_driver *drv);
-extern void of_unregister_platform_driver(struct of_platform_driver *drv);
-
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
struct device *parent);
extern struct platform_device *of_find_device_by_node(struct device_node *np);
-extern int of_bus_type_init(struct bus_type *bus, const char *name);
-
#if !defined(CONFIG_SPARC) /* SPARC has its own device registration method */
/* Platform devices and busses creation */
extern struct platform_device *of_platform_device_create(struct device_node *np,
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1ca64113efe8..7f5cfd3b37dd 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -106,6 +106,13 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel);
+/**
+ * Add a hardware sample.
+ */
+void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
+ unsigned long event, int is_kernel,
+ struct task_struct *task);
+
/* Use this instead when the PC value is not from the regs. Doesn't
* backtrace. */
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 559d02897075..c77e730bcd38 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1191,6 +1191,11 @@ static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
}
+static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+ return 0;
+}
+
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
pm_message_t state)
{
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3adb06ebf841..bda221dfaf0a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,7 +517,8 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
-#define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603
+#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
+#define PCI_DEVICE_ID_AMD_15H_NB_LINK 0x1604
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -2078,6 +2079,8 @@
#define PCI_DEVICE_ID_TIGON3_5723 0x165b
#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
+#define PCI_DEVICE_ID_NX2_57712 0x1662
+#define PCI_DEVICE_ID_NX2_57712E 0x1663
#define PCI_DEVICE_ID_TIGON3_5714 0x1668
#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
#define PCI_DEVICE_ID_TIGON3_5780 0x166a
@@ -2480,6 +2483,9 @@
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 27c3c6fcfad3..3a5c4449fd36 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -255,6 +255,30 @@ extern void __bad_size_call_parameter(void);
pscr2_ret__; \
})
+/*
+ * Special handling for cmpxchg_double. cmpxchg_double is passed two
+ * percpu variables. The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+({ \
+ bool pdcrb_ret__; \
+ __verify_pcpu_ptr(&pcp1); \
+ BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
+ VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
+ VM_BUG_ON((unsigned long)(&pcp2) != \
+ (unsigned long)(&pcp1) + sizeof(pcp1)); \
+ switch(sizeof(pcp1)) { \
+ case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
+ case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
+ case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
+ case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+ default: \
+ __bad_size_call_parameter(); break; \
+ } \
+ pdcrb_ret__; \
+})
+
#define __pcpu_size_call(stem, variable, ...) \
do { \
__verify_pcpu_ptr(&(variable)); \
@@ -501,6 +525,45 @@ do { \
#endif
/*
+ * cmpxchg_double replaces two adjacent scalars at once. The first
+ * two parameters are per cpu variables which have to be of the same
+ * size. A truth value is returned to indicate success or failure
+ * (since a double register result is difficult to handle). There is
+ * very limited hardware support for these operations, so only certain
+ * sizes may work.
+ */
+#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ int ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+ oval1, oval2, nval1, nval2); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
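+/*
+ * Hedged illustration of the constraint described above (all names are
+ * hypothetical, and this relies only on the generic fallback defined below):
+ * the two per-cpu scalars must have the same size, be double-word aligned,
+ * and sit directly adjacent, which is easiest to guarantee with a struct.
+ */
+struct pcpu_pair {
+	unsigned long	first;
+	unsigned long	second;
+} __aligned(2 * sizeof(unsigned long));
+
+static DEFINE_PER_CPU(struct pcpu_pair, my_pair);
+
+static bool try_update_pair(unsigned long o1, unsigned long o2,
+			    unsigned long n1, unsigned long n2)
+{
+	/* Succeeds only if both words still hold their expected values. */
+	return this_cpu_cmpxchg_double(my_pair.first, my_pair.second,
+				       o1, o2, n1, n2);
+}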
+/*
* Generic percpu operations that do not require preemption handling.
* Either we do not care about races or the caller has the
* responsibility of handling preemptions issues. Arch code can still
@@ -703,6 +766,39 @@ do { \
__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif
+#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ int __ret = 0; \
+ if (__this_cpu_read(pcp1) == (oval1) && \
+ __this_cpu_read(pcp2) == (oval2)) { \
+ __this_cpu_write(pcp1, (nval1)); \
+ __this_cpu_write(pcp2, (nval2)); \
+ __ret = 1; \
+ } \
+ (__ret); \
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
/*
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from another
@@ -823,4 +919,36 @@ do { \
__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif
+#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ int ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+ oval1, oval2, nval1, nval2); \
+ local_irq_restore(flags); \
+ ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+# define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+# define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+# define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+# define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index dda5b0a3ff60..8ceb5a6fd9c9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -464,6 +464,7 @@ enum perf_callchain_context {
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
+#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
@@ -471,6 +472,7 @@ enum perf_callchain_context {
*/
#ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
@@ -716,6 +718,22 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info *info; /* timing info, one per cpu */
+};
+#endif
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -832,6 +850,11 @@ struct perf_event {
struct event_filter *filter;
#endif
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp; /* cgroup the event is attached to */
+ int cgrp_defer_enabled;
+#endif
+
#endif /* CONFIG_PERF_EVENTS */
};
@@ -886,6 +909,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
struct rcu_head rcu_head;
+ int nr_cgroups; /* cgroup events present */
};
/*
@@ -905,6 +929,9 @@ struct perf_cpu_context {
struct list_head rotation_list;
int jiffies_interval;
struct pmu *active_pmu;
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp;
+#endif
};
struct perf_output_handle {
@@ -1040,11 +1067,11 @@ have_event:
__perf_sw_event(event_id, nr, nmi, regs, addr);
}
-extern atomic_t perf_task_events;
+extern atomic_t perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
- COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+ COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
}
static inline
@@ -1052,7 +1079,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
- COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+ COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
}
extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1083,6 +1110,10 @@ extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
static inline bool perf_paranoid_tracepoint_raw(void)
{
return sysctl_perf_event_paranoid > -1;
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 2cfa4bc8dea6..b1032a3fafdc 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -247,6 +247,35 @@ struct tc_gred_sopt {
__u16 pad1;
};
+/* CHOKe section */
+
+enum {
+ TCA_CHOKE_UNSPEC,
+ TCA_CHOKE_PARMS,
+ TCA_CHOKE_STAB,
+ __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+ __u32 limit; /* Hard queue length (packets) */
+ __u32 qth_min; /* Min average threshold (packets) */
+ __u32 qth_max; /* Max average threshold (packets) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags; /* see RED flags */
+};
+
+struct tc_choke_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+ __u32 matched; /* Drops due to flow match */
+};
+
/* HTB section */
#define TC_HTB_NUMPRIO 8
#define TC_HTB_MAXDEPTH 8
@@ -435,6 +464,7 @@ enum {
TCA_NETEM_DELAY_DIST,
TCA_NETEM_REORDER,
TCA_NETEM_CORRUPT,
+ TCA_NETEM_LOSS,
__TCA_NETEM_MAX,
};
@@ -465,7 +495,33 @@ struct tc_netem_corrupt {
__u32 correlation;
};
+enum {
+ NETEM_LOSS_UNSPEC,
+ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
+ NETEM_LOSS_GE, /* Gilbert-Elliot models */
+ __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probabilities for the 4-state model */
+struct tc_netem_gimodel {
+ __u32 p13;
+ __u32 p31;
+ __u32 p32;
+ __u32 p14;
+ __u32 p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+ __u32 p;
+ __u32 r;
+ __u32 h;
+ __u32 k1;
+};
+
#define NETEM_DIST_SCALE 8192
+#define NETEM_DIST_MAX 16384
/* DRR */
@@ -481,4 +537,55 @@ struct tc_drr_stats {
__u32 deficit;
};
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+struct tc_mqprio_qopt {
+ __u8 num_tc;
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
+ __u8 hw;
+ __u16 count[TC_QOPT_MAX_QUEUE];
+ __u16 offset[TC_QOPT_MAX_QUEUE];
+};
+
+/* SFB */
+
+enum {
+ TCA_SFB_UNSPEC,
+ TCA_SFB_PARMS,
+ __TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+ __u32 rehash_interval; /* delay between hash move, in ms */
+ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+ __u32 max; /* max len of qlen_min */
+ __u32 bin_size; /* maximum queue length per bin */
+ __u32 increment; /* probability increment, (d1 in Blue) */
+ __u32 decrement; /* probability decrement, (d2 in Blue) */
+ __u32 limit; /* max SFB queue length */
+ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
+ __u32 penalty_burst;
+};
+
+struct tc_sfb_xstats {
+ __u32 earlydrop;
+ __u32 penaltydrop;
+ __u32 bucketdrop;
+ __u32 queuedrop;
+ __u32 childdrop; /* drops in child qdisc */
+ __u32 marked;
+ __u32 maxqlen;
+ __u32 maxprob;
+ __u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
+
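+/*
+ * Since increment/decrement above are Q0.16 fixed-point values, a userspace
+ * configuration tool would scale a real-valued probability by 65536 before
+ * filling in tc_sfb_qopt. A minimal sketch (hypothetical helper, userspace
+ * only, not part of this patch):
+ *
+ *	static __u32 sfb_prob_to_q016(double p)
+ *	{
+ *		if (p >= 1.0)
+ *			return SFB_MAX_PROB;	   /" 0xFFFF ~= 1.0 "/
+ *		return (__u32)(p * 65536.0);	   /" e.g. 0.00050 -> 32 "/
+ *	}
+ */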
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index dd9c7ab38270..6618216bb973 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -267,7 +267,7 @@ const struct dev_pm_ops name = { \
* callbacks provided by device drivers supporting both the system sleep PM and
* runtime PM, make the pm member point to generic_subsys_pm_ops.
*/
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
extern struct dev_pm_ops generic_subsys_pm_ops;
#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
#else
@@ -431,6 +431,8 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+#else
+ unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
struct timer_list suspend_timer;
@@ -463,6 +465,14 @@ struct dev_pm_info {
extern void update_pm_runtime_accounting(struct device *dev);
+/*
+ * Power domains provide callbacks that are executed during system suspend,
+ * hibernation, system resume and during runtime PM transitions along with
+ * subsystem-level and driver-level callbacks.
+ */
+struct dev_power_domain {
+ struct dev_pm_ops ops;
+};
/*
* The PM_EVENT_ messages are also used by drivers implementing the legacy
@@ -563,15 +573,6 @@ enum dpm_order {
DPM_ORDER_DEV_LAST,
};
-/*
- * Global Power Management flags
- * Used to keep APM and ACPI from both being active
- */
-extern unsigned int pm_flags;
-
-#define PM_APM 1
-#define PM_ACPI 2
-
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d34f067e2a7f..8de9aa6e7def 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -87,6 +87,11 @@ static inline bool pm_runtime_enabled(struct device *dev)
return !dev->power.disable_depth;
}
+static inline bool pm_runtime_callbacks_present(struct device *dev)
+{
+ return !dev->power.no_callbacks;
+}
+
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
ACCESS_ONCE(dev->power.last_busy) = jiffies;
@@ -133,6 +138,7 @@ static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
+static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 9cff00dd6b63..a32da962d693 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -62,18 +62,11 @@ struct wakeup_source {
* Changes to device_may_wakeup take effect on the next pm state change.
*/
-static inline void device_set_wakeup_capable(struct device *dev, bool capable)
-{
- dev->power.can_wakeup = capable;
-}
-
static inline bool device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
}
-
-
static inline bool device_may_wakeup(struct device *dev)
{
return dev->power.can_wakeup && !!dev->power.wakeup;
@@ -88,6 +81,7 @@ extern struct wakeup_source *wakeup_source_register(const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
extern int device_wakeup_disable(struct device *dev);
+extern void device_set_wakeup_capable(struct device *dev, bool capable);
extern int device_init_wakeup(struct device *dev, bool val);
extern int device_set_wakeup_enable(struct device *dev, bool enable);
extern void __pm_stay_awake(struct wakeup_source *ws);
@@ -109,11 +103,6 @@ static inline bool device_can_wakeup(struct device *dev)
return dev->power.can_wakeup;
}
-static inline bool device_may_wakeup(struct device *dev)
-{
- return false;
-}
-
static inline struct wakeup_source *wakeup_source_create(const char *name)
{
return NULL;
@@ -134,24 +123,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
static inline int device_wakeup_enable(struct device *dev)
{
- return -EINVAL;
+ dev->power.should_wakeup = true;
+ return 0;
}
static inline int device_wakeup_disable(struct device *dev)
{
+ dev->power.should_wakeup = false;
return 0;
}
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
{
- dev->power.can_wakeup = val;
- return val ? -EINVAL : 0;
+ dev->power.should_wakeup = enable;
+ return 0;
}
+static inline int device_init_wakeup(struct device *dev, bool val)
+{
+ device_set_wakeup_capable(dev, val);
+ device_set_wakeup_enable(dev, val);
+ return 0;
+}
-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+static inline bool device_may_wakeup(struct device *dev)
{
- return -EINVAL;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
new file mode 100644
index 000000000000..369e19d3750b
--- /dev/null
+++ b/include/linux/posix-clock.h
@@ -0,0 +1,150 @@
+/*
+ * posix-clock.h - support for dynamic clock devices
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _LINUX_POSIX_CLOCK_H_
+#define _LINUX_POSIX_CLOCK_H_
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/posix-timers.h>
+
+struct posix_clock;
+
+/**
+ * struct posix_clock_operations - functional interface to the clock
+ *
+ * Every posix clock is represented by a character device. Drivers may
+ * optionally offer extended capabilities by implementing the
+ * character device methods. The character device file operations are
+ * first handled by the clock device layer, then passed on to the
+ * driver by calling these functions.
+ *
+ * @owner: The clock driver should set this to THIS_MODULE
+ * @clock_adjtime: Adjust the clock
+ * @clock_gettime: Read the current time
+ * @clock_getres: Get the clock resolution
+ * @clock_settime: Set the current time value
+ * @timer_create: Create a new timer
+ * @timer_delete: Remove a previously created timer
+ * @timer_gettime: Get remaining time and interval of a timer
+ * @timer_settime: Set a timer's initial expiration and interval
+ * @fasync: Optional character device fasync method
+ * @mmap: Optional character device mmap method
+ * @open: Optional character device open method
+ * @release: Optional character device release method
+ * @ioctl: Optional character device ioctl method
+ * @read: Optional character device read method
+ * @poll: Optional character device poll method
+ */
+struct posix_clock_operations {
+ struct module *owner;
+
+ int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
+
+ int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+
+ int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+
+ int (*clock_settime)(struct posix_clock *pc,
+ const struct timespec *ts);
+
+ int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
+
+ int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
+
+ void (*timer_gettime)(struct posix_clock *pc,
+ struct k_itimer *kit, struct itimerspec *tsp);
+
+ int (*timer_settime)(struct posix_clock *pc,
+ struct k_itimer *kit, int flags,
+ struct itimerspec *tsp, struct itimerspec *old);
+ /*
+ * Optional character device methods:
+ */
+ int (*fasync) (struct posix_clock *pc,
+ int fd, struct file *file, int on);
+
+ long (*ioctl) (struct posix_clock *pc,
+ unsigned int cmd, unsigned long arg);
+
+ int (*mmap) (struct posix_clock *pc,
+ struct vm_area_struct *vma);
+
+ int (*open) (struct posix_clock *pc, fmode_t f_mode);
+
+ uint (*poll) (struct posix_clock *pc,
+ struct file *file, poll_table *wait);
+
+ int (*release) (struct posix_clock *pc);
+
+ ssize_t (*read) (struct posix_clock *pc,
+ uint flags, char __user *buf, size_t cnt);
+};
+
+/**
+ * struct posix_clock - represents a dynamic posix clock
+ *
+ * @ops: Functional interface to the clock
+ * @cdev: Character device instance for this clock
+ * @kref: Reference count.
+ * @mutex: Protects the 'zombie' field from concurrent access.
+ * @zombie: If 'zombie' is true, then the hardware has disappeared.
+ * @release: A function to free the structure when the reference count reaches
+ * zero. May be NULL if structure is statically allocated.
+ *
+ * Drivers should embed their struct posix_clock within a private
+ * structure, obtaining a reference to it during callbacks using
+ * container_of().
+ */
+struct posix_clock {
+ struct posix_clock_operations ops;
+ struct cdev cdev;
+ struct kref kref;
+ struct mutex mutex;
+ bool zombie;
+ void (*release)(struct posix_clock *clk);
+};
+
+/**
+ * posix_clock_register() - register a new clock
+ * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
+ * @devid: Allocated device id
+ *
+ * A clock driver calls this function to register itself with the
+ * clock device subsystem. If 'clk' points to dynamically allocated
+ * memory, then the caller must provide a 'release' function to free
+ * that memory.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+int posix_clock_register(struct posix_clock *clk, dev_t devid);
+
+/**
+ * posix_clock_unregister() - unregister a clock
+ * @clk: Clock instance previously registered via posix_clock_register()
+ *
+ * A clock driver calls this function to remove itself from the clock
+ * device subsystem. The posix_clock itself will remain (in an
+ * inactive state) until its reference count drops to zero, at which
+ * point it will be deallocated with its 'release' method.
+ */
+void posix_clock_unregister(struct posix_clock *clk);
+
+#endif
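A minimal sketch of how a driver might embed and register one of these dynamic
clocks; the structure layout and registration call come from the header above,
while my_clock, my_clock_gettime and the error handling are hypothetical:

static void my_clock_release(struct posix_clock *pc)
{
	struct my_clock *mine = container_of(pc, struct my_clock, clock);

	kfree(mine);	/* invoked once the reference count drops to zero */
}

static int my_clock_register(struct my_clock *mine, dev_t devid)
{
	mine->clock.ops.owner = THIS_MODULE;
	mine->clock.ops.clock_gettime = my_clock_gettime;	/* hypothetical callback */
	mine->clock.release = my_clock_release;		/* mine is kmalloc'ed */
	return posix_clock_register(&mine->clock, devid);
}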
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 3e23844a6990..d51243ae0726 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>
+#include <linux/timex.h>
union cpu_time_count {
cputime_t cpu;
@@ -17,10 +18,21 @@ struct cpu_timer_list {
int firing;
};
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
#define CPUCLOCK_PERTHREAD(clock) \
(((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
-#define CPUCLOCK_PID_MASK 7
+
#define CPUCLOCK_PERTHREAD_MASK 4
#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
#define CPUCLOCK_CLOCK_MASK 3
@@ -28,12 +40,17 @@ struct cpu_timer_list {
#define CPUCLOCK_VIRT 1
#define CPUCLOCK_SCHED 2
#define CPUCLOCK_MAX 3
+#define CLOCKFD CPUCLOCK_MAX
+#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
#define MAKE_THREAD_CPUCLOCK(tid, clock) \
MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
+#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
+
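+/*
+ * Illustration only (hypothetical helper, not part of this patch): the two
+ * macros above invert each other, so a dynamic clock's chardev descriptor can
+ * be recovered from the clockid it was encoded into.
+ *
+ *	static inline bool clockid_fd_roundtrip(unsigned int fd)
+ *	{
+ *		clockid_t clkid = FD_TO_CLOCKID(fd);  /" low bits = CLOCKFD "/
+ *		return CLOCKID_TO_FD(clkid) == fd;    /" holds for valid fds "/
+ *	}
+ */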
/* POSIX.1b interval timer structure. */
struct k_itimer {
struct list_head list; /* free/ allocate list */
@@ -67,10 +84,11 @@ struct k_itimer {
};
struct k_clock {
- int res; /* in nanoseconds */
int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
- int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
+ int (*clock_set) (const clockid_t which_clock,
+ const struct timespec *tp);
int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
+ int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
int (*timer_create) (struct k_itimer *timer);
int (*nsleep) (const clockid_t which_clock, int flags,
struct timespec *, struct timespec __user *);
@@ -84,28 +102,14 @@ struct k_clock {
struct itimerspec * cur_setting);
};
-void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
+extern struct k_clock clock_posix_cpu;
+extern struct k_clock clock_posix_dynamic;
-/* error handlers for timer_create, nanosleep and settime */
-int do_posix_clock_nonanosleep(const clockid_t, int flags, struct timespec *,
- struct timespec __user *);
-int do_posix_clock_nosettime(const clockid_t, struct timespec *tp);
+void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock);
/* function to call to trigger timer event */
int posix_timer_event(struct k_itimer *timr, int si_private);
-int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *ts);
-int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *ts);
-int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *ts);
-int posix_cpu_timer_create(struct k_itimer *timer);
-int posix_cpu_nsleep(const clockid_t which_clock, int flags,
- struct timespec *rqtp, struct timespec __user *rmtp);
-long posix_cpu_nsleep_restart(struct restart_block *restart_block);
-int posix_cpu_timer_set(struct k_itimer *timer, int flags,
- struct itimerspec *new, struct itimerspec *old);
-int posix_cpu_timer_del(struct k_itimer *timer);
-void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp);
-
void posix_cpu_timer_schedule(struct k_itimer *timer);
void run_posix_cpu_timers(struct task_struct *task);
diff --git a/include/linux/power/bq20z75.h b/include/linux/power/bq20z75.h
new file mode 100644
index 000000000000..b0843b68af92
--- /dev/null
+++ b/include/linux/power/bq20z75.h
@@ -0,0 +1,39 @@
+/*
+ * Gas Gauge driver for TI's BQ20Z75
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_POWER_BQ20Z75_H_
+#define __LINUX_POWER_BQ20Z75_H_
+
+#include <linux/power_supply.h>
+#include <linux/types.h>
+
+/**
+ * struct bq20z75_platform_data - platform data for bq20z75 devices
+ * @battery_detect: GPIO which is used to detect battery presence
+ * @battery_detect_present: gpio state when battery is present (0 / 1)
+ * @i2c_retry_count: # of times to retry on i2c IO failure
+ */
+struct bq20z75_platform_data {
+ int battery_detect;
+ int battery_detect_present;
+ int i2c_retry_count;
+};
+
+#endif
diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h
new file mode 100644
index 000000000000..a857f719bf40
--- /dev/null
+++ b/include/linux/power/bq27x00_battery.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_BQ27X00_BATTERY_H__
+#define __LINUX_BQ27X00_BATTERY_H__
+
+/**
+ * struct bq27000_platform_data - Platform data for bq27000 devices
+ * @name: Name of the battery. If NULL the driver will fallback to "bq27000".
+ * @read: HDQ read callback.
+ * This function should provide access to the HDQ bus the battery is
+ * connected to.
+ * The first parameter is a pointer to the battery device, the second the
+ * register to be read. The return value should either be the content of
+ * the passed register or an error value.
+ */
+struct bq27000_platform_data {
+ const char *name;
+ int (*read)(struct device *dev, unsigned int);
+};
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 7d7325685c42..204c18dfdc9e 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -173,6 +173,8 @@ struct power_supply {
char *full_trig_name;
struct led_trigger *online_trig;
char *online_trig_name;
+ struct led_trigger *charging_blink_full_solid_trig;
+ char *charging_blink_full_solid_trig_name;
#endif
};
@@ -213,4 +215,49 @@ extern void power_supply_unregister(struct power_supply *psy);
/* For APM emulation, think legacy userspace. */
extern struct class *power_supply_class;
+static inline bool power_supply_is_amp_property(enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_AVG:
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline bool power_supply_is_watt_property(enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_EMPTY:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
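+/*
+ * Hedged example of how a battery driver might use the two predicates above
+ * to pick the right unit when reporting a property. Everything prefixed
+ * my_ is hypothetical; only the get_property prototype and the predicates
+ * come from this header.
+ */
+static int my_bat_get_property(struct power_supply *psy,
+			       enum power_supply_property psp,
+			       union power_supply_propval *val)
+{
+	if (power_supply_is_amp_property(psp))
+		val->intval = my_read_microamps(psp);	/* hypothetical uA accessor */
+	else if (power_supply_is_watt_property(psp))
+		val->intval = my_read_microvolts(psp);	/* hypothetical uV/uW accessor */
+	else
+		return -EINVAL;
+	return 0;
+}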
#endif /* __LINUX_POWER_SUPPLY_H__ */
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
new file mode 100644
index 000000000000..41977737bb7d
--- /dev/null
+++ b/include/linux/pstore.h
@@ -0,0 +1,60 @@
+/*
+ * Persistent Storage - pstore.h
+ *
+ * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
+ *
+ * This code is the generic layer to export data records from platform
+ * level persistent storage via a file system.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _LINUX_PSTORE_H
+#define _LINUX_PSTORE_H
+
+/* types */
+enum pstore_type_id {
+ PSTORE_TYPE_DMESG = 0,
+ PSTORE_TYPE_MCE = 1,
+ PSTORE_TYPE_UNKNOWN = 255
+};
+
+struct pstore_info {
+ struct module *owner;
+ char *name;
+ struct mutex buf_mutex; /* serialize access to 'buf' */
+ char *buf;
+ size_t bufsize;
+ size_t (*read)(u64 *id, enum pstore_type_id *type,
+ struct timespec *time);
+ u64 (*write)(enum pstore_type_id type, size_t size);
+ int (*erase)(u64 id);
+};
+
+#ifdef CONFIG_PSTORE
+extern int pstore_register(struct pstore_info *);
+extern int pstore_write(enum pstore_type_id type, char *buf, size_t size);
+#else
+static inline int
+pstore_register(struct pstore_info *psi)
+{
+ return -ENODEV;
+}
+static inline int
+pstore_write(enum pstore_type_id type, char *buf, size_t size)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /*_LINUX_PSTORE_H*/
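A rough sketch of a platform backend hooking into this interface; the field
names and pstore_register() call are taken from the header above, while the
my_* callbacks, buffer and size constant are assumptions for illustration:

static struct pstore_info my_pstore_info = {
	.owner	= THIS_MODULE,
	.name	= "my_nvram",
	.read	= my_pstore_read,	/* hypothetical callbacks matching the */
	.write	= my_pstore_write,	/* prototypes in struct pstore_info    */
	.erase	= my_pstore_erase,
};

static int __init my_pstore_init(void)
{
	mutex_init(&my_pstore_info.buf_mutex);
	my_pstore_info.buf = my_nvram_buffer;		/* backing record buffer */
	my_pstore_info.bufsize = MY_NVRAM_SIZE;
	return pstore_register(&my_pstore_info);	/* -ENODEV if CONFIG_PSTORE=n */
}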
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index 3b94c91f20a6..6deef5dc95fb 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -63,6 +63,7 @@ extern const struct xattr_handler reiserfs_xattr_trusted_handler;
extern const struct xattr_handler reiserfs_xattr_security_handler;
#ifdef CONFIG_REISERFS_FS_SECURITY
int reiserfs_security_init(struct inode *dir, struct inode *inode,
+ const struct qstr *qstr,
struct reiserfs_security_handle *sec);
int reiserfs_security_write(struct reiserfs_transaction_handle *th,
struct inode *inode,
@@ -130,6 +131,7 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
#ifndef CONFIG_REISERFS_FS_SECURITY
static inline int reiserfs_security_init(struct inode *dir,
struct inode *inode,
+ const struct qstr *qstr,
struct reiserfs_security_handle *sec)
{
return 0;
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index d63dcbaea169..9026b30238f3 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -14,10 +14,12 @@
#define LINUX_RIO_REGS_H
/*
- * In RapidIO, each device has a 2MB configuration space that is
+ * In RapidIO, each device has a 16MB configuration space that is
* accessed via maintenance transactions. Portions of configuration
* space are standardized and/or reserved.
*/
+#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO maintenance space */
+
#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */
#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */
#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index bd31808c7d8e..cc0072e93e36 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -43,14 +43,6 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
-/*
- * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence
- * deprecated.
- *
- * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
- */
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
-
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index bdfcc2527970..34701241b673 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -12,15 +12,7 @@
#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
#endif
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
#ifdef __KERNEL__
-
-#include <linux/types.h>
-
-struct rwsem_waiter;
-
/*
* the rw-semaphore definition
* - if activity is 0 then there are no active readers or writers
@@ -37,28 +29,7 @@ struct rw_semaphore {
#endif
};
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
-} while (0)
+#define RWSEM_UNLOCKED_VALUE 0x00000000
extern void __down_read(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index efd348fe8ca7..a8afe9cd000c 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,6 +11,9 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
#include <asm/system.h>
#include <asm/atomic.h>
@@ -19,9 +22,57 @@ struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
-#include <asm/rwsem.h> /* use an arch-specific implementation */
+/* All arch specific implementations share the same struct */
+struct rw_semaphore {
+ long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/* Include the arch specific part */
+#include <asm/rwsem.h>
+
+/* In all implementations count != 0 means locked */
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return sem->count != 0;
+}
+
+#endif
+
+/* Common initializer macros and functions */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
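+/*
+ * Illustration only: with the initializer macros now shared by both the
+ * spinlock-based and arch-specific variants, static and runtime setup look
+ * the same everywhere. Names below are hypothetical.
+ *
+ *	static DECLARE_RWSEM(my_static_sem);	 /" compile-time initializer "/
+ *	static struct rw_semaphore my_runtime_sem;
+ *
+ *	static void __init my_setup(void)
+ *	{
+ *		init_rwsem(&my_runtime_sem);	 /" gets its own lockdep key "/
+ *
+ *		down_read(&my_static_sem);
+ *		/" ... read-side critical section ... "/
+ *		up_read(&my_static_sem);
+ *	}
+ */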
/*
* lock for reading
*/
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777d8a5ed06b..c15936fe998b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1058,6 +1058,7 @@ struct sched_class {
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq);
+ bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1084,12 +1085,10 @@ struct sched_class {
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
void (*task_fork) (struct task_struct *p);
- void (*switched_from) (struct rq *this_rq, struct task_struct *task,
- int running);
- void (*switched_to) (struct rq *this_rq, struct task_struct *task,
- int running);
+ void (*switched_from) (struct rq *this_rq, struct task_struct *task);
+ void (*switched_to) (struct rq *this_rq, struct task_struct *task);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
- int oldprio, int running);
+ int oldprio);
unsigned int (*get_rr_interval) (struct rq *rq,
struct task_struct *task);
@@ -1715,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
/*
* Per process flags
*/
-#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1945,8 +1943,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-extern unsigned int sysctl_sched_compat_yield;
-
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
@@ -1977,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
# define rt_mutex_adjust_pi(p) do { } while (0)
#endif
+extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
@@ -2049,7 +2046,7 @@ extern void release_uids(struct user_namespace *ns);
#include <asm/current.h>
-extern void do_timer(unsigned long ticks);
+extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -2578,13 +2575,6 @@ static inline void inc_syscw(struct task_struct *tsk)
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
-/*
- * Call the function if the target task is executing on a CPU right now:
- */
-extern void task_oncpu_function_call(struct task_struct *p,
- void (*func) (void *info), void *info);
-
-
#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
diff --git a/include/linux/security.h b/include/linux/security.h
index b2b7f9749f5e..29c6012794e2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -25,6 +25,7 @@
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/binfmts.h>
+#include <linux/dcache.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/sem.h>
@@ -53,7 +54,7 @@ struct audit_krule;
*/
extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
-extern int cap_settime(struct timespec *ts, struct timezone *tz);
+extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -315,6 +316,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* then it should return -EOPNOTSUPP to skip this processing.
* @inode contains the inode structure of the newly created inode.
* @dir contains the inode structure of the parent directory.
+ * @qstr contains the last path component of the new object
* @name will be set to the allocated name suffix (e.g. selinux).
* @value will be set to the allocated attribute value.
* @len will be set to the length of the value.
@@ -1257,12 +1259,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @cap contains the capability <include/linux/capability.h>.
* @audit: Whether to write an audit message or not
* Return 0 if the capability is granted for @tsk.
- * @sysctl:
- * Check permission before accessing the @table sysctl variable in the
- * manner specified by @op.
- * @table contains the ctl_table structure for the sysctl variable.
- * @op contains the operation (001 = search, 002 = write, 004 = read).
- * Return 0 if permission is granted.
* @syslog:
* Check permission before accessing the kernel message ring or changing
* logging to the console.
@@ -1383,11 +1379,10 @@ struct security_operations {
const kernel_cap_t *permitted);
int (*capable) (struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
- int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
int (*quota_on) (struct dentry *dentry);
int (*syslog) (int type);
- int (*settime) (struct timespec *ts, struct timezone *tz);
+ int (*settime) (const struct timespec *ts, const struct timezone *tz);
int (*vm_enough_memory) (struct mm_struct *mm, long pages);
int (*bprm_set_creds) (struct linux_binprm *bprm);
@@ -1435,7 +1430,8 @@ struct security_operations {
int (*inode_alloc_security) (struct inode *inode);
void (*inode_free_security) (struct inode *inode);
int (*inode_init_security) (struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len);
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len);
int (*inode_create) (struct inode *dir,
struct dentry *dentry, int mode);
int (*inode_link) (struct dentry *old_dentry,
@@ -1623,7 +1619,7 @@ struct security_operations {
int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
struct xfrm_policy *xp,
- struct flowi *fl);
+ const struct flowi *fl);
int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -1665,11 +1661,10 @@ int security_capset(struct cred *new, const struct cred *old,
int security_capable(const struct cred *cred, int cap);
int security_real_capable(struct task_struct *tsk, int cap);
int security_real_capable_noaudit(struct task_struct *tsk, int cap);
-int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
-int security_settime(struct timespec *ts, struct timezone *tz);
+int security_settime(const struct timespec *ts, const struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_vm_enough_memory_kern(long pages);
@@ -1696,7 +1691,8 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len);
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
@@ -1883,11 +1879,6 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap)
return ret;
}
-static inline int security_sysctl(struct ctl_table *table, int op)
-{
- return 0;
-}
-
static inline int security_quotactl(int cmds, int type, int id,
struct super_block *sb)
{
@@ -1904,7 +1895,8 @@ static inline int security_syslog(int type)
return 0;
}
-static inline int security_settime(struct timespec *ts, struct timezone *tz)
+static inline int security_settime(const struct timespec *ts,
+ const struct timezone *tz)
{
return cap_settime(ts, tz);
}
@@ -2023,6 +2015,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
+ const struct qstr *qstr,
char **name,
void **value,
size_t *len)
@@ -2761,7 +2754,8 @@ int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, struct flowi *fl);
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
@@ -2813,7 +2807,7 @@ static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_s
}
static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, struct flowi *fl)
+ struct xfrm_policy *xp, const struct flowi *fl)
{
return 1;
}
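
/*
 * Illustrative sketch, not part of the patch: with the new qstr argument a
 * filesystem passes the name of the file being created down to the LSM when
 * requesting the initial security xattr.  Everything named example_* is a
 * placeholder; only security_inode_init_security() comes from the interface
 * changed above.
 */
static int example_fs_init_security(struct inode *inode, struct inode *dir,
				    const struct qstr *qstr)
{
	char *name;
	void *value;
	size_t len;
	int err;

	err = security_inode_init_security(inode, dir, qstr,
					   &name, &value, &len);
	if (err == -EOPNOTSUPP)
		return 0;		/* no label to set */
	if (err)
		return err;

	/* example_fs_setxattr() stands in for the fs-specific xattr setter */
	err = example_fs_setxattr(inode, name, value, len);
	kfree(name);
	kfree(value);
	return err;
}
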
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1630d9cae22a..a2afc9fbe186 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -34,28 +34,32 @@ enum {
SCIx_NR_IRQS,
};
+#define SCIx_IRQ_MUXED(irq) \
+{ \
+ [SCIx_ERI_IRQ] = (irq), \
+ [SCIx_RXI_IRQ] = (irq), \
+ [SCIx_TXI_IRQ] = (irq), \
+ [SCIx_BRI_IRQ] = (irq), \
+}
+
struct device;
/*
* Platform device specific platform_data struct
*/
struct plat_sci_port {
- void __iomem *membase; /* io cookie */
unsigned long mapbase; /* resource base */
unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */
unsigned int type; /* SCI / SCIF / IRDA */
upf_t flags; /* UPF_* flags */
- char *clk; /* clock string */
unsigned int scbrr_algo_id; /* SCBRR calculation algo */
unsigned int scscr; /* SCSCR initialization */
struct device *dma_dev;
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
- unsigned int dma_slave_tx;
- unsigned int dma_slave_rx;
-#endif
+ unsigned int dma_slave_tx;
+ unsigned int dma_slave_rx;
};
#endif /* __LINUX_SERIAL_SCI_H */
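
/*
 * Illustrative sketch, not part of the patch: a board file describing a port
 * whose four interrupt sources (ERI/RXI/TXI/BRI) share a single IRQ line can
 * now use SCIx_IRQ_MUXED() instead of listing all four entries.  The mapbase
 * and IRQ number below are placeholders.
 */
static struct plat_sci_port example_scif_pdata = {
	.mapbase	= 0xe6c40000,
	.flags		= UPF_BOOT_AUTOCONF,
	.type		= PORT_SCIFA,
	.irqs		= SCIx_IRQ_MUXED(100),
};
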
diff --git a/include/linux/shm_signal.h b/include/linux/shm_signal.h
new file mode 100644
index 000000000000..b2efd72669fb
--- /dev/null
+++ b/include/linux/shm_signal.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_SHM_SIGNAL_H
+#define _LINUX_SHM_SIGNAL_H
+
+#include <linux/types.h>
+
+/*
+ *---------
+ * The following structures represent data that is shared across boundaries
+ * which may be quite disparate from one another (e.g. Windows vs Linux,
+ * 32 vs 64 bit, etc). Therefore, care has been taken to make sure they
+ * present data in a manner that is independent of the environment.
+ *-----------
+ */
+
+#define SHM_SIGNAL_MAGIC cpu_to_le32(0x58fa39df)
+#define SHM_SIGNAL_VER cpu_to_le32(1)
+
+struct shm_signal_irq {
+ __u8 enabled;
+ __u8 pending;
+ __u8 dirty;
+};
+
+enum shm_signal_locality {
+ shm_locality_north,
+ shm_locality_south,
+};
+
+struct shm_signal_desc {
+ __le32 magic;
+ __le32 ver;
+ struct shm_signal_irq irq[2];
+};
+
+/* --- END SHARED STRUCTURES --- */
+
+#ifdef __KERNEL__
+
+#include <linux/kref.h>
+#include <linux/interrupt.h>
+
+struct shm_signal_notifier {
+ void (*signal)(struct shm_signal_notifier *);
+};
+
+struct shm_signal;
+
+struct shm_signal_ops {
+ int (*inject)(struct shm_signal *s);
+ void (*fault)(struct shm_signal *s, const char *fmt, ...);
+ void (*release)(struct shm_signal *s);
+};
+
+enum {
+ shm_signal_in_wakeup,
+};
+
+struct shm_signal {
+ struct kref kref;
+ spinlock_t lock;
+ enum shm_signal_locality locale;
+ unsigned long flags;
+ struct shm_signal_ops *ops;
+ struct shm_signal_desc *desc;
+ struct shm_signal_notifier *notifier;
+ struct tasklet_struct deferred_notify;
+};
+
+#define SHM_SIGNAL_FAULT(s, fmt, args...) \
+ ((s)->ops->fault ? (s)->ops->fault((s), fmt, ## args) : panic(fmt, ## args))
+
+ /*
+ * These functions should only be used internally
+ */
+void _shm_signal_release(struct kref *kref);
+void _shm_signal_wakeup(struct shm_signal *s);
+
+/**
+ * shm_signal_init() - initialize an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ *
+ * Initializes SHM_SIGNAL context before first use
+ *
+ **/
+void shm_signal_init(struct shm_signal *s, enum shm_signal_locality locale,
+ struct shm_signal_ops *ops, struct shm_signal_desc *desc);
+
+/**
+ * shm_signal_get() - acquire an SHM_SIGNAL context reference
+ * @s: SHM_SIGNAL context
+ *
+ **/
+static inline struct shm_signal *shm_signal_get(struct shm_signal *s)
+{
+ kref_get(&s->kref);
+
+ return s;
+}
+
+/**
+ * shm_signal_put() - release an SHM_SIGNAL context reference
+ * @s: SHM_SIGNAL context
+ *
+ **/
+static inline void shm_signal_put(struct shm_signal *s)
+{
+ kref_put(&s->kref, _shm_signal_release);
+}
+
+/**
+ * shm_signal_enable() - enables local notifications on an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Enables/unmasks the registered notifier (if applicable) to receive wakeups
+ * whenever the remote side performs an shm_signal() operation. A notification
+ * will be dispatched immediately if any pending signals have already been
+ * issued prior to invoking this call.
+ *
+ * This is synonymous with unmasking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_enable(struct shm_signal *s, int flags);
+
+/**
+ * shm_signal_disable() - disable local notifications on an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Disables/masks the registered shm_signal_notifier (if applicable) from
+ * receiving any further notifications. Any subsequent calls to shm_signal()
+ * by the remote side will update the shm as dirty, but will not traverse the
+ * locale boundary and will not invoke the notifier callback. Signals
+ * delivered while masked will be deferred until shm_signal_enable() is
+ * invoked.
+ *
+ * This is synonymous with masking an interrupt
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_disable(struct shm_signal *s, int flags);
+
+/**
+ * shm_signal_inject() - notify the remote side about shm changes
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Marks the shm state as "dirty" and, if enabled, will traverse
+ * a locale boundary to inject a remote notification. The remote
+ * side controls whether the notification should be delivered via
+ * the shm_signal_enable/disable() interface.
+ *
+ * The specifics of how to traverse a locale boundary are abstracted
+ * by the shm_signal_ops->signal() interface and provided by a particular
+ * implementation. However, typically going north to south would be
+ * something like a syscall/hypercall, and going south to north would be
+ * something like a posix-signal/guest-interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_inject(struct shm_signal *s, int flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SHM_SIGNAL_H */
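
/*
 * Illustrative sketch, not part of the patch: minimal wiring of an SHM_SIGNAL
 * endpoint on the "north" side.  The inject/release callbacks and the way the
 * descriptor memory is obtained are placeholders; only the shm_signal_*
 * calls come from the interface above.
 */
static int example_inject(struct shm_signal *s)
{
	/* e.g. ring a doorbell register or issue a hypercall */
	return 0;
}

static void example_release(struct shm_signal *s)
{
	kfree(s);
}

static struct shm_signal_ops example_signal_ops = {
	.inject  = example_inject,
	.release = example_release,
};

static void example_recv(struct shm_signal_notifier *notifier)
{
	/* remote side signalled us: process the shared-memory ring here */
}

static struct shm_signal_notifier example_notifier = {
	.signal = example_recv,
};

static int example_setup(struct shm_signal_desc *desc)
{
	struct shm_signal *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	shm_signal_init(s, shm_locality_north, &example_signal_ops, desc);
	s->notifier = &example_notifier;

	return shm_signal_enable(s, 0);
}
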
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bf221d65d9ad..31f02d0b46a7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1801,6 +1801,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
skb = skb->prev)
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
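
/*
 * Illustrative sketch, not part of the patch: the *_safe reverse walkers let
 * the current skb be unlinked and freed while iterating, e.g. when trimming
 * stale packets from the tail of a queue.  example_should_drop() is a
 * placeholder; the caller is assumed to hold the queue lock.
 */
static inline void example_purge_tail(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	skb_queue_reverse_walk_safe(queue, skb, tmp) {
		if (!example_should_drop(skb))
			break;
		__skb_unlink(skb, queue);
		kfree_skb(skb);
	}
}
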
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
@@ -1868,7 +1877,7 @@ extern void skb_split(struct sk_buff *skb,
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index fa9086647eb7..ad4dd1c8d30a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -105,7 +105,6 @@ void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
-const char *kmem_cache_name(struct kmem_cache *);
/*
* Please use this macro to create slab caches. Simply specify the
diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h
new file mode 100644
index 000000000000..1ac529cf4f06
--- /dev/null
+++ b/include/linux/spi/spi_oc_tiny.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_SPI_SPI_OC_TINY_H
+#define _LINUX_SPI_SPI_OC_TINY_H
+
+/**
+ * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
+ * @freq: input clock freq to the core.
+ * @baudwidth: baud rate divider width of the core.
+ * @gpio_cs_count: number of gpio pins used for chipselect.
+ * @gpio_cs: array of gpio pins used for chipselect.
+ *
+ * freq and baudwidth are used only if the divider is programmable.
+ */
+struct tiny_spi_platform_data {
+ unsigned int freq;
+ unsigned int baudwidth;
+ unsigned int gpio_cs_count;
+ int *gpio_cs;
+};
+
+#endif /* _LINUX_SPI_SPI_OC_TINY_H */
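
/*
 * Illustrative sketch, not part of the patch: a board file would describe the
 * OpenCores tiny SPI core roughly like this.  The GPIO numbers and clock rate
 * are placeholders for whatever the platform actually wires up.
 */
static int example_tiny_spi_cs[] = { 4, 5 };

static struct tiny_spi_platform_data example_tiny_spi_pdata = {
	.freq		= 50000000,	/* 50 MHz input clock */
	.baudwidth	= 8,
	.gpio_cs_count	= ARRAY_SIZE(example_tiny_spi_cs),
	.gpio_cs	= example_tiny_spi_cs,
};
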
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 851b7783720d..73548eb13a5d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -81,14 +81,6 @@ typedef struct spinlock {
#define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-/*
- * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
- * deprecated.
- * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
- * appropriate.
- */
-#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#include <linux/rwlock_types.h>
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 489f7b6d61c5..402955ae48ce 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -85,6 +85,8 @@
#define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */
#define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */
#define SSB_IMSTATE_TO 0x00040000 /* Timeout */
+#define SSB_IMSTATE_BUSY 0x01800000 /* Busy (Backplane rev >= 2.3 only) */
+#define SSB_IMSTATE_REJECT 0x02000000 /* Reject (Backplane rev >= 2.3 only) */
#define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */
#define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
#define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
@@ -97,7 +99,6 @@
#define SSB_TMSLOW_RESET 0x00000001 /* Reset */
#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */
#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */
-#define SSB_TMSLOW_PHYCLK 0x00000010 /* MAC PHY Clock Control Enable */
#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */
#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */
@@ -268,6 +269,8 @@
/* SPROM Revision 4 */
#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */
#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */
+#define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM4_BFL2HI 0x004A /* Board flags 2 Hi */
#define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */
#define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */
#define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */
@@ -358,6 +361,8 @@
#define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */
#define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */
#define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */
+#define SSB_SPROM5_BFL2LO 0x004E /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM5_BFL2HI 0x0050 /* Board flags 2 Hi */
#define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */
#define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */
#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 98664db1be47..833d8dd3ea8c 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -132,11 +132,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_enter, \
.event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
- *__event_enter_##sname = &event_enter_##sname; \
- __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
+ *__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
@@ -146,11 +146,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_exit, \
.event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
- *__event_exit_##sname = &event_exit_##sname; \
- __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
+ *__event_exit_##sname = &event_exit_##sname;
#define SYSCALL_METADATA(sname, nb) \
SYSCALL_TRACE_ENTER_EVENT(sname); \
@@ -158,6 +158,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
static struct syscall_metadata __used \
__syscall_meta_##sname = { \
.name = "sys"#sname, \
+ .syscall_nr = -1, /* Filled in at boot */ \
.nb_args = nb, \
.types = types_##sname, \
.args = args_##sname, \
@@ -175,6 +176,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
static struct syscall_metadata __used \
__syscall_meta__##sname = { \
.name = "sys_"#sname, \
+ .syscall_nr = -1, /* Filled in at boot */ \
.nb_args = 0, \
.enter_event = &event_enter__##sname, \
.exit_event = &event_exit__##sname, \
@@ -313,6 +315,8 @@ asmlinkage long sys_clock_settime(clockid_t which_clock,
const struct timespec __user *tp);
asmlinkage long sys_clock_gettime(clockid_t which_clock,
struct timespec __user *tp);
+asmlinkage long sys_clock_adjtime(clockid_t which_clock,
+ struct timex __user *tx);
asmlinkage long sys_clock_getres(clockid_t which_clock,
struct timespec __user *tp);
asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8651556dbd52..d3ec89fb4122 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+
+#ifdef CONFIG_NET
extern int generate_netlink_event(u32 orig, enum events event);
+#else
+static inline int generate_netlink_event(u32 orig, enum events event)
+{
+ return 0;
+}
+#endif
#endif /* __THERMAL_H__ */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index c90696544176..20fc303947d3 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -18,9 +18,6 @@ struct compat_timespec;
struct restart_block {
long (*fn)(struct restart_block *);
union {
- struct {
- unsigned long arg0, arg1, arg2, arg3;
- };
/* For futex_wait and futex_wait_requeue_pi */
struct {
u32 __user *uaddr;
diff --git a/include/linux/time.h b/include/linux/time.h
index 1e6d3b59238d..379b9037b5b4 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -113,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
#define timespec_valid(ts) \
(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
-extern seqlock_t xtime_lock;
-
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
extern int update_persistent_clock(struct timespec now);
@@ -125,8 +123,8 @@ extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
struct timespec __current_kernel_time(void); /* does not take xtime_lock */
-struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */
struct timespec get_monotonic_coarse(void);
+void get_xtime_and_monotonic_offset(struct timespec *xtim, struct timespec *wtom);
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
@@ -147,8 +145,9 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(struct timespec *tv);
-extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
+extern int do_settimeofday(const struct timespec *tv);
+extern int do_sys_settimeofday(const struct timespec *tv,
+ const struct timezone *tz);
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
struct itimerval;
@@ -166,8 +165,8 @@ extern void monotonic_to_bootbased(struct timespec *ts);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
-extern void update_wall_time(void);
extern void timekeeping_leap_insert(int leapsecond);
+extern int timekeeping_inject_offset(struct timespec *ts);
struct tms;
extern void do_sys_times(struct tms *);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index d23999f9499d..aa60fe7b6ed6 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -73,7 +73,7 @@ struct timex {
long tolerance; /* clock frequency tolerance (ppm)
* (read only)
*/
- struct timeval time; /* (read only) */
+ struct timeval time; /* (read only, except for ADJ_SETOFFSET) */
long tick; /* (modified) usecs between clock ticks */
long ppsfreq; /* pps frequency (scaled ppm) (ro) */
@@ -102,6 +102,7 @@ struct timex {
#define ADJ_STATUS 0x0010 /* clock status */
#define ADJ_TIMECONST 0x0020 /* pll time constant */
#define ADJ_TAI 0x0080 /* set TAI offset */
+#define ADJ_SETOFFSET 0x0100 /* add 'time' to current time */
#define ADJ_MICRO 0x1000 /* select microsecond resolution */
#define ADJ_NANO 0x2000 /* select nanosecond resolution */
#define ADJ_TICK 0x4000 /* tick value */
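
/*
 * Illustrative userspace sketch, not part of the patch: ADJ_SETOFFSET applies
 * a one-shot correction to the clock atomically instead of stepping it with
 * settimeofday().  Shown via adjtimex(); the new clock_adjtime() syscall takes
 * the same struct timex for other clock IDs.  The +1.5 ms offset is only an
 * example and the call requires the appropriate privilege.
 */
#include <sys/timex.h>
#include <stdio.h>

int main(void)
{
	struct timex tx = {
		.modes = ADJ_SETOFFSET | ADJ_NANO,
		.time = {
			.tv_sec  = 0,
			.tv_usec = 1500000,	/* nanoseconds when ADJ_NANO is set */
		},
	};

	if (adjtimex(&tx) < 0) {
		perror("adjtimex");
		return 1;
	}
	return 0;
}
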
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
index 1eefa3f6d1f4..a5b994a204d2 100644
--- a/include/linux/tipc.h
+++ b/include/linux/tipc.h
@@ -2,7 +2,7 @@
* include/linux/tipc.h: Include file for TIPC socket interface
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -130,12 +130,6 @@ static inline unsigned int tipc_node(__u32 addr)
#define TIPC_SUB_PORTS 0x01 /* filter for port availability */
#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */
#define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */
-#if 0
-/* The following filter options are not currently implemented */
-#define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */
-#define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */
-#define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */
-#endif
#define TIPC_WAIT_FOREVER (~0) /* timeout for permanent subscription */
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index 7d42460a5e3c..011556fcef04 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -2,7 +2,7 @@
* include/linux/tipc_config.h: Include file for TIPC configuration interface
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,13 +76,6 @@
#define TIPC_CMD_SHOW_LINK_STATS 0x000B /* tx link_name, rx ultra_string */
#define TIPC_CMD_SHOW_STATS 0x000F /* tx unsigned, rx ultra_string */
-#if 0
-#define TIPC_CMD_SHOW_PORT_STATS 0x0008 /* tx port_ref, rx ultra_string */
-#define TIPC_CMD_RESET_PORT_STATS 0x0009 /* tx port_ref, rx none */
-#define TIPC_CMD_GET_ROUTES 0x000A /* tx ?, rx ? */
-#define TIPC_CMD_GET_LINK_PEER 0x000D /* tx link_name, rx ? */
-#endif
-
/*
* Protected commands:
* May only be issued by "network administration capable" process.
@@ -109,13 +102,6 @@
#define TIPC_CMD_DUMP_LOG 0x410B /* tx none, rx ultra_string */
#define TIPC_CMD_RESET_LINK_STATS 0x410C /* tx link_name, rx none */
-#if 0
-#define TIPC_CMD_CREATE_LINK 0x4103 /* tx link_create, rx none */
-#define TIPC_CMD_REMOVE_LINK 0x4104 /* tx link_name, rx none */
-#define TIPC_CMD_BLOCK_LINK 0x4105 /* tx link_name, rx none */
-#define TIPC_CMD_UNBLOCK_LINK 0x4106 /* tx link_name, rx none */
-#endif
-
/*
* Private commands:
* May only be issued by "network administration capable" process.
@@ -123,9 +109,6 @@
*/
#define TIPC_CMD_SET_NODE_ADDR 0x8001 /* tx net_addr, rx none */
-#if 0
-#define TIPC_CMD_SET_ZONE_MASTER 0x8002 /* tx none, rx none */
-#endif
#define TIPC_CMD_SET_REMOTE_MNG 0x8003 /* tx unsigned, rx none */
#define TIPC_CMD_SET_MAX_PORTS 0x8004 /* tx unsigned, rx none */
#define TIPC_CMD_SET_MAX_PUBL 0x8005 /* tx unsigned, rx none */
@@ -193,6 +176,10 @@
#define TIPC_DEF_LINK_TOL 1500
#define TIPC_MAX_LINK_TOL 30000
+#if (TIPC_MIN_LINK_TOL < 16)
+#error "TIPC_MIN_LINK_TOL is too small (abort limit may be NaN)"
+#endif
+
/*
* Link window limits (min, default, max), in packets
*/
@@ -247,15 +234,6 @@ struct tipc_name_table_query {
#define TIPC_CFG_NOT_SUPPORTED "\x84" /* request is not supported by TIPC */
#define TIPC_CFG_INVALID_VALUE "\x85" /* request has invalid argument value */
-#if 0
-/* prototypes TLV structures for proposed commands */
-struct tipc_link_create {
- __u32 domain;
- struct tipc_media_addr peer_addr;
- char bearer_name[TIPC_MAX_BEARER_NAME];
-};
-#endif
-
/*
* A TLV consists of a descriptor, followed by the TLV value.
* TLV descriptor fields are stored in network byte order;
diff --git a/include/linux/vbus_driver.h b/include/linux/vbus_driver.h
new file mode 100644
index 000000000000..8a7acb1a7a05
--- /dev/null
+++ b/include/linux/vbus_driver.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Mediates access to a host VBUS from a guest kernel by providing a
+ * global view of all VBUS devices
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VBUS_DRIVER_H
+#define _LINUX_VBUS_DRIVER_H
+
+#include <linux/device.h>
+#include <linux/shm_signal.h>
+#include <linux/ioq.h>
+
+struct vbus_device_proxy;
+struct vbus_driver;
+
+struct vbus_device_proxy_ops {
+ int (*open)(struct vbus_device_proxy *dev, int version, int flags);
+ int (*close)(struct vbus_device_proxy *dev, int flags);
+ int (*shm)(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio,
+ void *ptr, size_t len,
+ struct shm_signal_desc *sigdesc, struct shm_signal **signal,
+ int flags);
+ int (*call)(struct vbus_device_proxy *dev, u32 func,
+ void *data, size_t len, int flags);
+ void (*release)(struct vbus_device_proxy *dev);
+};
+
+struct vbus_device_proxy {
+ char *type;
+ u64 id;
+ void *priv; /* Used by drivers */
+ struct vbus_device_proxy_ops *ops;
+ struct device dev;
+};
+
+int vbus_device_proxy_register(struct vbus_device_proxy *dev);
+void vbus_device_proxy_unregister(struct vbus_device_proxy *dev);
+
+struct vbus_device_proxy *vbus_device_proxy_find(u64 id);
+
+struct vbus_driver_ops {
+ int (*probe)(struct vbus_device_proxy *dev);
+ int (*remove)(struct vbus_device_proxy *dev);
+};
+
+struct vbus_driver {
+ char *type;
+ struct module *owner;
+ struct vbus_driver_ops *ops;
+ struct device_driver drv;
+};
+
+int vbus_driver_register(struct vbus_driver *drv);
+void vbus_driver_unregister(struct vbus_driver *drv);
+
+/*
+ * driver-side IOQ helper - allocates device-shm and maps an IOQ on it
+ */
+int vbus_driver_ioq_alloc(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio, size_t ringsize, struct ioq **ioq);
+
+#define VBUS_DRIVER_AUTOPROBE(name) MODULE_ALIAS("vbus-proxy:" name)
+
+#endif /* _LINUX_VBUS_DRIVER_H */
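
/*
 * Illustrative sketch, not part of the patch: a guest-side driver binds to a
 * proxied vbus device by type string.  Everything named example_* is a
 * placeholder; only vbus_driver_register() and VBUS_DRIVER_AUTOPROBE() come
 * from the interface above, and module boilerplate is abbreviated.
 */
static int example_probe(struct vbus_device_proxy *dev)
{
	int ret;

	ret = dev->ops->open(dev, 1 /* device ABI version */, 0);
	if (ret < 0)
		return ret;

	dev->priv = NULL;	/* driver state would be allocated here */
	return 0;
}

static int example_remove(struct vbus_device_proxy *dev)
{
	return dev->ops->close(dev, 0);
}

static struct vbus_driver_ops example_vbus_ops = {
	.probe  = example_probe,
	.remove = example_remove,
};

static struct vbus_driver example_vbus_driver = {
	.type  = "example-device",
	.owner = THIS_MODULE,
	.ops   = &example_vbus_ops,
};

static int __init example_init(void)
{
	return vbus_driver_register(&example_vbus_driver);
}
module_init(example_init);

VBUS_DRIVER_AUTOPROBE("example-device");
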
diff --git a/include/linux/vbus_pci.h b/include/linux/vbus_pci.h
new file mode 100644
index 000000000000..fe337590e644
--- /dev/null
+++ b/include/linux/vbus_pci.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * PCI to Virtual-Bus Bridge
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VBUS_PCI_H
+#define _LINUX_VBUS_PCI_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define VBUS_PCI_ABI_MAGIC 0xbf53eef5
+#define VBUS_PCI_ABI_VERSION 2
+#define VBUS_PCI_HC_VERSION 1
+
+enum {
+ VBUS_PCI_BRIDGE_NEGOTIATE,
+ VBUS_PCI_BRIDGE_QREG,
+ VBUS_PCI_BRIDGE_SLOWCALL,
+ VBUS_PCI_BRIDGE_FASTCALL_ADD,
+ VBUS_PCI_BRIDGE_FASTCALL_DROP,
+
+ VBUS_PCI_BRIDGE_MAX, /* must be last */
+};
+
+enum {
+ VBUS_PCI_HC_DEVOPEN,
+ VBUS_PCI_HC_DEVCLOSE,
+ VBUS_PCI_HC_DEVCALL,
+ VBUS_PCI_HC_DEVSHM,
+
+ VBUS_PCI_HC_MAX, /* must be last */
+};
+
+struct vbus_pci_bridge_negotiate {
+ __u32 magic;
+ __u32 version;
+ __u64 capabilities;
+};
+
+struct vbus_pci_deviceopen {
+ __u32 devid;
+ __u32 version; /* device ABI version */
+ __u64 handle; /* return value for devh */
+};
+
+struct vbus_pci_devicecall {
+	__u64 devh;	/* device-handle (returned from DEVICEOPEN) */
+ __u32 func;
+ __u32 len;
+ __u32 flags;
+ __u64 datap;
+};
+
+struct vbus_pci_deviceshm {
+	__u64 devh;	/* device-handle (returned from DEVICEOPEN) */
+ __u32 id;
+ __u32 len;
+ __u32 flags;
+ struct {
+ __u32 offset;
+ __u32 prio;
+ __u64 cookie; /* token to pass back when signaling client */
+ } signal;
+ __u64 datap;
+};
+
+struct vbus_pci_call_desc {
+ __u32 vector;
+ __u32 len;
+ __u64 datap;
+};
+
+struct vbus_pci_fastcall_desc {
+ struct vbus_pci_call_desc call;
+ __u32 result;
+};
+
+struct vbus_pci_regs {
+ struct vbus_pci_call_desc bridgecall;
+ __u8 pad[48];
+};
+
+struct vbus_pci_signals {
+ __u32 eventq;
+ __u32 fastcall;
+ __u32 shmsignal;
+ __u8 pad[20];
+};
+
+struct vbus_pci_eventqreg {
+ __u32 count;
+ __u64 ring;
+ __u64 data;
+};
+
+struct vbus_pci_busreg {
+ __u32 count; /* supporting multiple queues allows for prio, etc */
+ struct vbus_pci_eventqreg eventq[1];
+};
+
+enum vbus_pci_eventid {
+ VBUS_PCI_EVENT_DEVADD,
+ VBUS_PCI_EVENT_DEVDROP,
+ VBUS_PCI_EVENT_SHMSIGNAL,
+ VBUS_PCI_EVENT_SHMCLOSE,
+};
+
+#define VBUS_MAX_DEVTYPE_LEN 128
+
+struct vbus_pci_add_event {
+ __u64 id;
+ char type[VBUS_MAX_DEVTYPE_LEN];
+};
+
+struct vbus_pci_handle_event {
+ __u64 handle;
+};
+
+struct vbus_pci_event {
+ __u32 eventid;
+ union {
+ struct vbus_pci_add_event add;
+ struct vbus_pci_handle_event handle;
+ } data;
+};
+
+#endif /* _LINUX_VBUS_PCI_H */
diff --git a/include/linux/venet.h b/include/linux/venet.h
new file mode 100644
index 000000000000..0578d797c973
--- /dev/null
+++ b/include/linux/venet.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Virtual-Ethernet adapter
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VENET_H
+#define _LINUX_VENET_H
+
+#include <linux/types.h>
+
+#define VENET_VERSION 1
+
+#define VENET_TYPE "virtual-ethernet"
+
+#define VENET_QUEUE_RX 0
+#define VENET_QUEUE_TX 1
+
+struct venet_capabilities {
+ __u32 gid;
+ __u32 bits;
+};
+
+#define VENET_CAP_GROUP_SG 0
+#define VENET_CAP_GROUP_EVENTQ 1
+#define VENET_CAP_GROUP_L4RO 2 /* layer-4 reassem offloading */
+
+/* CAPABILITIES-GROUP SG */
+#define VENET_CAP_SG (1 << 0)
+#define VENET_CAP_TSO4 (1 << 1)
+#define VENET_CAP_TSO6 (1 << 2)
+#define VENET_CAP_ECN (1 << 3)
+#define VENET_CAP_UFO (1 << 4)
+#define VENET_CAP_PMTD (1 << 5) /* pre-mapped tx desc */
+
+/* CAPABILITIES-GROUP EVENTQ */
+#define VENET_CAP_EVQ_LINKSTATE (1 << 0)
+#define VENET_CAP_EVQ_TXC (1 << 1) /* tx-complete */
+
+struct venet_iov {
+ __u32 len;
+ __u64 ptr;
+};
+
+#define VENET_SG_FLAG_NEEDS_CSUM (1 << 0)
+#define VENET_SG_FLAG_GSO (1 << 1)
+#define VENET_SG_FLAG_ECN (1 << 2)
+
+struct venet_sg {
+ __u64 cookie;
+ __u32 flags;
+ __u32 len; /* total length of all iovs */
+ struct {
+ __u16 start; /* csum starting position */
+ __u16 offset; /* offset to place csum */
+ } csum;
+ struct {
+#define VENET_GSO_TYPE_TCPV4 0 /* IPv4 TCP (TSO) */
+#define VENET_GSO_TYPE_UDP 1 /* IPv4 UDP (UFO) */
+#define VENET_GSO_TYPE_TCPV6 2 /* IPv6 TCP */
+ __u8 type;
+ __u16 hdrlen;
+ __u16 size;
+ } gso;
+ __u32 count; /* nr of iovs */
+ struct venet_iov iov[1];
+};
+
+struct venet_eventq_query {
+ __u32 flags;
+ __u32 evsize; /* size of each event */
+ __u32 dpid; /* descriptor pool-id */
+ __u32 qid;
+ __u8 pad[16];
+};
+
+#define VENET_EVENT_LINKSTATE 0
+#define VENET_EVENT_TXC 1
+
+struct venet_event_header {
+ __u32 flags;
+ __u32 size;
+ __u32 id;
+};
+
+struct venet_event_linkstate {
+ struct venet_event_header header;
+ __u8 state; /* 0 = down, 1 = up */
+};
+
+struct venet_event_txc {
+ struct venet_event_header header;
+ __u32 txqid;
+ __u64 cookie;
+};
+
+struct venet_l4ro_query {
+ __u32 flags;
+ __u32 dpid; /* descriptor pool-id */
+ __u32 pqid; /* page queue-id */
+ __u8 pad[20];
+};
+
+
+#define VSG_DESC_SIZE(count) (sizeof(struct venet_sg) + \
+ sizeof(struct venet_iov) * ((count) - 1))
+
+#define VENET_FUNC_LINKUP 0
+#define VENET_FUNC_LINKDOWN 1
+#define VENET_FUNC_MACQUERY 2
+#define VENET_FUNC_NEGCAP 3 /* negotiate capabilities */
+#define VENET_FUNC_FLUSHRX 4
+#define VENET_FUNC_PMTDQUERY 5
+#define VENET_FUNC_EVQQUERY 6
+#define VENET_FUNC_L4ROQUERY 7
+
+#endif /* _LINUX_VENET_H */
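
/*
 * Illustrative sketch, not part of the patch: because struct venet_sg ends in
 * a one-element iov[] array, a descriptor covering "count" fragments is sized
 * with VSG_DESC_SIZE().  GFP flags and error handling are abbreviated.
 */
static struct venet_sg *example_alloc_sg(unsigned int count)
{
	struct venet_sg *vsg = kzalloc(VSG_DESC_SIZE(count), GFP_KERNEL);

	if (vsg)
		vsg->count = count;

	return vsg;
}
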
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 5f6f47044abf..5122b265dde6 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -70,6 +70,7 @@
* Moved from videodev.h
*/
#define VIDEO_MAX_FRAME 32
+#define VIDEO_MAX_PLANES 8
#ifndef __KERNEL__
@@ -157,9 +158,23 @@ enum v4l2_buf_type {
/* Experimental */
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
#endif
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
+#define V4L2_TYPE_IS_MULTIPLANAR(type) \
+ ((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+
+#define V4L2_TYPE_IS_OUTPUT(type) \
+ ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
+ || (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
+
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
@@ -245,6 +260,11 @@ struct v4l2_capability {
#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */
#define V4L2_CAP_RDS_OUTPUT 0x00000800 /* Is an RDS encoder */
+/* Is a video capture device that supports multiplanar formats */
+#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000
+/* Is a video output device that supports multiplanar formats */
+#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000
+
#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */
#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */
#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */
@@ -319,6 +339,13 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
+/* two non contiguous planes - one Y, one Cr + Cb interleaved */
+#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
+#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
+
+/* three non contiguous planes - Y, Cb, Cr */
+#define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */
+
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
@@ -517,6 +544,62 @@ struct v4l2_requestbuffers {
__u32 reserved[2];
};
+/**
+ * struct v4l2_plane - plane info for multi-planar buffers
+ * @bytesused: number of bytes occupied by data in the plane (payload)
+ * @length: size of this plane (NOT the payload) in bytes
+ * @mem_offset: when memory in the associated struct v4l2_buffer is
+ * V4L2_MEMORY_MMAP, equals the offset from the start of
+ * the device memory for this plane (or is a "cookie" that
+ * should be passed to mmap() called on the video node)
+ * @userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
+ * pointing to this plane
+ * @data_offset: offset in the plane to the start of data; usually 0,
+ * unless there is a header in front of the data
+ *
+ * Multi-planar buffers consist of one or more planes, e.g. a YCbCr buffer
+ * with two planes can have one plane for Y, and another for interleaved CbCr
+ * components. Each plane can reside in a separate memory buffer, or even in
+ * a completely separate memory node (e.g. in embedded devices).
+ */
+struct v4l2_plane {
+ __u32 bytesused;
+ __u32 length;
+ union {
+ __u32 mem_offset;
+ unsigned long userptr;
+ } m;
+ __u32 data_offset;
+ __u32 reserved[11];
+};
+
+/**
+ * struct v4l2_buffer - video buffer info
+ * @index: id number of the buffer
+ * @type: buffer type (type == *_MPLANE for multiplanar buffers)
+ * @bytesused: number of bytes occupied by data in the buffer (payload);
+ * unused (set to 0) for multiplanar buffers
+ * @flags: buffer informational flags
+ * @field: field order of the image in the buffer
+ * @timestamp: frame timestamp
+ * @timecode: frame timecode
+ * @sequence: sequence count of this frame
+ * @memory: the method, in which the actual video data is passed
+ * @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
+ * offset from the start of the device memory for this plane,
+ * (or a "cookie" that should be passed to mmap() as offset)
+ * @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
+ * a userspace pointer pointing to this buffer
+ * @planes: for multiplanar buffers; userspace pointer to the array of plane
+ * info structs for this buffer
+ * @length: size in bytes of the buffer (NOT its payload) for single-plane
+ * buffers (when type != *_MPLANE); number of elements in the
+ * planes array for multi-plane buffers
+ * @input:	input number from which the video data has been captured
+ *
+ * Contains data exchanged by application and driver using one of the Streaming
+ * I/O methods.
+ */
struct v4l2_buffer {
__u32 index;
enum v4l2_buf_type type;
@@ -532,6 +615,7 @@ struct v4l2_buffer {
union {
__u32 offset;
unsigned long userptr;
+ struct v4l2_plane *planes;
} m;
__u32 length;
__u32 input;
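
/*
 * Illustrative userspace sketch, not part of the patch: queueing a two-plane
 * buffer (e.g. NV12M) with the extended v4l2_buffer.  For *_MPLANE buffer
 * types the application fills m.planes and sets length to the number of
 * elements in that array.  Assumes <string.h>, <sys/ioctl.h> and
 * <linux/videodev2.h>; fd and index come from the caller.
 */
static int example_queue_mplane_buffer(int fd, unsigned int index)
{
	struct v4l2_plane planes[2];
	struct v4l2_buffer buf;

	memset(planes, 0, sizeof(planes));
	memset(&buf, 0, sizeof(buf));

	buf.type     = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.memory   = V4L2_MEMORY_MMAP;
	buf.index    = index;
	buf.m.planes = planes;
	buf.length   = 2;	/* number of elements in planes[] */

	return ioctl(fd, VIDIOC_QBUF, &buf);
}
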
@@ -1622,12 +1706,56 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
* A G G R E G A T E S T R U C T U R E S
*/
-/* Stream data format
+/**
+ * struct v4l2_plane_pix_format - additional, per-plane format definition
+ * @sizeimage: maximum size in bytes required for data, for which
+ * this plane will be used
+ * @bytesperline: distance in bytes between the leftmost pixels in two
+ * adjacent lines
+ */
+struct v4l2_plane_pix_format {
+ __u32 sizeimage;
+ __u16 bytesperline;
+ __u16 reserved[7];
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_pix_format_mplane - multiplanar format definition
+ * @width: image width in pixels
+ * @height: image height in pixels
+ * @pixelformat: little endian four character code (fourcc)
+ * @field: field order (for interlaced video)
+ * @colorspace: supplemental to pixelformat
+ * @plane_fmt: per-plane information
+ * @num_planes: number of planes for this format
+ */
+struct v4l2_pix_format_mplane {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat;
+ enum v4l2_field field;
+ enum v4l2_colorspace colorspace;
+
+ struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
+ __u8 num_planes;
+ __u8 reserved[11];
+} __attribute__ ((packed));
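
/*
 * Illustrative userspace sketch, not part of the patch: requesting a
 * multiplanar format fills the new pix_mp member of struct v4l2_format
 * (defined just below); the driver returns the per-plane sizes and strides in
 * plane_fmt[].  Assumes <string.h>, <sys/ioctl.h> and <linux/videodev2.h>.
 */
static int example_set_mplane_format(int fd, unsigned int width, unsigned int height)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	fmt.fmt.pix_mp.width       = width;
	fmt.fmt.pix_mp.height      = height;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
	fmt.fmt.pix_mp.field       = V4L2_FIELD_NONE;
	fmt.fmt.pix_mp.num_planes  = 2;

	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}
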
+
+/**
+ * struct v4l2_format - stream data format
+ * @type: type of the data stream
+ * @pix: definition of an image format
+ * @pix_mp: definition of a multiplanar image format
+ * @win: definition of an overlaid image
+ * @vbi: raw VBI capture or output parameters
+ * @sliced: sliced VBI capture or output parameters
+ * @raw_data: placeholder for future extensions and custom formats
*/
struct v4l2_format {
enum v4l2_buf_type type;
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
+ struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
@@ -1635,7 +1763,6 @@ struct v4l2_format {
} fmt;
};
-
/* Stream type-dependent parameters
*/
struct v4l2_streamparm {
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f7998a3bf020..f584aba78ca9 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -286,11 +286,15 @@ enum {
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
* resources are available.
+ *
+ * system_freezable_wq is equivalent to system_wq except that it's
+ * freezable.
*/
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
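
/*
 * Illustrative sketch, not part of the patch: work that must not run while
 * the system is suspending or hibernating can be queued on the new freezable
 * workqueue instead of a private one.  example_work_fn() is a placeholder.
 */
static void example_work_fn(struct work_struct *work)
{
	/* touches hardware that must stay quiescent across suspend */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_kick(void)
{
	queue_work(system_freezable_wq, &example_work);
}
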
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index e6131ef98d8f..6050783005bd 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -42,11 +42,13 @@
#define XATTR_SMACK_IPOUT "SMACK64IPOUT"
#define XATTR_SMACK_EXEC "SMACK64EXEC"
#define XATTR_SMACK_TRANSMUTE "SMACK64TRANSMUTE"
+#define XATTR_SMACK_MMAP "SMACK64MMAP"
#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX
#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN
#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT
#define XATTR_NAME_SMACKEXEC XATTR_SECURITY_PREFIX XATTR_SMACK_EXEC
#define XATTR_NAME_SMACKTRANSMUTE XATTR_SECURITY_PREFIX XATTR_SMACK_TRANSMUTE
+#define XATTR_NAME_SMACKMMAP XATTR_SECURITY_PREFIX XATTR_SMACK_MMAP
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 930fdd2de79c..b93d6f598085 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -350,6 +350,7 @@ struct xfrm_usersa_info {
#define XFRM_STATE_WILDRECV 8
#define XFRM_STATE_ICMP 16
#define XFRM_STATE_AF_UNSPEC 32
+#define XFRM_STATE_ALIGN4 64
};
struct xfrm_usersa_id {
diff --git a/include/media/noon010pc30.h b/include/media/noon010pc30.h
new file mode 100644
index 000000000000..58eafee36b30
--- /dev/null
+++ b/include/media/noon010pc30.h
@@ -0,0 +1,28 @@
+/*
+ * Driver header for NOON010PC30L camera sensor chip.
+ *
+ * Copyright (c) 2010 Samsung Electronics, Co. Ltd
+ * Contact: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef NOON010PC30_H
+#define NOON010PC30_H
+
+/**
+ * @clk_rate: the clock frequency in Hz
+ * @gpio_nreset: GPIO driving nRESET pin
+ * @gpio_nstby: GPIO driving nSTBY pin
+ */
+
+struct noon010pc30_platform_data {
+ unsigned long clk_rate;
+ int gpio_nreset;
+ int gpio_nstby;
+};
+
+#endif /* NOON010PC30_H */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index ee9e2f747c76..461711741d67 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -131,6 +131,7 @@ void rc_map_init(void);
#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
+#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2"
#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs"
#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim"
#define RC_MAP_TEVII_NEC "rc-tevii-nec"
diff --git a/include/media/s3c_fimc.h b/include/media/s5p_fimc.h
index ca1b6738e4a4..0d457cac8f7d 100644
--- a/include/media/s3c_fimc.h
+++ b/include/media/s5p_fimc.h
@@ -9,8 +9,8 @@
* published by the Free Software Foundation.
*/
-#ifndef S3C_FIMC_H_
-#define S3C_FIMC_H_
+#ifndef S5P_FIMC_H_
+#define S5P_FIMC_H_
enum cam_bus_type {
FIMC_ITU_601 = 1,
@@ -27,22 +27,22 @@ enum cam_bus_type {
struct i2c_board_info;
/**
- * struct s3c_fimc_isp_info - image sensor information required for host
+ * struct s5p_fimc_isp_info - image sensor information required for host
 * interface configuration.
*
* @board_info: pointer to I2C subdevice's board info
+ * @clk_frequency: frequency of the clock the host interface provides to sensor
* @bus_type: determines bus type, MIPI, ITU-R BT.601 etc.
* @i2c_bus_num: i2c control bus id the sensor is attached to
* @mux_id: FIMC camera interface multiplexer index (separate for MIPI and ITU)
- * @bus_width: camera data bus width in bits
* @flags: flags defining bus signals polarity inversion (High by default)
*/
-struct s3c_fimc_isp_info {
+struct s5p_fimc_isp_info {
struct i2c_board_info *board_info;
+ unsigned long clk_frequency;
enum cam_bus_type bus_type;
u16 i2c_bus_num;
u16 mux_id;
- u16 bus_width;
u16 flags;
};
@@ -50,11 +50,11 @@ struct s3c_fimc_isp_info {
#define FIMC_MAX_CAMIF_CLIENTS 2
/**
- * struct s3c_platform_fimc - camera host interface platform data
+ * struct s5p_platform_fimc - camera host interface platform data
*
* @isp_info: properties of camera sensor required for host interface setup
*/
-struct s3c_platform_fimc {
- struct s3c_fimc_isp_info *isp_info[FIMC_MAX_CAMIF_CLIENTS];
+struct s5p_platform_fimc {
+ struct s5p_fimc_isp_info *isp_info[FIMC_MAX_CAMIF_CLIENTS];
};
-#endif /* S3C_FIMC_H_ */
+#endif /* S5P_FIMC_H_ */
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 51811eac46f1..5eec5292d01e 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -131,6 +131,7 @@
#define TUNER_NXP_TDA18271 83
#define TUNER_SONY_BTF_PXN01Z 84
#define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */
+#define TUNER_TENA_TNF_5337 86
/* tv card specific */
#define TDA9887_PRESENT (1<<0)
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 44fe44ec9ea7..ff4a52ca881b 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -209,6 +209,9 @@ enum {
/* module sn9c20x: just ident 10000 */
V4L2_IDENT_SN9C20X = 10000,
+ /* Siliconfile sensors: reserved range 10100 - 10199 */
+ V4L2_IDENT_NOON010PC30 = 10100,
+
/* module cx231xx and cx25840 */
V4L2_IDENT_CX2310X_AV = 23099, /* Integrated A/V decoder; not in '100 */
V4L2_IDENT_CX23100 = 23100,
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 67df37542c68..1572c7f25777 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -37,6 +37,10 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_vid_out) (struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_vid_out_mplane)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_type_private)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
@@ -57,6 +61,10 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_g_fmt_type_private)(struct file *file, void *fh,
struct v4l2_format *f);
@@ -77,6 +85,10 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_s_fmt_type_private)(struct file *file, void *fh,
struct v4l2_format *f);
@@ -97,6 +109,10 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_try_fmt_type_private)(struct file *file, void *fh,
struct v4l2_format *f);
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 8d149f1c58d0..bf5eaaf3bd97 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -17,7 +17,7 @@
#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H
-#include <media/videobuf-core.h>
+#include <media/videobuf2-core.h>
/**
* struct v4l2_m2m_ops - mem-to-mem device driver callbacks
@@ -45,17 +45,20 @@ struct v4l2_m2m_ops {
void (*device_run)(void *priv);
int (*job_ready)(void *priv);
void (*job_abort)(void *priv);
+ void (*lock)(void *priv);
+ void (*unlock)(void *priv);
};
struct v4l2_m2m_dev;
struct v4l2_m2m_queue_ctx {
/* private: internal use only */
- struct videobuf_queue q;
+ struct vb2_queue q;
/* Queue for buffers ready to be processed as soon as this
* instance receives access to the device */
struct list_head rdy_queue;
+ spinlock_t rdy_spinlock;
u8 num_rdy;
};
@@ -72,19 +75,31 @@ struct v4l2_m2m_ctx {
/* For device job queue */
struct list_head queue;
unsigned long job_flags;
+ wait_queue_head_t finished;
/* Instance private data */
void *priv;
};
+struct v4l2_m2m_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
-struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
+static inline void
+v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state)
+{
+ vb2_buffer_done(buf, state);
+}
+
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs);
@@ -110,13 +125,13 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
-struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
- void (*vq_init)(void *priv, struct videobuf_queue *,
- enum v4l2_buf_type));
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
+
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
- struct videobuf_buffer *vb);
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
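
/*
 * Illustrative sketch, not part of the patch: with the new prototype a driver
 * passes a queue_init() callback that configures both vb2 queues.  Everything
 * named example_* (the context struct, vb2 ops and mem_ops) is a placeholder,
 * and the vb2_queue fields shown are assumed from the videobuf2 core of the
 * same series.
 */
static int example_queue_init(void *priv, struct vb2_queue *src_vq,
			      struct vb2_queue *dst_vq)
{
	struct example_ctx *ctx = priv;
	int ret;

	src_vq->type            = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes        = VB2_MMAP | VB2_USERPTR;
	src_vq->drv_priv        = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops             = &example_vb2_ops;
	src_vq->mem_ops         = &example_vb2_mem_ops;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type            = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes        = VB2_MMAP | VB2_USERPTR;
	dst_vq->drv_priv        = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops             = &example_vb2_ops;
	dst_vq->mem_ops         = &example_vb2_mem_ops;

	return vb2_queue_init(dst_vq);
}

/* in the driver's open(): ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, example_queue_init); */
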
/**
* v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
@@ -138,7 +153,7 @@ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
return m2m_ctx->out_q_ctx.num_rdy;
}
-void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type);
+void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
@@ -146,7 +161,7 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type);
*/
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
- return v4l2_m2m_next_buf(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}
/**
@@ -155,29 +170,28 @@ static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
*/
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
- return v4l2_m2m_next_buf(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}
/**
- * v4l2_m2m_get_src_vq() - return videobuf_queue for source buffers
+ * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
*/
static inline
-struct videobuf_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
+struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
- return v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ return &m2m_ctx->out_q_ctx.q;
}
/**
- * v4l2_m2m_get_dst_vq() - return videobuf_queue for destination buffers
+ * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
*/
static inline
-struct videobuf_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
+struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
- return v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ return &m2m_ctx->cap_q_ctx.q;
}
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx,
- enum v4l2_buf_type type);
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
@@ -185,7 +199,7 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx,
*/
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
- return v4l2_m2m_buf_remove(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}
/**
@@ -194,7 +208,7 @@ static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
*/
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
- return v4l2_m2m_buf_remove(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
#endif /* _MEDIA_V4L2_MEM2MEM_H */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
new file mode 100644
index 000000000000..0d71fc5efc46
--- /dev/null
+++ b/include/media/videobuf2-core.h
@@ -0,0 +1,380 @@
+/*
+ * videobuf2-core.h - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef _MEDIA_VIDEOBUF2_CORE_H
+#define _MEDIA_VIDEOBUF2_CORE_H
+
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/videodev2.h>
+
+struct vb2_alloc_ctx;
+struct vb2_fileio_data;
+
+/**
+ * struct vb2_mem_ops - memory handling/memory allocator operations
+ * @alloc: allocate video memory and, optionally, allocator private data,
+ * return NULL on failure or a pointer to allocator private,
+ * per-buffer data on success; the returned private structure
+ * will then be passed as buf_priv argument to other ops in this
+ * structure
+ * @put: inform the allocator that the buffer will no longer be used;
+ * usually will result in the allocator freeing the buffer (if
+ * no other users of this buffer are present); the buf_priv
+ * argument is the allocator private per-buffer structure
+ * previously returned from the alloc callback
+ * @get_userptr: acquire userspace memory for a hardware operation; used for
+ * USERPTR memory types; vaddr is the address passed to the
+ * videobuf layer when queuing a video buffer of USERPTR type;
+ * should return an allocator private per-buffer structure
+ * associated with the buffer on success, NULL on failure;
+ * the returned private structure will then be passed as buf_priv
+ * argument to other ops in this structure
+ * @put_userptr: inform the allocator that a USERPTR buffer will no longer
+ * be used
+ * @vaddr: return a kernel virtual address to a given memory buffer
+ * associated with the passed private structure or NULL if no
+ * such mapping exists
+ * @cookie: return allocator specific cookie for a given memory buffer
+ * associated with the passed private structure or NULL if not
+ * available
+ * @num_users: return the current number of users of a memory buffer;
+ * return 1 if the videobuf layer (or actually the driver using
+ * it) is the only user
+ * @mmap: setup a userspace mapping for a given memory buffer under
+ * the provided virtual memory region
+ *
+ * Required ops for USERPTR types: get_userptr, put_userptr.
+ * Required ops for MMAP types: alloc, put, num_users, mmap.
+ * Required ops for read/write access types: alloc, put, num_users, vaddr
+ */
+struct vb2_mem_ops {
+ void *(*alloc)(void *alloc_ctx, unsigned long size);
+ void (*put)(void *buf_priv);
+
+ void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write);
+ void (*put_userptr)(void *buf_priv);
+
+ void *(*vaddr)(void *buf_priv);
+ void *(*cookie)(void *buf_priv);
+
+ unsigned int (*num_users)(void *buf_priv);
+
+ int (*mmap)(void *buf_priv, struct vm_area_struct *vma);
+};
+
+struct vb2_plane {
+ void *mem_priv;
+ int mapped:1;
+};
+
+/**
+ * enum vb2_io_modes - queue access methods
+ * @VB2_MMAP: driver supports MMAP with streaming API
+ * @VB2_USERPTR: driver supports USERPTR with streaming API
+ * @VB2_READ: driver supports read() style access
+ * @VB2_WRITE: driver supports write() style access
+ */
+enum vb2_io_modes {
+ VB2_MMAP = (1 << 0),
+ VB2_USERPTR = (1 << 1),
+ VB2_READ = (1 << 2),
+ VB2_WRITE = (1 << 3),
+};
+
+/**
+ * enum vb2_fileio_flags - flags for selecting a mode of the file io emulator,
+ * by default the 'streaming' style is used by the file io emulator
+ * @VB2_FILEIO_READ_ONCE: report EOF after reading the first buffer
+ * @VB2_FILEIO_WRITE_IMMEDIATELY: queue buffer after each write() call
+ */
+enum vb2_fileio_flags {
+ VB2_FILEIO_READ_ONCE = (1 << 0),
+ VB2_FILEIO_WRITE_IMMEDIATELY = (1 << 1),
+};
+
+/**
+ * enum vb2_buffer_state - current video buffer state
+ * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control
+ * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
+ * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
+ * in a hardware operation
+ * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
+ * not yet dequeued to userspace
+ * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer
+ * has ended with an error, which will be reported
+ * to the userspace when it is dequeued
+ */
+enum vb2_buffer_state {
+ VB2_BUF_STATE_DEQUEUED,
+ VB2_BUF_STATE_QUEUED,
+ VB2_BUF_STATE_ACTIVE,
+ VB2_BUF_STATE_DONE,
+ VB2_BUF_STATE_ERROR,
+};
+
+struct vb2_queue;
+
+/**
+ * struct vb2_buffer - represents a video buffer
+ * @v4l2_buf: struct v4l2_buffer associated with this buffer; can
+ * be read by the driver and relevant entries can be
+ * changed by the driver in case of CAPTURE types
+ * (such as timestamp)
+ * @v4l2_planes: struct v4l2_planes associated with this buffer; can
+ * be read by the driver and relevant entries can be
+ * changed by the driver in case of CAPTURE types
+ * (such as bytesused); NOTE that even for single-planar
+ * types, the v4l2_planes[0] struct should be used
+ * instead of v4l2_buf for filling bytesused - drivers
+ * should use the vb2_set_plane_payload() function for that
+ * @vb2_queue: the queue to which this buffer belongs
+ * @num_planes: number of planes in the buffer
+ * @state: current buffer state; do not change
+ * @queued_entry: entry on the queued buffers list, which holds all
+ * buffers queued from userspace
+ * @done_entry: entry on the list that stores all buffers ready to
+ * be dequeued to userspace
+ * @planes: private per-plane information; do not change
+ * @num_planes_mapped: number of mapped planes; do not change
+ */
+struct vb2_buffer {
+ struct v4l2_buffer v4l2_buf;
+ struct v4l2_plane v4l2_planes[VIDEO_MAX_PLANES];
+
+ struct vb2_queue *vb2_queue;
+
+ unsigned int num_planes;
+
+/* Private: internal use only */
+ enum vb2_buffer_state state;
+
+ struct list_head queued_entry;
+ struct list_head done_entry;
+
+ struct vb2_plane planes[VIDEO_MAX_PLANES];
+ unsigned int num_planes_mapped;
+};
+
+/**
+ * struct vb2_ops - driver-specific callbacks
+ *
+ * @queue_setup: called from a VIDIOC_REQBUFS handler, before
+ * memory allocation; driver should return the required
+ * number of buffers in num_buffers, the required number
+ * of planes per buffer in num_planes; the size of each
+ * plane should be set in the sizes[] array and optional
+ * per-plane allocator specific context in alloc_ctxs[]
+ * array
+ * @wait_prepare: release any locks taken while calling vb2 functions;
+ * it is called before an ioctl needs to wait for a new
+ * buffer to arrive; required to avoid a deadlock in
+ * blocking access type
+ * @wait_finish: reacquire all locks released in the previous callback;
+ * required to continue operation after sleeping while
+ * waiting for a new buffer to arrive
+ * @buf_init: called once after allocating a buffer (in MMAP case)
+ * or after acquiring a new USERPTR buffer; drivers may
+ * perform additional buffer-related initialization;
+ * initialization failure (return != 0) will prevent
+ * queue setup from completing successfully; optional
+ * @buf_prepare: called every time the buffer is queued from userspace;
+ * drivers may perform any initialization required before
+ * each hardware operation in this callback;
+ * if an error is returned, the buffer will not be queued
+ * in driver; optional
+ * @buf_finish: called before every dequeue of the buffer back to
+ * userspace; drivers may perform any operations required
+ * before userspace accesses the buffer; optional
+ * @buf_cleanup: called once before the buffer is freed; drivers may
+ * perform any additional cleanup; optional
+ * @start_streaming: called once before entering 'streaming' state; enables
+ * driver to receive buffers over buf_queue() callback
+ * @stop_streaming: called when 'streaming' state must be disabled; driver
+ * should stop any DMA transactions or wait until they
+ * finish and give back all buffers it got from buf_queue()
+ * callback; may use vb2_wait_for_all_buffers() function
+ * @buf_queue: passes buffer vb to the driver; driver may start
+ * hardware operation on this buffer; driver should give
+ * the buffer back by calling vb2_buffer_done() function
+ */
+struct vb2_ops {
+ int (*queue_setup)(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned long sizes[],
+ void *alloc_ctxs[]);
+
+ void (*wait_prepare)(struct vb2_queue *q);
+ void (*wait_finish)(struct vb2_queue *q);
+
+ int (*buf_init)(struct vb2_buffer *vb);
+ int (*buf_prepare)(struct vb2_buffer *vb);
+ int (*buf_finish)(struct vb2_buffer *vb);
+ void (*buf_cleanup)(struct vb2_buffer *vb);
+
+ int (*start_streaming)(struct vb2_queue *q);
+ int (*stop_streaming)(struct vb2_queue *q);
+
+ void (*buf_queue)(struct vb2_buffer *vb);
+};
+
+/**
+ * struct vb2_queue - a videobuf queue
+ *
+ * @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h)
+ * @io_modes: supported io methods (see vb2_io_modes enum)
+ * @io_flags: additional io flags (see vb2_fileio_flags enum)
+ * @ops: driver-specific callbacks
+ * @mem_ops: memory allocator specific callbacks
+ * @drv_priv: driver private data
+ * @buf_struct_size: size of the driver-specific buffer structure;
+ * "0" indicates the driver doesn't want to use a custom buffer
+ * structure type, so sizeof(struct vb2_buffer) will be used
+ *
+ * @memory: current memory type used
+ * @bufs: videobuf buffer structures
+ * @num_buffers: number of allocated/used buffers
+ * @queued_list: list of buffers currently queued from userspace
+ * @queued_count: number of buffers owned by the driver
+ * @done_list: list of buffers ready to be dequeued to userspace
+ * @done_lock: lock to protect done_list list
+ * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued
+ * @alloc_ctx: memory type/allocator-specific contexts for each plane
+ * @streaming: current streaming state
+ * @fileio: file io emulator internal data, used only if emulator is active
+ */
+struct vb2_queue {
+ enum v4l2_buf_type type;
+ unsigned int io_modes;
+ unsigned int io_flags;
+
+ const struct vb2_ops *ops;
+ const struct vb2_mem_ops *mem_ops;
+ void *drv_priv;
+ unsigned int buf_struct_size;
+
+/* private: internal use only */
+ enum v4l2_memory memory;
+ struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
+ unsigned int num_buffers;
+
+ struct list_head queued_list;
+
+ atomic_t queued_count;
+ struct list_head done_list;
+ spinlock_t done_lock;
+ wait_queue_head_t done_wq;
+
+ void *alloc_ctx[VIDEO_MAX_PLANES];
+
+ unsigned int streaming:1;
+
+ struct vb2_fileio_data *fileio;
+};
+
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
+
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
+int vb2_wait_for_all_buffers(struct vb2_queue *q);
+
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
+
+int vb2_queue_init(struct vb2_queue *q);
+
+void vb2_queue_release(struct vb2_queue *q);
+
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
+
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
+
+int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
+unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock);
+size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock);
+
+/**
+ * vb2_is_streaming() - return streaming status of the queue
+ * @q: videobuf queue
+ */
+static inline bool vb2_is_streaming(struct vb2_queue *q)
+{
+ return q->streaming;
+}
+
+/**
+ * vb2_is_busy() - return busy status of the queue
+ * @q: videobuf queue
+ *
+ * This function checks if queue has any buffers allocated.
+ */
+static inline bool vb2_is_busy(struct vb2_queue *q)
+{
+ return (q->num_buffers > 0);
+}
+
+/**
+ * vb2_get_drv_priv() - return driver private data associated with the queue
+ * @q: videobuf queue
+ */
+static inline void *vb2_get_drv_priv(struct vb2_queue *q)
+{
+ return q->drv_priv;
+}
+
+/**
+ * vb2_set_plane_payload() - set bytesused for the plane plane_no
+ * @vb: buffer for which plane payload should be set
+ * @plane_no: plane number for which payload should be set
+ * @size: payload in bytes
+ */
+static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
+ unsigned int plane_no, unsigned long size)
+{
+ if (plane_no < vb->num_planes)
+ vb->v4l2_planes[plane_no].bytesused = size;
+}
+
+/**
+ * vb2_get_plane_payload() - get bytesused for the plane plane_no
+ * @vb: buffer for which plane payload should be returned
+ * @plane_no: plane number for which payload should be returned
+ */
+static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
+ unsigned int plane_no)
+{
+ if (plane_no < vb->num_planes)
+ return vb->v4l2_planes[plane_no].bytesused;
+ return 0;
+}
+
+/**
+ * vb2_plane_size() - return plane size in bytes
+ * @vb: buffer for which plane size should be returned
+ * @plane_no: plane number for which size should be returned
+ */
+static inline unsigned long
+vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no < vb->num_planes)
+ return vb->v4l2_planes[plane_no].length;
+ return 0;
+}
+
+#endif /* _MEDIA_VIDEOBUF2_CORE_H */
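
/*
 * A minimal usage sketch of the driver-facing API declared above, assuming
 * a single-planar capture device; the frame size, the function names and
 * the choice of vb2_vmalloc_memops (added further below) are illustrative
 * assumptions, not part of this patch.
 */
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

#define MY_FRAME_SIZE	(640 * 480 * 2)		/* assumed frame size */

static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			  unsigned int *num_planes, unsigned long sizes[],
			  void *alloc_ctxs[])
{
	if (*num_buffers < 2)
		*num_buffers = 2;		/* keep at least two in flight */
	*num_planes = 1;			/* single-planar format */
	sizes[0] = MY_FRAME_SIZE;
	return 0;
}

static void my_buf_queue(struct vb2_buffer *vb)
{
	/* Pretend the hardware filled the buffer and hand it back. */
	vb2_set_plane_payload(vb, 0, MY_FRAME_SIZE);
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}

static const struct vb2_ops my_vb2_ops = {
	.queue_setup	= my_queue_setup,
	.buf_queue	= my_buf_queue,
};

static int my_init_queue(struct vb2_queue *q, void *drv_priv)
{
	q->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes	= VB2_MMAP;
	q->ops		= &my_vb2_ops;
	q->mem_ops	= &vb2_vmalloc_memops;
	q->drv_priv	= drv_priv;
	q->buf_struct_size = 0;			/* plain struct vb2_buffer */
	return vb2_queue_init(q);
}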
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
new file mode 100644
index 000000000000..fb7ca849d993
--- /dev/null
+++ b/include/media/videobuf2-dma-contig.h
@@ -0,0 +1,29 @@
+/*
+ * videobuf2-dma-contig.h - DMA contiguous memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _MEDIA_VIDEOBUF2_DMA_COHERENT_H
+#define _MEDIA_VIDEOBUF2_DMA_COHERENT_H
+
+#include <media/videobuf2-core.h>
+
+static inline unsigned long vb2_dma_contig_plane_paddr(
+ struct vb2_buffer *vb, unsigned int plane_no)
+{
+ return (unsigned long)vb2_plane_cookie(vb, plane_no);
+}
+
+void *vb2_dma_contig_init_ctx(struct device *dev);
+void vb2_dma_contig_cleanup_ctx(void *alloc_ctx);
+
+extern const struct vb2_mem_ops vb2_dma_contig_memops;
+
+#endif
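
/*
 * A minimal sketch of using the contiguous-DMA backend above: the context
 * returned by vb2_dma_contig_init_ctx() is handed to the core through
 * queue_setup()'s alloc_ctxs[], and the per-plane cookie is the DMA/physical
 * address. Names and the omitted error handling are illustrative assumptions.
 */
#include <media/videobuf2-dma-contig.h>

static void my_attach_contig_allocator(struct vb2_queue *q,
				       struct device *dev, void **alloc_ctx)
{
	*alloc_ctx = vb2_dma_contig_init_ctx(dev);	/* error handling omitted */
	q->mem_ops = &vb2_dma_contig_memops;
}

static void my_program_dma(struct vb2_buffer *vb)
{
	unsigned long paddr = vb2_dma_contig_plane_paddr(vb, 0);

	/* ... write paddr into the device's DMA address register ... */
	(void)paddr;
}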
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
new file mode 100644
index 000000000000..0038526b8ef7
--- /dev/null
+++ b/include/media/videobuf2-dma-sg.h
@@ -0,0 +1,32 @@
+/*
+ * videobuf2-dma-sg.h - DMA scatter/gather memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _MEDIA_VIDEOBUF2_DMA_SG_H
+#define _MEDIA_VIDEOBUF2_DMA_SG_H
+
+#include <media/videobuf2-core.h>
+
+struct vb2_dma_sg_desc {
+ unsigned long size;
+ unsigned int num_pages;
+ struct scatterlist *sglist;
+};
+
+static inline struct vb2_dma_sg_desc *vb2_dma_sg_plane_desc(
+ struct vb2_buffer *vb, unsigned int plane_no)
+{
+ return (struct vb2_dma_sg_desc *)vb2_plane_cookie(vb, plane_no);
+}
+
+extern const struct vb2_mem_ops vb2_dma_sg_memops;
+
+#endif
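
/*
 * A minimal sketch of walking the scatter/gather descriptor exposed above,
 * e.g. to build a hardware SG list; the function name is an illustrative
 * assumption.
 */
#include <linux/scatterlist.h>
#include <media/videobuf2-dma-sg.h>

static void my_build_hw_sg_list(struct vb2_buffer *vb)
{
	struct vb2_dma_sg_desc *desc = vb2_dma_sg_plane_desc(vb, 0);
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(desc->sglist, sg, desc->num_pages, i) {
		/* ... append sg to the hardware descriptor chain ... */
	}
}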
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
new file mode 100644
index 000000000000..fee17033a728
--- /dev/null
+++ b/include/media/videobuf2-memops.h
@@ -0,0 +1,45 @@
+/*
+ * videobuf2-memops.h - generic memory handling routines for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _MEDIA_VIDEOBUF2_MEMOPS_H
+#define _MEDIA_VIDEOBUF2_MEMOPS_H
+
+#include <media/videobuf2-core.h>
+
+/**
+ * vb2_vmarea_handler - common vma refcount tracking handler
+ * @refcount: pointer to refcount entry in the buffer
+ * @put: callback to function that decreases buffer refcount
+ * @arg: argument for @put callback
+ */
+struct vb2_vmarea_handler {
+ atomic_t *refcount;
+ void (*put)(void *arg);
+ void *arg;
+};
+
+extern const struct vm_operations_struct vb2_common_vm_ops;
+
+int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
+ struct vm_area_struct **res_vma, dma_addr_t *res_pa);
+
+int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
+ unsigned long size,
+ const struct vm_operations_struct *vm_ops,
+ void *priv);
+
+struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
+void vb2_put_vma(struct vm_area_struct *vma);
+
+
+#endif
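
/*
 * A minimal sketch of how an allocator might use vb2_vmarea_handler so a
 * buffer stays allocated while userspace keeps it mmapped; the buffer
 * structure and the mmap memop shown here are illustrative assumptions.
 */
#include <linux/slab.h>
#include <media/videobuf2-memops.h>

struct my_buf {
	atomic_t			refcount;
	unsigned long			paddr;
	unsigned long			size;
	struct vb2_vmarea_handler	handler;
};

static void my_buf_put(void *arg)
{
	struct my_buf *buf = arg;

	if (atomic_dec_and_test(&buf->refcount))
		kfree(buf);		/* last user gone, release the buffer */
}

static int my_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct my_buf *buf = buf_priv;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = my_buf_put;
	buf->handler.arg = buf;

	/* vb2_common_vm_ops adjusts the refcount on vma open/close */
	return vb2_mmap_pfn_range(vma, buf->paddr, buf->size,
				  &vb2_common_vm_ops, &buf->handler);
}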
diff --git a/include/media/videobuf2-vmalloc.h b/include/media/videobuf2-vmalloc.h
new file mode 100644
index 000000000000..a76b8afaa31e
--- /dev/null
+++ b/include/media/videobuf2-vmalloc.h
@@ -0,0 +1,20 @@
+/*
+ * videobuf2-vmalloc.h - vmalloc memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <p.osciak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _MEDIA_VIDEOBUF2_VMALLOC_H
+#define _MEDIA_VIDEOBUF2_VMALLOC_H
+
+#include <media/videobuf2-core.h>
+
+extern const struct vb2_mem_ops vb2_vmalloc_memops;
+
+#endif
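
/*
 * A minimal sketch: a driver with no DMA constraints can point mem_ops at
 * vb2_vmalloc_memops and touch planes through vb2_plane_vaddr(); the copy
 * source here is an illustrative assumption.
 */
#include <linux/string.h>
#include <media/videobuf2-vmalloc.h>

static void my_fill_plane_from_cpu(struct vb2_buffer *vb, const void *src,
				   size_t len)
{
	void *dst = vb2_plane_vaddr(vb, 0);

	if (dst)
		memcpy(dst, src, len);
}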
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 071fd7a8d781..6b75a6971346 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -139,6 +139,8 @@ do { \
*/
enum p9_msg_t {
+ P9_TSYNCFS = 0,
+ P9_RSYNCFS,
P9_TLERROR = 6,
P9_RLERROR,
P9_TSTATFS = 8,
@@ -688,7 +690,11 @@ struct p9_rwstat {
* @id: protocol operating identifier of type &p9_msg_t
* @tag: transaction id of the request
* @offset: used by marshalling routines to track current position in buffer
- * @capacity: used by marshalling routines to track total capacity
+ * @capacity: used by marshalling routines to track total malloc'd capacity
+ * @pubuf: Payload user buffer given by the caller
+ * @pkbuf: Payload kernel buffer given by the caller
+ * @pbuf_size: size of pubuf/pkbuf (only one will be non-NULL) to be read/written.
+ * @private: For transport layer's use.
* @sdata: payload
*
* &p9_fcall represents the structure for all 9P RPC
@@ -705,6 +711,10 @@ struct p9_fcall {
size_t offset;
size_t capacity;
+ char __user *pubuf;
+ char *pkbuf;
+ size_t pbuf_size;
+ void *private;
uint8_t *sdata;
};
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 83ba6a4d58a3..0a30977e3c1f 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -230,6 +230,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
gid_t gid, struct p9_qid *qid);
int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
+int p9_client_sync_fs(struct p9_fid *fid);
int p9_client_remove(struct p9_fid *fid);
int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
u64 offset, u32 count);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 6d5886efb102..82868f18c573 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,11 +26,19 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
+#define P9_TRANS_PREF_PAYLOAD_MASK 0x1
+
+/* Default. Add Payload to PDU before sending it down to transport layer */
+#define P9_TRANS_PREF_PAYLOAD_DEF 0x0
+/* Send payload separately to the transport layer along with the PDU. */
+#define P9_TRANS_PREF_PAYLOAD_SEP 0x1
+
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
+ * @pref: Preferences of this transport
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
* @request: member function to issue a request to the transport
@@ -47,6 +55,7 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
+ int pref; /* Preferences of this transport */
int def; /* this transport should be default */
struct module *owner;
int (*create)(struct p9_client *, const char *, char *);
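
/*
 * A minimal sketch of honouring the new payload preference: a client can
 * test the transport's pref field before deciding whether to copy the
 * payload into the PDU or pass it alongside. The helper name is an
 * illustrative assumption.
 */
static bool p9_payload_is_separate(struct p9_trans_module *trans)
{
	return (trans->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
						P9_TRANS_PREF_PAYLOAD_SEP;
}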
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 0c5e72503b77..43750439c521 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -64,6 +64,11 @@ struct bt_security {
#define BT_DEFER_SETUP 7
+#define BT_FLUSHABLE 8
+
+#define BT_FLUSHABLE_OFF 0
+#define BT_FLUSHABLE_ON 1
+
#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
#define BT_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg)
@@ -200,4 +205,32 @@ extern void bt_sysfs_cleanup(void);
extern struct dentry *bt_debugfs;
+#ifdef CONFIG_BT_L2CAP
+int l2cap_init(void);
+void l2cap_exit(void);
+#else
+static inline int l2cap_init(void)
+{
+ return 0;
+}
+
+static inline void l2cap_exit(void)
+{
+}
+#endif
+
+#ifdef CONFIG_BT_SCO
+int sco_init(void);
+void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+ return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
#endif /* __BLUETOOTH_H */
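
/*
 * A minimal sketch of how the new hooks above let the core bring the
 * protocols up in one place; the caller name and the error unwinding are
 * illustrative assumptions.
 */
static int my_bt_protocols_init(void)
{
	int err;

	err = l2cap_init();
	if (err)
		return err;

	err = sco_init();
	if (err) {
		l2cap_exit();
		return err;
	}

	return 0;
}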
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 29a7a8ca0438..ec6acf2f1c0b 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -76,6 +76,14 @@ enum {
HCI_INQUIRY,
HCI_RAW,
+
+ HCI_SETUP,
+ HCI_AUTO_OFF,
+ HCI_MGMT,
+ HCI_PAIRABLE,
+ HCI_SERVICE_CACHE,
+ HCI_LINK_KEYS,
+ HCI_DEBUG_KEYS,
};
/* HCI ioctl defines */
@@ -111,6 +119,7 @@ enum {
#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT (1000) /* 1 second */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -150,6 +159,7 @@ enum {
#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
/* ACL flags */
+#define ACL_START_NO_FLUSH 0x00
#define ACL_CONT 0x01
#define ACL_START 0x02
#define ACL_ACTIVE_BCAST 0x04
@@ -159,6 +169,8 @@ enum {
#define SCO_LINK 0x00
#define ACL_LINK 0x01
#define ESCO_LINK 0x02
+/* Low Energy links do not have a defined link type. Use an invented one. */
+#define LE_LINK 0x80
/* LMP features */
#define LMP_3SLOT 0x01
@@ -183,17 +195,25 @@ enum {
#define LMP_PSCHEME 0x02
#define LMP_PCONTROL 0x04
+#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
#define LMP_EV4 0x01
#define LMP_EV5 0x02
+#define LMP_LE 0x40
#define LMP_SNIFF_SUBR 0x02
+#define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20
#define LMP_EDR_ESCO_3M 0x40
#define LMP_EDR_3S_ESCO 0x80
+#define LMP_EXT_INQ 0x01
#define LMP_SIMPLE_PAIR 0x08
+#define LMP_NO_FLUSH 0x40
+
+#define LMP_LSTO 0x01
+#define LMP_INQ_TX_PWR 0x02
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -225,6 +245,8 @@ enum {
#define HCI_AT_GENERAL_BONDING_MITM 0x05
/* ----- HCI Commands ---- */
+#define HCI_OP_NOP 0x0000
+
#define HCI_OP_INQUIRY 0x0401
struct hci_cp_inquiry {
__u8 lap[3];
@@ -292,11 +314,19 @@ struct hci_cp_pin_code_reply {
__u8 pin_len;
__u8 pin_code[16];
} __packed;
+struct hci_rp_pin_code_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
struct hci_cp_pin_code_neg_reply {
bdaddr_t bdaddr;
} __packed;
+struct hci_rp_pin_code_neg_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
struct hci_cp_change_conn_ptype {
@@ -377,6 +407,31 @@ struct hci_cp_reject_sync_conn_req {
__u8 reason;
} __packed;
+#define HCI_OP_IO_CAPABILITY_REPLY 0x042b
+struct hci_cp_io_capability_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_REPLY 0x042c
+struct hci_cp_user_confirm_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_user_confirm_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
+
+#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
+struct hci_cp_io_capability_neg_reply {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
#define HCI_OP_SNIFF_MODE 0x0803
struct hci_cp_sniff_mode {
__le16 handle;
@@ -474,6 +529,12 @@ struct hci_cp_set_event_flt {
#define HCI_CONN_SETUP_AUTO_OFF 0x01
#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
+struct hci_cp_delete_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 delete_all;
+} __packed;
+
#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
struct hci_cp_write_local_name {
__u8 name[248];
@@ -537,6 +598,8 @@ struct hci_cp_host_buffer_size {
__le16 sco_max_pkt;
} __packed;
+#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
+
#define HCI_OP_READ_SSP_MODE 0x0c55
struct hci_rp_read_ssp_mode {
__u8 status;
@@ -548,6 +611,8 @@ struct hci_cp_write_ssp_mode {
__u8 mode;
} __packed;
+#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -593,6 +658,47 @@ struct hci_rp_read_bd_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_LE_SET_EVENT_MASK 0x2001
+struct hci_cp_le_set_event_mask {
+ __u8 mask[8];
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE 0x2002
+struct hci_rp_le_read_buffer_size {
+ __u8 status;
+ __le16 le_mtu;
+ __u8 le_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN 0x200d
+struct hci_cp_le_create_conn {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __u8 filter_policy;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 own_address_type;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
+
+#define HCI_OP_LE_CONN_UPDATE 0x2013
+struct hci_cp_le_conn_update {
+ __le16 handle;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -833,6 +939,20 @@ struct hci_ev_io_capa_request {
bdaddr_t bdaddr;
} __packed;
+#define HCI_EV_IO_CAPA_REPLY 0x32
+struct hci_ev_io_capa_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_EV_USER_CONFIRM_REQUEST 0x33
+struct hci_ev_user_confirm_req {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
struct hci_ev_simple_pair_complete {
__u8 status;
@@ -845,6 +965,25 @@ struct hci_ev_remote_host_features {
__u8 features[8];
} __packed;
+#define HCI_EV_LE_META 0x3e
+struct hci_ev_le_meta {
+ __u8 subevent;
+} __packed;
+
+/* Low energy meta events */
+#define HCI_EV_LE_CONN_COMPLETE 0x01
+struct hci_ev_le_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accurancy;
+} __packed;
+
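
/*
 * A minimal sketch of decoding the new LE meta event from the structures
 * above; the handler name and the assumption that the sub-event payload
 * directly follows the meta header are illustrative.
 */
static void my_handle_le_meta(struct sk_buff *skb)
{
	struct hci_ev_le_meta *le = (void *) skb->data;

	if (le->subevent == HCI_EV_LE_CONN_COMPLETE) {
		struct hci_ev_le_conn_complete *ev = (void *) (le + 1);

		if (!ev->status)
			pr_debug("LE conn handle 0x%4.4x interval %u\n",
				 __le16_to_cpu(ev->handle),
				 __le16_to_cpu(ev->interval));
	}
}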
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index d2cf88407690..441dadbf6a89 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -60,12 +60,28 @@ struct hci_conn_hash {
spinlock_t lock;
unsigned int acl_num;
unsigned int sco_num;
+ unsigned int le_num;
};
struct bdaddr_list {
struct list_head list;
bdaddr_t bdaddr;
};
+
+struct bt_uuid {
+ struct list_head list;
+ u8 uuid[16];
+ u8 svc_hint;
+};
+
+struct link_key {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 type;
+ u8 val[16];
+ u8 pin_len;
+};
+
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
@@ -80,13 +96,18 @@ struct hci_dev {
bdaddr_t bdaddr;
__u8 dev_name[248];
__u8 dev_class[3];
+ __u8 major_class;
+ __u8 minor_class;
__u8 features[8];
__u8 commands[64];
__u8 ssp_mode;
__u8 hci_ver;
__u16 hci_rev;
+ __u8 lmp_ver;
__u16 manufacturer;
+ __le16 lmp_subver;
__u16 voice_setting;
+ __u8 io_capability;
__u16 pkt_type;
__u16 esco_type;
@@ -102,18 +123,26 @@ struct hci_dev {
atomic_t cmd_cnt;
unsigned int acl_cnt;
unsigned int sco_cnt;
+ unsigned int le_cnt;
unsigned int acl_mtu;
unsigned int sco_mtu;
+ unsigned int le_mtu;
unsigned int acl_pkts;
unsigned int sco_pkts;
+ unsigned int le_pkts;
- unsigned long cmd_last_tx;
unsigned long acl_last_tx;
unsigned long sco_last_tx;
+ unsigned long le_last_tx;
struct workqueue_struct *workqueue;
+ struct work_struct power_on;
+ struct work_struct power_off;
+ struct timer_list off_timer;
+
+ struct timer_list cmd_timer;
struct tasklet_struct cmd_task;
struct tasklet_struct rx_task;
struct tasklet_struct tx_task;
@@ -129,12 +158,17 @@ struct hci_dev {
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
- __u16 req_last_cmd;
+
+ __u16 init_last_cmd;
struct inquiry_cache inq_cache;
struct hci_conn_hash conn_hash;
struct list_head blacklist;
+ struct list_head uuids;
+
+ struct list_head link_keys;
+
struct hci_dev_stats stat;
struct sk_buff_head driver_init;
@@ -165,31 +199,37 @@ struct hci_dev {
struct hci_conn {
struct list_head list;
- atomic_t refcnt;
- spinlock_t lock;
-
- bdaddr_t dst;
- __u16 handle;
- __u16 state;
- __u8 mode;
- __u8 type;
- __u8 out;
- __u8 attempt;
- __u8 dev_class[3];
- __u8 features[8];
- __u8 ssp_mode;
- __u16 interval;
- __u16 pkt_type;
- __u16 link_policy;
- __u32 link_mode;
- __u8 auth_type;
- __u8 sec_level;
- __u8 pending_sec_level;
- __u8 power_save;
- __u16 disc_timeout;
- unsigned long pend;
-
- unsigned int sent;
+ atomic_t refcnt;
+ spinlock_t lock;
+
+ bdaddr_t dst;
+ __u16 handle;
+ __u16 state;
+ __u8 mode;
+ __u8 type;
+ __u8 out;
+ __u8 attempt;
+ __u8 dev_class[3];
+ __u8 features[8];
+ __u8 ssp_mode;
+ __u16 interval;
+ __u16 pkt_type;
+ __u16 link_policy;
+ __u32 link_mode;
+ __u8 auth_type;
+ __u8 sec_level;
+ __u8 pending_sec_level;
+ __u8 pin_length;
+ __u8 io_capability;
+ __u8 power_save;
+ __u16 disc_timeout;
+ unsigned long pend;
+
+ __u8 remote_cap;
+ __u8 remote_oob;
+ __u8 remote_auth;
+
+ unsigned int sent;
struct sk_buff_head data_q;
@@ -208,6 +248,10 @@ struct hci_conn {
void *priv;
struct hci_conn *link;
+
+ void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
};
extern struct hci_proto *hci_proto[];
@@ -274,24 +318,40 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
list_add(&c->list, &h->list);
- if (c->type == ACL_LINK)
+ switch (c->type) {
+ case ACL_LINK:
h->acl_num++;
- else
+ break;
+ case LE_LINK:
+ h->le_num++;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
h->sco_num++;
+ break;
+ }
}
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
list_del(&c->list);
- if (c->type == ACL_LINK)
+ switch (c->type) {
+ case ACL_LINK:
h->acl_num--;
- else
+ break;
+ case LE_LINK:
+ h->le_num--;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
h->sco_num--;
+ break;
+ }
}
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
- __u16 handle)
+ __u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct list_head *p;
@@ -306,7 +366,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
}
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
- __u8 type, bdaddr_t *ba)
+ __u8 type, bdaddr_t *ba)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct list_head *p;
@@ -321,7 +381,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
}
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
- __u8 type, __u16 state)
+ __u8 type, __u16 state)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct list_head *p;
@@ -437,6 +497,16 @@ int hci_inquiry(void __user *arg);
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
+int hci_uuids_clear(struct hci_dev *hdev);
+
+int hci_link_keys_clear(struct hci_dev *hdev);
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+ u8 *key, u8 type, u8 pin_len);
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+
+void hci_del_off_timer(struct hci_dev *hdev);
+
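
/*
 * A minimal sketch of the intended use of the stored link key helpers
 * declared above; the calling context is an illustrative assumption.
 */
static int my_reply_with_stored_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key = hci_find_link_key(hdev, bdaddr);

	if (!key)
		return -ENOENT;

	/* e.g. answer a link key request using key->val and key->pin_len */
	return 0;
}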
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct sk_buff *skb);
@@ -458,6 +528,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH)
+#define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE)
/* ----- HCI protocols ----- */
struct hci_proto {
@@ -503,6 +575,9 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->connect_cfm)
hp->connect_cfm(conn, status);
+
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
}
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
@@ -532,6 +607,9 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->disconn_cfm)
hp->disconn_cfm(conn, reason);
+
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
@@ -551,6 +629,9 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->security_cfm)
hp->security_cfm(conn, status, encrypt);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
@@ -564,6 +645,9 @@ static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->security_cfm)
hp->security_cfm(conn, status, encrypt);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
int hci_register_proto(struct hci_proto *hproto);
@@ -660,12 +744,29 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
/* ----- HCI Sockets ----- */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sock *skip_sk);
/* Management interface */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(u16 index);
int mgmt_index_removed(u16 index);
+int mgmt_powered(u16 index, u8 powered);
+int mgmt_discoverable(u16 index, u8 discoverable);
+int mgmt_connectable(u16 index, u8 connectable);
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type);
+int mgmt_connected(u16 index, bdaddr_t *bdaddr);
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
+int mgmt_disconnect_failed(u16 index);
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr);
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value);
+int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -697,4 +798,6 @@ struct hci_sec_filter {
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+ u16 latency, u16 to_multiplier);
#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 7ad25ca60ec0..4f4bff1eaed6 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -38,6 +38,7 @@
#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
#define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_LOCAL_BUSY_TRIES 12
+#define L2CAP_LE_DEFAULT_MTU 23
#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
@@ -88,6 +89,8 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
+#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
+#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
/* L2CAP feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
@@ -160,6 +163,9 @@ struct l2cap_conn_rsp {
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
+#define L2CAP_CID_LE_DATA 0x0004
+#define L2CAP_CID_LE_SIGNALING 0x0005
+#define L2CAP_CID_SMP 0x0006
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
@@ -255,6 +261,21 @@ struct l2cap_info_rsp {
#define L2CAP_IR_SUCCESS 0x0000
#define L2CAP_IR_NOTSUPP 0x0001
+struct l2cap_conn_param_update_req {
+ __le16 min;
+ __le16 max;
+ __le16 latency;
+ __le16 to_multiplier;
+} __packed;
+
+struct l2cap_conn_param_update_rsp {
+ __le16 result;
+} __packed;
+
+/* Connection Parameters result */
+#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
+#define L2CAP_CONN_PARAM_REJECTED 0x0001
+
/* ----- L2CAP connections ----- */
struct l2cap_chan_list {
struct sock *head;
@@ -327,6 +348,7 @@ struct l2cap_pinfo {
__u8 sec_level;
__u8 role_switch;
__u8 force_reliable;
+ __u8 flushable;
__u8 conf_req[64];
__u8 conf_len;
@@ -423,6 +445,35 @@ static inline int l2cap_tx_window_full(struct sock *sk)
#define __is_sframe(ctrl) ((ctrl) & L2CAP_CTRL_FRAME_TYPE)
#define __is_sar_start(ctrl) (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START)
-void l2cap_load(void);
+extern int disable_ertm;
+extern const struct proto_ops l2cap_sock_ops;
+extern struct bt_sock_list l2cap_sk_list;
+
+int l2cap_init_sockets(void);
+void l2cap_cleanup_sockets(void);
+
+u8 l2cap_get_ident(struct l2cap_conn *conn);
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
+int l2cap_build_conf_req(struct sock *sk, void *data);
+int __l2cap_wait_ack(struct sock *sk);
+
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len);
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len);
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen);
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len);
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb);
+void l2cap_streaming_send(struct sock *sk);
+int l2cap_ertm_send(struct sock *sk);
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout);
+void l2cap_sock_clear_timer(struct sock *sk);
+void __l2cap_sock_close(struct sock *sk, int reason);
+void l2cap_sock_kill(struct sock *sk);
+void l2cap_sock_init(struct sock *sk, struct sock *parent);
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio);
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err);
+void l2cap_chan_del(struct sock *sk, int err);
+int l2cap_do_connect(struct sock *sk);
#endif /* __L2CAP_H */
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index ca29c1367ffd..5fabfa886b3e 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -21,11 +21,13 @@
SOFTWARE IS DISCLAIMED.
*/
+#define MGMT_INDEX_NONE 0xFFFF
+
struct mgmt_hdr {
__le16 opcode;
+ __le16 index;
__le16 len;
} __packed;
-#define MGMT_HDR_SIZE 4
#define MGMT_OP_READ_VERSION 0x0001
struct mgmt_rp_read_version {
@@ -40,13 +42,10 @@ struct mgmt_rp_read_index_list {
} __packed;
#define MGMT_OP_READ_INFO 0x0004
-struct mgmt_cp_read_info {
- __le16 index;
-} __packed;
struct mgmt_rp_read_info {
- __le16 index;
__u8 type;
__u8 powered;
+ __u8 connectable;
__u8 discoverable;
__u8 pairable;
__u8 sec_mode;
@@ -58,6 +57,116 @@ struct mgmt_rp_read_info {
__u16 hci_rev;
} __packed;
+struct mgmt_mode {
+ __u8 val;
+} __packed;
+
+#define MGMT_OP_SET_POWERED 0x0005
+
+#define MGMT_OP_SET_DISCOVERABLE 0x0006
+
+#define MGMT_OP_SET_CONNECTABLE 0x0007
+
+#define MGMT_OP_SET_PAIRABLE 0x0008
+
+#define MGMT_OP_ADD_UUID 0x0009
+struct mgmt_cp_add_uuid {
+ __u8 uuid[16];
+ __u8 svc_hint;
+} __packed;
+
+#define MGMT_OP_REMOVE_UUID 0x000A
+struct mgmt_cp_remove_uuid {
+ __u8 uuid[16];
+} __packed;
+
+#define MGMT_OP_SET_DEV_CLASS 0x000B
+struct mgmt_cp_set_dev_class {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+#define MGMT_OP_SET_SERVICE_CACHE 0x000C
+struct mgmt_cp_set_service_cache {
+ __u8 enable;
+} __packed;
+
+struct mgmt_key_info {
+ bdaddr_t bdaddr;
+ u8 type;
+ u8 val[16];
+ u8 pin_len;
+} __packed;
+
+#define MGMT_OP_LOAD_KEYS 0x000D
+struct mgmt_cp_load_keys {
+ __u8 debug_keys;
+ __le16 key_count;
+ struct mgmt_key_info keys[0];
+} __packed;
+
+#define MGMT_OP_REMOVE_KEY 0x000E
+struct mgmt_cp_remove_key {
+ bdaddr_t bdaddr;
+ __u8 disconnect;
+} __packed;
+
+#define MGMT_OP_DISCONNECT 0x000F
+struct mgmt_cp_disconnect {
+ bdaddr_t bdaddr;
+} __packed;
+struct mgmt_rp_disconnect {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS 0x0010
+struct mgmt_rp_get_connections {
+ __le16 conn_count;
+ bdaddr_t conn[0];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_REPLY 0x0011
+struct mgmt_cp_pin_code_reply {
+ bdaddr_t bdaddr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __packed;
+struct mgmt_rp_pin_code_reply {
+ bdaddr_t bdaddr;
+ uint8_t status;
+} __packed;
+
+#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012
+struct mgmt_cp_pin_code_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_SET_IO_CAPABILITY 0x0013
+struct mgmt_cp_set_io_capability {
+ __u8 io_capability;
+} __packed;
+
+#define MGMT_OP_PAIR_DEVICE 0x0014
+struct mgmt_cp_pair_device {
+ bdaddr_t bdaddr;
+ __u8 io_cap;
+} __packed;
+struct mgmt_rp_pair_device {
+ bdaddr_t bdaddr;
+ __u8 status;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_REPLY 0x0015
+struct mgmt_cp_user_confirm_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct mgmt_rp_user_confirm_reply {
+ bdaddr_t bdaddr;
+ __u8 status;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -72,16 +181,56 @@ struct mgmt_ev_cmd_status {
#define MGMT_EV_CONTROLLER_ERROR 0x0003
struct mgmt_ev_controller_error {
- __le16 index;
__u8 error_code;
} __packed;
#define MGMT_EV_INDEX_ADDED 0x0004
-struct mgmt_ev_index_added {
- __le16 index;
-} __packed;
#define MGMT_EV_INDEX_REMOVED 0x0005
-struct mgmt_ev_index_removed {
- __le16 index;
+
+#define MGMT_EV_POWERED 0x0006
+
+#define MGMT_EV_DISCOVERABLE 0x0007
+
+#define MGMT_EV_CONNECTABLE 0x0008
+
+#define MGMT_EV_PAIRABLE 0x0009
+
+#define MGMT_EV_NEW_KEY 0x000A
+struct mgmt_ev_new_key {
+ struct mgmt_key_info key;
+ __u8 old_key_type;
+} __packed;
+
+#define MGMT_EV_CONNECTED 0x000B
+struct mgmt_ev_connected {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_DISCONNECTED 0x000C
+struct mgmt_ev_disconnected {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_CONNECT_FAILED 0x000D
+struct mgmt_ev_connect_failed {
+ bdaddr_t bdaddr;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_PIN_CODE_REQUEST 0x000E
+struct mgmt_ev_pin_code_request {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
+struct mgmt_ev_user_confirm_request {
+ bdaddr_t bdaddr;
+ __le32 value;
+} __packed;
+
+#define MGMT_EV_AUTH_FAILED 0x0010
+struct mgmt_ev_auth_failed {
+ bdaddr_t bdaddr;
+ __u8 status;
} __packed;
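
/*
 * A minimal sketch of building a management command with the reworked
 * header, which now carries the controller index itself (so the per-command
 * index fields above could be dropped); buffer handling around the header
 * is an illustrative assumption.
 */
static void my_fill_mgmt_hdr(struct mgmt_hdr *hdr, u16 opcode, u16 index,
			     u16 param_len)
{
	hdr->opcode = cpu_to_le16(opcode);
	hdr->index = cpu_to_le16(index);	/* or MGMT_INDEX_NONE */
	hdr->len = cpu_to_le16(param_len);	/* parameters follow the header */
}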
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
new file mode 100644
index 000000000000..8f2edbf979dc
--- /dev/null
+++ b/include/net/bluetooth/smp.h
@@ -0,0 +1,76 @@
+#ifndef __SMP_H
+#define __SMP_H
+
+struct smp_command_hdr {
+ __u8 code;
+} __packed;
+
+#define SMP_CMD_PAIRING_REQ 0x01
+#define SMP_CMD_PAIRING_RSP 0x02
+struct smp_cmd_pairing {
+ __u8 io_capability;
+ __u8 oob_flag;
+ __u8 auth_req;
+ __u8 max_key_size;
+ __u8 init_key_dist;
+ __u8 resp_key_dist;
+} __packed;
+
+#define SMP_CMD_PAIRING_CONFIRM 0x03
+struct smp_cmd_pairing_confirm {
+ __u8 confirm_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_RANDOM 0x04
+struct smp_cmd_pairing_random {
+ __u8 rand_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_FAIL 0x05
+struct smp_cmd_pairing_fail {
+ __u8 reason;
+} __packed;
+
+#define SMP_CMD_ENCRYPT_INFO 0x06
+struct smp_cmd_encrypt_info {
+ __u8 ltk[16];
+} __packed;
+
+#define SMP_CMD_MASTER_IDENT 0x07
+struct smp_cmd_master_ident {
+ __u16 ediv;
+ __u8 rand[8];
+} __packed;
+
+#define SMP_CMD_IDENT_INFO 0x08
+struct smp_cmd_ident_info {
+ __u8 irk[16];
+} __packed;
+
+#define SMP_CMD_IDENT_ADDR_INFO 0x09
+struct smp_cmd_ident_addr_info {
+ __u8 addr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define SMP_CMD_SIGN_INFO 0x0a
+struct smp_cmd_sign_info {
+ __u8 csrk[16];
+} __packed;
+
+#define SMP_CMD_SECURITY_REQ 0x0b
+struct smp_cmd_security_req {
+ __u8 auth_req;
+} __packed;
+
+#define SMP_PASSKEY_ENTRY_FAILED 0x01
+#define SMP_OOB_NOT_AVAIL 0x02
+#define SMP_AUTH_REQUIREMENTS 0x03
+#define SMP_CONFIRM_FAILED 0x04
+#define SMP_PAIRING_NOTSUPP 0x05
+#define SMP_ENC_KEY_SIZE 0x06
+#define SMP_CMD_NOTSUPP 0x07
+#define SMP_UNSPECIFIED 0x08
+#define SMP_REPEATED_ATTEMPTS 0x09
+
+#endif /* __SMP_H */
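
/*
 * A minimal sketch of composing an SMP Pairing Request PDU from the
 * definitions above; the capability and key-distribution values are
 * illustrative assumptions, not defaults mandated by this header.
 */
static void my_build_pairing_req(struct smp_command_hdr *hdr,
				 struct smp_cmd_pairing *req)
{
	hdr->code = SMP_CMD_PAIRING_REQ;

	req->io_capability = 0x03;	/* NoInputNoOutput (assumed value) */
	req->oob_flag = 0x00;		/* no OOB data present */
	req->auth_req = 0x00;		/* no bonding, no MITM */
	req->max_key_size = 16;
	req->init_key_dist = 0x00;
	req->resp_key_dist = 0x00;
}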
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1322695beb52..1ac5786da14b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -413,7 +413,7 @@ struct station_parameters {
* @STATION_INFO_PLID: @plid filled
* @STATION_INFO_PLINK_STATE: @plink_state filled
* @STATION_INFO_SIGNAL: @signal filled
- * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled
+ * @STATION_INFO_TX_BITRATE: @txrate fields are filled
* (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
* @STATION_INFO_RX_PACKETS: @rx_packets filled
* @STATION_INFO_TX_PACKETS: @tx_packets filled
@@ -421,6 +421,7 @@ struct station_parameters {
* @STATION_INFO_TX_FAILED: @tx_failed filled
* @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
* @STATION_INFO_SIGNAL_AVG: @signal_avg filled
+ * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
*/
enum station_info_flags {
STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -437,6 +438,7 @@ enum station_info_flags {
STATION_INFO_TX_FAILED = 1<<11,
STATION_INFO_RX_DROP_MISC = 1<<12,
STATION_INFO_SIGNAL_AVG = 1<<13,
+ STATION_INFO_RX_BITRATE = 1<<14,
};
/**
@@ -506,6 +508,7 @@ struct station_info {
s8 signal;
s8 signal_avg;
struct rate_info txrate;
+ struct rate_info rxrate;
u32 rx_packets;
u32 tx_packets;
u32 tx_retries;
@@ -1790,8 +1793,9 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
/**
* ieee80211_channel_to_frequency - convert channel number to frequency
* @chan: channel number
+ * @band: band, necessary due to channel number overlap
*/
-extern int ieee80211_channel_to_frequency(int chan);
+extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
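
/*
 * A minimal sketch of the new two-argument conversion; the band value used
 * here is an assumed member of enum ieee80211_band.
 */
static int my_channel6_center_freq(void)
{
	/* channel 6 is 2437 MHz only when interpreted as a 2.4 GHz channel */
	return ieee80211_channel_to_frequency(6, IEEE80211_BAND_2GHZ);
}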
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
diff --git a/include/net/dst.h b/include/net/dst.h
index 93b0310317be..8948452132ad 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -40,24 +40,10 @@ struct dst_entry {
struct rcu_head rcu_head;
struct dst_entry *child;
struct net_device *dev;
- short error;
- short obsolete;
- int flags;
-#define DST_HOST 0x0001
-#define DST_NOXFRM 0x0002
-#define DST_NOPOLICY 0x0004
-#define DST_NOHASH 0x0008
-#define DST_NOCACHE 0x0010
+ struct dst_ops *ops;
+ unsigned long _metrics;
unsigned long expires;
-
- unsigned short header_len; /* more space at head required */
- unsigned short trailer_len; /* space to reserve at tail */
-
- unsigned int rate_tokens;
- unsigned long rate_last; /* rate limiting for ICMP */
-
struct dst_entry *path;
-
struct neighbour *neighbour;
struct hh_cache *hh;
#ifdef CONFIG_XFRM
@@ -68,17 +54,16 @@ struct dst_entry {
int (*input)(struct sk_buff*);
int (*output)(struct sk_buff*);
- struct dst_ops *ops;
-
- u32 _metrics[RTAX_MAX];
-
-#ifdef CONFIG_NET_CLS_ROUTE
+ short error;
+ short obsolete;
+ unsigned short header_len; /* more space at head required */
+ unsigned short trailer_len; /* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 tclassid;
#else
__u32 __pad2;
#endif
-
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
@@ -93,6 +78,12 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
+ int flags;
+#define DST_HOST 0x0001
+#define DST_NOXFRM 0x0002
+#define DST_NOPOLICY 0x0004
+#define DST_NOHASH 0x0008
+#define DST_NOCACHE 0x0010
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
@@ -103,10 +94,70 @@ struct dst_entry {
#ifdef __KERNEL__
+extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[RTAX_MAX];
+
+#define DST_METRICS_READ_ONLY 0x1UL
+#define __DST_METRICS_PTR(Y) \
+ ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+ return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+ unsigned long val = dst->_metrics;
+ if (!(val & DST_METRICS_READ_ONLY))
+ __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+ unsigned long p = dst->_metrics;
+
+ if (p & DST_METRICS_READ_ONLY)
+ return dst->ops->cow_metrics(dst, p);
+ return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+ const u32 *src_metrics,
+ bool read_only)
+{
+ dst->_metrics = ((unsigned long) src_metrics) |
+ (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+ u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+ if (dst_metrics) {
+ u32 *src_metrics = DST_METRICS_PTR(src);
+
+ memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+ }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+ return DST_METRICS_PTR(dst);
+}
+
static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
- return dst->_metrics[metric-1];
+ u32 *p = DST_METRICS_PTR(dst);
+
+ return p[metric-1];
}
static inline u32
@@ -131,22 +182,10 @@ dst_metric_advmss(const struct dst_entry *dst)
static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
- dst->_metrics[metric-1] = val;
-}
+ u32 *p = dst_metrics_write_ptr(dst);
-static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
-{
- memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
-}
-
-static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
-{
- dst_import_metrics(dest, src->_metrics);
-}
-
-static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
-{
- return dst->_metrics;
+ if (p)
+ p[metric-1] = val;
}
static inline u32
@@ -181,8 +220,6 @@ static inline u32
dst_allfrag(const struct dst_entry *dst)
{
int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
- /* Yes, _exactly_. This is paranoia. */
- barrier();
return ret;
}
@@ -315,7 +352,7 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
}
extern int dst_discard(struct sk_buff *skb);
-extern void * dst_alloc(struct dst_ops * ops);
+extern void *dst_alloc(struct dst_ops * ops, int initial_ref);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);
@@ -384,27 +421,20 @@ extern void dst_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
- XFRM_LOOKUP_WAIT = 1 << 0,
- XFRM_LOOKUP_ICMP = 1 << 1,
+ XFRM_LOOKUP_ICMP = 1 << 0,
};
struct flowi;
#ifndef CONFIG_XFRM
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags)
+ const struct flowi *fl, struct sock *sk,
+ int flags)
{
return 0;
}
-static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags)
-{
- return 0;
-}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags);
-extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags);
+ const struct flowi *fl, struct sock *sk, int flags);
#endif
#endif
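
/*
 * A minimal sketch of the pointer-tagged metrics introduced above: a route
 * starts out sharing a read-only template and only gets private metrics on
 * the first write, via ops->cow_metrics(); the function name is an
 * illustrative assumption.
 */
static void my_metrics_cow_example(struct dst_entry *dst)
{
	/* Share the global defaults; the low bit marks them read-only. */
	dst_init_metrics(dst, dst_default_metrics, true);

	/* First write goes through dst_metrics_write_ptr() and unshares. */
	dst_metric_set(dst, RTAX_MTU, 1400);

	/* Reads never unshare. */
	(void) dst_metric_raw(dst, RTAX_MTU);
}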
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 21a320b8708e..dc0746328947 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -18,6 +18,7 @@ struct dst_ops {
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*default_mtu)(const struct dst_entry *);
+ u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
diff --git a/include/net/flow.h b/include/net/flow.h
index 240b7f356c71..fd0413873b8e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -48,7 +48,9 @@ struct flowi {
__u8 proto;
__u8 flags;
-#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_PRECOW_METRICS 0x02
+#define FLOWI_FLAG_CAN_SLEEP 0x04
union {
struct {
__be16 sport;
@@ -101,17 +103,18 @@ struct flow_cache_ops {
};
typedef struct flow_cache_object *(*flow_resolve_t)(
- struct net *net, struct flowi *key, u16 family,
+ struct net *net, const struct flowi *key, u16 family,
u8 dir, struct flow_cache_object *oldobj, void *ctx);
extern struct flow_cache_object *flow_cache_lookup(
- struct net *net, struct flowi *key, u16 family,
+ struct net *net, const struct flowi *key, u16 family,
u8 dir, flow_resolve_t resolver, void *ctx);
extern void flow_cache_flush(void);
extern atomic_t flow_cache_genid;
-static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
+static inline int flow_cache_uli_match(const struct flowi *fl1,
+ const struct flowi *fl2)
{
return (fl1->proto == fl2->proto &&
!memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 6e991e0d0d6f..f0698b955b73 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -45,7 +45,4 @@ extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int icmp_init(void);
extern void icmp_out_count(struct net *net, unsigned char type);
-/* Move into dst.h ? */
-extern int xrlim_allow(struct dst_entry *dst, int timeout);
-
#endif /* _ICMP_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index af49f8ab7f81..b0be5fb9de19 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -178,6 +178,11 @@ struct ieee80211_radiotap_header {
*
* Number of unicast retries a transmitted frame used.
*
+ * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless
+ *
+ * Contains a bitmap of known fields/flags, the flags, and
+ * the MCS index.
+ *
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -199,6 +204,8 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_RTS_RETRIES = 16,
IEEE80211_RADIOTAP_DATA_RETRIES = 17,
+ IEEE80211_RADIOTAP_MCS = 19,
+
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
@@ -245,6 +252,24 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */
#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03
+#define IEEE80211_RADIOTAP_MCS_BW_20 0
+#define IEEE80211_RADIOTAP_MCS_BW_40 1
+#define IEEE80211_RADIOTAP_MCS_BW_20L 2
+#define IEEE80211_RADIOTAP_MCS_BW_20U 3
+#define IEEE80211_RADIOTAP_MCS_SGI 0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
+
+
/* Ugly macro to convert literal channel numbers into their MHz equivalents
 * There are certainly some conditions that will break this (like feeding it '30')
* but they shouldn't arise since nothing talks on channel 30. */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 8181498fa96c..7a37369f8ea3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -86,6 +86,19 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
return (struct inet_request_sock *)sk;
}
+struct inet_cork {
+ unsigned int flags;
+ unsigned int fragsize;
+ struct ip_options *opt;
+ struct dst_entry *dst;
+ int length; /* Total length of all frames */
+ __be32 addr;
+ struct flowi fl;
+ struct page *page;
+ u32 off;
+ u8 tx_flags;
+};
+
struct ip_mc_socklist;
struct ipv6_pinfo;
struct rtable;
@@ -143,15 +156,7 @@ struct inet_sock {
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist __rcu *mc_list;
- struct {
- unsigned int flags;
- unsigned int fragsize;
- struct ip_options *opt;
- struct dst_entry *dst;
- int length; /* Total length of all frames */
- __be32 addr;
- struct flowi fl;
- } cork;
+ struct inet_cork cork;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
@@ -219,7 +224,13 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
- return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+ __u8 flags = 0;
+
+ if (inet_sk(sk)->transparent)
+ flags |= FLOWI_FLAG_ANYSRC;
+ if (sk->sk_protocol == IPPROTO_TCP)
+ flags |= FLOWI_FLAG_PRECOW_METRICS;
+ return flags;
}
#endif /* _INET_SOCK_H */
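
/*
 * A minimal sketch of what the updated helper above yields for a TCP
 * socket: FLOWI_FLAG_PRECOW_METRICS is now OR-ed in so route lookup can
 * pre-copy metrics before the connection writes to them; the wrapper name
 * is an illustrative assumption.
 */
static __u8 my_tcp_lookup_flags(const struct sock *sk)
{
	return inet_sk_flowi_flags(sk);		/* ANYSRC only if transparent */
}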
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 599d96e74114..e6dd8da6b2ad 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,15 +11,20 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <asm/atomic.h>
-struct inetpeer_addr {
+struct inetpeer_addr_base {
union {
- __be32 a4;
- __be32 a6[4];
+ __be32 a4;
+ __be32 a6[4];
};
- __u16 family;
+};
+
+struct inetpeer_addr {
+ struct inetpeer_addr_base addr;
+ __u16 family;
};
struct inet_peer {
@@ -33,15 +38,22 @@ struct inet_peer {
atomic_t refcnt;
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
- * We can share memory with rcu_head to keep inet_peer small
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+ * We can share memory with rcu_head to help keep inet_peer small.
*/
union {
struct {
- atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
+ atomic_t rid; /* Frag reception counter */
+ atomic_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
+ unsigned long rate_last;
+ unsigned long pmtu_expires;
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
};
struct rcu_head rcu;
};
@@ -49,6 +61,13 @@ struct inet_peer {
void inet_initpeers(void) __init;
+#define INETPEER_METRICS_NEW (~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+ return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
+
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create);
@@ -56,7 +75,7 @@ static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
{
struct inetpeer_addr daddr;
- daddr.a4 = v4daddr;
+ daddr.addr.a4 = v4daddr;
daddr.family = AF_INET;
return inet_getpeer(&daddr, create);
}
@@ -65,13 +84,14 @@ static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int cr
{
struct inetpeer_addr daddr;
- ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+ ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
daddr.family = AF_INET6;
return inet_getpeer(&daddr, create);
}
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
+extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
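/*
 * A minimal sketch, not part of this patch: the intended pairing for the
 * new inet_peer_xrlim_allow() helper, roughly how an ICMP-style rate
 * limiter would consult the shared peer entry.  example_allow_icmp() is
 * hypothetical.
 */
static bool example_allow_icmp(__be32 daddr, int timeout_jiffies)
{
	struct inet_peer *peer = inet_getpeer_v4(daddr, 1);	/* create if missing */
	bool ok = true;

	if (peer) {
		ok = inet_peer_xrlim_allow(peer, timeout_jiffies);
		inet_putpeer(peer);
	}
	return ok;
}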
/*
* temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
diff --git a/include/net/ip.h b/include/net/ip.h
index 67fac78a186b..a4f631108c54 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -116,8 +116,24 @@ extern int ip_append_data(struct sock *sk,
extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
extern ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
+extern struct sk_buff *__ip_make_skb(struct sock *sk,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork);
+extern int ip_send_skb(struct sk_buff *skb);
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
+extern struct sk_buff *ip_make_skb(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ struct ipcm_cookie *ipc,
+ struct rtable **rtp,
+ unsigned int flags);
+
+static inline struct sk_buff *ip_finish_skb(struct sock *sk)
+{
+ return __ip_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
+}
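/*
 * A minimal sketch, not part of this patch: a corkless datagram send using
 * the new ip_make_skb()/ip_send_skb() pair.  It assumes the caller prepared
 * ipc and rtp exactly as for ip_append_data(); example_send_dgram() and the
 * UDP-sized transport header are illustrative assumptions only.
 */
static int example_send_dgram(struct sock *sk, struct msghdr *msg, size_t len,
			      struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct sk_buff *skb;

	skb = ip_make_skb(sk, ip_generic_getfrag, msg->msg_iov, len,
			  sizeof(struct udphdr), ipc, rtp, msg->msg_flags);
	if (!skb)
		return 0;
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* a real caller would build the transport header in skb here */
	return ip_send_skb(skb);
}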
/* datagram.c */
extern int ip4_datagram_connect(struct sock *sk,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 708ff7cb8806..46a6e8ae232c 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -108,6 +108,7 @@ struct rt6_info {
u32 rt6i_flags;
struct rt6key rt6i_src;
u32 rt6i_metric;
+ u32 rt6i_peer_genid;
struct inet6_dev *rt6i_idev;
struct inet_peer *rt6i_peer;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 07bdb5e9e8ac..523a170b0ecb 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -55,7 +55,7 @@ struct fib_nh {
int nh_weight;
int nh_power;
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 nh_tclassid;
#endif
int nh_oif;
@@ -77,7 +77,7 @@ struct fib_info {
int fib_protocol;
__be32 fib_prefsrc;
u32 fib_priority;
- u32 fib_metrics[RTAX_MAX];
+ u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
#define fib_window fib_metrics[RTAX_WINDOW-1]
#define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,12 +96,15 @@ struct fib_info {
struct fib_rule;
#endif
+struct fib_table;
struct fib_result {
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
struct fib_info *fi;
+ struct fib_table *table;
+ struct list_head *fa_head;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule *r;
#endif
@@ -155,9 +158,6 @@ extern int fib_table_delete(struct fib_table *, struct fib_config *);
extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
- const struct flowi *flp,
- struct fib_result *res);
extern void fib_free_table(struct fib_table *tb);
@@ -201,8 +201,8 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
extern int __net_init fib4_rules_init(struct net *net);
extern void __net_exit fib4_rules_exit(struct net *net);
-#ifdef CONFIG_NET_CLS_ROUTE
-extern u32 fib_rules_tclass(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+extern u32 fib_rules_tclass(const struct fib_result *res);
#endif
extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res);
@@ -218,8 +218,7 @@ extern void ip_fib_init(void);
extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
struct net_device *dev, __be32 *spec_dst,
u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
- struct fib_result *res);
+extern void fib_select_default(struct fib_result *res);
/* Exported by fib_semantics.c */
extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -229,13 +228,13 @@ extern int fib_sync_up(struct net_device *dev);
extern __be32 __fib_res_prefsrc(struct fib_result *res);
extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
+/* Exported by fib_trie.c */
+extern void fib_trie_init(void);
+extern struct fib_table *fib_trie_table(u32 id);
-static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
u32 rtag;
#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index b7bbd6c28cfa..e74da41ebd1b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -28,6 +28,80 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
+#include <net/net_namespace.h> /* Netw namespace */
+
+/*
+ * Generic access of ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+ return net->ipvs;
+}
+/*
+ * Get net ptr from skb in traffic cases
+ * use skb_sknet when call is from userland (ioctl or netlink)
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /*
+ * This is used for debugging only.
+ * Start with the most likely hit,
+ * end with BUG().
+ */
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ if (skb_dst(skb)->dev)
+ return dev_net(skb_dst(skb)->dev);
+ WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+ return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /* Start with the most likely hit */
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return sock_net(skb->sk);
+#endif
+#else
+ return &init_net;
+#endif
+}
+/*
+ * This one needed for single_open_net since net is stored directly in
+ * private not as a struct i.e. seq_file_net cant be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return (struct net *)seq->private;
+#else
+ return &init_net;
+#endif
+}
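/*
 * A minimal sketch, not part of this patch: where the two lookup helpers
 * are meant to be used -- skb_net() on the packet path, skb_sknet() for
 * skbs that came from userland via netlink or ioctl.  example_hook() is a
 * hypothetical, heavily simplified netfilter hook.
 */
static unsigned int example_hook(struct sk_buff *skb)
{
	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));

	/* per-netns state is reached through ipvs, e.g. the drop-rate logic */
	if (ip_vs_todrop(ipvs))
		return NF_DROP;
	return NF_ACCEPT;
}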
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
@@ -258,6 +332,23 @@ struct ip_vs_seq {
before last resized pkt */
};
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+ __u32 conns; /* connections scheduled */
+ __u32 inpkts; /* incoming packets */
+ __u32 outpkts; /* outgoing packets */
+ __u64 inbytes; /* incoming bytes */
+ __u64 outbytes; /* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+ struct ip_vs_counters ustats;
+ struct u64_stats_sync syncp;
+};
/*
* IPVS statistics objects
@@ -279,17 +370,34 @@ struct ip_vs_estimator {
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_stats_user ustats; /* statistics */
struct ip_vs_estimator est; /* estimator */
-
- spinlock_t lock; /* spin lock */
+ struct ip_vs_cpu_stats *cpustats; /* per cpu counters */
+ spinlock_t lock; /* spin lock */
};
+/*
+ * Helper Macros for per cpu
+ * ipvs->tot_stats->ustats.count
+ */
+#define IPVS_STAT_INC(ipvs, count) \
+ __this_cpu_inc((ipvs)->ustats->count)
+
+#define IPVS_STAT_ADD(ipvs, count, value) \
+ do {\
+ write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
+ raw_smp_processor_id())); \
+ __this_cpu_add((ipvs)->ustats->count, value); \
+ write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
+ raw_smp_processor_id())); \
+ } while (0)
+
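/*
 * A minimal sketch, not part of this patch: how a reader would sum one of
 * the new per-cpu counters consistently on 32-bit hosts, using the syncp
 * field with the <linux/u64_stats_sync.h> helpers.  example_sum_inbytes()
 * is hypothetical.
 */
static u64 example_sum_inbytes(struct ip_vs_cpu_stats __percpu *cpustats)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct ip_vs_cpu_stats *s = per_cpu_ptr(cpustats, cpu);
		unsigned int start;
		u64 inbytes;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			inbytes = s->ustats.inbytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
		sum += inbytes;
	}
	return sum;
}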
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
+struct ip_vs_proto_data;
struct ip_vs_protocol {
struct ip_vs_protocol *next;
@@ -297,21 +405,22 @@ struct ip_vs_protocol {
u16 protocol;
u16 num_states;
int dont_defrag;
- atomic_t appcnt; /* counter of proto app incs */
- int *timeout_table; /* protocol timeout table */
void (*init)(struct ip_vs_protocol *pp);
void (*exit)(struct ip_vs_protocol *pp);
+ void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+ void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
int (*conn_schedule)(int af, struct sk_buff *skb,
- struct ip_vs_protocol *pp,
+ struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp);
struct ip_vs_conn *
(*conn_in_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -319,7 +428,6 @@ struct ip_vs_protocol {
struct ip_vs_conn *
(*conn_out_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -337,11 +445,11 @@ struct ip_vs_protocol {
int (*state_transition)(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd);
- int (*register_app)(struct ip_vs_app *inc);
+ int (*register_app)(struct net *net, struct ip_vs_app *inc);
- void (*unregister_app)(struct ip_vs_app *inc);
+ void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
int (*app_conn_bind)(struct ip_vs_conn *cp);
@@ -350,14 +458,26 @@ struct ip_vs_protocol {
int offset,
const char *msg);
- void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+ void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
- int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+ struct ip_vs_proto_data *next;
+ struct ip_vs_protocol *pp;
+ int *timeout_table; /* protocol timeout table */
+ atomic_t appcnt; /* counter of proto app incs. */
+ struct tcp_states_t *tcp_state_table;
};
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+ unsigned short proto);
struct ip_vs_conn_param {
+ struct net *net;
const union nf_inet_addr *caddr;
const union nf_inet_addr *vaddr;
__be16 cport;
@@ -374,17 +494,20 @@ struct ip_vs_conn_param {
* IP_VS structure allocated for each dynamically scheduled connection
*/
struct ip_vs_conn {
- struct list_head c_list; /* hashed list heads */
-
+ struct hlist_node c_list; /* hashed list heads */
+#ifdef CONFIG_NET_NS
+ struct net *net; /* Name space */
+#endif
/* Protocol, addresses and port numbers */
- u16 af; /* address family */
- union nf_inet_addr caddr; /* client address */
- union nf_inet_addr vaddr; /* virtual address */
- union nf_inet_addr daddr; /* destination address */
- volatile __u32 flags; /* status flags */
- __be16 cport;
- __be16 vport;
- __be16 dport;
+ u16 af; /* address family */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __u32 fwmark; /* Firewall mark from skb */
+ union nf_inet_addr caddr; /* client address */
+ union nf_inet_addr vaddr; /* virtual address */
+ union nf_inet_addr daddr; /* destination address */
+ volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
/* counter and timer */
@@ -422,10 +545,38 @@ struct ip_vs_conn {
struct ip_vs_seq in_seq; /* incoming seq. struct */
struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ const struct ip_vs_pe *pe;
char *pe_data;
__u8 pe_data_len;
};
+/*
+ * To save some memory in the conn table when network namespaces are disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net;
+#else
+ return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+ struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net == net;
+#else
+ return 1;
+#endif
+}
/*
* Extended internal versions of struct ip_vs_service_user and
@@ -485,6 +636,7 @@ struct ip_vs_service {
unsigned flags; /* service status flags */
unsigned timeout; /* persistent timeout in ticks */
__be32 netmask; /* grouping granularity */
+ struct net *net;
struct list_head destinations; /* real server d-linked list */
__u32 num_dests; /* number of servers */
@@ -510,8 +662,8 @@ struct ip_vs_dest {
struct list_head d_list; /* for table with all the dests */
u16 af; /* address family */
- union nf_inet_addr addr; /* IP address of the server */
__be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
volatile unsigned flags; /* dest status flags */
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
@@ -538,8 +690,8 @@ struct ip_vs_dest {
/* for virtual service */
struct ip_vs_service *svc; /* service it belongs to */
__u16 protocol; /* which protocol (TCP/UDP) */
- union nf_inet_addr vaddr; /* virtual IP address */
__be16 vport; /* virtual port number */
+ union nf_inet_addr vaddr; /* virtual IP address */
__u32 vfwmark; /* firewall mark of service */
};
@@ -674,13 +826,14 @@ enum {
IP_VS_DIR_LAST,
};
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
const union nf_inet_addr *caddr,
__be16 cport,
const union nf_inet_addr *vaddr,
__be16 vport,
struct ip_vs_conn_param *p)
{
+ p->net = net;
p->af = af;
p->protocol = protocol;
p->caddr = caddr;
@@ -695,7 +848,6 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -703,7 +855,6 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -719,14 +870,14 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr,
__be16 dport, unsigned flags,
- struct ip_vs_dest *dest);
+ struct ip_vs_dest *dest, __u32 fwmark);
extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
extern const char * ip_vs_state_name(__u16 proto, int state);
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
+extern void ip_vs_random_dropentry(struct net *net);
extern int ip_vs_conn_init(void);
extern void ip_vs_conn_cleanup(void);
@@ -796,12 +947,12 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
* (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
+ __u16 proto, __u16 port);
extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
@@ -814,15 +965,27 @@ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
void ip_vs_unbind_pe(struct ip_vs_service *svc);
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
+{
+ if (pe && pe->module)
+ __module_get(pe->module);
+}
+
+static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
+{
+ if (pe && pe->module)
+ module_put(pe->module);
+}
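/*
 * A minimal sketch, not part of this patch: the get/put pairing around a
 * persistence engine now that ip_vs_pe_get()/ip_vs_pe_put() only manage
 * the module reference.  example_use_pe() is hypothetical.
 */
static void example_use_pe(const char *name)
{
	struct ip_vs_pe *pe = ip_vs_pe_getbyname(name);

	if (!pe)
		return;
	/* ... use the engine while the module reference is held ... */
	ip_vs_pe_put(pe);
}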
/*
* IPVS protocol functions (from ip_vs_proto.c)
*/
extern int ip_vs_protocol_init(void);
extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
+extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
extern int *ip_vs_create_timeout_table(int *table, int size);
extern int
ip_vs_set_state_timeout(int *table, int num, const char *const *names,
@@ -852,26 +1015,23 @@ extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
extern struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored);
+ struct ip_vs_proto_data *pd, int *ignored);
extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd);
+
+extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
/*
* IPVS control data and functions (from ip_vs_ctl.c)
*/
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
extern struct ip_vs_stats ip_vs_stats;
extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
+extern void ip_vs_sync_switch_mode(struct net *net, int mode);
extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport);
static inline void ip_vs_service_put(struct ip_vs_service *svc)
@@ -880,7 +1040,7 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
}
extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport);
extern int ip_vs_use_count_inc(void);
@@ -888,8 +1048,9 @@ extern void ip_vs_use_count_dec(void);
extern int ip_vs_control_init(void);
extern void ip_vs_control_cleanup(void);
extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
- const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+ __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ __u16 protocol, __u32 fwmark);
extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
@@ -897,14 +1058,12 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
+ __u8 syncid);
+extern int stop_sync_thread(struct net *net, int state);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern int ip_vs_sync_init(void);
+extern void ip_vs_sync_cleanup(void);
/*
@@ -912,8 +1071,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
*/
extern int ip_vs_estimator_init(void);
extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
/*
@@ -952,14 +1111,14 @@ extern int ip_vs_icmp_xmit_v6
* we are loaded. Just set ip_vs_drop_rate to 'n' and
* we start to drop 1/rate of the packets
*/
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
- if (!ip_vs_drop_rate) return 0;
- if (--ip_vs_drop_counter > 0) return 0;
- ip_vs_drop_counter = ip_vs_drop_rate;
+ if (!ipvs->drop_rate)
+ return 0;
+ if (--ipvs->drop_counter > 0)
+ return 0;
+ ipvs->drop_counter = ipvs->drop_rate;
return 1;
}
@@ -1047,9 +1206,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
* Netfilter connection tracking
* (from ip_vs_nfct.c)
*/
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
- return sysctl_ip_vs_conntrack;
+ return ipvs->sysctl_conntrack;
}
extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1062,7 +1221,7 @@ extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
#else
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
return 0;
}
@@ -1084,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
/* CONFIG_IP_VS_NFCT */
#endif
+static inline unsigned int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+ /*
+ * We assume the overhead of processing an active connection is, on
+ * average, 256 times that of an inactive one.  (This factor of 256 may
+ * not be accurate and may be tuned later.)  The overhead is therefore
+ * estimated as:
+ * dest->activeconns*256 + dest->inactconns
+ */
+ return (atomic_read(&dest->activeconns) << 8) +
+ atomic_read(&dest->inactconns);
+}
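/*
 * A minimal sketch, not part of this patch: how a scheduler might use the
 * new helper to pick the least loaded real server.  It assumes the
 * per-service list member n_list from the full ip_vs.h and ignores the
 * weights and dest flags a real scheduler would also check.
 */
static struct ip_vs_dest *example_pick_dest(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least = NULL;
	unsigned int lowest = ~0U;

	list_for_each_entry(dest, &svc->destinations, n_list) {
		unsigned int cost = ip_vs_dest_conn_overhead(dest);

		if (cost < lowest) {
			lowest = cost;
			least = dest;
		}
	}
	return least;
}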
+
#endif /* __KERNEL__ */
#endif /* _NET_IP_VS_H */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4a3cd2cd2f5e..4635a5c80967 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -89,6 +89,18 @@
#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
/*
+ * Addr flags
+ */
+#ifdef __KERNEL__
+#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
+ ((a)->s6_addr[1] & 0x10)
+#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
+ ((a)->s6_addr[1] & 0x20)
+#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
+ ((a)->s6_addr[1] & 0x40)
+#endif
+
+/*
* fragmentation header
*/
@@ -512,12 +524,16 @@ extern void ip6_flush_pending_frames(struct sock *sk);
extern int ip6_dst_lookup(struct sock *sk,
struct dst_entry **dst,
struct flowi *fl);
-extern int ip6_dst_blackhole(struct sock *sk,
- struct dst_entry **dst,
- struct flowi *fl);
-extern int ip6_sk_dst_lookup(struct sock *sk,
- struct dst_entry **dst,
- struct flowi *fl);
+extern struct dst_entry * ip6_dst_lookup_flow(struct sock *sk,
+ struct flowi *fl,
+ const struct in6_addr *final_dst,
+ bool can_sleep);
+extern struct dst_entry * ip6_sk_dst_lookup_flow(struct sock *sk,
+ struct flowi *fl,
+ const struct in6_addr *final_dst,
+ bool can_sleep);
+extern struct dst_entry * ip6_blackhole_route(struct net *net,
+ struct dst_entry *orig_dst);
/*
* skb processing functions
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 62c0ce2d1dc8..2b072fa99399 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -341,6 +341,9 @@ struct ieee80211_bss_conf {
* the off-channel channel when a remain-on-channel offload is done
* in hardware -- normal packets still flow and are expected to be
* handled properly by the device.
+ * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
+ * testing. It will be sent out with incorrect Michael MIC key to allow
+ * TKIP countermeasures to be tested.
*
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -370,6 +373,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_LDPC = BIT(22),
IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25),
+ IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26),
};
#define IEEE80211_TX_CTL_STBC_SHIFT 23
@@ -595,9 +599,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
* the frame.
- * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
- * is valid. This is useful in monitor mode and necessary for beacon frames
- * to enable IBSS merging.
+ * @RX_FLAG_MACTIME_MPDU: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the first symbol of the MPDU
+ * was received. This is useful in monitor mode and for proper IBSS
+ * merging.
* @RX_FLAG_SHORTPRE: Short preamble was used for this frame
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
* @RX_FLAG_40MHZ: HT40 (40 MHz) was used
@@ -610,7 +615,7 @@ enum mac80211_rx_flags {
RX_FLAG_IV_STRIPPED = 1<<4,
RX_FLAG_FAILED_FCS_CRC = 1<<5,
RX_FLAG_FAILED_PLCP_CRC = 1<<6,
- RX_FLAG_TSFT = 1<<7,
+ RX_FLAG_MACTIME_MPDU = 1<<7,
RX_FLAG_SHORTPRE = 1<<8,
RX_FLAG_HT = 1<<9,
RX_FLAG_40MHZ = 1<<10,
@@ -1069,6 +1074,13 @@ enum ieee80211_tkip_key_type {
* to decrypt group addressed frames, then IBSS RSN support is still
* possible but software crypto will be used. Advertise the wiphy flag
* only in that case.
+ *
+ * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
+ * autonomously manages the PS status of connected stations. When
+ * this flag is set mac80211 will not trigger PS mode for connected
+ * stations based on the PM bit of incoming frames.
+ * Use ieee80211_start_ps()/ieee8021_end_ps() to manually configure
+ * the PS mode of connected stations.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1093,6 +1105,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20,
IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
+ IEEE80211_HW_AP_LINK_PS = 1<<22,
};
/**
@@ -1147,6 +1160,17 @@ enum ieee80211_hw_flags {
* @napi_weight: weight used for NAPI polling. You must specify an
* appropriate value here if a napi_poll operation is provided
* by your driver.
+ *
+ * @max_rx_aggregation_subframes: maximum buffer size (number of
+ * sub-frames) to be used for A-MPDU block ack receiver
+ * aggregation.
+ * This is only relevant if the device has restrictions on the
+ * number of subframes; if it relies on mac80211 to do reordering
+ * it shouldn't be set.
+ *
+ * @max_tx_aggregation_subframes: maximum number of subframes in an
+ * aggregate an HT driver will transmit, used by the peer as a
+ * hint to size its reorder buffer.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -1165,6 +1189,8 @@ struct ieee80211_hw {
u8 max_rates;
u8 max_report_rates;
u8 max_rate_tries;
+ u8 max_rx_aggregation_subframes;
+ u8 max_tx_aggregation_subframes;
};
/**
@@ -1688,7 +1714,9 @@ enum ieee80211_ampdu_mlme_action {
* station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
- * associated station, AP, IBSS/WDS/mesh peer etc. Must be atomic.
+ * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
+ * in AP mode, this callback will not be called when the flag
+ * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
@@ -1723,6 +1751,10 @@ enum ieee80211_ampdu_mlme_action {
* ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
+ * The @buf_size parameter is only valid when the action is set to
+ * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
+ * buffer size (number of subframes) for this session -- aggregates
+ * containing more subframes than this may not be transmitted to the peer.
* Returns a negative error code on failure.
* The callback can sleep.
*
@@ -1767,9 +1799,14 @@ enum ieee80211_ampdu_mlme_action {
* ieee80211_remain_on_channel_expired(). This callback may sleep.
* @cancel_remain_on_channel: Requests that an ongoing off-channel period is
* aborted before it expires. This callback may sleep.
+ * @offchannel_tx: Transmit frame on another channel, wait for a response
+ * and return. Reliable TX status must be reported for the frame. If the
+ * return value is 1, then the @remain_on_channel will be used with a
+ * regular transmission (if supported.)
+ * @offchannel_tx_cancel_wait: cancel wait associated with offchannel TX
*/
struct ieee80211_ops {
- int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+ void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
int (*add_interface)(struct ieee80211_hw *hw,
@@ -1825,7 +1862,8 @@ struct ieee80211_ops {
int (*ampdu_action)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -1845,6 +1883,11 @@ struct ieee80211_ops {
enum nl80211_channel_type channel_type,
int duration);
int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
+ int (*offchannel_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int wait);
+ int (*offchannel_tx_cancel_wait)(struct ieee80211_hw *hw);
};
/**
@@ -2113,6 +2156,48 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
local_bh_enable();
}
+/**
+ * ieee80211_sta_ps_transition - PS transition for connected sta
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
+ * flag set, use this function to inform mac80211 about a connected station
+ * entering/leaving PS mode.
+ *
+ * This function may not be called in IRQ context or with softirqs enabled.
+ *
+ * Calls to this function for a single hardware must be synchronized against
+ * each other.
+ *
+ * The function returns -EINVAL when the requested PS mode is already set.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);
+
+/**
+ * ieee80211_sta_ps_transition_ni - PS transition for connected sta
+ * (in process context)
+ *
+ * Like ieee80211_sta_ps_transition() but can be called in process context
+ * (internally disables bottom halves). Concurrent call restriction still
+ * applies.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
+ bool start)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ieee80211_sta_ps_transition(sta, start);
+ local_bh_enable();
+
+ return ret;
+}
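/*
 * A minimal sketch, not part of this patch: a driver that advertises
 * IEEE80211_HW_AP_LINK_PS reporting a PS change it detected on its RX path
 * (BH context); from process context the _ni variant above would be used.
 * example_rx_pm_change() is hypothetical driver code.
 */
static void example_rx_pm_change(struct ieee80211_sta *sta, bool sleeping)
{
	if (ieee80211_sta_ps_transition(sta, sleeping) == -EINVAL)
		pr_debug("station already in the requested PS state\n");
}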
+
/*
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1bf812b21fb7..b3b4a34cb2cc 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -20,6 +20,7 @@
#include <net/netns/conntrack.h>
#endif
#include <net/netns/xfrm.h>
+#include <net/netns/ip_vs.h>
struct proc_dir_entry;
struct net_device;
@@ -94,6 +95,7 @@ struct net {
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
#endif
+ struct netns_ipvs *ipvs;
};
diff --git a/include/net/netevent.h b/include/net/netevent.h
index e82b7bab3ff3..22b239c17eaa 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -21,7 +21,6 @@ struct netevent_redirect {
enum netevent_notif_type {
NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
- NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */
NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
};
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index d85cff10e169..d0d13378991e 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -50,11 +50,24 @@ union nf_conntrack_expect_proto {
/* per conntrack: application helper private data */
union nf_conntrack_help {
/* insert conntrack helper private data (master) here */
+#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
struct nf_ct_ftp_master ct_ftp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
+ defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
struct nf_ct_pptp_master ct_pptp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_H323) || \
+ defined(CONFIG_NF_CONNTRACK_H323_MODULE)
struct nf_ct_h323_master ct_h323_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SANE) || \
+ defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
struct nf_ct_sane_master ct_sane_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
struct nf_ct_sip_master ct_sip_info;
+#endif
};
#include <linux/types.h>
@@ -116,14 +129,14 @@ struct nf_conn {
u_int32_t secmark;
#endif
- /* Storage reserved for other modules: */
- union nf_conntrack_proto proto;
-
/* Extensions */
struct nf_ct_ext *ext;
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
+
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
};
static inline struct nf_conn *
@@ -189,9 +202,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto);
* Allocate a hashtable of hlist_head (if nulls == 0),
* or hlist_nulls_head (if nulls == 1)
*/
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 349cefedc9f3..4283508b3e18 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -23,12 +23,17 @@ struct nf_conntrack_ecache {
static inline struct nf_conntrack_ecache *
nf_ct_ecache_find(const struct nf_conn *ct)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+ return NULL;
+#endif
}
static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
@@ -45,6 +50,9 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
e->expmask = expmask;
}
return e;
+#else
+ return NULL;
+#endif
};
#ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -59,7 +67,7 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
+extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
@@ -156,7 +164,7 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
+extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 0772d296dfdb..2dcf31703acb 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -7,10 +7,19 @@
enum nf_ct_ext_id {
NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
NF_CT_EXT_NAT,
+#endif
NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ NF_CT_EXT_TSTAMP,
+#endif
NF_CT_EXT_NUM,
};
@@ -19,6 +28,7 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c305dbdab6..f1c1311adc2c 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -63,4 +63,10 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
extern int nf_conntrack_helper_init(void);
extern void nf_conntrack_helper_fini(void);
+extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout);
+
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index a7547611e8f1..e8010f445ae1 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -73,7 +73,7 @@ struct nf_conntrack_l3proto {
struct module *me;
};
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
/* Protocol registration. */
extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644
index 000000000000..fc9c82b1f06b
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -0,0 +1,65 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+ u_int64_t start;
+ u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ struct net *net = nf_ct_net(ct);
+
+ if (!net->ct.sysctl_tstamp)
+ return NULL;
+
+ return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+ return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+ return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+ net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+extern int nf_conntrack_tstamp_init(struct net *net);
+extern void nf_conntrack_tstamp_fini(struct net *net);
+#else
+static inline int nf_conntrack_tstamp_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(struct net *net)
+{
+ return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
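/*
 * A minimal sketch, not part of this patch: computing a flow's lifetime
 * from the new timestamp extension.  It assumes start/stop hold nanosecond
 * values, as set by the accompanying conntrack core changes;
 * example_ct_duration_ns() is hypothetical.
 */
static u64 example_ct_duration_ns(const struct nf_conn *ct)
{
	struct nf_conn_tstamp *tstamp = nf_conn_tstamp_find(ct);

	if (!tstamp || !tstamp->stop)
		return 0;
	return tstamp->stop - tstamp->start;
}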
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index f5f09f032a90..aff80b190c12 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -56,7 +56,9 @@ struct nf_nat_multi_range_compat {
/* per conntrack: nat application helper private data */
union nf_conntrack_nat_help {
/* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
struct nf_nat_pptp nat_pptp_info;
+#endif
};
struct nf_conn;
@@ -84,7 +86,11 @@ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+ return NULL;
+#endif
}
#else /* !__KERNEL__: iptables wants this to compile. */
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 33602ab66190..3dc7b98effeb 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -21,9 +21,9 @@ static inline int nf_nat_initialized(struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
if (manip == IP_NAT_MANIP_SRC)
- return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_SRC_NAT_DONE;
else
- return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_DST_NAT_DONE;
}
struct nlattr;
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index cd85b3bc8327..e505358d8999 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
}
#endif
-static inline void
-nf_tproxy_put_sock(struct sock *sk)
-{
- /* TIME_WAIT inet sockets have to be handled differently */
- if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
- inet_twsk_put(inet_twsk(sk));
- else
- sock_put(sk);
-}
-
/* assign a socket to the skb -- consumes sk */
-int
+void
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 373f1a900cf4..8a3906a08f5f 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -856,18 +856,27 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
#define NLA_PUT_BE16(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be16, attrtype, value)
+#define NLA_PUT_NET16(skb, attrtype, value) \
+ NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_U32(skb, attrtype, value) \
NLA_PUT_TYPE(skb, u32, attrtype, value)
#define NLA_PUT_BE32(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be32, attrtype, value)
+#define NLA_PUT_NET32(skb, attrtype, value) \
+ NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_U64(skb, attrtype, value) \
NLA_PUT_TYPE(skb, u64, attrtype, value)
#define NLA_PUT_BE64(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be64, attrtype, value)
+#define NLA_PUT_NET64(skb, attrtype, value) \
+ NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_STRING(skb, attrtype, value) \
NLA_PUT(skb, attrtype, strlen(value) + 1, value)
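/*
 * A minimal sketch, not part of this patch: the new byte-order-annotated
 * put macros in a typical fill routine.  The attribute number used here is
 * made up for illustration; real users define their own attribute enums.
 */
static int example_fill(struct sk_buff *skb, __be32 addr)
{
	NLA_PUT_NET32(skb, 1 /* hypothetical attribute type */, addr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}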
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index d4958d4c6574..341eb089349e 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -21,15 +21,15 @@ struct netns_ct {
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
+ int sysctl_tstamp;
int sysctl_checksum;
unsigned int sysctl_log_invalid; /* Log invalid packets */
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
+ struct ctl_table_header *tstamp_sysctl_header;
struct ctl_table_header *event_sysctl_header;
#endif
- int hash_vmalloc;
- int expect_vmalloc;
char *slabname;
};
#endif
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
new file mode 100644
index 000000000000..259ebac904bf
--- /dev/null
+++ b/include/net/netns/ip_vs.h
@@ -0,0 +1,143 @@
+/*
+ * IP Virtual Server
+ * Data structure for network namespace
+ *
+ */
+
+#ifndef IP_VS_H_
+#define IP_VS_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/list_nulls.h>
+#include <linux/ip_vs.h>
+#include <asm/atomic.h>
+#include <linux/in.h>
+
+struct ip_vs_stats;
+struct ip_vs_sync_buff;
+struct ctl_table_header;
+
+struct netns_ipvs {
+ int gen; /* Generation */
+ /*
+ * Hash table: for real service lookups
+ */
+ #define IP_VS_RTAB_BITS 4
+ #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+ #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+ struct list_head rs_table[IP_VS_RTAB_SIZE];
+ /* ip_vs_app */
+ struct list_head app_list;
+ struct mutex app_mutex;
+ struct lock_class_key app_key; /* mutex debugging */
+
+ /* ip_vs_proto */
+ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
+ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+ /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ #define TCP_APP_TAB_BITS 4
+ #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
+ #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
+ struct list_head tcp_apps[TCP_APP_TAB_SIZE];
+ spinlock_t tcp_app_lock;
+#endif
+ /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ #define UDP_APP_TAB_BITS 4
+ #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
+ #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
+ struct list_head udp_apps[UDP_APP_TAB_SIZE];
+ spinlock_t udp_app_lock;
+#endif
+ /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ #define SCTP_APP_TAB_BITS 4
+ #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+ #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+ /* Hash table for SCTP application incarnations */
+ struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+ spinlock_t sctp_app_lock;
+#endif
+ /* ip_vs_conn */
+ atomic_t conn_count; /* connection counter */
+
+ /* ip_vs_ctl */
+ struct ip_vs_stats *tot_stats; /* Statistics & est. */
+ struct ip_vs_cpu_stats __percpu *cpustats; /* Stats per cpu */
+ seqcount_t *ustats_seq; /* u64 read retry */
+
+ int num_services; /* no of virtual services */
+ /* 1/rate drop and drop-entry variables */
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+ spinlock_t droppacket_lock; /* drop packet handling */
+ spinlock_t securetcp_lock; /* state and timeout tables */
+ rwlock_t rs_lock; /* real services table */
+ /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
+ struct lock_class_key ctl_key; /* ctl_mutex debugging */
+ /* Trash for destinations */
+ struct list_head dest_trash;
+ /* Service counters */
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+
+ /* sys-ctl struct */
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+ /* sysctl variables */
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+ int sysctl_conntrack;
+#endif
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ int sysctl_nat_icmp_send;
+
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+ struct timer_list est_timer; /* Estimation timer */
+ /* ip_vs_sync */
+ struct list_head sync_queue;
+ spinlock_t sync_lock;
+ struct ip_vs_sync_buff *sync_buff;
+ spinlock_t sync_buff_lock;
+ struct sockaddr_in sync_mcast_addr;
+ struct task_struct *master_thread;
+ struct task_struct *backup_thread;
+ int send_mesg_maxlen;
+ int recv_mesg_maxlen;
+ volatile int sync_state;
+ volatile int master_syncid;
+ volatile int backup_syncid;
+ /* multicast interface name */
+ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ /* net name space ptr */
+ struct net *net; /* Needed by timer routines */
+};
+
+#endif /* IP_VS_H_ */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d68c3f121774..e2e2ef57eca2 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -43,7 +43,6 @@ struct netns_ipv4 {
struct xt_table *nat_table;
struct hlist_head *nat_bysource;
unsigned int nat_htable_size;
- int nat_vmalloced;
#endif
int sysctl_icmp_echo_ignore_all;
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b60b28c99e87..38eed1be6ffd 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -45,10 +45,6 @@ struct pep_sock {
u8 tx_fc; /* TX flow control */
u8 init_enable; /* auto-enable at creation */
u8 aligned;
-#ifdef CONFIG_PHONET_PIPECTRLR
- u8 pipe_state;
- struct sockaddr_pn remote_pep;
-#endif
};
static inline struct pep_sock *pep_sk(struct sock *sk)
@@ -158,6 +154,7 @@ enum {
PN_LEGACY_FLOW_CONTROL,
PN_ONE_CREDIT_FLOW_CONTROL,
PN_MULTI_CREDIT_FLOW_CONTROL,
+ PN_MAX_FLOW_CONTROL,
};
#define pn_flow_safe(fc) ((fc) >> 1)
@@ -169,21 +166,4 @@ enum {
PEP_IND_READY,
};
-#ifdef CONFIG_PHONET_PIPECTRLR
-#define PNS_PEP_CONNECT_UTID 0x02
-#define PNS_PIPE_CREATED_IND_UTID 0x04
-#define PNS_PIPE_ENABLE_UTID 0x0A
-#define PNS_PIPE_ENABLED_IND_UTID 0x0C
-#define PNS_PIPE_DISABLE_UTID 0x0F
-#define PNS_PIPE_DISABLED_IND_UTID 0x11
-#define PNS_PEP_DISCONNECT_UTID 0x06
-
-/* Used for tracking state of a pipe */
-enum {
- PIPE_IDLE,
- PIPE_DISABLED,
- PIPE_ENABLED,
-};
-#endif /* CONFIG_PHONET_PIPECTRLR */
-
#endif
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 5395e09187df..68e509750caa 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -36,6 +36,7 @@
struct pn_sock {
struct sock sk;
u16 sobject;
+ u16 dobject;
u8 resource;
};
diff --git a/include/net/protocol.h b/include/net/protocol.h
index dc07495bce4c..6f7eb800974a 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -38,7 +38,7 @@ struct net_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
diff --git a/include/net/route.h b/include/net/route.h
index 93e10c453f6b..707cfc8eccdc 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -49,6 +49,7 @@
struct fib_nh;
struct inet_peer;
+struct fib_info;
struct rtable {
struct dst_entry dst;
@@ -68,7 +69,9 @@ struct rtable {
/* Miscellaneous cached information */
__be32 rt_spec_dst; /* RFC1122 specific destination */
+ u32 rt_peer_genid;
struct inet_peer *peer; /* long-living peer info */
+ struct fib_info *fi; /* for client ref to shared metrics */
};
static inline bool rt_is_input_route(struct rtable *rt)
@@ -117,7 +120,8 @@ extern void rt_cache_flush(struct net *net, int how);
extern void rt_cache_flush_batch(struct net *net);
extern int __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp);
extern int ip_route_output_key(struct net *, struct rtable **, struct flowi *flp);
-extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
+extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk);
+extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin, bool noref);
@@ -165,7 +169,7 @@ static inline char rt_tos2priority(u8 tos)
static inline int ip_route_connect(struct rtable **rp, __be32 dst,
__be32 src, u32 tos, int oif, u8 protocol,
__be16 sport, __be16 dport, struct sock *sk,
- int flags)
+ bool can_sleep)
{
struct flowi fl = { .oif = oif,
.mark = sk->sk_mark,
@@ -180,6 +184,10 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
if (inet_sk(sk)->transparent)
fl.flags |= FLOWI_FLAG_ANYSRC;
+ if (protocol == IPPROTO_TCP)
+ fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
+ if (can_sleep)
+ fl.flags |= FLOWI_FLAG_CAN_SLEEP;
if (!dst || !src) {
err = __ip_route_output_key(net, rp, &fl);
@@ -191,26 +199,31 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
*rp = NULL;
}
security_sk_classify_flow(sk, &fl);
- return ip_route_output_flow(net, rp, &fl, sk, flags);
+ return ip_route_output_flow(net, rp, &fl, sk);
}
static inline int ip_route_newports(struct rtable **rp, u8 protocol,
+ __be16 orig_sport, __be16 orig_dport,
__be16 sport, __be16 dport, struct sock *sk)
{
- if (sport != (*rp)->fl.fl_ip_sport ||
- dport != (*rp)->fl.fl_ip_dport) {
- struct flowi fl;
-
- memcpy(&fl, &(*rp)->fl, sizeof(fl));
- fl.fl_ip_sport = sport;
- fl.fl_ip_dport = dport;
- fl.proto = protocol;
+ if (sport != orig_sport || dport != orig_dport) {
+ struct flowi fl = { .oif = (*rp)->fl.oif,
+ .mark = (*rp)->fl.mark,
+ .fl4_dst = (*rp)->fl.fl4_dst,
+ .fl4_src = (*rp)->fl.fl4_src,
+ .fl4_tos = (*rp)->fl.fl4_tos,
+ .proto = (*rp)->fl.proto,
+ .fl_ip_sport = sport,
+ .fl_ip_dport = dport };
+
if (inet_sk(sk)->transparent)
fl.flags |= FLOWI_FLAG_ANYSRC;
+ if (protocol == IPPROTO_TCP)
+ fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
ip_rt_put(*rp);
*rp = NULL;
security_sk_classify_flow(sk, &fl);
- return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0);
+ return ip_route_output_flow(sock_net(sk), rp, &fl, sk);
}
return 0;
}
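/*
 * A minimal sketch, not part of this patch: the caller pattern the new
 * signatures expect -- remember the ports used for the connect-time lookup
 * and re-route only if a different local port was chosen later, loosely
 * modelled on tcp_v4_connect().  example_connect_route() is hypothetical.
 */
static int example_connect_route(struct sock *sk, __be32 daddr, __be32 saddr,
				 __be16 orig_sport, __be16 orig_dport,
				 __be16 new_sport, __be16 new_dport)
{
	struct rtable *rt;
	int err;

	err = ip_route_connect(&rt, daddr, saddr, RT_CONN_FLAGS(sk),
			       sk->sk_bound_dev_if, sk->sk_protocol,
			       orig_sport, orig_dport, sk, true);
	if (err)
		return err;

	/* a local port may have been (re)chosen since the first lookup */
	return ip_route_newports(&rt, sk->sk_protocol,
				 orig_sport, orig_dport,
				 new_sport, new_dport, sk);
}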
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 160a407c1963..dbdf2b530897 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -31,10 +31,12 @@ enum qdisc_state_t {
* following bits are only changed while qdisc lock is held
*/
enum qdisc___state_t {
- __QDISC___STATE_RUNNING,
+ __QDISC___STATE_RUNNING = 1,
+ __QDISC___STATE_THROTTLED = 2,
};
struct qdisc_size_table {
+ struct rcu_head rcu;
struct list_head list;
struct tc_sizespec szopts;
int refcnt;
@@ -46,14 +48,13 @@ struct Qdisc {
struct sk_buff * (*dequeue)(struct Qdisc *dev);
unsigned flags;
#define TCQ_F_BUILTIN 1
-#define TCQ_F_THROTTLED 2
-#define TCQ_F_INGRESS 4
-#define TCQ_F_CAN_BYPASS 8
-#define TCQ_F_MQROOT 16
+#define TCQ_F_INGRESS 2
+#define TCQ_F_CAN_BYPASS 4
+#define TCQ_F_MQROOT 8
#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
struct Qdisc_ops *ops;
- struct qdisc_size_table *stab;
+ struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
u32 parent;
@@ -78,25 +79,43 @@ struct Qdisc {
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
- unsigned long __state;
+ unsigned int __state;
struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
spinlock_t busylock;
};
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
- return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
- return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ if (qdisc_is_running(qdisc))
+ return false;
+ qdisc->__state |= __QDISC___STATE_RUNNING;
+ return true;
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+ return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+ qdisc->__state |= __QDISC___STATE_THROTTLED;
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+ qdisc->__state &= ~__QDISC___STATE_THROTTLED;
}
struct Qdisc_class_ops {
@@ -199,7 +218,7 @@ struct tcf_proto {
struct qdisc_skb_cb {
unsigned int pkt_len;
- char data[];
+ long data[];
};
static inline int qdisc_qlen(struct Qdisc *q)
@@ -331,8 +350,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
- struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
@@ -411,12 +430,20 @@ enum net_xmit_qdisc_t {
#define net_xmit_drop_count(e) (1)
#endif
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
- if (sch->stab)
- qdisc_calculate_pkt_len(skb, sch->stab);
+ struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+ if (stab)
+ __qdisc_calculate_pkt_len(skb, stab);
#endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch);
}
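A minimal sketch (assumed caller, simplified from the real qdisc_run() path in net/sched/sch_generic.c) of how the non-atomic helpers are meant to be used: __state is only touched by the CPU that owns the qdisc's running context, so a plain read-modify-write replaces the old test_and_set_bit().

#include <net/sch_generic.h>

/* Simplified sketch; the real dequeue loop is elsewhere. */
static void example_qdisc_run(struct Qdisc *q)
{
	if (!qdisc_run_begin(q))
		return;			/* already being run elsewhere */

	/* ... dequeue and transmit packets here ... */

	qdisc_run_end(q);
}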
diff --git a/include/net/sock.h b/include/net/sock.h
index bc1cf7d88ccb..da0534d3401c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -281,7 +281,7 @@ struct sock {
int sk_rcvbuf;
struct sk_filter __rcu *sk_filter;
- struct socket_wq *sk_wq;
+ struct socket_wq __rcu *sk_wq;
#ifdef CONFIG_NET_DMA
struct sk_buff_head sk_async_wait_queue;
@@ -1191,7 +1191,7 @@ extern void sk_filter_release_rcu(struct rcu_head *rcu);
static inline void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
- call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
+ call_rcu(&fp->rcu, sk_filter_release_rcu);
}
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
@@ -1266,7 +1266,8 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
- return &sk->sk_wq->wait;
+ BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+ return &rcu_dereference_raw(sk->sk_wq)->wait;
}
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
@@ -1287,7 +1288,7 @@ static inline void sock_orphan(struct sock *sk)
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
write_lock_bh(&sk->sk_callback_lock);
- rcu_assign_pointer(sk->sk_wq, parent->wq);
+ sk->sk_wq = parent->wq;
parent->sk = sk;
sk_set_socket(sk, parent);
security_sock_graft(sk, parent);
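Illustrative only: callers of sk_sleep() are unchanged by the __rcu annotation on sk_wq; a protocol poll routine (hypothetical skeleton below) still just hands the wait queue head to sock_poll_wait().

#include <linux/poll.h>
#include <net/sock.h>

static unsigned int example_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	/* ... compute and return the POLL* mask here ... */
	return 0;
}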
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38509f047382..cda30ea354a2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+#define TCP_INIT_CWND 10
+
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
@@ -799,15 +802,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
- return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
@@ -1074,8 +1068,6 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
return 1;
}
-#define TCP_CHECK_TIMER(sk) do { } while (0)
-
static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
@@ -1404,7 +1396,7 @@ extern struct request_sock_ops tcp6_request_sock_ops;
extern void tcp_v4_destroy_sock(struct sock *sk);
extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
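A hedged sketch of how code might consume the new constant now that the RFC 3390 helper is gone; the function name and the route metric handling below are illustrative, not taken from this patch.

#include <net/tcp.h>

/* Fall back to the draft-hkchu initial window when the route supplies
 * no explicit initcwnd metric; clamp as usual. */
static void example_set_init_cwnd(struct tcp_sock *tp, u32 route_initcwnd)
{
	tp->snd_cwnd = route_initcwnd ?: TCP_INIT_CWND;
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}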
diff --git a/include/net/udp.h b/include/net/udp.h
index bb967dd59bf7..67ea6fcb3ec0 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -144,6 +144,17 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+static inline __wsum udp_csum(struct sk_buff *skb)
+{
+ __wsum csum = csum_partial(skb_transport_header(skb),
+ sizeof(struct udphdr), skb->csum);
+
+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+ csum = csum_add(csum, skb->csum);
+ }
+ return csum;
+}
+
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline void udp_lib_hash(struct sock *sk)
{
@@ -245,5 +256,5 @@ extern void udp4_proc_exit(void);
extern void udp_init(void);
extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
#endif /* _UDP_H */
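A sketch of an assumed output-path caller for the new udp_csum() helper; the pseudo-header finishing step is modelled on what the existing corked-socket path does, and the function name is hypothetical.

#include <net/udp.h>

static void example_udp4_finish_csum(struct sk_buff *skb,
				     __be32 saddr, __be32 daddr)
{
	struct udphdr *uh = udp_hdr(skb);
	int len = skb->len - skb_transport_offset(skb);
	__wsum csum = udp_csum(skb);

	uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 means "no checksum" for UDP */
}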
diff --git a/include/net/udplite.h b/include/net/udplite.h
index afdffe607b24..673a024c6b2a 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -115,6 +115,18 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+static inline __wsum udplite_csum(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+ const int off = skb_transport_offset(skb);
+ const int len = skb->len - off;
+
+ skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
+
+ return skb_checksum(skb, off, min(cscov, len), 0);
+}
+
extern void udplite4_register(void);
extern int udplite_get_port(struct sock *sk, unsigned short snum,
int (*scmp)(const struct sock *, const struct sock *));
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b9f385da758e..d5dcf3974636 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -36,6 +36,7 @@
#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
+#define XFRM_ALIGN4(len) (((len) + 3) & ~3)
#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
@@ -266,25 +267,26 @@ struct xfrm_policy_afinfo {
struct dst_ops *dst_ops;
void (*garbage_collect)(struct net *net);
struct dst_entry *(*dst_lookup)(struct net *net, int tos,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr);
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr);
int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
- int (*get_tos)(struct flowi *fl);
+ int (*get_tos)(const struct flowi *fl);
int (*init_path)(struct xfrm_dst *path,
struct dst_entry *dst,
int nfheader_len);
int (*fill_dst)(struct xfrm_dst *xdst,
struct net_device *dev,
- struct flowi *fl);
+ const struct flowi *fl);
+ struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
};
extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
@@ -299,9 +301,12 @@ struct xfrm_state_afinfo {
const struct xfrm_type *type_map[IPPROTO_MAX];
struct xfrm_mode *mode_map[XFRM_MODE_MAX];
int (*init_flags)(struct xfrm_state *x);
- void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
- void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr);
+ void (*init_tempsel)(struct xfrm_selector *sel,
+ const struct flowi *fl);
+ void (*init_temprop)(struct xfrm_state *x,
+ const struct xfrm_tmpl *tmpl,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr);
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
int (*output)(struct sk_buff *skb);
@@ -332,7 +337,8 @@ struct xfrm_type {
void (*destructor)(struct xfrm_state *);
int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
- int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
+ int (*reject)(struct xfrm_state *, struct sk_buff *,
+ const struct flowi *);
int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
/* Estimate maximal size of result of transformation of a dgram */
u32 (*get_mtu)(struct xfrm_state *, int size);
@@ -501,7 +507,7 @@ struct xfrm_policy {
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
};
-static inline struct net *xp_net(struct xfrm_policy *xp)
+static inline struct net *xp_net(const struct xfrm_policy *xp)
{
return read_pnet(&xp->xp_net);
}
@@ -545,13 +551,17 @@ struct xfrm_migrate {
struct xfrm_mgr {
struct list_head list;
char *id;
- int (*notify)(struct xfrm_state *x, struct km_event *c);
+ int (*notify)(struct xfrm_state *x, const struct km_event *c);
int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
- int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
+ int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
- int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
+ int (*migrate)(const struct xfrm_selector *sel,
+ u8 dir, u8 type,
+ const struct xfrm_migrate *m,
+ int num_bundles,
+ const struct xfrm_kmaddress *k);
};
extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -762,10 +772,11 @@ static inline void xfrm_state_hold(struct xfrm_state *x)
atomic_inc(&x->refcnt);
}
-static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
+static inline bool addr_match(const void *token1, const void *token2,
+ int prefixlen)
{
- __be32 *a1 = token1;
- __be32 *a2 = token2;
+ const __be32 *a1 = token1;
+ const __be32 *a2 = token2;
int pdw;
int pbi;
@@ -774,7 +785,7 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
if (pdw)
if (memcmp(a1, a2, pdw << 2))
- return 0;
+ return false;
if (pbi) {
__be32 mask;
@@ -782,14 +793,14 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
mask = htonl((0xffffffff) << (32 - pbi));
if ((a1[pdw] ^ a2[pdw]) & mask)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static __inline__
-__be16 xfrm_flowi_sport(struct flowi *fl)
+__be16 xfrm_flowi_sport(const struct flowi *fl)
{
__be16 port;
switch(fl->proto) {
@@ -816,7 +827,7 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
}
static __inline__
-__be16 xfrm_flowi_dport(struct flowi *fl)
+__be16 xfrm_flowi_dport(const struct flowi *fl)
{
__be16 port;
switch(fl->proto) {
@@ -839,7 +850,8 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
return port;
}
-extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+extern int xfrm_selector_match(const struct xfrm_selector *sel,
+ const struct flowi *fl,
unsigned short family);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -947,7 +959,7 @@ secpath_reset(struct sk_buff *skb)
}
static inline int
-xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
+xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
{
switch (family) {
case AF_INET:
@@ -959,21 +971,21 @@ xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
}
static inline int
-__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
return (tmpl->saddr.a4 &&
tmpl->saddr.a4 != x->props.saddr.a4);
}
static inline int
-__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
}
static inline int
-xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
+xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
{
switch (family) {
case AF_INET:
@@ -1126,7 +1138,7 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
#endif
static __inline__
-xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
{
switch (family){
case AF_INET:
@@ -1138,7 +1150,7 @@ xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
}
static __inline__
-xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
{
switch (family){
case AF_INET:
@@ -1150,7 +1162,7 @@ xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
}
static __inline__
-void xfrm_flowi_addr_get(struct flowi *fl,
+void xfrm_flowi_addr_get(const struct flowi *fl,
xfrm_address_t *saddr, xfrm_address_t *daddr,
unsigned short family)
{
@@ -1167,8 +1179,8 @@ void xfrm_flowi_addr_get(struct flowi *fl,
}
static __inline__ int
-__xfrm4_state_addr_check(struct xfrm_state *x,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
if (daddr->a4 == x->id.daddr.a4 &&
(saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
@@ -1177,8 +1189,8 @@ __xfrm4_state_addr_check(struct xfrm_state *x,
}
static __inline__ int
-__xfrm6_state_addr_check(struct xfrm_state *x,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
(!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
@@ -1189,8 +1201,8 @@ __xfrm6_state_addr_check(struct xfrm_state *x,
}
static __inline__ int
-xfrm_state_addr_check(struct xfrm_state *x,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
switch (family) {
@@ -1203,23 +1215,23 @@ xfrm_state_addr_check(struct xfrm_state *x,
}
static __inline__ int
-xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
+xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
unsigned short family)
{
switch (family) {
case AF_INET:
return __xfrm4_state_addr_check(x,
- (xfrm_address_t *)&fl->fl4_dst,
- (xfrm_address_t *)&fl->fl4_src);
+ (const xfrm_address_t *)&fl->fl4_dst,
+ (const xfrm_address_t *)&fl->fl4_src);
case AF_INET6:
return __xfrm6_state_addr_check(x,
- (xfrm_address_t *)&fl->fl6_dst,
- (xfrm_address_t *)&fl->fl6_src);
+ (const xfrm_address_t *)&fl->fl6_dst,
+ (const xfrm_address_t *)&fl->fl6_src);
}
return 0;
}
-static inline int xfrm_state_kern(struct xfrm_state *x)
+static inline int xfrm_state_kern(const struct xfrm_state *x)
{
return atomic_read(&x->tunnel_users);
}
@@ -1323,8 +1335,10 @@ extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
- struct flowi *fl, struct xfrm_tmpl *tmpl,
+extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ const struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family);
extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
@@ -1337,11 +1351,11 @@ extern void xfrm_state_insert(struct xfrm_state *x);
extern int xfrm_state_add(struct xfrm_state *x);
extern int xfrm_state_update(struct xfrm_state *x);
extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
- xfrm_address_t *daddr, __be32 spi,
+ const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family);
extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
u8 proto,
unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
@@ -1468,19 +1482,19 @@ u32 xfrm_get_acqseq(void);
extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr, int create,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create,
unsigned short family);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
#ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k);
+extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_bundles,
+ const struct xfrm_kmaddress *k);
extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m);
-extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
struct xfrm_kmaddress *k);
#endif
@@ -1500,10 +1514,10 @@ extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
+extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
int probe);
struct hash_desc;
@@ -1511,7 +1525,8 @@ struct scatterlist;
typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
unsigned int);
-static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
+static inline int xfrm_addr_cmp(const xfrm_address_t *a,
+ const xfrm_address_t *b,
int family)
{
switch (family) {
@@ -1544,12 +1559,12 @@ static inline int xfrm_aevent_is_on(struct net *net)
}
#endif
-static inline int xfrm_alg_len(struct xfrm_algo *alg)
+static inline int xfrm_alg_len(const struct xfrm_algo *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
-static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
+static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
@@ -1597,7 +1612,7 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
return m->v & m->m;
}
-static inline int xfrm_mark_put(struct sk_buff *skb, struct xfrm_mark *m)
+static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
{
if (m->m | m->v)
NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
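Small illustrative use of the now const-correct, bool-returning addr_match(); the wrapper name below is hypothetical.

#include <net/xfrm.h>

static bool example_same_v4_net(const xfrm_address_t *a,
				const xfrm_address_t *b)
{
	/* Compare the two addresses under a /24 prefix. */
	return addr_match(a, b, 24);
}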
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 8479b66c067b..3fd5064dd43a 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev);
#define CONF_ENABLE_ESR 0x0008
#define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ
* (CONF_ENABLE_IRQ) in use */
+#define CONF_ENABLE_ZVCARD 0x0020
/* flags used by pcmcia_loop_config() autoconfiguration */
#define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */
diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h
index 185015dd1166..f7751d53f1d3 100644
--- a/include/scsi/fc/fc_ns.h
+++ b/include/scsi/fc/fc_ns.h
@@ -41,6 +41,7 @@ enum fc_ns_req {
FC_NS_GI_A = 0x0101, /* get identifiers - scope */
FC_NS_GPN_ID = 0x0112, /* get port name by ID */
FC_NS_GNN_ID = 0x0113, /* get node name by ID */
+ FC_NS_GSPN_ID = 0x0118, /* get symbolic port name */
FC_NS_GID_PN = 0x0121, /* get ID for port name */
FC_NS_GID_NN = 0x0131, /* get IDs for node name */
FC_NS_GID_FT = 0x0171, /* get IDs by FC4 type */
@@ -144,7 +145,7 @@ struct fc_ns_gid_pn {
};
/*
- * GID_PN response
+ * GID_PN response or GSPN_ID request
*/
struct fc_gid_pn_resp {
__u8 fp_resvd;
@@ -152,6 +153,14 @@ struct fc_gid_pn_resp {
};
/*
+ * GSPN_ID response
+ */
+struct fc_gspn_resp {
+ __u8 fp_name_len;
+ char fp_name[];
+};
+
+/*
* RFT_ID request - register FC-4 types for ID.
*/
struct fc_ns_rft_id {
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index 6d293c846a46..be418d8448a5 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -46,16 +46,11 @@ struct fc_ct_req {
} payload;
};
-/**
- * fill FC header fields in specified fc_frame
- */
-static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
- u32 did, u32 sid, enum fc_fh_type type,
- u32 f_ctl, u32 parm_offset)
+static inline void __fc_fill_fc_hdr(struct fc_frame_header *fh,
+ enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
{
- struct fc_frame_header *fh;
-
- fh = fc_frame_header_get(fp);
WARN_ON(r_ctl == 0);
fh->fh_r_ctl = r_ctl;
hton24(fh->fh_d_id, did);
@@ -68,6 +63,19 @@ static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
}
/**
+ * fill FC header fields in specified fc_frame
+ */
+static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
+{
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ __fc_fill_fc_hdr(fh, r_ctl, did, sid, type, f_ctl, parm_offset);
+}
+
+/**
* fc_adisc_fill() - Fill in adisc request frame
* @lport: local port.
* @fp: fc frame where payload will be placed.
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index f53c8e31d5fb..24193c1b0da0 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -35,6 +35,8 @@
#include <scsi/fc_frame.h>
+#define FC_FC4_PROV_SIZE (FC_TYPE_FCP + 1) /* size of tables */
+
/*
* libfc error codes
*/
@@ -156,6 +158,7 @@ struct fc_rport_libfc_priv {
#define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
#define FC_RP_FLAGS_RETRY (1 << 1)
#define FC_RP_STARTED (1 << 2)
+ #define FC_RP_FLAGS_CONF_REQ (1 << 3)
unsigned int e_d_tov;
unsigned int r_a_tov;
};
@@ -179,6 +182,7 @@ struct fc_rport_libfc_priv {
* @rp_mutex: The mutex that protects the remote port
* @retry_work: Handle for retries
* @event_callback: Callback when READY, FAILED or LOGO states complete
+ * @prli_count: Count of open PRLI sessions in providers
* @rcu: Structure used for freeing in an RCU-safe manner
*/
struct fc_rport_priv {
@@ -202,7 +206,13 @@ struct fc_rport_priv {
struct list_head peers;
struct work_struct event_work;
u32 supported_classes;
+ u16 prli_count;
struct rcu_head rcu;
+ u16 sp_features;
+ u8 spp_type;
+ void (*lld_event_callback)(struct fc_lport *,
+ struct fc_rport_priv *,
+ enum fc_rport_event);
};
/**
@@ -551,6 +561,16 @@ struct libfc_function_template {
struct fc_seq *(*seq_start_next)(struct fc_seq *);
/*
+ * Set a response handler for the exchange of the sequence.
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*seq_set_resp)(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg);
+
+ /*
* Assign a sequence for an incoming request frame.
*
* STATUS: OPTIONAL
@@ -558,6 +578,13 @@ struct libfc_function_template {
struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *);
/*
+ * Release the reference on the sequence returned by seq_assign().
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*seq_release)(struct fc_seq *);
+
+ /*
* Reset an exchange manager, completing all sequences and exchanges.
* If s_id is non-zero, reset only exchanges originating from that FID.
* If d_id is non-zero, reset only exchanges sending to that FID.
@@ -656,6 +683,15 @@ struct libfc_function_template {
void (*rport_destroy)(struct kref *);
/*
+ * Callback routine after the remote port is logged in
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*rport_event_callback)(struct fc_lport *,
+ struct fc_rport_priv *,
+ enum fc_rport_event);
+
+ /*
* Send a fcp cmd from fsp pkt.
* Called with the SCSI host lock unlocked and irqs disabled.
*
@@ -749,6 +785,15 @@ struct fc_disc {
enum fc_disc_event);
};
+/*
+ * Local port notifier and events.
+ */
+extern struct blocking_notifier_head fc_lport_notifier_head;
+enum fc_lport_event {
+ FC_LPORT_EV_ADD,
+ FC_LPORT_EV_DEL,
+};
+
/**
* struct fc_lport - Local port
* @host: The SCSI host associated with a local port
@@ -789,8 +834,10 @@ struct fc_disc {
* @lso_max: The maximum large offload send size
* @fcts: FC-4 type mask
* @lp_mutex: Mutex to protect the local port
- * @list: Handle for list of local ports
+ * @list: Linkage on list of vport peers
* @retry_work: Handle to local port for delayed retry context
+ * @prov: Pointers available for use by passive FC-4 providers
+ * @lport_list: Linkage on module-wide list of local ports
*/
struct fc_lport {
/* Associations */
@@ -846,8 +893,32 @@ struct fc_lport {
struct mutex lp_mutex;
struct list_head list;
struct delayed_work retry_work;
+ void *prov[FC_FC4_PROV_SIZE];
+ struct list_head lport_list;
};
+/**
+ * struct fc4_prov - FC-4 provider registration
+ * @prli: Handler for incoming PRLI
+ * @prlo: Handler for session reset
+ * @recv: Handler for incoming request
+ * @module: Pointer to module. May be NULL.
+ */
+struct fc4_prov {
+ int (*prli)(struct fc_rport_priv *, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out);
+ void (*prlo)(struct fc_rport_priv *);
+ void (*recv)(struct fc_lport *, struct fc_frame *);
+ struct module *module;
+};
+
+/*
+ * Register FC-4 provider with libfc.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *);
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *);
+
/*
* FC_LPORT HELPER FUNCTIONS
*****************************/
@@ -978,6 +1049,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize);
struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id);
int fc_lport_bsg_request(struct fc_bsg_job *);
void fc_lport_set_local_id(struct fc_lport *, u32 port_id);
+void fc_lport_iterate(void (*func)(struct fc_lport *, void *), void *);
/*
* REMOTE PORT LAYER
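A minimal FC-4 provider skeleton against the new registration hooks, assuming a module that only wants PRLI and request-frame callbacks; everything named example_* is illustrative and not part of this patch.

#include <linux/module.h>
#include <scsi/libfc.h>

static int example_prli(struct fc_rport_priv *rdata, u32 spp_len,
			const struct fc_els_spp *spp_in,
			struct fc_els_spp *spp_out)
{
	return 0;			/* accept the service parameter page */
}

static void example_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);		/* drop incoming requests in this sketch */
}

static struct fc4_prov example_prov = {
	.prli	= example_prli,
	.recv	= example_recv,
	.module	= THIS_MODULE,
};

static int __init example_init(void)
{
	return fc_fc4_register_provider(FC_TYPE_FCP, &example_prov);
}

static void __exit example_exit(void)
{
	fc_fc4_deregister_provider(FC_TYPE_FCP, &example_prov);
}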
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index feb6a94c90ea..8c1638b8c28e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -33,6 +33,12 @@
#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
/*
+ * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
+ * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
+ */
+#define FCOE_MTU 2158
+
+/*
* FIP tunable parameters.
*/
#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
@@ -221,6 +227,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
const struct libfc_function_template *, int init_fcp);
+u32 fcoe_fc_crc(struct fc_frame *fp);
+int fcoe_start_io(struct sk_buff *skb);
/**
* is_fip_mode() - returns true if FIP mode selected.
@@ -231,5 +239,102 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip)
return fip->state == FIP_ST_ENABLED;
}
+/* helper for FCoE SW HBA drivers, can include subven and subdev if needed. The
+ * modpost would use pci_device_id table to auto-generate formatted module alias
+ * into the corresponding .mod.c file, but there may or may not be a pci device
+ * id table for FCoE drivers so we use the following helper to build the fcoe
+ * driver module alias.
+ */
+#define MODULE_ALIAS_FCOE_PCI(ven, dev) \
+ MODULE_ALIAS("fcoe-pci:" \
+ "v" __stringify(ven) \
+ "d" __stringify(dev) "sv*sd*bc*sc*i*")
+
+/* the name of the default FCoE transport driver fcoe.ko */
+#define FCOE_TRANSPORT_DEFAULT "fcoe"
+
+/* struct fcoe_transport - The FCoE transport interface
+ * @name: a vendor specific name for their FCoE transport driver
+ * @attached: whether this transport is already attached
+ * @list: list linkage to all attached transports
+ * @match: handler to allow the transport driver to match up a given netdev
+ * @create: handler to sysfs entry of create for FCoE instances
+ * @destroy: handler to sysfs entry of destroy for FCoE instances
+ * @enable: handler to sysfs entry of enable for FCoE instances
+ * @disable: handler to sysfs entry of disable for FCoE instances
+ */
+struct fcoe_transport {
+ char name[IFNAMSIZ];
+ bool attached;
+ struct list_head list;
+ bool (*match) (struct net_device *device);
+ int (*create) (struct net_device *device, enum fip_state fip_mode);
+ int (*destroy) (struct net_device *device);
+ int (*enable) (struct net_device *device);
+ int (*disable) (struct net_device *device);
+};
+
+/**
+ * struct fcoe_percpu_s - The context for FCoE receive thread(s)
+ * @thread: The thread context
+ * @fcoe_rx_list: The queue of pending packets to process
+ * @page: The memory page for calculating frame trailer CRCs
+ * @crc_eof_offset: The offset into the CRC page pointing to available
+ * memory for a new trailer
+ */
+struct fcoe_percpu_s {
+ struct task_struct *thread;
+ struct sk_buff_head fcoe_rx_list;
+ struct page *crc_eof_page;
+ int crc_eof_offset;
+};
+
+/**
+ * struct fcoe_port - The FCoE private structure
+ * @priv: The associated fcoe interface. The structure is
+ * defined by the low level driver
+ * @lport: The associated local port
+ * @fcoe_pending_queue: The pending Rx queue of skbs
+ * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @max_queue_depth: Max queue depth of pending queue
+ * @min_queue_depth: Min queue depth of pending queue
+ * @timer: The queue timer
+ * @destroy_work: Handle for work context
+ * (to prevent RTNL deadlocks)
+ * @data_src_addr: Source address for data
+ *
+ * An instance of this structure is to be allocated along with the
+ * Scsi_Host and libfc fc_lport structures.
+ */
+struct fcoe_port {
+ void *priv;
+ struct fc_lport *lport;
+ struct sk_buff_head fcoe_pending_queue;
+ u8 fcoe_pending_queue_active;
+ u32 max_queue_depth;
+ u32 min_queue_depth;
+ struct timer_list timer;
+ struct work_struct destroy_work;
+ u8 data_src_addr[ETH_ALEN];
+};
+void fcoe_clean_pending_queue(struct fc_lport *);
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
+void fcoe_queue_timer(ulong lport);
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps);
+
+/**
+ * struct fcoe_netdev_mapping - A mapping from netdevice to fcoe_transport
+ */
+struct fcoe_netdev_mapping {
+ struct list_head list;
+ struct net_device *netdev;
+ struct fcoe_transport *ft;
+};
+
+/* fcoe transports registration and deregistration */
+int fcoe_transport_attach(struct fcoe_transport *ft);
+int fcoe_transport_detach(struct fcoe_transport *ft);
#endif /* _LIBFCOE_H */
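Sketch of a vendor transport hooking into the new attach/detach interface; all example_* handlers are deliberately empty stubs for illustration.

#include <linux/module.h>
#include <scsi/libfcoe.h>

static bool example_match(struct net_device *netdev)
{
	return false;			/* claim no netdevs in this sketch */
}

static int example_create(struct net_device *netdev, enum fip_state fip_mode)
{
	return -ENODEV;
}

static int example_destroy(struct net_device *netdev)
{
	return -ENODEV;
}

static int example_enable(struct net_device *netdev)
{
	return -ENODEV;
}

static int example_disable(struct net_device *netdev)
{
	return -ENODEV;
}

static struct fcoe_transport example_transport = {
	.name	 = "example",
	.match	 = example_match,
	.create	 = example_create,
	.destroy = example_destroy,
	.enable	 = example_enable,
	.disable = example_disable,
};

static int __init example_init(void)
{
	return fcoe_transport_attach(&example_transport);
}

static void __exit example_exit(void)
{
	fcoe_transport_detach(&example_transport);
}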
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 748382b32b52..0f4367751b71 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -212,9 +212,6 @@ struct iscsi_conn {
/* values userspace uses to id a conn */
int persistent_port;
char *persistent_address;
- /* remote portal currently connected to */
- int portal_port;
- char portal_address[ISCSI_ADDRESS_BUF_LEN];
/* MIB-statistics */
uint64_t txdata_octets;
@@ -319,9 +316,6 @@ struct iscsi_host {
/* hw address or netdev iscsi connection is bound to */
char *hwaddress;
char *netdev;
- /* local address */
- int local_port;
- char local_address[ISCSI_ADDRESS_BUF_LEN];
wait_queue_head_t session_removal_wq;
/* protects sessions and state */
@@ -394,6 +388,8 @@ extern void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err);
extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf);
+extern int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf);
extern void iscsi_suspend_tx(struct iscsi_conn *conn);
extern void iscsi_suspend_queue(struct iscsi_conn *conn);
extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index c583193ae929..9c159f74c6d0 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -39,6 +39,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
struct scsi_target *starget);
void sas_ata_task_abort(struct sas_task *task);
+void sas_ata_strategy_handler(struct Scsi_Host *shost);
+int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
+ enum blk_eh_timer_return *rtn);
+int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+ struct list_head *done_q);
#else
@@ -55,6 +60,23 @@ static inline int sas_ata_init_host_and_port(struct domain_device *found_dev,
static inline void sas_ata_task_abort(struct sas_task *task)
{
}
+
+static inline void sas_ata_strategy_handler(struct Scsi_Host *shost)
+{
+}
+
+static inline int sas_ata_timed_out(struct scsi_cmnd *cmd,
+ struct sas_task *task,
+ enum blk_eh_timer_return *rtn)
+{
+ return 0;
+}
+static inline int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+ struct list_head *done_q)
+{
+ return 0;
+}
+
#endif
#endif /* _SAS_ATA_H_ */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index b76d4006e36d..3668903e397b 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -435,6 +435,10 @@ static inline int scsi_is_wlun(unsigned int lun)
* recover the link. Transport class will
* retry or fail IO */
#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */
+#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on
+ * other paths */
+#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other
+ * paths might yield different results */
#define DRIVER_OK 0x00 /* Driver status */
/*
@@ -464,6 +468,7 @@ static inline int scsi_is_wlun(unsigned int lun)
#define TIMEOUT_ERROR 0x2007
#define SCSI_RETURN_NOT_HANDLED 0x2008
#define FAST_IO_FAIL 0x2009
+#define TARGET_ERROR 0x200A
/*
* Midlevel queue return values.
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 85867dcde335..f171c65dc5a8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -184,6 +184,7 @@ typedef void (*activate_complete)(void *, int);
struct scsi_device_handler {
/* Used by the infrastructure */
struct list_head list; /* list of scsi_device_handlers */
+ int idx;
/* Filled by the hardware handler */
struct module *module;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 7fff94b3b2a8..bf8f52965675 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -101,6 +101,8 @@ struct iscsi_transport {
void (*destroy_conn) (struct iscsi_cls_conn *conn);
int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
char *buf, int buflen);
+ int (*get_ep_param) (struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
int (*get_conn_param) (struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
int (*get_session_param) (struct iscsi_cls_session *session,
@@ -160,8 +162,9 @@ struct iscsi_cls_conn {
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
+ struct mutex ep_mutex;
+ struct iscsi_endpoint *ep;
- int active; /* must be accessed with the connlock */
struct device dev; /* sysfs transport/container device */
};
@@ -222,6 +225,7 @@ struct iscsi_endpoint {
void *dd_data; /* LLD private data */
struct device dev;
uint64_t id;
+ struct iscsi_cls_conn *conn;
};
/*
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index b602f475cdbb..f1dcefe4532b 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -96,6 +96,10 @@
#define AC97_FUNC_INFO 0x68 /* Function Information */
#define AC97_SENSE_INFO 0x6a /* Sense Details */
+/* volume controls */
+#define AC97_MUTE_MASK_MONO 0x8000
+#define AC97_MUTE_MASK_STEREO 0x8080
+
/* slot allocation */
#define AC97_SLOT_TAG 0
#define AC97_SLOT_CMD_ADDR 1
@@ -138,6 +142,7 @@
#define AC97_BC_18BIT_ADC 0x0100 /* 18-bit ADC resolution */
#define AC97_BC_20BIT_ADC 0x0200 /* 20-bit ADC resolution */
#define AC97_BC_ADC_MASK 0x0300
+#define AC97_BC_3D_TECH_ID_MASK 0x7c00 /* Per-vendor ID of 3D enhancement */
/* general purpose */
#define AC97_GP_DRSS_MASK 0x0c00 /* double rate slot select */
diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h
new file mode 100644
index 000000000000..50a059e7d116
--- /dev/null
+++ b/include/sound/cs4271.h
@@ -0,0 +1,24 @@
+/*
+ * Definitions for CS4271 ASoC codec driver
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CS4271_H
+#define __CS4271_H
+
+struct cs4271_platform_data {
+ int gpio_nreset; /* GPIO driving Reset pin, if any */
+};
+
+#endif /* __CS4271_H */
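An assumed board-support fragment showing how a machine file might hand the reset GPIO to the codec; the GPIO number and I2C address are placeholders, not values from this patch.

#include <linux/i2c.h>
#include <sound/cs4271.h>

static struct cs4271_platform_data example_cs4271_pdata = {
	.gpio_nreset = 42,		/* board-specific reset GPIO */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("cs4271", 0x10),
		.platform_data = &example_cs4271_pdata,
	},
};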
diff --git a/include/sound/hdspm.h b/include/sound/hdspm.h
index 81990b2bcc98..1774ff5ff632 100644
--- a/include/sound/hdspm.h
+++ b/include/sound/hdspm.h
@@ -3,8 +3,8 @@
/*
* Copyright (C) 2003 Winfried Ritsch (IEM)
* based on hdsp.h from Thomas Charbonnel (thomas@undata.org)
- *
- *
+ *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -23,50 +23,41 @@
/* Maximum channels is 64 even on 56Mode you have 64playbacks to matrix */
#define HDSPM_MAX_CHANNELS 64
-/* -------------------- IOCTL Peak/RMS Meters -------------------- */
-
-/* peam rms level structure like we get from hardware
-
- maybe in future we can memory map it so I just copy it
- to user on ioctl call now an dont change anything
- rms are made out of low and high values
- where (long) ????_rms = (????_rms_l >> 8) + ((????_rms_h & 0xFFFFFF00)<<24)
- (i asume so from the code)
-*/
-
-struct hdspm_peak_rms {
-
- unsigned int level_offset[1024];
+enum hdspm_io_type {
+ MADI,
+ MADIface,
+ AIO,
+ AES32,
+ RayDAT
+};
- unsigned int input_peak[64];
- unsigned int playback_peak[64];
- unsigned int output_peak[64];
- unsigned int xxx_peak[64]; /* not used */
+enum hdspm_speed {
+ ss,
+ ds,
+ qs
+};
- unsigned int reserved[256]; /* not used */
+/* -------------------- IOCTL Peak/RMS Meters -------------------- */
- unsigned int input_rms_l[64];
- unsigned int playback_rms_l[64];
- unsigned int output_rms_l[64];
- unsigned int xxx_rms_l[64]; /* not used */
+struct hdspm_peak_rms {
+ uint32_t input_peaks[64];
+ uint32_t playback_peaks[64];
+ uint32_t output_peaks[64];
- unsigned int input_rms_h[64];
- unsigned int playback_rms_h[64];
- unsigned int output_rms_h[64];
- unsigned int xxx_rms_h[64]; /* not used */
-};
+ uint64_t input_rms[64];
+ uint64_t playback_rms[64];
+ uint64_t output_rms[64];
-struct hdspm_peak_rms_ioctl {
- struct hdspm_peak_rms *peak;
+ uint8_t speed; /* enum {ss, ds, qs} */
+ int status2;
};
-/* use indirect access due to the limit of ioctl bit size */
#define SNDRV_HDSPM_IOCTL_GET_PEAK_RMS \
- _IOR('H', 0x40, struct hdspm_peak_rms_ioctl)
+ _IOR('H', 0x42, struct hdspm_peak_rms)
/* ------------ CONFIG block IOCTL ---------------------- */
-struct hdspm_config_info {
+struct hdspm_config {
unsigned char pref_sync_ref;
unsigned char wordclock_sync_check;
unsigned char madi_sync_check;
@@ -80,18 +71,121 @@ struct hdspm_config_info {
unsigned int analog_out;
};
-#define SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO \
- _IOR('H', 0x41, struct hdspm_config_info)
+#define SNDRV_HDSPM_IOCTL_GET_CONFIG \
+ _IOR('H', 0x41, struct hdspm_config)
+
+/**
+ * If there's a TCO (TimeCode Option) board installed,
+ * there are further options and status data available.
+ * The hdspm_ltc structure contains the current SMPTE
+ * timecode and some status information and can be
+ * obtained via SNDRV_HDSPM_IOCTL_GET_LTC or in the
+ * hdspm_status struct.
+ **/
+
+enum hdspm_ltc_format {
+ format_invalid,
+ fps_24,
+ fps_25,
+ fps_2997,
+ fps_30
+};
+
+enum hdspm_ltc_frame {
+ frame_invalid,
+ drop_frame,
+ full_frame
+};
+
+enum hdspm_ltc_input_format {
+ ntsc,
+ pal,
+ no_video
+};
+
+struct hdspm_ltc {
+ unsigned int ltc;
+ enum hdspm_ltc_format format;
+ enum hdspm_ltc_frame frame;
+ enum hdspm_ltc_input_format input_format;
+};
+
+#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc)
+
+/**
+ * The status data reflects the device's current state
+ * as determined by the card's configuration and
+ * connection status.
+ **/
+
+enum hdspm_sync {
+ hdspm_sync_no_lock = 0,
+ hdspm_sync_lock = 1,
+ hdspm_sync_sync = 2
+};
-/* get Soundcard Version */
+enum hdspm_madi_input {
+ hdspm_input_optical = 0,
+ hdspm_input_coax = 1
+};
+
+enum hdspm_madi_channel_format {
+ hdspm_format_ch_64 = 0,
+ hdspm_format_ch_56 = 1
+};
+
+enum hdspm_madi_frame_format {
+ hdspm_frame_48 = 0,
+ hdspm_frame_96 = 1
+};
+
+enum hdspm_syncsource {
+ syncsource_wc = 0,
+ syncsource_madi = 1,
+ syncsource_tco = 2,
+ syncsource_sync = 3,
+ syncsource_none = 4
+};
+
+struct hdspm_status {
+ uint8_t card_type; /* enum hdspm_io_type */
+ enum hdspm_syncsource autosync_source;
+
+ uint64_t card_clock;
+ uint32_t master_period;
+
+ union {
+ struct {
+ uint8_t sync_wc; /* enum hdspm_sync */
+ uint8_t sync_madi; /* enum hdspm_sync */
+ uint8_t sync_tco; /* enum hdspm_sync */
+ uint8_t sync_in; /* enum hdspm_sync */
+ uint8_t madi_input; /* enum hdspm_madi_input */
+ uint8_t channel_format; /* enum hdspm_madi_channel_format */
+ uint8_t frame_format; /* enum hdspm_madi_frame_format */
+ } madi;
+ } card_specific;
+};
+
+#define SNDRV_HDSPM_IOCTL_GET_STATUS \
+ _IOR('H', 0x47, struct hdspm_status)
+
+/**
+ * Get information about the card and its add-ons.
+ **/
+
+#define HDSPM_ADDON_TCO 1
struct hdspm_version {
+ uint8_t card_type; /* enum hdspm_io_type */
+ char cardname[20];
+ unsigned int serial;
unsigned short firmware_rev;
+ int addons;
};
-#define SNDRV_HDSPM_IOCTL_GET_VERSION _IOR('H', 0x43, struct hdspm_version)
-
+#define SNDRV_HDSPM_IOCTL_GET_VERSION _IOR('H', 0x48, struct hdspm_version)
/* ------------- get Matrix Mixer IOCTL --------------- */
@@ -103,7 +197,7 @@ struct hdspm_version {
/* equivalent to hardware definition, maybe for future feature of mmap of
* them
*/
-/* each of 64 outputs has 64 infader and 64 outfader:
+/* each of 64 outputs has 64 infader and 64 outfader:
Ins to Outs mixer[out].in[in], Outstreams to Outs mixer[out].pb[pb] */
#define HDSPM_MIXER_CHANNELS HDSPM_MAX_CHANNELS
@@ -131,4 +225,5 @@ typedef struct hdspm_version hdspm_version_t;
typedef struct hdspm_channelfader snd_hdspm_channelfader_t;
typedef struct hdspm_mixer hdspm_mixer_t;
-#endif /* __SOUND_HDSPM_H */
+
+#endif
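Illustrative user-space snippet against the renumbered version ioctl; the hwdep device path is a placeholder and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sound/hdspm.h>

int main(void)
{
	struct hdspm_version ver;
	int fd = open("/dev/snd/hwC0D0", O_RDONLY);	/* path is illustrative */

	if (fd < 0 || ioctl(fd, SNDRV_HDSPM_IOCTL_GET_VERSION, &ver) < 0)
		return 1;
	printf("%s: firmware %u, serial %u\n",
	       ver.cardname, ver.firmware_rev, ver.serial);
	return 0;
}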
diff --git a/include/sound/mixer_oss.h b/include/sound/mixer_oss.h
index 51fbcb4a277a..13cb0b430a1b 100644
--- a/include/sound/mixer_oss.h
+++ b/include/sound/mixer_oss.h
@@ -73,6 +73,9 @@ struct snd_mixer_oss_file {
struct snd_mixer_oss *mixer;
};
+int snd_mixer_oss_ioctl_card(struct snd_card *card,
+ unsigned int cmd, unsigned long arg);
+
#endif /* CONFIG_SND_MIXER_OSS */
#endif /* __SOUND_MIXER_OSS_H */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e731f8d71934..430a9cc045e2 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -136,48 +136,49 @@ struct snd_pcm_ops {
SNDRV_PCM_RATE_88200|SNDRV_PCM_RATE_96000)
#define SNDRV_PCM_RATE_8000_192000 (SNDRV_PCM_RATE_8000_96000|SNDRV_PCM_RATE_176400|\
SNDRV_PCM_RATE_192000)
-#define SNDRV_PCM_FMTBIT_S8 (1ULL << SNDRV_PCM_FORMAT_S8)
-#define SNDRV_PCM_FMTBIT_U8 (1ULL << SNDRV_PCM_FORMAT_U8)
-#define SNDRV_PCM_FMTBIT_S16_LE (1ULL << SNDRV_PCM_FORMAT_S16_LE)
-#define SNDRV_PCM_FMTBIT_S16_BE (1ULL << SNDRV_PCM_FORMAT_S16_BE)
-#define SNDRV_PCM_FMTBIT_U16_LE (1ULL << SNDRV_PCM_FORMAT_U16_LE)
-#define SNDRV_PCM_FMTBIT_U16_BE (1ULL << SNDRV_PCM_FORMAT_U16_BE)
-#define SNDRV_PCM_FMTBIT_S24_LE (1ULL << SNDRV_PCM_FORMAT_S24_LE)
-#define SNDRV_PCM_FMTBIT_S24_BE (1ULL << SNDRV_PCM_FORMAT_S24_BE)
-#define SNDRV_PCM_FMTBIT_U24_LE (1ULL << SNDRV_PCM_FORMAT_U24_LE)
-#define SNDRV_PCM_FMTBIT_U24_BE (1ULL << SNDRV_PCM_FORMAT_U24_BE)
-#define SNDRV_PCM_FMTBIT_S32_LE (1ULL << SNDRV_PCM_FORMAT_S32_LE)
-#define SNDRV_PCM_FMTBIT_S32_BE (1ULL << SNDRV_PCM_FORMAT_S32_BE)
-#define SNDRV_PCM_FMTBIT_U32_LE (1ULL << SNDRV_PCM_FORMAT_U32_LE)
-#define SNDRV_PCM_FMTBIT_U32_BE (1ULL << SNDRV_PCM_FORMAT_U32_BE)
-#define SNDRV_PCM_FMTBIT_FLOAT_LE (1ULL << SNDRV_PCM_FORMAT_FLOAT_LE)
-#define SNDRV_PCM_FMTBIT_FLOAT_BE (1ULL << SNDRV_PCM_FORMAT_FLOAT_BE)
-#define SNDRV_PCM_FMTBIT_FLOAT64_LE (1ULL << SNDRV_PCM_FORMAT_FLOAT64_LE)
-#define SNDRV_PCM_FMTBIT_FLOAT64_BE (1ULL << SNDRV_PCM_FORMAT_FLOAT64_BE)
-#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE (1ULL << SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
-#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE (1ULL << SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE)
-#define SNDRV_PCM_FMTBIT_MU_LAW (1ULL << SNDRV_PCM_FORMAT_MU_LAW)
-#define SNDRV_PCM_FMTBIT_A_LAW (1ULL << SNDRV_PCM_FORMAT_A_LAW)
-#define SNDRV_PCM_FMTBIT_IMA_ADPCM (1ULL << SNDRV_PCM_FORMAT_IMA_ADPCM)
-#define SNDRV_PCM_FMTBIT_MPEG (1ULL << SNDRV_PCM_FORMAT_MPEG)
-#define SNDRV_PCM_FMTBIT_GSM (1ULL << SNDRV_PCM_FORMAT_GSM)
-#define SNDRV_PCM_FMTBIT_SPECIAL (1ULL << SNDRV_PCM_FORMAT_SPECIAL)
-#define SNDRV_PCM_FMTBIT_S24_3LE (1ULL << SNDRV_PCM_FORMAT_S24_3LE)
-#define SNDRV_PCM_FMTBIT_U24_3LE (1ULL << SNDRV_PCM_FORMAT_U24_3LE)
-#define SNDRV_PCM_FMTBIT_S24_3BE (1ULL << SNDRV_PCM_FORMAT_S24_3BE)
-#define SNDRV_PCM_FMTBIT_U24_3BE (1ULL << SNDRV_PCM_FORMAT_U24_3BE)
-#define SNDRV_PCM_FMTBIT_S20_3LE (1ULL << SNDRV_PCM_FORMAT_S20_3LE)
-#define SNDRV_PCM_FMTBIT_U20_3LE (1ULL << SNDRV_PCM_FORMAT_U20_3LE)
-#define SNDRV_PCM_FMTBIT_S20_3BE (1ULL << SNDRV_PCM_FORMAT_S20_3BE)
-#define SNDRV_PCM_FMTBIT_U20_3BE (1ULL << SNDRV_PCM_FORMAT_U20_3BE)
-#define SNDRV_PCM_FMTBIT_S18_3LE (1ULL << SNDRV_PCM_FORMAT_S18_3LE)
-#define SNDRV_PCM_FMTBIT_U18_3LE (1ULL << SNDRV_PCM_FORMAT_U18_3LE)
-#define SNDRV_PCM_FMTBIT_S18_3BE (1ULL << SNDRV_PCM_FORMAT_S18_3BE)
-#define SNDRV_PCM_FMTBIT_U18_3BE (1ULL << SNDRV_PCM_FORMAT_U18_3BE)
-#define SNDRV_PCM_FMTBIT_G723_24 (1ULL << SNDRV_PCM_FORMAT_G723_24)
-#define SNDRV_PCM_FMTBIT_G723_24_1B (1ULL << SNDRV_PCM_FORMAT_G723_24_1B)
-#define SNDRV_PCM_FMTBIT_G723_40 (1ULL << SNDRV_PCM_FORMAT_G723_40)
-#define SNDRV_PCM_FMTBIT_G723_40_1B (1ULL << SNDRV_PCM_FORMAT_G723_40_1B)
+#define _SNDRV_PCM_FMTBIT(fmt) (1ULL << (__force int)SNDRV_PCM_FORMAT_##fmt)
+#define SNDRV_PCM_FMTBIT_S8 _SNDRV_PCM_FMTBIT(S8)
+#define SNDRV_PCM_FMTBIT_U8 _SNDRV_PCM_FMTBIT(U8)
+#define SNDRV_PCM_FMTBIT_S16_LE _SNDRV_PCM_FMTBIT(S16_LE)
+#define SNDRV_PCM_FMTBIT_S16_BE _SNDRV_PCM_FMTBIT(S16_BE)
+#define SNDRV_PCM_FMTBIT_U16_LE _SNDRV_PCM_FMTBIT(U16_LE)
+#define SNDRV_PCM_FMTBIT_U16_BE _SNDRV_PCM_FMTBIT(U16_BE)
+#define SNDRV_PCM_FMTBIT_S24_LE _SNDRV_PCM_FMTBIT(S24_LE)
+#define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE)
+#define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE)
+#define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE)
+#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE)
+#define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE)
+#define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE)
+#define SNDRV_PCM_FMTBIT_U32_BE _SNDRV_PCM_FMTBIT(U32_BE)
+#define SNDRV_PCM_FMTBIT_FLOAT_LE _SNDRV_PCM_FMTBIT(FLOAT_LE)
+#define SNDRV_PCM_FMTBIT_FLOAT_BE _SNDRV_PCM_FMTBIT(FLOAT_BE)
+#define SNDRV_PCM_FMTBIT_FLOAT64_LE _SNDRV_PCM_FMTBIT(FLOAT64_LE)
+#define SNDRV_PCM_FMTBIT_FLOAT64_BE _SNDRV_PCM_FMTBIT(FLOAT64_BE)
+#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE _SNDRV_PCM_FMTBIT(IEC958_SUBFRAME_LE)
+#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE _SNDRV_PCM_FMTBIT(IEC958_SUBFRAME_BE)
+#define SNDRV_PCM_FMTBIT_MU_LAW _SNDRV_PCM_FMTBIT(MU_LAW)
+#define SNDRV_PCM_FMTBIT_A_LAW _SNDRV_PCM_FMTBIT(A_LAW)
+#define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM)
+#define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG)
+#define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM)
+#define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL)
+#define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE)
+#define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE)
+#define SNDRV_PCM_FMTBIT_S24_3BE _SNDRV_PCM_FMTBIT(S24_3BE)
+#define SNDRV_PCM_FMTBIT_U24_3BE _SNDRV_PCM_FMTBIT(U24_3BE)
+#define SNDRV_PCM_FMTBIT_S20_3LE _SNDRV_PCM_FMTBIT(S20_3LE)
+#define SNDRV_PCM_FMTBIT_U20_3LE _SNDRV_PCM_FMTBIT(U20_3LE)
+#define SNDRV_PCM_FMTBIT_S20_3BE _SNDRV_PCM_FMTBIT(S20_3BE)
+#define SNDRV_PCM_FMTBIT_U20_3BE _SNDRV_PCM_FMTBIT(U20_3BE)
+#define SNDRV_PCM_FMTBIT_S18_3LE _SNDRV_PCM_FMTBIT(S18_3LE)
+#define SNDRV_PCM_FMTBIT_U18_3LE _SNDRV_PCM_FMTBIT(U18_3LE)
+#define SNDRV_PCM_FMTBIT_S18_3BE _SNDRV_PCM_FMTBIT(S18_3BE)
+#define SNDRV_PCM_FMTBIT_U18_3BE _SNDRV_PCM_FMTBIT(U18_3BE)
+#define SNDRV_PCM_FMTBIT_G723_24 _SNDRV_PCM_FMTBIT(G723_24)
+#define SNDRV_PCM_FMTBIT_G723_24_1B _SNDRV_PCM_FMTBIT(G723_24_1B)
+#define SNDRV_PCM_FMTBIT_G723_40 _SNDRV_PCM_FMTBIT(G723_40)
+#define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B)
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
@@ -490,7 +491,7 @@ int snd_pcm_info_user(struct snd_pcm_substream *substream,
int snd_pcm_status(struct snd_pcm_substream *substream,
struct snd_pcm_status *status);
int snd_pcm_start(struct snd_pcm_substream *substream);
-int snd_pcm_stop(struct snd_pcm_substream *substream, int status);
+int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status);
int snd_pcm_drain_done(struct snd_pcm_substream *substream);
#ifdef CONFIG_PM
int snd_pcm_suspend(struct snd_pcm_substream *substream);
@@ -748,8 +749,8 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL];
}
-#define params_access(p) snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_ACCESS))
-#define params_format(p) snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_FORMAT))
+#define params_access(p) ((__force snd_pcm_access_t)snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_ACCESS)))
+#define params_format(p) ((__force snd_pcm_format_t)snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_FORMAT)))
#define params_subformat(p) snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_SUBFORMAT))
#define params_channels(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min
#define params_rate(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_RATE)->min
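Sketch of a driver-side consumer: with params_format() now returning a typed snd_pcm_format_t, the switch below is sparse-clean (the helper name is hypothetical).

#include <sound/pcm.h>

static int example_sample_bits(struct snd_pcm_hw_params *params)
{
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		return 16;
	case SNDRV_PCM_FORMAT_S24_LE:
		return 24;
	case SNDRV_PCM_FORMAT_S32_LE:
		return 32;
	default:
		return -EINVAL;
	}
}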
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index d79894192ae3..9a155f9d0a12 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -15,67 +15,29 @@
#define FSI_PORT_A 0
#define FSI_PORT_B 1
-/* flags format
-
- * 0xABCDEEFF
- *
- * A: channel size for TDM (input)
- * B: channel size for TDM (ooutput)
- * C: inversion
- * D: mode
- * E: input format
- * F: output format
- */
-
#include <linux/clk.h>
#include <sound/soc.h>
-/* TDM channel */
-#define SH_FSI_SET_CH_I(x) ((x & 0xF) << 28)
-#define SH_FSI_SET_CH_O(x) ((x & 0xF) << 24)
-
-#define SH_FSI_CH_IMASK 0xF0000000
-#define SH_FSI_CH_OMASK 0x0F000000
-#define SH_FSI_GET_CH_I(x) ((x & SH_FSI_CH_IMASK) >> 28)
-#define SH_FSI_GET_CH_O(x) ((x & SH_FSI_CH_OMASK) >> 24)
-
-/* clock inversion */
-#define SH_FSI_INVERSION_MASK 0x00F00000
-#define SH_FSI_LRM_INV (1 << 20)
-#define SH_FSI_BRM_INV (1 << 21)
-#define SH_FSI_LRS_INV (1 << 22)
-#define SH_FSI_BRS_INV (1 << 23)
-
-/* mode */
-#define SH_FSI_MODE_MASK 0x000F0000
-#define SH_FSI_IN_SLAVE_MODE (1 << 16) /* default master mode */
-#define SH_FSI_OUT_SLAVE_MODE (1 << 17) /* default master mode */
-
-/* DI format */
-#define SH_FSI_FMT_MASK 0x000000FF
-#define SH_FSI_IFMT(x) (((SH_FSI_FMT_ ## x) & SH_FSI_FMT_MASK) << 8)
-#define SH_FSI_OFMT(x) (((SH_FSI_FMT_ ## x) & SH_FSI_FMT_MASK) << 0)
-#define SH_FSI_GET_IFMT(x) ((x >> 8) & SH_FSI_FMT_MASK)
-#define SH_FSI_GET_OFMT(x) ((x >> 0) & SH_FSI_FMT_MASK)
-
-#define SH_FSI_FMT_MONO 0
-#define SH_FSI_FMT_MONO_DELAY 1
-#define SH_FSI_FMT_PCM 2
-#define SH_FSI_FMT_I2S 3
-#define SH_FSI_FMT_TDM 4
-#define SH_FSI_FMT_TDM_DELAY 5
-#define SH_FSI_FMT_SPDIF 6
-
-
-#define SH_FSI_IFMT_TDM_CH(x) \
- (SH_FSI_IFMT(TDM) | SH_FSI_SET_CH_I(x))
-#define SH_FSI_IFMT_TDM_DELAY_CH(x) \
- (SH_FSI_IFMT(TDM_DELAY) | SH_FSI_SET_CH_I(x))
+/*
+ * flags format
+ *
+ * 0x000000BA
+ *
+ * A: inversion
+ * B: format mode
+ */
-#define SH_FSI_OFMT_TDM_CH(x) \
- (SH_FSI_OFMT(TDM) | SH_FSI_SET_CH_O(x))
-#define SH_FSI_OFMT_TDM_DELAY_CH(x) \
- (SH_FSI_OFMT(TDM_DELAY) | SH_FSI_SET_CH_O(x))
+/* A: clock inversion */
+#define SH_FSI_INVERSION_MASK 0x0000000F
+#define SH_FSI_LRM_INV (1 << 0)
+#define SH_FSI_BRM_INV (1 << 1)
+#define SH_FSI_LRS_INV (1 << 2)
+#define SH_FSI_BRS_INV (1 << 3)
+
+/* B: format mode */
+#define SH_FSI_FMT_MASK 0x000000F0
+#define SH_FSI_FMT_DAI (0 << 4)
+#define SH_FSI_FMT_SPDIF (1 << 4)
/*
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 8031769ac485..979ed84e07d6 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -157,6 +157,18 @@
.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
+/* additional sequencing control within an event type */
+#define SND_SOC_DAPM_PGA_S(wname, wsubseq, wreg, wshift, winvert, \
+ wevent, wflags) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
+ .invert = winvert, .event = wevent, .event_flags = wflags, \
+ .subseq = wsubseq}
+#define SND_SOC_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, winvert, wevent, \
+ wflags) \
+{ .id = snd_soc_dapm_supply, .name = wname, .reg = wreg, \
+ .shift = wshift, .invert = winvert, .event = wevent, \
+ .event_flags = wflags, .subseq = wsubseq}
+
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
@@ -450,6 +462,7 @@ struct snd_soc_dapm_widget {
unsigned char ext:1; /* has external widgets */
unsigned char force:1; /* force state */
unsigned char ignore_suspend:1; /* kept enabled over suspend */
+ int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -487,6 +500,9 @@ struct snd_soc_dapm_context {
struct snd_soc_dapm_update *update;
+ void (*seq_notifier)(struct snd_soc_dapm_context *,
+ enum snd_soc_dapm_type, int);
+
struct device *dev; /* from parent - for debug */
struct snd_soc_codec *codec; /* parent codec */
struct snd_soc_card *card; /* parent card */
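For illustration only (not part of this patch): a hedged sketch of the new subsequence macros, ordering two supply widgets within a single DAPM power step. EXAMPLE_PWR_REG and the widget names are hypothetical.

#define EXAMPLE_PWR_REG 0x02	/* hypothetical codec power register */

static const struct snd_soc_dapm_widget example_widgets[] = {
	/* subseq 0: charge pump is sequenced before other supplies */
	SND_SOC_DAPM_SUPPLY_S("Charge Pump", 0, EXAMPLE_PWR_REG, 0, 0, NULL, 0),
	/* subseq 1: VMID reference follows once the charge pump is up */
	SND_SOC_DAPM_SUPPLY_S("VMID", 1, EXAMPLE_PWR_REG, 1, 0, NULL, 0),
};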
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 74921f20a1d8..65d865f7e8c0 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -234,6 +234,7 @@ struct snd_soc_codec;
struct snd_soc_codec_driver;
struct soc_enum;
struct snd_soc_jack;
+struct snd_soc_jack_zone;
struct snd_soc_jack_pin;
struct snd_soc_cache_ops;
#include <sound/soc-dapm.h>
@@ -258,6 +259,11 @@ enum snd_soc_compress_type {
SND_SOC_RBTREE_COMPRESSION
};
+int snd_soc_register_card(struct snd_soc_card *card);
+int snd_soc_unregister_card(struct snd_soc_card *card);
+int snd_soc_suspend(struct device *dev);
+int snd_soc_resume(struct device *dev);
+int snd_soc_poweroff(struct device *dev);
int snd_soc_register_platform(struct device *dev,
struct snd_soc_platform_driver *platform_drv);
void snd_soc_unregister_platform(struct device *dev);
@@ -265,7 +271,8 @@ int snd_soc_register_codec(struct device *dev,
const struct snd_soc_codec_driver *codec_drv,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_codec(struct device *dev);
-int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg);
+int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
+ unsigned int reg);
int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
int addr_bits, int data_bits,
enum snd_soc_control_type control);
@@ -276,6 +283,10 @@ int snd_soc_cache_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value);
int snd_soc_cache_read(struct snd_soc_codec *codec,
unsigned int reg, unsigned int *value);
+int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
+ unsigned int reg);
+int snd_soc_default_readable_register(struct snd_soc_codec *codec,
+ unsigned int reg);
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -297,6 +308,9 @@ void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
struct notifier_block *nb);
void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
struct notifier_block *nb);
+int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_zone *zones);
+int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage);
#ifdef CONFIG_GPIOLIB
int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_gpio *gpios);
@@ -367,6 +381,22 @@ int snd_soc_put_volsw_2r_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
/**
+ * struct snd_soc_reg_access - Describes whether a given register is
+ * readable, writable or volatile.
+ *
+ * @reg: the register number
+ * @read: whether this register is readable
+ * @write: whether this register is writable
+ * @vol: whether this register is volatile
+ */
+struct snd_soc_reg_access {
+ u16 reg;
+ u16 read;
+ u16 write;
+ u16 vol;
+};
+
+/**
* struct snd_soc_jack_pin - Describes a pin to update based on jack detection
*
* @pin: name of the pin to update
@@ -381,6 +411,24 @@ struct snd_soc_jack_pin {
};
/**
+ * struct snd_soc_jack_zone - Describes voltage zones of jack detection
+ *
+ * @min_mv: start voltage in mv
+ * @max_mv: end voltage in mv
+ * @jack_type: type of jack that is expected for this voltage
+ * @debounce_time: debounce time for the jack; the codec driver should wait
+ *                 for this duration before reading the ADC for voltages
+ * @list: list container
+ */
+struct snd_soc_jack_zone {
+ unsigned int min_mv;
+ unsigned int max_mv;
+ unsigned int jack_type;
+ unsigned int debounce_time;
+ struct list_head list;
+};
+
+/**
* struct snd_soc_jack_gpio - Describes a gpio pin for jack detection
*
* @gpio: gpio number
@@ -388,6 +436,10 @@ struct snd_soc_jack_pin {
* @report: value to report when jack detected
* @invert: report presence in low state
* @debouce_time: debouce time in ms
+ * @wake: enable as wake source
+ * @jack_status_check: callback function which overrides the detection
+ * to provide more complex checks (e.g. reading an
+ * ADC).
*/
#ifdef CONFIG_GPIOLIB
struct snd_soc_jack_gpio {
@@ -396,6 +448,8 @@ struct snd_soc_jack_gpio {
int report;
int invert;
int debounce_time;
+ bool wake;
+
struct snd_soc_jack *jack;
struct delayed_work work;
@@ -409,6 +463,7 @@ struct snd_soc_jack {
struct list_head pins;
int status;
struct blocking_notifier_head notifier;
+ struct list_head jack_zones;
};
/* SoC PCM stream information */
@@ -459,18 +514,22 @@ struct snd_soc_codec {
struct list_head card_list;
int num_dai;
enum snd_soc_compress_type compress_type;
+ size_t reg_size; /* reg_cache_size * reg_word_size */
+ int (*volatile_register)(struct snd_soc_codec *, unsigned int);
+ int (*readable_register)(struct snd_soc_codec *, unsigned int);
/* runtime */
struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
unsigned int active;
- unsigned int cache_only:1; /* Suppress writes to hardware */
- unsigned int cache_sync:1; /* Cache needs to be synced to hardware */
+ unsigned int cache_bypass:1; /* Suppress access to the cache */
unsigned int suspended:1; /* Codec is in suspend PM state */
unsigned int probed:1; /* Codec has been probed */
unsigned int ac97_registered:1; /* Codec has been AC97 registered */
unsigned int ac97_created:1; /* Codec has been created by SoC */
unsigned int sysfs_registered:1; /* codec has been sysfs registered */
unsigned int cache_init:1; /* codec cache has been initialized */
+ u32 cache_only; /* Suppress writes to hardware */
+ u32 cache_sync; /* Cache needs to be synced to hardware */
/* codec IO */
void *control_data; /* codec control (i2c/3wire) data */
@@ -508,17 +567,22 @@ struct snd_soc_codec_driver {
int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
int (*display_register)(struct snd_soc_codec *, char *,
size_t, unsigned int);
- int (*volatile_register)(unsigned int);
- int (*readable_register)(unsigned int);
+ int (*volatile_register)(struct snd_soc_codec *, unsigned int);
+ int (*readable_register)(struct snd_soc_codec *, unsigned int);
short reg_cache_size;
short reg_cache_step;
short reg_word_size;
const void *reg_cache_default;
+ short reg_access_size;
+ const struct snd_soc_reg_access *reg_access_default;
enum snd_soc_compress_type compress_type;
/* codec bias level */
int (*set_bias_level)(struct snd_soc_codec *,
enum snd_soc_bias_level level);
+
+ void (*seq_notifier)(struct snd_soc_dapm_context *,
+ enum snd_soc_dapm_type, int);
};
/* SoC platform interface */
@@ -617,15 +681,15 @@ struct snd_soc_card {
bool instantiated;
- int (*probe)(struct platform_device *pdev);
- int (*remove)(struct platform_device *pdev);
+ int (*probe)(struct snd_soc_card *card);
+ int (*remove)(struct snd_soc_card *card);
/* the pre and post PM functions are used to do any PM work before and
* after the codec and DAI's do any PM work. */
- int (*suspend_pre)(struct platform_device *pdev, pm_message_t state);
- int (*suspend_post)(struct platform_device *pdev, pm_message_t state);
- int (*resume_pre)(struct platform_device *pdev);
- int (*resume_post)(struct platform_device *pdev);
+ int (*suspend_pre)(struct snd_soc_card *card);
+ int (*suspend_post)(struct snd_soc_card *card);
+ int (*resume_pre)(struct snd_soc_card *card);
+ int (*resume_post)(struct snd_soc_card *card);
/* callbacks */
int (*set_bias_level)(struct snd_soc_card *,
@@ -670,6 +734,8 @@ struct snd_soc_card {
struct dentry *debugfs_pop_time;
#endif
u32 pop_time;
+
+ void *drvdata;
};
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
@@ -721,6 +787,17 @@ unsigned int snd_soc_write(struct snd_soc_codec *codec,
/* device driver data */
+static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ void *data)
+{
+ card->drvdata = data;
+}
+
+static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
+{
+ return card->drvdata;
+}
+
static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
void *data)
{
@@ -754,6 +831,22 @@ static inline void *snd_soc_pcm_get_drvdata(struct snd_soc_pcm_runtime *rtd)
return dev_get_drvdata(&rtd->dev);
}
+static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
+{
+ INIT_LIST_HEAD(&card->dai_dev_list);
+ INIT_LIST_HEAD(&card->codec_dev_list);
+ INIT_LIST_HEAD(&card->platform_dev_list);
+ INIT_LIST_HEAD(&card->widgets);
+ INIT_LIST_HEAD(&card->paths);
+ INIT_LIST_HEAD(&card->dapm_list);
+}
+
#include <sound/soc-dai.h>
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *snd_soc_debugfs_root;
+#endif
+
+extern const struct dev_pm_ops snd_soc_pm_ops;
+
#endif
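For illustration only (not part of this patch): a hedged sketch of the new jack-zone API as a codec or machine driver might use it. The millivolt ranges and debounce values are made up; the SND_JACK_* types come from <sound/jack.h>.

static struct snd_soc_jack_zone example_zones[] = {
	{ .min_mv = 0,   .max_mv = 600,  .jack_type = SND_JACK_HEADPHONE,
	  .debounce_time = 100 },
	{ .min_mv = 601, .max_mv = 2000, .jack_type = SND_JACK_HEADSET,
	  .debounce_time = 100 },
};

static int example_register_zones(struct snd_soc_jack *jack)
{
	/* later, snd_soc_jack_get_type() maps an ADC reading in mV back
	 * to one of these jack types */
	return snd_soc_jack_add_zones(jack, ARRAY_SIZE(example_zones),
				      example_zones);
}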
diff --git a/include/sound/version.h b/include/sound/version.h
index bf69a5b7e65f..8fc5321e1ecc 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
/* include/version.h */
-#define CONFIG_SND_VERSION "1.0.23"
+#define CONFIG_SND_VERSION "1.0.24"
#define CONFIG_SND_DATE ""
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h
index b4a0db2307ef..cf7ccb76a8de 100644
--- a/include/sound/wm8903.h
+++ b/include/sound/wm8903.h
@@ -17,13 +17,9 @@
/*
* R6 (0x06) - Mic Bias Control 0
*/
-#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
+#define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */
#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
@@ -37,6 +33,21 @@
#define WM8903_MICBIAS_ENA_WIDTH 1 /* MICBIAS_ENA */
/*
+ * WM8903_GPn_FN values
+ *
+ * See datasheets for list of valid values per pin
+ */
+#define WM8903_GPn_FN_GPIO_OUTPUT 0
+#define WM8903_GPn_FN_BCLK 1
+#define WM8903_GPn_FN_IRQ_OUTPT 2
+#define WM8903_GPn_FN_GPIO_INPUT 3
+#define WM8903_GPn_FN_MICBIAS_CURRENT_DETECT 4
+#define WM8903_GPn_FN_MICBIAS_SHORT_DETECT 5
+#define WM8903_GPn_FN_DMIC_LR_CLK_OUTPUT 6
+#define WM8903_GPn_FN_FLL_LOCK_OUTPUT 8
+#define WM8903_GPn_FN_FLL_CLOCK_OUTPUT 9
+
+/*
* R116 (0x74) - GPIO Control 1
*/
#define WM8903_GP1_FN_MASK 0x1F00 /* GP1_FN - [12:8] */
@@ -231,6 +242,8 @@
#define WM8903_GP5_DB_SHIFT 0 /* GP5_DB */
#define WM8903_GP5_DB_WIDTH 1 /* GP5_DB */
+#define WM8903_NUM_GPIO 5
+
struct wm8903_platform_data {
bool irq_active_low; /* Set if IRQ active low, default high */
@@ -243,7 +256,8 @@ struct wm8903_platform_data {
int micdet_delay; /* Delay after microphone detection (ms) */
- u32 gpio_cfg[5]; /* Default register values for GPIO pin mux */
+ int gpio_base;
+ u32 gpio_cfg[WM8903_NUM_GPIO]; /* Default register values for GPIO pin mux */
};
#endif
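For illustration only (not part of this patch): a hedged sketch of board platform data using the new gpio_base field and the GPn_FN values. The GPIO base number is hypothetical, and treating an all-zero gpio_cfg entry as "leave at default" is an assumption of this example.

static struct wm8903_platform_data example_wm8903_pdata = {
	.irq_active_low	= false,
	.gpio_base	= 200,	/* hypothetical GPIO number range */
	.gpio_cfg	= {
		/* GP1: route mic-bias current detect out on the pin */
		[0] = WM8903_GPn_FN_MICBIAS_CURRENT_DETECT << WM8903_GP1_FN_SHIFT,
		/* remaining WM8903_NUM_GPIO - 1 entries left as defaults */
	},
};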
diff --git a/include/sound/wm9081.h b/include/sound/wm9081.h
index e173ddbf6bd4..f34b0b1716d8 100644
--- a/include/sound/wm9081.h
+++ b/include/sound/wm9081.h
@@ -17,9 +17,12 @@ struct wm9081_retune_mobile_setting {
u16 config[20];
};
-struct wm9081_retune_mobile_config {
- struct wm9081_retune_mobile_setting *configs;
- int num_configs;
+struct wm9081_pdata {
+ bool irq_high; /* IRQ is active high */
+ bool irq_cmos; /* IRQ is in CMOS mode */
+
+ struct wm9081_retune_mobile_setting *retune_configs;
+ int num_retune_configs;
};
#endif
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 246940511579..2e8ec51f0615 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -135,6 +135,8 @@ extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
struct se_task *,
struct se_device *);
+extern void transport_remove_task_from_execute_queue(struct se_task *,
+ struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 186e84db4b54..ae973d2e27a1 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -229,6 +229,31 @@ TRACE_EVENT(snd_soc_jack_notify,
TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
);
+TRACE_EVENT(snd_soc_cache_sync,
+
+ TP_PROTO(struct snd_soc_codec *codec, const char *type,
+ const char *status),
+
+ TP_ARGS(codec, type, status),
+
+ TP_STRUCT__entry(
+ __string( name, codec->name )
+ __string( status, status )
+ __string( type, type )
+ __field( int, id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, codec->name);
+ __assign_str(status, status);
+ __assign_str(type, type);
+ __entry->id = codec->id;
+ ),
+
+ TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
+ (int)__entry->id, __get_str(type), __get_str(status))
+);
+
#endif /* _TRACE_ASOC_H */
/* This part must be outside protection */
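For illustration only (not part of this patch): the generated trace_snd_soc_cache_sync() helper would be called from the ASoC cache code roughly like this; the "rbtree"/"start"/"end" strings are example values only.

static void example_sync_cache(struct snd_soc_codec *codec)
{
	trace_snd_soc_cache_sync(codec, "rbtree", "start");
	/* ... write dirty cached registers back to the hardware ... */
	trace_snd_soc_cache_sync(codec, "rbtree", "end");
}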
diff --git a/include/xen/events.h b/include/xen/events.h
index 00f53ddcc062..d3b9010ee96a 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -23,6 +23,12 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
+int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+ unsigned int remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
/*
* Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -75,11 +81,9 @@ int xen_allocate_pirq(unsigned gsi, int shareable, char *name);
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
#ifdef CONFIG_PCI_MSI
-/* Allocate an irq and a pirq to be used with MSIs. */
-#define XEN_ALLOC_PIRQ (1 << 0)
-#define XEN_ALLOC_IRQ (1 << 1)
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc_mask);
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type);
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ int pirq, int vector, const char *name);
#endif
/* De-allocates the above mentioned physical interrupt. */
diff --git a/include/xen/gntalloc.h b/include/xen/gntalloc.h
new file mode 100644
index 000000000000..76bd58065f4f
--- /dev/null
+++ b/include/xen/gntalloc.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ * gntalloc.h
+ *
+ * Interface to /dev/xen/gntalloc.
+ *
+ * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * This file is in the public domain.
+ */
+
+#ifndef __LINUX_PUBLIC_GNTALLOC_H__
+#define __LINUX_PUBLIC_GNTALLOC_H__
+
+/*
+ * Allocates a new page and creates a new grant reference.
+ */
+#define IOCTL_GNTALLOC_ALLOC_GREF \
+_IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref))
+struct ioctl_gntalloc_alloc_gref {
+ /* IN parameters */
+ /* The ID of the domain to be given access to the grants. */
+ uint16_t domid;
+ /* Flags for this mapping */
+ uint16_t flags;
+ /* Number of pages to map */
+ uint32_t count;
+ /* OUT parameters */
+ /* The offset to be used on a subsequent call to mmap(). */
+ uint64_t index;
+ /* The grant references of the newly created grant, one per page */
+ /* Variable size, depending on count */
+ uint32_t gref_ids[1];
+};
+
+#define GNTALLOC_FLAG_WRITABLE 1
+
+/*
+ * Deallocates the grant reference, allowing the associated page to be freed if
+ * no other domains are using it.
+ */
+#define IOCTL_GNTALLOC_DEALLOC_GREF \
+_IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref))
+struct ioctl_gntalloc_dealloc_gref {
+ /* IN parameters */
+ /* The offset returned in the map operation */
+ uint64_t index;
+ /* Number of references to unmap */
+ uint32_t count;
+};
+
+/*
+ * Sets up an unmap notification within the page, so that the other side can do
+ * cleanup if this side crashes. Required to implement cross-domain robust
+ * mutexes or close notification on communication channels.
+ *
+ * Each mapped page only supports one notification; multiple calls referring to
+ * the same page overwrite the previous notification. You must clear the
+ * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
+ * to occur.
+ */
+#define IOCTL_GNTALLOC_SET_UNMAP_NOTIFY \
+_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntalloc_unmap_notify))
+struct ioctl_gntalloc_unmap_notify {
+ /* IN parameters */
+ /* Offset in the file descriptor for a byte within the page (same as
+ * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
+ * be cleared. Otherwise, it can be any byte in the page whose
+ * notification we are adjusting.
+ */
+ uint64_t index;
+ /* Action(s) to take on unmap */
+ uint32_t action;
+ /* Event channel to notify */
+ uint32_t event_channel_port;
+};
+
+/* Clear (set to zero) the byte specified by index */
+#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
+/* Send an interrupt on the indicated event channel */
+#define UNMAP_NOTIFY_SEND_EVENT 0x2
+
+#endif /* __LINUX_PUBLIC_GNTALLOC_H__ */
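For illustration only (not part of this patch): a minimal user-space sketch of the allocation ioctl. It assumes this header is visible to user space and that /dev/xen/gntalloc exists; error handling and cleanup are trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntalloc.h>

static int example_share_page(uint16_t domid, uint64_t *index)
{
	struct ioctl_gntalloc_alloc_gref op = {
		.domid = domid,
		.flags = GNTALLOC_FLAG_WRITABLE,
		.count = 1,			/* one shared page */
	};
	int fd = open("/dev/xen/gntalloc", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op) < 0) {
		close(fd);
		return -1;
	}
	*index = op.index;	/* offset for a later mmap() of the page */
	return op.gref_ids[0];	/* grant reference to hand to the peer domain */
}

(The file descriptor is deliberately kept open so the page can still be mmap()ed.)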
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
index eb23f4188f5a..5304bd3c84c5 100644
--- a/include/xen/gntdev.h
+++ b/include/xen/gntdev.h
@@ -116,4 +116,35 @@ struct ioctl_gntdev_set_max_grants {
uint32_t count;
};
+/*
+ * Sets up an unmap notification within the page, so that the other side can do
+ * cleanup if this side crashes. Required to implement cross-domain robust
+ * mutexes or close notification on communication channels.
+ *
+ * Each mapped page only supports one notification; multiple calls referring to
+ * the same page overwrite the previous notification. You must clear the
+ * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
+ * to occur.
+ */
+#define IOCTL_GNTDEV_SET_UNMAP_NOTIFY \
+_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntdev_unmap_notify))
+struct ioctl_gntdev_unmap_notify {
+ /* IN parameters */
+ /* Offset in the file descriptor for a byte within the page (same as
+ * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
+ * be cleared. Otherwise, it can be any byte in the page whose
+ * notification we are adjusting.
+ */
+ uint64_t index;
+ /* Action(s) to take on unmap */
+ uint32_t action;
+ /* Event channel to notify */
+ uint32_t event_channel_port;
+};
+
+/* Clear (set to zero) the byte specified by index */
+#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
+/* Send an interrupt on the indicated event channel */
+#define UNMAP_NOTIFY_SEND_EVENT 0x2
+
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index c2d1fa4dc1ee..61e523af3c46 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t;
*/
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-struct blkif_request {
- uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t id; /* private guest value, echoed in resp */
+struct blkif_request_rw {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment {
grant_ref_t gref; /* reference to I/O buffer frame */
@@ -65,6 +61,16 @@ struct blkif_request {
} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
+struct blkif_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ union {
+ struct blkif_request_rw rw;
+ } u;
+};
+
struct blkif_response {
uint64_t id; /* copied from request */
uint8_t operation; /* copied from request */
@@ -91,4 +97,25 @@ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
#define VDISK_REMOVABLE 0x2
#define VDISK_READONLY 0x4
+/* Xen-defined major numbers for virtual disks; they look strangely
+ * familiar */
+#define XEN_IDE0_MAJOR 3
+#define XEN_IDE1_MAJOR 22
+#define XEN_SCSI_DISK0_MAJOR 8
+#define XEN_SCSI_DISK1_MAJOR 65
+#define XEN_SCSI_DISK2_MAJOR 66
+#define XEN_SCSI_DISK3_MAJOR 67
+#define XEN_SCSI_DISK4_MAJOR 68
+#define XEN_SCSI_DISK5_MAJOR 69
+#define XEN_SCSI_DISK6_MAJOR 70
+#define XEN_SCSI_DISK7_MAJOR 71
+#define XEN_SCSI_DISK8_MAJOR 128
+#define XEN_SCSI_DISK9_MAJOR 129
+#define XEN_SCSI_DISK10_MAJOR 130
+#define XEN_SCSI_DISK11_MAJOR 131
+#define XEN_SCSI_DISK12_MAJOR 132
+#define XEN_SCSI_DISK13_MAJOR 133
+#define XEN_SCSI_DISK14_MAJOR 134
+#define XEN_SCSI_DISK15_MAJOR 135
+
#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
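For illustration only (not part of this patch): with the request split, frontends and backends now reach the read/write fields through the new union. A hedged sketch (local names made up; the segment fields beyond gref come from the unchanged part of the header):

static void example_fill_read(struct blkif_request *req, uint64_t id,
			      blkif_vdev_t handle, blkif_sector_t sector,
			      grant_ref_t gref)
{
	req->operation   = BLKIF_OP_READ;
	req->nr_segments = 1;
	req->handle      = handle;
	req->id          = id;
	req->u.rw.sector_number     = sector;	/* was req->sector_number */
	req->u.rw.seg[0].gref       = gref;	/* was req->seg[0].gref */
	req->u.rw.seg[0].first_sect = 0;
	req->u.rw.seg[0].last_sect  = 7;	/* whole 4 KiB page, 512 B sectors */
}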
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 5fec575a800a..dd55dac340de 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -65,6 +65,39 @@ struct sched_poll {
DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown structure.
+ */
+#define SCHEDOP_remote_shutdown 4
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_xxx reason */
+};
+
+/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code 5
+
+/*
+ * Set up, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog structure.
+ * With id == 0, setup a domain watchdog timer to cause domain shutdown
+ * after timeout, returns watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog 6
+struct sched_watchdog {
+ uint32_t id; /* watchdog ID */
+ uint32_t timeout; /* timeout */
+};
+
+/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
@@ -73,5 +106,6 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
#endif /* __XEN_PUBLIC_SCHED_H__ */
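For illustration only (not part of this patch): a hedged sketch of driving the new watchdog op from a paravirtualized guest. HYPERVISOR_sched_op() is assumed to be the architecture's hypercall wrapper.

static uint32_t example_watchdog_start(uint32_t timeout)
{
	struct sched_watchdog wd = { .id = 0, .timeout = timeout };

	/* id == 0: create a new domain watchdog, returns its id */
	return HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
}

static void example_watchdog_kick(uint32_t id, uint32_t timeout)
{
	struct sched_watchdog wd = { .id = id, .timeout = timeout };

	/* id != 0 && timeout != 0: re-arm the existing watchdog */
	HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
}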
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 2befa3e2f1bc..b33257bc7e83 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -30,7 +30,7 @@
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
-#define __HYPERVISOR_sched_op 6
+#define __HYPERVISOR_sched_op_compat 6
#define __HYPERVISOR_dom0_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
@@ -52,7 +52,7 @@
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_acm_op 27
#define __HYPERVISOR_nmi_op 28
-#define __HYPERVISOR_sched_op_new 29
+#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 98b92154a264..03c85d7387fb 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,9 +5,9 @@
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
-void xen_pre_suspend(void);
-void xen_post_suspend(int suspend_cancelled);
-void xen_hvm_post_suspend(int suspend_cancelled);
+void xen_arch_pre_suspend(void);
+void xen_arch_post_suspend(int suspend_cancelled);
+void xen_arch_hvm_post_suspend(int suspend_cancelled);
void xen_mm_pin_all(void);
void xen_mm_unpin_all(void);
diff --git a/init/Kconfig b/init/Kconfig
index be788c0957d4..8fe6f268de12 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -683,6 +683,16 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
select this option (if, for some reason, they need to disable it
then noswapaccount does the trick).
+config CGROUP_PERF
+ bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
+ depends on PERF_EVENTS && CGROUPS
+ help
+ This option extends the per-cpu mode to restrict monitoring to
+	  threads which belong to the cgroup specified and run on the
+ designated cpu.
+
+ Say N if unsure.
+
menuconfig CGROUP_SCHED
bool "Group CPU scheduler"
depends on EXPERIMENTAL
@@ -728,9 +738,9 @@ config BLK_CGROUP
This option only enables generic Block IO controller infrastructure.
One needs to also enable actual IO controlling logic/policy. For
- enabling proportional weight division of disk bandwidth in CFQ seti
- CONFIG_CFQ_GROUP_IOSCHED=y and for enabling throttling policy set
- CONFIG_BLK_THROTTLE=y.
+ enabling proportional weight division of disk bandwidth in CFQ, set
+ CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set
+ CONFIG_BLK_DEV_THROTTLING=y.
See Documentation/cgroups/blkio-controller.txt for more information.
diff --git a/kernel/audit.c b/kernel/audit.c
index e4956244ae50..162e88e33bc9 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -74,6 +74,8 @@ static int audit_initialized;
int audit_enabled;
int audit_ever_enabled;
+EXPORT_SYMBOL_GPL(audit_enabled);
+
/* Default state when kernel boots without any parameters. */
static int audit_default;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b24d7027b83c..95362d15128c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4230,20 +4230,8 @@ void cgroup_post_fork(struct task_struct *child)
*/
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
- int i;
struct css_set *cg;
-
- if (run_callbacks && need_forkexit_callback) {
- /*
- * modular subsystems can't use callbacks, so no need to lock
- * the subsys array
- */
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss->exit)
- ss->exit(ss, tsk);
- }
- }
+ int i;
/*
* Unlink from the css_set task list if necessary.
@@ -4261,7 +4249,24 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
task_lock(tsk);
cg = tsk->cgroups;
tsk->cgroups = &init_css_set;
+
+ if (run_callbacks && need_forkexit_callback) {
+ /*
+ * modular subsystems can't use callbacks, so no need to lock
+ * the subsys array
+ */
+ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss->exit) {
+ struct cgroup *old_cgrp =
+ rcu_dereference_raw(cg->subsys[i])->cgroup;
+ struct cgroup *cgrp = task_cgroup(tsk, i);
+ ss->exit(ss, cgrp, old_cgrp, tsk);
+ }
+ }
+ }
task_unlock(tsk);
+
if (cg)
put_css_set_taskexit(cg);
}
@@ -4813,6 +4818,29 @@ css_get_next(struct cgroup_subsys *ss, int id,
return ret;
}
+/*
+ * get corresponding css from file open on cgroupfs directory
+ */
+struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
+{
+ struct cgroup *cgrp;
+ struct inode *inode;
+ struct cgroup_subsys_state *css;
+
+ inode = f->f_dentry->d_inode;
+ /* check in cgroup filesystem dir */
+ if (inode->i_op != &cgroup_dir_inode_operations)
+ return ERR_PTR(-EBADF);
+
+ if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
+ return ERR_PTR(-EINVAL);
+
+ /* get cgroup */
+ cgrp = __d_cgrp(f->f_dentry);
+ css = cgrp->subsys[id];
+ return css ? css : ERR_PTR(-ENOENT);
+}
+
#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
struct cgroup *cont)
diff --git a/kernel/compat.c b/kernel/compat.c
index c9e2ec0b34a8..38b1d2c1cbe8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -52,6 +52,64 @@ static int compat_put_timeval(struct compat_timeval __user *o,
put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}
+static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
+{
+ memset(txc, 0, sizeof(struct timex));
+
+ if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
+ __get_user(txc->modes, &utp->modes) ||
+ __get_user(txc->offset, &utp->offset) ||
+ __get_user(txc->freq, &utp->freq) ||
+ __get_user(txc->maxerror, &utp->maxerror) ||
+ __get_user(txc->esterror, &utp->esterror) ||
+ __get_user(txc->status, &utp->status) ||
+ __get_user(txc->constant, &utp->constant) ||
+ __get_user(txc->precision, &utp->precision) ||
+ __get_user(txc->tolerance, &utp->tolerance) ||
+ __get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
+ __get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
+ __get_user(txc->tick, &utp->tick) ||
+ __get_user(txc->ppsfreq, &utp->ppsfreq) ||
+ __get_user(txc->jitter, &utp->jitter) ||
+ __get_user(txc->shift, &utp->shift) ||
+ __get_user(txc->stabil, &utp->stabil) ||
+ __get_user(txc->jitcnt, &utp->jitcnt) ||
+ __get_user(txc->calcnt, &utp->calcnt) ||
+ __get_user(txc->errcnt, &utp->errcnt) ||
+ __get_user(txc->stbcnt, &utp->stbcnt))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
+{
+ if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
+ __put_user(txc->modes, &utp->modes) ||
+ __put_user(txc->offset, &utp->offset) ||
+ __put_user(txc->freq, &utp->freq) ||
+ __put_user(txc->maxerror, &utp->maxerror) ||
+ __put_user(txc->esterror, &utp->esterror) ||
+ __put_user(txc->status, &utp->status) ||
+ __put_user(txc->constant, &utp->constant) ||
+ __put_user(txc->precision, &utp->precision) ||
+ __put_user(txc->tolerance, &utp->tolerance) ||
+ __put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
+ __put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
+ __put_user(txc->tick, &utp->tick) ||
+ __put_user(txc->ppsfreq, &utp->ppsfreq) ||
+ __put_user(txc->jitter, &utp->jitter) ||
+ __put_user(txc->shift, &utp->shift) ||
+ __put_user(txc->stabil, &utp->stabil) ||
+ __put_user(txc->jitcnt, &utp->jitcnt) ||
+ __put_user(txc->calcnt, &utp->calcnt) ||
+ __put_user(txc->errcnt, &utp->errcnt) ||
+ __put_user(txc->stbcnt, &utp->stbcnt) ||
+ __put_user(txc->tai, &utp->tai))
+ return -EFAULT;
+ return 0;
+}
+
asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
struct timezone __user *tz)
{
@@ -617,6 +675,29 @@ long compat_sys_clock_gettime(clockid_t which_clock,
return err;
}
+long compat_sys_clock_adjtime(clockid_t which_clock,
+ struct compat_timex __user *utp)
+{
+ struct timex txc;
+ mm_segment_t oldfs;
+ int err, ret;
+
+ err = compat_get_timex(&txc, utp);
+ if (err)
+ return err;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
+ set_fs(oldfs);
+
+ err = compat_put_timex(utp, &txc);
+ if (err)
+ return err;
+
+ return ret;
+}
+
long compat_sys_clock_getres(clockid_t which_clock,
struct compat_timespec __user *tp)
{
@@ -951,58 +1032,17 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
+ int err, ret;
- if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
- __get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
+ err = compat_get_timex(&txc, utp);
+ if (err)
+ return err;
ret = do_adjtimex(&txc);
- if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
- __put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt) ||
- __put_user(txc.tai, &utp->tai))
- ret = -EFAULT;
+ err = compat_put_timex(utp, &txc);
+ if (err)
+ return err;
return ret;
}
diff --git a/kernel/cred.c b/kernel/cred.c
index 3a9d6dd53a6c..2343c132c5a7 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -35,7 +35,7 @@ static struct kmem_cache *cred_jar;
static struct thread_group_cred init_tgcred = {
.usage = ATOMIC_INIT(2),
.tgid = 0,
- .lock = SPIN_LOCK_UNLOCKED,
+ .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
};
#endif
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index bd3e8e29caa3..38a85428c70f 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -78,7 +78,7 @@ static unsigned int kdb_continue_catastrophic;
static kdbtab_t *kdb_commands;
#define KDB_BASE_CMD_MAX 50
static int kdb_max_commands = KDB_BASE_CMD_MAX;
-static kdbtab_t kdb_base_commands[50];
+static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX];
#define for_each_kdbcmd(cmd, num) \
for ((cmd) = kdb_base_commands, (num) = 0; \
num < kdb_max_commands; \
diff --git a/kernel/fork.c b/kernel/fork.c
index 25e429152ddc..05b92c457010 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -193,6 +193,7 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
+EXPORT_SYMBOL_GPL(__put_task_struct);
/*
* macro override instead of weak attribute alias, to workaround
diff --git a/kernel/futex.c b/kernel/futex.c
index b766d28accd6..64c38115c7b6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1556,10 +1556,10 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
/*
* We are here either because we stole the rtmutex from the
- * pending owner or we are the pending owner which failed to
- * get the rtmutex. We have to replace the pending owner TID
- * in the user space variable. This must be atomic as we have
- * to preserve the owner died bit here.
+ * previous highest priority waiter or we are the highest priority
+ * waiter but failed to get the rtmutex the first time.
+ * We have to replace the newowner TID in the user space variable.
+ * This must be atomic as we have to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the pi_state
* because we can fault here. Imagine swapped out pages or a fork
@@ -1608,8 +1608,8 @@ retry:
/*
* To handle the page fault we need to drop the hash bucket
- * lock here. That gives the other task (either the pending
- * owner itself or the task which stole the rtmutex) the
+ * lock here. That gives the other task (either the highest priority
+ * waiter itself or the task which stole the rtmutex) the
* chance to try the fixup of the pi_state. So once we are
* back from handling the fault we need to check the pi_state
* after reacquiring the hash bucket lock and before trying to
@@ -1685,18 +1685,20 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
/*
* pi_state is incorrect, some other task did a lock steal and
* we returned due to timeout or signal without taking the
- * rt_mutex. Too late. We can access the rt_mutex_owner without
- * locking, as the other task is now blocked on the hash bucket
- * lock. Fix the state up.
+ * rt_mutex. Too late.
*/
+ raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+ if (!owner)
+ owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+ raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
ret = fixup_pi_state_owner(uaddr, q, owner);
goto out;
}
/*
* Paranoia check. If we did not take the lock, then we should not be
- * the owner, nor the pending owner, of the rt_mutex.
+ * the owner of the rt_mutex.
*/
if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 70a298d6da71..b8cadf70b1fb 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -34,7 +34,7 @@ config GCOV_KERNEL
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
depends on GCOV_KERNEL
- depends on S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
+ depends on SUPERH || S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
default n
---help---
This options activates profiling for the entire kernel.
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 0c8d7c048615..57c4d33c9a9d 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -85,13 +85,8 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
ktime_t xtim, tomono;
struct timespec xts, tom;
- unsigned long seq;
- do {
- seq = read_seqbegin(&xtime_lock);
- xts = __current_kernel_time();
- tom = __get_wall_to_monotonic();
- } while (read_seqretry(&xtime_lock, seq));
+ get_xtime_and_monotonic_offset(&xts, &tom);
xtim = timespec_to_ktime(xts);
tomono = timespec_to_ktime(tom);
@@ -612,15 +607,11 @@ static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base;
struct timespec realtime_offset, wtm;
- unsigned long seq;
if (!hrtimer_hres_active())
return;
- do {
- seq = read_seqbegin(&xtime_lock);
- wtm = __get_wall_to_monotonic();
- } while (read_seqretry(&xtime_lock, seq));
+ get_xtime_and_monotonic_offset(&realtime_offset, &wtm);
set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
base = &__get_cpu_var(hrtimer_bases);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 8e42fec7686d..144db9dcfcde 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -1,5 +1,5 @@
config HAVE_GENERIC_HARDIRQS
- def_bool n
+ bool
if HAVE_GENERIC_HARDIRQS
menu "IRQ subsystem"
@@ -11,26 +11,35 @@ config GENERIC_HARDIRQS
# Select this to disable the deprecated stuff
config GENERIC_HARDIRQS_NO_DEPRECATED
- def_bool n
+ bool
+
+config GENERIC_HARDIRQS_NO_COMPAT
+ bool
# Options selectable by the architecture code
config HAVE_SPARSE_IRQ
- def_bool n
+ bool
config GENERIC_IRQ_PROBE
- def_bool n
+ bool
+
+config GENERIC_IRQ_SHOW
+ bool
config GENERIC_PENDING_IRQ
- def_bool n
+ bool
config AUTO_IRQ_AFFINITY
- def_bool n
-
-config IRQ_PER_CPU
- def_bool n
+ bool
config HARDIRQS_SW_RESEND
- def_bool n
+ bool
+
+config IRQ_PREFLOW_FASTEOI
+ bool
+
+config IRQ_FORCED_THREADING
+ bool
config SPARSE_IRQ
bool "Support sparse irq numbering"
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 505798f86c36..394784c57060 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -17,7 +17,7 @@
/*
* Autodetection depends on the fact that any interrupt that
* comes in on to an unassigned handler will get stuck with
- * "IRQ_WAITING" cleared and the interrupt disabled.
+ * "IRQS_WAITING" cleared and the interrupt disabled.
*/
static DEFINE_MUTEX(probing_active);
@@ -32,7 +32,6 @@ unsigned long probe_irq_on(void)
{
struct irq_desc *desc;
unsigned long mask = 0;
- unsigned int status;
int i;
/*
@@ -46,13 +45,7 @@ unsigned long probe_irq_on(void)
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
- if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
- /*
- * An old-style architecture might still have
- * the handle_bad_irq handler there:
- */
- compat_irq_chip_set_default_handler(desc);
-
+ if (!desc->action && irq_settings_can_probe(desc)) {
/*
* Some chips need to know about probing in
* progress:
@@ -60,7 +53,7 @@ unsigned long probe_irq_on(void)
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
- desc->irq_data.chip->irq_startup(&desc->irq_data);
+ irq_startup(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -75,10 +68,12 @@ unsigned long probe_irq_on(void)
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
- if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
- desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->irq_data.chip->irq_startup(&desc->irq_data))
- desc->status |= IRQ_PENDING;
+ if (!desc->action && irq_settings_can_probe(desc)) {
+ desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
+ if (irq_startup(desc)) {
+ irq_compat_set_pending(desc);
+ desc->istate |= IRQS_PENDING;
+ }
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -93,13 +88,12 @@ unsigned long probe_irq_on(void)
*/
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
- status = desc->status;
- if (status & IRQ_AUTODETECT) {
+ if (desc->istate & IRQS_AUTODETECT) {
/* It triggered already - consider it spurious. */
- if (!(status & IRQ_WAITING)) {
- desc->status = status & ~IRQ_AUTODETECT;
- desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+ if (!(desc->istate & IRQS_WAITING)) {
+ desc->istate &= ~IRQS_AUTODETECT;
+ irq_shutdown(desc);
} else
if (i < 32)
mask |= 1 << i;
@@ -125,20 +119,18 @@ EXPORT_SYMBOL(probe_irq_on);
*/
unsigned int probe_irq_mask(unsigned long val)
{
- unsigned int status, mask = 0;
+ unsigned int mask = 0;
struct irq_desc *desc;
int i;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- if (i < 16 && !(status & IRQ_WAITING))
+ if (desc->istate & IRQS_AUTODETECT) {
+ if (i < 16 && !(desc->istate & IRQS_WAITING))
mask |= 1 << i;
- desc->status = status & ~IRQ_AUTODETECT;
- desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+ desc->istate &= ~IRQS_AUTODETECT;
+ irq_shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -169,20 +161,18 @@ int probe_irq_off(unsigned long val)
{
int i, irq_found = 0, nr_of_irqs = 0;
struct irq_desc *desc;
- unsigned int status;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
- status = desc->status;
- if (status & IRQ_AUTODETECT) {
- if (!(status & IRQ_WAITING)) {
+ if (desc->istate & IRQS_AUTODETECT) {
+ if (!(desc->istate & IRQS_WAITING)) {
if (!nr_of_irqs)
irq_found = i;
nr_of_irqs++;
}
- desc->status = status & ~IRQ_AUTODETECT;
- desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+ desc->istate &= ~IRQS_AUTODETECT;
+ irq_shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index baa5c4acad83..b5145654855f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -19,140 +19,110 @@
#include "internals.h"
/**
- * set_irq_chip - set the irq chip for an irq
+ * irq_set_chip - set the irq chip for an irq
* @irq: irq number
* @chip: pointer to irq chip description structure
*/
-int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
- if (!desc) {
- WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
+ if (!desc)
return -EINVAL;
- }
if (!chip)
chip = &no_irq_chip;
- raw_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->irq_data.chip = chip;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+ irq_put_desc_unlock(desc, flags);
return 0;
}
-EXPORT_SYMBOL(set_irq_chip);
+EXPORT_SYMBOL(irq_set_chip);
/**
- * set_irq_type - set the irq trigger type for an irq
+ * irq_set_type - set the irq trigger type for an irq
* @irq: irq number
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
-int set_irq_type(unsigned int irq, unsigned int type)
+int irq_set_irq_type(unsigned int irq, unsigned int type)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
- int ret = -ENXIO;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ int ret = 0;
- if (!desc) {
- printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
- return -ENODEV;
- }
+ if (!desc)
+ return -EINVAL;
type &= IRQ_TYPE_SENSE_MASK;
- if (type == IRQ_TYPE_NONE)
- return 0;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- ret = __irq_set_trigger(desc, irq, type);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ if (type != IRQ_TYPE_NONE)
+ ret = __irq_set_trigger(desc, irq, type);
+ irq_put_desc_busunlock(desc, flags);
return ret;
}
-EXPORT_SYMBOL(set_irq_type);
+EXPORT_SYMBOL(irq_set_irq_type);
/**
- * set_irq_data - set irq type data for an irq
+ * irq_set_handler_data - set irq handler data for an irq
* @irq: Interrupt number
* @data: Pointer to interrupt specific data
*
* Set the hardware irq controller data for an irq
*/
-int set_irq_data(unsigned int irq, void *data)
+int irq_set_handler_data(unsigned int irq, void *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
- if (!desc) {
- printk(KERN_ERR
- "Trying to install controller data for IRQ%d\n", irq);
+ if (!desc)
return -EINVAL;
- }
-
- raw_spin_lock_irqsave(&desc->lock, flags);
desc->irq_data.handler_data = data;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_put_desc_unlock(desc, flags);
return 0;
}
-EXPORT_SYMBOL(set_irq_data);
+EXPORT_SYMBOL(irq_set_handler_data);
/**
- * set_irq_msi - set MSI descriptor data for an irq
+ * irq_set_msi_desc - set MSI descriptor data for an irq
* @irq: Interrupt number
* @entry: Pointer to MSI descriptor data
*
* Set the MSI descriptor entry for an irq
*/
-int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
- if (!desc) {
- printk(KERN_ERR
- "Trying to install msi data for IRQ%d\n", irq);
+ if (!desc)
return -EINVAL;
- }
-
- raw_spin_lock_irqsave(&desc->lock, flags);
desc->irq_data.msi_desc = entry;
if (entry)
entry->irq = irq;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_put_desc_unlock(desc, flags);
return 0;
}
/**
- * set_irq_chip_data - set irq chip data for an irq
+ * irq_set_chip_data - set irq chip data for an irq
* @irq: Interrupt number
* @data: Pointer to chip specific data
*
* Set the hardware irq chip data for an irq
*/
-int set_irq_chip_data(unsigned int irq, void *data)
+int irq_set_chip_data(unsigned int irq, void *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
- if (!desc) {
- printk(KERN_ERR
- "Trying to install chip data for IRQ%d\n", irq);
- return -EINVAL;
- }
-
- if (!desc->irq_data.chip) {
- printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
+ if (!desc)
return -EINVAL;
- }
-
- raw_spin_lock_irqsave(&desc->lock, flags);
desc->irq_data.chip_data = data;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+ irq_put_desc_unlock(desc, flags);
return 0;
}
-EXPORT_SYMBOL(set_irq_chip_data);
+EXPORT_SYMBOL(irq_set_chip_data);
struct irq_data *irq_get_irq_data(unsigned int irq)
{
@@ -162,72 +132,75 @@ struct irq_data *irq_get_irq_data(unsigned int irq)
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
-/**
- * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
- *
- * @irq: Interrupt number
- * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
- *
- * The IRQ_NESTED_THREAD flag indicates that on
- * request_threaded_irq() no separate interrupt thread should be
- * created for the irq as the handler are called nested in the
- * context of a demultiplexing interrupt handler thread.
- */
-void set_irq_nested_thread(unsigned int irq, int nest)
+static void irq_state_clr_disabled(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
- unsigned long flags;
-
- if (!desc)
- return;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- if (nest)
- desc->status |= IRQ_NESTED_THREAD;
- else
- desc->status &= ~IRQ_NESTED_THREAD;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ desc->istate &= ~IRQS_DISABLED;
+ irq_compat_clr_disabled(desc);
}
-EXPORT_SYMBOL_GPL(set_irq_nested_thread);
-/*
- * default enable function
- */
-static void default_enable(struct irq_data *data)
+static void irq_state_set_disabled(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_data_to_desc(data);
+ desc->istate |= IRQS_DISABLED;
+ irq_compat_set_disabled(desc);
+}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
- desc->status &= ~IRQ_MASKED;
+static void irq_state_clr_masked(struct irq_desc *desc)
+{
+ desc->istate &= ~IRQS_MASKED;
+ irq_compat_clr_masked(desc);
}
-/*
- * default disable function
- */
-static void default_disable(struct irq_data *data)
+static void irq_state_set_masked(struct irq_desc *desc)
{
+ desc->istate |= IRQS_MASKED;
+ irq_compat_set_masked(desc);
}
-/*
- * default startup function
- */
-static unsigned int default_startup(struct irq_data *data)
+int irq_startup(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_data_to_desc(data);
+ irq_state_clr_disabled(desc);
+ desc->depth = 0;
- desc->irq_data.chip->irq_enable(data);
+ if (desc->irq_data.chip->irq_startup) {
+ int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+ irq_state_clr_masked(desc);
+ return ret;
+ }
+
+ irq_enable(desc);
return 0;
}
-/*
- * default shutdown function
- */
-static void default_shutdown(struct irq_data *data)
+void irq_shutdown(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_data_to_desc(data);
+ irq_state_set_disabled(desc);
+ desc->depth = 1;
+ if (desc->irq_data.chip->irq_shutdown)
+ desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+ if (desc->irq_data.chip->irq_disable)
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ irq_state_set_masked(desc);
+}
- desc->irq_data.chip->irq_mask(&desc->irq_data);
- desc->status |= IRQ_MASKED;
+void irq_enable(struct irq_desc *desc)
+{
+ irq_state_clr_disabled(desc);
+ if (desc->irq_data.chip->irq_enable)
+ desc->irq_data.chip->irq_enable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ irq_state_clr_masked(desc);
+}
+
+void irq_disable(struct irq_desc *desc)
+{
+ irq_state_set_disabled(desc);
+ if (desc->irq_data.chip->irq_disable) {
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ irq_state_set_masked(desc);
+ }
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
@@ -315,10 +288,6 @@ static void compat_bus_sync_unlock(struct irq_data *data)
void irq_chip_set_defaults(struct irq_chip *chip)
{
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
- /*
- * Compat fixup functions need to be before we set the
- * defaults for enable/disable/startup/shutdown
- */
if (chip->enable)
chip->irq_enable = compat_irq_enable;
if (chip->disable)
@@ -327,33 +296,8 @@ void irq_chip_set_defaults(struct irq_chip *chip)
chip->irq_shutdown = compat_irq_shutdown;
if (chip->startup)
chip->irq_startup = compat_irq_startup;
-#endif
- /*
- * The real defaults
- */
- if (!chip->irq_enable)
- chip->irq_enable = default_enable;
- if (!chip->irq_disable)
- chip->irq_disable = default_disable;
- if (!chip->irq_startup)
- chip->irq_startup = default_startup;
- /*
- * We use chip->irq_disable, when the user provided its own. When
- * we have default_disable set for chip->irq_disable, then we need
- * to use default_shutdown, otherwise the irq line is not
- * disabled on free_irq():
- */
- if (!chip->irq_shutdown)
- chip->irq_shutdown = chip->irq_disable != default_disable ?
- chip->irq_disable : default_shutdown;
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
if (!chip->end)
chip->end = dummy_irq_chip.end;
-
- /*
- * Now fix up the remaining compat handlers
- */
if (chip->bus_lock)
chip->irq_bus_lock = compat_bus_lock;
if (chip->bus_sync_unlock)
@@ -388,22 +332,22 @@ static inline void mask_ack_irq(struct irq_desc *desc)
if (desc->irq_data.chip->irq_ack)
desc->irq_data.chip->irq_ack(&desc->irq_data);
}
- desc->status |= IRQ_MASKED;
+ irq_state_set_masked(desc);
}
-static inline void mask_irq(struct irq_desc *desc)
+void mask_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask) {
desc->irq_data.chip->irq_mask(&desc->irq_data);
- desc->status |= IRQ_MASKED;
+ irq_state_set_masked(desc);
}
}
-static inline void unmask_irq(struct irq_desc *desc)
+void unmask_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_unmask) {
desc->irq_data.chip->irq_unmask(&desc->irq_data);
- desc->status &= ~IRQ_MASKED;
+ irq_state_clr_masked(desc);
}
}
@@ -428,10 +372,11 @@ void handle_nested_irq(unsigned int irq)
kstat_incr_irqs_this_cpu(irq, desc);
action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
goto out_unlock;
- desc->status |= IRQ_INPROGRESS;
+ irq_compat_set_progress(desc);
+ desc->istate |= IRQS_INPROGRESS;
raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -439,13 +384,21 @@ void handle_nested_irq(unsigned int irq)
note_interrupt(irq, desc, action_ret);
raw_spin_lock_irq(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
+ desc->istate &= ~IRQS_INPROGRESS;
+ irq_compat_clr_progress(desc);
out_unlock:
raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
+static bool irq_check_poll(struct irq_desc *desc)
+{
+ if (!(desc->istate & IRQS_POLL_INPROGRESS))
+ return false;
+ return irq_wait_for_poll(desc);
+}
+
/**
* handle_simple_irq - Simple and software-decoded IRQs.
* @irq: the interrupt number
@@ -461,29 +414,20 @@ EXPORT_SYMBOL_GPL(handle_nested_irq);
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
- struct irqaction *action;
- irqreturn_t action_ret;
-
raw_spin_lock(&desc->lock);
- if (unlikely(desc->status & IRQ_INPROGRESS))
- goto out_unlock;
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ if (unlikely(desc->istate & IRQS_INPROGRESS))
+ if (!irq_check_poll(desc))
+ goto out_unlock;
+
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
- action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
goto out_unlock;
- desc->status |= IRQ_INPROGRESS;
- raw_spin_unlock(&desc->lock);
-
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ handle_irq_event(desc);
- raw_spin_lock(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
out_unlock:
raw_spin_unlock(&desc->lock);
}
@@ -501,42 +445,42 @@ out_unlock:
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
- struct irqaction *action;
- irqreturn_t action_ret;
-
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
- if (unlikely(desc->status & IRQ_INPROGRESS))
- goto out_unlock;
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ if (unlikely(desc->istate & IRQS_INPROGRESS))
+ if (!irq_check_poll(desc))
+ goto out_unlock;
+
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
/*
* If its disabled or no action available
* keep it masked and get out of here
*/
- action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
goto out_unlock;
- desc->status |= IRQ_INPROGRESS;
- raw_spin_unlock(&desc->lock);
+ handle_irq_event(desc);
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
-
- raw_spin_lock(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
-
- if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+ if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
unmask_irq(desc);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void preflow_handler(struct irq_desc *desc)
+{
+ if (desc->preflow_handler)
+ desc->preflow_handler(&desc->irq_data);
+}
+#else
+static inline void preflow_handler(struct irq_desc *desc) { }
+#endif
+
/**
* handle_fasteoi_irq - irq handler for transparent controllers
* @irq: the interrupt number
@@ -550,42 +494,37 @@ EXPORT_SYMBOL_GPL(handle_level_irq);
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
- struct irqaction *action;
- irqreturn_t action_ret;
-
raw_spin_lock(&desc->lock);
- if (unlikely(desc->status & IRQ_INPROGRESS))
- goto out;
+ if (unlikely(desc->istate & IRQS_INPROGRESS))
+ if (!irq_check_poll(desc))
+ goto out;
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
/*
* If it's disabled or no action available
* then mask it and get out of here:
*/
- action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
- desc->status |= IRQ_PENDING;
+ if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
+ irq_compat_set_pending(desc);
+ desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
+ preflow_handler(desc);
+ handle_irq_event(desc);
- desc->status |= IRQ_INPROGRESS;
- desc->status &= ~IRQ_PENDING;
- raw_spin_unlock(&desc->lock);
-
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
-
- raw_spin_lock(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
-out:
+out_eoi:
desc->irq_data.chip->irq_eoi(&desc->irq_data);
-
+out_unlock:
raw_spin_unlock(&desc->lock);
+ return;
+out:
+ if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
+ goto out_eoi;
+ goto out_unlock;
}
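Editorial note, not part of the patch: after this rewrite the handled path always leaves handle_fasteoi_irq() through out_eoi and sends the EOI, while the not-handled path (no action or disabled) jumps to out, where the EOI is still sent unless the chip has IRQCHIP_EOI_IF_HANDLED set; in that case the exit goes straight to out_unlock and the EOI is skipped.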
/**
@@ -609,32 +548,28 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If we're currently running this IRQ, or it's disabled,
* we shouldn't process the IRQ. Mark it pending, handle
* the necessary masking and go out
*/
- if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
- !desc->action)) {
- desc->status |= (IRQ_PENDING | IRQ_MASKED);
- mask_ack_irq(desc);
- goto out_unlock;
+ if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
+ !desc->action))) {
+ if (!irq_check_poll(desc)) {
+ irq_compat_set_pending(desc);
+ desc->istate |= IRQS_PENDING;
+ mask_ack_irq(desc);
+ goto out_unlock;
+ }
}
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
- /* Mark the IRQ currently in progress.*/
- desc->status |= IRQ_INPROGRESS;
-
do {
- struct irqaction *action = desc->action;
- irqreturn_t action_ret;
-
- if (unlikely(!action)) {
+ if (unlikely(!desc->action)) {
mask_irq(desc);
goto out_unlock;
}
@@ -644,22 +579,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
* one, we could have masked the irq.
* Re-enable it, if it was not disabled in the meantime.
*/
- if (unlikely((desc->status &
- (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
- (IRQ_PENDING | IRQ_MASKED))) {
- unmask_irq(desc);
+ if (unlikely(desc->istate & IRQS_PENDING)) {
+ if (!(desc->istate & IRQS_DISABLED) &&
+ (desc->istate & IRQS_MASKED))
+ unmask_irq(desc);
}
- desc->status &= ~IRQ_PENDING;
- raw_spin_unlock(&desc->lock);
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
- raw_spin_lock(&desc->lock);
+ handle_irq_event(desc);
- } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+ } while ((desc->istate & IRQS_PENDING) &&
+ !(desc->istate & IRQS_DISABLED));
- desc->status &= ~IRQ_INPROGRESS;
out_unlock:
raw_spin_unlock(&desc->lock);
}
@@ -674,103 +604,84 @@ out_unlock:
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
- irqreturn_t action_ret;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
kstat_incr_irqs_this_cpu(irq, desc);
- if (desc->irq_data.chip->irq_ack)
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
- action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ handle_irq_event_percpu(desc, desc->action);
- if (desc->irq_data.chip->irq_eoi)
- desc->irq_data.chip->irq_eoi(&desc->irq_data);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
}
void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
- if (!desc) {
- printk(KERN_ERR
- "Trying to install type control for IRQ%d\n", irq);
+ if (!desc)
return;
- }
- if (!handle)
+ if (!handle) {
handle = handle_bad_irq;
- else if (desc->irq_data.chip == &no_irq_chip) {
- printk(KERN_WARNING "Trying to install %sinterrupt handler "
- "for IRQ%d\n", is_chained ? "chained " : "", irq);
- /*
- * Some ARM implementations install a handler for really dumb
- * interrupt hardware without setting an irq_chip. This worked
- * with the ARM no_irq_chip but the check in setup_irq would
- * prevent us to setup the interrupt at all. Switch it to
- * dummy_irq_chip for easy transition.
- */
- desc->irq_data.chip = &dummy_irq_chip;
+ } else {
+ if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
+ goto out;
}
- chip_bus_lock(desc);
- raw_spin_lock_irqsave(&desc->lock, flags);
-
/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->irq_data.chip != &no_irq_chip)
mask_ack_irq(desc);
- desc->status |= IRQ_DISABLED;
+ irq_compat_set_disabled(desc);
+ desc->istate |= IRQS_DISABLED;
desc->depth = 1;
}
desc->handle_irq = handle;
desc->name = name;
if (handle != handle_bad_irq && is_chained) {
- desc->status &= ~IRQ_DISABLED;
- desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
- desc->depth = 0;
- desc->irq_data.chip->irq_startup(&desc->irq_data);
+ irq_settings_set_noprobe(desc);
+ irq_settings_set_norequest(desc);
+ irq_startup(desc);
}
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(desc);
-}
-EXPORT_SYMBOL_GPL(__set_irq_handler);
-
-void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handle)
-{
- set_irq_chip(irq, chip);
- __set_irq_handler(irq, handle, 0, NULL);
+out:
+ irq_put_desc_busunlock(desc, flags);
}
+EXPORT_SYMBOL_GPL(__irq_set_handler);
void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handle, const char *name)
{
- set_irq_chip(irq, chip);
- __set_irq_handler(irq, handle, 0, name);
+ irq_set_chip(irq, chip);
+ __irq_set_handler(irq, handle, 0, name);
}
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
if (!desc)
return;
+ irq_settings_clr_and_set(desc, clr, set);
+
+ irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+ IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+ if (irq_settings_has_no_balance_set(desc))
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ if (irq_settings_is_per_cpu(desc))
+ irqd_set(&desc->irq_data, IRQD_PER_CPU);
+ if (irq_settings_can_move_pcntxt(desc))
+ irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
- /* Sanitize flags */
- set &= IRQF_MODIFY_MASK;
- clr &= IRQF_MODIFY_MASK;
+ irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
- raw_spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~clr;
- desc->status |= set;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_put_desc_unlock(desc, flags);
}
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h
new file mode 100644
index 000000000000..6bbaf66aca85
--- /dev/null
+++ b/kernel/irq/compat.h
@@ -0,0 +1,72 @@
+/*
+ * Compat layer for transition period
+ */
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+static inline void irq_compat_set_progress(struct irq_desc *desc)
+{
+ desc->status |= IRQ_INPROGRESS;
+}
+
+static inline void irq_compat_clr_progress(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_INPROGRESS;
+}
+static inline void irq_compat_set_disabled(struct irq_desc *desc)
+{
+ desc->status |= IRQ_DISABLED;
+}
+static inline void irq_compat_clr_disabled(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_DISABLED;
+}
+static inline void irq_compat_set_pending(struct irq_desc *desc)
+{
+ desc->status |= IRQ_PENDING;
+}
+
+static inline void irq_compat_clr_pending(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_PENDING;
+}
+static inline void irq_compat_set_masked(struct irq_desc *desc)
+{
+ desc->status |= IRQ_MASKED;
+}
+
+static inline void irq_compat_clr_masked(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_MASKED;
+}
+static inline void irq_compat_set_move_pending(struct irq_desc *desc)
+{
+ desc->status |= IRQ_MOVE_PENDING;
+}
+
+static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_MOVE_PENDING;
+}
+static inline void irq_compat_set_affinity(struct irq_desc *desc)
+{
+ desc->status |= IRQ_AFFINITY_SET;
+}
+
+static inline void irq_compat_clr_affinity(struct irq_desc *desc)
+{
+ desc->status &= ~IRQ_AFFINITY_SET;
+}
+#else
+static inline void irq_compat_set_progress(struct irq_desc *desc) { }
+static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
+static inline void irq_compat_set_disabled(struct irq_desc *desc) { }
+static inline void irq_compat_clr_disabled(struct irq_desc *desc) { }
+static inline void irq_compat_set_pending(struct irq_desc *desc) { }
+static inline void irq_compat_clr_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_masked(struct irq_desc *desc) { }
+static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
+static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
+static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
+#endif
+
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
new file mode 100644
index 000000000000..d1a33b7fa61d
--- /dev/null
+++ b/kernel/irq/debug.h
@@ -0,0 +1,40 @@
+/*
+ * Debugging printout:
+ */
+
+#include <linux/kallsyms.h>
+
+#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+
+static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+ irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+ printk("->handle_irq(): %p, ", desc->handle_irq);
+ print_symbol("%s\n", (unsigned long)desc->handle_irq);
+ printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+ print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
+ printk("->action(): %p\n", desc->action);
+ if (desc->action) {
+ printk("->action->handler(): %p, ", desc->action->handler);
+ print_symbol("%s\n", (unsigned long)desc->action->handler);
+ }
+
+ P(IRQ_LEVEL);
+ P(IRQ_PER_CPU);
+ P(IRQ_NOPROBE);
+ P(IRQ_NOREQUEST);
+ P(IRQ_NOAUTOEN);
+
+ PS(IRQS_AUTODETECT);
+ PS(IRQS_INPROGRESS);
+ PS(IRQS_REPLAY);
+ PS(IRQS_WAITING);
+ PS(IRQS_DISABLED);
+ PS(IRQS_PENDING);
+ PS(IRQS_MASKED);
+}
+
+#undef P
+#undef PS
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3540a7190122..517561fc7317 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -51,30 +51,92 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
"but no thread function available.", irq, action->name);
}
-/**
- * handle_IRQ_event - irq action chain handler
- * @irq: the interrupt number
- * @action: the interrupt action chain for this irq
- *
- * Handles the action chain of an irq event
- */
-irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
+static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+{
+ /*
+ * Wake up the handler thread for this action. In case the
+ * thread crashed and was killed we just pretend that we
+ * handled the interrupt. The hardirq handler has disabled the
+ * device interrupt, so no irq storm is lurking. If the
+ * RUNTHREAD bit is already set, nothing to do.
+ */
+ if (test_bit(IRQTF_DIED, &action->thread_flags) ||
+ test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ return;
+
+ /*
+ * It's safe to OR the mask lockless here. We have only two
+ * places which write to threads_oneshot: This code and the
+ * irq thread.
+ *
+ * This code is the hard irq context and can never run on two
+ * cpus in parallel. If it ever does we have more serious
+ * problems than this bitmask.
+ *
+ * The irq threads of this irq which clear their "running" bit
+ * in threads_oneshot are serialized via desc->lock against
+ * each other and they are serialized against this code by
+ * IRQS_INPROGRESS.
+ *
+ * Hard irq handler:
+ *
+ * spin_lock(desc->lock);
+ * desc->state |= IRQS_INPROGRESS;
+ * spin_unlock(desc->lock);
+ * set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+ * desc->threads_oneshot |= mask;
+ * spin_lock(desc->lock);
+ * desc->state &= ~IRQS_INPROGRESS;
+ * spin_unlock(desc->lock);
+ *
+ * irq thread:
+ *
+ * again:
+ * spin_lock(desc->lock);
+ * if (desc->state & IRQS_INPROGRESS) {
+ * spin_unlock(desc->lock);
+ * while(desc->state & IRQS_INPROGRESS)
+ * cpu_relax();
+ * goto again;
+ * }
+ * if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ * desc->threads_oneshot &= ~mask;
+ * spin_unlock(desc->lock);
+ *
+ * So either the thread waits for us to clear IRQS_INPROGRESS
+ * or we are waiting in the flow handler for desc->lock to be
+ * released before we reach this point. The thread also checks
+ * IRQTF_RUNTHREAD under desc->lock. If set it leaves
+ * threads_oneshot untouched and runs the thread another time.
+ */
+ desc->threads_oneshot |= action->thread_mask;
+ wake_up_process(action->thread);
+}
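Editorial note, not part of the patch: the thread-side counterpart of the ordering described above is irq_finalize_oneshot(), further down in this diff. A minimal sketch of how the two sides pair up:

	/* hard irq side (irq_wake_thread() above) */
	desc->threads_oneshot |= action->thread_mask;
	wake_up_process(action->thread);

	/* thread side (irq_finalize_oneshot() below), under desc->lock */
	desc->threads_oneshot &= ~action->thread_mask;
	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
	    (desc->istate & IRQS_MASKED))
		desc->irq_data.chip->irq_unmask(&desc->irq_data);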
+
+irqreturn_t
+handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
- irqreturn_t ret, retval = IRQ_NONE;
- unsigned int status = 0;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int random = 0, irq = desc->irq_data.irq;
do {
+ irqreturn_t res;
+
trace_irq_handler_entry(irq, action);
- ret = action->handler(irq, action->dev_id);
- trace_irq_handler_exit(irq, action, ret);
+ res = action->handler(irq, action->dev_id);
+ trace_irq_handler_exit(irq, action, res);
- switch (ret) {
+ if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
+ irq, action->handler))
+ local_irq_disable();
+
+ switch (res) {
case IRQ_WAKE_THREAD:
/*
* Set result to handled so the spurious check
* does not trigger.
*/
- ret = IRQ_HANDLED;
+ res = IRQ_HANDLED;
/*
* Catch drivers which return WAKE_THREAD but
@@ -85,36 +147,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
break;
}
- /*
- * Wake up the handler thread for this
- * action. In case the thread crashed and was
- * killed we just pretend that we handled the
- * interrupt. The hardirq handler above has
- * disabled the device interrupt, so no irq
- * storm is lurking.
- */
- if (likely(!test_bit(IRQTF_DIED,
- &action->thread_flags))) {
- set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
- wake_up_process(action->thread);
- }
+ irq_wake_thread(desc, action);
/* Fall through to add to randomness */
case IRQ_HANDLED:
- status |= action->flags;
+ random |= action->flags;
break;
default:
break;
}
- retval |= ret;
+ retval |= res;
action = action->next;
} while (action);
- if (status & IRQF_SAMPLE_RANDOM)
+ if (random & IRQF_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
- local_irq_disable();
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
return retval;
}
+
+irqreturn_t handle_irq_event(struct irq_desc *desc)
+{
+ struct irqaction *action = desc->action;
+ irqreturn_t ret;
+
+ irq_compat_clr_pending(desc);
+ desc->istate &= ~IRQS_PENDING;
+ irq_compat_set_progress(desc);
+ desc->istate |= IRQS_INPROGRESS;
+ raw_spin_unlock(&desc->lock);
+
+ ret = handle_irq_event_percpu(desc, action);
+
+ raw_spin_lock(&desc->lock);
+ desc->istate &= ~IRQS_INPROGRESS;
+ irq_compat_clr_progress(desc);
+ return ret;
+}
+
+/**
+ * handle_IRQ_event - irq action chain handler
+ * @irq: the interrupt number
+ * @action: the interrupt action chain for this irq
+ *
+ * Handles the action chain of an irq event
+ */
+irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
+{
+ return handle_irq_event_percpu(irq_to_desc(irq), action);
+}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4571ae7e085a..6c6ec9a49027 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -1,27 +1,101 @@
/*
* IRQ subsystem internal functions and variables:
+ *
+ * Do not ever include this file from anything other than
+ * kernel/irq/. Do not even think about using any information outside
+ * of this file for your non-core code.
*/
#include <linux/irqdesc.h>
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS NR_IRQS
+#endif
+
+#define istate core_internal_state__do_not_mess_with_it
+
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+# define status status_use_accessors
+#endif
+
extern int noirqdebug;
+/*
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_DIED - handler thread died
+ * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ * IRQTF_AFFINITY - irq thread is requested to adjust affinity
+ * IRQTF_FORCED_THREAD - irq action is force threaded
+ */
+enum {
+ IRQTF_RUNTHREAD,
+ IRQTF_DIED,
+ IRQTF_WARNED,
+ IRQTF_AFFINITY,
+ IRQTF_FORCED_THREAD,
+};
+
+/*
+ * Bit masks for desc->state
+ *
+ * IRQS_AUTODETECT - autodetection in progress
+ * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
+ * detection
+ * IRQS_POLL_INPROGRESS - polling in progress
+ * IRQS_INPROGRESS - Interrupt in progress
+ * IRQS_ONESHOT - irq is not unmasked in primary handler
+ * IRQS_REPLAY - irq is replayed
+ * IRQS_WAITING - irq is waiting
+ * IRQS_DISABLED - irq is disabled
+ * IRQS_PENDING - irq is pending and replayed later
+ * IRQS_MASKED - irq is masked
+ * IRQS_SUSPENDED - irq is suspended
+ */
+enum {
+ IRQS_AUTODETECT = 0x00000001,
+ IRQS_SPURIOUS_DISABLED = 0x00000002,
+ IRQS_POLL_INPROGRESS = 0x00000008,
+ IRQS_INPROGRESS = 0x00000010,
+ IRQS_ONESHOT = 0x00000020,
+ IRQS_REPLAY = 0x00000040,
+ IRQS_WAITING = 0x00000080,
+ IRQS_DISABLED = 0x00000100,
+ IRQS_PENDING = 0x00000200,
+ IRQS_MASKED = 0x00000400,
+ IRQS_SUSPENDED = 0x00000800,
+};
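Editorial sketch, not part of the patch: these bits replace the old IRQ_* status flags and are only read or written while desc->lock is held. Condensed from handle_simple_irq() earlier in this diff, the typical flow-handler pattern is (example_flow_handler is an illustrative name only):

	static void example_flow_handler(unsigned int irq, struct irq_desc *desc)
	{
		raw_spin_lock(&desc->lock);
		if (unlikely(desc->istate & IRQS_INPROGRESS))
			goto out_unlock;	/* already being handled elsewhere */
		desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
		kstat_incr_irqs_this_cpu(irq, desc);
		if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
			goto out_unlock;
		handle_irq_event(desc);		/* drops and retakes desc->lock */
	out_unlock:
		raw_spin_unlock(&desc->lock);
	}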
+
+#include "compat.h"
+#include "debug.h"
+#include "settings.h"
+
#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);
-/* Set default handler: */
-extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
-
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+extern int irq_startup(struct irq_desc *desc);
+extern void irq_shutdown(struct irq_desc *desc);
+extern void irq_enable(struct irq_desc *desc);
+extern void irq_disable(struct irq_desc *desc);
+extern void mask_irq(struct irq_desc *desc);
+extern void unmask_irq(struct irq_desc *desc);
+
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+irqreturn_t handle_irq_event(struct irq_desc *desc);
+
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+bool irq_wait_for_poll(struct irq_desc *desc);
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -37,20 +111,10 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
-extern int irq_select_affinity_usr(unsigned int irq);
+extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static inline void irq_end(unsigned int irq, struct irq_desc *desc)
-{
- if (desc->irq_data.chip && desc->irq_data.chip->end)
- desc->irq_data.chip->end(irq);
-}
-#else
-static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
-#endif
-
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
@@ -64,43 +128,60 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
+struct irq_desc *
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
+
+static inline struct irq_desc *
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+{
+ return __irq_get_desc_lock(irq, flags, true);
+}
+
+static inline void
+irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
+{
+ __irq_put_desc_unlock(desc, flags, true);
+}
+
+static inline struct irq_desc *
+irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+{
+ return __irq_get_desc_lock(irq, flags, false);
+}
+
+static inline void
+irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
+{
+ __irq_put_desc_unlock(desc, flags, false);
+}
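Editorial sketch, not part of the patch: the buslock/busunlock pair above is the pattern that enable_irq(), irq_set_irq_wake() and __irq_set_handler() are converted to later in this diff. The intended usage, with a hypothetical setter name for illustration:

	int some_irq_setter(unsigned int irq)
	{
		unsigned long flags;
		struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

		if (!desc)
			return -EINVAL;
		/* modify desc state here: desc->lock and the chip bus lock are held */
		irq_put_desc_busunlock(desc, flags);
		return 0;
	}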
+
/*
- * Debugging printout:
+ * Manipulation functions for irq_data.state
*/
+static inline void irqd_set_move_pending(struct irq_data *d)
+{
+ d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
+ irq_compat_set_move_pending(irq_data_to_desc(d));
+}
-#include <linux/kallsyms.h>
-
-#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+static inline void irqd_clr_move_pending(struct irq_data *d)
+{
+ d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
+ irq_compat_clr_move_pending(irq_data_to_desc(d));
+}
-static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
- printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
- irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
- printk("->handle_irq(): %p, ", desc->handle_irq);
- print_symbol("%s\n", (unsigned long)desc->handle_irq);
- printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
- print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
- printk("->action(): %p\n", desc->action);
- if (desc->action) {
- printk("->action->handler(): %p, ", desc->action->handler);
- print_symbol("%s\n", (unsigned long)desc->action->handler);
- }
-
- P(IRQ_INPROGRESS);
- P(IRQ_DISABLED);
- P(IRQ_PENDING);
- P(IRQ_REPLAY);
- P(IRQ_AUTODETECT);
- P(IRQ_WAITING);
- P(IRQ_LEVEL);
- P(IRQ_MASKED);
-#ifdef CONFIG_IRQ_PER_CPU
- P(IRQ_PER_CPU);
-#endif
- P(IRQ_NOPROBE);
- P(IRQ_NOREQUEST);
- P(IRQ_NOAUTOEN);
+ d->state_use_accessors &= ~mask;
}
-#undef P
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
+{
+ d->state_use_accessors |= mask;
+}
+static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
+{
+ return d->state_use_accessors & mask;
+}
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 282f20230e67..dbccc799407f 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -79,7 +79,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
desc->irq_data.chip_data = NULL;
desc->irq_data.handler_data = NULL;
desc->irq_data.msi_desc = NULL;
- desc->status = IRQ_DEFAULT_INIT_FLAGS;
+ irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
+ desc->istate = IRQS_DISABLED;
desc->handle_irq = handle_bad_irq;
desc->depth = 1;
desc->irq_count = 0;
@@ -94,7 +95,7 @@ int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
#ifdef CONFIG_SPARSE_IRQ
@@ -206,6 +207,14 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
return NULL;
}
+static int irq_expand_nr_irqs(unsigned int nr)
+{
+ if (nr > IRQ_BITMAP_BITS)
+ return -ENOMEM;
+ nr_irqs = nr;
+ return 0;
+}
+
int __init early_irq_init(void)
{
int i, initcnt, node = first_online_node;
@@ -217,6 +226,15 @@ int __init early_irq_init(void)
initcnt = arch_probe_nr_irqs();
printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
+ if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+ nr_irqs = IRQ_BITMAP_BITS;
+
+ if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+ initcnt = IRQ_BITMAP_BITS;
+
+ if (initcnt > nr_irqs)
+ nr_irqs = initcnt;
+
for (i = 0; i < initcnt; i++) {
desc = alloc_desc(i, node);
set_bit(i, allocated_irqs);
@@ -229,7 +247,7 @@ int __init early_irq_init(void)
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
- .status = IRQ_DEFAULT_INIT_FLAGS,
+ .istate = IRQS_DISABLED,
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
@@ -251,8 +269,8 @@ int __init early_irq_init(void)
for (i = 0; i < count; i++) {
desc[i].irq_data.irq = i;
desc[i].irq_data.chip = &no_irq_chip;
- /* TODO : do this allocation on-demand ... */
desc[i].kstat_irqs = alloc_percpu(unsigned int);
+ irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
alloc_masks(desc + i, GFP_KERNEL, node);
desc_smp_init(desc + i, node);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
@@ -277,24 +295,14 @@ static void free_desc(unsigned int irq)
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
-#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
- struct irq_desc *desc;
- unsigned int i;
-
- for (i = 0; i < cnt; i++) {
- desc = irq_to_desc(start + i);
- if (desc && !desc->kstat_irqs) {
- unsigned int __percpu *stats = alloc_percpu(unsigned int);
-
- if (!stats)
- return -1;
- if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
- free_percpu(stats);
- }
- }
-#endif
return start;
}
+
+static int irq_expand_nr_irqs(unsigned int nr)
+{
+ return -ENOMEM;
+}
+
#endif /* !CONFIG_SPARSE_IRQ */
/* Dynamic interrupt handling */
@@ -338,14 +346,17 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
mutex_lock(&sparse_irq_lock);
- start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
+ start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
+ from, cnt, 0);
ret = -EEXIST;
if (irq >=0 && start != irq)
goto err;
- ret = -ENOMEM;
- if (start >= nr_irqs)
- goto err;
+ if (start + cnt > nr_irqs) {
+ ret = irq_expand_nr_irqs(start + cnt);
+ if (ret)
+ goto err;
+ }
bitmap_set(allocated_irqs, start, cnt);
mutex_unlock(&sparse_irq_lock);
@@ -392,6 +403,26 @@ unsigned int irq_get_next_irq(unsigned int offset)
return find_next_bit(allocated_irqs, nr_irqs, offset);
}
+struct irq_desc *
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+ if (bus)
+ chip_bus_lock(desc);
+ raw_spin_lock_irqsave(&desc->lock, *flags);
+ }
+ return desc;
+}
+
+void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+{
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ if (bus)
+ chip_bus_sync_unlock(desc);
+}
+
/**
* dynamic_irq_cleanup - cleanup a dynamically allocated irq
* @irq: irq number to initialize
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f747dd..acd599a43bfb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@
#include "internals.h"
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+ force_irqthreads = true;
+ return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -30,7 +41,7 @@
void synchronize_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- unsigned int status;
+ unsigned int state;
if (!desc)
return;
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq)
* Wait until we're out of the critical section. This might
* give the wrong answer due to the lack of memory barriers.
*/
- while (desc->status & IRQ_INPROGRESS)
+ while (desc->istate & IRQS_INPROGRESS)
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
raw_spin_lock_irqsave(&desc->lock, flags);
- status = desc->status;
+ state = desc->istate;
raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
- } while (status & IRQ_INPROGRESS);
+ } while (state & IRQS_INPROGRESS);
/*
* We made sure that no hardirq handler is running. Now verify
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
- !desc->irq_data.chip->irq_set_affinity)
+ if (!desc || !irqd_can_balance(&desc->irq_data) ||
+ !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
return 0;
return 1;
@@ -100,67 +111,169 @@ void irq_set_thread_affinity(struct irq_desc *desc)
}
}
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
+{
+ return irq_settings_can_move_pcntxt(desc);
+}
+static inline bool irq_move_pending(struct irq_desc *desc)
+{
+ return irqd_is_setaffinity_pending(&desc->irq_data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+ cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+ cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
+static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
+
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
* @cpumask: cpumask
*
*/
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_chip *chip = desc->irq_data.chip;
unsigned long flags;
+ int ret = 0;
if (!chip->irq_set_affinity)
return -EINVAL;
raw_spin_lock_irqsave(&desc->lock, flags);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (desc->status & IRQ_MOVE_PCNTXT) {
- if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
- cpumask_copy(desc->irq_data.affinity, cpumask);
+ if (irq_can_move_pcntxt(desc)) {
+ ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ cpumask_copy(desc->irq_data.affinity, mask);
+ case IRQ_SET_MASK_OK_NOCOPY:
irq_set_thread_affinity(desc);
+ ret = 0;
}
+ } else {
+ irqd_set_move_pending(&desc->irq_data);
+ irq_copy_pending(desc, mask);
}
- else {
- desc->status |= IRQ_MOVE_PENDING;
- cpumask_copy(desc->pending_mask, cpumask);
- }
-#else
- if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
- cpumask_copy(desc->irq_data.affinity, cpumask);
- irq_set_thread_affinity(desc);
+
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
+ schedule_work(&desc->affinity_notify->work);
}
-#endif
- desc->status |= IRQ_AFFINITY_SET;
+ irq_compat_set_affinity(desc);
+ irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
raw_spin_unlock_irqrestore(&desc->lock, flags);
- return 0;
+ return ret;
}
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+
+ if (!desc)
+ return -EINVAL;
+ desc->affinity_hint = m;
+ irq_put_desc_unlock(desc, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+
+ if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ goto out;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (irq_move_pending(desc))
+ irq_get_pending(cpumask, desc);
+ else
+ cpumask_copy(cpumask, desc->irq_data.affinity);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ notify->notify(notify, cpumask);
+
+ free_cpumask_var(cpumask);
+out:
+ kref_put(&notify->kref, notify->release);
+}
+
+/**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+ * @notify: Context for notification, or %NULL to disable
+ * notification. Function pointers must be initialised;
+ * the other fields will be initialised by this function.
+ *
+ * Must be called in process context. Notification may only be enabled
+ * after the IRQ is allocated and must be disabled before the IRQ is
+ * freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_affinity_notify *old_notify;
unsigned long flags;
+ /* The release function is promised process context */
+ might_sleep();
+
if (!desc)
return -EINVAL;
+ /* Complete initialisation of *notify */
+ if (notify) {
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
+ }
+
raw_spin_lock_irqsave(&desc->lock, flags);
- desc->affinity_hint = m;
+ old_notify = desc->affinity_notify;
+ desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
+ if (old_notify)
+ kref_put(&old_notify->kref, old_notify->release);
+
return 0;
}
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
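Editorial sketch, not part of the patch: a driver that wants to re-steer its queues when the interrupt's affinity changes would use the new notifier roughly as below; all my_* names are hypothetical. Passing NULL to irq_set_affinity_notifier() disables the notification again, which per the kernel-doc above must happen before free_irq().

	struct my_ctx {
		struct irq_affinity_notify notify;
		/* driver state */
	};

	static void my_affinity_changed(struct irq_affinity_notify *notify,
					const cpumask_t *mask)
	{
		struct my_ctx *ctx = container_of(notify, struct my_ctx, notify);
		/* re-steer queues/completions towards the CPUs in *mask */
	}

	static void my_affinity_release(struct kref *ref)
	{
		/* last reference dropped; nothing to free in this sketch */
	}

	static int my_setup_notify(struct my_ctx *ctx, unsigned int irq)
	{
		ctx->notify.notify = my_affinity_changed;
		ctx->notify.release = my_affinity_release;
		return irq_set_affinity_notifier(irq, &ctx->notify);
	}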
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
* Generic version of the affinity autoselector.
*/
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct cpumask *set = irq_default_affinity;
+ int ret;
+
+ /* Excludes PER_CPU and NO_BALANCE interrupts */
if (!irq_can_set_affinity(irq))
return 0;
@@ -168,22 +281,29 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
* Preserve a userspace affinity setup, but make sure that
* one of the targets is online.
*/
- if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
- if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
- < nr_cpu_ids)
- goto set_affinity;
- else
- desc->status &= ~IRQ_AFFINITY_SET;
+ if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+ if (cpumask_intersects(desc->irq_data.affinity,
+ cpu_online_mask))
+ set = desc->irq_data.affinity;
+ else {
+ irq_compat_clr_affinity(desc);
+ irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+ }
}
- cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
- desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
-
+ cpumask_and(mask, cpu_online_mask, set);
+ ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ cpumask_copy(desc->irq_data.affinity, mask);
+ case IRQ_SET_MASK_OK_NOCOPY:
+ irq_set_thread_affinity(desc);
+ }
return 0;
}
#else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
return irq_select_affinity(irq);
}
@@ -192,23 +312,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
/*
* Called when affinity is set via /proc/irq
*/
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret;
raw_spin_lock_irqsave(&desc->lock, flags);
- ret = setup_affinity(irq, desc);
- if (!ret)
- irq_set_thread_affinity(desc);
+ ret = setup_affinity(irq, desc, mask);
raw_spin_unlock_irqrestore(&desc->lock, flags);
-
return ret;
}
#else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
return 0;
}
@@ -219,13 +337,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
if (suspend) {
if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
return;
- desc->status |= IRQ_SUSPENDED;
+ desc->istate |= IRQS_SUSPENDED;
}
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->irq_data.chip->irq_disable(&desc->irq_data);
- }
+ if (!desc->depth++)
+ irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+
+ if (!desc)
+ return -EINVAL;
+ __disable_irq(desc, irq, false);
+ irq_put_desc_busunlock(desc, flags);
+ return 0;
}
/**
@@ -241,17 +369,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
*/
void disable_irq_nosync(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- unsigned long flags;
-
- if (!desc)
- return;
-
- chip_bus_lock(desc);
- raw_spin_lock_irqsave(&desc->lock, flags);
- __disable_irq(desc, irq, false);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(desc);
+ __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -269,21 +387,24 @@ EXPORT_SYMBOL(disable_irq_nosync);
*/
void disable_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (!desc)
- return;
-
- disable_irq_nosync(irq);
- if (desc->action)
+ if (!__disable_irq_nosync(irq))
synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
- if (resume)
- desc->status &= ~IRQ_SUSPENDED;
+ if (resume) {
+ if (!(desc->istate & IRQS_SUSPENDED)) {
+ if (!desc->action)
+ return;
+ if (!(desc->action->flags & IRQF_FORCE_RESUME))
+ return;
+ /* Pretend that it got disabled ! */
+ desc->depth++;
+ }
+ desc->istate &= ~IRQS_SUSPENDED;
+ }
switch (desc->depth) {
case 0:
@@ -291,12 +412,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
break;
case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
-
- if (desc->status & IRQ_SUSPENDED)
+ if (desc->istate & IRQS_SUSPENDED)
goto err_out;
/* Prevent probing on this irq: */
- desc->status = status | IRQ_NOPROBE;
+ irq_settings_set_noprobe(desc);
+ irq_enable(desc);
check_irq_resend(desc, irq);
/* fall-through */
}
@@ -318,21 +438,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
*/
void enable_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
if (!desc)
return;
+ if (WARN(!desc->irq_data.chip,
+ KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+ goto out;
- if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
- KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
- return;
-
- chip_bus_lock(desc);
- raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(desc);
+out:
+ irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
@@ -348,7 +465,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
}
/**
- * set_irq_wake - control irq power management wakeup
+ * irq_set_irq_wake - control irq power management wakeup
* @irq: interrupt to control
* @on: enable/disable power management wakeup
*
@@ -359,23 +476,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
* Wakeup mode lets this IRQ wake the system from sleep
* states like "suspend to RAM".
*/
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
int ret = 0;
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- raw_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
if (ret)
desc->wake_depth = 0;
else
- desc->status |= IRQ_WAKEUP;
+ irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
}
} else {
if (desc->wake_depth == 0) {
@@ -385,14 +501,13 @@ int set_irq_wake(unsigned int irq, unsigned int on)
if (ret)
desc->wake_depth = 1;
else
- desc->status &= ~IRQ_WAKEUP;
+ irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
}
}
-
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_put_desc_busunlock(desc, flags);
return ret;
}
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
/*
* Internal function that tells the architecture code whether a
@@ -401,43 +516,27 @@ EXPORT_SYMBOL(set_irq_wake);
*/
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction *action;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ int canrequest = 0;
if (!desc)
return 0;
- if (desc->status & IRQ_NOREQUEST)
- return 0;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- action = desc->action;
- if (action)
- if (irqflags & action->flags & IRQF_SHARED)
- action = NULL;
-
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-
- return !action;
-}
-
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
- /*
- * If the architecture still has not overriden
- * the flow handler then zap the default. This
- * should catch incorrect flow-type setting.
- */
- if (desc->handle_irq == &handle_bad_irq)
- desc->handle_irq = NULL;
+ if (irq_settings_can_request(desc)) {
+ if (desc->action)
+ if (irqflags & desc->action->flags & IRQF_SHARED)
+ canrequest = 1;
+ }
+ irq_put_desc_unlock(desc, flags);
+ return canrequest;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags)
{
- int ret;
struct irq_chip *chip = desc->irq_data.chip;
+ int ret, unmask = 0;
if (!chip || !chip->irq_set_type) {
/*
@@ -449,23 +548,43 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return 0;
}
+ flags &= IRQ_TYPE_SENSE_MASK;
+
+ if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+ if (!(desc->istate & IRQS_MASKED))
+ mask_irq(desc);
+ if (!(desc->istate & IRQS_DISABLED))
+ unmask = 1;
+ }
+
/* caller masked out all except trigger mode flags */
ret = chip->irq_set_type(&desc->irq_data, flags);
- if (ret)
- pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
- flags, irq, chip->irq_set_type);
- else {
- if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- flags |= IRQ_LEVEL;
- /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
- desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
- desc->status |= flags;
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+ irqd_set(&desc->irq_data, flags);
+
+ case IRQ_SET_MASK_OK_NOCOPY:
+ flags = irqd_get_trigger_type(&desc->irq_data);
+ irq_settings_set_trigger_mask(desc, flags);
+ irqd_clear(&desc->irq_data, IRQD_LEVEL);
+ irq_settings_clr_level(desc);
+ if (flags & IRQ_TYPE_LEVEL_MASK) {
+ irq_settings_set_level(desc);
+ irqd_set(&desc->irq_data, IRQD_LEVEL);
+ }
if (chip != desc->irq_data.chip)
irq_chip_set_defaults(desc->irq_data.chip);
+ ret = 0;
+ break;
+ default:
+ pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+ flags, irq, chip->irq_set_type);
}
-
+ if (unmask)
+ unmask_irq(desc);
return ret;
}
@@ -509,8 +628,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
* handler finished. unmask if the interrupt has not been disabled and
* is marked MASKED.
*/
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+ struct irqaction *action, bool force)
{
+ if (!(desc->istate & IRQS_ONESHOT))
+ return;
again:
chip_bus_lock(desc);
raw_spin_lock_irq(&desc->lock);
@@ -522,26 +644,44 @@ again:
* The thread is faster done than the hard interrupt handler
* on the other CPU. If we unmask the irq line then the
* interrupt can come in again and masks the line, leaves due
- * to IRQ_INPROGRESS and the irq line is masked forever.
+ * to IRQS_INPROGRESS and the irq line is masked forever.
+ *
+ * This also serializes the state of shared oneshot handlers
+ * versus "desc->threads_onehsot |= action->thread_mask;" in
+ * irq_wake_thread(). See the comment there which explains the
+ * serialization.
*/
- if (unlikely(desc->status & IRQ_INPROGRESS)) {
+ if (unlikely(desc->istate & IRQS_INPROGRESS)) {
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(desc);
cpu_relax();
goto again;
}
- if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
- desc->status &= ~IRQ_MASKED;
+ /*
+ * Now check again, whether the thread should run. Otherwise
+ * we would clear the threads_oneshot bit of this thread which
+ * was just set.
+ */
+ if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ goto out_unlock;
+
+ desc->threads_oneshot &= ~action->thread_mask;
+
+ if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+ (desc->istate & IRQS_MASKED)) {
+ irq_compat_clr_masked(desc);
+ desc->istate &= ~IRQS_MASKED;
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
+out_unlock:
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(desc);
}
#ifdef CONFIG_SMP
/*
- * Check whether we need to change the affinity of the interrupt thread.
+ * Check whether we need to change the affinity of the interrupt thread.
*/
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -573,6 +713,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static void
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+ local_bh_disable();
+ action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action, false);
+ local_bh_enable();
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+ action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action, false);
+}
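Editorial sketch, not part of the patch: an explicitly threaded handler of this second kind is what a driver gets from request_threaded_irq(); the hard-irq half quiesces the device and returns IRQ_WAKE_THREAD, and the thread half may sleep. The my_* names below are hypothetical.

	static irqreturn_t my_quick_check(int irq, void *dev_id)
	{
		/* silence the device, defer the slow work to the thread */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t my_slow_work(int irq, void *dev_id)
	{
		/* runs in the irq thread, may sleep (e.g. I2C/SPI transfers) */
		return IRQ_HANDLED;
	}

	/* from the driver's probe path: */
	ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
				   IRQF_ONESHOT, "mydev", dev);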
+
+/*
* Interrupt handler thread
*/
static int irq_thread(void *data)
@@ -582,7 +748,14 @@ static int irq_thread(void *data)
};
struct irqaction *action = data;
struct irq_desc *desc = irq_to_desc(action->irq);
- int wake, oneshot = desc->status & IRQ_ONESHOT;
+ void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+ int wake;
+
+ if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+ &action->thread_flags))
+ handler_fn = irq_forced_thread_fn;
+ else
+ handler_fn = irq_thread_fn;
sched_setscheduler(current, SCHED_FIFO, &param);
current->irqaction = action;
@@ -594,23 +767,20 @@ static int irq_thread(void *data)
atomic_inc(&desc->threads_active);
raw_spin_lock_irq(&desc->lock);
- if (unlikely(desc->status & IRQ_DISABLED)) {
+ if (unlikely(desc->istate & IRQS_DISABLED)) {
/*
* CHECKME: We might need a dedicated
* IRQ_THREAD_PENDING flag here, which
* retriggers the thread in check_irq_resend()
- * but AFAICT IRQ_PENDING should be fine as it
+ * but AFAICT IRQS_PENDING should be fine as it
* retriggers the interrupt itself --- tglx
*/
- desc->status |= IRQ_PENDING;
+ irq_compat_set_pending(desc);
+ desc->istate |= IRQS_PENDING;
raw_spin_unlock_irq(&desc->lock);
} else {
raw_spin_unlock_irq(&desc->lock);
-
- action->thread_fn(action->irq, action->dev_id);
-
- if (oneshot)
- irq_finalize_oneshot(action->irq, desc);
+ handler_fn(desc, action);
}
wake = atomic_dec_and_test(&desc->threads_active);
@@ -619,6 +789,9 @@ static int irq_thread(void *data)
wake_up(&desc->wait_for_threads);
}
+ /* Prevent a stale desc->threads_oneshot */
+ irq_finalize_oneshot(desc, action, true);
+
/*
* Clear irqaction. Otherwise exit_irq_thread() would make
* fuzz about an active irq thread going into nirvana.
@@ -633,6 +806,7 @@ static int irq_thread(void *data)
void exit_irq_thread(void)
{
struct task_struct *tsk = current;
+ struct irq_desc *desc;
if (!tsk->irqaction)
return;
@@ -641,6 +815,14 @@ void exit_irq_thread(void)
"exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+ desc = irq_to_desc(tsk->irqaction->irq);
+
+ /*
+ * Prevent a stale desc->threads_oneshot. Must be called
+ * before setting the IRQTF_DIED flag.
+ */
+ irq_finalize_oneshot(desc, tsk->irqaction, true);
+
/*
* Set the THREAD DIED flag to prevent further wakeups of the
* soon to be gone threaded handler.
@@ -648,6 +830,22 @@ void exit_irq_thread(void)
set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+ if (!force_irqthreads)
+ return;
+ if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+ return;
+
+ new->flags |= IRQF_ONESHOT;
+
+ if (!new->thread_fn) {
+ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+ new->thread_fn = new->handler;
+ new->handler = irq_default_primary_handler;
+ }
+}
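Editorial note, not part of the patch: with "threadirqs" on the kernel command line, irq_setup_forced_threading() rewrites an ordinary request roughly as sketched below; IRQF_NO_THREAD, IRQF_PERCPU and IRQF_ONESHOT requests are left alone. my_handler is a hypothetical driver handler.

	/* driver asked for a plain primary handler ... */
	request_irq(irq, my_handler, 0, "mydev", dev);

	/* ... which forced threading turns into, in effect: */
	new->thread_fn = my_handler;
	new->handler   = irq_default_primary_handler;
	new->flags    |= IRQF_ONESHOT;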
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
@@ -657,9 +855,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
struct irqaction *old, **old_ptr;
const char *old_name = NULL;
- unsigned long flags;
- int nested, shared = 0;
- int ret;
+ unsigned long flags, thread_mask = 0;
+ int ret, nested, shared = 0;
+ cpumask_var_t mask;
if (!desc)
return -EINVAL;
@@ -683,15 +881,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
rand_initialize_irq(irq);
}
- /* Oneshot interrupts are not allowed with shared */
- if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
- return -EINVAL;
-
/*
* Check whether the interrupt nests into another interrupt
* thread.
*/
- nested = desc->status & IRQ_NESTED_THREAD;
+ nested = irq_settings_is_nested_thread(desc);
if (nested) {
if (!new->thread_fn)
return -EINVAL;
@@ -701,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* dummy function which warns when called.
*/
new->handler = irq_nested_primary_handler;
+ } else {
+ irq_setup_forced_threading(new);
}
/*
@@ -724,6 +920,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
new->thread = t;
}
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_thread;
+ }
+
/*
* The following block of code has to be executed atomically
*/
@@ -735,29 +936,40 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* Can't share interrupts unless both agree to and are
* the same type (level, edge, polarity). So both flag
* fields must have IRQF_SHARED set and the bits which
- * set the trigger type must match.
+ * set the trigger type must match. Also all must
+ * agree on ONESHOT.
*/
if (!((old->flags & new->flags) & IRQF_SHARED) ||
- ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+ ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+ ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
old_name = old->name;
goto mismatch;
}
-#if defined(CONFIG_IRQ_PER_CPU)
/* All handlers must agree on per-cpuness */
if ((old->flags & IRQF_PERCPU) !=
(new->flags & IRQF_PERCPU))
goto mismatch;
-#endif
/* add new interrupt at end of irq queue */
do {
+ thread_mask |= old->thread_mask;
old_ptr = &old->next;
old = *old_ptr;
} while (old);
shared = 1;
}
+ /*
+ * Setup the thread mask for this irqaction. Unlikely to have
+ * 32 resp 64 irqs sharing one line, but who knows.
+ */
+ if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+ ret = -EBUSY;
+ goto out_mask;
+ }
+ new->thread_mask = 1 << ffz(thread_mask);
+
if (!shared) {
irq_chip_set_defaults(desc->irq_data.chip);
@@ -769,42 +981,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
new->flags & IRQF_TRIGGER_MASK);
if (ret)
- goto out_thread;
- } else
- compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
- if (new->flags & IRQF_PERCPU)
- desc->status |= IRQ_PER_CPU;
-#endif
+ goto out_mask;
+ }
- desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
- IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+ desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+ IRQS_INPROGRESS | IRQS_ONESHOT | \
+ IRQS_WAITING);
+
+ if (new->flags & IRQF_PERCPU) {
+ irqd_set(&desc->irq_data, IRQD_PER_CPU);
+ irq_settings_set_per_cpu(desc);
+ }
if (new->flags & IRQF_ONESHOT)
- desc->status |= IRQ_ONESHOT;
+ desc->istate |= IRQS_ONESHOT;
- if (!(desc->status & IRQ_NOAUTOEN)) {
- desc->depth = 0;
- desc->status &= ~IRQ_DISABLED;
- desc->irq_data.chip->irq_startup(&desc->irq_data);
- } else
+ if (irq_settings_can_autoenable(desc))
+ irq_startup(desc);
+ else
/* Undo nested disables: */
desc->depth = 1;
/* Exclude IRQ from balancing if requested */
- if (new->flags & IRQF_NOBALANCING)
- desc->status |= IRQ_NO_BALANCING;
+ if (new->flags & IRQF_NOBALANCING) {
+ irq_settings_set_no_balancing(desc);
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
/* Set default affinity mask once everything is setup */
- setup_affinity(irq, desc);
-
- } else if ((new->flags & IRQF_TRIGGER_MASK)
- && (new->flags & IRQF_TRIGGER_MASK)
- != (desc->status & IRQ_TYPE_SENSE_MASK)) {
- /* hope the handler works with the actual trigger mode... */
- pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
- irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
- (int)(new->flags & IRQF_TRIGGER_MASK));
+ setup_affinity(irq, desc, mask);
+
+ } else if (new->flags & IRQF_TRIGGER_MASK) {
+ unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+ unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+ if (nmsk != omsk)
+ /* hope the handler works with current trigger mode */
+ pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+ irq, omsk, nmsk);
}
new->irq = irq;
@@ -818,8 +1032,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* Check whether we disabled the irq via the spurious handler
* before. Reenable it and give it another chance.
*/
- if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
- desc->status &= ~IRQ_SPURIOUS_DISABLED;
+ if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+ desc->istate &= ~IRQS_SPURIOUS_DISABLED;
__enable_irq(desc, irq, false);
}
@@ -849,6 +1063,9 @@ mismatch:
#endif
ret = -EBUSY;
+out_mask:
+ free_cpumask_var(mask);
+
out_thread:
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (new->thread) {
@@ -871,9 +1088,14 @@ out_thread:
*/
int setup_irq(unsigned int irq, struct irqaction *act)
{
+ int retval;
struct irq_desc *desc = irq_to_desc(irq);
- return __setup_irq(irq, desc, act);
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, act);
+ chip_bus_sync_unlock(desc);
+
+ return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
@@ -924,13 +1146,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
#endif
/* If this was the last handler, shut down the IRQ line: */
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- if (desc->irq_data.chip->irq_shutdown)
- desc->irq_data.chip->irq_shutdown(&desc->irq_data);
- else
- desc->irq_data.chip->irq_disable(&desc->irq_data);
- }
+ if (!desc->action)
+ irq_shutdown(desc);
#ifdef CONFIG_SMP
/* make sure affinity_hint is cleaned up */
@@ -1004,6 +1221,11 @@ void free_irq(unsigned int irq, void *dev_id)
if (!desc)
return;
+#ifdef CONFIG_SMP
+ if (WARN_ON(desc->affinity_notify))
+ desc->affinity_notify = NULL;
+#endif
+
chip_bus_lock(desc);
kfree(__free_irq(irq, dev_id));
chip_bus_sync_unlock(desc);
@@ -1074,7 +1296,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (!desc)
return -EINVAL;
- if (desc->status & IRQ_NOREQUEST)
+ if (!irq_settings_can_request(desc))
return -EINVAL;
if (!handler) {
@@ -1100,7 +1322,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (retval)
kfree(action);
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
if (!retval && (irqflags & IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
@@ -1149,7 +1371,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
if (!desc)
return -EINVAL;
- if (desc->status & IRQ_NESTED_THREAD) {
+ if (irq_settings_is_nested_thread(desc)) {
ret = request_threaded_irq(irq, NULL, handler,
flags, name, dev_id);
return !ret ? IRQC_IS_NESTED : ret;
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 441fd629ff04..ec4806d4778b 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,23 +4,23 @@
#include "internals.h"
-void move_masked_irq(int irq)
+void irq_move_masked_irq(struct irq_data *idata)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct irq_chip *chip = desc->irq_data.chip;
+ struct irq_desc *desc = irq_data_to_desc(idata);
+ struct irq_chip *chip = idata->chip;
- if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+ if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
return;
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
- if (CHECK_IRQ_PER_CPU(desc->status)) {
+ if (!irqd_can_balance(&desc->irq_data)) {
WARN_ON(1);
return;
}
- desc->status &= ~IRQ_MOVE_PENDING;
+ irqd_clr_move_pending(&desc->irq_data);
if (unlikely(cpumask_empty(desc->pending_mask)))
return;
@@ -53,15 +53,20 @@ void move_masked_irq(int irq)
cpumask_clear(desc->pending_mask);
}
-void move_native_irq(int irq)
+void move_masked_irq(int irq)
+{
+ irq_move_masked_irq(irq_get_irq_data(irq));
+}
+
+void irq_move_irq(struct irq_data *idata)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_desc *desc = irq_data_to_desc(idata);
bool masked;
- if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+ if (likely(!irqd_is_setaffinity_pending(idata)))
return;
- if (unlikely(desc->status & IRQ_DISABLED))
+ if (unlikely(desc->istate & IRQS_DISABLED))
return;
/*
@@ -69,10 +74,15 @@ void move_native_irq(int irq)
* threaded interrupt with ONESHOT set, we can end up with an
* interrupt storm.
*/
- masked = desc->status & IRQ_MASKED;
+ masked = desc->istate & IRQS_MASKED;
if (!masked)
- desc->irq_data.chip->irq_mask(&desc->irq_data);
- move_masked_irq(irq);
+ idata->chip->irq_mask(idata);
+ irq_move_masked_irq(idata);
if (!masked)
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ idata->chip->irq_unmask(idata);
+}
+
+void move_native_irq(int irq)
+{
+ irq_move_irq(irq_get_irq_data(irq));
}
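/*
 * Editor's sketch (not part of the patch): flow handling code that used to
 * call move_native_irq(irq) can now hand its struct irq_data straight to
 * irq_move_irq(); the ack callback below is hypothetical.
 */
static void foo_irq_ack(struct irq_data *data)
{
	/* perform a pending affinity move while the line is being serviced */
	irq_move_irq(data);

	/* chip specific acknowledge would follow here */
}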
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 0d4005d85b03..1329f0eff49e 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -18,7 +18,7 @@
* During system-wide suspend or hibernation device drivers need to be prevented
* from receiving interrupts and this function is provided for this purpose.
* It marks all interrupt lines in use, except for the timer ones, as disabled
- * and sets the IRQ_SUSPENDED flag for each of them.
+ * and sets the IRQS_SUSPENDED flag for each of them.
*/
void suspend_device_irqs(void)
{
@@ -34,7 +34,7 @@ void suspend_device_irqs(void)
}
for_each_irq_desc(irq, desc)
- if (desc->status & IRQ_SUSPENDED)
+ if (desc->istate & IRQS_SUSPENDED)
synchronize_irq(irq);
}
EXPORT_SYMBOL_GPL(suspend_device_irqs);
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs);
* resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
*
* Enable all interrupt lines previously disabled by suspend_device_irqs() that
- * have the IRQ_SUSPENDED flag set.
+ * have the IRQS_SUSPENDED flag set.
*/
void resume_device_irqs(void)
{
@@ -53,9 +53,6 @@ void resume_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
- if (!(desc->status & IRQ_SUSPENDED))
- continue;
-
raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -72,7 +69,8 @@ int check_wakeup_irqs(void)
int irq;
for_each_irq_desc(irq, desc)
- if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
+ if (irqd_is_wakeup_set(&desc->irq_data) &&
+ (desc->istate & IRQS_PENDING))
return -EBUSY;
return 0;
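/*
 * Editor's sketch (not part of the patch): the suspend core is the intended
 * caller of these helpers; a simplified ordering looks like this.
 */
static int foo_enter_suspend(void)
{
	int error;

	suspend_device_irqs();		/* disable lines, set IRQS_SUSPENDED */

	error = check_wakeup_irqs();	/* -EBUSY if a wakeup irq is pending */
	if (error)
		goto resume;

	/* ... enter the platform low power state here ... */

resume:
	resume_device_irqs();		/* re-enable IRQS_SUSPENDED lines */
	return error;
}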
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6c8a2a9f8a7b..4cc2e5ed0bec 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -11,6 +11,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
#include "internals.h"
@@ -24,7 +25,7 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
const struct cpumask *mask = desc->irq_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (desc->status & IRQ_MOVE_PENDING)
+ if (irqd_is_setaffinity_pending(&desc->irq_data))
mask = desc->pending_mask;
#endif
seq_cpumask(m, mask);
@@ -65,8 +66,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
cpumask_var_t new_value;
int err;
- if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
- irq_balancing_disabled(irq))
+ if (!irq_can_set_affinity(irq) || no_irq_affinity)
return -EIO;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
@@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
if (!cpumask_intersects(new_value, cpu_online_mask)) {
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
- err = irq_select_affinity_usr(irq) ? -EINVAL : count;
+ err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
} else {
irq_set_affinity(irq, new_value);
err = count;
@@ -357,3 +357,65 @@ void init_irq_proc(void)
}
}
+#ifdef CONFIG_GENERIC_IRQ_SHOW
+
+int __weak arch_show_interrupts(struct seq_file *p, int prec)
+{
+ return 0;
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ static int prec;
+
+ unsigned long flags, any_count = 0;
+ int i = *(loff_t *) v, j;
+ struct irqaction *action;
+ struct irq_desc *desc;
+
+ if (i > nr_irqs)
+ return 0;
+
+ if (i == nr_irqs)
+ return arch_show_interrupts(p, prec);
+
+ /* print header and calculate the width of the first column */
+ if (i == 0) {
+ for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
+ j *= 10;
+
+ seq_printf(p, "%*s", prec + 8, "");
+ for_each_online_cpu(j)
+ seq_printf(p, "CPU%-8d", j);
+ seq_putc(p, '\n');
+ }
+
+ desc = irq_to_desc(i);
+ if (!desc)
+ return 0;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ for_each_online_cpu(j)
+ any_count |= kstat_irqs_cpu(i, j);
+ action = desc->action;
+ if (!action && !any_count)
+ goto out;
+
+ seq_printf(p, "%*d: ", prec, i);
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, " %8s", desc->irq_data.chip->name);
+ seq_printf(p, "-%-8s", desc->name);
+
+ if (action) {
+ seq_printf(p, " %s", action->name);
+ while ((action = action->next) != NULL)
+ seq_printf(p, ", %s", action->name);
+ }
+
+ seq_putc(p, '\n');
+out:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return 0;
+}
+#endif
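/*
 * Editor's sketch (not part of the patch): an architecture that selects
 * CONFIG_GENERIC_IRQ_SHOW can append its own rows to /proc/interrupts by
 * overriding the __weak hook above; the "FOO" counter is made up.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int cpu;

	seq_printf(p, "%*s: ", prec, "FOO");
	for_each_online_cpu(cpu)
		seq_printf(p, "%10u ", 0u /* per-cpu count would go here */);
	seq_puts(p, "  Hypothetical architecture event\n");
	return 0;
}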
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 891115a929aa..ad683a99b1ec 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
/*
* Run software resends of IRQ's
@@ -55,20 +55,19 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq)
{
- unsigned int status = desc->status;
-
- /*
- * Make sure the interrupt is enabled, before resending it:
- */
- desc->irq_data.chip->irq_enable(&desc->irq_data);
-
/*
* We do not resend level type interrupts. Level type
* interrupts are resent by hardware when they are still
* active.
*/
- if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
+ if (irq_settings_is_level(desc))
+ return;
+ if (desc->istate & IRQS_REPLAY)
+ return;
+ if (desc->istate & IRQS_PENDING) {
+ irq_compat_clr_pending(desc);
+ desc->istate &= ~IRQS_PENDING;
+ desc->istate |= IRQS_REPLAY;
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
new file mode 100644
index 000000000000..0227ad358272
--- /dev/null
+++ b/kernel/irq/settings.h
@@ -0,0 +1,138 @@
+/*
+ * Internal header to deal with irq_desc->status which will be renamed
+ * to irq_desc->settings.
+ */
+enum {
+ _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
+ _IRQ_PER_CPU = IRQ_PER_CPU,
+ _IRQ_LEVEL = IRQ_LEVEL,
+ _IRQ_NOPROBE = IRQ_NOPROBE,
+ _IRQ_NOREQUEST = IRQ_NOREQUEST,
+ _IRQ_NOAUTOEN = IRQ_NOAUTOEN,
+ _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
+ _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
+ _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+};
+
+#define IRQ_INPROGRESS GOT_YOU_MORON
+#define IRQ_REPLAY GOT_YOU_MORON
+#define IRQ_WAITING GOT_YOU_MORON
+#define IRQ_DISABLED GOT_YOU_MORON
+#define IRQ_PENDING GOT_YOU_MORON
+#define IRQ_MASKED GOT_YOU_MORON
+#define IRQ_WAKEUP GOT_YOU_MORON
+#define IRQ_MOVE_PENDING GOT_YOU_MORON
+#define IRQ_PER_CPU GOT_YOU_MORON
+#define IRQ_NO_BALANCING GOT_YOU_MORON
+#define IRQ_AFFINITY_SET GOT_YOU_MORON
+#define IRQ_LEVEL GOT_YOU_MORON
+#define IRQ_NOPROBE GOT_YOU_MORON
+#define IRQ_NOREQUEST GOT_YOU_MORON
+#define IRQ_NOAUTOEN GOT_YOU_MORON
+#define IRQ_NESTED_THREAD GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+ desc->status &= ~(clr & _IRQF_MODIFY_MASK);
+ desc->status |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_NO_BALANCING;
+}
+
+static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
+{
+ return desc->status & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline void
+irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
+{
+ desc->status &= ~IRQ_TYPE_SENSE_MASK;
+ desc->status |= mask & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline bool irq_settings_is_level(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_LEVEL;
+}
+
+static inline void irq_settings_clr_level(struct irq_desc *desc)
+{
+ desc->status &= ~_IRQ_LEVEL;
+}
+
+static inline void irq_settings_set_level(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_LEVEL;
+}
+
+static inline bool irq_settings_can_request(struct irq_desc *desc)
+{
+ return !(desc->status & _IRQ_NOREQUEST);
+}
+
+static inline void irq_settings_clr_norequest(struct irq_desc *desc)
+{
+ desc->status &= ~_IRQ_NOREQUEST;
+}
+
+static inline void irq_settings_set_norequest(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_NOREQUEST;
+}
+
+static inline bool irq_settings_can_probe(struct irq_desc *desc)
+{
+ return !(desc->status & _IRQ_NOPROBE);
+}
+
+static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
+{
+ desc->status &= ~_IRQ_NOPROBE;
+}
+
+static inline void irq_settings_set_noprobe(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_NOPROBE;
+}
+
+static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_MOVE_PCNTXT;
+}
+
+static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
+{
+ return !(desc->status & _IRQ_NOAUTOEN);
+}
+
+static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_NESTED_THREAD;
+}
+
+/* Nothing should touch desc->status from now on */
+#undef status
+#define status USE_THE_PROPER_WRAPPERS_YOU_MORON
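/*
 * Editor's sketch (not part of the patch): core code is expected to reach
 * these bits through the accessors above rather than poking desc->status;
 * e.g. a request-time sanity check could be written as below (hypothetical
 * helper).
 */
static int foo_may_request(struct irq_desc *desc)
{
	if (!irq_settings_can_request(desc))
		return -EINVAL;
	if (irq_settings_is_nested_thread(desc))
		return -EINVAL;
	return 0;
}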
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 3089d3b9d5f3..dd586ebf9c8c 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -21,70 +21,94 @@ static int irqfixup __read_mostly;
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+static int irq_poll_cpu;
+static atomic_t irq_poll_active;
+
+/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+ if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+ "irq poll in progress on cpu %d for irq %d\n",
+ smp_processor_id(), desc->irq_data.irq))
+ return false;
+
+#ifdef CONFIG_SMP
+ do {
+ raw_spin_unlock(&desc->lock);
+ while (desc->istate & IRQS_INPROGRESS)
+ cpu_relax();
+ raw_spin_lock(&desc->lock);
+ } while (desc->istate & IRQS_INPROGRESS);
+ /* Might have been disabled in meantime */
+ return !(desc->istate & IRQS_DISABLED) && desc->action;
+#else
+ return false;
+#endif
+}
+
/*
* Recovery handler for misrouted interrupts.
*/
-static int try_one_irq(int irq, struct irq_desc *desc)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
+ irqreturn_t ret = IRQ_NONE;
struct irqaction *action;
- int ok = 0, work = 0;
raw_spin_lock(&desc->lock);
- /* Already running on another processor */
- if (desc->status & IRQ_INPROGRESS) {
- /*
- * Already running: If it is shared get the other
- * CPU to go looking for our mystery interrupt too
- */
- if (desc->action && (desc->action->flags & IRQF_SHARED))
- desc->status |= IRQ_PENDING;
- raw_spin_unlock(&desc->lock);
- return ok;
- }
- /* Honour the normal IRQ locking */
- desc->status |= IRQ_INPROGRESS;
- action = desc->action;
- raw_spin_unlock(&desc->lock);
- while (action) {
- /* Only shared IRQ handlers are safe to call */
- if (action->flags & IRQF_SHARED) {
- if (action->handler(irq, action->dev_id) ==
- IRQ_HANDLED)
- ok = 1;
- }
- action = action->next;
- }
- local_irq_disable();
- /* Now clean up the flags */
- raw_spin_lock(&desc->lock);
- action = desc->action;
+ /* PER_CPU and nested thread interrupts are never polled */
+ if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+ goto out;
/*
- * While we were looking for a fixup someone queued a real
- * IRQ clashing with our walk:
+ * Do not poll disabled interrupts unless the spurious
+	 * disabled poller asks explicitly.
*/
- while ((desc->status & IRQ_PENDING) && action) {
+ if ((desc->istate & IRQS_DISABLED) && !force)
+ goto out;
+
+ /*
+ * All handlers must agree on IRQF_SHARED, so we test just the
+ * first. Check for action->next as well.
+ */
+ action = desc->action;
+ if (!action || !(action->flags & IRQF_SHARED) ||
+ (action->flags & __IRQF_TIMER) || !action->next)
+ goto out;
+
+ /* Already running on another processor */
+ if (desc->istate & IRQS_INPROGRESS) {
/*
- * Perform real IRQ processing for the IRQ we deferred
+ * Already running: If it is shared get the other
+ * CPU to go looking for our mystery interrupt too
*/
- work = 1;
- raw_spin_unlock(&desc->lock);
- handle_IRQ_event(irq, action);
- raw_spin_lock(&desc->lock);
- desc->status &= ~IRQ_PENDING;
+ irq_compat_set_pending(desc);
+ desc->istate |= IRQS_PENDING;
+ goto out;
}
- desc->status &= ~IRQ_INPROGRESS;
- /*
- * If we did actual work for the real IRQ line we must let the
- * IRQ controller clean up too
- */
- if (work)
- irq_end(irq, desc);
- raw_spin_unlock(&desc->lock);
- return ok;
+ /* Mark it poll in progress */
+ desc->istate |= IRQS_POLL_INPROGRESS;
+ do {
+ if (handle_irq_event(desc) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ action = desc->action;
+ } while ((desc->istate & IRQS_PENDING) && action);
+ desc->istate &= ~IRQS_POLL_INPROGRESS;
+out:
+ raw_spin_unlock(&desc->lock);
+ return ret == IRQ_HANDLED;
}
static int misrouted_irq(int irq)
@@ -92,6 +116,11 @@ static int misrouted_irq(int irq)
struct irq_desc *desc;
int i, ok = 0;
+	if (atomic_inc_return(&irq_poll_active) != 1)
+ goto out;
+
+ irq_poll_cpu = smp_processor_id();
+
for_each_irq_desc(i, desc) {
if (!i)
continue;
@@ -99,9 +128,11 @@ static int misrouted_irq(int irq)
if (i == irq) /* Already tried */
continue;
- if (try_one_irq(i, desc))
+ if (try_one_irq(i, desc, false))
ok = 1;
}
+out:
+ atomic_dec(&irq_poll_active);
/* So the caller can adjust the irq error counts */
return ok;
}
@@ -111,23 +142,28 @@ static void poll_spurious_irqs(unsigned long dummy)
struct irq_desc *desc;
int i;
+ if (atomic_inc_return(&irq_poll_active) != 1)
+ goto out;
+ irq_poll_cpu = smp_processor_id();
+
for_each_irq_desc(i, desc) {
- unsigned int status;
+ unsigned int state;
if (!i)
continue;
/* Racy but it doesn't matter */
- status = desc->status;
+ state = desc->istate;
barrier();
- if (!(status & IRQ_SPURIOUS_DISABLED))
+ if (!(state & IRQS_SPURIOUS_DISABLED))
continue;
local_irq_disable();
- try_one_irq(i, desc);
+ try_one_irq(i, desc, true);
local_irq_enable();
}
-
+out:
+ atomic_dec(&irq_poll_active);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
@@ -139,15 +175,13 @@ static void poll_spurious_irqs(unsigned long dummy)
*
* (The other 100-of-100,000 interrupts may have been a correctly
* functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
*/
-
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
struct irqaction *action;
+ unsigned long flags;
if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
printk(KERN_ERR "irq event %d: bogus return value %x\n",
@@ -159,6 +193,13 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
dump_stack();
printk(KERN_ERR "handlers:\n");
+ /*
+ * We need to take desc->lock here. note_interrupt() is called
+ * w/o desc->lock held, but IRQ_PROGRESS set. We might race
+	 * w/o desc->lock held, but IRQS_INPROGRESS set. We might race
+ * desc->lock here. See synchronize_irq().
+ */
+ raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
while (action) {
printk(KERN_ERR "[<%p>]", action->handler);
@@ -167,6 +208,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
printk("\n");
action = action->next;
}
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
static void
@@ -218,6 +260,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
+ if (desc->istate & IRQS_POLL_INPROGRESS)
+ return;
+
if (unlikely(action_ret != IRQ_HANDLED)) {
/*
* If we are seeing only the odd spurious IRQ caused by
@@ -254,9 +299,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
* Now kill the IRQ
*/
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
- desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+ desc->istate |= IRQS_SPURIOUS_DISABLED;
desc->depth++;
- desc->irq_data.chip->irq_disable(&desc->irq_data);
+ irq_disable(desc);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
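/*
 * Editor's sketch (not part of the patch): misrouted_irq() and
 * poll_spurious_irqs() both rely on the same "single active poller" idiom
 * built from atomic_inc_return(); reduced to its core it reads:
 */
static atomic_t foo_poll_active;

static void foo_poll_all(void)
{
	/* only the first caller sees 1 and may poll, everybody else bails */
	if (atomic_inc_return(&foo_poll_active) != 1)
		goto out;

	/* ... walk the irq descriptors and poll them ... */
out:
	atomic_dec(&foo_poll_active);
}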
diff --git a/kernel/params.c b/kernel/params.c
index 0da1411222b9..09a08cb7f70d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -821,15 +821,18 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
return sprintf(buf, "%s\n", vattr->version);
}
-extern struct module_version_attribute __start___modver[], __stop___modver[];
+extern const struct module_version_attribute *__start___modver[];
+extern const struct module_version_attribute *__stop___modver[];
static void __init version_sysfs_builtin(void)
{
- const struct module_version_attribute *vattr;
+ const struct module_version_attribute **p;
struct module_kobject *mk;
int err;
- for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+ for (p = __start___modver; p < __stop___modver; p++) {
+ const struct module_version_attribute *vattr = *p;
+
mk = locate_module_kobject(vattr->module_name);
if (mk) {
err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..64a018e94fca 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,13 +38,96 @@
#include <asm/irq_regs.h>
+struct remote_function_call {
+ struct task_struct *p;
+ int (*func)(void *info);
+ void *info;
+ int ret;
+};
+
+static void remote_function(void *data)
+{
+ struct remote_function_call *tfc = data;
+ struct task_struct *p = tfc->p;
+
+ if (p) {
+ tfc->ret = -EAGAIN;
+ if (task_cpu(p) != smp_processor_id() || !task_curr(p))
+ return;
+ }
+
+ tfc->ret = tfc->func(tfc->info);
+}
+
+/**
+ * task_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ *
+ * returns: @func return value, or
+ * -ESRCH - when the process isn't running
+ * -EAGAIN - when the process moved away
+ */
+static int
+task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
+{
+ struct remote_function_call data = {
+ .p = p,
+ .func = func,
+ .info = info,
+ .ret = -ESRCH, /* No such (running) process */
+ };
+
+ if (task_curr(p))
+ smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+
+ return data.ret;
+}
+
+/**
+ * cpu_function_call - call a function on the cpu
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func on the remote cpu.
+ *
+ * returns: @func return value or -ENXIO when the cpu is offline
+ */
+static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
+{
+ struct remote_function_call data = {
+ .p = NULL,
+ .func = func,
+ .info = info,
+ .ret = -ENXIO, /* No such CPU */
+ };
+
+ smp_call_function_single(cpu, remote_function, &data, 1);
+
+ return data.ret;
+}
+
+#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
+ PERF_FLAG_FD_OUTPUT |\
+ PERF_FLAG_PID_CGROUP)
+
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
-atomic_t perf_task_events __read_mostly;
+/*
+ * perf_sched_events : >0 events exist
+ * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
+ */
+atomic_t perf_sched_events __read_mostly;
+static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
+
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
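/*
 * Editor's sketch (not part of the patch): task_function_call() and
 * cpu_function_call() report whether @func actually ran, so callers retry
 * instead of guessing; foo_disable() below is hypothetical.
 */
static int foo_disable(void *info)
{
	/* runs on the target CPU, typically in IPI context */
	return 0;
}

static void foo_disable_for_task(struct task_struct *p, void *info)
{
	/* -ESRCH: task not running; -EAGAIN: it moved away before we got there */
	while (task_function_call(p, foo_disable, info) == -EAGAIN)
		cpu_relax();	/* real callers re-check state under ctx->lock */
}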
@@ -67,7 +150,24 @@ int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
/*
* max perf event sample rate
*/
-int sysctl_perf_event_sample_rate __read_mostly = 100000;
+#define DEFAULT_MAX_SAMPLE_RATE 100000
+int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
+static int max_samples_per_tick __read_mostly =
+ DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
+
+int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+
+ return 0;
+}
static atomic64_t perf_event_id;
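/*
 * Editor's sketch (not part of the patch): perf_proc_update_handler() is
 * meant to back a writable sysctl so that changing the sample rate also
 * refreshes max_samples_per_tick.  The entry below only illustrates the
 * shape of that wiring; the real table lives outside this file.
 */
static struct ctl_table foo_perf_sysctl[] = {
	{
		.procname	= "perf_event_max_sample_rate",
		.data		= &sysctl_perf_event_sample_rate,
		.maxlen		= sizeof(sysctl_perf_event_sample_rate),
		.mode		= 0644,
		.proc_handler	= perf_proc_update_handler,
	},
	{ }
};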
@@ -75,7 +175,11 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type);
+ enum event_type_t event_type,
+ struct task_struct *task);
+
+static void update_context_time(struct perf_event_context *ctx);
+static u64 perf_event_time(struct perf_event *event);
void __weak perf_event_print_debug(void) { }
@@ -89,6 +193,357 @@ static inline u64 perf_clock(void)
return local_clock();
}
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+ return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+ return container_of(task_subsys_state(task, perf_subsys_id),
+ struct perf_cgroup, css);
+}
+
+static inline bool
+perf_cgroup_match(struct perf_event *event)
+{
+ struct perf_event_context *ctx = event->ctx;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+
+ return !event->cgrp || event->cgrp == cpuctx->cgrp;
+}
+
+static inline void perf_get_cgroup(struct perf_event *event)
+{
+ css_get(&event->cgrp->css);
+}
+
+static inline void perf_put_cgroup(struct perf_event *event)
+{
+ css_put(&event->cgrp->css);
+}
+
+static inline void perf_detach_cgroup(struct perf_event *event)
+{
+ perf_put_cgroup(event);
+ event->cgrp = NULL;
+}
+
+static inline int is_cgroup_event(struct perf_event *event)
+{
+ return event->cgrp != NULL;
+}
+
+static inline u64 perf_cgroup_event_time(struct perf_event *event)
+{
+ struct perf_cgroup_info *t;
+
+ t = per_cpu_ptr(event->cgrp->info, event->cpu);
+ return t->time;
+}
+
+static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
+{
+ struct perf_cgroup_info *info;
+ u64 now;
+
+ now = perf_clock();
+
+ info = this_cpu_ptr(cgrp->info);
+
+ info->time += now - info->timestamp;
+ info->timestamp = now;
+}
+
+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
+{
+ struct perf_cgroup *cgrp_out = cpuctx->cgrp;
+ if (cgrp_out)
+ __update_cgrp_time(cgrp_out);
+}
+
+static inline void update_cgrp_time_from_event(struct perf_event *event)
+{
+ struct perf_cgroup *cgrp;
+
+ /*
+ * ensure we access cgroup data only when needed and
+ * when we know the cgroup is pinned (css_get)
+ */
+ if (!is_cgroup_event(event))
+ return;
+
+ cgrp = perf_cgroup_from_task(current);
+ /*
+ * Do not update time when cgroup is not active
+ */
+ if (cgrp == event->cgrp)
+ __update_cgrp_time(event->cgrp);
+}
+
+static inline void
+perf_cgroup_set_timestamp(struct task_struct *task,
+ struct perf_event_context *ctx)
+{
+ struct perf_cgroup *cgrp;
+ struct perf_cgroup_info *info;
+
+ /*
+ * ctx->lock held by caller
+ * ensure we do not access cgroup data
+ * unless we have the cgroup pinned (css_get)
+ */
+ if (!task || !ctx->nr_cgroups)
+ return;
+
+ cgrp = perf_cgroup_from_task(task);
+ info = this_cpu_ptr(cgrp->info);
+ info->timestamp = ctx->timestamp;
+}
+
+#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
+#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
+
+/*
+ * reschedule events based on the cgroup constraint of task.
+ *
+ * mode SWOUT : schedule out everything
+ * mode SWIN : schedule in based on cgroup for next
+ */
+void perf_cgroup_switch(struct task_struct *task, int mode)
+{
+ struct perf_cpu_context *cpuctx;
+ struct pmu *pmu;
+ unsigned long flags;
+
+ /*
+	 * disable interrupts to avoid getting nr_cgroups
+ * changes via __perf_event_disable(). Also
+ * avoids preemption.
+ */
+ local_irq_save(flags);
+
+ /*
+ * we reschedule only in the presence of cgroup
+ * constrained events.
+ */
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+
+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+ perf_pmu_disable(cpuctx->ctx.pmu);
+
+ /*
+ * perf_cgroup_events says at least one
+ * context on this CPU has cgroup events.
+ *
+ * ctx->nr_cgroups reports the number of cgroup
+ * events for a context.
+ */
+ if (cpuctx->ctx.nr_cgroups > 0) {
+
+ if (mode & PERF_CGROUP_SWOUT) {
+ cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+ /*
+ * must not be done before ctxswout due
+ * to event_filter_match() in event_sched_out()
+ */
+ cpuctx->cgrp = NULL;
+ }
+
+ if (mode & PERF_CGROUP_SWIN) {
+ /* set cgrp before ctxsw in to
+ * allow event_filter_match() to not
+ * have to pass task around
+ */
+ cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+ }
+ }
+
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ }
+
+ rcu_read_unlock();
+
+ local_irq_restore(flags);
+}
+
+static inline void perf_cgroup_sched_out(struct task_struct *task)
+{
+ perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+}
+
+static inline void perf_cgroup_sched_in(struct task_struct *task)
+{
+ perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+}
+
+static inline int perf_cgroup_connect(int fd, struct perf_event *event,
+ struct perf_event_attr *attr,
+ struct perf_event *group_leader)
+{
+ struct perf_cgroup *cgrp;
+ struct cgroup_subsys_state *css;
+ struct file *file;
+ int ret = 0, fput_needed;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+
+ css = cgroup_css_from_dir(file, perf_subsys_id);
+	if (IS_ERR(css)) {
+		ret = PTR_ERR(css);
+		goto out;
+	}
+
+ cgrp = container_of(css, struct perf_cgroup, css);
+ event->cgrp = cgrp;
+
+ /*
+ * all events in a group must monitor
+ * the same cgroup because a task belongs
+ * to only one perf cgroup at a time
+ */
+ if (group_leader && group_leader->cgrp != cgrp) {
+ perf_detach_cgroup(event);
+ ret = -EINVAL;
+ } else {
+ /* must be done before we fput() the file */
+ perf_get_cgroup(event);
+ }
+out:
+	fput_light(file, fput_needed);
+ return ret;
+}
+
+static inline void
+perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+{
+ struct perf_cgroup_info *t;
+ t = per_cpu_ptr(event->cgrp->info, event->cpu);
+ event->shadow_ctx_time = now - t->timestamp;
+}
+
+static inline void
+perf_cgroup_defer_enabled(struct perf_event *event)
+{
+ /*
+ * when the current task's perf cgroup does not match
+	 * the event's, we need to remember to call the
+	 * perf_cgroup_mark_enabled() function the first time a task with
+ * a matching perf cgroup is scheduled in.
+ */
+ if (is_cgroup_event(event) && !perf_cgroup_match(event))
+ event->cgrp_defer_enabled = 1;
+}
+
+static inline void
+perf_cgroup_mark_enabled(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ struct perf_event *sub;
+ u64 tstamp = perf_event_time(event);
+
+ if (!event->cgrp_defer_enabled)
+ return;
+
+ event->cgrp_defer_enabled = 0;
+
+ event->tstamp_enabled = tstamp - event->total_time_enabled;
+ list_for_each_entry(sub, &event->sibling_list, group_entry) {
+ if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
+ sub->tstamp_enabled = tstamp - sub->total_time_enabled;
+ sub->cgrp_defer_enabled = 0;
+ }
+ }
+}
+#else /* !CONFIG_CGROUP_PERF */
+
+static inline bool
+perf_cgroup_match(struct perf_event *event)
+{
+ return true;
+}
+
+static inline void perf_detach_cgroup(struct perf_event *event)
+{}
+
+static inline int is_cgroup_event(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline void update_cgrp_time_from_event(struct perf_event *event)
+{
+}
+
+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
+{
+}
+
+static inline void perf_cgroup_sched_out(struct task_struct *task)
+{
+}
+
+static inline void perf_cgroup_sched_in(struct task_struct *task)
+{
+}
+
+static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
+ struct perf_event_attr *attr,
+ struct perf_event *group_leader)
+{
+ return -EINVAL;
+}
+
+static inline void
+perf_cgroup_set_timestamp(struct task_struct *task,
+ struct perf_event_context *ctx)
+{
+}
+
+void
+perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
+{
+}
+
+static inline void
+perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+{
+}
+
+static inline u64 perf_cgroup_event_time(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline void
+perf_cgroup_defer_enabled(struct perf_event *event)
+{
+}
+
+static inline void
+perf_cgroup_mark_enabled(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+}
+#endif
+
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -254,7 +709,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- put_ctx(ctx);
}
/*
@@ -271,6 +725,10 @@ static void update_context_time(struct perf_event_context *ctx)
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
+
+ if (is_cgroup_event(event))
+ return perf_cgroup_event_time(event);
+
return ctx ? ctx->time : 0;
}
@@ -285,9 +743,20 @@ static void update_event_times(struct perf_event *event)
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
-
- if (ctx->is_active)
+ /*
+ * in cgroup mode, time_enabled represents
+ * the time the event was enabled AND active
+ * tasks were in the monitored cgroup. This is
+ * independent of the activity of the context as
+ * there may be a mix of cgroup and non-cgroup events.
+ *
+ * That is why we treat cgroup events differently
+ * here.
+ */
+ if (is_cgroup_event(event))
run_end = perf_event_time(event);
+ else if (ctx->is_active)
+ run_end = ctx->time;
else
run_end = event->tstamp_stopped;
@@ -299,6 +768,7 @@ static void update_event_times(struct perf_event *event)
run_end = perf_event_time(event);
event->total_time_running = run_end - event->tstamp_running;
+
}
/*
@@ -347,6 +817,17 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
list_add_tail(&event->group_entry, list);
}
+ if (is_cgroup_event(event)) {
+ ctx->nr_cgroups++;
+ /*
+ * one more event:
+ * - that has cgroup constraint on event->cpu
+ * - that may need work on context switch
+ */
+ atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+ jump_label_inc(&perf_sched_events);
+ }
+
list_add_rcu(&event->event_entry, &ctx->event_list);
if (!ctx->nr_events)
perf_pmu_rotate_start(ctx->pmu);
@@ -473,6 +954,12 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
event->attach_state &= ~PERF_ATTACH_CONTEXT;
+ if (is_cgroup_event(event)) {
+ ctx->nr_cgroups--;
+ atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
+ jump_label_dec(&perf_sched_events);
+ }
+
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
@@ -544,7 +1031,8 @@ out:
static inline int
event_filter_match(struct perf_event *event)
{
- return event->cpu == -1 || event->cpu == smp_processor_id();
+ return (event->cpu == -1 || event->cpu == smp_processor_id())
+ && perf_cgroup_match(event);
}
static void
@@ -562,7 +1050,7 @@ event_sched_out(struct perf_event *event,
*/
if (event->state == PERF_EVENT_STATE_INACTIVE
&& !event_filter_match(event)) {
- delta = ctx->time - event->tstamp_stopped;
+ delta = tstamp - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = tstamp;
}
@@ -606,47 +1094,30 @@ group_sched_out(struct perf_event *group_event,
cpuctx->exclusive = 0;
}
-static inline struct perf_cpu_context *
-__get_cpu_context(struct perf_event_context *ctx)
-{
- return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
-}
-
/*
* Cross CPU call to remove a performance event
*
* We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
-static void __perf_event_remove_from_context(void *info)
+static int __perf_remove_from_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
- /*
- * If this is a task context, we need to check whether it is
- * the current task context of this cpu. If not it has been
- * scheduled out before the smp call arrived.
- */
- if (ctx->task && cpuctx->task_ctx != ctx)
- return;
-
raw_spin_lock(&ctx->lock);
-
event_sched_out(event, cpuctx, ctx);
-
list_del_event(event, ctx);
-
raw_spin_unlock(&ctx->lock);
+
+ return 0;
}
/*
* Remove the event from a task's (or a CPU's) list of events.
*
- * Must be called with ctx->mutex held.
- *
* CPU events are removed with a smp call. For task events we only
* call when the task is on a CPU.
*
@@ -657,49 +1128,48 @@ static void __perf_event_remove_from_context(void *info)
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_event_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
+ lockdep_assert_held(&ctx->mutex);
+
if (!task) {
/*
* Per cpu events are removed via an smp call and
* the removal is always successful.
*/
- smp_call_function_single(event->cpu,
- __perf_event_remove_from_context,
- event, 1);
+ cpu_function_call(event->cpu, __perf_remove_from_context, event);
return;
}
retry:
- task_oncpu_function_call(task, __perf_event_remove_from_context,
- event);
+ if (!task_function_call(task, __perf_remove_from_context, event))
+ return;
raw_spin_lock_irq(&ctx->lock);
/*
- * If the context is active we need to retry the smp call.
+ * If we failed to find a running task, but find the context active now
+ * that we've acquired the ctx->lock, retry.
*/
- if (ctx->nr_active && !list_empty(&event->group_entry)) {
+ if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
- * The lock prevents that this context is scheduled in so we
- * can remove the event safely, if the call above did not
- * succeed.
+	 * Since the task isn't running, it's safe to remove the event; holding
+	 * ctx->lock ensures the task won't get scheduled in.
*/
- if (!list_empty(&event->group_entry))
- list_del_event(event, ctx);
+ list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Cross CPU call to disable a performance event
*/
-static void __perf_event_disable(void *info)
+static int __perf_event_disable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -708,9 +1178,12 @@ static void __perf_event_disable(void *info)
/*
* If this is a per-task event, need to check whether this
* event's task is the current task on this cpu.
+ *
+ * Can trigger due to concurrent perf_event_context_sched_out()
+ * flipping contexts around.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
- return;
+ return -EINVAL;
raw_spin_lock(&ctx->lock);
@@ -720,6 +1193,7 @@ static void __perf_event_disable(void *info)
*/
if (event->state >= PERF_EVENT_STATE_INACTIVE) {
update_context_time(ctx);
+ update_cgrp_time_from_event(event);
update_group_times(event);
if (event == event->group_leader)
group_sched_out(event, cpuctx, ctx);
@@ -729,6 +1203,8 @@ static void __perf_event_disable(void *info)
}
raw_spin_unlock(&ctx->lock);
+
+ return 0;
}
/*
@@ -753,13 +1229,13 @@ void perf_event_disable(struct perf_event *event)
/*
* Disable the event on the cpu that it's on
*/
- smp_call_function_single(event->cpu, __perf_event_disable,
- event, 1);
+ cpu_function_call(event->cpu, __perf_event_disable, event);
return;
}
retry:
- task_oncpu_function_call(task, __perf_event_disable, event);
+ if (!task_function_call(task, __perf_event_disable, event))
+ return;
raw_spin_lock_irq(&ctx->lock);
/*
@@ -767,6 +1243,11 @@ retry:
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
raw_spin_unlock_irq(&ctx->lock);
+ /*
+ * Reload the task pointer, it might have been changed by
+ * a concurrent perf_event_context_sched_out().
+ */
+ task = ctx->task;
goto retry;
}
@@ -778,10 +1259,48 @@ retry:
update_group_times(event);
event->state = PERF_EVENT_STATE_OFF;
}
-
raw_spin_unlock_irq(&ctx->lock);
}
+static void perf_set_shadow_time(struct perf_event *event,
+ struct perf_event_context *ctx,
+ u64 tstamp)
+{
+ /*
+ * use the correct time source for the time snapshot
+ *
+ * We could get by without this by leveraging the
+ * fact that to get to this function, the caller
+ * has most likely already called update_context_time()
+ * and update_cgrp_time_xx() and thus both timestamp
+	 * and update_cgrp_time_xx() and thus both timestamps
+	 * are identical (or very close). Given that tstamp is
+ * tstamp - ctx->timestamp
+ * is equivalent to
+ * tstamp - cgrp->timestamp.
+ *
+ * Then, in perf_output_read(), the calculation would
+ * work with no changes because:
+ * - event is guaranteed scheduled in
+ * - no scheduled out in between
+ * - thus the timestamp would be the same
+ *
+ * But this is a bit hairy.
+ *
+ * So instead, we have an explicit cgroup call to remain
+	 * within the same time source all along. We believe it
+ * is cleaner and simpler to understand.
+ */
+ if (is_cgroup_event(event))
+ perf_cgroup_set_shadow_time(event, tstamp);
+ else
+ event->shadow_ctx_time = tstamp - ctx->timestamp;
+}
+
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
static int
event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
@@ -794,6 +1313,17 @@ event_sched_in(struct perf_event *event,
event->state = PERF_EVENT_STATE_ACTIVE;
event->oncpu = smp_processor_id();
+
+ /*
+ * Unthrottle events, since we scheduled we might have missed several
+ * ticks already, also for a heavily scheduling task there is little
+ * guarantee it'll get a tick in a timely manner.
+ */
+ if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+ perf_log_throttle(event, 1);
+ event->hw.interrupts = 0;
+ }
+
/*
* The new state must be visible before we turn it on in the hardware:
*/
@@ -807,7 +1337,7 @@ event_sched_in(struct perf_event *event,
event->tstamp_running += tstamp - event->tstamp_stopped;
- event->shadow_ctx_time = tstamp - ctx->timestamp;
+ perf_set_shadow_time(event, ctx, tstamp);
if (!is_software_event(event))
cpuctx->active_oncpu++;
@@ -928,12 +1458,15 @@ static void add_event_to_ctx(struct perf_event *event,
event->tstamp_stopped = tstamp;
}
+static void perf_event_context_sched_in(struct perf_event_context *ctx,
+ struct task_struct *tsk);
+
/*
* Cross CPU call to install and enable a performance event
*
* Must be called with ctx->mutex held
*/
-static void __perf_install_in_context(void *info)
+static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -942,21 +1475,22 @@ static void __perf_install_in_context(void *info)
int err;
/*
- * If this is a task context, we need to check whether it is
- * the current task context of this cpu. If not it has been
- * scheduled out before the smp call arrived.
- * Or possibly this is the right context but it isn't
- * on this cpu because it had no events.
+	 * We may be installing a new context to an already running task;
+	 * this can also happen before perf_event_task_sched_in() on
+	 * architectures which do context switches with IRQs enabled.
*/
- if (ctx->task && cpuctx->task_ctx != ctx) {
- if (cpuctx->task_ctx || ctx->task != current)
- return;
- cpuctx->task_ctx = ctx;
- }
+ if (ctx->task && !cpuctx->task_ctx)
+ perf_event_context_sched_in(ctx, ctx->task);
raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
+ /*
+ * update cgrp time only if current cgrp
+ * matches event->cgrp. Must be done before
+ * calling add_event_to_ctx()
+ */
+ update_cgrp_time_from_event(event);
add_event_to_ctx(event, ctx);
@@ -997,6 +1531,8 @@ static void __perf_install_in_context(void *info)
unlock:
raw_spin_unlock(&ctx->lock);
+
+ return 0;
}
/*
@@ -1008,8 +1544,6 @@ unlock:
* If the event is attached to a task which is on a CPU we use a smp
* call to enable it in the task context. The task might have been
* scheduled away, but we check this in the smp call again.
- *
- * Must be called with ctx->mutex held.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
@@ -1018,6 +1552,8 @@ perf_install_in_context(struct perf_event_context *ctx,
{
struct task_struct *task = ctx->task;
+ lockdep_assert_held(&ctx->mutex);
+
event->ctx = ctx;
if (!task) {
@@ -1025,31 +1561,29 @@ perf_install_in_context(struct perf_event_context *ctx,
* Per cpu events are installed via an smp call and
* the install is always successful.
*/
- smp_call_function_single(cpu, __perf_install_in_context,
- event, 1);
+ cpu_function_call(cpu, __perf_install_in_context, event);
return;
}
retry:
- task_oncpu_function_call(task, __perf_install_in_context,
- event);
+ if (!task_function_call(task, __perf_install_in_context, event))
+ return;
raw_spin_lock_irq(&ctx->lock);
/*
- * we need to retry the smp call.
+ * If we failed to find a running task, but find the context active now
+ * that we've acquired the ctx->lock, retry.
*/
- if (ctx->is_active && list_empty(&event->group_entry)) {
+ if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
- * The lock prevents that this context is scheduled in so we
- * can add the event safely, if it the call above did not
- * succeed.
+	 * Since the task isn't running, it's safe to add the event; holding
+	 * ctx->lock ensures the task won't get scheduled in.
*/
- if (list_empty(&event->group_entry))
- add_event_to_ctx(event, ctx);
+ add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
@@ -1078,7 +1612,7 @@ static void __perf_event_mark_enabled(struct perf_event *event,
/*
* Cross CPU call to enable a performance event
*/
-static void __perf_event_enable(void *info)
+static int __perf_event_enable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -1086,26 +1620,27 @@ static void __perf_event_enable(void *info)
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err;
- /*
- * If this is a per-task event, need to check whether this
- * event's task is the current task on this cpu.
- */
- if (ctx->task && cpuctx->task_ctx != ctx) {
- if (cpuctx->task_ctx || ctx->task != current)
- return;
- cpuctx->task_ctx = ctx;
- }
+ if (WARN_ON_ONCE(!ctx->is_active))
+ return -EINVAL;
raw_spin_lock(&ctx->lock);
- ctx->is_active = 1;
update_context_time(ctx);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto unlock;
+
+ /*
+ * set current task's cgroup time reference point
+ */
+ perf_cgroup_set_timestamp(current, ctx);
+
__perf_event_mark_enabled(event, ctx);
- if (!event_filter_match(event))
+ if (!event_filter_match(event)) {
+ if (is_cgroup_event(event))
+ perf_cgroup_defer_enabled(event);
goto unlock;
+ }
/*
* If the event is in a group and isn't the group leader,
@@ -1138,6 +1673,8 @@ static void __perf_event_enable(void *info)
unlock:
raw_spin_unlock(&ctx->lock);
+
+ return 0;
}
/*
@@ -1158,8 +1695,7 @@ void perf_event_enable(struct perf_event *event)
/*
* Enable the event on the cpu that it's on
*/
- smp_call_function_single(event->cpu, __perf_event_enable,
- event, 1);
+ cpu_function_call(event->cpu, __perf_event_enable, event);
return;
}
@@ -1178,8 +1714,15 @@ void perf_event_enable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
retry:
+ if (!ctx->is_active) {
+ __perf_event_mark_enabled(event, ctx);
+ goto out;
+ }
+
raw_spin_unlock_irq(&ctx->lock);
- task_oncpu_function_call(task, __perf_event_enable, event);
+
+ if (!task_function_call(task, __perf_event_enable, event))
+ return;
raw_spin_lock_irq(&ctx->lock);
@@ -1187,15 +1730,14 @@ retry:
* If the context is active and the event is still off,
* we need to retry the cross-call.
*/
- if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
+ if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
+ /*
+ * task could have been flipped by a concurrent
+ * perf_event_context_sched_out()
+ */
+ task = ctx->task;
goto retry;
-
- /*
- * Since we have the lock this context can't be scheduled
- * in, so we can change the state safely.
- */
- if (event->state == PERF_EVENT_STATE_OFF)
- __perf_event_mark_enabled(event, ctx);
+ }
out:
raw_spin_unlock_irq(&ctx->lock);
@@ -1227,6 +1769,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (likely(!ctx->nr_events))
goto out;
update_context_time(ctx);
+ update_cgrp_time_from_cpuctx(cpuctx);
if (!ctx->nr_active)
goto out;
@@ -1339,8 +1882,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
}
}
-void perf_event_context_sched_out(struct task_struct *task, int ctxn,
- struct task_struct *next)
+static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+ struct task_struct *next)
{
struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
@@ -1416,6 +1959,14 @@ void __perf_event_task_sched_out(struct task_struct *task,
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
+
+ /*
+ * if cgroup events exist on this CPU, then we need
+ * to check if we have to switch out PMU state.
+	 * cgroup events are system-wide mode only
+ */
+ if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+ perf_cgroup_sched_out(task);
}
static void task_ctx_sched_out(struct perf_event_context *ctx,
@@ -1454,6 +2005,10 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
if (!event_filter_match(event))
continue;
+ /* may need to reset tstamp_enabled */
+ if (is_cgroup_event(event))
+ perf_cgroup_mark_enabled(event, ctx);
+
if (group_can_go_on(event, cpuctx, 1))
group_sched_in(event, cpuctx, ctx);
@@ -1486,6 +2041,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
if (!event_filter_match(event))
continue;
+ /* may need to reset tstamp_enabled */
+ if (is_cgroup_event(event))
+ perf_cgroup_mark_enabled(event, ctx);
+
if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
@@ -1496,15 +2055,19 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
- enum event_type_t event_type)
+ enum event_type_t event_type,
+ struct task_struct *task)
{
+ u64 now;
+
raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_events))
goto out;
- ctx->timestamp = perf_clock();
-
+ now = perf_clock();
+ ctx->timestamp = now;
+ perf_cgroup_set_timestamp(task, ctx);
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
@@ -1521,11 +2084,12 @@ out:
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type)
+ enum event_type_t event_type,
+ struct task_struct *task)
{
struct perf_event_context *ctx = &cpuctx->ctx;
- ctx_sched_in(ctx, cpuctx, event_type);
+ ctx_sched_in(ctx, cpuctx, event_type, task);
}
static void task_ctx_sched_in(struct perf_event_context *ctx,
@@ -1533,15 +2097,16 @@ static void task_ctx_sched_in(struct perf_event_context *ctx,
{
struct perf_cpu_context *cpuctx;
- cpuctx = __get_cpu_context(ctx);
+ cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
- ctx_sched_in(ctx, cpuctx, event_type);
+ ctx_sched_in(ctx, cpuctx, event_type, NULL);
cpuctx->task_ctx = ctx;
}
-void perf_event_context_sched_in(struct perf_event_context *ctx)
+static void perf_event_context_sched_in(struct perf_event_context *ctx,
+ struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
@@ -1557,9 +2122,9 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
- ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
cpuctx->task_ctx = ctx;
@@ -1592,14 +2157,17 @@ void __perf_event_task_sched_in(struct task_struct *task)
if (likely(!ctx))
continue;
- perf_event_context_sched_in(ctx);
+ perf_event_context_sched_in(ctx, task);
}
+ /*
+ * if cgroup events exist on this CPU, then we need
+ * to check if we have to switch in PMU state.
+	 * cgroup events are system-wide mode only
+ */
+ if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+ perf_cgroup_sched_in(task);
}
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
@@ -1627,7 +2195,7 @@ static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
* Reduce accuracy by one bit such that @a and @b converge
* to a similar magnitude.
*/
-#define REDUCE_FLS(a, b) \
+#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
@@ -1797,7 +2365,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
if (ctx)
rotate_ctx(ctx);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
if (ctx)
task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
@@ -1876,7 +2444,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
raw_spin_unlock(&ctx->lock);
- perf_event_context_sched_in(ctx);
+ perf_event_context_sched_in(ctx, ctx->task);
out:
local_irq_restore(flags);
}
@@ -1901,8 +2469,10 @@ static void __perf_event_read(void *info)
return;
raw_spin_lock(&ctx->lock);
- if (ctx->is_active)
+ if (ctx->is_active) {
update_context_time(ctx);
+ update_cgrp_time_from_event(event);
+ }
update_event_times(event);
if (event->state == PERF_EVENT_STATE_ACTIVE)
event->pmu->read(event);
@@ -1933,8 +2503,10 @@ static u64 perf_event_read(struct perf_event *event)
* (e.g., thread is blocked), in that case
* we cannot update context time
*/
- if (ctx->is_active)
+ if (ctx->is_active) {
update_context_time(ctx);
+ update_cgrp_time_from_event(event);
+ }
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
@@ -2213,6 +2785,9 @@ errout:
}
+/*
+ * Returns a matching context with refcount and pincount.
+ */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
@@ -2237,6 +2812,7 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
+ ++ctx->pin_count;
return ctx;
}
@@ -2250,6 +2826,7 @@ retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
unclone_ctx(ctx);
+ ++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
@@ -2271,8 +2848,10 @@ retry:
err = -ESRCH;
else if (task->perf_event_ctxp[ctxn])
err = -EAGAIN;
- else
+ else {
+ ++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+ }
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
@@ -2312,7 +2891,7 @@ static void free_event(struct perf_event *event)
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_dec(&perf_task_events);
+ jump_label_dec(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
@@ -2328,6 +2907,9 @@ static void free_event(struct perf_event *event)
event->buffer = NULL;
}
+ if (is_cgroup_event(event))
+ perf_detach_cgroup(event);
+
if (event->destroy)
event->destroy(event);
@@ -4395,26 +4977,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
if (unlikely(!is_sampling_event(event)))
return 0;
- if (!throttle) {
- hwc->interrupts++;
- } else {
- if (hwc->interrupts != MAX_INTERRUPTS) {
- hwc->interrupts++;
- if (HZ * hwc->interrupts >
- (u64)sysctl_perf_event_sample_rate) {
- hwc->interrupts = MAX_INTERRUPTS;
- perf_log_throttle(event, 0);
- ret = 1;
- }
- } else {
- /*
- * Keep re-disabling events even though on the previous
- * pass we disabled it - just in case we raced with a
- * sched-in and the event got enabled again:
- */
+ if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
+ if (throttle) {
+ hwc->interrupts = MAX_INTERRUPTS;
+ perf_log_throttle(event, 0);
ret = 1;
}
- }
+ } else
+ hwc->interrupts++;
if (event->attr.freq) {
u64 now = perf_clock();
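/*
 * Editor's note (not part of the patch): the per-tick limit above replaces
 * the old "HZ * hwc->interrupts > sample_rate" test.  With the default
 * sysctl_perf_event_sample_rate of 100000 and HZ == 1000 this gives
 *
 *	max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100
 *
 * so once an event has taken 100 samples within a tick the next throttled
 * overflow marks it MAX_INTERRUPTS and logs a throttle record, and
 * event_sched_in() later resets hwc->interrupts when it unthrottles.
 */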
@@ -5051,6 +5621,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
u64 period;
event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
+ return HRTIMER_NORESTART;
+
event->pmu->read(event);
perf_sample_data_init(&data, 0);
@@ -5077,9 +5651,6 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
if (!is_sampling_event(event))
return;
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
-
period = local64_read(&hwc->period_left);
if (period) {
if (period < 0)
@@ -5106,6 +5677,30 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
}
}
+static void perf_swevent_init_hrtimer(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!is_sampling_event(event))
+ return;
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
+
+ /*
+ * Since hrtimers have a fixed rate, we can do a static freq->period
+ * mapping and avoid the whole period adjust feedback stuff.
+ */
+ if (event->attr.freq) {
+ long freq = event->attr.sample_freq;
+
+ event->attr.sample_period = NSEC_PER_SEC / freq;
+ hwc->sample_period = event->attr.sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ event->attr.freq = 0;
+ }
+}
+
/*
* Software event: cpu wall time clock
*/
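/*
 * Editor's note (not part of the patch): perf_swevent_init_hrtimer() above
 * converts a frequency request into a fixed period once, since hrtimers fire
 * at a constant rate.  For example attr.sample_freq == 4000 yields
 *
 *	attr.sample_period = NSEC_PER_SEC / 4000 = 250000 ns
 *
 * and attr.freq is cleared so the adaptive period adjustment is bypassed.
 */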
@@ -5158,6 +5753,8 @@ static int cpu_clock_event_init(struct perf_event *event)
if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
return -ENOENT;
+ perf_swevent_init_hrtimer(event);
+
return 0;
}
@@ -5213,16 +5810,9 @@ static void task_clock_event_del(struct perf_event *event, int flags)
static void task_clock_event_read(struct perf_event *event)
{
- u64 time;
-
- if (!in_nmi()) {
- update_context_time(event->ctx);
- time = event->ctx->time;
- } else {
- u64 now = perf_clock();
- u64 delta = now - event->ctx->timestamp;
- time = event->ctx->time + delta;
- }
+ u64 now = perf_clock();
+ u64 delta = now - event->ctx->timestamp;
+ u64 time = event->ctx->time + delta;
task_clock_event_update(event, time);
}
@@ -5235,6 +5825,8 @@ static int task_clock_event_init(struct perf_event *event)
if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
return -ENOENT;
+ perf_swevent_init_hrtimer(event);
+
return 0;
}
@@ -5642,7 +6234,7 @@ done:
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_inc(&perf_task_events);
+ jump_label_inc(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
@@ -5817,7 +6409,7 @@ SYSCALL_DEFINE5(perf_event_open,
int err;
/* for future expandability... */
- if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
+ if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
err = perf_copy_attr(attr_uptr, &attr);
@@ -5834,6 +6426,15 @@ SYSCALL_DEFINE5(perf_event_open,
return -EINVAL;
}
+ /*
+ * In cgroup mode, the pid argument is used to pass the fd
+ * opened on the cgroup directory in cgroupfs. The cpu argument
+ * designates the cpu on which to monitor threads from that
+ * cgroup.
+ */
+ if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
+ return -EINVAL;
+
event_fd = get_unused_fd_flags(O_RDWR);
if (event_fd < 0)
return event_fd;
@@ -5851,7 +6452,7 @@ SYSCALL_DEFINE5(perf_event_open,
group_leader = NULL;
}
- if (pid != -1) {
+ if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
task = find_lively_task_by_vpid(pid);
if (IS_ERR(task)) {
err = PTR_ERR(task);
@@ -5865,6 +6466,12 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_task;
}
+ if (flags & PERF_FLAG_PID_CGROUP) {
+ err = perf_cgroup_connect(pid, event, &attr, group_leader);
+ if (err)
+ goto err_alloc;
+ }
+
/*
* Special case software events and allow them to be part of
* any hardware group.
@@ -5950,10 +6557,10 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
- perf_event_remove_from_context(group_leader);
+ perf_remove_from_context(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
- perf_event_remove_from_context(sibling);
+ perf_remove_from_context(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
@@ -5976,6 +6583,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
+ perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
event->owner = current;
@@ -6001,6 +6609,7 @@ SYSCALL_DEFINE5(perf_event_open,
return event_fd;
err_context:
+ perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
free_event(event);
@@ -6051,6 +6660,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
+ perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
@@ -6104,7 +6714,7 @@ __perf_event_exit_task(struct perf_event *child_event,
{
struct perf_event *parent_event;
- perf_event_remove_from_context(child_event);
+ perf_remove_from_context(child_event);
parent_event = child_event->parent;
/*
@@ -6411,7 +7021,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
return 0;
}
- child_ctx = child->perf_event_ctxp[ctxn];
+ child_ctx = child->perf_event_ctxp[ctxn];
if (!child_ctx) {
/*
* This is executed from the parent task context, so
@@ -6526,6 +7136,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
+ put_ctx(parent_ctx);
return ret;
}
@@ -6595,9 +7206,9 @@ static void __perf_event_exit_context(void *__info)
perf_pmu_rotate_stop(ctx->pmu);
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
- __perf_event_remove_from_context(event);
+ __perf_remove_from_context(event);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
- __perf_event_remove_from_context(event);
+ __perf_remove_from_context(event);
}
static void perf_event_exit_cpu_context(int cpu)
@@ -6721,3 +7332,92 @@ unlock:
return ret;
}
device_initcall(perf_event_sysfs_init);
+
+#ifdef CONFIG_CGROUP_PERF
+static struct cgroup_subsys_state *perf_cgroup_create(
+ struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct perf_cgroup *jc;
+ struct perf_cgroup_info *t;
+ int c;
+
+ jc = kmalloc(sizeof(*jc), GFP_KERNEL);
+ if (!jc)
+ return ERR_PTR(-ENOMEM);
+
+ memset(jc, 0, sizeof(*jc));
+
+ jc->info = alloc_percpu(struct perf_cgroup_info);
+ if (!jc->info) {
+ kfree(jc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_possible_cpu(c) {
+ t = per_cpu_ptr(jc->info, c);
+ t->time = 0;
+ t->timestamp = 0;
+ }
+ return &jc->css;
+}
+
+static void perf_cgroup_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ struct perf_cgroup *jc;
+ jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
+ struct perf_cgroup, css);
+ free_percpu(jc->info);
+ kfree(jc);
+}
+
+static int __perf_cgroup_move(void *info)
+{
+ struct task_struct *task = info;
+ perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+ return 0;
+}
+
+static void perf_cgroup_move(struct task_struct *task)
+{
+ task_function_call(task, __perf_cgroup_move, task);
+}
+
+static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup *old_cgrp, struct task_struct *task,
+ bool threadgroup)
+{
+ perf_cgroup_move(task);
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+ perf_cgroup_move(c);
+ }
+ rcu_read_unlock();
+ }
+}
+
+static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup *old_cgrp, struct task_struct *task)
+{
+ /*
+ * cgroup_exit() is called in the copy_process() failure path.
+ * Ignore this case since the task hasn't run yet; this avoids
+ * trying to poke a half-freed task state from generic code.
+ */
+ if (!(task->flags & PF_EXITING))
+ return;
+
+ perf_cgroup_move(task);
+}
+
+struct cgroup_subsys perf_subsys = {
+ .name = "perf_event",
+ .subsys_id = perf_subsys_id,
+ .create = perf_cgroup_create,
+ .destroy = perf_cgroup_destroy,
+ .exit = perf_cgroup_exit,
+ .attach = perf_cgroup_attach,
+};
+#endif /* CONFIG_CGROUP_PERF */
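Taken together with the PERF_FLAG_PID_CGROUP handling in perf_event_open() above, the new subsystem lets one counter follow every task in a cgroup on a given CPU. A minimal user-space sketch of that calling convention, assuming the usual perf_event cgroup mount point and that PERF_FLAG_PID_CGROUP is visible in the installed linux/perf_event.h (the path and event choice below are illustrative, not part of this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr = { 0 };
	int cgrp_fd, ev_fd;

	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid carries an fd opened on the cgroup directory; cpu must be >= 0. */
	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY); /* assumed mount */
	ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 0 /* cpu */,
			-1 /* group_fd */, PERF_FLAG_PID_CGROUP);
	if (ev_fd < 0)
		perror("perf_event_open");

	close(ev_fd);
	close(cgrp_fd);
	return 0;
}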
diff --git a/kernel/pid.c b/kernel/pid.c
index 39b65b69584f..02f221274265 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -435,6 +435,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
rcu_read_unlock();
return pid;
}
+EXPORT_SYMBOL_GPL(get_task_pid);
struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
@@ -446,6 +447,7 @@ struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
rcu_read_unlock();
return result;
}
+EXPORT_SYMBOL_GPL(get_pid_task);
struct pid *find_get_pid(pid_t nr)
{
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index aeaa7f846821..0da058bff8eb 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -103,11 +103,14 @@ static struct pm_qos_object *pm_qos_array[] = {
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos);
+static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);
static const struct file_operations pm_qos_power_fops = {
.write = pm_qos_power_write,
+ .read = pm_qos_power_read,
.open = pm_qos_power_open,
.release = pm_qos_power_release,
.llseek = noop_llseek,
@@ -376,6 +379,27 @@ static int pm_qos_power_release(struct inode *inode, struct file *filp)
}
+static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ unsigned long flags;
+ struct pm_qos_object *o;
+ struct pm_qos_request_list *pm_qos_req = filp->private_data;
+
+ if (!pm_qos_req)
+ return -EINVAL;
+ if (!pm_qos_request_active(pm_qos_req))
+ return -EINVAL;
+
+ o = pm_qos_array[pm_qos_req->pm_qos_class];
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ value = pm_qos_get_value(o);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
+}
+
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
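With the read handler above in place, a process holding a pm_qos request open can read back the currently effective value as a raw s32. A small sketch, assuming the usual /dev/cpu_dma_latency misc device is the one being exercised (device name and units are assumptions, not taken from this hunk):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>

int main(void)
{
	int32_t value;
	int fd = open("/dev/cpu_dma_latency", O_RDWR);   /* assumed pm_qos misc device */

	if (fd < 0)
		return 1;

	/* The new read handler copies the aggregate target value as a binary s32. */
	if (read(fd, &value, sizeof(value)) == sizeof(value))
		printf("current cpu_dma_latency target: %d\n", value);

	close(fd);   /* closing the fd also drops the request taken at open() */
	return 0;
}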
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 05bb7173850e..67fea9d25d55 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -176,7 +176,8 @@ static inline cputime_t virt_ticks(struct task_struct *p)
return p->utime;
}
-int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
+static int
+posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
int error = check_clock(which_clock);
if (!error) {
@@ -194,7 +195,8 @@ int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
return error;
}
-int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
+static int
+posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
@@ -317,7 +319,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
}
-int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
+static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
int error = -EINVAL;
@@ -379,7 +381,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
* This is called from sys_timer_create() and do_cpu_nanosleep() with the
* new timer already all-zeros initialized.
*/
-int posix_cpu_timer_create(struct k_itimer *new_timer)
+static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
int ret = 0;
const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
@@ -425,7 +427,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
-int posix_cpu_timer_del(struct k_itimer *timer)
+static int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
int ret = 0;
@@ -665,8 +667,8 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
-int posix_cpu_timer_set(struct k_itimer *timer, int flags,
- struct itimerspec *new, struct itimerspec *old)
+static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+ struct itimerspec *new, struct itimerspec *old)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count old_expires, new_expires, old_incr, val;
@@ -820,7 +822,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
return ret;
}
-void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
+static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
union cpu_time_count now;
struct task_struct *p = timer->it.cpu.task;
@@ -1481,11 +1483,13 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
return error;
}
-int posix_cpu_nsleep(const clockid_t which_clock, int flags,
- struct timespec *rqtp, struct timespec __user *rmtp)
+static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
+
+static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+ struct timespec *rqtp, struct timespec __user *rmtp)
{
struct restart_block *restart_block =
- &current_thread_info()->restart_block;
+ &current_thread_info()->restart_block;
struct itimerspec it;
int error;
@@ -1501,56 +1505,47 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags,
if (error == -ERESTART_RESTARTBLOCK) {
- if (flags & TIMER_ABSTIME)
+ if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
/*
- * Report back to the user the time still remaining.
- */
- if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+ * Report back to the user the time still remaining.
+ */
+ if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->fn = posix_cpu_nsleep_restart;
- restart_block->arg0 = which_clock;
- restart_block->arg1 = (unsigned long) rmtp;
- restart_block->arg2 = rqtp->tv_sec;
- restart_block->arg3 = rqtp->tv_nsec;
+ restart_block->nanosleep.index = which_clock;
+ restart_block->nanosleep.rmtp = rmtp;
+ restart_block->nanosleep.expires = timespec_to_ns(rqtp);
}
return error;
}
-long posix_cpu_nsleep_restart(struct restart_block *restart_block)
+static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
- clockid_t which_clock = restart_block->arg0;
- struct timespec __user *rmtp;
+ clockid_t which_clock = restart_block->nanosleep.index;
struct timespec t;
struct itimerspec it;
int error;
- rmtp = (struct timespec __user *) restart_block->arg1;
- t.tv_sec = restart_block->arg2;
- t.tv_nsec = restart_block->arg3;
+ t = ns_to_timespec(restart_block->nanosleep.expires);
- restart_block->fn = do_no_restart_syscall;
error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
if (error == -ERESTART_RESTARTBLOCK) {
+ struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
/*
- * Report back to the user the time still remaining.
- */
- if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+ * Report back to the user the time still remaining.
+ */
+ if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
- restart_block->fn = posix_cpu_nsleep_restart;
- restart_block->arg0 = which_clock;
- restart_block->arg1 = (unsigned long) rmtp;
- restart_block->arg2 = t.tv_sec;
- restart_block->arg3 = t.tv_nsec;
+ restart_block->nanosleep.expires = timespec_to_ns(&t);
}
return error;
}
-
#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
@@ -1594,38 +1589,37 @@ static int thread_cpu_timer_create(struct k_itimer *timer)
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
-static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
- struct timespec *rqtp, struct timespec __user *rmtp)
-{
- return -EINVAL;
-}
-static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
-{
- return -EINVAL;
-}
+
+struct k_clock clock_posix_cpu = {
+ .clock_getres = posix_cpu_clock_getres,
+ .clock_set = posix_cpu_clock_set,
+ .clock_get = posix_cpu_clock_get,
+ .timer_create = posix_cpu_timer_create,
+ .nsleep = posix_cpu_nsleep,
+ .nsleep_restart = posix_cpu_nsleep_restart,
+ .timer_set = posix_cpu_timer_set,
+ .timer_del = posix_cpu_timer_del,
+ .timer_get = posix_cpu_timer_get,
+};
static __init int init_posix_cpu_timers(void)
{
struct k_clock process = {
- .clock_getres = process_cpu_clock_getres,
- .clock_get = process_cpu_clock_get,
- .clock_set = do_posix_clock_nosettime,
- .timer_create = process_cpu_timer_create,
- .nsleep = process_cpu_nsleep,
- .nsleep_restart = process_cpu_nsleep_restart,
+ .clock_getres = process_cpu_clock_getres,
+ .clock_get = process_cpu_clock_get,
+ .timer_create = process_cpu_timer_create,
+ .nsleep = process_cpu_nsleep,
+ .nsleep_restart = process_cpu_nsleep_restart,
};
struct k_clock thread = {
- .clock_getres = thread_cpu_clock_getres,
- .clock_get = thread_cpu_clock_get,
- .clock_set = do_posix_clock_nosettime,
- .timer_create = thread_cpu_timer_create,
- .nsleep = thread_cpu_nsleep,
- .nsleep_restart = thread_cpu_nsleep_restart,
+ .clock_getres = thread_cpu_clock_getres,
+ .clock_get = thread_cpu_clock_get,
+ .timer_create = thread_cpu_timer_create,
};
struct timespec ts;
- register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
- register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
+ posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
+ posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
cputime_to_timespec(cputime_one_jiffy, &ts);
onecputick = ts.tv_nsec;
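The conversion above stores the pending request in restart_block->nanosleep.expires as a plain nanosecond count instead of packing tv_sec/tv_nsec into arg2/arg3, using the stock timespec_to_ns()/ns_to_timespec() helpers. A user-space-style sketch of the same round trip (types and values are illustrative):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts { long long tv_sec; long tv_nsec; };

/* Mirrors the timespec <-> ns conversion used by the restart path. */
static long long ts_to_ns(const struct ts *t)
{
	return t->tv_sec * NSEC_PER_SEC + t->tv_nsec;
}

static struct ts ns_to_ts(long long ns)
{
	struct ts t = { ns / NSEC_PER_SEC, (long)(ns % NSEC_PER_SEC) };
	return t;
}

int main(void)
{
	struct ts req = { 1, 500000000 };        /* 1.5 s requested sleep */
	long long expires = ts_to_ns(&req);      /* what the restart block stores */
	struct ts back = ns_to_ts(expires);

	printf("expires=%lld ns -> %llds %ldns\n", expires, back.tv_sec, back.tv_nsec);
	return 0;
}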
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 93bd2eb2bc53..44fcff131b38 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -41,6 +41,7 @@
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
+#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
@@ -81,6 +82,14 @@ static DEFINE_SPINLOCK(idr_lock);
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
+/*
+ * parisc wants ENOTSUP instead of EOPNOTSUPP
+ */
+#ifndef ENOTSUP
+# define ENANOSLEEP_NOTSUP EOPNOTSUPP
+#else
+# define ENANOSLEEP_NOTSUP ENOTSUP
+#endif
/*
* The timer ID is turned into a timer address by idr_find().
@@ -94,11 +103,7 @@ static DEFINE_SPINLOCK(idr_lock);
/*
* CLOCKs: The POSIX standard calls for a couple of clocks and allows us
* to implement others. This structure defines the various
- * clocks and allows the possibility of adding others. We
- * provide an interface to add clocks to the table and expect
- * the "arch" code to add at least one clock that is high
- * resolution. Here we define the standard CLOCK_REALTIME as a
- * 1/HZ resolution clock.
+ * clocks.
*
* RESOLUTION: Clock resolution is used to round up timer and interval
* times, NOT to report clock times, which are reported with as
@@ -108,20 +113,13 @@ static DEFINE_SPINLOCK(idr_lock);
* necessary code is written. The standard says we should say
* something about this issue in the documentation...
*
- * FUNCTIONS: The CLOCKs structure defines possible functions to handle
- * various clock functions. For clocks that use the standard
- * system timer code these entries should be NULL. This will
- * allow dispatch without the overhead of indirect function
- * calls. CLOCKS that depend on other sources (e.g. WWV or GPS)
- * must supply functions here, even if the function just returns
- * ENOSYS. The standard POSIX timer management code assumes the
- * following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
- * fields are not modified by timer code.
+ * FUNCTIONS: The CLOCKs structure defines possible functions to
+ * handle various clock functions.
*
- * At this time all functions EXCEPT clock_nanosleep can be
- * redirected by the CLOCKS structure. Clock_nanosleep is in
- * there, but the code ignores it.
+ * The standard POSIX timer management code assumes the
+ * following: 1.) The k_itimer struct (sched.h) is used for
+ * the timer. 2.) The list, it_lock, it_clock, it_id and
+ * it_pid fields are not modified by timer code.
*
* Permissions: It is assumed that the clock_settime() function defined
* for each clock will take care of permission checks. Some
@@ -138,6 +136,7 @@ static struct k_clock posix_clocks[MAX_CLOCKS];
*/
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
struct timespec __user *rmtp);
+static int common_timer_create(struct k_itimer *new_timer);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
struct itimerspec *, struct itimerspec *);
@@ -158,76 +157,24 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
spin_unlock_irqrestore(&timr->it_lock, flags);
}
-/*
- * Call the k_clock hook function if non-null, or the default function.
- */
-#define CLOCK_DISPATCH(clock, call, arglist) \
- ((clock) < 0 ? posix_cpu_##call arglist : \
- (posix_clocks[clock].call != NULL \
- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
-
-/*
- * Default clock hook functions when the struct k_clock passed
- * to register_posix_clock leaves a function pointer null.
- *
- * The function common_CALL is the default implementation for
- * the function pointer CALL in struct k_clock.
- */
-
-static inline int common_clock_getres(const clockid_t which_clock,
- struct timespec *tp)
-{
- tp->tv_sec = 0;
- tp->tv_nsec = posix_clocks[which_clock].res;
- return 0;
-}
-
-/*
- * Get real time for posix timers
- */
-static int common_clock_get(clockid_t which_clock, struct timespec *tp)
+/* Get clock_realtime */
+static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
{
ktime_get_real_ts(tp);
return 0;
}
-static inline int common_clock_set(const clockid_t which_clock,
- struct timespec *tp)
+/* Set clock_realtime */
+static int posix_clock_realtime_set(const clockid_t which_clock,
+ const struct timespec *tp)
{
return do_sys_settimeofday(tp, NULL);
}
-static int common_timer_create(struct k_itimer *new_timer)
-{
- hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
- return 0;
-}
-
-static int no_timer_create(struct k_itimer *new_timer)
-{
- return -EOPNOTSUPP;
-}
-
-static int no_nsleep(const clockid_t which_clock, int flags,
- struct timespec *tsave, struct timespec __user *rmtp)
+static int posix_clock_realtime_adj(const clockid_t which_clock,
+ struct timex *t)
{
- return -EOPNOTSUPP;
-}
-
-/*
- * Return nonzero if we know a priori this clockid_t value is bogus.
- */
-static inline int invalid_clockid(const clockid_t which_clock)
-{
- if (which_clock < 0) /* CPU clock, posix_cpu_* will check it */
- return 0;
- if ((unsigned) which_clock >= MAX_CLOCKS)
- return 1;
- if (posix_clocks[which_clock].clock_getres != NULL)
- return 0;
- if (posix_clocks[which_clock].res != 0)
- return 0;
- return 1;
+ return do_adjtimex(t);
}
/*
@@ -273,40 +220,45 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp
static __init int init_posix_timers(void)
{
struct k_clock clock_realtime = {
- .clock_getres = hrtimer_get_res,
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_clock_realtime_get,
+ .clock_set = posix_clock_realtime_set,
+ .clock_adj = posix_clock_realtime_adj,
+ .nsleep = common_nsleep,
+ .nsleep_restart = hrtimer_nanosleep_restart,
+ .timer_create = common_timer_create,
+ .timer_set = common_timer_set,
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
};
struct k_clock clock_monotonic = {
- .clock_getres = hrtimer_get_res,
- .clock_get = posix_ktime_get_ts,
- .clock_set = do_posix_clock_nosettime,
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_ktime_get_ts,
+ .nsleep = common_nsleep,
+ .nsleep_restart = hrtimer_nanosleep_restart,
+ .timer_create = common_timer_create,
+ .timer_set = common_timer_set,
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
};
struct k_clock clock_monotonic_raw = {
- .clock_getres = hrtimer_get_res,
- .clock_get = posix_get_monotonic_raw,
- .clock_set = do_posix_clock_nosettime,
- .timer_create = no_timer_create,
- .nsleep = no_nsleep,
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_monotonic_raw,
};
struct k_clock clock_realtime_coarse = {
- .clock_getres = posix_get_coarse_res,
- .clock_get = posix_get_realtime_coarse,
- .clock_set = do_posix_clock_nosettime,
- .timer_create = no_timer_create,
- .nsleep = no_nsleep,
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_realtime_coarse,
};
struct k_clock clock_monotonic_coarse = {
- .clock_getres = posix_get_coarse_res,
- .clock_get = posix_get_monotonic_coarse,
- .clock_set = do_posix_clock_nosettime,
- .timer_create = no_timer_create,
- .nsleep = no_nsleep,
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_monotonic_coarse,
};
- register_posix_clock(CLOCK_REALTIME, &clock_realtime);
- register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
- register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
- register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
- register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+ posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
+ posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
+ posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
+ posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
+ posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -482,17 +434,29 @@ static struct pid *good_sigevent(sigevent_t * event)
return task_pid(rtn);
}
-void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
+void posix_timers_register_clock(const clockid_t clock_id,
+ struct k_clock *new_clock)
{
if ((unsigned) clock_id >= MAX_CLOCKS) {
- printk("POSIX clock register failed for clock_id %d\n",
+ printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
+ clock_id);
+ return;
+ }
+
+ if (!new_clock->clock_get) {
+ printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
+ clock_id);
+ return;
+ }
+ if (!new_clock->clock_getres) {
+ printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
clock_id);
return;
}
posix_clocks[clock_id] = *new_clock;
}
-EXPORT_SYMBOL_GPL(register_posix_clock);
+EXPORT_SYMBOL_GPL(posix_timers_register_clock);
static struct k_itimer * alloc_posix_timer(void)
{
@@ -523,19 +487,39 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
kmem_cache_free(posix_timers_cache, tmr);
}
+static struct k_clock *clockid_to_kclock(const clockid_t id)
+{
+ if (id < 0)
+ return (id & CLOCKFD_MASK) == CLOCKFD ?
+ &clock_posix_dynamic : &clock_posix_cpu;
+
+ if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+ return NULL;
+ return &posix_clocks[id];
+}
+
+static int common_timer_create(struct k_itimer *new_timer)
+{
+ hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
+ return 0;
+}
+
/* Create a POSIX.1b interval timer. */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
struct sigevent __user *, timer_event_spec,
timer_t __user *, created_timer_id)
{
+ struct k_clock *kc = clockid_to_kclock(which_clock);
struct k_itimer *new_timer;
int error, new_timer_id;
sigevent_t event;
int it_id_set = IT_ID_NOT_SET;
- if (invalid_clockid(which_clock))
+ if (!kc)
return -EINVAL;
+ if (!kc->timer_create)
+ return -EOPNOTSUPP;
new_timer = alloc_posix_timer();
if (unlikely(!new_timer))
@@ -597,7 +581,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
goto out;
}
- error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+ error = kc->timer_create(new_timer);
if (error)
goto out;
@@ -607,7 +591,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
spin_unlock_irq(&current->sighand->siglock);
return 0;
- /*
+ /*
* In the case of the timer belonging to another task, after
* the task is unlocked, the timer is owned by the other task
* and may cease to exist at any time. Don't use or modify
@@ -709,22 +693,28 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
struct itimerspec __user *, setting)
{
- struct k_itimer *timr;
struct itimerspec cur_setting;
+ struct k_itimer *timr;
+ struct k_clock *kc;
unsigned long flags;
+ int ret = 0;
timr = lock_timer(timer_id, &flags);
if (!timr)
return -EINVAL;
- CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_get))
+ ret = -EINVAL;
+ else
+ kc->timer_get(timr, &cur_setting);
unlock_timer(timr, flags);
- if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
+ if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
return -EFAULT;
- return 0;
+ return ret;
}
/*
@@ -813,6 +803,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
int error = 0;
unsigned long flag;
struct itimerspec *rtn = old_setting ? &old_spec : NULL;
+ struct k_clock *kc;
if (!new_setting)
return -EINVAL;
@@ -828,8 +819,11 @@ retry:
if (!timr)
return -EINVAL;
- error = CLOCK_DISPATCH(timr->it_clock, timer_set,
- (timr, flags, &new_spec, rtn));
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ error = -EINVAL;
+ else
+ error = kc->timer_set(timr, flags, &new_spec, rtn);
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
@@ -844,7 +838,7 @@ retry:
return error;
}
-static inline int common_timer_del(struct k_itimer *timer)
+static int common_timer_del(struct k_itimer *timer)
{
timer->it.real.interval.tv64 = 0;
@@ -855,7 +849,11 @@ static inline int common_timer_del(struct k_itimer *timer)
static inline int timer_delete_hook(struct k_itimer *timer)
{
- return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
+ struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+
+ if (WARN_ON_ONCE(!kc || !kc->timer_del))
+ return -EINVAL;
+ return kc->timer_del(timer);
}
/* Delete a POSIX.1b interval timer. */
@@ -927,69 +925,76 @@ void exit_itimers(struct signal_struct *sig)
}
}
-/* Not available / possible... functions */
-int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
-
-int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
- struct timespec *t, struct timespec __user *r)
-{
-#ifndef ENOTSUP
- return -EOPNOTSUPP; /* aka ENOTSUP in userland for POSIX */
-#else /* parisc does define it separately. */
- return -ENOTSUP;
-#endif
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
-
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
const struct timespec __user *, tp)
{
+ struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec new_tp;
- if (invalid_clockid(which_clock))
+ if (!kc || !kc->clock_set)
return -EINVAL;
+
if (copy_from_user(&new_tp, tp, sizeof (*tp)))
return -EFAULT;
- return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
+ return kc->clock_set(which_clock, &new_tp);
}
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
struct timespec __user *,tp)
{
+ struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec kernel_tp;
int error;
- if (invalid_clockid(which_clock))
+ if (!kc)
return -EINVAL;
- error = CLOCK_DISPATCH(which_clock, clock_get,
- (which_clock, &kernel_tp));
+
+ error = kc->clock_get(which_clock, &kernel_tp);
+
if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
error = -EFAULT;
return error;
+}
+
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+ struct timex __user *, utx)
+{
+ struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct timex ktx;
+ int err;
+
+ if (!kc)
+ return -EINVAL;
+ if (!kc->clock_adj)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ktx, utx, sizeof(ktx)))
+ return -EFAULT;
+ err = kc->clock_adj(which_clock, &ktx);
+
+ if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
+ return -EFAULT;
+
+ return err;
}
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
struct timespec __user *, tp)
{
+ struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec rtn_tp;
int error;
- if (invalid_clockid(which_clock))
+ if (!kc)
return -EINVAL;
- error = CLOCK_DISPATCH(which_clock, clock_getres,
- (which_clock, &rtn_tp));
+ error = kc->clock_getres(which_clock, &rtn_tp);
- if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
+ if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
error = -EFAULT;
- }
return error;
}
@@ -1009,10 +1014,13 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
const struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
+ struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec t;
- if (invalid_clockid(which_clock))
+ if (!kc)
return -EINVAL;
+ if (!kc->nsleep)
+ return -ENANOSLEEP_NOTSUP;
if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
return -EFAULT;
@@ -1020,27 +1028,20 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
if (!timespec_valid(&t))
return -EINVAL;
- return CLOCK_DISPATCH(which_clock, nsleep,
- (which_clock, flags, &t, rmtp));
-}
-
-/*
- * nanosleep_restart for monotonic and realtime clocks
- */
-static int common_nsleep_restart(struct restart_block *restart_block)
-{
- return hrtimer_nanosleep_restart(restart_block);
+ return kc->nsleep(which_clock, flags, &t, rmtp);
}
/*
* This will restart clock_nanosleep. This is required only by
* compat_clock_nanosleep_restart for now.
*/
-long
-clock_nanosleep_restart(struct restart_block *restart_block)
+long clock_nanosleep_restart(struct restart_block *restart_block)
{
- clockid_t which_clock = restart_block->arg0;
+ clockid_t which_clock = restart_block->nanosleep.index;
+ struct k_clock *kc = clockid_to_kclock(which_clock);
+
+ if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+ return -EINVAL;
- return CLOCK_DISPATCH(which_clock, nsleep_restart,
- (restart_block));
+ return kc->nsleep_restart(restart_block);
}
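The new clock_adjtime syscall above forwards a struct timex to the clock's clock_adj hook and copies the possibly updated structure back to user space. A user-space sketch of a read-only query through it, assuming __NR_clock_adjtime is present in the installed headers (a raw syscall() is used since libc wrappers may lag behind):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/timex.h>
#include <time.h>

int main(void)
{
	struct timex tx;
	long ret;

	memset(&tx, 0, sizeof(tx));
	tx.modes = 0;                 /* no modifications: just query the clock state */

	ret = syscall(__NR_clock_adjtime, CLOCK_REALTIME, &tx);
	if (ret < 0) {
		perror("clock_adjtime");
		return 1;
	}

	/* ret is the clock state (TIME_OK, TIME_INS, ...); tx holds offset/freq/etc. */
	printf("state=%ld freq=%ld offset=%ld\n", ret, tx.freq, tx.offset);
	return 0;
}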
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 265729966ece..4603f08dc47b 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -1,125 +1,12 @@
-config PM
- bool "Power Management support"
- depends on !IA64_HP_SIM
- ---help---
- "Power Management" means that parts of your computer are shut
- off or put into a power conserving "sleep" mode if they are not
- being used. There are two competing standards for doing this: APM
- and ACPI. If you want to use either one, say Y here and then also
- to the requisite support below.
-
- Power Management is most important for battery powered laptop
- computers; if you have a laptop, check out the Linux Laptop home
- page on the WWW at <http://www.linux-on-laptops.com/> or
- Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>
- and the Battery Powered Linux mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- Note that, even if you say N here, Linux on the x86 architecture
- will issue the hlt instruction if nothing is to be done, thereby
- sending the processor to sleep and saving power.
-
-config PM_DEBUG
- bool "Power Management Debug Support"
- depends on PM
- ---help---
- This option enables various debugging support in the Power Management
- code. This is helpful when debugging and reporting PM bugs, like
- suspend support.
-
-config PM_ADVANCED_DEBUG
- bool "Extra PM attributes in sysfs for low-level debugging/testing"
- depends on PM_DEBUG
- default n
- ---help---
- Add extra sysfs attributes allowing one to access some Power Management
- fields of device objects from user space. If you are not a kernel
- developer interested in debugging/testing Power Management, say "no".
-
-config PM_VERBOSE
- bool "Verbose Power Management debugging"
- depends on PM_DEBUG
- default n
- ---help---
- This option enables verbose messages from the Power Management code.
-
-config CAN_PM_TRACE
- def_bool y
- depends on PM_DEBUG && PM_SLEEP && EXPERIMENTAL
-
-config PM_TRACE
- bool
- help
- This enables code to save the last PM event point across
- reboot. The architecture needs to support this, x86 for
- example does by saving things in the RTC, see below.
-
- The architecture specific code must provide the extern
- functions from <linux/resume-trace.h> as well as the
- <asm/resume-trace.h> header with a TRACE_RESUME() macro.
-
- The way the information is presented is architecture-
- dependent, x86 will print the information during a
- late_initcall.
-
-config PM_TRACE_RTC
- bool "Suspend/resume event tracing"
- depends on CAN_PM_TRACE
- depends on X86
- select PM_TRACE
- default n
- ---help---
- This enables some cheesy code to save the last PM event point in the
- RTC across reboots, so that you can debug a machine that just hangs
- during suspend (or more commonly, during resume).
-
- To use this debugging feature you should attempt to suspend the
- machine, reboot it and then run
-
- dmesg -s 1000000 | grep 'hash matches'
-
- CAUTION: this option will cause your machine's real-time clock to be
- set to an invalid time after a resume.
-
-config PM_SLEEP_SMP
- bool
- depends on SMP
- depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
- depends on PM_SLEEP
- select HOTPLUG
- select HOTPLUG_CPU
- default y
-
-config PM_SLEEP
- bool
- depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
- default y
-
-config PM_SLEEP_ADVANCED_DEBUG
- bool
- depends on PM_ADVANCED_DEBUG
- default n
-
config SUSPEND
bool "Suspend to RAM and standby"
- depends on PM && ARCH_SUSPEND_POSSIBLE
+ depends on ARCH_SUSPEND_POSSIBLE
default y
---help---
Allow the system to enter sleep states in which main memory is
powered and thus its contents are preserved, such as the
suspend-to-RAM state (e.g. the ACPI S3 state).
-config PM_TEST_SUSPEND
- bool "Test suspend/resume and wakealarm during bootup"
- depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
- ---help---
- This option will let you suspend your machine during bootup, and
- make it wake up a few seconds later using an RTC wakeup alarm.
- Enable this with a kernel parameter like "test_suspend=mem".
-
- You probably want to have your system's RTC driver statically
- linked, ensuring that it's available when this test runs.
-
config SUSPEND_FREEZER
bool "Enable freezer for suspend to RAM/standby" \
if ARCH_WANTS_FREEZER_CONTROL || BROKEN
@@ -133,7 +20,7 @@ config SUSPEND_FREEZER
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
- depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
+ depends on SWAP && ARCH_HIBERNATION_POSSIBLE
select LZO_COMPRESS
select LZO_DECOMPRESS
---help---
@@ -196,6 +83,106 @@ config PM_STD_PARTITION
suspended image to. It will simply pick the first available swap
device.
+config PM_SLEEP
+ def_bool y
+ depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+
+config PM_SLEEP_SMP
+ def_bool y
+ depends on SMP
+ depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
+ depends on PM_SLEEP
+ select HOTPLUG
+ select HOTPLUG_CPU
+
+config PM_RUNTIME
+ bool "Run-time PM core functionality"
+ depends on !IA64_HP_SIM
+ ---help---
+ Enable functionality allowing I/O devices to be put into energy-saving
+ (low power) states at run time (or autosuspended) after a specified
+ period of inactivity and woken up in response to a hardware-generated
+ wake-up event or a driver's request.
+
+ Hardware support is generally required for this functionality to work
+ and the bus type drivers of the buses the devices are on are
+ responsible for the actual handling of the autosuspend requests and
+ wake-up events.
+
+config PM
+ def_bool y
+ depends on PM_SLEEP || PM_RUNTIME
+
+config PM_DEBUG
+ bool "Power Management Debug Support"
+ depends on PM
+ ---help---
+ This option enables various debugging support in the Power Management
+ code. This is helpful when debugging and reporting PM bugs, like
+ suspend support.
+
+config PM_VERBOSE
+ bool "Verbose Power Management debugging"
+ depends on PM_DEBUG
+ ---help---
+ This option enables verbose messages from the Power Management code.
+
+config PM_ADVANCED_DEBUG
+ bool "Extra PM attributes in sysfs for low-level debugging/testing"
+ depends on PM_DEBUG
+ ---help---
+ Add extra sysfs attributes allowing one to access some Power Management
+ fields of device objects from user space. If you are not a kernel
+ developer interested in debugging/testing Power Management, say "no".
+
+config PM_TEST_SUSPEND
+ bool "Test suspend/resume and wakealarm during bootup"
+ depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
+ ---help---
+ This option will let you suspend your machine during bootup, and
+ make it wake up a few seconds later using an RTC wakeup alarm.
+ Enable this with a kernel parameter like "test_suspend=mem".
+
+ You probably want to have your system's RTC driver statically
+ linked, ensuring that it's available when this test runs.
+
+config CAN_PM_TRACE
+ def_bool y
+ depends on PM_DEBUG && PM_SLEEP
+
+config PM_TRACE
+ bool
+ help
+ This enables code to save the last PM event point across
+ reboot. The architecture needs to support this, x86 for
+ example does by saving things in the RTC, see below.
+
+ The architecture specific code must provide the extern
+ functions from <linux/resume-trace.h> as well as the
+ <asm/resume-trace.h> header with a TRACE_RESUME() macro.
+
+ The way the information is presented is architecture-
+ dependent, x86 will print the information during a
+ late_initcall.
+
+config PM_TRACE_RTC
+ bool "Suspend/resume event tracing"
+ depends on CAN_PM_TRACE
+ depends on X86
+ select PM_TRACE
+ ---help---
+ This enables some cheesy code to save the last PM event point in the
+ RTC across reboots, so that you can debug a machine that just hangs
+ during suspend (or more commonly, during resume).
+
+ To use this debugging feature you should attempt to suspend the
+ machine, reboot it and then run
+
+ dmesg -s 1000000 | grep 'hash matches'
+
+ CAUTION: this option will cause your machine's real-time clock to be
+ set to an invalid time after a resume.
+
config APM_EMULATION
tristate "Advanced Power Management Emulation"
depends on PM && SYS_SUPPORTS_APM_EMULATION
@@ -222,31 +209,11 @@ config APM_EMULATION
anything, try disabling/enabling this option (or disabling/enabling
APM in your BIOS).
-config PM_RUNTIME
- bool "Run-time PM core functionality"
- depends on PM
- ---help---
- Enable functionality allowing I/O devices to be put into energy-saving
- (low power) states at run time (or autosuspended) after a specified
- period of inactivity and woken up in response to a hardware-generated
- wake-up event or a driver's request.
-
- Hardware support is generally required for this functionality to work
- and the bus type drivers of the buses the devices are on are
- responsible for the actual handling of the autosuspend requests and
- wake-up events.
-
-config PM_OPS
- bool
- depends on PM_SLEEP || PM_RUNTIME
- default y
-
config ARCH_HAS_OPP
bool
config PM_OPP
bool "Operating Performance Point (OPP) Layer library"
- depends on PM
depends on ARCH_HAS_OPP
---help---
SOCs have a standard set of tuples consisting of frequency and
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 701853042c28..8eaba5f27b10 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -17,9 +17,6 @@
DEFINE_MUTEX(pm_mutex);
-unsigned int pm_flags;
-EXPORT_SYMBOL(pm_flags);
-
#ifdef CONFIG_PM_SLEEP
/* Routines for PM-transition notifications */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a23a57a976d1..f3240e987928 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -214,11 +214,12 @@ static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
* Ensure that queued callbacks are all executed.
* If we detect that we are nested in a RCU read-side critical
* section, we should simply fail, otherwise we would deadlock.
+ * Note that the machinery to reliably determine whether
+ * or not we are in an RCU read-side critical section
+ * exists only in the preemptible RCU implementations
+ * (TINY_PREEMPT_RCU and TREE_PREEMPT_RCU), which is why
+ * DEBUG_OBJECTS_RCU_HEAD is disallowed if !PREEMPT.
*/
-#ifndef CONFIG_PREEMPT
- WARN_ON(1);
- return 0;
-#else
if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
irqs_disabled()) {
WARN_ON(1);
@@ -229,7 +230,6 @@ static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
rcu_barrier_bh();
debug_object_free(head, &rcuhead_debug_descr);
return 1;
-#endif
default:
return 0;
}
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 015abaea962a..3cb8e362e883 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -852,7 +852,7 @@ void exit_rcu(void)
if (t->rcu_read_lock_nesting == 0)
return;
t->rcu_read_lock_nesting = 1;
- rcu_read_unlock();
+ __rcu_read_unlock();
}
#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 89613f97ff26..c224da41890c 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -47,7 +47,6 @@
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
-#include <linux/sched.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index ddabb54bb5c8..3c7cbc2c33be 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -215,7 +215,6 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
put_pid(waiter->deadlock_task_pid);
TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
- TRACE_WARN_ON(waiter->task);
memset(waiter, 0x22, sizeof(*waiter));
}
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 66cb89bc5ef1..5c9ccd380966 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -9,7 +9,6 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
@@ -27,7 +26,6 @@ struct test_thread_data {
int opcode;
int opdata;
int mutexes[MAX_RT_TEST_MUTEXES];
- int bkl;
int event;
struct sys_device sysdev;
};
@@ -46,9 +44,8 @@ enum test_opcodes {
RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
- RTTEST_LOCKBKL, /* 9 Lock BKL */
- RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
- RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
+ /* 9, 10 - reserved for BKL commemoration */
+ RTTEST_SIGNAL = 11, /* 11 Signal other test thread, data = thread id */
RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
RTTEST_RESET = 99, /* 99 Reset all pending operations */
};
@@ -74,13 +71,6 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
td->mutexes[i] = 0;
}
}
-
- if (!lockwakeup && td->bkl == 4) {
-#ifdef CONFIG_LOCK_KERNEL
- unlock_kernel();
-#endif
- td->bkl = 0;
- }
return 0;
case RTTEST_RESETEVENT:
@@ -131,25 +121,6 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
td->mutexes[id] = 0;
return 0;
- case RTTEST_LOCKBKL:
- if (td->bkl)
- return 0;
- td->bkl = 1;
-#ifdef CONFIG_LOCK_KERNEL
- lock_kernel();
-#endif
- td->bkl = 4;
- return 0;
-
- case RTTEST_UNLOCKBKL:
- if (td->bkl != 4)
- break;
-#ifdef CONFIG_LOCK_KERNEL
- unlock_kernel();
-#endif
- td->bkl = 0;
- return 0;
-
default:
break;
}
@@ -196,7 +167,6 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
td->event = atomic_add_return(1, &rttest_event);
break;
- case RTTEST_LOCKBKL:
default:
break;
}
@@ -229,8 +199,6 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
td->event = atomic_add_return(1, &rttest_event);
return;
- case RTTEST_LOCKBKL:
- return;
default:
return;
}
@@ -380,11 +348,11 @@ static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute
spin_lock(&rttest_lock);
curr += sprintf(curr,
- "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
+ "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
td->opcode, td->event, tsk->state,
(MAX_RT_PRIO - 1) - tsk->prio,
(MAX_RT_PRIO - 1) - tsk->normal_prio,
- tsk->pi_blocked_on, td->bkl);
+ tsk->pi_blocked_on);
for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
curr += sprintf(curr, "%d", td->mutexes[i]);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a9604815786a..ab449117aaf2 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -20,41 +20,34 @@
/*
* lock->owner state tracking:
*
- * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
- * are used to keep track of the "owner is pending" and "lock has
- * waiters" state.
+ * lock->owner holds the task_struct pointer of the owner. Bit 0
+ * is used to keep track of the "lock has waiters" state.
*
- * owner bit1 bit0
- * NULL 0 0 lock is free (fast acquire possible)
- * NULL 0 1 invalid state
- * NULL 1 0 Transitional State*
- * NULL 1 1 invalid state
- * taskpointer 0 0 lock is held (fast release possible)
- * taskpointer 0 1 task is pending owner
- * taskpointer 1 0 lock is held and has waiters
- * taskpointer 1 1 task is pending owner and lock has more waiters
- *
- * Pending ownership is assigned to the top (highest priority)
- * waiter of the lock, when the lock is released. The thread is woken
- * up and can now take the lock. Until the lock is taken (bit 0
- * cleared) a competing higher priority thread can steal the lock
- * which puts the woken up thread back on the waiters list.
+ * owner bit0
+ * NULL 0 lock is free (fast acquire possible)
+ * NULL 1 lock is free and has waiters and the top waiter
+ * is going to take the lock*
+ * taskpointer 0 lock is held (fast release possible)
+ * taskpointer 1 lock is held and has waiters**
*
* The fast atomic compare exchange based acquire and release is only
- * possible when bit 0 and 1 of lock->owner are 0.
+ * possible when bit 0 of lock->owner is 0.
+ *
+ * (*) It can also be a transitional state when grabbing the lock
+ * with ->wait_lock held. To prevent any fast-path cmpxchg of the lock,
+ * we need to set bit 0 before looking at the lock; the owner may be
+ * NULL during this small window, hence the transitional state.
*
- * (*) There's a small time where the owner can be NULL and the
- * "lock has waiters" bit is set. This can happen when grabbing the lock.
- * To prevent a cmpxchg of the owner releasing the lock, we need to set this
- * bit before looking at the lock, hence the reason this is a transitional
- * state.
+ * (**) There is a small time when bit 0 is set but there are no
+ * waiters. This can happen when grabbing the lock in the slow path.
+ * To prevent a cmpxchg of the owner releasing the lock, we need to
+ * set this bit before looking at the lock.
*/
static void
-rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
- unsigned long mask)
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
- unsigned long val = (unsigned long)owner | mask;
+ unsigned long val = (unsigned long)owner;
if (rt_mutex_has_waiters(lock))
val |= RT_MUTEX_HAS_WAITERS;
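The owner word now encodes only the task pointer plus a single "has waiters" flag in bit 0, as the rewritten table above describes. A stand-alone sketch of that encoding (names mirror the kernel ones, but this is illustrative, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

#define RT_MUTEX_HAS_WAITERS 1UL

struct task { const char *name; };

/* Task pointers are at least word-aligned, so bit 0 is free for the flag. */
static uintptr_t set_owner(struct task *owner, int has_waiters)
{
	uintptr_t val = (uintptr_t)owner;

	if (has_waiters)
		val |= RT_MUTEX_HAS_WAITERS;
	return val;
}

static struct task *owner_of(uintptr_t word)
{
	return (struct task *)(word & ~RT_MUTEX_HAS_WAITERS);
}

int main(void)
{
	struct task t = { "worker" };
	uintptr_t word = set_owner(&t, 1);

	printf("owner=%s waiters=%lu\n", owner_of(word)->name,
	       (unsigned long)(word & RT_MUTEX_HAS_WAITERS));
	return 0;
}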
@@ -203,15 +196,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
- if (!waiter || !waiter->task)
+ if (!waiter)
goto out_unlock_pi;
/*
* Check the orig_waiter state. After we dropped the locks,
- * the previous owner of the lock might have released the lock
- * and made us the pending owner:
+ * the previous owner of the lock might have released the lock.
*/
- if (orig_waiter && !orig_waiter->task)
+ if (orig_waiter && !rt_mutex_owner(orig_lock))
goto out_unlock_pi;
/*
@@ -254,6 +246,17 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!rt_mutex_owner(lock)) {
+ /*
+ * If the requeue above changed the top waiter, then we need
+ * to wake the new top waiter up to try to get the lock.
+ */
+
+ if (top_waiter != rt_mutex_top_waiter(lock))
+ wake_up_process(rt_mutex_top_waiter(lock)->task);
+ raw_spin_unlock(&lock->wait_lock);
+ goto out_put_task;
+ }
put_task_struct(task);
/* Grab the next task */
@@ -296,78 +299,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
}
/*
- * Optimization: check if we can steal the lock from the
- * assigned pending owner [which might not have taken the
- * lock yet]:
- */
-static inline int try_to_steal_lock(struct rt_mutex *lock,
- struct task_struct *task)
-{
- struct task_struct *pendowner = rt_mutex_owner(lock);
- struct rt_mutex_waiter *next;
- unsigned long flags;
-
- if (!rt_mutex_owner_pending(lock))
- return 0;
-
- if (pendowner == task)
- return 1;
-
- raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
- if (task->prio >= pendowner->prio) {
- raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
- return 0;
- }
-
- /*
- * Check if a waiter is enqueued on the pending owners
- * pi_waiters list. Remove it and readjust pending owners
- * priority.
- */
- if (likely(!rt_mutex_has_waiters(lock))) {
- raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
- return 1;
- }
-
- /* No chain handling, pending owner is not blocked on anything: */
- next = rt_mutex_top_waiter(lock);
- plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
- __rt_mutex_adjust_prio(pendowner);
- raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-
- /*
- * We are going to steal the lock and a waiter was
- * enqueued on the pending owners pi_waiters queue. So
- * we have to enqueue this waiter into
- * task->pi_waiters list. This covers the case,
- * where task is boosted because it holds another
- * lock and gets unboosted because the booster is
- * interrupted, so we would delay a waiter with higher
- * priority as task->normal_prio.
- *
- * Note: in the rare case of a SCHED_OTHER task changing
- * its priority and thus stealing the lock, next->task
- * might be task:
- */
- if (likely(next->task != task)) {
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- plist_add(&next->pi_list_entry, &task->pi_waiters);
- __rt_mutex_adjust_prio(task);
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- }
- return 1;
-}
-
-/*
* Try to take an rt-mutex
*
- * This fails
- * - when the lock has a real owner
- * - when a different pending owner exists and has higher priority than current
- *
* Must be called with lock->wait_lock held.
+ *
+ * @lock: the lock to be acquired.
+ * @task: the task which wants to acquire the lock
+ * @waiter: the waiter that is queued to the lock's wait list (could be NULL).
*/
-static int try_to_take_rt_mutex(struct rt_mutex *lock)
+static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
{
/*
* We have to be careful here if the atomic speedups are
@@ -390,15 +331,52 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
*/
mark_rt_mutex_waiters(lock);
- if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
+ if (rt_mutex_owner(lock))
return 0;
+ /*
+ * The task can take the lock when one of these conditions holds:
+ * 1) there is no waiter
+ * 2) the task has a higher priority than the top waiter
+ * 3) the task is already the top waiter
+ */
+ if (rt_mutex_has_waiters(lock)) {
+ if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+ if (!waiter || waiter != rt_mutex_top_waiter(lock))
+ return 0;
+ }
+ }
+
+ if (waiter || rt_mutex_has_waiters(lock)) {
+ unsigned long flags;
+ struct rt_mutex_waiter *top;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ /* remove the queued waiter. */
+ if (waiter) {
+ plist_del(&waiter->list_entry, &lock->wait_list);
+ task->pi_blocked_on = NULL;
+ }
+
+ /*
+ * We have to enqueue the top waiter (if it exists) into
+ * task->pi_waiters list.
+ */
+ if (rt_mutex_has_waiters(lock)) {
+ top = rt_mutex_top_waiter(lock);
+ top->pi_list_entry.prio = top->list_entry.prio;
+ plist_add(&top->pi_list_entry, &task->pi_waiters);
+ }
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ }
+
/* We got the lock. */
debug_rt_mutex_lock(lock);
- rt_mutex_set_owner(lock, current, 0);
+ rt_mutex_set_owner(lock, task);
- rt_mutex_deadlock_account_lock(lock, current);
+ rt_mutex_deadlock_account_lock(lock, task);
return 1;
}
@@ -436,6 +414,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!owner)
+ return 0;
+
if (waiter == rt_mutex_top_waiter(lock)) {
raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
@@ -472,21 +453,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
/*
* Wake up the next waiter on the lock.
*
- * Remove the top waiter from the current tasks waiter list and from
- * the lock waiter list. Set it as pending owner. Then wake it up.
+ * Remove the top waiter from the current tasks waiter list and wake it up.
*
* Called with lock->wait_lock held.
*/
static void wakeup_next_waiter(struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
- struct task_struct *pendowner;
unsigned long flags;
raw_spin_lock_irqsave(&current->pi_lock, flags);
waiter = rt_mutex_top_waiter(lock);
- plist_del(&waiter->list_entry, &lock->wait_list);
/*
* Remove it from current->pi_waiters. We do not adjust a
@@ -495,43 +473,19 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* lock->wait_lock.
*/
plist_del(&waiter->pi_list_entry, &current->pi_waiters);
- pendowner = waiter->task;
- waiter->task = NULL;
- rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
+ rt_mutex_set_owner(lock, NULL);
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
- /*
- * Clear the pi_blocked_on variable and enqueue a possible
- * waiter into the pi_waiters list of the pending owner. This
- * prevents that in case the pending owner gets unboosted a
- * waiter with higher priority than pending-owner->normal_prio
- * is blocked on the unboosted (pending) owner.
- */
- raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
-
- WARN_ON(!pendowner->pi_blocked_on);
- WARN_ON(pendowner->pi_blocked_on != waiter);
- WARN_ON(pendowner->pi_blocked_on->lock != lock);
-
- pendowner->pi_blocked_on = NULL;
-
- if (rt_mutex_has_waiters(lock)) {
- struct rt_mutex_waiter *next;
-
- next = rt_mutex_top_waiter(lock);
- plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
- }
- raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-
- wake_up_process(pendowner);
+ wake_up_process(waiter->task);
}
/*
- * Remove a waiter from a lock
+ * Remove a waiter from a lock and give up
*
- * Must be called with lock->wait_lock held
+ * Must be called with lock->wait_lock held, and only after
+ * try_to_take_rt_mutex() has just failed for the caller.
*/
static void remove_waiter(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter)
@@ -543,11 +497,13 @@ static void remove_waiter(struct rt_mutex *lock,
raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
- waiter->task = NULL;
current->pi_blocked_on = NULL;
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
- if (first && owner != current) {
+ if (!owner)
+ return;
+
+ if (first) {
raw_spin_lock_irqsave(&owner->pi_lock, flags);
@@ -614,21 +570,19 @@ void rt_mutex_adjust_pi(struct task_struct *task)
* or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
- * @detect_deadlock: passed to task_blocks_on_rt_mutex
*
* lock->wait_lock must be held by the caller.
*/
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- struct rt_mutex_waiter *waiter,
- int detect_deadlock)
+ struct rt_mutex_waiter *waiter)
{
int ret = 0;
for (;;) {
/* Try to acquire the lock: */
- if (try_to_take_rt_mutex(lock))
+ if (try_to_take_rt_mutex(lock, current, waiter))
break;
/*
@@ -645,39 +599,11 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
- /*
- * waiter->task is NULL the first time we come here and
- * when we have been woken up by the previous owner
- * but the lock got stolen by a higher prio task.
- */
- if (!waiter->task) {
- ret = task_blocks_on_rt_mutex(lock, waiter, current,
- detect_deadlock);
- /*
- * If we got woken up by the owner then start loop
- * all over without going into schedule to try
- * to get the lock now:
- */
- if (unlikely(!waiter->task)) {
- /*
- * Reset the return value. We might
- * have returned with -EDEADLK and the
- * owner released the lock while we
- * were walking the pi chain.
- */
- ret = 0;
- continue;
- }
- if (unlikely(ret))
- break;
- }
-
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
- if (waiter->task)
- schedule_rt_mutex(lock);
+ schedule_rt_mutex(lock);
raw_spin_lock(&lock->wait_lock);
set_current_state(state);
@@ -698,12 +624,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
int ret = 0;
debug_rt_mutex_init_waiter(&waiter);
- waiter.task = NULL;
raw_spin_lock(&lock->wait_lock);
/* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock)) {
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
raw_spin_unlock(&lock->wait_lock);
return 0;
}
@@ -717,12 +642,14 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
timeout->task = NULL;
}
- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
- detect_deadlock);
+ ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+
+ if (likely(!ret))
+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
set_current_state(TASK_RUNNING);
- if (unlikely(waiter.task))
+ if (unlikely(ret))
remove_waiter(lock, &waiter);
/*
@@ -737,14 +664,6 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
if (unlikely(timeout))
hrtimer_cancel(&timeout->timer);
- /*
- * Readjust priority, when we did not get the lock. We might
- * have been the pending owner and boosted. Since we did not
- * take the lock, the PI boost has to go.
- */
- if (unlikely(ret))
- rt_mutex_adjust_prio(current);
-
debug_rt_mutex_free_waiter(&waiter);
return ret;
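
Taken together with the hunks above, the reworked slow path queues the waiter once, up front, and then only loops over try-to-take/schedule. A condensed, illustrative skeleton of that flow (timeout setup, task state handling and debug hooks elided; the functions named are the ones touched by this patch, but the skeleton itself is not kernel code):

static int slowlock_sketch(struct rt_mutex *lock, int state,
			   struct hrtimer_sleeper *timeout, int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret;

	raw_spin_lock(&lock->wait_lock);
	if (try_to_take_rt_mutex(lock, current, NULL)) {	/* uncontended */
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}
	/* queue ourselves and walk the PI chain before sleeping */
	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
	if (unlikely(ret))
		remove_waiter(lock, &waiter);	/* we gave up; drop our waiter */
	raw_spin_unlock(&lock->wait_lock);
	return ret;
}
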
@@ -762,7 +681,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
if (likely(rt_mutex_owner(lock) != current)) {
- ret = try_to_take_rt_mutex(lock);
+ ret = try_to_take_rt_mutex(lock, current, NULL);
/*
* try_to_take_rt_mutex() sets the lock waiters
* bit unconditionally. Clean this up.
@@ -992,7 +911,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
{
__rt_mutex_init(lock, NULL);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
- rt_mutex_set_owner(lock, proxy_owner, 0);
+ rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
@@ -1008,7 +927,7 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
debug_rt_mutex_proxy_unlock(lock);
- rt_mutex_set_owner(lock, NULL, 0);
+ rt_mutex_set_owner(lock, NULL);
rt_mutex_deadlock_account_unlock(proxy_owner);
}
@@ -1034,20 +953,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
raw_spin_lock(&lock->wait_lock);
- mark_rt_mutex_waiters(lock);
-
- if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
- /* We got the lock for task. */
- debug_rt_mutex_lock(lock);
- rt_mutex_set_owner(lock, task, 0);
+ if (try_to_take_rt_mutex(lock, task, NULL)) {
raw_spin_unlock(&lock->wait_lock);
- rt_mutex_deadlock_account_lock(lock, task);
return 1;
}
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
- if (ret && !waiter->task) {
+ if (ret && !rt_mutex_owner(lock)) {
/*
* Reset the return value. We might have
* returned with -EDEADLK and the owner
@@ -1056,6 +969,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*/
ret = 0;
}
+
+ if (unlikely(ret))
+ remove_waiter(lock, waiter);
+
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
@@ -1110,12 +1027,11 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
set_current_state(TASK_INTERRUPTIBLE);
- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
- detect_deadlock);
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
set_current_state(TASK_RUNNING);
- if (unlikely(waiter->task))
+ if (unlikely(ret))
remove_waiter(lock, waiter);
/*
@@ -1126,13 +1042,5 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
raw_spin_unlock(&lock->wait_lock);
- /*
- * Readjust priority, when we did not get the lock. We might have been
- * the pending owner and boosted. Since we did not take the lock, the
- * PI boost has to go.
- */
- if (unlikely(ret))
- rt_mutex_adjust_prio(current);
-
return ret;
}
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 97a2f81866af..53a66c85261b 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -91,9 +91,8 @@ task_top_pi_waiter(struct task_struct *p)
/*
* lock->owner state tracking:
*/
-#define RT_MUTEX_OWNER_PENDING 1UL
-#define RT_MUTEX_HAS_WAITERS 2UL
-#define RT_MUTEX_OWNER_MASKALL 3UL
+#define RT_MUTEX_HAS_WAITERS 1UL
+#define RT_MUTEX_OWNER_MASKALL 1UL
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
@@ -101,17 +100,6 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}
-static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
-{
- return (struct task_struct *)
- ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
-}
-
-static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
-{
- return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
-}
-
/*
* PI-futex support (proxy locking functions, etc.):
*/
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..655164e30e88 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -324,7 +324,7 @@ struct cfs_rq {
* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
- struct sched_entity *curr, *next, *last;
+ struct sched_entity *curr, *next, *last, *skip;
unsigned int nr_spread_over;
@@ -606,9 +606,6 @@ static inline struct task_group *task_group(struct task_struct *p)
struct task_group *tg;
struct cgroup_subsys_state *css;
- if (p->flags & PF_EXITING)
- return &root_task_group;
-
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
tg = container_of(css, struct task_group, css);
@@ -1686,6 +1683,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__release(rq2->lock);
}
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock;
+ * you need to do so manually before calling.
+ */
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
+ __acquires(rq1->lock)
+ __acquires(rq2->lock)
+{
+ BUG_ON(!irqs_disabled());
+ BUG_ON(rq1 != rq2);
+ raw_spin_lock(&rq1->lock);
+ __acquire(rq2->lock); /* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock;
+ * you need to do so manually after calling.
+ */
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+ __releases(rq1->lock)
+ __releases(rq2->lock)
+{
+ BUG_ON(rq1 != rq2);
+ raw_spin_unlock(&rq1->lock);
+ __release(rq2->lock);
+}
+
#endif
static void calc_load_account_idle(struct rq *this_rq);
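
The UP stubs above simply take the single runqueue lock once (rq1 and rq2 are the same queue). On SMP, which is not shown in this hunk, two distinct locks have to be taken without risking an ABBA deadlock; a fixed ordering, here by address, is one common way. A small pthread-based model of the pairing idea, purely illustrative and not kernel code:

#include <pthread.h>
#include <stdint.h>

static void lock_rq_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);	/* UP-like case: only one lock exists */
		return;
	}
	/* take the two locks in a fixed (address) order to avoid ABBA */
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}
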
@@ -1880,7 +1910,7 @@ void account_system_vtime(struct task_struct *curr)
*/
if (hardirq_count())
__this_cpu_add(cpu_hardirq_time, delta);
- else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+ else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
__this_cpu_add(cpu_softirq_time, delta);
irq_time_write_end();
@@ -1920,8 +1950,40 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
sched_rt_avg_update(rq, irq_delta);
}
+static int irqtime_account_hi_update(void)
+{
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ unsigned long flags;
+ u64 latest_ns;
+ int ret = 0;
+
+ local_irq_save(flags);
+ latest_ns = this_cpu_read(cpu_hardirq_time);
+ if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+ ret = 1;
+ local_irq_restore(flags);
+ return ret;
+}
+
+static int irqtime_account_si_update(void)
+{
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ unsigned long flags;
+ u64 latest_ns;
+ int ret = 0;
+
+ local_irq_save(flags);
+ latest_ns = this_cpu_read(cpu_softirq_time);
+ if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+ ret = 1;
+ local_irq_restore(flags);
+ return ret;
+}
+
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+#define sched_clock_irqtime (0)
+
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
rq->clock_task += delta;
@@ -2025,14 +2087,14 @@ inline int task_curr(const struct task_struct *p)
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
- int oldprio, int running)
+ int oldprio)
{
if (prev_class != p->sched_class) {
if (prev_class->switched_from)
- prev_class->switched_from(rq, p, running);
- p->sched_class->switched_to(rq, p, running);
- } else
- p->sched_class->prio_changed(rq, p, oldprio, running);
+ prev_class->switched_from(rq, p);
+ p->sched_class->switched_to(rq, p);
+ } else if (oldprio != p->prio)
+ p->sched_class->prio_changed(rq, p, oldprio);
}
static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2224,7 +2286,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* yield - it could be a while.
*/
if (unlikely(on_rq)) {
- schedule_timeout_uninterruptible(1);
+ ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
continue;
}
@@ -2265,27 +2330,6 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p: the task to evaluate
- * @func: the function to be called
- * @info: the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
- void (*func) (void *info), void *info)
-{
- int cpu;
-
- preempt_disable();
- cpu = task_cpu(p);
- if (task_curr(p))
- smp_call_function_single(cpu, func, info, 1);
- preempt_enable();
-}
-
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
@@ -2566,6 +2610,7 @@ static void __sched_fork(struct task_struct *p)
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
+ p->se.vruntime = 0;
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -2776,9 +2821,12 @@ static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
+ sched_info_switch(prev, next);
+ perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
+ trace_sched_switch(prev, next);
}
/**
@@ -2911,7 +2959,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
struct mm_struct *mm, *oldmm;
prepare_task_switch(rq, prev, next);
- trace_sched_switch(prev, next);
+
mm = next->mm;
oldmm = prev->active_mm;
/*
@@ -3568,6 +3616,32 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
}
/*
+ * Account system cpu time to a process and desired cpustat field
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ * @target_cputime64: pointer to cpustat field that has to be updated
+ */
+static inline
+void __account_system_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled, cputime64_t *target_cputime64)
+{
+ cputime64_t tmp = cputime_to_cputime64(cputime);
+
+ /* Add system time to process. */
+ p->stime = cputime_add(p->stime, cputime);
+ p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+ account_group_system_time(p, cputime);
+
+ /* Add system time to cpustat. */
+ *target_cputime64 = cputime64_add(*target_cputime64, tmp);
+ cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+
+ /* Account for system time used */
+ acct_update_integrals(p);
+}
+
+/*
* Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3578,36 +3652,26 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
cputime_t cputime, cputime_t cputime_scaled)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
- cputime64_t tmp;
+ cputime64_t *target_cputime64;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
account_guest_time(p, cputime, cputime_scaled);
return;
}
- /* Add system time to process. */
- p->stime = cputime_add(p->stime, cputime);
- p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
- account_group_system_time(p, cputime);
-
- /* Add system time to cpustat. */
- tmp = cputime_to_cputime64(cputime);
if (hardirq_count() - hardirq_offset)
- cpustat->irq = cputime64_add(cpustat->irq, tmp);
+ target_cputime64 = &cpustat->irq;
else if (in_serving_softirq())
- cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+ target_cputime64 = &cpustat->softirq;
else
- cpustat->system = cputime64_add(cpustat->system, tmp);
-
- cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+ target_cputime64 = &cpustat->system;
- /* Account for system time used */
- acct_update_integrals(p);
+ __account_system_time(p, cputime, cputime_scaled, target_cputime64);
}
/*
* Account for involuntary wait time.
- * @steal: the cpu time spent in involuntary wait
+ * @cputime: the cpu time spent in involuntary wait
*/
void account_steal_time(cputime_t cputime)
{
@@ -3635,6 +3699,73 @@ void account_idle_time(cputime_t cputime)
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * Account a tick to a process and cpustat
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: is the tick from userspace
+ * @rq: the pointer to rq
+ *
+ * Tick demultiplexing follows the order
+ * - pending hardirq update
+ * - pending softirq update
+ * - user_time
+ * - idle_time
+ * - system time
+ * - check for guest_time
+ * - else account as system_time
+ *
+ * The hardirq check is done for both system and user time because no
+ * timer interrupt fires while we are in a hardirq, so we may never get
+ * an opportunity to account that time purely as system time.
+ * p->stime and friends are only updated for system time, not for
+ * hardirq/softirq time, since that no longer counts in task exec_runtime.
+ */
+static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+ struct rq *rq)
+{
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+ cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+
+ if (irqtime_account_hi_update()) {
+ cpustat->irq = cputime64_add(cpustat->irq, tmp);
+ } else if (irqtime_account_si_update()) {
+ cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+ } else if (this_cpu_ksoftirqd() == p) {
+ /*
+ * ksoftirqd time does not get accounted in cpu_softirq_time.
+ * So, we have to handle it separately here.
+ * Also, p->stime needs to be updated for ksoftirqd.
+ */
+ __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+ &cpustat->softirq);
+ } else if (user_tick) {
+ account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+ } else if (p == rq->idle) {
+ account_idle_time(cputime_one_jiffy);
+ } else if (p->flags & PF_VCPU) { /* System time or guest time */
+ account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+ } else {
+ __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+ &cpustat->system);
+ }
+}
+
+static void irqtime_account_idle_ticks(int ticks)
+{
+ int i;
+ struct rq *rq = this_rq();
+
+ for (i = 0; i < ticks; i++)
+ irqtime_account_process_tick(current, 0, rq);
+}
+#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+static void irqtime_account_idle_ticks(int ticks) {}
+static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+ struct rq *rq) {}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
/*
* Account a single tick of cpu time.
* @p: the process that the cpu time gets accounted to
@@ -3645,6 +3776,11 @@ void account_process_tick(struct task_struct *p, int user_tick)
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
+ if (sched_clock_irqtime) {
+ irqtime_account_process_tick(p, user_tick, rq);
+ return;
+ }
+
if (user_tick)
account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
@@ -3670,6 +3806,12 @@ void account_steal_ticks(unsigned long ticks)
*/
void account_idle_ticks(unsigned long ticks)
{
+
+ if (sched_clock_irqtime) {
+ irqtime_account_idle_ticks(ticks);
+ return;
+ }
+
account_idle_time(jiffies_to_cputime(ticks));
}
@@ -3989,9 +4131,6 @@ need_resched_nonpreemptible:
rq->skip_clock_update = 0;
if (likely(prev != next)) {
- sched_info_switch(prev, next);
- perf_event_task_sched_out(prev, next);
-
rq->nr_switches++;
rq->curr = next;
++*switch_count;
@@ -4570,11 +4709,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq) {
+ if (on_rq)
enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
- check_class_changed(rq, p, prev_class, oldprio, running);
- }
+ check_class_changed(rq, p, prev_class, oldprio);
task_rq_unlock(rq, &flags);
}
@@ -4902,11 +5040,10 @@ recheck:
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq) {
+ if (on_rq)
activate_task(rq, p, 0);
- check_class_changed(rq, p, prev_class, oldprio, running);
- }
+ check_class_changed(rq, p, prev_class, oldprio);
__task_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -5323,6 +5460,58 @@ void __sched yield(void)
}
EXPORT_SYMBOL(yield);
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Returns true if we indeed boosted the target task.
+ */
+bool __sched yield_to(struct task_struct *p, bool preempt)
+{
+ struct task_struct *curr = current;
+ struct rq *rq, *p_rq;
+ unsigned long flags;
+ bool yielded = 0;
+
+ local_irq_save(flags);
+ rq = this_rq();
+
+again:
+ p_rq = task_rq(p);
+ double_rq_lock(rq, p_rq);
+ while (task_rq(p) != p_rq) {
+ double_rq_unlock(rq, p_rq);
+ goto again;
+ }
+
+ if (!curr->sched_class->yield_to_task)
+ goto out;
+
+ if (curr->sched_class != p->sched_class)
+ goto out;
+
+ if (task_running(p_rq, p) || p->state)
+ goto out;
+
+ yielded = curr->sched_class->yield_to_task(rq, p, preempt);
+ if (yielded)
+ schedstat_inc(rq, yld_count);
+
+out:
+ double_rq_unlock(rq, p_rq);
+ local_irq_restore(flags);
+
+ if (yielded)
+ schedule();
+
+ return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
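
yield_to() is exported for modular users; a hypothetical caller could look like the sketch below. Only yield_to() itself comes from this patch, the helper and its name are invented for illustration, and the caller must guarantee that the target task cannot go away across the call:

static bool nudge_resource_holder(struct task_struct *holder)
{
	/* true => also preempt holder's current rq->curr if we can boost it */
	return yield_to(holder, true);
}
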
/*
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
@@ -5571,7 +5760,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
- ftrace_graph_init_task(idle);
+ ftrace_graph_init_idle_task(idle, cpu);
}
/*
@@ -7796,6 +7985,10 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq;
+ /* allow initial update_cfs_load() to truncate */
+#ifdef CONFIG_SMP
+ cfs_rq->load_stamp = 1;
+#endif
#endif
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}
@@ -8109,6 +8302,8 @@ EXPORT_SYMBOL(__might_sleep);
#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
+ const struct sched_class *prev_class = p->sched_class;
+ int old_prio = p->prio;
int on_rq;
on_rq = p->se.on_rq;
@@ -8119,6 +8314,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
activate_task(rq, p, 0);
resched_task(rq->curr);
}
+
+ check_class_changed(rq, p, prev_class, old_prio);
}
void normalize_rt_tasks(void)
@@ -8510,7 +8707,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
for_each_sched_entity(se)
- update_cfs_shares(group_cfs_rq(se), 0);
+ update_cfs_shares(group_cfs_rq(se));
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -8884,7 +9081,8 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
}
static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup *old_cgrp, struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 9fb656283157..5946ac515602 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -12,7 +12,6 @@ static atomic_t autogroup_seq_nr;
static void __init autogroup_init(struct task_struct *init_task)
{
autogroup_default.tg = &root_task_group;
- root_task_group.autogroup = &autogroup_default;
kref_init(&autogroup_default.kref);
init_rwsem(&autogroup_default.lock);
init_task->signal->autogroup = &autogroup_default;
@@ -130,7 +129,7 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
static inline bool task_group_is_autogroup(struct task_group *tg)
{
- return tg != &root_task_group && tg->autogroup;
+ return !!tg->autogroup;
}
static inline struct task_group *
@@ -161,11 +160,15 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
p->signal->autogroup = autogroup_kref_get(ag);
+ if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+ goto out;
+
t = p;
do {
sched_move_task(t);
} while_each_thread(p, t);
+out:
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
@@ -247,10 +250,14 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
struct autogroup *ag = autogroup_task_get(p);
+ if (!task_group_is_autogroup(ag->tg))
+ goto out;
+
down_read(&ag->lock);
seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
up_read(&ag->lock);
+out:
autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */
@@ -258,9 +265,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
#ifdef CONFIG_SCHED_DEBUG
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
- int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
- if (!enabled || !tg->autogroup)
+ if (!task_group_is_autogroup(tg))
return 0;
return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 7b859ffe5dad..05577055cfca 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -1,6 +1,11 @@
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup {
+ /*
+ * The reference count does not track how many threads are attached
+ * to this autogroup right now; it stands for the number of tasks
+ * that could still use this autogroup.
+ */
struct kref kref;
struct task_group *tg;
struct rw_semaphore lock;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index eb6cb8edd075..7bacd83a4158 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -179,7 +179,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
raw_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
- MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
+ MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
if (last)
max_vruntime = last->vruntime;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0c26e2df450e..3a88dee165c0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -69,14 +69,6 @@ static unsigned int sched_nr_latency = 8;
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
- * sys_sched_yield() compat mode
- *
- * This option switches the agressive yield implementation of the
- * old scheduler back on.
- */
-unsigned int __read_mostly sysctl_sched_compat_yield;
-
-/*
* SCHED_OTHER wake-up granularity.
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
@@ -419,7 +411,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
-static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = cfs_rq->rb_leftmost;
@@ -429,6 +421,17 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
return rb_entry(left, struct sched_entity, run_node);
}
+static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+{
+ struct rb_node *next = rb_next(&se->run_node);
+
+ if (!next)
+ return NULL;
+
+ return rb_entry(next, struct sched_entity, run_node);
+}
+
+#ifdef CONFIG_SCHED_DEBUG
static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
@@ -443,7 +446,6 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
-#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -540,7 +542,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+static void update_cfs_shares(struct cfs_rq *cfs_rq);
/*
* Update the current task's runtime statistics. Skip current tasks that
@@ -733,6 +735,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
now - cfs_rq->load_last > 4 * period) {
cfs_rq->load_period = 0;
cfs_rq->load_avg = 0;
+ delta = period - 1;
}
cfs_rq->load_stamp = now;
@@ -763,16 +766,15 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
list_del_leaf_cfs_rq(cfs_rq);
}
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
- long weight_delta)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
long load_weight, load, shares;
- load = cfs_rq->load.weight + weight_delta;
+ load = cfs_rq->load.weight;
load_weight = atomic_read(&tg->load_weight);
- load_weight -= cfs_rq->load_contribution;
load_weight += load;
+ load_weight -= cfs_rq->load_contribution;
shares = (tg->shares * load);
if (load_weight)
@@ -790,7 +792,7 @@ static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq, 0);
+ update_cfs_shares(cfs_rq);
}
}
# else /* CONFIG_SMP */
@@ -798,8 +800,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}
-static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
- long weight_delta)
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
return tg->shares;
}
@@ -824,7 +825,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
account_entity_enqueue(cfs_rq, se);
}
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
struct task_group *tg;
struct sched_entity *se;
@@ -838,7 +839,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
if (likely(se->load.weight == tg->shares))
return;
#endif
- shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
+ shares = calc_cfs_shares(cfs_rq, tg);
reweight_entity(cfs_rq_of(se), se, shares);
}
@@ -847,7 +848,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
@@ -978,8 +979,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
update_curr(cfs_rq);
update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq, se->load.weight);
account_entity_enqueue(cfs_rq, se);
+ update_cfs_shares(cfs_rq);
if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
@@ -996,19 +997,49 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
list_add_leaf_cfs_rq(cfs_rq);
}
-static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies_last(struct sched_entity *se)
+{
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ if (cfs_rq->last == se)
+ cfs_rq->last = NULL;
+ else
+ break;
+ }
+}
+
+static void __clear_buddies_next(struct sched_entity *se)
{
- if (!se || cfs_rq->last == se)
- cfs_rq->last = NULL;
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ if (cfs_rq->next == se)
+ cfs_rq->next = NULL;
+ else
+ break;
+ }
+}
- if (!se || cfs_rq->next == se)
- cfs_rq->next = NULL;
+static void __clear_buddies_skip(struct sched_entity *se)
+{
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ if (cfs_rq->skip == se)
+ cfs_rq->skip = NULL;
+ else
+ break;
+ }
}
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- for_each_sched_entity(se)
- __clear_buddies(cfs_rq_of(se), se);
+ if (cfs_rq->last == se)
+ __clear_buddies_last(se);
+
+ if (cfs_rq->next == se)
+ __clear_buddies_next(se);
+
+ if (cfs_rq->skip == se)
+ __clear_buddies_skip(se);
}
static void
@@ -1041,7 +1072,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_load(cfs_rq, 0);
account_entity_dequeue(cfs_rq, se);
update_min_vruntime(cfs_rq);
- update_cfs_shares(cfs_rq, 0);
+ update_cfs_shares(cfs_rq);
/*
* Normalize the entity after updating the min_vruntime because the
@@ -1084,7 +1115,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (cfs_rq->nr_running > 1) {
- struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *se = __pick_first_entity(cfs_rq);
s64 delta = curr->vruntime - se->vruntime;
if (delta < 0)
@@ -1128,13 +1159,27 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+/*
+ * Pick the next process, keeping these things in mind, in this order:
+ * 1) keep things fair between processes/task groups
+ * 2) pick the "next" process, since someone really wants that to run
+ * 3) pick the "last" process, for cache locality
+ * 4) do not run the "skip" process, if something else is available
+ */
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
- struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *left = se;
- if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
- se = cfs_rq->next;
+ /*
+ * Avoid running the skip buddy, if running something else can
+ * be done without getting too unfair.
+ */
+ if (cfs_rq->skip == se) {
+ struct sched_entity *second = __pick_next_entity(se);
+ if (second && wakeup_preempt_entity(second, left) < 1)
+ se = second;
+ }
/*
* Prefer last buddy, try to return the CPU to a preempted task.
@@ -1142,6 +1187,12 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
se = cfs_rq->last;
+ /*
+ * Someone really wants this to run. If it's not unfair, run it.
+ */
+ if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+ se = cfs_rq->next;
+
clear_buddies(cfs_rq, se);
return se;
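
Read in combination, the four rules resolve in a fixed order: the skip buddy is dodged first, after which the last and then the next buddy may override the leftmost pick. A compact stand-alone model, where fair_enough() is a hypothetical stand-in for wakeup_preempt_entity() returning less than 1 and struct ent stands in for struct sched_entity:

struct ent;
extern int fair_enough(struct ent *cand, struct ent *leftmost);	/* hypothetical */

static struct ent *pick_model(struct ent *leftmost, struct ent *second,
                              struct ent *skip, struct ent *last, struct ent *next)
{
	struct ent *se = leftmost;		/* rule 1: start with the fairest pick */

	if (skip == se && second && fair_enough(second, leftmost))
		se = second;			/* rule 4: avoid the skip buddy        */
	if (last && fair_enough(last, leftmost))
		se = last;			/* rule 3: cache-hot last buddy        */
	if (next && fair_enough(next, leftmost))
		se = next;			/* rule 2: next buddy wins if fair     */
	return se;
}
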
@@ -1282,7 +1333,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq, 0);
+ update_cfs_shares(cfs_rq);
}
hrtick_update(rq);
@@ -1312,58 +1363,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq, 0);
+ update_cfs_shares(cfs_rq);
}
hrtick_update(rq);
}
-/*
- * sched_yield() support is very simple - we dequeue and enqueue.
- *
- * If compat_yield is turned on then we requeue to the end of the tree.
- */
-static void yield_task_fair(struct rq *rq)
-{
- struct task_struct *curr = rq->curr;
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- struct sched_entity *rightmost, *se = &curr->se;
-
- /*
- * Are we the only task in the tree?
- */
- if (unlikely(cfs_rq->nr_running == 1))
- return;
-
- clear_buddies(cfs_rq, se);
-
- if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
- update_rq_clock(rq);
- /*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
-
- return;
- }
- /*
- * Find the rightmost entry in the rbtree:
- */
- rightmost = __pick_last_entity(cfs_rq);
- /*
- * Already in the rightmost position?
- */
- if (unlikely(!rightmost || entity_before(rightmost, se)))
- return;
-
- /*
- * Minimally necessary key value to be last in the tree:
- * Upon rescheduling, sched_class::put_prev_task() will place
- * 'current' within the tree based on its new key value.
- */
- se->vruntime = rightmost->vruntime + 1;
-}
-
#ifdef CONFIG_SMP
static void task_waking_fair(struct rq *rq, struct task_struct *p)
@@ -1834,6 +1839,14 @@ static void set_next_buddy(struct sched_entity *se)
}
}
+static void set_skip_buddy(struct sched_entity *se)
+{
+ if (likely(task_of(se)->policy != SCHED_IDLE)) {
+ for_each_sched_entity(se)
+ cfs_rq_of(se)->skip = se;
+ }
+}
+
/*
* Preempt the current task with a newly woken task if needed:
*/
@@ -1932,6 +1945,55 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
}
}
+/*
+ * sched_yield() is very simple
+ *
+ * The magic of dealing with the ->skip buddy is in pick_next_entity.
+ */
+static void yield_task_fair(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ struct sched_entity *se = &curr->se;
+
+ /*
+ * Are we the only task in the tree?
+ */
+ if (unlikely(rq->nr_running == 1))
+ return;
+
+ clear_buddies(cfs_rq, se);
+
+ if (curr->policy != SCHED_BATCH) {
+ update_rq_clock(rq);
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+ }
+
+ set_skip_buddy(se);
+}
+
+static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
+{
+ struct sched_entity *se = &p->se;
+
+ if (!se->on_rq)
+ return false;
+
+ /* Tell the scheduler that we'd really like pse to run next. */
+ set_next_buddy(se);
+
+ /* Make p's CPU reschedule; pick_next_entity takes care of fairness. */
+ if (preempt)
+ resched_task(rq->curr);
+
+ yield_task_fair(rq);
+
+ return true;
+}
+
#ifdef CONFIG_SMP
/**************************************************
* Fair scheduling class load-balancing methods:
@@ -2123,7 +2185,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
* We need to update shares after updating tg->load_weight in
* order to adjust the weight of groups with long running tasks.
*/
- update_cfs_shares(cfs_rq, 0);
+ update_cfs_shares(cfs_rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -2610,7 +2672,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
* @load_idx: Load index of sched_domain of this_cpu for load calc.
- * @sd_idle: Idle status of the sched_domain containing group.
* @local_group: Does group contain this_cpu.
* @cpus: Set of cpus considered for load balancing.
* @balance: Should we balance.
@@ -2618,7 +2679,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
*/
static inline void update_sg_lb_stats(struct sched_domain *sd,
struct sched_group *group, int this_cpu,
- enum cpu_idle_type idle, int load_idx, int *sd_idle,
+ enum cpu_idle_type idle, int load_idx,
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
{
@@ -2638,9 +2699,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
- if (*sd_idle && rq->nr_running)
- *sd_idle = 0;
-
/* Bias balancing toward cpus of our domain */
if (local_group) {
if (idle_cpu(i) && !first_idle_cpu) {
@@ -2685,7 +2743,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
/*
* Consider the group unbalanced when the imbalance is larger
- * than the average weight of two tasks.
+ * than the average weight of a task.
*
* APZ: with cgroup the avg task weight can vary wildly and
* might not be a suitable number - should we keep a
@@ -2695,7 +2753,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
if (sgs->sum_nr_running)
avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
+ if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
@@ -2755,15 +2813,13 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
* @sd: sched_domain whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
- * @sd_idle: Idle status of the sched_domain containing sg.
* @cpus: Set of cpus considered for load balancing.
* @balance: Should we balance.
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
- enum cpu_idle_type idle, int *sd_idle,
- const struct cpumask *cpus, int *balance,
- struct sd_lb_stats *sds)
+ enum cpu_idle_type idle, const struct cpumask *cpus,
+ int *balance, struct sd_lb_stats *sds)
{
struct sched_domain *child = sd->child;
struct sched_group *sg = sd->groups;
@@ -2781,7 +2837,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle,
+ update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
local_group, cpus, balance, &sgs);
if (local_group && !(*balance))
@@ -3033,7 +3089,6 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
* @imbalance: Variable which stores amount of weighted load which should
* be moved to restore balance/put a group to idle.
* @idle: The idle status of this_cpu.
- * @sd_idle: The idleness of sd
* @cpus: The set of CPUs under consideration for load-balancing.
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
@@ -3046,7 +3101,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum cpu_idle_type idle,
- int *sd_idle, const struct cpumask *cpus, int *balance)
+ const struct cpumask *cpus, int *balance)
{
struct sd_lb_stats sds;
@@ -3056,22 +3111,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* Compute the various statistics relevant for load balancing at
* this level.
*/
- update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
- balance, &sds);
-
- /* Cases where imbalance does not exist from POV of this_cpu */
- /* 1) this_cpu is not the appropriate cpu to perform load balancing
- * at this level.
- * 2) There is no busy sibling group to pull from.
- * 3) This group is the busiest group.
- * 4) This group is more busy than the avg busieness at this
- * sched_domain.
- * 5) The imbalance is within the specified limit.
- *
- * Note: when doing newidle balance, if the local group has excess
- * capacity (i.e. nr_running < group_capacity) and the busiest group
- * does not have any capacity, we force a load balance to pull tasks
- * to the local group. In this case, we skip past checks 3, 4 and 5.
+ update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
+
+ /*
+ * this_cpu is not the appropriate cpu to perform load balancing at
+ * this level.
*/
if (!(*balance))
goto ret;
@@ -3080,41 +3124,55 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
check_asym_packing(sd, &sds, this_cpu, imbalance))
return sds.busiest;
+ /* There is no busy sibling group to pull tasks from */
if (!sds.busiest || sds.busiest_nr_running == 0)
goto out_balanced;
- /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+ /*
+ * If the busiest group is imbalanced, the checks below don't
+ * work because they assume all things are equal, which typically
+ * isn't true due to cpus_allowed constraints and the like.
+ */
+ if (sds.group_imb)
+ goto force_balance;
+
+ /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
!sds.busiest_has_capacity)
goto force_balance;
+ /*
+ * If the local group is more busy than the selected busiest group
+ * don't try and pull any tasks.
+ */
if (sds.this_load >= sds.max_load)
goto out_balanced;
+ /*
+ * Don't pull any tasks if this group is already above the domain
+ * average load.
+ */
sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
-
if (sds.this_load >= sds.avg_load)
goto out_balanced;
- /*
- * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative.
- * And to check for busy balance use !idle_cpu instead of
- * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
- * even when they are idle.
- */
- if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
- if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
- goto out_balanced;
- } else {
+ if (idle == CPU_IDLE) {
/*
* This cpu is idle. If the busiest group load doesn't
* have more tasks than the number of available cpu's and
* there is no imbalance between this and busiest group
* wrt to idle cpu's, it is balanced.
*/
- if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+ if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
sds.busiest_nr_running <= sds.busiest_group_weight)
goto out_balanced;
+ } else {
+ /*
+ * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
+ * imbalance_pct to be conservative.
+ */
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+ goto out_balanced;
}
force_balance:
@@ -3193,7 +3251,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
/* Working cpumask for load_balance and load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
-static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
+static int need_active_balance(struct sched_domain *sd, int idle,
int busiest_cpu, int this_cpu)
{
if (idle == CPU_NEWLY_IDLE) {
@@ -3225,10 +3283,6 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
* move_tasks() will succeed. ld_moved will be true and this
* active balance code will not be triggered.
*/
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
- !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- return 0;
-
if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
return 0;
}
@@ -3246,7 +3300,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
int *balance)
{
- int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
+ int ld_moved, all_pinned = 0, active_balance = 0;
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
@@ -3255,20 +3309,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
cpumask_copy(cpus, cpu_active_mask);
- /*
- * When power savings policy is enabled for the parent domain, idle
- * sibling can pick up load irrespective of busy siblings. In this case,
- * let the state of idle sibling percolate up as CPU_IDLE, instead of
- * portraying it as CPU_NOT_IDLE.
- */
- if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
- !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- sd_idle = 1;
-
schedstat_inc(sd, lb_count[idle]);
redo:
- group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+ group = find_busiest_group(sd, this_cpu, &imbalance, idle,
cpus, balance);
if (*balance == 0)
@@ -3330,8 +3374,7 @@ redo:
if (idle != CPU_NEWLY_IDLE)
sd->nr_balance_failed++;
- if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
- this_cpu)) {
+ if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the active_load_balance_cpu_stop,
@@ -3386,10 +3429,6 @@ redo:
sd->balance_interval *= 2;
}
- if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
- !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- ld_moved = -1;
-
goto out;
out_balanced:
@@ -3403,11 +3442,7 @@ out_one_pinned:
(sd->balance_interval < sd->max_interval))
sd->balance_interval *= 2;
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
- !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- ld_moved = -1;
- else
- ld_moved = 0;
+ ld_moved = 0;
out:
return ld_moved;
}
@@ -3831,8 +3866,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
if (load_balance(cpu, rq, sd, idle, &balance)) {
/*
* We've pulled tasks over so either we're no
- * longer idle, or one of our SMT siblings is
- * not idle.
+ * longer idle.
*/
idle = CPU_NOT_IDLE;
}
@@ -4079,33 +4113,62 @@ static void task_fork_fair(struct task_struct *p)
* Priority of the task has changed. Check to see if we preempt
* the current task.
*/
-static void prio_changed_fair(struct rq *rq, struct task_struct *p,
- int oldprio, int running)
+static void
+prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
+ if (!p->se.on_rq)
+ return;
+
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
- if (running) {
+ if (rq->curr == p) {
if (p->prio > oldprio)
resched_task(rq->curr);
} else
check_preempt_curr(rq, p, 0);
}
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /*
+ * Ensure the task's vruntime is normalized, so that when its
+ * switched back to the fair class the enqueue_entity(.flags=0) will
+ * do the right thing.
+ *
+ * If it was on_rq, then the dequeue_entity(.flags=0) will already
+ * have normalized the vruntime, if it was !on_rq, then only when
+ * the task is sleeping will it still have non-normalized vruntime.
+ */
+ if (!se->on_rq && p->state != TASK_RUNNING) {
+ /*
+ * Fix up our vruntime so that the current sleep doesn't
+ * cause 'unlimited' sleep bonus.
+ */
+ place_entity(cfs_rq, se, 0);
+ se->vruntime -= cfs_rq->min_vruntime;
+ }
+}
+
/*
* We switched to the sched_fair class.
*/
-static void switched_to_fair(struct rq *rq, struct task_struct *p,
- int running)
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
+ if (!p->se.on_rq)
+ return;
+
/*
* We were most likely switched from sched_rt, so
* kick off the schedule if running, otherwise just see
* if we can still preempt the current task.
*/
- if (running)
+ if (rq->curr == p)
resched_task(rq->curr);
else
check_preempt_curr(rq, p, 0);
@@ -4171,6 +4234,7 @@ static const struct sched_class fair_sched_class = {
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
+ .yield_to_task = yield_to_task_fair,
.check_preempt_curr = check_preempt_wakeup,
@@ -4191,6 +4255,7 @@ static const struct sched_class fair_sched_class = {
.task_fork = task_fork_fair,
.prio_changed = prio_changed_fair,
+ .switched_from = switched_from_fair,
.switched_to = switched_to_fair,
.get_rr_interval = get_rr_interval_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 9fa0f402c87c..c82f26c1b7c3 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -52,31 +52,15 @@ static void set_curr_task_idle(struct rq *rq)
{
}
-static void switched_to_idle(struct rq *rq, struct task_struct *p,
- int running)
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
- /* Can this actually happen?? */
- if (running)
- resched_task(rq->curr);
- else
- check_preempt_curr(rq, p, 0);
+ BUG();
}
-static void prio_changed_idle(struct rq *rq, struct task_struct *p,
- int oldprio, int running)
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
- /* This can happen for hot plug CPUS */
-
- /*
- * Reschedule if we are currently running on this runqueue and
- * our priority decreased, or if we are not currently running on
- * this runqueue and our priority is higher than the current's
- */
- if (running) {
- if (p->prio > oldprio)
- resched_task(rq->curr);
- } else
- check_preempt_curr(rq, p, 0);
+ BUG();
}
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad6267714c84..4e108f8ecb6a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1595,8 +1595,7 @@ static void rq_offline_rt(struct rq *rq)
* When switch from the rt queue, we bring ourselves to a position
* that we might want to pull RT tasks from other runqueues.
*/
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
- int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
* If there are other RT tasks then we will reschedule
@@ -1605,7 +1604,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
* we may need to handle the pulling of RT tasks
* now.
*/
- if (!rq->rt.rt_nr_running)
+ if (p->se.on_rq && !rq->rt.rt_nr_running)
pull_rt_task(rq);
}
@@ -1624,8 +1623,7 @@ static inline void init_sched_rt_class(void)
* with RT tasks. In this case we try to push them off to
* other runqueues.
*/
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
- int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
int check_resched = 1;
@@ -1636,7 +1634,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
* If that current running task is also an RT task
* then see if we can move to another run queue.
*/
- if (!running) {
+ if (p->se.on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
if (rq->rt.overloaded && push_rt_task(rq) &&
/* Don't resched if we changed runqueues */
@@ -1652,10 +1650,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
* Priority of the task has changed. This may cause
* us to initiate a push or pull.
*/
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
- int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
- if (running) {
+ if (!p->se.on_rq)
+ return;
+
+ if (rq->curr == p) {
#ifdef CONFIG_SMP
/*
* If our priority decreases while running, we
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 2bf6b47058c1..84ec9bcf82d9 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -59,14 +59,13 @@ static void set_curr_task_stop(struct rq *rq)
{
}
-static void switched_to_stop(struct rq *rq, struct task_struct *p,
- int running)
+static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
BUG(); /* it's impossible to change to this class */
}
-static void prio_changed_stop(struct rq *rq, struct task_struct *p,
- int oldprio, int running)
+static void
+prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
BUG(); /* how!?, what priority? */
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 68eb5efec388..56e5dec837f0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
char *softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -311,9 +311,21 @@ void irq_enter(void)
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq() __do_softirq()
+static inline void invoke_softirq(void)
+{
+ if (!force_irqthreads)
+ __do_softirq();
+ else
+ wakeup_softirqd();
+}
#else
-# define invoke_softirq() do_softirq()
+static inline void invoke_softirq(void)
+{
+ if (!force_irqthreads)
+ do_softirq();
+ else
+ wakeup_softirqd();
+}
#endif
/*
@@ -721,7 +733,6 @@ static int run_ksoftirqd(void * __bind_cpu)
{
set_current_state(TASK_INTERRUPTIBLE);
- current->flags |= PF_KSOFTIRQD;
while (!kthread_should_stop()) {
preempt_disable();
if (!local_softirq_pending()) {
@@ -738,7 +749,10 @@ static int run_ksoftirqd(void * __bind_cpu)
don't process */
if (cpu_is_offline((long)__bind_cpu))
goto wait_to_die;
- do_softirq();
+ local_irq_disable();
+ if (local_softirq_pending())
+ __do_softirq();
+ local_irq_enable();
preempt_enable_no_resched();
cond_resched();
preempt_disable();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0f1bd83db985..79268eef9afb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -361,20 +361,13 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_rt_handler,
},
- {
- .procname = "sched_compat_yield",
- .data = &sysctl_sched_compat_yield,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
#ifdef CONFIG_SCHED_AUTOGROUP
{
.procname = "sched_autogroup_enabled",
.data = &sysctl_sched_autogroup_enabled,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
@@ -948,7 +941,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_perf_event_sample_rate,
.maxlen = sizeof(sysctl_perf_event_sample_rate),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = perf_proc_update_handler,
},
#endif
#ifdef CONFIG_KMEMCHECK
@@ -1685,13 +1678,8 @@ static int test_perm(int mode, int op)
int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
{
- int error;
int mode;
- error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
- if (error)
- return error;
-
if (root->permissions)
mode = root->permissions(root, current->nsproxy, table);
else
diff --git a/kernel/time.c b/kernel/time.c
index 32174359576f..6430a7528fe4 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -150,7 +150,7 @@ static inline void warp_clock(void)
* various programs will get confused when the clock gets warped.
*/
-int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
+int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
{
static int firsttime = 1;
int error = 0;
@@ -644,6 +644,7 @@ u64 nsec_to_clock_t(u64 x)
#endif
}
+
/**
* nsecs_to_jiffies - Convert nsecs in u64 to jiffies
*
@@ -659,6 +660,24 @@ u64 nsec_to_clock_t(u64 x)
*/
unsigned long nsecs_to_jiffies(u64 n)
{
+ return (unsigned long)nsecs_to_jiffies64(n);
+}
+
+/**
+ * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
+ *
+ * @n: nsecs in u64
+ *
+ * Unlike {m,u}secs_to_jiffies, the input type is u64 rather than unsigned int.
+ * It also does not return MAX_JIFFY_OFFSET, since this function is designed
+ * for the scheduler, not for device drivers calculating timeout values.
+ *
+ * note:
+ * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
+ * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
+ */
+u64 nsecs_to_jiffies64(u64 n)
+{
#if (NSEC_PER_SEC % HZ) == 0
/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
return div_u64(n, NSEC_PER_SEC / HZ);
@@ -674,23 +693,6 @@ unsigned long nsecs_to_jiffies(u64 n)
#endif
}
-#if (BITS_PER_LONG < 64)
-u64 get_jiffies_64(void)
-{
- unsigned long seq;
- u64 ret;
-
- do {
- seq = read_seqbegin(&xtime_lock);
- ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
- return ret;
-}
-EXPORT_SYMBOL(get_jiffies_64);
-#endif
-
-EXPORT_SYMBOL(jiffies);
-
/*
* Add two timespec values and do a safety check for overflow.
* It's assumed that both values are valid (>= 0)
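The nsecs_to_jiffies64() helper added above reduces, in the common case, to one 64-bit division by the nanoseconds-per-tick. A standalone sketch of the same arithmetic, assuming HZ=1000 purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000ULL			/* assumed tick rate for this sketch */
#define NSEC_PER_SEC 1000000000ULL

/* Mirrors the common (NSEC_PER_SEC % HZ == 0) branch of nsecs_to_jiffies64(). */
static uint64_t sketch_nsecs_to_jiffies64(uint64_t n)
{
	return n / (NSEC_PER_SEC / HZ);
}

int main(void)
{
	/* 1.5 ms is one full jiffy at HZ=1000; the fraction is truncated. */
	printf("%llu\n", (unsigned long long)sketch_nsecs_to_jiffies64(1500000));	/* 1 */
	printf("%llu\n", (unsigned long long)sketch_nsecs_to_jiffies64(999999));	/* 0 */
	return 0;
}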
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index ee266620b06c..b0425991e9ac 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,5 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o timeconv.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
+obj-y += timeconv.o posix-clock.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index d7395fdfb9f3..0d74b9ba90c8 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -18,7 +18,6 @@
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
-#include <linux/tick.h>
#include "tick-internal.h"
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 5404a8456909..b2fa506667c0 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -22,8 +22,11 @@
************************************************************************/
#include <linux/clocksource.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <linux/init.h>
+#include "tick-internal.h"
+
/* The Jiffies based clocksource is the lowest common
* denominator clock source which should function on
* all systems. It has the same coarse resolution as
@@ -64,6 +67,23 @@ struct clocksource clocksource_jiffies = {
.shift = JIFFIES_SHIFT,
};
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void)
+{
+ unsigned long seq;
+ u64 ret;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ ret = jiffies_64;
+ } while (read_seqretry(&xtime_lock, seq));
+ return ret;
+}
+EXPORT_SYMBOL(get_jiffies_64);
+#endif
+
+EXPORT_SYMBOL(jiffies);
+
static int __init init_jiffies_clocksource(void)
{
return clocksource_register(&clocksource_jiffies);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5c00242fa921..5f1bb8e2008f 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -16,6 +16,8 @@
#include <linux/mm.h>
#include <linux/module.h>
+#include "tick-internal.h"
+
/*
* NTP timekeeping variables:
*/
@@ -646,6 +648,17 @@ int do_adjtimex(struct timex *txc)
hrtimer_cancel(&leap_timer);
}
+ if (txc->modes & ADJ_SETOFFSET) {
+ struct timespec delta;
+ delta.tv_sec = txc->time.tv_sec;
+ delta.tv_nsec = txc->time.tv_usec;
+ if (!(txc->modes & ADJ_NANO))
+ delta.tv_nsec *= 1000;
+ result = timekeeping_inject_offset(&delta);
+ if (result)
+ return result;
+ }
+
getnstimeofday(&ts);
write_seqlock_irq(&xtime_lock);
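The new ADJ_SETOFFSET branch above lets a privileged process step the clock by a signed offset via adjtimex(); with ADJ_NANO set, time.tv_usec carries nanoseconds. A minimal user-space sketch, assuming the ADJ_SETOFFSET and ADJ_NANO flags are exported by the installed headers (needs CAP_SYS_TIME to succeed):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };

	/* Step the clock forward by 1.5 s; with ADJ_NANO the tv_usec
	 * field carries nanoseconds, matching the kernel code above. */
	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
	tx.time.tv_sec = 1;
	tx.time.tv_usec = 500000000;

	if (adjtimex(&tx) < 0)
		perror("adjtimex");
	return 0;
}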
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
new file mode 100644
index 000000000000..04498cbf6002
--- /dev/null
+++ b/kernel/time/posix-clock.c
@@ -0,0 +1,441 @@
+/*
+ * posix-clock.c - support for dynamic clock devices
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/posix-clock.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+static void delete_clock(struct kref *kref);
+
+/*
+ * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
+ */
+static struct posix_clock *get_posix_clock(struct file *fp)
+{
+ struct posix_clock *clk = fp->private_data;
+
+ mutex_lock(&clk->mutex);
+
+ if (!clk->zombie)
+ return clk;
+
+ mutex_unlock(&clk->mutex);
+
+ return NULL;
+}
+
+static void put_posix_clock(struct posix_clock *clk)
+{
+ mutex_unlock(&clk->mutex);
+}
+
+static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct posix_clock *clk = get_posix_clock(fp);
+ int err = -EINVAL;
+
+ if (!clk)
+ return -ENODEV;
+
+ if (clk->ops.read)
+ err = clk->ops.read(clk, fp->f_flags, buf, count);
+
+ put_posix_clock(clk);
+
+ return err;
+}
+
+static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+{
+ struct posix_clock *clk = get_posix_clock(fp);
+ int result = 0;
+
+ if (!clk)
+ return -ENODEV;
+
+ if (clk->ops.poll)
+ result = clk->ops.poll(clk, fp, wait);
+
+ put_posix_clock(clk);
+
+ return result;
+}
+
+static int posix_clock_fasync(int fd, struct file *fp, int on)
+{
+ struct posix_clock *clk = get_posix_clock(fp);
+ int err = 0;
+
+ if (!clk)
+ return -ENODEV;
+
+ if (clk->ops.fasync)
+ err = clk->ops.fasync(clk, fd, fp, on);
+
+ put_posix_clock(clk);
+
+ return err;
+}
+
+static int posix_clock_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct posix_clock *clk = get_posix_clock(fp);
+ int err = -ENODEV;
+
+ if (!clk)
+ return -ENODEV;
+
+ if (clk->ops.mmap)
+ err = clk->ops.mmap(clk, vma);
+
+ put_posix_clock(clk);
+
+ return err;
+}
+
+static long posix_clock_ioctl(struct file *fp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct posix_clock *clk = get_posix_clock(fp);
+ int err = -ENOTTY;
+
+ if (!clk)
+ return -ENODEV;
+
+ if (clk->ops.ioctl)
+ err = clk->ops.ioctl(clk, cmd, arg);
+
+ put_posix_clock(clk);
+
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+static long posix_clock_compat_ioctl(struct file *fp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct posix_clock *clk = get_posix_clock(fp);
+ int err = -ENOTTY;
+
+ if (!clk)
+ return -ENODEV;
+
+ if (clk->ops.ioctl)
+ err = clk->ops.ioctl(clk, cmd, arg);
+
+ put_posix_clock(clk);
+
+ return err;
+}
+#endif
+
+static int posix_clock_open(struct inode *inode, struct file *fp)
+{
+ int err;
+ struct posix_clock *clk =
+ container_of(inode->i_cdev, struct posix_clock, cdev);
+
+ mutex_lock(&clk->mutex);
+
+ if (clk->zombie) {
+ err = -ENODEV;
+ goto out;
+ }
+ if (clk->ops.open)
+ err = clk->ops.open(clk, fp->f_mode);
+ else
+ err = 0;
+
+ if (!err) {
+ kref_get(&clk->kref);
+ fp->private_data = clk;
+ }
+out:
+ mutex_unlock(&clk->mutex);
+ return err;
+}
+
+static int posix_clock_release(struct inode *inode, struct file *fp)
+{
+ struct posix_clock *clk = fp->private_data;
+ int err = 0;
+
+ if (clk->ops.release)
+ err = clk->ops.release(clk);
+
+ kref_put(&clk->kref, delete_clock);
+
+ fp->private_data = NULL;
+
+ return err;
+}
+
+static const struct file_operations posix_clock_file_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = posix_clock_read,
+ .poll = posix_clock_poll,
+ .unlocked_ioctl = posix_clock_ioctl,
+ .open = posix_clock_open,
+ .release = posix_clock_release,
+ .fasync = posix_clock_fasync,
+ .mmap = posix_clock_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = posix_clock_compat_ioctl,
+#endif
+};
+
+int posix_clock_register(struct posix_clock *clk, dev_t devid)
+{
+ int err;
+
+ kref_init(&clk->kref);
+ mutex_init(&clk->mutex);
+
+ cdev_init(&clk->cdev, &posix_clock_file_operations);
+ clk->cdev.owner = clk->ops.owner;
+ err = cdev_add(&clk->cdev, devid, 1);
+ if (err)
+ goto no_cdev;
+
+ return err;
+no_cdev:
+ mutex_destroy(&clk->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(posix_clock_register);
+
+static void delete_clock(struct kref *kref)
+{
+ struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
+ mutex_destroy(&clk->mutex);
+ if (clk->release)
+ clk->release(clk);
+}
+
+void posix_clock_unregister(struct posix_clock *clk)
+{
+ cdev_del(&clk->cdev);
+
+ mutex_lock(&clk->mutex);
+ clk->zombie = true;
+ mutex_unlock(&clk->mutex);
+
+ kref_put(&clk->kref, delete_clock);
+}
+EXPORT_SYMBOL_GPL(posix_clock_unregister);
+
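A clock driver is expected to embed a struct posix_clock, fill in its ops, and bracket its lifetime with posix_clock_register()/posix_clock_unregister() as implemented above. A minimal driver-side sketch; the my_clock names are hypothetical, and struct posix_clock_operations with a clock_gettime hook is assumed from linux/posix-clock.h:

#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/time.h>

struct my_clock {
	struct posix_clock pclock;
	/* ... device-specific state ... */
};

static int my_gettime(struct posix_clock *pc, struct timespec *ts)
{
	/* A real driver would read its hardware clock here. */
	getnstimeofday(ts);
	return 0;
}

static struct posix_clock_operations my_ops = {
	.owner		= THIS_MODULE,
	.clock_gettime	= my_gettime,
};

static int my_clock_register(struct my_clock *clk, dev_t devid)
{
	clk->pclock.ops = my_ops;
	/* Creates the character device that user space will open. */
	return posix_clock_register(&clk->pclock, devid);
}

static void my_clock_unregister(struct my_clock *clk)
{
	/* Marks the clock as a zombie and drops the registration reference. */
	posix_clock_unregister(&clk->pclock);
}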
+struct posix_clock_desc {
+ struct file *fp;
+ struct posix_clock *clk;
+};
+
+static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd)
+{
+ struct file *fp = fget(CLOCKID_TO_FD(id));
+ int err = -EINVAL;
+
+ if (!fp)
+ return err;
+
+ if (fp->f_op->open != posix_clock_open || !fp->private_data)
+ goto out;
+
+ cd->fp = fp;
+ cd->clk = get_posix_clock(fp);
+
+ err = cd->clk ? 0 : -ENODEV;
+out:
+ if (err)
+ fput(fp);
+ return err;
+}
+
+static void put_clock_desc(struct posix_clock_desc *cd)
+{
+ put_posix_clock(cd->clk);
+ fput(cd->fp);
+}
+
+static int pc_clock_adjtime(clockid_t id, struct timex *tx)
+{
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.clock_adjtime)
+ err = cd.clk->ops.clock_adjtime(cd.clk, tx);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+static int pc_clock_gettime(clockid_t id, struct timespec *ts)
+{
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.clock_gettime)
+ err = cd.clk->ops.clock_gettime(cd.clk, ts);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+static int pc_clock_getres(clockid_t id, struct timespec *ts)
+{
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.clock_getres)
+ err = cd.clk->ops.clock_getres(cd.clk, ts);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+static int pc_clock_settime(clockid_t id, const struct timespec *ts)
+{
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.clock_settime)
+ err = cd.clk->ops.clock_settime(cd.clk, ts);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+static int pc_timer_create(struct k_itimer *kit)
+{
+ clockid_t id = kit->it_clock;
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.timer_create)
+ err = cd.clk->ops.timer_create(cd.clk, kit);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+static int pc_timer_delete(struct k_itimer *kit)
+{
+ clockid_t id = kit->it_clock;
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.timer_delete)
+ err = cd.clk->ops.timer_delete(cd.clk, kit);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec *ts)
+{
+ clockid_t id = kit->it_clock;
+ struct posix_clock_desc cd;
+
+ if (get_clock_desc(id, &cd))
+ return;
+
+ if (cd.clk->ops.timer_gettime)
+ cd.clk->ops.timer_gettime(cd.clk, kit, ts);
+
+ put_clock_desc(&cd);
+}
+
+static int pc_timer_settime(struct k_itimer *kit, int flags,
+ struct itimerspec *ts, struct itimerspec *old)
+{
+ clockid_t id = kit->it_clock;
+ struct posix_clock_desc cd;
+ int err;
+
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+
+ if (cd.clk->ops.timer_settime)
+ err = cd.clk->ops.timer_settime(cd.clk, kit, flags, ts, old);
+ else
+ err = -EOPNOTSUPP;
+
+ put_clock_desc(&cd);
+
+ return err;
+}
+
+struct k_clock clock_posix_dynamic = {
+ .clock_getres = pc_clock_getres,
+ .clock_set = pc_clock_settime,
+ .clock_get = pc_clock_gettime,
+ .clock_adj = pc_clock_adjtime,
+ .timer_create = pc_timer_create,
+ .timer_set = pc_timer_settime,
+ .timer_del = pc_timer_delete,
+ .timer_get = pc_timer_gettime,
+};
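clock_posix_dynamic above resolves a dynamic clockid back to the character device a process has open: the clockid simply encodes a file descriptor. A user-space sketch of the calling convention, assuming the usual FD_TO_CLOCKID encoding (the inverse of CLOCKID_TO_FD used above) and /dev/ptp0 as an example device node:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Assumed inverse of the kernel's CLOCKID_TO_FD() encoding. */
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);	/* example device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	else
		perror("clock_gettime");

	close(fd);
	return 0;
}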
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761b5668..da800ffa810c 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -18,7 +18,6 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <linux/tick.h>
#include "tick-internal.h"
@@ -600,4 +599,14 @@ int tick_broadcast_oneshot_active(void)
return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
#endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80a0c43..119528de8235 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -18,7 +18,6 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <linux/tick.h>
#include <asm/irq_regs.h>
@@ -51,7 +50,11 @@ int tick_is_oneshot_available(void)
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
- return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+ if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return 0;
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 1;
+ return tick_broadcast_oneshot_available();
}
/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefbc1f60..1009b06d6f89 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
/*
* tick internal variable and functions used by low/high res code
*/
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
#define TICK_DO_TIMER_NONE -1
#define TICK_DO_TIMER_BOOT -2
@@ -36,6 +40,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
@@ -46,6 +51,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
# endif /* !BROADCAST */
#else /* !ONESHOT */
@@ -76,6 +82,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
return 0;
}
static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
#endif /* !TICK_ONESHOT */
/*
@@ -132,3 +139,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
{
return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
}
+
+#endif
+
+extern void do_timer(unsigned long ticks);
+extern seqlock_t xtime_lock;
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 5cbc101f908b..2d04411a5f05 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -18,7 +18,6 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <linux/tick.h>
#include "tick-internal.h"
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c55ea2433471..d5097c44b407 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -19,7 +19,6 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <linux/tick.h>
#include <linux/module.h>
#include <asm/irq_regs.h>
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d27c7562902c..6262c1d18397 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -353,7 +353,7 @@ EXPORT_SYMBOL(do_gettimeofday);
*
* Sets the time of day to the new time and update NTP and notify hrtimers
*/
-int do_settimeofday(struct timespec *tv)
+int do_settimeofday(const struct timespec *tv)
{
struct timespec ts_delta;
unsigned long flags;
@@ -387,6 +387,42 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
+
+/**
+ * timekeeping_inject_offset - Adds or subtracts from the current time.
+ * @ts: pointer to the timespec variable containing the offset
+ *
+ * Adds or subtracts an offset value from the current time.
+ */
+int timekeeping_inject_offset(struct timespec *ts)
+{
+ unsigned long flags;
+
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+ timekeeping_forward_now();
+
+ xtime = timespec_add(xtime, *ts);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
+ timekeeper.ntp_error = 0;
+ ntp_clear();
+
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+
+ return 0;
+}
+EXPORT_SYMBOL(timekeeping_inject_offset);
+
/**
* change_clocksource - Swaps clocksources if a new one is available
*
@@ -779,7 +815,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
*
* Called from the timer interrupt, must hold a write on xtime_lock.
*/
-void update_wall_time(void)
+static void update_wall_time(void)
{
struct clocksource *clock;
cycle_t offset;
@@ -910,11 +946,6 @@ struct timespec __current_kernel_time(void)
return xtime;
}
-struct timespec __get_wall_to_monotonic(void)
-{
- return wall_to_monotonic;
-}
-
struct timespec current_kernel_time(void)
{
struct timespec now;
@@ -946,3 +977,44 @@ struct timespec get_monotonic_coarse(void)
now.tv_nsec + mono.tv_nsec);
return now;
}
+
+/*
+ * The 64-bit jiffies value is not atomic - you MUST NOT read it
+ * without sampling the sequence number in xtime_lock.
+ * jiffies is defined in the linker script...
+ */
+void do_timer(unsigned long ticks)
+{
+ jiffies_64 += ticks;
+ update_wall_time();
+ calc_global_load(ticks);
+}
+
+/**
+ * get_xtime_and_monotonic_offset() - get xtime and wall_to_monotonic
+ * @xtim: pointer to timespec to be set with xtime
+ * @wtom: pointer to timespec to be set with wall_to_monotonic
+ */
+void get_xtime_and_monotonic_offset(struct timespec *xtim, struct timespec *wtom)
+{
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ *xtim = xtime;
+ *wtom = wall_to_monotonic;
+ } while (read_seqretry(&xtime_lock, seq));
+}
+
+/**
+ * xtime_update() - advances the timekeeping infrastructure
+ * @ticks: number of ticks that have elapsed since the last call.
+ *
+ * Must be called with interrupts disabled.
+ */
+void xtime_update(unsigned long ticks)
+{
+ write_seqlock(&xtime_lock);
+ do_timer(ticks);
+ write_sequnlock(&xtime_lock);
+}
diff --git a/kernel/timer.c b/kernel/timer.c
index d6459923d245..4234b08c0bf1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -964,6 +964,25 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
* add_timer_on(). Upon exit the timer is not queued and the handler is
* not running on any CPU.
*
+ * Note: You must not hold locks that are held in interrupt context
+ * while calling this function. Even if the lock has nothing to do
+ * with the timer in question. Here's why:
+ *
+ * CPU0 CPU1
+ * ---- ----
+ * <SOFTIRQ>
+ * call_timer_fn();
+ * base->running_timer = mytimer;
+ * spin_lock_irq(somelock);
+ * <IRQ>
+ * spin_lock(somelock);
+ * del_timer_sync(mytimer);
+ * while (base->running_timer == mytimer);
+ *
+ * Now del_timer_sync() will never return and never release somelock.
+ * The interrupt on the other CPU is waiting to grab somelock but
+ * it has interrupted the softirq that CPU0 is waiting to finish.
+ *
* The function returns whether it has deactivated a pending timer or not.
*/
int del_timer_sync(struct timer_list *timer)
@@ -971,6 +990,10 @@ int del_timer_sync(struct timer_list *timer)
#ifdef CONFIG_LOCKDEP
unsigned long flags;
+ /*
+ * If lockdep gives a backtrace here, please reference
+ * the synchronization rules above.
+ */
local_irq_save(flags);
lock_map_acquire(&timer->lockdep_map);
lock_map_release(&timer->lockdep_map);
@@ -1295,19 +1318,6 @@ void run_local_timers(void)
raise_softirq(TIMER_SOFTIRQ);
}
-/*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
- */
-
-void do_timer(unsigned long ticks)
-{
- jiffies_64 += ticks;
- update_wall_time();
- calc_global_load(ticks);
-}
-
#ifdef __ARCH_WANT_SYS_ALARM
/*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3dadae83883..888b611897d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void)
/* The cpu_boot init_task->ret_stack will never be freed */
for_each_online_cpu(cpu) {
if (!idle_task(cpu)->ret_stack)
- ftrace_graph_init_task(idle_task(cpu));
+ ftrace_graph_init_idle_task(idle_task(cpu), cpu);
}
do {
@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void)
mutex_unlock(&ftrace_lock);
}
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+ atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
+	/* make curr_ret_stack visible before we add the ret_stack */
+ smp_wmb();
+ t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+ t->curr_ret_stack = -1;
+ /*
+ * The idle task has no parent, it either has its own
+ * stack or no stack at all.
+ */
+ if (t->ret_stack)
+ WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+ if (ftrace_graph_active) {
+ struct ftrace_ret_stack *ret_stack;
+
+ ret_stack = per_cpu(idle_ret_stack, cpu);
+ if (!ret_stack) {
+ ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+ * sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
+ if (!ret_stack)
+ return;
+ per_cpu(idle_ret_stack, cpu) = ret_stack;
+ }
+ graph_init_task(t, ret_stack);
+ }
+}
+
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t)
GFP_KERNEL);
if (!ret_stack)
return;
- atomic_set(&t->tracing_graph_pause, 0);
- atomic_set(&t->trace_overrun, 0);
- t->ftrace_timestamp = 0;
- /* make curr_ret_stack visable before we add the ret_stack */
- smp_wmb();
- t->ret_stack = ret_stack;
+ graph_init_task(t, ret_stack);
}
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bd1c35a4fbcc..6ee56b4ad136 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -669,7 +669,7 @@ static struct list_head *rb_list_head(struct list_head *list)
* the reader page). But if the next page is a header page,
* its flags will be non zero.
*/
-static int inline
+static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *page, struct list_head *list)
{
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dc53ecb80589..8dc8da6733f9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2710,6 +2710,10 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
mutex_lock(&trace_types_lock);
if (tracer_enabled ^ val) {
+
+ /* Only need to warn if this is used to change the state */
+ WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
+
if (val) {
tracer_enabled = 1;
if (current_trace->start)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 2dec9bcde8b4..8435b43b1782 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -353,6 +353,43 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
kfree(data);
}
+/* Bitfield fetch function */
+struct bitfield_fetch_param {
+ struct fetch_param orig;
+ unsigned char hi_shift;
+ unsigned char low_shift;
+};
+
+#define DEFINE_FETCH_bitfield(type) \
+static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\
+ void *data, void *dest) \
+{ \
+ struct bitfield_fetch_param *bprm = data; \
+ type buf = 0; \
+ call_fetch(&bprm->orig, regs, &buf); \
+ if (buf) { \
+ buf <<= bprm->hi_shift; \
+ buf >>= bprm->low_shift; \
+ } \
+ *(type *)dest = buf; \
+}
+DEFINE_BASIC_FETCH_FUNCS(bitfield)
+#define fetch_bitfield_string NULL
+#define fetch_bitfield_string_size NULL
+
+static __kprobes void
+free_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+ /*
+ * Don't check the bitfield itself, because this must be the
+ * last fetch function.
+ */
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ free_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ free_symbol_cache(data->orig.data);
+ kfree(data);
+}
/* Default (unsigned long) fetch type */
#define __DEFAULT_FETCH_TYPE(t) u##t
#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
@@ -367,6 +404,7 @@ enum {
FETCH_MTD_memory,
FETCH_MTD_symbol,
FETCH_MTD_deref,
+ FETCH_MTD_bitfield,
FETCH_MTD_END,
};
@@ -387,6 +425,7 @@ ASSIGN_FETCH_FUNC(retval, ftype), \
ASSIGN_FETCH_FUNC(memory, ftype), \
ASSIGN_FETCH_FUNC(symbol, ftype), \
ASSIGN_FETCH_FUNC(deref, ftype), \
+ASSIGN_FETCH_FUNC(bitfield, ftype), \
} \
}
@@ -430,9 +469,33 @@ static const struct fetch_type *find_fetch_type(const char *type)
if (!type)
type = DEFAULT_FETCH_TYPE_STR;
+ /* Special case: bitfield */
+ if (*type == 'b') {
+ unsigned long bs;
+ type = strchr(type, '/');
+ if (!type)
+ goto fail;
+ type++;
+ if (strict_strtoul(type, 0, &bs))
+ goto fail;
+ switch (bs) {
+ case 8:
+ return find_fetch_type("u8");
+ case 16:
+ return find_fetch_type("u16");
+ case 32:
+ return find_fetch_type("u32");
+ case 64:
+ return find_fetch_type("u64");
+ default:
+ goto fail;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
if (strcmp(type, fetch_type_table[i].name) == 0)
return &fetch_type_table[i];
+fail:
return NULL;
}
@@ -586,7 +649,9 @@ error:
static void free_probe_arg(struct probe_arg *arg)
{
- if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+ if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+ free_bitfield_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
free_deref_fetch_param(arg->fetch.data);
else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
free_symbol_cache(arg->fetch.data);
@@ -767,16 +832,15 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
}
break;
case '+': /* deref memory */
+ arg++; /* Skip '+', because strict_strtol() rejects it. */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
break;
*tmp = '\0';
- ret = strict_strtol(arg + 1, 0, &offset);
+ ret = strict_strtol(arg, 0, &offset);
if (ret)
break;
- if (arg[0] == '-')
- offset = -offset;
arg = tmp + 1;
tmp = strrchr(arg, ')');
if (tmp) {
@@ -807,6 +871,41 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
return ret;
}
+#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long))
+
+/* Bitfield type needs to be parsed into a fetch function */
+static int __parse_bitfield_probe_arg(const char *bf,
+ const struct fetch_type *t,
+ struct fetch_param *f)
+{
+ struct bitfield_fetch_param *bprm;
+ unsigned long bw, bo;
+ char *tail;
+
+ if (*bf != 'b')
+ return 0;
+
+ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ if (!bprm)
+ return -ENOMEM;
+ bprm->orig = *f;
+ f->fn = t->fetch[FETCH_MTD_bitfield];
+ f->data = (void *)bprm;
+
+ bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */
+ if (bw == 0 || *tail != '@')
+ return -EINVAL;
+
+ bf = tail + 1;
+ bo = simple_strtoul(bf, &tail, 0);
+ if (tail == bf || *tail != '/')
+ return -EINVAL;
+
+ bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
+ bprm->low_shift = bprm->hi_shift + bo;
+ return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
+}
+
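The two shift counts computed above isolate a field of width bw that starts at bit offset bo from the least significant bit of the fetched word. A standalone sketch of the extraction for an assumed 3-bit field at bit offset 5 in a u32:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int bw = 3, bo = 5, size_bits = 32;	/* example field */
	unsigned int hi_shift  = size_bits - (bw + bo);		/* 24 */
	unsigned int low_shift = hi_shift + bo;			/* 29 */
	uint32_t buf = 0xE0;					/* bits 5..7 set */

	buf <<= hi_shift;	/* discard bits above the field */
	buf >>= low_shift;	/* discard bits below the field */

	printf("0x%x\n", buf);	/* prints 0x7 */
	return 0;
}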
/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct trace_probe *tp,
struct probe_arg *parg, int is_return)
@@ -836,6 +935,8 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp,
parg->offset = tp->size;
tp->size += parg->type->size;
ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return);
+ if (ret >= 0 && t != NULL)
+ ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
if (ret >= 0) {
parg->fetch_size.fn = get_fetch_size_function(parg->type,
parg->fetch.fn);
@@ -1130,7 +1231,7 @@ static int command_trace_probe(const char *buf)
return ret;
}
-#define WRITE_BUFSIZE 128
+#define WRITE_BUFSIZE 4096
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 8f758d070c43..7e62c0a18456 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -247,51 +247,3 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
ctx_trace = tr;
}
-static void stop_sched_trace(struct trace_array *tr)
-{
- tracing_stop_sched_switch_record();
-}
-
-static int sched_switch_trace_init(struct trace_array *tr)
-{
- ctx_trace = tr;
- tracing_reset_online_cpus(tr);
- tracing_start_sched_switch_record();
- return 0;
-}
-
-static void sched_switch_trace_reset(struct trace_array *tr)
-{
- if (sched_ref)
- stop_sched_trace(tr);
-}
-
-static void sched_switch_trace_start(struct trace_array *tr)
-{
- sched_stopped = 0;
-}
-
-static void sched_switch_trace_stop(struct trace_array *tr)
-{
- sched_stopped = 1;
-}
-
-static struct tracer sched_switch_trace __read_mostly =
-{
- .name = "sched_switch",
- .init = sched_switch_trace_init,
- .reset = sched_switch_trace_reset,
- .start = sched_switch_trace_start,
- .stop = sched_switch_trace_stop,
- .wait_pipe = poll_wait_pipe,
-#ifdef CONFIG_FTRACE_SELFTEST
- .selftest = trace_selftest_startup_sched_switch,
-#endif
-};
-
-__init static int init_sched_switch_trace(void)
-{
- return register_tracer(&sched_switch_trace);
-}
-device_initcall(init_sched_switch_trace);
-
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 5c9fe08d2093..ee7b5a0bb9f8 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -60,6 +60,19 @@ extern struct syscall_metadata *__stop_syscalls_metadata[];
static struct syscall_metadata **syscalls_metadata;
+#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+ /*
+ * Only compare after the "sys" prefix. Archs that use
+	 * syscall wrappers may have syscall symbol aliases prefixed
+ * with "SyS" instead of "sys", leading to an unwanted
+ * mismatch.
+ */
+ return !strcmp(sym + 3, name + 3);
+}
+#endif
+
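Skipping the first three characters lets wrapped syscall symbols still match their metadata names. A quick sketch of the comparison, using example symbol names:

#include <stdio.h>
#include <string.h>

/* Same comparison as the default arch_syscall_match_sym_name() above. */
static int match(const char *sym, const char *name)
{
	return !strcmp(sym + 3, name + 3);
}

int main(void)
{
	/* "SyS_read" stands in for an arch syscall-wrapper alias of sys_read. */
	printf("%d\n", match("SyS_read", "sys_read"));	/* 1 */
	printf("%d\n", match("sys_read", "sys_write"));	/* 0 */
	return 0;
}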
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
@@ -72,14 +85,11 @@ find_syscall_meta(unsigned long syscall)
stop = __stop_syscalls_metadata;
kallsyms_lookup(syscall, NULL, NULL, NULL, str);
+ if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
+ return NULL;
+
for ( ; start < stop; start++) {
- /*
- * Only compare after the "sys" prefix. Archs that use
- * syscall wrappers may have syscalls symbols aliases prefixed
- * with "SyS" instead of "sys", leading to an unwanted
- * mismatch.
- */
- if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+ if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
return *start;
}
return NULL;
@@ -359,7 +369,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
- if (num < 0 || num >= NR_syscalls)
+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
if (!sys_refcount_enter)
@@ -377,7 +387,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
- if (num < 0 || num >= NR_syscalls)
+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return;
mutex_lock(&syscall_trace_lock);
sys_refcount_enter--;
@@ -393,7 +403,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
- if (num < 0 || num >= NR_syscalls)
+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
if (!sys_refcount_exit)
@@ -411,7 +421,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
- if (num < 0 || num >= NR_syscalls)
+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return;
mutex_lock(&syscall_trace_lock);
sys_refcount_exit--;
@@ -424,6 +434,14 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
int init_syscall_trace(struct ftrace_event_call *call)
{
int id;
+ int num;
+
+ num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ if (num < 0 || num >= NR_syscalls) {
+ pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
+ ((struct syscall_metadata *)call->data)->name);
+ return -ENOSYS;
+ }
if (set_syscall_print_fmt(call) < 0)
return -ENOMEM;
@@ -438,7 +456,7 @@ int init_syscall_trace(struct ftrace_event_call *call)
return id;
}
-unsigned long __init arch_syscall_addr(int nr)
+unsigned long __init __weak arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ee6578b578ad..1b64d225f067 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,10 +251,12 @@ struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezable_wq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
@@ -3775,8 +3777,10 @@ static int __init init_workqueues(void)
system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
+ system_freezable_wq = alloc_workqueue("events_freezable",
+ WQ_FREEZABLE, 0);
BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
- !system_unbound_wq);
+ !system_unbound_wq || !system_freezable_wq);
return 0;
}
early_initcall(init_workqueues);
diff --git a/lib/Kconfig b/lib/Kconfig
index 0ee67e08ad3e..c8b7aba374eb 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
depends on EXPERIMENTAL && BROKEN
+config CPU_RMAP
+ bool
+ depends on SMP
+
#
# Netlink attribute parsing support is select'ed if needed
#
@@ -219,4 +223,28 @@ config LRU_CACHE
config AVERAGE
bool
+config LLIST
+ bool
+
+config SHM_SIGNAL
+ tristate "SHM Signal - Generic shared-memory signaling mechanism"
+ default n
+ help
+	  Provides a shared-memory based signaling mechanism used to deliver
+	  memory-dirty notifications between two end-points.
+
+ If unsure, say N
+
+config IOQ
+ tristate "IO-Queue library - Generic shared-memory queue"
+ select SHM_SIGNAL
+ default n
+ help
+ IOQ is a generic shared-memory-queue mechanism that happens to be
+ friendly to virtualization boundaries. It can be used in a variety
+ of ways, though its intended purpose is to become a low-level
+ communication path for paravirtualized drivers.
+
+ If unsure, say N
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2b97418c67e2..0f77ed13368d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -102,11 +102,7 @@ config HEADERS_CHECK
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
- depends on UNDEFINED || (BLACKFIN)
default y
- # This option is on purpose disabled for now.
- # It will be enabled when we are down to a reasonable number
- # of section mismatch warnings (< 10 for an allyesconfig build)
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f7d41d..fc9ccef59834 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -84,6 +84,8 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_SHM_SIGNAL) += shm_signal.o
+obj-$(CONFIG_IOQ) += ioq.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
@@ -110,6 +112,10 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_AVERAGE) += average.o
+obj-$(CONFIG_LLIST) += llist.o
+
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 741fae905ae3..c2987144886b 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 000000000000..987acfafeb83
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities. This can be seen as a reverse-map of
+ * CPU affinity. However, we do not assume that the object affinities
+ * cover all CPUs in the system. For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+ struct cpu_rmap *rmap;
+ unsigned int cpu;
+ size_t obj_offset;
+
+ /* This is a silly number of objects, and we use u16 indices. */
+ if (size > 0xffff)
+ return NULL;
+
+ /* Offset of object pointer array from base structure */
+ obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+ sizeof(void *));
+
+ rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+ if (!rmap)
+ return NULL;
+
+ rmap->obj = (void **)((char *)rmap + obj_offset);
+
+ /* Initially assign CPUs to objects on a rota, since we have
+ * no idea where the objects are. Use infinite distance, so
+ * any object with known distance is preferable. Include the
+ * CPUs that are not present/online, since we definitely want
+ * any newly-hotplugged CPUs to have some object assigned.
+ */
+ for_each_possible_cpu(cpu) {
+ rmap->near[cpu].index = cpu % size;
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ }
+
+ rmap->size = size;
+ return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+ const struct cpumask *mask, u16 dist)
+{
+ int neigh;
+
+ for_each_cpu(neigh, mask) {
+ if (rmap->near[cpu].dist > dist &&
+ rmap->near[neigh].dist <= dist) {
+ rmap->near[cpu].index = rmap->near[neigh].index;
+ rmap->near[cpu].dist = dist;
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+ unsigned index;
+ unsigned int cpu;
+
+ pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+ for_each_possible_cpu(cpu) {
+ index = rmap->near[cpu].index;
+ pr_info("cpu %d -> obj %u (distance %u)\n",
+ cpu, index, rmap->near[cpu].dist);
+ }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+ u16 index;
+
+ BUG_ON(rmap->used >= rmap->size);
+ index = rmap->used++;
+ rmap->obj[index] = obj;
+ return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity)
+{
+ cpumask_var_t update_mask;
+ unsigned int cpu;
+
+ if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+ return -ENOMEM;
+
+ /* Invalidate distance for all CPUs for which this used to be
+ * the nearest object. Mark those CPUs for update.
+ */
+ for_each_online_cpu(cpu) {
+ if (rmap->near[cpu].index == index) {
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ cpumask_set_cpu(cpu, update_mask);
+ }
+ }
+
+ debug_print_rmap(rmap, "after invalidating old distances");
+
+ /* Set distance to 0 for all CPUs in the new affinity mask.
+ * Mark all CPUs within their NUMA nodes for update.
+ */
+ for_each_cpu(cpu, affinity) {
+ rmap->near[cpu].index = index;
+ rmap->near[cpu].dist = 0;
+ cpumask_or(update_mask, update_mask,
+ cpumask_of_node(cpu_to_node(cpu)));
+ }
+
+ debug_print_rmap(rmap, "after updating neighbours");
+
+ /* Update distances based on topology */
+ for_each_cpu(cpu, update_mask) {
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_thread_cpumask(cpu), 1))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_core_cpumask(cpu), 2))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ cpumask_of_node(cpu_to_node(cpu)), 3))
+ continue;
+ /* We could continue into NUMA node distances, but for now
+ * we give up.
+ */
+ }
+
+ debug_print_rmap(rmap, "after copying neighbours");
+
+ free_cpumask_var(update_mask);
+ return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+ struct irq_affinity_notify notify;
+ struct cpu_rmap *rmap;
+ u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+ struct irq_glue *glue;
+ u16 index;
+
+ if (!rmap)
+ return;
+
+ for (index = 0; index < rmap->used; index++) {
+ glue = rmap->obj[index];
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
+ }
+ irq_run_affinity_notifiers();
+
+ kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+ struct irq_glue *glue =
+ container_of(notify, struct irq_glue, notify);
+ int rc;
+
+ rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+ if (rc)
+ pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+ kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+ struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ int rc;
+
+ if (!glue)
+ return -ENOMEM;
+ glue->notify.notify = irq_cpu_rmap_notify;
+ glue->notify.release = irq_cpu_rmap_release;
+ glue->rmap = rmap;
+ glue->index = cpu_rmap_add(rmap, glue);
+ rc = irq_set_affinity_notifier(irq, &glue->notify);
+ if (rc)
+ kfree(glue);
+ return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
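A multiqueue driver would typically pair these helpers as follows: allocate one reverse-map for its receive queues, attach an affinity notifier to each queue IRQ, and free the map on teardown. A sketch under those assumptions; the irqs array and nr_queues are placeholders:

#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

/* Sketch only: build a CPU -> RX-queue reverse map for nr_queues IRQs. */
static struct cpu_rmap *setup_queue_rmap(const int *irqs, unsigned int nr_queues)
{
	struct cpu_rmap *rmap;
	unsigned int i;

	rmap = alloc_cpu_rmap(nr_queues, GFP_KERNEL);
	if (!rmap)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		/* Registers an affinity notifier that keeps the map current. */
		if (irq_cpu_rmap_add(rmap, irqs[i])) {
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}
	return rmap;
}

static void teardown_queue_rmap(struct cpu_rmap *rmap)
{
	/* Unhooks every notifier and frees the map; NULL is tolerated. */
	free_irq_cpu_rmap(rmap);
}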
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 1923f1490e72..f9100adde7ed 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,33 @@
/*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool, a lock still has to
+ * be taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handlers. So code that uses the
+ * allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * rcu_read_lock and rcu_read_unlock are not used in gen_pool_alloc,
+ * gen_pool_free, gen_pool_avail, gen_pool_size etc., because chunks are
+ * only added to the pool and never deleted from it unless the pool
+ * itself is destroyed. If chunks could be deleted from the pool,
+ * rcu_read_lock and rcu_read_unlock would have to be used in these
+ * functions.
*
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
@@ -13,8 +38,109 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
#include <linux/genalloc.h>
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+ unsigned long val, nval;
+
+ nval = *addr;
+ do {
+ val = nval;
+ if (val & mask_to_set)
+ return -EBUSY;
+ cpu_relax();
+ } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+ return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+ unsigned long val, nval;
+
+ nval = *addr;
+ do {
+ val = nval;
+ if ((val & mask_to_clear) != mask_to_clear)
+ return -EBUSY;
+ cpu_relax();
+ } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+ return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits starting at @start in @map locklessly. Several users can
+ * set/clear the same bitmap simultaneously without taking a lock. If two
+ * users set the same bit, one of them returns the number of bits that
+ * remain to be set; otherwise 0 is returned.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ }
+
+ return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to clear
+ *
+ * Clear @nr bits starting at @start in @map locklessly. Several users can
+ * set/clear the same bitmap simultaneously without taking a lock. If two
+ * users clear the same bit, one of them returns the number of bits that
+ * remain to be cleared; otherwise 0 is returned.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ }
+
+ return 0;
+}
/**
* gen_pool_create - create a new special memory pool
@@ -30,7 +156,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
if (pool != NULL) {
- rwlock_init(&pool->lock);
+ spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
}
@@ -58,15 +184,15 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
if (unlikely(chunk == NULL))
- return -1;
+ return -ENOMEM;
- spin_lock_init(&chunk->lock);
chunk->start_addr = addr;
chunk->end_addr = addr + size;
+ atomic_set(&chunk->avail, size);
- write_lock(&pool->lock);
- list_add(&chunk->next_chunk, &pool->chunks);
- write_unlock(&pool->lock);
+ spin_lock(&pool->lock);
+ list_add_rcu(&chunk->next_chunk, &pool->chunks);
+ spin_unlock(&pool->lock);
return 0;
}
@@ -86,7 +212,6 @@ void gen_pool_destroy(struct gen_pool *pool)
int order = pool->min_alloc_order;
int bit, end_bit;
-
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
@@ -108,43 +233,47 @@ EXPORT_SYMBOL(gen_pool_destroy);
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+ * Uses a first-fit algorithm. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
- struct list_head *_chunk;
struct gen_pool_chunk *chunk;
- unsigned long addr, flags;
+ unsigned long addr;
int order = pool->min_alloc_order;
- int nbits, start_bit, end_bit;
+ int nbits, start_bit = 0, end_bit, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
if (size == 0)
return 0;
nbits = (size + (1UL << order) - 1) >> order;
-
- read_lock(&pool->lock);
- list_for_each(_chunk, &pool->chunks) {
- chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (size > atomic_read(&chunk->avail))
+ continue;
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-
- spin_lock_irqsave(&chunk->lock, flags);
- start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
- nbits, 0);
- if (start_bit >= end_bit) {
- spin_unlock_irqrestore(&chunk->lock, flags);
+retry:
+ start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
+ start_bit, nbits, 0);
+ if (start_bit >= end_bit)
continue;
+ remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+ if (remain) {
+ remain = bitmap_clear_ll(chunk->bits, start_bit,
+ nbits - remain);
+ BUG_ON(remain);
+ goto retry;
}
addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
- bitmap_set(chunk->bits, start_bit, nbits);
- spin_unlock_irqrestore(&chunk->lock, flags);
- read_unlock(&pool->lock);
+ size = nbits << order;
+ atomic_sub(size, &chunk->avail);
return addr;
}
- read_unlock(&pool->lock);
return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
@@ -155,33 +284,66 @@ EXPORT_SYMBOL(gen_pool_alloc);
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
*
- * Free previously allocated special memory back to the specified pool.
+ * Free previously allocated special memory back to the specified
+ * pool. Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
*/
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
- struct list_head *_chunk;
struct gen_pool_chunk *chunk;
- unsigned long flags;
int order = pool->min_alloc_order;
- int bit, nbits;
+ int start_bit, nbits, remain;
- nbits = (size + (1UL << order) - 1) >> order;
-
- read_lock(&pool->lock);
- list_for_each(_chunk, &pool->chunks) {
- chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+ nbits = (size + (1UL << order) - 1) >> order;
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
if (addr >= chunk->start_addr && addr < chunk->end_addr) {
BUG_ON(addr + size > chunk->end_addr);
- spin_lock_irqsave(&chunk->lock, flags);
- bit = (addr - chunk->start_addr) >> order;
- while (nbits--)
- __clear_bit(bit++, chunk->bits);
- spin_unlock_irqrestore(&chunk->lock, flags);
- break;
+ start_bit = (addr - chunk->start_addr) >> order;
+ remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+ BUG_ON(remain);
+ size = nbits << order;
+ atomic_add(size, &chunk->avail);
+ return;
}
}
- BUG_ON(nbits > 0);
- read_unlock(&pool->lock);
+ BUG();
}
EXPORT_SYMBOL(gen_pool_free);
+
+/**
+ * gen_pool_avail - get available free space of the pool
+ * @pool: pool to get available free space
+ *
+ * Return available free space of the specified pool.
+ */
+size_t gen_pool_avail(struct gen_pool *pool)
+{
+ struct gen_pool_chunk *chunk;
+ size_t avail = 0;
+
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+ avail += atomic_read(&chunk->avail);
+ return avail;
+}
+EXPORT_SYMBOL_GPL(gen_pool_avail);
+
+/**
+ * gen_pool_size - get size in bytes of memory managed by the pool
+ * @pool: pool to get size
+ *
+ * Return size in bytes of memory managed by the pool.
+ */
+size_t gen_pool_size(struct gen_pool *pool)
+{
+ struct gen_pool_chunk *chunk;
+ size_t size = 0;
+
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+ size += chunk->end_addr - chunk->start_addr;
+ return size;
+}
+EXPORT_SYMBOL_GPL(gen_pool_size);
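With chunks on an RCU-protected list and the bitmaps updated via cmpxchg, gen_pool_alloc()/gen_pool_free() no longer take the pool lock; only gen_pool_add() still does. A usage sketch, assuming a pool that manages an on-device SRAM window whose base address and size are placeholders:

#include <linux/genalloc.h>
#include <linux/errno.h>

#define MY_SRAM_BASE	0xd0000000UL	/* placeholder device address */
#define MY_SRAM_SIZE	0x4000UL	/* placeholder size: 16 KiB */

static struct gen_pool *sram_pool;

static int sram_pool_init(void)
{
	int rc;

	/* 32-byte minimum allocation granularity (order 5), no NUMA node. */
	sram_pool = gen_pool_create(5, -1);
	if (!sram_pool)
		return -ENOMEM;

	/* Adding chunks still takes the pool lock, so do it up front. */
	rc = gen_pool_add(sram_pool, MY_SRAM_BASE, MY_SRAM_SIZE, -1);
	if (rc) {
		gen_pool_destroy(sram_pool);
		return rc;
	}
	return 0;
}

static unsigned long sram_alloc_buffer(size_t len)
{
	/* Lockless fast path; returns 0 when no chunk can satisfy len. */
	return gen_pool_alloc(sram_pool, len);
}

static void sram_free_buffer(unsigned long addr, size_t len)
{
	gen_pool_free(sram_pool, addr, len);
}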
diff --git a/lib/ioq.c b/lib/ioq.c
new file mode 100644
index 000000000000..4027848d7436
--- /dev/null
+++ b/lib/ioq.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * See include/linux/ioq.h for documentation
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ioq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+static int ioq_iter_setpos(struct ioq_iterator *iter, u32 pos)
+{
+ struct ioq *ioq = iter->ioq;
+
+ BUG_ON(pos >= ioq->count);
+
+ iter->pos = pos;
+ iter->desc = &ioq->ring[pos];
+
+ return 0;
+}
+
+static inline u32 modulo_inc(u32 val, u32 mod)
+{
+ BUG_ON(val >= mod);
+
+ if (val == (mod - 1))
+ return 0;
+
+ return val + 1;
+}
+
+static inline int idx_full(struct ioq_ring_idx *idx)
+{
+ return idx->full && (idx->head == idx->tail);
+}
+
+int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
+ long offset, int flags)
+{
+ struct ioq_ring_idx *idx = iter->idx;
+ u32 pos;
+
+ switch (type) {
+ case ioq_seek_next:
+ pos = modulo_inc(iter->pos, iter->ioq->count);
+ break;
+ case ioq_seek_tail:
+ pos = le32_to_cpu(idx->tail);
+ break;
+ case ioq_seek_head:
+ pos = le32_to_cpu(idx->head);
+ break;
+ case ioq_seek_set:
+ if (offset >= iter->ioq->count)
+ return -1;
+ pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ioq_iter_setpos(iter, pos);
+}
+EXPORT_SYMBOL_GPL(ioq_iter_seek);
+
+static int ioq_ring_count(struct ioq_ring_idx *idx, int count)
+{
+ u32 head = le32_to_cpu(idx->head);
+ u32 tail = le32_to_cpu(idx->tail);
+
+ if (idx->full && (head == tail))
+ return count;
+ else if (tail >= head)
+ return tail - head;
+ else
+ return (tail + count) - head;
+}
+
+static void idx_tail_push(struct ioq_ring_idx *idx, int count)
+{
+ u32 tail = modulo_inc(le32_to_cpu(idx->tail), count);
+ u32 head = le32_to_cpu(idx->head);
+
+ if (head == tail) {
+ rmb();
+
+ /*
+ * Setting full here may look racy, but note that we haven't
+ * flipped the owner bit yet. So it is impossible for the
+ * remote locale to move head in such a way that this operation
+ * becomes invalid
+ */
+ idx->full = 1;
+ wmb();
+ }
+
+ idx->tail = cpu_to_le32(tail);
+}
+
+int ioq_iter_push(struct ioq_iterator *iter, int flags)
+{
+ struct ioq_ring_head *head_desc = iter->ioq->head_desc;
+ struct ioq_ring_idx *idx = iter->idx;
+ int ret;
+
+ /*
+ * It's only valid to push if we are currently pointed at the tail
+ */
+ if (iter->pos != le32_to_cpu(idx->tail) || iter->desc->sown != iter->ioq->locale)
+ return -EINVAL;
+
+ idx_tail_push(idx, iter->ioq->count);
+ if (iter->dualidx) {
+ idx_tail_push(&head_desc->idx[ioq_idxtype_inuse],
+ iter->ioq->count);
+ if (head_desc->idx[ioq_idxtype_inuse].tail !=
+ head_desc->idx[ioq_idxtype_valid].tail) {
+ SHM_SIGNAL_FAULT(iter->ioq->signal,
+ "Tails not synchronized");
+ return -EINVAL;
+ }
+ }
+
+ wmb(); /* the index must be visible before the sown, or signal */
+
+ if (iter->flipowner) {
+ iter->desc->sown = !iter->ioq->locale;
+ wmb(); /* sown must be visible before we signal */
+ }
+
+ ret = ioq_iter_seek(iter, ioq_seek_next, 0, flags);
+
+ if (iter->update)
+ ioq_signal(iter->ioq, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_push);
+
+int ioq_iter_pop(struct ioq_iterator *iter, int flags)
+{
+ struct ioq_ring_idx *idx = iter->idx;
+ int ret;
+
+ /*
+ * It's only valid to pop if we are currently pointed at the head
+ */
+ if (iter->pos != le32_to_cpu(idx->head) || iter->desc->sown != iter->ioq->locale)
+ return -EINVAL;
+
+ idx->head = cpu_to_le32(modulo_inc(le32_to_cpu(idx->head), iter->ioq->count));
+ wmb(); /* head must be visible before full */
+
+ if (idx->full) {
+ idx->full = 0;
+ wmb(); /* full must be visible before sown */
+ }
+
+ if (iter->flipowner) {
+ iter->desc->sown = !iter->ioq->locale;
+ wmb(); /* sown must be visible before we signal */
+ }
+
+ ret = ioq_iter_seek(iter, ioq_seek_next, 0, flags);
+
+ if (iter->update)
+ ioq_signal(iter->ioq, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_pop);
+
+static struct ioq_ring_idx *idxtype_to_idx(struct ioq *ioq,
+ enum ioq_idx_type type)
+{
+ struct ioq_ring_idx *idx;
+
+ switch (type) {
+ case ioq_idxtype_valid:
+ case ioq_idxtype_inuse:
+ idx = &ioq->head_desc->idx[type];
+ break;
+ default:
+ panic("IOQ: illegal index type: %d", type);
+ break;
+ }
+
+ return idx;
+}
+
+int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
+ enum ioq_idx_type type, int flags)
+{
+ iter->ioq = ioq;
+ iter->update = (flags & IOQ_ITER_AUTOUPDATE);
+ iter->flipowner = !(flags & IOQ_ITER_NOFLIPOWNER);
+ iter->pos = -1;
+ iter->desc = NULL;
+ iter->dualidx = 0;
+
+ if (type == ioq_idxtype_both) {
+ /*
+ * "both" is a special case, so we set the dualidx flag.
+ *
+ * However, we also just want to use the valid-index
+ * for normal processing, so override that here
+ */
+ type = ioq_idxtype_valid;
+ iter->dualidx = 1;
+ }
+
+ iter->idx = idxtype_to_idx(ioq, type);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_init);
+
+int ioq_count(struct ioq *ioq, enum ioq_idx_type type)
+{
+ return ioq_ring_count(idxtype_to_idx(ioq, type), ioq->count);
+}
+EXPORT_SYMBOL_GPL(ioq_count);
+
+int ioq_remain(struct ioq *ioq, enum ioq_idx_type type)
+{
+ int count = ioq_ring_count(idxtype_to_idx(ioq, type), ioq->count);
+
+ return ioq->count - count;
+}
+EXPORT_SYMBOL_GPL(ioq_remain);
+
+int ioq_size(struct ioq *ioq)
+{
+ return ioq->count;
+}
+EXPORT_SYMBOL_GPL(ioq_size);
+
+int ioq_full(struct ioq *ioq, enum ioq_idx_type type)
+{
+ struct ioq_ring_idx *idx = idxtype_to_idx(ioq, type);
+
+ return idx_full(idx);
+}
+EXPORT_SYMBOL_GPL(ioq_full);
+
+static void ioq_shm_signal(struct shm_signal_notifier *notifier)
+{
+ struct ioq *ioq = container_of(notifier, struct ioq, shm_notifier);
+
+ if (waitqueue_active(&ioq->wq))
+ wake_up(&ioq->wq);
+
+ if (ioq->notifier)
+ ioq->notifier->signal(ioq->notifier);
+}
+
+void ioq_init(struct ioq *ioq,
+ struct ioq_ops *ops,
+ enum ioq_locality locale,
+ struct ioq_ring_head *head,
+ struct shm_signal *signal,
+ size_t count)
+{
+ memset(ioq, 0, sizeof(*ioq));
+ kref_init(&ioq->kref);
+ init_waitqueue_head(&ioq->wq);
+
+ ioq->ops = ops;
+ ioq->locale = locale;
+ ioq->head_desc = head;
+ ioq->ring = &head->ring[0];
+ ioq->count = count;
+ ioq->signal = signal;
+
+ ioq->shm_notifier.signal = &ioq_shm_signal;
+ signal->notifier = &ioq->shm_notifier;
+}
+EXPORT_SYMBOL_GPL(ioq_init);
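The ring is driven through the iterator API added above. A rough producer-side sketch follows; the descriptor layout lives in include/linux/ioq.h, which is not shown here, so fill_descriptor() is a hypothetical helper:

static int ioq_send_example(struct ioq *ioq, void *data, size_t len)
{
	struct ioq_iterator iter;
	int ret;

	/* walk the "valid" index; AUTOUPDATE makes push signal the remote */
	ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, IOQ_ITER_AUTOUPDATE);
	if (ret < 0)
		return ret;

	/* position the iterator on the current tail descriptor */
	ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
	if (ret < 0)
		return ret;

	fill_descriptor(iter.desc, data, len);	/* hypothetical helper */

	/* advance the tail, flip ->sown to the remote locale, raise the signal */
	return ioq_iter_push(&iter, 0);
}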
diff --git a/lib/llist.c b/lib/llist.c
new file mode 100644
index 000000000000..ac1a136321a1
--- /dev/null
+++ b/lib/llist.c
@@ -0,0 +1,119 @@
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have an NMI-safe cmpxchg implementation, the
+ * list can NOT be used in an NMI handler. So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/llist.h>
+
+#include <asm/system.h>
+
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add(struct llist_node *new, struct llist_head *head)
+{
+ struct llist_node *entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ do {
+ entry = head->first;
+ new->next = entry;
+ } while (cmpxchg(&head->first, entry, new) != entry);
+}
+EXPORT_SYMBOL_GPL(llist_add);
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ do {
+ entry = head->first;
+ new_last->next = entry;
+ } while (cmpxchg(&head->first, entry, new_first) != entry);
+}
+EXPORT_SYMBOL_GPL(llist_add_batch);
+
+/**
+ * llist_del_first - delete the first entry of lock-less list
+ * @head: the head for your lock-less list
+ *
+ * If list is empty, return NULL, otherwise, return the first entry deleted.
+ *
+ * Only one llist_del_first user can be used simultaneously with
+ * multiple llist_add users without a lock, because otherwise an
+ * llist_del_first, llist_add, llist_add sequence in another user may
+ * change @head->first->next while keeping @head->first unchanged. If
+ * multiple consumers are needed, please use llist_del_all.
+ */
+struct llist_node *llist_del_first(struct llist_head *head)
+{
+ struct llist_node *entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ do {
+ entry = head->first;
+ if (entry == NULL)
+ return NULL;
+ } while (cmpxchg(&head->first, entry, entry->next) != entry);
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If list is empty, return NULL, otherwise, delete all entries and
+ * return the pointer to the first entry.
+ */
+struct llist_node *llist_del_all(struct llist_head *head)
+{
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ return xchg(&head->first, NULL);
+}
+EXPORT_SYMBOL_GPL(llist_del_all);
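A minimal sketch of the intended single-consumer pattern; struct pending_event and the LLIST_HEAD() initializer from include/linux/llist.h are assumptions, not part of this file:

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>

struct pending_event {
	int code;
	struct llist_node llnode;
};

static LLIST_HEAD(event_list);

/* any context, including NMI on arches with NMI-safe cmpxchg */
static void report_event(struct pending_event *ev)
{
	llist_add(&ev->llnode, &event_list);
}

/* exactly one consumer, e.g. a workqueue item */
static void drain_events(void)
{
	struct llist_node *node = llist_del_all(&event_list);

	while (node) {
		struct pending_event *ev =
			container_of(node, struct pending_event, llnode);

		node = node->next;	/* read before the entry is freed */
		pr_info("event %d\n", ev->code);
		kfree(ev);
	}
}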
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc34411..ac09f2226dc7 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
{
int i, len = 0;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < n; i++, p++) {
if (p->len)
len += nla_total_size(p->len);
else if (nla_attr_minlen[p->type])
diff --git a/lib/rwsem.c b/lib/rwsem.c
index f236d7cd5cf3..aa7c3052261f 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -222,8 +222,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/*
* wait for the read lock to be granted
*/
-asmregparm struct rw_semaphore __sched *
-rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
-RWSEM_ACTIVE_READ_BIAS);
@@ -232,8 +231,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
/*
* wait for the write lock to be granted
*/
-asmregparm struct rw_semaphore __sched *
-rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
-RWSEM_ACTIVE_WRITE_BIAS);
@@ -243,7 +241,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
-asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
@@ -263,7 +261,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
-asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
diff --git a/lib/shm_signal.c b/lib/shm_signal.c
new file mode 100644
index 000000000000..8d3e9b418a27
--- /dev/null
+++ b/lib/shm_signal.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * See include/linux/shm_signal.h for documentation
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/shm_signal.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+int shm_signal_enable(struct shm_signal *s, int flags)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+ unsigned long iflags;
+
+ spin_lock_irqsave(&s->lock, iflags);
+
+ irq->enabled = 1;
+ wmb();
+
+ if ((irq->dirty || irq->pending)
+ && !test_bit(shm_signal_in_wakeup, &s->flags)) {
+ rmb();
+ tasklet_schedule(&s->deferred_notify);
+ }
+
+ spin_unlock_irqrestore(&s->lock, iflags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_enable);
+
+int shm_signal_disable(struct shm_signal *s, int flags)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+
+ irq->enabled = 0;
+ wmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_disable);
+
+/*
+ * signaling protocol:
+ *
+ * each side of the shm_signal has an "irq" structure with the following
+ * fields:
+ *
+ * - enabled: controlled by shm_signal_enable/disable() to mask/unmask
+ * the notification locally
+ * - dirty: indicates if the shared-memory is dirty or clean. This
+ * is updated regardless of the enabled/pending state so that
+ * the state is always accurately tracked.
+ * - pending: indicates if a signal is pending to the remote locale.
+ * This allows us to determine if a remote-notification is
+ * already in flight to optimize spurious notifications away.
+ */
+int shm_signal_inject(struct shm_signal *s, int flags)
+{
+ /* Load the irq structure from the other locale */
+ struct shm_signal_irq *irq = &s->desc->irq[!s->locale];
+
+ /*
+ * We always mark the remote side as dirty regardless of whether
+ * they need to be notified.
+ */
+ irq->dirty = 1;
+ wmb(); /* dirty must be visible before we test the pending state */
+
+ if (irq->enabled && !irq->pending) {
+ rmb();
+
+ /*
+ * If the remote side has enabled notifications, and we do
+ * not see a notification pending, we must inject a new one.
+ */
+ irq->pending = 1;
+ wmb(); /* make it visible before we do the injection */
+
+ s->ops->inject(s);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_inject);
+
+void _shm_signal_wakeup(struct shm_signal *s)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+ int dirty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+
+ __set_bit(shm_signal_in_wakeup, &s->flags);
+
+ /*
+ * The outer loop protects against race conditions between
+ * irq->dirty and irq->pending updates
+ */
+ while (irq->enabled && (irq->dirty || irq->pending)) {
+
+ /*
+ * Run until we completely exhaust irq->dirty (it may
+ * be re-dirtied by the remote side while we are in the
+ * callback). We let "pending" remain untouched until we have
+ * processed them all so that the remote side knows we do not
+ * need a new notification (yet).
+ */
+ do {
+ irq->dirty = 0;
+ /* the unlock is an implicit wmb() for dirty = 0 */
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ if (s->notifier)
+ s->notifier->signal(s->notifier);
+
+ spin_lock_irqsave(&s->lock, flags);
+ dirty = irq->dirty;
+ rmb();
+
+ } while (irq->enabled && dirty);
+
+ barrier();
+
+ /*
+ * We can finally acknowledge the notification by clearing
+ * "pending" after all of the dirty memory has been processed
+ * Races against this clearing are handled by the outer loop.
+ * Subsequent iterations of this loop will execute with
+ * pending=0 potentially leading to future spurious
+ * notifications, but this is an acceptable tradeoff as this
+ * will be rare and harmless.
+ */
+ irq->pending = 0;
+ wmb();
+
+ }
+
+ __clear_bit(shm_signal_in_wakeup, &s->flags);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+}
+EXPORT_SYMBOL_GPL(_shm_signal_wakeup);
+
+void _shm_signal_release(struct kref *kref)
+{
+ struct shm_signal *s = container_of(kref, struct shm_signal, kref);
+
+ s->ops->release(s);
+}
+EXPORT_SYMBOL_GPL(_shm_signal_release);
+
+static void
+deferred_notify(unsigned long data)
+{
+ struct shm_signal *s = (struct shm_signal *)data;
+
+ _shm_signal_wakeup(s);
+}
+
+void shm_signal_init(struct shm_signal *s, enum shm_signal_locality locale,
+ struct shm_signal_ops *ops, struct shm_signal_desc *desc)
+{
+ memset(s, 0, sizeof(*s));
+ kref_init(&s->kref);
+ spin_lock_init(&s->lock);
+ tasklet_init(&s->deferred_notify,
+ deferred_notify,
+ (unsigned long)s);
+ s->locale = locale;
+ s->ops = ops;
+ s->desc = desc;
+}
+EXPORT_SYMBOL_GPL(shm_signal_init);
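The enabled/dirty/pending protocol above is meant for NAPI-style mitigation: mask on notification, drain, then unmask. A sketch under that assumption; struct my_device and the ring-draining step are hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/shm_signal.h>

struct my_device {
	struct shm_signal *signal;
	struct shm_signal_notifier notifier;
	struct work_struct poll_work;
};

static void my_device_poll(struct work_struct *work)
{
	struct my_device *dev = container_of(work, struct my_device, poll_work);

	/* ... drain the shared-memory ring here ... */

	/* unmask; a dirty edge we raced with re-runs the deferred tasklet */
	shm_signal_enable(dev->signal, 0);
}

static void my_device_notify(struct shm_signal_notifier *n)
{
	struct my_device *dev = container_of(n, struct my_device, notifier);

	/* mask further remote notifications while we drain */
	shm_signal_disable(dev->signal, 0);
	schedule_work(&dev->poll_work);
}

static int my_device_setup(struct my_device *dev, struct shm_signal *s)
{
	dev->signal = s;
	dev->notifier.signal = my_device_notify;
	s->notifier = &dev->notifier;
	INIT_WORK(&dev->poll_work, my_device_poll);

	return shm_signal_enable(s, 0);
}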
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b804..93ca08b8a451 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size))
- panic("map_single: bounce buffer is not DMA'ble");
+ if (!dma_capable(dev, dev_addr, size)) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir);
+ dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+ }
return dev_addr;
}
diff --git a/mm/Makefile b/mm/Makefile
index 2b1b575ae712..42a8326c3e3d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -7,7 +7,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
vmalloc.o pagewalk.o pgtable-generic.o
-obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
+obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
@@ -15,6 +15,12 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
$(mmu-y)
obj-y += init-mm.o
+ifdef CONFIG_NO_BOOTMEM
+ obj-y += nobootmem.o
+else
+ obj-y += bootmem.o
+endif
+
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 13b0caa9793c..07aeb89e396e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -23,6 +23,13 @@
#include "internal.h"
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data = {
+ .bdata = &bootmem_node_data[0]
+};
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
@@ -35,7 +42,6 @@ unsigned long max_pfn;
unsigned long saved_max_pfn;
#endif
-#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -146,7 +152,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
min_low_pfn = start;
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-#endif
+
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
@@ -171,53 +177,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
}
}
-#ifdef CONFIG_NO_BOOTMEM
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
- int i;
- unsigned long start_aligned, end_aligned;
- int order = ilog2(BITS_PER_LONG);
-
- start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
- end_aligned = end & ~(BITS_PER_LONG - 1);
-
- if (end_aligned <= start_aligned) {
- for (i = start; i < end; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-
- return;
- }
-
- for (i = start; i < start_aligned; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-
- for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
- __free_pages_bootmem(pfn_to_page(i), order);
-
- for (i = end_aligned; i < end; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-}
-
-unsigned long __init free_all_memory_core_early(int nodeid)
-{
- int i;
- u64 start, end;
- unsigned long count = 0;
- struct range *range = NULL;
- int nr_range;
-
- nr_range = get_free_all_memory_range(&range, nodeid);
-
- for (i = 0; i < nr_range; i++) {
- start = range[i].start;
- end = range[i].end;
- count += end - start;
- __free_pages_memory(start, end);
- }
-
- return count;
-}
-#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
int aligned;
@@ -278,7 +237,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
return count;
}
-#endif
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -289,12 +247,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
-#ifdef CONFIG_NO_BOOTMEM
- /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
- return 0;
-#else
return free_all_bootmem_core(pgdat->bdata);
-#endif
}
/**
@@ -304,16 +257,6 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
-#ifdef CONFIG_NO_BOOTMEM
- /*
- * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
- * because in some case like Node0 doesnt have RAM installed
- * low ram will be on Node1
- * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
- * will be used instead of only Node0 related
- */
- return free_all_memory_core_early(MAX_NUMNODES);
-#else
unsigned long total_pages = 0;
bootmem_data_t *bdata;
@@ -321,10 +264,8 @@ unsigned long __init free_all_bootmem(void)
total_pages += free_all_bootmem_core(bdata);
return total_pages;
-#endif
}
-#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
unsigned long sidx, unsigned long eidx)
{
@@ -419,7 +360,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
}
BUG();
}
-#endif
/**
* free_bootmem_node - mark a page range as usable
@@ -434,10 +374,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
-#ifdef CONFIG_NO_BOOTMEM
- kmemleak_free_part(__va(physaddr), size);
- memblock_x86_free_range(physaddr, physaddr + size);
-#else
unsigned long start, end;
kmemleak_free_part(__va(physaddr), size);
@@ -446,7 +382,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
end = PFN_DOWN(physaddr + size);
mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
-#endif
}
/**
@@ -460,10 +395,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
-#ifdef CONFIG_NO_BOOTMEM
- kmemleak_free_part(__va(addr), size);
- memblock_x86_free_range(addr, addr + size);
-#else
unsigned long start, end;
kmemleak_free_part(__va(addr), size);
@@ -472,7 +403,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
end = PFN_DOWN(addr + size);
mark_bootmem(start, end, 0, 0);
-#endif
}
/**
@@ -489,17 +419,12 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
-#ifdef CONFIG_NO_BOOTMEM
- panic("no bootmem");
- return 0;
-#else
unsigned long start, end;
start = PFN_DOWN(physaddr);
end = PFN_UP(physaddr + size);
return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
-#endif
}
/**
@@ -515,20 +440,14 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
-#ifdef CONFIG_NO_BOOTMEM
- panic("no bootmem");
- return 0;
-#else
unsigned long start, end;
start = PFN_DOWN(addr);
end = PFN_UP(addr + size);
return mark_bootmem(start, end, 1, flags);
-#endif
}
-#ifndef CONFIG_NO_BOOTMEM
int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
int flags)
{
@@ -685,33 +604,12 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
#endif
return NULL;
}
-#endif
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
-#ifdef CONFIG_NO_BOOTMEM
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
-restart:
-
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
-
- if (ptr)
- return ptr;
-
- if (goal != 0) {
- goal = 0;
- goto restart;
- }
-
- return NULL;
-#else
bootmem_data_t *bdata;
void *region;
@@ -737,7 +635,6 @@ restart:
}
return NULL;
-#endif
}
/**
@@ -758,10 +655,6 @@ void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
{
unsigned long limit = 0;
-#ifdef CONFIG_NO_BOOTMEM
- limit = -1UL;
-#endif
-
return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
@@ -798,14 +691,9 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
{
unsigned long limit = 0;
-#ifdef CONFIG_NO_BOOTMEM
- limit = -1UL;
-#endif
-
return ___alloc_bootmem(size, align, goal, limit);
}
-#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
@@ -822,7 +710,6 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
return ___alloc_bootmem(size, align, goal, limit);
}
-#endif
/**
* __alloc_bootmem_node - allocate boot memory from a specific node
@@ -842,24 +729,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-#ifdef CONFIG_NO_BOOTMEM
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
- if (ptr)
- return ptr;
-
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
- goal, -1ULL);
-#else
- ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
-#endif
-
- return ptr;
+ return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -880,13 +753,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
unsigned long new_goal;
new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-#ifdef CONFIG_NO_BOOTMEM
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- new_goal, -1ULL);
-#else
ptr = alloc_bootmem_core(pgdat->bdata, size, align,
new_goal, 0);
-#endif
if (ptr)
return ptr;
}
@@ -907,16 +775,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
-#ifdef CONFIG_NO_BOOTMEM
- unsigned long pfn, goal, limit;
-
- pfn = section_nr_to_pfn(section_nr);
- goal = pfn << PAGE_SHIFT;
- limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-
- return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
- SMP_CACHE_BYTES, goal, limit);
-#else
bootmem_data_t *bdata;
unsigned long pfn, goal, limit;
@@ -926,7 +784,6 @@ void * __init alloc_bootmem_section(unsigned long size,
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
-#endif
}
#endif
@@ -938,16 +795,11 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-#ifdef CONFIG_NO_BOOTMEM
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
-#else
ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-#endif
if (ptr)
return ptr;
@@ -995,21 +847,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-#ifdef CONFIG_NO_BOOTMEM
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ return ___alloc_bootmem_node(pgdat->bdata, size, align,
goal, ARCH_LOW_ADDRESS_LIMIT);
- if (ptr)
- return ptr;
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
-#else
- ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
-#endif
- return ptr;
}
diff --git a/mm/internal.h b/mm/internal.h
index 69488205723d..3438dd43a062 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -245,11 +245,6 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
}
#endif /* CONFIG_SPARSEMEM */
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, unsigned int foll_flags,
- struct page **pages, struct vm_area_struct **vmas,
- int *nonblocking);
-
#define ZONE_RECLAIM_NOSCAN -2
#define ZONE_RECLAIM_FULL -1
#define ZONE_RECLAIM_SOME 0
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0207c2f6f8bd..99ccb4472623 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1487,35 +1487,3 @@ done:
/* keep elevated page count for bad page */
return ret;
}
-
-/*
- * The caller must hold current->mm->mmap_sem in read mode.
- */
-int is_hwpoison_address(unsigned long addr)
-{
- pgd_t *pgdp;
- pud_t pud, *pudp;
- pmd_t pmd, *pmdp;
- pte_t pte, *ptep;
- swp_entry_t entry;
-
- pgdp = pgd_offset(current->mm, addr);
- if (!pgd_present(*pgdp))
- return 0;
- pudp = pud_offset(pgdp, addr);
- pud = *pudp;
- if (!pud_present(pud) || pud_large(pud))
- return 0;
- pmdp = pmd_offset(pudp, addr);
- pmd = *pmdp;
- if (!pmd_present(pmd) || pmd_large(pmd))
- return 0;
- ptep = pte_offset_map(pmdp, addr);
- pte = *ptep;
- pte_unmap(ptep);
- if (!is_swap_pte(pte))
- return 0;
- entry = pte_to_swp_entry(pte);
- return is_hwpoison_entry(entry);
-}
-EXPORT_SYMBOL_GPL(is_hwpoison_address);
diff --git a/mm/memory.c b/mm/memory.c
index 8e8c18324863..e48945ab362b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1410,6 +1410,55 @@ no_page_table:
return page;
}
+/**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk: task_struct of target task
+ * @mm: mm_struct of target mm
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @gup_flags: flags modifying pin behaviour
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long. Or NULL, if caller
+ * only intends to ensure the pages are faulted in.
+ * @vmas: array of pointers to vmas corresponding to each page.
+ * Or NULL if the caller does not require them.
+ * @nonblocking: whether waiting for disk IO or mmap_sem contention
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with. vmas will only
+ * remain valid while mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * __get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * __get_user_pages returns, and there may even be a completely different
+ * page there in some cases (e.g. if mmapped pagecache has been invalidated
+ * and subsequently re-faulted). However it does guarantee that the page
+ * won't be freed completely. And mostly callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
+ * the page is written to, set_page_dirty (or set_page_dirty_lock, as
+ * appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
+ * or mmap_sem contention, and if waiting is needed to pin all pages,
+ * *@nonblocking will be set to 0.
+ *
+ * In most cases, get_user_pages or get_user_pages_fast should be used
+ * instead of __get_user_pages. __get_user_pages should be used only if
+ * you need some special @gup_flags.
+ */
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas,
@@ -1527,9 +1576,16 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
- if (ret &
- (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
- VM_FAULT_SIGBUS))
+ if (ret & (VM_FAULT_HWPOISON |
+ VM_FAULT_HWPOISON_LARGE)) {
+ if (i)
+ return i;
+ else if (gup_flags & FOLL_HWPOISON)
+ return -EHWPOISON;
+ else
+ return -EFAULT;
+ }
+ if (ret & VM_FAULT_SIGBUS)
return i ? i : -EFAULT;
BUG();
}
@@ -1578,6 +1634,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
} while (nr_pages);
return i;
}
+EXPORT_SYMBOL(__get_user_pages);
/**
* get_user_pages() - pin user pages in memory
@@ -2115,10 +2172,10 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
* handle_pte_fault chooses page fault handler according to an entry
* which was read non-atomically. Before making any commitment, on
* those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_file_page
+ * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
* must check under lock before unmapping the pte and proceeding
* (but do_wp_page is only called after already making such a check;
- * and do_anonymous_page and do_no_page can safely check later on).
+ * and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
pte_t *page_table, pte_t orig_pte)
@@ -2314,7 +2371,7 @@ reuse:
* bit after it clear all dirty ptes, but before a racing
* do_wp_page installs a dirty pte.
*
- * do_no_page is protected similarly.
+ * __do_fault is protected similarly.
*/
if (!page_mkwrite) {
wait_on_page_locked(dirty_page);
@@ -2648,6 +2705,7 @@ void unmap_mapping_range(struct address_space *mapping,
details.last_index = ULONG_MAX;
details.i_mmap_lock = &mapping->i_mmap_lock;
+ mutex_lock(&mapping->unmap_mutex);
spin_lock(&mapping->i_mmap_lock);
/* Protect against endless unmapping loops */
@@ -2664,6 +2722,7 @@ void unmap_mapping_range(struct address_space *mapping,
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
spin_unlock(&mapping->i_mmap_lock);
+ mutex_unlock(&mapping->unmap_mutex);
}
EXPORT_SYMBOL(unmap_mapping_range);
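The __get_user_pages() kernel-doc added above recommends that ordinary callers go through get_user_pages(), reserving __get_user_pages() for special gup_flags such as the new FOLL_HWPOISON handling. A sketch of that calling pattern; the surrounding function is illustrative only:

#include <linux/mm.h>
#include <linux/sched.h>

static int pin_one_user_page(unsigned long uaddr, struct page **page)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     1 /* nr_pages */, 1 /* write */, 0 /* force */,
			     page, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the page ... */

	set_page_dirty_lock(*page);	/* written to, so dirty it */
	put_page(*page);		/* drop the pin */
	return 0;
}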
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 368fc9d23610..b0e37b5ce7da 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -993,7 +993,7 @@ int do_migrate_pages(struct mm_struct *mm,
* most recent <s, d> pair that moved (s != d). If we find a pair
* that not only moved, but what's better, moved to an empty slot
* (d is not set in tmp), then we break out then, with that pair.
- * Otherwise when we finish scannng from_tmp, we at least have the
+ * Otherwise when we finish scanning from_tmp, we at least have the
* most recent <s, d> pair that moved. If we get all the way through
* the scan of tmp without finding any node that moved, much less
* moved to an empty node, then there is nothing left worth migrating.
@@ -1830,7 +1830,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
unsigned nid;
- nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+ nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
put_mems_allowed();
diff --git a/mm/migrate.c b/mm/migrate.c
index 766115253807..352de555626c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
return -EPERM;
/* Find the mm_struct */
- read_lock(&tasklist_lock);
+ rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return -ESRCH;
}
mm = get_task_mm(task);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!mm)
return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index 9925b6391b80..1de98d492ddc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
*/
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
- if (new_vma->vm_truncate_count &&
- new_vma->vm_truncate_count != vma->vm_truncate_count)
- new_vma->vm_truncate_count = 0;
+ new_vma->vm_truncate_count = 0;
}
/*
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
new file mode 100644
index 000000000000..e2bdb07079ce
--- /dev/null
+++ b/mm/nobootmem.c
@@ -0,0 +1,435 @@
+/*
+ * bootmem - A boot-time physical memory allocator and configurator
+ *
+ * Copyright (C) 1999 Ingo Molnar
+ * 1999 Kanoj Sarcar, SGI
+ * 2008 Johannes Weiner
+ *
+ * Access to this subsystem has to be serialized externally (which is true
+ * for the boot process anyway).
+ */
+#include <linux/init.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/kmemleak.h>
+#include <linux/range.h>
+#include <linux/memblock.h>
+
+#include <asm/bug.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+#include "internal.h"
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data;
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * If we have booted due to a crash, max_pfn will be a very low value. We need
+ * to know the amount of memory that the previous kernel used.
+ */
+unsigned long saved_max_pfn;
+#endif
+
+static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ void *ptr;
+ u64 addr;
+
+ if (limit > memblock.current_limit)
+ limit = memblock.current_limit;
+
+ addr = find_memory_core_early(nid, size, align, goal, limit);
+
+ if (addr == MEMBLOCK_ERROR)
+ return NULL;
+
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+ /*
+ * The min_count is set to 0 so that bootmem allocated blocks
+ * are never reported as leaks.
+ */
+ kmemleak_alloc(ptr, size, 0, 0);
+ return ptr;
+}
+
+/*
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system. Pages are given directly
+ * to the page allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+ unsigned long cursor, end;
+
+ kmemleak_free_part(__va(addr), size);
+
+ cursor = PFN_UP(addr);
+ end = PFN_DOWN(addr + size);
+
+ for (; cursor < end; cursor++) {
+ __free_pages_bootmem(pfn_to_page(cursor), 0);
+ totalram_pages++;
+ }
+}
+
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+ int i;
+ unsigned long start_aligned, end_aligned;
+ int order = ilog2(BITS_PER_LONG);
+
+ start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+ end_aligned = end & ~(BITS_PER_LONG - 1);
+
+ if (end_aligned <= start_aligned) {
+ for (i = start; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ return;
+ }
+
+ for (i = start; i < start_aligned; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
+ __free_pages_bootmem(pfn_to_page(i), order);
+
+ for (i = end_aligned; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+}
+
+unsigned long __init free_all_memory_core_early(int nodeid)
+{
+ int i;
+ u64 start, end;
+ unsigned long count = 0;
+ struct range *range = NULL;
+ int nr_range;
+
+ nr_range = get_free_all_memory_range(&range, nodeid);
+
+ for (i = 0; i < nr_range; i++) {
+ start = range[i].start;
+ end = range[i].end;
+ count += end - start;
+ __free_pages_memory(start, end);
+ }
+
+ return count;
+}
+
+/**
+ * free_all_bootmem_node - release a node's free pages to the buddy allocator
+ * @pgdat: node to be released
+ *
+ * Returns the number of pages actually released.
+ */
+unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+{
+ register_page_bootmem_info_node(pgdat);
+
+ /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+ return 0;
+}
+
+/**
+ * free_all_bootmem - release free pages to the buddy allocator
+ *
+ * Returns the number of pages actually released.
+ */
+unsigned long __init free_all_bootmem(void)
+{
+ /*
+ * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
+ * because in some cases, e.g. when Node0 doesn't have RAM installed,
+ * low RAM will be on Node1. Using MAX_NUMNODES makes sure all ranges
+ * in early_node_map[] are used instead of only the Node0-related ones.
+ */
+ return free_all_memory_core_early(MAX_NUMNODES);
+}
+
+/**
+ * free_bootmem_node - mark a page range as usable
+ * @pgdat: node the range resides on
+ * @physaddr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * The range must reside completely on the specified node.
+ */
+void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+ unsigned long size)
+{
+ kmemleak_free_part(__va(physaddr), size);
+ memblock_x86_free_range(physaddr, physaddr + size);
+}
+
+/**
+ * free_bootmem - mark a page range as usable
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * The range must be contiguous but may span node boundaries.
+ */
+void __init free_bootmem(unsigned long addr, unsigned long size)
+{
+ kmemleak_free_part(__va(addr), size);
+ memblock_x86_free_range(addr, addr + size);
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
+{
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
+
+restart:
+
+ ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+
+ if (ptr)
+ return ptr;
+
+ if (goal != 0) {
+ goal = 0;
+ goto restart;
+ }
+
+ return NULL;
+}
+
+/**
+ * __alloc_bootmem_nopanic - allocate boot memory without panicking
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * Returns NULL on failure.
+ */
+void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ unsigned long limit = -1UL;
+
+ return ___alloc_bootmem_nopanic(size, align, goal, limit);
+}
+
+static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
+{
+ void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
+
+ if (mem)
+ return mem;
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
+}
+
+/**
+ * __alloc_bootmem - allocate boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ unsigned long limit = -1UL;
+
+ return ___alloc_bootmem(size, align, goal, limit);
+}
+
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+ if (ptr)
+ return ptr;
+
+ return __alloc_memory_core_early(MAX_NUMNODES, size, align,
+ goal, -1ULL);
+}
+
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+#ifdef MAX_DMA32_PFN
+ unsigned long end_pfn;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ /* update goal according to ...MAX_DMA32_PFN */
+ end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+ if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
+ (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
+ void *ptr;
+ unsigned long new_goal;
+
+ new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ new_goal, -1ULL);
+ if (ptr)
+ return ptr;
+ }
+#endif
+
+ return __alloc_bootmem_node(pgdat, size, align, goal);
+
+}
+
+#ifdef CONFIG_SPARSEMEM
+/**
+ * alloc_bootmem_section - allocate boot memory from a specific section
+ * @size: size of the request in bytes
+ * @section_nr: sparse map section to allocate from
+ *
+ * Return NULL on failure.
+ */
+void * __init alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr)
+{
+ unsigned long pfn, goal, limit;
+
+ pfn = section_nr_to_pfn(section_nr);
+ goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+
+ return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
+ SMP_CACHE_BYTES, goal, limit);
+}
+#endif
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+ if (ptr)
+ return ptr;
+
+ return __alloc_bootmem_nopanic(size, align, goal);
+}
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+/**
+ * __alloc_bootmem_low - allocate low boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
+}
+
+/**
+ * __alloc_bootmem_low_node - allocate low boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
+ if (ptr)
+ return ptr;
+
+ return __alloc_memory_core_early(MAX_NUMNODES, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
+}
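Callers are unaffected by the bootmem/nobootmem split: both files implement the same entry points declared in linux/bootmem.h. A sketch of a typical early-boot allocation; the table being allocated is hypothetical:

#include <linux/bootmem.h>
#include <linux/cache.h>

static void * __init early_table_alloc(unsigned long entries)
{
	/*
	 * Panics on failure; __alloc_bootmem_nopanic() returns NULL
	 * instead.  goal == 0 expresses no address preference, and the
	 * returned memory is already zeroed.
	 */
	return __alloc_bootmem(entries * sizeof(void *), SMP_CACHE_BYTES, 0);
}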
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a873e61e312e..0b788f05c06e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3616,6 +3616,34 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
return -1;
}
+/*
+ * Basic iterator support. Return the last range of PFNs for a node
+ * Note: nid == MAX_NUMNODES returns last region regardless of node
+ */
+static int __meminit last_active_region_index_in_nid(int nid)
+{
+ int i;
+
+ for (i = nr_nodemap_entries - 1; i >= 0; i--)
+ if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
+ return i;
+
+ return -1;
+}
+
+/*
+ * Basic iterator support. Return the previous active range of PFNs for a node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
+ */
+static int __meminit previous_active_region_index_in_nid(int index, int nid)
+{
+ for (index = index - 1; index >= 0; index--)
+ if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
+ return index;
+
+ return -1;
+}
+
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3667,6 +3695,10 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
for (i = first_active_region_index_in_nid(nid); i != -1; \
i = next_active_region_index_in_nid(i, nid))
+#define for_each_active_range_index_in_nid_reverse(i, nid) \
+ for (i = last_active_region_index_in_nid(nid); i != -1; \
+ i = previous_active_region_index_in_nid(i, nid))
+
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3705,7 +3737,7 @@ u64 __init find_memory_core_early(int nid, u64 size, u64 align,
int i;
/* Need to go over early_node_map to find out good range for node */
- for_each_active_range_index_in_nid(i, nid) {
+ for_each_active_range_index_in_nid_reverse(i, nid) {
u64 addr;
u64 ei_start, ei_last;
u64 final_start, final_end;
@@ -3748,34 +3780,6 @@ int __init add_from_early_node_map(struct range *range, int az,
return nr_range;
}
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit)
-{
- void *ptr;
- u64 addr;
-
- if (limit > memblock.current_limit)
- limit = memblock.current_limit;
-
- addr = find_memory_core_early(nid, size, align, goal, limit);
-
- if (addr == MEMBLOCK_ERROR)
- return NULL;
-
- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
- /*
- * The min_count is set to 0 so that bootmem allocated blocks
- * are never reported as leaks.
- */
- kmemleak_alloc(ptr, size, 0, 0);
- return ptr;
-}
-#endif
-
-
void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
int i;
@@ -4809,15 +4813,6 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
dma_reserve = new_dma_reserve;
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = {
-#ifndef CONFIG_NO_BOOTMEM
- .bdata = &bootmem_node_data[0]
-#endif
- };
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
void __init free_area_init(unsigned long *zones_size)
{
free_area_init_node(0, zones_size,
@@ -5376,10 +5371,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
unsigned long check = pfn + iter;
- if (!pfn_valid_within(check)) {
- iter++;
+ if (!pfn_valid_within(check))
continue;
- }
+
page = pfn_to_page(check);
if (!page_count(page)) {
if (PageBuddy(page))
diff --git a/mm/shmem.c b/mm/shmem.c
index 5ee67c990602..af0860377b30 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -779,7 +779,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
* If truncating down to a partial page, then
* if that page is already allocated, hold it
* in memory until the truncation is over, so
- * truncate_partial_page cannnot miss it were
+ * truncate_partial_page cannot miss it were
* it assigned to swap.
*/
if (newsize & (PAGE_CACHE_SIZE-1)) {
@@ -1843,8 +1843,9 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
- error = security_inode_init_security(inode, dir, NULL, NULL,
- NULL);
+ error = security_inode_init_security(inode, dir,
+ &dentry->d_name, NULL,
+ NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
@@ -1983,8 +1984,8 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
if (!inode)
return -ENOSPC;
- error = security_inode_init_security(inode, dir, NULL, NULL,
- NULL);
+ error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
+ NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
diff --git a/mm/slab.c b/mm/slab.c
index 37961d1f584f..2d5b92c09ae3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2147,8 +2147,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
*
* @name must be valid until the cache is destroyed. This implies that
* the module calling this has to destroy the cache before getting unloaded.
- * Note that kmem_cache_name() is not guaranteed to return the same pointer,
- * therefore applications must manage it themselves.
*
* The flags are
*
@@ -2288,8 +2286,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (ralign < align) {
ralign = align;
}
- /* disable debug if not aligning with REDZONE_ALIGN */
- if (ralign & (__alignof__(unsigned long long) - 1))
+ /* disable debug if necessary */
+ if (ralign > __alignof__(unsigned long long))
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
/*
* 4) Store it.
@@ -2315,8 +2313,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
if (flags & SLAB_RED_ZONE) {
/* add space for red zone words */
- cachep->obj_offset += align;
- size += align + sizeof(unsigned long long);
+ cachep->obj_offset += sizeof(unsigned long long);
+ size += 2 * sizeof(unsigned long long);
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
@@ -3840,12 +3838,6 @@ unsigned int kmem_cache_size(struct kmem_cache *cachep)
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *cachep)
-{
- return cachep->name;
-}
-EXPORT_SYMBOL_GPL(kmem_cache_name);
-
/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
diff --git a/mm/slob.c b/mm/slob.c
index 3588eaaef726..46e0aee33a23 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,12 +666,6 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *c)
-{
- return c->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
int kmem_cache_shrink(struct kmem_cache *d)
{
return 0;
diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f193c9..ea6f0390996f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,6 +281,30 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->objsize;
+
+#endif
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+}
+
static inline struct kmem_cache_order_objects oo_make(int order,
unsigned long size)
{
@@ -800,7 +824,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
flags &= gfp_allowed_mask;
- kmemcheck_slab_alloc(s, flags, object, s->objsize);
+ kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
}
@@ -2399,12 +2423,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *s)
-{
- return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
@@ -2696,7 +2714,6 @@ EXPORT_SYMBOL(__kmalloc_node);
size_t ksize(const void *object)
{
struct page *page;
- struct kmem_cache *s;
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
@@ -2707,28 +2724,8 @@ size_t ksize(const void *object)
WARN_ON(!PageCompound(page));
return PAGE_SIZE << compound_order(page);
}
- s = page->slab;
-
-#ifdef CONFIG_SLUB_DEBUG
- /*
- * Debugging requires use of the padding between object
- * and whatever may come after it.
- */
- if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->objsize;
-#endif
- /*
- * If we have the need to store the freelist pointer
- * back there or track user information then we can
- * only use the space before that information.
- */
- if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
- */
- return s->size;
+ return slab_ksize(page->slab);
}
EXPORT_SYMBOL(ksize);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 07a458d72fa8..0341c5700e34 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EINVAL;
if (S_ISBLK(inode->i_mode)) {
- bdev = I_BDEV(inode);
+ bdev = bdgrab(I_BDEV(inode));
error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
sys_swapon);
if (error < 0) {
diff --git a/mm/truncate.c b/mm/truncate.c
index 49feb46e77b8..d64296be00d3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
next = start;
while (next <= end &&
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17497d0cd8b9..6771ea70bfe7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
return false;
- /*
- * If we failed to reclaim and have scanned the full list, stop.
- * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
- * faster but obviously would be less likely to succeed
- * allocation. If this is desirable, use GFP_REPEAT to decide
- * if both reclaimed and scanned should be checked or just
- * reclaimed
- */
- if (!nr_reclaimed && !nr_scanned)
- return false;
+ /* Consider stopping depending on scan and reclaim activity */
+ if (sc->gfp_mask & __GFP_REPEAT) {
+ /*
+ * For __GFP_REPEAT allocations, stop reclaiming if the
+ * full LRU list has been scanned and we are still failing
+ * to reclaim pages. This full LRU scan is potentially
+ * expensive but a __GFP_REPEAT caller really wants to succeed
+ */
+ if (!nr_reclaimed && !nr_scanned)
+ return false;
+ } else {
+ /*
+ * For non-__GFP_REPEAT allocations which can presumably
+ * fail without consequence, stop if we failed to reclaim
+ * any pages from the last SWAP_CLUSTER_MAX number of
+ * pages that were scanned. This will return to the
+ * caller faster at the risk reclaim/compaction and
+ * the resulting allocation attempt fails
+ */
+ if (!nr_reclaimed)
+ return false;
+ }
/*
* If we have not reclaimed enough pages for compaction and the
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6e64f7c6a2e9..7850412f52b7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev,
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
- unsigned long old_features = vlandev->features;
+ u32 old_features = vlandev->features;
vlandev->features &= ~dev->vlan_features;
vlandev->features |= dev->features & dev->vlan_features;
diff --git a/net/9p/Makefile b/net/9p/Makefile
index 198a640d53a6..a0874cc1f718 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
util.o \
protocol.o \
trans_fd.o \
+ trans_common.o \
9pnet_virtio-objs := \
trans_virtio.o \
diff --git a/net/9p/client.c b/net/9p/client.c
index a848bca9fbff..347ec0cd2718 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -229,10 +229,23 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
init_waitqueue_head(req->wq);
- req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_KERNEL);
- req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_KERNEL);
+ if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
+ P9_TRANS_PREF_PAYLOAD_SEP) {
+ int alloc_msize = min(c->msize, 4096);
+ req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
+ GFP_KERNEL);
+ req->tc->capacity = alloc_msize;
+ req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
+ GFP_KERNEL);
+ req->rc->capacity = alloc_msize;
+ } else {
+ req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
+ GFP_KERNEL);
+ req->tc->capacity = c->msize;
+ req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
+ GFP_KERNEL);
+ req->rc->capacity = c->msize;
+ }
if ((!req->tc) || (!req->rc)) {
printk(KERN_ERR "Couldn't grow tag array\n");
kfree(req->tc);
@@ -243,9 +256,7 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
- req->tc->capacity = c->msize;
req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
- req->rc->capacity = c->msize;
}
p9pdu_reset(req->tc);
@@ -443,6 +454,7 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
{
int8_t type;
int err;
+ int ecode;
err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
if (err) {
@@ -450,36 +462,53 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
return err;
}
- if (type == P9_RERROR || type == P9_RLERROR) {
- int ecode;
-
- if (!p9_is_proto_dotl(c)) {
- char *ename;
+ if (type != P9_RERROR && type != P9_RLERROR)
+ return 0;
- err = p9pdu_readf(req->rc, c->proto_version, "s?d",
- &ename, &ecode);
- if (err)
- goto out_err;
+ if (!p9_is_proto_dotl(c)) {
+ char *ename;
+
+ if (req->tc->pbuf_size) {
+ /* Handle user buffers */
+ size_t len = req->rc->size - req->rc->offset;
+ if (req->tc->pubuf) {
+ /* User Buffer */
+ err = copy_from_user(
+ &req->rc->sdata[req->rc->offset],
+ req->tc->pubuf, len);
+ if (err) {
+ err = -EFAULT;
+ goto out_err;
+ }
+ } else {
+ /* Kernel Buffer */
+ memmove(&req->rc->sdata[req->rc->offset],
+ req->tc->pkbuf, len);
+ }
+ }
+ err = p9pdu_readf(req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+ if (err)
+ goto out_err;
- if (p9_is_proto_dotu(c))
- err = -ecode;
+ if (p9_is_proto_dotu(c))
+ err = -ecode;
- if (!err || !IS_ERR_VALUE(err)) {
- err = p9_errstr2errno(ename, strlen(ename));
+ if (!err || !IS_ERR_VALUE(err)) {
+ err = p9_errstr2errno(ename, strlen(ename));
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
+ ename);
- kfree(ename);
- }
- } else {
- err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
- err = -ecode;
-
- P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ kfree(ename);
}
+ } else {
+ err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+ err = -ecode;
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ }
- } else
- err = 0;
return err;
@@ -1191,6 +1220,27 @@ error:
}
EXPORT_SYMBOL(p9_client_fsync);
+int p9_client_sync_fs(struct p9_fid *fid)
+{
+ int err = 0;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
+
+ clnt = fid->clnt;
+ req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_sync_fs);
+
int p9_client_clunk(struct p9_fid *fid)
{
int err;
@@ -1270,7 +1320,15 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
if (count < rsize)
rsize = count;
- req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, rsize);
+	/* Don't bother with zerocopy for small IO (< 1024) */
+ if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
+ P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
+ req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
+ rsize, data, udata);
+ } else {
+ req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
+ rsize);
+ }
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
@@ -1284,13 +1342,15 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
- if (data) {
- memmove(data, dataptr, count);
- } else {
- err = copy_to_user(udata, dataptr, count);
- if (err) {
- err = -EFAULT;
- goto free_and_error;
+ if (!req->tc->pbuf_size) {
+ if (data) {
+ memmove(data, dataptr, count);
+ } else {
+ err = copy_to_user(udata, dataptr, count);
+ if (err) {
+ err = -EFAULT;
+ goto free_and_error;
+ }
}
}
p9_free_req(clnt, req);
@@ -1323,12 +1383,21 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
if (count < rsize)
rsize = count;
- if (data)
- req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid, offset,
- rsize, data);
- else
- req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid, offset,
- rsize, udata);
+
+	/* Don't bother with zerocopy for small IO (< 1024) */
+ if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
+ P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
+ req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
+ rsize, data, udata);
+ } else {
+
+ if (data)
+ req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
+ offset, rsize, data);
+ else
+ req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
+ offset, rsize, udata);
+ }
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
@@ -1716,7 +1785,14 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
if (count < rsize)
rsize = count;
- req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, offset, rsize);
+ if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
+ P9_TRANS_PREF_PAYLOAD_SEP) {
+ req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
+ offset, rsize, data);
+ } else {
+ req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
+ offset, rsize);
+ }
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
@@ -1730,7 +1806,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
- if (data)
+ if (!req->tc->pbuf_size && data)
memmove(data, dataptr, count);
p9_free_req(clnt, req);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 1e308f210928..2ce515b859b3 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -114,6 +114,26 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
return size - len;
}
+static size_t
+pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
+ size_t size)
+{
+ BUG_ON(pdu->size > P9_IOHDRSZ);
+ pdu->pubuf = (char __user *)udata;
+ pdu->pkbuf = (char *)kdata;
+ pdu->pbuf_size = size;
+ return 0;
+}
+
+static size_t
+pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
+{
+ BUG_ON(pdu->size > P9_READDIRHDRSZ);
+ pdu->pkbuf = (char *)kdata;
+ pdu->pbuf_size = size;
+ return 0;
+}
+
/*
b - int8_t
w - int16_t
@@ -445,6 +465,25 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
errcode = -EFAULT;
}
break;
+ case 'E':{
+ int32_t cnt = va_arg(ap, int32_t);
+ const char *k = va_arg(ap, const void *);
+ const char *u = va_arg(ap, const void *);
+ errcode = p9pdu_writef(pdu, proto_version, "d",
+ cnt);
+ if (!errcode && pdu_write_urw(pdu, k, u, cnt))
+ errcode = -EFAULT;
+ }
+ break;
+ case 'F':{
+ int32_t cnt = va_arg(ap, int32_t);
+ const char *k = va_arg(ap, const void *);
+ errcode = p9pdu_writef(pdu, proto_version, "d",
+ cnt);
+ if (!errcode && pdu_write_readdir(pdu, k, cnt))
+ errcode = -EFAULT;
+ }
+ break;
case 'U':{
int32_t count = va_arg(ap, int32_t);
const char __user *udata =
@@ -579,6 +618,7 @@ EXPORT_SYMBOL(p9stat_read);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
{
+ pdu->id = type;
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
@@ -606,6 +646,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
+ pdu->private = NULL;
+ pdu->pubuf = NULL;
+ pdu->pkbuf = NULL;
+ pdu->pbuf_size = 0;
}
int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
new file mode 100644
index 000000000000..d62b9aa58df8
--- /dev/null
+++ b/net/9p/trans_common.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <linux/scatterlist.h>
+#include "trans_common.h"
+
+/**
+ * p9_release_req_pages - Release pages after the transaction.
+ * @rpinfo: the PDU's private struct trans_rpage_info holding the mapped pages
+ */
+void
+p9_release_req_pages(struct trans_rpage_info *rpinfo)
+{
+ int i = 0;
+
+ while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
+ put_page(rpinfo->rp_data[i]);
+ i++;
+ }
+}
+EXPORT_SYMBOL(p9_release_req_pages);
+
+/**
+ * p9_nr_pages - Return number of pages needed to accommodate the payload.
+ */
+int
+p9_nr_pages(struct p9_req_t *req)
+{
+ int start_page, end_page;
+ start_page = (unsigned long long)req->tc->pubuf >> PAGE_SHIFT;
+ end_page = ((unsigned long long)req->tc->pubuf + req->tc->pbuf_size +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+ return end_page - start_page;
+}
+EXPORT_SYMBOL(p9_nr_pages);
+
+/**
+ * p9_payload_gup - Translates user buffer into kernel pages and
+ * pins them for read/write through get_user_pages_fast().
+ * @req: Request to be sent to server.
+ * @pdata_off: data offset into the first page after translation (gup).
+ * @pdata_len: Total length of the IO. gup may not return requested # of pages.
+ * @nr_pages: number of pages to accommodate the payload
+ * @rw: Indicates if the pages are for read or write.
+ */
+int
+p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
+ int nr_pages, u8 rw)
+{
+ uint32_t first_page_bytes = 0;
+ uint32_t pdata_mapped_pages;
+ struct trans_rpage_info *rpinfo;
+
+ *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);
+
+ if (*pdata_off)
+ first_page_bytes = min((PAGE_SIZE - *pdata_off),
+ req->tc->pbuf_size);
+
+ rpinfo = req->tc->private;
+ pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
+ nr_pages, rw, &rpinfo->rp_data[0]);
+
+ if (pdata_mapped_pages < 0) {
+		printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p"
+			" nr_pages:%d\n", pdata_mapped_pages,
+ req->tc->pubuf, nr_pages);
+ pdata_mapped_pages = 0;
+ return -EIO;
+ }
+ rpinfo->rp_nr_pages = pdata_mapped_pages;
+ if (*pdata_off) {
+ *pdata_len = first_page_bytes;
+ *pdata_len += min((req->tc->pbuf_size - *pdata_len),
+ ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
+ } else {
+ *pdata_len = min(req->tc->pbuf_size,
+ (size_t)pdata_mapped_pages << PAGE_SHIFT);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(p9_payload_gup);
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
new file mode 100644
index 000000000000..76309223bb02
--- /dev/null
+++ b/net/9p/trans_common.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+/* TRUE if it is user context */
+#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
+
+/**
+ * struct trans_rpage_info - To store mapped page information in PDU.
+ * @rp_alloc: Set if this structure was allocated, rather than reusing unused space in the PDU.
+ * @rp_nr_pages: Number of mapped pages
+ * @rp_data: Array of page pointers
+ */
+struct trans_rpage_info {
+ u8 rp_alloc;
+ int rp_nr_pages;
+ struct page *rp_data[0];
+};
+
+void p9_release_req_pages(struct trans_rpage_info *);
+int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
+int p9_nr_pages(struct p9_req_t *);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 078eb162d9bf..a30471e51740 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -153,10 +153,11 @@ struct p9_conn {
unsigned long wsched;
};
+static void p9_poll_workfn(struct work_struct *work);
+
static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
-static struct workqueue_struct *p9_mux_wq;
-static struct task_struct *p9_poll_task;
+static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
static void p9_mux_poll_stop(struct p9_conn *m)
{
@@ -384,7 +385,7 @@ static void p9_read_work(struct work_struct *work)
if (n & POLLIN) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
- queue_work(p9_mux_wq, &m->rq);
+ schedule_work(&m->rq);
} else
clear_bit(Rworksched, &m->wsched);
} else
@@ -497,7 +498,7 @@ static void p9_write_work(struct work_struct *work)
if (n & POLLOUT) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
- queue_work(p9_mux_wq, &m->wq);
+ schedule_work(&m->wq);
} else
clear_bit(Wworksched, &m->wsched);
} else
@@ -516,15 +517,14 @@ static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
container_of(wait, struct p9_poll_wait, wait);
struct p9_conn *m = pwait->conn;
unsigned long flags;
- DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
spin_lock_irqsave(&p9_poll_lock, flags);
if (list_empty(&m->poll_pending_link))
list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
spin_unlock_irqrestore(&p9_poll_lock, flags);
- /* perform the default wake up operation */
- return default_wake_function(&dummy_wait, mode, sync, key);
+ schedule_work(&p9_poll_work);
+ return 1;
}
/**
@@ -629,7 +629,7 @@ static void p9_poll_mux(struct p9_conn *m)
P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
- queue_work(p9_mux_wq, &m->rq);
+ schedule_work(&m->rq);
}
}
@@ -639,7 +639,7 @@ static void p9_poll_mux(struct p9_conn *m)
if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
- queue_work(p9_mux_wq, &m->wq);
+ schedule_work(&m->wq);
}
}
}
@@ -677,7 +677,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
n = p9_fd_poll(m->client, NULL);
if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
- queue_work(p9_mux_wq, &m->wq);
+ schedule_work(&m->wq);
return 0;
}
@@ -1047,12 +1047,12 @@ static struct p9_trans_module p9_fd_trans = {
*
*/
-static int p9_poll_proc(void *a)
+static void p9_poll_workfn(struct work_struct *work)
{
unsigned long flags;
P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
- repeat:
+
spin_lock_irqsave(&p9_poll_lock, flags);
while (!list_empty(&p9_poll_pending_list)) {
struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
@@ -1067,35 +1067,11 @@ static int p9_poll_proc(void *a)
}
spin_unlock_irqrestore(&p9_poll_lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&p9_poll_pending_list)) {
- P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
- schedule();
- }
- __set_current_state(TASK_RUNNING);
-
- if (!kthread_should_stop())
- goto repeat;
-
P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
- return 0;
}
int p9_trans_fd_init(void)
{
- p9_mux_wq = create_workqueue("v9fs");
- if (!p9_mux_wq) {
- printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
- return -ENOMEM;
- }
-
- p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
- if (IS_ERR(p9_poll_task)) {
- destroy_workqueue(p9_mux_wq);
- printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
- return PTR_ERR(p9_poll_task);
- }
-
v9fs_register_trans(&p9_tcp_trans);
v9fs_register_trans(&p9_unix_trans);
v9fs_register_trans(&p9_fd_trans);
@@ -1105,10 +1081,8 @@ int p9_trans_fd_init(void)
void p9_trans_fd_exit(void)
{
- kthread_stop(p9_poll_task);
+ flush_work_sync(&p9_poll_work);
v9fs_unregister_trans(&p9_tcp_trans);
v9fs_unregister_trans(&p9_unix_trans);
v9fs_unregister_trans(&p9_fd_trans);
-
- destroy_workqueue(p9_mux_wq);
}
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 17c5ba7551a5..29a54ccd213d 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -59,7 +59,6 @@
* safely advertise a maxsize
* of 64k */
-#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
/**
* struct p9_trans_rdma - RDMA transport instance
*
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index c8f3f72ab20e..9b550ed9c711 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -45,6 +45,7 @@
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
+#include "trans_common.h"
#define VIRTQUEUE_NUM 128
@@ -155,6 +156,14 @@ static void req_done(struct virtqueue *vq)
rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
req->status = REQ_STATUS_RCVD;
+ if (req->tc->private) {
+ struct trans_rpage_info *rp = req->tc->private;
+			/* Release pages */
+ p9_release_req_pages(rp);
+ if (rp->rp_alloc)
+ kfree(rp);
+ req->tc->private = NULL;
+ }
p9_client_cb(chan->client, req);
} else {
spin_unlock_irqrestore(&chan->lock, flags);
@@ -203,6 +212,38 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
}
/**
+ * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
+ * this takes a list of pages.
+ * @sg: scatter/gather list to pack into
+ * @start: which segment of the sg_list to start at
+ * @pdata_off: Offset into the first page
+ * @**pdata: a list of pages to add into sg.
+ * @count: amount of data to pack into the scatter/gather list
+ */
+static int
+pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
+ struct page **pdata, int count)
+{
+ int s;
+ int i = 0;
+ int index = start;
+
+ if (pdata_off) {
+ s = min((int)(PAGE_SIZE - pdata_off), count);
+ sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
+ count -= s;
+ }
+
+ while (count) {
+ BUG_ON(index > limit);
+ s = min((int)PAGE_SIZE, count);
+ sg_set_page(&sg[index++], pdata[i++], s, 0);
+ count -= s;
+ }
+ return index-start;
+}
+
+/**
* p9_virtio_request - issue a request
* @client: client instance issuing the request
* @req: request to be issued
@@ -212,22 +253,97 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
- int in, out;
+ int in, out, inp, outp;
struct virtio_chan *chan = client->trans;
char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
unsigned long flags;
- int err;
+ size_t pdata_off = 0;
+ struct trans_rpage_info *rpinfo = NULL;
+ int err, pdata_len = 0;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req_retry:
req->status = REQ_STATUS_SENT;
+ if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
+ int nr_pages = p9_nr_pages(req);
+ int rpinfo_size = sizeof(struct trans_rpage_info) +
+ sizeof(struct page *) * nr_pages;
+
+ if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
+ /* We can use sdata */
+ req->tc->private = req->tc->sdata + req->tc->size;
+ rpinfo = (struct trans_rpage_info *)req->tc->private;
+ rpinfo->rp_alloc = 0;
+ } else {
+ req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
+ if (!req->tc->private) {
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
+ "private kmalloc returned NULL");
+ return -ENOMEM;
+ }
+ rpinfo = (struct trans_rpage_info *)req->tc->private;
+ rpinfo->rp_alloc = 1;
+ }
+
+ err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
+ req->tc->id == P9_TREAD ? 1 : 0);
+ if (err < 0) {
+ if (rpinfo->rp_alloc)
+ kfree(rpinfo);
+ return err;
+ }
+ }
+
spin_lock_irqsave(&chan->lock, flags);
+
+ /* Handle out VirtIO ring buffers */
out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
- req->tc->size);
- in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
- client->msize);
+ req->tc->size);
+
+ if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
+ /* We have additional write payload buffer to take care */
+ if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
+ outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+ pdata_off, rpinfo->rp_data, pdata_len);
+ } else {
+ char *pbuf = req->tc->pubuf ? req->tc->pubuf :
+ req->tc->pkbuf;
+ outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
+ req->tc->pbuf_size);
+ }
+ out += outp;
+ }
+
+ /* Handle in VirtIO ring buffers */
+ if (req->tc->pbuf_size &&
+ ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
+ /*
+ * Take care of additional Read payload.
+ * 11 is the read/write header = PDU Header(7) + IO Size (4).
+		 * Arrange it so that the server places the header in the
+		 * allocated memory and the payload onto the user buffer.
+ */
+ inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+ /*
+ * Running executables in the filesystem may result in
+ * a read request with kernel buffer as opposed to user buffer.
+ */
+ if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
+ in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
+ pdata_off, rpinfo->rp_data, pdata_len);
+ } else {
+ char *pbuf = req->tc->pubuf ? req->tc->pubuf :
+ req->tc->pkbuf;
+ in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
+ pbuf, req->tc->pbuf_size);
+ }
+ in += inp;
+ } else {
+ in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
+ client->msize);
+ }
err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
if (err < 0) {
@@ -246,6 +362,8 @@ req_retry:
P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: "
"virtio rpc add_buf returned failure");
+ if (rpinfo && rpinfo->rp_alloc)
+ kfree(rpinfo);
return -EIO;
}
}
@@ -448,6 +566,7 @@ static struct p9_trans_module p9_virtio_trans = {
.request = p9_virtio_request,
.cancel = p9_virtio_cancel,
.maxsize = PAGE_SIZE*16,
+ .pref = P9_TRANS_PREF_PAYLOAD_SEP,
.def = 0,
.owner = THIS_MODULE,
};
diff --git a/net/Kconfig b/net/Kconfig
index 72840626284b..79cabf1ee68b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -221,6 +221,12 @@ config RPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
+config RFS_ACCEL
+ boolean
+ depends on RPS && GENERIC_HARDIRQS
+ select CPU_RMAP
+ default y
+
config XPS
boolean
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index d936aeccd194..2de93d00631b 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 3850a3ecf947..1997725a243b 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 71a91b3da913..6ce305b40017 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0ae81d07f102..0e9d43509935 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -52,7 +52,6 @@ static void emit_log_char(struct debug_log *debug_log, char c)
static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
{
- int printed_len;
va_list args;
static char debug_log_buf[256];
char *p;
@@ -62,8 +61,7 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
spin_lock_bh(&debug_log->lock);
va_start(args, fmt);
- printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
- fmt, args);
+ vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
va_end(args);
for (p = debug_log_buf; *p != 0; p++)
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h
index 72df532b7d5f..bc9cda3f01e1 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/bat_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd7bb51825f1..f7b93a0805fe 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 7f186c007b4f..02f1fa7aadfa 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index bbcd8f744cdd..ad2ca925b3e0 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index ac54017601b1..769c246d1fc1 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 0065ffb8d96d..429a013d2e0a 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 4585e6549844..2aa439124ee3 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index b962982f017e..50d3a59a3d73 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 5e728d0b7959..55e527a489fe 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4f95777ce080..f2131f45aa9b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -34,6 +34,12 @@
/* protect update critical side of if_list - but not the content */
static DEFINE_SPINLOCK(if_list_lock);
+
+static int batman_skb_recv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
+
static void hardif_free_rcu(struct rcu_head *rcu)
{
struct batman_if *batman_if;
@@ -549,8 +555,9 @@ out:
/* receive a packet with the batman ethertype coming on a hard
* interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
{
struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 30ec3b8db459..ad195438428a 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -35,10 +35,6 @@ struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
void hardif_disable_interface(struct batman_if *batman_if);
void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 26e623eb9def..fa2693973ab8 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 09216ade16f1..eae24402fd0a 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -49,11 +49,6 @@ struct hashtable_t {
/* allocates and clears the hash */
struct hashtable_t *hash_new(int size);
-/* remove element if you already found the element you want to delete and don't
- * need the overhead to find it again with hash_remove(). But usually, you
- * don't want to use this function, as it fiddles with hash-internals. */
-void *hash_remove_element(struct hashtable_t *hash, struct element_t *elem);
-
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index ecf6d7ffab2e..319a7ccf6efa 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include "icmp_socket.h"
#include "send.h"
-#include "types.h"
#include "hash.h"
#include "originator.h"
#include "hard-interface.h"
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index bf9b348cde27..462b190fa101 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#include "types.h"
-
#define ICMP_SOCKET "socket"
void bat_socket_init(void);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b827f6a158cb..06d956c91c27 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -30,7 +30,6 @@
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
-#include "types.h"
#include "vis.h"
#include "hash.h"
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 65106fb61b8f..e235d7bbe045 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,9 +22,6 @@
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-/* Kernel Programming */
-#define LINUX
-
#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
"Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
#define DRIVER_DESC "B.A.T.M.A.N. advanced"
@@ -54,7 +51,6 @@
#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
-#define PACKBUFF_SIZE 2000
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
#define VIS_INTERVAL 5000 /* 5 seconds */
@@ -96,15 +92,11 @@
#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
#define DBG_ALL 3
-#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
-
/*
* Vis
*/
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
/*
* Kernel headers
*/
@@ -158,13 +150,6 @@ static inline void bat_dbg(char type __always_unused,
}
#endif
-#define bat_warning(net_dev, fmt, arg...) \
- do { \
- struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
- pr_warning("%s: " fmt, _netdev->name, ## arg); \
- } while (0)
#define bat_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6b7fb6b7e6f9..54863c9385de 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -247,7 +247,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
orig_node->hna_buff_len);
/* update bonding candidates, we could have lost
* some candidates. */
- update_bonding_candidates(bat_priv, orig_node);
+ update_bonding_candidates(orig_node);
}
}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d474ceb2a4eb..8019fbddffd0 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 2284e8129cb2..e7571879af3f 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -50,6 +50,7 @@
/* fragmentation defines */
#define UNI_FRAG_HEAD 0x01
+#define UNI_FRAG_LARGETAIL 0x02
struct batman_packet {
uint8_t packet_type;
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index defd37c9be1f..5bb6a619afee 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 6b0cb9aaeba5..0395b2741864 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8828eddd3f72..827414067e46 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -28,7 +28,6 @@
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
-#include "types.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
@@ -433,8 +432,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
}
/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
+static void mark_bonding_address(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
struct batman_packet *batman_packet)
@@ -447,8 +445,7 @@ static void mark_bonding_address(struct bat_priv *bat_priv,
}
/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+void update_bonding_candidates(struct orig_node *orig_node)
{
int candidates;
int interference_candidate;
@@ -730,9 +727,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
update_orig(bat_priv, orig_node, ethhdr, batman_packet,
if_incoming, hna_buff, hna_buff_len, is_duplicate);
- mark_bonding_address(bat_priv, orig_node,
- orig_neigh_node, batman_packet);
- update_bonding_candidates(bat_priv, orig_node);
+ mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
+ update_bonding_candidates(orig_node);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
@@ -810,13 +806,11 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
{
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
- struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -848,7 +842,6 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
return NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -866,17 +859,15 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
}
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
+ struct sk_buff *skb)
{
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
- struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -909,7 +900,6 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
return NET_RX_DROP;
icmp_packet = (struct icmp_packet *) skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -978,7 +968,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
/* TTL exceeded */
if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
+ return recv_icmp_ttl_exceeded(bat_priv, skb);
ret = NET_RX_DROP;
@@ -1001,7 +991,6 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
return NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* decrement ttl */
icmp_packet->ttl--;
@@ -1193,7 +1182,7 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
dstaddr);
if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
- 2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {
+ frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index f108f230bfdb..a09d16f0c3ab 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_ROUTING_H_
#define _NET_BATMAN_ADV_ROUTING_H_
-#include "types.h"
-
void slide_own_bcast_window(struct batman_if *batman_if);
void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
@@ -42,7 +40,6 @@ int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node);
+void update_bonding_candidates(struct orig_node *orig_node);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index b89b9f7709ae..831427694fc2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -25,7 +25,6 @@
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
-#include "types.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
@@ -49,7 +48,7 @@ static unsigned long own_send_time(struct bat_priv *bat_priv)
}
/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
{
return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
@@ -356,7 +355,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
else
batman_packet->flags &= ~DIRECTLINK;
- send_time = forward_send_time(bat_priv);
+ send_time = forward_send_time();
add_bat_packet_to_list(bat_priv,
(unsigned char *)batman_packet,
sizeof(struct batman_packet) + hna_buff_len,
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index c4cefa8e4f85..b68c272cb84f 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_SEND_H_
#define _NET_BATMAN_ADV_SEND_H_
-#include "types.h"
-
int send_skb_packet(struct sk_buff *skb,
struct batman_if *batman_if,
uint8_t *dst_addr);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e89ede192ed0..bd088f877e38 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -26,7 +26,6 @@
#include "send.h"
#include "bat_debugfs.h"
#include "translation-table.h"
-#include "types.h"
#include "hash.h"
#include "gateway_common.h"
#include "gateway_client.h"
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 02b77334d10d..e7b0e1a34a55 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a633b5a435e2..7fb6726ccbdd 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,7 +22,6 @@
#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
-#include "types.h"
#include "hash.h"
#include "originator.h"
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 10c4c5c319b6..f19931ca1457 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-#include "types.h"
-
int hna_local_init(struct bat_priv *bat_priv);
void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
void hna_local_remove(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index bf3f6f5a12c4..7270405046e9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index d1a611322549..121b11d2a23d 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -39,8 +39,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
(struct unicast_frag_packet *)skb->data;
struct sk_buff *tmp_skb;
struct unicast_packet *unicast_packet;
- int hdr_len = sizeof(struct unicast_packet),
- uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
+ int hdr_len = sizeof(struct unicast_packet);
+ int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
/* set skb to the first part and tmp_skb to the second part */
if (up->flags & UNI_FRAG_HEAD) {
@@ -229,7 +229,9 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct unicast_frag_packet *frag1, *frag2;
int uc_hdr_len = sizeof(struct unicast_packet);
int ucf_hdr_len = sizeof(struct unicast_frag_packet);
- int data_len = skb->len;
+ int data_len = skb->len - uc_hdr_len;
+ int large_tail = 0;
+ uint16_t seqno;
if (!bat_priv->primary_if)
goto dropped;
@@ -237,10 +239,11 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
if (!frag_skb)
goto dropped;
+ skb_reserve(frag_skb, ucf_hdr_len);
unicast_packet = (struct unicast_packet *) skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
- skb_split(skb, frag_skb, data_len / 2);
+ skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
@@ -258,13 +261,15 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
- frag1->flags |= UNI_FRAG_HEAD;
- frag2->flags &= ~UNI_FRAG_HEAD;
+ if (data_len & 1)
+ large_tail = UNI_FRAG_LARGETAIL;
+
+ frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag2->flags = large_tail;
- frag1->seqno = htons((uint16_t)atomic_inc_return(
- &batman_if->frag_seqno));
- frag2->seqno = htons((uint16_t)atomic_inc_return(
- &batman_if->frag_seqno));
+ seqno = atomic_add_return(2, &batman_if->frag_seqno);
+ frag1->seqno = htons(seqno - 1);
+ frag2->seqno = htons(seqno);
send_skb_packet(skb, batman_if, dstaddr);
send_skb_packet(frag_skb, batman_if, dstaddr);
@@ -281,7 +286,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct unicast_packet *unicast_packet;
- struct orig_node *orig_node;
+ struct orig_node *orig_node = NULL;
struct batman_if *batman_if;
struct neigh_node *router;
int data_len = skb->len;
@@ -292,11 +297,6 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest))
orig_node = (struct orig_node *)gw_get_selected(bat_priv);
- else
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
- compare_orig,
- choose_orig,
- ethhdr->h_dest));
/* check for hna host */
if (!orig_node)
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index e32b7867a9a4..8897308281d4 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -22,6 +22,8 @@
#ifndef _NET_BATMAN_ADV_UNICAST_H_
#define _NET_BATMAN_ADV_UNICAST_H_
+#include "packet.h"
+
#define FRAG_TIMEOUT 10000 /* purge frag list entrys after time in ms */
#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
@@ -32,4 +34,25 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct batman_if *batman_if, uint8_t dstaddr[]);
+static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+{
+ struct unicast_frag_packet *unicast_packet;
+ int uneven_correction = 0;
+ unsigned int merged_size;
+
+ unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+ if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
+ if (unicast_packet->flags & UNI_FRAG_HEAD)
+ uneven_correction = 1;
+ else
+ uneven_correction = -1;
+ }
+
+ merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+ merged_size += sizeof(struct unicast_packet) + uneven_correction;
+
+ return merged_size <= mtu;
+}
+
#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index de1022cacaf7..7db9ad82cc00 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 2c3b33089a9b..31b820d07f23 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..6ae5ec508587 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,31 +27,27 @@ menuconfig BT
compile it as module (bluetooth).
To use Linux Bluetooth subsystem, you will need several user-space
- utilities like hciconfig and hcid. These utilities and updates to
- Bluetooth kernel modules are provided in the BlueZ packages.
- For more information, see <http://www.bluez.org/>.
+ utilities like hciconfig and bluetoothd. These utilities and updates
+ to Bluetooth kernel modules are provided in the BlueZ packages. For
+ more information, see <http://www.bluez.org/>.
+
+if BT != n
config BT_L2CAP
- tristate "L2CAP protocol support"
- depends on BT
+ bool "L2CAP protocol support"
select CRC16
help
L2CAP (Logical Link Control and Adaptation Protocol) provides
connection oriented and connection-less data transport. L2CAP
support is required for most Bluetooth applications.
- Say Y here to compile L2CAP support into the kernel or say M to
- compile it as module (l2cap).
-
config BT_SCO
- tristate "SCO links support"
- depends on BT
+ bool "SCO links support"
help
SCO link provides voice transport over Bluetooth. SCO support is
required for voice applications like Headset and Audio.
- Say Y here to compile SCO support into the kernel or say M to
- compile it as module (sco).
+endif
source "net/bluetooth/rfcomm/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 250f954f0213..f04fe9a9d634 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,11 @@
#
obj-$(CONFIG_BT) += bluetooth.o
-obj-$(CONFIG_BT_L2CAP) += l2cap.o
-obj-$(CONFIG_BT_SCO) += sco.o
obj-$(CONFIG_BT_RFCOMM) += rfcomm/
obj-$(CONFIG_BT_BNEP) += bnep/
obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index c4cf3f595004..8add9b499912 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,7 +40,7 @@
#include <net/bluetooth/bluetooth.h>
-#define VERSION "2.15"
+#define VERSION "2.16"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
BT_DBG("parent %p", parent);
+ local_bh_disable();
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
- lock_sock(sk);
+ bh_lock_sock(sk);
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
- release_sock(sk);
+ bh_unlock_sock(sk);
bt_accept_unlink(sk);
continue;
}
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
- release_sock(sk);
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
return sk;
}
- release_sock(sk);
+ bh_unlock_sock(sk);
}
+ local_bh_enable();
+
return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (copied >= target)
break;
- if ((err = sock_error(sk)) != 0)
+ err = sock_error(sk);
+ if (err)
break;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
-unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
@@ -538,13 +545,39 @@ static int __init bt_init(void)
BT_INFO("HCI device and connection manager initialized");
- hci_sock_init();
+ err = hci_sock_init();
+ if (err < 0)
+ goto error;
+
+ err = l2cap_init();
+ if (err < 0)
+ goto sock_err;
+
+ err = sco_init();
+ if (err < 0) {
+ l2cap_exit();
+ goto sock_err;
+ }
return 0;
+
+sock_err:
+ hci_sock_cleanup();
+
+error:
+ sock_unregister(PF_BLUETOOTH);
+ bt_sysfs_cleanup();
+
+ return err;
}
static void __exit bt_exit(void)
{
+
+ sco_exit();
+
+ l2cap_exit();
+
hci_sock_cleanup();
sock_unregister(PF_BLUETOOTH);
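Editorial note: the reworked bt_init() above unwinds in reverse order when a later step fails (a sco_init() failure tears down L2CAP, and socket/HCI-socket failures fall through the sock_err/error labels). The following standalone sketch shows that goto-unwind pattern with stand-in functions, not the real kernel symbols.

	/* --- editorial example, not part of the diff --- */
	#include <stdio.h>

	static int step_a(void) { return 0; }
	static int step_b(void) { return 0; }
	static int step_c(void) { return -1; }	/* pretend the last step fails */
	static void undo_a(void) { puts("undo a"); }
	static void undo_b(void) { puts("undo b"); }

	static int init_all(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;

		err = step_b();
		if (err)
			goto err_a;

		err = step_c();
		if (err)
			goto err_b;

		return 0;

	err_b:
		undo_b();
	err_a:
		undo_a();
		return err;
	}

	int main(void)
	{
		printf("init_all() = %d\n", init_all());
		return 0;
	}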
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5868597534e5..03d4d1245d58 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
{
char flt[50] = "";
- l2cap_load();
-
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
strcat(flt, "protocol ");
#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b1..d935da71ab3b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
sockfd_put(nsock);
return -EBADFD;
}
+ ca.device[sizeof(ca.device)-1] = 0;
err = bnep_add_connection(&ca, nsock);
if (!err) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74aec..67cff810c77d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
- if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) {
+ skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for interoperability packet");
return;
}
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 8e5f292529ac..964ea9126f9f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
size = (skb) ? skb->len + count : count;
- if (!(nskb = alloc_skb(size, GFP_ATOMIC))) {
+ nskb = alloc_skb(size, GFP_ATOMIC);
+ if (!nskb) {
BT_ERR("Can't allocate memory for CAPI message");
return;
}
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
BT_DBG("session %p", session);
- if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
+ nskb = alloc_skb(session->mtu, GFP_ATOMIC);
+ if (!nskb) {
BT_ERR("Can't allocate memory for new frame");
return;
}
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
while ((skb = skb_dequeue(&session->transmit))) {
struct cmtp_scb *scb = (void *) skb->cb;
- if ((tail = (session->mtu - nskb->len)) < 5) {
+ tail = session->mtu - nskb->len;
+ if (tail < 5) {
cmtp_send_frame(session, nskb->data, nskb->len);
skb_trim(nskb, 0);
tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
static int __init cmtp_init(void)
{
- l2cap_load();
-
BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
cmtp_init_sockets();
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 99cd8d9d891b..7a6f56b2f49d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -45,6 +45,33 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+static void hci_le_connect(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_cp_le_create_conn cp;
+
+ conn->state = BT_CONNECT;
+ conn->out = 1;
+ conn->link_mode |= HCI_LM_MASTER;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.scan_interval = cpu_to_le16(0x0004);
+ cp.scan_window = cpu_to_le16(0x0004);
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.conn_interval_min = cpu_to_le16(0x0008);
+ cp.conn_interval_max = cpu_to_le16(0x0100);
+ cp.supervision_timeout = cpu_to_le16(0x0064);
+ cp.min_ce_len = cpu_to_le16(0x0001);
+ cp.max_ce_len = cpu_to_le16(0x0001);
+
+ hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+}
+
+static void hci_le_connect_cancel(struct hci_conn *conn)
+{
+ hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+}
+
void hci_acl_connect(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+ u16 latency, u16 to_multiplier)
+{
+ struct hci_cp_le_conn_update cp;
+ struct hci_dev *hdev = conn->hdev;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.conn_interval_min = cpu_to_le16(min);
+ cp.conn_interval_max = cpu_to_le16(max);
+ cp.conn_latency = cpu_to_le16(latency);
+ cp.supervision_timeout = cpu_to_le16(to_multiplier);
+ cp.min_ce_len = cpu_to_le16(0x0001);
+ cp.max_ce_len = cpu_to_le16(0x0001);
+
+ hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_conn_update);
+
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
switch (conn->state) {
case BT_CONNECT:
case BT_CONNECT2:
- if (conn->type == ACL_LINK && conn->out)
- hci_acl_connect_cancel(conn);
+ if (conn->out) {
+ if (conn->type == ACL_LINK)
+ hci_acl_connect_cancel(conn);
+ else if (conn->type == LE_LINK)
+ hci_le_connect_cancel(conn);
+ }
break;
case BT_CONFIG:
case BT_CONNECTED:
@@ -234,6 +285,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->mode = HCI_CM_ACTIVE;
conn->state = BT_OPEN;
conn->auth_type = HCI_AT_GENERAL_BONDING;
+ conn->io_capability = hdev->io_capability;
+ conn->remote_auth = 0xff;
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +348,11 @@ int hci_conn_del(struct hci_conn *conn)
/* Unacked frames */
hdev->acl_cnt += conn->sent;
+ } else if (conn->type == LE_LINK) {
+ if (hdev->le_pkts)
+ hdev->le_cnt += conn->sent;
+ else
+ hdev->acl_cnt += conn->sent;
} else {
struct hci_conn *acl = conn->link;
if (acl) {
@@ -360,15 +418,31 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
}
EXPORT_SYMBOL(hci_get_route);
-/* Create SCO or ACL connection.
+/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
+ struct hci_conn *le;
BT_DBG("%s dst %s", hdev->name, batostr(dst));
+ if (type == LE_LINK) {
+ le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (le)
+ return ERR_PTR(-EBUSY);
+ le = hci_conn_add(hdev, LE_LINK, dst);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+ if (le->state == BT_OPEN)
+ hci_le_connect(le);
+
+ hci_conn_hold(le);
+
+ return le;
+ }
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst);
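Editorial note: the fixed parameters that hci_le_connect() above passes to HCI_OP_LE_CREATE_CONN are expressed in the Bluetooth specification's raw units (0.625 ms per scan-value step, 1.25 ms per connection-interval step, 10 ms per supervision-timeout step). A small standalone sketch, not part of the patch, that decodes those constants into milliseconds:

	/* --- editorial example, not part of the diff --- */
	#include <stdio.h>

	int main(void)
	{
		unsigned int scan_interval = 0x0004;	/* 0.625 ms units */
		unsigned int conn_int_min  = 0x0008;	/* 1.25 ms units  */
		unsigned int conn_int_max  = 0x0100;	/* 1.25 ms units  */
		unsigned int supv_timeout  = 0x0064;	/* 10 ms units    */

		printf("scan interval : %.3f ms\n", scan_interval * 0.625);
		printf("conn interval : %.2f - %.2f ms\n",
		       conn_int_min * 1.25, conn_int_max * 1.25);
		printf("supervision   : %u ms\n", supv_timeout * 10);
		return 0;
	}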
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9c4541bc488a..b372fb8bcdcf 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
+#include <linux/timer.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -50,6 +51,8 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#define AUTO_OFF_TIMEOUT 2000
+
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
- /* If the request has set req_last_cmd (typical for multi-HCI
- * command requests) check if the completed command matches
- * this, and if not just return. Single HCI command requests
- * typically leave req_last_cmd as 0 */
- if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
+ /* If this is the init phase check if the completed command matches
+ * the last init command, and if not just return.
+ */
+ if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
return;
if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+ unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
break;
}
- hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
+ hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+ unsigned long opt, __u32 timeout)
{
int ret;
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
+ struct hci_cp_delete_stored_link_key cp;
struct sk_buff *skb;
__le16 param;
__u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
flt_type = HCI_FLT_CLEAR_ALL;
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
- /* Page timeout ~20 secs */
- param = cpu_to_le16(0x8000);
- hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
-
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
- hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 1;
+ hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+}
+
+static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+ BT_DBG("%s", hdev->name);
+
+ /* Read LE buffer size */
+ hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
if (copy_from_user(&ir, ptr, sizeof(ir)))
return -EFAULT;
- if (!(hdev = hci_dev_get(ir.dev_id)))
+ hdev = hci_dev_get(ir.dev_id);
+ if (!hdev)
return -ENODEV;
hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
- buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
+ buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
struct hci_dev *hdev;
int ret = 0;
- if (!(hdev = hci_dev_get(dev)))
+ hdev = hci_dev_get(dev);
+ if (!hdev)
return -ENODEV;
BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
+ hdev->init_last_cmd = 0;
- //__hci_request(hdev, hci_reset_req, 0, HZ);
ret = __hci_request(hdev, hci_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ if (lmp_le_capable(hdev))
+ ret = __hci_request(hdev, hci_le_init_req, 0,
+ msecs_to_jiffies(HCI_INIT_TIMEOUT));
+
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
+ if (!test_bit(HCI_SETUP, &hdev->flags))
+ mgmt_powered(hdev->id, 1);
} else {
/* Init failed, cleanup */
tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Drop last sent command */
if (hdev->sent_cmd) {
+ del_timer_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
* and no tasks are scheduled. */
hdev->close(hdev);
+ mgmt_powered(hdev->id, 0);
+
/* Clear flags */
hdev->flags = 0;
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
hdev->flush(hdev);
atomic_set(&hdev->cmd_cnt, 1);
- hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+ hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
read_lock_bh(&hci_dev_list_lock);
list_for_each(p, &hci_dev_list) {
struct hci_dev *hdev;
+
hdev = list_entry(p, struct hci_dev, list);
+
+ hci_del_off_timer(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
+
if (++n >= dev_num)
break;
}
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
if (!hdev)
return -ENODEV;
+ hci_del_off_timer(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_free_dev);
+static void hci_power_on(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+
+ BT_DBG("%s", hdev->name);
+
+ if (hci_dev_open(hdev->id) < 0)
+ return;
+
+ if (test_bit(HCI_AUTO_OFF, &hdev->flags))
+ mod_timer(&hdev->off_timer,
+ jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+
+ if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
+ mgmt_index_added(hdev->id);
+}
+
+static void hci_power_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_close(hdev->id);
+}
+
+static void hci_auto_off(unsigned long data)
+{
+ struct hci_dev *hdev = (struct hci_dev *) data;
+
+ BT_DBG("%s", hdev->name);
+
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
+ queue_work(hdev->workqueue, &hdev->power_off);
+}
+
+void hci_del_off_timer(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ del_timer(&hdev->off_timer);
+}
+
+int hci_uuids_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->uuids) {
+ struct bt_uuid *uuid;
+
+ uuid = list_entry(p, struct bt_uuid, list);
+
+ list_del(p);
+ kfree(uuid);
+ }
+
+ return 0;
+}
+
+int hci_link_keys_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->link_keys) {
+ struct link_key *key;
+
+ key = list_entry(p, struct link_key, list);
+
+ list_del(p);
+ kfree(key);
+ }
+
+ return 0;
+}
+
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct list_head *p;
+
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
+
+ k = list_entry(p, struct link_key, list);
+
+ if (bacmp(bdaddr, &k->bdaddr) == 0)
+ return k;
+ }
+
+ return NULL;
+}
+
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+ u8 *val, u8 type, u8 pin_len)
+{
+ struct link_key *key, *old_key;
+ u8 old_key_type;
+
+ old_key = hci_find_link_key(hdev, bdaddr);
+ if (old_key) {
+ old_key_type = old_key->type;
+ key = old_key;
+ } else {
+ old_key_type = 0xff;
+ key = kzalloc(sizeof(*key), GFP_ATOMIC);
+ if (!key)
+ return -ENOMEM;
+ list_add(&key->list, &hdev->link_keys);
+ }
+
+ BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+
+ bacpy(&key->bdaddr, bdaddr);
+ memcpy(key->val, val, 16);
+ key->type = type;
+ key->pin_len = pin_len;
+
+ if (new_key)
+ mgmt_new_key(hdev->id, key, old_key_type);
+
+ if (type == 0x06)
+ key->type = old_key_type;
+
+ return 0;
+}
+
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct link_key *key;
+
+ key = hci_find_link_key(hdev, bdaddr);
+ if (!key)
+ return -ENOENT;
+
+ BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+ list_del(&key->list);
+ kfree(key);
+
+ return 0;
+}
+
+/* HCI command timer function */
+static void hci_cmd_timer(unsigned long arg)
+{
+ struct hci_dev *hdev = (void *) arg;
+
+ BT_ERR("%s command tx timeout", hdev->name);
+ atomic_set(&hdev->cmd_cnt, 1);
+ tasklet_schedule(&hdev->cmd_task);
+}
+
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
+ hdev->io_capability = 0x03; /* No Input No Output */
hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+
for (i = 0; i < NUM_REASSEMBLY; i++)
hdev->reassembly[i] = NULL;
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+
+ INIT_LIST_HEAD(&hdev->link_keys);
+
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->power_off, hci_power_off);
+ setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
}
}
- mgmt_index_added(hdev->id);
+ set_bit(HCI_AUTO_OFF, &hdev->flags);
+ set_bit(HCI_SETUP, &hdev->flags);
+ queue_work(hdev->workqueue, &hdev->power_on);
+
hci_notify(hdev, HCI_DEV_REG);
return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
- mgmt_index_removed(hdev->id);
+ if (!test_bit(HCI_INIT, &hdev->flags) &&
+ !test_bit(HCI_SETUP, &hdev->flags))
+ mgmt_index_removed(hdev->id);
+
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
@@ -1009,10 +1212,14 @@ int hci_unregister_dev(struct hci_dev *hdev)
hci_unregister_sysfs(hdev);
+ hci_del_off_timer(hdev);
+
destroy_workqueue(hdev->workqueue);
hci_dev_lock_bh(hdev);
hci_blacklist_clear(hdev);
+ hci_uuids_clear(hdev);
+ hci_link_keys_clear(hdev);
hci_dev_unlock_bh(hdev);
__hci_dev_put(hdev);
@@ -1313,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
/* Time stamp */
__net_timestamp(skb);
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
}
/* Get rid of skb owner, prior to sending to the driver. */
@@ -1349,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
+ if (test_bit(HCI_INIT, &hdev->flags))
+ hdev->init_last_cmd = opcode;
+
skb_queue_tail(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
@@ -1395,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
+ hci_add_acl_hdr(skb, conn->handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
@@ -1413,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
spin_lock_bh(&conn->data_q.lock);
__skb_queue_tail(&conn->data_q, skb);
+
+ flags &= ~ACL_START;
+ flags |= ACL_CONT;
do {
skb = list; list = list->next;
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
+ hci_add_acl_hdr(skb, conn->handle, flags);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1486,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
}
if (conn) {
- int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
- int q = cnt / num;
+ int cnt, q;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
*quote = q ? q : 1;
} else
*quote = 0;
@@ -1496,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
return conn;
}
-static inline void hci_acl_tx_to(struct hci_dev *hdev)
+static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct list_head *p;
struct hci_conn *c;
- BT_ERR("%s ACL tx timeout", hdev->name);
+ BT_ERR("%s link tx timeout", hdev->name);
/* Kill stalled connections */
list_for_each(p, &h->list) {
c = list_entry(p, struct hci_conn, list);
- if (c->type == ACL_LINK && c->sent) {
- BT_ERR("%s killing stalled ACL connection %s",
+ if (c->type == type && c->sent) {
+ BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
@@ -1527,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
- hci_acl_tx_to(hdev);
+ hci_link_tx_to(hdev, ACL_LINK);
}
while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1586,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
}
}
+static inline void hci_sched_le(struct hci_dev *hdev)
+{
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+ int quote, cnt;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
+ /* LE tx timeout must be longer than maximum
+ * link supervision timeout (40.9 seconds) */
+ if (!hdev->le_cnt && hdev->le_pkts &&
+ time_after(jiffies, hdev->le_last_tx + HZ * 45))
+ hci_link_tx_to(hdev, LE_LINK);
+ }
+
+ cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+ while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+
+ hci_send_frame(skb);
+ hdev->le_last_tx = jiffies;
+
+ cnt--;
+ conn->sent++;
+ }
+ }
+ if (hdev->le_pkts)
+ hdev->le_cnt = cnt;
+ else
+ hdev->acl_cnt = cnt;
+}
+
static void hci_tx_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1593,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
read_lock(&hci_task_lock);
- BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
+ BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
+ hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
@@ -1603,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
hci_sched_esco(hdev);
+ hci_sched_le(hdev);
+
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
@@ -1700,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
}
if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1750,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
- if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
- BT_ERR("%s command tx timeout", hdev->name);
- atomic_set(&hdev->cmd_cnt, 1);
- }
-
/* Send queued commands */
- if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
+ if (atomic_read(&hdev->cmd_cnt)) {
+ skb = skb_dequeue(&hdev->cmd_q);
+ if (!skb)
+ return;
+
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
- hdev->cmd_last_tx = jiffies;
+ mod_timer(&hdev->cmd_timer,
+ jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
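Editorial note: the new hci_sched_le() above draws its transmit budget from a dedicated LE buffer pool only when the controller reported one (le_pkts != 0, as stored by hci_cc_le_read_buffer_size()); otherwise LE traffic shares the ACL buffer count. A standalone sketch of that fallback, with made-up buffer counts and a stand-in struct rather than the real struct hci_dev:

	/* --- editorial example, not part of the diff --- */
	#include <stdio.h>

	struct fake_hdev {
		unsigned int acl_cnt;	/* free ACL buffers */
		unsigned int le_cnt;	/* free LE buffers  */
		unsigned int le_pkts;	/* LE buffers reported by controller */
	};

	static unsigned int le_tx_budget(const struct fake_hdev *hdev)
	{
		return hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	}

	int main(void)
	{
		struct fake_hdev with_le = { .acl_cnt = 8, .le_cnt = 4, .le_pkts = 4 };
		struct fake_hdev no_le   = { .acl_cnt = 8, .le_cnt = 0, .le_pkts = 0 };

		printf("dedicated LE buffers : budget %u\n", le_tx_budget(&with_le));
		printf("no LE buffers        : budget %u\n", le_tx_budget(&no_le));
		return 0;
	}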
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a290854fdaa6..3fbfa50c2bff 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!status) {
__u8 param = *((__u8 *) sent);
+ int old_pscan, old_iscan;
- clear_bit(HCI_PSCAN, &hdev->flags);
- clear_bit(HCI_ISCAN, &hdev->flags);
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
- if (param & SCAN_INQUIRY)
+ if (param & SCAN_INQUIRY) {
set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev->id, 1);
+ } else if (old_iscan)
+ mgmt_discoverable(hdev->id, 0);
- if (param & SCAN_PAGE)
+ if (param & SCAN_PAGE) {
set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev->id, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev->id, 0);
}
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
hdev->ssp_mode = *((__u8 *) sent);
}
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+ if (hdev->features[6] & LMP_EXT_INQ)
+ return 2;
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ return 1;
+
+ if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+ hdev->lmp_subver == 0x0757)
+ return 1;
+
+ if (hdev->manufacturer == 15) {
+ if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+ return 1;
+ if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+ return 1;
+ if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+ return 1;
+ }
+
+ if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+ hdev->lmp_subver == 0x1805)
+ return 1;
+
+ return 0;
+}
+
+static void hci_setup_inquiry_mode(struct hci_dev *hdev)
+{
+ u8 mode;
+
+ mode = hci_get_inquiry_mode(hdev);
+
+ hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_dev *hdev)
+{
+ /* The second byte is 0xff instead of 0x9f (two reserved bits
+ * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+ * command otherwise */
+ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ /* Events for 1.2 and newer controllers */
+ if (hdev->lmp_ver > 1) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ events[4] |= 0x04; /* Inquiry Result with RSSI */
+
+ if (hdev->features[5] & LMP_SNIFF_SUBR)
+ events[5] |= 0x20; /* Sniff Subrating */
+
+ if (hdev->features[5] & LMP_PAUSE_ENC)
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+ if (hdev->features[6] & LMP_EXT_INQ)
+ events[5] |= 0x40; /* Extended Inquiry Result */
+
+ if (hdev->features[6] & LMP_NO_FLUSH)
+ events[7] |= 0x01; /* Enhanced Flush Complete */
+
+ if (hdev->features[7] & LMP_LSTO)
+ events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+ if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ events[6] |= 0x01; /* IO Capability Request */
+ events[6] |= 0x02; /* IO Capability Response */
+ events[6] |= 0x04; /* User Confirmation Request */
+ events[6] |= 0x08; /* User Passkey Request */
+ events[6] |= 0x10; /* Remote OOB Data Request */
+ events[6] |= 0x20; /* Simple Pairing Complete */
+ events[7] |= 0x04; /* User Passkey Notification */
+ events[7] |= 0x08; /* Keypress Notification */
+ events[7] |= 0x10; /* Remote Host Supported
+ * Features Notification */
+ }
+
+ if (hdev->features[4] & LMP_LE)
+ events[7] |= 0x20; /* LE Meta-Event */
+
+ hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+}
+
+static void hci_setup(struct hci_dev *hdev)
+{
+ hci_setup_event_mask(hdev);
+
+ if (hdev->lmp_ver > 1)
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+ if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ u8 mode = 0x01;
+ hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
+ }
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ hci_setup_inquiry_mode(hdev);
+
+ if (hdev->features[7] & LMP_INQ_TX_PWR)
+ hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+}
+
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+ hdev->lmp_ver = rp->lmp_ver;
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+ hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
hdev->manufacturer,
hdev->hci_ver, hdev->hci_rev);
+
+ if (test_bit(HCI_INIT, &hdev->flags))
+ hci_setup(hdev);
+}
+
+static void hci_setup_link_policy(struct hci_dev *hdev)
+{
+ u16 link_policy = 0;
+
+ if (hdev->features[0] & LMP_RSWITCH)
+ link_policy |= HCI_LP_RSWITCH;
+ if (hdev->features[0] & LMP_HOLD)
+ link_policy |= HCI_LP_HOLD;
+ if (hdev->features[0] & LMP_SNIFF)
+ link_policy |= HCI_LP_SNIFF;
+ if (hdev->features[1] & LMP_PARK)
+ link_policy |= HCI_LP_PARK;
+
+ link_policy = cpu_to_le16(link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ sizeof(link_policy), &link_policy);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
- return;
+ goto done;
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+
+ if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
+ hci_setup_link_policy(hdev);
+
+done:
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -548,6 +695,130 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
+}
+
+static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
+}
+
+static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
+}
+
+static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+}
+
+static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
+}
+
+static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+ struct hci_cp_pin_code_reply *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+
+ if (rp->status != 0)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
+ if (!cp)
+ return;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn)
+ conn->pin_length = cp->pin_len;
+}
+
+static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+ rp->status);
+}
+static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
+ hdev->le_pkts = rp->le_max_pkt;
+
+ hdev->le_cnt = hdev->le_pkts;
+
+ BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
+ hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
+}
+
+static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+ rp->status);
+}
+
+static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+ rp->status);
+}
+
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +893,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
- if (acl && (sco = acl->link)) {
- sco->state = BT_CLOSED;
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
- hci_conn_del(sco);
+ hci_proto_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
}
hci_dev_unlock(hdev);
@@ -687,7 +961,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
@@ -808,11 +1082,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
- if (acl && (sco = acl->link)) {
- sco->state = BT_CLOSED;
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
- hci_conn_del(sco);
+ hci_proto_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
}
hci_dev_unlock(hdev);
@@ -872,6 +1149,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
hci_dev_unlock(hdev);
}
+static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_le_create_conn *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+
+ BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
+ conn);
+
+ if (status) {
+ if (conn && conn->state == BT_CONNECT) {
+ conn->state = BT_CLOSED;
+ hci_proto_connect_cfm(conn, status);
+ hci_conn_del(conn);
+ }
+ } else {
+ if (!conn) {
+ conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
+ if (conn)
+ conn->out = 1;
+ else
+ BT_ERR("No memory for new connection");
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -942,6 +1256,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr);
} else
conn->state = BT_CONNECTED;
@@ -970,8 +1285,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
sizeof(cp), &cp);
}
- } else
+ } else {
conn->state = BT_CLOSED;
+ if (conn->type == ACL_LINK)
+ mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ }
if (conn->type == ACL_LINK)
hci_sco_setup(conn, ev->status);
@@ -998,7 +1316,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
- if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ if ((mask & HCI_LM_ACCEPT) &&
+ !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1068,19 +1387,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, ev->status);
- if (ev->status)
+ if (ev->status) {
+ mgmt_disconnect_failed(hdev->id);
return;
+ }
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- conn->state = BT_CLOSED;
+ if (!conn)
+ goto unlock;
- hci_proto_disconn_cfm(conn, ev->reason);
- hci_conn_del(conn);
- }
+ conn->state = BT_CLOSED;
+
+ if (conn->type == ACL_LINK)
+ mgmt_disconnected(hdev->id, &conn->dst);
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -1098,8 +1424,10 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
if (!ev->status) {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
- } else
+ } else {
+ mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
conn->sec_level = BT_SECURITY_LOW;
+ }
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1393,11 +1721,54 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_write_ca_timeout(hdev, skb);
break;
+ case HCI_OP_DELETE_STORED_LINK_KEY:
+ hci_cc_delete_stored_link_key(hdev, skb);
+ break;
+
+ case HCI_OP_SET_EVENT_MASK:
+ hci_cc_set_event_mask(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_INQUIRY_MODE:
+ hci_cc_write_inquiry_mode(hdev, skb);
+ break;
+
+ case HCI_OP_READ_INQ_RSP_TX_POWER:
+ hci_cc_read_inq_rsp_tx_power(hdev, skb);
+ break;
+
+ case HCI_OP_SET_EVENT_FLT:
+ hci_cc_set_event_flt(hdev, skb);
+ break;
+
+ case HCI_OP_PIN_CODE_REPLY:
+ hci_cc_pin_code_reply(hdev, skb);
+ break;
+
+ case HCI_OP_PIN_CODE_NEG_REPLY:
+ hci_cc_pin_code_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_BUFFER_SIZE:
+ hci_cc_le_read_buffer_size(hdev, skb);
+ break;
+
+ case HCI_OP_USER_CONFIRM_REPLY:
+ hci_cc_user_confirm_reply(hdev, skb);
+ break;
+
+ case HCI_OP_USER_CONFIRM_NEG_REPLY:
+ hci_cc_user_confirm_neg_reply(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
}
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
@@ -1459,11 +1830,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
+ case HCI_OP_DISCONNECT:
+ if (ev->status != 0)
+ mgmt_disconnect_failed(hdev->id);
+ break;
+
+ case HCI_OP_LE_CREATE_CONN:
+ hci_cs_le_create_conn(hdev, ev->status);
+ break;
+
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
}
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
@@ -1529,6 +1912,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
+ } else if (conn->type == LE_LINK) {
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ }
} else {
hdev->sco_cnt += count;
if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1586,18 +1979,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
hci_conn_put(conn);
}
+ if (!test_bit(HCI_PAIRABLE, &hdev->flags))
+ hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_request(hdev->id, &ev->bdaddr);
+
hci_dev_unlock(hdev);
}
static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
+ struct hci_ev_link_key_req *ev = (void *) skb->data;
+ struct hci_cp_link_key_reply cp;
+ struct hci_conn *conn;
+ struct link_key *key;
+
BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ key = hci_find_link_key(hdev, &ev->bdaddr);
+ if (!key) {
+ BT_DBG("%s link key not found for %s", hdev->name,
+ batostr(&ev->bdaddr));
+ goto not_found;
+ }
+
+ BT_DBG("%s found key type %u for %s", hdev->name, key->type,
+ batostr(&ev->bdaddr));
+
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
+ BT_DBG("%s ignoring debug key", hdev->name);
+ goto not_found;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+
+ if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
+ (conn->auth_type & 0x01)) {
+ BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ goto not_found;
+ }
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ memcpy(cp.link_key, key->val, 16);
+
+ hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
+
+ hci_dev_unlock(hdev);
+
+ return;
+
+not_found:
+ hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
+ hci_dev_unlock(hdev);
}
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
+ u8 pin_len = 0;
BT_DBG("%s", hdev->name);
@@ -1607,9 +2054,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ pin_len = conn->pin_length;
hci_conn_put(conn);
}
+ if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+ hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
+ ev->key_type, pin_len);
+
hci_dev_unlock(hdev);
}
@@ -1683,7 +2135,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
- struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
+ struct inquiry_info_with_rssi_and_pscan_mode *info;
+ info = (void *) (skb->data + 1);
for (; num_rsp; num_rsp--) {
bacpy(&data.bdaddr, &info->bdaddr);
@@ -1824,17 +2277,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_sniff_subrate *ev = (void *) skb->data;
- struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- }
-
- hci_dev_unlock(hdev);
}
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1852,12 +2296,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
for (; num_rsp; num_rsp--) {
bacpy(&data.bdaddr, &info->bdaddr);
- data.pscan_rep_mode = info->pscan_rep_mode;
- data.pscan_period_mode = info->pscan_period_mode;
- data.pscan_mode = 0x00;
+ data.pscan_rep_mode = info->pscan_rep_mode;
+ data.pscan_period_mode = info->pscan_period_mode;
+ data.pscan_mode = 0x00;
memcpy(data.dev_class, info->dev_class, 3);
- data.clock_offset = info->clock_offset;
- data.rssi = info->rssi;
+ data.clock_offset = info->clock_offset;
+ data.rssi = info->rssi;
data.ssp_mode = 0x01;
info++;
hci_inquiry_cache_update(hdev, &data);
@@ -1866,6 +2310,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
+static inline u8 hci_get_auth_req(struct hci_conn *conn)
+{
+ /* If remote requests dedicated bonding follow that lead */
+ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+ /* If both remote and local IO capabilities allow MITM
+ * protection then require it, otherwise don't */
+ if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
+ return 0x02;
+ else
+ return 0x03;
+ }
+
+ /* If remote requests no-bonding follow that lead */
+ if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+ return 0x00;
+
+ return conn->auth_type;
+}
+
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1876,8 +2339,73 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn)
- hci_conn_hold(conn);
+ if (!conn)
+ goto unlock;
+
+ hci_conn_hold(conn);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ goto unlock;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
+ (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+ struct hci_cp_io_capability_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ cp.capability = conn->io_capability;
+ cp.oob_data = 0;
+ cp.authentication = hci_get_auth_req(conn);
+
+ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
+ sizeof(cp), &cp);
+ } else {
+ struct hci_cp_io_capability_neg_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ cp.reason = 0x16; /* Pairing not allowed */
+
+ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
+ sizeof(cp), &cp);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_io_capa_reply *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ goto unlock;
+
+ hci_conn_hold(conn);
+
+ conn->remote_cap = ev->capability;
+ conn->remote_oob = ev->oob_data;
+ conn->remote_auth = ev->authentication;
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_confirm_req *ev = (void *) skb->data;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
hci_dev_unlock(hdev);
}
@@ -1892,9 +2420,20 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn)
- hci_conn_put(conn);
+ if (!conn)
+ goto unlock;
+
+ /* To avoid duplicate auth_failed events to user space we check
+ * the HCI_CONN_AUTH_PEND flag which will be set if we
+ * initiated the authentication. A traditional auth_complete
+ * event gets always produced as initiator and is also mapped to
+ * the mgmt_auth_failed event */
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
+ mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+
+ hci_conn_put(conn);
+unlock:
hci_dev_unlock(hdev);
}
@@ -1914,6 +2453,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
hci_dev_unlock(hdev);
}
+static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status %d", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
+ if (!conn) {
+ conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+ if (!conn) {
+ BT_ERR("No memory for new connection");
+ hci_dev_unlock(hdev);
+ return;
+ }
+ }
+
+ if (ev->status) {
+ hci_proto_connect_cfm(conn, ev->status);
+ conn->state = BT_CLOSED;
+ hci_conn_del(conn);
+ goto unlock;
+ }
+
+ conn->handle = __le16_to_cpu(ev->handle);
+ conn->state = BT_CONNECTED;
+
+ hci_conn_hold_device(conn);
+ hci_conn_add_sysfs(conn);
+
+ hci_proto_connect_cfm(conn, ev->status);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_meta *le_ev = (void *) skb->data;
+
+ skb_pull(skb, sizeof(*le_ev));
+
+ switch (le_ev->subevent) {
+ case HCI_EV_LE_CONN_COMPLETE:
+ hci_le_conn_complete_evt(hdev, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2042,6 +2635,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_io_capa_request_evt(hdev, skb);
break;
+ case HCI_EV_IO_CAPA_REPLY:
+ hci_io_capa_reply_evt(hdev, skb);
+ break;
+
+ case HCI_EV_USER_CONFIRM_REQUEST:
+ hci_user_confirm_request_evt(hdev, skb);
+ break;
+
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;
@@ -2050,6 +2651,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_remote_host_features_evt(hdev, skb);
break;
+ case HCI_EV_LE_META:
+ hci_le_meta_evt(hdev, skb);
+ break;
+
default:
BT_DBG("%s event 0x%x", hdev->name, event);
break;
@@ -2083,6 +2688,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
skb->dev = (void *) hdev;
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
kfree_skb(skb);
}
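Editorial note: hci_get_auth_req() above derives the local Secure Simple Pairing authentication requirement from what the remote side requested and from both IO capabilities (0x03 is NoInputNoOutput; auth values 0x02/0x03 mean dedicated bonding without/with MITM protection). A standalone sketch, not part of the patch, exercising the same decision on a few sample inputs:

	/* --- editorial example, not part of the diff --- */
	#include <stdio.h>

	static unsigned char get_auth_req(unsigned char remote_auth,
					  unsigned char remote_cap,
					  unsigned char local_cap,
					  unsigned char fallback)
	{
		/* Remote asked for dedicated bonding: require MITM only if
		 * both sides are capable of it. */
		if (remote_auth == 0x02 || remote_auth == 0x03)
			return (remote_cap == 0x03 || local_cap == 0x03) ? 0x02 : 0x03;

		/* Remote asked for no bonding: follow that lead. */
		if (remote_auth == 0x00 || remote_auth == 0x01)
			return 0x00;

		return fallback;
	}

	int main(void)
	{
		/* dedicated bonding requested, both sides have usable IO -> 0x03 */
		printf("0x%02x\n", get_auth_req(0x02, 0x01, 0x01, 0x04));
		/* dedicated bonding requested, local side NoInputNoOutput -> 0x02 */
		printf("0x%02x\n", get_auth_req(0x02, 0x01, 0x03, 0x04));
		/* remote explicitly requests no bonding -> 0x00 */
		printf("0x%02x\n", get_auth_req(0x01, 0x01, 0x01, 0x04));
		return 0;
	}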
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 29827c77f6ce..295e4a88fff8 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
};
/* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sock *skip_sk)
{
struct sock *sk;
struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_filter *flt;
struct sk_buff *nskb;
+ if (sk == skip_sk)
+ continue;
+
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
@@ -857,7 +861,7 @@ error:
return err;
}
-void __exit hci_sock_cleanup(void)
+void hci_sock_cleanup(void)
{
if (bt_sock_unregister(BTPROTO_HCI) < 0)
BT_ERR("HCI socket unregistration failed");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5fce3d6d07b4..3c838a65a75a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -11,7 +11,7 @@
static struct class *bt_class;
-struct dentry *bt_debugfs = NULL;
+struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
conn->features[6], conn->features[7]);
}
-#define LINK_ATTR(_name,_mode,_show,_store) \
-struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
+#define LINK_ATTR(_name, _mode, _show, _store) \
+struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+static void print_bt_uuid(struct seq_file *f, u8 *uuid)
+{
+ u32 data0, data4;
+ u16 data1, data2, data3, data5;
+
+ memcpy(&data0, &uuid[0], 4);
+ memcpy(&data1, &uuid[4], 2);
+ memcpy(&data2, &uuid[6], 2);
+ memcpy(&data3, &uuid[8], 2);
+ memcpy(&data4, &uuid[10], 4);
+ memcpy(&data5, &uuid[14], 2);
+
+ seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
+ ntohl(data0), ntohs(data1), ntohs(data2),
+ ntohs(data3), ntohl(data4), ntohs(data5));
+}
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *l;
+
+ hci_dev_lock_bh(hdev);
+
+ list_for_each(l, &hdev->uuids) {
+ struct bt_uuid *uuid;
+
+ uuid = list_entry(l, struct bt_uuid, list);
+
+ print_bt_uuid(f, uuid->uuid);
+ }
+
+ hci_dev_unlock_bh(hdev);
+
+ return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+ .open = uuids_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
debugfs_create_file("blacklist", 0444, hdev->debugfs,
hdev, &blacklist_fops);
+ debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
return 0;
}
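Editorial note: print_bt_uuid() above slices a raw 16-byte UUID into the usual 8-4-4-4-12 text form for the new debugfs "uuids" file. The same slicing in a standalone program, not part of the patch, applied to the well-known Bluetooth base UUID:

	/* --- editorial example, not part of the diff --- */
	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static void print_uuid(const unsigned char *uuid)
	{
		uint32_t data0, data4;
		uint16_t data1, data2, data3, data5;

		memcpy(&data0, &uuid[0], 4);
		memcpy(&data1, &uuid[4], 2);
		memcpy(&data2, &uuid[6], 2);
		memcpy(&data3, &uuid[8], 2);
		memcpy(&data4, &uuid[10], 4);
		memcpy(&data5, &uuid[14], 2);

		printf("%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
		       ntohl(data0), ntohs(data1), ntohs(data2),
		       ntohs(data3), ntohl(data4), ntohs(data5));
	}

	int main(void)
	{
		/* Bluetooth base UUID: 00000000-0000-1000-8000-00805f9b34fb */
		static const unsigned char base[16] = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
			0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb
		};

		print_uuid(base);
		return 0;
	}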
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 29544c21f4b5..5ec12971af6b 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -36,6 +36,7 @@
#include <linux/file.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <linux/input.h>
@@ -157,7 +158,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
session->leds = newleds;
- if (!(skb = alloc_skb(3, GFP_ATOMIC))) {
+ skb = alloc_skb(3, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
@@ -250,7 +252,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
BT_DBG("session %p data %p size %d", session, data, size);
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+ skb = alloc_skb(size + 1, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
@@ -283,7 +286,8 @@ static int hidp_queue_report(struct hidp_session *session,
BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+ skb = alloc_skb(size + 1, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
@@ -313,24 +317,144 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
return hidp_queue_report(session, buf, rsize);
}
+static int hidp_get_raw_report(struct hid_device *hid,
+ unsigned char report_number,
+ unsigned char *data, size_t count,
+ unsigned char report_type)
+{
+ struct hidp_session *session = hid->driver_data;
+ struct sk_buff *skb;
+ size_t len;
+ int numbered_reports = hid->report_enum[report_type].numbered;
+
+ switch (report_type) {
+ case HID_FEATURE_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ break;
+ case HID_INPUT_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT;
+ break;
+ case HID_OUTPUT_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&session->report_mutex))
+ return -ERESTARTSYS;
+
+ /* Set up our wait, and send the report request to the device. */
+ session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK;
+ session->waiting_report_number = numbered_reports ? report_number : -1;
+ set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ data[0] = report_number;
+ if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
+ goto err_eio;
+
+ /* Wait for the return of the report. The returned report
+ gets put in session->report_return. */
+ while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
+ int res;
+
+ res = wait_event_interruptible_timeout(session->report_queue,
+ !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags),
+ 5*HZ);
+ if (res == 0) {
+ /* timeout */
+ goto err_eio;
+ }
+ if (res < 0) {
+ /* signal */
+ goto err_restartsys;
+ }
+ }
+
+ skb = session->report_return;
+ if (skb) {
+ len = skb->len < count ? skb->len : count;
+ memcpy(data, skb->data, len);
+
+ kfree_skb(skb);
+ session->report_return = NULL;
+ } else {
+ /* Device returned a HANDSHAKE, indicating protocol error. */
+ len = -EIO;
+ }
+
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
+
+ return len;
+
+err_restartsys:
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return -ERESTARTSYS;
+err_eio:
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return -EIO;
+}
+
static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
unsigned char report_type)
{
+ struct hidp_session *session = hid->driver_data;
+ int ret;
+
switch (report_type) {
case HID_FEATURE_REPORT:
report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
break;
case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
break;
default:
return -EINVAL;
}
+ if (mutex_lock_interruptible(&session->report_mutex))
+ return -ERESTARTSYS;
+
+ /* Set up our wait, and send the report request to the device. */
+ set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count))
- return -ENOMEM;
- return count;
+ data, count)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Wait for the ACK from the device. */
+ while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
+ int res;
+
+ res = wait_event_interruptible_timeout(session->report_queue,
+ !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags),
+ 10*HZ);
+ if (res == 0) {
+ /* timeout */
+ ret = -EIO;
+ goto err;
+ }
+ if (res < 0) {
+ /* signal */
+ ret = -ERESTARTSYS;
+ goto err;
+ }
+ }
+
+ if (!session->output_report_success) {
+ ret = -EIO;
+ goto err;
+ }
+
+ ret = count;
+
+err:
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return ret;
}
static void hidp_idle_timeout(unsigned long arg)
@@ -357,16 +481,22 @@ static void hidp_process_handshake(struct hidp_session *session,
unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
+ session->output_report_success = 0; /* default condition */
switch (param) {
case HIDP_HSHK_SUCCESSFUL:
/* FIXME: Call into SET_ GET_ handlers here */
+ session->output_report_success = 1;
break;
case HIDP_HSHK_NOT_READY:
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
+ if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+ }
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -385,6 +515,12 @@ static void hidp_process_handshake(struct hidp_session *session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
break;
}
+
+ /* Wake up the waiting thread. */
+ if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+ }
}
static void hidp_process_hid_control(struct hidp_session *session,
@@ -403,9 +539,11 @@ static void hidp_process_hid_control(struct hidp_session *session,
}
}
-static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
+/* Returns true if the passed-in skb should be freed by the caller. */
+static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
+ int done_with_skb = 1;
BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
switch (param) {
@@ -417,7 +555,6 @@ static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
if (session->hid)
hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
-
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -429,12 +566,27 @@ static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
__hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
}
+
+ if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
+ param == session->waiting_report_type) {
+ if (session->waiting_report_number < 0 ||
+ session->waiting_report_number == skb->data[0]) {
+ /* hidp_get_raw_report() is waiting on this report. */
+ session->report_return = skb;
+ done_with_skb = 0;
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+ }
+ }
+
+ return done_with_skb;
}
static void hidp_recv_ctrl_frame(struct hidp_session *session,
struct sk_buff *skb)
{
unsigned char hdr, type, param;
+ int free_skb = 1;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -454,7 +606,7 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
break;
case HIDP_TRANS_DATA:
- hidp_process_data(session, skb, param);
+ free_skb = hidp_process_data(session, skb, param);
break;
default:
@@ -463,7 +615,8 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
break;
}
- kfree_skb(skb);
+ if (free_skb)
+ kfree_skb(skb);
}
static void hidp_recv_intr_frame(struct hidp_session *session,
@@ -563,6 +716,8 @@ static int hidp_session(void *arg)
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
add_wait_queue(sk_sleep(intr_sk), &intr_wait);
+ session->waiting_for_startup = 0;
+ wake_up_interruptible(&session->startup_queue);
while (!atomic_read(&session->terminate)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -754,6 +909,8 @@ static struct hid_ll_driver hidp_hid_driver = {
.hidinput_input_event = hidp_hidinput_event,
};
+/* This function sets up the hid device. It does not add it
+ to the HID system. That is done in hidp_add_connection(). */
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
@@ -793,18 +950,11 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->dev.parent = hidp_get_device(session);
hid->ll_driver = &hidp_hid_driver;
+ hid->hid_get_raw_report = hidp_get_raw_report;
hid->hid_output_raw_report = hidp_output_raw_report;
- err = hid_add_device(hid);
- if (err < 0)
- goto failed;
-
return 0;
-failed:
- hid_destroy_device(hid);
- session->hid = NULL;
-
fault:
kfree(session->rd_data);
session->rd_data = NULL;
@@ -853,6 +1003,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
skb_queue_head_init(&session->ctrl_transmit);
skb_queue_head_init(&session->intr_transmit);
+ mutex_init(&session->report_mutex);
+ init_waitqueue_head(&session->report_queue);
+ init_waitqueue_head(&session->startup_queue);
+ session->waiting_for_startup = 1;
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
@@ -875,6 +1029,14 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
err = kernel_thread(hidp_session, session, CLONE_KERNEL);
if (err < 0)
goto unlink;
+ while (session->waiting_for_startup) {
+ wait_event_interruptible(session->startup_queue,
+ !session->waiting_for_startup);
+ }
+
+ err = hid_add_device(session->hid);
+ if (err < 0)
+ goto err_add_device;
if (session->input) {
hidp_send_ctrl_message(session,
@@ -888,6 +1050,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
up_write(&hidp_session_sem);
return 0;
+err_add_device:
+ hid_destroy_device(session->hid);
+ session->hid = NULL;
+ atomic_inc(&session->terminate);
+ hidp_schedule(session);
+
unlink:
hidp_del_timer(session);
@@ -1016,8 +1184,6 @@ static int __init hidp_init(void)
{
int ret;
- l2cap_load();
-
BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 8d934a19da0a..13de5fa03480 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -80,6 +80,8 @@
#define HIDP_VIRTUAL_CABLE_UNPLUG 0
#define HIDP_BOOT_PROTOCOL_MODE 1
#define HIDP_BLUETOOTH_VENDOR_ID 9
+#define HIDP_WAITING_FOR_RETURN 10
+#define HIDP_WAITING_FOR_SEND_ACK 11
struct hidp_connadd_req {
int ctrl_sock; // Connected control socket
@@ -154,9 +156,22 @@ struct hidp_session {
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
+ /* Used in hidp_get_raw_report() */
+ int waiting_report_type; /* HIDP_DATA_RTYPE_* */
+ int waiting_report_number; /* -1 for not numbered */
+ struct mutex report_mutex;
+ struct sk_buff *report_return;
+ wait_queue_head_t report_queue;
+
+ /* Used in hidp_output_raw_report() */
+ int output_report_success; /* boolean */
+
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
+
+ wait_queue_head_t startup_queue;
+ int waiting_for_startup;
};
static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap_core.c
index 675614e38e14..c9f9cecca527 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap_core.c
@@ -24,7 +24,7 @@
SOFTWARE IS DISCLAIMED.
*/
-/* Bluetooth L2CAP core and sockets. */
+/* Bluetooth L2CAP core. */
#include <linux/module.h>
@@ -55,79 +55,24 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#define VERSION "2.15"
-
-static int disable_ertm;
+int disable_ertm;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
-static const struct proto_ops l2cap_sock_ops;
-
static struct workqueue_struct *_busy_wq;
-static struct bt_sock_list l2cap_sk_list = {
+struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
static void l2cap_busy_work(struct work_struct *work);
-static void __l2cap_sock_close(struct sock *sk, int reason);
-static void l2cap_sock_close(struct sock *sk);
-static void l2cap_sock_kill(struct sock *sk);
-
-static int l2cap_build_conf_req(struct sock *sk, void *data);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-static void l2cap_sock_clear_timer(struct sock *sk)
-{
- BT_DBG("sock %p state %d", sk, sk->sk_state);
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
-static void l2cap_sock_timeout(unsigned long arg)
-{
- struct sock *sk = (struct sock *) arg;
- int reason;
-
- BT_DBG("sock %p state %d", sk, sk->sk_state);
-
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- sock_put(sk);
- return;
- }
-
- if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
- reason = ECONNREFUSED;
- else if (sk->sk_state == BT_CONNECT &&
- l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
- reason = ECONNREFUSED;
- else
- reason = ETIMEDOUT;
-
- __l2cap_sock_close(sk, reason);
-
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
- sock_put(sk);
-}
-
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
@@ -236,8 +181,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
l2cap_pi(sk)->conn = conn;
if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
- /* Alloc CID for connection-oriented socket */
- l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+ if (conn->hcon->type == LE_LINK) {
+ /* LE connection */
+ l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
+ l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
+ l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
+ } else {
+ /* Alloc CID for connection-oriented socket */
+ l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+ }
} else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
@@ -258,7 +211,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
/* Delete channel.
* Must be called on the locked socket. */
-static void l2cap_chan_del(struct sock *sk, int err)
+void l2cap_chan_del(struct sock *sk, int err)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
@@ -348,7 +301,7 @@ static inline int l2cap_check_security(struct sock *sk)
auth_type);
}
-static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
+u8 l2cap_get_ident(struct l2cap_conn *conn)
{
u8 id;
@@ -370,16 +323,22 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
return id;
}
-static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
+ u8 flags;
BT_DBG("code 0x%2.2x", code);
if (!skb)
return;
- hci_send_acl(conn->hcon, skb, 0);
+ if (lmp_no_flush_capable(conn->hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ hci_send_acl(conn->hcon, skb, flags);
}
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -389,6 +348,7 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
struct l2cap_conn *conn = pi->conn;
struct sock *sk = (struct sock *)pi;
int count, hlen = L2CAP_HDR_SIZE + 2;
+ u8 flags;
if (sk->sk_state != BT_CONNECTED)
return;
@@ -425,7 +385,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
put_unaligned_le16(fcs, skb_put(skb, 2));
}
- hci_send_acl(pi->conn->hcon, skb, 0);
+ if (lmp_no_flush_capable(conn->hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ hci_send_acl(pi->conn->hcon, skb, flags);
}
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
@@ -496,7 +461,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
}
}
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
struct l2cap_disconn_req req;
@@ -624,6 +589,82 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
}
}
+/* Find socket with cid and source bdaddr.
+ * Returns closest match, locked.
+ */
+static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+{
+ struct sock *s, *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
+
+ read_lock(&l2cap_sk_list.lock);
+
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (state && sk->sk_state != state)
+ continue;
+
+ if (l2cap_pi(sk)->scid == cid) {
+ /* Exact match. */
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
+
+ /* Closest match */
+ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ sk1 = sk;
+ }
+ }
+ s = node ? sk : sk1;
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l2cap_sk_list.lock);
+
+ return s;
+}
+
+static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+{
+ struct l2cap_chan_list *list = &conn->chan_list;
+ struct sock *parent, *uninitialized_var(sk);
+
+ BT_DBG("");
+
+ /* Check if we have socket listening on cid */
+ parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+ conn->src);
+ if (!parent)
+ return;
+
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ goto clean;
+ }
+
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+ if (!sk)
+ goto clean;
+
+ write_lock_bh(&list->lock);
+
+ hci_conn_hold(conn->hcon);
+
+ l2cap_sock_init(sk, parent);
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
+
+ __l2cap_chan_add(conn, sk, parent);
+
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+ sk->sk_state = BT_CONNECTED;
+ parent->sk_data_ready(parent, 0);
+
+ write_unlock_bh(&list->lock);
+
+clean:
+ bh_unlock_sock(parent);
+}
+
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan_list *l = &conn->chan_list;
@@ -631,11 +672,20 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
BT_DBG("conn %p", conn);
+ if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_sock_clear_timer(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ }
+
if (sk->sk_type != SOCK_SEQPACKET &&
sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
@@ -694,7 +744,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p conn %p", hcon, conn);
- conn->mtu = hcon->hdev->acl_mtu;
+ if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
+ conn->mtu = hcon->hdev->le_mtu;
+ else
+ conn->mtu = hcon->hdev->acl_mtu;
+
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
@@ -703,7 +757,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);
- setup_timer(&conn->info_timer, l2cap_info_timeout,
+ if (hcon->type != LE_LINK)
+ setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);
conn->disc_reason = 0x13;
@@ -747,17 +802,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
}
/* ---- Socket interface ---- */
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
- struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &l2cap_sk_list.head)
- if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
- goto found;
- sk = NULL;
-found:
- return sk;
-}
/* Find socket with psm and source bdaddr.
* Returns closest match.
@@ -789,277 +833,7 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
return node ? sk : sk1;
}
-static void l2cap_sock_destruct(struct sock *sk)
-{
- BT_DBG("sk %p", sk);
-
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL)))
- l2cap_sock_close(sk);
-
- parent->sk_state = BT_CLOSED;
- sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
- */
-static void l2cap_sock_kill(struct sock *sk)
-{
- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
- return;
-
- BT_DBG("sk %p state %d", sk, sk->sk_state);
-
- /* Kill poor orphan */
- bt_sock_unlink(&l2cap_sk_list, sk);
- sock_set_flag(sk, SOCK_DEAD);
- sock_put(sk);
-}
-
-static void __l2cap_sock_close(struct sock *sk, int reason)
-{
- BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
- switch (sk->sk_state) {
- case BT_LISTEN:
- l2cap_sock_cleanup_listen(sk);
- break;
-
- case BT_CONNECTED:
- case BT_CONFIG:
- if (sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) {
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- l2cap_send_disconn_req(conn, sk, reason);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT2:
- if (sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) {
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (bt_sk(sk)->defer_setup)
- result = L2CAP_CR_SEC_BLOCK;
- else
- result = L2CAP_CR_BAD_PSM;
- sk->sk_state = BT_DISCONN;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT:
- case BT_DISCONN:
- l2cap_chan_del(sk, reason);
- break;
-
- default:
- sock_set_flag(sk, SOCK_ZAPPED);
- break;
- }
-}
-
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
- l2cap_sock_clear_timer(sk);
- lock_sock(sk);
- __l2cap_sock_close(sk, ECONNRESET);
- release_sock(sk);
- l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- BT_DBG("sk %p", sk);
-
- if (parent) {
- sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
-
- pi->imtu = l2cap_pi(parent)->imtu;
- pi->omtu = l2cap_pi(parent)->omtu;
- pi->conf_state = l2cap_pi(parent)->conf_state;
- pi->mode = l2cap_pi(parent)->mode;
- pi->fcs = l2cap_pi(parent)->fcs;
- pi->max_tx = l2cap_pi(parent)->max_tx;
- pi->tx_win = l2cap_pi(parent)->tx_win;
- pi->sec_level = l2cap_pi(parent)->sec_level;
- pi->role_switch = l2cap_pi(parent)->role_switch;
- pi->force_reliable = l2cap_pi(parent)->force_reliable;
- } else {
- pi->imtu = L2CAP_DEFAULT_MTU;
- pi->omtu = 0;
- if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
- pi->mode = L2CAP_MODE_ERTM;
- pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
- } else {
- pi->mode = L2CAP_MODE_BASIC;
- }
- pi->max_tx = L2CAP_DEFAULT_MAX_TX;
- pi->fcs = L2CAP_FCS_CRC16;
- pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- pi->sec_level = BT_SECURITY_LOW;
- pi->role_switch = 0;
- pi->force_reliable = 0;
- }
-
- /* Default config options */
- pi->conf_len = 0;
- pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
- skb_queue_head_init(TX_QUEUE(sk));
- skb_queue_head_init(SREJ_QUEUE(sk));
- skb_queue_head_init(BUSY_QUEUE(sk));
- INIT_LIST_HEAD(SREJ_LIST(sk));
-}
-
-static struct proto l2cap_proto = {
- .name = "L2CAP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct l2cap_pinfo)
-};
-
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
-{
- struct sock *sk;
-
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
- if (!sk)
- return NULL;
-
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
- sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
-
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
- setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
-
- bt_sock_link(&l2cap_sk_list, sk);
- return sk;
-}
-
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
-
- BT_DBG("sock %p", sock);
-
- sock->state = SS_UNCONNECTED;
-
- if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
- sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
- return -ESOCKTNOSUPPORT;
-
- if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
- return -EPERM;
-
- sock->ops = &l2cap_sock_ops;
-
- sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
- if (!sk)
- return -ENOMEM;
-
- l2cap_sock_init(sk, NULL);
- return 0;
-}
-
-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_l2 la;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (!addr || addr->sa_family != AF_BLUETOOTH)
- return -EINVAL;
-
- memset(&la, 0, sizeof(la));
- len = min_t(unsigned int, sizeof(la), alen);
- memcpy(&la, addr, len);
-
- if (la.l2_cid)
- return -EINVAL;
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_OPEN) {
- err = -EBADFD;
- goto done;
- }
-
- if (la.l2_psm) {
- __u16 psm = __le16_to_cpu(la.l2_psm);
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((psm & 0x0101) != 0x0001) {
- err = -EINVAL;
- goto done;
- }
-
- /* Restrict usage of well-known PSMs */
- if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
- err = -EACCES;
- goto done;
- }
- }
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
- l2cap_pi(sk)->sport = la.l2_psm;
- sk->sk_state = BT_BOUND;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_do_connect(struct sock *sk)
+int l2cap_do_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -1078,23 +852,27 @@ static int l2cap_do_connect(struct sock *sk)
hci_dev_lock_bh(hdev);
- err = -ENOMEM;
-
auth_type = l2cap_get_auth_type(sk);
- hcon = hci_connect(hdev, ACL_LINK, dst,
+ if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+ hcon = hci_connect(hdev, LE_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
- if (!hcon)
+ else
+ hcon = hci_connect(hdev, ACL_LINK, dst,
+ l2cap_pi(sk)->sec_level, auth_type);
+
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
goto done;
+ }
conn = l2cap_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
+ err = -ENOMEM;
goto done;
}
- err = 0;
-
/* Update source addr of the socket */
bacpy(src, conn->src);
@@ -1113,236 +891,15 @@ static int l2cap_do_connect(struct sock *sk)
l2cap_do_start(sk);
}
+ err = 0;
+
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_l2 la;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (!addr || alen < sizeof(addr->sa_family) ||
- addr->sa_family != AF_BLUETOOTH)
- return -EINVAL;
-
- memset(&la, 0, sizeof(la));
- len = min_t(unsigned int, sizeof(la), alen);
- memcpy(&la, addr, len);
-
- if (la.l2_cid)
- return -EINVAL;
-
- lock_sock(sk);
-
- if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
- && !la.l2_psm) {
- err = -EINVAL;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- goto wait;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- goto done;
- }
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
- sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
-
- err = l2cap_do_connect(sk);
- if (err)
- goto done;
-
-wait:
- err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_listen(struct socket *sock, int backlog)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- BT_DBG("sk %p backlog %d", sk, backlog);
-
- lock_sock(sk);
-
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
- err = -EBADFD;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- if (!l2cap_pi(sk)->psm) {
- bdaddr_t *src = &bt_sk(sk)->src;
- u16 psm;
-
- err = -EINVAL;
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- for (psm = 0x1001; psm < 0x1100; psm += 2)
- if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
- l2cap_pi(sk)->psm = cpu_to_le16(psm);
- l2cap_pi(sk)->sport = cpu_to_le16(psm);
- err = 0;
- break;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
- if (err < 0)
- goto done;
- }
-
- sk->sk_max_ack_backlog = backlog;
- sk->sk_ack_backlog = 0;
- sk->sk_state = BT_LISTEN;
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct sock *sk = sock->sk, *nsk;
- long timeo;
- int err = 0;
-
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
- BT_DBG("sk %p timeo %ld", sk, timeo);
-
- /* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- break;
- }
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(sk), &wait);
-
- if (err)
- goto done;
-
- newsock->state = SS_CONNECTED;
-
- BT_DBG("new socket %p", nsk);
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
-{
- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
- struct sock *sk = sock->sk;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- addr->sa_family = AF_BLUETOOTH;
- *len = sizeof(struct sockaddr_l2);
-
- if (peer) {
- la->l2_psm = l2cap_pi(sk)->psm;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- } else {
- la->l2_psm = l2cap_pi(sk)->sport;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
- }
-
- return 0;
-}
-
-static int __l2cap_wait_ack(struct sock *sk)
+int __l2cap_wait_ack(struct sock *sk)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@@ -1428,16 +985,23 @@ static void l2cap_drop_acked_frames(struct sock *sk)
del_timer(&l2cap_pi(sk)->retrans_timer);
}
-static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct hci_conn *hcon = pi->conn->hcon;
+ u16 flags;
BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- hci_send_acl(pi->conn->hcon, skb, 0);
+ if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ hci_send_acl(hcon, skb, flags);
}
-static void l2cap_streaming_send(struct sock *sk)
+void l2cap_streaming_send(struct sock *sk)
{
struct sk_buff *skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1506,7 +1070,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
l2cap_do_send(sk, tx_skb);
}
-static int l2cap_ertm_send(struct sock *sk)
+int l2cap_ertm_send(struct sock *sk)
{
struct sk_buff *skb, *tx_skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1646,7 +1210,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
@@ -1675,7 +1239,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr
return skb;
}
-static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
@@ -1703,7 +1267,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *ms
return skb;
}
-static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
@@ -1748,7 +1312,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
return skb;
}
-static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *skb;
@@ -1794,487 +1358,6 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
return size;
}
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
-{
- struct sock *sk = sock->sk;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb;
- u16 control;
- int err;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- err = sock_error(sk);
- if (err)
- return err;
-
- if (msg->msg_flags & MSG_OOB)
- return -EOPNOTSUPP;
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- err = -ENOTCONN;
- goto done;
- }
-
- /* Connectionless channel */
- if (sk->sk_type == SOCK_DGRAM) {
- skb = l2cap_create_connless_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- } else {
- l2cap_do_send(sk, skb);
- err = len;
- }
- goto done;
- }
-
- switch (pi->mode) {
- case L2CAP_MODE_BASIC:
- /* Check outgoing MTU */
- if (len > pi->omtu) {
- err = -EMSGSIZE;
- goto done;
- }
-
- /* Create a basic PDU */
- skb = l2cap_create_basic_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
-
- l2cap_do_send(sk, skb);
- err = len;
- break;
-
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= pi->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
- skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
- __skb_queue_tail(TX_QUEUE(sk), skb);
-
- if (sk->sk_send_head == NULL)
- sk->sk_send_head = skb;
-
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(sk, msg, len);
- if (err < 0)
- goto done;
- }
-
- if (pi->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(sk);
- } else {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->conn_state & L2CAP_CONN_WAIT_F)) {
- err = len;
- break;
- }
- err = l2cap_ertm_send(sk);
- }
-
- if (err >= 0)
- err = len;
- break;
-
- default:
- BT_DBG("bad state %1.1x", pi->mode);
- err = -EBADFD;
- }
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
-{
- struct sock *sk = sock->sk;
-
- lock_sock(sk);
-
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
- struct l2cap_conn_rsp rsp;
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- u8 buf[128];
-
- sk->sk_state = BT_CONFIG;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
- release_sock(sk);
- return 0;
- }
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
-
- release_sock(sk);
- return 0;
- }
-
- release_sock(sk);
-
- if (sock->type == SOCK_STREAM)
- return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
-
- return bt_sock_recvmsg(iocb, sock, msg, len, flags);
-}
-
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct l2cap_options opts;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- lock_sock(sk);
-
- switch (optname) {
- case L2CAP_OPTIONS:
- if (sk->sk_state == BT_CONNECTED) {
- err = -EINVAL;
- break;
- }
-
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
- len = min_t(unsigned int, sizeof(opts), optlen);
- if (copy_from_user((char *) &opts, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->mode = opts.mode;
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->imtu = opts.imtu;
- l2cap_pi(sk)->omtu = opts.omtu;
- l2cap_pi(sk)->fcs = opts.fcs;
- l2cap_pi(sk)->max_tx = opts.max_tx;
- l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
- break;
-
- case L2CAP_LM:
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- if (opt & L2CAP_LM_AUTH)
- l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
- if (opt & L2CAP_LM_ENCRYPT)
- l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
- if (opt & L2CAP_LM_SECURE)
- l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
-
- l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
- l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- if (level == SOL_L2CAP)
- return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
-
- if (level != SOL_BLUETOOTH)
- return -ENOPROTOOPT;
-
- lock_sock(sk);
-
- switch (optname) {
- case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- sec.level = BT_SECURITY_LOW;
-
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_user((char *) &sec, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (sec.level < BT_SECURITY_LOW ||
- sec.level > BT_SECURITY_HIGH) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->sec_level = sec.level;
- break;
-
- case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- bt_sk(sk)->defer_setup = opt;
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct l2cap_options opts;
- struct l2cap_conninfo cinfo;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case L2CAP_OPTIONS:
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
- len = min_t(unsigned int, len, sizeof(opts));
- if (copy_to_user(optval, (char *) &opts, len))
- err = -EFAULT;
-
- break;
-
- case L2CAP_LM:
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_LOW:
- opt = L2CAP_LM_AUTH;
- break;
- case BT_SECURITY_MEDIUM:
- opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
- break;
- case BT_SECURITY_HIGH:
- opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
- L2CAP_LM_SECURE;
- break;
- default:
- opt = 0;
- break;
- }
-
- if (l2cap_pi(sk)->role_switch)
- opt |= L2CAP_LM_MASTER;
-
- if (l2cap_pi(sk)->force_reliable)
- opt |= L2CAP_LM_RELIABLE;
-
- if (put_user(opt, (u32 __user *) optval))
- err = -EFAULT;
- break;
-
- case L2CAP_CONNINFO:
- if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
- err = -ENOTCONN;
- break;
- }
-
- cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
- memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
-
- len = min_t(unsigned int, len, sizeof(cinfo));
- if (copy_to_user(optval, (char *) &cinfo, len))
- err = -EFAULT;
-
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (level == SOL_L2CAP)
- return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
-
- if (level != SOL_BLUETOOTH)
- return -ENOPROTOOPT;
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- sec.level = l2cap_pi(sk)->sec_level;
-
- len = min_t(unsigned int, len, sizeof(sec));
- if (copy_to_user(optval, (char *) &sec, len))
- err = -EFAULT;
-
- break;
-
- case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
- err = -EFAULT;
-
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_shutdown(struct socket *sock, int how)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- if (!sk)
- return 0;
-
- lock_sock(sk);
- if (!sk->sk_shutdown) {
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
- err = __l2cap_wait_ack(sk);
-
- sk->sk_shutdown = SHUTDOWN_MASK;
- l2cap_sock_clear_timer(sk);
- __l2cap_sock_close(sk, 0);
-
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
- err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
- }
-
- if (!err && sk->sk_err)
- err = -sk->sk_err;
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- int err;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- if (!sk)
- return 0;
-
- err = l2cap_sock_shutdown(sock, 2);
-
- sock_orphan(sk);
- l2cap_sock_kill(sk);
- return err;
-}
-
static void l2cap_chan_ready(struct sock *sk)
{
struct sock *parent = bt_sk(sk)->parent;
@@ -2346,7 +1429,11 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
- lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+
+ if (conn->hcon->type == LE_LINK)
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+ else
+ lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
@@ -2493,7 +1580,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
}
}
-static int l2cap_build_conf_req(struct sock *sk, void *data)
+int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
@@ -2518,11 +1605,11 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
}
done:
+ if (pi->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+
switch (pi->mode) {
case L2CAP_MODE_BASIC:
- if (pi->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
-
if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
break;
@@ -2585,10 +1672,6 @@ done:
break;
}
- /* FIXME: Need actual value of the flush timeout */
- //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
- // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
-
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
@@ -3415,12 +2498,153 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
return 0;
}
-static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
+ u16 to_multiplier)
+{
+ u16 max_latency;
+
+ if (min > max || min < 6 || max > 3200)
+ return -EINVAL;
+
+ if (to_multiplier < 10 || to_multiplier > 3200)
+ return -EINVAL;
+
+ if (max >= to_multiplier * 8)
+ return -EINVAL;
+
+ max_latency = (to_multiplier * 8 / max) - 1;
+ if (latency > 499 || latency > max_latency)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_conn_param_update_req *req;
+ struct l2cap_conn_param_update_rsp rsp;
+ u16 min, max, latency, to_multiplier, cmd_len;
+ int err;
+
+ if (!(hcon->link_mode & HCI_LM_MASTER))
+ return -EINVAL;
+
+ cmd_len = __le16_to_cpu(cmd->len);
+ if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
+ return -EPROTO;
+
+ req = (struct l2cap_conn_param_update_req *) data;
+ min = __le16_to_cpu(req->min);
+ max = __le16_to_cpu(req->max);
+ latency = __le16_to_cpu(req->latency);
+ to_multiplier = __le16_to_cpu(req->to_multiplier);
+
+ BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+ min, max, latency, to_multiplier);
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+ if (err)
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ else
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
+ sizeof(rsp), &rsp);
+
+ if (!err)
+ hci_le_conn_update(hcon, min, max, latency, to_multiplier);
+
+ return 0;
+}
+
+static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+{
+ int err = 0;
+
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+ l2cap_command_rej(conn, cmd, data);
+ break;
+
+ case L2CAP_CONN_REQ:
+ err = l2cap_connect_req(conn, cmd, data);
+ break;
+
+ case L2CAP_CONN_RSP:
+ err = l2cap_connect_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_CONF_REQ:
+ err = l2cap_config_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONF_RSP:
+ err = l2cap_config_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_DISCONN_REQ:
+ err = l2cap_disconnect_req(conn, cmd, data);
+ break;
+
+ case L2CAP_DISCONN_RSP:
+ err = l2cap_disconnect_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_ECHO_REQ:
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
+ break;
+
+ case L2CAP_ECHO_RSP:
+ break;
+
+ case L2CAP_INFO_REQ:
+ err = l2cap_information_req(conn, cmd, data);
+ break;
+
+ case L2CAP_INFO_RSP:
+ err = l2cap_information_rsp(conn, cmd, data);
+ break;
+
+ default:
+ BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+ return 0;
+
+ case L2CAP_CONN_PARAM_UPDATE_REQ:
+ return l2cap_conn_param_update_req(conn, cmd, data);
+
+ case L2CAP_CONN_PARAM_UPDATE_RSP:
+ return 0;
+
+ default:
+ BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
+ return -EINVAL;
+ }
+}
+
+static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
{
u8 *data = skb->data;
int len = skb->len;
struct l2cap_cmd_hdr cmd;
- int err = 0;
+ int err;
l2cap_raw_recv(conn, skb);
@@ -3439,55 +2663,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
break;
}
- switch (cmd.code) {
- case L2CAP_COMMAND_REJ:
- l2cap_command_rej(conn, &cmd, data);
- break;
-
- case L2CAP_CONN_REQ:
- err = l2cap_connect_req(conn, &cmd, data);
- break;
-
- case L2CAP_CONN_RSP:
- err = l2cap_connect_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_CONF_REQ:
- err = l2cap_config_req(conn, &cmd, cmd_len, data);
- break;
-
- case L2CAP_CONF_RSP:
- err = l2cap_config_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_DISCONN_REQ:
- err = l2cap_disconnect_req(conn, &cmd, data);
- break;
-
- case L2CAP_DISCONN_RSP:
- err = l2cap_disconnect_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_ECHO_REQ:
- l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
- break;
-
- case L2CAP_ECHO_RSP:
- break;
-
- case L2CAP_INFO_REQ:
- err = l2cap_information_req(conn, &cmd, data);
- break;
-
- case L2CAP_INFO_RSP:
- err = l2cap_information_rsp(conn, &cmd, data);
- break;
-
- default:
- BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
- err = -EINVAL;
- break;
- }
+ if (conn->hcon->type == LE_LINK)
+ err = l2cap_le_sig_cmd(conn, &cmd, data);
+ else
+ err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
if (err) {
struct l2cap_cmd_rej rej;
@@ -4484,6 +3663,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("len %d, cid 0x%4.4x", len, cid);
switch (cid) {
+ case L2CAP_CID_LE_SIGNALING:
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
break;
@@ -4541,7 +3721,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
- if (hcon->type != ACL_LINK)
+ if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
return -EINVAL;
if (!status) {
@@ -4570,7 +3750,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (hcon->type != ACL_LINK)
+ if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
return -EINVAL;
l2cap_conn_del(hcon, bt_err(reason));
@@ -4673,12 +3853,15 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
{
struct l2cap_conn *conn = hcon->l2cap_data;
- if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
+ if (!conn)
+ conn = l2cap_conn_add(hcon, 0);
+
+ if (!conn)
goto drop;
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
- if (flags & ACL_START) {
+ if (!(flags & ACL_CONT)) {
struct l2cap_hdr *hdr;
struct sock *sk;
u16 cid;
@@ -4784,12 +3967,13 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
sk_for_each(sk, node, &l2cap_sk_list.head) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
- seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
+ seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst),
sk->sk_state, __le16_to_cpu(pi->psm),
pi->scid, pi->dcid,
- pi->imtu, pi->omtu, pi->sec_level);
+ pi->imtu, pi->omtu, pi->sec_level,
+ pi->mode);
}
read_unlock_bh(&l2cap_sk_list.lock);
@@ -4811,32 +3995,6 @@ static const struct file_operations l2cap_debugfs_fops = {
static struct dentry *l2cap_debugfs;
-static const struct proto_ops l2cap_sock_ops = {
- .family = PF_BLUETOOTH,
- .owner = THIS_MODULE,
- .release = l2cap_sock_release,
- .bind = l2cap_sock_bind,
- .connect = l2cap_sock_connect,
- .listen = l2cap_sock_listen,
- .accept = l2cap_sock_accept,
- .getname = l2cap_sock_getname,
- .sendmsg = l2cap_sock_sendmsg,
- .recvmsg = l2cap_sock_recvmsg,
- .poll = bt_sock_poll,
- .ioctl = bt_sock_ioctl,
- .mmap = sock_no_mmap,
- .socketpair = sock_no_socketpair,
- .shutdown = l2cap_sock_shutdown,
- .setsockopt = l2cap_sock_setsockopt,
- .getsockopt = l2cap_sock_getsockopt
-};
-
-static const struct net_proto_family l2cap_sock_family_ops = {
- .family = PF_BLUETOOTH,
- .owner = THIS_MODULE,
- .create = l2cap_sock_create,
-};
-
static struct hci_proto l2cap_hci_proto = {
.name = "L2CAP",
.id = HCI_PROTO_L2CAP,
@@ -4848,23 +4006,17 @@ static struct hci_proto l2cap_hci_proto = {
.recv_acldata = l2cap_recv_acldata
};
-static int __init l2cap_init(void)
+int __init l2cap_init(void)
{
int err;
- err = proto_register(&l2cap_proto, 0);
+ err = l2cap_init_sockets();
if (err < 0)
return err;
_busy_wq = create_singlethread_workqueue("l2cap");
if (!_busy_wq) {
- proto_unregister(&l2cap_proto);
- return -ENOMEM;
- }
-
- err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
- if (err < 0) {
- BT_ERR("L2CAP socket registration failed");
+ err = -ENOMEM;
goto error;
}
@@ -4882,49 +4034,26 @@ static int __init l2cap_init(void)
BT_ERR("Failed to create L2CAP debug file");
}
- BT_INFO("L2CAP ver %s", VERSION);
- BT_INFO("L2CAP socket layer initialized");
-
return 0;
error:
destroy_workqueue(_busy_wq);
- proto_unregister(&l2cap_proto);
+ l2cap_cleanup_sockets();
return err;
}
-static void __exit l2cap_exit(void)
+void l2cap_exit(void)
{
debugfs_remove(l2cap_debugfs);
flush_workqueue(_busy_wq);
destroy_workqueue(_busy_wq);
- if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
- BT_ERR("L2CAP socket unregistration failed");
-
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
BT_ERR("L2CAP protocol unregistration failed");
- proto_unregister(&l2cap_proto);
-}
-
-void l2cap_load(void)
-{
- /* Dummy function to trigger automatic L2CAP module loading by
- * other modules that use L2CAP sockets but don't use any other
- * symbols from it. */
+ l2cap_cleanup_sockets();
}
-EXPORT_SYMBOL(l2cap_load);
-
-module_init(l2cap_init);
-module_exit(l2cap_exit);
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 000000000000..fc85e7ae33c7
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1156 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+ Copyright (C) 2010 Google Inc.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP sockets. */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+/* ---- L2CAP timers ---- */
+static void l2cap_sock_timeout(unsigned long arg)
+{
+ struct sock *sk = (struct sock *) arg;
+ int reason;
+
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+
+ bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ /* sk is owned by user. Try again later */
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
+ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
+ reason = ECONNREFUSED;
+ else if (sk->sk_state == BT_CONNECT &&
+ l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+
+ __l2cap_sock_close(sk, reason);
+
+ bh_unlock_sock(sk);
+
+ l2cap_sock_kill(sk);
+ sock_put(sk);
+}
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+void l2cap_sock_clear_timer(struct sock *sk)
+{
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+ sk_stop_timer(sk, &sk->sk_timer);
+}
+
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ sk_for_each(sk, node, &l2cap_sk_list.head)
+ if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
+ goto found;
+ sk = NULL;
+found:
+ return sk;
+}
+
+static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_OPEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ if (la.l2_psm) {
+ __u16 psm = __le16_to_cpu(la.l2_psm);
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((psm & 0x0101) != 0x0001) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ /* Restrict usage of well-known PSMs */
+ if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
+ err = -EACCES;
+ goto done;
+ }
+ }
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
+ if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+ err = -EADDRINUSE;
+ } else {
+ /* Save source address */
+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->sport = la.l2_psm;
+ sk->sk_state = BT_BOUND;
+
+ if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+ __le16_to_cpu(la.l2_psm) == 0x0003)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ }
+
+ if (la.l2_cid)
+ l2cap_pi(sk)->scid = la.l2_cid;
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (!addr || alen < sizeof(addr->sa_family) ||
+ addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+ && !(la.l2_psm || la.l2_cid)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ switch (sk->sk_state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ goto wait;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
+ sk->sk_type != SOCK_RAW && !la.l2_cid) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->dcid = la.l2_cid;
+
+ err = l2cap_do_connect(sk);
+ if (err)
+ goto done;
+
+wait:
+ err = bt_sock_wait_state(sk, BT_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sk %p backlog %d", sk, backlog);
+
+ lock_sock(sk);
+
+ if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+ || sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
+ bdaddr_t *src = &bt_sk(sk)->src;
+ u16 psm;
+
+ err = -EINVAL;
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
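+ /* No PSM was bound: pick the first unused dynamic PSM
+ * (0x1001-0x10ff), stepping by 2 so the value stays odd. */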
+ for (psm = 0x1001; psm < 0x1100; psm += 2)
+ if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+ l2cap_pi(sk)->psm = cpu_to_le16(psm);
+ l2cap_pi(sk)->sport = cpu_to_le16(psm);
+ err = 0;
+ break;
+ }
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+
+ if (err < 0)
+ goto done;
+ }
+
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sock *sk = sock->sk, *nsk;
+ long timeo;
+ int err = 0;
+
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+ BT_DBG("sk %p timeo %ld", sk, timeo);
+
+ /* Wait for an incoming connection. (wake-one). */
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ if (err)
+ goto done;
+
+ newsock->state = SS_CONNECTED;
+
+ BT_DBG("new socket %p", nsk);
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+{
+ struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+ struct sock *sk = sock->sk;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ addr->sa_family = AF_BLUETOOTH;
+ *len = sizeof(struct sockaddr_l2);
+
+ if (peer) {
+ la->l2_psm = l2cap_pi(sk)->psm;
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ } else {
+ la->l2_psm = l2cap_pi(sk)->sport;
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
+ }
+
+ return 0;
+}
+
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_options opts;
+ struct l2cap_conninfo cinfo;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
+ memset(&opts, 0, sizeof(opts));
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+ len = min_t(unsigned int, len, sizeof(opts));
+ if (copy_to_user(optval, (char *) &opts, len))
+ err = -EFAULT;
+
+ break;
+
+ case L2CAP_LM:
+ switch (l2cap_pi(sk)->sec_level) {
+ case BT_SECURITY_LOW:
+ opt = L2CAP_LM_AUTH;
+ break;
+ case BT_SECURITY_MEDIUM:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
+ break;
+ case BT_SECURITY_HIGH:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+ L2CAP_LM_SECURE;
+ break;
+ default:
+ opt = 0;
+ break;
+ }
+
+ if (l2cap_pi(sk)->role_switch)
+ opt |= L2CAP_LM_MASTER;
+
+ if (l2cap_pi(sk)->force_reliable)
+ opt |= L2CAP_LM_RELIABLE;
+
+ if (put_user(opt, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case L2CAP_CONNINFO:
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ bt_sk(sk)->defer_setup)) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+
+ len = min_t(unsigned int, len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *) &cinfo, len))
+ err = -EFAULT;
+
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (level == SOL_L2CAP)
+ return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SECURITY:
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ sec.level = l2cap_pi(sk)->sec_level;
+
+ len = min_t(unsigned int, len, sizeof(sec));
+ if (copy_to_user(optval, (char *) &sec, len))
+ err = -EFAULT;
+
+ break;
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case BT_FLUSHABLE:
+ if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_options opts;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
+ if (sk->sk_state == BT_CONNECTED) {
+ err = -EINVAL;
+ break;
+ }
+
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+ len = min_t(unsigned int, sizeof(opts), optlen);
+ if (copy_from_user((char *) &opts, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->mode = opts.mode;
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->fcs = opts.fcs;
+ l2cap_pi(sk)->max_tx = opts.max_tx;
+ l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
+ break;
+
+ case L2CAP_LM:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt & L2CAP_LM_AUTH)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
+ if (opt & L2CAP_LM_ENCRYPT)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
+ if (opt & L2CAP_LM_SECURE)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+
+ l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
+ l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ if (level == SOL_L2CAP)
+ return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SECURITY:
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ sec.level = BT_SECURITY_LOW;
+
+ len = min_t(unsigned int, sizeof(sec), optlen);
+ if (copy_from_user((char *) &sec, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (sec.level < BT_SECURITY_LOW ||
+ sec.level > BT_SECURITY_HIGH) {
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->sec_level = sec.level;
+ break;
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ bt_sk(sk)->defer_setup = opt;
+ break;
+
+ case BT_FLUSHABLE:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_FLUSHABLE_ON) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (opt == BT_FLUSHABLE_OFF) {
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ /* proceed further only when we have an l2cap_conn and
+ No Flush support in the LM */
+ if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ l2cap_pi(sk)->flushable = opt;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ u16 control;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ err = sock_error(sk);
+ if (err)
+ return err;
+
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ goto done;
+ }
+
+ /* Connectionless channel */
+ if (sk->sk_type == SOCK_DGRAM) {
+ skb = l2cap_create_connless_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ } else {
+ l2cap_do_send(sk, skb);
+ err = len;
+ }
+ goto done;
+ }
+
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EMSGSIZE;
+ goto done;
+ }
+
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+
+ l2cap_do_send(sk, skb);
+ err = len;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ /* Entire SDU fits into one PDU */
+ if (len <= pi->remote_mps) {
+ control = L2CAP_SDU_UNSEGMENTED;
+ skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+ __skb_queue_tail(TX_QUEUE(sk), skb);
+
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb;
+
+ } else {
+ /* Segment SDU into multiple PDUs */
+ err = l2cap_sar_segment_sdu(sk, msg, len);
+ if (err < 0)
+ goto done;
+ }
+
+ if (pi->mode == L2CAP_MODE_STREAMING) {
+ l2cap_streaming_send(sk);
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->conn_state & L2CAP_CONN_WAIT_F)) {
+ err = len;
+ break;
+ }
+ err = l2cap_ertm_send(sk);
+ }
+
+ if (err >= 0)
+ err = len;
+ break;
+
+ default:
+ BT_DBG("bad state %1.1x", pi->mode);
+ err = -EBADFD;
+ }
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ u8 buf[128];
+
+ sk->sk_state = BT_CONFIG;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
+ release_sock(sk);
+ return 0;
+ }
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
+
+ release_sock(sk);
+ return 0;
+ }
+
+ release_sock(sk);
+
+ if (sock->type == SOCK_STREAM)
+ return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+
+ return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+}
+
+/* Kill socket (only if zapped and orphan)
+ * Must be called on unlocked socket.
+ */
+void l2cap_sock_kill(struct sock *sk)
+{
+ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+ return;
+
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
+
+ /* Kill poor orphan */
+ bt_sock_unlink(&l2cap_sk_list, sk);
+ sock_set_flag(sk, SOCK_DEAD);
+ sock_put(sk);
+}
+
+/* Must be called on unlocked socket. */
+static void l2cap_sock_close(struct sock *sk)
+{
+ l2cap_sock_clear_timer(sk);
+ lock_sock(sk);
+ __l2cap_sock_close(sk, ECONNRESET);
+ release_sock(sk);
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL)))
+ l2cap_sock_close(sk);
+
+ parent->sk_state = BT_CLOSED;
+ sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+void __l2cap_sock_close(struct sock *sk, int reason)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+
+ switch (sk->sk_state) {
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ break;
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+ l2cap_send_disconn_req(conn, sk, reason);
+ } else
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT2:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ struct l2cap_conn_rsp rsp;
+ __u16 result;
+
+ if (bt_sk(sk)->defer_setup)
+ result = L2CAP_CR_SEC_BLOCK;
+ else
+ result = L2CAP_CR_BAD_PSM;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ } else
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(sk, reason);
+ break;
+
+ default:
+ sock_set_flag(sk, SOCK_ZAPPED);
+ break;
+ }
+}
+
+static int l2cap_sock_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (!sk->sk_shutdown) {
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+ err = __l2cap_wait_ack(sk);
+
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ l2cap_sock_clear_timer(sk);
+ __l2cap_sock_close(sk, 0);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+
+ if (!err && sk->sk_err)
+ err = -sk->sk_err;
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ err = l2cap_sock_shutdown(sock, 2);
+
+ sock_orphan(sk);
+ l2cap_sock_kill(sk);
+ return err;
+}
+
+static void l2cap_sock_destruct(struct sock *sk)
+{
+ BT_DBG("sk %p", sk);
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
+void l2cap_sock_init(struct sock *sk, struct sock *parent)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p", sk);
+
+ if (parent) {
+ sk->sk_type = parent->sk_type;
+ bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+
+ pi->imtu = l2cap_pi(parent)->imtu;
+ pi->omtu = l2cap_pi(parent)->omtu;
+ pi->conf_state = l2cap_pi(parent)->conf_state;
+ pi->mode = l2cap_pi(parent)->mode;
+ pi->fcs = l2cap_pi(parent)->fcs;
+ pi->max_tx = l2cap_pi(parent)->max_tx;
+ pi->tx_win = l2cap_pi(parent)->tx_win;
+ pi->sec_level = l2cap_pi(parent)->sec_level;
+ pi->role_switch = l2cap_pi(parent)->role_switch;
+ pi->force_reliable = l2cap_pi(parent)->force_reliable;
+ pi->flushable = l2cap_pi(parent)->flushable;
+ } else {
+ pi->imtu = L2CAP_DEFAULT_MTU;
+ pi->omtu = 0;
+ if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
+ pi->mode = L2CAP_MODE_ERTM;
+ pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+ } else {
+ pi->mode = L2CAP_MODE_BASIC;
+ }
+ pi->max_tx = L2CAP_DEFAULT_MAX_TX;
+ pi->fcs = L2CAP_FCS_CRC16;
+ pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ pi->sec_level = BT_SECURITY_LOW;
+ pi->role_switch = 0;
+ pi->force_reliable = 0;
+ pi->flushable = BT_FLUSHABLE_OFF;
+ }
+
+ /* Default config options */
+ pi->conf_len = 0;
+ pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ skb_queue_head_init(TX_QUEUE(sk));
+ skb_queue_head_init(SREJ_QUEUE(sk));
+ skb_queue_head_init(BUSY_QUEUE(sk));
+ INIT_LIST_HEAD(SREJ_LIST(sk));
+}
+
+static struct proto l2cap_proto = {
+ .name = "L2CAP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct l2cap_pinfo)
+};
+
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk);
+ INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+
+ sk->sk_destruct = l2cap_sock_destruct;
+ sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
+
+ setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+
+ bt_sock_link(&l2cap_sk_list, sk);
+ return sk;
+}
+
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+
+ BT_DBG("sock %p", sock);
+
+ sock->state = SS_UNCONNECTED;
+
+ if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
+ sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
+
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+ return -EPERM;
+
+ sock->ops = &l2cap_sock_ops;
+
+ sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+ if (!sk)
+ return -ENOMEM;
+
+ l2cap_sock_init(sk, NULL);
+ return 0;
+}
+
+const struct proto_ops l2cap_sock_ops = {
+ .family = PF_BLUETOOTH,
+ .owner = THIS_MODULE,
+ .release = l2cap_sock_release,
+ .bind = l2cap_sock_bind,
+ .connect = l2cap_sock_connect,
+ .listen = l2cap_sock_listen,
+ .accept = l2cap_sock_accept,
+ .getname = l2cap_sock_getname,
+ .sendmsg = l2cap_sock_sendmsg,
+ .recvmsg = l2cap_sock_recvmsg,
+ .poll = bt_sock_poll,
+ .ioctl = bt_sock_ioctl,
+ .mmap = sock_no_mmap,
+ .socketpair = sock_no_socketpair,
+ .shutdown = l2cap_sock_shutdown,
+ .setsockopt = l2cap_sock_setsockopt,
+ .getsockopt = l2cap_sock_getsockopt
+};
+
+static const struct net_proto_family l2cap_sock_family_ops = {
+ .family = PF_BLUETOOTH,
+ .owner = THIS_MODULE,
+ .create = l2cap_sock_create,
+};
+
+int __init l2cap_init_sockets(void)
+{
+ int err;
+
+ err = proto_register(&l2cap_proto, 0);
+ if (err < 0)
+ return err;
+
+ err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
+ if (err < 0)
+ goto error;
+
+ BT_INFO("L2CAP socket layer initialized");
+
+ return 0;
+
+error:
+ BT_ERR("L2CAP socket registration failed");
+ proto_unregister(&l2cap_proto);
+ return err;
+}
+
+void l2cap_cleanup_sockets(void)
+{
+ if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
+ BT_ERR("L2CAP socket unregistration failed");
+
+ proto_unregister(&l2cap_proto);
+}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f827fd908380..0054c74e27b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,7 +22,7 @@
/* Bluetooth HCI Management interface */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -32,13 +32,24 @@
#define MGMT_VERSION 0
#define MGMT_REVISION 1
-static int cmd_status(struct sock *sk, u16 cmd, u8 status)
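+/* Commands that are still waiting for an HCI event are tracked on
+ * cmd_list as pending_cmd entries, keyed by opcode and controller
+ * index. */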
+struct pending_cmd {
+ struct list_head list;
+ __u16 opcode;
+ int index;
+ void *cmd;
+ struct sock *sk;
+ void *user_data;
+};
+
+LIST_HEAD(cmd_list);
+
+static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
if (!skb)
@@ -47,6 +58,7 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
hdr = (void *) skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+ hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(sizeof(*ev));
ev = (void *) skb_put(skb, sizeof(*ev));
@@ -59,29 +71,30 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
return 0;
}
-static int read_version(struct sock *sk)
+static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
+ size_t rp_len)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
- struct mgmt_rp_read_version *rp;
BT_DBG("sock %p", sk);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
+
hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode);
+ ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+ put_unaligned_le16(cmd, &ev->opcode);
- rp = (void *) skb_put(skb, sizeof(*rp));
- rp->version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp->revision);
+ if (rp)
+ memcpy(ev->data, rp, rp_len);
if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
@@ -89,16 +102,26 @@ static int read_version(struct sock *sk)
return 0;
}
+static int read_version(struct sock *sk)
+{
+ struct mgmt_rp_read_version rp;
+
+ BT_DBG("sock %p", sk);
+
+ rp.version = MGMT_VERSION;
+ put_unaligned_le16(MGMT_REVISION, &rp.revision);
+
+ return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
+ sizeof(rp));
+}
+
static int read_index_list(struct sock *sk)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
- size_t body_len;
+ size_t rp_len;
u16 count;
- int i;
+ int i, err;
BT_DBG("sock %p", sk);
@@ -109,112 +132,1131 @@ static int read_index_list(struct sock *sk)
count++;
}
- body_len = sizeof(*ev) + sizeof(*rp) + (2 * count);
- skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC);
- if (!skb)
+ rp_len = sizeof(*rp) + (2 * count);
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ read_unlock(&hci_dev_list_lock);
return -ENOMEM;
+ }
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(body_len);
-
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
-
- rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
put_unaligned_le16(count, &rp->num_controllers);
i = 0;
list_for_each(p, &hci_dev_list) {
struct hci_dev *d = list_entry(p, struct hci_dev, list);
+
+ hci_del_off_timer(d);
+
+ set_bit(HCI_MGMT, &d->flags);
+
+ if (test_bit(HCI_SETUP, &d->flags))
+ continue;
+
put_unaligned_le16(d->id, &rp->index[i++]);
BT_DBG("Added hci%u", d->id);
}
read_unlock(&hci_dev_list_lock);
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
+ rp_len);
- return 0;
+ kfree(rp);
+
+ return err;
}
-static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
+static int read_controller_info(struct sock *sk, u16 index)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
- struct mgmt_rp_read_info *rp;
- struct mgmt_cp_read_info *cp;
+ struct mgmt_rp_read_info rp;
struct hci_dev *hdev;
- u16 dev_id;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %p hci%u", sk, index);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+
+ hci_del_off_timer(hdev);
+
+ hci_dev_lock_bh(hdev);
+
+ set_bit(HCI_MGMT, &hdev->flags);
+
+ rp.type = hdev->dev_type;
+
+ rp.powered = test_bit(HCI_UP, &hdev->flags);
+ rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
+ rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
+ rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ rp.sec_mode = 3;
+ else if (hdev->ssp_mode > 0)
+ rp.sec_mode = 4;
+ else
+ rp.sec_mode = 2;
+
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
+ memcpy(rp.features, hdev->features, 8);
+ memcpy(rp.dev_class, hdev->dev_class, 3);
+ put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.hci_ver = hdev->hci_ver;
+ put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+}
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+ sock_put(cmd->sk);
+ kfree(cmd->cmd);
+ kfree(cmd);
+}
+
+static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ u16 index, void *data, u16 len)
+{
+ struct pending_cmd *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ cmd->opcode = opcode;
+ cmd->index = index;
+
+ cmd->cmd = kmalloc(len, GFP_ATOMIC);
+ if (!cmd->cmd) {
+ kfree(cmd);
+ return NULL;
+ }
+
+ memcpy(cmd->cmd, data, len);
+
+ cmd->sk = sk;
+ sock_hold(sk);
+
+ list_add(&cmd->list, &cmd_list);
+
+ return cmd;
+}
+
+static void mgmt_pending_foreach(u16 opcode, int index,
+ void (*cb)(struct pending_cmd *cmd, void *data),
+ void *data)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (cmd->opcode != opcode)
+ continue;
+
+ if (index >= 0 && cmd->index != index)
+ continue;
+
+ cb(cmd, data);
+ }
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+{
+ struct list_head *p;
+
+ list_for_each(p, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (cmd->opcode != opcode)
+ continue;
+
+ if (index >= 0 && cmd->index != index)
+ continue;
+
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void mgmt_pending_remove(struct pending_cmd *cmd)
+{
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
+
+static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ int err, up;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ up = test_bit(HCI_UP, &hdev->flags);
+ if ((cp->val && up) || (!cp->val && !up)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val)
+ queue_work(hdev->workqueue, &hdev->power_on);
+ else
+ queue_work(hdev->workqueue, &hdev->power_off);
+
+ err = 0;
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+ return err;
+}
+
+static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ u8 scan;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+ goto failed;
+ }
- if (len != 2)
- return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ scan = SCAN_PAGE;
+
+ if (cp->val)
+ scan |= SCAN_INQUIRY;
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ u8 scan;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val)
+ scan = SCAN_PAGE;
+ else
+ scan = 0;
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
+ struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
+ bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+
hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+ hdr->opcode = cpu_to_le16(event);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(data_len);
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ hci_send_to_sock(NULL, skb, skip_sk);
+ kfree_skb(skb);
- rp = (void *) skb_put(skb, sizeof(*rp));
+ return 0;
+}
+
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
+{
+ struct mgmt_mode rp;
+
+ rp.val = val;
+
+ return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
+}
+
+static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_mode *cp, ev;
+ struct hci_dev *hdev;
+ int err;
cp = (void *) data;
- dev_id = get_unaligned_le16(&cp->index);
- BT_DBG("request for hci%u", dev_id);
+ BT_DBG("request for hci%u", index);
- hdev = hci_dev_get(dev_id);
- if (!hdev) {
- kfree_skb(skb);
- return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (cp->val)
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+ else
+ clear_bit(HCI_PAIRABLE, &hdev->flags);
+
+ err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
+ if (err < 0)
+ goto failed;
+
+ ev.val = cp->val;
+
+ err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
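+/* OR together the svc_hint bits of every registered UUID to build the
+ * service-class octet of the Class of Device. */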
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct list_head *p;
+ u8 val = 0;
+
+ list_for_each(p, &hdev->uuids) {
+ struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+
+ val |= uuid->svc_hint;
}
+ return val;
+}
+
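+/* Write the Class of Device to the controller, but only if the service
+ * cache is disabled and the computed value actually changed. */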
+static int update_class(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct mgmt_cp_add_uuid *cp;
+ struct hci_dev *hdev;
+ struct bt_uuid *uuid;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+
hci_dev_lock_bh(hdev);
- put_unaligned_le16(hdev->id, &rp->index);
- rp->type = hdev->dev_type;
+ uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+ if (!uuid) {
+ err = -ENOMEM;
+ goto failed;
+ }
- rp->powered = test_bit(HCI_UP, &hdev->flags);
- rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
- rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
+ memcpy(uuid->uuid, cp->uuid, 16);
+ uuid->svc_hint = cp->svc_hint;
- if (test_bit(HCI_AUTH, &hdev->flags))
- rp->sec_mode = 3;
- else if (hdev->ssp_mode > 0)
- rp->sec_mode = 4;
- else
- rp->sec_mode = 2;
+ list_add(&uuid->list, &hdev->uuids);
- bacpy(&rp->bdaddr, &hdev->bdaddr);
- memcpy(rp->features, hdev->features, 8);
- memcpy(rp->dev_class, hdev->dev_class, 3);
- put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
- rp->hci_ver = hdev->hci_ver;
- put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
+ err = update_class(hdev);
+ if (err < 0)
+ goto failed;
+ err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
+
+failed:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ return err;
+}
+
+static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct list_head *p, *n;
+ struct mgmt_cp_remove_uuid *cp;
+ struct hci_dev *hdev;
+ u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ int err, found;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
+ err = hci_uuids_clear(hdev);
+ goto unlock;
+ }
+
+ found = 0;
+
+ list_for_each_safe(p, n, &hdev->uuids) {
+ struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
+
+ if (memcmp(match->uuid, cp->uuid, 16) != 0)
+ continue;
+
+ list_del(&match->list);
+ found++;
+ }
+
+ if (found == 0) {
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+ goto unlock;
+ }
+
+ err = update_class(hdev);
+ if (err < 0)
+ goto unlock;
+
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
+
+unlock:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_dev_class *cp;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ hdev->major_class = cp->major;
+ hdev->minor_class = cp->minor;
+
+ err = update_class(hdev);
+
+ if (err == 0)
+ err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_service_cache *cp;
+ int err;
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ BT_DBG("hci%u enable %d", index, cp->enable);
+
+ if (cp->enable) {
+ set_bit(HCI_SERVICE_CACHE, &hdev->flags);
+ err = 0;
+ } else {
+ clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
+ err = update_class(hdev);
+ }
+
+ if (err == 0)
+ err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
+ 0);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_load_keys *cp;
+ u16 key_count, expected_len;
+ int i;
+
+ cp = (void *) data;
+
+ if (len < sizeof(*cp))
+ return -EINVAL;
+
+ key_count = get_unaligned_le16(&cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+ if (expected_len != len) {
+ BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ expected_len, len);
+ return -EINVAL;
+ }
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+
+ BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
+ key_count);
+
+ hci_dev_lock_bh(hdev);
+
+ hci_link_keys_clear(hdev);
+
+ set_bit(HCI_LINK_KEYS, &hdev->flags);
+
+ if (cp->debug_keys)
+ set_bit(HCI_DEBUG_KEYS, &hdev->flags);
+ else
+ clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_key_info *key = &cp->keys[i];
+
+ hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
+ key->pin_len);
+ }
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
return 0;
}
+static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_remove_key *cp;
+ struct hci_conn *conn;
+ int err;
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ err = hci_remove_link_key(hdev, &cp->bdaddr);
+ if (err < 0) {
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+ goto unlock;
+ }
+
+ err = 0;
+
+ if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn) {
+ struct hci_cp_disconnect dc;
+
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ }
+
+unlock:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_disconnect *cp;
+ struct hci_cp_disconnect dc;
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+ goto failed;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int get_connections(struct sock *sk, u16 index)
+{
+ struct mgmt_rp_get_connections *rp;
+ struct hci_dev *hdev;
+ struct list_head *p;
+ size_t rp_len;
+ u16 count;
+ int i, err;
+
+ BT_DBG("");
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ count = 0;
+ list_for_each(p, &hdev->conn_hash.list) {
+ count++;
+ }
+
+ rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ put_unaligned_le16(count, &rp->conn_count);
+
+ read_lock(&hci_dev_list_lock);
+
+ i = 0;
+ list_for_each(p, &hdev->conn_hash.list) {
+ struct hci_conn *c = list_entry(p, struct hci_conn, list);
+
+ bacpy(&rp->conn[i++], &c->dst);
+ }
+
+ read_unlock(&hci_dev_list_lock);
+
+ err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
+
+unlock:
+ kfree(rp);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+ return err;
+}
+
+static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pin_code_reply *cp;
+ struct hci_cp_pin_code_reply reply;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ bacpy(&reply.bdaddr, &cp->bdaddr);
+ reply.pin_len = cp->pin_len;
+ memcpy(reply.pin_code, cp->pin_code, 16);
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pin_code_neg_reply *cp;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+ ENETDOWN);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
+ data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
+ &cp->bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_io_capability *cp;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ hdev->io_capability = cp->io_capability;
+
+ BT_DBG("%s IO capability set to 0x%02x", hdev->name,
+ hdev->io_capability);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
+}
+
+static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct list_head *p;
+
+ list_for_each(p, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
+ continue;
+
+ if (cmd->index != hdev->id)
+ continue;
+
+ if (cmd->user_data != conn)
+ continue;
+
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void pairing_complete(struct pending_cmd *cmd, u8 status)
+{
+ struct mgmt_rp_pair_device rp;
+ struct hci_conn *conn = cmd->user_data;
+
+ bacpy(&rp.bdaddr, &conn->dst);
+ rp.status = status;
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
+
+ /* So we don't get further callbacks for this connection */
+ conn->connect_cfm_cb = NULL;
+ conn->security_cfm_cb = NULL;
+ conn->disconn_cfm_cb = NULL;
+
+ hci_conn_put(conn);
+
+ mgmt_pending_remove(cmd);
+}
+
+static void pairing_complete_cb(struct hci_conn *conn, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status %u", status);
+
+ cmd = find_pairing(conn);
+ if (!cmd) {
+ BT_DBG("Unable to find a pending command");
+ return;
+ }
+
+ pairing_complete(cmd, status);
+}
+
+static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pair_device *cp;
+ struct pending_cmd *cmd;
+ u8 sec_level, auth_type;
+ struct hci_conn *conn;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (cp->io_cap == 0x03) {
+ sec_level = BT_SECURITY_MEDIUM;
+ auth_type = HCI_AT_DEDICATED_BONDING;
+ } else {
+ sec_level = BT_SECURITY_HIGH;
+ auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+ }
+
+ conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
+ if (IS_ERR(conn)) {
+ err = PTR_ERR(conn);
+ goto unlock;
+ }
+
+ if (conn->connect_cfm_cb) {
+ hci_conn_put(conn);
+ err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ hci_conn_put(conn);
+ goto unlock;
+ }
+
+ conn->connect_cfm_cb = pairing_complete_cb;
+ conn->security_cfm_cb = pairing_complete_cb;
+ conn->disconn_cfm_cb = pairing_complete_cb;
+ conn->io_capability = cp->io_cap;
+ cmd->user_data = conn;
+
+ if (conn->state == BT_CONNECTED &&
+ hci_conn_security(conn, sec_level, auth_type))
+ pairing_complete(cmd, 0);
+
+ err = 0;
+
+unlock:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
+ u16 len, int success)
+{
+ struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+ u16 mgmt_op, hci_op;
+ struct pending_cmd *cmd;
+ struct hci_dev *hdev;
+ int err;
+
+ BT_DBG("");
+
+ if (success) {
+ mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
+ hci_op = HCI_OP_USER_CONFIRM_REPLY;
+ } else {
+ mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
+ hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
+ }
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, mgmt_op, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, mgmt_op, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, mgmt_op, ENETDOWN);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
unsigned char *buf;
struct mgmt_hdr *hdr;
- u16 opcode, len;
+ u16 opcode, index, len;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -233,6 +1275,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
hdr = (struct mgmt_hdr *) buf;
opcode = get_unaligned_le16(&hdr->opcode);
+ index = get_unaligned_le16(&hdr->index);
len = get_unaligned_le16(&hdr->len);
if (len != msglen - sizeof(*hdr)) {
@@ -248,11 +1291,65 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
err = read_index_list(sk);
break;
case MGMT_OP_READ_INFO:
- err = read_controller_info(sk, buf + sizeof(*hdr), len);
+ err = read_controller_info(sk, index);
+ break;
+ case MGMT_OP_SET_POWERED:
+ err = set_powered(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_DISCOVERABLE:
+ err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_CONNECTABLE:
+ err = set_connectable(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_PAIRABLE:
+ err = set_pairable(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_ADD_UUID:
+ err = add_uuid(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_UUID:
+ err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_DEV_CLASS:
+ err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_SERVICE_CACHE:
+ err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_LOAD_KEYS:
+ err = load_keys(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_KEY:
+ err = remove_key(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_DISCONNECT:
+ err = disconnect(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_GET_CONNECTIONS:
+ err = get_connections(sk, index);
+ break;
+ case MGMT_OP_PIN_CODE_REPLY:
+ err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_PIN_CODE_NEG_REPLY:
+ err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_IO_CAPABILITY:
+ err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_PAIR_DEVICE:
+ err = pair_device(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_USER_CONFIRM_REPLY:
+ err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+ break;
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
break;
default:
BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, opcode, 0x01);
+ err = cmd_status(sk, index, opcode, 0x01);
break;
}
@@ -266,43 +1363,283 @@ done:
return err;
}
-static int mgmt_event(u16 event, void *data, u16 data_len)
+int mgmt_index_added(u16 index)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
+ return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+}
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+int mgmt_index_removed(u16 index)
+{
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+}
- bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+struct cmd_lookup {
+ u8 val;
+ struct sock *sk;
+};
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- hdr->len = cpu_to_le16(data_len);
+static void mode_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct mgmt_mode *cp = cmd->cmd;
+ struct cmd_lookup *match = data;
- memcpy(skb_put(skb, data_len), data, data_len);
+ if (cp->val != match->val)
+ return;
- hci_send_to_sock(NULL, skb);
- kfree_skb(skb);
+ send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
- return 0;
+ list_del(&cmd->list);
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
+ }
+
+ mgmt_pending_free(cmd);
}
-int mgmt_index_added(u16 index)
+int mgmt_powered(u16 index, u8 powered)
{
- struct mgmt_ev_index_added ev;
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { powered, NULL };
+ int ret;
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
- put_unaligned_le16(index, &ev.index);
+ ev.val = powered;
- return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev));
+ ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return ret;
}
-int mgmt_index_removed(u16 index)
+int mgmt_discoverable(u16 index, u8 discoverable)
+{
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { discoverable, NULL };
+ int ret;
+
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
+
+ ev.val = discoverable;
+
+ ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+ match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return ret;
+}
+
+int mgmt_connectable(u16 index, u8 connectable)
+{
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { connectable, NULL };
+ int ret;
+
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+
+ ev.val = connectable;
+
+ ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return ret;
+}
+
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
+{
+ struct mgmt_ev_new_key ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ bacpy(&ev.key.bdaddr, &key->bdaddr);
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, 16);
+ ev.key.pin_len = key->pin_len;
+ ev.old_key_type = old_key_type;
+
+ return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_connected(u16 index, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_connected ev;
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+}
+
+static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct mgmt_cp_disconnect *cp = cmd->cmd;
+ struct sock **sk = data;
+ struct mgmt_rp_disconnect rp;
+
+ bacpy(&rp.bdaddr, &cp->bdaddr);
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
+
+ *sk = cmd->sk;
+ sock_hold(*sk);
+
+ mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_disconnected ev;
+ struct sock *sk = NULL;
+ int err;
+
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+
+ if (sk)
+ sock_put(sk);
+
+ return err;
+}
+
+int mgmt_disconnect_failed(u16 index)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ struct mgmt_ev_connect_failed ev;
+
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
+
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_pin_code_request ev;
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+ NULL);
+}
+
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_rp_pin_code_reply rp;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+ if (!cmd)
+ return -ENOENT;
+
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
+
+ err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+ sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_rp_pin_code_reply rp;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+ if (!cmd)
+ return -ENOENT;
+
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
+
+ err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+ sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value)
+{
+ struct mgmt_ev_user_confirm_request ev;
+
+ BT_DBG("hci%u", index);
+
+ bacpy(&ev.bdaddr, bdaddr);
+ put_unaligned_le32(value, &ev.value);
+
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+ NULL);
+}
+
+static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
+ u8 opcode)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_rp_user_confirm_reply rp;
+ int err;
+
+ cmd = mgmt_pending_find(opcode, index);
+ if (!cmd)
+ return -ENOENT;
+
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
+ err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ return confirm_reply_complete(index, bdaddr, status,
+ MGMT_OP_USER_CONFIRM_REPLY);
+}
+
+int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ return confirm_reply_complete(index, bdaddr, status,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY);
+}
+
+int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
- struct mgmt_ev_index_added ev;
+ struct mgmt_ev_auth_failed ev;
- put_unaligned_le16(index, &ev.index);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
- return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev));
+ return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
}
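
Editor's note: the call sites above all funnel through the reworked mgmt_event() helper, which now carries a controller index and a socket to skip (the one that already received a direct command response). A minimal sketch of the shape those call sites imply, assuming the management header grows an index field and hci_send_to_sock() gains a skip argument; illustrative only, not the patch's actual helper:

	static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
							struct sock *skip_sk)
	{
		struct sk_buff *skb;
		struct mgmt_hdr *hdr;

		skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;

		hdr = (void *) skb_put(skb, sizeof(*hdr));
		hdr->opcode = cpu_to_le16(event);
		hdr->index = cpu_to_le16(index);	/* assumed: index moved into the header */
		hdr->len = cpu_to_le16(data_len);

		if (data)
			memcpy(skb_put(skb, data_len), data, data_len);

		hci_send_to_sock(NULL, skb, skip_sk);	/* assumed: extra skip-socket argument */
		kfree_skb(skb);

		return 0;
	}
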
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 6b83776534fb..c9973932456f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2154,8 +2154,6 @@ static int __init rfcomm_init(void)
{
int err;
- l2cap_load();
-
hci_register_cb(&rfcomm_cb);
rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
break;
}
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 960c6d1637da..42fdffd1d76c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -50,8 +50,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
-#define VERSION "0.6"
-
static int disable_esco;
static const struct proto_ops sco_sock_ops;
@@ -192,20 +190,21 @@ static int sco_connect(struct sock *sk)
hci_dev_lock_bh(hdev);
- err = -ENOMEM;
-
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
- if (!hcon)
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
goto done;
+ }
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
+ err = -ENOMEM;
goto done;
}
@@ -703,6 +702,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
break;
}
+ memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
@@ -1023,7 +1023,7 @@ static struct hci_proto sco_hci_proto = {
.recv_scodata = sco_recv_scodata
};
-static int __init sco_init(void)
+int __init sco_init(void)
{
int err;
@@ -1051,7 +1051,6 @@ static int __init sco_init(void)
BT_ERR("Failed to create SCO debug file");
}
- BT_INFO("SCO (Voice Link) ver %s", VERSION);
BT_INFO("SCO socket layer initialized");
return 0;
@@ -1061,7 +1060,7 @@ error:
return err;
}
-static void __exit sco_exit(void)
+void __exit sco_exit(void)
{
debugfs_remove(sco_debugfs);
@@ -1074,14 +1073,5 @@ static void __exit sco_exit(void)
proto_unregister(&sco_proto);
}
-module_init(sco_init);
-module_exit(sco_exit);
-
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-2");
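
Editor's note: with module_init()/module_exit() and the MODULE_* boilerplate removed, sco_init()/sco_exit() are presumably called directly by the Bluetooth core during its own init and exit, roughly as sketched below (illustrative only; the actual wiring is in a part of the patch not shown here):

	static int __init bt_init_sketch(void)
	{
		int err;

		/* ... core and other protocol setup ... */

		err = sco_init();
		if (err < 0)
			return err;

		return 0;
	}

	static void __exit bt_exit_sketch(void)
	{
		sco_exit();
		/* ... remaining cleanup ... */
	}
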
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 556443566e9c..1461b19efd38 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -297,6 +297,21 @@ void br_netpoll_disable(struct net_bridge_port *p)
#endif
+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return br_add_if(br, slave_dev);
+}
+
+static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return br_del_if(br, slave_dev);
+}
+
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
@@ -326,6 +341,8 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_netpoll_cleanup = br_netpoll_cleanup,
.ndo_poll_controller = br_poll_controller,
#endif
+ .ndo_add_slave = br_add_slave,
+ .ndo_del_slave = br_del_slave,
};
static void br_dev_free(struct net_device *dev)
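
Editor's note: the new ndo_add_slave/ndo_del_slave hooks let generic code (e.g. rtnetlink) enslave a port without calling bridge internals directly. A hedged sketch of such a caller; the function name is made up:

	static int example_enslave(struct net_device *master, struct net_device *slave)
	{
		const struct net_device_ops *ops = master->netdev_ops;

		if (!ops->ndo_add_slave)
			return -EOPNOTSUPP;	/* master type has no slave support */

		return ops->ndo_add_slave(master, slave);
	}
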
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index d9d1e2bac1d6..dce8f0009a12 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -148,6 +148,8 @@ static void del_nbp(struct net_bridge_port *p)
netdev_rx_handler_unregister(dev);
+ netdev_set_master(dev, NULL);
+
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -365,7 +367,7 @@ int br_min_mtu(const struct net_bridge *br)
void br_features_recompute(struct net_bridge *br)
{
struct net_bridge_port *p;
- unsigned long features, mask;
+ u32 features, mask;
features = mask = br->feature_mask;
if (list_empty(&br->port_list))
@@ -379,7 +381,7 @@ void br_features_recompute(struct net_bridge *br)
}
done:
- br->dev->features = netdev_fix_features(features, NULL);
+ br->dev->features = netdev_fix_features(br->dev, features);
}
/* called with RTNL */
@@ -429,10 +431,14 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
goto err3;
- err = netdev_rx_handler_register(dev, br_handle_frame, p);
+ err = netdev_set_master(dev, br->dev);
if (err)
goto err3;
+ err = netdev_rx_handler_register(dev, br_handle_frame, p);
+ if (err)
+ goto err4;
+
dev->priv_flags |= IFF_BRIDGE_PORT;
dev_disable_lro(dev);
@@ -455,6 +461,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
+
+err4:
+ netdev_set_master(dev, NULL);
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 09d5c0987925..030a002ff8ee 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
{
- if (ipv6_addr_is_multicast(addr) &&
- IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+ if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
return 1;
return 0;
}
@@ -435,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
eth = eth_hdr(skb);
memcpy(eth->h_source, br->dev->dev_addr, 6);
- ipv6_eth_mc_map(group, eth->h_dest);
eth->h_proto = htons(ETH_P_IPV6);
skb_put(skb, sizeof(*eth));
@@ -447,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
ip6h->payload_len = htons(8 + sizeof(*mldq));
ip6h->nexthdr = IPPROTO_HOPOPTS;
ip6h->hop_limit = 1;
- ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+ ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+ &ip6h->saddr);
ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
@@ -780,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
{
struct br_ip br_group;
- if (ipv6_is_local_multicast(group))
+ if (!ipv6_is_transient_multicast(group))
return 0;
ipv6_addr_copy(&br_group.u.ip6, group);
- br_group.proto = htons(ETH_P_IP);
+ br_group.proto = htons(ETH_P_IPV6);
return br_multicast_add_group(br, port, &br_group);
}
@@ -1013,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
nsrcs = skb_header_pointer(skb,
len + offsetof(struct mld2_grec,
- grec_mca),
+ grec_nsrcs),
sizeof(_nsrcs), &_nsrcs);
if (!nsrcs)
return -EINVAL;
if (!pskb_may_pull(skb,
len + sizeof(*grec) +
- sizeof(struct in6_addr) * (*nsrcs)))
+ sizeof(struct in6_addr) * ntohs(*nsrcs)))
return -EINVAL;
grec = (struct mld2_grec *)(skb->data + len);
- len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+ len += sizeof(*grec) +
+ sizeof(struct in6_addr) * ntohs(*nsrcs);
/* We treat these as MLDv1 reports for now. */
switch (grec->grec_type) {
@@ -1340,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
{
struct br_ip br_group;
- if (ipv6_is_local_multicast(group))
+ if (!ipv6_is_transient_multicast(group))
return;
ipv6_addr_copy(&br_group.u.ip6, group);
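
Editor's note: one of the MLDv2 fixes above is byte order; grec_nsrcs is big-endian on the wire, so using it raw on a little-endian host miscounts the source list. A small user-space illustration of why the added ntohs() matters:

	#include <arpa/inet.h>
	#include <stddef.h>
	#include <stdint.h>

	void nsrcs_example(void)
	{
		uint16_t nsrcs_wire = htons(2);			/* 2 sources, network byte order */
		size_t wrong = 16 * nsrcs_wire;			/* 16 * 512 on little-endian hosts */
		size_t right = 16 * ntohs(nsrcs_wire);		/* 16 * 2 = 32 bytes, as intended */

		(void)wrong; (void)right;
	}
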
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 4e1b620b6be6..f7afc364d777 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -182,7 +182,7 @@ struct net_bridge
struct br_cpu_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
- unsigned long feature_mask;
+ u32 feature_mask;
#ifdef CONFIG_BRIDGE_NETFILTER
struct rtable fake_rtable;
bool nf_call_iptables;
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 50a46afc2bcc..2ed0056a39a8 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -22,9 +22,15 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
-struct tcpudphdr {
- __be16 src;
- __be16 dst;
+union pkthdr {
+ struct {
+ __be16 src;
+ __be16 dst;
+ } tcpudphdr;
+ struct {
+ u8 type;
+ u8 code;
+ } icmphdr;
};
static bool
@@ -33,8 +39,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
- const struct tcpudphdr *pptr;
- struct tcpudphdr _ports;
+ const union pkthdr *pptr;
+ union pkthdr _pkthdr;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
@@ -56,26 +62,34 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
return false;
- if (!(info->bitmask & EBT_IP6_DPORT) &&
- !(info->bitmask & EBT_IP6_SPORT))
+ if (!(info->bitmask & ( EBT_IP6_DPORT |
+ EBT_IP6_SPORT | EBT_IP6_ICMP6)))
return true;
- pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
- &_ports);
+
+ /* The minimum ICMPv6 header size is 4, so sizeof(_pkthdr) is ok. */
+ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
+ &_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
- u32 dst = ntohs(pptr->dst);
+ u16 dst = ntohs(pptr->tcpudphdr.dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1], EBT_IP6_DPORT))
return false;
}
if (info->bitmask & EBT_IP6_SPORT) {
- u32 src = ntohs(pptr->src);
+ u16 src = ntohs(pptr->tcpudphdr.src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1], EBT_IP6_SPORT))
return false;
}
- return true;
+ if ((info->bitmask & EBT_IP6_ICMP6) &&
+ FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
+ pptr->icmphdr.type > info->icmpv6_type[1] ||
+ pptr->icmphdr.code < info->icmpv6_code[0] ||
+ pptr->icmphdr.code > info->icmpv6_code[1],
+ EBT_IP6_ICMP6))
+ return false;
}
return true;
}
@@ -103,6 +117,14 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
+ if (info->bitmask & EBT_IP6_ICMP6) {
+ if ((info->invflags & EBT_IP6_PROTO) ||
+ info->protocol != IPPROTO_ICMPV6)
+ return -EINVAL;
+ if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
+ info->icmpv6_code[0] > info->icmpv6_code[1])
+ return -EINVAL;
+ }
return 0;
}
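
Editor's note: the new EBT_IP6_ICMP6 match is a pair of inclusive ranges over the ICMPv6 type and code bytes (plus the usual FWINV inversion handling). Stripped of the ebtables plumbing, the check reduces to something like this (names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	struct icmp6_ranges {
		uint8_t type[2];	/* [min, max] */
		uint8_t code[2];	/* [min, max] */
	};

	static bool icmp6_in_range(const struct icmp6_ranges *r,
				   uint8_t type, uint8_t code)
	{
		return type >= r->type[0] && type <= r->type[1] &&
		       code >= r->code[0] && code <= r->code[1];
	}
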
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 16df0532d4b9..893669caa8de 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name) - 1] = 0;
+
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
if (!newinfo)
@@ -1764,6 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,
newinfo->entries_size = size;
+ xt_compat_init_offsets(AF_INET, info->nentries);
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index c665de778b60..f1f98d967d8a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -23,10 +23,8 @@
#include <asm/atomic.h>
#define MAX_PHY_LAYERS 7
-#define PHY_NAME_LEN 20
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
-#define RFM_FRAGMENT_SIZE 4030
/* Information about CAIF physical interfaces held by Config Module in order
* to manage physical interfaces
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index d3ed264ad6c4..27dab26ad3b8 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -18,7 +18,6 @@
#define DGM_CMD_BIT 0x80
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON 0x80
-#define DGM_CTRL_PKT_SIZE 1
#define DGM_MTU 1500
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 9297f7dea9d8..8303fe3ebf89 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -25,7 +25,6 @@ struct cfserl {
spinlock_t sync;
bool usestx;
};
-#define STXLEN(layr) (layr->usestx ? 1 : 0)
static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index efad410e4c82..315c0d601368 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -20,7 +20,7 @@
#define UTIL_REMOTE_SHUTDOWN 0x82
#define UTIL_FLOW_OFF 0x81
#define UTIL_FLOW_ON 0x80
-#define UTIL_CTRL_PKT_SIZE 1
+
static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3b425b189a99..c3b1dec4acf6 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -17,7 +17,7 @@
#define VEI_FLOW_OFF 0x81
#define VEI_FLOW_ON 0x80
#define VEI_SET_PIN 0x82
-#define VEI_CTRL_PKT_SIZE 1
+
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dff633d62e5b..35b36b86d762 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
struct kvec iov = {buf, len};
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+ int r;
- return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+ r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+ if (r == -EAGAIN)
+ r = 0;
+ return r;
}
/*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
size_t kvlen, size_t len, int more)
{
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+ int r;
if (more)
msg.msg_flags |= MSG_MORE;
else
msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
- return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+ r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
+ if (r == -EAGAIN)
+ r = 0;
+ return r;
}
@@ -847,6 +855,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
(msg->pages || msg->pagelist || msg->bio || in_trail))
kunmap(page);
+ if (ret == -EAGAIN)
+ ret = 0;
if (ret <= 0)
goto out;
@@ -1737,16 +1747,12 @@ more_kvec:
if (con->out_skip) {
ret = write_partial_skip(con);
if (ret <= 0)
- goto done;
- if (ret < 0) {
- dout("try_write write_partial_skip err %d\n", ret);
- goto done;
- }
+ goto out;
}
if (con->out_kvec_left) {
ret = write_partial_kvec(con);
if (ret <= 0)
- goto done;
+ goto out;
}
/* msg pages? */
@@ -1761,11 +1767,11 @@ more_kvec:
if (ret == 1)
goto more_kvec; /* we need to send the footer, too! */
if (ret == 0)
- goto done;
+ goto out;
if (ret < 0) {
dout("try_write write_partial_msg_pages err %d\n",
ret);
- goto done;
+ goto out;
}
}
@@ -1789,10 +1795,9 @@ do_next:
/* Nothing to do! */
clear_bit(WRITE_PENDING, &con->state);
dout("try_write nothing else to write.\n");
-done:
ret = 0;
out:
- dout("try_write done on %p\n", con);
+ dout("try_write done on %p ret %d\n", con, ret);
return ret;
}
@@ -1821,19 +1826,17 @@ more:
dout("try_read connecting\n");
ret = read_partial_banner(con);
if (ret <= 0)
- goto done;
- if (process_banner(con) < 0) {
- ret = -1;
goto out;
- }
+ ret = process_banner(con);
+ if (ret < 0)
+ goto out;
}
ret = read_partial_connect(con);
if (ret <= 0)
- goto done;
- if (process_connect(con) < 0) {
- ret = -1;
goto out;
- }
+ ret = process_connect(con);
+ if (ret < 0)
+ goto out;
goto more;
}
@@ -1848,7 +1851,7 @@ more:
dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
ret = ceph_tcp_recvmsg(con->sock, buf, skip);
if (ret <= 0)
- goto done;
+ goto out;
con->in_base_pos += ret;
if (con->in_base_pos)
goto more;
@@ -1859,7 +1862,7 @@ more:
*/
ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
if (ret <= 0)
- goto done;
+ goto out;
dout("try_read got tag %d\n", (int)con->in_tag);
switch (con->in_tag) {
case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1873,7 @@ more:
break;
case CEPH_MSGR_TAG_CLOSE:
set_bit(CLOSED, &con->state); /* fixme */
- goto done;
+ goto out;
default:
goto bad_tag;
}
@@ -1882,13 +1885,12 @@ more:
case -EBADMSG:
con->error_msg = "bad crc";
ret = -EIO;
- goto out;
+ break;
case -EIO:
con->error_msg = "io error";
- goto out;
- default:
- goto done;
+ break;
}
+ goto out;
}
if (con->in_tag == CEPH_MSGR_TAG_READY)
goto more;
@@ -1898,15 +1900,13 @@ more:
if (con->in_tag == CEPH_MSGR_TAG_ACK) {
ret = read_partial_ack(con);
if (ret <= 0)
- goto done;
+ goto out;
process_ack(con);
goto more;
}
-done:
- ret = 0;
out:
- dout("try_read done on %p\n", con);
+ dout("try_read done on %p ret %d\n", con, ret);
return ret;
bad_tag:
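
Editor's note: the recurring change in the messenger is the -EAGAIN convention: "would block" is folded into "made no progress" (0), so try_read()/try_write() callers only distinguish negative (hard error), zero (come back later) and positive (progress). A caller shaped around that contract might look like this (illustrative only):

	/* Illustrative loop over a send routine that returns <0, 0 or bytes sent. */
	static int drain_or_defer(struct ceph_connection *con,
				  int (*send_some)(struct ceph_connection *))
	{
		int ret;

		for (;;) {
			ret = send_some(con);
			if (ret < 0)
				return ret;	/* hard error: tear the connection down */
			if (ret == 0)
				return 0;	/* would block: wait for the next writable event */
			/* ret > 0: progress was made, keep going */
		}
	}
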
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ae6631abcc2..30440e7b296c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,7 @@
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
+#include <linux/cpu_rmap.h>
#include "net-sysfs.h"
@@ -1289,7 +1290,7 @@ static int __dev_close(struct net_device *dev)
return retval;
}
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(tmp_list);
@@ -1597,6 +1598,48 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this verify that each tc mapping remains valid and,
+ * if not, reset it to TC0. With no priorities mapping to an
+ * offset/count pair, that pair will no longer be used. In the worst
+ * case, if TC0 itself is invalid, nothing can be done, so priority
+ * mappings are disabled entirely. It is expected that drivers will fix
+ * the mapping if they can before calling netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+ int i;
+ struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+ /* If TC0 is invalidated disable TC mapping */
+ if (tc->offset + tc->count > txq) {
+ pr_warning("Number of in use tx queues changed "
+ "invalidating tc mappings. Priority "
+ "traffic classification disabled!\n");
+ dev->num_tc = 0;
+ return;
+ }
+
+ /* Invalidated prio to tc mappings set to TC0 */
+ for (i = 1; i < TC_BITMASK + 1; i++) {
+ int q = netdev_get_prio_tc_map(dev, i);
+
+ tc = &dev->tc_to_txq[q];
+ if (tc->offset + tc->count > txq) {
+ pr_warning("Number of in use tx queues "
+ "changed. Priority %i to tc "
+ "mapping %i is no longer valid "
+ "setting map to 0\n",
+ i, q);
+ netdev_set_prio_tc_map(dev, i, 0);
+ }
+ }
+}
+
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
@@ -1608,7 +1651,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
- if (dev->reg_state == NETREG_REGISTERED) {
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING) {
ASSERT_RTNL();
rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -1616,6 +1660,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (rc)
return rc;
+ if (dev->num_tc)
+ netif_setup_tc(dev, txq);
+
if (txq < dev->real_num_tx_queues)
qdisc_reset_all_tx_gt(dev, txq);
}
@@ -1815,7 +1862,7 @@ EXPORT_SYMBOL(skb_checksum_help);
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
@@ -2003,7 +2050,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
protocol == htons(ETH_P_FCOE)));
}
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
{
if (!can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
@@ -2015,10 +2062,10 @@ static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features
return features;
}
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
- int features = skb->dev->features;
+ u32 features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2063,7 +2110,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
- int features;
+ u32 features;
/*
 * If the device doesn't need skb->dst, release it right now while
@@ -2165,6 +2212,8 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
unsigned int num_tx_queues)
{
u32 hash;
+ u16 qoffset = 0;
+ u16 qcount = num_tx_queues;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
@@ -2173,13 +2222,19 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
return hash;
}
+ if (dev->num_tc) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ qoffset = dev->tc_to_txq[tc].offset;
+ qcount = dev->tc_to_txq[tc].count;
+ }
+
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
- return (u16) (((u64) hash * num_tx_queues) >> 32);
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
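
Editor's note: with traffic classes enabled, the hash no longer spreads over all queues but over the [offset, offset + count) slice owned by the packet's class. A worked example, assuming 8 queues split into two classes of 4:

	/* Illustrative: tc1 owns queues 4..7 (qoffset = 4, qcount = 4). */
	static u16 pick_queue(u32 hash, u16 qoffset, u16 qcount)
	{
		return (u16) (((u64) hash * qcount) >> 32) + qoffset;
	}

	/* pick_queue(hash, 4, 4) always lands in 4..7, while
	 * pick_queue(hash, 0, 8) reproduces the old behaviour of
	 * hashing over every queue. */
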
@@ -2276,15 +2331,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
- bool contended = qdisc_is_running(q);
+ bool contended;
int rc;
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_calculate_pkt_len(skb, q);
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
* This permits __QDISC_STATE_RUNNING owner to get the lock more often
* and dequeue packets faster.
*/
+ contended = qdisc_is_running(q);
if (unlikely(contended))
spin_lock(&q->busylock);
@@ -2302,7 +2360,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
- qdisc_skb_cb(skb)->pkt_len = skb->len;
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2317,7 +2374,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
- rc = qdisc_enqueue_root(skb, q);
+ rc = q->enqueue(skb, q) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -2536,6 +2593,54 @@ EXPORT_SYMBOL(__skb_get_rxhash);
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
+static struct rps_dev_flow *
+set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow *rflow, u16 next_cpu)
+{
+ u16 tcpu;
+
+ tcpu = rflow->cpu = next_cpu;
+ if (tcpu != RPS_NO_CPU) {
+#ifdef CONFIG_RFS_ACCEL
+ struct netdev_rx_queue *rxqueue;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *old_rflow;
+ u32 flow_id;
+ u16 rxq_index;
+ int rc;
+
+ /* Should we steer this flow to a different hardware queue? */
+ if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
+ !(dev->features & NETIF_F_NTUPLE))
+ goto out;
+ rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
+ if (rxq_index == skb_get_rx_queue(skb))
+ goto out;
+
+ rxqueue = dev->_rx + rxq_index;
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (!flow_table)
+ goto out;
+ flow_id = skb->rxhash & flow_table->mask;
+ rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+ rxq_index, flow_id);
+ if (rc < 0)
+ goto out;
+ old_rflow = rflow;
+ rflow = &flow_table->flows[flow_id];
+ rflow->cpu = next_cpu;
+ rflow->filter = rc;
+ if (old_rflow->filter == rflow->filter)
+ old_rflow->filter = RPS_NO_FILTER;
+ out:
+#endif
+ rflow->last_qtail =
+ per_cpu(softnet_data, tcpu).input_queue_head;
+ }
+
+ return rflow;
+}
+
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
@@ -2607,12 +2712,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
- rflow->last_qtail)) >= 0)) {
- tcpu = rflow->cpu = next_cpu;
- if (tcpu != RPS_NO_CPU)
- rflow->last_qtail = per_cpu(softnet_data,
- tcpu).input_queue_head;
- }
+ rflow->last_qtail)) >= 0))
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
@@ -2633,6 +2735,46 @@ done:
return cpu;
}
+#ifdef CONFIG_RFS_ACCEL
+
+/**
+ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
+ * @dev: Device on which the filter was set
+ * @rxq_index: RX queue index
+ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
+ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
+ *
+ * Drivers that implement ndo_rx_flow_steer() should periodically call
+ * this function for each installed filter and remove the filters for
+ * which it returns %true.
+ */
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ u32 flow_id, u16 filter_id)
+{
+ struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *rflow;
+ bool expire = true;
+ int cpu;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (flow_table && flow_id <= flow_table->mask) {
+ rflow = &flow_table->flows[flow_id];
+ cpu = ACCESS_ONCE(rflow->cpu);
+ if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+ ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+ rflow->last_qtail) <
+ (int)(10 * flow_table->mask)))
+ expire = false;
+ }
+ rcu_read_unlock();
+ return expire;
+}
+EXPORT_SYMBOL(rps_may_expire_flow);
+
+#endif /* CONFIG_RFS_ACCEL */
+
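
Editor's note: as the rps_may_expire_flow() kernel-doc says, a driver that installs filters from ndo_rx_flow_steer() is expected to scan them periodically and remove the stale ones. A hedged sketch of such a scan; the filter table layout is hypothetical:

	struct example_filter {
		bool	inserted;
		u16	rxq_index;
		u32	flow_id;
		u16	filter_id;
	};

	static void example_expire_filters(struct net_device *dev,
					   struct example_filter *tbl,
					   unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (!tbl[i].inserted)
				continue;

			if (rps_may_expire_flow(dev, tbl[i].rxq_index,
						tbl[i].flow_id,
						tbl[i].filter_id)) {
				/* remove the hardware filter here */
				tbl[i].inserted = false;
			}
		}
	}
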
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
@@ -2954,64 +3096,31 @@ void netdev_rx_handler_unregister(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
- struct net_device *master)
-{
- if (skb->pkt_type == PACKET_HOST) {
- u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
- memcpy(dest, master->dev_addr, ETH_ALEN);
- }
-}
-
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
-int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+static void vlan_on_bond_hook(struct sk_buff *skb)
{
- struct net_device *dev = skb->dev;
-
- if (master->priv_flags & IFF_MASTER_ARPMON)
- dev->last_rx = jiffies;
-
- if ((master->priv_flags & IFF_MASTER_ALB) &&
- (master->priv_flags & IFF_BRIDGE_PORT)) {
- /* Do address unmangle. The local destination address
- * will be always the one master has. Provides the right
- * functionality in a bridge.
- */
- skb_bond_set_mac_by_master(skb, master);
- }
-
- if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
- if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
- skb->protocol == __cpu_to_be16(ETH_P_ARP))
- return 0;
-
- if (master->priv_flags & IFF_MASTER_ALB) {
- if (skb->pkt_type != PACKET_BROADCAST &&
- skb->pkt_type != PACKET_MULTICAST)
- return 0;
- }
- if (master->priv_flags & IFF_MASTER_8023AD &&
- skb->protocol == __cpu_to_be16(ETH_P_SLOW))
- return 0;
+ /*
+ * Make sure ARP frames received on VLAN interfaces stacked on
+ * bonding interfaces still make their way to any base bonding
+ * device that may have registered for a specific ptype.
+ */
+ if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
+ vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
+ skb->protocol == htons(ETH_P_ARP)) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
- return 1;
+ if (!skb2)
+ return;
+ skb2->dev = vlan_dev_real_dev(skb->dev);
+ netif_rx(skb2);
}
- return 0;
}
-EXPORT_SYMBOL(__skb_bond_should_drop);
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
- struct net_device *master;
- struct net_device *null_or_orig;
- struct net_device *orig_or_bond;
+ struct net_device *null_or_dev;
int ret = NET_RX_DROP;
__be16 type;
@@ -3026,28 +3135,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (!skb->skb_iif)
skb->skb_iif = skb->dev->ifindex;
-
- /*
- * bonding note: skbs received on inactive slaves should only
- * be delivered to pkt handlers that are exact matches. Also
- * the deliver_no_wcard flag will be set. If packet handlers
- * are sensitive to duplicate packets these skbs will need to
- * be dropped at the handler.
- */
- null_or_orig = NULL;
orig_dev = skb->dev;
- master = ACCESS_ONCE(orig_dev->master);
- if (skb->deliver_no_wcard)
- null_or_orig = orig_dev;
- else if (master) {
- if (skb_bond_should_drop(skb, master)) {
- skb->deliver_no_wcard = 1;
- null_or_orig = orig_dev; /* deliver only exact match */
- } else
- skb->dev = master;
- }
- __this_cpu_inc(softnet_data.processed);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
@@ -3056,6 +3145,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
rcu_read_lock();
+another_round:
+
+ __this_cpu_inc(softnet_data.processed);
+
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3064,8 +3157,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
#endif
list_for_each_entry_rcu(ptype, &ptype_all, list) {
- if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
- ptype->dev == orig_dev) {
+ if (!ptype->dev || ptype->dev == skb->dev) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -3079,16 +3171,20 @@ static int __netif_receive_skb(struct sk_buff *skb)
ncls:
#endif
- /* Handle special case of bridge or macvlan */
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
+ struct net_device *prev_dev;
+
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
+ prev_dev = skb->dev;
skb = rx_handler(skb);
if (!skb)
goto out;
+ if (skb->dev != prev_dev)
+ goto another_round;
}
if (vlan_tx_tag_present(skb)) {
@@ -3103,24 +3199,16 @@ ncls:
goto out;
}
- /*
- * Make sure frames received on VLAN interfaces stacked on
- * bonding interfaces still make their way to any base bonding
- * device that may have registered for a specific ptype. The
- * handler may have to adjust skb->dev and orig_dev.
- */
- orig_or_bond = orig_dev;
- if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
- (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
- orig_or_bond = vlan_dev_real_dev(skb->dev);
- }
+ vlan_on_bond_hook(skb);
+
+ /* deliver only exact match when indicated */
+ null_or_dev = skb->deliver_no_wcard ? skb->dev : NULL;
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
- if (ptype->type == type && (ptype->dev == null_or_orig ||
- ptype->dev == skb->dev || ptype->dev == orig_dev ||
- ptype->dev == orig_or_bond)) {
+ if (ptype->type == type &&
+ (ptype->dev == null_or_dev || ptype->dev == skb->dev)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -3917,12 +4005,15 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev = (v == SEQ_START_TOKEN) ?
- first_net_device(seq_file_net(seq)) :
- next_net_device((struct net_device *)v);
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ dev = first_net_device_rcu(seq_file_net(seq));
+ else
+ dev = next_net_device_rcu(dev);
++*pos;
- return rcu_dereference(dev);
+ return dev;
}
void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4206,15 +4297,14 @@ static int __init dev_proc_init(void)
/**
- * netdev_set_master - set up master/slave pair
+ * netdev_set_master - set up master pointer
* @slave: slave device
* @master: new master device
*
* Changes the master device of the slave. Pass %NULL to break the
* bonding. The caller must hold the RTNL semaphore. On a failure
* a negative errno code is returned. On success the reference counts
- * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
- * function returns zero.
+ * are adjusted and the function returns zero.
*/
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
@@ -4234,6 +4324,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
synchronize_net();
dev_put(old);
}
+ return 0;
+}
+EXPORT_SYMBOL(netdev_set_master);
+
+/**
+ * netdev_set_bond_master - set up bonding master/slave pair
+ * @slave: slave device
+ * @master: new master device
+ *
+ * Changes the master device of the slave. Pass %NULL to break the
+ * bonding. The caller must hold the RTNL semaphore. On a failure
+ * a negative errno code is returned. On success %RTM_NEWLINK is sent
+ * to the routing socket and the function returns zero.
+ */
+int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = netdev_set_master(slave, master);
+ if (err)
+ return err;
if (master)
slave->flags |= IFF_SLAVE;
else
@@ -4242,7 +4355,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
return 0;
}
-EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_set_bond_master);
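
Editor's note: the split means callers now pick the helper that matches their semantics: the bridge path earlier in this patch uses netdev_set_master() (master pointer only), while bonding-style enslavement keeps the IFF_SLAVE flag and RTM_NEWLINK notification via netdev_set_bond_master(). Roughly, and purely as illustration:

	static int example_bond_enslave(struct net_device *bond,
					struct net_device *slave)
	{
		int err;

		ASSERT_RTNL();

		/* sets IFF_SLAVE and sends RTM_NEWLINK on success */
		err = netdev_set_bond_master(slave, bond);
		if (err)
			return err;

		/* ... bonding-specific setup ... */
		return 0;
	}
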
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
@@ -4579,6 +4692,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
EXPORT_SYMBOL(dev_set_mtu);
/**
+ * dev_set_group - Change group this device belongs to
+ * @dev: device
+ * @new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+ dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
+/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
@@ -5069,41 +5193,55 @@ static void rollback_registered(struct net_device *dev)
list_del(&single);
}
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(struct net_device *dev, u32 features)
{
+ /* Fix illegal checksum combinations */
+ if ((features & NETIF_F_HW_CSUM) &&
+ (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ netdev_info(dev, "mixed HW and IP checksum settings.\n");
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
+
+ if ((features & NETIF_F_NO_CSUM) &&
+ (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ netdev_info(dev, "mixed no checksumming and other settings.\n");
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+ }
+
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
- if (name)
- printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
- "checksum feature.\n", name);
+ netdev_info(dev,
+ "Dropping NETIF_F_SG since no checksum feature.\n");
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- if (name)
- printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
- "SG feature.\n", name);
+ netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
features &= ~NETIF_F_TSO;
}
+ /* Software GSO depends on SG. */
+ if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
+ netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+ features &= ~NETIF_F_GSO;
+ }
+
+ /* UFO needs SG and checksumming */
if (features & NETIF_F_UFO) {
/* maybe split UFO into V4 and V6? */
if (!((features & NETIF_F_GEN_CSUM) ||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- if (name)
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no checksum offload features.\n",
- name);
+ netdev_info(dev,
+ "Dropping NETIF_F_UFO since no checksum offload features.\n");
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
- if (name)
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no NETIF_F_SG feature.\n", name);
+ netdev_info(dev,
+ "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
features &= ~NETIF_F_UFO;
}
}
@@ -5112,6 +5250,37 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
}
EXPORT_SYMBOL(netdev_fix_features);
+void netdev_update_features(struct net_device *dev)
+{
+ u32 features;
+ int err = 0;
+
+ features = netdev_get_wanted_features(dev);
+
+ if (dev->netdev_ops->ndo_fix_features)
+ features = dev->netdev_ops->ndo_fix_features(dev, features);
+
+ /* driver might be less strict about feature dependencies */
+ features = netdev_fix_features(dev, features);
+
+ if (dev->features == features)
+ return;
+
+ netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
+ dev->features, features);
+
+ if (dev->netdev_ops->ndo_set_features)
+ err = dev->netdev_ops->ndo_set_features(dev, features);
+
+ if (!err)
+ dev->features = features;
+ else if (err < 0)
+ netdev_err(dev,
+ "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
+ err, features, dev->features);
+}
+EXPORT_SYMBOL(netdev_update_features);
+
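
Editor's note: netdev_update_features() implies a driver-side contract: advertise togglable bits in hw_features, veto impossible combinations in ndo_fix_features(), and program the hardware in ndo_set_features(). A minimal sketch with an invented dependency (this imaginary NIC can only do LRO when RX checksumming is on):

	static u32 example_fix_features(struct net_device *dev, u32 features)
	{
		/* invented constraint: LRO requires RX checksum offload */
		if (!(features & NETIF_F_RXCSUM))
			features &= ~NETIF_F_LRO;

		return features;
	}

	static int example_set_features(struct net_device *dev, u32 features)
	{
		u32 changed = dev->features ^ features;

		if (changed & NETIF_F_RXCSUM) {
			/* reprogram the NIC's RX checksum engine here */
		}

		return 0;
	}
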
/**
* netif_stacked_transfer_operstate - transfer operstate
* @rootdev: the root or lower level device to transfer state from
@@ -5246,27 +5415,19 @@ int register_netdevice(struct net_device *dev)
if (dev->iflink == -1)
dev->iflink = dev->ifindex;
- /* Fix illegal checksum combinations */
- if ((dev->features & NETIF_F_HW_CSUM) &&
- (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
- dev->name);
- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
- }
+ /* Transfer changeable features to wanted_features and enable
+ * software offloads (GSO and GRO).
+ */
+ dev->hw_features |= NETIF_F_SOFT_FEATURES;
+ dev->features |= NETIF_F_SOFT_FEATURES;
+ dev->wanted_features = dev->features & dev->hw_features;
- if ((dev->features & NETIF_F_NO_CSUM) &&
- (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
- dev->name);
- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+ /* Avoid warning from netdev_fix_features() for GSO without SG */
+ if (!(dev->wanted_features & NETIF_F_SG)) {
+ dev->wanted_features &= ~NETIF_F_GSO;
+ dev->features &= ~NETIF_F_GSO;
}
- dev->features = netdev_fix_features(dev->features, dev->name);
-
- /* Enable software GSO if SG is supported. */
- if (dev->features & NETIF_F_SG)
- dev->features |= NETIF_F_GSO;
-
/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
* vlan_dev_init() will do the dev->features check, so these features
* are enabled only if supported by underlying device.
@@ -5283,6 +5444,8 @@ int register_netdevice(struct net_device *dev)
goto err_uninit;
dev->reg_state = NETREG_REGISTERED;
+ netdev_update_features(dev);
+
/*
* Default initial state at registry is that the
* device is present.
@@ -5687,6 +5850,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
#endif
strcpy(dev->name, name);
+ dev->group = INIT_NETDEV_GROUP;
return dev;
free_all:
@@ -6001,8 +6165,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
- unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
{
/* If device needs checksumming, downgrade to it. */
if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..7b39f3ed2fda 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
list_for_each_entry(ha, &from_list->list, list) {
type = addr_type ? addr_type : ha->type;
- __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+ __hw_addr_del(to_list, ha->addr, addr_len, type);
}
}
EXPORT_SYMBOL(__hw_addr_del_multiple);
@@ -357,8 +357,8 @@ EXPORT_SYMBOL(dev_addr_add_multiple);
/**
* dev_addr_del_multiple - Delete device addresses by another device
* @to_dev: device where the addresses will be deleted
- * @from_dev: device by which addresses the addresses will be deleted
- * @addr_type: address type - 0 means type will used from from_dev
+ * @from_dev: device supplying the addresses to be deleted
+ * @addr_type: address type - 0 means type will be used from from_dev
*
* Deletes addresses in to device by the list of addresses in from device.
*
diff --git a/net/core/dst.c b/net/core/dst.c
index b99c7c7ffce2..91104d35de7d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -164,7 +164,9 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
-void *dst_alloc(struct dst_ops *ops)
+const u32 dst_default_metrics[RTAX_MAX];
+
+void *dst_alloc(struct dst_ops *ops, int initial_ref)
{
struct dst_entry *dst;
@@ -175,11 +177,12 @@ void *dst_alloc(struct dst_ops *ops)
dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
if (!dst)
return NULL;
- atomic_set(&dst->__refcnt, 0);
+ atomic_set(&dst->__refcnt, initial_ref);
dst->ops = ops;
dst->lastuse = jiffies;
dst->path = dst;
dst->input = dst->output = dst_discard;
+ dst_init_metrics(dst, dst_default_metrics, true);
#if RT_CACHE_DEBUG >= 2
atomic_inc(&dst_total);
#endif
@@ -282,6 +285,42 @@ void dst_release(struct dst_entry *dst)
}
EXPORT_SYMBOL(dst_release);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+ u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+
+ if (p) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ kfree(p);
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ }
+ }
+ return p;
+}
+EXPORT_SYMBOL(dst_cow_metrics_generic);
+
+/* Caller asserts that dst_metrics_read_only(dst) is false. */
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+ unsigned long prev, new;
+
+ new = (unsigned long) dst_default_metrics;
+ prev = cmpxchg(&dst->_metrics, old, new);
+ if (prev == old)
+ kfree(__DST_METRICS_PTR(old));
+}
+EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+
/**
* skb_dst_set_noref - sets skb dst, without a reference
* @skb: buffer
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index ff2302910b5e..c1a71bb738da 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -34,12 +34,6 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
-u32 ethtool_op_get_rx_csum(struct net_device *dev)
-{
- return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_rx_csum);
-
u32 ethtool_op_get_tx_csum(struct net_device *dev)
{
return (dev->features & NETIF_F_ALL_CSUM) != 0;
@@ -55,6 +49,7 @@ int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
{
@@ -171,6 +166,381 @@ EXPORT_SYMBOL(ethtool_ntuple_flush);
/* Handlers for each ethtool command */
+#define ETHTOOL_DEV_FEATURE_WORDS 1
+
+static void ethtool_get_features_compat(struct net_device *dev,
+ struct ethtool_get_features_block *features)
+{
+ if (!dev->ethtool_ops)
+ return;
+
+ /* getting RX checksum */
+ if (dev->ethtool_ops->get_rx_csum)
+ if (dev->ethtool_ops->get_rx_csum(dev))
+ features[0].active |= NETIF_F_RXCSUM;
+
+ /* mark legacy-changeable features */
+ if (dev->ethtool_ops->set_sg)
+ features[0].available |= NETIF_F_SG;
+ if (dev->ethtool_ops->set_tx_csum)
+ features[0].available |= NETIF_F_ALL_CSUM;
+ if (dev->ethtool_ops->set_tso)
+ features[0].available |= NETIF_F_ALL_TSO;
+ if (dev->ethtool_ops->set_rx_csum)
+ features[0].available |= NETIF_F_RXCSUM;
+ if (dev->ethtool_ops->set_flags)
+ features[0].available |= flags_dup_features;
+}
+
+static int ethtool_set_feature_compat(struct net_device *dev,
+ int (*legacy_set)(struct net_device *, u32),
+ struct ethtool_set_features_block *features, u32 mask)
+{
+ u32 do_set;
+
+ if (!legacy_set)
+ return 0;
+
+ if (!(features[0].valid & mask))
+ return 0;
+
+ features[0].valid &= ~mask;
+
+ do_set = !!(features[0].requested & mask);
+
+ if (legacy_set(dev, do_set) < 0)
+ netdev_info(dev,
+ "Legacy feature change (%s) failed for 0x%08x\n",
+ do_set ? "set" : "clear", mask);
+
+ return 1;
+}
+
+static int ethtool_set_features_compat(struct net_device *dev,
+ struct ethtool_set_features_block *features)
+{
+ int compat;
+
+ if (!dev->ethtool_ops)
+ return 0;
+
+ compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
+ features, NETIF_F_SG);
+ compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
+ features, NETIF_F_ALL_CSUM);
+ compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
+ features, NETIF_F_ALL_TSO);
+ compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
+ features, NETIF_F_RXCSUM);
+ compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
+ features, flags_dup_features);
+
+ return compat;
+}
+
+static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gfeatures cmd = {
+ .cmd = ETHTOOL_GFEATURES,
+ .size = ETHTOOL_DEV_FEATURE_WORDS,
+ };
+ struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
+ {
+ .available = dev->hw_features,
+ .requested = dev->wanted_features,
+ .active = dev->features,
+ .never_changed = NETIF_F_NEVER_CHANGE,
+ },
+ };
+ u32 __user *sizeaddr;
+ u32 copy_size;
+
+ ethtool_get_features_compat(dev, features);
+
+ sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
+ if (get_user(copy_size, sizeaddr))
+ return -EFAULT;
+
+ if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
+ copy_size = ETHTOOL_DEV_FEATURE_WORDS;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ useraddr += sizeof(cmd);
+ if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_sfeatures cmd;
+ struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
+ int ret = 0;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+ useraddr += sizeof(cmd);
+
+ if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
+ return -EINVAL;
+
+ if (copy_from_user(features, useraddr, sizeof(features)))
+ return -EFAULT;
+
+ if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
+ return -EINVAL;
+
+ if (ethtool_set_features_compat(dev, features))
+ ret |= ETHTOOL_F_COMPAT;
+
+ if (features[0].valid & ~dev->hw_features) {
+ features[0].valid &= dev->hw_features;
+ ret |= ETHTOOL_F_UNSUPPORTED;
+ }
+
+ dev->wanted_features &= ~features[0].valid;
+ dev->wanted_features |= features[0].valid & features[0].requested;
+ netdev_update_features(dev);
+
+ if ((dev->wanted_features ^ dev->features) & features[0].valid)
+ ret |= ETHTOOL_F_WISH;
+
+ return ret;
+}
+
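
Editor's note: from userspace the new pair is driven through SIOCETHTOOL with a header followed by ETHTOOL_DEV_FEATURE_WORDS feature blocks. A rough sketch of a GFEATURES query, assuming one feature word as in this patch (error handling trimmed):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int query_features(int fd, const char *ifname,
				  struct ethtool_get_features_block *out)
	{
		union {
			struct ethtool_gfeatures hdr;
			char buf[sizeof(struct ethtool_gfeatures) +
				 sizeof(struct ethtool_get_features_block)];
		} q;
		struct ifreq ifr;
		int err;

		memset(&q, 0, sizeof(q));
		q.hdr.cmd = ETHTOOL_GFEATURES;
		q.hdr.size = 1;			/* ETHTOOL_DEV_FEATURE_WORDS */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&q;

		err = ioctl(fd, SIOCETHTOOL, &ifr);
		if (!err)
			*out = q.hdr.features[0];	/* available/requested/active/never_changed */
		return err;
	}
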
+static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
+ /* NETIF_F_SG */ "tx-scatter-gather",
+ /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
+ /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
+ /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
+ /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6",
+ /* NETIF_F_HIGHDMA */ "highdma",
+ /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
+ /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
+
+ /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse",
+ /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter",
+ /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
+ /* NETIF_F_GSO */ "tx-generic-segmentation",
+ /* NETIF_F_LLTX */ "tx-lockless",
+ /* NETIF_F_NETNS_LOCAL */ "netns-local",
+ /* NETIF_F_GRO */ "rx-gro",
+ /* NETIF_F_LRO */ "rx-lro",
+
+ /* NETIF_F_TSO */ "tx-tcp-segmentation",
+ /* NETIF_F_UFO */ "tx-udp-fragmentation",
+ /* NETIF_F_GSO_ROBUST */ "tx-gso-robust",
+ /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation",
+ /* NETIF_F_TSO6 */ "tx-tcp6-segmentation",
+ /* NETIF_F_FSO */ "tx-fcoe-segmentation",
+ "",
+ "",
+
+ /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc",
+ /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp",
+ /* NETIF_F_FCOE_MTU */ "fcoe-mtu",
+ /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
+ /* NETIF_F_RXHASH */ "rx-hashing",
+ /* NETIF_F_RXCSUM */ "rx-checksum",
+ "",
+ "",
+};
+
+static int __ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (sset == ETH_SS_FEATURES)
+ return ARRAY_SIZE(netdev_features_strings);
+
+ if (ops && ops->get_sset_count && ops->get_strings)
+ return ops->get_sset_count(dev, sset);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void __ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (stringset == ETH_SS_FEATURES)
+ memcpy(data, netdev_features_strings,
+ sizeof(netdev_features_strings));
+ else
+ /* ops->get_strings is valid because checked earlier */
+ ops->get_strings(dev, stringset, data);
+}
+
+static u32 ethtool_get_feature_mask(u32 eth_cmd)
+{
+ /* feature masks of legacy discrete ethtool ops */
+
+ switch (eth_cmd) {
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_STXCSUM:
+ return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_SRXCSUM:
+ return NETIF_F_RXCSUM;
+ case ETHTOOL_GSG:
+ case ETHTOOL_SSG:
+ return NETIF_F_SG;
+ case ETHTOOL_GTSO:
+ case ETHTOOL_STSO:
+ return NETIF_F_ALL_TSO;
+ case ETHTOOL_GUFO:
+ case ETHTOOL_SUFO:
+ return NETIF_F_UFO;
+ case ETHTOOL_GGSO:
+ case ETHTOOL_SGSO:
+ return NETIF_F_GSO;
+ case ETHTOOL_GGRO:
+ case ETHTOOL_SGRO:
+ return NETIF_F_GRO;
+ default:
+ BUG();
+ }
+}
+
+static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops)
+ return NULL;
+
+ switch (ethcmd) {
+ case ETHTOOL_GTXCSUM:
+ return ops->get_tx_csum;
+ case ETHTOOL_GRXCSUM:
+ return ops->get_rx_csum;
+ case ETHTOOL_GSG:
+ return ops->get_sg;
+ case ETHTOOL_GTSO:
+ return ops->get_tso;
+ case ETHTOOL_GUFO:
+ return ops->get_ufo;
+ default:
+ return NULL;
+ }
+}
+
+static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
+{
+ return !!(dev->features & NETIF_F_ALL_CSUM);
+}
+
+static int ethtool_get_one_feature(struct net_device *dev,
+ char __user *useraddr, u32 ethcmd)
+{
+ u32 mask = ethtool_get_feature_mask(ethcmd);
+ struct ethtool_value edata = {
+ .cmd = ethcmd,
+ .data = !!(dev->features & mask),
+ };
+
+ /* compatibility with discrete get_ ops */
+ if (!(dev->hw_features & mask)) {
+ u32 (*actor)(struct net_device *);
+
+ actor = __ethtool_get_one_feature_actor(dev, ethcmd);
+
+ /* bug compatibility with old get_rx_csum */
+ if (ethcmd == ETHTOOL_GRXCSUM && !actor)
+ actor = __ethtool_get_rx_csum_oldbug;
+
+ if (actor)
+ edata.data = actor(dev);
+ }
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_sg(struct net_device *dev, u32 data);
+static int __ethtool_set_tso(struct net_device *dev, u32 data);
+static int __ethtool_set_ufo(struct net_device *dev, u32 data);
+
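+/* Set one offload bit.  Converted drivers are handled through
+ * wanted_features + netdev_update_features(); everything else falls
+ * back to the legacy discrete set_* ops below.
+ */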
+static int ethtool_set_one_feature(struct net_device *dev,
+ void __user *useraddr, u32 ethcmd)
+{
+ struct ethtool_value edata;
+ u32 mask;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ mask = ethtool_get_feature_mask(ethcmd);
+ mask &= dev->hw_features;
+ if (mask) {
+ if (edata.data)
+ dev->wanted_features |= mask;
+ else
+ dev->wanted_features &= ~mask;
+
+ netdev_update_features(dev);
+ return 0;
+ }
+
+ /* The driver is not converted to ndo_fix_features or does not
+ * support changing this offload. In the latter case it won't
+ * have the corresponding ethtool_ops field set.
+ *
+ * The following part is to be removed once all drivers advertise
+ * their changeable features in netdev->hw_features and stop
+ * using the discrete offload setting ops.
+ */
+
+ switch (ethcmd) {
+ case ETHTOOL_STXCSUM:
+ return __ethtool_set_tx_csum(dev, edata.data);
+ case ETHTOOL_SRXCSUM:
+ return __ethtool_set_rx_csum(dev, edata.data);
+ case ETHTOOL_SSG:
+ return __ethtool_set_sg(dev, edata.data);
+ case ETHTOOL_STSO:
+ return __ethtool_set_tso(dev, edata.data);
+ case ETHTOOL_SUFO:
+ return __ethtool_set_ufo(dev, edata.data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
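+/* ETHTOOL_SFLAGS handler: route the change through wanted_features
+ * unless the driver still implements the legacy set_flags() op.
+ */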
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
+{
+ u32 changed;
+
+ if (data & ~flags_dup_features)
+ return -EINVAL;
+
+ /* legacy set_flags() op */
+ if (dev->ethtool_ops->set_flags) {
+ if (unlikely(dev->hw_features & flags_dup_features))
+ netdev_warn(dev,
+ "driver BUG: mixed hw_features and set_flags()\n");
+ return dev->ethtool_ops->set_flags(dev, data);
+ }
+
+ /* allow changing only bits set in hw_features */
+ changed = (data ^ dev->wanted_features) & flags_dup_features;
+ if (changed & ~dev->hw_features)
+ return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
+
+ dev->wanted_features =
+ (dev->wanted_features & ~changed) | data;
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
@@ -251,14 +621,10 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
void __user *useraddr)
{
struct ethtool_sset_info info;
- const struct ethtool_ops *ops = dev->ethtool_ops;
u64 sset_mask;
int i, idx = 0, n_bits = 0, ret, rc;
u32 *info_buf = NULL;
- if (!ops->get_sset_count)
- return -EOPNOTSUPP;
-
if (copy_from_user(&info, useraddr, sizeof(info)))
return -EFAULT;
@@ -285,7 +651,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
if (!(sset_mask & (1ULL << i)))
continue;
- rc = ops->get_sset_count(dev, i);
+ rc = __ethtool_get_sset_count(dev, i);
if (rc >= 0) {
info.sset_mask |= (1ULL << i);
info_buf[idx++] = rc;
@@ -1091,6 +1457,9 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
{
int err;
+ if (data && !(dev->features & NETIF_F_ALL_CSUM))
+ return -EINVAL;
+
if (!data && dev->ethtool_ops->set_tso) {
err = dev->ethtool_ops->set_tso(dev, 0);
if (err)
@@ -1105,145 +1474,55 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
return dev->ethtool_ops->set_sg(dev, data);
}
-static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
int err;
if (!dev->ethtool_ops->set_tx_csum)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (!edata.data && dev->ethtool_ops->set_sg) {
+ if (!data && dev->ethtool_ops->set_sg) {
err = __ethtool_set_sg(dev, 0);
if (err)
return err;
}
- return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+ return dev->ethtool_ops->set_tx_csum(dev, data);
}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
-
if (!dev->ethtool_ops->set_rx_csum)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (!edata.data && dev->ethtool_ops->set_sg)
+ if (!data)
dev->features &= ~NETIF_F_GRO;
- return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+ return dev->ethtool_ops->set_rx_csum(dev, data);
}
-static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tso(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
-
- if (!dev->ethtool_ops->set_sg)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (edata.data &&
- !(dev->features & NETIF_F_ALL_CSUM))
- return -EINVAL;
-
- return __ethtool_set_sg(dev, edata.data);
-}
-
-static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata;
-
if (!dev->ethtool_ops->set_tso)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (edata.data && !(dev->features & NETIF_F_SG))
+ if (data && !(dev->features & NETIF_F_SG))
return -EINVAL;
- return dev->ethtool_ops->set_tso(dev, edata.data);
+ return dev->ethtool_ops->set_tso(dev, data);
}
-static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_ufo(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
-
if (!dev->ethtool_ops->set_ufo)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if (edata.data && !(dev->features & NETIF_F_SG))
+ if (data && !(dev->features & NETIF_F_SG))
return -EINVAL;
- if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+ if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
return -EINVAL;
- return dev->ethtool_ops->set_ufo(dev, edata.data);
-}
-
-static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GGSO };
-
- edata.data = dev->features & NETIF_F_GSO;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if (edata.data)
- dev->features |= NETIF_F_GSO;
- else
- dev->features &= ~NETIF_F_GSO;
- return 0;
-}
-
-static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GGRO };
-
- edata.data = dev->features & NETIF_F_GRO;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (edata.data) {
- u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
- dev->ethtool_ops->get_rx_csum(dev) :
- ethtool_op_get_rx_csum(dev);
-
- if (!rxcsum)
- return -EINVAL;
- dev->features |= NETIF_F_GRO;
- } else
- dev->features &= ~NETIF_F_GRO;
-
- return 0;
+ return dev->ethtool_ops->set_ufo(dev, data);
}
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
@@ -1287,17 +1566,13 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
{
struct ethtool_gstrings gstrings;
- const struct ethtool_ops *ops = dev->ethtool_ops;
u8 *data;
int ret;
- if (!ops->get_strings || !ops->get_sset_count)
- return -EOPNOTSUPP;
-
if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
return -EFAULT;
- ret = ops->get_sset_count(dev, gstrings.string_set);
+ ret = __ethtool_get_sset_count(dev, gstrings.string_set);
if (ret < 0)
return ret;
@@ -1307,7 +1582,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
if (!data)
return -ENOMEM;
- ops->get_strings(dev, gstrings.string_set, data);
+ __ethtool_get_strings(dev, gstrings.string_set, data);
ret = -EFAULT;
if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1317,7 +1592,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
goto out;
ret = 0;
- out:
+out:
kfree(data);
return ret;
}
@@ -1458,7 +1733,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
void __user *useraddr = ifr->ifr_data;
u32 ethcmd;
int rc;
- unsigned long old_features;
+ u32 old_features;
if (!dev || !netif_device_present(dev))
return -ENODEV;
@@ -1500,6 +1775,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
+ case ETHTOOL_GFEATURES:
break;
default:
if (!capable(CAP_NET_ADMIN))
@@ -1570,42 +1846,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SPAUSEPARAM:
rc = ethtool_set_pauseparam(dev, useraddr);
break;
- case ETHTOOL_GRXCSUM:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_rx_csum ?
- dev->ethtool_ops->get_rx_csum :
- ethtool_op_get_rx_csum));
- break;
- case ETHTOOL_SRXCSUM:
- rc = ethtool_set_rx_csum(dev, useraddr);
- break;
- case ETHTOOL_GTXCSUM:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_tx_csum ?
- dev->ethtool_ops->get_tx_csum :
- ethtool_op_get_tx_csum));
- break;
- case ETHTOOL_STXCSUM:
- rc = ethtool_set_tx_csum(dev, useraddr);
- break;
- case ETHTOOL_GSG:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_sg ?
- dev->ethtool_ops->get_sg :
- ethtool_op_get_sg));
- break;
- case ETHTOOL_SSG:
- rc = ethtool_set_sg(dev, useraddr);
- break;
- case ETHTOOL_GTSO:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_tso ?
- dev->ethtool_ops->get_tso :
- ethtool_op_get_tso));
- break;
- case ETHTOOL_STSO:
- rc = ethtool_set_tso(dev, useraddr);
- break;
case ETHTOOL_TEST:
rc = ethtool_self_test(dev, useraddr);
break;
@@ -1621,21 +1861,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPERMADDR:
rc = ethtool_get_perm_addr(dev, useraddr);
break;
- case ETHTOOL_GUFO:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_ufo ?
- dev->ethtool_ops->get_ufo :
- ethtool_op_get_ufo));
- break;
- case ETHTOOL_SUFO:
- rc = ethtool_set_ufo(dev, useraddr);
- break;
- case ETHTOOL_GGSO:
- rc = ethtool_get_gso(dev, useraddr);
- break;
- case ETHTOOL_SGSO:
- rc = ethtool_set_gso(dev, useraddr);
- break;
case ETHTOOL_GFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
(dev->ethtool_ops->get_flags ?
@@ -1643,8 +1868,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
ethtool_op_get_flags));
break;
case ETHTOOL_SFLAGS:
- rc = ethtool_set_value(dev, useraddr,
- dev->ethtool_ops->set_flags);
+ rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
break;
case ETHTOOL_GPFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
@@ -1666,12 +1890,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SRXCLSRLINS:
rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
break;
- case ETHTOOL_GGRO:
- rc = ethtool_get_gro(dev, useraddr);
- break;
- case ETHTOOL_SGRO:
- rc = ethtool_set_gro(dev, useraddr);
- break;
case ETHTOOL_FLASHDEV:
rc = ethtool_flash_device(dev, useraddr);
break;
@@ -1693,6 +1911,30 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SRXFHINDIR:
rc = ethtool_set_rxfh_indir(dev, useraddr);
break;
+ case ETHTOOL_GFEATURES:
+ rc = ethtool_get_features(dev, useraddr);
+ break;
+ case ETHTOOL_SFEATURES:
+ rc = ethtool_set_features(dev, useraddr);
+ break;
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GSG:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GUFO:
+ case ETHTOOL_GGSO:
+ case ETHTOOL_GGRO:
+ rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
+ break;
+ case ETHTOOL_STXCSUM:
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_SSG:
+ case ETHTOOL_STSO:
+ case ETHTOOL_SUFO:
+ case ETHTOOL_SGSO:
+ case ETHTOOL_SGRO:
+ rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
+ break;
default:
rc = -EOPNOTSUPP;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index afc58374ca96..232b1873bb28 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -142,14 +142,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
if (err)
return err;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns);
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return err;
}
diff --git a/net/core/flow.c b/net/core/flow.c
index 127c8a7ffd61..990703b8863b 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -172,9 +172,9 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
static u32 flow_hash_code(struct flow_cache *fc,
struct flow_cache_percpu *fcp,
- struct flowi *key)
+ const struct flowi *key)
{
- u32 *k = (u32 *) key;
+ const u32 *k = (const u32 *) key;
return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
& (flow_cache_hash_size(fc) - 1);
@@ -186,17 +186,17 @@ typedef unsigned long flow_compare_t;
* important assumptions that we can here, such as alignment and
* constant size.
*/
-static int flow_key_compare(struct flowi *key1, struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
{
- flow_compare_t *k1, *k1_lim, *k2;
+ const flow_compare_t *k1, *k1_lim, *k2;
const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
- k1 = (flow_compare_t *) key1;
+ k1 = (const flow_compare_t *) key1;
k1_lim = k1 + n_elem;
- k2 = (flow_compare_t *) key2;
+ k2 = (const flow_compare_t *) key2;
do {
if (*k1++ != *k2++)
@@ -207,7 +207,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
}
struct flow_cache_object *
-flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
flow_resolve_t resolver, void *ctx)
{
struct flow_cache *fc = &flow_cache_global;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 60a902913429..799f06e03a22 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -316,7 +316,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
{
size_t size = entries * sizeof(struct neighbour *);
struct neigh_hash_table *ret;
- struct neighbour **buckets;
+ struct neighbour __rcu **buckets;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
@@ -324,14 +324,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
if (size <= PAGE_SIZE)
buckets = kzalloc(size, GFP_ATOMIC);
else
- buckets = (struct neighbour **)
+ buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
if (!buckets) {
kfree(ret);
return NULL;
}
- rcu_assign_pointer(ret->hash_buckets, buckets);
+ ret->hash_buckets = buckets;
ret->hash_mask = entries - 1;
get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
return ret;
@@ -343,7 +343,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct neigh_hash_table,
rcu);
size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
- struct neighbour **buckets = nht->hash_buckets;
+ struct neighbour __rcu **buckets = nht->hash_buckets;
if (size <= PAGE_SIZE)
kfree(buckets);
@@ -1540,7 +1540,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
panic("cannot create neighbour proc dir entry");
#endif
- tbl->nht = neigh_hash_alloc(8);
+ RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1602,7 +1602,8 @@ int neigh_table_clear(struct neigh_table *tbl)
}
write_unlock(&neigh_tbl_lock);
- call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+ call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
+ neigh_hash_free_rcu);
tbl->nht = NULL;
kfree(tbl->phash_buckets);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e23c01be5a5b..5ceb257e860c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);
@@ -295,6 +295,20 @@ static ssize_t show_ifalias(struct device *dev,
return ret;
}
+NETDEVICE_SHOW(group, fmt_dec);
+
+static int change_group(struct net_device *net, unsigned long new_group)
+{
+ dev_set_group(net, (int) new_group);
+ return 0;
+}
+
+static ssize_t store_group(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_group);
+}
+
static struct device_attribute net_class_attributes[] = {
__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -316,6 +330,7 @@ static struct device_attribute net_class_attributes[] = {
__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
store_tx_queue_len),
+ __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
{}
};
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 02dc2cbcbe86..06be2431753e 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -193,6 +193,17 @@ void netpoll_poll_dev(struct net_device *dev)
poll_napi(dev);
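+ /* On a bonding slave, hand queued netpoll ARP packets over to
+ * the master so they are serviced through the bond device.
+ */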
+ if (dev->priv_flags & IFF_SLAVE) {
+ if (dev->npinfo) {
+ struct net_device *bond_dev = dev->master;
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+ skb->dev = bond_dev;
+ skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+ }
+ }
+ }
+
service_arp_queue(dev->npinfo);
zap_completion_queue();
@@ -313,9 +324,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
tries > 0; --tries) {
if (__netif_tx_trylock(txq)) {
if (!netif_tx_queue_stopped(txq)) {
- dev->priv_flags |= IFF_IN_NETPOLL;
status = ops->ndo_start_xmit(skb, dev);
- dev->priv_flags &= ~IFF_IN_NETPOLL;
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..d73b77adb676 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -251,6 +251,7 @@ struct pktgen_dev {
int max_pkt_size; /* = ETH_ZLEN; */
int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
int nfrags;
+ struct page *page;
u64 delay; /* nano-seconds */
__u64 count; /* Default No packets to send */
@@ -1134,6 +1135,10 @@ static ssize_t pktgen_if_write(struct file *file,
if (node_possible(value)) {
pkt_dev->node = value;
sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+ if (pkt_dev->page) {
+ put_page(pkt_dev->page);
+ pkt_dev->page = NULL;
+ }
}
else
sprintf(pg_result, "ERROR: node not possible");
@@ -2605,6 +2610,90 @@ static inline __be16 build_tci(unsigned int id, unsigned int cfi,
return htons(id | (cfi << 12) | (prio << 13));
}
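+/*
+ * Fill the payload of a pktgen skb, either linearly or as page
+ * fragments, then stamp the pktgen header (magic, sequence number
+ * and timestamp).
+ */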
+static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+ int datalen)
+{
+ struct timeval timestamp;
+ struct pktgen_hdr *pgh;
+
+ pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+ datalen -= sizeof(*pgh);
+
+ if (pkt_dev->nfrags <= 0) {
+ pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
+ memset(pgh + 1, 0, datalen);
+ } else {
+ int frags = pkt_dev->nfrags;
+ int i, len;
+
+ if (frags > MAX_SKB_FRAGS)
+ frags = MAX_SKB_FRAGS;
+ len = datalen - frags * PAGE_SIZE;
+ if (len > 0) {
+ memset(skb_put(skb, len), 0, len);
+ datalen = frags * PAGE_SIZE;
+ }
+
+ i = 0;
+ while (datalen > 0) {
+ if (unlikely(!pkt_dev->page)) {
+ int node = numa_node_id();
+
+ if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
+ node = pkt_dev->node;
+ pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!pkt_dev->page)
+ break;
+ }
+ skb_shinfo(skb)->frags[i].page = pkt_dev->page;
+ get_page(pkt_dev->page);
+ skb_shinfo(skb)->frags[i].page_offset = 0;
+ skb_shinfo(skb)->frags[i].size =
+ (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+ datalen -= skb_shinfo(skb)->frags[i].size;
+ skb->len += skb_shinfo(skb)->frags[i].size;
+ skb->data_len += skb_shinfo(skb)->frags[i].size;
+ i++;
+ skb_shinfo(skb)->nr_frags = i;
+ }
+
+ while (i < frags) {
+ int rem;
+
+ if (i == 0)
+ break;
+
+ rem = skb_shinfo(skb)->frags[i - 1].size / 2;
+ if (rem == 0)
+ break;
+
+ skb_shinfo(skb)->frags[i - 1].size -= rem;
+
+ skb_shinfo(skb)->frags[i] =
+ skb_shinfo(skb)->frags[i - 1];
+ get_page(skb_shinfo(skb)->frags[i].page);
+ skb_shinfo(skb)->frags[i].page =
+ skb_shinfo(skb)->frags[i - 1].page;
+ skb_shinfo(skb)->frags[i].page_offset +=
+ skb_shinfo(skb)->frags[i - 1].size;
+ skb_shinfo(skb)->frags[i].size = rem;
+ i++;
+ skb_shinfo(skb)->nr_frags = i;
+ }
+ }
+
+ /* Stamp the time and sequence number,
+ * converting them to network byte order.
+ */
+ pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+ pgh->seq_num = htonl(pkt_dev->seq_num);
+
+ do_gettimeofday(&timestamp);
+ pgh->tv_sec = htonl(timestamp.tv_sec);
+ pgh->tv_usec = htonl(timestamp.tv_usec);
+}
+
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct pktgen_dev *pkt_dev)
{
@@ -2613,7 +2702,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct udphdr *udph;
int datalen, iplen;
struct iphdr *iph;
- struct pktgen_hdr *pgh = NULL;
__be16 protocol = htons(ETH_P_IP);
__be32 *mpls;
__be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
@@ -2729,76 +2817,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
pkt_dev->pkt_overhead);
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
-
- if (pkt_dev->nfrags <= 0) {
- pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
- } else {
- int frags = pkt_dev->nfrags;
- int i, len;
-
- pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
- if (frags > MAX_SKB_FRAGS)
- frags = MAX_SKB_FRAGS;
- if (datalen > frags * PAGE_SIZE) {
- len = datalen - frags * PAGE_SIZE;
- memset(skb_put(skb, len), 0, len);
- datalen = frags * PAGE_SIZE;
- }
-
- i = 0;
- while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
- skb_shinfo(skb)->frags[i].page = page;
- skb_shinfo(skb)->frags[i].page_offset = 0;
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
-
- while (i < frags) {
- int rem;
-
- if (i == 0)
- break;
-
- rem = skb_shinfo(skb)->frags[i - 1].size / 2;
- if (rem == 0)
- break;
-
- skb_shinfo(skb)->frags[i - 1].size -= rem;
-
- skb_shinfo(skb)->frags[i] =
- skb_shinfo(skb)->frags[i - 1];
- get_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->frags[i].page =
- skb_shinfo(skb)->frags[i - 1].page;
- skb_shinfo(skb)->frags[i].page_offset +=
- skb_shinfo(skb)->frags[i - 1].size;
- skb_shinfo(skb)->frags[i].size = rem;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
- }
-
- /* Stamp the time, and sequence number,
- * convert them to network byte order
- */
- if (pgh) {
- struct timeval timestamp;
-
- pgh->pgh_magic = htonl(PKTGEN_MAGIC);
- pgh->seq_num = htonl(pkt_dev->seq_num);
-
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
- }
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
if (!process_ipsec(pkt_dev, skb, protocol))
@@ -2980,7 +2999,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct udphdr *udph;
int datalen;
struct ipv6hdr *iph;
- struct pktgen_hdr *pgh = NULL;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
__be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
@@ -3083,75 +3101,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
- if (pkt_dev->nfrags <= 0)
- pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- else {
- int frags = pkt_dev->nfrags;
- int i;
-
- pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
- if (frags > MAX_SKB_FRAGS)
- frags = MAX_SKB_FRAGS;
- if (datalen > frags * PAGE_SIZE) {
- skb_put(skb, datalen - frags * PAGE_SIZE);
- datalen = frags * PAGE_SIZE;
- }
-
- i = 0;
- while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL, 0);
- skb_shinfo(skb)->frags[i].page = page;
- skb_shinfo(skb)->frags[i].page_offset = 0;
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
-
- while (i < frags) {
- int rem;
-
- if (i == 0)
- break;
-
- rem = skb_shinfo(skb)->frags[i - 1].size / 2;
- if (rem == 0)
- break;
-
- skb_shinfo(skb)->frags[i - 1].size -= rem;
-
- skb_shinfo(skb)->frags[i] =
- skb_shinfo(skb)->frags[i - 1];
- get_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->frags[i].page =
- skb_shinfo(skb)->frags[i - 1].page;
- skb_shinfo(skb)->frags[i].page_offset +=
- skb_shinfo(skb)->frags[i - 1].size;
- skb_shinfo(skb)->frags[i].size = rem;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
- }
-
- /* Stamp the time, and sequence number,
- * convert them to network byte order
- * should we update cloned packets too ?
- */
- if (pgh) {
- struct timeval timestamp;
-
- pgh->pgh_magic = htonl(PKTGEN_MAGIC);
- pgh->seq_num = htonl(pkt_dev->seq_num);
-
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
- }
- /* pkt_dev->seq_num++; FF: you really mean this? */
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
}
@@ -3884,6 +3834,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
free_SAs(pkt_dev);
#endif
vfree(pkt_dev->flows);
+ if (pkt_dev->page)
+ put_page(pkt_dev->page);
kfree(pkt_dev);
return 0;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2d65c6bb24c1..49f7ea5b4c75 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -868,6 +868,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+ NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
if (dev->ifindex != dev->iflink)
NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
@@ -1035,6 +1036,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
[IFLA_MTU] = { .type = NLA_U32 },
[IFLA_LINK] = { .type = NLA_U32 },
+ [IFLA_MASTER] = { .type = NLA_U32 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
@@ -1177,6 +1179,41 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
return err;
}
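+/*
+ * Change a device's master: detach from the current master via
+ * ndo_del_slave() and, if ifindex is non-zero, attach to the new
+ * master via ndo_add_slave().
+ */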
+static int do_set_master(struct net_device *dev, int ifindex)
+{
+ struct net_device *master_dev;
+ const struct net_device_ops *ops;
+ int err;
+
+ if (dev->master) {
+ if (dev->master->ifindex == ifindex)
+ return 0;
+ ops = dev->master->netdev_ops;
+ if (ops->ndo_del_slave) {
+ err = ops->ndo_del_slave(dev->master, dev);
+ if (err)
+ return err;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (ifindex) {
+ master_dev = __dev_get_by_index(dev_net(dev), ifindex);
+ if (!master_dev)
+ return -EINVAL;
+ ops = master_dev->netdev_ops;
+ if (ops->ndo_add_slave) {
+ err = ops->ndo_add_slave(master_dev, dev);
+ if (err)
+ return err;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
@@ -1264,6 +1301,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
modified = 1;
}
+ if (tb[IFLA_GROUP]) {
+ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+ modified = 1;
+ }
+
/*
* Interface selected by interface index but interface
* name provided implies that a name change has been
@@ -1295,6 +1337,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
goto errout;
}
+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+ if (err)
+ goto errout;
+ modified = 1;
+ }
+
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
@@ -1541,6 +1590,8 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ if (tb[IFLA_GROUP])
+ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
return dev;
@@ -1551,6 +1602,24 @@ err:
}
EXPORT_SYMBOL(rtnl_create_link);
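+/* Apply one setlink request to every device in the given group. */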
+static int rtnl_group_changelink(struct net *net, int group,
+ struct ifinfomsg *ifm,
+ struct nlattr **tb)
+{
+ struct net_device *dev;
+ int err;
+
+ for_each_netdev(net, dev) {
+ if (dev->group == group) {
+ err = do_setlink(dev, ifm, tb, NULL, 0);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
@@ -1578,10 +1647,12 @@ replay:
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
- else if (ifname[0])
- dev = __dev_get_by_name(net, ifname);
- else
- dev = NULL;
+ else {
+ if (ifname[0])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ dev = NULL;
+ }
err = validate_linkmsg(dev, tb);
if (err < 0)
@@ -1645,8 +1716,13 @@ replay:
return do_setlink(dev, ifm, tb, ifname, modified);
}
- if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+ return rtnl_group_changelink(net,
+ nla_get_u32(tb[IFLA_GROUP]),
+ ifm, tb);
return -ENODEV;
+ }
if (ifm->ifi_index)
return -EOPNOTSUPP;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d883dcc78b6b..1eb526a848ff 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2434,8 +2434,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
return -ENOMEM;
/* initialize the next frag */
- sk->sk_sndmsg_page = page;
- sk->sk_sndmsg_off = 0;
skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
skb->truesize += PAGE_SIZE;
atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
@@ -2455,7 +2453,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
return -EFAULT;
/* copy was successful so update the size parameters */
- sk->sk_sndmsg_off += copy;
frag->size += copy;
skb->len += copy;
skb->data_len += copy;
@@ -2498,7 +2495,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
@@ -2508,7 +2505,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
unsigned int offset = doffset;
unsigned int headroom;
unsigned int len;
- int sg = features & NETIF_F_SG;
+ int sg = !!(features & NETIF_F_SG);
int nfrags = skb_shinfo(skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d5074a567289..c44348adba3b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
goto err;
}
- if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+ if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
err = ops->ieee_setpfc(netdev, pfc);
if (err)
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index e96d5e810039..fadecd20d75b 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -583,6 +583,15 @@ done:
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+ return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* Caller (dccp_v4_do_rcv) will send Reset */
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
+ } else if (sk->sk_state == DCCP_CLOSED) {
+ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+ return 1;
}
if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
switch (sk->sk_state) {
- case DCCP_CLOSED:
- dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
- return 1;
-
case DCCP_REQUESTING:
queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
if (queued >= 0)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 45a434f94169..a8ff95502081 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -43,6 +43,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
+ __be16 orig_sport, orig_dport;
struct rtable *rt;
__be32 daddr, nexthop;
int tmp;
@@ -63,10 +64,12 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
nexthop = inet->opt->faddr;
}
+ orig_sport = inet->inet_sport;
+ orig_dport = usin->sin_port;
tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
- inet->inet_sport, usin->sin_port, sk, 1);
+ orig_sport, orig_dport, sk, true);
if (tmp < 0)
return tmp;
@@ -99,8 +102,9 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (err != 0)
goto failure;
- err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport,
- inet->inet_dport, sk);
+ err = ip_route_newports(&rt, IPPROTO_DCCP,
+ orig_sport, orig_dport,
+ inet->inet_sport, inet->inet_dport, sk);
if (err != 0)
goto failure;
@@ -471,7 +475,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
};
security_skb_classify_flow(skb, &fl);
- if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
+ if (ip_route_output_flow(net, &rt, &fl, sk)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index dca711df9b60..5efc57f5e605 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -162,15 +162,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
fl.fl_ip_sport = inet->inet_sport;
security_sk_classify_flow(sk, &fl);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err) {
- sk->sk_err_soft = -err;
- goto out;
- }
-
- err = xfrm_lookup(net, &dst, &fl, sk, 0);
- if (err < 0) {
- sk->sk_err_soft = -err;
+ dst = ip6_dst_lookup_flow(sk, &fl, NULL, false);
+ if (IS_ERR(dst)) {
+ sk->sk_err_soft = -PTR_ERR(dst);
goto out;
}
} else
@@ -267,16 +261,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
final_p = fl6_update_dst(&fl, opt, &final);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
- goto done;
-
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
- if (err < 0)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ dst = NULL;
goto done;
+ }
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
@@ -338,14 +328,13 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
security_skb_classify_flow(rxskb, &fl);
/* sk = NULL, but it is safe for now. RST socket required. */
- if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
- if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
- skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl, NULL);
- DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
- DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
- return;
- }
+ dst = ip6_dst_lookup_flow(ctl_sk, &fl, NULL, false);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(skb, dst);
+ ip6_xmit(ctl_sk, skb, &fl, NULL);
+ DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
+ DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+ return;
}
kfree_skb(skb);
@@ -484,7 +473,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
- struct dccp_sock *newdp;
struct dccp6_sock *newdp6;
struct sock *newsk;
struct ipv6_txoptions *opt;
@@ -498,7 +486,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
return NULL;
newdp6 = (struct dccp6_sock *)newsk;
- newdp = dccp_sk(newsk);
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
@@ -552,13 +539,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
fl.fl_ip_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, &fl);
- if (ip6_dst_lookup(sk, &dst, &fl))
- goto out;
-
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+ if (IS_ERR(dst))
goto out;
}
@@ -578,7 +560,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
- newdp = dccp_sk(newsk);
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
@@ -982,19 +963,10 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
final_p = fl6_update_dst(&fl, np->opt, &final);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
goto failure;
-
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
- if (err < 0) {
- if (err == -EREMOTE)
- err = ip6_dst_blackhole(sk, &dst, &fl);
- if (err < 0)
- goto failure;
}
if (saddr == NULL) {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5e636365d33c..0877147d2167 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,6 +112,7 @@ static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -133,11 +134,18 @@ static struct dst_ops dn_dst_ops = {
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
.default_mtu = dn_dst_default_mtu,
+ .cow_metrics = dst_cow_metrics_generic,
+ .destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
};
+static void dn_dst_destroy(struct dst_entry *dst)
+{
+ dst_destroy_metrics_generic(dst);
+}
+
static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
__u16 tmp = (__u16 __force)(src ^ dst);
@@ -814,14 +822,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
struct dn_fib_info *fi = res->fi;
struct net_device *dev = rt->dst.dev;
+ unsigned int mss_metric;
struct neighbour *n;
- unsigned int metric;
if (fi) {
if (DN_FIB_RES_GW(*res) &&
DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = DN_FIB_RES_GW(*res);
- dst_import_metrics(&rt->dst, fi->fib_metrics);
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
}
rt->rt_type = res->type;
@@ -834,10 +842,10 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
- metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
- if (metric) {
+ mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+ if (mss_metric) {
unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
- if (metric > mss)
+ if (mss_metric > mss)
dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
}
return 0;
@@ -1114,7 +1122,7 @@ make_route:
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops);
+ rt = dst_alloc(&dn_dst_ops, 0);
if (rt == NULL)
goto e_nobufs;
@@ -1225,8 +1233,9 @@ int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock
err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
if (err == 0 && fl->proto) {
- err = xfrm_lookup(&init_net, pprt, fl, sk,
- (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
+ if (!(flags & MSG_DONTWAIT))
+ fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+ err = xfrm_lookup(&init_net, pprt, fl, sk, 0);
}
return err;
}
@@ -1375,7 +1384,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
}
make_route:
- rt = dst_alloc(&dn_dst_ops);
+ rt = dst_alloc(&dn_dst_ops, 0);
if (rt == NULL)
goto e_nobufs;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f2abd3755690..b66600b3f4b5 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -59,7 +59,6 @@ struct dn_hash
};
#define dz_key_0(key) ((key).datum = 0)
-#define dz_prefix(key,dz) ((key).datum)
#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index a5a1050595d1..cbb505ba9324 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -55,45 +55,9 @@ config IP_ADVANCED_ROUTER
If unsure, say N here.
-choice
- prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
- depends on IP_ADVANCED_ROUTER
- default ASK_IP_FIB_HASH
-
-config ASK_IP_FIB_HASH
- bool "FIB_HASH"
- ---help---
- Current FIB is very proven and good enough for most users.
-
-config IP_FIB_TRIE
- bool "FIB_TRIE"
- ---help---
- Use new experimental LC-trie as FIB lookup algorithm.
- This improves lookup performance if you have a large
- number of routes.
-
- LC-trie is a longest matching prefix lookup algorithm which
- performs better than FIB_HASH for large routing tables.
- But, it consumes more memory and is more complex.
-
- LC-trie is described in:
-
- IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
- IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
- June 1999
-
- An experimental study of compression methods for dynamic tries
- Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
- <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
-
-endchoice
-
-config IP_FIB_HASH
- def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
-
config IP_FIB_TRIE_STATS
bool "FIB TRIE statistics"
- depends on IP_FIB_TRIE
+ depends on IP_ADVANCED_ROUTER
---help---
Keep track of statistics on structure of FIB TRIE table.
Useful for testing and measuring TRIE performance.
@@ -140,6 +104,9 @@ config IP_ROUTE_VERBOSE
handled by the klogd daemon which is responsible for kernel messages
("man klogd").
+config IP_ROUTE_CLASSID
+ bool
+
config IP_PNP
bool "IP: kernel level autoconfiguration"
help
@@ -657,4 +624,3 @@ config TCP_MD5SIG
on the Internet.
If unsure, say N.
-
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4978d22f9a75..0dc772d0d125 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,12 +10,10 @@ obj-y := route.o inetpeer.o protocol.o \
tcp_minisocks.o tcp_cong.o \
datagram.o raw.o udp.o udplite.o \
arp.o icmp.o devinet.o af_inet.o igmp.o \
- fib_frontend.o fib_semantics.o \
+ fib_frontend.o fib_semantics.o fib_trie.o \
inet_fragment.o
obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
-obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
-obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 45b89d7bda5a..44513bb8ac2e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1115,7 +1115,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if,
sk->sk_protocol,
- inet->inet_sport, inet->inet_dport, sk, 0);
+ inet->inet_sport, inet->inet_dport, sk, false);
if (err)
return err;
@@ -1174,7 +1174,7 @@ int inet_sk_rebuild_header(struct sock *sk)
};
security_sk_classify_flow(sk, &fl);
- err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
+ err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk);
}
if (!err)
sk_setup_caps(sk, &rt->dst);
@@ -1231,7 +1231,7 @@ out:
return err;
}
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 86961bec70ab..325053df6e70 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -201,7 +201,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->ttl = 0;
top_iph->check = 0;
- ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+ if (x->props.flags & XFRM_STATE_ALIGN4)
+ ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+ else
+ ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
ah->reserved = 0;
ah->spi = x->id.spi;
@@ -299,9 +302,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
- if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
- ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
- goto out;
+ if (x->props.flags & XFRM_STATE_ALIGN4) {
+ if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
+ ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
+ goto out;
+ } else {
+ if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
+ ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
+ goto out;
+ }
if (!pskb_may_pull(skb, ah_hlen))
goto out;
@@ -450,8 +459,12 @@ static int ah_init_state(struct xfrm_state *x)
BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
- x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
- ahp->icv_trunc_len);
+ if (x->props.flags & XFRM_STATE_ALIGN4)
+ x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
+ ahp->icv_trunc_len);
+ else
+ x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
+ ahp->icv_trunc_len);
if (x->props.mode == XFRM_MODE_TUNNEL)
x->props.header_len += sizeof(struct iphdr);
x->data = ahp;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 174be6caa5c8..eaee1edd2dd7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -49,7 +49,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
sk->sk_protocol,
- inet->inet_sport, usin->sin_port, sk, 1);
+ inet->inet_sport, usin->sin_port, sk, true);
if (err) {
if (err == -ENETUNREACH)
IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index df4616fce929..90389281d97a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -51,6 +51,7 @@
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
+#include <linux/hash.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
@@ -92,6 +93,71 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
[IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
};
+/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
+ * value. So if you change this define, make appropriate changes to
+ * inet_addr_hash as well.
+ */
+#define IN4_ADDR_HSIZE 256
+static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
+static DEFINE_SPINLOCK(inet_addr_hash_lock);
+
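+/* Hash an IPv4 address, mixed with the namespace pointer, into one of
+ * the IN4_ADDR_HSIZE buckets.
+ */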
+static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+{
+ u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+
+ return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
+ (IN4_ADDR_HSIZE - 1));
+}
+
+static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
+{
+ unsigned int hash = inet_addr_hash(net, ifa->ifa_address);
+
+ spin_lock(&inet_addr_hash_lock);
+ hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
+ spin_unlock(&inet_addr_hash_lock);
+}
+
+static void inet_hash_remove(struct in_ifaddr *ifa)
+{
+ spin_lock(&inet_addr_hash_lock);
+ hlist_del_init_rcu(&ifa->hash);
+ spin_unlock(&inet_addr_hash_lock);
+}
+
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU, or RTNL
+ */
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
+{
+ unsigned int hash = inet_addr_hash(net, addr);
+ struct net_device *result = NULL;
+ struct in_ifaddr *ifa;
+ struct hlist_node *node;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+ struct net_device *dev = ifa->ifa_dev->dev;
+
+ if (!net_eq(dev_net(dev), net))
+ continue;
+ if (ifa->ifa_address == addr) {
+ result = dev;
+ break;
+ }
+ }
+ if (result && devref)
+ dev_hold(result);
+ rcu_read_unlock();
+ return result;
+}
+EXPORT_SYMBOL(__ip_dev_find);
+
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
@@ -265,6 +331,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
}
if (!do_promote) {
+ inet_hash_remove(ifa);
*ifap1 = ifa->ifa_next;
rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
@@ -281,6 +348,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
+ inet_hash_remove(ifa1);
/* 3. Announce address deletion */
@@ -368,6 +436,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
ifa->ifa_next = *ifap;
*ifap = ifa;
+ inet_hash_insert(dev_net(in_dev->dev), ifa);
+
/* Send message first, then call notifier.
Notifier will trigger FIB update, so that
listeners of netlink will know about new ifaddr */
@@ -521,6 +591,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
if (tb[IFA_ADDRESS] == NULL)
tb[IFA_ADDRESS] = tb[IFA_LOCAL];
+ INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
ifa->ifa_flags = ifm->ifa_flags;
@@ -728,6 +799,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!ifa) {
ret = -ENOBUFS;
ifa = inet_alloc_ifa();
+ INIT_HLIST_NODE(&ifa->hash);
if (!ifa)
break;
if (colon)
@@ -1084,6 +1156,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
struct in_ifaddr *ifa = inet_alloc_ifa();
if (ifa) {
+ INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
@@ -1720,6 +1793,11 @@ static struct rtnl_af_ops inet_af_ops = {
void __init devinet_init(void)
{
+ int i;
+
+ for (i = 0; i < IN4_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet_addr_lst[i]);
+
register_pernet_subsys(&devinet_ops);
register_gifconf(PF_INET, inet_gifconf);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1d2cdd43a878..ad0778a3fa53 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -51,11 +51,11 @@ static int __net_init fib4_rules_init(struct net *net)
{
struct fib_table *local_table, *main_table;
- local_table = fib_hash_table(RT_TABLE_LOCAL);
+ local_table = fib_trie_table(RT_TABLE_LOCAL);
if (local_table == NULL)
return -ENOMEM;
- main_table = fib_hash_table(RT_TABLE_MAIN);
+ main_table = fib_trie_table(RT_TABLE_MAIN);
if (main_table == NULL)
goto fail;
@@ -82,7 +82,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
if (tb)
return tb;
- tb = fib_hash_table(id);
+ tb = fib_trie_table(id);
if (!tb)
return NULL;
h = id & (FIB_TABLE_HASHSZ - 1);
@@ -114,21 +114,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
-void fib_select_default(struct net *net,
- const struct flowi *flp, struct fib_result *res)
-{
- struct fib_table *tb;
- int table = RT_TABLE_MAIN;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
- return;
- table = res->r->table;
-#endif
- tb = fib_get_table(net, table);
- if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- fib_table_select_default(tb, flp, res);
-}
-
static void fib_flush(struct net *net)
{
int flushed = 0;
@@ -147,46 +132,6 @@ static void fib_flush(struct net *net)
rt_cache_flush(net, -1);
}
-/**
- * __ip_dev_find - find the first device with a given source address.
- * @net: the net namespace
- * @addr: the source address
- * @devref: if true, take a reference on the found device
- *
- * If a caller uses devref=false, it should be protected by RCU, or RTNL
- */
-struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
-{
- struct flowi fl = {
- .fl4_dst = addr,
- };
- struct fib_result res = { 0 };
- struct net_device *dev = NULL;
- struct fib_table *local_table;
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
- rcu_read_lock();
- local_table = fib_get_table(net, RT_TABLE_LOCAL);
- if (!local_table ||
- fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
- rcu_read_unlock();
- return NULL;
- }
- if (res.type != RTN_LOCAL)
- goto out;
- dev = FIB_RES_DEV(res);
-
- if (dev && devref)
- dev_hold(dev);
-out:
- rcu_read_unlock();
- return dev;
-}
-EXPORT_SYMBOL(__ip_dev_find);
-
/*
* Find address type as if only "dev" was present in the system. If
* on_dev is NULL then all interfaces are taken into consideration.
@@ -1101,5 +1046,5 @@ void __init ip_fib_init(void)
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
- fib_hash_init();
+ fib_trie_init();
}
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
deleted file mode 100644
index b3acb0417b21..000000000000
--- a/net/ipv4/fib_hash.c
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IPv4 FIB: lookup engine and maintenance routines.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/protocol.h>
-#include <net/route.h>
-#include <net/tcp.h>
-#include <net/sock.h>
-#include <net/ip_fib.h>
-
-#include "fib_lookup.h"
-
-static struct kmem_cache *fn_hash_kmem __read_mostly;
-static struct kmem_cache *fn_alias_kmem __read_mostly;
-
-struct fib_node {
- struct hlist_node fn_hash;
- struct list_head fn_alias;
- __be32 fn_key;
- struct fib_alias fn_embedded_alias;
-};
-
-#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
-
-struct fn_zone {
- struct fn_zone __rcu *fz_next; /* Next not empty zone */
- struct hlist_head __rcu *fz_hash; /* Hash table pointer */
- seqlock_t fz_lock;
- u32 fz_hashmask; /* (fz_divisor - 1) */
-
- u8 fz_order; /* Zone order (0..32) */
- u8 fz_revorder; /* 32 - fz_order */
- __be32 fz_mask; /* inet_make_mask(order) */
-#define FZ_MASK(fz) ((fz)->fz_mask)
-
- struct hlist_head fz_embedded_hash[EMBEDDED_HASH_SIZE];
-
- int fz_nent; /* Number of entries */
- int fz_divisor; /* Hash size (mask+1) */
-};
-
-struct fn_hash {
- struct fn_zone *fn_zones[33];
- struct fn_zone __rcu *fn_zone_list;
-};
-
-static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
-{
- u32 h = ntohl(key) >> fz->fz_revorder;
- h ^= (h>>20);
- h ^= (h>>10);
- h ^= (h>>5);
- h &= fz->fz_hashmask;
- return h;
-}
-
-static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
-{
- return dst & FZ_MASK(fz);
-}
-
-static unsigned int fib_hash_genid;
-
-#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
-
-static struct hlist_head *fz_hash_alloc(int divisor)
-{
- unsigned long size = divisor * sizeof(struct hlist_head);
-
- if (size <= PAGE_SIZE)
- return kzalloc(size, GFP_KERNEL);
-
- return (struct hlist_head *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-}
-
-/* The fib hash lock must be held when this is called. */
-static inline void fn_rebuild_zone(struct fn_zone *fz,
- struct hlist_head *old_ht,
- int old_divisor)
-{
- int i;
-
- for (i = 0; i < old_divisor; i++) {
- struct hlist_node *node, *n;
- struct fib_node *f;
-
- hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
- struct hlist_head *new_head;
-
- hlist_del_rcu(&f->fn_hash);
-
- new_head = rcu_dereference_protected(fz->fz_hash, 1) +
- fn_hash(f->fn_key, fz);
- hlist_add_head_rcu(&f->fn_hash, new_head);
- }
- }
-}
-
-static void fz_hash_free(struct hlist_head *hash, int divisor)
-{
- unsigned long size = divisor * sizeof(struct hlist_head);
-
- if (size <= PAGE_SIZE)
- kfree(hash);
- else
- free_pages((unsigned long)hash, get_order(size));
-}
-
-static void fn_rehash_zone(struct fn_zone *fz)
-{
- struct hlist_head *ht, *old_ht;
- int old_divisor, new_divisor;
- u32 new_hashmask;
-
- new_divisor = old_divisor = fz->fz_divisor;
-
- switch (old_divisor) {
- case EMBEDDED_HASH_SIZE:
- new_divisor *= EMBEDDED_HASH_SIZE;
- break;
- case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
- new_divisor *= (EMBEDDED_HASH_SIZE/2);
- break;
- default:
- if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
- printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
- return;
- }
- new_divisor = (old_divisor << 1);
- break;
- }
-
- new_hashmask = (new_divisor - 1);
-
-#if RT_CACHE_DEBUG >= 2
- printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
- fz->fz_order, old_divisor);
-#endif
-
- ht = fz_hash_alloc(new_divisor);
-
- if (ht) {
- struct fn_zone nfz;
-
- memcpy(&nfz, fz, sizeof(nfz));
-
- write_seqlock_bh(&fz->fz_lock);
- old_ht = rcu_dereference_protected(fz->fz_hash, 1);
- RCU_INIT_POINTER(nfz.fz_hash, ht);
- nfz.fz_hashmask = new_hashmask;
- nfz.fz_divisor = new_divisor;
- fn_rebuild_zone(&nfz, old_ht, old_divisor);
- fib_hash_genid++;
- rcu_assign_pointer(fz->fz_hash, ht);
- fz->fz_hashmask = new_hashmask;
- fz->fz_divisor = new_divisor;
- write_sequnlock_bh(&fz->fz_lock);
-
- if (old_ht != fz->fz_embedded_hash) {
- synchronize_rcu();
- fz_hash_free(old_ht, old_divisor);
- }
- }
-}
-
-static void fn_free_node_rcu(struct rcu_head *head)
-{
- struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
-
- kmem_cache_free(fn_hash_kmem, f);
-}
-
-static inline void fn_free_node(struct fib_node *f)
-{
- call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
-}
-
-static void fn_free_alias_rcu(struct rcu_head *head)
-{
- struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
-
- kmem_cache_free(fn_alias_kmem, fa);
-}
-
-static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
-{
- fib_release_info(fa->fa_info);
- if (fa == &f->fn_embedded_alias)
- fa->fa_info = NULL;
- else
- call_rcu(&fa->rcu, fn_free_alias_rcu);
-}
-
-static struct fn_zone *
-fn_new_zone(struct fn_hash *table, int z)
-{
- int i;
- struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
- if (!fz)
- return NULL;
-
- seqlock_init(&fz->fz_lock);
- fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
- fz->fz_hashmask = fz->fz_divisor - 1;
- RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
- fz->fz_order = z;
- fz->fz_revorder = 32 - z;
- fz->fz_mask = inet_make_mask(z);
-
- /* Find the first not empty zone with more specific mask */
- for (i = z + 1; i <= 32; i++)
- if (table->fn_zones[i])
- break;
- if (i > 32) {
- /* No more specific masks, we are the first. */
- rcu_assign_pointer(fz->fz_next,
- rtnl_dereference(table->fn_zone_list));
- rcu_assign_pointer(table->fn_zone_list, fz);
- } else {
- rcu_assign_pointer(fz->fz_next,
- rtnl_dereference(table->fn_zones[i]->fz_next));
- rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
- }
- table->fn_zones[z] = fz;
- fib_hash_genid++;
- return fz;
-}
-
-int fib_table_lookup(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res,
- int fib_flags)
-{
- int err;
- struct fn_zone *fz;
- struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-
- rcu_read_lock();
- for (fz = rcu_dereference(t->fn_zone_list);
- fz != NULL;
- fz = rcu_dereference(fz->fz_next)) {
- struct hlist_head *head;
- struct hlist_node *node;
- struct fib_node *f;
- __be32 k;
- unsigned int seq;
-
- do {
- seq = read_seqbegin(&fz->fz_lock);
- k = fz_key(flp->fl4_dst, fz);
-
- head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- if (f->fn_key != k)
- continue;
-
- err = fib_semantic_match(&f->fn_alias,
- flp, res,
- fz->fz_order, fib_flags);
- if (err <= 0)
- goto out;
- }
- } while (read_seqretry(&fz->fz_lock, seq));
- }
- err = 1;
-out:
- rcu_read_unlock();
- return err;
-}
-
-void fib_table_select_default(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res)
-{
- int order, last_idx;
- struct hlist_node *node;
- struct fib_node *f;
- struct fib_info *fi = NULL;
- struct fib_info *last_resort;
- struct fn_hash *t = (struct fn_hash *)tb->tb_data;
- struct fn_zone *fz = t->fn_zones[0];
- struct hlist_head *head;
-
- if (fz == NULL)
- return;
-
- last_idx = -1;
- last_resort = NULL;
- order = -1;
-
- rcu_read_lock();
- head = rcu_dereference(fz->fz_hash);
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
- struct fib_info *next_fi = fa->fa_info;
-
- if (fa->fa_scope != res->scope ||
- fa->fa_type != RTN_UNICAST)
- continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
- continue;
-
- fib_alias_accessed(fa);
-
- if (fi == NULL) {
- if (next_fi != res->fi)
- break;
- } else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- fi = next_fi;
- order++;
- }
- }
-
- if (order <= 0 || fi == NULL) {
- tb->tb_default = -1;
- goto out;
- }
-
- if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
-
- if (last_idx >= 0)
- fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
-out:
- rcu_read_unlock();
-}
-
-/* Insert node F to FZ. */
-static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
-
- hlist_add_head_rcu(&f->fn_hash, head);
-}
-
-/* Return the node in FZ matching KEY. */
-static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
- struct hlist_node *node;
- struct fib_node *f;
-
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- if (f->fn_key == key)
- return f;
- }
-
- return NULL;
-}
-
-
-static struct fib_alias *fib_fast_alloc(struct fib_node *f)
-{
- struct fib_alias *fa = &f->fn_embedded_alias;
-
- if (fa->fa_info != NULL)
- fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
- return fa;
-}
-
-/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fib_node *new_f = NULL;
- struct fib_node *f;
- struct fib_alias *fa, *new_fa;
- struct fn_zone *fz;
- struct fib_info *fi;
- u8 tos = cfg->fc_tos;
- __be32 key;
- int err;
-
- if (cfg->fc_dst_len > 32)
- return -EINVAL;
-
- fz = table->fn_zones[cfg->fc_dst_len];
- if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
- return -ENOBUFS;
-
- key = 0;
- if (cfg->fc_dst) {
- if (cfg->fc_dst & ~FZ_MASK(fz))
- return -EINVAL;
- key = fz_key(cfg->fc_dst, fz);
- }
-
- fi = fib_create_info(cfg);
- if (IS_ERR(fi))
- return PTR_ERR(fi);
-
- if (fz->fz_nent > (fz->fz_divisor<<1) &&
- fz->fz_divisor < FZ_MAX_DIVISOR &&
- (cfg->fc_dst_len == 32 ||
- (1 << cfg->fc_dst_len) > fz->fz_divisor))
- fn_rehash_zone(fz);
-
- f = fib_find_node(fz, key);
-
- if (!f)
- fa = NULL;
- else
- fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
-
- /* Now fa, if non-NULL, points to the first fib alias
- * with the same keys [prefix,tos,priority], if such key already
- * exists or to the node before which we will insert new one.
- *
- * If fa is NULL, we will need to allocate a new one and
- * insert to the head of f.
- *
- * If f is NULL, no fib node matched the destination key
- * and we need to allocate a new one of those as well.
- */
-
- if (fa && fa->fa_tos == tos &&
- fa->fa_info->fib_priority == fi->fib_priority) {
- struct fib_alias *fa_first, *fa_match;
-
- err = -EEXIST;
- if (cfg->fc_nlflags & NLM_F_EXCL)
- goto out;
-
- /* We have 2 goals:
- * 1. Find exact match for type, scope, fib_info to avoid
- * duplicate routes
- * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
- */
- fa_match = NULL;
- fa_first = fa;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
- if (fa->fa_tos != tos)
- break;
- if (fa->fa_info->fib_priority != fi->fib_priority)
- break;
- if (fa->fa_type == cfg->fc_type &&
- fa->fa_scope == cfg->fc_scope &&
- fa->fa_info == fi) {
- fa_match = fa;
- break;
- }
- }
-
- if (cfg->fc_nlflags & NLM_F_REPLACE) {
- u8 state;
-
- fa = fa_first;
- if (fa_match) {
- if (fa == fa_match)
- err = 0;
- goto out;
- }
- err = -ENOBUFS;
- new_fa = fib_fast_alloc(f);
- if (new_fa == NULL)
- goto out;
-
- new_fa->fa_tos = fa->fa_tos;
- new_fa->fa_info = fi;
- new_fa->fa_type = cfg->fc_type;
- new_fa->fa_scope = cfg->fc_scope;
- state = fa->fa_state;
- new_fa->fa_state = state & ~FA_S_ACCESSED;
- fib_hash_genid++;
- list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
- fn_free_alias(fa, f);
- if (state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
- rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
- tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
- return 0;
- }
-
- /* Error if we find a perfect match which
- * uses the same scope, type, and nexthop
- * information.
- */
- if (fa_match)
- goto out;
-
- if (!(cfg->fc_nlflags & NLM_F_APPEND))
- fa = fa_first;
- }
-
- err = -ENOENT;
- if (!(cfg->fc_nlflags & NLM_F_CREATE))
- goto out;
-
- err = -ENOBUFS;
-
- if (!f) {
- new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
- if (new_f == NULL)
- goto out;
-
- INIT_HLIST_NODE(&new_f->fn_hash);
- INIT_LIST_HEAD(&new_f->fn_alias);
- new_f->fn_key = key;
- f = new_f;
- }
-
- new_fa = fib_fast_alloc(f);
- if (new_fa == NULL)
- goto out;
-
- new_fa->fa_info = fi;
- new_fa->fa_tos = tos;
- new_fa->fa_type = cfg->fc_type;
- new_fa->fa_scope = cfg->fc_scope;
- new_fa->fa_state = 0;
-
- /*
- * Insert new entry to the list.
- */
-
- if (new_f)
- fib_insert_node(fz, new_f);
- list_add_tail_rcu(&new_fa->fa_list,
- (fa ? &fa->fa_list : &f->fn_alias));
- fib_hash_genid++;
-
- if (new_f)
- fz->fz_nent++;
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-
- rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
- &cfg->fc_nlinfo, 0);
- return 0;
-
-out:
- if (new_f)
- kmem_cache_free(fn_hash_kmem, new_f);
- fib_release_info(fi);
- return err;
-}
-
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
-{
- struct fn_hash *table = (struct fn_hash *)tb->tb_data;
- struct fib_node *f;
- struct fib_alias *fa, *fa_to_delete;
- struct fn_zone *fz;
- __be32 key;
-
- if (cfg->fc_dst_len > 32)
- return -EINVAL;
-
- if ((fz = table->fn_zones[cfg->fc_dst_len]) == NULL)
- return -ESRCH;
-
- key = 0;
- if (cfg->fc_dst) {
- if (cfg->fc_dst & ~FZ_MASK(fz))
- return -EINVAL;
- key = fz_key(cfg->fc_dst, fz);
- }
-
- f = fib_find_node(fz, key);
-
- if (!f)
- fa = NULL;
- else
- fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
- if (!fa)
- return -ESRCH;
-
- fa_to_delete = NULL;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- if (fa->fa_tos != cfg->fc_tos)
- break;
-
- if ((!cfg->fc_type ||
- fa->fa_type == cfg->fc_type) &&
- (cfg->fc_scope == RT_SCOPE_NOWHERE ||
- fa->fa_scope == cfg->fc_scope) &&
- (!cfg->fc_protocol ||
- fi->fib_protocol == cfg->fc_protocol) &&
- fib_nh_match(cfg, fi) == 0) {
- fa_to_delete = fa;
- break;
- }
- }
-
- if (fa_to_delete) {
- int kill_fn;
-
- fa = fa_to_delete;
- rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
- tb->tb_id, &cfg->fc_nlinfo, 0);
-
- kill_fn = 0;
- list_del_rcu(&fa->fa_list);
- if (list_empty(&f->fn_alias)) {
- hlist_del_rcu(&f->fn_hash);
- kill_fn = 1;
- }
- fib_hash_genid++;
-
- if (fa->fa_state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
- fn_free_alias(fa, f);
- if (kill_fn) {
- fn_free_node(f);
- fz->fz_nent--;
- }
-
- return 0;
- }
- return -ESRCH;
-}
-
-static int fn_flush_list(struct fn_zone *fz, int idx)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
- struct hlist_node *node, *n;
- struct fib_node *f;
- int found = 0;
-
- hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
- struct fib_alias *fa, *fa_node;
- int kill_f;
-
- kill_f = 0;
- list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
- list_del_rcu(&fa->fa_list);
- if (list_empty(&f->fn_alias)) {
- hlist_del_rcu(&f->fn_hash);
- kill_f = 1;
- }
- fib_hash_genid++;
-
- fn_free_alias(fa, f);
- found++;
- }
- }
- if (kill_f) {
- fn_free_node(f);
- fz->fz_nent--;
- }
- }
- return found;
-}
-
-/* caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fn_zone *fz;
- int found = 0;
-
- for (fz = rtnl_dereference(table->fn_zone_list);
- fz != NULL;
- fz = rtnl_dereference(fz->fz_next)) {
- int i;
-
- for (i = fz->fz_divisor - 1; i >= 0; i--)
- found += fn_flush_list(fz, i);
- }
- return found;
-}
-
-void fib_free_table(struct fib_table *tb)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fn_zone *fz, *next;
-
- next = table->fn_zone_list;
- while (next != NULL) {
- fz = next;
- next = fz->fz_next;
-
- if (fz->fz_hash != fz->fz_embedded_hash)
- fz_hash_free(fz->fz_hash, fz->fz_divisor);
-
- kfree(fz);
- }
-
- kfree(tb);
-}
-
-static inline int
-fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
- struct fib_table *tb,
- struct fn_zone *fz,
- struct hlist_head *head)
-{
- struct hlist_node *node;
- struct fib_node *f;
- int i, s_i;
-
- s_i = cb->args[4];
- i = 0;
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
- if (i < s_i)
- goto next;
-
- if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->tb_id,
- fa->fa_type,
- fa->fa_scope,
- f->fn_key,
- fz->fz_order,
- fa->fa_tos,
- fa->fa_info,
- NLM_F_MULTI) < 0) {
- cb->args[4] = i;
- return -1;
- }
-next:
- i++;
- }
- }
- cb->args[4] = i;
- return skb->len;
-}
-
-static inline int
-fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
- struct fib_table *tb,
- struct fn_zone *fz)
-{
- int h, s_h;
- struct hlist_head *head = rcu_dereference(fz->fz_hash);
-
- if (head == NULL)
- return skb->len;
- s_h = cb->args[3];
- for (h = s_h; h < fz->fz_divisor; h++) {
- if (hlist_empty(head + h))
- continue;
- if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
- cb->args[3] = h;
- return -1;
- }
- memset(&cb->args[4], 0,
- sizeof(cb->args) - 4*sizeof(cb->args[0]));
- }
- cb->args[3] = h;
- return skb->len;
-}
-
-int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int m = 0, s_m;
- struct fn_zone *fz;
- struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-
- s_m = cb->args[2];
- rcu_read_lock();
- for (fz = rcu_dereference(table->fn_zone_list);
- fz != NULL;
- fz = rcu_dereference(fz->fz_next), m++) {
- if (m < s_m)
- continue;
- if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
- cb->args[2] = m;
- rcu_read_unlock();
- return -1;
- }
- memset(&cb->args[3], 0,
- sizeof(cb->args) - 3*sizeof(cb->args[0]));
- }
- rcu_read_unlock();
- cb->args[2] = m;
- return skb->len;
-}
-
-void __init fib_hash_init(void)
-{
- fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
- 0, SLAB_PANIC, NULL);
-
- fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
- 0, SLAB_PANIC, NULL);
-
-}
-
-struct fib_table *fib_hash_table(u32 id)
-{
- struct fib_table *tb;
-
- tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
- GFP_KERNEL);
- if (tb == NULL)
- return NULL;
-
- tb->tb_id = id;
- tb->tb_default = -1;
-
- memset(tb->tb_data, 0, sizeof(struct fn_hash));
- return tb;
-}
-
-/* ------------------------------------------------------------------------ */
-#ifdef CONFIG_PROC_FS
-
-struct fib_iter_state {
- struct seq_net_private p;
- struct fn_zone *zone;
- int bucket;
- struct hlist_head *hash_head;
- struct fib_node *fn;
- struct fib_alias *fa;
- loff_t pos;
- unsigned int genid;
- int valid;
-};
-
-static struct fib_alias *fib_get_first(struct seq_file *seq)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_table *main_table;
- struct fn_hash *table;
-
- main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
- table = (struct fn_hash *)main_table->tb_data;
-
- iter->bucket = 0;
- iter->hash_head = NULL;
- iter->fn = NULL;
- iter->fa = NULL;
- iter->pos = 0;
- iter->genid = fib_hash_genid;
- iter->valid = 1;
-
- for (iter->zone = rcu_dereference(table->fn_zone_list);
- iter->zone != NULL;
- iter->zone = rcu_dereference(iter->zone->fz_next)) {
- int maxslot;
-
- if (!iter->zone->fz_nent)
- continue;
-
- iter->hash_head = rcu_dereference(iter->zone->fz_hash);
- maxslot = iter->zone->fz_divisor;
-
- for (iter->bucket = 0; iter->bucket < maxslot;
- ++iter->bucket, ++iter->hash_head) {
- struct hlist_node *node;
- struct fib_node *fn;
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
- }
-out:
- return iter->fa;
-}
-
-static struct fib_alias *fib_get_next(struct seq_file *seq)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_node *fn;
- struct fib_alias *fa;
-
- /* Advance FA, if any. */
- fn = iter->fn;
- fa = iter->fa;
- if (fa) {
- BUG_ON(!fn);
- list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
- iter->fa = fa;
- goto out;
- }
- }
-
- fa = iter->fa = NULL;
-
- /* Advance FN. */
- if (fn) {
- struct hlist_node *node = &fn->fn_hash;
- hlist_for_each_entry_continue(fn, node, fn_hash) {
- iter->fn = fn;
-
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fa = fa;
- goto out;
- }
- }
- }
-
- fn = iter->fn = NULL;
-
- /* Advance hash chain. */
- if (!iter->zone)
- goto out;
-
- for (;;) {
- struct hlist_node *node;
- int maxslot;
-
- maxslot = iter->zone->fz_divisor;
-
- while (++iter->bucket < maxslot) {
- iter->hash_head++;
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
-
- iter->zone = rcu_dereference(iter->zone->fz_next);
-
- if (!iter->zone)
- goto out;
-
- iter->bucket = 0;
- iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
-out:
- iter->pos++;
- return fa;
-}
-
-static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_alias *fa;
-
- if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
- fa = iter->fa;
- pos -= iter->pos;
- } else
- fa = fib_get_first(seq);
-
- if (fa)
- while (pos && (fa = fib_get_next(seq)))
- --pos;
- return pos ? NULL : fa;
-}
-
-static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- void *v = NULL;
-
- rcu_read_lock();
- if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
- v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
- return v;
-}
-
-static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- ++*pos;
- return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
-}
-
-static void fib_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
-{
- static const unsigned type2flags[RTN_MAX + 1] = {
- [7] = RTF_REJECT,
- [8] = RTF_REJECT,
- };
- unsigned flags = type2flags[type];
-
- if (fi && fi->fib_nh->nh_gw)
- flags |= RTF_GATEWAY;
- if (mask == htonl(0xFFFFFFFF))
- flags |= RTF_HOST;
- flags |= RTF_UP;
- return flags;
-}
-
-/*
- * This outputs /proc/net/route.
- *
- * It always works in backward compatibility mode.
- * The format of the file is not supposed to be changed.
- */
-static int fib_seq_show(struct seq_file *seq, void *v)
-{
- struct fib_iter_state *iter;
- int len;
- __be32 prefix, mask;
- unsigned flags;
- struct fib_node *f;
- struct fib_alias *fa;
- struct fib_info *fi;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
- "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
- "\tWindow\tIRTT");
- goto out;
- }
-
- iter = seq->private;
- f = iter->fn;
- fa = iter->fa;
- fi = fa->fa_info;
- prefix = f->fn_key;
- mask = FZ_MASK(iter->zone);
- flags = fib_flag_trans(fa->fa_type, mask, fi);
- if (fi)
- seq_printf(seq,
- "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
- fi->fib_dev ? fi->fib_dev->name : "*", prefix,
- fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
- mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
- fi->fib_window,
- fi->fib_rtt >> 3, &len);
- else
- seq_printf(seq,
- "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
- prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
-
- seq_printf(seq, "%*s\n", 127 - len, "");
-out:
- return 0;
-}
-
-static const struct seq_operations fib_seq_ops = {
- .start = fib_seq_start,
- .next = fib_seq_next,
- .stop = fib_seq_stop,
- .show = fib_seq_show,
-};
-
-static int fib_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &fib_seq_ops,
- sizeof(struct fib_iter_state));
-}
-
-static const struct file_operations fib_seq_fops = {
- .owner = THIS_MODULE,
- .open = fib_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-int __net_init fib_proc_init(struct net *net)
-{
- if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
- return -ENOMEM;
- return 0;
-}
-
-void __net_exit fib_proc_exit(struct net *net)
-{
- proc_net_remove(net, "route");
-}
-#endif /* CONFIG_PROC_FS */
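The fib_hash.c engine deleted above kept one zone per prefix length (0..32); each zone was a hash table keyed by the destination masked to that length, and fib_table_lookup() walked the non-empty zones from most to least specific until fib_semantic_match() accepted an entry. A rough userspace sketch of that scheme follows; zone_mask(), zone_hash(), add_route() and lookup() are illustrative names, keys are in host byte order (the kernel converts with ntohl() first), and single-slot buckets stand in for the per-bucket hlist chains.

/* Toy longest-prefix match in the style of the deleted zone hash: one table
 * per prefix length, keyed by the masked destination. */
#include <stdint.h>
#include <stdio.h>

#define BUCKETS 16                              /* stand-in for fz_divisor */

struct route {
        uint32_t key;                           /* destination & zone mask */
        int used;
};

static struct route zones[33][BUCKETS];         /* index = prefix length */

static uint32_t zone_mask(int len)
{
        return len ? 0xffffffffu << (32 - len) : 0;
}

static unsigned int zone_hash(uint32_t key, int len)
{
        uint32_t h = len ? key >> (32 - len) : 0;   /* shift out host bits, like fn_hash() */

        h ^= h >> 20;
        h ^= h >> 10;
        h ^= h >> 5;
        return h & (BUCKETS - 1);
}

static void add_route(uint32_t dst, int len)
{
        uint32_t key = dst & zone_mask(len);

        zones[len][zone_hash(key, len)] = (struct route){ .key = key, .used = 1 };
}

static int lookup(uint32_t dst)
{
        /* Walk zones from most to least specific, as fib_table_lookup() did. */
        for (int len = 32; len >= 0; len--) {
                uint32_t key = dst & zone_mask(len);
                struct route *r = &zones[len][zone_hash(key, len)];

                if (r->used && r->key == key)
                        return len;
        }
        return -1;
}

int main(void)
{
        add_route(0x0a000000, 8);               /* 10.0.0.0/8 */
        add_route(0x0a010000, 16);              /* 10.1.0.0/16 */
        printf("10.1.2.3 -> /%d\n", lookup(0x0a010203));   /* /16 wins */
        printf("10.9.9.9 -> /%d\n", lookup(0x0a090909));   /* falls back to /8 */
        return 0;
}

With those two routes loaded, 10.1.2.3 matches the /16 zone before the /8 is ever probed, which is the longest-prefix behaviour the zone walk provided.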
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c079cc0ec651..d5c40d8f6632 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -25,7 +25,7 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
}
/* Exported by fib_semantics.c */
-extern int fib_semantic_match(struct list_head *head,
+extern int fib_semantic_match(struct fib_table *tb, struct list_head *head,
const struct flowi *flp,
struct fib_result *res, int prefixlen, int fib_flags);
extern void fib_release_info(struct fib_info *);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 7981a24f5c7b..3018efbaea77 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -41,13 +41,13 @@ struct fib4_rule {
__be32 srcmask;
__be32 dst;
__be32 dstmask;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
u32 tclassid;
#endif
};
-#ifdef CONFIG_NET_CLS_ROUTE
-u32 fib_rules_tclass(struct fib_result *res)
+#ifdef CONFIG_IP_ROUTE_CLASSID
+u32 fib_rules_tclass(const struct fib_result *res)
{
return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
}
@@ -165,7 +165,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (frh->dst_len)
rule4->dst = nla_get_be32(tb[FRA_DST]);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW])
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
#endif
@@ -195,7 +195,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->tos && (rule4->tos != frh->tos))
return 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
return 0;
#endif
@@ -224,7 +224,7 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
if (rule4->src_len)
NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (rule4->tclassid)
NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
#endif
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12d3dc3df1b7..562f34cd9303 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -49,7 +49,7 @@
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;
#define DEVINDEX_HASHBITS 8
@@ -152,6 +152,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ if (fi->fib_metrics != (u32 *) dst_default_metrics)
+ kfree(fi->fib_metrics);
kfree(fi);
}
@@ -200,7 +202,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight != onh->nh_weight ||
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +223,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
- unsigned int mask = (fib_hash_size - 1);
+ unsigned int mask = (fib_info_hash_size - 1);
unsigned int val = fi->fib_nhs;
val ^= fi->fib_protocol;
@@ -422,7 +424,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
@@ -476,7 +478,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla && nla_get_be32(nla) != nh->nh_gw)
return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla && nla_get_u32(nla) != nh->nh_tclassid)
return 1;
@@ -613,14 +615,14 @@ out:
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
- unsigned int mask = (fib_hash_size - 1);
+ unsigned int mask = (fib_info_hash_size - 1);
return ((__force u32)val ^
((__force u32)val >> 7) ^
((__force u32)val >> 14)) & mask;
}
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
{
if (bytes <= PAGE_SIZE)
return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +632,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
get_order(bytes));
}
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
if (!hash)
return;
@@ -641,18 +643,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
free_pages((unsigned long) hash, get_order(bytes));
}
-static void fib_hash_move(struct hlist_head *new_info_hash,
- struct hlist_head *new_laddrhash,
- unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+ struct hlist_head *new_laddrhash,
+ unsigned int new_size)
{
struct hlist_head *old_info_hash, *old_laddrhash;
- unsigned int old_size = fib_hash_size;
+ unsigned int old_size = fib_info_hash_size;
unsigned int i, bytes;
spin_lock_bh(&fib_info_lock);
old_info_hash = fib_info_hash;
old_laddrhash = fib_info_laddrhash;
- fib_hash_size = new_size;
+ fib_info_hash_size = new_size;
for (i = 0; i < old_size; i++) {
struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +695,8 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
spin_unlock_bh(&fib_info_lock);
bytes = old_size * sizeof(struct hlist_head *);
- fib_hash_free(old_info_hash, bytes);
- fib_hash_free(old_laddrhash, bytes);
+ fib_info_hash_free(old_info_hash, bytes);
+ fib_info_hash_free(old_laddrhash, bytes);
}
struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -718,8 +720,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
#endif
err = -ENOBUFS;
- if (fib_info_cnt >= fib_hash_size) {
- unsigned int new_size = fib_hash_size << 1;
+ if (fib_info_cnt >= fib_info_hash_size) {
+ unsigned int new_size = fib_info_hash_size << 1;
struct hlist_head *new_info_hash;
struct hlist_head *new_laddrhash;
unsigned int bytes;
@@ -727,21 +729,27 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (!new_size)
new_size = 1;
bytes = new_size * sizeof(struct hlist_head *);
- new_info_hash = fib_hash_alloc(bytes);
- new_laddrhash = fib_hash_alloc(bytes);
+ new_info_hash = fib_info_hash_alloc(bytes);
+ new_laddrhash = fib_info_hash_alloc(bytes);
if (!new_info_hash || !new_laddrhash) {
- fib_hash_free(new_info_hash, bytes);
- fib_hash_free(new_laddrhash, bytes);
+ fib_info_hash_free(new_info_hash, bytes);
+ fib_info_hash_free(new_laddrhash, bytes);
} else
- fib_hash_move(new_info_hash, new_laddrhash, new_size);
+ fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
- if (!fib_hash_size)
+ if (!fib_info_hash_size)
goto failure;
}
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (fi == NULL)
goto failure;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!fi->fib_metrics)
+ goto failure;
+ } else
+ fi->fib_metrics = (u32 *) dst_default_metrics;
fib_info_cnt++;
fi->fib_net = hold_net(net);
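The allocation added above pairs with the free_fib_info_rcu() change earlier in this file: a fib_info without explicit metrics now points at the shared, read-only dst_default_metrics array, and only a route configured with fc_mx gets its own RTAX_MAX-sized block, which is the only case the RCU callback kfree()s. A minimal sketch of that share-by-default pattern, with placeholder names (entry, default_metrics, N_METRICS) instead of the kernel structures:

#include <stdlib.h>
#include <string.h>

#define N_METRICS 16                            /* stands in for RTAX_MAX */

static const unsigned int default_metrics[N_METRICS];  /* shared, never freed */

struct entry {
        unsigned int *metrics;
};

/* Point at the shared defaults unless the caller supplies its own values. */
static int entry_set_metrics(struct entry *e, const unsigned int *src)
{
        if (!src) {
                e->metrics = (unsigned int *)default_metrics;
                return 0;
        }
        e->metrics = malloc(sizeof(unsigned int) * N_METRICS);
        if (!e->metrics)
                return -1;
        memcpy(e->metrics, src, sizeof(unsigned int) * N_METRICS);
        return 0;
}

static void entry_free(struct entry *e)
{
        if (e->metrics != (unsigned int *)default_metrics)
                free(e->metrics);               /* only the private copy is freed */
}

int main(void)
{
        unsigned int own_metrics[N_METRICS] = { [2] = 1500 };  /* e.g. an MTU slot */
        struct entry shared = { 0 }, own = { 0 };

        entry_set_metrics(&shared, NULL);       /* shares default_metrics */
        entry_set_metrics(&own, own_metrics);   /* gets a private copy */

        entry_free(&shared);
        entry_free(&own);
        return 0;
}

The identity comparison against the shared array, rather than a flag, is what distinguishes the two cases on the free path, exactly as in free_fib_info_rcu().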
@@ -779,7 +787,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto err_inval;
if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
goto err_inval;
#endif
@@ -792,7 +800,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
nh->nh_oif = cfg->fc_oif;
nh->nh_gw = cfg->fc_gw;
nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -881,8 +889,9 @@ failure:
}
/* Note! fib_semantic_match intentionally uses RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
- struct fib_result *res, int prefixlen, int fib_flags)
+int fib_semantic_match(struct fib_table *tb, struct list_head *head,
+ const struct flowi *flp, struct fib_result *res,
+ int prefixlen, int fib_flags)
{
struct fib_alias *fa;
int nh_sel = 0;
@@ -946,6 +955,8 @@ out_fill_res:
res->type = fa->fa_type;
res->scope = fa->fa_scope;
res->fi = fa->fa_info;
+ res->table = tb;
+ res->fa_head = head;
if (!(fib_flags & FIB_LOOKUP_NOREF))
atomic_inc(&res->fi->fib_clntref);
return 0;
@@ -1002,7 +1013,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (fi->fib_nh->nh_oif)
NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (fi->fib_nh[0].nh_tclassid)
NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
@@ -1027,7 +1038,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (nh->nh_gw)
NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (nh->nh_tclassid)
NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
@@ -1125,6 +1136,62 @@ int fib_sync_down_dev(struct net_device *dev, int force)
return ret;
}
+/* Must be invoked inside of an RCU protected region. */
+void fib_select_default(struct fib_result *res)
+{
+ struct fib_info *fi = NULL, *last_resort = NULL;
+ struct list_head *fa_head = res->fa_head;
+ struct fib_table *tb = res->table;
+ int order = -1, last_idx = -1;
+ struct fib_alias *fa;
+
+ list_for_each_entry_rcu(fa, fa_head, fa_list) {
+ struct fib_info *next_fi = fa->fa_info;
+
+ if (fa->fa_scope != res->scope ||
+ fa->fa_type != RTN_UNICAST)
+ continue;
+
+ if (next_fi->fib_priority > res->fi->fib_priority)
+ break;
+ if (!next_fi->fib_nh[0].nh_gw ||
+ next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+ continue;
+
+ fib_alias_accessed(fa);
+
+ if (fi == NULL) {
+ if (next_fi != res->fi)
+ break;
+ } else if (!fib_detect_death(fi, order, &last_resort,
+ &last_idx, tb->tb_default)) {
+ fib_result_assign(res, fi);
+ tb->tb_default = order;
+ goto out;
+ }
+ fi = next_fi;
+ order++;
+ }
+
+ if (order <= 0 || fi == NULL) {
+ tb->tb_default = -1;
+ goto out;
+ }
+
+ if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+ tb->tb_default)) {
+ fib_result_assign(res, fi);
+ tb->tb_default = order;
+ goto out;
+ }
+
+ if (last_idx >= 0)
+ fib_result_assign(res, last_resort);
+ tb->tb_default = last_idx;
+out:
+ return;
+}
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/*
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0f280348e0fd..edf3b0997e01 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -95,7 +95,7 @@ typedef unsigned int t_key;
#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)
-struct node {
+struct rt_trie_node {
unsigned long parent;
t_key key;
};
@@ -126,7 +126,7 @@ struct tnode {
struct work_struct work;
struct tnode *tnode_free;
};
- struct node *child[0];
+ struct rt_trie_node *child[0];
};
#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,16 +151,16 @@ struct trie_stat {
};
struct trie {
- struct node *trie;
+ struct rt_trie_node *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats stats;
#endif
};
-static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull);
-static struct node *resize(struct trie *t, struct tnode *tn);
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
@@ -177,12 +177,12 @@ static const int sync_pages = 128;
static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
-static inline struct tnode *node_parent(struct node *node)
+static inline struct tnode *node_parent(struct rt_trie_node *node)
{
return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
}
-static inline struct tnode *node_parent_rcu(struct node *node)
+static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
{
struct tnode *ret = node_parent(node);
@@ -192,22 +192,22 @@ static inline struct tnode *node_parent_rcu(struct node *node)
/* Same as rcu_assign_pointer
* but that macro() assumes that value is a pointer.
*/
-static inline void node_set_parent(struct node *node, struct tnode *ptr)
+static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
{
smp_wmb();
node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
-static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
{
BUG_ON(i >= 1U << tn->bits);
return tn->child[i];
}
-static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
- struct node *ret = tnode_get_child(tn, i);
+ struct rt_trie_node *ret = tnode_get_child(tn, i);
return rcu_dereference_rtnl(ret);
}
@@ -217,12 +217,12 @@ static inline int tnode_child_length(const struct tnode *tn)
return 1 << tn->bits;
}
-static inline t_key mask_pfx(t_key k, unsigned short l)
+static inline t_key mask_pfx(t_key k, unsigned int l)
{
return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
}
-static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
+static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
{
if (offset < KEYLENGTH)
return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
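The retyping above (signed to unsigned offsets, and struct node becoming struct rt_trie_node) is behaviour-neutral: tkey_extract_bits() still returns `bits` consecutive bits of the key starting `offset` bits from the most significant end, and mask_pfx() keeps only the leading `l` bits. A standalone illustration of the two helpers, assuming KEYLENGTH is 32 and staying on the visible offset < KEYLENGTH branch, with bits in 1..KEYLENGTH:

#include <stdint.h>
#include <stdio.h>

#define KEYLENGTH 32

typedef uint32_t t_key;

/* Keep only the leading l bits of k (cf. mask_pfx above). */
static t_key mask_pfx(t_key k, unsigned int l)
{
        return (l == 0) ? 0 : k >> (KEYLENGTH - l) << (KEYLENGTH - l);
}

/* Take `bits` bits starting `offset` bits from the MSB (cf. tkey_extract_bits,
 * visible branch only: offset < KEYLENGTH, bits >= 1). */
static t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
{
        return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
}

int main(void)
{
        t_key key = 0xc0a80a01;                 /* 192.168.10.1 */

        printf("/24 prefix: %08x\n", mask_pfx(key, 24));          /* c0a80a00 */
        printf("bits 8..11: %x\n", tkey_extract_bits(key, 8, 4)); /* a */
        return 0;
}

Those two operations are essentially all the trie needs to index a tnode's child array and to compare a leaf key against a prefix.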
@@ -378,7 +378,7 @@ static void __tnode_free_rcu(struct rcu_head *head)
{
struct tnode *tn = container_of(head, struct tnode, rcu);
size_t size = sizeof(struct tnode) +
- (sizeof(struct node *) << tn->bits);
+ (sizeof(struct rt_trie_node *) << tn->bits);
if (size <= PAGE_SIZE)
kfree(tn);
@@ -402,7 +402,7 @@ static void tnode_free_safe(struct tnode *tn)
tn->tnode_free = tnode_free_head;
tnode_free_head = tn;
tnode_free_size += sizeof(struct tnode) +
- (sizeof(struct node *) << tn->bits);
+ (sizeof(struct rt_trie_node *) << tn->bits);
}
static void tnode_free_flush(void)
@@ -443,7 +443,7 @@ static struct leaf_info *leaf_info_new(int plen)
static struct tnode *tnode_new(t_key key, int pos, int bits)
{
- size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
+ size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
struct tnode *tn = tnode_alloc(sz);
if (tn) {
@@ -456,7 +456,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
}
pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct node) << bits);
+ sizeof(struct rt_trie_node) << bits);
return tn;
}
@@ -465,7 +465,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
-static inline int tnode_full(const struct tnode *tn, const struct node *n)
+static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
{
if (n == NULL || IS_LEAF(n))
return 0;
@@ -474,7 +474,7 @@ static inline int tnode_full(const struct tnode *tn, const struct node *n)
}
static inline void put_child(struct trie *t, struct tnode *tn, int i,
- struct node *n)
+ struct rt_trie_node *n)
{
tnode_put_child_reorg(tn, i, n, -1);
}
@@ -484,10 +484,10 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
* Update the value of full_children and empty_children.
*/
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull)
{
- struct node *chi = tn->child[i];
+ struct rt_trie_node *chi = tn->child[i];
int isfull;
BUG_ON(i >= 1<<tn->bits);
@@ -515,7 +515,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
}
#define MAX_WORK 10
-static struct node *resize(struct trie *t, struct tnode *tn)
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
{
int i;
struct tnode *old_tn;
@@ -605,7 +605,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!node_parent((struct node *)tn)) {
+ if (!node_parent((struct rt_trie_node *)tn)) {
inflate_threshold_use = inflate_threshold_root;
halve_threshold_use = halve_threshold_root;
} else {
@@ -635,7 +635,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Return if at least one inflate is run */
if (max_work != MAX_WORK)
- return (struct node *) tn;
+ return (struct rt_trie_node *) tn;
/*
* Halve as long as the number of empty children in this
@@ -663,7 +663,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
if (tn->empty_children == tnode_child_length(tn) - 1) {
one_child:
for (i = 0; i < tnode_child_length(tn); i++) {
- struct node *n;
+ struct rt_trie_node *n;
n = tn->child[i];
if (!n)
@@ -676,7 +676,7 @@ one_child:
return n;
}
}
- return (struct node *) tn;
+ return (struct rt_trie_node *) tn;
}
static struct tnode *inflate(struct trie *t, struct tnode *tn)
@@ -723,14 +723,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
goto nomem;
}
- put_child(t, tn, 2*i, (struct node *) left);
- put_child(t, tn, 2*i+1, (struct node *) right);
+ put_child(t, tn, 2*i, (struct rt_trie_node *) left);
+ put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
}
}
for (i = 0; i < olen; i++) {
struct tnode *inode;
- struct node *node = tnode_get_child(oldtnode, i);
+ struct rt_trie_node *node = tnode_get_child(oldtnode, i);
struct tnode *left, *right;
int size, j;
@@ -825,7 +825,7 @@ nomem:
static struct tnode *halve(struct trie *t, struct tnode *tn)
{
struct tnode *oldtnode = tn;
- struct node *left, *right;
+ struct rt_trie_node *left, *right;
int i;
int olen = tnode_child_length(tn);
@@ -856,7 +856,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (!newn)
goto nomem;
- put_child(t, tn, i/2, (struct node *)newn);
+ put_child(t, tn, i/2, (struct rt_trie_node *)newn);
}
}
@@ -958,7 +958,7 @@ fib_find_node(struct trie *t, u32 key)
{
int pos;
struct tnode *tn;
- struct node *n;
+ struct rt_trie_node *n;
pos = 0;
n = rcu_dereference_rtnl(t->trie);
@@ -993,17 +993,17 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
key = tn->key;
- while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
+ while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
tn = (struct tnode *) resize(t, (struct tnode *)tn);
tnode_put_child_reorg((struct tnode *)tp, cindex,
- (struct node *)tn, wasfull);
+ (struct rt_trie_node *)tn, wasfull);
- tp = node_parent((struct node *) tn);
+ tp = node_parent((struct rt_trie_node *) tn);
if (!tp)
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
if (!tp)
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
if (IS_TNODE(tn))
tn = (struct tnode *)resize(t, (struct tnode *)tn);
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
}
@@ -1025,7 +1025,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
int pos, newpos;
struct tnode *tp = NULL, *tn = NULL;
- struct node *n;
+ struct rt_trie_node *n;
struct leaf *l;
int missbit;
struct list_head *fa_head = NULL;
@@ -1111,10 +1111,10 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
if (t->trie && n == NULL) {
/* Case 2: n is NULL, and will just insert a new leaf */
- node_set_parent((struct node *)l, tp);
+ node_set_parent((struct rt_trie_node *)l, tp);
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
+ put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
} else {
/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
@@ -1141,18 +1141,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
return NULL;
}
- node_set_parent((struct node *)tn, tp);
+ node_set_parent((struct rt_trie_node *)tn, tp);
missbit = tkey_extract_bits(key, newpos, 1);
- put_child(t, tn, missbit, (struct node *)l);
+ put_child(t, tn, missbit, (struct rt_trie_node *)l);
put_child(t, tn, 1-missbit, n);
if (tp) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
put_child(t, (struct tnode *)tp, cindex,
- (struct node *)tn);
+ (struct rt_trie_node *)tn);
} else {
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
}
}
@@ -1340,7 +1340,7 @@ err:
}
/* should be called with rcu_read_lock */
-static int check_leaf(struct trie *t, struct leaf *l,
+static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
t_key key, const struct flowi *flp,
struct fib_result *res, int fib_flags)
{
@@ -1356,7 +1356,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
if (l->key != (key & ntohl(mask)))
continue;
- err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
+ err = fib_semantic_match(tb, &li->falh, flp, res, plen, fib_flags);
#ifdef CONFIG_IP_FIB_TRIE_STATS
if (err <= 0)
@@ -1376,13 +1376,13 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
{
struct trie *t = (struct trie *) tb->tb_data;
int ret;
- struct node *n;
+ struct rt_trie_node *n;
struct tnode *pn;
- int pos, bits;
+ unsigned int pos, bits;
t_key key = ntohl(flp->fl4_dst);
- int chopped_off;
+ unsigned int chopped_off;
t_key cindex = 0;
- int current_prefix_length = KEYLENGTH;
+ unsigned int current_prefix_length = KEYLENGTH;
struct tnode *cn;
t_key pref_mismatch;
@@ -1398,7 +1398,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
/* Just a leaf? */
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+ ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
goto found;
}
@@ -1423,7 +1423,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
}
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+ ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
if (ret > 0)
goto backtrace;
goto found;
@@ -1541,7 +1541,7 @@ backtrace:
if (chopped_off <= pn->bits) {
cindex &= ~(1 << (chopped_off-1));
} else {
- struct tnode *parent = node_parent_rcu((struct node *) pn);
+ struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
if (!parent)
goto failed;
@@ -1568,7 +1568,7 @@ found:
*/
static void trie_leaf_remove(struct trie *t, struct leaf *l)
{
- struct tnode *tp = node_parent((struct node *) l);
+ struct tnode *tp = node_parent((struct rt_trie_node *) l);
pr_debug("entering trie_leaf_remove(%p)\n", l);
@@ -1706,7 +1706,7 @@ static int trie_flush_leaf(struct leaf *l)
* Scan for the next right leaf starting at node p->child[idx]
* Since we have back pointer, no recursion necessary.
*/
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
+static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
{
do {
t_key idx;
@@ -1732,7 +1732,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
}
/* Node empty, walk back up to parent */
- c = (struct node *) p;
+ c = (struct rt_trie_node *) p;
} while ((p = node_parent_rcu(c)) != NULL);
return NULL; /* Root of trie */
@@ -1753,7 +1753,7 @@ static struct leaf *trie_firstleaf(struct trie *t)
static struct leaf *trie_nextleaf(struct leaf *l)
{
- struct node *c = (struct node *) l;
+ struct rt_trie_node *c = (struct rt_trie_node *) l;
struct tnode *p = node_parent_rcu(c);
if (!p)
@@ -1802,80 +1802,6 @@ void fib_free_table(struct fib_table *tb)
kfree(tb);
}
-void fib_table_select_default(struct fib_table *tb,
- const struct flowi *flp,
- struct fib_result *res)
-{
- struct trie *t = (struct trie *) tb->tb_data;
- int order, last_idx;
- struct fib_info *fi = NULL;
- struct fib_info *last_resort;
- struct fib_alias *fa = NULL;
- struct list_head *fa_head;
- struct leaf *l;
-
- last_idx = -1;
- last_resort = NULL;
- order = -1;
-
- rcu_read_lock();
-
- l = fib_find_node(t, 0);
- if (!l)
- goto out;
-
- fa_head = get_fa_head(l, 0);
- if (!fa_head)
- goto out;
-
- if (list_empty(fa_head))
- goto out;
-
- list_for_each_entry_rcu(fa, fa_head, fa_list) {
- struct fib_info *next_fi = fa->fa_info;
-
- if (fa->fa_scope != res->scope ||
- fa->fa_type != RTN_UNICAST)
- continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
- continue;
-
- fib_alias_accessed(fa);
-
- if (fi == NULL) {
- if (next_fi != res->fi)
- break;
- } else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- fi = next_fi;
- order++;
- }
- if (order <= 0 || fi == NULL) {
- tb->tb_default = -1;
- goto out;
- }
-
- if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- if (last_idx >= 0)
- fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
-out:
- rcu_read_unlock();
-}
-
static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb)
@@ -1990,7 +1916,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
return skb->len;
}
-void __init fib_hash_init(void)
+void __init fib_trie_init(void)
{
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
@@ -2003,8 +1929,7 @@ void __init fib_hash_init(void)
}
-/* Fix more generic FIB names for init later */
-struct fib_table *fib_hash_table(u32 id)
+struct fib_table *fib_trie_table(u32 id)
{
struct fib_table *tb;
struct trie *t;
@@ -2036,7 +1961,7 @@ struct fib_trie_iter {
unsigned int depth;
};
-static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
{
struct tnode *tn = iter->tnode;
unsigned int cindex = iter->index;
@@ -2050,7 +1975,7 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
iter->tnode, iter->index, iter->depth);
rescan:
while (cindex < (1<<tn->bits)) {
- struct node *n = tnode_get_child_rcu(tn, cindex);
+ struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
if (n) {
if (IS_LEAF(n)) {
@@ -2069,7 +1994,7 @@ rescan:
}
/* Current node exhausted, pop back up */
- p = node_parent_rcu((struct node *)tn);
+ p = node_parent_rcu((struct rt_trie_node *)tn);
if (p) {
cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
tn = p;
@@ -2081,10 +2006,10 @@ rescan:
return NULL;
}
-static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
- struct node *n;
+ struct rt_trie_node *n;
if (!t)
return NULL;
@@ -2108,7 +2033,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
- struct node *n;
+ struct rt_trie_node *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
@@ -2181,7 +2106,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
- bytes += sizeof(struct node *) * pointers;
+ bytes += sizeof(struct rt_trie_node *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
@@ -2262,7 +2187,7 @@ static const struct file_operations fib_triestat_fops = {
.release = single_release_net,
};
-static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -2275,7 +2200,7 @@ static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
- struct node *n;
+ struct rt_trie_node *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
@@ -2304,7 +2229,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
- struct node *n;
+ struct rt_trie_node *n;
++*pos;
/* next node in same table */
@@ -2390,7 +2315,7 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
- struct node *n = v;
+ struct rt_trie_node *n = v;
if (!node_parent_rcu(n))
fib_table_print(seq, iter->tb);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4aa1b7f01ea0..2a86c8951dcd 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -233,48 +233,11 @@ static inline void icmp_xmit_unlock(struct sock *sk)
* Send an ICMP frame.
*/
-/*
- * Check transmit rate limitation for given message.
- * The rate information is held in the destination cache now.
- * This function is generic and could be used for other purposes
- * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
- *
- * Note that the same dst_entry fields are modified by functions in
- * route.c too, but these work for packet destinations while xrlim_allow
- * works for icmp destinations. This means the rate limiting information
- * for one "ip object" is shared - and these ICMPs are twice limited:
- * by source and by destination.
- *
- * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
- * SHOULD allow setting of rate limits
- *
- * Shared between ICMPv4 and ICMPv6.
- */
-#define XRLIM_BURST_FACTOR 6
-int xrlim_allow(struct dst_entry *dst, int timeout)
-{
- unsigned long now, token = dst->rate_tokens;
- int rc = 0;
-
- now = jiffies;
- token += now - dst->rate_last;
- dst->rate_last = now;
- if (token > XRLIM_BURST_FACTOR * timeout)
- token = XRLIM_BURST_FACTOR * timeout;
- if (token >= timeout) {
- token -= timeout;
- rc = 1;
- }
- dst->rate_tokens = token;
- return rc;
-}
-EXPORT_SYMBOL(xrlim_allow);
-
-static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
int type, int code)
{
struct dst_entry *dst = &rt->dst;
- int rc = 1;
+ bool rc = true;
if (type > NR_ICMP_TYPES)
goto out;
@@ -288,8 +251,12 @@ static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
goto out;
/* Limit if icmp type is enabled in ratemask. */
- if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
- rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
+ if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ rc = inet_peer_xrlim_allow(rt->peer,
+ net->ipv4.sysctl_icmp_ratelimit);
+ }
out:
return rc;
}
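The rewrite above keeps ICMP rate limiting but moves the state: the token bucket that xrlim_allow() ran over dst->rate_tokens/rate_last now lives on the inet_peer entry, bound with rt_bind_peer() and consulted through inet_peer_xrlim_allow(). The bucket arithmetic itself is unchanged in spirit; a self-contained sketch of it, with now_jiffies standing in for the kernel's jiffies clock and xrlim_allow_sketch() as an illustrative name:

#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

struct bucket {
        unsigned long rate_tokens;
        unsigned long rate_last;
};

/* One ICMP costs `timeout` tokens; tokens accrue with elapsed time and are
 * capped at XRLIM_BURST_FACTOR * timeout, mirroring the removed xrlim_allow(). */
static bool xrlim_allow_sketch(struct bucket *b, unsigned long now_jiffies,
                               unsigned long timeout)
{
        unsigned long token = b->rate_tokens + (now_jiffies - b->rate_last);
        bool ok = false;

        b->rate_last = now_jiffies;
        if (token > XRLIM_BURST_FACTOR * timeout)
                token = XRLIM_BURST_FACTOR * timeout;
        if (token >= timeout) {
                token -= timeout;
                ok = true;
        }
        b->rate_tokens = token;
        return ok;
}

int main(void)
{
        struct bucket b = { .rate_tokens = 0, .rate_last = 0 };
        unsigned long t = 0, limit = 1000;      /* e.g. 1000 "jiffies" per ICMP */

        for (int i = 0; i < 4; i++, t += 500)
                printf("t=%lu allow=%d\n", t, xrlim_allow_sketch(&b, t, limit));
        return 0;
}

One call per candidate ICMP, with the configured ratelimit as the timeout, reproduces the old behaviour of roughly one message per interval with a burst allowance of six.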
@@ -402,6 +369,98 @@ out_unlock:
icmp_xmit_unlock(sk);
}
+static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
+ struct iphdr *iph,
+ __be32 saddr, u8 tos,
+ int type, int code,
+ struct icmp_bxm *param)
+{
+ struct flowi fl = {
+ .fl4_dst = (param->replyopts.srr ?
+ param->replyopts.faddr : iph->saddr),
+ .fl4_src = saddr,
+ .fl4_tos = RT_TOS(tos),
+ .proto = IPPROTO_ICMP,
+ .fl_icmp_type = type,
+ .fl_icmp_code = code,
+ };
+ struct rtable *rt, *rt2;
+ int err;
+
+ security_skb_classify_flow(skb_in, &fl);
+ err = __ip_route_output_key(net, &rt, &fl);
+ if (err)
+ return ERR_PTR(err);
+
+ /* No need to clone since we're just using its address. */
+ rt2 = rt;
+
+ if (!fl.fl4_src)
+ fl.fl4_src = rt->rt_src;
+
+ err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
+ switch (err) {
+ case 0:
+ if (rt != rt2)
+ return rt;
+ break;
+ case -EPERM:
+ rt = NULL;
+ break;
+ default:
+ return ERR_PTR(err);
+ }
+
+ err = xfrm_decode_session_reverse(skb_in, &fl, AF_INET);
+ if (err)
+ goto relookup_failed;
+
+ if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL) {
+ err = __ip_route_output_key(net, &rt2, &fl);
+ } else {
+ struct flowi fl2 = {};
+ unsigned long orefdst;
+
+ fl2.fl4_dst = fl.fl4_src;
+ err = ip_route_output_key(net, &rt2, &fl2);
+ if (err)
+ goto relookup_failed;
+ /* Ugh! */
+ orefdst = skb_in->_skb_refdst; /* save old refdst */
+ err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
+ RT_TOS(tos), rt2->dst.dev);
+
+ dst_release(&rt2->dst);
+ rt2 = skb_rtable(skb_in);
+ skb_in->_skb_refdst = orefdst; /* restore old refdst */
+ }
+
+ if (err)
+ goto relookup_failed;
+
+ err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
+ XFRM_LOOKUP_ICMP);
+ switch (err) {
+ case 0:
+ dst_release(&rt->dst);
+ rt = rt2;
+ break;
+ case -EPERM:
+ return ERR_PTR(err);
+ default:
+ if (!rt)
+ return ERR_PTR(err);
+ break;
+ }
+
+
+ return rt;
+
+relookup_failed:
+ if (rt)
+ return rt;
+ return ERR_PTR(err);
+}
/*
* Send an ICMP message in response to a situation
@@ -539,86 +598,11 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
ipc.opt = &icmp_param.replyopts;
ipc.tx_flags = 0;
- {
- struct flowi fl = {
- .fl4_dst = icmp_param.replyopts.srr ?
- icmp_param.replyopts.faddr : iph->saddr,
- .fl4_src = saddr,
- .fl4_tos = RT_TOS(tos),
- .proto = IPPROTO_ICMP,
- .fl_icmp_type = type,
- .fl_icmp_code = code,
- };
- int err;
- struct rtable *rt2;
-
- security_skb_classify_flow(skb_in, &fl);
- if (__ip_route_output_key(net, &rt, &fl))
- goto out_unlock;
-
- /* No need to clone since we're just using its address. */
- rt2 = rt;
-
- if (!fl.nl_u.ip4_u.saddr)
- fl.nl_u.ip4_u.saddr = rt->rt_src;
-
- err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
- switch (err) {
- case 0:
- if (rt != rt2)
- goto route_done;
- break;
- case -EPERM:
- rt = NULL;
- break;
- default:
- goto out_unlock;
- }
-
- if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
- goto relookup_failed;
-
- if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
- err = __ip_route_output_key(net, &rt2, &fl);
- else {
- struct flowi fl2 = {};
- unsigned long orefdst;
-
- fl2.fl4_dst = fl.fl4_src;
- if (ip_route_output_key(net, &rt2, &fl2))
- goto relookup_failed;
-
- /* Ugh! */
- orefdst = skb_in->_skb_refdst; /* save old refdst */
- err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
- RT_TOS(tos), rt2->dst.dev);
-
- dst_release(&rt2->dst);
- rt2 = skb_rtable(skb_in);
- skb_in->_skb_refdst = orefdst; /* restore old refdst */
- }
-
- if (err)
- goto relookup_failed;
-
- err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
- XFRM_LOOKUP_ICMP);
- switch (err) {
- case 0:
- dst_release(&rt->dst);
- rt = rt2;
- break;
- case -EPERM:
- goto ende;
- default:
-relookup_failed:
- if (!rt)
- goto out_unlock;
- break;
- }
- }
+ rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
+ type, code, &icmp_param);
+ if (IS_ERR(rt))
+ goto out_unlock;
-route_done:
if (!icmpv4_xrlim_allow(net, rt, type, code))
goto ende;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 97e5fb765265..7f85d4aec26a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -369,7 +369,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
struct net *net = sock_net(sk);
security_req_classify_flow(req, &fl);
- if (ip_route_output_flow(net, &rt, &fl, sk, 0))
+ if (ip_route_output_flow(net, &rt, &fl, sk))
goto no_route;
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto route_err;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909cf701..3c8dfa16614d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@ restart:
}
rcu_read_unlock();
+ local_bh_disable();
inet_twsk_deschedule(tw, twdr);
+ local_bh_enable();
inet_twsk_put(tw);
goto restart_rcu;
}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index a96e65674ac3..48f8d4592ccd 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -167,9 +167,9 @@ static int addr_compare(const struct inetpeer_addr *a,
int i, n = (a->family == AF_INET ? 1 : 4);
for (i = 0; i < n; i++) {
- if (a->a6[i] == b->a6[i])
+ if (a->addr.a6[i] == b->addr.a6[i])
continue;
- if (a->a6[i] < b->a6[i])
+ if (a->addr.a6[i] < b->addr.a6[i])
return -1;
return 1;
}
@@ -510,8 +510,13 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
p->daddr = *daddr;
atomic_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
- atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
+ atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
p->tcp_ts_stamp = 0;
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
+ p->rate_last = 0;
+ p->pmtu_expires = 0;
+ memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
INIT_LIST_HEAD(&p->unused);
@@ -579,3 +584,44 @@ void inet_putpeer(struct inet_peer *p)
local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_putpeer);
+
+/*
+ * Check transmit rate limitation for given message.
+ * The rate information is held in the inet_peer entries now.
+ * This function is generic and could be used for other purposes
+ * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
+ *
+ * Note that the same inet_peer fields are modified by functions in
+ * route.c too, but these work for packet destinations while xrlim_allow
+ * works for icmp destinations. This means the rate limiting information
+ * for one "ip object" is shared - and these ICMPs are twice limited:
+ * by source and by destination.
+ *
+ * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
+ * SHOULD allow setting of rate limits
+ *
+ * Shared between ICMPv4 and ICMPv6.
+ */
+#define XRLIM_BURST_FACTOR 6
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+{
+ unsigned long now, token;
+ bool rc = false;
+
+ if (!peer)
+ return true;
+
+ token = peer->rate_tokens;
+ now = jiffies;
+ token += now - peer->rate_last;
+ peer->rate_last = now;
+ if (token > XRLIM_BURST_FACTOR * timeout)
+ token = XRLIM_BURST_FACTOR * timeout;
+ if (token >= timeout) {
+ token -= timeout;
+ rc = true;
+ }
+ peer->rate_tokens = token;
+ return rc;
+}
+EXPORT_SYMBOL(inet_peer_xrlim_allow);
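The new inet_peer_xrlim_allow() above is a token bucket over the peer's rate_tokens/rate_last pair: credit accrues with elapsed jiffies, is clamped to XRLIM_BURST_FACTOR * timeout, and each message that is let through spends one timeout's worth of credit. A minimal userspace sketch of the same arithmetic follows; the peer_state struct, the integer tick clock and the main() driver are illustrative stand-ins, not kernel code.

/* Sketch of the token-bucket check used by inet_peer_xrlim_allow().
 * Assumption: time is an integer tick count supplied by the caller. */
#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

struct peer_state {			/* reduced to the two fields used */
	unsigned long rate_tokens;	/* accumulated credit, in ticks */
	unsigned long rate_last;	/* tick of the last update */
};

static bool xrlim_allow_sim(struct peer_state *p, unsigned long now,
			    unsigned long timeout)
{
	unsigned long token = p->rate_tokens + (now - p->rate_last);
	bool ok = false;

	p->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;	/* cap the burst */
	if (token >= timeout) {
		token -= timeout;	/* spend one timeout per message */
		ok = true;
	}
	p->rate_tokens = token;
	return ok;
}

int main(void)
{
	struct peer_state p = { 0, 0 };
	unsigned long t;

	/* With timeout == 100 ticks, roughly one message per 100 ticks is
	 * allowed in steady state; up to 6 can burst after a long idle. */
	for (t = 0; t <= 1000; t += 50)
		printf("t=%4lu allow=%d\n", t, xrlim_allow_sim(&p, t, 100));
	return 0;
}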
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d859bcc26cb7..d7b2b0987a3b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -340,7 +340,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
}
}
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (unlikely(skb_dst(skb)->tclassid)) {
struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
u32 idx = skb_dst(skb)->tclassid;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04c7b3ba6b39..33316b3534ca 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -355,7 +355,7 @@ int ip_queue_xmit(struct sk_buff *skb)
* itself out.
*/
security_sk_classify_flow(sk, &fl);
- if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
+ if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk))
goto no_route;
}
sk_setup_caps(sk, &rt->dst);
@@ -733,6 +733,7 @@ csum_page(struct page *page, int offset, int copy)
}
static inline int ip_ufo_append_data(struct sock *sk,
+ struct sk_buff_head *queue,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
@@ -745,7 +746,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
* device, so create one single skb packet containing complete
* udp datagram
*/
- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+ if ((skb = skb_peek_tail(queue)) == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
@@ -767,40 +768,28 @@ static inline int ip_ufo_append_data(struct sock *sk,
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- sk->sk_sndmsg_off = 0;
/* specify the length of each IP datagram fragment */
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- __skb_queue_tail(&sk->sk_write_queue, skb);
+ __skb_queue_tail(queue, skb);
}
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
-/*
- * ip_append_data() and ip_append_page() can make one large IP datagram
- * from many pieces of data. Each pieces will be holded on the socket
- * until ip_push_pending_frames() is called. Each piece can be a page
- * or non-page data.
- *
- * Not only UDP, other transport protocols - e.g. raw sockets - can use
- * this interface potentially.
- *
- * LATER: length must be adjusted by pad at tail, when it is required.
- */
-int ip_append_data(struct sock *sk,
- int getfrag(void *from, char *to, int offset, int len,
- int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
- struct ipcm_cookie *ipc, struct rtable **rtp,
- unsigned int flags)
+static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
+ struct inet_cork *cork,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- struct ip_options *opt = NULL;
+ struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
@@ -809,58 +798,19 @@ int ip_append_data(struct sock *sk,
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
- struct rtable *rt;
+ struct rtable *rt = (struct rtable *)cork->dst;
- if (flags&MSG_PROBE)
- return 0;
-
- if (skb_queue_empty(&sk->sk_write_queue)) {
- /*
- * setup for corking.
- */
- opt = ipc->opt;
- if (opt) {
- if (inet->cork.opt == NULL) {
- inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
- if (unlikely(inet->cork.opt == NULL))
- return -ENOBUFS;
- }
- memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
- inet->cork.flags |= IPCORK_OPT;
- inet->cork.addr = ipc->addr;
- }
- rt = *rtp;
- if (unlikely(!rt))
- return -EFAULT;
- /*
- * We steal reference to this route, caller should not release it
- */
- *rtp = NULL;
- inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
- rt->dst.dev->mtu :
- dst_mtu(rt->dst.path);
- inet->cork.dst = &rt->dst;
- inet->cork.length = 0;
- sk->sk_sndmsg_page = NULL;
- sk->sk_sndmsg_off = 0;
- exthdrlen = rt->dst.header_len;
- length += exthdrlen;
- transhdrlen += exthdrlen;
- } else {
- rt = (struct rtable *)inet->cork.dst;
- if (inet->cork.flags & IPCORK_OPT)
- opt = inet->cork.opt;
+ exthdrlen = transhdrlen ? rt->dst.header_len : 0;
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
+ mtu = cork->fragsize;
- transhdrlen = 0;
- exthdrlen = 0;
- mtu = inet->cork.fragsize;
- }
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
- if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
+ if (cork->length + length > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
@@ -876,15 +826,15 @@ int ip_append_data(struct sock *sk,
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
- skb = skb_peek_tail(&sk->sk_write_queue);
+ skb = skb_peek_tail(queue);
- inet->cork.length += length;
+ cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
- err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
- fragheaderlen, transhdrlen, mtu,
- flags);
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, transhdrlen,
+ mtu, flags);
if (err)
goto error;
return 0;
@@ -961,7 +911,7 @@ alloc_new_skb:
else
/* only the initial fragment is
time stamped */
- ipc->tx_flags = 0;
+ cork->tx_flags = 0;
}
if (skb == NULL)
goto error;
@@ -972,7 +922,7 @@ alloc_new_skb:
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
- skb_shinfo(skb)->tx_flags = ipc->tx_flags;
+ skb_shinfo(skb)->tx_flags = cork->tx_flags;
/*
* Find where to start putting bytes.
@@ -1009,7 +959,7 @@ alloc_new_skb:
/*
* Put the packet on the pending queue.
*/
- __skb_queue_tail(&sk->sk_write_queue, skb);
+ __skb_queue_tail(queue, skb);
continue;
}
@@ -1029,8 +979,8 @@ alloc_new_skb:
} else {
int i = skb_shinfo(skb)->nr_frags;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
- struct page *page = sk->sk_sndmsg_page;
- int off = sk->sk_sndmsg_off;
+ struct page *page = cork->page;
+ int off = cork->off;
unsigned int left;
if (page && (left = PAGE_SIZE - off) > 0) {
@@ -1042,7 +992,7 @@ alloc_new_skb:
goto error;
}
get_page(page);
- skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ skb_fill_page_desc(skb, i, page, off, 0);
frag = &skb_shinfo(skb)->frags[i];
}
} else if (i < MAX_SKB_FRAGS) {
@@ -1053,8 +1003,8 @@ alloc_new_skb:
err = -ENOMEM;
goto error;
}
- sk->sk_sndmsg_page = page;
- sk->sk_sndmsg_off = 0;
+ cork->page = page;
+ cork->off = 0;
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
@@ -1066,7 +1016,7 @@ alloc_new_skb:
err = -EFAULT;
goto error;
}
- sk->sk_sndmsg_off += copy;
+ cork->off += copy;
frag->size += copy;
skb->len += copy;
skb->data_len += copy;
@@ -1080,11 +1030,87 @@ alloc_new_skb:
return 0;
error:
- inet->cork.length -= length;
+ cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
+static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+ struct ipcm_cookie *ipc, struct rtable **rtp)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ip_options *opt;
+ struct rtable *rt;
+
+ /*
+ * setup for corking.
+ */
+ opt = ipc->opt;
+ if (opt) {
+ if (cork->opt == NULL) {
+ cork->opt = kmalloc(sizeof(struct ip_options) + 40,
+ sk->sk_allocation);
+ if (unlikely(cork->opt == NULL))
+ return -ENOBUFS;
+ }
+ memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
+ cork->flags |= IPCORK_OPT;
+ cork->addr = ipc->addr;
+ }
+ rt = *rtp;
+ if (unlikely(!rt))
+ return -EFAULT;
+ /*
+ * We steal reference to this route, caller should not release it
+ */
+ *rtp = NULL;
+ cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ cork->dst = &rt->dst;
+ cork->length = 0;
+ cork->tx_flags = ipc->tx_flags;
+ cork->page = NULL;
+ cork->off = 0;
+
+ return 0;
+}
+
+/*
+ * ip_append_data() and ip_append_page() can make one large IP datagram
+ * from many pieces of data. Each piece will be held on the socket
+ * until ip_push_pending_frames() is called. Each piece can be a page
+ * or non-page data.
+ *
+ * Not only UDP, other transport protocols - e.g. raw sockets - can use
+ * this interface potentially.
+ *
+ * LATER: length must be adjusted by pad at tail, when it is required.
+ */
+int ip_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ struct ipcm_cookie *ipc, struct rtable **rtp,
+ unsigned int flags)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ int err;
+
+ if (flags&MSG_PROBE)
+ return 0;
+
+ if (skb_queue_empty(&sk->sk_write_queue)) {
+ err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
+ if (err)
+ return err;
+ } else {
+ transhdrlen = 0;
+ }
+
+ return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
+ from, length, transhdrlen, flags);
+}
+
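The comment block above describes the corking model: ip_append_data() queues the pieces on sk->sk_write_queue and nothing hits the wire until ip_push_pending_frames() builds and sends the combined datagram. A hedged sketch of a caller follows; the linear_getfrag() helper, the flat kernel buffer and the assumption that ipc and rtp were already prepared (as udp_sendmsg() does) are illustrative, not part of this patch, and checksum handling is omitted.

#include <linux/string.h>
#include <net/sock.h>
#include <net/ip.h>

/* Trivial getfrag: treat the opaque 'from' pointer as a flat kernel buffer. */
static int linear_getfrag(void *from, char *to, int offset, int len,
			  int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}

static int example_corked_send(struct sock *sk, void *buf, int len,
			       struct ipcm_cookie *ipc, struct rtable **rtp)
{
	int err;

	lock_sock(sk);
	/* Queue the payload; the first append steals the route from *rtp. */
	err = ip_append_data(sk, linear_getfrag, buf, len, 0, ipc, rtp, 0);
	if (err)
		ip_flush_pending_frames(sk);	/* drop whatever was queued */
	else
		err = ip_push_pending_frames(sk); /* build and transmit */
	release_sock(sk);
	return err;
}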
ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
@@ -1228,40 +1254,41 @@ error:
return err;
}
-static void ip_cork_release(struct inet_sock *inet)
+static void ip_cork_release(struct inet_cork *cork)
{
- inet->cork.flags &= ~IPCORK_OPT;
- kfree(inet->cork.opt);
- inet->cork.opt = NULL;
- dst_release(inet->cork.dst);
- inet->cork.dst = NULL;
+ cork->flags &= ~IPCORK_OPT;
+ kfree(cork->opt);
+ cork->opt = NULL;
+ dst_release(cork->dst);
+ cork->dst = NULL;
}
/*
* Combined all pending IP fragments on the socket as one IP datagram
* and push them out.
*/
-int ip_push_pending_frames(struct sock *sk)
+struct sk_buff *__ip_make_skb(struct sock *sk,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
- struct rtable *rt = (struct rtable *)inet->cork.dst;
+ struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
__be16 df = 0;
__u8 ttl;
- int err = 0;
- if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
+ if ((skb = __skb_dequeue(queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
- while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
+ while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
@@ -1287,8 +1314,8 @@ int ip_push_pending_frames(struct sock *sk)
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
- if (inet->cork.flags & IPCORK_OPT)
- opt = inet->cork.opt;
+ if (cork->flags & IPCORK_OPT)
+ opt = cork->opt;
if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
@@ -1300,7 +1327,7 @@ int ip_push_pending_frames(struct sock *sk)
iph->ihl = 5;
if (opt) {
iph->ihl += opt->optlen>>2;
- ip_options_build(skb, opt, inet->cork.addr, rt, 0);
+ ip_options_build(skb, opt, cork->addr, rt, 0);
}
iph->tos = inet->tos;
iph->frag_off = df;
@@ -1316,44 +1343,95 @@ int ip_push_pending_frames(struct sock *sk)
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
*/
- inet->cork.dst = NULL;
+ cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
- /* Netfilter gets whole the not fragmented skb. */
+ ip_cork_release(cork);
+out:
+ return skb;
+}
+
+int ip_send_skb(struct sk_buff *skb)
+{
+ struct net *net = sock_net(skb->sk);
+ int err;
+
err = ip_local_out(skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
- goto error;
+ IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
}
-out:
- ip_cork_release(inet);
return err;
+}
-error:
- IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
- goto out;
+int ip_push_pending_frames(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ skb = ip_finish_skb(sk);
+ if (!skb)
+ return 0;
+
+ /* Netfilter gets the whole, unfragmented skb. */
+ return ip_send_skb(skb);
}
/*
* Throw away all pending data on the socket.
*/
-void ip_flush_pending_frames(struct sock *sk)
+static void __ip_flush_pending_frames(struct sock *sk,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork)
{
struct sk_buff *skb;
- while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+ while ((skb = __skb_dequeue_tail(queue)) != NULL)
kfree_skb(skb);
- ip_cork_release(inet_sk(sk));
+ ip_cork_release(cork);
+}
+
+void ip_flush_pending_frames(struct sock *sk)
+{
+ __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
}
+struct sk_buff *ip_make_skb(struct sock *sk,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ struct ipcm_cookie *ipc, struct rtable **rtp,
+ unsigned int flags)
+{
+ struct inet_cork cork = {};
+ struct sk_buff_head queue;
+ int err;
+
+ if (flags & MSG_PROBE)
+ return NULL;
+
+ __skb_queue_head_init(&queue);
+
+ err = ip_setup_cork(sk, &cork, ipc, rtp);
+ if (err)
+ return ERR_PTR(err);
+
+ err = __ip_append_data(sk, &queue, &cork, getfrag,
+ from, length, transhdrlen, flags);
+ if (err) {
+ __ip_flush_pending_frames(sk, &queue, &cork);
+ return ERR_PTR(err);
+ }
+
+ return __ip_make_skb(sk, &queue, &cork);
+}
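ip_make_skb() above is the other half of the refactor: it runs __ip_append_data() against a queue and inet_cork that live on the stack, so a single datagram can be built and handed to ip_send_skb() without touching the socket's corking state. A hedged sketch of such a one-shot caller, reusing the illustrative linear_getfrag() and prepared ipc/rtp from the previous example:

#include <linux/err.h>

static int example_oneshot_send(struct sock *sk, void *buf, int len,
				struct ipcm_cookie *ipc, struct rtable **rtp,
				unsigned int flags)
{
	struct sk_buff *skb;

	/* Build the complete datagram on a private queue. */
	skb = ip_make_skb(sk, linear_getfrag, buf, len, 0, ipc, rtp, flags);
	if (skb == NULL)		/* MSG_PROBE: nothing to send */
		return 0;
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* ip_send_skb() wraps ip_local_out() plus OUTDISCARDS accounting. */
	return ip_send_skb(skb);
}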
/*
* Fetch data from kernel space and fill in checksum if needed.
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index babd1a2bae5f..f926a310075d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -206,8 +206,9 @@ config IP_NF_TARGET_REDIRECT
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
- depends on NF_NAT
+ depends on NF_CONNTRACK_SNMP && NF_NAT
depends on NETFILTER_ADVANCED
+ default NF_NAT && NF_CONNTRACK_SNMP
---help---
This module implements an Application Layer Gateway (ALG) for
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e855fffaed95..e95054c690c6 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -866,6 +866,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(NFPROTO_ARP, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1333,6 +1334,7 @@ static int translate_compat_table(const char *name,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(NFPROTO_ARP);
+ xt_compat_init_offsets(NFPROTO_ARP, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 652efea013dc..ef7d7b9680ea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1063,6 +1063,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(AF_INET, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1664,6 +1665,7 @@ translate_compat_table(struct net *net,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET);
+ xt_compat_init_offsets(AF_INET, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1e26a4897655..403ca57f6011 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -300,13 +300,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
* that the ->target() function isn't called after ->destroy() */
ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL) {
- pr_info("no conntrack!\n");
- /* FIXME: need to drop invalid ones, since replies
- * to outgoing connections of other nodes will be
- * marked as INVALID */
+ if (ct == NULL)
return NF_DROP;
- }
/* special case: ICMP error handling. conntrack distinguishes between
* error messages (RELATED) and information requests (see below) */
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 72ffc8fda2e9..d76d6c9ed946 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -442,8 +442,7 @@ ipt_log_packet(u_int8_t pf,
}
#endif
- /* MAC logging for input path only. */
- if (in && !out)
+ if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, 0);
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 294a2a32f293..aef5d1fbe77d 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -60,7 +60,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
dev_net(out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
- if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+ if (ret != NF_DROP && ret != NF_STOLEN) {
iph = ip_hdr(skb);
if (iph->saddr != saddr ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 63f60fc5d26a..5585980fce2e 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -20,6 +20,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
struct ct_iter_state {
struct seq_net_private p;
@@ -35,7 +36,8 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
- n = rcu_dereference(net->ct.hash[st->bucket].first);
+ n = rcu_dereference(
+ hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
@@ -48,13 +50,14 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
- head = rcu_dereference(net->ct.hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
}
return head;
}
@@ -217,7 +220,8 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ n = rcu_dereference(
+ hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
if (n)
return n;
}
@@ -230,11 +234,12 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_expect_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
- head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
}
return head;
}
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 0f23b3f06df0..703f366fd235 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -44,13 +44,13 @@ static unsigned int help(struct sk_buff *skb,
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
- int ret;
+ int res;
exp->tuple.dst.u.tcp.port = htons(port);
- ret = nf_ct_expect_related(exp);
- if (ret == 0)
+ res = nf_ct_expect_related(exp);
+ if (res == 0)
break;
- else if (ret != -EBUSY) {
+ else if (res != -EBUSY) {
port = 0;
break;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index c04787ce1a71..21bcf471b25a 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -221,7 +221,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
manips not an issue. */
if (maniptype == IP_NAT_MANIP_SRC &&
!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
- if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
+ /* try the original tuple first */
+ if (in_range(orig_tuple, range)) {
+ if (!nf_nat_used_tuple(orig_tuple, ct)) {
+ *tuple = *orig_tuple;
+ return;
+ }
+ } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
+ range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
return;
@@ -266,7 +273,6 @@ nf_nat_setup_info(struct nf_conn *ct,
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple curr_tuple, new_tuple;
struct nf_conn_nat *nat;
- int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
/* nat helper or nfctnetlink also setup binding */
nat = nfct_nat(ct);
@@ -306,8 +312,7 @@ nf_nat_setup_info(struct nf_conn *ct,
ct->status |= IPS_DST_NAT;
}
- /* Place in source hash if this is the first time. */
- if (have_to_hash) {
+ if (maniptype == IP_NAT_MANIP_SRC) {
unsigned int srchash;
srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -323,9 +328,9 @@ nf_nat_setup_info(struct nf_conn *ct,
/* It's done. */
if (maniptype == IP_NAT_MANIP_DST)
- set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ ct->status |= IPS_DST_NAT_DONE;
else
- set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ ct->status |= IPS_SRC_NAT_DONE;
return NF_ACCEPT;
}
@@ -502,7 +507,10 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
int ret = 0;
spin_lock_bh(&nf_nat_lock);
- if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+ if (rcu_dereference_protected(
+ nf_nat_protos[proto->protonum],
+ lockdep_is_held(&nf_nat_lock)
+ ) != &nf_nat_unknown_protocol) {
ret = -EBUSY;
goto out;
}
@@ -532,7 +540,7 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
if (nat == NULL || nat->ct == NULL)
return;
- NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
+ NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
spin_lock_bh(&nf_nat_lock);
hlist_del_rcu(&nat->bysource);
@@ -545,11 +553,10 @@ static void nf_nat_move_storage(void *new, void *old)
struct nf_conn_nat *old_nat = old;
struct nf_conn *ct = old_nat->ct;
- if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
+ if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
return;
spin_lock_bh(&nf_nat_lock);
- new_nat->ct = ct;
hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
spin_unlock_bh(&nf_nat_lock);
}
@@ -679,8 +686,7 @@ static int __net_init nf_nat_net_init(struct net *net)
{
/* Leave them the same for the moment. */
net->ipv4.nat_htable_size = net->ct.htable_size;
- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
- &net->ipv4.nat_vmalloced, 0);
+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
@@ -702,8 +708,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
nf_ct_iterate_cleanup(net, &clean_nat, NULL);
synchronize_rcu();
- nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
- net->ipv4.nat_htable_size);
+ nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
}
static struct pernet_operations nf_nat_net_ops = {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ee5f419d0a56..8812a02078ab 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -54,6 +54,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
@@ -1310,9 +1311,9 @@ static int __init nf_nat_snmp_basic_init(void)
{
int ret = 0;
- ret = nf_conntrack_helper_register(&snmp_helper);
- if (ret < 0)
- return ret;
+ BUG_ON(nf_nat_snmp_hook != NULL);
+ rcu_assign_pointer(nf_nat_snmp_hook, help);
+
ret = nf_conntrack_helper_register(&snmp_trap_helper);
if (ret < 0) {
nf_conntrack_helper_unregister(&snmp_helper);
@@ -1323,7 +1324,7 @@ static int __init nf_nat_snmp_basic_init(void)
static void __exit nf_nat_snmp_basic_fini(void)
{
- nf_conntrack_helper_unregister(&snmp_helper);
+ rcu_assign_pointer(nf_nat_snmp_hook, NULL);
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6390ba299b3d..d7a2d1eaec09 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -555,7 +555,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
.fl4_tos = tos,
.proto = inet->hdrincl ? IPPROTO_RAW :
sk->sk_protocol,
- };
+ .flags = FLOWI_FLAG_CAN_SLEEP,
+ };
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl, msg);
if (err)
@@ -563,7 +564,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
security_sk_classify_flow(sk, &fl);
- err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
+ err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk);
}
if (err)
goto done;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6ed6603c2f6d..e24e4cf2a112 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -131,9 +131,6 @@ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
-
/*
* Interface to generic destination cache.
*/
@@ -152,6 +149,41 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
{
}
+static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ struct rtable *rt = (struct rtable *) dst;
+ struct inet_peer *peer;
+ u32 *p = NULL;
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+
+ peer = rt->peer;
+ if (peer) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ p = peer->metrics;
+ if (inet_metrics_new(peer))
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ } else {
+ if (rt->fi) {
+ fib_info_put(rt->fi);
+ rt->fi = NULL;
+ }
+ }
+ }
+ return p;
+}
+
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
@@ -159,6 +191,7 @@ static struct dst_ops ipv4_dst_ops = {
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.default_mtu = ipv4_default_mtu,
+ .cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
@@ -514,7 +547,7 @@ static const struct file_operations rt_cpu_seq_fops = {
.release = seq_release,
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
struct ip_rt_acct *dst, *src;
@@ -567,14 +600,14 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
if (!pde)
goto err2;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
if (!pde)
goto err3;
#endif
return 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
@@ -588,7 +621,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
@@ -632,7 +665,7 @@ static inline int rt_fast_clean(struct rtable *rth)
static inline int rt_valuable(struct rtable *rth)
{
return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
- rth->dst.expires;
+ (rth->peer && rth->peer->pmtu_expires);
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -643,13 +676,7 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
if (atomic_read(&rth->dst.__refcnt))
goto out;
- ret = 1;
- if (rth->dst.expires &&
- time_after_eq(jiffies, rth->dst.expires))
- goto out;
-
age = jiffies - rth->dst.lastuse;
- ret = 0;
if ((age <= tmo1 && !rt_fast_clean(rth)) ||
(age <= tmo2 && rt_valuable(rth)))
goto out;
@@ -793,97 +820,6 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
return ONE;
}
-static void rt_check_expire(void)
-{
- static unsigned int rover;
- unsigned int i = rover, goal;
- struct rtable *rth;
- struct rtable __rcu **rthp;
- unsigned long samples = 0;
- unsigned long sum = 0, sum2 = 0;
- unsigned long delta;
- u64 mult;
-
- delta = jiffies - expires_ljiffies;
- expires_ljiffies = jiffies;
- mult = ((u64)delta) << rt_hash_log;
- if (ip_rt_gc_timeout > 1)
- do_div(mult, ip_rt_gc_timeout);
- goal = (unsigned int)mult;
- if (goal > rt_hash_mask)
- goal = rt_hash_mask + 1;
- for (; goal > 0; goal--) {
- unsigned long tmo = ip_rt_gc_timeout;
- unsigned long length;
-
- i = (i + 1) & rt_hash_mask;
- rthp = &rt_hash_table[i].chain;
-
- if (need_resched())
- cond_resched();
-
- samples++;
-
- if (rcu_dereference_raw(*rthp) == NULL)
- continue;
- length = 0;
- spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
- prefetch(rth->dst.rt_next);
- if (rt_is_expired(rth)) {
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- continue;
- }
- if (rth->dst.expires) {
- /* Entry is expired even if it is in use */
- if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
- tmo >>= 1;
- rthp = &rth->dst.rt_next;
- /*
- * We only count entries on
- * a chain with equal hash inputs once
- * so that entries for different QOS
- * levels, and other non-hash input
- * attributes don't unfairly skew
- * the length computation
- */
- length += has_noalias(rt_hash_table[i].chain, rth);
- continue;
- }
- } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
- goto nofree;
-
- /* Cleanup aged off entries. */
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- }
- spin_unlock_bh(rt_hash_lock_addr(i));
- sum += length;
- sum2 += length*length;
- }
- if (samples) {
- unsigned long avg = sum / samples;
- unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
- rt_chain_length_max = max_t(unsigned long,
- ip_rt_gc_elasticity,
- (avg + 4*sd) >> FRACT_BITS);
- }
- rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
- rt_check_expire();
- schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
@@ -1272,6 +1208,13 @@ skip_hashing:
return 0;
}
+static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt_peer_genid(void)
+{
+ return atomic_read(&__rt_peer_genid);
+}
+
void rt_bind_peer(struct rtable *rt, int create)
{
struct inet_peer *peer;
@@ -1280,6 +1223,8 @@ void rt_bind_peer(struct rtable *rt, int create)
if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
inet_putpeer(peer);
+ else
+ rt->rt_peer_genid = rt_peer_genid();
}
/*
@@ -1349,13 +1294,8 @@ static void rt_del(unsigned hash, struct rtable *rt)
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
__be32 saddr, struct net_device *dev)
{
- int i, k;
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rth;
- struct rtable __rcu **rthp;
- __be32 skeys[2] = { saddr, 0 };
- int ikeys[2] = { dev->ifindex, 0 };
- struct netevent_redirect netevent;
+ struct inet_peer *peer;
struct net *net;
if (!in_dev)
@@ -1367,9 +1307,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
ipv4_is_zeronet(new_gw))
goto reject_redirect;
- if (!rt_caching(net))
- goto reject_redirect;
-
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (!inet_addr_onlink(in_dev, new_gw, old_gw))
goto reject_redirect;
@@ -1380,91 +1317,13 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
goto reject_redirect;
}
- for (i = 0; i < 2; i++) {
- for (k = 0; k < 2; k++) {
- unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
- rt_genid(net));
-
- rthp = &rt_hash_table[hash].chain;
-
- while ((rth = rcu_dereference(*rthp)) != NULL) {
- struct rtable *rt;
-
- if (rth->fl.fl4_dst != daddr ||
- rth->fl.fl4_src != skeys[i] ||
- rth->fl.oif != ikeys[k] ||
- rt_is_input_route(rth) ||
- rt_is_expired(rth) ||
- !net_eq(dev_net(rth->dst.dev), net)) {
- rthp = &rth->dst.rt_next;
- continue;
- }
-
- if (rth->rt_dst != daddr ||
- rth->rt_src != saddr ||
- rth->dst.error ||
- rth->rt_gateway != old_gw ||
- rth->dst.dev != dev)
- break;
-
- dst_hold(&rth->dst);
-
- rt = dst_alloc(&ipv4_dst_ops);
- if (rt == NULL) {
- ip_rt_put(rth);
- return;
- }
-
- /* Copy all the information. */
- *rt = *rth;
- rt->dst.__use = 1;
- atomic_set(&rt->dst.__refcnt, 1);
- rt->dst.child = NULL;
- if (rt->dst.dev)
- dev_hold(rt->dst.dev);
- rt->dst.obsolete = -1;
- rt->dst.lastuse = jiffies;
- rt->dst.path = &rt->dst;
- rt->dst.neighbour = NULL;
- rt->dst.hh = NULL;
-#ifdef CONFIG_XFRM
- rt->dst.xfrm = NULL;
-#endif
- rt->rt_genid = rt_genid(net);
- rt->rt_flags |= RTCF_REDIRECTED;
-
- /* Gateway is different ... */
- rt->rt_gateway = new_gw;
-
- /* Redirect received -> path was valid */
- dst_confirm(&rth->dst);
-
- if (rt->peer)
- atomic_inc(&rt->peer->refcnt);
-
- if (arp_bind_neighbour(&rt->dst) ||
- !(rt->dst.neighbour->nud_state &
- NUD_VALID)) {
- if (rt->dst.neighbour)
- neigh_event_send(rt->dst.neighbour, NULL);
- ip_rt_put(rth);
- rt_drop(rt);
- goto do_next;
- }
+ peer = inet_getpeer_v4(daddr, 1);
+ if (peer) {
+ peer->redirect_learned.a4 = new_gw;
- netevent.old = &rth->dst;
- netevent.new = &rt->dst;
- call_netevent_notifiers(NETEVENT_REDIRECT,
- &netevent);
+ inet_putpeer(peer);
- rt_del(hash, rth);
- if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
- ip_rt_put(rt);
- goto do_next;
- }
- do_next:
- ;
- }
+ atomic_inc(&__rt_peer_genid);
}
return;
@@ -1488,9 +1347,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- (rt->dst.expires &&
- time_after_eq(jiffies, rt->dst.expires))) {
+ } else if (rt->rt_flags & RTCF_REDIRECTED) {
unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
rt->fl.oif,
rt_genid(dev_net(dst->dev)));
@@ -1500,6 +1357,14 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
#endif
rt_del(hash, rt);
ret = NULL;
+ } else if (rt->peer &&
+ rt->peer->pmtu_expires &&
+ time_after_eq(jiffies, rt->peer->pmtu_expires)) {
+ unsigned long orig = rt->peer->pmtu_expires;
+
+ if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+ dst_metric_set(dst, RTAX_MTU,
+ rt->peer->pmtu_orig);
}
}
return ret;
@@ -1525,6 +1390,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
+ struct inet_peer *peer;
int log_martians;
rcu_read_lock();
@@ -1536,33 +1402,41 @@ void ip_rt_send_redirect(struct sk_buff *skb)
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
rcu_read_unlock();
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+ if (!peer) {
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+ return;
+ }
+
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
- if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
- rt->dst.rate_tokens = 0;
+ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+ peer->rate_tokens = 0;
/* Too many ignored redirects; do not send anything
* set dst.rate_last to the last seen redirected packet.
*/
- if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
- rt->dst.rate_last = jiffies;
+ if (peer->rate_tokens >= ip_rt_redirect_number) {
+ peer->rate_last = jiffies;
return;
}
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
- if (rt->dst.rate_tokens == 0 ||
+ if (peer->rate_tokens == 0 ||
time_after(jiffies,
- (rt->dst.rate_last +
- (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
+ (peer->rate_last +
+ (ip_rt_redirect_load << peer->rate_tokens)))) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
- rt->dst.rate_last = jiffies;
- ++rt->dst.rate_tokens;
+ peer->rate_last = jiffies;
+ ++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
- rt->dst.rate_tokens == ip_rt_redirect_number &&
+ peer->rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
&rt->rt_src, rt->rt_iif,
@@ -1574,7 +1448,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
static int ip_error(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
+ struct inet_peer *peer;
unsigned long now;
+ bool send;
int code;
switch (rt->dst.error) {
@@ -1594,15 +1470,24 @@ static int ip_error(struct sk_buff *skb)
break;
}
- now = jiffies;
- rt->dst.rate_tokens += now - rt->dst.rate_last;
- if (rt->dst.rate_tokens > ip_rt_error_burst)
- rt->dst.rate_tokens = ip_rt_error_burst;
- rt->dst.rate_last = now;
- if (rt->dst.rate_tokens >= ip_rt_error_cost) {
- rt->dst.rate_tokens -= ip_rt_error_cost;
- icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+
+ send = true;
+ if (peer) {
+ now = jiffies;
+ peer->rate_tokens += now - peer->rate_last;
+ if (peer->rate_tokens > ip_rt_error_burst)
+ peer->rate_tokens = ip_rt_error_burst;
+ peer->rate_last = now;
+ if (peer->rate_tokens >= ip_rt_error_cost)
+ peer->rate_tokens -= ip_rt_error_cost;
+ else
+ send = false;
}
+ if (send)
+ icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
out: kfree_skb(skb);
return 0;
@@ -1630,88 +1515,130 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
unsigned short new_mtu,
struct net_device *dev)
{
- int i, k;
unsigned short old_mtu = ntohs(iph->tot_len);
- struct rtable *rth;
- int ikeys[2] = { dev->ifindex, 0 };
- __be32 skeys[2] = { iph->saddr, 0, };
- __be32 daddr = iph->daddr;
unsigned short est_mtu = 0;
+ struct inet_peer *peer;
- for (k = 0; k < 2; k++) {
- for (i = 0; i < 2; i++) {
- unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
- rt_genid(net));
-
- rcu_read_lock();
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->dst.rt_next)) {
- unsigned short mtu = new_mtu;
-
- if (rth->fl.fl4_dst != daddr ||
- rth->fl.fl4_src != skeys[i] ||
- rth->rt_dst != daddr ||
- rth->rt_src != iph->saddr ||
- rth->fl.oif != ikeys[k] ||
- rt_is_input_route(rth) ||
- dst_metric_locked(&rth->dst, RTAX_MTU) ||
- !net_eq(dev_net(rth->dst.dev), net) ||
- rt_is_expired(rth))
- continue;
-
- if (new_mtu < 68 || new_mtu >= old_mtu) {
+ peer = inet_getpeer_v4(iph->daddr, 1);
+ if (peer) {
+ unsigned short mtu = new_mtu;
- /* BSD 4.2 compatibility hack :-( */
- if (mtu == 0 &&
- old_mtu >= dst_mtu(&rth->dst) &&
- old_mtu >= 68 + (iph->ihl << 2))
- old_mtu -= iph->ihl << 2;
+ if (new_mtu < 68 || new_mtu >= old_mtu) {
+ /* BSD 4.2 derived systems incorrectly adjust
+ * tot_len by the IP header length, and report
+ * a zero MTU in the ICMP message.
+ */
+ if (mtu == 0 &&
+ old_mtu >= 68 + (iph->ihl << 2))
+ old_mtu -= iph->ihl << 2;
+ mtu = guess_mtu(old_mtu);
+ }
- mtu = guess_mtu(old_mtu);
- }
- if (mtu <= dst_mtu(&rth->dst)) {
- if (mtu < dst_mtu(&rth->dst)) {
- dst_confirm(&rth->dst);
- if (mtu < ip_rt_min_pmtu) {
- u32 lock = dst_metric(&rth->dst,
- RTAX_LOCK);
- mtu = ip_rt_min_pmtu;
- lock |= (1 << RTAX_MTU);
- dst_metric_set(&rth->dst, RTAX_LOCK,
- lock);
- }
- dst_metric_set(&rth->dst, RTAX_MTU, mtu);
- dst_set_expires(&rth->dst,
- ip_rt_mtu_expires);
- }
- est_mtu = mtu;
- }
- }
- rcu_read_unlock();
+ if (mtu < ip_rt_min_pmtu)
+ mtu = ip_rt_min_pmtu;
+ if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+ est_mtu = mtu;
+ peer->pmtu_learned = mtu;
+ peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
}
+
+ inet_putpeer(peer);
+
+ atomic_inc(&__rt_peer_genid);
}
return est_mtu ? : new_mtu;
}
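The zero-MTU branch above handles BSD 4.2-derived routers: per RFC 1191 they add the IP header length to the reported Total Length and put zero in the ICMP next-hop MTU field, so the code conservatively subtracts the header length back before falling back to guess_mtu(). A standalone sketch of the arithmetic, using an illustrative RFC 1191-style plateau table (an assumption; the kernel's actual guess_mtu() table may differ):

#include <stdio.h>

/* Illustrative plateau values; not the kernel's exact table. */
static const unsigned short plateaus[] = {
	32000, 17914, 8166, 4352, 2002, 1492, 1006, 508, 296, 68
};

/* Pick the largest plateau strictly below the old datagram length. */
static unsigned short guess_mtu_sim(unsigned short old_mtu)
{
	unsigned int i;

	for (i = 0; i < sizeof(plateaus) / sizeof(plateaus[0]); i++)
		if (plateaus[i] < old_mtu)
			return plateaus[i];
	return 68;
}

int main(void)
{
	unsigned short old_mtu = 1500;	/* Total Length reported in the ICMP */
	unsigned short mtu = 0;		/* zero next-hop MTU from the router */
	unsigned char ihl = 5;		/* 20-byte IP header */

	if (mtu == 0 && old_mtu >= 68 + (ihl << 2)) {
		old_mtu -= ihl << 2;	/* undo the possible header inflation */
		mtu = guess_mtu_sim(old_mtu);
	}
	printf("estimated path MTU: %u\n", mtu);	/* 1006 with this table */
	return 0;
}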
+static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+{
+ unsigned long expires = peer->pmtu_expires;
+
+ if (time_before(expires, jiffies)) {
+ u32 orig_dst_mtu = dst_mtu(dst);
+ if (peer->pmtu_learned < orig_dst_mtu) {
+ if (!peer->pmtu_orig)
+ peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
+ dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
+ }
+ } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
+ dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+}
+
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
- if (dst_mtu(dst) > mtu && mtu >= 68 &&
- !(dst_metric_locked(dst, RTAX_MTU))) {
- if (mtu < ip_rt_min_pmtu) {
- u32 lock = dst_metric(dst, RTAX_LOCK);
+ struct rtable *rt = (struct rtable *) dst;
+ struct inet_peer *peer;
+
+ dst_confirm(dst);
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+ if (peer) {
+ if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
- dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
+ if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+ peer->pmtu_learned = mtu;
+ peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
+
+ atomic_inc(&__rt_peer_genid);
+ rt->rt_peer_genid = rt_peer_genid();
+
+ check_peer_pmtu(dst, peer);
}
- dst_metric_set(dst, RTAX_MTU, mtu);
- dst_set_expires(dst, ip_rt_mtu_expires);
- call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
+ inet_putpeer(peer);
+ }
+}
+
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+ struct rtable *rt = (struct rtable *) dst;
+ __be32 orig_gw = rt->rt_gateway;
+
+ dst_confirm(&rt->dst);
+
+ neigh_release(rt->dst.neighbour);
+ rt->dst.neighbour = NULL;
+
+ rt->rt_gateway = peer->redirect_learned.a4;
+ if (arp_bind_neighbour(&rt->dst) ||
+ !(rt->dst.neighbour->nud_state & NUD_VALID)) {
+ if (rt->dst.neighbour)
+ neigh_event_send(rt->dst.neighbour, NULL);
+ rt->rt_gateway = orig_gw;
+ return -EAGAIN;
+ } else {
+ rt->rt_flags |= RTCF_REDIRECTED;
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
+ rt->dst.neighbour);
}
+ return 0;
}
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
- if (rt_is_expired((struct rtable *)dst))
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt_is_expired(rt))
return NULL;
+ if (rt->rt_peer_genid != rt_peer_genid()) {
+ struct inet_peer *peer;
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 0);
+
+ peer = rt->peer;
+ if (peer && peer->pmtu_expires)
+ check_peer_pmtu(dst, peer);
+
+ if (peer && peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway) {
+ if (check_peer_redir(dst, peer))
+ return NULL;
+ }
+
+ rt->rt_peer_genid = rt_peer_genid();
+ }
return dst;
}
@@ -1720,6 +1647,10 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
struct rtable *rt = (struct rtable *) dst;
struct inet_peer *peer = rt->peer;
+ if (rt->fi) {
+ fib_info_put(rt->fi);
+ rt->fi = NULL;
+ }
if (peer) {
rt->peer = NULL;
inet_putpeer(peer);
@@ -1734,8 +1665,14 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
- if (rt)
- dst_set_expires(&rt->dst, 0);
+ if (rt &&
+ rt->peer &&
+ rt->peer->pmtu_expires) {
+ unsigned long orig = rt->peer->pmtu_expires;
+
+ if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+ dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+ }
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1775,7 +1712,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
memcpy(addr, &src, 4);
}
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
if (!(rt->dst.tclassid & 0xFFFF))
@@ -1815,17 +1752,52 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
return mtu;
}
-static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
+static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
+{
+ struct inet_peer *peer;
+ int create = 0;
+
+ /* If a peer entry exists for this destination, we must hook
+ * it up in order to get at cached metrics.
+ */
+ if (rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS)
+ create = 1;
+
+ rt_bind_peer(rt, create);
+ peer = rt->peer;
+ if (peer) {
+ if (inet_metrics_new(peer))
+ memcpy(peer->metrics, fi->fib_metrics,
+ sizeof(u32) * RTAX_MAX);
+ dst_init_metrics(&rt->dst, peer->metrics, false);
+
+ if (peer->pmtu_expires)
+ check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway) {
+ rt->rt_gateway = peer->redirect_learned.a4;
+ rt->rt_flags |= RTCF_REDIRECTED;
+ }
+ } else {
+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+ rt->fi = fi;
+ atomic_inc(&fi->fib_clntref);
+ }
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ }
+}
+
+static void rt_set_nexthop(struct rtable *rt, const struct fib_result *res,
+ struct fib_info *fi, u16 type, u32 itag)
{
struct dst_entry *dst = &rt->dst;
- struct fib_info *fi = res->fi;
if (fi) {
if (FIB_RES_GW(*res) &&
FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = FIB_RES_GW(*res);
- dst_import_metrics(dst, fi->fib_metrics);
-#ifdef CONFIG_NET_CLS_ROUTE
+ rt_init_metrics(rt, fi);
+#ifdef CONFIG_IP_ROUTE_CLASSID
dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
}
@@ -1835,13 +1807,26 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
set_class_tag(rt, fib_rules_tclass(res));
#endif
set_class_tag(rt, itag);
#endif
- rt->rt_type = res->type;
+ rt->rt_type = type;
+}
+
+static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
+{
+ struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
+ if (rt) {
+ rt->dst.obsolete = -1;
+
+ rt->dst.flags = DST_HOST |
+ (nopolicy ? DST_NOPOLICY : 0) |
+ (noxfrm ? DST_NOXFRM : 0);
+ }
+ return rt;
}
/* called in rcu_read_lock() section */
@@ -1874,24 +1859,19 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (err < 0)
goto e_err;
}
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
rth->dst.output = ip_rt_bug;
- rth->dst.obsolete = -1;
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_iif =
@@ -1959,7 +1939,7 @@ static void ip_handle_martian_source(struct net_device *dev,
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
- struct fib_result *res,
+ const struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos,
struct rtable **result)
@@ -2013,19 +1993,13 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
-
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+ IN_DEV_CONF_GET(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
- if (IN_DEV_CONF_GET(out_dev, NOXFRM))
- rth->dst.flags |= DST_NOXFRM;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
@@ -2040,12 +2014,11 @@ static int __mkroute_input(struct sk_buff *skb,
rth->fl.oif = 0;
rth->rt_spec_dst= spec_dst;
- rth->dst.obsolete = -1;
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
- rt_set_nexthop(rth, res, itag);
+ rt_set_nexthop(rth, res, res->fi, res->type, itag);
rth->rt_flags = flags;
@@ -2190,25 +2163,20 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
rth->dst.output= ip_rt_bug;
- rth->dst.obsolete = -1;
rth->rt_genid = rt_genid(net);
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_iif =
@@ -2351,38 +2319,39 @@ skip_cache:
EXPORT_SYMBOL(ip_route_input_common);
/* called with rcu_read_lock() */
-static int __mkroute_output(struct rtable **result,
- struct fib_result *res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
+static struct rtable *__mkroute_output(const struct fib_result *res,
+ const struct flowi *fl,
+ const struct flowi *oldflp,
+ struct net_device *dev_out,
+ unsigned int flags)
{
- struct rtable *rth;
- struct in_device *in_dev;
+ struct fib_info *fi = res->fi;
u32 tos = RT_FL_TOS(oldflp);
+ struct in_device *in_dev;
+ u16 type = res->type;
+ struct rtable *rth;
if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (ipv4_is_lbcast(fl->fl4_dst))
- res->type = RTN_BROADCAST;
+ type = RTN_BROADCAST;
else if (ipv4_is_multicast(fl->fl4_dst))
- res->type = RTN_MULTICAST;
+ type = RTN_MULTICAST;
else if (ipv4_is_zeronet(fl->fl4_dst))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
in_dev = __in_dev_get_rcu(dev_out);
if (!in_dev)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- if (res->type == RTN_BROADCAST) {
+ if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
- res->fi = NULL;
- } else if (res->type == RTN_MULTICAST) {
+ fi = NULL;
+ } else if (type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
@@ -2391,21 +2360,14 @@ static int __mkroute_output(struct rtable **result,
* default one, but do not gateway in this case.
* Yes, it is hack.
*/
- if (res->fi && res->prefixlen < 4)
- res->fi = NULL;
+ if (fi && res->prefixlen < 4)
+ fi = NULL;
}
-
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+ IN_DEV_CONF_GET(in_dev, NOXFRM));
if (!rth)
- return -ENOBUFS;
-
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOXFRM))
- rth->dst.flags |= DST_NOXFRM;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
+ return ERR_PTR(-ENOBUFS);
rth->fl.fl4_dst = oldflp->fl4_dst;
rth->fl.fl4_tos = tos;
@@ -2423,7 +2385,6 @@ static int __mkroute_output(struct rtable **result,
rth->rt_spec_dst= fl->fl4_src;
rth->dst.output=ip_output;
- rth->dst.obsolete = -1;
rth->rt_genid = rt_genid(dev_net(dev_out));
RT_CACHE_STAT_INC(out_slow_tot);
@@ -2440,7 +2401,7 @@ static int __mkroute_output(struct rtable **result,
RT_CACHE_STAT_INC(out_slow_mc);
}
#ifdef CONFIG_IP_MROUTE
- if (res->type == RTN_MULTICAST) {
+ if (type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(oldflp->fl4_dst)) {
rth->dst.input = ip_mr_input;
@@ -2450,31 +2411,10 @@ static int __mkroute_output(struct rtable **result,
#endif
}
- rt_set_nexthop(rth, res, 0);
+ rt_set_nexthop(rth, res, fi, type, 0);
rth->rt_flags = flags;
- *result = rth;
- return 0;
-}
-
-/* called with rcu_read_lock() */
-static int ip_mkroute_output(struct rtable **rp,
- struct fib_result *res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
-{
- struct rtable *rth = NULL;
- int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
- unsigned hash;
- if (err == 0) {
- hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
- rt_genid(dev_net(dev_out)));
- err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
- }
-
- return err;
+ return rth;
}
/*
@@ -2497,6 +2437,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
struct fib_result res;
unsigned int flags = 0;
struct net_device *dev_out = NULL;
+ struct rtable *rth;
int err;
@@ -2505,6 +2446,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
res.r = NULL;
#endif
+ rcu_read_lock();
if (oldflp->fl4_src) {
err = -EINVAL;
if (ipv4_is_multicast(oldflp->fl4_src) ||
@@ -2645,7 +2587,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
else
#endif
if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(net, &fl, &res);
+ fib_select_default(&res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2655,17 +2597,27 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
make_route:
- err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
+ rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
+ if (IS_ERR(rth))
+ err = PTR_ERR(rth);
+ else {
+ unsigned int hash;
-out: return err;
+ hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+ rt_genid(dev_net(dev_out)));
+ err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
+ }
+
+out:
+ rcu_read_unlock();
+ return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
const struct flowi *flp)
{
- unsigned int hash;
- int res;
struct rtable *rth;
+ unsigned int hash;
if (!rt_caching(net))
goto slow_output;
@@ -2695,10 +2647,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rcu_read_unlock_bh();
slow_output:
- rcu_read_lock();
- res = ip_route_output_slow(net, rp, flp);
- rcu_read_unlock();
- return res;
+ return ip_route_output_slow(net, rp, flp);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
@@ -2726,17 +2675,14 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
};
-
-static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
+struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rtable *ort = *rp;
- struct rtable *rt = (struct rtable *)
- dst_alloc(&ipv4_dst_blackhole_ops);
+ struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
+ struct rtable *ort = (struct rtable *) dst_orig;
if (rt) {
struct dst_entry *new = &rt->dst;
- atomic_set(&new->__refcnt, 1);
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
@@ -2759,17 +2705,20 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
rt->peer = ort->peer;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
+ rt->fi = ort->fi;
+ if (rt->fi)
+ atomic_inc(&rt->fi->fib_clntref);
dst_free(new);
}
- dst_release(&(*rp)->dst);
- *rp = rt;
- return rt ? 0 : -ENOMEM;
+ dst_release(dst_orig);
+
+ return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
- struct sock *sk, int flags)
+ struct sock *sk)
{
int err;
@@ -2781,12 +2730,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
flp->fl4_src = (*rp)->rt_src;
if (!flp->fl4_dst)
flp->fl4_dst = (*rp)->rt_dst;
- err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
- flags ? XFRM_LOOKUP_WAIT : 0);
- if (err == -EREMOTE)
- err = ipv4_dst_blackhole(net, rp, flp);
-
- return err;
+ return xfrm_lookup(net, (struct dst_entry **)rp, flp, sk, 0);
}
return 0;
@@ -2795,7 +2739,7 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
- return ip_route_output_flow(net, rp, flp, NULL, 0);
+ return ip_route_output_flow(net, rp, flp, NULL);
}
EXPORT_SYMBOL(ip_route_output_key);
@@ -2835,7 +2779,7 @@ static int rt_fill_info(struct net *net,
}
if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid)
NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
@@ -2854,7 +2798,8 @@ static int rt_fill_info(struct net *net,
NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
error = rt->dst.error;
- expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+ expires = (rt->peer && rt->peer->pmtu_expires) ?
+ rt->peer->pmtu_expires - jiffies : 0;
if (rt->peer) {
inet_peer_refcheck(rt->peer);
id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
@@ -3256,9 +3201,9 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_NET_CLS_ROUTE */
+#endif /* CONFIG_IP_ROUTE_CLASSID */
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
@@ -3274,7 +3219,7 @@ int __init ip_rt_init(void)
{
int rc = 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
@@ -3311,14 +3256,6 @@ int __init ip_rt_init(void)
devinet_init();
ip_fib_init();
- /* All the timers, started at system startup tend
- to synchronize. Perturb it a bit.
- */
- INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
- expires_ljiffies = jiffies;
- schedule_delayed_work(&expires_work,
- net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
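The route.c hunks above (and several of the IPv6 hunks below) retire the "int return value plus struct rtable ** output argument" convention in favour of functions that return either a valid pointer or an errno encoded with ERR_PTR(). A minimal sketch of the idiom, using a hypothetical producer name rather than the patch's functions; only rt_intern_hash() below is taken from the hunks:

	#include <linux/err.h>

	static struct rtable *make_route(void)
	{
		struct rtable *rth = alloc_route();	/* hypothetical allocator */

		if (!rth)
			return ERR_PTR(-ENOBUFS);	/* encode the errno in the pointer */
		return rth;				/* or a real route on success */
	}

	/* Caller side, matching the shape of ip_route_output_slow() above. */
	rth = make_route();
	if (IS_ERR(rth))
		err = PTR_ERR(rth);			/* recover the errno */
	else
		err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);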
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6c11eece262c..a17a5a72b98d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -873,9 +873,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
flags);
lock_sock(sk);
- TCP_CHECK_TIMER(sk);
res = do_tcp_sendpages(sk, &page, offset, size, flags);
- TCP_CHECK_TIMER(sk);
release_sock(sk);
return res;
}
@@ -916,7 +914,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
long timeo;
lock_sock(sk);
- TCP_CHECK_TIMER(sk);
flags = msg->msg_flags;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1104,7 +1101,6 @@ wait_for_memory:
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
- TCP_CHECK_TIMER(sk);
release_sock(sk);
return copied;
@@ -1123,7 +1119,6 @@ do_error:
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
- TCP_CHECK_TIMER(sk);
release_sock(sk);
return err;
}
@@ -1415,8 +1410,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
lock_sock(sk);
- TCP_CHECK_TIMER(sk);
-
err = -ENOTCONN;
if (sk->sk_state == TCP_LISTEN)
goto out;
@@ -1767,12 +1760,10 @@ skip_copy:
/* Clean up data we have read: This will do ACK frames. */
tcp_cleanup_rbuf(sk, copied);
- TCP_CHECK_TIMER(sk);
release_sock(sk);
return copied;
out:
- TCP_CHECK_TIMER(sk);
release_sock(sk);
return err;
@@ -2653,7 +2644,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb7f82ebf4a3..08ea735b9d72 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -817,7 +817,7 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
if (!cwnd)
- cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
+ cwnd = TCP_INIT_CWND;
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
}
/* D-SACK for already forgotten data... Do dumb counting. */
- if (dup_sack &&
+ if (dup_sack && tp->undo_marker && tp->undo_retrans &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
- if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+ if (tp->undo_marker && tp->undo_retrans &&
+ after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
state->reord = min(fack_count, state->reord);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 02f583b3744a..05bc6d9455fc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -149,6 +149,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
+ __be16 orig_sport, orig_dport;
struct rtable *rt;
__be32 daddr, nexthop;
int tmp;
@@ -167,10 +168,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
nexthop = inet->opt->faddr;
}
+ orig_sport = inet->inet_sport;
+ orig_dport = usin->sin_port;
tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
- inet->inet_sport, usin->sin_port, sk, 1);
+ orig_sport, orig_dport, sk, true);
if (tmp < 0) {
if (tmp == -ENETUNREACH)
IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -234,6 +237,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
goto failure;
err = ip_route_newports(&rt, IPPROTO_TCP,
+ orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (err)
goto failure;
@@ -1341,7 +1345,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
- peer->daddr.a4 == saddr) {
+ peer->daddr.addr.a4 == saddr) {
inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
@@ -1556,12 +1560,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sock_rps_save_rxhash(sk, skb->rxhash);
- TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
}
- TCP_CHECK_TIMER(sk);
return 0;
}
@@ -1583,13 +1585,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb->rxhash);
-
- TCP_CHECK_TIMER(sk);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
}
- TCP_CHECK_TIMER(sk);
return 0;
reset:
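The tcp_v4_connect() hunks above save the ports used for the initial route lookup and hand them to both ip_route_connect() and ip_route_newports(), so the route can be re-derived consistently after a local port has been chosen. Condensed from the hunks above (all names from the patch; the steps in between are elided):

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP, orig_sport, orig_dport, sk, true);

	/* ... local port selection happens here ... */

	err = ip_route_newports(&rt, IPPROTO_TCP,
				orig_sport, orig_dport,
				inet->inet_sport, inet->inet_dport, sk);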
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 406f320336e6..dfa5beb0c1c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (!tp->retrans_stamp)
tp->retrans_stamp = TCP_SKB_CB(skb)->when;
- tp->undo_retrans++;
+ tp->undo_retrans += tcp_skb_pcount(skb);
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 74a6aa003657..ecd44b0c45f1 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -259,7 +259,6 @@ static void tcp_delack_timer(unsigned long data)
tcp_send_ack(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
}
- TCP_CHECK_TIMER(sk);
out:
if (tcp_memory_pressure)
@@ -481,7 +480,6 @@ static void tcp_write_timer(unsigned long data)
tcp_probe_timer(sk);
break;
}
- TCP_CHECK_TIMER(sk);
out:
sk_mem_reclaim(sk);
@@ -589,7 +587,6 @@ static void tcp_keepalive_timer (unsigned long data)
elapsed = keepalive_time_when(tp) - elapsed;
}
- TCP_CHECK_TIMER(sk);
sk_mem_reclaim(sk);
resched:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8157b17959ee..ed9a5b7bee53 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -663,75 +663,72 @@ void udp_flush_pending_frames(struct sock *sk)
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
- * udp4_hwcsum_outgoing - handle outgoing HW checksumming
- * @sk: socket we are sending on
+ * udp4_hwcsum - handle outgoing HW checksumming
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
+ * @src: source IP address
+ * @dst: destination IP address
*/
-static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, int len)
+static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
- unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
+ struct sk_buff *frags = skb_shinfo(skb)->frag_list;
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
+ int hlen = len;
__wsum csum = 0;
- if (skb_queue_len(&sk->sk_write_queue) == 1) {
+ if (!frags) {
/*
* Only one fragment on the socket.
*/
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
+ uh->check = ~csum_tcpudp_magic(src, dst, len,
+ IPPROTO_UDP, 0);
} else {
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
- offset = skb_transport_offset(skb);
- skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ do {
+ csum = csum_add(csum, frags->csum);
+ hlen -= frags->len;
+ } while ((frags = frags->next));
+ csum = skb_checksum(skb, offset, hlen, csum);
skb->ip_summed = CHECKSUM_NONE;
- skb_queue_walk(&sk->sk_write_queue, skb) {
- csum = csum_add(csum, skb->csum);
- }
-
uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
-/*
- * Push out all pending data as one UDP datagram. Socket is locked.
- */
-static int udp_push_pending_frames(struct sock *sk)
+static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
{
- struct udp_sock *up = udp_sk(sk);
+ struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
- struct flowi *fl = &inet->cork.fl;
- struct sk_buff *skb;
struct udphdr *uh;
+ struct rtable *rt = (struct rtable *)skb_dst(skb);
int err = 0;
int is_udplite = IS_UDPLITE(sk);
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
__wsum csum = 0;
- /* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
- goto out;
-
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
- uh->source = fl->fl_ip_sport;
- uh->dest = fl->fl_ip_dport;
- uh->len = htons(up->len);
+ uh->source = inet->inet_sport;
+ uh->dest = dport;
+ uh->len = htons(len);
uh->check = 0;
if (is_udplite) /* UDP-Lite */
- csum = udplite_csum_outgoing(sk, skb);
+ csum = udplite_csum(skb);
else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
@@ -740,20 +737,20 @@ static int udp_push_pending_frames(struct sock *sk)
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
- udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
+ udp4_hwcsum(skb, rt->rt_src, daddr);
goto send;
- } else /* `normal' UDP */
- csum = udp_csum_outgoing(sk, skb);
+ } else
+ csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
- uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
+ uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len,
sk->sk_protocol, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
- err = ip_push_pending_frames(sk);
+ err = ip_send_skb(skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),
@@ -763,6 +760,26 @@ send:
} else
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
+ return err;
+}
+
+/*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+static int udp_push_pending_frames(struct sock *sk)
+{
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ struct flowi *fl = &inet->cork.fl;
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = ip_finish_skb(sk);
+ if (!skb)
+ goto out;
+
+ err = udp_send_skb(skb, fl->fl4_dst, fl->fl_ip_dport);
+
out:
up->len = 0;
up->pending = 0;
@@ -785,6 +802,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ struct sk_buff *skb;
if (len > 0xFFFF)
return -EMSGSIZE;
@@ -799,6 +817,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.opt = NULL;
ipc.tx_flags = 0;
+ getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+
if (up->pending) {
/*
* There are pending frames.
@@ -894,13 +914,15 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
.fl4_src = saddr,
.fl4_tos = tos,
.proto = sk->sk_protocol,
- .flags = inet_sk_flowi_flags(sk),
+ .flags = (inet_sk_flowi_flags(sk) |
+ FLOWI_FLAG_CAN_SLEEP),
.fl_ip_sport = inet->inet_sport,
- .fl_ip_dport = dport };
+ .fl_ip_dport = dport
+ };
struct net *net = sock_net(sk);
security_sk_classify_flow(sk, &fl);
- err = ip_route_output_flow(net, &rt, &fl, sk, 1);
+ err = ip_route_output_flow(net, &rt, &fl, sk);
if (err) {
if (err == -ENETUNREACH)
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
@@ -923,6 +945,17 @@ back_from_confirm:
if (!ipc.addr)
daddr = ipc.addr = rt->rt_dst;
+ /* Lockless fast path for the non-corking case. */
+ if (!corkreq) {
+ skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
+ sizeof(struct udphdr), &ipc, &rt,
+ msg->msg_flags);
+ err = PTR_ERR(skb);
+ if (skb && !IS_ERR(skb))
+ err = udp_send_skb(skb, daddr, dport);
+ goto out;
+ }
+
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
@@ -944,7 +977,6 @@ back_from_confirm:
do_append_data:
up->len += ulen;
- getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
@@ -2199,7 +2231,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
return 0;
}
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
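The udp.c changes above split datagram transmission into udp_send_skb() and add a lockless fast path for non-corked sends on top of the new ip_make_skb(). Condensed from the udp_sendmsg() hunk above (names from the patch, error handling trimmed):

	/* Non-corking case: build the complete datagram and send it without
	 * taking the socket lock; corked sends still go through
	 * ip_append_data() and udp_push_pending_frames(). */
	if (!corkreq) {
		skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (skb && !IS_ERR(skb))
			err = udp_send_skb(skb, daddr, dport);
		goto out;
	}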
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b057d40addec..5f0f058dc376 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -19,8 +19,8 @@
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr)
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr)
{
struct flowi fl = {
.fl4_dst = daddr->a4,
@@ -56,7 +56,7 @@ static int xfrm4_get_saddr(struct net *net,
return 0;
}
-static int xfrm4_get_tos(struct flowi *fl)
+static int xfrm4_get_tos(const struct flowi *fl)
{
return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
}
@@ -68,7 +68,7 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
}
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
- struct flowi *fl)
+ const struct flowi *fl)
{
struct rtable *rt = (struct rtable *)xdst->route;
@@ -196,8 +196,11 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ dst_destroy_metrics_generic(dst);
+
if (likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
+
xfrm_dst_destroy(xdst);
}
@@ -215,6 +218,7 @@ static struct dst_ops xfrm4_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
+ .cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
.local_out = __ip_local_out,
@@ -230,6 +234,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.get_tos = xfrm4_get_tos,
.init_path = xfrm4_init_path,
.fill_dst = xfrm4_fill_dst,
+ .blackhole_route = ipv4_blackhole_route,
};
#ifdef CONFIG_SYSCTL
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 47947624eccc..983eff248988 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,7 +21,7 @@ static int xfrm4_init_flags(struct xfrm_state *x)
}
static void
-__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
sel->daddr.a4 = fl->fl4_dst;
sel->saddr.a4 = fl->fl4_src;
@@ -37,8 +37,8 @@ __xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
}
static void
-xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
x->id = tmpl->id;
if (x->id.daddr.a4 == 0)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index fd6782e3a038..3daaf3c7703c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -718,12 +718,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
struct inet6_ifaddr *ifa, *ifn;
struct inet6_dev *idev = ifp->idev;
int state;
- int hash;
int deleted = 0, onlink = 0;
unsigned long expires = jiffies;
- hash = ipv6_addr_hash(&ifp->addr);
-
spin_lock_bh(&ifp->state_lock);
state = ifp->state;
ifp->state = INET6_IFADDR_STATE_DEAD;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 978e80e2c4a8..a88b2e9d25f1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -644,9 +644,8 @@ EXPORT_SYMBOL(inet6_unregister_protosw);
int inet6_sk_rebuild_header(struct sock *sk)
{
- int err;
- struct dst_entry *dst;
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct dst_entry *dst;
dst = __sk_dst_check(sk, np->dst_cookie);
@@ -668,17 +667,11 @@ int inet6_sk_rebuild_header(struct sock *sk)
final_p = fl6_update_dst(&fl, np->opt, &final);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err) {
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+ if (IS_ERR(dst)) {
sk->sk_route_caps = 0;
- return err;
- }
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
- sk->sk_err_soft = -err;
- return err;
+ sk->sk_err_soft = -PTR_ERR(dst);
+ return PTR_ERR(dst);
}
__ip6_dst_store(sk, dst, NULL, NULL);
@@ -772,7 +765,7 @@ out:
return err;
}
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 320bdb877eed..be3a781c0085 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -162,18 +162,11 @@ ipv4_connected:
opt = flowlabel ? flowlabel->opt : np->opt;
final_p = fl6_update_dst(&fl, opt, &final);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+ err = 0;
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
goto out;
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
- if (err < 0) {
- if (err == -EREMOTE)
- err = ip6_dst_blackhole(sk, &dst, &fl);
- if (err < 0)
- goto out;
}
/* source address lookup done in ip6_dst_lookup */
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 03e62f94ff8e..e332bae104ee 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -157,20 +157,20 @@ static int is_ineligible(struct sk_buff *skb)
/*
* Check the ICMP output rate limit
*/
-static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
- struct flowi *fl)
+static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ struct flowi *fl)
{
struct dst_entry *dst;
struct net *net = sock_net(sk);
- int res = 0;
+ bool res = false;
/* Informational messages are not limited. */
if (type & ICMPV6_INFOMSG_MASK)
- return 1;
+ return true;
/* Do not limit pmtu discovery, it would break it. */
if (type == ICMPV6_PKT_TOOBIG)
- return 1;
+ return true;
/*
* Look up the output route.
@@ -182,7 +182,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTNOROUTES);
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
- res = 1;
+ res = true;
} else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = net->ipv6.sysctl.icmpv6_time;
@@ -191,7 +191,9 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- res = xrlim_allow(dst, tmo);
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+ res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
}
dst_release(dst);
return res;
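The icmpv6_xrlim_allow() hunk above, like the later hunks in ip6_output.c and ndisc.c, replaces the dst-based xrlim_allow() with the per-destination inet_peer token bucket. A minimal sketch of the new pattern for a route rt of type struct rt6_info * (names from the patch):

	/* Attach an inet_peer to the route if none is bound yet, then ask the
	 * per-peer rate limiter instead of the old per-dst one. */
	if (!rt->rt6i_peer)
		rt6_bind_peer(rt, 1);	/* 1 = create the peer entry if missing */
	res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);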
@@ -298,6 +300,70 @@ static void mip6_addr_swap(struct sk_buff *skb)
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif
+static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+ struct sock *sk, struct flowi *fl)
+{
+ struct dst_entry *dst, *dst2;
+ struct flowi fl2;
+ int err;
+
+ err = ip6_dst_lookup(sk, &dst, fl);
+ if (err)
+ return ERR_PTR(err);
+
+ /*
+ * We won't send icmp if the destination is known
+ * anycast.
+ */
+ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
+ LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
+ dst_release(dst);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* No need to clone since we're just using its address. */
+ dst2 = dst;
+
+ err = xfrm_lookup(net, &dst, fl, sk, 0);
+ switch (err) {
+ case 0:
+ if (dst != dst2)
+ return dst;
+ break;
+ case -EPERM:
+ dst = NULL;
+ break;
+ default:
+ return ERR_PTR(err);
+ }
+
+ err = xfrm_decode_session_reverse(skb, &fl2, AF_INET6);
+ if (err)
+ goto relookup_failed;
+
+ err = ip6_dst_lookup(sk, &dst2, &fl2);
+ if (err)
+ goto relookup_failed;
+
+ err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
+ switch (err) {
+ case 0:
+ dst_release(dst);
+ dst = dst2;
+ break;
+ case -EPERM:
+ dst_release(dst);
+ return ERR_PTR(err);
+ default:
+ goto relookup_failed;
+ }
+
+relookup_failed:
+ if (dst)
+ return dst;
+ return ERR_PTR(err);
+}
+
/*
* Send an ICMP message in response to a packet in error
*/
@@ -310,10 +376,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
struct ipv6_pinfo *np;
struct in6_addr *saddr = NULL;
struct dst_entry *dst;
- struct dst_entry *dst2;
struct icmp6hdr tmp_hdr;
struct flowi fl;
- struct flowi fl2;
struct icmpv6_msg msg;
int iif = 0;
int addr_type = 0;
@@ -406,57 +470,10 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = icmpv6_route_lookup(net, skb, sk, &fl);
+ if (IS_ERR(dst))
goto out;
- /*
- * We won't send icmp if the destination is known
- * anycast.
- */
- if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
- LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
- goto out_dst_release;
- }
-
- /* No need to clone since we're just using its address. */
- dst2 = dst;
-
- err = xfrm_lookup(net, &dst, &fl, sk, 0);
- switch (err) {
- case 0:
- if (dst != dst2)
- goto route_done;
- break;
- case -EPERM:
- dst = NULL;
- break;
- default:
- goto out;
- }
-
- if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
- goto relookup_failed;
-
- if (ip6_dst_lookup(sk, &dst2, &fl2))
- goto relookup_failed;
-
- err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
- switch (err) {
- case 0:
- dst_release(dst);
- dst = dst2;
- break;
- case -EPERM:
- goto out_dst_release;
- default:
-relookup_failed:
- if (!dst)
- goto out;
- break;
- }
-
-route_done:
if (ipv6_addr_is_multicast(&fl.fl6_dst))
hlimit = np->mcast_hops;
else
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index d144e629d2b4..d687e1397333 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -74,13 +74,8 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
fl.fl_ip_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, &fl);
- if (ip6_dst_lookup(sk, &dst, &fl))
- return NULL;
-
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+ if (IS_ERR(dst))
return NULL;
return dst;
@@ -234,21 +229,13 @@ int inet6_csk_xmit(struct sk_buff *skb)
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
if (dst == NULL) {
- int err = ip6_dst_lookup(sk, &dst, &fl);
-
- if (err) {
- sk->sk_err_soft = -err;
- kfree_skb(skb);
- return err;
- }
-
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
- if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
+ if (IS_ERR(dst)) {
+ sk->sk_err_soft = -PTR_ERR(dst);
sk->sk_route_caps = 0;
kfree_skb(skb);
- return err;
+ return PTR_ERR(dst);
}
__inet6_csk_dst_store(sk, dst, NULL, NULL);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 633a6c266136..b53197233709 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -124,7 +124,7 @@ out:
}
EXPORT_SYMBOL(__inet6_lookup_established);
-static int inline compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
const int dif)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5f8d242be3f3..35a4ad90a0f5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -274,13 +274,10 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *hdr;
- int totlen;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
- totlen = len + sizeof(struct ipv6hdr);
-
skb_reset_network_header(skb);
skb_put(skb, sizeof(struct ipv6hdr));
hdr = ipv6_hdr(skb);
@@ -479,10 +476,13 @@ int ip6_forward(struct sk_buff *skb)
else
target = &hdr->daddr;
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
- if (xrlim_allow(dst, 1*HZ))
+ if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
ndisc_send_redirect(skb, n, target);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
@@ -1002,29 +1002,77 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
- * ip6_sk_dst_lookup - perform socket cached route lookup on flow
+ * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
+ * @sk: socket which provides route info
+ * @fl: flow to lookup
+ * @final_dst: final destination address for ipsec lookup
+ * @can_sleep: we are in a sleepable context
+ *
+ * This function performs a route lookup on the given flow.
+ *
+ * It returns a valid dst pointer on success, or a pointer encoded
+ * error code.
+ */
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi *fl,
+ const struct in6_addr *final_dst,
+ bool can_sleep)
+{
+ struct dst_entry *dst = NULL;
+ int err;
+
+ err = ip6_dst_lookup_tail(sk, &dst, fl);
+ if (err)
+ return ERR_PTR(err);
+ if (final_dst)
+ ipv6_addr_copy(&fl->fl6_dst, final_dst);
+ if (can_sleep)
+ fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+
+ err = xfrm_lookup(sock_net(sk), &dst, fl, sk, 0);
+ if (err)
+ return ERR_PTR(err);
+ return dst;
+}
+EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+
+/**
+ * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
* @sk: socket which provides the dst cache and route info
- * @dst: pointer to dst_entry * for result
* @fl: flow to lookup
+ * @final_dst: final destination address for ipsec lookup
+ * @can_sleep: we are in a sleepable context
*
* This function performs a route lookup on the given flow with the
* possibility of using the cached route in the socket if it is valid.
* It will take the socket dst lock when operating on the dst cache.
* As a result, this function can only be used in process context.
*
- * It returns zero on success, or a standard errno code on error.
+ * It returns a valid dst pointer on success, or a pointer encoded
+ * error code.
*/
-int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi *fl,
+ const struct in6_addr *final_dst,
+ bool can_sleep)
{
- *dst = NULL;
- if (sk) {
- *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
- *dst = ip6_sk_dst_check(sk, *dst, fl);
- }
+ struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
+ int err;
- return ip6_dst_lookup_tail(sk, dst, fl);
+ dst = ip6_sk_dst_check(sk, dst, fl);
+
+ err = ip6_dst_lookup_tail(sk, &dst, fl);
+ if (err)
+ return ERR_PTR(err);
+ if (final_dst)
+ ipv6_addr_copy(&fl->fl6_dst, final_dst);
+ if (can_sleep)
+ fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+
+ err = xfrm_lookup(sock_net(sk), &dst, fl, sk, 0);
+ if (err)
+ return ERR_PTR(err);
+ return dst;
}
-EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
+EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline int ip6_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
@@ -1061,7 +1109,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- sk->sk_sndmsg_off = 0;
}
err = skb_append_datato_frags(sk,skb, getfrag, from,
@@ -1118,6 +1165,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int err;
int offset = 0;
int csummode = CHECKSUM_NONE;
+ __u8 tx_flags = 0;
if (flags&MSG_PROBE)
return 0;
@@ -1202,6 +1250,13 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
}
}
+ /* For UDP, check if TX timestamp is enabled */
+ if (sk->sk_type == SOCK_DGRAM) {
+ err = sock_tx_timestamp(sk, &tx_flags);
+ if (err)
+ goto error;
+ }
+
/*
* Let's try using as much space as possible.
* Use MTU if total length of the message fits into the MTU.
@@ -1306,6 +1361,12 @@ alloc_new_skb:
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
+ else {
+ /* Only the initial fragment
+ * is time stamped.
+ */
+ tx_flags = 0;
+ }
}
if (skb == NULL)
goto error;
@@ -1317,6 +1378,9 @@ alloc_new_skb:
/* reserve for fragmentation */
skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
+ if (sk->sk_type == SOCK_DGRAM)
+ skb_shinfo(skb)->tx_flags = tx_flags;
+
/*
* Find where to start putting bytes
*/
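ip6_output.c above adds ip6_dst_lookup_flow() and ip6_sk_dst_lookup_flow(), which fold the final-destination copy and the xfrm_lookup() step into a single call that returns a dst or an ERR_PTR-encoded error. Every caller conversion in this patch (af_inet6.c, datagram.c, inet6_connection_sock.c, raw.c, syncookies.c, tcp_ipv6.c, udp.c) reduces to the same shape; a condensed sketch (names from the patch, error paths trimmed):

	/* One call replaces ip6_dst_lookup() + ipv6_addr_copy(final_p) + xfrm_lookup(). */
	dst = ip6_dst_lookup_flow(sk, &fl, final_p, true /* can_sleep */);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}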
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0e1d53bcf1e0..618f67ccda31 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1039,7 +1039,6 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ipv6_hdr(skb)->version == 0) {
- int err;
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
@@ -1050,7 +1049,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
skb_trim(skb, nlh->nlmsg_len);
((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
}
- err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+ rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
} else
ip6_mr_forward(net, mrt, skb, c);
}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index d6e9599d0705..f3e3ca938a54 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -203,7 +203,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
return allow;
}
-static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct flowi *fl)
+static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
+ const struct flowi *fl)
{
struct net *net = xs_net(x);
struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2342545a5ee9..7254ce364006 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1553,7 +1553,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
"ICMPv6 Redirect: destination is not a neighbour.\n");
goto release;
}
- if (!xrlim_allow(dst, 1*HZ))
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+ if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
goto release;
if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7d227c644f72..47b7b8df7fac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1076,6 +1076,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(AF_INET6, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1679,6 +1680,7 @@ translate_compat_table(struct net *net,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET6);
+ xt_compat_init_offsets(AF_INET6, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c88891a753..e6af8d72f26b 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@ fallback:
if (p != NULL) {
sb_add(m, "%02x", *p++);
for (i = 1; i < len; i++)
- sb_add(m, ":%02x", p[i]);
+ sb_add(m, ":%02x", *p++);
}
sb_add(m, " ");
@@ -452,8 +452,7 @@ ip6t_log_packet(u_int8_t pf,
in ? in->name : "",
out ? out->name : "");
- /* MAC logging for input path only. */
- if (in && !out)
+ if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 79d43aa8fa8d..085727263812 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,6 +45,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
struct nf_ct_frag6_skb_cb
@@ -73,7 +74,7 @@ static struct inet_frags nf_frags;
static struct netns_frags nf_init_frags;
#ifdef CONFIG_SYSCTL
-struct ctl_table nf_ct_frag6_sysctl_table[] = {
+static struct ctl_table nf_ct_frag6_sysctl_table[] = {
{
.procname = "nf_conntrack_frag6_timeout",
.data = &nf_init_frags.timeout,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c5b0915d106b..dc29b07caf42 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -124,18 +124,18 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
}
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
+typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
-int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
- struct sk_buff *skb))
+static mh_filter_t __rcu *mh_filter __read_mostly;
+
+int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
-int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
- struct sk_buff *skb))
+int rawv6_mh_filter_unregister(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, NULL);
synchronize_rcu();
@@ -193,10 +193,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
* policy is placed in rawv6_rcv() because it is
* required for each socket.
*/
- int (*filter)(struct sock *sock, struct sk_buff *skb);
+ mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
- filtered = filter ? filter(sk, skb) : 0;
+ filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
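The raw.c hunk above turns mh_filter into a properly __rcu-annotated function pointer behind a named typedef. A minimal sketch of how such a pointer is published and read; the typedef and accessors are the ones added by the patch, while the local name f exists only for the sketch:

	static mh_filter_t __rcu *mh_filter __read_mostly;

	/* Writer: publish (or clear) the filter. */
	rcu_assign_pointer(mh_filter, filter);

	/* Reader, under rcu_read_lock(): */
	mh_filter_t *f = rcu_dereference(mh_filter);
	int filtered = f ? (*f)(sk, skb) : 0;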
@@ -856,20 +856,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
fl.oif = np->mcast_oif;
security_sk_classify_flow(sk, &fl);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
goto out;
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
- if (err < 0) {
- if (err == -EREMOTE)
- err = ip6_dst_blackhole(sk, &dst, &fl);
- if (err < 0)
- goto out;
}
-
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl.fl6_dst))
hlimit = np->mcast_hops;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a998db6e7895..054fdcc184e8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -97,6 +97,36 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
struct in6_addr *gwaddr, int ifindex);
#endif
+static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ struct rt6_info *rt = (struct rt6_info *) dst;
+ struct inet_peer *peer;
+ u32 *p = NULL;
+
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+
+ peer = rt->rt6i_peer;
+ if (peer) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ p = peer->metrics;
+ if (inet_metrics_new(peer))
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ }
+ }
+ return p;
+}
+
static struct dst_ops ip6_dst_ops_template = {
.family = AF_INET6,
.protocol = cpu_to_be16(ETH_P_IPV6),
@@ -105,6 +135,7 @@ static struct dst_ops ip6_dst_ops_template = {
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
.default_mtu = ip6_default_mtu,
+ .cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
@@ -132,6 +163,10 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.update_pmtu = ip6_rt_blackhole_update_pmtu,
};
+static const u32 ip6_template_metrics[RTAX_MAX] = {
+ [RTAX_HOPLIMIT - 1] = 255,
+};
+
static struct rt6_info ip6_null_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
@@ -187,7 +222,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
/* allocate dst with ip6_dst_ops */
static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
{
- return (struct rt6_info *)dst_alloc(ops);
+ return (struct rt6_info *)dst_alloc(ops, 0);
}
static void ip6_dst_destroy(struct dst_entry *dst)
@@ -206,6 +241,13 @@ static void ip6_dst_destroy(struct dst_entry *dst)
}
}
+static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt6_peer_genid(void)
+{
+ return atomic_read(&__rt6_peer_genid);
+}
+
void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
@@ -213,6 +255,8 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);
+ else
+ rt->rt6i_peer_genid = rt6_peer_genid();
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -826,17 +870,15 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
EXPORT_SYMBOL(ip6_route_output);
-int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
+struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rt6_info *ort = (struct rt6_info *) *dstp;
- struct rt6_info *rt = (struct rt6_info *)
- dst_alloc(&ip6_dst_blackhole_ops);
+ struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1);
+ struct rt6_info *ort = (struct rt6_info *) dst_orig;
struct dst_entry *new = NULL;
if (rt) {
new = &rt->dst;
- atomic_set(&new->__refcnt, 1);
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
@@ -862,11 +904,9 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
dst_free(new);
}
- dst_release(*dstp);
- *dstp = new;
- return new ? 0 : -ENOMEM;
+ dst_release(dst_orig);
+ return new ? new : ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
/*
* Destination cache support functions
@@ -878,9 +918,14 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
rt = (struct rt6_info *) dst;
- if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
+ if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
+ if (rt->rt6i_peer_genid != rt6_peer_genid()) {
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 0);
+ rt->rt6i_peer_genid = rt6_peer_genid();
+ }
return dst;
-
+ }
return NULL;
}
@@ -931,7 +976,6 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
dst_metric_set(dst, RTAX_FEATURES, features);
}
dst_metric_set(dst, RTAX_MTU, mtu);
- call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
}
}
@@ -1028,11 +1072,9 @@ out:
int icmp6_dst_gc(void)
{
- struct dst_entry *dst, *next, **pprev;
+ struct dst_entry *dst, **pprev;
int more = 0;
- next = NULL;
-
spin_lock_bh(&icmp6_dst_lock);
pprev = &icmp6_dst_gc_list;
@@ -2557,14 +2599,16 @@ static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
- int delay = net->ipv6.sysctl.flush_delay;
- if (write) {
- proc_dointvec(ctl, write, buffer, lenp, ppos);
- fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
- return 0;
- } else
+ struct net *net;
+ int delay;
+ if (!write)
return -EINVAL;
+
+ net = (struct net *)ctl->extra1;
+ delay = net->ipv6.sysctl.flush_delay;
+ proc_dointvec(ctl, write, buffer, lenp, ppos);
+ fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+ return 0;
}
ctl_table ipv6_route_table_template[] = {
@@ -2651,6 +2695,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
if (table) {
table[0].data = &net->ipv6.sysctl.flush_delay;
+ table[0].extra1 = net;
table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
@@ -2684,7 +2729,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_null_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_null_entry;
net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
+ ip6_template_metrics, true);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2695,7 +2741,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_prohibit_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
+ ip6_template_metrics, true);
net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2705,7 +2752,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_blk_hole_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+ ip6_template_metrics, true);
#endif
net->ipv6.sysctl.flush_delay = 0;
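The route.c hunks above stop writing RTAX_HOPLIMIT into each template route individually and instead point the null/prohibit/blackhole templates at one shared, read-only metrics array via dst_init_metrics(); writable per-route metrics are produced on demand by the new ipv6_cow_metrics() hook. The shared-template side, condensed from the hunks above (names from the patch):

	/* One read-only metrics block shared by all template routes. */
	static const u32 ip6_template_metrics[RTAX_MAX] = {
		[RTAX_HOPLIMIT - 1] = 255,
	};

	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);	/* true = metrics are read-only */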
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..b1599a345c10 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -412,7 +412,7 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
do {
- n = p->next;
+ n = rcu_dereference_protected(p->next, 1);
kfree(p);
p = n;
} while (p);
@@ -421,15 +421,17 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
static int
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
{
- struct ip_tunnel_prl_entry *x, **p;
+ struct ip_tunnel_prl_entry *x;
+ struct ip_tunnel_prl_entry __rcu **p;
int err = 0;
ASSERT_RTNL();
if (a && a->addr != htonl(INADDR_ANY)) {
- for (p = &t->prl; *p; p = &(*p)->next) {
- if ((*p)->addr == a->addr) {
- x = *p;
+ for (p = &t->prl;
+ (x = rtnl_dereference(*p)) != NULL;
+ p = &x->next) {
+ if (x->addr == a->addr) {
*p = x->next;
call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
t->prl_count--;
@@ -438,9 +440,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
}
err = -ENXIO;
} else {
- if (t->prl) {
+ x = rtnl_dereference(t->prl);
+ if (x) {
t->prl_count = 0;
- x = t->prl;
call_rcu(&x->rcu_head, prl_list_destroy_rcu);
t->prl = NULL;
}
@@ -1179,7 +1181,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
- sitn->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
}
@@ -1196,11 +1198,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
for (prio = 1; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = sitn->tunnels[prio][h];
+ struct ip_tunnel *t;
+ t = rtnl_dereference(sitn->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
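The sit.c changes above add __rcu annotations to the PRL and tunnel lists: writers traverse them under RTNL with rtnl_dereference() and retire entries through call_rcu(). A minimal sketch of the deletion walk, following the ipip6_tunnel_del_prl() hunk (names from the patch; the patch's early exit is simplified to a break here):

	struct ip_tunnel_prl_entry *x;
	struct ip_tunnel_prl_entry __rcu **p;

	/* RTNL is held, so rtnl_dereference() is the correct accessor for writers. */
	for (p = &t->prl; (x = rtnl_dereference(*p)) != NULL; p = &x->next) {
		if (x->addr == a->addr) {
			*p = x->next;					/* unlink */
			call_rcu(&x->rcu_head, prl_entry_destroy_rcu);	/* free after a grace period */
			t->prl_count--;
			break;
		}
	}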
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 09fd34f0dbf2..0b4cf350631b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -243,12 +243,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->inet_sport;
security_req_classify_flow(req, &fl);
- if (ip6_dst_lookup(sk, &dst, &fl))
- goto out_free;
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+ if (IS_ERR(dst))
goto out_free;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 20aa95e37359..e59a31c48baf 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -255,18 +255,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
security_sk_classify_flow(sk, &fl);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
goto failure;
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
- if (err < 0) {
- if (err == -EREMOTE)
- err = ip6_dst_blackhole(sk, &dst, &fl);
- if (err < 0)
- goto failure;
}
if (saddr == NULL) {
@@ -385,7 +377,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
if (type == ICMPV6_PKT_TOOBIG) {
- struct dst_entry *dst = NULL;
+ struct dst_entry *dst;
if (sock_owned_by_user(sk))
goto out;
@@ -413,13 +405,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
fl.fl_ip_sport = inet->inet_sport;
security_skb_classify_flow(skb, &fl);
- if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
- sk->sk_err_soft = -err;
- goto out;
- }
-
- if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
- sk->sk_err_soft = -err;
+ dst = ip6_dst_lookup_flow(sk, &fl, NULL, false);
+ if (IS_ERR(dst)) {
+ sk->sk_err_soft = -PTR_ERR(dst);
goto out;
}
@@ -496,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
struct in6_addr * final_p, final;
struct flowi fl;
struct dst_entry *dst;
- int err = -1;
+ int err;
memset(&fl, 0, sizeof(fl));
fl.proto = IPPROTO_TCP;
@@ -512,15 +500,13 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
opt = np->opt;
final_p = fl6_update_dst(&fl, opt, &final);
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
goto done;
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
- goto done;
-
+ }
skb = tcp_make_synack(sk, dst, req, rvp);
+ err = -ENOMEM;
if (skb) {
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -1079,15 +1065,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
* Underlying function will use this to retrieve the network
* namespace
*/
- if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
- if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
- skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl, NULL);
- TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
- if (rst)
- TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
- return;
- }
+ dst = ip6_dst_lookup_flow(ctl_sk, &fl, NULL, false);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(buff, dst);
+ ip6_xmit(ctl_sk, buff, &fl, NULL);
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+ if (rst)
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+ return;
}
kfree_skb(buff);
@@ -1323,7 +1308,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(dst = inet6_csk_route_req(sk, req)) != NULL &&
(peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
- ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+ ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
&treq->rmt_addr)) {
inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1636,10 +1621,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
- TCP_CHECK_TIMER(sk);
if (opt_skb)
goto ipv6_pktoptions;
return 0;
@@ -1667,10 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
}
}
- TCP_CHECK_TIMER(sk);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
- TCP_CHECK_TIMER(sk);
if (opt_skb)
goto ipv6_pktoptions;
return 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9a009c66c8a3..d86d7f67a597 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1125,18 +1125,11 @@ do_udp_sendmsg:
security_sk_classify_flow(sk, &fl);
- err = ip6_sk_dst_lookup(sk, &dst, &fl);
- if (err)
+ dst = ip6_sk_dst_lookup_flow(sk, &fl, final_p, true);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ dst = NULL;
goto out;
- if (final_p)
- ipv6_addr_copy(&fl.fl6_dst, final_p);
-
- err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
- if (err < 0) {
- if (err == -EREMOTE)
- err = ip6_dst_blackhole(sk, &dst, &fl);
- if (err < 0)
- goto out;
}
if (hlimit < 0) {
@@ -1299,7 +1292,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
return 0;
}
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index da87428681cc..48ce496802fd 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -27,8 +27,8 @@
static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr)
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr)
{
struct flowi fl = {};
struct dst_entry *dst;
@@ -67,7 +67,7 @@ static int xfrm6_get_saddr(struct net *net,
return 0;
}
-static int xfrm6_get_tos(struct flowi *fl)
+static int xfrm6_get_tos(const struct flowi *fl)
{
return 0;
}
@@ -87,7 +87,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
}
static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
- struct flowi *fl)
+ const struct flowi *fl)
{
struct rt6_info *rt = (struct rt6_info*)xdst->route;
@@ -220,6 +220,7 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
+ dst_destroy_metrics_generic(dst);
if (likely(xdst->u.rt6.rt6i_peer))
inet_putpeer(xdst->u.rt6.rt6i_peer);
xfrm_dst_destroy(xdst);
@@ -257,6 +258,7 @@ static struct dst_ops xfrm6_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
+ .cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm6_dst_destroy,
.ifdown = xfrm6_dst_ifdown,
.local_out = __ip6_local_out,
@@ -272,6 +274,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.get_tos = xfrm6_get_tos,
.init_path = xfrm6_init_path,
.fill_dst = xfrm6_fill_dst,
+ .blackhole_route = ip6_blackhole_route,
};
static int __init xfrm6_policy_init(void)
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index a67575d472a3..a02598e0079a 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -20,7 +20,7 @@
#include <net/addrconf.h>
static void
-__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
/* Initialize temporary selector matching only
* to current session. */
@@ -38,8 +38,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
}
static void
-xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
x->id = tmpl->id;
if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
diff --git a/net/key/af_key.c b/net/key/af_key.c
index d87c22df6f1e..7db86ffcf070 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -70,7 +70,7 @@ static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
return (struct pfkey_sock *)sk;
}
-static int pfkey_can_dump(struct sock *sk)
+static int pfkey_can_dump(const struct sock *sk)
{
if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
return 1;
@@ -303,12 +303,13 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
return rc;
}
-static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
+static inline void pfkey_hdr_dup(struct sadb_msg *new,
+ const struct sadb_msg *orig)
{
*new = *orig;
}
-static int pfkey_error(struct sadb_msg *orig, int err, struct sock *sk)
+static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
{
struct sk_buff *skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
struct sadb_msg *hdr;
@@ -369,13 +370,13 @@ static u8 sadb_ext_min_len[] = {
};
/* Verify sadb_address_{len,prefixlen} against sa_family. */
-static int verify_address_len(void *p)
+static int verify_address_len(const void *p)
{
- struct sadb_address *sp = p;
- struct sockaddr *addr = (struct sockaddr *)(sp + 1);
- struct sockaddr_in *sin;
+ const struct sadb_address *sp = p;
+ const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
+ const struct sockaddr_in *sin;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct sockaddr_in6 *sin6;
+ const struct sockaddr_in6 *sin6;
#endif
int len;
@@ -411,16 +412,16 @@ static int verify_address_len(void *p)
return 0;
}
-static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx)
+static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
{
return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
sec_ctx->sadb_x_ctx_len,
sizeof(uint64_t));
}
-static inline int verify_sec_ctx_len(void *p)
+static inline int verify_sec_ctx_len(const void *p)
{
- struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p;
+ const struct sadb_x_sec_ctx *sec_ctx = p;
int len = sec_ctx->sadb_x_ctx_len;
if (len > PAGE_SIZE)
@@ -434,7 +435,7 @@ static inline int verify_sec_ctx_len(void *p)
return 0;
}
-static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(struct sadb_x_sec_ctx *sec_ctx)
+static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
{
struct xfrm_user_sec_ctx *uctx = NULL;
int ctx_size = sec_ctx->sadb_x_ctx_len;
@@ -455,16 +456,16 @@ static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(struct sadb
return uctx;
}
-static int present_and_same_family(struct sadb_address *src,
- struct sadb_address *dst)
+static int present_and_same_family(const struct sadb_address *src,
+ const struct sadb_address *dst)
{
- struct sockaddr *s_addr, *d_addr;
+ const struct sockaddr *s_addr, *d_addr;
if (!src || !dst)
return 0;
- s_addr = (struct sockaddr *)(src + 1);
- d_addr = (struct sockaddr *)(dst + 1);
+ s_addr = (const struct sockaddr *)(src + 1);
+ d_addr = (const struct sockaddr *)(dst + 1);
if (s_addr->sa_family != d_addr->sa_family)
return 0;
if (s_addr->sa_family != AF_INET
@@ -477,15 +478,15 @@ static int present_and_same_family(struct sadb_address *src,
return 1;
}
-static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void **ext_hdrs)
{
- char *p = (char *) hdr;
+ const char *p = (char *) hdr;
int len = skb->len;
len -= sizeof(*hdr);
p += sizeof(*hdr);
while (len > 0) {
- struct sadb_ext *ehdr = (struct sadb_ext *) p;
+ const struct sadb_ext *ehdr = (const struct sadb_ext *) p;
uint16_t ext_type;
int ext_len;
@@ -514,7 +515,7 @@ static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_h
if (verify_sec_ctx_len(p))
return -EINVAL;
}
- ext_hdrs[ext_type-1] = p;
+ ext_hdrs[ext_type-1] = (void *) p;
}
p += ext_len;
len -= ext_len;
@@ -606,21 +607,21 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
}
static
-int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr)
+int pfkey_sadb_addr2xfrm_addr(const struct sadb_address *addr, xfrm_address_t *xaddr)
{
return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
xaddr);
}
-static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_msg *hdr, void **ext_hdrs)
+static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
- struct sadb_sa *sa;
- struct sadb_address *addr;
+ const struct sadb_sa *sa;
+ const struct sadb_address *addr;
uint16_t proto;
unsigned short family;
xfrm_address_t *xaddr;
- sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+ sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
if (sa == NULL)
return NULL;
@@ -629,18 +630,18 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
return NULL;
/* sadb_address_len should be checked by caller */
- addr = (struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
+ addr = (const struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
if (addr == NULL)
return NULL;
- family = ((struct sockaddr *)(addr + 1))->sa_family;
+ family = ((const struct sockaddr *)(addr + 1))->sa_family;
switch (family) {
case AF_INET:
- xaddr = (xfrm_address_t *)&((struct sockaddr_in *)(addr + 1))->sin_addr;
+ xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
- xaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(addr + 1))->sin6_addr;
+ xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
break;
#endif
default:
@@ -690,9 +691,9 @@ static inline int pfkey_mode_to_xfrm(int mode)
}
}
-static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
- struct sockaddr *sa,
- unsigned short family)
+static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port,
+ struct sockaddr *sa,
+ unsigned short family)
{
switch (family) {
case AF_INET:
@@ -720,7 +721,7 @@ static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
return 0;
}
-static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
+static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
int add_keys, int hsc)
{
struct sk_buff *skb;
@@ -1010,7 +1011,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
}
-static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
+static inline struct sk_buff *pfkey_xfrm_state2msg(const struct xfrm_state *x)
{
struct sk_buff *skb;
@@ -1019,26 +1020,26 @@ static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
return skb;
}
-static inline struct sk_buff *pfkey_xfrm_state2msg_expire(struct xfrm_state *x,
+static inline struct sk_buff *pfkey_xfrm_state2msg_expire(const struct xfrm_state *x,
int hsc)
{
return __pfkey_xfrm_state2msg(x, 0, hsc);
}
static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
- struct sadb_msg *hdr,
- void **ext_hdrs)
+ const struct sadb_msg *hdr,
+ void * const *ext_hdrs)
{
struct xfrm_state *x;
- struct sadb_lifetime *lifetime;
- struct sadb_sa *sa;
- struct sadb_key *key;
- struct sadb_x_sec_ctx *sec_ctx;
+ const struct sadb_lifetime *lifetime;
+ const struct sadb_sa *sa;
+ const struct sadb_key *key;
+ const struct sadb_x_sec_ctx *sec_ctx;
uint16_t proto;
int err;
- sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+ sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
if (!sa ||
!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
@@ -1077,7 +1078,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
sa->sadb_sa_encrypt > SADB_EALG_MAX)
return ERR_PTR(-EINVAL);
- key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+ key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
if (key != NULL &&
sa->sadb_sa_auth != SADB_X_AALG_NULL &&
((key->sadb_key_bits+7) / 8 == 0 ||
@@ -1104,14 +1105,14 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
x->props.flags |= XFRM_STATE_NOPMTUDISC;
- lifetime = (struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
+ lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
if (lifetime != NULL) {
x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
}
- lifetime = (struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
+ lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
if (lifetime != NULL) {
x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
@@ -1119,7 +1120,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
}
- sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+ sec_ctx = (const struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
if (sec_ctx != NULL) {
struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
@@ -1133,7 +1134,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
goto out;
}
- key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+ key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
if (sa->sadb_sa_auth) {
int keysize = 0;
struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
@@ -1202,7 +1203,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
&x->id.daddr);
if (ext_hdrs[SADB_X_EXT_SA2-1]) {
- struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1];
+ const struct sadb_x_sa2 *sa2 = ext_hdrs[SADB_X_EXT_SA2-1];
int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
if (mode < 0) {
err = -EINVAL;
@@ -1213,7 +1214,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
}
if (ext_hdrs[SADB_EXT_ADDRESS_PROXY-1]) {
- struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
+ const struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
/* Nobody uses this, but we try. */
x->sel.family = pfkey_sadb_addr2xfrm_addr(addr, &x->sel.saddr);
@@ -1224,7 +1225,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->sel.family = x->props.family;
if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
- struct sadb_x_nat_t_type* n_type;
+ const struct sadb_x_nat_t_type* n_type;
struct xfrm_encap_tmpl *natt;
x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
@@ -1236,12 +1237,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
natt->encap_type = n_type->sadb_x_nat_t_type_type;
if (ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1]) {
- struct sadb_x_nat_t_port* n_port =
+ const struct sadb_x_nat_t_port *n_port =
ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1];
natt->encap_sport = n_port->sadb_x_nat_t_port_port;
}
if (ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]) {
- struct sadb_x_nat_t_port* n_port =
+ const struct sadb_x_nat_t_port *n_port =
ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
natt->encap_dport = n_port->sadb_x_nat_t_port_port;
}
@@ -1261,12 +1262,12 @@ out:
return ERR_PTR(err);
}
-static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
return -EOPNOTSUPP;
}
-static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct sk_buff *resp_skb;
@@ -1365,7 +1366,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
return 0;
}
-static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
@@ -1429,7 +1430,7 @@ static inline int event2keytype(int event)
}
/* ADD/UPD/DEL */
-static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
+static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
@@ -1453,7 +1454,7 @@ static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
return 0;
}
-static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
@@ -1492,7 +1493,7 @@ out:
return err;
}
-static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
@@ -1534,7 +1535,7 @@ out:
return err;
}
-static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
__u8 proto;
@@ -1570,7 +1571,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
return 0;
}
-static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig,
+static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
gfp_t allocation)
{
struct sk_buff *skb;
@@ -1642,7 +1643,7 @@ out_put_algs:
return skb;
}
-static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct pfkey_sock *pfk = pfkey_sk(sk);
struct sk_buff *supp_skb;
@@ -1671,7 +1672,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
return 0;
}
-static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
+static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
@@ -1688,7 +1689,7 @@ static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
}
-static int key_notify_sa_flush(struct km_event *c)
+static int key_notify_sa_flush(const struct km_event *c)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
@@ -1710,7 +1711,7 @@ static int key_notify_sa_flush(struct km_event *c)
return 0;
}
-static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
unsigned proto;
@@ -1784,7 +1785,7 @@ static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
xfrm_state_walk_done(&pfk->dump.u.state);
}
-static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
u8 proto;
struct pfkey_sock *pfk = pfkey_sk(sk);
@@ -1805,19 +1806,29 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr
return pfkey_do_dump(pfk);
}
-static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct pfkey_sock *pfk = pfkey_sk(sk);
int satype = hdr->sadb_msg_satype;
+ bool reset_errno = false;
if (hdr->sadb_msg_len == (sizeof(*hdr) / sizeof(uint64_t))) {
- /* XXX we mangle packet... */
- hdr->sadb_msg_errno = 0;
+ reset_errno = true;
if (satype != 0 && satype != 1)
return -EINVAL;
pfk->promisc = satype;
}
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+ if (reset_errno && skb_cloned(skb))
+ skb = skb_copy(skb, GFP_KERNEL);
+ else
+ skb = skb_clone(skb, GFP_KERNEL);
+
+ if (reset_errno && skb) {
+ struct sadb_msg *new_hdr = (struct sadb_msg *) skb->data;
+ new_hdr->sadb_msg_errno = 0;
+ }
+
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
@@ -1921,7 +1932,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
return 0;
}
-static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp)
+static inline int pfkey_xfrm_policy2sec_ctx_size(const struct xfrm_policy *xp)
{
struct xfrm_sec_ctx *xfrm_ctx = xp->security;
@@ -1933,9 +1944,9 @@ static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp)
return 0;
}
-static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
+static int pfkey_xfrm_policy2msg_size(const struct xfrm_policy *xp)
{
- struct xfrm_tmpl *t;
+ const struct xfrm_tmpl *t;
int sockaddr_size = pfkey_sockaddr_size(xp->family);
int socklen = 0;
int i;
@@ -1955,7 +1966,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
pfkey_xfrm_policy2sec_ctx_size(xp);
}
-static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
+static struct sk_buff * pfkey_xfrm_policy2msg_prep(const struct xfrm_policy *xp)
{
struct sk_buff *skb;
int size;
@@ -1969,7 +1980,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
return skb;
}
-static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
+static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir)
{
struct sadb_msg *hdr;
struct sadb_address *addr;
@@ -2065,8 +2076,8 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
pol->sadb_x_policy_priority = xp->priority;
for (i=0; i<xp->xfrm_nr; i++) {
+ const struct xfrm_tmpl *t = xp->xfrm_vec + i;
struct sadb_x_ipsecrequest *rq;
- struct xfrm_tmpl *t = xp->xfrm_vec + i;
int req_size;
int mode;
@@ -2123,7 +2134,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
return 0;
}
-static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
@@ -2152,7 +2163,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
}
-static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
int err = 0;
@@ -2273,7 +2284,7 @@ out:
return err;
}
-static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
int err;
@@ -2350,7 +2361,7 @@ out:
return err;
}
-static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb_msg *hdr, int dir)
+static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struct sadb_msg *hdr, int dir)
{
int err;
struct sk_buff *out_skb;
@@ -2458,7 +2469,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
}
static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
- struct sadb_msg *hdr, void **ext_hdrs)
+ const struct sadb_msg *hdr, void * const *ext_hdrs)
{
int i, len, ret, err = -EINVAL;
u8 dir;
@@ -2549,14 +2560,14 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
}
#else
static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
- struct sadb_msg *hdr, void **ext_hdrs)
+ const struct sadb_msg *hdr, void * const *ext_hdrs)
{
return -ENOPROTOOPT;
}
#endif
-static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
unsigned int dir;
@@ -2644,7 +2655,7 @@ static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
xfrm_policy_walk_done(&pfk->dump.u.policy);
}
-static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct pfkey_sock *pfk = pfkey_sk(sk);
@@ -2660,7 +2671,7 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
return pfkey_do_dump(pfk);
}
-static int key_notify_policy_flush(struct km_event *c)
+static int key_notify_policy_flush(const struct km_event *c)
{
struct sk_buff *skb_out;
struct sadb_msg *hdr;
@@ -2680,7 +2691,7 @@ static int key_notify_policy_flush(struct km_event *c)
}
-static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct km_event c;
@@ -2709,7 +2720,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
}
typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
- struct sadb_msg *hdr, void **ext_hdrs);
+ const struct sadb_msg *hdr, void * const *ext_hdrs);
static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_RESERVED] = pfkey_reserved,
[SADB_GETSPI] = pfkey_getspi,
@@ -2736,7 +2747,7 @@ static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_X_MIGRATE] = pfkey_migrate,
};
-static int pfkey_process(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr)
+static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
{
void *ext_hdrs[SADB_EXT_MAX];
int err;
@@ -2781,7 +2792,8 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
return hdr;
}
-static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+static inline int aalg_tmpl_set(const struct xfrm_tmpl *t,
+ const struct xfrm_algo_desc *d)
{
unsigned int id = d->desc.sadb_alg_id;
@@ -2791,7 +2803,8 @@ static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
return (t->aalgos >> id) & 1;
}
-static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+static inline int ealg_tmpl_set(const struct xfrm_tmpl *t,
+ const struct xfrm_algo_desc *d)
{
unsigned int id = d->desc.sadb_alg_id;
@@ -2801,12 +2814,12 @@ static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
return (t->ealgos >> id) & 1;
}
-static int count_ah_combs(struct xfrm_tmpl *t)
+static int count_ah_combs(const struct xfrm_tmpl *t)
{
int i, sz = 0;
for (i = 0; ; i++) {
- struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
+ const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
if (!aalg)
break;
if (aalg_tmpl_set(t, aalg) && aalg->available)
@@ -2815,12 +2828,12 @@ static int count_ah_combs(struct xfrm_tmpl *t)
return sz + sizeof(struct sadb_prop);
}
-static int count_esp_combs(struct xfrm_tmpl *t)
+static int count_esp_combs(const struct xfrm_tmpl *t)
{
int i, k, sz = 0;
for (i = 0; ; i++) {
- struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
+ const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
@@ -2828,7 +2841,7 @@ static int count_esp_combs(struct xfrm_tmpl *t)
continue;
for (k = 1; ; k++) {
- struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
+ const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
if (!aalg)
break;
@@ -2839,7 +2852,7 @@ static int count_esp_combs(struct xfrm_tmpl *t)
return sz + sizeof(struct sadb_prop);
}
-static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
+static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int i;
@@ -2851,7 +2864,7 @@ static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
for (i = 0; ; i++) {
- struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
+ const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
if (!aalg)
break;
@@ -2871,7 +2884,7 @@ static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
}
}
-static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
+static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int i, k;
@@ -2883,7 +2896,7 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
for (i=0; ; i++) {
- struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
+ const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
@@ -2892,7 +2905,7 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
for (k = 1; ; k++) {
struct sadb_comb *c;
- struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
+ const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
if (!aalg)
break;
if (!(aalg_tmpl_set(t, aalg) && aalg->available))
@@ -2914,12 +2927,12 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
}
}
-static int key_notify_policy_expire(struct xfrm_policy *xp, struct km_event *c)
+static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
{
return 0;
}
-static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
+static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
{
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
@@ -2949,7 +2962,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
return 0;
}
-static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
+static int pfkey_send_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = x ? xs_net(x) : c->net;
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
@@ -2976,7 +2989,7 @@ static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
return 0;
}
-static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
if (xp && xp->type != XFRM_POLICY_TYPE_MAIN)
return 0;
@@ -3318,7 +3331,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
#ifdef CONFIG_NET_KEY_MIGRATE
static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
- struct xfrm_selector *sel)
+ const struct xfrm_selector *sel)
{
struct sadb_address *addr;
addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
@@ -3348,7 +3361,7 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
}
-static int set_sadb_kmaddress(struct sk_buff *skb, struct xfrm_kmaddress *k)
+static int set_sadb_kmaddress(struct sk_buff *skb, const struct xfrm_kmaddress *k)
{
struct sadb_x_kmaddress *kma;
u8 *sa;
@@ -3376,7 +3389,7 @@ static int set_sadb_kmaddress(struct sk_buff *skb, struct xfrm_kmaddress *k)
static int set_ipsecrequest(struct sk_buff *skb,
uint8_t proto, uint8_t mode, int level,
uint32_t reqid, uint8_t family,
- xfrm_address_t *src, xfrm_address_t *dst)
+ const xfrm_address_t *src, const xfrm_address_t *dst)
{
struct sadb_x_ipsecrequest *rq;
u8 *sa;
@@ -3404,9 +3417,9 @@ static int set_ipsecrequest(struct sk_buff *skb,
#endif
#ifdef CONFIG_NET_KEY_MIGRATE
-static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k)
+static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_bundles,
+ const struct xfrm_kmaddress *k)
{
int i;
int sasize_sel;
@@ -3415,7 +3428,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
struct sk_buff *skb;
struct sadb_msg *hdr;
struct sadb_x_policy *pol;
- struct xfrm_migrate *mp;
+ const struct xfrm_migrate *mp;
if (type != XFRM_POLICY_TYPE_MAIN)
return 0;
@@ -3513,9 +3526,9 @@ err:
return -EINVAL;
}
#else
-static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k)
+static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_bundles,
+ const struct xfrm_kmaddress *k)
{
return -ENOPROTOOPT;
}
@@ -3655,6 +3668,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
}
static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
+ __acquires(rcu)
{
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
@@ -3672,6 +3686,7 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
}
static void pfkey_seq_stop(struct seq_file *f, void *v)
+ __releases(rcu)
{
rcu_read_unlock();
}
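
The pfkey_promisc hunk above stops writing into a message that other holders may still see: when the errno field has to be cleared and the skb is already cloned, a private copy is made first and only that copy is edited before broadcast. A rough userspace analogue of that copy-before-write decision, with toy buffer helpers standing in for skb_clone() and skb_copy() (all names invented for this sketch), might look like:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy buffer: a "clone" shares the data, a "copy" owns a private duplicate. */
struct buf {
	unsigned char *data;
	size_t len;
	int shared;			/* stand-in for skb_cloned() */
};

static struct buf *buf_clone(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (c) {
		*c = *b;
		c->shared = 1;		/* still points at the caller's data */
	}
	return c;
}

static struct buf *buf_copy(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c) + b->len);

	if (c) {
		c->data = (unsigned char *)(c + 1);
		c->len = b->len;
		c->shared = 0;
		memcpy(c->data, b->data, b->len);
	}
	return c;
}

/* Clear byte 4 (the "errno" field) without surprising other holders. */
static struct buf *prepare_broadcast(struct buf *in, int reset_errno)
{
	struct buf *out;

	if (reset_errno && in->shared)
		out = buf_copy(in);	/* private duplicate, safe to edit */
	else
		out = buf_clone(in);

	if (reset_errno && out)
		out->data[4] = 0;
	return out;
}

int main(void)
{
	unsigned char msg[8] = { 2, 0, 0, 0, 0xff, 0, 1, 0 };
	struct buf b = { msg, sizeof(msg), 1 };
	struct buf *out = prepare_broadcast(&b, 1);

	printf("original errno byte %d, broadcast copy %d\n",
	       msg[4], out ? out->data[4] : -1);
	free(out);
	return 0;
}
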
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 110efb704c9b..5381cebe516d 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -323,7 +323,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
IPPROTO_L2TP,
- 0, 0, sk, 1);
+ 0, 0, sk, true);
if (rc) {
if (rc == -ENETUNREACH)
IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
@@ -489,7 +489,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
* itself out.
*/
security_sk_classify_flow(sk, &fl);
- if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
+ if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk))
goto no_route;
}
sk_setup_caps(sk, &rt->dst);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index f99687439139..058f1e9a9128 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -181,25 +181,26 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
* LLC functionality
*/
rcv = rcu_dereference(sap->rcv_func);
- if (rcv) {
- struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
- if (cskb)
- rcv(cskb, dev, pt, orig_dev);
- }
dest = llc_pdu_type(skb);
- if (unlikely(!dest || !llc_type_handlers[dest - 1]))
- goto drop_put;
- llc_type_handlers[dest - 1](sap, skb);
-out_put:
+ if (unlikely(!dest || !llc_type_handlers[dest - 1])) {
+ if (rcv)
+ rcv(skb, dev, pt, orig_dev);
+ else
+ kfree_skb(skb);
+ } else {
+ if (rcv) {
+ struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
+ if (cskb)
+ rcv(cskb, dev, pt, orig_dev);
+ }
+ llc_type_handlers[dest - 1](sap, skb);
+ }
llc_sap_put(sap);
out:
return 0;
drop:
kfree_skb(skb);
goto out;
-drop_put:
- kfree_skb(skb);
- goto out_put;
handle_station:
if (!llc_station_handler)
goto drop;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index c766056d0488..513f85cc2ae1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -17,7 +17,7 @@ comment "CFG80211 needs to be enabled for MAC80211"
if MAC80211 != n
config MAC80211_HAS_RC
- def_bool n
+ bool
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EXPERT
@@ -78,7 +78,7 @@ config MAC80211_RC_DEFAULT
endif
comment "Some wireless drivers require a rate control algorithm"
- depends on MAC80211_HAS_RC=n
+ depends on MAC80211 && MAC80211_HAS_RC=n
config MAC80211_MESH
bool "Enable mac80211 mesh networking (pre-802.11s) support"
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 227ca82eef72..0c9d0c07eae6 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -76,7 +76,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
- &sta->sta, tid, NULL))
+ &sta->sta, tid, NULL, 0))
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
@@ -232,6 +232,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
if (buf_size == 0)
buf_size = IEEE80211_MAX_AMPDU_BUF;
+ /* make sure the size doesn't exceed the maximum supported by the hw */
+ if (buf_size > local->hw.max_rx_aggregation_subframes)
+ buf_size = local->hw.max_rx_aggregation_subframes;
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -287,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
- &sta->sta, tid, &start_seq_num);
+ &sta->sta, tid, &start_seq_num, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
#endif /* CONFIG_MAC80211_HT_DEBUG */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 9cc472c6a6a5..63d852cb4ca2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -190,7 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
ret = drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_STOP,
- &sta->sta, tid, NULL);
+ &sta->sta, tid, NULL, 0);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
@@ -311,7 +311,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
start_seq_num = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
- &sta->sta, tid, &start_seq_num);
+ &sta->sta, tid, &start_seq_num, 0);
if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA request denied - HW unavailable for"
@@ -342,7 +342,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, start_seq_num,
- 0x40, tid_tx->timeout);
+ local->hw.max_tx_aggregation_subframes,
+ tid_tx->timeout);
}
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
@@ -487,7 +488,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
- &sta->sta, tid, NULL);
+ &sta->sta, tid, NULL,
+ sta->ampdu_mlme.tid_tx[tid]->buf_size);
/*
* synchronize with TX path, while splicing the TX path
@@ -742,9 +744,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
{
struct tid_ampdu_tx *tid_tx;
u16 capab, tid;
+ u8 buf_size;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -767,12 +771,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS) {
+ /*
+ * IEEE 802.11-2007 7.3.1.14:
+ * In an ADDBA Response frame, when the Status Code field
+ * is set to 0, the Buffer Size subfield is set to a value
+ * of at least 1.
+ */
+ if (!buf_size)
+ goto out;
+
if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
&tid_tx->state)) {
/* ignore duplicate response */
goto out;
}
+ tid_tx->buf_size = buf_size;
+
if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
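
In the ADDBA-response hunk above, the negotiated buffer size now comes out of the same 16-bit Block Ack parameter field as the TID, using shifts of 2 and 6. A standalone sketch of that decoding follows; the mask names and values are spelled out here for illustration rather than taken from ieee80211.h, assuming the usual 802.11 layout (bit 0 A-MSDU, bit 1 policy, bits 2-5 TID, bits 6-15 buffer size):

#include <stdio.h>
#include <stdint.h>

/* Block Ack parameter set masks, written out for this example. */
#define ADDBA_PARAM_TID_MASK		0x003C
#define ADDBA_PARAM_BUF_SIZE_MASK	0xFFC0

int main(void)
{
	uint16_t capab = 0x1018;	/* example value: TID 6, buffer size 64 */
	uint16_t tid, buf_size;

	tid = (capab & ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

	/* A successful ADDBA response must advertise a buffer size of at
	 * least 1 (IEEE 802.11-2007 7.3.1.14), so a zero value is rejected. */
	printf("tid=%u buf_size=%u\n", (unsigned)tid, (unsigned)buf_size);
	return 0;
}
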
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9cd73b11506e..7b701dcddb50 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -316,6 +316,17 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
return 0;
}
+static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx)
+{
+ if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
+ struct ieee80211_supported_band *sband;
+ sband = sta->local->hw.wiphy->bands[
+ sta->local->hw.conf.channel->band];
+ rate->legacy = sband->bitrates[idx].bitrate;
+ } else
+ rate->mcs = idx;
+}
+
static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -330,6 +341,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
STATION_INFO_TX_RETRIES |
STATION_INFO_TX_FAILED |
STATION_INFO_TX_BITRATE |
+ STATION_INFO_RX_BITRATE |
STATION_INFO_RX_DROP_MISC;
sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
@@ -355,15 +367,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
- if (!(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) {
- struct ieee80211_supported_band *sband;
- sband = sta->local->hw.wiphy->bands[
- sta->local->hw.conf.channel->band];
- sinfo->txrate.legacy =
- sband->bitrates[sta->last_tx_rate.idx].bitrate;
- } else
- sinfo->txrate.mcs = sta->last_tx_rate.idx;
+ sinfo->rxrate.flags = 0;
+ if (sta->last_rx_rate_flag & RX_FLAG_HT)
+ sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS;
+ if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
+ sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
+ sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx);
if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
@@ -1215,6 +1228,9 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = NULL;
+ struct ieee80211_channel *old_oper;
+ enum nl80211_channel_type old_oper_type;
+ enum nl80211_channel_type old_vif_oper_type = NL80211_CHAN_NO_HT;
if (netdev)
sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
@@ -1232,13 +1248,23 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
break;
}
- local->oper_channel = chan;
+ if (sdata)
+ old_vif_oper_type = sdata->vif.bss_conf.channel_type;
+ old_oper_type = local->_oper_channel_type;
if (!ieee80211_set_channel_type(local, sdata, channel_type))
return -EBUSY;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ old_oper = local->oper_channel;
+ local->oper_channel = chan;
+
+ /* Update driver if changes were actually made. */
+ if ((old_oper != local->oper_channel) ||
+ (old_oper_type != local->_oper_channel_type))
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+ old_vif_oper_type != sdata->vif.bss_conf.channel_type)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
return 0;
@@ -1274,8 +1300,11 @@ static int ieee80211_scan(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
if (sdata->local->ops->hw_scan)
break;
- /* FIXME: implement NoA while scanning in software */
- return -EOPNOTSUPP;
+ /*
+ * FIXME: implement NoA while scanning in software,
+ * for now fall through to allow scanning only when
+ * beaconing hasn't been configured yet
+ */
case NL80211_IFTYPE_AP:
if (sdata->u.ap.beacon)
return -EOPNOTSUPP;
@@ -1784,6 +1813,33 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = (unsigned long) skb;
+ if (is_offchan && local->ops->offchannel_tx) {
+ int ret;
+
+ IEEE80211_SKB_CB(skb)->band = chan->band;
+
+ mutex_lock(&local->mtx);
+
+ if (local->hw_offchan_tx_cookie) {
+ mutex_unlock(&local->mtx);
+ return -EBUSY;
+ }
+
+ /* TODO: bitrate control, TX processing? */
+ ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
+
+ if (ret == 0)
+ local->hw_offchan_tx_cookie = *cookie;
+ mutex_unlock(&local->mtx);
+
+ /*
+ * Allow driver to return 1 to indicate it wants to have the
+ * frame transmitted with a remain_on_channel + regular TX.
+ */
+ if (ret != 1)
+ return ret;
+ }
+
if (is_offchan && local->ops->remain_on_channel) {
unsigned int duration;
int ret;
@@ -1847,6 +1903,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
wk->chan = chan;
+ wk->chan_type = channel_type;
wk->sdata = sdata;
wk->done = ieee80211_offchan_tx_done;
wk->offchan_tx.frame = skb;
@@ -1869,6 +1926,18 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
mutex_lock(&local->mtx);
+ if (local->ops->offchannel_tx_cancel_wait &&
+ local->hw_offchan_tx_cookie == cookie) {
+ ret = drv_offchannel_tx_cancel_wait(local);
+
+ if (!ret)
+ local->hw_offchan_tx_cookie = 0;
+
+ mutex_unlock(&local->mtx);
+
+ return ret;
+ }
+
if (local->ops->cancel_remain_on_channel) {
cookie ^= 2;
ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1f02e599a318..51f0d780dafa 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -60,6 +60,10 @@ static const struct file_operations name## _ops = { \
debugfs_create_file(#name, mode, phyd, local, &name## _ops);
+DEBUGFS_READONLY_FILE(user_power, "%d",
+ local->user_power_level);
+DEBUGFS_READONLY_FILE(power, "%d",
+ local->hw.conf.power_level);
DEBUGFS_READONLY_FILE(frequency, "%d",
local->hw.conf.channel->center_freq);
DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
@@ -391,6 +395,8 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(uapsd_queues);
DEBUGFS_ADD(uapsd_max_sp_len);
DEBUGFS_ADD(channel_type);
+ DEBUGFS_ADD(user_power);
+ DEBUGFS_ADD(power);
statsd = debugfs_create_dir("statistics", phyd);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 2dabdf7680d0..dacace6b1393 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -36,7 +36,7 @@ static ssize_t ieee80211_if_read(
ret = (*format)(sdata, buf, sizeof(buf));
read_unlock(&dev_base_lock);
- if (ret != -EINVAL)
+ if (ret >= 0)
ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
return ret;
@@ -81,6 +81,8 @@ static ssize_t ieee80211_if_fmt_##name( \
IEEE80211_IF_FMT(name, field, "%d\n")
#define IEEE80211_IF_FMT_HEX(name, field) \
IEEE80211_IF_FMT(name, field, "%#x\n")
+#define IEEE80211_IF_FMT_LHEX(name, field) \
+ IEEE80211_IF_FMT(name, field, "%#lx\n")
#define IEEE80211_IF_FMT_SIZE(name, field) \
IEEE80211_IF_FMT(name, field, "%zd\n")
@@ -145,6 +147,9 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
HEX);
+IEEE80211_IF_FILE(flags, flags, HEX);
+IEEE80211_IF_FILE(state, state, LHEX);
+IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
@@ -216,6 +221,104 @@ static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
__IEEE80211_IF_FILE_W(smps);
+static ssize_t ieee80211_if_fmt_tkip_mic_test(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hwaddr_aton(const char *txt, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ int a, b;
+
+ a = hex_to_bin(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex_to_bin(*txt++);
+ if (b < 0)
+ return -1;
+ *addr++ = (a << 4) | b;
+ if (i < 5 && *txt++ != ':')
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t ieee80211_if_parse_tkip_mic_test(
+ struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ u8 addr[ETH_ALEN];
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+
+ /*
+ * Assume colon-delimited MAC address with possible white space
+ * following.
+ */
+ if (buflen < 3 * ETH_ALEN - 1)
+ return -EINVAL;
+ if (hwaddr_aton(buf, addr) < 0)
+ return -EINVAL;
+
+ if (!ieee80211_sdata_running(sdata))
+ return -ENOTCONN;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
+ memset(hdr, 0, 24);
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+ /* DA BSSID SA */
+ memcpy(hdr->addr1, addr, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
+ break;
+ case NL80211_IFTYPE_STATION:
+ fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+ /* BSSID SA DA */
+ if (sdata->vif.bss_conf.bssid == NULL) {
+ dev_kfree_skb(skb);
+ return -ENOTCONN;
+ }
+ memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(hdr->addr3, addr, ETH_ALEN);
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EOPNOTSUPP;
+ }
+ hdr->frame_control = fc;
+
+ /*
+ * Add some length to the test frame to make it look a bit more valid.
+ * The exact contents do not matter since the recipient is required
+ * to drop this because of the Michael MIC failure.
+ */
+ memset(skb_put(skb, 50), 0, 50);
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
+
+ ieee80211_tx_skb(sdata, skb);
+
+ return buflen;
+}
+
+__IEEE80211_IF_FILE_W(tkip_mic_test);
+
/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
@@ -283,6 +386,9 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
@@ -291,22 +397,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(last_beacon);
DEBUGFS_ADD(ave_beacon);
DEBUGFS_ADD_MODE(smps, 0600);
+ DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(num_sta_ps);
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
+ DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
@@ -316,12 +430,18 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
{
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
}
#ifdef CONFIG_MAC80211_MESH
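
The tkip_mic_test hunk above adds hwaddr_aton() on top of the kernel's hex_to_bin(). For reference, an equivalent self-contained userspace parser behaves the same way; the names below are local to this sketch and no kernel helpers are used:

#include <stdio.h>

#define ETH_ALEN 6

/* Decode one hex digit, -1 on anything else (mimics hex_to_bin()). */
static int hex_digit(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Parse "aa:bb:cc:dd:ee:ff" into addr[]; returns 0 on success, -1 on error. */
static int mac_aton(const char *txt, unsigned char *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int a = hex_digit(*txt++);
		int b = (a < 0) ? -1 : hex_digit(*txt++);

		if (a < 0 || b < 0)
			return -1;
		addr[i] = (unsigned char)((a << 4) | b);
		if (i < ETH_ALEN - 1 && *txt++ != ':')
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char addr[ETH_ALEN];

	if (mac_aton("02:00:00:ab:cd:ef", addr) == 0)
		printf("parsed, last octet %02x\n", addr[ETH_ALEN - 1]);
	return 0;
}
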
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 98d589960a49..3729296f6f95 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,9 +5,9 @@
#include "ieee80211_i.h"
#include "driver-trace.h"
-static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
{
- return local->ops->tx(&local->hw, skb);
+ local->ops->tx(&local->hw, skb);
}
static inline int drv_start(struct ieee80211_local *local)
@@ -382,17 +382,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
- u16 *ssn)
+ u16 *ssn, u8 buf_size)
{
int ret = -EOPNOTSUPP;
might_sleep();
- trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+ trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
if (local->ops->ampdu_action)
ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
- sta, tid, ssn);
+ sta, tid, ssn, buf_size);
trace_drv_return_int(local, ret);
@@ -495,4 +495,35 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
return ret;
}
+static inline int drv_offchannel_tx(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int wait)
+{
+ int ret;
+
+ might_sleep();
+
+ trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
+ ret = local->ops->offchannel_tx(&local->hw, skb, chan,
+ channel_type, wait);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
+{
+ int ret;
+
+ might_sleep();
+
+ trace_drv_offchannel_tx_cancel_wait(local);
+ ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 49c84218b2f4..520fe2444893 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -9,6 +9,11 @@
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
#endif
#undef TRACE_SYSTEM
@@ -38,7 +43,7 @@ static inline void trace_ ## name(proto) {}
* Tracing for driver callbacks.
*/
-TRACE_EVENT(drv_return_void,
+DECLARE_EVENT_CLASS(local_only_evt,
TP_PROTO(struct ieee80211_local *local),
TP_ARGS(local),
TP_STRUCT__entry(
@@ -50,6 +55,11 @@ TRACE_EVENT(drv_return_void,
TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
);
+DEFINE_EVENT(local_only_evt, drv_return_void,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
+
TRACE_EVENT(drv_return_int,
TP_PROTO(struct ieee80211_local *local, int ret),
TP_ARGS(local, ret),
@@ -78,40 +88,14 @@ TRACE_EVENT(drv_return_u64,
TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
);
-TRACE_EVENT(drv_start,
+DEFINE_EVENT(local_only_evt, drv_start,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(drv_stop,
+DEFINE_EVENT(local_only_evt, drv_stop,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_add_interface,
@@ -439,40 +423,14 @@ TRACE_EVENT(drv_hw_scan,
)
);
-TRACE_EVENT(drv_sw_scan_start,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(drv_sw_scan_complete,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_complete,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_get_stats,
@@ -702,23 +660,9 @@ TRACE_EVENT(drv_conf_tx,
)
);
-TRACE_EVENT(drv_get_tsf,
+DEFINE_EVENT(local_only_evt, drv_get_tsf,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT,
- LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_set_tsf,
@@ -742,41 +686,14 @@ TRACE_EVENT(drv_set_tsf,
)
);
-TRACE_EVENT(drv_reset_tsf,
+DEFINE_EVENT(local_only_evt, drv_reset_tsf,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(drv_tx_last_beacon,
+DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT,
- LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_ampdu_action,
@@ -784,9 +701,9 @@ TRACE_EVENT(drv_ampdu_action,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
- u16 *ssn),
+ u16 *ssn, u8 buf_size),
- TP_ARGS(local, sdata, action, sta, tid, ssn),
+ TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -794,6 +711,7 @@ TRACE_EVENT(drv_ampdu_action,
__field(u32, action)
__field(u16, tid)
__field(u16, ssn)
+ __field(u8, buf_size)
VIF_ENTRY
),
@@ -804,11 +722,13 @@ TRACE_EVENT(drv_ampdu_action,
__entry->action = action;
__entry->tid = tid;
__entry->ssn = ssn ? *ssn : 0;
+ __entry->buf_size = buf_size;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
+ __entry->tid, __entry->buf_size
)
);
@@ -959,24 +879,44 @@ TRACE_EVENT(drv_remain_on_channel,
)
);
-TRACE_EVENT(drv_cancel_remain_on_channel,
+DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel,
TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
- TP_ARGS(local),
+TRACE_EVENT(drv_offchannel_tx,
+ TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int wait),
+
+ TP_ARGS(local, skb, chan, channel_type, wait),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, center_freq)
+ __field(int, channel_type)
+ __field(unsigned int, wait)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->center_freq = chan->center_freq;
+ __entry->channel_type = channel_type;
+ __entry->wait = wait;
),
TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
+ LOCAL_PR_FMT " freq:%dMHz, wait:%dms",
+ LOCAL_PR_ARG, __entry->center_freq, __entry->wait
)
);
+DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1069,23 +1009,9 @@ TRACE_EVENT(api_stop_tx_ba_cb,
)
);
-TRACE_EVENT(api_restart_hw,
+DEFINE_EVENT(local_only_evt, api_restart_hw,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT,
- LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(api_beacon_loss,
@@ -1214,40 +1140,14 @@ TRACE_EVENT(api_chswitch_done,
)
);
-TRACE_EVENT(api_ready_on_channel,
+DEFINE_EVENT(local_only_evt, api_ready_on_channel,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(api_remain_on_channel_expired,
+DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
/*
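
The driver-trace.h changes above collapse many identical TRACE_EVENT() definitions into one DECLARE_EVENT_CLASS() plus short DEFINE_EVENT() lines, and add matching no-op stubs for builds without event tracing. The same stub technique, reduced to a toy example with invented macro and event names, looks like this:

#include <stdio.h>

/*
 * When event support is compiled out, the class declaration expands to
 * nothing and every defined event becomes an empty inline function, so
 * call sites need no #ifdefs.
 */
#ifndef HAVE_EVENTS
#define DECLARE_EVENT_CLASS(...)
#define DEFINE_EVENT(evt_class, name, proto) \
	static inline void trace_##name(proto) {}
#endif

DECLARE_EVENT_CLASS(local_only, int id)
DEFINE_EVENT(local_only, start, int id)
DEFINE_EVENT(local_only, stop, int id)

int main(void)
{
	trace_start(1);			/* expands to empty inline calls */
	trace_stop(2);
	printf("events compiled out\n");
	return 0;
}
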
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 75d679d75e63..b9e4b9bd2179 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -66,6 +66,9 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
/* own MCS TX capabilities */
tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
+ /* Copy peer MCS TX capabilities; the driver might need them. */
+ ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params;
+
/* can we TX with MCS rates? */
if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
return;
@@ -79,7 +82,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS;
/*
- * 802.11n D5.0 20.3.5 / 20.6 says:
+ * 802.11n-2009 20.3.5 / 20.6 says:
* - indices 0 to 7 and 32 are single spatial stream
* - 8 to 31 are multiple spatial streams using equal modulation
* [8..15 for two streams, 16..23 for three and 24..31 for four]
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 53c7077ffd4f..3e81af1fce58 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -31,7 +31,6 @@
#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
-#define IEEE80211_IBSS_MERGE_DELAY 0x400000
#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
@@ -270,7 +269,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band = rx_status->band;
if (elems->ds_params && elems->ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+ band);
else
freq = rx_status->freq;
@@ -354,7 +354,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
goto put_bss;
- if (rx_status->flag & RX_FLAG_TSFT) {
+ if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
/*
* For correct IBSS merging we need mactime; since mactime is
* defined as the time the first data symbol of the frame hits
@@ -396,10 +396,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
jiffies);
#endif
- /* give slow hardware some time to do the TSF sync */
- if (rx_timestamp < IEEE80211_IBSS_MERGE_DELAY)
- goto put_bss;
-
if (beacon_timestamp > rx_timestamp) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: beacon TSF higher than "
@@ -663,12 +659,13 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
}
static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len)
+ struct sk_buff *req)
{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
+ struct ieee80211_mgmt *mgmt = (void *)req->data;
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- int tx_last_beacon;
+ int tx_last_beacon, len = req->len;
struct sk_buff *skb;
struct ieee80211_mgmt *resp;
u8 *pos, *end;
@@ -688,7 +685,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
mgmt->bssid, tx_last_beacon);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
- if (!tx_last_beacon)
+ if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH))
return;
if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
@@ -785,7 +782,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_PROBE_REQ:
- ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
+ ieee80211_rx_mgmt_probe_req(sdata, skb);
break;
case IEEE80211_STYPE_PROBE_RESP:
ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
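Note: the ibss.c hunk above, and the mesh.c, mlme.c and scan.c hunks below, all pass the receive band to ieee80211_channel_to_frequency(), since a bare channel number is ambiguous between bands. The updated cfg80211 prototype (declared in include/net/cfg80211.h, outside this diff) is roughly:
	int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);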
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 533fd32f49ff..a40401701424 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -225,6 +225,7 @@ struct ieee80211_if_ap {
struct sk_buff_head ps_bc_buf;
atomic_t num_sta_ps; /* number of stations in PS mode */
int dtim_count;
+ bool dtim_bc_mc;
};
struct ieee80211_if_wds {
@@ -654,8 +655,6 @@ struct tpt_led_trigger {
* well be on the operating channel
* @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
* determine if we are on the operating channel or not
- * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
- * gets only set in conjunction with SCAN_SW_SCANNING
* @SCAN_COMPLETED: Set for our scan work function when the driver reported
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -664,7 +663,6 @@ struct tpt_led_trigger {
enum {
SCAN_SW_SCANNING,
SCAN_HW_SCANNING,
- SCAN_OFF_CHANNEL,
SCAN_COMPLETED,
SCAN_ABORTED,
};
@@ -959,6 +957,7 @@ struct ieee80211_local {
unsigned int hw_roc_duration;
u32 hw_roc_cookie;
bool hw_roc_for_tx;
+ unsigned long hw_offchan_tx_cookie;
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1068,8 +1067,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
-extern bool ieee80211_disable_40mhz_24ghz;
-
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -1147,10 +1144,14 @@ void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
/* off-channel helpers */
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+ bool tell_ap);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+ bool offchannel_ps_enable);
void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool enable_beaconing);
+ bool enable_beaconing,
+ bool offchannel_ps_disable);
void ieee80211_hw_roc_setup(struct ieee80211_local *local);
/* interface handling */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba456744e..4054399be907 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -382,6 +382,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
int i;
+ enum nl80211_channel_type orig_ct;
if (local->scan_sdata == sdata)
ieee80211_scan_cancel(local);
@@ -542,8 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
hw_reconf_flags = 0;
}
+ /* Re-calculate channel-type, in case there are multiple vifs
+ * on different channel types.
+ */
+ orig_ct = local->_oper_channel_type;
+ ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT);
+
/* do after stop to avoid reconfiguring when we stop anyway */
- if (hw_reconf_flags)
+ if (hw_reconf_flags || (orig_ct != local->_oper_channel_type))
ieee80211_hw_config(local, hw_reconf_flags);
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
@@ -1229,6 +1236,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
}
mutex_unlock(&local->iflist_mtx);
unregister_netdevice_many(&unreg_list);
+ list_del(&unreg_list);
}
static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a46ff06d7cb8..2543e48bd813 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -34,7 +34,7 @@
#include "debugfs.h"
-bool ieee80211_disable_40mhz_24ghz;
+static bool ieee80211_disable_40mhz_24ghz;
module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
"Disable 40MHz support in the 2.4GHz band");
@@ -98,6 +98,47 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
ieee80211_configure_filter(local);
}
+/*
+ * Returns true if we are logically configured to be on
+ * the operating channel AND the hardware-conf is currently
+ * configured on the operating channel. Compares channel-type
+ * as well.
+ */
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+{
+ struct ieee80211_channel *chan, *scan_chan;
+ enum nl80211_channel_type channel_type;
+
+ /* This logic needs to match logic in ieee80211_hw_config */
+ if (local->scan_channel) {
+ chan = local->scan_channel;
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ if (chan == local->oper_channel)
+ channel_type = local->_oper_channel_type;
+ else
+ channel_type = NL80211_CHAN_NO_HT;
+ } else if (local->tmp_channel) {
+ chan = scan_chan = local->tmp_channel;
+ channel_type = local->tmp_channel_type;
+ } else {
+ chan = local->oper_channel;
+ channel_type = local->_oper_channel_type;
+ }
+
+ if (chan != local->oper_channel ||
+ channel_type != local->_oper_channel_type)
+ return false;
+
+ /* Check current hardware-config against oper_channel. */
+ if ((local->oper_channel != local->hw.conf.channel) ||
+ (local->_oper_channel_type != local->hw.conf.channel_type))
+ return false;
+
+ return true;
+}
+
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
struct ieee80211_channel *chan, *scan_chan;
@@ -110,21 +151,33 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
scan_chan = local->scan_channel;
+ /* If this off-channel logic ever changes, ieee80211_on_oper_channel
+ * may need to change as well.
+ */
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (scan_chan) {
chan = scan_chan;
- channel_type = NL80211_CHAN_NO_HT;
- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
- } else if (local->tmp_channel &&
- local->oper_channel != local->tmp_channel) {
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ if (chan == local->oper_channel)
+ channel_type = local->_oper_channel_type;
+ else
+ channel_type = NL80211_CHAN_NO_HT;
+ } else if (local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
} else {
chan = local->oper_channel;
channel_type = local->_oper_channel_type;
- local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
}
+
+ if (chan != local->oper_channel ||
+ channel_type != local->_oper_channel_type)
+ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+ else
+ local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
+
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (offchannel_flag || chan != local->hw.conf.channel ||
@@ -146,7 +199,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
- if (scan_chan)
+ if ((local->scanning & SCAN_SW_SCANNING) ||
+ (local->scanning & SCAN_HW_SCANNING))
power = chan->max_power;
else
power = local->power_constr_level ?
@@ -231,7 +285,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (local->quiescing || !ieee80211_sdata_running(sdata) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+ test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
sdata->vif.bss_conf.enable_beacon = false;
} else {
/*
@@ -554,6 +608,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.queues = 1;
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
+ local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
@@ -668,6 +723,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
channels += sband->n_channels;
+ /*
+ * Since ieee80211_disable_40mhz_24ghz is global, we can
+ * modify the sband's ht data even if the driver uses a
+ * global structure for that.
+ */
+ if (ieee80211_disable_40mhz_24ghz &&
+ band == IEEE80211_BAND_2GHZ &&
+ sband->ht_cap.ht_supported) {
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
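Note on the new helper in main.c: ieee80211_cfg_on_oper_channel() gives later code a single test for "both the logical configuration and the current hardware configuration are on the operating channel and channel type". The scan and work changes further down use it to skip redundant channel programming; a sketch of the call pattern taken from those hunks:
	/* switch back to the operating channel only if we are not
	 * already there (pattern used in scan.c below) */
	if (!ieee80211_cfg_on_oper_channel(local))
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);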
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index ca3af4685b0a..2a57cc02c618 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -574,7 +574,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
&elems);
if (elems.ds_params && elems.ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
else
freq = rx_status->freq;
@@ -645,7 +645,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
mesh_mpath_table_grow();
- if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
+ if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
mesh_mpp_table_grow();
if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e33746..cc984bd861cf 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -28,8 +28,15 @@
#include "rate.h"
#include "led.h"
-#define IEEE80211_MAX_NULLFUNC_TRIES 2
-#define IEEE80211_MAX_PROBE_TRIES 5
+static int max_nullfunc_tries = 2;
+module_param(max_nullfunc_tries, int, 0644);
+MODULE_PARM_DESC(max_nullfunc_tries,
+ "Maximum nullfunc tx tries before disconnecting (reason 4).");
+
+static int max_probe_tries = 5;
+module_param(max_probe_tries, int, 0644);
+MODULE_PARM_DESC(max_probe_tries,
+ "Maximum probe tries before disconnecting (reason 4).");
/*
* Beacon loss timeout is calculated as N frames times the
@@ -51,7 +58,11 @@
* a probe request because of beacon loss or for
* checking the connection still works.
*/
-#define IEEE80211_PROBE_WAIT (HZ / 2)
+static int probe_wait_ms = 500;
+module_param(probe_wait_ms, int, 0644);
+MODULE_PARM_DESC(probe_wait_ms,
+ "Maximum time(ms) to wait for probe response"
+ " before disconnecting (reason 4).");
/*
* Weight given to the latest Beacon frame when calculating average signal
@@ -134,6 +145,9 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ if (unlikely(!sdata->u.mgd.associated))
+ return;
+
if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
return;
@@ -161,6 +175,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
struct sta_info *sta;
u32 changed = 0;
+ int hti_cfreq;
u16 ht_opmode;
bool enable_ht = true;
enum nl80211_channel_type prev_chantype;
@@ -174,10 +189,27 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
if (!sband->ht_cap.ht_supported)
enable_ht = false;
- /* check that channel matches the right operating channel */
- if (local->hw.conf.channel->center_freq !=
- ieee80211_channel_to_frequency(hti->control_chan))
- enable_ht = false;
+ if (enable_ht) {
+ hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
+ sband->band);
+ /* check that channel matches the right operating channel */
+ if (local->hw.conf.channel->center_freq != hti_cfreq) {
+ /* Some APs mess this up, evidently.
+ * Netgear WNDR3700 sometimes reports 4 higher than
+ * the actual channel, for instance.
+ */
+ printk(KERN_DEBUG
+ "%s: Wrong control channel in association"
+ " response: configured center-freq: %d"
+ " hti-cfreq: %d hti->control_chan: %d"
+ " band: %d. Disabling HT.\n",
+ sdata->name,
+ local->hw.conf.channel->center_freq,
+ hti_cfreq, hti->control_chan,
+ sband->band);
+ enable_ht = false;
+ }
+ }
if (enable_ht) {
channel_type = NL80211_CHAN_HT20;
@@ -429,7 +461,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_channel *new_ch;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
+ int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num,
+ cbss->channel->band);
ASSERT_MGD_MTX(ifmgd);
@@ -600,6 +633,14 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ /* If an AP vif is found, then disable PS
+ * by setting the count to zero thereby setting
+ * ps_sdata to NULL.
+ */
+ count = 0;
+ break;
+ }
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
found = sdata;
@@ -700,9 +741,19 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
return;
if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
- (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
+ (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
+ netif_tx_stop_all_queues(sdata->dev);
+ /*
+ * Flush all the frames queued in the driver before
+ * going to power save
+ */
+ drv_flush(local, false);
ieee80211_send_nullfunc(local, sdata, 1);
+ /* Flush once again to get the tx status of nullfunc frame */
+ drv_flush(local, false);
+ }
+
if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
@@ -710,6 +761,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
+
+ netif_tx_start_all_queues(sdata->dev);
}
void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -1089,7 +1142,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
u8 *dst = ifmgd->associated->bssid;
- u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
+ u8 unicast_limit = max(1, max_probe_tries - 3);
/*
* Try sending broadcast probe requests for the last three
@@ -1115,7 +1168,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
}
ifmgd->probe_send_count++;
- ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
+ ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(ifmgd, ifmgd->probe_timeout);
}
@@ -1216,7 +1269,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
- printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+ printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
+ sdata->name, bssid);
ieee80211_set_disassoc(sdata, true, true);
mutex_unlock(&ifmgd->mtx);
@@ -1519,7 +1573,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (elems->ds_params && elems->ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+ rx_status->band);
else
freq = rx_status->freq;
@@ -1960,9 +2015,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- max_tries = IEEE80211_MAX_NULLFUNC_TRIES;
+ max_tries = max_nullfunc_tries;
else
- max_tries = IEEE80211_MAX_PROBE_TRIES;
+ max_tries = max_probe_tries;
/* ACK received for nullfunc probing frame */
if (!ifmgd->probe_send_count)
@@ -1972,9 +2027,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy,
"%s: No ack for nullfunc frame to"
- " AP %pM, try %d\n",
+ " AP %pM, try %d/%i\n",
sdata->name, bssid,
- ifmgd->probe_send_count);
+ ifmgd->probe_send_count, max_tries);
#endif
ieee80211_mgd_probe_ap_send(sdata);
} else {
@@ -1994,17 +2049,17 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
"%s: Failed to send nullfunc to AP %pM"
" after %dms, disconnecting.\n",
sdata->name,
- bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+ bssid, probe_wait_ms);
#endif
ieee80211_sta_connection_lost(sdata, bssid);
} else if (ifmgd->probe_send_count < max_tries) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy,
"%s: No probe response from AP %pM"
- " after %dms, try %d\n",
+ " after %dms, try %d/%i\n",
sdata->name,
- bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ,
- ifmgd->probe_send_count);
+ bssid, probe_wait_ms,
+ ifmgd->probe_send_count, max_tries);
#endif
ieee80211_mgd_probe_ap_send(sdata);
} else {
@@ -2016,7 +2071,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
"%s: No probe response from AP %pM"
" after %dms, disconnecting.\n",
sdata->name,
- bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+ bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata, bssid);
}
@@ -2254,6 +2309,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
else
wk->type = IEEE80211_WORK_DIRECT_PROBE;
wk->chan = req->bss->channel;
+ wk->chan_type = NL80211_CHAN_NO_HT;
wk->sdata = sdata;
wk->done = ieee80211_probe_auth_done;
@@ -2403,6 +2459,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
wk->chan = req->bss->channel;
+ wk->chan_type = NL80211_CHAN_NO_HT;
wk->sdata = sdata;
wk->done = ieee80211_assoc_done;
if (!bss->dtim_period &&
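Note on the mlme.c hunks above: turning the nullfunc/probe limits into module parameters means they can be tuned without rebuilding. Since module_param() is used with 0644 permissions, max_nullfunc_tries, max_probe_tries and probe_wait_ms should appear as writable files under /sys/module/mac80211/parameters/, and can also be set as mac80211 module options at load time.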
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index b4e52676f3fb..13427b194ced 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -17,10 +17,14 @@
#include "driver-trace.h"
/*
- * inform AP that we will go to sleep so that it will buffer the frames
- * while we scan
+ * Tell our hardware to disable PS.
+ * Optionally inform AP that we will go to sleep so that it will buffer
+ * the frames while we are doing off-channel work. This is optional
+ * because we *may* be doing work on-operating channel, and want our
+ * hardware unconditionally awake, but still let the AP send us normal frames.
*/
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+ bool tell_ap)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -41,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (!(local->offchannel_ps_enabled) ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ if (tell_ap && (!local->offchannel_ps_enabled ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -77,6 +81,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
* we are sleeping, let's just enable power save mode in
* hardware.
*/
+ /* TODO: Only set hardware if CONF_PS changed?
+ * TODO: Should we set offchannel_ps_enabled to false?
+ */
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
} else if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -95,63 +102,61 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_reset_conn_monitor(sdata);
}
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+ bool offchannel_ps_enable)
{
struct ieee80211_sub_if_data *sdata;
+ /*
+ * notify the AP about us leaving the channel and stop all
+ * STA interfaces.
+ */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
- /* disable beaconing */
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+
+ /* Check to see if we should disable beaconing. */
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
ieee80211_bss_info_change_notify(
sdata, BSS_CHANGED_BEACON_ENABLED);
- /*
- * only handle non-STA interfaces here, STA interfaces
- * are handled in ieee80211_offchannel_stop_station(),
- * e.g., from the background scan state machine.
- *
- * In addition, do not stop monitor interface to allow it to be
- * used from user space controlled off-channel operations.
- */
- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR) {
- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
netif_tx_stop_all_queues(sdata->dev);
+ if (offchannel_ps_enable &&
+ (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+ sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata, true);
}
}
mutex_unlock(&local->iflist_mtx);
}
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+ bool tell_ap)
{
struct ieee80211_sub_if_data *sdata;
- /*
- * notify the AP about us leaving the channel and stop all STA interfaces
- */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
- netif_tx_stop_all_queues(sdata->dev);
- if (sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata);
- }
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata, tell_ap);
}
mutex_unlock(&local->iflist_mtx);
}
void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool enable_beaconing)
+ bool enable_beaconing,
+ bool offchannel_ps_disable)
{
struct ieee80211_sub_if_data *sdata;
@@ -161,7 +166,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
continue;
/* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (offchannel_ps_disable &&
+ sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated)
ieee80211_offchannel_ps_disable(sdata);
}
@@ -181,7 +187,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
netif_tx_wake_all_queues(sdata->dev);
}
- /* re-enable beaconing */
+ /* Check to see if we should re-enable beaconing */
if (enable_beaconing &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a6701ed87f0d..5c1930ba8ebe 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -77,7 +77,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
/* always present fields */
len = sizeof(struct ieee80211_radiotap_header) + 9;
- if (status->flag & RX_FLAG_TSFT)
+ if (status->flag & RX_FLAG_MACTIME_MPDU)
len += 8;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
len += 1;
@@ -85,6 +85,9 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
if (len & 1) /* padding for RX_FLAGS if necessary */
len++;
+ if (status->flag & RX_FLAG_HT) /* HT info */
+ len += 3;
+
return len;
}
@@ -120,7 +123,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* the order of the following fields is important */
/* IEEE80211_RADIOTAP_TSFT */
- if (status->flag & RX_FLAG_TSFT) {
+ if (status->flag & RX_FLAG_MACTIME_MPDU) {
put_unaligned_le64(status->mactime, pos);
rthdr->it_present |=
cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
@@ -139,11 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_RATE */
if (status->flag & RX_FLAG_HT) {
/*
- * TODO: add following information into radiotap header once
- * suitable fields are defined for it:
- * - MCS index (status->rate_idx)
- * - HT40 (status->flag & RX_FLAG_40MHZ)
- * - short-GI (status->flag & RX_FLAG_SHORT_GI)
+ * MCS information is a separate field in radiotap,
+ * added below.
*/
*pos = 0;
} else {
@@ -193,6 +193,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
put_unaligned_le16(rx_flags, pos);
pos += 2;
+
+ if (status->flag & RX_FLAG_HT) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+ *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ *pos = 0;
+ if (status->flag & RX_FLAG_SHORT_GI)
+ *pos |= IEEE80211_RADIOTAP_MCS_SGI;
+ if (status->flag & RX_FLAG_40MHZ)
+ *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+ pos++;
+ *pos++ = status->rate_idx;
+ }
}
/*
@@ -392,16 +406,10 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
return RX_CONTINUE;
- if (test_bit(SCAN_HW_SCANNING, &local->scanning))
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_SW_SCANNING, &local->scanning))
return ieee80211_scan_rx(rx->sdata, skb);
- if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
- /* drop all the other packets during a software scan anyway */
- if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
- dev_kfree_skb(skb);
- return RX_QUEUED;
- }
-
/* scanning finished during invoking of handlers */
I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
return RX_DROP_UNUSABLE;
@@ -798,7 +806,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
rx->local->dot11FrameDuplicateCount++;
rx->sta->num_duplicates++;
}
- return RX_DROP_MONITOR;
+ return RX_DROP_UNUSABLE;
} else
rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
}
@@ -824,18 +832,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
- (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
- if ((!ieee80211_has_fromds(hdr->frame_control) &&
- !ieee80211_has_tods(hdr->frame_control) &&
- ieee80211_is_data(hdr->frame_control)) ||
- !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
- /* Drop IBSS frames and frames for other hosts
- * silently. */
- return RX_DROP_MONITOR;
- }
-
+ (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
return RX_DROP_MONITOR;
- }
return RX_CONTINUE;
}
@@ -1088,7 +1086,8 @@ static void ap_sta_ps_start(struct sta_info *sta)
atomic_inc(&sdata->bss->num_sta_ps);
set_sta_flags(sta, WLAN_STA_PS_STA);
- drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
+ if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+ drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1117,6 +1116,27 @@ static void ap_sta_ps_end(struct sta_info *sta)
ieee80211_sta_ps_deliver_wakeup(sta);
}
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+{
+ struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+ bool in_ps;
+
+ WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
+
+ /* Don't let the same PS state be set twice */
+ in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+ if ((start && in_ps) || (!start && !in_ps))
+ return -EINVAL;
+
+ if (start)
+ ap_sta_ps_start(sta_inf);
+ else
+ ap_sta_ps_end(sta_inf);
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_sta_ps_transition);
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
@@ -1136,14 +1156,23 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
NL80211_IFTYPE_ADHOC);
- if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
+ if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
sta->last_rx = jiffies;
+ if (ieee80211_is_data(hdr->frame_control)) {
+ sta->last_rx_rate_idx = status->rate_idx;
+ sta->last_rx_rate_flag = status->flag;
+ }
+ }
} else if (!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx when if they are found to
* match the current local configuration when processed.
*/
sta->last_rx = jiffies;
+ if (ieee80211_is_data(hdr->frame_control)) {
+ sta->last_rx_rate_idx = status->rate_idx;
+ sta->last_rx_rate_flag = status->flag;
+ }
}
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -1161,7 +1190,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Change STA power saving mode only at the end of a frame
* exchange sequence.
*/
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
@@ -1556,17 +1586,36 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ bool check_port_control = false;
+ struct ethhdr *ehdr;
+ int ret;
if (ieee80211_has_a4(hdr->frame_control) &&
sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
return -1;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
+
+ if (!sdata->u.mgd.use_4addr)
+ return -1;
+ else
+ check_port_control = true;
+ }
+
if (is_multicast_ether_addr(hdr->addr1) &&
- ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
- (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
return -1;
- return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+ ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+ if (ret < 0 || !check_port_control)
+ return ret;
+
+ ehdr = (struct ethhdr *) rx->skb->data;
+ if (ehdr->h_proto != rx->sdata->control_port_protocol)
+ return -1;
+
+ return 0;
}
/*
@@ -1893,7 +1942,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
dev->stats.rx_bytes += rx->skb->len;
if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
- !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
+ !is_multicast_ether_addr(
+ ((struct ethhdr *)rx->skb->data)->h_dest) &&
+ (!local->scanning &&
+ !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
@@ -2590,7 +2642,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
if (!multicast &&
compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
- if (!(sdata->dev->flags & IFF_PROMISC))
+ if (!(sdata->dev->flags & IFF_PROMISC) ||
+ sdata->u.mgd.use_4addr)
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2639,7 +2692,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
+ if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+ !ieee80211_is_beacon(hdr->frame_control))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2692,7 +2746,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
if (!skb) {
if (net_ratelimit())
wiphy_debug(local->hw.wiphy,
- "failed to copy multicast frame for %s\n",
+ "failed to copy skb for %s\n",
sdata->name);
return true;
}
@@ -2730,7 +2784,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
local->dot11ReceivedFragmentCount++;
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+ test_bit(SCAN_SW_SCANNING, &local->scanning)))
status->rx_flags |= IEEE80211_RX_IN_SCAN;
if (ieee80211_is_mgmt(fc))
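Note on the rx.c hunks above: ieee80211_sta_ps_transition() is exported for drivers that set IEEE80211_HW_AP_LINK_PS, i.e. hardware/firmware that tracks peer power-save state itself rather than letting mac80211 infer it from received frames. A minimal, illustrative driver-side sketch (the driver function name is hypothetical):
	/* called from a hypothetical driver when its firmware reports a
	 * station entering (start == true) or leaving power save */
	static void example_report_ps(struct ieee80211_sta *sta, bool start)
	{
		/* returns -EINVAL if the station is already in that state */
		if (ieee80211_sta_ps_transition(sta, start))
			pr_debug("PS state unchanged for %pM\n", sta->addr);
	}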
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index fb274db77e3c..842954509925 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -196,7 +196,8 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
if (elems.ds_params && elems.ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems.ds_params[0],
+ rx_status->band);
else
freq = rx_status->freq;
@@ -211,6 +212,14 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (bss)
ieee80211_rx_bss_put(sdata->local, bss);
+ /* If we are on-operating-channel, and this packet is for the
+ * current channel, pass the pkt on up the stack so that
+ * the rest of the stack can make use of it.
+ */
+ if (ieee80211_cfg_on_oper_channel(sdata->local)
+ && (channel == sdata->local->oper_channel))
+ return RX_CONTINUE;
+
dev_kfree_skb(skb);
return RX_QUEUED;
}
@@ -292,15 +301,35 @@ static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
+ bool on_oper_chan;
+ bool enable_beacons = false;
+
+ mutex_lock(&local->mtx);
+ on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+ WARN_ON(local->scanning & (SCAN_SW_SCANNING | SCAN_HW_SCANNING));
+
+ if (was_hw_scan || !on_oper_chan) {
+ if (WARN_ON(local->scan_channel))
+ local->scan_channel = NULL;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ } else
+ /* Set power back to normal operating levels. */
+ ieee80211_hw_config(local, 0);
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
if (!was_hw_scan) {
+ bool on_oper_chan2;
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
- ieee80211_offchannel_return(local, true);
+ on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+ /* We should always be on-channel at this point. */
+ WARN_ON(!on_oper_chan2);
+ if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
+ enable_beacons = true;
+
+ ieee80211_offchannel_return(local, enable_beacons, true);
}
- mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
@@ -340,16 +369,21 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
*/
drv_sw_scan_start(local);
- ieee80211_offchannel_stop_beaconing(local);
-
local->leave_oper_channel_time = 0;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- drv_flush(local, false);
+ /* We always want to use off-channel PS, even if we
+ * are not really leaving oper-channel. Don't
+ * tell the AP though, as long as we are on-channel.
+ */
+ ieee80211_offchannel_enable_all_ps(local, false);
ieee80211_configure_filter(local);
+ /* We need to set power level at maximum rate for scanning. */
+ ieee80211_hw_config(local, 0);
+
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
IEEE80211_CHANNEL_TIME);
@@ -486,7 +520,20 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
}
mutex_unlock(&local->iflist_mtx);
- if (local->scan_channel) {
+ next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+ if (ieee80211_cfg_on_oper_channel(local)) {
+ /* We're currently on operating channel. */
+ if (next_chan == local->oper_channel)
+ /* We don't need to move off of operating channel. */
+ local->next_scan_state = SCAN_SET_CHANNEL;
+ else
+ /*
+ * We do need to leave operating channel, as next
+ * scan is somewhere else.
+ */
+ local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+ } else {
/*
* we're currently scanning a different channel, let's
* see if we can scan another channel without interfering
@@ -502,7 +549,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
*
* Otherwise switch back to the operating channel.
*/
- next_chan = local->scan_req->channels[local->scan_channel_idx];
bad_latency = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
@@ -520,12 +566,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
else
local->next_scan_state = SCAN_SET_CHANNEL;
- } else {
- /*
- * we're on the operating channel currently, let's
- * leave that channel now to scan another one
- */
- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
}
*next_delay = 0;
@@ -534,9 +574,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- ieee80211_offchannel_stop_station(local);
-
- __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ /* PS will already be in off-channel mode,
+ * we do that once at the beginning of scanning.
+ */
+ ieee80211_offchannel_stop_vifs(local, false);
/*
* What if the nullfunc frames didn't arrive?
@@ -559,15 +600,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
{
/* switch back to the operating channel */
local->scan_channel = NULL;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (!ieee80211_cfg_on_oper_channel(local))
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
- * Only re-enable station mode interface now; beaconing will be
- * re-enabled once the full scan has been completed.
+ * Re-enable vifs and beaconing. Leave PS
+ * in off-channel state..will put that back
+ * on-channel at the end of scanning.
*/
- ieee80211_offchannel_return(local, false);
-
- __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ ieee80211_offchannel_return(local, true, false);
*next_delay = HZ / 5;
local->next_scan_state = SCAN_DECISION;
@@ -583,8 +624,11 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
chan = local->scan_req->channels[local->scan_channel_idx];
local->scan_channel = chan;
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
- skip = 1;
+
+ /* Only call hw-config if we really need to change channels. */
+ if (chan != local->hw.conf.channel)
+ if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ skip = 1;
/* advance state machine to next channel/band */
local->scan_channel_idx++;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c426504ed1cf..5a11078827ab 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -899,7 +899,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
int sent, buffered;
- drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
+ if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+ drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
if (!skb_queue_empty(&sta->ps_tx_buf))
sta_info_clear_tim_bit(sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bbdd2a86a94b..57681149e37f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -82,6 +82,7 @@ enum ieee80211_sta_info_flags {
* @state: session state (see above)
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
+ * @buf_size: reorder buffer size at receiver
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -101,6 +102,7 @@ struct tid_ampdu_tx {
u8 dialog_token;
u8 stop_initiator;
bool tx_stop;
+ u8 buf_size;
};
/**
@@ -207,6 +209,8 @@ enum plink_state {
* @rate_ctrl_priv: rate control private per-STA pointer
* @last_tx_rate: rate used for last transmit, to report to userspace as
* "the" transmit rate
+ * @last_rx_rate_idx: rx status rate index of the last data packet
+ * @last_rx_rate_flag: rx status flag of the last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
* @flaglock: spinlock for flags accesses
@@ -309,6 +313,8 @@ struct sta_info {
unsigned long tx_bytes;
unsigned long tx_fragments;
struct ieee80211_tx_rate last_tx_rate;
+ int last_rx_rate_idx;
+ int last_rx_rate_flag;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
/*
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 071ac95c4aa0..b936dd29e92b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -98,6 +98,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
* (b) always process RX events before TX status events if ordering
* can be unknown, for example with different interrupt status
* bits.
+ * (c) if PS mode transitions are manual (i.e. the flag
+ * %IEEE80211_HW_AP_LINK_PS is set), always process PS state
+ * changes before calling TX status events if ordering can be
+ * unknown.
*/
if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
@@ -314,8 +318,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_STAT_ACK) {
local->ps_sdata->u.mgd.flags |=
IEEE80211_STA_NULLFUNC_ACKED;
- ieee80211_queue_work(&local->hw,
- &local->dynamic_ps_enable_work);
} else
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(10));
@@ -339,6 +341,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
cookie = local->hw_roc_cookie ^ 2;
local->hw_roc_skb_for_status = NULL;
}
+
+ if (cookie == local->hw_offchan_tx_cookie)
+ local->hw_offchan_tx_cookie = 0;
+
cfg80211_mgmt_tx_status(
skb->dev, cookie, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b0beaa58246b..ce4596ed1268 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -33,10 +33,6 @@
#include "wme.h"
#include "rate.h"
-#define IEEE80211_TX_OK 0
-#define IEEE80211_TX_AGAIN 1
-#define IEEE80211_TX_PENDING 2
-
/* misc utils */
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
@@ -173,7 +169,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
return cpu_to_le16(dur);
}
-static int inline is_ieee80211_device(struct ieee80211_local *local,
+static inline int is_ieee80211_device(struct ieee80211_local *local,
struct net_device *dev)
{
return local == wdev_priv(dev->ieee80211_ptr);
@@ -236,6 +232,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
ieee80211_stop_queues_by_reason(&local->hw,
IEEE80211_QUEUE_STOP_REASON_PS);
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
ieee80211_queue_work(&local->hw,
&local->dynamic_ps_disable_work);
}
@@ -257,7 +254,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
- if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+ if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+ test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control))
/*
@@ -1283,16 +1281,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
return TX_CONTINUE;
}
-static int __ieee80211_tx(struct ieee80211_local *local,
- struct sk_buff **skbp,
- struct sta_info *sta,
- bool txpending)
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
+ struct sta_info *sta, bool txpending)
{
struct sk_buff *skb = *skbp, *next;
struct ieee80211_tx_info *info;
struct ieee80211_sub_if_data *sdata;
unsigned long flags;
- int ret, len;
+ int len;
bool fragm = false;
while (skb) {
@@ -1300,13 +1299,37 @@ static int __ieee80211_tx(struct ieee80211_local *local,
__le16 fc;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- ret = IEEE80211_TX_OK;
if (local->queue_stop_reasons[q] ||
- (!txpending && !skb_queue_empty(&local->pending[q])))
- ret = IEEE80211_TX_PENDING;
+ (!txpending && !skb_queue_empty(&local->pending[q]))) {
+ /*
+ * Since queue is stopped, queue up frames for later
+ * transmission from the tx-pending tasklet when the
+ * queue is woken again.
+ */
+
+ do {
+ next = skb->next;
+ skb->next = NULL;
+ /*
+ * NB: If txpending is true, next must already
+ * be NULL since we must've gone through this
+ * loop before already; therefore we can just
+ * queue the frame to the head without worrying
+ * about reordering of fragments.
+ */
+ if (unlikely(txpending))
+ __skb_queue_head(&local->pending[q],
+ skb);
+ else
+ __skb_queue_tail(&local->pending[q],
+ skb);
+ } while ((skb = next));
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
+ return false;
+ }
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- if (ret != IEEE80211_TX_OK)
- return ret;
info = IEEE80211_SKB_CB(skb);
@@ -1341,15 +1364,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
info->control.sta = NULL;
fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
- ret = drv_tx(local, skb);
- if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
- dev_kfree_skb(skb);
- ret = NETDEV_TX_OK;
- }
- if (ret != NETDEV_TX_OK) {
- info->control.vif = &sdata->vif;
- return IEEE80211_TX_AGAIN;
- }
+ drv_tx(local, skb);
ieee80211_tpt_led_trig_tx(local, fc, len);
*skbp = skb = next;
@@ -1357,7 +1372,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
fragm = true;
}
- return IEEE80211_TX_OK;
+ return true;
}
/*
@@ -1394,7 +1409,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
/* handlers after fragment must be aware of tx info fragmentation! */
CALL_TXH(ieee80211_tx_h_stats);
CALL_TXH(ieee80211_tx_h_encrypt);
- CALL_TXH(ieee80211_tx_h_calculate_duration);
+ if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+ CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH
txh_done:
@@ -1416,23 +1432,24 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
return 0;
}
-static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool txpending)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_data tx;
ieee80211_tx_result res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct sk_buff *next;
- unsigned long flags;
- int ret, retries;
u16 queue;
+ bool result = true;
queue = skb_get_queue_mapping(skb);
if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb);
- return;
+ return true;
}
rcu_read_lock();
@@ -1442,85 +1459,19 @@ static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
if (unlikely(res_prepare == TX_DROP)) {
dev_kfree_skb(skb);
- rcu_read_unlock();
- return;
+ goto out;
} else if (unlikely(res_prepare == TX_QUEUED)) {
- rcu_read_unlock();
- return;
+ goto out;
}
tx.channel = local->hw.conf.channel;
info->band = tx.channel->band;
- if (invoke_tx_handlers(&tx))
- goto out;
-
- retries = 0;
- retry:
- ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
- switch (ret) {
- case IEEE80211_TX_OK:
- break;
- case IEEE80211_TX_AGAIN:
- /*
- * Since there are no fragmented frames on A-MPDU
- * queues, there's no reason for a driver to reject
- * a frame there, warn and drop it.
- */
- if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
- goto drop;
- /* fall through */
- case IEEE80211_TX_PENDING:
- skb = tx.skb;
-
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-
- if (local->queue_stop_reasons[queue] ||
- !skb_queue_empty(&local->pending[queue])) {
- /*
- * if queue is stopped, queue up frames for later
- * transmission from the tasklet
- */
- do {
- next = skb->next;
- skb->next = NULL;
- if (unlikely(txpending))
- __skb_queue_head(&local->pending[queue],
- skb);
- else
- __skb_queue_tail(&local->pending[queue],
- skb);
- } while ((skb = next));
-
- spin_unlock_irqrestore(&local->queue_stop_reason_lock,
- flags);
- } else {
- /*
- * otherwise retry, but this is a race condition or
- * a driver bug (which we warn about if it persists)
- */
- spin_unlock_irqrestore(&local->queue_stop_reason_lock,
- flags);
-
- retries++;
- if (WARN(retries > 10, "tx refused but queue active\n"))
- goto drop;
- goto retry;
- }
- }
+ if (!invoke_tx_handlers(&tx))
+ result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
out:
rcu_read_unlock();
- return;
-
- drop:
- rcu_read_unlock();
-
- skb = tx.skb;
- while (skb) {
- next = skb->next;
- dev_kfree_skb(skb);
- skb = next;
- }
+ return result;
}
/* device xmit handlers */
@@ -1750,7 +1701,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
__le16 fc;
struct ieee80211_hdr hdr;
struct ieee80211s_hdr mesh_hdr __maybe_unused;
- struct mesh_path *mppath = NULL;
+ struct mesh_path __maybe_unused *mppath = NULL;
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
@@ -1815,19 +1766,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
mppath = mpp_path_lookup(skb->data, sdata);
/*
- * Do not use address extension, if it is a packet from
- * the same interface and the destination is not being
- * proxied by any other mest point.
+ * Use address extension if it is a packet from
+ * another interface or if we know the destination
+ * is being proxied by a portal (i.e. portal address
+ * differs from proxied address)
*/
if (compare_ether_addr(sdata->vif.addr,
skb->data + ETH_ALEN) == 0 &&
- (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
+ !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL);
} else {
- /* packet from other interface */
int is_mesh_mcast = 1;
const u8 *mesh_da;
@@ -2067,6 +2018,11 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
skb_queue_purge(&local->pending[i]);
}
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead,
+ * which in this case means re-queued -- take as an indication to stop sending
+ * more pending frames.
+ */
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb)
{
@@ -2074,20 +2030,17 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
struct ieee80211_hdr *hdr;
- int ret;
- bool result = true;
+ bool result;
sdata = vif_to_sdata(info->control.vif);
if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
- ieee80211_tx(sdata, skb, true);
+ result = ieee80211_tx(sdata, skb, true);
} else {
hdr = (struct ieee80211_hdr *)skb->data;
sta = sta_info_get(sdata, hdr->addr1);
- ret = __ieee80211_tx(local, &skb, sta, true);
- if (ret != IEEE80211_TX_OK)
- result = false;
+ result = __ieee80211_tx(local, &skb, sta, true);
}
return result;
@@ -2129,8 +2082,6 @@ void ieee80211_tx_pending(unsigned long data)
flags);
txok = ieee80211_tx_pending_skb(local, skb);
- if (!txok)
- __skb_queue_head(&local->pending[i], skb);
spin_lock_irqsave(&local->queue_stop_reason_lock,
flags);
if (!txok)
@@ -2178,6 +2129,8 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
aid0 = 1;
+ bss->dtim_bc_mc = aid0 == 1;
+
if (have_bits) {
/* Find largest even number N1 so that bits numbered 1 through
* (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
@@ -2241,7 +2194,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
if (sdata->vif.type == NL80211_IFTYPE_AP) {
ap = &sdata->u.ap;
beacon = rcu_dereference(ap->beacon);
- if (ap && beacon) {
+ if (beacon) {
/*
* headroom, head length,
* tail length and maximum TIM length
@@ -2302,6 +2255,11 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_mgmt *mgmt;
u8 *pos;
+#ifdef CONFIG_MAC80211_MESH
+ if (!sdata->u.mesh.mesh_id_len)
+ goto out;
+#endif
+
/* headroom, head length, tail length and maximum TIM length */
skb = dev_alloc_skb(local->tx_headroom + 400 +
sdata->u.mesh.vendor_ie_len);
@@ -2543,7 +2501,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
goto out;
- if (bss->dtim_count != 0)
+ if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
goto out; /* send buffered bc/mc only after DTIM beacon */
while (1) {
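Note on the tx.c hunks above: __ieee80211_tx() and ieee80211_tx() now report success as a bool instead of the old OK/AGAIN/PENDING codes. Frames that cannot be sent are queued on local->pending[q] inside __ieee80211_tx() itself, so a false return only tells the caller to stop feeding further frames. A sketch of the resulting caller pattern in ieee80211_tx_pending() (the surrounding loop is not fully shown in this diff):
	txok = ieee80211_tx_pending_skb(local, skb);
	if (!txok)
		break;	/* frame was re-queued; retry when the queue wakes */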
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d036597aabbe..556647a910ac 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -986,12 +986,6 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u16 cap = sband->ht_cap.cap;
__le16 tmp;
- if (ieee80211_disable_40mhz_24ghz &&
- sband->band == IEEE80211_BAND_2GHZ) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
-
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
memset(pos, 0, sizeof(struct ieee80211_ht_cap));
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 36305e0d06ef..204f0a4db969 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -126,12 +126,6 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
/* determine capability flags */
- if (ieee80211_disable_40mhz_24ghz &&
- sband->band == IEEE80211_BAND_2GHZ) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
-
switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
@@ -874,6 +868,44 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
kfree_skb(skb);
}
+static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
+ enum nl80211_channel_type oper_ct)
+{
+ switch (wk_ct) {
+ case NL80211_CHAN_NO_HT:
+ return true;
+ case NL80211_CHAN_HT20:
+ if (oper_ct != NL80211_CHAN_NO_HT)
+ return true;
+ return false;
+ case NL80211_CHAN_HT40MINUS:
+ case NL80211_CHAN_HT40PLUS:
+ return (wk_ct == oper_ct);
+ }
+ WARN_ON(1); /* shouldn't get here */
+ return false;
+}
+
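+/* Hedged note: picks the channel type to actually use for a work item;
+ * the wider operating type is kept when the request coexists with it
+ * (e.g. an HT20 request on an HT40+ operating channel stays HT40+),
+ * otherwise the type the work item asked for is used. */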
+static enum nl80211_channel_type
+ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
+ enum nl80211_channel_type oper_ct)
+{
+ switch (wk_ct) {
+ case NL80211_CHAN_NO_HT:
+ return oper_ct;
+ case NL80211_CHAN_HT20:
+ if (oper_ct != NL80211_CHAN_NO_HT)
+ return oper_ct;
+ return wk_ct;
+ case NL80211_CHAN_HT40MINUS:
+ case NL80211_CHAN_HT40PLUS:
+ return wk_ct;
+ }
+ WARN_ON(1); /* shouldn't get here */
+ return wk_ct;
+}
+
+
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
@@ -924,18 +956,52 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!started && !local->tmp_channel) {
+ bool on_oper_chan;
+ bool tmp_chan_changed = false;
+ bool on_oper_chan2;
+ enum nl80211_channel_type wk_ct;
+ on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+ /* Work with existing channel type if possible. */
+ wk_ct = wk->chan_type;
+ if (wk->chan == local->hw.conf.channel)
+ wk_ct = ieee80211_calc_ct(wk->chan_type,
+ local->hw.conf.channel_type);
+
+ if (local->tmp_channel)
+ if ((local->tmp_channel != wk->chan) ||
+ (local->tmp_channel_type != wk_ct))
+ tmp_chan_changed = true;
+
+ local->tmp_channel = wk->chan;
+ local->tmp_channel_type = wk_ct;
/*
- * TODO: could optimize this by leaving the
- * station vifs in awake mode if they
- * happen to be on the same channel as
- * the requested channel
+ * Leave the station vifs in awake mode if they
+ * happen to be on the same channel as
+ * the requested channel.
*/
- ieee80211_offchannel_stop_beaconing(local);
- ieee80211_offchannel_stop_station(local);
+ on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+ if (on_oper_chan != on_oper_chan2) {
+ if (on_oper_chan2) {
+ /* going off oper channel, PS too */
+ ieee80211_offchannel_stop_vifs(local,
+ true);
+ ieee80211_hw_config(local, 0);
+ } else {
+ /* going on channel, but leave PS
+ * off-channel. */
+ ieee80211_hw_config(local, 0);
+ ieee80211_offchannel_return(local,
+ true,
+ false);
+ }
+ } else if (tmp_chan_changed)
+ /* Still off-channel, but on some other
+ * channel, so update hardware.
+ * PS should already be off-channel.
+ */
+ ieee80211_hw_config(local, 0);
- local->tmp_channel = wk->chan;
- local->tmp_channel_type = wk->chan_type;
- ieee80211_hw_config(local, 0);
started = true;
wk->timeout = jiffies;
}
@@ -1005,15 +1071,34 @@ static void ieee80211_work_work(struct work_struct *work)
continue;
if (wk->chan != local->tmp_channel)
continue;
- if (wk->chan_type != local->tmp_channel_type)
+ if (!ieee80211_work_ct_coexists(wk->chan_type,
+ local->tmp_channel_type))
continue;
remain_off_channel = true;
}
if (!remain_off_channel && local->tmp_channel) {
+ bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
- ieee80211_offchannel_return(local, true);
+ /* If tmp_channel wasn't operating channel, then
+ * we need to go back on-channel.
+ * NOTE: If we can ever be here while scannning,
+ * or if the hw_config() channel config logic changes,
+ * then we may need to do a more thorough check to see if
+ * we still need to do a hardware config. Currently,
+ * we cannot be here while scanning, however.
+ */
+ if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+ ieee80211_hw_config(local, 0);
+
+ /* At the least, we need to disable offchannel_ps,
+ * so just go ahead and run the entire offchannel
+ * return logic here. We *could* skip enabling
+ * beaconing if we were already on-oper-channel
+ * as a future optimization.
+ */
+ ieee80211_offchannel_return(local, true, true);
+
/* give connection some time to breathe */
run_again(local, jiffies + HZ/2);
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index bee230d8fd11..f1765de2f4bf 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -26,13 +26,12 @@
ieee80211_tx_result
ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
{
- u8 *data, *key, *mic, key_offset;
+ u8 *data, *key, *mic;
size_t data_len;
unsigned int hdrlen;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = tx->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int authenticator;
int tail;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,6 +46,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
data = skb->data + hdrlen;
data_len = skb->len - hdrlen;
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) {
+ /* Need to use software crypto for the test */
+ info->control.hw_key = NULL;
+ }
+
if (info->control.hw_key &&
!(tx->flags & IEEE80211_TX_FRAGMENTED) &&
!(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
@@ -62,17 +66,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
skb_headroom(skb) < TKIP_IV_LEN))
return TX_DROP;
-#if 0
- authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
-#else
- authenticator = 1;
-#endif
- key_offset = authenticator ?
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY :
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
- key = &tx->key->conf.key[key_offset];
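+	/* The authenticator/supplicant distinction above was hardcoded to
+	 * the authenticator case, so transmit always used the TX MIC key
+	 * half; use that offset directly. */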
+ key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
mic = skb_put(skb, MICHAEL_MIC_LEN);
michael_mic(key, hdr, data, data_len, mic);
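+	/* For the MIC-failure test, corrupt the first MIC byte so the
+	 * receiver reports a Michael MIC error. */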
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
+ mic[0]++;
return TX_CONTINUE;
}
@@ -81,14 +79,13 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
ieee80211_rx_result
ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
{
- u8 *data, *key = NULL, key_offset;
+ u8 *data, *key = NULL;
size_t data_len;
unsigned int hdrlen;
u8 mic[MICHAEL_MIC_LEN];
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int authenticator = 1, wpa_test = 0;
/* No way to verify the MIC if the hardware stripped it */
if (status->flag & RX_FLAG_MMIC_STRIPPED)
@@ -106,17 +103,9 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
data = skb->data + hdrlen;
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
-#if 0
- authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */
-#else
- authenticator = 1;
-#endif
- key_offset = authenticator ?
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY :
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
- key = &rx->key->conf.key[key_offset];
+ key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
+ if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) {
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_UNUSABLE;
@@ -208,7 +197,7 @@ ieee80211_rx_result
ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
- int hdrlen, res, hwaccel = 0, wpa_test = 0;
+ int hdrlen, res, hwaccel = 0;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -235,7 +224,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
hdr->addr1, hwaccel, rx->queue,
&rx->tkip_iv32,
&rx->tkip_iv16);
- if (res != TKIP_DECRYPT_OK || wpa_test)
+ if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
/* Trim ICV */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1534f2b44caf..82a6e0d80f05 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -85,6 +85,17 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_TIMESTAMP
+ bool 'Connection tracking timestamping'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking timestamping.
+ This allows you to store the flow start-time and to obtain
+ the flow-stop time (once it has been destroyed) via Connection
+ tracking events.
+
+ If unsure, say `N'.
+
config NF_CT_PROTO_DCCP
tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -185,9 +196,13 @@ config NF_CONNTRACK_IRC
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_BROADCAST
+ tristate
+
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
unprivileged port and responded to with unicast messages to the
@@ -204,6 +219,21 @@ config NF_CONNTRACK_NETBIOS_NS
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_SNMP
+ tristate "SNMP service protocol support"
+ depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_BROADCAST
+ help
+ SNMP service requests are sent as broadcast messages from an
+ unprivileged port and responded to with unicast messages to the
+ same port. This makes them hard to firewall properly because connection
+ tracking doesn't deal with broadcasts. This helper tracks locally
+ originating SNMP service requests and the corresponding
+ responses. It relies on correct IP address configuration, specifically
+ netmask and broadcast address.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NF_CONNTRACK_PPTP
tristate "PPtP protocol support"
depends on NETFILTER_ADVANCED
@@ -322,10 +352,32 @@ config NETFILTER_XT_CONNMARK
ctmark), similarly to the packet mark (nfmark). Using this
target and match, you can set and match on this mark.
+config NETFILTER_XT_SET
+ tristate 'set target and match support'
+ depends on IP_SET
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds the "SET" target and "set" match.
+
+ Using this target and match, you can add/delete and match
+ elements in the sets created by ipset(8).
+
+ To compile it as a module, choose M here. If unsure, say N.
+
# alphabetically ordered list of targets
comment "Xtables targets"
+config NETFILTER_XT_TARGET_AUDIT
+ tristate "AUDIT target support"
+ depends on AUDIT
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds an 'AUDIT' target, which can be used to create
+ audit records for packets dropped/accepted.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
depends on IP_NF_MANGLE || IP6_NF_MANGLE
@@ -477,6 +529,7 @@ config NETFILTER_XT_TARGET_NFLOG
config NETFILTER_XT_TARGET_NFQUEUE
tristate '"NFQUEUE" target Support'
depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK_QUEUE
help
This target replaced the old obsolete QUEUE target.
@@ -685,6 +738,15 @@ config NETFILTER_XT_MATCH_DCCP
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_DEVGROUP
+ tristate '"devgroup" match support'
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `devgroup' match, which allows matching on the
+ device group a network device is assigned to.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_DSCP
tristate '"dscp" and "tos" match support'
depends on NETFILTER_ADVANCED
@@ -886,7 +948,7 @@ config NETFILTER_XT_MATCH_RATEEST
config NETFILTER_XT_MATCH_REALM
tristate '"realm" match support'
depends on NETFILTER_ADVANCED
- select NET_CLS_ROUTE
+ select IP_ROUTE_CLASSID
help
This option adds a `realm' match, which allows you to use the realm
key from the routing subsystem inside iptables.
@@ -1011,4 +1073,6 @@ endif # NETFILTER_XTABLES
endmenu
+source "net/netfilter/ipset/Kconfig"
+
source "net/netfilter/ipvs/Kconfig"
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 441050f31111..d57a890eaee5 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -28,7 +29,9 @@ obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o
obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o
obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o
obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
@@ -43,8 +46,10 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
# combos
obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
# targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -72,6 +77,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
@@ -101,5 +107,8 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
+# ipset
+obj-$(CONFIG_IP_SET) += ipset/
+
# IPVS
obj-$(CONFIG_IP_VS) += ipvs/
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 4aa614b8a96a..899b71c0ff5d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -176,13 +176,21 @@ next_hook:
ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
kfree_skb(skb);
- ret = -(verdict >> NF_VERDICT_BITS);
+ ret = NF_DROP_GETERR(verdict);
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
- verdict >> NF_VERDICT_BITS))
- goto next_hook;
+ ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+ verdict >> NF_VERDICT_QBITS);
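+		/* Hedged note on nf_queue() errors: -ECANCELED means fall
+		 * through to the next hook; -ESRCH means no queue handler is
+		 * listening, in which case the packet continues traversal only
+		 * if the rule set the queue-bypass flag, otherwise it is
+		 * dropped. */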
+ if (ret < 0) {
+ if (ret == -ECANCELED)
+ goto next_hook;
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ goto next_hook;
+ kfree_skb(skb);
+ }
+ ret = 0;
}
rcu_read_unlock();
return ret;
@@ -215,7 +223,7 @@ EXPORT_SYMBOL(skb_make_writable);
/* This does not belong here, but locally generated errors need it if connection
tracking in use: without this, connection may not be in hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
@@ -232,7 +240,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
}
EXPORT_SYMBOL(nf_ct_attach);
-void (*nf_ct_destroy)(struct nf_conntrack *);
+void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);
void nf_conntrack_destroy(struct nf_conntrack *nfct)
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
new file mode 100644
index 000000000000..2c5b348eb3a8
--- /dev/null
+++ b/net/netfilter/ipset/Kconfig
@@ -0,0 +1,122 @@
+menuconfig IP_SET
+ tristate "IP set support"
+ depends on INET && NETFILTER
+ depends on NETFILTER_NETLINK
+ help
+ This option adds IP set support to the kernel.
+ In order to define and use the sets, you need the userspace utility
+ ipset(8). You can use the sets in netfilter via the "set" match
+ and "SET" target.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+if IP_SET
+
+config IP_SET_MAX
+ int "Maximum number of IP sets"
+ default 256
+ range 2 65534
+ depends on IP_SET
+ help
+ You can define here the default value of the maximum number
+ of IP sets for the kernel.
+
+ The value can be overridden by the 'max_sets' module
+ parameter of the 'ip_set' module.
+
+config IP_SET_BITMAP_IP
+ tristate "bitmap:ip set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:ip set type support, by which one
+ can store IPv4 addresses (or network addresses) from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_BITMAP_IPMAC
+ tristate "bitmap:ip,mac set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:ip,mac set type support, by which one
+ can store IPv4 address and (source) MAC address pairs from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_BITMAP_PORT
+ tristate "bitmap:port set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:port set type support, by which one
+ can store TCP/UDP port numbers from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IP
+ tristate "hash:ip set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip set type support, by which one
+ can store arbitrary IPv4 or IPv6 addresses (or network addresses)
+ in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORT
+ tristate "hash:ip,port set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port set type support, by which one
+ can store IPv4/IPv6 address and protocol/port pairs.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORTIP
+ tristate "hash:ip,port,ip set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port,ip set type support, by which
+ one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+ address triples in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORTNET
+ tristate "hash:ip,port,net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port,net set type support, by which
+ one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+ network address/prefix triples in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_NET
+ tristate "hash:net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net set type support, by which
+ one can store IPv4/IPv6 network address/prefix elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_NETPORT
+ tristate "hash:net,port set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net,port set type support, by which
+ one can store IPv4/IPv6 network address/prefix and
+ protocol/port pairs as elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_LIST_SET
+ tristate "list:set set support"
+ depends on IP_SET
+ help
+ This option adds the list:set set type support. In this
+ kind of set one can store the names of other sets, and it forms
+ an ordered union of the member sets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+endif # IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
new file mode 100644
index 000000000000..5adbdab67bd2
--- /dev/null
+++ b/net/netfilter/ipset/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the ipset modules
+#
+
+ip_set-y := ip_set_core.o ip_set_getport.o pfxlen.o
+
+# ipset core
+obj-$(CONFIG_IP_SET) += ip_set.o
+
+# bitmap types
+obj-$(CONFIG_IP_SET_BITMAP_IP) += ip_set_bitmap_ip.o
+obj-$(CONFIG_IP_SET_BITMAP_IPMAC) += ip_set_bitmap_ipmac.o
+obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
+
+# hash types
+obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
+obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+
+# list types
+obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
new file mode 100644
index 000000000000..bca96990218d
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -0,0 +1,587 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip");
+
+/* Type structure */
+struct bitmap_ip {
+ void *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 elements; /* number of max elements in the set */
+ u32 hosts; /* number of hosts in a subnet */
+ size_t memsize; /* members size */
+ u8 netmask; /* subnet netmask */
+ u32 timeout; /* timeout parameter */
+ struct timer_list gc; /* garbage collection */
+};
+
+/* Base variant */
+
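+/* Map an IP to its bit index in the set: e.g. with first_ip 10.0.0.0
+ * and netmask 24, every address in 10.0.1.0/24 maps to index 1. */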
+static inline u32
+ip_to_id(const struct bitmap_ip *m, u32 ip)
+{
+ return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+}
+
+static int
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (test_and_set_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (!test_and_clear_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_ip_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+
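+	/* cb->args[2] stores the next element id so an interrupted netlink
+	 * dump can resume where the previous message filled up. */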
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->elements; cb->args[2]++) {
+ id = cb->args[2];
+ if (!test_bit(id, map->members))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
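+/* With timeouts the members array holds one unsigned long expiry value
+ * (jiffies based) per element instead of a single bit per element. */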
+static int
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ip *map = set->data;
+ const unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ if (ip_set_timeout_test(members[id]))
+ return -IPSET_ERR_EXIST;
+
+ members[id] = ip_set_timeout_set(timeout);
+
+ return 0;
+}
+
+static int
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+ int ret = -IPSET_ERR_EXIST;
+
+ if (ip_set_timeout_test(members[id]))
+ ret = 0;
+
+ members[id] = IPSET_ELEM_UNSET;
+ return ret;
+}
+
+static int
+bitmap_ip_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *adt, *nested;
+ u32 id, first = cb->args[2];
+ const unsigned long *members = map->members;
+
+ adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!adt)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->elements; cb->args[2]++) {
+ id = cb->args[2];
+ if (!ip_set_timeout_test(members[id]))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, adt);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts));
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id])));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, adt);
+
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, adt);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_ip *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 ip;
+
+ ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ ip = ip_to_id(map, ip);
+
+ return adtfn(set, &ip, map->timeout);
+}
+
+static int
+bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct bitmap_ip *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 timeout = map->timeout;
+ u32 ip, ip_to, id;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ id = ip_to_id(map, ip);
+ return adtfn(set, &id, timeout);
+ }
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to) {
+ swap(ip, ip_to);
+ if (ip < map->first_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ if (ip_to > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ for (; !before(ip_to, ip); ip += map->hosts) {
+ id = ip_to_id(map, ip);
+ ret = adtfn(set, &id, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static void
+bitmap_ip_destroy(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_ip_flush(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+ if (map->netmask != 32)
+ NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_ip *x = a->data;
+ const struct bitmap_ip *y = b->data;
+
+ return x->first_ip == y->first_ip &&
+ x->last_ip == y->last_ip &&
+ x->netmask == y->netmask &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ip = {
+ .kadt = bitmap_ip_kadt,
+ .uadt = bitmap_ip_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ip_add,
+ [IPSET_DEL] = bitmap_ip_del,
+ [IPSET_TEST] = bitmap_ip_test,
+ },
+ .destroy = bitmap_ip_destroy,
+ .flush = bitmap_ip_flush,
+ .head = bitmap_ip_head,
+ .list = bitmap_ip_list,
+ .same_set = bitmap_ip_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tip = {
+ .kadt = bitmap_ip_kadt,
+ .uadt = bitmap_ip_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ip_tadd,
+ [IPSET_DEL] = bitmap_ip_tdel,
+ [IPSET_TEST] = bitmap_ip_ttest,
+ },
+ .destroy = bitmap_ip_destroy,
+ .flush = bitmap_ip_flush,
+ .head = bitmap_ip_head,
+ .list = bitmap_ip_tlist,
+ .same_set = bitmap_ip_same_set,
+};
+
+static void
+bitmap_ip_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_ip *map = set->data;
+ unsigned long *table = map->members;
+ u32 id;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id < map->elements; id++)
+ if (ip_set_timeout_expired(table[id]))
+ table[id] = IPSET_ELEM_UNSET;
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_ip_gc_init(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_ip_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+ u32 first_ip, u32 last_ip,
+ u32 elements, u32 hosts, u8 netmask)
+{
+ map->members = ip_set_alloc(map->memsize);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+ map->last_ip = last_ip;
+ map->elements = elements;
+ map->hosts = hosts;
+ map->netmask = netmask;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_INET;
+
+ return true;
+}
+
+static int
+bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct bitmap_ip *map;
+ u32 first_ip, last_ip, hosts, elements;
+ u8 netmask = 32;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+ if (ret)
+ return ret;
+ if (first_ip > last_ip) {
+ u32 tmp = first_ip;
+
+ first_ip = last_ip;
+ last_ip = tmp;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr >= 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ last_ip = first_ip | ~ip_set_hostmask(cidr);
+ } else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_NETMASK]) {
+ netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+ if (netmask > 32)
+ return -IPSET_ERR_INVALID_NETMASK;
+
+ first_ip &= ip_set_hostmask(netmask);
+ last_ip |= ~ip_set_hostmask(netmask);
+ }
+
+ if (netmask == 32) {
+ hosts = 1;
+ elements = last_ip - first_ip + 1;
+ } else {
+ u8 mask_bits;
+ u32 mask;
+
+ mask = range_to_mask(first_ip, last_ip, &mask_bits);
+
+ if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
+ netmask <= mask_bits)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
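+		/* One bit per /netmask subnet: e.g. a /16 range with
+		 * netmask 24 gives hosts = 256 addresses per bit and
+		 * elements = 256 bits. */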
+ hosts = 2 << (32 - netmask - 1);
+ elements = 2 << (netmask - mask_bits - 1);
+ }
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+ pr_debug("hosts %u, elements %u\n", hosts, elements);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->memsize = elements * sizeof(unsigned long);
+
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ set->variant = &bitmap_tip;
+
+ bitmap_ip_gc_init(set);
+ } else {
+ map->memsize = bitmap_bytes(0, elements - 1);
+
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ set->variant = &bitmap_ip;
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_ip_type __read_mostly = {
+ .name = "bitmap:ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_INET,
+ .revision = 0,
+ .create = bitmap_ip_create,
+ .create_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_ip_init(void)
+{
+ return ip_set_type_register(&bitmap_ip_type);
+}
+
+static void __exit
+bitmap_ip_fini(void)
+{
+ ip_set_type_unregister(&bitmap_ip_type);
+}
+
+module_init(bitmap_ip_init);
+module_exit(bitmap_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
new file mode 100644
index 000000000000..5e790172deff
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -0,0 +1,652 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip,mac");
+
+enum {
+ MAC_EMPTY, /* element is not set */
+ MAC_FILLED, /* element is set with MAC */
+ MAC_UNSET, /* element is set, without MAC */
+};
+
+/* Type structure */
+struct bitmap_ipmac {
+ void *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 timeout; /* timeout value */
+ struct timer_list gc; /* garbage collector */
+ size_t dsize; /* size of element */
+};
+
+/* ADT structure for generic function args */
+struct ipmac {
+ u32 id; /* id in array */
+ unsigned char *ether; /* ethernet address */
+};
+
+/* Member element without and with timeout */
+
+struct ipmac_elem {
+ unsigned char ether[ETH_ALEN];
+ unsigned char match;
+} __attribute__ ((aligned));
+
+struct ipmac_telem {
+ unsigned char ether[ETH_ALEN];
+ unsigned char match;
+ unsigned long timeout;
+} __attribute__ ((aligned));
+
+static inline void *
+bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
+{
+ return (void *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
+{
+ const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+ return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+bitmap_expired(const struct bitmap_ipmac *map, u32 id)
+{
+ const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+ return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+bitmap_ipmac_exist(const struct ipmac_telem *elem)
+{
+ return elem->match == MAC_UNSET ||
+ (elem->match == MAC_FILLED &&
+ !ip_set_timeout_expired(elem->timeout));
+}
+
+/* Base variant */
+
+static int
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ /* Trigger kernel to fill out the ethernet address */
+ return -EAGAIN;
+ case MAC_FILLED:
+ return data->ether == NULL ||
+ compare_ether_addr(data->ether, elem->ether) == 0;
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ if (!data->ether)
+ /* Already added without ethernet address */
+ return -IPSET_ERR_EXIST;
+ /* Fill the MAC address */
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ break;
+ case MAC_FILLED:
+ return -IPSET_ERR_EXIST;
+ case MAC_EMPTY:
+ if (data->ether) {
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ } else
+ elem->match = MAC_UNSET;
+ }
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ if (elem->match == MAC_EMPTY)
+ return -IPSET_ERR_EXIST;
+
+ elem->match = MAC_EMPTY;
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac_elem *elem;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+ u32 last = map->last_ip - map->first_ip;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ elem = bitmap_ipmac_elem(map, id);
+ if (elem->match == MAC_EMPTY)
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id));
+ if (elem->match == MAC_FILLED)
+ NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether);
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ /* Trigger kernel to fill out the ethernet address */
+ return -EAGAIN;
+ case MAC_FILLED:
+ return (data->ether == NULL ||
+ compare_ether_addr(data->ether, elem->ether) == 0) &&
+ !bitmap_expired(map, data->id);
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ if (!data->ether)
+ /* Already added without ethernet address */
+ return -IPSET_ERR_EXIST;
+ /* Fill the MAC address and activate the timer */
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ if (timeout == map->timeout)
+ /* Timeout was not specified, get stored one */
+ timeout = elem->timeout;
+ elem->timeout = ip_set_timeout_set(timeout);
+ break;
+ case MAC_FILLED:
+ if (!bitmap_expired(map, data->id))
+ return -IPSET_ERR_EXIST;
+ /* Fall through */
+ case MAC_EMPTY:
+ if (data->ether) {
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ } else
+ elem->match = MAC_UNSET;
+ /* If the MAC is not set yet, store the plain timeout value
+ * because the timer is not activated yet;
+ * we can reuse it later when the MAC is filled in,
+ * possibly by the kernel */
+ elem->timeout = data->ether ? ip_set_timeout_set(timeout)
+ : timeout;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+ if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
+ return -IPSET_ERR_EXIST;
+
+ elem->match = MAC_EMPTY;
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac_telem *elem;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+ u32 timeout, last = map->last_ip - map->first_ip;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ elem = bitmap_ipmac_elem(map, id);
+ if (!bitmap_ipmac_exist(elem))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id));
+ if (elem->match == MAC_FILLED)
+ NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether);
+ timeout = elem->match == MAC_UNSET ? elem->timeout
+ : ip_set_timeout_get(elem->timeout);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ return -EMSGSIZE;
+}
+
+static int
+bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_ipmac *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct ipmac data;
+
+ data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+ if (data.id < map->first_ip || data.id > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ /* Backward compatibility: we don't check the second flag */
+ if (skb_mac_header(skb) < skb->head ||
+ (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+ return -EINVAL;
+
+ data.id -= map->first_ip;
+ data.ether = eth_hdr(skb)->h_source;
+
+ return adtfn(set, &data, map->timeout);
+}
+
+static int
+bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct bitmap_ipmac *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct ipmac data;
+ u32 timeout = map->timeout;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
+ if (ret)
+ return ret;
+
+ if (data.id < map->first_ip || data.id > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_ETHER])
+ data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
+ else
+ data.ether = NULL;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ data.id -= map->first_ip;
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+bitmap_ipmac_destroy(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_ipmac_flush(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ memset(map->members, 0,
+ (map->last_ip - map->first_ip + 1) * map->dsize);
+}
+
+static int
+bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map)
+ + (map->last_ip - map->first_ip + 1) * map->dsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_ipmac *x = a->data;
+ const struct bitmap_ipmac *y = b->data;
+
+ return x->first_ip == y->first_ip &&
+ x->last_ip == y->last_ip &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ipmac = {
+ .kadt = bitmap_ipmac_kadt,
+ .uadt = bitmap_ipmac_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ipmac_add,
+ [IPSET_DEL] = bitmap_ipmac_del,
+ [IPSET_TEST] = bitmap_ipmac_test,
+ },
+ .destroy = bitmap_ipmac_destroy,
+ .flush = bitmap_ipmac_flush,
+ .head = bitmap_ipmac_head,
+ .list = bitmap_ipmac_list,
+ .same_set = bitmap_ipmac_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tipmac = {
+ .kadt = bitmap_ipmac_kadt,
+ .uadt = bitmap_ipmac_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ipmac_tadd,
+ [IPSET_DEL] = bitmap_ipmac_tdel,
+ [IPSET_TEST] = bitmap_ipmac_ttest,
+ },
+ .destroy = bitmap_ipmac_destroy,
+ .flush = bitmap_ipmac_flush,
+ .head = bitmap_ipmac_head,
+ .list = bitmap_ipmac_tlist,
+ .same_set = bitmap_ipmac_same_set,
+};
+
+static void
+bitmap_ipmac_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_ipmac *map = set->data;
+ struct ipmac_telem *elem;
+ u32 id, last = map->last_ip - map->first_ip;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id <= last; id++) {
+ elem = bitmap_ipmac_elem(map, id);
+ if (elem->match == MAC_FILLED &&
+ ip_set_timeout_expired(elem->timeout))
+ elem->match = MAC_EMPTY;
+ }
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_ipmac_gc_init(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_ipmac_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip,mac type of sets */
+
+static bool
+init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+ u32 first_ip, u32 last_ip)
+{
+ map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+ map->last_ip = last_ip;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_INET;
+
+ return true;
+}
+
+static int
+bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+{
+ u32 first_ip, last_ip, elements;
+ struct bitmap_ipmac *map;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+ if (ret)
+ return ret;
+ if (first_ip > last_ip) {
+ u32 tmp = first_ip;
+
+ first_ip = last_ip;
+ last_ip = tmp;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr >= 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ last_ip = first_ip | ~ip_set_hostmask(cidr);
+ } else
+ return -IPSET_ERR_PROTOCOL;
+
+ elements = last_ip - first_ip + 1;
+
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->dsize = sizeof(struct ipmac_telem);
+
+ if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = &bitmap_tipmac;
+
+ bitmap_ipmac_gc_init(set);
+ } else {
+ map->dsize = sizeof(struct ipmac_elem);
+
+ if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+ set->variant = &bitmap_ipmac;
+
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_ipmac_type = {
+ .name = "bitmap:ip,mac",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_INET,
+ .revision = 0,
+ .create = bitmap_ipmac_create,
+ .create_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_ETHER] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_ipmac_init(void)
+{
+ return ip_set_type_register(&bitmap_ipmac_type);
+}
+
+static void __exit
+bitmap_ipmac_fini(void)
+{
+ ip_set_type_unregister(&bitmap_ipmac_type);
+}
+
+module_init(bitmap_ipmac_init);
+module_exit(bitmap_ipmac_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
new file mode 100644
index 000000000000..165f09b1a9cb
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -0,0 +1,515 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:port type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:port type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:port");
+
+/* Type structure */
+struct bitmap_port {
+ void *members; /* the set members */
+ u16 first_port; /* host byte order, included in range */
+ u16 last_port; /* host byte order, included in range */
+ size_t memsize; /* members size */
+ u32 timeout; /* timeout parameter */
+ struct timer_list gc; /* garbage collection */
+};
+
+/* Base variant */
+
+static int
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (test_and_set_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (!test_and_clear_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_port_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *atd, *nested;
+ u16 id, first = cb->args[2];
+ u16 last = map->last_port - map->first_port;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ if (!test_bit(id, map->members))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_port *map = set->data;
+ const unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ if (ip_set_timeout_test(members[id]))
+ return -IPSET_ERR_EXIST;
+
+ members[id] = ip_set_timeout_set(timeout);
+
+ return 0;
+}
+
+static int
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+ int ret = -IPSET_ERR_EXIST;
+
+ if (ip_set_timeout_test(members[id]))
+ ret = 0;
+
+ members[id] = IPSET_ELEM_UNSET;
+ return ret;
+}
+
+static int
+bitmap_port_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *adt, *nested;
+ u16 id, first = cb->args[2];
+ u16 last = map->last_port - map->first_port;
+ const unsigned long *members = map->members;
+
+ adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!adt)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ if (!ip_set_timeout_test(members[id]))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, adt);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id));
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id])));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, adt);
+
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, adt);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_port *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ __be16 __port;
+ u16 port = 0;
+
+ if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+ return -EINVAL;
+
+ port = ntohs(__port);
+
+ if (port < map->first_port || port > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ port -= map->first_port;
+
+ return adtfn(set, &port, map->timeout);
+}
+
+static int
+bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct bitmap_port *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 timeout = map->timeout;
+ u32 port; /* u32 so the loop below cannot wrap at port 65535 */
+ u16 id, port_to;
+ int ret = 0;
+
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+ if (port < map->first_port || port > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ id = port - map->first_port;
+ return adtfn(set, &id, timeout);
+ }
+
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to) {
+ swap(port, port_to);
+ if (port < map->first_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+ }
+ } else
+ port_to = port;
+
+ if (port_to > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ for (; port <= port_to; port++) {
+ id = port - map->first_port;
+ ret = adtfn(set, &id, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static void
+bitmap_port_destroy(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_port_flush(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_port *x = a->data;
+ const struct bitmap_port *y = b->data;
+
+ return x->first_port == y->first_port &&
+ x->last_port == y->last_port &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_port = {
+ .kadt = bitmap_port_kadt,
+ .uadt = bitmap_port_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_port_add,
+ [IPSET_DEL] = bitmap_port_del,
+ [IPSET_TEST] = bitmap_port_test,
+ },
+ .destroy = bitmap_port_destroy,
+ .flush = bitmap_port_flush,
+ .head = bitmap_port_head,
+ .list = bitmap_port_list,
+ .same_set = bitmap_port_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tport = {
+ .kadt = bitmap_port_kadt,
+ .uadt = bitmap_port_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_port_tadd,
+ [IPSET_DEL] = bitmap_port_tdel,
+ [IPSET_TEST] = bitmap_port_ttest,
+ },
+ .destroy = bitmap_port_destroy,
+ .flush = bitmap_port_flush,
+ .head = bitmap_port_head,
+ .list = bitmap_port_tlist,
+ .same_set = bitmap_port_same_set,
+};
+
+static void
+bitmap_port_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_port *map = set->data;
+ unsigned long *table = map->members;
+ u32 id; /* u32 so the loop below cannot wrap at the last id */
+ u16 last = map->last_port - map->first_port;
+
+ /* We run in parallel with other readers (test element),
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id <= last; id++)
+ if (ip_set_timeout_expired(table[id]))
+ table[id] = IPSET_ELEM_UNSET;
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_port_gc_init(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_port_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:port type of sets */
+
+static bool
+init_map_port(struct ip_set *set, struct bitmap_port *map,
+ u16 first_port, u16 last_port)
+{
+ map->members = ip_set_alloc(map->memsize);
+ if (!map->members)
+ return false;
+ map->first_port = first_port;
+ map->last_port = last_port;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_UNSPEC;
+
+ return true;
+}
+
+static int
+bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+{
+ struct bitmap_port *map;
+ u16 first_port, last_port;
+
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+ last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (first_port > last_port) {
+ u16 tmp = first_port;
+
+ first_port = last_port;
+ last_port = tmp;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->memsize = (last_port - first_port + 1)
+ * sizeof(unsigned long);
+
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ set->variant = &bitmap_tport;
+
+ bitmap_port_gc_init(set);
+ } else {
+ map->memsize = bitmap_bytes(0, last_port - first_port);
+ pr_debug("memsize: %zu\n", map->memsize);
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ set->variant = &bitmap_port;
+ }
+ return 0;
+}
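For orientation, a minimal userspace sketch, not part of the patch, approximating the two memsize computations above. It assumes a 64-bit unsigned long and that bitmap_bytes() rounds the bit count up to whole longs, as BITS_TO_LONGS() does; the port range 1000-1999 is hypothetical. The timeout variant stores one unsigned long of expiry data per port, the plain variant a single bit per port.

	/* Illustrative only -- mirrors the memsize arithmetic above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int first_port = 1000, last_port = 1999;
		unsigned int nports = last_port - first_port + 1;
		size_t lsize = sizeof(unsigned long);
		size_t timeout_memsize = nports * lsize;
		size_t bitmap_memsize = ((nports + 8 * lsize - 1) / (8 * lsize)) * lsize;

		printf("timeout variant: %zu bytes, plain bitmap: %zu bytes\n",
		       timeout_memsize, bitmap_memsize);
		return 0;
	}

On a 64-bit box this prints 8000 bytes versus 128 bytes. A set exercising this create path would typically be made with something like "ipset create foo bitmap:port range 1000-1999", shown here only as an illustration.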
+
+static struct ip_set_type bitmap_port_type = {
+ .name = "bitmap:port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = bitmap_port_create,
+ .create_policy = {
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_port_init(void)
+{
+ return ip_set_type_register(&bitmap_port_type);
+}
+
+static void __exit
+bitmap_port_fini(void)
+{
+ ip_set_type_unregister(&bitmap_port_type);
+}
+
+module_init(bitmap_port_init);
+module_exit(bitmap_port_fini);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
new file mode 100644
index 000000000000..8b1a54c1e400
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -0,0 +1,1671 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+static LIST_HEAD(ip_set_type_list); /* all registered set types */
+static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
+
+static struct ip_set **ip_set_list; /* all individual sets */
+static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+
+#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+
+static unsigned int max_sets;
+
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("core IP set support");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+/*
+ * The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+ * serialized by ip_set_type_mutex.
+ */
+
+static inline void
+ip_set_type_lock(void)
+{
+ mutex_lock(&ip_set_type_mutex);
+}
+
+static inline void
+ip_set_type_unlock(void)
+{
+ mutex_unlock(&ip_set_type_mutex);
+}
+
+/* Register and deregister settype */
+
+static struct ip_set_type *
+find_set_type(const char *name, u8 family, u8 revision)
+{
+ struct ip_set_type *type;
+
+ list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ if (STREQ(type->name, name) &&
+ (type->family == family || type->family == AF_UNSPEC) &&
+ type->revision == revision)
+ return type;
+ return NULL;
+}
+
+/* Unlock, try to load a set type module and lock again */
+static int
+try_to_load_type(const char *name)
+{
+ nfnl_unlock();
+ pr_debug("try to load ip_set_%s\n", name);
+ if (request_module("ip_set_%s", name) < 0) {
+ pr_warning("Can't find ip_set type %s\n", name);
+ nfnl_lock();
+ return -IPSET_ERR_FIND_TYPE;
+ }
+ nfnl_lock();
+ return -EAGAIN;
+}
+
+/* Find a set type and reference it */
+static int
+find_set_type_get(const char *name, u8 family, u8 revision,
+ struct ip_set_type **found)
+{
+ rcu_read_lock();
+ *found = find_set_type(name, family, revision);
+ if (*found) {
+ int err = !try_module_get((*found)->me);
+ rcu_read_unlock();
+ return err ? -EFAULT : 0;
+ }
+ rcu_read_unlock();
+
+ return try_to_load_type(name);
+}
+
+/* Find a given set type by name and family.
+ * On success, the minimum and maximum supported revisions are
+ * filled in.
+ */
+static int
+find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+{
+ struct ip_set_type *type;
+ bool found = false;
+
+ *min = 255; *max = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ if (STREQ(type->name, name) &&
+ (type->family == family || type->family == AF_UNSPEC)) {
+ found = true;
+ if (type->revision < *min)
+ *min = type->revision;
+ if (type->revision > *max)
+ *max = type->revision;
+ }
+ rcu_read_unlock();
+ if (found)
+ return 0;
+
+ return try_to_load_type(name);
+}
+
+#define family_name(f) ((f) == AF_INET ? "inet" : \
+ (f) == AF_INET6 ? "inet6" : "any")
+
+/* Register a set type structure. The type is identified by
+ * the unique triple of name, family and revision.
+ */
+int
+ip_set_type_register(struct ip_set_type *type)
+{
+ int ret = 0;
+
+ if (type->protocol != IPSET_PROTOCOL) {
+ pr_warning("ip_set type %s, family %s, revision %u uses "
+ "wrong protocol version %u (want %u)\n",
+ type->name, family_name(type->family),
+ type->revision, type->protocol, IPSET_PROTOCOL);
+ return -EINVAL;
+ }
+
+ ip_set_type_lock();
+ if (find_set_type(type->name, type->family, type->revision)) {
+ /* Duplicate! */
+ pr_warning("ip_set type %s, family %s, revision %u "
+ "already registered!\n", type->name,
+ family_name(type->family), type->revision);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ list_add_rcu(&type->list, &ip_set_type_list);
+ pr_debug("type %s, family %s, revision %u registered.\n",
+ type->name, family_name(type->family), type->revision);
+unlock:
+ ip_set_type_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_type_register);
+
+/* Unregister a set type. There's a small race with ip_set_create */
+void
+ip_set_type_unregister(struct ip_set_type *type)
+{
+ ip_set_type_lock();
+ if (!find_set_type(type->name, type->family, type->revision)) {
+ pr_warning("ip_set type %s, family %s, revision %u "
+ "not registered\n", type->name,
+ family_name(type->family), type->revision);
+ goto unlock;
+ }
+ list_del_rcu(&type->list);
+ pr_debug("type %s, family %s, revision %u unregistered.\n",
+ type->name, family_name(type->family), type->revision);
+unlock:
+ ip_set_type_unlock();
+
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(ip_set_type_unregister);
+
+/* Utility functions */
+void *
+ip_set_alloc(size_t size)
+{
+ void *members = NULL;
+
+ if (size < KMALLOC_MAX_SIZE)
+ members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ if (members) {
+ pr_debug("%p: allocated with kmalloc\n", members);
+ return members;
+ }
+
+ members = vzalloc(size);
+ if (!members)
+ return NULL;
+ pr_debug("%p: allocated with vmalloc\n", members);
+
+ return members;
+}
+EXPORT_SYMBOL_GPL(ip_set_alloc);
+
+void
+ip_set_free(void *members)
+{
+ pr_debug("%p: free with %s\n", members,
+ is_vmalloc_addr(members) ? "vfree" : "kfree");
+ if (is_vmalloc_addr(members))
+ vfree(members);
+ else
+ kfree(members);
+}
+EXPORT_SYMBOL_GPL(ip_set_free);
+
+static inline bool
+flag_nested(const struct nlattr *nla)
+{
+ return nla->nla_type & NLA_F_NESTED;
+}
+
+static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+ [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
+ [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr) },
+};
+
+int
+ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
+{
+ struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+ if (unlikely(!flag_nested(nla)))
+ return -IPSET_ERR_PROTOCOL;
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
+ return -IPSET_ERR_PROTOCOL;
+
+ *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
+
+int
+ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
+{
+ struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+ if (unlikely(!flag_nested(nla)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
+ return -IPSET_ERR_PROTOCOL;
+
+ memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
+ sizeof(struct in6_addr));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+
+/*
+ * Creating/destroying/renaming/swapping affect the existence and
+ * the properties of a set. All of these can be executed from userspace
+ * only and are serialized by the nfnl mutex, taken indirectly from
+ * nfnetlink.
+ *
+ * Sets are identified by their index in ip_set_list and the index
+ * is used by the external references (set/SET netfilter modules).
+ *
+ * The set behind an index can be changed only by swapping, and only
+ * from userspace.
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+ atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+ atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Add, del and test set entries from kernel.
+ *
+ * The set behind the index must exist and must be referenced
+ * so it can't be destroyed (or changed) under our feet.
+ */
+
+int
+ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret = 0;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ read_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+ read_unlock_bh(&set->lock);
+
+ if (ret == -EAGAIN) {
+ /* Type requests element to be completed */
+ pr_debug("element must be competed, ADD is triggered\n");
+ write_lock_bh(&set->lock);
+ set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+ write_unlock_bh(&set->lock);
+ ret = 1;
+ }
+
+ /* Convert error codes to nomatch */
+ return (ret < 0 ? 0 : ret);
+}
+EXPORT_SYMBOL_GPL(ip_set_test);
+
+int
+ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ write_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+ write_unlock_bh(&set->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_add);
+
+int
+ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret = 0;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ write_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+ write_unlock_bh(&set->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_del);
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex must already be held.
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name, struct ip_set **set)
+{
+ ip_set_id_t i, index = IPSET_INVALID_ID;
+ struct ip_set *s;
+
+ for (i = 0; i < ip_set_max; i++) {
+ s = ip_set_list[i];
+ if (s != NULL && STREQ(s->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ *set = s;
+ }
+ }
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_byname);
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex must already be held.
+ */
+void
+ip_set_put_byindex(ip_set_id_t index)
+{
+ if (ip_set_list[index] != NULL) {
+ BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ __ip_set_put(index);
+ }
+}
+EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+/*
+ * Get the name of a set behind a set index.
+ * We assume the set is referenced, so it does exist and
+ * can't be destroyed. Being referenced, the set cannot be
+ * renamed either.
+ *
+ * The nfnl mutex must already be held.
+ */
+const char *
+ip_set_name_byindex(ip_set_id_t index)
+{
+ const struct ip_set *set = ip_set_list[index];
+
+ BUG_ON(set == NULL);
+ BUG_ON(atomic_read(&set->ref) == 0);
+
+ /* Referenced, so it's safe */
+ return set->name;
+}
+EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+/*
+ * Routines to call by external subsystems, which do not
+ * call nfnl_lock for us.
+ */
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken inside the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get(const char *name)
+{
+ struct ip_set *s;
+ ip_set_id_t index;
+
+ nfnl_lock();
+ index = ip_set_get_byname(name, &s);
+ nfnl_unlock();
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
+
+/*
+ * Find set by index, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken inside the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get_byindex(ip_set_id_t index)
+{
+ if (index >= ip_set_max)
+ return IPSET_INVALID_ID;
+
+ nfnl_lock();
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IPSET_INVALID_ID;
+ nfnl_unlock();
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex is taken inside the function.
+ */
+void
+ip_set_nfnl_put(ip_set_id_t index)
+{
+ nfnl_lock();
+ if (ip_set_list[index] != NULL) {
+ BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ __ip_set_put(index);
+ }
+ nfnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
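Taken together, the helpers exported above form the kernel-facing API: resolve a set to an index, keep a reference while packets are matched against it, and drop the reference on teardown. A rough sketch of that calling pattern follows; it is illustrative only, the example_* names are hypothetical, and only the ip_set_*() calls and constants come from this file.

	static ip_set_id_t example_index;

	static int example_checkentry(const char *setname)
	{
		example_index = ip_set_nfnl_get(setname);	/* takes a reference */
		return example_index == IPSET_INVALID_ID ? -ENOENT : 0;
	}

	static bool example_match(const struct sk_buff *skb)
	{
		/* one-dimensional IPv4 test against the referenced set */
		return ip_set_test(example_index, skb, AF_INET,
				   IPSET_DIM_ONE, 0) > 0;
	}

	static void example_destroy(void)
	{
		ip_set_nfnl_put(example_index);		/* drops the reference */
	}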
+
+/*
+ * Communication protocol with userspace over netlink.
+ *
+ * We are already serialized by nfnl_lock here.
+ */
+
+static inline bool
+protocol_failed(const struct nlattr * const tb[])
+{
+ return !tb[IPSET_ATTR_PROTOCOL] ||
+ nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+}
+
+static inline u32
+flag_exist(const struct nlmsghdr *nlh)
+{
+ return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
+}
+
+static struct nlmsghdr *
+start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+ enum ipset_cmd cmd)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+
+ nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+ sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ return NULL;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_INET;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ return nlh;
+}
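The nlmsg_type built above follows the usual nfnetlink packing: the subsystem id sits in the high byte and the command in the low byte. A standalone sketch of the decomposition, with the numeric values (NFNL_SUBSYS_IPSET as 6, IPSET_CMD_LIST as 7) assumed from the headers added elsewhere in this series:

	#include <stdio.h>

	int main(void)
	{
		unsigned int subsys = 6;	/* NFNL_SUBSYS_IPSET, assumed */
		unsigned int cmd = 7;		/* IPSET_CMD_LIST, assumed */
		unsigned int type = cmd | (subsys << 8);

		printf("nlmsg_type 0x%04x -> subsys %u, cmd %u\n",
		       type, type >> 8, type & 0xff);
		return 0;
	}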
+
+/* Create a set */
+
+static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1},
+ [IPSET_ATTR_REVISION] = { .type = NLA_U8 },
+ [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
+ [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
+};
+
+static ip_set_id_t
+find_set_id(const char *name)
+{
+ ip_set_id_t i, index = IPSET_INVALID_ID;
+ const struct ip_set *set;
+
+ for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
+ set = ip_set_list[i];
+ if (set != NULL && STREQ(set->name, name))
+ index = i;
+ }
+ return index;
+}
+
+static inline struct ip_set *
+find_set(const char *name)
+{
+ ip_set_id_t index = find_set_id(name);
+
+ return index == IPSET_INVALID_ID ? NULL : ip_set_list[index];
+}
+
+static int
+find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+{
+ ip_set_id_t i;
+
+ *index = IPSET_INVALID_ID;
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL) {
+ if (*index == IPSET_INVALID_ID)
+ *index = i;
+ } else if (STREQ(name, ip_set_list[i]->name)) {
+ /* Name clash */
+ *set = ip_set_list[i];
+ return -EEXIST;
+ }
+ }
+ if (*index == IPSET_INVALID_ID)
+ /* No free slot remained */
+ return -IPSET_ERR_MAX_SETS;
+ return 0;
+}
+
+static int
+ip_set_create(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set, *clash;
+ ip_set_id_t index = IPSET_INVALID_ID;
+ struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+ const char *name, *typename;
+ u8 family, revision;
+ u32 flags = flag_exist(nlh);
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_TYPENAME] == NULL ||
+ attr[IPSET_ATTR_REVISION] == NULL ||
+ attr[IPSET_ATTR_FAMILY] == NULL ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA]))))
+ return -IPSET_ERR_PROTOCOL;
+
+ name = nla_data(attr[IPSET_ATTR_SETNAME]);
+ typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+ family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+ revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
+ pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
+ name, typename, family_name(family), revision);
+
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
+ */
+ set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
+ rwlock_init(&set->lock);
+ strlcpy(set->name, name, IPSET_MAXNAMELEN);
+ atomic_set(&set->ref, 0);
+ set->family = family;
+
+ /*
+ * Next, check that we know the type, and take
+ * a reference on the type, to make sure it stays available
+ * while constructing our new set.
+ *
+ * After referencing the type, we try to create the type
+ * specific part of the set without holding any locks.
+ */
+ ret = find_set_type_get(typename, family, revision, &(set->type));
+ if (ret)
+ goto out;
+
+ /*
+ * Without holding any locks, create the type-specific private part.
+ */
+ if (attr[IPSET_ATTR_DATA] &&
+ nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+ set->type->create_policy)) {
+ ret = -IPSET_ERR_PROTOCOL;
+ goto put_out;
+ }
+
+ ret = set->type->create(set, tb, flags);
+ if (ret != 0)
+ goto put_out;
+
+ /* BTW, ret==0 here. */
+
+ /*
+ * Here, we have a valid, constructed set and we are protected
+ * by nfnl_lock. Find the first free index in ip_set_list and
+ * check for name clashes.
+ */
+ if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
+ /* If it's the same set and IPSET_FLAG_EXIST was set, ignore the error */
+ if (ret == -EEXIST &&
+ (flags & IPSET_FLAG_EXIST) &&
+ STREQ(set->type->name, clash->type->name) &&
+ set->type->family == clash->type->family &&
+ set->type->revision == clash->type->revision &&
+ set->variant->same_set(set, clash))
+ ret = 0;
+ goto cleanup;
+ }
+
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
+ pr_debug("create: '%s' created with index %u!\n", set->name, index);
+ ip_set_list[index] = set;
+
+ return ret;
+
+cleanup:
+ set->variant->destroy(set);
+put_out:
+ module_put(set->type->me);
+out:
+ kfree(set);
+ return ret;
+}
+
+/* Destroy sets */
+
+static const struct nla_policy
+ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+ struct ip_set *set = ip_set_list[index];
+
+ pr_debug("set: %s\n", set->name);
+ ip_set_list[index] = NULL;
+
+ /* Must call it without holding any lock */
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+}
+
+static int
+ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ /* References are protected by the nfnl mutex */
+ if (!attr[IPSET_ATTR_SETNAME]) {
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL &&
+ (atomic_read(&ip_set_list[i]->ref)))
+ return -IPSET_ERR_BUSY;
+ }
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL)
+ ip_set_destroy_set(i);
+ }
+ } else {
+ i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (i == IPSET_INVALID_ID)
+ return -ENOENT;
+ else if (atomic_read(&ip_set_list[i]->ref))
+ return -IPSET_ERR_BUSY;
+
+ ip_set_destroy_set(i);
+ }
+ return 0;
+}
+
+/* Flush sets */
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+ pr_debug("set: %s\n", set->name);
+
+ write_lock_bh(&set->lock);
+ set->variant->flush(set);
+ write_unlock_bh(&set->lock);
+}
+
+static int
+ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (!attr[IPSET_ATTR_SETNAME]) {
+ for (i = 0; i < ip_set_max; i++)
+ if (ip_set_list[i] != NULL)
+ ip_set_flush_set(ip_set_list[i]);
+ } else {
+ i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (i == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ ip_set_flush_set(ip_set_list[i]);
+ }
+
+ return 0;
+}
+
+/* Rename a set */
+
+static const struct nla_policy
+ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static int
+ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ const char *name2;
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_SETNAME2] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+ if (atomic_read(&set->ref) != 0)
+ return -IPSET_ERR_REFERENCED;
+
+ name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL &&
+ STREQ(ip_set_list[i]->name, name2))
+ return -IPSET_ERR_EXIST_SETNAME2;
+ }
+ strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+ return 0;
+}
+
+/* Swap two sets so that name/index points to the other.
+ * References and set names are also swapped.
+ *
+ * We are protected by the nfnl mutex and references are
+ * manipulated only by holding the mutex. The kernel interfaces
+ * do not hold the mutex but the pointer settings are atomic
+ * so the ip_set_list always contains valid pointers to the sets.
+ */
+
+static int
+ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *from, *to;
+ ip_set_id_t from_id, to_id;
+ char from_name[IPSET_MAXNAMELEN];
+ u32 from_ref;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_SETNAME2] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (from_id == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2]));
+ if (to_id == IPSET_INVALID_ID)
+ return -IPSET_ERR_EXIST_SETNAME2;
+
+ from = ip_set_list[from_id];
+ to = ip_set_list[to_id];
+
+ /* Features must not change.
+ * Not an artificial restriction anymore, as we must prevent
+ * possible loops created by swapping in setlist type of sets. */
+ if (!(from->type->features == to->type->features &&
+ from->type->family == to->type->family))
+ return -IPSET_ERR_TYPE_MISMATCH;
+
+ /* No magic here: ref munging protected by the nfnl_lock */
+ strncpy(from_name, from->name, IPSET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
+
+ strncpy(from->name, to->name, IPSET_MAXNAMELEN);
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IPSET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
+
+ ip_set_list[from_id] = to;
+ ip_set_list[to_id] = from;
+
+ return 0;
+}
+
+/* List/save set data */
+
+#define DUMP_INIT 0L
+#define DUMP_ALL 1L
+#define DUMP_ONE 2L
+#define DUMP_LAST 3L
+
+static int
+ip_set_dump_done(struct netlink_callback *cb)
+{
+ if (cb->args[2]) {
+ pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
+ __ip_set_put((ip_set_id_t) cb->args[1]);
+ }
+ return 0;
+}
+
+static inline void
+dump_attrs(struct nlmsghdr *nlh)
+{
+ const struct nlattr *attr;
+ int rem;
+
+ pr_debug("dump nlmsg\n");
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
+ pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
+ }
+}
+
+static int
+dump_init(struct netlink_callback *cb)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
+ int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+ struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+ struct nlattr *attr = (void *)nlh + min_len;
+ ip_set_id_t index;
+
+ /* Second pass, so parser can't fail */
+ nla_parse(cda, IPSET_ATTR_CMD_MAX,
+ attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
+
+ /* cb->args[0] : dump single set/all sets
+ * [1] : set index
+ * [..]: type specific
+ */
+
+ if (!cda[IPSET_ATTR_SETNAME]) {
+ cb->args[0] = DUMP_ALL;
+ return 0;
+ }
+
+ index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ cb->args[0] = DUMP_ONE;
+ cb->args[1] = index;
+ return 0;
+}
+
+static int
+ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ ip_set_id_t index = IPSET_INVALID_ID, max;
+ struct ip_set *set = NULL;
+ struct nlmsghdr *nlh = NULL;
+ unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+ int ret = 0;
+
+ if (cb->args[0] == DUMP_INIT) {
+ ret = dump_init(cb);
+ if (ret < 0) {
+ nlh = nlmsg_hdr(cb->skb);
+ /* We have to create and send the error message
+ * manually :-( */
+ if (nlh->nlmsg_flags & NLM_F_ACK)
+ netlink_ack(cb->skb, nlh, ret);
+ return ret;
+ }
+ }
+
+ if (cb->args[1] >= ip_set_max)
+ goto out;
+
+ pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+ max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+ for (; cb->args[1] < max; cb->args[1]++) {
+ index = (ip_set_id_t) cb->args[1];
+ set = ip_set_list[index];
+ if (set == NULL) {
+ if (cb->args[0] == DUMP_ONE) {
+ ret = -ENOENT;
+ goto out;
+ }
+ continue;
+ }
+ /* When dumping all sets, we must dump "sorted"
+ * so that lists (unions of sets) are dumped last.
+ */
+ if (cb->args[0] != DUMP_ONE &&
+ !((cb->args[0] == DUMP_ALL) ^
+ (set->type->features & IPSET_DUMP_LAST)))
+ continue;
+ pr_debug("List set: %s\n", set->name);
+ if (!cb->args[2]) {
+ /* Start listing: make sure set won't be destroyed */
+ pr_debug("reference set\n");
+ __ip_set_get(index);
+ }
+ nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, flags,
+ IPSET_CMD_LIST);
+ if (!nlh) {
+ ret = -EMSGSIZE;
+ goto release_refcount;
+ }
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+ switch (cb->args[2]) {
+ case 0:
+ /* Core header data */
+ NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
+ set->type->name);
+ NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
+ set->family);
+ NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
+ set->type->revision);
+ ret = set->variant->head(set, skb);
+ if (ret < 0)
+ goto release_refcount;
+ /* Fall through and add elements */
+ default:
+ read_lock_bh(&set->lock);
+ ret = set->variant->list(set, skb, cb);
+ read_unlock_bh(&set->lock);
+ if (!cb->args[2]) {
+ /* Set is done, proceed with next one */
+ if (cb->args[0] == DUMP_ONE)
+ cb->args[1] = IPSET_INVALID_ID;
+ else
+ cb->args[1]++;
+ }
+ goto release_refcount;
+ }
+ }
+ goto out;
+
+nla_put_failure:
+ ret = -EFAULT;
+release_refcount:
+ /* If there was an error or set is done, release set */
+ if (ret || !cb->args[2]) {
+ pr_debug("release set %s\n", ip_set_list[index]->name);
+ __ip_set_put(index);
+ }
+
+ /* If we dump all sets, continue with dumping last ones */
+ if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
+ cb->args[0] = DUMP_LAST;
+
+out:
+ if (nlh) {
+ nlmsg_end(skb, nlh);
+ pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
+ dump_attrs(nlh);
+ }
+
+ return ret < 0 ? ret : skb->len;
+}
+
+static int
+ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ return netlink_dump_start(ctnl, skb, nlh,
+ ip_set_dump_start,
+ ip_set_dump_done);
+}
+
+/* Add, del and test */
+
+static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
+ [IPSET_ATTR_ADT] = { .type = NLA_NESTED },
+};
+
+static int
+call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+ struct nlattr *tb[], enum ipset_adt adt,
+ u32 flags, bool use_lineno)
+{
+ int ret, retried = 0;
+ u32 lineno = 0;
+ bool eexist = flags & IPSET_FLAG_EXIST;
+
+ do {
+ write_lock_bh(&set->lock);
+ ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+ write_unlock_bh(&set->lock);
+ } while (ret == -EAGAIN &&
+ set->variant->resize &&
+ (ret = set->variant->resize(set, retried++)) == 0);
+
+ if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+ return 0;
+ if (lineno && use_lineno) {
+ /* Error in restore/batch mode: send back lineno */
+ struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
+ struct sk_buff *skb2;
+ struct nlmsgerr *errmsg;
+ size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+ int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+ struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+ struct nlattr *cmdattr;
+ u32 *errline;
+
+ skb2 = nlmsg_new(payload, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+ rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
+ errmsg = nlmsg_data(rep);
+ errmsg->error = ret;
+ memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+ cmdattr = (void *)&errmsg->msg + min_len;
+
+ nla_parse(cda, IPSET_ATTR_CMD_MAX,
+ cmdattr, nlh->nlmsg_len - min_len,
+ ip_set_adt_policy);
+
+ errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+ *errline = lineno;
+
+ netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ /* Signal netlink not to send its ACK/errmsg. */
+ return -EINTR;
+ }
+
+ return ret;
+}
+
+static int
+ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ const struct nlattr *nla;
+ u32 flags = flag_exist(nlh);
+ bool use_lineno;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ !((attr[IPSET_ATTR_DATA] != NULL) ^
+ (attr[IPSET_ATTR_ADT] != NULL)) ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA])) ||
+ (attr[IPSET_ATTR_ADT] != NULL &&
+ (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+ attr[IPSET_ATTR_LINENO] == NULL))))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ use_lineno = !!attr[IPSET_ATTR_LINENO];
+ if (attr[IPSET_ATTR_DATA]) {
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
+ use_lineno);
+ } else {
+ int nla_rem;
+
+ nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+ memset(tb, 0, sizeof(tb));
+ if (nla_type(nla) != IPSET_ATTR_DATA ||
+ !flag_nested(nla) ||
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
+ flags, use_lineno);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int
+ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ const struct nlattr *nla;
+ u32 flags = flag_exist(nlh);
+ bool use_lineno;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ !((attr[IPSET_ATTR_DATA] != NULL) ^
+ (attr[IPSET_ATTR_ADT] != NULL)) ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA])) ||
+ (attr[IPSET_ATTR_ADT] != NULL &&
+ (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+ attr[IPSET_ATTR_LINENO] == NULL))))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ use_lineno = !!attr[IPSET_ATTR_LINENO];
+ if (attr[IPSET_ATTR_DATA]) {
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
+ use_lineno);
+ } else {
+ int nla_rem;
+
+ nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+ memset(tb, 0, sizeof(tb));
+ if (nla_type(nla) != IPSET_ATTR_DATA ||
+ !flag_nested(nla) ||
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
+ flags, use_lineno);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int
+ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_DATA] == NULL ||
+ !flag_nested(attr[IPSET_ATTR_DATA])))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+
+ read_lock_bh(&set->lock);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+ read_unlock_bh(&set->lock);
+ /* Userspace can't trigger element to be re-added */
+ if (ret == -EAGAIN)
+ ret = 1;
+
+ return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+}
+
+/* Get the header data of a set */
+
+static int
+ip_set_header(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ const struct ip_set *set;
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ ip_set_id_t index;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+ set = ip_set_list[index];
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_HEADER);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
+ NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get type data */
+
+static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
+};
+
+static int
+ip_set_type(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ u8 family, min, max;
+ const char *typename;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_TYPENAME] == NULL ||
+ attr[IPSET_ATTR_FAMILY] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+ typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+ ret = find_set_type_minmax(typename, family, &min, &max);
+ if (ret)
+ return ret;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_TYPE);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
+ NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+ nlmsg_end(skb2, nlh2);
+
+ pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get protocol version */
+
+static const struct nla_policy
+ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+};
+
+static int
+ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ int ret = 0;
+
+ if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_PROTOCOL);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+ [IPSET_CMD_CREATE] = {
+ .call = ip_set_create,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_create_policy,
+ },
+ [IPSET_CMD_DESTROY] = {
+ .call = ip_set_destroy,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_FLUSH] = {
+ .call = ip_set_flush,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_RENAME] = {
+ .call = ip_set_rename,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname2_policy,
+ },
+ [IPSET_CMD_SWAP] = {
+ .call = ip_set_swap,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname2_policy,
+ },
+ [IPSET_CMD_LIST] = {
+ .call = ip_set_dump,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_SAVE] = {
+ .call = ip_set_dump,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_ADD] = {
+ .call = ip_set_uadd,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_DEL] = {
+ .call = ip_set_udel,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_TEST] = {
+ .call = ip_set_utest,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_HEADER] = {
+ .call = ip_set_header,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_TYPE] = {
+ .call = ip_set_type,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_type_policy,
+ },
+ [IPSET_CMD_PROTOCOL] = {
+ .call = ip_set_protocol,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_protocol_policy,
+ },
+};
+
+static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
+ .name = "ip_set",
+ .subsys_id = NFNL_SUBSYS_IPSET,
+ .cb_count = IPSET_MSG_MAX,
+ .cb = ip_set_netlink_subsys_cb,
+};
+
+/* Interface to iptables/ip6tables */
+
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+{
+ unsigned *op;
+ void *data;
+ int copylen = *len, ret = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (optval != SO_IP_SET)
+ return -EBADF;
+ if (*len < sizeof(unsigned))
+ return -EINVAL;
+
+ data = vmalloc(*len);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, user, *len) != 0) {
+ ret = -EFAULT;
+ goto done;
+ }
+ op = (unsigned *) data;
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
+ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IPSET_PROTOCOL) {
+ ret = -EPROTO;
+ goto done;
+ }
+ }
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
+ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ req_version->version = IPSET_PROTOCOL;
+ ret = copy_to_user(user, req_version,
+ sizeof(struct ip_set_req_version));
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
+ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+ nfnl_lock();
+ req_get->set.index = find_set_id(req_get->set.name);
+ nfnl_unlock();
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
+ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set) ||
+ req_get->set.index >= ip_set_max) {
+ ret = -EINVAL;
+ goto done;
+ }
+ nfnl_lock();
+ strncpy(req_get->set.name,
+ ip_set_list[req_get->set.index]
+ ? ip_set_list[req_get->set.index]->name : "",
+ IPSET_MAXNAMELEN);
+ nfnl_unlock();
+ goto copy;
+ }
+ default:
+ ret = -EBADMSG;
+ goto done;
+ } /* end of switch(op) */
+
+copy:
+ ret = copy_to_user(user, data, copylen);
+
+done:
+ vfree(data);
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
+static struct nf_sockopt_ops so_set __read_mostly = {
+ .pf = PF_INET,
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
+ .owner = THIS_MODULE,
+};
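The sockopt handler above is the legacy interface iptables uses to translate set names to indices and back. A userspace sketch of a by-name lookup follows; it is illustrative only, and the include path, the layout of struct ip_set_req_get_set and the IP_SET_OP_GET_BYNAME constant are assumed to match the userspace header that accompanies this patch set.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/netfilter/ipset/ip_set.h>	/* assumed header path */

	int main(void)
	{
		struct ip_set_req_get_set req;
		socklen_t size = sizeof(req);
		int sock = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); /* needs CAP_NET_ADMIN */

		if (sock < 0)
			return 1;
		memset(&req, 0, sizeof(req));
		req.op = IP_SET_OP_GET_BYNAME;
		req.version = IPSET_PROTOCOL;
		strncpy(req.set.name, "testset", IPSET_MAXNAMELEN - 1);

		if (getsockopt(sock, IPPROTO_IP, SO_IP_SET, &req, &size) == 0)
			printf("index of 'testset': %u\n", req.set.index);
		close(sock);
		return 0;
	}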
+
+static int __init
+ip_set_init(void)
+{
+ int ret;
+
+ if (max_sets)
+ ip_set_max = max_sets;
+ if (ip_set_max >= IPSET_INVALID_ID)
+ ip_set_max = IPSET_INVALID_ID - 1;
+
+ ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
+ GFP_KERNEL);
+ if (!ip_set_list) {
+ pr_err("ip_set: Unable to create ip_set_list\n");
+ return -ENOMEM;
+ }
+
+ ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+ if (ret != 0) {
+ pr_err("ip_set: cannot register with nfnetlink.\n");
+ kfree(ip_set_list);
+ return ret;
+ }
+ ret = nf_register_sockopt(&so_set);
+ if (ret != 0) {
+ pr_err("SO_SET registry failed: %d\n", ret);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ kfree(ip_set_list);
+ return ret;
+ }
+
+ pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+ return 0;
+}
+
+static void __exit
+ip_set_fini(void)
+{
+ /* There can't be any existing set */
+ nf_unregister_sockopt(&so_set);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ kfree(ip_set_list);
+ pr_debug("these are the famous last words\n");
+}
+
+module_init(ip_set_init);
+module_exit(ip_set_fini);
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
new file mode 100644
index 000000000000..8d5227212686
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Get Layer-4 data from the packets */
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter/ipset/ip_set_getport.h>
+
+/* We must handle non-linear skbs */
+static bool
+get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
+ bool src, __be16 *port, u8 *proto)
+{
+ switch (protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph;
+ const struct tcphdr *th;
+
+ th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ /* No choice either */
+ return false;
+
+ *port = src ? th->source : th->dest;
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr _udph;
+ const struct udphdr *uh;
+
+ uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
+ if (uh == NULL)
+ /* No choice either */
+ return false;
+
+ *port = src ? uh->source : uh->dest;
+ break;
+ }
+ case IPPROTO_ICMP: {
+ struct icmphdr _ich;
+ const struct icmphdr *ic;
+
+ ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+ if (ic == NULL)
+ return false;
+
+ *port = (__force __be16)htons((ic->type << 8) | ic->code);
+ break;
+ }
+ case IPPROTO_ICMPV6: {
+ struct icmp6hdr _ich;
+ const struct icmp6hdr *ic;
+
+ ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+ if (ic == NULL)
+ return false;
+
+ *port = (__force __be16)
+ htons((ic->icmp6_type << 8) | ic->icmp6_code);
+ break;
+ }
+ default:
+ break;
+ }
+ *proto = protocol;
+
+ return true;
+}
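As the ICMP and ICMPv6 branches above show, the type/code pair is folded into the 16-bit "port" slot so ICMP can be matched with the port-based set types. A tiny standalone illustration, not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int type = 8, code = 0;	/* ICMP echo request */
		unsigned int pseudo = (type << 8) | code;

		/* get_port() stores htons(pseudo); here that is 0x0800, i.e. 2048 */
		printf("pseudo-port: 0x%04x (%u)\n", pseudo, pseudo);
		return 0;
	}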
+
+bool
+ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ unsigned int protooff = ip_hdrlen(skb);
+ int protocol = iph->protocol;
+
+ /* See comments at tcp_match in ip_tables.c */
+ if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+ return false;
+
+ return get_port(skb, protocol, protooff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+bool
+ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ int protoff;
+ u8 nexthdr;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+ if (protoff < 0)
+ return false;
+
+ return get_port(skb, nexthdr, protoff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
+#endif
+
+bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+ bool ret;
+ u8 proto;
+
+ switch (pf) {
+ case AF_INET:
+ ret = ip_set_get_ip4_port(skb, src, port, &proto);
+ break;
+ case AF_INET6:
+ ret = ip_set_get_ip6_port(skb, src, port, &proto);
+ break;
+ default:
+ return false;
+ }
+ if (!ret)
+ return ret;
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
new file mode 100644
index 000000000000..43bcce200129
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip");
+
+/* Type specific function prefix */
+#define TYPE hash_ip
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ip4_same_set hash_ip_same_set
+#define hash_ip6_same_set hash_ip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ip4_elem {
+ __be32 ip;
+};
+
+/* Member elements with timeout support */
+struct hash_ip4_telem {
+ __be32 ip;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
+ const struct hash_ip4_elem *ip2)
+{
+ return ip1->ip == ip2->ip;
+}
+
+static inline bool
+hash_ip4_data_isnull(const struct hash_ip4_elem *elem)
+{
+ return elem->ip == 0;
+}
+
+static inline void
+hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
+{
+ dst->ip = src->ip;
+}
+
+/* Zero valued IP addresses cannot be stored */
+static inline void
+hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
+{
+ elem->ip = 0;
+}
+
+static inline bool
+hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+ const struct hash_ip4_telem *tdata =
+ (const struct hash_ip4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_NETMASK
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
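The include above is used as a template: ip_set_ahash.h generates the generic hash implementation from the PF- and HOST_MASK-dependent names defined beforehand, and it is included a second time further below with PF set to 6 to produce the IPv6 variant. A toy standalone illustration of the same token-pasting idea (the macro names here are hypothetical, not taken from the ahash header):

	#include <stdio.h>

	#define CONCAT_(a, b, c)	a##b##c
	#define CONCAT(a, b, c)		CONCAT_(a, b, c)
	#define EXAMPLE_FN		CONCAT(hash_ip, PF, _example)

	#define PF 4
	static int EXAMPLE_FN(void) { return PF; }	/* hash_ip4_example */
	#undef PF

	#define PF 6
	static int EXAMPLE_FN(void) { return PF; }	/* hash_ip6_example */
	#undef PF

	int main(void)
	{
		printf("%d %d\n", hash_ip4_example(), hash_ip6_example());
		return 0;
	}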
+
+static int
+hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ __be32 ip;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+ ip &= ip_set_netmask(h->netmask);
+ if (ip == 0)
+ return -EINVAL;
+
+ return adtfn(set, &ip, h->timeout);
+}
+
+static int
+hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 ip, ip_to, hosts, timeout = h->timeout;
+ __be32 nip;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ip &= ip_set_hostmask(h->netmask);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ nip = htonl(ip);
+ if (nip == 0)
+ return -IPSET_ERR_HASH_ELEM;
+ return adtfn(set, &nip, timeout);
+ }
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+
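+	/* hosts = addresses per netmask-sized block; store one entry per block */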
+ for (; !before(ip_to, ip); ip += hosts) {
+ nip = htonl(ip);
+ if (nip == 0)
+ return -IPSET_ERR_HASH_ELEM;
+ ret = adtfn(set, &nip, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout &&
+ x->netmask == y->netmask;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ip6_elem {
+ union nf_inet_addr ip;
+};
+
+struct hash_ip6_telem {
+ union nf_inet_addr ip;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
+ const struct hash_ip6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
+}
+
+static inline bool
+hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
+{
+ return ipv6_addr_any(&elem->ip.in6);
+}
+
+static inline void
+hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
+{
+ ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+}
+
+static inline void
+hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
+{
+ ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static bool
+hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+ const struct hash_ip6_telem *e =
+ (const struct hash_ip6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
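+/* Re-include the generator with PF 6 and HOST_MASK 128 to emit the IPv6 variant */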
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ union nf_inet_addr ip;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+ ip6_netmask(&ip, h->netmask);
+ if (ipv6_addr_any(&ip.in6))
+ return -EINVAL;
+
+ return adtfn(set, &ip, h->timeout);
+}
+
+static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+};
+
+static int
+hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ union nf_inet_addr ip;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ip6_netmask(&ip, h->netmask);
+ if (ipv6_addr_any(&ip.in6))
+ return -IPSET_ERR_HASH_ELEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &ip, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 netmask, hbits;
+ struct ip_set_hash *h;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+ netmask = set->family == AF_INET ? 32 : 128;
+ pr_debug("Create set %s with family %s\n",
+ set->name, set->family == AF_INET ? "inet" : "inet6");
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ if (tb[IPSET_ATTR_NETMASK]) {
+ netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+ if ((set->family == AF_INET && netmask > 32) ||
+ (set->family == AF_INET6 && netmask > 128) ||
+ netmask == 0)
+ return -IPSET_ERR_INVALID_NETMASK;
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ h->netmask = netmask;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
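+	/* Convert the requested hash size to table bits: 2^hbits buckets */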
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ip4_tvariant : &hash_ip6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ip4_gc_init(set);
+ else
+ hash_ip6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ip4_variant : &hash_ip6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ip_type __read_mostly = {
+ .name = "hash:ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ip_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ip_init(void)
+{
+ return ip_set_type_register(&hash_ip_type);
+}
+
+static void __exit
+hash_ip_fini(void)
+{
+ ip_set_type_unregister(&hash_ip_type);
+}
+
+module_init(hash_ip_init);
+module_exit(hash_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
new file mode 100644
index 000000000000..adbe787ea5dc
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -0,0 +1,544 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port");
+
+/* Type specific function prefix */
+#define TYPE hash_ipport
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipport4_same_set hash_ipport_same_set
+#define hash_ipport6_same_set hash_ipport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipport4_telem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
+ const struct hash_ipport4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
+ const struct hash_ipport4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->port = src->port;
+ dst->proto = src->proto;
+}
+
+static inline void
+hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipport4_data_list(struct sk_buff *skb,
+ const struct hash_ipport4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipport4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipport4_elem *data)
+{
+ const struct hash_ipport4_telem *tdata =
+ (const struct hash_ipport4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport4_elem data = { };
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport4_elem data = { };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
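+	/* Only TCP, UDP and ICMP carry a meaningful port value here; for
+	 * any other protocol the port is zeroed before storing. */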
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
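+	/* Expand the request: add every (ip, port) pair in the given ranges */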
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipport6_telem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
+ const struct hash_ipport6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
+ const struct hash_ipport6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipport6_data_list(struct sk_buff *skb,
+ const struct hash_ipport6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipport6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipport6_elem *data)
+{
+ const struct hash_ipport6_telem *e =
+ (const struct hash_ipport6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport6_elem data = { };
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport6_elem data = { };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port type of sets */
+
+static int
+hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipport4_gc_init(set);
+ else
+ hash_ipport6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipport4_variant : &hash_ipport6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipport_type __read_mostly = {
+ .name = "hash:ip,port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipport_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipport_init(void)
+{
+ return ip_set_type_register(&hash_ipport_type);
+}
+
+static void __exit
+hash_ipport_fini(void)
+{
+ ip_set_type_unregister(&hash_ipport_type);
+}
+
+module_init(hash_ipport_init);
+module_exit(hash_ipport_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
new file mode 100644
index 000000000000..22e23abb86c6
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -0,0 +1,562 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,ip");
+
+/* Type specific function prefix */
+#define TYPE hash_ipportip
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportip4_same_set hash_ipportip_same_set
+#define hash_ipportip6_same_set hash_ipportip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportip4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportip4_telem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
+ const struct hash_ipportip4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->ip2 == ip2->ip2 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
+ const struct hash_ipportip4_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportip4_data_list(struct sk_buff *skb,
+ const struct hash_ipportip4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportip4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportip4_elem *data)
+{
+ const struct hash_ipportip4_telem *tdata =
+ (const struct hash_ipportip4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip4_elem data = { };
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip4_elem data = { };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportip6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipportip6_telem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
+ const struct hash_ipportip6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
+ const struct hash_ipportip6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportip6_data_list(struct sk_buff *skb,
+ const struct hash_ipportip6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportip6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportip6_elem *data)
+{
+ const struct hash_ipportip6_telem *e =
+ (const struct hash_ipportip6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip6_elem data = { };
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip6_elem data = { };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port,ip type of sets */
+
+static int
+hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipportip4_gc_init(set);
+ else
+ hash_ipportip6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipportip4_variant : &hash_ipportip6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipportip_type __read_mostly = {
+ .name = "hash:ip,port,ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+ .dimension = IPSET_DIM_THREE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipportip_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipportip_init(void)
+{
+ return ip_set_type_register(&hash_ipportip_type);
+}
+
+static void __exit
+hash_ipportip_fini(void)
+{
+ ip_set_type_unregister(&hash_ipportip_type);
+}
+
+module_init(hash_ipportip_init);
+module_exit(hash_ipportip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
new file mode 100644
index 000000000000..6033e8b54bbd
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -0,0 +1,628 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,net");
+
+/* Type specific function prefix */
+#define TYPE hash_ipportnet
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportnet4_same_set hash_ipportnet_same_set
+#define hash_ipportnet6_same_set hash_ipportnet_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportnet4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportnet4_telem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
+ const struct hash_ipportnet4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->ip2 == ip2->ip2 &&
+ ip1->cidr == ip2->cidr &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
+ const struct hash_ipportnet4_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
+{
+ elem->ip2 &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+static inline void
+hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportnet4_data_list(struct sk_buff *skb,
+ const struct hash_ipportnet4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportnet4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportnet4_elem *data)
+{
+ const struct hash_ipportnet4_telem *tdata =
+ (const struct hash_ipportnet4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
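+/* These macros tune the code generated by ip_set_ahash.h: WITH_NETS keeps
+ * per-prefix-length element counts so lookups can try each network size
+ * stored in the set. */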
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet4_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+ data.ip2 &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR2])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ data.ip2 &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportnet6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+};
+
+struct hash_ipportnet6_telem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
+ const struct hash_ipportnet6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+ ip1->cidr == ip2->cidr &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
+ const struct hash_ipportnet6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip2, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_ipportnet6_data_list(struct sk_buff *skb,
+ const struct hash_ipportnet6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportnet6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportnet6_elem *data)
+{
+ const struct hash_ipportnet6_telem *e =
+ (const struct hash_ipportnet6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet6_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+ ip6_netmask(&data.ip2, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR2])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&data.ip2, data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port,net type of sets */
+
+static int
+hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipportnet4_tvariant
+ : &hash_ipportnet6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipportnet4_gc_init(set);
+ else
+ hash_ipportnet6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipportnet_type __read_mostly = {
+ .name = "hash:ip,port,net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+ .dimension = IPSET_DIM_THREE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipportnet_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipportnet_init(void)
+{
+ return ip_set_type_register(&hash_ipportnet_type);
+}
+
+static void __exit
+hash_ipportnet_fini(void)
+{
+ ip_set_type_unregister(&hash_ipportnet_type);
+}
+
+module_init(hash_ipportnet_init);
+module_exit(hash_ipportnet_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
new file mode 100644
index 000000000000..c4db202b7da4
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -0,0 +1,458 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net type of IP sets");
+MODULE_ALIAS("ip_set_hash:net");
+
+/* Type specific function prefix */
+#define TYPE hash_net
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_net4_same_set hash_net_same_set
+#define hash_net6_same_set hash_net_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_net4_elem {
+ __be32 ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_net4_telem {
+ __be32 ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_net4_data_equal(const struct hash_net4_elem *ip1,
+ const struct hash_net4_elem *ip2)
+{
+ return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net4_data_isnull(const struct hash_net4_elem *elem)
+{
+ return elem->cidr == 0;
+}
+
+static inline void
+hash_net4_data_copy(struct hash_net4_elem *dst,
+ const struct hash_net4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
+{
+ elem->ip &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+/* Zero CIDR values cannot be stored */
+static inline void
+hash_net4_data_zero_out(struct hash_net4_elem *elem)
+{
+ elem->cidr = 0;
+}
+
+static bool
+hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+ const struct hash_net4_telem *tdata =
+ (const struct hash_net4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net4_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ data.ip &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net4_elem data = { .cidr = HOST_MASK };
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ data.ip &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_net6_elem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+};
+
+struct hash_net6_telem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_net6_data_equal(const struct hash_net6_elem *ip1,
+ const struct hash_net6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net6_data_isnull(const struct hash_net6_elem *elem)
+{
+ return elem->cidr == 0;
+}
+
+static inline void
+hash_net6_data_copy(struct hash_net6_elem *dst,
+ const struct hash_net6_elem *src)
+{
+ ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net6_data_zero_out(struct hash_net6_elem *elem)
+{
+ elem->cidr = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+ const struct hash_net6_telem *e =
+ (const struct hash_net6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net6_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6_netmask(&data.ip, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net6_elem data = { .cidr = HOST_MASK };
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&data.ip, data.cidr);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net type of sets */
+
+static int
+hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ struct ip_set_hash *h;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
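+	/* The array allocated above holds one ip_set_hash_nets slot per
+	 * possible prefix length (32 for IPv4, 128 for IPv6); the code
+	 * generated from ip_set_ahash.h uses it to record which cidr
+	 * values are stored, so lookups know which network sizes to try. */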
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_net4_tvariant : &hash_net6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_net4_gc_init(set);
+ else
+ hash_net6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_net4_variant : &hash_net6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_net_type __read_mostly = {
+ .name = "hash:net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_net_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_net_init(void)
+{
+ return ip_set_type_register(&hash_net_type);
+}
+
+static void __exit
+hash_net_fini(void)
+{
+ ip_set_type_unregister(&hash_net_type);
+}
+
+module_init(hash_net_init);
+module_exit(hash_net_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
new file mode 100644
index 000000000000..34a165626ee9
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -0,0 +1,578 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,port");
+
+/* Type specific function prefix */
+#define TYPE hash_netport
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netport4_same_set hash_netport_same_set
+#define hash_netport6_same_set hash_netport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_netport4_telem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
+ const struct hash_netport4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport4_data_isnull(const struct hash_netport4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_netport4_data_copy(struct hash_netport4_elem *dst,
+ const struct hash_netport4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->port = src->port;
+ dst->proto = src->proto;
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
+{
+ elem->ip &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+static inline void
+hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_netport4_data_list(struct sk_buff *skb,
+ const struct hash_netport4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_netport4_data_tlist(struct sk_buff *skb,
+ const struct hash_netport4_elem *data)
+{
+ const struct hash_netport4_telem *tdata =
+ (const struct hash_netport4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport4_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ data.ip &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport4_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.ip &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
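+/* When a PORT_TO attribute is given for TCP or UDP, the loop above expands
+ * the range and adds/deletes one element per port; already-existing ports
+ * are only reported as an error when ip_set_eexist() rejects them. */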
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+};
+
+struct hash_netport6_telem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
+ const struct hash_netport6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_netport6_data_copy(struct hash_netport6_elem *dst,
+ const struct hash_netport6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_netport6_data_list(struct sk_buff *skb,
+ const struct hash_netport6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_netport6_data_tlist(struct sk_buff *skb,
+ const struct hash_netport6_elem *data)
+{
+ const struct hash_netport6_telem *e =
+ (const struct hash_netport6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport6_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6_netmask(&data.ip, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport6_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip6_netmask(&data.ip, data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:net,port type of sets */
+
+static int
+hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_netport4_tvariant : &hash_netport6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_netport4_gc_init(set);
+ else
+ hash_netport6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_netport4_variant : &hash_netport6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_netport_type __read_mostly = {
+ .name = "hash:net,port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_netport_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_netport_init(void)
+{
+ return ip_set_type_register(&hash_netport_type);
+}
+
+static void __exit
+hash_netport_fini(void)
+{
+ ip_set_type_unregister(&hash_netport_type);
+}
+
+module_init(hash_netport_init);
+module_exit(hash_netport_fini);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
new file mode 100644
index 000000000000..a47c32982f06
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -0,0 +1,584 @@
+/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the list:set type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_list.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("list:set type of IP sets");
+MODULE_ALIAS("ip_set_list:set");
+
+/* Member elements without and with timeout */
+struct set_elem {
+ ip_set_id_t id;
+};
+
+struct set_telem {
+ ip_set_id_t id;
+ unsigned long timeout;
+};
+
+/* Type structure */
+struct list_set {
+ size_t dsize; /* element size */
+ u32 size; /* size of set list array */
+ u32 timeout; /* timeout value */
+ struct timer_list gc; /* garbage collection */
+ struct set_elem members[0]; /* the set members */
+};
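+/* members[] is a flexible array of map->size slots, each map->dsize bytes
+ * long: plain set_elem entries for sets without a timeout, set_telem
+ * entries otherwise.  list_set_elem() below indexes it by slot position. */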
+
+static inline struct set_elem *
+list_set_elem(const struct list_set *map, u32 id)
+{
+ return (struct set_elem *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+list_set_timeout(const struct list_set *map, u32 id)
+{
+ const struct set_telem *elem =
+ (const struct set_telem *) list_set_elem(map, id);
+
+ return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+list_set_expired(const struct list_set *map, u32 id)
+{
+ const struct set_telem *elem =
+ (const struct set_telem *) list_set_elem(map, id);
+
+ return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+list_set_exist(const struct set_telem *elem)
+{
+ return elem->id != IPSET_INVALID_ID &&
+ !ip_set_timeout_expired(elem->timeout);
+}
+
+/* Set list without and with timeout */
+
+static int
+list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct list_set *map = set->data;
+ struct set_elem *elem;
+ u32 i;
+ int ret;
+
+ for (i = 0; i < map->size; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID)
+ return 0;
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
+ continue;
+ switch (adt) {
+ case IPSET_TEST:
+ ret = ip_set_test(elem->id, skb, pf, dim, flags);
+ if (ret > 0)
+ return ret;
+ break;
+ case IPSET_ADD:
+ ret = ip_set_add(elem->id, skb, pf, dim, flags);
+ if (ret == 0)
+ return ret;
+ break;
+ case IPSET_DEL:
+ ret = ip_set_del(elem->id, skb, pf, dim, flags);
+ if (ret == 0)
+ return ret;
+ break;
+ default:
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+static bool
+next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+ const struct set_elem *elem;
+
+ if (i + 1 < map->size) {
+ elem = list_set_elem(map, i + 1);
+ return !!(elem->id == id &&
+ !(with_timeout(map->timeout) &&
+ list_set_expired(map, i + 1)));
+ }
+
+ return 0;
+}
+
+static void
+list_elem_add(struct list_set *map, u32 i, ip_set_id_t id)
+{
+ struct set_elem *e;
+
+ for (; i < map->size; i++) {
+ e = list_set_elem(map, i);
+ swap(e->id, id);
+ if (e->id == IPSET_INVALID_ID)
+ break;
+ }
+}
+
+static void
+list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
+ unsigned long timeout)
+{
+ struct set_telem *e;
+
+ for (; i < map->size; i++) {
+ e = (struct set_telem *)list_set_elem(map, i);
+ swap(e->id, id);
+ if (e->id == IPSET_INVALID_ID)
+ break;
+ swap(e->timeout, timeout);
+ }
+}
+
+static int
+list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
+ unsigned long timeout)
+{
+ const struct set_elem *e = list_set_elem(map, i);
+
+ if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+ /* Last element replaced: e.g. add new,before,last */
+ ip_set_put_byindex(e->id);
+ if (with_timeout(map->timeout))
+ list_elem_tadd(map, i, id, timeout);
+ else
+ list_elem_add(map, i, id);
+
+ return 0;
+}
+
+static int
+list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+{
+ struct set_elem *a = list_set_elem(map, i), *b;
+
+ ip_set_put_byindex(id);
+
+ for (; i < map->size - 1; i++) {
+ b = list_set_elem(map, i + 1);
+ a->id = b->id;
+ if (with_timeout(map->timeout))
+ ((struct set_telem *)a)->timeout =
+ ((struct set_telem *)b)->timeout;
+ a = b;
+ if (a->id == IPSET_INVALID_ID)
+ break;
+ }
+ /* Last element */
+ a->id = IPSET_INVALID_ID;
+ return 0;
+}
+
+static int
+list_set_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct list_set *map = set->data;
+ bool with_timeout = with_timeout(map->timeout);
+ int before = 0;
+ u32 timeout = map->timeout;
+ ip_set_id_t id, refid = IPSET_INVALID_ID;
+ const struct set_elem *elem;
+ struct ip_set *s;
+ u32 i;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_NAME] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+ if (id == IPSET_INVALID_ID)
+ return -IPSET_ERR_NAME;
+ /* "Loop detection" */
+ if (s->type->features & IPSET_TYPE_NAME) {
+ ret = -IPSET_ERR_LOOP;
+ goto finish;
+ }
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ before = f & IPSET_FLAG_BEFORE;
+ }
+
+ if (before && !tb[IPSET_ATTR_NAMEREF]) {
+ ret = -IPSET_ERR_BEFORE;
+ goto finish;
+ }
+
+ if (tb[IPSET_ATTR_NAMEREF]) {
+ refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+ &s);
+ if (refid == IPSET_INVALID_ID) {
+ ret = -IPSET_ERR_NAMEREF;
+ goto finish;
+ }
+ if (!before)
+ before = -1;
+ }
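+	/* At this point 'before' encodes the request: > 0 means before the
+	 * referenced set, < 0 means after it (a reference given without the
+	 * BEFORE flag), and 0 means no reference at all. */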
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout) {
+ ret = -IPSET_ERR_TIMEOUT;
+ goto finish;
+ }
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ switch (adt) {
+ case IPSET_TEST:
+ for (i = 0; i < map->size && !ret; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID ||
+ (before != 0 && i + 1 >= map->size))
+ break;
+ else if (with_timeout && list_set_expired(map, i))
+ continue;
+ else if (before > 0 && elem->id == id)
+ ret = next_id_eq(map, i, refid);
+ else if (before < 0 && elem->id == refid)
+ ret = next_id_eq(map, i, id);
+ else if (before == 0 && elem->id == id)
+ ret = 1;
+ }
+ break;
+ case IPSET_ADD:
+ for (i = 0; i < map->size && !ret; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == id &&
+ !(with_timeout && list_set_expired(map, i)))
+ ret = -IPSET_ERR_EXIST;
+ }
+ if (ret == -IPSET_ERR_EXIST)
+ break;
+ ret = -IPSET_ERR_LIST_FULL;
+ for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID)
+ ret = before != 0 ? -IPSET_ERR_REF_EXIST
+ : list_set_add(map, i, id, timeout);
+ else if (elem->id != refid)
+ continue;
+ else if (with_timeout && list_set_expired(map, i))
+ ret = -IPSET_ERR_REF_EXIST;
+ else if (before)
+ ret = list_set_add(map, i, id, timeout);
+ else if (i + 1 < map->size)
+ ret = list_set_add(map, i + 1, id, timeout);
+ }
+ break;
+ case IPSET_DEL:
+ ret = -IPSET_ERR_EXIST;
+ for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID) {
+ ret = before != 0 ? -IPSET_ERR_REF_EXIST
+ : -IPSET_ERR_EXIST;
+ break;
+ } else if (with_timeout && list_set_expired(map, i))
+ continue;
+ else if (elem->id == id &&
+ (before == 0 ||
+ (before > 0 &&
+ next_id_eq(map, i, refid))))
+ ret = list_set_del(map, id, i);
+ else if (before < 0 &&
+ elem->id == refid &&
+ next_id_eq(map, i, id))
+ ret = list_set_del(map, id, i + 1);
+ }
+ break;
+ default:
+ break;
+ }
+
+finish:
+ if (refid != IPSET_INVALID_ID)
+ ip_set_put_byindex(refid);
+ if (adt != IPSET_ADD || ret)
+ ip_set_put_byindex(id);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+list_set_flush(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+ struct set_elem *elem;
+ u32 i;
+
+ for (i = 0; i < map->size; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id != IPSET_INVALID_ID) {
+ ip_set_put_byindex(elem->id);
+ elem->id = IPSET_INVALID_ID;
+ }
+ }
+}
+
+static void
+list_set_destroy(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+ list_set_flush(set);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static int
+list_set_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct list_set *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->size * map->dsize));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int
+list_set_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct list_set *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 i, first = cb->args[2];
+ const struct set_elem *e;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->size; cb->args[2]++) {
+ i = cb->args[2];
+ e = list_set_elem(map, i);
+ if (e->id == IPSET_INVALID_ID)
+ goto finish;
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (i == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
+ ip_set_name_byindex(e->id));
+ if (with_timeout(map->timeout)) {
+ const struct set_telem *te =
+ (const struct set_telem *) e;
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(te->timeout)));
+ }
+ ipset_nest_end(skb, nested);
+ }
+finish:
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(i == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static bool
+list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct list_set *x = a->data;
+ const struct list_set *y = b->data;
+
+ return x->size == y->size &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant list_set = {
+ .kadt = list_set_kadt,
+ .uadt = list_set_uadt,
+ .destroy = list_set_destroy,
+ .flush = list_set_flush,
+ .head = list_set_head,
+ .list = list_set_list,
+ .same_set = list_set_same_set,
+};
+
+static void
+list_set_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct list_set *map = set->data;
+ struct set_telem *e;
+ u32 i;
+
+	/* We run in parallel with other readers (test element),
+	 * but adding/deleting new entries is locked out. */
+ read_lock_bh(&set->lock);
+	for (i = map->size; i-- > 0; ) {
+ e = (struct set_telem *) list_set_elem(map, i);
+ if (e->id != IPSET_INVALID_ID &&
+ list_set_expired(map, i))
+ list_set_del(map, e->id, i);
+ }
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
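+/* The garbage collector re-arms its own timer at the end of each run; it
+ * is stopped only by the del_timer_sync() call in list_set_destroy(). */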
+
+static void
+list_set_gc_init(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = list_set_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create list:set type of sets */
+
+static bool
+init_list_set(struct ip_set *set, u32 size, size_t dsize,
+ unsigned long timeout)
+{
+ struct list_set *map;
+ struct set_elem *e;
+ u32 i;
+
+ map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+ if (!map)
+ return false;
+
+ map->size = size;
+ map->dsize = dsize;
+ map->timeout = timeout;
+ set->data = map;
+
+ for (i = 0; i < size; i++) {
+ e = list_set_elem(map, i);
+ e->id = IPSET_INVALID_ID;
+ }
+
+ return true;
+}
+
+static int
+list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 size = IP_SET_LIST_DEFAULT_SIZE;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_SIZE])
+ size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
+ if (size < IP_SET_LIST_MIN_SIZE)
+ size = IP_SET_LIST_MIN_SIZE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!init_list_set(set, size, sizeof(struct set_telem),
+ ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
+ return -ENOMEM;
+
+ list_set_gc_init(set);
+ } else {
+ if (!init_list_set(set, size, sizeof(struct set_elem),
+ IPSET_NO_TIMEOUT))
+ return -ENOMEM;
+ }
+ set->variant = &list_set;
+ return 0;
+}
+
+static struct ip_set_type list_set_type __read_mostly = {
+ .name = "list:set",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = list_set_create,
+ .create_policy = {
+ [IPSET_ATTR_SIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_NAME] = { .type = NLA_STRING,
+ .len = IPSET_MAXNAMELEN },
+ [IPSET_ATTR_NAMEREF] = { .type = NLA_STRING,
+ .len = IPSET_MAXNAMELEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+list_set_init(void)
+{
+ return ip_set_type_register(&list_set_type);
+}
+
+static void __exit
+list_set_fini(void)
+{
+ ip_set_type_unregister(&list_set_type);
+}
+
+module_init(list_set_init);
+module_exit(list_set_fini);
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
new file mode 100644
index 000000000000..23f8c8162214
--- /dev/null
+++ b/net/netfilter/ipset/pfxlen.c
@@ -0,0 +1,291 @@
+#include <linux/netfilter/ipset/pfxlen.h>
+
+/*
+ * Prefixlen maps for fast conversions, by Jan Engelhardt.
+ */
+
+#define E(a, b, c, d) \
+ {.ip6 = { \
+ __constant_htonl(a), __constant_htonl(b), \
+ __constant_htonl(c), __constant_htonl(d), \
+ } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_netmask_map[prefixlength].ip.
+ */
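+/* Usage sketch (exact macro expansions assumed from pfxlen.h, which is not
+ * part of this hunk): ip_set_netmask(24) is expected to resolve to
+ * ip_set_netmask_map[24].ip, i.e. htonl(0xFFFFFF00), and ip_set_netmask6(64)
+ * to the four-word IPv6 mask at index 64. */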
+const union nf_inet_addr ip_set_netmask_map[] = {
+ E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_netmask_map);
+
+#undef E
+#define E(a, b, c, d) \
+ {.ip6 = { (__force __be32) a, (__force __be32) b, \
+ (__force __be32) c, (__force __be32) d, \
+ } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_hostmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_hostmask_map[] = {
+ E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index a475edee0912..5c48ffb60c28 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -43,11 +43,6 @@ EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
-/* ipvs application list head */
-static LIST_HEAD(ip_vs_app_list);
-static DEFINE_MUTEX(__ip_vs_app_mutex);
-
-
/*
* Get an ip_vs_app object
*/
@@ -67,7 +62,8 @@ static inline void ip_vs_app_put(struct ip_vs_app *app)
* Allocate/initialize app incarnation and register it in proto apps.
*/
static int
-ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
+ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port)
{
struct ip_vs_protocol *pp;
struct ip_vs_app *inc;
@@ -98,7 +94,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
}
}
- ret = pp->register_app(inc);
+ ret = pp->register_app(net, inc);
if (ret)
goto out;
@@ -119,7 +115,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
* Release app incarnation
*/
static void
-ip_vs_app_inc_release(struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_protocol *pp;
@@ -127,7 +123,7 @@ ip_vs_app_inc_release(struct ip_vs_app *inc)
return;
if (pp->unregister_app)
- pp->unregister_app(inc);
+ pp->unregister_app(net, inc);
IP_VS_DBG(9, "%s App %s:%u unregistered\n",
pp->name, inc->name, ntohs(inc->port));
@@ -168,15 +164,17 @@ void ip_vs_app_inc_put(struct ip_vs_app *inc)
* Register an application incarnation in protocol applications
*/
int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
+register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
int result;
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
- result = ip_vs_app_inc_new(app, proto, port);
+ result = ip_vs_app_inc_new(net, app, proto, port);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
return result;
}
@@ -185,16 +183,17 @@ register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
/*
* ip_vs_app registration routine
*/
-int register_ip_vs_app(struct ip_vs_app *app)
+int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
- list_add(&app->a_list, &ip_vs_app_list);
+ list_add(&app->a_list, &ipvs->app_list);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
return 0;
}
@@ -204,19 +203,20 @@ int register_ip_vs_app(struct ip_vs_app *app)
* ip_vs_app unregistration routine
* We are sure there are no app incarnations attached to services
*/
-void unregister_ip_vs_app(struct ip_vs_app *app)
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_app *inc, *nxt;
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
- ip_vs_app_inc_release(inc);
+ ip_vs_app_inc_release(net, inc);
}
list_del(&app->a_list);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -226,7 +226,8 @@ void unregister_ip_vs_app(struct ip_vs_app *app)
/*
* Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
*/
-int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
+int ip_vs_bind_app(struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp)
{
return pp->app_conn_bind(cp);
}
@@ -481,11 +482,11 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
* /proc/net/ip_vs_app entry function
*/
-static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
+static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
struct ip_vs_app *app, *inc;
- list_for_each_entry(app, &ip_vs_app_list, a_list) {
+ list_for_each_entry(app, &ipvs->app_list, a_list) {
list_for_each_entry(inc, &app->incs_list, a_list) {
if (pos-- == 0)
return inc;
@@ -497,19 +498,24 @@ static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
- mutex_lock(&__ip_vs_app_mutex);
+ struct net *net = seq_file_net(seq);
+ struct netns_ipvs *ipvs = net_ipvs(net);
- return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
+ mutex_lock(&ipvs->app_mutex);
+
+ return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_app *inc, *app;
struct list_head *e;
+ struct net *net = seq_file_net(seq);
+ struct netns_ipvs *ipvs = net_ipvs(net);
++*pos;
if (v == SEQ_START_TOKEN)
- return ip_vs_app_idx(0);
+ return ip_vs_app_idx(ipvs, 0);
inc = v;
app = inc->app;
@@ -518,7 +524,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return list_entry(e, struct ip_vs_app, a_list);
/* go on to next application */
- for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
+ for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
app = list_entry(e, struct ip_vs_app, a_list);
list_for_each_entry(inc, &app->incs_list, a_list) {
return inc;
@@ -529,7 +535,9 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
- mutex_unlock(&__ip_vs_app_mutex);
+ struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq));
+
+ mutex_unlock(&ipvs->app_mutex);
}
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -557,7 +565,8 @@ static const struct seq_operations ip_vs_app_seq_ops = {
static int ip_vs_app_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_app_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_app_seq_ops,
+ sizeof(struct seq_net_private));
}
static const struct file_operations ip_vs_app_fops = {
@@ -569,15 +578,36 @@ static const struct file_operations ip_vs_app_fops = {
};
#endif
-int __init ip_vs_app_init(void)
+static int __net_init __ip_vs_app_init(struct net *net)
{
- /* we will replace it with proc_net_ipvs_create() soon */
- proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->app_list);
+ __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
+ proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
return 0;
}
+static void __net_exit __ip_vs_app_cleanup(struct net *net)
+{
+ proc_net_remove(net, "ip_vs_app");
+}
+
+static struct pernet_operations ip_vs_app_ops = {
+ .init = __ip_vs_app_init,
+ .exit = __ip_vs_app_cleanup,
+};
+
+int __init ip_vs_app_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_app_ops);
+ return rv;
+}
+
void ip_vs_app_cleanup(void)
{
- proc_net_remove(&init_net, "ip_vs_app");
+ unregister_pernet_subsys(&ip_vs_app_ops);
}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e9adecdc8ca4..9c2a517b69c8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -48,35 +48,32 @@
/*
* Connection hash size. Default is what was selected at compile time.
*/
-int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
/* size and mask values */
-int ip_vs_conn_tab_size;
-int ip_vs_conn_tab_mask;
+int ip_vs_conn_tab_size __read_mostly;
+static int ip_vs_conn_tab_mask __read_mostly;
/*
 * Connection hash table: for input and output packet lookups of IPVS
*/
-static struct list_head *ip_vs_conn_tab;
+static struct hlist_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
-/* counter for current IPVS connections */
-static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
-
/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
/* random value for IPVS connection hash */
-static unsigned int ip_vs_conn_rnd;
+static unsigned int ip_vs_conn_rnd __read_mostly;
/*
* Fine locking granularity for big connection hash table
*/
-#define CT_LOCKARRAY_BITS 4
+#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
@@ -133,19 +130,19 @@ static inline void ct_write_unlock_bh(unsigned key)
/*
* Returns hash value for IPVS connection entry
*/
-static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
- return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
- (__force u32)port, proto, ip_vs_conn_rnd)
- & ip_vs_conn_tab_mask;
+ return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+ (__force u32)port, proto, ip_vs_conn_rnd) ^
+ ((size_t)net>>8)) & ip_vs_conn_tab_mask;
#endif
- return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
- ip_vs_conn_rnd)
- & ip_vs_conn_tab_mask;
+ return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+ ip_vs_conn_rnd) ^
+ ((size_t)net>>8)) & ip_vs_conn_tab_mask;
}
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -166,18 +163,18 @@ static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
port = p->vport;
}
- return ip_vs_conn_hashkey(p->af, p->protocol, addr, port);
+ return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
}
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport,
- NULL, 0, &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+ &cp->caddr, cp->cport, NULL, 0, &p);
- if (cp->dest && cp->dest->svc->pe) {
- p.pe = cp->dest->svc->pe;
+ if (cp->pe) {
+ p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
@@ -186,7 +183,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
}
/*
- * Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
+ * Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
* returns bool success.
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
@@ -204,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
- list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+ hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
cp->flags |= IP_VS_CONN_F_HASHED;
atomic_inc(&cp->refcnt);
ret = 1;
@@ -237,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
- list_del(&cp->c_list);
+ hlist_del(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
atomic_dec(&cp->refcnt);
ret = 1;
@@ -262,18 +259,20 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp;
+ struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
- list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
+ p->cport == cp->cport && p->vport == cp->vport &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
- p->cport == cp->cport && p->vport == cp->vport &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
- p->protocol == cp->protocol) {
+ p->protocol == cp->protocol &&
+ ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
@@ -313,23 +312,23 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
+ struct net *net = skb_net(skb);
pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
if (likely(!inverse))
- ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0],
- &iph->daddr, pptr[1], p);
+ ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+ pptr[0], &iph->daddr, pptr[1], p);
else
- ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1],
- &iph->saddr, pptr[0], p);
+ ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+ pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
@@ -347,14 +346,17 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp;
+ struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
- list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+ if (!ip_vs_conn_net_eq(cp, p->net))
+ continue;
if (p->pe_data && p->pe->ct_match) {
- if (p->pe->ct_match(p, cp))
+ if (p->pe == cp->pe && p->pe->ct_match(p, cp))
goto out;
continue;
}
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp, *ret=NULL;
+ struct hlist_node *n;
/*
* Check for "full" addressed entries
@@ -402,12 +405,13 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
ct_read_lock(hash);
- list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
+ p->vport == cp->cport && p->cport == cp->dport &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
- p->vport == cp->cport && p->cport == cp->dport &&
- p->protocol == cp->protocol) {
+ p->protocol == cp->protocol &&
+ ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ret = cp;
@@ -428,7 +432,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
struct ip_vs_conn *
ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
@@ -611,9 +614,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
struct ip_vs_dest *dest;
if ((cp) && (!cp->dest)) {
- dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
- &cp->vaddr, cp->vport,
- cp->protocol);
+ dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+ cp->dport, &cp->vaddr, cp->vport,
+ cp->protocol, cp->fwmark);
ip_vs_bind_dest(cp, dest);
return dest;
} else
@@ -686,13 +689,14 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
int ip_vs_check_template(struct ip_vs_conn *ct)
{
struct ip_vs_dest *dest = ct->dest;
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
/*
* Checking the dest server status.
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- (sysctl_ip_vs_expire_quiescent_template &&
+ (ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
@@ -730,6 +734,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
cp->timeout = 60*HZ;
@@ -765,13 +770,14 @@ static void ip_vs_conn_expire(unsigned long data)
if (cp->flags & IP_VS_CONN_F_NFCT)
ip_vs_conn_drop_conntrack(cp);
+ ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
- atomic_dec(&ip_vs_conn_count);
+ atomic_dec(&ipvs->conn_count);
kmem_cache_free(ip_vs_conn_cachep, cp);
return;
@@ -802,10 +808,12 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
- struct ip_vs_dest *dest)
+ struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
- struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol);
+ struct netns_ipvs *ipvs = net_ipvs(p->net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+ p->protocol);
cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
@@ -813,8 +821,9 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
return NULL;
}
- INIT_LIST_HEAD(&cp->c_list);
+ INIT_HLIST_NODE(&cp->c_list);
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+ ip_vs_conn_net_set(cp, p->net);
cp->af = p->af;
cp->protocol = p->protocol;
ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
@@ -826,7 +835,10 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
&cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
- if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) {
+ cp->fwmark = fwmark;
+ if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
+ ip_vs_pe_get(p->pe);
+ cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
}
@@ -842,7 +854,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
- atomic_inc(&ip_vs_conn_count);
+ atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
@@ -861,8 +873,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
#endif
ip_vs_bind_xmit(cp);
- if (unlikely(pp && atomic_read(&pp->appcnt)))
- ip_vs_bind_app(cp, pp);
+ if (unlikely(pd && atomic_read(&pd->appcnt)))
+ ip_vs_bind_app(cp, pd->pp);
/*
* Allow conntrack to be preserved. By default, conntrack
@@ -871,7 +883,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
* IP_VS_CONN_F_ONE_PACKET too.
*/
- if (ip_vs_conntrack_enabled())
+ if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
/* Hash it in the ip_vs_conn_tab finally */
@@ -884,18 +896,24 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
* /proc/net/ip_vs_conn entries
*/
#ifdef CONFIG_PROC_FS
+struct ip_vs_iter_state {
+ struct seq_net_private p;
+ struct hlist_head *l;
+};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct hlist_node *n;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
- list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+ hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
- seq->private = &ip_vs_conn_tab[idx];
- return cp;
+ iter->l = &ip_vs_conn_tab[idx];
+ return cp;
}
}
ct_read_unlock_bh(idx);
@@ -906,14 +924,18 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
- seq->private = NULL;
+ struct ip_vs_iter_state *iter = seq->private;
+
+ iter->l = NULL;
return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
- struct list_head *e, *l = seq->private;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct hlist_node *e;
+ struct hlist_head *l = iter->l;
int idx;
++*pos;
@@ -921,27 +943,28 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
- if ((e = cp->c_list.next) != l)
- return list_entry(e, struct ip_vs_conn, c_list);
+ if ((e = cp->c_list.next))
+ return hlist_entry(e, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
ct_read_unlock_bh(idx);
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
- list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
- seq->private = &ip_vs_conn_tab[idx];
+ hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+ iter->l = &ip_vs_conn_tab[idx];
return cp;
}
ct_read_unlock_bh(idx);
}
- seq->private = NULL;
+ iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
{
- struct list_head *l = seq->private;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct hlist_head *l = iter->l;
if (l)
ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -955,18 +978,19 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
+ struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
- if (cp->dest && cp->pe_data &&
- cp->dest->svc->pe->show_pe_data) {
+ if (!ip_vs_conn_net_eq(cp, net))
+ return 0;
+ if (cp->pe_data) {
pe_data[0] = ' ';
- len = strlen(cp->dest->svc->pe->name);
- memcpy(pe_data + 1, cp->dest->svc->pe->name, len);
+ len = strlen(cp->pe->name);
+ memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
- len += cp->dest->svc->pe->show_pe_data(cp,
- pe_data + len);
+ len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
@@ -1004,7 +1028,8 @@ static const struct seq_operations ip_vs_conn_seq_ops = {
static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_conn_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
+ sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_fops = {
@@ -1031,6 +1056,10 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
+ struct net *net = seq_file_net(seq);
+
+ if (!ip_vs_conn_net_eq(cp, net))
+ return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
@@ -1067,7 +1096,8 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_conn_sync_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
+ sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_sync_fops = {
@@ -1113,7 +1143,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
}
/* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(void)
+void ip_vs_random_dropentry(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
@@ -1123,17 +1153,19 @@ void ip_vs_random_dropentry(void)
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned hash = net_random() & ip_vs_conn_tab_mask;
+ struct hlist_node *n;
/*
* Lock is actually needed in this loop.
*/
ct_write_lock_bh(hash);
- list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
-
+ if (!ip_vs_conn_net_eq(cp, net))
+ continue;
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
@@ -1168,20 +1200,24 @@ void ip_vs_random_dropentry(void)
/*
* Flush all the connection entries in the ip_vs_conn_tab
*/
-static void ip_vs_conn_flush(void)
+static void ip_vs_conn_flush(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- flush_again:
+flush_again:
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+ struct hlist_node *n;
+
/*
* Lock is actually needed in this loop.
*/
ct_write_lock_bh(idx);
- list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-
+ hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+ if (!ip_vs_conn_net_eq(cp, net))
+ continue;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
@@ -1194,16 +1230,41 @@ static void ip_vs_conn_flush(void)
/* the counter may be not NULL, because maybe some conn entries
are run by slow timer handler or unhashed but still referred */
- if (atomic_read(&ip_vs_conn_count) != 0) {
+ if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
+/*
+ * per netns init and exit
+ */
+int __net_init __ip_vs_conn_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ atomic_set(&ipvs->conn_count, 0);
+
+ proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
+ proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ return 0;
+}
+static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+{
+ /* flush all the connection entries first */
+ ip_vs_conn_flush(net);
+ proc_net_remove(net, "ip_vs_conn");
+ proc_net_remove(net, "ip_vs_conn_sync");
+}
+static struct pernet_operations ipvs_conn_ops = {
+ .init = __ip_vs_conn_init,
+ .exit = __ip_vs_conn_cleanup,
+};
int __init ip_vs_conn_init(void)
{
int idx;
+ int retc;
/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1212,8 +1273,7 @@ int __init ip_vs_conn_init(void)
/*
* Allocate the connection hash table and initialize its list heads
*/
- ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
- sizeof(struct list_head));
+ ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
if (!ip_vs_conn_tab)
return -ENOMEM;
@@ -1233,32 +1293,25 @@ int __init ip_vs_conn_init(void)
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));
- for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
- INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
- }
+ for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
+ INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
- proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
- proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ retc = register_pernet_subsys(&ipvs_conn_ops);
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
- return 0;
+ return retc;
}
-
void ip_vs_conn_cleanup(void)
{
- /* flush all the connection entries first */
- ip_vs_conn_flush();
-
+ unregister_pernet_subsys(&ipvs_conn_ops);
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
- proc_net_remove(&init_net, "ip_vs_conn");
- proc_net_remove(&init_net, "ip_vs_conn_sync");
vfree(ip_vs_conn_tab);
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b4e51e9c5a04..2d1f932add46 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -41,6 +41,7 @@
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h>
#include <net/ip6_checksum.h>
+#include <net/netns/generic.h> /* net_generic() */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
@@ -68,6 +69,12 @@ EXPORT_SYMBOL(ip_vs_conn_put);
EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
+int ip_vs_net_id __read_mostly;
+#ifdef IP_VS_GENERIC_NETNS
+EXPORT_SYMBOL(ip_vs_net_id);
+#endif
+/* netns cnt used for uniqueness */
+static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
/* ID used in ICMP lookups */
#define icmp_id(icmph) (((icmph)->un).echo.id)
@@ -108,21 +115,28 @@ static inline void
ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
- spin_lock(&dest->stats.lock);
- dest->stats.ustats.inpkts++;
- dest->stats.ustats.inbytes += skb->len;
- spin_unlock(&dest->stats.lock);
-
- spin_lock(&dest->svc->stats.lock);
- dest->svc->stats.ustats.inpkts++;
- dest->svc->stats.ustats.inbytes += skb->len;
- spin_unlock(&dest->svc->stats.lock);
-
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.inpkts++;
- ip_vs_stats.ustats.inbytes += skb->len;
- spin_unlock(&ip_vs_stats.lock);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(dest->svc->stats.cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
}
}
@@ -131,21 +145,28 @@ static inline void
ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
- spin_lock(&dest->stats.lock);
- dest->stats.ustats.outpkts++;
- dest->stats.ustats.outbytes += skb->len;
- spin_unlock(&dest->stats.lock);
-
- spin_lock(&dest->svc->stats.lock);
- dest->svc->stats.ustats.outpkts++;
- dest->svc->stats.ustats.outbytes += skb->len;
- spin_unlock(&dest->svc->stats.lock);
-
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.outpkts++;
- ip_vs_stats.ustats.outbytes += skb->len;
- spin_unlock(&ip_vs_stats.lock);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(dest->svc->stats.cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
}
}
@@ -153,41 +174,44 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
static inline void
ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
{
- spin_lock(&cp->dest->stats.lock);
- cp->dest->stats.ustats.conns++;
- spin_unlock(&cp->dest->stats.lock);
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(cp->dest->stats.cpustats);
+ s->ustats.conns++;
- spin_lock(&svc->stats.lock);
- svc->stats.ustats.conns++;
- spin_unlock(&svc->stats.lock);
+ s = this_cpu_ptr(svc->stats.cpustats);
+ s->ustats.conns++;
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.conns++;
- spin_unlock(&ip_vs_stats.lock);
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.conns++;
}
static inline int
ip_vs_set_state(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
- if (unlikely(!pp->state_transition))
+ if (unlikely(!pd->pp->state_transition))
return 0;
- return pp->state_transition(cp, direction, skb, pp);
+ return pd->pp->state_transition(cp, direction, skb, pd);
}
-static inline void
+static inline int
ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
struct sk_buff *skb, int protocol,
const union nf_inet_addr *caddr, __be16 cport,
const union nf_inet_addr *vaddr, __be16 vport,
struct ip_vs_conn_param *p)
{
- ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
+ ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+ vport, p);
p->pe = svc->pe;
if (p->pe && p->pe->fill_param)
- p->pe->fill_param(p, skb);
+ return p->pe->fill_param(p, skb);
+
+ return 0;
}
/*
@@ -200,7 +224,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
static struct ip_vs_conn *
ip_vs_sched_persist(struct ip_vs_service *svc,
struct sk_buff *skb,
- __be16 ports[2])
+ __be16 src_port, __be16 dst_port, int *ignored)
{
struct ip_vs_conn *cp = NULL;
struct ip_vs_iphdr iph;
@@ -224,8 +248,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
"mnet %s\n",
- IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
- IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+ IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
+ IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
IP_VS_DBG_ADDR(svc->af, &snet));
/*
@@ -247,14 +271,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
__be16 vport = 0;
- if (ports[1] == svc->port) {
+ if (dst_port == svc->port) {
/* non-FTP template:
* <protocol, caddr, 0, vaddr, vport, daddr, dport>
* FTP template:
* <protocol, caddr, 0, vaddr, 0, daddr, 0>
*/
if (svc->port != FTPPORT)
- vport = ports[1];
+ vport = dst_port;
} else {
/* Note: persistent fwmark-based services and
* persistent port zero service are handled here.
@@ -268,24 +292,31 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
vaddr = &fwmark;
}
}
- ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
- vaddr, vport, &param);
+ /* return *ignored = -1 so NF_DROP can be used */
+ if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
+ vaddr, vport, &param) < 0) {
+ *ignored = -1;
+ return NULL;
+ }
}
/* Check if a template already exists */
ct = ip_vs_ct_in_get(&param);
if (!ct || !ip_vs_check_template(ct)) {
- /* No template found or the dest of the connection
+ /*
+ * No template found or the dest of the connection
* template is not available.
+ * return *ignored=0 i.e. ICMP and NF_DROP
*/
dest = svc->scheduler->schedule(svc, skb);
if (!dest) {
IP_VS_DBG(1, "p-schedule: no dest found.\n");
kfree(param.pe_data);
+ *ignored = 0;
return NULL;
}
- if (ports[1] == svc->port && svc->port != FTPPORT)
+ if (dst_port == svc->port && svc->port != FTPPORT)
dport = dest->port;
/* Create a template
@@ -293,9 +324,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* and thus param.pe_data will be destroyed
* when the template expires */
ct = ip_vs_conn_new(&param, &dest->addr, dport,
- IP_VS_CONN_F_TEMPLATE, dest);
+ IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
if (ct == NULL) {
kfree(param.pe_data);
+ *ignored = -1;
return NULL;
}
@@ -306,7 +338,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
kfree(param.pe_data);
}
- dport = ports[1];
+ dport = dst_port;
if (dport == svc->port && dest->port)
dport = dest->port;
@@ -317,11 +349,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
/*
* Create a new connection according to the template
*/
- ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
- &iph.daddr, ports[1], &param);
- cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest);
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
+ src_port, &iph.daddr, dst_port, &param);
+
+ cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
if (cp == NULL) {
ip_vs_conn_put(ct);
+ *ignored = -1;
return NULL;
}
@@ -341,11 +375,27 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* It selects a server according to the virtual service, and
* creates a connection entry.
* Protocols supported: TCP, UDP
+ *
+ * Usage of *ignored
+ *
+ * 1 : protocol tried to schedule (eg. on SYN), found svc but the
+ * svc/scheduler decides that this packet should be accepted with
+ * NF_ACCEPT because it must not be scheduled.
+ *
+ * 0 : scheduler can not find destination, so try bypass or
+ * return ICMP and then NF_DROP (ip_vs_leave).
+ *
+ * -1 : scheduler tried to schedule but fatal error occurred, eg.
+ * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
+ * failure such as missing Call-ID, ENOMEM on skb_linearize
+ * or pe_data. In this case we should return NF_DROP without
+ * any attempts to send ICMP with ip_vs_leave.
*/
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored)
+ struct ip_vs_proto_data *pd, int *ignored)
{
+ struct ip_vs_protocol *pp = pd->pp;
struct ip_vs_conn *cp = NULL;
struct ip_vs_iphdr iph;
struct ip_vs_dest *dest;
@@ -371,12 +421,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
}
/*
- * Do not schedule replies from local real server. It is risky
- * for fwmark services but mostly for persistent services.
+ * Do not schedule replies from local real server.
*/
if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
- (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) &&
- (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) {
+ (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
"Not scheduling reply for existing connection");
__ip_vs_conn_put(cp);
@@ -386,10 +434,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
/*
* Persistent service
*/
- if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
- *ignored = 0;
- return ip_vs_sched_persist(svc, skb, pptr);
- }
+ if (svc->flags & IP_VS_SVC_F_PERSISTENT)
+ return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+
+ *ignored = 0;
/*
* Non-persistent service
@@ -402,8 +450,6 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
return NULL;
}
- *ignored = 0;
-
dest = svc->scheduler->schedule(svc, skb);
if (dest == NULL) {
IP_VS_DBG(1, "Schedule: no dest found.\n");
@@ -419,13 +465,17 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
*/
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
- pptr[0], &iph.daddr, pptr[1], &p);
+
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
+ &iph.saddr, pptr[0], &iph.daddr, pptr[1],
+ &p);
cp = ip_vs_conn_new(&p, &dest->addr,
dest->port ? dest->port : pptr[1],
- flags, dest);
- if (!cp)
+ flags, dest, skb->mark);
+ if (!cp) {
+ *ignored = -1;
return NULL;
+ }
}
IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
@@ -447,11 +497,14 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
* no destination is available for a new connection.
*/
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
+ struct net *net;
+ struct netns_ipvs *ipvs;
__be16 _ports[2], *pptr;
struct ip_vs_iphdr iph;
int unicast;
+
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -459,18 +512,20 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_service_put(svc);
return NF_DROP;
}
+ net = skb_net(skb);
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
else
#endif
- unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+ unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
/* if it is fwmark-based service, the cache_bypass sysctl is up
and the destination is a non-local unicast, then create
a cache_bypass connection entry */
- if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
+ ipvs = net_ipvs(net);
+ if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
int ret, cs;
struct ip_vs_conn *cp;
unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -484,12 +539,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(svc->af, iph.protocol,
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
&iph.saddr, pptr[0],
&iph.daddr, pptr[1], &p);
cp = ip_vs_conn_new(&p, &daddr, 0,
IP_VS_CONN_F_BYPASS | flags,
- NULL);
+ NULL, skb->mark);
if (!cp)
return NF_DROP;
}
@@ -498,10 +553,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_in_stats(cp, skb);
/* set state */
- cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+ cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
/* transmit the first SYN packet */
- ret = cp->packet_xmit(skb, cp, pp);
+ ret = cp->packet_xmit(skb, cp, pd->pp);
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
@@ -674,7 +729,7 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
#endif
/* Handle relevant response ICMP messages - forward to the right
- * destination host. Used for NAT and local client.
+ * destination host.
*/
static int handle_response_icmp(int af, struct sk_buff *skb,
union nf_inet_addr *snet,
@@ -682,6 +737,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
struct ip_vs_protocol *pp,
unsigned int offset, unsigned int ihl)
{
+ struct netns_ipvs *ipvs;
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != 0) {
@@ -703,6 +759,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
if (!skb_make_writable(skb, offset))
goto out;
+ ipvs = net_ipvs(skb_net(skb));
+
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_nat_icmp_v6(skb, pp, cp, 1);
@@ -712,11 +770,11 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+ if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
goto out;
} else
#endif
- if ((sysctl_ip_vs_snat_reroute ||
+ if ((ipvs->sysctl_snat_reroute ||
skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
goto out;
@@ -808,7 +866,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
ip_vs_fill_iphdr(AF_INET, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
if (!cp)
return NF_ACCEPT;
@@ -885,7 +943,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
if (!cp)
return NF_ACCEPT;
@@ -921,12 +979,14 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
}
/* Handle response packets: rewrite addresses and send away...
- * Used for NAT and local client.
*/
static unsigned int
-handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
struct ip_vs_conn *cp, int ihl)
{
+ struct ip_vs_protocol *pp = pd->pp;
+ struct netns_ipvs *ipvs;
+
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
if (!skb_make_writable(skb, ihl))
@@ -961,13 +1021,15 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* if it came from this machine itself. So re-compute
* the routing information.
*/
+ ipvs = net_ipvs(skb_net(skb));
+
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+ if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
goto drop;
} else
#endif
- if ((sysctl_ip_vs_snat_reroute ||
+ if ((ipvs->sysctl_snat_reroute ||
skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
goto drop;
@@ -975,7 +1037,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
ip_vs_out_stats(cp, skb);
- ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+ ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
skb->ipvs_property = 1;
if (!(cp->flags & IP_VS_CONN_F_NFCT))
ip_vs_notrack(skb);
@@ -999,9 +1061,12 @@ drop:
static unsigned int
ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
{
+ struct net *net = NULL;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs;
EnterFunction(11);
@@ -1022,6 +1087,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
if (unlikely(!skb_dst(skb)))
return NF_ACCEPT;
+ net = skb_net(skb);
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
@@ -1045,9 +1111,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}
- pp = ip_vs_proto_get(iph.protocol);
- if (unlikely(!pp))
+ pd = ip_vs_proto_data_get(net, iph.protocol);
+ if (unlikely(!pd))
return NF_ACCEPT;
+ pp = pd->pp;
/* reassemble IP fragments */
#ifdef CONFIG_IP_VS_IPV6
@@ -1073,11 +1140,12 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+ cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
+ ipvs = net_ipvs(net);
if (likely(cp))
- return handle_response(af, skb, pp, cp, iph.len);
- if (sysctl_ip_vs_nat_icmp_send &&
+ return handle_response(af, skb, pd, cp, iph.len);
+ if (ipvs->sysctl_nat_icmp_send &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
pp->protocol == IPPROTO_SCTP)) {
@@ -1087,7 +1155,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
sizeof(_ports), _ports);
if (pptr == NULL)
return NF_ACCEPT; /* Not for me */
- if (ip_vs_lookup_real_service(af, iph.protocol,
+ if (ip_vs_lookup_real_service(net, af, iph.protocol,
&iph.saddr,
pptr[0])) {
/*
@@ -1202,14 +1270,15 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
static int
ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
{
+ struct net *net = NULL;
struct iphdr *iph;
struct icmphdr _icmph, *ic;
struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
unsigned int offset, ihl, verdict;
- union nf_inet_addr snet;
*related = 1;
@@ -1249,9 +1318,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- pp = ip_vs_proto_get(cih->protocol);
- if (!pp)
+ net = skb_net(skb);
+ pd = ip_vs_proto_data_get(net, cih->protocol);
+ if (!pd)
return NF_ACCEPT;
+ pp = pd->pp;
/* Is the embedded protocol header present? */
if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
@@ -1265,18 +1336,9 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
ip_vs_fill_iphdr(AF_INET, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
- if (!cp) {
- /* The packet could also belong to a local client */
- cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
- if (cp) {
- snet.ip = iph->saddr;
- return handle_response_icmp(AF_INET, skb, &snet,
- cih->protocol, cp, pp,
- offset, ihl);
- }
+ cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+ if (!cp)
return NF_ACCEPT;
- }
verdict = NF_DROP;
@@ -1312,6 +1374,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
static int
ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
{
+ struct net *net = NULL;
struct ipv6hdr *iph;
struct icmp6hdr _icmph, *ic;
struct ipv6hdr _ciph, *cih; /* The ip header contained
@@ -1319,8 +1382,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
unsigned int offset, verdict;
- union nf_inet_addr snet;
struct rt6_info *rt;
*related = 1;
@@ -1361,9 +1424,11 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- pp = ip_vs_proto_get(cih->nexthdr);
- if (!pp)
+ net = skb_net(skb);
+ pd = ip_vs_proto_data_get(net, cih->nexthdr);
+ if (!pd)
return NF_ACCEPT;
+ pp = pd->pp;
/* Is the embedded protocol header present? */
/* TODO: we don't support fragmentation at the moment anyways */
@@ -1377,19 +1442,9 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
- if (!cp) {
- /* The packet could also belong to a local client */
- cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
- if (cp) {
- ipv6_addr_copy(&snet.in6, &iph->saddr);
- return handle_response_icmp(AF_INET6, skb, &snet,
- cih->nexthdr,
- cp, pp, offset,
- sizeof(struct ipv6hdr));
- }
+ cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
+ if (!cp)
return NF_ACCEPT;
- }
verdict = NF_DROP;
@@ -1423,10 +1478,13 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
static unsigned int
ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
{
+ struct net *net;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, restart, pkts;
+ struct netns_ipvs *ipvs;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
@@ -1480,20 +1538,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}
+ net = skb_net(skb);
/* Protocol supported? */
- pp = ip_vs_proto_get(iph.protocol);
- if (unlikely(!pp))
+ pd = ip_vs_proto_data_get(net, iph.protocol);
+ if (unlikely(!pd))
return NF_ACCEPT;
-
+ pp = pd->pp;
/*
* Check if the packet belongs to an existing connection entry
*/
- cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
+ cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
if (unlikely(!cp)) {
int v;
- if (!pp->conn_schedule(af, skb, pp, &v, &cp))
+ if (!pp->conn_schedule(af, skb, pd, &v, &cp))
return v;
}
@@ -1505,12 +1564,13 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-
+ net = skb_net(skb);
+ ipvs = net_ipvs(net);
/* Check the server status */
if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
/* the destination server is not available */
- if (sysctl_ip_vs_expire_nodest_conn) {
+ if (ipvs->sysctl_expire_nodest_conn) {
/* try to expire the connection immediately */
ip_vs_conn_expire_now(cp);
}
@@ -1521,7 +1581,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
ip_vs_in_stats(cp, skb);
- restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+ restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
if (cp->packet_xmit)
ret = cp->packet_xmit(skb, cp, pp);
/* do not touch skb anymore */
@@ -1535,35 +1595,41 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
*
* Sync connection if it is about to close to
 * encourage the standby servers to update the connections timeout
+ *
+ * For ONE_PKT let ip_vs_sync_conn() do the filter work.
*/
- pkts = atomic_add_return(1, &cp->in_pkts);
- if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ pkts = ipvs->sysctl_sync_threshold[0];
+ else
+ pkts = atomic_add_return(1, &cp->in_pkts);
+
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
cp->protocol == IPPROTO_SCTP) {
if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (pkts % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0])) ||
+ (pkts % ipvs->sysctl_sync_threshold[1]
+ == ipvs->sysctl_sync_threshold[0])) ||
(cp->old_state != cp->state &&
((cp->state == IP_VS_SCTP_S_CLOSED) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
- ip_vs_sync_conn(cp);
+ ip_vs_sync_conn(net, cp);
goto out;
}
}
/* Keep this block last: TCP and others with pp->num_states <= 1 */
- else if (af == AF_INET &&
- (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+ else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (pkts % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0])) ||
+ (pkts % ipvs->sysctl_sync_threshold[1]
+ == ipvs->sysctl_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
(cp->state == IP_VS_TCP_S_CLOSE) ||
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
- ip_vs_sync_conn(cp);
+ ip_vs_sync_conn(net, cp);
out:
cp->old_state = cp->state;
@@ -1782,7 +1848,39 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
},
#endif
};
+/*
+ * Initialize IP Virtual Server netns mem.
+ */
+static int __net_init __ip_vs_init(struct net *net)
+{
+ struct netns_ipvs *ipvs;
+ ipvs = net_generic(net, ip_vs_net_id);
+ if (ipvs == NULL) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ ipvs->net = net;
+ /* Counters used for creating unique names */
+ ipvs->gen = atomic_read(&ipvs_netns_cnt);
+ atomic_inc(&ipvs_netns_cnt);
+ net->ipvs = ipvs;
+ printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
+ sizeof(struct netns_ipvs), ipvs->gen);
+ return 0;
+}
+
+static void __net_exit __ip_vs_cleanup(struct net *net)
+{
+ IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static struct pernet_operations ipvs_core_ops = {
+ .init = __ip_vs_init,
+ .exit = __ip_vs_cleanup,
+ .id = &ip_vs_net_id,
+ .size = sizeof(struct netns_ipvs),
+};
/*
* Initialize IP Virtual Server
@@ -1791,8 +1889,11 @@ static int __init ip_vs_init(void)
{
int ret;
- ip_vs_estimator_init();
+ ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
+ if (ret < 0)
+ return ret;
+ ip_vs_estimator_init();
ret = ip_vs_control_init();
if (ret < 0) {
pr_err("can't setup control.\n");
@@ -1813,15 +1914,23 @@ static int __init ip_vs_init(void)
goto cleanup_app;
}
+ ret = ip_vs_sync_init();
+ if (ret < 0) {
+ pr_err("can't setup sync data.\n");
+ goto cleanup_conn;
+ }
+
ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0) {
pr_err("can't register hooks.\n");
- goto cleanup_conn;
+ goto cleanup_sync;
}
pr_info("ipvs loaded.\n");
return ret;
+cleanup_sync:
+ ip_vs_sync_cleanup();
cleanup_conn:
ip_vs_conn_cleanup();
cleanup_app:
@@ -1831,17 +1940,20 @@ static int __init ip_vs_init(void)
ip_vs_control_cleanup();
cleanup_estimator:
ip_vs_estimator_cleanup();
+ unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
return ret;
}
static void __exit ip_vs_cleanup(void)
{
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ip_vs_sync_cleanup();
ip_vs_conn_cleanup();
ip_vs_app_cleanup();
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
ip_vs_estimator_cleanup();
+ unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
pr_info("ipvs unloaded.\n");
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..d69ec26b6bd4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -38,6 +38,7 @@
#include <linux/mutex.h>
#include <net/net_namespace.h>
+#include <linux/nsproxy.h>
#include <net/ip.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
@@ -57,42 +58,7 @@ static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);
-/* lock for table with the real services */
-static DEFINE_RWLOCK(__ip_vs_rs_lock);
-
-/* lock for state and timeout tables */
-static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
-
-/* lock for drop entry handling */
-static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
-
-/* lock for drop packet handling */
-static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
-
-/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
-static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
-
-/* number of virtual services */
-static int ip_vs_num_services = 0;
-
/* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
-static int sysctl_ip_vs_amemthresh = 1024;
-static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
-int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
-#ifdef CONFIG_IP_VS_NFCT
-int sysctl_ip_vs_conntrack;
-#endif
-int sysctl_ip_vs_snat_reroute = 1;
-
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;
@@ -105,7 +71,8 @@ int ip_vs_get_debug_level(void)
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+static int __ip_vs_addr_is_local_v6(struct net *net,
+ const struct in6_addr *addr)
{
struct rt6_info *rt;
struct flowi fl = {
@@ -114,7 +81,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
.fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
};
- rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+ rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl);
if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
return 1;
@@ -125,7 +92,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
-static void update_defense_level(void)
+static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
static int old_secure_tcp = 0;
@@ -141,73 +108,73 @@ static void update_defense_level(void)
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
- nomem = (availmem < sysctl_ip_vs_amemthresh);
+ nomem = (availmem < ipvs->sysctl_amemthresh);
local_bh_disable();
/* drop_entry */
- spin_lock(&__ip_vs_dropentry_lock);
- switch (sysctl_ip_vs_drop_entry) {
+ spin_lock(&ipvs->dropentry_lock);
+ switch (ipvs->sysctl_drop_entry) {
case 0:
- atomic_set(&ip_vs_dropentry, 0);
+ atomic_set(&ipvs->dropentry, 0);
break;
case 1:
if (nomem) {
- atomic_set(&ip_vs_dropentry, 1);
- sysctl_ip_vs_drop_entry = 2;
+ atomic_set(&ipvs->dropentry, 1);
+ ipvs->sysctl_drop_entry = 2;
} else {
- atomic_set(&ip_vs_dropentry, 0);
+ atomic_set(&ipvs->dropentry, 0);
}
break;
case 2:
if (nomem) {
- atomic_set(&ip_vs_dropentry, 1);
+ atomic_set(&ipvs->dropentry, 1);
} else {
- atomic_set(&ip_vs_dropentry, 0);
- sysctl_ip_vs_drop_entry = 1;
+ atomic_set(&ipvs->dropentry, 0);
+ ipvs->sysctl_drop_entry = 1;
};
break;
case 3:
- atomic_set(&ip_vs_dropentry, 1);
+ atomic_set(&ipvs->dropentry, 1);
break;
}
- spin_unlock(&__ip_vs_dropentry_lock);
+ spin_unlock(&ipvs->dropentry_lock);
/* drop_packet */
- spin_lock(&__ip_vs_droppacket_lock);
- switch (sysctl_ip_vs_drop_packet) {
+ spin_lock(&ipvs->droppacket_lock);
+ switch (ipvs->sysctl_drop_packet) {
case 0:
- ip_vs_drop_rate = 0;
+ ipvs->drop_rate = 0;
break;
case 1:
if (nomem) {
- ip_vs_drop_rate = ip_vs_drop_counter
- = sysctl_ip_vs_amemthresh /
- (sysctl_ip_vs_amemthresh-availmem);
- sysctl_ip_vs_drop_packet = 2;
+ ipvs->drop_rate = ipvs->drop_counter
+ = ipvs->sysctl_amemthresh /
+ (ipvs->sysctl_amemthresh-availmem);
+ ipvs->sysctl_drop_packet = 2;
} else {
- ip_vs_drop_rate = 0;
+ ipvs->drop_rate = 0;
}
break;
case 2:
if (nomem) {
- ip_vs_drop_rate = ip_vs_drop_counter
- = sysctl_ip_vs_amemthresh /
- (sysctl_ip_vs_amemthresh-availmem);
+ ipvs->drop_rate = ipvs->drop_counter
+ = ipvs->sysctl_amemthresh /
+ (ipvs->sysctl_amemthresh-availmem);
} else {
- ip_vs_drop_rate = 0;
- sysctl_ip_vs_drop_packet = 1;
+ ipvs->drop_rate = 0;
+ ipvs->sysctl_drop_packet = 1;
}
break;
case 3:
- ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
+ ipvs->drop_rate = ipvs->sysctl_am_droprate;
break;
}
- spin_unlock(&__ip_vs_droppacket_lock);
+ spin_unlock(&ipvs->droppacket_lock);
/* secure_tcp */
- spin_lock(&ip_vs_securetcp_lock);
- switch (sysctl_ip_vs_secure_tcp) {
+ spin_lock(&ipvs->securetcp_lock);
+ switch (ipvs->sysctl_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
to_change = 0;
@@ -216,7 +183,7 @@ static void update_defense_level(void)
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
- sysctl_ip_vs_secure_tcp = 2;
+ ipvs->sysctl_secure_tcp = 2;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
@@ -229,7 +196,7 @@ static void update_defense_level(void)
} else {
if (old_secure_tcp >= 2)
to_change = 0;
- sysctl_ip_vs_secure_tcp = 1;
+ ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
@@ -237,10 +204,11 @@ static void update_defense_level(void)
to_change = 1;
break;
}
- old_secure_tcp = sysctl_ip_vs_secure_tcp;
+ old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
- ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
- spin_unlock(&ip_vs_securetcp_lock);
+ ip_vs_protocol_timeout_change(ipvs,
+ ipvs->sysctl_secure_tcp > 1);
+ spin_unlock(&ipvs->securetcp_lock);
local_bh_enable();
}
@@ -250,16 +218,16 @@ static void update_defense_level(void)
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
-static void defense_work_handler(struct work_struct *work);
-static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
static void defense_work_handler(struct work_struct *work)
{
- update_defense_level();
- if (atomic_read(&ip_vs_dropentry))
- ip_vs_random_dropentry();
+ struct netns_ipvs *ipvs =
+ container_of(work, struct netns_ipvs, defense_work.work);
- schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
+ update_defense_level(ipvs);
+ if (atomic_read(&ipvs->dropentry))
+ ip_vs_random_dropentry(ipvs->net);
+ schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
int
@@ -287,33 +255,13 @@ static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
-/*
- * Hash table: for real service lookups
- */
-#define IP_VS_RTAB_BITS 4
-#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
-#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
-static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
-
-/*
- * Trash for destinations
- */
-static LIST_HEAD(ip_vs_dest_trash);
-
-/*
- * FTP & NULL virtual service counters
- */
-static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
-static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
-
/*
* Returns hash value for virtual service
*/
-static __inline__ unsigned
-ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
- __be16 port)
+static inline unsigned
+ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+ const union nf_inet_addr *addr, __be16 port)
{
register unsigned porth = ntohs(port);
__be32 addr_fold = addr->ip;
@@ -323,6 +271,7 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
+ addr_fold ^= ((size_t)net>>8);
return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
& IP_VS_SVC_TAB_MASK;
@@ -331,13 +280,13 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
/*
* Returns hash value of fwmark for virtual service lookup
*/
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
{
- return fwmark & IP_VS_SVC_TAB_MASK;
+ return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
/*
- * Hashes a service in the ip_vs_svc_table by <proto,addr,port>
+ * Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
* or in the ip_vs_svc_fwm_table by fwmark.
* Should be called with locked tables.
*/
@@ -353,16 +302,16 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
if (svc->fwmark == 0) {
/*
- * Hash it by <protocol,addr,port> in ip_vs_svc_table
+ * Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
*/
- hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
- svc->port);
+ hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+ &svc->addr, svc->port);
list_add(&svc->s_list, &ip_vs_svc_table[hash]);
} else {
/*
- * Hash it by fwmark in ip_vs_svc_fwm_table
+ * Hash it by fwmark in svc_fwm_table
*/
- hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
+ hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
}
@@ -374,7 +323,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
/*
- * Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
+ * Unhashes a service from svc_table / svc_fwm_table.
* Should be called with locked tables.
*/
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
@@ -386,10 +335,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
}
if (svc->fwmark == 0) {
- /* Remove it from the ip_vs_svc_table table */
+ /* Remove it from the svc_table */
list_del(&svc->s_list);
} else {
- /* Remove it from the ip_vs_svc_fwm_table table */
+ /* Remove it from the svc_fwm_table */
list_del(&svc->f_list);
}
@@ -400,23 +349,24 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
/*
- * Get service by {proto,addr,port} in the service table.
+ * Get service by {netns, proto, addr, port} in the service table.
*/
static inline struct ip_vs_service *
-__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
- __be16 vport)
+__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+ const union nf_inet_addr *vaddr, __be16 vport)
{
unsigned hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
- hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
+ hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
- && (svc->protocol == protocol)) {
+ && (svc->protocol == protocol)
+ && net_eq(svc->net, net)) {
/* HIT */
return svc;
}
@@ -430,16 +380,17 @@ __ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
* Get service by {fwmark} in the service table.
*/
static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
{
unsigned hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
- hash = ip_vs_svc_fwm_hashkey(fwmark);
+ hash = ip_vs_svc_fwm_hashkey(net, fwmark);
list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
- if (svc->fwmark == fwmark && svc->af == af) {
+ if (svc->fwmark == fwmark && svc->af == af
+ && net_eq(svc->net, net)) {
/* HIT */
return svc;
}
@@ -449,42 +400,44 @@ __ip_vs_svc_fwm_find(int af, __u32 fwmark)
}
struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
struct ip_vs_service *svc;
+ struct netns_ipvs *ipvs = net_ipvs(net);
read_lock(&__ip_vs_svc_lock);
/*
* Check the table hashed by fwmark first
*/
- if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark)))
+ svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+ if (fwmark && svc)
goto out;
/*
* Check the table hashed by <protocol,addr,port>
* for "full" addressed entries
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, vport);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
if (svc == NULL
&& protocol == IPPROTO_TCP
- && atomic_read(&ip_vs_ftpsvc_counter)
+ && atomic_read(&ipvs->ftpsvc_counter)
&& (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
/*
* Check if ftp service entry exists, the packet
* might belong to FTP data connections.
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
}
if (svc == NULL
- && atomic_read(&ip_vs_nullsvc_counter)) {
+ && atomic_read(&ipvs->nullsvc_counter)) {
/*
* Check if the catch-all port (port zero) exists
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, 0);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
}
out:
@@ -519,6 +472,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
}
@@ -545,10 +499,10 @@ static inline unsigned ip_vs_rs_hashkey(int af,
}
/*
- * Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
+ * Hashes ip_vs_dest in rs_table by <proto,addr,port>.
* should be called with locked tables.
*/
-static int ip_vs_rs_hash(struct ip_vs_dest *dest)
+static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned hash;
@@ -562,19 +516,19 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
*/
hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
- list_add(&dest->d_list, &ip_vs_rtable[hash]);
+ list_add(&dest->d_list, &ipvs->rs_table[hash]);
return 1;
}
/*
- * UNhashes ip_vs_dest from ip_vs_rtable.
+ * UNhashes ip_vs_dest from rs_table.
* should be called with locked tables.
*/
static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
/*
- * Remove it from the ip_vs_rtable table.
+ * Remove it from the rs_table.
*/
if (!list_empty(&dest->d_list)) {
list_del(&dest->d_list);
@@ -588,10 +542,11 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
* Lookup real service by <proto,addr,port> in the real service table.
*/
struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr,
__be16 dport)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
unsigned hash;
struct ip_vs_dest *dest;
@@ -601,19 +556,19 @@ ip_vs_lookup_real_service(int af, __u16 protocol,
*/
hash = ip_vs_rs_hashkey(af, daddr, dport);
- read_lock(&__ip_vs_rs_lock);
- list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
+ read_lock(&ipvs->rs_lock);
+ list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
if ((dest->af == af)
&& ip_vs_addr_equal(af, &dest->addr, daddr)
&& (dest->port == dport)
&& ((dest->protocol == protocol) ||
dest->vfwmark)) {
/* HIT */
- read_unlock(&__ip_vs_rs_lock);
+ read_unlock(&ipvs->rs_lock);
return dest;
}
}
- read_unlock(&__ip_vs_rs_lock);
+ read_unlock(&ipvs->rs_lock);
return NULL;
}
@@ -652,15 +607,16 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* ip_vs_lookup_real_service() looked promising, but
* does not seem to work as expected.
*/
-struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
+ const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
- __be16 vport, __u16 protocol)
+ __be16 vport, __u16 protocol, __u32 fwmark)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
- svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
+ svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -685,11 +641,12 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
/*
* Find the destination in trash
*/
- list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
@@ -720,6 +677,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
@@ -737,14 +695,16 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* are expired, and the refcnt of each destination in the trash must
* be 1, so we simply release them here.
*/
-static void ip_vs_trash_cleanup(void)
+static void ip_vs_trash_cleanup(struct net *net)
{
struct ip_vs_dest *dest, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
@@ -768,6 +728,7 @@ static void
__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
int conn_flags;
/* set the weight and the flags */
@@ -780,12 +741,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
/*
- * Put the real service in ip_vs_rtable if not present.
+ * Put the real service in rs_table if not present.
* For now only for NAT!
*/
- write_lock_bh(&__ip_vs_rs_lock);
- ip_vs_rs_hash(dest);
- write_unlock_bh(&__ip_vs_rs_lock);
+ write_lock_bh(&ipvs->rs_lock);
+ ip_vs_rs_hash(ipvs, dest);
+ write_unlock_bh(&ipvs->rs_lock);
}
atomic_set(&dest->conn_flags, conn_flags);
@@ -808,12 +769,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
dest->u_threshold = udest->u_threshold;
dest->l_threshold = udest->l_threshold;
- spin_lock(&dest->dst_lock);
+ spin_lock_bh(&dest->dst_lock);
ip_vs_dst_reset(dest);
- spin_unlock(&dest->dst_lock);
+ spin_unlock_bh(&dest->dst_lock);
if (add)
- ip_vs_new_estimator(&dest->stats);
+ ip_vs_new_estimator(svc->net, &dest->stats);
write_lock_bh(&__ip_vs_svc_lock);
@@ -850,12 +811,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
- !__ip_vs_addr_is_local_v6(&udest->addr.in6))
+ !__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
return -EINVAL;
} else
#endif
{
- atype = inet_addr_type(&init_net, udest->addr.ip);
+ atype = inet_addr_type(svc->net, udest->addr.ip);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
}
@@ -865,6 +826,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
}
+ dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!dest->stats.cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto err_alloc;
+ }
dest->af = svc->af;
dest->protocol = svc->protocol;
@@ -888,6 +854,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
LeaveFunction(2);
return 0;
+
+err_alloc:
+ kfree(dest);
+ return -ENOMEM;
}
@@ -1006,16 +976,18 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Delete a destination (must be already unlinked from the service)
*/
-static void __ip_vs_del_dest(struct ip_vs_dest *dest)
+static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
{
- ip_vs_kill_estimator(&dest->stats);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_kill_estimator(net, &dest->stats);
/*
* Remove it from the d-linked list with the real services.
*/
- write_lock_bh(&__ip_vs_rs_lock);
+ write_lock_bh(&ipvs->rs_lock);
ip_vs_rs_unhash(dest);
- write_unlock_bh(&__ip_vs_rs_lock);
+ write_unlock_bh(&ipvs->rs_lock);
/*
* Decrease the refcnt of the dest, and free the dest
@@ -1034,6 +1006,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
and only one user context can update virtual service at a
time, so the operation here is OK */
atomic_dec(&dest->svc->refcnt);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
} else {
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1041,7 +1014,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
- list_add(&dest->n_list, &ip_vs_dest_trash);
+ list_add(&dest->n_list, &ipvs->dest_trash);
atomic_inc(&dest->refcnt);
}
}
@@ -1105,7 +1078,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Delete the destination
*/
- __ip_vs_del_dest(dest);
+ __ip_vs_del_dest(svc->net, dest);
LeaveFunction(2);
@@ -1117,13 +1090,14 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
* Add a service into the service hash table
*/
static int
-ip_vs_add_service(struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
+ struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
@@ -1137,7 +1111,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
}
if (u->pe_name && *u->pe_name) {
- pe = ip_vs_pe_get(u->pe_name);
+ pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
@@ -1159,6 +1133,11 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
ret = -ENOMEM;
goto out_err;
}
+ svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!svc->stats.cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
@@ -1172,6 +1151,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
svc->flags = u->flags;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
+ svc->net = net;
INIT_LIST_HEAD(&svc->destinations);
rwlock_init(&svc->sched_lock);
@@ -1189,15 +1169,15 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
/* Update the virtual service counters */
if (svc->port == FTPPORT)
- atomic_inc(&ip_vs_ftpsvc_counter);
+ atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
- atomic_inc(&ip_vs_nullsvc_counter);
+ atomic_inc(&ipvs->nullsvc_counter);
- ip_vs_new_estimator(&svc->stats);
+ ip_vs_new_estimator(net, &svc->stats);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
- ip_vs_num_services++;
+ ipvs->num_services++;
/* Hash the service into the service table */
write_lock_bh(&__ip_vs_svc_lock);
@@ -1207,6 +1187,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
*svc_p = svc;
return 0;
+
out_err:
if (svc != NULL) {
ip_vs_unbind_scheduler(svc);
@@ -1215,6 +1196,8 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
ip_vs_app_inc_put(svc->inc);
local_bh_enable();
}
+ if (svc->stats.cpustats)
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
ip_vs_scheduler_put(sched);
@@ -1248,7 +1231,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
old_sched = sched;
if (u->pe_name && *u->pe_name) {
- pe = ip_vs_pe_get(u->pe_name);
+ pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
@@ -1334,14 +1317,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
pr_info("%s: enter\n", __func__);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
- ip_vs_num_services--;
+ ipvs->num_services--;
- ip_vs_kill_estimator(&svc->stats);
+ ip_vs_kill_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = svc->scheduler;
@@ -1364,16 +1348,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
- __ip_vs_del_dest(dest);
+ __ip_vs_del_dest(svc->net, dest);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
- atomic_dec(&ip_vs_ftpsvc_counter);
+ atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
- atomic_dec(&ip_vs_nullsvc_counter);
+ atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
@@ -1383,6 +1367,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
@@ -1428,17 +1413,19 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
/*
* Flush all the virtual services
*/
-static int ip_vs_flush(void)
+static int ip_vs_flush(struct net *net)
{
int idx;
struct ip_vs_service *svc, *nxt;
/*
- * Flush the service table hashed by <protocol,addr,port>
+ * Flush the service table hashed by <netns,protocol,addr,port>
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
- ip_vs_unlink_service(svc);
+ list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+ s_list) {
+ if (net_eq(svc->net, net))
+ ip_vs_unlink_service(svc);
}
}
@@ -1448,7 +1435,8 @@ static int ip_vs_flush(void)
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt,
&ip_vs_svc_fwm_table[idx], f_list) {
- ip_vs_unlink_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_unlink_service(svc);
}
}
@@ -1472,24 +1460,26 @@ static int ip_vs_zero_service(struct ip_vs_service *svc)
return 0;
}
-static int ip_vs_zero_all(void)
+static int ip_vs_zero_all(struct net *net)
{
int idx;
struct ip_vs_service *svc;
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- ip_vs_zero_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_zero_service(svc);
}
}
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
- ip_vs_zero_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_zero_service(svc);
}
}
- ip_vs_zero_stats(&ip_vs_stats);
+ ip_vs_zero_stats(net_ipvs(net)->tot_stats);
return 0;
}
@@ -1498,6 +1488,7 @@ static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ struct net *net = current->nsproxy->net_ns;
int *valp = table->data;
int val = *valp;
int rc;
@@ -1508,7 +1499,7 @@ proc_do_defense_mode(ctl_table *table, int write,
/* Restore the correct value */
*valp = val;
} else {
- update_defense_level();
+ update_defense_level(net_ipvs(net));
}
}
return rc;
@@ -1534,45 +1525,54 @@ proc_do_sync_threshold(ctl_table *table, int write,
return rc;
}
+static int
+proc_do_sync_mode(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int *valp = table->data;
+ int val = *valp;
+ int rc;
+
+ rc = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (write && (*valp != val)) {
+ if ((*valp < 0) || (*valp > 1)) {
+ /* Restore the correct value */
+ *valp = val;
+ } else {
+ struct net *net = current->nsproxy->net_ns;
+ ip_vs_sync_switch_mode(net, val);
+ }
+ }
+ return rc;
+}
/*
* IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
+ * Do not change the order or insert new entries without
+ * aligning with the netns init in __ip_vs_control_init()
*/
static struct ctl_table vs_vars[] = {
{
.procname = "amemthresh",
- .data = &sysctl_ip_vs_amemthresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#ifdef CONFIG_IP_VS_DEBUG
- {
- .procname = "debug_level",
- .data = &sysctl_ip_vs_debug_level,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
{
.procname = "am_droprate",
- .data = &sysctl_ip_vs_am_droprate,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "drop_entry",
- .data = &sysctl_ip_vs_drop_entry,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "drop_packet",
- .data = &sysctl_ip_vs_drop_packet,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
@@ -1580,7 +1580,6 @@ static struct ctl_table vs_vars[] = {
#ifdef CONFIG_IP_VS_NFCT
{
.procname = "conntrack",
- .data = &sysctl_ip_vs_conntrack,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
@@ -1588,18 +1587,62 @@ static struct ctl_table vs_vars[] = {
#endif
{
.procname = "secure_tcp",
- .data = &sysctl_ip_vs_secure_tcp,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "snat_reroute",
- .data = &sysctl_ip_vs_snat_reroute,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .procname = "sync_version",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_do_sync_mode,
+ },
+ {
+ .procname = "cache_bypass",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "expire_nodest_conn",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "expire_quiescent_template",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sync_threshold",
+ .maxlen =
+ sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
+ .mode = 0644,
+ .proc_handler = proc_do_sync_threshold,
+ },
+ {
+ .procname = "nat_icmp_send",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_IP_VS_DEBUG
+ {
+ .procname = "debug_level",
+ .data = &sysctl_ip_vs_debug_level,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#if 0
{
.procname = "timeout_established",
@@ -1686,41 +1729,6 @@ static struct ctl_table vs_vars[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
- {
- .procname = "cache_bypass",
- .data = &sysctl_ip_vs_cache_bypass,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expire_nodest_conn",
- .data = &sysctl_ip_vs_expire_nodest_conn,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expire_quiescent_template",
- .data = &sysctl_ip_vs_expire_quiescent_template,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sync_threshold",
- .data = &sysctl_ip_vs_sync_threshold,
- .maxlen = sizeof(sysctl_ip_vs_sync_threshold),
- .mode = 0644,
- .proc_handler = proc_do_sync_threshold,
- },
- {
- .procname = "nat_icmp_send",
- .data = &sysctl_ip_vs_nat_icmp_send,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
{ }
};
@@ -1732,11 +1740,10 @@ const struct ctl_path net_vs_ctl_path[] = {
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
-static struct ctl_table_header * sysctl_header;
-
#ifdef CONFIG_PROC_FS
struct ip_vs_iter {
+ struct seq_net_private p; /* Do not move this, netns depends upon it */
struct list_head *table;
int bucket;
};
@@ -1763,6 +1770,7 @@ static inline const char *ip_vs_fwd_name(unsigned flags)
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
+ struct net *net = seq_file_net(seq);
struct ip_vs_iter *iter = seq->private;
int idx;
struct ip_vs_service *svc;
@@ -1770,7 +1778,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- if (pos-- == 0){
+ if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
@@ -1781,7 +1789,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* keep looking in fwmark */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
- if (pos-- == 0) {
+ if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_fwm_table;
iter->bucket = idx;
return svc;
@@ -1935,7 +1943,7 @@ static const struct seq_operations ip_vs_info_seq_ops = {
static int ip_vs_info_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &ip_vs_info_seq_ops,
+ return seq_open_net(inode, file, &ip_vs_info_seq_ops,
sizeof(struct ip_vs_iter));
}
@@ -1949,13 +1957,11 @@ static const struct file_operations ip_vs_info_fops = {
#endif
-struct ip_vs_stats ip_vs_stats = {
- .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
+ struct net *net = seq_file_single_net(seq);
+ struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
@@ -1963,29 +1969,29 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
seq_printf(seq,
" Conns Packets Packets Bytes Bytes\n");
- spin_lock_bh(&ip_vs_stats.lock);
- seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
- ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
- (unsigned long long) ip_vs_stats.ustats.inbytes,
- (unsigned long long) ip_vs_stats.ustats.outbytes);
+ spin_lock_bh(&tot_stats->lock);
+ seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
+ tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
+ (unsigned long long) tot_stats->ustats.inbytes,
+ (unsigned long long) tot_stats->ustats.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq,"%8X %8X %8X %16X %16X\n",
- ip_vs_stats.ustats.cps,
- ip_vs_stats.ustats.inpps,
- ip_vs_stats.ustats.outpps,
- ip_vs_stats.ustats.inbps,
- ip_vs_stats.ustats.outbps);
- spin_unlock_bh(&ip_vs_stats.lock);
+ tot_stats->ustats.cps,
+ tot_stats->ustats.inpps,
+ tot_stats->ustats.outpps,
+ tot_stats->ustats.inbps,
+ tot_stats->ustats.outbps);
+ spin_unlock_bh(&tot_stats->lock);
return 0;
}
static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, ip_vs_stats_show, NULL);
+ return single_open_net(inode, file, ip_vs_stats_show);
}
static const struct file_operations ip_vs_stats_fops = {
@@ -1996,13 +2002,70 @@ static const struct file_operations ip_vs_stats_fops = {
.release = single_release,
};
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+ struct net *net = seq_file_single_net(seq);
+ struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+ int i;
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_puts(seq,
+ " Total Incoming Outgoing Incoming Outgoing\n");
+ seq_printf(seq,
+ "CPU Conns Packets Packets Bytes Bytes\n");
+
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+ seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+ i, u->ustats.conns, u->ustats.inpkts,
+ u->ustats.outpkts, (__u64)u->ustats.inbytes,
+ (__u64)u->ustats.outbytes);
+ }
+
+ spin_lock_bh(&tot_stats->lock);
+ seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
+ tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+ tot_stats->ustats.outpkts,
+ (unsigned long long) tot_stats->ustats.inbytes,
+ (unsigned long long) tot_stats->ustats.outbytes);
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_puts(seq,
+ " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+ seq_printf(seq, " %8X %8X %8X %16X %16X\n",
+ tot_stats->ustats.cps,
+ tot_stats->ustats.inpps,
+ tot_stats->ustats.outpps,
+ tot_stats->ustats.inbps,
+ tot_stats->ustats.outbps);
+ spin_unlock_bh(&tot_stats->lock);
+
+ return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+ .owner = THIS_MODULE,
+ .open = ip_vs_stats_percpu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
/*
* Set timeout values for tcp tcpfin udp in the timeout_table.
*/
-static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
{
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+ struct ip_vs_proto_data *pd;
+#endif
+
IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
u->tcp_timeout,
u->tcp_fin_timeout,
@@ -2010,19 +2073,22 @@ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
= u->tcp_timeout * HZ;
}
if (u->tcp_fin_timeout) {
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
= u->tcp_fin_timeout * HZ;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout) {
- ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
+ pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+ pd->timeout_table[IP_VS_UDP_S_NORMAL]
= u->udp_timeout * HZ;
}
#endif
@@ -2087,6 +2153,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
+ struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
@@ -2121,19 +2188,20 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
- ret = ip_vs_flush();
+ ret = ip_vs_flush(net);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
- ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
+ ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
+ ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+ dm->syncid);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = stop_sync_thread(dm->state);
+ ret = stop_sync_thread(net, dm->state);
goto out_unlock;
}
@@ -2148,7 +2216,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
- ret = ip_vs_zero_all();
+ ret = ip_vs_zero_all(net);
goto out_unlock;
}
}
@@ -2165,10 +2233,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Lookup the exact service by <protocol, addr, port> or fwmark */
if (usvc.fwmark == 0)
- svc = __ip_vs_service_find(usvc.af, usvc.protocol,
+ svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
- svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark);
+ svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2181,7 +2249,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (svc != NULL)
ret = -EEXIST;
else
- ret = ip_vs_add_service(&usvc, &svc);
+ ret = ip_vs_add_service(net, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
@@ -2241,7 +2309,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
}
static inline int
-__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
+__ip_vs_get_service_entries(struct net *net,
+ const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
int idx, count=0;
@@ -2252,7 +2321,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
/* Only expose IPv4 entries to old interface */
- if (svc->af != AF_INET)
+ if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
@@ -2271,7 +2340,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
/* Only expose IPv4 entries to old interface */
- if (svc->af != AF_INET)
+ if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
@@ -2291,7 +2360,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
}
static inline int
-__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
struct ip_vs_get_dests __user *uptr)
{
struct ip_vs_service *svc;
@@ -2299,9 +2368,9 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
int ret = 0;
if (get->fwmark)
- svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
else
- svc = __ip_vs_service_find(AF_INET, get->protocol, &addr,
+ svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
get->port);
if (svc) {
@@ -2336,17 +2405,21 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
}
static inline void
-__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
{
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+ struct ip_vs_proto_data *pd;
+#endif
+
#ifdef CONFIG_IP_VS_PROTO_TCP
- u->tcp_timeout =
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
- u->tcp_fin_timeout =
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
+ u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
+ pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
u->udp_timeout =
- ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
+ pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
@@ -2375,7 +2448,10 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
+ struct net *net = sock_net(sk);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ BUG_ON(!net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2418,7 +2494,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
- info.num_services = ip_vs_num_services;
+ info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
@@ -2437,7 +2513,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
goto out;
}
- ret = __ip_vs_get_service_entries(get, user);
+ ret = __ip_vs_get_service_entries(net, get, user);
}
break;
@@ -2450,10 +2526,11 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
- svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
- svc = __ip_vs_service_find(AF_INET, entry->protocol,
- &addr, entry->port);
+ svc = __ip_vs_service_find(net, AF_INET,
+ entry->protocol, &addr,
+ entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2476,7 +2553,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
goto out;
}
- ret = __ip_vs_get_dest_entries(get, user);
+ ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
@@ -2484,7 +2561,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
@@ -2495,15 +2572,17 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
- if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
+ if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
- d[0].syncid = ip_vs_master_syncid;
+ strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+ sizeof(d[0].mcast_ifn));
+ d[0].syncid = ipvs->master_syncid;
}
- if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
+ if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
- d[1].syncid = ip_vs_backup_syncid;
+ strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+ sizeof(d[1].mcast_ifn));
+ d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
@@ -2542,6 +2621,7 @@ static struct genl_family ip_vs_genl_family = {
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
.maxattr = IPVS_CMD_MAX,
+ .netnsok = true, /* Make ipvsadm work on netns */
};
/* Policy used for first-level command attributes */
@@ -2696,11 +2776,12 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
int idx = 0, i;
int start = cb->args[0];
struct ip_vs_service *svc;
+ struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
- if (++idx <= start)
+ if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
@@ -2711,7 +2792,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
- if (++idx <= start)
+ if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
@@ -2727,7 +2808,8 @@ nla_put_failure:
return skb->len;
}
-static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+static int ip_vs_genl_parse_service(struct net *net,
+ struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
struct ip_vs_service **ret_svc)
{
@@ -2770,9 +2852,9 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
}
if (usvc->fwmark)
- svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
else
- svc = __ip_vs_service_find(usvc->af, usvc->protocol,
+ svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
&usvc->addr, usvc->port);
*ret_svc = svc;
@@ -2809,13 +2891,14 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
return 0;
}
-static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+ struct nlattr *nla)
{
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
int ret;
- ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc);
+ ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
return ret ? ERR_PTR(ret) : svc;
}
@@ -2883,6 +2966,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+ struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
@@ -2891,7 +2975,8 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
goto out_err;
- svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+
+ svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc) || svc == NULL)
goto out_err;
@@ -3005,20 +3090,23 @@ nla_put_failure:
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ struct net *net = skb_net(skb);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
mutex_lock(&__ip_vs_mutex);
- if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
- ip_vs_master_mcast_ifn,
- ip_vs_master_syncid, cb) < 0)
+ ipvs->master_mcast_ifn,
+ ipvs->master_syncid, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
}
- if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+ if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
- ip_vs_backup_mcast_ifn,
- ip_vs_backup_syncid, cb) < 0)
+ ipvs->backup_mcast_ifn,
+ ipvs->backup_syncid, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
@@ -3030,31 +3118,33 @@ nla_put_failure:
return skb->len;
}
-static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
{
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
- return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+ return start_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
}
-static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
{
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
- return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+ return stop_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
}
-static int ip_vs_genl_set_config(struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3066,7 +3156,7 @@ static int ip_vs_genl_set_config(struct nlattr **attrs)
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
- return ip_vs_set_timeout(&t);
+ return ip_vs_set_timeout(net, &t);
}
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -3076,16 +3166,20 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
int need_full_svc = 0, need_full_dest = 0;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
if (cmd == IPVS_CMD_FLUSH) {
- ret = ip_vs_flush();
+ ret = ip_vs_flush(net);
goto out;
} else if (cmd == IPVS_CMD_SET_CONFIG) {
- ret = ip_vs_genl_set_config(info->attrs);
+ ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_NEW_DAEMON ||
cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3101,13 +3195,13 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
}
if (cmd == IPVS_CMD_NEW_DAEMON)
- ret = ip_vs_genl_new_daemon(daemon_attrs);
+ ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
- ret = ip_vs_genl_del_daemon(daemon_attrs);
+ ret = ip_vs_genl_del_daemon(net, daemon_attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
- ret = ip_vs_zero_all();
+ ret = ip_vs_zero_all(net);
goto out;
}
@@ -3117,7 +3211,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
need_full_svc = 1;
- ret = ip_vs_genl_parse_service(&usvc,
+ ret = ip_vs_genl_parse_service(net, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
need_full_svc, &svc);
if (ret)
@@ -3147,7 +3241,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
switch (cmd) {
case IPVS_CMD_NEW_SERVICE:
if (svc == NULL)
- ret = ip_vs_add_service(&usvc, &svc);
+ ret = ip_vs_add_service(net, &usvc, &svc);
else
ret = -EEXIST;
break;
@@ -3185,7 +3279,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
void *reply;
int ret, cmd, reply_cmd;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3214,7 +3312,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_service *svc;
- svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+ svc = ip_vs_genl_find_service(net,
+ info->attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc)) {
ret = PTR_ERR(svc);
goto out_err;
@@ -3234,7 +3333,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
@@ -3380,62 +3479,173 @@ static void ip_vs_genl_unregister(void)
/* End of Generic Netlink interface definitions */
+/*
+ * per netns init/exit functions.
+ */
+int __net_init __ip_vs_control_init(struct net *net)
+{
+ int idx;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ctl_table *tbl;
+
+ atomic_set(&ipvs->dropentry, 0);
+ spin_lock_init(&ipvs->dropentry_lock);
+ spin_lock_init(&ipvs->droppacket_lock);
+ spin_lock_init(&ipvs->securetcp_lock);
+ ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+ /* Initialize rs_table */
+ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+ INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+ INIT_LIST_HEAD(&ipvs->dest_trash);
+ atomic_set(&ipvs->ftpsvc_counter, 0);
+ atomic_set(&ipvs->nullsvc_counter, 0);
+
+ /* procfs stats */
+ ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
+ if (ipvs->tot_stats == NULL) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!ipvs->cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto err_alloc;
+ }
+ spin_lock_init(&ipvs->tot_stats->lock);
+
+ proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+ proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+ proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+ &ip_vs_stats_percpu_fops);
+
+ if (!net_eq(net, &init_net)) {
+ tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_dup;
+ } else
+ tbl = vs_vars;
+ /* Initialize sysctl defaults */
+ idx = 0;
+ ipvs->sysctl_amemthresh = 1024;
+ tbl[idx++].data = &ipvs->sysctl_amemthresh;
+ ipvs->sysctl_am_droprate = 10;
+ tbl[idx++].data = &ipvs->sysctl_am_droprate;
+ tbl[idx++].data = &ipvs->sysctl_drop_entry;
+ tbl[idx++].data = &ipvs->sysctl_drop_packet;
+#ifdef CONFIG_IP_VS_NFCT
+ tbl[idx++].data = &ipvs->sysctl_conntrack;
+#endif
+ tbl[idx++].data = &ipvs->sysctl_secure_tcp;
+ ipvs->sysctl_snat_reroute = 1;
+ tbl[idx++].data = &ipvs->sysctl_snat_reroute;
+ ipvs->sysctl_sync_ver = 1;
+ tbl[idx++].data = &ipvs->sysctl_sync_ver;
+ tbl[idx++].data = &ipvs->sysctl_cache_bypass;
+ tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
+ tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
+ ipvs->sysctl_sync_threshold[0] = 3;
+ ipvs->sysctl_sync_threshold[1] = 50;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
+ tbl);
+ if (ipvs->sysctl_hdr == NULL) {
+ if (!net_eq(net, &init_net))
+ kfree(tbl);
+ goto err_dup;
+ }
+#endif
+ ip_vs_new_estimator(net, ipvs->tot_stats);
+ ipvs->sysctl_tbl = tbl;
+ /* Schedule defense work */
+ INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
+ schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+ return 0;
+
+err_dup:
+ free_percpu(ipvs->cpustats);
+err_alloc:
+ kfree(ipvs->tot_stats);
+ return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_trash_cleanup(net);
+ ip_vs_kill_estimator(net, ipvs->tot_stats);
+ cancel_delayed_work_sync(&ipvs->defense_work);
+ cancel_work_sync(&ipvs->defense_work.work);
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->sysctl_hdr);
+#endif
+ proc_net_remove(net, "ip_vs_stats_percpu");
+ proc_net_remove(net, "ip_vs_stats");
+ proc_net_remove(net, "ip_vs");
+ free_percpu(ipvs->cpustats);
+ kfree(ipvs->tot_stats);
+}
+
+static struct pernet_operations ipvs_control_ops = {
+ .init = __ip_vs_control_init,
+ .exit = __ip_vs_control_cleanup,
+};
int __init ip_vs_control_init(void)
{
- int ret;
int idx;
+ int ret;
EnterFunction(2);
- /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
+ /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
- for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++) {
- INIT_LIST_HEAD(&ip_vs_rtable[idx]);
+
+ ret = register_pernet_subsys(&ipvs_control_ops);
+ if (ret) {
+ pr_err("cannot register namespace.\n");
+ goto err;
}
- smp_wmb();
+
+ smp_wmb(); /* Do we really need it now? */
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
- return ret;
+ goto err_net;
}
ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
nf_unregister_sockopt(&ip_vs_sockopts);
- return ret;
+ goto err_net;
}
- proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
- proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
-
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
-
- ip_vs_new_estimator(&ip_vs_stats);
-
- /* Hook the defense timer */
- schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
-
LeaveFunction(2);
return 0;
+
+err_net:
+ unregister_pernet_subsys(&ipvs_control_ops);
+err:
+ return ret;
}
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
- ip_vs_trash_cleanup();
- cancel_delayed_work_sync(&defense_work);
- cancel_work_sync(&defense_work.work);
- ip_vs_kill_estimator(&ip_vs_stats);
- unregister_sysctl_table(sysctl_header);
- proc_net_remove(&init_net, "ip_vs_stats");
- proc_net_remove(&init_net, "ip_vs");
+ unregister_pernet_subsys(&ipvs_control_ops);
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
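
The ip_vs_ctl.c hunks above all apply one pattern: data that used to be module-global (rs_table, dest_trash, the service counters, the sysctl values, the defense work) becomes a field of struct netns_ipvs, and a pernet_operations pair registered with register_pernet_subsys() creates and tears down that state for every namespace. A minimal sketch of the pattern follows; the names my_state and my_subsys_* are hypothetical stand-ins, and IPVS itself reaches its per-net data through net_ipvs(net) rather than the generic net_generic() storage used here:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct my_state {			/* hypothetical per-netns state */
	struct list_head objs;
	spinlock_t lock;
};

static int my_subsys_id __read_mostly;

static int __net_init my_subsys_init(struct net *net)
{
	struct my_state *st = net_generic(net, my_subsys_id);

	INIT_LIST_HEAD(&st->objs);
	spin_lock_init(&st->lock);
	return 0;
}

static void __net_exit my_subsys_exit(struct net *net)
{
	/* undo everything my_subsys_init() set up for this namespace */
}

static struct pernet_operations my_subsys_ops = {
	.init = my_subsys_init,
	.exit = my_subsys_exit,
	.id   = &my_subsys_id,
	.size = sizeof(struct my_state),
};

int __init my_subsys_register(void)
{
	/* one call from module init; the kernel then runs my_subsys_init()
	 * for every existing and every future network namespace */
	return register_pernet_subsys(&my_subsys_ops);
}
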
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ff28801962e0..f560a05c965a 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -8,8 +8,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
- *
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * Network name space (netns) aware.
+ * Global data moved to netns, i.e. struct netns_ipvs.
+ * Affected data: est_list and est_lock.
+ * estimation_timer() runs with a timer per netns.
+ * get_stats() does the per-cpu summing.
*/
#define KMSG_COMPONENT "IPVS"
@@ -48,11 +52,42 @@
*/
-static void estimation_timer(unsigned long arg);
+/*
+ * Make a summary from each cpu
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+ struct ip_vs_cpu_stats *stats)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+ unsigned int start;
+ __u64 inbytes, outbytes;
+ if (i) {
+ sum->conns += s->ustats.conns;
+ sum->inpkts += s->ustats.inpkts;
+ sum->outpkts += s->ustats.outpkts;
+ do {
+ start = u64_stats_fetch_begin_bh(&s->syncp);
+ inbytes = s->ustats.inbytes;
+ outbytes = s->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ sum->inbytes += inbytes;
+ sum->outbytes += outbytes;
+ } else {
+ sum->conns = s->ustats.conns;
+ sum->inpkts = s->ustats.inpkts;
+ sum->outpkts = s->ustats.outpkts;
+ do {
+ start = u64_stats_fetch_begin_bh(&s->syncp);
+ sum->inbytes = s->ustats.inbytes;
+ sum->outbytes = s->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ }
+ }
+}
-static LIST_HEAD(est_list);
-static DEFINE_SPINLOCK(est_lock);
-static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
static void estimation_timer(unsigned long arg)
{
@@ -62,11 +97,16 @@ static void estimation_timer(unsigned long arg)
u32 n_inpkts, n_outpkts;
u64 n_inbytes, n_outbytes;
u32 rate;
+ struct net *net = (struct net *)arg;
+ struct netns_ipvs *ipvs;
- spin_lock(&est_lock);
- list_for_each_entry(e, &est_list, list) {
+ ipvs = net_ipvs(net);
+ ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
+ spin_lock(&ipvs->est_lock);
+ list_for_each_entry(e, &ipvs->est_list, list) {
s = container_of(e, struct ip_vs_stats, est);
+ ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
spin_lock(&s->lock);
n_conns = s->ustats.conns;
n_inpkts = s->ustats.inpkts;
@@ -75,38 +115,39 @@ static void estimation_timer(unsigned long arg)
n_outbytes = s->ustats.outbytes;
/* scaled by 2^10, but divided 2 seconds */
- rate = (n_conns - e->last_conns)<<9;
+ rate = (n_conns - e->last_conns) << 9;
e->last_conns = n_conns;
- e->cps += ((long)rate - (long)e->cps)>>2;
- s->ustats.cps = (e->cps+0x1FF)>>10;
+ e->cps += ((long)rate - (long)e->cps) >> 2;
+ s->ustats.cps = (e->cps + 0x1FF) >> 10;
- rate = (n_inpkts - e->last_inpkts)<<9;
+ rate = (n_inpkts - e->last_inpkts) << 9;
e->last_inpkts = n_inpkts;
- e->inpps += ((long)rate - (long)e->inpps)>>2;
- s->ustats.inpps = (e->inpps+0x1FF)>>10;
+ e->inpps += ((long)rate - (long)e->inpps) >> 2;
+ s->ustats.inpps = (e->inpps + 0x1FF) >> 10;
- rate = (n_outpkts - e->last_outpkts)<<9;
+ rate = (n_outpkts - e->last_outpkts) << 9;
e->last_outpkts = n_outpkts;
- e->outpps += ((long)rate - (long)e->outpps)>>2;
- s->ustats.outpps = (e->outpps+0x1FF)>>10;
+ e->outpps += ((long)rate - (long)e->outpps) >> 2;
+ s->ustats.outpps = (e->outpps + 0x1FF) >> 10;
- rate = (n_inbytes - e->last_inbytes)<<4;
+ rate = (n_inbytes - e->last_inbytes) << 4;
e->last_inbytes = n_inbytes;
- e->inbps += ((long)rate - (long)e->inbps)>>2;
- s->ustats.inbps = (e->inbps+0xF)>>5;
+ e->inbps += ((long)rate - (long)e->inbps) >> 2;
+ s->ustats.inbps = (e->inbps + 0xF) >> 5;
- rate = (n_outbytes - e->last_outbytes)<<4;
+ rate = (n_outbytes - e->last_outbytes) << 4;
e->last_outbytes = n_outbytes;
- e->outbps += ((long)rate - (long)e->outbps)>>2;
- s->ustats.outbps = (e->outbps+0xF)>>5;
+ e->outbps += ((long)rate - (long)e->outbps) >> 2;
+ s->ustats.outbps = (e->outbps + 0xF) >> 5;
spin_unlock(&s->lock);
}
- spin_unlock(&est_lock);
- mod_timer(&est_timer, jiffies + 2*HZ);
+ spin_unlock(&ipvs->est_lock);
+ mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
}
-void ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
INIT_LIST_HEAD(&est->list);
@@ -126,18 +167,19 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
est->last_outbytes = stats->ustats.outbytes;
est->outbps = stats->ustats.outbps<<5;
- spin_lock_bh(&est_lock);
- list_add(&est->list, &est_list);
- spin_unlock_bh(&est_lock);
+ spin_lock_bh(&ipvs->est_lock);
+ list_add(&est->list, &ipvs->est_list);
+ spin_unlock_bh(&ipvs->est_lock);
}
-void ip_vs_kill_estimator(struct ip_vs_stats *stats)
+void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
- spin_lock_bh(&est_lock);
+ spin_lock_bh(&ipvs->est_lock);
list_del(&est->list);
- spin_unlock_bh(&est_lock);
+ spin_unlock_bh(&ipvs->est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
@@ -157,13 +199,35 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
est->outbps = 0;
}
-int __init ip_vs_estimator_init(void)
+static int __net_init __ip_vs_estimator_init(struct net *net)
{
- mod_timer(&est_timer, jiffies + 2 * HZ);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->est_list);
+ spin_lock_init(&ipvs->est_lock);
+ setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+ mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
return 0;
}
+static void __net_exit __ip_vs_estimator_exit(struct net *net)
+{
+ del_timer_sync(&net_ipvs(net)->est_timer);
+}
+static struct pernet_operations ip_vs_app_ops = {
+ .init = __ip_vs_estimator_init,
+ .exit = __ip_vs_estimator_exit,
+};
+
+int __init ip_vs_estimator_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_app_ops);
+ return rv;
+}
+
void ip_vs_estimator_cleanup(void)
{
- del_timer_sync(&est_timer);
+ unregister_pernet_subsys(&ip_vs_app_ops);
}
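
The estimation_timer() arithmetic touched above is only re-spaced by this patch; it is a fixed-point exponential moving average run every 2 seconds. Reading the connection-rate lines from the hunk, with the same variables, just annotated:

	rate = (n_conns - e->last_conns) << 9;		/* delta * 512 = (delta / 2s) * 1024: per-second rate scaled by 2^10 */
	e->cps += ((long)rate - (long)e->cps) >> 2;	/* EWMA with weight 1/4: cps = 0.75*cps + 0.25*rate */
	s->ustats.cps = (e->cps + 0x1FF) >> 10;		/* round and strip the 2^10 scaling for the exported value */

The packet rates follow the same 2^10 scheme; the byte rates use << 4 and >> 5 instead, i.e. a 2^5 scale factor.
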
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 75455000ad1c..6b5dd6ddaae9 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -157,6 +157,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
int ret = 0;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
+ struct net *net;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -197,18 +198,20 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, iph->protocol,
- &from, port, &cp->caddr, 0, &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+ iph->protocol, &from, port,
+ &cp->caddr, 0, &p);
n_cp = ip_vs_conn_out_get(&p);
}
if (!n_cp) {
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr,
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+ AF_INET, IPPROTO_TCP, &cp->caddr,
0, &cp->vaddr, port, &p);
n_cp = ip_vs_conn_new(&p, &from, port,
IP_VS_CONN_F_NO_CPORT |
IP_VS_CONN_F_NFCT,
- cp->dest);
+ cp->dest, skb->mark);
if (!n_cp)
return 0;
@@ -257,8 +260,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
* would be adjusted twice.
*/
+ net = skb_net(skb);
cp->app_data = NULL;
- ip_vs_tcp_conn_listen(n_cp);
+ ip_vs_tcp_conn_listen(net, n_cp);
ip_vs_conn_put(n_cp);
return ret;
}
@@ -287,6 +291,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
+ struct net *net;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -358,14 +363,15 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port,
- &cp->vaddr, htons(ntohs(cp->vport)-1),
- &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+ iph->protocol, &to, port, &cp->vaddr,
+ htons(ntohs(cp->vport)-1), &p);
n_cp = ip_vs_conn_in_get(&p);
if (!n_cp) {
n_cp = ip_vs_conn_new(&p, &cp->daddr,
htons(ntohs(cp->dport)-1),
- IP_VS_CONN_F_NFCT, cp->dest);
+ IP_VS_CONN_F_NFCT, cp->dest,
+ skb->mark);
if (!n_cp)
return 0;
@@ -377,7 +383,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
/*
* Move tunnel to listen state
*/
- ip_vs_tcp_conn_listen(n_cp);
+ net = skb_net(skb);
+ ip_vs_tcp_conn_listen(net, n_cp);
ip_vs_conn_put(n_cp);
return 1;
@@ -398,23 +405,22 @@ static struct ip_vs_app ip_vs_ftp = {
.pkt_in = ip_vs_ftp_in,
};
-
/*
- * ip_vs_ftp initialization
+ * per netns ip_vs_ftp initialization
*/
-static int __init ip_vs_ftp_init(void)
+static int __net_init __ip_vs_ftp_init(struct net *net)
{
int i, ret;
struct ip_vs_app *app = &ip_vs_ftp;
- ret = register_ip_vs_app(app);
+ ret = register_ip_vs_app(net, app);
if (ret)
return ret;
for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
if (!ports[i])
continue;
- ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
+ ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
if (ret)
break;
pr_info("%s: loaded support on port[%d] = %d\n",
@@ -422,18 +428,39 @@ static int __init ip_vs_ftp_init(void)
}
if (ret)
- unregister_ip_vs_app(app);
+ unregister_ip_vs_app(net, app);
return ret;
}
+/*
+ * netns exit
+ */
+static void __ip_vs_ftp_exit(struct net *net)
+{
+ struct ip_vs_app *app = &ip_vs_ftp;
+
+ unregister_ip_vs_app(net, app);
+}
+
+static struct pernet_operations ip_vs_ftp_ops = {
+ .init = __ip_vs_ftp_init,
+ .exit = __ip_vs_ftp_exit,
+};
+int __init ip_vs_ftp_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_ftp_ops);
+ return rv;
+}
/*
* ip_vs_ftp finish.
*/
static void __exit ip_vs_ftp_exit(void)
{
- unregister_ip_vs_app(&ip_vs_ftp);
+ unregister_pernet_subsys(&ip_vs_ftp_ops);
}
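
The ip_vs_ftp and estimator conversions above follow one recipe: the body of the old module init/exit moves into the .init/.exit pair of a struct pernet_operations, and the module init shrinks to register_pernet_subsys(). A minimal sketch of that recipe for a hypothetical subsystem (it uses the generic net_generic() storage rather than IPVS's dedicated struct netns_ipvs pointer; all names below are illustrative):

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* Hypothetical per-netns state, allocated by the pernet core via .size */
struct foo_net {
	int configured;
};

static int foo_net_id __read_mostly;

static int __net_init foo_net_init(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->configured = 1;	/* set up per-namespace state here */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* tear down per-namespace state here */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
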
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9323f8944199..6bf7a807649c 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -70,7 +70,6 @@
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
-static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
/*
@@ -117,7 +116,7 @@ struct ip_vs_lblc_table {
static ctl_table vs_vars_table[] = {
{
.procname = "lblc_expiration",
- .data = &sysctl_ip_vs_lblc_expiration,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -125,8 +124,6 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
-
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
list_del(&en->list);
@@ -248,6 +245,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
struct ip_vs_lblc_entry *en, *nxt;
unsigned long now = jiffies;
int i, j;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
@@ -255,7 +253,8 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now,
- en->lastuse + sysctl_ip_vs_lblc_expiration))
+ en->lastuse +
+ ipvs->sysctl_lblc_expiration))
continue;
ip_vs_lblc_free(en);
@@ -390,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
int loh, doh;
/*
- * We think the overhead of processing active connections is fifty
- * times higher than that of inactive connections in average. (This
- * fifty times might not be accurate, we will change it later.) We
- * use the following formula to estimate the overhead:
- * dest->activeconns*50 + dest->inactconns
- * and the load:
+ * We use the following formula to estimate the load:
* (dest overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
@@ -411,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
continue;
if (atomic_read(&dest->weight) > 0) {
least = dest;
- loh = atomic_read(&least->activeconns) * 50
- + atomic_read(&least->inactconns);
+ loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
@@ -426,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
- doh = atomic_read(&dest->activeconns) * 50
- + atomic_read(&dest->inactconns);
+ doh = ip_vs_dest_conn_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
least = dest;
@@ -511,7 +503,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* No cache entry or it is invalid, time to schedule */
dest = __ip_vs_lblc_schedule(svc);
if (!dest) {
- IP_VS_ERR_RL("LBLC: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
@@ -543,23 +535,73 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.schedule = ip_vs_lblc_schedule,
};
+/*
+ * per netns init.
+ */
+static int __net_init __ip_vs_lblc_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!net_eq(net, &init_net)) {
+ ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
+ sizeof(vs_vars_table),
+ GFP_KERNEL);
+ if (ipvs->lblc_ctl_table == NULL)
+ return -ENOMEM;
+ } else
+ ipvs->lblc_ctl_table = vs_vars_table;
+ ipvs->sysctl_lblc_expiration = 24*60*60*HZ;
+ ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->lblc_ctl_header =
+ register_net_sysctl_table(net, net_vs_ctl_path,
+ ipvs->lblc_ctl_table);
+ if (!ipvs->lblc_ctl_header) {
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblc_ctl_table);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static void __net_exit __ip_vs_lblc_exit(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->lblc_ctl_header);
+#endif
+
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblc_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblc_ops = {
+ .init = __ip_vs_lblc_init,
+ .exit = __ip_vs_lblc_exit,
+};
static int __init ip_vs_lblc_init(void)
{
int ret;
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+ ret = register_pernet_subsys(&ip_vs_lblc_ops);
+ if (ret)
+ return ret;
+
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
- unregister_sysctl_table(sysctl_header);
+ unregister_pernet_subsys(&ip_vs_lblc_ops);
return ret;
}
-
static void __exit ip_vs_lblc_cleanup(void)
{
- unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+ unregister_pernet_subsys(&ip_vs_lblc_ops);
}
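
The per-netns sysctl handling in __ip_vs_lblc_init() above works by giving every namespace a ctl_table whose .data points into its own struct netns_ipvs: init_net keeps the static vs_vars_table (its .data is simply re-pointed), while every other namespace gets a kmemdup() copy. A condensed sketch of just that re-binding step, using the names from the hunk (the helper itself is illustrative, not part of the patch):

/* Sketch only: how each namespace's lblc_expiration sysctl ends up backed
 * by its own field; the real work is open-coded in __ip_vs_lblc_init().
 */
static struct ctl_table *dup_vs_vars_table(struct net *net,
					   struct netns_ipvs *ipvs)
{
	struct ctl_table *tbl;

	if (net_eq(net, &init_net))
		tbl = vs_vars_table;	/* init_net reuses the static table */
	else
		tbl = kmemdup(vs_vars_table, sizeof(vs_vars_table), GFP_KERNEL);
	if (tbl)
		tbl[0].data = &ipvs->sysctl_lblc_expiration; /* per-netns backing */
	return tbl;
}
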
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index dbeed8ea421a..00631765b92a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -70,8 +70,6 @@
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
/*
* for IPVS lblcr entry hash table
@@ -180,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
if ((atomic_read(&least->weight) > 0)
&& (least->flags & IP_VS_DEST_F_AVAILABLE)) {
- loh = atomic_read(&least->activeconns) * 50
- + atomic_read(&least->inactconns);
+ loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
@@ -194,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
- doh = atomic_read(&dest->activeconns) * 50
- + atomic_read(&dest->inactconns);
+ doh = ip_vs_dest_conn_overhead(dest);
if ((loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight))
&& (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -230,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
list_for_each_entry(e, &set->list, list) {
most = e->dest;
if (atomic_read(&most->weight) > 0) {
- moh = atomic_read(&most->activeconns) * 50
- + atomic_read(&most->inactconns);
+ moh = ip_vs_dest_conn_overhead(most);
goto nextstage;
}
}
@@ -241,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
nextstage:
list_for_each_entry(e, &set->list, list) {
dest = e->dest;
- doh = atomic_read(&dest->activeconns) * 50
- + atomic_read(&dest->inactconns);
+ doh = ip_vs_dest_conn_overhead(dest);
/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
if ((moh * atomic_read(&dest->weight) <
doh * atomic_read(&most->weight))
@@ -296,7 +290,7 @@ struct ip_vs_lblcr_table {
static ctl_table vs_vars_table[] = {
{
.procname = "lblcr_expiration",
- .data = &sysctl_ip_vs_lblcr_expiration,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -304,8 +298,6 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
-
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
list_del(&en->list);
@@ -425,14 +417,15 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
unsigned long now = jiffies;
int i, j;
struct ip_vs_lblcr_entry *en, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
- if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
- now))
+ if (time_after(en->lastuse
+ + ipvs->sysctl_lblcr_expiration, now))
continue;
ip_vs_lblcr_free(en);
@@ -566,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
int loh, doh;
/*
- * We think the overhead of processing active connections is fifty
- * times higher than that of inactive connections in average. (This
- * fifty times might not be accurate, we will change it later.) We
- * use the following formula to estimate the overhead:
- * dest->activeconns*50 + dest->inactconns
- * and the load:
+ * We use the following formula to estimate the load:
* (dest overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
@@ -588,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
if (atomic_read(&dest->weight) > 0) {
least = dest;
- loh = atomic_read(&least->activeconns) * 50
- + atomic_read(&least->inactconns);
+ loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
@@ -603,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
- doh = atomic_read(&dest->activeconns) * 50
- + atomic_read(&dest->inactconns);
+ doh = ip_vs_dest_conn_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
least = dest;
@@ -664,6 +650,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
read_lock(&svc->sched_lock);
en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
if (en) {
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
/* We only hold a read lock, but this is atomic */
en->lastuse = jiffies;
@@ -675,7 +662,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* More than one destination + enough time passed by, cleanup */
if (atomic_read(&en->set.size) > 1 &&
time_after(jiffies, en->set.lastmod +
- sysctl_ip_vs_lblcr_expiration)) {
+ ipvs->sysctl_lblcr_expiration)) {
struct ip_vs_dest *m;
write_lock(&en->set.lock);
@@ -694,7 +681,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* The cache entry is invalid, time to schedule */
dest = __ip_vs_lblcr_schedule(svc);
if (!dest) {
- IP_VS_ERR_RL("LBLCR: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
read_unlock(&svc->sched_lock);
return NULL;
}
@@ -744,23 +731,73 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.schedule = ip_vs_lblcr_schedule,
};
+/*
+ * per netns init.
+ */
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!net_eq(net, &init_net)) {
+ ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+ sizeof(vs_vars_table),
+ GFP_KERNEL);
+ if (ipvs->lblcr_ctl_table == NULL)
+ return -ENOMEM;
+ } else
+ ipvs->lblcr_ctl_table = vs_vars_table;
+ ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+ ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->lblcr_ctl_header =
+ register_net_sysctl_table(net, net_vs_ctl_path,
+ ipvs->lblcr_ctl_table);
+ if (!ipvs->lblcr_ctl_header) {
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblcr_ctl_table);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+#endif
+
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblcr_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+ .init = __ip_vs_lblcr_init,
+ .exit = __ip_vs_lblcr_exit,
+};
static int __init ip_vs_lblcr_init(void)
{
int ret;
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+ ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+ if (ret)
+ return ret;
+
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
- unregister_sysctl_table(sysctl_header);
+ unregister_pernet_subsys(&ip_vs_lblcr_ops);
return ret;
}
-
static void __exit ip_vs_lblcr_cleanup(void)
{
- unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+ unregister_pernet_subsys(&ip_vs_lblcr_ops);
}
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 4f69db1fac56..f391819c0cca 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -22,22 +22,6 @@
#include <net/ip_vs.h>
-
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
- /*
- * We think the overhead of processing active connections is 256
- * times higher than that of inactive connections in average. (This
- * 256 times might not be accurate, we will change it later) We
- * use the following formula to estimate the overhead now:
- * dest->activeconns*256 + dest->inactconns
- */
- return (atomic_read(&dest->activeconns) << 8) +
- atomic_read(&dest->inactconns);
-}
-
-
/*
* Least Connection scheduling
*/
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
atomic_read(&dest->weight) == 0)
continue;
- doh = ip_vs_lc_dest_overhead(dest);
+ doh = ip_vs_dest_conn_overhead(dest);
if (!least || doh < loh) {
least = dest;
loh = doh;
@@ -70,7 +54,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
if (!least)
- IP_VS_ERR_RL("LC: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
else
IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
"inactconns %d\n",
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 4680647cd450..f454c80df0a7 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -141,6 +141,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
struct nf_conntrack_tuple *orig, new_reply;
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = nf_ct_net(ct);
if (exp->tuple.src.l3num != PF_INET)
return;
@@ -155,7 +156,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
/* RS->CLIENT */
orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum,
+ ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
&orig->src.u3, orig->src.u.tcp.port,
&orig->dst.u3, orig->dst.u.tcp.port, &p);
cp = ip_vs_conn_out_get(&p);
@@ -268,7 +269,8 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
" for conn " FMT_CONN "\n",
__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
- h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+ &tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
/* Show what happens instead of calling nf_ct_kill() */
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index c413e1830823..984d9c137d84 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -99,7 +99,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
if (!least) {
- IP_VS_ERR_RL("NQ: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 3414af70ee12..5cf859ccb31b 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -29,12 +29,11 @@ void ip_vs_unbind_pe(struct ip_vs_service *svc)
}
/* Get pe in the pe list by name */
-static struct ip_vs_pe *
-ip_vs_pe_getbyname(const char *pe_name)
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
{
struct ip_vs_pe *pe;
- IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__,
+ IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
pe_name);
spin_lock_bh(&ip_vs_pe_lock);
@@ -60,28 +59,22 @@ ip_vs_pe_getbyname(const char *pe_name)
}
/* Lookup pe and try to load it if it doesn't exist */
-struct ip_vs_pe *ip_vs_pe_get(const char *name)
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
{
struct ip_vs_pe *pe;
/* Search for the pe by name */
- pe = ip_vs_pe_getbyname(name);
+ pe = __ip_vs_pe_getbyname(name);
/* If pe not found, load the module and search again */
if (!pe) {
request_module("ip_vs_pe_%s", name);
- pe = ip_vs_pe_getbyname(name);
+ pe = __ip_vs_pe_getbyname(name);
}
return pe;
}
-void ip_vs_pe_put(struct ip_vs_pe *pe)
-{
- if (pe && pe->module)
- module_put(pe->module);
-}
-
/* Register a pe in the pe list */
int register_ip_vs_pe(struct ip_vs_pe *pe)
{
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index b8b4e9620f3e..0d83bc01fed4 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -71,6 +71,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
struct ip_vs_iphdr iph;
unsigned int dataoff, datalen, matchoff, matchlen;
const char *dptr;
+ int retc;
ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
@@ -83,6 +84,8 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
if (dataoff >= skb->len)
return -EINVAL;
+ if ((retc=skb_linearize(skb)) < 0)
+ return retc;
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
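
The skb_linearize() added to ip_vs_sip_fill_param() above guards the SIP parser, which walks the payload through skb->data: on a paged (non-linear) skb that pointer only covers the head fragment. A generic sketch of the linearize-before-parse pattern (do_parse() and the function name are hypothetical, not from the patch):

static int parse_payload(struct sk_buff *skb, unsigned int dataoff)
{
	const char *dptr;
	int err;

	if (dataoff >= skb->len)
		return -EINVAL;

	err = skb_linearize(skb);	/* pull paged frags into the head */
	if (err < 0)
		return err;

	dptr = skb->data + dataoff;	/* now safe to index linearly */
	return do_parse(dptr, skb->len - dataoff);	/* hypothetical parser */
}
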
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index c53998390877..17484a4416ef 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -60,6 +60,35 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
return 0;
}
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
+ defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
+ defined(CONFIG_IP_VS_PROTO_ESP)
+/*
+ * register an ipvs protocol's netns related data
+ */
+static int
+register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+ struct ip_vs_proto_data *pd =
+ kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+
+ if (!pd) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ pd->pp = pp; /* For speed issues */
+ pd->next = ipvs->proto_data_table[hash];
+ ipvs->proto_data_table[hash] = pd;
+ atomic_set(&pd->appcnt, 0); /* Init app counter */
+
+ if (pp->init_netns != NULL)
+ pp->init_netns(net, pd);
+
+ return 0;
+}
+#endif
/*
* unregister an ipvs protocol
@@ -82,6 +111,29 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
return -ESRCH;
}
+/*
+ * unregister an ipvs protocol's netns data
+ */
+static int
+unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data **pd_p;
+ unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+
+ pd_p = &ipvs->proto_data_table[hash];
+ for (; *pd_p; pd_p = &(*pd_p)->next) {
+ if (*pd_p == pd) {
+ *pd_p = pd->next;
+ if (pd->pp->exit_netns != NULL)
+ pd->pp->exit_netns(net, pd);
+ kfree(pd);
+ return 0;
+ }
+ }
+
+ return -ESRCH;
+}
/*
* get ip_vs_protocol object by its proto.
@@ -100,19 +152,44 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
}
EXPORT_SYMBOL(ip_vs_proto_get);
+/*
+ * get ip_vs_protocol object data by netns and proto
+ */
+struct ip_vs_proto_data *
+__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+{
+ struct ip_vs_proto_data *pd;
+ unsigned hash = IP_VS_PROTO_HASH(proto);
+
+ for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
+ if (pd->pp->protocol == proto)
+ return pd;
+ }
+
+ return NULL;
+}
+
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct net *net, unsigned short proto)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ return __ipvs_proto_data_get(ipvs, proto);
+}
+EXPORT_SYMBOL(ip_vs_proto_data_get);
/*
* Propagate event for state change to all protocols
*/
-void ip_vs_protocol_timeout_change(int flags)
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
{
- struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
int i;
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
- for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) {
- if (pp->timeout_change)
- pp->timeout_change(pp, flags);
+ for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
+ if (pd->pp->timeout_change)
+ pd->pp->timeout_change(pd, flags);
}
}
}
@@ -236,6 +313,46 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
}
+/*
+ * per network name-space init
+ */
+static int __net_init __ip_vs_protocol_init(struct net *net)
+{
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_AH
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_ESP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+#endif
+ return 0;
+}
+
+static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd;
+ int i;
+
+ /* unregister all the ipvs proto data for this netns */
+ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
+ while ((pd = ipvs->proto_data_table[i]) != NULL)
+ unregister_ip_vs_proto_netns(net, pd);
+ }
+}
+
+static struct pernet_operations ipvs_proto_ops = {
+ .init = __ip_vs_protocol_init,
+ .exit = __ip_vs_protocol_cleanup,
+};
int __init ip_vs_protocol_init(void)
{
@@ -265,6 +382,7 @@ int __init ip_vs_protocol_init(void)
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
pr_info("Registered protocols (%s)\n", &protocols[2]);
+ return register_pernet_subsys(&ipvs_proto_ops);
return 0;
}
@@ -275,6 +393,7 @@ void ip_vs_protocol_cleanup(void)
struct ip_vs_protocol *pp;
int i;
+ unregister_pernet_subsys(&ipvs_proto_ops);
/* unregister all the ipvs protocols */
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
while ((pp = ip_vs_proto_table[i]) != NULL)
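
The register/unregister_ip_vs_proto_netns() pair above hashes a small per-netns wrapper in front of each global struct ip_vs_protocol: the global struct keeps only code and constants, while everything mutable per namespace moves into ip_vs_proto_data. Its full definition is not part of this hunk; a sketch inferred from the fields used throughout this series (pd->pp, pd->next, pd->appcnt, pd->timeout_table, pd->tcp_state_table) would be roughly:

/* Assumed shape of the per-netns protocol wrapper; fields inferred from the
 * hunks in this series, not the authoritative definition.
 */
struct ip_vs_proto_data {
	struct ip_vs_proto_data	*next;		/* chain in ipvs->proto_data_table[] */
	struct ip_vs_protocol	*pp;		/* shared, read-mostly ops/constants */
	atomic_t		appcnt;		/* number of bound application incs */
	int			*timeout_table;	/* per-netns timeout copy */
	/* the TCP instance also carries its per-netns tcp_state_table */
};
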
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 3a0461117d3f..5b8eb8b12c3e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -41,28 +41,30 @@ struct isakmp_hdr {
#define PORT_ISAKMP 500
static void
-ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph,
- int inverse, struct ip_vs_conn_param *p)
+ah_esp_conn_fill_param_proto(struct net *net, int af,
+ const struct ip_vs_iphdr *iph, int inverse,
+ struct ip_vs_conn_param *p)
{
if (likely(!inverse))
- ip_vs_conn_fill_param(af, IPPROTO_UDP,
+ ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->saddr, htons(PORT_ISAKMP),
&iph->daddr, htons(PORT_ISAKMP), p);
else
- ip_vs_conn_fill_param(af, IPPROTO_UDP,
+ ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->daddr, htons(PORT_ISAKMP),
&iph->saddr, htons(PORT_ISAKMP), p);
}
static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_in_get(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph, unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = skb_net(skb);
- ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+ ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_in_get(&p);
if (!cp) {
/*
@@ -72,7 +74,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
- pp->name,
+ ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
@@ -83,21 +85,21 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
static struct ip_vs_conn *
ah_esp_conn_out_get(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = skb_net(skb);
- ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+ ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_out_get(&p);
if (!cp) {
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
- pp->name,
+ ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
@@ -107,7 +109,7 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb,
static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
/*
@@ -117,26 +119,14 @@ ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
return 0;
}
-static void ah_esp_init(struct ip_vs_protocol *pp)
-{
- /* nothing to do now */
-}
-
-
-static void ah_esp_exit(struct ip_vs_protocol *pp)
-{
- /* nothing to do now */
-}
-
-
#ifdef CONFIG_IP_VS_PROTO_AH
struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH",
.protocol = IPPROTO_AH,
.num_states = 1,
.dont_defrag = 1,
- .init = ah_esp_init,
- .exit = ah_esp_exit,
+ .init = NULL,
+ .exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
@@ -149,7 +139,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
- .set_state_timeout = NULL,
};
#endif
@@ -159,8 +148,8 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
- .init = ah_esp_init,
- .exit = ah_esp_exit,
+ .init = NULL,
+ .exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1ea96bcd342b..b027ccc49f43 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -9,9 +9,10 @@
#include <net/ip_vs.h>
static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
sctp_chunkhdr_t _schunkh, *sch;
sctp_sctphdr_t *sh, _sctph;
@@ -27,13 +28,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
sizeof(_schunkh), &_schunkh);
if (sch == NULL)
return 0;
-
+ net = skb_net(skb);
if ((sch->type == SCTP_CID_INIT) &&
- (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, sh->dest))) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -46,14 +47,19 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
-
+ /* NF_ACCEPT */
return 1;
}
@@ -856,7 +862,7 @@ static struct ipvs_sctp_nextstate
/*
* Timeout table[state]
*/
-static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
[IP_VS_SCTP_S_NONE] = 2 * HZ,
[IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ,
[IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ,
@@ -900,20 +906,8 @@ static const char *sctp_state_name(int state)
return "?";
}
-static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
-{
-}
-
-static int
-sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-
-return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
- sctp_state_name_table, sname, to);
-}
-
static inline int
-set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, const struct sk_buff *skb)
{
sctp_chunkhdr_t _sctpch, *sch;
@@ -971,7 +965,7 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
IP_VS_DBG_BUF(8, "%s %s %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
- pp->name,
+ pd->pp->name,
((direction == IP_VS_DIR_OUTPUT) ?
"output " : "input "),
IP_VS_DBG_ADDR(cp->af, &cp->daddr),
@@ -995,75 +989,73 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
}
}
}
+ if (likely(pd))
+ cp->timeout = pd->timeout_table[cp->state = next_state];
+ else /* What to do ? */
+ cp->timeout = sctp_timeouts[cp->state = next_state];
- cp->timeout = pp->timeout_table[cp->state = next_state];
-
- return 1;
+ return 1;
}
static int
sctp_state_transition(struct ip_vs_conn *cp, int direction,
- const struct sk_buff *skb, struct ip_vs_protocol *pp)
+ const struct sk_buff *skb, struct ip_vs_proto_data *pd)
{
int ret = 0;
spin_lock(&cp->lock);
- ret = set_sctp_state(pp, cp, direction, skb);
+ ret = set_sctp_state(pd, cp, direction, skb);
spin_unlock(&cp->lock);
return ret;
}
-/*
- * Hash table for SCTP application incarnations
- */
-#define SCTP_APP_TAB_BITS 4
-#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
-#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
-
-static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(sctp_app_lock);
-
static inline __u16 sctp_app_hashkey(__be16 port)
{
return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
& SCTP_APP_TAB_MASK;
}
-static int sctp_register_app(struct ip_vs_app *inc)
+static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
hash = sctp_app_hashkey(port);
- spin_lock_bh(&sctp_app_lock);
- list_for_each_entry(i, &sctp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->sctp_app_lock);
+ list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &sctp_apps[hash]);
- atomic_inc(&ip_vs_protocol_sctp.appcnt);
+ list_add(&inc->p_list, &ipvs->sctp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&sctp_app_lock);
+ spin_unlock_bh(&ipvs->sctp_app_lock);
return ret;
}
-static void sctp_unregister_app(struct ip_vs_app *inc)
+static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&sctp_app_lock);
- atomic_dec(&ip_vs_protocol_sctp.appcnt);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+
+ spin_lock_bh(&ipvs->sctp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&sctp_app_lock);
+ spin_unlock_bh(&ipvs->sctp_app_lock);
}
static int sctp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -1074,12 +1066,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = sctp_app_hashkey(cp->vport);
- spin_lock(&sctp_app_lock);
- list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+ spin_lock(&ipvs->sctp_app_lock);
+ list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&sctp_app_lock);
+ spin_unlock(&ipvs->sctp_app_lock);
IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -1095,43 +1087,50 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&sctp_app_lock);
+ spin_unlock(&ipvs->sctp_app_lock);
out:
return result;
}
-static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ * timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(sctp_apps);
- pp->timeout_table = sctp_timeouts;
-}
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->sctp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
+ sizeof(sctp_timeouts));
+}
-static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
-
+ kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_sctp = {
- .name = "SCTP",
- .protocol = IPPROTO_SCTP,
- .num_states = IP_VS_SCTP_S_LAST,
- .dont_defrag = 0,
- .appcnt = ATOMIC_INIT(0),
- .init = ip_vs_sctp_init,
- .exit = ip_vs_sctp_exit,
- .register_app = sctp_register_app,
+ .name = "SCTP",
+ .protocol = IPPROTO_SCTP,
+ .num_states = IP_VS_SCTP_S_LAST,
+ .dont_defrag = 0,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __ip_vs_sctp_init,
+ .exit_netns = __ip_vs_sctp_exit,
+ .register_app = sctp_register_app,
.unregister_app = sctp_unregister_app,
- .conn_schedule = sctp_conn_schedule,
- .conn_in_get = ip_vs_conn_in_get_proto,
- .conn_out_get = ip_vs_conn_out_get_proto,
- .snat_handler = sctp_snat_handler,
- .dnat_handler = sctp_dnat_handler,
- .csum_check = sctp_csum_check,
- .state_name = sctp_state_name,
+ .conn_schedule = sctp_conn_schedule,
+ .conn_in_get = ip_vs_conn_in_get_proto,
+ .conn_out_get = ip_vs_conn_out_get_proto,
+ .snat_handler = sctp_snat_handler,
+ .dnat_handler = sctp_dnat_handler,
+ .csum_check = sctp_csum_check,
+ .state_name = sctp_state_name,
.state_transition = sctp_state_transition,
- .app_conn_bind = sctp_app_conn_bind,
- .debug_packet = ip_vs_tcpudp_debug_packet,
- .timeout_change = sctp_timeout_change,
- .set_state_timeout = sctp_set_state_timeout,
+ .app_conn_bind = sctp_app_conn_bind,
+ .debug_packet = ip_vs_tcpudp_debug_packet,
+ .timeout_change = NULL,
};
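
__ip_vs_sctp_init() above (and the TCP/UDP counterparts later in this diff) build their per-netns timeout tables with ip_vs_create_timeout_table(), whose definition is not shown here. Given the kfree() in the exit hooks and the (int *) cast away from const, it is presumably a simple copy helper along these lines (an assumption, not taken from the patch):

/* Assumed helper: duplicate the const template so each netns can tune its
 * own timeouts independently; freed with kfree() in the exit_netns hook.
 */
static inline int *ip_vs_create_timeout_table(int *table, int size)
{
	return kmemdup(table, size, GFP_ATOMIC);
}
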
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index f6c5200e2146..c0cc341b840d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -9,8 +9,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
*
+ * Network name space (netns) aware.
+ * Global data moved to netns, i.e. struct netns_ipvs.
+ * The tcp_timeouts table now has a per-netns copy, kept in the
+ * per-protocol ip_vs_proto_data entry of the netns hash table.
*/
#define KMSG_COMPONENT "IPVS"
@@ -28,9 +32,10 @@
#include <net/ip_vs.h>
static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
struct tcphdr _tcph, *th;
struct ip_vs_iphdr iph;
@@ -42,14 +47,14 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
*verdict = NF_DROP;
return 0;
}
-
+ net = skb_net(skb);
/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
if (th->syn &&
- (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
- th->dest))) {
+ (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
+ &iph.daddr, th->dest))) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -63,13 +68,19 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
+ /* NF_ACCEPT */
return 1;
}
@@ -338,7 +349,7 @@ static const int tcp_state_off[IP_VS_DIR_LAST] = {
/*
* Timeout table[state]
*/
-static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
+static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = 2*HZ,
[IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ,
[IP_VS_TCP_S_SYN_SENT] = 2*60*HZ,
@@ -437,10 +448,7 @@ static struct tcp_states_t tcp_states_dos [] = {
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
-static struct tcp_states_t *tcp_state_table = tcp_states;
-
-
-static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
+static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
{
int on = (flags & 1); /* secure_tcp */
@@ -450,14 +458,7 @@ static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
** for most if not for all of the applications. Something
** like "capabilities" (flags) for each object.
*/
- tcp_state_table = (on? tcp_states_dos : tcp_states);
-}
-
-static int
-tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
- return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
- tcp_state_name_table, sname, to);
+ pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
static inline int tcp_state_idx(struct tcphdr *th)
@@ -474,7 +475,7 @@ static inline int tcp_state_idx(struct tcphdr *th)
}
static inline void
-set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, struct tcphdr *th)
{
int state_idx;
@@ -497,7 +498,8 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
goto tcp_state_out;
}
- new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
+ new_state =
+ pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
tcp_state_out:
if (new_state != cp->state) {
@@ -505,7 +507,7 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
- pp->name,
+ pd->pp->name,
((state_off == TCP_DIR_OUTPUT) ?
"output " : "input "),
th->syn ? 'S' : '.',
@@ -535,17 +537,19 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
}
}
- cp->timeout = pp->timeout_table[cp->state = new_state];
+ if (likely(pd))
+ cp->timeout = pd->timeout_table[cp->state = new_state];
+ else /* What to do ? */
+ cp->timeout = tcp_timeouts[cp->state = new_state];
}
-
/*
* Handle state transitions
*/
static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
struct tcphdr _tcph, *th;
@@ -560,23 +564,12 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
return 0;
spin_lock(&cp->lock);
- set_tcp_state(pp, cp, direction, th);
+ set_tcp_state(pd, cp, direction, th);
spin_unlock(&cp->lock);
return 1;
}
-
-/*
- * Hash table for TCP application incarnations
- */
-#define TCP_APP_TAB_BITS 4
-#define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
-#define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
-
-static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(tcp_app_lock);
-
static inline __u16 tcp_app_hashkey(__be16 port)
{
return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
@@ -584,44 +577,50 @@ static inline __u16 tcp_app_hashkey(__be16 port)
}
-static int tcp_register_app(struct ip_vs_app *inc)
+static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
hash = tcp_app_hashkey(port);
- spin_lock_bh(&tcp_app_lock);
- list_for_each_entry(i, &tcp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->tcp_app_lock);
+ list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &tcp_apps[hash]);
- atomic_inc(&ip_vs_protocol_tcp.appcnt);
+ list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&tcp_app_lock);
+ spin_unlock_bh(&ipvs->tcp_app_lock);
return ret;
}
static void
-tcp_unregister_app(struct ip_vs_app *inc)
+tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&tcp_app_lock);
- atomic_dec(&ip_vs_protocol_tcp.appcnt);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
+ spin_lock_bh(&ipvs->tcp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&tcp_app_lock);
+ spin_unlock_bh(&ipvs->tcp_app_lock);
}
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -633,12 +632,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = tcp_app_hashkey(cp->vport);
- spin_lock(&tcp_app_lock);
- list_for_each_entry(inc, &tcp_apps[hash], p_list) {
+ spin_lock(&ipvs->tcp_app_lock);
+ list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&tcp_app_lock);
+ spin_unlock(&ipvs->tcp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -655,7 +654,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&tcp_app_lock);
+ spin_unlock(&ipvs->tcp_app_lock);
out:
return result;
@@ -665,24 +664,35 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
/*
* Set LISTEN timeout. (ip_vs_conn_put will setup timer)
*/
-void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
{
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
spin_lock(&cp->lock);
cp->state = IP_VS_TCP_S_LISTEN;
- cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
+ cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
+ : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
spin_unlock(&cp->lock);
}
-
-static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ * timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(tcp_apps);
- pp->timeout_table = tcp_timeouts;
-}
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->tcp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
+ sizeof(tcp_timeouts));
+ pd->tcp_state_table = tcp_states;
+}
-static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
+ kfree(pd->timeout_table);
}
@@ -691,9 +701,10 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.protocol = IPPROTO_TCP,
.num_states = IP_VS_TCP_S_LAST,
.dont_defrag = 0,
- .appcnt = ATOMIC_INIT(0),
- .init = ip_vs_tcp_init,
- .exit = ip_vs_tcp_exit,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __ip_vs_tcp_init,
+ .exit_netns = __ip_vs_tcp_exit,
.register_app = tcp_register_app,
.unregister_app = tcp_unregister_app,
.conn_schedule = tcp_conn_schedule,
@@ -707,5 +718,4 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.app_conn_bind = tcp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = tcp_timeout_change,
- .set_state_timeout = tcp_set_state_timeout,
};
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 9d106a06bb0a..f1282cbe6fe3 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -9,7 +9,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * Network name space (netns) aware.
*
*/
@@ -28,9 +29,10 @@
#include <net/ip6_checksum.h>
static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
struct udphdr _udph, *uh;
struct ip_vs_iphdr iph;
@@ -42,13 +44,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
*verdict = NF_DROP;
return 0;
}
-
- svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ net = skb_net(skb);
+ svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, uh->dest);
if (svc) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -62,13 +64,19 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
+ /* NF_ACCEPT */
return 1;
}
@@ -338,19 +346,6 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
return 1;
}
-
-/*
- * Note: the caller guarantees that only one of register_app,
- * unregister_app or app_conn_bind is called each time.
- */
-
-#define UDP_APP_TAB_BITS 4
-#define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
-#define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
-
-static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(udp_app_lock);
-
static inline __u16 udp_app_hashkey(__be16 port)
{
return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
@@ -358,44 +353,50 @@ static inline __u16 udp_app_hashkey(__be16 port)
}
-static int udp_register_app(struct ip_vs_app *inc)
+static int udp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
hash = udp_app_hashkey(port);
- spin_lock_bh(&udp_app_lock);
- list_for_each_entry(i, &udp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->udp_app_lock);
+ list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &udp_apps[hash]);
- atomic_inc(&ip_vs_protocol_udp.appcnt);
+ list_add(&inc->p_list, &ipvs->udp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&udp_app_lock);
+ spin_unlock_bh(&ipvs->udp_app_lock);
return ret;
}
static void
-udp_unregister_app(struct ip_vs_app *inc)
+udp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&udp_app_lock);
- atomic_dec(&ip_vs_protocol_udp.appcnt);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ spin_lock_bh(&ipvs->udp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&udp_app_lock);
+ spin_unlock_bh(&ipvs->udp_app_lock);
}
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -407,12 +408,12 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = udp_app_hashkey(cp->vport);
- spin_lock(&udp_app_lock);
- list_for_each_entry(inc, &udp_apps[hash], p_list) {
+ spin_lock(&ipvs->udp_app_lock);
+ list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&udp_app_lock);
+ spin_unlock(&ipvs->udp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -429,14 +430,14 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&udp_app_lock);
+ spin_unlock(&ipvs->udp_app_lock);
out:
return result;
}
-static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
+static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = 5*60*HZ,
[IP_VS_UDP_S_LAST] = 2*HZ,
};
@@ -446,14 +447,6 @@ static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_LAST] = "BUG!",
};
-
-static int
-udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
- return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
- udp_state_name_table, sname, to);
-}
-
static const char * udp_state_name(int state)
{
if (state >= IP_VS_UDP_S_LAST)
@@ -464,20 +457,30 @@ static const char * udp_state_name(int state)
static int
udp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
- cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
+ if (unlikely(!pd)) {
+ pr_err("UDP no ns data\n");
+ return 0;
+ }
+
+ cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
return 1;
}
-static void udp_init(struct ip_vs_protocol *pp)
+static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(udp_apps);
- pp->timeout_table = udp_timeouts;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->udp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
+ sizeof(udp_timeouts));
}
-static void udp_exit(struct ip_vs_protocol *pp)
+static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
+ kfree(pd->timeout_table);
}
@@ -486,8 +489,10 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.protocol = IPPROTO_UDP,
.num_states = IP_VS_UDP_S_LAST,
.dont_defrag = 0,
- .init = udp_init,
- .exit = udp_exit,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __udp_init,
+ .exit_netns = __udp_exit,
.conn_schedule = udp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
@@ -501,5 +506,4 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.app_conn_bind = udp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL,
- .set_state_timeout = udp_set_state_timeout,
};
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index e210f37d8ea2..c49b388d1085 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -72,7 +72,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
q = q->next;
} while (q != p);
write_unlock(&svc->sched_lock);
- IP_VS_ERR_RL("RR: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
return NULL;
out:
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 076ebe00435d..08dbdd5bc18f 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -29,6 +29,7 @@
#include <net/ip_vs.h>
+EXPORT_SYMBOL(ip_vs_scheduler_err);
/*
* IPVS scheduler list
*/
@@ -146,6 +147,30 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
module_put(scheduler->module);
}
+/*
+ * Common error output helper for schedulers
+ */
+
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
+{
+ if (svc->fwmark) {
+ IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
+ svc->scheduler->name, svc->fwmark,
+ svc->fwmark, msg);
+#ifdef CONFIG_IP_VS_IPV6
+ } else if (svc->af == AF_INET6) {
+ IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
+ svc->scheduler->name,
+ ip_vs_proto_name(svc->protocol),
+ &svc->addr.in6, ntohs(svc->port), msg);
+#endif
+ } else {
+ IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
+ svc->scheduler->name,
+ ip_vs_proto_name(svc->protocol),
+ &svc->addr.ip, ntohs(svc->port), msg);
+ }
+}
/*
* Register a scheduler in the scheduler list
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 1ab75a9dc400..89ead246ed3d 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -87,7 +87,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
goto nextstage;
}
}
- IP_VS_ERR_RL("SED: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
return NULL;
/*
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index e6cc174fbc06..b5e2556c581a 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -223,7 +223,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
|| !(dest->flags & IP_VS_DEST_F_AVAILABLE)
|| atomic_read(&dest->weight) <= 0
|| is_overloaded(dest)) {
- IP_VS_ERR_RL("SH: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index ab85aedea17e..fecf24de4af3 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -5,6 +5,18 @@
* high-performance and highly available server based on a
* cluster of servers.
*
+ * Version 1 is capable of handling both version 0 and 1 messages.
+ * Version 0 is the plain old format.
+ * Note: Version 0 receivers will simply drop Version 1 messages.
+ * Version 1 can carry IPv6 addresses, persistence data,
+ * time-outs, and firewall marks.
+ * In ver. 1 "ip_vs_sync_conn_options" is sent in network byte order.
+ * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
+ *
+ * Definitions: a Message is a complete datagram,
+ * a Sync_conn is a part of a Message, and
+ * Param Data is an option to a Sync_conn.
+ *
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
*
* ip_vs_sync: sync connection info from master load balancer to backups
@@ -15,6 +27,8 @@
* Alexandre Cassen : Added SyncID support for incoming sync
* messages filtering.
* Justin Ossevoort : Fix endian problem on sync message size.
+ * Hans Schillstrom : Added Version 1: i.e. IPv6,
+ * Persistence support, fwmark and time-out.
*/
#define KMSG_COMPONENT "IPVS"
@@ -35,6 +49,8 @@
#include <linux/wait.h>
#include <linux/kernel.h>
+#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
+
#include <net/ip.h>
#include <net/sock.h>
@@ -43,11 +59,13 @@
#define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
#define IP_VS_SYNC_PORT 8848 /* multicast port */
+#define SYNC_PROTO_VER 1 /* Protocol version in header */
/*
* IPVS sync connection entry
+ * Version 0, i.e. original version.
*/
-struct ip_vs_sync_conn {
+struct ip_vs_sync_conn_v0 {
__u8 reserved;
/* Protocol, addresses and port numbers */
@@ -71,41 +89,159 @@ struct ip_vs_sync_conn_options {
struct ip_vs_seq out_seq; /* outgoing seq. struct */
};
+/*
+ Sync Connection format (sync_conn)
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Type | Protocol | Ver. | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Flags |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | State | cport |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | vport | dport |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | fwmark |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | timeout (in sec.) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ... |
+ | IP-Addresses (v4 or v6) |
+ | ... |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ Optional Parameters.
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Param. Type | Param. Length | Param. data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+ | ... |
+ | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | Param Type | Param. Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Param data |
+ | Last Param data should be padded for 32 bit alignment |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ * Type 0, IPv4 sync connection format
+ */
+struct ip_vs_sync_v4 {
+ __u8 type;
+ __u8 protocol; /* Which protocol (TCP/UDP) */
+ __be16 ver_size; /* Version msb 4 bits */
+ /* Flags and state transition */
+ __be32 flags; /* status flags */
+ __be16 state; /* state info */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark; /* Firewall mark from skb */
+ __be32 timeout; /* cp timeout */
+ __be32 caddr; /* client address */
+ __be32 vaddr; /* virtual address */
+ __be32 daddr; /* destination address */
+ /* The sequence options start here */
+ /* PE data padded to 32bit alignment after seq. options */
+};
+/*
+ * Type 2 messages IPv6
+ */
+struct ip_vs_sync_v6 {
+ __u8 type;
+ __u8 protocol; /* Which protocol (TCP/UDP) */
+ __be16 ver_size; /* Version msb 4 bits */
+ /* Flags and state transition */
+ __be32 flags; /* status flags */
+ __be16 state; /* state info */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark; /* Firewall mark from skb */
+ __be32 timeout; /* cp timeout */
+ struct in6_addr caddr; /* client address */
+ struct in6_addr vaddr; /* virtual address */
+ struct in6_addr daddr; /* destination address */
+ /* The sequence options start here */
+ /* PE data padded to 32bit alignment after seq. options */
+};
+
+union ip_vs_sync_conn {
+ struct ip_vs_sync_v4 v4;
+ struct ip_vs_sync_v6 v6;
+};
+
+/* Bits in Type field in above */
+#define STYPE_INET6 0
+#define STYPE_F_INET6 (1 << STYPE_INET6)
+
+#define SVER_SHIFT 12 /* Shift to get version */
+#define SVER_MASK 0x0fff /* Mask to strip version */
+
+#define IPVS_OPT_SEQ_DATA 1
+#define IPVS_OPT_PE_DATA 2
+#define IPVS_OPT_PE_NAME 3
+#define IPVS_OPT_PARAM 7
+
+#define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1))
+#define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1))
+#define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1))
+#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
+
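A quick standalone illustration of the ver_size packing implied by the format diagram and the SVER_SHIFT/SVER_MASK definitions above: the top 4 bits carry the sync_conn version and the low 12 bits its size, transmitted in network byte order. This sketch uses assumed example values and is not part of the patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define SVER_SHIFT 12      /* as defined above */
    #define SVER_MASK  0x0fff

    int main(void)
    {
            unsigned int version = 0, size = 36;     /* assumed values */
            uint16_t ver_size = htons((version << SVER_SHIFT) |
                                      (size & SVER_MASK));

            printf("version=%u size=%u\n",
                   ntohs(ver_size) >> SVER_SHIFT,
                   ntohs(ver_size) & SVER_MASK);
            return 0;
    }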
struct ip_vs_sync_thread_data {
+ struct net *net;
struct socket *sock;
char *buf;
};
-#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
+/* Version 0 definition of packet sizes */
+#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0))
#define FULL_CONN_SIZE \
-(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
+(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
/*
- The master mulitcasts messages to the backup load balancers in the
- following format.
+ The master multicasts messages (Datagrams) to the backup load balancers
+ in the following format.
+
+ Version 1:
+ Note, first byte should be Zero, so ver 0 receivers will drop the packet.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Count Conns | SyncID | Size |
+ | 0 | SyncID | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Count Conns | Version | Reserved, set to Zero |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
- | . |
+ ~ . ~
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (n) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Version 0 Header
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Count Conns | SyncID | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IPVS Sync Connection (1) |
*/
#define SYNC_MESG_HEADER_LEN 4
#define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
-struct ip_vs_sync_mesg {
+/* Version 0 header */
+struct ip_vs_sync_mesg_v0 {
__u8 nr_conns;
__u8 syncid;
__u16 size;
@@ -113,9 +249,16 @@ struct ip_vs_sync_mesg {
/* ip_vs_sync_conn entries start here */
};
-/* the maximum length of sync (sending/receiving) message */
-static int sync_send_mesg_maxlen;
-static int sync_recv_mesg_maxlen;
+/* Version 1 header */
+struct ip_vs_sync_mesg {
+ __u8 reserved; /* must be zero */
+ __u8 syncid;
+ __u16 size;
+ __u8 nr_conns;
+ __s8 version; /* SYNC_PROTO_VER */
+ __u16 spare;
+ /* ip_vs_sync_conn entries start here */
+};
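The backup distinguishes the two header layouts by the first byte: in the v0 header it is nr_conns, so a v1 sender writes 0 there and old receivers see an empty message and drop it. A minimal in-context sketch of the test the receiver applies later in ip_vs_process_message(); this helper itself is not part of the patch.

    static inline bool sync_mesg_is_v1(const struct ip_vs_sync_mesg *m)
    {
            /* reserved overlays the old nr_conns byte and must be zero */
            return m->reserved == 0 && m->version == SYNC_PROTO_VER &&
                   m->spare == 0;
    }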
struct ip_vs_sync_buff {
struct list_head list;
@@ -127,28 +270,6 @@ struct ip_vs_sync_buff {
unsigned char *end;
};
-
-/* the sync_buff list head and the lock */
-static LIST_HEAD(ip_vs_sync_queue);
-static DEFINE_SPINLOCK(ip_vs_sync_lock);
-
-/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff *curr_sb = NULL;
-static DEFINE_SPINLOCK(curr_sb_lock);
-
-/* ipvs sync daemon state */
-volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
-
-/* multicast interface name */
-char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-
-/* sync daemon tasks */
-static struct task_struct *sync_master_thread;
-static struct task_struct *sync_backup_thread;
-
/* multicast addr */
static struct sockaddr_in mcast_addr = {
.sin_family = AF_INET,
@@ -156,41 +277,71 @@ static struct sockaddr_in mcast_addr = {
.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
};
+/*
+ * Copy of struct ip_vs_seq
+ * From unaligned network order to aligned host order
+ */
+static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
+{
+ ho->init_seq = get_unaligned_be32(&no->init_seq);
+ ho->delta = get_unaligned_be32(&no->delta);
+ ho->previous_delta = get_unaligned_be32(&no->previous_delta);
+}
+
+/*
+ * Copy of struct ip_vs_seq
+ * From Aligned host order to unaligned network order
+ */
+static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
+{
+ put_unaligned_be32(ho->init_seq, &no->init_seq);
+ put_unaligned_be32(ho->delta, &no->delta);
+ put_unaligned_be32(ho->previous_delta, &no->previous_delta);
+}
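The copies above use the unaligned accessors because in a Version 1 message the sequence-option payload starts right after a one-byte type and a one-byte length, so it is generally not 32-bit aligned and plain loads could fault on strict-alignment architectures. A standalone equivalent of get_unaligned_be32(), for illustration only:

    #include <stdint.h>

    static uint32_t be32_unaligned(const uint8_t *p)
    {
            /* assemble the value byte by byte so alignment never matters */
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }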
-static inline struct ip_vs_sync_buff *sb_dequeue(void)
+static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
{
struct ip_vs_sync_buff *sb;
- spin_lock_bh(&ip_vs_sync_lock);
- if (list_empty(&ip_vs_sync_queue)) {
+ spin_lock_bh(&ipvs->sync_lock);
+ if (list_empty(&ipvs->sync_queue)) {
sb = NULL;
} else {
- sb = list_entry(ip_vs_sync_queue.next,
+ sb = list_entry(ipvs->sync_queue.next,
struct ip_vs_sync_buff,
list);
list_del(&sb->list);
}
- spin_unlock_bh(&ip_vs_sync_lock);
+ spin_unlock_bh(&ipvs->sync_lock);
return sb;
}
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+/*
+ * Create a new sync buffer for Version 1 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
{
struct ip_vs_sync_buff *sb;
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
- if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+ sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
kfree(sb);
return NULL;
}
+ sb->mesg->reserved = 0; /* old nr_conns, i.e. must be zero now */
+ sb->mesg->version = SYNC_PROTO_VER;
+ sb->mesg->syncid = ipvs->master_syncid;
+ sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
sb->mesg->nr_conns = 0;
- sb->mesg->syncid = ip_vs_master_syncid;
- sb->mesg->size = 4;
- sb->head = (unsigned char *)sb->mesg + 4;
- sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
+ sb->mesg->spare = 0;
+ sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
+ sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+
sb->firstuse = jiffies;
return sb;
}
@@ -201,14 +352,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
kfree(sb);
}
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs)
{
- spin_lock(&ip_vs_sync_lock);
- if (ip_vs_sync_state & IP_VS_STATE_MASTER)
- list_add_tail(&sb->list, &ip_vs_sync_queue);
+ struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+
+ spin_lock(&ipvs->sync_lock);
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ list_add_tail(&sb->list, &ipvs->sync_queue);
else
ip_vs_sync_buff_release(sb);
- spin_unlock(&ip_vs_sync_lock);
+ spin_unlock(&ipvs->sync_lock);
}
/*
@@ -216,36 +369,101 @@ static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
* than the specified time or the specified time is zero.
*/
static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
{
struct ip_vs_sync_buff *sb;
- spin_lock_bh(&curr_sb_lock);
- if (curr_sb && (time == 0 ||
- time_before(jiffies - curr_sb->firstuse, time))) {
- sb = curr_sb;
- curr_sb = NULL;
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ if (ipvs->sync_buff &&
+ time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
+ sb = ipvs->sync_buff;
+ ipvs->sync_buff = NULL;
} else
sb = NULL;
- spin_unlock_bh(&curr_sb_lock);
+ spin_unlock_bh(&ipvs->sync_buff_lock);
return sb;
}
+/*
+ * Switch mode from sending version 0 or 1
+ * - must handle sync_buf
+ */
+void ip_vs_sync_switch_mode(struct net *net, int mode)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
+ return;
+ if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff)
+ return;
+
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ /* Buffer empty ? then let buf_create do the job */
+ if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
+ kfree(ipvs->sync_buff);
+ ipvs->sync_buff = NULL;
+ } else {
+ spin_lock_bh(&ipvs->sync_lock);
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ list_add_tail(&ipvs->sync_buff->list,
+ &ipvs->sync_queue);
+ else
+ ip_vs_sync_buff_release(ipvs->sync_buff);
+ spin_unlock_bh(&ipvs->sync_lock);
+ }
+ spin_unlock_bh(&ipvs->sync_buff_lock);
+}
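ip_vs_sync_switch_mode() flushes or frees the buffer currently being filled so a format change never mixes v0 and v1 entries in one datagram. It is presumably invoked when the net.ipv4.vs.sync_version sysctl changes; judging from the early-return check above it has to see the new mode while sysctl_sync_ver still holds the old value, so a plausible caller (a hedged sketch, not code from this patch) would look like:

    static void sync_version_changed(struct net *net, int new_ver)
    {
            /* flush or drop the half-filled buffer, then record the new mode */
            ip_vs_sync_switch_mode(net, new_ver);
            net_ipvs(net)->sysctl_sync_ver = new_ver;
    }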
/*
+ * Create a new sync buffer for Version 0 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+{
+ struct ip_vs_sync_buff *sb;
+ struct ip_vs_sync_mesg_v0 *mesg;
+
+ if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+ return NULL;
+
+ sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
+ kfree(sb);
+ return NULL;
+ }
+ mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
+ mesg->nr_conns = 0;
+ mesg->syncid = ipvs->master_syncid;
+ mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
+ sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
+ sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+ sb->firstuse = jiffies;
+ return sb;
+}
+
+/*
+ * Version 0, can be switched on via sysctl.
* Add an ip_vs_conn information into the current sync_buff.
- * Called by ip_vs_in.
*/
-void ip_vs_sync_conn(struct ip_vs_conn *cp)
+void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
{
- struct ip_vs_sync_mesg *m;
- struct ip_vs_sync_conn *s;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg_v0 *m;
+ struct ip_vs_sync_conn_v0 *s;
int len;
- spin_lock(&curr_sb_lock);
- if (!curr_sb) {
- if (!(curr_sb=ip_vs_sync_buff_create())) {
- spin_unlock(&curr_sb_lock);
+ if (unlikely(cp->af != AF_INET))
+ return;
+ /* Do not sync ONE PACKET */
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ return;
+
+ spin_lock(&ipvs->sync_buff_lock);
+ if (!ipvs->sync_buff) {
+ ipvs->sync_buff =
+ ip_vs_sync_buff_create_v0(ipvs);
+ if (!ipvs->sync_buff) {
+ spin_unlock(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
@@ -253,10 +471,11 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
SIMPLE_CONN_SIZE;
- m = curr_sb->mesg;
- s = (struct ip_vs_sync_conn *)curr_sb->head;
+ m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
+ s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
/* copy members */
+ s->reserved = 0;
s->protocol = cp->protocol;
s->cport = cp->cport;
s->vport = cp->vport;
@@ -274,83 +493,366 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
m->nr_conns++;
m->size += len;
- curr_sb->head += len;
+ ipvs->sync_buff->head += len;
/* check if there is a space for next one */
- if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
- sb_queue_tail(curr_sb);
- curr_sb = NULL;
+ if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
+ sb_queue_tail(ipvs);
+ ipvs->sync_buff = NULL;
}
- spin_unlock(&curr_sb_lock);
+ spin_unlock(&ipvs->sync_buff_lock);
/* synchronize its controller if it has */
if (cp->control)
- ip_vs_sync_conn(cp->control);
+ ip_vs_sync_conn(net, cp->control);
+}
+
+/*
+ * Add an ip_vs_conn information into the current sync_buff.
+ * Called by ip_vs_in.
+ * Sending Version 1 messages
+ */
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg *m;
+ union ip_vs_sync_conn *s;
+ __u8 *p;
+ unsigned int len, pe_name_len, pad;
+
+ /* Handle old version of the protocol */
+ if (ipvs->sysctl_sync_ver == 0) {
+ ip_vs_sync_conn_v0(net, cp);
+ return;
+ }
+ /* Do not sync ONE PACKET */
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ goto control;
+sloop:
+ /* Sanity checks */
+ pe_name_len = 0;
+ if (cp->pe_data_len) {
+ if (!cp->pe_data || !cp->dest) {
+ IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
+ return;
+ }
+ pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
+ }
+
+ spin_lock(&ipvs->sync_buff_lock);
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6)
+ len = sizeof(struct ip_vs_sync_v6);
+ else
+#endif
+ len = sizeof(struct ip_vs_sync_v4);
+
+ if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
+ len += sizeof(struct ip_vs_sync_conn_options) + 2;
+
+ if (cp->pe_data_len)
+ len += cp->pe_data_len + 2; /* + Param hdr field */
+ if (pe_name_len)
+ len += pe_name_len + 2;
+
+ /* check if there is a space for this one */
+ pad = 0;
+ if (ipvs->sync_buff) {
+ pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
+ if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
+ sb_queue_tail(ipvs);
+ ipvs->sync_buff = NULL;
+ pad = 0;
+ }
+ }
+
+ if (!ipvs->sync_buff) {
+ ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
+ if (!ipvs->sync_buff) {
+ spin_unlock(&ipvs->sync_buff_lock);
+ pr_err("ip_vs_sync_buff_create failed.\n");
+ return;
+ }
+ }
+
+ m = ipvs->sync_buff->mesg;
+ p = ipvs->sync_buff->head;
+ ipvs->sync_buff->head += pad + len;
+ m->size += pad + len;
+ /* Add ev. padding from prev. sync_conn */
+ while (pad--)
+ *(p++) = 0;
+
+ s = (union ip_vs_sync_conn *)p;
+
+ /* Set message type & copy members */
+ s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
+ s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */
+ s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
+ s->v4.state = htons(cp->state);
+ s->v4.protocol = cp->protocol;
+ s->v4.cport = cp->cport;
+ s->v4.vport = cp->vport;
+ s->v4.dport = cp->dport;
+ s->v4.fwmark = htonl(cp->fwmark);
+ s->v4.timeout = htonl(cp->timeout / HZ);
+ m->nr_conns++;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6) {
+ p += sizeof(struct ip_vs_sync_v6);
+ ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
+ ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
+ ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+ } else
+#endif
+ {
+ p += sizeof(struct ip_vs_sync_v4); /* options ptr */
+ s->v4.caddr = cp->caddr.ip;
+ s->v4.vaddr = cp->vaddr.ip;
+ s->v4.daddr = cp->daddr.ip;
+ }
+ if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+ *(p++) = IPVS_OPT_SEQ_DATA;
+ *(p++) = sizeof(struct ip_vs_sync_conn_options);
+ hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
+ p += sizeof(struct ip_vs_seq);
+ hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
+ p += sizeof(struct ip_vs_seq);
+ }
+ /* Handle pe data */
+ if (cp->pe_data_len && cp->pe_data) {
+ *(p++) = IPVS_OPT_PE_DATA;
+ *(p++) = cp->pe_data_len;
+ memcpy(p, cp->pe_data, cp->pe_data_len);
+ p += cp->pe_data_len;
+ if (pe_name_len) {
+ /* Add PE_NAME */
+ *(p++) = IPVS_OPT_PE_NAME;
+ *(p++) = pe_name_len;
+ memcpy(p, cp->pe->name, pe_name_len);
+ p += pe_name_len;
+ }
+ }
+
+ spin_unlock(&ipvs->sync_buff_lock);
+
+control:
+ /* synchronize its controller if it has */
+ cp = cp->control;
+ if (!cp)
+ return;
+ /*
+ * Reduce sync rate for templates
+ * i.e. only increment in_pkts for templates.
+ */
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+ int pkts = atomic_add_return(1, &cp->in_pkts);
+
+ if (pkts % ipvs->sysctl_sync_threshold[1] != 1)
+ return;
+ }
+ goto sloop;
}
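The pad computation above, (4 - (size_t)head) & 3, rounds the write pointer up to the next 32-bit boundary before a new sync_conn is appended; the receiver undoes this further down with msg_end = p + ((size + 3) & ~3). A small standalone check of the arithmetic, illustration only:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int pad_to_4(uintptr_t head)
    {
            return (4 - head) & 3;          /* 0..3 bytes of padding */
    }

    int main(void)
    {
            uintptr_t p;

            for (p = 0x1000; p < 0x1004; p++)
                    printf("head %#lx -> pad %u\n",
                           (unsigned long)p, pad_to_4(p));
            return 0;
    }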
+/*
+ * fill_param used by version 1
+ */
static inline int
-ip_vs_conn_fill_param_sync(int af, int protocol,
- const union nf_inet_addr *caddr, __be16 cport,
- const union nf_inet_addr *vaddr, __be16 vport,
- struct ip_vs_conn_param *p)
+ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+ struct ip_vs_conn_param *p,
+ __u8 *pe_data, unsigned int pe_data_len,
+ __u8 *pe_name, unsigned int pe_name_len)
{
- /* XXX: Need to take into account persistence engine */
- ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p);
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+ (const union nf_inet_addr *)&sc->v6.caddr,
+ sc->v6.cport,
+ (const union nf_inet_addr *)&sc->v6.vaddr,
+ sc->v6.vport, p);
+ else
+#endif
+ ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+ (const union nf_inet_addr *)&sc->v4.caddr,
+ sc->v4.cport,
+ (const union nf_inet_addr *)&sc->v4.vaddr,
+ sc->v4.vport, p);
+ /* Handle pe data */
+ if (pe_data_len) {
+ if (pe_name_len) {
+ char buff[IP_VS_PENAME_MAXLEN+1];
+
+ memcpy(buff, pe_name, pe_name_len);
+ buff[pe_name_len]=0;
+ p->pe = __ip_vs_pe_getbyname(buff);
+ if (!p->pe) {
+ IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
+ buff);
+ return 1;
+ }
+ } else {
+ IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
+ return 1;
+ }
+
+ p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
+ if (!p->pe_data) {
+ if (p->pe->module)
+ module_put(p->pe->module);
+ return -ENOMEM;
+ }
+ memcpy(p->pe_data, pe_data, pe_data_len);
+ p->pe_data_len = pe_data_len;
+ }
return 0;
}
/*
- * Process received multicast message and create the corresponding
- * ip_vs_conn entries.
+ * Connection Add / Update.
+ * Common for version 0 and 1 reception of backup sync_conns.
+ * Param: ...
+ * timeout is in sec.
*/
-static void ip_vs_process_message(const char *buffer, const size_t buflen)
+static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+ unsigned int flags, unsigned int state,
+ unsigned int protocol, unsigned int type,
+ const union nf_inet_addr *daddr, __be16 dport,
+ unsigned long timeout, __u32 fwmark,
+ struct ip_vs_sync_conn_options *opt)
{
- struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
- struct ip_vs_sync_conn *s;
- struct ip_vs_sync_conn_options *opt;
- struct ip_vs_conn *cp;
- struct ip_vs_protocol *pp;
struct ip_vs_dest *dest;
- struct ip_vs_conn_param param;
- char *p;
- int i;
+ struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- if (buflen < sizeof(struct ip_vs_sync_mesg)) {
- IP_VS_ERR_RL("sync message header too short\n");
- return;
- }
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
+ cp = ip_vs_conn_in_get(param);
+ else
+ cp = ip_vs_ct_in_get(param);
- /* Convert size back to host byte order */
- m->size = ntohs(m->size);
+ if (cp && param->pe_data) /* Free pe_data */
+ kfree(param->pe_data);
+ if (!cp) {
+ /*
+ * Find the appropriate destination for the connection.
+ * If it is not found the connection will remain unbound
+ * but still handled.
+ */
+ dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
+ param->vport, protocol, fwmark);
- if (buflen != m->size) {
- IP_VS_ERR_RL("bogus sync message size\n");
- return;
+ /* Set the appropriate activity flag */
+ if (protocol == IPPROTO_TCP) {
+ if (state != IP_VS_TCP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
+ } else if (protocol == IPPROTO_SCTP) {
+ if (state != IP_VS_SCTP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ if (!cp) {
+ if (param->pe_data)
+ kfree(param->pe_data);
+ IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+ return;
+ }
+ } else if (!cp->dest) {
+ dest = ip_vs_try_bind_dest(cp);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
+ (cp->state != state)) {
+ /* update active/inactive flag for the connection */
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_TCP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags |= IP_VS_CONN_F_INACTIVE;
+ } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state == IP_VS_TCP_S_ESTABLISHED)) {
+ atomic_inc(&dest->activeconns);
+ atomic_dec(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+ (cp->state != state)) {
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_SCTP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
}
- /* SyncID sanity check */
- if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
- IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
- m->syncid);
- return;
+ if (opt)
+ memcpy(&cp->in_seq, opt, sizeof(*opt));
+ atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]);
+ cp->state = state;
+ cp->old_state = cp->state;
+ /*
+ * For Ver 0 messages style
+ * - Not possible to recover the right timeout for templates
+ * - can not find the right fwmark
+ * virtual service. If needed, we can do it for
+ * non-fwmark persistent services.
+ * Ver 1 messages style.
+ * - No problem.
+ */
+ if (timeout) {
+ if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
+ timeout = MAX_SCHEDULE_TIMEOUT / HZ;
+ cp->timeout = timeout*HZ;
+ } else {
+ struct ip_vs_proto_data *pd;
+
+ pd = ip_vs_proto_data_get(net, protocol);
+ if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
+ cp->timeout = pd->timeout_table[state];
+ else
+ cp->timeout = (3*60*HZ);
}
+ ip_vs_conn_put(cp);
+}
- p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
+/*
+ * Process received multicast message for Version 0
+ */
+static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+ const size_t buflen)
+{
+ struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
+ struct ip_vs_sync_conn_v0 *s;
+ struct ip_vs_sync_conn_options *opt;
+ struct ip_vs_protocol *pp;
+ struct ip_vs_conn_param param;
+ char *p;
+ int i;
+
+ p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
for (i=0; i<m->nr_conns; i++) {
unsigned flags, state;
if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
- IP_VS_ERR_RL("bogus conn in sync message\n");
+ IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
return;
}
- s = (struct ip_vs_sync_conn *) p;
+ s = (struct ip_vs_sync_conn_v0 *) p;
flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
flags &= ~IP_VS_CONN_F_HASHED;
if (flags & IP_VS_CONN_F_SEQ_MASK) {
opt = (struct ip_vs_sync_conn_options *)&s[1];
p += FULL_CONN_SIZE;
if (p > buffer+buflen) {
- IP_VS_ERR_RL("bogus conn options in sync message\n");
+ IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n");
return;
}
} else {
@@ -362,118 +864,286 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
pp = ip_vs_proto_get(s->protocol);
if (!pp) {
- IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
s->protocol);
continue;
}
if (state >= pp->num_states) {
- IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
pp->name, state);
continue;
}
} else {
/* protocol in templates is not used for state/timeout */
- pp = NULL;
if (state > 0) {
- IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
state);
state = 0;
}
}
- {
- if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol,
- (union nf_inet_addr *)&s->caddr,
- s->cport,
- (union nf_inet_addr *)&s->vaddr,
- s->vport, &param)) {
- pr_err("ip_vs_conn_fill_param_sync failed");
- return;
+ ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+ (const union nf_inet_addr *)&s->caddr,
+ s->cport,
+ (const union nf_inet_addr *)&s->vaddr,
+ s->vport, &param);
+
+ /* Send timeout as Zero */
+ ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+ (union nf_inet_addr *)&s->daddr, s->dport,
+ 0, 0, opt);
+ }
+}
+
+/*
+ * Handle options
+ */
+static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
+ __u32 *opt_flags,
+ struct ip_vs_sync_conn_options *opt)
+{
+ struct ip_vs_sync_conn_options *topt;
+
+ topt = (struct ip_vs_sync_conn_options *)p;
+
+ if (plen != sizeof(struct ip_vs_sync_conn_options)) {
+ IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
+ return -EINVAL;
+ }
+ if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
+ IP_VS_DBG(2, "BACKUP, conn options found twice\n");
+ return -EINVAL;
+ }
+ ntoh_seq(&topt->in_seq, &opt->in_seq);
+ ntoh_seq(&topt->out_seq, &opt->out_seq);
+ *opt_flags |= IPVS_OPT_F_SEQ_DATA;
+ return 0;
+}
+
+static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
+ __u8 **data, unsigned int maxlen,
+ __u32 *opt_flags, __u32 flag)
+{
+ if (plen > maxlen) {
+ IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
+ return -EINVAL;
+ }
+ if (*opt_flags & flag) {
+ IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
+ return -EINVAL;
+ }
+ *data_len = plen;
+ *data = p;
+ *opt_flags |= flag;
+ return 0;
+}
+/*
+ * Process a Version 1 sync. connection
+ */
+static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+{
+ struct ip_vs_sync_conn_options opt;
+ union ip_vs_sync_conn *s;
+ struct ip_vs_protocol *pp;
+ struct ip_vs_conn_param param;
+ __u32 flags;
+ unsigned int af, state, pe_data_len=0, pe_name_len=0;
+ __u8 *pe_data=NULL, *pe_name=NULL;
+ __u32 opt_flags=0;
+ int retc=0;
+
+ s = (union ip_vs_sync_conn *) p;
+
+ if (s->v6.type & STYPE_F_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+ af = AF_INET6;
+ p += sizeof(struct ip_vs_sync_v6);
+#else
+ IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
+ retc = 10;
+ goto out;
+#endif
+ } else if (!s->v4.type) {
+ af = AF_INET;
+ p += sizeof(struct ip_vs_sync_v4);
+ } else {
+ return -10;
+ }
+ if (p > msg_end)
+ return -20;
+
+ /* Process optional params check Type & Len. */
+ while (p < msg_end) {
+ int ptype;
+ int plen;
+
+ if (p+2 > msg_end)
+ return -30;
+ ptype = *(p++);
+ plen = *(p++);
+
+ if (!plen || ((p + plen) > msg_end))
+ return -40;
+ /* Handle seq option p = param data */
+ switch (ptype & ~IPVS_OPT_F_PARAM) {
+ case IPVS_OPT_SEQ_DATA:
+ if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
+ return -50;
+ break;
+
+ case IPVS_OPT_PE_DATA:
+ if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
+ IP_VS_PEDATA_MAXLEN, &opt_flags,
+ IPVS_OPT_F_PE_DATA))
+ return -60;
+ break;
+
+ case IPVS_OPT_PE_NAME:
+ if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name,
+ IP_VS_PENAME_MAXLEN, &opt_flags,
+ IPVS_OPT_F_PE_NAME))
+ return -70;
+ break;
+
+ default:
+ /* Param data mandatory ? */
+ if (!(ptype & IPVS_OPT_F_PARAM)) {
+ IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
+ ptype & ~IPVS_OPT_F_PARAM);
+ retc = 20;
+ goto out;
}
- if (!(flags & IP_VS_CONN_F_TEMPLATE))
- cp = ip_vs_conn_in_get(&param);
- else
- cp = ip_vs_ct_in_get(&param);
}
- if (!cp) {
- /*
- * Find the appropriate destination for the connection.
- * If it is not found the connection will remain unbound
- * but still handled.
- */
- dest = ip_vs_find_dest(AF_INET,
- (union nf_inet_addr *)&s->daddr,
- s->dport,
- (union nf_inet_addr *)&s->vaddr,
- s->vport,
- s->protocol);
- /* Set the approprite ativity flag */
- if (s->protocol == IPPROTO_TCP) {
- if (state != IP_VS_TCP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
- } else if (s->protocol == IPPROTO_SCTP) {
- if (state != IP_VS_SCTP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
+ p += plen; /* Next option */
+ }
+
+ /* Get flags and Mask off unsupported */
+ flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
+ flags |= IP_VS_CONN_F_SYNC;
+ state = ntohs(s->v4.state);
+
+ if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+ pp = ip_vs_proto_get(s->v4.protocol);
+ if (!pp) {
+ IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
+ s->v4.protocol);
+ retc = 30;
+ goto out;
+ }
+ if (state >= pp->num_states) {
+ IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
+ pp->name, state);
+ retc = 40;
+ goto out;
+ }
+ } else {
+ /* protocol in templates is not used for state/timeout */
+ if (state > 0) {
+ IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
+ state);
+ state = 0;
+ }
+ }
+ if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+ pe_data_len, pe_name, pe_name_len)) {
+ retc = 50;
+ goto out;
+ }
+ /* If only IPv4, just silently skip IPv6 */
+ if (af == AF_INET)
+ ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+ (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
+ ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
+ (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ );
+#ifdef CONFIG_IP_VS_IPV6
+ else
+ ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+ (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
+ ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
+ (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ );
+#endif
+ return 0;
+ /* Error exit */
+out:
+ IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
+ return retc;
+
+}
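The option loop in ip_vs_proc_sync_conn() walks a simple TLV list: one type byte, one length byte, then length bytes of data, with every step bounds-checked against msg_end. A standalone sketch of the same walk; dispatch is omitted and the names are invented for illustration.

    #include <stdint.h>

    static int walk_options(const uint8_t *p, const uint8_t *end)
    {
            while (p < end) {
                    uint8_t type, len;

                    if (p + 2 > end)
                            return -1;      /* truncated option header */
                    type = *p++;
                    len = *p++;
                    if (!len || p + len > end)
                            return -1;      /* bad length */
                    (void)type;             /* dispatch on type here */
                    p += len;               /* next option */
            }
            return 0;
    }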
+/*
+ * Process received multicast message and create the corresponding
+ * ip_vs_conn entries.
+ * Handles Version 0 & 1
+ */
+static void ip_vs_process_message(struct net *net, __u8 *buffer,
+ const size_t buflen)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
+ __u8 *p, *msg_end;
+ int i, nr_conns;
+
+ if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
+ IP_VS_DBG(2, "BACKUP, message header too short\n");
+ return;
+ }
+ /* Convert size back to host byte order */
+ m2->size = ntohs(m2->size);
+
+ if (buflen != m2->size) {
+ IP_VS_DBG(2, "BACKUP, bogus message size\n");
+ return;
+ }
+ /* SyncID sanity check */
+ if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+ IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
+ return;
+ }
+ /* Handle version 1 message */
+ if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
+ && (m2->spare == 0)) {
+
+ msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
+ nr_conns = m2->nr_conns;
+
+ for (i=0; i<nr_conns; i++) {
+ union ip_vs_sync_conn *s;
+ unsigned size;
+ int retc;
+
+ p = msg_end;
+ if (p + sizeof(s->v4) > buffer+buflen) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+ return;
}
- cp = ip_vs_conn_new(&param,
- (union nf_inet_addr *)&s->daddr,
- s->dport, flags, dest);
- if (dest)
- atomic_dec(&dest->refcnt);
- if (!cp) {
- pr_err("ip_vs_conn_new failed\n");
+ s = (union ip_vs_sync_conn *)p;
+ size = ntohs(s->v4.ver_size) & SVER_MASK;
+ msg_end = p + size;
+ /* Basic sanity checks */
+ if (msg_end > buffer+buflen) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
return;
}
- } else if (!cp->dest) {
- dest = ip_vs_try_bind_dest(cp);
- if (dest)
- atomic_dec(&dest->refcnt);
- } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
- (cp->state != state)) {
- /* update active/inactive flag for the connection */
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_TCP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags |= IP_VS_CONN_F_INACTIVE;
- } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state == IP_VS_TCP_S_ESTABLISHED)) {
- atomic_inc(&dest->activeconns);
- atomic_dec(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
+ ntohs(s->v4.ver_size) >> SVER_SHIFT);
+ return;
}
- } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
- (cp->state != state)) {
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_SCTP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ /* Process a single sync_conn */
+ retc = ip_vs_proc_sync_conn(net, p, msg_end);
+ if (retc < 0) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
+ retc);
+ return;
}
+ /* Make sure we have 32 bit alignment */
+ msg_end = p + ((size + 3) & ~3);
}
-
- if (opt)
- memcpy(&cp->in_seq, opt, sizeof(*opt));
- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
- cp->state = state;
- cp->old_state = cp->state;
- /*
- * We can not recover the right timeout for templates
- * in all cases, we can not find the right fwmark
- * virtual service. If needed, we can do it for
- * non-fwmark persistent services.
- */
- if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
- cp->timeout = pp->timeout_table[state];
- else
- cp->timeout = (3*60*HZ);
- ip_vs_conn_put(cp);
+ } else {
+ /* Old type of message */
+ ip_vs_process_message_v0(net, buffer, buflen);
+ return;
}
}
@@ -511,8 +1181,10 @@ static int set_mcast_if(struct sock *sk, char *ifname)
{
struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
+ struct net *net = sock_net(sk);
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -531,30 +1203,33 @@ static int set_mcast_if(struct sock *sk, char *ifname)
* Set the maximum length of sync message according to the
* specified interface's MTU.
*/
-static int set_sync_mesg_maxlen(int sync_state)
+static int set_sync_mesg_maxlen(struct net *net, int sync_state)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct net_device *dev;
int num;
if (sync_state == IP_VS_STATE_MASTER) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
+ if (!dev)
return -ENODEV;
num = (dev->mtu - sizeof(struct iphdr) -
sizeof(struct udphdr) -
SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
- sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
+ ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
IP_VS_DBG(7, "setting the maximum length of sync sending "
- "message %d.\n", sync_send_mesg_maxlen);
+ "message %d.\n", ipvs->send_mesg_maxlen);
} else if (sync_state == IP_VS_STATE_BACKUP) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
+ if (!dev)
return -ENODEV;
- sync_recv_mesg_maxlen = dev->mtu -
+ ipvs->recv_mesg_maxlen = dev->mtu -
sizeof(struct iphdr) - sizeof(struct udphdr);
IP_VS_DBG(7, "setting the maximum length of sync receiving "
- "message %d.\n", sync_recv_mesg_maxlen);
+ "message %d.\n", ipvs->recv_mesg_maxlen);
}
return 0;
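For the master side the arithmetic above works out roughly as follows, assuming a 1500-byte MTU and a 24-byte SIMPLE_CONN_SIZE (the v0 entry; the exact value is not visible in this hunk). Illustration only:

    #include <stdio.h>

    int main(void)
    {
            int mtu = 1500, simple_conn_size = 24, hdr_len = 4; /* assumptions */
            int num = (mtu - 20 /* iphdr */ - 8 /* udphdr */
                       - hdr_len - 20) / simple_conn_size;

            if (num > 255)                  /* MAX_CONNS_PER_SYNCBUFF */
                    num = 255;
            printf("send_mesg_maxlen = %d\n",
                   hdr_len + simple_conn_size * num);
            return 0;
    }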
@@ -569,6 +1244,7 @@ static int set_sync_mesg_maxlen(int sync_state)
static int
join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
{
+ struct net *net = sock_net(sk);
struct ip_mreqn mreq;
struct net_device *dev;
int ret;
@@ -576,7 +1252,8 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -593,11 +1270,13 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
static int bind_mcastif_addr(struct socket *sock, char *ifname)
{
+ struct net *net = sock_net(sock->sk);
struct net_device *dev;
__be32 addr;
struct sockaddr_in sin;
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -619,19 +1298,20 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
/*
* Set up sending multicast socket over UDP
*/
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(struct net *net)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct socket *sock;
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
- result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+ result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
@@ -640,7 +1320,7 @@ static struct socket * make_send_sock(void)
set_mcast_loop(sock->sk, 0);
set_mcast_ttl(sock->sk, 1);
- result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+ result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error binding address of the mcast interface\n");
goto error;
@@ -664,13 +1344,14 @@ static struct socket * make_send_sock(void)
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(struct net *net)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct socket *sock;
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
@@ -689,7 +1370,7 @@ static struct socket * make_receive_sock(void)
/* join the multicast group */
result = join_mcast_group(sock->sk,
(struct in_addr *) &mcast_addr.sin_addr,
- ip_vs_backup_mcast_ifn);
+ ipvs->backup_mcast_ifn);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
@@ -760,20 +1441,21 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
static int sync_thread_master(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
+ struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
struct ip_vs_sync_buff *sb;
pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
"syncid = %d\n",
- ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+ ipvs->master_mcast_ifn, ipvs->master_syncid);
while (!kthread_should_stop()) {
- while ((sb = sb_dequeue())) {
+ while ((sb = sb_dequeue(ipvs))) {
ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
ip_vs_sync_buff_release(sb);
}
- /* check if entries stay in curr_sb for 2 seconds */
- sb = get_curr_sync_buff(2 * HZ);
+ /* check if entries stay in ipvs->sync_buff for 2 seconds */
+ sb = get_curr_sync_buff(ipvs, 2 * HZ);
if (sb) {
ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
ip_vs_sync_buff_release(sb);
@@ -783,14 +1465,13 @@ static int sync_thread_master(void *data)
}
/* clean up the sync_buff queue */
- while ((sb=sb_dequeue())) {
+ while ((sb = sb_dequeue(ipvs)))
ip_vs_sync_buff_release(sb);
- }
/* clean up the current sync_buff */
- if ((sb = get_curr_sync_buff(0))) {
+ sb = get_curr_sync_buff(ipvs, 0);
+ if (sb)
ip_vs_sync_buff_release(sb);
- }
/* release the sending multicast socket */
sock_release(tinfo->sock);
@@ -803,11 +1484,12 @@ static int sync_thread_master(void *data)
static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
+ struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
"syncid = %d\n",
- ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+ ipvs->backup_mcast_ifn, ipvs->backup_syncid);
while (!kthread_should_stop()) {
wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -817,7 +1499,7 @@ static int sync_thread_backup(void *data)
/* do we have data now? */
while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
- sync_recv_mesg_maxlen);
+ ipvs->recv_mesg_maxlen);
if (len <= 0) {
pr_err("receiving message error\n");
break;
@@ -826,7 +1508,7 @@ static int sync_thread_backup(void *data)
/* disable bottom half, because it accesses the data
shared by softirq while getting/creating conns */
local_bh_disable();
- ip_vs_process_message(tinfo->buf, len);
+ ip_vs_process_message(tinfo->net, tinfo->buf, len);
local_bh_enable();
}
}
@@ -840,41 +1522,42 @@ static int sync_thread_backup(void *data)
}
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
{
struct ip_vs_sync_thread_data *tinfo;
struct task_struct **realtask, *task;
struct socket *sock;
+ struct netns_ipvs *ipvs = net_ipvs(net);
char *name, *buf = NULL;
int (*threadfn)(void *data);
int result = -ENOMEM;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
- sizeof(struct ip_vs_sync_conn));
+ sizeof(struct ip_vs_sync_conn_v0));
if (state == IP_VS_STATE_MASTER) {
- if (sync_master_thread)
+ if (ipvs->master_thread)
return -EEXIST;
- strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
- sizeof(ip_vs_master_mcast_ifn));
- ip_vs_master_syncid = syncid;
- realtask = &sync_master_thread;
- name = "ipvs_syncmaster";
+ strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
+ sizeof(ipvs->master_mcast_ifn));
+ ipvs->master_syncid = syncid;
+ realtask = &ipvs->master_thread;
+ name = "ipvs_master:%d";
threadfn = sync_thread_master;
- sock = make_send_sock();
+ sock = make_send_sock(net);
} else if (state == IP_VS_STATE_BACKUP) {
- if (sync_backup_thread)
+ if (ipvs->backup_thread)
return -EEXIST;
- strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
- sizeof(ip_vs_backup_mcast_ifn));
- ip_vs_backup_syncid = syncid;
- realtask = &sync_backup_thread;
- name = "ipvs_syncbackup";
+ strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
+ sizeof(ipvs->backup_mcast_ifn));
+ ipvs->backup_syncid = syncid;
+ realtask = &ipvs->backup_thread;
+ name = "ipvs_backup:%d";
threadfn = sync_thread_backup;
- sock = make_receive_sock();
+ sock = make_receive_sock(net);
} else {
return -EINVAL;
}
@@ -884,9 +1567,9 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
goto out;
}
- set_sync_mesg_maxlen(state);
+ set_sync_mesg_maxlen(net, state);
if (state == IP_VS_STATE_BACKUP) {
- buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+ buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
if (!buf)
goto outsocket;
}
@@ -895,10 +1578,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
if (!tinfo)
goto outbuf;
+ tinfo->net = net;
tinfo->sock = sock;
tinfo->buf = buf;
- task = kthread_run(threadfn, tinfo, name);
+ task = kthread_run(threadfn, tinfo, name, ipvs->gen);
if (IS_ERR(task)) {
result = PTR_ERR(task);
goto outtinfo;
@@ -906,7 +1590,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
/* mark as active */
*realtask = task;
- ip_vs_sync_state |= state;
+ ipvs->sync_state |= state;
/* increase the module use count */
ip_vs_use_count_inc();
@@ -924,16 +1608,18 @@ out:
}
-int stop_sync_thread(int state)
+int stop_sync_thread(struct net *net, int state)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
if (state == IP_VS_STATE_MASTER) {
- if (!sync_master_thread)
+ if (!ipvs->master_thread)
return -ESRCH;
pr_info("stopping master sync thread %d ...\n",
- task_pid_nr(sync_master_thread));
+ task_pid_nr(ipvs->master_thread));
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
@@ -941,21 +1627,21 @@ int stop_sync_thread(int state)
* progress of stopping the master sync daemon.
*/
- spin_lock_bh(&ip_vs_sync_lock);
- ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
- spin_unlock_bh(&ip_vs_sync_lock);
- kthread_stop(sync_master_thread);
- sync_master_thread = NULL;
+ spin_lock_bh(&ipvs->sync_lock);
+ ipvs->sync_state &= ~IP_VS_STATE_MASTER;
+ spin_unlock_bh(&ipvs->sync_lock);
+ kthread_stop(ipvs->master_thread);
+ ipvs->master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
- if (!sync_backup_thread)
+ if (!ipvs->backup_thread)
return -ESRCH;
pr_info("stopping backup sync thread %d ...\n",
- task_pid_nr(sync_backup_thread));
+ task_pid_nr(ipvs->backup_thread));
- ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
- kthread_stop(sync_backup_thread);
- sync_backup_thread = NULL;
+ ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+ kthread_stop(ipvs->backup_thread);
+ ipvs->backup_thread = NULL;
} else {
return -EINVAL;
}
@@ -965,3 +1651,42 @@ int stop_sync_thread(int state)
return 0;
}
+
+/*
+ * Initialize data struct for each netns
+ */
+static int __net_init __ip_vs_sync_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->sync_queue);
+ spin_lock_init(&ipvs->sync_lock);
+ spin_lock_init(&ipvs->sync_buff_lock);
+
+ ipvs->sync_mcast_addr.sin_family = AF_INET;
+ ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
+ ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
+ return 0;
+}
+
+static void __ip_vs_sync_cleanup(struct net *net)
+{
+ stop_sync_thread(net, IP_VS_STATE_MASTER);
+ stop_sync_thread(net, IP_VS_STATE_BACKUP);
+}
+
+static struct pernet_operations ipvs_sync_ops = {
+ .init = __ip_vs_sync_init,
+ .exit = __ip_vs_sync_cleanup,
+};
+
+
+int __init ip_vs_sync_init(void)
+{
+ return register_pernet_subsys(&ipvs_sync_ops);
+}
+
+void ip_vs_sync_cleanup(void)
+{
+ unregister_pernet_subsys(&ipvs_sync_ops);
+}
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index bbddfdb10db2..bc1bfc48a17f 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -27,22 +27,6 @@
#include <net/ip_vs.h>
-
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
- /*
- * We think the overhead of processing active connections is 256
- * times higher than that of inactive connections in average. (This
- * 256 times might not be accurate, we will change it later) We
- * use the following formula to estimate the overhead now:
- * dest->activeconns*256 + dest->inactconns
- */
- return (atomic_read(&dest->activeconns) << 8) +
- atomic_read(&dest->inactconns);
-}
-
-
/*
* Weighted Least Connection scheduling
*/
@@ -71,11 +55,11 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
atomic_read(&dest->weight) > 0) {
least = dest;
- loh = ip_vs_wlc_dest_overhead(least);
+ loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
- IP_VS_ERR_RL("WLC: no destination available\n");
+ ip_vs_scheduler_err(svc, "no destination available");
return NULL;
/*
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
list_for_each_entry_continue(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
- doh = ip_vs_wlc_dest_overhead(dest);
+ doh = ip_vs_dest_conn_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
least = dest;
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 30db633f88f1..1ef41f50723c 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -147,8 +147,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
if (mark->cl == mark->cl->next) {
/* no dest entry */
- IP_VS_ERR_RL("WRR: no destination available: "
- "no destinations present\n");
+ ip_vs_scheduler_err(svc,
+ "no destination available: "
+ "no destinations present");
dest = NULL;
goto out;
}
@@ -162,8 +163,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
*/
if (mark->cw == 0) {
mark->cl = &svc->destinations;
- IP_VS_ERR_RL("WRR: no destination "
- "available\n");
+ ip_vs_scheduler_err(svc,
+ "no destination available");
dest = NULL;
goto out;
}
@@ -185,8 +186,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* back to the start, and no dest is found.
It is only possible when all dests are OVERLOADED */
dest = NULL;
- IP_VS_ERR_RL("WRR: no destination available: "
- "all destinations are overloaded\n");
+ ip_vs_scheduler_err(svc,
+ "no destination available: "
+ "all destinations are overloaded");
goto out;
}
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 5325a3fbe4ac..a48239aba33b 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -43,6 +43,13 @@
#include <net/ip_vs.h>
+enum {
+ IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
+ IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
+ IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
+ * local
+ */
+};
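The enum gives names to the rt_mode bits that earlier code passed as the bare literals 1, 2 and 4; the later hunks in this file replace those literals with ORs of the named flags, e.g.:

    /* formerly written as 1|2|4 in the NAT and ICMP transmit paths */
    int rt_mode = IP_VS_RT_MODE_LOCAL |
                  IP_VS_RT_MODE_NON_LOCAL |
                  IP_VS_RT_MODE_RDR;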
/*
* Destination cache to speed up outgoing route lookup
@@ -77,11 +84,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
return dst;
}
-/*
- * Get route to destination or remote server
- * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
- * &4=Allow redirect from remote daddr to local
- */
+/* Get route to destination or remote server */
static struct rtable *
__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
__be32 daddr, u32 rtos, int rt_mode)
@@ -126,15 +129,16 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
}
local = rt->rt_flags & RTCF_LOCAL;
- if (!((local ? 1 : 2) & rt_mode)) {
+ if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
+ rt_mode)) {
IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
(rt->rt_flags & RTCF_LOCAL) ?
"local":"non-local", &rt->rt_dst);
ip_rt_put(rt);
return NULL;
}
- if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
- ort->rt_flags & RTCF_LOCAL)) {
+ if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
+ !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
"requires NAT method, dest: %pI4\n",
&ip_hdr(skb)->daddr, &rt->rt_dst);
@@ -175,7 +179,6 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
.fl4_tos = RT_TOS(iph->tos),
.mark = skb->mark,
};
- struct rtable *rt;
if (ip_route_output_key(net, &rt, &fl))
return 0;
@@ -384,13 +387,14 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
- if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
- RT_TOS(iph->tos), 2)))
+ if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
+ IP_VS_RT_MODE_NON_LOCAL)))
goto tx_error_icmp;
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -443,7 +447,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -512,7 +516,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
}
if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
- RT_TOS(iph->tos), 1|2|4)))
+ RT_TOS(iph->tos),
+ IP_VS_RT_MODE_LOCAL |
+ IP_VS_RT_MODE_NON_LOCAL |
+ IP_VS_RT_MODE_RDR)))
goto tx_error_icmp;
local = rt->rt_flags & RTCF_LOCAL;
/*
@@ -543,7 +550,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
"ip_vs_nat_xmit(): frag needed for");
@@ -658,7 +666,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -754,7 +762,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
- RT_TOS(tos), 1|2)))
+ RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
+ IP_VS_RT_MODE_NON_LOCAL)))
goto tx_error_icmp;
if (rt->rt_flags & RTCF_LOCAL) {
ip_rt_put(rt);
@@ -773,8 +782,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
df |= (old_iph->frag_off & htons(IP_DF));
- if ((old_iph->frag_off & htons(IP_DF))
- && mtu < ntohs(old_iph->tot_len)) {
+ if ((old_iph->frag_off & htons(IP_DF) &&
+ mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error_put;
@@ -886,7 +895,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
- if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+ if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+ !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -982,7 +992,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
- RT_TOS(iph->tos), 1|2)))
+ RT_TOS(iph->tos),
+ IP_VS_RT_MODE_LOCAL |
+ IP_VS_RT_MODE_NON_LOCAL)))
goto tx_error_icmp;
if (rt->rt_flags & RTCF_LOCAL) {
ip_rt_put(rt);
@@ -991,7 +1003,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+ if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1125,7 +1138,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
*/
if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
- RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
+ RT_TOS(ip_hdr(skb)->tos),
+ IP_VS_RT_MODE_LOCAL |
+ IP_VS_RT_MODE_NON_LOCAL |
+ IP_VS_RT_MODE_RDR)))
goto tx_error_icmp;
local = rt->rt_flags & RTCF_LOCAL;
@@ -1158,7 +1174,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error_put;
@@ -1272,7 +1289,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
new file mode 100644
index 000000000000..4e99cca61612
--- /dev/null
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -0,0 +1,82 @@
+/*
+ * broadcast connection tracking helper
+ *
+ * (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <net/route.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout)
+{
+ struct nf_conntrack_expect *exp;
+ struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt = skb_rtable(skb);
+ struct in_device *in_dev;
+ struct nf_conn_help *help = nfct_help(ct);
+ __be32 mask = 0;
+
+ /* we're only interested in locally generated packets */
+ if (skb->sk == NULL)
+ goto out;
+ if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+ goto out;
+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ goto out;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(rt->dst.dev);
+ if (in_dev != NULL) {
+ for_primary_ifa(in_dev) {
+ if (ifa->ifa_broadcast == iph->daddr) {
+ mask = ifa->ifa_mask;
+ break;
+ }
+ } endfor_ifa(in_dev);
+ }
+ rcu_read_unlock();
+
+ if (mask == 0)
+ goto out;
+
+ exp = nf_ct_expect_alloc(ct);
+ if (exp == NULL)
+ goto out;
+
+ exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+ exp->mask.src.u3.ip = mask;
+ exp->mask.src.u.udp.port = htons(0xFFFF);
+
+ exp->expectfn = NULL;
+ exp->flags = NF_CT_EXPECT_PERMANENT;
+ exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
+ exp->helper = NULL;
+
+ nf_ct_expect_related(exp);
+ nf_ct_expect_put(exp);
+
+ nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+ return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
+
+MODULE_LICENSE("GPL");
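
The helper above turns a locally generated broadcast into an expectation that matches unicast replies from any host on the same subnet: the expected source is the broadcast address and the mask is the interface netmask. A minimal user-space sketch of that masked match (addresses below are illustrative, not from the patch):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* the expectation carries (expect_src, mask); a reply matches when its
 * source falls inside the same masked network */
static int masked_match(uint32_t expect_src, uint32_t pkt_src, uint32_t mask)
{
	return (expect_src & mask) == (pkt_src & mask);
}

int main(void)
{
	uint32_t bcast = inet_addr("192.168.1.255");	/* exp->tuple src   */
	uint32_t mask  = inet_addr("255.255.255.0");	/* ifa_mask         */
	uint32_t peer  = inet_addr("192.168.1.42");	/* on-subnet reply  */
	uint32_t other = inet_addr("10.0.0.1");		/* unrelated source */

	printf("peer matches:  %d\n", masked_match(bcast, peer, mask));	/* 1 */
	printf("other matches: %d\n", masked_match(bcast, other, mask));	/* 0 */
	return 0;
}
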
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 84f4fcc5884b..2f454efa1a8b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -43,6 +43,7 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -282,6 +283,11 @@ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
static void death_by_timeout(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
+ struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp && tstamp->stop == 0)
+ tstamp->stop = ktime_to_ns(ktime_get_real());
if (!test_bit(IPS_DYING_BIT, &ct->status) &&
unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
@@ -419,6 +425,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
+ struct nf_conn_tstamp *tstamp;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
@@ -486,8 +493,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
ct->timeout.expires += jiffies;
add_timer(&ct->timeout);
atomic_inc(&ct->ct_general.use);
- set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ ct->status |= IPS_CONFIRMED;
+
+ /* set conntrack timestamp, if enabled. */
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ tstamp->start = ktime_to_ns(skb->tstamp);
+ }
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
@@ -655,7 +670,8 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
* and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
*/
memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
- sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+ offsetof(struct nf_conn, proto) -
+ offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -745,6 +761,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -1192,6 +1209,11 @@ struct __nf_ct_flush_report {
static int kill_report(struct nf_conn *i, void *data)
{
struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
+ struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(i);
+ if (tstamp && tstamp->stop == 0)
+ tstamp->stop = ktime_to_ns(ktime_get_real());
/* If we fail to deliver the event, death_by_timeout() will retry */
if (nf_conntrack_event_report(IPCT_DESTROY, i,
@@ -1208,9 +1230,9 @@ static int kill_all(struct nf_conn *i, void *data)
return 1;
}
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
{
- if (vmalloced)
+ if (is_vmalloc_addr(hash))
vfree(hash);
else
free_pages((unsigned long)hash,
@@ -1277,8 +1299,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
goto i_see_dead_people;
}
- nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
@@ -1307,21 +1328,18 @@ void nf_conntrack_cleanup(struct net *net)
}
}
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
size_t sz;
- *vmalloced = 0;
-
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
if (!hash) {
- *vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL);
@@ -1337,7 +1355,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
- int i, bucket, vmalloced, old_vmalloced;
+ int i, bucket;
unsigned int hashsize, old_size;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
@@ -1354,7 +1372,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
if (!hashsize)
return -EINVAL;
- hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+ hash = nf_ct_alloc_hashtable(&hashsize, 1);
if (!hash)
return -ENOMEM;
@@ -1376,15 +1394,13 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
}
}
old_size = init_net.ct.htable_size;
- old_vmalloced = init_net.ct.hash_vmalloc;
old_hash = init_net.ct.hash;
init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
- init_net.ct.hash_vmalloc = vmalloced;
init_net.ct.hash = hash;
spin_unlock_bh(&nf_conntrack_lock);
- nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+ nf_ct_free_hashtable(old_hash, old_size);
return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
@@ -1497,8 +1513,7 @@ static int nf_conntrack_init_net(struct net *net)
}
net->ct.htable_size = nf_conntrack_htable_size;
- net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
- &net->ct.hash_vmalloc, 1);
+ net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
@@ -1510,6 +1525,9 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_acct_init(net);
if (ret < 0)
goto err_acct;
+ ret = nf_conntrack_tstamp_init(net);
+ if (ret < 0)
+ goto err_tstamp;
ret = nf_conntrack_ecache_init(net);
if (ret < 0)
goto err_ecache;
@@ -1517,12 +1535,13 @@ static int nf_conntrack_init_net(struct net *net)
return 0;
err_ecache:
+ nf_conntrack_tstamp_fini(net);
+err_tstamp:
nf_conntrack_acct_fini(net);
err_acct:
nf_conntrack_expect_fini(net);
err_expect:
- nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
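
The narrowed memset() in __nf_conntrack_alloc() clears only the range between tuplehash[IP_CT_DIR_MAX] and proto, which is why the per-protocol new() handlers now zero ct->proto themselves. A small user-space sketch of an offsetof()-bounded memset of a struct's middle fields (the struct and its members are made up):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct conn {
	int	tuplehash[2];	/* preserved: set up by the caller        */
	long	timeout;	/* zeroed                                 */
	void	*ext;		/* zeroed                                 */
	int	proto_state;	/* NOT zeroed: protocol code owns it now  */
};

int main(void)
{
	struct conn c = { {1, 2}, 99, (void *)&c, 7 };

	/* zero everything from 'timeout' up to, but not including, 'proto_state' */
	memset((char *)&c + offsetof(struct conn, timeout), 0,
	       offsetof(struct conn, proto_state) -
	       offsetof(struct conn, timeout));

	printf("%d %d %ld %s %d\n", c.tuplehash[0], c.tuplehash[1],
	       c.timeout, c.ext ? "set" : "NULL", c.proto_state);
	/* prints: 1 2 0 NULL 7 */
	return 0;
}
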
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index a20fb0bd1efe..cd1e8e0970f2 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -319,7 +319,8 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
const struct nf_conntrack_expect_policy *p;
unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
- atomic_inc(&exp->use);
+ /* two references : one for hash insert, one for the timer */
+ atomic_add(2, &exp->use);
if (master_help) {
hlist_add_head(&exp->lnode, &master_help->expectations);
@@ -333,12 +334,14 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
(unsigned long)exp);
if (master_help) {
- p = &master_help->helper->expect_policy[exp->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[exp->class];
exp->timeout.expires = jiffies + p->timeout * HZ;
}
add_timer(&exp->timeout);
- atomic_inc(&exp->use);
NF_CT_STAT_INC(net, expect_create);
}
@@ -369,7 +372,10 @@ static inline int refresh_timer(struct nf_conntrack_expect *i)
if (!del_timer(&i->timeout))
return 0;
- p = &master_help->helper->expect_policy[i->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[i->class];
i->timeout.expires = jiffies + p->timeout * HZ;
add_timer(&i->timeout);
return 1;
@@ -407,7 +413,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
/* Will be over limit? */
if (master_help) {
- p = &master_help->helper->expect_policy[expect->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[expect->class];
if (p->max_expected &&
master_help->expecting[expect->class] >= p->max_expected) {
evict_oldest_expect(master, expect);
@@ -478,7 +487,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
if (n)
return n;
}
@@ -491,11 +500,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_expect_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
- head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
}
return head;
}
@@ -630,8 +639,7 @@ int nf_conntrack_expect_init(struct net *net)
}
net->ct.expect_count = 0;
- net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
- &net->ct.expect_vmalloc, 0);
+ net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
if (net->ct.expect_hash == NULL)
goto err1;
@@ -653,8 +661,7 @@ err3:
if (net_eq(net, &init_net))
kmem_cache_destroy(nf_ct_expect_cachep);
err2:
- nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
- nf_ct_expect_hsize);
+ nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
return err;
}
@@ -666,6 +673,5 @@ void nf_conntrack_expect_fini(struct net *net)
rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
}
- nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
- nf_ct_expect_hsize);
+ nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
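
nf_ct_expect_insert() now takes the hash-table reference and the timer reference in a single atomic_add(2, ...) before the expectation becomes reachable, instead of adding the second reference after the timer is already armed. A rough sketch of the same refcount pattern with C11 atomics (names and values are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct expect {
	atomic_int use;
};

static void expect_insert(struct expect *exp)
{
	/* two references up front: one for the hash insert, one for the timer */
	atomic_fetch_add(&exp->use, 2);
	/* ... hlist_add_head(...); setup_timer(...); add_timer(...); */
}

static void expect_put(struct expect *exp)
{
	if (atomic_fetch_sub(&exp->use, 1) == 1)
		printf("freed\n");
}

int main(void)
{
	struct expect e = { .use = 1 };	/* allocation reference */

	expect_insert(&e);
	expect_put(&e);			/* drop the allocation ref: 3 -> 2 */
	printf("use=%d\n", atomic_load(&e.use));	/* use=2, still live */
	return 0;
}
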
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index bd82450c193f..80a23ed62bb0 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -140,15 +140,16 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
/* This assumes that extended areas in conntrack for the types
whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
for (i = min; i <= max; i++) {
- t1 = nf_ct_ext_types[i];
+ t1 = rcu_dereference_protected(nf_ct_ext_types[i],
+ lockdep_is_held(&nf_ct_ext_type_mutex));
if (!t1)
continue;
- t1->alloc_size = sizeof(struct nf_ct_ext)
- + ALIGN(sizeof(struct nf_ct_ext), t1->align)
- + t1->len;
+ t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
+ t1->len;
for (j = 0; j < NF_CT_EXT_NUM; j++) {
- t2 = nf_ct_ext_types[j];
+ t2 = rcu_dereference_protected(nf_ct_ext_types[j],
+ lockdep_is_held(&nf_ct_ext_type_mutex));
if (t2 == NULL || t2 == t1 ||
(t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
continue;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 59e1a4cd4e8b..1bdfea357955 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(nf_ct_helper_mutex);
static struct hlist_head *nf_ct_helper_hash __read_mostly;
static unsigned int nf_ct_helper_hsize __read_mostly;
static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
/* Stupid hash, but collision free for the default registrations of the
@@ -158,7 +157,10 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
struct nf_conn_help *help = nfct_help(ct);
- if (help && help->helper == me) {
+ if (help && rcu_dereference_protected(
+ help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ ) == me) {
nf_conntrack_event(IPCT_HELPER, ct);
rcu_assign_pointer(help->helper, NULL);
}
@@ -210,7 +212,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
hlist_for_each_entry_safe(exp, n, next,
&net->ct.expect_hash[i], hnode) {
struct nf_conn_help *help = nfct_help(exp->master);
- if ((help->helper == me || exp->helper == me) &&
+ if ((rcu_dereference_protected(
+ help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ ) == me || exp->helper == me) &&
del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
@@ -261,8 +266,7 @@ int nf_conntrack_helper_init(void)
int err;
nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
- nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
- &nf_ct_helper_vmalloc, 0);
+ nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
if (!nf_ct_helper_hash)
return -ENOMEM;
@@ -273,14 +277,12 @@ int nf_conntrack_helper_init(void)
return 0;
err1:
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
- nf_ct_helper_hsize);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
return err;
}
void nf_conntrack_helper_fini(void)
{
nf_ct_extend_unregister(&helper_extend);
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
- nf_ct_helper_hsize);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
}
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index aadde018a072..4c8f30a3d6d2 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -18,14 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <net/route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -40,75 +33,26 @@ MODULE_ALIAS("ip_conntrack_netbios_ns");
MODULE_ALIAS_NFCT_HELPER("netbios_ns");
static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, 0400);
+module_param(timeout, uint, S_IRUSR);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
-static int help(struct sk_buff *skb, unsigned int protoff,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
- struct nf_conntrack_expect *exp;
- struct iphdr *iph = ip_hdr(skb);
- struct rtable *rt = skb_rtable(skb);
- struct in_device *in_dev;
- __be32 mask = 0;
-
- /* we're only interested in locally generated packets */
- if (skb->sk == NULL)
- goto out;
- if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
- goto out;
- if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
- goto out;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(rt->dst.dev);
- if (in_dev != NULL) {
- for_primary_ifa(in_dev) {
- if (ifa->ifa_broadcast == iph->daddr) {
- mask = ifa->ifa_mask;
- break;
- }
- } endfor_ifa(in_dev);
- }
- rcu_read_unlock();
-
- if (mask == 0)
- goto out;
-
- exp = nf_ct_expect_alloc(ct);
- if (exp == NULL)
- goto out;
-
- exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
- exp->mask.src.u3.ip = mask;
- exp->mask.src.u.udp.port = htons(0xFFFF);
-
- exp->expectfn = NULL;
- exp->flags = NF_CT_EXPECT_PERMANENT;
- exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
- exp->helper = NULL;
-
- nf_ct_expect_related(exp);
- nf_ct_expect_put(exp);
-
- nf_ct_refresh(ct, skb, timeout * HZ);
-out:
- return NF_ACCEPT;
-}
-
static struct nf_conntrack_expect_policy exp_policy = {
.max_expected = 1,
};
+static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+}
+
static struct nf_conntrack_helper helper __read_mostly = {
.name = "netbios-ns",
- .tuple.src.l3num = AF_INET,
+ .tuple.src.l3num = NFPROTO_IPV4,
.tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
- .help = help,
+ .help = netbios_ns_help,
.expect_policy = &exp_policy,
};
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index eead9db6f899..30bf8a167fc8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -42,6 +42,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
@@ -230,6 +231,33 @@ nla_put_failure:
return -1;
}
+static int
+ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+ struct nlattr *nest_count;
+ const struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (!tstamp)
+ return 0;
+
+ nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
+ if (!nest_count)
+ goto nla_put_failure;
+
+ NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
+ if (tstamp->stop != 0) {
+ NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
+ cpu_to_be64(tstamp->stop));
+ }
+ nla_nest_end(skb, nest_count);
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
@@ -404,6 +432,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
@@ -471,6 +500,18 @@ ctnetlink_secctx_size(const struct nf_conn *ct)
}
static inline size_t
+ctnetlink_timestamp_size(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+ return 0;
+ return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+#else
+ return 0;
+#endif
+}
+
+static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
return NLMSG_ALIGN(sizeof(struct nfgenmsg))
@@ -481,6 +522,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ ctnetlink_counters_size(ct)
+ + ctnetlink_timestamp_size(ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
@@ -571,7 +613,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
if (events & (1 << IPCT_DESTROY)) {
if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
- ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+ ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0)
goto nla_put_failure;
} else {
if (ctnetlink_dump_timeout(skb, ct) < 0)
@@ -761,7 +804,7 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
- enum ctattr_tuple type, u_int8_t l3num)
+ enum ctattr_type type, u_int8_t l3num)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
@@ -1358,6 +1401,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
@@ -1376,6 +1420,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
#endif
+ memset(&ct->proto, 0, sizeof(ct->proto));
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
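
ctnetlink_timestamp_size() reserves room for a nested CTA_TIMESTAMP attribute carrying two 64-bit values. A quick user-space check of that size arithmetic, assuming the usual nla_total_size(n) = NLA_ALIGN(NLA_HDRLEN + n) definition:

#include <sys/socket.h>
#include <linux/netlink.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the kernel helper for this back-of-the-envelope check */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* empty nest header + CTA_TIMESTAMP_START + CTA_TIMESTAMP_STOP */
	int sz = nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));

	printf("reserved for CTA_TIMESTAMP: %d bytes\n", sz);	/* 4 + 2*12 = 28 */
	return 0;
}
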
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index dc7bb74110df..5701c8dd783c 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -166,6 +166,7 @@ static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto
int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
+ struct nf_conntrack_l3proto *old;
if (proto->l3proto >= AF_MAX)
return -EBUSY;
@@ -174,7 +175,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
return -EINVAL;
mutex_lock(&nf_ct_proto_mutex);
- if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+ old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+ lockdep_is_held(&nf_ct_proto_mutex));
+ if (old != &nf_conntrack_l3proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
@@ -201,7 +204,9 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
BUG_ON(proto->l3proto >= AF_MAX);
mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
+ BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
nf_ct_l3proto_unregister_sysctl(proto);
@@ -279,7 +284,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
mutex_lock(&nf_ct_proto_mutex);
if (!nf_ct_protos[l4proto->l3proto]) {
/* l3proto may be loaded later. */
- struct nf_conntrack_l4proto **proto_array;
+ struct nf_conntrack_l4proto __rcu **proto_array;
int i;
proto_array = kmalloc(MAX_NF_CT_PROTO *
@@ -291,7 +296,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
}
for (i = 0; i < MAX_NF_CT_PROTO; i++)
- proto_array[i] = &nf_conntrack_l4proto_generic;
+ RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic);
/* Before making proto_array visible to lockless readers,
* we must make sure its content is committed to memory.
@@ -299,8 +304,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
smp_wmb();
nf_ct_protos[l4proto->l3proto] = proto_array;
- } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
- &nf_conntrack_l4proto_generic) {
+ } else if (rcu_dereference_protected(
+ nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != &nf_conntrack_l4proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
@@ -331,7 +338,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
BUG_ON(l4proto->l3proto >= PF_MAX);
mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
+ BUG_ON(rcu_dereference_protected(
+ nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
nf_ct_l4proto_unregister_sysctl(l4proto);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 5292560d6d4a..9ae57c57c50e 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -452,6 +452,9 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
+ ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
+ ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
+ ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c6049c2d5ea8..6f4ee70f460b 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -413,6 +413,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
test_bit(SCTP_CID_COOKIE_ACK, map))
return false;
+ memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
new_state = SCTP_CONNTRACK_MAX;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
/* Don't need lock here: this conntrack not in circulation yet */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3fb2b73b24dc..37bf94394be0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -227,11 +227,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
* sCL -> sIV
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
-/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
* sSS -> sSR Standard open.
* sS2 -> sSR Simultaneous open
- * sSR -> sSR Retransmitted SYN/ACK.
+ * sSR -> sIG Retransmitted SYN/ACK, ignore it.
* sES -> sIG Late retransmitted SYN/ACK?
* sFW -> sIG Might be SYN/ACK answering ignored SYN
* sCW -> sIG
@@ -1066,9 +1066,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
BUG_ON(th == NULL);
/* Don't need lock here: this conntrack not in circulation yet */
- new_state
- = tcp_conntracks[0][get_conntrack_index(th)]
- [TCP_CONNTRACK_NONE];
+ new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
/* Invalid: delete conntrack */
if (new_state >= TCP_CONNTRACK_MAX) {
@@ -1077,6 +1075,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
}
if (new_state == TCP_CONNTRACK_SYN_SENT) {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/* SYN packet */
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
@@ -1088,11 +1087,11 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
- ct->proto.tcp.seen[1].flags = 0;
} else if (nf_ct_tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/*
* We are in the middle of a connection,
* its history is lost for us.
@@ -1107,7 +1106,6 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end +
ct->proto.tcp.seen[0].td_maxwin;
- ct->proto.tcp.seen[0].td_scale = 0;
/* We assume SACK and liberal window checking to handle
* window scaling */
@@ -1116,13 +1114,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
IP_CT_TCP_FLAG_BE_LIBERAL;
}
- ct->proto.tcp.seen[1].td_end = 0;
- ct->proto.tcp.seen[1].td_maxend = 0;
- ct->proto.tcp.seen[1].td_maxwin = 0;
- ct->proto.tcp.seen[1].td_scale = 0;
-
/* tcp_packet will set them */
- ct->proto.tcp.state = TCP_CONNTRACK_NONE;
ct->proto.tcp.last_index = TCP_NONE_SET;
pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
new file mode 100644
index 000000000000..6e545e26289e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -0,0 +1,77 @@
+/*
+ * SNMP service broadcast connection tracking helper
+ *
+ * (c) 2011 Jiri Olsa <jolsa@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SNMP_PORT 161
+
+MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
+MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFCT_HELPER("snmp");
+
+static unsigned int timeout __read_mostly = 30;
+module_param(timeout, uint, S_IRUSR);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
+
+static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ typeof(nf_nat_snmp_hook) nf_nat_snmp;
+
+ nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+
+ nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
+ if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
+ return nf_nat_snmp(skb, protoff, ct, ctinfo);
+
+ return NF_ACCEPT;
+}
+
+static struct nf_conntrack_expect_policy exp_policy = {
+ .max_expected = 1,
+};
+
+static struct nf_conntrack_helper helper __read_mostly = {
+ .name = "snmp",
+ .tuple.src.l3num = NFPROTO_IPV4,
+ .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
+ .tuple.dst.protonum = IPPROTO_UDP,
+ .me = THIS_MODULE,
+ .help = snmp_conntrack_help,
+ .expect_policy = &exp_policy,
+};
+
+static int __init nf_conntrack_snmp_init(void)
+{
+ exp_policy.timeout = timeout;
+ return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_snmp_fini(void)
+{
+ nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_snmp_init);
+module_exit(nf_conntrack_snmp_fini);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b4d7f0f24b27..0ae142825881 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -29,6 +29,8 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <linux/rculist_nulls.h>
MODULE_LICENSE("GPL");
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(print_tuple);
struct ct_iter_state {
struct seq_net_private p;
unsigned int bucket;
+ u_int64_t time_now;
};
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
@@ -56,7 +59,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
- n = rcu_dereference(net->ct.hash[st->bucket].first);
+ n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
@@ -69,13 +72,15 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
- head = rcu_dereference(net->ct.hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_nulls_first_rcu(
+ &net->ct.hash[st->bucket]));
}
return head;
}
@@ -93,6 +98,9 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
+ struct ct_iter_state *st = seq->private;
+
+ st->time_now = ktime_to_ns(ktime_get_real());
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
@@ -132,6 +140,34 @@ static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
}
#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+ struct ct_iter_state *st = s->private;
+ struct nf_conn_tstamp *tstamp;
+ s64 delta_time;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+ delta_time = st->time_now - tstamp->start;
+ if (delta_time > 0)
+ delta_time = div_s64(delta_time, NSEC_PER_SEC);
+ else
+ delta_time = 0;
+
+ return seq_printf(s, "delta-time=%llu ",
+ (unsigned long long)delta_time);
+ }
+ return 0;
+}
+#else
+static inline int
+ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+ return 0;
+}
+#endif
+
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
@@ -200,6 +236,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif
+ if (ct_show_delta_time(s, ct))
+ goto release;
+
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
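
The new delta-time field is simply the difference between the current wall-clock time and the nanosecond start stamp recorded at confirm time, divided down to whole seconds and clamped at zero. A stand-alone sketch of that computation (user-space clock calls stand in for ktime_get_real()):

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

int main(void)
{
	int64_t start = now_ns();	/* tstamp->start, set at confirm time */
	int64_t delta;

	/* ... connection lives for a while ... */
	delta = now_ns() - start;
	if (delta < 0)
		delta = 0;		/* wall clock stepped backwards */
	printf("delta-time=%llu\n",
	       (unsigned long long)(delta / NSEC_PER_SEC));
	return 0;
}
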
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
new file mode 100644
index 000000000000..af7dd31af0a1
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -0,0 +1,120 @@
+/*
+ * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+
+static int nf_ct_tstamp __read_mostly;
+
+module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
+MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table tstamp_sysctl_table[] = {
+ {
+ .procname = "nf_conntrack_timestamp",
+ .data = &init_net.ct.sysctl_tstamp,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_tstamp),
+ .align = __alignof__(struct nf_conn_tstamp),
+ .id = NF_CT_EXT_TSTAMP,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ table[0].data = &net->ct.sysctl_tstamp;
+
+ net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
+ nf_net_netfilter_sysctl_path, table);
+ if (!net->ct.tstamp_sysctl_header) {
+ printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
+ goto out_register;
+ }
+ return 0;
+
+out_register:
+ kfree(table);
+out:
+ return -ENOMEM;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ct.tstamp_sysctl_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
+ kfree(table);
+}
+#else
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+ return 0;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+}
+#endif
+
+int nf_conntrack_tstamp_init(struct net *net)
+{
+ int ret;
+
+ net->ct.sysctl_tstamp = nf_ct_tstamp;
+
+ if (net_eq(net, &init_net)) {
+ ret = nf_ct_extend_register(&tstamp_extend);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_ct_tstamp: Unable to register "
+ "extension\n");
+ goto out_extend_register;
+ }
+ }
+
+ ret = nf_conntrack_tstamp_init_sysctl(net);
+ if (ret < 0)
+ goto out_sysctl;
+
+ return 0;
+
+out_sysctl:
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&tstamp_extend);
+out_extend_register:
+ return ret;
+}
+
+void nf_conntrack_tstamp_fini(struct net *net)
+{
+ nf_conntrack_tstamp_fini_sysctl(net);
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&tstamp_extend);
+}
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..20714edf6cd2 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
{
+ if (pf >= ARRAY_SIZE(nf_loggers))
+ return -EINVAL;
mutex_lock(&nf_log_mutex);
if (__find_logger(pf, logger->name) == NULL) {
mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
void nf_log_unbind_pf(u_int8_t pf)
{
+ if (pf >= ARRAY_SIZE(nf_loggers))
+ return;
mutex_lock(&nf_log_mutex);
rcu_assign_pointer(nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
@@ -161,7 +165,8 @@ static int seq_show(struct seq_file *s, void *v)
struct nf_logger *t;
int ret;
- logger = nf_loggers[*pos];
+ logger = rcu_dereference_protected(nf_loggers[*pos],
+ lockdep_is_held(&nf_log_mutex));
if (!logger)
ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -249,7 +254,8 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
- logger = nf_loggers[tindex];
+ logger = rcu_dereference_protected(nf_loggers[tindex],
+ lockdep_is_held(&nf_log_mutex));
if (!logger)
table->data = "NONE";
else
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 74aebed5bd28..5ab22e2bbd7d 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -27,14 +27,17 @@ static DEFINE_MUTEX(queue_handler_mutex);
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
int ret;
+ const struct nf_queue_handler *old;
if (pf >= ARRAY_SIZE(queue_handler))
return -EINVAL;
mutex_lock(&queue_handler_mutex);
- if (queue_handler[pf] == qh)
+ old = rcu_dereference_protected(queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex));
+ if (old == qh)
ret = -EEXIST;
- else if (queue_handler[pf])
+ else if (old)
ret = -EBUSY;
else {
rcu_assign_pointer(queue_handler[pf], qh);
@@ -49,11 +52,15 @@ EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
+ const struct nf_queue_handler *old;
+
if (pf >= ARRAY_SIZE(queue_handler))
return -EINVAL;
mutex_lock(&queue_handler_mutex);
- if (queue_handler[pf] && queue_handler[pf] != qh) {
+ old = rcu_dereference_protected(queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex));
+ if (old && old != qh) {
mutex_unlock(&queue_handler_mutex);
return -EINVAL;
}
@@ -73,7 +80,10 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
mutex_lock(&queue_handler_mutex);
for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
- if (queue_handler[pf] == qh)
+ if (rcu_dereference_protected(
+ queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex)
+ ) == qh)
rcu_assign_pointer(queue_handler[pf], NULL);
}
mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
int (*okfn)(struct sk_buff *),
unsigned int queuenum)
{
- int status;
+ int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
struct net_device *physindev;
@@ -128,16 +138,20 @@ static int __nf_queue(struct sk_buff *skb,
rcu_read_lock();
qh = rcu_dereference(queue_handler[pf]);
- if (!qh)
+ if (!qh) {
+ status = -ESRCH;
goto err_unlock;
+ }
afinfo = nf_get_afinfo(pf);
if (!afinfo)
goto err_unlock;
entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
- if (!entry)
+ if (!entry) {
+ status = -ENOMEM;
goto err_unlock;
+ }
*entry = (struct nf_queue_entry) {
.skb = skb,
@@ -151,11 +165,9 @@ static int __nf_queue(struct sk_buff *skb,
/* If it's going away, ignore hook. */
if (!try_module_get(entry->elem->owner)) {
- rcu_read_unlock();
- kfree(entry);
- return 0;
+ status = -ECANCELED;
+ goto err_unlock;
}
-
/* Bump dev refs so they don't vanish while packet is out */
if (indev)
dev_hold(indev);
@@ -182,14 +194,13 @@ static int __nf_queue(struct sk_buff *skb,
goto err;
}
- return 1;
+ return 0;
err_unlock:
rcu_read_unlock();
err:
- kfree_skb(skb);
kfree(entry);
- return 1;
+ return status;
}
int nf_queue(struct sk_buff *skb,
@@ -201,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
unsigned int queuenum)
{
struct sk_buff *segs;
+ int err;
+ unsigned int queued;
if (!skb_is_gso(skb))
return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@ int nf_queue(struct sk_buff *skb,
}
segs = skb_gso_segment(skb, 0);
- kfree_skb(skb);
+ /* Does not use PTR_ERR to limit the number of error codes that can be
+ * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
+ * 'ignore this hook'.
+ */
if (IS_ERR(segs))
- return 1;
+ return -EINVAL;
+ queued = 0;
+ err = 0;
do {
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
- queuenum))
+ if (err == 0)
+ err = __nf_queue(segs, elem, pf, hook, indev,
+ outdev, okfn, queuenum);
+ if (err == 0)
+ queued++;
+ else
kfree_skb(segs);
segs = nskb;
} while (segs);
- return 1;
+
+ /* also free orig skb if only some segments were queued */
+ if (unlikely(err && queued))
+ err = 0;
+ if (err == 0)
+ kfree_skb(skb);
+ return err;
}
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -237,6 +265,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
struct sk_buff *skb = entry->skb;
struct list_head *elem = &entry->elem->list;
const struct nf_afinfo *afinfo;
+ int err;
rcu_read_lock();
@@ -270,10 +299,17 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_enable();
break;
case NF_QUEUE:
- if (!__nf_queue(skb, elem, entry->pf, entry->hook,
- entry->indev, entry->outdev, entry->okfn,
- verdict >> NF_VERDICT_BITS))
- goto next_hook;
+ err = __nf_queue(skb, elem, entry->pf, entry->hook,
+ entry->indev, entry->outdev, entry->okfn,
+ verdict >> NF_VERDICT_QBITS);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+ if (err == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ goto next_hook;
+ kfree_skb(skb);
+ }
break;
case NF_STOLEN:
default:
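
The reworked GSO path in nf_queue() queues each segment separately, frees segments that could not be queued, and still reports success (so the original skb is freed) as long as at least one segment made it into the queue. A user-space simulation of that bookkeeping (the list and the contrived enqueue failure stand in for the real skb machinery):

#include <stdio.h>
#include <stdlib.h>

struct seg { struct seg *next; int id; };

static int enqueue(struct seg *s)
{
	return s->id == 2 ? -1 : 0;	/* pretend the second segment fails */
}

static int queue_all(struct seg *segs)
{
	unsigned int queued = 0;
	int err = 0;

	while (segs) {
		struct seg *next = segs->next;

		segs->next = NULL;
		if (err == 0)
			err = enqueue(segs);	/* queued segment now owned by the handler */
		if (err == 0)
			queued++;
		else
			free(segs);		/* kfree_skb(segs) for failed ones */
		segs = next;
	}
	/* swallow the error if only some segments were queued */
	if (err && queued)
		err = 0;
	return err;	/* 0 tells the caller to free the original skb */
}

int main(void)
{
	struct seg *s3 = malloc(sizeof(*s3)), *s2 = malloc(sizeof(*s2)),
		   *s1 = malloc(sizeof(*s1));

	s1->id = 1; s1->next = s2;
	s2->id = 2; s2->next = s3;
	s3->id = 3; s3->next = NULL;
	printf("queue_all -> %d\n", queue_all(s1));	/* 0: first segment was queued */
	return 0;
}
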
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87befb04c0..474d621cbc2e 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
skb->destructor = NULL;
if (sk)
- nf_tproxy_put_sock(sk);
+ sock_put(sk);
}
/* consumes sk */
-int
+void
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
- bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
- inet_twsk(sk)->tw_transparent :
- inet_sk(sk)->transparent;
-
- if (transparent) {
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = nf_tproxy_destructor;
- return 1;
- } else
- nf_tproxy_put_sock(sk);
-
- return 0;
+ /* assigning tw sockets complicates things; most
+ * skb->sk->X checks would have to test sk->sk_state first */
+ if (sk->sk_state == TCP_TIME_WAIT) {
+ inet_twsk_put(inet_twsk(sk));
+ return;
+ }
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = nf_tproxy_destructor;
}
EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 6a1572b0ab41..985e9b76c916 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -376,7 +376,6 @@ __build_packet_message(struct nfulnl_instance *inst,
unsigned int hooknum,
const struct net_device *indev,
const struct net_device *outdev,
- const struct nf_loginfo *li,
const char *prefix, unsigned int plen)
{
struct nfulnl_msg_packet_hdr pmsg;
@@ -652,7 +651,7 @@ nfulnl_log_packet(u_int8_t pf,
inst->qlen++;
__build_packet_message(inst, skb, data_len, pf,
- hooknum, in, out, li, prefix, plen);
+ hooknum, in, out, prefix, plen);
if (inst->qlen >= qthreshold)
__nfulnl_flush(inst);
@@ -874,19 +873,19 @@ static struct hlist_node *get_first(struct iter_state *st)
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
if (!hlist_empty(&instance_table[st->bucket]))
- return rcu_dereference_bh(instance_table[st->bucket].first);
+ return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
}
return NULL;
}
static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
{
- h = rcu_dereference_bh(h->next);
+ h = rcu_dereference_bh(hlist_next_rcu(h));
while (!h) {
if (++st->bucket >= INSTANCE_BUCKETS)
return NULL;
- h = rcu_dereference_bh(instance_table[st->bucket].first);
+ h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
}
return h;
}
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 68e67d19724d..b83123f12b42 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,25 +387,31 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
struct sk_buff *nskb;
struct nfqnl_instance *queue;
- int err;
+ int err = -ENOBUFS;
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(queuenum);
- if (!queue)
+ if (!queue) {
+ err = -ESRCH;
goto err_out;
+ }
- if (queue->copy_mode == NFQNL_COPY_NONE)
+ if (queue->copy_mode == NFQNL_COPY_NONE) {
+ err = -EINVAL;
goto err_out;
+ }
nskb = nfqnl_build_packet_message(queue, entry);
- if (nskb == NULL)
+ if (nskb == NULL) {
+ err = -ENOMEM;
goto err_out;
-
+ }
spin_lock_bh(&queue->lock);
- if (!queue->peer_pid)
+ if (!queue->peer_pid) {
+ err = -EINVAL;
goto err_out_free_nskb;
-
+ }
if (queue->queue_total >= queue->queue_maxlen) {
queue->queue_dropped++;
if (net_ratelimit())
@@ -432,7 +438,7 @@ err_out_free_nskb:
err_out_unlock:
spin_unlock_bh(&queue->lock);
err_out:
- return -1;
+ return err;
}
static int
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c94237631077..0a77d2ff2154 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/audit.h>
#include <net/net_namespace.h>
#include <linux/netfilter/x_tables.h>
@@ -38,9 +39,8 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct compat_delta {
- struct compat_delta *next;
- unsigned int offset;
- int delta;
+ unsigned int offset; /* offset in kernel */
+ int delta; /* delta in 32bit user land */
};
struct xt_af {
@@ -49,7 +49,9 @@ struct xt_af {
struct list_head target;
#ifdef CONFIG_COMPAT
struct mutex compat_mutex;
- struct compat_delta *compat_offsets;
+ struct compat_delta *compat_tab;
+ unsigned int number; /* number of slots in compat_tab[] */
+ unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
@@ -414,54 +416,67 @@ int xt_check_match(struct xt_mtchk_param *par,
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
-int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
- struct compat_delta *tmp;
+ struct xt_af *xp = &xt[af];
- tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
+ if (!xp->compat_tab) {
+ if (!xp->number)
+ return -EINVAL;
+ xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+ if (!xp->compat_tab)
+ return -ENOMEM;
+ xp->cur = 0;
+ }
- tmp->offset = offset;
- tmp->delta = delta;
+ if (xp->cur >= xp->number)
+ return -EINVAL;
- if (xt[af].compat_offsets) {
- tmp->next = xt[af].compat_offsets->next;
- xt[af].compat_offsets->next = tmp;
- } else {
- xt[af].compat_offsets = tmp;
- tmp->next = NULL;
- }
+ if (xp->cur)
+ delta += xp->compat_tab[xp->cur - 1].delta;
+ xp->compat_tab[xp->cur].offset = offset;
+ xp->compat_tab[xp->cur].delta = delta;
+ xp->cur++;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
- struct compat_delta *tmp, *next;
-
- if (xt[af].compat_offsets) {
- for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
- next = tmp->next;
- kfree(tmp);
- }
- xt[af].compat_offsets = NULL;
+ if (xt[af].compat_tab) {
+ vfree(xt[af].compat_tab);
+ xt[af].compat_tab = NULL;
+ xt[af].number = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
- struct compat_delta *tmp;
- int delta;
-
- for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
- if (tmp->offset < offset)
- delta += tmp->delta;
- return delta;
+ struct compat_delta *tmp = xt[af].compat_tab;
+ int mid, left = 0, right = xt[af].cur - 1;
+
+ while (left <= right) {
+ mid = (left + right) >> 1;
+ if (offset > tmp[mid].offset)
+ left = mid + 1;
+ else if (offset < tmp[mid].offset)
+ right = mid - 1;
+ else
+ return mid ? tmp[mid - 1].delta : 0;
+ }
+ WARN_ON_ONCE(1);
+ return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number)
+{
+ xt[af].number = number;
+ xt[af].cur = 0;
+}
+EXPORT_SYMBOL(xt_compat_init_offsets);
+
int xt_compat_match_offset(const struct xt_match *match)
{
u_int16_t csize = match->compatsize ? : match->matchsize;
@@ -820,6 +835,21 @@ xt_replace_table(struct xt_table *table,
*/
local_bh_enable();
+#ifdef CONFIG_AUDIT
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_NETFILTER_CFG);
+ if (ab) {
+ audit_log_format(ab, "table=%s family=%u entries=%u",
+ table->name, table->af,
+ private->number);
+ audit_log_end(ab);
+ }
+ }
+#endif
+
return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1338,7 +1368,7 @@ static int __init xt_init(void)
mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
mutex_init(&xt[i].compat_mutex);
- xt[i].compat_offsets = NULL;
+ xt[i].compat_tab = NULL;
#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
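
xt_compat_calc_jump() no longer walks a linked list summing deltas; offsets are appended to a pre-sized array in ascending order with cumulative deltas, so the adjustment for a rule is found by bisection and read from the preceding slot. A compilable user-space sketch of that table (array size and sample offsets are made up):

#include <stdio.h>

struct compat_delta {
	unsigned int offset;	/* offset in kernel layout */
	int delta;		/* running delta vs. 32-bit user layout */
};

static struct compat_delta tab[16];
static unsigned int cur;

static int add_offset(unsigned int offset, int delta)
{
	if (cur >= 16)
		return -1;
	if (cur)
		delta += tab[cur - 1].delta;	/* store the running sum */
	tab[cur].offset = offset;
	tab[cur].delta = delta;
	cur++;
	return 0;
}

static int calc_jump(unsigned int offset)
{
	int mid, left = 0, right = cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tab[mid].offset)
			left = mid + 1;
		else if (offset < tab[mid].offset)
			right = mid - 1;
		else
			/* delta accumulated by all entries before this one */
			return mid ? tab[mid - 1].delta : 0;
	}
	return 0;	/* offset was never registered */
}

int main(void)
{
	add_offset(100, 8);	/* rule at 100 shrinks by 8 in compat layout */
	add_offset(200, 4);
	add_offset(300, 8);
	printf("%d %d %d\n", calc_jump(100), calc_jump(200), calc_jump(300));
	/* prints: 0 8 12 */
	return 0;
}
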
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
new file mode 100644
index 000000000000..81802d27346e
--- /dev/null
+++ b/net/netfilter/xt_AUDIT.c
@@ -0,0 +1,204 @@
+/*
+ * Creates audit record for dropped/accepted packets
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_AUDIT.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
+MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
+MODULE_ALIAS("ipt_AUDIT");
+MODULE_ALIAS("ip6t_AUDIT");
+MODULE_ALIAS("ebt_AUDIT");
+MODULE_ALIAS("arpt_AUDIT");
+
+static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
+ unsigned int proto, unsigned int offset)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE: {
+ const __be16 *pptr;
+ __be16 _ports[2];
+
+ pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
+ if (pptr == NULL) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " sport=%hu dport=%hu",
+ ntohs(pptr[0]), ntohs(pptr[1]));
+ }
+ break;
+
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6: {
+ const u8 *iptr;
+ u8 _ih[2];
+
+ iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
+ if (iptr == NULL) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
+ iptr[0], iptr[1]);
+
+ }
+ break;
+ }
+}
+
+static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
+{
+ struct iphdr _iph;
+ const struct iphdr *ih;
+
+ ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+ if (!ih) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
+ &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);
+
+ if (ntohs(ih->frag_off) & IP_OFFSET) {
+ audit_log_format(ab, " frag=1");
+ return;
+ }
+
+ audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
+}
+
+static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
+{
+ struct ipv6hdr _ip6h;
+ const struct ipv6hdr *ih;
+ u8 nexthdr;
+ int offset;
+
+ ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
+ if (!ih) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ nexthdr = ih->nexthdr;
+ offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
+ &nexthdr);
+
+ audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
+ &ih->saddr, &ih->daddr, nexthdr);
+
+ if (offset)
+ audit_proto(ab, skb, nexthdr, offset);
+}
+
+static unsigned int
+audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_audit_info *info = par->targinfo;
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
+ if (ab == NULL)
+ goto errout;
+
+ audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
+ info->type, par->hooknum, skb->len,
+ par->in ? par->in->name : "?",
+ par->out ? par->out->name : "?");
+
+ if (skb->mark)
+ audit_log_format(ab, " mark=%#x", skb->mark);
+
+ if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+ audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+
+ if (par->family == NFPROTO_BRIDGE) {
+ switch (eth_hdr(skb)->h_proto) {
+ case __constant_htons(ETH_P_IP):
+ audit_ip4(ab, skb);
+ break;
+
+ case __constant_htons(ETH_P_IPV6):
+ audit_ip6(ab, skb);
+ break;
+ }
+ }
+ }
+
+ switch (par->family) {
+ case NFPROTO_IPV4:
+ audit_ip4(ab, skb);
+ break;
+
+ case NFPROTO_IPV6:
+ audit_ip6(ab, skb);
+ break;
+ }
+
+ audit_log_end(ab);
+
+errout:
+ return XT_CONTINUE;
+}
+
+static int audit_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_audit_info *info = par->targinfo;
+
+ if (info->type > XT_AUDIT_TYPE_MAX) {
+ pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+ XT_AUDIT_TYPE_MAX);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static struct xt_target audit_tg_reg __read_mostly = {
+ .name = "AUDIT",
+ .family = NFPROTO_UNSPEC,
+ .target = audit_tg,
+ .targetsize = sizeof(struct xt_audit_info),
+ .checkentry = audit_tg_check,
+ .me = THIS_MODULE,
+};
+
+static int __init audit_tg_init(void)
+{
+ return xt_register_target(&audit_tg_reg);
+}
+
+static void __exit audit_tg_exit(void)
+{
+ xt_unregister_target(&audit_tg_reg);
+}
+
+module_init(audit_tg_init);
+module_exit(audit_tg_exit);
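
audit_proto() and the address dumpers above rely on skb_header_pointer() to copy header bytes out of the packet only when they are actually present, logging truncated=1 otherwise. A user-space sketch of the same bounded-read idea (packet bytes are invented):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* copy len bytes at offset into buf, or return NULL if the packet is too short */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pkt_len)
		return NULL;
	memcpy(buf, pkt + offset, len);
	return buf;
}

int main(void)
{
	/* 4 bytes of transport header: sport=53, dport=1024 */
	const uint8_t pkt[] = { 0x00, 0x35, 0x04, 0x00 };
	uint16_t ports[2];

	if (header_pointer(pkt, sizeof(pkt), 0, sizeof(ports), ports))
		printf("sport=%hu dport=%hu\n",
		       ntohs(ports[0]), ntohs(ports[1]));

	if (!header_pointer(pkt, sizeof(pkt), 2, sizeof(ports), ports))
		printf("truncated=1\n");
	return 0;
}
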
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index c2c0e4abeb99..af9c4dadf816 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -19,12 +19,14 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CLASSIFY.h>
+#include <linux/netfilter_arp.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Qdisc classification");
MODULE_ALIAS("ipt_CLASSIFY");
MODULE_ALIAS("ip6t_CLASSIFY");
+MODULE_ALIAS("arpt_CLASSIFY");
static unsigned int
classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -35,26 +37,36 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static struct xt_target classify_tg_reg __read_mostly = {
- .name = "CLASSIFY",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .table = "mangle",
- .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
- (1 << NF_INET_POST_ROUTING),
- .target = classify_tg,
- .targetsize = sizeof(struct xt_classify_target_info),
- .me = THIS_MODULE,
+static struct xt_target classify_tg_reg[] __read_mostly = {
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_POST_ROUTING),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_ARP,
+ .hooks = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
};
static int __init classify_tg_init(void)
{
- return xt_register_target(&classify_tg_reg);
+ return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
static void __exit classify_tg_exit(void)
{
- xt_unregister_target(&classify_tg_reg);
+ xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
module_init(classify_tg_init);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index be1f22e13545..3bdd443aaf15 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -313,3 +313,5 @@ MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ipt_IDLETIMER");
+MODULE_ALIAS("ip6t_IDLETIMER");
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index a4140509eea1..993de2ba89d3 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -31,6 +31,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+MODULE_ALIAS("ipt_LED");
+MODULE_ALIAS("ip6t_LED");
static LIST_HEAD(xt_led_triggers);
static DEFINE_MUTEX(xt_led_mutex);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 039cce1bde3d..d4f4b5d66b20 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -72,18 +72,31 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
if (info->queues_total > 1) {
if (par->family == NFPROTO_IPV4)
- queue = hash_v4(skb) % info->queues_total + queue;
+ queue = (((u64) hash_v4(skb) * info->queues_total) >>
+ 32) + queue;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
else if (par->family == NFPROTO_IPV6)
- queue = hash_v6(skb) % info->queues_total + queue;
+ queue = (((u64) hash_v6(skb) * info->queues_total) >>
+ 32) + queue;
#endif
}
return NF_QUEUE_NR(queue);
}
-static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static unsigned int
+nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
- const struct xt_NFQ_info_v1 *info = par->targinfo;
+ const struct xt_NFQ_info_v2 *info = par->targinfo;
+ unsigned int ret = nfqueue_tg_v1(skb, par);
+
+ if (info->bypass)
+ ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+ return ret;
+}
+
+static int nfqueue_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_NFQ_info_v2 *info = par->targinfo;
u32 maxid;
if (unlikely(!rnd_inited)) {
@@ -100,6 +113,8 @@ static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
info->queues_total, maxid);
return -ERANGE;
}
+ if (par->target->revision == 2 && info->bypass > 1)
+ return -EINVAL;
return 0;
}
@@ -115,11 +130,20 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
.name = "NFQUEUE",
.revision = 1,
.family = NFPROTO_UNSPEC,
- .checkentry = nfqueue_tg_v1_check,
+ .checkentry = nfqueue_tg_check,
.target = nfqueue_tg_v1,
.targetsize = sizeof(struct xt_NFQ_info_v1),
.me = THIS_MODULE,
},
+ {
+ .name = "NFQUEUE",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = nfqueue_tg_check,
+ .target = nfqueue_tg_v2,
+ .targetsize = sizeof(struct xt_NFQ_info_v2),
+ .me = THIS_MODULE,
+ },
};
static int __init nfqueue_tg_init(void)
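
The NFQUEUE balancing change above replaces "hash % queues_total" with a multiply-and-shift; a small sketch of the idea, with illustrative names only:

#include <linux/types.h>

/* Reciprocal scaling as used in nfqueue_tg_v1() above: multiplying a
 * 32-bit hash by n and keeping the top 32 bits maps the hash onto
 * [0, n) without an integer division, which is cheaper than "% n" on
 * most CPUs.
 */
static inline u32 scale_hash_to_queues(u32 hash32, u32 n)
{
	return (u32)(((u64)hash32 * n) >> 32);	/* result is always < n */
}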
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f47a2a..dcfd57eb9d02 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
#include <net/netfilter/nf_tproxy_core.h>
#include <linux/netfilter/xt_TPROXY.h>
+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+ if (sk->sk_state != TCP_TIME_WAIT) {
+ if (inet_sk(sk)->transparent)
+ return true;
+ sock_put(sk);
+ } else {
+ if (inet_twsk(sk)->tw_transparent)
+ return true;
+ inet_twsk_put(inet_twsk(sk));
+ }
+ return false;
+}
+
static inline __be32
tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
{
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
skb->dev, NFT_LOOKUP_LISTENER);
/* NOTE: assign_sock consumes our sk reference */
- if (sk && nf_tproxy_assign_sock(skb, sk)) {
+ if (sk && tproxy_sk_is_transparent(sk)) {
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
iph->protocol, &iph->daddr, ntohs(hp->dest),
&laddr, ntohs(lport), skb->mark);
+
+ nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
par->in, NFT_LOOKUP_LISTENER);
/* NOTE: assign_sock consumes our sk reference */
- if (sk && nf_tproxy_assign_sock(skb, sk)) {
+ if (sk && tproxy_sk_is_transparent(sk)) {
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
tproto, &iph->saddr, ntohs(hp->source),
laddr, ntohs(lport), skb->mark);
+
+ nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 5c5b6b921b84..e029c4807404 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -185,18 +185,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
int connections;
ct = nf_ct_get(skb, &ctinfo);
- if (ct != NULL)
- tuple_ptr = &ct->tuplehash[0].tuple;
- else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
- par->family, &tuple))
+ if (ct != NULL) {
+ if (info->flags & XT_CONNLIMIT_DADDR)
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ else
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ par->family, &tuple)) {
goto hotdrop;
+ }
if (par->family == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
- memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
+ memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
+ &iph->daddr : &iph->saddr, sizeof(addr.ip6));
} else {
const struct iphdr *iph = ip_hdr(skb);
- addr.ip = iph->saddr;
+ addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+ iph->daddr : iph->saddr;
}
spin_lock_bh(&info->data->lock);
@@ -204,13 +210,12 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
&info->mask, par->family);
spin_unlock_bh(&info->data->lock);
- if (connections < 0) {
+ if (connections < 0)
/* kmalloc failed, drop it entirely */
- par->hotdrop = true;
- return false;
- }
+ goto hotdrop;
- return (connections > info->limit) ^ info->inverse;
+ return (connections > info->limit) ^
+ !!(info->flags & XT_CONNLIMIT_INVERT);
hotdrop:
par->hotdrop = true;
@@ -268,25 +273,38 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
kfree(info->data);
}
-static struct xt_match connlimit_mt_reg __read_mostly = {
- .name = "connlimit",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+ {
+ .name = "connlimit",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
};
static int __init connlimit_mt_init(void)
{
- return xt_register_match(&connlimit_mt_reg);
+ return xt_register_matches(connlimit_mt_reg,
+ ARRAY_SIZE(connlimit_mt_reg));
}
static void __exit connlimit_mt_exit(void)
{
- xt_unregister_match(&connlimit_mt_reg);
+ xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
}
module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index e536710ad916..2c0086a4751e 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -112,6 +112,54 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
return true;
}
+static inline bool
+port_match(u16 min, u16 max, u16 port, bool invert)
+{
+ return (port >= min && port <= max) ^ invert;
+}
+
+static inline bool
+ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
+ const struct nf_conn *ct)
+{
+ const struct nf_conntrack_tuple *tuple;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ if ((info->match_flags & XT_CONNTRACK_PROTO) &&
+ (nf_ct_protonum(ct) == info->l4proto) ^
+ !(info->invert_flags & XT_CONNTRACK_PROTO))
+ return false;
+
+ /* Shortcut to match all recognized protocols by using ->src.all. */
+ if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
+ !port_match(info->origsrc_port, info->origsrc_port_high,
+ ntohs(tuple->src.u.all),
+ info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
+ return false;
+
+ if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
+ !port_match(info->origdst_port, info->origdst_port_high,
+ ntohs(tuple->dst.u.all),
+ info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
+ return false;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
+ !port_match(info->replsrc_port, info->replsrc_port_high,
+ ntohs(tuple->src.u.all),
+ info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
+ return false;
+
+ if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
+ !port_match(info->repldst_port, info->repldst_port_high,
+ ntohs(tuple->dst.u.all),
+ info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
+ return false;
+
+ return true;
+}
+
static bool
conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
u16 state_mask, u16 status_mask)
@@ -170,8 +218,13 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
!(info->invert_flags & XT_CONNTRACK_REPLDST))
return false;
- if (!ct_proto_port_check(info, ct))
- return false;
+ if (par->match->revision != 3) {
+ if (!ct_proto_port_check(info, ct))
+ return false;
+ } else {
+ if (!ct_proto_port_check_v3(par->matchinfo, ct))
+ return false;
+ }
if ((info->match_flags & XT_CONNTRACK_STATUS) &&
(!!(status_mask & ct->status) ^
@@ -207,10 +260,23 @@ conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
+static bool
+conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
+
+ return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+}
+
static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;
+ if (strcmp(par->table, "raw") == 0) {
+ pr_info("state is undetermined at the time of raw table\n");
+ return -EINVAL;
+ }
+
ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0)
pr_info("cannot load conntrack support for proto=%u\n",
@@ -244,6 +310,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
+ {
+ .name = "conntrack",
+ .revision = 3,
+ .family = NFPROTO_UNSPEC,
+ .matchsize = sizeof(struct xt_conntrack_mtinfo3),
+ .match = conntrack_mt_v3,
+ .checkentry = conntrack_mt_check,
+ .destroy = conntrack_mt_destroy,
+ .me = THIS_MODULE,
+ },
};
static int __init conntrack_mt_init(void)
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
index b39db8a5cbae..c7a2e5466bc4 100644
--- a/net/netfilter/xt_cpu.c
+++ b/net/netfilter/xt_cpu.c
@@ -22,6 +22,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
MODULE_DESCRIPTION("Xtables: CPU match");
+MODULE_ALIAS("ipt_cpu");
+MODULE_ALIAS("ip6t_cpu");
static int cpu_mt_check(const struct xt_mtchk_param *par)
{
diff --git a/net/netfilter/xt_devgroup.c b/net/netfilter/xt_devgroup.c
new file mode 100644
index 000000000000..d9202cdd25c9
--- /dev/null
+++ b/net/netfilter/xt_devgroup.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/netfilter/xt_devgroup.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: Device group match");
+MODULE_ALIAS("ipt_devgroup");
+MODULE_ALIAS("ip6t_devgroup");
+
+static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_devgroup_info *info = par->matchinfo;
+
+ if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+ (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^
+ ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
+ return false;
+
+ if (info->flags & XT_DEVGROUP_MATCH_DST &&
+ (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^
+ ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
+ return false;
+
+ return true;
+}
+
+static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
+{
+ const struct xt_devgroup_info *info = par->matchinfo;
+
+ if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
+ XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
+ return -EINVAL;
+
+ if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+ par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD)))
+ return -EINVAL;
+
+ if (info->flags & XT_DEVGROUP_MATCH_DST &&
+ par->hook_mask & ~((1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct xt_match devgroup_mt_reg __read_mostly = {
+ .name = "devgroup",
+ .match = devgroup_mt,
+ .checkentry = devgroup_mt_checkentry,
+ .matchsize = sizeof(struct xt_devgroup_info),
+ .family = NFPROTO_UNSPEC,
+ .me = THIS_MODULE
+};
+
+static int __init devgroup_mt_init(void)
+{
+ return xt_register_match(&devgroup_mt_reg);
+}
+
+static void __exit devgroup_mt_exit(void)
+{
+ xt_unregister_match(&devgroup_mt_reg);
+}
+
+module_init(devgroup_mt_init);
+module_exit(devgroup_mt_exit);
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 73c33a42f87f..b46626cddd93 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -31,7 +31,7 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
&iph->saddr,
(info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
- &info->src_max.ip,
+ &info->src_min.ip,
&info->src_max.ip);
return false;
}
@@ -76,15 +76,27 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
m = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
m ^= !!(info->flags & IPRANGE_SRC_INV);
- if (m)
+ if (m) {
+ pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
+ &iph->saddr,
+ (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
+ &info->src_min.in6,
+ &info->src_max.in6);
return false;
+ }
}
if (info->flags & IPRANGE_DST) {
m = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
m ^= !!(info->flags & IPRANGE_DST_INV);
- if (m)
+ if (m) {
+ pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
+ &iph->daddr,
+ (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
+ &info->dst_min.in6,
+ &info->dst_max.in6);
return false;
+ }
}
return true;
}
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 9127a3d8aa35..bb10b0717f1b 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+ cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
if (unlikely(cp == NULL)) {
match = false;
goto out;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
new file mode 100644
index 000000000000..061d48cec137
--- /dev/null
+++ b/net/netfilter/xt_set.c
@@ -0,0 +1,359 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module which implements the set match and SET target
+ * for netfilter/iptables. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_set.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("Xtables: IP set match and target module");
+MODULE_ALIAS("xt_SET");
+MODULE_ALIAS("ipt_set");
+MODULE_ALIAS("ip6t_set");
+MODULE_ALIAS("ipt_SET");
+MODULE_ALIAS("ip6t_SET");
+
+static inline int
+match_set(ip_set_id_t index, const struct sk_buff *skb,
+ u8 pf, u8 dim, u8 flags, int inv)
+{
+ if (ip_set_test(index, skb, pf, dim, flags))
+ inv = !inv;
+ return inv;
+}
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static bool
+set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v0 *info = par->matchinfo;
+
+ return match_set(info->match_set.index, skb, par->family,
+ info->match_set.u.compat.dim,
+ info->match_set.u.compat.flags,
+ info->match_set.u.compat.flags & IPSET_INV_MATCH);
+}
+
+static void
+compat_flags(struct xt_set_info_v0 *info)
+{
+ u_int8_t i;
+
+ /* Fill out compatibility data according to enum ip_set_kopt */
+ info->u.compat.dim = IPSET_DIM_ZERO;
+ if (info->u.flags[0] & IPSET_MATCH_INV)
+ info->u.compat.flags |= IPSET_INV_MATCH;
+ for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+ info->u.compat.dim++;
+ if (info->u.flags[i] & IPSET_SRC)
+ info->u.compat.flags |= (1<<info->u.compat.dim);
+ }
+}
+
+static int
+set_match_v0_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match_v0 *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set indentified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ /* Fill out compatibility data */
+ compat_flags(&info->match_set);
+
+ return 0;
+}
+
+static void
+set_match_v0_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match_v0 *info = par->matchinfo;
+
+ ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target_v0 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index, skb, par->family,
+ info->add_set.u.compat.dim,
+ info->add_set.u.compat.flags);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index, skb, par->family,
+ info->del_set.u.compat.dim,
+ info->del_set.u.compat.flags);
+
+ return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+ struct xt_set_info_target_v0 *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+ if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+ info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+ pr_warning("Protocol error: SET target dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ /* Fill out compatibility data */
+ compat_flags(&info->add_set);
+ compat_flags(&info->del_set);
+
+ return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target_v0 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
+}
+
+/* Revision 1: current interface to netfilter/iptables */
+
+static bool
+set_match(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match *info = par->matchinfo;
+
+ return match_set(info->match_set.index, skb, par->family,
+ info->match_set.dim,
+ info->match_set.flags,
+ info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set indentified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_match_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match *info = par->matchinfo;
+
+ ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index,
+ skb, par->family,
+ info->add_set.dim,
+ info->add_set.flags);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index,
+ skb, par->family,
+ info->del_set.dim,
+ info->del_set.flags);
+
+ return XT_CONTINUE;
+}
+
+static int
+set_target_checkentry(const struct xt_tgchk_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+ if (info->add_set.dim > IPSET_DIM_MAX ||
+ info->del_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: SET target dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_target_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
+}
+
+static struct xt_match set_matches[] __read_mostly = {
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 0,
+ .match = set_match_v0,
+ .matchsize = sizeof(struct xt_set_info_match_v0),
+ .checkentry = set_match_v0_checkentry,
+ .destroy = set_match_v0_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 1,
+ .match = set_match,
+ .matchsize = sizeof(struct xt_set_info_match),
+ .checkentry = set_match_checkentry,
+ .destroy = set_match_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV6,
+ .revision = 1,
+ .match = set_match,
+ .matchsize = sizeof(struct xt_set_info_match),
+ .checkentry = set_match_checkentry,
+ .destroy = set_match_destroy,
+ .me = THIS_MODULE
+ },
+};
+
+static struct xt_target set_targets[] __read_mostly = {
+ {
+ .name = "SET",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .target = set_target_v0,
+ .targetsize = sizeof(struct xt_set_info_target_v0),
+ .checkentry = set_target_v0_checkentry,
+ .destroy = set_target_v0_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .target = set_target,
+ .targetsize = sizeof(struct xt_set_info_target),
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .target = set_target,
+ .targetsize = sizeof(struct xt_set_info_target),
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
+ },
+};
+
+static int __init xt_set_init(void)
+{
+ int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
+
+ if (!ret) {
+ ret = xt_register_targets(set_targets,
+ ARRAY_SIZE(set_targets));
+ if (ret)
+ xt_unregister_matches(set_matches,
+ ARRAY_SIZE(set_matches));
+ }
+ return ret;
+}
+
+static void __exit xt_set_fini(void)
+{
+ xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
+ xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
+}
+
+module_init(xt_set_init);
+module_exit(xt_set_fini);
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae838303..9cc46356b577 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
#include <net/netfilter/nf_conntrack.h>
#endif
+static void
+xt_socket_put_sk(struct sock *sk)
+{
+ if (sk->sk_state == TCP_TIME_WAIT)
+ inet_twsk_put(inet_twsk(sk));
+ else
+ sock_put(sk);
+}
+
static int
extract_icmp4_fields(const struct sk_buff *skb,
u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
(sk->sk_state == TCP_TIME_WAIT &&
inet_twsk(sk)->tw_transparent));
- nf_tproxy_put_sock(sk);
+ xt_socket_put_sk(sk);
if (wildcard || !transparent)
sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
(sk->sk_state == TCP_TIME_WAIT &&
inet_twsk(sk)->tw_transparent));
- nf_tproxy_put_sock(sk);
+ xt_socket_put_sk(sk);
if (wildcard || !transparent)
sk = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..1f924595bdef 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
int noblock = flags&MSG_DONTWAIT;
size_t copied;
struct sk_buff *skb, *data_skb;
- int err;
+ int err, ret;
if (flags&MSG_OOB)
return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
skb_free_datagram(sk, skb);
- if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
- netlink_dump(sk);
+ if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ ret = netlink_dump(sk);
+ if (ret) {
+ sk->sk_err = ret;
+ sk->sk_error_report(sk);
+ }
+ }
scm_recv(sock, msg, siocb->scm, flags);
out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct netlink_callback *cb;
struct sock *sk;
struct netlink_sock *nlk;
+ int ret;
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
nlk->cb = cb;
mutex_unlock(nlk->cb_mutex);
- netlink_dump(sk);
+ ret = netlink_dump(sk);
+
sock_put(sk);
+ if (ret)
+ return ret;
+
/* We successfully started a dump, by returning -EINTR we
* signal not to send ACK even if it was requested.
*/
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 91cb1d71f018..5efef5b5879e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -164,7 +164,6 @@ struct packet_mreq_max {
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
int closing, int tx_ring);
-#define PGV_FROM_VMALLOC 1
struct pgv {
char *buffer;
};
@@ -466,7 +465,7 @@ retry:
*/
err = -EMSGSIZE;
- if (len > dev->mtu + dev->hard_header_len)
+ if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
goto out_unlock;
if (!skb) {
@@ -497,6 +496,19 @@ retry:
goto retry;
}
+ if (len > (dev->mtu + dev->hard_header_len)) {
+ /* Earlier code assumed this would be a VLAN pkt,
+ * double-check this now that we have the actual
+ * packet in hand.
+ */
+ struct ethhdr *ehdr;
+ skb_reset_mac_header(skb);
+ ehdr = eth_hdr(skb);
+ if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+ err = -EMSGSIZE;
+ goto out_unlock;
+ }
+ }
skb->protocol = proto;
skb->dev = dev;
@@ -523,11 +535,11 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
{
struct sk_filter *filter;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter != NULL)
res = sk_run_filter(skb, filter->insns);
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return res;
}
@@ -1200,7 +1212,7 @@ static int packet_snd(struct socket *sock,
}
err = -EMSGSIZE;
- if (!gso_type && (len > dev->mtu+reserve))
+ if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
goto out_unlock;
err = -ENOBUFS;
@@ -1225,6 +1237,20 @@ static int packet_snd(struct socket *sock,
if (err < 0)
goto out_free;
+ if (!gso_type && (len > dev->mtu + reserve)) {
+ /* Earlier code assumed this would be a VLAN pkt,
+ * double-check this now that we have the actual
+ * packet in hand.
+ */
+ struct ethhdr *ehdr;
+ skb_reset_mac_header(skb);
+ ehdr = eth_hdr(skb);
+ if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+ err = -EMSGSIZE;
+ goto out_free;
+ }
+ }
+
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 1072b2c19d31..30cc676c35fd 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -110,6 +110,7 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
sk->sk_protocol = protocol;
pn = pn_sk(sk);
pn->sobject = 0;
+ pn->dobject = 0;
pn->resource = 0;
sk->sk_prot->init(sk);
err = 0;
@@ -242,8 +243,18 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
struct net_device *dev;
struct pn_sock *pn = pn_sk(sk);
int err;
- u16 src;
- u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR;
+ u16 src, dst;
+ u8 daddr, saddr, res;
+
+ src = pn->sobject;
+ if (target != NULL) {
+ dst = pn_sockaddr_get_object(target);
+ res = pn_sockaddr_get_resource(target);
+ } else {
+ dst = pn->dobject;
+ res = pn->resource;
+ }
+ daddr = pn_addr(dst);
err = -EHOSTUNREACH;
if (sk->sk_bound_dev_if)
@@ -271,12 +282,10 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
if (saddr == PN_NO_ADDR)
goto drop;
- src = pn->sobject;
if (!pn_addr(src))
src = pn_object(saddr, pn_obj(src));
- err = pn_send(skb, dev, pn_sockaddr_get_object(target),
- src, pn_sockaddr_get_resource(target), 0);
+ err = pn_send(skb, dev, dst, src, res, 0);
dev_put(dev);
return err;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 3e60f2e4e6c2..875e86cadcfd 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -50,11 +50,6 @@
#define CREDITS_MAX 10
#define CREDITS_THR 7
-static const struct sockaddr_pn pipe_srv = {
- .spn_family = AF_PHONET,
- .spn_resource = 0xD9, /* pipe service */
-};
-
#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
/* Get the next TLV sub-block. */
@@ -88,6 +83,7 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb,
const struct pnpipehdr *oph = pnp_hdr(oskb);
struct pnpipehdr *ph;
struct sk_buff *skb;
+ struct sockaddr_pn peer;
skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
if (!skb)
@@ -105,78 +101,14 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb,
ph->pipe_handle = oph->pipe_handle;
ph->error_code = code;
- return pn_skb_send(sk, skb, &pipe_srv);
+ pn_skb_get_src_sockaddr(oskb, &peer);
+ return pn_skb_send(sk, skb, &peer);
}
#define PAD 0x00
#ifdef CONFIG_PHONET_PIPECTRLR
-static u8 pipe_negotiate_fc(u8 *host_fc, u8 *remote_fc, int len)
-{
- int i, j;
- u8 base_fc, final_fc;
-
- for (i = 0; i < len; i++) {
- base_fc = host_fc[i];
- for (j = 0; j < len; j++) {
- if (remote_fc[j] == base_fc) {
- final_fc = base_fc;
- goto done;
- }
- }
- }
- return -EINVAL;
-
-done:
- return final_fc;
-
-}
-
-static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb,
- u8 *pref_rx_fc, u8 *req_tx_fc)
-{
- struct pnpipehdr *hdr;
- u8 n_sb;
-
- if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
- return -EINVAL;
-
- hdr = pnp_hdr(skb);
- n_sb = hdr->data[4];
-
- __skb_pull(skb, sizeof(*hdr) + 4);
- while (n_sb > 0) {
- u8 type, buf[3], len = sizeof(buf);
- u8 *data = pep_get_sb(skb, &type, &len, buf);
-
- if (data == NULL)
- return -EINVAL;
-
- switch (type) {
- case PN_PIPE_SB_REQUIRED_FC_TX:
- if (len < 3 || (data[2] | data[3] | data[4]) > 3)
- break;
- req_tx_fc[0] = data[2];
- req_tx_fc[1] = data[3];
- req_tx_fc[2] = data[4];
- break;
-
- case PN_PIPE_SB_PREFERRED_FC_RX:
- if (len < 3 || (data[2] | data[3] | data[4]) > 3)
- break;
- pref_rx_fc[0] = data[2];
- pref_rx_fc[1] = data[3];
- pref_rx_fc[2] = data[4];
- break;
-
- }
- n_sb--;
- }
- return 0;
-}
-
-static int pipe_handler_send_req(struct sock *sk, u8 utid,
- u8 msg_id, gfp_t priority)
+static int pipe_handler_send_req(struct sock *sk, u8 msg_id, gfp_t priority)
{
int len;
struct pnpipehdr *ph;
@@ -215,16 +147,15 @@ static int pipe_handler_send_req(struct sock *sk, u8 utid,
__skb_push(skb, sizeof(*ph));
skb_reset_transport_header(skb);
ph = pnp_hdr(skb);
- ph->utid = utid;
+ ph->utid = msg_id; /* any value will do */
ph->message_id = msg_id;
ph->pipe_handle = pn->pipe_handle;
ph->error_code = PN_PIPE_NO_ERROR;
- return pn_skb_send(sk, skb, &pn->remote_pep);
+ return pn_skb_send(sk, skb, NULL);
}
-static int pipe_handler_send_created_ind(struct sock *sk,
- u8 utid, u8 msg_id)
+static int pipe_handler_send_created_ind(struct sock *sk, u8 msg_id)
{
int err_code;
struct pnpipehdr *ph;
@@ -257,15 +188,15 @@ static int pipe_handler_send_created_ind(struct sock *sk,
__skb_push(skb, sizeof(*ph));
skb_reset_transport_header(skb);
ph = pnp_hdr(skb);
- ph->utid = utid;
+ ph->utid = 0;
ph->message_id = msg_id;
ph->pipe_handle = pn->pipe_handle;
ph->error_code = err_code;
- return pn_skb_send(sk, skb, &pn->remote_pep);
+ return pn_skb_send(sk, skb, NULL);
}
-static int pipe_handler_send_ind(struct sock *sk, u8 utid, u8 msg_id)
+static int pipe_handler_send_ind(struct sock *sk, u8 msg_id)
{
int err_code;
struct pnpipehdr *ph;
@@ -290,26 +221,19 @@ static int pipe_handler_send_ind(struct sock *sk, u8 utid, u8 msg_id)
__skb_push(skb, sizeof(*ph));
skb_reset_transport_header(skb);
ph = pnp_hdr(skb);
- ph->utid = utid;
+ ph->utid = 0;
ph->message_id = msg_id;
ph->pipe_handle = pn->pipe_handle;
ph->error_code = err_code;
- return pn_skb_send(sk, skb, &pn->remote_pep);
+ return pn_skb_send(sk, skb, NULL);
}
static int pipe_handler_enable_pipe(struct sock *sk, int enable)
{
- int utid, req;
-
- if (enable) {
- utid = PNS_PIPE_ENABLE_UTID;
- req = PNS_PEP_ENABLE_REQ;
- } else {
- utid = PNS_PIPE_DISABLE_UTID;
- req = PNS_PEP_DISABLE_REQ;
- }
- return pipe_handler_send_req(sk, utid, req, GFP_ATOMIC);
+ u8 id = enable ? PNS_PEP_ENABLE_REQ : PNS_PEP_DISABLE_REQ;
+
+ return pipe_handler_send_req(sk, id, GFP_KERNEL);
}
#endif
@@ -396,11 +320,7 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
ph->data[3] = PAD;
ph->data[4] = status;
-#ifdef CONFIG_PHONET_PIPECTRLR
- return pn_skb_send(sk, skb, &pn->remote_pep);
-#else
- return pn_skb_send(sk, skb, &pipe_srv);
-#endif
+ return pn_skb_send(sk, skb, NULL);
}
/* Send our RX flow control information to the sender.
@@ -534,7 +454,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
#ifdef CONFIG_PHONET_PIPECTRLR
case PNS_PEP_DISCONNECT_RESP:
- pn->pipe_state = PIPE_IDLE;
sk->sk_state = TCP_CLOSE;
break;
#endif
@@ -546,9 +465,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
#ifdef CONFIG_PHONET_PIPECTRLR
case PNS_PEP_ENABLE_RESP:
- pn->pipe_state = PIPE_ENABLED;
- pipe_handler_send_ind(sk, PNS_PIPE_ENABLED_IND_UTID,
- PNS_PIPE_ENABLED_IND);
+ pipe_handler_send_ind(sk, PNS_PIPE_ENABLED_IND);
if (!pn_flow_safe(pn->tx_fc)) {
atomic_set(&pn->tx_credits, 1);
@@ -581,10 +498,8 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
#ifdef CONFIG_PHONET_PIPECTRLR
case PNS_PEP_DISABLE_RESP:
- pn->pipe_state = PIPE_DISABLED;
atomic_set(&pn->tx_credits, 0);
- pipe_handler_send_ind(sk, PNS_PIPE_DISABLED_IND_UTID,
- PNS_PIPE_DISABLED_IND);
+ pipe_handler_send_ind(sk, PNS_PIPE_DISABLED_IND);
sk->sk_state = TCP_SYN_RECV;
pn->rx_credits = 0;
break;
@@ -682,38 +597,64 @@ static void pipe_destruct(struct sock *sk)
}
#ifdef CONFIG_PHONET_PIPECTRLR
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
+{
+ unsigned i;
+ u8 final_fc = PN_NO_FLOW_CONTROL;
+
+ for (i = 0; i < n; i++) {
+ u8 fc = fcs[i];
+
+ if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
+ final_fc = fc;
+ }
+ return final_fc;
+}
+
static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
- u8 host_pref_rx_fc[3] = {3, 2, 1}, host_req_tx_fc[3] = {3, 2, 1};
- u8 remote_pref_rx_fc[3], remote_req_tx_fc[3];
- u8 negotiated_rx_fc, negotiated_tx_fc;
- int ret;
-
- pipe_get_flow_info(sk, skb, remote_pref_rx_fc,
- remote_req_tx_fc);
- negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc,
- host_pref_rx_fc,
- sizeof(host_pref_rx_fc));
- negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc,
- remote_pref_rx_fc,
- sizeof(host_pref_rx_fc));
-
- pn->pipe_state = PIPE_DISABLED;
+ struct pnpipehdr *hdr;
+ u8 n_sb;
+
+ if (!pskb_pull(skb, sizeof(*hdr) + 4))
+ return -EINVAL;
+
+ hdr = pnp_hdr(skb);
+
+ /* Parse sub-blocks */
+ n_sb = hdr->data[4];
+ while (n_sb > 0) {
+ u8 type, buf[6], len = sizeof(buf);
+ const u8 *data = pep_get_sb(skb, &type, &len, buf);
+
+ if (data == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case PN_PIPE_SB_REQUIRED_FC_TX:
+ if (len < 2 || len < data[0])
+ break;
+ pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
+ break;
+
+ case PN_PIPE_SB_PREFERRED_FC_RX:
+ if (len < 2 || len < data[0])
+ break;
+ pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
+ break;
+
+ }
+ n_sb--;
+ }
+
sk->sk_state = TCP_SYN_RECV;
sk->sk_backlog_rcv = pipe_do_rcv;
sk->sk_destruct = pipe_destruct;
pn->rx_credits = 0;
- pn->rx_fc = negotiated_rx_fc;
- pn->tx_fc = negotiated_tx_fc;
sk->sk_state_change(sk);
- ret = pipe_handler_send_created_ind(sk,
- PNS_PIPE_CREATED_IND_UTID,
- PNS_PIPE_CREATED_IND
- );
-
- return ret;
+ return pipe_handler_send_created_ind(sk, PNS_PIPE_CREATED_IND);
}
#endif
@@ -722,7 +663,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *newsk;
struct pep_sock *newpn, *pn = pep_sk(sk);
struct pnpipehdr *hdr;
- struct sockaddr_pn dst;
+ struct sockaddr_pn dst, src;
u16 peer_type;
u8 pipe_handle, enabled, n_sb;
u8 aligned = 0;
@@ -789,8 +730,10 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
newpn = pep_sk(newsk);
pn_skb_get_dst_sockaddr(skb, &dst);
+ pn_skb_get_src_sockaddr(skb, &src);
newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
- newpn->pn_sk.resource = pn->pn_sk.resource;
+ newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
+ newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
skb_queue_head_init(&newpn->ctrlreq_queue);
newpn->pipe_handle = pipe_handle;
atomic_set(&newpn->tx_credits, 0);
@@ -906,6 +849,7 @@ drop:
return err;
}
+#ifndef CONFIG_PHONET_PIPECTRLR
static int pipe_do_remove(struct sock *sk)
{
struct pep_sock *pn = pep_sk(sk);
@@ -925,8 +869,9 @@ static int pipe_do_remove(struct sock *sk)
ph->pipe_handle = pn->pipe_handle;
ph->data[0] = PAD;
- return pn_skb_send(sk, skb, &pipe_srv);
+ return pn_skb_send(sk, skb, NULL);
}
+#endif
/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
@@ -946,21 +891,16 @@ static void pep_sock_close(struct sock *sk, long timeout)
sk_for_each_safe(sknode, p, n, &pn->ackq)
sk_del_node_init(sknode);
sk->sk_state = TCP_CLOSE;
- } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+ } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
+#ifndef CONFIG_PHONET_PIPECTRLR
/* Forcefully remove dangling Phonet pipe */
pipe_do_remove(sk);
-
-#ifdef CONFIG_PHONET_PIPECTRLR
- if (pn->pipe_state != PIPE_IDLE) {
+#else
/* send pep disconnect request */
- pipe_handler_send_req(sk,
- PNS_PEP_DISCONNECT_UTID, PNS_PEP_DISCONNECT_REQ,
- GFP_KERNEL);
-
- pn->pipe_state = PIPE_IDLE;
+ pipe_handler_send_req(sk, PNS_PEP_DISCONNECT_REQ, GFP_KERNEL);
sk->sk_state = TCP_CLOSE;
- }
#endif
+ }
ifindex = pn->ifindex;
pn->ifindex = 0;
@@ -1042,13 +982,11 @@ out:
static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
{
struct pep_sock *pn = pep_sk(sk);
- struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
+ const struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
- memcpy(&pn->remote_pep, spn, sizeof(struct sockaddr_pn));
-
- return pipe_handler_send_req(sk,
- PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ,
- GFP_ATOMIC);
+ pn->pn_sk.dobject = pn_sockaddr_get_object(spn);
+ pn->pn_sk.resource = pn_sockaddr_get_resource(spn);
+ return pipe_handler_send_req(sk, PNS_PEP_CONNECT_REQ, GFP_KERNEL);
}
#endif
@@ -1106,10 +1044,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_PHONET_PIPECTRLR
case PNPIPE_PIPE_HANDLE:
if (val) {
- if (pn->pipe_state > PIPE_IDLE) {
- err = -EFAULT;
- break;
- }
pn->pipe_handle = val;
break;
}
@@ -1143,7 +1077,7 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_PHONET_PIPECTRLR
case PNPIPE_ENABLE:
- if (pn->pipe_state <= PIPE_IDLE) {
+ if ((1 << sk->sk_state) & ~(TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
err = -ENOTCONN;
break;
}
@@ -1182,9 +1116,7 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_PHONET_PIPECTRLR
case PNPIPE_ENABLE:
- if (pn->pipe_state <= PIPE_IDLE)
- return -ENOTCONN;
- val = pn->pipe_state != PIPE_DISABLED;
+ val = sk->sk_state == TCP_ESTABLISHED;
break;
#endif
@@ -1222,11 +1154,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
} else
ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
-#ifdef CONFIG_PHONET_PIPECTRLR
- err = pn_skb_send(sk, skb, &pn->remote_pep);
-#else
- err = pn_skb_send(sk, skb, &pipe_srv);
-#endif
+ err = pn_skb_send(sk, skb, NULL);
if (err && pn_flow_safe(pn->tx_fc))
atomic_inc(&pn->tx_credits);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 25f746d20c1f..65a03338769e 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -428,19 +428,19 @@ static int pn_socket_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int err = 0;
- if (sock->state != SS_UNCONNECTED)
- return -EINVAL;
if (pn_socket_autobind(sock))
return -ENOBUFS;
lock_sock(sk);
- if (sk->sk_state != TCP_CLOSE) {
+ if (sock->state != SS_UNCONNECTED) {
err = -EINVAL;
goto out;
}
- sk->sk_state = TCP_LISTEN;
- sk->sk_ack_backlog = 0;
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_state = TCP_LISTEN;
+ sk->sk_ack_backlog = 0;
+ }
sk->sk_max_ack_backlog = backlog;
out:
release_sock(sk);
@@ -633,8 +633,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
"%d %p %d%n",
- sk->sk_protocol, pn->sobject, 0, pn->resource,
- sk->sk_state,
+ sk->sk_protocol, pn->sobject, pn->dobject,
+ pn->resource, sk->sk_state,
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
sock_i_uid(sk), sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 4123967d4d65..cce19f95c624 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -364,7 +364,6 @@ void rds_ib_exit(void)
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
- rds_ib_fmr_exit();
}
struct rds_transport rds_ib_transport = {
@@ -400,13 +399,9 @@ int rds_ib_init(void)
INIT_LIST_HEAD(&rds_ib_devices);
- ret = rds_ib_fmr_init();
- if (ret)
- goto out;
-
ret = ib_register_client(&rds_ib_client);
if (ret)
- goto out_fmr_exit;
+ goto out;
ret = rds_ib_sysctl_init();
if (ret)
@@ -430,8 +425,6 @@ out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
-out_fmr_exit:
- rds_ib_fmr_exit();
out:
return ret;
}
diff --git a/net/rds/ib.h b/net/rds/ib.h
index e34ad032b66d..4297d92788dc 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -307,8 +307,6 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
-int rds_ib_fmr_init(void);
-void rds_ib_fmr_exit(void);
/* ib_recv.c */
int rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 18a833c450c8..819c35a0d9cb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,8 +38,6 @@
#include "ib.h"
#include "xlist.h"
-static struct workqueue_struct *rds_ib_fmr_wq;
-
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
@@ -307,7 +305,7 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
int err = 0, iter = 0;
if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+ schedule_delayed_work(&pool->flush_worker, 10);
while (1) {
ibmr = rds_ib_reuse_fmr(pool);
@@ -696,24 +694,6 @@ out_nolock:
return ret;
}
-int rds_ib_fmr_init(void)
-{
- rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
- if (!rds_ib_fmr_wq)
- return -ENOMEM;
- return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
- destroy_workqueue(rds_ib_fmr_wq);
-}
-
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -741,7 +721,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+ schedule_delayed_work(&pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
@@ -749,8 +729,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
} else {
/* We get here if the user created a MR marked
* as use_once and invalidate at the same time. */
- queue_delayed_work(rds_ib_fmr_wq,
- &pool->flush_worker, 10);
+ schedule_delayed_work(&pool->flush_worker, 10);
}
}
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 9542449c0720..da8adac2bf06 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -50,7 +50,6 @@ rdsdebug(char *fmt, ...)
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
#define RDS_CONG_MAP_BYTES (65536 / 8)
-#define RDS_CONG_MAP_LONGS (RDS_CONG_MAP_BYTES / sizeof(unsigned long))
#define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d952e7eac188..5ee0c62046a0 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -803,7 +803,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose_insert_socket(sk); /* Finish the bind */
}
-rose_try_next_neigh:
rose->dest_addr = addr->srose_addr;
rose->dest_call = addr->srose_call;
rose->rand = ((long)rose & 0xFFFF) + rose->lci;
@@ -865,12 +864,6 @@ rose_try_next_neigh:
}
if (sk->sk_state != TCP_ESTABLISHED) {
- /* Try next neighbour */
- rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
- if (rose->neighbour)
- goto rose_try_next_neigh;
-
- /* No more neighbours */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index b4fdaac233f7..88a77e90e7e8 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -674,29 +674,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig
* Find a neighbour or a route given a ROSE address.
*/
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
- unsigned char *diagnostic, int new)
+ unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
- if (!new) spin_lock_bh(&rose_node_list_lock);
+ if (!route_frame) spin_lock_bh(&rose_node_list_lock);
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
- if (new) {
- if (node->neighbour[i]->restarted) {
- res = node->neighbour[i];
- goto out;
- }
+ if (node->neighbour[i]->restarted) {
+ res = node->neighbour[i];
+ goto out;
}
- else {
+ }
+ }
+ }
+ if (!route_frame) { /* connect request */
+ for (node = rose_node_list; node != NULL; node = node->next) {
+ if (rosecmpm(addr, &node->address, node->mask) == 0) {
+ for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
+ failed = 0;
goto out;
- } else
- failed = 1;
+ }
+ failed = 1;
}
}
}
@@ -711,8 +716,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
}
out:
- if (!new) spin_unlock_bh(&rose_node_list_lock);
-
+ if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
return res;
}
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0353fe..d763793d39de 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
return ret;
plen -= sizeof(*token);
- token = kmalloc(sizeof(*token), GFP_KERNEL);
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
if (!token)
return -ENOMEM;
- token->kad = kmalloc(plen, GFP_KERNEL);
+ token->kad = kzalloc(plen, GFP_KERNEL);
if (!token->kad) {
kfree(token);
return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
goto error;
ret = -ENOMEM;
- token = kmalloc(sizeof(*token), GFP_KERNEL);
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
if (!token)
goto error;
- token->kad = kmalloc(plen, GFP_KERNEL);
+ token->kad = kzalloc(plen, GFP_KERNEL);
if (!token->kad)
goto error_free;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f04d4a484d53..a7a5583d4f68 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -126,6 +126,17 @@ config NET_SCH_RED
To compile this code as a module, choose M here: the
module will be called sch_red.
+config NET_SCH_SFB
+ tristate "Stochastic Fair Blue (SFB)"
+ ---help---
+ Say Y here if you want to use the Stochastic Fair Blue (SFB)
+ packet scheduling algorithm.
+
+ See the top of <file:net/sched/sch_sfb.c> for more details.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_sfb.
+
config NET_SCH_SFQ
tristate "Stochastic Fairness Queueing (SFQ)"
---help---
@@ -205,6 +216,29 @@ config NET_SCH_DRR
If unsure, say N.
+config NET_SCH_MQPRIO
+ tristate "Multi-queue priority scheduler (MQPRIO)"
+ help
+ Say Y here if you want to use the Multi-queue Priority scheduler.
+ This scheduler allows QoS scheduling to be offloaded to NICs that
+ support hardware QoS offload.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sch_mqprio.
+
+ If unsure, say N.
+
+config NET_SCH_CHOKE
+ tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
+ help
+ Say Y here if you want to use the CHOKe packet scheduler (CHOose
+ and Keep for responsive flows, CHOose and Kill for unresponsive
+ flows). This is a variation of RED which tries to penalize flows
+ that monopolize the queue.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_choke.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
@@ -243,7 +277,7 @@ config NET_CLS_TCINDEX
config NET_CLS_ROUTE4
tristate "Routing decision (ROUTE)"
- select NET_CLS_ROUTE
+ select IP_ROUTE_CLASSID
select NET_CLS
---help---
If you say Y here, you will be able to classify packets
@@ -252,9 +286,6 @@ config NET_CLS_ROUTE4
To compile this code as a module, choose M here: the
module will be called cls_route.
-config NET_CLS_ROUTE
- bool
-
config NET_CLS_FW
tristate "Netfilter mark (FW)"
select NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 960f5dba6304..2e77b8dba22e 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_NET_SCH_RED) += sch_red.o
obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
+obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
@@ -32,6 +33,9 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
+obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
+
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 23b25f89e7e0..15873e14cb54 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a, struct tcf_hashinfo *hinfo)
{
struct tcf_common *p;
- int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+ int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_common *p, *s_p;
struct nlattr *nest;
- int i= 0, n_i = 0;
+ int i = 0, n_i = 0;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
while (p != NULL) {
s_p = p->tcfc_next;
if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
- module_put(a->ops->owner);
+ module_put(a->ops->owner);
n_i++;
p = s_p;
}
@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+ err = tcf_action_dump_old(skb, a, bind, ref);
+ if (err > 0) {
nla_nest_end(skb, nest);
return err;
}
@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *a;
struct tc_action_ops *a_o;
char act_name[IFNAMSIZ];
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
int err;
@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
goto err_free;
/* module count goes up only when brand new policy is created
- if it exists and is only bound to in a_o->init() then
- ACT_P_CREATED is not returned (a zero is).
- */
+ * if it exists and is only bound to in a_o->init() then
+ * ACT_P_CREATED is not returned (a zero is).
+ */
if (err != ACT_P_CREATED)
module_put(a_o->owner);
a->ops = a_o;
@@ -569,7 +570,7 @@ err_out:
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind)
{
- struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
int err;
int i;
@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct tc_action *a;
int index;
int err;
@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct tcamsg *t;
struct netlink_callback dcb;
struct nlattr *nest;
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
struct tc_action *a = create_a(0);
int err = -ENOMEM;
@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner);
kfree(a);
- err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
return 0;
@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 pid, int event)
{
int i, ret;
- struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0)
return ret;
- if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+ if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
if (tb[1] != NULL)
return tca_action_flush(net, tb[1], n, pid);
else
@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* now do the delete */
tcf_action_destroy(head, 0);
ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
- n->nlmsg_flags&NLM_F_ECHO);
+ n->nlmsg_flags & NLM_F_ECHO);
if (ret > 0)
return 0;
return ret;
@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC;
- err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* dump then free all the actions after update; inserted policy
* stays intact
- * */
+ */
ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) {
act = a->next;
@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
}
- /* n->nlmsg_flags&NLM_F_CREATE
- * */
+ /* n->nlmsg_flags & NLM_F_CREATE */
switch (n->nlmsg_type) {
case RTM_NEWACTION:
/* we are going to assume all other flags
@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
* but since we want avoid ambiguity (eg when flags
* is zero) then just set this
*/
- if (n->nlmsg_flags&NLM_F_REPLACE)
+ if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@ replay:
static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
- struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+ struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct nlattr *nla[TCAA_MAX + 1];
struct nlattr *kind;
@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
}
a_o = tc_lookup_action(kind);
- if (a_o == NULL) {
+ if (a_o == NULL)
return 0;
- }
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 83ddfc07e45d..6cdf9abe475f 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
if (nla == NULL)
return -EINVAL;
- err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
+ err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
if (err < 0)
return err;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index c2ed90a4c0b4..2b4ab4b05ce8 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
}
typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
#endif /* CONFIG_GACT_PROB */
static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
bind, &gact_idx_gen, &gact_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
- printk(KERN_INFO "GACT probability on\n");
+ pr_info("GACT probability on\n");
#else
- printk(KERN_INFO "GACT probability NOT on\n");
+ pr_info("GACT probability NOT on\n");
#endif
return tcf_register_action(&act_gact_ops);
}
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index c2a7c20e81c1..9fc211a1b20e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
&ipt_idx_gen, &ipt_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
if (unlikely(!t))
goto err2;
- if ((err = ipt_init_target(t, tname, hook)) < 0)
+ err = ipt_init_target(t, tname, hook);
+ if (err < 0)
goto err3;
spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
- worry later - danger - this API seems to have changed
- from earlier kernels */
+ * worry later - danger - this API seems to have changed
+ * from earlier kernels
+ */
par.in = skb->dev;
par.out = NULL;
par.hooknum = ipt->tcfi_hook;
@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
struct tc_cnt c;
/* for simple targets kernel size == user size
- ** user name = target name
- ** for foolproof you need to not assume this
- */
+ * user name = target name
+ * for foolproof you need to not assume this
+ */
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
if (unlikely(!t))
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d765067e99db..961386e2f2c0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
.lock = &mirred_lock,
};
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{
if (m) {
if (bind)
m->tcf_bindcnt--;
m->tcf_refcnt--;
- if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+ if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
list_del(&m->tcfm_list);
if (m->tcfm_dev)
dev_put(m->tcfm_dev);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 178a4bd7b7cb..762b027650a9 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
p = to_tcf_nat(pc);
ret = ACT_P_CREATED;
} else {
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 445bef716f77..50c7c06c019d 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&pedit_idx_gen, &pedit_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0;
unsigned int off;
- if (skb_cloned(skb)) {
- if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- return p->tcf_action;
- }
- }
+ if (skb_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return p->tcf_action;
off = skb_network_offset(skb);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e2f08b1e2e58..8a1630774fd6 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,8 @@
#include <net/act_api.h>
#include <net/netlink.h>
-#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
#define POL_TAB_MASK 15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
};
/* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
u32 index;
int action;
u32 limit;
@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
- unsigned h;
+ unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7287cff7af3e..a34a22de60b3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
/* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes)
- **/
+ */
pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&simp_idx_gen, &simp_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
d = to_defact(pc);
ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
{
struct tcf_defact *d = a->priv;
@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
return 0;
}
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
- int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_defact *d = a->priv;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 836f5fee9e58..5f6f0c7c3905 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&skbedit_idx_gen, &skbedit_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
d = to_skbedit(pc);
ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{
struct tcf_skbedit *d = a->priv;
@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
return 0;
}
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
- int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = a->priv;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5fd0c28ef79a..bb2c523f8158 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
int rc = -ENOENT;
write_lock(&cls_mod_lock);
- for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+ for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
if (t == ops)
break;
@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
u32 first = TC_H_MAKE(0xC0000000U, 0U);
if (tp)
- first = tp->prio-1;
+ first = tp->prio - 1;
return first;
}
@@ -149,7 +149,8 @@ replay:
if (prio == 0) {
/* If no priority is given, user wants we allocated it. */
- if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTFILTER ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
prio = TC_H_MAKE(0x80000000U, 0U);
}
@@ -176,7 +177,8 @@ replay:
}
/* Is it classful? */
- if ((cops = q->ops->cl_ops) == NULL)
+ cops = q->ops->cl_ops;
+ if (!cops)
return -EINVAL;
if (cops->tcf_chain == NULL)
@@ -196,10 +198,11 @@ replay:
goto errout;
/* Check the chain for existence of proto-tcf with this priority */
- for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+ for (back = chain; (tp = *back) != NULL; back = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
- if (!nprio || (tp->protocol != protocol && protocol))
+ if (!nprio ||
+ (tp->protocol != protocol && protocol))
goto errout;
} else
tp = NULL;
@@ -216,7 +219,8 @@ replay:
goto errout;
err = -ENOENT;
- if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTFILTER ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
goto errout;
@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return skb->len;
if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
goto out;
- if ((cops = q->ops->cl_ops) == NULL)
+ cops = q->ops->cl_ops;
+ if (!cops)
goto errout;
if (cops->tcf_chain == NULL)
goto errout;
@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
- for (tp=*chain, t=0; tp; tp = tp->next, t++) {
- if (t < s_t) continue;
+ for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+ if (t < s_t)
+ continue;
if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue;
@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
arg.skb = skb;
arg.cb = cb;
arg.w.stop = 0;
- arg.w.skip = cb->args[1]-1;
+ arg.w.skip = cb->args[1] - 1;
arg.w.count = 0;
tp->ops->walk(tp, &arg.w);
- cb->args[1] = arg.w.count+1;
+ cb->args[1] = arg.w.count + 1;
if (arg.w.stop)
break;
}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index f23d9155b1ef..8be8872dd571 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -21,14 +21,12 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
-struct basic_head
-{
+struct basic_head {
u32 hgenerator;
struct list_head flist;
};
-struct basic_filter
-{
+struct basic_filter {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
-static inline void basic_delete_filter(struct tcf_proto *tp,
- struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
};
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
- unsigned long base, struct nlattr **tb,
- struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est)
{
int err = -EINVAL;
struct tcf_exts e;
@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} while (--i > 0 && basic_get(tp, head->hgenerator));
if (i <= 0) {
- printk(KERN_ERR "Insufficient number of handles\n");
+ pr_err("Insufficient number of handles\n");
goto errout;
}
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d49c40fb7e09..32a335194ca5 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
{
struct cgroup_cls_state *cs;
- if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
return ERR_PTR(-ENOMEM);
if (cgrp->parent)
@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
- struct nlattr *tb[TCA_CGROUP_MAX+1];
+ struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = tp->root;
struct tcf_ematch_tree t;
struct tcf_exts e;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5b271a18bc3a..8ec01391d988 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
@@ -276,7 +276,7 @@ fallback:
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (skb_dst(skb))
return skb_dst(skb)->tclassid;
#endif
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 93b0a7b6f9b4..26e7bc4ffb79 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -31,14 +31,12 @@
#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
-struct fw_head
-{
+struct fw_head {
struct fw_filter *ht[HTSIZE];
u32 mask;
};
-struct fw_filter
-{
+struct fw_filter {
struct fw_filter *next;
u32 id;
struct tcf_result res;
@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
.police = TCA_FW_POLICE
};
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
{
if (HTSIZE == 4096)
return ((handle >> 24) & 0xFFF) ^
@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
int r;
u32 id = skb->mark;
if (head != NULL) {
id &= head->mask;
- for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+ for (f = head->ht[fw_hash(id)]; f; f = f->next) {
if (f->id == id) {
*res = f->res;
#ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
}
} else {
/* old method */
- if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+ if (id && (TC_H_MAJ(id) == 0 ||
+ !(TC_H_MAJ(id ^ tp->q->handle)))) {
res->classid = id;
res->class = 0;
return 0;
@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
if (head == NULL)
return 0;
- for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+ for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
if (f->id == handle)
return (unsigned long)f;
}
@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
- for (h=0; h<HTSIZE; h++) {
- while ((f=head->ht[h]) != NULL) {
+ for (h = 0; h < HTSIZE; h++) {
+ while ((f = head->ht[h]) != NULL) {
head->ht[h] = f->next;
fw_delete_filter(tp, f);
}
@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
- struct fw_filter *f = (struct fw_filter*)arg;
+ struct fw_head *head = (struct fw_head *)tp->root;
+ struct fw_filter *f = (struct fw_filter *)arg;
struct fw_filter **fp;
if (head == NULL || f == NULL)
goto out;
- for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+ for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
struct nlattr **tca,
unsigned long *arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter *) *arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@ errout:
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
int h;
if (head == NULL)
@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct fw_head *head = (struct fw_head *)tp->root;
- struct fw_filter *f = (struct fw_filter*)fh;
+ struct fw_filter *f = (struct fw_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 694dcd85dec8..d580cdfca093 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -23,34 +23,30 @@
#include <net/pkt_cls.h>
/*
- 1. For now we assume that route tags < 256.
- It allows to use direct table lookups, instead of hash tables.
- 2. For now we assume that "from TAG" and "fromdev DEV" statements
- are mutually exclusive.
- 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ * This allows us to use direct table lookups instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ * are mutually exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/
-struct route4_fastmap
-{
+struct route4_fastmap {
struct route4_filter *filter;
u32 id;
int iif;
};
-struct route4_head
-{
+struct route4_head {
struct route4_fastmap fastmap[16];
- struct route4_bucket *table[256+1];
+ struct route4_bucket *table[256 + 1];
};
-struct route4_bucket
-{
+struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
- struct route4_filter *ht[16+16+1];
+ struct route4_filter *ht[16 + 16 + 1];
};
-struct route4_filter
-{
+struct route4_filter {
struct route4_filter *next;
u32 id;
int iif;
@@ -61,20 +57,20 @@ struct route4_filter
struct route4_bucket *bkt;
};
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = {
.police = TCA_ROUTE4_POLICE,
.action = TCA_ROUTE4_ACT
};
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
{
- return id&0xF;
+ return id & 0xF;
}
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
spin_unlock_bh(root_lock);
}
-static inline void
+static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
int h = route4_fastmap_hash(id, iif);
+
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
}
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
{
- return id&0xFF;
+ return id & 0xFF;
}
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
{
- return (id>>16)&0xF;
+ return (id >> 16) & 0xF;
}
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
{
- return 16 + ((iif>>16)&0xF);
+ return 16 + ((iif >> 16) & 0xF);
}
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
{
return 32;
}
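As a worked example of the hashing just shown (constants taken from the hunk above): an id of 0x00120034 selects to-bucket 0x34 via route4_hash_to() (id & 0xFF) and from-slot 2 via route4_hash_from() ((id >> 16) & 0xF); incoming-interface filters occupy slots 16-31 via route4_hash_iif(), and route4_hash_wild() returns slot 32, matching the 16 + 16 + 1 layout of route4_bucket.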
@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_head *head = (struct route4_head *)tp->root;
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
u32 id, h;
int iif, dont_cache = 0;
- if ((dst = skb_dst(skb)) == NULL)
+ dst = skb_dst(skb);
+ if (!dst)
goto failure;
id = dst->tclassid;
if (head == NULL)
goto old_method;
- iif = ((struct rtable*)dst)->fl.iif;
+ iif = ((struct rtable *)dst)->fl.iif;
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
h = route4_hash_to(id);
restart:
- if ((b = head->table[h]) != NULL) {
+ b = head->table[h];
+ if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id)
ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@ old_method:
static inline u32 to_hash(u32 id)
{
- u32 h = id&0xFF;
- if (id&0x8000)
+ u32 h = id & 0xFF;
+
+ if (id & 0x8000)
h += 256;
return h;
}
@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
if (!(id & 0x8000)) {
if (id > 255)
return 256;
- return id&0xF;
+ return id & 0xF;
}
- return 16 + (id&0xF);
+ return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
- struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b;
struct route4_filter *f;
- unsigned h1, h2;
+ unsigned int h1, h2;
if (!head)
return 0;
@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h1 > 256)
return 0;
- h2 = from_hash(handle>>16);
+ h2 = from_hash(handle >> 16);
if (h2 > 32)
return 0;
- if ((b = head->table[h1]) != NULL) {
+ b = head->table[h1];
+ if (b) {
for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle)
return (unsigned long)f;
@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static inline void
+static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
tcf_unbind_filter(tp, &f->res);
@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
- for (h1=0; h1<=256; h1++) {
+ for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
- if ((b = head->table[h1]) != NULL) {
- for (h2=0; h2<=32; h2++) {
+ b = head->table[h1];
+ if (b) {
+ for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct route4_head *head = (struct route4_head*)tp->root;
- struct route4_filter **fp, *f = (struct route4_filter*)arg;
- unsigned h = 0;
+ struct route4_head *head = (struct route4_head *)tp->root;
+ struct route4_filter **fp, *f = (struct route4_filter *)arg;
+ unsigned int h = 0;
struct route4_bucket *b;
int i;
@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
h = f->handle;
b = f->bkt;
- for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+ for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
- for (i=0; i<=32; i++)
+ for (i = 0; i <= 32; i++)
if (b->ht[i])
return 0;
@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
}
h1 = to_hash(nhandle);
- if ((b = head->table[h1]) == NULL) {
+ b = head->table[h1];
+ if (!b) {
err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
tcf_tree_unlock(tp);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
+
err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle)
@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- if ((f = (struct route4_filter*)*arg) != NULL) {
+ f = (struct route4_filter *)*arg;
+ if (f) {
if (f->handle != handle && handle)
return -EINVAL;
@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
reinsert:
h = from_hash(f->handle >> 16);
- for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+ for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
if (f->handle < f1->handle)
break;
@@ -492,7 +497,8 @@ reinsert:
if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle);
h = from_hash(old_handle >> 16);
- if ((b = head->table[th]) != NULL) {
+ b = head->table[th];
+ if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
*fp = f->next;
@@ -515,7 +521,7 @@ errout:
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct route4_head *head = tp->root;
- unsigned h, h1;
+ unsigned int h, h1;
if (head == NULL)
arg->stop = 1;
@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct route4_filter *f = (struct route4_filter*)fh;
+ struct route4_filter *f = (struct route4_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u32 id;
@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- if (!(f->handle&0x8000)) {
- id = f->id&0xFF;
+ if (!(f->handle & 0x8000)) {
+ id = f->id & 0xFF;
NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
}
- if (f->handle&0x80000000) {
- if ((f->handle>>16) != 0xFFFF)
+ if (f->handle & 0x80000000) {
+ if ((f->handle >> 16) != 0xFFFF)
NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
} else {
- id = f->id>>16;
+ id = f->id >> 16;
NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
}
if (f->res.classid)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 425a1790b048..402c44b241a3 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -66,28 +66,25 @@
powerful classification engine. */
-struct rsvp_head
-{
+struct rsvp_head {
u32 tmap[256/32];
u32 hgenerator;
u8 tgenerator;
struct rsvp_session *ht[256];
};
-struct rsvp_session
-{
+struct rsvp_session {
struct rsvp_session *next;
__be32 dst[RSVP_DST_LEN];
struct tc_rsvp_gpi dpi;
u8 protocol;
u8 tunnelid;
/* 16 (src,sport) hash slots, and one wildcard source slot */
- struct rsvp_filter *ht[16+1];
+ struct rsvp_filter *ht[16 + 1];
};
-struct rsvp_filter
-{
+struct rsvp_filter {
struct rsvp_filter *next;
__be32 src[RSVP_DST_LEN];
struct tc_rsvp_gpi spi;
@@ -100,17 +97,19 @@ struct rsvp_filter
struct rsvp_session *sess;
};
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
- unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+ unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
h ^= h>>16;
h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF;
}
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
{
- unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+ unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
h ^= h>>16;
h ^= h>>8;
h ^= h>>4;
@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+ struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
- unsigned h1, h2;
+ unsigned int h1, h2;
__be32 *dst, *src;
u8 protocol;
u8 tunnelid = 0;
@@ -162,13 +161,13 @@ restart:
src = &nhptr->saddr.s6_addr32[0];
dst = &nhptr->daddr.s6_addr32[0];
protocol = nhptr->nexthdr;
- xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+ xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
src = &nhptr->saddr;
dst = &nhptr->daddr;
protocol = nhptr->protocol;
- xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
- if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+ xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+ if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
return -1;
#endif
@@ -176,10 +175,10 @@ restart:
h2 = hash_src(src);
for (s = sht[h1]; s; s = s->next) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+ if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
protocol == s->protocol &&
!(s->dpi.mask &
- (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+ (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
dst[0] == s->dst[0] &&
dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@ restart:
tunnelid == s->tunnelid) {
for (f = s->ht[h2]; f; f = f->next) {
- if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
- !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+ if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+ !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
&&
src[0] == f->src[0] &&
@@ -205,7 +204,7 @@ matched:
return 0;
tunnelid = f->res.classid;
- nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+ nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
goto restart;
}
}
@@ -224,11 +223,11 @@ matched:
static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
- struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+ struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
- unsigned h1 = handle&0xFF;
- unsigned h2 = (handle>>8)&0xFF;
+ unsigned int h1 = handle & 0xFF;
+ unsigned int h2 = (handle >> 8) & 0xFF;
if (h2 > 16)
return 0;
@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
-static inline void
+static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
tcf_unbind_filter(tp, &f->res);
@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
sht = data->ht;
- for (h1=0; h1<256; h1++) {
+ for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
while ((s = sht[h1]) != NULL) {
sht[h1] = s->next;
- for (h2=0; h2<=16; h2++) {
+ for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
while ((f = s->ht[h2]) != NULL) {
@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
- unsigned h = f->handle;
+ struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+ unsigned int h = f->handle;
struct rsvp_session **sp;
struct rsvp_session *s = f->sess;
int i;
- for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+ for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
- for (i=0; i<=16; i++)
+ for (i = 0; i <= 16; i++)
if (s->ht[i])
return 0;
/* OK, session has no flows */
- for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+ for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
tcf_tree_lock(tp);
@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
}
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
struct rsvp_head *data = tp->root;
int i = 0xFFFF;
while (i-- > 0) {
u32 h;
+
if ((data->hgenerator += 0x10000) == 0)
data->hgenerator = 0x10000;
h = data->hgenerator|salt;
@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
static int tunnel_bts(struct rsvp_head *data)
{
- int n = data->tgenerator>>5;
- u32 b = 1<<(data->tgenerator&0x1F);
+ int n = data->tgenerator >> 5;
+ u32 b = 1 << (data->tgenerator & 0x1F);
- if (data->tmap[n]&b)
+ if (data->tmap[n] & b)
return 0;
data->tmap[n] |= b;
return 1;
@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
memset(tmap, 0, sizeof(tmap));
- for (h1=0; h1<256; h1++) {
+ for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
for (s = sht[h1]; s; s = s->next) {
- for (h2=0; h2<=16; h2++) {
+ for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
{
int i, k;
- for (k=0; k<2; k++) {
- for (i=255; i>0; i--) {
+ for (k = 0; k < 2; k++) {
+ for (i = 255; i > 0; i--) {
if (++data->tgenerator == 0)
data->tgenerator = 1;
if (tunnel_bts(data))
@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct nlattr *opt = tca[TCA_OPTIONS-1];
struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e;
- unsigned h1, h2;
+ unsigned int h1, h2;
__be32 *dst;
int err;
@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- if ((f = (struct rsvp_filter*)*arg) != NULL) {
+ f = (struct rsvp_filter *)*arg;
+ if (f) {
/* Node exists: adjust only classid */
if (f->handle != handle && handle)
@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout;
}
- for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+ for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol &&
memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@ insert:
tcf_exts_change(tp, &f->exts, &e);
for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
- if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+ if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
break;
f->next = *fp;
wmb();
@@ -567,7 +568,7 @@ errout2:
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct rsvp_head *head = tp->root;
- unsigned h, h1;
+ unsigned int h, h1;
if (arg->stop)
return;
@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct rsvp_filter *f = (struct rsvp_filter*)fh;
+ struct rsvp_filter *f = (struct rsvp_filter *)fh;
struct rsvp_session *s;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
if (f->res.classid)
NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
- if (((f->handle>>8)&0xFF) != 16)
+ if (((f->handle >> 8) & 0xFF) != 16)
NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 20ef330bb918..36667fa64237 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
* of the hashing index is below the threshold.
*/
if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
- cp.hash = (cp.mask >> cp.shift)+1;
+ cp.hash = (cp.mask >> cp.shift) + 1;
else
cp.hash = DEFAULT_HASH_SIZE;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index b0c2a82178af..3b93fc0c8955 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -42,8 +42,7 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
-struct tc_u_knode
-{
+struct tc_u_knode {
struct tc_u_knode *next;
u32 handle;
struct tc_u_hnode *ht_up;
@@ -63,19 +62,17 @@ struct tc_u_knode
struct tc_u32_sel sel;
};
-struct tc_u_hnode
-{
+struct tc_u_hnode {
struct tc_u_hnode *next;
u32 handle;
u32 prio;
struct tc_u_common *tp_c;
int refcnt;
- unsigned divisor;
+ unsigned int divisor;
struct tc_u_knode *ht[1];
};
-struct tc_u_common
-{
+struct tc_u_common {
struct tc_u_hnode *hlist;
struct Qdisc *q;
int refcnt;
@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
.police = TCA_U32_POLICE
};
-static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
+static inline unsigned int u32_hash_fold(__be32 key,
+ const struct tc_u32_sel *sel,
+ u8 fshift)
{
- unsigned h = ntohl(key & sel->hmask)>>fshift;
+ unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h;
}
@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
unsigned int off;
} stack[TC_U32_MAXDEPTH];
- struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
@@ -120,7 +119,7 @@ next_knode:
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rcnt +=1;
+ n->pf->rcnt += 1;
j = 0;
#endif
@@ -133,14 +132,14 @@ next_knode:
}
#endif
- for (i = n->sel.nkeys; i>0; i--, key++) {
+ for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask);
- __be32 *data, _data;
+ __be32 *data, hdata;
if (skb_headroom(skb) + toff > INT_MAX)
goto out;
- data = skb_header_pointer(skb, toff, 4, &_data);
+ data = skb_header_pointer(skb, toff, 4, &hdata);
if (!data)
goto out;
if ((*data ^ key->val) & key->mask) {
@@ -148,13 +147,13 @@ next_knode:
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- n->pf->kcnts[j] +=1;
+ n->pf->kcnts[j] += 1;
j++;
#endif
}
if (n->ht_down == NULL) {
check_terminal:
- if (n->sel.flags&TC_U32_TERMINAL) {
+ if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_IND
@@ -164,7 +163,7 @@ check_terminal:
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rhit +=1;
+ n->pf->rhit += 1;
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
@@ -188,26 +187,26 @@ check_terminal:
ht = n->ht_down;
sel = 0;
if (ht->divisor) {
- __be32 *data, _data;
+ __be32 *data, hdata;
data = skb_header_pointer(skb, off + n->sel.hoff, 4,
- &_data);
+ &hdata);
if (!data)
goto out;
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift);
}
- if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
+ if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht;
- if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
+ if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) {
- __be16 *data, _data;
+ __be16 *data, hdata;
data = skb_header_pointer(skb,
off + n->sel.offoff,
- 2, &_data);
+ 2, &hdata);
if (!data)
goto out;
off2 += ntohs(n->sel.offmask & *data) >>
@@ -215,7 +214,7 @@ check_terminal:
}
off2 &= ~3;
}
- if (n->sel.flags&TC_U32_EAT) {
+ if (n->sel.flags & TC_U32_EAT) {
off += off2;
off2 = 0;
}
@@ -236,11 +235,11 @@ out:
deadloop:
if (net_ratelimit())
- printk(KERN_WARNING "cls_u32: dead loop\n");
+ pr_warning("cls_u32: dead loop\n");
return -1;
}
-static __inline__ struct tc_u_hnode *
+static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht;
}
-static __inline__ struct tc_u_knode *
+static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
- unsigned sel;
+ unsigned int sel;
struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle);
@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
- } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+ } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_knode *n;
- unsigned h;
+ unsigned int h;
- for (h=0; h<=ht->divisor; h++) {
+ for (h = 0; h <= ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
if (ht == NULL)
return 0;
if (TC_U32_KEY(ht->handle))
- return u32_delete_key(tp, (struct tc_u_knode*)ht);
+ return u32_delete_key(tp, (struct tc_u_knode *)ht);
if (tp->root == ht)
return -EINVAL;
@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
- unsigned i = 0x7FF;
+ unsigned int i = 0x7FF;
- for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+ for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
if (i < TC_U32_NODE(n->handle))
i = TC_U32_NODE(n->handle);
i++;
- return handle|(i>0xFFF ? 0xFFF : i);
+ return handle | (i > 0xFFF ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (err < 0)
return err;
- if ((n = (struct tc_u_knode*)*arg) != NULL) {
+ n = (struct tc_u_knode *)*arg;
+ if (n) {
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
}
if (tb[TCA_U32_DIVISOR]) {
- unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
+ unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100)
return -EINVAL;
@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (handle == 0)
return -ENOMEM;
}
- ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+ ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
ht->tp_c = tp_c;
@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
- unsigned h;
+ unsigned int h;
if (arg->stop)
return;
@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct tc_u_knode *n = (struct tc_u_knode*)fh;
+ struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct nlattr *nest;
if (n == NULL)
@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) {
- struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
- u32 divisor = ht->divisor+1;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
+ u32 divisor = ht->divisor + 1;
+
NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
} else {
NLA_PUT(skb, TCA_U32_SEL,
@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
- if(strlen(n->indev))
+ if (strlen(n->indev))
NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index bc450397487a..1c8360a2752a 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
return 0;
switch (cmp->align) {
- case TCF_EM_ALIGN_U8:
- val = *ptr;
- break;
+ case TCF_EM_ALIGN_U8:
+ val = *ptr;
+ break;
- case TCF_EM_ALIGN_U16:
- val = get_unaligned_be16(ptr);
+ case TCF_EM_ALIGN_U16:
+ val = get_unaligned_be16(ptr);
- if (cmp_needs_transformation(cmp))
- val = be16_to_cpu(val);
- break;
+ if (cmp_needs_transformation(cmp))
+ val = be16_to_cpu(val);
+ break;
- case TCF_EM_ALIGN_U32:
- /* Worth checking boundries? The branching seems
- * to get worse. Visit again. */
- val = get_unaligned_be32(ptr);
+ case TCF_EM_ALIGN_U32:
+ /* Worth checking boundaries? The branching seems
+ * to get worse. Visit again.
+ */
+ val = get_unaligned_be32(ptr);
- if (cmp_needs_transformation(cmp))
- val = be32_to_cpu(val);
- break;
+ if (cmp_needs_transformation(cmp))
+ val = be32_to_cpu(val);
+ break;
- default:
- return 0;
+ default:
+ return 0;
}
if (cmp->mask)
val &= cmp->mask;
switch (cmp->opnd) {
- case TCF_EM_OPND_EQ:
- return val == cmp->val;
- case TCF_EM_OPND_LT:
- return val < cmp->val;
- case TCF_EM_OPND_GT:
- return val > cmp->val;
+ case TCF_EM_OPND_EQ:
+ return val == cmp->val;
+ case TCF_EM_OPND_LT:
+ return val < cmp->val;
+ case TCF_EM_OPND_GT:
+ return val > cmp->val;
}
return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 34da5e29ea1a..e5e174782677 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -73,21 +73,18 @@
#include <net/pkt_cls.h>
#include <net/sock.h>
-struct meta_obj
-{
+struct meta_obj {
unsigned long value;
unsigned int len;
};
-struct meta_value
-{
+struct meta_value {
struct tcf_meta_val hdr;
unsigned long val;
unsigned int len;
};
-struct meta_match
-{
+struct meta_match {
struct meta_value lvalue;
struct meta_value rvalue;
};
@@ -255,7 +252,7 @@ META_COLLECTOR(int_rtclassid)
if (unlikely(skb_dst(skb) == NULL))
*err = -1;
else
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
dst->value = skb_dst(skb)->tclassid;
#else
dst->value = 0;
@@ -404,7 +401,7 @@ META_COLLECTOR(int_sk_sndbuf)
META_COLLECTOR(int_sk_alloc)
{
SKIP_NONLOCAL(skb);
- dst->value = skb->sk->sk_allocation;
+ dst->value = (__force int) skb->sk->sk_allocation;
}
META_COLLECTOR(int_sk_route_caps)
@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
* Meta value collectors assignment table
**************************************************************************/
-struct meta_ops
-{
+struct meta_ops {
void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *);
};
@@ -494,7 +490,7 @@ struct meta_ops
/* Meta value operations table listing all meta value collectors and
* assigns them to a type and meta id. */
-static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
+static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
}
};
-static inline struct meta_ops * meta_ops(struct meta_value *val)
+static inline struct meta_ops *meta_ops(struct meta_value *val)
{
return &__meta_ops[meta_type(val)][meta_id(val)];
}
@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
if (v->len == sizeof(unsigned long))
NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
- else if (v->len == sizeof(u32)) {
+ else if (v->len == sizeof(u32))
NLA_PUT_U32(skb, tlv, v->val);
- }
return 0;
@@ -663,8 +658,7 @@ nla_put_failure:
* Type specific operations table
**************************************************************************/
-struct meta_type_ops
-{
+struct meta_type_ops {
void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *);
@@ -672,7 +666,7 @@ struct meta_type_ops
int (*dump)(struct sk_buff *, struct meta_value *, int);
};
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
+static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
.destroy = meta_var_destroy,
.compare = meta_var_compare,
@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
}
};
-static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
+static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
return &__meta_type_ops[meta_type(v)];
}
@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
return err;
if (meta_type_ops(v)->apply_extras)
- meta_type_ops(v)->apply_extras(v, dst);
+ meta_type_ops(v)->apply_extras(v, dst);
return 0;
}
@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
switch (meta->lvalue.hdr.op) {
- case TCF_EM_OPND_EQ:
- return !r;
- case TCF_EM_OPND_LT:
- return r < 0;
- case TCF_EM_OPND_GT:
- return r > 0;
+ case TCF_EM_OPND_EQ:
+ return !r;
+ case TCF_EM_OPND_LT:
+ return r < 0;
+ case TCF_EM_OPND_GT:
+ return r > 0;
}
return 0;
@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
static inline int meta_is_supported(struct meta_value *val)
{
- return (!meta_id(val) || meta_ops(val)->get);
+ return !meta_id(val) || meta_ops(val)->get;
}
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 1a4176aee6e5..a3bed07a008b 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -18,8 +18,7 @@
#include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h>
-struct nbyte_data
-{
+struct nbyte_data {
struct tcf_em_nbyte hdr;
char pattern[0];
};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index ea8f566e720c..15d353d2e4be 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -19,8 +19,7 @@
#include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h>
-struct text_match
-{
+struct text_match {
u16 from_offset;
u16 to_offset;
u8 from_layer;
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 953f1479f7da..797bdb88c010 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0;
- return !(((*(__be32*) ptr) ^ key->val) & key->mask);
+ return !(((*(__be32 *) ptr) ^ key->val) & key->mask);
}
static struct tcf_ematch_ops em_u32_ops = {
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 5e37da961f80..88d93eb92507 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -93,7 +93,7 @@
static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock);
-static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
+static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{
struct tcf_ematch_ops *e = NULL;
@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
}
EXPORT_SYMBOL(tcf_em_unregister);
-static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
- int index)
+static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
+ int index)
{
return &tree->matches[index];
}
@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em_hdr->kind == TCF_EM_CONTAINER) {
/* Special ematch called "container", carries an index
- * referencing an external ematch sequence. */
+ * referencing an external ematch sequence.
+ */
u32 ref;
if (data_len < sizeof(ref))
@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout;
/* We do not allow backward jumps to avoid loops and jumps
- * to our own position are of course illegal. */
+ * to our own position are of course illegal.
+ */
if (ref <= idx)
goto errout;
@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* which automatically releases the reference again, therefore
* the module MUST not be given back under any circumstances
* here. Be aware, the destroy function assumes that the
- * module is held if the ops field is non zero. */
+ * module is held if the ops field is non zero.
+ */
em->ops = tcf_em_lookup(em_hdr->kind);
if (em->ops == NULL) {
@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops) {
/* We dropped the RTNL mutex in order to
* perform the module load. Tell the caller
- * to replay the request. */
+ * to replay the request.
+ */
module_put(em->ops->owner);
err = -EAGAIN;
}
@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
}
/* ematch module provides expected length of data, so we
- * can do a basic sanity check. */
+ * can do a basic sanity check.
+ */
if (em->ops->datalen && data_len < em->ops->datalen)
goto errout;
@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* TCF_EM_SIMPLE may be specified stating that the
* data only consists of a u32 integer and the module
* does not expected a memory reference but rather
- * the value carried. */
+ * the value carried.
+ */
if (em_hdr->flags & TCF_EM_SIMPLE) {
if (data_len < sizeof(u32))
goto errout;
@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
* The array of rt attributes is parsed in the order as they are
* provided, their type must be incremental from 1 to n. Even
* if it does not serve any real purpose, a failure of sticking
- * to this policy will result in parsing failure. */
+ * to this policy will result in parsing failure.
+ */
for (idx = 0; nla_ok(rt_match, list_len); idx++) {
err = -EINVAL;
@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
/* Check if the number of matches provided by userspace actually
* complies with the array of matches. The number was used for
* the validation of references and a mismatch could lead to
- * undefined references during the matching process. */
+ * undefined references during the matching process.
+ */
if (idx != tree_hdr->nmatches) {
err = -EINVAL;
goto errout_abort;
@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
.flags = em->flags
};
- NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
+ NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
if (em->ops && em->ops->dump) {
if (em->ops->dump(skb, em) < 0)
@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
int r = em->ops->match(skb, em, info);
+
return tcf_em_is_inverted(em) ? !r : r;
}
@@ -527,8 +536,8 @@ pop_stack:
stack_overflow:
if (net_ratelimit())
- printk(KERN_WARNING "tc ematch: local stack overflow,"
- " increase NET_EMATCH_STACK\n");
+ pr_warning("tc ematch: local stack overflow,"
+ " increase NET_EMATCH_STACK\n");
return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b22ca2d1cebc..7490f3f2db8b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
int err = -ENOENT;
write_lock(&qdisc_mod_lock);
- for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
+ for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (q == qops)
break;
if (q) {
@@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
if (!tab || --tab->refcnt)
return;
- for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
+ for (rtabp = &qdisc_rtab_list;
+ (rtab = *rtabp) != NULL;
+ rtabp = &rtab->next) {
if (rtab == tab) {
*rtabp = rtab->next;
kfree(rtab);
@@ -396,6 +398,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
return stab;
}
+static void stab_kfree_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct qdisc_size_table, rcu));
+}
+
void qdisc_put_stab(struct qdisc_size_table *tab)
{
if (!tab)
@@ -405,7 +412,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (--tab->refcnt == 0) {
list_del(&tab->list);
- kfree(tab);
+ call_rcu_bh(&tab->rcu, stab_kfree_rcu);
}
spin_unlock(&qdisc_stab_lock);
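
The hunks above change the size-table lifetime: qdisc_put_stab() now defers the kfree() through call_rcu_bh(), and stab_kfree_rcu() recovers the enclosing qdisc_size_table from the embedded rcu_head with container_of(). A minimal plain-C sketch of that publish-then-defer-free pattern follows; it is illustrative only, the names defer_node, stab_entry and drain_deferred() are invented here, and the kernel side uses the real RCU primitives shown in the diff.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct defer_node {                 /* stands in for struct rcu_head */
            struct defer_node *next;
            void (*func)(struct defer_node *);
    };

    struct stab_entry {                 /* stands in for qdisc_size_table */
            int refcnt;
            struct defer_node rcu;
    };

    static struct defer_node *defer_list;

    static void defer_free(struct defer_node *node,
                           void (*func)(struct defer_node *))
    {
            node->func = func;          /* queue instead of freeing right away */
            node->next = defer_list;
            defer_list = node;
    }

    static void stab_free_cb(struct defer_node *node)
    {
            /* recover the enclosing object from its embedded member */
            free(container_of(node, struct stab_entry, rcu));
    }

    static void drain_deferred(void)    /* models the end of a grace period */
    {
            while (defer_list) {
                    struct defer_node *n = defer_list;

                    defer_list = n->next;
                    n->func(n);
            }
    }

    int main(void)
    {
            struct stab_entry *old_tab = calloc(1, sizeof(*old_tab));
            struct stab_entry *new_tab = calloc(1, sizeof(*new_tab));
            struct stab_entry *published = old_tab;

            if (!old_tab || !new_tab)
                    return 1;
            published = new_tab;                     /* step 1: publish the replacement */
            defer_free(&old_tab->rcu, stab_free_cb); /* step 2: retire the old table later */
            drain_deferred();
            printf("published=%p, old table reclaimed\n", (void *)published);
            free(new_tab);
            return 0;
    }
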
@@ -428,7 +435,7 @@ nla_put_failure:
return -1;
}
-void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
int pkt_len, slot;
@@ -454,14 +461,13 @@ out:
pkt_len = 1;
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
-EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
- printk(KERN_WARNING
- "%s: %s qdisc %X: is non-work-conserving?\n",
- txt, qdisc->ops->id, qdisc->handle >> 16);
+ pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+ txt, qdisc->ops->id, qdisc->handle >> 16);
qdisc->flags |= TCQ_F_WARN_NONWC;
}
}
@@ -472,7 +478,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
- wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc));
return HRTIMER_NORESTART;
@@ -494,7 +500,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
&qdisc_root_sleeping(wd->qdisc)->state))
return;
- wd->qdisc->flags |= TCQ_F_THROTTLED;
+ qdisc_throttled(wd->qdisc);
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -504,7 +510,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
hrtimer_cancel(&wd->timer);
- wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
@@ -625,7 +631,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
autohandle = TC_H_MAKE(0x80000000U, 0);
} while (qdisc_lookup(dev, autohandle) && --i > 0);
- return i>0 ? autohandle : 0;
+ return i > 0 ? autohandle : 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
@@ -834,7 +840,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
err = PTR_ERR(stab);
goto err_out4;
}
- sch->stab = stab;
+ rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
spinlock_t *root_lock;
@@ -874,7 +880,7 @@ err_out4:
* Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.
*/
- qdisc_put_stab(sch->stab);
+ qdisc_put_stab(rtnl_dereference(sch->stab));
if (ops->destroy)
ops->destroy(sch);
goto err_out3;
@@ -882,7 +888,7 @@ err_out4:
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
- struct qdisc_size_table *stab = NULL;
+ struct qdisc_size_table *ostab, *stab = NULL;
int err = 0;
if (tca[TCA_OPTIONS]) {
@@ -899,8 +905,9 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
return PTR_ERR(stab);
}
- qdisc_put_stab(sch->stab);
- sch->stab = stab;
+ ostab = rtnl_dereference(sch->stab);
+ rcu_assign_pointer(sch->stab, stab);
+ qdisc_put_stab(ostab);
if (tca[TCA_RATE]) {
/* NB: ignores errors from replace_estimator
@@ -915,9 +922,8 @@ out:
return 0;
}
-struct check_loop_arg
-{
- struct qdisc_walker w;
+struct check_loop_arg {
+ struct qdisc_walker w;
struct Qdisc *p;
int depth;
};
@@ -970,7 +976,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL;
int err;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -980,12 +987,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (clid) {
if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ p = qdisc_lookup(dev, TC_H_MAJ(clid));
+ if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /* ingress */
- if (dev_ingress_queue(dev))
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ } else if (dev_ingress_queue(dev)) {
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -996,7 +1003,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
return -EINVAL;
} else {
- if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ q = qdisc_lookup(dev, tcm->tcm_handle);
+ if (!q)
return -ENOENT;
}
@@ -1008,7 +1016,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
if (q->handle == 0)
return -ENOENT;
- if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
+ err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+ if (err != 0)
return err;
} else {
qdisc_notify(net, skb, n, clid, NULL, q);
@@ -1017,7 +1026,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/*
- Create/change qdisc.
+ * Create/change qdisc.
*/
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@@ -1036,7 +1045,8 @@ replay:
clid = tcm->tcm_parent;
q = p = NULL;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1046,12 +1056,12 @@ replay:
if (clid) {
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ p = qdisc_lookup(dev, TC_H_MAJ(clid));
+ if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /* ingress */
- if (dev_ingress_queue_create(dev))
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ } else if (dev_ingress_queue_create(dev)) {
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1063,13 +1073,14 @@ replay:
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
- if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+ if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
if (TC_H_MIN(tcm->tcm_handle))
return -EINVAL;
- if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ q = qdisc_lookup(dev, tcm->tcm_handle);
+ if (!q)
goto create_n_graft;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
@@ -1079,7 +1090,7 @@ replay:
atomic_inc(&q->refcnt);
goto graft;
} else {
- if (q == NULL)
+ if (!q)
goto create_n_graft;
/* This magic test requires explanation.
@@ -1101,9 +1112,9 @@ replay:
* For now we select create/graft, if
* user gave KIND, which does not match existing.
*/
- if ((n->nlmsg_flags&NLM_F_CREATE) &&
- (n->nlmsg_flags&NLM_F_REPLACE) &&
- ((n->nlmsg_flags&NLM_F_EXCL) ||
+ if ((n->nlmsg_flags & NLM_F_CREATE) &&
+ (n->nlmsg_flags & NLM_F_REPLACE) &&
+ ((n->nlmsg_flags & NLM_F_EXCL) ||
(tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id))))
goto create_n_graft;
@@ -1118,7 +1129,7 @@ replay:
/* Change qdisc parameters */
if (q == NULL)
return -ENOENT;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
@@ -1128,7 +1139,7 @@ replay:
return err;
create_n_graft:
- if (!(n->nlmsg_flags&NLM_F_CREATE))
+ if (!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev))
@@ -1175,6 +1186,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
+ struct qdisc_size_table *stab;
nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
@@ -1190,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto nla_put_failure;
q->qstats.qlen = q->q.qlen;
- if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+ stab = rtnl_dereference(q->stab);
+ if (stab && qdisc_dump_stab(skb, stab) < 0)
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
@@ -1234,16 +1247,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
return -ENOBUFS;
if (old && !tc_qdisc_dump_ignore(old)) {
- if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+ if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+ 0, RTM_DELQDISC) < 0)
goto err_out;
}
if (new && !tc_qdisc_dump_ignore(new)) {
- if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+ if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+ old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out;
}
if (skb->len)
- return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
err_out:
kfree_skb(skb);
@@ -1275,7 +1291,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
q_idx++;
continue;
}
- if (!tc_qdisc_dump_ignore(q) &&
+ if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
goto done;
@@ -1356,7 +1372,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid);
int err;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1391,9 +1408,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
qid = dev->qdisc->handle;
/* Now qid is genuine qdisc handle consistent
- both with parent and child.
-
- TC_H_MAJ(pid) still may be unspecified, complete it now.
+ * both with parent and child.
+ *
+ * TC_H_MAJ(pid) still may be unspecified, complete it now.
*/
if (pid)
pid = TC_H_MAKE(qid, pid);
@@ -1403,7 +1420,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/* OK. Locate qdisc */
- if ((q = qdisc_lookup(dev, qid)) == NULL)
+ q = qdisc_lookup(dev, qid);
+ if (!q)
return -ENOENT;
/* An check that it supports classes */
@@ -1423,13 +1441,14 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cl == 0) {
err = -ENOENT;
- if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTCLASS ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
goto out;
} else {
switch (n->nlmsg_type) {
case RTM_NEWTCLASS:
err = -EEXIST;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
goto out;
break;
case RTM_DELTCLASS:
@@ -1521,14 +1540,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
return -EINVAL;
}
- return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
}
-struct qdisc_dump_args
-{
- struct qdisc_walker w;
- struct sk_buff *skb;
- struct netlink_callback *cb;
+struct qdisc_dump_args {
+ struct qdisc_walker w;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
};
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
@@ -1590,7 +1609,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
struct net_device *dev;
@@ -1598,7 +1617,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return 0;
- if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return 0;
s_t = cb->args[0];
@@ -1621,19 +1641,22 @@ done:
}
/* Main classifier routine: scans classifier chain attached
- to this qdisc, (optionally) tests for protocol and asks
- specific classifiers.
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
*/
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
__be16 protocol = skb->protocol;
- int err = 0;
+ int err;
for (; tp; tp = tp->next) {
- if ((tp->protocol == protocol ||
- tp->protocol == htons(ETH_P_ALL)) &&
- (err = tp->classify(skb, tp, res)) >= 0) {
+ if (tp->protocol != protocol &&
+ tp->protocol != htons(ETH_P_ALL))
+ continue;
+ err = tp->classify(skb, tp, res);
+
+ if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT
if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
@@ -1649,12 +1672,12 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
int err = 0;
- __be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
+ __be16 protocol;
struct tcf_proto *otp = tp;
reclassify:
-#endif
protocol = skb->protocol;
+#endif
err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
@@ -1664,11 +1687,11 @@ reclassify:
if (verd++ >= MAX_REC_LOOP) {
if (net_ratelimit())
- printk(KERN_NOTICE
- "%s: packet reclassify loop"
+ pr_notice("%s: packet reclassify loop"
" rule prio %u protocol %02x\n",
- tp->q->ops->id,
- tp->prio & 0xffff, ntohs(tp->protocol));
+ tp->q->ops->id,
+ tp->prio & 0xffff,
+ ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1761,7 +1784,7 @@ static int __init pktsched_init(void)
err = register_pernet_subsys(&psched_net_ops);
if (err) {
- printk(KERN_ERR "pktsched_init: "
+ pr_err("pktsched_init: "
"cannot initialize per netns operations\n");
return err;
}
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 943d733409d0..3f08158b8688 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
* creation), and one for the reference held when calling delete.
*/
if (flow->ref < 2) {
- printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
+ pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL;
}
if (flow->ref > 2)
@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
flow = NULL;
- done:
- ;
+done:
+ ;
}
- if (!flow)
+ if (!flow) {
flow = &p->link;
- else {
+ } else {
if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
/*@@@ looks good ... but it's not supposed to work :-) */
@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
- printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
- flow->ref);
+ pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
atm_tc_put(sch, (unsigned long)flow);
}
tasklet_kill(&p->task);
@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
}
if (flow->excess)
NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
- else {
+ else
NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
- }
nla_nest_end(skb, nest);
return skb->len;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 5f63ec58942c..24d94c097b35 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -72,8 +72,7 @@
struct cbq_sched_data;
-struct cbq_class
-{
+struct cbq_class {
struct Qdisc_class_common common;
struct cbq_class *next_alive; /* next class with backlog in this priority band */
@@ -139,19 +138,18 @@ struct cbq_class
int refcnt;
int filters;
- struct cbq_class *defaults[TC_PRIO_MAX+1];
+ struct cbq_class *defaults[TC_PRIO_MAX + 1];
};
-struct cbq_sched_data
-{
+struct cbq_sched_data {
struct Qdisc_class_hash clhash; /* Hash table of all classes */
- int nclasses[TC_CBQ_MAXPRIO+1];
- unsigned quanta[TC_CBQ_MAXPRIO+1];
+ int nclasses[TC_CBQ_MAXPRIO + 1];
+ unsigned int quanta[TC_CBQ_MAXPRIO + 1];
struct cbq_class link;
- unsigned activemask;
- struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
+ unsigned int activemask;
+ struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
with backlog */
#ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
int tx_len;
psched_time_t now; /* Cached timestamp */
psched_time_t now_rt; /* Cached real time */
- unsigned pmask;
+ unsigned int pmask;
struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
};
-#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
- struct cbq_class *cl, *new;
+ struct cbq_class *cl;
- for (cl = this->tparent; cl; cl = cl->tparent)
- if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
- return new;
+ for (cl = this->tparent; cl; cl = cl->tparent) {
+ struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
+ if (new != NULL && new != this)
+ return new;
+ }
return NULL;
}
#endif
/* Classify packet. The procedure is pretty complicated, but
- it allows us to combine link sharing and priority scheduling
- transparently.
-
- Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
- so that it resolves to split nodes. Then packets are classified
- by logical priority, or a more specific classifier may be attached
- to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
*/
static struct cbq_class *
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 1. If skb->priority points to one of our classes, use it.
*/
- if (TC_H_MAJ(prio^sch->handle) == 0 &&
+ if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
(cl = cbq_class_lookup(q, prio)) != NULL)
return cl;
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
goto fallback;
- if ((cl = (void*)res.class) == NULL) {
+ cl = (void *)res.class;
+ if (!cl) {
if (TC_H_MAJ(res.classid))
cl = cbq_class_lookup(q, res.classid);
- else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+ else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];
if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
* Step 4. No success...
*/
if (TC_H_MAJ(prio) == 0 &&
- !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+ !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
!(cl = head->defaults[TC_PRIO_BESTEFFORT]))
return head;
@@ -290,12 +291,12 @@ fallback:
}
/*
- A packet has just been enqueued on the empty class.
- cbq_activate_class adds it to the tail of active class list
- of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
*/
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
}
/*
- Unlink class from active chain.
- Note that this same procedure is done directly in cbq_dequeue*
- during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
*/
static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
int toplevel = q->toplevel;
- if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+ if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
psched_time_t now;
psched_tdiff_t incr;
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
q->toplevel = cl->level;
return;
}
- } while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+ } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
}
}
@@ -417,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
delay += cl->offtime;
/*
- Class goes to sleep, so that it will have no
- chance to work avgidle. Let's forgive it 8)
-
- BTW cbq-2.0 has a crap in this
- place, apparently they forgot to shift it by cl->ewma_log.
+ * Class goes to sleep, so that it will have no
+ * chance to work avgidle. Let's forgive it 8)
+ *
+ * BTW cbq-2.0 has a crap in this
+ * place, apparently they forgot to shift it by cl->ewma_log.
*/
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -438,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
q->wd_expires = delay;
/* Dirty work! We must schedule wakeups based on
- real available rate, rather than leaf rate,
- which may be tiny (even zero).
+ * real available rate, rather than leaf rate,
+ * which may be tiny (even zero).
*/
if (q->toplevel == TC_CBQ_MAXLEVEL) {
struct cbq_class *b;
@@ -459,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
}
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
- they go overlimit
+ * they go overlimit
*/
static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -594,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
struct Qdisc *sch = q->watchdog.qdisc;
psched_time_t now;
psched_tdiff_t delay = 0;
- unsigned pmask;
+ unsigned int pmask;
now = psched_get_time();
@@ -623,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
}
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
__netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}
@@ -663,15 +664,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
#endif
/*
- It is mission critical procedure.
-
- We "regenerate" toplevel cutoff, if transmitting class
- has backlog and it is not regulated. It is not part of
- original CBQ description, but looks more reasonable.
- Probably, it is wrong. This question needs further investigation.
-*/
+ * It is mission critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
-static __inline__ void
+static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
struct cbq_class *borrowed)
{
@@ -682,7 +683,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
q->toplevel = borrowed->level;
return;
}
- } while ((borrowed=borrowed->borrow) != NULL);
+ } while ((borrowed = borrowed->borrow) != NULL);
}
#if 0
/* It is not necessary now. Uncommenting it
@@ -710,10 +711,10 @@ cbq_update(struct cbq_sched_data *q)
cl->bstats.bytes += len;
/*
- (now - last) is total time between packet right edges.
- (last_pktlen/rate) is "virtual" busy time, so that
-
- idle = (now - last) - last_pktlen/rate
+ * (now - last) is total time between packet right edges.
+ * (last_pktlen/rate) is "virtual" busy time, so that
+ *
+ * idle = (now - last) - last_pktlen/rate
*/
idle = q->now - cl->last;
@@ -723,9 +724,9 @@ cbq_update(struct cbq_sched_data *q)
idle -= L2T(cl, len);
/* true_avgidle := (1-W)*true_avgidle + W*idle,
- where W=2^{-ewma_log}. But cl->avgidle is scaled:
- cl->avgidle == true_avgidle/W,
- hence:
+ * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+ * cl->avgidle == true_avgidle/W,
+ * hence:
*/
avgidle += idle - (avgidle>>cl->ewma_log);
}
@@ -739,22 +740,22 @@ cbq_update(struct cbq_sched_data *q)
cl->avgidle = avgidle;
/* Calculate expected time, when this class
- will be allowed to send.
- It will occur, when:
- (1-W)*true_avgidle + W*delay = 0, i.e.
- idle = (1/W - 1)*(-true_avgidle)
- or
- idle = (1 - W)*(-cl->avgidle);
+ * will be allowed to send.
+ * It will occur, when:
+ * (1-W)*true_avgidle + W*delay = 0, i.e.
+ * idle = (1/W - 1)*(-true_avgidle)
+ * or
+ * idle = (1 - W)*(-cl->avgidle);
*/
idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
/*
- That is not all.
- To maintain the rate allocated to the class,
- we add to undertime virtual clock,
- necessary to complete transmitted packet.
- (len/phys_bandwidth has been already passed
- to the moment of cbq_update)
+ * That is not all.
+ * To maintain the rate allocated to the class,
+ * we add to undertime virtual clock,
+ * necessary to complete transmitted packet.
+ * (len/phys_bandwidth has been already passed
+ * to the moment of cbq_update)
*/
idle -= L2T(&q->link, len);
@@ -776,7 +777,7 @@ cbq_update(struct cbq_sched_data *q)
cbq_update_toplevel(q, this, q->tx_borrowed);
}
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -792,16 +793,17 @@ cbq_under_limit(struct cbq_class *cl)
do {
/* It is very suspicious place. Now overlimit
- action is generated for not bounded classes
- only if link is completely congested.
- Though it is in agree with ancestor-only paradigm,
- it looks very stupid. Particularly,
- it means that this chunk of code will either
- never be called or result in strong amplification
- of burstiness. Dangerous, silly, and, however,
- no another solution exists.
+ * action is generated for not bounded classes
+ * only if link is completely congested.
+ * Though it is in agree with ancestor-only paradigm,
+ * it looks very stupid. Particularly,
+ * it means that this chunk of code will either
+ * never be called or result in strong amplification
+ * of burstiness. Dangerous, silly, and, however,
+ * no another solution exists.
*/
- if ((cl = cl->borrow) == NULL) {
+ cl = cl->borrow;
+ if (!cl) {
this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl);
return NULL;
@@ -814,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl)
return cl;
}
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
struct cbq_sched_data *q = qdisc_priv(sch);
@@ -838,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
if (cl->deficit <= 0) {
/* Class exhausted its allotment per
- this round. Switch to the next one.
+ * this round. Switch to the next one.
*/
deficit = 1;
cl->deficit += cl->quantum;
@@ -848,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skb = cl->q->dequeue(cl->q);
/* Class did not give us any skb :-(
- It could occur even if cl->q->q.qlen != 0
- f.e. if cl->q == "tbf"
+ * It could occur even if cl->q->q.qlen != 0
+ * f.e. if cl->q == "tbf"
*/
if (skb == NULL)
goto skip_class;
@@ -878,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skip_class:
if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
/* Class is empty or penalized.
- Unlink it from active chain.
+ * Unlink it from active chain.
*/
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
@@ -917,14 +919,14 @@ next_class:
return NULL;
}
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- unsigned activemask;
+ unsigned int activemask;
- activemask = q->activemask&0xFF;
+ activemask = q->activemask & 0xFF;
while (activemask) {
int prio = ffz(~activemask);
activemask &= ~(1<<prio);
@@ -949,11 +951,11 @@ cbq_dequeue(struct Qdisc *sch)
if (q->tx_class) {
psched_tdiff_t incr2;
/* Time integrator. We calculate EOS time
- by adding expected packet transmission time.
- If real time is greater, we warp artificial clock,
- so that:
-
- cbq_time = max(real_time, work);
+ * by adding expected packet transmission time.
+ * If real time is greater, we warp artificial clock,
+ * so that:
+ *
+ * cbq_time = max(real_time, work);
*/
incr2 = L2T(&q->link, q->tx_len);
q->now += incr2;
@@ -971,27 +973,27 @@ cbq_dequeue(struct Qdisc *sch)
if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
return skb;
}
/* All the classes are overlimit.
-
- It is possible, if:
-
- 1. Scheduler is empty.
- 2. Toplevel cutoff inhibited borrowing.
- 3. Root class is overlimit.
-
- Reset 2d and 3d conditions and retry.
-
- Note, that NS and cbq-2.0 are buggy, peeking
- an arbitrary class is appropriate for ancestor-only
- sharing, but not for toplevel algorithm.
-
- Our version is better, but slower, because it requires
- two passes, but it is unavoidable with top-level sharing.
- */
+ *
+ * It is possible, if:
+ *
+ * 1. Scheduler is empty.
+ * 2. Toplevel cutoff inhibited borrowing.
+ * 3. Root class is overlimit.
+ *
+ * Reset 2d and 3d conditions and retry.
+ *
+ * Note, that NS and cbq-2.0 are buggy, peeking
+ * an arbitrary class is appropriate for ancestor-only
+ * sharing, but not for toplevel algorithm.
+ *
+ * Our version is better, but slower, because it requires
+ * two passes, but it is unavoidable with top-level sharing.
+ */
if (q->toplevel == TC_CBQ_MAXLEVEL &&
q->link.undertime == PSCHED_PASTPERFECT)
@@ -1002,7 +1004,8 @@ cbq_dequeue(struct Qdisc *sch)
}
/* No packets in scheduler or nobody wants to give them to us :-(
- Sigh... start watchdog timer in the last case. */
+ * Sigh... start watchdog timer in the last case.
+ */
if (sch->q.qlen) {
sch->qstats.overlimits++;
@@ -1024,13 +1027,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
int level = 0;
struct cbq_class *cl;
- if ((cl = this->children) != NULL) {
+ cl = this->children;
+ if (cl) {
do {
if (cl->level > level)
level = cl->level;
} while ((cl = cl->sibling) != this->children);
}
- this->level = level+1;
+ this->level = level + 1;
} while ((this = this->tparent) != NULL);
}
@@ -1046,14 +1050,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
/* BUGGGG... Beware! This expression suffer of
- arithmetic overflows!
+ * arithmetic overflows!
*/
if (cl->priority == prio) {
cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q->quanta[prio];
}
if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
- printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+ pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+ cl->common.classid, cl->quantum);
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
}
}
@@ -1064,18 +1069,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split;
- unsigned h;
+ unsigned int h;
int i;
if (split == NULL)
return;
- for (i=0; i<=TC_PRIO_MAX; i++) {
- if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
+ if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
split->defaults[i] = NULL;
}
- for (i=0; i<=TC_PRIO_MAX; i++) {
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
int level = split->level;
if (split->defaults[i])
@@ -1088,7 +1093,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
hlist_for_each_entry(c, n, &q->clhash.hash[h],
common.hnode) {
if (c->split == split && c->level < level &&
- c->defmap&(1<<i)) {
+ c->defmap & (1<<i)) {
split->defaults[i] = c;
level = c->level;
}
@@ -1102,7 +1107,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
struct cbq_class *split = NULL;
if (splitid == 0) {
- if ((split = cl->split) == NULL)
+ split = cl->split;
+ if (!split)
return;
splitid = split->common.classid;
}
@@ -1120,9 +1126,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
cl->defmap = 0;
cbq_sync_defmap(cl);
cl->split = split;
- cl->defmap = def&mask;
+ cl->defmap = def & mask;
} else
- cl->defmap = (cl->defmap&~mask)|(def&mask);
+ cl->defmap = (cl->defmap & ~mask) | (def & mask);
cbq_sync_defmap(cl);
}
@@ -1135,7 +1141,7 @@ static void cbq_unlink_class(struct cbq_class *this)
qdisc_class_hash_remove(&q->clhash, &this->common);
if (this->tparent) {
- clp=&this->sibling;
+ clp = &this->sibling;
cl = *clp;
do {
if (cl == this) {
@@ -1174,7 +1180,7 @@ static void cbq_link_class(struct cbq_class *this)
}
}
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
@@ -1182,7 +1188,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
unsigned int len;
for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
- if ((cl_head = q->active[prio]) == NULL)
+ cl_head = q->active[prio];
+ if (!cl_head)
continue;
cl = cl_head;
@@ -1199,13 +1206,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
}
static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
struct hlist_node *n;
int prio;
- unsigned h;
+ unsigned int h;
q->activemask = 0;
q->pmask = 0;
@@ -1237,21 +1244,21 @@ cbq_reset(struct Qdisc* sch)
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
- if (lss->change&TCF_CBQ_LSS_FLAGS) {
- cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
- cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+ if (lss->change & TCF_CBQ_LSS_FLAGS) {
+ cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+ cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
}
- if (lss->change&TCF_CBQ_LSS_EWMA)
+ if (lss->change & TCF_CBQ_LSS_EWMA)
cl->ewma_log = lss->ewma_log;
- if (lss->change&TCF_CBQ_LSS_AVPKT)
+ if (lss->change & TCF_CBQ_LSS_AVPKT)
cl->avpkt = lss->avpkt;
- if (lss->change&TCF_CBQ_LSS_MINIDLE)
+ if (lss->change & TCF_CBQ_LSS_MINIDLE)
cl->minidle = -(long)lss->minidle;
- if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+ if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
cl->maxidle = lss->maxidle;
cl->avgidle = lss->maxidle;
}
- if (lss->change&TCF_CBQ_LSS_OFFTIME)
+ if (lss->change & TCF_CBQ_LSS_OFFTIME)
cl->offtime = lss->offtime;
return 0;
}
@@ -1279,10 +1286,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
if (wrr->weight)
cl->weight = wrr->weight;
if (wrr->priority) {
- cl->priority = wrr->priority-1;
+ cl->priority = wrr->priority - 1;
cl->cpriority = cl->priority;
if (cl->priority >= cl->priority2)
- cl->priority2 = TC_CBQ_MAXPRIO-1;
+ cl->priority2 = TC_CBQ_MAXPRIO - 1;
}
cbq_addprio(q, cl);
@@ -1299,10 +1306,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
cl->overlimit = cbq_ovl_delay;
break;
case TC_CBQ_OVL_LOWPRIO:
- if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
- ovl->priority2-1 <= cl->priority)
+ if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+ ovl->priority2 - 1 <= cl->priority)
return -EINVAL;
- cl->priority2 = ovl->priority2-1;
+ cl->priority2 = ovl->priority2 - 1;
cl->overlimit = cbq_ovl_lowprio;
break;
case TC_CBQ_OVL_DROP:
@@ -1381,9 +1388,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
if (!q->link.q)
q->link.q = &noop_qdisc;
- q->link.priority = TC_CBQ_MAXPRIO-1;
- q->link.priority2 = TC_CBQ_MAXPRIO-1;
- q->link.cpriority = TC_CBQ_MAXPRIO-1;
+ q->link.priority = TC_CBQ_MAXPRIO - 1;
+ q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+ q->link.cpriority = TC_CBQ_MAXPRIO - 1;
q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
q->link.overlimit = cbq_ovl_classic;
q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1414,7 +1421,7 @@ put_rtab:
return err;
}
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1426,7 +1433,7 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_lssopt opt;
@@ -1451,15 +1458,15 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
opt.flags = 0;
opt.allot = cl->allot;
- opt.priority = cl->priority+1;
- opt.cpriority = cl->cpriority+1;
+ opt.priority = cl->priority + 1;
+ opt.cpriority = cl->cpriority + 1;
opt.weight = cl->weight;
NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
return skb->len;
@@ -1469,13 +1476,13 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_ovl opt;
opt.strategy = cl->ovl_strategy;
- opt.priority2 = cl->priority2+1;
+ opt.priority2 = cl->priority2 + 1;
opt.pad = 0;
opt.penalty = cl->penalty;
NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1486,7 +1493,7 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_fopt opt;
@@ -1505,7 +1512,7 @@ nla_put_failure:
}
#ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_police opt;
@@ -1569,7 +1576,7 @@ static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
struct nlattr *nest;
if (cl->tparent)
@@ -1597,7 +1604,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
cl->qstats.qlen = cl->q->q.qlen;
cl->xstats.avgidle = cl->avgidle;
@@ -1617,7 +1624,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue,
@@ -1640,10 +1647,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return 0;
}
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
return cl->q;
}
@@ -1682,13 +1688,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
kfree(cl);
}
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct hlist_node *n, *next;
struct cbq_class *cl;
- unsigned h;
+ unsigned int h;
#ifdef CONFIG_NET_CLS_ACT
q->rx_class = NULL;
@@ -1712,7 +1717,7 @@ cbq_destroy(struct Qdisc* sch)
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
@@ -1735,7 +1740,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
{
int err;
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)*arg;
+ struct cbq_class *cl = (struct cbq_class *)*arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_CBQ_MAX + 1];
struct cbq_class *parent;
@@ -1827,13 +1832,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (classid) {
err = -EINVAL;
- if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+ if (TC_H_MAJ(classid ^ sch->handle) ||
+ cbq_class_lookup(q, classid))
goto failure;
} else {
int i;
- classid = TC_H_MAKE(sch->handle,0x8000);
+ classid = TC_H_MAKE(sch->handle, 0x8000);
- for (i=0; i<0x8000; i++) {
+ for (i = 0; i < 0x8000; i++) {
if (++q->hgenerator >= 0x8000)
q->hgenerator = 1;
if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1890,11 +1896,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->minidle = -0x7FFFFFFF;
cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- if (cl->ewma_log==0)
+ if (cl->ewma_log == 0)
cl->ewma_log = q->link.ewma_log;
- if (cl->maxidle==0)
+ if (cl->maxidle == 0)
cl->maxidle = q->link.maxidle;
- if (cl->avpkt==0)
+ if (cl->avpkt == 0)
cl->avpkt = q->link.avpkt;
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1920,7 +1926,7 @@ failure:
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
unsigned int qlen;
if (cl->filters || cl->children || cl == &q->link)
@@ -1978,7 +1984,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *p = (struct cbq_class*)parent;
+ struct cbq_class *p = (struct cbq_class *)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
@@ -1992,7 +1998,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
cl->filters--;
}
@@ -2002,7 +2008,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
struct hlist_node *n;
- unsigned h;
+ unsigned int h;
if (arg->stop)
return;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
new file mode 100644
index 000000000000..06afbaeb4c88
--- /dev/null
+++ b/net/sched/sch_choke.c
@@ -0,0 +1,688 @@
+/*
+ * net/sched/sch_choke.c CHOKE scheduler
+ *
+ * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/red.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+/*
+ CHOKe stateless AQM for fair bandwidth allocation
+ =================================================
+
+ CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
+ unresponsive flows) is a variant of RED that penalizes misbehaving flows but
+ maintains no flow state. The difference from RED is an additional step
+ during the enqueuing process. If average queue size is over the
+ low threshold (qmin), a packet is chosen at random from the queue.
+ If both the new and chosen packet are from the same flow, both
+ are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
+ needs to access packets in queue randomly. It has a minimal class
+ interface to allow overriding the builtin flow classifier with
+ filters.
+
+ Source:
+ R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
+ Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
+ IEEE INFOCOM, 2000.
+
+ A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
+ Characteristics", IEEE/ACM Transactions on Networking, 2004
+
+ */
+
+/* Upper bound on size of sk_buff table (packets) */
+#define CHOKE_MAX_QUEUE (128*1024 - 1)
+
+struct choke_sched_data {
+/* Parameters */
+ u32 limit;
+ unsigned char flags;
+
+ struct red_parms parms;
+
+/* Variables */
+ struct tcf_proto *filter_list;
+ struct {
+ u32 prob_drop; /* Early probability drops */
+ u32 prob_mark; /* Early probability marks */
+ u32 forced_drop; /* Forced drops, qavg > max_thresh */
+ u32 forced_mark; /* Forced marks, qavg > max_thresh */
+ u32 pdrop; /* Drops due to queue limits */
+ u32 other; /* Drops due to drop() calls */
+ u32 matched; /* Drops to flow match */
+ } stats;
+
+ unsigned int head;
+ unsigned int tail;
+
+ unsigned int tab_mask; /* size - 1 */
+
+ struct sk_buff **tab;
+};
+
+/* deliver a random number between 0 and N - 1 */
+static u32 random_N(unsigned int N)
+{
+ return reciprocal_divide(random32(), N);
+}
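+
+/* Aside (illustration, not part of the patch): random_N() leans on
+ * reciprocal_divide(x, N), which in this usage computes (u64)x * N >> 32,
+ * so a uniform 32-bit input maps to an index in [0, N) without a modulo.
+ * A small standalone check of that arithmetic, assuming only standard C
+ * (rand() below is a crude stand-in for random32()):
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *     #include <stdlib.h>
+ *
+ *     static uint32_t scale_to_range(uint32_t x, uint32_t n)
+ *     {
+ *             return (uint32_t)(((uint64_t)x * n) >> 32);
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             uint32_t n = 1000, counts[4] = {0};
+ *
+ *             for (int i = 0; i < 1000000; i++) {
+ *                     uint32_t x = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
+ *                     uint32_t idx = scale_to_range(x, n);
+ *
+ *                     if (idx >= n)
+ *                             return 1;       // never happens: result < n
+ *                     counts[idx * 4 / n]++;  // bucket into quarters
+ *             }
+ *             printf("%u %u %u %u\n", counts[0], counts[1], counts[2], counts[3]);
+ *             return 0;
+ *     }
+ */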
+
+/* number of elements in queue including holes */
+static unsigned int choke_len(const struct choke_sched_data *q)
+{
+ return (q->tail - q->head) & q->tab_mask;
+}
+
+/* Is ECN parameter configured */
+static int use_ecn(const struct choke_sched_data *q)
+{
+ return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max just be dropped (versus marked) */
+static int use_harddrop(const struct choke_sched_data *q)
+{
+ return q->flags & TC_RED_HARDDROP;
+}
+
+/* Move head pointer forward to skip over holes */
+static void choke_zap_head_holes(struct choke_sched_data *q)
+{
+ do {
+ q->head = (q->head + 1) & q->tab_mask;
+ if (q->head == q->tail)
+ break;
+ } while (q->tab[q->head] == NULL);
+}
+
+/* Move tail pointer backwards to reuse holes */
+static void choke_zap_tail_holes(struct choke_sched_data *q)
+{
+ do {
+ q->tail = (q->tail - 1) & q->tab_mask;
+ if (q->head == q->tail)
+ break;
+ } while (q->tab[q->tail] == NULL);
+}
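
choke_len() and the two zap helpers manage a power-of-two ring whose slots can be NULLed out when a victim is removed from the middle, leaving holes that the head and tail pointers later skip over. A compact userspace model of that bookkeeping, with an arbitrary table size chosen purely for illustration:

    #include <stdio.h>

    #define MASK 7                      /* table size 8, "size - 1" like q->tab_mask */

    static void *tab[MASK + 1];
    static unsigned int head, tail;

    static unsigned int ring_len(void)  /* mirrors choke_len() */
    {
            return (tail - head) & MASK;
    }

    static void zap_head_holes(void)    /* mirrors choke_zap_head_holes() */
    {
            do {
                    head = (head + 1) & MASK;
                    if (head == tail)
                            break;
            } while (tab[head] == NULL);
    }

    int main(void)
    {
            int a = 1, b = 2, c = 3;

            tab[tail] = &a; tail = (tail + 1) & MASK;
            tab[tail] = &b; tail = (tail + 1) & MASK;
            tab[tail] = &c; tail = (tail + 1) & MASK;

            tab[1] = NULL;              /* drop the middle element: a hole */

            tab[head] = NULL;           /* dequeue at head ... */
            zap_head_holes();           /* ... and skip the hole behind it */

            printf("len=%u head=%u tail=%u\n", ring_len(), head, tail); /* len=1 head=2 tail=3 */
            return 0;
    }
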
+
+/* Drop packet from queue array by creating a "hole" */
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = q->tab[idx];
+
+ q->tab[idx] = NULL;
+
+ if (idx == q->head)
+ choke_zap_head_holes(q);
+ if (idx == q->tail)
+ choke_zap_tail_holes(q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ qdisc_tree_decrease_qlen(sch, 1);
+ --sch->q.qlen;
+}
+
+/*
+ * Compare flow of two packets
+ * Returns true only if source and destination address and port match.
+ * false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+ struct sk_buff *skb2)
+{
+ int off1, off2, poff;
+ const u32 *ports1, *ports2;
+ u8 ip_proto;
+ __u32 hash1;
+
+ if (skb1->protocol != skb2->protocol)
+ return false;
+
+ /* Use hash value as quick check
+ * Assumes that __skb_get_rxhash makes IP header and ports linear
+ */
+ hash1 = skb_get_rxhash(skb1);
+ if (!hash1 || hash1 != skb_get_rxhash(skb2))
+ return false;
+
+ /* Probably match, but be sure to avoid hash collisions */
+ off1 = skb_network_offset(skb1);
+ off2 = skb_network_offset(skb2);
+
+ switch (skb1->protocol) {
+ case __constant_htons(ETH_P_IP): {
+ const struct iphdr *ip1, *ip2;
+
+ ip1 = (const struct iphdr *) (skb1->data + off1);
+ ip2 = (const struct iphdr *) (skb2->data + off2);
+
+ ip_proto = ip1->protocol;
+ if (ip_proto != ip2->protocol ||
+ ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
+ return false;
+
+ if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
+ ip_proto = 0;
+ off1 += ip1->ihl * 4;
+ off2 += ip2->ihl * 4;
+ break;
+ }
+
+ case __constant_htons(ETH_P_IPV6): {
+ const struct ipv6hdr *ip1, *ip2;
+
+ ip1 = (const struct ipv6hdr *) (skb1->data + off1);
+ ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+
+ ip_proto = ip1->nexthdr;
+ if (ip_proto != ip2->nexthdr ||
+ ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
+ ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
+ return false;
+		off1 += 40;
+		off2 += 40;
+		break;
+	}
+
+ default: /* Maybe compare MAC header here? */
+ return false;
+ }
+
+ poff = proto_ports_offset(ip_proto);
+ if (poff < 0)
+ return true;
+
+ off1 += poff;
+ off2 += poff;
+
+ ports1 = (__force u32 *)(skb1->data + off1);
+ ports2 = (__force u32 *)(skb2->data + off2);
+ return *ports1 == *ports2;
+}
+
+struct choke_skb_cb {
+ u16 classid;
+};
+
+static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(skb->cb) <
+ sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+ return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
+{
+ choke_skb_cb(skb)->classid = classid;
+}
+
+static u16 choke_get_classid(const struct sk_buff *skb)
+{
+ return choke_skb_cb(skb)->classid;
+}
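
choke_skb_cb() and the classid accessors above stash the classifier verdict in the qdisc scratch area of skb->cb, with BUILD_BUG_ON() rejecting builds where the private state would overflow it. The same idea in portable C, using _Static_assert for the compile-time check; the 48-byte scratch size below is an assumption made for the example, not a statement about sizeof(skb->cb):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_skb {                   /* stand-in for struct sk_buff */
            _Alignas(8) char cb[48];    /* fixed scratch area, assumed size */
    };

    struct my_cb {                      /* private per-packet state, like choke_skb_cb */
            uint16_t classid;
    };

    /* refuse to build if the private state no longer fits in the scratch area */
    _Static_assert(sizeof(struct my_cb) <= sizeof(((struct fake_skb *)0)->cb),
                   "private cb state too large");

    static struct my_cb *get_cb(struct fake_skb *skb)
    {
            return (struct my_cb *)skb->cb;
    }

    int main(void)
    {
            struct fake_skb skb;

            memset(&skb, 0, sizeof(skb));
            get_cb(&skb)->classid = 42;                     /* set_classid */
            printf("classid=%u\n", get_cb(&skb)->classid);  /* get_classid */
            return 0;
    }
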
+
+/*
+ * Classify flow using either:
+ * 1. pre-existing classification result in skb
+ * 2. fast internal classification
+ * 3. use TC filter based classification
+ */
+static bool choke_classify(struct sk_buff *skb,
+ struct Qdisc *sch, int *qerr)
+
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct tcf_result res;
+ int result;
+
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return false;
+ }
+#endif
+ choke_set_classid(skb, TC_H_MIN(res.classid));
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Select a packet at random from queue
+ * HACK: since queue can have holes from previous deletion; retry several
+ * times to find a random skb but then just give up and return the head
+ * Will return NULL if queue is empty (q->head == q->tail)
+ */
+static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+ unsigned int *pidx)
+{
+ struct sk_buff *skb;
+ int retrys = 3;
+
+ do {
+ *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+ skb = q->tab[*pidx];
+ if (skb)
+ return skb;
+ } while (--retrys > 0);
+
+ return q->tab[*pidx = q->head];
+}
+
+/*
+ * Compare new packet with random packet in queue
+ * returns true if matched and sets *pidx
+ */
+static bool choke_match_random(const struct choke_sched_data *q,
+ struct sk_buff *nskb,
+ unsigned int *pidx)
+{
+ struct sk_buff *oskb;
+
+ if (q->head == q->tail)
+ return false;
+
+ oskb = choke_peek_random(q, pidx);
+ if (q->filter_list)
+ return choke_get_classid(nskb) == choke_get_classid(oskb);
+
+ return choke_match_flow(oskb, nskb);
+}
+
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct red_parms *p = &q->parms;
+ int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+ if (q->filter_list) {
+ /* If using external classifiers, get result and record it. */
+ if (!choke_classify(skb, sch, &ret))
+ goto other_drop; /* Packet was eaten by filter */
+ }
+
+ /* Compute average queue usage (see RED) */
+ p->qavg = red_calc_qavg(p, sch->q.qlen);
+ if (red_is_idling(p))
+ red_end_of_idle_period(p);
+
+ /* Is queue small? */
+ if (p->qavg <= p->qth_min)
+ p->qcount = -1;
+ else {
+ unsigned int idx;
+
+ /* Draw a packet at random from queue and compare flow */
+ if (choke_match_random(q, skb, &idx)) {
+ q->stats.matched++;
+ choke_drop_by_idx(sch, idx);
+ goto congestion_drop;
+ }
+
+ /* Queue is large, always mark/drop */
+ if (p->qavg > p->qth_max) {
+ p->qcount = -1;
+
+ sch->qstats.overlimits++;
+ if (use_harddrop(q) || !use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.forced_mark++;
+ } else if (++p->qcount) {
+ if (red_mark_probability(p, p->qavg)) {
+ p->qcount = 0;
+ p->qR = red_random(p);
+
+ sch->qstats.overlimits++;
+ if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ }
+ } else
+ p->qR = red_random(p);
+ }
+
+ /* Admit new packet */
+ if (sch->q.qlen < q->limit) {
+ q->tab[q->tail] = skb;
+ q->tail = (q->tail + 1) & q->tab_mask;
+ ++sch->q.qlen;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ q->stats.pdrop++;
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+
+ congestion_drop:
+ qdisc_drop(skb, sch);
+ return NET_XMIT_CN;
+
+ other_drop:
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+}
+
+static struct sk_buff *choke_dequeue(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ if (q->head == q->tail) {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ return NULL;
+ }
+
+ skb = q->tab[q->head];
+ q->tab[q->head] = NULL;
+ choke_zap_head_holes(q);
+ --sch->q.qlen;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+
+ return skb;
+}
+
+static unsigned int choke_drop(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ unsigned int len;
+
+ len = qdisc_queue_drop(sch);
+ if (len > 0)
+ q->stats.other++;
+ else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
+
+ return len;
+}
+
+static void choke_reset(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ red_restart(&q->parms);
+}
+
+static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
+ [TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) },
+ [TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE },
+};
+
+static void choke_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CHOKE_MAX + 1];
+ const struct tc_red_qopt *ctl;
+ int err;
+ struct sk_buff **old = NULL;
+ unsigned int mask;
+
+ if (opt == NULL)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CHOKE_PARMS] == NULL ||
+ tb[TCA_CHOKE_STAB] == NULL)
+ return -EINVAL;
+
+ ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+
+ if (ctl->limit > CHOKE_MAX_QUEUE)
+ return -EINVAL;
+
+ mask = roundup_pow_of_two(ctl->limit + 1) - 1;
+ if (mask != q->tab_mask) {
+ struct sk_buff **ntab;
+
+ ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+ if (!ntab)
+ ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+ if (!ntab)
+ return -ENOMEM;
+
+ sch_tree_lock(sch);
+ old = q->tab;
+ if (old) {
+ unsigned int oqlen = sch->q.qlen, tail = 0;
+
+ while (q->head != q->tail) {
+ struct sk_buff *skb = q->tab[q->head];
+
+ q->head = (q->head + 1) & q->tab_mask;
+ if (!skb)
+ continue;
+ if (tail < mask) {
+ ntab[tail++] = skb;
+ continue;
+ }
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ --sch->q.qlen;
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+ q->head = 0;
+ q->tail = tail;
+ }
+
+ q->tab_mask = mask;
+ q->tab = ntab;
+ } else
+ sch_tree_lock(sch);
+
+ q->flags = ctl->flags;
+ q->limit = ctl->limit;
+
+ red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Plog, ctl->Scell_log,
+ nla_data(tb[TCA_CHOKE_STAB]));
+
+ if (q->head == q->tail)
+ red_end_of_idle_period(&q->parms);
+
+ sch_tree_unlock(sch);
+ choke_free(old);
+ return 0;
+}
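
The table sizing here uses the usual power-of-two ring-buffer trick: the
table gets roundup_pow_of_two(limit + 1) slots, so head/tail indices wrap
with a cheap bitwise AND against the mask. A small user-space illustration
(the helper below is only a stand-in for the kernel's roundup_pow_of_two()):

#include <stdio.h>

/* Stand-in: smallest power of two >= x, for x >= 1. */
static unsigned int roundup_pow_of_two(unsigned int x)
{
        unsigned int p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int limit = 1000;
        unsigned int mask = roundup_pow_of_two(limit + 1) - 1;  /* 1023 */
        unsigned int tail = 1023;

        printf("mask=%u\n", mask);
        printf("index after %u wraps to %u\n", tail, (tail + 1) & mask);
        return 0;
}
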
+
+static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ return choke_change(sch, opt);
+}
+
+static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts = NULL;
+ struct tc_red_qopt opt = {
+ .limit = q->limit,
+ .flags = q->flags,
+ .qth_min = q->parms.qth_min >> q->parms.Wlog,
+ .qth_max = q->parms.qth_max >> q->parms.Wlog,
+ .Wlog = q->parms.Wlog,
+ .Plog = q->parms.Plog,
+ .Scell_log = q->parms.Scell_log,
+ };
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -EMSGSIZE;
+}
+
+static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct tc_choke_xstats st = {
+ .early = q->stats.prob_drop + q->stats.forced_drop,
+ .marked = q->stats.prob_mark + q->stats.forced_mark,
+ .pdrop = q->stats.pdrop,
+ .other = q->stats.other,
+ .matched = q->stats.matched,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void choke_destroy(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ choke_free(q->tab);
+}
+
+static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long choke_get(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static void choke_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ if (!arg->stop) {
+ if (arg->fn(sch, 1, arg) < 0) {
+ arg->stop = 1;
+ return;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops choke_class_ops = {
+ .leaf = choke_leaf,
+ .get = choke_get,
+ .put = choke_put,
+ .tcf_chain = choke_find_tcf,
+ .bind_tcf = choke_bind,
+ .unbind_tcf = choke_put,
+ .dump = choke_dump_class,
+ .walk = choke_walk,
+};
+
+static struct sk_buff *choke_peek_head(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ return (q->head != q->tail) ? q->tab[q->head] : NULL;
+}
+
+static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
+ .id = "choke",
+ .priv_size = sizeof(struct choke_sched_data),
+
+ .enqueue = choke_enqueue,
+ .dequeue = choke_dequeue,
+ .peek = choke_peek_head,
+ .drop = choke_drop,
+ .init = choke_init,
+ .destroy = choke_destroy,
+ .reset = choke_reset,
+ .change = choke_change,
+ .dump = choke_dump,
+ .dump_stats = choke_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init choke_module_init(void)
+{
+ return register_qdisc(&choke_qdisc_ops);
+}
+
+static void __exit choke_module_exit(void)
+{
+ unregister_qdisc(&choke_qdisc_ops);
+}
+
+module_init(choke_module_init)
+module_exit(choke_module_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 0f7bf3fdfea5..2c790204d042 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
if (tb[TCA_DSMARK_VALUE])
- p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+ p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
if (tb[TCA_DSMARK_MASK])
- p->mask[*arg-1] = mask;
+ p->mask[*arg - 1] = mask;
err = 0;
@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
if (!dsmark_valid_index(p, arg))
return -EINVAL;
- p->mask[arg-1] = 0xff;
- p->value[arg-1] = 0;
+ p->mask[arg - 1] = 0xff;
+ p->value[arg - 1] = 0;
return 0;
}
@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
if (p->mask[i] == 0xff && !p->value[i])
goto ignore;
if (walker->count >= walker->skip) {
- if (walker->fn(sch, i+1, walker) < 0) {
+ if (walker->fn(sch, i + 1, walker) < 0) {
walker->stop = 1;
break;
}
@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass.
*/
if (p->mask[index] != 0xff || p->value[index])
- printk(KERN_WARNING
- "dsmark_dequeue: unsupported protocol %d\n",
- ntohs(skb->protocol));
+ pr_warning("dsmark_dequeue: unsupported protocol %d\n",
+ ntohs(skb->protocol));
break;
}
@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
if (!dsmark_valid_index(p, cl))
return -EINVAL;
- tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+ tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
tcm->tcm_info = p->q->handle;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
- NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+ NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
+ NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
return nla_nest_end(skb, opts);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index d468b479aa93..be33f9ddf9dd 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,12 +19,11 @@
/* 1 band FIFO pseudo-"scheduler" */
-struct fifo_sched_data
-{
+struct fifo_sched_data {
u32 limit;
};
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -44,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -62,11 +61,13 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
{
struct fifo_sched_data *q = qdisc_priv(sch);
+ bool bypass;
+ bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
if (opt == NULL) {
u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
- if (sch->ops == &bfifo_qdisc_ops)
+ if (is_bfifo)
limit *= psched_mtu(qdisc_dev(sch));
q->limit = limit;
@@ -79,6 +80,15 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = ctl->limit;
}
+ if (is_bfifo)
+ bypass = q->limit >= psched_mtu(qdisc_dev(sch));
+ else
+ bypass = q->limit >= 1;
+
+ if (bypass)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598440a2..0440d1013a83 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/
kfree_skb(skb);
if (net_ratelimit())
- printk(KERN_WARNING "Dead loop on netdevice %s, "
- "fix it urgently!\n", dev_queue->dev->name);
+ pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+ dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
- printk(KERN_WARNING "BUG %s code %d qlen %d\n",
- dev->name, ret, q->q.qlen);
+ pr_warning("BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
}
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
};
-static const u8 prio2band[TC_PRIO_MAX+1] =
- { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
+};
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
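
The per-packet band lookup in pfifo_fast is just an index into the table
above, masked to the TC_PRIO range (TC_PRIO_MAX is 15 in the kernel
headers). A tiny stand-alone illustration with an arbitrary priority value:

#include <stdio.h>

#define TC_PRIO_MAX 15

static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

int main(void)
{
        unsigned int priority = 6;      /* stands in for skb->priority */

        printf("priority %u -> band %u\n",
               priority, prio2band[priority & TC_PRIO_MAX]);
        return 0;
}
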
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
return priv->q + band;
}
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
return qdisc_drop(skb, qdisc);
}
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
return NULL;
}
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
return NULL;
}
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
{
int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+ memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len;
@@ -526,6 +527,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
skb_queue_head_init(band2list(priv, prio));
+ /* Can by-pass the queue discipline */
+ qdisc->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
@@ -540,6 +543,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
+EXPORT_SYMBOL(pfifo_fast_ops);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops)
@@ -630,7 +634,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
#ifdef CONFIG_NET_SCHED
qdisc_list_del(qdisc);
- qdisc_put_stab(qdisc->stab);
+ qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
if (ops->reset)
@@ -674,25 +678,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
return oqdisc;
}
+EXPORT_SYMBOL(dev_graft_qdisc);
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc = &noqueue_qdisc;
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue,
&pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) {
- printk(KERN_INFO "%s: activation failed\n", dev->name);
+ netdev_info(dev, "activation failed\n");
return;
}
-
- /* Can by-pass the queue discipline for default qdisc */
- qdisc->flags |= TCQ_F_CAN_BYPASS;
- } else {
- qdisc = &noqueue_qdisc;
}
dev_queue->qdisc_sleeping = qdisc;
}
@@ -761,6 +761,7 @@ void dev_activate(struct net_device *dev)
dev_watchdog_up(dev);
}
}
+EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
@@ -839,7 +840,9 @@ void dev_deactivate(struct net_device *dev)
list_add(&dev->unreg_list, &single);
dev_deactivate_many(&single);
+ list_del(&single);
}
+EXPORT_SYMBOL(dev_deactivate);
static void dev_init_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 51dcc2aa5c92..b9493a09a870 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -32,8 +32,7 @@
struct gred_sched_data;
struct gred_sched;
-struct gred_sched_data
-{
+struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop parameters */
u32 bytesin; /* bytes seen on virtualQ so far */
@@ -50,8 +49,7 @@ enum {
GRED_RIO_MODE,
};
-struct gred_sched
-{
+struct gred_sched {
struct gred_sched_data *tab[MAX_DPs];
unsigned long flags;
u32 red_flags;
@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
return t->red_flags & TC_RED_HARDDROP;
}
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct gred_sched_data *q=NULL;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched_data *q = NULL;
+ struct gred_sched *t = qdisc_priv(sch);
unsigned long qavg = 0;
u16 dp = tc_index_to_dp(skb);
- if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def;
- if ((q = t->tab[dp]) == NULL) {
+ q = t->tab[dp];
+ if (!q) {
/* Pass through packets not assigned to a DP
* if no default DP has been configured. This
* allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio &&
!red_is_idling(&t->tab[i]->parms))
- qavg +=t->tab[i]->parms.qavg;
+ qavg += t->tab[i]->parms.qavg;
}
}
@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
gred_store_wred_set(t, q);
switch (red_action(&q->parms, q->parms.qavg + qavg)) {
- case RED_DONT_MARK:
- break;
-
- case RED_PROB_MARK:
- sch->qstats.overlimits++;
- if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
- goto congestion_drop;
- }
-
- q->stats.prob_mark++;
- break;
-
- case RED_HARD_MARK:
- sch->qstats.overlimits++;
- if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
- !INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
- goto congestion_drop;
- }
- q->stats.forced_mark++;
- break;
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+ q->stats.forced_mark++;
+ break;
}
if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@ congestion_drop:
return NET_XMIT_CN;
}
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
- printk(KERN_WARNING "GRED: Unable to relocate "
- "VQ 0x%x after dequeue, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ pr_warning("GRED: Unable to relocate VQ 0x%x "
+ "after dequeue, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
return NULL;
}
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
- printk(KERN_WARNING "GRED: Unable to relocate "
- "VQ 0x%x while dropping, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ pr_warning("GRED: Unable to relocate VQ 0x%x "
+ "while dropping, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= len;
q->stats.other++;
@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
}
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
{
int i;
struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
- printk(KERN_WARNING "GRED: Warning: Destroying "
- "shadowed VQ 0x%x\n", i);
+ pr_warning("GRED: Warning: Destroying "
+ "shadowed VQ 0x%x\n", i);
gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL;
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 14a799de1c35..6488e6425652 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -81,8 +81,7 @@
* that are expensive on 32-bit architectures.
*/
-struct internal_sc
-{
+struct internal_sc {
u64 sm1; /* scaled slope of the 1st segment */
u64 ism1; /* scaled inverse-slope of the 1st segment */
u64 dx; /* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
};
/* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
u64 x; /* current starting position on x-axis */
u64 y; /* current starting position on y-axis */
u64 sm1; /* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
u64 ism2; /* scaled inverse-slope of the 2nd segment */
};
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
HFSC_RSC = 0x1,
HFSC_FSC = 0x2,
HFSC_USC = 0x4
};
-struct hfsc_class
-{
+struct hfsc_class {
struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */
@@ -140,8 +136,8 @@ struct hfsc_class
u64 cl_cumul; /* cumulative work in bytes done by
real-time criteria */
- u64 cl_d; /* deadline*/
- u64 cl_e; /* eligible time */
+ u64 cl_d; /* deadline*/
+ u64 cl_e; /* eligible time */
u64 cl_vt; /* virtual time */
u64 cl_f; /* time when this class will fit for
link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
unsigned long cl_nactive; /* number of active children */
};
-struct hfsc_sched
-{
+struct hfsc_sched {
u16 defcls; /* default class id */
struct hfsc_class root; /* root class */
struct Qdisc_class_hash clhash; /* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
if (go_active) {
n = rb_last(&cl->cl_parent->vt_tree);
if (n != NULL) {
- max_cl = rb_entry(n, struct hfsc_class,vt_node);
+ max_cl = rb_entry(n, struct hfsc_class, vt_node);
/*
* set vt to the average of the min and max
* classes. if the parent's period didn't
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL;
}
#endif
- if ((cl = (struct hfsc_class *)res.class) == NULL) {
- if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+ cl = (struct hfsc_class *)res.class;
+ if (!cl) {
+ cl = hfsc_find_class(res.classid, sch);
+ if (!cl)
break; /* filter selected invalid classid */
if (cl->level >= head->level)
break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
return -1;
}
-static inline int
+static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
struct hfsc_class *cl;
u64 next_time = 0;
- if ((cl = eltree_get_minel(q)) != NULL)
+ cl = eltree_get_minel(q);
+ if (cl)
next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1625,7 +1623,8 @@ hfsc_dequeue(struct Qdisc *sch)
* find the class with the minimum deadline among
* the eligible classes.
*/
- if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+ cl = eltree_get_mindl(q, cur_time);
+ if (cl) {
realtime = 1;
} else {
/*
@@ -1664,7 +1663,7 @@ hfsc_dequeue(struct Qdisc *sch)
set_passive(cl);
}
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index fc12fe6f5597..e1429a85091f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -99,9 +99,10 @@ struct htb_class {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
/* When class changes from state 1->2 and disconnects from
- parent's feed then we lost ptr value and start from the
- first child again. Here we store classid of the
- last valid ptr (used when ptr is NULL). */
+ * parent's feed then we lose the ptr value and start from the
+ * first child again. Here we store classid of the
+ * last valid ptr (used when ptr is NULL).
+ */
u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
* have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
* then finish and return direct queue.
*/
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int result;
/* allow to select class by setting skb->priority to valid classid;
- note that nfmark can be used too by attaching filter fw with no
- rules in it */
+ * note that nfmark can be used too by attaching filter fw with no
+ * rules in it
+ */
if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */
- if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+ cl = htb_find(skb->priority, sch);
+ if (cl && cl->level == 0)
return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
return NULL;
}
#endif
- if ((cl = (void *)res.class) == NULL) {
+ cl = (void *)res.class;
+ if (!cl) {
if (res.classid == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) */
- if ((cl = htb_find(res.classid, sch)) == NULL)
+ cl = htb_find(res.classid, sch);
+ if (!cl)
break; /* filter selected invalid classid */
}
if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.feed[prio].rb_node)
/* parent already has its feed in use so that
- reset bit in mask as parent is already ok */
+ * reset bit in mask as parent is already ok
+ */
mask &= ~(1 << prio);
htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.ptr[prio] == cl->node + prio) {
/* we are removing child which is pointed to from
- parent feed - forget the pointer but remember
- classid */
+ * parent feed - forget the pointer but remember
+ * classid
+ */
p->un.inner.last_ptr_id[prio] = cl->common.classid;
p->un.inner.ptr[prio] = NULL;
}
@@ -663,8 +670,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
- 1 to simplify things when jiffy is going to be incremented
- too soon */
+ * 1 to simplify things when jiffy is going to be incremented
+ * too soon
+ */
unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
@@ -687,7 +695,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
/* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
- printk(KERN_WARNING "htb: too many events!\n");
+ pr_warning("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS;
}
@@ -695,7 +703,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
}
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
- is no such one exists. */
+ * if no such one exists.
+ */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
u32 id)
{
@@ -739,12 +748,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
- the original or next ptr */
+ * the original or next ptr
+ */
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
*sp->pid = 0; /* ptr is valid now so that remove this hint as it
- can become out of date quickly */
+ * can become out of date quickly
+ */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
@@ -772,7 +783,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
}
/* dequeues packet at given priority and level; call only if
- you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
int level)
{
@@ -789,9 +801,10 @@ next:
return NULL;
/* class can be empty - it is unlikely but can be true if leaf
- qdisc drops packets in enqueue routine or if someone used
- graft operation on the leaf since last dequeue;
- simply deactivate and skip such class */
+ * qdisc drops packets in enqueue routine or if someone used
+ * graft operation on the leaf since last dequeue;
+ * simply deactivate and skip such class
+ */
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
@@ -831,7 +844,8 @@ next:
ptr[0]) + prio);
}
/* this used to be after charge_class but this constellation
- gives us slightly better performance */
+ * gives us slightly better performance
+ */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
@@ -852,7 +866,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
sch->q.qlen--;
return skb;
}
@@ -883,6 +897,7 @@ ok:
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
+
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL))
@@ -987,13 +1002,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
return err;
if (tb[TCA_HTB_INIT] == NULL) {
- printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+ pr_err("HTB: hey probably you have bad tc tool ?\n");
return -EINVAL;
}
gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16) {
- printk(KERN_ERR
- "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+ pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
return -EINVAL;
}
@@ -1206,9 +1220,10 @@ static void htb_destroy(struct Qdisc *sch)
cancel_work_sync(&q->work);
qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after htb_destroy_class call below
- and surprisingly it worked in 2.4. But it must precede it
- because filter need its target class alive to be able to call
- unbind_filter on it (without Oops). */
+ * and surprisingly it worked in 2.4. But it must precede it
+ * because the filter needs its target class alive to be able to call
+ * unbind_filter on it (without Oops).
+ */
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1342,11 +1357,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) {
- printk(KERN_ERR "htb: tree is too deep\n");
+ pr_err("htb: tree is too deep\n");
goto failure;
}
err = -ENOBUFS;
- if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
goto failure;
err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1366,8 +1382,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
RB_CLEAR_NODE(&cl->node[prio]);
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
- so that can't be used inside of sch_tree_lock
- -- thanks to Karlis Peisenieks */
+ * so that can't be used inside of sch_tree_lock
+ * -- thanks to Karlis Peisenieks
+ */
new_q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
sch_tree_lock(sch);
@@ -1419,17 +1436,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
}
/* it used to be a nasty bug here, we have to check that node
- is really leaf before changing cl->un.leaf ! */
+ * is really leaf before changing cl->un.leaf !
+ */
if (!cl->level) {
cl->quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->quantum < 1000) {
- printk(KERN_WARNING
+ pr_warning(
"HTB: quantum of class %X is small. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 1000;
}
if (!hopt->quantum && cl->quantum > 200000) {
- printk(KERN_WARNING
+ pr_warning(
"HTB: quantum of class %X is big. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 200000;
@@ -1478,13 +1496,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0;
- The line above used to be there to prevent attaching filters to
- leaves. But at least tc_index filter uses this just to get class
- for other reasons so that we have to allow for it.
- ----
- 19.6.2002 As Werner explained it is ok - bind filter is just
- another way to "lock" the class - unlike "get" this lock can
- be broken by class during destroy IIUC.
+ * The line above used to be there to prevent attaching filters to
+ * leaves. But at least tc_index filter uses this just to get class
+ * for other reasons so that we have to allow for it.
+ * ----
+ * 19.6.2002 As Werner explained it is ok - bind filter is just
+ * another way to "lock" the class - unlike "get" this lock can
+ * be broken by class during destroy IIUC.
*/
if (cl)
cl->filter_cnt++;
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index ecc302f4d2a1..ec5cbc848963 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -61,7 +61,6 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
goto err;
- qdisc->flags |= TCQ_F_CAN_BYPASS;
priv->qdiscs[ntx] = qdisc;
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
new file mode 100644
index 000000000000..ea17cbed29ef
--- /dev/null
+++ b/net/sched/sch_mqprio.c
@@ -0,0 +1,418 @@
+/*
+ * net/sched/sch_mqprio.c
+ *
+ * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+
+struct mqprio_sched {
+ struct Qdisc **qdiscs;
+ int hw_owned;
+};
+
+static void mqprio_destroy(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ unsigned int ntx;
+
+ if (priv->qdiscs) {
+ for (ntx = 0;
+ ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+ ntx++)
+ qdisc_destroy(priv->qdiscs[ntx]);
+ kfree(priv->qdiscs);
+ }
+
+ if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+ dev->netdev_ops->ndo_setup_tc(dev, 0);
+ else
+ netdev_set_num_tc(dev, 0);
+}
+
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+ int i, j;
+
+ /* Verify num_tc is not out of max range */
+ if (qopt->num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ /* Verify priority mapping uses valid tcs */
+ for (i = 0; i < TC_BITMASK + 1; i++) {
+ if (qopt->prio_tc_map[i] >= qopt->num_tc)
+ return -EINVAL;
+ }
+
+ /* net_device does not support requested operation */
+ if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+ return -EINVAL;
+
+ /* if hw owned, qcount and qoffset are taken from the LLD, so
+ * there is no reason to verify them here
+ */
+ if (qopt->hw)
+ return 0;
+
+ for (i = 0; i < qopt->num_tc; i++) {
+ unsigned int last = qopt->offset[i] + qopt->count[i];
+
+ /* Verify the queue count is in tx range; being equal to
+ * real_num_tx_queues indicates the last queue is in use.
+ */
+ if (qopt->offset[i] >= dev->real_num_tx_queues ||
+ !qopt->count[i] ||
+ last > dev->real_num_tx_queues)
+ return -EINVAL;
+
+ /* Verify that the offset and counts do not overlap */
+ for (j = i + 1; j < qopt->num_tc; j++) {
+ if (last > qopt->offset[j])
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
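
The checks above boil down to two conditions per traffic class: the class's
[offset, offset + count) window must fit inside real_num_tx_queues, and a
later class must not start before an earlier one ends. A stripped-down
user-space version of the same validation (check_tc_map() and the sample
mapping are illustrative assumptions):

#include <stdio.h>

/* Return 0 if every tc's queue window is in range and windows don't
 * overlap, -1 otherwise. Assumes offsets are given in ascending order,
 * as the kernel check does.
 */
static int check_tc_map(int num_tc, const unsigned int offset[],
                        const unsigned int count[],
                        unsigned int real_num_tx_queues)
{
        int i, j;

        for (i = 0; i < num_tc; i++) {
                unsigned int last = offset[i] + count[i];

                if (offset[i] >= real_num_tx_queues || !count[i] ||
                    last > real_num_tx_queues)
                        return -1;

                for (j = i + 1; j < num_tc; j++)
                        if (last > offset[j])
                                return -1;
        }
        return 0;
}

int main(void)
{
        unsigned int offset[] = { 0, 4 }, count[] = { 4, 4 };

        printf("%s\n", check_tc_map(2, offset, count, 8) ? "invalid" : "ok");
        return 0;
}
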
+
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
+ int i, err = -EOPNOTSUPP;
+ struct tc_mqprio_qopt *qopt = NULL;
+
+ BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+ BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
+
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+ if (nla_len(opt) < sizeof(*qopt))
+ return -EINVAL;
+
+ qopt = nla_data(opt);
+ if (mqprio_parse_opt(dev, qopt))
+ return -EINVAL;
+
+ /* pre-allocate qdisc, attachment can't fail */
+ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+ GFP_KERNEL);
+ if (priv->qdiscs == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ dev_queue = netdev_get_tx_queue(dev, i);
+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1)));
+ if (qdisc == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+ priv->qdiscs[i] = qdisc;
+ }
+
+ /* If the mqprio options indicate that hardware should own
+ * the queue mapping then run ndo_setup_tc otherwise use the
+ * supplied and verified mapping
+ */
+ if (qopt->hw) {
+ priv->hw_owned = 1;
+ err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+ if (err)
+ goto err;
+ } else {
+ netdev_set_num_tc(dev, qopt->num_tc);
+ for (i = 0; i < qopt->num_tc; i++)
+ netdev_set_tc_queue(dev, i,
+ qopt->count[i], qopt->offset[i]);
+ }
+
+ /* Always use supplied priority mappings */
+ for (i = 0; i < TC_BITMASK + 1; i++)
+ netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+
+ sch->flags |= TCQ_F_MQROOT;
+ return 0;
+
+err:
+ mqprio_destroy(sch);
+ return err;
+}
+
+static void mqprio_attach(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct Qdisc *qdisc;
+ unsigned int ntx;
+
+ /* Attach underlying qdisc */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = priv->qdiscs[ntx];
+ qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (qdisc)
+ qdisc_destroy(qdisc);
+ }
+ kfree(priv->qdiscs);
+ priv->qdiscs = NULL;
+}
+
+static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
+ unsigned long cl)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);
+
+ if (ntx >= dev->num_tx_queues)
+ return NULL;
+ return netdev_get_tx_queue(dev, ntx);
+}
+
+static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return -EINVAL;
+
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+ *old = dev_graft_qdisc(dev_queue, new);
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+
+ return 0;
+}
+
+static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_mqprio_qopt opt = { 0 };
+ struct Qdisc *qdisc;
+ unsigned int i;
+
+ sch->q.qlen = 0;
+ memset(&sch->bstats, 0, sizeof(sch->bstats));
+ memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ spin_lock_bh(qdisc_lock(qdisc));
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.qlen += qdisc->qstats.qlen;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+
+ opt.num_tc = netdev_get_num_tc(dev);
+ memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+ opt.hw = priv->hw_owned;
+
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ opt.count[i] = dev->tc_to_txq[i].count;
+ opt.offset[i] = dev->tc_to_txq[i].offset;
+ }
+
+ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+ return skb->len;
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return NULL;
+
+ return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx = TC_H_MIN(classid);
+
+ if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
+ return 0;
+ return ntx;
+}
+
+static void mqprio_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (cl <= netdev_get_num_tc(dev)) {
+ tcm->tcm_parent = TC_H_ROOT;
+ tcm->tcm_info = 0;
+ } else {
+ int i;
+ struct netdev_queue *dev_queue;
+
+ dev_queue = mqprio_queue_get(sch, cl);
+ tcm->tcm_parent = 0;
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ struct netdev_tc_txq tc = dev->tc_to_txq[i];
+ int q_idx = cl - netdev_get_num_tc(dev);
+
+ if (q_idx > tc.offset &&
+ q_idx <= tc.offset + tc.count) {
+ tcm->tcm_parent =
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1));
+ break;
+ }
+ }
+ tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ }
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+ __releases(d->lock)
+ __acquires(d->lock)
+{
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (cl <= netdev_get_num_tc(dev)) {
+ int i;
+ struct Qdisc *qdisc;
+ struct gnet_stats_queue qstats = {0};
+ struct gnet_stats_basic_packed bstats = {0};
+ struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
+
+ /* Drop the lock here; it will be reclaimed before touching
+ * statistics. This is required because the d->lock we
+ * hold here is the lock on dev_queue->qdisc_sleeping,
+ * which is also acquired below.
+ */
+ spin_unlock_bh(d->lock);
+
+ for (i = tc.offset; i < tc.offset + tc.count; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ spin_lock_bh(qdisc_lock(qdisc));
+ bstats.bytes += qdisc->bstats.bytes;
+ bstats.packets += qdisc->bstats.packets;
+ qstats.qlen += qdisc->qstats.qlen;
+ qstats.backlog += qdisc->qstats.backlog;
+ qstats.drops += qdisc->qstats.drops;
+ qstats.requeues += qdisc->qstats.requeues;
+ qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ /* Reclaim root sleeping lock before completing stats */
+ spin_lock_bh(d->lock);
+ if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+ gnet_stats_copy_queue(d, &qstats) < 0)
+ return -1;
+ } else {
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ sch = dev_queue->qdisc_sleeping;
+ sch->qstats.qlen = sch->q.qlen;
+ if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, &sch->qstats) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx;
+
+ if (arg->stop)
+ return;
+
+ /* Walk hierarchy with a virtual class per tc */
+ arg->count = arg->skip;
+ for (ntx = arg->skip;
+ ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
+ ntx++) {
+ if (arg->fn(sch, ntx + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops mqprio_class_ops = {
+ .graft = mqprio_graft,
+ .leaf = mqprio_leaf,
+ .get = mqprio_get,
+ .put = mqprio_put,
+ .walk = mqprio_walk,
+ .dump = mqprio_dump_class,
+ .dump_stats = mqprio_dump_class_stats,
+};
+
+static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
+ .cl_ops = &mqprio_class_ops,
+ .id = "mqprio",
+ .priv_size = sizeof(struct mqprio_sched),
+ .init = mqprio_init,
+ .destroy = mqprio_destroy,
+ .attach = mqprio_attach,
+ .dump = mqprio_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init mqprio_module_init(void)
+{
+ return register_qdisc(&mqprio_qdisc_ops);
+}
+
+static void __exit mqprio_module_exit(void)
+{
+ unregister_qdisc(&mqprio_qdisc_ops);
+}
+
+module_init(mqprio_module_init);
+module_exit(mqprio_module_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 436a2e75b322..edc1950e0e77 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
unsigned int len;
struct Qdisc *qdisc;
- for (band = q->bands-1; band >= 0; band--) {
+ for (band = q->bands - 1; band >= 0; band--) {
qdisc = q->queues[band];
if (qdisc->ops->drop) {
len = qdisc->ops->drop(qdisc);
@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
- err = multiq_tune(sch,opt);
+ err = multiq_tune(sch, opt);
if (err)
kfree(q->queues);
@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct multiq_sched_data *q = qdisc_priv(sch);
tcm->tcm_handle |= TC_H_MIN(cl);
- tcm->tcm_info = q->queues[cl-1]->handle;
+ tcm->tcm_info = q->queues[cl - 1]->handle;
return 0;
}
@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
- if (arg->fn(sch, band+1, arg) < 0) {
+ if (arg->fn(sch, band + 1, arg) < 0) {
arg->stop = 1;
break;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6a3006b38dc5..edbbf7ad6623 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -19,12 +19,13 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#define VERSION "1.2"
+#define VERSION "1.3"
/* Network Emulation Queuing algorithm.
====================================
@@ -47,6 +48,20 @@
layering other disciplines. It does not need to do bandwidth
control either since that can be handled by using token
bucket or other rate control.
+
+ Correlated Loss Generator models
+
+ Added generation of correlated loss according to the
+ "Gilbert-Elliot" model, a 4-state markov model.
+
+ References:
+ [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
+ [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
+ and intuitive loss model for packet networks and its implementation
+ in the Netem module in the Linux kernel", available in [1]
+
+ Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
+ Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
struct netem_sched_data {
@@ -73,6 +88,26 @@ struct netem_sched_data {
u32 size;
s16 table[0];
} *delay_dist;
+
+ enum {
+ CLG_RANDOM,
+ CLG_4_STATES,
+ CLG_GILB_ELL,
+ } loss_model;
+
+ /* Correlated Loss Generation models */
+ struct clgstate {
+ /* state of the Markov chain */
+ u8 state;
+
+ /* 4-states and Gilbert-Elliot models */
+ u32 a1; /* p13 for 4-states or p for GE */
+ u32 a2; /* p31 for 4-states or r for GE */
+ u32 a3; /* p32 for 4-states or h for GE */
+ u32 a4; /* p14 for 4-states or 1-k for GE */
+ u32 a5; /* p23 used only in 4-states */
+ } clg;
+
};
/* Time stamp put into socket buffer control block */
@@ -115,6 +150,122 @@ static u32 get_crandom(struct crndstate *state)
return answer;
}
+/* loss_4state - 4-state model loss generator
+ * Generates losses according to the 4-state Markov chain adopted in
+ * the GI (General and Intuitive) loss model.
+ */
+static bool loss_4state(struct netem_sched_data *q)
+{
+ struct clgstate *clg = &q->clg;
+ u32 rnd = net_random();
+
+ /*
+ * Makes a comparison between rnd and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state and if the next packet has to be transmitted or lost.
+ * The four states correspond to:
+ * 1 => successfully transmitted packets within a gap period
+ * 4 => isolated losses within a gap period
+ * 3 => lost packets within a burst period
+ * 2 => successfully transmitted packets within a burst period
+ */
+ switch (clg->state) {
+ case 1:
+ if (rnd < clg->a4) {
+ clg->state = 4;
+ return true;
+ } else if (clg->a4 < rnd && rnd < clg->a1) {
+ clg->state = 3;
+ return true;
+ } else if (clg->a1 < rnd)
+ clg->state = 1;
+
+ break;
+ case 2:
+ if (rnd < clg->a5) {
+ clg->state = 3;
+ return true;
+ } else
+ clg->state = 2;
+
+ break;
+ case 3:
+ if (rnd < clg->a3)
+ clg->state = 2;
+ else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
+ clg->state = 1;
+ return true;
+ } else if (clg->a2 + clg->a3 < rnd) {
+ clg->state = 3;
+ return true;
+ }
+ break;
+ case 4:
+ clg->state = 1;
+ break;
+ }
+
+ return false;
+}
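
The state semantics spelled out above are easier to see with a small
simulation. The sketch below runs a four-state chain with the same meaning
of the states (1 and 2 transmit, 3 and 4 lose) but with made-up transition
probabilities expressed as doubles; it illustrates the model only and does
not reproduce the exact threshold arithmetic of loss_4state().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative transition probabilities (assumed values):
 * p13/p14 leave the gap state, p31/p32 leave the burst-loss state,
 * p23 re-enters the burst from the in-burst transmit state.
 */
static const double p13 = 0.05, p14 = 0.01;
static const double p31 = 0.30, p32 = 0.40, p23 = 0.50;

static double frand(void)
{
        return (double)rand() / RAND_MAX;
}

/* One step of the chain: return the next state given the current one. */
static int next_state(int s)
{
        double r = frand();

        switch (s) {
        case 1:                         /* transmitted within a gap */
                if (r < p14)
                        return 4;
                if (r < p14 + p13)
                        return 3;
                return 1;
        case 2:                         /* transmitted within a burst */
                return r < p23 ? 3 : 2;
        case 3:                         /* lost within a burst */
                if (r < p32)
                        return 2;
                if (r < p32 + p31)
                        return 1;
                return 3;
        default:                        /* state 4: isolated loss, back to gap */
                return 1;
        }
}

int main(void)
{
        long losses = 0, n = 1000000;
        int i, state = 1;

        for (i = 0; i < n; i++) {
                state = next_state(state);
                losses += (state == 3 || state == 4);
        }
        printf("simulated loss rate: %.4f\n", (double)losses / n);
        return 0;
}
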
+
+/* loss_gilb_ell - Gilbert-Elliot model loss generator
+ * Generates losses according to the Gilbert-Elliot loss model or
+ * its special cases (Gilbert or Simple Gilbert)
+ *
+ * Makes a comparison between a random number and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state. A second random number is extracted and the comparison
+ * with the loss probability of the current state decides if the next
+ * packet will be transmitted or lost.
+ */
+static bool loss_gilb_ell(struct netem_sched_data *q)
+{
+ struct clgstate *clg = &q->clg;
+
+ switch (clg->state) {
+ case 1:
+ if (net_random() < clg->a1)
+ clg->state = 2;
+ if (net_random() < clg->a4)
+ return true;
+ case 2:
+ if (net_random() < clg->a2)
+ clg->state = 1;
+ if (clg->a3 > net_random())
+ return true;
+ }
+
+ return false;
+}
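
A similar user-space simulation makes the two-state behaviour concrete. This
is a generic Gilbert-Elliot chain, a good and a bad state with their own
loss probabilities and transition rates; all values are illustrative
assumptions and are not meant to match the a1..a4 mapping used above.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative parameters (assumed values) */
static const double p_good_to_bad = 0.02;       /* like 'p' */
static const double p_bad_to_good = 0.20;       /* like 'r' */
static const double loss_in_good  = 0.001;
static const double loss_in_bad   = 0.50;

static double frand(void)
{
        return (double)rand() / RAND_MAX;
}

int main(void)
{
        long losses = 0, n = 1000000;
        int i, bad = 0;

        for (i = 0; i < n; i++) {
                /* move the chain first, then decide this packet's fate */
                if (bad)
                        bad = frand() < p_bad_to_good ? 0 : 1;
                else
                        bad = frand() < p_good_to_bad ? 1 : 0;
                losses += frand() < (bad ? loss_in_bad : loss_in_good);
        }
        printf("simulated loss rate: %.4f\n", (double)losses / n);
        return 0;
}
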
+
+static bool loss_event(struct netem_sched_data *q)
+{
+ switch (q->loss_model) {
+ case CLG_RANDOM:
+ /* Random packet drop 0 => none, ~0 => all */
+ return q->loss && q->loss >= get_crandom(&q->loss_cor);
+
+ case CLG_4_STATES:
+ /* 4-state loss model algorithm (used also for GI model)
+ * Extracts a value from the Markov 4-state loss generator,
+ * if it is 1 drops a packet and if needed writes the event in
+ * the kernel logs
+ */
+ return loss_4state(q);
+
+ case CLG_GILB_ELL:
+ /* Gilbert-Elliot loss model algorithm
+ * Extracts a value from the Gilbert-Elliot loss generator,
+ * if it is 1 drops a packet and if needed writes the event in
+ * the kernel logs
+ */
+ return loss_gilb_ell(q);
+ }
+
+ return false; /* not reached */
+}
+
+
/* tabledist - return a pseudo-randomly distributed value with mean mu and
* std deviation sigma. Uses table lookup to approximate the desired
* distribution, and a uniformly-distributed pseudo-random source.
@@ -161,14 +312,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int ret;
int count = 1;
- pr_debug("netem_enqueue skb=%p\n", skb);
-
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
- /* Random packet drop 0 => none, ~0 => all */
- if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+ /* Drop packet? */
+ if (loss_event(q))
--count;
if (count == 0) {
@@ -211,8 +360,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
cb = netem_skb_cb(skb);
- if (q->gap == 0 || /* not doing reordering */
- q->counter < q->gap || /* inside last reordering gap */
+ if (q->gap == 0 || /* not doing reordering */
+ q->counter < q->gap || /* inside last reordering gap */
q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now;
psched_tdiff_t delay;
@@ -238,17 +387,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = NET_XMIT_SUCCESS;
}
- if (likely(ret == NET_XMIT_SUCCESS)) {
- sch->q.qlen++;
- } else if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
+ if (ret != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ return ret;
+ }
}
- pr_debug("netem: enqueue ret %d\n", ret);
- return ret;
+ sch->q.qlen++;
+ return NET_XMIT_SUCCESS;
}
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
@@ -265,7 +415,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- if (sch->flags & TCQ_F_THROTTLED)
+ if (qdisc_is_throttled(sch))
return NULL;
skb = q->qdisc->ops->peek(q->qdisc);
@@ -287,9 +437,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
skb->tstamp.tv64 = 0;
#endif
- pr_debug("netem_dequeue: return skb=%p\n", skb);
- qdisc_bstats_update(sch, skb);
+
sch->q.qlen--;
+ qdisc_unthrottled(sch);
+ qdisc_bstats_update(sch, skb);
return skb;
}
@@ -308,6 +459,16 @@ static void netem_reset(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
}
+static void dist_free(struct disttable *d)
+{
+ if (d) {
+ if (is_vmalloc_addr(d))
+ vfree(d);
+ else
+ kfree(d);
+ }
+}
+
/*
* Distribution data is a variable size payload containing
* signed 16 bit values.
@@ -315,16 +476,20 @@ static void netem_reset(struct Qdisc *sch)
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
struct netem_sched_data *q = qdisc_priv(sch);
- unsigned long n = nla_len(attr)/sizeof(__s16);
+ size_t n = nla_len(attr)/sizeof(__s16);
const __s16 *data = nla_data(attr);
spinlock_t *root_lock;
struct disttable *d;
int i;
+ size_t s;
- if (n > 65536)
+ if (n > NETEM_DIST_MAX)
return -EINVAL;
- d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
+ s = sizeof(struct disttable) + n * sizeof(s16);
+ d = kmalloc(s, GFP_KERNEL);
+ if (!d)
+ d = vmalloc(s);
if (!d)
return -ENOMEM;
@@ -335,7 +500,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
root_lock = qdisc_root_sleeping_lock(sch);
spin_lock_bh(root_lock);
- kfree(q->delay_dist);
+ dist_free(q->delay_dist);
q->delay_dist = d;
spin_unlock_bh(root_lock);
return 0;
@@ -369,10 +534,66 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
init_crandom(&q->corrupt_cor, r->correlation);
}
+static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ const struct nlattr *la;
+ int rem;
+
+ nla_for_each_nested(la, attr, rem) {
+ u16 type = nla_type(la);
+
+ switch (type) {
+ case NETEM_LOSS_GI: {
+ const struct tc_netem_gimodel *gi = nla_data(la);
+
+ if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+ pr_info("netem: incorrect gi model size\n");
+ return -EINVAL;
+ }
+
+ q->loss_model = CLG_4_STATES;
+
+ q->clg.state = 1;
+ q->clg.a1 = gi->p13;
+ q->clg.a2 = gi->p31;
+ q->clg.a3 = gi->p32;
+ q->clg.a4 = gi->p14;
+ q->clg.a5 = gi->p23;
+ break;
+ }
+
+ case NETEM_LOSS_GE: {
+ const struct tc_netem_gemodel *ge = nla_data(la);
+
+ if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
+ pr_info("netem: incorrect gi model size\n");
+ return -EINVAL;
+ }
+
+ q->loss_model = CLG_GILB_ELL;
+ q->clg.state = 1;
+ q->clg.a1 = ge->p;
+ q->clg.a2 = ge->r;
+ q->clg.a3 = ge->h;
+ q->clg.a4 = ge->k1;
+ break;
+ }
+
+ default:
+ pr_info("netem: unknown loss type %u\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) },
[TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
[TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
+ [TCA_NETEM_LOSS] = { .type = NLA_NESTED },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -380,11 +601,15 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
{
int nested_len = nla_len(nla) - NLA_ALIGN(len);
- if (nested_len < 0)
+ if (nested_len < 0) {
+ pr_info("netem: invalid attributes len %d\n", nested_len);
return -EINVAL;
+ }
+
if (nested_len >= nla_attr_size(0))
return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
nested_len, policy);
+
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
return 0;
}
@@ -407,7 +632,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
ret = fifo_set_limit(q->qdisc, qopt->limit);
if (ret) {
- pr_debug("netem: can't set fifo limit\n");
+ pr_info("netem: can't set fifo limit\n");
return ret;
}
@@ -440,7 +665,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_CORRUPT])
get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
- return 0;
+ q->loss_model = CLG_RANDOM;
+ if (tb[TCA_NETEM_LOSS])
+ ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
+
+ return ret;
}
/*
@@ -535,16 +764,17 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
+ q->loss_model = CLG_RANDOM;
q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 1));
if (!q->qdisc) {
- pr_debug("netem: qdisc create failed\n");
+ pr_notice("netem: qdisc create tfifo qdisc failed\n");
return -ENOMEM;
}
ret = netem_change(sch, opt);
if (ret) {
- pr_debug("netem: change failed\n");
+ pr_info("netem: change failed\n");
qdisc_destroy(q->qdisc);
}
return ret;
@@ -556,14 +786,61 @@ static void netem_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
qdisc_destroy(q->qdisc);
- kfree(q->delay_dist);
+ dist_free(q->delay_dist);
+}
+
+static int dump_loss_model(const struct netem_sched_data *q,
+ struct sk_buff *skb)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, TCA_NETEM_LOSS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ switch (q->loss_model) {
+ case CLG_RANDOM:
+ /* legacy loss model */
+ nla_nest_cancel(skb, nest);
+ return 0; /* no data */
+
+ case CLG_4_STATES: {
+ struct tc_netem_gimodel gi = {
+ .p13 = q->clg.a1,
+ .p31 = q->clg.a2,
+ .p32 = q->clg.a3,
+ .p14 = q->clg.a4,
+ .p23 = q->clg.a5,
+ };
+
+ NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+ break;
+ }
+ case CLG_GILB_ELL: {
+ struct tc_netem_gemodel ge = {
+ .p = q->clg.a1,
+ .r = q->clg.a2,
+ .h = q->clg.a3,
+ .k1 = q->clg.a4,
+ };
+
+ NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+ break;
+ }
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
const struct netem_sched_data *q = qdisc_priv(sch);
- unsigned char *b = skb_tail_pointer(skb);
- struct nlattr *nla = (struct nlattr *) b;
+ struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
struct tc_netem_qopt qopt;
struct tc_netem_corr cor;
struct tc_netem_reorder reorder;
@@ -590,17 +867,87 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
corrupt.correlation = q->corrupt_cor.rho;
NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
- nla->nla_len = skb_tail_pointer(skb) - b;
+ if (dump_loss_model(q, skb) != 0)
+ goto nla_put_failure;
- return skb->len;
+ return nla_nest_end(skb, nla);
nla_put_failure:
- nlmsg_trim(skb, b);
+ nlmsg_trim(skb, nla);
return -1;
}
+static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+
+ if (cl != 1) /* only one class */
+ return -ENOENT;
+
+ tcm->tcm_handle |= TC_H_MIN(1);
+ tcm->tcm_info = q->qdisc->handle;
+
+ return 0;
+}
+
+static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+
+ if (new == NULL)
+ new = &noop_qdisc;
+
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
+ return 0;
+}
+
+static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ return q->qdisc;
+}
+
+static unsigned long netem_get(struct Qdisc *sch, u32 classid)
+{
+ return 1;
+}
+
+static void netem_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+ if (!walker->stop) {
+ if (walker->count >= walker->skip)
+ if (walker->fn(sch, 1, walker) < 0) {
+ walker->stop = 1;
+ return;
+ }
+ walker->count++;
+ }
+}
+
+static const struct Qdisc_class_ops netem_class_ops = {
+ .graft = netem_graft,
+ .leaf = netem_leaf,
+ .get = netem_get,
+ .put = netem_put,
+ .walk = netem_walk,
+ .dump = netem_dump_class,
+};
+
static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.id = "netem",
+ .cl_ops = &netem_class_ops,
.priv_size = sizeof(struct netem_sched_data),
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fbd710d619bf..2a318f2dc3e5 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -22,8 +22,7 @@
#include <net/pkt_sched.h>
-struct prio_sched_data
-{
+struct prio_sched_data {
int bands;
struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (!q->filter_list || err < 0) {
if (TC_H_MAJ(band))
band = 0;
- return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+ return q->queues[q->prio2band[band & TC_PRIO_MAX]];
}
band = res.classid;
}
@@ -106,7 +105,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
return NULL;
}
-static struct sk_buff *prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
}
-static unsigned int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
static void
-prio_reset(struct Qdisc* sch)
+prio_reset(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
- for (prio=0; prio<q->bands; prio++)
+ for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->q.qlen = 0;
}
static void
-prio_destroy(struct Qdisc* sch)
+prio_destroy(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list);
- for (prio=0; prio<q->bands; prio++)
+ for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
return -EINVAL;
- for (i=0; i<=TC_PRIO_MAX; i++) {
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
if (qopt->priomap[i] >= qopt->bands)
return -EINVAL;
}
@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+ for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
sch_tree_unlock(sch);
- for (i=0; i<q->bands; i++) {
+ for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old;
+
child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, i + 1));
@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
struct prio_sched_data *q = qdisc_priv(sch);
int i;
- for (i=0; i<TCQ_PRIO_BANDS; i++)
+ for (i = 0; i < TCQ_PRIO_BANDS; i++)
q->queues[i] = &noop_qdisc;
if (opt == NULL) {
@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
} else {
int err;
- if ((err= prio_tune(sch, opt)) != 0)
+ if ((err = prio_tune(sch, opt)) != 0)
return err;
}
return 0;
@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_prio_qopt opt;
opt.bands = q->bands;
- memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
+ memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
- if (arg->fn(sch, prio+1, arg) < 0) {
+ if (arg->fn(sch, prio + 1, arg) < 0) {
arg->stop = 1;
break;
}
@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9f98dbd32d4c..6649463da1b6 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -36,8 +36,7 @@
if RED works correctly.
*/
-struct red_sched_data
-{
+struct red_sched_data {
u32 limit; /* HARD maximal queue length */
unsigned char flags;
struct red_parms parms;
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
return q->flags & TC_RED_HARDDROP;
}
-static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
red_end_of_idle_period(&q->parms);
switch (red_action(&q->parms, q->parms.qavg)) {
- case RED_DONT_MARK:
- break;
-
- case RED_PROB_MARK:
- sch->qstats.overlimits++;
- if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
- goto congestion_drop;
- }
-
- q->stats.prob_mark++;
- break;
-
- case RED_HARD_MARK:
- sch->qstats.overlimits++;
- if (red_use_harddrop(q) || !red_use_ecn(q) ||
- !INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
- goto congestion_drop;
- }
-
- q->stats.forced_mark++;
- break;
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (red_use_harddrop(q) || !red_use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.forced_mark++;
+ break;
}
ret = qdisc_enqueue(skb, child);
@@ -106,7 +105,7 @@ congestion_drop:
return NET_XMIT_CN;
}
-static struct sk_buff * red_dequeue(struct Qdisc* sch)
+static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch);
@@ -123,7 +122,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
return skb;
}
-static struct sk_buff * red_peek(struct Qdisc* sch)
+static struct sk_buff *red_peek(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -131,7 +130,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
return child->ops->peek(child);
}
-static unsigned int red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -150,7 +149,7 @@ static unsigned int red_drop(struct Qdisc* sch)
return 0;
}
-static void red_reset(struct Qdisc* sch)
+static void red_reset(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
@@ -217,7 +216,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int red_init(struct Qdisc* sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
struct red_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
new file mode 100644
index 000000000000..0a833d0c1f61
--- /dev/null
+++ b/net/sched/sch_sfb.c
@@ -0,0 +1,709 @@
+/*
+ * net/sched/sch_sfb.c Stochastic Fair Blue
+ *
+ * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
+ * A New Class of Active Queue Management Algorithms.
+ * U. Michigan CSE-TR-387-99, April 1999.
+ *
+ * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <net/ip.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/*
+ * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
+ * This implementation uses L = 8 and N = 16
+ * This permits us to split one 32-bit hash (provided per packet by rxhash or
+ * external classifier) into 8 subhashes of 4 bits.
+ */
+#define SFB_BUCKET_SHIFT 4
+#define SFB_NUMBUCKETS (1 << SFB_BUCKET_SHIFT) /* N bins per Level */
+#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
+#define SFB_LEVELS (32 / SFB_BUCKET_SHIFT) /* L */
+
+/* SFB algo uses a virtual queue, named "bin" */
+struct sfb_bucket {
+ u16 qlen; /* length of virtual queue */
+ u16 p_mark; /* marking probability */
+};
+
+/* We use double buffering right before a hash change
+ * (Section 4.4 of SFB reference: moving hash functions)
+ */
+struct sfb_bins {
+ u32 perturbation; /* jhash perturbation */
+ struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
+};
+
+struct sfb_sched_data {
+ struct Qdisc *qdisc;
+ struct tcf_proto *filter_list;
+ unsigned long rehash_interval;
+ unsigned long warmup_time; /* double buffering warmup time in jiffies */
+ u32 max;
+ u32 bin_size; /* maximum queue length per bin */
+ u32 increment; /* d1 */
+ u32 decrement; /* d2 */
+ u32 limit; /* HARD maximal queue length */
+ u32 penalty_rate;
+ u32 penalty_burst;
+ u32 tokens_avail;
+ unsigned long rehash_time;
+ unsigned long token_time;
+
+ u8 slot; /* current active bins (0 or 1) */
+ bool double_buffering;
+ struct sfb_bins bins[2];
+
+ struct {
+ u32 earlydrop;
+ u32 penaltydrop;
+ u32 bucketdrop;
+ u32 queuedrop;
+ u32 childdrop; /* drops in child qdisc */
+ u32 marked; /* ECN mark */
+ } stats;
+};
+
+/*
+ * Each queued skb might be hashed on one or two bins
+ * We store in skb_cb the two hash values.
+ * (A zero value means double buffering was not used)
+ */
+struct sfb_skb_cb {
+ u32 hashes[2];
+};
+
+static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(skb->cb) <
+ sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+ return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+/*
+ * If using the 'internal' SFB flow classifier, the hash comes from skb rxhash.
+ * If using an external classifier, the hash comes from the classid.
+ */
+static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
+{
+ return sfb_skb_cb(skb)->hashes[slot];
+}
+
+/* Probabilities are coded as Q0.16 fixed-point values,
+ * with 0xFFFF representing 65535/65536 (almost 1.0)
+ * Addition and subtraction are saturating in [0, 65535]
+ */
+static u32 prob_plus(u32 p1, u32 p2)
+{
+ u32 res = p1 + p2;
+
+ return min_t(u32, res, SFB_MAX_PROB);
+}
+
+static u32 prob_minus(u32 p1, u32 p2)
+{
+ return p1 > p2 ? p1 - p2 : 0;
+}
+
+static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
+{
+ int i;
+ struct sfb_bucket *b = &q->bins[slot].bins[0][0];
+
+ for (i = 0; i < SFB_LEVELS; i++) {
+ u32 hash = sfbhash & SFB_BUCKET_MASK;
+
+ sfbhash >>= SFB_BUCKET_SHIFT;
+ if (b[hash].qlen < 0xFFFF)
+ b[hash].qlen++;
+ b += SFB_NUMBUCKETS; /* next level */
+ }
+}
+
+static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+{
+ u32 sfbhash;
+
+ sfbhash = sfb_hash(skb, 0);
+ if (sfbhash)
+ increment_one_qlen(sfbhash, 0, q);
+
+ sfbhash = sfb_hash(skb, 1);
+ if (sfbhash)
+ increment_one_qlen(sfbhash, 1, q);
+}
+
+static void decrement_one_qlen(u32 sfbhash, u32 slot,
+ struct sfb_sched_data *q)
+{
+ int i;
+ struct sfb_bucket *b = &q->bins[slot].bins[0][0];
+
+ for (i = 0; i < SFB_LEVELS; i++) {
+ u32 hash = sfbhash & SFB_BUCKET_MASK;
+
+ sfbhash >>= SFB_BUCKET_SHIFT;
+ if (b[hash].qlen > 0)
+ b[hash].qlen--;
+ b += SFB_NUMBUCKETS; /* next level */
+ }
+}
+
+static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+{
+ u32 sfbhash;
+
+ sfbhash = sfb_hash(skb, 0);
+ if (sfbhash)
+ decrement_one_qlen(sfbhash, 0, q);
+
+ sfbhash = sfb_hash(skb, 1);
+ if (sfbhash)
+ decrement_one_qlen(sfbhash, 1, q);
+}
+
+static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
+{
+ b->p_mark = prob_minus(b->p_mark, q->decrement);
+}
+
+static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
+{
+ b->p_mark = prob_plus(b->p_mark, q->increment);
+}
+
+static void sfb_zero_all_buckets(struct sfb_sched_data *q)
+{
+ memset(&q->bins, 0, sizeof(q->bins));
+}
+
+/*
+ * compute max qlen, max p_mark, and avg p_mark
+ */
+static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
+{
+ int i;
+ u32 qlen = 0, prob = 0, totalpm = 0;
+ const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
+
+ for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
+ if (qlen < b->qlen)
+ qlen = b->qlen;
+ totalpm += b->p_mark;
+ if (prob < b->p_mark)
+ prob = b->p_mark;
+ b++;
+ }
+ *prob_r = prob;
+ *avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
+ return qlen;
+}
+
+
+static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
+{
+ q->bins[slot].perturbation = net_random();
+}
+
+static void sfb_swap_slot(struct sfb_sched_data *q)
+{
+ sfb_init_perturbation(q->slot, q);
+ q->slot ^= 1;
+ q->double_buffering = false;
+}
+
+/* Non-elastic flows are allowed to use part of the bandwidth, expressed
+ * in "penalty_rate" packets per second, with a burst of "penalty_burst" packets
+ */
+static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
+{
+ if (q->penalty_rate == 0 || q->penalty_burst == 0)
+ return true;
+
+ if (q->tokens_avail < 1) {
+ unsigned long age = min(10UL * HZ, jiffies - q->token_time);
+
+ q->tokens_avail = (age * q->penalty_rate) / HZ;
+ if (q->tokens_avail > q->penalty_burst)
+ q->tokens_avail = q->penalty_burst;
+ q->token_time = jiffies;
+ if (q->tokens_avail < 1)
+ return true;
+ }
+
+ q->tokens_avail--;
+ return false;
+}
+
+static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
+ int *qerr, u32 *salt)
+{
+ struct tcf_result res;
+ int result;
+
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
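+ /* fall through: STOLEN/QUEUED also return false */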
+ case TC_ACT_SHOT:
+ return false;
+ }
+#endif
+ *salt = TC_H_MIN(res.classid);
+ return true;
+ }
+ return false;
+}
+
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *child = q->qdisc;
+ int i;
+ u32 p_min = ~0;
+ u32 minqlen = ~0;
+ u32 r, slot, salt, sfbhash;
+ int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+ if (q->rehash_interval > 0) {
+ unsigned long limit = q->rehash_time + q->rehash_interval;
+
+ if (unlikely(time_after(jiffies, limit))) {
+ sfb_swap_slot(q);
+ q->rehash_time = jiffies;
+ } else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
+ time_after(jiffies, limit - q->warmup_time))) {
+ q->double_buffering = true;
+ }
+ }
+
+ if (q->filter_list) {
+ /* If using external classifiers, get result and record it. */
+ if (!sfb_classify(skb, q, &ret, &salt))
+ goto other_drop;
+ } else {
+ salt = skb_get_rxhash(skb);
+ }
+
+ slot = q->slot;
+
+ sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+ if (!sfbhash)
+ sfbhash = 1;
+ sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+
+ for (i = 0; i < SFB_LEVELS; i++) {
+ u32 hash = sfbhash & SFB_BUCKET_MASK;
+ struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
+
+ sfbhash >>= SFB_BUCKET_SHIFT;
+ if (b->qlen == 0)
+ decrement_prob(b, q);
+ else if (b->qlen >= q->bin_size)
+ increment_prob(b, q);
+ if (minqlen > b->qlen)
+ minqlen = b->qlen;
+ if (p_min > b->p_mark)
+ p_min = b->p_mark;
+ }
+
+ slot ^= 1;
+ sfb_skb_cb(skb)->hashes[slot] = 0;
+
+ if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+ sch->qstats.overlimits++;
+ if (minqlen >= q->max)
+ q->stats.bucketdrop++;
+ else
+ q->stats.queuedrop++;
+ goto drop;
+ }
+
+ if (unlikely(p_min >= SFB_MAX_PROB)) {
+ /* Inelastic flow */
+ if (q->double_buffering) {
+ sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+ if (!sfbhash)
+ sfbhash = 1;
+ sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+
+ for (i = 0; i < SFB_LEVELS; i++) {
+ u32 hash = sfbhash & SFB_BUCKET_MASK;
+ struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
+
+ sfbhash >>= SFB_BUCKET_SHIFT;
+ if (b->qlen == 0)
+ decrement_prob(b, q);
+ else if (b->qlen >= q->bin_size)
+ increment_prob(b, q);
+ }
+ }
+ if (sfb_rate_limit(skb, q)) {
+ sch->qstats.overlimits++;
+ q->stats.penaltydrop++;
+ goto drop;
+ }
+ goto enqueue;
+ }
+
+ r = net_random() & SFB_MAX_PROB;
+
+ if (unlikely(r < p_min)) {
+ if (unlikely(p_min > SFB_MAX_PROB / 2)) {
+ /* If we're marking that many packets, then either
+ * this flow is unresponsive, or we're badly congested.
+ * In either case, we want to start dropping packets.
+ */
+ if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
+ q->stats.earlydrop++;
+ goto drop;
+ }
+ }
+ if (INET_ECN_set_ce(skb)) {
+ q->stats.marked++;
+ } else {
+ q->stats.earlydrop++;
+ goto drop;
+ }
+ }
+
+enqueue:
+ ret = qdisc_enqueue(skb, child);
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ sch->q.qlen++;
+ increment_qlen(skb, q);
+ } else if (net_xmit_drop_count(ret)) {
+ q->stats.childdrop++;
+ sch->qstats.drops++;
+ }
+ return ret;
+
+drop:
+ qdisc_drop(skb, sch);
+ return NET_XMIT_CN;
+other_drop:
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+}
+
+static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *child = q->qdisc;
+ struct sk_buff *skb;
+
+ skb = child->dequeue(q->qdisc);
+
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+ decrement_qlen(skb, q);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *sfb_peek(struct Qdisc *sch)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *child = q->qdisc;
+
+ return child->ops->peek(child);
+}
+
+/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */
+
+static void sfb_reset(struct Qdisc *sch)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset(q->qdisc);
+ sch->q.qlen = 0;
+ q->slot = 0;
+ q->double_buffering = false;
+ sfb_zero_all_buckets(q);
+ sfb_init_perturbation(0, q);
+}
+
+static void sfb_destroy(struct Qdisc *sch)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ qdisc_destroy(q->qdisc);
+}
+
+static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
+ [TCA_SFB_PARMS] = { .len = sizeof(struct tc_sfb_qopt) },
+};
+
+static const struct tc_sfb_qopt sfb_default_ops = {
+ .rehash_interval = 600 * MSEC_PER_SEC,
+ .warmup_time = 60 * MSEC_PER_SEC,
+ .limit = 0,
+ .max = 25,
+ .bin_size = 20,
+ .increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
+ .decrement = (SFB_MAX_PROB + 3000) / 6000,
+ .penalty_rate = 10,
+ .penalty_burst = 20,
+};
+
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *child;
+ struct nlattr *tb[TCA_SFB_MAX + 1];
+ const struct tc_sfb_qopt *ctl = &sfb_default_ops;
+ u32 limit;
+ int err;
+
+ if (opt) {
+ err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
+ if (err < 0)
+ return -EINVAL;
+
+ if (tb[TCA_SFB_PARMS] == NULL)
+ return -EINVAL;
+
+ ctl = nla_data(tb[TCA_SFB_PARMS]);
+ }
+
+ limit = ctl->limit;
+ if (limit == 0)
+ limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+
+ child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ sch_tree_lock(sch);
+
+ qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_destroy(q->qdisc);
+ q->qdisc = child;
+
+ q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
+ q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
+ q->rehash_time = jiffies;
+ q->limit = limit;
+ q->increment = ctl->increment;
+ q->decrement = ctl->decrement;
+ q->max = ctl->max;
+ q->bin_size = ctl->bin_size;
+ q->penalty_rate = ctl->penalty_rate;
+ q->penalty_burst = ctl->penalty_burst;
+ q->tokens_avail = ctl->penalty_burst;
+ q->token_time = jiffies;
+
+ q->slot = 0;
+ q->double_buffering = false;
+ sfb_zero_all_buckets(q);
+ sfb_init_perturbation(0, q);
+ sfb_init_perturbation(1, q);
+
+ sch_tree_unlock(sch);
+
+ return 0;
+}
+
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ q->qdisc = &noop_qdisc;
+ return sfb_change(sch, opt);
+}
+
+static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+ struct tc_sfb_qopt opt = {
+ .rehash_interval = jiffies_to_msecs(q->rehash_interval),
+ .warmup_time = jiffies_to_msecs(q->warmup_time),
+ .limit = q->limit,
+ .max = q->max,
+ .bin_size = q->bin_size,
+ .increment = q->increment,
+ .decrement = q->decrement,
+ .penalty_rate = q->penalty_rate,
+ .penalty_burst = q->penalty_burst,
+ };
+
+ sch->qstats.backlog = q->qdisc->qstats.backlog;
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -EMSGSIZE;
+}
+
+static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct tc_sfb_xstats st = {
+ .earlydrop = q->stats.earlydrop,
+ .penaltydrop = q->stats.penaltydrop,
+ .bucketdrop = q->stats.bucketdrop,
+ .queuedrop = q->stats.queuedrop,
+ .childdrop = q->stats.childdrop,
+ .marked = q->stats.marked,
+ };
+
+ st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ return -ENOSYS;
+}
+
+static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ if (new == NULL)
+ new = &noop_qdisc;
+
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ return q->qdisc;
+}
+
+static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
+{
+ return 1;
+}
+
+static void sfb_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ struct nlattr **tca, unsigned long *arg)
+{
+ return -ENOSYS;
+}
+
+static int sfb_delete(struct Qdisc *sch, unsigned long cl)
+{
+ return -ENOSYS;
+}
+
+static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+ if (!walker->stop) {
+ if (walker->count >= walker->skip)
+ if (walker->fn(sch, 1, walker) < 0) {
+ walker->stop = 1;
+ return;
+ }
+ walker->count++;
+ }
+}
+
+static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+
+static const struct Qdisc_class_ops sfb_class_ops = {
+ .graft = sfb_graft,
+ .leaf = sfb_leaf,
+ .get = sfb_get,
+ .put = sfb_put,
+ .change = sfb_change_class,
+ .delete = sfb_delete,
+ .walk = sfb_walk,
+ .tcf_chain = sfb_find_tcf,
+ .bind_tcf = sfb_bind,
+ .unbind_tcf = sfb_put,
+ .dump = sfb_dump_class,
+};
+
+static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
+ .id = "sfb",
+ .priv_size = sizeof(struct sfb_sched_data),
+ .cl_ops = &sfb_class_ops,
+ .enqueue = sfb_enqueue,
+ .dequeue = sfb_dequeue,
+ .peek = sfb_peek,
+ .init = sfb_init,
+ .reset = sfb_reset,
+ .destroy = sfb_destroy,
+ .change = sfb_change,
+ .dump = sfb_dump,
+ .dump_stats = sfb_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init sfb_module_init(void)
+{
+ return register_qdisc(&sfb_qdisc_ops);
+}
+
+static void __exit sfb_module_exit(void)
+{
+ unregister_qdisc(&sfb_qdisc_ops);
+}
+
+module_init(sfb_module_init)
+module_exit(sfb_module_exit)
+
+MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
+MODULE_AUTHOR("Juliusz Chroboczek");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index edea8cefec6c..c2e628dfaacc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -76,7 +77,8 @@
#define SFQ_DEPTH 128 /* max number of packets per flow */
#define SFQ_SLOTS 128 /* max number of flows */
#define SFQ_EMPTY_SLOT 255
-#define SFQ_HASH_DIVISOR 1024
+#define SFQ_DEFAULT_HASH_DIVISOR 1024
+
/* We use 16 bits to store allot, and want to handle packets up to 64K
* Scale allot by 8 (1<<3) so that no overflow occurs.
*/
@@ -92,8 +94,7 @@ typedef unsigned char sfq_index;
* while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
* are 'pointers' to dep[] array
*/
-struct sfq_head
-{
+struct sfq_head {
sfq_index next;
sfq_index prev;
};
@@ -108,13 +109,12 @@ struct sfq_slot {
short allot; /* credit for this slot */
};
-struct sfq_sched_data
-{
+struct sfq_sched_data {
/* Parameters */
int perturb_period;
- unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
int limit;
-
+ unsigned int divisor; /* number of slots in hash table */
/* Variables */
struct tcf_proto *filter_list;
struct timer_list perturb_timer;
@@ -122,7 +122,7 @@ struct sfq_sched_data
sfq_index cur_depth; /* depth of longest slot */
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct sfq_slot *tail; /* current slot in round */
- sfq_index ht[SFQ_HASH_DIVISOR]; /* Hash table */
+ sfq_index *ht; /* Hash table (divisor slots) */
struct sfq_slot slots[SFQ_SLOTS];
struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
};
@@ -137,12 +137,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_SLOTS];
}
-static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
- return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
+ return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}
-static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
u32 h, h2;
@@ -157,13 +157,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
iph = ip_hdr(skb);
h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol;
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
iph = ip_hdr(skb);
- h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+ h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
}
break;
}
@@ -181,7 +181,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
if (poff >= 0 &&
pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
iph = ipv6_hdr(skb);
- h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+ h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
}
break;
}
@@ -203,7 +203,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
if (TC_H_MAJ(skb->priority) == sch->handle &&
TC_H_MIN(skb->priority) > 0 &&
- TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+ TC_H_MIN(skb->priority) <= q->divisor)
return TC_H_MIN(skb->priority);
if (!q->filter_list)
@@ -221,7 +221,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return 0;
}
#endif
- if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+ if (TC_H_MIN(res.classid) <= q->divisor)
return TC_H_MIN(res.classid);
}
return 0;
@@ -491,13 +491,18 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
+ if (ctl->divisor &&
+ (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+ return -EINVAL;
+
sch_tree_lock(sch);
q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = ctl->perturb_period * HZ;
if (ctl->limit)
q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-
+ if (ctl->divisor)
+ q->divisor = ctl->divisor;
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
sfq_drop(sch);
@@ -515,15 +520,13 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
+ size_t sz;
int i;
q->perturb_timer.function = sfq_perturbation;
q->perturb_timer.data = (unsigned long)sch;
init_timer_deferrable(&q->perturb_timer);
- for (i = 0; i < SFQ_HASH_DIVISOR; i++)
- q->ht[i] = SFQ_EMPTY_SLOT;
-
for (i = 0; i < SFQ_DEPTH; i++) {
q->dep[i].next = i + SFQ_SLOTS;
q->dep[i].prev = i + SFQ_SLOTS;
@@ -532,6 +535,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = SFQ_DEPTH - 1;
q->cur_depth = 0;
q->tail = NULL;
+ q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
if (opt == NULL) {
q->quantum = psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
@@ -543,10 +547,23 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
return err;
}
+ sz = sizeof(q->ht[0]) * q->divisor;
+ q->ht = kmalloc(sz, GFP_KERNEL);
+ if (!q->ht && sz > PAGE_SIZE)
+ q->ht = vmalloc(sz);
+ if (!q->ht)
+ return -ENOMEM;
+ for (i = 0; i < q->divisor; i++)
+ q->ht[i] = SFQ_EMPTY_SLOT;
+
for (i = 0; i < SFQ_SLOTS; i++) {
slot_queue_init(&q->slots[i]);
sfq_link(q, i);
}
+ if (q->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
@@ -557,6 +574,10 @@ static void sfq_destroy(struct Qdisc *sch)
tcf_destroy_chain(&q->filter_list);
q->perturb_period = 0;
del_timer_sync(&q->perturb_timer);
+ if (is_vmalloc_addr(q->ht))
+ vfree(q->ht);
+ else
+ kfree(q->ht);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -569,7 +590,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.perturb_period = q->perturb_period / HZ;
opt.limit = q->limit;
- opt.divisor = SFQ_HASH_DIVISOR;
+ opt.divisor = q->divisor;
opt.flows = q->limit;
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -594,6 +615,8 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
+ /* we cannot bypass queue discipline anymore */
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
@@ -647,7 +670,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
if (arg->stop)
return;
- for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+ for (i = 0; i < q->divisor; i++) {
if (q->ht[i] == SFQ_EMPTY_SLOT ||
arg->count < arg->skip) {
arg->count++;
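A small illustration of why sfq_change() above requires is_power_of_2(ctl->divisor): sfq_fold_hash() reduces the jhash with & (q->divisor - 1), and that mask only agrees with a modulo (and only covers the whole 0..divisor-1 range) when the divisor is a power of two. Purely illustrative user-space code, not part of the patch; the hash value is a stand-in for jhash_2words().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t h = 0x9e3779b9;	/* stand-in for jhash_2words(h, h1, perturbation) */
	uint32_t divisor;

	divisor = 1024;			/* SFQ_DEFAULT_HASH_DIVISOR, a power of two */
	printf("mask %u  mod %u\n",
	       (unsigned)(h & (divisor - 1)), (unsigned)(h % divisor));

	divisor = 1000;			/* not a power of two: mask and modulo diverge */
	printf("mask %u  mod %u\n",
	       (unsigned)(h & (divisor - 1)), (unsigned)(h % divisor));
	return 0;
}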
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index e93165820c3f..1dcfb5223a86 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -97,8 +97,7 @@
changed the limit is not effective anymore.
*/
-struct tbf_sched_data
-{
+struct tbf_sched_data {
/* Parameters */
u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -115,10 +114,10 @@ struct tbf_sched_data
struct qdisc_watchdog watchdog; /* Watchdog timer */
};
-#define L2T(q,L) qdisc_l2t((q)->R_tab,L)
-#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
+#define L2T(q, L) qdisc_l2t((q)->R_tab, L)
+#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
@@ -137,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_SUCCESS;
}
-static unsigned int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
@@ -149,7 +148,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
return len;
}
-static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
@@ -185,7 +184,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->tokens = toks;
q->ptokens = ptoks;
sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
}
@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
return NULL;
}
-static void tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
-static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
int err;
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
struct qdisc_rate_table *rtab = NULL;
struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL;
- int max_size,n;
+ int max_size, n;
err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
if (err < 0)
@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
}
for (n = 0; n < 256; n++)
- if (rtab->data[n] > qopt->buffer) break;
- max_size = (n << qopt->rate.cell_log)-1;
+ if (rtab->data[n] > qopt->buffer)
+ break;
+ max_size = (n << qopt->rate.cell_log) - 1;
if (ptab) {
int size;
for (n = 0; n < 256; n++)
- if (ptab->data[n] > qopt->mtu) break;
- size = (n << qopt->peakrate.cell_log)-1;
- if (size < max_size) max_size = size;
+ if (ptab->data[n] > qopt->mtu)
+ break;
+ size = (n << qopt->peakrate.cell_log) - 1;
+ if (size < max_size)
+ max_size = size;
}
if (max_size < 0)
goto done;
@@ -310,7 +312,7 @@ done:
return err;
}
-static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static const struct Qdisc_class_ops tbf_class_ops =
-{
+static const struct Qdisc_class_ops tbf_class_ops = {
.graft = tbf_graft,
.leaf = tbf_leaf,
.get = tbf_get,
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index d84e7329660f..45cd30098e34 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -53,8 +53,7 @@
which will not break load balancing, though native slave
traffic will have the highest priority. */
-struct teql_master
-{
+struct teql_master {
struct Qdisc_ops qops;
struct net_device *dev;
struct Qdisc *slaves;
@@ -65,22 +64,21 @@ struct teql_master
unsigned long tx_dropped;
};
-struct teql_sched_data
-{
+struct teql_sched_data {
struct Qdisc *next;
struct teql_master *m;
struct neighbour *ncache;
struct sk_buff_head q;
};
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
/* "teql*" qdisc routines */
static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch);
@@ -96,7 +94,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue;
@@ -118,13 +116,13 @@ teql_dequeue(struct Qdisc* sch)
}
static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
{
/* teql is meant to be used as root qdisc */
return NULL;
}
-static __inline__ void
+static inline void
teql_neigh_release(struct neighbour *n)
{
if (n)
@@ -132,7 +130,7 @@ teql_neigh_release(struct neighbour *n)
}
static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
@@ -142,13 +140,14 @@ teql_reset(struct Qdisc* sch)
}
static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
{
struct Qdisc *q, *prev;
struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
- if ((prev = master->slaves) != NULL) {
+ prev = master->slaves;
+ if (prev) {
do {
q = NEXT_SLAVE(prev);
if (q == sch) {
@@ -180,7 +179,7 @@ teql_destroy(struct Qdisc* sch)
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct net_device *dev = qdisc_dev(sch);
- struct teql_master *m = (struct teql_master*)sch->ops;
+ struct teql_master *m = (struct teql_master *)sch->ops;
struct teql_sched_data *q = qdisc_priv(sch);
if (dev->hard_header_len > m->dev->hard_header_len)
@@ -291,7 +290,8 @@ restart:
nores = 0;
busy = 0;
- if ((q = start) == NULL)
+ q = start;
+ if (!q)
goto drop;
do {
@@ -356,10 +356,10 @@ drop:
static int teql_master_open(struct net_device *dev)
{
- struct Qdisc * q;
+ struct Qdisc *q;
struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE;
- unsigned flags = IFF_NOARP|IFF_MULTICAST;
+ unsigned int flags = IFF_NOARP | IFF_MULTICAST;
if (m->slaves == NULL)
return -EUNATCH;
@@ -427,7 +427,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
do {
if (new_mtu > qdisc_dev(q)->mtu)
return -EINVAL;
- } while ((q=NEXT_SLAVE(q)) != m->slaves);
+ } while ((q = NEXT_SLAVE(q)) != m->slaves);
}
dev->mtu = new_mtu;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2cc46f0962ca..b23428f3c0dd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
*errp = sctp_make_op_error_fixed(asoc, chunk);
if (*errp) {
- sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk_fixed(*errp,
- WORD_ROUND(ntohs(param.p->length)),
- param.v);
+ if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ WORD_ROUND(ntohs(param.p->length))))
+ sctp_addto_chunk_fixed(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
} else {
/* If there is no memory for generating the ERROR
* report as specified, an ABORT will be triggered
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8e02550ff3e8..b53b2ebbb198 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6102,15 +6102,16 @@ static void __sctp_write_space(struct sctp_association *asoc)
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ wait_queue_head_t *wq = sk_sleep(sk);
+
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible(wq);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
- if (sock->wq->fasync_list &&
- !(sk->sk_shutdown & SEND_SHUTDOWN))
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
}
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 747d5412c463..f1e40cebc981 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -344,7 +344,7 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
/* Refresh the gap ack information. */
if (sctp_tsnmap_has_gap(map)) {
- __u16 start, end;
+ __u16 start = 0, end = 0;
sctp_tsnmap_iter_init(map, &iter);
while (sctp_tsnmap_next_gap_ack(map, &iter,
&start,
diff --git a/net/socket.c b/net/socket.c
index ac2219f90d5d..937d0fcf74bc 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -240,17 +240,19 @@ static struct kmem_cache *sock_inode_cachep __read_mostly;
static struct inode *sock_alloc_inode(struct super_block *sb)
{
struct socket_alloc *ei;
+ struct socket_wq *wq;
ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
- ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
- if (!ei->socket.wq) {
+ wq = kmalloc(sizeof(*wq), GFP_KERNEL);
+ if (!wq) {
kmem_cache_free(sock_inode_cachep, ei);
return NULL;
}
- init_waitqueue_head(&ei->socket.wq->wait);
- ei->socket.wq->fasync_list = NULL;
+ init_waitqueue_head(&wq->wait);
+ wq->fasync_list = NULL;
+ RCU_INIT_POINTER(ei->socket.wq, wq);
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
@@ -273,9 +275,11 @@ static void wq_free_rcu(struct rcu_head *head)
static void sock_destroy_inode(struct inode *inode)
{
struct socket_alloc *ei;
+ struct socket_wq *wq;
ei = container_of(inode, struct socket_alloc, vfs_inode);
- call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+ wq = rcu_dereference_protected(ei->socket.wq, 1);
+ call_rcu(&wq->rcu, wq_free_rcu);
kmem_cache_free(sock_inode_cachep, ei);
}
@@ -524,7 +528,7 @@ void sock_release(struct socket *sock)
module_put(owner);
}
- if (sock->wq->fasync_list)
+ if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
percpu_sub(sockets_in_use, 1);
@@ -1108,15 +1112,16 @@ static int sock_fasync(int fd, struct file *filp, int on)
{
struct socket *sock = filp->private_data;
struct sock *sk = sock->sk;
+ struct socket_wq *wq;
if (sk == NULL)
return -EINVAL;
lock_sock(sk);
+ wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+ fasync_helper(fd, filp, on, &wq->fasync_list);
- fasync_helper(fd, filp, on, &sock->wq->fasync_list);
-
- if (!sock->wq->fasync_list)
+ if (!wq->fasync_list)
sock_reset_flag(sk, SOCK_FASYNC);
else
sock_set_flag(sk, SOCK_FASYNC);
@@ -2643,7 +2648,8 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
old_fs = get_fs();
set_fs(KERNEL_DS);
- err = dev_ioctl(net, cmd, &kifr);
+ err = dev_ioctl(net, cmd,
+ (struct ifreq __user __force *) &kifr);
set_fs(old_fs);
return err;
@@ -2752,7 +2758,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
old_fs = get_fs();
set_fs(KERNEL_DS);
- err = dev_ioctl(net, cmd, (void __user *)&ifr);
+ err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
set_fs(old_fs);
if (cmd == SIOCGIFMAP && !err) {
@@ -2857,7 +2863,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
ret |= __get_user(rtdev, &(ur4->rt_dev));
if (rtdev) {
ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
- r4.rt_dev = devname; devname[15] = 0;
+ r4.rt_dev = (char __user __force *)devname;
+ devname[15] = 0;
} else
r4.rt_dev = NULL;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09b164e..2841cc6bcfda 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -908,7 +908,7 @@ static int rpciod_start(void)
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- wq = alloc_workqueue("rpciod", WQ_RESCUER, 0);
+ wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d802e941d365..b7d435c3f19e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -420,6 +420,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
static void svc_udp_data_ready(struct sock *sk, int count)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+ wait_queue_head_t *wq = sk_sleep(sk);
if (svsk) {
dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
@@ -428,8 +429,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible(wq);
}
/*
@@ -438,6 +439,7 @@ static void svc_udp_data_ready(struct sock *sk, int count)
static void svc_write_space(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
+ wait_queue_head_t *wq = sk_sleep(sk);
if (svsk) {
dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -445,10 +447,10 @@ static void svc_write_space(struct sock *sk)
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
+ if (wq && waitqueue_active(wq)) {
dprintk("RPC svc_write_space: someone sleeping on %p\n",
svsk);
- wake_up_interruptible(sk_sleep(sk));
+ wake_up_interruptible(wq);
}
}
@@ -739,6 +741,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+ wait_queue_head_t *wq;
dprintk("svc: socket %p TCP (listen) state change %d\n",
sk, sk->sk_state);
@@ -761,8 +764,9 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
printk("svc: socket %p: no user data\n", sk);
}
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible_all(sk_sleep(sk));
+ wq = sk_sleep(sk);
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible_all(wq);
}
/*
@@ -771,6 +775,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
static void svc_tcp_state_change(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+ wait_queue_head_t *wq = sk_sleep(sk);
dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
sk, sk->sk_state, sk->sk_user_data);
@@ -781,13 +786,14 @@ static void svc_tcp_state_change(struct sock *sk)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible_all(sk_sleep(sk));
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible_all(wq);
}
static void svc_tcp_data_ready(struct sock *sk, int count)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+ wait_queue_head_t *wq = sk_sleep(sk);
dprintk("svc: socket %p TCP data ready (svsk %p)\n",
sk, sk->sk_user_data);
@@ -795,8 +801,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible(wq);
}
/*
@@ -1531,6 +1537,7 @@ static void svc_sock_detach(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
struct sock *sk = svsk->sk_sk;
+ wait_queue_head_t *wq;
dprintk("svc: svc_sock_detach(%p)\n", svsk);
@@ -1539,8 +1546,9 @@ static void svc_sock_detach(struct svc_xprt *xprt)
sk->sk_data_ready = svsk->sk_odata;
sk->sk_write_space = svsk->sk_owspace;
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ wq = sk_sleep(sk);
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible(wq);
}
/*
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 70ab5ef48766..7dc1dc7151ea 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2004-2006, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,8 +61,8 @@
*/
struct bcbearer_pair {
- struct bearer *primary;
- struct bearer *secondary;
+ struct tipc_bearer *primary;
+ struct tipc_bearer *secondary;
};
/**
@@ -81,7 +81,7 @@ struct bcbearer_pair {
*/
struct bcbearer {
- struct bearer bearer;
+ struct tipc_bearer bearer;
struct media media;
struct bcbearer_pair bpairs[MAX_BEARERS];
struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
@@ -93,6 +93,7 @@ struct bcbearer {
* struct bclink - link used for broadcast messages
* @link: (non-standard) broadcast link structure
* @node: (non-standard) node structure representing b'cast link's peer node
+ * @retransmit_to: node that most recently requested a retransmit
*
* Handles sequence numbering, fragmentation, bundling, etc.
*/
@@ -100,6 +101,7 @@ struct bcbearer {
struct bclink {
struct link link;
struct tipc_node node;
+ struct tipc_node *retransmit_to;
};
@@ -184,6 +186,17 @@ static int bclink_ack_allowed(u32 n)
/**
+ * tipc_bclink_retransmit_to - get most recent node to request retransmission
+ *
+ * Called with bc_lock locked
+ */
+
+struct tipc_node *tipc_bclink_retransmit_to(void)
+{
+ return bclink->retransmit_to;
+}
+
+/**
* bclink_retransmit_pkt - retransmit broadcast packets
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
@@ -285,6 +298,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
msg = buf_msg(buf);
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, n_ptr->addr);
+ msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
@@ -405,8 +419,6 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
else
bclink_set_last_sent();
- if (bcl->out_queue_size > bcl->stats.max_queue_sz)
- bcl->stats.max_queue_sz = bcl->out_queue_size;
bcl->stats.queue_sz_counts++;
bcl->stats.accu_queue_sz += bcl->out_queue_size;
@@ -444,10 +456,9 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
tipc_node_unlock(node);
spin_lock_bh(&bc_lock);
bcl->stats.recv_nacks++;
- bcl->owner->next = node; /* remember requestor */
+ bclink->retransmit_to = node;
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
- bcl->owner->next = NULL;
spin_unlock_bh(&bc_lock);
} else {
tipc_bclink_peek_nack(msg_destnode(msg),
@@ -574,8 +585,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
bcbearer->remains = tipc_bcast_nmap;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
- struct bearer *p = bcbearer->bpairs[bp_index].primary;
- struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+ struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
+ struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
if (!p)
break; /* no more bearers to try */
@@ -584,11 +595,11 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (bcbearer->remains_new.count == bcbearer->remains.count)
continue; /* bearer pair doesn't add anything */
- if (p->publ.blocked ||
- p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+ if (p->blocked ||
+ p->media->send_msg(buf, p, &p->media->bcast_addr)) {
/* unable to send on primary bearer */
- if (!s || s->publ.blocked ||
- s->media->send_msg(buf, &s->publ,
+ if (!s || s->blocked ||
+ s->media->send_msg(buf, s,
&s->media->bcast_addr)) {
/* unable to send on either bearer */
continue;
@@ -633,7 +644,7 @@ void tipc_bcbearer_sort(void)
memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
- struct bearer *b = &tipc_bearers[b_index];
+ struct tipc_bearer *b = &tipc_bearers[b_index];
if (!b->active || !b->nodes.count)
continue;
@@ -682,12 +693,12 @@ void tipc_bcbearer_sort(void)
void tipc_bcbearer_push(void)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
spin_lock_bh(&bc_lock);
b_ptr = &bcbearer->bearer;
- if (b_ptr->publ.blocked) {
- b_ptr->publ.blocked = 0;
+ if (b_ptr->blocked) {
+ b_ptr->blocked = 0;
tipc_bearer_lock_push(b_ptr);
}
spin_unlock_bh(&bc_lock);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 51f8c5326ce6..500c97f1c859 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -2,7 +2,7 @@
* net/tipc/bcast.h: Include file for TIPC broadcast code
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,7 @@ void tipc_port_list_free(struct port_list *pl_ptr);
int tipc_bclink_init(void);
void tipc_bclink_stop(void);
+struct tipc_node *tipc_bclink_retransmit_to(void);
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
int tipc_bclink_send_msg(struct sk_buff *buf);
void tipc_bclink_recv_pkt(struct sk_buff *buf);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 837b7a467885..f2839b0f6b65 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -2,7 +2,7 @@
* net/tipc/bearer.c: TIPC bearer code
*
* Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2004-2006, Wind River Systems
+ * Copyright (c) 2004-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,7 @@
static struct media media_list[MAX_MEDIA];
static u32 media_count;
-struct bearer tipc_bearers[MAX_BEARERS];
+struct tipc_bearer tipc_bearers[MAX_BEARERS];
/**
* media_name_valid - validate media name
@@ -278,13 +278,13 @@ static int bearer_name_validate(const char *name,
* bearer_find - locates bearer object with matching bearer name
*/
-static struct bearer *bearer_find(const char *name)
+static struct tipc_bearer *bearer_find(const char *name)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
u32 i;
for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
- if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+ if (b_ptr->active && (!strcmp(b_ptr->name, name)))
return b_ptr;
}
return NULL;
@@ -294,16 +294,16 @@ static struct bearer *bearer_find(const char *name)
* tipc_bearer_find_interface - locates bearer object with matching interface name
*/
-struct bearer *tipc_bearer_find_interface(const char *if_name)
+struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
char *b_if_name;
u32 i;
for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
if (!b_ptr->active)
continue;
- b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+ b_if_name = strchr(b_ptr->name, ':') + 1;
if (!strcmp(b_if_name, if_name))
return b_ptr;
}
@@ -318,7 +318,7 @@ struct sk_buff *tipc_bearer_get_names(void)
{
struct sk_buff *buf;
struct media *m_ptr;
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
int i, j;
buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
@@ -331,8 +331,8 @@ struct sk_buff *tipc_bearer_get_names(void)
b_ptr = &tipc_bearers[j];
if (b_ptr->active && (b_ptr->media == m_ptr)) {
tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
- b_ptr->publ.name,
- strlen(b_ptr->publ.name) + 1);
+ b_ptr->name,
+ strlen(b_ptr->name) + 1);
}
}
}
@@ -340,14 +340,14 @@ struct sk_buff *tipc_bearer_get_names(void)
return buf;
}
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
{
tipc_nmap_add(&b_ptr->nodes, dest);
tipc_disc_update_link_req(b_ptr->link_req);
tipc_bcbearer_sort();
}
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
{
tipc_nmap_remove(&b_ptr->nodes, dest);
tipc_disc_update_link_req(b_ptr->link_req);
@@ -362,12 +362,12 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
* bearer.lock must be taken before calling
* Returns binary true(1) or false(0)
*/
-static int bearer_push(struct bearer *b_ptr)
+static int bearer_push(struct tipc_bearer *b_ptr)
{
u32 res = 0;
struct link *ln, *tln;
- if (b_ptr->publ.blocked)
+ if (b_ptr->blocked)
return 0;
while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
@@ -382,13 +382,13 @@ static int bearer_push(struct bearer *b_ptr)
return list_empty(&b_ptr->cong_links);
}
-void tipc_bearer_lock_push(struct bearer *b_ptr)
+void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
{
int res;
- spin_lock_bh(&b_ptr->publ.lock);
+ spin_lock_bh(&b_ptr->lock);
res = bearer_push(b_ptr);
- spin_unlock_bh(&b_ptr->publ.lock);
+ spin_unlock_bh(&b_ptr->lock);
if (res)
tipc_bcbearer_push();
}
@@ -398,16 +398,14 @@ void tipc_bearer_lock_push(struct bearer *b_ptr)
* Interrupt enabling new requests after bearer congestion or blocking:
* See bearer_send().
*/
-void tipc_continue(struct tipc_bearer *tb_ptr)
+void tipc_continue(struct tipc_bearer *b_ptr)
{
- struct bearer *b_ptr = (struct bearer *)tb_ptr;
-
- spin_lock_bh(&b_ptr->publ.lock);
+ spin_lock_bh(&b_ptr->lock);
b_ptr->continue_count++;
if (!list_empty(&b_ptr->cong_links))
tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
- b_ptr->publ.blocked = 0;
- spin_unlock_bh(&b_ptr->publ.lock);
+ b_ptr->blocked = 0;
+ spin_unlock_bh(&b_ptr->lock);
}
/*
@@ -418,7 +416,7 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
* bearer.lock is busy
*/
-static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
}
@@ -431,11 +429,11 @@ static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_p
* bearer.lock is free
*/
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
- spin_lock_bh(&b_ptr->publ.lock);
+ spin_lock_bh(&b_ptr->lock);
tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
- spin_unlock_bh(&b_ptr->publ.lock);
+ spin_unlock_bh(&b_ptr->lock);
}
@@ -444,18 +442,18 @@ void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
* and if there is, try to resolve it before returning.
* 'tipc_net_lock' is read_locked when this function is called
*/
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
int res = 1;
if (list_empty(&b_ptr->cong_links))
return 1;
- spin_lock_bh(&b_ptr->publ.lock);
+ spin_lock_bh(&b_ptr->lock);
if (!bearer_push(b_ptr)) {
tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
res = 0;
}
- spin_unlock_bh(&b_ptr->publ.lock);
+ spin_unlock_bh(&b_ptr->lock);
return res;
}
@@ -463,9 +461,9 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
* tipc_bearer_congested - determines if bearer is currently congested
*/
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
- if (unlikely(b_ptr->publ.blocked))
+ if (unlikely(b_ptr->blocked))
return 1;
if (likely(list_empty(&b_ptr->cong_links)))
return 0;
@@ -478,7 +476,7 @@ int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
struct media *m_ptr;
struct bearer_name b_name;
char addr_string[16];
@@ -528,7 +526,7 @@ restart:
bearer_id = i;
continue;
}
- if (!strcmp(name, tipc_bearers[i].publ.name)) {
+ if (!strcmp(name, tipc_bearers[i].name)) {
warn("Bearer <%s> rejected, already enabled\n", name);
goto failed;
}
@@ -551,8 +549,8 @@ restart:
}
b_ptr = &tipc_bearers[bearer_id];
- strcpy(b_ptr->publ.name, name);
- res = m_ptr->enable_bearer(&b_ptr->publ);
+ strcpy(b_ptr->name, name);
+ res = m_ptr->enable_bearer(b_ptr);
if (res) {
warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
goto failed;
@@ -568,9 +566,9 @@ restart:
INIT_LIST_HEAD(&b_ptr->links);
if (m_ptr->bcast) {
b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
- bcast_scope, 2);
+ bcast_scope);
}
- spin_lock_init(&b_ptr->publ.lock);
+ spin_lock_init(&b_ptr->lock);
write_unlock_bh(&tipc_net_lock);
info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
name, tipc_addr_string_fill(addr_string, bcast_scope), priority);
@@ -587,7 +585,7 @@ failed:
int tipc_block_bearer(const char *name)
{
- struct bearer *b_ptr = NULL;
+ struct tipc_bearer *b_ptr = NULL;
struct link *l_ptr;
struct link *temp_l_ptr;
@@ -600,8 +598,8 @@ int tipc_block_bearer(const char *name)
}
info("Blocking bearer <%s>\n", name);
- spin_lock_bh(&b_ptr->publ.lock);
- b_ptr->publ.blocked = 1;
+ spin_lock_bh(&b_ptr->lock);
+ b_ptr->blocked = 1;
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
struct tipc_node *n_ptr = l_ptr->owner;
@@ -609,7 +607,7 @@ int tipc_block_bearer(const char *name)
tipc_link_reset(l_ptr);
spin_unlock_bh(&n_ptr->lock);
}
- spin_unlock_bh(&b_ptr->publ.lock);
+ spin_unlock_bh(&b_ptr->lock);
read_unlock_bh(&tipc_net_lock);
return 0;
}
@@ -620,27 +618,27 @@ int tipc_block_bearer(const char *name)
* Note: This routine assumes caller holds tipc_net_lock.
*/
-static void bearer_disable(struct bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr)
{
struct link *l_ptr;
struct link *temp_l_ptr;
- info("Disabling bearer <%s>\n", b_ptr->publ.name);
+ info("Disabling bearer <%s>\n", b_ptr->name);
tipc_disc_stop_link_req(b_ptr->link_req);
- spin_lock_bh(&b_ptr->publ.lock);
+ spin_lock_bh(&b_ptr->lock);
b_ptr->link_req = NULL;
- b_ptr->publ.blocked = 1;
- b_ptr->media->disable_bearer(&b_ptr->publ);
+ b_ptr->blocked = 1;
+ b_ptr->media->disable_bearer(b_ptr);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
- spin_unlock_bh(&b_ptr->publ.lock);
- memset(b_ptr, 0, sizeof(struct bearer));
+ spin_unlock_bh(&b_ptr->lock);
+ memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
int tipc_disable_bearer(const char *name)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
int res;
write_lock_bh(&tipc_net_lock);
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 85f451d5aacf..255dea64f7bd 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -2,7 +2,7 @@
* net/tipc/bearer.h: Include file for TIPC bearer code
*
* Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,26 +61,7 @@ struct tipc_media_addr {
} dev_addr;
};
-/**
- * struct tipc_bearer - TIPC bearer info available to media code
- * @usr_handle: pointer to additional media-specific information about bearer
- * @mtu: max packet size bearer can support
- * @blocked: non-zero if bearer is blocked
- * @lock: spinlock for controlling access to bearer
- * @addr: media-specific address associated with bearer
- * @name: bearer name (format = media:interface)
- *
- * Note: TIPC initializes "name" and "lock" fields; media code is responsible
- * for initialization all other fields when a bearer is enabled.
- */
-struct tipc_bearer {
- void *usr_handle;
- u32 mtu;
- int blocked;
- spinlock_t lock;
- struct tipc_media_addr addr;
- char name[TIPC_MAX_BEARER_NAME];
-};
+struct tipc_bearer;
/**
* struct media - TIPC media information available to internal users
@@ -115,8 +96,13 @@ struct media {
};
/**
- * struct bearer - TIPC bearer information available to internal users
- * @publ: bearer information available to privileged users
+ * struct tipc_bearer - TIPC bearer structure
+ * @usr_handle: pointer to additional media-specific information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
* @priority: default link priority for bearer
* @detect_scope: network address mask used during automatic link creation
@@ -128,10 +114,18 @@ struct media {
* @active: non-zero if bearer structure represents a bearer
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @nodes: indicates which nodes in cluster can be reached through bearer
+ *
+ * Note: media-specific code is responsible for initialization of the fields
+ * indicated below when a bearer is enabled; TIPC's generic bearer code takes
+ * care of initializing all other fields.
*/
-
-struct bearer {
- struct tipc_bearer publ;
+struct tipc_bearer {
+ void *usr_handle; /* initialized by media */
+ u32 mtu; /* initialized by media */
+ int blocked; /* initialized by media */
+ struct tipc_media_addr addr; /* initialized by media */
+ char name[TIPC_MAX_BEARER_NAME];
+ spinlock_t lock;
struct media *media;
u32 priority;
u32 detect_scope;
@@ -152,7 +146,7 @@ struct bearer_name {
struct link;
-extern struct bearer tipc_bearers[];
+extern struct tipc_bearer tipc_bearers[];
/*
* TIPC routines available to supported media types
@@ -186,14 +180,14 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
-struct bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr);
+struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr);
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr);
void tipc_bearer_stop(void);
-void tipc_bearer_lock_push(struct bearer *b_ptr);
+void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
/**
@@ -214,10 +208,11 @@ void tipc_bearer_lock_push(struct bearer *b_ptr);
* and let TIPC's link code deal with the undelivered message.
*/
-static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
+ struct sk_buff *buf,
struct tipc_media_addr *dest)
{
- return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+ return !b_ptr->media->send_msg(buf, b_ptr, dest);
}
#endif /* _TIPC_BEARER_H */
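With struct bearer now folded into struct tipc_bearer, media drivers receive the bearer object directly in their callbacks instead of the old embedded "publ" view. A minimal sketch of what a media implementation might look like against the new bearer.h (the demo_* names are illustrative only and not part of this patch):
#include "core.h"
#include "bearer.h"
/* Illustrative only: fills in the fields marked "initialized by media" */
static int demo_enable_bearer(struct tipc_bearer *b_ptr)
{
	b_ptr->usr_handle = NULL;	/* would point at the media's own device state */
	b_ptr->mtu = 1500;
	b_ptr->blocked = 0;
	memset(&b_ptr->addr, 0, sizeof(b_ptr->addr));
	return 0;
}
static void demo_disable_bearer(struct tipc_bearer *b_ptr)
{
	b_ptr->usr_handle = NULL;
}
static int demo_send_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr,
			 struct tipc_media_addr *dest)
{
	/* no (struct bearer *) cast, no b_ptr->publ dereference any more */
	if (b_ptr->blocked)
		return 1;	/* non-zero: not sent; link code deals with the undelivered message */
	/* ... hand buf to the underlying device, addressed by *dest ... */
	return 0;
}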
diff --git a/net/tipc/core.c b/net/tipc/core.c
index e071579e0850..2da1fc75ad65 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -2,7 +2,7 @@
* net/tipc/core.c: TIPC module code
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,6 @@
int tipc_mode = TIPC_NOT_RUNNING;
int tipc_random;
-atomic_t tipc_user_count = ATOMIC_INIT(0);
const char tipc_alphabet[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 997158546e25..37544d9f73e1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
* net/tipc/core.h: Include file for TIPC global declarations
*
* Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -161,7 +161,6 @@ extern int tipc_remote_management;
extern int tipc_mode;
extern int tipc_random;
extern const char tipc_alphabet[];
-extern atomic_t tipc_user_count;
/*
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index fa026bd91a68..09ce2318b89e 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -2,7 +2,7 @@
* net/tipc/discover.c
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@
* @timer_intv: current interval between requests (in ms)
*/
struct link_req {
- struct bearer *bearer;
+ struct tipc_bearer *bearer;
struct tipc_media_addr dest;
struct sk_buff *buf;
struct timer_list timer;
@@ -67,15 +67,13 @@ struct link_req {
/**
* tipc_disc_init_msg - initialize a link setup message
* @type: message type (request or response)
- * @req_links: number of links associated with message
* @dest_domain: network domain of node(s) which should respond to message
* @b_ptr: ptr to bearer issuing message
*/
static struct sk_buff *tipc_disc_init_msg(u32 type,
- u32 req_links,
u32 dest_domain,
- struct bearer *b_ptr)
+ struct tipc_bearer *b_ptr)
{
struct sk_buff *buf = tipc_buf_acquire(DSC_H_SIZE);
struct tipc_msg *msg;
@@ -84,10 +82,9 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
msg = buf_msg(buf);
tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
- msg_set_req_links(msg, req_links);
msg_set_dest_domain(msg, dest_domain);
msg_set_bc_netid(msg, tipc_net_id);
- msg_set_media_addr(msg, &b_ptr->publ.addr);
+ msg_set_media_addr(msg, &b_ptr->addr);
}
return buf;
}
@@ -99,7 +96,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
* @media_addr: media address advertised by duplicated node
*/
-static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
+static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
struct tipc_media_addr *media_addr)
{
char node_addr_str[16];
@@ -111,7 +108,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
tipc_media_addr_printf(&pb, media_addr);
tipc_printbuf_validate(&pb);
warn("Duplicate %s using %s seen on <%s>\n",
- node_addr_str, media_addr_str, b_ptr->publ.name);
+ node_addr_str, media_addr_str, b_ptr->name);
}
/**
@@ -120,7 +117,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
* @b_ptr: bearer that message arrived on
*/
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
+void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
struct link *link;
struct tipc_media_addr media_addr;
@@ -140,7 +137,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
if (!tipc_addr_node_valid(orig))
return;
if (orig == tipc_own_addr) {
- if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
+ if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
return;
}
@@ -191,9 +188,9 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
spin_unlock_bh(&n_ptr->lock);
if ((type == DSC_RESP_MSG) || link_fully_up)
return;
- rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+ rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
if (rbuf != NULL) {
- b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+ b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
buf_discard(rbuf);
}
}
@@ -249,9 +246,9 @@ void tipc_disc_update_link_req(struct link_req *req)
static void disc_timeout(struct link_req *req)
{
- spin_lock_bh(&req->bearer->publ.lock);
+ spin_lock_bh(&req->bearer->lock);
- req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+ req->bearer->media->send_msg(req->buf, req->bearer, &req->dest);
if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
(req->timer_intv == TIPC_LINK_REQ_FAST)) {
@@ -266,7 +263,7 @@ static void disc_timeout(struct link_req *req)
}
k_start_timer(&req->timer, req->timer_intv);
- spin_unlock_bh(&req->bearer->publ.lock);
+ spin_unlock_bh(&req->bearer->lock);
}
/**
@@ -274,15 +271,13 @@ static void disc_timeout(struct link_req *req)
* @b_ptr: ptr to bearer issuing requests
* @dest: destination address for request messages
* @dest_domain: network domain of node(s) which should respond to message
- * @req_links: max number of desired links
*
* Returns pointer to link request structure, or NULL if unable to create.
*/
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
+struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr,
const struct tipc_media_addr *dest,
- u32 dest_domain,
- u32 req_links)
+ u32 dest_domain)
{
struct link_req *req;
@@ -290,7 +285,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
if (!req)
return NULL;
- req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+ req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
if (!req->buf) {
kfree(req);
return NULL;
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index d2c3cffb79fc..e48a167e47b2 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -2,7 +2,7 @@
* net/tipc/discover.h
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,13 +39,12 @@
struct link_req;
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
+struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr,
const struct tipc_media_addr *dest,
- u32 dest_domain,
- u32 req_links);
+ u32 dest_domain);
void tipc_disc_update_link_req(struct link_req *req);
void tipc_disc_stop_link_req(struct link_req *req);
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
+void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 18702f58d111..89fbb6d6e956 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2,7 +2,7 @@
* net/tipc/link.c: TIPC link code
*
* Copyright (c) 1996-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,7 +90,7 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
-static int link_send_sections_long(struct port *sender,
+static int link_send_sections_long(struct tipc_port *sender,
struct iovec const *msg_sect,
u32 num_sect, u32 destnode);
static void link_check_defragm_bufs(struct link *l_ptr);
@@ -113,7 +113,7 @@ static void link_init_max_pkt(struct link *l_ptr)
{
u32 max_pkt;
- max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+ max_pkt = (l_ptr->b_ptr->mtu & ~3);
if (max_pkt > MAX_MSG_SIZE)
max_pkt = MAX_MSG_SIZE;
@@ -246,9 +246,6 @@ static void link_timeout(struct link *l_ptr)
l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
l_ptr->stats.queue_sz_counts++;
- if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
- l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
-
if (l_ptr->first_out) {
struct tipc_msg *msg = buf_msg(l_ptr->first_out);
u32 length = msg_size(msg);
@@ -303,7 +300,7 @@ static void link_set_timer(struct link *l_ptr, u32 time)
* Returns pointer to link.
*/
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct link *tipc_link_create(struct tipc_bearer *b_ptr, const u32 peer,
const struct tipc_media_addr *media_addr)
{
struct link *l_ptr;
@@ -317,7 +314,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
}
l_ptr->addr = peer;
- if_name = strchr(b_ptr->publ.name, ':') + 1;
+ if_name = strchr(b_ptr->name, ':') + 1;
sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
tipc_node(tipc_own_addr),
@@ -391,7 +388,9 @@ void tipc_link_delete(struct link *l_ptr)
static void link_start(struct link *l_ptr)
{
+ tipc_node_lock(l_ptr->owner);
link_state_event(l_ptr, STARTING_EVT);
+ tipc_node_unlock(l_ptr->owner);
}
/**
@@ -406,7 +405,7 @@ static void link_start(struct link *l_ptr)
static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
spin_lock_bh(&tipc_port_list_lock);
p_ptr = tipc_port_lock(origport);
@@ -415,7 +414,7 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
goto exit;
if (!list_empty(&p_ptr->wait_list))
goto exit;
- p_ptr->publ.congested = 1;
+ p_ptr->congested = 1;
p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
l_ptr->stats.link_congs++;
@@ -428,8 +427,8 @@ exit:
void tipc_link_wakeup_ports(struct link *l_ptr, int all)
{
- struct port *p_ptr;
- struct port *temp_p_ptr;
+ struct tipc_port *p_ptr;
+ struct tipc_port *temp_p_ptr;
int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
if (all)
@@ -445,11 +444,11 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
if (win <= 0)
break;
list_del_init(&p_ptr->wait_list);
- spin_lock_bh(p_ptr->publ.lock);
- p_ptr->publ.congested = 0;
- p_ptr->wakeup(&p_ptr->publ);
+ spin_lock_bh(p_ptr->lock);
+ p_ptr->congested = 0;
+ p_ptr->wakeup(p_ptr);
win -= p_ptr->waiting_pkts;
- spin_unlock_bh(p_ptr->publ.lock);
+ spin_unlock_bh(p_ptr->lock);
}
exit:
@@ -824,7 +823,10 @@ static void link_add_to_outqueue(struct link *l_ptr,
l_ptr->last_out = buf;
} else
l_ptr->first_out = l_ptr->last_out = buf;
+
l_ptr->out_queue_size++;
+ if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+ l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}
/*
@@ -867,9 +869,6 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
/* Packet can be queued or sent: */
- if (queue_size > l_ptr->stats.max_queue_sz)
- l_ptr->stats.max_queue_sz = queue_size;
-
if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
!link_congested(l_ptr))) {
link_add_to_outqueue(l_ptr, buf, msg);
@@ -1027,12 +1026,12 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
* except for total message length.
* Returns user data length or errno.
*/
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
struct iovec const *msg_sect,
const u32 num_sect,
u32 destaddr)
{
- struct tipc_msg *hdr = &sender->publ.phdr;
+ struct tipc_msg *hdr = &sender->phdr;
struct link *l_ptr;
struct sk_buff *buf;
struct tipc_node *node;
@@ -1045,7 +1044,7 @@ again:
* (Must not hold any locks while building message.)
*/
- res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
+ res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
!sender->user_port, &buf);
read_lock_bh(&tipc_net_lock);
@@ -1056,7 +1055,7 @@ again:
if (likely(l_ptr)) {
if (likely(buf)) {
res = link_send_buf_fast(l_ptr, buf,
- &sender->publ.max_pkt);
+ &sender->max_pkt);
if (unlikely(res < 0))
buf_discard(buf);
exit:
@@ -1075,7 +1074,7 @@ exit:
if (link_congested(l_ptr) ||
!list_empty(&l_ptr->b_ptr->cong_links)) {
res = link_schedule_port(l_ptr,
- sender->publ.ref, res);
+ sender->ref, res);
goto exit;
}
@@ -1084,12 +1083,12 @@ exit:
* then re-try fast path or fragment the message
*/
- sender->publ.max_pkt = l_ptr->max_pkt;
+ sender->max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
- if ((msg_hdr_sz(hdr) + res) <= sender->publ.max_pkt)
+ if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
goto again;
return link_send_sections_long(sender, msg_sect,
@@ -1123,14 +1122,14 @@ exit:
*
* Returns user data length or errno.
*/
-static int link_send_sections_long(struct port *sender,
+static int link_send_sections_long(struct tipc_port *sender,
struct iovec const *msg_sect,
u32 num_sect,
u32 destaddr)
{
struct link *l_ptr;
struct tipc_node *node;
- struct tipc_msg *hdr = &sender->publ.phdr;
+ struct tipc_msg *hdr = &sender->phdr;
u32 dsz = msg_data_sz(hdr);
u32 max_pkt, fragm_sz, rest;
struct tipc_msg fragm_hdr;
@@ -1142,7 +1141,7 @@ static int link_send_sections_long(struct port *sender,
again:
fragm_no = 1;
- max_pkt = sender->publ.max_pkt - INT_H_SIZE;
+ max_pkt = sender->max_pkt - INT_H_SIZE;
/* leave room for tunnel header in case of link changeover */
fragm_sz = max_pkt - INT_H_SIZE;
/* leave room for fragmentation header in each fragment */
@@ -1157,7 +1156,7 @@ again:
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, msg_destnode(hdr));
- msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+ msg_set_link_selector(&fragm_hdr, sender->ref);
msg_set_size(&fragm_hdr, max_pkt);
msg_set_fragm_no(&fragm_hdr, 1);
@@ -1238,13 +1237,13 @@ error:
node = tipc_node_find(destaddr);
if (likely(node)) {
tipc_node_lock(node);
- l_ptr = node->active_links[sender->publ.ref & 1];
+ l_ptr = node->active_links[sender->ref & 1];
if (!l_ptr) {
tipc_node_unlock(node);
goto reject;
}
if (l_ptr->max_pkt < max_pkt) {
- sender->publ.max_pkt = l_ptr->max_pkt;
+ sender->max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
for (; buf_chain; buf_chain = buf) {
buf = buf_chain->next;
@@ -1441,7 +1440,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
info("Outstanding acks: %lu\n",
(unsigned long) TIPC_SKB_CB(buf)->handle);
- n_ptr = l_ptr->owner->next;
+ n_ptr = tipc_bclink_retransmit_to();
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -1595,11 +1594,10 @@ static int link_recv_buf_validate(struct sk_buff *buf)
* structure (i.e. cannot be NULL), but bearer can be inactive.
*/
-void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
read_lock_bh(&tipc_net_lock);
while (head) {
- struct bearer *b_ptr = (struct bearer *)tb_ptr;
struct tipc_node *n_ptr;
struct link *l_ptr;
struct sk_buff *crs;
@@ -1950,6 +1948,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
msg_set_seq_gap(msg, 0);
msg_set_next_sent(msg, 1);
+ msg_set_probe(msg, 0);
msg_set_link_tolerance(msg, l_ptr->tolerance);
msg_set_linkprio(msg, l_ptr->priority);
msg_set_max_pkt(msg, l_ptr->max_pkt_target);
@@ -2618,6 +2617,9 @@ static void link_check_defragm_bufs(struct link *l_ptr)
static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
{
+ if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
+ return;
+
l_ptr->tolerance = tolerance;
l_ptr->continuity_interval =
((tolerance / 4) > 500) ? 500 : tolerance / 4;
@@ -2658,7 +2660,7 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
static struct link *link_find_link(const char *name, struct tipc_node **node)
{
struct link_name link_name_parts;
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
struct link *l_ptr;
if (!link_name_validate(name, &link_name_parts))
@@ -2961,7 +2963,7 @@ static void link_print(struct link *l_ptr, const char *str)
tipc_printf(buf, str);
tipc_printf(buf, "Link %x<%s>:",
- l_ptr->addr, l_ptr->b_ptr->publ.name);
+ l_ptr->addr, l_ptr->b_ptr->name);
#ifdef CONFIG_TIPC_DEBUG
if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
@@ -2981,9 +2983,9 @@ static void link_print(struct link *l_ptr, const char *str)
!= (l_ptr->out_queue_size - 1)) ||
(l_ptr->last_out->next != NULL)) {
tipc_printf(buf, "\nSend queue inconsistency\n");
- tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
- tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
- tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
+ tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
+ tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
+ tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
}
} else
tipc_printf(buf, "[]");
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 70967e637027..a7794e7ede29 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -2,7 +2,7 @@
* net/tipc/link.h: Include file for TIPC link code
*
* Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@ struct link {
u32 checkpoint;
u32 peer_session;
u32 peer_bearer_id;
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
u32 tolerance;
u32 continuity_interval;
u32 abort_limit;
@@ -196,24 +196,18 @@ struct link {
u32 bearer_congs;
u32 deferred_recv;
u32 duplicates;
-
- /* for statistical profiling of send queue size */
-
- u32 max_queue_sz;
- u32 accu_queue_sz;
- u32 queue_sz_counts;
-
- /* for statistical profiling of message lengths */
-
- u32 msg_length_counts;
- u32 msg_lengths_total;
- u32 msg_length_profile[7];
+ u32 max_queue_sz; /* send queue size high water mark */
+ u32 accu_queue_sz; /* used for send queue size profiling */
+ u32 queue_sz_counts; /* used for send queue size profiling */
+ u32 msg_length_counts; /* used for message length profiling */
+ u32 msg_lengths_total; /* used for message length profiling */
+ u32 msg_length_profile[7]; /* used for msg. length profiling */
} stats;
};
-struct port;
+struct tipc_port;
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct link *tipc_link_create(struct tipc_bearer *b_ptr, const u32 peer,
const struct tipc_media_addr *media_addr);
void tipc_link_delete(struct link *l_ptr);
void tipc_link_changeover(struct link *l_ptr);
@@ -230,7 +224,7 @@ void tipc_link_reset(struct link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
struct iovec const *msg_sect,
const u32 num_sect,
u32 destnode);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index bb6180c4fcbb..0787e12423b8 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -2,7 +2,7 @@
* net/tipc/msg.c: TIPC message header routines
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -381,20 +381,15 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
}
- if (msg_routed(msg) && !msg_non_seq(msg))
- tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
}
if (msg_user(msg) == NAME_DISTRIBUTOR) {
tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
- if (msg_routed(msg))
- tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
}
if (msg_user(msg) == LINK_CONFIG) {
u32 *raw = (u32 *)msg;
struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
- tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
tipc_media_addr_printf(buf, orig);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 92c4c4fd7b3f..9d643a1b7d22 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -2,7 +2,7 @@
* net/tipc/msg.h: Include file for TIPC message header routines
*
* Copyright (c) 2000-2007, Ericsson AB
- * Copyright (c) 2005-2008, Wind River Systems
+ * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -438,11 +438,6 @@ static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
msg_set_word(m, 8, n);
}
-static inline u32 msg_transp_seqno(struct tipc_msg *m)
-{
- return msg_word(m, 8);
-}
-
static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
{
msg_set_word(m, 8, n);
@@ -453,11 +448,6 @@ static inline u32 msg_timestamp(struct tipc_msg *m)
return msg_word(m, 8);
}
-static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
-{
- msg_set_word(m, 8, n);
-}
-
static inline u32 msg_nameinst(struct tipc_msg *m)
{
return msg_word(m, 9);
@@ -577,16 +567,6 @@ static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 16, 0x1fff, n);
}
-static inline u32 msg_req_links(struct tipc_msg *m)
-{
- return msg_bits(m, 1, 16, 0xfff);
-}
-
-static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
-{
- msg_set_bits(m, 1, 16, 0xfff, n);
-}
-
/*
* Word 2
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3af53e327f49..e4dba1dfb6ea 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
* net/tipc/node.c: TIPC node management routines
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -238,7 +238,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
return n_ptr;
}
err("Attempt to establish second link on <%s> to %s\n",
- l_ptr->b_ptr->publ.name,
+ l_ptr->b_ptr->name,
tipc_addr_string_fill(addr_string, l_ptr->addr));
}
return NULL;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 067bab2a0b98..6ff78f9c7d65 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -2,7 +2,7 @@
* net/tipc/port.c: TIPC port code
*
* Copyright (c) 1992-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -54,33 +54,19 @@ static DEFINE_SPINLOCK(queue_lock);
static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
-static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
-static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
static void port_timeout(unsigned long ref);
-static u32 port_peernode(struct port *p_ptr)
+static u32 port_peernode(struct tipc_port *p_ptr)
{
- return msg_destnode(&p_ptr->publ.phdr);
+ return msg_destnode(&p_ptr->phdr);
}
-static u32 port_peerport(struct port *p_ptr)
+static u32 port_peerport(struct tipc_port *p_ptr)
{
- return msg_destport(&p_ptr->publ.phdr);
-}
-
-static u32 port_out_seqno(struct port *p_ptr)
-{
- return msg_transp_seqno(&p_ptr->publ.phdr);
-}
-
-static void port_incr_out_seqno(struct port *p_ptr)
-{
- struct tipc_msg *m = &p_ptr->publ.phdr;
-
- if (likely(!msg_routed(m)))
- return;
- msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+ return msg_destport(&p_ptr->phdr);
}
/**
@@ -94,7 +80,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
struct sk_buff *buf;
struct sk_buff *ibuf = NULL;
struct port_list dports = {0, NULL, };
- struct port *oport = tipc_port_deref(ref);
+ struct tipc_port *oport = tipc_port_deref(ref);
int ext_targets;
int res;
@@ -103,7 +89,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
/* Create multicast message */
- hdr = &oport->publ.phdr;
+ hdr = &oport->phdr;
msg_set_type(hdr, TIPC_MCAST_MSG);
msg_set_nametype(hdr, seq->type);
msg_set_namelower(hdr, seq->lower);
@@ -211,7 +197,7 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
void (*wakeup)(struct tipc_port *),
const u32 importance)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
u32 ref;
@@ -220,21 +206,19 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
warn("Port creation failed, no memory\n");
return NULL;
}
- ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
+ ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
if (!ref) {
warn("Port creation failed, reference table exhausted\n");
kfree(p_ptr);
return NULL;
}
- p_ptr->publ.usr_handle = usr_handle;
- p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
- p_ptr->publ.ref = ref;
- msg = &p_ptr->publ.phdr;
+ p_ptr->usr_handle = usr_handle;
+ p_ptr->max_pkt = MAX_PKT_DEFAULT;
+ p_ptr->ref = ref;
+ msg = &p_ptr->phdr;
tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
msg_set_origport(msg, ref);
- p_ptr->last_in_seqno = 41;
- p_ptr->sent = 1;
INIT_LIST_HEAD(&p_ptr->wait_list);
INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
p_ptr->dispatcher = dispatcher;
@@ -246,12 +230,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
INIT_LIST_HEAD(&p_ptr->port_list);
list_add_tail(&p_ptr->port_list, &ports);
spin_unlock_bh(&tipc_port_list_lock);
- return &(p_ptr->publ);
+ return p_ptr;
}
int tipc_deleteport(u32 ref)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
tipc_withdraw(ref, 0, NULL);
@@ -263,7 +247,7 @@ int tipc_deleteport(u32 ref)
tipc_port_unlock(p_ptr);
k_cancel_timer(&p_ptr->timer);
- if (p_ptr->publ.connected) {
+ if (p_ptr->connected) {
buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
tipc_nodesub_unsubscribe(&p_ptr->subscription);
}
@@ -279,14 +263,14 @@ int tipc_deleteport(u32 ref)
return 0;
}
-static int port_unreliable(struct port *p_ptr)
+static int port_unreliable(struct tipc_port *p_ptr)
{
- return msg_src_droppable(&p_ptr->publ.phdr);
+ return msg_src_droppable(&p_ptr->phdr);
}
int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
@@ -298,24 +282,24 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
+ msg_set_src_droppable(&p_ptr->phdr, (isunreliable != 0));
tipc_port_unlock(p_ptr);
return 0;
}
-static int port_unreturnable(struct port *p_ptr)
+static int port_unreturnable(struct tipc_port *p_ptr)
{
- return msg_dest_droppable(&p_ptr->publ.phdr);
+ return msg_dest_droppable(&p_ptr->phdr);
}
int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
@@ -327,12 +311,12 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
+ msg_set_dest_droppable(&p_ptr->phdr, (isunrejectable != 0));
tipc_port_unlock(p_ptr);
return 0;
}
@@ -345,7 +329,7 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
u32 origport, u32 orignode,
u32 usr, u32 type, u32 err,
- u32 seqno, u32 ack)
+ u32 ack)
{
struct sk_buff *buf;
struct tipc_msg *msg;
@@ -358,7 +342,6 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
msg_set_destport(msg, destport);
msg_set_origport(msg, origport);
msg_set_orignode(msg, orignode);
- msg_set_transp_seqno(msg, seqno);
msg_set_msgcnt(msg, ack);
}
return buf;
@@ -413,10 +396,10 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
/* send self-abort message when rejecting on a connected port */
if (msg_connected(msg)) {
struct sk_buff *abuf = NULL;
- struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+ struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
if (p_ptr) {
- if (p_ptr->publ.connected)
+ if (p_ptr->connected)
abuf = port_build_self_abort_msg(p_ptr, err);
tipc_port_unlock(p_ptr);
}
@@ -429,7 +412,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
return data_sz;
}
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
int err)
{
@@ -446,13 +429,13 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
static void port_timeout(unsigned long ref)
{
- struct port *p_ptr = tipc_port_lock(ref);
+ struct tipc_port *p_ptr = tipc_port_lock(ref);
struct sk_buff *buf = NULL;
if (!p_ptr)
return;
- if (!p_ptr->publ.connected) {
+ if (!p_ptr->connected) {
tipc_port_unlock(p_ptr);
return;
}
@@ -463,14 +446,12 @@ static void port_timeout(unsigned long ref)
} else {
buf = port_build_proto_msg(port_peerport(p_ptr),
port_peernode(p_ptr),
- p_ptr->publ.ref,
+ p_ptr->ref,
tipc_own_addr,
CONN_MANAGER,
CONN_PROBE,
TIPC_OK,
- port_out_seqno(p_ptr),
0);
- port_incr_out_seqno(p_ptr);
p_ptr->probing_state = PROBING;
k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
}
@@ -481,7 +462,7 @@ static void port_timeout(unsigned long ref)
static void port_handle_node_down(unsigned long ref)
{
- struct port *p_ptr = tipc_port_lock(ref);
+ struct tipc_port *p_ptr = tipc_port_lock(ref);
struct sk_buff *buf = NULL;
if (!p_ptr)
@@ -492,73 +473,71 @@ static void port_handle_node_down(unsigned long ref)
}
-static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
{
- u32 imp = msg_importance(&p_ptr->publ.phdr);
+ u32 imp = msg_importance(&p_ptr->phdr);
- if (!p_ptr->publ.connected)
+ if (!p_ptr->connected)
return NULL;
if (imp < TIPC_CRITICAL_IMPORTANCE)
imp++;
- return port_build_proto_msg(p_ptr->publ.ref,
+ return port_build_proto_msg(p_ptr->ref,
tipc_own_addr,
port_peerport(p_ptr),
port_peernode(p_ptr),
imp,
TIPC_CONN_MSG,
err,
- p_ptr->last_in_seqno + 1,
0);
}
-static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
{
- u32 imp = msg_importance(&p_ptr->publ.phdr);
+ u32 imp = msg_importance(&p_ptr->phdr);
- if (!p_ptr->publ.connected)
+ if (!p_ptr->connected)
return NULL;
if (imp < TIPC_CRITICAL_IMPORTANCE)
imp++;
return port_build_proto_msg(port_peerport(p_ptr),
port_peernode(p_ptr),
- p_ptr->publ.ref,
+ p_ptr->ref,
tipc_own_addr,
imp,
TIPC_CONN_MSG,
err,
- port_out_seqno(p_ptr),
0);
}
void tipc_port_recv_proto_msg(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+ struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
u32 err = TIPC_OK;
struct sk_buff *r_buf = NULL;
struct sk_buff *abort_buf = NULL;
if (!p_ptr) {
err = TIPC_ERR_NO_PORT;
- } else if (p_ptr->publ.connected) {
+ } else if (p_ptr->connected) {
if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
(port_peerport(p_ptr) != msg_origport(msg))) {
err = TIPC_ERR_NO_PORT;
} else if (msg_type(msg) == CONN_ACK) {
int wakeup = tipc_port_congested(p_ptr) &&
- p_ptr->publ.congested &&
+ p_ptr->congested &&
p_ptr->wakeup;
p_ptr->acked += msg_msgcnt(msg);
if (tipc_port_congested(p_ptr))
goto exit;
- p_ptr->publ.congested = 0;
+ p_ptr->congested = 0;
if (!wakeup)
goto exit;
- p_ptr->wakeup(&p_ptr->publ);
+ p_ptr->wakeup(p_ptr);
goto exit;
}
- } else if (p_ptr->publ.published) {
+ } else if (p_ptr->published) {
err = TIPC_ERR_NO_PORT;
}
if (err) {
@@ -569,7 +548,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
TIPC_HIGH_IMPORTANCE,
TIPC_CONN_MSG,
err,
- 0,
0);
goto exit;
}
@@ -583,11 +561,9 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
CONN_MANAGER,
CONN_PROBE_REPLY,
TIPC_OK,
- port_out_seqno(p_ptr),
0);
}
p_ptr->probing_state = CONFIRMED;
- port_incr_out_seqno(p_ptr);
exit:
if (p_ptr)
tipc_port_unlock(p_ptr);
@@ -596,29 +572,29 @@ exit:
buf_discard(buf);
}
-static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
{
struct publication *publ;
if (full_id)
tipc_printf(buf, "<%u.%u.%u:%u>:",
tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr), p_ptr->publ.ref);
+ tipc_node(tipc_own_addr), p_ptr->ref);
else
- tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
+ tipc_printf(buf, "%-10u:", p_ptr->ref);
- if (p_ptr->publ.connected) {
+ if (p_ptr->connected) {
u32 dport = port_peerport(p_ptr);
u32 destnode = port_peernode(p_ptr);
tipc_printf(buf, " connected to <%u.%u.%u:%u>",
tipc_zone(destnode), tipc_cluster(destnode),
tipc_node(destnode), dport);
- if (p_ptr->publ.conn_type != 0)
+ if (p_ptr->conn_type != 0)
tipc_printf(buf, " via {%u,%u}",
- p_ptr->publ.conn_type,
- p_ptr->publ.conn_instance);
- } else if (p_ptr->publ.published) {
+ p_ptr->conn_type,
+ p_ptr->conn_instance);
+ } else if (p_ptr->published) {
tipc_printf(buf, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
if (publ->lower == publ->upper)
@@ -639,7 +615,7 @@ struct sk_buff *tipc_port_get_ports(void)
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
struct print_buf pb;
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
int str_len;
buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
@@ -650,9 +626,9 @@ struct sk_buff *tipc_port_get_ports(void)
tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
- spin_lock_bh(p_ptr->publ.lock);
+ spin_lock_bh(p_ptr->lock);
port_print(p_ptr, &pb, 0);
- spin_unlock_bh(p_ptr->publ.lock);
+ spin_unlock_bh(p_ptr->lock);
}
spin_unlock_bh(&tipc_port_list_lock);
str_len = tipc_printbuf_validate(&pb);
@@ -665,12 +641,12 @@ struct sk_buff *tipc_port_get_ports(void)
void tipc_port_reinit(void)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
if (msg_orignode(msg) == tipc_own_addr)
break;
msg_set_prevnode(msg, tipc_own_addr);
@@ -695,7 +671,7 @@ static void port_dispatcher_sigh(void *dummy)
spin_unlock_bh(&queue_lock);
while (buf) {
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct user_port *up_ptr;
struct tipc_portid orig;
struct tipc_name_seq dseq;
@@ -720,8 +696,8 @@ static void port_dispatcher_sigh(void *dummy)
orig.node = msg_orignode(msg);
up_ptr = p_ptr->user_port;
usr_handle = up_ptr->usr_handle;
- connected = p_ptr->publ.connected;
- published = p_ptr->publ.published;
+ connected = p_ptr->connected;
+ published = p_ptr->published;
if (unlikely(msg_errcode(msg)))
goto err;
@@ -732,6 +708,7 @@ static void port_dispatcher_sigh(void *dummy)
tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
u32 peer_port = port_peerport(p_ptr);
u32 peer_node = port_peernode(p_ptr);
+ u32 dsz;
tipc_port_unlock(p_ptr);
if (unlikely(!cb))
@@ -742,13 +719,14 @@ static void port_dispatcher_sigh(void *dummy)
} else if ((msg_origport(msg) != peer_port) ||
(msg_orignode(msg) != peer_node))
goto reject;
- if (unlikely(++p_ptr->publ.conn_unacked >=
- TIPC_FLOW_CONTROL_WIN))
+ dsz = msg_data_sz(msg);
+ if (unlikely(dsz &&
+ (++p_ptr->conn_unacked >=
+ TIPC_FLOW_CONTROL_WIN)))
tipc_acknowledge(dref,
- p_ptr->publ.conn_unacked);
+ p_ptr->conn_unacked);
skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg), dsz);
break;
}
case TIPC_DIRECT_MSG:{
@@ -872,7 +850,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
static void port_wakeup_sh(unsigned long ref)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct user_port *up_ptr;
tipc_continue_event cb = NULL;
void *uh = NULL;
@@ -898,14 +876,14 @@ static void port_wakeup(struct tipc_port *p_ptr)
void tipc_acknowledge(u32 ref, u32 ack)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return;
- if (p_ptr->publ.connected) {
- p_ptr->publ.conn_unacked -= ack;
+ if (p_ptr->connected) {
+ p_ptr->conn_unacked -= ack;
buf = port_build_proto_msg(port_peerport(p_ptr),
port_peernode(p_ptr),
ref,
@@ -913,7 +891,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
CONN_MANAGER,
CONN_ACK,
TIPC_OK,
- port_out_seqno(p_ptr),
ack);
}
tipc_port_unlock(p_ptr);
@@ -936,14 +913,14 @@ int tipc_createport(void *usr_handle,
u32 *portref)
{
struct user_port *up_ptr;
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
if (!up_ptr) {
warn("Port creation failed, no memory\n");
return -ENOMEM;
}
- p_ptr = (struct port *)tipc_createport_raw(NULL, port_dispatcher,
+ p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher,
port_wakeup, importance);
if (!p_ptr) {
kfree(up_ptr);
@@ -952,7 +929,7 @@ int tipc_createport(void *usr_handle,
p_ptr->user_port = up_ptr;
up_ptr->usr_handle = usr_handle;
- up_ptr->ref = p_ptr->publ.ref;
+ up_ptr->ref = p_ptr->ref;
up_ptr->err_cb = error_cb;
up_ptr->named_err_cb = named_error_cb;
up_ptr->conn_err_cb = conn_error_cb;
@@ -960,26 +937,26 @@ int tipc_createport(void *usr_handle,
up_ptr->named_msg_cb = named_msg_cb;
up_ptr->conn_msg_cb = conn_msg_cb;
up_ptr->continue_event_cb = continue_event_cb;
- *portref = p_ptr->publ.ref;
+ *portref = p_ptr->ref;
tipc_port_unlock(p_ptr);
return 0;
}
int tipc_portimportance(u32 ref, unsigned int *importance)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
+ *importance = (unsigned int)msg_importance(&p_ptr->phdr);
tipc_port_unlock(p_ptr);
return 0;
}
int tipc_set_portimportance(u32 ref, unsigned int imp)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
if (imp > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
@@ -987,7 +964,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
+ msg_set_importance(&p_ptr->phdr, (u32)imp);
tipc_port_unlock(p_ptr);
return 0;
}
@@ -995,7 +972,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct publication *publ;
u32 key;
int res = -EINVAL;
@@ -1004,7 +981,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
if (!p_ptr)
return -EINVAL;
- if (p_ptr->publ.connected)
+ if (p_ptr->connected)
goto exit;
if (seq->lower > seq->upper)
goto exit;
@@ -1016,11 +993,11 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
goto exit;
}
publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
- scope, p_ptr->publ.ref, key);
+ scope, p_ptr->ref, key);
if (publ) {
list_add(&publ->pport_list, &p_ptr->publications);
p_ptr->pub_count++;
- p_ptr->publ.published = 1;
+ p_ptr->published = 1;
res = 0;
}
exit:
@@ -1030,7 +1007,7 @@ exit:
int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct publication *publ;
struct publication *tpubl;
int res = -EINVAL;
@@ -1063,37 +1040,36 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
}
if (list_empty(&p_ptr->publications))
- p_ptr->publ.published = 0;
+ p_ptr->published = 0;
tipc_port_unlock(p_ptr);
return res;
}
int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
int res = -EINVAL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- if (p_ptr->publ.published || p_ptr->publ.connected)
+ if (p_ptr->published || p_ptr->connected)
goto exit;
if (!peer->ref)
goto exit;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_destnode(msg, peer->node);
msg_set_destport(msg, peer->ref);
msg_set_orignode(msg, tipc_own_addr);
- msg_set_origport(msg, p_ptr->publ.ref);
- msg_set_transp_seqno(msg, 42);
+ msg_set_origport(msg, p_ptr->ref);
msg_set_type(msg, TIPC_CONN_MSG);
msg_set_hdr_sz(msg, SHORT_H_SIZE);
p_ptr->probing_interval = PROBING_INTERVAL;
p_ptr->probing_state = CONFIRMED;
- p_ptr->publ.connected = 1;
+ p_ptr->connected = 1;
k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
@@ -1102,7 +1078,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
res = 0;
exit:
tipc_port_unlock(p_ptr);
- p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
+ p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
return res;
}
@@ -1120,7 +1096,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
tp_ptr->connected = 0;
/* let timer expire on its own to avoid deadlock! */
tipc_nodesub_unsubscribe(
- &((struct port *)tp_ptr)->subscription);
+ &((struct tipc_port *)tp_ptr)->subscription);
res = 0;
} else {
res = -ENOTCONN;
@@ -1135,7 +1111,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
int tipc_disconnect(u32 ref)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
int res;
p_ptr = tipc_port_lock(ref);
@@ -1151,15 +1127,15 @@ int tipc_disconnect(u32 ref)
*/
int tipc_shutdown(u32 ref)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- if (p_ptr->publ.connected) {
- u32 imp = msg_importance(&p_ptr->publ.phdr);
+ if (p_ptr->connected) {
+ u32 imp = msg_importance(&p_ptr->phdr);
if (imp < TIPC_CRITICAL_IMPORTANCE)
imp++;
buf = port_build_proto_msg(port_peerport(p_ptr),
@@ -1169,7 +1145,6 @@ int tipc_shutdown(u32 ref)
imp,
TIPC_CONN_MSG,
TIPC_CONN_SHUTDOWN,
- port_out_seqno(p_ptr),
0);
}
tipc_port_unlock(p_ptr);
@@ -1182,13 +1157,13 @@ int tipc_shutdown(u32 ref)
* message for this node.
*/
-static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
struct iovec const *msg_sect)
{
struct sk_buff *buf;
int res;
- res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect,
+ res = tipc_msg_build(&sender->phdr, msg_sect, num_sect,
MAX_MSG_SIZE, !sender->user_port, &buf);
if (likely(buf))
tipc_port_recv_msg(buf);
@@ -1201,15 +1176,15 @@ static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
u32 destnode;
int res;
p_ptr = tipc_port_deref(ref);
- if (!p_ptr || !p_ptr->publ.connected)
+ if (!p_ptr || !p_ptr->connected)
return -EINVAL;
- p_ptr->publ.congested = 1;
+ p_ptr->congested = 1;
if (!tipc_port_congested(p_ptr)) {
destnode = port_peernode(p_ptr);
if (likely(destnode != tipc_own_addr))
@@ -1219,14 +1194,14 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
if (likely(res != -ELINKCONG)) {
- port_incr_out_seqno(p_ptr);
- p_ptr->publ.congested = 0;
- p_ptr->sent++;
+ p_ptr->congested = 0;
+ if (res > 0)
+ p_ptr->sent++;
return res;
}
}
if (port_unreliable(p_ptr)) {
- p_ptr->publ.congested = 0;
+ p_ptr->congested = 0;
/* Just calculate msg length and return */
return tipc_msg_calc_data_size(msg_sect, num_sect);
}
@@ -1240,17 +1215,17 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
unsigned int num_sect, struct iovec const *msg_sect)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
u32 destnode = domain;
u32 destport;
int res;
p_ptr = tipc_port_deref(ref);
- if (!p_ptr || p_ptr->publ.connected)
+ if (!p_ptr || p_ptr->connected)
return -EINVAL;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_type(msg, TIPC_NAMED_MSG);
msg_set_orignode(msg, tipc_own_addr);
msg_set_origport(msg, ref);
@@ -1263,13 +1238,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
msg_set_destport(msg, destport);
if (likely(destport)) {
- p_ptr->sent++;
if (likely(destnode == tipc_own_addr))
- return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
- destnode);
- if (likely(res != -ELINKCONG))
+ res = tipc_port_recv_sections(p_ptr, num_sect,
+ msg_sect);
+ else
+ res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+ num_sect, destnode);
+ if (likely(res != -ELINKCONG)) {
+ if (res > 0)
+ p_ptr->sent++;
return res;
+ }
if (port_unreliable(p_ptr)) {
/* Just calculate msg length and return */
return tipc_msg_calc_data_size(msg_sect, num_sect);
@@ -1287,27 +1266,32 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
int tipc_send2port(u32 ref, struct tipc_portid const *dest,
unsigned int num_sect, struct iovec const *msg_sect)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
int res;
p_ptr = tipc_port_deref(ref);
- if (!p_ptr || p_ptr->publ.connected)
+ if (!p_ptr || p_ptr->connected)
return -EINVAL;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_type(msg, TIPC_DIRECT_MSG);
msg_set_orignode(msg, tipc_own_addr);
msg_set_origport(msg, ref);
msg_set_destnode(msg, dest->node);
msg_set_destport(msg, dest->ref);
msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
- p_ptr->sent++;
+
if (dest->node == tipc_own_addr)
- return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
- if (likely(res != -ELINKCONG))
+ res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
+ else
+ res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
+ dest->node);
+ if (likely(res != -ELINKCONG)) {
+ if (res > 0)
+ p_ptr->sent++;
return res;
+ }
if (port_unreliable(p_ptr)) {
/* Just calculate msg length and return */
return tipc_msg_calc_data_size(msg_sect, num_sect);
@@ -1322,15 +1306,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
struct sk_buff *buf, unsigned int dsz)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
int res;
- p_ptr = (struct port *)tipc_ref_deref(ref);
- if (!p_ptr || p_ptr->publ.connected)
+ p_ptr = (struct tipc_port *)tipc_ref_deref(ref);
+ if (!p_ptr || p_ptr->connected)
return -EINVAL;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_type(msg, TIPC_DIRECT_MSG);
msg_set_orignode(msg, tipc_own_addr);
msg_set_origport(msg, ref);
@@ -1343,12 +1327,16 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
skb_push(buf, DIR_MSG_H_SIZE);
skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
- p_ptr->sent++;
+
if (dest->node == tipc_own_addr)
- return tipc_port_recv_msg(buf);
- res = tipc_send_buf_fast(buf, dest->node);
- if (likely(res != -ELINKCONG))
+ res = tipc_port_recv_msg(buf);
+ else
+ res = tipc_send_buf_fast(buf, dest->node);
+ if (likely(res != -ELINKCONG)) {
+ if (res > 0)
+ p_ptr->sent++;
return res;
+ }
if (port_unreliable(p_ptr))
return dsz;
return -ELINKCONG;
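
All four send paths above (tipc_send, tipc_send2name, tipc_send2port, tipc_send_buf2port) now share one shape: deliver locally when the destination is the own node, otherwise hand the message to the link layer, and bump the port's sent counter only when the result is positive, i.e. payload was actually accepted. Empty messages therefore no longer load the connection's flow-control window. A minimal compilable sketch of that shape, using stand-in helpers rather than the real TIPC internals:

    /* Illustrative sketch only; deliver_local()/deliver_remote() and the
     * DEMO_ELINKCONG value are stand-ins for the TIPC helpers used above. */
    struct demo_port {
        unsigned int sent;      /* non-empty messages sent */
        unsigned int acked;     /* non-empty messages acknowledged by peer */
    };

    #define DEMO_ELINKCONG 1000 /* hypothetical "link congested" error code */

    static int deliver_local(const void *data, int len)  { (void)data; return len; }
    static int deliver_remote(const void *data, int len) { (void)data; return len; }

    static int demo_send(struct demo_port *p, int dest_is_local,
                         const void *data, int len)
    {
        int res = dest_is_local ? deliver_local(data, len)
                                : deliver_remote(data, len);

        if (res != -DEMO_ELINKCONG) {
            if (res > 0)        /* empty messages do not count as "sent" */
                p->sent++;
            return res;
        }
        return -DEMO_ELINKCONG; /* congested: caller retries or drops */
    }
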
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 8e84b989949c..87b9424ae0ec 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -2,7 +2,7 @@
* net/tipc/port.h: Include file for TIPC port code
*
* Copyright (c) 1994-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -95,7 +95,7 @@ struct user_port {
};
/**
- * struct tipc_port - TIPC port info available to socket API
+ * struct tipc_port - TIPC port structure
* @usr_handle: pointer to additional user-defined information about port
* @lock: pointer to spinlock for controlling access to port
* @connected: non-zero if port is currently connected to a peer port
@@ -107,43 +107,33 @@ struct user_port {
* @max_pkt: maximum packet size "hint" used when building messages sent by port
* @ref: unique reference to port in TIPC object registry
* @phdr: preformatted message header used when sending messages
- */
-struct tipc_port {
- void *usr_handle;
- spinlock_t *lock;
- int connected;
- u32 conn_type;
- u32 conn_instance;
- u32 conn_unacked;
- int published;
- u32 congested;
- u32 max_pkt;
- u32 ref;
- struct tipc_msg phdr;
-};
-
-/**
- * struct port - TIPC port structure
- * @publ: TIPC port info available to privileged users
* @port_list: adjacent ports in TIPC's global list of ports
* @dispatcher: ptr to routine which handles received messages
* @wakeup: ptr to routine to call when port is no longer congested
* @user_port: ptr to user port associated with port (if any)
* @wait_list: adjacent ports in list of ports waiting on link congestion
* @waiting_pkts:
- * @sent:
- * @acked:
+ * @sent: # of non-empty messages sent by port
+ * @acked: # of non-empty message acknowledgements from connected port's peer
* @publications: list of publications for port
* @pub_count: total # of publications port has made during its lifetime
* @probing_state:
* @probing_interval:
- * @last_in_seqno:
* @timer_ref:
* @subscription: "node down" subscription used to terminate failed connections
*/
-
-struct port {
- struct tipc_port publ;
+struct tipc_port {
+ void *usr_handle;
+ spinlock_t *lock;
+ int connected;
+ u32 conn_type;
+ u32 conn_instance;
+ u32 conn_unacked;
+ int published;
+ u32 congested;
+ u32 max_pkt;
+ u32 ref;
+ struct tipc_msg phdr;
struct list_head port_list;
u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
void (*wakeup)(struct tipc_port *);
@@ -156,7 +146,6 @@ struct port {
u32 pub_count;
u32 probing_state;
u32 probing_interval;
- u32 last_in_seqno;
struct timer_list timer;
struct tipc_node_subscr subscription;
};
@@ -230,7 +219,7 @@ int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
unsigned int section_count, struct iovec const *msg);
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
int err);
struct sk_buff *tipc_port_get_ports(void);
@@ -242,9 +231,9 @@ void tipc_port_reinit(void);
* tipc_port_lock - lock port instance referred to and return its pointer
*/
-static inline struct port *tipc_port_lock(u32 ref)
+static inline struct tipc_port *tipc_port_lock(u32 ref)
{
- return (struct port *)tipc_ref_lock(ref);
+ return (struct tipc_port *)tipc_ref_lock(ref);
}
/**
@@ -253,27 +242,27 @@ static inline struct port *tipc_port_lock(u32 ref)
* Can use pointer instead of tipc_ref_unlock() since port is already locked.
*/
-static inline void tipc_port_unlock(struct port *p_ptr)
+static inline void tipc_port_unlock(struct tipc_port *p_ptr)
{
- spin_unlock_bh(p_ptr->publ.lock);
+ spin_unlock_bh(p_ptr->lock);
}
-static inline struct port *tipc_port_deref(u32 ref)
+static inline struct tipc_port *tipc_port_deref(u32 ref)
{
- return (struct port *)tipc_ref_deref(ref);
+ return (struct tipc_port *)tipc_ref_deref(ref);
}
-static inline u32 tipc_peer_port(struct port *p_ptr)
+static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
{
- return msg_destport(&p_ptr->publ.phdr);
+ return msg_destport(&p_ptr->phdr);
}
-static inline u32 tipc_peer_node(struct port *p_ptr)
+static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
{
- return msg_destnode(&p_ptr->publ.phdr);
+ return msg_destnode(&p_ptr->phdr);
}
-static inline int tipc_port_congested(struct port *p_ptr)
+static inline int tipc_port_congested(struct tipc_port *p_ptr)
{
return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
}
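
tipc_port_congested() is the other half of that accounting: a connection counts as congested once sent minus acked reaches twice TIPC_FLOW_CONTROL_WIN, which is why the @sent/@acked kernel-doc above now says "non-empty" messages. A tiny sketch of the predicate with a hypothetical window of 8 (the real value comes from port.h):

    #define DEMO_FLOW_CONTROL_WIN 8   /* hypothetical; real value is TIPC_FLOW_CONTROL_WIN */

    static int demo_congested(unsigned int sent, unsigned int acked)
    {
        /* congested once 2 * WIN data messages are outstanding */
        return (sent - acked) >= (DEMO_FLOW_CONTROL_WIN * 2);
    }
    /* demo_congested(16, 0) == 1, demo_congested(15, 0) == 0,
     * demo_congested(20, 5) == 0 */
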
@@ -284,7 +273,7 @@ static inline int tipc_port_congested(struct port *p_ptr)
static inline int tipc_port_recv_msg(struct sk_buff *buf)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg = buf_msg(buf);
u32 destport = msg_destport(msg);
u32 dsz = msg_data_sz(msg);
@@ -299,7 +288,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
/* validate destination & pass to port, otherwise reject message */
p_ptr = tipc_port_lock(destport);
if (likely(p_ptr)) {
- if (likely(p_ptr->publ.connected)) {
+ if (likely(p_ptr->connected)) {
if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
(unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
(unlikely(!msg_connected(msg)))) {
@@ -308,7 +297,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
goto reject;
}
}
- err = p_ptr->dispatcher(&p_ptr->publ, buf);
+ err = p_ptr->dispatcher(p_ptr, buf);
tipc_port_unlock(p_ptr);
if (likely(!err))
return dsz;
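
The fast-path receive check above accepts a message on a connected port only if it originates from the exact peer recorded in the preformatted header and carries the connection flag; anything else goes through the reject path. The same predicate, reduced to a compilable sketch with stand-in types:

    /* Sketch of the peer validation above (stand-in types, not TIPC's). */
    struct demo_peer {
        unsigned int node;
        unsigned int port;
    };

    static int demo_msg_from_peer(const struct demo_peer *expected,
                                  unsigned int orig_node, unsigned int orig_port,
                                  int msg_connected)
    {
        return msg_connected &&
               orig_node == expected->node &&
               orig_port == expected->port;
    }
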
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 2b02a3a80313..125dcb0737b2 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2,7 +2,7 @@
* net/tipc/socket.c: TIPC socket API
*
* Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -241,7 +241,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
tipc_set_portunreliable(tp_ptr->ref, 1);
}
- atomic_inc(&tipc_user_count);
return 0;
}
@@ -321,7 +320,6 @@ static int release(struct socket *sock)
sock_put(sk);
sock->sk = NULL;
- atomic_dec(&tipc_user_count);
return res;
}
@@ -495,6 +493,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
return -EACCES;
+ if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
+ return -EMSGSIZE;
if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
return -EFAULT;
if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
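
The two added lines make dest_name_check() reject a configuration-service send whose first iovec is absent or shorter than the command header, so the following copy_from_user() can never read beyond the user buffer. The same "check the length before copying a fixed-size header" pattern in plain userspace C, with a hypothetical header type:

    #include <errno.h>
    #include <string.h>
    #include <sys/uio.h>

    struct demo_cmd_hdr {              /* hypothetical fixed-size command header */
        unsigned short type;
        unsigned short len;
    };

    /* Copy the header out of the first iovec only if it is really there. */
    static int demo_peek_hdr(const struct iovec *iov, size_t iovlen,
                             struct demo_cmd_hdr *hdr)
    {
        if (!iovlen || iov[0].iov_len < sizeof(*hdr))
            return -EMSGSIZE;          /* too short: refuse rather than over-read */
        memcpy(hdr, iov[0].iov_base, sizeof(*hdr));
        return 0;
    }
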
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ca04479c3d42..aae9eae13404 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -2,7 +2,7 @@
* net/tipc/subscr.c: TIPC network topology service
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -160,7 +160,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
static void subscr_timeout(struct subscription *sub)
{
- struct port *server_port;
+ struct tipc_port *server_port;
/* Validate server port reference (in case subscriber is terminating) */
@@ -472,8 +472,6 @@ static void subscr_named_msg_event(void *usr_handle,
struct tipc_portid const *orig,
struct tipc_name_seq const *dest)
{
- static struct iovec msg_sect = {NULL, 0};
-
struct subscriber *subscriber;
u32 server_port_ref;
@@ -508,7 +506,7 @@ static void subscr_named_msg_event(void *usr_handle,
/* Lock server port (& save lock address for future use) */
- subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
+ subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
/* Add subscriber to topology server's subscriber list */
@@ -523,7 +521,7 @@ static void subscr_named_msg_event(void *usr_handle,
/* Send an ACK- to complete connection handshaking */
- tipc_send(server_port_ref, 1, &msg_sect);
+ tipc_send(server_port_ref, 0, NULL);
/* Handle optional subscription request */
@@ -542,7 +540,6 @@ int tipc_subscr_start(void)
spin_lock_init(&topsrv.lock);
INIT_LIST_HEAD(&topsrv.subscriber_list);
- spin_lock_bh(&topsrv.lock);
res = tipc_createport(NULL,
TIPC_CRITICAL_IMPORTANCE,
NULL,
@@ -563,12 +560,10 @@ int tipc_subscr_start(void)
goto failed;
}
- spin_unlock_bh(&topsrv.lock);
return 0;
failed:
err("Failed to create subscription service\n");
- spin_unlock_bh(&topsrv.lock);
return res;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d286204..217fb7f34d52 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1171,7 +1171,7 @@ restart:
newsk->sk_type = sk->sk_type;
init_peercred(newsk);
newu = unix_sk(newsk);
- newsk->sk_wq = &newu->peer_wq;
+ RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1475,6 +1475,12 @@ restart:
goto out_free;
}
+ if (sk_filter(other, skb) < 0) {
+ /* Toss the packet but do not return any error to the sender */
+ err = len;
+ goto out_free;
+ }
+
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
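
The sk_filter() hook above lets a receiver attach a socket filter to an AF_UNIX socket and have unwanted datagrams dropped silently; the sender still sees the write succeed (err = len). A hedged userspace sketch of attaching a classic BPF filter that only accepts datagrams whose first payload byte is 'A' (filter semantics on AF_UNIX payloads assumed as described above):

    #include <linux/filter.h>
    #include <sys/socket.h>
    #include <stdio.h>

    int main(void)
    {
        int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
        if (fd < 0) { perror("socket"); return 1; }

        /* Accept a datagram only if its first payload byte is 'A'. */
        struct sock_filter code[] = {
            { BPF_LD  | BPF_B   | BPF_ABS, 0, 0, 0 },     /* A = data[0]        */
            { BPF_JMP | BPF_JEQ | BPF_K,   0, 1, 'A' },   /* A != 'A' -> drop   */
            { BPF_RET | BPF_K,             0, 0, 0xFFFF },/* accept (up to 64k) */
            { BPF_RET | BPF_K,             0, 0, 0 },     /* drop               */
        };
        struct sock_fprog prog = { .len = 4, .filter = code };

        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
            perror("SO_ATTACH_FILTER");
        /* ... bind() and recvfrom() as usual; filtered datagrams never arrive */
        return 0;
    }
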
@@ -1978,36 +1984,38 @@ static int unix_shutdown(struct socket *sock, int mode)
mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
- if (mode) {
- unix_state_lock(sk);
- sk->sk_shutdown |= mode;
- other = unix_peer(sk);
- if (other)
- sock_hold(other);
- unix_state_unlock(sk);
- sk->sk_state_change(sk);
-
- if (other &&
- (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
-
- int peer_mode = 0;
-
- if (mode&RCV_SHUTDOWN)
- peer_mode |= SEND_SHUTDOWN;
- if (mode&SEND_SHUTDOWN)
- peer_mode |= RCV_SHUTDOWN;
- unix_state_lock(other);
- other->sk_shutdown |= peer_mode;
- unix_state_unlock(other);
- other->sk_state_change(other);
- if (peer_mode == SHUTDOWN_MASK)
- sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
- else if (peer_mode & RCV_SHUTDOWN)
- sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- }
- if (other)
- sock_put(other);
+ if (!mode)
+ return 0;
+
+ unix_state_lock(sk);
+ sk->sk_shutdown |= mode;
+ other = unix_peer(sk);
+ if (other)
+ sock_hold(other);
+ unix_state_unlock(sk);
+ sk->sk_state_change(sk);
+
+ if (other &&
+ (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+
+ int peer_mode = 0;
+
+ if (mode&RCV_SHUTDOWN)
+ peer_mode |= SEND_SHUTDOWN;
+ if (mode&SEND_SHUTDOWN)
+ peer_mode |= RCV_SHUTDOWN;
+ unix_state_lock(other);
+ other->sk_shutdown |= peer_mode;
+ unix_state_unlock(other);
+ other->sk_state_change(other);
+ if (peer_mode == SHUTDOWN_MASK)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+ else if (peer_mode & RCV_SHUTDOWN)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
}
+ if (other)
+ sock_put(other);
+
return 0;
}
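
Functionally the rewrite above is the old code with an early return when no shutdown bits are set; the interesting part remains the mode translation, where shutting down the local receive side marks the peer's send side and vice versa. From userspace the visible effect is simply that the peer observes end of stream, e.g.:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int sv[2];
        char buf[8];
        ssize_t n;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) { perror("socketpair"); return 1; }

        shutdown(sv[0], SHUT_WR);              /* SEND_SHUTDOWN on sv[0], RCV_SHUTDOWN on its peer */
        n = recv(sv[1], buf, sizeof(buf), 0);  /* returns 0: end of stream */
        printf("recv on peer returned %zd\n", n);

        close(sv[0]);
        close(sv[1]);
        return 0;
    }
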
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 74944a2dd436..788a12c1eb5d 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -59,8 +59,6 @@
#include <asm/uaccess.h> /* copy_to/from_user */
#include <linux/init.h> /* __initfunc et al. */
-#define KMEM_SAFETYZONE 8
-
#define DEV_TO_SLAVE(dev) (*((struct net_device **)netdev_priv(dev)))
/*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index e9a5f8ca4c27..fe01de29bfe8 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -718,13 +718,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
wdev->ps = false;
/* allow mac80211 to determine the timeout */
wdev->ps_timeout = -1;
- if (rdev->ops->set_power_mgmt)
- if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
- wdev->ps,
- wdev->ps_timeout)) {
- /* assume this means it's off */
- wdev->ps = false;
- }
if (!dev->ethtool_ops)
dev->ethtool_ops = &cfg80211_ethtool_ops;
@@ -813,6 +806,19 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
rdev->opencount++;
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
+
+ /*
+ * Configure power management to the driver here so that its
+ * correctly set also after interface type changes etc.
+ */
+ if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ rdev->ops->set_power_mgmt)
+ if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
+ wdev->ps,
+ wdev->ps_timeout)) {
+ /* assume this means it's off */
+ wdev->ps = false;
+ }
break;
case NETDEV_UNREGISTER:
/*
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 9b62710891a2..4ebce4284e9d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1968,13 +1968,41 @@ static int parse_station_flags(struct genl_info *info,
return 0;
}
+static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
+ int attr)
+{
+ struct nlattr *rate;
+ u16 bitrate;
+
+ rate = nla_nest_start(msg, attr);
+ if (!rate)
+ goto nla_put_failure;
+
+ /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+ bitrate = cfg80211_calculate_bitrate(info);
+ if (bitrate > 0)
+ NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
+
+ if (info->flags & RATE_INFO_FLAGS_MCS)
+ NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
+ if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
+ NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
+ if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
+ NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+
+ nla_nest_end(msg, rate);
+ return true;
+
+nla_put_failure:
+ return false;
+}
+
static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
int flags, struct net_device *dev,
const u8 *mac_addr, struct station_info *sinfo)
{
void *hdr;
- struct nlattr *sinfoattr, *txrate;
- u16 bitrate;
+ struct nlattr *sinfoattr;
hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
if (!hdr)
@@ -2013,24 +2041,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
sinfo->signal_avg);
if (sinfo->filled & STATION_INFO_TX_BITRATE) {
- txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE);
- if (!txrate)
+ if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
+ NL80211_STA_INFO_TX_BITRATE))
+ goto nla_put_failure;
+ }
+ if (sinfo->filled & STATION_INFO_RX_BITRATE) {
+ if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
+ NL80211_STA_INFO_RX_BITRATE))
goto nla_put_failure;
-
- /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
- bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
- if (bitrate > 0)
- NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
- if (sinfo->txrate.flags & RATE_INFO_FLAGS_MCS)
- NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS,
- sinfo->txrate.mcs);
- if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
- NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
- if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI)
- NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
-
- nla_nest_end(msg, txrate);
}
if (sinfo->filled & STATION_INFO_RX_PACKETS)
NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
@@ -2718,7 +2736,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_GET_MESH_CONFIG);
if (!hdr)
- goto nla_put_failure;
+ goto out;
pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
if (!pinfoattr)
goto nla_put_failure;
@@ -2759,6 +2777,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
nla_put_failure:
genlmsg_cancel(msg, hdr);
+ out:
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -2954,7 +2973,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_GET_REG);
if (!hdr)
- goto nla_put_failure;
+ goto put_failure;
NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
cfg80211_regdomain->alpha2);
@@ -3001,6 +3020,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
nla_put_failure:
genlmsg_cancel(msg, hdr);
+put_failure:
nlmsg_free(msg);
err = -EMSGSIZE;
out:
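
Both error-path fixes above address the same bug: when nl80211hdr_put() fails there is no header to roll back, so jumping to the nla_put_failure label would call genlmsg_cancel() on a message that never got one. The corrected two-label unwind, reduced to a compilable sketch with stand-in helpers (not the real netlink API):

    #include <errno.h>

    struct demo_msg { int have_hdr; };

    /* trivial stand-ins for nl80211hdr_put() / genlmsg_cancel() / nlmsg_free() */
    static void *demo_hdr_put(struct demo_msg *m) { m->have_hdr = 1; return m; }
    static int   demo_fill(struct demo_msg *m)    { (void)m; return -1; }
    static void  demo_cancel(struct demo_msg *m)  { m->have_hdr = 0; }
    static void  demo_free(struct demo_msg *m)    { (void)m; }

    static int demo_build(struct demo_msg *msg)
    {
        void *hdr = demo_hdr_put(msg);

        if (!hdr)
            goto out;        /* header never added: nothing to cancel */
        if (demo_fill(msg) < 0)
            goto cancel;     /* roll back the partially built message */
        return 0;

    cancel:
        demo_cancel(msg);
    out:
        demo_free(msg);
        return -ENOBUFS;
    }
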
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 37693b6ef23a..c565689f0b9f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1801,9 +1801,9 @@ void regulatory_hint_disconnect(void)
static bool freq_is_chan_12_13_14(u16 freq)
{
- if (freq == ieee80211_channel_to_frequency(12) ||
- freq == ieee80211_channel_to_frequency(13) ||
- freq == ieee80211_channel_to_frequency(14))
+ if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
+ freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
+ freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
return true;
return false;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7620ae2fcf18..6a750bc6bcfe 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -29,29 +29,37 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
}
EXPORT_SYMBOL(ieee80211_get_response_rate);
-int ieee80211_channel_to_frequency(int chan)
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
{
- if (chan < 14)
- return 2407 + chan * 5;
-
- if (chan == 14)
- return 2484;
-
- /* FIXME: 802.11j 17.3.8.3.2 */
- return (chan + 1000) * 5;
+ /* see 802.11 17.3.8.3.2 and Annex J
+ * there are overlapping channel numbers in 5GHz and 2GHz bands */
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (chan >= 182 && chan <= 196)
+ return 4000 + chan * 5;
+ else
+ return 5000 + chan * 5;
+ } else { /* IEEE80211_BAND_2GHZ */
+ if (chan == 14)
+ return 2484;
+ else if (chan < 14)
+ return 2407 + chan * 5;
+ else
+ return 0; /* not supported */
+ }
}
EXPORT_SYMBOL(ieee80211_channel_to_frequency);
int ieee80211_frequency_to_channel(int freq)
{
+ /* see 802.11 17.3.8.3.2 and Annex J */
if (freq == 2484)
return 14;
-
- if (freq < 2484)
+ else if (freq < 2484)
return (freq - 2407) / 5;
-
- /* FIXME: 802.11j 17.3.8.3.2 */
- return freq/5 - 1000;
+ else if (freq >= 4910 && freq <= 4980)
+ return (freq - 4000) / 5;
+ else
+ return (freq - 5000) / 5;
}
EXPORT_SYMBOL(ieee80211_frequency_to_channel);
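
The rewritten helpers follow IEEE 802.11 17.3.8.3.2 and Annex J: in the 2.4 GHz band, channel N (1-13) maps to 2407 + 5*N MHz with channel 14 as the 2484 MHz special case; in the 5 GHz band, channels 182-196 land in the 4.9 GHz range as 4000 + 5*N and everything else as 5000 + 5*N. A few spot checks, re-implemented in userspace (same arithmetic, not the kernel function itself):

    #include <assert.h>

    enum demo_band { DEMO_BAND_2GHZ, DEMO_BAND_5GHZ };

    static int demo_chan_to_freq(int chan, enum demo_band band)
    {
        if (band == DEMO_BAND_5GHZ)
            return (chan >= 182 && chan <= 196) ? 4000 + chan * 5
                                                : 5000 + chan * 5;
        if (chan == 14)
            return 2484;
        return (chan < 14) ? 2407 + chan * 5 : 0;   /* 0: not a 2.4 GHz channel */
    }

    int main(void)
    {
        assert(demo_chan_to_freq(1,   DEMO_BAND_2GHZ) == 2412);
        assert(demo_chan_to_freq(13,  DEMO_BAND_2GHZ) == 2472);
        assert(demo_chan_to_freq(14,  DEMO_BAND_2GHZ) == 2484);
        assert(demo_chan_to_freq(36,  DEMO_BAND_5GHZ) == 5180);
        assert(demo_chan_to_freq(184, DEMO_BAND_5GHZ) == 4920);
        return 0;
    }
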
@@ -159,12 +167,15 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
/*
* Disallow pairwise keys with non-zero index unless it's WEP
- * (because current deployments use pairwise WEP keys with
- * non-zero indizes but 802.11i clearly specifies to use zero)
+ * or a vendor specific cipher (because current deployments use
+ * pairwise WEP keys with non-zero indices and for vendor specific
+ * ciphers this should be validated in the driver or hardware level
+ * - but 802.11i clearly specifies to use zero)
*/
if (pairwise && key_idx &&
- params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
- params->cipher != WLAN_CIPHER_SUITE_WEP104)
+ ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
+ (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
+ (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
return -EINVAL;
switch (params->cipher) {
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3e5dbd4e4cd5..0bf169bb770e 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -267,9 +267,12 @@ int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
* -EINVAL for impossible things.
*/
if (freq->e == 0) {
+ enum ieee80211_band band = IEEE80211_BAND_2GHZ;
if (freq->m < 0)
return 0;
- return ieee80211_channel_to_frequency(freq->m);
+ if (freq->m > 14)
+ band = IEEE80211_BAND_5GHZ;
+ return ieee80211_channel_to_frequency(freq->m, band);
} else {
int i, div = 1000000;
for (i = 0; i < freq->e; i++)
@@ -802,11 +805,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
return freq;
if (freq == 0)
return -EINVAL;
- wdev_lock(wdev);
mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
- mutex_unlock(&rdev->devlist_mtx);
wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
default:
return -EOPNOTSUPP;
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 8b4d6e3246e5..58064d9e565d 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -618,21 +618,21 @@ static int xfrm_alg_name_match(const struct xfrm_algo_desc *entry,
(entry->compat && !strcmp(name, entry->compat)));
}
-struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe)
{
return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_name_match, name,
probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
-struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe)
{
return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_name_match, name,
probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
-struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe)
{
return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_name_match, name,
probe);
@@ -654,7 +654,7 @@ static int xfrm_aead_name_match(const struct xfrm_algo_desc *entry,
!strcmp(name, entry->name);
}
-struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len, int probe)
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, int probe)
{
struct xfrm_aead_name data = {
.name = name,
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 8e69533d2313..7199d78b2aa1 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -4,29 +4,32 @@
#include <linux/xfrm.h>
#include <linux/socket.h>
-static inline unsigned int __xfrm4_addr_hash(xfrm_address_t *addr)
+static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr)
{
return ntohl(addr->a4);
}
-static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
+static inline unsigned int __xfrm6_addr_hash(const xfrm_address_t *addr)
{
return ntohl(addr->a6[2] ^ addr->a6[3]);
}
-static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
+static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr)
{
u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
return ntohl((__force __be32)sum);
}
-static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
+static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr)
{
return ntohl(daddr->a6[2] ^ daddr->a6[3] ^
saddr->a6[2] ^ saddr->a6[3]);
}
-static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t *saddr,
+static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
u32 reqid, unsigned short family,
unsigned int hmask)
{
@@ -42,8 +45,8 @@ static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t
return (h ^ (h >> 16)) & hmask;
}
-static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
- xfrm_address_t *saddr,
+static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
unsigned short family,
unsigned int hmask)
{
@@ -60,8 +63,8 @@ static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
}
static inline unsigned int
-__xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family,
- unsigned int hmask)
+__xfrm_spi_hash(const xfrm_address_t *daddr, __be32 spi, u8 proto,
+ unsigned short family, unsigned int hmask)
{
unsigned int h = (__force u32)spi ^ proto;
switch (family) {
@@ -80,10 +83,11 @@ static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
return (index ^ (index >> 8)) & hmask;
}
-static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short family, unsigned int hmask)
+static inline unsigned int __sel_hash(const struct xfrm_selector *sel,
+ unsigned short family, unsigned int hmask)
{
- xfrm_address_t *daddr = &sel->daddr;
- xfrm_address_t *saddr = &sel->saddr;
+ const xfrm_address_t *daddr = &sel->daddr;
+ const xfrm_address_t *saddr = &sel->saddr;
unsigned int h = 0;
switch (family) {
@@ -107,7 +111,9 @@ static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short
return h & hmask;
}
-static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, unsigned int hmask)
+static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ unsigned short family, unsigned int hmask)
{
unsigned int h = 0;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 6459588befc3..0248afa11cda 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -51,14 +51,14 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
- struct flowi *fl, int family, int strict);
+ const struct flowi *fl, int family);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
static inline int
-__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
@@ -69,7 +69,7 @@ __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
}
static inline int
-__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
@@ -79,8 +79,8 @@ __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
(fl->oif == sel->ifindex || !sel->ifindex);
}
-int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
- unsigned short family)
+int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+ unsigned short family)
{
switch (family) {
case AF_INET:
@@ -92,8 +92,8 @@ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
}
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
int family)
{
struct xfrm_policy_afinfo *afinfo;
@@ -311,7 +311,9 @@ static inline unsigned int idx_hash(struct net *net, u32 index)
return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
-static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
+static struct hlist_head *policy_hash_bysel(struct net *net,
+ const struct xfrm_selector *sel,
+ unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash = __sel_hash(sel, family, hmask);
@@ -321,7 +323,10 @@ static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selecto
net->xfrm.policy_bydst[dir].table + hash);
}
-static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
+static struct hlist_head *policy_hash_direct(struct net *net,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
@@ -864,10 +869,11 @@ EXPORT_SYMBOL(xfrm_policy_walk_done);
*
* Returns 0 if policy found, else an -errno.
*/
-static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
+static int xfrm_policy_match(const struct xfrm_policy *pol,
+ const struct flowi *fl,
u8 type, u16 family, int dir)
{
- struct xfrm_selector *sel = &pol->selector;
+ const struct xfrm_selector *sel = &pol->selector;
int match, ret = -ESRCH;
if (pol->family != family ||
@@ -884,12 +890,12 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
}
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
- struct flowi *fl,
+ const struct flowi *fl,
u16 family, u8 dir)
{
int err;
struct xfrm_policy *pol, *ret;
- xfrm_address_t *daddr, *saddr;
+ const xfrm_address_t *daddr, *saddr;
struct hlist_node *entry;
struct hlist_head *chain;
u32 priority = ~0U;
@@ -941,7 +947,7 @@ fail:
}
static struct xfrm_policy *
-__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
+__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_policy *pol;
@@ -954,7 +960,7 @@ __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
}
static struct flow_cache_object *
-xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
+xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
struct xfrm_policy *pol;
@@ -990,7 +996,8 @@ static inline int policy_to_flow_dir(int dir)
}
}
-static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
+static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
+ const struct flowi *fl)
{
struct xfrm_policy *pol;
@@ -1098,7 +1105,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
return 0;
}
-static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
+static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
@@ -1157,9 +1164,8 @@ xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
/* Resolve list of templates for the flow, given policy. */
static int
-xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
- struct xfrm_state **xfrm,
- unsigned short family)
+xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
+ struct xfrm_state **xfrm, unsigned short family)
{
struct net *net = xp_net(policy);
int nx;
@@ -1214,9 +1220,8 @@ fail:
}
static int
-xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
- struct xfrm_state **xfrm,
- unsigned short family)
+xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
+ struct xfrm_state **xfrm, unsigned short family)
{
struct xfrm_state *tp[XFRM_MAX_DEPTH];
struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
@@ -1256,7 +1261,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
* still valid.
*/
-static inline int xfrm_get_tos(struct flowi *fl, int family)
+static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
int tos;
@@ -1340,7 +1345,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
default:
BUG();
}
- xdst = dst_alloc(dst_ops);
+ xdst = dst_alloc(dst_ops, 0);
xfrm_policy_put_afinfo(afinfo);
if (likely(xdst))
@@ -1369,7 +1374,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
}
static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
- struct flowi *fl)
+ const struct flowi *fl)
{
struct xfrm_policy_afinfo *afinfo =
xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1392,7 +1397,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
struct xfrm_state **xfrm, int nx,
- struct flowi *fl,
+ const struct flowi *fl,
struct dst_entry *dst)
{
struct net *net = xp_net(policy);
@@ -1508,7 +1513,7 @@ free_dst:
}
static int inline
-xfrm_dst_alloc_copy(void **target, void *src, int size)
+xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
if (!*target) {
*target = kmalloc(size, GFP_ATOMIC);
@@ -1520,7 +1525,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
}
static int inline
-xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
+xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1532,7 +1537,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
}
static int inline
-xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
+xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1542,7 +1547,7 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
#endif
}
-static int xfrm_expand_policies(struct flowi *fl, u16 family,
+static int xfrm_expand_policies(const struct flowi *fl, u16 family,
struct xfrm_policy **pols,
int *num_pols, int *num_xfrms)
{
@@ -1588,7 +1593,7 @@ static int xfrm_expand_policies(struct flowi *fl, u16 family,
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
- struct flowi *fl, u16 family,
+ const struct flowi *fl, u16 family,
struct dst_entry *dst_orig)
{
struct net *net = xp_net(pols[0]);
@@ -1631,7 +1636,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
}
static struct flow_cache_object *
-xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
+xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
struct flow_cache_object *oldflo, void *ctx)
{
struct dst_entry *dst_orig = (struct dst_entry *)ctx;
@@ -1730,13 +1735,31 @@ error:
return ERR_PTR(err);
}
+static struct dst_entry *make_blackhole(struct net *net, u16 family,
+ struct dst_entry *dst_orig)
+{
+ struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+ struct dst_entry *ret;
+
+ if (!afinfo) {
+ dst_release(dst_orig);
+ ret = ERR_PTR(-EINVAL);
+ } else {
+ ret = afinfo->blackhole_route(net, dst_orig);
+ }
+ xfrm_policy_put_afinfo(afinfo);
+
+ return ret;
+}
+
/* Main function: finds/creates a bundle for given flow.
*
* At the moment we eat a raw IP route. Mostly to speed up lookups
* on interfaces with disabled IPsec.
*/
-int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
- struct sock *sk, int flags)
+int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
+ const struct flowi *fl,
+ struct sock *sk, int flags)
{
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
struct flow_cache_object *flo;
@@ -1823,9 +1846,14 @@ restart:
dst_release(dst);
xfrm_pols_put(pols, drop_pols);
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- return -EREMOTE;
+
+ dst = make_blackhole(net, family, dst_orig);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ *dst_p = dst;
+ return 0;
}
- if (flags & XFRM_LOOKUP_WAIT) {
+ if (fl->flags & FLOWI_FLAG_CAN_SLEEP) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&net->xfrm.km_waitq, &wait);
@@ -1889,25 +1917,10 @@ dropdst:
xfrm_pols_put(pols, drop_pols);
return err;
}
-EXPORT_SYMBOL(__xfrm_lookup);
-
-int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
- struct sock *sk, int flags)
-{
- int err = __xfrm_lookup(net, dst_p, fl, sk, flags);
-
- if (err == -EREMOTE) {
- dst_release(*dst_p);
- *dst_p = NULL;
- err = -EAGAIN;
- }
-
- return err;
-}
EXPORT_SYMBOL(xfrm_lookup);
static inline int
-xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
+xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
struct xfrm_state *x;
@@ -1926,7 +1939,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
*/
static inline int
-xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
+xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
unsigned short family)
{
if (xfrm_state_kern(x))
@@ -1949,7 +1962,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
* Otherwise "-2 - errored_index" is returned.
*/
static inline int
-xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
+xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
unsigned short family)
{
int idx = start;
@@ -1987,7 +2000,7 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
}
EXPORT_SYMBOL(__xfrm_decode_session);
-static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
+static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
for (; k < sp->len; k++) {
if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
@@ -2210,7 +2223,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
static int stale_bundle(struct dst_entry *dst)
{
- return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
+ return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2283,7 +2296,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
*/
static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
- struct flowi *fl, int family, int strict)
+ const struct flowi *fl, int family)
{
struct dst_entry *dst = &first->u.dst;
struct xfrm_dst *last;
@@ -2320,11 +2333,6 @@ static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
- if (strict && fl &&
- !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
- !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
- return 0;
-
mtu = dst_mtu(dst->child);
if (xdst->child_mtu_cached != mtu) {
last = xdst;
@@ -2735,8 +2743,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
- struct xfrm_selector *sel_tgt)
+static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+ const struct xfrm_selector *sel_tgt)
{
if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
if (sel_tgt->family == sel_cmp->family &&
@@ -2756,7 +2764,7 @@ static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
return 0;
}
-static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
+static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
u8 dir, u8 type)
{
struct xfrm_policy *pol, *ret = NULL;
@@ -2792,7 +2800,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
return ret;
}
-static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
+static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
int match = 0;
@@ -2862,7 +2870,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
return 0;
}
-static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
+static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
int i, j;
@@ -2896,7 +2904,7 @@ static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
return 0;
}
-int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_migrate,
struct xfrm_kmaddress *k)
{
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 220ebc05c7af..81221d9cbf06 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -50,8 +50,8 @@ static void xfrm_audit_state_replay(struct xfrm_state *x,
#endif /* CONFIG_AUDITSYSCALL */
static inline unsigned int xfrm_dst_hash(struct net *net,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
u32 reqid,
unsigned short family)
{
@@ -59,15 +59,16 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
}
static inline unsigned int xfrm_src_hash(struct net *net,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
unsigned short family)
{
return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}
static inline unsigned int
-xfrm_spi_hash(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
+ __be32 spi, u8 proto, unsigned short family)
{
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
@@ -656,9 +657,9 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
-xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
+ const struct xfrm_tmpl *tmpl,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
@@ -677,7 +678,10 @@ xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
return 0;
}
-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ __be32 spi, u8 proto,
+ unsigned short family)
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
@@ -699,7 +703,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_ad
return NULL;
}
-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ u8 proto, unsigned short family)
{
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
@@ -746,8 +753,7 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
}
static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
- struct flowi *fl, unsigned short family,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+ const struct flowi *fl, unsigned short family,
struct xfrm_state **best, int *acq_in_progress,
int *error)
{
@@ -784,8 +790,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
}
struct xfrm_state *
-xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
- struct flowi *fl, struct xfrm_tmpl *tmpl,
+xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ const struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family)
{
@@ -813,7 +819,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family,
&best, &acquire_in_progress, &error);
}
if (best)
@@ -829,7 +835,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family,
&best, &acquire_in_progress, &error);
}
@@ -991,7 +997,11 @@ void xfrm_state_insert(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
+static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+ unsigned short family, u8 mode,
+ u32 reqid, u8 proto,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct hlist_node *entry;
@@ -1369,7 +1379,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
-xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
+xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
@@ -1383,7 +1393,7 @@ EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
@@ -1397,7 +1407,7 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr,
int create, unsigned short family)
{
struct xfrm_state *x;
@@ -1727,7 +1737,7 @@ void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
static LIST_HEAD(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
-void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct xfrm_mgr *km;
@@ -1738,7 +1748,7 @@ void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
read_unlock(&xfrm_km_lock);
}
-void km_state_notify(struct xfrm_state *x, struct km_event *c)
+void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
@@ -1819,9 +1829,9 @@ void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
-int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_migrate,
- struct xfrm_kmaddress *k)
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_migrate,
+ const struct xfrm_kmaddress *k)
{
int err = -EINVAL;
int ret;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 61291965c5f6..673698d380d7 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -234,7 +234,7 @@ out:
}
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
- struct xfrm_algo_desc *(*get_byname)(char *, int),
+ struct xfrm_algo_desc *(*get_byname)(const char *, int),
struct nlattr *rta)
{
struct xfrm_algo *p, *ualg;
@@ -1582,7 +1582,7 @@ static inline size_t xfrm_aevent_msgsize(void)
+ nla_total_size(4); /* XFRM_AE_ETHR */
}
-static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
+static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_aevent_id *id;
struct nlmsghdr *nlh;
@@ -1986,7 +1986,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
+static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
struct xfrm_user_migrate um;
@@ -2004,7 +2004,7 @@ static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}
-static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
+static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
struct xfrm_user_kmaddress uk;
@@ -2025,11 +2025,11 @@ static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
+ userpolicy_type_attrsize();
}
-static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
- int num_migrate, struct xfrm_kmaddress *k,
- struct xfrm_selector *sel, u8 dir, u8 type)
+static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
+ int num_migrate, const struct xfrm_kmaddress *k,
+ const struct xfrm_selector *sel, u8 dir, u8 type)
{
- struct xfrm_migrate *mp;
+ const struct xfrm_migrate *mp;
struct xfrm_userpolicy_id *pol_id;
struct nlmsghdr *nlh;
int i;
@@ -2061,9 +2061,9 @@ nlmsg_failure:
return -EMSGSIZE;
}
-static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_migrate,
- struct xfrm_kmaddress *k)
+static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_migrate,
+ const struct xfrm_kmaddress *k)
{
struct net *net = &init_net;
struct sk_buff *skb;
@@ -2079,9 +2079,9 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
-static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_migrate,
- struct xfrm_kmaddress *k)
+static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_migrate,
+ const struct xfrm_kmaddress *k)
{
return -ENOPROTOOPT;
}
@@ -2220,7 +2220,7 @@ static inline size_t xfrm_expire_msgsize(void)
+ nla_total_size(sizeof(struct xfrm_mark));
}
-static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
+static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_user_expire *ue;
struct nlmsghdr *nlh;
@@ -2242,7 +2242,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
@@ -2259,7 +2259,7 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}
-static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
@@ -2274,7 +2274,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}
-static int xfrm_notify_sa_flush(struct km_event *c)
+static int xfrm_notify_sa_flush(const struct km_event *c)
{
struct net *net = c->net;
struct xfrm_usersa_flush *p;
@@ -2330,7 +2330,7 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
return l;
}
-static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
+static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct xfrm_usersa_info *p;
@@ -2387,7 +2387,7 @@ nla_put_failure:
return -1;
}
-static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{
switch (c->event) {
@@ -2546,7 +2546,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
}
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
- int dir, struct km_event *c)
+ int dir, const struct km_event *c)
{
struct xfrm_user_polexpire *upe;
struct nlmsghdr *nlh;
@@ -2576,7 +2576,7 @@ nlmsg_failure:
return -EMSGSIZE;
}
-static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct sk_buff *skb;
@@ -2591,7 +2591,7 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_eve
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}
-static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct xfrm_userpolicy_info *p;
@@ -2656,7 +2656,7 @@ nlmsg_failure:
return -1;
}
-static int xfrm_notify_policy_flush(struct km_event *c)
+static int xfrm_notify_policy_flush(const struct km_event *c)
{
struct net *net = c->net;
struct nlmsghdr *nlh;
@@ -2681,7 +2681,7 @@ nlmsg_failure:
return -1;
}
-static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
switch (c->event) {
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index c9a16abacab4..6c94c6ce2925 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -315,6 +315,7 @@ static void parse_dep_file(void *map, size_t len)
char *end = m + len;
char *p;
char s[PATH_MAX];
+ int first;
p = strchr(m, ':');
if (!p) {
@@ -327,6 +328,7 @@ static void parse_dep_file(void *map, size_t len)
clear_config();
+ first = 1;
while (m < end) {
while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
m++;
@@ -340,9 +342,17 @@ static void parse_dep_file(void *map, size_t len)
if (strrcmp(s, "include/generated/autoconf.h") &&
strrcmp(s, "arch/um/include/uml-config.h") &&
strrcmp(s, ".ver")) {
- printf(" %s \\\n", s);
+ /*
+ * Do not output the first dependency (the
+ * source file), so that kbuild is not confused
+ * if a .c file is rewritten into .S or vice
+ * versa.
+ */
+ if (!first)
+ printf(" %s \\\n", s);
do_config_file(s);
}
+ first = 0;
m = p + 1;
}
printf("\n%s: $(deps_%s)\n\n", target, target);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4c0383da1c9a..58848e3e392c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2654,11 +2654,6 @@ sub process {
WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
}
-# SPIN_LOCK_UNLOCKED & RW_LOCK_UNLOCKED are deprecated
- if ($line =~ /\b(SPIN_LOCK_UNLOCKED|RW_LOCK_UNLOCKED)/) {
- ERROR("Use of $1 is deprecated: see Documentation/spinlocks.txt\n" . $herecurr);
- }
-
# warn about #if 0
if ($line =~ /^.\s*\#\s*if\s+0\b/) {
CHK("if this code is redundant consider removing it\n" .
diff --git a/scripts/extract-ikconfig b/scripts/extract-ikconfig
index 1512c0a755ac..e1862429ccda 100755
--- a/scripts/extract-ikconfig
+++ b/scripts/extract-ikconfig
@@ -56,10 +56,11 @@ trap "rm -f $tmp1 $tmp2" 0
dump_config "$img"
# That didn't work, so retry after decompression.
-try_decompress '\037\213\010' xy gunzip
-try_decompress 'BZh' xy bunzip2
-try_decompress '\135\0\0\0' xxx unlzma
-try_decompress '\211\114\132' xy 'lzop -d'
+try_decompress '\037\213\010' xy gunzip
+try_decompress '\3757zXZ\000' abcde unxz
+try_decompress 'BZh' xy bunzip2
+try_decompress '\135\0\0\0' xxx unlzma
+try_decompress '\211\114\132' xy 'lzop -d'
# Bail out:
echo "$me: Cannot find kernel config." >&2
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index fd81fc33d633..a4fe923c0131 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
#
-# Copywrite 2005-2009 - Steven Rostedt
+# Copyright 2005-2009 - Steven Rostedt
# Licensed under the terms of the GNU GPL License version 2
#
# It's simple enough to figure out how this works.
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
index 44423b4dcb82..8c81d76959ee 100644
--- a/scripts/rt-tester/rt-tester.py
+++ b/scripts/rt-tester/rt-tester.py
@@ -33,8 +33,6 @@ cmd_opcodes = {
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
- "lockbkl" : "9",
- "unlockbkl" : "10",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
index 8821f27cc8be..3710c8b2090d 100644
--- a/scripts/rt-tester/t2-l1-2rt-sameprio.tst
+++ b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal 0
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
index cde1f189a02b..b4cc95975adb 100644
--- a/scripts/rt-tester/t2-l1-pi.tst
+++ b/scripts/rt-tester/t2-l1-pi.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal 0
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
index 3ab0bfc49950..1b57376cc1f7 100644
--- a/scripts/rt-tester/t2-l1-signal.tst
+++ b/scripts/rt-tester/t2-l1-signal.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal 0
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
index f4b5d5d6215f..68b10629b6f4 100644
--- a/scripts/rt-tester/t2-l2-2rt-deadlock.tst
+++ b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal 0
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
index 63440ca2cce9..8e6c8b11ae56 100644
--- a/scripts/rt-tester/t3-l1-pi-1rt.tst
+++ b/scripts/rt-tester/t3-l1-pi-1rt.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
index e5816fe67df3..69c2212fc520 100644
--- a/scripts/rt-tester/t3-l1-pi-2rt.tst
+++ b/scripts/rt-tester/t3-l1-pi-2rt.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
index 718b82b5d3bb..9b0f1eb26a88 100644
--- a/scripts/rt-tester/t3-l1-pi-3rt.tst
+++ b/scripts/rt-tester/t3-l1-pi-3rt.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
index c6e213563498..39ec74ab06ee 100644
--- a/scripts/rt-tester/t3-l1-pi-signal.tst
+++ b/scripts/rt-tester/t3-l1-pi-signal.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
index f53749d59d79..e03db7e010fa 100644
--- a/scripts/rt-tester/t3-l1-pi-steal.tst
+++ b/scripts/rt-tester/t3-l1-pi-steal.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
index cdc3e4fd7bac..7b59100d3e48 100644
--- a/scripts/rt-tester/t3-l2-pi.tst
+++ b/scripts/rt-tester/t3-l2-pi.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
index baa14137f473..2f0e049d6443 100644
--- a/scripts/rt-tester/t4-l2-pi-deboost.tst
+++ b/scripts/rt-tester/t4-l2-pi-deboost.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
index e6ec0c81b54d..04f4034ff895 100644
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
index ca64f8bbf4bc..a48a6ee29ddc 100644
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
@@ -19,8 +19,6 @@
# lockintnowait lock nr (0-7)
# lockcont lock nr (0-7)
# unlock lock nr (0-7)
-# lockbkl lock nr (0-7)
-# unlockbkl lock nr (0-7)
# signal thread to signal (0-7)
# reset 0
# resetevent 0
@@ -39,9 +37,6 @@
# blocked lock nr (0-7)
# blockedwake lock nr (0-7)
# unlocked lock nr (0-7)
-# lockedbkl dont care
-# blockedbkl dont care
-# unlockedbkl dont care
# opcodeeq command opcode or number
# opcodelt number
# opcodegt number
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index ef8729f48586..4d403844e137 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -86,12 +86,16 @@ scm_version()
# Check for mercurial and a mercurial repo.
if test -d .hg && hgid=`hg id 2>/dev/null`; then
- tag=`printf '%s' "$hgid" | cut -s -d' ' -f2`
-
- # Do we have an untagged version?
- if [ -z "$tag" -o "$tag" = tip ]; then
- id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
+ # Do we have a tagged version? If so, latesttagdistance == 1

+ if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+ id=`hg log -r . --template '{latesttag}'`
printf '%s%s' -hg "$id"
+ else
+ tag=`printf '%s' "$hgid" | cut -d' ' -f2`
+ if [ -z "$tag" -o "$tag" = tip ]; then
+ id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
+ printf '%s%s' -hg "$id"
+ fi
fi
# Are there uncommitted changes?
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 92fdc4546141..bd6185d529cf 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -114,6 +114,11 @@ docscope()
cscope -b -f cscope.out
}
+dogtags()
+{
+ all_sources | gtags -f -
+}
+
exuberant()
{
all_sources | xargs $1 -a \
@@ -187,6 +192,10 @@ case "$1" in
docscope
;;
+ "gtags")
+ dogtags
+ ;;
+
"tags")
rm -f tags
xtags ctags
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 44d39785e50d..7493c0ee51cc 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002 - 2009 Tony Finch <dot@dotat.at>
+ * Copyright (c) 2002 - 2011 Tony Finch <dot@dotat.at>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,23 +24,14 @@
*/
/*
+ * unifdef - remove ifdef'ed lines
+ *
* This code was derived from software contributed to Berkeley by Dave Yost.
* It was rewritten to support ANSI C by Tony Finch. The original version
* of unifdef carried the 4-clause BSD copyright licence. None of its code
* remains in this version (though some of the names remain) so it now
* carries a more liberal licence.
*
- * The latest version is available from http://dotat.at/prog/unifdef
- */
-
-static const char * const copyright[] = {
- "@(#) Copyright (c) 2002 - 2009 Tony Finch <dot@dotat.at>\n",
- "$dotat: unifdef/unifdef.c,v 1.190 2009/11/27 17:21:26 fanf2 Exp $",
-};
-
-/*
- * unifdef - remove ifdef'ed lines
- *
* Wishlist:
* provide an option which will append the name of the
* appropriate symbol after #else's and #endif's
@@ -48,12 +39,16 @@ static const char * const copyright[] = {
* #else's and #endif's to see that they match their
* corresponding #ifdef or #ifndef
*
- * The first two items above require better buffer handling, which would
- * also make it possible to handle all "dodgy" directives correctly.
+ * These require better buffer handling, which would also make
+ * it possible to handle all "dodgy" directives correctly.
*/
+#include <sys/types.h>
+#include <sys/stat.h>
+
#include <ctype.h>
#include <err.h>
+#include <errno.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
@@ -61,6 +56,12 @@ static const char * const copyright[] = {
#include <string.h>
#include <unistd.h>
+const char copyright[] =
+ "@(#) $Version: unifdef-2.5 $\n"
+ "@(#) $Author: Tony Finch (dot@dotat.at) $\n"
+ "@(#) $URL: http://dotat.at/prog/unifdef $\n"
+;
+
/* types of input lines: */
typedef enum {
LT_TRUEI, /* a true #if with ignore flag */
@@ -153,6 +154,11 @@ static char const * const linestate_name[] = {
#define EDITSLOP 10
/*
+ * For temporary filenames
+ */
+#define TEMPLATE "unifdef.XXXXXX"
+
+/*
* Globals.
*/
@@ -165,6 +171,7 @@ static bool strictlogic; /* -K: keep ambiguous #ifs */
static bool killconsts; /* -k: eval constant #ifs */
static bool lnnum; /* -n: add #line directives */
static bool symlist; /* -s: output symbol list */
+static bool symdepth; /* -S: output symbol depth */
static bool text; /* -t: this is a text file */
static const char *symname[MAXSYMS]; /* symbol name */
@@ -175,10 +182,18 @@ static int nsyms; /* number of symbols */
static FILE *input; /* input file pointer */
static const char *filename; /* input file name */
static int linenum; /* current line number */
+static FILE *output; /* output file pointer */
+static const char *ofilename; /* output file name */
+static bool overwriting; /* output overwrites input */
+static char tempname[FILENAME_MAX]; /* used when overwriting */
static char tline[MAXLINE+EDITSLOP];/* input buffer plus space */
static char *keyword; /* used for editing #elif's */
+static const char *newline; /* input file format */
+static const char newline_unix[] = "\n";
+static const char newline_crlf[] = "\r\n";
+
static Comment_state incomment; /* comment parser state */
static Line_state linestate; /* #if line parser state */
static Ifstate ifstate[MAXDEPTH]; /* #if processor state */
@@ -189,10 +204,13 @@ static int delcount; /* count of deleted lines */
static unsigned blankcount; /* count of blank lines */
static unsigned blankmax; /* maximum recent blankcount */
static bool constexpr; /* constant #if expression */
+static bool zerosyms = true; /* to format symdepth output */
+static bool firstsym; /* ditto */
static int exitstat; /* program exit status */
static void addsym(bool, bool, char *);
+static void closeout(void);
static void debug(const char *, ...);
static void done(void);
static void error(const char *);
@@ -212,6 +230,7 @@ static void state(Ifstate);
static int strlcmp(const char *, const char *, size_t);
static void unnest(void);
static void usage(void);
+static void version(void);
#define endsym(c) (!isalnum((unsigned char)c) && c != '_')
@@ -223,7 +242,7 @@ main(int argc, char *argv[])
{
int opt;
- while ((opt = getopt(argc, argv, "i:D:U:I:BbcdeKklnst")) != -1)
+ while ((opt = getopt(argc, argv, "i:D:U:I:o:bBcdeKklnsStV")) != -1)
switch (opt) {
case 'i': /* treat stuff controlled by these symbols as text */
/*
@@ -245,16 +264,15 @@ main(int argc, char *argv[])
case 'U': /* undef a symbol */
addsym(false, false, optarg);
break;
- case 'I':
- /* no-op for compatibility with cpp */
- break;
- case 'B': /* compress blank lines around removed section */
- compblank = true;
+ case 'I': /* no-op for compatibility with cpp */
break;
case 'b': /* blank deleted lines instead of omitting them */
case 'l': /* backwards compatibility */
lnblank = true;
break;
+ case 'B': /* compress blank lines around removed section */
+ compblank = true;
+ break;
case 'c': /* treat -D as -U and vice versa */
complement = true;
break;
@@ -273,12 +291,20 @@ main(int argc, char *argv[])
case 'n': /* add #line directive after deleted lines */
lnnum = true;
break;
+ case 'o': /* output to a file */
+ ofilename = optarg;
+ break;
case 's': /* only output list of symbols that control #ifs */
symlist = true;
break;
+ case 'S': /* list symbols with their nesting depth */
+ symlist = symdepth = true;
+ break;
case 't': /* don't parse C comments */
text = true;
break;
+ case 'V': /* print version */
+ version();
default:
usage();
}
@@ -290,21 +316,68 @@ main(int argc, char *argv[])
errx(2, "can only do one file");
} else if (argc == 1 && strcmp(*argv, "-") != 0) {
filename = *argv;
- input = fopen(filename, "r");
+ input = fopen(filename, "rb");
if (input == NULL)
err(2, "can't open %s", filename);
} else {
filename = "[stdin]";
input = stdin;
}
+ if (ofilename == NULL) {
+ ofilename = "[stdout]";
+ output = stdout;
+ } else {
+ struct stat ist, ost;
+ if (stat(ofilename, &ost) == 0 &&
+ fstat(fileno(input), &ist) == 0)
+ overwriting = (ist.st_dev == ost.st_dev
+ && ist.st_ino == ost.st_ino);
+ if (overwriting) {
+ const char *dirsep;
+ int ofd;
+
+ dirsep = strrchr(ofilename, '/');
+ if (dirsep != NULL)
+ snprintf(tempname, sizeof(tempname),
+ "%.*s/" TEMPLATE,
+ (int)(dirsep - ofilename), ofilename);
+ else
+ snprintf(tempname, sizeof(tempname),
+ TEMPLATE);
+ ofd = mkstemp(tempname);
+ if (ofd != -1)
+ output = fdopen(ofd, "wb+");
+ if (output == NULL)
+ err(2, "can't create temporary file");
+ fchmod(ofd, ist.st_mode & (S_IRWXU|S_IRWXG|S_IRWXO));
+ } else {
+ output = fopen(ofilename, "wb");
+ if (output == NULL)
+ err(2, "can't open %s", ofilename);
+ }
+ }
process();
abort(); /* bug */
}
static void
+version(void)
+{
+ const char *c = copyright;
+ for (;;) {
+ while (*++c != '$')
+ if (*c == '\0')
+ exit(0);
+ while (*++c != '$')
+ putc(*c, stderr);
+ putc('\n', stderr);
+ }
+}
+
+static void
usage(void)
{
- fprintf(stderr, "usage: unifdef [-BbcdeKknst] [-Ipath]"
+ fprintf(stderr, "usage: unifdef [-bBcdeKknsStV] [-Ipath]"
" [-Dsym[=val]] [-Usym] [-iDsym[=val]] [-iUsym] ... [file]\n");
exit(2);
}
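The new -o handling writes through a temporary file created with mkstemp() and only replaces the target via rename() once everything has succeeded, so an interrupted run leaves the original untouched. A stripped-down sketch of that pattern (illustrative only; the directory handling and mode preservation done by the patch are omitted):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Atomically replace `path` with `new_contents`: write a mkstemp() file,
 * then rename() it over the original; rename() is atomic on POSIX. */
static int rewrite_file(const char *path, const char *new_contents)
{
	char tmp[] = "unifdef.XXXXXX";	/* same template the patch defines */
	int fd = mkstemp(tmp);
	FILE *out;

	if (fd == -1)
		return -1;
	out = fdopen(fd, "w");
	if (!out) {
		close(fd);
		unlink(tmp);
		return -1;
	}
	fputs(new_contents, out);
	if (fclose(out) == EOF || rename(tmp, path) == -1) {
		unlink(tmp);	/* failure path: keep the original file */
		return -1;
	}
	return 0;
}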
@@ -322,7 +395,8 @@ usage(void)
* When we have processed a group that starts off with a known-false
* #if/#elif sequence (which has therefore been deleted) followed by a
* #elif that we don't understand and therefore must keep, we edit the
- * latter into a #if to keep the nesting correct.
+ * latter into a #if to keep the nesting correct. We use strncpy() to
+ * overwrite the 4 byte token "elif" with "if " without a '\0' byte.
*
* When we find a true #elif in a group, the following block will
* always be kept and the rest of the sequence after the next #elif or
@@ -375,11 +449,11 @@ static void Oelif (void) { if (!iocccok) Eioccc(); Pelif(); }
static void Idrop (void) { Fdrop(); ignoreon(); }
static void Itrue (void) { Ftrue(); ignoreon(); }
static void Ifalse(void) { Ffalse(); ignoreon(); }
-/* edit this line */
+/* modify this line */
static void Mpass (void) { strncpy(keyword, "if ", 4); Pelif(); }
-static void Mtrue (void) { keywordedit("else\n"); state(IS_TRUE_MIDDLE); }
-static void Melif (void) { keywordedit("endif\n"); state(IS_FALSE_TRAILER); }
-static void Melse (void) { keywordedit("endif\n"); state(IS_FALSE_ELSE); }
+static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); }
+static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
+static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
static state_fn * const trans_table[IS_COUNT][LT_COUNT] = {
/* IS_OUTSIDE */
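The Mpass() edit relies on a strncpy() property: when the source supplies at least n bytes, strncpy() writes no terminating NUL, so the four bytes of "elif" can be replaced in place without cutting the line short. A standalone demonstration of that property (not unifdef code; the replacement here is the four-byte "if" plus two spaces):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "#elif FOO\n";

	/* The source is exactly 4 bytes, so no '\0' is written and the
	 * remainder of the buffer survives. */
	strncpy(buf + 1, "if  ", 4);
	fputs(buf, stdout);	/* prints "#if   FOO" with the tail intact */
	return 0;
}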
@@ -431,13 +505,6 @@ static state_fn * const trans_table[IS_COUNT][LT_COUNT] = {
* State machine utility functions
*/
static void
-done(void)
-{
- if (incomment)
- error("EOF in comment");
- exit(exitstat);
-}
-static void
ignoreoff(void)
{
if (depth == 0)
@@ -452,14 +519,8 @@ ignoreon(void)
static void
keywordedit(const char *replacement)
{
- size_t size = tline + sizeof(tline) - keyword;
- char *dst = keyword;
- const char *src = replacement;
- if (size != 0) {
- while ((--size != 0) && (*src != '\0'))
- *dst++ = *src++;
- *dst = '\0';
- }
+ snprintf(keyword, tline + sizeof(tline) - keyword,
+ "%s%s", replacement, newline);
print();
}
static void
@@ -494,24 +555,26 @@ flushline(bool keep)
if (symlist)
return;
if (keep ^ complement) {
- bool blankline = tline[strspn(tline, " \t\n")] == '\0';
+ bool blankline = tline[strspn(tline, " \t\r\n")] == '\0';
if (blankline && compblank && blankcount != blankmax) {
delcount += 1;
blankcount += 1;
} else {
if (lnnum && delcount > 0)
- printf("#line %d\n", linenum);
- fputs(tline, stdout);
+ printf("#line %d%s", linenum, newline);
+ fputs(tline, output);
delcount = 0;
blankmax = blankcount = blankline ? blankcount + 1 : 0;
}
} else {
if (lnblank)
- putc('\n', stdout);
+ fputs(newline, output);
exitstat = 1;
delcount += 1;
blankcount = 0;
}
+ if (debugging)
+ fflush(output);
}
/*
@@ -520,22 +583,55 @@ flushline(bool keep)
static void
process(void)
{
- Linetype lineval;
-
/* When compressing blank lines, act as if the file
is preceded by a large number of blank lines. */
blankmax = blankcount = 1000;
for (;;) {
- linenum++;
- lineval = parseline();
+ Linetype lineval = parseline();
trans_table[ifstate[depth]][lineval]();
- debug("process %s -> %s depth %d",
- linetype_name[lineval],
+ debug("process line %d %s -> %s depth %d",
+ linenum, linetype_name[lineval],
ifstate_name[ifstate[depth]], depth);
}
}
/*
+ * Flush the output and handle errors.
+ */
+static void
+closeout(void)
+{
+ if (symdepth && !zerosyms)
+ printf("\n");
+ if (fclose(output) == EOF) {
+ warn("couldn't write to %s", ofilename);
+ if (overwriting) {
+ unlink(tempname);
+ errx(2, "%s unchanged", filename);
+ } else {
+ exit(2);
+ }
+ }
+}
+
+/*
+ * Clean up and exit.
+ */
+static void
+done(void)
+{
+ if (incomment)
+ error("EOF in comment");
+ closeout();
+ if (overwriting && rename(tempname, ofilename) == -1) {
+ warn("couldn't rename temporary file");
+ unlink(tempname);
+ errx(2, "%s unchanged", ofilename);
+ }
+ exit(exitstat);
+}
+
+/*
* Parse a line and determine its type. We keep the preprocessor line
* parser state between calls in the global variable linestate, with
* help from skipcomment().
@@ -549,14 +645,22 @@ parseline(void)
Linetype retval;
Comment_state wascomment;
+ linenum++;
if (fgets(tline, MAXLINE, input) == NULL)
return (LT_EOF);
+ if (newline == NULL) {
+ if (strrchr(tline, '\n') == strrchr(tline, '\r') + 1)
+ newline = newline_crlf;
+ else
+ newline = newline_unix;
+ }
retval = LT_PLAIN;
wascomment = incomment;
cp = skipcomment(tline);
if (linestate == LS_START) {
if (*cp == '#') {
linestate = LS_HASH;
+ firstsym = true;
cp = skipcomment(cp + 1);
} else if (*cp != '\0')
linestate = LS_DIRTY;
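The sniffing above decides the output line ending from the first line read: CRLF when the last '\n' directly follows the last '\r'. A self-contained version of the same check that also tolerates a missing '\r' (simplified sketch, not the unifdef code path):

#include <stdio.h>
#include <string.h>

static const char *detect_newline(const char *line)
{
	const char *lf = strrchr(line, '\n');
	const char *cr = strrchr(line, '\r');

	/* CRLF only when both are present and adjacent at the line end. */
	if (lf && cr && cr + 1 == lf)
		return "\r\n";
	return "\n";
}

int main(void)
{
	printf("%zu\n", strlen(detect_newline("int x;\r\n")));	/* 2 */
	printf("%zu\n", strlen(detect_newline("int x;\n")));	/* 1 */
	return 0;
}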
@@ -566,7 +670,8 @@ parseline(void)
cp = skipsym(cp);
kwlen = cp - keyword;
/* no way can we deal with a continuation inside a keyword */
- if (strncmp(cp, "\\\n", 2) == 0)
+ if (strncmp(cp, "\\\r\n", 3) == 0 ||
+ strncmp(cp, "\\\n", 2) == 0)
Eioccc();
if (strlcmp("ifdef", keyword, kwlen) == 0 ||
strlcmp("ifndef", keyword, kwlen) == 0) {
@@ -617,9 +722,8 @@ parseline(void)
size_t len = cp - tline;
if (fgets(tline + len, MAXLINE - len, input) == NULL) {
/* append the missing newline */
- tline[len+0] = '\n';
- tline[len+1] = '\0';
- cp++;
+ strcpy(tline + len, newline);
+ cp += strlen(newline);
linestate = LS_START;
} else {
linestate = LS_DIRTY;
@@ -630,7 +734,7 @@ parseline(void)
while (*cp != '\0')
cp = skipcomment(cp + 1);
}
- debug("parser %s comment %s line",
+ debug("parser line %d state %s comment %s line", linenum,
comment_name[incomment], linestate_name[linestate]);
return (retval);
}
@@ -875,11 +979,16 @@ skipcomment(const char *cp)
}
while (*cp != '\0')
/* don't reset to LS_START after a line continuation */
- if (strncmp(cp, "\\\n", 2) == 0)
+ if (strncmp(cp, "\\\r\n", 3) == 0)
+ cp += 3;
+ else if (strncmp(cp, "\\\n", 2) == 0)
cp += 2;
else switch (incomment) {
case NO_COMMENT:
- if (strncmp(cp, "/\\\n", 3) == 0) {
+ if (strncmp(cp, "/\\\r\n", 4) == 0) {
+ incomment = STARTING_COMMENT;
+ cp += 4;
+ } else if (strncmp(cp, "/\\\n", 3) == 0) {
incomment = STARTING_COMMENT;
cp += 3;
} else if (strncmp(cp, "/*", 2) == 0) {
@@ -899,7 +1008,7 @@ skipcomment(const char *cp)
} else if (strncmp(cp, "\n", 1) == 0) {
linestate = LS_START;
cp += 1;
- } else if (strchr(" \t", *cp) != NULL) {
+ } else if (strchr(" \r\t", *cp) != NULL) {
cp += 1;
} else
return (cp);
@@ -931,7 +1040,10 @@ skipcomment(const char *cp)
cp += 1;
continue;
case C_COMMENT:
- if (strncmp(cp, "*\\\n", 3) == 0) {
+ if (strncmp(cp, "*\\\r\n", 4) == 0) {
+ incomment = FINISHING_COMMENT;
+ cp += 4;
+ } else if (strncmp(cp, "*\\\n", 3) == 0) {
incomment = FINISHING_COMMENT;
cp += 3;
} else if (strncmp(cp, "*/", 2) == 0) {
@@ -1015,7 +1127,13 @@ findsym(const char *str)
if (cp == str)
return (-1);
if (symlist) {
- printf("%.*s\n", (int)(cp-str), str);
+ if (symdepth && firstsym)
+ printf("%s%3d", zerosyms ? "" : "\n", depth);
+ firstsym = zerosyms = false;
+ printf("%s%.*s%s",
+ symdepth ? " " : "",
+ (int)(cp-str), str,
+ symdepth ? "" : "\n");
/* we don't care about the value of the symbol */
return (0);
}
@@ -1052,7 +1170,7 @@ addsym(bool ignorethis, bool definethis, char *sym)
value[symind] = val+1;
*val = '\0';
} else if (*val == '\0')
- value[symind] = "";
+ value[symind] = "1";
else
usage();
} else {
@@ -1060,6 +1178,8 @@ addsym(bool ignorethis, bool definethis, char *sym)
usage();
value[symind] = NULL;
}
+ debug("addsym %s=%s", symname[symind],
+ value[symind] ? value[symind] : "undef");
}
/*
@@ -1100,5 +1220,6 @@ error(const char *msg)
else
warnx("%s: %d: %s (#if line %d depth %d)",
filename, linenum, msg, stifline[depth], depth);
+ closeout();
errx(2, "output may be truncated");
}
diff --git a/security/capability.c b/security/capability.c
index 2a5df2b7da83..97560e4f742a 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -12,11 +12,6 @@
#include <linux/security.h>
-static int cap_sysctl(ctl_table *table, int op)
-{
- return 0;
-}
-
static int cap_syslog(int type)
{
return 0;
@@ -118,7 +113,8 @@ static void cap_inode_free_security(struct inode *inode)
}
static int cap_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len)
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
{
return -EOPNOTSUPP;
}
@@ -760,7 +756,7 @@ static int cap_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 sk_sid, u8 dir)
static int cap_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- struct flowi *fl)
+ const struct flowi *fl)
{
return 1;
}
@@ -880,7 +876,6 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, capable);
set_to_cap_if_null(ops, quotactl);
set_to_cap_if_null(ops, quota_on);
- set_to_cap_if_null(ops, sysctl);
set_to_cap_if_null(ops, syslog);
set_to_cap_if_null(ops, settime);
set_to_cap_if_null(ops, vm_enough_memory);
diff --git a/security/commoncap.c b/security/commoncap.c
index 64c2ed9c9015..dbfdaed4cc66 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -93,7 +93,7 @@ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
* Determine whether the current process may set the system clock and timezone
* information, returning 0 if permission granted, -ve if denied.
*/
-int cap_settime(struct timespec *ts, struct timezone *tz)
+int cap_settime(const struct timespec *ts, const struct timezone *tz)
{
if (!capable(CAP_SYS_TIME))
return -EPERM;
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index ac79032bdf23..08408bd71462 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -110,8 +110,7 @@ struct ima_iint_cache {
};
/* LIM API function definitions */
-int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
- int mask, int function);
+int ima_must_measure(struct inode *inode, int mask, int function);
int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file);
void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
const unsigned char *filename);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index d3963de6003d..da36d2c085a4 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -105,20 +105,13 @@ err_out:
* mask: contains the permission mask
* fsmagic: hex value
*
- * Must be called with iint->mutex held.
- *
- * Return 0 to measure. Return 1 if already measured.
- * For matching a DONT_MEASURE policy, no policy, or other
- * error, return an error code.
+ * Return 0 to measure. For matching a DONT_MEASURE policy, no policy,
+ * or other error, return an error code.
*/
-int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
- int mask, int function)
+int ima_must_measure(struct inode *inode, int mask, int function)
{
int must_measure;
- if (iint && iint->flags & IMA_MEASURED)
- return 1;
-
must_measure = ima_match_policy(inode, function, mask);
return must_measure ? 0 : -EACCES;
}
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index c442e47b6785..4ae73040ab7b 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -137,11 +137,6 @@ void ima_inode_free(struct inode *inode)
{
struct ima_iint_cache *iint;
- if (inode->i_readcount)
- printk(KERN_INFO "%s: readcount: %u\n", __func__, inode->i_readcount);
-
- inode->i_readcount = 0;
-
if (!IS_IMA(inode))
return;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 203de979d305..39d66dc2b8e9 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -36,67 +36,17 @@ static int __init hash_setup(char *str)
}
__setup("ima_hash=", hash_setup);
-struct ima_imbalance {
- struct hlist_node node;
- unsigned long fsmagic;
-};
-
-/*
- * ima_limit_imbalance - emit one imbalance message per filesystem type
- *
- * Maintain list of filesystem types that do not measure files properly.
- * Return false if unknown, true if known.
- */
-static bool ima_limit_imbalance(struct file *file)
-{
- static DEFINE_SPINLOCK(ima_imbalance_lock);
- static HLIST_HEAD(ima_imbalance_list);
-
- struct super_block *sb = file->f_dentry->d_sb;
- struct ima_imbalance *entry;
- struct hlist_node *node;
- bool found = false;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(entry, node, &ima_imbalance_list, node) {
- if (entry->fsmagic == sb->s_magic) {
- found = true;
- break;
- }
- }
- rcu_read_unlock();
- if (found)
- goto out;
-
- entry = kmalloc(sizeof(*entry), GFP_NOFS);
- if (!entry)
- goto out;
- entry->fsmagic = sb->s_magic;
- spin_lock(&ima_imbalance_lock);
- /*
- * we could have raced and something else might have added this fs
- * to the list, but we don't really care
- */
- hlist_add_head_rcu(&entry->node, &ima_imbalance_list);
- spin_unlock(&ima_imbalance_lock);
- printk(KERN_INFO "IMA: unmeasured files on fsmagic: %lX\n",
- entry->fsmagic);
-out:
- return found;
-}
-
/*
- * ima_counts_get - increment file counts
+ * ima_rdwr_violation_check
*
- * Maintain read/write counters for all files, but only
- * invalidate the PCR for measured files:
+ * Only invalidate the PCR for measured files:
* - Opening a file for write when already open for read,
* results in a time of measure, time of use (ToMToU) error.
* - Opening a file for read when already open for write,
* could result in a file measurement error.
*
*/
-void ima_counts_get(struct file *file)
+static void ima_rdwr_violation_check(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
@@ -104,32 +54,25 @@ void ima_counts_get(struct file *file)
int rc;
bool send_tomtou = false, send_writers = false;
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) || !ima_initialized)
return;
- spin_lock(&inode->i_lock);
-
- if (!ima_initialized)
- goto out;
+ mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */
if (mode & FMODE_WRITE) {
- if (inode->i_readcount && IS_IMA(inode))
+ if (atomic_read(&inode->i_readcount) && IS_IMA(inode))
send_tomtou = true;
goto out;
}
- rc = ima_must_measure(NULL, inode, MAY_READ, FILE_CHECK);
+ rc = ima_must_measure(inode, MAY_READ, FILE_CHECK);
if (rc < 0)
goto out;
if (atomic_read(&inode->i_writecount) > 0)
send_writers = true;
out:
- /* remember the vfs deals with i_writecount */
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- inode->i_readcount++;
-
- spin_unlock(&inode->i_lock);
+ mutex_unlock(&inode->i_mutex);
if (send_tomtou)
ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
@@ -139,71 +82,25 @@ out:
"open_writers");
}
-/*
- * Decrement ima counts
- */
-static void ima_dec_counts(struct inode *inode, struct file *file)
-{
- mode_t mode = file->f_mode;
-
- assert_spin_locked(&inode->i_lock);
-
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- if (unlikely(inode->i_readcount == 0)) {
- if (!ima_limit_imbalance(file)) {
- printk(KERN_INFO "%s: open/free imbalance (r:%u)\n",
- __func__, inode->i_readcount);
- dump_stack();
- }
- return;
- }
- inode->i_readcount--;
- }
-}
-
static void ima_check_last_writer(struct ima_iint_cache *iint,
struct inode *inode,
struct file *file)
{
mode_t mode = file->f_mode;
- BUG_ON(!mutex_is_locked(&iint->mutex));
- assert_spin_locked(&inode->i_lock);
-
+ mutex_lock(&iint->mutex);
if (mode & FMODE_WRITE &&
atomic_read(&inode->i_writecount) == 1 &&
iint->version != inode->i_version)
iint->flags &= ~IMA_MEASURED;
-}
-
-static void ima_file_free_iint(struct ima_iint_cache *iint, struct inode *inode,
- struct file *file)
-{
- mutex_lock(&iint->mutex);
- spin_lock(&inode->i_lock);
-
- ima_dec_counts(inode, file);
- ima_check_last_writer(iint, inode, file);
-
- spin_unlock(&inode->i_lock);
mutex_unlock(&iint->mutex);
}
-static void ima_file_free_noiint(struct inode *inode, struct file *file)
-{
- spin_lock(&inode->i_lock);
-
- ima_dec_counts(inode, file);
-
- spin_unlock(&inode->i_lock);
-}
-
/**
* ima_file_free - called on __fput()
* @file: pointer to file structure being freed
*
- * Flag files that changed, based on i_version;
- * and decrement the i_readcount.
+ * Flag files that changed, based on i_version
*/
void ima_file_free(struct file *file)
{
@@ -214,12 +111,10 @@ void ima_file_free(struct file *file)
return;
iint = ima_iint_find(inode);
+ if (!iint)
+ return;
- if (iint)
- ima_file_free_iint(iint, inode, file);
- else
- ima_file_free_noiint(inode, file);
-
+ ima_check_last_writer(iint, inode, file);
}
static int process_measurement(struct file *file, const unsigned char *filename,
@@ -232,7 +127,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
- rc = ima_must_measure(NULL, inode, mask, function);
+ rc = ima_must_measure(inode, mask, function);
if (rc != 0)
return rc;
retry:
@@ -246,7 +141,7 @@ retry:
mutex_lock(&iint->mutex);
- rc = ima_must_measure(iint, inode, mask, function);
+ rc = iint->flags & IMA_MEASURED ? 1 : 0;
if (rc != 0)
goto out;
@@ -317,6 +212,7 @@ int ima_file_check(struct file *file, int mask)
{
int rc;
+ ima_rdwr_violation_check(file);
rc = process_measurement(file, file->f_dentry->d_name.name,
mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
FILE_CHECK);
diff --git a/security/security.c b/security/security.c
index 7b7308ace8c5..a31b6d9a3b73 100644
--- a/security/security.c
+++ b/security/security.c
@@ -181,11 +181,6 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap)
return ret;
}
-int security_sysctl(struct ctl_table *table, int op)
-{
- return security_ops->sysctl(table, op);
-}
-
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
{
return security_ops->quotactl(cmds, type, id, sb);
@@ -201,7 +196,7 @@ int security_syslog(int type)
return security_ops->syslog(type);
}
-int security_settime(struct timespec *ts, struct timezone *tz)
+int security_settime(const struct timespec *ts, const struct timezone *tz)
{
return security_ops->settime(ts, tz);
}
@@ -335,11 +330,13 @@ void security_inode_free(struct inode *inode)
}
int security_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len)
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
{
if (unlikely(IS_PRIVATE(inode)))
return -EOPNOTSUPP;
- return security_ops->inode_init_security(inode, dir, name, value, len);
+ return security_ops->inode_init_security(inode, dir, qstr, name, value,
+ len);
}
EXPORT_SYMBOL(security_inode_init_security);
@@ -359,6 +356,7 @@ int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
return 0;
return security_ops->path_mkdir(dir, dentry, mode);
}
+EXPORT_SYMBOL(security_path_mkdir);
int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
@@ -373,6 +371,7 @@ int security_path_unlink(struct path *dir, struct dentry *dentry)
return 0;
return security_ops->path_unlink(dir, dentry);
}
+EXPORT_SYMBOL(security_path_unlink);
int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
@@ -399,6 +398,7 @@ int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
return security_ops->path_rename(old_dir, old_dentry, new_dir,
new_dentry);
}
+EXPORT_SYMBOL(security_path_rename);
int security_path_truncate(struct path *path)
{
@@ -1233,7 +1233,8 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
}
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, struct flowi *fl)
+ struct xfrm_policy *xp,
+ const struct flowi *fl)
{
return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c8d699270687..6c51eaa6953f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -24,9 +24,11 @@
*/
#include <linux/init.h>
+#include <linux/kd.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/errno.h>
+#include <linux/ext2_fs.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/xattr.h>
@@ -36,14 +38,15 @@
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
+#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/namei.h>
#include <linux/mount.h>
-#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/tty.h>
@@ -70,7 +73,6 @@
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/personality.h>
-#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/string.h>
#include <linux/selinux.h>
@@ -1120,39 +1122,35 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
}
#ifdef CONFIG_PROC_FS
-static int selinux_proc_get_sid(struct proc_dir_entry *de,
+static int selinux_proc_get_sid(struct dentry *dentry,
u16 tclass,
u32 *sid)
{
- int buflen, rc;
- char *buffer, *path, *end;
+ int rc;
+ char *buffer, *path;
buffer = (char *)__get_free_page(GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- buflen = PAGE_SIZE;
- end = buffer+buflen;
- *--end = '\0';
- buflen--;
- path = end-1;
- *path = '/';
- while (de && de != de->parent) {
- buflen -= de->namelen + 1;
- if (buflen < 0)
- break;
- end -= de->namelen;
- memcpy(end, de->name, de->namelen);
- *--end = '/';
- path = end;
- de = de->parent;
+ path = dentry_path_raw(dentry, buffer, PAGE_SIZE);
+ if (IS_ERR(path))
+ rc = PTR_ERR(path);
+ else {
+ /* each process gets a /proc/PID/ entry. Strip off the
+ * PID part to get a valid selinux labeling.
+ * e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
+ while (path[1] >= '0' && path[1] <= '9') {
+ path[1] = '/';
+ path++;
+ }
+ rc = security_genfs_sid("proc", path, tclass, sid);
}
- rc = security_genfs_sid("proc", path, tclass, sid);
free_page((unsigned long)buffer);
return rc;
}
#else
-static int selinux_proc_get_sid(struct proc_dir_entry *de,
+static int selinux_proc_get_sid(struct dentry *dentry,
u16 tclass,
u32 *sid)
{
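The intent of the digit-stripping loop is the one stated in the comment: drop the per-process PID component so genfs labeling sees a stable path such as /net/rpc/nfs. A simplified, non-in-place sketch of that idea with a made-up PID (illustration only, not the kernel's exact loop):

#include <stdio.h>

/* Skip a leading "/<digits>" component: "/4711/net/rpc/nfs" -> "/net/rpc/nfs". */
static const char *skip_pid_component(const char *path)
{
	const char *p = path;

	if (p[0] == '/' && p[1] >= '0' && p[1] <= '9') {
		p++;
		while (*p >= '0' && *p <= '9')
			p++;
	}
	return p;
}

int main(void)
{
	puts(skip_pid_component("/4711/net/rpc/nfs"));	/* "/net/rpc/nfs" */
	puts(skip_pid_component("/irq/0/spurious"));	/* unchanged */
	return 0;
}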
@@ -1300,10 +1298,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
/* Try to obtain a transition SID. */
isec->sclass = inode_mode_to_security_class(inode->i_mode);
- rc = security_transition_sid(isec->task_sid,
- sbsec->sid,
- isec->sclass,
- &sid);
+ rc = security_transition_sid(isec->task_sid, sbsec->sid,
+ isec->sclass, NULL, &sid);
if (rc)
goto out_unlock;
isec->sid = sid;
@@ -1316,10 +1312,9 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
isec->sid = sbsec->sid;
if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
- struct proc_inode *proci = PROC_I(inode);
- if (proci->pde) {
+ if (opt_dentry) {
isec->sclass = inode_mode_to_security_class(inode->i_mode);
- rc = selinux_proc_get_sid(proci->pde,
+ rc = selinux_proc_get_sid(opt_dentry,
isec->sclass,
&sid);
if (rc)
@@ -1578,7 +1573,7 @@ static int may_create(struct inode *dir,
return rc;
if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
- rc = security_transition_sid(sid, dsec->sid, tclass, &newsid);
+ rc = security_transition_sid(sid, dsec->sid, tclass, NULL, &newsid);
if (rc)
return rc;
}
@@ -1862,82 +1857,6 @@ static int selinux_capable(struct task_struct *tsk, const struct cred *cred,
return task_has_capability(tsk, cred, cap, audit);
}
-static int selinux_sysctl_get_sid(ctl_table *table, u16 tclass, u32 *sid)
-{
- int buflen, rc;
- char *buffer, *path, *end;
-
- rc = -ENOMEM;
- buffer = (char *)__get_free_page(GFP_KERNEL);
- if (!buffer)
- goto out;
-
- buflen = PAGE_SIZE;
- end = buffer+buflen;
- *--end = '\0';
- buflen--;
- path = end-1;
- *path = '/';
- while (table) {
- const char *name = table->procname;
- size_t namelen = strlen(name);
- buflen -= namelen + 1;
- if (buflen < 0)
- goto out_free;
- end -= namelen;
- memcpy(end, name, namelen);
- *--end = '/';
- path = end;
- table = table->parent;
- }
- buflen -= 4;
- if (buflen < 0)
- goto out_free;
- end -= 4;
- memcpy(end, "/sys", 4);
- path = end;
- rc = security_genfs_sid("proc", path, tclass, sid);
-out_free:
- free_page((unsigned long)buffer);
-out:
- return rc;
-}
-
-static int selinux_sysctl(ctl_table *table, int op)
-{
- int error = 0;
- u32 av;
- u32 tsid, sid;
- int rc;
-
- sid = current_sid();
-
- rc = selinux_sysctl_get_sid(table, (op == 0001) ?
- SECCLASS_DIR : SECCLASS_FILE, &tsid);
- if (rc) {
- /* Default to the well-defined sysctl SID. */
- tsid = SECINITSID_SYSCTL;
- }
-
- /* The op values are "defined" in sysctl.c, thereby creating
- * a bad coupling between this module and sysctl.c */
- if (op == 001) {
- error = avc_has_perm(sid, tsid,
- SECCLASS_DIR, DIR__SEARCH, NULL);
- } else {
- av = 0;
- if (op & 004)
- av |= FILE__READ;
- if (op & 002)
- av |= FILE__WRITE;
- if (av)
- error = avc_has_perm(sid, tsid,
- SECCLASS_FILE, av, NULL);
- }
-
- return error;
-}
-
static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
{
const struct cred *cred = current_cred();
@@ -2060,7 +1979,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
- SECCLASS_PROCESS, &new_tsec->sid);
+ SECCLASS_PROCESS, NULL,
+ &new_tsec->sid);
if (rc)
return rc;
}
@@ -2509,8 +2429,8 @@ static void selinux_inode_free_security(struct inode *inode)
}
static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value,
- size_t *len)
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
{
const struct task_security_struct *tsec = current_security();
struct inode_security_struct *dsec;
@@ -2531,7 +2451,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
- &newsid);
+ qstr, &newsid);
if (rc) {
printk(KERN_WARNING "%s: "
"security_transition_sid failed, rc=%d (dev=%s "
@@ -2932,16 +2852,47 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const struct cred *cred = current_cred();
- u32 av = 0;
+ int error = 0;
- if (_IOC_DIR(cmd) & _IOC_WRITE)
- av |= FILE__WRITE;
- if (_IOC_DIR(cmd) & _IOC_READ)
- av |= FILE__READ;
- if (!av)
- av = FILE__IOCTL;
+ switch (cmd) {
+ case FIONREAD:
+ /* fall through */
+ case FIBMAP:
+ /* fall through */
+ case FIGETBSZ:
+ /* fall through */
+ case EXT2_IOC_GETFLAGS:
+ /* fall through */
+ case EXT2_IOC_GETVERSION:
+ error = file_has_perm(cred, file, FILE__GETATTR);
+ break;
+
+ case EXT2_IOC_SETFLAGS:
+ /* fall through */
+ case EXT2_IOC_SETVERSION:
+ error = file_has_perm(cred, file, FILE__SETATTR);
+ break;
+
+ /* sys_ioctl() checks */
+ case FIONBIO:
+ /* fall through */
+ case FIOASYNC:
+ error = file_has_perm(cred, file, 0);
+ break;
+
+ case KDSKBENT:
+ case KDSKBSENT:
+ error = task_has_capability(current, cred, CAP_SYS_TTY_CONFIG,
+ SECURITY_CAP_AUDIT);
+ break;
- return file_has_perm(cred, file, av);
+ /* default case assumes that the command will go
+ * to the file's ioctl() function.
+ */
+ default:
+ error = file_has_perm(cred, file, FILE__IOCTL);
+ }
+ return error;
}
static int default_noexec;
@@ -4002,7 +3953,6 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
{
int err = 0;
struct sk_security_struct *sksec = sk->sk_security;
- u32 peer_sid;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
char *addrp;
@@ -4021,20 +3971,10 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
return err;
}
- if (selinux_policycap_netpeer) {
- err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
- if (err)
- return err;
- err = avc_has_perm(sk_sid, peer_sid,
- SECCLASS_PEER, PEER__RECV, &ad);
- if (err)
- selinux_netlbl_err(skb, err, 0);
- } else {
- err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
- if (err)
- return err;
- err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
- }
+ err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
+ if (err)
+ return err;
+ err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
return err;
}
@@ -4529,9 +4469,8 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
SECCLASS_PACKET, PACKET__SEND, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
- if (selinux_policycap_netpeer)
- if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
- return NF_DROP_ERR(-ECONNREFUSED);
+ if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
+ return NF_DROP_ERR(-ECONNREFUSED);
return NF_ACCEPT;
}
@@ -4574,27 +4513,14 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
* from the sending socket, otherwise use the kernel's sid */
sk = skb->sk;
if (sk == NULL) {
- switch (family) {
- case PF_INET:
- if (IPCB(skb)->flags & IPSKB_FORWARDED)
- secmark_perm = PACKET__FORWARD_OUT;
- else
- secmark_perm = PACKET__SEND;
- break;
- case PF_INET6:
- if (IP6CB(skb)->flags & IP6SKB_FORWARDED)
- secmark_perm = PACKET__FORWARD_OUT;
- else
- secmark_perm = PACKET__SEND;
- break;
- default:
- return NF_DROP_ERR(-ECONNREFUSED);
- }
- if (secmark_perm == PACKET__FORWARD_OUT) {
+ if (skb->skb_iif) {
+ secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
return NF_DROP;
- } else
+ } else {
+ secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL;
+ }
} else {
struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid;
@@ -4848,7 +4774,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
* message queue this message will be stored in
*/
rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG,
- &msec->sid);
+ NULL, &msec->sid);
if (rc)
return rc;
}
@@ -5402,7 +5328,6 @@ static struct security_operations selinux_ops = {
.ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
.capset = selinux_capset,
- .sysctl = selinux_sysctl,
.capable = selinux_capable,
.quotactl = selinux_quotactl,
.quota_on = selinux_quota_on,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 7ed3663332ec..4227e5fa7861 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -132,8 +132,7 @@ struct security_class_mapping secclass_map[] = {
{ "appletalk_socket",
{ COMMON_SOCK_PERMS, NULL } },
{ "packet",
- { "send", "recv", "relabelto", "flow_in", "flow_out",
- "forward_in", "forward_out", NULL } },
+ { "send", "recv", "relabelto", "forward_in", "forward_out", NULL } },
{ "key",
{ "view", "read", "write", "search", "link", "setattr", "create",
NULL } },
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 671273eb1115..348eb00cb668 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -8,6 +8,7 @@
#ifndef _SELINUX_SECURITY_H_
#define _SELINUX_SECURITY_H_
+#include <linux/dcache.h>
#include <linux/magic.h>
#include <linux/types.h>
#include "flask.h"
@@ -28,13 +29,14 @@
#define POLICYDB_VERSION_POLCAP 22
#define POLICYDB_VERSION_PERMISSIVE 23
#define POLICYDB_VERSION_BOUNDARY 24
+#define POLICYDB_VERSION_FILENAME_TRANS 25
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
#else
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_BOUNDARY
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_FILENAME_TRANS
#endif
/* Mask for just the mount related flags */
@@ -106,8 +108,8 @@ void security_compute_av(u32 ssid, u32 tsid,
void security_compute_av_user(u32 ssid, u32 tsid,
u16 tclass, struct av_decision *avd);
-int security_transition_sid(u32 ssid, u32 tsid,
- u16 tclass, u32 *out_sid);
+int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+ const struct qstr *qstr, u32 *out_sid);
int security_transition_sid_user(u32 ssid, u32 tsid,
u16 tclass, u32 *out_sid);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 13128f9a3e5a..b43813c9e049 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -19,7 +19,7 @@ void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, struct flowi *fl);
+ struct xfrm_policy *xp, const struct flowi *fl);
/*
* Extract the security blob from the sock (it's actually on the socket)
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index dff0c75345c1..63ce2f9e441d 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -14,7 +14,7 @@
*
* Copyright (C) 2003 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
+ * it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*
* Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
@@ -27,16 +27,16 @@ struct avtab_key {
u16 source_type; /* source type */
u16 target_type; /* target type */
u16 target_class; /* target object class */
-#define AVTAB_ALLOWED 1
-#define AVTAB_AUDITALLOW 2
-#define AVTAB_AUDITDENY 4
-#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
-#define AVTAB_TRANSITION 16
-#define AVTAB_MEMBER 32
-#define AVTAB_CHANGE 64
-#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
-#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */
-#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */
+#define AVTAB_ALLOWED 0x0001
+#define AVTAB_AUDITALLOW 0x0002
+#define AVTAB_AUDITDENY 0x0004
+#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
+#define AVTAB_TRANSITION 0x0010
+#define AVTAB_MEMBER 0x0020
+#define AVTAB_CHANGE 0x0040
+#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */
+#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */
u16 specified; /* what field is specified */
};
@@ -86,7 +86,6 @@ void avtab_cache_destroy(void);
#define MAX_AVTAB_HASH_BITS 11
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
-#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
#endif /* _SS_AVTAB_H_ */
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 1f4e93c2ae86..922f8afa89dd 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -36,7 +36,6 @@ struct ebitmap {
};
#define ebitmap_length(e) ((e)->highbit)
-#define ebitmap_startbit(e) ((e)->node ? (e)->node->startbit : 0)
static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
struct ebitmap_node **n)
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 57363562f0f8..e7b850ad57ee 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -123,6 +123,11 @@ static struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_FILENAME_TRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -704,6 +709,7 @@ void policydb_destroy(struct policydb *p)
int i;
struct role_allow *ra, *lra = NULL;
struct role_trans *tr, *ltr = NULL;
+ struct filename_trans *ft, *nft;
for (i = 0; i < SYM_NUM; i++) {
cond_resched();
@@ -781,6 +787,15 @@ void policydb_destroy(struct policydb *p)
}
flex_array_free(p->type_attr_map_array);
}
+
+ ft = p->filename_trans;
+ while (ft) {
+ nft = ft->next;
+ kfree(ft->name);
+ kfree(ft);
+ ft = nft;
+ }
+
ebitmap_destroy(&p->policycaps);
ebitmap_destroy(&p->permissive_map);
@@ -1788,6 +1803,76 @@ out:
return rc;
}
+static int filename_trans_read(struct policydb *p, void *fp)
+{
+ struct filename_trans *ft, *last;
+ u32 nel, len;
+ char *name;
+ __le32 buf[4];
+ int rc, i;
+
+ if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+ return 0;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ nel = le32_to_cpu(buf[0]);
+
+ printk(KERN_ERR "%s: nel=%d\n", __func__, nel);
+
+ last = p->filename_trans;
+ while (last && last->next)
+ last = last->next;
+
+ for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ goto out;
+
+ /* add it to the tail of the list */
+ if (!last)
+ p->filename_trans = ft;
+ else
+ last->next = ft;
+ last = ft;
+
+ /* length of the path component string */
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ name = kmalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ goto out;
+
+ ft->name = name;
+
+ /* path component string */
+ rc = next_entry(name, fp, len);
+ if (rc)
+ goto out;
+ name[len] = 0;
+
+ printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name);
+
+ rc = next_entry(buf, fp, sizeof(u32) * 4);
+ if (rc)
+ goto out;
+
+ ft->stype = le32_to_cpu(buf[0]);
+ ft->ttype = le32_to_cpu(buf[1]);
+ ft->tclass = le32_to_cpu(buf[2]);
+ ft->otype = le32_to_cpu(buf[3]);
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
static int genfs_read(struct policydb *p, void *fp)
{
int i, j, rc;
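filename_trans_read() above consumes a length-prefixed record stream: a u32 count, then for each rule a u32 name length, the raw name bytes (no NUL on disk), and four u32 values for stype, ttype, tclass and otype, all little-endian. A user-space reader sketch under those assumptions (whole section already in memory, little-endian host; not kernel code):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ftrans {
	char *name;
	uint32_t stype, ttype, tclass, otype;
};

static uint32_t get_le32(const unsigned char **p)
{
	uint32_t v;

	memcpy(&v, *p, 4);	/* host assumed little-endian for brevity */
	*p += 4;
	return v;
}

static struct ftrans *read_filename_trans(const unsigned char *p, uint32_t *count)
{
	uint32_t nel = get_le32(&p), i;
	struct ftrans *tab = calloc(nel, sizeof(*tab));

	for (i = 0; tab && i < nel; i++) {
		uint32_t len = get_le32(&p);

		tab[i].name = strndup((const char *)p, len);
		p += len;
		tab[i].stype  = get_le32(&p);
		tab[i].ttype  = get_le32(&p);
		tab[i].tclass = get_le32(&p);
		tab[i].otype  = get_le32(&p);
	}
	*count = nel;
	return tab;
}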
@@ -2251,6 +2336,10 @@ int policydb_read(struct policydb *p, void *fp)
lra = ra;
}
+ rc = filename_trans_read(p, fp);
+ if (rc)
+ goto bad;
+
rc = policydb_index(p);
if (rc)
goto bad;
@@ -3025,6 +3114,43 @@ static int range_write(struct policydb *p, void *fp)
return 0;
}
+static int filename_trans_write(struct policydb *p, void *fp)
+{
+ struct filename_trans *ft;
+ u32 len, nel = 0;
+ __le32 buf[4];
+ int rc;
+
+ for (ft = p->filename_trans; ft; ft = ft->next)
+ nel++;
+
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (ft = p->filename_trans; ft; ft = ft->next) {
+ len = strlen(ft->name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(ft->name, sizeof(char), len, fp);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(ft->stype);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(ft->otype);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
/*
* Write the configuration data in a policy database
* structure to a policy database binary representation
@@ -3135,6 +3261,10 @@ int policydb_write(struct policydb *p, void *fp)
if (rc)
return rc;
+ rc = filename_trans_write(p, fp);
+ if (rc)
+ return rc;
+
rc = ocontext_write(p, info, fp);
if (rc)
return rc;
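
The two new helpers above, filename_trans_read() and filename_trans_write(), agree on a simple little-endian record layout: a u32 rule count, then per rule a u32 name length, the raw path-component bytes, and four u32 values (stype, ttype, tclass, otype). A minimal userspace sketch of that encoding follows; the struct, helper and sample values are illustrative stand-ins rather than the kernel code.

/* Minimal userspace sketch of the filename_trans record layout
 * (assumes a little-endian host; not the kernel implementation). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ft_record {		/* mirrors the fields of struct filename_trans */
	uint32_t stype, ttype, tclass, otype;
	const char *name;
};

/* Append one record to buf, returning the number of bytes written. */
static size_t ft_pack(uint8_t *buf, const struct ft_record *ft)
{
	uint32_t len = (uint32_t)strlen(ft->name);
	size_t off = 0;

	memcpy(buf + off, &len, 4); off += 4;		/* name length */
	memcpy(buf + off, ft->name, len); off += len;	/* name bytes  */
	memcpy(buf + off, &ft->stype, 4); off += 4;
	memcpy(buf + off, &ft->ttype, 4); off += 4;
	memcpy(buf + off, &ft->tclass, 4); off += 4;
	memcpy(buf + off, &ft->otype, 4); off += 4;
	return off;
}

int main(void)
{
	struct ft_record ft = { 42, 7, 6, 99, "resolv.conf" };
	uint8_t buf[128];
	uint32_t nel = 1;
	size_t off;

	memcpy(buf, &nel, 4);			/* leading rule count */
	off = 4 + ft_pack(buf + 4, &ft);
	printf("encoded %zu bytes for %u rule(s)\n", off, (unsigned)nel);
	return 0;
}
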
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 4e3ab9d0b315..732ea4a68682 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -77,6 +77,15 @@ struct role_trans {
struct role_trans *next;
};
+struct filename_trans {
+ struct filename_trans *next;
+ u32 stype; /* current process */
+ u32 ttype; /* parent dir context */
+ u16 tclass; /* class of new object */
+ const char *name; /* last path component */
+ u32 otype; /* expected type of the new object */
+};
+
struct role_allow {
u32 role; /* current role */
u32 new_role; /* new role */
@@ -217,6 +226,9 @@ struct policydb {
/* role transitions */
struct role_trans *role_tr;
+ /* file transitions with the last path component */
+ struct filename_trans *filename_trans;
+
/* bools indexed by (value - 1) */
struct cond_bool_datum **bool_val_to_struct;
/* type enforcement conditional access vectors and transitions */
@@ -302,7 +314,7 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
return 0;
}
-static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp)
+static inline int put_entry(const void *buf, size_t bytes, int num, struct policy_file *fp)
{
size_t len = bytes * num;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index a03cfaf0ee07..2e36e03c21f2 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1343,10 +1343,27 @@ out:
return -EACCES;
}
+static void filename_compute_type(struct policydb *p, struct context *newcontext,
+ u32 scon, u32 tcon, u16 tclass,
+ const struct qstr *qstr)
+{
+ struct filename_trans *ft;
+ for (ft = p->filename_trans; ft; ft = ft->next) {
+ if (ft->stype == scon &&
+ ft->ttype == tcon &&
+ ft->tclass == tclass &&
+ !strcmp(ft->name, qstr->name)) {
+ newcontext->type = ft->otype;
+ return;
+ }
+ }
+}
+
static int security_compute_sid(u32 ssid,
u32 tsid,
u16 orig_tclass,
u32 specified,
+ const struct qstr *qstr,
u32 *out_sid,
bool kern)
{
@@ -1442,6 +1459,11 @@ static int security_compute_sid(u32 ssid,
newcontext.type = avdatum->data;
}
+ /* if we have a qstr, this is a file transition check, so apply those rules */
+ if (qstr)
+ filename_compute_type(&policydb, &newcontext, scontext->type,
+ tcontext->type, tclass, qstr);
+
/* Check for class-specific changes. */
if (tclass == policydb.process_class) {
if (specified & AVTAB_TRANSITION) {
@@ -1495,22 +1517,17 @@ out:
* if insufficient memory is available, or %0 if the new SID was
* computed successfully.
*/
-int security_transition_sid(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 *out_sid)
+int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+ const struct qstr *qstr, u32 *out_sid)
{
return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
- out_sid, true);
+ qstr, out_sid, true);
}
-int security_transition_sid_user(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 *out_sid)
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid)
{
return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
- out_sid, false);
+ NULL, out_sid, false);
}
/**
@@ -1531,8 +1548,8 @@ int security_member_sid(u32 ssid,
u16 tclass,
u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid,
- false);
+ return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
+ out_sid, false);
}
/**
@@ -1553,8 +1570,8 @@ int security_change_sid(u32 ssid,
u16 tclass,
u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid,
- false);
+ return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL,
+ out_sid, false);
}
/* Clone the SID into the new SID table. */
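
In the services.c hunk above, filename_compute_type() walks the singly linked rule list and overrides the computed type on the first rule whose source type, parent-directory type, object class and last path component all match. The sketch below models that lookup in plain C; the types, values and test harness are assumptions for illustration only.

/* Userspace sketch of the filename-transition lookup done by
 * filename_compute_type(); simplified types, invented sample data. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct filename_trans {
	struct filename_trans *next;
	uint32_t stype;		/* type of the creating process */
	uint32_t ttype;		/* type of the parent directory */
	uint16_t tclass;	/* class of the new object */
	const char *name;	/* last path component */
	uint32_t otype;		/* type to assign on a match */
};

static uint32_t compute_type(const struct filename_trans *list,
			     uint32_t deftype, uint32_t stype,
			     uint32_t ttype, uint16_t tclass,
			     const char *name)
{
	for (const struct filename_trans *ft = list; ft; ft = ft->next)
		if (ft->stype == stype && ft->ttype == ttype &&
		    ft->tclass == tclass && strcmp(ft->name, name) == 0)
			return ft->otype;	/* first matching rule wins */
	return deftype;				/* otherwise keep the default */
}

int main(void)
{
	struct filename_trans rule = { NULL, 1, 2, 6, "resolv.conf", 3 };

	printf("%u\n", (unsigned)compute_type(&rule, 9, 1, 2, 6, "resolv.conf")); /* 3 */
	printf("%u\n", (unsigned)compute_type(&rule, 9, 1, 2, 6, "hosts"));       /* 9 */
	return 0;
}
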
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index fff78d3b51a2..af2442d419dd 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -112,7 +112,7 @@ int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
*/
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
- struct flowi *fl)
+ const struct flowi *fl)
{
u32 state_sid;
int rc;
@@ -208,7 +208,7 @@ static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
if (!uctx)
goto not_from_user;
- if (uctx->ctx_doi != XFRM_SC_ALG_SELINUX)
+ if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
return -EINVAL;
str_len = uctx->ctx_len;
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 129c4eb8ffb1..b449cfdad21c 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -52,13 +52,16 @@ struct socket_smack {
struct inode_smack {
char *smk_inode; /* label of the fso */
char *smk_task; /* label of the task */
+ char *smk_mmap; /* label of the mmap domain */
struct mutex smk_lock; /* initialization lock */
int smk_flags; /* smack inode flags */
};
struct task_smack {
- char *smk_task; /* label used for access control */
- char *smk_forked; /* label when forked */
+ char *smk_task; /* label for access control */
+ char *smk_forked; /* label when forked */
+ struct list_head smk_rules; /* per task access rules */
+ struct mutex smk_rules_lock; /* lock for the rules */
};
#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
@@ -152,12 +155,6 @@ struct smack_known {
#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
/*
- * A limit on the number of entries in the lists
- * makes some of the list administration easier.
- */
-#define SMACK_LIST_MAX 10000
-
-/*
* CIPSO defaults.
*/
#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */
@@ -174,9 +171,7 @@ struct smack_known {
/*
* Just to make the common cases easier to deal with
*/
-#define MAY_ANY (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
#define MAY_ANYREAD (MAY_READ | MAY_EXEC)
-#define MAY_ANYWRITE (MAY_WRITE | MAY_APPEND)
#define MAY_READWRITE (MAY_READ | MAY_WRITE)
#define MAY_NOT 0
@@ -202,7 +197,7 @@ struct inode_smack *new_inode_smack(char *);
/*
* These functions are in smack_access.c
*/
-int smk_access_entry(char *, char *);
+int smk_access_entry(char *, char *, struct list_head *);
int smk_access(char *, char *, int, struct smk_audit_info *);
int smk_curacc(char *, u32, struct smk_audit_info *);
int smack_to_cipso(const char *, struct smack_cipso *);
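
The revised smk_access_entry() prototype above returns either an access mask or -ENOENT, so callers can distinguish "no rule exists" from "a rule that grants nothing". A small userspace model of that calling convention, with made-up labels and access bits:

/* Sketch of the "mask or -ENOENT" convention adopted by
 * smk_access_entry(); labels and the rule table are made up. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAY_READ  0x4
#define MAY_WRITE 0x2

struct rule { const char *subject, *object; int access; };

static const struct rule rules[] = {
	{ "Pop", "Rock", MAY_READ },
	{ "Pop", "Jazz", 0 },		/* explicit rule granting nothing */
};

static int access_entry(const char *subject, const char *object)
{
	for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		if (!strcmp(rules[i].subject, subject) &&
		    !strcmp(rules[i].object, object))
			return rules[i].access;	/* may legitimately be 0 */
	return -ENOENT;				/* no rule at all */
}

int main(void)
{
	printf("%d\n", access_entry("Pop", "Rock"));	/* 4: read allowed      */
	printf("%d\n", access_entry("Pop", "Jazz"));	/* 0: rule, no access   */
	printf("%d\n", access_entry("Pop", "Funk"));	/* -ENOENT: no rule     */
	return 0;
}
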
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 7ba8478f599e..86453db4333d 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -70,10 +70,11 @@ int log_policy = SMACK_AUDIT_DENIED;
* smk_access_entry - look up matching access rule
* @subject_label: a pointer to the subject's Smack label
* @object_label: a pointer to the object's Smack label
+ * @rule_list: the list of rules to search
*
* This function looks up the subject/object pair in the
- * access rule list and returns pointer to the matching rule if found,
- * NULL otherwise.
+ * access rule list and returns the access mode. If no
+ * entry is found, -ENOENT is returned.
*
* NOTE:
* Even though Smack labels are usually shared on smack_list
@@ -85,13 +86,13 @@ int log_policy = SMACK_AUDIT_DENIED;
* will be on the list, so checking the pointers may be a worthwhile
* optimization.
*/
-int smk_access_entry(char *subject_label, char *object_label)
+int smk_access_entry(char *subject_label, char *object_label,
+ struct list_head *rule_list)
{
- u32 may = MAY_NOT;
+ int may = -ENOENT;
struct smack_rule *srp;
- rcu_read_lock();
- list_for_each_entry_rcu(srp, &smack_rule_list, list) {
+ list_for_each_entry_rcu(srp, rule_list, list) {
if (srp->smk_subject == subject_label ||
strcmp(srp->smk_subject, subject_label) == 0) {
if (srp->smk_object == object_label ||
@@ -101,7 +102,6 @@ int smk_access_entry(char *subject_label, char *object_label)
}
}
}
- rcu_read_unlock();
return may;
}
@@ -129,7 +129,7 @@ int smk_access_entry(char *subject_label, char *object_label)
int smk_access(char *subject_label, char *object_label, int request,
struct smk_audit_info *a)
{
- u32 may = MAY_NOT;
+ int may = MAY_NOT;
int rc = 0;
/*
@@ -181,13 +181,14 @@ int smk_access(char *subject_label, char *object_label, int request,
* Beyond here an explicit relationship is required.
* If the requested access is contained in the available
* access (e.g. read is included in readwrite) it's
- * good.
- */
- may = smk_access_entry(subject_label, object_label);
- /*
- * This is a bit map operation.
+ * good. A negative response from smk_access_entry()
+ * indicates there is no entry for this pair.
*/
- if ((request & may) == request)
+ rcu_read_lock();
+ may = smk_access_entry(subject_label, object_label, &smack_rule_list);
+ rcu_read_unlock();
+
+ if (may > 0 && (request & may) == request)
goto out_audit;
rc = -EACCES;
@@ -212,12 +213,27 @@ out_audit:
*/
int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
{
+ struct task_smack *tsp = current_security();
+ char *sp = smk_of_task(tsp);
+ int may;
int rc;
- char *sp = smk_of_current();
+ /*
+ * Check the global rule list
+ */
rc = smk_access(sp, obj_label, mode, NULL);
- if (rc == 0)
- goto out_audit;
+ if (rc == 0) {
+ /*
+ * If there is an entry in the task's rule list
+ * it can further restrict access.
+ */
+ may = smk_access_entry(sp, obj_label, &tsp->smk_rules);
+ if (may < 0)
+ goto out_audit;
+ if ((mode & may) == mode)
+ goto out_audit;
+ rc = -EACCES;
+ }
/*
* Return if a specific label has been designated as the
@@ -228,7 +244,7 @@ int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
goto out_audit;
if (capable(CAP_MAC_OVERRIDE))
- return 0;
+ rc = 0;
out_audit:
#ifdef CONFIG_AUDIT
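
With the smack_access.c changes above, smk_curacc() consults the global rule list first and then lets an entry in the task's private list narrow, but never widen, the result. The following sketch captures that composition; the helper and the MAY_* values are placeholders, not the kernel's.

/* Userspace model of smk_curacc()'s two-stage check: the per-task
 * rule may only restrict what the global rule already granted. */
#include <errno.h>
#include <stdio.h>

#define MAY_READ  0x4
#define MAY_WRITE 0x2

/* global_may / task_may stand in for the two rule-list lookups */
static int check(int requested, int global_may, int task_may)
{
	if (global_may < 0 || (requested & global_may) != requested)
		return -EACCES;		/* global rule must allow it */
	if (task_may < 0)
		return 0;		/* no task rule: keep global result */
	if ((requested & task_may) != requested)
		return -EACCES;		/* task rule narrowed it away */
	return 0;
}

int main(void)
{
	/* read allowed globally, task list silent */
	printf("%d\n", check(MAY_READ, MAY_READ | MAY_WRITE, -ENOENT));
	/* write allowed globally, but the task rule only grants read */
	printf("%d\n", check(MAY_WRITE, MAY_READ | MAY_WRITE, MAY_READ));
	return 0;
}
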
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 533bf3255d7f..23c7a6d0c80c 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -33,6 +33,7 @@
#include <net/cipso_ipv4.h>
#include <linux/audit.h>
#include <linux/magic.h>
+#include <linux/dcache.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
@@ -84,6 +85,56 @@ struct inode_smack *new_inode_smack(char *smack)
return isp;
}
+/**
+ * new_task_smack - allocate a task security blob
+ * @task: a pointer to the Smack label to use for the task
+ * @forked: a pointer to the Smack label the task had when forked
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns the new blob or NULL if there's no memory available
+ */
+static struct task_smack *new_task_smack(char *task, char *forked, gfp_t gfp)
+{
+ struct task_smack *tsp;
+
+ tsp = kzalloc(sizeof(struct task_smack), gfp);
+ if (tsp == NULL)
+ return NULL;
+
+ tsp->smk_task = task;
+ tsp->smk_forked = forked;
+ INIT_LIST_HEAD(&tsp->smk_rules);
+ mutex_init(&tsp->smk_rules_lock);
+
+ return tsp;
+}
+
+/**
+ * smk_copy_rules - copy a rule set
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
+ gfp_t gfp)
+{
+ struct smack_rule *nrp;
+ struct smack_rule *orp;
+ int rc = 0;
+
+ INIT_LIST_HEAD(nhead);
+
+ list_for_each_entry_rcu(orp, ohead, list) {
+ nrp = kzalloc(sizeof(struct smack_rule), gfp);
+ if (nrp == NULL) {
+ rc = -ENOMEM;
+ break;
+ }
+ *nrp = *orp;
+ list_add_rcu(&nrp->list, nhead);
+ }
+ return rc;
+}
+
/*
* LSM hooks.
* We he, that is fun!
@@ -102,23 +153,17 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
int rc;
struct smk_audit_info ad;
- char *sp, *tsp;
+ char *tsp;
rc = cap_ptrace_access_check(ctp, mode);
if (rc != 0)
return rc;
- sp = smk_of_current();
tsp = smk_of_task(task_security(ctp));
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, ctp);
- /* we won't log here, because rc can be overriden */
- rc = smk_access(sp, tsp, MAY_READWRITE, NULL);
- if (rc != 0 && capable(CAP_MAC_OVERRIDE))
- rc = 0;
-
- smack_log(sp, tsp, MAY_READWRITE, rc, &ad);
+ rc = smk_curacc(tsp, MAY_READWRITE, &ad);
return rc;
}
@@ -134,23 +179,17 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
{
int rc;
struct smk_audit_info ad;
- char *sp, *tsp;
+ char *tsp;
rc = cap_ptrace_traceme(ptp);
if (rc != 0)
return rc;
+ tsp = smk_of_task(task_security(ptp));
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, ptp);
- sp = smk_of_current();
- tsp = smk_of_task(task_security(ptp));
- /* we won't log here, because rc can be overriden */
- rc = smk_access(tsp, sp, MAY_READWRITE, NULL);
- if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE))
- rc = 0;
-
- smack_log(tsp, sp, MAY_READWRITE, rc, &ad);
+ rc = smk_curacc(tsp, MAY_READWRITE, &ad);
return rc;
}
@@ -463,6 +502,7 @@ static void smack_inode_free_security(struct inode *inode)
* smack_inode_init_security - copy out the smack from an inode
* @inode: the inode
* @dir: unused
+ * @qstr: unused
* @name: where to put the attribute name
* @value: where to put the attribute value
* @len: where to put the length of the attribute
@@ -470,11 +510,12 @@ static void smack_inode_free_security(struct inode *inode)
* Returns 0 if it all works out, -ENOMEM if there's no memory
*/
static int smack_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len)
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
{
char *isp = smk_of_inode(inode);
char *dsp = smk_of_inode(dir);
- u32 may;
+ int may;
if (name) {
*name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL);
@@ -483,14 +524,17 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
}
if (value) {
- may = smk_access_entry(smk_of_current(), dsp);
+ rcu_read_lock();
+ may = smk_access_entry(smk_of_current(), dsp, &smack_rule_list);
+ rcu_read_unlock();
/*
* If the access rule allows transmutation and
* the directory requests transmutation then
* by all means transmute.
*/
- if (((may & MAY_TRANSMUTE) != 0) && smk_inode_transmutable(dir))
+ if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
+ smk_inode_transmutable(dir))
isp = dsp;
*value = kstrdup(isp, GFP_KERNEL);
@@ -716,7 +760,8 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
- strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
+ strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
if (!capable(CAP_MAC_ADMIN))
rc = -EPERM;
/*
@@ -773,6 +818,12 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
isp->smk_task = nsp;
else
isp->smk_task = smack_known_invalid.smk_known;
+ } else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ nsp = smk_import(value, size);
+ if (nsp != NULL)
+ isp->smk_mmap = nsp;
+ else
+ isp->smk_mmap = smack_known_invalid.smk_known;
} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
isp->smk_flags |= SMK_INODE_TRANSMUTE;
@@ -815,7 +866,8 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
- strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
if (!capable(CAP_MAC_ADMIN))
rc = -EPERM;
} else
@@ -829,6 +881,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
if (rc == 0) {
isp = dentry->d_inode->i_security;
isp->smk_task = NULL;
+ isp->smk_mmap = NULL;
}
return rc;
@@ -1060,6 +1113,126 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
}
/**
+ * smack_file_mmap - check permissions for a mmap operation
+ * @file: the file structure for the file to map, may be NULL for anonymous memory
+ * @reqprot: the protection requested by the application
+ * @prot: the protection that will be applied by the kernel
+ * @flags: the operational flags
+ * @addr: the requested mapping address
+ * @addr_only: check only the address, not the file-backed access
+ * Returns 0 if permission is granted.
+ */
+static int smack_file_mmap(struct file *file,
+ unsigned long reqprot, unsigned long prot,
+ unsigned long flags, unsigned long addr,
+ unsigned long addr_only)
+{
+ struct smack_rule *srp;
+ struct task_smack *tsp;
+ char *sp;
+ char *msmack;
+ char *osmack;
+ struct inode_smack *isp;
+ struct dentry *dp;
+ int may;
+ int mmay;
+ int tmay;
+ int rc;
+
+ /* do DAC check on address space usage */
+ rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
+ if (rc || addr_only)
+ return rc;
+
+ if (file == NULL || file->f_dentry == NULL)
+ return 0;
+
+ dp = file->f_dentry;
+
+ if (dp->d_inode == NULL)
+ return 0;
+
+ isp = dp->d_inode->i_security;
+ if (isp->smk_mmap == NULL)
+ return 0;
+ msmack = isp->smk_mmap;
+
+ tsp = current_security();
+ sp = smk_of_current();
+ rc = 0;
+
+ rcu_read_lock();
+ /*
+ * For each Smack rule associated with the subject
+ * label verify that the SMACK64MMAP also has access
+ * to that rule's object label.
+ *
+ * Because neither of the labels comes
+ * from the networking code it is sufficient
+ * to compare pointers.
+ */
+ list_for_each_entry_rcu(srp, &smack_rule_list, list) {
+ if (srp->smk_subject != sp)
+ continue;
+
+ osmack = srp->smk_object;
+ /*
+ * Matching labels always allows access.
+ */
+ if (msmack == osmack)
+ continue;
+ /*
+ * If there is a matching local rule take
+ * that into account as well.
+ */
+ may = smk_access_entry(srp->smk_subject, osmack,
+ &tsp->smk_rules);
+ if (may == -ENOENT)
+ may = srp->smk_access;
+ else
+ may &= srp->smk_access;
+ /*
+ * If may is zero the SMACK64MMAP subject can't
+ * possibly have less access.
+ */
+ if (may == 0)
+ continue;
+
+ /*
+ * Fetch the global list entry.
+ * If there isn't one a SMACK64MMAP subject
+ * can't have as much access as current.
+ */
+ mmay = smk_access_entry(msmack, osmack, &smack_rule_list);
+ if (mmay == -ENOENT) {
+ rc = -EACCES;
+ break;
+ }
+ /*
+ * If there is a local entry it modifies the
+ * potential access, too.
+ */
+ tmay = smk_access_entry(msmack, osmack, &tsp->smk_rules);
+ if (tmay != -ENOENT)
+ mmay &= tmay;
+
+ /*
+ * If there is any access available to current that is
+ * not available to a SMACK64MMAP subject
+ * deny access.
+ */
+ if ((may | mmay) != mmay) {
+ rc = -EACCES;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
* smack_file_set_fowner - set the file security blob value
* @file: object in question
*
@@ -1095,6 +1268,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
* struct fown_struct is never outside the context of a struct file
*/
file = container_of(fown, struct file, f_owner);
+
/* we don't log here as rc can be overriden */
rc = smk_access(file->f_security, tsp, MAY_WRITE, NULL);
if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
@@ -1145,9 +1319,14 @@ static int smack_file_receive(struct file *file)
*/
static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- cred->security = kzalloc(sizeof(struct task_smack), gfp);
- if (cred->security == NULL)
+ struct task_smack *tsp;
+
+ tsp = new_task_smack(NULL, NULL, gfp);
+ if (tsp == NULL)
return -ENOMEM;
+
+ cred->security = tsp;
+
return 0;
}
@@ -1156,13 +1335,24 @@ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
* smack_cred_free - "free" task-level security credentials
* @cred: the credentials in question
*
- * Smack isn't using copies of blobs. Everyone
- * points to an immutable list. The blobs never go away.
- * There is no leak here.
*/
static void smack_cred_free(struct cred *cred)
{
- kfree(cred->security);
+ struct task_smack *tsp = cred->security;
+ struct smack_rule *rp;
+ struct list_head *l;
+ struct list_head *n;
+
+ if (tsp == NULL)
+ return;
+ cred->security = NULL;
+
+ list_for_each_safe(l, n, &tsp->smk_rules) {
+ rp = list_entry(l, struct smack_rule, list);
+ list_del(&rp->list);
+ kfree(rp);
+ }
+ kfree(tsp);
}
/**
@@ -1178,13 +1368,16 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
{
struct task_smack *old_tsp = old->security;
struct task_smack *new_tsp;
+ int rc;
- new_tsp = kzalloc(sizeof(struct task_smack), gfp);
+ new_tsp = new_task_smack(old_tsp->smk_task, old_tsp->smk_task, gfp);
if (new_tsp == NULL)
return -ENOMEM;
- new_tsp->smk_task = old_tsp->smk_task;
- new_tsp->smk_forked = old_tsp->smk_task;
+ rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
+ if (rc != 0)
+ return rc;
+
new->security = new_tsp;
return 0;
}
@@ -1203,6 +1396,11 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
new_tsp->smk_task = old_tsp->smk_task;
new_tsp->smk_forked = old_tsp->smk_task;
+ mutex_init(&new_tsp->smk_rules_lock);
+ INIT_LIST_HEAD(&new_tsp->smk_rules);
+
+ /* TODO: copy the rule list here, as smack_cred_prepare() does */
}
/**
@@ -2419,6 +2617,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
}
}
isp->smk_task = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
+ isp->smk_mmap = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);
dput(dp);
break;
@@ -2478,6 +2677,7 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
static int smack_setprocattr(struct task_struct *p, char *name,
void *value, size_t size)
{
+ int rc;
struct task_smack *tsp;
struct task_smack *oldtsp;
struct cred *new;
@@ -2513,13 +2713,16 @@ static int smack_setprocattr(struct task_struct *p, char *name,
new = prepare_creds();
if (new == NULL)
return -ENOMEM;
- tsp = kzalloc(sizeof(struct task_smack), GFP_KERNEL);
+
+ tsp = new_task_smack(newsmack, oldtsp->smk_forked, GFP_KERNEL);
if (tsp == NULL) {
kfree(new);
return -ENOMEM;
}
- tsp->smk_task = newsmack;
- tsp->smk_forked = oldtsp->smk_forked;
+ rc = smk_copy_rules(&tsp->smk_rules, &oldtsp->smk_rules, GFP_KERNEL);
+ if (rc != 0)
+ return rc;
+
new->security = tsp;
commit_creds(new);
return size;
@@ -3221,6 +3424,7 @@ struct security_operations smack_ops = {
.file_ioctl = smack_file_ioctl,
.file_lock = smack_file_lock,
.file_fcntl = smack_file_fcntl,
+ .file_mmap = smack_file_mmap,
.file_set_fowner = smack_file_set_fowner,
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
@@ -3334,23 +3538,20 @@ static __init int smack_init(void)
struct cred *cred;
struct task_smack *tsp;
- tsp = kzalloc(sizeof(struct task_smack), GFP_KERNEL);
+ if (!security_module_enable(&smack_ops))
+ return 0;
+
+ tsp = new_task_smack(smack_known_floor.smk_known,
+ smack_known_floor.smk_known, GFP_KERNEL);
if (tsp == NULL)
return -ENOMEM;
- if (!security_module_enable(&smack_ops)) {
- kfree(tsp);
- return 0;
- }
-
printk(KERN_INFO "Smack: Initializing.\n");
/*
* Set the security state for the initial task.
*/
cred = (struct cred *) current->cred;
- tsp->smk_forked = smack_known_floor.smk_known;
- tsp->smk_task = smack_known_floor.smk_known;
cred->security = tsp;
/* initialize the smack_know_list */
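
The core of the new smack_file_mmap() hook is a subset test on access bitmasks: for every object the current label can reach, the SMACK64MMAP label must grant at least the same access, and (may | mmay) != mmay flags the case where it does not. A tiny standalone demonstration of that test, with placeholder bit values:

/* The bitmask subset test used by smack_file_mmap(): access "may"
 * must be a subset of "mmay", otherwise the mapping is denied. */
#include <stdbool.h>
#include <stdio.h>

#define MAY_READ  0x4
#define MAY_WRITE 0x2
#define MAY_EXEC  0x1

static bool is_subset(int may, int mmay)
{
	return (may | mmay) == mmay;	/* no bit of may outside mmay */
}

int main(void)
{
	printf("%d\n", is_subset(MAY_READ, MAY_READ | MAY_WRITE));	/* 1: allowed */
	printf("%d\n", is_subset(MAY_READ | MAY_EXEC, MAY_READ));	/* 0: denied  */
	return 0;
}
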
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 362d5eda948b..90d1bbaaa6f3 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -43,6 +43,7 @@ enum smk_inos {
SMK_NETLBLADDR = 8, /* single label hosts */
SMK_ONLYCAP = 9, /* the only "capable" label */
SMK_LOGGING = 10, /* logging */
+ SMK_LOAD_SELF = 11, /* task specific rules */
};
/*
@@ -135,104 +136,30 @@ static void smk_netlabel_audit_set(struct netlbl_audit *nap)
#define SMK_NETLBLADDRMIN 9
#define SMK_NETLBLADDRMAX 42
-/*
- * Seq_file read operations for /smack/load
- */
-
-static void *load_seq_start(struct seq_file *s, loff_t *pos)
-{
- if (*pos == SEQ_READ_FINISHED)
- return NULL;
- if (list_empty(&smack_rule_list))
- return NULL;
- return smack_rule_list.next;
-}
-
-static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct list_head *list = v;
-
- if (list_is_last(list, &smack_rule_list)) {
- *pos = SEQ_READ_FINISHED;
- return NULL;
- }
- return list->next;
-}
-
-static int load_seq_show(struct seq_file *s, void *v)
-{
- struct list_head *list = v;
- struct smack_rule *srp =
- list_entry(list, struct smack_rule, list);
-
- seq_printf(s, "%s %s", (char *)srp->smk_subject,
- (char *)srp->smk_object);
-
- seq_putc(s, ' ');
-
- if (srp->smk_access & MAY_READ)
- seq_putc(s, 'r');
- if (srp->smk_access & MAY_WRITE)
- seq_putc(s, 'w');
- if (srp->smk_access & MAY_EXEC)
- seq_putc(s, 'x');
- if (srp->smk_access & MAY_APPEND)
- seq_putc(s, 'a');
- if (srp->smk_access & MAY_TRANSMUTE)
- seq_putc(s, 't');
- if (srp->smk_access == 0)
- seq_putc(s, '-');
-
- seq_putc(s, '\n');
-
- return 0;
-}
-
-static void load_seq_stop(struct seq_file *s, void *v)
-{
- /* No-op */
-}
-
-static const struct seq_operations load_seq_ops = {
- .start = load_seq_start,
- .next = load_seq_next,
- .show = load_seq_show,
- .stop = load_seq_stop,
-};
-
-/**
- * smk_open_load - open() for /smack/load
- * @inode: inode structure representing file
- * @file: "load" file pointer
- *
- * For reading, use load_seq_* seq_file reading operations.
- */
-static int smk_open_load(struct inode *inode, struct file *file)
-{
- return seq_open(file, &load_seq_ops);
-}
-
/**
* smk_set_access - add a rule to the rule list
* @srp: the new rule to add
+ * @rule_list: the list of rules
+ * @rule_lock: the rule list lock
*
* Looks through the current subject/object/access list for
* the subject/object pair and replaces the access that was
* there. If the pair isn't found add it with the specified
* access.
*
+ * Returns 1 if a rule was found to exist already, 0 if it is new
* Returns 0 if nothing goes wrong or -ENOMEM if it fails
* during the allocation of the new pair to add.
*/
-static int smk_set_access(struct smack_rule *srp)
+static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
+ struct mutex *rule_lock)
{
struct smack_rule *sp;
- int ret = 0;
- int found;
- mutex_lock(&smack_list_lock);
+ int found = 0;
- found = 0;
- list_for_each_entry_rcu(sp, &smack_rule_list, list) {
+ mutex_lock(rule_lock);
+
+ list_for_each_entry_rcu(sp, rule_list, list) {
if (sp->smk_subject == srp->smk_subject &&
sp->smk_object == srp->smk_object) {
found = 1;
@@ -241,19 +168,21 @@ static int smk_set_access(struct smack_rule *srp)
}
}
if (found == 0)
- list_add_rcu(&srp->list, &smack_rule_list);
+ list_add_rcu(&srp->list, rule_list);
- mutex_unlock(&smack_list_lock);
+ mutex_unlock(rule_lock);
- return ret;
+ return found;
}
/**
- * smk_write_load - write() for /smack/load
+ * smk_write_load_list - write() for any /smack/load
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start - must be 0
+ * @rule_list: the list of rules to write to
+ * @rule_lock: lock for the rule list
*
* Get one smack access rule from above.
* The format is exactly:
@@ -263,21 +192,19 @@ static int smk_set_access(struct smack_rule *srp)
*
* writes must be SMK_LABELLEN+SMK_LABELLEN+SMK_ACCESSLEN bytes.
*/
-static ssize_t smk_write_load(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ struct list_head *rule_list,
+ struct mutex *rule_lock)
{
struct smack_rule *rule;
char *data;
int rc = -EINVAL;
/*
- * Must have privilege.
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
- return -EPERM;
-
if (*ppos != 0)
return -EINVAL;
/*
@@ -372,11 +299,13 @@ static ssize_t smk_write_load(struct file *file, const char __user *buf,
goto out_free_rule;
}
- rc = smk_set_access(rule);
-
- if (!rc)
- rc = count;
- goto out;
+ rc = count;
+ /*
+ * smk_set_access returns true if there was already a rule
+ * for the subject/object pair, and false if it was new.
+ */
+ if (!smk_set_access(rule, rule_list, rule_lock))
+ goto out;
out_free_rule:
kfree(rule);
@@ -385,6 +314,108 @@ out:
return rc;
}
+
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos == SEQ_READ_FINISHED)
+ return NULL;
+ if (list_empty(&smack_rule_list))
+ return NULL;
+ return smack_rule_list.next;
+}
+
+static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct list_head *list = v;
+
+ if (list_is_last(list, &smack_rule_list)) {
+ *pos = SEQ_READ_FINISHED;
+ return NULL;
+ }
+ return list->next;
+}
+
+static int load_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry(list, struct smack_rule, list);
+
+ seq_printf(s, "%s %s", (char *)srp->smk_subject,
+ (char *)srp->smk_object);
+
+ seq_putc(s, ' ');
+
+ if (srp->smk_access & MAY_READ)
+ seq_putc(s, 'r');
+ if (srp->smk_access & MAY_WRITE)
+ seq_putc(s, 'w');
+ if (srp->smk_access & MAY_EXEC)
+ seq_putc(s, 'x');
+ if (srp->smk_access & MAY_APPEND)
+ seq_putc(s, 'a');
+ if (srp->smk_access & MAY_TRANSMUTE)
+ seq_putc(s, 't');
+ if (srp->smk_access == 0)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static void load_seq_stop(struct seq_file *s, void *v)
+{
+ /* No-op */
+}
+
+static const struct seq_operations load_seq_ops = {
+ .start = load_seq_start,
+ .next = load_seq_next,
+ .show = load_seq_show,
+ .stop = load_seq_stop,
+};
+
+/**
+ * smk_open_load - open() for /smack/load
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_seq_* seq_file reading operations.
+ */
+static int smk_open_load(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_seq_ops);
+}
+
+/**
+ * smk_write_load - write() for /smack/load
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_load_list(file, buf, count, ppos, &smack_rule_list,
+ &smack_list_lock);
+}
+
static const struct file_operations smk_load_ops = {
.open = smk_open_load,
.read = seq_read,
@@ -1288,6 +1319,112 @@ static const struct file_operations smk_logging_ops = {
.write = smk_write_logging,
.llseek = default_llseek,
};
+
+/*
+ * Seq_file read operations for /smack/load-self
+ */
+
+static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ if (*pos == SEQ_READ_FINISHED)
+ return NULL;
+ if (list_empty(&tsp->smk_rules))
+ return NULL;
+ return tsp->smk_rules.next;
+}
+
+static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+ struct list_head *list = v;
+
+ if (list_is_last(list, &tsp->smk_rules)) {
+ *pos = SEQ_READ_FINISHED;
+ return NULL;
+ }
+ return list->next;
+}
+
+static int load_self_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry(list, struct smack_rule, list);
+
+ seq_printf(s, "%s %s", (char *)srp->smk_subject,
+ (char *)srp->smk_object);
+
+ seq_putc(s, ' ');
+
+ if (srp->smk_access & MAY_READ)
+ seq_putc(s, 'r');
+ if (srp->smk_access & MAY_WRITE)
+ seq_putc(s, 'w');
+ if (srp->smk_access & MAY_EXEC)
+ seq_putc(s, 'x');
+ if (srp->smk_access & MAY_APPEND)
+ seq_putc(s, 'a');
+ if (srp->smk_access & MAY_TRANSMUTE)
+ seq_putc(s, 't');
+ if (srp->smk_access == 0)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static void load_self_seq_stop(struct seq_file *s, void *v)
+{
+ /* No-op */
+}
+
+static const struct seq_operations load_self_seq_ops = {
+ .start = load_self_seq_start,
+ .next = load_self_seq_next,
+ .show = load_self_seq_show,
+ .stop = load_self_seq_stop,
+};
+
+
+/**
+ * smk_open_load_self - open() for /smack/load-self
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_self_seq_ops);
+}
+
+/**
+ * smk_write_load_self - write() for /smack/load-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_write_load_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock);
+}
+
+static const struct file_operations smk_load_self_ops = {
+ .open = smk_open_load_self,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load_self,
+ .release = seq_release,
+};
/**
* smk_fill_super - fill the /smackfs superblock
* @sb: the empty superblock
@@ -1304,23 +1441,26 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root_inode;
static struct tree_descr smack_files[] = {
- [SMK_LOAD] =
- {"load", &smk_load_ops, S_IRUGO|S_IWUSR},
- [SMK_CIPSO] =
- {"cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
- [SMK_DOI] =
- {"doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
- [SMK_DIRECT] =
- {"direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
- [SMK_AMBIENT] =
- {"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
- [SMK_NETLBLADDR] =
- {"netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR},
- [SMK_ONLYCAP] =
- {"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
- [SMK_LOGGING] =
- {"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
- /* last one */ {""}
+ [SMK_LOAD] = {
+ "load", &smk_load_ops, S_IRUGO|S_IWUSR},
+ [SMK_CIPSO] = {
+ "cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
+ [SMK_DOI] = {
+ "doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
+ [SMK_DIRECT] = {
+ "direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
+ [SMK_AMBIENT] = {
+ "ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
+ [SMK_NETLBLADDR] = {
+ "netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR},
+ [SMK_ONLYCAP] = {
+ "onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOGGING] = {
+ "logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD_SELF] = {
+ "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+ /* last one */
+ {""}
};
rc = simple_fill_super(sb, SMACK_MAGIC, smack_files);
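
Both /smack/load and the new /smack/load-self render each rule as "subject object" followed by the access mode spelled with r, w, x, a and t, or '-' when empty, as in load_seq_show() and load_self_seq_show() above. A short helper that produces the same spelling; the MAY_* constants here are stand-ins, not the kernel's values:

/* Spell an access mask the way the /smack/load seq_file output does;
 * the MAY_* values below are placeholders for illustration. */
#include <stdio.h>

#define MAY_READ      0x04
#define MAY_WRITE     0x02
#define MAY_EXEC      0x01
#define MAY_APPEND    0x08
#define MAY_TRANSMUTE 0x10

static void spell_access(int access, char out[8])
{
	int n = 0;

	if (access & MAY_READ)      out[n++] = 'r';
	if (access & MAY_WRITE)     out[n++] = 'w';
	if (access & MAY_EXEC)      out[n++] = 'x';
	if (access & MAY_APPEND)    out[n++] = 'a';
	if (access & MAY_TRANSMUTE) out[n++] = 't';
	if (access == 0)            out[n++] = '-';
	out[n] = '\0';
}

int main(void)
{
	char buf[8];

	spell_access(MAY_READ | MAY_WRITE, buf);
	printf("Pop Rock %s\n", buf);	/* "Pop Rock rw" */
	spell_access(0, buf);
	printf("Pop Jazz %s\n", buf);	/* "Pop Jazz -"  */
	return 0;
}
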
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 9d32f182301e..cb09f1fce910 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -927,7 +927,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag)
{
const u8 acc_mode = ACC_MODE(flag);
- int error = -ENOMEM;
+ int error = 0;
struct tomoyo_path_info buf;
struct tomoyo_request_info r;
int idx;
@@ -938,9 +938,6 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
buf.name = NULL;
r.mode = TOMOYO_CONFIG_DISABLED;
idx = tomoyo_read_lock();
- if (!tomoyo_get_realpath(&buf, path))
- goto out;
- error = 0;
/*
* If the filename is specified by "deny_rewrite" keyword,
* we need to check "allow_rewrite" permission when the filename is not
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 7c1fc64cb53d..d0cead38d5fb 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -210,6 +210,7 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
if (mask & ISR_RXINTR) {
struct aaci_runtime *aacirun = &aaci->capture;
+ bool period_elapsed = false;
void *ptr;
if (!aacirun->substream || !aacirun->start) {
@@ -222,15 +223,12 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
ptr = aacirun->ptr;
do {
- unsigned int len = aacirun->fifosz;
+ unsigned int len = aacirun->fifo_bytes;
u32 val;
if (aacirun->bytes <= 0) {
aacirun->bytes += aacirun->period;
- aacirun->ptr = ptr;
- spin_unlock(&aacirun->lock);
- snd_pcm_period_elapsed(aacirun->substream);
- spin_lock(&aacirun->lock);
+ period_elapsed = true;
}
if (!(aacirun->cr & CR_EN))
break;
@@ -260,6 +258,9 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
aacirun->ptr = ptr;
spin_unlock(&aacirun->lock);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(aacirun->substream);
}
if (mask & ISR_URINTR) {
@@ -269,6 +270,7 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
if (mask & ISR_TXINTR) {
struct aaci_runtime *aacirun = &aaci->playback;
+ bool period_elapsed = false;
void *ptr;
if (!aacirun->substream || !aacirun->start) {
@@ -281,15 +283,12 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
ptr = aacirun->ptr;
do {
- unsigned int len = aacirun->fifosz;
+ unsigned int len = aacirun->fifo_bytes;
u32 val;
if (aacirun->bytes <= 0) {
aacirun->bytes += aacirun->period;
- aacirun->ptr = ptr;
- spin_unlock(&aacirun->lock);
- snd_pcm_period_elapsed(aacirun->substream);
- spin_lock(&aacirun->lock);
+ period_elapsed = true;
}
if (!(aacirun->cr & CR_EN))
break;
@@ -319,6 +318,9 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
aacirun->ptr = ptr;
spin_unlock(&aacirun->lock);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(aacirun->substream);
}
}
@@ -361,7 +363,7 @@ static struct snd_pcm_hardware aaci_hw_info = {
/* rates are setup from the AC'97 codec */
.channels_min = 2,
- .channels_max = 6,
+ .channels_max = 2,
.buffer_bytes_max = 64 * 1024,
.period_bytes_min = 256,
.period_bytes_max = PAGE_SIZE,
@@ -369,12 +371,46 @@ static struct snd_pcm_hardware aaci_hw_info = {
.periods_max = PAGE_SIZE / 16,
};
-static int __aaci_pcm_open(struct aaci *aaci,
- struct snd_pcm_substream *substream,
- struct aaci_runtime *aacirun)
+/*
+ * We can support two and four channel audio. Unfortunately
+ * six channel audio requires a non-standard channel ordering:
+ * 2 -> FL(3), FR(4)
+ * 4 -> FL(3), FR(4), SL(7), SR(8)
+ * 6 -> FL(3), FR(4), SL(7), SR(8), C(6), LFE(9) (required)
+ * FL(3), FR(4), C(6), SL(7), SR(8), LFE(9) (actual)
+ * This requires an ALSA configuration file to correct.
+ */
+static int aaci_rule_channels(struct snd_pcm_hw_params *p,
+ struct snd_pcm_hw_rule *rule)
+{
+ static unsigned int channel_list[] = { 2, 4, 6 };
+ struct aaci *aaci = rule->private;
+ unsigned int mask = 1 << 0, slots;
+
+ /* pcms[0] is our 5.1 PCM instance. */
+ slots = aaci->ac97_bus->pcms[0].r[0].slots;
+ if (slots & (1 << AC97_SLOT_PCM_SLEFT)) {
+ mask |= 1 << 1;
+ if (slots & (1 << AC97_SLOT_LFE))
+ mask |= 1 << 2;
+ }
+
+ return snd_interval_list(hw_param_interval(p, rule->var),
+ ARRAY_SIZE(channel_list), channel_list, mask);
+}
+
+static int aaci_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- int ret;
+ struct aaci *aaci = substream->private_data;
+ struct aaci_runtime *aacirun;
+ int ret = 0;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ aacirun = &aaci->playback;
+ } else {
+ aacirun = &aaci->capture;
+ }
aacirun->substream = substream;
runtime->private_data = aacirun;
@@ -382,27 +418,37 @@ static int __aaci_pcm_open(struct aaci *aaci,
runtime->hw.rates = aacirun->pcm->rates;
snd_pcm_limit_hw_rates(runtime);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- aacirun->pcm->r[1].slots)
- snd_ac97_pcm_double_rate_rules(runtime);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ runtime->hw.channels_max = 6;
+
+ /* Add rule describing channel dependency. */
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ aaci_rule_channels, aaci,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (ret)
+ return ret;
+
+ if (aacirun->pcm->r[1].slots)
+ snd_ac97_pcm_double_rate_rules(runtime);
+ }
/*
- * FIXME: ALSA specifies fifo_size in bytes. If we're in normal
- * mode, each 32-bit word contains one sample. If we're in
- * compact mode, each 32-bit word contains two samples, effectively
- * halving the FIFO size. However, we don't know for sure which
- * we'll be using at this point. We set this to the lower limit.
+ * ALSA wants the byte-size of the FIFOs. As we only support
+ * 16-bit samples, this is twice the FIFO depth irrespective
+ * of whether it's in compact mode or not.
*/
- runtime->hw.fifo_size = aaci->fifosize * 2;
-
- ret = request_irq(aaci->dev->irq[0], aaci_irq, IRQF_SHARED|IRQF_DISABLED,
- DRIVER_NAME, aaci);
- if (ret)
- goto out;
-
- return 0;
+ runtime->hw.fifo_size = aaci->fifo_depth * 2;
+
+ mutex_lock(&aaci->irq_lock);
+ if (!aaci->users++) {
+ ret = request_irq(aaci->dev->irq[0], aaci_irq,
+ IRQF_SHARED | IRQF_DISABLED, DRIVER_NAME, aaci);
+ if (ret != 0)
+ aaci->users--;
+ }
+ mutex_unlock(&aaci->irq_lock);
- out:
return ret;
}
@@ -418,7 +464,11 @@ static int aaci_pcm_close(struct snd_pcm_substream *substream)
WARN_ON(aacirun->cr & CR_EN);
aacirun->substream = NULL;
- free_irq(aaci->dev->irq[0], aaci);
+
+ mutex_lock(&aaci->irq_lock);
+ if (!--aaci->users)
+ free_irq(aaci->dev->irq[0], aaci);
+ mutex_unlock(&aaci->irq_lock);
return 0;
}
@@ -444,12 +494,21 @@ static int aaci_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
+/* Channel to slot mask */
+static const u32 channels_to_slotmask[] = {
+ [2] = CR_SL3 | CR_SL4,
+ [4] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8,
+ [6] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8 | CR_SL6 | CR_SL9,
+};
+
static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
- struct aaci_runtime *aacirun,
struct snd_pcm_hw_params *params)
{
+ struct aaci_runtime *aacirun = substream->runtime->private_data;
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+ int dbl = rate > 48000;
int err;
- struct aaci *aaci = substream->private_data;
aaci_pcm_hw_free(substream);
if (aacirun->pcm_open) {
@@ -457,22 +516,28 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
aacirun->pcm_open = 0;
}
+ /* channels is already limited to 2, 4, or 6 by aaci_rule_channels */
+ if (dbl && channels != 2)
+ return -EINVAL;
+
err = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(params));
if (err >= 0) {
- unsigned int rate = params_rate(params);
- int dbl = rate > 48000;
+ struct aaci *aaci = substream->private_data;
- err = snd_ac97_pcm_open(aacirun->pcm, rate,
- params_channels(params),
+ err = snd_ac97_pcm_open(aacirun->pcm, rate, channels,
aacirun->pcm->r[dbl].slots);
aacirun->pcm_open = err == 0;
aacirun->cr = CR_FEN | CR_COMPACT | CR_SZ16;
- aacirun->fifosz = aaci->fifosize * 4;
-
- if (aacirun->cr & CR_COMPACT)
- aacirun->fifosz >>= 1;
+ aacirun->cr |= channels_to_slotmask[channels + dbl * 2];
+
+ /*
+ * fifo_bytes is the number of bytes we transfer to/from
+ * the FIFO, including padding. So that's x4. As we're
+ * in compact mode, the FIFO is half the size.
+ */
+ aacirun->fifo_bytes = aaci->fifo_depth * 4 / 2;
}
return err;
@@ -483,11 +548,11 @@ static int aaci_pcm_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct aaci_runtime *aacirun = runtime->private_data;
+ aacirun->period = snd_pcm_lib_period_bytes(substream);
aacirun->start = runtime->dma_area;
aacirun->end = aacirun->start + snd_pcm_lib_buffer_bytes(substream);
aacirun->ptr = aacirun->start;
- aacirun->period =
- aacirun->bytes = frames_to_bytes(runtime, runtime->period_size);
+ aacirun->bytes = aacirun->period;
return 0;
}
@@ -505,89 +570,6 @@ static snd_pcm_uframes_t aaci_pcm_pointer(struct snd_pcm_substream *substream)
/*
* Playback specific ALSA stuff
*/
-static const u32 channels_to_txmask[] = {
- [2] = CR_SL3 | CR_SL4,
- [4] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8,
- [6] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8 | CR_SL6 | CR_SL9,
-};
-
-/*
- * We can support two and four channel audio. Unfortunately
- * six channel audio requires a non-standard channel ordering:
- * 2 -> FL(3), FR(4)
- * 4 -> FL(3), FR(4), SL(7), SR(8)
- * 6 -> FL(3), FR(4), SL(7), SR(8), C(6), LFE(9) (required)
- * FL(3), FR(4), C(6), SL(7), SR(8), LFE(9) (actual)
- * This requires an ALSA configuration file to correct.
- */
-static unsigned int channel_list[] = { 2, 4, 6 };
-
-static int
-aaci_rule_channels(struct snd_pcm_hw_params *p, struct snd_pcm_hw_rule *rule)
-{
- struct aaci *aaci = rule->private;
- unsigned int chan_mask = 1 << 0, slots;
-
- /*
- * pcms[0] is the our 5.1 PCM instance.
- */
- slots = aaci->ac97_bus->pcms[0].r[0].slots;
- if (slots & (1 << AC97_SLOT_PCM_SLEFT)) {
- chan_mask |= 1 << 1;
- if (slots & (1 << AC97_SLOT_LFE))
- chan_mask |= 1 << 2;
- }
-
- return snd_interval_list(hw_param_interval(p, rule->var),
- ARRAY_SIZE(channel_list), channel_list,
- chan_mask);
-}
-
-static int aaci_pcm_open(struct snd_pcm_substream *substream)
-{
- struct aaci *aaci = substream->private_data;
- int ret;
-
- /*
- * Add rule describing channel dependency.
- */
- ret = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- aaci_rule_channels, aaci,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
- if (ret)
- return ret;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- ret = __aaci_pcm_open(aaci, substream, &aaci->playback);
- } else {
- ret = __aaci_pcm_open(aaci, substream, &aaci->capture);
- }
- return ret;
-}
-
-static int aaci_pcm_playback_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct aaci_runtime *aacirun = substream->runtime->private_data;
- unsigned int channels = params_channels(params);
- int ret;
-
- WARN_ON(channels >= ARRAY_SIZE(channels_to_txmask) ||
- !channels_to_txmask[channels]);
-
- ret = aaci_pcm_hw_params(substream, aacirun, params);
-
- /*
- * Enable FIFO, compact mode, 16 bits per sample.
- * FIXME: double rate slots?
- */
- if (ret >= 0)
- aacirun->cr |= channels_to_txmask[channels];
-
- return ret;
-}
-
static void aaci_pcm_playback_stop(struct aaci_runtime *aacirun)
{
u32 ie;
@@ -657,27 +639,13 @@ static struct snd_pcm_ops aaci_playback_ops = {
.open = aaci_pcm_open,
.close = aaci_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
- .hw_params = aaci_pcm_playback_hw_params,
+ .hw_params = aaci_pcm_hw_params,
.hw_free = aaci_pcm_hw_free,
.prepare = aaci_pcm_prepare,
.trigger = aaci_pcm_playback_trigger,
.pointer = aaci_pcm_pointer,
};
-static int aaci_pcm_capture_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct aaci_runtime *aacirun = substream->runtime->private_data;
- int ret;
-
- ret = aaci_pcm_hw_params(substream, aacirun, params);
- if (ret >= 0)
- /* Line in record: slot 3 and 4 */
- aacirun->cr |= CR_SL3 | CR_SL4;
-
- return ret;
-}
-
static void aaci_pcm_capture_stop(struct aaci_runtime *aacirun)
{
u32 ie;
@@ -774,7 +742,7 @@ static struct snd_pcm_ops aaci_capture_ops = {
.open = aaci_pcm_open,
.close = aaci_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
- .hw_params = aaci_pcm_capture_hw_params,
+ .hw_params = aaci_pcm_hw_params,
.hw_free = aaci_pcm_hw_free,
.prepare = aaci_pcm_capture_prepare,
.trigger = aaci_pcm_capture_trigger,
@@ -941,12 +909,13 @@ static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
strlcpy(card->shortname, "ARM AC'97 Interface", sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%016llx, irq %d",
- card->shortname, (unsigned long long)dev->res.start,
- dev->irq[0]);
+ "%s PL%03x rev%u at 0x%08llx, irq %d",
+ card->shortname, amba_part(dev), amba_rev(dev),
+ (unsigned long long)dev->res.start, dev->irq[0]);
aaci = card->private_data;
mutex_init(&aaci->ac97_sem);
+ mutex_init(&aaci->irq_lock);
aaci->card = card;
aaci->dev = dev;
@@ -984,6 +953,10 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
struct aaci_runtime *aacirun = &aaci->playback;
int i;
+ /*
+ * Enable the channel, but don't assign it to any slots, so
+ * it won't empty onto the AC'97 link.
+ */
writel(CR_FEN | CR_SZ16 | CR_EN, aacirun->base + AACI_TXCR);
for (i = 0; !(readl(aacirun->base + AACI_SR) & SR_TXFF) && i < 4096; i++)
@@ -1002,7 +975,7 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
writel(aaci->maincr, aaci->base + AACI_MAINCR);
/*
- * If we hit 4096, we failed. Go back to the specified
+ * If we hit 4096 entries, we failed. Go back to the specified
* fifo depth.
*/
if (i == 4096)
@@ -1011,7 +984,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
return i;
}
-static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit aaci_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct aaci *aaci;
int ret, i;
@@ -1067,11 +1041,12 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
/*
* Size the FIFOs (must be multiple of 16).
+ * This is the number of entries in the FIFO.
*/
- aaci->fifosize = aaci_size_fifo(aaci);
- if (aaci->fifosize & 15) {
- printk(KERN_WARNING "AACI: fifosize = %d not supported\n",
- aaci->fifosize);
+ aaci->fifo_depth = aaci_size_fifo(aaci);
+ if (aaci->fifo_depth & 15) {
+ printk(KERN_WARNING "AACI: FIFO depth %d not supported\n",
+ aaci->fifo_depth);
ret = -ENODEV;
goto out;
}
@@ -1084,8 +1059,8 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
ret = snd_card_register(aaci->card);
if (ret == 0) {
- dev_info(&dev->dev, "%s, fifo %d\n", aaci->card->longname,
- aaci->fifosize);
+ dev_info(&dev->dev, "%s\n", aaci->card->longname);
+ dev_info(&dev->dev, "FIFO %u entries\n", aaci->fifo_depth);
amba_set_drvdata(dev, aaci->card);
return ret;
}
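
The aaci.c interrupt rework above stops dropping and retaking aacirun->lock mid-loop just to call snd_pcm_period_elapsed(); it records that a period boundary was crossed and issues the callback once, after the lock is released. The same pattern in a userspace sketch, with a pthread mutex standing in for the spinlock and the period size invented:

/* Deferring a callback until after a lock is dropped, as the aaci
 * IRQ handler now does with snd_pcm_period_elapsed(); a pthread
 * mutex is used here only as a stand-in for the kernel spinlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int bytes_left;

static void period_elapsed(void)
{
	puts("period elapsed");		/* must not run with "lock" held */
}

static void fifo_work(int chunk)
{
	bool elapsed = false;

	pthread_mutex_lock(&lock);
	bytes_left -= chunk;
	if (bytes_left <= 0) {
		bytes_left += 4096;	/* next period (illustrative size) */
		elapsed = true;		/* remember, do not call yet */
	}
	pthread_mutex_unlock(&lock);

	if (elapsed)
		period_elapsed();	/* callback runs lock-free */
}

int main(void)
{
	bytes_left = 1024;
	fifo_work(2048);
	return 0;
}
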
diff --git a/sound/arm/aaci.h b/sound/arm/aaci.h
index 6a4a2eebdda1..5791bd5bd2ab 100644
--- a/sound/arm/aaci.h
+++ b/sound/arm/aaci.h
@@ -210,6 +210,8 @@ struct aaci_runtime {
u32 cr;
struct snd_pcm_substream *substream;
+ unsigned int period; /* byte size of a "period" */
+
/*
* PIO support
*/
@@ -217,15 +219,16 @@ struct aaci_runtime {
void *end;
void *ptr;
int bytes;
- unsigned int period;
- unsigned int fifosz;
+ unsigned int fifo_bytes;
};
struct aaci {
struct amba_device *dev;
struct snd_card *card;
void __iomem *base;
- unsigned int fifosize;
+ unsigned int fifo_depth;
+ unsigned int users;
+ struct mutex irq_lock;
/* AC'97 */
struct mutex ac97_sem;
diff --git a/sound/core/device.c b/sound/core/device.c
index a67dfac08c03..2d1ad4b0cd65 100644
--- a/sound/core/device.c
+++ b/sound/core/device.c
@@ -225,15 +225,16 @@ int snd_device_free_all(struct snd_card *card, snd_device_cmd_t cmd)
{
struct snd_device *dev;
int err;
- unsigned int range_low, range_high;
+ unsigned int range_low, range_high, type;
if (snd_BUG_ON(!card))
return -ENXIO;
- range_low = cmd * SNDRV_DEV_TYPE_RANGE_SIZE;
+ range_low = (__force unsigned int)cmd * SNDRV_DEV_TYPE_RANGE_SIZE;
range_high = range_low + SNDRV_DEV_TYPE_RANGE_SIZE - 1;
__again:
list_for_each_entry(dev, &card->devices, list) {
- if (dev->type >= range_low && dev->type <= range_high) {
+ type = (__force unsigned int)dev->type;
+ if (type >= range_low && type <= range_high) {
if ((err = snd_device_free(card, dev->device_data)) < 0)
return err;
goto __again;
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 4902ae568730..53b53e97c896 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
fail_input:
input_free_device(jack->input_dev);
+ kfree(jack->id);
kfree(jack);
return err;
}
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 9e92441f9b78..16bd9c03679b 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -192,7 +192,8 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
- dmab->area = snd_malloc_pages(size, (unsigned long)device);
+ dmab->area = snd_malloc_pages(size,
+ (__force gfp_t)(unsigned long)device);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
index 4c1d16827199..13b3f6f49fae 100644
--- a/sound/core/oss/linear.c
+++ b/sound/core/oss/linear.c
@@ -114,7 +114,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
return frames;
}
-static void init_data(struct linear_priv *data, int src_format, int dst_format)
+static void init_data(struct linear_priv *data,
+ snd_pcm_format_t src_format, snd_pcm_format_t dst_format)
{
int src_le, dst_le, src_bytes, dst_bytes;
@@ -140,9 +141,9 @@ static void init_data(struct linear_priv *data, int src_format, int dst_format)
if (snd_pcm_format_signed(src_format) !=
snd_pcm_format_signed(dst_format)) {
if (dst_le)
- data->flip = cpu_to_le32(0x80000000);
+ data->flip = (__force u32)cpu_to_le32(0x80000000);
else
- data->flip = cpu_to_be32(0x80000000);
+ data->flip = (__force u32)cpu_to_be32(0x80000000);
}
}
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 822dd56993ca..d8359cfeca15 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -190,9 +190,10 @@ static int snd_mixer_oss_get_recsrc(struct snd_mixer_oss_file *fmixer)
return -EIO;
if (mixer->put_recsrc && mixer->get_recsrc) { /* exclusive */
int err;
- if ((err = mixer->get_recsrc(fmixer, &result)) < 0)
+ unsigned int index;
+ if ((err = mixer->get_recsrc(fmixer, &index)) < 0)
return err;
- result = 1 << result;
+ result = 1 << index;
} else {
struct snd_mixer_oss_slot *pslot;
int chn;
@@ -214,6 +215,7 @@ static int snd_mixer_oss_set_recsrc(struct snd_mixer_oss_file *fmixer, int recsr
struct snd_mixer_oss *mixer = fmixer->mixer;
struct snd_mixer_oss_slot *pslot;
int chn, active;
+ unsigned int index;
int result = 0;
if (mixer == NULL)
@@ -222,8 +224,8 @@ static int snd_mixer_oss_set_recsrc(struct snd_mixer_oss_file *fmixer, int recsr
if (recsrc & ~mixer->oss_recsrc)
recsrc &= ~mixer->oss_recsrc;
mixer->put_recsrc(fmixer, ffz(~recsrc));
- mixer->get_recsrc(fmixer, &result);
- result = 1 << result;
+ mixer->get_recsrc(fmixer, &index);
+ result = 1 << index;
}
for (chn = 0; chn < 31; chn++) {
pslot = &mixer->slots[chn];
diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
index f7649d4d950b..7915564bd394 100644
--- a/sound/core/oss/mulaw.c
+++ b/sound/core/oss/mulaw.c
@@ -274,7 +274,7 @@ static snd_pcm_sframes_t mulaw_transfer(struct snd_pcm_plugin *plugin,
return frames;
}
-static void init_data(struct mulaw_priv *data, int format)
+static void init_data(struct mulaw_priv *data, snd_pcm_format_t format)
{
#ifdef SNDRV_LITTLE_ENDIAN
data->cvt_endian = snd_pcm_format_big_endian(format) > 0;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index a2e4eb324699..23c34a02894b 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -41,6 +41,7 @@
#include <sound/info.h>
#include <linux/soundcard.h>
#include <sound/initval.h>
+#include <sound/mixer_oss.h>
#define OSS_ALSAEMULVER _SIOR ('M', 249, int)
@@ -60,7 +61,6 @@ MODULE_PARM_DESC(nonblock_open, "Don't block opening busy PCM devices.");
MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_PCM);
MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_PCM1);
-extern int snd_mixer_oss_ioctl_card(struct snd_card *card, unsigned int cmd, unsigned long arg);
static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file);
static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file);
static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file);
@@ -656,7 +656,7 @@ snd_pcm_uframes_t get_hw_ptr_period(struct snd_pcm_runtime *runtime)
#define AFMT_AC3 0x00000400
#define AFMT_VORBIS 0x00000800
-static int snd_pcm_oss_format_from(int format)
+static snd_pcm_format_t snd_pcm_oss_format_from(int format)
{
switch (format) {
case AFMT_MU_LAW: return SNDRV_PCM_FORMAT_MU_LAW;
@@ -680,7 +680,7 @@ static int snd_pcm_oss_format_from(int format)
}
}
-static int snd_pcm_oss_format_to(int format)
+static int snd_pcm_oss_format_to(snd_pcm_format_t format)
{
switch (format) {
case SNDRV_PCM_FORMAT_MU_LAW: return AFMT_MU_LAW;
@@ -843,7 +843,8 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
size_t oss_frame_size;
int err;
int direct;
- int format, sformat, n;
+ snd_pcm_format_t format, sformat;
+ int n;
struct snd_mask sformat_mask;
struct snd_mask mask;
@@ -868,11 +869,11 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
_snd_pcm_hw_param_min(sparams, SNDRV_PCM_HW_PARAM_PERIODS, 2, 0);
snd_mask_none(&mask);
if (atomic_read(&substream->mmap_count))
- snd_mask_set(&mask, SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
+ snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
else {
- snd_mask_set(&mask, SNDRV_PCM_ACCESS_RW_INTERLEAVED);
+ snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_RW_INTERLEAVED);
if (!direct)
- snd_mask_set(&mask, SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
+ snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
}
err = snd_pcm_hw_param_mask(substream, sparams, SNDRV_PCM_HW_PARAM_ACCESS, &mask);
if (err < 0) {
@@ -891,19 +892,22 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
else
sformat = snd_pcm_plug_slave_format(format, &sformat_mask);
- if (sformat < 0 || !snd_mask_test(&sformat_mask, sformat)) {
- for (sformat = 0; sformat <= SNDRV_PCM_FORMAT_LAST; sformat++) {
- if (snd_mask_test(&sformat_mask, sformat) &&
+ if ((__force int)sformat < 0 ||
+ !snd_mask_test(&sformat_mask, (__force int)sformat)) {
+ for (sformat = (__force snd_pcm_format_t)0;
+ (__force int)sformat <= (__force int)SNDRV_PCM_FORMAT_LAST;
+ sformat = (__force snd_pcm_format_t)((__force int)sformat + 1)) {
+ if (snd_mask_test(&sformat_mask, (__force int)sformat) &&
snd_pcm_oss_format_to(sformat) >= 0)
break;
}
- if (sformat > SNDRV_PCM_FORMAT_LAST) {
+ if ((__force int)sformat > (__force int)SNDRV_PCM_FORMAT_LAST) {
snd_printd("Cannot find a format!!!\n");
err = -EINVAL;
goto failure;
}
}
- err = _snd_pcm_hw_param_set(sparams, SNDRV_PCM_HW_PARAM_FORMAT, sformat, 0);
+ err = _snd_pcm_hw_param_set(sparams, SNDRV_PCM_HW_PARAM_FORMAT, (__force int)sformat, 0);
if (err < 0)
goto failure;
@@ -912,9 +916,9 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
} else {
_snd_pcm_hw_params_any(params);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
- SNDRV_PCM_ACCESS_RW_INTERLEAVED, 0);
+ (__force int)SNDRV_PCM_ACCESS_RW_INTERLEAVED, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
- snd_pcm_oss_format_from(runtime->oss.format), 0);
+ (__force int)snd_pcm_oss_format_from(runtime->oss.format), 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
runtime->oss.channels, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
@@ -1185,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
if (in_kernel) {
mm_segment_t fs;
fs = snd_enter_user();
- ret = snd_pcm_lib_write(substream, (void __user *)ptr, frames);
+ ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
snd_leave_user(fs);
} else {
- ret = snd_pcm_lib_write(substream, (void __user *)ptr, frames);
+ ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
}
if (ret != -EPIPE && ret != -ESTRPIPE)
break;
@@ -1230,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
if (in_kernel) {
mm_segment_t fs;
fs = snd_enter_user();
- ret = snd_pcm_lib_read(substream, (void __user *)ptr, frames);
+ ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
snd_leave_user(fs);
} else {
- ret = snd_pcm_lib_read(substream, (void __user *)ptr, frames);
+ ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
}
if (ret == -EPIPE) {
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
@@ -1333,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
struct snd_pcm_plugin_channel *channels;
size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
if (!in_kernel) {
- if (copy_from_user(runtime->oss.buffer, (const char __user *)buf, bytes))
+ if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
return -EFAULT;
buf = runtime->oss.buffer;
}
@@ -1429,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t frames, frames1;
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
- char __user *final_dst = (char __user *)buf;
+ char __user *final_dst = (char __force __user *)buf;
if (runtime->oss.plugin_first) {
struct snd_pcm_plugin_channel *channels;
size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
@@ -1549,6 +1553,7 @@ static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size)
{
struct snd_pcm_runtime *runtime;
ssize_t result = 0;
+ snd_pcm_state_t state;
long res;
wait_queue_t wait;
@@ -1570,9 +1575,9 @@ static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size)
result = 0;
set_current_state(TASK_INTERRUPTIBLE);
snd_pcm_stream_lock_irq(substream);
- res = runtime->status->state;
+ state = runtime->status->state;
snd_pcm_stream_unlock_irq(substream);
- if (res != SNDRV_PCM_STATE_RUNNING) {
+ if (state != SNDRV_PCM_STATE_RUNNING) {
set_current_state(TASK_RUNNING);
break;
}
@@ -1658,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
size1);
size1 /= runtime->channels; /* frames */
fs = snd_enter_user();
- snd_pcm_lib_write(substream, (void __user *)runtime->oss.buffer, size1);
+ snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
snd_leave_user(fs);
}
} else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 6751daa3bb50..71cc3ddf5c15 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -264,7 +264,7 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
return frames;
}
-static int snd_pcm_plug_formats(struct snd_mask *mask, int format)
+static int snd_pcm_plug_formats(struct snd_mask *mask, snd_pcm_format_t format)
{
struct snd_mask formats = *mask;
u64 linfmts = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 |
@@ -276,16 +276,16 @@ static int snd_pcm_plug_formats(struct snd_mask *mask, int format)
SNDRV_PCM_FMTBIT_U24_3BE | SNDRV_PCM_FMTBIT_S24_3BE |
SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_S32_LE |
SNDRV_PCM_FMTBIT_U32_BE | SNDRV_PCM_FMTBIT_S32_BE);
- snd_mask_set(&formats, SNDRV_PCM_FORMAT_MU_LAW);
+ snd_mask_set(&formats, (__force int)SNDRV_PCM_FORMAT_MU_LAW);
if (formats.bits[0] & (u32)linfmts)
formats.bits[0] |= (u32)linfmts;
if (formats.bits[1] & (u32)(linfmts >> 32))
formats.bits[1] |= (u32)(linfmts >> 32);
- return snd_mask_test(&formats, format);
+ return snd_mask_test(&formats, (__force int)format);
}
-static int preferred_formats[] = {
+static snd_pcm_format_t preferred_formats[] = {
SNDRV_PCM_FORMAT_S16_LE,
SNDRV_PCM_FORMAT_S16_BE,
SNDRV_PCM_FORMAT_U16_LE,
@@ -306,24 +306,25 @@ static int preferred_formats[] = {
SNDRV_PCM_FORMAT_U8
};
-int snd_pcm_plug_slave_format(int format, struct snd_mask *format_mask)
+snd_pcm_format_t snd_pcm_plug_slave_format(snd_pcm_format_t format,
+ struct snd_mask *format_mask)
{
int i;
- if (snd_mask_test(format_mask, format))
+ if (snd_mask_test(format_mask, (__force int)format))
return format;
- if (! snd_pcm_plug_formats(format_mask, format))
- return -EINVAL;
+ if (!snd_pcm_plug_formats(format_mask, format))
+ return (__force snd_pcm_format_t)-EINVAL;
if (snd_pcm_format_linear(format)) {
unsigned int width = snd_pcm_format_width(format);
int unsignd = snd_pcm_format_unsigned(format) > 0;
int big = snd_pcm_format_big_endian(format) > 0;
unsigned int badness, best = -1;
- int best_format = -1;
+ snd_pcm_format_t best_format = (__force snd_pcm_format_t)-1;
for (i = 0; i < ARRAY_SIZE(preferred_formats); i++) {
- int f = preferred_formats[i];
+ snd_pcm_format_t f = preferred_formats[i];
unsigned int w;
- if (!snd_mask_test(format_mask, f))
+ if (!snd_mask_test(format_mask, (__force int)f))
continue;
w = snd_pcm_format_width(f);
if (w >= width)
@@ -337,17 +338,20 @@ int snd_pcm_plug_slave_format(int format, struct snd_mask *format_mask)
best = badness;
}
}
- return best_format >= 0 ? best_format : -EINVAL;
+ if ((__force int)best_format >= 0)
+ return best_format;
+ else
+ return (__force snd_pcm_format_t)-EINVAL;
} else {
switch (format) {
case SNDRV_PCM_FORMAT_MU_LAW:
for (i = 0; i < ARRAY_SIZE(preferred_formats); ++i) {
- int format1 = preferred_formats[i];
- if (snd_mask_test(format_mask, format1))
+ snd_pcm_format_t format1 = preferred_formats[i];
+ if (snd_mask_test(format_mask, (__force int)format1))
return format1;
}
default:
- return -EINVAL;
+ return (__force snd_pcm_format_t)-EINVAL;
}
}
}
@@ -359,7 +363,7 @@ int snd_pcm_plug_format_plugins(struct snd_pcm_substream *plug,
struct snd_pcm_plugin_format tmpformat;
struct snd_pcm_plugin_format dstformat;
struct snd_pcm_plugin_format srcformat;
- int src_access, dst_access;
+ snd_pcm_access_t src_access, dst_access;
struct snd_pcm_plugin *plugin = NULL;
int err;
int stream = snd_pcm_plug_stream(plug);
@@ -641,7 +645,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str
}
int snd_pcm_area_silence(const struct snd_pcm_channel_area *dst_area, size_t dst_offset,
- size_t samples, int format)
+ size_t samples, snd_pcm_format_t format)
{
/* FIXME: sub byte resolution and odd dst_offset */
unsigned char *dst;
@@ -688,7 +692,7 @@ int snd_pcm_area_silence(const struct snd_pcm_channel_area *dst_area, size_t dst
int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_area, size_t src_offset,
const struct snd_pcm_channel_area *dst_area, size_t dst_offset,
- size_t samples, int format)
+ size_t samples, snd_pcm_format_t format)
{
/* FIXME: sub byte resolution and odd dst_offset */
char *src, *dst;
diff --git a/sound/core/oss/pcm_plugin.h b/sound/core/oss/pcm_plugin.h
index b9afab603711..a5035c2369a6 100644
--- a/sound/core/oss/pcm_plugin.h
+++ b/sound/core/oss/pcm_plugin.h
@@ -46,7 +46,7 @@ struct snd_pcm_plugin_channel {
};
struct snd_pcm_plugin_format {
- int format;
+ snd_pcm_format_t format;
unsigned int rate;
unsigned int channels;
};
@@ -58,7 +58,7 @@ struct snd_pcm_plugin {
struct snd_pcm_plugin_format dst_format; /* destination format */
int src_width; /* sample width in bits */
int dst_width; /* sample width in bits */
- int access;
+ snd_pcm_access_t access;
snd_pcm_sframes_t (*src_frames)(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t dst_frames);
snd_pcm_sframes_t (*dst_frames)(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t src_frames);
snd_pcm_sframes_t (*client_channels)(struct snd_pcm_plugin *plugin,
@@ -125,7 +125,8 @@ int snd_pcm_plug_format_plugins(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_pcm_hw_params *slave_params);
-int snd_pcm_plug_slave_format(int format, struct snd_mask *format_mask);
+snd_pcm_format_t snd_pcm_plug_slave_format(snd_pcm_format_t format,
+ struct snd_mask *format_mask);
int snd_pcm_plugin_append(struct snd_pcm_plugin *plugin);
@@ -146,12 +147,12 @@ snd_pcm_sframes_t snd_pcm_plugin_client_channels(struct snd_pcm_plugin *plugin,
int snd_pcm_area_silence(const struct snd_pcm_channel_area *dst_channel,
size_t dst_offset,
- size_t samples, int format);
+ size_t samples, snd_pcm_format_t format);
int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_channel,
size_t src_offset,
const struct snd_pcm_channel_area *dst_channel,
size_t dst_offset,
- size_t samples, int format);
+ size_t samples, snd_pcm_format_t format);
void *snd_pcm_plug_buf_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t size);
void snd_pcm_plug_buf_unlock(struct snd_pcm_substream *plug, void *ptr);
diff --git a/sound/core/oss/route.c b/sound/core/oss/route.c
index bbe25d8c450a..c8171f5783c8 100644
--- a/sound/core/oss/route.c
+++ b/sound/core/oss/route.c
@@ -25,7 +25,7 @@
#include "pcm_plugin.h"
static void zero_areas(struct snd_pcm_plugin_channel *dvp, int ndsts,
- snd_pcm_uframes_t frames, int format)
+ snd_pcm_uframes_t frames, snd_pcm_format_t format)
{
int dst = 0;
for (; dst < ndsts; ++dst) {
@@ -38,7 +38,7 @@ static void zero_areas(struct snd_pcm_plugin_channel *dvp, int ndsts,
static inline void copy_area(const struct snd_pcm_plugin_channel *src_channel,
struct snd_pcm_plugin_channel *dst_channel,
- snd_pcm_uframes_t frames, int format)
+ snd_pcm_uframes_t frames, snd_pcm_format_t format)
{
dst_channel->enabled = 1;
snd_pcm_area_copy(&src_channel->area, 0, &dst_channel->area, 0, frames, format);
@@ -51,7 +51,7 @@ static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin,
{
int nsrcs, ndsts, dst;
struct snd_pcm_plugin_channel *dvp;
- int format;
+ snd_pcm_format_t format;
if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
return -ENXIO;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 6b4b1287b314..ee9abb2d9001 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -211,9 +211,9 @@ static char *snd_pcm_format_names[] = {
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
- if (format >= ARRAY_SIZE(snd_pcm_format_names))
+ if ((__force unsigned int)format >= ARRAY_SIZE(snd_pcm_format_names))
return "Unknown";
- return snd_pcm_format_names[format];
+ return snd_pcm_format_names[(__force unsigned int)format];
}
EXPORT_SYMBOL_GPL(snd_pcm_format_name);
@@ -269,12 +269,12 @@ static const char *snd_pcm_stream_name(int stream)
static const char *snd_pcm_access_name(snd_pcm_access_t access)
{
- return snd_pcm_access_names[access];
+ return snd_pcm_access_names[(__force int)access];
}
static const char *snd_pcm_subformat_name(snd_pcm_subformat_t subformat)
{
- return snd_pcm_subformat_names[subformat];
+ return snd_pcm_subformat_names[(__force int)subformat];
}
static const char *snd_pcm_tstamp_mode_name(int mode)
@@ -284,7 +284,7 @@ static const char *snd_pcm_tstamp_mode_name(int mode)
static const char *snd_pcm_state_name(snd_pcm_state_t state)
{
- return snd_pcm_state_names[state];
+ return snd_pcm_state_names[(__force int)state];
}
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 434af3c56d52..88f02e3866e0 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -35,7 +35,10 @@ struct pcm_format_data {
unsigned char silence[8]; /* silence data to fill */
};
-static struct pcm_format_data pcm_formats[SNDRV_PCM_FORMAT_LAST+1] = {
+/* we do lots of calculations on snd_pcm_format_t; shut up sparse */
+#define INT __force int
+
+static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
[SNDRV_PCM_FORMAT_S8] = {
.width = 8, .phys = 8, .le = -1, .signd = 1,
.silence = {},
@@ -215,9 +218,9 @@ static struct pcm_format_data pcm_formats[SNDRV_PCM_FORMAT_LAST+1] = {
int snd_pcm_format_signed(snd_pcm_format_t format)
{
int val;
- if (format < 0 || format > SNDRV_PCM_FORMAT_LAST)
+ if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
return -EINVAL;
- if ((val = pcm_formats[format].signd) < 0)
+ if ((val = pcm_formats[(INT)format].signd) < 0)
return -EINVAL;
return val;
}
@@ -266,9 +269,9 @@ EXPORT_SYMBOL(snd_pcm_format_linear);
int snd_pcm_format_little_endian(snd_pcm_format_t format)
{
int val;
- if (format < 0 || format > SNDRV_PCM_FORMAT_LAST)
+ if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
return -EINVAL;
- if ((val = pcm_formats[format].le) < 0)
+ if ((val = pcm_formats[(INT)format].le) < 0)
return -EINVAL;
return val;
}
@@ -304,9 +307,9 @@ EXPORT_SYMBOL(snd_pcm_format_big_endian);
int snd_pcm_format_width(snd_pcm_format_t format)
{
int val;
- if (format < 0 || format > SNDRV_PCM_FORMAT_LAST)
+ if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
return -EINVAL;
- if ((val = pcm_formats[format].width) == 0)
+ if ((val = pcm_formats[(INT)format].width) == 0)
return -EINVAL;
return val;
}
@@ -323,9 +326,9 @@ EXPORT_SYMBOL(snd_pcm_format_width);
int snd_pcm_format_physical_width(snd_pcm_format_t format)
{
int val;
- if (format < 0 || format > SNDRV_PCM_FORMAT_LAST)
+ if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
return -EINVAL;
- if ((val = pcm_formats[format].phys) == 0)
+ if ((val = pcm_formats[(INT)format].phys) == 0)
return -EINVAL;
return val;
}
@@ -358,11 +361,11 @@ EXPORT_SYMBOL(snd_pcm_format_size);
*/
const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format)
{
- if (format < 0 || format > SNDRV_PCM_FORMAT_LAST)
+ if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
return NULL;
- if (! pcm_formats[format].phys)
+ if (! pcm_formats[(INT)format].phys)
return NULL;
- return pcm_formats[format].silence;
+ return pcm_formats[(INT)format].silence;
}
EXPORT_SYMBOL(snd_pcm_format_silence_64);
@@ -382,16 +385,16 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
int width;
unsigned char *dst, *pat;
- if (format < 0 || format > SNDRV_PCM_FORMAT_LAST)
+ if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
return -EINVAL;
if (samples == 0)
return 0;
- width = pcm_formats[format].phys; /* physical width */
- pat = pcm_formats[format].silence;
+ width = pcm_formats[(INT)format].phys; /* physical width */
+ pat = pcm_formats[(INT)format].silence;
if (! width)
return -EINVAL;
/* signed or 1 byte data */
- if (pcm_formats[format].signd == 1 || width <= 8) {
+ if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
unsigned int bytes = samples * width / 8;
memset(data, *pat, bytes);
return 0;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 4be45e7be8ad..ae42b6509ce4 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -941,7 +941,7 @@ static struct action_ops snd_pcm_action_stop = {
*
* The state of each stream is then changed to the given state unconditionally.
*/
-int snd_pcm_stop(struct snd_pcm_substream *substream, int state)
+int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 99a485f13648..f2436d33fbf7 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1052,7 +1052,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
} else {
#ifdef CONFIG_COMPAT
if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
- void *ptr = compat_ptr(event.data.raw32.d[1]);
+ void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
event.data.ext.ptr = ptr;
}
#endif
@@ -2407,7 +2407,7 @@ int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
if (client == NULL)
return -ENXIO;
fs = snd_enter_user();
- result = snd_seq_do_ioctl(client, cmd, (void __user *)arg);
+ result = snd_seq_do_ioctl(client, cmd, (void __force __user *)arg);
snd_leave_user(fs);
return result;
}
@@ -2497,9 +2497,6 @@ static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
}
-void snd_seq_info_pool(struct snd_info_buffer *buffer,
- struct snd_seq_pool *pool, char *space);
-
/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 7fb55436287f..7f50c1437675 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -86,7 +86,7 @@ int snd_seq_dump_var_event(const struct snd_seq_event *event,
if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
char buf[32];
- char __user *curptr = (char __user *)event->data.ext.ptr;
+ char __user *curptr = (char __force __user *)event->data.ext.ptr;
while (len > 0) {
int size = sizeof(buf);
if (len < size)
@@ -157,7 +157,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
if (! in_kernel)
return -EINVAL;
- if (copy_from_user(buf, (void __user *)event->data.ext.ptr, len))
+ if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
return -EFAULT;
return newlen;
}
@@ -343,7 +343,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
tmp->event = src->event;
src = src->next;
} else if (is_usrptr) {
- if (copy_from_user(&tmp->event, (char __user *)buf, size)) {
+ if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
err = -EFAULT;
goto __error;
}
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 63e91431a29f..4a2ec779b8a7 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -24,6 +24,8 @@
#include <sound/seq_kernel.h>
#include <linux/poll.h>
+struct snd_info_buffer;
+
/* container for sequencer event (internal use) */
struct snd_seq_event_cell {
struct snd_seq_event event;
@@ -99,5 +101,7 @@ void snd_sequencer_memory_done(void);
/* polling */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait);
+void snd_seq_info_pool(struct snd_info_buffer *buffer,
+ struct snd_seq_pool *pool, char *space);
#endif
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 3bf7d73ac52e..e12bcd94b6db 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -412,7 +412,7 @@ int snd_seq_get_port_info(struct snd_seq_client_port * port,
* initialization or termination of devices (see seq_midi.c).
*
* If callback_all option is set, the callback function is invoked
- * at each connnection/disconnection.
+ * at each connection/disconnection.
*/
static int subscribe_port(struct snd_seq_client *client,
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 3b9b550109cb..a89948ae9e8d 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -18,7 +18,7 @@
* a subset of information returned via ctl info callback
*/
struct link_ctl_info {
- int type; /* value type */
+ snd_ctl_elem_type_t type; /* value type */
int count; /* item count */
int min_val, max_val; /* min, max values */
};
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 9823d59d7ad7..389cd7931668 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -152,10 +152,16 @@ config SND_AZT3328
select SND_MPU401_UART
select SND_PCM
select SND_RAWMIDI
+ select SND_AC97_CODEC
help
Say Y here to include support for Aztech AZF3328 (PCI168)
soundcards.
+ Supported features: AC97-"conformant" mixer, MPU401/OPL3, analog I/O
+ (16bit/8bit, many sample rates [<= 66.2kHz], NO hardware mixing),
+ Digital Enhanced Game Port, 1.024MHz multimedia sequencer timer,
+ ext. codec (I2S port), onboard amp (4W/4Ohms/ch), suspend/resume.
+
To compile this driver as a module, choose M here: the module
will be called snd-azt3328.
@@ -572,13 +578,13 @@ comment "Don't forget to add built-in firmwares for HDSP driver"
depends on SND_HDSP=y
config SND_HDSPM
- tristate "RME Hammerfall DSP MADI"
+ tristate "RME Hammerfall DSP MADI/RayDAT/AIO"
select SND_HWDEP
select SND_RAWMIDI
select SND_PCM
help
- Say Y here to include support for RME Hammerfall DSP MADI
- soundcards.
+ Say Y here to include support for RME Hammerfall DSP MADI,
+ RayDAT and AIO soundcards.
To compile this driver as a module, choose M here: the module
will be called snd-hdspm.
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index cb62d178b3e0..7f4d619f4ddb 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -71,6 +71,12 @@ static const struct ac97_codec_id snd_ac97_codec_id_vendors[] = {
{ 0x414b4d00, 0xffffff00, "Asahi Kasei", NULL, NULL },
{ 0x414c4300, 0xffffff00, "Realtek", NULL, NULL },
{ 0x414c4700, 0xffffff00, "Realtek", NULL, NULL },
+/*
+ * This is an _unofficial_ Aztech Labs entry
+ * (value might differ from unknown official Aztech ID),
+ * currently used by the AC97 emulation of the almost-AC97 PCI168 card.
+ */
+{ 0x415a5400, 0xffffff00, "Aztech Labs (emulated)", NULL, NULL },
{ 0x434d4900, 0xffffff00, "C-Media Electronics", NULL, NULL },
{ 0x43525900, 0xffffff00, "Cirrus Logic", NULL, NULL },
{ 0x43585400, 0xffffff00, "Conexant", NULL, NULL },
@@ -127,6 +133,7 @@ static const struct ac97_codec_id snd_ac97_codec_ids[] = {
{ 0x414c4781, 0xffffffff, "ALC658D", NULL, NULL }, /* already patched */
{ 0x414c4780, 0xfffffff0, "ALC658", patch_alc655, NULL },
{ 0x414c4790, 0xfffffff0, "ALC850", patch_alc850, NULL },
+{ 0x415a5401, 0xffffffff, "AZF3328", patch_aztech_azf3328, NULL },
{ 0x434d4941, 0xffffffff, "CMI9738", patch_cm9738, NULL },
{ 0x434d4961, 0xffffffff, "CMI9739", patch_cm9739, NULL },
{ 0x434d4969, 0xffffffff, "CMI9780", patch_cm9780, NULL },
@@ -590,9 +597,9 @@ static int snd_ac97_put_volsw(struct snd_kcontrol *kcontrol,
snd_ac97_page_restore(ac97, page_save);
#ifdef CONFIG_SND_AC97_POWER_SAVE
/* check analog mixer power-down */
- if ((val_mask & 0x8000) &&
+ if ((val_mask & AC97_PD_EAPD) &&
(kcontrol->private_value & (1<<30))) {
- if (val & 0x8000)
+ if (val & AC97_PD_EAPD)
ac97->power_up &= ~(1 << (reg>>1));
else
ac97->power_up |= 1 << (reg>>1);
@@ -1035,20 +1042,20 @@ static int snd_ac97_dev_free(struct snd_device *device)
static int snd_ac97_try_volume_mix(struct snd_ac97 * ac97, int reg)
{
- unsigned short val, mask = 0x8000;
+ unsigned short val, mask = AC97_MUTE_MASK_MONO;
if (! snd_ac97_valid_reg(ac97, reg))
return 0;
switch (reg) {
case AC97_MASTER_TONE:
- return ac97->caps & 0x04 ? 1 : 0;
+ return ac97->caps & AC97_BC_BASS_TREBLE ? 1 : 0;
case AC97_HEADPHONE:
- return ac97->caps & 0x10 ? 1 : 0;
+ return ac97->caps & AC97_BC_HEADPHONE ? 1 : 0;
case AC97_REC_GAIN_MIC:
- return ac97->caps & 0x01 ? 1 : 0;
+ return ac97->caps & AC97_BC_DEDICATED_MIC ? 1 : 0;
case AC97_3D_CONTROL:
- if (ac97->caps & 0x7c00) {
+ if (ac97->caps & AC97_BC_3D_TECH_ID_MASK) {
val = snd_ac97_read(ac97, reg);
/* if nonzero - fixed and we can't set it */
return val == 0;
@@ -1104,7 +1111,10 @@ static void check_volume_resolution(struct snd_ac97 *ac97, int reg, unsigned cha
*lo_max = *hi_max = 0;
for (i = 0 ; i < ARRAY_SIZE(cbit); i++) {
unsigned short val;
- snd_ac97_write(ac97, reg, 0x8080 | cbit[i] | (cbit[i] << 8));
+ snd_ac97_write(
+ ac97, reg,
+ AC97_MUTE_MASK_STEREO | cbit[i] | (cbit[i] << 8)
+ );
/* Do the read twice due to buffers on some ac97 codecs.
* e.g. The STAC9704 returns exactly what you wrote to the register
* if you read it immediately. This causes the detect routine to fail.
@@ -1139,14 +1149,14 @@ static void snd_ac97_change_volume_params2(struct snd_ac97 * ac97, int reg, int
unsigned short val, val1;
*max = 63;
- val = 0x8080 | (0x20 << shift);
+ val = AC97_MUTE_MASK_STEREO | (0x20 << shift);
snd_ac97_write(ac97, reg, val);
val1 = snd_ac97_read(ac97, reg);
if (val != val1) {
*max = 31;
}
/* reset volume to zero */
- snd_ac97_write_cache(ac97, reg, 0x8080);
+ snd_ac97_write_cache(ac97, reg, AC97_MUTE_MASK_STEREO);
}
static inline int printable(unsigned int x)
@@ -1183,16 +1193,16 @@ static int snd_ac97_cmute_new_stereo(struct snd_card *card, char *name, int reg,
if (! snd_ac97_valid_reg(ac97, reg))
return 0;
- mute_mask = 0x8000;
+ mute_mask = AC97_MUTE_MASK_MONO;
val = snd_ac97_read(ac97, reg);
if (check_stereo || (ac97->flags & AC97_STEREO_MUTES)) {
/* check whether both mute bits work */
- val1 = val | 0x8080;
+ val1 = val | AC97_MUTE_MASK_STEREO;
snd_ac97_write(ac97, reg, val1);
if (val1 == snd_ac97_read(ac97, reg))
- mute_mask = 0x8080;
+ mute_mask = AC97_MUTE_MASK_STEREO;
}
- if (mute_mask == 0x8080) {
+ if (mute_mask == AC97_MUTE_MASK_STEREO) {
struct snd_kcontrol_new tmp = AC97_DOUBLE(name, reg, 15, 7, 1, 1);
if (check_amix)
tmp.private_value |= (1 << 30);
@@ -1268,9 +1278,11 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
err = snd_ctl_add(card, kctl);
if (err < 0)
return err;
- snd_ac97_write_cache(ac97, reg,
- (snd_ac97_read(ac97, reg) & 0x8080) |
- lo_max | (hi_max << 8));
+ snd_ac97_write_cache(
+ ac97, reg,
+ (snd_ac97_read(ac97, reg) & AC97_MUTE_MASK_STEREO)
+ | lo_max | (hi_max << 8)
+ );
return 0;
}
@@ -1332,7 +1344,7 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
return err;
}
- ac97->regs[AC97_CENTER_LFE_MASTER] = 0x8080;
+ ac97->regs[AC97_CENTER_LFE_MASTER] = AC97_MUTE_MASK_STEREO;
/* build center controls */
if ((snd_ac97_try_volume_mix(ac97, AC97_CENTER_LFE_MASTER))
@@ -1410,8 +1422,12 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_pc_beep[idx], ac97))) < 0)
return err;
set_tlv_db_scale(kctl, db_scale_4bit);
- snd_ac97_write_cache(ac97, AC97_PC_BEEP,
- snd_ac97_read(ac97, AC97_PC_BEEP) | 0x801e);
+ snd_ac97_write_cache(
+ ac97,
+ AC97_PC_BEEP,
+ (snd_ac97_read(ac97, AC97_PC_BEEP)
+ | AC97_MUTE_MASK_MONO | 0x001e)
+ );
}
/* build Phone controls */
@@ -1545,7 +1561,7 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
}
/* build Simulated Stereo Enhancement control */
- if (ac97->caps & 0x0008) {
+ if (ac97->caps & AC97_BC_SIM_STEREO) {
if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_STEREO_ENHANCEMENT], ac97))) < 0)
return err;
}
@@ -1557,7 +1573,7 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
}
/* build Loudness control */
- if (ac97->caps & 0x0020) {
+ if (ac97->caps & AC97_BC_LOUDNESS) {
if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOUDNESS], ac97))) < 0)
return err;
}
@@ -2542,8 +2558,8 @@ void snd_ac97_resume(struct snd_ac97 *ac97)
schedule_timeout_uninterruptible(1);
} while (time_after_eq(end_time, jiffies));
/* FIXME: extra delay */
- ac97->bus->ops->write(ac97, AC97_MASTER, 0x8000);
- if (snd_ac97_read(ac97, AC97_MASTER) != 0x8000)
+ ac97->bus->ops->write(ac97, AC97_MASTER, AC97_MUTE_MASK_MONO);
+ if (snd_ac97_read(ac97, AC97_MASTER) != AC97_MUTE_MASK_MONO)
msleep(250);
} else {
end_time = jiffies + msecs_to_jiffies(100);
@@ -2747,12 +2763,12 @@ static int master_mute_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
int rshift = (kcontrol->private_value >> 12) & 0x0f;
unsigned short mask;
if (shift != rshift)
- mask = 0x8080;
+ mask = AC97_MUTE_MASK_STEREO;
else
- mask = 0x8000;
- snd_ac97_update_bits(ac97, AC97_POWERDOWN, 0x8000,
+ mask = AC97_MUTE_MASK_MONO;
+ snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_EAPD,
(ac97->regs[AC97_MASTER] & mask) == mask ?
- 0x8000 : 0);
+ AC97_PD_EAPD : 0);
}
return err;
}
@@ -2765,7 +2781,10 @@ static int tune_mute_led(struct snd_ac97 *ac97)
return -ENOENT;
msw->put = master_mute_sw_put;
snd_ac97_remove_ctl(ac97, "External Amplifier", NULL);
- snd_ac97_update_bits(ac97, AC97_POWERDOWN, 0x8000, 0x8000); /* mute LED on */
+ snd_ac97_update_bits(
+ ac97, AC97_POWERDOWN,
+ AC97_PD_EAPD, AC97_PD_EAPD /* mute LED on */
+ );
ac97->scaps |= AC97_SCAP_EAPD_LED;
return 0;
}
@@ -2780,12 +2799,12 @@ static int hp_master_mute_sw_put(struct snd_kcontrol *kcontrol,
int rshift = (kcontrol->private_value >> 12) & 0x0f;
unsigned short mask;
if (shift != rshift)
- mask = 0x8080;
+ mask = AC97_MUTE_MASK_STEREO;
else
- mask = 0x8000;
- snd_ac97_update_bits(ac97, AC97_POWERDOWN, 0x8000,
+ mask = AC97_MUTE_MASK_MONO;
+ snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_EAPD,
(ac97->regs[AC97_MASTER] & mask) == mask ?
- 0x8000 : 0);
+ AC97_PD_EAPD : 0);
}
return err;
}
@@ -2801,7 +2820,10 @@ static int tune_hp_mute_led(struct snd_ac97 *ac97)
snd_ac97_remove_ctl(ac97, "External Amplifier", NULL);
snd_ac97_remove_ctl(ac97, "Headphone Playback", "Switch");
snd_ac97_remove_ctl(ac97, "Headphone Playback", "Volume");
- snd_ac97_update_bits(ac97, AC97_POWERDOWN, 0x8000, 0x8000); /* mute LED on */
+ snd_ac97_update_bits(
+ ac97, AC97_POWERDOWN,
+ AC97_PD_EAPD, AC97_PD_EAPD /* mute LED on */
+ );
return 0;
}
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index bf47574ca1f0..200c9a1d48b7 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -27,6 +27,15 @@
#include "ac97_patch.h"
/*
+ * Forward declarations
+ */
+
+static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97,
+ const char *name);
+static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name,
+ const unsigned int *tlv, const char **slaves);
+
+/*
* Chip specific initialization
*/
@@ -2940,6 +2949,49 @@ static int patch_alc850(struct snd_ac97 *ac97)
return 0;
}
+static int patch_aztech_azf3328_specific(struct snd_ac97 *ac97)
+{
+ struct snd_kcontrol *kctl_3d_center =
+ snd_ac97_find_mixer_ctl(ac97, "3D Control - Center");
+ struct snd_kcontrol *kctl_3d_depth =
+ snd_ac97_find_mixer_ctl(ac97, "3D Control - Depth");
+
+ /*
+ * 3D register is different from AC97 standard layout
+ * (also do some renaming, to resemble Windows driver naming)
+ */
+ if (kctl_3d_center) {
+ kctl_3d_center->private_value =
+ AC97_SINGLE_VALUE(AC97_3D_CONTROL, 1, 0x07, 0);
+ snd_ac97_rename_vol_ctl(ac97,
+ "3D Control - Center", "3D Control - Width"
+ );
+ }
+ if (kctl_3d_depth)
+ kctl_3d_depth->private_value =
+ AC97_SINGLE_VALUE(AC97_3D_CONTROL, 8, 0x03, 0);
+
+ /* Aztech Windows driver calls the
+ equivalent control "Modem Playback", thus rename it: */
+ snd_ac97_rename_vol_ctl(ac97,
+ "Master Mono Playback", "Modem Playback"
+ );
+ snd_ac97_rename_vol_ctl(ac97,
+ "Headphone Playback", "FM Synth Playback"
+ );
+
+ return 0;
+}
+
+static const struct snd_ac97_build_ops patch_aztech_azf3328_ops = {
+ .build_specific = patch_aztech_azf3328_specific
+};
+
+static int patch_aztech_azf3328(struct snd_ac97 *ac97)
+{
+ ac97->build_ops = &patch_aztech_azf3328_ops;
+ return 0;
+}
/*
* C-Media CM97xx codecs
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index c80b0b863c54..0ac1f98d91a1 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -21,6 +21,7 @@
* would appreciate it if you grant us the right to use those modifications
* for any purpose including commercial applications.
*/
+
/* >0: print Hw params, timer vars. >1: print stream write/copy sizes */
#define REALLY_VERBOSE_LOGGING 0
@@ -36,16 +37,12 @@
#define VPRINTK2(...)
#endif
-#ifndef ASI_STYLE_NAMES
-/* not sure how ALSA style name should look */
-#define ASI_STYLE_NAMES 1
-#endif
-
#include "hpi_internal.h"
#include "hpimsginit.h"
#include "hpioctl.h"
#include <linux/pci.h>
+#include <linux/version.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
@@ -85,11 +82,11 @@ MODULE_PARM_DESC(enable_hpi_hwdep,
/* identify driver */
#ifdef KERNEL_ALSA_BUILD
-static char *build_info = "built using headers from kernel source";
+static char *build_info = "Built using headers from kernel source";
module_param(build_info, charp, S_IRUGO);
MODULE_PARM_DESC(build_info, "built using headers from kernel source");
#else
-static char *build_info = "built within ALSA source";
+static char *build_info = "Built within ALSA source";
module_param(build_info, charp, S_IRUGO);
MODULE_PARM_DESC(build_info, "built within ALSA source");
#endif
@@ -100,13 +97,14 @@ static const int mixer_dump;
#define DEFAULT_SAMPLERATE 44100
static int adapter_fs = DEFAULT_SAMPLERATE;
-static struct hpi_hsubsys *ss; /* handle to HPI audio subsystem */
-
/* defaults */
#define PERIODS_MIN 2
-#define PERIOD_BYTES_MIN 2304
+#define PERIOD_BYTES_MIN 2048
#define BUFFER_BYTES_MAX (512 * 1024)
+/* convert stream to character */
+#define SCHR(s) ((s == SNDRV_PCM_STREAM_PLAYBACK) ? 'P' : 'C')
+
/*#define TIMER_MILLISECONDS 20
#define FORCE_TIMER_JIFFIES ((TIMER_MILLISECONDS * HZ + 999)/1000)
*/
@@ -152,11 +150,12 @@ struct snd_card_asihpi_pcm {
struct timer_list timer;
unsigned int respawn_timer;
unsigned int hpi_buffer_attached;
- unsigned int pcm_size;
- unsigned int pcm_count;
+ unsigned int buffer_bytes;
+ unsigned int period_bytes;
unsigned int bytes_per_sec;
- unsigned int pcm_irq_pos; /* IRQ position */
- unsigned int pcm_buf_pos; /* position in buffer */
+ unsigned int pcm_buf_host_rw_ofs; /* Host R/W pos */
+ unsigned int pcm_buf_dma_ofs; /* DMA R/W offset in buffer */
+ unsigned int pcm_buf_elapsed_dma_ofs; /* DMA R/W offset in buffer */
struct snd_pcm_substream *substream;
u32 h_stream;
struct hpi_format format;
@@ -167,7 +166,6 @@ struct snd_card_asihpi_pcm {
/* Functions to allow driver to give a buffer to HPI for busmastering */
static u16 hpi_stream_host_buffer_attach(
- struct hpi_hsubsys *hS,
u32 h_stream, /* handle to outstream. */
u32 size_in_bytes, /* size in bytes of bus mastering buffer */
u32 pci_address
@@ -194,10 +192,7 @@ static u16 hpi_stream_host_buffer_attach(
return hr.error;
}
-static u16 hpi_stream_host_buffer_detach(
- struct hpi_hsubsys *hS,
- u32 h_stream
-)
+static u16 hpi_stream_host_buffer_detach(u32 h_stream)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -218,24 +213,23 @@ static u16 hpi_stream_host_buffer_detach(
return hr.error;
}
-static inline u16 hpi_stream_start(struct hpi_hsubsys *hS, u32 h_stream)
+static inline u16 hpi_stream_start(u32 h_stream)
{
if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
- return hpi_outstream_start(hS, h_stream);
+ return hpi_outstream_start(h_stream);
else
- return hpi_instream_start(hS, h_stream);
+ return hpi_instream_start(h_stream);
}
-static inline u16 hpi_stream_stop(struct hpi_hsubsys *hS, u32 h_stream)
+static inline u16 hpi_stream_stop(u32 h_stream)
{
if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
- return hpi_outstream_stop(hS, h_stream);
+ return hpi_outstream_stop(h_stream);
else
- return hpi_instream_stop(hS, h_stream);
+ return hpi_instream_stop(h_stream);
}
static inline u16 hpi_stream_get_info_ex(
- struct hpi_hsubsys *hS,
u32 h_stream,
u16 *pw_state,
u32 *pbuffer_size,
@@ -244,42 +238,43 @@ static inline u16 hpi_stream_get_info_ex(
u32 *pauxiliary_data
)
{
+ u16 e;
if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
- return hpi_outstream_get_info_ex(hS, h_stream, pw_state,
+ e = hpi_outstream_get_info_ex(h_stream, pw_state,
pbuffer_size, pdata_in_buffer,
psample_count, pauxiliary_data);
else
- return hpi_instream_get_info_ex(hS, h_stream, pw_state,
+ e = hpi_instream_get_info_ex(h_stream, pw_state,
pbuffer_size, pdata_in_buffer,
psample_count, pauxiliary_data);
+ return e;
}
-static inline u16 hpi_stream_group_add(struct hpi_hsubsys *hS,
+static inline u16 hpi_stream_group_add(
u32 h_master,
u32 h_stream)
{
if (hpi_handle_object(h_master) == HPI_OBJ_OSTREAM)
- return hpi_outstream_group_add(hS, h_master, h_stream);
+ return hpi_outstream_group_add(h_master, h_stream);
else
- return hpi_instream_group_add(hS, h_master, h_stream);
+ return hpi_instream_group_add(h_master, h_stream);
}
-static inline u16 hpi_stream_group_reset(struct hpi_hsubsys *hS,
- u32 h_stream)
+static inline u16 hpi_stream_group_reset(u32 h_stream)
{
if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
- return hpi_outstream_group_reset(hS, h_stream);
+ return hpi_outstream_group_reset(h_stream);
else
- return hpi_instream_group_reset(hS, h_stream);
+ return hpi_instream_group_reset(h_stream);
}
-static inline u16 hpi_stream_group_get_map(struct hpi_hsubsys *hS,
+static inline u16 hpi_stream_group_get_map(
u32 h_stream, u32 *mo, u32 *mi)
{
if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
- return hpi_outstream_group_get_map(hS, h_stream, mo, mi);
+ return hpi_outstream_group_get_map(h_stream, mo, mi);
else
- return hpi_instream_group_get_map(hS, h_stream, mo, mi);
+ return hpi_instream_group_get_map(h_stream, mo, mi);
}
static u16 handle_error(u16 err, int line, char *filename)
@@ -299,11 +294,11 @@ static void print_hwparams(struct snd_pcm_hw_params *p)
{
snd_printd("HWPARAMS \n");
snd_printd("samplerate %d \n", params_rate(p));
- snd_printd("channels %d \n", params_channels(p));
- snd_printd("format %d \n", params_format(p));
+ snd_printd("Channels %d \n", params_channels(p));
+ snd_printd("Format %d \n", params_format(p));
snd_printd("subformat %d \n", params_subformat(p));
- snd_printd("buffer bytes %d \n", params_buffer_bytes(p));
- snd_printd("period bytes %d \n", params_period_bytes(p));
+ snd_printd("Buffer bytes %d \n", params_buffer_bytes(p));
+ snd_printd("Period bytes %d \n", params_period_bytes(p));
snd_printd("access %d \n", params_access(p));
snd_printd("period_size %d \n", params_period_size(p));
snd_printd("periods %d \n", params_periods(p));
@@ -335,7 +330,7 @@ static snd_pcm_format_t hpi_to_alsa_formats[] = {
*/
-1
#else
- /* SNDRV_PCM_FORMAT_S24_3LE */ /* { HPI_FORMAT_PCM24_SIGNED 15 */
+ /* SNDRV_PCM_FORMAT_S24_3LE */ /* HPI_FORMAT_PCM24_SIGNED 15 */
#endif
};
@@ -378,20 +373,20 @@ static void snd_card_asihpi_pcm_samplerates(struct snd_card_asihpi *asihpi,
} else {
/* on cards without SRC,
valid rates are determined by sampleclock */
- err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ err = hpi_mixer_get_control(asihpi->h_mixer,
HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
HPI_CONTROL_SAMPLECLOCK, &h_control);
if (err) {
snd_printk(KERN_ERR
- "no local sampleclock, err %d\n", err);
+ "No local sampleclock, err %d\n", err);
}
for (idx = 0; idx < 100; idx++) {
- if (hpi_sample_clock_query_local_rate(ss,
+ if (hpi_sample_clock_query_local_rate(
h_control, idx, &sample_rate)) {
if (!idx)
snd_printk(KERN_ERR
- "local rate query failed\n");
+ "Local rate query failed\n");
break;
}
@@ -480,10 +475,10 @@ static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
format, params_rate(params), 0, 0));
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- if (hpi_instream_reset(ss, dpcm->h_stream) != 0)
+ if (hpi_instream_reset(dpcm->h_stream) != 0)
return -EINVAL;
- if (hpi_instream_set_format(ss,
+ if (hpi_instream_set_format(
dpcm->h_stream, &dpcm->format) != 0)
return -EINVAL;
}
@@ -491,10 +486,10 @@ static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
dpcm->hpi_buffer_attached = 0;
if (card->support_mmap) {
- err = hpi_stream_host_buffer_attach(ss, dpcm->h_stream,
+ err = hpi_stream_host_buffer_attach(dpcm->h_stream,
params_buffer_bytes(params), runtime->dma_addr);
if (err == 0) {
- snd_printd(KERN_INFO
+ VPRINTK1(KERN_INFO
"stream_host_buffer_attach succeeded %u %lu\n",
params_buffer_bytes(params),
(unsigned long)runtime->dma_addr);
@@ -505,11 +500,11 @@ static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
return -ENOMEM;
}
- err = hpi_stream_get_info_ex(ss, dpcm->h_stream, NULL,
+ err = hpi_stream_get_info_ex(dpcm->h_stream, NULL,
&dpcm->hpi_buffer_attached,
NULL, NULL, NULL);
- snd_printd(KERN_INFO "stream_host_buffer_attach status 0x%x\n",
+ VPRINTK1(KERN_INFO "stream_host_buffer_attach status 0x%x\n",
dpcm->hpi_buffer_attached);
}
bytes_per_sec = params_rate(params) * params_channels(params);
@@ -520,16 +515,32 @@ static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
dpcm->bytes_per_sec = bytes_per_sec;
- dpcm->pcm_size = params_buffer_bytes(params);
- dpcm->pcm_count = params_period_bytes(params);
- snd_printd(KERN_INFO "pcm_size=%d, pcm_count=%d, bps=%d\n",
- dpcm->pcm_size, dpcm->pcm_count, bytes_per_sec);
+ dpcm->buffer_bytes = params_buffer_bytes(params);
+ dpcm->period_bytes = params_period_bytes(params);
+ VPRINTK1(KERN_INFO "buffer_bytes=%d, period_bytes=%d, bps=%d\n",
+ dpcm->buffer_bytes, dpcm->period_bytes, bytes_per_sec);
- dpcm->pcm_irq_pos = 0;
- dpcm->pcm_buf_pos = 0;
return 0;
}
+static int
+snd_card_asihpi_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ if (dpcm->hpi_buffer_attached)
+ hpi_stream_host_buffer_detach(dpcm->h_stream);
+
+ snd_pcm_lib_free_pages(substream);
+ return 0;
+}
+
+static void snd_card_asihpi_runtime_free(struct snd_pcm_runtime *runtime)
+{
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ kfree(dpcm);
+}
+
static void snd_card_asihpi_pcm_timer_start(struct snd_pcm_substream *
substream)
{
@@ -537,9 +548,9 @@ static void snd_card_asihpi_pcm_timer_start(struct snd_pcm_substream *
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
int expiry;
- expiry = (dpcm->pcm_count * HZ / dpcm->bytes_per_sec);
- /* wait longer the first time, for samples to propagate */
- expiry = max(expiry, 20);
+ expiry = HZ / 200;
+ /*? (dpcm->period_bytes * HZ / dpcm->bytes_per_sec); */
+ expiry = max(expiry, 1); /* don't let it be zero! */
dpcm->timer.expires = jiffies + expiry;
dpcm->respawn_timer = 1;
add_timer(&dpcm->timer);
@@ -562,37 +573,44 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
struct snd_pcm_substream *s;
u16 e;
- snd_printd("trigger %dstream %d\n",
- substream->stream, substream->number);
+ VPRINTK1(KERN_INFO "%c%d trigger\n",
+ SCHR(substream->stream), substream->number);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
snd_pcm_group_for_each_entry(s, substream) {
- struct snd_card_asihpi_pcm *ds;
- ds = s->runtime->private_data;
+ struct snd_pcm_runtime *runtime = s->runtime;
+ struct snd_card_asihpi_pcm *ds = runtime->private_data;
if (snd_pcm_substream_chip(s) != card)
continue;
+ /* don't link Cap and Play */
+ if (substream->stream != s->stream)
+ continue;
+
if ((s->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
(card->support_mmap)) {
/* How do I know how much valid data is present
- * in buffer? Just guessing 2 periods, but if
+ * in buffer? Must be at least one period!
+ * Guessing 2 periods, but if
* buffer is bigger it may contain even more
* data??
*/
- unsigned int preload = ds->pcm_count * 2;
- VPRINTK2("preload %d\n", preload);
+ unsigned int preload = ds->period_bytes * 1;
+ VPRINTK2(KERN_INFO "%d preload x%x\n", s->number, preload);
hpi_handle_error(hpi_outstream_write_buf(
- ss, ds->h_stream,
- &s->runtime->dma_area[0],
+ ds->h_stream,
+ &runtime->dma_area[0],
preload,
&ds->format));
+ ds->pcm_buf_host_rw_ofs = preload;
}
if (card->support_grouping) {
- VPRINTK1("\t_group %dstream %d\n", s->stream,
+ VPRINTK1(KERN_INFO "\t%c%d group\n",
+ SCHR(s->stream),
s->number);
- e = hpi_stream_group_add(ss,
+ e = hpi_stream_group_add(
dpcm->h_stream,
ds->h_stream);
if (!e) {
@@ -604,10 +622,12 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
} else
break;
}
- snd_printd("start\n");
+ VPRINTK1(KERN_INFO "start\n");
/* start the master stream */
snd_card_asihpi_pcm_timer_start(substream);
- hpi_handle_error(hpi_stream_start(ss, dpcm->h_stream));
+ if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) ||
+ !card->support_mmap)
+ hpi_handle_error(hpi_stream_start(dpcm->h_stream));
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -615,88 +635,73 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
snd_pcm_group_for_each_entry(s, substream) {
if (snd_pcm_substream_chip(s) != card)
continue;
+ /* don't link Cap and Play */
+ if (substream->stream != s->stream)
+ continue;
/*? workaround linked streams don't
transition to SETUP 20070706*/
s->runtime->status->state = SNDRV_PCM_STATE_SETUP;
if (card->support_grouping) {
- VPRINTK1("\t_group %dstream %d\n", s->stream,
+ VPRINTK1(KERN_INFO "\t%c%d group\n",
+ SCHR(s->stream),
s->number);
snd_pcm_trigger_done(s, substream);
} else
break;
}
- snd_printd("stop\n");
+ VPRINTK1(KERN_INFO "stop\n");
/* _prepare and _hwparams reset the stream */
- hpi_handle_error(hpi_stream_stop(ss, dpcm->h_stream));
+ hpi_handle_error(hpi_stream_stop(dpcm->h_stream));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
hpi_handle_error(
- hpi_outstream_reset(ss, dpcm->h_stream));
+ hpi_outstream_reset(dpcm->h_stream));
if (card->support_grouping)
- hpi_handle_error(hpi_stream_group_reset(ss,
- dpcm->h_stream));
+ hpi_handle_error(hpi_stream_group_reset(dpcm->h_stream));
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- snd_printd("pause release\n");
- hpi_handle_error(hpi_stream_start(ss, dpcm->h_stream));
+ VPRINTK1(KERN_INFO "pause release\n");
+ hpi_handle_error(hpi_stream_start(dpcm->h_stream));
snd_card_asihpi_pcm_timer_start(substream);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- snd_printd("pause\n");
+ VPRINTK1(KERN_INFO "pause\n");
snd_card_asihpi_pcm_timer_stop(substream);
- hpi_handle_error(hpi_stream_stop(ss, dpcm->h_stream));
+ hpi_handle_error(hpi_stream_stop(dpcm->h_stream));
break;
default:
- snd_printd("\tINVALID\n");
+ snd_printd(KERN_ERR "\tINVALID\n");
return -EINVAL;
}
return 0;
}
-static int
-snd_card_asihpi_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- if (dpcm->hpi_buffer_attached)
- hpi_stream_host_buffer_detach(ss, dpcm->h_stream);
-
- snd_pcm_lib_free_pages(substream);
- return 0;
-}
-
-static void snd_card_asihpi_runtime_free(struct snd_pcm_runtime *runtime)
-{
- struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- kfree(dpcm);
-}
-
/*algorithm outline
Without linking degenerates to getting single stream pos etc
Without mmap 2nd loop degenerates to snd_pcm_period_elapsed
*/
/*
-buf_pos=get_buf_pos(s);
+pcm_buf_dma_ofs=get_buf_pos(s);
for_each_linked_stream(s) {
- buf_pos=get_buf_pos(s);
- min_buf_pos = modulo_min(min_buf_pos, buf_pos, pcm_size)
- new_data = min(new_data, calc_new_data(buf_pos,irq_pos)
+ pcm_buf_dma_ofs=get_buf_pos(s);
+ min_buf_pos = modulo_min(min_buf_pos, pcm_buf_dma_ofs, buffer_bytes)
+ new_data = min(new_data, calc_new_data(pcm_buf_dma_ofs,irq_pos)
}
timer.expires = jiffies + predict_next_period_ready(min_buf_pos);
for_each_linked_stream(s) {
- s->buf_pos = min_buf_pos;
- if (new_data > pcm_count) {
+ s->pcm_buf_dma_ofs = min_buf_pos;
+ if (new_data > period_bytes) {
if (mmap) {
- irq_pos = (irq_pos + pcm_count) % pcm_size;
+ irq_pos = (irq_pos + period_bytes) % buffer_bytes;
if (playback) {
- write(pcm_count);
+ write(period_bytes);
} else {
- read(pcm_count);
+ read(period_bytes);
}
}
snd_pcm_period_elapsed(s);
@@ -724,105 +729,136 @@ static inline unsigned int modulo_min(unsigned int a, unsigned int b,
static void snd_card_asihpi_timer_function(unsigned long data)
{
struct snd_card_asihpi_pcm *dpcm = (struct snd_card_asihpi_pcm *)data;
- struct snd_card_asihpi *card = snd_pcm_substream_chip(dpcm->substream);
+ struct snd_pcm_substream *substream = dpcm->substream;
+ struct snd_card_asihpi *card = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *s;
unsigned int newdata = 0;
- unsigned int buf_pos, min_buf_pos = 0;
+ unsigned int pcm_buf_dma_ofs, min_buf_pos = 0;
unsigned int remdata, xfercount, next_jiffies;
int first = 1;
+ int loops = 0;
u16 state;
- u32 buffer_size, data_avail, samples_played, aux;
+ u32 buffer_size, bytes_avail, samples_played, on_card_bytes;
+
+ VPRINTK1(KERN_INFO "%c%d snd_card_asihpi_timer_function\n",
+ SCHR(substream->stream), substream->number);
/* find minimum newdata and buffer pos in group */
- snd_pcm_group_for_each_entry(s, dpcm->substream) {
+ snd_pcm_group_for_each_entry(s, substream) {
struct snd_card_asihpi_pcm *ds = s->runtime->private_data;
runtime = s->runtime;
if (snd_pcm_substream_chip(s) != card)
continue;
- hpi_handle_error(hpi_stream_get_info_ex(ss,
+ /* don't link Cap and Play */
+ if (substream->stream != s->stream)
+ continue;
+
+ hpi_handle_error(hpi_stream_get_info_ex(
ds->h_stream, &state,
- &buffer_size, &data_avail,
- &samples_played, &aux));
+ &buffer_size, &bytes_avail,
+ &samples_played, &on_card_bytes));
/* number of bytes in on-card buffer */
- runtime->delay = aux;
-
- if (state == HPI_STATE_DRAINED) {
- snd_printd(KERN_WARNING "outstream %d drained\n",
- s->number);
- snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
- return;
- }
+ runtime->delay = on_card_bytes;
if (s->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- buf_pos = frames_to_bytes(runtime, samples_played);
- } else {
- buf_pos = data_avail + ds->pcm_irq_pos;
- }
+ pcm_buf_dma_ofs = ds->pcm_buf_host_rw_ofs - bytes_avail;
+ if (state == HPI_STATE_STOPPED) {
+ if ((bytes_avail == 0) &&
+ (on_card_bytes < ds->pcm_buf_host_rw_ofs)) {
+ hpi_handle_error(hpi_stream_start(ds->h_stream));
+ VPRINTK1(KERN_INFO "P%d start\n", s->number);
+ }
+ } else if (state == HPI_STATE_DRAINED) {
+ VPRINTK1(KERN_WARNING "P%d drained\n",
+ s->number);
+ /*snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
+ continue; */
+ }
+ } else
+ pcm_buf_dma_ofs = bytes_avail + ds->pcm_buf_host_rw_ofs;
if (first) {
/* can't statically init min when wrap is involved */
- min_buf_pos = buf_pos;
- newdata = (buf_pos - ds->pcm_irq_pos) % ds->pcm_size;
+ min_buf_pos = pcm_buf_dma_ofs;
+ newdata = (pcm_buf_dma_ofs - ds->pcm_buf_elapsed_dma_ofs) % ds->buffer_bytes;
first = 0;
} else {
min_buf_pos =
- modulo_min(min_buf_pos, buf_pos, UINT_MAX+1L);
+ modulo_min(min_buf_pos, pcm_buf_dma_ofs, UINT_MAX+1L);
newdata = min(
- (buf_pos - ds->pcm_irq_pos) % ds->pcm_size,
+ (pcm_buf_dma_ofs - ds->pcm_buf_elapsed_dma_ofs) % ds->buffer_bytes,
newdata);
}
- VPRINTK1("PB timer hw_ptr x%04lX, appl_ptr x%04lX\n",
+ VPRINTK1(KERN_INFO "PB timer hw_ptr x%04lX, appl_ptr x%04lX\n",
(unsigned long)frames_to_bytes(runtime,
runtime->status->hw_ptr),
(unsigned long)frames_to_bytes(runtime,
runtime->control->appl_ptr));
- VPRINTK1("%d S=%d, irq=%04X, pos=x%04X, left=x%04X,"
- " aux=x%04X space=x%04X\n", s->number,
- state, ds->pcm_irq_pos, buf_pos, (int)data_avail,
- (int)aux, buffer_size-data_avail);
+
+ VPRINTK1(KERN_INFO "%d %c%d S=%d, rw=%04X, dma=x%04X, left=x%04X,"
+ " aux=x%04X space=x%04X\n",
+ loops, SCHR(s->stream), s->number,
+ state, ds->pcm_buf_host_rw_ofs, pcm_buf_dma_ofs, (int)bytes_avail,
+ (int)on_card_bytes, buffer_size-bytes_avail);
+ loops++;
}
+ pcm_buf_dma_ofs = min_buf_pos;
- remdata = newdata % dpcm->pcm_count;
- xfercount = newdata - remdata; /* a multiple of pcm_count */
- next_jiffies = ((dpcm->pcm_count-remdata) * HZ / dpcm->bytes_per_sec)+1;
- next_jiffies = max(next_jiffies, 2U * HZ / 1000U);
+ remdata = newdata % dpcm->period_bytes;
+ xfercount = newdata - remdata; /* a multiple of period_bytes */
+ /* come back when on_card_bytes has decreased enough to allow
+ write to happen, or when data has been consumed to make another
+ period
+ */
+ if (xfercount && (on_card_bytes > dpcm->period_bytes))
+ next_jiffies = ((on_card_bytes - dpcm->period_bytes) * HZ / dpcm->bytes_per_sec);
+ else
+ next_jiffies = ((dpcm->period_bytes - remdata) * HZ / dpcm->bytes_per_sec);
+
+ next_jiffies = max(next_jiffies, 1U);
dpcm->timer.expires = jiffies + next_jiffies;
- VPRINTK1("jif %d buf pos x%04X newdata x%04X xc x%04X\n",
- next_jiffies, min_buf_pos, newdata, xfercount);
+ VPRINTK1(KERN_INFO "jif %d buf pos x%04X newdata x%04X xfer x%04X\n",
+ next_jiffies, pcm_buf_dma_ofs, newdata, xfercount);
- snd_pcm_group_for_each_entry(s, dpcm->substream) {
+ snd_pcm_group_for_each_entry(s, substream) {
struct snd_card_asihpi_pcm *ds = s->runtime->private_data;
- ds->pcm_buf_pos = min_buf_pos;
- if (xfercount) {
+ /* don't link Cap and Play */
+ if (substream->stream != s->stream)
+ continue;
+
+ ds->pcm_buf_dma_ofs = pcm_buf_dma_ofs;
+
+ if (xfercount && (on_card_bytes <= ds->period_bytes)) {
if (card->support_mmap) {
- ds->pcm_irq_pos = ds->pcm_irq_pos + xfercount;
if (s->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- VPRINTK2("write OS%d x%04x\n",
+ VPRINTK2(KERN_INFO "P%d write x%04x\n",
s->number,
- ds->pcm_count);
+ ds->period_bytes);
hpi_handle_error(
hpi_outstream_write_buf(
- ss, ds->h_stream,
+ ds->h_stream,
&s->runtime->
dma_area[0],
xfercount,
&ds->format));
} else {
- VPRINTK2("read IS%d x%04x\n",
+ VPRINTK2(KERN_INFO "C%d read x%04x\n",
s->number,
- dpcm->pcm_count);
+ xfercount);
hpi_handle_error(
hpi_instream_read_buf(
- ss, ds->h_stream,
+ ds->h_stream,
NULL, xfercount));
}
+ ds->pcm_buf_host_rw_ofs = ds->pcm_buf_host_rw_ofs + xfercount;
} /* else R/W will be handled by read/write callbacks */
+ ds->pcm_buf_elapsed_dma_ofs = pcm_buf_dma_ofs;
snd_pcm_period_elapsed(s);
}
}
@@ -845,12 +881,12 @@ static int snd_card_asihpi_playback_prepare(struct snd_pcm_substream *
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- snd_printd(KERN_INFO "playback prepare %d\n", substream->number);
-
- hpi_handle_error(hpi_outstream_reset(ss, dpcm->h_stream));
- dpcm->pcm_irq_pos = 0;
- dpcm->pcm_buf_pos = 0;
+ VPRINTK1(KERN_INFO "playback prepare %d\n", substream->number);
+ hpi_handle_error(hpi_outstream_reset(dpcm->h_stream));
+ dpcm->pcm_buf_host_rw_ofs = 0;
+ dpcm->pcm_buf_dma_ofs = 0;
+ dpcm->pcm_buf_elapsed_dma_ofs = 0;
return 0;
}
@@ -861,27 +897,8 @@ snd_card_asihpi_playback_pointer(struct snd_pcm_substream *substream)
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
snd_pcm_uframes_t ptr;
- u32 samples_played;
- u16 err;
-
- if (!snd_pcm_stream_linked(substream)) {
- /* NOTE, can use samples played for playback position here and
- * in timer fn because it LAGS the actual read pointer, and is a
- * better representation of actual playout position
- */
- err = hpi_outstream_get_info_ex(ss, dpcm->h_stream, NULL,
- NULL, NULL,
- &samples_played, NULL);
- hpi_handle_error(err);
-
- dpcm->pcm_buf_pos = frames_to_bytes(runtime, samples_played);
- }
- /* else must return most conservative value found in timer func
- * by looping over all streams
- */
-
- ptr = bytes_to_frames(runtime, dpcm->pcm_buf_pos % dpcm->pcm_size);
- VPRINTK2("playback_pointer=%04ld\n", (unsigned long)ptr);
+ ptr = bytes_to_frames(runtime, dpcm->pcm_buf_dma_ofs % dpcm->buffer_bytes);
+ /* VPRINTK2(KERN_INFO "playback_pointer=x%04lx\n", (unsigned long)ptr); */
return ptr;
}
@@ -898,12 +915,12 @@ static void snd_card_asihpi_playback_format(struct snd_card_asihpi *asihpi,
/* on cards without SRC, must query at valid rate,
* maybe set by external sync
*/
- err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ err = hpi_mixer_get_control(asihpi->h_mixer,
HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
HPI_CONTROL_SAMPLECLOCK, &h_control);
if (!err)
- err = hpi_sample_clock_get_sample_rate(ss, h_control,
+ err = hpi_sample_clock_get_sample_rate(h_control,
&sample_rate);
for (format = HPI_FORMAT_PCM8_UNSIGNED;
@@ -911,7 +928,7 @@ static void snd_card_asihpi_playback_format(struct snd_card_asihpi *asihpi,
err = hpi_format_create(&hpi_format,
2, format, sample_rate, 128000, 0);
if (!err)
- err = hpi_outstream_query_format(ss, h_stream,
+ err = hpi_outstream_query_format(h_stream,
&hpi_format);
if (!err && (hpi_to_alsa_formats[format] != -1))
pcmhw->formats |=
@@ -942,7 +959,7 @@ static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
return -ENOMEM;
err =
- hpi_outstream_open(ss, card->adapter_index,
+ hpi_outstream_open(card->adapter_index,
substream->number, &dpcm->h_stream);
hpi_handle_error(err);
if (err)
@@ -998,11 +1015,11 @@ static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
card->update_interval_frames);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- card->update_interval_frames * 4, UINT_MAX);
+ card->update_interval_frames * 2, UINT_MAX);
snd_pcm_set_sync(substream);
- snd_printd(KERN_INFO "playback open\n");
+ VPRINTK1(KERN_INFO "playback open\n");
return 0;
}
@@ -1012,8 +1029,8 @@ static int snd_card_asihpi_playback_close(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- hpi_handle_error(hpi_outstream_close(ss, dpcm->h_stream));
- snd_printd(KERN_INFO "playback close\n");
+ hpi_handle_error(hpi_outstream_close(dpcm->h_stream));
+ VPRINTK1(KERN_INFO "playback close\n");
return 0;
}
@@ -1036,9 +1053,11 @@ static int snd_card_asihpi_playback_copy(struct snd_pcm_substream *substream,
VPRINTK2(KERN_DEBUG "playback copy%d %u bytes\n",
substream->number, len);
- hpi_handle_error(hpi_outstream_write_buf(ss, dpcm->h_stream,
+ hpi_handle_error(hpi_outstream_write_buf(dpcm->h_stream,
runtime->dma_area, len, &dpcm->format));
+ dpcm->pcm_buf_host_rw_ofs = dpcm->pcm_buf_host_rw_ofs + len;
+
return 0;
}
@@ -1052,10 +1071,10 @@ static int snd_card_asihpi_playback_silence(struct snd_pcm_substream *
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
len = frames_to_bytes(runtime, count);
- snd_printd(KERN_INFO "playback silence %u bytes\n", len);
+ VPRINTK1(KERN_INFO "playback silence %u bytes\n", len);
memset(runtime->dma_area, 0, len);
- hpi_handle_error(hpi_outstream_write_buf(ss, dpcm->h_stream,
+ hpi_handle_error(hpi_outstream_write_buf(dpcm->h_stream,
runtime->dma_area, len, &dpcm->format));
return 0;
}
@@ -1091,13 +1110,13 @@ snd_card_asihpi_capture_pointer(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- VPRINTK2("capture pointer %d=%d\n",
- substream->number, dpcm->pcm_buf_pos);
- /* NOTE Unlike playback can't use actual dwSamplesPlayed
+ VPRINTK2(KERN_INFO "capture pointer %d=%d\n",
+ substream->number, dpcm->pcm_buf_dma_ofs);
+ /* NOTE Unlike playback can't use actual samples_played
for the capture position, because those samples aren't yet in
the local buffer available for reading.
*/
- return bytes_to_frames(runtime, dpcm->pcm_buf_pos % dpcm->pcm_size);
+ return bytes_to_frames(runtime, dpcm->pcm_buf_dma_ofs % dpcm->buffer_bytes);
}
static int snd_card_asihpi_capture_ioctl(struct snd_pcm_substream *substream,
@@ -1111,11 +1130,12 @@ static int snd_card_asihpi_capture_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- hpi_handle_error(hpi_instream_reset(ss, dpcm->h_stream));
- dpcm->pcm_irq_pos = 0;
- dpcm->pcm_buf_pos = 0;
+ hpi_handle_error(hpi_instream_reset(dpcm->h_stream));
+ dpcm->pcm_buf_host_rw_ofs = 0;
+ dpcm->pcm_buf_dma_ofs = 0;
+ dpcm->pcm_buf_elapsed_dma_ofs = 0;
- snd_printd("capture prepare %d\n", substream->number);
+ VPRINTK1("Capture Prepare %d\n", substream->number);
return 0;
}
@@ -1133,12 +1153,12 @@ static void snd_card_asihpi_capture_format(struct snd_card_asihpi *asihpi,
/* on cards without SRC, must query at valid rate,
maybe set by external sync */
- err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ err = hpi_mixer_get_control(asihpi->h_mixer,
HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
HPI_CONTROL_SAMPLECLOCK, &h_control);
if (!err)
- err = hpi_sample_clock_get_sample_rate(ss, h_control,
+ err = hpi_sample_clock_get_sample_rate(h_control,
&sample_rate);
for (format = HPI_FORMAT_PCM8_UNSIGNED;
@@ -1147,7 +1167,7 @@ static void snd_card_asihpi_capture_format(struct snd_card_asihpi *asihpi,
err = hpi_format_create(&hpi_format, 2, format,
sample_rate, 128000, 0);
if (!err)
- err = hpi_instream_query_format(ss, h_stream,
+ err = hpi_instream_query_format(h_stream,
&hpi_format);
if (!err)
pcmhw->formats |=
@@ -1178,11 +1198,11 @@ static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
if (dpcm == NULL)
return -ENOMEM;
- snd_printd("hpi_instream_open adapter %d stream %d\n",
+ VPRINTK1("hpi_instream_open adapter %d stream %d\n",
card->adapter_index, substream->number);
err = hpi_handle_error(
- hpi_instream_open(ss, card->adapter_index,
+ hpi_instream_open(card->adapter_index,
substream->number, &dpcm->h_stream));
if (err)
kfree(dpcm);
@@ -1209,6 +1229,9 @@ static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
snd_card_asihpi_capture.info |= SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID;
+ if (card->support_grouping)
+ snd_card_asihpi_capture.info |= SNDRV_PCM_INFO_SYNC_START;
+
runtime->hw = snd_card_asihpi_capture;
if (card->support_mmap)
@@ -1231,7 +1254,7 @@ static int snd_card_asihpi_capture_close(struct snd_pcm_substream *substream)
{
struct snd_card_asihpi_pcm *dpcm = substream->runtime->private_data;
- hpi_handle_error(hpi_instream_close(ss, dpcm->h_stream));
+ hpi_handle_error(hpi_instream_close(dpcm->h_stream));
return 0;
}
@@ -1241,18 +1264,17 @@ static int snd_card_asihpi_capture_copy(struct snd_pcm_substream *substream,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
- u32 data_size;
+ u32 len;
- data_size = frames_to_bytes(runtime, count);
+ len = frames_to_bytes(runtime, count);
- VPRINTK2("capture copy%d %d bytes\n", substream->number, data_size);
- hpi_handle_error(hpi_instream_read_buf(ss, dpcm->h_stream,
- runtime->dma_area, data_size));
+ VPRINTK2(KERN_INFO "capture copy%d %d bytes\n", substream->number, len);
+ hpi_handle_error(hpi_instream_read_buf(dpcm->h_stream,
+ runtime->dma_area, len));
- /* Used by capture_pointer */
- dpcm->pcm_irq_pos = dpcm->pcm_irq_pos + data_size;
+ dpcm->pcm_buf_host_rw_ofs = dpcm->pcm_buf_host_rw_ofs + len;
- if (copy_to_user(dst, runtime->dma_area, data_size))
+ if (copy_to_user(dst, runtime->dma_area, len))
return -EFAULT;
return 0;
@@ -1287,7 +1309,7 @@ static int __devinit snd_card_asihpi_pcm_new(struct snd_card_asihpi *asihpi,
struct snd_pcm *pcm;
int err;
- err = snd_pcm_new(asihpi->card, "asihpi PCM", device,
+ err = snd_pcm_new(asihpi->card, "Asihpi PCM", device,
asihpi->num_outstreams, asihpi->num_instreams,
&pcm);
if (err < 0)
@@ -1307,7 +1329,7 @@ static int __devinit snd_card_asihpi_pcm_new(struct snd_card_asihpi *asihpi,
pcm->private_data = asihpi;
pcm->info_flags = 0;
- strcpy(pcm->name, "asihpi PCM");
+ strcpy(pcm->name, "Asihpi PCM");
/*? do we want to emulate MMAP for non-BBM cards?
Jack doesn't work with ALSAs MMAP emulation - WHY NOT? */
@@ -1330,8 +1352,7 @@ struct hpi_control {
char name[44]; /* copied to snd_ctl_elem_id.name[44]; */
};
-static char *asihpi_tuner_band_names[] =
-{
+static const char * const asihpi_tuner_band_names[] = {
"invalid",
"AM",
"FM mono",
@@ -1349,70 +1370,36 @@ compile_time_assert(
(HPI_TUNER_BAND_LAST+1)),
assert_tuner_band_names_size);
-#if ASI_STYLE_NAMES
-static char *asihpi_src_names[] =
-{
+static const char * const asihpi_src_names[] = {
"no source",
- "outstream",
- "line_in",
- "aes_in",
- "tuner",
+ "PCM",
+ "Line",
+ "Digital",
+ "Tuner",
"RF",
- "clock",
- "bitstr",
- "mic",
- "cobranet",
- "analog_in",
- "adapter",
+ "Clock",
+ "Bitstream",
+ "Microphone",
+ "Cobranet",
+ "Analog",
+ "Adapter",
};
-#else
-static char *asihpi_src_names[] =
-{
- "no source",
- "PCM playback",
- "line in",
- "digital in",
- "tuner",
- "RF",
- "clock",
- "bitstream",
- "mic",
- "cobranet in",
- "analog in",
- "adapter",
-};
-#endif
compile_time_assert(
(ARRAY_SIZE(asihpi_src_names) ==
(HPI_SOURCENODE_LAST_INDEX-HPI_SOURCENODE_NONE+1)),
assert_src_names_size);
-#if ASI_STYLE_NAMES
-static char *asihpi_dst_names[] =
-{
- "no destination",
- "instream",
- "line_out",
- "aes_out",
- "RF",
- "speaker" ,
- "cobranet",
- "analog_out",
-};
-#else
-static char *asihpi_dst_names[] =
-{
+static const char * const asihpi_dst_names[] = {
"no destination",
- "PCM capture",
- "line out",
- "digital out",
+ "PCM",
+ "Line",
+ "Digital",
"RF",
- "speaker",
- "cobranet out",
- "analog out"
+ "Speaker",
+ "Cobranet Out",
+ "Analog"
};
-#endif
compile_time_assert(
(ARRAY_SIZE(asihpi_dst_names) ==
@@ -1438,30 +1425,45 @@ static void asihpi_ctl_init(struct snd_kcontrol_new *snd_control,
struct hpi_control *hpi_ctl,
char *name)
{
+ char *dir = "";
memset(snd_control, 0, sizeof(*snd_control));
snd_control->name = hpi_ctl->name;
snd_control->private_value = hpi_ctl->h_control;
snd_control->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
snd_control->index = 0;
+ if (hpi_ctl->dst_node_type + HPI_DESTNODE_NONE == HPI_DESTNODE_ISTREAM)
+ dir = "Capture "; /* On or towards a PCM capture destination*/
+ else if ((hpi_ctl->src_node_type + HPI_SOURCENODE_NONE != HPI_SOURCENODE_OSTREAM) &&
+ (!hpi_ctl->dst_node_type))
+ dir = "Capture "; /* On a source node that is not PCM playback */
+ else if (hpi_ctl->src_node_type &&
+ (hpi_ctl->src_node_type + HPI_SOURCENODE_NONE != HPI_SOURCENODE_OSTREAM) &&
+ (hpi_ctl->dst_node_type))
+ dir = "Monitor Playback "; /* Between an input and an output */
+ else
+ dir = "Playback "; /* PCM Playback source, or output node */
+
if (hpi_ctl->src_node_type && hpi_ctl->dst_node_type)
- sprintf(hpi_ctl->name, "%s%d to %s%d %s",
+ sprintf(hpi_ctl->name, "%s%d %s%d %s%s",
asihpi_src_names[hpi_ctl->src_node_type],
hpi_ctl->src_node_index,
asihpi_dst_names[hpi_ctl->dst_node_type],
hpi_ctl->dst_node_index,
- name);
+ dir, name);
else if (hpi_ctl->dst_node_type) {
- sprintf(hpi_ctl->name, "%s%d %s",
+ sprintf(hpi_ctl->name, "%s %d %s%s",
asihpi_dst_names[hpi_ctl->dst_node_type],
hpi_ctl->dst_node_index,
- name);
+ dir, name);
} else {
- sprintf(hpi_ctl->name, "%s%d %s",
+ sprintf(hpi_ctl->name, "%s %d %s%s",
asihpi_src_names[hpi_ctl->src_node_type],
hpi_ctl->src_node_index,
- name);
+ dir, name);
}
+ /* printk(KERN_INFO "Adding %s %d to %d ", hpi_ctl->name,
+ hpi_ctl->wSrcNodeType, hpi_ctl->wDstNodeType); */
}
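/* Illustration (editorial, not part of the patch): with the direction prefix
 * chosen above, a Volume control from source node "Line" 0 to destination
 * node "PCM" 0 (an instream) would be named "Line0 PCM0 Capture Volume",
 * while a destination-only control on "Line" 0 would come out as
 * "Line 0 Playback Volume". The node types and indices here are hypothetical;
 * the actual strings depend on what the adapter's mixer reports.
 */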
/*------------------------------------------------------------
@@ -1478,7 +1480,7 @@ static int snd_asihpi_volume_info(struct snd_kcontrol *kcontrol,
short max_gain_mB;
short step_gain_mB;
- err = hpi_volume_query_range(ss, h_control,
+ err = hpi_volume_query_range(h_control,
&min_gain_mB, &max_gain_mB, &step_gain_mB);
if (err) {
max_gain_mB = 0;
@@ -1500,7 +1502,7 @@ static int snd_asihpi_volume_get(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
short an_gain_mB[HPI_MAX_CHANNELS];
- hpi_handle_error(hpi_volume_get_gain(ss, h_control, an_gain_mB));
+ hpi_handle_error(hpi_volume_get_gain(h_control, an_gain_mB));
ucontrol->value.integer.value[0] = an_gain_mB[0] / VOL_STEP_mB;
ucontrol->value.integer.value[1] = an_gain_mB[1] / VOL_STEP_mB;
@@ -1522,7 +1524,7 @@ static int snd_asihpi_volume_put(struct snd_kcontrol *kcontrol,
asihpi->mixer_volume[addr][1] != right;
*/
change = 1;
- hpi_handle_error(hpi_volume_set_gain(ss, h_control, an_gain_mB));
+ hpi_handle_error(hpi_volume_set_gain(h_control, an_gain_mB));
return change;
}
@@ -1534,7 +1536,7 @@ static int __devinit snd_asihpi_volume_add(struct snd_card_asihpi *asihpi,
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
- asihpi_ctl_init(&snd_control, hpi_ctl, "volume");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Volume");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ;
snd_control.info = snd_asihpi_volume_info;
@@ -1558,7 +1560,7 @@ static int snd_asihpi_level_info(struct snd_kcontrol *kcontrol,
short step_gain_mB;
err =
- hpi_level_query_range(ss, h_control, &min_gain_mB,
+ hpi_level_query_range(h_control, &min_gain_mB,
&max_gain_mB, &step_gain_mB);
if (err) {
max_gain_mB = 2400;
@@ -1580,7 +1582,7 @@ static int snd_asihpi_level_get(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
short an_gain_mB[HPI_MAX_CHANNELS];
- hpi_handle_error(hpi_level_get_gain(ss, h_control, an_gain_mB));
+ hpi_handle_error(hpi_level_get_gain(h_control, an_gain_mB));
ucontrol->value.integer.value[0] =
an_gain_mB[0] / HPI_UNITS_PER_dB;
ucontrol->value.integer.value[1] =
@@ -1604,7 +1606,7 @@ static int snd_asihpi_level_put(struct snd_kcontrol *kcontrol,
asihpi->mixer_level[addr][1] != right;
*/
change = 1;
- hpi_handle_error(hpi_level_set_gain(ss, h_control, an_gain_mB));
+ hpi_handle_error(hpi_level_set_gain(h_control, an_gain_mB));
return change;
}
@@ -1617,7 +1619,7 @@ static int __devinit snd_asihpi_level_add(struct snd_card_asihpi *asihpi,
struct snd_kcontrol_new snd_control;
/* can't use 'volume' cos some nodes have volume as well */
- asihpi_ctl_init(&snd_control, hpi_ctl, "level");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Level");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ;
snd_control.info = snd_asihpi_level_info;
@@ -1633,12 +1635,8 @@ static int __devinit snd_asihpi_level_add(struct snd_card_asihpi *asihpi,
------------------------------------------------------------*/
/* AESEBU format */
-static char *asihpi_aesebu_format_names[] =
-{
- "N/A",
- "S/PDIF",
- "AES/EBU",
-};
+static const char * const asihpi_aesebu_format_names[] = {
+ "N/A", "S/PDIF", "AES/EBU" };
static int snd_asihpi_aesebu_format_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -1659,12 +1657,12 @@ static int snd_asihpi_aesebu_format_info(struct snd_kcontrol *kcontrol,
static int snd_asihpi_aesebu_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol,
- u16 (*func)(const struct hpi_hsubsys *, u32, u16 *))
+ u16 (*func)(u32, u16 *))
{
u32 h_control = kcontrol->private_value;
u16 source, err;
- err = func(ss, h_control, &source);
+ err = func(h_control, &source);
/* default to N/A */
ucontrol->value.enumerated.item[0] = 0;
@@ -1681,7 +1679,7 @@ static int snd_asihpi_aesebu_format_get(struct snd_kcontrol *kcontrol,
static int snd_asihpi_aesebu_format_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol,
- u16 (*func)(const struct hpi_hsubsys *, u32, u16))
+ u16 (*func)(u32, u16))
{
u32 h_control = kcontrol->private_value;
@@ -1693,7 +1691,7 @@ static int snd_asihpi_aesebu_format_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.enumerated.item[0] == 2)
source = HPI_AESEBU_FORMAT_AESEBU;
- if (func(ss, h_control, source) != 0)
+ if (func(h_control, source) != 0)
return -EINVAL;
return 1;
@@ -1702,13 +1700,13 @@ static int snd_asihpi_aesebu_format_put(struct snd_kcontrol *kcontrol,
static int snd_asihpi_aesebu_rx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol) {
return snd_asihpi_aesebu_format_get(kcontrol, ucontrol,
- HPI_AESEBU__receiver_get_format);
+ hpi_aesebu_receiver_get_format);
}
static int snd_asihpi_aesebu_rx_format_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol) {
return snd_asihpi_aesebu_format_put(kcontrol, ucontrol,
- HPI_AESEBU__receiver_set_format);
+ hpi_aesebu_receiver_set_format);
}
static int snd_asihpi_aesebu_rxstatus_info(struct snd_kcontrol *kcontrol,
@@ -1730,8 +1728,8 @@ static int snd_asihpi_aesebu_rxstatus_get(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
u16 status;
- hpi_handle_error(HPI_AESEBU__receiver_get_error_status(
- ss, h_control, &status));
+ hpi_handle_error(hpi_aesebu_receiver_get_error_status(
+ h_control, &status));
ucontrol->value.integer.value[0] = status;
return 0;
}
@@ -1742,7 +1740,7 @@ static int __devinit snd_asihpi_aesebu_rx_add(struct snd_card_asihpi *asihpi,
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
- asihpi_ctl_init(&snd_control, hpi_ctl, "format");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Format");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
snd_control.info = snd_asihpi_aesebu_format_info;
snd_control.get = snd_asihpi_aesebu_rx_format_get;
@@ -1752,7 +1750,7 @@ static int __devinit snd_asihpi_aesebu_rx_add(struct snd_card_asihpi *asihpi,
if (ctl_add(card, &snd_control, asihpi) < 0)
return -EINVAL;
- asihpi_ctl_init(&snd_control, hpi_ctl, "status");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Status");
snd_control.access =
SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_READ;
snd_control.info = snd_asihpi_aesebu_rxstatus_info;
@@ -1764,13 +1762,13 @@ static int __devinit snd_asihpi_aesebu_rx_add(struct snd_card_asihpi *asihpi,
static int snd_asihpi_aesebu_tx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol) {
return snd_asihpi_aesebu_format_get(kcontrol, ucontrol,
- HPI_AESEBU__transmitter_get_format);
+ hpi_aesebu_transmitter_get_format);
}
static int snd_asihpi_aesebu_tx_format_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol) {
return snd_asihpi_aesebu_format_put(kcontrol, ucontrol,
- HPI_AESEBU__transmitter_set_format);
+ hpi_aesebu_transmitter_set_format);
}
@@ -1780,7 +1778,7 @@ static int __devinit snd_asihpi_aesebu_tx_add(struct snd_card_asihpi *asihpi,
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
- asihpi_ctl_init(&snd_control, hpi_ctl, "format");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Format");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
snd_control.info = snd_asihpi_aesebu_format_info;
snd_control.get = snd_asihpi_aesebu_tx_format_get;
@@ -1804,7 +1802,7 @@ static int snd_asihpi_tuner_gain_info(struct snd_kcontrol *kcontrol,
u16 gain_range[3];
for (idx = 0; idx < 3; idx++) {
- err = hpi_tuner_query_gain(ss, h_control,
+ err = hpi_tuner_query_gain(h_control,
idx, &gain_range[idx]);
if (err != 0)
return err;
@@ -1827,7 +1825,7 @@ static int snd_asihpi_tuner_gain_get(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
short gain;
- hpi_handle_error(hpi_tuner_get_gain(ss, h_control, &gain));
+ hpi_handle_error(hpi_tuner_get_gain(h_control, &gain));
ucontrol->value.integer.value[0] = gain / HPI_UNITS_PER_dB;
return 0;
@@ -1843,7 +1841,7 @@ static int snd_asihpi_tuner_gain_put(struct snd_kcontrol *kcontrol,
short gain;
gain = (ucontrol->value.integer.value[0]) * HPI_UNITS_PER_dB;
- hpi_handle_error(hpi_tuner_set_gain(ss, h_control, gain));
+ hpi_handle_error(hpi_tuner_set_gain(h_control, gain));
return 1;
}
@@ -1857,7 +1855,7 @@ static int asihpi_tuner_band_query(struct snd_kcontrol *kcontrol,
u32 i;
for (i = 0; i < len; i++) {
- err = hpi_tuner_query_band(ss,
+ err = hpi_tuner_query_band(
h_control, i, &band_list[i]);
if (err != 0)
break;
@@ -1913,7 +1911,7 @@ static int snd_asihpi_tuner_band_get(struct snd_kcontrol *kcontrol,
num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
HPI_TUNER_BAND_LAST);
- hpi_handle_error(hpi_tuner_get_band(ss, h_control, &band));
+ hpi_handle_error(hpi_tuner_get_band(h_control, &band));
ucontrol->value.enumerated.item[0] = -1;
for (idx = 0; idx < HPI_TUNER_BAND_LAST; idx++)
@@ -1940,7 +1938,7 @@ static int snd_asihpi_tuner_band_put(struct snd_kcontrol *kcontrol,
HPI_TUNER_BAND_LAST);
band = tuner_bands[ucontrol->value.enumerated.item[0]];
- hpi_handle_error(hpi_tuner_set_band(ss, h_control, band));
+ hpi_handle_error(hpi_tuner_set_band(h_control, band));
return 1;
}
@@ -1965,7 +1963,7 @@ static int snd_asihpi_tuner_freq_info(struct snd_kcontrol *kcontrol,
for (band_iter = 0; band_iter < num_bands; band_iter++) {
for (idx = 0; idx < 3; idx++) {
- err = hpi_tuner_query_frequency(ss, h_control,
+ err = hpi_tuner_query_frequency(h_control,
idx, tuner_bands[band_iter],
&temp_freq_range[idx]);
if (err != 0)
@@ -1998,7 +1996,7 @@ static int snd_asihpi_tuner_freq_get(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
u32 freq;
- hpi_handle_error(hpi_tuner_get_frequency(ss, h_control, &freq));
+ hpi_handle_error(hpi_tuner_get_frequency(h_control, &freq));
ucontrol->value.integer.value[0] = freq;
return 0;
@@ -2011,7 +2009,7 @@ static int snd_asihpi_tuner_freq_put(struct snd_kcontrol *kcontrol,
u32 freq;
freq = ucontrol->value.integer.value[0];
- hpi_handle_error(hpi_tuner_set_frequency(ss, h_control, freq));
+ hpi_handle_error(hpi_tuner_set_frequency(h_control, freq));
return 1;
}
@@ -2026,8 +2024,8 @@ static int __devinit snd_asihpi_tuner_add(struct snd_card_asihpi *asihpi,
snd_control.private_value = hpi_ctl->h_control;
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
- if (!hpi_tuner_get_gain(ss, hpi_ctl->h_control, NULL)) {
- asihpi_ctl_init(&snd_control, hpi_ctl, "gain");
+ if (!hpi_tuner_get_gain(hpi_ctl->h_control, NULL)) {
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Gain");
snd_control.info = snd_asihpi_tuner_gain_info;
snd_control.get = snd_asihpi_tuner_gain_get;
snd_control.put = snd_asihpi_tuner_gain_put;
@@ -2036,7 +2034,7 @@ static int __devinit snd_asihpi_tuner_add(struct snd_card_asihpi *asihpi,
return -EINVAL;
}
- asihpi_ctl_init(&snd_control, hpi_ctl, "band");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Band");
snd_control.info = snd_asihpi_tuner_band_info;
snd_control.get = snd_asihpi_tuner_band_get;
snd_control.put = snd_asihpi_tuner_band_put;
@@ -2044,7 +2042,7 @@ static int __devinit snd_asihpi_tuner_add(struct snd_card_asihpi *asihpi,
if (ctl_add(card, &snd_control, asihpi) < 0)
return -EINVAL;
- asihpi_ctl_init(&snd_control, hpi_ctl, "freq");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Freq");
snd_control.info = snd_asihpi_tuner_freq_info;
snd_control.get = snd_asihpi_tuner_freq_get;
snd_control.put = snd_asihpi_tuner_freq_put;
@@ -2095,7 +2093,7 @@ static int snd_asihpi_meter_get(struct snd_kcontrol *kcontrol,
short an_gain_mB[HPI_MAX_CHANNELS], i;
u16 err;
- err = hpi_meter_get_peak(ss, h_control, an_gain_mB);
+ err = hpi_meter_get_peak(h_control, an_gain_mB);
for (i = 0; i < HPI_MAX_CHANNELS; i++) {
if (err) {
@@ -2120,7 +2118,7 @@ static int __devinit snd_asihpi_meter_add(struct snd_card_asihpi *asihpi,
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
- asihpi_ctl_init(&snd_control, hpi_ctl, "meter");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Meter");
snd_control.access =
SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_READ;
snd_control.info = snd_asihpi_meter_info;
@@ -2140,7 +2138,7 @@ static int snd_card_asihpi_mux_count_sources(struct snd_kcontrol *snd_control)
struct hpi_control hpi_ctl;
int s, err;
for (s = 0; s < 32; s++) {
- err = hpi_multiplexer_query_source(ss, h_control, s,
+ err = hpi_multiplexer_query_source(h_control, s,
&hpi_ctl.
src_node_type,
&hpi_ctl.
@@ -2168,7 +2166,7 @@ static int snd_asihpi_mux_info(struct snd_kcontrol *kcontrol,
uinfo->value.enumerated.items - 1;
err =
- hpi_multiplexer_query_source(ss, h_control,
+ hpi_multiplexer_query_source(h_control,
uinfo->value.enumerated.item,
&src_node_type, &src_node_index);
@@ -2186,11 +2184,11 @@ static int snd_asihpi_mux_get(struct snd_kcontrol *kcontrol,
u16 src_node_type, src_node_index;
int s;
- hpi_handle_error(hpi_multiplexer_get_source(ss, h_control,
+ hpi_handle_error(hpi_multiplexer_get_source(h_control,
&source_type, &source_index));
/* Should cache this search result! */
for (s = 0; s < 256; s++) {
- if (hpi_multiplexer_query_source(ss, h_control, s,
+ if (hpi_multiplexer_query_source(h_control, s,
&src_node_type, &src_node_index))
break;
@@ -2201,7 +2199,7 @@ static int snd_asihpi_mux_get(struct snd_kcontrol *kcontrol,
}
}
snd_printd(KERN_WARNING
- "control %x failed to match mux source %hu %hu\n",
+ "Control %x failed to match mux source %hu %hu\n",
h_control, source_type, source_index);
ucontrol->value.enumerated.item[0] = 0;
return 0;
@@ -2217,12 +2215,12 @@ static int snd_asihpi_mux_put(struct snd_kcontrol *kcontrol,
change = 1;
- e = hpi_multiplexer_query_source(ss, h_control,
+ e = hpi_multiplexer_query_source(h_control,
ucontrol->value.enumerated.item[0],
&source_type, &source_index);
if (!e)
hpi_handle_error(
- hpi_multiplexer_set_source(ss, h_control,
+ hpi_multiplexer_set_source(h_control,
source_type, source_index));
return change;
}
@@ -2234,11 +2232,7 @@ static int __devinit snd_asihpi_mux_add(struct snd_card_asihpi *asihpi,
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
-#if ASI_STYLE_NAMES
- asihpi_ctl_init(&snd_control, hpi_ctl, "multiplexer");
-#else
- asihpi_ctl_init(&snd_control, hpi_ctl, "route");
-#endif
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Route");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
snd_control.info = snd_asihpi_mux_info;
snd_control.get = snd_asihpi_mux_get;
@@ -2254,33 +2248,38 @@ static int __devinit snd_asihpi_mux_add(struct snd_card_asihpi *asihpi,
static int snd_asihpi_cmode_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *mode_names[HPI_CHANNEL_MODE_LAST] = {
- "normal", "swap",
- "from_left", "from_right",
- "to_left", "to_right"
+ static const char * const mode_names[HPI_CHANNEL_MODE_LAST + 1] = {
+ "invalid",
+ "Normal", "Swap",
+ "From Left", "From Right",
+ "To Left", "To Right"
};
u32 h_control = kcontrol->private_value;
u16 mode;
int i;
+ u16 mode_map[6];
+ int valid_modes = 0;
/* HPI channel mode values can be from 1 to 6
Some adapters only support a contiguous subset
*/
for (i = 0; i < HPI_CHANNEL_MODE_LAST; i++)
- if (hpi_channel_mode_query_mode(
- ss, h_control, i, &mode))
- break;
+ if (!hpi_channel_mode_query_mode(
+ h_control, i, &mode)) {
+ mode_map[valid_modes] = mode;
+ valid_modes++;
+ }
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
- uinfo->value.enumerated.items = i;
+ uinfo->value.enumerated.items = valid_modes;
- if (uinfo->value.enumerated.item >= i)
- uinfo->value.enumerated.item = i - 1;
+ if (uinfo->value.enumerated.item >= valid_modes)
+ uinfo->value.enumerated.item = valid_modes - 1;
strcpy(uinfo->value.enumerated.name,
- mode_names[uinfo->value.enumerated.item]);
+ mode_names[mode_map[uinfo->value.enumerated.item]]);
return 0;
}
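/* Worked example (editorial, not part of the patch): if the adapter answers
 * hpi_channel_mode_query_mode() only for HPI modes 1 (Normal) and 2 (Swap),
 * mode_map ends up as {1, 2} and valid_modes is 2, so the ALSA enum exposes
 * two items whose names are looked up through mode_map instead of the raw
 * item index.
 */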
@@ -2291,7 +2290,7 @@ static int snd_asihpi_cmode_get(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
u16 mode;
- if (hpi_channel_mode_get(ss, h_control, &mode))
+ if (hpi_channel_mode_get(h_control, &mode))
mode = 1;
ucontrol->value.enumerated.item[0] = mode - 1;
@@ -2307,7 +2306,7 @@ static int snd_asihpi_cmode_put(struct snd_kcontrol *kcontrol,
change = 1;
- hpi_handle_error(hpi_channel_mode_set(ss, h_control,
+ hpi_handle_error(hpi_channel_mode_set(h_control,
ucontrol->value.enumerated.item[0] + 1));
return change;
}
@@ -2319,7 +2318,7 @@ static int __devinit snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
- asihpi_ctl_init(&snd_control, hpi_ctl, "channel mode");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Mode");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
snd_control.info = snd_asihpi_cmode_info;
snd_control.get = snd_asihpi_cmode_get;
@@ -2331,15 +2330,12 @@ static int __devinit snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
/*------------------------------------------------------------
Sampleclock source controls
------------------------------------------------------------*/
-
-static char *sampleclock_sources[MAX_CLOCKSOURCES] =
- { "N/A", "local PLL", "AES/EBU sync", "word external", "word header",
- "SMPTE", "AES/EBU in1", "auto", "network", "invalid",
- "prev module",
- "AES/EBU in2", "AES/EBU in3", "AES/EBU in4", "AES/EBU in5",
- "AES/EBU in6", "AES/EBU in7", "AES/EBU in8"};
-
-
+static char *sampleclock_sources[MAX_CLOCKSOURCES] = {
+ "N/A", "Local PLL", "Digital Sync", "Word External", "Word Header",
+ "SMPTE", "Digital1", "Auto", "Network", "Invalid",
+ "Prev Module",
+ "Digital2", "Digital3", "Digital4", "Digital5",
+ "Digital6", "Digital7", "Digital8"};
static int snd_asihpi_clksrc_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -2371,11 +2367,11 @@ static int snd_asihpi_clksrc_get(struct snd_kcontrol *kcontrol,
int i;
ucontrol->value.enumerated.item[0] = 0;
- if (hpi_sample_clock_get_source(ss, h_control, &source))
+ if (hpi_sample_clock_get_source(h_control, &source))
source = 0;
if (source == HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT)
- if (hpi_sample_clock_get_source_index(ss, h_control, &srcindex))
+ if (hpi_sample_clock_get_source_index(h_control, &srcindex))
srcindex = 0;
for (i = 0; i < clkcache->count; i++)
@@ -2402,11 +2398,11 @@ static int snd_asihpi_clksrc_put(struct snd_kcontrol *kcontrol,
if (item >= clkcache->count)
item = clkcache->count-1;
- hpi_handle_error(hpi_sample_clock_set_source(ss,
+ hpi_handle_error(hpi_sample_clock_set_source(
h_control, clkcache->s[item].source));
if (clkcache->s[item].source == HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT)
- hpi_handle_error(hpi_sample_clock_set_source_index(ss,
+ hpi_handle_error(hpi_sample_clock_set_source_index(
h_control, clkcache->s[item].index));
return change;
}
@@ -2434,7 +2430,7 @@ static int snd_asihpi_clklocal_get(struct snd_kcontrol *kcontrol,
u32 rate;
u16 e;
- e = hpi_sample_clock_get_local_rate(ss, h_control, &rate);
+ e = hpi_sample_clock_get_local_rate(h_control, &rate);
if (!e)
ucontrol->value.integer.value[0] = rate;
else
@@ -2452,7 +2448,7 @@ static int snd_asihpi_clklocal_put(struct snd_kcontrol *kcontrol,
asihpi->mixer_clkrate[addr][1] != right;
*/
change = 1;
- hpi_handle_error(hpi_sample_clock_set_local_rate(ss, h_control,
+ hpi_handle_error(hpi_sample_clock_set_local_rate(h_control,
ucontrol->value.integer.value[0]));
return change;
}
@@ -2476,7 +2472,7 @@ static int snd_asihpi_clkrate_get(struct snd_kcontrol *kcontrol,
u32 rate;
u16 e;
- e = hpi_sample_clock_get_sample_rate(ss, h_control, &rate);
+ e = hpi_sample_clock_get_sample_rate(h_control, &rate);
if (!e)
ucontrol->value.integer.value[0] = rate;
else
@@ -2501,7 +2497,7 @@ static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
clkcache->has_local = 0;
for (i = 0; i <= HPI_SAMPLECLOCK_SOURCE_LAST; i++) {
- if (hpi_sample_clock_query_source(ss, hSC,
+ if (hpi_sample_clock_query_source(hSC,
i, &source))
break;
clkcache->s[i].source = source;
@@ -2515,7 +2511,7 @@ static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
if (has_aes_in)
/* already will have picked up index 0 above */
for (j = 1; j < 8; j++) {
- if (hpi_sample_clock_query_source_index(ss, hSC,
+ if (hpi_sample_clock_query_source_index(hSC,
j, HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT,
&source))
break;
@@ -2528,7 +2524,7 @@ static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
}
clkcache->count = i;
- asihpi_ctl_init(&snd_control, hpi_ctl, "source");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Source");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE ;
snd_control.info = snd_asihpi_clksrc_info;
snd_control.get = snd_asihpi_clksrc_get;
@@ -2538,7 +2534,7 @@ static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
if (clkcache->has_local) {
- asihpi_ctl_init(&snd_control, hpi_ctl, "local_rate");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Localrate");
snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE ;
snd_control.info = snd_asihpi_clklocal_info;
snd_control.get = snd_asihpi_clklocal_get;
@@ -2549,7 +2545,7 @@ static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
return -EINVAL;
}
- asihpi_ctl_init(&snd_control, hpi_ctl, "rate");
+ asihpi_ctl_init(&snd_control, hpi_ctl, "Rate");
snd_control.access =
SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_READ;
snd_control.info = snd_asihpi_clkrate_info;
@@ -2571,10 +2567,10 @@ static int __devinit snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
if (snd_BUG_ON(!asihpi))
return -EINVAL;
- strcpy(card->mixername, "asihpi mixer");
+ strcpy(card->mixername, "Asihpi Mixer");
err =
- hpi_mixer_open(ss, asihpi->adapter_index,
+ hpi_mixer_open(asihpi->adapter_index,
&asihpi->h_mixer);
hpi_handle_error(err);
if (err)
@@ -2585,7 +2581,7 @@ static int __devinit snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
for (idx = 0; idx < 2000; idx++) {
err = hpi_mixer_get_control_by_index(
- ss, asihpi->h_mixer,
+ asihpi->h_mixer,
idx,
&hpi_ctl.src_node_type,
&hpi_ctl.src_node_index,
@@ -2597,7 +2593,7 @@ static int __devinit snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
if (err == HPI_ERROR_CONTROL_DISABLED) {
if (mixer_dump)
snd_printk(KERN_INFO
- "disabled HPI control(%d)\n",
+ "Disabled HPI Control(%d)\n",
idx);
continue;
} else
@@ -2662,7 +2658,7 @@ static int __devinit snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
default:
if (mixer_dump)
snd_printk(KERN_INFO
- "untranslated HPI control"
+ "Untranslated HPI Control"
"(%d) %d %d %d %d %d\n",
idx,
hpi_ctl.control_type,
@@ -2712,14 +2708,14 @@ snd_asihpi_proc_read(struct snd_info_entry *entry,
version & 0x7,
((version >> 13) * 100) + ((version >> 7) & 0x3f));
- err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ err = hpi_mixer_get_control(asihpi->h_mixer,
HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
HPI_CONTROL_SAMPLECLOCK, &h_control);
if (!err) {
- err = hpi_sample_clock_get_sample_rate(ss,
+ err = hpi_sample_clock_get_sample_rate(
h_control, &rate);
- err += hpi_sample_clock_get_source(ss, h_control, &source);
+ err += hpi_sample_clock_get_source(h_control, &source);
if (!err)
snd_iprintf(buffer, "sample_clock=%d_hz, source %s\n",
@@ -2841,15 +2837,17 @@ static int __devinit snd_asihpi_probe(struct pci_dev *pci_dev,
if (err < 0)
return err;
snd_printk(KERN_WARNING
- "**** WARNING **** adapter index %d->ALSA index %d\n",
+ "**** WARNING **** Adapter index %d->ALSA index %d\n",
hpi_card->index, card->number);
}
+ snd_card_set_dev(card, &pci_dev->dev);
+
asihpi = (struct snd_card_asihpi *) card->private_data;
asihpi->card = card;
- asihpi->pci = hpi_card->pci;
+ asihpi->pci = pci_dev;
asihpi->adapter_index = hpi_card->index;
- hpi_handle_error(hpi_adapter_get_info(ss,
+ hpi_handle_error(hpi_adapter_get_info(
asihpi->adapter_index,
&asihpi->num_outstreams,
&asihpi->num_instreams,
@@ -2859,7 +2857,7 @@ static int __devinit snd_asihpi_probe(struct pci_dev *pci_dev,
version = asihpi->version;
snd_printk(KERN_INFO "adapter ID=%4X index=%d num_outstreams=%d "
"num_instreams=%d S/N=%d\n"
- "hw version %c%d DSP code version %03d\n",
+ "Hw Version %c%d DSP code version %03d\n",
asihpi->type, asihpi->adapter_index,
asihpi->num_outstreams,
asihpi->num_instreams, asihpi->serial_number,
@@ -2871,33 +2869,33 @@ static int __devinit snd_asihpi_probe(struct pci_dev *pci_dev,
if (pcm_substreams < asihpi->num_instreams)
pcm_substreams = asihpi->num_instreams;
- err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ err = hpi_adapter_get_property(asihpi->adapter_index,
HPI_ADAPTER_PROPERTY_CAPS1,
NULL, &asihpi->support_grouping);
if (err)
asihpi->support_grouping = 0;
- err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ err = hpi_adapter_get_property(asihpi->adapter_index,
HPI_ADAPTER_PROPERTY_CAPS2,
&asihpi->support_mrx, NULL);
if (err)
asihpi->support_mrx = 0;
- err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ err = hpi_adapter_get_property(asihpi->adapter_index,
HPI_ADAPTER_PROPERTY_INTERVAL,
NULL, &asihpi->update_interval_frames);
if (err)
asihpi->update_interval_frames = 512;
- hpi_handle_error(hpi_instream_open(ss, asihpi->adapter_index,
+ hpi_handle_error(hpi_instream_open(asihpi->adapter_index,
0, &h_stream));
- err = hpi_instream_host_buffer_free(ss, h_stream);
+ err = hpi_instream_host_buffer_free(h_stream);
asihpi->support_mmap = (!err);
- hpi_handle_error(hpi_instream_close(ss, h_stream));
+ hpi_handle_error(hpi_instream_close(h_stream));
- err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ err = hpi_adapter_get_property(asihpi->adapter_index,
HPI_ADAPTER_PROPERTY_CURCHANNELS,
&asihpi->in_max_chans, &asihpi->out_max_chans);
if (err) {
@@ -2923,13 +2921,13 @@ static int __devinit snd_asihpi_probe(struct pci_dev *pci_dev,
goto __nodev;
}
- err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ err = hpi_mixer_get_control(asihpi->h_mixer,
HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
HPI_CONTROL_SAMPLECLOCK, &h_control);
if (!err)
err = hpi_sample_clock_set_local_rate(
- ss, h_control, adapter_fs);
+ h_control, adapter_fs);
snd_asihpi_proc_init(asihpi);
diff --git a/sound/pci/asihpi/hpi.h b/sound/pci/asihpi/hpi.h
index 23399d02f666..6fc025c448de 100644
--- a/sound/pci/asihpi/hpi.h
+++ b/sound/pci/asihpi/hpi.h
@@ -24,17 +24,10 @@
The HPI is a low-level hardware abstraction layer to all
AudioScience digital audio adapters
-*/
-/*
- You must define one operating system that the HPI is to be compiled under
- HPI_OS_WIN32_USER 32bit Windows
- HPI_OS_DSP_C6000 DSP TI C6000 (automatically set)
- HPI_OS_WDM Windows WDM kernel driver
- HPI_OS_LINUX Linux userspace
- HPI_OS_LINUX_KERNEL Linux kernel (automatically set)
(C) Copyright AudioScience Inc. 1998-2010
-******************************************************************************/
+*/
+
#ifndef _HPI_H_
#define _HPI_H_
/* HPI Version
@@ -50,20 +43,20 @@ i.e 3.05.02 is a development version
#define HPI_VER_RELEASE(v) ((int)(v & 0xFF))
/* Use single digits for versions less that 10 to avoid octal. */
-#define HPI_VER HPI_VERSION_CONSTRUCTOR(4L, 4, 1)
-#define HPI_VER_STRING "4.04.01"
+#define HPI_VER HPI_VERSION_CONSTRUCTOR(4L, 6, 0)
+#define HPI_VER_STRING "4.06.00"
/* Library version as documented in hpi-api-versions.txt */
#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(9, 0, 0)
#include <linux/types.h>
-#define HPI_EXCLUDE_DEPRECATED
+#define HPI_BUILD_EXCLUDE_DEPRECATED
+#define HPI_BUILD_KERNEL_MODE
/******************************************************************************/
-/******************************************************************************/
/******** HPI API DEFINITIONS *****/
/******************************************************************************/
-/******************************************************************************/
+
/*******************************************/
/** Audio format types
\ingroup stream
@@ -174,7 +167,6 @@ The range is +1.0 to -1.0, which corresponds to digital fullscale.
HPI_FORMAT_UNDEFINED = 0xffff
};
-/******************************************* in/out Stream states */
/*******************************************/
/** Stream States
\ingroup stream
@@ -194,7 +186,7 @@ enum HPI_STREAM_STATES {
cards to be ready. */
HPI_STATE_WAIT = 6
};
-/******************************************* mixer source node types */
+/*******************************************/
/** Source node types
\ingroup mixer
*/
@@ -224,7 +216,7 @@ enum HPI_SOURCENODES {
/* AX6 max sourcenode types = 15 */
};
-/******************************************* mixer dest node types */
+/*******************************************/
/** Destination node types
\ingroup mixer
*/
@@ -262,11 +254,11 @@ enum HPI_CONTROLS {
HPI_CONTROL_MUTE = 4, /*mute control - not used at present. */
HPI_CONTROL_MULTIPLEXER = 5, /**< multiplexer control. */
- HPI_CONTROL_AESEBU_TRANSMITTER = 6, /**< AES/EBU transmitter control. */
- HPI_CONTROL_AESEBUTX = HPI_CONTROL_AESEBU_TRANSMITTER,
+ HPI_CONTROL_AESEBU_TRANSMITTER = 6, /**< AES/EBU transmitter control */
+ HPI_CONTROL_AESEBUTX = 6, /* HPI_CONTROL_AESEBU_TRANSMITTER */
HPI_CONTROL_AESEBU_RECEIVER = 7, /**< AES/EBU receiver control. */
- HPI_CONTROL_AESEBURX = HPI_CONTROL_AESEBU_RECEIVER,
+ HPI_CONTROL_AESEBURX = 7, /* HPI_CONTROL_AESEBU_RECEIVER */
HPI_CONTROL_LEVEL = 8, /**< level/trim control - works in d_bu. */
HPI_CONTROL_TUNER = 9, /**< tuner control. */
@@ -281,7 +273,7 @@ enum HPI_CONTROLS {
HPI_CONTROL_SAMPLECLOCK = 17, /**< sample clock control. */
HPI_CONTROL_MICROPHONE = 18, /**< microphone control. */
HPI_CONTROL_PARAMETRIC_EQ = 19, /**< parametric EQ control. */
- HPI_CONTROL_EQUALIZER = HPI_CONTROL_PARAMETRIC_EQ,
+ HPI_CONTROL_EQUALIZER = 19, /*HPI_CONTROL_PARAMETRIC_EQ */
HPI_CONTROL_COMPANDER = 20, /**< compander control. */
HPI_CONTROL_COBRANET = 21, /**< cobranet control. */
@@ -296,10 +288,7 @@ enum HPI_CONTROLS {
/* WARNING types 256 or greater impact bit packing in all AX6 DSP code */
};
-/* Shorthand names that match attribute names */
-
-/******************************************* ADAPTER ATTRIBUTES ****/
-
+/*******************************************/
/** Adapter properties
These are used in HPI_AdapterSetProperty() and HPI_AdapterGetProperty()
\ingroup adapter
@@ -330,12 +319,21 @@ by the driver and is not passed on to the DSP at all.
Indicates the state of the adapter's SSX2 setting. This setting is stored in
non-volatile memory on the adapter. A typical call sequence would be to use
HPI_ADAPTER_PROPERTY_SSX2_SETTING to set SSX2 on the adapter and then to reload
-the driver. The driver would query HPI_ADAPTER_PROPERTY_SSX2_SETTING during startup
-and if SSX2 is set, it would then call HPI_ADAPTER_PROPERTY_ENABLE_SSX2 to enable
-SSX2 stream mapping within the kernel level of the driver.
+the driver. The driver would query HPI_ADAPTER_PROPERTY_SSX2_SETTING during
+startup and if SSX2 is set, it would then call HPI_ADAPTER_PROPERTY_ENABLE_SSX2
+to enable SSX2 stream mapping within the kernel level of the driver.
*/
HPI_ADAPTER_PROPERTY_SSX2_SETTING = 4,
+/** Enables/disables PCI(e) IRQ.
+A setting of 0 indicates that no interrupts are being generated. After a DSP
+boot this property is set to 0. Setting it to a non-zero value specifies the
+number of frames of audio that should be processed between interrupts. This
+property should be set to a multiple of the mixer interval as read back from the
+HPI_ADAPTER_PROPERTY_INTERVAL property.
+*/
+ HPI_ADAPTER_PROPERTY_IRQ_RATE = 5,
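/* Usage sketch (editorial, not part of the patch). The get_property form
 * matches the calls in the driver code above; the set_property form and the
 * choice of parameter carrying the rate are assumptions:
 *
 *	u16 err, interval;
 *	err = hpi_adapter_get_property(adapter_index,
 *		HPI_ADAPTER_PROPERTY_INTERVAL, NULL, &interval);
 *	if (!err)
 *		err = hpi_adapter_set_property(adapter_index,
 *			HPI_ADAPTER_PROPERTY_IRQ_RATE, 4 * interval, 0);
 */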
+
/** Base number for readonly properties */
HPI_ADAPTER_PROPERTY_READONLYBASE = 256,
@@ -440,21 +438,30 @@ return value is true (1) or false (0). If the current adapter
mode is MONO SSX2 is disabled, even though this property will
return true.
*/
- HPI_ADAPTER_PROPERTY_SUPPORTS_SSX2 = 271
+ HPI_ADAPTER_PROPERTY_SUPPORTS_SSX2 = 271,
+/** Readonly supports PCI(e) IRQ.
+Indicates that the adapter in its current mode supports interrupts
+across the host bus. Note, this does not imply that interrupts are
+enabled. Instead it indicates that they can be enabled.
+*/
+ HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ = 272
};
/** Adapter mode commands
-Used in wQueryOrSet field of HPI_AdapterSetModeEx().
+Used in wQueryOrSet parameter of HPI_AdapterSetModeEx().
\ingroup adapter
*/
enum HPI_ADAPTER_MODE_CMDS {
+ /** Set the mode to the given parameter */
HPI_ADAPTER_MODE_SET = 0,
+	/** Return 0 or error depending on whether the mode is valid,
+ but don't set the mode */
HPI_ADAPTER_MODE_QUERY = 1
};
/** Adapter Modes
- These are used by HPI_AdapterSetModeEx()
+ These are used by HPI_AdapterSetModeEx()
\warning - more than 16 possible modes breaks
a bitmask in the Windows WAVE DLL
@@ -629,10 +636,13 @@ enum HPI_MIXER_STORE_COMMAND {
HPI_MIXER_STORE_SAVE_SINGLE = 6
};
-/************************************* CONTROL ATTRIBUTE VALUES ****/
+/****************************/
+/* CONTROL ATTRIBUTE VALUES */
+/****************************/
+
/** Used by mixer plugin enable functions
-E.g. HPI_ParametricEQ_SetState()
+E.g. HPI_ParametricEq_SetState()
\ingroup mixer
*/
enum HPI_SWITCH_STATES {
@@ -641,6 +651,7 @@ enum HPI_SWITCH_STATES {
};
/* Volume control special gain values */
+
/** volumes units are 100ths of a dB
\ingroup volume
*/
@@ -650,6 +661,11 @@ enum HPI_SWITCH_STATES {
*/
#define HPI_GAIN_OFF (-100 * HPI_UNITS_PER_dB)
+/** channel mask specifying all channels
+\ingroup volume
+*/
+#define HPI_BITMASK_ALL_CHANNELS (0xFFFFFFFF)
+
/** value returned for no signal
\ingroup meter
*/
@@ -667,7 +683,7 @@ enum HPI_VOLUME_AUTOFADES {
/** The physical encoding format of the AESEBU I/O.
-Used in HPI_AESEBU_Transmitter_SetFormat(), HPI_AESEBU_Receiver_SetFormat()
+Used in HPI_Aesebu_Transmitter_SetFormat(), HPI_Aesebu_Receiver_SetFormat()
along with related Get and Query functions
\ingroup aestx
*/
@@ -680,7 +696,7 @@ enum HPI_AESEBU_FORMATS {
/** AES/EBU error status bits
-Returned by HPI_AESEBU_Receiver_GetErrorStatus()
+Returned by HPI_Aesebu_Receiver_GetErrorStatus()
\ingroup aesrx
*/
enum HPI_AESEBU_ERRORS {
@@ -767,14 +783,6 @@ enum HPI_TUNER_MODE_VALUES {
HPI_TUNER_MODE_RDS_RBDS = 2 /**< RDS - RBDS mode */
};
-/** Tuner Level settings
-\ingroup tuner
-*/
-enum HPI_TUNER_LEVEL {
- HPI_TUNER_LEVEL_AVERAGE = 0,
- HPI_TUNER_LEVEL_RAW = 1
-};
-
/** Tuner Status Bits
These bitfield values are returned by a call to HPI_Tuner_GetStatus().
@@ -783,13 +791,13 @@ Multiple fields are returned from a single call.
*/
enum HPI_TUNER_STATUS_BITS {
HPI_TUNER_VIDEO_COLOR_PRESENT = 0x0001, /**< video color is present. */
- HPI_TUNER_VIDEO_IS_60HZ = 0x0020, /**< 60 hz video detected. */
- HPI_TUNER_VIDEO_HORZ_SYNC_MISSING = 0x0040, /**< video HSYNC is missing. */
- HPI_TUNER_VIDEO_STATUS_VALID = 0x0100, /**< video status is valid. */
- HPI_TUNER_PLL_LOCKED = 0x1000, /**< the tuner's PLL is locked. */
- HPI_TUNER_FM_STEREO = 0x2000, /**< tuner reports back FM stereo. */
- HPI_TUNER_DIGITAL = 0x0200, /**< tuner reports digital programming. */
- HPI_TUNER_MULTIPROGRAM = 0x0400 /**< tuner reports multiple programs. */
+ HPI_TUNER_VIDEO_IS_60HZ = 0x0020, /**< 60 hz video detected. */
+ HPI_TUNER_VIDEO_HORZ_SYNC_MISSING = 0x0040, /**< video HSYNC is missing. */
+ HPI_TUNER_VIDEO_STATUS_VALID = 0x0100, /**< video status is valid. */
+ HPI_TUNER_DIGITAL = 0x0200, /**< tuner reports digital programming. */
+ HPI_TUNER_MULTIPROGRAM = 0x0400, /**< tuner reports multiple programs. */
+ HPI_TUNER_PLL_LOCKED = 0x1000, /**< the tuner's PLL is locked. */
+ HPI_TUNER_FM_STEREO = 0x2000 /**< tuner reports back FM stereo. */
};
/** Channel Modes
@@ -839,7 +847,7 @@ enum HPI_SAMPLECLOCK_SOURCES {
HPI_SAMPLECLOCK_SOURCE_LAST = 10
};
-/** Equalizer filter types. Used by HPI_ParametricEQ_SetBand()
+/** Equalizer filter types. Used by HPI_ParametricEq_SetBand()
\ingroup parmeq
*/
enum HPI_FILTER_TYPE {
@@ -882,7 +890,7 @@ enum HPI_ERROR_CODES {
HPI_ERROR_INVALID_OBJ = 101,
/** Function does not exist. */
HPI_ERROR_INVALID_FUNC = 102,
- /** The specified object (adapter/Stream) does not exist. */
+ /** The specified object does not exist. */
HPI_ERROR_INVALID_OBJ_INDEX = 103,
/** Trying to access an object that has not been opened yet. */
HPI_ERROR_OBJ_NOT_OPEN = 104,
@@ -890,8 +898,7 @@ enum HPI_ERROR_CODES {
HPI_ERROR_OBJ_ALREADY_OPEN = 105,
/** PCI, ISA resource not valid. */
HPI_ERROR_INVALID_RESOURCE = 106,
- /** GetInfo call from SubSysFindAdapters failed. */
- HPI_ERROR_SUBSYSFINDADAPTERS_GETINFO = 107,
+ /* HPI_ERROR_SUBSYSFINDADAPTERS_GETINFO= 107 */
/** Default response was never updated with actual error code. */
HPI_ERROR_INVALID_RESPONSE = 108,
/** wSize field of response was not updated,
@@ -899,38 +906,44 @@ enum HPI_ERROR_CODES {
HPI_ERROR_PROCESSING_MESSAGE = 109,
/** The network did not respond in a timely manner. */
HPI_ERROR_NETWORK_TIMEOUT = 110,
- /** An HPI handle is invalid (uninitialised?). */
+ /* An HPI handle is invalid (uninitialised?). */
HPI_ERROR_INVALID_HANDLE = 111,
/** A function or attribute has not been implemented yet. */
HPI_ERROR_UNIMPLEMENTED = 112,
- /** There are too many clients attempting to access a network resource. */
+ /** There are too many clients attempting
+ to access a network resource. */
HPI_ERROR_NETWORK_TOO_MANY_CLIENTS = 113,
- /** Response buffer passed to HPI_Message was smaller than returned response */
+ /** Response buffer passed to HPI_Message
+ was smaller than returned response.
+ wSpecificError field of hpi response contains the required size.
+ */
HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL = 114,
/** The returned response did not match the sent message */
HPI_ERROR_RESPONSE_MISMATCH = 115,
+ /** A control setting that should have been cached was not. */
+ HPI_ERROR_CONTROL_CACHING = 116,
+ /** A message buffer in the path to the adapter was smaller
+ than the message size.
+ wSpecificError field of hpi response contains the actual size.
+ */
+ HPI_ERROR_MESSAGE_BUFFER_TOO_SMALL = 117,
- /** Too many adapters.*/
- HPI_ERROR_TOO_MANY_ADAPTERS = 200,
+ /* HPI_ERROR_TOO_MANY_ADAPTERS= 200 */
	/** Bad adapter. */
HPI_ERROR_BAD_ADAPTER = 201,
/** Adapter number out of range or not set properly. */
HPI_ERROR_BAD_ADAPTER_NUMBER = 202,
/** 2 adapters with the same adapter number. */
- HPI_DUPLICATE_ADAPTER_NUMBER = 203,
- /** DSP code failed to bootload. */
+ HPI_ERROR_DUPLICATE_ADAPTER_NUMBER = 203,
+ /** DSP code failed to bootload. (unused?) */
HPI_ERROR_DSP_BOOTLOAD = 204,
- /** Adapter failed DSP code self test. */
- HPI_ERROR_DSP_SELFTEST = 205,
/** Couldn't find or open the DSP code file. */
HPI_ERROR_DSP_FILE_NOT_FOUND = 206,
/** Internal DSP hardware error. */
HPI_ERROR_DSP_HARDWARE = 207,
- /** Could not allocate memory in DOS. */
- HPI_ERROR_DOS_MEMORY_ALLOC = 208,
/** Could not allocate memory */
HPI_ERROR_MEMORY_ALLOC = 208,
- /** Failed to correctly load/config PLD .*/
+ /** Failed to correctly load/config PLD. (unused) */
HPI_ERROR_PLD_LOAD = 209,
/** Unexpected end of file, block length too big etc. */
HPI_ERROR_DSP_FILE_FORMAT = 210,
@@ -939,8 +952,7 @@ enum HPI_ERROR_CODES {
HPI_ERROR_DSP_FILE_ACCESS_DENIED = 211,
/** First DSP code section header not found in DSP file. */
HPI_ERROR_DSP_FILE_NO_HEADER = 212,
- /** File read operation on DSP code file failed. */
- HPI_ERROR_DSP_FILE_READ_ERROR = 213,
+ /* HPI_ERROR_DSP_FILE_READ_ERROR= 213, */
/** DSP code for adapter family not found. */
HPI_ERROR_DSP_SECTION_NOT_FOUND = 214,
/** Other OS specific error opening DSP file. */
@@ -950,23 +962,21 @@ enum HPI_ERROR_CODES {
/** DSP code section header had size == 0. */
HPI_ERROR_DSP_FILE_NULL_HEADER = 217,
- /** Base number for flash errors. */
- HPI_ERROR_FLASH = 220,
+ /* HPI_ERROR_FLASH = 220, */
/** Flash has bad checksum */
- HPI_ERROR_BAD_CHECKSUM = (HPI_ERROR_FLASH + 1),
- HPI_ERROR_BAD_SEQUENCE = (HPI_ERROR_FLASH + 2),
- HPI_ERROR_FLASH_ERASE = (HPI_ERROR_FLASH + 3),
- HPI_ERROR_FLASH_PROGRAM = (HPI_ERROR_FLASH + 4),
- HPI_ERROR_FLASH_VERIFY = (HPI_ERROR_FLASH + 5),
- HPI_ERROR_FLASH_TYPE = (HPI_ERROR_FLASH + 6),
- HPI_ERROR_FLASH_START = (HPI_ERROR_FLASH + 7),
+ HPI_ERROR_BAD_CHECKSUM = 221,
+ HPI_ERROR_BAD_SEQUENCE = 222,
+ HPI_ERROR_FLASH_ERASE = 223,
+ HPI_ERROR_FLASH_PROGRAM = 224,
+ HPI_ERROR_FLASH_VERIFY = 225,
+ HPI_ERROR_FLASH_TYPE = 226,
+ HPI_ERROR_FLASH_START = 227,
/** Reserved for OEMs. */
HPI_ERROR_RESERVED_1 = 290,
- /** Stream does not exist. */
- HPI_ERROR_INVALID_STREAM = 300,
+ /* HPI_ERROR_INVALID_STREAM = 300 use HPI_ERROR_INVALID_OBJ_INDEX */
/** Invalid compression format. */
HPI_ERROR_INVALID_FORMAT = 301,
/** Invalid format samplerate */
@@ -977,21 +987,19 @@ enum HPI_ERROR_CODES {
HPI_ERROR_INVALID_BITRATE = 304,
/** Invalid datasize used for stream read/write. */
HPI_ERROR_INVALID_DATASIZE = 305,
- /** Stream buffer is full during stream write. */
- HPI_ERROR_BUFFER_FULL = 306,
- /** Stream buffer is empty during stream read. */
- HPI_ERROR_BUFFER_EMPTY = 307,
- /** Invalid datasize used for stream read/write. */
- HPI_ERROR_INVALID_DATA_TRANSFER = 308,
+ /* HPI_ERROR_BUFFER_FULL = 306 use HPI_ERROR_INVALID_DATASIZE */
+ /* HPI_ERROR_BUFFER_EMPTY = 307 use HPI_ERROR_INVALID_DATASIZE */
+ /** Null data pointer used for stream read/write. */
+ HPI_ERROR_INVALID_DATA_POINTER = 308,
/** Packet ordering error for stream read/write. */
HPI_ERROR_INVALID_PACKET_ORDER = 309,
/** Object can't do requested operation in its current
- state, eg set format, change rec mux state while recording.*/
+ state, eg set format, change rec mux state while recording.*/
HPI_ERROR_INVALID_OPERATION = 310,
- /** Where an SRG is shared amongst streams, an incompatible samplerate is one
- that is different to any currently playing or recording stream. */
+	/** Where an SRG is shared amongst streams, an incompatible samplerate
+ is one that is different to any currently active stream. */
HPI_ERROR_INCOMPATIBLE_SAMPLERATE = 311,
/** Adapter mode is illegal.*/
HPI_ERROR_BAD_ADAPTER_MODE = 312,
@@ -1004,6 +1012,8 @@ enum HPI_ERROR_CODES {
HPI_ERROR_NO_INTERADAPTER_GROUPS = 314,
/** Streams on different DSPs cannot be grouped. */
HPI_ERROR_NO_INTERDSP_GROUPS = 315,
+ /** Stream wait cancelled before threshold reached. */
+ HPI_ERROR_WAIT_CANCELLED = 316,
/** Invalid mixer node for this adapter. */
HPI_ERROR_INVALID_NODE = 400,
@@ -1017,6 +1027,7 @@ enum HPI_ERROR_CODES {
HPI_ERROR_CONTROL_DISABLED = 404,
/** I2C transaction failed due to a missing ACK. */
HPI_ERROR_CONTROL_I2C_MISSING_ACK = 405,
+ HPI_ERROR_I2C_MISSING_ACK = 405,
/** Control is busy, or coming out of
reset and cannot be accessed at this time. */
HPI_ERROR_CONTROL_NOT_READY = 407,
@@ -1027,7 +1038,6 @@ enum HPI_ERROR_CODES {
HPI_ERROR_NVMEM_FAIL = 452,
/** I2C */
- HPI_ERROR_I2C_MISSING_ACK = HPI_ERROR_CONTROL_I2C_MISSING_ACK,
HPI_ERROR_I2C_BAD_ADR = 460,
/** Entity errors */
@@ -1035,6 +1045,7 @@ enum HPI_ERROR_CODES {
HPI_ERROR_ENTITY_ITEM_COUNT = 471,
HPI_ERROR_ENTITY_TYPE_INVALID = 472,
HPI_ERROR_ENTITY_ROLE_INVALID = 473,
+ HPI_ERROR_ENTITY_SIZE_MISMATCH = 474,
/* AES18 specific errors were 500..507 */
@@ -1044,11 +1055,18 @@ enum HPI_ERROR_CODES {
/** hpioct32.c can't obtain mutex */
HPI_ERROR_MUTEX_TIMEOUT = 700,
- /** errors from HPI backends have values >= this */
+ /** Backend errors used to be greater than this.
+ \deprecated Now, all backends return only errors defined here in hpi.h
+ */
HPI_ERROR_BACKEND_BASE = 900,
- /** indicates a cached u16 value is invalid. */
- HPI_ERROR_ILLEGAL_CACHE_VALUE = 0xffff
+ /** Communication with DSP failed */
+ HPI_ERROR_DSP_COMMUNICATION = 900
+ /* Note that the dsp communication error is set to this value so that
+ it remains compatible with any software that expects such errors
+ to be backend errors i.e. >= 900.
+ Do not define any new error codes with values > 900.
+ */
};
/** \defgroup maximums HPI maximum values
@@ -1075,7 +1093,7 @@ enum HPI_ERROR_CODES {
/**\}*/
-/* ////////////////////////////////////////////////////////////////////// */
+/**************/
/* STRUCTURES */
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
@@ -1092,7 +1110,7 @@ struct hpi_format {
/**< Stereo/JointStereo/Mono */
u16 mode_legacy;
/**< Legacy ancillary mode or idle bit */
- u16 unused; /**< unused */
+ u16 unused; /**< Unused */
u16 channels; /**< 1,2..., (or ancillary mode or idle bit */
u16 format; /**< HPI_FORMAT_PCM16, _MPEG etc. see #HPI_FORMATS. */
};
@@ -1106,930 +1124,594 @@ struct hpi_anc_frame {
*/
struct hpi_async_event {
u16 event_type; /**< type of event. \sa async_event */
- u16 sequence; /**< sequence number, allows lost event detection */
- u32 state; /**< new state */
- u32 h_object; /**< handle to the object returning the event. */
+ u16 sequence; /**< Sequence number, allows lost event detection */
+ u32 state; /**< New state */
+ u32 h_object; /**< handle to the object returning the event. */
union {
struct {
u16 index; /**< GPIO bit index. */
} gpio;
struct {
u16 node_index; /**< what node is the control on ? */
- u16 node_type; /**< what type of node is the control on ? */
+ u16 node_type; /**< what type of node is the control on ? */
} control;
} u;
};
-/*/////////////////////////////////////////////////////////////////////////// */
-/* Public HPI Entity related definitions */
-
-struct hpi_entity;
-
-enum e_entity_type {
- entity_type_null,
- entity_type_sequence, /* sequence of potentially heterogeneous TLV entities */
-
- entity_type_reference, /* refers to a TLV entity or NULL */
-
- entity_type_int, /* 32 bit */
- entity_type_float, /* ieee754 binary 32 bit encoding */
- entity_type_double,
-
- entity_type_cstring,
- entity_type_octet,
- entity_type_ip4_address,
- entity_type_ip6_address,
- entity_type_mac_address,
-
- LAST_ENTITY_TYPE
-};
-
-enum e_entity_role {
- entity_role_null,
- entity_role_value,
- entity_role_classname,
-
- entity_role_units,
- entity_role_flags,
- entity_role_range,
-
- entity_role_mapping,
- entity_role_enum,
-
- entity_role_instance_of,
- entity_role_depends_on,
- entity_role_member_of_group,
- entity_role_value_constraint,
- entity_role_parameter_port,
-
- entity_role_block,
- entity_role_node_group,
- entity_role_audio_port,
- entity_role_clock_port,
- LAST_ENTITY_ROLE
-};
-
/* skip host side function declarations for
DSP compile and documentation extraction */
-struct hpi_hsubsys {
- int not_really_used;
-};
-
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif
-/*////////////////////////////////////////////////////////////////////////// */
+/*****************/
/* HPI FUNCTIONS */
+/*****************/
-/*/////////////////////////// */
-/* DATA and FORMAT and STREAM */
-
+/* Stream */
u16 hpi_stream_estimate_buffer_size(struct hpi_format *pF,
u32 host_polling_rate_in_milli_seconds, u32 *recommended_buffer_size);
-/*/////////// */
-/* SUB SYSTEM */
-struct hpi_hsubsys *hpi_subsys_create(void
- );
-
-void hpi_subsys_free(const struct hpi_hsubsys *ph_subsys);
-
-u16 hpi_subsys_get_version(const struct hpi_hsubsys *ph_subsys,
- u32 *pversion);
-
-u16 hpi_subsys_get_version_ex(const struct hpi_hsubsys *ph_subsys,
- u32 *pversion_ex);
-
-u16 hpi_subsys_get_info(const struct hpi_hsubsys *ph_subsys, u32 *pversion,
- u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length);
-
-u16 hpi_subsys_find_adapters(const struct hpi_hsubsys *ph_subsys,
- u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length);
-
-u16 hpi_subsys_get_num_adapters(const struct hpi_hsubsys *ph_subsys,
- int *pn_num_adapters);
-
-u16 hpi_subsys_get_adapter(const struct hpi_hsubsys *ph_subsys, int iterator,
- u32 *padapter_index, u16 *pw_adapter_type);
-
-u16 hpi_subsys_ssx2_bypass(const struct hpi_hsubsys *ph_subsys, u16 bypass);
+/*************/
+/* SubSystem */
+/*************/
-u16 hpi_subsys_set_host_network_interface(const struct hpi_hsubsys *ph_subsys,
- const char *sz_interface);
+u16 hpi_subsys_get_version_ex(u32 *pversion_ex);
-/*///////// */
-/* ADAPTER */
+u16 hpi_subsys_get_num_adapters(int *pn_num_adapters);
-u16 hpi_adapter_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index);
+u16 hpi_subsys_get_adapter(int iterator, u32 *padapter_index,
+ u16 *pw_adapter_type);
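The recurring change through the rest of this header is the removal of the leading const struct hpi_hsubsys *ph_subsys argument from every prototype; the subsystem object carried no real state (see the removed not_really_used member). A hedged sketch of adapter enumeration against the new declarations, with error handling omitted:

/* Illustrative only: enumerate and open every adapter with the new API. */
int n, i;
u32 adapter_index;
u16 adapter_type;

if (hpi_subsys_get_num_adapters(&n) == 0) {	/* 0 == success */
	for (i = 0; i < n; i++) {
		/* formerly: hpi_subsys_get_adapter(ph_subsys, i, ...) */
		if (hpi_subsys_get_adapter(i, &adapter_index, &adapter_type))
			continue;
		hpi_adapter_open((u16)adapter_index);
	}
}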
-u16 hpi_adapter_close(const struct hpi_hsubsys *ph_subsys, u16 adapter_index);
+/***********/
+/* Adapter */
+/***********/
-u16 hpi_adapter_get_info(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 *pw_num_outstreams, u16 *pw_num_instreams,
- u16 *pw_version, u32 *pserial_number, u16 *pw_adapter_type);
+u16 hpi_adapter_open(u16 adapter_index);
-u16 hpi_adapter_get_module_by_index(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 module_index, u16 *pw_num_outputs,
- u16 *pw_num_inputs, u16 *pw_version, u32 *pserial_number,
- u16 *pw_module_type, u32 *ph_module);
+u16 hpi_adapter_close(u16 adapter_index);
-u16 hpi_adapter_set_mode(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 adapter_mode);
+u16 hpi_adapter_get_info(u16 adapter_index, u16 *pw_num_outstreams,
+ u16 *pw_num_instreams, u16 *pw_version, u32 *pserial_number,
+ u16 *pw_adapter_type);
-u16 hpi_adapter_set_mode_ex(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 adapter_mode, u16 query_or_set);
-
-u16 hpi_adapter_get_mode(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 *padapter_mode);
-
-u16 hpi_adapter_get_assert(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 *assert_present, char *psz_assert,
- u16 *pw_line_number);
-
-u16 hpi_adapter_get_assert_ex(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 *assert_present, char *psz_assert,
- u32 *pline_number, u16 *pw_assert_on_dsp);
-
-u16 hpi_adapter_test_assert(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 assert_id);
-
-u16 hpi_adapter_enable_capability(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 capability, u32 key);
-
-u16 hpi_adapter_self_test(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index);
-
-u16 hpi_adapter_debug_read(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 dsp_address, char *p_bytes, int *count_bytes);
-
-u16 hpi_adapter_set_property(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 property, u16 paramter1, u16 paramter2);
-
-u16 hpi_adapter_get_property(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 property, u16 *pw_paramter1,
- u16 *pw_paramter2);
-
-u16 hpi_adapter_enumerate_property(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 index, u16 what_to_enumerate,
- u16 property_index, u32 *psetting);
-
-/*////////////// */
-/* NonVol Memory */
-u16 hpi_nv_memory_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_nv_memory, u16 *pw_size_in_bytes);
-
-u16 hpi_nv_memory_read_byte(const struct hpi_hsubsys *ph_subsys,
- u32 h_nv_memory, u16 index, u16 *pw_data);
-
-u16 hpi_nv_memory_write_byte(const struct hpi_hsubsys *ph_subsys,
- u32 h_nv_memory, u16 index, u16 data);
-
-/*////////////// */
-/* Digital I/O */
-u16 hpi_gpio_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_gpio, u16 *pw_number_input_bits, u16 *pw_number_output_bits);
-
-u16 hpi_gpio_read_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 bit_index, u16 *pw_bit_data);
-
-u16 hpi_gpio_read_all_bits(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 aw_all_bit_data[4]
- );
+u16 hpi_adapter_get_module_by_index(u16 adapter_index, u16 module_index,
+ u16 *pw_num_outputs, u16 *pw_num_inputs, u16 *pw_version,
+ u32 *pserial_number, u16 *pw_module_type, u32 *ph_module);
-u16 hpi_gpio_write_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 bit_index, u16 bit_data);
+u16 hpi_adapter_set_mode(u16 adapter_index, u32 adapter_mode);
-u16 hpi_gpio_write_status(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 aw_all_bit_data[4]
- );
+u16 hpi_adapter_set_mode_ex(u16 adapter_index, u32 adapter_mode,
+ u16 query_or_set);
-/**********************/
-/* Async Event Object */
-/**********************/
-u16 hpi_async_event_open(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 *ph_async);
+u16 hpi_adapter_get_mode(u16 adapter_index, u32 *padapter_mode);
-u16 hpi_async_event_close(const struct hpi_hsubsys *ph_subsys, u32 h_async);
+u16 hpi_adapter_get_assert2(u16 adapter_index, u16 *p_assert_count,
+ char *psz_assert, u32 *p_param1, u32 *p_param2,
+ u32 *p_dsp_string_addr, u16 *p_processor_id);
-u16 hpi_async_event_wait(const struct hpi_hsubsys *ph_subsys, u32 h_async,
- u16 maximum_events, struct hpi_async_event *p_events,
- u16 *pw_number_returned);
+u16 hpi_adapter_test_assert(u16 adapter_index, u16 assert_id);
-u16 hpi_async_event_get_count(const struct hpi_hsubsys *ph_subsys,
- u32 h_async, u16 *pw_count);
+u16 hpi_adapter_enable_capability(u16 adapter_index, u16 capability, u32 key);
-u16 hpi_async_event_get(const struct hpi_hsubsys *ph_subsys, u32 h_async,
- u16 maximum_events, struct hpi_async_event *p_events,
- u16 *pw_number_returned);
+u16 hpi_adapter_self_test(u16 adapter_index);
-/*/////////// */
-/* WATCH-DOG */
-u16 hpi_watchdog_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_watchdog);
+u16 hpi_adapter_debug_read(u16 adapter_index, u32 dsp_address, char *p_bytes,
+ int *count_bytes);
-u16 hpi_watchdog_set_time(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog,
- u32 time_millisec);
+u16 hpi_adapter_set_property(u16 adapter_index, u16 property, u16 paramter1,
+ u16 paramter2);
-u16 hpi_watchdog_ping(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog);
+u16 hpi_adapter_get_property(u16 adapter_index, u16 property,
+ u16 *pw_paramter1, u16 *pw_paramter2);
-/**************/
-/* OUT STREAM */
-/**************/
-u16 hpi_outstream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u16 outstream_index, u32 *ph_outstream);
+u16 hpi_adapter_enumerate_property(u16 adapter_index, u16 index,
+ u16 what_to_enumerate, u16 property_index, u32 *psetting);
+/*************/
+/* OutStream */
+/*************/
+u16 hpi_outstream_open(u16 adapter_index, u16 outstream_index,
+ u32 *ph_outstream);
-u16 hpi_outstream_close(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+u16 hpi_outstream_close(u32 h_outstream);
-u16 hpi_outstream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_to_play,
- u32 *psamples_played, u32 *pauxiliary_data_to_play);
+u16 hpi_outstream_get_info_ex(u32 h_outstream, u16 *pw_state,
+ u32 *pbuffer_size, u32 *pdata_to_play, u32 *psamples_played,
+ u32 *pauxiliary_data_to_play);
-u16 hpi_outstream_write_buf(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, const u8 *pb_write_buf, u32 bytes_to_write,
- const struct hpi_format *p_format);
+u16 hpi_outstream_write_buf(u32 h_outstream, const u8 *pb_write_buf,
+ u32 bytes_to_write, const struct hpi_format *p_format);
-u16 hpi_outstream_start(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+u16 hpi_outstream_start(u32 h_outstream);
-u16 hpi_outstream_wait_start(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream);
+u16 hpi_outstream_wait_start(u32 h_outstream);
-u16 hpi_outstream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+u16 hpi_outstream_stop(u32 h_outstream);
-u16 hpi_outstream_sinegen(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream);
+u16 hpi_outstream_sinegen(u32 h_outstream);
-u16 hpi_outstream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+u16 hpi_outstream_reset(u32 h_outstream);
-u16 hpi_outstream_query_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, struct hpi_format *p_format);
+u16 hpi_outstream_query_format(u32 h_outstream, struct hpi_format *p_format);
-u16 hpi_outstream_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, struct hpi_format *p_format);
+u16 hpi_outstream_set_format(u32 h_outstream, struct hpi_format *p_format);
-u16 hpi_outstream_set_punch_in_out(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 punch_in_sample, u32 punch_out_sample);
+u16 hpi_outstream_set_punch_in_out(u32 h_outstream, u32 punch_in_sample,
+ u32 punch_out_sample);
-u16 hpi_outstream_set_velocity(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, short velocity);
+u16 hpi_outstream_set_velocity(u32 h_outstream, short velocity);
-u16 hpi_outstream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u16 mode);
+u16 hpi_outstream_ancillary_reset(u32 h_outstream, u16 mode);
-u16 hpi_outstream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 *pframes_available);
+u16 hpi_outstream_ancillary_get_info(u32 h_outstream, u32 *pframes_available);
-u16 hpi_outstream_ancillary_read(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, struct hpi_anc_frame *p_anc_frame_buffer,
+u16 hpi_outstream_ancillary_read(u32 h_outstream,
+ struct hpi_anc_frame *p_anc_frame_buffer,
u32 anc_frame_buffer_size_in_bytes,
u32 number_of_ancillary_frames_to_read);
-u16 hpi_outstream_set_time_scale(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 time_scaleX10000);
+u16 hpi_outstream_set_time_scale(u32 h_outstream, u32 time_scaleX10000);
-u16 hpi_outstream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 size_in_bytes);
+u16 hpi_outstream_host_buffer_allocate(u32 h_outstream, u32 size_in_bytes);
-u16 hpi_outstream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream);
+u16 hpi_outstream_host_buffer_free(u32 h_outstream);
-u16 hpi_outstream_group_add(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 h_stream);
+u16 hpi_outstream_group_add(u32 h_outstream, u32 h_stream);
-u16 hpi_outstream_group_get_map(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 *poutstream_map, u32 *pinstream_map);
+u16 hpi_outstream_group_get_map(u32 h_outstream, u32 *poutstream_map,
+ u32 *pinstream_map);
-u16 hpi_outstream_group_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream);
+u16 hpi_outstream_group_reset(u32 h_outstream);
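Taken together, the OutStream declarations above cover the normal playback sequence. A minimal sketch of how they chain with the new handle-only prototypes; pcm_data/pcm_bytes stand in for caller-supplied audio, the format constant is the usual hpi.h name, and all error checking is omitted:

/* Illustrative playback skeleton - not part of this patch. */
u32 h_out;
struct hpi_format fmt;

hpi_format_create(&fmt, 2, HPI_FORMAT_PCM16_SIGNED, 44100, 0, 0);
hpi_outstream_open(0, 0, &h_out);		/* adapter 0, outstream 0 */
hpi_outstream_write_buf(h_out, pcm_data, pcm_bytes, &fmt);
hpi_outstream_start(h_out);
/* ... poll hpi_outstream_get_info_ex() until data_to_play drains ... */
hpi_outstream_stop(h_out);
hpi_outstream_close(h_out);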
-/*////////// */
-/* IN_STREAM */
-u16 hpi_instream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u16 instream_index, u32 *ph_instream);
+/************/
+/* InStream */
+/************/
+u16 hpi_instream_open(u16 adapter_index, u16 instream_index,
+ u32 *ph_instream);
-u16 hpi_instream_close(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+u16 hpi_instream_close(u32 h_instream);
-u16 hpi_instream_query_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, const struct hpi_format *p_format);
+u16 hpi_instream_query_format(u32 h_instream,
+ const struct hpi_format *p_format);
-u16 hpi_instream_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, const struct hpi_format *p_format);
+u16 hpi_instream_set_format(u32 h_instream,
+ const struct hpi_format *p_format);
-u16 hpi_instream_read_buf(const struct hpi_hsubsys *ph_subsys, u32 h_instream,
- u8 *pb_read_buf, u32 bytes_to_read);
+u16 hpi_instream_read_buf(u32 h_instream, u8 *pb_read_buf, u32 bytes_to_read);
-u16 hpi_instream_start(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+u16 hpi_instream_start(u32 h_instream);
-u16 hpi_instream_wait_start(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream);
+u16 hpi_instream_wait_start(u32 h_instream);
-u16 hpi_instream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+u16 hpi_instream_stop(u32 h_instream);
-u16 hpi_instream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+u16 hpi_instream_reset(u32 h_instream);
-u16 hpi_instream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_recorded,
- u32 *psamples_recorded, u32 *pauxiliary_data_recorded);
+u16 hpi_instream_get_info_ex(u32 h_instream, u16 *pw_state, u32 *pbuffer_size,
+ u32 *pdata_recorded, u32 *psamples_recorded,
+ u32 *pauxiliary_data_recorded);
-u16 hpi_instream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u16 bytes_per_frame, u16 mode, u16 alignment,
- u16 idle_bit);
+u16 hpi_instream_ancillary_reset(u32 h_instream, u16 bytes_per_frame,
+ u16 mode, u16 alignment, u16 idle_bit);
-u16 hpi_instream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 *pframe_space);
+u16 hpi_instream_ancillary_get_info(u32 h_instream, u32 *pframe_space);
-u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, const struct hpi_anc_frame *p_anc_frame_buffer,
+u16 hpi_instream_ancillary_write(u32 h_instream,
+ const struct hpi_anc_frame *p_anc_frame_buffer,
u32 anc_frame_buffer_size_in_bytes,
u32 number_of_ancillary_frames_to_write);
-u16 hpi_instream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 size_in_bytes);
+u16 hpi_instream_host_buffer_allocate(u32 h_instream, u32 size_in_bytes);
-u16 hpi_instream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream);
+u16 hpi_instream_host_buffer_free(u32 h_instream);
-u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 h_stream);
+u16 hpi_instream_group_add(u32 h_instream, u32 h_stream);
-u16 hpi_instream_group_get_map(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 *poutstream_map, u32 *pinstream_map);
+u16 hpi_instream_group_get_map(u32 h_instream, u32 *poutstream_map,
+ u32 *pinstream_map);
-u16 hpi_instream_group_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream);
+u16 hpi_instream_group_reset(u32 h_instream);
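The InStream declarations mirror the OutStream set for capture. A hedged sketch of a single read pass; buf is an assumed caller-supplied byte buffer and error checks are omitted:

/* Illustrative capture skeleton - not part of this patch. */
u32 h_in, buf_size, recorded, samples, aux;
u16 state;
struct hpi_format fmt;

hpi_format_create(&fmt, 2, HPI_FORMAT_PCM16_SIGNED, 48000, 0, 0);
hpi_instream_open(0, 0, &h_in);			/* adapter 0, instream 0 */
hpi_instream_set_format(h_in, &fmt);
hpi_instream_start(h_in);
hpi_instream_get_info_ex(h_in, &state, &buf_size, &recorded, &samples, &aux);
hpi_instream_read_buf(h_in, buf, recorded);	/* read what has been recorded */
hpi_instream_stop(h_in);
hpi_instream_close(h_in);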
/*********/
-/* MIXER */
+/* Mixer */
/*********/
-u16 hpi_mixer_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_mixer);
-
-u16 hpi_mixer_close(const struct hpi_hsubsys *ph_subsys, u32 h_mixer);
-
-u16 hpi_mixer_get_control(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
- u16 src_node_type, u16 src_node_type_index, u16 dst_node_type,
- u16 dst_node_type_index, u16 control_type, u32 *ph_control);
-
-u16 hpi_mixer_get_control_by_index(const struct hpi_hsubsys *ph_subsys,
- u32 h_mixer, u16 control_index, u16 *pw_src_node_type,
- u16 *pw_src_node_index, u16 *pw_dst_node_type, u16 *pw_dst_node_index,
- u16 *pw_control_type, u32 *ph_control);
-
-u16 hpi_mixer_store(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
- enum HPI_MIXER_STORE_COMMAND command, u16 index);
-/*************************/
-/* mixer CONTROLS */
-/*************************/
-/*************************/
-/* volume control */
-/*************************/
-u16 hpi_volume_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_gain0_01dB[HPI_MAX_CHANNELS]
+u16 hpi_mixer_open(u16 adapter_index, u32 *ph_mixer);
+
+u16 hpi_mixer_close(u32 h_mixer);
+
+u16 hpi_mixer_get_control(u32 h_mixer, u16 src_node_type,
+ u16 src_node_type_index, u16 dst_node_type, u16 dst_node_type_index,
+ u16 control_type, u32 *ph_control);
+
+u16 hpi_mixer_get_control_by_index(u32 h_mixer, u16 control_index,
+ u16 *pw_src_node_type, u16 *pw_src_node_index, u16 *pw_dst_node_type,
+ u16 *pw_dst_node_index, u16 *pw_control_type, u32 *ph_control);
+
+u16 hpi_mixer_store(u32 h_mixer, enum HPI_MIXER_STORE_COMMAND command,
+ u16 index);
+/************/
+/* Controls */
+/************/
+/******************/
+/* Volume control */
+/******************/
+u16 hpi_volume_set_gain(u32 h_control, short an_gain0_01dB[HPI_MAX_CHANNELS]
);
-u16 hpi_volume_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+u16 hpi_volume_get_gain(u32 h_control,
short an_gain0_01dB_out[HPI_MAX_CHANNELS]
);
+u16 hpi_volume_set_mute(u32 h_control, u32 mute);
+
+u16 hpi_volume_get_mute(u32 h_control, u32 *mute);
+
#define hpi_volume_get_range hpi_volume_query_range
-u16 hpi_volume_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB);
+u16 hpi_volume_query_range(u32 h_control, short *min_gain_01dB,
+ short *max_gain_01dB, short *step_gain_01dB);
-u16 hpi_volume_query_channels(const struct hpi_hsubsys *ph_subsys,
- const u32 h_volume, u32 *p_channels);
+u16 hpi_volume_query_channels(const u32 h_volume, u32 *p_channels);
-u16 hpi_volume_auto_fade(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+u16 hpi_volume_auto_fade(u32 h_control,
short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms);
-u16 hpi_volume_auto_fade_profile(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short an_stop_gain0_01dB[HPI_MAX_CHANNELS],
- u32 duration_ms, u16 profile);
+u16 hpi_volume_auto_fade_profile(u32 h_control,
+ short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms,
+ u16 profile);
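Mixer controls are obtained by node pair and control type, then driven through the per-type calls such as the volume functions above. A sketch, assuming the usual HPI_DESTNODE_LINEOUT and HPI_CONTROL_VOLUME constants from elsewhere in hpi.h; indices and gains are only examples:

/* Illustrative volume adjustment - not part of this patch. */
u32 h_mixer, h_vol;
short gain_0_01dB[HPI_MAX_CHANNELS] = { -1000, -1000 };	/* -10.00 dB L/R */

hpi_mixer_open(0, &h_mixer);
hpi_mixer_get_control(h_mixer, 0, 0,
	HPI_DESTNODE_LINEOUT, 0, HPI_CONTROL_VOLUME, &h_vol);
hpi_volume_set_gain(h_vol, gain_0_01dB);
hpi_mixer_close(h_mixer);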
-/*************************/
-/* level control */
-/*************************/
-u16 hpi_level_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB);
+/*****************/
+/* Level control */
+/*****************/
+u16 hpi_level_query_range(u32 h_control, short *min_gain_01dB,
+ short *max_gain_01dB, short *step_gain_01dB);
-u16 hpi_level_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_gain0_01dB[HPI_MAX_CHANNELS]
+u16 hpi_level_set_gain(u32 h_control, short an_gain0_01dB[HPI_MAX_CHANNELS]
);
-u16 hpi_level_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+u16 hpi_level_get_gain(u32 h_control,
short an_gain0_01dB_out[HPI_MAX_CHANNELS]
);
-/*************************/
-/* meter control */
-/*************************/
-u16 hpi_meter_query_channels(const struct hpi_hsubsys *ph_subsys,
- const u32 h_meter, u32 *p_channels);
+/*****************/
+/* Meter control */
+/*****************/
+u16 hpi_meter_query_channels(const u32 h_meter, u32 *p_channels);
-u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+u16 hpi_meter_get_peak(u32 h_control,
short an_peak0_01dB_out[HPI_MAX_CHANNELS]
);
-u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_peak0_01dB_out[HPI_MAX_CHANNELS]
+u16 hpi_meter_get_rms(u32 h_control, short an_peak0_01dB_out[HPI_MAX_CHANNELS]
);
-u16 hpi_meter_set_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 attack, u16 decay);
+u16 hpi_meter_set_peak_ballistics(u32 h_control, u16 attack, u16 decay);
-u16 hpi_meter_set_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 attack, u16 decay);
+u16 hpi_meter_set_rms_ballistics(u32 h_control, u16 attack, u16 decay);
-u16 hpi_meter_get_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *attack, u16 *decay);
+u16 hpi_meter_get_peak_ballistics(u32 h_control, u16 *attack, u16 *decay);
-u16 hpi_meter_get_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *attack, u16 *decay);
+u16 hpi_meter_get_rms_ballistics(u32 h_control, u16 *attack, u16 *decay);
-/*************************/
-/* channel mode control */
-/*************************/
-u16 hpi_channel_mode_query_mode(const struct hpi_hsubsys *ph_subsys,
- const u32 h_mode, const u32 index, u16 *pw_mode);
+/************************/
+/* ChannelMode control */
+/************************/
+u16 hpi_channel_mode_query_mode(const u32 h_mode, const u32 index,
+ u16 *pw_mode);
-u16 hpi_channel_mode_set(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 mode);
+u16 hpi_channel_mode_set(u32 h_control, u16 mode);
-u16 hpi_channel_mode_get(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 *mode);
+u16 hpi_channel_mode_get(u32 h_control, u16 *mode);
-/*************************/
-/* Tuner control */
-/*************************/
-u16 hpi_tuner_query_band(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, u16 *pw_band);
+/*****************/
+/* Tuner control */
+/*****************/
+u16 hpi_tuner_query_band(const u32 h_tuner, const u32 index, u16 *pw_band);
-u16 hpi_tuner_set_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 band);
+u16 hpi_tuner_set_band(u32 h_control, u16 band);
-u16 hpi_tuner_get_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 *pw_band);
+u16 hpi_tuner_get_band(u32 h_control, u16 *pw_band);
-u16 hpi_tuner_query_frequency(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, const u16 band, u32 *pfreq);
+u16 hpi_tuner_query_frequency(const u32 h_tuner, const u32 index,
+ const u16 band, u32 *pfreq);
-u16 hpi_tuner_set_frequency(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 freq_ink_hz);
+u16 hpi_tuner_set_frequency(u32 h_control, u32 freq_ink_hz);
-u16 hpi_tuner_get_frequency(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pw_freq_ink_hz);
+u16 hpi_tuner_get_frequency(u32 h_control, u32 *pw_freq_ink_hz);
-u16 hpi_tuner_getRF_level(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *pw_level);
+u16 hpi_tuner_get_rf_level(u32 h_control, short *pw_level);
-u16 hpi_tuner_get_rawRF_level(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short *pw_level);
+u16 hpi_tuner_get_raw_rf_level(u32 h_control, short *pw_level);
-u16 hpi_tuner_query_gain(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, u16 *pw_gain);
+u16 hpi_tuner_query_gain(const u32 h_tuner, const u32 index, u16 *pw_gain);
-u16 hpi_tuner_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short gain);
+u16 hpi_tuner_set_gain(u32 h_control, short gain);
-u16 hpi_tuner_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *pn_gain);
+u16 hpi_tuner_get_gain(u32 h_control, short *pn_gain);
-u16 hpi_tuner_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 *pw_status_mask, u16 *pw_status);
+u16 hpi_tuner_get_status(u32 h_control, u16 *pw_status_mask, u16 *pw_status);
-u16 hpi_tuner_set_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 mode, u32 value);
+u16 hpi_tuner_set_mode(u32 h_control, u32 mode, u32 value);
-u16 hpi_tuner_get_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 mode, u32 *pn_value);
+u16 hpi_tuner_get_mode(u32 h_control, u32 mode, u32 *pn_value);
-u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *p_rds_data);
+u16 hpi_tuner_get_rds(u32 h_control, char *p_rds_data);
-u16 hpi_tuner_query_deemphasis(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, const u16 band, u32 *pdeemphasis);
+u16 hpi_tuner_query_deemphasis(const u32 h_tuner, const u32 index,
+ const u16 band, u32 *pdeemphasis);
-u16 hpi_tuner_set_deemphasis(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 deemphasis);
-u16 hpi_tuner_get_deemphasis(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pdeemphasis);
+u16 hpi_tuner_set_deemphasis(u32 h_control, u32 deemphasis);
+u16 hpi_tuner_get_deemphasis(u32 h_control, u32 *pdeemphasis);
-u16 hpi_tuner_query_program(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, u32 *pbitmap_program);
+u16 hpi_tuner_query_program(const u32 h_tuner, u32 *pbitmap_program);
-u16 hpi_tuner_set_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 program);
+u16 hpi_tuner_set_program(u32 h_control, u32 program);
-u16 hpi_tuner_get_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 *pprogram);
+u16 hpi_tuner_get_program(u32 h_control, u32 *pprogram);
-u16 hpi_tuner_get_hd_radio_dsp_version(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, char *psz_dsp_version, const u32 string_size);
+u16 hpi_tuner_get_hd_radio_dsp_version(u32 h_control, char *psz_dsp_version,
+ const u32 string_size);
-u16 hpi_tuner_get_hd_radio_sdk_version(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, char *psz_sdk_version, const u32 string_size);
+u16 hpi_tuner_get_hd_radio_sdk_version(u32 h_control, char *psz_sdk_version,
+ const u32 string_size);
-u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pquality);
+u16 hpi_tuner_get_hd_radio_signal_quality(u32 h_control, u32 *pquality);
-u16 hpi_tuner_get_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pblend);
+u16 hpi_tuner_get_hd_radio_signal_blend(u32 h_control, u32 *pblend);
-u16 hpi_tuner_set_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, const u32 blend);
+u16 hpi_tuner_set_hd_radio_signal_blend(u32 h_control, const u32 blend);
-/****************************/
-/* PADs control */
-/****************************/
+/***************/
+/* PAD control */
+/***************/
-u16 HPI_PAD__get_channel_name(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, char *psz_string, const u32 string_length);
+u16 hpi_pad_get_channel_name(u32 h_control, char *psz_string,
+ const u32 string_length);
-u16 HPI_PAD__get_artist(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *psz_string, const u32 string_length);
+u16 hpi_pad_get_artist(u32 h_control, char *psz_string,
+ const u32 string_length);
-u16 HPI_PAD__get_title(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *psz_string, const u32 string_length);
+u16 hpi_pad_get_title(u32 h_control, char *psz_string,
+ const u32 string_length);
-u16 HPI_PAD__get_comment(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *psz_string, const u32 string_length);
+u16 hpi_pad_get_comment(u32 h_control, char *psz_string,
+ const u32 string_length);
-u16 HPI_PAD__get_program_type(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *ppTY);
+u16 hpi_pad_get_program_type(u32 h_control, u32 *ppTY);
-u16 HPI_PAD__get_rdsPI(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 *ppI);
+u16 hpi_pad_get_rdsPI(u32 h_control, u32 *ppI);
-u16 HPI_PAD__get_program_type_string(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, const u32 data_type, const u32 pTY, char *psz_string,
- const u32 string_length);
+u16 hpi_pad_get_program_type_string(u32 h_control, const u32 data_type,
+ const u32 pTY, char *psz_string, const u32 string_length);
/****************************/
/* AES/EBU Receiver control */
/****************************/
-u16 HPI_AESEBU__receiver_query_format(const struct hpi_hsubsys *ph_subsys,
- const u32 h_aes_rx, const u32 index, u16 *pw_format);
+u16 hpi_aesebu_receiver_query_format(const u32 h_aes_rx, const u32 index,
+ u16 *pw_format);
-u16 HPI_AESEBU__receiver_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source);
+u16 hpi_aesebu_receiver_set_format(u32 h_control, u16 source);
-u16 HPI_AESEBU__receiver_get_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_source);
+u16 hpi_aesebu_receiver_get_format(u32 h_control, u16 *pw_source);
-u16 HPI_AESEBU__receiver_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *psample_rate);
+u16 hpi_aesebu_receiver_get_sample_rate(u32 h_control, u32 *psample_rate);
-u16 HPI_AESEBU__receiver_get_user_data(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 *pw_data);
+u16 hpi_aesebu_receiver_get_user_data(u32 h_control, u16 index, u16 *pw_data);
-u16 HPI_AESEBU__receiver_get_channel_status(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u16 index, u16 *pw_data);
+u16 hpi_aesebu_receiver_get_channel_status(u32 h_control, u16 index,
+ u16 *pw_data);
-u16 HPI_AESEBU__receiver_get_error_status(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_error_data);
+u16 hpi_aesebu_receiver_get_error_status(u32 h_control, u16 *pw_error_data);
/*******************************/
/* AES/EBU Transmitter control */
/*******************************/
-u16 HPI_AESEBU__transmitter_set_sample_rate(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u32 sample_rate);
+u16 hpi_aesebu_transmitter_set_sample_rate(u32 h_control, u32 sample_rate);
-u16 HPI_AESEBU__transmitter_set_user_data(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 data);
+u16 hpi_aesebu_transmitter_set_user_data(u32 h_control, u16 index, u16 data);
-u16 HPI_AESEBU__transmitter_set_channel_status(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u16 index, u16 data);
+u16 hpi_aesebu_transmitter_set_channel_status(u32 h_control, u16 index,
+ u16 data);
-u16 HPI_AESEBU__transmitter_get_channel_status(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u16 index, u16 *pw_data);
+u16 hpi_aesebu_transmitter_get_channel_status(u32 h_control, u16 index,
+ u16 *pw_data);
-u16 HPI_AESEBU__transmitter_query_format(const struct hpi_hsubsys *ph_subsys,
- const u32 h_aes_tx, const u32 index, u16 *pw_format);
+u16 hpi_aesebu_transmitter_query_format(const u32 h_aes_tx, const u32 index,
+ u16 *pw_format);
-u16 HPI_AESEBU__transmitter_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 output_format);
+u16 hpi_aesebu_transmitter_set_format(u32 h_control, u16 output_format);
-u16 HPI_AESEBU__transmitter_get_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_output_format);
+u16 hpi_aesebu_transmitter_get_format(u32 h_control, u16 *pw_output_format);
/***********************/
-/* multiplexer control */
+/* Multiplexer control */
/***********************/
-u16 hpi_multiplexer_set_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source_node_type, u16 source_node_index);
-
-u16 hpi_multiplexer_get_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *source_node_type, u16 *source_node_index);
+u16 hpi_multiplexer_set_source(u32 h_control, u16 source_node_type,
+ u16 source_node_index);
-u16 hpi_multiplexer_query_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 *source_node_type,
+u16 hpi_multiplexer_get_source(u32 h_control, u16 *source_node_type,
u16 *source_node_index);
+u16 hpi_multiplexer_query_source(u32 h_control, u16 index,
+ u16 *source_node_type, u16 *source_node_index);
+
/***************/
-/* VOX control */
+/* Vox control */
/***************/
-u16 hpi_vox_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_gain0_01dB);
+u16 hpi_vox_set_threshold(u32 h_control, short an_gain0_01dB);
-u16 hpi_vox_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *an_gain0_01dB);
+u16 hpi_vox_get_threshold(u32 h_control, short *an_gain0_01dB);
/*********************/
/* Bitstream control */
/*********************/
-u16 hpi_bitstream_set_clock_edge(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 edge_type);
+u16 hpi_bitstream_set_clock_edge(u32 h_control, u16 edge_type);
-u16 hpi_bitstream_set_data_polarity(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 polarity);
+u16 hpi_bitstream_set_data_polarity(u32 h_control, u16 polarity);
-u16 hpi_bitstream_get_activity(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_clk_activity, u16 *pw_data_activity);
+u16 hpi_bitstream_get_activity(u32 h_control, u16 *pw_clk_activity,
+ u16 *pw_data_activity);
/***********************/
/* SampleClock control */
/***********************/
-u16 hpi_sample_clock_query_source(const struct hpi_hsubsys *ph_subsys,
- const u32 h_clock, const u32 index, u16 *pw_source);
+u16 hpi_sample_clock_query_source(const u32 h_clock, const u32 index,
+ u16 *pw_source);
-u16 hpi_sample_clock_set_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source);
+u16 hpi_sample_clock_set_source(u32 h_control, u16 source);
-u16 hpi_sample_clock_get_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_source);
+u16 hpi_sample_clock_get_source(u32 h_control, u16 *pw_source);
-u16 hpi_sample_clock_query_source_index(const struct hpi_hsubsys *ph_subsys,
- const u32 h_clock, const u32 index, const u32 source,
- u16 *pw_source_index);
+u16 hpi_sample_clock_query_source_index(const u32 h_clock, const u32 index,
+ const u32 source, u16 *pw_source_index);
-u16 hpi_sample_clock_set_source_index(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source_index);
+u16 hpi_sample_clock_set_source_index(u32 h_control, u16 source_index);
-u16 hpi_sample_clock_get_source_index(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_source_index);
+u16 hpi_sample_clock_get_source_index(u32 h_control, u16 *pw_source_index);
-u16 hpi_sample_clock_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *psample_rate);
+u16 hpi_sample_clock_get_sample_rate(u32 h_control, u32 *psample_rate);
-u16 hpi_sample_clock_query_local_rate(const struct hpi_hsubsys *ph_subsys,
- const u32 h_clock, const u32 index, u32 *psource);
+u16 hpi_sample_clock_query_local_rate(const u32 h_clock, const u32 index,
+ u32 *psource);
-u16 hpi_sample_clock_set_local_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 sample_rate);
+u16 hpi_sample_clock_set_local_rate(u32 h_control, u32 sample_rate);
-u16 hpi_sample_clock_get_local_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *psample_rate);
+u16 hpi_sample_clock_get_local_rate(u32 h_control, u32 *psample_rate);
-u16 hpi_sample_clock_set_auto(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 enable);
+u16 hpi_sample_clock_set_auto(u32 h_control, u32 enable);
-u16 hpi_sample_clock_get_auto(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *penable);
+u16 hpi_sample_clock_get_auto(u32 h_control, u32 *penable);
-u16 hpi_sample_clock_set_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 lock);
+u16 hpi_sample_clock_set_local_rate_lock(u32 h_control, u32 lock);
-u16 hpi_sample_clock_get_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *plock);
+u16 hpi_sample_clock_get_local_rate_lock(u32 h_control, u32 *plock);
/***********************/
/* Microphone control */
/***********************/
-u16 hpi_microphone_set_phantom_power(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 on_off);
+u16 hpi_microphone_set_phantom_power(u32 h_control, u16 on_off);
-u16 hpi_microphone_get_phantom_power(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_on_off);
+u16 hpi_microphone_get_phantom_power(u32 h_control, u16 *pw_on_off);
-/*******************************
- Parametric Equalizer control
-*******************************/
-u16 hpi_parametricEQ__get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_number_of_bands, u16 *pw_enabled);
+/********************************/
+/* Parametric Equalizer control */
+/********************************/
+u16 hpi_parametric_eq_get_info(u32 h_control, u16 *pw_number_of_bands,
+ u16 *pw_enabled);
-u16 hpi_parametricEQ__set_state(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 on_off);
+u16 hpi_parametric_eq_set_state(u32 h_control, u16 on_off);
-u16 hpi_parametricEQ__set_band(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 type, u32 frequency_hz, short q100,
- short gain0_01dB);
+u16 hpi_parametric_eq_set_band(u32 h_control, u16 index, u16 type,
+ u32 frequency_hz, short q100, short gain0_01dB);
-u16 hpi_parametricEQ__get_band(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 *pn_type, u32 *pfrequency_hz,
- short *pnQ100, short *pn_gain0_01dB);
+u16 hpi_parametric_eq_get_band(u32 h_control, u16 index, u16 *pn_type,
+ u32 *pfrequency_hz, short *pnQ100, short *pn_gain0_01dB);
-u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, short coeffs[5]
+u16 hpi_parametric_eq_get_coeffs(u32 h_control, u16 index, short coeffs[5]
);
-/*******************************
- Compressor Expander control
-*******************************/
-
-u16 hpi_compander_set_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 on);
-
-u16 hpi_compander_get_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pon);
-
-u16 hpi_compander_set_makeup_gain(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short makeup_gain0_01dB);
-
-u16 hpi_compander_get_makeup_gain(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short *pn_makeup_gain0_01dB);
-
-u16 hpi_compander_set_attack_time_constant(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u32 index, u32 attack);
-
-u16 hpi_compander_get_attack_time_constant(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u32 index, u32 *pw_attack);
-
-u16 hpi_compander_set_decay_time_constant(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 decay);
-
-u16 hpi_compander_get_decay_time_constant(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 *pw_decay);
-
-u16 hpi_compander_set_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, short threshold0_01dB);
-
-u16 hpi_compander_get_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, short *pn_threshold0_01dB);
-
-u16 hpi_compander_set_ratio(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 ratio100);
-
-u16 hpi_compander_get_ratio(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 *pw_ratio100);
-
-/*******************************
- Cobranet HMI control
-*******************************/
-u16 hpi_cobranet_hmi_write(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 hmi_address, u32 byte_count, u8 *pb_data);
-
-u16 hpi_cobranet_hmi_read(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 hmi_address, u32 max_byte_count, u32 *pbyte_count, u8 *pb_data);
-
-u16 hpi_cobranet_hmi_get_status(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pstatus, u32 *preadable_size,
- u32 *pwriteable_size);
-
-/*Read the current IP address
-*/
-u16 hpi_cobranet_getI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pi_paddress);
-
-/* Write the current IP address
-*/
-u16 hpi_cobranet_setI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 i_paddress);
-
-/* Read the static IP address
-*/
-u16 hpi_cobranet_get_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pi_paddress);
+/*******************************/
+/* Compressor Expander control */
+/*******************************/
-/* Write the static IP address
-*/
-u16 hpi_cobranet_set_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 i_paddress);
+u16 hpi_compander_set_enable(u32 h_control, u32 on);
-/* Read the MAC address
-*/
-u16 hpi_cobranet_getMA_caddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pmAC_MS_bs, u32 *pmAC_LS_bs);
+u16 hpi_compander_get_enable(u32 h_control, u32 *pon);
-/*******************************
- Tone Detector control
-*******************************/
-u16 hpi_tone_detector_get_state(const struct hpi_hsubsys *ph_subsys, u32 hC,
- u32 *state);
+u16 hpi_compander_set_makeup_gain(u32 h_control, short makeup_gain0_01dB);
-u16 hpi_tone_detector_set_enable(const struct hpi_hsubsys *ph_subsys, u32 hC,
- u32 enable);
+u16 hpi_compander_get_makeup_gain(u32 h_control, short *pn_makeup_gain0_01dB);
-u16 hpi_tone_detector_get_enable(const struct hpi_hsubsys *ph_subsys, u32 hC,
- u32 *enable);
+u16 hpi_compander_set_attack_time_constant(u32 h_control, u32 index,
+ u32 attack);
-u16 hpi_tone_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 event_enable);
+u16 hpi_compander_get_attack_time_constant(u32 h_control, u32 index,
+ u32 *pw_attack);
-u16 hpi_tone_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 *event_enable);
+u16 hpi_compander_set_decay_time_constant(u32 h_control, u32 index,
+ u32 decay);
-u16 hpi_tone_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 hC, int threshold);
+u16 hpi_compander_get_decay_time_constant(u32 h_control, u32 index,
+ u32 *pw_decay);
-u16 hpi_tone_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 hC, int *threshold);
+u16 hpi_compander_set_threshold(u32 h_control, u32 index,
+ short threshold0_01dB);
-u16 hpi_tone_detector_get_frequency(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 index, u32 *frequency);
+u16 hpi_compander_get_threshold(u32 h_control, u32 index,
+ short *pn_threshold0_01dB);
-/*******************************
- Silence Detector control
-*******************************/
-u16 hpi_silence_detector_get_state(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 *state);
+u16 hpi_compander_set_ratio(u32 h_control, u32 index, u32 ratio100);
-u16 hpi_silence_detector_set_enable(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 enable);
+u16 hpi_compander_get_ratio(u32 h_control, u32 index, u32 *pw_ratio100);
-u16 hpi_silence_detector_get_enable(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 *enable);
+/********************/
+/* Cobranet control */
+/********************/
+u16 hpi_cobranet_hmi_write(u32 h_control, u32 hmi_address, u32 byte_count,
+ u8 *pb_data);
-u16 hpi_silence_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 event_enable);
+u16 hpi_cobranet_hmi_read(u32 h_control, u32 hmi_address, u32 max_byte_count,
+ u32 *pbyte_count, u8 *pb_data);
-u16 hpi_silence_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 *event_enable);
+u16 hpi_cobranet_hmi_get_status(u32 h_control, u32 *pstatus,
+ u32 *preadable_size, u32 *pwriteable_size);
-u16 hpi_silence_detector_set_delay(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 delay);
+u16 hpi_cobranet_get_ip_address(u32 h_control, u32 *pdw_ip_address);
-u16 hpi_silence_detector_get_delay(const struct hpi_hsubsys *ph_subsys,
- u32 hC, u32 *delay);
+u16 hpi_cobranet_set_ip_address(u32 h_control, u32 dw_ip_address);
-u16 hpi_silence_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 hC, int threshold);
+u16 hpi_cobranet_get_static_ip_address(u32 h_control, u32 *pdw_ip_address);
-u16 hpi_silence_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 hC, int *threshold);
+u16 hpi_cobranet_set_static_ip_address(u32 h_control, u32 dw_ip_address);
-/*******************************
- Universal control
-*******************************/
-u16 hpi_entity_find_next(struct hpi_entity *container_entity,
- enum e_entity_type type, enum e_entity_role role, int recursive_flag,
- struct hpi_entity **current_match);
+u16 hpi_cobranet_get_macaddress(u32 h_control, u32 *p_mac_msbs,
+ u32 *p_mac_lsbs);
-u16 hpi_entity_copy_value_from(struct hpi_entity *entity,
- enum e_entity_type type, size_t item_count, void *value_dst_p);
+/*************************/
+/* Tone Detector control */
+/*************************/
+u16 hpi_tone_detector_get_state(u32 hC, u32 *state);
-u16 hpi_entity_unpack(struct hpi_entity *entity, enum e_entity_type *type,
- size_t *items, enum e_entity_role *role, void **value);
+u16 hpi_tone_detector_set_enable(u32 hC, u32 enable);
-u16 hpi_entity_alloc_and_pack(const enum e_entity_type type,
- const size_t item_count, const enum e_entity_role role, void *value,
- struct hpi_entity **entity);
+u16 hpi_tone_detector_get_enable(u32 hC, u32 *enable);
-void hpi_entity_free(struct hpi_entity *entity);
+u16 hpi_tone_detector_set_event_enable(u32 hC, u32 event_enable);
-u16 hpi_universal_info(const struct hpi_hsubsys *ph_subsys, u32 hC,
- struct hpi_entity **info);
+u16 hpi_tone_detector_get_event_enable(u32 hC, u32 *event_enable);
-u16 hpi_universal_get(const struct hpi_hsubsys *ph_subsys, u32 hC,
- struct hpi_entity **value);
+u16 hpi_tone_detector_set_threshold(u32 hC, int threshold);
-u16 hpi_universal_set(const struct hpi_hsubsys *ph_subsys, u32 hC,
- struct hpi_entity *value);
+u16 hpi_tone_detector_get_threshold(u32 hC, int *threshold);
-/*/////////// */
-/* DSP CLOCK */
-/*/////////// */
-u16 hpi_clock_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_dsp_clock);
+u16 hpi_tone_detector_get_frequency(u32 hC, u32 index, u32 *frequency);
-u16 hpi_clock_set_time(const struct hpi_hsubsys *ph_subsys, u32 h_clock,
- u16 hour, u16 minute, u16 second, u16 milli_second);
+/****************************/
+/* Silence Detector control */
+/****************************/
+u16 hpi_silence_detector_get_state(u32 hC, u32 *state);
-u16 hpi_clock_get_time(const struct hpi_hsubsys *ph_subsys, u32 h_clock,
- u16 *pw_hour, u16 *pw_minute, u16 *pw_second, u16 *pw_milli_second);
+u16 hpi_silence_detector_set_enable(u32 hC, u32 enable);
-/*/////////// */
-/* PROFILE */
-/*/////////// */
-u16 hpi_profile_open_all(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 profile_index, u32 *ph_profile,
- u16 *pw_max_profiles);
+u16 hpi_silence_detector_get_enable(u32 hC, u32 *enable);
-u16 hpi_profile_get(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
- u16 index, u16 *pw_seconds, u32 *pmicro_seconds, u32 *pcall_count,
- u32 *pmax_micro_seconds, u32 *pmin_micro_seconds);
+u16 hpi_silence_detector_set_event_enable(u32 hC, u32 event_enable);
-u16 hpi_profile_start_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile);
+u16 hpi_silence_detector_get_event_enable(u32 hC, u32 *event_enable);
-u16 hpi_profile_stop_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile);
+u16 hpi_silence_detector_set_delay(u32 hC, u32 delay);
-u16 hpi_profile_get_name(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
- u16 index, char *sz_profile_name, u16 profile_name_length);
+u16 hpi_silence_detector_get_delay(u32 hC, u32 *delay);
-u16 hpi_profile_get_utilization(const struct hpi_hsubsys *ph_subsys,
- u32 h_profile, u32 *putilization);
+u16 hpi_silence_detector_set_threshold(u32 hC, int threshold);
-/*//////////////////// */
-/* UTILITY functions */
+u16 hpi_silence_detector_get_threshold(u32 hC, int *threshold);
+/*********************/
+/* Utility functions */
+/*********************/
u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
u32 sample_rate, u32 bit_rate, u32 attributes);
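hpi_format_create() fills the struct hpi_format that the stream calls consume; together with hpi_stream_estimate_buffer_size() (declared at the top of the function list) it sizes host buffers. A hedged sketch, where the 100 ms polling interval and the h_out handle are assumptions:

/* Illustrative host-buffer sizing - not part of this patch. */
struct hpi_format fmt;
u32 buf_bytes;

hpi_format_create(&fmt, 2, HPI_FORMAT_PCM16_SIGNED, 48000, 0, 0);
hpi_stream_estimate_buffer_size(&fmt, 100, &buf_bytes);	/* 100 ms polling */
hpi_outstream_host_buffer_allocate(h_out, buf_bytes);	/* h_out from hpi_outstream_open() */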
-/* Until it's verified, this function is for Windows OSs only */
-
-#endif /*_H_HPI_ */
-/*
-///////////////////////////////////////////////////////////////////////////////
-// See CVS for history. Last complete set in rev 1.146
-////////////////////////////////////////////////////////////////////////////////
-*/
+#endif /*_HPI_H_ */
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index 1b9bf9395cfe..3e3c2ef6efd8 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -43,16 +43,17 @@
#define HPI_HIF_ERROR_MASK 0x4000
/* HPI6000 specific error codes */
+#define HPI6000_ERROR_BASE 900 /* not actually used anywhere */
-#define HPI6000_ERROR_BASE 900
+/* operational/messaging errors */
#define HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT 901
-#define HPI6000_ERROR_MSG_RESP_SEND_MSG_ACK 902
+
#define HPI6000_ERROR_MSG_RESP_GET_RESP_ACK 903
#define HPI6000_ERROR_MSG_GET_ADR 904
#define HPI6000_ERROR_RESP_GET_ADR 905
#define HPI6000_ERROR_MSG_RESP_BLOCKWRITE32 906
#define HPI6000_ERROR_MSG_RESP_BLOCKREAD32 907
-#define HPI6000_ERROR_MSG_INVALID_DSP_INDEX 908
+
#define HPI6000_ERROR_CONTROL_CACHE_PARAMS 909
#define HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT 911
@@ -62,7 +63,6 @@
#define HPI6000_ERROR_SEND_DATA_CMD 915
#define HPI6000_ERROR_SEND_DATA_WRITE 916
#define HPI6000_ERROR_SEND_DATA_IDLECMD 917
-#define HPI6000_ERROR_SEND_DATA_VERIFY 918
#define HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT 921
#define HPI6000_ERROR_GET_DATA_ACK 922
@@ -76,9 +76,8 @@
#define HPI6000_ERROR_MSG_RESP_GETRESPCMD 961
#define HPI6000_ERROR_MSG_RESP_IDLECMD 962
-#define HPI6000_ERROR_MSG_RESP_BLOCKVERIFY32 963
-/* adapter init errors */
+/* Initialisation/bootload errors */
#define HPI6000_ERROR_UNHANDLED_SUBSYS_ID 930
/* can't access PCI2040 */
@@ -210,6 +209,8 @@ static void adapter_get_asserts(struct hpi_adapter_obj *pao,
static short create_adapter_obj(struct hpi_adapter_obj *pao,
u32 *pos_error_code);
+static void delete_adapter_obj(struct hpi_adapter_obj *pao);
+
/* local globals */
static u16 gw_pci_read_asserts; /* used to count PCI2040 errors */
@@ -217,17 +218,7 @@ static u16 gw_pci_write_asserts; /* used to count PCI2040 errors */
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
{
-
switch (phm->function) {
- case HPI_SUBSYS_OPEN:
- case HPI_SUBSYS_CLOSE:
- case HPI_SUBSYS_GET_INFO:
- case HPI_SUBSYS_DRIVER_UNLOAD:
- case HPI_SUBSYS_DRIVER_LOAD:
- case HPI_SUBSYS_FIND_ADAPTERS:
- /* messages that should not get here */
- phr->error = HPI_ERROR_UNIMPLEMENTED;
- break;
case HPI_SUBSYS_CREATE_ADAPTER:
subsys_create_adapter(phm, phr);
break;
@@ -243,7 +234,6 @@ static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
static void control_message(struct hpi_adapter_obj *pao,
struct hpi_message *phm, struct hpi_response *phr)
{
-
switch (phm->function) {
case HPI_CONTROL_GET_STATE:
if (pao->has_control_cache) {
@@ -251,7 +241,13 @@ static void control_message(struct hpi_adapter_obj *pao,
err = hpi6000_update_control_cache(pao, phm);
if (err) {
- phr->error = err;
+ if (err >= HPI_ERROR_BACKEND_BASE) {
+ phr->error =
+ HPI_ERROR_CONTROL_CACHING;
+ phr->specific_error = err;
+ } else {
+ phr->error = err;
+ }
break;
}
@@ -262,16 +258,15 @@ static void control_message(struct hpi_adapter_obj *pao,
}
hw_message(pao, phm, phr);
break;
- case HPI_CONTROL_GET_INFO:
- hw_message(pao, phm, phr);
- break;
case HPI_CONTROL_SET_STATE:
hw_message(pao, phm, phr);
- hpi_sync_control_cache(((struct hpi_hw_obj *)pao->priv)->
- p_cache, phm, phr);
+ hpi_cmn_control_cache_sync_to_msg(((struct hpi_hw_obj *)pao->
+ priv)->p_cache, phm, phr);
break;
+
+ case HPI_CONTROL_GET_INFO:
default:
- phr->error = HPI_ERROR_INVALID_FUNC;
+ hw_message(pao, phm, phr);
break;
}
}
@@ -280,26 +275,12 @@ static void adapter_message(struct hpi_adapter_obj *pao,
struct hpi_message *phm, struct hpi_response *phr)
{
switch (phm->function) {
- case HPI_ADAPTER_GET_INFO:
- hw_message(pao, phm, phr);
- break;
case HPI_ADAPTER_GET_ASSERT:
adapter_get_asserts(pao, phm, phr);
break;
- case HPI_ADAPTER_OPEN:
- case HPI_ADAPTER_CLOSE:
- case HPI_ADAPTER_TEST_ASSERT:
- case HPI_ADAPTER_SELFTEST:
- case HPI_ADAPTER_GET_MODE:
- case HPI_ADAPTER_SET_MODE:
- case HPI_ADAPTER_FIND_OBJECT:
- case HPI_ADAPTER_GET_PROPERTY:
- case HPI_ADAPTER_SET_PROPERTY:
- case HPI_ADAPTER_ENUM_PROPERTY:
- hw_message(pao, phm, phr);
- break;
+
default:
- phr->error = HPI_ERROR_INVALID_FUNC;
+ hw_message(pao, phm, phr);
break;
}
}
@@ -311,7 +292,7 @@ static void outstream_message(struct hpi_adapter_obj *pao,
case HPI_OSTREAM_HOSTBUFFER_ALLOC:
case HPI_OSTREAM_HOSTBUFFER_FREE:
/* Don't let these messages go to the HW function because
- * they're called without allocating the spinlock.
+ * they're called without locking the spinlock.
* For the HPI6000 adapters the HW would return
* HPI_ERROR_INVALID_FUNC anyway.
*/
@@ -331,7 +312,7 @@ static void instream_message(struct hpi_adapter_obj *pao,
case HPI_ISTREAM_HOSTBUFFER_ALLOC:
case HPI_ISTREAM_HOSTBUFFER_FREE:
/* Don't let these messages go to the HW function because
- * they're called without allocating the spinlock.
+ * they're called without locking the spinlock.
* For the HPI6000 adapters the HW would return
* HPI_ERROR_INVALID_FUNC anyway.
*/
@@ -355,7 +336,7 @@ void HPI_6000(struct hpi_message *phm, struct hpi_response *phr)
/* subsystem messages get executed by every HPI. */
/* All other messages are ignored unless the adapter index matches */
/* an adapter in the HPI */
- HPI_DEBUG_LOG(DEBUG, "O %d,F %x\n", phm->object, phm->function);
+ /*HPI_DEBUG_LOG(DEBUG, "O %d,F %x\n", phm->wObject, phm->wFunction); */
/* if Dsp has crashed then do not communicate with it any more */
if (phm->object != HPI_OBJ_SUBSYSTEM) {
@@ -433,21 +414,13 @@ static void subsys_create_adapter(struct hpi_message *phm,
struct hpi_adapter_obj ao;
struct hpi_adapter_obj *pao;
u32 os_error_code;
- short error = 0;
+ u16 err = 0;
u32 dsp_index = 0;
HPI_DEBUG_LOG(VERBOSE, "subsys_create_adapter\n");
memset(&ao, 0, sizeof(ao));
- /* this HPI only creates adapters for TI/PCI2040 based devices */
- if (phm->u.s.resource.bus_type != HPI_BUS_PCI)
- return;
- if (phm->u.s.resource.r.pci->vendor_id != HPI_PCI_VENDOR_ID_TI)
- return;
- if (phm->u.s.resource.r.pci->device_id != HPI_PCI_DEV_ID_PCI2040)
- return;
-
ao.priv = kzalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL);
if (!ao.priv) {
HPI_DEBUG_LOG(ERROR, "cant get mem for adapter object\n");
@@ -456,16 +429,19 @@ static void subsys_create_adapter(struct hpi_message *phm,
}
/* create the adapter object based on the resource information */
- /*? memcpy(&ao.Pci,&phm->u.s.Resource.r.Pci,sizeof(ao.Pci)); */
ao.pci = *phm->u.s.resource.r.pci;
- error = create_adapter_obj(&ao, &os_error_code);
- if (!error)
- error = hpi_add_adapter(&ao);
- if (error) {
+ err = create_adapter_obj(&ao, &os_error_code);
+ if (err) {
+ delete_adapter_obj(&ao);
+ if (err >= HPI_ERROR_BACKEND_BASE) {
+ phr->error = HPI_ERROR_DSP_BOOTLOAD;
+ phr->specific_error = err;
+ } else {
+ phr->error = err;
+ }
+
phr->u.s.data = os_error_code;
- kfree(ao.priv);
- phr->error = error;
return;
}
/* need to update paParentAdapter */
@@ -473,7 +449,7 @@ static void subsys_create_adapter(struct hpi_message *phm,
if (!pao) {
/* We just added this adapter, why can't we find it!? */
HPI_DEBUG_LOG(ERROR, "lost adapter after boot\n");
- phr->error = 950;
+ phr->error = HPI_ERROR_BAD_ADAPTER;
return;
}
@@ -482,9 +458,8 @@ static void subsys_create_adapter(struct hpi_message *phm,
phw->ado[dsp_index].pa_parent_adapter = pao;
}
- phr->u.s.aw_adapter_list[ao.index] = ao.adapter_type;
+ phr->u.s.adapter_type = ao.adapter_type;
phr->u.s.adapter_index = ao.index;
- phr->u.s.num_adapters++;
phr->error = 0;
}
@@ -492,20 +467,13 @@ static void subsys_delete_adapter(struct hpi_message *phm,
struct hpi_response *phr)
{
struct hpi_adapter_obj *pao = NULL;
- struct hpi_hw_obj *phw;
- pao = hpi_find_adapter(phm->adapter_index);
+ pao = hpi_find_adapter(phm->obj_index);
if (!pao)
return;
- phw = (struct hpi_hw_obj *)pao->priv;
-
- if (pao->has_control_cache)
- hpi_free_control_cache(phw->p_cache);
-
+ delete_adapter_obj(pao);
hpi_delete_adapter(pao);
- kfree(phw);
-
phr->error = 0;
}
@@ -519,9 +487,6 @@ static short create_adapter_obj(struct hpi_adapter_obj *pao,
u32 control_cache_count = 0;
struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
- /* init error reporting */
- pao->dsp_crashed = 0;
-
/* The PCI2040 has the following address map */
/* BAR0 - 4K = HPI control and status registers on PCI2040 (HPI CSR) */
/* BAR1 - 32K = HPI registers on DSP */
@@ -575,36 +540,36 @@ static short create_adapter_obj(struct hpi_adapter_obj *pao,
/* get info about the adapter by asking the adapter */
/* send a HPI_ADAPTER_GET_INFO message */
{
- struct hpi_message hM;
- struct hpi_response hR0; /* response from DSP 0 */
- struct hpi_response hR1; /* response from DSP 1 */
+ struct hpi_message hm;
+ struct hpi_response hr0; /* response from DSP 0 */
+ struct hpi_response hr1; /* response from DSP 1 */
u16 error = 0;
HPI_DEBUG_LOG(VERBOSE, "send ADAPTER_GET_INFO\n");
- memset(&hM, 0, sizeof(hM));
- hM.type = HPI_TYPE_MESSAGE;
- hM.size = sizeof(struct hpi_message);
- hM.object = HPI_OBJ_ADAPTER;
- hM.function = HPI_ADAPTER_GET_INFO;
- hM.adapter_index = 0;
- memset(&hR0, 0, sizeof(hR0));
- memset(&hR1, 0, sizeof(hR1));
- hR0.size = sizeof(hR0);
- hR1.size = sizeof(hR1);
-
- error = hpi6000_message_response_sequence(pao, 0, &hM, &hR0);
- if (hR0.error) {
- HPI_DEBUG_LOG(DEBUG, "message error %d\n", hR0.error);
- return hR0.error;
+ memset(&hm, 0, sizeof(hm));
+ hm.type = HPI_TYPE_MESSAGE;
+ hm.size = sizeof(struct hpi_message);
+ hm.object = HPI_OBJ_ADAPTER;
+ hm.function = HPI_ADAPTER_GET_INFO;
+ hm.adapter_index = 0;
+ memset(&hr0, 0, sizeof(hr0));
+ memset(&hr1, 0, sizeof(hr1));
+ hr0.size = sizeof(hr0);
+ hr1.size = sizeof(hr1);
+
+ error = hpi6000_message_response_sequence(pao, 0, &hm, &hr0);
+ if (hr0.error) {
+ HPI_DEBUG_LOG(DEBUG, "message error %d\n", hr0.error);
+ return hr0.error;
}
if (phw->num_dsp == 2) {
- error = hpi6000_message_response_sequence(pao, 1, &hM,
- &hR1);
+ error = hpi6000_message_response_sequence(pao, 1, &hm,
+ &hr1);
if (error)
return error;
}
- pao->adapter_type = hR0.u.a.adapter_type;
- pao->index = hR0.u.a.adapter_index;
+ pao->adapter_type = hr0.u.ax.info.adapter_type;
+ pao->index = hr0.u.ax.info.adapter_index;
}
memset(&phw->control_cache[0], 0,
@@ -618,22 +583,37 @@ static short create_adapter_obj(struct hpi_adapter_obj *pao,
control_cache_count =
hpi_read_word(&phw->ado[0],
HPI_HIF_ADDR(control_cache_count));
- pao->has_control_cache = 1;
phw->p_cache =
hpi_alloc_control_cache(control_cache_count,
- control_cache_size, (struct hpi_control_cache_info *)
+ control_cache_size, (unsigned char *)
&phw->control_cache[0]
);
- if (!phw->p_cache)
- pao->has_control_cache = 0;
- } else
- pao->has_control_cache = 0;
+ if (phw->p_cache)
+ pao->has_control_cache = 1;
+ }
HPI_DEBUG_LOG(DEBUG, "get adapter info ASI%04X index %d\n",
pao->adapter_type, pao->index);
pao->open = 0; /* upon creation the adapter is closed */
- return 0;
+
+ if (phw->p_cache)
+ phw->p_cache->adap_idx = pao->index;
+
+ return hpi_add_adapter(pao);
+}
+
+static void delete_adapter_obj(struct hpi_adapter_obj *pao)
+{
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+
+ if (pao->has_control_cache)
+ hpi_free_control_cache(phw->p_cache);
+
+ /* reset DSPs on adapter */
+ iowrite32(0x0003000F, phw->dw2040_HPICSR + HPI_RESET);
+
+ kfree(phw);
}
/************************************************************************/
@@ -645,11 +625,13 @@ static void adapter_get_asserts(struct hpi_adapter_obj *pao,
#ifndef HIDE_PCI_ASSERTS
/* if we have PCI2040 asserts then collect them */
if ((gw_pci_read_asserts > 0) || (gw_pci_write_asserts > 0)) {
- phr->u.a.serial_number =
+ phr->u.ax.assert.p1 =
gw_pci_read_asserts * 100 + gw_pci_write_asserts;
- phr->u.a.adapter_index = 1; /* assert count */
- phr->u.a.adapter_type = -1; /* "dsp index" */
- strcpy(phr->u.a.sz_adapter_assert, "PCI2040 error");
+ phr->u.ax.assert.p2 = 0;
+ phr->u.ax.assert.count = 1; /* assert count */
+ phr->u.ax.assert.dsp_index = -1; /* "dsp index" */
+ strcpy(phr->u.ax.assert.sz_message, "PCI2040 error");
+ phr->u.ax.assert.dsp_msg_addr = 0;
gw_pci_read_asserts = 0;
gw_pci_write_asserts = 0;
phr->error = 0;
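In the assert hunk above the two PCI2040 error counters are packed into a single field, p1 = read_asserts * 100 + write_asserts, so one number carries both counts. A quick worked example with hypothetical counts of 3 failed reads and 2 failed writes:

#include <stdio.h>

int main(void)
{
	unsigned int gw_pci_read_asserts = 3;   /* hypothetical counts */
	unsigned int gw_pci_write_asserts = 2;
	unsigned int p1 = gw_pci_read_asserts * 100 + gw_pci_write_asserts;

	/* 3 reads, 2 writes -> 302: hundreds carry reads, units carry writes */
	printf("p1 = %u (reads=%u writes=%u)\n", p1, p1 / 100, p1 % 100);
	return 0;
}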
@@ -686,10 +668,10 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
/* NOTE don't use wAdapterType in this routine. It is not setup yet */
- switch (pao->pci.subsys_device_id) {
+ switch (pao->pci.pci_dev->subsystem_device) {
case 0x5100:
case 0x5110: /* ASI5100 revB or higher with C6711D */
- case 0x5200: /* ASI5200 PC_ie version of ASI5100 */
+ case 0x5200: /* ASI5200 PCIe version of ASI5100 */
case 0x6100:
case 0x6200:
boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x6200);
@@ -709,8 +691,9 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
* note that bits 4..15 are read-only and so should always return zero,
* even though we wrote 1 to them
*/
- for (i = 0; i < 1000; i++)
- delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+ hpios_delay_micro_seconds(1000);
+ delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+
if (delay != dw2040_reset) {
HPI_DEBUG_LOG(ERROR, "INIT_PCI2040 %x %x\n", dw2040_reset,
delay);
@@ -743,8 +726,7 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
dw2040_reset = dw2040_reset & (~0x00000008);
iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET);
/*delay to allow DSP to get going */
- for (i = 0; i < 100; i++)
- delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+ hpios_delay_micro_seconds(100);
/* loop through all DSPs, downloading DSP code */
for (dsp_index = 0; dsp_index < phw->num_dsp; dsp_index++) {
@@ -783,27 +765,27 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
*/
/* bypass PLL */
hpi_write_word(pdo, 0x01B7C100, 0x0000);
- for (i = 0; i < 100; i++)
- delay = ioread32(phw->dw2040_HPICSR +
- HPI_RESET);
+ hpios_delay_micro_seconds(100);
/* ** use default of PLL x7 ** */
/* EMIF = 225/3=75MHz */
hpi_write_word(pdo, 0x01B7C120, 0x8002);
+ hpios_delay_micro_seconds(100);
+
/* peri = 225/2 */
hpi_write_word(pdo, 0x01B7C11C, 0x8001);
+ hpios_delay_micro_seconds(100);
+
/* cpu = 225/1 */
hpi_write_word(pdo, 0x01B7C118, 0x8000);
- /* ~200us delay */
- for (i = 0; i < 2000; i++)
- delay = ioread32(phw->dw2040_HPICSR +
- HPI_RESET);
+
+ /* ~2ms delay */
+ hpios_delay_micro_seconds(2000);
+
/* PLL not bypassed */
hpi_write_word(pdo, 0x01B7C100, 0x0001);
- /* ~200us delay */
- for (i = 0; i < 2000; i++)
- delay = ioread32(phw->dw2040_HPICSR +
- HPI_RESET);
+ /* ~2ms delay */
+ hpios_delay_micro_seconds(2000);
}
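Throughout this hunk the old "spin on N dummy ioread32() calls" delays are replaced with explicit hpios_delay_micro_seconds() calls of matching length, implicitly treating each dummy PCI read as roughly one microsecond; the new comments also state the real duration (~2 ms, not the "~200us" the old comments claimed). A tiny standalone illustration of that one-for-one mapping, using the loop counts that appear in this file:

#include <stdio.h>

int main(void)
{
	/* Assumption implied by the patch: one dummy PCI read ~= 1 us,
	 * so an N-iteration read loop becomes an N-microsecond delay. */
	unsigned int loop_iterations[] = { 100, 1000, 2000, 10000 };
	unsigned int i;

	for (i = 0; i < sizeof(loop_iterations) / sizeof(loop_iterations[0]); i++)
		printf("%5u dummy reads -> hpios_delay_micro_seconds(%u) (~%.1f ms)\n",
		       loop_iterations[i], loop_iterations[i],
		       loop_iterations[i] / 1000.0);
	return 0;
}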
/* test r/w to internal DSP memory
@@ -927,9 +909,7 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
}
/* delay a little to allow SDRAM and DSP to "get going" */
-
- for (i = 0; i < 1000; i++)
- delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+ hpios_delay_micro_seconds(1000);
/* test access to SDRAM */
{
@@ -976,7 +956,7 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
/* write the DSP code down into the DSPs memory */
/*HpiDspCode_Open(nBootLoadFamily,&DspCode,pdwOsErrorCode); */
- dsp_code.ps_dev = pao->pci.p_os_data;
+ dsp_code.ps_dev = pao->pci.pci_dev;
error = hpi_dsp_code_open(boot_load_family, &dsp_code,
pos_error_code);
@@ -1073,8 +1053,7 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
/* step 3. Start code by sending interrupt */
iowrite32(0x00030003, pdo->prHPI_control);
- for (i = 0; i < 10000; i++)
- delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+ hpios_delay_micro_seconds(10000);
/* wait for a non-zero value in hostcmd -
* indicating initialization is complete
@@ -1101,7 +1080,7 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
* locks up with a bluescreen (NOT GPF or pagefault).
*/
else
- hpios_delay_micro_seconds(1000);
+ hpios_delay_micro_seconds(10000);
}
if (timeout == 0)
return HPI6000_ERROR_INIT_NOACK;
@@ -1132,14 +1111,14 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
mask = 0xFFFFFF00L;
/* ASI5100 uses AX6 code, */
/* but has no PLD r/w register to test */
- if (HPI_ADAPTER_FAMILY_ASI(pao->pci.
- subsys_device_id) ==
+ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.pci_dev->
+ subsystem_device) ==
HPI_ADAPTER_FAMILY_ASI(0x5100))
mask = 0x00000000L;
/* ASI5200 uses AX6 code, */
/* but has no PLD r/w register to test */
- if (HPI_ADAPTER_FAMILY_ASI(pao->pci.
- subsys_device_id) ==
+ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.pci_dev->
+ subsystem_device) ==
HPI_ADAPTER_FAMILY_ASI(0x5200))
mask = 0x00000000L;
break;
@@ -1204,7 +1183,7 @@ static u32 hpi_read_word(struct dsp_obj *pdo, u32 address)
u32 data = 0;
if (hpi_set_address(pdo, address))
- return 0; /*? no way to return error */
+ return 0; /*? No way to return error */
/* take care of errata in revB DSP (2.0.1) */
data = ioread32(pdo->prHPI_data);
@@ -1340,10 +1319,6 @@ static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
u32 *p_data;
u16 error = 0;
- /* does the DSP we are referencing exist? */
- if (dsp_index >= phw->num_dsp)
- return HPI6000_ERROR_MSG_INVALID_DSP_INDEX;
-
ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE);
if (ack & HPI_HIF_ERROR_MASK) {
pao->dsp_crashed++;
@@ -1351,9 +1326,7 @@ static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
}
pao->dsp_crashed = 0;
- /* send the message */
-
- /* get the address and size */
+ /* get the message address and size */
if (phw->message_buffer_address_on_dsp == 0) {
timeout = TIMEOUT;
do {
@@ -1368,10 +1341,9 @@ static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
} else
address = phw->message_buffer_address_on_dsp;
- /* dwLength = sizeof(struct hpi_message); */
length = phm->size;
- /* send it */
+ /* send the message */
p_data = (u32 *)phm;
if (hpi6000_dsp_block_write32(pao, dsp_index, address, p_data,
(u16)length / 4))
@@ -1385,7 +1357,7 @@ static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
if (ack & HPI_HIF_ERROR_MASK)
return HPI6000_ERROR_MSG_RESP_GET_RESP_ACK;
- /* get the address and size */
+ /* get the response address */
if (phw->response_buffer_address_on_dsp == 0) {
timeout = TIMEOUT;
do {
@@ -1409,7 +1381,7 @@ static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
if (!timeout)
length = sizeof(struct hpi_response);
- /* get it */
+ /* get the response */
p_data = (u32 *)phr;
if (hpi6000_dsp_block_read32(pao, dsp_index, address, p_data,
(u16)length / 4))
@@ -1805,17 +1777,11 @@ static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
hpios_dsplock_lock(pao);
error = hpi6000_message_response_sequence(pao, dsp_index, phm, phr);
- /* maybe an error response */
- if (error) {
- /* something failed in the HPI/DSP interface */
- phr->error = error;
- /* just the header of the response is valid */
- phr->size = sizeof(struct hpi_response_header);
+ if (error) /* something failed in the HPI/DSP interface */
goto err;
- }
- if (phr->error != 0) /* something failed in the DSP */
- goto err;
+ if (phr->error) /* something failed in the DSP */
+ goto out;
switch (phm->function) {
case HPI_OSTREAM_WRITE:
@@ -1827,21 +1793,30 @@ static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
error = hpi6000_get_data(pao, dsp_index, phm, phr);
break;
case HPI_ADAPTER_GET_ASSERT:
- phr->u.a.adapter_index = 0; /* dsp 0 default */
+ phr->u.ax.assert.dsp_index = 0; /* dsp 0 default */
if (num_dsp == 2) {
- if (!phr->u.a.adapter_type) {
+ if (!phr->u.ax.assert.count) {
/* no assert from dsp 0, check dsp 1 */
error = hpi6000_message_response_sequence(pao,
1, phm, phr);
- phr->u.a.adapter_index = 1;
+ phr->u.ax.assert.dsp_index = 1;
}
}
}
- if (error)
- phr->error = error;
-
err:
+ if (error) {
+ if (error >= HPI_ERROR_BACKEND_BASE) {
+ phr->error = HPI_ERROR_DSP_COMMUNICATION;
+ phr->specific_error = error;
+ } else {
+ phr->error = error;
+ }
+
+ /* just the header of the response is valid */
+ phr->size = sizeof(struct hpi_response_header);
+ }
+out:
hpios_dsplock_unlock(pao);
return;
}
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 2672f6591ceb..33136cb0d251 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -38,27 +38,26 @@
/*****************************************************************************/
/* HPI6205 specific error codes */
-#define HPI6205_ERROR_BASE 1000
-/*#define HPI6205_ERROR_MEM_ALLOC 1001 */
+#define HPI6205_ERROR_BASE 1000 /* not actually used anywhere */
+
+/* operational/messaging errors */
+#define HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT 1015
+#define HPI6205_ERROR_MSG_RESP_TIMEOUT 1016
+
+/* initialization/bootload errors */
#define HPI6205_ERROR_6205_NO_IRQ 1002
#define HPI6205_ERROR_6205_INIT_FAILED 1003
-/*#define HPI6205_ERROR_MISSING_DSPCODE 1004 */
-#define HPI6205_ERROR_UNKNOWN_PCI_DEVICE 1005
#define HPI6205_ERROR_6205_REG 1006
#define HPI6205_ERROR_6205_DSPPAGE 1007
-#define HPI6205_ERROR_BAD_DSPINDEX 1008
#define HPI6205_ERROR_C6713_HPIC 1009
#define HPI6205_ERROR_C6713_HPIA 1010
#define HPI6205_ERROR_C6713_PLL 1011
#define HPI6205_ERROR_DSP_INTMEM 1012
#define HPI6205_ERROR_DSP_EXTMEM 1013
#define HPI6205_ERROR_DSP_PLD 1014
-#define HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT 1015
-#define HPI6205_ERROR_MSG_RESP_TIMEOUT 1016
#define HPI6205_ERROR_6205_EEPROM 1017
#define HPI6205_ERROR_DSP_EMIF 1018
-#define hpi6205_error(dsp_index, err) (err)
/*****************************************************************************/
/* for C6205 PCI i/f */
/* Host Status Register (HSR) bitfields */
@@ -128,9 +127,6 @@ struct hpi_hw_obj {
u32 outstream_host_buffer_size[HPI_MAX_STREAMS];
struct consistent_dma_area h_control_cache;
- struct consistent_dma_area h_async_event_buffer;
-/* struct hpi_control_cache_single *pControlCache; */
- struct hpi_async_event *p_async_event_buffer;
struct hpi_control_cache *p_cache;
};
@@ -208,8 +204,8 @@ static void instream_start(struct hpi_adapter_obj *pao,
static u32 boot_loader_read_mem32(struct hpi_adapter_obj *pao, int dsp_index,
u32 address);
-static u16 boot_loader_write_mem32(struct hpi_adapter_obj *pao, int dsp_index,
- u32 address, u32 data);
+static void boot_loader_write_mem32(struct hpi_adapter_obj *pao,
+ int dsp_index, u32 address, u32 data);
static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao,
int dsp_index);
@@ -229,17 +225,7 @@ static u16 boot_loader_test_pld(struct hpi_adapter_obj *pao, int dsp_index);
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
{
-
switch (phm->function) {
- case HPI_SUBSYS_OPEN:
- case HPI_SUBSYS_CLOSE:
- case HPI_SUBSYS_GET_INFO:
- case HPI_SUBSYS_DRIVER_UNLOAD:
- case HPI_SUBSYS_DRIVER_LOAD:
- case HPI_SUBSYS_FIND_ADAPTERS:
- /* messages that should not get here */
- phr->error = HPI_ERROR_UNIMPLEMENTED;
- break;
case HPI_SUBSYS_CREATE_ADAPTER:
subsys_create_adapter(phm, phr);
break;
@@ -257,15 +243,22 @@ static void control_message(struct hpi_adapter_obj *pao,
{
struct hpi_hw_obj *phw = pao->priv;
+ u16 pending_cache_error = 0;
switch (phm->function) {
case HPI_CONTROL_GET_STATE:
if (pao->has_control_cache) {
- rmb(); /* make sure we see updates DM_aed from DSP */
- if (hpi_check_control_cache(phw->p_cache, phm, phr))
+ rmb(); /* make sure we see updates DMAed from DSP */
+ if (hpi_check_control_cache(phw->p_cache, phm, phr)) {
break;
+ } else if (phm->u.c.attribute == HPI_METER_PEAK) {
+ pending_cache_error =
+ HPI_ERROR_CONTROL_CACHING;
+ }
}
hw_message(pao, phm, phr);
+ if (pending_cache_error && !phr->error)
+ phr->error = pending_cache_error;
break;
case HPI_CONTROL_GET_INFO:
hw_message(pao, phm, phr);
@@ -273,7 +266,8 @@ static void control_message(struct hpi_adapter_obj *pao,
case HPI_CONTROL_SET_STATE:
hw_message(pao, phm, phr);
if (pao->has_control_cache)
- hpi_sync_control_cache(phw->p_cache, phm, phr);
+ hpi_cmn_control_cache_sync_to_msg(phw->p_cache, phm,
+ phr);
break;
default:
phr->error = HPI_ERROR_INVALID_FUNC;
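The reshuffled HPI_CONTROL_GET_STATE path above tries the DMA-updated control cache first; only when the cache cannot answer a peak-meter read does it remember HPI_ERROR_CONTROL_CACHING, fall through to the DSP, and flag that code on an otherwise successful reply (a real hardware error is never overwritten). A standalone sketch of that "prefer cache, flag the miss" shape; every name and code here is a placeholder:

#include <stdio.h>
#include <stdbool.h>

#define ERR_NONE     0
#define ERR_CACHING  100   /* placeholder for HPI_ERROR_CONTROL_CACHING */

struct resp { int value; int error; };

static bool cache_lookup(int ctl, struct resp *r)
{
	(void)ctl; (void)r;
	return false;          /* pretend the cache cannot answer */
}

static void hw_read(int ctl, struct resp *r)
{
	(void)ctl;
	r->value = 42;         /* pretend the DSP answered */
	r->error = ERR_NONE;
}

static void get_state(int ctl, bool flag_cache_miss, struct resp *r)
{
	int pending = ERR_NONE;

	if (cache_lookup(ctl, r))
		return;                      /* served from cache */
	if (flag_cache_miss)
		pending = ERR_CACHING;       /* remember the miss */

	hw_read(ctl, r);                     /* slow path via the DSP */
	if (pending && !r->error)
		r->error = pending;          /* never mask a real error */
}

int main(void)
{
	struct resp r;

	get_state(7, true, &r);
	printf("value=%d error=%d\n", r.value, r.error);
	return 0;
}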
@@ -296,9 +290,9 @@ static void outstream_message(struct hpi_adapter_obj *pao,
{
if (phm->obj_index >= HPI_MAX_STREAMS) {
- phr->error = HPI_ERROR_INVALID_STREAM;
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
HPI_DEBUG_LOG(WARNING,
- "message referencing invalid stream %d "
+ "Message referencing invalid stream %d "
"on adapter index %d\n", phm->obj_index,
phm->adapter_index);
return;
@@ -340,9 +334,9 @@ static void instream_message(struct hpi_adapter_obj *pao,
{
if (phm->obj_index >= HPI_MAX_STREAMS) {
- phr->error = HPI_ERROR_INVALID_STREAM;
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
HPI_DEBUG_LOG(WARNING,
- "message referencing invalid stream %d "
+ "Message referencing invalid stream %d "
"on adapter index %d\n", phm->obj_index,
phm->adapter_index);
return;
@@ -385,8 +379,8 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
* All other messages are ignored unless the adapter index matches
* an adapter in the HPI
*/
- HPI_DEBUG_LOG(DEBUG, "HPI obj=%d, func=%d\n", phm->object,
- phm->function);
+ /* HPI_DEBUG_LOG(DEBUG, "HPI Obj=%d, Func=%d\n", phm->wObject,
+ phm->wFunction); */
/* if Dsp has crashed then do not communicate with it any more */
if (phm->object != HPI_OBJ_SUBSYSTEM) {
@@ -411,8 +405,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
/* Init default response */
if (phm->function != HPI_SUBSYS_CREATE_ADAPTER)
- hpi_init_response(phr, phm->object, phm->function,
- HPI_ERROR_PROCESSING_MESSAGE);
+ phr->error = HPI_ERROR_PROCESSING_MESSAGE;
HPI_DEBUG_LOG(VERBOSE, "start of switch\n");
switch (phm->type) {
@@ -423,9 +416,6 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
break;
case HPI_OBJ_ADAPTER:
- phr->size =
- sizeof(struct hpi_response_header) +
- sizeof(struct hpi_adapter_res);
adapter_message(pao, phm, phr);
break;
@@ -474,14 +464,6 @@ static void subsys_create_adapter(struct hpi_message *phm,
memset(&ao, 0, sizeof(ao));
- /* this HPI only creates adapters for TI/PCI devices */
- if (phm->u.s.resource.bus_type != HPI_BUS_PCI)
- return;
- if (phm->u.s.resource.r.pci->vendor_id != HPI_PCI_VENDOR_ID_TI)
- return;
- if (phm->u.s.resource.r.pci->device_id != HPI_PCI_DEV_ID_DSP6205)
- return;
-
ao.priv = kzalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL);
if (!ao.priv) {
HPI_DEBUG_LOG(ERROR, "cant get mem for adapter object\n");
@@ -491,18 +473,20 @@ static void subsys_create_adapter(struct hpi_message *phm,
ao.pci = *phm->u.s.resource.r.pci;
err = create_adapter_obj(&ao, &os_error_code);
- if (!err)
- err = hpi_add_adapter(&ao);
if (err) {
- phr->u.s.data = os_error_code;
delete_adapter_obj(&ao);
- phr->error = err;
+ if (err >= HPI_ERROR_BACKEND_BASE) {
+ phr->error = HPI_ERROR_DSP_BOOTLOAD;
+ phr->specific_error = err;
+ } else {
+ phr->error = err;
+ }
+ phr->u.s.data = os_error_code;
return;
}
- phr->u.s.aw_adapter_list[ao.index] = ao.adapter_type;
+ phr->u.s.adapter_type = ao.adapter_type;
phr->u.s.adapter_index = ao.index;
- phr->u.s.num_adapters++;
phr->error = 0;
}
@@ -513,7 +497,7 @@ static void subsys_delete_adapter(struct hpi_message *phm,
struct hpi_adapter_obj *pao;
struct hpi_hw_obj *phw;
- pao = hpi_find_adapter(phm->adapter_index);
+ pao = hpi_find_adapter(phm->obj_index);
if (!pao) {
phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
return;
@@ -526,6 +510,7 @@ static void subsys_delete_adapter(struct hpi_message *phm,
iowrite32(C6205_HDCR_WARMRESET, phw->prHDCR);
delete_adapter_obj(pao);
+ hpi_delete_adapter(pao);
phr->error = 0;
}
@@ -538,10 +523,6 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
struct hpi_hw_obj *phw = pao->priv;
struct bus_master_interface *interface;
u32 phys_addr;
-#ifndef HPI6205_NO_HSR_POLL
- u32 time_out = HPI6205_TIMEOUT;
- u32 temp1;
-#endif
int i;
u16 err;
@@ -566,7 +547,7 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
if (hpios_locked_mem_alloc(&phw->h_locked_mem,
sizeof(struct bus_master_interface),
- pao->pci.p_os_data))
+ pao->pci.pci_dev))
phw->p_interface_buffer = NULL;
else if (hpios_locked_mem_get_virt_addr(&phw->h_locked_mem,
(void *)&phw->p_interface_buffer))
@@ -591,49 +572,29 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
/* allow boot load even if mem alloc wont work */
if (!phw->p_interface_buffer)
- return hpi6205_error(0, HPI_ERROR_MEMORY_ALLOC);
+ return HPI_ERROR_MEMORY_ALLOC;
interface = phw->p_interface_buffer;
-#ifndef HPI6205_NO_HSR_POLL
- /* wait for first interrupt indicating the DSP init is done */
- time_out = HPI6205_TIMEOUT * 10;
- temp1 = 0;
- while (((temp1 & C6205_HSR_INTSRC) == 0) && --time_out)
- temp1 = ioread32(phw->prHSR);
-
- if (temp1 & C6205_HSR_INTSRC)
- HPI_DEBUG_LOG(INFO,
- "interrupt confirming DSP code running OK\n");
- else {
- HPI_DEBUG_LOG(ERROR,
- "timed out waiting for interrupt "
- "confirming DSP code running\n");
- return hpi6205_error(0, HPI6205_ERROR_6205_NO_IRQ);
- }
-
- /* reset the interrupt */
- iowrite32(C6205_HSR_INTSRC, phw->prHSR);
-#endif
-
/* make sure the DSP has started ok */
if (!wait_dsp_ack(phw, H620_HIF_RESET, HPI6205_TIMEOUT * 10)) {
HPI_DEBUG_LOG(ERROR, "timed out waiting reset state \n");
- return hpi6205_error(0, HPI6205_ERROR_6205_INIT_FAILED);
+ return HPI6205_ERROR_6205_INIT_FAILED;
}
/* Note that *pao, *phw are zeroed after allocation,
* so pointers and flags are NULL by default.
* Allocate bus mastering control cache buffer and tell the DSP about it
*/
if (interface->control_cache.number_of_controls) {
- void *p_control_cache_virtual;
+ u8 *p_control_cache_virtual;
err = hpios_locked_mem_alloc(&phw->h_control_cache,
interface->control_cache.size_in_bytes,
- pao->pci.p_os_data);
+ pao->pci.pci_dev);
if (!err)
err = hpios_locked_mem_get_virt_addr(&phw->
- h_control_cache, &p_control_cache_virtual);
+ h_control_cache,
+ (void *)&p_control_cache_virtual);
if (!err) {
memset(p_control_cache_virtual, 0,
interface->control_cache.size_in_bytes);
@@ -642,7 +603,6 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
hpi_alloc_control_cache(interface->
control_cache.number_of_controls,
interface->control_cache.size_in_bytes,
- (struct hpi_control_cache_info *)
p_control_cache_virtual);
if (!phw->p_cache)
err = HPI_ERROR_MEMORY_ALLOC;
@@ -662,78 +622,56 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
pao->has_control_cache = 0;
}
}
- /* allocate bus mastering async buffer and tell the DSP about it */
- if (interface->async_buffer.b.size) {
- err = hpios_locked_mem_alloc(&phw->h_async_event_buffer,
- interface->async_buffer.b.size *
- sizeof(struct hpi_async_event), pao->pci.p_os_data);
- if (!err)
- err = hpios_locked_mem_get_virt_addr
- (&phw->h_async_event_buffer, (void *)
- &phw->p_async_event_buffer);
- if (!err)
- memset((void *)phw->p_async_event_buffer, 0,
- interface->async_buffer.b.size *
- sizeof(struct hpi_async_event));
- if (!err) {
- err = hpios_locked_mem_get_phys_addr
- (&phw->h_async_event_buffer, &phys_addr);
- interface->async_buffer.physical_address32 =
- phys_addr;
- }
- if (err) {
- if (hpios_locked_mem_valid(&phw->
- h_async_event_buffer)) {
- hpios_locked_mem_free
- (&phw->h_async_event_buffer);
- phw->p_async_event_buffer = NULL;
- }
- }
- }
send_dsp_command(phw, H620_HIF_IDLE);
{
- struct hpi_message hM;
- struct hpi_response hR;
+ struct hpi_message hm;
+ struct hpi_response hr;
u32 max_streams;
HPI_DEBUG_LOG(VERBOSE, "init ADAPTER_GET_INFO\n");
- memset(&hM, 0, sizeof(hM));
- hM.type = HPI_TYPE_MESSAGE;
- hM.size = sizeof(hM);
- hM.object = HPI_OBJ_ADAPTER;
- hM.function = HPI_ADAPTER_GET_INFO;
- hM.adapter_index = 0;
- memset(&hR, 0, sizeof(hR));
- hR.size = sizeof(hR);
-
- err = message_response_sequence(pao, &hM, &hR);
+ memset(&hm, 0, sizeof(hm));
+ hm.type = HPI_TYPE_MESSAGE;
+ hm.size = sizeof(hm);
+ hm.object = HPI_OBJ_ADAPTER;
+ hm.function = HPI_ADAPTER_GET_INFO;
+ hm.adapter_index = 0;
+ memset(&hr, 0, sizeof(hr));
+ hr.size = sizeof(hr);
+
+ err = message_response_sequence(pao, &hm, &hr);
if (err) {
HPI_DEBUG_LOG(ERROR, "message transport error %d\n",
err);
return err;
}
- if (hR.error)
- return hR.error;
+ if (hr.error)
+ return hr.error;
- pao->adapter_type = hR.u.a.adapter_type;
- pao->index = hR.u.a.adapter_index;
+ pao->adapter_type = hr.u.ax.info.adapter_type;
+ pao->index = hr.u.ax.info.adapter_index;
- max_streams = hR.u.a.num_outstreams + hR.u.a.num_instreams;
+ max_streams =
+ hr.u.ax.info.num_outstreams +
+ hr.u.ax.info.num_instreams;
hpios_locked_mem_prepare((max_streams * 6) / 10, max_streams,
- 65536, pao->pci.p_os_data);
+ 65536, pao->pci.pci_dev);
HPI_DEBUG_LOG(VERBOSE,
"got adapter info type %x index %d serial %d\n",
- hR.u.a.adapter_type, hR.u.a.adapter_index,
- hR.u.a.serial_number);
+ hr.u.ax.info.adapter_type, hr.u.ax.info.adapter_index,
+ hr.u.ax.info.serial_number);
}
pao->open = 0; /* upon creation the adapter is closed */
+ if (phw->p_cache)
+ phw->p_cache->adap_idx = pao->index;
+
HPI_DEBUG_LOG(INFO, "bootload DSP OK\n");
- return 0;
+
+ return hpi_add_adapter(pao);
}
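create_adapter_obj() now sizes the locked-memory preparation from the adapter's own report: max_streams = num_outstreams + num_instreams, and the first argument to hpios_locked_mem_prepare() is (max_streams * 6) / 10, i.e. roughly 60% of the streams, with max_streams as the second argument and 65536 bytes as the third (the exact semantics of the arguments live in hpios). The integer arithmetic, worked through with hypothetical stream counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical adapter: 8 outstreams + 8 instreams */
	unsigned int num_outstreams = 8, num_instreams = 8;
	unsigned int max_streams = num_outstreams + num_instreams;   /* 16 */

	unsigned int arg1 = (max_streams * 6) / 10;   /* integer math: 9 */
	unsigned int arg2 = max_streams;              /* 16 */
	unsigned int arg3 = 65536;                    /* 64 KiB */

	printf("hpios_locked_mem_prepare(%u, %u, %u, pdev)\n", arg1, arg2, arg3);
	return 0;
}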
/** Free memory areas allocated by adapter
@@ -747,11 +685,6 @@ static void delete_adapter_obj(struct hpi_adapter_obj *pao)
phw = pao->priv;
- if (hpios_locked_mem_valid(&phw->h_async_event_buffer)) {
- hpios_locked_mem_free(&phw->h_async_event_buffer);
- phw->p_async_event_buffer = NULL;
- }
-
if (hpios_locked_mem_valid(&phw->h_control_cache)) {
hpios_locked_mem_free(&phw->h_control_cache);
hpi_free_control_cache(phw->p_cache);
@@ -776,13 +709,15 @@ static void delete_adapter_obj(struct hpi_adapter_obj *pao)
phw->outstream_host_buffer_size[i] = 0;
}
- hpios_locked_mem_unprepare(pao->pci.p_os_data);
+ hpios_locked_mem_unprepare(pao->pci.pci_dev);
- hpi_delete_adapter(pao);
kfree(phw);
}
/*****************************************************************************/
+/* Adapter functions */
+
+/*****************************************************************************/
/* OutStream Host buffer functions */
/** Allocate or attach buffer for busmastering
@@ -824,7 +759,7 @@ static void outstream_host_buffer_allocate(struct hpi_adapter_obj *pao,
err = hpios_locked_mem_alloc(&phw->outstream_host_buffers
[phm->obj_index], phm->u.d.u.buffer.buffer_size,
- pao->pci.p_os_data);
+ pao->pci.pci_dev);
if (err) {
phr->error = HPI_ERROR_INVALID_DATASIZE;
@@ -861,7 +796,7 @@ static void outstream_host_buffer_allocate(struct hpi_adapter_obj *pao,
if (phm->u.d.u.buffer.buffer_size & (phm->u.d.u.buffer.
buffer_size - 1)) {
HPI_DEBUG_LOG(ERROR,
- "buffer size must be 2^N not %d\n",
+ "Buffer size must be 2^N not %d\n",
phm->u.d.u.buffer.buffer_size);
phr->error = HPI_ERROR_INVALID_DATASIZE;
return;
@@ -875,6 +810,7 @@ static void outstream_host_buffer_allocate(struct hpi_adapter_obj *pao,
status->dSP_index = 0;
status->host_index = status->dSP_index;
status->size_in_bytes = phm->u.d.u.buffer.buffer_size;
+ status->auxiliary_data_available = 0;
hw_message(pao, phm, phr);
@@ -966,51 +902,6 @@ static void outstream_write(struct hpi_adapter_obj *pao,
hpi_init_response(phr, phm->object, phm->function, 0);
status = &interface->outstream_host_buffer_status[phm->obj_index];
- if (phw->flag_outstream_just_reset[phm->obj_index]) {
- /* First OutStremWrite() call following reset will write data to the
- adapter's buffers, reducing delay before stream can start. The DSP
- takes care of setting the stream data format using format information
- embedded in phm.
- */
- int partial_write = 0;
- unsigned int original_size = 0;
-
- phw->flag_outstream_just_reset[phm->obj_index] = 0;
-
- /* Send the first buffer to the DSP the old way. */
- /* Limit size of first transfer - */
- /* expect that this will not usually be triggered. */
- if (phm->u.d.u.data.data_size > HPI6205_SIZEOF_DATA) {
- partial_write = 1;
- original_size = phm->u.d.u.data.data_size;
- phm->u.d.u.data.data_size = HPI6205_SIZEOF_DATA;
- }
- /* write it */
- phm->function = HPI_OSTREAM_WRITE;
- hw_message(pao, phm, phr);
-
- if (phr->error)
- return;
-
- /* update status information that the DSP would typically
- * update (and will update next time the DSP
- * buffer update task reads data from the host BBM buffer)
- */
- status->auxiliary_data_available = phm->u.d.u.data.data_size;
- status->host_index += phm->u.d.u.data.data_size;
- status->dSP_index += phm->u.d.u.data.data_size;
-
- /* if we did a full write, we can return from here. */
- if (!partial_write)
- return;
-
- /* tweak buffer parameters and let the rest of the */
- /* buffer land in internal BBM buffer */
- phm->u.d.u.data.data_size =
- original_size - HPI6205_SIZEOF_DATA;
- phm->u.d.u.data.pb_data += HPI6205_SIZEOF_DATA;
- }
-
space_available = outstream_get_space_available(status);
if (space_available < phm->u.d.u.data.data_size) {
phr->error = HPI_ERROR_INVALID_DATASIZE;
@@ -1047,6 +938,24 @@ static void outstream_write(struct hpi_adapter_obj *pao,
memcpy(p_bbm_data, p_app_data + l_first_write,
phm->u.d.u.data.data_size - l_first_write);
}
+
+ /*
+ * This version relies on the DSP code triggering an OStream buffer
+ * update immediately following a SET_FORMAT call. The host has
+ * already written data into the BBM buffer, but the DSP won't know
+ * about it until dwHostIndex is adjusted.
+ */
+ if (phw->flag_outstream_just_reset[phm->obj_index]) {
+ /* Format can only change after reset. Must tell DSP. */
+ u16 function = phm->function;
+ phw->flag_outstream_just_reset[phm->obj_index] = 0;
+ phm->function = HPI_OSTREAM_SET_FORMAT;
+ hw_message(pao, phm, phr); /* send the format to the DSP */
+ phm->function = function;
+ if (phr->error)
+ return;
+ }
+
status->host_index += phm->u.d.u.data.data_size;
}
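The BBM host buffer written above is required to be a power-of-two size (see the "Buffer size must be 2^N" check), which is what lets the write path reduce host_index with a mask and split a wrapping write into two memcpy() calls, as the tail of the hunk shows. A minimal standalone sketch of that split copy, with toy sizes:

#include <stdio.h>
#include <string.h>

static void bbm_write(unsigned char *buf, unsigned int size_pow2,
		      unsigned int *host_index,
		      const unsigned char *data, unsigned int len)
{
	unsigned int offset = *host_index & (size_pow2 - 1);
	unsigned int first = size_pow2 - offset;

	if (first > len)
		first = len;
	memcpy(buf + offset, data, first);        /* up to the end of the ring */
	memcpy(buf, data + first, len - first);   /* 0 bytes if no wrap */
	*host_index += len;                       /* running byte count */
}

int main(void)
{
	unsigned char ring[8];
	unsigned char src[] = "abcdef";
	unsigned int host_index = 5;

	bbm_write(ring, sizeof(ring), &host_index, src, 6);
	printf("host_index now %u\n", host_index);
	return 0;
}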
@@ -1132,7 +1041,7 @@ static void instream_host_buffer_allocate(struct hpi_adapter_obj *pao,
err = hpios_locked_mem_alloc(&phw->instream_host_buffers[phm->
obj_index], phm->u.d.u.buffer.buffer_size,
- pao->pci.p_os_data);
+ pao->pci.pci_dev);
if (err) {
phr->error = HPI_ERROR_INVALID_DATASIZE;
@@ -1163,7 +1072,7 @@ static void instream_host_buffer_allocate(struct hpi_adapter_obj *pao,
if (phm->u.d.u.buffer.buffer_size & (phm->u.d.u.buffer.
buffer_size - 1)) {
HPI_DEBUG_LOG(ERROR,
- "buffer size must be 2^N not %d\n",
+ "Buffer size must be 2^N not %d\n",
phm->u.d.u.buffer.buffer_size);
phr->error = HPI_ERROR_INVALID_DATASIZE;
return;
@@ -1178,8 +1087,10 @@ static void instream_host_buffer_allocate(struct hpi_adapter_obj *pao,
status->dSP_index = 0;
status->host_index = status->dSP_index;
status->size_in_bytes = phm->u.d.u.buffer.buffer_size;
+ status->auxiliary_data_available = 0;
hw_message(pao, phm, phr);
+
if (phr->error
&& hpios_locked_mem_valid(&phw->
instream_host_buffers[phm->obj_index])) {
@@ -1344,33 +1255,36 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
struct hpi_hw_obj *phw = pao->priv;
struct dsp_code dsp_code;
u16 boot_code_id[HPI6205_MAX_FILES_TO_LOAD];
- u16 firmware_id = pao->pci.subsys_device_id;
u32 temp;
int dsp = 0, i = 0;
u16 err = 0;
boot_code_id[0] = HPI_ADAPTER_ASI(0x6205);
- /* special cases where firmware_id != subsys ID */
- switch (firmware_id) {
+ boot_code_id[1] = pao->pci.pci_dev->subsystem_device;
+ boot_code_id[1] = HPI_ADAPTER_FAMILY_ASI(boot_code_id[1]);
+
+ /* fix up cases where bootcode id[1] != subsys id */
+ switch (boot_code_id[1]) {
case HPI_ADAPTER_FAMILY_ASI(0x5000):
- boot_code_id[0] = firmware_id;
- firmware_id = 0;
+ boot_code_id[0] = boot_code_id[1];
+ boot_code_id[1] = 0;
break;
case HPI_ADAPTER_FAMILY_ASI(0x5300):
case HPI_ADAPTER_FAMILY_ASI(0x5400):
case HPI_ADAPTER_FAMILY_ASI(0x6300):
- firmware_id = HPI_ADAPTER_FAMILY_ASI(0x6400);
+ boot_code_id[1] = HPI_ADAPTER_FAMILY_ASI(0x6400);
break;
case HPI_ADAPTER_FAMILY_ASI(0x5600):
case HPI_ADAPTER_FAMILY_ASI(0x6500):
- firmware_id = HPI_ADAPTER_FAMILY_ASI(0x6600);
+ boot_code_id[1] = HPI_ADAPTER_FAMILY_ASI(0x6600);
break;
case HPI_ADAPTER_FAMILY_ASI(0x8800):
- firmware_id = HPI_ADAPTER_FAMILY_ASI(0x8900);
+ boot_code_id[1] = HPI_ADAPTER_FAMILY_ASI(0x8900);
+ break;
+ default:
break;
}
- boot_code_id[1] = firmware_id;
/* reset DSP by writing a 1 to the WARMRESET bit */
temp = C6205_HDCR_WARMRESET;
@@ -1381,7 +1295,7 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
temp = ioread32(phw->prHSR);
if ((temp & (C6205_HSR_CFGERR | C6205_HSR_EEREAD)) !=
C6205_HSR_EEREAD)
- return hpi6205_error(0, HPI6205_ERROR_6205_EEPROM);
+ return HPI6205_ERROR_6205_EEPROM;
temp |= 0x04;
/* disable PINTA interrupt */
iowrite32(temp, phw->prHSR);
@@ -1389,27 +1303,27 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
/* check control register reports PCI boot mode */
temp = ioread32(phw->prHDCR);
if (!(temp & C6205_HDCR_PCIBOOT))
- return hpi6205_error(0, HPI6205_ERROR_6205_REG);
+ return HPI6205_ERROR_6205_REG;
- /* try writing a couple of numbers to the DSP page register */
+ /* try writing a few numbers to the DSP page register */
/* and reading them back. */
- temp = 1;
+ temp = 3;
iowrite32(temp, phw->prDSPP);
if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
- return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ return HPI6205_ERROR_6205_DSPPAGE;
temp = 2;
iowrite32(temp, phw->prDSPP);
if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
- return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
- temp = 3;
+ return HPI6205_ERROR_6205_DSPPAGE;
+ temp = 1;
iowrite32(temp, phw->prDSPP);
if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
- return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ return HPI6205_ERROR_6205_DSPPAGE;
/* reset DSP page to the correct number */
temp = 0;
iowrite32(temp, phw->prDSPP);
if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
- return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ return HPI6205_ERROR_6205_DSPPAGE;
phw->dsp_page = 0;
/* release 6713 from reset before 6205 is bootloaded.
@@ -1455,7 +1369,7 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
return err;
/* write the DSP code down into the DSPs memory */
- dsp_code.ps_dev = pao->pci.p_os_data;
+ dsp_code.ps_dev = pao->pci.pci_dev;
err = hpi_dsp_code_open(boot_code_id[dsp], &dsp_code,
pos_error_code);
if (err)
@@ -1484,10 +1398,8 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
if (err)
break;
for (i = 0; i < (int)length; i++) {
- err = boot_loader_write_mem32(pao, dsp,
- address, *pcode);
- if (err)
- break;
+ boot_loader_write_mem32(pao, dsp, address,
+ *pcode);
/* dummy read every 4 words */
/* for 6205 advisory 1.4.4 */
if (i % 4 == 0)
@@ -1561,7 +1473,7 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
host_mailbox_address_on_dsp = 0x80000000;
while ((physicalPC_iaddress != physicalPC_iaddress_verify)
&& time_out--) {
- err = boot_loader_write_mem32(pao, 0,
+ boot_loader_write_mem32(pao, 0,
host_mailbox_address_on_dsp,
physicalPC_iaddress);
physicalPC_iaddress_verify =
@@ -1631,11 +1543,10 @@ static u32 boot_loader_read_mem32(struct hpi_adapter_obj *pao, int dsp_index,
return data;
}
-static u16 boot_loader_write_mem32(struct hpi_adapter_obj *pao, int dsp_index,
- u32 address, u32 data)
+static void boot_loader_write_mem32(struct hpi_adapter_obj *pao,
+ int dsp_index, u32 address, u32 data)
{
struct hpi_hw_obj *phw = pao->priv;
- u16 err = 0;
__iomem u32 *p_data;
/* u32 dwVerifyData=0; */
@@ -1675,15 +1586,11 @@ static u16 boot_loader_write_mem32(struct hpi_adapter_obj *pao, int dsp_index,
/* dummy read every 4 words for 6205 advisory 1.4.4 */
boot_loader_read_mem32(pao, 0, 0);
- } else
- err = hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
- return err;
+ }
}
static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
{
- u16 err = 0;
-
if (dsp_index == 0) {
u32 setting;
@@ -1711,8 +1618,7 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
boot_loader_write_mem32(pao, dsp_index, 0x01800008, setting);
if (setting != boot_loader_read_mem32(pao, dsp_index,
0x01800008))
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_EMIF);
+ return HPI6205_ERROR_DSP_EMIF;
/* EMIF CE1 setup - 32 bit async. This is 6713 #1 HPI, */
/* which occupies D15..0. 6713 starts at 27MHz, so need */
@@ -1725,8 +1631,7 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
boot_loader_write_mem32(pao, dsp_index, 0x01800004, setting);
if (setting != boot_loader_read_mem32(pao, dsp_index,
0x01800004))
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_EMIF);
+ return HPI6205_ERROR_DSP_EMIF;
/* EMIF CE2 setup - 32 bit async. This is 6713 #2 HPI, */
/* which occupies D15..0. 6713 starts at 27MHz, so need */
@@ -1738,8 +1643,7 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
boot_loader_write_mem32(pao, dsp_index, 0x01800010, setting);
if (setting != boot_loader_read_mem32(pao, dsp_index,
0x01800010))
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_EMIF);
+ return HPI6205_ERROR_DSP_EMIF;
/* EMIF CE3 setup - 32 bit async. */
/* This is the PLD on the ASI5000 cards only */
@@ -1750,8 +1654,7 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
boot_loader_write_mem32(pao, dsp_index, 0x01800014, setting);
if (setting != boot_loader_read_mem32(pao, dsp_index,
0x01800014))
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_EMIF);
+ return HPI6205_ERROR_DSP_EMIF;
/* set EMIF SDRAM control for 2Mx32 SDRAM (512x32x4 bank) */
/* need to use this else DSP code crashes? */
@@ -1775,12 +1678,9 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
read_data =
0xFFF7 & boot_loader_read_mem32(pao, 0, HPICL_ADDR);
if (write_data != read_data) {
- err = hpi6205_error(dsp_index,
- HPI6205_ERROR_C6713_HPIC);
HPI_DEBUG_LOG(ERROR, "HPICL %x %x\n", write_data,
read_data);
-
- return err;
+ return HPI6205_ERROR_C6713_HPIC;
}
/* HPIA - walking ones test */
write_data = 1;
@@ -1798,11 +1698,9 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
HPIAH_ADDR))
<< 16);
if (read_data != write_data) {
- err = hpi6205_error(dsp_index,
- HPI6205_ERROR_C6713_HPIA);
HPI_DEBUG_LOG(ERROR, "HPIA %x %x\n",
write_data, read_data);
- return err;
+ return HPI6205_ERROR_C6713_HPIA;
}
write_data = write_data << 1;
}
@@ -1847,30 +1745,81 @@ static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
/* PLL should not be bypassed! */
if ((boot_loader_read_mem32(pao, dsp_index, 0x01B7C100) & 0xF)
!= 0x0001) {
- err = hpi6205_error(dsp_index,
- HPI6205_ERROR_C6713_PLL);
- return err;
+ return HPI6205_ERROR_C6713_PLL;
}
/* setup C67x EMIF (note this is the only use of
BAR1 via BootLoader_WriteMem32) */
boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_GCTL,
0x000034A8);
+
+ /* EMIF CE0 setup - 2Mx32 Sync DRAM
+ 31..28 Wr setup
+ 27..22 Wr strobe
+ 21..20 Wr hold
+ 19..16 Rd setup
+ 15..14 -
+ 13..8 Rd strobe
+ 7..4 MTYPE 0011 Sync DRAM 32bits
+ 3 Wr hold MSB
+ 2..0 Rd hold
+ */
boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_CE0,
0x00000030);
+
+ /* EMIF SDRAM Extension
+ 0x00
+ 31-21 0000b 0000b 000b
+ 20 WR2RD = 2cycles-1 = 1b
+
+ 19-18 WR2DEAC = 3cycle-1 = 10b
+ 17 WR2WR = 2cycle-1 = 1b
+ 16-15 R2WDQM = 4cycle-1 = 11b
+ 14-12 RD2WR = 6cycles-1 = 101b
+
+ 11-10 RD2DEAC = 4cycle-1 = 11b
+ 9 RD2RD = 2cycle-1 = 1b
+ 8-7 THZP = 3cycle-1 = 10b
+ 6-5 TWR = 2cycle-1 = 01b (tWR = 17ns)
+ 4 TRRD = 2cycle = 0b (tRRD = 14ns)
+ 3-1 TRAS = 5cycle-1 = 100b (Tras=42ns)
+ 1 CAS latency = 3cyc = 1b
+ (for Micron 2M32-7 operating at 100MHz)
+ */
boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_SDRAMEXT,
0x001BDF29);
+
+ /* EMIF SDRAM control - set up for a 2Mx32 SDRAM (512x32x4 bank)
+ 31 - 0b -
+ 30 SDBSZ 1b 4 bank
+ 29..28 SDRSZ 00b 11 row address pins
+
+ 27..26 SDCSZ 01b 8 column address pins
+ 25 RFEN 1b refresh enabled
+ 24 INIT 1b init SDRAM!
+
+ 23..20 TRCD 0001b (Trcd/Tcyc)-1 = (20/10)-1 = 1
+
+ 19..16 TRP 0001b (Trp/Tcyc)-1 = (20/10)-1 = 1
+
+ 15..12 TRC 0110b (Trc/Tcyc)-1 = (70/10)-1 = 6
+
+ 11..0 - 0000b 0000b 0000b
+ */
boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_SDRAMCTL,
- 0x47117000);
+ 0x47116000);
+
+ /* SDRAM refresh timing
+ Need 4,096 refresh cycles every 64ms = 15.625us = 1562 cycles at 100MHz = 0x61A
+ */
boot_loader_write_mem32(pao, dsp_index,
C6713_EMIF_SDRAMTIMING, 0x00000410);
hpios_delay_micro_seconds(1000);
} else if (dsp_index == 2) {
/* DSP 2 is a C6713 */
+ }
- } else
- err = hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
- return err;
+ return 0;
}
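The refresh-timing comment above works out as follows: 4096 refresh cycles every 64 ms means one refresh every 64 ms / 4096 = 15.625 us, which at the 100 MHz EMIF clock is about 1562 clocks, i.e. 0x61A. (Note the value actually written to C6713_EMIF_SDRAMTIMING above is 0x410, not the 0x61A the comment arrives at.) The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	double period_ms = 64.0;    /* full refresh period */
	double cycles = 4096.0;     /* refresh cycles required in that period */
	double emif_mhz = 100.0;    /* EMIF clock from the comment */

	double interval_us = (period_ms * 1000.0) / cycles;   /* 15.625 us */
	double clocks = interval_us * emif_mhz;                /* 1562.5    */

	printf("interval = %.3f us, ~%d EMIF clocks (0x%X)\n",
	       interval_us, (int)clocks, (int)clocks);
	return 0;
}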
static u16 boot_loader_test_memory(struct hpi_adapter_obj *pao, int dsp_index,
@@ -1896,7 +1845,7 @@ static u16 boot_loader_test_memory(struct hpi_adapter_obj *pao, int dsp_index,
test_addr);
if (data != test_data) {
HPI_DEBUG_LOG(VERBOSE,
- "memtest error details "
+ "Memtest error details "
"%08x %08x %08x %i\n", test_addr,
test_data, data, dsp_index);
return 1; /* error */
@@ -1916,7 +1865,7 @@ static u16 boot_loader_test_memory(struct hpi_adapter_obj *pao, int dsp_index,
data = boot_loader_read_mem32(pao, dsp_index, test_addr);
if (data != test_data) {
HPI_DEBUG_LOG(VERBOSE,
- "memtest error details "
+ "Memtest error details "
"%08x %08x %08x %i\n", test_addr, test_data,
data, dsp_index);
return 1; /* error */
@@ -1946,8 +1895,8 @@ static u16 boot_loader_test_internal_memory(struct hpi_adapter_obj *pao,
/* 64K data mem */
err = boot_loader_test_memory(pao, dsp_index,
0x80000000, 0x10000);
- } else if ((dsp_index == 1) || (dsp_index == 2)) {
- /* DSP 1&2 are a C6713 */
+ } else if (dsp_index == 1) {
+ /* DSP 1 is a C6713 */
/* 192K internal mem */
err = boot_loader_test_memory(pao, dsp_index, 0x00000000,
0x30000);
@@ -1955,11 +1904,10 @@ static u16 boot_loader_test_internal_memory(struct hpi_adapter_obj *pao,
/* 64K internal mem / L2 cache */
err = boot_loader_test_memory(pao, dsp_index,
0x00030000, 0x10000);
- } else
- return hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
+ }
if (err)
- return hpi6205_error(dsp_index, HPI6205_ERROR_DSP_INTMEM);
+ return HPI6205_ERROR_DSP_INTMEM;
else
return 0;
}
@@ -1972,24 +1920,23 @@ static u16 boot_loader_test_external_memory(struct hpi_adapter_obj *pao,
if (dsp_index == 0) {
/* only test for SDRAM if an ASI5000 card */
- if (pao->pci.subsys_device_id == 0x5000) {
+ if (pao->pci.pci_dev->subsystem_device == 0x5000) {
/* DSP 0 is always C6205 */
dRAM_start_address = 0x00400000;
dRAM_size = 0x200000;
/*dwDRAMinc=1024; */
} else
return 0;
- } else if ((dsp_index == 1) || (dsp_index == 2)) {
+ } else if (dsp_index == 1) {
/* DSP 1 is a C6713 */
dRAM_start_address = 0x80000000;
dRAM_size = 0x200000;
/*dwDRAMinc=1024; */
- } else
- return hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
+ }
if (boot_loader_test_memory(pao, dsp_index, dRAM_start_address,
dRAM_size))
- return hpi6205_error(dsp_index, HPI6205_ERROR_DSP_EXTMEM);
+ return HPI6205_ERROR_DSP_EXTMEM;
return 0;
}
@@ -1998,28 +1945,25 @@ static u16 boot_loader_test_pld(struct hpi_adapter_obj *pao, int dsp_index)
u32 data = 0;
if (dsp_index == 0) {
/* only test for DSP0 PLD on ASI5000 card */
- if (pao->pci.subsys_device_id == 0x5000) {
+ if (pao->pci.pci_dev->subsystem_device == 0x5000) {
/* PLD is located at CE3=0x03000000 */
data = boot_loader_read_mem32(pao, dsp_index,
0x03000008);
if ((data & 0xF) != 0x5)
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_PLD);
+ return HPI6205_ERROR_DSP_PLD;
data = boot_loader_read_mem32(pao, dsp_index,
0x0300000C);
if ((data & 0xF) != 0xA)
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_PLD);
+ return HPI6205_ERROR_DSP_PLD;
}
} else if (dsp_index == 1) {
/* DSP 1 is a C6713 */
- if (pao->pci.subsys_device_id == 0x8700) {
+ if (pao->pci.pci_dev->subsystem_device == 0x8700) {
/* PLD is located at CE1=0x90000000 */
data = boot_loader_read_mem32(pao, dsp_index,
0x90000010);
if ((data & 0xFF) != 0xAA)
- return hpi6205_error(dsp_index,
- HPI6205_ERROR_DSP_PLD);
+ return HPI6205_ERROR_DSP_PLD;
/* 8713 - LED on */
boot_loader_write_mem32(pao, dsp_index, 0x90000000,
0x02);
@@ -2037,14 +1981,11 @@ static short hpi6205_transfer_data(struct hpi_adapter_obj *pao, u8 *p_data,
struct hpi_hw_obj *phw = pao->priv;
u32 data_transferred = 0;
u16 err = 0;
-#ifndef HPI6205_NO_HSR_POLL
- u32 time_out;
-#endif
u32 temp2;
struct bus_master_interface *interface = phw->p_interface_buffer;
if (!p_data)
- return HPI_ERROR_INVALID_DATA_TRANSFER;
+ return HPI_ERROR_INVALID_DATA_POINTER;
data_size &= ~3L; /* round data_size down to nearest 4 bytes */
@@ -2064,14 +2005,10 @@ static short hpi6205_transfer_data(struct hpi_adapter_obj *pao, u8 *p_data,
interface->transfer_size_in_bytes = this_copy;
-#ifdef HPI6205_NO_HSR_POLL
/* DSP must change this back to nOperation */
interface->dsp_ack = H620_HIF_IDLE;
-#endif
-
send_dsp_command(phw, operation);
-#ifdef HPI6205_NO_HSR_POLL
temp2 = wait_dsp_ack(phw, operation, HPI6205_TIMEOUT);
HPI_DEBUG_LOG(DEBUG, "spun %d times for data xfer of %d\n",
HPI6205_TIMEOUT - temp2, this_copy);
@@ -2079,45 +2016,11 @@ static short hpi6205_transfer_data(struct hpi_adapter_obj *pao, u8 *p_data,
if (!temp2) {
/* timed out */
HPI_DEBUG_LOG(ERROR,
- "timed out waiting for " "state %d got %d\n",
+ "Timed out waiting for " "state %d got %d\n",
operation, interface->dsp_ack);
break;
}
-#else
- /* spin waiting on the result */
- time_out = HPI6205_TIMEOUT;
- temp2 = 0;
- while ((temp2 == 0) && time_out--) {
- /* give 16k bus mastering transfer time to happen */
- /*(16k / 132Mbytes/s = 122usec) */
- hpios_delay_micro_seconds(20);
- temp2 = ioread32(phw->prHSR);
- temp2 &= C6205_HSR_INTSRC;
- }
- HPI_DEBUG_LOG(DEBUG, "spun %d times for data xfer of %d\n",
- HPI6205_TIMEOUT - time_out, this_copy);
- if (temp2 == C6205_HSR_INTSRC) {
- HPI_DEBUG_LOG(VERBOSE,
- "interrupt from HIF <data> OK\n");
- /*
- if(interface->dwDspAck != nOperation) {
- HPI_DEBUG_LOG(DEBUG("interface->dwDspAck=%d,
- expected %d \n",
- interface->dwDspAck,nOperation);
- }
- */
- }
-/* need to handle this differently... */
- else {
- HPI_DEBUG_LOG(ERROR,
- "interrupt from HIF <data> BAD\n");
- err = HPI_ERROR_DSP_HARDWARE;
- }
-
- /* reset the interrupt from the DSP */
- iowrite32(C6205_HSR_INTSRC, phw->prHSR);
-#endif
if (operation == H620_HIF_GET_DATA)
memcpy(&p_data[data_transferred],
(void *)&interface->u.b_data[0], this_copy);
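hpi6205_transfer_data() moves data through the fixed-size bus-master window in chunks: the total is rounded down to a whole number of 32-bit words, each pass copies at most the window size, kicks the DSP, waits for the matching ack, and then copies in or out depending on direction. A standalone sketch of just the slicing logic; the window size and the transport call are stand-ins:

#include <stdio.h>

#define WINDOW_BYTES 4096   /* stand-in for HPI6205_SIZEOF_DATA */

/* Pretend "DSP transfer" that just reports what it was asked to move. */
static int do_window_transfer(const unsigned char *src, unsigned int len)
{
	(void)src;
	printf("transfer %u bytes\n", len);
	return 0;   /* 0 = success */
}

static int transfer_data(const unsigned char *data, unsigned int size)
{
	unsigned int done = 0;

	size &= ~3u;                        /* round down to whole 32-bit words */
	while (done < size) {
		unsigned int this_copy = size - done;

		if (this_copy > WINDOW_BYTES)
			this_copy = WINDOW_BYTES;
		if (do_window_transfer(data + done, this_copy))
			return -1;          /* timeout / DSP error */
		done += this_copy;
	}
	return 0;
}

int main(void)
{
	static unsigned char buf[10000];

	return transfer_data(buf, sizeof(buf));
}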
@@ -2174,31 +2077,39 @@ static unsigned int message_count;
static u16 message_response_sequence(struct hpi_adapter_obj *pao,
struct hpi_message *phm, struct hpi_response *phr)
{
-#ifndef HPI6205_NO_HSR_POLL
- u32 temp2;
-#endif
u32 time_out, time_out2;
struct hpi_hw_obj *phw = pao->priv;
struct bus_master_interface *interface = phw->p_interface_buffer;
u16 err = 0;
message_count++;
+ if (phm->size > sizeof(interface->u)) {
+ phr->error = HPI_ERROR_MESSAGE_BUFFER_TOO_SMALL;
+ phr->specific_error = sizeof(interface->u);
+ phr->size = sizeof(struct hpi_response_header);
+ HPI_DEBUG_LOG(ERROR,
+ "message len %d too big for buffer %ld \n", phm->size,
+ sizeof(interface->u));
+ return 0;
+ }
+
/* Assume buffer of type struct bus_master_interface
is allocated "noncacheable" */
if (!wait_dsp_ack(phw, H620_HIF_IDLE, HPI6205_TIMEOUT)) {
HPI_DEBUG_LOG(DEBUG, "timeout waiting for idle\n");
- return hpi6205_error(0, HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT);
+ return HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT;
}
- interface->u.message_buffer = *phm;
+
+ memcpy(&interface->u.message_buffer, phm, phm->size);
/* signal we want a response */
send_dsp_command(phw, H620_HIF_GET_RESP);
time_out2 = wait_dsp_ack(phw, H620_HIF_GET_RESP, HPI6205_TIMEOUT);
- if (time_out2 == 0) {
+ if (!time_out2) {
HPI_DEBUG_LOG(ERROR,
- "(%u) timed out waiting for " "GET_RESP state [%x]\n",
+ "(%u) Timed out waiting for " "GET_RESP state [%x]\n",
message_count, interface->dsp_ack);
} else {
HPI_DEBUG_LOG(VERBOSE,
@@ -2208,58 +2119,38 @@ static u16 message_response_sequence(struct hpi_adapter_obj *pao,
/* spin waiting on HIF interrupt flag (end of msg process) */
time_out = HPI6205_TIMEOUT;
-#ifndef HPI6205_NO_HSR_POLL
- temp2 = 0;
- while ((temp2 == 0) && --time_out) {
- temp2 = ioread32(phw->prHSR);
- temp2 &= C6205_HSR_INTSRC;
- hpios_delay_micro_seconds(1);
- }
- if (temp2 == C6205_HSR_INTSRC) {
- rmb(); /* ensure we see latest value for dsp_ack */
- if ((interface->dsp_ack != H620_HIF_GET_RESP)) {
- HPI_DEBUG_LOG(DEBUG,
- "(%u)interface->dsp_ack(0x%x) != "
- "H620_HIF_GET_RESP, t=%u\n", message_count,
- interface->dsp_ack,
- HPI6205_TIMEOUT - time_out);
- } else {
- HPI_DEBUG_LOG(VERBOSE,
- "(%u)int with GET_RESP after %u\n",
- message_count, HPI6205_TIMEOUT - time_out);
+ /* read the result */
+ if (time_out) {
+ if (interface->u.response_buffer.size <= phr->size)
+ memcpy(phr, &interface->u.response_buffer,
+ interface->u.response_buffer.size);
+ else {
+ HPI_DEBUG_LOG(ERROR,
+ "response len %d too big for buffer %d\n",
+ interface->u.response_buffer.size, phr->size);
+ memcpy(phr, &interface->u.response_buffer,
+ sizeof(struct hpi_response_header));
+ phr->error = HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;
+ phr->specific_error =
+ interface->u.response_buffer.size;
+ phr->size = sizeof(struct hpi_response_header);
}
-
- } else {
- /* can we do anything else in response to the error ? */
- HPI_DEBUG_LOG(ERROR,
- "interrupt from HIF module BAD (function %x)\n",
- phm->function);
}
-
- /* reset the interrupt from the DSP */
- iowrite32(C6205_HSR_INTSRC, phw->prHSR);
-#endif
-
- /* read the result */
- if (time_out != 0)
- *phr = interface->u.response_buffer;
-
/* set interface back to idle */
send_dsp_command(phw, H620_HIF_IDLE);
- if ((time_out == 0) || (time_out2 == 0)) {
+ if (!time_out || !time_out2) {
HPI_DEBUG_LOG(DEBUG, "something timed out!\n");
- return hpi6205_error(0, HPI6205_ERROR_MSG_RESP_TIMEOUT);
+ return HPI6205_ERROR_MSG_RESP_TIMEOUT;
}
/* special case for adapter close - */
/* wait for the DSP to indicate it is idle */
if (phm->function == HPI_ADAPTER_CLOSE) {
if (!wait_dsp_ack(phw, H620_HIF_IDLE, HPI6205_TIMEOUT)) {
HPI_DEBUG_LOG(DEBUG,
- "timeout waiting for idle "
+ "Timeout waiting for idle "
"(on adapter_close)\n");
- return hpi6205_error(0,
- HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT);
+ return HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT;
}
}
err = hpi_validate_response(phm, phr);
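message_response_sequence() now bounds-checks both directions against the shared bus-master union: an outbound message larger than sizeof(interface->u) is rejected up front with HPI_ERROR_MESSAGE_BUFFER_TOO_SMALL, and an inbound response larger than the caller's phr->size is truncated to the response header and flagged with HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL, with the offending size stashed in specific_error. A toy sketch of the two checks; sizes and codes are placeholders:

#include <stdio.h>

#define BUF_BYTES         64   /* stand-in for sizeof(interface->u) */
#define HDR_BYTES          8   /* stand-in for sizeof(struct hpi_response_header) */
#define ERR_MSG_TOO_BIG  101   /* placeholder error codes */
#define ERR_RESP_TOO_BIG 102

struct result { unsigned int size, error, specific_error; };

/* Outbound: refuse a message that cannot fit in the shared window. */
static int check_message_size(unsigned int msg_size, struct result *r)
{
	if (msg_size > BUF_BYTES) {
		r->error = ERR_MSG_TOO_BIG;
		r->specific_error = BUF_BYTES;   /* what would have fit */
		r->size = HDR_BYTES;             /* only the header is valid */
		return -1;
	}
	return 0;
}

/* Inbound: a response bigger than the caller's buffer is truncated
 * to the header and flagged instead of overrunning the buffer. */
static void check_response_size(unsigned int resp_size,
				unsigned int caller_size, struct result *r)
{
	if (resp_size > caller_size) {
		r->error = ERR_RESP_TOO_BIG;
		r->specific_error = resp_size;
		r->size = HDR_BYTES;
	}
}

int main(void)
{
	struct result r = { 0, 0, 0 };

	if (check_message_size(80, &r))
		printf("message rejected: error=%u fits=%u\n",
		       r.error, r.specific_error);
	check_response_size(120, 96, &r);
	printf("response flagged: error=%u actual=%u\n",
	       r.error, r.specific_error);
	return 0;
}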
@@ -2279,7 +2170,13 @@ static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
/* maybe an error response */
if (err) {
/* something failed in the HPI/DSP interface */
- phr->error = err;
+ if (err >= HPI_ERROR_BACKEND_BASE) {
+ phr->error = HPI_ERROR_DSP_COMMUNICATION;
+ phr->specific_error = err;
+ } else {
+ phr->error = err;
+ }
+
pao->dsp_crashed++;
/* just the header of the response is valid */
diff --git a/sound/pci/asihpi/hpi6205.h b/sound/pci/asihpi/hpi6205.h
index 1adae0857cda..df2f02c0c7b4 100644
--- a/sound/pci/asihpi/hpi6205.h
+++ b/sound/pci/asihpi/hpi6205.h
@@ -25,9 +25,6 @@ Copyright AudioScience, Inc., 2003
#ifndef _HPI6205_H_
#define _HPI6205_H_
-/* transitional conditional compile shared between host and DSP */
-/* #define HPI6205_NO_HSR_POLL */
-
#include "hpi_internal.h"
/***********************************************************
@@ -78,8 +75,8 @@ struct bus_master_interface {
u32 dsp_ack;
u32 transfer_size_in_bytes;
union {
- struct hpi_message message_buffer;
- struct hpi_response response_buffer;
+ struct hpi_message_header message_buffer;
+ struct hpi_response_header response_buffer;
u8 b_data[HPI6205_SIZEOF_DATA];
} u;
struct controlcache_6205 control_cache;
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 16f502d459de..af678be0aa15 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -28,7 +28,7 @@ HPI internal definitions
/** maximum number of memory regions mapped to an adapter */
#define HPI_MAX_ADAPTER_MEM_SPACES (2)
-/* Each OS needs its own hpios.h, or specific define as above */
+/* Each OS needs its own hpios.h */
#include "hpios.h"
/* physical memory allocation */
@@ -49,7 +49,7 @@ HpiOs_LockedMem_GetPyhsAddr() will always succed on the returned handle.
*/
u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
/**< memory handle */
- u32 size, /**< size in bytes to allocate */
+ u32 size, /**< Size in bytes to allocate */
struct pci_dev *p_os_reference
/**< OS specific data required for memory allocation */
);
@@ -96,41 +96,6 @@ typedef void hpi_handler_func(struct hpi_message *, struct hpi_response *);
#define compile_time_assert(cond, msg) \
typedef char ASSERT_##msg[(cond) ? 1 : -1]
-/*/////////////////////////////////////////////////////////////////////////// */
-/* Private HPI Entity related definitions */
-
-#define STR_SIZE_FIELD_MAX 65535U
-#define STR_TYPE_FIELD_MAX 255U
-#define STR_ROLE_FIELD_MAX 255U
-
-struct hpi_entity_str {
- u16 size;
- u8 type;
- u8 role;
-};
-
-#if defined(_MSC_VER)
-#pragma warning(push)
-#pragma warning(disable : 4200)
-#endif
-
-struct hpi_entity {
- struct hpi_entity_str header;
-#if ! defined(HPI_OS_DSP_C6000) || (defined(HPI_OS_DSP_C6000) && (__TI_COMPILER_VERSION__ > 6000008))
- /* DSP C6000 compiler v6.0.8 and lower
- do not support flexible array member */
- u8 value[];
-#else
- /* NOTE! Using sizeof(struct hpi_entity) will give erroneous results */
-#define HPI_INTERNAL_WARN_ABOUT_ENTITY_VALUE
- u8 value[1];
-#endif
-};
-
-#if defined(_MSC_VER)
-#pragma warning(pop)
-#endif
-
/******************************************* bus types */
enum HPI_BUSES {
HPI_BUS_ISAPNP = 1,
@@ -139,206 +104,155 @@ enum HPI_BUSES {
HPI_BUS_NET = 4
};
+enum HPI_SUBSYS_OPTIONS {
+ /* 0, 256 are invalid, 1..255 reserved for global options */
+ HPI_SUBSYS_OPT_NET_ENABLE = 257,
+ HPI_SUBSYS_OPT_NET_BROADCAST = 258,
+ HPI_SUBSYS_OPT_NET_UNICAST = 259,
+ HPI_SUBSYS_OPT_NET_ADDR = 260,
+ HPI_SUBSYS_OPT_NET_MASK = 261,
+ HPI_SUBSYS_OPT_NET_ADAPTER_ADDRESS_ADD = 262
+};
+
+/** Volume flags
+*/
+enum HPI_VOLUME_FLAGS {
+ /** Set if the volume control is muted */
+ HPI_VOLUME_FLAG_MUTED = (1 << 0),
+ /** Set if the volume control has a mute function */
+ HPI_VOLUME_FLAG_HAS_MUTE = (1 << 1),
+ /** Set if volume control can do autofading */
+ HPI_VOLUME_FLAG_HAS_AUTOFADE = (1 << 2)
+ /* Note Flags >= (1<<8) are for DSP internal use only */
+};
+
/******************************************* CONTROL ATTRIBUTES ****/
/* (in order of control type ID */
/* This allows for 255 control types, 256 unique attributes each */
-#define HPI_CTL_ATTR(ctl, ai) (HPI_CONTROL_##ctl * 0x100 + ai)
+#define HPI_CTL_ATTR(ctl, ai) ((HPI_CONTROL_##ctl << 8) + ai)
/* Get the sub-index of the attribute for a control type */
-#define HPI_CTL_ATTR_INDEX(i) (i&0xff)
+#define HPI_CTL_ATTR_INDEX(i) (i & 0xff)
/* Extract the control from the control attribute */
-#define HPI_CTL_ATTR_CONTROL(i) (i>>8)
-
-/* Generic control attributes. */
-
-/** Enable a control.
-0=disable, 1=enable
-\note generic to all mixer plugins?
-*/
-#define HPI_GENERIC_ENABLE HPI_CTL_ATTR(GENERIC, 1)
+#define HPI_CTL_ATTR_CONTROL(i) (i >> 8)
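The reworked HPI_CTL_ATTR() packs the control type into the high byte and the attribute sub-index into the low byte, and the two companion macros unpack them again. A standalone illustration; the value chosen for HPI_CONTROL_VOLUME below is made up purely to show the packing (the real enum lives elsewhere in hpi_internal.h):

#include <stdio.h>

#define HPI_CONTROL_VOLUME 5                       /* hypothetical type id */

#define HPI_CTL_ATTR(ctl, ai)    ((HPI_CONTROL_##ctl << 8) + ai)
#define HPI_CTL_ATTR_INDEX(i)    ((i) & 0xff)
#define HPI_CTL_ATTR_CONTROL(i)  ((i) >> 8)

int main(void)
{
	unsigned int attr = HPI_CTL_ATTR(VOLUME, 1);   /* 5*256 + 1 = 0x0501 */

	printf("attr=0x%04x control=%u index=%u\n",
	       attr, HPI_CTL_ATTR_CONTROL(attr), HPI_CTL_ATTR_INDEX(attr));
	return 0;
}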
/** Enable event generation for a control.
0=disable, 1=enable
\note generic to all controls that can generate events
*/
-#define HPI_GENERIC_EVENT_ENABLE HPI_CTL_ATTR(GENERIC, 2)
-
-/* Volume Control attributes */
-#define HPI_VOLUME_GAIN HPI_CTL_ATTR(VOLUME, 1)
-#define HPI_VOLUME_AUTOFADE HPI_CTL_ATTR(VOLUME, 2)
-
-/** For HPI_ControlQuery() to get the number of channels of a volume control*/
-#define HPI_VOLUME_NUM_CHANNELS HPI_CTL_ATTR(VOLUME, 6)
-#define HPI_VOLUME_RANGE HPI_CTL_ATTR(VOLUME, 10)
-
-/** Level Control attributes */
-#define HPI_LEVEL_GAIN HPI_CTL_ATTR(LEVEL, 1)
-#define HPI_LEVEL_RANGE HPI_CTL_ATTR(LEVEL, 10)
-
-/* Meter Control attributes */
-/** return RMS signal level */
-#define HPI_METER_RMS HPI_CTL_ATTR(METER, 1)
-/** return peak signal level */
-#define HPI_METER_PEAK HPI_CTL_ATTR(METER, 2)
-/** ballistics for ALL rms meters on adapter */
-#define HPI_METER_RMS_BALLISTICS HPI_CTL_ATTR(METER, 3)
-/** ballistics for ALL peak meters on adapter */
-#define HPI_METER_PEAK_BALLISTICS HPI_CTL_ATTR(METER, 4)
-
-/** For HPI_ControlQuery() to get the number of channels of a meter control*/
-#define HPI_METER_NUM_CHANNELS HPI_CTL_ATTR(METER, 5)
-
-/* Multiplexer control attributes */
-#define HPI_MULTIPLEXER_SOURCE HPI_CTL_ATTR(MULTIPLEXER, 1)
-#define HPI_MULTIPLEXER_QUERYSOURCE HPI_CTL_ATTR(MULTIPLEXER, 2)
-
-/** AES/EBU transmitter control attributes */
-/** AESEBU or SPDIF */
-#define HPI_AESEBUTX_FORMAT HPI_CTL_ATTR(AESEBUTX, 1)
-#define HPI_AESEBUTX_SAMPLERATE HPI_CTL_ATTR(AESEBUTX, 3)
-#define HPI_AESEBUTX_CHANNELSTATUS HPI_CTL_ATTR(AESEBUTX, 4)
-#define HPI_AESEBUTX_USERDATA HPI_CTL_ATTR(AESEBUTX, 5)
-
-/** AES/EBU receiver control attributes */
-#define HPI_AESEBURX_FORMAT HPI_CTL_ATTR(AESEBURX, 1)
-#define HPI_AESEBURX_ERRORSTATUS HPI_CTL_ATTR(AESEBURX, 2)
-#define HPI_AESEBURX_SAMPLERATE HPI_CTL_ATTR(AESEBURX, 3)
-#define HPI_AESEBURX_CHANNELSTATUS HPI_CTL_ATTR(AESEBURX, 4)
-#define HPI_AESEBURX_USERDATA HPI_CTL_ATTR(AESEBURX, 5)
-
-/** \defgroup tuner_defs Tuners
-\{
-*/
-/** \defgroup tuner_attrs Tuner control attributes
-\{
-*/
-#define HPI_TUNER_BAND HPI_CTL_ATTR(TUNER, 1)
-#define HPI_TUNER_FREQ HPI_CTL_ATTR(TUNER, 2)
-#define HPI_TUNER_LEVEL HPI_CTL_ATTR(TUNER, 3)
-#define HPI_TUNER_AUDIOMUTE HPI_CTL_ATTR(TUNER, 4)
-/* use TUNER_STATUS instead */
-#define HPI_TUNER_VIDEO_STATUS HPI_CTL_ATTR(TUNER, 5)
-#define HPI_TUNER_GAIN HPI_CTL_ATTR(TUNER, 6)
-#define HPI_TUNER_STATUS HPI_CTL_ATTR(TUNER, 7)
-#define HPI_TUNER_MODE HPI_CTL_ATTR(TUNER, 8)
-/** RDS data. */
-#define HPI_TUNER_RDS HPI_CTL_ATTR(TUNER, 9)
-/** Audio pre-emphasis. */
-#define HPI_TUNER_DEEMPHASIS HPI_CTL_ATTR(TUNER, 10)
-/** HD Radio tuner program control. */
-#define HPI_TUNER_PROGRAM HPI_CTL_ATTR(TUNER, 11)
-/** HD Radio tuner digital signal quality. */
-#define HPI_TUNER_HDRADIO_SIGNAL_QUALITY HPI_CTL_ATTR(TUNER, 12)
-/** HD Radio SDK firmware version. */
-#define HPI_TUNER_HDRADIO_SDK_VERSION HPI_CTL_ATTR(TUNER, 13)
-/** HD Radio DSP firmware version. */
-#define HPI_TUNER_HDRADIO_DSP_VERSION HPI_CTL_ATTR(TUNER, 14)
-/** HD Radio signal blend (force analog, or automatic). */
-#define HPI_TUNER_HDRADIO_BLEND HPI_CTL_ATTR(TUNER, 15)
-
-/** \} */
-
-/** \defgroup pads_attrs Tuner PADs control attributes
-\{
-*/
-/** The text string containing the station/channel combination. */
-#define HPI_PAD_CHANNEL_NAME HPI_CTL_ATTR(PAD, 1)
-/** The text string containing the artist. */
-#define HPI_PAD_ARTIST HPI_CTL_ATTR(PAD, 2)
-/** The text string containing the title. */
-#define HPI_PAD_TITLE HPI_CTL_ATTR(PAD, 3)
-/** The text string containing the comment. */
-#define HPI_PAD_COMMENT HPI_CTL_ATTR(PAD, 4)
-/** The integer containing the PTY code. */
-#define HPI_PAD_PROGRAM_TYPE HPI_CTL_ATTR(PAD, 5)
-/** The integer containing the program identification. */
-#define HPI_PAD_PROGRAM_ID HPI_CTL_ATTR(PAD, 6)
-/** The integer containing whether traffic information is supported.
-Contains either 1 or 0. */
-#define HPI_PAD_TA_SUPPORT HPI_CTL_ATTR(PAD, 7)
-/** The integer containing whether traffic announcement is in progress.
-Contains either 1 or 0. */
-#define HPI_PAD_TA_ACTIVE HPI_CTL_ATTR(PAD, 8)
-/** \} */
-/** \} */
-
-/* VOX control attributes */
-#define HPI_VOX_THRESHOLD HPI_CTL_ATTR(VOX, 1)
-
-/*?? channel mode used hpi_multiplexer_source attribute == 1 */
-#define HPI_CHANNEL_MODE_MODE HPI_CTL_ATTR(CHANNEL_MODE, 1)
-
-/** \defgroup channel_modes Channel Modes
-Used for HPI_ChannelModeSet/Get()
-\{
+
+/** Unique identifiers for every control attribute
*/
-/** Left channel out = left channel in, Right channel out = right channel in. */
-#define HPI_CHANNEL_MODE_NORMAL 1
-/** Left channel out = right channel in, Right channel out = left channel in. */
-#define HPI_CHANNEL_MODE_SWAP 2
-/** Left channel out = left channel in, Right channel out = left channel in. */
-#define HPI_CHANNEL_MODE_LEFT_TO_STEREO 3
-/** Left channel out = right channel in, Right channel out = right channel in.*/
-#define HPI_CHANNEL_MODE_RIGHT_TO_STEREO 4
-/** Left channel out = (left channel in + right channel in)/2,
- Right channel out = mute. */
-#define HPI_CHANNEL_MODE_STEREO_TO_LEFT 5
-/** Left channel out = mute,
- Right channel out = (right channel in + left channel in)/2. */
-#define HPI_CHANNEL_MODE_STEREO_TO_RIGHT 6
-#define HPI_CHANNEL_MODE_LAST 6
-/** \} */
-
-/* Bitstream control set attributes */
-#define HPI_BITSTREAM_DATA_POLARITY HPI_CTL_ATTR(BITSTREAM, 1)
-#define HPI_BITSTREAM_CLOCK_EDGE HPI_CTL_ATTR(BITSTREAM, 2)
-#define HPI_BITSTREAM_CLOCK_SOURCE HPI_CTL_ATTR(BITSTREAM, 3)
+enum HPI_CONTROL_ATTRIBUTES {
+ HPI_GENERIC_ENABLE = HPI_CTL_ATTR(GENERIC, 1),
+ HPI_GENERIC_EVENT_ENABLE = HPI_CTL_ATTR(GENERIC, 2),
+
+ HPI_VOLUME_GAIN = HPI_CTL_ATTR(VOLUME, 1),
+ HPI_VOLUME_AUTOFADE = HPI_CTL_ATTR(VOLUME, 2),
+ HPI_VOLUME_MUTE = HPI_CTL_ATTR(VOLUME, 3),
+ HPI_VOLUME_GAIN_AND_FLAGS = HPI_CTL_ATTR(VOLUME, 4),
+ HPI_VOLUME_NUM_CHANNELS = HPI_CTL_ATTR(VOLUME, 6),
+ HPI_VOLUME_RANGE = HPI_CTL_ATTR(VOLUME, 10),
+
+ HPI_METER_RMS = HPI_CTL_ATTR(METER, 1),
+ HPI_METER_PEAK = HPI_CTL_ATTR(METER, 2),
+ HPI_METER_RMS_BALLISTICS = HPI_CTL_ATTR(METER, 3),
+ HPI_METER_PEAK_BALLISTICS = HPI_CTL_ATTR(METER, 4),
+ HPI_METER_NUM_CHANNELS = HPI_CTL_ATTR(METER, 5),
+
+ HPI_MULTIPLEXER_SOURCE = HPI_CTL_ATTR(MULTIPLEXER, 1),
+ HPI_MULTIPLEXER_QUERYSOURCE = HPI_CTL_ATTR(MULTIPLEXER, 2),
+
+ HPI_AESEBUTX_FORMAT = HPI_CTL_ATTR(AESEBUTX, 1),
+ HPI_AESEBUTX_SAMPLERATE = HPI_CTL_ATTR(AESEBUTX, 3),
+ HPI_AESEBUTX_CHANNELSTATUS = HPI_CTL_ATTR(AESEBUTX, 4),
+ HPI_AESEBUTX_USERDATA = HPI_CTL_ATTR(AESEBUTX, 5),
+
+ HPI_AESEBURX_FORMAT = HPI_CTL_ATTR(AESEBURX, 1),
+ HPI_AESEBURX_ERRORSTATUS = HPI_CTL_ATTR(AESEBURX, 2),
+ HPI_AESEBURX_SAMPLERATE = HPI_CTL_ATTR(AESEBURX, 3),
+ HPI_AESEBURX_CHANNELSTATUS = HPI_CTL_ATTR(AESEBURX, 4),
+ HPI_AESEBURX_USERDATA = HPI_CTL_ATTR(AESEBURX, 5),
+
+ HPI_LEVEL_GAIN = HPI_CTL_ATTR(LEVEL, 1),
+ HPI_LEVEL_RANGE = HPI_CTL_ATTR(LEVEL, 10),
+
+ HPI_TUNER_BAND = HPI_CTL_ATTR(TUNER, 1),
+ HPI_TUNER_FREQ = HPI_CTL_ATTR(TUNER, 2),
+ HPI_TUNER_LEVEL_AVG = HPI_CTL_ATTR(TUNER, 3),
+ HPI_TUNER_LEVEL_RAW = HPI_CTL_ATTR(TUNER, 4),
+ HPI_TUNER_SNR = HPI_CTL_ATTR(TUNER, 5),
+ HPI_TUNER_GAIN = HPI_CTL_ATTR(TUNER, 6),
+ HPI_TUNER_STATUS = HPI_CTL_ATTR(TUNER, 7),
+ HPI_TUNER_MODE = HPI_CTL_ATTR(TUNER, 8),
+ HPI_TUNER_RDS = HPI_CTL_ATTR(TUNER, 9),
+ HPI_TUNER_DEEMPHASIS = HPI_CTL_ATTR(TUNER, 10),
+ HPI_TUNER_PROGRAM = HPI_CTL_ATTR(TUNER, 11),
+ HPI_TUNER_HDRADIO_SIGNAL_QUALITY = HPI_CTL_ATTR(TUNER, 12),
+ HPI_TUNER_HDRADIO_SDK_VERSION = HPI_CTL_ATTR(TUNER, 13),
+ HPI_TUNER_HDRADIO_DSP_VERSION = HPI_CTL_ATTR(TUNER, 14),
+ HPI_TUNER_HDRADIO_BLEND = HPI_CTL_ATTR(TUNER, 15),
+
+ HPI_VOX_THRESHOLD = HPI_CTL_ATTR(VOX, 1),
+
+ HPI_CHANNEL_MODE_MODE = HPI_CTL_ATTR(CHANNEL_MODE, 1),
+
+ HPI_BITSTREAM_DATA_POLARITY = HPI_CTL_ATTR(BITSTREAM, 1),
+ HPI_BITSTREAM_CLOCK_EDGE = HPI_CTL_ATTR(BITSTREAM, 2),
+ HPI_BITSTREAM_CLOCK_SOURCE = HPI_CTL_ATTR(BITSTREAM, 3),
+ HPI_BITSTREAM_ACTIVITY = HPI_CTL_ATTR(BITSTREAM, 4),
+
+ HPI_SAMPLECLOCK_SOURCE = HPI_CTL_ATTR(SAMPLECLOCK, 1),
+ HPI_SAMPLECLOCK_SAMPLERATE = HPI_CTL_ATTR(SAMPLECLOCK, 2),
+ HPI_SAMPLECLOCK_SOURCE_INDEX = HPI_CTL_ATTR(SAMPLECLOCK, 3),
+ HPI_SAMPLECLOCK_LOCAL_SAMPLERATE = HPI_CTL_ATTR(SAMPLECLOCK, 4),
+ HPI_SAMPLECLOCK_AUTO = HPI_CTL_ATTR(SAMPLECLOCK, 5),
+ HPI_SAMPLECLOCK_LOCAL_LOCK = HPI_CTL_ATTR(SAMPLECLOCK, 6),
+
+ HPI_MICROPHONE_PHANTOM_POWER = HPI_CTL_ATTR(MICROPHONE, 1),
+
+ HPI_EQUALIZER_NUM_FILTERS = HPI_CTL_ATTR(EQUALIZER, 1),
+ HPI_EQUALIZER_FILTER = HPI_CTL_ATTR(EQUALIZER, 2),
+ HPI_EQUALIZER_COEFFICIENTS = HPI_CTL_ATTR(EQUALIZER, 3),
+
+ HPI_COMPANDER_PARAMS = HPI_CTL_ATTR(COMPANDER, 1),
+ HPI_COMPANDER_MAKEUPGAIN = HPI_CTL_ATTR(COMPANDER, 2),
+ HPI_COMPANDER_THRESHOLD = HPI_CTL_ATTR(COMPANDER, 3),
+ HPI_COMPANDER_RATIO = HPI_CTL_ATTR(COMPANDER, 4),
+ HPI_COMPANDER_ATTACK = HPI_CTL_ATTR(COMPANDER, 5),
+ HPI_COMPANDER_DECAY = HPI_CTL_ATTR(COMPANDER, 6),
+
+ HPI_COBRANET_SET = HPI_CTL_ATTR(COBRANET, 1),
+ HPI_COBRANET_GET = HPI_CTL_ATTR(COBRANET, 2),
+ HPI_COBRANET_SET_DATA = HPI_CTL_ATTR(COBRANET, 3),
+ HPI_COBRANET_GET_DATA = HPI_CTL_ATTR(COBRANET, 4),
+ HPI_COBRANET_GET_STATUS = HPI_CTL_ATTR(COBRANET, 5),
+ HPI_COBRANET_SEND_PACKET = HPI_CTL_ATTR(COBRANET, 6),
+ HPI_COBRANET_GET_PACKET = HPI_CTL_ATTR(COBRANET, 7),
+
+ HPI_TONEDETECTOR_THRESHOLD = HPI_CTL_ATTR(TONEDETECTOR, 1),
+ HPI_TONEDETECTOR_STATE = HPI_CTL_ATTR(TONEDETECTOR, 2),
+ HPI_TONEDETECTOR_FREQUENCY = HPI_CTL_ATTR(TONEDETECTOR, 3),
+
+ HPI_SILENCEDETECTOR_THRESHOLD = HPI_CTL_ATTR(SILENCEDETECTOR, 1),
+ HPI_SILENCEDETECTOR_STATE = HPI_CTL_ATTR(SILENCEDETECTOR, 2),
+ HPI_SILENCEDETECTOR_DELAY = HPI_CTL_ATTR(SILENCEDETECTOR, 3),
+
+ HPI_PAD_CHANNEL_NAME = HPI_CTL_ATTR(PAD, 1),
+ HPI_PAD_ARTIST = HPI_CTL_ATTR(PAD, 2),
+ HPI_PAD_TITLE = HPI_CTL_ATTR(PAD, 3),
+ HPI_PAD_COMMENT = HPI_CTL_ATTR(PAD, 4),
+ HPI_PAD_PROGRAM_TYPE = HPI_CTL_ATTR(PAD, 5),
+ HPI_PAD_PROGRAM_ID = HPI_CTL_ATTR(PAD, 6),
+ HPI_PAD_TA_SUPPORT = HPI_CTL_ATTR(PAD, 7),
+ HPI_PAD_TA_ACTIVE = HPI_CTL_ATTR(PAD, 8)
+};
#define HPI_POLARITY_POSITIVE 0
#define HPI_POLARITY_NEGATIVE 1
-/* Bitstream control get attributes */
-#define HPI_BITSTREAM_ACTIVITY 1
-
-/* SampleClock control attributes */
-#define HPI_SAMPLECLOCK_SOURCE HPI_CTL_ATTR(SAMPLECLOCK, 1)
-#define HPI_SAMPLECLOCK_SAMPLERATE HPI_CTL_ATTR(SAMPLECLOCK, 2)
-#define HPI_SAMPLECLOCK_SOURCE_INDEX HPI_CTL_ATTR(SAMPLECLOCK, 3)
-#define HPI_SAMPLECLOCK_LOCAL_SAMPLERATE\
- HPI_CTL_ATTR(SAMPLECLOCK, 4)
-#define HPI_SAMPLECLOCK_AUTO HPI_CTL_ATTR(SAMPLECLOCK, 5)
-#define HPI_SAMPLECLOCK_LOCAL_LOCK HPI_CTL_ATTR(SAMPLECLOCK, 6)
-
-/* Microphone control attributes */
-#define HPI_MICROPHONE_PHANTOM_POWER HPI_CTL_ATTR(MICROPHONE, 1)
-
-/** Equalizer control attributes */
-/** Used to get number of filters in an EQ. (Can't set) */
-#define HPI_EQUALIZER_NUM_FILTERS HPI_CTL_ATTR(EQUALIZER, 1)
-/** Set/get the filter by type, freq, Q, gain */
-#define HPI_EQUALIZER_FILTER HPI_CTL_ATTR(EQUALIZER, 2)
-/** Get the biquad coefficients */
-#define HPI_EQUALIZER_COEFFICIENTS HPI_CTL_ATTR(EQUALIZER, 3)
-
-/* Note compander also uses HPI_GENERIC_ENABLE */
-#define HPI_COMPANDER_PARAMS HPI_CTL_ATTR(COMPANDER, 1)
-#define HPI_COMPANDER_MAKEUPGAIN HPI_CTL_ATTR(COMPANDER, 2)
-#define HPI_COMPANDER_THRESHOLD HPI_CTL_ATTR(COMPANDER, 3)
-#define HPI_COMPANDER_RATIO HPI_CTL_ATTR(COMPANDER, 4)
-#define HPI_COMPANDER_ATTACK HPI_CTL_ATTR(COMPANDER, 5)
-#define HPI_COMPANDER_DECAY HPI_CTL_ATTR(COMPANDER, 6)
-
-/* Cobranet control attributes. */
-#define HPI_COBRANET_SET HPI_CTL_ATTR(COBRANET, 1)
-#define HPI_COBRANET_GET HPI_CTL_ATTR(COBRANET, 2)
-#define HPI_COBRANET_SET_DATA HPI_CTL_ATTR(COBRANET, 3)
-#define HPI_COBRANET_GET_DATA HPI_CTL_ATTR(COBRANET, 4)
-#define HPI_COBRANET_GET_STATUS HPI_CTL_ATTR(COBRANET, 5)
-#define HPI_COBRANET_SEND_PACKET HPI_CTL_ATTR(COBRANET, 6)
-#define HPI_COBRANET_GET_PACKET HPI_CTL_ATTR(COBRANET, 7)
-
/*------------------------------------------------------------
Cobranet Chip Bridge - copied from HMI.H
------------------------------------------------------------*/
@@ -395,69 +309,22 @@ Used for HPI_ChannelModeSet/Get()
#define HPI_ETHERNET_UDP_PORT (44600) /*!< UDP messaging port */
-/** Base network time out is set to 100 milli-seconds. */
-#define HPI_ETHERNET_TIMEOUT_MS (100)
-
-/** \defgroup tonedet_attr Tonedetector attributes
-\{
-Used by HPI_ToneDetector_Set() and HPI_ToneDetector_Get()
-*/
-
-/** Set the threshold level of a tonedetector,
-Threshold is a -ve number in units of dB/100,
-*/
-#define HPI_TONEDETECTOR_THRESHOLD HPI_CTL_ATTR(TONEDETECTOR, 1)
-
-/** Get the current state of tonedetection
-The result is a bitmap of detected tones. pairs of bits represent the left
-and right channels, with left channel in LSB.
-The lowest frequency detector state is in the LSB
-*/
-#define HPI_TONEDETECTOR_STATE HPI_CTL_ATTR(TONEDETECTOR, 2)
-
-/** Get the frequency of a tonedetector band.
-*/
-#define HPI_TONEDETECTOR_FREQUENCY HPI_CTL_ATTR(TONEDETECTOR, 3)
-
-/**\}*/
+/** Default network timeout in milliseconds. */
+#define HPI_ETHERNET_TIMEOUT_MS 500
-/** \defgroup silencedet_attr SilenceDetector attributes
-\{
-*/
-
-/** Get the current state of tonedetection
-The result is a bitmap with 1s for silent channels. Left channel is in LSB
-*/
-#define HPI_SILENCEDETECTOR_STATE \
- HPI_CTL_ATTR(SILENCEDETECTOR, 2)
-
-/** Set the threshold level of a SilenceDetector,
-Threshold is a -ve number in units of dB/100,
-*/
-#define HPI_SILENCEDETECTOR_THRESHOLD \
- HPI_CTL_ATTR(SILENCEDETECTOR, 1)
-
-/** get/set the silence time before the detector triggers
-*/
-#define HPI_SILENCEDETECTOR_DELAY \
- HPI_CTL_ATTR(SILENCEDETECTOR, 3)
-
-/**\}*/
-
-/* Locked memory buffer alloc/free phases */
-/** use one message to allocate or free physical memory */
-#define HPI_BUFFER_CMD_EXTERNAL 0
-/** alloc physical memory */
-#define HPI_BUFFER_CMD_INTERNAL_ALLOC 1
-/** send physical memory address to adapter */
-#define HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER 2
-/** notify adapter to stop using physical buffer */
-#define HPI_BUFFER_CMD_INTERNAL_REVOKEADAPTER 3
-/** free physical buffer */
-#define HPI_BUFFER_CMD_INTERNAL_FREE 4
-
-/******************************************* CONTROLX ATTRIBUTES ****/
-/* NOTE: All controlx attributes must be unique, unlike control attributes */
+/** Locked memory buffer alloc/free phases */
+enum HPI_BUFFER_CMDS {
+ /** use one message to allocate or free physical memory */
+ HPI_BUFFER_CMD_EXTERNAL = 0,
+ /** alloc physical memory */
+ HPI_BUFFER_CMD_INTERNAL_ALLOC = 1,
+ /** send physical memory address to adapter */
+ HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER = 2,
+ /** notify adapter to stop using physical buffer */
+ HPI_BUFFER_CMD_INTERNAL_REVOKEADAPTER = 3,
+ /** free physical buffer */
+ HPI_BUFFER_CMD_INTERNAL_FREE = 4
+};
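A sketch of the intended ordering of the internal phases above, for a host that allocates the buffer itself and then hands it to the adapter; send_buffer_cmd() is a hypothetical helper, not an HPI call.

/* Hypothetical sequence inferred from the phase comments above. */
static int host_buffer_attach(u32 h_stream)
{
	int err;

	err = send_buffer_cmd(h_stream, HPI_BUFFER_CMD_INTERNAL_ALLOC);
	if (err)
		return err;
	/* pass the physical buffer address to the adapter before streaming */
	return send_buffer_cmd(h_stream, HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER);
}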
/*****************************************************************************/
/*****************************************************************************/
@@ -482,6 +349,12 @@ Threshold is a -ve number in units of dB/100,
#define HPI_USB_W2K_TAG 0x57495341 /* "ASIW" */
#define HPI_USB_LINUX_TAG 0x4C495341 /* "ASIL" */
+/** Invalid adapter index.
+Used in HPI messages that are not addressed to a specific adapter,
+and in the DLL to indicate that a device is not present.
+*/
+#define HPI_ADAPTER_INDEX_INVALID 0xFFFF
+
/** First 2 hex digits define the adapter family */
#define HPI_ADAPTER_FAMILY_MASK 0xff00
#define HPI_MODULE_FAMILY_MASK 0xfff0
@@ -490,178 +363,185 @@ Threshold is a -ve number in units of dB/100,
#define HPI_MODULE_FAMILY_ASI(f) (f & HPI_MODULE_FAMILY_MASK)
#define HPI_ADAPTER_ASI(f) (f)
-/******************************************* message types */
-#define HPI_TYPE_MESSAGE 1
-#define HPI_TYPE_RESPONSE 2
-#define HPI_TYPE_DATA 3
-#define HPI_TYPE_SSX2BYPASS_MESSAGE 4
-
-/******************************************* object types */
-#define HPI_OBJ_SUBSYSTEM 1
-#define HPI_OBJ_ADAPTER 2
-#define HPI_OBJ_OSTREAM 3
-#define HPI_OBJ_ISTREAM 4
-#define HPI_OBJ_MIXER 5
-#define HPI_OBJ_NODE 6
-#define HPI_OBJ_CONTROL 7
-#define HPI_OBJ_NVMEMORY 8
-#define HPI_OBJ_GPIO 9
-#define HPI_OBJ_WATCHDOG 10
-#define HPI_OBJ_CLOCK 11
-#define HPI_OBJ_PROFILE 12
-#define HPI_OBJ_CONTROLEX 13
-#define HPI_OBJ_ASYNCEVENT 14
-
-#define HPI_OBJ_MAXINDEX 14
-
-/******************************************* methods/functions */
-
-#define HPI_OBJ_FUNCTION_SPACING 0x100
-#define HPI_MAKE_INDEX(obj, index) (obj * HPI_OBJ_FUNCTION_SPACING + index)
+enum HPI_MESSAGE_TYPES {
+ HPI_TYPE_MESSAGE = 1,
+ HPI_TYPE_RESPONSE = 2,
+ HPI_TYPE_DATA = 3,
+ HPI_TYPE_SSX2BYPASS_MESSAGE = 4
+};
+
+enum HPI_OBJECT_TYPES {
+ HPI_OBJ_SUBSYSTEM = 1,
+ HPI_OBJ_ADAPTER = 2,
+ HPI_OBJ_OSTREAM = 3,
+ HPI_OBJ_ISTREAM = 4,
+ HPI_OBJ_MIXER = 5,
+ HPI_OBJ_NODE = 6,
+ HPI_OBJ_CONTROL = 7,
+ HPI_OBJ_NVMEMORY = 8,
+ HPI_OBJ_GPIO = 9,
+ HPI_OBJ_WATCHDOG = 10,
+ HPI_OBJ_CLOCK = 11,
+ HPI_OBJ_PROFILE = 12,
+ HPI_OBJ_CONTROLEX = 13,
+ HPI_OBJ_ASYNCEVENT = 14
+#define HPI_OBJ_MAXINDEX 14
+};
+
+#define HPI_OBJ_FUNCTION_SPACING 0x100
+#define HPI_FUNC_ID(obj, i) (HPI_OBJ_##obj * HPI_OBJ_FUNCTION_SPACING + i)
+
#define HPI_EXTRACT_INDEX(fn) (fn & 0xff)
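For reference, a minimal standalone sketch of how the new HPI_FUNC_ID macro composes a function ID from the object type and the per-object function index, and how HPI_EXTRACT_INDEX recovers the index; the values mirror the definitions above (HPI_OBJ_ADAPTER == 2, HPI_OBJ_FUNCTION_SPACING == 0x100).

/* Standalone illustration only; mirrors the macro definitions above. */
#include <assert.h>

#define HPI_OBJ_ADAPTER 2
#define HPI_OBJ_FUNCTION_SPACING 0x100
#define HPI_FUNC_ID(obj, i) (HPI_OBJ_##obj * HPI_OBJ_FUNCTION_SPACING + i)
#define HPI_EXTRACT_INDEX(fn) (fn & 0xff)

int main(void)
{
	/* HPI_ADAPTER_OPEN == 0x201: object type in the high byte,
	 * function index in the low byte. */
	assert(HPI_FUNC_ID(ADAPTER, 1) == 0x201);
	assert(HPI_EXTRACT_INDEX(HPI_FUNC_ID(ADAPTER, 1)) == 1);
	return 0;
}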
-/* SUB-SYSTEM */
-#define HPI_SUBSYS_OPEN HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 1)
-#define HPI_SUBSYS_GET_VERSION HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 2)
-#define HPI_SUBSYS_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 3)
-#define HPI_SUBSYS_FIND_ADAPTERS HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 4)
-#define HPI_SUBSYS_CREATE_ADAPTER HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 5)
-#define HPI_SUBSYS_CLOSE HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 6)
-#define HPI_SUBSYS_DELETE_ADAPTER HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 7)
-#define HPI_SUBSYS_DRIVER_LOAD HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 8)
-#define HPI_SUBSYS_DRIVER_UNLOAD HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 9)
-#define HPI_SUBSYS_READ_PORT_8 HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 10)
-#define HPI_SUBSYS_WRITE_PORT_8 HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 11)
-#define HPI_SUBSYS_GET_NUM_ADAPTERS HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 12)
-#define HPI_SUBSYS_GET_ADAPTER HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 13)
-#define HPI_SUBSYS_SET_NETWORK_INTERFACE HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 14)
-#define HPI_SUBSYS_FUNCTION_COUNT 14
-/* ADAPTER */
-#define HPI_ADAPTER_OPEN HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 1)
-#define HPI_ADAPTER_CLOSE HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 2)
-#define HPI_ADAPTER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 3)
-#define HPI_ADAPTER_GET_ASSERT HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 4)
-#define HPI_ADAPTER_TEST_ASSERT HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 5)
-#define HPI_ADAPTER_SET_MODE HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 6)
-#define HPI_ADAPTER_GET_MODE HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 7)
-#define HPI_ADAPTER_ENABLE_CAPABILITY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 8)
-#define HPI_ADAPTER_SELFTEST HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 9)
-#define HPI_ADAPTER_FIND_OBJECT HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 10)
-#define HPI_ADAPTER_QUERY_FLASH HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 11)
-#define HPI_ADAPTER_START_FLASH HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 12)
-#define HPI_ADAPTER_PROGRAM_FLASH HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 13)
-#define HPI_ADAPTER_SET_PROPERTY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 14)
-#define HPI_ADAPTER_GET_PROPERTY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 15)
-#define HPI_ADAPTER_ENUM_PROPERTY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 16)
-#define HPI_ADAPTER_MODULE_INFO HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 17)
-#define HPI_ADAPTER_DEBUG_READ HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 18)
-#define HPI_ADAPTER_FUNCTION_COUNT 18
-/* OUTPUT STREAM */
-#define HPI_OSTREAM_OPEN HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 1)
-#define HPI_OSTREAM_CLOSE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 2)
-#define HPI_OSTREAM_WRITE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 3)
-#define HPI_OSTREAM_START HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 4)
-#define HPI_OSTREAM_STOP HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 5)
-#define HPI_OSTREAM_RESET HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 6)
-#define HPI_OSTREAM_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 7)
-#define HPI_OSTREAM_QUERY_FORMAT HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 8)
-#define HPI_OSTREAM_DATA HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 9)
-#define HPI_OSTREAM_SET_VELOCITY HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 10)
-#define HPI_OSTREAM_SET_PUNCHINOUT HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 11)
-#define HPI_OSTREAM_SINEGEN HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 12)
-#define HPI_OSTREAM_ANC_RESET HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 13)
-#define HPI_OSTREAM_ANC_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 14)
-#define HPI_OSTREAM_ANC_READ HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 15)
-#define HPI_OSTREAM_SET_TIMESCALE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 16)
-#define HPI_OSTREAM_SET_FORMAT HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 17)
-#define HPI_OSTREAM_HOSTBUFFER_ALLOC HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 18)
-#define HPI_OSTREAM_HOSTBUFFER_FREE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 19)
-#define HPI_OSTREAM_GROUP_ADD HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 20)
-#define HPI_OSTREAM_GROUP_GETMAP HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 21)
-#define HPI_OSTREAM_GROUP_RESET HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 22)
-#define HPI_OSTREAM_HOSTBUFFER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 23)
-#define HPI_OSTREAM_WAIT_START HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 24)
-#define HPI_OSTREAM_FUNCTION_COUNT 24
-/* INPUT STREAM */
-#define HPI_ISTREAM_OPEN HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 1)
-#define HPI_ISTREAM_CLOSE HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 2)
-#define HPI_ISTREAM_SET_FORMAT HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 3)
-#define HPI_ISTREAM_READ HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 4)
-#define HPI_ISTREAM_START HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 5)
-#define HPI_ISTREAM_STOP HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 6)
-#define HPI_ISTREAM_RESET HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 7)
-#define HPI_ISTREAM_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 8)
-#define HPI_ISTREAM_QUERY_FORMAT HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 9)
-#define HPI_ISTREAM_ANC_RESET HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 10)
-#define HPI_ISTREAM_ANC_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 11)
-#define HPI_ISTREAM_ANC_WRITE HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 12)
-#define HPI_ISTREAM_HOSTBUFFER_ALLOC HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 13)
-#define HPI_ISTREAM_HOSTBUFFER_FREE HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 14)
-#define HPI_ISTREAM_GROUP_ADD HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 15)
-#define HPI_ISTREAM_GROUP_GETMAP HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 16)
-#define HPI_ISTREAM_GROUP_RESET HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 17)
-#define HPI_ISTREAM_HOSTBUFFER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 18)
-#define HPI_ISTREAM_WAIT_START HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 19)
-#define HPI_ISTREAM_FUNCTION_COUNT 19
-/* MIXER */
+enum HPI_FUNCTION_IDS {
+ HPI_SUBSYS_OPEN = HPI_FUNC_ID(SUBSYSTEM, 1),
+ HPI_SUBSYS_GET_VERSION = HPI_FUNC_ID(SUBSYSTEM, 2),
+ HPI_SUBSYS_GET_INFO = HPI_FUNC_ID(SUBSYSTEM, 3),
+ HPI_SUBSYS_FIND_ADAPTERS = HPI_FUNC_ID(SUBSYSTEM, 4),
+ HPI_SUBSYS_CREATE_ADAPTER = HPI_FUNC_ID(SUBSYSTEM, 5),
+ HPI_SUBSYS_CLOSE = HPI_FUNC_ID(SUBSYSTEM, 6),
+ HPI_SUBSYS_DELETE_ADAPTER = HPI_FUNC_ID(SUBSYSTEM, 7),
+ HPI_SUBSYS_DRIVER_LOAD = HPI_FUNC_ID(SUBSYSTEM, 8),
+ HPI_SUBSYS_DRIVER_UNLOAD = HPI_FUNC_ID(SUBSYSTEM, 9),
+ HPI_SUBSYS_READ_PORT_8 = HPI_FUNC_ID(SUBSYSTEM, 10),
+ HPI_SUBSYS_WRITE_PORT_8 = HPI_FUNC_ID(SUBSYSTEM, 11),
+ HPI_SUBSYS_GET_NUM_ADAPTERS = HPI_FUNC_ID(SUBSYSTEM, 12),
+ HPI_SUBSYS_GET_ADAPTER = HPI_FUNC_ID(SUBSYSTEM, 13),
+ HPI_SUBSYS_SET_NETWORK_INTERFACE = HPI_FUNC_ID(SUBSYSTEM, 14),
+ HPI_SUBSYS_OPTION_INFO = HPI_FUNC_ID(SUBSYSTEM, 15),
+ HPI_SUBSYS_OPTION_GET = HPI_FUNC_ID(SUBSYSTEM, 16),
+ HPI_SUBSYS_OPTION_SET = HPI_FUNC_ID(SUBSYSTEM, 17),
+#define HPI_SUBSYS_FUNCTION_COUNT 17
+
+ HPI_ADAPTER_OPEN = HPI_FUNC_ID(ADAPTER, 1),
+ HPI_ADAPTER_CLOSE = HPI_FUNC_ID(ADAPTER, 2),
+ HPI_ADAPTER_GET_INFO = HPI_FUNC_ID(ADAPTER, 3),
+ HPI_ADAPTER_GET_ASSERT = HPI_FUNC_ID(ADAPTER, 4),
+ HPI_ADAPTER_TEST_ASSERT = HPI_FUNC_ID(ADAPTER, 5),
+ HPI_ADAPTER_SET_MODE = HPI_FUNC_ID(ADAPTER, 6),
+ HPI_ADAPTER_GET_MODE = HPI_FUNC_ID(ADAPTER, 7),
+ HPI_ADAPTER_ENABLE_CAPABILITY = HPI_FUNC_ID(ADAPTER, 8),
+ HPI_ADAPTER_SELFTEST = HPI_FUNC_ID(ADAPTER, 9),
+ HPI_ADAPTER_FIND_OBJECT = HPI_FUNC_ID(ADAPTER, 10),
+ HPI_ADAPTER_QUERY_FLASH = HPI_FUNC_ID(ADAPTER, 11),
+ HPI_ADAPTER_START_FLASH = HPI_FUNC_ID(ADAPTER, 12),
+ HPI_ADAPTER_PROGRAM_FLASH = HPI_FUNC_ID(ADAPTER, 13),
+ HPI_ADAPTER_SET_PROPERTY = HPI_FUNC_ID(ADAPTER, 14),
+ HPI_ADAPTER_GET_PROPERTY = HPI_FUNC_ID(ADAPTER, 15),
+ HPI_ADAPTER_ENUM_PROPERTY = HPI_FUNC_ID(ADAPTER, 16),
+ HPI_ADAPTER_MODULE_INFO = HPI_FUNC_ID(ADAPTER, 17),
+ HPI_ADAPTER_DEBUG_READ = HPI_FUNC_ID(ADAPTER, 18),
+ HPI_ADAPTER_IRQ_QUERY_AND_CLEAR = HPI_FUNC_ID(ADAPTER, 19),
+ HPI_ADAPTER_IRQ_CALLBACK = HPI_FUNC_ID(ADAPTER, 20),
+#define HPI_ADAPTER_FUNCTION_COUNT 20
+
+ HPI_OSTREAM_OPEN = HPI_FUNC_ID(OSTREAM, 1),
+ HPI_OSTREAM_CLOSE = HPI_FUNC_ID(OSTREAM, 2),
+ HPI_OSTREAM_WRITE = HPI_FUNC_ID(OSTREAM, 3),
+ HPI_OSTREAM_START = HPI_FUNC_ID(OSTREAM, 4),
+ HPI_OSTREAM_STOP = HPI_FUNC_ID(OSTREAM, 5),
+ HPI_OSTREAM_RESET = HPI_FUNC_ID(OSTREAM, 6),
+ HPI_OSTREAM_GET_INFO = HPI_FUNC_ID(OSTREAM, 7),
+ HPI_OSTREAM_QUERY_FORMAT = HPI_FUNC_ID(OSTREAM, 8),
+ HPI_OSTREAM_DATA = HPI_FUNC_ID(OSTREAM, 9),
+ HPI_OSTREAM_SET_VELOCITY = HPI_FUNC_ID(OSTREAM, 10),
+ HPI_OSTREAM_SET_PUNCHINOUT = HPI_FUNC_ID(OSTREAM, 11),
+ HPI_OSTREAM_SINEGEN = HPI_FUNC_ID(OSTREAM, 12),
+ HPI_OSTREAM_ANC_RESET = HPI_FUNC_ID(OSTREAM, 13),
+ HPI_OSTREAM_ANC_GET_INFO = HPI_FUNC_ID(OSTREAM, 14),
+ HPI_OSTREAM_ANC_READ = HPI_FUNC_ID(OSTREAM, 15),
+ HPI_OSTREAM_SET_TIMESCALE = HPI_FUNC_ID(OSTREAM, 16),
+ HPI_OSTREAM_SET_FORMAT = HPI_FUNC_ID(OSTREAM, 17),
+ HPI_OSTREAM_HOSTBUFFER_ALLOC = HPI_FUNC_ID(OSTREAM, 18),
+ HPI_OSTREAM_HOSTBUFFER_FREE = HPI_FUNC_ID(OSTREAM, 19),
+ HPI_OSTREAM_GROUP_ADD = HPI_FUNC_ID(OSTREAM, 20),
+ HPI_OSTREAM_GROUP_GETMAP = HPI_FUNC_ID(OSTREAM, 21),
+ HPI_OSTREAM_GROUP_RESET = HPI_FUNC_ID(OSTREAM, 22),
+ HPI_OSTREAM_HOSTBUFFER_GET_INFO = HPI_FUNC_ID(OSTREAM, 23),
+ HPI_OSTREAM_WAIT_START = HPI_FUNC_ID(OSTREAM, 24),
+ HPI_OSTREAM_WAIT = HPI_FUNC_ID(OSTREAM, 25),
+#define HPI_OSTREAM_FUNCTION_COUNT 25
+
+ HPI_ISTREAM_OPEN = HPI_FUNC_ID(ISTREAM, 1),
+ HPI_ISTREAM_CLOSE = HPI_FUNC_ID(ISTREAM, 2),
+ HPI_ISTREAM_SET_FORMAT = HPI_FUNC_ID(ISTREAM, 3),
+ HPI_ISTREAM_READ = HPI_FUNC_ID(ISTREAM, 4),
+ HPI_ISTREAM_START = HPI_FUNC_ID(ISTREAM, 5),
+ HPI_ISTREAM_STOP = HPI_FUNC_ID(ISTREAM, 6),
+ HPI_ISTREAM_RESET = HPI_FUNC_ID(ISTREAM, 7),
+ HPI_ISTREAM_GET_INFO = HPI_FUNC_ID(ISTREAM, 8),
+ HPI_ISTREAM_QUERY_FORMAT = HPI_FUNC_ID(ISTREAM, 9),
+ HPI_ISTREAM_ANC_RESET = HPI_FUNC_ID(ISTREAM, 10),
+ HPI_ISTREAM_ANC_GET_INFO = HPI_FUNC_ID(ISTREAM, 11),
+ HPI_ISTREAM_ANC_WRITE = HPI_FUNC_ID(ISTREAM, 12),
+ HPI_ISTREAM_HOSTBUFFER_ALLOC = HPI_FUNC_ID(ISTREAM, 13),
+ HPI_ISTREAM_HOSTBUFFER_FREE = HPI_FUNC_ID(ISTREAM, 14),
+ HPI_ISTREAM_GROUP_ADD = HPI_FUNC_ID(ISTREAM, 15),
+ HPI_ISTREAM_GROUP_GETMAP = HPI_FUNC_ID(ISTREAM, 16),
+ HPI_ISTREAM_GROUP_RESET = HPI_FUNC_ID(ISTREAM, 17),
+ HPI_ISTREAM_HOSTBUFFER_GET_INFO = HPI_FUNC_ID(ISTREAM, 18),
+ HPI_ISTREAM_WAIT_START = HPI_FUNC_ID(ISTREAM, 19),
+ HPI_ISTREAM_WAIT = HPI_FUNC_ID(ISTREAM, 20),
+#define HPI_ISTREAM_FUNCTION_COUNT 20
+
/* NOTE:
GET_NODE_INFO, SET_CONNECTION, GET_CONNECTIONS are not currently used */
-#define HPI_MIXER_OPEN HPI_MAKE_INDEX(HPI_OBJ_MIXER, 1)
-#define HPI_MIXER_CLOSE HPI_MAKE_INDEX(HPI_OBJ_MIXER, 2)
-#define HPI_MIXER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_MIXER, 3)
-#define HPI_MIXER_GET_NODE_INFO HPI_MAKE_INDEX(HPI_OBJ_MIXER, 4)
-#define HPI_MIXER_GET_CONTROL HPI_MAKE_INDEX(HPI_OBJ_MIXER, 5)
-#define HPI_MIXER_SET_CONNECTION HPI_MAKE_INDEX(HPI_OBJ_MIXER, 6)
-#define HPI_MIXER_GET_CONNECTIONS HPI_MAKE_INDEX(HPI_OBJ_MIXER, 7)
-#define HPI_MIXER_GET_CONTROL_BY_INDEX HPI_MAKE_INDEX(HPI_OBJ_MIXER, 8)
-#define HPI_MIXER_GET_CONTROL_ARRAY_BY_INDEX HPI_MAKE_INDEX(HPI_OBJ_MIXER, 9)
-#define HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES HPI_MAKE_INDEX(HPI_OBJ_MIXER, 10)
-#define HPI_MIXER_STORE HPI_MAKE_INDEX(HPI_OBJ_MIXER, 11)
-#define HPI_MIXER_FUNCTION_COUNT 11
-/* MIXER CONTROLS */
-#define HPI_CONTROL_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_CONTROL, 1)
-#define HPI_CONTROL_GET_STATE HPI_MAKE_INDEX(HPI_OBJ_CONTROL, 2)
-#define HPI_CONTROL_SET_STATE HPI_MAKE_INDEX(HPI_OBJ_CONTROL, 3)
+ HPI_MIXER_OPEN = HPI_FUNC_ID(MIXER, 1),
+ HPI_MIXER_CLOSE = HPI_FUNC_ID(MIXER, 2),
+ HPI_MIXER_GET_INFO = HPI_FUNC_ID(MIXER, 3),
+ HPI_MIXER_GET_NODE_INFO = HPI_FUNC_ID(MIXER, 4),
+ HPI_MIXER_GET_CONTROL = HPI_FUNC_ID(MIXER, 5),
+ HPI_MIXER_SET_CONNECTION = HPI_FUNC_ID(MIXER, 6),
+ HPI_MIXER_GET_CONNECTIONS = HPI_FUNC_ID(MIXER, 7),
+ HPI_MIXER_GET_CONTROL_BY_INDEX = HPI_FUNC_ID(MIXER, 8),
+ HPI_MIXER_GET_CONTROL_ARRAY_BY_INDEX = HPI_FUNC_ID(MIXER, 9),
+ HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES = HPI_FUNC_ID(MIXER, 10),
+ HPI_MIXER_STORE = HPI_FUNC_ID(MIXER, 11),
+ HPI_MIXER_GET_CACHE_INFO = HPI_FUNC_ID(MIXER, 12),
+#define HPI_MIXER_FUNCTION_COUNT 12
+
+ HPI_CONTROL_GET_INFO = HPI_FUNC_ID(CONTROL, 1),
+ HPI_CONTROL_GET_STATE = HPI_FUNC_ID(CONTROL, 2),
+ HPI_CONTROL_SET_STATE = HPI_FUNC_ID(CONTROL, 3),
#define HPI_CONTROL_FUNCTION_COUNT 3
-/* NONVOL MEMORY */
-#define HPI_NVMEMORY_OPEN HPI_MAKE_INDEX(HPI_OBJ_NVMEMORY, 1)
-#define HPI_NVMEMORY_READ_BYTE HPI_MAKE_INDEX(HPI_OBJ_NVMEMORY, 2)
-#define HPI_NVMEMORY_WRITE_BYTE HPI_MAKE_INDEX(HPI_OBJ_NVMEMORY, 3)
+
+ HPI_NVMEMORY_OPEN = HPI_FUNC_ID(NVMEMORY, 1),
+ HPI_NVMEMORY_READ_BYTE = HPI_FUNC_ID(NVMEMORY, 2),
+ HPI_NVMEMORY_WRITE_BYTE = HPI_FUNC_ID(NVMEMORY, 3),
#define HPI_NVMEMORY_FUNCTION_COUNT 3
-/* GPIO */
-#define HPI_GPIO_OPEN HPI_MAKE_INDEX(HPI_OBJ_GPIO, 1)
-#define HPI_GPIO_READ_BIT HPI_MAKE_INDEX(HPI_OBJ_GPIO, 2)
-#define HPI_GPIO_WRITE_BIT HPI_MAKE_INDEX(HPI_OBJ_GPIO, 3)
-#define HPI_GPIO_READ_ALL HPI_MAKE_INDEX(HPI_OBJ_GPIO, 4)
-#define HPI_GPIO_WRITE_STATUS HPI_MAKE_INDEX(HPI_OBJ_GPIO, 5)
+
+ HPI_GPIO_OPEN = HPI_FUNC_ID(GPIO, 1),
+ HPI_GPIO_READ_BIT = HPI_FUNC_ID(GPIO, 2),
+ HPI_GPIO_WRITE_BIT = HPI_FUNC_ID(GPIO, 3),
+ HPI_GPIO_READ_ALL = HPI_FUNC_ID(GPIO, 4),
+ HPI_GPIO_WRITE_STATUS = HPI_FUNC_ID(GPIO, 5),
#define HPI_GPIO_FUNCTION_COUNT 5
-/* ASYNC EVENT */
-#define HPI_ASYNCEVENT_OPEN HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 1)
-#define HPI_ASYNCEVENT_CLOSE HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 2)
-#define HPI_ASYNCEVENT_WAIT HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 3)
-#define HPI_ASYNCEVENT_GETCOUNT HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 4)
-#define HPI_ASYNCEVENT_GET HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 5)
-#define HPI_ASYNCEVENT_SENDEVENTS HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 6)
+
+ HPI_ASYNCEVENT_OPEN = HPI_FUNC_ID(ASYNCEVENT, 1),
+ HPI_ASYNCEVENT_CLOSE = HPI_FUNC_ID(ASYNCEVENT, 2),
+ HPI_ASYNCEVENT_WAIT = HPI_FUNC_ID(ASYNCEVENT, 3),
+ HPI_ASYNCEVENT_GETCOUNT = HPI_FUNC_ID(ASYNCEVENT, 4),
+ HPI_ASYNCEVENT_GET = HPI_FUNC_ID(ASYNCEVENT, 5),
+ HPI_ASYNCEVENT_SENDEVENTS = HPI_FUNC_ID(ASYNCEVENT, 6),
#define HPI_ASYNCEVENT_FUNCTION_COUNT 6
-/* WATCH-DOG */
-#define HPI_WATCHDOG_OPEN HPI_MAKE_INDEX(HPI_OBJ_WATCHDOG, 1)
-#define HPI_WATCHDOG_SET_TIME HPI_MAKE_INDEX(HPI_OBJ_WATCHDOG, 2)
-#define HPI_WATCHDOG_PING HPI_MAKE_INDEX(HPI_OBJ_WATCHDOG, 3)
-/* CLOCK */
-#define HPI_CLOCK_OPEN HPI_MAKE_INDEX(HPI_OBJ_CLOCK, 1)
-#define HPI_CLOCK_SET_TIME HPI_MAKE_INDEX(HPI_OBJ_CLOCK, 2)
-#define HPI_CLOCK_GET_TIME HPI_MAKE_INDEX(HPI_OBJ_CLOCK, 3)
-/* PROFILE */
-#define HPI_PROFILE_OPEN_ALL HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 1)
-#define HPI_PROFILE_START_ALL HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 2)
-#define HPI_PROFILE_STOP_ALL HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 3)
-#define HPI_PROFILE_GET HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 4)
-#define HPI_PROFILE_GET_IDLECOUNT HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 5)
-#define HPI_PROFILE_GET_NAME HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 6)
-#define HPI_PROFILE_GET_UTILIZATION HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 7)
+
+ HPI_WATCHDOG_OPEN = HPI_FUNC_ID(WATCHDOG, 1),
+ HPI_WATCHDOG_SET_TIME = HPI_FUNC_ID(WATCHDOG, 2),
+ HPI_WATCHDOG_PING = HPI_FUNC_ID(WATCHDOG, 3),
+
+ HPI_CLOCK_OPEN = HPI_FUNC_ID(CLOCK, 1),
+ HPI_CLOCK_SET_TIME = HPI_FUNC_ID(CLOCK, 2),
+ HPI_CLOCK_GET_TIME = HPI_FUNC_ID(CLOCK, 3),
+
+ HPI_PROFILE_OPEN_ALL = HPI_FUNC_ID(PROFILE, 1),
+ HPI_PROFILE_START_ALL = HPI_FUNC_ID(PROFILE, 2),
+ HPI_PROFILE_STOP_ALL = HPI_FUNC_ID(PROFILE, 3),
+ HPI_PROFILE_GET = HPI_FUNC_ID(PROFILE, 4),
+ HPI_PROFILE_GET_IDLECOUNT = HPI_FUNC_ID(PROFILE, 5),
+ HPI_PROFILE_GET_NAME = HPI_FUNC_ID(PROFILE, 6),
+ HPI_PROFILE_GET_UTILIZATION = HPI_FUNC_ID(PROFILE, 7)
#define HPI_PROFILE_FUNCTION_COUNT 7
-/* ////////////////////////////////////////////////////////////////////// */
-/* PRIVATE ATTRIBUTES */
+};
/* ////////////////////////////////////////////////////////////////////// */
/* STRUCTURES */
@@ -672,18 +552,7 @@ Threshold is a -ve number in units of dB/100,
/** PCI bus resource */
struct hpi_pci {
u32 __iomem *ap_mem_base[HPI_MAX_ADAPTER_MEM_SPACES];
- struct pci_dev *p_os_data;
-
-#ifndef HPI64BIT /* keep structure size constant */
- u32 padding[HPI_MAX_ADAPTER_MEM_SPACES + 1];
-#endif
- u16 vendor_id;
- u16 device_id;
- u16 subsys_vendor_id;
- u16 subsys_device_id;
- u16 bus_number;
- u16 device_number;
- u32 interrupt;
+ struct pci_dev *pci_dev;
};
struct hpi_resource {
@@ -702,12 +571,10 @@ struct hpi_resource {
/** Format info used inside struct hpi_message
Not the same as public API struct hpi_format */
struct hpi_msg_format {
- u32 sample_rate;
- /**< 11025, 32000, 44100 ... */
- u32 bit_rate; /**< for MPEG */
- u32 attributes;
- /**< Stereo/JointStereo/Mono */
- u16 channels; /**< 1,2..., (or ancillary mode or idle bit */
+ u32 sample_rate; /**< 11025, 32000, 44100 etc. */
+ u32 bit_rate; /**< for MPEG */
+ u32 attributes; /**< stereo/joint_stereo/mono */
+	u16 channels;	/**< 1, 2, ... (or ancillary mode or idle bit) */
u16 format; /**< HPI_FORMAT_PCM16, _MPEG etc. see \ref HPI_FORMATS. */
};
@@ -742,7 +609,7 @@ struct hpi_data_compat32 {
struct hpi_buffer {
	/** placeholder for backward compatibility (see dwBufferSize) */
struct hpi_msg_format reserved;
- u32 command; /**< HPI_BUFFER_CMD_xxx*/
+ u32 command; /**< HPI_BUFFER_CMD_xxx*/
u32 pci_address; /**< PCI physical address of buffer for DSP DMA */
u32 buffer_size; /**< must line up with data_size of HPI_DATA*/
};
@@ -777,30 +644,25 @@ struct hpi_subsys_msg {
struct hpi_subsys_res {
u32 version;
- u32 data; /* used to return extended version */
- u16 num_adapters; /* number of adapters */
+ u32 data; /* extended version */
+ u16 num_adapters;
u16 adapter_index;
- u16 aw_adapter_list[HPI_MAX_ADAPTERS];
-};
-
-struct hpi_adapter_msg {
- u32 adapter_mode; /* adapter mode */
- u16 assert_id; /* assert number for "test assert" call
- object_index for find object call
- query_or_set for hpi_adapter_set_mode_ex() */
- u16 object_type; /* for adapter find object call */
+ u16 adapter_type;
+ u16 pad16;
};
union hpi_adapterx_msg {
- struct hpi_adapter_msg adapter;
struct {
- u32 offset;
- } query_flash;
+ u32 dsp_address;
+ u32 count_bytes;
+ } debug_read;
struct {
- u32 offset;
- u32 length;
- u32 key;
- } start_flash;
+ u32 adapter_mode;
+ u16 query_or_set;
+ } mode;
+ struct {
+ u16 index;
+ } module_info;
struct {
u32 checksum;
u16 sequence;
@@ -809,28 +671,41 @@ union hpi_adapterx_msg {
u16 unused;
} program_flash;
struct {
+ u16 index;
+ u16 what;
+ u16 property_index;
+ } property_enum;
+ struct {
u16 property;
u16 parameter1;
u16 parameter2;
} property_set;
struct {
- u16 index;
- u16 what;
- u16 property_index;
- } property_enum;
+ u32 offset;
+ } query_flash;
struct {
- u16 index;
- } module_info;
+ u32 pad32;
+ u16 key1;
+ u16 key2;
+ } restart;
struct {
- u32 dsp_address;
- u32 count_bytes;
- } debug_read;
+ u32 offset;
+ u32 length;
+ u32 key;
+ } start_flash;
+ struct {
+ u32 pad32;
+ u16 value;
+ } test_assert;
+ struct {
+ u32 yes;
+ } irq_query;
};
struct hpi_adapter_res {
u32 serial_number;
u16 adapter_type;
- u16 adapter_index; /* is this needed? also used for dsp_index */
+ u16 adapter_index;
u16 num_instreams;
u16 num_outstreams;
u16 num_mixers;
@@ -839,12 +714,18 @@ struct hpi_adapter_res {
};
union hpi_adapterx_res {
- struct hpi_adapter_res adapter;
+ struct hpi_adapter_res info;
struct {
- u32 checksum;
- u32 length;
- u32 version;
- } query_flash;
+ u32 p1;
+ u16 count;
+ u16 dsp_index;
+ u32 p2;
+ u32 dsp_msg_addr;
+ char sz_message[HPI_STRING_LEN];
+ } assert;
+ struct {
+ u32 adapter_mode;
+ } mode;
struct {
u16 sequence;
} program_flash;
@@ -852,6 +733,14 @@ union hpi_adapterx_res {
u16 parameter1;
u16 parameter2;
} property_get;
+ struct {
+ u32 checksum;
+ u32 length;
+ u32 version;
+ } query_flash;
+ struct {
+ u32 yes;
+ } irq_query;
};
struct hpi_stream_msg {
@@ -863,6 +752,7 @@ struct hpi_stream_msg {
u32 time_scale;
struct hpi_buffer buffer;
struct hpi_streamid stream;
+ u32 threshold_bytes;
} u;
};
@@ -911,7 +801,7 @@ struct hpi_stream_res {
struct hpi_mixer_msg {
u16 control_index;
u16 control_type; /* = HPI_CONTROL_METER _VOLUME etc */
- u16 padding1; /* maintain alignment of subsequent fields */
+ u16 padding1; /* Maintain alignment of subsequent fields */
u16 node_type1; /* = HPI_SOURCENODE_LINEIN etc */
u16 node_index1; /* = 0..N */
u16 node_type2;
@@ -949,6 +839,11 @@ union hpi_mixerx_res {
u32 p_data; /* pointer to data array */
u16 more_to_do; /* indicates if there is more to do */
} gcabi;
+ struct {
+ u32 total_controls; /* count of controls in the mixer */
+		u32 cache_controls; /* count of controls in the cache */
+ u32 cache_bytes; /* size of cache */
+ } cache_info;
};
struct hpi_control_msg {
@@ -1000,12 +895,16 @@ union hpi_control_union_res {
u32 band;
u32 frequency;
u32 gain;
- u32 level;
u32 deemphasis;
struct {
u32 data[2];
u32 bLER;
} rds;
+ short s_level;
+ struct {
+ u16 value;
+ u16 mask;
+ } status;
} tuner;
struct {
char sz_data[8];
@@ -1178,11 +1077,11 @@ struct hpi_profile_res_open {
};
struct hpi_profile_res_time {
- u32 micro_seconds;
+ u32 total_tick_count;
u32 call_count;
- u32 max_micro_seconds;
- u32 min_micro_seconds;
- u16 seconds;
+ u32 max_tick_count;
+ u32 ticks_per_millisecond;
+ u16 profile_interval;
};
struct hpi_profile_res_name {
@@ -1218,7 +1117,6 @@ struct hpi_message {
u16 obj_index; /* */
union {
struct hpi_subsys_msg s;
- struct hpi_adapter_msg a;
union hpi_adapterx_msg ax;
struct hpi_stream_msg d;
struct hpi_mixer_msg m;
@@ -1239,7 +1137,7 @@ struct hpi_message {
};
#define HPI_MESSAGE_SIZE_BY_OBJECT { \
- sizeof(struct hpi_message_header) , /* default, no object type 0 */ \
+ sizeof(struct hpi_message_header) , /* Default, no object type 0 */ \
sizeof(struct hpi_message_header) + sizeof(struct hpi_subsys_msg),\
sizeof(struct hpi_message_header) + sizeof(union hpi_adapterx_msg),\
sizeof(struct hpi_message_header) + sizeof(struct hpi_stream_msg),\
@@ -1256,6 +1154,11 @@ struct hpi_message {
sizeof(struct hpi_message_header) + sizeof(struct hpi_async_msg) \
}
+/*
+Note that the wSpecificError error field should be inspected and potentially
+reported whenever HPI_ERROR_DSP_COMMUNICATION or HPI_ERROR_DSP_BOOTLOAD is
+returned in wError.
+*/
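A minimal caller-side sketch of the check the note above suggests; the field names follow struct hpi_response later in this header, and the report_hpi_error() helper is hypothetical.

/* Hypothetical helper, not part of the HPI API; pr_warn() is the
 * standard kernel logging call. */
static void report_hpi_error(const struct hpi_response *phr)
{
	if (phr->error == HPI_ERROR_DSP_COMMUNICATION ||
	    phr->error == HPI_ERROR_DSP_BOOTLOAD)
		pr_warn("HPI error %u, specific error %u\n",
			phr->error, phr->specific_error);
	else if (phr->error)
		pr_warn("HPI error %u\n", phr->error);
}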
struct hpi_response_header {
u16 size;
u8 type; /* HPI_TYPE_RESPONSE */
@@ -1277,7 +1180,6 @@ struct hpi_response {
u16 specific_error; /* adapter specific error */
union {
struct hpi_subsys_res s;
- struct hpi_adapter_res a;
union hpi_adapterx_res ax;
struct hpi_stream_res d;
struct hpi_mixer_res m;
@@ -1297,7 +1199,7 @@ struct hpi_response {
};
#define HPI_RESPONSE_SIZE_BY_OBJECT { \
- sizeof(struct hpi_response_header) ,/* default, no object type 0 */ \
+ sizeof(struct hpi_response_header) ,/* Default, no object type 0 */ \
sizeof(struct hpi_response_header) + sizeof(struct hpi_subsys_res),\
sizeof(struct hpi_response_header) + sizeof(union hpi_adapterx_res),\
sizeof(struct hpi_response_header) + sizeof(struct hpi_stream_res),\
@@ -1314,7 +1216,7 @@ struct hpi_response {
sizeof(struct hpi_response_header) + sizeof(struct hpi_async_res) \
}
-/*********************** version 1 message/response *****************************/
+/*********************** version 1 message/response **************************/
#define HPINET_ETHERNET_DATA_SIZE (1500)
#define HPINET_IP_HDR_SIZE (20)
#define HPINET_IP_DATA_SIZE (HPINET_ETHERNET_DATA_SIZE - HPINET_IP_HDR_SIZE)
@@ -1394,6 +1296,17 @@ struct hpi_res_adapter_program_flash {
sizeof(struct hpi_response_header) - sizeof(u16)];
};
+struct hpi_msg_adapter_debug_read {
+ struct hpi_message_header h;
+ u32 dsp_address;
+ u32 count_bytes;
+};
+
+struct hpi_res_adapter_debug_read {
+ struct hpi_response_header h;
+ u8 bytes[256];
+};
+
#if 1
#define hpi_message_header_v1 hpi_message_header
#define hpi_response_header_v1 hpi_response_header
@@ -1414,23 +1327,10 @@ struct hpi_response_header_v1 {
};
#endif
-/* STRV HPI Packet */
-struct hpi_msg_strv {
- struct hpi_message_header h;
- struct hpi_entity strv;
-};
-
-struct hpi_res_strv {
- struct hpi_response_header h;
- struct hpi_entity strv;
-};
-#define MIN_STRV_PACKET_SIZE sizeof(struct hpi_res_strv)
-
struct hpi_msg_payload_v0 {
struct hpi_message_header h;
union {
struct hpi_subsys_msg s;
- struct hpi_adapter_msg a;
union hpi_adapterx_msg ax;
struct hpi_stream_msg d;
struct hpi_mixer_msg m;
@@ -1451,7 +1351,6 @@ struct hpi_res_payload_v0 {
struct hpi_response_header h;
union {
struct hpi_subsys_res s;
- struct hpi_adapter_res a;
union hpi_adapterx_res ax;
struct hpi_stream_res d;
struct hpi_mixer_res m;
@@ -1471,13 +1370,13 @@ struct hpi_res_payload_v0 {
union hpi_message_buffer_v1 {
struct hpi_message m0; /* version 0 */
struct hpi_message_header_v1 h;
- unsigned char buf[HPI_MAX_PAYLOAD_SIZE];
+ u8 buf[HPI_MAX_PAYLOAD_SIZE];
};
union hpi_response_buffer_v1 {
struct hpi_response r0; /* version 0 */
struct hpi_response_header_v1 h;
- unsigned char buf[HPI_MAX_PAYLOAD_SIZE];
+ u8 buf[HPI_MAX_PAYLOAD_SIZE];
};
compile_time_assert((sizeof(union hpi_message_buffer_v1) <=
@@ -1499,6 +1398,11 @@ struct hpi_control_defn {
/*////////////////////////////////////////////////////////////////////////// */
/* declarations for control caching (internal to HPI<->DSP interaction) */
+/** indicates a cached u16 value is invalid. */
+#define HPI_CACHE_INVALID_UINT16 0xFFFF
+/** indicates a cached short value is invalid. */
+#define HPI_CACHE_INVALID_SHORT -32768
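A minimal sketch of how these sentinels are intended to be tested; it mirrors the checks added to hpicmn.c further down (for example the HPI_SAMPLECLOCK_SOURCE_INDEX and tuner level-average paths).

/* Illustration only: a cached field still holding the sentinel has not
 * been filled in by the DSP and must not be returned to the caller. */
static int cached_u16_valid(u16 value)
{
	return value != HPI_CACHE_INVALID_UINT16;
}

static int cached_short_valid(short value)
{
	return value != HPI_CACHE_INVALID_SHORT;
}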
+
/** A compact representation of (part of) a controls state.
Used for efficient transfer of the control state
between DSP and host or across a network
@@ -1512,58 +1416,104 @@ struct hpi_control_cache_info {
u16 control_index;
};
-struct hpi_control_cache_single {
+struct hpi_control_cache_vol {
+ struct hpi_control_cache_info i;
+ short an_log[2];
+ unsigned short flags;
+ char padding[2];
+};
+
+struct hpi_control_cache_meter {
+ struct hpi_control_cache_info i;
+ short an_log_peak[2];
+ short an_logRMS[2];
+};
+
+struct hpi_control_cache_channelmode {
+ struct hpi_control_cache_info i;
+ u16 mode;
+ char temp_padding[6];
+};
+
+struct hpi_control_cache_mux {
+ struct hpi_control_cache_info i;
+ u16 source_node_type;
+ u16 source_node_index;
+ char temp_padding[4];
+};
+
+struct hpi_control_cache_level {
struct hpi_control_cache_info i;
+ short an_log[2];
+ char temp_padding[4];
+};
+
+struct hpi_control_cache_tuner {
+ struct hpi_control_cache_info i;
+ u32 freq_ink_hz;
+ u16 band;
+ short s_level_avg;
+};
+
+struct hpi_control_cache_aes3rx {
+ struct hpi_control_cache_info i;
+ u32 error_status;
+ u32 format;
+};
+
+struct hpi_control_cache_aes3tx {
+ struct hpi_control_cache_info i;
+ u32 format;
+ char temp_padding[4];
+};
+
+struct hpi_control_cache_tonedetector {
+ struct hpi_control_cache_info i;
+ u16 state;
+ char temp_padding[6];
+};
+
+struct hpi_control_cache_silencedetector {
+ struct hpi_control_cache_info i;
+ u32 state;
+ char temp_padding[4];
+};
+
+struct hpi_control_cache_sampleclock {
+ struct hpi_control_cache_info i;
+ u16 source;
+ u16 source_index;
+ u32 sample_rate;
+};
+
+struct hpi_control_cache_microphone {
+ struct hpi_control_cache_info i;
+ u16 phantom_state;
+ char temp_padding[6];
+};
+
+struct hpi_control_cache_generic {
+ struct hpi_control_cache_info i;
+ u32 dw1;
+ u32 dw2;
+};
+
+struct hpi_control_cache_single {
union {
- struct { /* volume */
- short an_log[2];
- } v;
- struct { /* peak meter */
- short an_log_peak[2];
- short an_logRMS[2];
- } p;
- struct { /* channel mode */
- u16 mode;
- } m;
- struct { /* multiplexer */
- u16 source_node_type;
- u16 source_node_index;
- } x;
- struct { /* level/trim */
- short an_log[2];
- } l;
- struct { /* tuner - partial caching.
- some attributes go to the DSP. */
- u32 freq_ink_hz;
- u16 band;
- u16 level;
- } t;
- struct { /* AESEBU rx status */
- u32 error_status;
- u32 source;
- } aes3rx;
- struct { /* AESEBU tx */
- u32 format;
- } aes3tx;
- struct { /* tone detector */
- u16 state;
- } tone;
- struct { /* silence detector */
- u32 state;
- u32 count;
- } silence;
- struct { /* sample clock */
- u16 source;
- u16 source_index;
- u32 sample_rate;
- } clk;
- struct { /* microphone control */
- u16 state;
- } phantom_power;
- struct { /* generic control */
- u32 dw1;
- u32 dw2;
- } g;
+ struct hpi_control_cache_info i;
+ struct hpi_control_cache_vol vol;
+ struct hpi_control_cache_meter meter;
+ struct hpi_control_cache_channelmode mode;
+ struct hpi_control_cache_mux mux;
+ struct hpi_control_cache_level level;
+ struct hpi_control_cache_tuner tuner;
+ struct hpi_control_cache_aes3rx aes3rx;
+ struct hpi_control_cache_aes3tx aes3tx;
+ struct hpi_control_cache_tonedetector tone;
+ struct hpi_control_cache_silencedetector silence;
+ struct hpi_control_cache_sampleclock clk;
+ struct hpi_control_cache_microphone microphone;
+ struct hpi_control_cache_generic generic;
} u;
};
@@ -1580,8 +1530,7 @@ struct hpi_control_cache_pad {
u32 traffic_anouncement;
};
-/*/////////////////////////////////////////////////////////////////////////// */
-/* declarations for 2^N sized FIFO buffer (internal to HPI<->DSP interaction) */
+/* 2^N sized FIFO buffer (internal to HPI<->DSP interaction) */
struct hpi_fifo_buffer {
u32 size;
u32 dSP_index;
@@ -1606,25 +1555,18 @@ u32 hpi_indexes_to_handle(const char c_object, const u16 adapter_index,
/*////////////////////////////////////////////////////////////////////////// */
/* main HPI entry point */
-hpi_handler_func hpi_send_recv;
-
-/* UDP message */
-void hpi_send_recvUDP(struct hpi_message *phm, struct hpi_response *phr,
- const unsigned int timeout);
+void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr);
/* used in PnP OS/driver */
-u16 hpi_subsys_create_adapter(const struct hpi_hsubsys *ph_subsys,
- const struct hpi_resource *p_resource, u16 *pw_adapter_index);
+u16 hpi_subsys_create_adapter(const struct hpi_resource *p_resource,
+ u16 *pw_adapter_index);
-u16 hpi_subsys_delete_adapter(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index);
+u16 hpi_subsys_delete_adapter(u16 adapter_index);
-u16 hpi_outstream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u8 **pp_buffer,
+u16 hpi_outstream_host_buffer_get_info(u32 h_outstream, u8 **pp_buffer,
struct hpi_hostbuffer_status **pp_status);
-u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u8 **pp_buffer,
+u16 hpi_instream_host_buffer_get_info(u32 h_instream, u8 **pp_buffer,
struct hpi_hostbuffer_status **pp_status);
u16 hpi_adapter_restart(u16 adapter_index);
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index d67f4d3db911..3e9c5c289764 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -26,6 +26,8 @@
#include "hpi_internal.h"
#include "hpidebug.h"
+#include "hpimsginit.h"
+
#include "hpicmn.h"
struct hpi_adapters_list {
@@ -43,14 +45,24 @@ static struct hpi_adapters_list adapters;
**/
u16 hpi_validate_response(struct hpi_message *phm, struct hpi_response *phr)
{
- u16 error = 0;
+ if (phr->type != HPI_TYPE_RESPONSE) {
+ HPI_DEBUG_LOG(ERROR, "header type %d invalid\n", phr->type);
+ return HPI_ERROR_INVALID_RESPONSE;
+ }
- if ((phr->type != HPI_TYPE_RESPONSE)
- || (phr->object != phm->object)
- || (phr->function != phm->function))
- error = HPI_ERROR_INVALID_RESPONSE;
+ if (phr->object != phm->object) {
+ HPI_DEBUG_LOG(ERROR, "header object %d invalid\n",
+ phr->object);
+ return HPI_ERROR_INVALID_RESPONSE;
+ }
+
+ if (phr->function != phm->function) {
+		HPI_DEBUG_LOG(ERROR, "header function %d invalid\n",
+ phr->function);
+ return HPI_ERROR_INVALID_RESPONSE;
+ }
- return error;
+ return 0;
}
u16 hpi_add_adapter(struct hpi_adapter_obj *pao)
@@ -66,8 +78,18 @@ u16 hpi_add_adapter(struct hpi_adapter_obj *pao)
}
if (adapters.adapter[pao->index].adapter_type) {
- {
- retval = HPI_DUPLICATE_ADAPTER_NUMBER;
+ int a;
+ for (a = HPI_MAX_ADAPTERS - 1; a >= 0; a--) {
+ if (!adapters.adapter[a].adapter_type) {
+ HPI_DEBUG_LOG(WARNING,
+ "ASI%X duplicate index %d moved to %d\n",
+ pao->adapter_type, pao->index, a);
+ pao->index = a;
+ break;
+ }
+ }
+ if (a < 0) {
+ retval = HPI_ERROR_DUPLICATE_ADAPTER_NUMBER;
goto unlock;
}
}
@@ -76,17 +98,22 @@ u16 hpi_add_adapter(struct hpi_adapter_obj *pao)
adapters.gw_num_adapters++;
unlock:
- hpios_alistlock_un_lock(&adapters);
+ hpios_alistlock_unlock(&adapters);
return retval;
}
void hpi_delete_adapter(struct hpi_adapter_obj *pao)
{
- memset(pao, 0, sizeof(struct hpi_adapter_obj));
+ if (!pao->adapter_type) {
+ HPI_DEBUG_LOG(ERROR, "removing null adapter?\n");
+ return;
+ }
hpios_alistlock_lock(&adapters);
- adapters.gw_num_adapters--; /* dec the number of adapters */
- hpios_alistlock_un_lock(&adapters);
+ if (adapters.adapter[pao->index].adapter_type)
+ adapters.gw_num_adapters--;
+ memset(&adapters.adapter[pao->index], 0, sizeof(adapters.adapter[0]));
+ hpios_alistlock_unlock(&adapters);
}
/**
@@ -99,7 +126,7 @@ struct hpi_adapter_obj *hpi_find_adapter(u16 adapter_index)
struct hpi_adapter_obj *pao = NULL;
if (adapter_index >= HPI_MAX_ADAPTERS) {
- HPI_DEBUG_LOG(VERBOSE, "find_adapter invalid index %d ",
+ HPI_DEBUG_LOG(VERBOSE, "find_adapter invalid index %d\n",
adapter_index);
return NULL;
}
@@ -125,51 +152,34 @@ struct hpi_adapter_obj *hpi_find_adapter(u16 adapter_index)
* wipe an HPI_ADAPTERS_LIST structure.
*
**/
-static void wipe_adapter_list(void
- )
+static void wipe_adapter_list(void)
{
memset(&adapters, 0, sizeof(adapters));
}
-/**
-* SubSysGetAdapters fills awAdapterList in an struct hpi_response structure
-* with all adapters in the given HPI_ADAPTERS_LIST.
-*
-*/
-static void subsys_get_adapters(struct hpi_response *phr)
+static void subsys_get_adapter(struct hpi_message *phm,
+ struct hpi_response *phr)
{
- /* fill in the response adapter array with the position */
- /* identified by the adapter number/index of the adapters in */
- /* this HPI */
- /* i.e. if we have an A120 with it's jumper set to */
- /* Adapter Number 2 then put an Adapter type A120 in the */
- /* array in position 1 */
- /* NOTE: AdapterNumber is 1..N, Index is 0..N-1 */
-
- /* input: NONE */
- /* output: wNumAdapters */
- /* awAdapter[] */
- /* */
-
- short i;
- struct hpi_adapter_obj *pao = NULL;
+ int count = phm->obj_index;
+ u16 index = 0;
- HPI_DEBUG_LOG(VERBOSE, "subsys_get_adapters\n");
-
- /* for each adapter, place it's type in the position of the array */
- /* corresponding to it's adapter number */
- for (i = 0; i < adapters.gw_num_adapters; i++) {
- pao = &adapters.adapter[i];
- if (phr->u.s.aw_adapter_list[pao->index] != 0) {
- phr->error = HPI_DUPLICATE_ADAPTER_NUMBER;
- phr->specific_error = pao->index;
- return;
+	/* find the count'th nonzero adapter in the array */
+ for (index = 0; index < HPI_MAX_ADAPTERS; index++) {
+ if (adapters.adapter[index].adapter_type) {
+ if (!count)
+ break;
+ count--;
}
- phr->u.s.aw_adapter_list[pao->index] = pao->adapter_type;
}
- phr->u.s.num_adapters = adapters.gw_num_adapters;
- phr->error = 0; /* the function completed OK; */
+ if (index < HPI_MAX_ADAPTERS) {
+ phr->u.s.adapter_index = adapters.adapter[index].index;
+ phr->u.s.adapter_type = adapters.adapter[index].adapter_type;
+ } else {
+ phr->u.s.adapter_index = 0;
+ phr->u.s.adapter_type = 0;
+ phr->error = HPI_ERROR_BAD_ADAPTER_NUMBER;
+ }
}
static unsigned int control_cache_alloc_check(struct hpi_control_cache *pC)
@@ -178,67 +188,98 @@ static unsigned int control_cache_alloc_check(struct hpi_control_cache *pC)
int cached = 0;
if (!pC)
return 0;
- if ((!pC->init) && (pC->p_cache != NULL) && (pC->control_count)
- && (pC->cache_size_in_bytes)
- ) {
- u32 *p_master_cache;
- pC->init = 1;
-
- p_master_cache = (u32 *)pC->p_cache;
- HPI_DEBUG_LOG(VERBOSE, "check %d controls\n",
+
+ if (pC->init)
+ return pC->init;
+
+ if (!pC->p_cache)
+ return 0;
+
+ if (pC->control_count && pC->cache_size_in_bytes) {
+ char *p_master_cache;
+ unsigned int byte_count = 0;
+
+ p_master_cache = (char *)pC->p_cache;
+ HPI_DEBUG_LOG(DEBUG, "check %d controls\n",
pC->control_count);
for (i = 0; i < pC->control_count; i++) {
struct hpi_control_cache_info *info =
(struct hpi_control_cache_info *)
- p_master_cache;
+ &p_master_cache[byte_count];
+
+ if (!info->size_in32bit_words) {
+ if (!i) {
+ HPI_DEBUG_LOG(INFO,
+ "adap %d cache not ready?\n",
+ pC->adap_idx);
+ return 0;
+ }
+ /* The cache is invalid.
+ * Minimum valid entry size is
+ * sizeof(struct hpi_control_cache_info)
+ */
+ HPI_DEBUG_LOG(ERROR,
+ "adap %d zero size cache entry %d\n",
+ pC->adap_idx, i);
+ break;
+ }
if (info->control_type) {
- pC->p_info[i] = info;
+ pC->p_info[info->control_index] = info;
cached++;
- } else
- pC->p_info[i] = NULL;
+ } else /* dummy cache entry */
+ pC->p_info[info->control_index] = NULL;
- if (info->size_in32bit_words)
- p_master_cache += info->size_in32bit_words;
- else
- p_master_cache +=
- sizeof(struct
- hpi_control_cache_single) /
- sizeof(u32);
+ byte_count += info->size_in32bit_words * 4;
HPI_DEBUG_LOG(VERBOSE,
- "cached %d, pinfo %p index %d type %d\n",
- cached, pC->p_info[i], info->control_index,
- info->control_type);
+ "cached %d, pinfo %p index %d type %d size %d\n",
+ cached, pC->p_info[info->control_index],
+ info->control_index, info->control_type,
+ info->size_in32bit_words);
+
+ /* quit loop early if whole cache has been scanned.
+ * dwControlCount is the maximum possible entries
+ * but some may be absent from the cache
+ */
+ if (byte_count >= pC->cache_size_in_bytes)
+ break;
+ /* have seen last control index */
+ if (info->control_index == pC->control_count - 1)
+ break;
}
- /*
- We didn't find anything to cache, so try again later !
- */
- if (!cached)
- pC->init = 0;
+
+ if (byte_count != pC->cache_size_in_bytes)
+ HPI_DEBUG_LOG(WARNING,
+ "adap %d bytecount %d != cache size %d\n",
+ pC->adap_idx, byte_count,
+ pC->cache_size_in_bytes);
+ else
+ HPI_DEBUG_LOG(DEBUG,
+ "adap %d cache good, bytecount == cache size = %d\n",
+ pC->adap_idx, byte_count);
+
+ pC->init = (u16)cached;
}
return pC->init;
}
/** Find a control.
*/
-static short find_control(struct hpi_message *phm,
- struct hpi_control_cache *p_cache, struct hpi_control_cache_info **pI,
- u16 *pw_control_index)
+static short find_control(u16 control_index,
+ struct hpi_control_cache *p_cache, struct hpi_control_cache_info **pI)
{
- *pw_control_index = phm->obj_index;
-
if (!control_cache_alloc_check(p_cache)) {
HPI_DEBUG_LOG(VERBOSE,
- "control_cache_alloc_check() failed. adap%d ci%d\n",
- phm->adapter_index, *pw_control_index);
+ "control_cache_alloc_check() failed %d\n",
+ control_index);
return 0;
}
- *pI = p_cache->p_info[*pw_control_index];
+ *pI = p_cache->p_info[control_index];
if (!*pI) {
- HPI_DEBUG_LOG(VERBOSE, "uncached adap %d, control %d\n",
- phm->adapter_index, *pw_control_index);
+ HPI_DEBUG_LOG(VERBOSE, "Uncached Control %d\n",
+ control_index);
return 0;
} else {
HPI_DEBUG_LOG(VERBOSE, "find_control() type %d\n",
@@ -247,25 +288,6 @@ static short find_control(struct hpi_message *phm,
return 1;
}
-/** Used by the kernel driver to figure out if a buffer needs mapping.
- */
-short hpi_check_buffer_mapping(struct hpi_control_cache *p_cache,
- struct hpi_message *phm, void **p, unsigned int *pN)
-{
- *pN = 0;
- *p = NULL;
- if ((phm->function == HPI_CONTROL_GET_STATE)
- && (phm->object == HPI_OBJ_CONTROLEX)
- ) {
- u16 control_index;
- struct hpi_control_cache_info *pI;
-
- if (!find_control(phm, p_cache, &pI, &control_index))
- return 0;
- }
- return 0;
-}
-
/* allow unified treatment of several string fields within struct */
#define HPICMN_PAD_OFS_AND_SIZE(m) {\
offsetof(struct hpi_control_cache_pad, m), \
@@ -290,13 +312,16 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
struct hpi_message *phm, struct hpi_response *phr)
{
short found = 1;
- u16 control_index;
struct hpi_control_cache_info *pI;
struct hpi_control_cache_single *pC;
struct hpi_control_cache_pad *p_pad;
- if (!find_control(phm, p_cache, &pI, &control_index))
+ if (!find_control(phm->obj_index, p_cache, &pI)) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "HPICMN find_control() failed for adap %d\n",
+ phm->adapter_index);
return 0;
+ }
phr->error = 0;
@@ -310,55 +335,79 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
case HPI_CONTROL_METER:
if (phm->u.c.attribute == HPI_METER_PEAK) {
- phr->u.c.an_log_value[0] = pC->u.p.an_log_peak[0];
- phr->u.c.an_log_value[1] = pC->u.p.an_log_peak[1];
+ phr->u.c.an_log_value[0] = pC->u.meter.an_log_peak[0];
+ phr->u.c.an_log_value[1] = pC->u.meter.an_log_peak[1];
} else if (phm->u.c.attribute == HPI_METER_RMS) {
- phr->u.c.an_log_value[0] = pC->u.p.an_logRMS[0];
- phr->u.c.an_log_value[1] = pC->u.p.an_logRMS[1];
+ if (pC->u.meter.an_logRMS[0] ==
+ HPI_CACHE_INVALID_SHORT) {
+ phr->error =
+ HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
+ phr->u.c.an_log_value[0] = HPI_METER_MINIMUM;
+ phr->u.c.an_log_value[1] = HPI_METER_MINIMUM;
+ } else {
+ phr->u.c.an_log_value[0] =
+ pC->u.meter.an_logRMS[0];
+ phr->u.c.an_log_value[1] =
+ pC->u.meter.an_logRMS[1];
+ }
} else
found = 0;
break;
case HPI_CONTROL_VOLUME:
if (phm->u.c.attribute == HPI_VOLUME_GAIN) {
- phr->u.c.an_log_value[0] = pC->u.v.an_log[0];
- phr->u.c.an_log_value[1] = pC->u.v.an_log[1];
- } else
+ phr->u.c.an_log_value[0] = pC->u.vol.an_log[0];
+ phr->u.c.an_log_value[1] = pC->u.vol.an_log[1];
+ } else if (phm->u.c.attribute == HPI_VOLUME_MUTE) {
+ if (pC->u.vol.flags & HPI_VOLUME_FLAG_HAS_MUTE) {
+ if (pC->u.vol.flags & HPI_VOLUME_FLAG_MUTED)
+ phr->u.c.param1 =
+ HPI_BITMASK_ALL_CHANNELS;
+ else
+ phr->u.c.param1 = 0;
+ } else {
+ phr->error =
+ HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
+ phr->u.c.param1 = 0;
+ }
+ } else {
found = 0;
+ }
break;
case HPI_CONTROL_MULTIPLEXER:
if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) {
- phr->u.c.param1 = pC->u.x.source_node_type;
- phr->u.c.param2 = pC->u.x.source_node_index;
+ phr->u.c.param1 = pC->u.mux.source_node_type;
+ phr->u.c.param2 = pC->u.mux.source_node_index;
} else {
found = 0;
}
break;
case HPI_CONTROL_CHANNEL_MODE:
if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE)
- phr->u.c.param1 = pC->u.m.mode;
+ phr->u.c.param1 = pC->u.mode.mode;
else
found = 0;
break;
case HPI_CONTROL_LEVEL:
if (phm->u.c.attribute == HPI_LEVEL_GAIN) {
- phr->u.c.an_log_value[0] = pC->u.l.an_log[0];
- phr->u.c.an_log_value[1] = pC->u.l.an_log[1];
+ phr->u.c.an_log_value[0] = pC->u.level.an_log[0];
+ phr->u.c.an_log_value[1] = pC->u.level.an_log[1];
} else
found = 0;
break;
case HPI_CONTROL_TUNER:
if (phm->u.c.attribute == HPI_TUNER_FREQ)
- phr->u.c.param1 = pC->u.t.freq_ink_hz;
+ phr->u.c.param1 = pC->u.tuner.freq_ink_hz;
else if (phm->u.c.attribute == HPI_TUNER_BAND)
- phr->u.c.param1 = pC->u.t.band;
- else if ((phm->u.c.attribute == HPI_TUNER_LEVEL)
- && (phm->u.c.param1 == HPI_TUNER_LEVEL_AVERAGE))
- if (pC->u.t.level == HPI_ERROR_ILLEGAL_CACHE_VALUE) {
- phr->u.c.param1 = 0;
+ phr->u.c.param1 = pC->u.tuner.band;
+ else if (phm->u.c.attribute == HPI_TUNER_LEVEL_AVG)
+ if (pC->u.tuner.s_level_avg ==
+ HPI_CACHE_INVALID_SHORT) {
+ phr->u.cu.tuner.s_level = 0;
phr->error =
HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
} else
- phr->u.c.param1 = pC->u.t.level;
+ phr->u.cu.tuner.s_level =
+ pC->u.tuner.s_level_avg;
else
found = 0;
break;
@@ -366,7 +415,7 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
if (phm->u.c.attribute == HPI_AESEBURX_ERRORSTATUS)
phr->u.c.param1 = pC->u.aes3rx.error_status;
else if (phm->u.c.attribute == HPI_AESEBURX_FORMAT)
- phr->u.c.param1 = pC->u.aes3rx.source;
+ phr->u.c.param1 = pC->u.aes3rx.format;
else
found = 0;
break;
@@ -385,13 +434,12 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
case HPI_CONTROL_SILENCEDETECTOR:
if (phm->u.c.attribute == HPI_SILENCEDETECTOR_STATE) {
phr->u.c.param1 = pC->u.silence.state;
- phr->u.c.param2 = pC->u.silence.count;
} else
found = 0;
break;
case HPI_CONTROL_MICROPHONE:
if (phm->u.c.attribute == HPI_MICROPHONE_PHANTOM_POWER)
- phr->u.c.param1 = pC->u.phantom_power.state;
+ phr->u.c.param1 = pC->u.microphone.phantom_state;
else
found = 0;
break;
@@ -400,7 +448,7 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
phr->u.c.param1 = pC->u.clk.source;
else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX) {
if (pC->u.clk.source_index ==
- HPI_ERROR_ILLEGAL_CACHE_VALUE) {
+ HPI_CACHE_INVALID_UINT16) {
phr->u.c.param1 = 0;
phr->error =
HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
@@ -411,60 +459,63 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
else
found = 0;
break;
- case HPI_CONTROL_PAD:
+ case HPI_CONTROL_PAD:{
+ struct hpi_control_cache_pad *p_pad;
+ p_pad = (struct hpi_control_cache_pad *)pI;
- if (!(p_pad->field_valid_flags & (1 <<
- HPI_CTL_ATTR_INDEX(phm->u.c.
- attribute)))) {
- phr->error = HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
- break;
- }
-
- if (phm->u.c.attribute == HPI_PAD_PROGRAM_ID)
- phr->u.c.param1 = p_pad->pI;
- else if (phm->u.c.attribute == HPI_PAD_PROGRAM_TYPE)
- phr->u.c.param1 = p_pad->pTY;
- else {
- unsigned int index =
- HPI_CTL_ATTR_INDEX(phm->u.c.attribute) - 1;
- unsigned int offset = phm->u.c.param1;
- unsigned int pad_string_len, field_size;
- char *pad_string;
- unsigned int tocopy;
-
- HPI_DEBUG_LOG(VERBOSE, "PADS HPI_PADS_ %d\n",
- phm->u.c.attribute);
-
- if (index > ARRAY_SIZE(pad_desc) - 1) {
+ if (!(p_pad->field_valid_flags & (1 <<
+ HPI_CTL_ATTR_INDEX(phm->u.c.
+ attribute)))) {
phr->error =
HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
break;
}
- pad_string = ((char *)p_pad) + pad_desc[index].offset;
- field_size = pad_desc[index].field_size;
- /* Ensure null terminator */
- pad_string[field_size - 1] = 0;
-
- pad_string_len = strlen(pad_string) + 1;
-
- if (offset > pad_string_len) {
- phr->error = HPI_ERROR_INVALID_CONTROL_VALUE;
- break;
+ if (phm->u.c.attribute == HPI_PAD_PROGRAM_ID)
+ phr->u.c.param1 = p_pad->pI;
+ else if (phm->u.c.attribute == HPI_PAD_PROGRAM_TYPE)
+ phr->u.c.param1 = p_pad->pTY;
+ else {
+ unsigned int index =
+ HPI_CTL_ATTR_INDEX(phm->u.c.
+ attribute) - 1;
+ unsigned int offset = phm->u.c.param1;
+ unsigned int pad_string_len, field_size;
+ char *pad_string;
+ unsigned int tocopy;
+
+ if (index > ARRAY_SIZE(pad_desc) - 1) {
+ phr->error =
+ HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
+ break;
+ }
+
+ pad_string =
+ ((char *)p_pad) +
+ pad_desc[index].offset;
+ field_size = pad_desc[index].field_size;
+ /* Ensure null terminator */
+ pad_string[field_size - 1] = 0;
+
+ pad_string_len = strlen(pad_string) + 1;
+
+ if (offset > pad_string_len) {
+ phr->error =
+ HPI_ERROR_INVALID_CONTROL_VALUE;
+ break;
+ }
+
+ tocopy = pad_string_len - offset;
+ if (tocopy > sizeof(phr->u.cu.chars8.sz_data))
+ tocopy = sizeof(phr->u.cu.chars8.
+ sz_data);
+
+ memcpy(phr->u.cu.chars8.sz_data,
+ &pad_string[offset], tocopy);
+
+ phr->u.cu.chars8.remaining_chars =
+ pad_string_len - offset - tocopy;
}
-
- tocopy = pad_string_len - offset;
- if (tocopy > sizeof(phr->u.cu.chars8.sz_data))
- tocopy = sizeof(phr->u.cu.chars8.sz_data);
-
- HPI_DEBUG_LOG(VERBOSE,
- "PADS memcpy(%d), offset %d \n", tocopy,
- offset);
- memcpy(phr->u.cu.chars8.sz_data, &pad_string[offset],
- tocopy);
-
- phr->u.cu.chars8.remaining_chars =
- pad_string_len - offset - tocopy;
}
break;
default:
@@ -472,16 +523,9 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
break;
}
- if (found)
- HPI_DEBUG_LOG(VERBOSE,
- "cached adap %d, ctl %d, type %d, attr %d\n",
- phm->adapter_index, pI->control_index,
- pI->control_type, phm->u.c.attribute);
- else
- HPI_DEBUG_LOG(VERBOSE,
- "uncached adap %d, ctl %d, ctl type %d\n",
- phm->adapter_index, pI->control_index,
- pI->control_type);
+ HPI_DEBUG_LOG(VERBOSE, "%s Adap %d, Ctl %d, Type %d, Attr %d\n",
+ found ? "Cached" : "Uncached", phm->adapter_index,
+ pI->control_index, pI->control_type, phm->u.c.attribute);
if (found)
phr->size =
@@ -497,18 +541,21 @@ Only update if no error.
Volume and Level return the limited values in the response, so use these
Multiplexer does so use sent values
*/
-void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
+void hpi_cmn_control_cache_sync_to_msg(struct hpi_control_cache *p_cache,
struct hpi_message *phm, struct hpi_response *phr)
{
- u16 control_index;
struct hpi_control_cache_single *pC;
struct hpi_control_cache_info *pI;
if (phr->error)
return;
- if (!find_control(phm, p_cache, &pI, &control_index))
+ if (!find_control(phm->obj_index, p_cache, &pI)) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "HPICMN find_control() failed for adap %d\n",
+ phm->adapter_index);
return;
+ }
/* pC is the default cached control strucure.
May be cast to something else in the following switch statement.
@@ -518,31 +565,36 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
switch (pI->control_type) {
case HPI_CONTROL_VOLUME:
if (phm->u.c.attribute == HPI_VOLUME_GAIN) {
- pC->u.v.an_log[0] = phr->u.c.an_log_value[0];
- pC->u.v.an_log[1] = phr->u.c.an_log_value[1];
+ pC->u.vol.an_log[0] = phr->u.c.an_log_value[0];
+ pC->u.vol.an_log[1] = phr->u.c.an_log_value[1];
+ } else if (phm->u.c.attribute == HPI_VOLUME_MUTE) {
+ if (phm->u.c.param1)
+ pC->u.vol.flags |= HPI_VOLUME_FLAG_MUTED;
+ else
+ pC->u.vol.flags &= ~HPI_VOLUME_FLAG_MUTED;
}
break;
case HPI_CONTROL_MULTIPLEXER:
/* mux does not return its setting on Set command. */
if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) {
- pC->u.x.source_node_type = (u16)phm->u.c.param1;
- pC->u.x.source_node_index = (u16)phm->u.c.param2;
+ pC->u.mux.source_node_type = (u16)phm->u.c.param1;
+ pC->u.mux.source_node_index = (u16)phm->u.c.param2;
}
break;
case HPI_CONTROL_CHANNEL_MODE:
/* mode does not return its setting on Set command. */
if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE)
- pC->u.m.mode = (u16)phm->u.c.param1;
+ pC->u.mode.mode = (u16)phm->u.c.param1;
break;
case HPI_CONTROL_LEVEL:
if (phm->u.c.attribute == HPI_LEVEL_GAIN) {
- pC->u.v.an_log[0] = phr->u.c.an_log_value[0];
- pC->u.v.an_log[1] = phr->u.c.an_log_value[1];
+ pC->u.vol.an_log[0] = phr->u.c.an_log_value[0];
+ pC->u.vol.an_log[1] = phr->u.c.an_log_value[1];
}
break;
case HPI_CONTROL_MICROPHONE:
if (phm->u.c.attribute == HPI_MICROPHONE_PHANTOM_POWER)
- pC->u.phantom_power.state = (u16)phm->u.c.param1;
+ pC->u.microphone.phantom_state = (u16)phm->u.c.param1;
break;
case HPI_CONTROL_AESEBU_TRANSMITTER:
if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT)
@@ -550,7 +602,7 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
break;
case HPI_CONTROL_AESEBU_RECEIVER:
if (phm->u.c.attribute == HPI_AESEBURX_FORMAT)
- pC->u.aes3rx.source = phm->u.c.param1;
+ pC->u.aes3rx.format = phm->u.c.param1;
break;
case HPI_CONTROL_SAMPLECLOCK:
if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE)
@@ -565,59 +617,57 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
}
}
-struct hpi_control_cache *hpi_alloc_control_cache(const u32
- number_of_controls, const u32 size_in_bytes,
- struct hpi_control_cache_info *pDSP_control_buffer)
+struct hpi_control_cache *hpi_alloc_control_cache(const u32 control_count,
+ const u32 size_in_bytes, u8 *p_dsp_control_buffer)
{
struct hpi_control_cache *p_cache =
kmalloc(sizeof(*p_cache), GFP_KERNEL);
if (!p_cache)
return NULL;
+
p_cache->p_info =
- kmalloc(sizeof(*p_cache->p_info) * number_of_controls,
- GFP_KERNEL);
+ kmalloc(sizeof(*p_cache->p_info) * control_count, GFP_KERNEL);
if (!p_cache->p_info) {
kfree(p_cache);
return NULL;
}
+ memset(p_cache->p_info, 0, sizeof(*p_cache->p_info) * control_count);
p_cache->cache_size_in_bytes = size_in_bytes;
- p_cache->control_count = number_of_controls;
- p_cache->p_cache =
- (struct hpi_control_cache_single *)pDSP_control_buffer;
+ p_cache->control_count = control_count;
+ p_cache->p_cache = p_dsp_control_buffer;
p_cache->init = 0;
return p_cache;
}
void hpi_free_control_cache(struct hpi_control_cache *p_cache)
{
- if (p_cache->init) {
+ if (p_cache) {
kfree(p_cache->p_info);
- p_cache->p_info = NULL;
- p_cache->init = 0;
kfree(p_cache);
}
}
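
hpi_alloc_control_cache() above now allocates the cache header and the lookup array separately and unwinds on failure, while hpi_free_control_cache() becomes NULL-tolerant. A simplified userspace sketch of that pairing, with malloc/free standing in for kmalloc/kfree and all other names invented:

#include <stdlib.h>
#include <string.h>

struct cache {
	unsigned count;
	void **p_info;
};

static struct cache *cache_alloc(unsigned count)
{
	struct cache *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	c->p_info = malloc(sizeof(*c->p_info) * count);
	if (!c->p_info) {
		free(c);		/* undo the first allocation on failure */
		return NULL;
	}
	memset(c->p_info, 0, sizeof(*c->p_info) * count);
	c->count = count;
	return c;
}

static void cache_free(struct cache *c)
{
	if (c) {			/* a NULL argument is a no-op, like kfree() */
		free(c->p_info);
		free(c);
	}
}
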
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
{
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function, 0);
switch (phm->function) {
case HPI_SUBSYS_OPEN:
case HPI_SUBSYS_CLOSE:
case HPI_SUBSYS_DRIVER_UNLOAD:
- phr->error = 0;
break;
case HPI_SUBSYS_DRIVER_LOAD:
wipe_adapter_list();
hpios_alistlock_init(&adapters);
- phr->error = 0;
break;
- case HPI_SUBSYS_GET_INFO:
- subsys_get_adapters(phr);
+ case HPI_SUBSYS_GET_ADAPTER:
+ subsys_get_adapter(phm, phr);
+ break;
+ case HPI_SUBSYS_GET_NUM_ADAPTERS:
+ phr->u.s.num_adapters = adapters.gw_num_adapters;
break;
case HPI_SUBSYS_CREATE_ADAPTER:
case HPI_SUBSYS_DELETE_ADAPTER:
- phr->error = 0;
break;
default:
phr->error = HPI_ERROR_INVALID_FUNC;
diff --git a/sound/pci/asihpi/hpicmn.h b/sound/pci/asihpi/hpicmn.h
index 6229022f56cb..590f0b69e655 100644
--- a/sound/pci/asihpi/hpicmn.h
+++ b/sound/pci/asihpi/hpicmn.h
@@ -33,18 +33,19 @@ struct hpi_adapter_obj {
};
struct hpi_control_cache {
- u32 init; /**< indicates whether the
- structures are initialized */
+ /** indicates whether the structures are initialized */
+ u16 init;
+ u16 adap_idx;
u32 control_count;
u32 cache_size_in_bytes;
- struct hpi_control_cache_info
- **p_info; /**< pointer to allocated memory of
- lookup pointers. */
- struct hpi_control_cache_single
- *p_cache; /**< pointer to DSP's control cache. */
+ /** pointer to allocated memory of lookup pointers. */
+ struct hpi_control_cache_info **p_info;
+ /** pointer to DSP's control cache. */
+ u8 *p_cache;
};
struct hpi_adapter_obj *hpi_find_adapter(u16 adapter_index);
+
u16 hpi_add_adapter(struct hpi_adapter_obj *pao);
void hpi_delete_adapter(struct hpi_adapter_obj *pao);
@@ -52,13 +53,10 @@ void hpi_delete_adapter(struct hpi_adapter_obj *pao);
short hpi_check_control_cache(struct hpi_control_cache *pC,
struct hpi_message *phm, struct hpi_response *phr);
struct hpi_control_cache *hpi_alloc_control_cache(const u32
- number_of_controls, const u32 size_in_bytes,
- struct hpi_control_cache_info
- *pDSP_control_buffer);
+ number_of_controls, const u32 size_in_bytes, u8 *pDSP_control_buffer);
void hpi_free_control_cache(struct hpi_control_cache *p_cache);
-void hpi_sync_control_cache(struct hpi_control_cache *pC,
+void hpi_cmn_control_cache_sync_to_msg(struct hpi_control_cache *pC,
struct hpi_message *phm, struct hpi_response *phr);
+
u16 hpi_validate_response(struct hpi_message *phm, struct hpi_response *phr);
-short hpi_check_buffer_mapping(struct hpi_control_cache *p_cache,
- struct hpi_message *phm, void **p, unsigned int *pN);
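
The struct above drops the typed p_cache pointer in favour of a plain u8 * plus a table of per-control info pointers. One common way to pair the two is to walk variable-size records once and remember where each starts; the record layout below is an assumption for illustration only, not the driver's actual cache format.

#include <stdint.h>

struct rec_info {
	uint8_t control_type;
	uint8_t size_in32bit_words;	/* record length, assumed encoding */
};

struct ctl_cache {
	uint32_t control_count;
	uint32_t cache_size_in_bytes;
	struct rec_info **p_info;	/* one lookup pointer per control */
	uint8_t *p_cache;		/* opaque DSP-owned record buffer */
};

/* Fill p_info[] by walking the records once; returns 0 on success. */
static int build_index(struct ctl_cache *c)
{
	uint8_t *p = c->p_cache;
	uint8_t *end = c->p_cache + c->cache_size_in_bytes;
	uint32_t i;

	for (i = 0; i < c->control_count; i++) {
		struct rec_info *info = (struct rec_info *)p;

		if (p >= end || !info->size_in32bit_words)
			return -1;	/* truncated or corrupt buffer */
		c->p_info[i] = info;
		p += info->size_in32bit_words * 4u;
	}
	return 0;
}
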
diff --git a/sound/pci/asihpi/hpidebug.c b/sound/pci/asihpi/hpidebug.c
index 949836ec913a..b52baf62791e 100644
--- a/sound/pci/asihpi/hpidebug.c
+++ b/sound/pci/asihpi/hpidebug.c
@@ -45,161 +45,14 @@ int hpi_debug_level_get(void)
return hpi_debug_level;
}
-#ifdef HPIOS_DEBUG_PRINT
-/* implies OS has no printf-like function */
-#include <stdarg.h>
-
-void hpi_debug_printf(char *fmt, ...)
-{
- va_list arglist;
- char buffer[128];
-
- va_start(arglist, fmt);
-
- if (buffer[0])
- HPIOS_DEBUG_PRINT(buffer);
- va_end(arglist);
-}
-#endif
-
-struct treenode {
- void *array;
- unsigned int num_elements;
-};
-
-#define make_treenode_from_array(nodename, array) \
-static void *tmp_strarray_##nodename[] = array; \
-static struct treenode nodename = { \
- &tmp_strarray_##nodename, \
- ARRAY_SIZE(tmp_strarray_##nodename) \
-};
-
-#define get_treenode_elem(node_ptr, idx, type) \
- (&(*((type *)(node_ptr)->array)[idx]))
-
-make_treenode_from_array(hpi_control_type_strings, HPI_CONTROL_TYPE_STRINGS)
-
- make_treenode_from_array(hpi_subsys_strings, HPI_SUBSYS_STRINGS)
- make_treenode_from_array(hpi_adapter_strings, HPI_ADAPTER_STRINGS)
- make_treenode_from_array(hpi_istream_strings, HPI_ISTREAM_STRINGS)
- make_treenode_from_array(hpi_ostream_strings, HPI_OSTREAM_STRINGS)
- make_treenode_from_array(hpi_mixer_strings, HPI_MIXER_STRINGS)
- make_treenode_from_array(hpi_node_strings,
- {
- "NODE is invalid object"})
-
- make_treenode_from_array(hpi_control_strings, HPI_CONTROL_STRINGS)
- make_treenode_from_array(hpi_nvmemory_strings, HPI_OBJ_STRINGS)
- make_treenode_from_array(hpi_digitalio_strings, HPI_DIGITALIO_STRINGS)
- make_treenode_from_array(hpi_watchdog_strings, HPI_WATCHDOG_STRINGS)
- make_treenode_from_array(hpi_clock_strings, HPI_CLOCK_STRINGS)
- make_treenode_from_array(hpi_profile_strings, HPI_PROFILE_STRINGS)
- make_treenode_from_array(hpi_asyncevent_strings, HPI_ASYNCEVENT_STRINGS)
-#define HPI_FUNCTION_STRINGS \
-{ \
- &hpi_subsys_strings,\
- &hpi_adapter_strings,\
- &hpi_ostream_strings,\
- &hpi_istream_strings,\
- &hpi_mixer_strings,\
- &hpi_node_strings,\
- &hpi_control_strings,\
- &hpi_nvmemory_strings,\
- &hpi_digitalio_strings,\
- &hpi_watchdog_strings,\
- &hpi_clock_strings,\
- &hpi_profile_strings,\
- &hpi_control_strings, \
- &hpi_asyncevent_strings \
-}
- make_treenode_from_array(hpi_function_strings, HPI_FUNCTION_STRINGS)
-
- compile_time_assert(HPI_OBJ_MAXINDEX == 14, obj_list_doesnt_match);
-
-static char *hpi_function_string(unsigned int function)
-{
- unsigned int object;
- struct treenode *tmp;
-
- object = function / HPI_OBJ_FUNCTION_SPACING;
- function = function - object * HPI_OBJ_FUNCTION_SPACING;
-
- if (object == 0 || object == HPI_OBJ_NODE
- || object > hpi_function_strings.num_elements)
- return "invalid object";
-
- tmp = get_treenode_elem(&hpi_function_strings, object - 1,
- struct treenode *);
-
- if (function == 0 || function > tmp->num_elements)
- return "invalid function";
-
- return get_treenode_elem(tmp, function - 1, char *);
-}
-
void hpi_debug_message(struct hpi_message *phm, char *sz_fileline)
{
if (phm) {
- if ((phm->object <= HPI_OBJ_MAXINDEX) && phm->object) {
- u16 index = 0;
- u16 attrib = 0;
- int is_control = 0;
-
- index = phm->obj_index;
- switch (phm->object) {
- case HPI_OBJ_ADAPTER:
- case HPI_OBJ_PROFILE:
- break;
- case HPI_OBJ_MIXER:
- if (phm->function ==
- HPI_MIXER_GET_CONTROL_BY_INDEX)
- index = phm->u.m.control_index;
- break;
- case HPI_OBJ_OSTREAM:
- case HPI_OBJ_ISTREAM:
- break;
-
- case HPI_OBJ_CONTROLEX:
- case HPI_OBJ_CONTROL:
- if (phm->version == 1)
- attrib = HPI_CTL_ATTR(UNIVERSAL, 1);
- else
- attrib = phm->u.c.attribute;
- is_control = 1;
- break;
- default:
- break;
- }
-
- if (is_control && (attrib & 0xFF00)) {
- int control_type = (attrib & 0xFF00) >> 8;
- int attr_index = HPI_CTL_ATTR_INDEX(attrib);
- /* note the KERN facility level
- is in szFileline already */
- printk("%s adapter %d %s "
- "ctrl_index x%04x %s %d\n",
- sz_fileline, phm->adapter_index,
- hpi_function_string(phm->function),
- index,
- get_treenode_elem
- (&hpi_control_type_strings,
- control_type, char *),
- attr_index);
-
- } else
- printk("%s adapter %d %s "
- "idx x%04x attr x%04x \n",
- sz_fileline, phm->adapter_index,
- hpi_function_string(phm->function),
- index, attrib);
- } else {
- printk("adap=%d, invalid obj=%d, func=0x%x\n",
- phm->adapter_index, phm->object,
- phm->function);
- }
- } else
- printk(KERN_ERR
- "NULL message pointer to hpi_debug_message!\n");
+ printk(KERN_DEBUG "HPI_MSG%d,%d,%d,%d,%d\n", phm->version,
+ phm->adapter_index, phm->obj_index, phm->function,
+ phm->u.c.attribute);
+ }
+
}
void hpi_debug_data(u16 *pdata, u32 len)
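
hpi_debug_message() now emits one compact comma-separated line per message instead of decoding names through the removed string tables. A throwaway userspace sketch of reading such a line back from a captured log; the sample values are made up:

#include <stdio.h>

int main(void)
{
	const char *line = "HPI_MSG1,0,3,769,2";	/* invented sample line */
	int version, adapter, obj, function, attribute;

	if (sscanf(line, "HPI_MSG%d,%d,%d,%d,%d", &version, &adapter, &obj,
			&function, &attribute) == 5)
		printf("adapter %d function %d attr %d\n",
			adapter, function, attribute);
	return 0;
}
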
diff --git a/sound/pci/asihpi/hpidebug.h b/sound/pci/asihpi/hpidebug.h
index a2f0952a99f0..940f54c3c538 100644
--- a/sound/pci/asihpi/hpidebug.h
+++ b/sound/pci/asihpi/hpidebug.h
@@ -37,7 +37,7 @@ enum { HPI_DEBUG_LEVEL_ERROR = 0, /* always log errors */
#define HPI_DEBUG_LEVEL_DEFAULT HPI_DEBUG_LEVEL_NOTICE
/* an OS can define an extra flag string that is appended to
- the start of each message, eg see hpios_linux.h */
+ the start of each message, eg see linux kernel hpios.h */
#ifdef SOURCEFILE_NAME
#define FILE_LINE SOURCEFILE_NAME ":" __stringify(__LINE__) " "
@@ -45,18 +45,11 @@ enum { HPI_DEBUG_LEVEL_ERROR = 0, /* always log errors */
#define FILE_LINE __FILE__ ":" __stringify(__LINE__) " "
#endif
-#if defined(HPI_DEBUG) && defined(_WINDOWS)
-#define HPI_DEBUGBREAK() debug_break()
-#else
-#define HPI_DEBUGBREAK()
-#endif
-
#define HPI_DEBUG_ASSERT(expression) \
do { \
- if (!(expression)) {\
- printk(KERN_ERR FILE_LINE\
- "ASSERT " __stringify(expression));\
- HPI_DEBUGBREAK();\
+ if (!(expression)) { \
+ printk(KERN_ERR FILE_LINE \
+ "ASSERT " __stringify(expression)); \
} \
} while (0)
@@ -78,28 +71,27 @@ void hpi_debug_message(struct hpi_message *phm, char *sz_fileline);
void hpi_debug_data(u16 *pdata, u32 len);
-#define HPI_DEBUG_DATA(pdata, len) \
- do { \
+#define HPI_DEBUG_DATA(pdata, len) \
+ do { \
if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE) \
hpi_debug_data(pdata, len); \
} while (0)
-#define HPI_DEBUG_MESSAGE(level, phm) \
- do { \
- if (hpi_debug_level >= HPI_DEBUG_LEVEL_##level) { \
- hpi_debug_message(phm,HPI_DEBUG_FLAG_##level \
- FILE_LINE __stringify(level));\
- } \
+#define HPI_DEBUG_MESSAGE(level, phm) \
+ do { \
+ if (hpi_debug_level >= HPI_DEBUG_LEVEL_##level) { \
+ hpi_debug_message(phm, HPI_DEBUG_FLAG_##level \
+ FILE_LINE __stringify(level)); \
+ } \
} while (0)
-#define HPI_DEBUG_RESPONSE(phr) \
- do { \
- if ((hpi_debug_level >= HPI_DEBUG_LEVEL_DEBUG) && (phr->error))\
- HPI_DEBUG_LOG(ERROR, \
- "HPI response - error# %d\n", \
- phr->error); \
- else if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE) \
- HPI_DEBUG_LOG(VERBOSE, "HPI response OK\n");\
+#define HPI_DEBUG_RESPONSE(phr) \
+ do { \
+ if (((hpi_debug_level >= HPI_DEBUG_LEVEL_DEBUG) && \
+ (phr->error)) ||\
+ (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE)) \
+ printk(KERN_DEBUG "HPI_RES%d,%d,%d\n", \
+ phr->version, phr->error, phr->specific_error); \
} while (0)
#ifndef compile_time_assert
@@ -107,279 +99,4 @@ void hpi_debug_data(u16 *pdata, u32 len);
typedef char msg[(cond) ? 1 : -1]
#endif
- /* check that size is exactly some number */
-#define function_count_check(sym, size) \
- compile_time_assert((sym##_FUNCTION_COUNT) == (size),\
- strings_match_defs_##sym)
-
-/* These strings should be generated using a macro which defines
- the corresponding symbol values. */
-#define HPI_OBJ_STRINGS \
-{ \
- "HPI_OBJ_SUBSYSTEM", \
- "HPI_OBJ_ADAPTER", \
- "HPI_OBJ_OSTREAM", \
- "HPI_OBJ_ISTREAM", \
- "HPI_OBJ_MIXER", \
- "HPI_OBJ_NODE", \
- "HPI_OBJ_CONTROL", \
- "HPI_OBJ_NVMEMORY", \
- "HPI_OBJ_DIGITALIO", \
- "HPI_OBJ_WATCHDOG", \
- "HPI_OBJ_CLOCK", \
- "HPI_OBJ_PROFILE", \
- "HPI_OBJ_CONTROLEX" \
-}
-
-#define HPI_SUBSYS_STRINGS \
-{ \
- "HPI_SUBSYS_OPEN", \
- "HPI_SUBSYS_GET_VERSION", \
- "HPI_SUBSYS_GET_INFO", \
- "HPI_SUBSYS_FIND_ADAPTERS", \
- "HPI_SUBSYS_CREATE_ADAPTER",\
- "HPI_SUBSYS_CLOSE", \
- "HPI_SUBSYS_DELETE_ADAPTER", \
- "HPI_SUBSYS_DRIVER_LOAD", \
- "HPI_SUBSYS_DRIVER_UNLOAD", \
- "HPI_SUBSYS_READ_PORT_8", \
- "HPI_SUBSYS_WRITE_PORT_8", \
- "HPI_SUBSYS_GET_NUM_ADAPTERS",\
- "HPI_SUBSYS_GET_ADAPTER", \
- "HPI_SUBSYS_SET_NETWORK_INTERFACE"\
-}
-function_count_check(HPI_SUBSYS, 14);
-
-#define HPI_ADAPTER_STRINGS \
-{ \
- "HPI_ADAPTER_OPEN", \
- "HPI_ADAPTER_CLOSE", \
- "HPI_ADAPTER_GET_INFO", \
- "HPI_ADAPTER_GET_ASSERT", \
- "HPI_ADAPTER_TEST_ASSERT", \
- "HPI_ADAPTER_SET_MODE", \
- "HPI_ADAPTER_GET_MODE", \
- "HPI_ADAPTER_ENABLE_CAPABILITY",\
- "HPI_ADAPTER_SELFTEST", \
- "HPI_ADAPTER_FIND_OBJECT", \
- "HPI_ADAPTER_QUERY_FLASH", \
- "HPI_ADAPTER_START_FLASH", \
- "HPI_ADAPTER_PROGRAM_FLASH", \
- "HPI_ADAPTER_SET_PROPERTY", \
- "HPI_ADAPTER_GET_PROPERTY", \
- "HPI_ADAPTER_ENUM_PROPERTY", \
- "HPI_ADAPTER_MODULE_INFO", \
- "HPI_ADAPTER_DEBUG_READ" \
-}
-
-function_count_check(HPI_ADAPTER, 18);
-
-#define HPI_OSTREAM_STRINGS \
-{ \
- "HPI_OSTREAM_OPEN", \
- "HPI_OSTREAM_CLOSE", \
- "HPI_OSTREAM_WRITE", \
- "HPI_OSTREAM_START", \
- "HPI_OSTREAM_STOP", \
- "HPI_OSTREAM_RESET", \
- "HPI_OSTREAM_GET_INFO", \
- "HPI_OSTREAM_QUERY_FORMAT", \
- "HPI_OSTREAM_DATA", \
- "HPI_OSTREAM_SET_VELOCITY", \
- "HPI_OSTREAM_SET_PUNCHINOUT", \
- "HPI_OSTREAM_SINEGEN", \
- "HPI_OSTREAM_ANC_RESET", \
- "HPI_OSTREAM_ANC_GET_INFO", \
- "HPI_OSTREAM_ANC_READ", \
- "HPI_OSTREAM_SET_TIMESCALE",\
- "HPI_OSTREAM_SET_FORMAT", \
- "HPI_OSTREAM_HOSTBUFFER_ALLOC", \
- "HPI_OSTREAM_HOSTBUFFER_FREE", \
- "HPI_OSTREAM_GROUP_ADD",\
- "HPI_OSTREAM_GROUP_GETMAP", \
- "HPI_OSTREAM_GROUP_RESET", \
- "HPI_OSTREAM_HOSTBUFFER_GET_INFO", \
- "HPI_OSTREAM_WAIT_START", \
-}
-function_count_check(HPI_OSTREAM, 24);
-
-#define HPI_ISTREAM_STRINGS \
-{ \
- "HPI_ISTREAM_OPEN", \
- "HPI_ISTREAM_CLOSE", \
- "HPI_ISTREAM_SET_FORMAT", \
- "HPI_ISTREAM_READ", \
- "HPI_ISTREAM_START", \
- "HPI_ISTREAM_STOP", \
- "HPI_ISTREAM_RESET", \
- "HPI_ISTREAM_GET_INFO", \
- "HPI_ISTREAM_QUERY_FORMAT", \
- "HPI_ISTREAM_ANC_RESET", \
- "HPI_ISTREAM_ANC_GET_INFO", \
- "HPI_ISTREAM_ANC_WRITE", \
- "HPI_ISTREAM_HOSTBUFFER_ALLOC",\
- "HPI_ISTREAM_HOSTBUFFER_FREE", \
- "HPI_ISTREAM_GROUP_ADD", \
- "HPI_ISTREAM_GROUP_GETMAP", \
- "HPI_ISTREAM_GROUP_RESET", \
- "HPI_ISTREAM_HOSTBUFFER_GET_INFO", \
- "HPI_ISTREAM_WAIT_START", \
-}
-function_count_check(HPI_ISTREAM, 19);
-
-#define HPI_MIXER_STRINGS \
-{ \
- "HPI_MIXER_OPEN", \
- "HPI_MIXER_CLOSE", \
- "HPI_MIXER_GET_INFO", \
- "HPI_MIXER_GET_NODE_INFO", \
- "HPI_MIXER_GET_CONTROL", \
- "HPI_MIXER_SET_CONNECTION", \
- "HPI_MIXER_GET_CONNECTIONS", \
- "HPI_MIXER_GET_CONTROL_BY_INDEX", \
- "HPI_MIXER_GET_CONTROL_ARRAY_BY_INDEX", \
- "HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES", \
- "HPI_MIXER_STORE", \
-}
-function_count_check(HPI_MIXER, 11);
-
-#define HPI_CONTROL_STRINGS \
-{ \
- "HPI_CONTROL_GET_INFO", \
- "HPI_CONTROL_GET_STATE", \
- "HPI_CONTROL_SET_STATE" \
-}
-function_count_check(HPI_CONTROL, 3);
-
-#define HPI_NVMEMORY_STRINGS \
-{ \
- "HPI_NVMEMORY_OPEN", \
- "HPI_NVMEMORY_READ_BYTE", \
- "HPI_NVMEMORY_WRITE_BYTE" \
-}
-function_count_check(HPI_NVMEMORY, 3);
-
-#define HPI_DIGITALIO_STRINGS \
-{ \
- "HPI_GPIO_OPEN", \
- "HPI_GPIO_READ_BIT", \
- "HPI_GPIO_WRITE_BIT", \
- "HPI_GPIO_READ_ALL", \
- "HPI_GPIO_WRITE_STATUS"\
-}
-function_count_check(HPI_GPIO, 5);
-
-#define HPI_WATCHDOG_STRINGS \
-{ \
- "HPI_WATCHDOG_OPEN", \
- "HPI_WATCHDOG_SET_TIME", \
- "HPI_WATCHDOG_PING" \
-}
-
-#define HPI_CLOCK_STRINGS \
-{ \
- "HPI_CLOCK_OPEN", \
- "HPI_CLOCK_SET_TIME", \
- "HPI_CLOCK_GET_TIME" \
-}
-
-#define HPI_PROFILE_STRINGS \
-{ \
- "HPI_PROFILE_OPEN_ALL", \
- "HPI_PROFILE_START_ALL", \
- "HPI_PROFILE_STOP_ALL", \
- "HPI_PROFILE_GET", \
- "HPI_PROFILE_GET_IDLECOUNT", \
- "HPI_PROFILE_GET_NAME", \
- "HPI_PROFILE_GET_UTILIZATION" \
-}
-function_count_check(HPI_PROFILE, 7);
-
-#define HPI_ASYNCEVENT_STRINGS \
-{ \
- "HPI_ASYNCEVENT_OPEN",\
- "HPI_ASYNCEVENT_CLOSE ",\
- "HPI_ASYNCEVENT_WAIT",\
- "HPI_ASYNCEVENT_GETCOUNT",\
- "HPI_ASYNCEVENT_GET",\
- "HPI_ASYNCEVENT_SENDEVENTS"\
-}
-function_count_check(HPI_ASYNCEVENT, 6);
-
-#define HPI_CONTROL_TYPE_STRINGS \
-{ \
- "null control", \
- "HPI_CONTROL_CONNECTION", \
- "HPI_CONTROL_VOLUME", \
- "HPI_CONTROL_METER", \
- "HPI_CONTROL_MUTE", \
- "HPI_CONTROL_MULTIPLEXER", \
- "HPI_CONTROL_AESEBU_TRANSMITTER", \
- "HPI_CONTROL_AESEBU_RECEIVER", \
- "HPI_CONTROL_LEVEL", \
- "HPI_CONTROL_TUNER", \
- "HPI_CONTROL_ONOFFSWITCH", \
- "HPI_CONTROL_VOX", \
- "HPI_CONTROL_AES18_TRANSMITTER", \
- "HPI_CONTROL_AES18_RECEIVER", \
- "HPI_CONTROL_AES18_BLOCKGENERATOR", \
- "HPI_CONTROL_CHANNEL_MODE", \
- "HPI_CONTROL_BITSTREAM", \
- "HPI_CONTROL_SAMPLECLOCK", \
- "HPI_CONTROL_MICROPHONE", \
- "HPI_CONTROL_PARAMETRIC_EQ", \
- "HPI_CONTROL_COMPANDER", \
- "HPI_CONTROL_COBRANET", \
- "HPI_CONTROL_TONE_DETECT", \
- "HPI_CONTROL_SILENCE_DETECT", \
- "HPI_CONTROL_PAD", \
- "HPI_CONTROL_SRC" ,\
- "HPI_CONTROL_UNIVERSAL" \
-}
-
-compile_time_assert((HPI_CONTROL_LAST_INDEX + 1 == 27),
- controltype_strings_match_defs);
-
-#define HPI_SOURCENODE_STRINGS \
-{ \
- "no source", \
- "HPI_SOURCENODE_OSTREAM", \
- "HPI_SOURCENODE_LINEIN", \
- "HPI_SOURCENODE_AESEBU_IN", \
- "HPI_SOURCENODE_TUNER", \
- "HPI_SOURCENODE_RF", \
- "HPI_SOURCENODE_CLOCK_SOURCE", \
- "HPI_SOURCENODE_RAW_BITSTREAM", \
- "HPI_SOURCENODE_MICROPHONE", \
- "HPI_SOURCENODE_COBRANET", \
- "HPI_SOURCENODE_ANALOG", \
- "HPI_SOURCENODE_ADAPTER" \
-}
-
-compile_time_assert((HPI_SOURCENODE_LAST_INDEX - HPI_SOURCENODE_NONE + 1) ==
- (12), sourcenode_strings_match_defs);
-
-#define HPI_DESTNODE_STRINGS \
-{ \
- "no destination", \
- "HPI_DESTNODE_ISTREAM", \
- "HPI_DESTNODE_LINEOUT", \
- "HPI_DESTNODE_AESEBU_OUT", \
- "HPI_DESTNODE_RF", \
- "HPI_DESTNODE_SPEAKER", \
- "HPI_DESTNODE_COBRANET", \
- "HPI_DESTNODE_ANALOG" \
-}
-compile_time_assert((HPI_DESTNODE_LAST_INDEX - HPI_DESTNODE_NONE + 1) == (8),
- destnode_strings_match_defs);
-
-#define HPI_CONTROL_CHANNEL_MODE_STRINGS \
-{ \
- "XXX HPI_CHANNEL_MODE_ERROR XXX", \
- "HPI_CHANNEL_MODE_NORMAL", \
- "HPI_CHANNEL_MODE_SWAP", \
- "HPI_CHANNEL_MODE_LEFT_ONLY", \
- "HPI_CHANNEL_MODE_RIGHT_ONLY" \
-}
-
-#endif /* _HPIDEBUG_H */
+#endif /* _HPIDEBUG_H_ */
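
The string tables and their function_count_check() guards are removed, but the compile_time_assert idiom kept just above (the negative-array-size trick) is worth a standalone illustration; the condition below is only an example, not one the driver uses.

/* An array type with negative size is a compile error, so a false
 * condition stops the build; a true one compiles to nothing. */
#ifndef compile_time_assert
#define compile_time_assert(cond, msg) typedef char msg[(cond) ? 1 : -1]
#endif

compile_time_assert(sizeof(char) == 1, char_is_one_byte);
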
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index 9b10d9a5c255..fb311d8c05bf 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -71,47 +71,50 @@ short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code,
int err;
sprintf(fw_name, "asihpi/dsp%04x.bin", adapter);
- HPI_DEBUG_LOG(INFO, "requesting firmware for %s\n", fw_name);
err = request_firmware(&ps_firmware, fw_name,
&ps_dsp_code->ps_dev->dev);
+
if (err != 0) {
- HPI_DEBUG_LOG(ERROR, "%d, request_firmware failed for %s\n",
- err, fw_name);
+ dev_printk(KERN_ERR, &ps_dsp_code->ps_dev->dev,
+ "%d, request_firmware failed for %s\n", err,
+ fw_name);
goto error1;
}
if (ps_firmware->size < sizeof(header)) {
- HPI_DEBUG_LOG(ERROR, "header size too small %s\n", fw_name);
+ dev_printk(KERN_ERR, &ps_dsp_code->ps_dev->dev,
+ "Header size too small %s\n", fw_name);
goto error2;
}
memcpy(&header, ps_firmware->data, sizeof(header));
if (header.adapter != adapter) {
- HPI_DEBUG_LOG(ERROR, "adapter type incorrect %4x != %4x\n",
- header.adapter, adapter);
+ dev_printk(KERN_ERR, &ps_dsp_code->ps_dev->dev,
+ "Adapter type incorrect %4x != %4x\n", header.adapter,
+ adapter);
goto error2;
}
if (header.size != ps_firmware->size) {
- HPI_DEBUG_LOG(ERROR, "code size wrong %d != %ld\n",
- header.size, (unsigned long)ps_firmware->size);
+ dev_printk(KERN_ERR, &ps_dsp_code->ps_dev->dev,
+ "Code size wrong %d != %ld\n", header.size,
+ (unsigned long)ps_firmware->size);
goto error2;
}
- if (header.version / 10000 != HPI_VER_DECIMAL / 10000) {
- HPI_DEBUG_LOG(ERROR,
- "firmware major version mismatch "
- "DSP image %d != driver %d\n", header.version,
+ if (header.version / 100 != HPI_VER_DECIMAL / 100) {
+ dev_printk(KERN_ERR, &ps_dsp_code->ps_dev->dev,
+ "Incompatible firmware version "
+ "DSP image %d != Driver %d\n", header.version,
HPI_VER_DECIMAL);
goto error2;
}
if (header.version != HPI_VER_DECIMAL) {
- HPI_DEBUG_LOG(WARNING,
- "version mismatch DSP image %d != driver %d\n",
+ dev_printk(KERN_WARNING, &ps_dsp_code->ps_dev->dev,
+ "Firmware: release version mismatch DSP image %d != Driver %d\n",
header.version, HPI_VER_DECIMAL);
- /* goto error2; still allow driver to load */
}
- HPI_DEBUG_LOG(INFO, "dsp code %s opened\n", fw_name);
+ HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name);
ps_dsp_code->ps_firmware = ps_firmware;
ps_dsp_code->block_length = header.size / sizeof(u32);
ps_dsp_code->word_count = sizeof(header) / sizeof(u32);
@@ -148,7 +151,7 @@ void hpi_dsp_code_rewind(struct dsp_code *ps_dsp_code)
short hpi_dsp_code_read_word(struct dsp_code *ps_dsp_code, u32 *pword)
{
if (ps_dsp_code->word_count + 1 > ps_dsp_code->block_length)
- return (HPI_ERROR_DSP_FILE_FORMAT);
+ return HPI_ERROR_DSP_FILE_FORMAT;
*pword = ((u32 *)(ps_dsp_code->ps_firmware->data))[ps_dsp_code->
word_count];
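
The version check above is relaxed from /10000 to /100, so only the leading digits of HPI_VER_DECIMAL must agree, and an exact mismatch now merely warns. A hedged sketch of that rule; the version numbers and function are illustrative, the real encoding of HPI_VER_DECIMAL lives in the driver headers.

#include <stdio.h>

static const unsigned driver_ver = 40401;	/* made-up driver version */

static int check_firmware(unsigned image_ver)
{
	if (image_ver / 100 != driver_ver / 100) {
		printf("incompatible firmware %u vs %u\n",
			image_ver, driver_ver);
		return -1;		/* reject, as the goto error2 path does */
	}
	if (image_ver != driver_ver)
		printf("warning: release mismatch %u vs %u\n",
			image_ver, driver_ver);
	return 0;			/* accept and continue loading */
}

int main(void)
{
	check_firmware(40403);	/* same hundreds prefix: warn but accept */
	check_firmware(40501);	/* different prefix: reject */
	return 0;
}
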
diff --git a/sound/pci/asihpi/hpidspcd.h b/sound/pci/asihpi/hpidspcd.h
index d7c240398225..65f0ca732704 100644
--- a/sound/pci/asihpi/hpidspcd.h
+++ b/sound/pci/asihpi/hpidspcd.h
@@ -87,7 +87,7 @@ void hpi_dsp_code_rewind(struct dsp_code *ps_dsp_code);
*/
short hpi_dsp_code_read_word(struct dsp_code *ps_dsp_code,
/**< DSP code descriptor */
- u32 *pword /**< where to store the read word */
+ u32 *pword /**< Where to store the read word */
);
/** Get a block of dsp code into an internal buffer, and provide a pointer to
diff --git a/sound/pci/asihpi/hpifunc.c b/sound/pci/asihpi/hpifunc.c
index 1e92eb6dd509..c38fc9487560 100644
--- a/sound/pci/asihpi/hpifunc.c
+++ b/sound/pci/asihpi/hpifunc.c
@@ -30,16 +30,25 @@ u32 hpi_indexes_to_handle(const char c_object, const u16 adapter_index,
return handle.w;
}
-void hpi_handle_to_indexes(const u32 handle, u16 *pw_adapter_index,
- u16 *pw_object_index)
+static u16 hpi_handle_indexes(const u32 h, u16 *p1, u16 *p2)
{
union handle_word uhandle;
- uhandle.w = handle;
+ if (!h)
+ return HPI_ERROR_INVALID_HANDLE;
+
+ uhandle.w = h;
+
+ *p1 = (u16)uhandle.h.adapter_index;
+ if (p2)
+ *p2 = (u16)uhandle.h.obj_index;
- if (pw_adapter_index)
- *pw_adapter_index = (u16)uhandle.h.adapter_index;
- if (pw_object_index)
- *pw_object_index = (u16)uhandle.h.obj_index;
+ return 0;
+}
+
+void hpi_handle_to_indexes(const u32 handle, u16 *pw_adapter_index,
+ u16 *pw_object_index)
+{
+ hpi_handle_indexes(handle, pw_adapter_index, pw_object_index);
}
char hpi_handle_object(const u32 handle)
@@ -49,22 +58,6 @@ char hpi_handle_object(const u32 handle)
return (char)uhandle.h.obj_type;
}
-#define u32TOINDEX(h, i1) \
-do {\
- if (h == 0) \
- return HPI_ERROR_INVALID_OBJ; \
- else \
- hpi_handle_to_indexes(h, i1, NULL); \
-} while (0)
-
-#define u32TOINDEXES(h, i1, i2) \
-do {\
- if (h == 0) \
- return HPI_ERROR_INVALID_OBJ; \
- else \
- hpi_handle_to_indexes(h, i1, i2);\
-} while (0)
-
void hpi_format_to_msg(struct hpi_msg_format *pMF,
const struct hpi_format *pF)
{
@@ -94,52 +87,13 @@ void hpi_stream_response_to_legacy(struct hpi_stream_res *pSR)
pSR->u.legacy_stream_info.state = pSR->u.stream_info.state;
}
-static struct hpi_hsubsys gh_subsys;
-
-struct hpi_hsubsys *hpi_subsys_create(void)
+static inline void hpi_send_recvV1(struct hpi_message_header *m,
+ struct hpi_response_header *r)
{
- struct hpi_message hm;
- struct hpi_response hr;
-
- memset(&gh_subsys, 0, sizeof(struct hpi_hsubsys));
-
- {
- hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_OPEN);
- hpi_send_recv(&hm, &hr);
-
- if (hr.error == 0)
- return &gh_subsys;
-
- }
- return NULL;
-}
-
-void hpi_subsys_free(const struct hpi_hsubsys *ph_subsys)
-{
- struct hpi_message hm;
- struct hpi_response hr;
-
- hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_CLOSE);
- hpi_send_recv(&hm, &hr);
-
+ hpi_send_recv((struct hpi_message *)m, (struct hpi_response *)r);
}
-u16 hpi_subsys_get_version(const struct hpi_hsubsys *ph_subsys, u32 *pversion)
-{
- struct hpi_message hm;
- struct hpi_response hr;
-
- hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_GET_VERSION);
- hpi_send_recv(&hm, &hr);
- *pversion = hr.u.s.version;
- return hr.error;
-}
-
-u16 hpi_subsys_get_version_ex(const struct hpi_hsubsys *ph_subsys,
- u32 *pversion_ex)
+u16 hpi_subsys_get_version_ex(u32 *pversion_ex)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -151,51 +105,8 @@ u16 hpi_subsys_get_version_ex(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_subsys_get_info(const struct hpi_hsubsys *ph_subsys, u32 *pversion,
- u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_GET_INFO);
-
- hpi_send_recv(&hm, &hr);
-
- *pversion = hr.u.s.version;
- if (list_length > HPI_MAX_ADAPTERS)
- memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
- HPI_MAX_ADAPTERS);
- else
- memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list, list_length);
- *pw_num_adapters = hr.u.s.num_adapters;
- return hr.error;
-}
-
-u16 hpi_subsys_find_adapters(const struct hpi_hsubsys *ph_subsys,
- u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_FIND_ADAPTERS);
-
- hpi_send_recv(&hm, &hr);
-
- if (list_length > HPI_MAX_ADAPTERS) {
- memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
- HPI_MAX_ADAPTERS * sizeof(u16));
- memset(&aw_adapter_list[HPI_MAX_ADAPTERS], 0,
- (list_length - HPI_MAX_ADAPTERS) * sizeof(u16));
- } else
- memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
- list_length * sizeof(u16));
- *pw_num_adapters = hr.u.s.num_adapters;
-
- return hr.error;
-}
-
-u16 hpi_subsys_create_adapter(const struct hpi_hsubsys *ph_subsys,
- const struct hpi_resource *p_resource, u16 *pw_adapter_index)
+u16 hpi_subsys_create_adapter(const struct hpi_resource *p_resource,
+ u16 *pw_adapter_index)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -210,20 +121,18 @@ u16 hpi_subsys_create_adapter(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_subsys_delete_adapter(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index)
+u16 hpi_subsys_delete_adapter(u16 adapter_index)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_DELETE_ADAPTER);
- hm.adapter_index = adapter_index;
+ hm.obj_index = adapter_index;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_subsys_get_num_adapters(const struct hpi_hsubsys *ph_subsys,
- int *pn_num_adapters)
+u16 hpi_subsys_get_num_adapters(int *pn_num_adapters)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -234,35 +143,22 @@ u16 hpi_subsys_get_num_adapters(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_subsys_get_adapter(const struct hpi_hsubsys *ph_subsys, int iterator,
- u32 *padapter_index, u16 *pw_adapter_type)
+u16 hpi_subsys_get_adapter(int iterator, u32 *padapter_index,
+ u16 *pw_adapter_type)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_GET_ADAPTER);
- hm.adapter_index = (u16)iterator;
+ hm.obj_index = (u16)iterator;
hpi_send_recv(&hm, &hr);
*padapter_index = (int)hr.u.s.adapter_index;
- *pw_adapter_type = hr.u.s.aw_adapter_list[0];
- return hr.error;
-}
+ *pw_adapter_type = hr.u.s.adapter_type;
-u16 hpi_subsys_set_host_network_interface(const struct hpi_hsubsys *ph_subsys,
- const char *sz_interface)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_SET_NETWORK_INTERFACE);
- if (sz_interface == NULL)
- return HPI_ERROR_INVALID_RESOURCE;
- hm.u.s.resource.r.net_if = sz_interface;
- hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_adapter_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index)
+u16 hpi_adapter_open(u16 adapter_index)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -276,7 +172,7 @@ u16 hpi_adapter_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index)
}
-u16 hpi_adapter_close(const struct hpi_hsubsys *ph_subsys, u16 adapter_index)
+u16 hpi_adapter_close(u16 adapter_index)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -289,15 +185,14 @@ u16 hpi_adapter_close(const struct hpi_hsubsys *ph_subsys, u16 adapter_index)
return hr.error;
}
-u16 hpi_adapter_set_mode(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 adapter_mode)
+u16 hpi_adapter_set_mode(u16 adapter_index, u32 adapter_mode)
{
- return hpi_adapter_set_mode_ex(ph_subsys, adapter_index, adapter_mode,
+ return hpi_adapter_set_mode_ex(adapter_index, adapter_mode,
HPI_ADAPTER_MODE_SET);
}
-u16 hpi_adapter_set_mode_ex(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 adapter_mode, u16 query_or_set)
+u16 hpi_adapter_set_mode_ex(u16 adapter_index, u32 adapter_mode,
+ u16 query_or_set)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -305,14 +200,13 @@ u16 hpi_adapter_set_mode_ex(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_SET_MODE);
hm.adapter_index = adapter_index;
- hm.u.a.adapter_mode = adapter_mode;
- hm.u.a.assert_id = query_or_set;
+ hm.u.ax.mode.adapter_mode = adapter_mode;
+ hm.u.ax.mode.query_or_set = query_or_set;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_adapter_get_mode(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 *padapter_mode)
+u16 hpi_adapter_get_mode(u16 adapter_index, u32 *padapter_mode)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -321,13 +215,13 @@ u16 hpi_adapter_get_mode(const struct hpi_hsubsys *ph_subsys,
hm.adapter_index = adapter_index;
hpi_send_recv(&hm, &hr);
if (padapter_mode)
- *padapter_mode = hr.u.a.serial_number;
+ *padapter_mode = hr.u.ax.mode.adapter_mode;
return hr.error;
}
-u16 hpi_adapter_get_info(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 *pw_num_outstreams, u16 *pw_num_instreams,
- u16 *pw_version, u32 *pserial_number, u16 *pw_adapter_type)
+u16 hpi_adapter_get_info(u16 adapter_index, u16 *pw_num_outstreams,
+ u16 *pw_num_instreams, u16 *pw_version, u32 *pserial_number,
+ u16 *pw_adapter_type)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -337,18 +231,17 @@ u16 hpi_adapter_get_info(const struct hpi_hsubsys *ph_subsys,
hpi_send_recv(&hm, &hr);
- *pw_adapter_type = hr.u.a.adapter_type;
- *pw_num_outstreams = hr.u.a.num_outstreams;
- *pw_num_instreams = hr.u.a.num_instreams;
- *pw_version = hr.u.a.version;
- *pserial_number = hr.u.a.serial_number;
+ *pw_adapter_type = hr.u.ax.info.adapter_type;
+ *pw_num_outstreams = hr.u.ax.info.num_outstreams;
+ *pw_num_instreams = hr.u.ax.info.num_instreams;
+ *pw_version = hr.u.ax.info.version;
+ *pserial_number = hr.u.ax.info.serial_number;
return hr.error;
}
-u16 hpi_adapter_get_module_by_index(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 module_index, u16 *pw_num_outputs,
- u16 *pw_num_inputs, u16 *pw_version, u32 *pserial_number,
- u16 *pw_module_type, u32 *ph_module)
+u16 hpi_adapter_get_module_by_index(u16 adapter_index, u16 module_index,
+ u16 *pw_num_outputs, u16 *pw_num_inputs, u16 *pw_version,
+ u32 *pserial_number, u16 *pw_module_type, u32 *ph_module)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -360,173 +253,18 @@ u16 hpi_adapter_get_module_by_index(const struct hpi_hsubsys *ph_subsys,
hpi_send_recv(&hm, &hr);
- *pw_module_type = hr.u.a.adapter_type;
- *pw_num_outputs = hr.u.a.num_outstreams;
- *pw_num_inputs = hr.u.a.num_instreams;
- *pw_version = hr.u.a.version;
- *pserial_number = hr.u.a.serial_number;
+ *pw_module_type = hr.u.ax.info.adapter_type;
+ *pw_num_outputs = hr.u.ax.info.num_outstreams;
+ *pw_num_inputs = hr.u.ax.info.num_instreams;
+ *pw_version = hr.u.ax.info.version;
+ *pserial_number = hr.u.ax.info.serial_number;
*ph_module = 0;
return hr.error;
}
-u16 hpi_adapter_get_assert(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 *assert_present, char *psz_assert,
- u16 *pw_line_number)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_GET_ASSERT);
- hm.adapter_index = adapter_index;
- hpi_send_recv(&hm, &hr);
-
- *assert_present = 0;
-
- if (!hr.error) {
-
- *pw_line_number = (u16)hr.u.a.serial_number;
- if (*pw_line_number) {
-
- int i;
- char *src = (char *)hr.u.a.sz_adapter_assert;
- char *dst = psz_assert;
-
- *assert_present = 1;
-
- for (i = 0; i < HPI_STRING_LEN; i++) {
- char c;
- c = *src++;
- *dst++ = c;
- if (c == 0)
- break;
- }
-
- }
- }
- return hr.error;
-}
-
-u16 hpi_adapter_get_assert_ex(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 *assert_present, char *psz_assert,
- u32 *pline_number, u16 *pw_assert_on_dsp)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_GET_ASSERT);
- hm.adapter_index = adapter_index;
-
- hpi_send_recv(&hm, &hr);
-
- *assert_present = 0;
-
- if (!hr.error) {
-
- *pline_number = hr.u.a.serial_number;
-
- *assert_present = hr.u.a.adapter_type;
-
- *pw_assert_on_dsp = hr.u.a.adapter_index;
-
- if (!*assert_present && *pline_number)
-
- *assert_present = 1;
-
- if (*assert_present) {
-
- int i;
- char *src = (char *)hr.u.a.sz_adapter_assert;
- char *dst = psz_assert;
-
- for (i = 0; i < HPI_STRING_LEN; i++) {
- char c;
- c = *src++;
- *dst++ = c;
- if (c == 0)
- break;
- }
-
- } else {
- *psz_assert = 0;
- }
- }
- return hr.error;
-}
-
-u16 hpi_adapter_test_assert(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 assert_id)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_TEST_ASSERT);
- hm.adapter_index = adapter_index;
- hm.u.a.assert_id = assert_id;
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_adapter_enable_capability(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 capability, u32 key)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_ENABLE_CAPABILITY);
- hm.adapter_index = adapter_index;
- hm.u.a.assert_id = capability;
- hm.u.a.adapter_mode = key;
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_adapter_self_test(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_SELFTEST);
- hm.adapter_index = adapter_index;
- hpi_send_recv(&hm, &hr);
- return hr.error;
-}
-
-u16 hpi_adapter_debug_read(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 dsp_address, char *p_buffer, int *count_bytes)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_DEBUG_READ);
-
- hr.size = sizeof(hr);
-
- hm.adapter_index = adapter_index;
- hm.u.ax.debug_read.dsp_address = dsp_address;
-
- if (*count_bytes > (int)sizeof(hr.u.bytes))
- *count_bytes = sizeof(hr.u.bytes);
-
- hm.u.ax.debug_read.count_bytes = *count_bytes;
-
- hpi_send_recv(&hm, &hr);
-
- if (!hr.error) {
- *count_bytes = hr.size - 12;
- memcpy(p_buffer, &hr.u.bytes, *count_bytes);
- } else
- *count_bytes = 0;
- return hr.error;
-}
-
-u16 hpi_adapter_set_property(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 property, u16 parameter1, u16 parameter2)
+u16 hpi_adapter_set_property(u16 adapter_index, u16 property, u16 parameter1,
+ u16 parameter2)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -542,9 +280,8 @@ u16 hpi_adapter_set_property(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_adapter_get_property(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 property, u16 *pw_parameter1,
- u16 *pw_parameter2)
+u16 hpi_adapter_get_property(u16 adapter_index, u16 property,
+ u16 *pw_parameter1, u16 *pw_parameter2)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -564,9 +301,8 @@ u16 hpi_adapter_get_property(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_adapter_enumerate_property(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 index, u16 what_to_enumerate,
- u16 property_index, u32 *psetting)
+u16 hpi_adapter_enumerate_property(u16 adapter_index, u16 index,
+ u16 what_to_enumerate, u16 property_index, u32 *psetting)
{
return 0;
}
@@ -574,7 +310,7 @@ u16 hpi_adapter_enumerate_property(const struct hpi_hsubsys *ph_subsys,
u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
u32 sample_rate, u32 bit_rate, u32 attributes)
{
- u16 error = 0;
+ u16 err = 0;
struct hpi_msg_format fmt;
switch (channels) {
@@ -586,8 +322,8 @@ u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
case 16:
break;
default:
- error = HPI_ERROR_INVALID_CHANNELS;
- return error;
+ err = HPI_ERROR_INVALID_CHANNELS;
+ return err;
}
fmt.channels = channels;
@@ -610,17 +346,17 @@ u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
case HPI_FORMAT_OEM2:
break;
default:
- error = HPI_ERROR_INVALID_FORMAT;
- return error;
+ err = HPI_ERROR_INVALID_FORMAT;
+ return err;
}
fmt.format = format;
if (sample_rate < 8000L) {
- error = HPI_ERROR_INCOMPATIBLE_SAMPLERATE;
+ err = HPI_ERROR_INCOMPATIBLE_SAMPLERATE;
sample_rate = 8000L;
}
if (sample_rate > 200000L) {
- error = HPI_ERROR_INCOMPATIBLE_SAMPLERATE;
+ err = HPI_ERROR_INCOMPATIBLE_SAMPLERATE;
sample_rate = 200000L;
}
fmt.sample_rate = sample_rate;
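
hpi_format_create() clamps the rate into the 8000 to 200000 Hz range but still reports HPI_ERROR_INCOMPATIBLE_SAMPLERATE, so the caller gets a usable format plus a diagnostic. A small sketch of that clamp-and-flag behaviour; only the two limits are taken from the hunk above, everything else is illustrative.

#include <stdio.h>

static unsigned clamp_sample_rate(unsigned rate, int *flagged)
{
	if (rate < 8000) {
		*flagged = 1;	/* would be HPI_ERROR_INCOMPATIBLE_SAMPLERATE */
		rate = 8000;
	} else if (rate > 200000) {
		*flagged = 1;
		rate = 200000;
	}
	return rate;
}

int main(void)
{
	int flagged = 0;
	unsigned r = clamp_sample_rate(4000, &flagged);

	printf("rate %u flagged %d\n", r, flagged);	/* rate 8000 flagged 1 */
	return 0;
}
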
@@ -651,10 +387,10 @@ u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
if ((channels == 1)
&& (attributes != HPI_MPEG_MODE_DEFAULT)) {
attributes = HPI_MPEG_MODE_DEFAULT;
- error = HPI_ERROR_INVALID_FORMAT;
+ err = HPI_ERROR_INVALID_FORMAT;
} else if (attributes > HPI_MPEG_MODE_DUALCHANNEL) {
attributes = HPI_MPEG_MODE_DEFAULT;
- error = HPI_ERROR_INVALID_FORMAT;
+ err = HPI_ERROR_INVALID_FORMAT;
}
fmt.attributes = attributes;
break;
@@ -663,7 +399,7 @@ u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
}
hpi_msg_to_format(p_format, &fmt);
- return error;
+ return err;
}
u16 hpi_stream_estimate_buffer_size(struct hpi_format *p_format,
@@ -712,8 +448,8 @@ u16 hpi_stream_estimate_buffer_size(struct hpi_format *p_format,
return 0;
}
-u16 hpi_outstream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u16 outstream_index, u32 *ph_outstream)
+u16 hpi_outstream_open(u16 adapter_index, u16 outstream_index,
+ u32 *ph_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -733,38 +469,41 @@ u16 hpi_outstream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
return hr.error;
}
-u16 hpi_outstream_close(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+u16 hpi_outstream_close(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_HOSTBUFFER_FREE);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
+
hpi_send_recv(&hm, &hr);
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_GROUP_RESET);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index);
hpi_send_recv(&hm, &hr);
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_CLOSE);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index);
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_to_play,
- u32 *psamples_played, u32 *pauxiliary_data_to_play)
+u16 hpi_outstream_get_info_ex(u32 h_outstream, u16 *pw_state,
+ u32 *pbuffer_size, u32 *pdata_to_play, u32 *psamples_played,
+ u32 *pauxiliary_data_to_play)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_GET_INFO);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
@@ -782,15 +521,15 @@ u16 hpi_outstream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_write_buf(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, const u8 *pb_data, u32 bytes_to_write,
- const struct hpi_format *p_format)
+u16 hpi_outstream_write_buf(u32 h_outstream, const u8 *pb_data,
+ u32 bytes_to_write, const struct hpi_format *p_format)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_WRITE);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.pb_data = (u8 *)pb_data;
hm.u.d.u.data.data_size = bytes_to_write;
@@ -801,82 +540,85 @@ u16 hpi_outstream_write_buf(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_start(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+u16 hpi_outstream_start(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_START);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_wait_start(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream)
+u16 hpi_outstream_wait_start(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_WAIT_START);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+u16 hpi_outstream_stop(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_STOP);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_sinegen(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream)
+u16 hpi_outstream_sinegen(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_SINEGEN);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+u16 hpi_outstream_reset(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_RESET);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_query_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, struct hpi_format *p_format)
+u16 hpi_outstream_query_format(u32 h_outstream, struct hpi_format *p_format)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_QUERY_FORMAT);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
@@ -885,15 +627,15 @@ u16 hpi_outstream_query_format(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, struct hpi_format *p_format)
+u16 hpi_outstream_set_format(u32 h_outstream, struct hpi_format *p_format)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_SET_FORMAT);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
@@ -902,15 +644,15 @@ u16 hpi_outstream_set_format(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_set_velocity(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, short velocity)
+u16 hpi_outstream_set_velocity(u32 h_outstream, short velocity)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_SET_VELOCITY);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.velocity = velocity;
hpi_send_recv(&hm, &hr);
@@ -918,15 +660,16 @@ u16 hpi_outstream_set_velocity(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_set_punch_in_out(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 punch_in_sample, u32 punch_out_sample)
+u16 hpi_outstream_set_punch_in_out(u32 h_outstream, u32 punch_in_sample,
+ u32 punch_out_sample)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_SET_PUNCHINOUT);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.pio.punch_in_sample = punch_in_sample;
hm.u.d.u.pio.punch_out_sample = punch_out_sample;
@@ -936,29 +679,29 @@ u16 hpi_outstream_set_punch_in_out(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u16 mode)
+u16 hpi_outstream_ancillary_reset(u32 h_outstream, u16 mode)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_ANC_RESET);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.format.channels = mode;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 *pframes_available)
+u16 hpi_outstream_ancillary_get_info(u32 h_outstream, u32 *pframes_available)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_ANC_GET_INFO);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
if (hr.error == 0) {
if (pframes_available)
@@ -969,8 +712,8 @@ u16 hpi_outstream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_ancillary_read(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, struct hpi_anc_frame *p_anc_frame_buffer,
+u16 hpi_outstream_ancillary_read(u32 h_outstream,
+ struct hpi_anc_frame *p_anc_frame_buffer,
u32 anc_frame_buffer_size_in_bytes,
u32 number_of_ancillary_frames_to_read)
{
@@ -979,7 +722,8 @@ u16 hpi_outstream_ancillary_read(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_ANC_READ);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.pb_data = (u8 *)p_anc_frame_buffer;
hm.u.d.u.data.data_size =
number_of_ancillary_frames_to_read *
@@ -987,19 +731,19 @@ u16 hpi_outstream_ancillary_read(const struct hpi_hsubsys *ph_subsys,
if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes)
hpi_send_recv(&hm, &hr);
else
- hr.error = HPI_ERROR_INVALID_DATA_TRANSFER;
+ hr.error = HPI_ERROR_INVALID_DATASIZE;
return hr.error;
}
-u16 hpi_outstream_set_time_scale(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 time_scale)
+u16 hpi_outstream_set_time_scale(u32 h_outstream, u32 time_scale)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_SET_TIMESCALE);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.time_scale = time_scale;
@@ -1008,22 +752,21 @@ u16 hpi_outstream_set_time_scale(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 size_in_bytes)
+u16 hpi_outstream_host_buffer_allocate(u32 h_outstream, u32 size_in_bytes)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_HOSTBUFFER_ALLOC);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.data_size = size_in_bytes;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u8 **pp_buffer,
+u16 hpi_outstream_host_buffer_get_info(u32 h_outstream, u8 **pp_buffer,
struct hpi_hostbuffer_status **pp_status)
{
struct hpi_message hm;
@@ -1031,7 +774,8 @@ u16 hpi_outstream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_HOSTBUFFER_GET_INFO);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
if (hr.error == 0) {
@@ -1043,21 +787,20 @@ u16 hpi_outstream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream)
+u16 hpi_outstream_host_buffer_free(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_HOSTBUFFER_FREE);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_outstream_group_add(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 h_stream)
+u16 hpi_outstream_group_add(u32 h_outstream, u32 h_stream)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -1066,22 +809,22 @@ u16 hpi_outstream_group_add(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_GROUP_ADD);
- hr.error = 0;
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
+
+ if (hpi_handle_indexes(h_stream, &adapter,
+ &hm.u.d.u.stream.stream_index))
+ return HPI_ERROR_INVALID_HANDLE;
+
c_obj_type = hpi_handle_object(h_stream);
switch (c_obj_type) {
case HPI_OBJ_OSTREAM:
- hm.u.d.u.stream.object_type = HPI_OBJ_OSTREAM;
- u32TOINDEXES(h_stream, &adapter,
- &hm.u.d.u.stream.stream_index);
- break;
case HPI_OBJ_ISTREAM:
- hm.u.d.u.stream.object_type = HPI_OBJ_ISTREAM;
- u32TOINDEXES(h_stream, &adapter,
- &hm.u.d.u.stream.stream_index);
+ hm.u.d.u.stream.object_type = c_obj_type;
break;
default:
- return HPI_ERROR_INVALID_STREAM;
+ return HPI_ERROR_INVALID_OBJ;
}
if (adapter != hm.adapter_index)
return HPI_ERROR_NO_INTERADAPTER_GROUPS;
@@ -1090,15 +833,16 @@ u16 hpi_outstream_group_add(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
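
The reworked hpi_outstream_group_add() above front-loads its validation: both handles are decoded before anything else, the joined handle must name an in-stream or out-stream (anything else now yields HPI_ERROR_INVALID_OBJ rather than the old HPI_ERROR_INVALID_STREAM), and streams on different adapters are still refused with HPI_ERROR_NO_INTERADAPTER_GROUPS. A minimal stand-alone restatement of that check order follows; the numeric values given to the constants are placeholders for the sketch only.

#include <stdint.h>

typedef uint16_t u16;

/* placeholder values, for the sketch only */
#define SKETCH_OBJ_OSTREAM			6
#define SKETCH_OBJ_ISTREAM			7
#define SKETCH_ERROR_INVALID_OBJ		201
#define SKETCH_ERROR_NO_INTERADAPTER_GROUPS	202

/* Checks applied once both handles have been decoded successfully. */
static u16 sketch_group_add_checks(u16 own_adapter, u16 other_adapter,
	unsigned int other_obj_type)
{
	if (other_obj_type != SKETCH_OBJ_OSTREAM &&
		other_obj_type != SKETCH_OBJ_ISTREAM)
		return SKETCH_ERROR_INVALID_OBJ;

	if (other_adapter != own_adapter)
		return SKETCH_ERROR_NO_INTERADAPTER_GROUPS;

	return 0;	/* safe to send the GROUP_ADD message */
}

Collapsing the two case labels into a single assignment of c_obj_type also removes the duplicated u32TOINDEXES() call that each branch of the old switch carried.
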
-u16 hpi_outstream_group_get_map(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream, u32 *poutstream_map, u32 *pinstream_map)
+u16 hpi_outstream_group_get_map(u32 h_outstream, u32 *poutstream_map,
+ u32 *pinstream_map)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_GROUP_GETMAP);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
if (poutstream_map)
@@ -1109,21 +853,20 @@ u16 hpi_outstream_group_get_map(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_outstream_group_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_outstream)
+u16 hpi_outstream_group_reset(u32 h_outstream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_GROUP_RESET);
- u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u16 instream_index, u32 *ph_instream)
+u16 hpi_instream_open(u16 adapter_index, u16 instream_index, u32 *ph_instream)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -1145,38 +888,40 @@ u16 hpi_instream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
return hr.error;
}
-u16 hpi_instream_close(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+u16 hpi_instream_close(u32 h_instream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_HOSTBUFFER_FREE);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_GROUP_RESET);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index);
hpi_send_recv(&hm, &hr);
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_CLOSE);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index);
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_query_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, const struct hpi_format *p_format)
+u16 hpi_instream_query_format(u32 h_instream,
+ const struct hpi_format *p_format)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_QUERY_FORMAT);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
hpi_send_recv(&hm, &hr);
@@ -1184,15 +929,15 @@ u16 hpi_instream_query_format(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, const struct hpi_format *p_format)
+u16 hpi_instream_set_format(u32 h_instream, const struct hpi_format *p_format)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_SET_FORMAT);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
hpi_send_recv(&hm, &hr);
@@ -1200,15 +945,15 @@ u16 hpi_instream_set_format(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_read_buf(const struct hpi_hsubsys *ph_subsys, u32 h_instream,
- u8 *pb_data, u32 bytes_to_read)
+u16 hpi_instream_read_buf(u32 h_instream, u8 *pb_data, u32 bytes_to_read)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_READ);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.data_size = bytes_to_read;
hm.u.d.u.data.pb_data = pb_data;
@@ -1217,72 +962,76 @@ u16 hpi_instream_read_buf(const struct hpi_hsubsys *ph_subsys, u32 h_instream,
return hr.error;
}
-u16 hpi_instream_start(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+u16 hpi_instream_start(u32 h_instream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_START);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_wait_start(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream)
+u16 hpi_instream_wait_start(u32 h_instream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_WAIT_START);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+u16 hpi_instream_stop(u32 h_instream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_STOP);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+u16 hpi_instream_reset(u32 h_instream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_RESET);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_recorded,
- u32 *psamples_recorded, u32 *pauxiliary_data_recorded)
+u16 hpi_instream_get_info_ex(u32 h_instream, u16 *pw_state, u32 *pbuffer_size,
+ u32 *pdata_recorded, u32 *psamples_recorded,
+ u32 *pauxiliary_data_recorded)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_GET_INFO);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
@@ -1300,15 +1049,15 @@ u16 hpi_instream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u16 bytes_per_frame, u16 mode, u16 alignment,
- u16 idle_bit)
+u16 hpi_instream_ancillary_reset(u32 h_instream, u16 bytes_per_frame,
+ u16 mode, u16 alignment, u16 idle_bit)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_ANC_RESET);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.format.attributes = bytes_per_frame;
hm.u.d.u.data.format.format = (mode << 8) | (alignment & 0xff);
hm.u.d.u.data.format.channels = idle_bit;
@@ -1316,14 +1065,14 @@ u16 hpi_instream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 *pframe_space)
+u16 hpi_instream_ancillary_get_info(u32 h_instream, u32 *pframe_space)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_ANC_GET_INFO);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
if (pframe_space)
*pframe_space =
@@ -1333,8 +1082,8 @@ u16 hpi_instream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, const struct hpi_anc_frame *p_anc_frame_buffer,
+u16 hpi_instream_ancillary_write(u32 h_instream,
+ const struct hpi_anc_frame *p_anc_frame_buffer,
u32 anc_frame_buffer_size_in_bytes,
u32 number_of_ancillary_frames_to_write)
{
@@ -1343,7 +1092,8 @@ u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_ANC_WRITE);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.pb_data = (u8 *)p_anc_frame_buffer;
hm.u.d.u.data.data_size =
number_of_ancillary_frames_to_write *
@@ -1351,12 +1101,11 @@ u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys,
if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes)
hpi_send_recv(&hm, &hr);
else
- hr.error = HPI_ERROR_INVALID_DATA_TRANSFER;
+ hr.error = HPI_ERROR_INVALID_DATASIZE;
return hr.error;
}
-u16 hpi_instream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 size_in_bytes)
+u16 hpi_instream_host_buffer_allocate(u32 h_instream, u32 size_in_bytes)
{
struct hpi_message hm;
@@ -1364,14 +1113,14 @@ u16 hpi_instream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_HOSTBUFFER_ALLOC);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.d.u.data.data_size = size_in_bytes;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u8 **pp_buffer,
+u16 hpi_instream_host_buffer_get_info(u32 h_instream, u8 **pp_buffer,
struct hpi_hostbuffer_status **pp_status)
{
struct hpi_message hm;
@@ -1379,7 +1128,8 @@ u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_HOSTBUFFER_GET_INFO);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
if (hr.error == 0) {
@@ -1391,8 +1141,7 @@ u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream)
+u16 hpi_instream_host_buffer_free(u32 h_instream)
{
struct hpi_message hm;
@@ -1400,13 +1149,13 @@ u16 hpi_instream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_HOSTBUFFER_FREE);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 h_stream)
+u16 hpi_instream_group_add(u32 h_instream, u32 h_stream)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -1416,22 +1165,23 @@ u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_GROUP_ADD);
hr.error = 0;
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
+
+ if (hpi_handle_indexes(h_stream, &adapter,
+ &hm.u.d.u.stream.stream_index))
+ return HPI_ERROR_INVALID_HANDLE;
+
c_obj_type = hpi_handle_object(h_stream);
switch (c_obj_type) {
case HPI_OBJ_OSTREAM:
- hm.u.d.u.stream.object_type = HPI_OBJ_OSTREAM;
- u32TOINDEXES(h_stream, &adapter,
- &hm.u.d.u.stream.stream_index);
- break;
case HPI_OBJ_ISTREAM:
- hm.u.d.u.stream.object_type = HPI_OBJ_ISTREAM;
- u32TOINDEXES(h_stream, &adapter,
- &hm.u.d.u.stream.stream_index);
+ hm.u.d.u.stream.object_type = c_obj_type;
break;
default:
- return HPI_ERROR_INVALID_STREAM;
+ return HPI_ERROR_INVALID_OBJ;
}
if (adapter != hm.adapter_index)
@@ -1441,15 +1191,16 @@ u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_group_get_map(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream, u32 *poutstream_map, u32 *pinstream_map)
+u16 hpi_instream_group_get_map(u32 h_instream, u32 *poutstream_map,
+ u32 *pinstream_map)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_HOSTBUFFER_FREE);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
if (poutstream_map)
@@ -1460,21 +1211,20 @@ u16 hpi_instream_group_get_map(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_instream_group_reset(const struct hpi_hsubsys *ph_subsys,
- u32 h_instream)
+u16 hpi_instream_group_reset(u32 h_instream)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_GROUP_RESET);
- u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_mixer_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_mixer)
+u16 hpi_mixer_open(u16 adapter_index, u32 *ph_mixer)
{
struct hpi_message hm;
struct hpi_response hr;
@@ -1492,25 +1242,29 @@ u16 hpi_mixer_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
return hr.error;
}
-u16 hpi_mixer_close(const struct hpi_hsubsys *ph_subsys, u32 h_mixer)
+u16 hpi_mixer_close(u32 h_mixer)
{
struct hpi_message hm;
struct hpi_response hr;
+
hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE);
- u32TOINDEX(h_mixer, &hm.adapter_index);
+ if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL))
+ return HPI_ERROR_INVALID_HANDLE;
+
hpi_send_recv(&hm, &hr);
return hr.error;
}
-u16 hpi_mixer_get_control(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
- u16 src_node_type, u16 src_node_type_index, u16 dst_node_type,
- u16 dst_node_type_index, u16 control_type, u32 *ph_control)
+u16 hpi_mixer_get_control(u32 h_mixer, u16 src_node_type,
+ u16 src_node_type_index, u16 dst_node_type, u16 dst_node_type_index,
+ u16 control_type, u32 *ph_control)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER,
HPI_MIXER_GET_CONTROL);
- u32TOINDEX(h_mixer, &hm.adapter_index);
+ if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.m.node_type1 = src_node_type;
hm.u.m.node_index1 = src_node_type_index;
hm.u.m.node_type2 = dst_node_type;
@@ -1528,16 +1282,16 @@ u16 hpi_mixer_get_control(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
return hr.error;
}
-u16 hpi_mixer_get_control_by_index(const struct hpi_hsubsys *ph_subsys,
- u32 h_mixer, u16 control_index, u16 *pw_src_node_type,
- u16 *pw_src_node_index, u16 *pw_dst_node_type, u16 *pw_dst_node_index,
- u16 *pw_control_type, u32 *ph_control)
+u16 hpi_mixer_get_control_by_index(u32 h_mixer, u16 control_index,
+ u16 *pw_src_node_type, u16 *pw_src_node_index, u16 *pw_dst_node_type,
+ u16 *pw_dst_node_index, u16 *pw_control_type, u32 *ph_control)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER,
HPI_MIXER_GET_CONTROL_BY_INDEX);
- u32TOINDEX(h_mixer, &hm.adapter_index);
+ if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.m.control_index = control_index;
hpi_send_recv(&hm, &hr);
@@ -1562,13 +1316,14 @@ u16 hpi_mixer_get_control_by_index(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_mixer_store(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
- enum HPI_MIXER_STORE_COMMAND command, u16 index)
+u16 hpi_mixer_store(u32 h_mixer, enum HPI_MIXER_STORE_COMMAND command,
+ u16 index)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_STORE);
- u32TOINDEX(h_mixer, &hm.adapter_index);
+ if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.mx.store.command = command;
hm.u.mx.store.index = index;
hpi_send_recv(&hm, &hr);
@@ -1576,16 +1331,16 @@ u16 hpi_mixer_store(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
}
static
-u16 hpi_control_param_set(const struct hpi_hsubsys *ph_subsys,
- const u32 h_control, const u16 attrib, const u32 param1,
- const u32 param2)
+u16 hpi_control_param_set(const u32 h_control, const u16 attrib,
+ const u32 param1, const u32 param2)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = attrib;
hm.u.c.param1 = param1;
hm.u.c.param2 = param2;
@@ -1601,7 +1356,8 @@ static u16 hpi_control_log_set2(u32 h_control, u16 attrib, short sv0,
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = attrib;
hm.u.c.an_log_value[0] = sv0;
hm.u.c.an_log_value[1] = sv1;
@@ -1610,16 +1366,16 @@ static u16 hpi_control_log_set2(u32 h_control, u16 attrib, short sv0,
}
static
-u16 hpi_control_param_get(const struct hpi_hsubsys *ph_subsys,
- const u32 h_control, const u16 attrib, u32 param1, u32 param2,
- u32 *pparam1, u32 *pparam2)
+u16 hpi_control_param_get(const u32 h_control, const u16 attrib, u32 param1,
+ u32 param2, u32 *pparam1, u32 *pparam2)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = attrib;
hm.u.c.param1 = param1;
hm.u.c.param2 = param2;
@@ -1632,19 +1388,20 @@ u16 hpi_control_param_get(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-#define hpi_control_param1_get(s, h, a, p1) \
- hpi_control_param_get(s, h, a, 0, 0, p1, NULL)
-#define hpi_control_param2_get(s, h, a, p1, p2) \
- hpi_control_param_get(s, h, a, 0, 0, p1, p2)
+#define hpi_control_param1_get(h, a, p1) \
+ hpi_control_param_get(h, a, 0, 0, p1, NULL)
+#define hpi_control_param2_get(h, a, p1, p2) \
+ hpi_control_param_get(h, a, 0, 0, p1, p2)
-static u16 hpi_control_log_get2(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 attrib, short *sv0, short *sv1)
+static u16 hpi_control_log_get2(u32 h_control, u16 attrib, short *sv0,
+ short *sv1)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = attrib;
hpi_send_recv(&hm, &hr);
@@ -1655,8 +1412,7 @@ static u16 hpi_control_log_get2(const struct hpi_hsubsys *ph_subsys,
}
static
-u16 hpi_control_query(const struct hpi_hsubsys *ph_subsys,
- const u32 h_control, const u16 attrib, const u32 index,
+u16 hpi_control_query(const u32 h_control, const u16 attrib, const u32 index,
const u32 param, u32 *psetting)
{
struct hpi_message hm;
@@ -1664,7 +1420,8 @@ u16 hpi_control_query(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_INFO);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = attrib;
hm.u.c.param1 = index;
@@ -1682,7 +1439,7 @@ static u16 hpi_control_get_string(const u32 h_control, const u16 attribute,
unsigned int sub_string_index = 0, j = 0;
char c = 0;
unsigned int n = 0;
- u16 hE = 0;
+ u16 err = 0;
if ((string_length < 1) || (string_length > 256))
return HPI_ERROR_INVALID_CONTROL_VALUE;
@@ -1693,7 +1450,9 @@ static u16 hpi_control_get_string(const u32 h_control, const u16 attribute,
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index,
+ &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = attribute;
hm.u.c.param1 = sub_string_index;
hm.u.c.param2 = 0;
@@ -1705,7 +1464,7 @@ static u16 hpi_control_get_string(const u32 h_control, const u16 attribute,
return HPI_ERROR_INVALID_CONTROL_VALUE;
if (hr.error) {
- hE = hr.error;
+ err = hr.error;
break;
}
for (j = 0; j < 8; j++) {
@@ -1714,7 +1473,7 @@ static u16 hpi_control_get_string(const u32 h_control, const u16 attribute,
n++;
if (n >= string_length) {
psz_string[string_length - 1] = 0;
- hE = HPI_ERROR_INVALID_CONTROL_VALUE;
+ err = HPI_ERROR_INVALID_CONTROL_VALUE;
break;
}
if (c == 0)
@@ -1730,57 +1489,52 @@ static u16 hpi_control_get_string(const u32 h_control, const u16 attribute,
if (c == 0)
break;
}
- return hE;
+ return err;
}
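
hpi_control_get_string() above fetches the string eight bytes per HPI_CONTROL_GET_STATE round trip, advancing sub_string_index between messages and truncating with HPI_ERROR_INVALID_CONTROL_VALUE if the caller's buffer fills before a terminating NUL arrives. The stand-alone sketch below replays only that chunking logic against an in-memory source string; fetch_chunk() is a hypothetical stand-in for the message exchange, not a driver function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t u16;

#define SKETCH_ERR_TRUNCATED 1	/* stand-in for HPI_ERROR_INVALID_CONTROL_VALUE */

/* Hypothetical stand-in for one GET_STATE round trip: copy up to eight
 * bytes of the source, starting at 'offset', into 'chunk'. */
static void fetch_chunk(const char *src, unsigned int offset, char chunk[8])
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		chunk[i] = offset + i < strlen(src) ? src[offset + i] : 0;
}

/* Read a NUL-terminated string eight bytes at a time, truncating and
 * reporting an error if the destination fills up first. */
static u16 sketch_get_string(const char *src, char *dst, unsigned int dst_len)
{
	unsigned int offset = 0, n = 0, j;
	char chunk[8];

	if (dst_len < 1)
		return SKETCH_ERR_TRUNCATED;

	for (;;) {
		fetch_chunk(src, offset, chunk);
		for (j = 0; j < 8; j++) {
			dst[n] = chunk[j];
			if (chunk[j] == 0)
				return 0;
			if (++n >= dst_len) {
				dst[dst_len - 1] = 0;
				return SKETCH_ERR_TRUNCATED;
			}
		}
		offset += 8;
	}
}

int main(void)
{
	char buf[16];

	if (sketch_get_string("hello, control", buf, sizeof(buf)) == 0)
		printf("%s\n", buf);
	return 0;
}
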
-u16 HPI_AESEBU__receiver_query_format(const struct hpi_hsubsys *ph_subsys,
- const u32 h_aes_rx, const u32 index, u16 *pw_format)
+u16 hpi_aesebu_receiver_query_format(const u32 h_aes_rx, const u32 index,
+ u16 *pw_format)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_aes_rx, HPI_AESEBURX_FORMAT,
- index, 0, &qr);
+ err = hpi_control_query(h_aes_rx, HPI_AESEBURX_FORMAT, index, 0, &qr);
*pw_format = (u16)qr;
return err;
}
-u16 HPI_AESEBU__receiver_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 format)
+u16 hpi_aesebu_receiver_set_format(u32 h_control, u16 format)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_AESEBURX_FORMAT, format, 0);
+ return hpi_control_param_set(h_control, HPI_AESEBURX_FORMAT, format,
+ 0);
}
-u16 HPI_AESEBU__receiver_get_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_format)
+u16 hpi_aesebu_receiver_get_format(u32 h_control, u16 *pw_format)
{
u16 err;
u32 param;
- err = hpi_control_param1_get(ph_subsys, h_control,
- HPI_AESEBURX_FORMAT, &param);
+ err = hpi_control_param1_get(h_control, HPI_AESEBURX_FORMAT, &param);
if (!err && pw_format)
*pw_format = (u16)param;
return err;
}
-u16 HPI_AESEBU__receiver_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *psample_rate)
+u16 hpi_aesebu_receiver_get_sample_rate(u32 h_control, u32 *psample_rate)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_AESEBURX_SAMPLERATE, psample_rate);
+ return hpi_control_param1_get(h_control, HPI_AESEBURX_SAMPLERATE,
+ psample_rate);
}
-u16 HPI_AESEBU__receiver_get_user_data(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 *pw_data)
+u16 hpi_aesebu_receiver_get_user_data(u32 h_control, u16 index, u16 *pw_data)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_AESEBURX_USERDATA;
hm.u.c.param1 = index;
@@ -1791,14 +1545,15 @@ u16 HPI_AESEBU__receiver_get_user_data(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 HPI_AESEBU__receiver_get_channel_status(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u16 index, u16 *pw_data)
+u16 hpi_aesebu_receiver_get_channel_status(u32 h_control, u16 index,
+ u16 *pw_data)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_AESEBURX_CHANNELSTATUS;
hm.u.c.param1 = index;
@@ -1809,101 +1564,93 @@ u16 HPI_AESEBU__receiver_get_channel_status(const struct hpi_hsubsys
return hr.error;
}
-u16 HPI_AESEBU__receiver_get_error_status(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_error_data)
+u16 hpi_aesebu_receiver_get_error_status(u32 h_control, u16 *pw_error_data)
{
u32 error_data = 0;
- u16 error = 0;
+ u16 err = 0;
- error = hpi_control_param1_get(ph_subsys, h_control,
- HPI_AESEBURX_ERRORSTATUS, &error_data);
+ err = hpi_control_param1_get(h_control, HPI_AESEBURX_ERRORSTATUS,
+ &error_data);
if (pw_error_data)
*pw_error_data = (u16)error_data;
- return error;
+ return err;
}
-u16 HPI_AESEBU__transmitter_set_sample_rate(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u32 sample_rate)
+u16 hpi_aesebu_transmitter_set_sample_rate(u32 h_control, u32 sample_rate)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_AESEBUTX_SAMPLERATE, sample_rate, 0);
+ return hpi_control_param_set(h_control, HPI_AESEBUTX_SAMPLERATE,
+ sample_rate, 0);
}
-u16 HPI_AESEBU__transmitter_set_user_data(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 data)
+u16 hpi_aesebu_transmitter_set_user_data(u32 h_control, u16 index, u16 data)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_AESEBUTX_USERDATA, index, data);
+ return hpi_control_param_set(h_control, HPI_AESEBUTX_USERDATA, index,
+ data);
}
-u16 HPI_AESEBU__transmitter_set_channel_status(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u16 index, u16 data)
+u16 hpi_aesebu_transmitter_set_channel_status(u32 h_control, u16 index,
+ u16 data)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_AESEBUTX_CHANNELSTATUS, index, data);
+ return hpi_control_param_set(h_control, HPI_AESEBUTX_CHANNELSTATUS,
+ index, data);
}
-u16 HPI_AESEBU__transmitter_get_channel_status(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, u16 index, u16 *pw_data)
+u16 hpi_aesebu_transmitter_get_channel_status(u32 h_control, u16 index,
+ u16 *pw_data)
{
return HPI_ERROR_INVALID_OPERATION;
}
-u16 HPI_AESEBU__transmitter_query_format(const struct hpi_hsubsys *ph_subsys,
- const u32 h_aes_tx, const u32 index, u16 *pw_format)
+u16 hpi_aesebu_transmitter_query_format(const u32 h_aes_tx, const u32 index,
+ u16 *pw_format)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_aes_tx, HPI_AESEBUTX_FORMAT,
- index, 0, &qr);
+ err = hpi_control_query(h_aes_tx, HPI_AESEBUTX_FORMAT, index, 0, &qr);
*pw_format = (u16)qr;
return err;
}
-u16 HPI_AESEBU__transmitter_set_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 output_format)
+u16 hpi_aesebu_transmitter_set_format(u32 h_control, u16 output_format)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_AESEBUTX_FORMAT, output_format, 0);
+ return hpi_control_param_set(h_control, HPI_AESEBUTX_FORMAT,
+ output_format, 0);
}
-u16 HPI_AESEBU__transmitter_get_format(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_output_format)
+u16 hpi_aesebu_transmitter_get_format(u32 h_control, u16 *pw_output_format)
{
u16 err;
u32 param;
- err = hpi_control_param1_get(ph_subsys, h_control,
- HPI_AESEBUTX_FORMAT, &param);
+ err = hpi_control_param1_get(h_control, HPI_AESEBUTX_FORMAT, &param);
if (!err && pw_output_format)
*pw_output_format = (u16)param;
return err;
}
-u16 hpi_bitstream_set_clock_edge(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 edge_type)
+u16 hpi_bitstream_set_clock_edge(u32 h_control, u16 edge_type)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_BITSTREAM_CLOCK_EDGE, edge_type, 0);
+ return hpi_control_param_set(h_control, HPI_BITSTREAM_CLOCK_EDGE,
+ edge_type, 0);
}
-u16 hpi_bitstream_set_data_polarity(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 polarity)
+u16 hpi_bitstream_set_data_polarity(u32 h_control, u16 polarity)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_BITSTREAM_DATA_POLARITY, polarity, 0);
+ return hpi_control_param_set(h_control, HPI_BITSTREAM_DATA_POLARITY,
+ polarity, 0);
}
-u16 hpi_bitstream_get_activity(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_clk_activity, u16 *pw_data_activity)
+u16 hpi_bitstream_get_activity(u32 h_control, u16 *pw_clk_activity,
+ u16 *pw_data_activity)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_BITSTREAM_ACTIVITY;
hpi_send_recv(&hm, &hr);
if (pw_clk_activity)
@@ -1913,45 +1660,43 @@ u16 hpi_bitstream_get_activity(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_channel_mode_query_mode(const struct hpi_hsubsys *ph_subsys,
- const u32 h_mode, const u32 index, u16 *pw_mode)
+u16 hpi_channel_mode_query_mode(const u32 h_mode, const u32 index,
+ u16 *pw_mode)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_mode, HPI_CHANNEL_MODE_MODE,
- index, 0, &qr);
+ err = hpi_control_query(h_mode, HPI_CHANNEL_MODE_MODE, index, 0, &qr);
*pw_mode = (u16)qr;
return err;
}
-u16 hpi_channel_mode_set(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 mode)
+u16 hpi_channel_mode_set(u32 h_control, u16 mode)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_CHANNEL_MODE_MODE, mode, 0);
+ return hpi_control_param_set(h_control, HPI_CHANNEL_MODE_MODE, mode,
+ 0);
}
-u16 hpi_channel_mode_get(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 *mode)
+u16 hpi_channel_mode_get(u32 h_control, u16 *mode)
{
u32 mode32 = 0;
- u16 error = hpi_control_param1_get(ph_subsys, h_control,
+ u16 err = hpi_control_param1_get(h_control,
HPI_CHANNEL_MODE_MODE, &mode32);
if (mode)
*mode = (u16)mode32;
- return error;
+ return err;
}
-u16 hpi_cobranet_hmi_write(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 hmi_address, u32 byte_count, u8 *pb_data)
+u16 hpi_cobranet_hmi_write(u32 h_control, u32 hmi_address, u32 byte_count,
+ u8 *pb_data)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.cx.u.cobranet_data.byte_count = byte_count;
hm.u.cx.u.cobranet_data.hmi_address = hmi_address;
@@ -1969,15 +1714,16 @@ u16 hpi_cobranet_hmi_write(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_cobranet_hmi_read(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 hmi_address, u32 max_byte_count, u32 *pbyte_count, u8 *pb_data)
+u16 hpi_cobranet_hmi_read(u32 h_control, u32 hmi_address, u32 max_byte_count,
+ u32 *pbyte_count, u8 *pb_data)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.cx.u.cobranet_data.byte_count = max_byte_count;
hm.u.cx.u.cobranet_data.hmi_address = hmi_address;
@@ -2008,16 +1754,16 @@ u16 hpi_cobranet_hmi_read(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_cobranet_hmi_get_status(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pstatus, u32 *preadable_size,
- u32 *pwriteable_size)
+u16 hpi_cobranet_hmi_get_status(u32 h_control, u32 *pstatus,
+ u32 *preadable_size, u32 *pwriteable_size)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.cx.attribute = HPI_COBRANET_GET_STATUS;
@@ -2035,177 +1781,176 @@ u16 hpi_cobranet_hmi_get_status(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_cobranet_getI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pi_paddress)
+u16 hpi_cobranet_get_ip_address(u32 h_control, u32 *pdw_ip_address)
{
u32 byte_count;
u32 iP;
- u16 error;
+ u16 err;
- error = hpi_cobranet_hmi_read(ph_subsys, h_control,
+ err = hpi_cobranet_hmi_read(h_control,
HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, &byte_count,
(u8 *)&iP);
- *pi_paddress =
+ *pdw_ip_address =
((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) | ((iP &
0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8);
- if (error)
- *pi_paddress = 0;
+ if (err)
+ *pdw_ip_address = 0;
- return error;
+ return err;
}
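
The conversion applied to *pdw_ip_address above is a fixed byte swap within each 16-bit half of the 32-bit word (bytes B3 B2 B1 B0 become B2 B3 B0 B1); the setter below applies the same expression in the other direction, so the transform is its own inverse. A small stand-alone sketch, with an arbitrary example address:

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* Swap the two bytes inside each 16-bit half of a 32-bit word. */
static u32 swap_bytes_in_halfwords(u32 x)
{
	return ((x & 0xff000000) >> 8) | ((x & 0x00ff0000) << 8) |
		((x & 0x0000ff00) >> 8) | ((x & 0x000000ff) << 8);
}

int main(void)
{
	u32 ip = 0xc0a80a01;	/* example value only: 192.168.10.1 */

	/* applying the swap twice restores the original word */
	assert(swap_bytes_in_halfwords(swap_bytes_in_halfwords(ip)) == ip);
	return 0;
}
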
-u16 hpi_cobranet_setI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 i_paddress)
+u16 hpi_cobranet_set_ip_address(u32 h_control, u32 dw_ip_address)
{
u32 iP;
- u16 error;
+ u16 err;
- iP = ((i_paddress & 0xff000000) >> 8) | ((i_paddress & 0x00ff0000) <<
- 8) | ((i_paddress & 0x0000ff00) >> 8) | ((i_paddress &
- 0x000000ff) << 8);
+ iP = ((dw_ip_address & 0xff000000) >> 8) | ((dw_ip_address &
+ 0x00ff0000) << 8) | ((dw_ip_address & 0x0000ff00) >>
+ 8) | ((dw_ip_address & 0x000000ff) << 8);
- error = hpi_cobranet_hmi_write(ph_subsys, h_control,
+ err = hpi_cobranet_hmi_write(h_control,
HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, (u8 *)&iP);
- return error;
+ return err;
}
-u16 hpi_cobranet_get_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pi_paddress)
+u16 hpi_cobranet_get_static_ip_address(u32 h_control, u32 *pdw_ip_address)
{
u32 byte_count;
u32 iP;
- u16 error;
- error = hpi_cobranet_hmi_read(ph_subsys, h_control,
+ u16 err;
+ err = hpi_cobranet_hmi_read(h_control,
HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, &byte_count,
(u8 *)&iP);
- *pi_paddress =
+ *pdw_ip_address =
((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) | ((iP &
0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8);
- if (error)
- *pi_paddress = 0;
+ if (err)
+ *pdw_ip_address = 0;
- return error;
+ return err;
}
-u16 hpi_cobranet_set_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 i_paddress)
+u16 hpi_cobranet_set_static_ip_address(u32 h_control, u32 dw_ip_address)
{
u32 iP;
- u16 error;
+ u16 err;
- iP = ((i_paddress & 0xff000000) >> 8) | ((i_paddress & 0x00ff0000) <<
- 8) | ((i_paddress & 0x0000ff00) >> 8) | ((i_paddress &
- 0x000000ff) << 8);
+ iP = ((dw_ip_address & 0xff000000) >> 8) | ((dw_ip_address &
+ 0x00ff0000) << 8) | ((dw_ip_address & 0x0000ff00) >>
+ 8) | ((dw_ip_address & 0x000000ff) << 8);
- error = hpi_cobranet_hmi_write(ph_subsys, h_control,
+ err = hpi_cobranet_hmi_write(h_control,
HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, (u8 *)&iP);
- return error;
+ return err;
}
-u16 hpi_cobranet_getMA_caddress(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pmAC_MS_bs, u32 *pmAC_LS_bs)
+u16 hpi_cobranet_get_macaddress(u32 h_control, u32 *p_mac_msbs,
+ u32 *p_mac_lsbs)
{
u32 byte_count;
- u16 error;
- u32 mAC;
+ u16 err;
+ u32 mac;
- error = hpi_cobranet_hmi_read(ph_subsys, h_control,
+ err = hpi_cobranet_hmi_read(h_control,
HPI_COBRANET_HMI_cobra_if_phy_address, 4, &byte_count,
- (u8 *)&mAC);
- *pmAC_MS_bs =
- ((mAC & 0xff000000) >> 8) | ((mAC & 0x00ff0000) << 8) | ((mAC
- & 0x0000ff00) >> 8) | ((mAC & 0x000000ff) << 8);
- error += hpi_cobranet_hmi_read(ph_subsys, h_control,
- HPI_COBRANET_HMI_cobra_if_phy_address + 1, 4, &byte_count,
- (u8 *)&mAC);
- *pmAC_LS_bs =
- ((mAC & 0xff000000) >> 8) | ((mAC & 0x00ff0000) << 8) | ((mAC
- & 0x0000ff00) >> 8) | ((mAC & 0x000000ff) << 8);
-
- if (error) {
- *pmAC_MS_bs = 0;
- *pmAC_LS_bs = 0;
+ (u8 *)&mac);
+
+ if (!err) {
+ *p_mac_msbs =
+ ((mac & 0xff000000) >> 8) | ((mac & 0x00ff0000) << 8)
+ | ((mac & 0x0000ff00) >> 8) | ((mac & 0x000000ff) <<
+ 8);
+
+ err = hpi_cobranet_hmi_read(h_control,
+ HPI_COBRANET_HMI_cobra_if_phy_address + 1, 4,
+ &byte_count, (u8 *)&mac);
}
- return error;
+ if (!err) {
+ *p_mac_lsbs =
+ ((mac & 0xff000000) >> 8) | ((mac & 0x00ff0000) << 8)
+ | ((mac & 0x0000ff00) >> 8) | ((mac & 0x000000ff) <<
+ 8);
+ } else {
+ *p_mac_msbs = 0;
+ *p_mac_lsbs = 0;
+ }
+
+ return err;
}
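
The MAC address read above also replaces the old additive error accumulation ("error += ...") with chained "if (!err)" blocks: the second HMI read only runs if the first succeeded, and both output words are zeroed on any failure. A stand-alone sketch of just that chaining pattern; read_word() is a hypothetical stand-in for hpi_cobranet_hmi_read().

#include <stdint.h>

typedef uint32_t u32;
typedef uint16_t u16;

/* Dummy fallible read used only for this sketch. */
static u16 read_word(u32 address, u32 *value)
{
	*value = address;
	return 0;	/* 0 means success, as in the HPI error convention */
}

/* Read two consecutive words, running the second read only if the first
 * succeeded and zeroing both outputs on any failure. */
static u16 sketch_read_two_words(u32 base, u32 *hi, u32 *lo)
{
	u16 err = read_word(base, hi);

	if (!err)
		err = read_word(base + 1, lo);

	if (err) {
		*hi = 0;
		*lo = 0;
	}
	return err;
}
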
-u16 hpi_compander_set_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 enable)
+u16 hpi_compander_set_enable(u32 h_control, u32 enable)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE,
- enable, 0);
+ return hpi_control_param_set(h_control, HPI_GENERIC_ENABLE, enable,
+ 0);
}
-u16 hpi_compander_get_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *enable)
+u16 hpi_compander_get_enable(u32 h_control, u32 *enable)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_GENERIC_ENABLE, enable);
+ return hpi_control_param1_get(h_control, HPI_GENERIC_ENABLE, enable);
}
-u16 hpi_compander_set_makeup_gain(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short makeup_gain0_01dB)
+u16 hpi_compander_set_makeup_gain(u32 h_control, short makeup_gain0_01dB)
{
return hpi_control_log_set2(h_control, HPI_COMPANDER_MAKEUPGAIN,
makeup_gain0_01dB, 0);
}
-u16 hpi_compander_get_makeup_gain(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short *makeup_gain0_01dB)
+u16 hpi_compander_get_makeup_gain(u32 h_control, short *makeup_gain0_01dB)
{
- return hpi_control_log_get2(ph_subsys, h_control,
- HPI_COMPANDER_MAKEUPGAIN, makeup_gain0_01dB, NULL);
+ return hpi_control_log_get2(h_control, HPI_COMPANDER_MAKEUPGAIN,
+ makeup_gain0_01dB, NULL);
}
-u16 hpi_compander_set_attack_time_constant(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, unsigned int index, u32 attack)
+u16 hpi_compander_set_attack_time_constant(u32 h_control, unsigned int index,
+ u32 attack)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_COMPANDER_ATTACK, attack, index);
+ return hpi_control_param_set(h_control, HPI_COMPANDER_ATTACK, attack,
+ index);
}
-u16 hpi_compander_get_attack_time_constant(const struct hpi_hsubsys
- *ph_subsys, u32 h_control, unsigned int index, u32 *attack)
+u16 hpi_compander_get_attack_time_constant(u32 h_control, unsigned int index,
+ u32 *attack)
{
- return hpi_control_param_get(ph_subsys, h_control,
- HPI_COMPANDER_ATTACK, 0, index, attack, NULL);
+ return hpi_control_param_get(h_control, HPI_COMPANDER_ATTACK, 0,
+ index, attack, NULL);
}
-u16 hpi_compander_set_decay_time_constant(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, unsigned int index, u32 decay)
+u16 hpi_compander_set_decay_time_constant(u32 h_control, unsigned int index,
+ u32 decay)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_COMPANDER_DECAY, decay, index);
+ return hpi_control_param_set(h_control, HPI_COMPANDER_DECAY, decay,
+ index);
}
-u16 hpi_compander_get_decay_time_constant(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, unsigned int index, u32 *decay)
+u16 hpi_compander_get_decay_time_constant(u32 h_control, unsigned int index,
+ u32 *decay)
{
- return hpi_control_param_get(ph_subsys, h_control,
- HPI_COMPANDER_DECAY, 0, index, decay, NULL);
+ return hpi_control_param_get(h_control, HPI_COMPANDER_DECAY, 0, index,
+ decay, NULL);
}
-u16 hpi_compander_set_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, unsigned int index, short threshold0_01dB)
+u16 hpi_compander_set_threshold(u32 h_control, unsigned int index,
+ short threshold0_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_COMPANDER_THRESHOLD;
hm.u.c.param2 = index;
hm.u.c.an_log_value[0] = threshold0_01dB;
@@ -2215,15 +1960,16 @@ u16 hpi_compander_set_threshold(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_compander_get_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, unsigned int index, short *threshold0_01dB)
+u16 hpi_compander_get_threshold(u32 h_control, unsigned int index,
+ short *threshold0_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_COMPANDER_THRESHOLD;
hm.u.c.param2 = index;
@@ -2233,29 +1979,28 @@ u16 hpi_compander_get_threshold(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_compander_set_ratio(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 ratio100)
+u16 hpi_compander_set_ratio(u32 h_control, u32 index, u32 ratio100)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_COMPANDER_RATIO, ratio100, index);
+ return hpi_control_param_set(h_control, HPI_COMPANDER_RATIO, ratio100,
+ index);
}
-u16 hpi_compander_get_ratio(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 *ratio100)
+u16 hpi_compander_get_ratio(u32 h_control, u32 index, u32 *ratio100)
{
- return hpi_control_param_get(ph_subsys, h_control,
- HPI_COMPANDER_RATIO, 0, index, ratio100, NULL);
+ return hpi_control_param_get(h_control, HPI_COMPANDER_RATIO, 0, index,
+ ratio100, NULL);
}
-u16 hpi_level_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB)
+u16 hpi_level_query_range(u32 h_control, short *min_gain_01dB,
+ short *max_gain_01dB, short *step_gain_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_LEVEL_RANGE;
hpi_send_recv(&hm, &hr);
@@ -2273,31 +2018,27 @@ u16 hpi_level_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_level_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_gain0_01dB[HPI_MAX_CHANNELS]
+u16 hpi_level_set_gain(u32 h_control, short an_gain0_01dB[HPI_MAX_CHANNELS]
)
{
return hpi_control_log_set2(h_control, HPI_LEVEL_GAIN,
an_gain0_01dB[0], an_gain0_01dB[1]);
}
-u16 hpi_level_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_gain0_01dB[HPI_MAX_CHANNELS]
+u16 hpi_level_get_gain(u32 h_control, short an_gain0_01dB[HPI_MAX_CHANNELS]
)
{
- return hpi_control_log_get2(ph_subsys, h_control, HPI_LEVEL_GAIN,
+ return hpi_control_log_get2(h_control, HPI_LEVEL_GAIN,
&an_gain0_01dB[0], &an_gain0_01dB[1]);
}
-u16 hpi_meter_query_channels(const struct hpi_hsubsys *ph_subsys,
- const u32 h_meter, u32 *p_channels)
+u16 hpi_meter_query_channels(const u32 h_meter, u32 *p_channels)
{
- return hpi_control_query(ph_subsys, h_meter, HPI_METER_NUM_CHANNELS,
- 0, 0, p_channels);
+ return hpi_control_query(h_meter, HPI_METER_NUM_CHANNELS, 0, 0,
+ p_channels);
}
-u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_peakdB[HPI_MAX_CHANNELS]
+u16 hpi_meter_get_peak(u32 h_control, short an_peakdB[HPI_MAX_CHANNELS]
)
{
short i = 0;
@@ -2307,7 +2048,8 @@ u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.obj_index = hm.obj_index;
hm.u.c.attribute = HPI_METER_PEAK;
@@ -2322,8 +2064,7 @@ u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_rmsdB[HPI_MAX_CHANNELS]
+u16 hpi_meter_get_rms(u32 h_control, short an_rmsdB[HPI_MAX_CHANNELS]
)
{
short i = 0;
@@ -2333,7 +2074,8 @@ u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_METER_RMS;
hpi_send_recv(&hm, &hr);
@@ -2348,22 +2090,20 @@ u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_meter_set_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 attack, u16 decay)
+u16 hpi_meter_set_rms_ballistics(u32 h_control, u16 attack, u16 decay)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_METER_RMS_BALLISTICS, attack, decay);
+ return hpi_control_param_set(h_control, HPI_METER_RMS_BALLISTICS,
+ attack, decay);
}
-u16 hpi_meter_get_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pn_attack, u16 *pn_decay)
+u16 hpi_meter_get_rms_ballistics(u32 h_control, u16 *pn_attack, u16 *pn_decay)
{
u32 attack;
u32 decay;
u16 error;
- error = hpi_control_param2_get(ph_subsys, h_control,
- HPI_METER_RMS_BALLISTICS, &attack, &decay);
+ error = hpi_control_param2_get(h_control, HPI_METER_RMS_BALLISTICS,
+ &attack, &decay);
if (pn_attack)
*pn_attack = (unsigned short)attack;
@@ -2373,22 +2113,21 @@ u16 hpi_meter_get_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
return error;
}
-u16 hpi_meter_set_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 attack, u16 decay)
+u16 hpi_meter_set_peak_ballistics(u32 h_control, u16 attack, u16 decay)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_METER_PEAK_BALLISTICS, attack, decay);
+ return hpi_control_param_set(h_control, HPI_METER_PEAK_BALLISTICS,
+ attack, decay);
}
-u16 hpi_meter_get_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pn_attack, u16 *pn_decay)
+u16 hpi_meter_get_peak_ballistics(u32 h_control, u16 *pn_attack,
+ u16 *pn_decay)
{
u32 attack;
u32 decay;
u16 error;
- error = hpi_control_param2_get(ph_subsys, h_control,
- HPI_METER_PEAK_BALLISTICS, &attack, &decay);
+ error = hpi_control_param2_get(h_control, HPI_METER_PEAK_BALLISTICS,
+ &attack, &decay);
if (pn_attack)
*pn_attack = (short)attack;
@@ -2398,55 +2137,53 @@ u16 hpi_meter_get_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
return error;
}
-u16 hpi_microphone_set_phantom_power(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 on_off)
+u16 hpi_microphone_set_phantom_power(u32 h_control, u16 on_off)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_MICROPHONE_PHANTOM_POWER, (u32)on_off, 0);
+ return hpi_control_param_set(h_control, HPI_MICROPHONE_PHANTOM_POWER,
+ (u32)on_off, 0);
}
-u16 hpi_microphone_get_phantom_power(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_on_off)
+u16 hpi_microphone_get_phantom_power(u32 h_control, u16 *pw_on_off)
{
u16 error = 0;
u32 on_off = 0;
- error = hpi_control_param1_get(ph_subsys, h_control,
+ error = hpi_control_param1_get(h_control,
HPI_MICROPHONE_PHANTOM_POWER, &on_off);
if (pw_on_off)
*pw_on_off = (u16)on_off;
return error;
}
-u16 hpi_multiplexer_set_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source_node_type, u16 source_node_index)
+u16 hpi_multiplexer_set_source(u32 h_control, u16 source_node_type,
+ u16 source_node_index)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_MULTIPLEXER_SOURCE, source_node_type, source_node_index);
+ return hpi_control_param_set(h_control, HPI_MULTIPLEXER_SOURCE,
+ source_node_type, source_node_index);
}
-u16 hpi_multiplexer_get_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *source_node_type, u16 *source_node_index)
+u16 hpi_multiplexer_get_source(u32 h_control, u16 *source_node_type,
+ u16 *source_node_index)
{
u32 node, index;
- u16 error = hpi_control_param2_get(ph_subsys, h_control,
+ u16 err = hpi_control_param2_get(h_control,
HPI_MULTIPLEXER_SOURCE, &node,
&index);
if (source_node_type)
*source_node_type = (u16)node;
if (source_node_index)
*source_node_index = (u16)index;
- return error;
+ return err;
}
-u16 hpi_multiplexer_query_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 *source_node_type,
- u16 *source_node_index)
+u16 hpi_multiplexer_query_source(u32 h_control, u16 index,
+ u16 *source_node_type, u16 *source_node_index)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_MULTIPLEXER_QUERYSOURCE;
hm.u.c.param1 = index;
@@ -2459,15 +2196,15 @@ u16 hpi_multiplexer_query_source(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_parametricEQ__get_info(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_number_of_bands, u16 *pw_on_off)
+u16 hpi_parametric_eq_get_info(u32 h_control, u16 *pw_number_of_bands,
+ u16 *pw_on_off)
{
u32 oB = 0;
u32 oO = 0;
u16 error = 0;
- error = hpi_control_param2_get(ph_subsys, h_control,
- HPI_EQUALIZER_NUM_FILTERS, &oO, &oB);
+ error = hpi_control_param2_get(h_control, HPI_EQUALIZER_NUM_FILTERS,
+ &oO, &oB);
if (pw_number_of_bands)
*pw_number_of_bands = (u16)oB;
if (pw_on_off)
@@ -2475,23 +2212,22 @@ u16 hpi_parametricEQ__get_info(const struct hpi_hsubsys *ph_subsys,
return error;
}
-u16 hpi_parametricEQ__set_state(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 on_off)
+u16 hpi_parametric_eq_set_state(u32 h_control, u16 on_off)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_EQUALIZER_NUM_FILTERS, on_off, 0);
+ return hpi_control_param_set(h_control, HPI_EQUALIZER_NUM_FILTERS,
+ on_off, 0);
}
-u16 hpi_parametricEQ__get_band(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 *pn_type, u32 *pfrequency_hz,
- short *pnQ100, short *pn_gain0_01dB)
+u16 hpi_parametric_eq_get_band(u32 h_control, u16 index, u16 *pn_type,
+ u32 *pfrequency_hz, short *pnQ100, short *pn_gain0_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_EQUALIZER_FILTER;
hm.u.c.param2 = index;
@@ -2509,16 +2245,16 @@ u16 hpi_parametricEQ__get_band(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_parametricEQ__set_band(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, u16 type, u32 frequency_hz, short q100,
- short gain0_01dB)
+u16 hpi_parametric_eq_set_band(u32 h_control, u16 index, u16 type,
+ u32 frequency_hz, short q100, short gain0_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.param1 = frequency_hz;
hm.u.c.param2 = (index & 0xFFFFL) + ((u32)type << 16);
@@ -2531,8 +2267,7 @@ u16 hpi_parametricEQ__set_band(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 index, short coeffs[5]
+u16 hpi_parametric_eq_get_coeffs(u32 h_control, u16 index, short coeffs[5]
)
{
struct hpi_message hm;
@@ -2540,7 +2275,8 @@ u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys,
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_EQUALIZER_COEFFICIENTS;
hm.u.c.param2 = index;
@@ -2555,444 +2291,388 @@ u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_sample_clock_query_source(const struct hpi_hsubsys *ph_subsys,
- const u32 h_clock, const u32 index, u16 *pw_source)
+u16 hpi_sample_clock_query_source(const u32 h_clock, const u32 index,
+ u16 *pw_source)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_clock, HPI_SAMPLECLOCK_SOURCE,
- index, 0, &qr);
+ err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_SOURCE, index, 0,
+ &qr);
*pw_source = (u16)qr;
return err;
}
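
hpi_sample_clock_query_source() above, like the other query_* helpers in this patch, returns one supported setting per call for a given index. A hedged usage sketch of the usual enumeration loop follows; the stop-on-first-error convention and the safety bound are assumptions for illustration.

#include <stdint.h>

typedef uint32_t u32;
typedef uint16_t u16;

/* prototype as it reads after this patch (defined above in this file) */
u16 hpi_sample_clock_query_source(const u32 h_clock, const u32 index,
	u16 *pw_source);

static void sketch_list_clock_sources(u32 h_clock)
{
	u32 index;
	u16 source;

	for (index = 0; index < 32; index++) {	/* arbitrary safety bound */
		if (hpi_sample_clock_query_source(h_clock, index, &source))
			break;	/* assumed: error marks the end of the list */
		/* 'source' now holds one supported clock-source id */
	}
}
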
-u16 hpi_sample_clock_set_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source)
+u16 hpi_sample_clock_set_source(u32 h_control, u16 source)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_SAMPLECLOCK_SOURCE, source, 0);
+ return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_SOURCE,
+ source, 0);
}
-u16 hpi_sample_clock_get_source(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_source)
+u16 hpi_sample_clock_get_source(u32 h_control, u16 *pw_source)
{
- u16 error = 0;
+ u16 err = 0;
u32 source = 0;
- error = hpi_control_param1_get(ph_subsys, h_control,
- HPI_SAMPLECLOCK_SOURCE, &source);
- if (!error)
+ err = hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_SOURCE,
+ &source);
+ if (!err)
if (pw_source)
*pw_source = (u16)source;
- return error;
+ return err;
}
-u16 hpi_sample_clock_query_source_index(const struct hpi_hsubsys *ph_subsys,
- const u32 h_clock, const u32 index, const u32 source,
- u16 *pw_source_index)
+u16 hpi_sample_clock_query_source_index(const u32 h_clock, const u32 index,
+ const u32 source, u16 *pw_source_index)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_clock,
- HPI_SAMPLECLOCK_SOURCE_INDEX, index, source, &qr);
+ err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_SOURCE_INDEX, index,
+ source, &qr);
*pw_source_index = (u16)qr;
return err;
}
-u16 hpi_sample_clock_set_source_index(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 source_index)
+u16 hpi_sample_clock_set_source_index(u32 h_control, u16 source_index)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_SAMPLECLOCK_SOURCE_INDEX, source_index, 0);
+ return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_SOURCE_INDEX,
+ source_index, 0);
}
-u16 hpi_sample_clock_get_source_index(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u16 *pw_source_index)
+u16 hpi_sample_clock_get_source_index(u32 h_control, u16 *pw_source_index)
{
- u16 error = 0;
+ u16 err = 0;
u32 source_index = 0;
- error = hpi_control_param1_get(ph_subsys, h_control,
- HPI_SAMPLECLOCK_SOURCE_INDEX, &source_index);
- if (!error)
+ err = hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_SOURCE_INDEX,
+ &source_index);
+ if (!err)
if (pw_source_index)
*pw_source_index = (u16)source_index;
- return error;
+ return err;
}
-u16 hpi_sample_clock_query_local_rate(const struct hpi_hsubsys *ph_subsys,
- const u32 h_clock, const u32 index, u32 *prate)
+u16 hpi_sample_clock_query_local_rate(const u32 h_clock, const u32 index,
+ u32 *prate)
{
u16 err;
- err = hpi_control_query(ph_subsys, h_clock,
- HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, index, 0, prate);
+ err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE,
+ index, 0, prate);
return err;
}
-u16 hpi_sample_clock_set_local_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 sample_rate)
+u16 hpi_sample_clock_set_local_rate(u32 h_control, u32 sample_rate)
{
- return hpi_control_param_set(ph_subsys, h_control,
+ return hpi_control_param_set(h_control,
HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, sample_rate, 0);
}
-u16 hpi_sample_clock_get_local_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *psample_rate)
+u16 hpi_sample_clock_get_local_rate(u32 h_control, u32 *psample_rate)
{
- u16 error = 0;
+ u16 err = 0;
u32 sample_rate = 0;
- error = hpi_control_param1_get(ph_subsys, h_control,
+ err = hpi_control_param1_get(h_control,
HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, &sample_rate);
- if (!error)
+ if (!err)
if (psample_rate)
*psample_rate = sample_rate;
- return error;
+ return err;
}
-u16 hpi_sample_clock_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *psample_rate)
+u16 hpi_sample_clock_get_sample_rate(u32 h_control, u32 *psample_rate)
{
- u16 error = 0;
+ u16 err = 0;
u32 sample_rate = 0;
- error = hpi_control_param1_get(ph_subsys, h_control,
- HPI_SAMPLECLOCK_SAMPLERATE, &sample_rate);
- if (!error)
+ err = hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_SAMPLERATE,
+ &sample_rate);
+ if (!err)
if (psample_rate)
*psample_rate = sample_rate;
- return error;
+ return err;
}
-u16 hpi_sample_clock_set_auto(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 enable)
+u16 hpi_sample_clock_set_auto(u32 h_control, u32 enable)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_SAMPLECLOCK_AUTO, enable, 0);
+ return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_AUTO, enable,
+ 0);
}
-u16 hpi_sample_clock_get_auto(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *penable)
+u16 hpi_sample_clock_get_auto(u32 h_control, u32 *penable)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_SAMPLECLOCK_AUTO, penable);
+ return hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_AUTO,
+ penable);
}
-u16 hpi_sample_clock_set_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 lock)
+u16 hpi_sample_clock_set_local_rate_lock(u32 h_control, u32 lock)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_SAMPLECLOCK_LOCAL_LOCK, lock, 0);
+ return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_LOCAL_LOCK,
+ lock, 0);
}
-u16 hpi_sample_clock_get_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *plock)
+u16 hpi_sample_clock_get_local_rate_lock(u32 h_control, u32 *plock)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_SAMPLECLOCK_LOCAL_LOCK, plock);
+ return hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_LOCAL_LOCK,
+ plock);
}
-u16 hpi_tone_detector_get_frequency(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 index, u32 *frequency)
+u16 hpi_tone_detector_get_frequency(u32 h_control, u32 index, u32 *frequency)
{
- return hpi_control_param_get(ph_subsys, h_control,
- HPI_TONEDETECTOR_FREQUENCY, index, 0, frequency, NULL);
+ return hpi_control_param_get(h_control, HPI_TONEDETECTOR_FREQUENCY,
+ index, 0, frequency, NULL);
}
-u16 hpi_tone_detector_get_state(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *state)
+u16 hpi_tone_detector_get_state(u32 h_control, u32 *state)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_TONEDETECTOR_STATE, state);
+ return hpi_control_param1_get(h_control, HPI_TONEDETECTOR_STATE,
+ state);
}
-u16 hpi_tone_detector_set_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 enable)
+u16 hpi_tone_detector_set_enable(u32 h_control, u32 enable)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE,
- (u32)enable, 0);
+ return hpi_control_param_set(h_control, HPI_GENERIC_ENABLE, enable,
+ 0);
}
-u16 hpi_tone_detector_get_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *enable)
+u16 hpi_tone_detector_get_enable(u32 h_control, u32 *enable)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_GENERIC_ENABLE, enable);
+ return hpi_control_param1_get(h_control, HPI_GENERIC_ENABLE, enable);
}
-u16 hpi_tone_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 event_enable)
+u16 hpi_tone_detector_set_event_enable(u32 h_control, u32 event_enable)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_GENERIC_EVENT_ENABLE, (u32)event_enable, 0);
+ return hpi_control_param_set(h_control, HPI_GENERIC_EVENT_ENABLE,
+ (u32)event_enable, 0);
}
-u16 hpi_tone_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *event_enable)
+u16 hpi_tone_detector_get_event_enable(u32 h_control, u32 *event_enable)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_GENERIC_EVENT_ENABLE, event_enable);
+ return hpi_control_param1_get(h_control, HPI_GENERIC_EVENT_ENABLE,
+ event_enable);
}
-u16 hpi_tone_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, int threshold)
+u16 hpi_tone_detector_set_threshold(u32 h_control, int threshold)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_TONEDETECTOR_THRESHOLD, (u32)threshold, 0);
+ return hpi_control_param_set(h_control, HPI_TONEDETECTOR_THRESHOLD,
+ (u32)threshold, 0);
}
-u16 hpi_tone_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, int *threshold)
+u16 hpi_tone_detector_get_threshold(u32 h_control, int *threshold)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_TONEDETECTOR_THRESHOLD, (u32 *)threshold);
+ return hpi_control_param1_get(h_control, HPI_TONEDETECTOR_THRESHOLD,
+ (u32 *)threshold);
}
-u16 hpi_silence_detector_get_state(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *state)
+u16 hpi_silence_detector_get_state(u32 h_control, u32 *state)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_SILENCEDETECTOR_STATE, state);
+ return hpi_control_param1_get(h_control, HPI_SILENCEDETECTOR_STATE,
+ state);
}
-u16 hpi_silence_detector_set_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 enable)
+u16 hpi_silence_detector_set_enable(u32 h_control, u32 enable)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE,
- (u32)enable, 0);
+ return hpi_control_param_set(h_control, HPI_GENERIC_ENABLE, enable,
+ 0);
}
-u16 hpi_silence_detector_get_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *enable)
+u16 hpi_silence_detector_get_enable(u32 h_control, u32 *enable)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_GENERIC_ENABLE, enable);
+ return hpi_control_param1_get(h_control, HPI_GENERIC_ENABLE, enable);
}
-u16 hpi_silence_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 event_enable)
+u16 hpi_silence_detector_set_event_enable(u32 h_control, u32 event_enable)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_GENERIC_EVENT_ENABLE, event_enable, 0);
+ return hpi_control_param_set(h_control, HPI_GENERIC_EVENT_ENABLE,
+ event_enable, 0);
}
-u16 hpi_silence_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *event_enable)
+u16 hpi_silence_detector_get_event_enable(u32 h_control, u32 *event_enable)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_GENERIC_EVENT_ENABLE, event_enable);
+ return hpi_control_param1_get(h_control, HPI_GENERIC_EVENT_ENABLE,
+ event_enable);
}
-u16 hpi_silence_detector_set_delay(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 delay)
+u16 hpi_silence_detector_set_delay(u32 h_control, u32 delay)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_SILENCEDETECTOR_DELAY, delay, 0);
+ return hpi_control_param_set(h_control, HPI_SILENCEDETECTOR_DELAY,
+ delay, 0);
}
-u16 hpi_silence_detector_get_delay(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *delay)
+u16 hpi_silence_detector_get_delay(u32 h_control, u32 *delay)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_SILENCEDETECTOR_DELAY, delay);
+ return hpi_control_param1_get(h_control, HPI_SILENCEDETECTOR_DELAY,
+ delay);
}
-u16 hpi_silence_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, int threshold)
+u16 hpi_silence_detector_set_threshold(u32 h_control, int threshold)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_SILENCEDETECTOR_THRESHOLD, threshold, 0);
+ return hpi_control_param_set(h_control, HPI_SILENCEDETECTOR_THRESHOLD,
+ threshold, 0);
}
-u16 hpi_silence_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, int *threshold)
+u16 hpi_silence_detector_get_threshold(u32 h_control, int *threshold)
{
- return hpi_control_param1_get(ph_subsys, h_control,
+ return hpi_control_param1_get(h_control,
HPI_SILENCEDETECTOR_THRESHOLD, (u32 *)threshold);
}
-u16 hpi_tuner_query_band(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, u16 *pw_band)
+u16 hpi_tuner_query_band(const u32 h_tuner, const u32 index, u16 *pw_band)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_BAND, index, 0,
- &qr);
+ err = hpi_control_query(h_tuner, HPI_TUNER_BAND, index, 0, &qr);
*pw_band = (u16)qr;
return err;
}
-u16 hpi_tuner_set_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 band)
+u16 hpi_tuner_set_band(u32 h_control, u16 band)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_BAND,
- band, 0);
+ return hpi_control_param_set(h_control, HPI_TUNER_BAND, band, 0);
}
-u16 hpi_tuner_get_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 *pw_band)
+u16 hpi_tuner_get_band(u32 h_control, u16 *pw_band)
{
u32 band = 0;
u16 error = 0;
- error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_BAND,
- &band);
+ error = hpi_control_param1_get(h_control, HPI_TUNER_BAND, &band);
if (pw_band)
*pw_band = (u16)band;
return error;
}
-u16 hpi_tuner_query_frequency(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, const u16 band, u32 *pfreq)
+u16 hpi_tuner_query_frequency(const u32 h_tuner, const u32 index,
+ const u16 band, u32 *pfreq)
{
- return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_FREQ, index,
- band, pfreq);
+ return hpi_control_query(h_tuner, HPI_TUNER_FREQ, index, band, pfreq);
}
-u16 hpi_tuner_set_frequency(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 freq_ink_hz)
+u16 hpi_tuner_set_frequency(u32 h_control, u32 freq_ink_hz)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_FREQ,
- freq_ink_hz, 0);
+ return hpi_control_param_set(h_control, HPI_TUNER_FREQ, freq_ink_hz,
+ 0);
}
-u16 hpi_tuner_get_frequency(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pw_freq_ink_hz)
+u16 hpi_tuner_get_frequency(u32 h_control, u32 *pw_freq_ink_hz)
{
- return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_FREQ,
+ return hpi_control_param1_get(h_control, HPI_TUNER_FREQ,
pw_freq_ink_hz);
}
-u16 hpi_tuner_query_gain(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, u16 *pw_gain)
+u16 hpi_tuner_query_gain(const u32 h_tuner, const u32 index, u16 *pw_gain)
{
u32 qr;
u16 err;
- err = hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_BAND, index, 0,
- &qr);
+ err = hpi_control_query(h_tuner, HPI_TUNER_BAND, index, 0, &qr);
*pw_gain = (u16)qr;
return err;
}
-u16 hpi_tuner_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short gain)
+u16 hpi_tuner_set_gain(u32 h_control, short gain)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_GAIN,
- gain, 0);
+ return hpi_control_param_set(h_control, HPI_TUNER_GAIN, gain, 0);
}
-u16 hpi_tuner_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *pn_gain)
+u16 hpi_tuner_get_gain(u32 h_control, short *pn_gain)
{
u32 gain = 0;
u16 error = 0;
- error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_GAIN,
- &gain);
+ error = hpi_control_param1_get(h_control, HPI_TUNER_GAIN, &gain);
if (pn_gain)
*pn_gain = (u16)gain;
return error;
}
-u16 hpi_tuner_getRF_level(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *pw_level)
+u16 hpi_tuner_get_rf_level(u32 h_control, short *pw_level)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
- hm.u.c.attribute = HPI_TUNER_LEVEL;
- hm.u.c.param1 = HPI_TUNER_LEVEL_AVERAGE;
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
+ hm.u.cu.attribute = HPI_TUNER_LEVEL_AVG;
hpi_send_recv(&hm, &hr);
if (pw_level)
- *pw_level = (short)hr.u.c.param1;
+ *pw_level = hr.u.cu.tuner.s_level;
return hr.error;
}
-u16 hpi_tuner_get_rawRF_level(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short *pw_level)
+u16 hpi_tuner_get_raw_rf_level(u32 h_control, short *pw_level)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
- hm.u.c.attribute = HPI_TUNER_LEVEL;
- hm.u.c.param1 = HPI_TUNER_LEVEL_RAW;
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
+ hm.u.cu.attribute = HPI_TUNER_LEVEL_RAW;
hpi_send_recv(&hm, &hr);
if (pw_level)
- *pw_level = (short)hr.u.c.param1;
+ *pw_level = hr.u.cu.tuner.s_level;
return hr.error;
}
-u16 hpi_tuner_query_deemphasis(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, const u32 index, const u16 band, u32 *pdeemphasis)
+u16 hpi_tuner_query_deemphasis(const u32 h_tuner, const u32 index,
+ const u16 band, u32 *pdeemphasis)
{
- return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_DEEMPHASIS,
- index, band, pdeemphasis);
+ return hpi_control_query(h_tuner, HPI_TUNER_DEEMPHASIS, index, band,
+ pdeemphasis);
}
-u16 hpi_tuner_set_deemphasis(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 deemphasis)
+u16 hpi_tuner_set_deemphasis(u32 h_control, u32 deemphasis)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_TUNER_DEEMPHASIS, deemphasis, 0);
+ return hpi_control_param_set(h_control, HPI_TUNER_DEEMPHASIS,
+ deemphasis, 0);
}
-u16 hpi_tuner_get_deemphasis(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pdeemphasis)
+u16 hpi_tuner_get_deemphasis(u32 h_control, u32 *pdeemphasis)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_TUNER_DEEMPHASIS, pdeemphasis);
+ return hpi_control_param1_get(h_control, HPI_TUNER_DEEMPHASIS,
+ pdeemphasis);
}
-u16 hpi_tuner_query_program(const struct hpi_hsubsys *ph_subsys,
- const u32 h_tuner, u32 *pbitmap_program)
+u16 hpi_tuner_query_program(const u32 h_tuner, u32 *pbitmap_program)
{
- return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_PROGRAM, 0, 0,
+ return hpi_control_query(h_tuner, HPI_TUNER_PROGRAM, 0, 0,
pbitmap_program);
}
-u16 hpi_tuner_set_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 program)
+u16 hpi_tuner_set_program(u32 h_control, u32 program)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_PROGRAM,
- program, 0);
+ return hpi_control_param_set(h_control, HPI_TUNER_PROGRAM, program,
+ 0);
}
-u16 hpi_tuner_get_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 *pprogram)
+u16 hpi_tuner_get_program(u32 h_control, u32 *pprogram)
{
- return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_PROGRAM,
- pprogram);
+ return hpi_control_param1_get(h_control, HPI_TUNER_PROGRAM, pprogram);
}
-u16 hpi_tuner_get_hd_radio_dsp_version(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, char *psz_dsp_version, const u32 string_size)
+u16 hpi_tuner_get_hd_radio_dsp_version(u32 h_control, char *psz_dsp_version,
+ const u32 string_size)
{
return hpi_control_get_string(h_control,
HPI_TUNER_HDRADIO_DSP_VERSION, psz_dsp_version, string_size);
}
-u16 hpi_tuner_get_hd_radio_sdk_version(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, char *psz_sdk_version, const u32 string_size)
+u16 hpi_tuner_get_hd_radio_sdk_version(u32 h_control, char *psz_sdk_version,
+ const u32 string_size)
{
return hpi_control_get_string(h_control,
HPI_TUNER_HDRADIO_SDK_VERSION, psz_sdk_version, string_size);
}
-u16 hpi_tuner_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u16 *pw_status_mask, u16 *pw_status)
+u16 hpi_tuner_get_status(u32 h_control, u16 *pw_status_mask, u16 *pw_status)
{
u32 status = 0;
u16 error = 0;
- error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_STATUS,
- &status);
+ error = hpi_control_param1_get(h_control, HPI_TUNER_STATUS, &status);
if (pw_status) {
if (!error) {
*pw_status_mask = (u16)(status >> 16);
@@ -3005,50 +2685,44 @@ u16 hpi_tuner_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return error;
}
-u16 hpi_tuner_set_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 mode, u32 value)
+u16 hpi_tuner_set_mode(u32 h_control, u32 mode, u32 value)
{
- return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_MODE,
- mode, value);
+ return hpi_control_param_set(h_control, HPI_TUNER_MODE, mode, value);
}
-u16 hpi_tuner_get_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 mode, u32 *pn_value)
+u16 hpi_tuner_get_mode(u32 h_control, u32 mode, u32 *pn_value)
{
- return hpi_control_param_get(ph_subsys, h_control, HPI_TUNER_MODE,
- mode, 0, pn_value, NULL);
+ return hpi_control_param_get(h_control, HPI_TUNER_MODE, mode, 0,
+ pn_value, NULL);
}
-u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pquality)
+u16 hpi_tuner_get_hd_radio_signal_quality(u32 h_control, u32 *pquality)
{
- return hpi_control_param1_get(ph_subsys, h_control,
+ return hpi_control_param1_get(h_control,
HPI_TUNER_HDRADIO_SIGNAL_QUALITY, pquality);
}
-u16 hpi_tuner_get_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *pblend)
+u16 hpi_tuner_get_hd_radio_signal_blend(u32 h_control, u32 *pblend)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_TUNER_HDRADIO_BLEND, pblend);
+ return hpi_control_param1_get(h_control, HPI_TUNER_HDRADIO_BLEND,
+ pblend);
}
-u16 hpi_tuner_set_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, const u32 blend)
+u16 hpi_tuner_set_hd_radio_signal_blend(u32 h_control, const u32 blend)
{
- return hpi_control_param_set(ph_subsys, h_control,
- HPI_TUNER_HDRADIO_BLEND, blend, 0);
+ return hpi_control_param_set(h_control, HPI_TUNER_HDRADIO_BLEND,
+ blend, 0);
}
-u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *p_data)
+u16 hpi_tuner_get_rds(u32 h_control, char *p_data)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_TUNER_RDS;
hpi_send_recv(&hm, &hr);
if (p_data) {
@@ -3059,80 +2733,82 @@ u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 HPI_PAD__get_channel_name(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, char *psz_string, const u32 data_length)
+u16 hpi_pad_get_channel_name(u32 h_control, char *psz_string,
+ const u32 data_length)
{
return hpi_control_get_string(h_control, HPI_PAD_CHANNEL_NAME,
psz_string, data_length);
}
-u16 HPI_PAD__get_artist(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *psz_string, const u32 data_length)
+u16 hpi_pad_get_artist(u32 h_control, char *psz_string, const u32 data_length)
{
return hpi_control_get_string(h_control, HPI_PAD_ARTIST, psz_string,
data_length);
}
-u16 HPI_PAD__get_title(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *psz_string, const u32 data_length)
+u16 hpi_pad_get_title(u32 h_control, char *psz_string, const u32 data_length)
{
return hpi_control_get_string(h_control, HPI_PAD_TITLE, psz_string,
data_length);
}
-u16 HPI_PAD__get_comment(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- char *psz_string, const u32 data_length)
+u16 hpi_pad_get_comment(u32 h_control, char *psz_string,
+ const u32 data_length)
{
return hpi_control_get_string(h_control, HPI_PAD_COMMENT, psz_string,
data_length);
}
-u16 HPI_PAD__get_program_type(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, u32 *ppTY)
+u16 hpi_pad_get_program_type(u32 h_control, u32 *ppTY)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_PAD_PROGRAM_TYPE, ppTY);
+ return hpi_control_param1_get(h_control, HPI_PAD_PROGRAM_TYPE, ppTY);
}
-u16 HPI_PAD__get_rdsPI(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- u32 *ppI)
+u16 hpi_pad_get_rdsPI(u32 h_control, u32 *ppI)
{
- return hpi_control_param1_get(ph_subsys, h_control,
- HPI_PAD_PROGRAM_ID, ppI);
+ return hpi_control_param1_get(h_control, HPI_PAD_PROGRAM_ID, ppI);
}
-u16 hpi_volume_query_channels(const struct hpi_hsubsys *ph_subsys,
- const u32 h_volume, u32 *p_channels)
+u16 hpi_volume_query_channels(const u32 h_volume, u32 *p_channels)
{
- return hpi_control_query(ph_subsys, h_volume, HPI_VOLUME_NUM_CHANNELS,
- 0, 0, p_channels);
+ return hpi_control_query(h_volume, HPI_VOLUME_NUM_CHANNELS, 0, 0,
+ p_channels);
}
-u16 hpi_volume_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_log_gain[HPI_MAX_CHANNELS]
+u16 hpi_volume_set_gain(u32 h_control, short an_log_gain[HPI_MAX_CHANNELS]
)
{
return hpi_control_log_set2(h_control, HPI_VOLUME_GAIN,
an_log_gain[0], an_log_gain[1]);
}
-u16 hpi_volume_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_log_gain[HPI_MAX_CHANNELS]
+u16 hpi_volume_get_gain(u32 h_control, short an_log_gain[HPI_MAX_CHANNELS]
)
{
- return hpi_control_log_get2(ph_subsys, h_control, HPI_VOLUME_GAIN,
+ return hpi_control_log_get2(h_control, HPI_VOLUME_GAIN,
&an_log_gain[0], &an_log_gain[1]);
}
-u16 hpi_volume_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB)
+u16 hpi_volume_set_mute(u32 h_control, u32 mute)
+{
+ return hpi_control_param_set(h_control, HPI_VOLUME_MUTE, mute, 0);
+}
+
+u16 hpi_volume_get_mute(u32 h_control, u32 *mute)
+{
+ return hpi_control_param1_get(h_control, HPI_VOLUME_MUTE, mute);
+}
+
+u16 hpi_volume_query_range(u32 h_control, short *min_gain_01dB,
+ short *max_gain_01dB, short *step_gain_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_VOLUME_RANGE;
hpi_send_recv(&hm, &hr);
@@ -3150,16 +2826,17 @@ u16 hpi_volume_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_volume_auto_fade_profile(const struct hpi_hsubsys *ph_subsys,
- u32 h_control, short an_stop_gain0_01dB[HPI_MAX_CHANNELS],
- u32 duration_ms, u16 profile)
+u16 hpi_volume_auto_fade_profile(u32 h_control,
+ short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms,
+ u16 profile)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
memcpy(hm.u.c.an_log_value, an_stop_gain0_01dB,
sizeof(short) * HPI_MAX_CHANNELS);
@@ -3173,21 +2850,21 @@ u16 hpi_volume_auto_fade_profile(const struct hpi_hsubsys *ph_subsys,
return hr.error;
}
-u16 hpi_volume_auto_fade(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+u16 hpi_volume_auto_fade(u32 h_control,
short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms)
{
- return hpi_volume_auto_fade_profile(ph_subsys, h_control,
- an_stop_gain0_01dB, duration_ms, HPI_VOLUME_AUTOFADE_LOG);
+ return hpi_volume_auto_fade_profile(h_control, an_stop_gain0_01dB,
+ duration_ms, HPI_VOLUME_AUTOFADE_LOG);
}
-u16 hpi_vox_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short an_gain0_01dB)
+u16 hpi_vox_set_threshold(u32 h_control, short an_gain0_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_SET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_VOX_THRESHOLD;
hm.u.c.an_log_value[0] = an_gain0_01dB;
@@ -3197,14 +2874,14 @@ u16 hpi_vox_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-u16 hpi_vox_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
- short *an_gain0_01dB)
+u16 hpi_vox_get_threshold(u32 h_control, short *an_gain0_01dB)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
HPI_CONTROL_GET_STATE);
- u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
+ return HPI_ERROR_INVALID_HANDLE;
hm.u.c.attribute = HPI_VOX_THRESHOLD;
hpi_send_recv(&hm, &hr);
@@ -3213,728 +2890,3 @@ u16 hpi_vox_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
return hr.error;
}
-
-static size_t strv_packet_size = MIN_STRV_PACKET_SIZE;
-
-static size_t entity_type_to_size[LAST_ENTITY_TYPE] = {
- 0,
- sizeof(struct hpi_entity),
- sizeof(void *),
-
- sizeof(int),
- sizeof(float),
- sizeof(double),
-
- sizeof(char),
- sizeof(char),
-
- 4 * sizeof(char),
- 16 * sizeof(char),
- 6 * sizeof(char),
-};
-
-static inline size_t hpi_entity_size(struct hpi_entity *entity_ptr)
-{
- return entity_ptr->header.size;
-}
-
-static inline size_t hpi_entity_header_size(struct hpi_entity *entity_ptr)
-{
- return sizeof(entity_ptr->header);
-}
-
-static inline size_t hpi_entity_value_size(struct hpi_entity *entity_ptr)
-{
- return hpi_entity_size(entity_ptr) -
- hpi_entity_header_size(entity_ptr);
-}
-
-static inline size_t hpi_entity_item_count(struct hpi_entity *entity_ptr)
-{
- return hpi_entity_value_size(entity_ptr) /
- entity_type_to_size[entity_ptr->header.type];
-}
-
-static inline struct hpi_entity *hpi_entity_ptr_to_next(struct hpi_entity
- *entity_ptr)
-{
- return (void *)(((u8 *)entity_ptr) + hpi_entity_size(entity_ptr));
-}
-
-static inline u16 hpi_entity_check_type(const enum e_entity_type t)
-{
- if (t >= 0 && t < STR_TYPE_FIELD_MAX)
- return 0;
- return HPI_ERROR_ENTITY_TYPE_INVALID;
-}
-
-static inline u16 hpi_entity_check_role(const enum e_entity_role r)
-{
- if (r >= 0 && r < STR_ROLE_FIELD_MAX)
- return 0;
- return HPI_ERROR_ENTITY_ROLE_INVALID;
-}
-
-static u16 hpi_entity_get_next(struct hpi_entity *entity, int recursive_flag,
- void *guard_p, struct hpi_entity **next)
-{
- HPI_DEBUG_ASSERT(entity != NULL);
- HPI_DEBUG_ASSERT(next != NULL);
- HPI_DEBUG_ASSERT(hpi_entity_size(entity) != 0);
-
- if (guard_p <= (void *)entity) {
- *next = NULL;
- return 0;
- }
-
- if (recursive_flag && entity->header.type == entity_type_sequence)
- *next = (struct hpi_entity *)entity->value;
- else
- *next = (struct hpi_entity *)hpi_entity_ptr_to_next(entity);
-
- if (guard_p <= (void *)*next) {
- *next = NULL;
- return 0;
- }
-
- HPI_DEBUG_ASSERT(guard_p >= (void *)hpi_entity_ptr_to_next(*next));
- return 0;
-}
-
-u16 hpi_entity_find_next(struct hpi_entity *container_entity,
- enum e_entity_type type, enum e_entity_role role, int recursive_flag,
- struct hpi_entity **current_match)
-{
- struct hpi_entity *tmp = NULL;
- void *guard_p = NULL;
-
- HPI_DEBUG_ASSERT(container_entity != NULL);
- guard_p = hpi_entity_ptr_to_next(container_entity);
-
- if (*current_match != NULL)
- hpi_entity_get_next(*current_match, recursive_flag, guard_p,
- &tmp);
- else
- hpi_entity_get_next(container_entity, 1, guard_p, &tmp);
-
- while (tmp) {
- u16 err;
-
- HPI_DEBUG_ASSERT((void *)tmp >= (void *)container_entity);
-
- if ((!type || tmp->header.type == type) && (!role
- || tmp->header.role == role)) {
- *current_match = tmp;
- return 0;
- }
-
- err = hpi_entity_get_next(tmp, recursive_flag, guard_p,
- current_match);
- if (err)
- return err;
-
- tmp = *current_match;
- }
-
- *current_match = NULL;
- return 0;
-}
-
-void hpi_entity_free(struct hpi_entity *entity)
-{
- kfree(entity);
-}
-
-static u16 hpi_entity_alloc_and_copy(struct hpi_entity *src,
- struct hpi_entity **dst)
-{
- size_t buf_size;
- HPI_DEBUG_ASSERT(dst != NULL);
- HPI_DEBUG_ASSERT(src != NULL);
-
- buf_size = hpi_entity_size(src);
- *dst = kmalloc(buf_size, GFP_KERNEL);
- if (*dst == NULL)
- return HPI_ERROR_MEMORY_ALLOC;
- memcpy(*dst, src, buf_size);
- return 0;
-}
-
-u16 hpi_universal_info(const struct hpi_hsubsys *ph_subsys, u32 hC,
- struct hpi_entity **info)
-{
- struct hpi_msg_strv hm;
- struct hpi_res_strv *phr;
- u16 hpi_err;
- int remaining_attempts = 2;
- size_t resp_packet_size = 1024;
-
- *info = NULL;
-
- while (remaining_attempts--) {
- phr = kmalloc(resp_packet_size, GFP_KERNEL);
- HPI_DEBUG_ASSERT(phr != NULL);
-
- hpi_init_message_responseV1(&hm.h, (u16)sizeof(hm), &phr->h,
- (u16)resp_packet_size, HPI_OBJ_CONTROL,
- HPI_CONTROL_GET_INFO);
- u32TOINDEXES(hC, &hm.h.adapter_index, &hm.h.obj_index);
-
- hm.strv.header.size = sizeof(hm.strv);
- phr->strv.header.size = resp_packet_size - sizeof(phr->h);
-
- hpi_send_recv((struct hpi_message *)&hm.h,
- (struct hpi_response *)&phr->h);
- if (phr->h.error == HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL) {
-
- HPI_DEBUG_ASSERT(phr->h.specific_error >
- MIN_STRV_PACKET_SIZE
- && phr->h.specific_error < 1500);
- resp_packet_size = phr->h.specific_error;
- } else {
- remaining_attempts = 0;
- if (!phr->h.error)
- hpi_entity_alloc_and_copy(&phr->strv, info);
- }
-
- hpi_err = phr->h.error;
- kfree(phr);
- }
-
- return hpi_err;
-}
-
-u16 hpi_universal_get(const struct hpi_hsubsys *ph_subsys, u32 hC,
- struct hpi_entity **value)
-{
- struct hpi_msg_strv hm;
- struct hpi_res_strv *phr;
- u16 hpi_err;
- int remaining_attempts = 2;
-
- *value = NULL;
-
- while (remaining_attempts--) {
- phr = kmalloc(strv_packet_size, GFP_KERNEL);
- if (!phr)
- return HPI_ERROR_MEMORY_ALLOC;
-
- hpi_init_message_responseV1(&hm.h, (u16)sizeof(hm), &phr->h,
- (u16)strv_packet_size, HPI_OBJ_CONTROL,
- HPI_CONTROL_GET_STATE);
- u32TOINDEXES(hC, &hm.h.adapter_index, &hm.h.obj_index);
-
- hm.strv.header.size = sizeof(hm.strv);
- phr->strv.header.size = strv_packet_size - sizeof(phr->h);
-
- hpi_send_recv((struct hpi_message *)&hm.h,
- (struct hpi_response *)&phr->h);
- if (phr->h.error == HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL) {
-
- HPI_DEBUG_ASSERT(phr->h.specific_error >
- MIN_STRV_PACKET_SIZE
- && phr->h.specific_error < 1000);
- strv_packet_size = phr->h.specific_error;
- } else {
- remaining_attempts = 0;
- if (!phr->h.error)
- hpi_entity_alloc_and_copy(&phr->strv, value);
- }
-
- hpi_err = phr->h.error;
- kfree(phr);
- }
-
- return hpi_err;
-}
-
-u16 hpi_universal_set(const struct hpi_hsubsys *ph_subsys, u32 hC,
- struct hpi_entity *value)
-{
- struct hpi_msg_strv *phm;
- struct hpi_res_strv hr;
-
- phm = kmalloc(sizeof(phm->h) + value->header.size, GFP_KERNEL);
- HPI_DEBUG_ASSERT(phm != NULL);
-
- hpi_init_message_responseV1(&phm->h,
- sizeof(phm->h) + value->header.size, &hr.h, sizeof(hr),
- HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE);
- u32TOINDEXES(hC, &phm->h.adapter_index, &phm->h.obj_index);
- hr.strv.header.size = sizeof(hr.strv);
-
- memcpy(&phm->strv, value, value->header.size);
- hpi_send_recv((struct hpi_message *)&phm->h,
- (struct hpi_response *)&hr.h);
-
- return hr.h.error;
-}
-
-u16 hpi_entity_alloc_and_pack(const enum e_entity_type type,
- const size_t item_count, const enum e_entity_role role, void *value,
- struct hpi_entity **entity)
-{
- size_t bytes_to_copy, total_size;
- u16 hE = 0;
- *entity = NULL;
-
- hE = hpi_entity_check_type(type);
- if (hE)
- return hE;
-
- HPI_DEBUG_ASSERT(role > entity_role_null && type < LAST_ENTITY_TYPE);
-
- bytes_to_copy = entity_type_to_size[type] * item_count;
- total_size = hpi_entity_header_size(*entity) + bytes_to_copy;
-
- HPI_DEBUG_ASSERT(total_size >= hpi_entity_header_size(*entity)
- && total_size < STR_SIZE_FIELD_MAX);
-
- *entity = kmalloc(total_size, GFP_KERNEL);
- if (*entity == NULL)
- return HPI_ERROR_MEMORY_ALLOC;
- memcpy((*entity)->value, value, bytes_to_copy);
- (*entity)->header.size =
- hpi_entity_header_size(*entity) + bytes_to_copy;
- (*entity)->header.type = type;
- (*entity)->header.role = role;
- return 0;
-}
-
-u16 hpi_entity_copy_value_from(struct hpi_entity *entity,
- enum e_entity_type type, size_t item_count, void *value_dst_p)
-{
- size_t bytes_to_copy;
-
- if (entity->header.type != type)
- return HPI_ERROR_ENTITY_TYPE_MISMATCH;
-
- if (hpi_entity_item_count(entity) != item_count)
- return HPI_ERROR_ENTITY_ITEM_COUNT;
-
- bytes_to_copy = entity_type_to_size[type] * item_count;
- memcpy(value_dst_p, entity->value, bytes_to_copy);
- return 0;
-}
-
-u16 hpi_entity_unpack(struct hpi_entity *entity, enum e_entity_type *type,
- size_t *item_count, enum e_entity_role *role, void **value)
-{
- u16 err = 0;
- HPI_DEBUG_ASSERT(entity != NULL);
-
- if (type)
- *type = entity->header.type;
-
- if (role)
- *role = entity->header.role;
-
- if (value)
- *value = entity->value;
-
- if (item_count != NULL) {
- if (entity->header.type == entity_type_sequence) {
- void *guard_p = hpi_entity_ptr_to_next(entity);
- struct hpi_entity *next = NULL;
- void *contents = entity->value;
-
- *item_count = 0;
- while (contents < guard_p) {
- (*item_count)++;
- err = hpi_entity_get_next(contents, 0,
- guard_p, &next);
- if (next == NULL || err)
- break;
- contents = next;
- }
- } else {
- *item_count = hpi_entity_item_count(entity);
- }
- }
- return err;
-}
-
-u16 hpi_gpio_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_gpio, u16 *pw_number_input_bits, u16 *pw_number_output_bits)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_OPEN);
- hm.adapter_index = adapter_index;
-
- hpi_send_recv(&hm, &hr);
-
- if (hr.error == 0) {
- *ph_gpio =
- hpi_indexes_to_handle(HPI_OBJ_GPIO, adapter_index, 0);
- if (pw_number_input_bits)
- *pw_number_input_bits = hr.u.l.number_input_bits;
- if (pw_number_output_bits)
- *pw_number_output_bits = hr.u.l.number_output_bits;
- } else
- *ph_gpio = 0;
- return hr.error;
-}
-
-u16 hpi_gpio_read_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 bit_index, u16 *pw_bit_data)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_READ_BIT);
- u32TOINDEX(h_gpio, &hm.adapter_index);
- hm.u.l.bit_index = bit_index;
-
- hpi_send_recv(&hm, &hr);
-
- *pw_bit_data = hr.u.l.bit_data[0];
- return hr.error;
-}
-
-u16 hpi_gpio_read_all_bits(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 aw_all_bit_data[4]
- )
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_READ_ALL);
- u32TOINDEX(h_gpio, &hm.adapter_index);
-
- hpi_send_recv(&hm, &hr);
-
- if (aw_all_bit_data) {
- aw_all_bit_data[0] = hr.u.l.bit_data[0];
- aw_all_bit_data[1] = hr.u.l.bit_data[1];
- aw_all_bit_data[2] = hr.u.l.bit_data[2];
- aw_all_bit_data[3] = hr.u.l.bit_data[3];
- }
- return hr.error;
-}
-
-u16 hpi_gpio_write_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 bit_index, u16 bit_data)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_WRITE_BIT);
- u32TOINDEX(h_gpio, &hm.adapter_index);
- hm.u.l.bit_index = bit_index;
- hm.u.l.bit_data = bit_data;
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_gpio_write_status(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
- u16 aw_all_bit_data[4]
- )
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO,
- HPI_GPIO_WRITE_STATUS);
- u32TOINDEX(h_gpio, &hm.adapter_index);
-
- hpi_send_recv(&hm, &hr);
-
- if (aw_all_bit_data) {
- aw_all_bit_data[0] = hr.u.l.bit_data[0];
- aw_all_bit_data[1] = hr.u.l.bit_data[1];
- aw_all_bit_data[2] = hr.u.l.bit_data[2];
- aw_all_bit_data[3] = hr.u.l.bit_data[3];
- }
- return hr.error;
-}
-
-u16 hpi_async_event_open(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u32 *ph_async)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
- HPI_ASYNCEVENT_OPEN);
- hm.adapter_index = adapter_index;
-
- hpi_send_recv(&hm, &hr);
-
- if (hr.error == 0)
-
- *ph_async =
- hpi_indexes_to_handle(HPI_OBJ_ASYNCEVENT,
- adapter_index, 0);
- else
- *ph_async = 0;
- return hr.error;
-
-}
-
-u16 hpi_async_event_close(const struct hpi_hsubsys *ph_subsys, u32 h_async)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
- HPI_ASYNCEVENT_OPEN);
- u32TOINDEX(h_async, &hm.adapter_index);
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_async_event_wait(const struct hpi_hsubsys *ph_subsys, u32 h_async,
- u16 maximum_events, struct hpi_async_event *p_events,
- u16 *pw_number_returned)
-{
-
- return 0;
-}
-
-u16 hpi_async_event_get_count(const struct hpi_hsubsys *ph_subsys,
- u32 h_async, u16 *pw_count)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
- HPI_ASYNCEVENT_GETCOUNT);
- u32TOINDEX(h_async, &hm.adapter_index);
-
- hpi_send_recv(&hm, &hr);
-
- if (hr.error == 0)
- if (pw_count)
- *pw_count = hr.u.as.u.count.count;
-
- return hr.error;
-}
-
-u16 hpi_async_event_get(const struct hpi_hsubsys *ph_subsys, u32 h_async,
- u16 maximum_events, struct hpi_async_event *p_events,
- u16 *pw_number_returned)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
- HPI_ASYNCEVENT_GET);
- u32TOINDEX(h_async, &hm.adapter_index);
-
- hpi_send_recv(&hm, &hr);
- if (!hr.error) {
- memcpy(p_events, &hr.u.as.u.event,
- sizeof(struct hpi_async_event));
- *pw_number_returned = 1;
- }
-
- return hr.error;
-}
-
-u16 hpi_nv_memory_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_nv_memory, u16 *pw_size_in_bytes)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY,
- HPI_NVMEMORY_OPEN);
- hm.adapter_index = adapter_index;
-
- hpi_send_recv(&hm, &hr);
-
- if (hr.error == 0) {
- *ph_nv_memory =
- hpi_indexes_to_handle(HPI_OBJ_NVMEMORY, adapter_index,
- 0);
- if (pw_size_in_bytes)
- *pw_size_in_bytes = hr.u.n.size_in_bytes;
- } else
- *ph_nv_memory = 0;
- return hr.error;
-}
-
-u16 hpi_nv_memory_read_byte(const struct hpi_hsubsys *ph_subsys,
- u32 h_nv_memory, u16 index, u16 *pw_data)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY,
- HPI_NVMEMORY_READ_BYTE);
- u32TOINDEX(h_nv_memory, &hm.adapter_index);
- hm.u.n.address = index;
-
- hpi_send_recv(&hm, &hr);
-
- *pw_data = hr.u.n.data;
- return hr.error;
-}
-
-u16 hpi_nv_memory_write_byte(const struct hpi_hsubsys *ph_subsys,
- u32 h_nv_memory, u16 index, u16 data)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY,
- HPI_NVMEMORY_WRITE_BYTE);
- u32TOINDEX(h_nv_memory, &hm.adapter_index);
- hm.u.n.address = index;
- hm.u.n.data = data;
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_profile_open_all(const struct hpi_hsubsys *ph_subsys,
- u16 adapter_index, u16 profile_index, u32 *ph_profile,
- u16 *pw_max_profiles)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
- HPI_PROFILE_OPEN_ALL);
- hm.adapter_index = adapter_index;
- hm.obj_index = profile_index;
- hpi_send_recv(&hm, &hr);
-
- *pw_max_profiles = hr.u.p.u.o.max_profiles;
- if (hr.error == 0)
- *ph_profile =
- hpi_indexes_to_handle(HPI_OBJ_PROFILE, adapter_index,
- profile_index);
- else
- *ph_profile = 0;
- return hr.error;
-}
-
-u16 hpi_profile_get(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
- u16 bin_index, u16 *pw_seconds, u32 *pmicro_seconds, u32 *pcall_count,
- u32 *pmax_micro_seconds, u32 *pmin_micro_seconds)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_GET);
- u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
- hm.u.p.bin_index = bin_index;
- hpi_send_recv(&hm, &hr);
- if (pw_seconds)
- *pw_seconds = hr.u.p.u.t.seconds;
- if (pmicro_seconds)
- *pmicro_seconds = hr.u.p.u.t.micro_seconds;
- if (pcall_count)
- *pcall_count = hr.u.p.u.t.call_count;
- if (pmax_micro_seconds)
- *pmax_micro_seconds = hr.u.p.u.t.max_micro_seconds;
- if (pmin_micro_seconds)
- *pmin_micro_seconds = hr.u.p.u.t.min_micro_seconds;
- return hr.error;
-}
-
-u16 hpi_profile_get_utilization(const struct hpi_hsubsys *ph_subsys,
- u32 h_profile, u32 *putilization)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
- HPI_PROFILE_GET_UTILIZATION);
- u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
- hpi_send_recv(&hm, &hr);
- if (hr.error) {
- if (putilization)
- *putilization = 0;
- } else {
- if (putilization)
- *putilization = hr.u.p.u.t.call_count;
- }
- return hr.error;
-}
-
-u16 hpi_profile_get_name(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
- u16 bin_index, char *sz_name, u16 name_length)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
- HPI_PROFILE_GET_NAME);
- u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
- hm.u.p.bin_index = bin_index;
- hpi_send_recv(&hm, &hr);
- if (hr.error) {
- if (sz_name)
- strcpy(sz_name, "??");
- } else {
- if (sz_name)
- memcpy(sz_name, (char *)hr.u.p.u.n.sz_name,
- name_length);
- }
- return hr.error;
-}
-
-u16 hpi_profile_start_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
- HPI_PROFILE_START_ALL);
- u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_profile_stop_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
- HPI_PROFILE_STOP_ALL);
- u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_watchdog_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
- u32 *ph_watchdog)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_WATCHDOG,
- HPI_WATCHDOG_OPEN);
- hm.adapter_index = adapter_index;
-
- hpi_send_recv(&hm, &hr);
-
- if (hr.error == 0)
- *ph_watchdog =
- hpi_indexes_to_handle(HPI_OBJ_WATCHDOG, adapter_index,
- 0);
- else
- *ph_watchdog = 0;
- return hr.error;
-}
-
-u16 hpi_watchdog_set_time(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog,
- u32 time_millisec)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_WATCHDOG,
- HPI_WATCHDOG_SET_TIME);
- u32TOINDEX(h_watchdog, &hm.adapter_index);
- hm.u.w.time_ms = time_millisec;
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
-
-u16 hpi_watchdog_ping(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog)
-{
- struct hpi_message hm;
- struct hpi_response hr;
- hpi_init_message_response(&hm, &hr, HPI_OBJ_WATCHDOG,
- HPI_WATCHDOG_PING);
- u32TOINDEX(h_watchdog, &hm.adapter_index);
-
- hpi_send_recv(&hm, &hr);
-
- return hr.error;
-}
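The hpifunc.c hunks above all apply the same two-part conversion: the unused ph_subsys argument disappears from the public prototypes, and the unchecked u32TOINDEXES() macro is replaced by hpi_handle_indexes(), whose return value lets each function bail out with HPI_ERROR_INVALID_HANDLE before a message is built or sent. The sketch below shows the resulting calling pattern in isolation; the stub types, the error value and the hpi_handle_indexes() body are illustrative stand-ins, not the driver's real definitions.

/* Sketch only: local stand-ins so the pattern compiles on its own. */
#include <stdint.h>

typedef uint16_t u16;
typedef uint32_t u32;

#define HPI_ERROR_INVALID_HANDLE 301	/* illustrative value */

struct hpi_message {
	u16 adapter_index;
	u16 obj_index;
	/* ... */
};

/* Hypothetical decoder: reject a zero handle, otherwise unpack it. */
static u16 hpi_handle_indexes(u32 h, u16 *adapter_index, u16 *obj_index)
{
	if (!h)
		return HPI_ERROR_INVALID_HANDLE;
	*adapter_index = (u16)(h >> 16);
	*obj_index = (u16)(h & 0xffff);
	return 0;
}

/* New-style control accessor: validate the handle before doing any work. */
static u16 example_control_get(u32 h_control, struct hpi_message *hm)
{
	if (hpi_handle_indexes(h_control, &hm->adapter_index, &hm->obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	/* ... set the attribute and call hpi_send_recv() here ... */
	return 0;
}

Callers change accordingly: hpi_sample_clock_set_source(ph_subsys, h_control, source) becomes hpi_sample_clock_set_source(h_control, source), with the same u16 error return.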
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 8e1d099ed7e4..628376ce4a49 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -32,21 +32,6 @@ static u16 res_size[HPI_OBJ_MAXINDEX + 1] = HPI_RESPONSE_SIZE_BY_OBJECT;
static u16 gwSSX2_bypass;
/** \internal
- * Used by ASIO driver to disable SSX2 for a single process
- * \param phSubSys Pointer to HPI subsystem handle.
- * \param wBypass New bypass setting 0 = off, nonzero = on
- * \return Previous bypass setting.
- */
-u16 hpi_subsys_ssx2_bypass(const struct hpi_hsubsys *ph_subsys, u16 bypass)
-{
- u16 old_value = gwSSX2_bypass;
-
- gwSSX2_bypass = bypass;
-
- return old_value;
-}
-
-/** \internal
* initialize the HPI message structure
*/
static void hpi_init_message(struct hpi_message *phm, u16 object,
@@ -65,7 +50,8 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
phm->object = object;
phm->function = function;
phm->version = 0;
- /* Expect adapter index to be set by caller */
+ phm->adapter_index = HPI_ADAPTER_INDEX_INVALID;
+ /* Expect actual adapter index to be set by caller */
}
/** \internal
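The hpi_init_message() change above seeds adapter_index with HPI_ADAPTER_INDEX_INVALID instead of leaving it unset, so a message whose caller forgets to fill in a real index is recognisably bad rather than silently aimed at adapter 0; the subsys_message() hunk further down uses the same sentinel to warn about suspicious indexes. A minimal sketch of the idea, with a placeholder value for the constant:

#include <stdio.h>

#define HPI_ADAPTER_INDEX_INVALID 0xffffu	/* illustrative; the real value is in the HPI headers */

struct msg { unsigned short adapter_index; };

static void init_msg(struct msg *m)
{
	/* Seed with the sentinel; the caller is expected to overwrite it. */
	m->adapter_index = (unsigned short)HPI_ADAPTER_INDEX_INVALID;
}

static void check_subsys_msg(const struct msg *m)
{
	/* Mirrors the subsys_message() warning: a subsystem-level message
	 * should still carry the sentinel, so anything else is suspicious. */
	if (m->adapter_index != (unsigned short)HPI_ADAPTER_INDEX_INVALID)
		printf("suspicious adapter index %u\n", (unsigned)m->adapter_index);
}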
diff --git a/sound/pci/asihpi/hpimsginit.h b/sound/pci/asihpi/hpimsginit.h
index 864ad020c9b3..bfd330d78b58 100644
--- a/sound/pci/asihpi/hpimsginit.h
+++ b/sound/pci/asihpi/hpimsginit.h
@@ -21,11 +21,15 @@
(C) Copyright AudioScience Inc. 2007
*******************************************************************************/
/* Initialise response headers, or msg/response pairs.
-Note that it is valid to just init a response e.g. when a lower level is preparing
-a response to a message.
-However, when sending a message, a matching response buffer always must be prepared
+Note that it is valid to just init a response e.g. when a lower level is
+preparing a response to a message.
+However, when sending a message, a matching response buffer must always be
+prepared.
*/
+#ifndef _HPIMSGINIT_H_
+#define _HPIMSGINIT_H_
+
void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
u16 error);
@@ -38,3 +42,5 @@ void hpi_init_responseV1(struct hpi_response_header *phr, u16 size,
void hpi_init_message_responseV1(struct hpi_message_header *phm, u16 msg_size,
struct hpi_response_header *phr, u16 res_size, u16 object,
u16 function);
+
+#endif /* _HPIMSGINIT_H_ */
diff --git a/sound/pci/asihpi/hpimsgx.c b/sound/pci/asihpi/hpimsgx.c
index f01ab964f602..bcbdf30a6aa0 100644
--- a/sound/pci/asihpi/hpimsgx.c
+++ b/sound/pci/asihpi/hpimsgx.c
@@ -23,6 +23,7 @@ Extended Message Function With Response Cacheing
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpimsginit.h"
+#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"
@@ -42,22 +43,24 @@ static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
- && asihpi_pci_tbl[i].vendor != pci_info->vendor_id)
+ && asihpi_pci_tbl[i].vendor !=
+ pci_info->pci_dev->vendor)
continue;
if (asihpi_pci_tbl[i].device != PCI_ANY_ID
- && asihpi_pci_tbl[i].device != pci_info->device_id)
+ && asihpi_pci_tbl[i].device !=
+ pci_info->pci_dev->device)
continue;
if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
&& asihpi_pci_tbl[i].subvendor !=
- pci_info->subsys_vendor_id)
+ pci_info->pci_dev->subsystem_vendor)
continue;
if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
&& asihpi_pci_tbl[i].subdevice !=
- pci_info->subsys_device_id)
+ pci_info->pci_dev->subsystem_device)
continue;
- HPI_DEBUG_LOG(DEBUG, " %x,%lu\n", i,
- asihpi_pci_tbl[i].driver_data);
+ /* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
+ asihpi_pci_tbl[i].driver_data); */
return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
}
@@ -67,21 +70,12 @@ static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
static inline void hw_entry_point(struct hpi_message *phm,
struct hpi_response *phr)
{
-
- hpi_handler_func *ep;
-
- if (phm->adapter_index < HPI_MAX_ADAPTERS) {
- ep = (hpi_handler_func *) hpi_entry_points[phm->
- adapter_index];
- if (ep) {
- HPI_DEBUG_MESSAGE(DEBUG, phm);
- ep(phm, phr);
- HPI_DEBUG_RESPONSE(phr);
- return;
- }
- }
- hpi_init_response(phr, phm->object, phm->function,
- HPI_ERROR_PROCESSING_MESSAGE);
+ if ((phm->adapter_index < HPI_MAX_ADAPTERS)
+ && hpi_entry_points[phm->adapter_index])
+ hpi_entry_points[phm->adapter_index] (phm, phr);
+ else
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_PROCESSING_MESSAGE);
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
@@ -100,6 +94,7 @@ static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner);
static void HPIMSGX__reset(u16 adapter_index);
+
static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
@@ -153,8 +148,6 @@ static struct hpi_stream_response
static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];
-static struct hpi_subsys_response gRESP_HPI_SUBSYS_FIND_ADAPTERS;
-
static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];
/* use these to keep track of opens from user mode apps/DLLs */
@@ -167,6 +160,11 @@ static struct asi_open_state
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
+ if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
+ HPI_DEBUG_LOG(WARNING,
+ "suspicious adapter index %d in subsys message 0x%x.\n",
+ phm->adapter_index, phm->function);
+
switch (phm->function) {
case HPI_SUBSYS_GET_VERSION:
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
@@ -204,85 +202,37 @@ static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
HPI_SUBSYS_DRIVER_UNLOAD, 0);
return;
- case HPI_SUBSYS_GET_INFO:
- HPI_COMMON(phm, phr);
- break;
-
- case HPI_SUBSYS_FIND_ADAPTERS:
- memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
- sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
- break;
case HPI_SUBSYS_GET_NUM_ADAPTERS:
- memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
- sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
- phr->function = HPI_SUBSYS_GET_NUM_ADAPTERS;
- break;
case HPI_SUBSYS_GET_ADAPTER:
- {
- int count = phm->adapter_index;
- int index = 0;
- hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_GET_ADAPTER, 0);
-
- /* This is complicated by the fact that we want to
- * "skip" 0's in the adapter list.
- * First, make sure we are pointing to a
- * non-zero adapter type.
- */
- while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
- s.aw_adapter_list[index] == 0) {
- index++;
- if (index >= HPI_MAX_ADAPTERS)
- break;
- }
- while (count) {
- /* move on to the next adapter */
- index++;
- if (index >= HPI_MAX_ADAPTERS)
- break;
- while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
- s.aw_adapter_list[index] == 0) {
- index++;
- if (index >= HPI_MAX_ADAPTERS)
- break;
- }
- count--;
- }
+ HPI_COMMON(phm, phr);
+ break;
- if (index < HPI_MAX_ADAPTERS) {
- phr->u.s.adapter_index = (u16)index;
- phr->u.s.aw_adapter_list[0] =
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.
- s.aw_adapter_list[index];
- } else {
- phr->u.s.adapter_index = 0;
- phr->u.s.aw_adapter_list[0] = 0;
- phr->error = HPI_ERROR_BAD_ADAPTER_NUMBER;
- }
- break;
- }
case HPI_SUBSYS_CREATE_ADAPTER:
HPIMSGX__init(phm, phr);
break;
+
case HPI_SUBSYS_DELETE_ADAPTER:
- HPIMSGX__cleanup(phm->adapter_index, h_owner);
+ HPIMSGX__cleanup(phm->obj_index, h_owner);
{
struct hpi_message hm;
struct hpi_response hr;
- /* call to HPI_ADAPTER_CLOSE */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_CLOSE);
- hm.adapter_index = phm->adapter_index;
+ hm.adapter_index = phm->obj_index;
hw_entry_point(&hm, &hr);
}
- hw_entry_point(phm, phr);
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.
- aw_adapter_list[phm->adapter_index]
- = 0;
- hpi_entry_points[phm->adapter_index] = NULL;
+ if ((phm->obj_index < HPI_MAX_ADAPTERS)
+ && hpi_entry_points[phm->obj_index]) {
+ hpi_entry_points[phm->obj_index] (phm, phr);
+ hpi_entry_points[phm->obj_index] = NULL;
+ } else
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
+
break;
default:
- hw_entry_point(phm, phr);
+ /* Must explicitly handle every subsys message in this switch */
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
+ HPI_ERROR_INVALID_FUNC);
break;
}
}
@@ -409,33 +359,7 @@ void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
break;
}
HPI_DEBUG_RESPONSE(phr);
-#if 1
- if (phr->error >= HPI_ERROR_BACKEND_BASE) {
- void *ep = NULL;
- char *ep_name;
-
- HPI_DEBUG_MESSAGE(ERROR, phm);
-
- if (phm->adapter_index < HPI_MAX_ADAPTERS)
- ep = hpi_entry_points[phm->adapter_index];
-
- /* Don't need this? Have adapter index in debug info
- Know at driver load time index->backend mapping */
- if (ep == HPI_6000)
- ep_name = "HPI_6000";
- else if (ep == HPI_6205)
- ep_name = "HPI_6205";
- else
- ep_name = "unknown";
-
- HPI_DEBUG_LOG(ERROR, "HPI %s response - error# %d\n", ep_name,
- phr->error);
-
- if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE)
- hpi_debug_data((u16 *)phm,
- sizeof(*phm) / sizeof(u16));
- }
-#endif
+
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
@@ -484,7 +408,7 @@ static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
else {
instream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 1;
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
@@ -509,7 +433,7 @@ static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
}
}
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
}
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
@@ -530,7 +454,7 @@ static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
phm->wAdapterIndex, phm->wObjIndex, hOwner); */
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner = NULL;
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_RESET);
@@ -556,7 +480,7 @@ static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
obj_index].h_owner);
phr->error = HPI_ERROR_OBJ_NOT_OPEN;
}
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
@@ -581,7 +505,7 @@ static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
else {
outstream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 1;
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
@@ -606,7 +530,7 @@ static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
}
}
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
@@ -628,7 +552,7 @@ static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
phm->wAdapterIndex, phm->wObjIndex, hOwner); */
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner = NULL;
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_RESET);
@@ -654,7 +578,7 @@ static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
obj_index].h_owner);
phr->error = HPI_ERROR_OBJ_NOT_OPEN;
}
- hpios_msgxlock_un_lock(&msgx_lock);
+ hpios_msgxlock_unlock(&msgx_lock);
}
static u16 adapter_prepare(u16 adapter)
@@ -683,16 +607,9 @@ static u16 adapter_prepare(u16 adapter)
if (hr.error)
return hr.error;
- aDAPTER_INFO[adapter].num_outstreams = hr.u.a.num_outstreams;
- aDAPTER_INFO[adapter].num_instreams = hr.u.a.num_instreams;
- aDAPTER_INFO[adapter].type = hr.u.a.adapter_type;
-
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list[adapter] =
- hr.u.a.adapter_type;
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters++;
- if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters > HPI_MAX_ADAPTERS)
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters =
- HPI_MAX_ADAPTERS;
+ aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
+ aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
+ aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;
/* call to HPI_OSTREAM_OPEN */
for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
@@ -727,7 +644,7 @@ static u16 adapter_prepare(u16 adapter)
memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
sizeof(rESP_HPI_MIXER_OPEN[0]));
- return gRESP_HPI_SUBSYS_FIND_ADAPTERS.h.error;
+ return 0;
}
static void HPIMSGX__reset(u16 adapter_index)
@@ -737,12 +654,6 @@ static void HPIMSGX__reset(u16 adapter_index)
struct hpi_response hr;
if (adapter_index == HPIMSGX_ALLADAPTERS) {
- /* reset all responses to contain errors */
- hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM,
- HPI_SUBSYS_FIND_ADAPTERS, 0);
- memcpy(&gRESP_HPI_SUBSYS_FIND_ADAPTERS, &hr,
- sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
-
for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {
hpi_init_response(&hr, HPI_OBJ_ADAPTER,
@@ -783,12 +694,6 @@ static void HPIMSGX__reset(u16 adapter_index)
rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
HPI_ERROR_INVALID_OBJ;
}
- if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
- s.aw_adapter_list[adapter_index]) {
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.
- s.aw_adapter_list[adapter_index] = 0;
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters--;
- }
}
}
@@ -802,15 +707,9 @@ static u16 HPIMSGX__init(struct hpi_message *phm,
hpi_handler_func *entry_point_func;
struct hpi_response hr;
- if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters >= HPI_MAX_ADAPTERS)
- return HPI_ERROR_BAD_ADAPTER_NUMBER;
-
/* Init response here so we can pass in previous adapter list */
hpi_init_response(&hr, phm->object, phm->function,
HPI_ERROR_INVALID_OBJ);
- memcpy(hr.u.s.aw_adapter_list,
- gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list,
- sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list));
entry_point_func =
hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);
@@ -860,7 +759,7 @@ static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
struct hpi_response hr;
HPI_DEBUG_LOG(DEBUG,
- "close adapter %d ostream %d\n",
+ "Close adapter %d ostream %d\n",
adapter, i);
hpi_init_message_response(&hm, &hr,
@@ -884,7 +783,7 @@ static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
struct hpi_response hr;
HPI_DEBUG_LOG(DEBUG,
- "close adapter %d istream %d\n",
+ "Close adapter %d istream %d\n",
adapter, i);
hpi_init_message_response(&hm, &hr,
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 22dbd91811a4..26186be638b1 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -30,6 +30,7 @@ Common Linux HPI ioctl and module probe/remove functions
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <asm/uaccess.h>
+#include <linux/pci.h>
#include <linux/stringify.h>
#ifdef MODULE_FIRMWARE
@@ -45,7 +46,7 @@ MODULE_FIRMWARE("asihpi/dsp8900.bin");
static int prealloc_stream_buf;
module_param(prealloc_stream_buf, int, S_IRUGO);
MODULE_PARM_DESC(prealloc_stream_buf,
- "preallocate size for per-adapter stream buffer");
+ "Preallocate size for per-adapter stream buffer");
/* Allow the debug level to be changed after module load.
E.g. echo 2 > /sys/module/asihpi/parameters/hpiDebugLevel
@@ -121,8 +122,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
phpi_ioctl_data = (struct hpi_ioctl_linux __user *)arg;
/* Read the message and response pointers from user space. */
- if (get_user(puhm, &phpi_ioctl_data->phm) ||
- get_user(puhr, &phpi_ioctl_data->phr)) {
+ if (get_user(puhm, &phpi_ioctl_data->phm)
+ || get_user(puhr, &phpi_ioctl_data->phr)) {
err = -EFAULT;
goto out;
}
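
For context, a minimal sketch of the two-stage user copy this ioctl handler relies on: the wrapper struct and helper below are hypothetical illustrations, not the driver's declarations, but get_user() and copy_from_user() are the same generic kernel APIs used in the hunk above.

#include <linux/uaccess.h>

/* Hypothetical mirror of a user ABI that passes two user-space pointers. */
struct msg_ptrs {
	void __user *msg;
	void __user *resp;
};

static long fetch_msg(void __user *arg, void *kbuf, size_t kbuf_size)
{
	struct msg_ptrs __user *u = arg;
	void __user *umsg;

	if (get_user(umsg, &u->msg))			/* fetch just the pointer */
		return -EFAULT;
	if (copy_from_user(kbuf, umsg, kbuf_size))	/* then the bounded payload */
		return -EFAULT;
	return 0;
}
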
@@ -135,7 +136,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (hm->h.size > sizeof(*hm))
hm->h.size = sizeof(*hm);
- /*printk(KERN_INFO "message size %d\n", hm->h.wSize); */
+ /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */
uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
if (uncopied_bytes) {
@@ -156,7 +157,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
pa = &adapters[hm->h.adapter_index];
- hr->h.size = 0;
+ hr->h.size = res_max_size;
if (hm->h.object == HPI_OBJ_SUBSYSTEM) {
switch (hm->h.function) {
case HPI_SUBSYS_CREATE_ADAPTER:
@@ -216,7 +217,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
if (pa->buffer_size < size) {
HPI_DEBUG_LOG(DEBUG,
- "realloc adapter %d stream "
+ "Realloc adapter %d stream "
"buffer from %zd to %d\n",
hm->h.adapter_index,
pa->buffer_size, size);
@@ -259,7 +260,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
copy_from_user(pa->p_buffer, ptr, size);
if (uncopied_bytes)
HPI_DEBUG_LOG(WARNING,
- "missed %d of %d "
+ "Missed %d of %d "
"bytes from user\n", uncopied_bytes,
size);
}
@@ -271,7 +272,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
copy_to_user(ptr, pa->p_buffer, size);
if (uncopied_bytes)
HPI_DEBUG_LOG(WARNING,
- "missed %d of %d " "bytes to user\n",
+ "Missed %d of %d " "bytes to user\n",
uncopied_bytes, size);
}
@@ -290,9 +291,9 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (hr->h.size > res_max_size) {
HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size,
res_max_size);
- /*HPI_DEBUG_MESSAGE(ERROR, hm); */
- err = -EFAULT;
- goto out;
+ hr->h.error = HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;
+ hr->h.specific_error = hr->h.size;
+ hr->h.size = sizeof(hr->h);
}
uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
@@ -320,18 +321,26 @@ int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
memset(&adapter, 0, sizeof(adapter));
- printk(KERN_DEBUG "probe PCI device (%04x:%04x,%04x:%04x,%04x)\n",
- pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor,
+ dev_printk(KERN_DEBUG, &pci_dev->dev,
+ "probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
+ pci_dev->device, pci_dev->subsystem_vendor,
pci_dev->subsystem_device, pci_dev->devfn);
+ if (pci_enable_device(pci_dev) < 0) {
+ dev_printk(KERN_ERR, &pci_dev->dev,
+ "pci_enable_device failed, disabling device\n");
+ return -EIO;
+ }
+
+ pci_set_master(pci_dev); /* also sets latency timer if < 16 */
+
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_CREATE_ADAPTER);
hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
HPI_ERROR_PROCESSING_MESSAGE);
- hm.adapter_index = -1; /* an invalid index */
+ hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;
- /* fill in HPI_PCI information from kernel provided information */
adapter.pci = pci_dev;
nm = HPI_MAX_ADAPTER_MEM_SPACES;
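
For orientation, a stripped-down sketch of the PCI bring-up the hunk adds to the probe path; the function name is hypothetical, while pci_enable_device(), pci_set_master() and dev_err() are the standard kernel APIs.

#include <linux/pci.h>

static int example_probe(struct pci_dev *pci_dev)
{
	/* Wake the device and enable its I/O and memory decoding. */
	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev, "pci_enable_device failed\n");
		return -EIO;
	}
	/* Let the card master the bus (needed for its DMA engines);
	 * this also programs a sane latency timer on old chipsets. */
	pci_set_master(pci_dev);
	return 0;
}
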
@@ -359,19 +368,7 @@ int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
pci.ap_mem_base[idx] = adapter.ap_remapped_mem_base[idx];
}
- /* could replace Pci with direct pointer to pci_dev for linux
- Instead wrap accessor functions for IDs etc.
- Would it work for windows?
- */
- pci.bus_number = pci_dev->bus->number;
- pci.vendor_id = (u16)pci_dev->vendor;
- pci.device_id = (u16)pci_dev->device;
- pci.subsys_vendor_id = (u16)(pci_dev->subsystem_vendor & 0xffff);
- pci.subsys_device_id = (u16)(pci_dev->subsystem_device & 0xffff);
- pci.device_number = pci_dev->devfn;
- pci.interrupt = pci_dev->irq;
- pci.p_os_data = pci_dev;
-
+ pci.pci_dev = pci_dev;
hm.u.s.resource.bus_type = HPI_BUS_PCI;
hm.u.s.resource.r.pci = &pci;
@@ -392,10 +389,10 @@ int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
}
adapter.index = hr.u.s.adapter_index;
- adapter.type = hr.u.s.aw_adapter_list[adapter.index];
+ adapter.type = hr.u.s.adapter_type;
hm.adapter_index = adapter.index;
- err = hpi_adapter_open(NULL, adapter.index);
+ err = hpi_adapter_open(adapter.index);
if (err)
goto err;
@@ -407,8 +404,9 @@ int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
mutex_init(&adapters[adapter.index].mutex);
pci_set_drvdata(pci_dev, &adapters[adapter.index]);
- printk(KERN_INFO "probe found adapter ASI%04X HPI index #%d.\n",
- adapter.type, adapter.index);
+ dev_printk(KERN_INFO, &pci_dev->dev,
+ "probe succeeded for ASI%04X HPI index %d\n", adapter.type,
+ adapter.index);
return 0;
@@ -439,7 +437,8 @@ void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_DELETE_ADAPTER);
- hm.adapter_index = pa->index;
+ hm.obj_index = pa->index;
+ hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
/* unmap PCI memory space, mapped during device init. */
@@ -450,20 +449,18 @@ void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
}
}
- if (pa->p_buffer) {
- pa->buffer_size = 0;
+ if (pa->p_buffer)
vfree(pa->p_buffer);
- }
pci_set_drvdata(pci_dev, NULL);
- /*
- printk(KERN_INFO "PCI device (%04x:%04x,%04x:%04x,%04x),"
- " HPI index # %d, removed.\n",
- pci_dev->vendor, pci_dev->device,
- pci_dev->subsystem_vendor,
- pci_dev->subsystem_device, pci_dev->devfn,
- pa->index);
- */
+ if (1)
+ dev_printk(KERN_INFO, &pci_dev->dev,
+ "remove %04x:%04x,%04x:%04x,%04x," " HPI index %d.\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ pci_dev->devfn, pa->index);
+
+ memset(pa, 0, sizeof(*pa));
}
void __init asihpi_init(void)
diff --git a/sound/pci/asihpi/hpios.h b/sound/pci/asihpi/hpios.h
index 370f39b43f85..03273e729f99 100644
--- a/sound/pci/asihpi/hpios.h
+++ b/sound/pci/asihpi/hpios.h
@@ -27,9 +27,7 @@ HPI Operating System Specific macros for Linux Kernel driver
#define HPI_OS_LINUX_KERNEL
#define HPI_OS_DEFINED
-#define HPI_KERNEL_MODE
-
-#define HPI_REASSIGN_DUPLICATE_ADAPTER_IDX
+#define HPI_BUILD_KERNEL_MODE
#include <linux/io.h>
#include <asm/system.h>
@@ -135,20 +133,20 @@ static inline void cond_unlock(struct hpios_spinlock *l)
#define hpios_msgxlock_init(obj) spin_lock_init(&(obj)->lock)
#define hpios_msgxlock_lock(obj) cond_lock(obj)
-#define hpios_msgxlock_un_lock(obj) cond_unlock(obj)
+#define hpios_msgxlock_unlock(obj) cond_unlock(obj)
#define hpios_dsplock_init(obj) spin_lock_init(&(obj)->dsp_lock.lock)
#define hpios_dsplock_lock(obj) cond_lock(&(obj)->dsp_lock)
#define hpios_dsplock_unlock(obj) cond_unlock(&(obj)->dsp_lock)
#ifdef CONFIG_SND_DEBUG
-#define HPI_DEBUG
+#define HPI_BUILD_DEBUG
#endif
#define HPI_ALIST_LOCKING
#define hpios_alistlock_init(obj) spin_lock_init(&((obj)->list_lock.lock))
#define hpios_alistlock_lock(obj) spin_lock(&((obj)->list_lock.lock))
-#define hpios_alistlock_un_lock(obj) spin_unlock(&((obj)->list_lock.lock))
+#define hpios_alistlock_unlock(obj) spin_unlock(&((obj)->list_lock.lock))
struct hpi_adapter {
/* mutex prevents contention for one card
diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h
index cf46bba563cf..ecb8f4daf408 100644
--- a/sound/pci/au88x0/au88x0.h
+++ b/sound/pci/au88x0/au88x0.h
@@ -211,7 +211,7 @@ static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma);
//static void vortex_adbdma_stopfifo(vortex_t *vortex, int adbdma);
static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma);
static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma);
-static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma);
+static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma);
static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma);
#ifndef CHIP_AU8810
@@ -219,7 +219,7 @@ static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma);
static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma);
static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma);
static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma);
-static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma);
+static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma);
#endif
/* global stuff. */
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 16c0bdfbb164..489150380eac 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1249,7 +1249,7 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
}
}
-static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
+static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
int temp, page, delta;
@@ -1506,7 +1506,7 @@ static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma)
POS_SHIFT) & POS_MASK);
}
#endif
-static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
+static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int temp;
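
The au88x0 hunks above are pure declaration-specifier reordering: GCC accepts `inline` after the type but warns (approximately "'inline' is not at beginning of declaration", reported under -Wold-style-declaration) when it is not first among the specifiers after the storage class. A two-line illustration with a hypothetical function name:

static int inline f(void);	/* old order: triggers the old-style-declaration warning */
static inline int f(void);	/* preferred: storage class, function specifier, then type */
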
diff --git a/sound/pci/au88x0/au88x0_eq.c b/sound/pci/au88x0/au88x0_eq.c
index 38602b85874d..278ed8189fca 100644
--- a/sound/pci/au88x0/au88x0_eq.c
+++ b/sound/pci/au88x0/au88x0_eq.c
@@ -896,7 +896,8 @@ static int __devinit vortex_eq_init(vortex_t * vortex)
if ((kcontrol =
snd_ctl_new1(&vortex_eq_kcontrol, vortex)) == NULL)
return -ENOMEM;
- strcpy(kcontrol->id.name, EqBandLabels[i]);
+ snprintf(kcontrol->id.name, sizeof(kcontrol->id.name),
+ "%s Playback Volume", EqBandLabels[i]);
kcontrol->private_value = i;
if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
return err;
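
A side-by-side sketch of the control-naming change above. The buffer size and band label here are illustrative (the real destination is the fixed-size name field in the kcontrol id); the bounded snprintf() also appends the conventional "Playback Volume" suffix so mixer applications file the control where users expect it.

	char name[44];			/* illustrative size for the id.name field */
	const char *label = "31.25Hz";	/* hypothetical EQ band label */

	strcpy(name, label);				/* old: unbounded copy, bare label */
	snprintf(name, sizeof(name),
		 "%s Playback Volume", label);		/* new: bounded, ALSA-style name */
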
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 573594bf3225..5715c4d05573 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1,6 +1,5 @@
-/*
- * azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
- * Copyright (C) 2002, 2005 - 2010 by Andreas Mohr <andi AT lisas.de>
+/* azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
+ * Copyright (C) 2002, 2005 - 2011 by Andreas Mohr <andi AT lisas.de>
*
* Framework borrowed from Bart Hartgers's als4000.c.
* Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
@@ -66,6 +65,13 @@
* addresses illegally. So far unfortunately it looks like the very flexible
* ALSA AC97 support is still not enough to easily compensate for such a
* grave layout violation despite all tweaks and quirks mechanisms it offers.
+ * Well, not quite: the ac97 layer is much improved now (bus-specific ops!),
+ * so I was able to implement support - it's actually working quite well.
+ * An interesting item might be the Aztech AMR 2800-W, since it's an AC97
+ * modem card which might reveal the Aztech-specific codec ID that
+ * we might want to impersonate, too. Ditto PCI168's brother, the PCI368,
+ * whose advertising datasheet says it's AC97-based and has a
+ * Digital Enhanced Game Port.
* - builtin genuine OPL3 - verified to work fine, 20080506
* - full duplex 16bit playback/record at independent sampling rate
* - MPU401 (+ legacy address support, claimed by one official spec sheet)
@@ -189,6 +195,16 @@
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/initval.h>
+/*
+ * Config switch, to use ALSA's AC97 layer instead of old custom mixer crap.
+ * If the AC97 compatibility parts we needed to implement locally turn out
+ * to work nicely, then remove the old implementation eventually.
+ */
+#define AZF_USE_AC97_LAYER 1
+
+#ifdef AZF_USE_AC97_LAYER
+#include <sound/ac97_codec.h>
+#endif
#include "azt3328.h"
MODULE_AUTHOR("Andreas Mohr <andi AT lisas.de>");
@@ -328,6 +344,10 @@ struct snd_azf3328 {
/* playback, recording and I2S out codecs */
struct snd_azf3328_codec_data codecs[3];
+#ifdef AZF_USE_AC97_LAYER
+ struct snd_ac97 *ac97;
+#endif
+
struct snd_card *card;
struct snd_rawmidi *rmidi;
@@ -506,7 +526,7 @@ snd_azf3328_mixer_inw(const struct snd_azf3328 *chip, unsigned reg)
#define AZF_MUTE_BIT 0x80
static bool
-snd_azf3328_mixer_set_mute(const struct snd_azf3328 *chip,
+snd_azf3328_mixer_mute_control(const struct snd_azf3328 *chip,
unsigned reg, bool do_mute
)
{
@@ -521,6 +541,323 @@ snd_azf3328_mixer_set_mute(const struct snd_azf3328 *chip,
return (do_mute) ? !updated : updated;
}
+static inline bool
+snd_azf3328_mixer_mute_control_master(const struct snd_azf3328 *chip,
+ bool do_mute
+)
+{
+ return snd_azf3328_mixer_mute_control(
+ chip,
+ IDX_MIXER_PLAY_MASTER,
+ do_mute
+ );
+}
+
+static inline bool
+snd_azf3328_mixer_mute_control_pcm(const struct snd_azf3328 *chip,
+ bool do_mute
+)
+{
+ return snd_azf3328_mixer_mute_control(
+ chip,
+ IDX_MIXER_WAVEOUT,
+ do_mute
+ );
+}
+
+static inline void
+snd_azf3328_mixer_reset(const struct snd_azf3328 *chip)
+{
+ /* reset (close) mixer:
+ * first mute master volume, then reset
+ */
+ snd_azf3328_mixer_mute_control_master(chip, 1);
+ snd_azf3328_mixer_outw(chip, IDX_MIXER_RESET, 0x0000);
+}
+
+#ifdef AZF_USE_AC97_LAYER
+
+static inline void
+snd_azf3328_mixer_ac97_map_unsupported(unsigned short reg, const char *mode)
+{
+ /* need to add some more or less clever emulation? */
+ printk(KERN_WARNING
+ "azt3328: missing %s emulation for AC97 register 0x%02x!\n",
+ mode, reg);
+}
+
+/*
+ * We need a _special_ AC97 mixer hardware register index mapper
+ * to compensate for the rather AC97-incompatible hardware layout.
+ */
+#define AZF_REG_MASK 0x3f
+#define AZF_AC97_REG_UNSUPPORTED 0x8000
+#define AZF_AC97_REG_REAL_IO_READ 0x4000
+#define AZF_AC97_REG_REAL_IO_WRITE 0x2000
+#define AZF_AC97_REG_REAL_IO_RW \
+ (AZF_AC97_REG_REAL_IO_READ | AZF_AC97_REG_REAL_IO_WRITE)
+#define AZF_AC97_REG_EMU_IO_READ 0x0400
+#define AZF_AC97_REG_EMU_IO_WRITE 0x0200
+#define AZF_AC97_REG_EMU_IO_RW \
+ (AZF_AC97_REG_EMU_IO_READ | AZF_AC97_REG_EMU_IO_WRITE)
+static unsigned short
+snd_azf3328_mixer_ac97_map_reg_idx(unsigned short reg)
+{
+ static const struct {
+ unsigned short azf_reg;
+ } azf_reg_mapper[] = {
+ /* Comparing the mono/stereo sequence of the azf controls
+ * with the AC97 control series makes it quite obvious that
+ * azf simply dropped the AC97_HEADPHONE control at its
+ * intended offset, shifted _all_ later controls by one,
+ * and _then_ simply re-added it as an FMSYNTH control at
+ * the end, to make up for the offset.
+ * This means we have to translate indices here as needed
+ * and then do some tiny AC97 patching
+ * (snd_ac97_rename_vol_ctl() etc.) - that's it.
+ */
+ { /* AC97_RESET */ IDX_MIXER_RESET
+ | AZF_AC97_REG_REAL_IO_WRITE
+ | AZF_AC97_REG_EMU_IO_READ },
+ { /* AC97_MASTER */ IDX_MIXER_PLAY_MASTER },
+ /* note large shift: AC97_HEADPHONE to IDX_MIXER_FMSYNTH! */
+ { /* AC97_HEADPHONE */ IDX_MIXER_FMSYNTH },
+ { /* AC97_MASTER_MONO */ IDX_MIXER_MODEMOUT },
+ { /* AC97_MASTER_TONE */ IDX_MIXER_BASSTREBLE },
+ { /* AC97_PC_BEEP */ IDX_MIXER_PCBEEP },
+ { /* AC97_PHONE */ IDX_MIXER_MODEMIN },
+ { /* AC97_MIC */ IDX_MIXER_MIC },
+ { /* AC97_LINE */ IDX_MIXER_LINEIN },
+ { /* AC97_CD */ IDX_MIXER_CDAUDIO },
+ { /* AC97_VIDEO */ IDX_MIXER_VIDEO },
+ { /* AC97_AUX */ IDX_MIXER_AUX },
+ { /* AC97_PCM */ IDX_MIXER_WAVEOUT },
+ { /* AC97_REC_SEL */ IDX_MIXER_REC_SELECT },
+ { /* AC97_REC_GAIN */ IDX_MIXER_REC_VOLUME },
+ { /* AC97_REC_GAIN_MIC */ AZF_AC97_REG_EMU_IO_RW },
+ { /* AC97_GENERAL_PURPOSE */ IDX_MIXER_ADVCTL2 },
+ { /* AC97_3D_CONTROL */ IDX_MIXER_ADVCTL1 },
+ };
+
+ unsigned short reg_azf = AZF_AC97_REG_UNSUPPORTED;
+
+ /* azf3328 supports the low-numbered and low-spec'd range
+ of AC97 regs only */
+ if (reg <= AC97_3D_CONTROL) {
+ unsigned short reg_idx = reg / 2;
+ reg_azf = azf_reg_mapper[reg_idx].azf_reg;
+ /* a translation-only entry means it's real read/write: */
+ if (!(reg_azf & ~AZF_REG_MASK))
+ reg_azf |= AZF_AC97_REG_REAL_IO_RW;
+ } else {
+ switch (reg) {
+ case AC97_POWERDOWN:
+ reg_azf = AZF_AC97_REG_EMU_IO_RW;
+ break;
+ case AC97_EXTENDED_ID:
+ reg_azf = AZF_AC97_REG_EMU_IO_READ;
+ break;
+ case AC97_EXTENDED_STATUS:
+ /* I don't know what the h*ll the AC97 layer
+ * would consult this _extended_ register for,
+ * given a card that only advertises base AC97,
+ * but let's just emulate it anyway :-P
+ */
+ reg_azf = AZF_AC97_REG_EMU_IO_RW;
+ break;
+ case AC97_VENDOR_ID1:
+ case AC97_VENDOR_ID2:
+ reg_azf = AZF_AC97_REG_EMU_IO_READ;
+ break;
+ }
+ }
+ return reg_azf;
+}
+
+static const unsigned short
+azf_emulated_ac97_caps =
+ AC97_BC_DEDICATED_MIC |
+ AC97_BC_BASS_TREBLE |
+ /* Headphone is an FM Synth control here */
+ AC97_BC_HEADPHONE |
+ /* no AC97_BC_LOUDNESS! */
+ /* mask 0x7c00 is the vendor-specific 3D-enhancement
+ vendor indicator. Since there actually _is_ an entry
+ for Aztech Labs (13), make damn sure to indicate it. */
+ (13 << 10);
+
+static const unsigned short
+azf_emulated_ac97_powerdown =
+ /* pretend everything to be active */
+ AC97_PD_ADC_STATUS |
+ AC97_PD_DAC_STATUS |
+ AC97_PD_MIXER_STATUS |
+ AC97_PD_VREF_STATUS;
+
+/*
+ * Emulated, _unofficial_ vendor ID
+ * (there might be some devices such as the MR 2800-W
+ * which could reveal the real Aztech AC97 ID).
+ * We choose to use an "AZT" prefix, and then use 1 to indicate the PCI168
+ * (better not to use 0x68, since there's a PCI368 as well).
+ */
+static const unsigned int
+azf_emulated_ac97_vendor_id = 0x415a5401;
+
+static unsigned short
+snd_azf3328_mixer_ac97_read(struct snd_ac97 *ac97, unsigned short reg_ac97)
+{
+ const struct snd_azf3328 *chip = ac97->private_data;
+ unsigned short reg_azf = snd_azf3328_mixer_ac97_map_reg_idx(reg_ac97);
+ unsigned short reg_val = 0;
+ bool unsupported = 0;
+
+ snd_azf3328_dbgmixer(
+ "snd_azf3328_mixer_ac97_read reg_ac97 %u\n",
+ reg_ac97
+ );
+ if (reg_azf & AZF_AC97_REG_UNSUPPORTED)
+ unsupported = 1;
+ else {
+ if (reg_azf & AZF_AC97_REG_REAL_IO_READ)
+ reg_val = snd_azf3328_mixer_inw(chip,
+ reg_azf & AZF_REG_MASK);
+ else {
+ /*
+ * Proceed with dummy I/O read,
+ * to ensure compatible timing where this may matter.
+ * (ALSA AC97 layer usually doesn't call I/O functions
+ * due to intelligent I/O caching anyway)
+ * Choose a mixer register that's thoroughly unrelated
+ * to common audio (try to minimize distortion).
+ */
+ snd_azf3328_mixer_inw(chip, IDX_MIXER_SOMETHING30H);
+ }
+
+ if (reg_azf & AZF_AC97_REG_EMU_IO_READ) {
+ switch (reg_ac97) {
+ case AC97_RESET:
+ reg_val |= azf_emulated_ac97_caps;
+ break;
+ case AC97_POWERDOWN:
+ reg_val |= azf_emulated_ac97_powerdown;
+ break;
+ case AC97_EXTENDED_ID:
+ case AC97_EXTENDED_STATUS:
+ /* AFAICS we simply can't support anything: */
+ reg_val |= 0;
+ break;
+ case AC97_VENDOR_ID1:
+ reg_val = azf_emulated_ac97_vendor_id >> 16;
+ break;
+ case AC97_VENDOR_ID2:
+ reg_val = azf_emulated_ac97_vendor_id & 0xffff;
+ break;
+ default:
+ unsupported = 1;
+ break;
+ }
+ }
+ }
+ if (unsupported)
+ snd_azf3328_mixer_ac97_map_unsupported(reg_ac97, "read");
+
+ return reg_val;
+}
+
+static void
+snd_azf3328_mixer_ac97_write(struct snd_ac97 *ac97,
+ unsigned short reg_ac97, unsigned short val)
+{
+ const struct snd_azf3328 *chip = ac97->private_data;
+ unsigned short reg_azf = snd_azf3328_mixer_ac97_map_reg_idx(reg_ac97);
+ bool unsupported = 0;
+
+ snd_azf3328_dbgmixer(
+ "snd_azf3328_mixer_ac97_write reg_ac97 %u val %u\n",
+ reg_ac97, val
+ );
+ if (reg_azf & AZF_AC97_REG_UNSUPPORTED)
+ unsupported = 1;
+ else {
+ if (reg_azf & AZF_AC97_REG_REAL_IO_WRITE)
+ snd_azf3328_mixer_outw(
+ chip,
+ reg_azf & AZF_REG_MASK,
+ val
+ );
+ else
+ if (reg_azf & AZF_AC97_REG_EMU_IO_WRITE) {
+ switch (reg_ac97) {
+ case AC97_REC_GAIN_MIC:
+ case AC97_POWERDOWN:
+ case AC97_EXTENDED_STATUS:
+ /*
+ * Silently swallow these writes.
+ * Since for most registers our card doesn't
+ * actually support a comparable feature,
+ * this is exactly what we should do here.
+ * The AC97 layer's I/O caching probably
+ * automatically takes care of all the rest...
+ * (remembers written values etc.)
+ */
+ break;
+ default:
+ unsupported = 1;
+ break;
+ }
+ }
+ }
+ if (unsupported)
+ snd_azf3328_mixer_ac97_map_unsupported(reg_ac97, "write");
+}
+
+static int __devinit
+snd_azf3328_mixer_new(struct snd_azf3328 *chip)
+{
+ struct snd_ac97_bus *bus;
+ struct snd_ac97_template ac97;
+ static struct snd_ac97_bus_ops ops = {
+ .write = snd_azf3328_mixer_ac97_write,
+ .read = snd_azf3328_mixer_ac97_read,
+ };
+ int rc;
+
+ memset(&ac97, 0, sizeof(ac97));
+ ac97.scaps = AC97_SCAP_SKIP_MODEM
+ | AC97_SCAP_AUDIO /* we support audio! */
+ | AC97_SCAP_NO_SPDIF;
+ ac97.private_data = chip;
+ ac97.pci = chip->pci;
+
+ /*
+ * ALSA's AC97 layer has terrible init crackling issues,
+ * unfortunately, and since it makes use of AC97_RESET,
+ * there's no use trying to mute Master Playback proactively.
+ */
+
+ rc = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus);
+ if (!rc)
+ rc = snd_ac97_mixer(bus, &ac97, &chip->ac97);
+ /*
+ * Make sure to complain loudly in case of AC97 init failure,
+ * since failure may happen quite often,
+ * due to this card being a very quirky AC97 "lookalike".
+ */
+ if (rc)
+ printk(KERN_ERR "azt3328: AC97 init failed, err %d!\n", rc);
+
+ /* If we return an error here, then snd_card_free() should
+ * free up any ac97 codecs that got created, as well as the bus.
+ */
+ return rc;
+}
+#else /* AZF_USE_AC97_LAYER */
static void
snd_azf3328_mixer_write_volume_gradually(const struct snd_azf3328 *chip,
unsigned reg,
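
A worked example of what the mapper introduced above returns for two registers, derived purely from the table in this hunk; the AC97_MASTER and AC97_HEADPHONE offsets assumed here are the usual <sound/ac97_codec.h> values 0x02 and 0x04.

	unsigned short reg;

	/* AC97_MASTER is offset 0x02, so table index 0x02 / 2 = 1, which is
	 * plain IDX_MIXER_PLAY_MASTER; no flag bits means real read/write I/O. */
	reg = snd_azf3328_mixer_ac97_map_reg_idx(AC97_MASTER);
	/* reg == IDX_MIXER_PLAY_MASTER | AZF_AC97_REG_REAL_IO_RW */

	/* AC97_HEADPHONE (0x04) lands on index 2, i.e. IDX_MIXER_FMSYNTH --
	 * the "shifted by one" quirk the table comment describes. */
	reg = snd_azf3328_mixer_ac97_map_reg_idx(AC97_HEADPHONE);
	/* reg == IDX_MIXER_FMSYNTH | AZF_AC97_REG_REAL_IO_RW */
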
@@ -945,6 +1282,7 @@ snd_azf3328_mixer_new(struct snd_azf3328 *chip)
snd_azf3328_dbgcallleave();
return 0;
}
+#endif /* AZF_USE_AC97_LAYER */
static int
snd_azf3328_hw_params(struct snd_pcm_substream *substream,
@@ -1233,8 +1571,8 @@ snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (is_main_mixer_playback_codec) {
/* mute WaveOut (avoid clicking during setup) */
previously_muted =
- snd_azf3328_mixer_set_mute(
- chip, IDX_MIXER_WAVEOUT, 1
+ snd_azf3328_mixer_mute_control_pcm(
+ chip, 1
);
}
@@ -1290,8 +1628,8 @@ snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (is_main_mixer_playback_codec) {
/* now unmute WaveOut */
if (!previously_muted)
- snd_azf3328_mixer_set_mute(
- chip, IDX_MIXER_WAVEOUT, 0
+ snd_azf3328_mixer_mute_control_pcm(
+ chip, 0
);
}
@@ -1315,8 +1653,8 @@ snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (is_main_mixer_playback_codec) {
/* mute WaveOut (avoid clicking during setup) */
previously_muted =
- snd_azf3328_mixer_set_mute(
- chip, IDX_MIXER_WAVEOUT, 1
+ snd_azf3328_mixer_mute_control_pcm(
+ chip, 1
);
}
@@ -1341,8 +1679,8 @@ snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (is_main_mixer_playback_codec) {
/* now unmute WaveOut */
if (!previously_muted)
- snd_azf3328_mixer_set_mute(
- chip, IDX_MIXER_WAVEOUT, 0
+ snd_azf3328_mixer_mute_control_pcm(
+ chip, 0
);
}
@@ -2050,11 +2388,7 @@ snd_azf3328_free(struct snd_azf3328 *chip)
if (chip->irq < 0)
goto __end_hw;
- /* reset (close) mixer:
- * first mute master volume, then reset
- */
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_PLAY_MASTER, 1);
- snd_azf3328_mixer_outw(chip, IDX_MIXER_RESET, 0x0000);
+ snd_azf3328_mixer_reset(chip);
snd_azf3328_timer_stop(chip->timer);
snd_azf3328_gameport_free(chip);
@@ -2407,6 +2741,55 @@ snd_azf3328_suspend_regs(unsigned long io_addr, unsigned count, u32 *saved_regs)
}
}
+static inline void
+snd_azf3328_resume_regs(const u32 *saved_regs,
+ unsigned long io_addr,
+ unsigned count
+)
+{
+ unsigned reg;
+
+ for (reg = 0; reg < count; ++reg) {
+ outl(*saved_regs, io_addr);
+ snd_azf3328_dbgpm("resume: io 0x%04lx: 0x%08x --> 0x%08x\n",
+ io_addr, *saved_regs, inl(io_addr));
+ ++saved_regs;
+ io_addr += sizeof(*saved_regs);
+ }
+}
+
+static inline void
+snd_azf3328_suspend_ac97(struct snd_azf3328 *chip)
+{
+#ifdef AZF_USE_AC97_LAYER
+ snd_ac97_suspend(chip->ac97);
+#else
+ snd_azf3328_suspend_regs(chip->mixer_io,
+ ARRAY_SIZE(chip->saved_regs_mixer), chip->saved_regs_mixer);
+
+ /* make sure to disable master volume etc. to prevent looping sound */
+ snd_azf3328_mixer_mute_control_master(chip, 1);
+ snd_azf3328_mixer_mute_control_pcm(chip, 1);
+#endif /* AZF_USE_AC97_LAYER */
+}
+
+static inline void
+snd_azf3328_resume_ac97(const struct snd_azf3328 *chip)
+{
+#ifdef AZF_USE_AC97_LAYER
+ snd_ac97_resume(chip->ac97);
+#else
+ snd_azf3328_resume_regs(chip->saved_regs_mixer, chip->mixer_io,
+ ARRAY_SIZE(chip->saved_regs_mixer));
+
+ /* unfortunately with 32bit transfers, IDX_MIXER_PLAY_MASTER (0x02)
+ and IDX_MIXER_RESET (offset 0x00) get touched at the same time,
+ resulting in a mixer reset condition persisting until _after_
+ master vol was restored. Thus master vol needs an extra restore. */
+ outw(((u16 *)chip->saved_regs_mixer)[1], chip->mixer_io + 2);
+#endif /* AZF_USE_AC97_LAYER */
+}
+
static int
snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
{
@@ -2420,12 +2803,7 @@ snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
- snd_azf3328_suspend_regs(chip->mixer_io,
- ARRAY_SIZE(chip->saved_regs_mixer), chip->saved_regs_mixer);
-
- /* make sure to disable master volume etc. to prevent looping sound */
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_PLAY_MASTER, 1);
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
+ snd_azf3328_suspend_ac97(chip);
snd_azf3328_suspend_regs(chip->ctrl_io,
ARRAY_SIZE(chip->saved_regs_ctrl), chip->saved_regs_ctrl);
@@ -2447,23 +2825,6 @@ snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
return 0;
}
-static inline void
-snd_azf3328_resume_regs(const u32 *saved_regs,
- unsigned long io_addr,
- unsigned count
-)
-{
- unsigned reg;
-
- for (reg = 0; reg < count; ++reg) {
- outl(*saved_regs, io_addr);
- snd_azf3328_dbgpm("resume: io 0x%04lx: 0x%08x --> 0x%08x\n",
- io_addr, *saved_regs, inl(io_addr));
- ++saved_regs;
- io_addr += sizeof(*saved_regs);
- }
-}
-
static int
snd_azf3328_resume(struct pci_dev *pci)
{
@@ -2487,14 +2848,7 @@ snd_azf3328_resume(struct pci_dev *pci)
snd_azf3328_resume_regs(chip->saved_regs_opl3, chip->opl3_io,
ARRAY_SIZE(chip->saved_regs_opl3));
- snd_azf3328_resume_regs(chip->saved_regs_mixer, chip->mixer_io,
- ARRAY_SIZE(chip->saved_regs_mixer));
-
- /* unfortunately with 32bit transfers, IDX_MIXER_PLAY_MASTER (0x02)
- and IDX_MIXER_RESET (offset 0x00) get touched at the same time,
- resulting in a mixer reset condition persisting until _after_
- master vol was restored. Thus master vol needs an extra restore. */
- outw(((u16 *)chip->saved_regs_mixer)[1], chip->mixer_io + 2);
+ snd_azf3328_resume_ac97(chip);
snd_azf3328_resume_regs(chip->saved_regs_ctrl, chip->ctrl_io,
ARRAY_SIZE(chip->saved_regs_ctrl));
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 66c7fb3ced3e..5e619a84da06 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -926,7 +926,7 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19);
/* Unknown. */
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c);
- /* IRQ Enable: Alll on */
+ /* IRQ Enable: All on */
/* snd_emu1010_fpga_write(emu, 0x09, 0x0f ); */
/* IRQ Enable: All off */
snd_emu1010_fpga_write(emu, EMU_HANA_IRQ_ENABLE, 0x00);
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 8dabab798689..734c6ee55d8a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -30,10 +30,10 @@
#include "hda_beep.h"
struct ad198x_spec {
- struct snd_kcontrol_new *mixers[5];
+ struct snd_kcontrol_new *mixers[6];
int num_mixers;
unsigned int beep_amp; /* beep amp value, set via set_beep_amp() */
- const struct hda_verb *init_verbs[5]; /* initialization verbs
+ const struct hda_verb *init_verbs[6]; /* initialization verbs
* don't forget NULL termination!
*/
unsigned int num_init_verbs;
@@ -331,36 +331,11 @@ static int ad198x_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
}
-static int ad198x_alt_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- snd_hda_codec_setup_stream(codec, spec->alt_dac_nid[0], stream_tag,
- 0, format);
- return 0;
-}
-
-static int ad198x_alt_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- snd_hda_codec_cleanup_stream(codec, spec->alt_dac_nid[0]);
- return 0;
-}
-
static struct hda_pcm_stream ad198x_pcm_analog_alt_playback = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
/* NID is set in ad198x_build_pcms */
- .ops = {
- .prepare = ad198x_alt_playback_pcm_prepare,
- .cleanup = ad198x_alt_playback_pcm_cleanup
- },
};
/*
@@ -2239,29 +2214,6 @@ static struct snd_kcontrol_new ad1988_6stack_mixers2[] = {
static struct snd_kcontrol_new ad1988_6stack_fp_mixers[] = {
HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Front Playback Switch", 0x29, 2, HDA_INPUT),
- HDA_BIND_MUTE("Surround Playback Switch", 0x2a, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("Center Playback Switch", 0x27, 1, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x27, 2, 2, HDA_INPUT),
- HDA_BIND_MUTE("Side Playback Switch", 0x28, 2, HDA_INPUT),
- HDA_BIND_MUTE("Headphone Playback Switch", 0x22, 2, HDA_INPUT),
- HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x4, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
-
{ } /* end */
};
@@ -2545,11 +2497,6 @@ static struct hda_verb ad1988_6stack_init_verbs[] = {
};
static struct hda_verb ad1988_6stack_fp_init_verbs[] = {
- /* Front, Surround, CLFE, side DAC; unmute as default */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
/* Headphone; unmute as default */
{0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
/* Port-A front headphon path */
@@ -2558,50 +2505,6 @@ static struct hda_verb ad1988_6stack_fp_init_verbs[] = {
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
{0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
{0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- /* Port-D line-out path */
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-F surround path */
- {0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-G CLFE path */
- {0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-H side path */
- {0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x25, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Mono out path */
- {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
- /* Port-B front mic-in path */
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Port-C line-in path */
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Port-E mic-in path */
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x34, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Analog CD Input */
- {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
{ }
};
@@ -3316,20 +3219,20 @@ static int patch_ad1988(struct hda_codec *codec)
spec->mixers[0] = ad1988_6stack_mixers1_rev2;
else
spec->mixers[0] = ad1988_6stack_mixers1;
+ spec->mixers[1] = ad1988_6stack_mixers2;
+ spec->num_init_verbs = 1;
+ spec->init_verbs[0] = ad1988_6stack_init_verbs;
if (board_config == AD1988_6STACK_DIG_FP) {
- spec->mixers[1] = ad1988_6stack_fp_mixers;
+ spec->num_mixers++;
+ spec->mixers[2] = ad1988_6stack_fp_mixers;
+ spec->num_init_verbs++;
+ spec->init_verbs[1] = ad1988_6stack_fp_init_verbs;
spec->slave_vols = ad1988_6stack_fp_slave_vols;
spec->slave_sws = ad1988_6stack_fp_slave_sws;
spec->alt_dac_nid = ad1988_alt_dac_nid;
spec->stream_analog_alt_playback =
&ad198x_pcm_analog_alt_playback;
- } else
- spec->mixers[1] = ad1988_6stack_mixers2;
- spec->num_init_verbs = 1;
- if (board_config == AD1988_6STACK_DIG_FP)
- spec->init_verbs[0] = ad1988_6stack_fp_init_verbs;
- else
- spec->init_verbs[0] = ad1988_6stack_init_verbs;
+ }
if ((board_config == AD1988_6STACK_DIG) ||
(board_config == AD1988_6STACK_DIG_FP)) {
spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index dd7c5c12225d..4d5004e693f0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3937,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
.patch = patch_cxt5066 },
{ .id = 0x14f15069, .name = "CX20585",
.patch = patch_cxt5066 },
+ { .id = 0x14f1506e, .name = "CX20590",
+ .patch = patch_cxt5066 },
{ .id = 0x14f15097, .name = "CX20631",
.patch = patch_conexant_auto },
{ .id = 0x14f15098, .name = "CX20632",
@@ -3963,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
MODULE_ALIAS("snd-hda-codec-id:14f15067");
MODULE_ALIAS("snd-hda-codec-id:14f15068");
MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506e");
MODULE_ALIAS("snd-hda-codec-id:14f15097");
MODULE_ALIAS("snd-hda-codec-id:14f15098");
MODULE_ALIAS("snd-hda-codec-id:14f150a1");
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index a58767736727..bb4930484ec4 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -110,6 +110,12 @@ struct dp_audio_infoframe {
u8 LFEPBL01_LSV36_DM_INH7;
};
+union audio_infoframe {
+ struct hdmi_audio_infoframe hdmi;
+ struct dp_audio_infoframe dp;
+ u8 bytes[0];
+};
+
/*
* CEA speaker placement:
*
@@ -620,8 +626,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
int channels = substream->runtime->channels;
int ca;
int i;
- u8 ai[max(sizeof(struct hdmi_audio_infoframe),
- sizeof(struct dp_audio_infoframe))];
+ union audio_infoframe ai;
ca = hdmi_channel_allocation(codec, nid, channels);
@@ -633,11 +638,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
pin_nid = spec->pin[i];
- memset(ai, 0, sizeof(ai));
+ memset(&ai, 0, sizeof(ai));
if (spec->sink_eld[i].conn_type == 0) { /* HDMI */
- struct hdmi_audio_infoframe *hdmi_ai;
+ struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
- hdmi_ai = (struct hdmi_audio_infoframe *)ai;
hdmi_ai->type = 0x84;
hdmi_ai->ver = 0x01;
hdmi_ai->len = 0x0a;
@@ -645,9 +649,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
hdmi_ai->CA = ca;
hdmi_checksum_audio_infoframe(hdmi_ai);
} else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */
- struct dp_audio_infoframe *dp_ai;
+ struct dp_audio_infoframe *dp_ai = &ai.dp;
- dp_ai = (struct dp_audio_infoframe *)ai;
dp_ai->type = 0x84;
dp_ai->len = 0x1b;
dp_ai->ver = 0x11 << 2;
@@ -664,7 +667,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
* sizeof(*dp_ai) to avoid partial match/update problems when
* the user switches between HDMI/DP monitors.
*/
- if (!hdmi_infoframe_uptodate(codec, pin_nid, ai, sizeof(ai))) {
+ if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
+ sizeof(ai))) {
snd_printdd("hdmi_setup_audio_infoframe: "
"cvt=%d pin=%d channels=%d\n",
nid, pin_nid,
@@ -672,7 +676,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
hdmi_setup_channel_mapping(codec, pin_nid, ca);
hdmi_stop_infoframe_trans(codec, pin_nid);
hdmi_fill_audio_infoframe(codec, pin_nid,
- ai, sizeof(ai));
+ ai.bytes, sizeof(ai));
hdmi_start_infoframe_trans(codec, pin_nid);
}
}
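
The patch_hdmi.c change swaps a max()-sized byte array for a union: the buffer stays large enough for either infoframe variant, each branch gets a properly typed (and aligned) view, and the zero-length member keeps a raw byte view for the transmit helpers. A generic, self-contained sketch of the same pattern with hypothetical frame layouts:

#include <string.h>

struct frame_a { unsigned char type, ver, len, payload[10]; };
struct frame_b { unsigned char type, len, ver, payload[27]; };

union frame {
	struct frame_a a;
	struct frame_b b;
	unsigned char bytes[0];	/* raw view (GNU zero-length array, as in the hunk) */
};

static void fill_variant_a(union frame *f)
{
	memset(f, 0, sizeof(*f));	/* zero the whole union, not just one member */
	f->a.type = 0x84;
	f->a.ver  = 0x01;
	f->a.len  = sizeof(f->a.payload);
}
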
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3328a259a242..f721a1845bf6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl)
nid = spec->autocfg.hp_pins[i];
if (!nid)
break;
- if (snd_hda_jack_detect(codec, nid)) {
- spec->jack_present = 1;
- break;
- }
- alc_report_jack(codec, spec->autocfg.hp_pins[i]);
+ alc_report_jack(codec, nid);
+ spec->jack_present |= snd_hda_jack_detect(codec, nid);
}
mute = spec->jack_present ? HDA_AMP_MUTE : 0;
@@ -2295,13 +2292,13 @@ static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = {
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0f, 2, 0x0,
+ HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0,
HDA_OUTPUT),
- HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0f, 2, 2, HDA_INPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT),
- HDA_CODEC_VOLUME("Side Playback Volume", 0x0e, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Side Playback Switch", 0x0e, 2, HDA_INPUT),
+ HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME_MONO("Internal LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE_MONO("Internal LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
@@ -2312,7 +2309,6 @@ static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = {
{ } /* end */
};
-
static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -4705,7 +4701,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x5401, "ASUS", ALC880_ASUS_DIG2),
SND_PCI_QUIRK(0x1565, 0x8202, "Biostar", ALC880_5ST_DIG),
SND_PCI_QUIRK(0x1584, 0x9050, "Uniwill", ALC880_UNIWILL_DIG),
- SND_PCI_QUIRK(0x1584, 0x9054, "Uniwlll", ALC880_F1734),
+ SND_PCI_QUIRK(0x1584, 0x9054, "Uniwill", ALC880_F1734),
SND_PCI_QUIRK(0x1584, 0x9070, "Uniwill", ALC880_UNIWILL),
SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_UNIWILL_P53),
SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_W810),
@@ -15015,7 +15011,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
- SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9ea48b425d0b..bd7b123f6440 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = {
0x0f, 0x10, 0x11, 0x1f, 0x20,
};
-static hda_nid_t stac92hd88xxx_pin_nids[10] = {
+static hda_nid_t stac92hd87xxx_pin_nids[6] = {
+ 0x0a, 0x0b, 0x0c, 0x0d,
+ 0x0f, 0x11,
+};
+
+static hda_nid_t stac92hd88xxx_pin_nids[8] = {
0x0a, 0x0b, 0x0c, 0x0d,
0x0f, 0x11, 0x1f, 0x20,
};
@@ -5430,12 +5435,13 @@ again:
switch (codec->vendor_id) {
case 0x111d76d1:
case 0x111d76d9:
+ case 0x111d76e5:
spec->dmic_nids = stac92hd87b_dmic_nids;
spec->num_dmics = stac92xx_connected_ports(codec,
stac92hd87b_dmic_nids,
STAC92HD87B_NUM_DMICS);
- spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
- spec->pin_nids = stac92hd88xxx_pin_nids;
+ spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
+ spec->pin_nids = stac92hd87xxx_pin_nids;
spec->mono_nid = 0;
spec->num_pwrs = 0;
break;
@@ -5443,6 +5449,7 @@ again:
case 0x111d7667:
case 0x111d7668:
case 0x111d7669:
+ case 0x111d76e3:
spec->num_dmics = stac92xx_connected_ports(codec,
stac92hd88xxx_dmic_nids,
STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
{ .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
+ { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
+ { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
{ .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
{} /* terminator */
};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index a76c3260d941..63b0054200a8 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
hda_nid_t nid = cfg->inputs[i].pin;
if (spec->smart51_enabled && is_smart51_pins(spec, nid))
ctl = PIN_OUT;
- else if (i == AUTO_PIN_MIC)
+ else if (cfg->inputs[i].type == AUTO_PIN_MIC)
ctl = PIN_VREF50;
else
ctl = PIN_IN;
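
The one-line patch_via.c fix matters because the loop index no longer follows the old AUTO_PIN_* ordering; after the auto-parser rework each input entry carries its own type, so the pin kind must be read from the entry itself. A sketch of the corrected check (pin and type names as in the hunk; the num_inputs loop bound is assumed from surrounding code this hunk does not show):

	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;

		if (cfg->inputs[i].type == AUTO_PIN_MIC)
			ctl = PIN_VREF50;	/* microphones need bias voltage */
		else
			ctl = PIN_IN;
		/* ... apply ctl to nid ... */
	}
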
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index f5eadfc0672a..0ed4b4e981d8 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -8,6 +8,21 @@
* Modified 2006-06-01 for AES32 support by Remy Bruno
* <remy.bruno@trinnov.com>
*
+ * Modified 2009-04-13 for proper metering by Florian Faber
+ * <faber@faberman.de>
+ *
+ * Modified 2009-04-14 for native float support by Florian Faber
+ * <faber@faberman.de>
+ *
+ * Modified 2009-04-26 fixed bug in rms metering by Florian Faber
+ * <faber@faberman.de>
+ *
+ * Modified 2009-04-30 added hw serial number support by Florian Faber
+ *
+ * Modified 2011-01-14 added S/PDIF input on RayDATs by Adrian Knoth
+ *
+ * Modified 2011-01-25 variable period sizes on RayDAT/AIO by Adrian Knoth
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -35,6 +50,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
+#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/asoundef.h>
#include <sound/rawmidi.h>
@@ -47,15 +63,6 @@ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */
-/* Disable precise pointer at start */
-static int precise_ptr[SNDRV_CARDS];
-
-/* Send all playback to line outs */
-static int line_outs_monitor[SNDRV_CARDS];
-
-/* Enable Analog Outs on Channel 63/64 by default */
-static int enable_monitor[SNDRV_CARDS];
-
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for RME HDSPM interface.");
@@ -65,42 +72,39 @@ MODULE_PARM_DESC(id, "ID string for RME HDSPM interface.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable/disable specific HDSPM soundcards.");
-module_param_array(precise_ptr, bool, NULL, 0444);
-MODULE_PARM_DESC(precise_ptr, "Enable or disable precise pointer.");
-
-module_param_array(line_outs_monitor, bool, NULL, 0444);
-MODULE_PARM_DESC(line_outs_monitor,
- "Send playback streams to analog outs by default.");
-
-module_param_array(enable_monitor, bool, NULL, 0444);
-MODULE_PARM_DESC(enable_monitor,
- "Enable Analog Out on Channel 63/64 by default.");
MODULE_AUTHOR
- ("Winfried Ritsch <ritsch_AT_iem.at>, "
- "Paul Davis <paul@linuxaudiosystems.com>, "
- "Marcus Andersson, Thomas Charbonnel <thomas@undata.org>, "
- "Remy Bruno <remy.bruno@trinnov.com>");
+(
+ "Winfried Ritsch <ritsch_AT_iem.at>, "
+ "Paul Davis <paul@linuxaudiosystems.com>, "
+ "Marcus Andersson, Thomas Charbonnel <thomas@undata.org>, "
+ "Remy Bruno <remy.bruno@trinnov.com>, "
+ "Florian Faber <faberman@linuxproaudio.org>, "
+ "Adrian Knoth <adi@drcomp.erfurt.thur.de>"
+);
MODULE_DESCRIPTION("RME HDSPM");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
-/* --- Write registers. ---
+/* --- Write registers. ---
These are defined as byte-offsets from the iobase value. */
+#define HDSPM_WR_SETTINGS 0
+#define HDSPM_outputBufferAddress 32
+#define HDSPM_inputBufferAddress 36
#define HDSPM_controlRegister 64
#define HDSPM_interruptConfirmation 96
#define HDSPM_control2Reg 256 /* not in specs ???????? */
#define HDSPM_freqReg 256 /* for AES32 */
-#define HDSPM_midiDataOut0 352 /* just believe in old code */
-#define HDSPM_midiDataOut1 356
+#define HDSPM_midiDataOut0 352 /* just believe in old code */
+#define HDSPM_midiDataOut1 356
#define HDSPM_eeprom_wr 384 /* for AES32 */
/* DMA enable for 64 channels, only Bit 0 is relevant */
-#define HDSPM_outputEnableBase 512 /* 512-767 input DMA */
+#define HDSPM_outputEnableBase 512 /* 512-767 input DMA */
#define HDSPM_inputEnableBase 768 /* 768-1023 output DMA */
-/* 16 page addresses for each of the 64 channels DMA buffer in and out
+/* 16 page addresses for each of the 64 channels DMA buffer in and out
(each 64k=16*4k) Buffer must be 4k aligned (which is default i386 ????) */
#define HDSPM_pageAddressBufferOut 8192
#define HDSPM_pageAddressBufferIn (HDSPM_pageAddressBufferOut+64*16*4)
@@ -119,22 +123,84 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_statusRegister2 192
#define HDSPM_timecodeRegister 128
+/* AIO, RayDAT */
+#define HDSPM_RD_STATUS_0 0
+#define HDSPM_RD_STATUS_1 64
+#define HDSPM_RD_STATUS_2 128
+#define HDSPM_RD_STATUS_3 192
+
+#define HDSPM_RD_TCO 256
+#define HDSPM_RD_PLL_FREQ 512
+#define HDSPM_WR_TCO 128
+
+#define HDSPM_TCO1_TCO_lock 0x00000001
+#define HDSPM_TCO1_WCK_Input_Range_LSB 0x00000002
+#define HDSPM_TCO1_WCK_Input_Range_MSB 0x00000004
+#define HDSPM_TCO1_LTC_Input_valid 0x00000008
+#define HDSPM_TCO1_WCK_Input_valid 0x00000010
+#define HDSPM_TCO1_Video_Input_Format_NTSC 0x00000020
+#define HDSPM_TCO1_Video_Input_Format_PAL 0x00000040
+
+#define HDSPM_TCO1_set_TC 0x00000100
+#define HDSPM_TCO1_set_drop_frame_flag 0x00000200
+#define HDSPM_TCO1_LTC_Format_LSB 0x00000400
+#define HDSPM_TCO1_LTC_Format_MSB 0x00000800
+
+#define HDSPM_TCO2_TC_run 0x00010000
+#define HDSPM_TCO2_WCK_IO_ratio_LSB 0x00020000
+#define HDSPM_TCO2_WCK_IO_ratio_MSB 0x00040000
+#define HDSPM_TCO2_set_num_drop_frames_LSB 0x00080000
+#define HDSPM_TCO2_set_num_drop_frames_MSB 0x00100000
+#define HDSPM_TCO2_set_jam_sync 0x00200000
+#define HDSPM_TCO2_set_flywheel 0x00400000
+
+#define HDSPM_TCO2_set_01_4 0x01000000
+#define HDSPM_TCO2_set_pull_down 0x02000000
+#define HDSPM_TCO2_set_pull_up 0x04000000
+#define HDSPM_TCO2_set_freq 0x08000000
+#define HDSPM_TCO2_set_term_75R 0x10000000
+#define HDSPM_TCO2_set_input_LSB 0x20000000
+#define HDSPM_TCO2_set_input_MSB 0x40000000
+#define HDSPM_TCO2_set_freq_from_app 0x80000000
+
+
+#define HDSPM_midiDataOut0 352
+#define HDSPM_midiDataOut1 356
+#define HDSPM_midiDataOut2 368
+
#define HDSPM_midiDataIn0 360
#define HDSPM_midiDataIn1 364
+#define HDSPM_midiDataIn2 372
+#define HDSPM_midiDataIn3 376
/* status is data bytes in MIDI-FIFO (0-128) */
-#define HDSPM_midiStatusOut0 384
-#define HDSPM_midiStatusOut1 388
-#define HDSPM_midiStatusIn0 392
-#define HDSPM_midiStatusIn1 396
+#define HDSPM_midiStatusOut0 384
+#define HDSPM_midiStatusOut1 388
+#define HDSPM_midiStatusOut2 400
+
+#define HDSPM_midiStatusIn0 392
+#define HDSPM_midiStatusIn1 396
+#define HDSPM_midiStatusIn2 404
+#define HDSPM_midiStatusIn3 408
/* the meters are regular i/o-mapped registers, but offset
considerably from the rest. the peak registers are reset
- when read; the least-significant 4 bits are full-scale counters;
+ when read; the least-significant 4 bits are full-scale counters;
the actual peak value is in the most-significant 24 bits.
*/
-#define HDSPM_MADI_peakrmsbase 4096 /* 4096-8191 2x64x32Bit Meters */
+
+#define HDSPM_MADI_INPUT_PEAK 4096
+#define HDSPM_MADI_PLAYBACK_PEAK 4352
+#define HDSPM_MADI_OUTPUT_PEAK 4608
+
+#define HDSPM_MADI_INPUT_RMS_L 6144
+#define HDSPM_MADI_PLAYBACK_RMS_L 6400
+#define HDSPM_MADI_OUTPUT_RMS_L 6656
+
+#define HDSPM_MADI_INPUT_RMS_H 7168
+#define HDSPM_MADI_PLAYBACK_RMS_H 7424
+#define HDSPM_MADI_OUTPUT_RMS_H 7680
/* --- Control Register bits --------- */
#define HDSPM_Start (1<<0) /* start engine */
@@ -143,7 +209,9 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_Latency1 (1<<2) /* where n is defined */
#define HDSPM_Latency2 (1<<3) /* by Latency{2,1,0} */
-#define HDSPM_ClockModeMaster (1<<4) /* 1=Master, 0=Slave/Autosync */
+#define HDSPM_ClockModeMaster (1<<4) /* 1=Master, 0=Autosync */
+#define HDSPM_c0Master 0x1 /* Master clock bit in settings
+ register [RayDAT, AIO] */
#define HDSPM_AudioInterruptEnable (1<<5) /* what do you think ? */
@@ -157,7 +225,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
56channelMODE=0 */ /* MADI ONLY*/
#define HDSPM_Emphasis (1<<10) /* Emphasis */ /* AES32 ONLY */
-#define HDSPM_AutoInp (1<<11) /* Auto Input (takeover) == Safe Mode,
+#define HDSPM_AutoInp (1<<11) /* Auto Input (takeover) == Safe Mode,
0=off, 1=on */ /* MADI ONLY */
#define HDSPM_Dolby (1<<11) /* Dolby = "NonAudio" ?? */ /* AES32 ONLY */
@@ -166,22 +234,23 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
*/
#define HDSPM_InputSelect1 (1<<15) /* should be 0 */
-#define HDSPM_SyncRef0 (1<<16) /* 0=WOrd, 1=MADI */
-#define HDSPM_SyncRef1 (1<<17) /* for AES32: SyncRefN codes the AES # */
#define HDSPM_SyncRef2 (1<<13)
#define HDSPM_SyncRef3 (1<<25)
#define HDSPM_SMUX (1<<18) /* Frame ??? */ /* MADI ONY */
-#define HDSPM_clr_tms (1<<19) /* clear track marker, do not use
+#define HDSPM_clr_tms (1<<19) /* clear track marker, do not use
AES additional bits in
lower 5 Audiodatabits ??? */
#define HDSPM_taxi_reset (1<<20) /* ??? */ /* MADI ONLY ? */
#define HDSPM_WCK48 (1<<20) /* Frame ??? = HDSPM_SMUX */ /* AES32 ONLY */
-#define HDSPM_Midi0InterruptEnable (1<<22)
-#define HDSPM_Midi1InterruptEnable (1<<23)
+#define HDSPM_Midi0InterruptEnable 0x0400000
+#define HDSPM_Midi1InterruptEnable 0x0800000
+#define HDSPM_Midi2InterruptEnable 0x0200000
+#define HDSPM_Midi3InterruptEnable 0x4000000
#define HDSPM_LineOut (1<<24) /* Analog Out on channel 63/64 on=1, mute=0 */
+#define HDSPe_FLOAT_FORMAT 0x2000000
#define HDSPM_DS_DoubleWire (1<<26) /* AES32 ONLY */
#define HDSPM_QS_DoubleWire (1<<27) /* AES32 ONLY */
@@ -198,11 +267,18 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_InputCoaxial (HDSPM_InputSelect0)
#define HDSPM_SyncRefMask (HDSPM_SyncRef0|HDSPM_SyncRef1|\
HDSPM_SyncRef2|HDSPM_SyncRef3)
-#define HDSPM_SyncRef_Word 0
-#define HDSPM_SyncRef_MADI (HDSPM_SyncRef0)
-#define HDSPM_SYNC_FROM_WORD 0 /* Preferred sync reference */
-#define HDSPM_SYNC_FROM_MADI 1 /* choices - used by "pref_sync_ref" */
+#define HDSPM_c0_SyncRef0 0x2
+#define HDSPM_c0_SyncRef1 0x4
+#define HDSPM_c0_SyncRef2 0x8
+#define HDSPM_c0_SyncRef3 0x10
+#define HDSPM_c0_SyncRefMask (HDSPM_c0_SyncRef0 | HDSPM_c0_SyncRef1 |\
+ HDSPM_c0_SyncRef2 | HDSPM_c0_SyncRef3)
+
+#define HDSPM_SYNC_FROM_WORD 0 /* Preferred sync reference */
+#define HDSPM_SYNC_FROM_MADI 1 /* choices - used by "pref_sync_ref" */
+#define HDSPM_SYNC_FROM_TCO 2
+#define HDSPM_SYNC_FROM_SYNC_IN 3
#define HDSPM_Frequency32KHz HDSPM_Frequency0
#define HDSPM_Frequency44_1KHz HDSPM_Frequency1
@@ -216,17 +292,6 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_Frequency192KHz (HDSPM_QuadSpeed|HDSPM_Frequency1|\
HDSPM_Frequency0)
-/* --- for internal discrimination */
-#define HDSPM_CLOCK_SOURCE_AUTOSYNC 0 /* Sample Clock Sources */
-#define HDSPM_CLOCK_SOURCE_INTERNAL_32KHZ 1
-#define HDSPM_CLOCK_SOURCE_INTERNAL_44_1KHZ 2
-#define HDSPM_CLOCK_SOURCE_INTERNAL_48KHZ 3
-#define HDSPM_CLOCK_SOURCE_INTERNAL_64KHZ 4
-#define HDSPM_CLOCK_SOURCE_INTERNAL_88_2KHZ 5
-#define HDSPM_CLOCK_SOURCE_INTERNAL_96KHZ 6
-#define HDSPM_CLOCK_SOURCE_INTERNAL_128KHZ 7
-#define HDSPM_CLOCK_SOURCE_INTERNAL_176_4KHZ 8
-#define HDSPM_CLOCK_SOURCE_INTERNAL_192KHZ 9
/* Synccheck Status */
#define HDSPM_SYNC_CHECK_NO_LOCK 0
@@ -236,14 +301,16 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
/* AutoSync References - used by "autosync_ref" control switch */
#define HDSPM_AUTOSYNC_FROM_WORD 0
#define HDSPM_AUTOSYNC_FROM_MADI 1
-#define HDSPM_AUTOSYNC_FROM_NONE 2
+#define HDSPM_AUTOSYNC_FROM_TCO 2
+#define HDSPM_AUTOSYNC_FROM_SYNC_IN 3
+#define HDSPM_AUTOSYNC_FROM_NONE 4
/* Possible sources of MADI input */
#define HDSPM_OPTICAL 0 /* optical */
#define HDSPM_COAXIAL 1 /* BNC */
#define hdspm_encode_latency(x) (((x)<<1) & HDSPM_LatencyMask)
-#define hdspm_decode_latency(x) (((x) & HDSPM_LatencyMask)>>1)
+#define hdspm_decode_latency(x) ((((x) & HDSPM_LatencyMask)>>1))
#define hdspm_encode_in(x) (((x)&0x3)<<14)
#define hdspm_decode_in(x) (((x)>>14)&0x3)
@@ -270,13 +337,21 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_AB_int (1<<2) /* InputChannel Opt=0, Coax=1
* (like inp0)
*/
+
#define HDSPM_madiLock (1<<3) /* MADI Locked =1, no=0 */
+#define HDSPM_madiSync (1<<18) /* MADI is in sync */
+
+#define HDSPM_tcoLock 0x00000020 /* Optional TCO locked status FOR HDSPe MADI! */
+#define HDSPM_tcoSync 0x10000000 /* Optional TCO sync status */
+
+#define HDSPM_syncInLock 0x00010000 /* Sync In lock status FOR HDSPe MADI! */
+#define HDSPM_syncInSync 0x00020000 /* Sync In sync status FOR HDSPe MADI! */
#define HDSPM_BufferPositionMask 0x000FFC0 /* Bit 6..15 : h/w buffer pointer */
- /* since 64byte accurate last 6 bits
- are not used */
+			/* since it is 64-byte accurate, the last 6 bits are not used */
+
+
-#define HDSPM_madiSync (1<<18) /* MADI is in sync */
#define HDSPM_DoubleSpeedStatus (1<<19) /* (input) card in double speed */
#define HDSPM_madiFreq0 (1<<22) /* system freq 0=error */
@@ -287,8 +362,19 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_BufferID (1<<26) /* (Double)Buffer ID toggles with
* Interrupt
*/
-#define HDSPM_midi0IRQPending (1<<30) /* MIDI IRQ is pending */
-#define HDSPM_midi1IRQPending (1<<31) /* and aktiv */
+#define HDSPM_tco_detect 0x08000000
+#define HDSPM_tco_lock 0x20000000
+
+#define HDSPM_s2_tco_detect 0x00000040
+#define HDSPM_s2_AEBO_D 0x00000080
+#define HDSPM_s2_AEBI_D 0x00000100
+
+
+#define HDSPM_midi0IRQPending 0x40000000
+#define HDSPM_midi1IRQPending 0x80000000
+#define HDSPM_midi2IRQPending 0x20000000
+#define HDSPM_midi2IRQPendingAES 0x00000020
+#define HDSPM_midi3IRQPending 0x00200000
/* --- status bit helpers */
#define HDSPM_madiFreqMask (HDSPM_madiFreq0|HDSPM_madiFreq1|\
@@ -317,7 +403,10 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_wc_freq2 (1<<7) /* 100=64, 101=88.2, 110=96, */
/* missing Bit for 111=128, 1000=176.4, 1001=192 */
-#define HDSPM_SelSyncRef0 (1<<8) /* Sync Source in slave mode */
+#define HDSPM_SyncRef0 0x10000 /* Sync Reference */
+#define HDSPM_SyncRef1 0x20000
+
+#define HDSPM_SelSyncRef0 (1<<8) /* AutoSync Source */
#define HDSPM_SelSyncRef1 (1<<9) /* 000=word, 001=MADI, */
#define HDSPM_SelSyncRef2 (1<<10) /* 111=no valid signal */
@@ -331,11 +420,19 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_wcFreq88_2 (HDSPM_wc_freq0|HDSPM_wc_freq2)
#define HDSPM_wcFreq96 (HDSPM_wc_freq1|HDSPM_wc_freq2)
+#define HDSPM_status1_F_0 0x0400000
+#define HDSPM_status1_F_1 0x0800000
+#define HDSPM_status1_F_2 0x1000000
+#define HDSPM_status1_F_3 0x2000000
+#define HDSPM_status1_freqMask (HDSPM_status1_F_0|HDSPM_status1_F_1|HDSPM_status1_F_2|HDSPM_status1_F_3)
+
#define HDSPM_SelSyncRefMask (HDSPM_SelSyncRef0|HDSPM_SelSyncRef1|\
HDSPM_SelSyncRef2)
#define HDSPM_SelSyncRef_WORD 0
#define HDSPM_SelSyncRef_MADI (HDSPM_SelSyncRef0)
+#define HDSPM_SelSyncRef_TCO (HDSPM_SelSyncRef1)
+#define HDSPM_SelSyncRef_SyncIn (HDSPM_SelSyncRef0|HDSPM_SelSyncRef1)
#define HDSPM_SelSyncRef_NVALID (HDSPM_SelSyncRef0|HDSPM_SelSyncRef1|\
HDSPM_SelSyncRef2)
@@ -345,7 +442,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
/* status */
#define HDSPM_AES32_wcLock 0x0200000
#define HDSPM_AES32_wcFreq_bit 22
-/* (status >> HDSPM_AES32_wcFreq_bit) & 0xF gives WC frequency (cf function
+/* (status >> HDSPM_AES32_wcFreq_bit) & 0xF gives WC frequency (cf function
HDSPM_bit2freq */
#define HDSPM_AES32_syncref_bit 16
/* (status >> HDSPM_AES32_syncref_bit) & 0xF gives sync source */
@@ -398,28 +495,347 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define MADI_DS_CHANNELS 32
#define MADI_QS_CHANNELS 16
+#define RAYDAT_SS_CHANNELS 36
+#define RAYDAT_DS_CHANNELS 20
+#define RAYDAT_QS_CHANNELS 12
+
+#define AIO_IN_SS_CHANNELS 14
+#define AIO_IN_DS_CHANNELS 10
+#define AIO_IN_QS_CHANNELS 8
+#define AIO_OUT_SS_CHANNELS 16
+#define AIO_OUT_DS_CHANNELS 12
+#define AIO_OUT_QS_CHANNELS 10
+
+#define AES32_CHANNELS 16
+
/* the size of a substream (1 mono data stream) */
#define HDSPM_CHANNEL_BUFFER_SAMPLES (16*1024)
#define HDSPM_CHANNEL_BUFFER_BYTES (4*HDSPM_CHANNEL_BUFFER_SAMPLES)
/* the size of the area we need to allocate for DMA transfers. the
size is the same regardless of the number of channels, and
- also the latency to use.
+   also of the latency in use.
for one direction !!!
*/
#define HDSPM_DMA_AREA_BYTES (HDSPM_MAX_CHANNELS * HDSPM_CHANNEL_BUFFER_BYTES)
#define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
/* revisions >= 230 indicate AES32 card */
-#define HDSPM_AESREVISION 230
+#define HDSPM_MADI_REV 210
+#define HDSPM_RAYDAT_REV 211
+#define HDSPM_AIO_REV 212
+#define HDSPM_MADIFACE_REV 213
+#define HDSPM_AES_REV 240
+#define HDSPM_AES32_REV 234
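
A minimal sketch (not part of this patch) of how a probe path could map these PCI revision codes onto the io_type used throughout the rest of the change; the probe hunk itself is outside this section, so the switch shape and the -ENODEV fallback are illustrative only:

	switch (hdspm->firmware_rev) {
	case HDSPM_MADI_REV:     hdspm->io_type = MADI;     break;
	case HDSPM_RAYDAT_REV:   hdspm->io_type = RayDAT;   break;
	case HDSPM_AIO_REV:      hdspm->io_type = AIO;      break;
	case HDSPM_MADIFACE_REV: hdspm->io_type = MADIface; break;
	case HDSPM_AES_REV:
	case HDSPM_AES32_REV:    hdspm->io_type = AES32;    break;
	default:
		return -ENODEV;	/* unknown revision, sketch only */
	}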
/* speed factor modes */
#define HDSPM_SPEED_SINGLE 0
#define HDSPM_SPEED_DOUBLE 1
#define HDSPM_SPEED_QUAD 2
+
/* names for speed modes */
static char *hdspm_speed_names[] = { "single", "double", "quad" };
+static char *texts_autosync_aes_tco[] = { "Word Clock",
+ "AES1", "AES2", "AES3", "AES4",
+ "AES5", "AES6", "AES7", "AES8",
+ "TCO" };
+static char *texts_autosync_aes[] = { "Word Clock",
+ "AES1", "AES2", "AES3", "AES4",
+ "AES5", "AES6", "AES7", "AES8" };
+static char *texts_autosync_madi_tco[] = { "Word Clock",
+ "MADI", "TCO", "Sync In" };
+static char *texts_autosync_madi[] = { "Word Clock",
+ "MADI", "Sync In" };
+
+static char *texts_autosync_raydat_tco[] = {
+ "Word Clock",
+ "ADAT 1", "ADAT 2", "ADAT 3", "ADAT 4",
+ "AES", "SPDIF", "TCO", "Sync In"
+};
+static char *texts_autosync_raydat[] = {
+ "Word Clock",
+ "ADAT 1", "ADAT 2", "ADAT 3", "ADAT 4",
+ "AES", "SPDIF", "Sync In"
+};
+static char *texts_autosync_aio_tco[] = {
+ "Word Clock",
+ "ADAT", "AES", "SPDIF", "TCO", "Sync In"
+};
+static char *texts_autosync_aio[] = { "Word Clock",
+ "ADAT", "AES", "SPDIF", "Sync In" };
+
+static char *texts_freq[] = {
+ "No Lock",
+ "32 kHz",
+ "44.1 kHz",
+ "48 kHz",
+ "64 kHz",
+ "88.2 kHz",
+ "96 kHz",
+ "128 kHz",
+ "176.4 kHz",
+ "192 kHz"
+};
+
+static char *texts_ports_madi[] = {
+ "MADI.1", "MADI.2", "MADI.3", "MADI.4", "MADI.5", "MADI.6",
+ "MADI.7", "MADI.8", "MADI.9", "MADI.10", "MADI.11", "MADI.12",
+ "MADI.13", "MADI.14", "MADI.15", "MADI.16", "MADI.17", "MADI.18",
+ "MADI.19", "MADI.20", "MADI.21", "MADI.22", "MADI.23", "MADI.24",
+ "MADI.25", "MADI.26", "MADI.27", "MADI.28", "MADI.29", "MADI.30",
+ "MADI.31", "MADI.32", "MADI.33", "MADI.34", "MADI.35", "MADI.36",
+ "MADI.37", "MADI.38", "MADI.39", "MADI.40", "MADI.41", "MADI.42",
+ "MADI.43", "MADI.44", "MADI.45", "MADI.46", "MADI.47", "MADI.48",
+ "MADI.49", "MADI.50", "MADI.51", "MADI.52", "MADI.53", "MADI.54",
+ "MADI.55", "MADI.56", "MADI.57", "MADI.58", "MADI.59", "MADI.60",
+ "MADI.61", "MADI.62", "MADI.63", "MADI.64",
+};
+
+
+static char *texts_ports_raydat_ss[] = {
+ "ADAT1.1", "ADAT1.2", "ADAT1.3", "ADAT1.4", "ADAT1.5", "ADAT1.6",
+ "ADAT1.7", "ADAT1.8", "ADAT2.1", "ADAT2.2", "ADAT2.3", "ADAT2.4",
+ "ADAT2.5", "ADAT2.6", "ADAT2.7", "ADAT2.8", "ADAT3.1", "ADAT3.2",
+ "ADAT3.3", "ADAT3.4", "ADAT3.5", "ADAT3.6", "ADAT3.7", "ADAT3.8",
+ "ADAT4.1", "ADAT4.2", "ADAT4.3", "ADAT4.4", "ADAT4.5", "ADAT4.6",
+ "ADAT4.7", "ADAT4.8",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R"
+};
+
+static char *texts_ports_raydat_ds[] = {
+ "ADAT1.1", "ADAT1.2", "ADAT1.3", "ADAT1.4",
+ "ADAT2.1", "ADAT2.2", "ADAT2.3", "ADAT2.4",
+ "ADAT3.1", "ADAT3.2", "ADAT3.3", "ADAT3.4",
+ "ADAT4.1", "ADAT4.2", "ADAT4.3", "ADAT4.4",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R"
+};
+
+static char *texts_ports_raydat_qs[] = {
+ "ADAT1.1", "ADAT1.2",
+ "ADAT2.1", "ADAT2.2",
+ "ADAT3.1", "ADAT3.2",
+ "ADAT4.1", "ADAT4.2",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R"
+};
+
+
+static char *texts_ports_aio_in_ss[] = {
+ "Analogue.L", "Analogue.R",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R",
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4", "ADAT.5", "ADAT.6",
+ "ADAT.7", "ADAT.8"
+};
+
+static char *texts_ports_aio_out_ss[] = {
+ "Analogue.L", "Analogue.R",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R",
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4", "ADAT.5", "ADAT.6",
+ "ADAT.7", "ADAT.8",
+ "Phone.L", "Phone.R"
+};
+
+static char *texts_ports_aio_in_ds[] = {
+ "Analogue.L", "Analogue.R",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R",
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4"
+};
+
+static char *texts_ports_aio_out_ds[] = {
+ "Analogue.L", "Analogue.R",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R",
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
+ "Phone.L", "Phone.R"
+};
+
+static char *texts_ports_aio_in_qs[] = {
+ "Analogue.L", "Analogue.R",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R",
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4"
+};
+
+static char *texts_ports_aio_out_qs[] = {
+ "Analogue.L", "Analogue.R",
+ "AES.L", "AES.R",
+ "SPDIF.L", "SPDIF.R",
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
+ "Phone.L", "Phone.R"
+};
+
+static char *texts_ports_aes32[] = {
+ "AES.1", "AES.2", "AES.3", "AES.4", "AES.5", "AES.6", "AES.7",
+ "AES.8", "AES.9.", "AES.10", "AES.11", "AES.12", "AES.13", "AES.14",
+ "AES.15", "AES.16"
+};
+
+/* These tables map the ALSA channels 1..N to the channels that we
+ need to use in order to find the relevant channel buffer. RME
+ refers to this kind of mapping as between "the ADAT channel and
+ the DMA channel." We index it using the logical audio channel,
+ and the value is the DMA channel (i.e. channel buffer number)
+ where the data for that channel can be read/written from/to.
+*/
+
+static char channel_map_unity_ss[HDSPM_MAX_CHANNELS] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63
+};
+
+static char channel_map_raydat_ss[HDSPM_MAX_CHANNELS] = {
+ 4, 5, 6, 7, 8, 9, 10, 11, /* ADAT 1 */
+ 12, 13, 14, 15, 16, 17, 18, 19, /* ADAT 2 */
+ 20, 21, 22, 23, 24, 25, 26, 27, /* ADAT 3 */
+ 28, 29, 30, 31, 32, 33, 34, 35, /* ADAT 4 */
+ 0, 1, /* AES */
+ 2, 3, /* SPDIF */
+ -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+static char channel_map_raydat_ds[HDSPM_MAX_CHANNELS] = {
+ 4, 5, 6, 7, /* ADAT 1 */
+ 8, 9, 10, 11, /* ADAT 2 */
+ 12, 13, 14, 15, /* ADAT 3 */
+ 16, 17, 18, 19, /* ADAT 4 */
+ 0, 1, /* AES */
+ 2, 3, /* SPDIF */
+ -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+static char channel_map_raydat_qs[HDSPM_MAX_CHANNELS] = {
+ 4, 5, /* ADAT 1 */
+ 6, 7, /* ADAT 2 */
+ 8, 9, /* ADAT 3 */
+ 10, 11, /* ADAT 4 */
+ 0, 1, /* AES */
+ 2, 3, /* SPDIF */
+ -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+static char channel_map_aio_in_ss[HDSPM_MAX_CHANNELS] = {
+ 0, 1, /* line in */
+ 8, 9, /* aes in, */
+ 10, 11, /* spdif in */
+ 12, 13, 14, 15, 16, 17, 18, 19, /* ADAT in */
+ -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+static char channel_map_aio_out_ss[HDSPM_MAX_CHANNELS] = {
+ 0, 1, /* line out */
+ 8, 9, /* aes out */
+ 10, 11, /* spdif out */
+ 12, 13, 14, 15, 16, 17, 18, 19, /* ADAT out */
+ 6, 7, /* phone out */
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+static char channel_map_aio_in_ds[HDSPM_MAX_CHANNELS] = {
+ 0, 1, /* line in */
+ 8, 9, /* aes in */
+ 10, 11, /* spdif in */
+ 12, 14, 16, 18, /* adat in */
+ -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static char channel_map_aio_out_ds[HDSPM_MAX_CHANNELS] = {
+ 0, 1, /* line out */
+ 8, 9, /* aes out */
+ 10, 11, /* spdif out */
+ 12, 14, 16, 18, /* adat out */
+ 6, 7, /* phone out */
+ -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static char channel_map_aio_in_qs[HDSPM_MAX_CHANNELS] = {
+ 0, 1, /* line in */
+ 8, 9, /* aes in */
+ 10, 11, /* spdif in */
+ 12, 16, /* adat in */
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static char channel_map_aio_out_qs[HDSPM_MAX_CHANNELS] = {
+ 0, 1, /* line out */
+ 8, 9, /* aes out */
+ 10, 11, /* spdif out */
+ 12, 16, /* adat out */
+ 6, 7, /* phone out */
+ -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static char channel_map_aes32[HDSPM_MAX_CHANNELS] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
struct hdspm_midi {
struct hdspm *hdspm;
int id;
@@ -430,6 +846,21 @@ struct hdspm_midi {
struct timer_list timer;
spinlock_t lock;
int pending;
+ int dataIn;
+ int statusIn;
+ int dataOut;
+ int statusOut;
+ int ie;
+ int irq;
+};
+
+struct hdspm_tco {
+ int input;
+ int framerate;
+ int wordclock;
+ int samplerate;
+ int pull;
+ int term; /* 0 = off, 1 = on */
};
struct hdspm {
@@ -441,21 +872,39 @@ struct hdspm {
char *card_name; /* for procinfo */
 unsigned short firmware_rev; /* don't know if relevant (yes if AES32) */
- unsigned char is_aes32; /* indicates if card is AES32 */
+ uint8_t io_type;
- int precise_ptr; /* use precise pointers, to be tested */
int monitor_outs; /* set up monitoring outs init flag */
u32 control_register; /* cached value */
u32 control2_register; /* cached value */
+ u32 settings_register;
- struct hdspm_midi midi[2];
+ struct hdspm_midi midi[4];
struct tasklet_struct midi_tasklet;
size_t period_bytes;
- unsigned char ss_channels; /* channels of card in single speed */
- unsigned char ds_channels; /* Double Speed */
- unsigned char qs_channels; /* Quad Speed */
+ unsigned char ss_in_channels;
+ unsigned char ds_in_channels;
+ unsigned char qs_in_channels;
+ unsigned char ss_out_channels;
+ unsigned char ds_out_channels;
+ unsigned char qs_out_channels;
+
+ unsigned char max_channels_in;
+ unsigned char max_channels_out;
+
+ char *channel_map_in;
+ char *channel_map_out;
+
+ char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
+ char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
+
+ char **port_names_in;
+ char **port_names_out;
+
+ char **port_names_in_ss, **port_names_in_ds, **port_names_in_qs;
+ char **port_names_out_ss, **port_names_out_ds, **port_names_out_qs;
unsigned char *playback_buffer; /* suitably aligned address */
unsigned char *capture_buffer; /* suitably aligned address */
@@ -468,14 +917,13 @@ struct hdspm {
int last_internal_sample_rate;
int system_sample_rate;
- char *channel_map; /* channel map for DS and Quadspeed */
-
int dev; /* Hardware vars... */
int irq;
unsigned long port;
void __iomem *iobase;
int irq_count; /* for debug */
+ int midiPorts;
struct snd_card *card; /* one card */
struct snd_pcm *pcm; /* has one pcm */
@@ -487,28 +935,17 @@ struct hdspm {
struct snd_kcontrol *playback_mixer_ctls[HDSPM_MAX_CHANNELS];
 /* but too many inputs, so not used */
struct snd_kcontrol *input_mixer_ctls[HDSPM_MAX_CHANNELS];
- /* full mixer accessible over mixer ioctl or hwdep-device */
+	/* full mixer accessible over mixer ioctl or hwdep-device */
struct hdspm_mixer *mixer;
-};
+ struct hdspm_tco *tco; /* NULL if no TCO detected */
-/* These tables map the ALSA channels 1..N to the channels that we
- need to use in order to find the relevant channel buffer. RME
- refer to this kind of mapping as between "the ADAT channel and
- the DMA channel." We index it using the logical audio channel,
- and the value is the DMA channel (i.e. channel buffer number)
- where the data for that channel can be read/written from/to.
-*/
+ char **texts_autosync;
+ int texts_autosync_items;
+
+ cycles_t last_interrupt;
-static char channel_map_madi_ss[HDSPM_MAX_CHANNELS] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63
+ struct hdspm_peak_rms peak_rms;
};
@@ -532,11 +969,11 @@ static int __devinit snd_hdspm_create_alsa_devices(struct snd_card *card,
static int __devinit snd_hdspm_create_pcm(struct snd_card *card,
struct hdspm * hdspm);
-static inline void snd_hdspm_initialize_midi_flush(struct hdspm * hdspm);
-static int hdspm_update_simple_mixer_controls(struct hdspm * hdspm);
-static int hdspm_autosync_ref(struct hdspm * hdspm);
-static int snd_hdspm_set_defaults(struct hdspm * hdspm);
-static void hdspm_set_sgbuf(struct hdspm * hdspm,
+static inline void snd_hdspm_initialize_midi_flush(struct hdspm *hdspm);
+static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm);
+static int hdspm_autosync_ref(struct hdspm *hdspm);
+static int snd_hdspm_set_defaults(struct hdspm *hdspm);
+static void hdspm_set_sgbuf(struct hdspm *hdspm,
struct snd_pcm_substream *substream,
unsigned int reg, int channels);
@@ -550,7 +987,7 @@ static inline int HDSPM_bit2freq(int n)
return bit2freq_tab[n];
}
-/* Write/read to/from HDSPM with Addresses in Bytes
+/* Write/read to/from HDSPM with Addresses in Bytes
not words but only 32Bit writes are allowed */
static inline void hdspm_write(struct hdspm * hdspm, unsigned int reg,
@@ -564,8 +1001,8 @@ static inline unsigned int hdspm_read(struct hdspm * hdspm, unsigned int reg)
return readl(hdspm->iobase + reg);
}
-/* for each output channel (chan) I have an Input (in) and Playback (pb) Fader
- mixer is write only on hardware so we have to cache him for read
+/* for each output channel (chan) there is an Input (in) and Playback (pb) Fader;
+   the mixer is write-only in hardware, so we have to cache it for reads;
each fader is a u32, but uses only the first 16 bit */
static inline int hdspm_read_in_gain(struct hdspm * hdspm, unsigned int chan,
@@ -641,30 +1078,67 @@ static int snd_hdspm_use_is_exclusive(struct hdspm *hdspm)
/* check for external sample rate */
static int hdspm_external_sample_rate(struct hdspm *hdspm)
{
- if (hdspm->is_aes32) {
- unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
- unsigned int status = hdspm_read(hdspm, HDSPM_statusRegister);
- unsigned int timecode =
- hdspm_read(hdspm, HDSPM_timecodeRegister);
+ unsigned int status, status2, timecode;
+ int syncref, rate = 0, rate_bits;
- int syncref = hdspm_autosync_ref(hdspm);
+ switch (hdspm->io_type) {
+ case AES32:
+ status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ timecode = hdspm_read(hdspm, HDSPM_timecodeRegister);
+
+ syncref = hdspm_autosync_ref(hdspm);
if (syncref == HDSPM_AES32_AUTOSYNC_FROM_WORD &&
status & HDSPM_AES32_wcLock)
- return HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit)
- & 0xF);
+ return HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF);
+
if (syncref >= HDSPM_AES32_AUTOSYNC_FROM_AES1 &&
- syncref <= HDSPM_AES32_AUTOSYNC_FROM_AES8 &&
- status2 & (HDSPM_LockAES >>
- (syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1)))
- return HDSPM_bit2freq((timecode >>
- (4*(syncref-HDSPM_AES32_AUTOSYNC_FROM_AES1))) & 0xF);
+ syncref <= HDSPM_AES32_AUTOSYNC_FROM_AES8 &&
+ status2 & (HDSPM_LockAES >>
+ (syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1)))
+ return HDSPM_bit2freq((timecode >> (4*(syncref-HDSPM_AES32_AUTOSYNC_FROM_AES1))) & 0xF);
return 0;
- } else {
- unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
- unsigned int status = hdspm_read(hdspm, HDSPM_statusRegister);
- unsigned int rate_bits;
- int rate = 0;
+ break;
+
+ case MADIface:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+
+ if (!(status & HDSPM_madiLock)) {
+ rate = 0; /* no lock */
+ } else {
+ switch (status & (HDSPM_status1_freqMask)) {
+ case HDSPM_status1_F_0*1:
+ rate = 32000; break;
+ case HDSPM_status1_F_0*2:
+ rate = 44100; break;
+ case HDSPM_status1_F_0*3:
+ rate = 48000; break;
+ case HDSPM_status1_F_0*4:
+ rate = 64000; break;
+ case HDSPM_status1_F_0*5:
+ rate = 88200; break;
+ case HDSPM_status1_F_0*6:
+ rate = 96000; break;
+ case HDSPM_status1_F_0*7:
+ rate = 128000; break;
+ case HDSPM_status1_F_0*8:
+ rate = 176400; break;
+ case HDSPM_status1_F_0*9:
+ rate = 192000; break;
+ default:
+ rate = 0; break;
+ }
+ }
+
+ break;
+
+ case MADI:
+ case AIO:
+ case RayDAT:
+ status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ rate = 0;
/* if wordclock has synced freq and wordclock is valid */
if ((status2 & HDSPM_wcLock) != 0 &&
@@ -672,6 +1146,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
rate_bits = status2 & HDSPM_wcFreqMask;
+
switch (rate_bits) {
case HDSPM_wcFreq32:
rate = 32000;
@@ -691,7 +1166,6 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
case HDSPM_wcFreq96:
rate = 96000;
break;
- /* Quadspeed Bit missing ???? */
default:
rate = 0;
break;
@@ -702,10 +1176,10 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
* word has priority to MADI
*/
if (rate != 0 &&
- (status2 & HDSPM_SelSyncRefMask) == HDSPM_SelSyncRef_WORD)
+ (status2 & HDSPM_SelSyncRefMask) == HDSPM_SelSyncRef_WORD)
return rate;
- /* maby a madi input (which is taken if sel sync is madi) */
+ /* maybe a madi input (which is taken if sel sync is madi) */
if (status & HDSPM_madiLock) {
rate_bits = status & HDSPM_madiFreqMask;
@@ -742,36 +1216,35 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
break;
}
}
- return rate;
+ break;
}
+
+ return rate;
}
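
The MADIface branch above decodes a four-bit frequency class from HDSPM_status1_freqMask, where class 0 means no lock and classes 1..9 follow the texts_freq order. A minimal table-driven sketch of the same decode (table and helper names are illustrative, not from the patch):

	static const int sketch_class_to_rate[10] = {
		0, 32000, 44100, 48000, 64000,
		88200, 96000, 128000, 176400, 192000
	};

	static int sketch_madiface_rate(unsigned int status)
	{
		unsigned int freq_class =
			(status & HDSPM_status1_freqMask) / HDSPM_status1_F_0;

		return (freq_class < 10) ? sketch_class_to_rate[freq_class] : 0;
	}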
/* Latency function */
-static inline void hdspm_compute_period_size(struct hdspm * hdspm)
+static inline void hdspm_compute_period_size(struct hdspm *hdspm)
{
- hdspm->period_bytes =
- 1 << ((hdspm_decode_latency(hdspm->control_register) + 8));
+ hdspm->period_bytes = 1 << ((hdspm_decode_latency(hdspm->control_register) + 8));
}
-static snd_pcm_uframes_t hdspm_hw_pointer(struct hdspm * hdspm)
+
+static snd_pcm_uframes_t hdspm_hw_pointer(struct hdspm *hdspm)
{
int position;
position = hdspm_read(hdspm, HDSPM_statusRegister);
- if (!hdspm->precise_ptr)
- return (position & HDSPM_BufferID) ?
+ switch (hdspm->io_type) {
+ case RayDAT:
+ case AIO:
+ position &= HDSPM_BufferPositionMask;
+ position /= 4; /* Bytes per sample */
+ break;
+ default:
+ position = (position & HDSPM_BufferID) ?
(hdspm->period_bytes / 4) : 0;
-
- /* hwpointer comes in bytes and is 64Bytes accurate (by docu since
- PCI Burst)
- i have experimented that it is at most 64 Byte to much for playing
- so substraction of 64 byte should be ok for ALSA, but use it only
- for application where you know what you do since if you come to
- near with record pointer it can be a disaster */
-
- position &= HDSPM_BufferPositionMask;
- position = ((position - 64) % (2 * hdspm->period_bytes)) / 4;
+ }
return position;
}
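
For reference, the latency code and the period size relate as period_bytes = 1 << (code + 8); at 4 bytes per sample that is 1 << (code + 6) frames, so code 6 gives 16384 bytes = 4096 frames. On RayDAT/AIO the pointer above comes straight from the masked hardware byte counter divided by 4, while the older cards only report which half of the double buffer is active. A one-line sketch of the byte-to-frame conversion (the helper name is illustrative, not from the patch):

	static unsigned int sketch_period_frames(unsigned int latency_code)
	{
		return (1U << (latency_code + 8)) / 4;	/* bytes -> 32 bit frames */
	}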
@@ -805,7 +1278,7 @@ static void hdspm_silence_playback(struct hdspm *hdspm)
}
}
-static int hdspm_set_interrupt_interval(struct hdspm * s, unsigned int frames)
+static int hdspm_set_interrupt_interval(struct hdspm *s, unsigned int frames)
{
int n;
@@ -829,21 +1302,53 @@ static int hdspm_set_interrupt_interval(struct hdspm * s, unsigned int frames)
return 0;
}
+static u64 hdspm_calc_dds_value(struct hdspm *hdspm, u64 period)
+{
+ u64 freq_const;
+
+ if (period == 0)
+ return 0;
+
+ switch (hdspm->io_type) {
+ case MADI:
+ case AES32:
+ freq_const = 110069313433624ULL;
+ break;
+ case RayDAT:
+ case AIO:
+ freq_const = 104857600000000ULL;
+ break;
+ case MADIface:
+ freq_const = 131072000000000ULL;
+ }
+
+ return div_u64(freq_const, period);
+}
+
+
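hdspm_calc_dds_value() above and hdspm_set_dds_value() below are inverses through the card-specific frequency constant: rate = freq_const / period and period = freq_const / rate. A minimal worked example for a MADI/AES32 card (the concrete numbers are only illustrative):

	/* rate -> DDS value written to the FREQ register */
	u64 dds = div_u64(110069313433624ULL, 48000);	/* = 2293110696, fits in 32 bit */
	/* DDS value (or HDSPM_RD_PLL_FREQ readback) -> rate */
	u64 rate = div_u64(110069313433624ULL, dds);	/* = 48000 */
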
static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
{
u64 n;
-
+
if (rate >= 112000)
rate /= 4;
else if (rate >= 56000)
rate /= 2;
- /* RME says n = 104857600000000, but in the windows MADI driver, I see:
-// return 104857600000000 / rate; // 100 MHz
- return 110100480000000 / rate; // 105 MHz
- */
- /* n = 104857600000000ULL; */ /* = 2^20 * 10^8 */
- n = 110100480000000ULL; /* Value checked for AES32 and MADI */
+ switch (hdspm->io_type) {
+ case MADIface:
+ n = 131072000000000ULL; /* 125 MHz */
+ break;
+ case MADI:
+ case AES32:
+ n = 110069313433624ULL; /* 105 MHz */
+ break;
+ case RayDAT:
+ case AIO:
+ n = 104857600000000ULL; /* 100 MHz */
+ break;
+ }
+
n = div_u64(n, rate);
/* n should be less than 2^32 for being written to FREQ register */
snd_BUG_ON(n >> 32);
@@ -864,13 +1369,13 @@ static int hdspm_set_rate(struct hdspm * hdspm, int rate, int called_internally)
if (!(hdspm->control_register & HDSPM_ClockModeMaster)) {
- /* SLAVE --- */
+ /* SLAVE --- */
if (called_internally) {
- /* request from ctl or card initialization
- just make a warning an remember setting
- for future master mode switching */
-
+ /* request from ctl or card initialization
+			   just issue a warning and remember the setting
+ for future master mode switching */
+
snd_printk(KERN_WARNING "HDSPM: "
"Warning: device is not running "
"as a clock master.\n");
@@ -907,7 +1412,7 @@ static int hdspm_set_rate(struct hdspm * hdspm, int rate, int called_internally)
Note that a similar but essentially insoluble problem exists for
externally-driven rate changes. All we can do is to flag rate
- changes in the read/write routines.
+ changes in the read/write routines.
*/
if (current_rate <= 48000)
@@ -975,16 +1480,35 @@ static int hdspm_set_rate(struct hdspm * hdspm, int rate, int called_internally)
/* For AES32, need to set DDS value in FREQ register
For MADI, also apparently */
hdspm_set_dds_value(hdspm, rate);
-
- if (hdspm->is_aes32 && rate != current_rate)
+
+ if (AES32 == hdspm->io_type && rate != current_rate)
hdspm_write(hdspm, HDSPM_eeprom_wr, 0);
-
- /* For AES32 and for MADI (at least rev 204), channel_map needs to
- * always be channel_map_madi_ss, whatever the sample rate */
- hdspm->channel_map = channel_map_madi_ss;
hdspm->system_sample_rate = rate;
+ if (rate <= 48000) {
+ hdspm->channel_map_in = hdspm->channel_map_in_ss;
+ hdspm->channel_map_out = hdspm->channel_map_out_ss;
+ hdspm->max_channels_in = hdspm->ss_in_channels;
+ hdspm->max_channels_out = hdspm->ss_out_channels;
+ hdspm->port_names_in = hdspm->port_names_in_ss;
+ hdspm->port_names_out = hdspm->port_names_out_ss;
+ } else if (rate <= 96000) {
+ hdspm->channel_map_in = hdspm->channel_map_in_ds;
+ hdspm->channel_map_out = hdspm->channel_map_out_ds;
+ hdspm->max_channels_in = hdspm->ds_in_channels;
+ hdspm->max_channels_out = hdspm->ds_out_channels;
+ hdspm->port_names_in = hdspm->port_names_in_ds;
+ hdspm->port_names_out = hdspm->port_names_out_ds;
+ } else {
+ hdspm->channel_map_in = hdspm->channel_map_in_qs;
+ hdspm->channel_map_out = hdspm->channel_map_out_qs;
+ hdspm->max_channels_in = hdspm->qs_in_channels;
+ hdspm->max_channels_out = hdspm->qs_out_channels;
+ hdspm->port_names_in = hdspm->port_names_in_qs;
+ hdspm->port_names_out = hdspm->port_names_out_qs;
+ }
+
if (not_set != 0)
return -1;
@@ -1019,39 +1543,26 @@ static inline unsigned char snd_hdspm_midi_read_byte (struct hdspm *hdspm,
int id)
{
/* the hardware already does the relevant bit-mask with 0xff */
- if (id)
- return hdspm_read(hdspm, HDSPM_midiDataIn1);
- else
- return hdspm_read(hdspm, HDSPM_midiDataIn0);
+ return hdspm_read(hdspm, hdspm->midi[id].dataIn);
}
static inline void snd_hdspm_midi_write_byte (struct hdspm *hdspm, int id,
int val)
{
/* the hardware already does the relevant bit-mask with 0xff */
- if (id)
- hdspm_write(hdspm, HDSPM_midiDataOut1, val);
- else
- hdspm_write(hdspm, HDSPM_midiDataOut0, val);
+ return hdspm_write(hdspm, hdspm->midi[id].dataOut, val);
}
static inline int snd_hdspm_midi_input_available (struct hdspm *hdspm, int id)
{
- if (id)
- return (hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xff);
- else
- return (hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xff);
+ return hdspm_read(hdspm, hdspm->midi[id].statusIn) & 0xFF;
}
static inline int snd_hdspm_midi_output_possible (struct hdspm *hdspm, int id)
{
int fifo_bytes_used;
- if (id)
- fifo_bytes_used = hdspm_read(hdspm, HDSPM_midiStatusOut1);
- else
- fifo_bytes_used = hdspm_read(hdspm, HDSPM_midiStatusOut0);
- fifo_bytes_used &= 0xff;
+ fifo_bytes_used = hdspm_read(hdspm, hdspm->midi[id].statusOut) & 0xFF;
if (fifo_bytes_used < 128)
return 128 - fifo_bytes_used;
@@ -1074,7 +1585,7 @@ static int snd_hdspm_midi_output_write (struct hdspm_midi *hmidi)
unsigned char buf[128];
/* Output is not interrupt driven */
-
+
spin_lock_irqsave (&hmidi->lock, flags);
if (hmidi->output &&
!snd_rawmidi_transmit_empty (hmidi->output)) {
@@ -1083,11 +1594,11 @@ static int snd_hdspm_midi_output_write (struct hdspm_midi *hmidi)
if (n_pending > 0) {
if (n_pending > (int)sizeof (buf))
n_pending = sizeof (buf);
-
+
to_write = snd_rawmidi_transmit (hmidi->output, buf,
n_pending);
if (to_write > 0) {
- for (i = 0; i < to_write; ++i)
+ for (i = 0; i < to_write; ++i)
snd_hdspm_midi_write_byte (hmidi->hdspm,
hmidi->id,
buf[i]);
@@ -1127,12 +1638,11 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
}
}
hmidi->pending = 0;
- if (hmidi->id)
- hmidi->hdspm->control_register |= HDSPM_Midi1InterruptEnable;
- else
- hmidi->hdspm->control_register |= HDSPM_Midi0InterruptEnable;
+
+ hmidi->hdspm->control_register |= hmidi->ie;
hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
hmidi->hdspm->control_register);
+
spin_unlock_irqrestore (&hmidi->lock, flags);
return snd_hdspm_midi_output_write (hmidi);
}
@@ -1143,20 +1653,18 @@ snd_hdspm_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
struct hdspm *hdspm;
struct hdspm_midi *hmidi;
unsigned long flags;
- u32 ie;
hmidi = substream->rmidi->private_data;
hdspm = hmidi->hdspm;
- ie = hmidi->id ?
- HDSPM_Midi1InterruptEnable : HDSPM_Midi0InterruptEnable;
+
spin_lock_irqsave (&hdspm->lock, flags);
if (up) {
- if (!(hdspm->control_register & ie)) {
+ if (!(hdspm->control_register & hmidi->ie)) {
snd_hdspm_flush_midi_input (hdspm, hmidi->id);
- hdspm->control_register |= ie;
+ hdspm->control_register |= hmidi->ie;
}
} else {
- hdspm->control_register &= ~ie;
+ hdspm->control_register &= ~hmidi->ie;
}
hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
@@ -1167,14 +1675,14 @@ static void snd_hdspm_midi_output_timer(unsigned long data)
{
struct hdspm_midi *hmidi = (struct hdspm_midi *) data;
unsigned long flags;
-
+
snd_hdspm_midi_output_write(hmidi);
spin_lock_irqsave (&hmidi->lock, flags);
/* this does not bump hmidi->istimer, because the
kernel automatically removed the timer when it
expired, and we are now adding it back, thus
- leaving istimer wherever it was set before.
+ leaving istimer wherever it was set before.
*/
if (hmidi->istimer) {
@@ -1288,22 +1796,103 @@ static int __devinit snd_hdspm_create_midi (struct snd_card *card,
hdspm->midi[id].hdspm = hdspm;
spin_lock_init (&hdspm->midi[id].lock);
- sprintf (buf, "%s MIDI %d", card->shortname, id+1);
- err = snd_rawmidi_new (card, buf, id, 1, 1, &hdspm->midi[id].rmidi);
- if (err < 0)
- return err;
+ if (0 == id) {
+ if (MADIface == hdspm->io_type) {
+ /* MIDI-over-MADI on HDSPe MADIface */
+ hdspm->midi[0].dataIn = HDSPM_midiDataIn2;
+ hdspm->midi[0].statusIn = HDSPM_midiStatusIn2;
+ hdspm->midi[0].dataOut = HDSPM_midiDataOut2;
+ hdspm->midi[0].statusOut = HDSPM_midiStatusOut2;
+ hdspm->midi[0].ie = HDSPM_Midi2InterruptEnable;
+ hdspm->midi[0].irq = HDSPM_midi2IRQPending;
+ } else {
+ hdspm->midi[0].dataIn = HDSPM_midiDataIn0;
+ hdspm->midi[0].statusIn = HDSPM_midiStatusIn0;
+ hdspm->midi[0].dataOut = HDSPM_midiDataOut0;
+ hdspm->midi[0].statusOut = HDSPM_midiStatusOut0;
+ hdspm->midi[0].ie = HDSPM_Midi0InterruptEnable;
+ hdspm->midi[0].irq = HDSPM_midi0IRQPending;
+ }
+ } else if (1 == id) {
+ hdspm->midi[1].dataIn = HDSPM_midiDataIn1;
+ hdspm->midi[1].statusIn = HDSPM_midiStatusIn1;
+ hdspm->midi[1].dataOut = HDSPM_midiDataOut1;
+ hdspm->midi[1].statusOut = HDSPM_midiStatusOut1;
+ hdspm->midi[1].ie = HDSPM_Midi1InterruptEnable;
+ hdspm->midi[1].irq = HDSPM_midi1IRQPending;
+ } else if ((2 == id) && (MADI == hdspm->io_type)) {
+ /* MIDI-over-MADI on HDSPe MADI */
+ hdspm->midi[2].dataIn = HDSPM_midiDataIn2;
+ hdspm->midi[2].statusIn = HDSPM_midiStatusIn2;
+ hdspm->midi[2].dataOut = HDSPM_midiDataOut2;
+ hdspm->midi[2].statusOut = HDSPM_midiStatusOut2;
+ hdspm->midi[2].ie = HDSPM_Midi2InterruptEnable;
+ hdspm->midi[2].irq = HDSPM_midi2IRQPending;
+ } else if (2 == id) {
+ /* TCO MTC, read only */
+ hdspm->midi[2].dataIn = HDSPM_midiDataIn2;
+ hdspm->midi[2].statusIn = HDSPM_midiStatusIn2;
+ hdspm->midi[2].dataOut = -1;
+ hdspm->midi[2].statusOut = -1;
+ hdspm->midi[2].ie = HDSPM_Midi2InterruptEnable;
+ hdspm->midi[2].irq = HDSPM_midi2IRQPendingAES;
+ } else if (3 == id) {
+ /* TCO MTC on HDSPe MADI */
+ hdspm->midi[3].dataIn = HDSPM_midiDataIn3;
+ hdspm->midi[3].statusIn = HDSPM_midiStatusIn3;
+ hdspm->midi[3].dataOut = -1;
+ hdspm->midi[3].statusOut = -1;
+ hdspm->midi[3].ie = HDSPM_Midi3InterruptEnable;
+ hdspm->midi[3].irq = HDSPM_midi3IRQPending;
+ }
- sprintf(hdspm->midi[id].rmidi->name, "HDSPM MIDI %d", id+1);
- hdspm->midi[id].rmidi->private_data = &hdspm->midi[id];
+ if ((id < 2) || ((2 == id) && ((MADI == hdspm->io_type) ||
+ (MADIface == hdspm->io_type)))) {
+ if ((id == 0) && (MADIface == hdspm->io_type)) {
+ sprintf(buf, "%s MIDIoverMADI", card->shortname);
+ } else if ((id == 2) && (MADI == hdspm->io_type)) {
+ sprintf(buf, "%s MIDIoverMADI", card->shortname);
+ } else {
+ sprintf(buf, "%s MIDI %d", card->shortname, id+1);
+ }
+ err = snd_rawmidi_new(card, buf, id, 1, 1,
+ &hdspm->midi[id].rmidi);
+ if (err < 0)
+ return err;
- snd_rawmidi_set_ops(hdspm->midi[id].rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
- &snd_hdspm_midi_output);
- snd_rawmidi_set_ops(hdspm->midi[id].rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
- &snd_hdspm_midi_input);
+ sprintf(hdspm->midi[id].rmidi->name, "%s MIDI %d",
+ card->id, id+1);
+ hdspm->midi[id].rmidi->private_data = &hdspm->midi[id];
+
+ snd_rawmidi_set_ops(hdspm->midi[id].rmidi,
+ SNDRV_RAWMIDI_STREAM_OUTPUT,
+ &snd_hdspm_midi_output);
+ snd_rawmidi_set_ops(hdspm->midi[id].rmidi,
+ SNDRV_RAWMIDI_STREAM_INPUT,
+ &snd_hdspm_midi_input);
+
+ hdspm->midi[id].rmidi->info_flags |=
+ SNDRV_RAWMIDI_INFO_OUTPUT |
+ SNDRV_RAWMIDI_INFO_INPUT |
+ SNDRV_RAWMIDI_INFO_DUPLEX;
+ } else {
+ /* TCO MTC, read only */
+ sprintf(buf, "%s MTC %d", card->shortname, id+1);
+ err = snd_rawmidi_new(card, buf, id, 1, 1,
+ &hdspm->midi[id].rmidi);
+ if (err < 0)
+ return err;
- hdspm->midi[id].rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT |
- SNDRV_RAWMIDI_INFO_INPUT |
- SNDRV_RAWMIDI_INFO_DUPLEX;
+ sprintf(hdspm->midi[id].rmidi->name,
+ "%s MTC %d", card->id, id+1);
+ hdspm->midi[id].rmidi->private_data = &hdspm->midi[id];
+
+ snd_rawmidi_set_ops(hdspm->midi[id].rmidi,
+ SNDRV_RAWMIDI_STREAM_INPUT,
+ &snd_hdspm_midi_input);
+
+ hdspm->midi[id].rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT;
+ }
return 0;
}
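
With the per-port register table filled in above, interrupt dispatch no longer needs hard-coded MIDI bits. A minimal sketch of how a handler could test each port (the interrupt handler hunk is outside this section; i, status and schedule are assumed locals, and the masking mirrors the ie re-enable done in snd_hdspm_midi_input_read above):

	for (i = 0; i < hdspm->midiPorts; i++) {
		if (status & hdspm->midi[i].irq) {
			/* mask the port and let the tasklet drain the FIFO */
			hdspm->control_register &= ~hdspm->midi[i].ie;
			hdspm->midi[i].pending = 1;
			schedule = 1;
		}
	}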
@@ -1312,12 +1901,15 @@ static int __devinit snd_hdspm_create_midi (struct snd_card *card,
static void hdspm_midi_tasklet(unsigned long arg)
{
struct hdspm *hdspm = (struct hdspm *)arg;
-
- if (hdspm->midi[0].pending)
- snd_hdspm_midi_input_read (&hdspm->midi[0]);
- if (hdspm->midi[1].pending)
- snd_hdspm_midi_input_read (&hdspm->midi[1]);
-}
+ int i = 0;
+
+ while (i < hdspm->midiPorts) {
+ if (hdspm->midi[i].pending)
+ snd_hdspm_midi_input_read(&hdspm->midi[i]);
+
+ i++;
+ }
+}
/*-----------------------------------------------------------------------------
@@ -1326,6 +1918,22 @@ static void hdspm_midi_tasklet(unsigned long arg)
/* get the system sample rate which is set */
+
+/**
+ * Calculate the real sample rate from the
+ * current DDS value.
+ **/
+static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
+{
+ unsigned int period, rate;
+
+ period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
+ rate = hdspm_calc_dds_value(hdspm, period);
+
+ return rate;
+}
+
+
#define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -1340,112 +1948,274 @@ static int snd_hdspm_info_system_sample_rate(struct snd_kcontrol *kcontrol,
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
+ uinfo->value.integer.min = 27000;
+ uinfo->value.integer.max = 207000;
+ uinfo->value.integer.step = 1;
return 0;
}
+
static int snd_hdspm_get_system_sample_rate(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *
ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- ucontrol->value.enumerated.item[0] = hdspm->system_sample_rate;
+ ucontrol->value.integer.value[0] = hdspm_get_system_sample_rate(hdspm);
+ return 0;
+}
+
+
+/**
+ * Returns the WordClock sample rate class for the given card.
+ **/
+static int hdspm_get_wc_sample_rate(struct hdspm *hdspm)
+{
+ int status;
+
+ switch (hdspm->io_type) {
+ case RayDAT:
+ case AIO:
+ status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
+ return (status >> 16) & 0xF;
+ break;
+ default:
+ break;
+ }
+
+
+ return 0;
+}
+
+
+/**
+ * Returns the TCO sample rate class for the given card.
+ **/
+static int hdspm_get_tco_sample_rate(struct hdspm *hdspm)
+{
+ int status;
+
+ if (hdspm->tco) {
+ switch (hdspm->io_type) {
+ case RayDAT:
+ case AIO:
+ status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
+ return (status >> 20) & 0xF;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+/**
+ * Returns the SYNC_IN sample rate class for the given card.
+ **/
+static int hdspm_get_sync_in_sample_rate(struct hdspm *hdspm)
+{
+ int status;
+
+ if (hdspm->tco) {
+ switch (hdspm->io_type) {
+ case RayDAT:
+ case AIO:
+ status = hdspm_read(hdspm, HDSPM_RD_STATUS_2);
+ return (status >> 12) & 0xF;
+ break;
+ default:
+ break;
+ }
+ }
+
return 0;
}
+
+/**
+ * Returns the sample rate class for input source <idx> for
+ * 'new style' cards like the AIO and RayDAT.
+ **/
+static int hdspm_get_s1_sample_rate(struct hdspm *hdspm, unsigned int idx)
+{
+ int status = hdspm_read(hdspm, HDSPM_RD_STATUS_2);
+
+ return (status >> (idx*4)) & 0xF;
+}
+
+
+
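hdspm_get_s1_sample_rate() returns a four-bit class, one nibble per input, in the same "No Lock", 32 kHz .. 192 kHz order as texts_freq. Converting a class to a rate in Hz can reuse the existing HDSPM_bit2freq() helper, assuming bit2freq_tab follows that same order; a minimal sketch (the wrapper name is illustrative, not from the patch):

	static int sketch_input_rate_hz(struct hdspm *hdspm, unsigned int idx)
	{
		return HDSPM_bit2freq(hdspm_get_s1_sample_rate(hdspm, idx));
	}
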
#define HDSPM_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ, \
- .info = snd_hdspm_info_autosync_sample_rate, \
- .get = snd_hdspm_get_autosync_sample_rate \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .private_value = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READ, \
+ .info = snd_hdspm_info_autosync_sample_rate, \
+ .get = snd_hdspm_get_autosync_sample_rate \
}
+
static int snd_hdspm_info_autosync_sample_rate(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "32000", "44100", "48000",
- "64000", "88200", "96000",
- "128000", "176400", "192000",
- "None"
- };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 10;
+
if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
+ uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
+ texts_freq[uinfo->value.enumerated.item]);
return 0;
}
+
static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *
ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- switch (hdspm_external_sample_rate(hdspm)) {
- case 32000:
- ucontrol->value.enumerated.item[0] = 0;
- break;
- case 44100:
- ucontrol->value.enumerated.item[0] = 1;
- break;
- case 48000:
- ucontrol->value.enumerated.item[0] = 2;
- break;
- case 64000:
- ucontrol->value.enumerated.item[0] = 3;
- break;
- case 88200:
- ucontrol->value.enumerated.item[0] = 4;
- break;
- case 96000:
- ucontrol->value.enumerated.item[0] = 5;
- break;
- case 128000:
- ucontrol->value.enumerated.item[0] = 6;
- break;
- case 176400:
- ucontrol->value.enumerated.item[0] = 7;
- break;
- case 192000:
- ucontrol->value.enumerated.item[0] = 8;
- break;
+ switch (hdspm->io_type) {
+ case RayDAT:
+ switch (kcontrol->private_value) {
+ case 0:
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_wc_sample_rate(hdspm);
+ break;
+ case 7:
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_tco_sample_rate(hdspm);
+ break;
+ case 8:
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_sync_in_sample_rate(hdspm);
+ break;
+ default:
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_s1_sample_rate(hdspm,
+ kcontrol->private_value-1);
+ }
+
+ case AIO:
+ switch (kcontrol->private_value) {
+ case 0: /* WC */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_wc_sample_rate(hdspm);
+ break;
+ case 4: /* TCO */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_tco_sample_rate(hdspm);
+ break;
+ case 5: /* SYNC_IN */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_sync_in_sample_rate(hdspm);
+ break;
+ default:
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_s1_sample_rate(hdspm,
+ ucontrol->id.index-1);
+ }
+
+ case AES32:
+ switch (kcontrol->private_value) {
+ case 0: /* WC */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_wc_sample_rate(hdspm);
+ break;
+ case 9: /* TCO */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_tco_sample_rate(hdspm);
+ break;
+ case 10: /* SYNC_IN */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_sync_in_sample_rate(hdspm);
+ break;
+ default: /* AES1 to AES8 */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_get_s1_sample_rate(hdspm,
+ kcontrol->private_value-1);
+ break;
+
+ }
default:
- ucontrol->value.enumerated.item[0] = 9;
+ break;
}
+
return 0;
}
+
#define HDSPM_SYSTEM_CLOCK_MODE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ, \
- .info = snd_hdspm_info_system_clock_mode, \
- .get = snd_hdspm_get_system_clock_mode, \
-}
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_system_clock_mode, \
+ .get = snd_hdspm_get_system_clock_mode, \
+ .put = snd_hdspm_put_system_clock_mode, \
+}
+
+
+/**
+ * Returns the system clock mode for the given card.
+ * @returns 0 - master, 1 - slave
+ **/
+static int hdspm_system_clock_mode(struct hdspm *hdspm)
+{
+ switch (hdspm->io_type) {
+ case AIO:
+ case RayDAT:
+ if (hdspm->settings_register & HDSPM_c0Master)
+ return 0;
+ break;
+
+ default:
+ if (hdspm->control_register & HDSPM_ClockModeMaster)
+ return 0;
+ }
+ return 1;
+}
-static int hdspm_system_clock_mode(struct hdspm * hdspm)
+/**
+ * Sets the system clock mode.
+ * @param mode 0 - master, 1 - slave
+ **/
+static void hdspm_set_system_clock_mode(struct hdspm *hdspm, int mode)
{
- /* Always reflect the hardware info, rme is never wrong !!!! */
+ switch (hdspm->io_type) {
+ case AIO:
+ case RayDAT:
+ if (0 == mode)
+ hdspm->settings_register |= HDSPM_c0Master;
+ else
+ hdspm->settings_register &= ~HDSPM_c0Master;
- if (hdspm->control_register & HDSPM_ClockModeMaster)
- return 0;
- return 1;
+ hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
+ break;
+
+ default:
+ if (0 == mode)
+ hdspm->control_register |= HDSPM_ClockModeMaster;
+ else
+ hdspm->control_register &= ~HDSPM_ClockModeMaster;
+
+ hdspm_write(hdspm, HDSPM_controlRegister,
+ hdspm->control_register);
+ }
}
+
static int snd_hdspm_info_system_clock_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Master", "Slave" };
+ static char *texts[] = { "Master", "AutoSync" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -1463,96 +2233,83 @@ static int snd_hdspm_get_system_clock_mode(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- ucontrol->value.enumerated.item[0] =
- hdspm_system_clock_mode(hdspm);
+ ucontrol->value.enumerated.item[0] = hdspm_system_clock_mode(hdspm);
return 0;
}
-#define HDSPM_CLOCK_SOURCE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_clock_source, \
- .get = snd_hdspm_get_clock_source, \
- .put = snd_hdspm_put_clock_source \
+static int snd_hdspm_put_system_clock_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int val;
+
+ if (!snd_hdspm_use_is_exclusive(hdspm))
+ return -EBUSY;
+
+ val = ucontrol->value.enumerated.item[0];
+ if (val < 0)
+ val = 0;
+ else if (val > 1)
+ val = 1;
+
+ hdspm_set_system_clock_mode(hdspm, val);
+
+ return 0;
}
+
+#define HDSPM_INTERNAL_CLOCK(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_clock_source, \
+ .get = snd_hdspm_get_clock_source, \
+ .put = snd_hdspm_put_clock_source \
+}
+
+
static int hdspm_clock_source(struct hdspm * hdspm)
{
- if (hdspm->control_register & HDSPM_ClockModeMaster) {
- switch (hdspm->system_sample_rate) {
- case 32000:
- return 1;
- case 44100:
- return 2;
- case 48000:
- return 3;
- case 64000:
- return 4;
- case 88200:
- return 5;
- case 96000:
- return 6;
- case 128000:
- return 7;
- case 176400:
- return 8;
- case 192000:
- return 9;
- default:
- return 3;
- }
- } else {
- return 0;
+ switch (hdspm->system_sample_rate) {
+ case 32000: return 0;
+ case 44100: return 1;
+ case 48000: return 2;
+ case 64000: return 3;
+ case 88200: return 4;
+ case 96000: return 5;
+ case 128000: return 6;
+ case 176400: return 7;
+ case 192000: return 8;
}
+
+ return -1;
}
static int hdspm_set_clock_source(struct hdspm * hdspm, int mode)
{
int rate;
switch (mode) {
-
- case HDSPM_CLOCK_SOURCE_AUTOSYNC:
- if (hdspm_external_sample_rate(hdspm) != 0) {
- hdspm->control_register &= ~HDSPM_ClockModeMaster;
- hdspm_write(hdspm, HDSPM_controlRegister,
- hdspm->control_register);
- return 0;
- }
- return -1;
- case HDSPM_CLOCK_SOURCE_INTERNAL_32KHZ:
- rate = 32000;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_44_1KHZ:
- rate = 44100;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_48KHZ:
- rate = 48000;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_64KHZ:
- rate = 64000;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_88_2KHZ:
- rate = 88200;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_96KHZ:
- rate = 96000;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_128KHZ:
- rate = 128000;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_176_4KHZ:
- rate = 176400;
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_192KHZ:
- rate = 192000;
- break;
-
+ case 0:
+ rate = 32000; break;
+ case 1:
+ rate = 44100; break;
+ case 2:
+ rate = 48000; break;
+ case 3:
+ rate = 64000; break;
+ case 4:
+ rate = 88200; break;
+ case 5:
+ rate = 96000; break;
+ case 6:
+ rate = 128000; break;
+ case 7:
+ rate = 176400; break;
+ case 8:
+ rate = 192000; break;
default:
- rate = 44100;
+ rate = 48000;
}
- hdspm->control_register |= HDSPM_ClockModeMaster;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
hdspm_set_rate(hdspm, rate, 1);
return 0;
}
@@ -1560,25 +2317,16 @@ static int hdspm_set_clock_source(struct hdspm * hdspm, int mode)
static int snd_hdspm_info_clock_source(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "AutoSync",
- "Internal 32.0 kHz", "Internal 44.1 kHz",
- "Internal 48.0 kHz",
- "Internal 64.0 kHz", "Internal 88.2 kHz",
- "Internal 96.0 kHz",
- "Internal 128.0 kHz", "Internal 176.4 kHz",
- "Internal 192.0 kHz"
- };
-
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
- uinfo->value.enumerated.items = 10;
+ uinfo->value.enumerated.items = 9;
if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
uinfo->value.enumerated.item =
uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
+ texts_freq[uinfo->value.enumerated.item+1]);
return 0;
}
@@ -1615,134 +2363,301 @@ static int snd_hdspm_put_clock_source(struct snd_kcontrol *kcontrol,
return change;
}
-#define HDSPM_PREF_SYNC_REF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_pref_sync_ref, \
- .get = snd_hdspm_get_pref_sync_ref, \
- .put = snd_hdspm_put_pref_sync_ref \
-}
+#define HDSPM_PREF_SYNC_REF(xname, xindex) \
+{.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_pref_sync_ref, \
+ .get = snd_hdspm_get_pref_sync_ref, \
+ .put = snd_hdspm_put_pref_sync_ref \
+}
+
+
+/**
+ * Returns the current preferred sync reference setting.
+ * The semantics of the return value are depending on the
+ * card, please see the comments for clarification.
+ **/
static int hdspm_pref_sync_ref(struct hdspm * hdspm)
{
- /* Notice that this looks at the requested sync source,
- not the one actually in use.
- */
- if (hdspm->is_aes32) {
+ switch (hdspm->io_type) {
+ case AES32:
switch (hdspm->control_register & HDSPM_SyncRefMask) {
- /* number gives AES index, except for 0 which
- corresponds to WordClock */
- case 0: return 0;
- case HDSPM_SyncRef0: return 1;
- case HDSPM_SyncRef1: return 2;
- case HDSPM_SyncRef1+HDSPM_SyncRef0: return 3;
- case HDSPM_SyncRef2: return 4;
- case HDSPM_SyncRef2+HDSPM_SyncRef0: return 5;
- case HDSPM_SyncRef2+HDSPM_SyncRef1: return 6;
- case HDSPM_SyncRef2+HDSPM_SyncRef1+HDSPM_SyncRef0: return 7;
- case HDSPM_SyncRef3: return 8;
+ case 0: return 0; /* WC */
+ case HDSPM_SyncRef0: return 1; /* AES 1 */
+ case HDSPM_SyncRef1: return 2; /* AES 2 */
+ case HDSPM_SyncRef1+HDSPM_SyncRef0: return 3; /* AES 3 */
+ case HDSPM_SyncRef2: return 4; /* AES 4 */
+ case HDSPM_SyncRef2+HDSPM_SyncRef0: return 5; /* AES 5 */
+ case HDSPM_SyncRef2+HDSPM_SyncRef1: return 6; /* AES 6 */
+ case HDSPM_SyncRef2+HDSPM_SyncRef1+HDSPM_SyncRef0:
+ return 7; /* AES 7 */
+ case HDSPM_SyncRef3: return 8; /* AES 8 */
+ case HDSPM_SyncRef3+HDSPM_SyncRef0: return 9; /* TCO */
}
- } else {
- switch (hdspm->control_register & HDSPM_SyncRefMask) {
- case HDSPM_SyncRef_Word:
- return HDSPM_SYNC_FROM_WORD;
- case HDSPM_SyncRef_MADI:
- return HDSPM_SYNC_FROM_MADI;
+ break;
+
+ case MADI:
+ case MADIface:
+ if (hdspm->tco) {
+ switch (hdspm->control_register & HDSPM_SyncRefMask) {
+ case 0: return 0; /* WC */
+ case HDSPM_SyncRef0: return 1; /* MADI */
+ case HDSPM_SyncRef1: return 2; /* TCO */
+ case HDSPM_SyncRef1+HDSPM_SyncRef0:
+ return 3; /* SYNC_IN */
+ }
+ } else {
+ switch (hdspm->control_register & HDSPM_SyncRefMask) {
+ case 0: return 0; /* WC */
+ case HDSPM_SyncRef0: return 1; /* MADI */
+ case HDSPM_SyncRef1+HDSPM_SyncRef0:
+ return 2; /* SYNC_IN */
+ }
+ }
+ break;
+
+ case RayDAT:
+ if (hdspm->tco) {
+ switch ((hdspm->settings_register &
+ HDSPM_c0_SyncRefMask) / HDSPM_c0_SyncRef0) {
+ case 0: return 0; /* WC */
+ case 3: return 1; /* ADAT 1 */
+ case 4: return 2; /* ADAT 2 */
+ case 5: return 3; /* ADAT 3 */
+ case 6: return 4; /* ADAT 4 */
+ case 1: return 5; /* AES */
+ case 2: return 6; /* SPDIF */
+ case 9: return 7; /* TCO */
+ case 10: return 8; /* SYNC_IN */
+ }
+ } else {
+ switch ((hdspm->settings_register &
+ HDSPM_c0_SyncRefMask) / HDSPM_c0_SyncRef0) {
+ case 0: return 0; /* WC */
+ case 3: return 1; /* ADAT 1 */
+ case 4: return 2; /* ADAT 2 */
+ case 5: return 3; /* ADAT 3 */
+ case 6: return 4; /* ADAT 4 */
+ case 1: return 5; /* AES */
+ case 2: return 6; /* SPDIF */
+ case 10: return 7; /* SYNC_IN */
+ }
}
+
+ break;
+
+ case AIO:
+ if (hdspm->tco) {
+ switch ((hdspm->settings_register &
+ HDSPM_c0_SyncRefMask) / HDSPM_c0_SyncRef0) {
+ case 0: return 0; /* WC */
+ case 3: return 1; /* ADAT */
+ case 1: return 2; /* AES */
+ case 2: return 3; /* SPDIF */
+ case 9: return 4; /* TCO */
+ case 10: return 5; /* SYNC_IN */
+ }
+ } else {
+ switch ((hdspm->settings_register &
+ HDSPM_c0_SyncRefMask) / HDSPM_c0_SyncRef0) {
+ case 0: return 0; /* WC */
+ case 3: return 1; /* ADAT */
+ case 1: return 2; /* AES */
+ case 2: return 3; /* SPDIF */
+ case 10: return 4; /* SYNC_IN */
+ }
+ }
+
+ break;
}
- return HDSPM_SYNC_FROM_WORD;
+ return -1;
}
+
+/**
+ * Set the preferred sync reference to <pref>. The semantics
+ * of <pref> depend on the card type; see the comments
+ * for clarification.
+ **/
static int hdspm_set_pref_sync_ref(struct hdspm * hdspm, int pref)
{
- hdspm->control_register &= ~HDSPM_SyncRefMask;
+ int p = 0;
- if (hdspm->is_aes32) {
- switch (pref) {
- case 0:
- hdspm->control_register |= 0;
- break;
- case 1:
- hdspm->control_register |= HDSPM_SyncRef0;
- break;
- case 2:
- hdspm->control_register |= HDSPM_SyncRef1;
- break;
- case 3:
- hdspm->control_register |= HDSPM_SyncRef1+HDSPM_SyncRef0;
- break;
- case 4:
- hdspm->control_register |= HDSPM_SyncRef2;
- break;
- case 5:
- hdspm->control_register |= HDSPM_SyncRef2+HDSPM_SyncRef0;
- break;
- case 6:
- hdspm->control_register |= HDSPM_SyncRef2+HDSPM_SyncRef1;
- break;
- case 7:
- hdspm->control_register |=
- HDSPM_SyncRef2+HDSPM_SyncRef1+HDSPM_SyncRef0;
- break;
- case 8:
- hdspm->control_register |= HDSPM_SyncRef3;
- break;
- default:
- return -1;
- }
- } else {
+ switch (hdspm->io_type) {
+ case AES32:
+ hdspm->control_register &= ~HDSPM_SyncRefMask;
switch (pref) {
- case HDSPM_SYNC_FROM_MADI:
- hdspm->control_register |= HDSPM_SyncRef_MADI;
+ case 0: /* WC */
+ break;
+ case 1: /* AES 1 */
+ hdspm->control_register |= HDSPM_SyncRef0;
+ break;
+ case 2: /* AES 2 */
+ hdspm->control_register |= HDSPM_SyncRef1;
+ break;
+ case 3: /* AES 3 */
+ hdspm->control_register |=
+ HDSPM_SyncRef1+HDSPM_SyncRef0;
+ break;
+ case 4: /* AES 4 */
+ hdspm->control_register |= HDSPM_SyncRef2;
+ break;
+ case 5: /* AES 5 */
+ hdspm->control_register |=
+ HDSPM_SyncRef2+HDSPM_SyncRef0;
break;
- case HDSPM_SYNC_FROM_WORD:
- hdspm->control_register |= HDSPM_SyncRef_Word;
+ case 6: /* AES 6 */
+ hdspm->control_register |=
+ HDSPM_SyncRef2+HDSPM_SyncRef1;
+ break;
+ case 7: /* AES 7 */
+ hdspm->control_register |=
+ HDSPM_SyncRef2+HDSPM_SyncRef1+HDSPM_SyncRef0;
+ break;
+ case 8: /* AES 8 */
+ hdspm->control_register |= HDSPM_SyncRef3;
+ break;
+ case 9: /* TCO */
+ hdspm->control_register |=
+ HDSPM_SyncRef3+HDSPM_SyncRef0;
break;
default:
return -1;
}
+
+ break;
+
+ case MADI:
+ case MADIface:
+ hdspm->control_register &= ~HDSPM_SyncRefMask;
+ if (hdspm->tco) {
+ switch (pref) {
+ case 0: /* WC */
+ break;
+ case 1: /* MADI */
+ hdspm->control_register |= HDSPM_SyncRef0;
+ break;
+ case 2: /* TCO */
+ hdspm->control_register |= HDSPM_SyncRef1;
+ break;
+ case 3: /* SYNC_IN */
+ hdspm->control_register |=
+ HDSPM_SyncRef0+HDSPM_SyncRef1;
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ switch (pref) {
+ case 0: /* WC */
+ break;
+ case 1: /* MADI */
+ hdspm->control_register |= HDSPM_SyncRef0;
+ break;
+ case 2: /* SYNC_IN */
+ hdspm->control_register |=
+ HDSPM_SyncRef0+HDSPM_SyncRef1;
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ break;
+
+ case RayDAT:
+ if (hdspm->tco) {
+ switch (pref) {
+ case 0: p = 0; break; /* WC */
+ case 1: p = 3; break; /* ADAT 1 */
+ case 2: p = 4; break; /* ADAT 2 */
+ case 3: p = 5; break; /* ADAT 3 */
+ case 4: p = 6; break; /* ADAT 4 */
+ case 5: p = 1; break; /* AES */
+ case 6: p = 2; break; /* SPDIF */
+ case 7: p = 9; break; /* TCO */
+ case 8: p = 10; break; /* SYNC_IN */
+ default: return -1;
+ }
+ } else {
+ switch (pref) {
+ case 0: p = 0; break; /* WC */
+ case 1: p = 3; break; /* ADAT 1 */
+ case 2: p = 4; break; /* ADAT 2 */
+ case 3: p = 5; break; /* ADAT 3 */
+ case 4: p = 6; break; /* ADAT 4 */
+ case 5: p = 1; break; /* AES */
+ case 6: p = 2; break; /* SPDIF */
+ case 7: p = 10; break; /* SYNC_IN */
+ default: return -1;
+ }
+ }
+ break;
+
+ case AIO:
+ if (hdspm->tco) {
+ switch (pref) {
+ case 0: p = 0; break; /* WC */
+ case 1: p = 3; break; /* ADAT */
+ case 2: p = 1; break; /* AES */
+ case 3: p = 2; break; /* SPDIF */
+ case 4: p = 9; break; /* TCO */
+ case 5: p = 10; break; /* SYNC_IN */
+ default: return -1;
+ }
+ } else {
+ switch (pref) {
+ case 0: p = 0; break; /* WC */
+ case 1: p = 3; break; /* ADAT */
+ case 2: p = 1; break; /* AES */
+ case 3: p = 2; break; /* SPDIF */
+ case 4: p = 10; break; /* SYNC_IN */
+ default: return -1;
+ }
+ }
+ break;
}
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
+
+ switch (hdspm->io_type) {
+ case RayDAT:
+ case AIO:
+ hdspm->settings_register &= ~HDSPM_c0_SyncRefMask;
+ hdspm->settings_register |= HDSPM_c0_SyncRef0 * p;
+ hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
+ break;
+
+ case MADI:
+ case MADIface:
+ case AES32:
+ hdspm_write(hdspm, HDSPM_controlRegister,
+ hdspm->control_register);
+ }
+
return 0;
}
+
static int snd_hdspm_info_pref_sync_ref(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- if (hdspm->is_aes32) {
- static char *texts[] = { "Word", "AES1", "AES2", "AES3",
- "AES4", "AES5", "AES6", "AES7", "AES8" };
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
-
- uinfo->value.enumerated.items = 9;
-
- if (uinfo->value.enumerated.item >=
- uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- } else {
- static char *texts[] = { "Word", "MADI" };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = hdspm->texts_autosync_items;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
- uinfo->value.enumerated.items = 2;
+ strcpy(uinfo->value.enumerated.name,
+ hdspm->texts_autosync[uinfo->value.enumerated.item]);
- if (uinfo->value.enumerated.item >=
- uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- }
return 0;
}
@@ -1750,32 +2665,41 @@ static int snd_hdspm_get_pref_sync_ref(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int psf = hdspm_pref_sync_ref(hdspm);
- ucontrol->value.enumerated.item[0] = hdspm_pref_sync_ref(hdspm);
- return 0;
+ if (psf >= 0) {
+ ucontrol->value.enumerated.item[0] = psf;
+ return 0;
+ }
+
+ return -1;
}
static int snd_hdspm_put_pref_sync_ref(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change, max;
- unsigned int val;
-
- max = hdspm->is_aes32 ? 9 : 2;
+ int val, change = 0;
if (!snd_hdspm_use_is_exclusive(hdspm))
return -EBUSY;
- val = ucontrol->value.enumerated.item[0] % max;
+ val = ucontrol->value.enumerated.item[0];
+
+ if (val < 0)
+ val = 0;
+ else if (val >= hdspm->texts_autosync_items)
+ val = hdspm->texts_autosync_items-1;
spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_pref_sync_ref(hdspm);
- hdspm_set_pref_sync_ref(hdspm, val);
+ if (val != hdspm_pref_sync_ref(hdspm))
+ change = (0 == hdspm_set_pref_sync_ref(hdspm, val)) ? 1 : 0;
+
spin_unlock_irq(&hdspm->lock);
return change;
}
+
#define HDSPM_AUTOSYNC_REF(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -1785,18 +2709,18 @@ static int snd_hdspm_put_pref_sync_ref(struct snd_kcontrol *kcontrol,
.get = snd_hdspm_get_autosync_ref, \
}
-static int hdspm_autosync_ref(struct hdspm * hdspm)
+static int hdspm_autosync_ref(struct hdspm *hdspm)
{
- if (hdspm->is_aes32) {
+ if (AES32 == hdspm->io_type) {
unsigned int status = hdspm_read(hdspm, HDSPM_statusRegister);
- unsigned int syncref = (status >> HDSPM_AES32_syncref_bit) &
- 0xF;
+ unsigned int syncref =
+ (status >> HDSPM_AES32_syncref_bit) & 0xF;
if (syncref == 0)
return HDSPM_AES32_AUTOSYNC_FROM_WORD;
if (syncref <= 8)
return syncref;
return HDSPM_AES32_AUTOSYNC_FROM_NONE;
- } else {
+ } else if (MADI == hdspm->io_type) {
/* This looks at the autosync selected sync reference */
unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
@@ -1805,22 +2729,27 @@ static int hdspm_autosync_ref(struct hdspm * hdspm)
return HDSPM_AUTOSYNC_FROM_WORD;
case HDSPM_SelSyncRef_MADI:
return HDSPM_AUTOSYNC_FROM_MADI;
+ case HDSPM_SelSyncRef_TCO:
+ return HDSPM_AUTOSYNC_FROM_TCO;
+ case HDSPM_SelSyncRef_SyncIn:
+ return HDSPM_AUTOSYNC_FROM_SYNC_IN;
case HDSPM_SelSyncRef_NVALID:
return HDSPM_AUTOSYNC_FROM_NONE;
default:
return 0;
}
- return 0;
}
+ return 0;
}
+
static int snd_hdspm_info_autosync_ref(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- if (hdspm->is_aes32) {
+ if (AES32 == hdspm->io_type) {
static char *texts[] = { "WordClock", "AES1", "AES2", "AES3",
"AES4", "AES5", "AES6", "AES7", "AES8", "None"};
@@ -1833,14 +2762,15 @@ static int snd_hdspm_info_autosync_ref(struct snd_kcontrol *kcontrol,
uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
texts[uinfo->value.enumerated.item]);
- } else {
- static char *texts[] = { "WordClock", "MADI", "None" };
+ } else if (MADI == hdspm->io_type) {
+ static char *texts[] = {"Word Clock", "MADI", "TCO",
+ "Sync In", "None" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
+ uinfo->value.enumerated.items = 5;
if (uinfo->value.enumerated.item >=
- uinfo->value.enumerated.items)
+ uinfo->value.enumerated.items)
uinfo->value.enumerated.item =
uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
@@ -1858,6 +2788,7 @@ static int snd_hdspm_get_autosync_ref(struct snd_kcontrol *kcontrol,
return 0;
}
+
#define HDSPM_LINE_OUT(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -1914,6 +2845,7 @@ static int snd_hdspm_put_line_out(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_TX_64(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -1969,6 +2901,7 @@ static int snd_hdspm_put_tx_64(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_C_TMS(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2024,6 +2957,7 @@ static int snd_hdspm_put_c_tms(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_SAFE_MODE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2079,6 +3013,7 @@ static int snd_hdspm_put_safe_mode(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_EMPHASIS(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2134,6 +3069,7 @@ static int snd_hdspm_put_emphasis(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_DOLBY(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2189,6 +3125,7 @@ static int snd_hdspm_put_dolby(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_PROFESSIONAL(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2315,6 +3252,7 @@ static int snd_hdspm_put_input_select(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_DS_WIRE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2386,6 +3324,7 @@ static int snd_hdspm_put_ds_wire(struct snd_kcontrol *kcontrol,
return change;
}
+
#define HDSPM_QS_WIRE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -2472,15 +3411,6 @@ static int snd_hdspm_put_qs_wire(struct snd_kcontrol *kcontrol,
return change;
}
-/* Simple Mixer
- deprecated since to much faders ???
- MIXER interface says output (source, destination, value)
- where source > MAX_channels are playback channels
- on MADICARD
- - playback mixer matrix: [channelout+64] [output] [value]
- - input(thru) mixer matrix: [channelin] [output] [value]
- (better do 2 kontrols for separation ?)
-*/
#define HDSPM_MIXER(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
@@ -2586,7 +3516,7 @@ static int snd_hdspm_put_mixer(struct snd_kcontrol *kcontrol,
/* The simple mixer control(s) provide gain control for the
basic 1:1 mappings of playback streams to output
- streams.
+ streams.
*/
#define HDSPM_PLAYBACK_MIXER \
@@ -2604,7 +3534,7 @@ static int snd_hdspm_info_playback_mixer(struct snd_kcontrol *kcontrol,
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 65536;
+ uinfo->value.integer.max = 64;
uinfo->value.integer.step = 1;
return 0;
}
@@ -2614,28 +3544,17 @@ static int snd_hdspm_get_playback_mixer(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
int channel;
- int mapped_channel;
channel = ucontrol->id.index - 1;
if (snd_BUG_ON(channel < 0 || channel >= HDSPM_MAX_CHANNELS))
return -EINVAL;
- mapped_channel = hdspm->channel_map[channel];
- if (mapped_channel < 0)
- return -EINVAL;
-
spin_lock_irq(&hdspm->lock);
ucontrol->value.integer.value[0] =
- hdspm_read_pb_gain(hdspm, mapped_channel, mapped_channel);
+ (hdspm_read_pb_gain(hdspm, channel, channel)*64)/UNITY_GAIN;
spin_unlock_irq(&hdspm->lock);
- /*
- snd_printdd("get pb mixer index %d, channel %d, mapped_channel %d, "
- "value %d\n",
- ucontrol->id.index, channel, mapped_channel,
- ucontrol->value.integer.value[0]);
- */
return 0;
}
@@ -2645,7 +3564,6 @@ static int snd_hdspm_put_playback_mixer(struct snd_kcontrol *kcontrol,
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
int change;
int channel;
- int mapped_channel;
int gain;
if (!snd_hdspm_use_is_exclusive(hdspm))
@@ -2656,59 +3574,60 @@ static int snd_hdspm_put_playback_mixer(struct snd_kcontrol *kcontrol,
if (snd_BUG_ON(channel < 0 || channel >= HDSPM_MAX_CHANNELS))
return -EINVAL;
- mapped_channel = hdspm->channel_map[channel];
- if (mapped_channel < 0)
- return -EINVAL;
-
- gain = ucontrol->value.integer.value[0];
+ gain = ucontrol->value.integer.value[0]*UNITY_GAIN/64;
spin_lock_irq(&hdspm->lock);
change =
- gain != hdspm_read_pb_gain(hdspm, mapped_channel,
- mapped_channel);
+ gain != hdspm_read_pb_gain(hdspm, channel,
+ channel);
if (change)
- hdspm_write_pb_gain(hdspm, mapped_channel, mapped_channel,
+ hdspm_write_pb_gain(hdspm, channel, channel,
gain);
spin_unlock_irq(&hdspm->lock);
return change;
}
-#define HDSPM_WC_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
- .info = snd_hdspm_info_sync_check, \
- .get = snd_hdspm_get_wc_sync_check \
+#define HDSPM_SYNC_CHECK(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .private_value = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_sync_check, \
+ .get = snd_hdspm_get_sync_check \
}
+
static int snd_hdspm_info_sync_check(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "No Lock", "Lock", "Sync" };
+ static char *texts[] = { "No Lock", "Lock", "Sync", "N/A" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
+ uinfo->value.enumerated.items = 4;
if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
+ uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
+ texts[uinfo->value.enumerated.item]);
return 0;
}
-static int hdspm_wc_sync_check(struct hdspm * hdspm)
+static int hdspm_wc_sync_check(struct hdspm *hdspm)
{
- if (hdspm->is_aes32) {
- int status = hdspm_read(hdspm, HDSPM_statusRegister);
- if (status & HDSPM_AES32_wcLock) {
- /* I don't know how to differenciate sync from lock.
- Doing as if sync for now */
+ int status, status2;
+
+ switch (hdspm->io_type) {
+ case AES32:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ if (status & HDSPM_wcSync)
return 2;
- }
+ else if (status & HDSPM_wcLock)
+ return 1;
return 0;
- } else {
- int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+ break;
+
+ case MADI:
+ status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
if (status2 & HDSPM_wcLock) {
if (status2 & HDSPM_wcSync)
return 2;
@@ -2716,29 +3635,30 @@ static int hdspm_wc_sync_check(struct hdspm * hdspm)
return 1;
}
return 0;
- }
-}
+ break;
-static int snd_hdspm_get_wc_sync_check(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ case RayDAT:
+ case AIO:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
- ucontrol->value.enumerated.item[0] = hdspm_wc_sync_check(hdspm);
- return 0;
-}
+ if (status & 0x2000000)
+ return 2;
+ else if (status & 0x1000000)
+ return 1;
+ return 0;
+ break;
-#define HDSPM_MADI_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
- .info = snd_hdspm_info_sync_check, \
- .get = snd_hdspm_get_madisync_sync_check \
+ case MADIface:
+ break;
+ }
+
+
+ return 3;
}
-static int hdspm_madisync_sync_check(struct hdspm * hdspm)
+
+static int hdspm_madi_sync_check(struct hdspm *hdspm)
{
int status = hdspm_read(hdspm, HDSPM_statusRegister);
if (status & HDSPM_madiLock) {
@@ -2750,89 +3670,726 @@ static int hdspm_madisync_sync_check(struct hdspm * hdspm)
return 0;
}
-static int snd_hdspm_get_madisync_sync_check(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *
- ucontrol)
+
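+/* Generic sync check via status register 1 (RayDAT/AIO inputs): bit
+ * <idx> signals lock, bit <idx>+8 signals sync. Returns 2 for sync,
+ * 1 for lock only, 0 for no signal. */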
+static int hdspm_s1_sync_check(struct hdspm *hdspm, int idx)
{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int status, lock, sync;
+
+ status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
+
+ lock = (status & (0x1<<idx)) ? 1 : 0;
+ sync = (status & (0x100<<idx)) ? 1 : 0;
- ucontrol->value.enumerated.item[0] =
- hdspm_madisync_sync_check(hdspm);
+ if (lock && sync)
+ return 2;
+ else if (lock)
+ return 1;
return 0;
}
-#define HDSPM_AES_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
- .info = snd_hdspm_info_sync_check, \
- .get = snd_hdspm_get_aes_sync_check \
+static int hdspm_sync_in_sync_check(struct hdspm *hdspm)
+{
+ int status, lock = 0, sync = 0;
+
+ switch (hdspm->io_type) {
+ case RayDAT:
+ case AIO:
+ status = hdspm_read(hdspm, HDSPM_RD_STATUS_3);
+ lock = (status & 0x400) ? 1 : 0;
+ sync = (status & 0x800) ? 1 : 0;
+ break;
+
+ case MADI:
+ case AES32:
+ status = hdspm_read(hdspm, HDSPM_statusRegister2);
+ lock = (status & HDSPM_syncInLock) ? 1 : 0;
+ sync = (status & HDSPM_syncInSync) ? 1 : 0;
+ break;
+
+ case MADIface:
+ break;
+ }
+
+ if (lock && sync)
+ return 2;
+ else if (lock)
+ return 1;
+
+ return 0;
}
-static int hdspm_aes_sync_check(struct hdspm * hdspm, int idx)
+static int hdspm_aes_sync_check(struct hdspm *hdspm, int idx)
{
- int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
- if (status2 & (HDSPM_LockAES >> idx)) {
- /* I don't know how to differenciate sync from lock.
- Doing as if sync for now */
+ int status2, lock, sync;
+ status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+
+ lock = (status2 & (0x0080 >> idx)) ? 1 : 0;
+ sync = (status2 & (0x8000 >> idx)) ? 1 : 0;
+
+ if (sync)
return 2;
+ else if (lock)
+ return 1;
+ return 0;
+}
+
+
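+/* TCO sync check; returns 3 ("N/A") when no TCO module is present or
+ * the card cannot carry one. */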
+static int hdspm_tco_sync_check(struct hdspm *hdspm)
+{
+ int status;
+
+ if (hdspm->tco) {
+ switch (hdspm->io_type) {
+ case MADI:
+ case AES32:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ if (status & HDSPM_tcoLock) {
+ if (status & HDSPM_tcoSync)
+ return 2;
+ else
+ return 1;
+ }
+ return 0;
+
+ break;
+
+ case RayDAT:
+ case AIO:
+ status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
+
+ if (status & 0x8000000)
+ return 2; /* Sync */
+ if (status & 0x4000000)
+ return 1; /* Lock */
+ return 0; /* No signal */
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return 3; /* N/A */
+}
+
+
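+/* Dispatch on the control's private_value (card specific sync source
+ * index) to the matching lock/sync helper above. */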
+static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int val = -1;
+
+ switch (hdspm->io_type) {
+ case RayDAT:
+ switch (kcontrol->private_value) {
+ case 0: /* WC */
+ val = hdspm_wc_sync_check(hdspm); break;
+ case 7: /* TCO */
+ val = hdspm_tco_sync_check(hdspm); break;
+ case 8: /* SYNC IN */
+ val = hdspm_sync_in_sync_check(hdspm); break;
+ default:
+ val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
+		}
+		break;
+
+ case AIO:
+ switch (kcontrol->private_value) {
+ case 0: /* WC */
+ val = hdspm_wc_sync_check(hdspm); break;
+ case 4: /* TCO */
+ val = hdspm_tco_sync_check(hdspm); break;
+ case 5: /* SYNC IN */
+ val = hdspm_sync_in_sync_check(hdspm); break;
+ default:
+ val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
+		}
+		break;
+
+ case MADI:
+ switch (kcontrol->private_value) {
+ case 0: /* WC */
+ val = hdspm_wc_sync_check(hdspm); break;
+ case 1: /* MADI */
+ val = hdspm_madi_sync_check(hdspm); break;
+ case 2: /* TCO */
+ val = hdspm_tco_sync_check(hdspm); break;
+ case 3: /* SYNC_IN */
+ val = hdspm_sync_in_sync_check(hdspm); break;
+		}
+		break;
+
+ case MADIface:
+ val = hdspm_madi_sync_check(hdspm); /* MADI */
+ break;
+
+ case AES32:
+ switch (kcontrol->private_value) {
+ case 0: /* WC */
+ val = hdspm_wc_sync_check(hdspm); break;
+ case 9: /* TCO */
+ val = hdspm_tco_sync_check(hdspm); break;
+		case 10: /* SYNC IN */
+ val = hdspm_sync_in_sync_check(hdspm); break;
+ default: /* AES1 to AES8 */
+ val = hdspm_aes_sync_check(hdspm,
+ kcontrol->private_value-1);
+ }
+
}
+
+ if (-1 == val)
+ val = 3;
+
+ ucontrol->value.enumerated.item[0] = val;
return 0;
}
-static int snd_hdspm_get_aes_sync_check(struct snd_kcontrol *kcontrol,
+
+
+/**
+ * TCO controls
+ **/
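+/* Pack the cached TCO settings (input, frame rate, word clock ratio,
+ * sample rate, pull, termination) into four 32-bit words and write
+ * them to HDSPM_WR_TCO .. HDSPM_WR_TCO+12. */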
+static void hdspm_tco_write(struct hdspm *hdspm)
+{
+ unsigned int tc[4] = { 0, 0, 0, 0};
+
+ switch (hdspm->tco->input) {
+ case 0:
+ tc[2] |= HDSPM_TCO2_set_input_MSB;
+ break;
+ case 1:
+ tc[2] |= HDSPM_TCO2_set_input_LSB;
+ break;
+ default:
+ break;
+ }
+
+ switch (hdspm->tco->framerate) {
+ case 1:
+ tc[1] |= HDSPM_TCO1_LTC_Format_LSB;
+ break;
+ case 2:
+ tc[1] |= HDSPM_TCO1_LTC_Format_MSB;
+ break;
+ case 3:
+ tc[1] |= HDSPM_TCO1_LTC_Format_MSB +
+ HDSPM_TCO1_set_drop_frame_flag;
+ break;
+ case 4:
+ tc[1] |= HDSPM_TCO1_LTC_Format_LSB +
+ HDSPM_TCO1_LTC_Format_MSB;
+ break;
+ case 5:
+ tc[1] |= HDSPM_TCO1_LTC_Format_LSB +
+ HDSPM_TCO1_LTC_Format_MSB +
+ HDSPM_TCO1_set_drop_frame_flag;
+ break;
+ default:
+ break;
+ }
+
+ switch (hdspm->tco->wordclock) {
+ case 1:
+ tc[2] |= HDSPM_TCO2_WCK_IO_ratio_LSB;
+ break;
+ case 2:
+ tc[2] |= HDSPM_TCO2_WCK_IO_ratio_MSB;
+ break;
+ default:
+ break;
+ }
+
+ switch (hdspm->tco->samplerate) {
+ case 1:
+ tc[2] |= HDSPM_TCO2_set_freq;
+ break;
+ case 2:
+ tc[2] |= HDSPM_TCO2_set_freq_from_app;
+ break;
+ default:
+ break;
+ }
+
+ switch (hdspm->tco->pull) {
+ case 1:
+ tc[2] |= HDSPM_TCO2_set_pull_up;
+ break;
+ case 2:
+ tc[2] |= HDSPM_TCO2_set_pull_down;
+ break;
+ case 3:
+ tc[2] |= HDSPM_TCO2_set_pull_up + HDSPM_TCO2_set_01_4;
+ break;
+ case 4:
+ tc[2] |= HDSPM_TCO2_set_pull_down + HDSPM_TCO2_set_01_4;
+ break;
+ default:
+ break;
+ }
+
+ if (1 == hdspm->tco->term) {
+ tc[2] |= HDSPM_TCO2_set_term_75R;
+ }
+
+ hdspm_write(hdspm, HDSPM_WR_TCO, tc[0]);
+ hdspm_write(hdspm, HDSPM_WR_TCO+4, tc[1]);
+ hdspm_write(hdspm, HDSPM_WR_TCO+8, tc[2]);
+ hdspm_write(hdspm, HDSPM_WR_TCO+12, tc[3]);
+}
+
+
+#define HDSPM_TCO_SAMPLE_RATE(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_tco_sample_rate, \
+ .get = snd_hdspm_get_tco_sample_rate, \
+ .put = snd_hdspm_put_tco_sample_rate \
+}
+
+static int snd_hdspm_info_tco_sample_rate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static char *texts[] = { "44.1 kHz", "48 kHz" };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 2;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_hdspm_get_tco_sample_rate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = hdspm->tco->samplerate;
+
+ return 0;
+}
+
+static int snd_hdspm_put_tco_sample_rate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ if (hdspm->tco->samplerate != ucontrol->value.enumerated.item[0]) {
+ hdspm->tco->samplerate = ucontrol->value.enumerated.item[0];
+
+ hdspm_tco_write(hdspm);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+
+#define HDSPM_TCO_PULL(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_tco_pull, \
+ .get = snd_hdspm_get_tco_pull, \
+ .put = snd_hdspm_put_tco_pull \
+}
+
+static int snd_hdspm_info_tco_pull(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static char *texts[] = { "0", "+ 0.1 %", "- 0.1 %", "+ 4 %", "- 4 %" };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 5;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_hdspm_get_tco_pull(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = hdspm->tco->pull;
+
+ return 0;
+}
+
+static int snd_hdspm_put_tco_pull(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ if (hdspm->tco->pull != ucontrol->value.enumerated.item[0]) {
+ hdspm->tco->pull = ucontrol->value.enumerated.item[0];
+
+ hdspm_tco_write(hdspm);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+#define HDSPM_TCO_WCK_CONVERSION(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_tco_wck_conversion, \
+ .get = snd_hdspm_get_tco_wck_conversion, \
+ .put = snd_hdspm_put_tco_wck_conversion \
+}
+
+static int snd_hdspm_info_tco_wck_conversion(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static char *texts[] = { "1:1", "44.1 -> 48", "48 -> 44.1" };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 3;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_hdspm_get_tco_wck_conversion(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = hdspm->tco->wordclock;
+
+ return 0;
+}
+
+static int snd_hdspm_put_tco_wck_conversion(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ if (hdspm->tco->wordclock != ucontrol->value.enumerated.item[0]) {
+ hdspm->tco->wordclock = ucontrol->value.enumerated.item[0];
+
+ hdspm_tco_write(hdspm);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+
+#define HDSPM_TCO_FRAME_RATE(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_tco_frame_rate, \
+ .get = snd_hdspm_get_tco_frame_rate, \
+ .put = snd_hdspm_put_tco_frame_rate \
+}
+
+static int snd_hdspm_info_tco_frame_rate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = { "24 fps", "25 fps", "29.97 fps",
+ "29.97 dfps", "30 fps", "30 dfps" };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 6;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_hdspm_get_tco_frame_rate(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- int offset;
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- offset = ucontrol->id.index - 1;
- if (offset < 0 || offset >= 8)
- return -EINVAL;
+ ucontrol->value.enumerated.item[0] = hdspm->tco->framerate;
- ucontrol->value.enumerated.item[0] =
- hdspm_aes_sync_check(hdspm, offset);
return 0;
}
+static int snd_hdspm_put_tco_frame_rate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ if (hdspm->tco->framerate != ucontrol->value.enumerated.item[0]) {
+ hdspm->tco->framerate = ucontrol->value.enumerated.item[0];
-static struct snd_kcontrol_new snd_hdspm_controls_madi[] = {
+ hdspm_tco_write(hdspm);
+
+ return 1;
+ }
+
+ return 0;
+}
- HDSPM_MIXER("Mixer", 0),
-/* 'Sample Clock Source' complies with the alsa control naming scheme */
- HDSPM_CLOCK_SOURCE("Sample Clock Source", 0),
+#define HDSPM_TCO_SYNC_SOURCE(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_tco_sync_source, \
+ .get = snd_hdspm_get_tco_sync_source, \
+ .put = snd_hdspm_put_tco_sync_source \
+}
+
+static int snd_hdspm_info_tco_sync_source(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static char *texts[] = { "LTC", "Video", "WCK" };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 3;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_hdspm_get_tco_sync_source(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = hdspm->tco->input;
+
+ return 0;
+}
+
+static int snd_hdspm_put_tco_sync_source(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ if (hdspm->tco->input != ucontrol->value.enumerated.item[0]) {
+ hdspm->tco->input = ucontrol->value.enumerated.item[0];
+
+ hdspm_tco_write(hdspm);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+
+#define HDSPM_TCO_WORD_TERM(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_tco_word_term, \
+ .get = snd_hdspm_get_tco_word_term, \
+ .put = snd_hdspm_put_tco_word_term \
+}
+
+static int snd_hdspm_info_tco_word_term(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+
+ return 0;
+}
+
+
+static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = hdspm->tco->term;
+
+ return 0;
+}
+
+
+static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
+ hdspm->tco->term = ucontrol->value.enumerated.item[0];
+
+ hdspm_tco_write(hdspm);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+
+
+
+static struct snd_kcontrol_new snd_hdspm_controls_madi[] = {
+ HDSPM_MIXER("Mixer", 0),
+ HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
HDSPM_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
-/* 'External Rate' complies with the alsa control naming scheme */
- HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
- HDSPM_WC_SYNC_CHECK("Word Clock Lock Status", 0),
- HDSPM_MADI_SYNC_CHECK("MADI Sync Lock Status", 0),
+ HDSPM_SYNC_CHECK("WC SyncCheck", 0),
+ HDSPM_SYNC_CHECK("MADI SyncCheck", 1),
+	HDSPM_SYNC_CHECK("TCO SyncCheck", 2),
+ HDSPM_SYNC_CHECK("SYNC IN SyncCheck", 3),
HDSPM_LINE_OUT("Line Out", 0),
HDSPM_TX_64("TX 64 channels mode", 0),
HDSPM_C_TMS("Clear Track Marker", 0),
HDSPM_SAFE_MODE("Safe Mode", 0),
- HDSPM_INPUT_SELECT("Input Select", 0),
+ HDSPM_INPUT_SELECT("Input Select", 0)
};
-static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
+static struct snd_kcontrol_new snd_hdspm_controls_madiface[] = {
HDSPM_MIXER("Mixer", 0),
-/* 'Sample Clock Source' complies with the alsa control naming scheme */
- HDSPM_CLOCK_SOURCE("Sample Clock Source", 0),
+ HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
+ HDSPM_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
+ HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
+ HDSPM_SYNC_CHECK("MADI SyncCheck", 0),
+ HDSPM_TX_64("TX 64 channels mode", 0),
+ HDSPM_C_TMS("Clear Track Marker", 0),
+ HDSPM_SAFE_MODE("Safe Mode", 0)
+};
+static struct snd_kcontrol_new snd_hdspm_controls_aio[] = {
+ HDSPM_MIXER("Mixer", 0),
+ HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
HDSPM_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
-/* 'External Rate' complies with the alsa control naming scheme */
HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
- HDSPM_WC_SYNC_CHECK("Word Clock Lock Status", 0),
-/* HDSPM_AES_SYNC_CHECK("AES Lock Status", 0),*/ /* created in snd_hdspm_create_controls() */
+ HDSPM_SYNC_CHECK("WC SyncCheck", 0),
+ HDSPM_SYNC_CHECK("AES SyncCheck", 1),
+ HDSPM_SYNC_CHECK("SPDIF SyncCheck", 2),
+ HDSPM_SYNC_CHECK("ADAT SyncCheck", 3),
+ HDSPM_SYNC_CHECK("TCO SyncCheck", 4),
+ HDSPM_SYNC_CHECK("SYNC IN SyncCheck", 5),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("WC Frequency", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES Frequency", 1),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SPDIF Frequency", 2),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT Frequency", 3),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 4),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 5)
+
+ /*
+ HDSPM_INPUT_SELECT("Input Select", 0),
+ HDSPM_SPDIF_OPTICAL("SPDIF Out Optical", 0),
+ HDSPM_PROFESSIONAL("SPDIF Out Professional", 0);
+ HDSPM_SPDIF_IN("SPDIF In", 0);
+ HDSPM_BREAKOUT_CABLE("Breakout Cable", 0);
+ HDSPM_INPUT_LEVEL("Input Level", 0);
+ HDSPM_OUTPUT_LEVEL("Output Level", 0);
+ HDSPM_PHONES("Phones", 0);
+ */
+};
+
+static struct snd_kcontrol_new snd_hdspm_controls_raydat[] = {
+ HDSPM_MIXER("Mixer", 0),
+ HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
+ HDSPM_SYSTEM_CLOCK_MODE("Clock Mode", 0),
+ HDSPM_PREF_SYNC_REF("Pref Sync Ref", 0),
+ HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+ HDSPM_SYNC_CHECK("WC SyncCheck", 0),
+ HDSPM_SYNC_CHECK("AES SyncCheck", 1),
+ HDSPM_SYNC_CHECK("SPDIF SyncCheck", 2),
+ HDSPM_SYNC_CHECK("ADAT1 SyncCheck", 3),
+ HDSPM_SYNC_CHECK("ADAT2 SyncCheck", 4),
+ HDSPM_SYNC_CHECK("ADAT3 SyncCheck", 5),
+ HDSPM_SYNC_CHECK("ADAT4 SyncCheck", 6),
+ HDSPM_SYNC_CHECK("TCO SyncCheck", 7),
+ HDSPM_SYNC_CHECK("SYNC IN SyncCheck", 8),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("WC Frequency", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES Frequency", 1),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SPDIF Frequency", 2),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT1 Frequency", 3),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT2 Frequency", 4),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT3 Frequency", 5),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT4 Frequency", 6),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 7),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 8)
+};
+
+static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
+ HDSPM_MIXER("Mixer", 0),
+ HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
+ HDSPM_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
+ HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
+ HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
+ HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
+ HDSPM_SYNC_CHECK("WC Sync Check", 0),
+ HDSPM_SYNC_CHECK("AES1 Sync Check", 1),
+ HDSPM_SYNC_CHECK("AES2 Sync Check", 2),
+ HDSPM_SYNC_CHECK("AES3 Sync Check", 3),
+ HDSPM_SYNC_CHECK("AES4 Sync Check", 4),
+ HDSPM_SYNC_CHECK("AES5 Sync Check", 5),
+ HDSPM_SYNC_CHECK("AES6 Sync Check", 6),
+ HDSPM_SYNC_CHECK("AES7 Sync Check", 7),
+ HDSPM_SYNC_CHECK("AES8 Sync Check", 8),
+ HDSPM_SYNC_CHECK("TCO Sync Check", 9),
+ HDSPM_SYNC_CHECK("SYNC IN Sync Check", 10),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("WC Frequency", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES1 Frequency", 1),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES2 Frequency", 2),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES3 Frequency", 3),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES4 Frequency", 4),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES5 Frequency", 5),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES6 Frequency", 6),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES7 Frequency", 7),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("AES8 Frequency", 8),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 9),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 10),
HDSPM_LINE_OUT("Line Out", 0),
HDSPM_EMPHASIS("Emphasis", 0),
HDSPM_DOLBY("Non Audio", 0),
@@ -2842,6 +4399,19 @@ static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
HDSPM_QS_WIRE("Quad Speed Wire Mode", 0),
};
+
+
+/* Control elements for the optional TCO module */
+static struct snd_kcontrol_new snd_hdspm_controls_tco[] = {
+ HDSPM_TCO_SAMPLE_RATE("TCO Sample Rate", 0),
+ HDSPM_TCO_PULL("TCO Pull", 0),
+ HDSPM_TCO_WCK_CONVERSION("TCO WCK Conversion", 0),
+ HDSPM_TCO_FRAME_RATE("TCO Frame Rate", 0),
+ HDSPM_TCO_SYNC_SOURCE("TCO Sync Source", 0),
+ HDSPM_TCO_WORD_TERM("TCO Word Term", 0)
+};
+
+
static struct snd_kcontrol_new snd_hdspm_playback_mixer = HDSPM_PLAYBACK_MIXER;
@@ -2849,78 +4419,76 @@ static int hdspm_update_simple_mixer_controls(struct hdspm * hdspm)
{
int i;
- for (i = hdspm->ds_channels; i < hdspm->ss_channels; ++i) {
+ for (i = hdspm->ds_out_channels; i < hdspm->ss_out_channels; ++i) {
if (hdspm->system_sample_rate > 48000) {
hdspm->playback_mixer_ctls[i]->vd[0].access =
- SNDRV_CTL_ELEM_ACCESS_INACTIVE |
- SNDRV_CTL_ELEM_ACCESS_READ |
- SNDRV_CTL_ELEM_ACCESS_VOLATILE;
+ SNDRV_CTL_ELEM_ACCESS_INACTIVE |
+ SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE;
} else {
hdspm->playback_mixer_ctls[i]->vd[0].access =
- SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_VOLATILE;
+ SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE;
}
snd_ctl_notify(hdspm->card, SNDRV_CTL_EVENT_MASK_VALUE |
- SNDRV_CTL_EVENT_MASK_INFO,
- &hdspm->playback_mixer_ctls[i]->id);
+ SNDRV_CTL_EVENT_MASK_INFO,
+ &hdspm->playback_mixer_ctls[i]->id);
}
return 0;
}
-static int snd_hdspm_create_controls(struct snd_card *card, struct hdspm * hdspm)
+static int snd_hdspm_create_controls(struct snd_card *card,
+ struct hdspm *hdspm)
{
unsigned int idx, limit;
int err;
struct snd_kcontrol *kctl;
+ struct snd_kcontrol_new *list = NULL;
- /* add control list first */
- if (hdspm->is_aes32) {
- struct snd_kcontrol_new aes_sync_ctl =
- HDSPM_AES_SYNC_CHECK("AES Lock Status", 0);
+ switch (hdspm->io_type) {
+ case MADI:
+ list = snd_hdspm_controls_madi;
+ limit = ARRAY_SIZE(snd_hdspm_controls_madi);
+ break;
+ case MADIface:
+ list = snd_hdspm_controls_madiface;
+ limit = ARRAY_SIZE(snd_hdspm_controls_madiface);
+ break;
+ case AIO:
+ list = snd_hdspm_controls_aio;
+ limit = ARRAY_SIZE(snd_hdspm_controls_aio);
+ break;
+ case RayDAT:
+ list = snd_hdspm_controls_raydat;
+ limit = ARRAY_SIZE(snd_hdspm_controls_raydat);
+ break;
+ case AES32:
+ list = snd_hdspm_controls_aes32;
+ limit = ARRAY_SIZE(snd_hdspm_controls_aes32);
+ break;
+ }
- for (idx = 0; idx < ARRAY_SIZE(snd_hdspm_controls_aes32);
- idx++) {
- err = snd_ctl_add(card,
- snd_ctl_new1(&snd_hdspm_controls_aes32[idx],
- hdspm));
- if (err < 0)
- return err;
- }
- for (idx = 1; idx <= 8; idx++) {
- aes_sync_ctl.index = idx;
- err = snd_ctl_add(card,
- snd_ctl_new1(&aes_sync_ctl, hdspm));
- if (err < 0)
- return err;
- }
- } else {
- for (idx = 0; idx < ARRAY_SIZE(snd_hdspm_controls_madi);
- idx++) {
+ if (NULL != list) {
+ for (idx = 0; idx < limit; idx++) {
err = snd_ctl_add(card,
- snd_ctl_new1(&snd_hdspm_controls_madi[idx],
- hdspm));
+ snd_ctl_new1(&list[idx], hdspm));
if (err < 0)
return err;
}
}
- /* Channel playback mixer as default control
- Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders,
- thats too * big for any alsamixer they are accessible via special
- IOCTL on hwdep and the mixer 2dimensional mixer control
- */
+ /* create simple 1:1 playback mixer controls */
snd_hdspm_playback_mixer.name = "Chn";
- limit = HDSPM_MAX_CHANNELS;
-
- /* The index values are one greater than the channel ID so that
- * alsamixer will display them correctly. We want to use the index
- * for fast lookup of the relevant channel, but if we use it at all,
- * most ALSA software does the wrong thing with it ...
- */
-
+ if (hdspm->system_sample_rate >= 128000) {
+ limit = hdspm->qs_out_channels;
+ } else if (hdspm->system_sample_rate >= 64000) {
+ limit = hdspm->ds_out_channels;
+ } else {
+ limit = hdspm->ss_out_channels;
+ }
for (idx = 0; idx < limit; ++idx) {
snd_hdspm_playback_mixer.index = idx + 1;
kctl = snd_ctl_new1(&snd_hdspm_playback_mixer, hdspm);
@@ -2930,11 +4498,24 @@ static int snd_hdspm_create_controls(struct snd_card *card, struct hdspm * hdspm
hdspm->playback_mixer_ctls[idx] = kctl;
}
+
+ if (hdspm->tco) {
+ /* add tco control elements */
+ list = snd_hdspm_controls_tco;
+ limit = ARRAY_SIZE(snd_hdspm_controls_tco);
+ for (idx = 0; idx < limit; idx++) {
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&list[idx], hdspm));
+ if (err < 0)
+ return err;
+ }
+ }
+
return 0;
}
/*------------------------------------------------------------
- /proc interface
+ /proc interface
------------------------------------------------------------*/
static void
@@ -2942,72 +4523,178 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
struct snd_info_buffer *buffer)
{
struct hdspm *hdspm = entry->private_data;
- unsigned int status;
- unsigned int status2;
+ unsigned int status, status2, control, freq;
+
char *pref_sync_ref;
char *autosync_ref;
char *system_clock_mode;
- char *clock_source;
char *insel;
- char *syncref;
int x, x2;
+ /* TCO stuff */
+ int a, ltc, frames, seconds, minutes, hours;
+ unsigned int period;
+ u64 freq_const = 0;
+ u32 rate;
+
status = hdspm_read(hdspm, HDSPM_statusRegister);
status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+ control = hdspm->control_register;
+ freq = hdspm_read(hdspm, HDSPM_timecodeRegister);
snd_iprintf(buffer, "%s (Card #%d) Rev.%x Status2first3bits: %x\n",
- hdspm->card_name, hdspm->card->number + 1,
- hdspm->firmware_rev,
- (status2 & HDSPM_version0) |
- (status2 & HDSPM_version1) | (status2 &
- HDSPM_version2));
+ hdspm->card_name, hdspm->card->number + 1,
+ hdspm->firmware_rev,
+ (status2 & HDSPM_version0) |
+ (status2 & HDSPM_version1) | (status2 &
+ HDSPM_version2));
+
+ snd_iprintf(buffer, "HW Serial: 0x%06x%06x\n",
+ (hdspm_read(hdspm, HDSPM_midiStatusIn1)>>8) & 0xFFFFFF,
+ (hdspm_read(hdspm, HDSPM_midiStatusIn0)>>8) & 0xFFFFFF);
snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
- hdspm->irq, hdspm->port, (unsigned long)hdspm->iobase);
+ hdspm->irq, hdspm->port, (unsigned long)hdspm->iobase);
snd_iprintf(buffer, "--- System ---\n");
snd_iprintf(buffer,
- "IRQ Pending: Audio=%d, MIDI0=%d, MIDI1=%d, IRQcount=%d\n",
- status & HDSPM_audioIRQPending,
- (status & HDSPM_midi0IRQPending) ? 1 : 0,
- (status & HDSPM_midi1IRQPending) ? 1 : 0,
- hdspm->irq_count);
+ "IRQ Pending: Audio=%d, MIDI0=%d, MIDI1=%d, IRQcount=%d\n",
+ status & HDSPM_audioIRQPending,
+ (status & HDSPM_midi0IRQPending) ? 1 : 0,
+ (status & HDSPM_midi1IRQPending) ? 1 : 0,
+ hdspm->irq_count);
snd_iprintf(buffer,
- "HW pointer: id = %d, rawptr = %d (%d->%d) "
- "estimated= %ld (bytes)\n",
- ((status & HDSPM_BufferID) ? 1 : 0),
- (status & HDSPM_BufferPositionMask),
- (status & HDSPM_BufferPositionMask) %
- (2 * (int)hdspm->period_bytes),
- ((status & HDSPM_BufferPositionMask) - 64) %
- (2 * (int)hdspm->period_bytes),
- (long) hdspm_hw_pointer(hdspm) * 4);
+ "HW pointer: id = %d, rawptr = %d (%d->%d) "
+ "estimated= %ld (bytes)\n",
+ ((status & HDSPM_BufferID) ? 1 : 0),
+ (status & HDSPM_BufferPositionMask),
+ (status & HDSPM_BufferPositionMask) %
+ (2 * (int)hdspm->period_bytes),
+ ((status & HDSPM_BufferPositionMask) - 64) %
+ (2 * (int)hdspm->period_bytes),
+ (long) hdspm_hw_pointer(hdspm) * 4);
snd_iprintf(buffer,
- "MIDI FIFO: Out1=0x%x, Out2=0x%x, In1=0x%x, In2=0x%x \n",
- hdspm_read(hdspm, HDSPM_midiStatusOut0) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusOut1) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
+ "MIDI FIFO: Out1=0x%x, Out2=0x%x, In1=0x%x, In2=0x%x \n",
+ hdspm_read(hdspm, HDSPM_midiStatusOut0) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusOut1) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
snd_iprintf(buffer,
- "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
- "status2=0x%x\n",
- hdspm->control_register, hdspm->control2_register,
- status, status2);
+ "MIDIoverMADI FIFO: In=0x%x, Out=0x%x \n",
+ hdspm_read(hdspm, HDSPM_midiStatusIn2) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusOut2) & 0xFF);
+ snd_iprintf(buffer,
+ "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
+ "status2=0x%x\n",
+ hdspm->control_register, hdspm->control2_register,
+ status, status2);
+ if (status & HDSPM_tco_detect) {
+ snd_iprintf(buffer, "TCO module detected.\n");
+ a = hdspm_read(hdspm, HDSPM_RD_TCO+4);
+ if (a & HDSPM_TCO1_LTC_Input_valid) {
+ snd_iprintf(buffer, " LTC valid, ");
+ switch (a & (HDSPM_TCO1_LTC_Format_LSB |
+ HDSPM_TCO1_LTC_Format_MSB)) {
+ case 0:
+ snd_iprintf(buffer, "24 fps, ");
+ break;
+ case HDSPM_TCO1_LTC_Format_LSB:
+ snd_iprintf(buffer, "25 fps, ");
+ break;
+ case HDSPM_TCO1_LTC_Format_MSB:
+ snd_iprintf(buffer, "29.97 fps, ");
+ break;
+ default:
+ snd_iprintf(buffer, "30 fps, ");
+ break;
+ }
+ if (a & HDSPM_TCO1_set_drop_frame_flag) {
+ snd_iprintf(buffer, "drop frame\n");
+ } else {
+ snd_iprintf(buffer, "full frame\n");
+ }
+ } else {
+ snd_iprintf(buffer, " no LTC\n");
+ }
+ if (a & HDSPM_TCO1_Video_Input_Format_NTSC) {
+ snd_iprintf(buffer, " Video: NTSC\n");
+ } else if (a & HDSPM_TCO1_Video_Input_Format_PAL) {
+ snd_iprintf(buffer, " Video: PAL\n");
+ } else {
+ snd_iprintf(buffer, " No video\n");
+ }
+ if (a & HDSPM_TCO1_TCO_lock) {
+ snd_iprintf(buffer, " Sync: lock\n");
+ } else {
+ snd_iprintf(buffer, " Sync: no lock\n");
+ }
+
+ switch (hdspm->io_type) {
+ case MADI:
+ case AES32:
+ freq_const = 110069313433624ULL;
+ break;
+ case RayDAT:
+ case AIO:
+ freq_const = 104857600000000ULL;
+ break;
+ case MADIface:
+ break; /* no TCO possible */
+ }
+
+ period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
+ snd_iprintf(buffer, " period: %u\n", period);
+
+
+ /* rate = freq_const/period; */
+ rate = div_u64(freq_const, period);
+
+ if (control & HDSPM_QuadSpeed) {
+ rate *= 4;
+ } else if (control & HDSPM_DoubleSpeed) {
+ rate *= 2;
+ }
+
+ snd_iprintf(buffer, " Frequency: %u Hz\n",
+ (unsigned int) rate);
+
+ ltc = hdspm_read(hdspm, HDSPM_RD_TCO);
+ frames = ltc & 0xF;
+ ltc >>= 4;
+ frames += (ltc & 0x3) * 10;
+ ltc >>= 4;
+ seconds = ltc & 0xF;
+ ltc >>= 4;
+ seconds += (ltc & 0x7) * 10;
+ ltc >>= 4;
+ minutes = ltc & 0xF;
+ ltc >>= 4;
+ minutes += (ltc & 0x7) * 10;
+ ltc >>= 4;
+ hours = ltc & 0xF;
+ ltc >>= 4;
+ hours += (ltc & 0x3) * 10;
+ snd_iprintf(buffer,
+ " LTC In: %02d:%02d:%02d:%02d\n",
+ hours, minutes, seconds, frames);
+
+ } else {
+ snd_iprintf(buffer, "No TCO module detected.\n");
+ }
snd_iprintf(buffer, "--- Settings ---\n");
x = 1 << (6 + hdspm_decode_latency(hdspm->control_register &
- HDSPM_LatencyMask));
+ HDSPM_LatencyMask));
snd_iprintf(buffer,
- "Size (Latency): %d samples (2 periods of %lu bytes)\n",
- x, (unsigned long) hdspm->period_bytes);
+ "Size (Latency): %d samples (2 periods of %lu bytes)\n",
+ x, (unsigned long) hdspm->period_bytes);
- snd_iprintf(buffer, "Line out: %s, Precise Pointer: %s\n",
- (hdspm->control_register & HDSPM_LineOut) ? "on " : "off",
- (hdspm->precise_ptr) ? "on" : "off");
+ snd_iprintf(buffer, "Line out: %s\n",
+ (hdspm->control_register & HDSPM_LineOut) ? "on " : "off");
switch (hdspm->control_register & HDSPM_InputMask) {
case HDSPM_InputOptical:
@@ -3017,63 +4704,22 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
insel = "Coaxial";
break;
default:
- insel = "Unknown";
- }
-
- switch (hdspm->control_register & HDSPM_SyncRefMask) {
- case HDSPM_SyncRef_Word:
- syncref = "WordClock";
- break;
- case HDSPM_SyncRef_MADI:
- syncref = "MADI";
- break;
- default:
- syncref = "Unknown";
+		insel = "Unknown";
}
- snd_iprintf(buffer, "Inputsel = %s, SyncRef = %s\n", insel,
- syncref);
snd_iprintf(buffer,
- "ClearTrackMarker = %s, Transmit in %s Channel Mode, "
- "Auto Input %s\n",
- (hdspm->
- control_register & HDSPM_clr_tms) ? "on" : "off",
- (hdspm->
- control_register & HDSPM_TX_64ch) ? "64" : "56",
- (hdspm->
- control_register & HDSPM_AutoInp) ? "on" : "off");
+ "ClearTrackMarker = %s, Transmit in %s Channel Mode, "
+ "Auto Input %s\n",
+ (hdspm->control_register & HDSPM_clr_tms) ? "on" : "off",
+ (hdspm->control_register & HDSPM_TX_64ch) ? "64" : "56",
+ (hdspm->control_register & HDSPM_AutoInp) ? "on" : "off");
+
- switch (hdspm_clock_source(hdspm)) {
- case HDSPM_CLOCK_SOURCE_AUTOSYNC:
- clock_source = "AutoSync";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_32KHZ:
- clock_source = "Internal 32 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_44_1KHZ:
- clock_source = "Internal 44.1 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_48KHZ:
- clock_source = "Internal 48 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_64KHZ:
- clock_source = "Internal 64 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_88_2KHZ:
- clock_source = "Internal 88.2 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_96KHZ:
- clock_source = "Internal 96 kHz";
- break;
- default:
- clock_source = "Error";
- }
- snd_iprintf(buffer, "Sample Clock Source: %s\n", clock_source);
if (!(hdspm->control_register & HDSPM_ClockModeMaster))
- system_clock_mode = "Slave";
+ system_clock_mode = "AutoSync";
else
system_clock_mode = "Master";
- snd_iprintf(buffer, "System Clock Mode: %s\n", system_clock_mode);
+ snd_iprintf(buffer, "AutoSync Reference: %s\n", system_clock_mode);
switch (hdspm_pref_sync_ref(hdspm)) {
case HDSPM_SYNC_FROM_WORD:
@@ -3082,15 +4728,21 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
case HDSPM_SYNC_FROM_MADI:
pref_sync_ref = "MADI Sync";
break;
+ case HDSPM_SYNC_FROM_TCO:
+ pref_sync_ref = "TCO";
+ break;
+ case HDSPM_SYNC_FROM_SYNC_IN:
+ pref_sync_ref = "Sync In";
+ break;
default:
pref_sync_ref = "XXXX Clock";
break;
}
snd_iprintf(buffer, "Preferred Sync Reference: %s\n",
- pref_sync_ref);
+ pref_sync_ref);
snd_iprintf(buffer, "System Clock Frequency: %d\n",
- hdspm->system_sample_rate);
+ hdspm->system_sample_rate);
snd_iprintf(buffer, "--- Status:\n");
@@ -3099,12 +4751,18 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
x2 = status2 & HDSPM_wcSync;
snd_iprintf(buffer, "Inputs MADI=%s, WordClock=%s\n",
- (status & HDSPM_madiLock) ? (x ? "Sync" : "Lock") :
- "NoLock",
- (status2 & HDSPM_wcLock) ? (x2 ? "Sync" : "Lock") :
- "NoLock");
+ (status & HDSPM_madiLock) ? (x ? "Sync" : "Lock") :
+ "NoLock",
+ (status2 & HDSPM_wcLock) ? (x2 ? "Sync" : "Lock") :
+ "NoLock");
switch (hdspm_autosync_ref(hdspm)) {
+ case HDSPM_AUTOSYNC_FROM_SYNC_IN:
+ autosync_ref = "Sync In";
+ break;
+ case HDSPM_AUTOSYNC_FROM_TCO:
+ autosync_ref = "TCO";
+ break;
case HDSPM_AUTOSYNC_FROM_WORD:
autosync_ref = "Word Clock";
break;
@@ -3119,15 +4777,15 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
break;
}
snd_iprintf(buffer,
- "AutoSync: Reference= %s, Freq=%d (MADI = %d, Word = %d)\n",
- autosync_ref, hdspm_external_sample_rate(hdspm),
- (status & HDSPM_madiFreqMask) >> 22,
- (status2 & HDSPM_wcFreqMask) >> 5);
+ "AutoSync: Reference= %s, Freq=%d (MADI = %d, Word = %d)\n",
+ autosync_ref, hdspm_external_sample_rate(hdspm),
+ (status & HDSPM_madiFreqMask) >> 22,
+ (status2 & HDSPM_wcFreqMask) >> 5);
snd_iprintf(buffer, "Input: %s, Mode=%s\n",
- (status & HDSPM_AB_int) ? "Coax" : "Optical",
- (status & HDSPM_RX_64ch) ? "64 channels" :
- "56 channels");
+ (status & HDSPM_AB_int) ? "Coax" : "Optical",
+ (status & HDSPM_RX_64ch) ? "64 channels" :
+ "56 channels");
snd_iprintf(buffer, "\n");
}
@@ -3142,8 +4800,6 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
unsigned int timecode;
int pref_syncref;
char *autosync_ref;
- char *system_clock_mode;
- char *clock_source;
int x;
status = hdspm_read(hdspm, HDSPM_statusRegister);
@@ -3183,24 +4839,27 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
snd_iprintf(buffer,
- "Register: ctrl1=0x%x, status1=0x%x, status2=0x%x, "
- "timecode=0x%x\n",
- hdspm->control_register,
- status, status2, timecode);
+ "MIDIoverMADI FIFO: In=0x%x, Out=0x%x \n",
+ hdspm_read(hdspm, HDSPM_midiStatusIn2) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusOut2) & 0xFF);
+ snd_iprintf(buffer,
+ "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
+ "status2=0x%x\n",
+ hdspm->control_register, hdspm->control2_register,
+ status, status2);
snd_iprintf(buffer, "--- Settings ---\n");
x = 1 << (6 + hdspm_decode_latency(hdspm->control_register &
- HDSPM_LatencyMask));
+ HDSPM_LatencyMask));
snd_iprintf(buffer,
"Size (Latency): %d samples (2 periods of %lu bytes)\n",
x, (unsigned long) hdspm->period_bytes);
- snd_iprintf(buffer, "Line out: %s, Precise Pointer: %s\n",
+ snd_iprintf(buffer, "Line out: %s\n",
(hdspm->
- control_register & HDSPM_LineOut) ? "on " : "off",
- (hdspm->precise_ptr) ? "on" : "off");
+ control_register & HDSPM_LineOut) ? "on " : "off");
snd_iprintf(buffer,
"ClearTrackMarker %s, Emphasis %s, Dolby %s\n",
@@ -3211,46 +4870,6 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
(hdspm->
control_register & HDSPM_Dolby) ? "on" : "off");
- switch (hdspm_clock_source(hdspm)) {
- case HDSPM_CLOCK_SOURCE_AUTOSYNC:
- clock_source = "AutoSync";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_32KHZ:
- clock_source = "Internal 32 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_44_1KHZ:
- clock_source = "Internal 44.1 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_48KHZ:
- clock_source = "Internal 48 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_64KHZ:
- clock_source = "Internal 64 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_88_2KHZ:
- clock_source = "Internal 88.2 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_96KHZ:
- clock_source = "Internal 96 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_128KHZ:
- clock_source = "Internal 128 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_176_4KHZ:
- clock_source = "Internal 176.4 kHz";
- break;
- case HDSPM_CLOCK_SOURCE_INTERNAL_192KHZ:
- clock_source = "Internal 192 kHz";
- break;
- default:
- clock_source = "Error";
- }
- snd_iprintf(buffer, "Sample Clock Source: %s\n", clock_source);
- if (!(hdspm->control_register & HDSPM_ClockModeMaster))
- system_clock_mode = "Slave";
- else
- system_clock_mode = "Master";
- snd_iprintf(buffer, "System Clock Mode: %s\n", system_clock_mode);
pref_syncref = hdspm_pref_sync_ref(hdspm);
if (pref_syncref == 0)
@@ -3274,38 +4893,108 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
snd_iprintf(buffer, "--- Status:\n");
snd_iprintf(buffer, "Word: %s Frequency: %d\n",
- (status & HDSPM_AES32_wcLock)? "Sync " : "No Lock",
+ (status & HDSPM_AES32_wcLock) ? "Sync " : "No Lock",
HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF));
for (x = 0; x < 8; x++) {
snd_iprintf(buffer, "AES%d: %s Frequency: %d\n",
x+1,
(status2 & (HDSPM_LockAES >> x)) ?
- "Sync ": "No Lock",
+ "Sync " : "No Lock",
HDSPM_bit2freq((timecode >> (4*x)) & 0xF));
}
switch (hdspm_autosync_ref(hdspm)) {
- case HDSPM_AES32_AUTOSYNC_FROM_NONE: autosync_ref="None"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_WORD: autosync_ref="Word Clock"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES1: autosync_ref="AES1"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES2: autosync_ref="AES2"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES3: autosync_ref="AES3"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES4: autosync_ref="AES4"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES5: autosync_ref="AES5"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES6: autosync_ref="AES6"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES7: autosync_ref="AES7"; break;
- case HDSPM_AES32_AUTOSYNC_FROM_AES8: autosync_ref="AES8"; break;
- default: autosync_ref = "---"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_NONE:
+ autosync_ref = "None"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_WORD:
+ autosync_ref = "Word Clock"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES1:
+ autosync_ref = "AES1"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES2:
+ autosync_ref = "AES2"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES3:
+ autosync_ref = "AES3"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES4:
+ autosync_ref = "AES4"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES5:
+ autosync_ref = "AES5"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES6:
+ autosync_ref = "AES6"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES7:
+ autosync_ref = "AES7"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_AES8:
+ autosync_ref = "AES8"; break;
+ default:
+ autosync_ref = "---"; break;
}
snd_iprintf(buffer, "AutoSync ref = %s\n", autosync_ref);
snd_iprintf(buffer, "\n");
}
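+/* /proc text dump for RayDAT cards: raw status registers plus decoded
+ * clock mode and per-input lock/sync/frequency. */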
+static void
+snd_hdspm_proc_read_raydat(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hdspm *hdspm = entry->private_data;
+ unsigned int status1, status2, status3, control, i;
+ unsigned int lock, sync;
+
+ status1 = hdspm_read(hdspm, HDSPM_RD_STATUS_1); /* s1 */
+ status2 = hdspm_read(hdspm, HDSPM_RD_STATUS_2); /* freq */
+ status3 = hdspm_read(hdspm, HDSPM_RD_STATUS_3); /* s2 */
+
+ control = hdspm->control_register;
+
+ snd_iprintf(buffer, "STATUS1: 0x%08x\n", status1);
+ snd_iprintf(buffer, "STATUS2: 0x%08x\n", status2);
+ snd_iprintf(buffer, "STATUS3: 0x%08x\n", status3);
+
+
+ snd_iprintf(buffer, "\n*** CLOCK MODE\n\n");
+
+ snd_iprintf(buffer, "Clock mode : %s\n",
+ (hdspm_system_clock_mode(hdspm) == 0) ? "master" : "slave");
+ snd_iprintf(buffer, "System frequency: %d Hz\n",
+ hdspm_get_system_sample_rate(hdspm));
+
+ snd_iprintf(buffer, "\n*** INPUT STATUS\n\n");
+
+ lock = 0x1;
+ sync = 0x100;
+
+ for (i = 0; i < 8; i++) {
+ snd_iprintf(buffer, "s1_input %d: Lock %d, Sync %d, Freq %s\n",
+ i,
+ (status1 & lock) ? 1 : 0,
+ (status1 & sync) ? 1 : 0,
+ texts_freq[(status2 >> (i * 4)) & 0xF]);
+
+ lock = lock<<1;
+ sync = sync<<1;
+ }
+
+ snd_iprintf(buffer, "WC input: Lock %d, Sync %d, Freq %s\n",
+ (status1 & 0x1000000) ? 1 : 0,
+ (status1 & 0x2000000) ? 1 : 0,
+ texts_freq[(status1 >> 16) & 0xF]);
+
+ snd_iprintf(buffer, "TCO input: Lock %d, Sync %d, Freq %s\n",
+ (status1 & 0x4000000) ? 1 : 0,
+ (status1 & 0x8000000) ? 1 : 0,
+ texts_freq[(status1 >> 20) & 0xF]);
+
+ snd_iprintf(buffer, "SYNC IN: Lock %d, Sync %d, Freq %s\n",
+ (status3 & 0x400) ? 1 : 0,
+ (status3 & 0x800) ? 1 : 0,
+ texts_freq[(status2 >> 12) & 0xF]);
+
+}
+
#ifdef CONFIG_SND_DEBUG
static void
-snd_hdspm_proc_read_debug(struct snd_info_entry * entry,
+snd_hdspm_proc_read_debug(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct hdspm *hdspm = entry->private_data;
@@ -3322,16 +5011,68 @@ snd_hdspm_proc_read_debug(struct snd_info_entry * entry,
#endif
+static void snd_hdspm_proc_ports_in(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hdspm *hdspm = entry->private_data;
+ int i;
+
+ snd_iprintf(buffer, "# generated by hdspm\n");
+
+ for (i = 0; i < hdspm->max_channels_in; i++) {
+ snd_iprintf(buffer, "%d=%s\n", i+1, hdspm->port_names_in[i]);
+ }
+}
-static void __devinit snd_hdspm_proc_init(struct hdspm * hdspm)
+static void snd_hdspm_proc_ports_out(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hdspm *hdspm = entry->private_data;
+ int i;
+
+ snd_iprintf(buffer, "# generated by hdspm\n");
+
+ for (i = 0; i < hdspm->max_channels_out; i++) {
+ snd_iprintf(buffer, "%d=%s\n", i+1, hdspm->port_names_out[i]);
+ }
+}
+
+
+static void __devinit snd_hdspm_proc_init(struct hdspm *hdspm)
{
struct snd_info_entry *entry;
- if (!snd_card_proc_new(hdspm->card, "hdspm", &entry))
- snd_info_set_text_ops(entry, hdspm,
- hdspm->is_aes32 ?
- snd_hdspm_proc_read_aes32 :
- snd_hdspm_proc_read_madi);
+ if (!snd_card_proc_new(hdspm->card, "hdspm", &entry)) {
+ switch (hdspm->io_type) {
+ case AES32:
+ snd_info_set_text_ops(entry, hdspm,
+ snd_hdspm_proc_read_aes32);
+ break;
+ case MADI:
+ snd_info_set_text_ops(entry, hdspm,
+ snd_hdspm_proc_read_madi);
+ break;
+ case MADIface:
+ /* snd_info_set_text_ops(entry, hdspm,
+ snd_hdspm_proc_read_madiface); */
+ break;
+ case RayDAT:
+ snd_info_set_text_ops(entry, hdspm,
+ snd_hdspm_proc_read_raydat);
+ break;
+ case AIO:
+ break;
+ }
+ }
+
+ if (!snd_card_proc_new(hdspm->card, "ports.in", &entry)) {
+ snd_info_set_text_ops(entry, hdspm, snd_hdspm_proc_ports_in);
+ }
+
+ if (!snd_card_proc_new(hdspm->card, "ports.out", &entry)) {
+ snd_info_set_text_ops(entry, hdspm, snd_hdspm_proc_ports_out);
+ }
+
#ifdef CONFIG_SND_DEBUG
/* debug file to read all hdspm registers */
if (!snd_card_proc_new(hdspm->card, "debug", &entry))
@@ -3341,47 +5082,48 @@ static void __devinit snd_hdspm_proc_init(struct hdspm * hdspm)
}
/*------------------------------------------------------------
- hdspm intitialize
+  hdspm initialize
------------------------------------------------------------*/
static int snd_hdspm_set_defaults(struct hdspm * hdspm)
{
- unsigned int i;
-
/* ASSUMPTION: hdspm->lock is either held, or there is no need to
hold it (e.g. during module initialization).
- */
+ */
/* set defaults: */
- if (hdspm->is_aes32)
+ hdspm->settings_register = 0;
+
+ switch (hdspm->io_type) {
+ case MADI:
+ case MADIface:
+ hdspm->control_register =
+ 0x2 + 0x8 + 0x10 + 0x80 + 0x400 + 0x4000 + 0x1000000;
+ break;
+
+ case RayDAT:
+ case AIO:
+ hdspm->settings_register = 0x1 + 0x1000;
+ /* Magic values are: LAT_0, LAT_2, Master, freq1, tx64ch, inp_0,
+ * line_out */
+ hdspm->control_register =
+ 0x2 + 0x8 + 0x10 + 0x80 + 0x400 + 0x4000 + 0x1000000;
+ break;
+
+ case AES32:
hdspm->control_register =
HDSPM_ClockModeMaster | /* Master Cloack Mode on */
- hdspm_encode_latency(7) | /* latency maximum =
- * 8192 samples
- */
+ hdspm_encode_latency(7) | /* latency max = 8192 samples */
HDSPM_SyncRef0 | /* AES1 is syncclock */
HDSPM_LineOut | /* Analog output in */
HDSPM_Professional; /* Professional mode */
- else
- hdspm->control_register =
- HDSPM_ClockModeMaster | /* Master Cloack Mode on */
- hdspm_encode_latency(7) | /* latency maximum =
- * 8192 samples
- */
- HDSPM_InputCoaxial | /* Input Coax not Optical */
- HDSPM_SyncRef_MADI | /* Madi is syncclock */
- HDSPM_LineOut | /* Analog output in */
- HDSPM_TX_64ch | /* transmit in 64ch mode */
- HDSPM_AutoInp; /* AutoInput chossing (takeover) */
-
- /* ! HDSPM_Frequency0|HDSPM_Frequency1 = 44.1khz */
- /* ! HDSPM_DoubleSpeed HDSPM_QuadSpeed = normal speed */
- /* ! HDSPM_clr_tms = do not clear bits in track marks */
+ break;
+ }
hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
- if (!hdspm->is_aes32) {
+ if (AES32 == hdspm->io_type) {
/* No control2 register for AES32 */
#ifdef SNDRV_BIG_ENDIAN
hdspm->control2_register = HDSPM_BIGENDIAN_MODE;
@@ -3397,57 +5139,59 @@ static int snd_hdspm_set_defaults(struct hdspm * hdspm)
all_in_all_mixer(hdspm, 0 * UNITY_GAIN);
- if (line_outs_monitor[hdspm->dev]) {
-
- snd_printk(KERN_INFO "HDSPM: "
- "sending all playback streams to line outs.\n");
-
- for (i = 0; i < HDSPM_MIXER_CHANNELS; i++) {
- if (hdspm_write_pb_gain(hdspm, i, i, UNITY_GAIN))
- return -EIO;
- }
+ if (hdspm->io_type == AIO || hdspm->io_type == RayDAT) {
+ hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
}
/* set a default rate so that the channel map is set up. */
- hdspm->channel_map = channel_map_madi_ss;
- hdspm_set_rate(hdspm, 44100, 1);
+ hdspm_set_rate(hdspm, 48000, 1);
return 0;
}
/*------------------------------------------------------------
- interrupt
+ interrupt
------------------------------------------------------------*/
static irqreturn_t snd_hdspm_interrupt(int irq, void *dev_id)
{
struct hdspm *hdspm = (struct hdspm *) dev_id;
unsigned int status;
- int audio;
- int midi0;
- int midi1;
- unsigned int midi0status;
- unsigned int midi1status;
- int schedule = 0;
+ int i, audio, midi, schedule = 0;
+ /* cycles_t now; */
status = hdspm_read(hdspm, HDSPM_statusRegister);
audio = status & HDSPM_audioIRQPending;
- midi0 = status & HDSPM_midi0IRQPending;
- midi1 = status & HDSPM_midi1IRQPending;
+ midi = status & (HDSPM_midi0IRQPending | HDSPM_midi1IRQPending |
+ HDSPM_midi2IRQPending | HDSPM_midi3IRQPending);
+
+ /* now = get_cycles(); */
+ /**
+ * LAT_2..LAT_0 period counter (win) counter (mac)
+ * 6 4096 ~256053425 ~514672358
+ * 5 2048 ~128024983 ~257373821
+ * 4 1024 ~64023706 ~128718089
+ * 3 512 ~32005945 ~64385999
+ * 2 256 ~16003039 ~32260176
+ * 1 128 ~7998738 ~16194507
+ * 0 64 ~3998231 ~8191558
+ **/
+ /*
+ snd_printk(KERN_INFO "snd_hdspm_interrupt %llu @ %llx\n",
+ now-hdspm->last_interrupt, status & 0xFFC0);
+ hdspm->last_interrupt = now;
+ */
- if (!audio && !midi0 && !midi1)
+ if (!audio && !midi)
return IRQ_NONE;
hdspm_write(hdspm, HDSPM_interruptConfirmation, 0);
hdspm->irq_count++;
- midi0status = hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xff;
- midi1status = hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xff;
if (audio) {
-
if (hdspm->capture_substream)
snd_pcm_period_elapsed(hdspm->capture_substream);
@@ -3455,118 +5199,44 @@ static irqreturn_t snd_hdspm_interrupt(int irq, void *dev_id)
snd_pcm_period_elapsed(hdspm->playback_substream);
}
- if (midi0 && midi0status) {
- /* we disable interrupts for this input until processing
- * is done
- */
- hdspm->control_register &= ~HDSPM_Midi0InterruptEnable;
- hdspm_write(hdspm, HDSPM_controlRegister,
- hdspm->control_register);
- hdspm->midi[0].pending = 1;
- schedule = 1;
- }
- if (midi1 && midi1status) {
- /* we disable interrupts for this input until processing
- * is done
- */
- hdspm->control_register &= ~HDSPM_Midi1InterruptEnable;
- hdspm_write(hdspm, HDSPM_controlRegister,
- hdspm->control_register);
- hdspm->midi[1].pending = 1;
- schedule = 1;
+ if (midi) {
+ i = 0;
+ while (i < hdspm->midiPorts) {
+ if ((hdspm_read(hdspm,
+ hdspm->midi[i].statusIn) & 0xff) &&
+ (status & hdspm->midi[i].irq)) {
+ /* we disable interrupts for this input until
+ * processing is done
+ */
+ hdspm->control_register &= ~hdspm->midi[i].ie;
+ hdspm_write(hdspm, HDSPM_controlRegister,
+ hdspm->control_register);
+ hdspm->midi[i].pending = 1;
+ schedule = 1;
+ }
+
+ i++;
+ }
+
+ if (schedule)
+ tasklet_hi_schedule(&hdspm->midi_tasklet);
}
- if (schedule)
- tasklet_schedule(&hdspm->midi_tasklet);
+
return IRQ_HANDLED;
}
/*------------------------------------------------------------
- pcm interface
+ pcm interface
------------------------------------------------------------*/
-static snd_pcm_uframes_t snd_hdspm_hw_pointer(struct snd_pcm_substream *
- substream)
+static snd_pcm_uframes_t snd_hdspm_hw_pointer(struct snd_pcm_substream
+ *substream)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
return hdspm_hw_pointer(hdspm);
}
-static char *hdspm_channel_buffer_location(struct hdspm * hdspm,
- int stream, int channel)
-{
- int mapped_channel;
-
- if (snd_BUG_ON(channel < 0 || channel >= HDSPM_MAX_CHANNELS))
- return NULL;
-
- mapped_channel = hdspm->channel_map[channel];
- if (mapped_channel < 0)
- return NULL;
-
- if (stream == SNDRV_PCM_STREAM_CAPTURE)
- return hdspm->capture_buffer +
- mapped_channel * HDSPM_CHANNEL_BUFFER_BYTES;
- else
- return hdspm->playback_buffer +
- mapped_channel * HDSPM_CHANNEL_BUFFER_BYTES;
-}
-
-
-/* dont know why need it ??? */
-static int snd_hdspm_playback_copy(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos,
- void __user *src, snd_pcm_uframes_t count)
-{
- struct hdspm *hdspm = snd_pcm_substream_chip(substream);
- char *channel_buf;
-
- if (snd_BUG_ON(pos + count > HDSPM_CHANNEL_BUFFER_BYTES / 4))
- return -EINVAL;
-
- channel_buf =
- hdspm_channel_buffer_location(hdspm, substream->pstr->stream,
- channel);
-
- if (snd_BUG_ON(!channel_buf))
- return -EIO;
-
- return copy_from_user(channel_buf + pos * 4, src, count * 4);
-}
-
-static int snd_hdspm_capture_copy(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos,
- void __user *dst, snd_pcm_uframes_t count)
-{
- struct hdspm *hdspm = snd_pcm_substream_chip(substream);
- char *channel_buf;
-
- if (snd_BUG_ON(pos + count > HDSPM_CHANNEL_BUFFER_BYTES / 4))
- return -EINVAL;
-
- channel_buf =
- hdspm_channel_buffer_location(hdspm, substream->pstr->stream,
- channel);
- if (snd_BUG_ON(!channel_buf))
- return -EIO;
- return copy_to_user(dst, channel_buf + pos * 4, count * 4);
-}
-
-static int snd_hdspm_hw_silence(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
-{
- struct hdspm *hdspm = snd_pcm_substream_chip(substream);
- char *channel_buf;
-
- channel_buf =
- hdspm_channel_buffer_location(hdspm, substream->pstr->stream,
- channel);
- if (snd_BUG_ON(!channel_buf))
- return -EIO;
- memset(channel_buf + pos * 4, 0, count * 4);
- return 0;
-}
static int snd_hdspm_reset(struct snd_pcm_substream *substream)
{
@@ -3589,7 +5259,7 @@ static int snd_hdspm_reset(struct snd_pcm_substream *substream)
snd_pcm_group_for_each_entry(s, substream) {
if (s == other) {
oruntime->status->hw_ptr =
- runtime->status->hw_ptr;
+ runtime->status->hw_ptr;
break;
}
}
@@ -3621,19 +5291,19 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
/* The other stream is open, and not by the same
task as this one. Make sure that the parameters
that matter are the same.
- */
+ */
if (params_rate(params) != hdspm->system_sample_rate) {
spin_unlock_irq(&hdspm->lock);
_snd_pcm_hw_param_setempty(params,
- SNDRV_PCM_HW_PARAM_RATE);
+ SNDRV_PCM_HW_PARAM_RATE);
return -EBUSY;
}
if (params_period_size(params) != hdspm->period_bytes / 4) {
spin_unlock_irq(&hdspm->lock);
_snd_pcm_hw_param_setempty(params,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return -EBUSY;
}
@@ -3646,18 +5316,20 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
spin_lock_irq(&hdspm->lock);
err = hdspm_set_rate(hdspm, params_rate(params), 0);
if (err < 0) {
+ snd_printk(KERN_INFO "err on hdspm_set_rate: %d\n", err);
spin_unlock_irq(&hdspm->lock);
_snd_pcm_hw_param_setempty(params,
- SNDRV_PCM_HW_PARAM_RATE);
+ SNDRV_PCM_HW_PARAM_RATE);
return err;
}
spin_unlock_irq(&hdspm->lock);
err = hdspm_set_interrupt_interval(hdspm,
- params_period_size(params));
+ params_period_size(params));
if (err < 0) {
+ snd_printk(KERN_INFO "err on hdspm_set_interrupt_interval: %d\n", err);
_snd_pcm_hw_param_setempty(params,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return err;
}
@@ -3667,10 +5339,13 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
/* malloc all buffer even if not enabled to get sure */
/* Update for MADI rev 204: we need to allocate for all channels,
* otherwise it doesn't work at 96kHz */
+
err =
- snd_pcm_lib_malloc_pages(substream, HDSPM_DMA_AREA_BYTES);
- if (err < 0)
+ snd_pcm_lib_malloc_pages(substream, HDSPM_DMA_AREA_BYTES);
+ if (err < 0) {
+ snd_printk(KERN_INFO "err on snd_pcm_lib_malloc_pages: %d\n", err);
return err;
+ }
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -3681,7 +5356,7 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
snd_hdspm_enable_out(hdspm, i, 1);
hdspm->playback_buffer =
- (unsigned char *) substream->runtime->dma_area;
+ (unsigned char *) substream->runtime->dma_area;
snd_printdd("Allocated sample buffer for playback at %p\n",
hdspm->playback_buffer);
} else {
@@ -3692,23 +5367,40 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
snd_hdspm_enable_in(hdspm, i, 1);
hdspm->capture_buffer =
- (unsigned char *) substream->runtime->dma_area;
+ (unsigned char *) substream->runtime->dma_area;
snd_printdd("Allocated sample buffer for capture at %p\n",
hdspm->capture_buffer);
}
+
/*
snd_printdd("Allocated sample buffer for %s at 0x%08X\n",
substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
"playback" : "capture",
snd_pcm_sgbuf_get_addr(substream, 0));
- */
+ */
/*
- snd_printdd("set_hwparams: %s %d Hz, %d channels, bs = %d\n",
- substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- "playback" : "capture",
- params_rate(params), params_channels(params),
- params_buffer_size(params));
- */
+ snd_printdd("set_hwparams: %s %d Hz, %d channels, bs = %d\n",
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture",
+ params_rate(params), params_channels(params),
+ params_buffer_size(params));
+ */
+
+
+ /* Switch to native float format if requested */
+ if (SNDRV_PCM_FORMAT_FLOAT_LE == params_format(params)) {
+ if (!(hdspm->control_register & HDSPe_FLOAT_FORMAT))
+ snd_printk(KERN_INFO "hdspm: Switching to native 32bit LE float format.\n");
+
+ hdspm->control_register |= HDSPe_FLOAT_FORMAT;
+ } else if (SNDRV_PCM_FORMAT_S32_LE == params_format(params)) {
+ if (hdspm->control_register & HDSPe_FLOAT_FORMAT)
+ snd_printk(KERN_INFO "hdspm: Switching to native 32bit LE integer format.\n");
+
+ hdspm->control_register &= ~HDSPe_FLOAT_FORMAT;
+ }
+ hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
+
return 0;
}
@@ -3719,14 +5411,14 @@ static int snd_hdspm_hw_free(struct snd_pcm_substream *substream)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /* params_channels(params) should be enough,
+ /* params_channels(params) should be enough,
but to get sure in case of error */
- for (i = 0; i < HDSPM_MAX_CHANNELS; ++i)
+ for (i = 0; i < hdspm->max_channels_out; ++i)
snd_hdspm_enable_out(hdspm, i, 0);
hdspm->playback_buffer = NULL;
} else {
- for (i = 0; i < HDSPM_MAX_CHANNELS; ++i)
+ for (i = 0; i < hdspm->max_channels_in; ++i)
snd_hdspm_enable_in(hdspm, i, 0);
hdspm->capture_buffer = NULL;
@@ -3738,37 +5430,58 @@ static int snd_hdspm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
+
static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
- struct snd_pcm_channel_info * info)
+ struct snd_pcm_channel_info *info)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
- int mapped_channel;
- if (snd_BUG_ON(info->channel >= HDSPM_MAX_CHANNELS))
- return -EINVAL;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+ snd_printk(KERN_INFO "snd_hdspm_channel_info: output channel out of range (%d)\n", info->channel);
+ return -EINVAL;
+ }
- mapped_channel = hdspm->channel_map[info->channel];
- if (mapped_channel < 0)
- return -EINVAL;
+ if (hdspm->channel_map_out[info->channel] < 0) {
+ snd_printk(KERN_INFO "snd_hdspm_channel_info: output channel %d mapped out\n", info->channel);
+ return -EINVAL;
+ }
+
+ info->offset = hdspm->channel_map_out[info->channel] *
+ HDSPM_CHANNEL_BUFFER_BYTES;
+ } else {
+ if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+ snd_printk(KERN_INFO "snd_hdspm_channel_info: input channel out of range (%d)\n", info->channel);
+ return -EINVAL;
+ }
+
+ if (hdspm->channel_map_in[info->channel] < 0) {
+ snd_printk(KERN_INFO "snd_hdspm_channel_info: input channel %d mapped out\n", info->channel);
+ return -EINVAL;
+ }
+
+ info->offset = hdspm->channel_map_in[info->channel] *
+ HDSPM_CHANNEL_BUFFER_BYTES;
+ }
- info->offset = mapped_channel * HDSPM_CHANNEL_BUFFER_BYTES;
info->first = 0;
info->step = 32;
return 0;
}
+
static int snd_hdspm_ioctl(struct snd_pcm_substream *substream,
- unsigned int cmd, void *arg)
+ unsigned int cmd, void *arg)
{
switch (cmd) {
case SNDRV_PCM_IOCTL1_RESET:
return snd_hdspm_reset(substream);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
- {
- struct snd_pcm_channel_info *info = arg;
- return snd_hdspm_channel_info(substream, info);
- }
+ {
+ struct snd_pcm_channel_info *info = arg;
+ return snd_hdspm_channel_info(substream, info);
+ }
default:
break;
}
@@ -3815,19 +5528,19 @@ static int snd_hdspm_trigger(struct snd_pcm_substream *substream, int cmd)
}
if (cmd == SNDRV_PCM_TRIGGER_START) {
if (!(running & (1 << SNDRV_PCM_STREAM_PLAYBACK))
- && substream->stream ==
- SNDRV_PCM_STREAM_CAPTURE)
+ && substream->stream ==
+ SNDRV_PCM_STREAM_CAPTURE)
hdspm_silence_playback(hdspm);
} else {
if (running &&
- substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
hdspm_silence_playback(hdspm);
}
} else {
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
hdspm_silence_playback(hdspm);
}
- _ok:
+_ok:
snd_pcm_trigger_done(substream, substream);
if (!hdspm->running && running)
hdspm_start_audio(hdspm);
@@ -3844,8 +5557,18 @@ static int snd_hdspm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static unsigned int period_sizes[] =
- { 64, 128, 256, 512, 1024, 2048, 4096, 8192 };
+static unsigned int period_sizes_old[] = {
+ 64, 128, 256, 512, 1024, 2048, 4096
+};
+
+static unsigned int period_sizes_new[] = {
+ 32, 64, 128, 256, 512, 1024, 2048, 4096
+};
+
+/* RayDAT and AIO always have a buffer of 16384 samples per channel */
+static unsigned int raydat_aio_buffer_sizes[] = {
+ 16384
+};
static struct snd_pcm_hardware snd_hdspm_playback_subinfo = {
.info = (SNDRV_PCM_INFO_MMAP |
@@ -3866,9 +5589,9 @@ static struct snd_pcm_hardware snd_hdspm_playback_subinfo = {
.buffer_bytes_max =
HDSPM_CHANNEL_BUFFER_BYTES * HDSPM_MAX_CHANNELS,
.period_bytes_min = (64 * 4),
- .period_bytes_max = (8192 * 4) * HDSPM_MAX_CHANNELS,
+ .period_bytes_max = (4096 * 4) * HDSPM_MAX_CHANNELS,
.periods_min = 2,
- .periods_max = 2,
+ .periods_max = 512,
.fifo_size = 0
};
@@ -3891,20 +5614,66 @@ static struct snd_pcm_hardware snd_hdspm_capture_subinfo = {
.buffer_bytes_max =
HDSPM_CHANNEL_BUFFER_BYTES * HDSPM_MAX_CHANNELS,
.period_bytes_min = (64 * 4),
- .period_bytes_max = (8192 * 4) * HDSPM_MAX_CHANNELS,
+ .period_bytes_max = (4096 * 4) * HDSPM_MAX_CHANNELS,
.periods_min = 2,
- .periods_max = 2,
+ .periods_max = 512,
.fifo_size = 0
};
-static struct snd_pcm_hw_constraint_list hw_constraints_period_sizes = {
- .count = ARRAY_SIZE(period_sizes),
- .list = period_sizes,
+static struct snd_pcm_hw_constraint_list hw_constraints_period_sizes_old = {
+ .count = ARRAY_SIZE(period_sizes_old),
+ .list = period_sizes_old,
.mask = 0
};
+static struct snd_pcm_hw_constraint_list hw_constraints_period_sizes_new = {
+ .count = ARRAY_SIZE(period_sizes_new),
+ .list = period_sizes_new,
+ .mask = 0
+};
-static int snd_hdspm_hw_rule_channels_rate(struct snd_pcm_hw_params *params,
+static struct snd_pcm_hw_constraint_list hw_constraints_raydat_io_buffer = {
+ .count = ARRAY_SIZE(raydat_aio_buffer_sizes),
+ .list = raydat_aio_buffer_sizes,
+ .mask = 0
+};
+
+static int snd_hdspm_hw_rule_in_channels_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct hdspm *hdspm = rule->private;
+ struct snd_interval *c =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *r =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+
+ if (r->min > 96000 && r->max <= 192000) {
+ struct snd_interval t = {
+ .min = hdspm->qs_in_channels,
+ .max = hdspm->qs_in_channels,
+ .integer = 1,
+ };
+ return snd_interval_refine(c, &t);
+ } else if (r->min > 48000 && r->max <= 96000) {
+ struct snd_interval t = {
+ .min = hdspm->ds_in_channels,
+ .max = hdspm->ds_in_channels,
+ .integer = 1,
+ };
+ return snd_interval_refine(c, &t);
+ } else if (r->max < 64000) {
+ struct snd_interval t = {
+ .min = hdspm->ss_in_channels,
+ .max = hdspm->ss_in_channels,
+ .integer = 1,
+ };
+ return snd_interval_refine(c, &t);
+ }
+
+ return 0;
+}
+
+static int snd_hdspm_hw_rule_out_channels_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule * rule)
{
struct hdspm *hdspm = rule->private;
@@ -3913,25 +5682,33 @@ static int snd_hdspm_hw_rule_channels_rate(struct snd_pcm_hw_params *params,
struct snd_interval *r =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- if (r->min > 48000 && r->max <= 96000) {
+ if (r->min > 96000 && r->max <= 192000) {
struct snd_interval t = {
- .min = hdspm->ds_channels,
- .max = hdspm->ds_channels,
+ .min = hdspm->qs_out_channels,
+ .max = hdspm->qs_out_channels,
+ .integer = 1,
+ };
+ return snd_interval_refine(c, &t);
+ } else if (r->min > 48000 && r->max <= 96000) {
+ struct snd_interval t = {
+ .min = hdspm->ds_out_channels,
+ .max = hdspm->ds_out_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
} else if (r->max < 64000) {
struct snd_interval t = {
- .min = hdspm->ss_channels,
- .max = hdspm->ss_channels,
+ .min = hdspm->ss_out_channels,
+ .max = hdspm->ss_out_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
+ } else {
}
return 0;
}
-static int snd_hdspm_hw_rule_rate_channels(struct snd_pcm_hw_params *params,
+static int snd_hdspm_hw_rule_rate_in_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule * rule)
{
struct hdspm *hdspm = rule->private;
@@ -3940,42 +5717,92 @@ static int snd_hdspm_hw_rule_rate_channels(struct snd_pcm_hw_params *params,
struct snd_interval *r =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- if (c->min >= hdspm->ss_channels) {
+ if (c->min >= hdspm->ss_in_channels) {
struct snd_interval t = {
.min = 32000,
.max = 48000,
.integer = 1,
};
return snd_interval_refine(r, &t);
- } else if (c->max <= hdspm->ds_channels) {
+ } else if (c->max <= hdspm->qs_in_channels) {
+ struct snd_interval t = {
+ .min = 128000,
+ .max = 192000,
+ .integer = 1,
+ };
+ return snd_interval_refine(r, &t);
+ } else if (c->max <= hdspm->ds_in_channels) {
struct snd_interval t = {
.min = 64000,
.max = 96000,
.integer = 1,
};
+ return snd_interval_refine(r, &t);
+ }
+ return 0;
+}
+static int snd_hdspm_hw_rule_rate_out_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct hdspm *hdspm = rule->private;
+ struct snd_interval *c =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *r =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+
+ if (c->min >= hdspm->ss_out_channels) {
+ struct snd_interval t = {
+ .min = 32000,
+ .max = 48000,
+ .integer = 1,
+ };
+ return snd_interval_refine(r, &t);
+ } else if (c->max <= hdspm->qs_out_channels) {
+ struct snd_interval t = {
+ .min = 128000,
+ .max = 192000,
+ .integer = 1,
+ };
+ return snd_interval_refine(r, &t);
+ } else if (c->max <= hdspm->ds_out_channels) {
+ struct snd_interval t = {
+ .min = 64000,
+ .max = 96000,
+ .integer = 1,
+ };
return snd_interval_refine(r, &t);
}
+
return 0;
}
-static int snd_hdspm_hw_rule_channels(struct snd_pcm_hw_params *params,
+static int snd_hdspm_hw_rule_in_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
unsigned int list[3];
struct hdspm *hdspm = rule->private;
struct snd_interval *c = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- if (hdspm->is_aes32) {
- list[0] = hdspm->qs_channels;
- list[1] = hdspm->ds_channels;
- list[2] = hdspm->ss_channels;
- return snd_interval_list(c, 3, list, 0);
- } else {
- list[0] = hdspm->ds_channels;
- list[1] = hdspm->ss_channels;
- return snd_interval_list(c, 2, list, 0);
- }
+
+ list[0] = hdspm->qs_in_channels;
+ list[1] = hdspm->ds_in_channels;
+ list[2] = hdspm->ss_in_channels;
+ return snd_interval_list(c, 3, list, 0);
+}
+
+static int snd_hdspm_hw_rule_out_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ unsigned int list[3];
+ struct hdspm *hdspm = rule->private;
+ struct snd_interval *c = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ list[0] = hdspm->qs_out_channels;
+ list[1] = hdspm->ds_out_channels;
+ list[2] = hdspm->ss_out_channels;
+ return snd_interval_list(c, 3, list, 0);
}
@@ -3999,6 +5826,7 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
+
runtime->hw = snd_hdspm_playback_subinfo;
if (hdspm->capture_substream == NULL)
@@ -4011,25 +5839,41 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
- snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- &hw_constraints_period_sizes);
+ switch (hdspm->io_type) {
+ case AIO:
+ case RayDAT:
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ &hw_constraints_period_sizes_new);
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ &hw_constraints_raydat_io_buffer);
- if (hdspm->is_aes32) {
+ break;
+
+ default:
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ &hw_constraints_period_sizes_old);
+ }
+
+ if (AES32 == hdspm->io_type) {
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hdspm_hw_constraints_aes32_sample_rates);
} else {
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_channels, hdspm,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_channels_rate, hdspm,
- SNDRV_PCM_HW_PARAM_RATE, -1);
-
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_hdspm_hw_rule_rate_channels, hdspm,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ snd_hdspm_hw_rule_rate_out_channels, hdspm,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
}
+
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ snd_hdspm_hw_rule_out_channels, hdspm,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ snd_hdspm_hw_rule_out_channels_rate, hdspm,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+
return 0;
}
@@ -4066,24 +5910,40 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
spin_unlock_irq(&hdspm->lock);
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
- snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- &hw_constraints_period_sizes);
- if (hdspm->is_aes32) {
+ switch (hdspm->io_type) {
+ case AIO:
+ case RayDAT:
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ &hw_constraints_period_sizes_new);
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ &hw_constraints_raydat_io_buffer);
+ break;
+
+ default:
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ &hw_constraints_period_sizes_old);
+ }
+
+ if (AES32 == hdspm->io_type) {
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hdspm_hw_constraints_aes32_sample_rates);
} else {
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_channels, hdspm,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_channels_rate, hdspm,
- SNDRV_PCM_HW_PARAM_RATE, -1);
-
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_hdspm_hw_rule_rate_channels, hdspm,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ snd_hdspm_hw_rule_rate_in_channels, hdspm,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
}
+
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ snd_hdspm_hw_rule_in_channels, hdspm,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ snd_hdspm_hw_rule_in_channels_rate, hdspm,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+
return 0;
}
@@ -4100,32 +5960,129 @@ static int snd_hdspm_capture_release(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int snd_hdspm_hwdep_dummy_op(struct snd_hwdep *hw, struct file *file)
{
+ /* we have nothing to initialize but the call is required */
+ return 0;
+}
+
+static inline int copy_u32_le(void __user *dest, void __iomem *src)
+{
+ u32 val = readl(src);
+ return copy_to_user(dest, &val, 4);
+}
+
+static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+ unsigned int cmd, unsigned long __user arg)
+{
+ void __user *argp = (void __user *)arg;
struct hdspm *hdspm = hw->private_data;
struct hdspm_mixer_ioctl mixer;
- struct hdspm_config_info info;
+ struct hdspm_config info;
+ struct hdspm_status status;
struct hdspm_version hdspm_version;
- struct hdspm_peak_rms_ioctl rms;
+ struct hdspm_peak_rms *levels;
+ struct hdspm_ltc ltc;
+ unsigned int statusregister;
+ long unsigned int s;
+ int i = 0;
switch (cmd) {
case SNDRV_HDSPM_IOCTL_GET_PEAK_RMS:
- if (copy_from_user(&rms, (void __user *)arg, sizeof(rms)))
+ levels = &hdspm->peak_rms;
+ for (i = 0; i < HDSPM_MAX_CHANNELS; i++) {
+ levels->input_peaks[i] =
+ readl(hdspm->iobase +
+ HDSPM_MADI_INPUT_PEAK + i*4);
+ levels->playback_peaks[i] =
+ readl(hdspm->iobase +
+ HDSPM_MADI_PLAYBACK_PEAK + i*4);
+ levels->output_peaks[i] =
+ readl(hdspm->iobase +
+ HDSPM_MADI_OUTPUT_PEAK + i*4);
+
+ levels->input_rms[i] =
+ ((uint64_t) readl(hdspm->iobase +
+ HDSPM_MADI_INPUT_RMS_H + i*4) << 32) |
+ (uint64_t) readl(hdspm->iobase +
+ HDSPM_MADI_INPUT_RMS_L + i*4);
+ levels->playback_rms[i] =
+ ((uint64_t)readl(hdspm->iobase +
+ HDSPM_MADI_PLAYBACK_RMS_H+i*4) << 32) |
+ (uint64_t)readl(hdspm->iobase +
+ HDSPM_MADI_PLAYBACK_RMS_L + i*4);
+ levels->output_rms[i] =
+ ((uint64_t)readl(hdspm->iobase +
+ HDSPM_MADI_OUTPUT_RMS_H + i*4) << 32) |
+ (uint64_t)readl(hdspm->iobase +
+ HDSPM_MADI_OUTPUT_RMS_L + i*4);
+ }
+
+ if (hdspm->system_sample_rate > 96000) {
+ levels->speed = qs;
+ } else if (hdspm->system_sample_rate > 48000) {
+ levels->speed = ds;
+ } else {
+ levels->speed = ss;
+ }
+ levels->status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+
+ s = copy_to_user(argp, levels, sizeof(struct hdspm_peak_rms));
+ if (0 != s) {
+ /* snd_printk(KERN_ERR "copy_to_user(.., .., %lu): %lu
+ [Levels]\n", sizeof(struct hdspm_peak_rms), s);
+ */
return -EFAULT;
- /* maybe there is a chance to memorymap in future
- * so dont touch just copy
- */
- if(copy_to_user_fromio((void __user *)rms.peak,
- hdspm->iobase+HDSPM_MADI_peakrmsbase,
- sizeof(struct hdspm_peak_rms)) != 0 )
+ }
+ break;
+
+ case SNDRV_HDSPM_IOCTL_GET_LTC:
+ ltc.ltc = hdspm_read(hdspm, HDSPM_RD_TCO);
+ i = hdspm_read(hdspm, HDSPM_RD_TCO + 4);
+ if (i & HDSPM_TCO1_LTC_Input_valid) {
+ switch (i & (HDSPM_TCO1_LTC_Format_LSB |
+ HDSPM_TCO1_LTC_Format_MSB)) {
+ case 0:
+ ltc.format = fps_24;
+ break;
+ case HDSPM_TCO1_LTC_Format_LSB:
+ ltc.format = fps_25;
+ break;
+ case HDSPM_TCO1_LTC_Format_MSB:
+ ltc.format = fps_2997;
+ break;
+ default:
+ ltc.format = 30;
+ break;
+ }
+ if (i & HDSPM_TCO1_set_drop_frame_flag) {
+ ltc.frame = drop_frame;
+ } else {
+ ltc.frame = full_frame;
+ }
+ } else {
+ ltc.format = format_invalid;
+ ltc.frame = frame_invalid;
+ }
+ if (i & HDSPM_TCO1_Video_Input_Format_NTSC) {
+ ltc.input_format = ntsc;
+ } else if (i & HDSPM_TCO1_Video_Input_Format_PAL) {
+ ltc.input_format = pal;
+ } else {
+ ltc.input_format = no_video;
+ }
+
+ s = copy_to_user(argp, &ltc, sizeof(struct hdspm_ltc));
+ if (0 != s) {
+ /*
+ snd_printk(KERN_ERR "copy_to_user(.., .., %lu): %lu [LTC]\n", sizeof(struct hdspm_ltc), s); */
return -EFAULT;
+ }
break;
-
- case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
+ case SNDRV_HDSPM_IOCTL_GET_CONFIG:
memset(&info, 0, sizeof(info));
spin_lock_irq(&hdspm->lock);
@@ -4134,7 +6091,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
info.system_sample_rate = hdspm->system_sample_rate;
info.autosync_sample_rate =
- hdspm_external_sample_rate(hdspm);
+ hdspm_external_sample_rate(hdspm);
info.system_clock_mode = hdspm_system_clock_mode(hdspm);
info.clock_source = hdspm_clock_source(hdspm);
info.autosync_ref = hdspm_autosync_ref(hdspm);
@@ -4145,10 +6102,58 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
return -EFAULT;
break;
+ case SNDRV_HDSPM_IOCTL_GET_STATUS:
+ status.card_type = hdspm->io_type;
+
+ status.autosync_source = hdspm_autosync_ref(hdspm);
+
+ status.card_clock = 110069313433624ULL;
+ status.master_period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
+
+ switch (hdspm->io_type) {
+ case MADI:
+ case MADIface:
+ status.card_specific.madi.sync_wc =
+ hdspm_wc_sync_check(hdspm);
+ status.card_specific.madi.sync_madi =
+ hdspm_madi_sync_check(hdspm);
+ status.card_specific.madi.sync_tco =
+ hdspm_tco_sync_check(hdspm);
+ status.card_specific.madi.sync_in =
+ hdspm_sync_in_sync_check(hdspm);
+
+ statusregister =
+ hdspm_read(hdspm, HDSPM_statusRegister);
+ status.card_specific.madi.madi_input =
+ (statusregister & HDSPM_AB_int) ? 1 : 0;
+ status.card_specific.madi.channel_format =
+ (statusregister & HDSPM_TX_64ch) ? 1 : 0;
+ /* TODO: Mac driver sets it when f_s>48kHz */
+ status.card_specific.madi.frame_format = 0;
+
+ default:
+ break;
+ }
+
+ if (copy_to_user((void __user *) arg, &status, sizeof(status)))
+ return -EFAULT;
+
+
+ break;
+
case SNDRV_HDSPM_IOCTL_GET_VERSION:
+ hdspm_version.card_type = hdspm->io_type;
+ strncpy(hdspm_version.cardname, hdspm->card_name,
+ sizeof(hdspm_version.cardname));
+ hdspm_version.serial = (hdspm_read(hdspm,
+ HDSPM_midiStatusIn0)>>8) & 0xFFFFFF;
hdspm_version.firmware_rev = hdspm->firmware_rev;
+ hdspm_version.addons = 0;
+ if (hdspm->tco)
+ hdspm_version.addons |= HDSPM_ADDON_TCO;
+
if (copy_to_user((void __user *) arg, &hdspm_version,
- sizeof(hdspm_version)))
+ sizeof(hdspm_version)))
return -EFAULT;
break;
@@ -4156,7 +6161,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
if (copy_from_user(&mixer, (void __user *)arg, sizeof(mixer)))
return -EFAULT;
if (copy_to_user((void __user *)mixer.mixer, hdspm->mixer,
- sizeof(struct hdspm_mixer)))
+ sizeof(struct hdspm_mixer)))
return -EFAULT;
break;
@@ -4175,8 +6180,6 @@ static struct snd_pcm_ops snd_hdspm_playback_ops = {
.prepare = snd_hdspm_prepare,
.trigger = snd_hdspm_trigger,
.pointer = snd_hdspm_hw_pointer,
- .copy = snd_hdspm_playback_copy,
- .silence = snd_hdspm_hw_silence,
.page = snd_pcm_sgbuf_ops_page,
};
@@ -4189,7 +6192,6 @@ static struct snd_pcm_ops snd_hdspm_capture_ops = {
.prepare = snd_hdspm_prepare,
.trigger = snd_hdspm_trigger,
.pointer = snd_hdspm_hw_pointer,
- .copy = snd_hdspm_capture_copy,
.page = snd_pcm_sgbuf_ops_page,
};
@@ -4207,16 +6209,18 @@ static int __devinit snd_hdspm_create_hwdep(struct snd_card *card,
hw->private_data = hdspm;
strcpy(hw->name, "HDSPM hwdep interface");
+ hw->ops.open = snd_hdspm_hwdep_dummy_op;
hw->ops.ioctl = snd_hdspm_hwdep_ioctl;
+ hw->ops.release = snd_hdspm_hwdep_dummy_op;
return 0;
}
/*------------------------------------------------------------
- memory interface
+ memory interface
------------------------------------------------------------*/
-static int __devinit snd_hdspm_preallocate_memory(struct hdspm * hdspm)
+static int __devinit snd_hdspm_preallocate_memory(struct hdspm *hdspm)
{
int err;
struct snd_pcm *pcm;
@@ -4228,7 +6232,7 @@ static int __devinit snd_hdspm_preallocate_memory(struct hdspm * hdspm)
err =
snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV_SG,
+ SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(hdspm->pci),
wanted,
wanted);
@@ -4242,19 +6246,23 @@ static int __devinit snd_hdspm_preallocate_memory(struct hdspm * hdspm)
return 0;
}
-static void hdspm_set_sgbuf(struct hdspm * hdspm,
+
+static void hdspm_set_sgbuf(struct hdspm *hdspm,
struct snd_pcm_substream *substream,
unsigned int reg, int channels)
{
int i;
+
+ /* continuous memory segment */
for (i = 0; i < (channels * 16); i++)
hdspm_write(hdspm, reg + 4 * i,
- snd_pcm_sgbuf_get_addr(substream, 4096 * i));
+ snd_pcm_sgbuf_get_addr(substream, 4096 * i));
}
+
/* ------------- ALSA Devices ---------------------------- */
static int __devinit snd_hdspm_create_pcm(struct snd_card *card,
- struct hdspm * hdspm)
+ struct hdspm *hdspm)
{
struct snd_pcm *pcm;
int err;
@@ -4283,27 +6291,30 @@ static int __devinit snd_hdspm_create_pcm(struct snd_card *card,
static inline void snd_hdspm_initialize_midi_flush(struct hdspm * hdspm)
{
- snd_hdspm_flush_midi_input(hdspm, 0);
- snd_hdspm_flush_midi_input(hdspm, 1);
+ int i;
+
+ for (i = 0; i < hdspm->midiPorts; i++)
+ snd_hdspm_flush_midi_input(hdspm, i);
}
static int __devinit snd_hdspm_create_alsa_devices(struct snd_card *card,
struct hdspm * hdspm)
{
- int err;
+ int err, i;
snd_printdd("Create card...\n");
err = snd_hdspm_create_pcm(card, hdspm);
if (err < 0)
return err;
- err = snd_hdspm_create_midi(card, hdspm, 0);
- if (err < 0)
- return err;
-
- err = snd_hdspm_create_midi(card, hdspm, 1);
- if (err < 0)
- return err;
+ i = 0;
+ while (i < hdspm->midiPorts) {
+ err = snd_hdspm_create_midi(card, hdspm, i);
+ if (err < 0) {
+ return err;
+ }
+ i++;
+ }
err = snd_hdspm_create_controls(card, hdspm);
if (err < 0)
@@ -4346,37 +6357,54 @@ static int __devinit snd_hdspm_create_alsa_devices(struct snd_card *card,
}
static int __devinit snd_hdspm_create(struct snd_card *card,
- struct hdspm *hdspm,
- int precise_ptr, int enable_monitor)
-{
+ struct hdspm *hdspm) {
+
struct pci_dev *pci = hdspm->pci;
int err;
unsigned long io_extent;
hdspm->irq = -1;
-
- spin_lock_init(&hdspm->midi[0].lock);
- spin_lock_init(&hdspm->midi[1].lock);
-
hdspm->card = card;
spin_lock_init(&hdspm->lock);
- tasklet_init(&hdspm->midi_tasklet,
- hdspm_midi_tasklet, (unsigned long) hdspm);
-
pci_read_config_word(hdspm->pci,
- PCI_CLASS_REVISION, &hdspm->firmware_rev);
-
- hdspm->is_aes32 = (hdspm->firmware_rev >= HDSPM_AESREVISION);
+ PCI_CLASS_REVISION, &hdspm->firmware_rev);
strcpy(card->mixername, "Xilinx FPGA");
- if (hdspm->is_aes32) {
- strcpy(card->driver, "HDSPAES32");
- hdspm->card_name = "RME HDSPM AES32";
- } else {
- strcpy(card->driver, "HDSPM");
- hdspm->card_name = "RME HDSPM MADI";
+ strcpy(card->driver, "HDSPM");
+
+ switch (hdspm->firmware_rev) {
+ case HDSPM_MADI_REV:
+ hdspm->io_type = MADI;
+ hdspm->card_name = "RME MADI";
+ hdspm->midiPorts = 3;
+ break;
+ case HDSPM_RAYDAT_REV:
+ hdspm->io_type = RayDAT;
+ hdspm->card_name = "RME RayDAT";
+ hdspm->midiPorts = 2;
+ break;
+ case HDSPM_AIO_REV:
+ hdspm->io_type = AIO;
+ hdspm->card_name = "RME AIO";
+ hdspm->midiPorts = 1;
+ break;
+ case HDSPM_MADIFACE_REV:
+ hdspm->io_type = MADIface;
+ hdspm->card_name = "RME MADIface";
+ hdspm->midiPorts = 1;
+ break;
+ case HDSPM_AES_REV:
+ case HDSPM_AES32_REV:
+ hdspm->io_type = AES32;
+ hdspm->card_name = "RME AES32";
+ hdspm->midiPorts = 2;
+ break;
+ default:
+ snd_printk(KERN_ERR "HDSPM: unknown firmware revision %x\n",
+ hdspm->firmware_rev);
+ return -ENODEV;
}
err = pci_enable_device(pci);
@@ -4393,22 +6421,21 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
io_extent = pci_resource_len(pci, 0);
snd_printdd("grabbed memory region 0x%lx-0x%lx\n",
- hdspm->port, hdspm->port + io_extent - 1);
-
+ hdspm->port, hdspm->port + io_extent - 1);
hdspm->iobase = ioremap_nocache(hdspm->port, io_extent);
if (!hdspm->iobase) {
snd_printk(KERN_ERR "HDSPM: "
- "unable to remap region 0x%lx-0x%lx\n",
- hdspm->port, hdspm->port + io_extent - 1);
+ "unable to remap region 0x%lx-0x%lx\n",
+ hdspm->port, hdspm->port + io_extent - 1);
return -EBUSY;
}
snd_printdd("remapped region (0x%lx) 0x%lx-0x%lx\n",
- (unsigned long)hdspm->iobase, hdspm->port,
- hdspm->port + io_extent - 1);
+ (unsigned long)hdspm->iobase, hdspm->port,
+ hdspm->port + io_extent - 1);
if (request_irq(pci->irq, snd_hdspm_interrupt,
- IRQF_SHARED, "hdspm", hdspm)) {
+ IRQF_SHARED, "hdspm", hdspm)) {
snd_printk(KERN_ERR "HDSPM: unable to use IRQ %d\n", pci->irq);
return -EBUSY;
}
@@ -4416,23 +6443,219 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
snd_printdd("use IRQ %d\n", pci->irq);
hdspm->irq = pci->irq;
- hdspm->precise_ptr = precise_ptr;
-
- hdspm->monitor_outs = enable_monitor;
snd_printdd("kmalloc Mixer memory of %zd Bytes\n",
- sizeof(struct hdspm_mixer));
+ sizeof(struct hdspm_mixer));
hdspm->mixer = kzalloc(sizeof(struct hdspm_mixer), GFP_KERNEL);
if (!hdspm->mixer) {
snd_printk(KERN_ERR "HDSPM: "
- "unable to kmalloc Mixer memory of %d Bytes\n",
- (int)sizeof(struct hdspm_mixer));
+ "unable to kmalloc Mixer memory of %d Bytes\n",
+ (int)sizeof(struct hdspm_mixer));
return err;
}
- hdspm->ss_channels = MADI_SS_CHANNELS;
- hdspm->ds_channels = MADI_DS_CHANNELS;
- hdspm->qs_channels = MADI_QS_CHANNELS;
+ hdspm->port_names_in = NULL;
+ hdspm->port_names_out = NULL;
+
+ switch (hdspm->io_type) {
+ case AES32:
+ hdspm->ss_in_channels = hdspm->ss_out_channels = AES32_CHANNELS;
+ hdspm->ds_in_channels = hdspm->ds_out_channels = AES32_CHANNELS;
+ hdspm->qs_in_channels = hdspm->qs_out_channels = AES32_CHANNELS;
+
+ hdspm->channel_map_in_ss = hdspm->channel_map_out_ss =
+ channel_map_aes32;
+ hdspm->channel_map_in_ds = hdspm->channel_map_out_ds =
+ channel_map_aes32;
+ hdspm->channel_map_in_qs = hdspm->channel_map_out_qs =
+ channel_map_aes32;
+ hdspm->port_names_in_ss = hdspm->port_names_out_ss =
+ texts_ports_aes32;
+ hdspm->port_names_in_ds = hdspm->port_names_out_ds =
+ texts_ports_aes32;
+ hdspm->port_names_in_qs = hdspm->port_names_out_qs =
+ texts_ports_aes32;
+
+ hdspm->max_channels_out = hdspm->max_channels_in =
+ AES32_CHANNELS;
+ hdspm->port_names_in = hdspm->port_names_out =
+ texts_ports_aes32;
+ hdspm->channel_map_in = hdspm->channel_map_out =
+ channel_map_aes32;
+
+ break;
+
+ case MADI:
+ case MADIface:
+ hdspm->ss_in_channels = hdspm->ss_out_channels =
+ MADI_SS_CHANNELS;
+ hdspm->ds_in_channels = hdspm->ds_out_channels =
+ MADI_DS_CHANNELS;
+ hdspm->qs_in_channels = hdspm->qs_out_channels =
+ MADI_QS_CHANNELS;
+
+ hdspm->channel_map_in_ss = hdspm->channel_map_out_ss =
+ channel_map_unity_ss;
+ hdspm->channel_map_in_ds = hdspm->channel_map_out_ds =
+ channel_map_unity_ss;
+ hdspm->channel_map_in_qs = hdspm->channel_map_out_qs =
+ channel_map_unity_ss;
+
+ hdspm->port_names_in_ss = hdspm->port_names_out_ss =
+ texts_ports_madi;
+ hdspm->port_names_in_ds = hdspm->port_names_out_ds =
+ texts_ports_madi;
+ hdspm->port_names_in_qs = hdspm->port_names_out_qs =
+ texts_ports_madi;
+ break;
+
+ case AIO:
+ if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBI_D)) {
+ snd_printk(KERN_INFO "HDSPM: AEB input board found, but not supported\n");
+ }
+
+ hdspm->ss_in_channels = AIO_IN_SS_CHANNELS;
+ hdspm->ds_in_channels = AIO_IN_DS_CHANNELS;
+ hdspm->qs_in_channels = AIO_IN_QS_CHANNELS;
+ hdspm->ss_out_channels = AIO_OUT_SS_CHANNELS;
+ hdspm->ds_out_channels = AIO_OUT_DS_CHANNELS;
+ hdspm->qs_out_channels = AIO_OUT_QS_CHANNELS;
+
+ hdspm->channel_map_out_ss = channel_map_aio_out_ss;
+ hdspm->channel_map_out_ds = channel_map_aio_out_ds;
+ hdspm->channel_map_out_qs = channel_map_aio_out_qs;
+
+ hdspm->channel_map_in_ss = channel_map_aio_in_ss;
+ hdspm->channel_map_in_ds = channel_map_aio_in_ds;
+ hdspm->channel_map_in_qs = channel_map_aio_in_qs;
+
+ hdspm->port_names_in_ss = texts_ports_aio_in_ss;
+ hdspm->port_names_out_ss = texts_ports_aio_out_ss;
+ hdspm->port_names_in_ds = texts_ports_aio_in_ds;
+ hdspm->port_names_out_ds = texts_ports_aio_out_ds;
+ hdspm->port_names_in_qs = texts_ports_aio_in_qs;
+ hdspm->port_names_out_qs = texts_ports_aio_out_qs;
+
+ break;
+
+ case RayDAT:
+ hdspm->ss_in_channels = hdspm->ss_out_channels =
+ RAYDAT_SS_CHANNELS;
+ hdspm->ds_in_channels = hdspm->ds_out_channels =
+ RAYDAT_DS_CHANNELS;
+ hdspm->qs_in_channels = hdspm->qs_out_channels =
+ RAYDAT_QS_CHANNELS;
+
+ hdspm->max_channels_in = RAYDAT_SS_CHANNELS;
+ hdspm->max_channels_out = RAYDAT_SS_CHANNELS;
+
+ hdspm->channel_map_in_ss = hdspm->channel_map_out_ss =
+ channel_map_raydat_ss;
+ hdspm->channel_map_in_ds = hdspm->channel_map_out_ds =
+ channel_map_raydat_ds;
+ hdspm->channel_map_in_qs = hdspm->channel_map_out_qs =
+ channel_map_raydat_qs;
+ hdspm->channel_map_in = hdspm->channel_map_out =
+ channel_map_raydat_ss;
+
+ hdspm->port_names_in_ss = hdspm->port_names_out_ss =
+ texts_ports_raydat_ss;
+ hdspm->port_names_in_ds = hdspm->port_names_out_ds =
+ texts_ports_raydat_ds;
+ hdspm->port_names_in_qs = hdspm->port_names_out_qs =
+ texts_ports_raydat_qs;
+
+
+ break;
+
+ }
+
+ /* TCO detection */
+ switch (hdspm->io_type) {
+ case AIO:
+ case RayDAT:
+ if (hdspm_read(hdspm, HDSPM_statusRegister2) &
+ HDSPM_s2_tco_detect) {
+ hdspm->midiPorts++;
+ hdspm->tco = kzalloc(sizeof(struct hdspm_tco),
+ GFP_KERNEL);
+ if (NULL != hdspm->tco) {
+ hdspm_tco_write(hdspm);
+ }
+ snd_printk(KERN_INFO "HDSPM: AIO/RayDAT TCO module found\n");
+ } else {
+ hdspm->tco = NULL;
+ }
+ break;
+
+ case MADI:
+ if (hdspm_read(hdspm, HDSPM_statusRegister) & HDSPM_tco_detect) {
+ hdspm->midiPorts++;
+ hdspm->tco = kzalloc(sizeof(struct hdspm_tco),
+ GFP_KERNEL);
+ if (NULL != hdspm->tco) {
+ hdspm_tco_write(hdspm);
+ }
+ snd_printk(KERN_INFO "HDSPM: MADI TCO module found\n");
+ } else {
+ hdspm->tco = NULL;
+ }
+ break;
+
+ default:
+ hdspm->tco = NULL;
+ }
+
+ /* texts */
+ switch (hdspm->io_type) {
+ case AES32:
+ if (hdspm->tco) {
+ hdspm->texts_autosync = texts_autosync_aes_tco;
+ hdspm->texts_autosync_items = 10;
+ } else {
+ hdspm->texts_autosync = texts_autosync_aes;
+ hdspm->texts_autosync_items = 9;
+ }
+ break;
+
+ case MADI:
+ if (hdspm->tco) {
+ hdspm->texts_autosync = texts_autosync_madi_tco;
+ hdspm->texts_autosync_items = 4;
+ } else {
+ hdspm->texts_autosync = texts_autosync_madi;
+ hdspm->texts_autosync_items = 3;
+ }
+ break;
+
+ case MADIface:
+
+ break;
+
+ case RayDAT:
+ if (hdspm->tco) {
+ hdspm->texts_autosync = texts_autosync_raydat_tco;
+ hdspm->texts_autosync_items = 9;
+ } else {
+ hdspm->texts_autosync = texts_autosync_raydat;
+ hdspm->texts_autosync_items = 8;
+ }
+ break;
+
+ case AIO:
+ if (hdspm->tco) {
+ hdspm->texts_autosync = texts_autosync_aio_tco;
+ hdspm->texts_autosync_items = 6;
+ } else {
+ hdspm->texts_autosync = texts_autosync_aio;
+ hdspm->texts_autosync_items = 5;
+ }
+ break;
+
+ }
+
+ tasklet_init(&hdspm->midi_tasklet,
+ hdspm_midi_tasklet, (unsigned long) hdspm);
snd_printdd("create alsa devices.\n");
err = snd_hdspm_create_alsa_devices(card, hdspm);
@@ -4444,6 +6667,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
return 0;
}
+
static int snd_hdspm_free(struct hdspm * hdspm)
{
@@ -4452,7 +6676,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
/* stop the audio, and cancel all interrupts */
hdspm->control_register &=
~(HDSPM_Start | HDSPM_AudioInterruptEnable |
- HDSPM_Midi0InterruptEnable | HDSPM_Midi1InterruptEnable);
+ HDSPM_Midi0InterruptEnable | HDSPM_Midi1InterruptEnable |
+ HDSPM_Midi2InterruptEnable | HDSPM_Midi3InterruptEnable);
hdspm_write(hdspm, HDSPM_controlRegister,
hdspm->control_register);
}
@@ -4472,6 +6697,7 @@ static int snd_hdspm_free(struct hdspm * hdspm)
return 0;
}
+
static void snd_hdspm_card_free(struct snd_card *card)
{
struct hdspm *hdspm = card->private_data;
@@ -4480,6 +6706,7 @@ static void snd_hdspm_card_free(struct snd_card *card)
snd_hdspm_free(hdspm);
}
+
static int __devinit snd_hdspm_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
@@ -4496,7 +6723,7 @@ static int __devinit snd_hdspm_probe(struct pci_dev *pci,
}
err = snd_card_create(index[dev], id[dev],
- THIS_MODULE, sizeof(struct hdspm), &card);
+ THIS_MODULE, sizeof(struct hdspm), &card);
if (err < 0)
return err;
@@ -4507,16 +6734,25 @@ static int __devinit snd_hdspm_probe(struct pci_dev *pci,
snd_card_set_dev(card, &pci->dev);
- err = snd_hdspm_create(card, hdspm, precise_ptr[dev],
- enable_monitor[dev]);
+ err = snd_hdspm_create(card, hdspm);
if (err < 0) {
snd_card_free(card);
return err;
}
- strcpy(card->shortname, "HDSPM MADI");
- sprintf(card->longname, "%s at 0x%lx, irq %d", hdspm->card_name,
- hdspm->port, hdspm->irq);
+ if (hdspm->io_type != MADIface) {
+ sprintf(card->shortname, "%s_%x",
+ hdspm->card_name,
+ (hdspm_read(hdspm, HDSPM_midiStatusIn0)>>8) & 0xFFFFFF);
+ sprintf(card->longname, "%s S/N 0x%x at 0x%lx, irq %d",
+ hdspm->card_name,
+ (hdspm_read(hdspm, HDSPM_midiStatusIn0)>>8) & 0xFFFFFF,
+ hdspm->port, hdspm->irq);
+ } else {
+ sprintf(card->shortname, "%s", hdspm->card_name);
+ sprintf(card->longname, "%s at 0x%lx, irq %d",
+ hdspm->card_name, hdspm->port, hdspm->irq);
+ }
err = snd_card_register(card);
if (err < 0) {
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index b47cfd45b3b9..3ecbd67f88c9 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -1034,7 +1034,11 @@ static int __devinit snd_pmac_detect(struct snd_pmac *chip)
if (of_device_is_compatible(sound, "tumbler")) {
chip->model = PMAC_TUMBLER;
chip->can_capture = of_machine_is_compatible("PowerMac4,2")
- || of_machine_is_compatible("PowerBook4,1");
+ || of_machine_is_compatible("PowerBook3,2")
+ || of_machine_is_compatible("PowerBook3,3")
+ || of_machine_is_compatible("PowerBook4,1")
+ || of_machine_is_compatible("PowerBook4,2")
+ || of_machine_is_compatible("PowerBook4,3");
chip->can_duplex = 0;
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index a3efc52a34da..8224db5f0434 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -50,10 +50,12 @@ source "sound/soc/jz4740/Kconfig"
source "sound/soc/nuc900/Kconfig"
source "sound/soc/omap/Kconfig"
source "sound/soc/kirkwood/Kconfig"
+source "sound/soc/mid-x86/Kconfig"
source "sound/soc/pxa/Kconfig"
source "sound/soc/samsung/Kconfig"
source "sound/soc/s6000/Kconfig"
source "sound/soc/sh/Kconfig"
+source "sound/soc/tegra/Kconfig"
source "sound/soc/txx9/Kconfig"
# Supported codecs
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index ce913bf5213c..1ed61c5df2c5 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SND_SOC) += ep93xx/
obj-$(CONFIG_SND_SOC) += fsl/
obj-$(CONFIG_SND_SOC) += imx/
obj-$(CONFIG_SND_SOC) += jz4740/
+obj-$(CONFIG_SND_SOC) += mid-x86/
obj-$(CONFIG_SND_SOC) += nuc900/
obj-$(CONFIG_SND_SOC) += omap/
obj-$(CONFIG_SND_SOC) += kirkwood/
@@ -17,4 +18,5 @@ obj-$(CONFIG_SND_SOC) += pxa/
obj-$(CONFIG_SND_SOC) += samsung/
obj-$(CONFIG_SND_SOC) += s6000/
obj-$(CONFIG_SND_SOC) += sh/
+obj-$(CONFIG_SND_SOC) += tegra/
obj-$(CONFIG_SND_SOC) += txx9/
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index c48b23c1d4fc..c04da1871297 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -26,12 +26,15 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
select SND_SOC_CS42L51 if I2C
select SND_SOC_CS4270 if I2C
+ select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI
select SND_SOC_CX20442
select SND_SOC_DA7210 if I2C
select SND_SOC_JZ4740_CODEC if SOC_JZ4740
select SND_SOC_MAX98088 if I2C
select SND_SOC_MAX9877 if I2C
select SND_SOC_PCM3008
+ select SND_SOC_SGTL5000 if I2C
+ select SND_SOC_SN95031 if INTEL_SCU_IPC
select SND_SOC_SPDIF
select SND_SOC_SSM2602 if I2C
select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
@@ -76,6 +79,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8985 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8990 if I2C
+ select SND_SOC_WM8991 if I2C
select SND_SOC_WM8993 if I2C
select SND_SOC_WM8994 if MFD_WM8994
select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
@@ -155,6 +159,9 @@ config SND_SOC_CS4270_VD33_ERRATA
bool
depends on SND_SOC_CS4270
+config SND_SOC_CS4271
+ tristate
+
config SND_SOC_CX20442
tristate
@@ -176,6 +183,13 @@ config SND_SOC_MAX98088
config SND_SOC_PCM3008
tristate
+#Freescale sgtl5000 codec
+config SND_SOC_SGTL5000
+ tristate
+
+config SND_SOC_SN95031
+ tristate
+
config SND_SOC_SPDIF
tristate
@@ -304,6 +318,9 @@ config SND_SOC_WM8988
config SND_SOC_WM8990
tristate
+config SND_SOC_WM8991
+ tristate
+
config SND_SOC_WM8993
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 579af9c4f128..3bbb08c512d0 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -12,13 +12,16 @@ snd-soc-ak4671-objs := ak4671.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs4270-objs := cs4270.o
+snd-soc-cs4271-objs := cs4271.o
snd-soc-cx20442-objs := cx20442.o
snd-soc-da7210-objs := da7210.o
snd-soc-dmic-objs := dmic.o
snd-soc-l3-objs := l3.o
snd-soc-max98088-objs := max98088.o
snd-soc-pcm3008-objs := pcm3008.o
+snd-soc-sgtl5000-objs := sgtl5000.o
snd-soc-alc5623-objs := alc5623.o
+snd-soc-sn95031-objs := sn95031.o
snd-soc-spdif-objs := spdif_transciever.o
snd-soc-ssm2602-objs := ssm2602.o
snd-soc-stac9766-objs := stac9766.o
@@ -61,6 +64,7 @@ snd-soc-wm8978-objs := wm8978.o
snd-soc-wm8985-objs := wm8985.o
snd-soc-wm8988-objs := wm8988.o
snd-soc-wm8990-objs := wm8990.o
+snd-soc-wm8991-objs := wm8991.o
snd-soc-wm8993-objs := wm8993.o
snd-soc-wm8994-objs := wm8994.o wm8994-tables.o
snd-soc-wm8995-objs := wm8995.o
@@ -88,9 +92,11 @@ obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o
obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o
+obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
+obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
@@ -98,7 +104,8 @@ obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o
obj-$(CONFIG_SND_SOC_MAX98088) += snd-soc-max98088.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
-obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
+obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o
+obj-$(CONFIG_SND_SOC_SN95031) +=snd-soc-sn95031.o
obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o
obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
@@ -141,6 +148,7 @@ obj-$(CONFIG_SND_SOC_WM8978) += snd-soc-wm8978.o
obj-$(CONFIG_SND_SOC_WM8985) += snd-soc-wm8985.o
obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o
obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o
+obj-$(CONFIG_SND_SOC_WM8991) += snd-soc-wm8991.o
obj-$(CONFIG_SND_SOC_WM8993) += snd-soc-wm8993.o
obj-$(CONFIG_SND_SOC_WM8994) += snd-soc-wm8994.o
obj-$(CONFIG_SND_SOC_WM8995) += snd-soc-wm8995.o
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index c27f8f59dc66..cbf0b6d400b8 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -294,7 +294,6 @@ static struct spi_driver ak4104_spi_driver = {
static int __init ak4104_init(void)
{
- pr_info("Asahi Kasei AK4104 ALSA SoC Codec Driver\n");
return spi_register_driver(&ak4104_spi_driver);
}
module_init(ak4104_init);
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index f00eba313dfd..4be0570e3f1f 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -116,6 +116,12 @@
#define BCKO_MASK (1 << 3)
#define BCKO_64 BCKO_MASK
+#define DIF_MASK (3 << 0)
+#define DSP (0 << 0)
+#define RIGHT_J (1 << 0)
+#define LEFT_J (2 << 0)
+#define I2S (3 << 0)
+
/* MD_CTL2 */
#define FS0 (1 << 0)
#define FS1 (1 << 1)
@@ -354,6 +360,24 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
snd_soc_update_bits(codec, PW_MGMT2, MS, data);
snd_soc_update_bits(codec, MD_CTL1, BCKO_MASK, bcko);
+ /* format type */
+ data = 0;
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_LEFT_J:
+ data = LEFT_J;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ data = I2S;
+ break;
+ /* FIXME
+ * Please add RIGHT_J / DSP support here
+ */
+ default:
+ return -EINVAL;
+ break;
+ }
+ snd_soc_update_bits(codec, MD_CTL1, DIF_MASK, data);
+
return 0;
}
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 347a567b01e1..b8066ef10bb0 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -153,7 +153,8 @@ static int cq93vc_resume(struct snd_soc_codec *codec)
static int cq93vc_probe(struct snd_soc_codec *codec)
{
- struct davinci_vc *davinci_vc = snd_soc_codec_get_drvdata(codec);
+ struct davinci_vc *davinci_vc =
+ mfd_get_data(to_platform_device(codec->dev));
davinci_vc->cq93vc.codec = codec;
codec->control_data = davinci_vc;
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 8b51245f2318..65f578ff611e 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -193,12 +193,12 @@ static struct cs4270_mode_ratios cs4270_mode_ratios[] = {
/* The number of MCLK/LRCK ratios supported by the CS4270 */
#define NUM_MCLK_RATIOS ARRAY_SIZE(cs4270_mode_ratios)
-static int cs4270_reg_is_readable(unsigned int reg)
+static int cs4270_reg_is_readable(struct snd_soc_codec *codec, unsigned int reg)
{
return (reg >= CS4270_FIRSTREG) && (reg <= CS4270_LASTREG);
}
-static int cs4270_reg_is_volatile(unsigned int reg)
+static int cs4270_reg_is_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
/* Unreadable registers are considered volatile */
if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
@@ -743,8 +743,6 @@ static struct i2c_driver cs4270_i2c_driver = {
static int __init cs4270_init(void)
{
- pr_info("Cirrus Logic CS4270 ALSA SoC Codec Driver\n");
-
return i2c_add_driver(&cs4270_i2c_driver);
}
module_init(cs4270_init);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
new file mode 100644
index 000000000000..1791796216c8
--- /dev/null
+++ b/sound/soc/codecs/cs4271.c
@@ -0,0 +1,643 @@
+/*
+ * CS4271 ASoC codec driver
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver supports the CS4271 codec as either master or slave,
+ * working in control port mode and connected via either SPI or I2C.
+ * The accepted data formats are I2S and left-justified.
+ * DAPM support is not implemented.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <sound/cs4271.h>
+
+#define CS4271_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+/*
+ * CS4271 registers
+ * The high byte holds the SPI chip address (0x10) followed by the
+ * write command bit (0), which together form 0x20; the low byte is
+ * the codec register address.
+ */
+#define CS4271_MODE1 0x2001 /* Mode Control 1 */
+#define CS4271_DACCTL 0x2002 /* DAC Control */
+#define CS4271_DACVOL 0x2003 /* DAC Volume & Mixing Control */
+#define CS4271_VOLA 0x2004 /* DAC Channel A Volume Control */
+#define CS4271_VOLB 0x2005 /* DAC Channel B Volume Control */
+#define CS4271_ADCCTL 0x2006 /* ADC Control */
+#define CS4271_MODE2 0x2007 /* Mode Control 2 */
+#define CS4271_CHIPID 0x2008 /* Chip ID */
+
+#define CS4271_FIRSTREG CS4271_MODE1
+#define CS4271_LASTREG CS4271_MODE2
+#define CS4271_NR_REGS ((CS4271_LASTREG & 0xFF) + 1)
+
+/* Bit masks for the CS4271 registers */
+#define CS4271_MODE1_MODE_MASK 0xC0
+#define CS4271_MODE1_MODE_1X 0x00
+#define CS4271_MODE1_MODE_2X 0x80
+#define CS4271_MODE1_MODE_4X 0xC0
+
+#define CS4271_MODE1_DIV_MASK 0x30
+#define CS4271_MODE1_DIV_1 0x00
+#define CS4271_MODE1_DIV_15 0x10
+#define CS4271_MODE1_DIV_2 0x20
+#define CS4271_MODE1_DIV_3 0x30
+
+#define CS4271_MODE1_MASTER 0x08
+
+#define CS4271_MODE1_DAC_DIF_MASK 0x07
+#define CS4271_MODE1_DAC_DIF_LJ 0x00
+#define CS4271_MODE1_DAC_DIF_I2S 0x01
+#define CS4271_MODE1_DAC_DIF_RJ16 0x02
+#define CS4271_MODE1_DAC_DIF_RJ24 0x03
+#define CS4271_MODE1_DAC_DIF_RJ20 0x04
+#define CS4271_MODE1_DAC_DIF_RJ18 0x05
+
+#define CS4271_DACCTL_AMUTE 0x80
+#define CS4271_DACCTL_IF_SLOW 0x40
+
+#define CS4271_DACCTL_DEM_MASK 0x30
+#define CS4271_DACCTL_DEM_DIS 0x00
+#define CS4271_DACCTL_DEM_441 0x10
+#define CS4271_DACCTL_DEM_48 0x20
+#define CS4271_DACCTL_DEM_32 0x30
+
+#define CS4271_DACCTL_SVRU 0x08
+#define CS4271_DACCTL_SRD 0x04
+#define CS4271_DACCTL_INVA 0x02
+#define CS4271_DACCTL_INVB 0x01
+
+#define CS4271_DACVOL_BEQUA 0x40
+#define CS4271_DACVOL_SOFT 0x20
+#define CS4271_DACVOL_ZEROC 0x10
+
+#define CS4271_DACVOL_ATAPI_MASK 0x0F
+#define CS4271_DACVOL_ATAPI_M_M 0x00
+#define CS4271_DACVOL_ATAPI_M_BR 0x01
+#define CS4271_DACVOL_ATAPI_M_BL 0x02
+#define CS4271_DACVOL_ATAPI_M_BLR2 0x03
+#define CS4271_DACVOL_ATAPI_AR_M 0x04
+#define CS4271_DACVOL_ATAPI_AR_BR 0x05
+#define CS4271_DACVOL_ATAPI_AR_BL 0x06
+#define CS4271_DACVOL_ATAPI_AR_BLR2 0x07
+#define CS4271_DACVOL_ATAPI_AL_M 0x08
+#define CS4271_DACVOL_ATAPI_AL_BR 0x09
+#define CS4271_DACVOL_ATAPI_AL_BL 0x0A
+#define CS4271_DACVOL_ATAPI_AL_BLR2 0x0B
+#define CS4271_DACVOL_ATAPI_ALR2_M 0x0C
+#define CS4271_DACVOL_ATAPI_ALR2_BR 0x0D
+#define CS4271_DACVOL_ATAPI_ALR2_BL 0x0E
+#define CS4271_DACVOL_ATAPI_ALR2_BLR2 0x0F
+
+#define CS4271_VOLA_MUTE 0x80
+#define CS4271_VOLA_VOL_MASK 0x7F
+#define CS4271_VOLB_MUTE 0x80
+#define CS4271_VOLB_VOL_MASK 0x7F
+
+#define CS4271_ADCCTL_DITHER16 0x20
+
+#define CS4271_ADCCTL_ADC_DIF_MASK 0x10
+#define CS4271_ADCCTL_ADC_DIF_LJ 0x00
+#define CS4271_ADCCTL_ADC_DIF_I2S 0x10
+
+#define CS4271_ADCCTL_MUTEA 0x08
+#define CS4271_ADCCTL_MUTEB 0x04
+#define CS4271_ADCCTL_HPFDA 0x02
+#define CS4271_ADCCTL_HPFDB 0x01
+
+#define CS4271_MODE2_LOOP 0x10
+#define CS4271_MODE2_MUTECAEQUB 0x08
+#define CS4271_MODE2_FREEZE 0x04
+#define CS4271_MODE2_CPEN 0x02
+#define CS4271_MODE2_PDN 0x01
+
+#define CS4271_CHIPID_PART_MASK 0xF0
+#define CS4271_CHIPID_REV_MASK 0x0F
+
+/*
+ * Default CS4271 power-up configuration
+ * The array includes a placeholder for the non-existent hardware
+ * register at address 0.
+ * It does not include the Chip ID register, as the codec driver does
+ * not use register read operations at all.
+ */
+static const u8 cs4271_dflt_reg[CS4271_NR_REGS] = {
+ 0,
+ 0,
+ CS4271_DACCTL_AMUTE,
+ CS4271_DACVOL_SOFT | CS4271_DACVOL_ATAPI_AL_BR,
+ 0,
+ 0,
+ 0,
+ 0,
+};
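+
+/*
+ * Illustrative note on the table above (derived from the register defines
+ * and the cache I/O comment in cs4271_probe()): the soc-cache code masks
+ * the register address with 0xFF, so each entry is indexed by the low byte
+ * of its define. For example, CS4271_DACVOL (0x2003) maps to index 3, whose
+ * default CS4271_DACVOL_SOFT | CS4271_DACVOL_ATAPI_AL_BR is 0x20 | 0x06 = 0x26.
+ */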
+
+struct cs4271_private {
+ /* SND_SOC_I2C or SND_SOC_SPI */
+ enum snd_soc_control_type bus_type;
+ void *control_data;
+ unsigned int mclk;
+ bool master;
+ bool deemph;
+ /* Current sample rate for de-emphasis control */
+ int rate;
+ /* GPIO driving Reset pin, if any */
+ int gpio_nreset;
+ /* GPIO that disables the serial bus, if any */
+ int gpio_disable;
+};
+
+struct cs4271_clk_cfg {
+ unsigned int ratio; /* MCLK / sample rate */
+ u8 speed_mode; /* codec speed mode: 1x, 2x, 4x */
+ u8 mclk_master; /* ratio bit mask for Master mode */
+ u8 mclk_slave; /* ratio bit mask for Slave mode */
+};
+
+static struct cs4271_clk_cfg cs4271_clk_tab[] = {
+ {64, CS4271_MODE1_MODE_4X, CS4271_MODE1_DIV_1, CS4271_MODE1_DIV_1},
+ {96, CS4271_MODE1_MODE_4X, CS4271_MODE1_DIV_15, CS4271_MODE1_DIV_1},
+ {128, CS4271_MODE1_MODE_2X, CS4271_MODE1_DIV_1, CS4271_MODE1_DIV_1},
+ {192, CS4271_MODE1_MODE_2X, CS4271_MODE1_DIV_15, CS4271_MODE1_DIV_1},
+ {256, CS4271_MODE1_MODE_1X, CS4271_MODE1_DIV_1, CS4271_MODE1_DIV_1},
+ {384, CS4271_MODE1_MODE_1X, CS4271_MODE1_DIV_15, CS4271_MODE1_DIV_1},
+ {512, CS4271_MODE1_MODE_1X, CS4271_MODE1_DIV_2, CS4271_MODE1_DIV_1},
+ {768, CS4271_MODE1_MODE_1X, CS4271_MODE1_DIV_3, CS4271_MODE1_DIV_3},
+ {1024, CS4271_MODE1_MODE_1X, CS4271_MODE1_DIV_3, CS4271_MODE1_DIV_3}
+};
+
+#define CS4271_NR_RATIOS ARRAY_SIZE(cs4271_clk_tab)
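+
+/*
+ * Worked example of a ratio lookup (assuming a 12.288 MHz MCLK, a value
+ * not taken from this patch): at a 48 kHz sample rate the ratio is
+ * 12288000 / 48000 = 256, which matches the {256, ...} entry above,
+ * i.e. single-speed mode with an MCLK divider of 1 in both master and
+ * slave mode.
+ */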
+
+/*
+ * @freq is the desired MCLK rate
+ * The MCLK rate should be the sample rate multiplied by one of the
+ * ratios listed in the cs4271_clk_tab table above.
+ */
+static int cs4271_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+
+ cs4271->mclk = freq;
+ return 0;
+}
+
+static int cs4271_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int format)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val = 0;
+ int ret;
+
+ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ cs4271->master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ cs4271->master = 1;
+ val |= CS4271_MODE1_MASTER;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI format\n");
+ return -EINVAL;
+ }
+
+ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_LEFT_J:
+ val |= CS4271_MODE1_DAC_DIF_LJ;
+ ret = snd_soc_update_bits(codec, CS4271_ADCCTL,
+ CS4271_ADCCTL_ADC_DIF_MASK, CS4271_ADCCTL_ADC_DIF_LJ);
+ if (ret < 0)
+ return ret;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ val |= CS4271_MODE1_DAC_DIF_I2S;
+ ret = snd_soc_update_bits(codec, CS4271_ADCCTL,
+ CS4271_ADCCTL_ADC_DIF_MASK, CS4271_ADCCTL_ADC_DIF_I2S);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI format\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_update_bits(codec, CS4271_MODE1,
+ CS4271_MODE1_DAC_DIF_MASK | CS4271_MODE1_MASTER, val);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int cs4271_deemph[] = {0, 44100, 48000, 32000};
+
+static int cs4271_set_deemph(struct snd_soc_codec *codec)
+{
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ int i, ret;
+ int val = CS4271_DACCTL_DEM_DIS;
+
+ if (cs4271->deemph) {
+ /* Find closest de-emphasis freq */
+ val = 1;
+ for (i = 2; i < ARRAY_SIZE(cs4271_deemph); i++)
+ if (abs(cs4271_deemph[i] - cs4271->rate) <
+ abs(cs4271_deemph[val] - cs4271->rate))
+ val = i;
+ val <<= 4;
+ }
+
+ ret = snd_soc_update_bits(codec, CS4271_DACCTL,
+ CS4271_DACCTL_DEM_MASK, val);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
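+
+/*
+ * Example of the selection above: with de-emphasis enabled and a 44.1 kHz
+ * stream, index 1 (44100) is the closest entry, so val <<= 4 yields 0x10,
+ * i.e. CS4271_DACCTL_DEM_441.
+ */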
+
+static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = cs4271->deemph;
+ return 0;
+}
+
+static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+
+ cs4271->deemph = ucontrol->value.enumerated.item[0];
+ return cs4271_set_deemph(codec);
+}
+
+static int cs4271_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ int i, ret;
+ unsigned int ratio, val;
+
+ cs4271->rate = params_rate(params);
+ ratio = cs4271->mclk / cs4271->rate;
+ for (i = 0; i < CS4271_NR_RATIOS; i++)
+ if (cs4271_clk_tab[i].ratio == ratio)
+ break;
+
+ if ((i == CS4271_NR_RATIOS) || ((ratio == 1024) && cs4271->master)) {
+ dev_err(codec->dev, "Invalid sample rate\n");
+ return -EINVAL;
+ }
+
+ /* Configure DAC */
+ val = cs4271_clk_tab[i].speed_mode;
+
+ if (cs4271->master)
+ val |= cs4271_clk_tab[i].mclk_master;
+ else
+ val |= cs4271_clk_tab[i].mclk_slave;
+
+ ret = snd_soc_update_bits(codec, CS4271_MODE1,
+ CS4271_MODE1_MODE_MASK | CS4271_MODE1_DIV_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ return cs4271_set_deemph(codec);
+}
+
+static int cs4271_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int ret;
+ int val_a = 0;
+ int val_b = 0;
+
+ if (mute) {
+ val_a = CS4271_VOLA_MUTE;
+ val_b = CS4271_VOLB_MUTE;
+ }
+
+ ret = snd_soc_update_bits(codec, CS4271_VOLA, CS4271_VOLA_MUTE, val_a);
+ if (ret < 0)
+ return ret;
+ ret = snd_soc_update_bits(codec, CS4271_VOLB, CS4271_VOLB_MUTE, val_b);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* CS4271 controls */
+static DECLARE_TLV_DB_SCALE(cs4271_dac_tlv, -12700, 100, 0);
+
+static const struct snd_kcontrol_new cs4271_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("Master Playback Volume", CS4271_VOLA, CS4271_VOLB,
+ 0, 0x7F, 1, cs4271_dac_tlv),
+ SOC_SINGLE("Digital Loopback Switch", CS4271_MODE2, 4, 1, 0),
+ SOC_SINGLE("Soft Ramp Switch", CS4271_DACVOL, 5, 1, 0),
+ SOC_SINGLE("Zero Cross Switch", CS4271_DACVOL, 4, 1, 0),
+ SOC_SINGLE_BOOL_EXT("De-emphasis Switch", 0,
+ cs4271_get_deemph, cs4271_put_deemph),
+ SOC_SINGLE("Auto-Mute Switch", CS4271_DACCTL, 7, 1, 0),
+ SOC_SINGLE("Slow Roll Off Filter Switch", CS4271_DACCTL, 6, 1, 0),
+ SOC_SINGLE("Soft Volume Ramp-Up Switch", CS4271_DACCTL, 3, 1, 0),
+ SOC_SINGLE("Soft Ramp-Down Switch", CS4271_DACCTL, 2, 1, 0),
+ SOC_SINGLE("Left Channel Inversion Switch", CS4271_DACCTL, 1, 1, 0),
+ SOC_SINGLE("Right Channel Inversion Switch", CS4271_DACCTL, 0, 1, 0),
+ SOC_DOUBLE("Master Capture Switch", CS4271_ADCCTL, 3, 2, 1, 1),
+ SOC_SINGLE("Dither 16-Bit Data Switch", CS4271_ADCCTL, 5, 1, 0),
+ SOC_DOUBLE("High Pass Filter Switch", CS4271_ADCCTL, 1, 0, 1, 1),
+ SOC_DOUBLE_R("Master Playback Switch", CS4271_VOLA, CS4271_VOLB,
+ 7, 1, 1),
+};
+
+static struct snd_soc_dai_ops cs4271_dai_ops = {
+ .hw_params = cs4271_hw_params,
+ .set_sysclk = cs4271_set_dai_sysclk,
+ .set_fmt = cs4271_set_dai_fmt,
+ .digital_mute = cs4271_digital_mute,
+};
+
+static struct snd_soc_dai_driver cs4271_dai = {
+ .name = "cs4271-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = CS4271_PCM_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = CS4271_PCM_FORMATS,
+ },
+ .ops = &cs4271_dai_ops,
+ .symmetric_rates = 1,
+};
+
+#ifdef CONFIG_PM
+static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
+{
+ int ret;
+ /* Set power-down bit */
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int cs4271_soc_resume(struct snd_soc_codec *codec)
+{
+ int ret;
+ /* Restore codec state */
+ ret = snd_soc_cache_sync(codec);
+ if (ret < 0)
+ return ret;
+ /* then disable the power-down bit */
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+#else
+#define cs4271_soc_suspend NULL
+#define cs4271_soc_resume NULL
+#endif /* CONFIG_PM */
+
+static int cs4271_probe(struct snd_soc_codec *codec)
+{
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
+ int ret;
+ int gpio_nreset = -EINVAL;
+
+ codec->control_data = cs4271->control_data;
+
+ if (cs4271plat && gpio_is_valid(cs4271plat->gpio_nreset))
+ gpio_nreset = cs4271plat->gpio_nreset;
+
+ if (gpio_nreset >= 0)
+ if (gpio_request(gpio_nreset, "CS4271 Reset"))
+ gpio_nreset = -EINVAL;
+ if (gpio_nreset >= 0) {
+ /* Reset codec */
+ gpio_direction_output(gpio_nreset, 0);
+ udelay(1);
+ gpio_set_value(gpio_nreset, 1);
+ /* Give the codec time to wake up */
+ udelay(1);
+ }
+
+ cs4271->gpio_nreset = gpio_nreset;
+
+ /*
+ * In the I2C case, the chip address is specified in the board data,
+ * so cache I/O operations use an 8-bit codec register address.
+ * In the SPI case, the chip address and the register address are
+ * passed together as a 16-bit value.
+ * Either way, the register address is masked with 0xFF inside the
+ * soc-cache code.
+ */
+ if (cs4271->bus_type == SND_SOC_SPI)
+ ret = snd_soc_codec_set_cache_io(codec, 16, 8,
+ cs4271->bus_type);
+ else
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8,
+ cs4271->bus_type);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, 0,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
+ if (ret < 0)
+ return ret;
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
+ if (ret < 0)
+ return ret;
+ /* The power-up sequence requires 85 us */
+ udelay(85);
+
+ return snd_soc_add_controls(codec, cs4271_snd_controls,
+ ARRAY_SIZE(cs4271_snd_controls));
+}
+
+static int cs4271_remove(struct snd_soc_codec *codec)
+{
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ int gpio_nreset;
+
+ gpio_nreset = cs4271->gpio_nreset;
+
+ if (gpio_is_valid(gpio_nreset)) {
+ /* Set codec to the reset state */
+ gpio_set_value(gpio_nreset, 0);
+ gpio_free(gpio_nreset);
+ }
+
+ return 0;
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
+ .probe = cs4271_probe,
+ .remove = cs4271_remove,
+ .suspend = cs4271_soc_suspend,
+ .resume = cs4271_soc_resume,
+ .reg_cache_default = cs4271_dflt_reg,
+ .reg_cache_size = ARRAY_SIZE(cs4271_dflt_reg),
+ .reg_word_size = sizeof(cs4271_dflt_reg[0]),
+ .compress_type = SND_SOC_FLAT_COMPRESSION,
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit cs4271_spi_probe(struct spi_device *spi)
+{
+ struct cs4271_private *cs4271;
+
+ cs4271 = devm_kzalloc(&spi->dev, sizeof(*cs4271), GFP_KERNEL);
+ if (!cs4271)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, cs4271);
+ cs4271->control_data = spi;
+ cs4271->bus_type = SND_SOC_SPI;
+
+ return snd_soc_register_codec(&spi->dev, &soc_codec_dev_cs4271,
+ &cs4271_dai, 1);
+}
+
+static int __devexit cs4271_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_codec(&spi->dev);
+ return 0;
+}
+
+static struct spi_driver cs4271_spi_driver = {
+ .driver = {
+ .name = "cs4271",
+ .owner = THIS_MODULE,
+ },
+ .probe = cs4271_spi_probe,
+ .remove = __devexit_p(cs4271_spi_remove),
+};
+#endif /* defined(CONFIG_SPI_MASTER) */
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static struct i2c_device_id cs4271_i2c_id[] = {
+ {"cs4271", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cs4271_i2c_id);
+
+static int __devinit cs4271_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cs4271_private *cs4271;
+
+ cs4271 = devm_kzalloc(&client->dev, sizeof(*cs4271), GFP_KERNEL);
+ if (!cs4271)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, cs4271);
+ cs4271->control_data = client;
+ cs4271->bus_type = SND_SOC_I2C;
+
+ return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4271,
+ &cs4271_dai, 1);
+}
+
+static int __devexit cs4271_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static struct i2c_driver cs4271_i2c_driver = {
+ .driver = {
+ .name = "cs4271",
+ .owner = THIS_MODULE,
+ },
+ .id_table = cs4271_i2c_id,
+ .probe = cs4271_i2c_probe,
+ .remove = __devexit_p(cs4271_i2c_remove),
+};
+#endif /* defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) */
+
+/*
+ * We only register our serial bus drivers here, without binding them
+ * to a particular chip. So if any of the calls below fails, there is
+ * some problem with the I2C or SPI subsystem.
+ * In most cases this module will be compiled with support for only
+ * one serial bus.
+ */
+static int __init cs4271_modinit(void)
+{
+ int ret;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&cs4271_i2c_driver);
+ if (ret) {
+ pr_err("Failed to register CS4271 I2C driver: %d\n", ret);
+ return ret;
+ }
+#endif
+
+#if defined(CONFIG_SPI_MASTER)
+ ret = spi_register_driver(&cs4271_spi_driver);
+ if (ret) {
+ pr_err("Failed to register CS4271 SPI driver: %d\n", ret);
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+module_init(cs4271_modinit);
+
+static void __exit cs4271_modexit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+ spi_unregister_driver(&cs4271_spi_driver);
+#endif
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&cs4271_i2c_driver);
+#endif
+}
+module_exit(cs4271_modexit);
+
+MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
+MODULE_DESCRIPTION("Cirrus Logic CS4271 ALSA SoC Codec Driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index bb4bf65b9e7e..0bb424af956f 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC;
+static const u8 cx20442_reg;
static struct snd_soc_codec_driver cx20442_codec_dev = {
.probe = cx20442_codec_probe,
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 89498f9ad2e5..bd0517cb7980 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -608,7 +608,7 @@ static struct {
{ 0xFF, 0x00, 1 }, /* FF */
};
-static int max98088_volatile_register(unsigned int reg)
+static int max98088_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
return max98088_access[reg].vol;
}
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
new file mode 100644
index 000000000000..b7e97c026898
--- /dev/null
+++ b/sound/soc/codecs/sgtl5000.c
@@ -0,0 +1,1511 @@
+/*
+ * sgtl5000.c -- SGTL5000 ALSA SoC Audio driver
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/tlv.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+
+#include "sgtl5000.h"
+
+#define SGTL5000_DAP_REG_OFFSET 0x0100
+#define SGTL5000_MAX_REG_OFFSET 0x013A
+
+/* default values of the sgtl5000 registers, DAP registers excluded */
+static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET >> 1] = {
+ 0xa011, /* 0x0000, CHIP_ID. 0x11 stands for revision 17 */
+ 0x0000, /* 0x0002, CHIP_DIG_POWER. */
+ 0x0008, /* 0x0004, CHIP_CLK_CTRL */
+ 0x0010, /* 0x0006, CHIP_I2S_CTRL */
+ 0x0000, /* 0x0008, reserved */
+ 0x0008, /* 0x000A, CHIP_SSS_CTRL */
+ 0x0000, /* 0x000C, reserved */
+ 0x020c, /* 0x000E, CHIP_ADCDAC_CTRL */
+ 0x3c3c, /* 0x0010, CHIP_DAC_VOL */
+ 0x0000, /* 0x0012, reserved */
+ 0x015f, /* 0x0014, CHIP_PAD_STRENGTH */
+ 0x0000, /* 0x0016, reserved */
+ 0x0000, /* 0x0018, reserved */
+ 0x0000, /* 0x001A, reserved */
+ 0x0000, /* 0x001C, reserved */
+ 0x0000, /* 0x001E, reserved */
+ 0x0000, /* 0x0020, CHIP_ANA_ADC_CTRL */
+ 0x1818, /* 0x0022, CHIP_ANA_HP_CTRL */
+ 0x0111, /* 0x0024, CHIP_ANA_CTRL */
+ 0x0000, /* 0x0026, CHIP_LINREG_CTRL */
+ 0x0000, /* 0x0028, CHIP_REF_CTRL */
+ 0x0000, /* 0x002A, CHIP_MIC_CTRL */
+ 0x0000, /* 0x002C, CHIP_LINE_OUT_CTRL */
+ 0x0404, /* 0x002E, CHIP_LINE_OUT_VOL */
+ 0x7060, /* 0x0030, CHIP_ANA_POWER */
+ 0x5000, /* 0x0032, CHIP_PLL_CTRL */
+ 0x0000, /* 0x0034, CHIP_CLK_TOP_CTRL */
+ 0x0000, /* 0x0036, CHIP_ANA_STATUS */
+ 0x0000, /* 0x0038, reserved */
+ 0x0000, /* 0x003A, CHIP_ANA_TEST2 */
+ 0x0000, /* 0x003C, CHIP_SHORT_CTRL */
+ 0x0000, /* reserved */
+};
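+
+/*
+ * Illustrative note on the table above: register addresses are word
+ * aligned, so the cache index is the register address divided by two.
+ * For example, CHIP_DAC_VOL (0x0010) maps to index 8, whose default of
+ * 0x3c3c corresponds to 0 dB on both DAC channels (see dac_get_volsw()).
+ */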
+
+/* default value of dap registers */
+static const u16 sgtl5000_dap_regs[] = {
+ 0x0000, /* 0x0100, DAP_CONTROL */
+ 0x0000, /* 0x0102, DAP_PEQ */
+ 0x0040, /* 0x0104, DAP_BASS_ENHANCE */
+ 0x051f, /* 0x0106, DAP_BASS_ENHANCE_CTRL */
+ 0x0000, /* 0x0108, DAP_AUDIO_EQ */
+ 0x0040, /* 0x010A, DAP_SGTL_SURROUND */
+ 0x0000, /* 0x010C, DAP_FILTER_COEF_ACCESS */
+ 0x0000, /* 0x010E, DAP_COEF_WR_B0_MSB */
+ 0x0000, /* 0x0110, DAP_COEF_WR_B0_LSB */
+ 0x0000, /* 0x0112, reserved */
+ 0x0000, /* 0x0114, reserved */
+ 0x002f, /* 0x0116, DAP_AUDIO_EQ_BASS_BAND0 */
+ 0x002f, /* 0x0118, DAP_AUDIO_EQ_BAND0 */
+ 0x002f, /* 0x011A, DAP_AUDIO_EQ_BAND2 */
+ 0x002f, /* 0x011C, DAP_AUDIO_EQ_BAND3 */
+ 0x002f, /* 0x011E, DAP_AUDIO_EQ_TREBLE_BAND4 */
+ 0x8000, /* 0x0120, DAP_MAIN_CHAN */
+ 0x0000, /* 0x0122, DAP_MIX_CHAN */
+ 0x0510, /* 0x0124, DAP_AVC_CTRL */
+ 0x1473, /* 0x0126, DAP_AVC_THRESHOLD */
+ 0x0028, /* 0x0128, DAP_AVC_ATTACK */
+ 0x0050, /* 0x012A, DAP_AVC_DECAY */
+ 0x0000, /* 0x012C, DAP_COEF_WR_B1_MSB */
+ 0x0000, /* 0x012E, DAP_COEF_WR_B1_LSB */
+ 0x0000, /* 0x0130, DAP_COEF_WR_B2_MSB */
+ 0x0000, /* 0x0132, DAP_COEF_WR_B2_LSB */
+ 0x0000, /* 0x0134, DAP_COEF_WR_A1_MSB */
+ 0x0000, /* 0x0136, DAP_COEF_WR_A1_LSB */
+ 0x0000, /* 0x0138, DAP_COEF_WR_A2_MSB */
+ 0x0000, /* 0x013A, DAP_COEF_WR_A2_LSB */
+};
+
+/* regulator supplies for sgtl5000, VDDD is an optional external supply */
+enum sgtl5000_regulator_supplies {
+ VDDA,
+ VDDIO,
+ VDDD,
+ SGTL5000_SUPPLY_NUM
+};
+
+/* VDDD is an optional supply */
+static const char *supply_names[SGTL5000_SUPPLY_NUM] = {
+ "VDDA",
+ "VDDIO",
+ "VDDD"
+};
+
+#define LDO_CONSUMER_NAME "VDDD_LDO"
+#define LDO_VOLTAGE 1200000
+
+static struct regulator_consumer_supply ldo_consumer[] = {
+ REGULATOR_SUPPLY(LDO_CONSUMER_NAME, NULL),
+};
+
+static struct regulator_init_data ldo_init_data = {
+ .constraints = {
+ .min_uV = 850000,
+ .max_uV = 1600000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &ldo_consumer[0],
+};
+
+/*
+ * sgtl5000 internal LDO regulator,
+ * enabled when no external VDDD supply is provided
+ */
+struct ldo_regulator {
+ struct regulator_desc desc;
+ struct regulator_dev *dev;
+ int voltage;
+ void *codec_data;
+ bool enabled;
+};
+
+/* sgtl5000 private structure in codec */
+struct sgtl5000_priv {
+ int sysclk; /* sysclk rate */
+ int master; /* i2s master or not */
+ int fmt; /* i2s data format */
+ struct regulator_bulk_data supplies[SGTL5000_SUPPLY_NUM];
+ struct ldo_regulator *ldo;
+};
+
+/*
+ * The mic bias power on/off control shares its register bits with the
+ * mic bias output impedance setting, so when powering on the mic bias
+ * we need to write the impedance value back:
+ * 0x0 = Powered off
+ * 0x1 = 2Kohm
+ * 0x2 = 4Kohm
+ * 0x3 = 8Kohm
+ */
+static int mic_bias_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* change mic bias resistor to 4Kohm */
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
+ SGTL5000_BIAS_R_4k, SGTL5000_BIAS_R_4k);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ /*
+ * SGTL5000_BIAS_R_8k as mask to clean the two bits
+ * of mic bias and output impedance
+ */
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
+ SGTL5000_BIAS_R_8k, 0);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Use the codec's assistance to reduce pops: hp_powerup or
+ * lineout_powerup must stay set until vag_powerup has fully ramped
+ * down; the VAG ramp down takes 400ms.
+ */
+static int small_pop_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, 0);
+ msleep(400);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* input sources for ADC */
+static const char *adc_mux_text[] = {
+ "MIC_IN", "LINE_IN"
+};
+
+static const struct soc_enum adc_enum =
+SOC_ENUM_SINGLE(SGTL5000_CHIP_ANA_CTRL, 2, 2, adc_mux_text);
+
+static const struct snd_kcontrol_new adc_mux =
+SOC_DAPM_ENUM("Capture Mux", adc_enum);
+
+/* input sources for DAC */
+static const char *dac_mux_text[] = {
+ "DAC", "LINE_IN"
+};
+
+static const struct soc_enum dac_enum =
+SOC_ENUM_SINGLE(SGTL5000_CHIP_ANA_CTRL, 6, 2, dac_mux_text);
+
+static const struct snd_kcontrol_new dac_mux =
+SOC_DAPM_ENUM("Headphone Mux", dac_enum);
+
+static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("LINE_IN"),
+ SND_SOC_DAPM_INPUT("MIC_IN"),
+
+ SND_SOC_DAPM_OUTPUT("HP_OUT"),
+ SND_SOC_DAPM_OUTPUT("LINE_OUT"),
+
+ SND_SOC_DAPM_MICBIAS_E("Mic Bias", SGTL5000_CHIP_MIC_CTRL, 8, 0,
+ mic_bias_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+ small_pop_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PGA_E("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0,
+ small_pop_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
+ SND_SOC_DAPM_MUX("Headphone Mux", SND_SOC_NOPM, 0, 0, &dac_mux),
+
+ /* aif for i2s input */
+ SND_SOC_DAPM_AIF_IN("AIFIN", "Playback",
+ 0, SGTL5000_CHIP_DIG_POWER,
+ 0, 0),
+
+ /* aif for i2s output */
+ SND_SOC_DAPM_AIF_OUT("AIFOUT", "Capture",
+ 0, SGTL5000_CHIP_DIG_POWER,
+ 1, 0),
+
+ SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
+
+ SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
+};
+
+/* routes for sgtl5000 */
+static const struct snd_soc_dapm_route audio_map[] = {
+ {"Capture Mux", "LINE_IN", "LINE_IN"}, /* line_in --> adc_mux */
+ {"Capture Mux", "MIC_IN", "MIC_IN"}, /* mic_in --> adc_mux */
+
+ {"ADC", NULL, "Capture Mux"}, /* adc_mux --> adc */
+ {"AIFOUT", NULL, "ADC"}, /* adc --> i2s_out */
+
+ {"DAC", NULL, "AIFIN"}, /* i2s-->dac,skip audio mux */
+ {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
+ {"LO", NULL, "DAC"}, /* dac --> line_out */
+
+ {"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
+ {"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */
+
+ {"LINE_OUT", NULL, "LO"},
+ {"HP_OUT", NULL, "HP"},
+};
+
+/* custom info callback for the PCM playback volume control */
+static int dac_info_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0xfc - 0x3c;
+ return 0;
+}
+
+/*
+ * custom get callback for the PCM playback volume
+ *
+ * dac volume register
+ * 15-------------8-7--------------0
+ * | R channel vol | L channel vol |
+ * -------------------------------
+ *
+ * PCM volume with 0.5017 dB steps from 0 to -90 dB
+ *
+ * register values map to dB
+ * 0x3B and less = Reserved
+ * 0x3C = 0 dB
+ * 0x3D = -0.5 dB
+ * 0xF0 = -90 dB
+ * 0xFC and greater = Muted
+ *
+ * register values map to userspace values as follows:
+ *
+ * register value 0x3c (0dB) ... 0xf0 (-90dB) ... 0xfc (mute)
+ * ----------------------------------------------------------
+ * userspace value 0xc0 ... 0x0c ... 0x00
+ */
+static int dac_get_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int reg;
+ int l;
+ int r;
+
+ reg = snd_soc_read(codec, SGTL5000_CHIP_DAC_VOL);
+
+ /* get left channel volume */
+ l = (reg & SGTL5000_DAC_VOL_LEFT_MASK) >> SGTL5000_DAC_VOL_LEFT_SHIFT;
+
+ /* get right channel volume */
+ r = (reg & SGTL5000_DAC_VOL_RIGHT_MASK) >> SGTL5000_DAC_VOL_RIGHT_SHIFT;
+
+ /* make sure the values fall in the range [0x3c, 0xfc] */
+ l = clamp(l, 0x3c, 0xfc);
+ r = clamp(r, 0x3c, 0xfc);
+
+ /* invert it and map to userspace value */
+ l = 0xfc - l;
+ r = 0xfc - r;
+
+ ucontrol->value.integer.value[0] = l;
+ ucontrol->value.integer.value[1] = r;
+
+ return 0;
+}
+
+/*
+ * custom put callback for the PCM playback volume
+ *
+ * dac volume register
+ * 15-------------8-7--------------0
+ * | R channel vol | L channel vol |
+ * -------------------------------
+ *
+ * PCM volume with 0.5017 dB steps from 0 to -90 dB
+ *
+ * register values map to dB
+ * 0x3B and less = Reserved
+ * 0x3C = 0 dB
+ * 0x3D = -0.5 dB
+ * 0xF0 = -90 dB
+ * 0xFC and greater = Muted
+ *
+ * userspace values map to register values as follows:
+ *
+ * userspace value 0xc0 ... 0x0c ... 0x00
+ * ----------------------------------------------------------
+ * register value 0x3c (0dB) ... 0xf0 (-90dB) ... 0xfc (mute)
+ */
+static int dac_put_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int reg;
+ int l;
+ int r;
+
+ l = ucontrol->value.integer.value[0];
+ r = ucontrol->value.integer.value[1];
+
+ /* make sure the userspace volumes fall in the range [0, 0xfc - 0x3c] */
+ l = clamp(l, 0, 0xfc - 0x3c);
+ r = clamp(r, 0, 0xfc - 0x3c);
+
+ /* invert it to get the value to write to the register */
+ l = 0xfc - l;
+ r = 0xfc - r;
+
+ /* shift to get the register value */
+ reg = l << SGTL5000_DAC_VOL_LEFT_SHIFT |
+ r << SGTL5000_DAC_VOL_RIGHT_SHIFT;
+
+ snd_soc_write(codec, SGTL5000_CHIP_DAC_VOL, reg);
+
+ return 0;
+}
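+
+/*
+ * Worked example of the mapping above: a userspace value of 0xc0 is
+ * clamped to [0, 0xc0], inverted to 0xfc - 0xc0 = 0x3c (0 dB) and written
+ * to both channel fields; a userspace value of 0 becomes 0xfc (muted).
+ */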
+
+static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
+
+/* tlv for mic gain, 0db 20db 30db 40db */
+static const unsigned int mic_gain_tlv[] = {
+ TLV_DB_RANGE_HEAD(4),
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
+};
+
+/* tlv for hp volume, -51.5db to 12.0db, step .5db */
+static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
+
+static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
+ /* SOC_DOUBLE_S8_TLV with invert */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Volume",
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = dac_info_volsw,
+ .get = dac_get_volsw,
+ .put = dac_put_volsw,
+ },
+
+ SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
+ SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
+ SGTL5000_CHIP_ANA_ADC_CTRL,
+ 8, 2, 0, capture_6db_attenuate),
+ SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
+
+ SOC_DOUBLE_TLV("Headphone Playback Volume",
+ SGTL5000_CHIP_ANA_HP_CTRL,
+ 0, 8,
+ 0x7f, 1,
+ headphone_volume),
+ SOC_SINGLE("Headphone Playback ZC Switch", SGTL5000_CHIP_ANA_CTRL,
+ 5, 1, 0),
+
+ SOC_SINGLE_TLV("Mic Volume", SGTL5000_CHIP_MIC_CTRL,
+ 0, 4, 0, mic_gain_tlv),
+};
+
+/* digital mute callback used by the ALSA core */
+static int sgtl5000_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 adcdac_ctrl = SGTL5000_DAC_MUTE_LEFT | SGTL5000_DAC_MUTE_RIGHT;
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ADCDAC_CTRL,
+ adcdac_ctrl, mute ? adcdac_ctrl : 0);
+
+ return 0;
+}
+
+/* set codec format */
+static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+ u16 i2sctl = 0;
+
+ sgtl5000->master = 0;
+ /*
+ * i2s clock and frame master setting.
+ * ONLY the following modes are supported:
+ * - clock and frame slave,
+ * - clock and frame master
+ */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ i2sctl |= SGTL5000_I2S_MASTER;
+ sgtl5000->master = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* setting i2s data format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ i2sctl |= SGTL5000_I2S_MODE_PCM;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ i2sctl |= SGTL5000_I2S_MODE_PCM;
+ i2sctl |= SGTL5000_I2S_LRALIGN;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ i2sctl |= SGTL5000_I2S_MODE_RJ;
+ i2sctl |= SGTL5000_I2S_LRPOL;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ i2sctl |= SGTL5000_I2S_LRALIGN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sgtl5000->fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+
+ /* Clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ i2sctl |= SGTL5000_I2S_SCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, SGTL5000_CHIP_I2S_CTRL, i2sctl);
+
+ return 0;
+}
+
+/* set codec sysclk */
+static int sgtl5000_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
+ switch (clk_id) {
+ case SGTL5000_SYSCLK:
+ sgtl5000->sysclk = freq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Set the clock according to the i2s frame clock.
+ * sgtl5000 provides 2 clock sources:
+ * 1. sys_mclk: the sample frequency can only be configured to
+ * 1/256, 1/384 or 1/512 of sys_mclk.
+ * 2. pll: can derive any audio clock.
+ *
+ * Clock setting rules:
+ * 1. in slave mode, only sys_mclk can be used.
+ * 2. because of the sys_mclk constraint, the sample frequency should
+ * be set to 32k, 44.1k or above.
+ * 3. prefer sys_mclk over the pll to save power.
+ */
+static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
+{
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+ int clk_ctl = 0;
+ int sys_fs; /* sample freq */
+
+ /*
+ * The internal sample frequency (sys_fs) must be an integer multiple
+ * of the frame clock; for frame clocks below 32 kHz, sys_fs is set
+ * to 32 kHz (48 kHz family) or 44.1 kHz (44.1 kHz family).
+ */
+ switch (frame_rate) {
+ case 8000:
+ case 16000:
+ sys_fs = 32000;
+ break;
+ case 11025:
+ case 22050:
+ sys_fs = 44100;
+ break;
+ default:
+ sys_fs = frame_rate;
+ break;
+ }
+
+ /* set divided factor of frame clock */
+ switch (sys_fs / frame_rate) {
+ case 4:
+ clk_ctl |= SGTL5000_RATE_MODE_DIV_4 << SGTL5000_RATE_MODE_SHIFT;
+ break;
+ case 2:
+ clk_ctl |= SGTL5000_RATE_MODE_DIV_2 << SGTL5000_RATE_MODE_SHIFT;
+ break;
+ case 1:
+ clk_ctl |= SGTL5000_RATE_MODE_DIV_1 << SGTL5000_RATE_MODE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set the sys_fs according to frame rate */
+ switch (sys_fs) {
+ case 32000:
+ clk_ctl |= SGTL5000_SYS_FS_32k << SGTL5000_SYS_FS_SHIFT;
+ break;
+ case 44100:
+ clk_ctl |= SGTL5000_SYS_FS_44_1k << SGTL5000_SYS_FS_SHIFT;
+ break;
+ case 48000:
+ clk_ctl |= SGTL5000_SYS_FS_48k << SGTL5000_SYS_FS_SHIFT;
+ break;
+ case 96000:
+ clk_ctl |= SGTL5000_SYS_FS_96k << SGTL5000_SYS_FS_SHIFT;
+ break;
+ default:
+ dev_err(codec->dev, "frame rate %d not supported\n",
+ frame_rate);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the mclk/sample_freq divider; for sys_fs = 96 kHz the
+ * factor can only be 256, since mclk is in the range (12 MHz, 27 MHz).
+ */
+ switch (sgtl5000->sysclk / sys_fs) {
+ case 256:
+ clk_ctl |= SGTL5000_MCLK_FREQ_256FS <<
+ SGTL5000_MCLK_FREQ_SHIFT;
+ break;
+ case 384:
+ clk_ctl |= SGTL5000_MCLK_FREQ_384FS <<
+ SGTL5000_MCLK_FREQ_SHIFT;
+ break;
+ case 512:
+ clk_ctl |= SGTL5000_MCLK_FREQ_512FS <<
+ SGTL5000_MCLK_FREQ_SHIFT;
+ break;
+ default:
+ /* if mclk does not satisfy any supported divider, use the pll */
+ if (sgtl5000->master) {
+ clk_ctl |= SGTL5000_MCLK_FREQ_PLL <<
+ SGTL5000_MCLK_FREQ_SHIFT;
+ } else {
+ dev_err(codec->dev,
+ "PLL not supported in slave mode\n");
+ return -EINVAL;
+ }
+ }
+
+ /* if using the pll, see section 6.4.2 of the manual for details */
+ if ((clk_ctl & SGTL5000_MCLK_FREQ_MASK) == SGTL5000_MCLK_FREQ_PLL) {
+ u64 out, t;
+ int div2;
+ int pll_ctl;
+ unsigned int in, int_div, frac_div;
+
+ if (sgtl5000->sysclk > 17000000) {
+ div2 = 1;
+ in = sgtl5000->sysclk / 2;
+ } else {
+ div2 = 0;
+ in = sgtl5000->sysclk;
+ }
+ if (sys_fs == 44100)
+ out = 180633600;
+ else
+ out = 196608000;
+ t = do_div(out, in);
+ int_div = out;
+ t *= 2048;
+ do_div(t, in);
+ frac_div = t;
+ pll_ctl = int_div << SGTL5000_PLL_INT_DIV_SHIFT |
+ frac_div << SGTL5000_PLL_FRAC_DIV_SHIFT;
+
+ snd_soc_write(codec, SGTL5000_CHIP_PLL_CTRL, pll_ctl);
+ if (div2)
+ snd_soc_update_bits(codec,
+ SGTL5000_CHIP_CLK_TOP_CTRL,
+ SGTL5000_INPUT_FREQ_DIV2,
+ SGTL5000_INPUT_FREQ_DIV2);
+ else
+ snd_soc_update_bits(codec,
+ SGTL5000_CHIP_CLK_TOP_CTRL,
+ SGTL5000_INPUT_FREQ_DIV2,
+ 0);
+
+ /* power up pll */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
+ SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP);
+ } else {
+ /* power down pll */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
+ 0);
+ }
+
+ /* if using pll, clk_ctrl must be set after pll power up */
+ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
+
+ return 0;
+}
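+
+/*
+ * Worked example of the PLL setup above (assuming a 12 MHz sys_mclk,
+ * a value not taken from this patch): for a 44.1 kHz family rate,
+ * out = 180633600 and in = 12000000 (no DIV2, since 12 MHz < 17 MHz),
+ * so int_div = 15 with a remainder of 633600, and
+ * frac_div = 633600 * 2048 / 12000000 = 108, giving a PLL output of
+ * roughly 12 MHz * (15 + 108/2048) ~= 180.63 MHz.
+ */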
+
+/*
+ * Set PCM DAI bit size and sample rate.
+ * input: params_rate, params_fmt
+ */
+static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+ int channels = params_channels(params);
+ int i2s_ctl = 0;
+ int stereo;
+ int ret;
+
+ /* sysclk should already be set */
+ if (!sgtl5000->sysclk) {
+ dev_err(codec->dev, "%s: set sysclk first!\n", __func__);
+ return -EFAULT;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ stereo = SGTL5000_DAC_STEREO;
+ else
+ stereo = SGTL5000_ADC_STEREO;
+
+ /* set mono to save power */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, stereo,
+ channels == 1 ? 0 : stereo);
+
+ /* set the codec clock based on lrclk */
+ ret = sgtl5000_set_clock(codec, params_rate(params));
+ if (ret)
+ return ret;
+
+ /* set i2s data format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ if (sgtl5000->fmt == SND_SOC_DAIFMT_RIGHT_J)
+ return -EINVAL;
+ i2s_ctl |= SGTL5000_I2S_DLEN_16 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_32FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ i2s_ctl |= SGTL5000_I2S_DLEN_20 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ i2s_ctl |= SGTL5000_I2S_DLEN_24 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ if (sgtl5000->fmt == SND_SOC_DAIFMT_RIGHT_J)
+ return -EINVAL;
+ i2s_ctl |= SGTL5000_I2S_DLEN_32 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_I2S_CTRL, i2s_ctl, i2s_ctl);
+
+ return 0;
+}
+
+static int ldo_regulator_is_enabled(struct regulator_dev *dev)
+{
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+
+ return ldo->enabled;
+}
+
+static int ldo_regulator_enable(struct regulator_dev *dev)
+{
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
+ int reg;
+
+ if (ldo_regulator_is_enabled(dev))
+ return 0;
+
+ /* compute the regulator register value first */
+ reg = (1600 - ldo->voltage / 1000) / 50;
+ reg = clamp(reg, 0x0, 0xf);
+
+ /* amend the voltage value, unit: uV */
+ ldo->voltage = (1600 - reg * 50) * 1000;
+
+ /* set voltage to register */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
+ (0x1 << 4) - 1, reg);
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINEREG_D_POWERUP,
+ SGTL5000_LINEREG_D_POWERUP);
+
+ /* when the internal ldo is enabled, the simple digital supply can be disabled */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINREG_SIMPLE_POWERUP,
+ 0);
+
+ ldo->enabled = 1;
+ return 0;
+}
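+
+/*
+ * Worked example of the register value computed above: for the default
+ * LDO_VOLTAGE of 1200000 uV, reg = (1600 - 1200) / 50 = 8, and the
+ * amended voltage is (1600 - 8 * 50) * 1000 = 1200000 uV, so the
+ * requested and programmed voltages match exactly in this case.
+ */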
+
+static int ldo_regulator_disable(struct regulator_dev *dev)
+{
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINEREG_D_POWERUP,
+ 0);
+
+ /* clear voltage info */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
+ (0x1 << 4) - 1, 0);
+
+ ldo->enabled = 0;
+
+ return 0;
+}
+
+static int ldo_regulator_get_voltage(struct regulator_dev *dev)
+{
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+
+ return ldo->voltage;
+}
+
+static struct regulator_ops ldo_regulator_ops = {
+ .is_enabled = ldo_regulator_is_enabled,
+ .enable = ldo_regulator_enable,
+ .disable = ldo_regulator_disable,
+ .get_voltage = ldo_regulator_get_voltage,
+};
+
+static int ldo_regulator_register(struct snd_soc_codec *codec,
+ struct regulator_init_data *init_data,
+ int voltage)
+{
+ struct ldo_regulator *ldo;
+
+ ldo = kzalloc(sizeof(struct ldo_regulator), GFP_KERNEL);
+
+ if (!ldo) {
+ dev_err(codec->dev, "failed to allocate ldo_regulator\n");
+ return -ENOMEM;
+ }
+
+ ldo->desc.name = kstrdup(dev_name(codec->dev), GFP_KERNEL);
+ if (!ldo->desc.name) {
+ kfree(ldo);
+ dev_err(codec->dev, "failed to allocate decs name memory\n");
+ return -ENOMEM;
+ }
+
+ ldo->desc.type = REGULATOR_VOLTAGE;
+ ldo->desc.owner = THIS_MODULE;
+ ldo->desc.ops = &ldo_regulator_ops;
+ ldo->desc.n_voltages = 1;
+
+ ldo->codec_data = codec;
+ ldo->voltage = voltage;
+
+ ldo->dev = regulator_register(&ldo->desc, codec->dev,
+ init_data, ldo);
+ if (IS_ERR(ldo->dev)) {
+ dev_err(codec->dev, "failed to register regulator\n");
+ kfree(ldo->desc.name);
+ kfree(ldo);
+
+ return PTR_ERR(ldo->dev);
+ }
+
+ return 0;
+}
+
+static int ldo_regulator_remove(struct snd_soc_codec *codec)
+{
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+ struct ldo_regulator *ldo = sgtl5000->ldo;
+
+ if (!ldo)
+ return 0;
+
+ regulator_unregister(ldo->dev);
+ kfree(ldo->desc.name);
+ kfree(ldo);
+
+ return 0;
+}
+
+/*
+ * set dac bias
+ * common state changes:
+ * startup:
+ * off --> standby --> prepare --> on
+ * standby --> prepare --> on
+ *
+ * stop:
+ * on --> prepare --> standby
+ */
+static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ int ret;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(
+ ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+ return ret;
+ udelay(10);
+ }
+
+ break;
+ case SND_SOC_BIAS_OFF:
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ break;
+ }
+
+ codec->dapm.bias_level = level;
+ return 0;
+}
+
+#define SGTL5000_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops sgtl5000_ops = {
+ .hw_params = sgtl5000_pcm_hw_params,
+ .digital_mute = sgtl5000_digital_mute,
+ .set_fmt = sgtl5000_set_dai_fmt,
+ .set_sysclk = sgtl5000_set_dai_sysclk,
+};
+
+static struct snd_soc_dai_driver sgtl5000_dai = {
+ .name = "sgtl5000",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ /*
+ * only support 8~48K + 96K,
+ * TODO modify hw_param to support more
+ */
+ .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_96000,
+ .formats = SGTL5000_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_96000,
+ .formats = SGTL5000_FORMATS,
+ },
+ .ops = &sgtl5000_ops,
+ .symmetric_rates = 1,
+};
+
+static int sgtl5000_volatile_register(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ switch (reg) {
+ case SGTL5000_CHIP_ID:
+ case SGTL5000_CHIP_ADCDAC_CTRL:
+ case SGTL5000_CHIP_ANA_STATUS:
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static int sgtl5000_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+/*
+ * Restore all sgtl5000 registers.
+ * Since there is a big hole between the DAP and the regular registers,
+ * restore them separately.
+ */
+static int sgtl5000_restore_regs(struct snd_soc_codec *codec)
+{
+ u16 *cache = codec->reg_cache;
+ int i;
+ int regular_regs = SGTL5000_CHIP_SHORT_CTRL >> 1;
+
+ /* restore regular registers */
+ for (i = 0; i < regular_regs; i++) {
+ int reg = i << 1;
+
+ /* these registers depend on the others and are restored below */
+ if (reg == SGTL5000_CHIP_ANA_POWER ||
+ reg == SGTL5000_CHIP_CLK_CTRL ||
+ reg == SGTL5000_CHIP_LINREG_CTRL ||
+ reg == SGTL5000_CHIP_LINE_OUT_CTRL ||
+ reg == SGTL5000_CHIP_REF_CTRL)
+ continue;
+
+ snd_soc_write(codec, reg, cache[i]);
+ }
+
+ /* restore dap registers */
+ for (i = SGTL5000_DAP_REG_OFFSET >> 1;
+ i < SGTL5000_MAX_REG_OFFSET >> 1; i++) {
+ int reg = i << 1;
+
+ snd_soc_write(codec, reg, cache[i]);
+ }
+
+ /*
+ * restore the power and clock registers in the order required by
+ * sgtl5000_set_power_regs() and sgtl5000_set_clock()
+ */
+ snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL,
+ cache[SGTL5000_CHIP_LINREG_CTRL >> 1]);
+
+ snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER,
+ cache[SGTL5000_CHIP_ANA_POWER >> 1]);
+
+ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL,
+ cache[SGTL5000_CHIP_CLK_CTRL >> 1]);
+
+ snd_soc_write(codec, SGTL5000_CHIP_REF_CTRL,
+ cache[SGTL5000_CHIP_REF_CTRL >> 1]);
+
+ snd_soc_write(codec, SGTL5000_CHIP_LINE_OUT_CTRL,
+ cache[SGTL5000_CHIP_LINE_OUT_CTRL >> 1]);
+ return 0;
+}
+
+static int sgtl5000_resume(struct snd_soc_codec *codec)
+{
+ /* Bring the codec back up to standby to enable regulators */
+ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Restore registers from the in-memory cache */
+ sgtl5000_restore_regs(codec);
+ return 0;
+}
+#else
+#define sgtl5000_suspend NULL
+#define sgtl5000_resume NULL
+#endif /* CONFIG_SUSPEND */
+
+/*
+ * sgtl5000 has 3 internal power supplies:
+ * 1. VAG, normally set to vdda/2
+ * 2. charge pump, set to a different value
+ * according to the voltages of vdda and vddio
+ * 3. line out VAG, normally set to vddio/2
+ *
+ * and they should be set according to:
+ * 1. whether vddd is provided by an external supply
+ * 2. whether the vdda and vddio voltages are > 3.1v
+ * 3. whether the chip revision is >= 0x11; if so, the external vddd
+ * is not used.
+ */
+static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
+{
+ int vddd;
+ int vdda;
+ int vddio;
+ u16 ana_pwr;
+ u16 lreg_ctrl;
+ int vag;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
+ vdda = regulator_get_voltage(sgtl5000->supplies[VDDA].consumer);
+ vddio = regulator_get_voltage(sgtl5000->supplies[VDDIO].consumer);
+ vddd = regulator_get_voltage(sgtl5000->supplies[VDDD].consumer);
+
+ vdda = vdda / 1000;
+ vddio = vddio / 1000;
+ vddd = vddd / 1000;
+
+ if (vdda <= 0 || vddio <= 0 || vddd < 0) {
+ dev_err(codec->dev, "regulator voltage not set correctly\n");
+
+ return -EINVAL;
+ }
+
+ /* according to datasheet, maximum voltage of supplies */
+ if (vdda > 3600 || vddio > 3600 || vddd > 1980) {
+ dev_err(codec->dev,
+ "exceed max voltage vdda %dmv vddio %dma vddd %dma\n",
+ vdda, vddio, vddd);
+
+ return -EINVAL;
+ }
+
+ /* reset value */
+ ana_pwr = snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER);
+ ana_pwr |= SGTL5000_DAC_STEREO |
+ SGTL5000_ADC_STEREO |
+ SGTL5000_REFTOP_POWERUP;
+ lreg_ctrl = snd_soc_read(codec, SGTL5000_CHIP_LINREG_CTRL);
+
+ if (vddio < 3100 && vdda < 3100) {
+ /* enable internal oscillator used for charge pump */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_CLK_TOP_CTRL,
+ SGTL5000_INT_OSC_EN,
+ SGTL5000_INT_OSC_EN);
+ /* Enable VDDC charge pump */
+ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
+ } else if (vddio >= 3100 && vdda >= 3100) {
+ /*
+ * if vddio and vdda are both > 3.1v,
+ * the charge pump bit should be cleared before setting ana_pwr
+ */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
+
+ /* VDDC use VDDIO rail */
+ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+ lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+ SGTL5000_VDDC_MAN_ASSN_SHIFT;
+ }
+
+ snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
+
+ snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER, ana_pwr);
+
+ /* set voltage to register */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
+ (0x1 << 4) - 1, 0x8);
+
+ /*
+ * if the vddd linear regulator has been enabled, the simple
+ * digital supply must be cleared to get the proper VDDD voltage.
+ */
+ if (ana_pwr & SGTL5000_LINEREG_D_POWERUP)
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINREG_SIMPLE_POWERUP,
+ 0);
+ else
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINREG_SIMPLE_POWERUP |
+ SGTL5000_STARTUP_POWERUP,
+ 0);
+
+ /*
+ * set ADC/DAC VAG to vdda / 2,
+ * should stay in range (0.8v, 1.575v)
+ */
+ vag = vdda / 2;
+ if (vag <= SGTL5000_ANA_GND_BASE)
+ vag = 0;
+ else if (vag >= SGTL5000_ANA_GND_BASE + SGTL5000_ANA_GND_STP *
+ (SGTL5000_ANA_GND_MASK >> SGTL5000_ANA_GND_SHIFT))
+ vag = SGTL5000_ANA_GND_MASK >> SGTL5000_ANA_GND_SHIFT;
+ else
+ vag = (vag - SGTL5000_ANA_GND_BASE) / SGTL5000_ANA_GND_STP;
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
+ vag << SGTL5000_ANA_GND_SHIFT,
+ vag << SGTL5000_ANA_GND_SHIFT);
+
+ /* set line out VAG to vddio / 2, in range (0.8v, 1.675v) */
+ vag = vddio / 2;
+ if (vag <= SGTL5000_LINE_OUT_GND_BASE)
+ vag = 0;
+ else if (vag >= SGTL5000_LINE_OUT_GND_BASE +
+ SGTL5000_LINE_OUT_GND_STP * SGTL5000_LINE_OUT_GND_MAX)
+ vag = SGTL5000_LINE_OUT_GND_MAX;
+ else
+ vag = (vag - SGTL5000_LINE_OUT_GND_BASE) /
+ SGTL5000_LINE_OUT_GND_STP;
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINE_OUT_CTRL,
+ vag << SGTL5000_LINE_OUT_GND_SHIFT |
+ SGTL5000_LINE_OUT_CURRENT_360u <<
+ SGTL5000_LINE_OUT_CURRENT_SHIFT,
+ vag << SGTL5000_LINE_OUT_GND_SHIFT |
+ SGTL5000_LINE_OUT_CURRENT_360u <<
+ SGTL5000_LINE_OUT_CURRENT_SHIFT);
+
+ return 0;
+}
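+
+/*
+ * Illustrative example of the VAG quantization above (assuming a 3.3 V
+ * VDDA supply, a value not taken from this patch): vdda / 2 = 1650 mV
+ * exceeds the 1.575 V upper bound mentioned above, so the ADC/DAC VAG
+ * field is clamped to its maximum code.
+ */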
+
+static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
+{
+ u16 reg;
+ int ret;
+ int rev;
+ int i;
+ int external_vddd = 0;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
+ for (i = 0; i < ARRAY_SIZE(sgtl5000->supplies); i++)
+ sgtl5000->supplies[i].supply = supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (!ret)
+ external_vddd = 1;
+ else {
+ /* set internal ldo to 1.2v */
+ int voltage = LDO_VOLTAGE;
+
+ ret = ldo_regulator_register(codec, &ldo_init_data, voltage);
+ if (ret) {
+ dev_err(codec->dev,
+ "Failed to register vddd internal supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
+
+ ret = regulator_bulk_get(codec->dev,
+ ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+
+ if (ret) {
+ ldo_regulator_remove(codec);
+ dev_err(codec->dev,
+ "Failed to request supplies: %d\n", ret);
+
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+ goto err_regulator_free;
+
+ /* wait for all power rails to come up */
+ udelay(10);
+
+ /* read chip information */
+ reg = snd_soc_read(codec, SGTL5000_CHIP_ID);
+ if (((reg & SGTL5000_PARTID_MASK) >> SGTL5000_PARTID_SHIFT) !=
+ SGTL5000_PARTID_PART_ID) {
+ dev_err(codec->dev,
+ "Device with ID register %x is not a sgtl5000\n", reg);
+ ret = -ENODEV;
+ goto err_regulator_disable;
+ }
+
+ rev = (reg & SGTL5000_REVID_MASK) >> SGTL5000_REVID_SHIFT;
+ dev_info(codec->dev, "sgtl5000 revision %d\n", rev);
+
+ /*
+ * workaround for revision 0x11 and later,
+ * roll back to use internal LDO
+ */
+ if (external_vddd && rev >= 0x11) {
+ int voltage = LDO_VOLTAGE;
+ /* disable all regulators first */
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ /* free VDDD regulator */
+ regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+
+ ret = ldo_regulator_register(codec, &ldo_init_data, voltage);
+ if (ret)
+ return ret;
+
+ sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
+
+ ret = regulator_bulk_get(codec->dev,
+ ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret) {
+ ldo_regulator_remove(codec);
+ dev_err(codec->dev,
+ "Failed to request supplies: %d\n", ret);
+
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+ goto err_regulator_free;
+
+ /* wait for all power rails to come up */
+ udelay(10);
+ }
+
+ return 0;
+
+err_regulator_disable:
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+err_regulator_free:
+ regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (external_vddd)
+ ldo_regulator_remove(codec);
+ return ret;
+
+}
+
+static int sgtl5000_probe(struct snd_soc_codec *codec)
+{
+ int ret;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
+ /* setup i2c data ops */
+ ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
+ ret = sgtl5000_enable_regulators(codec);
+ if (ret)
+ return ret;
+
+ /* power up sgtl5000 */
+ ret = sgtl5000_set_power_regs(codec);
+ if (ret)
+ goto err;
+
+ /* enable small-pop mode, which introduces a 400ms delay when turning off */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
+ SGTL5000_SMALL_POP,
+ SGTL5000_SMALL_POP);
+
+ /* disable the short-circuit detector */
+ snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
+
+ /*
+ * set i2s as the default input of the sound switch
+ * TODO: add a sound switch control and DAPM widget.
+ */
+ snd_soc_write(codec, SGTL5000_CHIP_SSS_CTRL,
+ SGTL5000_DAC_SEL_I2S_IN << SGTL5000_DAC_SEL_SHIFT);
+ snd_soc_write(codec, SGTL5000_CHIP_DIG_POWER,
+ SGTL5000_ADC_EN | SGTL5000_DAC_EN);
+
+ /* enable dac volume ramp by default */
+ snd_soc_write(codec, SGTL5000_CHIP_ADCDAC_CTRL,
+ SGTL5000_DAC_VOL_RAMP_EN |
+ SGTL5000_DAC_MUTE_RIGHT |
+ SGTL5000_DAC_MUTE_LEFT);
+
+ snd_soc_write(codec, SGTL5000_CHIP_PAD_STRENGTH, 0x015f);
+
+ snd_soc_write(codec, SGTL5000_CHIP_ANA_CTRL,
+ SGTL5000_HP_ZCD_EN |
+ SGTL5000_ADC_ZCD_EN);
+
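+ /* mic bias resistor off, bias voltage and mic gain left at 0 */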
+ snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 0);
+
+ /*
+ * disable DAP
+ * TODO:
+ * Enable DAP in kcontrol and dapm.
+ */
+ snd_soc_write(codec, SGTL5000_DAP_CTRL, 0);
+
+ /* bring the codec to the standby state */
+ ret = sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ if (ret)
+ goto err;
+
+ snd_soc_add_controls(codec, sgtl5000_snd_controls,
+ ARRAY_SIZE(sgtl5000_snd_controls));
+
+ snd_soc_dapm_new_controls(&codec->dapm, sgtl5000_dapm_widgets,
+ ARRAY_SIZE(sgtl5000_dapm_widgets));
+
+ snd_soc_dapm_add_routes(&codec->dapm, audio_map,
+ ARRAY_SIZE(audio_map));
+
+ snd_soc_dapm_new_widgets(&codec->dapm);
+
+ return 0;
+
+err:
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ ldo_regulator_remove(codec);
+
+ return ret;
+}
+
+static int sgtl5000_remove(struct snd_soc_codec *codec)
+{
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
+ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ ldo_regulator_remove(codec);
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver sgtl5000_driver = {
+ .probe = sgtl5000_probe,
+ .remove = sgtl5000_remove,
+ .suspend = sgtl5000_suspend,
+ .resume = sgtl5000_resume,
+ .set_bias_level = sgtl5000_set_bias_level,
+ .reg_cache_size = ARRAY_SIZE(sgtl5000_regs),
+ .reg_word_size = sizeof(u16),
+ .reg_cache_step = 2,
+ .reg_cache_default = sgtl5000_regs,
+ .volatile_register = sgtl5000_volatile_register,
+};
+
+static __devinit int sgtl5000_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sgtl5000_priv *sgtl5000;
+ int ret;
+
+ sgtl5000 = kzalloc(sizeof(struct sgtl5000_priv), GFP_KERNEL);
+ if (!sgtl5000)
+ return -ENOMEM;
+
+ /*
+ * copy the DAP default values into the default value array.
+ * The sgtl5000 register space has a big hole; merging it at
+ * init time makes life easier. The byte offset is halved
+ * because the cache is indexed by 16-bit words.
+ * FIXME: should we drop 'const' from sgtl5000_regs?
+ */
+ memcpy((void *)(&sgtl5000_regs[0] + (SGTL5000_DAP_REG_OFFSET >> 1)),
+ sgtl5000_dap_regs,
+ SGTL5000_MAX_REG_OFFSET - SGTL5000_DAP_REG_OFFSET);
+
+ i2c_set_clientdata(client, sgtl5000);
+
+ ret = snd_soc_register_codec(&client->dev,
+ &sgtl5000_driver, &sgtl5000_dai, 1);
+ if (ret) {
+ dev_err(&client->dev, "Failed to register codec: %d\n", ret);
+ kfree(sgtl5000);
+ return ret;
+ }
+
+ return 0;
+}
+
+static __devexit int sgtl5000_i2c_remove(struct i2c_client *client)
+{
+ struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&client->dev);
+
+ kfree(sgtl5000);
+ return 0;
+}
+
+static const struct i2c_device_id sgtl5000_id[] = {
+ {"sgtl5000", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, sgtl5000_id);
+
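+/*
+ * Board code is expected to register the codec as an ordinary I2C device.
+ * A minimal sketch (the 0x0a address is only an illustration; the actual
+ * address is board-specific) would be:
+ *
+ *	static struct i2c_board_info sgtl5000_board_info __initdata = {
+ *		I2C_BOARD_INFO("sgtl5000", 0x0a),
+ *	};
+ */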
+static struct i2c_driver sgtl5000_i2c_driver = {
+ .driver = {
+ .name = "sgtl5000",
+ .owner = THIS_MODULE,
+ },
+ .probe = sgtl5000_i2c_probe,
+ .remove = __devexit_p(sgtl5000_i2c_remove),
+ .id_table = sgtl5000_id,
+};
+
+static int __init sgtl5000_modinit(void)
+{
+ return i2c_add_driver(&sgtl5000_i2c_driver);
+}
+module_init(sgtl5000_modinit);
+
+static void __exit sgtl5000_exit(void)
+{
+ i2c_del_driver(&sgtl5000_i2c_driver);
+}
+module_exit(sgtl5000_exit);
+
+MODULE_DESCRIPTION("Freescale SGTL5000 ALSA SoC Codec Driver");
+MODULE_AUTHOR("Zeng Zhaoming <zhaoming.zeng@freescale.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
new file mode 100644
index 000000000000..eec3ab368f39
--- /dev/null
+++ b/sound/soc/codecs/sgtl5000.h
@@ -0,0 +1,400 @@
+/*
+ * sgtl5000.h - SGTL5000 audio codec interface
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _SGTL5000_H
+#define _SGTL5000_H
+
+/*
+ * Register values.
+ */
+#define SGTL5000_CHIP_ID 0x0000
+#define SGTL5000_CHIP_DIG_POWER 0x0002
+#define SGTL5000_CHIP_CLK_CTRL 0x0004
+#define SGTL5000_CHIP_I2S_CTRL 0x0006
+#define SGTL5000_CHIP_SSS_CTRL 0x000a
+#define SGTL5000_CHIP_ADCDAC_CTRL 0x000e
+#define SGTL5000_CHIP_DAC_VOL 0x0010
+#define SGTL5000_CHIP_PAD_STRENGTH 0x0014
+#define SGTL5000_CHIP_ANA_ADC_CTRL 0x0020
+#define SGTL5000_CHIP_ANA_HP_CTRL 0x0022
+#define SGTL5000_CHIP_ANA_CTRL 0x0024
+#define SGTL5000_CHIP_LINREG_CTRL 0x0026
+#define SGTL5000_CHIP_REF_CTRL 0x0028
+#define SGTL5000_CHIP_MIC_CTRL 0x002a
+#define SGTL5000_CHIP_LINE_OUT_CTRL 0x002c
+#define SGTL5000_CHIP_LINE_OUT_VOL 0x002e
+#define SGTL5000_CHIP_ANA_POWER 0x0030
+#define SGTL5000_CHIP_PLL_CTRL 0x0032
+#define SGTL5000_CHIP_CLK_TOP_CTRL 0x0034
+#define SGTL5000_CHIP_ANA_STATUS 0x0036
+#define SGTL5000_CHIP_SHORT_CTRL 0x003c
+#define SGTL5000_CHIP_ANA_TEST2 0x003a
+#define SGTL5000_DAP_CTRL 0x0100
+#define SGTL5000_DAP_PEQ 0x0102
+#define SGTL5000_DAP_BASS_ENHANCE 0x0104
+#define SGTL5000_DAP_BASS_ENHANCE_CTRL 0x0106
+#define SGTL5000_DAP_AUDIO_EQ 0x0108
+#define SGTL5000_DAP_SURROUND 0x010a
+#define SGTL5000_DAP_FLT_COEF_ACCESS 0x010c
+#define SGTL5000_DAP_COEF_WR_B0_MSB 0x010e
+#define SGTL5000_DAP_COEF_WR_B0_LSB 0x0110
+#define SGTL5000_DAP_EQ_BASS_BAND0 0x0116
+#define SGTL5000_DAP_EQ_BASS_BAND1 0x0118
+#define SGTL5000_DAP_EQ_BASS_BAND2 0x011a
+#define SGTL5000_DAP_EQ_BASS_BAND3 0x011c
+#define SGTL5000_DAP_EQ_BASS_BAND4 0x011e
+#define SGTL5000_DAP_MAIN_CHAN 0x0120
+#define SGTL5000_DAP_MIX_CHAN 0x0122
+#define SGTL5000_DAP_AVC_CTRL 0x0124
+#define SGTL5000_DAP_AVC_THRESHOLD 0x0126
+#define SGTL5000_DAP_AVC_ATTACK 0x0128
+#define SGTL5000_DAP_AVC_DECAY 0x012a
+#define SGTL5000_DAP_COEF_WR_B1_MSB 0x012c
+#define SGTL5000_DAP_COEF_WR_B1_LSB 0x012e
+#define SGTL5000_DAP_COEF_WR_B2_MSB 0x0130
+#define SGTL5000_DAP_COEF_WR_B2_LSB 0x0132
+#define SGTL5000_DAP_COEF_WR_A1_MSB 0x0134
+#define SGTL5000_DAP_COEF_WR_A1_LSB 0x0136
+#define SGTL5000_DAP_COEF_WR_A2_MSB 0x0138
+#define SGTL5000_DAP_COEF_WR_A2_LSB 0x013a
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * SGTL5000_CHIP_ID
+ */
+#define SGTL5000_PARTID_MASK 0xff00
+#define SGTL5000_PARTID_SHIFT 8
+#define SGTL5000_PARTID_WIDTH 8
+#define SGTL5000_PARTID_PART_ID 0xa0
+#define SGTL5000_REVID_MASK 0x00ff
+#define SGTL5000_REVID_SHIFT 0
+#define SGTL5000_REVID_WIDTH 8
+
+/*
+ * SGTL5000_CHIP_DIG_POWER
+ */
+#define SGTL5000_ADC_EN 0x0040
+#define SGTL5000_DAC_EN 0x0020
+#define SGTL5000_DAP_POWERUP 0x0010
+#define SGTL5000_I2S_OUT_POWERUP 0x0002
+#define SGTL5000_I2S_IN_POWERUP 0x0001
+
+/*
+ * SGTL5000_CHIP_CLK_CTRL
+ */
+#define SGTL5000_RATE_MODE_MASK 0x0030
+#define SGTL5000_RATE_MODE_SHIFT 4
+#define SGTL5000_RATE_MODE_WIDTH 2
+#define SGTL5000_RATE_MODE_DIV_1 0
+#define SGTL5000_RATE_MODE_DIV_2 1
+#define SGTL5000_RATE_MODE_DIV_4 2
+#define SGTL5000_RATE_MODE_DIV_6 3
+#define SGTL5000_SYS_FS_MASK 0x000c
+#define SGTL5000_SYS_FS_SHIFT 2
+#define SGTL5000_SYS_FS_WIDTH 2
+#define SGTL5000_SYS_FS_32k 0x0
+#define SGTL5000_SYS_FS_44_1k 0x1
+#define SGTL5000_SYS_FS_48k 0x2
+#define SGTL5000_SYS_FS_96k 0x3
+#define SGTL5000_MCLK_FREQ_MASK 0x0003
+#define SGTL5000_MCLK_FREQ_SHIFT 0
+#define SGTL5000_MCLK_FREQ_WIDTH 2
+#define SGTL5000_MCLK_FREQ_256FS 0x0
+#define SGTL5000_MCLK_FREQ_384FS 0x1
+#define SGTL5000_MCLK_FREQ_512FS 0x2
+#define SGTL5000_MCLK_FREQ_PLL 0x3
+
+/*
+ * SGTL5000_CHIP_I2S_CTRL
+ */
+#define SGTL5000_I2S_SCLKFREQ_MASK 0x0100
+#define SGTL5000_I2S_SCLKFREQ_SHIFT 8
+#define SGTL5000_I2S_SCLKFREQ_WIDTH 1
+#define SGTL5000_I2S_SCLKFREQ_64FS 0x0
+#define SGTL5000_I2S_SCLKFREQ_32FS 0x1 /* Not for RJ mode */
+#define SGTL5000_I2S_MASTER 0x0080
+#define SGTL5000_I2S_SCLK_INV 0x0040
+#define SGTL5000_I2S_DLEN_MASK 0x0030
+#define SGTL5000_I2S_DLEN_SHIFT 4
+#define SGTL5000_I2S_DLEN_WIDTH 2
+#define SGTL5000_I2S_DLEN_32 0x0
+#define SGTL5000_I2S_DLEN_24 0x1
+#define SGTL5000_I2S_DLEN_20 0x2
+#define SGTL5000_I2S_DLEN_16 0x3
+#define SGTL5000_I2S_MODE_MASK 0x000c
+#define SGTL5000_I2S_MODE_SHIFT 2
+#define SGTL5000_I2S_MODE_WIDTH 2
+#define SGTL5000_I2S_MODE_I2S_LJ 0x0
+#define SGTL5000_I2S_MODE_RJ 0x1
+#define SGTL5000_I2S_MODE_PCM 0x2
+#define SGTL5000_I2S_LRALIGN 0x0002
+#define SGTL5000_I2S_LRPOL 0x0001 /* set for which mode */
+
+/*
+ * SGTL5000_CHIP_SSS_CTRL
+ */
+#define SGTL5000_DAP_MIX_LRSWAP 0x4000
+#define SGTL5000_DAP_LRSWAP 0x2000
+#define SGTL5000_DAC_LRSWAP 0x1000
+#define SGTL5000_I2S_OUT_LRSWAP 0x0400
+#define SGTL5000_DAP_MIX_SEL_MASK 0x0300
+#define SGTL5000_DAP_MIX_SEL_SHIFT 8
+#define SGTL5000_DAP_MIX_SEL_WIDTH 2
+#define SGTL5000_DAP_MIX_SEL_ADC 0x0
+#define SGTL5000_DAP_MIX_SEL_I2S_IN 0x1
+#define SGTL5000_DAP_SEL_MASK 0x00c0
+#define SGTL5000_DAP_SEL_SHIFT 6
+#define SGTL5000_DAP_SEL_WIDTH 2
+#define SGTL5000_DAP_SEL_ADC 0x0
+#define SGTL5000_DAP_SEL_I2S_IN 0x1
+#define SGTL5000_DAC_SEL_MASK 0x0030
+#define SGTL5000_DAC_SEL_SHIFT 4
+#define SGTL5000_DAC_SEL_WIDTH 2
+#define SGTL5000_DAC_SEL_ADC 0x0
+#define SGTL5000_DAC_SEL_I2S_IN 0x1
+#define SGTL5000_DAC_SEL_DAP 0x3
+#define SGTL5000_I2S_OUT_SEL_MASK 0x0003
+#define SGTL5000_I2S_OUT_SEL_SHIFT 0
+#define SGTL5000_I2S_OUT_SEL_WIDTH 2
+#define SGTL5000_I2S_OUT_SEL_ADC 0x0
+#define SGTL5000_I2S_OUT_SEL_I2S_IN 0x1
+#define SGTL5000_I2S_OUT_SEL_DAP 0x3
+
+/*
+ * SGTL5000_CHIP_ADCDAC_CTRL
+ */
+#define SGTL5000_VOL_BUSY_DAC_RIGHT 0x2000
+#define SGTL5000_VOL_BUSY_DAC_LEFT 0x1000
+#define SGTL5000_DAC_VOL_RAMP_EN 0x0200
+#define SGTL5000_DAC_VOL_RAMP_EXPO 0x0100
+#define SGTL5000_DAC_MUTE_RIGHT 0x0008
+#define SGTL5000_DAC_MUTE_LEFT 0x0004
+#define SGTL5000_ADC_HPF_FREEZE 0x0002
+#define SGTL5000_ADC_HPF_BYPASS 0x0001
+
+/*
+ * SGTL5000_CHIP_DAC_VOL
+ */
+#define SGTL5000_DAC_VOL_RIGHT_MASK 0xff00
+#define SGTL5000_DAC_VOL_RIGHT_SHIFT 8
+#define SGTL5000_DAC_VOL_RIGHT_WIDTH 8
+#define SGTL5000_DAC_VOL_LEFT_MASK 0x00ff
+#define SGTL5000_DAC_VOL_LEFT_SHIFT 0
+#define SGTL5000_DAC_VOL_LEFT_WIDTH 8
+
+/*
+ * SGTL5000_CHIP_PAD_STRENGTH
+ */
+#define SGTL5000_PAD_I2S_LRCLK_MASK 0x0300
+#define SGTL5000_PAD_I2S_LRCLK_SHIFT 8
+#define SGTL5000_PAD_I2S_LRCLK_WIDTH 2
+#define SGTL5000_PAD_I2S_SCLK_MASK 0x00c0
+#define SGTL5000_PAD_I2S_SCLK_SHIFT 6
+#define SGTL5000_PAD_I2S_SCLK_WIDTH 2
+#define SGTL5000_PAD_I2S_DOUT_MASK 0x0030
+#define SGTL5000_PAD_I2S_DOUT_SHIFT 4
+#define SGTL5000_PAD_I2S_DOUT_WIDTH 2
+#define SGTL5000_PAD_I2C_SDA_MASK 0x000c
+#define SGTL5000_PAD_I2C_SDA_SHIFT 2
+#define SGTL5000_PAD_I2C_SDA_WIDTH 2
+#define SGTL5000_PAD_I2C_SCL_MASK 0x0003
+#define SGTL5000_PAD_I2C_SCL_SHIFT 0
+#define SGTL5000_PAD_I2C_SCL_WIDTH 2
+
+/*
+ * SGTL5000_CHIP_ANA_ADC_CTRL
+ */
+#define SGTL5000_ADC_VOL_M6DB 0x0100
+#define SGTL5000_ADC_VOL_RIGHT_MASK 0x00f0
+#define SGTL5000_ADC_VOL_RIGHT_SHIFT 4
+#define SGTL5000_ADC_VOL_RIGHT_WIDTH 4
+#define SGTL5000_ADC_VOL_LEFT_MASK 0x000f
+#define SGTL5000_ADC_VOL_LEFT_SHIFT 0
+#define SGTL5000_ADC_VOL_LEFT_WIDTH 4
+
+/*
+ * SGTL5000_CHIP_ANA_HP_CTRL
+ */
+#define SGTL5000_HP_VOL_RIGHT_MASK 0x7f00
+#define SGTL5000_HP_VOL_RIGHT_SHIFT 8
+#define SGTL5000_HP_VOL_RIGHT_WIDTH 7
+#define SGTL5000_HP_VOL_LEFT_MASK 0x007f
+#define SGTL5000_HP_VOL_LEFT_SHIFT 0
+#define SGTL5000_HP_VOL_LEFT_WIDTH 7
+
+/*
+ * SGTL5000_CHIP_ANA_CTRL
+ */
+#define SGTL5000_LINE_OUT_MUTE 0x0100
+#define SGTL5000_HP_SEL_MASK 0x0040
+#define SGTL5000_HP_SEL_SHIFT 6
+#define SGTL5000_HP_SEL_WIDTH 1
+#define SGTL5000_HP_SEL_DAC 0x0
+#define SGTL5000_HP_SEL_LINE_IN 0x1
+#define SGTL5000_HP_ZCD_EN 0x0020
+#define SGTL5000_HP_MUTE 0x0010
+#define SGTL5000_ADC_SEL_MASK 0x0004
+#define SGTL5000_ADC_SEL_SHIFT 2
+#define SGTL5000_ADC_SEL_WIDTH 1
+#define SGTL5000_ADC_SEL_MIC 0x0
+#define SGTL5000_ADC_SEL_LINE_IN 0x1
+#define SGTL5000_ADC_ZCD_EN 0x0002
+#define SGTL5000_ADC_MUTE 0x0001
+
+/*
+ * SGTL5000_CHIP_LINREG_CTRL
+ */
+#define SGTL5000_VDDC_MAN_ASSN_MASK 0x0040
+#define SGTL5000_VDDC_MAN_ASSN_SHIFT 6
+#define SGTL5000_VDDC_MAN_ASSN_WIDTH 1
+#define SGTL5000_VDDC_MAN_ASSN_VDDA 0x0
+#define SGTL5000_VDDC_MAN_ASSN_VDDIO 0x1
+#define SGTL5000_VDDC_ASSN_OVRD 0x0020
+#define SGTL5000_LINREG_VDDD_MASK 0x000f
+#define SGTL5000_LINREG_VDDD_SHIFT 0
+#define SGTL5000_LINREG_VDDD_WIDTH 4
+
+/*
+ * SGTL5000_CHIP_REF_CTRL
+ */
+#define SGTL5000_ANA_GND_MASK 0x01f0
+#define SGTL5000_ANA_GND_SHIFT 4
+#define SGTL5000_ANA_GND_WIDTH 5
+#define SGTL5000_ANA_GND_BASE 800 /* mV */
+#define SGTL5000_ANA_GND_STP 25 /* mV */
+#define SGTL5000_BIAS_CTRL_MASK 0x000e
+#define SGTL5000_BIAS_CTRL_SHIFT 1
+#define SGTL5000_BIAS_CTRL_WIDTH 3
+#define SGTL5000_SMALL_POP 0x0001
+
+/*
+ * SGTL5000_CHIP_MIC_CTRL
+ */
+#define SGTL5000_BIAS_R_MASK 0x0200
+#define SGTL5000_BIAS_R_SHIFT 8
+#define SGTL5000_BIAS_R_WIDTH 2
+#define SGTL5000_BIAS_R_off 0x0
+#define SGTL5000_BIAS_R_2K 0x1
+#define SGTL5000_BIAS_R_4k 0x2
+#define SGTL5000_BIAS_R_8k 0x3
+#define SGTL5000_BIAS_VOLT_MASK 0x0070
+#define SGTL5000_BIAS_VOLT_SHIFT 4
+#define SGTL5000_BIAS_VOLT_WIDTH 3
+#define SGTL5000_MIC_GAIN_MASK 0x0003
+#define SGTL5000_MIC_GAIN_SHIFT 0
+#define SGTL5000_MIC_GAIN_WIDTH 2
+
+/*
+ * SGTL5000_CHIP_LINE_OUT_CTRL
+ */
+#define SGTL5000_LINE_OUT_CURRENT_MASK 0x0f00
+#define SGTL5000_LINE_OUT_CURRENT_SHIFT 8
+#define SGTL5000_LINE_OUT_CURRENT_WIDTH 4
+#define SGTL5000_LINE_OUT_CURRENT_180u 0x0
+#define SGTL5000_LINE_OUT_CURRENT_270u 0x1
+#define SGTL5000_LINE_OUT_CURRENT_360u 0x3
+#define SGTL5000_LINE_OUT_CURRENT_450u 0x7
+#define SGTL5000_LINE_OUT_CURRENT_540u 0xf
+#define SGTL5000_LINE_OUT_GND_MASK 0x003f
+#define SGTL5000_LINE_OUT_GND_SHIFT 0
+#define SGTL5000_LINE_OUT_GND_WIDTH 6
+#define SGTL5000_LINE_OUT_GND_BASE 800 /* mV */
+#define SGTL5000_LINE_OUT_GND_STP 25
+#define SGTL5000_LINE_OUT_GND_MAX 0x23
+
+/*
+ * SGTL5000_CHIP_LINE_OUT_VOL
+ */
+#define SGTL5000_LINE_OUT_VOL_RIGHT_MASK 0x1f00
+#define SGTL5000_LINE_OUT_VOL_RIGHT_SHIFT 8
+#define SGTL5000_LINE_OUT_VOL_RIGHT_WIDTH 5
+#define SGTL5000_LINE_OUT_VOL_LEFT_MASK 0x001f
+#define SGTL5000_LINE_OUT_VOL_LEFT_SHIFT 0
+#define SGTL5000_LINE_OUT_VOL_LEFT_WIDTH 5
+
+/*
+ * SGTL5000_CHIP_ANA_POWER
+ */
+#define SGTL5000_DAC_STEREO 0x4000
+#define SGTL5000_LINREG_SIMPLE_POWERUP 0x2000
+#define SGTL5000_STARTUP_POWERUP 0x1000
+#define SGTL5000_VDDC_CHRGPMP_POWERUP 0x0800
+#define SGTL5000_PLL_POWERUP 0x0400
+#define SGTL5000_LINEREG_D_POWERUP 0x0200
+#define SGTL5000_VCOAMP_POWERUP 0x0100
+#define SGTL5000_VAG_POWERUP 0x0080
+#define SGTL5000_ADC_STEREO 0x0040
+#define SGTL5000_REFTOP_POWERUP 0x0020
+#define SGTL5000_HP_POWERUP 0x0010
+#define SGTL5000_DAC_POWERUP 0x0008
+#define SGTL5000_CAPLESS_HP_POWERUP 0x0004
+#define SGTL5000_ADC_POWERUP 0x0002
+#define SGTL5000_LINE_OUT_POWERUP 0x0001
+
+/*
+ * SGTL5000_CHIP_PLL_CTRL
+ */
+#define SGTL5000_PLL_INT_DIV_MASK 0xf800
+#define SGTL5000_PLL_INT_DIV_SHIFT 11
+#define SGTL5000_PLL_INT_DIV_WIDTH 5
+#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff
+#define SGTL5000_PLL_FRAC_DIV_SHIFT 0
+#define SGTL5000_PLL_FRAC_DIV_WIDTH 11
+
+/*
+ * SGTL5000_CHIP_CLK_TOP_CTRL
+ */
+#define SGTL5000_INT_OSC_EN 0x0800
+#define SGTL5000_INPUT_FREQ_DIV2 0x0008
+
+/*
+ * SGTL5000_CHIP_ANA_STATUS
+ */
+#define SGTL5000_HP_LRSHORT 0x0200
+#define SGTL5000_CAPLESS_SHORT 0x0100
+#define SGTL5000_PLL_LOCKED 0x0010
+
+/*
+ * SGTL5000_CHIP_SHORT_CTRL
+ */
+#define SGTL5000_LVLADJR_MASK 0x7000
+#define SGTL5000_LVLADJR_SHIFT 12
+#define SGTL5000_LVLADJR_WIDTH 3
+#define SGTL5000_LVLADJL_MASK 0x0700
+#define SGTL5000_LVLADJL_SHIFT 8
+#define SGTL5000_LVLADJL_WIDTH 3
+#define SGTL5000_LVLADJC_MASK 0x0070
+#define SGTL5000_LVLADJC_SHIFT 4
+#define SGTL5000_LVLADJC_WIDTH 3
+#define SGTL5000_LR_SHORT_MOD_MASK 0x000c
+#define SGTL5000_LR_SHORT_MOD_SHIFT 2
+#define SGTL5000_LR_SHORT_MOD_WIDTH 2
+#define SGTL5000_CM_SHORT_MOD_MASK 0x0003
+#define SGTL5000_CM_SHORT_MOD_SHIFT 0
+#define SGTL5000_CM_SHORT_MOD_WIDTH 2
+
+/*
+ * SGTL5000_CHIP_ANA_TEST2
+ */
+#define SGTL5000_MONO_DAC 0x1000
+
+/*
+ * SGTL5000_DAP_CTRL
+ */
+#define SGTL5000_DAP_MIX_EN 0x0010
+#define SGTL5000_DAP_EN 0x0001
+
+#define SGTL5000_SYSCLK 0x00
+#define SGTL5000_LRCLK 0x01
+
+#endif
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
new file mode 100644
index 000000000000..2a30eae1881c
--- /dev/null
+++ b/sound/soc/codecs/sn95031.c
@@ -0,0 +1,949 @@
+/*
+ * sn95031.c - TI sn95031 Codec driver
+ *
+ * Copyright (C) 2010 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <asm/intel_scu_ipc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/jack.h>
+#include "sn95031.h"
+
+#define SN95031_RATES (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100)
+#define SN95031_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+
+/* adc helper functions */
+
+/* enables mic bias voltage */
+static void sn95031_enable_mic_bias(struct snd_soc_codec *codec)
+{
+ snd_soc_write(codec, SN95031_VAUD, BIT(2)|BIT(1)|BIT(0));
+ snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(2), BIT(2));
+}
+
+/* Enable/Disable the ADC depending on the argument */
+static void configure_adc(struct snd_soc_codec *sn95031_codec, int val)
+{
+ int value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
+
+ if (val) {
+ /* Enable and start the ADC */
+ value |= (SN95031_ADC_ENBL | SN95031_ADC_START);
+ value &= (~SN95031_ADC_NO_LOOP);
+ } else {
+ /* Just stop the ADC */
+ value &= (~SN95031_ADC_START);
+ }
+ snd_soc_write(sn95031_codec, SN95031_ADC1CNTL1, value);
+}
+
+/*
+ * finds an empty channel for conversion.
+ * If the ADC is not enabled, start with channel 0 itself.
+ * Otherwise find an empty channel by looking for a channel
+ * in which the stop bit is set to 1. Returns the index of
+ * the first free channel on success, or an error code.
+ *
+ * Context: can sleep
+ *
+ */
+static int find_free_channel(struct snd_soc_codec *sn95031_codec)
+{
+ int ret = 0, i, value;
+
+ /* check whether ADC is enabled */
+ value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
+
+ if ((value & SN95031_ADC_ENBL) == 0)
+ return 0;
+
+ /* ADC is already enabled; Looking for an empty channel */
+ for (i = 0; i < SN95031_ADC_CHANLS_MAX; i++) {
+ value = snd_soc_read(sn95031_codec,
+ SN95031_ADC_CHNL_START_ADDR + i);
+ if (value & SN95031_STOPBIT_MASK) {
+ ret = i;
+ break;
+ }
+ }
+ return (ret > SN95031_ADC_LOOP_MAX) ? (-EINVAL) : ret;
+}
+
+/* Initialize the ADC for reading micbias values. Can sleep. */
+static int sn95031_initialize_adc(struct snd_soc_codec *sn95031_codec)
+{
+ int base_addr, chnl_addr;
+ int value;
+ static int channel_index;
+
+ /* Index of the first channel in which the stop bit is set */
+ channel_index = find_free_channel(sn95031_codec);
+ if (channel_index < 0) {
+ pr_err("No free ADC channels");
+ return channel_index;
+ }
+
+ base_addr = SN95031_ADC_CHNL_START_ADDR + channel_index;
+
+ if (!(channel_index == 0 || channel_index == SN95031_ADC_LOOP_MAX)) {
+ /* Reset stop bit for channels other than 0 and 12 */
+ value = snd_soc_read(sn95031_codec, base_addr);
+ /* Set the stop bit to zero */
+ snd_soc_write(sn95031_codec, base_addr, value & 0xEF);
+ /* Index of the first free channel */
+ base_addr++;
+ channel_index++;
+ }
+
+ /* Since this is the last channel, set the stop bit
+ to 1 by ORing the AUDIO_DETECT_CODE with 0x10 */
+ snd_soc_write(sn95031_codec, base_addr,
+ SN95031_AUDIO_DETECT_CODE | 0x10);
+
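+ /* each channel has two data registers, hence the stride of 2 */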
+ chnl_addr = SN95031_ADC_DATA_START_ADDR + 2 * channel_index;
+ pr_debug("mid_initialize : %x", chnl_addr);
+ configure_adc(sn95031_codec, 1);
+ return chnl_addr;
+}
+
+
+/* reads the ADC registers and gets the mic bias value in mV. */
+static unsigned int sn95031_get_mic_bias(struct snd_soc_codec *codec)
+{
+ u16 adc_adr = sn95031_initialize_adc(codec);
+ u16 adc_val1, adc_val2;
+ unsigned int mic_bias;
+
+ sn95031_enable_mic_bias(codec);
+
+ /* Enable the ADC for conversion before reading */
+ snd_soc_write(codec, SN95031_ADC1CNTL3, 0x05);
+ /* Re-toggle the RRDATARD bit */
+ snd_soc_write(codec, SN95031_ADC1CNTL3, 0x04);
+
+ /* wait for the conversion, then read the higher bits of data */
+ msleep(1000);
+ adc_val1 = snd_soc_read(codec, adc_adr);
+ adc_adr++;
+ adc_val2 = snd_soc_read(codec, adc_adr);
+
+ /* Combine the two LSBs with the higher bits to form the raw 10-bit value */
+ mic_bias = (adc_val1 << 2) + (adc_val2 & 3);
+ mic_bias = (mic_bias * SN95031_ADC_ONE_LSB_MULTIPLIER) / 1000;
+ pr_debug("mic bias = %dmV\n", mic_bias);
+ return mic_bias;
+}
+EXPORT_SYMBOL_GPL(sn95031_get_mic_bias);
+/* end - adc helper functions */
+
+static inline unsigned int sn95031_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 value = 0;
+ int ret;
+
+ ret = intel_scu_ipc_ioread8(reg, &value);
+ if (ret)
+ pr_err("read of %x failed, err %d\n", reg, ret);
+ return value;
+
+}
+
+static inline int sn95031_write(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
+{
+ int ret;
+
+ ret = intel_scu_ipc_iowrite8(reg, value);
+ if (ret)
+ pr_err("write of %x failed, err %d\n", reg, ret);
+ return ret;
+}
+
+static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
+ pr_debug("vaud_bias powering up pll\n");
+ /* power up the pll */
+ snd_soc_write(codec, SN95031_AUDPLLCTRL, BIT(5));
+ /* enable pcm 2 */
+ snd_soc_update_bits(codec, SN95031_PCM2C2,
+ BIT(0), BIT(0));
+ }
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ pr_debug("vaud_bias power up rail\n");
+ /* power up the rail */
+ snd_soc_write(codec, SN95031_VAUD,
+ BIT(2)|BIT(1)|BIT(0));
+ msleep(1);
+ } else if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE) {
+ /* turn off pcm */
+ pr_debug("vaud_bias power dn pcm\n");
+ snd_soc_update_bits(codec, SN95031_PCM2C2, BIT(0), 0);
+ snd_soc_write(codec, SN95031_AUDPLLCTRL, 0);
+ }
+ break;
+
+
+ case SND_SOC_BIAS_OFF:
+ pr_debug("vaud_bias _OFF doing rail shutdown\n");
+ snd_soc_write(codec, SN95031_VAUD, BIT(3));
+ break;
+ }
+
+ codec->dapm.bias_level = level;
+ return 0;
+}
+
+static int sn95031_vhs_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ pr_debug("VHS SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
+ /* power up the rail */
+ snd_soc_write(w->codec, SN95031_VHSP, 0x3D);
+ snd_soc_write(w->codec, SN95031_VHSN, 0x3F);
+ msleep(1);
+ } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ pr_debug("VHS SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
+ snd_soc_write(w->codec, SN95031_VHSP, 0xC4);
+ snd_soc_write(w->codec, SN95031_VHSN, 0x04);
+ }
+ return 0;
+}
+
+static int sn95031_vihf_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ pr_debug("VIHF SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
+ /* power up the rail */
+ snd_soc_write(w->codec, SN95031_VIHF, 0x27);
+ msleep(1);
+ } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ pr_debug("VIHF SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
+ snd_soc_write(w->codec, SN95031_VIHF, 0x24);
+ }
+ return 0;
+}
+
+static int sn95031_dmic12_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ldo = BIT(5)|BIT(4);
+ clk_dir = BIT(0);
+ data_dir = BIT(7);
+ }
+ /* program the DMIC LDO, clock direction and data direction */
+ snd_soc_update_bits(w->codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
+ snd_soc_update_bits(w->codec, SN95031_DMICBUF0123, BIT(0), clk_dir);
+ snd_soc_update_bits(w->codec, SN95031_DMICBUF0123, BIT(7), data_dir);
+ return 0;
+}
+
+static int sn95031_dmic34_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ldo = BIT(5)|BIT(4);
+ clk_dir = BIT(2);
+ data_dir = BIT(1);
+ }
+ /* program the DMIC LDO, clock direction and data direction */
+ snd_soc_update_bits(w->codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
+ snd_soc_update_bits(w->codec, SN95031_DMICBUF0123, BIT(2), clk_dir);
+ snd_soc_update_bits(w->codec, SN95031_DMICBUF45, BIT(1), data_dir);
+ return 0;
+}
+
+static int sn95031_dmic56_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ unsigned int ldo = 0;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ldo = BIT(7)|BIT(6);
+
+ /* program DMIC LDO */
+ snd_soc_update_bits(w->codec, SN95031_MICBIAS, BIT(7)|BIT(6), ldo);
+ return 0;
+}
+
+/* mux controls */
+static const char *sn95031_mic_texts[] = { "AMIC", "LineIn" };
+
+static const struct soc_enum sn95031_micl_enum =
+ SOC_ENUM_SINGLE(SN95031_ADCCONFIG, 1, 2, sn95031_mic_texts);
+
+static const struct snd_kcontrol_new sn95031_micl_mux_control =
+ SOC_DAPM_ENUM("Route", sn95031_micl_enum);
+
+static const struct soc_enum sn95031_micr_enum =
+ SOC_ENUM_SINGLE(SN95031_ADCCONFIG, 3, 2, sn95031_mic_texts);
+
+static const struct snd_kcontrol_new sn95031_micr_mux_control =
+ SOC_DAPM_ENUM("Route", sn95031_micr_enum);
+
+static const char *sn95031_input_texts[] = { "DMIC1", "DMIC2", "DMIC3",
+ "DMIC4", "DMIC5", "DMIC6",
+ "ADC Left", "ADC Right" };
+
+static const struct soc_enum sn95031_input1_enum =
+ SOC_ENUM_SINGLE(SN95031_AUDIOMUX12, 0, 8, sn95031_input_texts);
+
+static const struct snd_kcontrol_new sn95031_input1_mux_control =
+ SOC_DAPM_ENUM("Route", sn95031_input1_enum);
+
+static const struct soc_enum sn95031_input2_enum =
+ SOC_ENUM_SINGLE(SN95031_AUDIOMUX12, 4, 8, sn95031_input_texts);
+
+static const struct snd_kcontrol_new sn95031_input2_mux_control =
+ SOC_DAPM_ENUM("Route", sn95031_input2_enum);
+
+static const struct soc_enum sn95031_input3_enum =
+ SOC_ENUM_SINGLE(SN95031_AUDIOMUX34, 0, 8, sn95031_input_texts);
+
+static const struct snd_kcontrol_new sn95031_input3_mux_control =
+ SOC_DAPM_ENUM("Route", sn95031_input3_enum);
+
+static const struct soc_enum sn95031_input4_enum =
+ SOC_ENUM_SINGLE(SN95031_AUDIOMUX34, 4, 8, sn95031_input_texts);
+
+static const struct snd_kcontrol_new sn95031_input4_mux_control =
+ SOC_DAPM_ENUM("Route", sn95031_input4_enum);
+
+/* capture path controls */
+
+static const char *sn95031_micmode_text[] = {"Single Ended", "Differential"};
+
+/* 0dB to 30dB in 10dB steps */
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 10, 0);
+
+static const struct soc_enum sn95031_micmode1_enum =
+ SOC_ENUM_SINGLE(SN95031_MICAMP1, 1, 2, sn95031_micmode_text);
+static const struct soc_enum sn95031_micmode2_enum =
+ SOC_ENUM_SINGLE(SN95031_MICAMP2, 1, 2, sn95031_micmode_text);
+
+static const char *sn95031_dmic_cfg_text[] = {"GPO", "DMIC"};
+
+static const struct soc_enum sn95031_dmic12_cfg_enum =
+ SOC_ENUM_SINGLE(SN95031_DMICMUX, 0, 2, sn95031_dmic_cfg_text);
+static const struct soc_enum sn95031_dmic34_cfg_enum =
+ SOC_ENUM_SINGLE(SN95031_DMICMUX, 1, 2, sn95031_dmic_cfg_text);
+static const struct soc_enum sn95031_dmic56_cfg_enum =
+ SOC_ENUM_SINGLE(SN95031_DMICMUX, 2, 2, sn95031_dmic_cfg_text);
+
+static const struct snd_kcontrol_new sn95031_snd_controls[] = {
+ SOC_ENUM("Mic1Mode Capture Route", sn95031_micmode1_enum),
+ SOC_ENUM("Mic2Mode Capture Route", sn95031_micmode2_enum),
+ SOC_ENUM("DMIC12 Capture Route", sn95031_dmic12_cfg_enum),
+ SOC_ENUM("DMIC34 Capture Route", sn95031_dmic34_cfg_enum),
+ SOC_ENUM("DMIC56 Capture Route", sn95031_dmic56_cfg_enum),
+ SOC_SINGLE_TLV("Mic1 Capture Volume", SN95031_MICAMP1,
+ 2, 4, 0, mic_tlv),
+ SOC_SINGLE_TLV("Mic2 Capture Volume", SN95031_MICAMP2,
+ 2, 4, 0, mic_tlv),
+};
+
+/* DAPM widgets */
+static const struct snd_soc_dapm_widget sn95031_dapm_widgets[] = {
+
+ /* all endpoints: mic, headset, speaker, etc. */
+ SND_SOC_DAPM_OUTPUT("HPOUTL"),
+ SND_SOC_DAPM_OUTPUT("HPOUTR"),
+ SND_SOC_DAPM_OUTPUT("EPOUT"),
+ SND_SOC_DAPM_OUTPUT("IHFOUTL"),
+ SND_SOC_DAPM_OUTPUT("IHFOUTR"),
+ SND_SOC_DAPM_OUTPUT("LINEOUTL"),
+ SND_SOC_DAPM_OUTPUT("LINEOUTR"),
+ SND_SOC_DAPM_OUTPUT("VIB1OUT"),
+ SND_SOC_DAPM_OUTPUT("VIB2OUT"),
+
+ SND_SOC_DAPM_INPUT("AMIC1"), /* headset mic */
+ SND_SOC_DAPM_INPUT("AMIC2"),
+ SND_SOC_DAPM_INPUT("DMIC1"),
+ SND_SOC_DAPM_INPUT("DMIC2"),
+ SND_SOC_DAPM_INPUT("DMIC3"),
+ SND_SOC_DAPM_INPUT("DMIC4"),
+ SND_SOC_DAPM_INPUT("DMIC5"),
+ SND_SOC_DAPM_INPUT("DMIC6"),
+ SND_SOC_DAPM_INPUT("LINEINL"),
+ SND_SOC_DAPM_INPUT("LINEINR"),
+
+ SND_SOC_DAPM_MICBIAS("AMIC1Bias", SN95031_MICBIAS, 2, 0),
+ SND_SOC_DAPM_MICBIAS("AMIC2Bias", SN95031_MICBIAS, 3, 0),
+ SND_SOC_DAPM_MICBIAS("DMIC12Bias", SN95031_DMICMUX, 3, 0),
+ SND_SOC_DAPM_MICBIAS("DMIC34Bias", SN95031_DMICMUX, 4, 0),
+ SND_SOC_DAPM_MICBIAS("DMIC56Bias", SN95031_DMICMUX, 5, 0),
+
+ SND_SOC_DAPM_SUPPLY("DMIC12supply", SN95031_DMICLK, 0, 0,
+ sn95031_dmic12_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("DMIC34supply", SN95031_DMICLK, 1, 0,
+ sn95031_dmic34_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("DMIC56supply", SN95031_DMICLK, 2, 0,
+ sn95031_dmic56_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT("PCM_Out", "Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_SUPPLY("Headset Rail", SND_SOC_NOPM, 0, 0,
+ sn95031_vhs_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("Speaker Rail", SND_SOC_NOPM, 0, 0,
+ sn95031_vihf_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* playback path driver enables */
+ SND_SOC_DAPM_PGA("Headset Left Playback",
+ SN95031_DRIVEREN, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Headset Right Playback",
+ SN95031_DRIVEREN, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Speaker Left Playback",
+ SN95031_DRIVEREN, 2, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Speaker Right Playback",
+ SN95031_DRIVEREN, 3, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Vibra1 Playback",
+ SN95031_DRIVEREN, 4, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Vibra2 Playback",
+ SN95031_DRIVEREN, 5, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Earpiece Playback",
+ SN95031_DRIVEREN, 6, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Lineout Left Playback",
+ SN95031_LOCTL, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Lineout Right Playback",
+ SN95031_LOCTL, 4, 0, NULL, 0),
+
+ /* playback path filter enable */
+ SND_SOC_DAPM_PGA("Headset Left Filter",
+ SN95031_HSEPRXCTRL, 4, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Headset Right Filter",
+ SN95031_HSEPRXCTRL, 5, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Speaker Left Filter",
+ SN95031_IHFRXCTRL, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Speaker Right Filter",
+ SN95031_IHFRXCTRL, 1, 0, NULL, 0),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("HSDAC Left", "Headset",
+ SN95031_DACCONFIG, 0, 0),
+ SND_SOC_DAPM_DAC("HSDAC Right", "Headset",
+ SN95031_DACCONFIG, 1, 0),
+ SND_SOC_DAPM_DAC("IHFDAC Left", "Speaker",
+ SN95031_DACCONFIG, 2, 0),
+ SND_SOC_DAPM_DAC("IHFDAC Right", "Speaker",
+ SN95031_DACCONFIG, 3, 0),
+ SND_SOC_DAPM_DAC("Vibra1 DAC", "Vibra1",
+ SN95031_VIB1C5, 1, 0),
+ SND_SOC_DAPM_DAC("Vibra2 DAC", "Vibra2",
+ SN95031_VIB2C5, 1, 0),
+
+ /* capture widgets */
+ SND_SOC_DAPM_PGA("LineIn Enable Left", SN95031_MICAMP1,
+ 7, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("LineIn Enable Right", SN95031_MICAMP2,
+ 7, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("MIC1 Enable", SN95031_MICAMP1, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC2 Enable", SN95031_MICAMP2, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("TX1 Enable", SN95031_AUDIOTXEN, 2, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("TX2 Enable", SN95031_AUDIOTXEN, 3, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("TX3 Enable", SN95031_AUDIOTXEN, 4, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("TX4 Enable", SN95031_AUDIOTXEN, 5, 0, NULL, 0),
+
+ /* ADCs have a NULL stream as they will be turned on by the TX path */
+ SND_SOC_DAPM_ADC("ADC Left", NULL,
+ SN95031_ADCCONFIG, 0, 0),
+ SND_SOC_DAPM_ADC("ADC Right", NULL,
+ SN95031_ADCCONFIG, 2, 0),
+
+ SND_SOC_DAPM_MUX("Mic_InputL Capture Route",
+ SND_SOC_NOPM, 0, 0, &sn95031_micl_mux_control),
+ SND_SOC_DAPM_MUX("Mic_InputR Capture Route",
+ SND_SOC_NOPM, 0, 0, &sn95031_micr_mux_control),
+
+ SND_SOC_DAPM_MUX("Txpath1 Capture Route",
+ SND_SOC_NOPM, 0, 0, &sn95031_input1_mux_control),
+ SND_SOC_DAPM_MUX("Txpath2 Capture Route",
+ SND_SOC_NOPM, 0, 0, &sn95031_input2_mux_control),
+ SND_SOC_DAPM_MUX("Txpath3 Capture Route",
+ SND_SOC_NOPM, 0, 0, &sn95031_input3_mux_control),
+ SND_SOC_DAPM_MUX("Txpath4 Capture Route",
+ SND_SOC_NOPM, 0, 0, &sn95031_input4_mux_control),
+
+};
+
+static const struct snd_soc_dapm_route sn95031_audio_map[] = {
+ /* headset and earpiece map */
+ { "HPOUTL", NULL, "Headset Rail"},
+ { "HPOUTR", NULL, "Headset Rail"},
+ { "HPOUTL", NULL, "Headset Left Playback" },
+ { "HPOUTR", NULL, "Headset Right Playback" },
+ { "EPOUT", NULL, "Earpiece Playback" },
+ { "Headset Left Playback", NULL, "Headset Left Filter"},
+ { "Headset Right Playback", NULL, "Headset Right Filter"},
+ { "Earpiece Playback", NULL, "Headset Left Filter"},
+ { "Headset Left Filter", NULL, "HSDAC Left"},
+ { "Headset Right Filter", NULL, "HSDAC Right"},
+
+ /* speaker map */
+ { "IHFOUTL", NULL, "Speaker Rail"},
+ { "IHFOUTR", NULL, "Speaker Rail"},
+ { "IHFOUTL", "NULL", "Speaker Left Playback"},
+ { "IHFOUTR", "NULL", "Speaker Right Playback"},
+ { "Speaker Left Playback", NULL, "Speaker Left Filter"},
+ { "Speaker Right Playback", NULL, "Speaker Right Filter"},
+ { "Speaker Left Filter", NULL, "IHFDAC Left"},
+ { "Speaker Right Filter", NULL, "IHFDAC Right"},
+
+ /* vibra map */
+ { "VIB1OUT", NULL, "Vibra1 Playback"},
+ { "Vibra1 Playback", NULL, "Vibra1 DAC"},
+
+ { "VIB2OUT", NULL, "Vibra2 Playback"},
+ { "Vibra2 Playback", NULL, "Vibra2 DAC"},
+
+ /* lineout */
+ { "LINEOUTL", NULL, "Lineout Left Playback"},
+ { "LINEOUTR", NULL, "Lineout Right Playback"},
+ { "Lineout Left Playback", NULL, "Headset Left Filter"},
+ { "Lineout Left Playback", NULL, "Speaker Left Filter"},
+ { "Lineout Left Playback", NULL, "Vibra1 DAC"},
+ { "Lineout Right Playback", NULL, "Headset Right Filter"},
+ { "Lineout Right Playback", NULL, "Speaker Right Filter"},
+ { "Lineout Right Playback", NULL, "Vibra2 DAC"},
+
+ /* Headset (AMIC1) mic */
+ { "AMIC1Bias", NULL, "AMIC1"},
+ { "MIC1 Enable", NULL, "AMIC1Bias"},
+ { "Mic_InputL Capture Route", "AMIC", "MIC1 Enable"},
+
+ /* AMIC2 */
+ { "AMIC2Bias", NULL, "AMIC2"},
+ { "MIC2 Enable", NULL, "AMIC2Bias"},
+ { "Mic_InputR Capture Route", "AMIC", "MIC2 Enable"},
+
+
+ /* Linein */
+ { "LineIn Enable Left", NULL, "LINEINL"},
+ { "LineIn Enable Right", NULL, "LINEINR"},
+ { "Mic_InputL Capture Route", "LineIn", "LineIn Enable Left"},
+ { "Mic_InputR Capture Route", "LineIn", "LineIn Enable Right"},
+
+ /* ADC connection */
+ { "ADC Left", NULL, "Mic_InputL Capture Route"},
+ { "ADC Right", NULL, "Mic_InputR Capture Route"},
+
+ /* DMIC connections */
+ { "DMIC1", NULL, "DMIC12supply"},
+ { "DMIC2", NULL, "DMIC12supply"},
+ { "DMIC3", NULL, "DMIC34supply"},
+ { "DMIC4", NULL, "DMIC34supply"},
+ { "DMIC5", NULL, "DMIC56supply"},
+ { "DMIC6", NULL, "DMIC56supply"},
+
+ { "DMIC12Bias", NULL, "DMIC1"},
+ { "DMIC12Bias", NULL, "DMIC2"},
+ { "DMIC34Bias", NULL, "DMIC3"},
+ { "DMIC34Bias", NULL, "DMIC4"},
+ { "DMIC56Bias", NULL, "DMIC5"},
+ { "DMIC56Bias", NULL, "DMIC6"},
+
+ /* TX path inputs */
+ { "Txpath1 Capture Route", "ADC Left", "ADC Left"},
+ { "Txpath2 Capture Route", "ADC Left", "ADC Left"},
+ { "Txpath3 Capture Route", "ADC Left", "ADC Left"},
+ { "Txpath4 Capture Route", "ADC Left", "ADC Left"},
+ { "Txpath1 Capture Route", "ADC Right", "ADC Right"},
+ { "Txpath2 Capture Route", "ADC Right", "ADC Right"},
+ { "Txpath3 Capture Route", "ADC Right", "ADC Right"},
+ { "Txpath4 Capture Route", "ADC Right", "ADC Right"},
+ { "Txpath1 Capture Route", "DMIC1", "DMIC1"},
+ { "Txpath2 Capture Route", "DMIC1", "DMIC1"},
+ { "Txpath3 Capture Route", "DMIC1", "DMIC1"},
+ { "Txpath4 Capture Route", "DMIC1", "DMIC1"},
+ { "Txpath1 Capture Route", "DMIC2", "DMIC2"},
+ { "Txpath2 Capture Route", "DMIC2", "DMIC2"},
+ { "Txpath3 Capture Route", "DMIC2", "DMIC2"},
+ { "Txpath4 Capture Route", "DMIC2", "DMIC2"},
+ { "Txpath1 Capture Route", "DMIC3", "DMIC3"},
+ { "Txpath2 Capture Route", "DMIC3", "DMIC3"},
+ { "Txpath3 Capture Route", "DMIC3", "DMIC3"},
+ { "Txpath4 Capture Route", "DMIC3", "DMIC3"},
+ { "Txpath1 Capture Route", "DMIC4", "DMIC4"},
+ { "Txpath2 Capture Route", "DMIC4", "DMIC4"},
+ { "Txpath3 Capture Route", "DMIC4", "DMIC4"},
+ { "Txpath4 Capture Route", "DMIC4", "DMIC4"},
+ { "Txpath1 Capture Route", "DMIC5", "DMIC5"},
+ { "Txpath2 Capture Route", "DMIC5", "DMIC5"},
+ { "Txpath3 Capture Route", "DMIC5", "DMIC5"},
+ { "Txpath4 Capture Route", "DMIC5", "DMIC5"},
+ { "Txpath1 Capture Route", "DMIC6", "DMIC6"},
+ { "Txpath2 Capture Route", "DMIC6", "DMIC6"},
+ { "Txpath3 Capture Route", "DMIC6", "DMIC6"},
+ { "Txpath4 Capture Route", "DMIC6", "DMIC6"},
+
+ /* tx path */
+ { "TX1 Enable", NULL, "Txpath1 Capture Route"},
+ { "TX2 Enable", NULL, "Txpath2 Capture Route"},
+ { "TX3 Enable", NULL, "Txpath3 Capture Route"},
+ { "TX4 Enable", NULL, "Txpath4 Capture Route"},
+ { "PCM_Out", NULL, "TX1 Enable"},
+ { "PCM_Out", NULL, "TX2 Enable"},
+ { "PCM_Out", NULL, "TX3 Enable"},
+ { "PCM_Out", NULL, "TX4 Enable"},
+
+};
+
+/* speaker and headset mutes, to avoid audio pops and clicks */
+static int sn95031_pcm_hs_mute(struct snd_soc_dai *dai, int mute)
+{
+ snd_soc_update_bits(dai->codec,
+ SN95031_HSLVOLCTRL, BIT(7), (!mute << 7));
+ snd_soc_update_bits(dai->codec,
+ SN95031_HSRVOLCTRL, BIT(7), (!mute << 7));
+ return 0;
+}
+
+static int sn95031_pcm_spkr_mute(struct snd_soc_dai *dai, int mute)
+{
+ snd_soc_update_bits(dai->codec,
+ SN95031_IHFLVOLCTRL, BIT(7), (!mute << 7));
+ snd_soc_update_bits(dai->codec,
+ SN95031_IHFRVOLCTRL, BIT(7), (!mute << 7));
+ return 0;
+}
+
+int sn95031_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ unsigned int format, rate;
+
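+ /* PCM2C2 bits 5:4 select the PCM sample width */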
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ format = BIT(4)|BIT(5);
+ break;
+
+ case SNDRV_PCM_FORMAT_S24_LE:
+ format = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ snd_soc_update_bits(dai->codec, SN95031_PCM2C2,
+ BIT(4)|BIT(5), format);
+
+ switch (params_rate(params)) {
+ case 48000:
+ pr_debug("RATE_48000\n");
+ rate = 0;
+ break;
+
+ case 44100:
+ pr_debug("RATE_44100\n");
+ rate = BIT(7);
+ break;
+
+ default:
+ pr_err("ERR rate %d\n", params_rate(params));
+ return -EINVAL;
+ }
+ snd_soc_update_bits(dai->codec, SN95031_PCM1C1, BIT(7), rate);
+
+ return 0;
+}
+
+/* Codec DAI section */
+static struct snd_soc_dai_ops sn95031_headset_dai_ops = {
+ .digital_mute = sn95031_pcm_hs_mute,
+ .hw_params = sn95031_pcm_hw_params,
+};
+
+static struct snd_soc_dai_ops sn95031_speaker_dai_ops = {
+ .digital_mute = sn95031_pcm_spkr_mute,
+ .hw_params = sn95031_pcm_hw_params,
+};
+
+static struct snd_soc_dai_ops sn95031_vib1_dai_ops = {
+ .hw_params = sn95031_pcm_hw_params,
+};
+
+static struct snd_soc_dai_ops sn95031_vib2_dai_ops = {
+ .hw_params = sn95031_pcm_hw_params,
+};
+
+struct snd_soc_dai_driver sn95031_dais[] = {
+{
+ .name = "SN95031 Headset",
+ .playback = {
+ .stream_name = "Headset",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SN95031_RATES,
+ .formats = SN95031_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 5,
+ .rates = SN95031_RATES,
+ .formats = SN95031_FORMATS,
+ },
+ .ops = &sn95031_headset_dai_ops,
+},
+{ .name = "SN95031 Speaker",
+ .playback = {
+ .stream_name = "Speaker",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SN95031_RATES,
+ .formats = SN95031_FORMATS,
+ },
+ .ops = &sn95031_speaker_dai_ops,
+},
+{ .name = "SN95031 Vibra1",
+ .playback = {
+ .stream_name = "Vibra1",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SN95031_RATES,
+ .formats = SN95031_FORMATS,
+ },
+ .ops = &sn95031_vib1_dai_ops,
+},
+{ .name = "SN95031 Vibra2",
+ .playback = {
+ .stream_name = "Vibra2",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SN95031_RATES,
+ .formats = SN95031_FORMATS,
+ },
+ .ops = &sn95031_vib2_dai_ops,
+},
+};
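+/*
+ * A machine driver binds these DAIs by name. A minimal, hypothetical
+ * dai_link entry (all names below are illustrative, not taken from an
+ * actual board file) might look like:
+ *
+ *	static struct snd_soc_dai_link mfld_headset_link = {
+ *		.name = "Headset",
+ *		.stream_name = "Headset",
+ *		.cpu_dai_name = "Headset-cpu-dai",
+ *		.codec_dai_name = "SN95031 Headset",
+ *		.codec_name = "sn95031",
+ *		.platform_name = "sst-platform",
+ *	};
+ */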
+
+static inline void sn95031_disable_jack_btn(struct snd_soc_codec *codec)
+{
+ snd_soc_write(codec, SN95031_BTNCTRL2, 0x00);
+}
+
+static inline void sn95031_enable_jack_btn(struct snd_soc_codec *codec)
+{
+ snd_soc_write(codec, SN95031_BTNCTRL1, 0x77);
+ snd_soc_write(codec, SN95031_BTNCTRL2, 0x01);
+}
+
+static int sn95031_get_headset_state(struct snd_soc_jack *mfld_jack)
+{
+ int micbias = sn95031_get_mic_bias(mfld_jack->codec);
+
+ int jack_type = snd_soc_jack_get_type(mfld_jack, micbias);
+
+ pr_debug("jack type detected = %d\n", jack_type);
+ if (jack_type == SND_JACK_HEADSET)
+ sn95031_enable_jack_btn(mfld_jack->codec);
+ return jack_type;
+}
+
+void sn95031_jack_detection(struct mfld_jack_data *jack_data)
+{
+ unsigned int status;
+ unsigned int mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_HEADSET;
+
+ pr_debug("interrupt id read in sram = 0x%x\n", jack_data->intr_id);
+ if (jack_data->intr_id & 0x1) {
+ pr_debug("short_push detected\n");
+ status = SND_JACK_HEADSET | SND_JACK_BTN_0;
+ } else if (jack_data->intr_id & 0x2) {
+ pr_debug("long_push detected\n");
+ status = SND_JACK_HEADSET | SND_JACK_BTN_1;
+ } else if (jack_data->intr_id & 0x4) {
+ pr_debug("headset or headphones inserted\n");
+ status = sn95031_get_headset_state(jack_data->mfld_jack);
+ } else if (jack_data->intr_id & 0x8) {
+ pr_debug("headset or headphones removed\n");
+ status = 0;
+ sn95031_disable_jack_btn(jack_data->mfld_jack->codec);
+ } else {
+ pr_err("unidentified interrupt\n");
+ return;
+ }
+
+ snd_soc_jack_report(jack_data->mfld_jack, status, mask);
+ /* button pressed and released, so send an explicit button release */
+ if ((status & SND_JACK_BTN_0) | (status & SND_JACK_BTN_1))
+ snd_soc_jack_report(jack_data->mfld_jack,
+ SND_JACK_HEADSET, mask);
+}
+EXPORT_SYMBOL_GPL(sn95031_jack_detection);
+
+/* codec registration */
+static int sn95031_codec_probe(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ pr_debug("codec_probe called\n");
+
+ codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+ codec->dapm.idle_bias_off = 1;
+
+ /* PCM interface config
+ * This sets the pcm rx slot configuration to a max of 6 slots
+ * for a max of 4 dais (2 stereo and 2 mono)
+ */
+ snd_soc_write(codec, SN95031_PCM2RXSLOT01, 0x10);
+ snd_soc_write(codec, SN95031_PCM2RXSLOT23, 0x32);
+ snd_soc_write(codec, SN95031_PCM2RXSLOT45, 0x54);
+ snd_soc_write(codec, SN95031_PCM2TXSLOT01, 0x10);
+ snd_soc_write(codec, SN95031_PCM2TXSLOT23, 0x32);
+ /* pcm port setting
+ * This sets the pcm port to slave mode with a 19.2MHz clock, which
+ * can support 6 slots; the sampling rate is set per stream in hw_params
+ */
+ snd_soc_write(codec, SN95031_PCM1C1, 0x00);
+ snd_soc_write(codec, SN95031_PCM2C1, 0x01);
+ snd_soc_write(codec, SN95031_PCM2C2, 0x0A);
+ snd_soc_write(codec, SN95031_HSMIXER, BIT(0)|BIT(4));
+ /* vendor vibra workaround: the vibras are muted by
+ * a custom register, so unmute them
+ */
+ snd_soc_write(codec, SN95031_SSR5, 0x80);
+ snd_soc_write(codec, SN95031_SSR6, 0x80);
+ snd_soc_write(codec, SN95031_VIB1C5, 0x00);
+ snd_soc_write(codec, SN95031_VIB2C5, 0x00);
+ /* configure vibras for pcm port */
+ snd_soc_write(codec, SN95031_VIB1C3, 0x00);
+ snd_soc_write(codec, SN95031_VIB2C3, 0x00);
+
+ /* soft mute ramp time */
+ snd_soc_write(codec, SN95031_SOFTMUTE, 0x3);
+ /* fix the initial volume at 1dB;
+ * the default is +9dB, and
+ * 1dB gives optimal swing on the DAC and amps
+ */
+ snd_soc_write(codec, SN95031_HSLVOLCTRL, 0x08);
+ snd_soc_write(codec, SN95031_HSRVOLCTRL, 0x08);
+ snd_soc_write(codec, SN95031_IHFLVOLCTRL, 0x08);
+ snd_soc_write(codec, SN95031_IHFRVOLCTRL, 0x08);
+ /* dac mode and lineout workaround */
+ snd_soc_write(codec, SN95031_SSR2, 0x10);
+ snd_soc_write(codec, SN95031_SSR3, 0x40);
+
+ snd_soc_add_controls(codec, sn95031_snd_controls,
+ ARRAY_SIZE(sn95031_snd_controls));
+
+ ret = snd_soc_dapm_new_controls(&codec->dapm, sn95031_dapm_widgets,
+ ARRAY_SIZE(sn95031_dapm_widgets));
+ if (ret)
+ pr_err("soc_dapm_new_control failed %d", ret);
+ ret = snd_soc_dapm_add_routes(&codec->dapm, sn95031_audio_map,
+ ARRAY_SIZE(sn95031_audio_map));
+ if (ret)
+ pr_err("soc_dapm_add_routes failed %d", ret);
+
+ return ret;
+}
+
+static int sn95031_codec_remove(struct snd_soc_codec *codec)
+{
+ pr_debug("codec_remove called\n");
+ sn95031_set_vaud_bias(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+struct snd_soc_codec_driver sn95031_codec = {
+ .probe = sn95031_codec_probe,
+ .remove = sn95031_codec_remove,
+ .read = sn95031_read,
+ .write = sn95031_write,
+ .set_bias_level = sn95031_set_vaud_bias,
+};
+
+static int __devinit sn95031_device_probe(struct platform_device *pdev)
+{
+ pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
+ return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
+ sn95031_dais, ARRAY_SIZE(sn95031_dais));
+}
+
+static int __devexit sn95031_device_remove(struct platform_device *pdev)
+{
+ pr_debug("codec device remove called\n");
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver sn95031_codec_driver = {
+ .driver = {
+ .name = "sn95031",
+ .owner = THIS_MODULE,
+ },
+ .probe = sn95031_device_probe,
+ .remove = sn95031_device_remove,
+};
+
+static int __init sn95031_init(void)
+{
+ pr_debug("driver init called\n");
+ return platform_driver_register(&sn95031_codec_driver);
+}
+module_init(sn95031_init);
+
+static void __exit sn95031_exit(void)
+{
+ pr_debug("driver exit called\n");
+ platform_driver_unregister(&sn95031_codec_driver);
+}
+module_exit(sn95031_exit);
+
+MODULE_DESCRIPTION("ASoC TI SN95031 codec driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sn95031");
diff --git a/sound/soc/codecs/sn95031.h b/sound/soc/codecs/sn95031.h
new file mode 100644
index 000000000000..20376d234fb8
--- /dev/null
+++ b/sound/soc/codecs/sn95031.h
@@ -0,0 +1,132 @@
+/*
+ * sn95031.h - TI sn95031 Codec driver
+ *
+ * Copyright (C) 2010 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#ifndef _SN95031_H
+#define _SN95031_H
+
+/* register map */
+#define SN95031_VAUD 0xDB
+#define SN95031_VHSP 0xDC
+#define SN95031_VHSN 0xDD
+#define SN95031_VIHF 0xC9
+
+#define SN95031_AUDPLLCTRL 0x240
+#define SN95031_DMICBUF0123 0x241
+#define SN95031_DMICBUF45 0x242
+#define SN95031_DMICGPO 0x244
+#define SN95031_DMICMUX 0x245
+#define SN95031_DMICLK 0x246
+#define SN95031_MICBIAS 0x247
+#define SN95031_ADCCONFIG 0x248
+#define SN95031_MICAMP1 0x249
+#define SN95031_MICAMP2 0x24A
+#define SN95031_NOISEMUX 0x24B
+#define SN95031_AUDIOMUX12 0x24C
+#define SN95031_AUDIOMUX34 0x24D
+#define SN95031_AUDIOSINC 0x24E
+#define SN95031_AUDIOTXEN 0x24F
+#define SN95031_HSEPRXCTRL 0x250
+#define SN95031_IHFRXCTRL 0x251
+#define SN95031_HSMIXER 0x256
+#define SN95031_DACCONFIG 0x257
+#define SN95031_SOFTMUTE 0x258
+#define SN95031_HSLVOLCTRL 0x259
+#define SN95031_HSRVOLCTRL 0x25A
+#define SN95031_IHFLVOLCTRL 0x25B
+#define SN95031_IHFRVOLCTRL 0x25C
+#define SN95031_DRIVEREN 0x25D
+#define SN95031_LOCTL 0x25E
+#define SN95031_VIB1C1 0x25F
+#define SN95031_VIB1C2 0x260
+#define SN95031_VIB1C3 0x261
+#define SN95031_VIB1SPIPCM1 0x262
+#define SN95031_VIB1SPIPCM2 0x263
+#define SN95031_VIB1C5 0x264
+#define SN95031_VIB2C1 0x265
+#define SN95031_VIB2C2 0x266
+#define SN95031_VIB2C3 0x267
+#define SN95031_VIB2SPIPCM1 0x268
+#define SN95031_VIB2SPIPCM2 0x269
+#define SN95031_VIB2C5 0x26A
+#define SN95031_BTNCTRL1 0x26B
+#define SN95031_BTNCTRL2 0x26C
+#define SN95031_PCM1TXSLOT01 0x26D
+#define SN95031_PCM1TXSLOT23 0x26E
+#define SN95031_PCM1TXSLOT45 0x26F
+#define SN95031_PCM1RXSLOT0_3 0x270
+#define SN95031_PCM1RXSLOT45 0x271
+#define SN95031_PCM2TXSLOT01 0x272
+#define SN95031_PCM2TXSLOT23 0x273
+#define SN95031_PCM2TXSLOT45 0x274
+#define SN95031_PCM2RXSLOT01 0x275
+#define SN95031_PCM2RXSLOT23 0x276
+#define SN95031_PCM2RXSLOT45 0x277
+#define SN95031_PCM1C1 0x278
+#define SN95031_PCM1C2 0x279
+#define SN95031_PCM1C3 0x27A
+#define SN95031_PCM2C1 0x27B
+#define SN95031_PCM2C2 0x27C
+/* end of codec register definitions */
+
+/* vendor definitions; these are not part of avp */
+#define SN95031_SSR2 0x381
+#define SN95031_SSR3 0x382
+#define SN95031_SSR5 0x384
+#define SN95031_SSR6 0x385
+
+/* ADC registers */
+
+#define SN95031_ADC1CNTL1 0x1C0
+#define SN95031_ADC_ENBL 0x10
+#define SN95031_ADC_START 0x08
+#define SN95031_ADC1CNTL3 0x1C2
+#define SN95031_ADCTHERM_ENBL 0x04
+#define SN95031_ADCRRDATA_ENBL 0x05
+#define SN95031_STOPBIT_MASK 16
+#define SN95031_ADCTHERM_MASK 4
+#define SN95031_ADC_CHANLS_MAX 15 /* Number of ADC channels */
+#define SN95031_ADC_LOOP_MAX (SN95031_ADC_CHANLS_MAX - 1)
+#define SN95031_ADC_NO_LOOP 0x07
+#define SN95031_AUDIO_GPIO_CTRL 0x070
+
+/* ADC channel code values */
+#define SN95031_AUDIO_DETECT_CODE 0x06
+
+/* ADC base addresses */
+#define SN95031_ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
+#define SN95031_ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
+/* multiplier to convert to mV */
+#define SN95031_ADC_ONE_LSB_MULTIPLIER 2346
+
+
+struct mfld_jack_data {
+ int intr_id;
+ int micbias_vol;
+ struct snd_soc_jack *mfld_jack;
+};
+
+extern void sn95031_jack_detection(struct mfld_jack_data *jack_data);
+
+#endif
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index e4d464b937d6..8512800f6326 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -26,6 +26,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c/twl.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -732,7 +733,8 @@ static int aif_event(struct snd_soc_dapm_widget *w,
static void headset_ramp(struct snd_soc_codec *codec, int ramp)
{
- struct twl4030_codec_audio_data *pdata = codec->dev->platform_data;
+ struct twl4030_codec_audio_data *pdata =
+ mfd_get_data(to_platform_device(codec->dev));
unsigned char hs_gain, hs_pop;
struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
/* Base values for ramp delay calculation: 2^19 - 2^26 */
@@ -2297,7 +2299,7 @@ static struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
static int __devinit twl4030_codec_probe(struct platform_device *pdev)
{
- struct twl4030_codec_audio_data *pdata = pdev->dev.platform_data;
+ struct twl4030_codec_audio_data *pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "platform_data is missing\n");
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 4bbf1b15a493..482fcdb59bfa 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -724,8 +724,8 @@ static int twl6040_power_mode_event(struct snd_soc_dapm_widget *w,
return 0;
}
-void twl6040_hs_jack_report(struct snd_soc_codec *codec,
- struct snd_soc_jack *jack, int report)
+static void twl6040_hs_jack_report(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, int report)
{
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int status;
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 861b28f543d2..1ad0d5aecece 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -436,7 +436,8 @@ EXPORT_SYMBOL_GPL(wl1273_get_format);
static int wl1273_probe(struct snd_soc_codec *codec)
{
- struct wl1273_core **core = codec->dev->platform_data;
+ struct wl1273_core **core =
+ mfd_get_data(to_platform_device(codec->dev));
struct wl1273_priv *wl1273;
int r;
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 80ddf4fd23db..a3b9cbb20ee9 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -836,24 +836,25 @@ static void wm2000_i2c_shutdown(struct i2c_client *i2c)
}
#ifdef CONFIG_PM
-static int wm2000_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+static int wm2000_i2c_suspend(struct device *dev)
{
+ struct i2c_client *i2c = to_i2c_client(dev);
struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
return wm2000_anc_transition(wm2000, ANC_OFF);
}
-static int wm2000_i2c_resume(struct i2c_client *i2c)
+static int wm2000_i2c_resume(struct device *dev)
{
+ struct i2c_client *i2c = to_i2c_client(dev);
struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
return wm2000_anc_set_mode(wm2000);
}
-#else
-#define wm2000_i2c_suspend NULL
-#define wm2000_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(wm2000_pm, wm2000_i2c_suspend, wm2000_i2c_resume);
+
static const struct i2c_device_id wm2000_i2c_id[] = {
{ "wm2000", 0 },
{ }
@@ -864,11 +865,10 @@ static struct i2c_driver wm2000_i2c_driver = {
.driver = {
.name = "wm2000",
.owner = THIS_MODULE,
+ .pm = &wm2000_pm,
},
.probe = wm2000_i2c_probe,
.remove = __devexit_p(wm2000_i2c_remove),
- .suspend = wm2000_i2c_suspend,
- .resume = wm2000_i2c_resume,
.shutdown = wm2000_i2c_shutdown,
.id_table = wm2000_i2c_id,
};
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 3c3bc079167e..736b785e3756 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -22,6 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mfd/wm8400-audio.h>
#include <linux/mfd/wm8400-private.h>
+#include <linux/mfd/core.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -1377,7 +1378,7 @@ static void wm8400_probe_deferred(struct work_struct *work)
static int wm8400_codec_probe(struct snd_soc_codec *codec)
{
- struct wm8400 *wm8400 = dev_get_platdata(codec->dev);
+ struct wm8400 *wm8400 = mfd_get_data(to_platform_device(codec->dev));
struct wm8400_priv *priv;
int ret;
u16 reg;
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 5eb2f501ce32..4fd4d8dca0fc 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -58,7 +58,7 @@ static const u16 wm8523_reg[WM8523_REGISTER_COUNT] = {
0x0000, /* R8 - ZERO_DETECT */
};
-static int wm8523_volatile_register(unsigned int reg)
+static int wm8523_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8523_DEVICE_ID:
@@ -414,7 +414,6 @@ static int wm8523_resume(struct snd_soc_codec *codec)
static int wm8523_probe(struct snd_soc_codec *codec)
{
struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec);
- u16 *reg_cache = codec->reg_cache;
int ret, i;
codec->hw_write = (hw_write_t)i2c_master_send;
@@ -471,8 +470,9 @@ static int wm8523_probe(struct snd_soc_codec *codec)
}
/* Change some default settings - latch VU and enable ZC */
- reg_cache[WM8523_DAC_GAINR] |= WM8523_DACR_VU;
- reg_cache[WM8523_DAC_CTRL3] |= WM8523_ZC;
+ snd_soc_update_bits(codec, WM8523_DAC_GAINR,
+ WM8523_DACR_VU, WM8523_DACR_VU);
+ snd_soc_update_bits(codec, WM8523_DAC_CTRL3, WM8523_ZC, WM8523_ZC);
wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 494f2d31d75b..25af901fe813 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -421,7 +421,6 @@ static int wm8741_resume(struct snd_soc_codec *codec)
static int wm8741_probe(struct snd_soc_codec *codec)
{
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
- u16 *reg_cache = codec->reg_cache;
int ret = 0;
ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8741->control_type);
@@ -437,10 +436,14 @@ static int wm8741_probe(struct snd_soc_codec *codec)
}
/* Change some default settings - latch VU */
- reg_cache[WM8741_DACLLSB_ATTENUATION] |= WM8741_UPDATELL;
- reg_cache[WM8741_DACLMSB_ATTENUATION] |= WM8741_UPDATELM;
- reg_cache[WM8741_DACRLSB_ATTENUATION] |= WM8741_UPDATERL;
- reg_cache[WM8741_DACRLSB_ATTENUATION] |= WM8741_UPDATERM;
+ snd_soc_update_bits(codec, WM8741_DACLLSB_ATTENUATION,
+ WM8741_UPDATELL, WM8741_UPDATELL);
+ snd_soc_update_bits(codec, WM8741_DACLMSB_ATTENUATION,
+ WM8741_UPDATELM, WM8741_UPDATELM);
+ snd_soc_update_bits(codec, WM8741_DACRLSB_ATTENUATION,
+ WM8741_UPDATERL, WM8741_UPDATERL);
+ snd_soc_update_bits(codec, WM8741_DACRLSB_ATTENUATION,
+ WM8741_UPDATERM, WM8741_UPDATERM);
snd_soc_add_controls(codec, wm8741_snd_controls,
ARRAY_SIZE(wm8741_snd_controls));
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 79b02ae125c5..3f09deea8d9d 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -55,8 +55,10 @@ static int caps_charge = 2000;
module_param(caps_charge, int, 0);
MODULE_PARM_DESC(caps_charge, "WM8753 cap charge time (msecs)");
-static void wm8753_set_dai_mode(struct snd_soc_codec *codec,
- struct snd_soc_dai *dai, unsigned int hifi);
+static int wm8753_hifi_write_dai_fmt(struct snd_soc_codec *codec,
+ unsigned int fmt);
+static int wm8753_voice_write_dai_fmt(struct snd_soc_codec *codec,
+ unsigned int fmt);
/*
* wm8753 register cache
@@ -87,6 +89,10 @@ struct wm8753_priv {
enum snd_soc_control_type control_type;
unsigned int sysclk;
unsigned int pcmclk;
+
+ unsigned int voice_fmt;
+ unsigned int hifi_fmt;
+
int dai_func;
};
@@ -170,9 +176,9 @@ static int wm8753_get_dai(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- int mode = snd_soc_read(codec, WM8753_IOCTL);
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
- ucontrol->value.integer.value[0] = (mode & 0xc) >> 2;
+ ucontrol->value.integer.value[0] = wm8753->dai_func;
return 0;
}
@@ -180,16 +186,26 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- int mode = snd_soc_read(codec, WM8753_IOCTL);
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+ u16 ioctl;
+
+ if (codec->active)
+ return -EBUSY;
+
+ ioctl = snd_soc_read(codec, WM8753_IOCTL);
+
+ wm8753->dai_func = ucontrol->value.integer.value[0];
+
+ if (((ioctl >> 2) & 0x3) == wm8753->dai_func)
+ return 1;
+
+ ioctl = (ioctl & 0x1f3) | (wm8753->dai_func << 2);
+ snd_soc_write(codec, WM8753_IOCTL, ioctl);
- if (((mode & 0xc) >> 2) == ucontrol->value.integer.value[0])
- return 0;
- mode &= 0xfff3;
- mode |= (ucontrol->value.integer.value[0] << 2);
+ wm8753_hifi_write_dai_fmt(codec, wm8753->hifi_fmt);
+ wm8753_voice_write_dai_fmt(codec, wm8753->voice_fmt);
- wm8753->dai_func = ucontrol->value.integer.value[0];
return 1;
}
@@ -828,10 +844,9 @@ static int wm8753_set_dai_sysclk(struct snd_soc_dai *codec_dai,
/*
* Set's ADC and Voice DAC format.
*/
-static int wm8753_vdac_adc_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_vdac_adc_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 voice = snd_soc_read(codec, WM8753_PCM) & 0x01ec;
/* interface format */
@@ -858,13 +873,6 @@ static int wm8753_vdac_adc_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
-static int wm8753_pcm_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- wm8753_set_dai_mode(dai->codec, dai, 0);
- return 0;
-}
-
/*
* Set PCM DAI bit size and sample rate.
*/
@@ -905,10 +913,9 @@ static int wm8753_pcm_hw_params(struct snd_pcm_substream *substream,
/*
* Set's PCM dai fmt and BCLK.
*/
-static int wm8753_pcm_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_pcm_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 voice, ioctl;
voice = snd_soc_read(codec, WM8753_PCM) & 0x011f;
@@ -999,10 +1006,9 @@ static int wm8753_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
/*
* Set's HiFi DAC format.
*/
-static int wm8753_hdac_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_hdac_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 hifi = snd_soc_read(codec, WM8753_HIFI) & 0x01e0;
/* interface format */
@@ -1032,10 +1038,9 @@ static int wm8753_hdac_set_dai_fmt(struct snd_soc_dai *codec_dai,
/*
* Set's I2S DAI format.
*/
-static int wm8753_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_i2s_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 ioctl, hifi;
hifi = snd_soc_read(codec, WM8753_HIFI) & 0x011f;
@@ -1098,13 +1103,6 @@ static int wm8753_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
-static int wm8753_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- wm8753_set_dai_mode(dai->codec, dai, 1);
- return 0;
-}
-
/*
* Set PCM DAI bit size and sample rate.
*/
@@ -1147,61 +1145,117 @@ static int wm8753_i2s_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int wm8753_mode1v_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_mode1v_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 clock;
/* set clk source as pcmclk */
clock = snd_soc_read(codec, WM8753_CLOCK) & 0xfffb;
snd_soc_write(codec, WM8753_CLOCK, clock);
- if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0)
- return -EINVAL;
- return wm8753_pcm_set_dai_fmt(codec_dai, fmt);
+ return wm8753_vdac_adc_set_dai_fmt(codec, fmt);
}
-static int wm8753_mode1h_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_mode1h_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0)
- return -EINVAL;
- return wm8753_i2s_set_dai_fmt(codec_dai, fmt);
+ return wm8753_hdac_set_dai_fmt(codec, fmt);
}
-static int wm8753_mode2_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_mode2_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 clock;
/* set clk source as pcmclk */
clock = snd_soc_read(codec, WM8753_CLOCK) & 0xfffb;
snd_soc_write(codec, WM8753_CLOCK, clock);
- if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0)
- return -EINVAL;
- return wm8753_i2s_set_dai_fmt(codec_dai, fmt);
+ return wm8753_vdac_adc_set_dai_fmt(codec, fmt);
}
-static int wm8753_mode3_4_set_dai_fmt(struct snd_soc_dai *codec_dai,
+static int wm8753_mode3_4_set_dai_fmt(struct snd_soc_codec *codec,
unsigned int fmt)
{
- struct snd_soc_codec *codec = codec_dai->codec;
u16 clock;
/* set clk source as mclk */
clock = snd_soc_read(codec, WM8753_CLOCK) & 0xfffb;
snd_soc_write(codec, WM8753_CLOCK, clock | 0x4);
- if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0)
+ if (wm8753_hdac_set_dai_fmt(codec, fmt) < 0)
return -EINVAL;
- if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0)
- return -EINVAL;
- return wm8753_i2s_set_dai_fmt(codec_dai, fmt);
+ return wm8753_vdac_adc_set_dai_fmt(codec, fmt);
}
+static int wm8753_hifi_write_dai_fmt(struct snd_soc_codec *codec,
+ unsigned int fmt)
+{
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ switch (wm8753->dai_func) {
+ case 0:
+ ret = wm8753_mode1h_set_dai_fmt(codec, fmt);
+ break;
+ case 1:
+ ret = wm8753_mode2_set_dai_fmt(codec, fmt);
+ break;
+ case 2:
+ case 3:
+ ret = wm8753_mode3_4_set_dai_fmt(codec, fmt);
+ break;
+ default:
+ break;
+ }
+ if (ret)
+ return ret;
+
+ return wm8753_i2s_set_dai_fmt(codec, fmt);
+}
+
+static int wm8753_hifi_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+
+ wm8753->hifi_fmt = fmt;
+
+ return wm8753_hifi_write_dai_fmt(codec, fmt);
+};
+
+static int wm8753_voice_write_dai_fmt(struct snd_soc_codec *codec,
+ unsigned int fmt)
+{
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ if (wm8753->dai_func != 0)
+ return 0;
+
+ ret = wm8753_mode1v_set_dai_fmt(codec, fmt);
+ if (ret)
+ return ret;
+ ret = wm8753_pcm_set_dai_fmt(codec, fmt);
+ if (ret)
+ return ret;
+
+ return 0;
+};
+
+static int wm8753_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+
+ wm8753->voice_fmt = fmt;
+
+ return wm8753_voice_write_dai_fmt(codec, fmt);
+};
+
static int wm8753_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
@@ -1268,57 +1322,25 @@ static int wm8753_set_bias_level(struct snd_soc_codec *codec,
* 3. Voice disabled - HIFI over HIFI
* 4. Voice disabled - HIFI over HIFI, uses voice DAI LRC for capture
*/
-static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode1 = {
- .startup = wm8753_i2s_startup,
+static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode = {
.hw_params = wm8753_i2s_hw_params,
.digital_mute = wm8753_mute,
- .set_fmt = wm8753_mode1h_set_dai_fmt,
- .set_clkdiv = wm8753_set_dai_clkdiv,
- .set_pll = wm8753_set_dai_pll,
- .set_sysclk = wm8753_set_dai_sysclk,
-};
-
-static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode1 = {
- .startup = wm8753_pcm_startup,
- .hw_params = wm8753_pcm_hw_params,
- .digital_mute = wm8753_mute,
- .set_fmt = wm8753_mode1v_set_dai_fmt,
+ .set_fmt = wm8753_hifi_set_dai_fmt,
.set_clkdiv = wm8753_set_dai_clkdiv,
.set_pll = wm8753_set_dai_pll,
.set_sysclk = wm8753_set_dai_sysclk,
};
-static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode2 = {
- .startup = wm8753_pcm_startup,
+static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode = {
.hw_params = wm8753_pcm_hw_params,
.digital_mute = wm8753_mute,
- .set_fmt = wm8753_mode2_set_dai_fmt,
- .set_clkdiv = wm8753_set_dai_clkdiv,
- .set_pll = wm8753_set_dai_pll,
- .set_sysclk = wm8753_set_dai_sysclk,
-};
-
-static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode3 = {
- .startup = wm8753_i2s_startup,
- .hw_params = wm8753_i2s_hw_params,
- .digital_mute = wm8753_mute,
- .set_fmt = wm8753_mode3_4_set_dai_fmt,
- .set_clkdiv = wm8753_set_dai_clkdiv,
- .set_pll = wm8753_set_dai_pll,
- .set_sysclk = wm8753_set_dai_sysclk,
-};
-
-static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode4 = {
- .startup = wm8753_i2s_startup,
- .hw_params = wm8753_i2s_hw_params,
- .digital_mute = wm8753_mute,
- .set_fmt = wm8753_mode3_4_set_dai_fmt,
+ .set_fmt = wm8753_voice_set_dai_fmt,
.set_clkdiv = wm8753_set_dai_clkdiv,
.set_pll = wm8753_set_dai_pll,
.set_sysclk = wm8753_set_dai_sysclk,
};
-static struct snd_soc_dai_driver wm8753_all_dai[] = {
+static struct snd_soc_dai_driver wm8753_dai[] = {
/* DAI HiFi mode 1 */
{ .name = "wm8753-hifi",
.playback = {
@@ -1326,14 +1348,16 @@ static struct snd_soc_dai_driver wm8753_all_dai[] = {
.channels_min = 1,
.channels_max = 2,
.rates = WM8753_RATES,
- .formats = WM8753_FORMATS},
+ .formats = WM8753_FORMATS
+ },
.capture = { /* dummy for fast DAI switching */
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM8753_RATES,
- .formats = WM8753_FORMATS},
- .ops = &wm8753_dai_ops_hifi_mode1,
+ .formats = WM8753_FORMATS
+ },
+ .ops = &wm8753_dai_ops_hifi_mode,
},
/* DAI Voice mode 1 */
{ .name = "wm8753-voice",
@@ -1342,97 +1366,19 @@ static struct snd_soc_dai_driver wm8753_all_dai[] = {
.channels_min = 1,
.channels_max = 1,
.rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .ops = &wm8753_dai_ops_voice_mode1,
-},
-/* DAI HiFi mode 2 - dummy */
-{ .name = "wm8753-hifi",
-},
-/* DAI Voice mode 2 */
-{ .name = "wm8753-voice",
- .playback = {
- .stream_name = "Voice Playback",
- .channels_min = 1,
- .channels_max = 1,
- .rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .ops = &wm8753_dai_ops_voice_mode2,
-},
-/* DAI HiFi mode 3 */
-{ .name = "wm8753-hifi",
- .playback = {
- .stream_name = "HiFi Playback",
- .channels_min = 1,
- .channels_max = 2,
- .rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .ops = &wm8753_dai_ops_hifi_mode3,
-},
-/* DAI Voice mode 3 - dummy */
-{ .name = "wm8753-voice",
-},
-/* DAI HiFi mode 4 */
-{ .name = "wm8753-hifi",
- .playback = {
- .stream_name = "HiFi Playback",
- .channels_min = 1,
- .channels_max = 2,
- .rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
+ .formats = WM8753_FORMATS,
+ },
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM8753_RATES,
- .formats = WM8753_FORMATS,},
- .ops = &wm8753_dai_ops_hifi_mode4,
-},
-/* DAI Voice mode 4 - dummy */
-{ .name = "wm8753-voice",
-},
-};
-
-static struct snd_soc_dai_driver wm8753_dai[] = {
- {
- .name = "wm8753-aif0",
- },
- {
- .name = "wm8753-aif1",
+ .formats = WM8753_FORMATS,
},
+ .ops = &wm8753_dai_ops_voice_mode,
+},
};
-static void wm8753_set_dai_mode(struct snd_soc_codec *codec,
- struct snd_soc_dai *dai, unsigned int hifi)
-{
- struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
-
- if (wm8753->dai_func < 4) {
- if (hifi)
- dai->driver = &wm8753_all_dai[wm8753->dai_func << 1];
- else
- dai->driver = &wm8753_all_dai[(wm8753->dai_func << 1) + 1];
- }
- snd_soc_write(codec, WM8753_IOCTL, wm8753->dai_func);
-}
-
static void wm8753_work(struct work_struct *work)
{
struct snd_soc_dapm_context *dapm =
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 6dae1b40c9f7..6785688f8806 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -175,7 +175,7 @@ static int txsrc_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static int wm8804_volatile(unsigned int reg)
+static int wm8804_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8804_RST_DEVID1:
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index cd0959926d12..449ea09a193d 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -180,7 +180,7 @@ static const u16 wm8900_reg_defaults[WM8900_MAXREG] = {
/* Remaining registers all zero */
};
-static int wm8900_volatile_register(unsigned int reg)
+static int wm8900_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8900_REG_ID:
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 987476a5895f..ae1cadfae84c 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -2,6 +2,7 @@
* wm8903.c -- WM8903 ALSA SoC Audio driver
*
* Copyright 2008 Wolfson Microelectronics
+ * Copyright 2011 NVIDIA, Inc.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -19,6 +20,7 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
@@ -213,6 +215,7 @@ static u16 wm8903_reg_defaults[] = {
};
struct wm8903_priv {
+ struct snd_soc_codec *codec;
int sysclk;
int irq;
@@ -220,25 +223,36 @@ struct wm8903_priv {
int fs;
int deemph;
+ int dcs_pending;
+ int dcs_cache[4];
+
/* Reference count */
int class_w_users;
- struct completion wseq;
-
struct snd_soc_jack *mic_jack;
int mic_det;
int mic_short;
int mic_last_report;
int mic_delay;
+
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio_chip;
+#endif
};
-static int wm8903_volatile_register(unsigned int reg)
+static int wm8903_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8903_SW_RESET_AND_ID:
case WM8903_REVISION_NUMBER:
case WM8903_INTERRUPT_STATUS_1:
case WM8903_WRITE_SEQUENCER_4:
+ case WM8903_POWER_MANAGEMENT_3:
+ case WM8903_POWER_MANAGEMENT_2:
+ case WM8903_DC_SERVO_READBACK_1:
+ case WM8903_DC_SERVO_READBACK_2:
+ case WM8903_DC_SERVO_READBACK_3:
+ case WM8903_DC_SERVO_READBACK_4:
return 1;
default:
@@ -246,50 +260,6 @@ static int wm8903_volatile_register(unsigned int reg)
}
}
-static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
-{
- u16 reg[5];
- struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-
- BUG_ON(start > 48);
-
- /* Enable the sequencer if it's not already on */
- reg[0] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_0);
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0,
- reg[0] | WM8903_WSEQ_ENA);
-
- dev_dbg(codec->dev, "Starting sequence at %d\n", start);
-
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_3,
- start | WM8903_WSEQ_START);
-
- /* Wait for it to complete. If we have the interrupt wired up then
- * that will break us out of the poll early.
- */
- do {
- wait_for_completion_timeout(&wm8903->wseq,
- msecs_to_jiffies(10));
-
- reg[4] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_4);
- } while (reg[4] & WM8903_WSEQ_BUSY);
-
- dev_dbg(codec->dev, "Sequence complete\n");
-
- /* Disable the sequencer again if we enabled it */
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]);
-
- return 0;
-}
-
-static void wm8903_sync_reg_cache(struct snd_soc_codec *codec, u16 *cache)
-{
- int i;
-
- /* There really ought to be something better we can do here :/ */
- for (i = 0; i < ARRAY_SIZE(wm8903_reg_defaults); i++)
- cache[i] = codec->hw_read(codec, i);
-}
-
static void wm8903_reset(struct snd_soc_codec *codec)
{
snd_soc_write(codec, WM8903_SW_RESET_AND_ID, 0);
@@ -297,11 +267,6 @@ static void wm8903_reset(struct snd_soc_codec *codec)
sizeof(wm8903_reg_defaults));
}
-#define WM8903_OUTPUT_SHORT 0x8
-#define WM8903_OUTPUT_OUT 0x4
-#define WM8903_OUTPUT_INT 0x2
-#define WM8903_OUTPUT_IN 0x1
-
static int wm8903_cp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -311,97 +276,101 @@ static int wm8903_cp_event(struct snd_soc_dapm_widget *w,
return 0;
}
-/*
- * Event for headphone and line out amplifier power changes. Special
- * power up/down sequences are required in order to maximise pop/click
- * performance.
- */
-static int wm8903_output_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+static int wm8903_dcs_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- u16 val;
- u16 reg;
- u16 dcs_reg;
- u16 dcs_bit;
- int shift;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
- switch (w->reg) {
- case WM8903_POWER_MANAGEMENT_2:
- reg = WM8903_ANALOGUE_HP_0;
- dcs_bit = 0 + w->shift;
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ wm8903->dcs_pending |= 1 << w->shift;
break;
- case WM8903_POWER_MANAGEMENT_3:
- reg = WM8903_ANALOGUE_LINEOUT_0;
- dcs_bit = 2 + w->shift;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, WM8903_DC_SERVO_0,
+ 1 << w->shift, 0);
break;
- default:
- BUG();
- return -EINVAL; /* Spurious warning from some compilers */
}
- switch (w->shift) {
- case 0:
- shift = 0;
- break;
- case 1:
- shift = 4;
- break;
- default:
- BUG();
- return -EINVAL; /* Spurious warning from some compilers */
- }
+ return 0;
+}
- if (event & SND_SOC_DAPM_PRE_PMU) {
- val = snd_soc_read(codec, reg);
+#define WM8903_DCS_MODE_WRITE_STOP 0
+#define WM8903_DCS_MODE_START_STOP 2
- /* Short the output */
- val &= ~(WM8903_OUTPUT_SHORT << shift);
- snd_soc_write(codec, reg, val);
- }
+static void wm8903_seq_notifier(struct snd_soc_dapm_context *dapm,
+ enum snd_soc_dapm_type event, int subseq)
+{
+ struct snd_soc_codec *codec = container_of(dapm,
+ struct snd_soc_codec, dapm);
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+ int dcs_mode = WM8903_DCS_MODE_WRITE_STOP;
+ int i, val;
- if (event & SND_SOC_DAPM_POST_PMU) {
- val = snd_soc_read(codec, reg);
+ /* Complete any pending DC servo starts */
+ if (wm8903->dcs_pending) {
+ dev_dbg(codec->dev, "Starting DC servo for %x\n",
+ wm8903->dcs_pending);
- val |= (WM8903_OUTPUT_IN << shift);
- snd_soc_write(codec, reg, val);
+ /* If we've no cached values then we need to do startup */
+ for (i = 0; i < ARRAY_SIZE(wm8903->dcs_cache); i++) {
+ if (!(wm8903->dcs_pending & (1 << i)))
+ continue;
- val |= (WM8903_OUTPUT_INT << shift);
- snd_soc_write(codec, reg, val);
+ if (wm8903->dcs_cache[i]) {
+ dev_dbg(codec->dev,
+ "Restore DC servo %d value %x\n",
+ 3 - i, wm8903->dcs_cache[i]);
+
+ snd_soc_write(codec, WM8903_DC_SERVO_4 + i,
+ wm8903->dcs_cache[i] & 0xff);
+ } else {
+ dev_dbg(codec->dev,
+ "Calibrate DC servo %d\n", 3 - i);
+ dcs_mode = WM8903_DCS_MODE_START_STOP;
+ }
+ }
- /* Turn on the output ENA_OUTP */
- val |= (WM8903_OUTPUT_OUT << shift);
- snd_soc_write(codec, reg, val);
+ /* Don't trust the cache for analogue */
+ if (wm8903->class_w_users)
+ dcs_mode = WM8903_DCS_MODE_START_STOP;
- /* Enable the DC servo */
- dcs_reg = snd_soc_read(codec, WM8903_DC_SERVO_0);
- dcs_reg |= dcs_bit;
- snd_soc_write(codec, WM8903_DC_SERVO_0, dcs_reg);
+ snd_soc_update_bits(codec, WM8903_DC_SERVO_2,
+ WM8903_DCS_MODE_MASK, dcs_mode);
- /* Remove the short */
- val |= (WM8903_OUTPUT_SHORT << shift);
- snd_soc_write(codec, reg, val);
- }
+ snd_soc_update_bits(codec, WM8903_DC_SERVO_0,
+ WM8903_DCS_ENA_MASK, wm8903->dcs_pending);
- if (event & SND_SOC_DAPM_PRE_PMD) {
- val = snd_soc_read(codec, reg);
+ switch (dcs_mode) {
+ case WM8903_DCS_MODE_WRITE_STOP:
+ break;
- /* Short the output */
- val &= ~(WM8903_OUTPUT_SHORT << shift);
- snd_soc_write(codec, reg, val);
+ case WM8903_DCS_MODE_START_STOP:
+ msleep(270);
- /* Disable the DC servo */
- dcs_reg = snd_soc_read(codec, WM8903_DC_SERVO_0);
- dcs_reg &= ~dcs_bit;
- snd_soc_write(codec, WM8903_DC_SERVO_0, dcs_reg);
+ /* Cache the measured offsets for digital */
+ if (wm8903->class_w_users)
+ break;
- /* Then disable the intermediate and output stages */
- val &= ~((WM8903_OUTPUT_OUT | WM8903_OUTPUT_INT |
- WM8903_OUTPUT_IN) << shift);
- snd_soc_write(codec, reg, val);
- }
+ for (i = 0; i < ARRAY_SIZE(wm8903->dcs_cache); i++) {
+ if (!(wm8903->dcs_pending & (1 << i)))
+ continue;
- return 0;
+ val = snd_soc_read(codec,
+ WM8903_DC_SERVO_READBACK_1 + i);
+ dev_dbg(codec->dev, "DC servo %d: %x\n",
+ 3 - i, val);
+ wm8903->dcs_cache[i] = val;
+ }
+ break;
+
+ default:
+ pr_warn("DCS mode %d delay not set\n", dcs_mode);
+ break;
+ }
+
+ wm8903->dcs_pending = 0;
+ }
}
/*
@@ -667,6 +636,22 @@ static const struct soc_enum lsidetone_enum =
static const struct soc_enum rsidetone_enum =
SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_0, 0, 3, sidetone_text);
+static const char *aif_text[] = {
+ "Left", "Right"
+};
+
+static const struct soc_enum lcapture_enum =
+ SOC_ENUM_SINGLE(WM8903_AUDIO_INTERFACE_0, 7, 2, aif_text);
+
+static const struct soc_enum rcapture_enum =
+ SOC_ENUM_SINGLE(WM8903_AUDIO_INTERFACE_0, 6, 2, aif_text);
+
+static const struct soc_enum lplay_enum =
+ SOC_ENUM_SINGLE(WM8903_AUDIO_INTERFACE_0, 5, 2, aif_text);
+
+static const struct soc_enum rplay_enum =
+ SOC_ENUM_SINGLE(WM8903_AUDIO_INTERFACE_0, 4, 2, aif_text);
+
static const struct snd_kcontrol_new wm8903_snd_controls[] = {
/* Input PGAs - No TLV since the scale depends on PGA mode */
@@ -784,6 +769,18 @@ static const struct snd_kcontrol_new lsidetone_mux =
static const struct snd_kcontrol_new rsidetone_mux =
SOC_DAPM_ENUM("DACR Sidetone Mux", rsidetone_enum);
+static const struct snd_kcontrol_new lcapture_mux =
+ SOC_DAPM_ENUM("Left Capture Mux", lcapture_enum);
+
+static const struct snd_kcontrol_new rcapture_mux =
+ SOC_DAPM_ENUM("Right Capture Mux", rcapture_enum);
+
+static const struct snd_kcontrol_new lplay_mux =
+ SOC_DAPM_ENUM("Left Playback Mux", lplay_enum);
+
+static const struct snd_kcontrol_new rplay_mux =
+ SOC_DAPM_ENUM("Right Playback Mux", rplay_enum);
+
static const struct snd_kcontrol_new left_output_mixer[] = {
SOC_DAPM_SINGLE("DACL Switch", WM8903_ANALOGUE_LEFT_MIX_0, 3, 1, 0),
SOC_DAPM_SINGLE("DACR Switch", WM8903_ANALOGUE_LEFT_MIX_0, 2, 1, 0),
@@ -847,14 +844,26 @@ SND_SOC_DAPM_MUX("Right Input Mode Mux", SND_SOC_NOPM, 0, 0, &rinput_mode_mux),
SND_SOC_DAPM_PGA("Left Input PGA", WM8903_POWER_MANAGEMENT_0, 1, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Input PGA", WM8903_POWER_MANAGEMENT_0, 0, 0, NULL, 0),
-SND_SOC_DAPM_ADC("ADCL", "Left HiFi Capture", WM8903_POWER_MANAGEMENT_6, 1, 0),
-SND_SOC_DAPM_ADC("ADCR", "Right HiFi Capture", WM8903_POWER_MANAGEMENT_6, 0, 0),
+SND_SOC_DAPM_ADC("ADCL", NULL, WM8903_POWER_MANAGEMENT_6, 1, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, WM8903_POWER_MANAGEMENT_6, 0, 0),
+
+SND_SOC_DAPM_MUX("Left Capture Mux", SND_SOC_NOPM, 0, 0, &lcapture_mux),
+SND_SOC_DAPM_MUX("Right Capture Mux", SND_SOC_NOPM, 0, 0, &rcapture_mux),
+
+SND_SOC_DAPM_AIF_OUT("AIFTXL", "Left HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIFTXR", "Right HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &lsidetone_mux),
SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &rsidetone_mux),
-SND_SOC_DAPM_DAC("DACL", "Left Playback", WM8903_POWER_MANAGEMENT_6, 3, 0),
-SND_SOC_DAPM_DAC("DACR", "Right Playback", WM8903_POWER_MANAGEMENT_6, 2, 0),
+SND_SOC_DAPM_AIF_IN("AIFRXL", "Left Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_IN("AIFRXR", "Right Playback", 0, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_MUX("Left Playback Mux", SND_SOC_NOPM, 0, 0, &lplay_mux),
+SND_SOC_DAPM_MUX("Right Playback Mux", SND_SOC_NOPM, 0, 0, &rplay_mux),
+
+SND_SOC_DAPM_DAC("DACL", NULL, WM8903_POWER_MANAGEMENT_6, 3, 0),
+SND_SOC_DAPM_DAC("DACR", NULL, WM8903_POWER_MANAGEMENT_6, 2, 0),
SND_SOC_DAPM_MIXER("Left Output Mixer", WM8903_POWER_MANAGEMENT_1, 1, 0,
left_output_mixer, ARRAY_SIZE(left_output_mixer)),
@@ -866,23 +875,45 @@ SND_SOC_DAPM_MIXER("Left Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 1, 0,
SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0,
right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
-SND_SOC_DAPM_PGA_E("Left Headphone Output PGA", WM8903_POWER_MANAGEMENT_2,
- 1, 0, NULL, 0, wm8903_output_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
-SND_SOC_DAPM_PGA_E("Right Headphone Output PGA", WM8903_POWER_MANAGEMENT_2,
- 0, 0, NULL, 0, wm8903_output_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
-
-SND_SOC_DAPM_PGA_E("Left Line Output PGA", WM8903_POWER_MANAGEMENT_3, 1, 0,
- NULL, 0, wm8903_output_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
-SND_SOC_DAPM_PGA_E("Right Line Output PGA", WM8903_POWER_MANAGEMENT_3, 0, 0,
- NULL, 0, wm8903_output_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
+ 4, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
+ 0, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA_S("HPL_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 7, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 3, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 2, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA_S("LINEOUTL_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 7, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 6, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 3, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 2, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_SUPPLY("DCS Master", WM8903_DC_SERVO_0, 4, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_DCS", 3, SND_SOC_NOPM, 3, 0, wm8903_dcs_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_S("HPR_DCS", 3, SND_SOC_NOPM, 2, 0, wm8903_dcs_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_S("LINEOUTL_DCS", 3, SND_SOC_NOPM, 1, 0, wm8903_dcs_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_S("LINEOUTR_DCS", 3, SND_SOC_NOPM, 0, 0, wm8903_dcs_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_PGA("Left Speaker PGA", WM8903_POWER_MANAGEMENT_5, 1, 0,
NULL, 0),
@@ -892,10 +923,18 @@ SND_SOC_DAPM_PGA("Right Speaker PGA", WM8903_POWER_MANAGEMENT_5, 0, 0,
SND_SOC_DAPM_SUPPLY("Charge Pump", WM8903_CHARGE_PUMP_0, 0, 0,
wm8903_cp_event, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8903_CLOCK_RATES_2, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("CLK_SYS", WM8903_CLOCK_RATES_2, 2, 0, NULL, 0),
};
static const struct snd_soc_dapm_route intercon[] = {
+ { "CLK_DSP", NULL, "CLK_SYS" },
+ { "Mic Bias", NULL, "CLK_SYS" },
+ { "HPL_DCS", NULL, "CLK_SYS" },
+ { "HPR_DCS", NULL, "CLK_SYS" },
+ { "LINEOUTL_DCS", NULL, "CLK_SYS" },
+ { "LINEOUTR_DCS", NULL, "CLK_SYS" },
+
{ "Left Input Mux", "IN1L", "IN1L" },
{ "Left Input Mux", "IN2L", "IN2L" },
{ "Left Input Mux", "IN3L", "IN3L" },
@@ -936,18 +975,36 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "Left Input PGA", NULL, "Left Input Mode Mux" },
{ "Right Input PGA", NULL, "Right Input Mode Mux" },
+ { "Left Capture Mux", "Left", "ADCL" },
+ { "Left Capture Mux", "Right", "ADCR" },
+
+ { "Right Capture Mux", "Left", "ADCL" },
+ { "Right Capture Mux", "Right", "ADCR" },
+
+ { "AIFTXL", NULL, "Left Capture Mux" },
+ { "AIFTXR", NULL, "Right Capture Mux" },
+
{ "ADCL", NULL, "Left Input PGA" },
{ "ADCL", NULL, "CLK_DSP" },
{ "ADCR", NULL, "Right Input PGA" },
{ "ADCR", NULL, "CLK_DSP" },
+ { "Left Playback Mux", "Left", "AIFRXL" },
+ { "Left Playback Mux", "Right", "AIFRXR" },
+
+ { "Right Playback Mux", "Left", "AIFRXL" },
+ { "Right Playback Mux", "Right", "AIFRXR" },
+
{ "DACL Sidetone", "Left", "ADCL" },
{ "DACL Sidetone", "Right", "ADCR" },
{ "DACR Sidetone", "Left", "ADCL" },
{ "DACR Sidetone", "Right", "ADCR" },
+ { "DACL", NULL, "Left Playback Mux" },
{ "DACL", NULL, "DACL Sidetone" },
{ "DACL", NULL, "CLK_DSP" },
+
+ { "DACR", NULL, "Right Playback Mux" },
{ "DACR", NULL, "DACR Sidetone" },
{ "DACR", NULL, "CLK_DSP" },
@@ -980,11 +1037,35 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "Left Speaker PGA", NULL, "Left Speaker Mixer" },
{ "Right Speaker PGA", NULL, "Right Speaker Mixer" },
- { "HPOUTL", NULL, "Left Headphone Output PGA" },
- { "HPOUTR", NULL, "Right Headphone Output PGA" },
+ { "HPL_ENA_DLY", NULL, "Left Headphone Output PGA" },
+ { "HPR_ENA_DLY", NULL, "Right Headphone Output PGA" },
+ { "LINEOUTL_ENA_DLY", NULL, "Left Line Output PGA" },
+ { "LINEOUTR_ENA_DLY", NULL, "Right Line Output PGA" },
+
+ { "HPL_DCS", NULL, "DCS Master" },
+ { "HPR_DCS", NULL, "DCS Master" },
+ { "LINEOUTL_DCS", NULL, "DCS Master" },
+ { "LINEOUTR_DCS", NULL, "DCS Master" },
+
+ { "HPL_DCS", NULL, "HPL_ENA_DLY" },
+ { "HPR_DCS", NULL, "HPR_ENA_DLY" },
+ { "LINEOUTL_DCS", NULL, "LINEOUTL_ENA_DLY" },
+ { "LINEOUTR_DCS", NULL, "LINEOUTR_ENA_DLY" },
- { "LINEOUTL", NULL, "Left Line Output PGA" },
- { "LINEOUTR", NULL, "Right Line Output PGA" },
+ { "HPL_ENA_OUTP", NULL, "HPL_DCS" },
+ { "HPR_ENA_OUTP", NULL, "HPR_DCS" },
+ { "LINEOUTL_ENA_OUTP", NULL, "LINEOUTL_DCS" },
+ { "LINEOUTR_ENA_OUTP", NULL, "LINEOUTR_DCS" },
+
+ { "HPL_RMV_SHORT", NULL, "HPL_ENA_OUTP" },
+ { "HPR_RMV_SHORT", NULL, "HPR_ENA_OUTP" },
+ { "LINEOUTL_RMV_SHORT", NULL, "LINEOUTL_ENA_OUTP" },
+ { "LINEOUTR_RMV_SHORT", NULL, "LINEOUTR_ENA_OUTP" },
+
+ { "HPOUTL", NULL, "HPL_RMV_SHORT" },
+ { "HPOUTR", NULL, "HPR_RMV_SHORT" },
+ { "LINEOUTL", NULL, "LINEOUTL_RMV_SHORT" },
+ { "LINEOUTR", NULL, "LINEOUTR_RMV_SHORT" },
{ "LOP", NULL, "Left Speaker PGA" },
{ "LON", NULL, "Left Speaker PGA" },
@@ -1012,29 +1093,71 @@ static int wm8903_add_widgets(struct snd_soc_codec *codec)
static int wm8903_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u16 reg;
-
switch (level) {
case SND_SOC_BIAS_ON:
+ break;
+
case SND_SOC_BIAS_PREPARE:
- reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
- reg &= ~(WM8903_VMID_RES_MASK);
- reg |= WM8903_VMID_RES_50K;
- snd_soc_write(codec, WM8903_VMID_CONTROL_0, reg);
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_RES_MASK,
+ WM8903_VMID_RES_50K);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- snd_soc_write(codec, WM8903_CLOCK_RATES_2,
- WM8903_CLK_SYS_ENA);
-
- /* Change DC servo dither level in startup sequence */
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, 0x11);
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_1, 0x1257);
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_2, 0x2);
-
- wm8903_run_sequence(codec, 0);
- wm8903_sync_reg_cache(codec, codec->reg_cache);
+ snd_soc_update_bits(codec, WM8903_BIAS_CONTROL_0,
+ WM8903_POBCTRL | WM8903_ISEL_MASK |
+ WM8903_STARTUP_BIAS_ENA |
+ WM8903_BIAS_ENA,
+ WM8903_POBCTRL |
+ (2 << WM8903_ISEL_SHIFT) |
+ WM8903_STARTUP_BIAS_ENA);
+
+ snd_soc_update_bits(codec,
+ WM8903_ANALOGUE_SPK_OUTPUT_CONTROL_0,
+ WM8903_SPK_DISCHARGE,
+ WM8903_SPK_DISCHARGE);
+
+ msleep(33);
+
+ snd_soc_update_bits(codec, WM8903_POWER_MANAGEMENT_5,
+ WM8903_SPKL_ENA | WM8903_SPKR_ENA,
+ WM8903_SPKL_ENA | WM8903_SPKR_ENA);
+
+ snd_soc_update_bits(codec,
+ WM8903_ANALOGUE_SPK_OUTPUT_CONTROL_0,
+ WM8903_SPK_DISCHARGE, 0);
+
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_TIE_ENA |
+ WM8903_BUFIO_ENA |
+ WM8903_VMID_IO_ENA |
+ WM8903_VMID_SOFT_MASK |
+ WM8903_VMID_RES_MASK |
+ WM8903_VMID_BUF_ENA,
+ WM8903_VMID_TIE_ENA |
+ WM8903_BUFIO_ENA |
+ WM8903_VMID_IO_ENA |
+ (2 << WM8903_VMID_SOFT_SHIFT) |
+ WM8903_VMID_RES_250K |
+ WM8903_VMID_BUF_ENA);
+
+ msleep(129);
+
+ snd_soc_update_bits(codec, WM8903_POWER_MANAGEMENT_5,
+ WM8903_SPKL_ENA | WM8903_SPKR_ENA,
+ 0);
+
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_SOFT_MASK, 0);
+
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_RES_MASK,
+ WM8903_VMID_RES_50K);
+
+ snd_soc_update_bits(codec, WM8903_BIAS_CONTROL_0,
+ WM8903_BIAS_ENA | WM8903_POBCTRL,
+ WM8903_BIAS_ENA);
/* By default no bypass paths are enabled so
* enable Class W support.
@@ -1047,17 +1170,32 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec,
WM8903_CP_DYN_V);
}
- reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
- reg &= ~(WM8903_VMID_RES_MASK);
- reg |= WM8903_VMID_RES_250K;
- snd_soc_write(codec, WM8903_VMID_CONTROL_0, reg);
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_RES_MASK,
+ WM8903_VMID_RES_250K);
break;
case SND_SOC_BIAS_OFF:
- wm8903_run_sequence(codec, 32);
- reg = snd_soc_read(codec, WM8903_CLOCK_RATES_2);
- reg &= ~WM8903_CLK_SYS_ENA;
- snd_soc_write(codec, WM8903_CLOCK_RATES_2, reg);
+ snd_soc_update_bits(codec, WM8903_BIAS_CONTROL_0,
+ WM8903_BIAS_ENA, 0);
+
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_SOFT_MASK,
+ 2 << WM8903_VMID_SOFT_SHIFT);
+
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_BUF_ENA, 0);
+
+ msleep(290);
+
+ snd_soc_update_bits(codec, WM8903_VMID_CONTROL_0,
+ WM8903_VMID_TIE_ENA | WM8903_BUFIO_ENA |
+ WM8903_VMID_IO_ENA | WM8903_VMID_RES_MASK |
+ WM8903_VMID_SOFT_MASK |
+ WM8903_VMID_BUF_ENA, 0);
+
+ snd_soc_update_bits(codec, WM8903_BIAS_CONTROL_0,
+ WM8903_STARTUP_BIAS_ENA, 0);
break;
}
@@ -1482,7 +1620,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
irq_mask);
- if (det && shrt) {
+ if (det || shrt) {
/* Enable mic detection, this may not have been set through
* platform data (eg, if the defaults are OK). */
snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
@@ -1510,8 +1648,7 @@ static irqreturn_t wm8903_irq(int irq, void *data)
int_val = snd_soc_read(codec, WM8903_INTERRUPT_STATUS_1) & mask;
if (int_val & WM8903_WSEQ_BUSY_EINT) {
- dev_dbg(codec->dev, "Write sequencer done\n");
- complete(&wm8903->wseq);
+ dev_warn(codec->dev, "Write sequencer done\n");
}
/*
@@ -1635,6 +1772,120 @@ static int wm8903_resume(struct snd_soc_codec *codec)
return 0;
}
+#ifdef CONFIG_GPIOLIB
+static inline struct wm8903_priv *gpio_to_wm8903(struct gpio_chip *chip)
+{
+ return container_of(chip, struct wm8903_priv, gpio_chip);
+}
+
+static int wm8903_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset >= WM8903_NUM_GPIO)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int wm8903_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+ struct snd_soc_codec *codec = wm8903->codec;
+ unsigned int mask, val;
+
+ mask = WM8903_GP1_FN_MASK | WM8903_GP1_DIR_MASK;
+ val = (WM8903_GPn_FN_GPIO_INPUT << WM8903_GP1_FN_SHIFT) |
+ WM8903_GP1_DIR;
+
+ return snd_soc_update_bits(codec, WM8903_GPIO_CONTROL_1 + offset,
+ mask, val);
+}
+
+static int wm8903_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+ struct snd_soc_codec *codec = wm8903->codec;
+ int reg;
+
+ reg = snd_soc_read(codec, WM8903_GPIO_CONTROL_1 + offset);
+
+ return (reg & WM8903_GP1_LVL_MASK) >> WM8903_GP1_LVL_SHIFT;
+}
+
+static int wm8903_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+ struct snd_soc_codec *codec = wm8903->codec;
+ unsigned int mask, val;
+
+ mask = WM8903_GP1_FN_MASK | WM8903_GP1_DIR_MASK | WM8903_GP1_LVL_MASK;
+ val = (WM8903_GPn_FN_GPIO_OUTPUT << WM8903_GP1_FN_SHIFT) |
+ (value << WM8903_GP2_LVL_SHIFT);
+
+ return snd_soc_update_bits(codec, WM8903_GPIO_CONTROL_1 + offset,
+ mask, val);
+}
+
+static void wm8903_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+ struct snd_soc_codec *codec = wm8903->codec;
+
+ snd_soc_update_bits(codec, WM8903_GPIO_CONTROL_1 + offset,
+ WM8903_GP1_LVL_MASK,
+ !!value << WM8903_GP1_LVL_SHIFT);
+}
+
+static struct gpio_chip wm8903_template_chip = {
+ .label = "wm8903",
+ .owner = THIS_MODULE,
+ .request = wm8903_gpio_request,
+ .direction_input = wm8903_gpio_direction_in,
+ .get = wm8903_gpio_get,
+ .direction_output = wm8903_gpio_direction_out,
+ .set = wm8903_gpio_set,
+ .can_sleep = 1,
+};
+
+static void wm8903_init_gpio(struct snd_soc_codec *codec)
+{
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+ struct wm8903_platform_data *pdata = dev_get_platdata(codec->dev);
+ int ret;
+
+ wm8903->gpio_chip = wm8903_template_chip;
+ wm8903->gpio_chip.ngpio = WM8903_NUM_GPIO;
+ wm8903->gpio_chip.dev = codec->dev;
+
+ if (pdata && pdata->gpio_base)
+ wm8903->gpio_chip.base = pdata->gpio_base;
+ else
+ wm8903->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&wm8903->gpio_chip);
+ if (ret != 0)
+ dev_err(codec->dev, "Failed to add GPIOs: %d\n", ret);
+}
+
+static void wm8903_free_gpio(struct snd_soc_codec *codec)
+{
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = gpiochip_remove(&wm8903->gpio_chip);
+ if (ret != 0)
+ dev_err(codec->dev, "Failed to remove GPIOs: %d\n", ret);
+}
+#else
+static void wm8903_init_gpio(struct snd_soc_codec *codec)
+{
+}
+
+static void wm8903_free_gpio(struct snd_soc_codec *codec)
+{
+}
+#endif
+
static int wm8903_probe(struct snd_soc_codec *codec)
{
struct wm8903_platform_data *pdata = dev_get_platdata(codec->dev);
@@ -1643,7 +1894,7 @@ static int wm8903_probe(struct snd_soc_codec *codec)
int trigger, irq_pol;
u16 val;
- init_completion(&wm8903->wseq);
+ wm8903->codec = codec;
ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
if (ret != 0) {
@@ -1659,19 +1910,33 @@ static int wm8903_probe(struct snd_soc_codec *codec)
}
val = snd_soc_read(codec, WM8903_REVISION_NUMBER);
- dev_info(codec->dev, "WM8903 revision %d\n",
- val & WM8903_CHIP_REV_MASK);
+ dev_info(codec->dev, "WM8903 revision %c\n",
+ (val & WM8903_CHIP_REV_MASK) + 'A');
wm8903_reset(codec);
/* Set up GPIOs and microphone detection */
if (pdata) {
+ bool mic_gpio = false;
+
for (i = 0; i < ARRAY_SIZE(pdata->gpio_cfg); i++) {
- if (!pdata->gpio_cfg[i])
+ if (pdata->gpio_cfg[i] == WM8903_GPIO_NO_CONFIG)
continue;
snd_soc_write(codec, WM8903_GPIO_CONTROL_1 + i,
pdata->gpio_cfg[i] & 0xffff);
+
+ val = (pdata->gpio_cfg[i] & WM8903_GP1_FN_MASK)
+ >> WM8903_GP1_FN_SHIFT;
+
+ switch (val) {
+ case WM8903_GPn_FN_MICBIAS_CURRENT_DETECT:
+ case WM8903_GPn_FN_MICBIAS_SHORT_DETECT:
+ mic_gpio = true;
+ break;
+ default:
+ break;
+ }
}
snd_soc_write(codec, WM8903_MIC_BIAS_CONTROL_0,
@@ -1682,6 +1947,14 @@ static int wm8903_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
WM8903_WSEQ_ENA, WM8903_WSEQ_ENA);
+ /* If microphone detection is enabled by pdata but
+ * detected via IRQ then interrupts can be lost before
+ * the machine driver has set up microphone detection
+ * IRQs as the IRQs are clear on read. The detection
+ * will be enabled when the machine driver configures.
+ */
+ WARN_ON(!mic_gpio && (pdata->micdet_cfg & WM8903_MICDET_ENA));
+
wm8903->mic_delay = pdata->micdet_delay;
}
@@ -1741,20 +2014,23 @@ static int wm8903_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, WM8903_ANALOGUE_OUT3_RIGHT, val);
/* Enable DAC soft mute by default */
- val = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
- val |= WM8903_DAC_MUTEMODE;
- snd_soc_write(codec, WM8903_DAC_DIGITAL_1, val);
+ snd_soc_update_bits(codec, WM8903_DAC_DIGITAL_1,
+ WM8903_DAC_MUTEMODE | WM8903_DAC_MUTE,
+ WM8903_DAC_MUTEMODE | WM8903_DAC_MUTE);
snd_soc_add_controls(codec, wm8903_snd_controls,
ARRAY_SIZE(wm8903_snd_controls));
wm8903_add_widgets(codec);
+ wm8903_init_gpio(codec);
+
return ret;
}
/* power down chip */
static int wm8903_remove(struct snd_soc_codec *codec)
{
+ wm8903_free_gpio(codec);
wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -1769,6 +2045,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8903 = {
.reg_word_size = sizeof(u16),
.reg_cache_default = wm8903_reg_defaults,
.volatile_register = wm8903_volatile_register,
+ .seq_notifier = wm8903_seq_notifier,
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
@@ -1807,7 +2084,7 @@ MODULE_DEVICE_TABLE(i2c, wm8903_i2c_id);
static struct i2c_driver wm8903_i2c_driver = {
.driver = {
- .name = "wm8903-codec",
+ .name = "wm8903",
.owner = THIS_MODULE,
},
.probe = wm8903_i2c_probe,
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index e8490f3edd03..db949311c0f2 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -75,6 +75,14 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
#define WM8903_ANALOGUE_SPK_OUTPUT_CONTROL_0 0x41
#define WM8903_DC_SERVO_0 0x43
#define WM8903_DC_SERVO_2 0x45
+#define WM8903_DC_SERVO_4 0x47
+#define WM8903_DC_SERVO_5 0x48
+#define WM8903_DC_SERVO_6 0x49
+#define WM8903_DC_SERVO_7 0x4A
+#define WM8903_DC_SERVO_READBACK_1 0x51
+#define WM8903_DC_SERVO_READBACK_2 0x52
+#define WM8903_DC_SERVO_READBACK_3 0x53
+#define WM8903_DC_SERVO_READBACK_4 0x54
#define WM8903_ANALOGUE_HP_0 0x5A
#define WM8903_ANALOGUE_LINEOUT_0 0x5E
#define WM8903_CHARGE_PUMP_0 0x62
@@ -165,7 +173,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
#define WM8903_VMID_RES_50K 2
#define WM8903_VMID_RES_250K 3
-#define WM8903_VMID_RES_5K 4
+#define WM8903_VMID_RES_5K 6
/*
* R8 (0x08) - Analogue DAC 0
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 9de44a4c05c0..443ae580445c 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -596,7 +596,7 @@ static struct {
{ 0x003F, 0x003F, 0 }, /* R248 - FLL NCO Test 1 */
};
-static int wm8904_volatile_register(unsigned int reg)
+static int wm8904_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
return wm8904_access[reg].vol;
}
@@ -2436,19 +2436,28 @@ static int wm8904_probe(struct snd_soc_codec *codec)
}
/* Change some default settings - latch VU and enable ZC */
- reg_cache[WM8904_ADC_DIGITAL_VOLUME_LEFT] |= WM8904_ADC_VU;
- reg_cache[WM8904_ADC_DIGITAL_VOLUME_RIGHT] |= WM8904_ADC_VU;
- reg_cache[WM8904_DAC_DIGITAL_VOLUME_LEFT] |= WM8904_DAC_VU;
- reg_cache[WM8904_DAC_DIGITAL_VOLUME_RIGHT] |= WM8904_DAC_VU;
- reg_cache[WM8904_ANALOGUE_OUT1_LEFT] |= WM8904_HPOUT_VU |
- WM8904_HPOUTLZC;
- reg_cache[WM8904_ANALOGUE_OUT1_RIGHT] |= WM8904_HPOUT_VU |
- WM8904_HPOUTRZC;
- reg_cache[WM8904_ANALOGUE_OUT2_LEFT] |= WM8904_LINEOUT_VU |
- WM8904_LINEOUTLZC;
- reg_cache[WM8904_ANALOGUE_OUT2_RIGHT] |= WM8904_LINEOUT_VU |
- WM8904_LINEOUTRZC;
- reg_cache[WM8904_CLOCK_RATES_0] &= ~WM8904_SR_MODE;
+ snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
+ WM8904_ADC_VU, WM8904_ADC_VU);
+ snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_RIGHT,
+ WM8904_ADC_VU, WM8904_ADC_VU);
+ snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_VOLUME_LEFT,
+ WM8904_DAC_VU, WM8904_DAC_VU);
+ snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_VOLUME_RIGHT,
+ WM8904_DAC_VU, WM8904_DAC_VU);
+ snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT1_LEFT,
+ WM8904_HPOUT_VU | WM8904_HPOUTLZC,
+ WM8904_HPOUT_VU | WM8904_HPOUTLZC);
+ snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT1_RIGHT,
+ WM8904_HPOUT_VU | WM8904_HPOUTRZC,
+ WM8904_HPOUT_VU | WM8904_HPOUTRZC);
+ snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT2_LEFT,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTLZC,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTLZC);
+ snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT2_RIGHT,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTRZC,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTRZC);
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_0,
+ WM8904_SR_MODE, 0);
/* Apply configuration from the platform data. */
if (wm8904->pdata) {
@@ -2469,10 +2478,12 @@ static int wm8904_probe(struct snd_soc_codec *codec)
/* Set Class W by default - this will be managed by the Class
* G widget at runtime where bypass paths are available.
*/
- reg_cache[WM8904_CLASS_W_0] |= WM8904_CP_DYN_PWR;
+ snd_soc_update_bits(codec, WM8904_CLASS_W_0,
+ WM8904_CP_DYN_PWR, WM8904_CP_DYN_PWR);
/* Use normal bias source */
- reg_cache[WM8904_BIAS_CONTROL_0] &= ~WM8904_POBCTRL;
+ snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
+ WM8904_POBCTRL, 0);
wm8904_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 7167dfc96aa7..5e0214d6293e 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -934,16 +934,27 @@ static int wm8955_probe(struct snd_soc_codec *codec)
}
/* Change some default settings - latch VU and enable ZC */
- reg_cache[WM8955_LEFT_DAC_VOLUME] |= WM8955_LDVU;
- reg_cache[WM8955_RIGHT_DAC_VOLUME] |= WM8955_RDVU;
- reg_cache[WM8955_LOUT1_VOLUME] |= WM8955_LO1VU | WM8955_LO1ZC;
- reg_cache[WM8955_ROUT1_VOLUME] |= WM8955_RO1VU | WM8955_RO1ZC;
- reg_cache[WM8955_LOUT2_VOLUME] |= WM8955_LO2VU | WM8955_LO2ZC;
- reg_cache[WM8955_ROUT2_VOLUME] |= WM8955_RO2VU | WM8955_RO2ZC;
- reg_cache[WM8955_MONOOUT_VOLUME] |= WM8955_MOZC;
+ snd_soc_update_bits(codec, WM8955_LEFT_DAC_VOLUME,
+ WM8955_LDVU, WM8955_LDVU);
+ snd_soc_update_bits(codec, WM8955_RIGHT_DAC_VOLUME,
+ WM8955_RDVU, WM8955_RDVU);
+ snd_soc_update_bits(codec, WM8955_LOUT1_VOLUME,
+ WM8955_LO1VU | WM8955_LO1ZC,
+ WM8955_LO1VU | WM8955_LO1ZC);
+ snd_soc_update_bits(codec, WM8955_ROUT1_VOLUME,
+ WM8955_RO1VU | WM8955_RO1ZC,
+ WM8955_RO1VU | WM8955_RO1ZC);
+ snd_soc_update_bits(codec, WM8955_LOUT2_VOLUME,
+ WM8955_LO2VU | WM8955_LO2ZC,
+ WM8955_LO2VU | WM8955_LO2ZC);
+ snd_soc_update_bits(codec, WM8955_ROUT2_VOLUME,
+ WM8955_RO2VU | WM8955_RO2ZC,
+ WM8955_RO2VU | WM8955_RO2ZC);
+ snd_soc_update_bits(codec, WM8955_MONOOUT_VOLUME,
+ WM8955_MOZC, WM8955_MOZC);
/* Also enable adaptive bass boost by default */
- reg_cache[WM8955_BASS_CONTROL] |= WM8955_BB;
+ snd_soc_update_bits(codec, WM8955_BASS_CONTROL, WM8955_BB, WM8955_BB);
/* Set platform data values */
if (pdata) {
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 55252e7d02c9..cdee8103d09b 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -291,7 +291,7 @@ struct wm8961_priv {
int sysclk;
};
-static int wm8961_volatile_register(unsigned int reg)
+static int wm8961_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8961_SOFTWARE_RESET:
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b9cb1fcf8c92..3b71dd65c966 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1938,7 +1938,7 @@ static const struct wm8962_reg_access {
[21139] = { 0xFFFF, 0xFFFF, 0x0000 }, /* R21139 - VSS_XTS32_0 */
};
-static int wm8962_volatile_register(unsigned int reg)
+static int wm8962_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
if (wm8962_reg_access[reg].vol)
return 1;
@@ -1946,7 +1946,7 @@ static int wm8962_volatile_register(unsigned int reg)
return 0;
}
-static int wm8962_readable_register(unsigned int reg)
+static int wm8962_readable_register(struct snd_soc_codec *codec, unsigned int reg)
{
if (wm8962_reg_access[reg].read)
return 1;
@@ -3635,7 +3635,7 @@ static void wm8962_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
struct snd_soc_codec *codec = wm8962->codec;
snd_soc_update_bits(codec, WM8962_GPIO_BASE + offset,
- WM8962_GP2_LVL, value << WM8962_GP2_LVL_SHIFT);
+ WM8962_GP2_LVL, !!value << WM8962_GP2_LVL_SHIFT);
}
static int wm8962_gpio_direction_out(struct gpio_chip *chip,
@@ -3822,16 +3822,26 @@ static int wm8962_probe(struct snd_soc_codec *codec)
}
/* Latch volume update bits */
- reg_cache[WM8962_LEFT_INPUT_VOLUME] |= WM8962_IN_VU;
- reg_cache[WM8962_RIGHT_INPUT_VOLUME] |= WM8962_IN_VU;
- reg_cache[WM8962_LEFT_ADC_VOLUME] |= WM8962_ADC_VU;
- reg_cache[WM8962_RIGHT_ADC_VOLUME] |= WM8962_ADC_VU;
- reg_cache[WM8962_LEFT_DAC_VOLUME] |= WM8962_DAC_VU;
- reg_cache[WM8962_RIGHT_DAC_VOLUME] |= WM8962_DAC_VU;
- reg_cache[WM8962_SPKOUTL_VOLUME] |= WM8962_SPKOUT_VU;
- reg_cache[WM8962_SPKOUTR_VOLUME] |= WM8962_SPKOUT_VU;
- reg_cache[WM8962_HPOUTL_VOLUME] |= WM8962_HPOUT_VU;
- reg_cache[WM8962_HPOUTR_VOLUME] |= WM8962_HPOUT_VU;
+ snd_soc_update_bits(codec, WM8962_LEFT_INPUT_VOLUME,
+ WM8962_IN_VU, WM8962_IN_VU);
+ snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_VOLUME,
+ WM8962_IN_VU, WM8962_IN_VU);
+ snd_soc_update_bits(codec, WM8962_LEFT_ADC_VOLUME,
+ WM8962_ADC_VU, WM8962_ADC_VU);
+ snd_soc_update_bits(codec, WM8962_RIGHT_ADC_VOLUME,
+ WM8962_ADC_VU, WM8962_ADC_VU);
+ snd_soc_update_bits(codec, WM8962_LEFT_DAC_VOLUME,
+ WM8962_DAC_VU, WM8962_DAC_VU);
+ snd_soc_update_bits(codec, WM8962_RIGHT_DAC_VOLUME,
+ WM8962_DAC_VU, WM8962_DAC_VU);
+ snd_soc_update_bits(codec, WM8962_SPKOUTL_VOLUME,
+ WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
+ snd_soc_update_bits(codec, WM8962_SPKOUTR_VOLUME,
+ WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
+ snd_soc_update_bits(codec, WM8962_HPOUTL_VOLUME,
+ WM8962_HPOUT_VU, WM8962_HPOUT_VU);
+ snd_soc_update_bits(codec, WM8962_HPOUTR_VOLUME,
+ WM8962_HPOUT_VU, WM8962_HPOUT_VU);
wm8962_add_widgets(codec);
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 4bbc3442703f..30fb48ec2799 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -965,7 +965,7 @@ static int wm8978_probe(struct snd_soc_codec *codec)
* written.
*/
for (i = 0; i < ARRAY_SIZE(update_reg); i++)
- ((u16 *)codec->reg_cache)[update_reg[i]] |= 0x100;
+ snd_soc_update_bits(codec, update_reg[i], 0x100, 0x100);
/* Reset the codec */
ret = snd_soc_write(codec, WM8978_RESET, 0);
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
new file mode 100644
index 000000000000..28fdfd66661d
--- /dev/null
+++ b/sound/soc/codecs/wm8991.c
@@ -0,0 +1,1427 @@
+/*
+ * wm8991.c -- WM8991 ALSA Soc Audio driver
+ *
+ * Copyright 2007-2010 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ * linux@wolfsonmicro.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <asm/div64.h>
+
+#include "wm8991.h"
+
+struct wm8991_priv {
+ enum snd_soc_control_type control_type;
+ unsigned int pcmclk;
+};
+
+static const u16 wm8991_reg_defs[] = {
+ 0x8991, /* R0 - Reset */
+ 0x0000, /* R1 - Power Management (1) */
+ 0x6000, /* R2 - Power Management (2) */
+ 0x0000, /* R3 - Power Management (3) */
+ 0x4050, /* R4 - Audio Interface (1) */
+ 0x4000, /* R5 - Audio Interface (2) */
+ 0x01C8, /* R6 - Clocking (1) */
+ 0x0000, /* R7 - Clocking (2) */
+ 0x0040, /* R8 - Audio Interface (3) */
+ 0x0040, /* R9 - Audio Interface (4) */
+ 0x0004, /* R10 - DAC CTRL */
+ 0x00C0, /* R11 - Left DAC Digital Volume */
+ 0x00C0, /* R12 - Right DAC Digital Volume */
+ 0x0000, /* R13 - Digital Side Tone */
+ 0x0100, /* R14 - ADC CTRL */
+ 0x00C0, /* R15 - Left ADC Digital Volume */
+ 0x00C0, /* R16 - Right ADC Digital Volume */
+ 0x0000, /* R17 */
+ 0x0000, /* R18 - GPIO CTRL 1 */
+ 0x1000, /* R19 - GPIO1 & GPIO2 */
+ 0x1010, /* R20 - GPIO3 & GPIO4 */
+ 0x1010, /* R21 - GPIO5 & GPIO6 */
+ 0x8000, /* R22 - GPIOCTRL 2 */
+ 0x0800, /* R23 - GPIO_POL */
+ 0x008B, /* R24 - Left Line Input 1&2 Volume */
+ 0x008B, /* R25 - Left Line Input 3&4 Volume */
+ 0x008B, /* R26 - Right Line Input 1&2 Volume */
+ 0x008B, /* R27 - Right Line Input 3&4 Volume */
+ 0x0000, /* R28 - Left Output Volume */
+ 0x0000, /* R29 - Right Output Volume */
+ 0x0066, /* R30 - Line Outputs Volume */
+ 0x0022, /* R31 - Out3/4 Volume */
+ 0x0079, /* R32 - Left OPGA Volume */
+ 0x0079, /* R33 - Right OPGA Volume */
+ 0x0003, /* R34 - Speaker Volume */
+ 0x0003, /* R35 - ClassD1 */
+ 0x0000, /* R36 */
+ 0x0100, /* R37 - ClassD3 */
+ 0x0000, /* R38 */
+ 0x0000, /* R39 - Input Mixer1 */
+ 0x0000, /* R40 - Input Mixer2 */
+ 0x0000, /* R41 - Input Mixer3 */
+ 0x0000, /* R42 - Input Mixer4 */
+ 0x0000, /* R43 - Input Mixer5 */
+ 0x0000, /* R44 - Input Mixer6 */
+ 0x0000, /* R45 - Output Mixer1 */
+ 0x0000, /* R46 - Output Mixer2 */
+ 0x0000, /* R47 - Output Mixer3 */
+ 0x0000, /* R48 - Output Mixer4 */
+ 0x0000, /* R49 - Output Mixer5 */
+ 0x0000, /* R50 - Output Mixer6 */
+ 0x0180, /* R51 - Out3/4 Mixer */
+ 0x0000, /* R52 - Line Mixer1 */
+ 0x0000, /* R53 - Line Mixer2 */
+ 0x0000, /* R54 - Speaker Mixer */
+ 0x0000, /* R55 - Additional Control */
+ 0x0000, /* R56 - AntiPOP1 */
+ 0x0000, /* R57 - AntiPOP2 */
+ 0x0000, /* R58 - MICBIAS */
+ 0x0000, /* R59 */
+ 0x0008, /* R60 - PLL1 */
+ 0x0031, /* R61 - PLL2 */
+ 0x0026, /* R62 - PLL3 */
+};
+
+#define wm8991_reset(c) snd_soc_write(c, WM8991_RESET, 0)
+
+static const unsigned int rec_mix_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 7, TLV_DB_LINEAR_ITEM(-1500, 600),
+};
+
+static const unsigned int in_pga_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 0x1F, TLV_DB_LINEAR_ITEM(-1650, 3000),
+};
+
+static const unsigned int out_mix_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 7, TLV_DB_LINEAR_ITEM(0, -2100),
+};
+
+static const unsigned int out_pga_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 127, TLV_DB_LINEAR_ITEM(-7300, 600),
+};
+
+static const unsigned int out_omix_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 7, TLV_DB_LINEAR_ITEM(-600, 0),
+};
+
+static const unsigned int out_dac_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 255, TLV_DB_LINEAR_ITEM(-7163, 0),
+};
+
+static const unsigned int in_adc_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 255, TLV_DB_LINEAR_ITEM(-7163, 1763),
+};
+
+static const unsigned int out_sidetone_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 31, TLV_DB_LINEAR_ITEM(-3600, 0),
+};
+
+static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int reg = kcontrol->private_value & 0xff;
+ int ret;
+ u16 val;
+
+ ret = snd_soc_put_volsw(kcontrol, ucontrol);
+ if (ret < 0)
+ return ret;
+
+ /* now hit the volume update bits (always bit 8) */
+ val = snd_soc_read(codec, reg);
+ return snd_soc_write(codec, reg, val | 0x0100);
+}
+
+static const char *wm8991_digital_sidetone[] =
+{"None", "Left ADC", "Right ADC", "Reserved"};
+
+static const struct soc_enum wm8991_left_digital_sidetone_enum =
+ SOC_ENUM_SINGLE(WM8991_DIGITAL_SIDE_TONE,
+ WM8991_ADC_TO_DACL_SHIFT,
+ WM8991_ADC_TO_DACL_MASK,
+ wm8991_digital_sidetone);
+
+static const struct soc_enum wm8991_right_digital_sidetone_enum =
+ SOC_ENUM_SINGLE(WM8991_DIGITAL_SIDE_TONE,
+ WM8991_ADC_TO_DACR_SHIFT,
+ WM8991_ADC_TO_DACR_MASK,
+ wm8991_digital_sidetone);
+
+static const char *wm8991_adcmode[] =
+{"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"};
+
+static const struct soc_enum wm8991_right_adcmode_enum =
+ SOC_ENUM_SINGLE(WM8991_ADC_CTRL,
+ WM8991_ADC_HPF_CUT_SHIFT,
+ WM8991_ADC_HPF_CUT_MASK,
+ wm8991_adcmode);
+
+static const struct snd_kcontrol_new wm8991_snd_controls[] = {
+ /* INMIXL */
+ SOC_SINGLE("LIN12 PGA Boost", WM8991_INPUT_MIXER3, WM8991_L12MNBST_BIT, 1, 0),
+ SOC_SINGLE("LIN34 PGA Boost", WM8991_INPUT_MIXER3, WM8991_L34MNBST_BIT, 1, 0),
+ /* INMIXR */
+ SOC_SINGLE("RIN12 PGA Boost", WM8991_INPUT_MIXER3, WM8991_R12MNBST_BIT, 1, 0),
+ SOC_SINGLE("RIN34 PGA Boost", WM8991_INPUT_MIXER3, WM8991_R34MNBST_BIT, 1, 0),
+
+ /* LOMIX */
+ SOC_SINGLE_TLV("LOMIX LIN3 Bypass Volume", WM8991_OUTPUT_MIXER3,
+ WM8991_LLI3LOVOL_SHIFT, WM8991_LLI3LOVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("LOMIX RIN12 PGA Bypass Volume", WM8991_OUTPUT_MIXER3,
+ WM8991_LR12LOVOL_SHIFT, WM8991_LR12LOVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("LOMIX LIN12 PGA Bypass Volume", WM8991_OUTPUT_MIXER3,
+ WM8991_LL12LOVOL_SHIFT, WM8991_LL12LOVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("LOMIX RIN3 Bypass Volume", WM8991_OUTPUT_MIXER5,
+ WM8991_LRI3LOVOL_SHIFT, WM8991_LRI3LOVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("LOMIX AINRMUX Bypass Volume", WM8991_OUTPUT_MIXER5,
+ WM8991_LRBLOVOL_SHIFT, WM8991_LRBLOVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("LOMIX AINLMUX Bypass Volume", WM8991_OUTPUT_MIXER5,
+ WM8991_LRBLOVOL_SHIFT, WM8991_LRBLOVOL_MASK, 1, out_mix_tlv),
+
+ /* ROMIX */
+ SOC_SINGLE_TLV("ROMIX RIN3 Bypass Volume", WM8991_OUTPUT_MIXER4,
+ WM8991_RRI3ROVOL_SHIFT, WM8991_RRI3ROVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("ROMIX LIN12 PGA Bypass Volume", WM8991_OUTPUT_MIXER4,
+ WM8991_RL12ROVOL_SHIFT, WM8991_RL12ROVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("ROMIX RIN12 PGA Bypass Volume", WM8991_OUTPUT_MIXER4,
+ WM8991_RR12ROVOL_SHIFT, WM8991_RR12ROVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("ROMIX LIN3 Bypass Volume", WM8991_OUTPUT_MIXER6,
+ WM8991_RLI3ROVOL_SHIFT, WM8991_RLI3ROVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("ROMIX AINLMUX Bypass Volume", WM8991_OUTPUT_MIXER6,
+ WM8991_RLBROVOL_SHIFT, WM8991_RLBROVOL_MASK, 1, out_mix_tlv),
+ SOC_SINGLE_TLV("ROMIX AINRMUX Bypass Volume", WM8991_OUTPUT_MIXER6,
+ WM8991_RRBROVOL_SHIFT, WM8991_RRBROVOL_MASK, 1, out_mix_tlv),
+
+ /* LOUT */
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("LOUT Volume", WM8991_LEFT_OUTPUT_VOLUME,
+ WM8991_LOUTVOL_SHIFT, WM8991_LOUTVOL_MASK, 0, out_pga_tlv),
+ SOC_SINGLE("LOUT ZC", WM8991_LEFT_OUTPUT_VOLUME, WM8991_LOZC_BIT, 1, 0),
+
+ /* ROUT */
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("ROUT Volume", WM8991_RIGHT_OUTPUT_VOLUME,
+ WM8991_ROUTVOL_SHIFT, WM8991_ROUTVOL_MASK, 0, out_pga_tlv),
+ SOC_SINGLE("ROUT ZC", WM8991_RIGHT_OUTPUT_VOLUME, WM8991_ROZC_BIT, 1, 0),
+
+ /* LOPGA */
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("LOPGA Volume", WM8991_LEFT_OPGA_VOLUME,
+ WM8991_LOPGAVOL_SHIFT, WM8991_LOPGAVOL_MASK, 0, out_pga_tlv),
+ SOC_SINGLE("LOPGA ZC Switch", WM8991_LEFT_OPGA_VOLUME,
+ WM8991_LOPGAZC_BIT, 1, 0),
+
+ /* ROPGA */
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("ROPGA Volume", WM8991_RIGHT_OPGA_VOLUME,
+ WM8991_ROPGAVOL_SHIFT, WM8991_ROPGAVOL_MASK, 0, out_pga_tlv),
+ SOC_SINGLE("ROPGA ZC Switch", WM8991_RIGHT_OPGA_VOLUME,
+ WM8991_ROPGAZC_BIT, 1, 0),
+
+ SOC_SINGLE("LON Mute Switch", WM8991_LINE_OUTPUTS_VOLUME,
+ WM8991_LONMUTE_BIT, 1, 0),
+ SOC_SINGLE("LOP Mute Switch", WM8991_LINE_OUTPUTS_VOLUME,
+ WM8991_LOPMUTE_BIT, 1, 0),
+ SOC_SINGLE("LOP Attenuation Switch", WM8991_LINE_OUTPUTS_VOLUME,
+ WM8991_LOATTN_BIT, 1, 0),
+ SOC_SINGLE("RON Mute Switch", WM8991_LINE_OUTPUTS_VOLUME,
+ WM8991_RONMUTE_BIT, 1, 0),
+ SOC_SINGLE("ROP Mute Switch", WM8991_LINE_OUTPUTS_VOLUME,
+ WM8991_ROPMUTE_BIT, 1, 0),
+ SOC_SINGLE("ROP Attenuation Switch", WM8991_LINE_OUTPUTS_VOLUME,
+ WM8991_ROATTN_BIT, 1, 0),
+
+ SOC_SINGLE("OUT3 Mute Switch", WM8991_OUT3_4_VOLUME,
+ WM8991_OUT3MUTE_BIT, 1, 0),
+ SOC_SINGLE("OUT3 Attenuation Switch", WM8991_OUT3_4_VOLUME,
+ WM8991_OUT3ATTN_BIT, 1, 0),
+
+ SOC_SINGLE("OUT4 Mute Switch", WM8991_OUT3_4_VOLUME,
+ WM8991_OUT4MUTE_BIT, 1, 0),
+ SOC_SINGLE("OUT4 Attenuation Switch", WM8991_OUT3_4_VOLUME,
+ WM8991_OUT4ATTN_BIT, 1, 0),
+
+ SOC_SINGLE("Speaker Mode Switch", WM8991_CLASSD1,
+ WM8991_CDMODE_BIT, 1, 0),
+
+ SOC_SINGLE("Speaker Output Attenuation Volume", WM8991_SPEAKER_VOLUME,
+ WM8991_SPKVOL_SHIFT, WM8991_SPKVOL_MASK, 0),
+ SOC_SINGLE("Speaker DC Boost Volume", WM8991_CLASSD3,
+ WM8991_DCGAIN_SHIFT, WM8991_DCGAIN_MASK, 0),
+ SOC_SINGLE("Speaker AC Boost Volume", WM8991_CLASSD3,
+ WM8991_ACGAIN_SHIFT, WM8991_ACGAIN_MASK, 0),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("Left DAC Digital Volume",
+ WM8991_LEFT_DAC_DIGITAL_VOLUME,
+ WM8991_DACL_VOL_SHIFT,
+ WM8991_DACL_VOL_MASK,
+ 0,
+ out_dac_tlv),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("Right DAC Digital Volume",
+ WM8991_RIGHT_DAC_DIGITAL_VOLUME,
+ WM8991_DACR_VOL_SHIFT,
+ WM8991_DACR_VOL_MASK,
+ 0,
+ out_dac_tlv),
+
+ SOC_ENUM("Left Digital Sidetone", wm8991_left_digital_sidetone_enum),
+ SOC_ENUM("Right Digital Sidetone", wm8991_right_digital_sidetone_enum),
+
+ SOC_SINGLE_TLV("Left Digital Sidetone Volume", WM8991_DIGITAL_SIDE_TONE,
+ WM8991_ADCL_DAC_SVOL_SHIFT, WM8991_ADCL_DAC_SVOL_MASK, 0,
+ out_sidetone_tlv),
+ SOC_SINGLE_TLV("Right Digital Sidetone Volume", WM8991_DIGITAL_SIDE_TONE,
+ WM8991_ADCR_DAC_SVOL_SHIFT, WM8991_ADCR_DAC_SVOL_MASK, 0,
+ out_sidetone_tlv),
+
+ SOC_SINGLE("ADC Digital High Pass Filter Switch", WM8991_ADC_CTRL,
+ WM8991_ADC_HPF_ENA_BIT, 1, 0),
+
+ SOC_ENUM("ADC HPF Mode", wm8991_right_adcmode_enum),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("Left ADC Digital Volume",
+ WM8991_LEFT_ADC_DIGITAL_VOLUME,
+ WM8991_ADCL_VOL_SHIFT,
+ WM8991_ADCL_VOL_MASK,
+ 0,
+ in_adc_tlv),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("Right ADC Digital Volume",
+ WM8991_RIGHT_ADC_DIGITAL_VOLUME,
+ WM8991_ADCR_VOL_SHIFT,
+ WM8991_ADCR_VOL_MASK,
+ 0,
+ in_adc_tlv),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("LIN12 Volume",
+ WM8991_LEFT_LINE_INPUT_1_2_VOLUME,
+ WM8991_LIN12VOL_SHIFT,
+ WM8991_LIN12VOL_MASK,
+ 0,
+ in_pga_tlv),
+
+ SOC_SINGLE("LIN12 ZC Switch", WM8991_LEFT_LINE_INPUT_1_2_VOLUME,
+ WM8991_LI12ZC_BIT, 1, 0),
+
+ SOC_SINGLE("LIN12 Mute Switch", WM8991_LEFT_LINE_INPUT_1_2_VOLUME,
+ WM8991_LI12MUTE_BIT, 1, 0),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("LIN34 Volume",
+ WM8991_LEFT_LINE_INPUT_3_4_VOLUME,
+ WM8991_LIN34VOL_SHIFT,
+ WM8991_LIN34VOL_MASK,
+ 0,
+ in_pga_tlv),
+
+ SOC_SINGLE("LIN34 ZC Switch", WM8991_LEFT_LINE_INPUT_3_4_VOLUME,
+ WM8991_LI34ZC_BIT, 1, 0),
+
+ SOC_SINGLE("LIN34 Mute Switch", WM8991_LEFT_LINE_INPUT_3_4_VOLUME,
+ WM8991_LI34MUTE_BIT, 1, 0),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("RIN12 Volume",
+ WM8991_RIGHT_LINE_INPUT_1_2_VOLUME,
+ WM8991_RIN12VOL_SHIFT,
+ WM8991_RIN12VOL_MASK,
+ 0,
+ in_pga_tlv),
+
+ SOC_SINGLE("RIN12 ZC Switch", WM8991_RIGHT_LINE_INPUT_1_2_VOLUME,
+ WM8991_RI12ZC_BIT, 1, 0),
+
+ SOC_SINGLE("RIN12 Mute Switch", WM8991_RIGHT_LINE_INPUT_1_2_VOLUME,
+ WM8991_RI12MUTE_BIT, 1, 0),
+
+ SOC_WM899X_OUTPGA_SINGLE_R_TLV("RIN34 Volume",
+ WM8991_RIGHT_LINE_INPUT_3_4_VOLUME,
+ WM8991_RIN34VOL_SHIFT,
+ WM8991_RIN34VOL_MASK,
+ 0,
+ in_pga_tlv),
+
+ SOC_SINGLE("RIN34 ZC Switch", WM8991_RIGHT_LINE_INPUT_3_4_VOLUME,
+ WM8991_RI34ZC_BIT, 1, 0),
+
+ SOC_SINGLE("RIN34 Mute Switch", WM8991_RIGHT_LINE_INPUT_3_4_VOLUME,
+ WM8991_RI34MUTE_BIT, 1, 0),
+};
+
+/*
+ * _DAPM_ Controls
+ */
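+/*
+ * The INMIXL/INMIXR mixers and AINLMUX/AINRMUX muxes have no real power bits;
+ * their state is tracked in spare bits of the internal INTDRIVBITS register
+ * ("fakepower") and this event handler maps that state onto the real
+ * AINL_ENA/AINR_ENA bits in Power Management (2).
+ */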
+static int inmixer_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ u16 reg, fakepower;
+
+ reg = snd_soc_read(w->codec, WM8991_POWER_MANAGEMENT_2);
+ fakepower = snd_soc_read(w->codec, WM8991_INTDRIVBITS);
+
+ if (fakepower & ((1 << WM8991_INMIXL_PWR_BIT) |
+ (1 << WM8991_AINLMUX_PWR_BIT)))
+ reg |= WM8991_AINL_ENA;
+ else
+ reg &= ~WM8991_AINL_ENA;
+
+ if (fakepower & ((1 << WM8991_INMIXR_PWR_BIT) |
+ (1 << WM8991_AINRMUX_PWR_BIT)))
+ reg |= WM8991_AINR_ENA;
+ else
+ reg &= ~WM8991_AINR_ENA;
+
+ snd_soc_write(w->codec, WM8991_POWER_MANAGEMENT_2, reg);
+ return 0;
+}
+
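+/*
+ * The DAC-to-speaker-mixer and DAC-to-output-mixer paths are mutually
+ * exclusive, so refuse to enable one route while the register read shows the
+ * conflicting route is already in use.
+ */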
+static int outmixer_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ u32 reg_shift = kcontrol->private_value & 0xfff;
+ int ret = 0;
+ u16 reg;
+
+ switch (reg_shift) {
+ case WM8991_SPEAKER_MIXER | (WM8991_LDSPK_BIT << 8):
+ reg = snd_soc_read(w->codec, WM8991_OUTPUT_MIXER1);
+ if (reg & WM8991_LDLO) {
+ printk(KERN_WARNING
+ "Cannot set as Output Mixer 1 LDLO Set\n");
+ ret = -1;
+ }
+ break;
+
+ case WM8991_SPEAKER_MIXER | (WM8991_RDSPK_BIT << 8):
+ reg = snd_soc_read(w->codec, WM8991_OUTPUT_MIXER2);
+ if (reg & WM8991_RDRO) {
+ printk(KERN_WARNING
+ "Cannot set as Output Mixer 2 RDRO Set\n");
+ ret = -1;
+ }
+ break;
+
+ case WM8991_OUTPUT_MIXER1 | (WM8991_LDLO_BIT << 8):
+ reg = snd_soc_read(w->codec, WM8991_SPEAKER_MIXER);
+ if (reg & WM8991_LDSPK) {
+ printk(KERN_WARNING
+ "Cannot set as Speaker Mixer LDSPK Set\n");
+ ret = -1;
+ }
+ break;
+
+ case WM8991_OUTPUT_MIXER2 | (WM8991_RDRO_BIT << 8):
+ reg = snd_soc_read(w->codec, WM8991_SPEAKER_MIXER);
+ if (reg & WM8991_RDSPK) {
+ printk(KERN_WARNING
+ "Cannot set as Speaker Mixer RDSPK Set\n");
+ ret = -1;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+/* INMIX dB values */
+static const unsigned int in_mix_tlv[] = {
+ TLV_DB_RANGE_HEAD(1),
+ 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
+};
+
+/* Left In PGA Connections */
+static const struct snd_kcontrol_new wm8991_dapm_lin12_pga_controls[] = {
+ SOC_DAPM_SINGLE("LIN1 Switch", WM8991_INPUT_MIXER2, WM8991_LMN1_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LIN2 Switch", WM8991_INPUT_MIXER2, WM8991_LMP2_BIT, 1, 0),
+};
+
+static const struct snd_kcontrol_new wm8991_dapm_lin34_pga_controls[] = {
+ SOC_DAPM_SINGLE("LIN3 Switch", WM8991_INPUT_MIXER2, WM8991_LMN3_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LIN4 Switch", WM8991_INPUT_MIXER2, WM8991_LMP4_BIT, 1, 0),
+};
+
+/* Right In PGA Connections */
+static const struct snd_kcontrol_new wm8991_dapm_rin12_pga_controls[] = {
+ SOC_DAPM_SINGLE("RIN1 Switch", WM8991_INPUT_MIXER2, WM8991_RMN1_BIT, 1, 0),
+ SOC_DAPM_SINGLE("RIN2 Switch", WM8991_INPUT_MIXER2, WM8991_RMP2_BIT, 1, 0),
+};
+
+static const struct snd_kcontrol_new wm8991_dapm_rin34_pga_controls[] = {
+ SOC_DAPM_SINGLE("RIN3 Switch", WM8991_INPUT_MIXER2, WM8991_RMN3_BIT, 1, 0),
+ SOC_DAPM_SINGLE("RIN4 Switch", WM8991_INPUT_MIXER2, WM8991_RMP4_BIT, 1, 0),
+};
+
+/* INMIXL */
+static const struct snd_kcontrol_new wm8991_dapm_inmixl_controls[] = {
+ SOC_DAPM_SINGLE_TLV("Record Left Volume", WM8991_INPUT_MIXER3,
+ WM8991_LDBVOL_SHIFT, WM8991_LDBVOL_MASK, 0, in_mix_tlv),
+ SOC_DAPM_SINGLE_TLV("LIN2 Volume", WM8991_INPUT_MIXER5, WM8991_LI2BVOL_SHIFT,
+ 7, 0, in_mix_tlv),
+ SOC_DAPM_SINGLE("LINPGA12 Switch", WM8991_INPUT_MIXER3, WM8991_L12MNB_BIT,
+ 1, 0),
+ SOC_DAPM_SINGLE("LINPGA34 Switch", WM8991_INPUT_MIXER3, WM8991_L34MNB_BIT,
+ 1, 0),
+};
+
+/* INMIXR */
+static const struct snd_kcontrol_new wm8991_dapm_inmixr_controls[] = {
+ SOC_DAPM_SINGLE_TLV("Record Right Volume", WM8991_INPUT_MIXER4,
+ WM8991_RDBVOL_SHIFT, WM8991_RDBVOL_MASK, 0, in_mix_tlv),
+ SOC_DAPM_SINGLE_TLV("RIN2 Volume", WM8991_INPUT_MIXER6, WM8991_RI2BVOL_SHIFT,
+ 7, 0, in_mix_tlv),
+ SOC_DAPM_SINGLE("RINPGA12 Switch", WM8991_INPUT_MIXER3, WM8991_L12MNB_BIT,
+ 1, 0),
+ SOC_DAPM_SINGLE("RINPGA34 Switch", WM8991_INPUT_MIXER3, WM8991_L34MNB_BIT,
+ 1, 0),
+};
+
+/* AINLMUX */
+static const char *wm8991_ainlmux[] =
+{"INMIXL Mix", "RXVOICE Mix", "DIFFINL Mix"};
+
+static const struct soc_enum wm8991_ainlmux_enum =
+ SOC_ENUM_SINGLE(WM8991_INPUT_MIXER1, WM8991_AINLMODE_SHIFT,
+ ARRAY_SIZE(wm8991_ainlmux), wm8991_ainlmux);
+
+static const struct snd_kcontrol_new wm8991_dapm_ainlmux_controls =
+ SOC_DAPM_ENUM("Route", wm8991_ainlmux_enum);
+
+/* DIFFINL */
+
+/* AINRMUX */
+static const char *wm8991_ainrmux[] =
+{"INMIXR Mix", "RXVOICE Mix", "DIFFINR Mix"};
+
+static const struct soc_enum wm8991_ainrmux_enum =
+ SOC_ENUM_SINGLE(WM8991_INPUT_MIXER1, WM8991_AINRMODE_SHIFT,
+ ARRAY_SIZE(wm8991_ainrmux), wm8991_ainrmux);
+
+static const struct snd_kcontrol_new wm8991_dapm_ainrmux_controls =
+ SOC_DAPM_ENUM("Route", wm8991_ainrmux_enum);
+
+/* RXVOICE */
+static const struct snd_kcontrol_new wm8991_dapm_rxvoice_controls[] = {
+ SOC_DAPM_SINGLE_TLV("LIN4RXN", WM8991_INPUT_MIXER5, WM8991_LR4BVOL_SHIFT,
+ WM8991_LR4BVOL_MASK, 0, in_mix_tlv),
+ SOC_DAPM_SINGLE_TLV("RIN4RXP", WM8991_INPUT_MIXER6, WM8991_RL4BVOL_SHIFT,
+ WM8991_RL4BVOL_MASK, 0, in_mix_tlv),
+};
+
+/* LOMIX */
+static const struct snd_kcontrol_new wm8991_dapm_lomix_controls[] = {
+ SOC_DAPM_SINGLE("LOMIX Right ADC Bypass Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LRBLO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOMIX Left ADC Bypass Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LLBLO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOMIX RIN3 Bypass Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LRI3LO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOMIX LIN3 Bypass Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LLI3LO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOMIX RIN12 PGA Bypass Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LR12LO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOMIX LIN12 PGA Bypass Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LL12LO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOMIX Left DAC Switch", WM8991_OUTPUT_MIXER1,
+ WM8991_LDLO_BIT, 1, 0),
+};
+
+/* ROMIX */
+static const struct snd_kcontrol_new wm8991_dapm_romix_controls[] = {
+ SOC_DAPM_SINGLE("ROMIX Left ADC Bypass Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RLBRO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROMIX Right ADC Bypass Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RRBRO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROMIX LIN3 Bypass Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RLI3RO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROMIX RIN3 Bypass Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RRI3RO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROMIX LIN12 PGA Bypass Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RL12RO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROMIX RIN12 PGA Bypass Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RR12RO_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROMIX Right DAC Switch", WM8991_OUTPUT_MIXER2,
+ WM8991_RDRO_BIT, 1, 0),
+};
+
+/* LONMIX */
+static const struct snd_kcontrol_new wm8991_dapm_lonmix_controls[] = {
+ SOC_DAPM_SINGLE("LONMIX Left Mixer PGA Switch", WM8991_LINE_MIXER1,
+ WM8991_LLOPGALON_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LONMIX Right Mixer PGA Switch", WM8991_LINE_MIXER1,
+ WM8991_LROPGALON_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LONMIX Inverted LOP Switch", WM8991_LINE_MIXER1,
+ WM8991_LOPLON_BIT, 1, 0),
+};
+
+/* LOPMIX */
+static const struct snd_kcontrol_new wm8991_dapm_lopmix_controls[] = {
+ SOC_DAPM_SINGLE("LOPMIX Right Mic Bypass Switch", WM8991_LINE_MIXER1,
+ WM8991_LR12LOP_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOPMIX Left Mic Bypass Switch", WM8991_LINE_MIXER1,
+ WM8991_LL12LOP_BIT, 1, 0),
+ SOC_DAPM_SINGLE("LOPMIX Left Mixer PGA Switch", WM8991_LINE_MIXER1,
+ WM8991_LLOPGALOP_BIT, 1, 0),
+};
+
+/* RONMIX */
+static const struct snd_kcontrol_new wm8991_dapm_ronmix_controls[] = {
+ SOC_DAPM_SINGLE("RONMIX Right Mixer PGA Switch", WM8991_LINE_MIXER2,
+ WM8991_RROPGARON_BIT, 1, 0),
+ SOC_DAPM_SINGLE("RONMIX Left Mixer PGA Switch", WM8991_LINE_MIXER2,
+ WM8991_RLOPGARON_BIT, 1, 0),
+ SOC_DAPM_SINGLE("RONMIX Inverted ROP Switch", WM8991_LINE_MIXER2,
+ WM8991_ROPRON_BIT, 1, 0),
+};
+
+/* ROPMIX */
+static const struct snd_kcontrol_new wm8991_dapm_ropmix_controls[] = {
+ SOC_DAPM_SINGLE("ROPMIX Left Mic Bypass Switch", WM8991_LINE_MIXER2,
+ WM8991_RL12ROP_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROPMIX Right Mic Bypass Switch", WM8991_LINE_MIXER2,
+ WM8991_RR12ROP_BIT, 1, 0),
+ SOC_DAPM_SINGLE("ROPMIX Right Mixer PGA Switch", WM8991_LINE_MIXER2,
+ WM8991_RROPGAROP_BIT, 1, 0),
+};
+
+/* OUT3MIX */
+static const struct snd_kcontrol_new wm8991_dapm_out3mix_controls[] = {
+ SOC_DAPM_SINGLE("OUT3MIX LIN4RXN Bypass Switch", WM8991_OUT3_4_MIXER,
+ WM8991_LI4O3_BIT, 1, 0),
+ SOC_DAPM_SINGLE("OUT3MIX Left Out PGA Switch", WM8991_OUT3_4_MIXER,
+ WM8991_LPGAO3_BIT, 1, 0),
+};
+
+/* OUT4MIX */
+static const struct snd_kcontrol_new wm8991_dapm_out4mix_controls[] = {
+ SOC_DAPM_SINGLE("OUT4MIX Right Out PGA Switch", WM8991_OUT3_4_MIXER,
+ WM8991_RPGAO4_BIT, 1, 0),
+ SOC_DAPM_SINGLE("OUT4MIX RIN4RXP Bypass Switch", WM8991_OUT3_4_MIXER,
+ WM8991_RI4O4_BIT, 1, 0),
+};
+
+/* SPKMIX */
+static const struct snd_kcontrol_new wm8991_dapm_spkmix_controls[] = {
+ SOC_DAPM_SINGLE("SPKMIX LIN2 Bypass Switch", WM8991_SPEAKER_MIXER,
+ WM8991_LI2SPK_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX LADC Bypass Switch", WM8991_SPEAKER_MIXER,
+ WM8991_LB2SPK_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX Left Mixer PGA Switch", WM8991_SPEAKER_MIXER,
+ WM8991_LOPGASPK_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX Left DAC Switch", WM8991_SPEAKER_MIXER,
+ WM8991_LDSPK_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX Right DAC Switch", WM8991_SPEAKER_MIXER,
+ WM8991_RDSPK_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX Right Mixer PGA Switch", WM8991_SPEAKER_MIXER,
+ WM8991_ROPGASPK_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX RADC Bypass Switch", WM8991_SPEAKER_MIXER,
+ WM8991_RL12ROP_BIT, 1, 0),
+ SOC_DAPM_SINGLE("SPKMIX RIN2 Bypass Switch", WM8991_SPEAKER_MIXER,
+ WM8991_RI2SPK_BIT, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8991_dapm_widgets[] = {
+ /* Input Side */
+ /* Input Lines */
+ SND_SOC_DAPM_INPUT("LIN1"),
+ SND_SOC_DAPM_INPUT("LIN2"),
+ SND_SOC_DAPM_INPUT("LIN3"),
+ SND_SOC_DAPM_INPUT("LIN4RXN"),
+ SND_SOC_DAPM_INPUT("RIN3"),
+ SND_SOC_DAPM_INPUT("RIN4RXP"),
+ SND_SOC_DAPM_INPUT("RIN1"),
+ SND_SOC_DAPM_INPUT("RIN2"),
+ SND_SOC_DAPM_INPUT("Internal ADC Source"),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8991_POWER_MANAGEMENT_2,
+ WM8991_ADCL_ENA_BIT, 0),
+ SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8991_POWER_MANAGEMENT_2,
+ WM8991_ADCR_ENA_BIT, 0),
+
+ /* Input PGAs */
+ SND_SOC_DAPM_MIXER("LIN12 PGA", WM8991_POWER_MANAGEMENT_2, WM8991_LIN12_ENA_BIT,
+ 0, &wm8991_dapm_lin12_pga_controls[0],
+ ARRAY_SIZE(wm8991_dapm_lin12_pga_controls)),
+ SND_SOC_DAPM_MIXER("LIN34 PGA", WM8991_POWER_MANAGEMENT_2, WM8991_LIN34_ENA_BIT,
+ 0, &wm8991_dapm_lin34_pga_controls[0],
+ ARRAY_SIZE(wm8991_dapm_lin34_pga_controls)),
+ SND_SOC_DAPM_MIXER("RIN12 PGA", WM8991_POWER_MANAGEMENT_2, WM8991_RIN12_ENA_BIT,
+ 0, &wm8991_dapm_rin12_pga_controls[0],
+ ARRAY_SIZE(wm8991_dapm_rin12_pga_controls)),
+ SND_SOC_DAPM_MIXER("RIN34 PGA", WM8991_POWER_MANAGEMENT_2, WM8991_RIN34_ENA_BIT,
+ 0, &wm8991_dapm_rin34_pga_controls[0],
+ ARRAY_SIZE(wm8991_dapm_rin34_pga_controls)),
+
+ /* INMIXL */
+ SND_SOC_DAPM_MIXER_E("INMIXL", WM8991_INTDRIVBITS, WM8991_INMIXL_PWR_BIT, 0,
+ &wm8991_dapm_inmixl_controls[0],
+ ARRAY_SIZE(wm8991_dapm_inmixl_controls),
+ inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* AINLMUX */
+ SND_SOC_DAPM_MUX_E("AINLMUX", WM8991_INTDRIVBITS, WM8991_AINLMUX_PWR_BIT, 0,
+ &wm8991_dapm_ainlmux_controls, inmixer_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* INMIXR */
+ SND_SOC_DAPM_MIXER_E("INMIXR", WM8991_INTDRIVBITS, WM8991_INMIXR_PWR_BIT, 0,
+ &wm8991_dapm_inmixr_controls[0],
+ ARRAY_SIZE(wm8991_dapm_inmixr_controls),
+ inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* AINRMUX */
+ SND_SOC_DAPM_MUX_E("AINRMUX", WM8991_INTDRIVBITS, WM8991_AINRMUX_PWR_BIT, 0,
+ &wm8991_dapm_ainrmux_controls, inmixer_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Output Side */
+ /* DACs */
+ SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8991_POWER_MANAGEMENT_3,
+ WM8991_DACL_ENA_BIT, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8991_POWER_MANAGEMENT_3,
+ WM8991_DACR_ENA_BIT, 0),
+
+ /* LOMIX */
+ SND_SOC_DAPM_MIXER_E("LOMIX", WM8991_POWER_MANAGEMENT_3, WM8991_LOMIX_ENA_BIT,
+ 0, &wm8991_dapm_lomix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_lomix_controls),
+ outmixer_event, SND_SOC_DAPM_PRE_REG),
+
+ /* LONMIX */
+ SND_SOC_DAPM_MIXER("LONMIX", WM8991_POWER_MANAGEMENT_3, WM8991_LON_ENA_BIT, 0,
+ &wm8991_dapm_lonmix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_lonmix_controls)),
+
+ /* LOPMIX */
+ SND_SOC_DAPM_MIXER("LOPMIX", WM8991_POWER_MANAGEMENT_3, WM8991_LOP_ENA_BIT, 0,
+ &wm8991_dapm_lopmix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_lopmix_controls)),
+
+ /* OUT3MIX */
+ SND_SOC_DAPM_MIXER("OUT3MIX", WM8991_POWER_MANAGEMENT_1, WM8991_OUT3_ENA_BIT, 0,
+ &wm8991_dapm_out3mix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_out3mix_controls)),
+
+ /* SPKMIX */
+ SND_SOC_DAPM_MIXER_E("SPKMIX", WM8991_POWER_MANAGEMENT_1, WM8991_SPK_ENA_BIT, 0,
+ &wm8991_dapm_spkmix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_spkmix_controls), outmixer_event,
+ SND_SOC_DAPM_PRE_REG),
+
+ /* OUT4MIX */
+ SND_SOC_DAPM_MIXER("OUT4MIX", WM8991_POWER_MANAGEMENT_1, WM8991_OUT4_ENA_BIT, 0,
+ &wm8991_dapm_out4mix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_out4mix_controls)),
+
+ /* ROPMIX */
+ SND_SOC_DAPM_MIXER("ROPMIX", WM8991_POWER_MANAGEMENT_3, WM8991_ROP_ENA_BIT, 0,
+ &wm8991_dapm_ropmix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_ropmix_controls)),
+
+ /* RONMIX */
+ SND_SOC_DAPM_MIXER("RONMIX", WM8991_POWER_MANAGEMENT_3, WM8991_RON_ENA_BIT, 0,
+ &wm8991_dapm_ronmix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_ronmix_controls)),
+
+ /* ROMIX */
+ SND_SOC_DAPM_MIXER_E("ROMIX", WM8991_POWER_MANAGEMENT_3, WM8991_ROMIX_ENA_BIT,
+ 0, &wm8991_dapm_romix_controls[0],
+ ARRAY_SIZE(wm8991_dapm_romix_controls),
+ outmixer_event, SND_SOC_DAPM_PRE_REG),
+
+ /* LOUT PGA */
+ SND_SOC_DAPM_PGA("LOUT PGA", WM8991_POWER_MANAGEMENT_1, WM8991_LOUT_ENA_BIT, 0,
+ NULL, 0),
+
+ /* ROUT PGA */
+ SND_SOC_DAPM_PGA("ROUT PGA", WM8991_POWER_MANAGEMENT_1, WM8991_ROUT_ENA_BIT, 0,
+ NULL, 0),
+
+ /* LOPGA */
+ SND_SOC_DAPM_PGA("LOPGA", WM8991_POWER_MANAGEMENT_3, WM8991_LOPGA_ENA_BIT, 0,
+ NULL, 0),
+
+ /* ROPGA */
+ SND_SOC_DAPM_PGA("ROPGA", WM8991_POWER_MANAGEMENT_3, WM8991_ROPGA_ENA_BIT, 0,
+ NULL, 0),
+
+ /* MICBIAS */
+ SND_SOC_DAPM_MICBIAS("MICBIAS", WM8991_POWER_MANAGEMENT_1,
+ WM8991_MICBIAS_ENA_BIT, 0),
+
+ SND_SOC_DAPM_OUTPUT("LON"),
+ SND_SOC_DAPM_OUTPUT("LOP"),
+ SND_SOC_DAPM_OUTPUT("OUT3"),
+ SND_SOC_DAPM_OUTPUT("LOUT"),
+ SND_SOC_DAPM_OUTPUT("SPKN"),
+ SND_SOC_DAPM_OUTPUT("SPKP"),
+ SND_SOC_DAPM_OUTPUT("ROUT"),
+ SND_SOC_DAPM_OUTPUT("OUT4"),
+ SND_SOC_DAPM_OUTPUT("ROP"),
+ SND_SOC_DAPM_OUTPUT("RON"),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+
+ SND_SOC_DAPM_OUTPUT("Internal DAC Sink"),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+ /* Make DACs turn on when playing even if not mixed into any outputs */
+ {"Internal DAC Sink", NULL, "Left DAC"},
+ {"Internal DAC Sink", NULL, "Right DAC"},
+
+ /* Make ADCs turn on when recording even if not mixed from any inputs */
+ {"Left ADC", NULL, "Internal ADC Source"},
+ {"Right ADC", NULL, "Internal ADC Source"},
+
+ /* Input Side */
+ /* LIN12 PGA */
+ {"LIN12 PGA", "LIN1 Switch", "LIN1"},
+ {"LIN12 PGA", "LIN2 Switch", "LIN2"},
+ /* LIN34 PGA */
+ {"LIN34 PGA", "LIN3 Switch", "LIN3"},
+ {"LIN34 PGA", "LIN4 Switch", "LIN4RXN"},
+ /* INMIXL */
+ {"INMIXL", "Record Left Volume", "LOMIX"},
+ {"INMIXL", "LIN2 Volume", "LIN2"},
+ {"INMIXL", "LINPGA12 Switch", "LIN12 PGA"},
+ {"INMIXL", "LINPGA34 Switch", "LIN34 PGA"},
+ /* AINLMUX */
+ {"AINLMUX", "INMIXL Mix", "INMIXL"},
+ {"AINLMUX", "DIFFINL Mix", "LIN12 PGA"},
+ {"AINLMUX", "DIFFINL Mix", "LIN34 PGA"},
+ {"AINLMUX", "RXVOICE Mix", "LIN4RXN"},
+ {"AINLMUX", "RXVOICE Mix", "RIN4RXP"},
+ /* ADC */
+ {"Left ADC", NULL, "AINLMUX"},
+
+ /* RIN12 PGA */
+ {"RIN12 PGA", "RIN1 Switch", "RIN1"},
+ {"RIN12 PGA", "RIN2 Switch", "RIN2"},
+ /* RIN34 PGA */
+ {"RIN34 PGA", "RIN3 Switch", "RIN3"},
+ {"RIN34 PGA", "RIN4 Switch", "RIN4RXP"},
+ /* INMIXR */
+ {"INMIXR", "Record Right Volume", "ROMIX"},
+ {"INMIXR", "RIN2 Volume", "RIN2"},
+ {"INMIXR", "RINPGA12 Switch", "RIN12 PGA"},
+ {"INMIXR", "RINPGA34 Switch", "RIN34 PGA"},
+ /* AINRMUX */
+ {"AINRMUX", "INMIXR Mix", "INMIXR"},
+ {"AINRMUX", "DIFFINR Mix", "RIN12 PGA"},
+ {"AINRMUX", "DIFFINR Mix", "RIN34 PGA"},
+ {"AINRMUX", "RXVOICE Mix", "LIN4RXN"},
+ {"AINRMUX", "RXVOICE Mix", "RIN4RXP"},
+ /* ADC */
+ {"Right ADC", NULL, "AINRMUX"},
+
+ /* LOMIX */
+ {"LOMIX", "LOMIX RIN3 Bypass Switch", "RIN3"},
+ {"LOMIX", "LOMIX LIN3 Bypass Switch", "LIN3"},
+ {"LOMIX", "LOMIX LIN12 PGA Bypass Switch", "LIN12 PGA"},
+ {"LOMIX", "LOMIX RIN12 PGA Bypass Switch", "RIN12 PGA"},
+ {"LOMIX", "LOMIX Right ADC Bypass Switch", "AINRMUX"},
+ {"LOMIX", "LOMIX Left ADC Bypass Switch", "AINLMUX"},
+ {"LOMIX", "LOMIX Left DAC Switch", "Left DAC"},
+
+ /* ROMIX */
+ {"ROMIX", "ROMIX RIN3 Bypass Switch", "RIN3"},
+ {"ROMIX", "ROMIX LIN3 Bypass Switch", "LIN3"},
+ {"ROMIX", "ROMIX LIN12 PGA Bypass Switch", "LIN12 PGA"},
+ {"ROMIX", "ROMIX RIN12 PGA Bypass Switch", "RIN12 PGA"},
+ {"ROMIX", "ROMIX Right ADC Bypass Switch", "AINRMUX"},
+ {"ROMIX", "ROMIX Left ADC Bypass Switch", "AINLMUX"},
+ {"ROMIX", "ROMIX Right DAC Switch", "Right DAC"},
+
+ /* SPKMIX */
+ {"SPKMIX", "SPKMIX LIN2 Bypass Switch", "LIN2"},
+ {"SPKMIX", "SPKMIX RIN2 Bypass Switch", "RIN2"},
+ {"SPKMIX", "SPKMIX LADC Bypass Switch", "AINLMUX"},
+ {"SPKMIX", "SPKMIX RADC Bypass Switch", "AINRMUX"},
+ {"SPKMIX", "SPKMIX Left Mixer PGA Switch", "LOPGA"},
+ {"SPKMIX", "SPKMIX Right Mixer PGA Switch", "ROPGA"},
+ {"SPKMIX", "SPKMIX Right DAC Switch", "Right DAC"},
+ {"SPKMIX", "SPKMIX Left DAC Switch", "Right DAC"},
+
+ /* LONMIX */
+ {"LONMIX", "LONMIX Left Mixer PGA Switch", "LOPGA"},
+ {"LONMIX", "LONMIX Right Mixer PGA Switch", "ROPGA"},
+ {"LONMIX", "LONMIX Inverted LOP Switch", "LOPMIX"},
+
+ /* LOPMIX */
+ {"LOPMIX", "LOPMIX Right Mic Bypass Switch", "RIN12 PGA"},
+ {"LOPMIX", "LOPMIX Left Mic Bypass Switch", "LIN12 PGA"},
+ {"LOPMIX", "LOPMIX Left Mixer PGA Switch", "LOPGA"},
+
+ /* OUT3MIX */
+ {"OUT3MIX", "OUT3MIX LIN4RXN Bypass Switch", "LIN4RXN"},
+ {"OUT3MIX", "OUT3MIX Left Out PGA Switch", "LOPGA"},
+
+ /* OUT4MIX */
+ {"OUT4MIX", "OUT4MIX Right Out PGA Switch", "ROPGA"},
+ {"OUT4MIX", "OUT4MIX RIN4RXP Bypass Switch", "RIN4RXP"},
+
+ /* RONMIX */
+ {"RONMIX", "RONMIX Right Mixer PGA Switch", "ROPGA"},
+ {"RONMIX", "RONMIX Left Mixer PGA Switch", "LOPGA"},
+ {"RONMIX", "RONMIX Inverted ROP Switch", "ROPMIX"},
+
+ /* ROPMIX */
+ {"ROPMIX", "ROPMIX Left Mic Bypass Switch", "LIN12 PGA"},
+ {"ROPMIX", "ROPMIX Right Mic Bypass Switch", "RIN12 PGA"},
+ {"ROPMIX", "ROPMIX Right Mixer PGA Switch", "ROPGA"},
+
+ /* Out Mixer PGAs */
+ {"LOPGA", NULL, "LOMIX"},
+ {"ROPGA", NULL, "ROMIX"},
+
+ {"LOUT PGA", NULL, "LOMIX"},
+ {"ROUT PGA", NULL, "ROMIX"},
+
+ /* Output Pins */
+ {"LON", NULL, "LONMIX"},
+ {"LOP", NULL, "LOPMIX"},
+ {"OUT", NULL, "OUT3MIX"},
+ {"LOUT", NULL, "LOUT PGA"},
+ {"SPKN", NULL, "SPKMIX"},
+ {"ROUT", NULL, "ROUT PGA"},
+ {"OUT4", NULL, "OUT4MIX"},
+ {"ROP", NULL, "ROPMIX"},
+ {"RON", NULL, "RONMIX"},
+};
+
+/* PLL divisors */
+struct _pll_div {
+ u32 div2;
+ u32 n;
+ u32 k;
+};
+
+/* The size in bits of the PLL divide (2^16), multiplied by 10
+ * to allow rounding later */
+#define FIXED_PLL_SIZE ((1 << 16) * 10)
+
+static void pll_factors(struct _pll_div *pll_div, unsigned int target,
+ unsigned int source)
+{
+ u64 Kpart;
+ unsigned int K, Ndiv, Nmod;
+
+
+ Ndiv = target / source;
+ if (Ndiv < 6) {
+ source >>= 1;
+ pll_div->div2 = 1;
+ Ndiv = target / source;
+ } else
+ pll_div->div2 = 0;
+
+ if ((Ndiv < 6) || (Ndiv > 12))
+ printk(KERN_WARNING
+ "WM8991 N value outwith recommended range! N = %d\n", Ndiv);
+
+ pll_div->n = Ndiv;
+ Nmod = target % source;
+ Kpart = FIXED_PLL_SIZE * (long long)Nmod;
+
+ do_div(Kpart, source);
+
+ K = Kpart & 0xFFFFFFFF;
+
+ /* Check if we need to round */
+ if ((K % 10) >= 5)
+ K += 5;
+
+ /* Move down to proper range now rounding is done */
+ K /= 10;
+
+ pll_div->k = K;
+}
+
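+/*
+ * Worked example (illustrative values, not from the datasheet): with a 12MHz
+ * clock in and 12.288MHz requested out, the PLL target is
+ * 4 * 12.288MHz = 49.152MHz.  49152000 / 12000000 = 4 is below 6, so the
+ * pre-divider is used (div2 = 1) and the source halved to 6MHz, giving N = 8.
+ * The 1152000Hz remainder scaled by 2^16 and divided by 6MHz rounds to
+ * K = 12583 (0x3127), i.e. fPLL = (8 + 12583/65536) * 6MHz, which is
+ * approximately 49.152MHz.
+ */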
+static int wm8991_set_dai_pll(struct snd_soc_dai *codec_dai,
+ int pll_id, int src, unsigned int freq_in, unsigned int freq_out)
+{
+ u16 reg;
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct _pll_div pll_div;
+
+ if (freq_in && freq_out) {
+ pll_factors(&pll_div, freq_out * 4, freq_in);
+
+ /* Turn on PLL */
+ reg = snd_soc_read(codec, WM8991_POWER_MANAGEMENT_2);
+ reg |= WM8991_PLL_ENA;
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_2, reg);
+
+ /* sysclk comes from PLL */
+ reg = snd_soc_read(codec, WM8991_CLOCKING_2);
+ snd_soc_write(codec, WM8991_CLOCKING_2, reg | WM8991_SYSCLK_SRC);
+
+ /* set up N, fractional mode and pre-divisor if necessary */
+ snd_soc_write(codec, WM8991_PLL1, pll_div.n | WM8991_SDM |
+ (pll_div.div2 ? WM8991_PRESCALE : 0));
+ snd_soc_write(codec, WM8991_PLL2, (u8)(pll_div.k>>8));
+ snd_soc_write(codec, WM8991_PLL3, (u8)(pll_div.k & 0xFF));
+ } else {
+ /* Turn off PLL */
+ reg = snd_soc_read(codec, WM8991_POWER_MANAGEMENT_2);
+ reg &= ~WM8991_PLL_ENA;
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_2, reg);
+ }
+ return 0;
+}
+
+/*
+ * Sets the ADC and DAC audio interface format.
+ */
+static int wm8991_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 audio1, audio3;
+
+ audio1 = snd_soc_read(codec, WM8991_AUDIO_INTERFACE_1);
+ audio3 = snd_soc_read(codec, WM8991_AUDIO_INTERFACE_3);
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ audio3 &= ~WM8991_AIF_MSTR1;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ audio3 |= WM8991_AIF_MSTR1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ audio1 &= ~WM8991_AIF_FMT_MASK;
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ audio1 |= WM8991_AIF_TMF_I2S;
+ audio1 &= ~WM8991_AIF_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ audio1 |= WM8991_AIF_TMF_RIGHTJ;
+ audio1 &= ~WM8991_AIF_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ audio1 |= WM8991_AIF_TMF_LEFTJ;
+ audio1 &= ~WM8991_AIF_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ audio1 |= WM8991_AIF_TMF_DSP;
+ audio1 &= ~WM8991_AIF_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ audio1 |= WM8991_AIF_TMF_DSP | WM8991_AIF_LRCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, WM8991_AUDIO_INTERFACE_1, audio1);
+ snd_soc_write(codec, WM8991_AUDIO_INTERFACE_3, audio3);
+ return 0;
+}
+
+static int wm8991_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
+ int div_id, int div)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 reg;
+
+ switch (div_id) {
+ case WM8991_MCLK_DIV:
+ reg = snd_soc_read(codec, WM8991_CLOCKING_2) &
+ ~WM8991_MCLK_DIV_MASK;
+ snd_soc_write(codec, WM8991_CLOCKING_2, reg | div);
+ break;
+ case WM8991_DACCLK_DIV:
+ reg = snd_soc_read(codec, WM8991_CLOCKING_2) &
+ ~WM8991_DAC_CLKDIV_MASK;
+ snd_soc_write(codec, WM8991_CLOCKING_2, reg | div);
+ break;
+ case WM8991_ADCCLK_DIV:
+ reg = snd_soc_read(codec, WM8991_CLOCKING_2) &
+ ~WM8991_ADC_CLKDIV_MASK;
+ snd_soc_write(codec, WM8991_CLOCKING_2, reg | div);
+ break;
+ case WM8991_BCLK_DIV:
+ reg = snd_soc_read(codec, WM8991_CLOCKING_1) &
+ ~WM8991_BCLK_DIV_MASK;
+ snd_soc_write(codec, WM8991_CLOCKING_1, reg | div);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Set PCM DAI bit size.
+ */
+static int wm8991_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 audio1 = snd_soc_read(codec, WM8991_AUDIO_INTERFACE_1);
+
+ audio1 &= ~WM8991_AIF_WL_MASK;
+ /* bit size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ audio1 |= WM8991_AIF_WL_20BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ audio1 |= WM8991_AIF_WL_24BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ audio1 |= WM8991_AIF_WL_32BITS;
+ break;
+ }
+
+ snd_soc_write(codec, WM8991_AUDIO_INTERFACE_1, audio1);
+ return 0;
+}
+
+static int wm8991_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 val;
+
+ val = snd_soc_read(codec, WM8991_DAC_CTRL) & ~WM8991_DAC_MUTE;
+ if (mute)
+ snd_soc_write(codec, WM8991_DAC_CTRL, val | WM8991_DAC_MUTE);
+ else
+ snd_soc_write(codec, WM8991_DAC_CTRL, val);
+ return 0;
+}
+
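+/*
+ * Bias management: moving from OFF to STANDBY runs the anti-pop start-up
+ * sequence (discharge the outputs, soft-start VMID, then enable VREF),
+ * PREPARE selects the lower-impedance 2x50k VMID divider, STANDBY falls back
+ * to 2x250k, and OFF reverses the start-up sequence before removing VREF and
+ * VMID.
+ */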
+static int wm8991_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ u16 val;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ /* VMID=2*50k */
+ val = snd_soc_read(codec, WM8991_POWER_MANAGEMENT_1) &
+ ~WM8991_VMID_MODE_MASK;
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, val | 0x2);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ snd_soc_cache_sync(codec);
+ /* Enable all output discharge bits */
+ snd_soc_write(codec, WM8991_ANTIPOP1, WM8991_DIS_LLINE |
+ WM8991_DIS_RLINE | WM8991_DIS_OUT3 |
+ WM8991_DIS_OUT4 | WM8991_DIS_LOUT |
+ WM8991_DIS_ROUT);
+
+ /* Enable POBCTRL, SOFT_ST, VMIDTOG and BUFDCOPEN */
+ snd_soc_write(codec, WM8991_ANTIPOP2, WM8991_SOFTST |
+ WM8991_BUFDCOPEN | WM8991_POBCTRL |
+ WM8991_VMIDTOG);
+
+ /* Delay to allow output caps to discharge */
+ msleep(300);
+
+ /* Disable VMIDTOG */
+ snd_soc_write(codec, WM8991_ANTIPOP2, WM8991_SOFTST |
+ WM8991_BUFDCOPEN | WM8991_POBCTRL);
+
+ /* disable all output discharge bits */
+ snd_soc_write(codec, WM8991_ANTIPOP1, 0);
+
+ /* Enable outputs */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x1b00);
+
+ msleep(50);
+
+ /* Enable VMID at 2x50k */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x1f02);
+
+ msleep(100);
+
+ /* Enable VREF */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x1f03);
+
+ msleep(600);
+
+ /* Enable BUFIOEN */
+ snd_soc_write(codec, WM8991_ANTIPOP2, WM8991_SOFTST |
+ WM8991_BUFDCOPEN | WM8991_POBCTRL |
+ WM8991_BUFIOEN);
+
+ /* Disable outputs */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x3);
+
+ /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
+ snd_soc_write(codec, WM8991_ANTIPOP2, WM8991_BUFIOEN);
+ }
+
+ /* VMID=2*250k */
+ val = snd_soc_read(codec, WM8991_POWER_MANAGEMENT_1) &
+ ~WM8991_VMID_MODE_MASK;
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, val | 0x4);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ /* Enable POBCTRL and SOFT_ST */
+ snd_soc_write(codec, WM8991_ANTIPOP2, WM8991_SOFTST |
+ WM8991_POBCTRL | WM8991_BUFIOEN);
+
+ /* Enable POBCTRL, SOFT_ST and BUFDCOPEN */
+ snd_soc_write(codec, WM8991_ANTIPOP2, WM8991_SOFTST |
+ WM8991_BUFDCOPEN | WM8991_POBCTRL |
+ WM8991_BUFIOEN);
+
+ /* mute DAC */
+ val = snd_soc_read(codec, WM8991_DAC_CTRL);
+ snd_soc_write(codec, WM8991_DAC_CTRL, val | WM8991_DAC_MUTE);
+
+ /* Enable any disabled outputs */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x1f03);
+
+ /* Disable VMID */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x1f01);
+
+ msleep(300);
+
+ /* Enable all output discharge bits */
+ snd_soc_write(codec, WM8991_ANTIPOP1, WM8991_DIS_LLINE |
+ WM8991_DIS_RLINE | WM8991_DIS_OUT3 |
+ WM8991_DIS_OUT4 | WM8991_DIS_LOUT |
+ WM8991_DIS_ROUT);
+
+ /* Disable VREF */
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, 0x0);
+
+ /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
+ snd_soc_write(codec, WM8991_ANTIPOP2, 0x0);
+ codec->cache_sync = 1;
+ break;
+ }
+
+ codec->dapm.bias_level = level;
+ return 0;
+}
+
+static int wm8991_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ wm8991_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static int wm8991_resume(struct snd_soc_codec *codec)
+{
+ wm8991_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ return 0;
+}
+
+/* power down chip */
+static int wm8991_remove(struct snd_soc_codec *codec)
+{
+ wm8991_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
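+/*
+ * Probe sequence: set up register cache I/O, reset the chip, bring it to
+ * standby, route the ADC LRCLK onto GPIO1, enable VREF/VMID and OPCLK,
+ * unmute the DAC and set default output volumes with the update bit set,
+ * then register the controls, DAPM widgets and routes.
+ */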
+static int wm8991_probe(struct snd_soc_codec *codec)
+{
+ struct wm8991_priv *wm8991;
+ int ret;
+ unsigned int reg;
+
+ wm8991 = snd_soc_codec_get_drvdata(codec);
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, wm8991->control_type);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret);
+ return ret;
+ }
+
+ ret = wm8991_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ return ret;
+ }
+
+ wm8991_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ reg = snd_soc_read(codec, WM8991_AUDIO_INTERFACE_4);
+ snd_soc_write(codec, WM8991_AUDIO_INTERFACE_4, reg | WM8991_ALRCGPIO1);
+
+ reg = snd_soc_read(codec, WM8991_GPIO1_GPIO2) &
+ ~WM8991_GPIO1_SEL_MASK;
+ snd_soc_write(codec, WM8991_GPIO1_GPIO2, reg | 1);
+
+ reg = snd_soc_read(codec, WM8991_POWER_MANAGEMENT_1);
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_1, reg | WM8991_VREF_ENA|
+ WM8991_VMID_MODE_MASK);
+
+ reg = snd_soc_read(codec, WM8991_POWER_MANAGEMENT_2);
+ snd_soc_write(codec, WM8991_POWER_MANAGEMENT_2, reg | WM8991_OPCLK_ENA);
+
+ snd_soc_write(codec, WM8991_DAC_CTRL, 0);
+ snd_soc_write(codec, WM8991_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8));
+ snd_soc_write(codec, WM8991_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8));
+
+ snd_soc_add_controls(codec, wm8991_snd_controls,
+ ARRAY_SIZE(wm8991_snd_controls));
+
+ snd_soc_dapm_new_controls(&codec->dapm, wm8991_dapm_widgets,
+ ARRAY_SIZE(wm8991_dapm_widgets));
+ snd_soc_dapm_add_routes(&codec->dapm, audio_map,
+ ARRAY_SIZE(audio_map));
+ return 0;
+}
+
+#define WM8991_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops wm8991_ops = {
+ .hw_params = wm8991_hw_params,
+ .digital_mute = wm8991_mute,
+ .set_fmt = wm8991_set_dai_fmt,
+ .set_clkdiv = wm8991_set_dai_clkdiv,
+ .set_pll = wm8991_set_dai_pll
+};
+
+/*
+ * The WM8991 supports 2 different and mutually exclusive DAI
+ * configurations.
+ *
+ * 1. ADC/DAC on Primary Interface
+ * 2. ADC on Primary Interface/DAC on secondary
+ */
+static struct snd_soc_dai_driver wm8991_dai = {
+ /* ADC/DAC on primary */
+ .name = "wm8991",
+ .id = 1,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = WM8991_FORMATS
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = WM8991_FORMATS
+ },
+ .ops = &wm8991_ops
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8991 = {
+ .probe = wm8991_probe,
+ .remove = wm8991_remove,
+ .suspend = wm8991_suspend,
+ .resume = wm8991_resume,
+ .set_bias_level = wm8991_set_bias_level,
+ .reg_cache_size = WM8991_MAX_REGISTER + 1,
+ .reg_word_size = sizeof(u16),
+ .reg_cache_default = wm8991_reg_defs
+};
+
+static __devinit int wm8991_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8991_priv *wm8991;
+ int ret;
+
+ wm8991 = kzalloc(sizeof *wm8991, GFP_KERNEL);
+ if (!wm8991)
+ return -ENOMEM;
+
+ wm8991->control_type = SND_SOC_I2C;
+ i2c_set_clientdata(i2c, wm8991);
+
+ ret = snd_soc_register_codec(&i2c->dev,
+ &soc_codec_dev_wm8991, &wm8991_dai, 1);
+ if (ret < 0)
+ kfree(wm8991);
+ return ret;
+}
+
+static __devexit int wm8991_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ kfree(i2c_get_clientdata(client));
+ return 0;
+}
+
+static const struct i2c_device_id wm8991_i2c_id[] = {
+ { "wm8991", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8991_i2c_id);
+
+static struct i2c_driver wm8991_i2c_driver = {
+ .driver = {
+ .name = "wm8991",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8991_i2c_probe,
+ .remove = __devexit_p(wm8991_i2c_remove),
+ .id_table = wm8991_i2c_id,
+};
+
+static int __init wm8991_modinit(void)
+{
+ int ret;
+ ret = i2c_add_driver(&wm8991_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8991 I2C driver: %d\n",
+ ret);
+ }
+ return ret;
+}
+module_init(wm8991_modinit);
+
+static void __exit wm8991_exit(void)
+{
+ i2c_del_driver(&wm8991_i2c_driver);
+}
+module_exit(wm8991_exit);
+
+MODULE_DESCRIPTION("ASoC WM8991 driver");
+MODULE_AUTHOR("Graeme Gregory");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8991.h b/sound/soc/codecs/wm8991.h
new file mode 100644
index 000000000000..8a942efd18a5
--- /dev/null
+++ b/sound/soc/codecs/wm8991.h
@@ -0,0 +1,833 @@
+/*
+ * wm8991.h -- audio driver for WM8991
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _WM8991_H
+#define _WM8991_H
+
+/*
+ * Register values.
+ */
+#define WM8991_RESET 0x00
+#define WM8991_POWER_MANAGEMENT_1 0x01
+#define WM8991_POWER_MANAGEMENT_2 0x02
+#define WM8991_POWER_MANAGEMENT_3 0x03
+#define WM8991_AUDIO_INTERFACE_1 0x04
+#define WM8991_AUDIO_INTERFACE_2 0x05
+#define WM8991_CLOCKING_1 0x06
+#define WM8991_CLOCKING_2 0x07
+#define WM8991_AUDIO_INTERFACE_3 0x08
+#define WM8991_AUDIO_INTERFACE_4 0x09
+#define WM8991_DAC_CTRL 0x0A
+#define WM8991_LEFT_DAC_DIGITAL_VOLUME 0x0B
+#define WM8991_RIGHT_DAC_DIGITAL_VOLUME 0x0C
+#define WM8991_DIGITAL_SIDE_TONE 0x0D
+#define WM8991_ADC_CTRL 0x0E
+#define WM8991_LEFT_ADC_DIGITAL_VOLUME 0x0F
+#define WM8991_RIGHT_ADC_DIGITAL_VOLUME 0x10
+#define WM8991_GPIO_CTRL_1 0x12
+#define WM8991_GPIO1_GPIO2 0x13
+#define WM8991_GPIO3_GPIO4 0x14
+#define WM8991_GPIO5_GPIO6 0x15
+#define WM8991_GPIOCTRL_2 0x16
+#define WM8991_GPIO_POL 0x17
+#define WM8991_LEFT_LINE_INPUT_1_2_VOLUME 0x18
+#define WM8991_LEFT_LINE_INPUT_3_4_VOLUME 0x19
+#define WM8991_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A
+#define WM8991_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B
+#define WM8991_LEFT_OUTPUT_VOLUME 0x1C
+#define WM8991_RIGHT_OUTPUT_VOLUME 0x1D
+#define WM8991_LINE_OUTPUTS_VOLUME 0x1E
+#define WM8991_OUT3_4_VOLUME 0x1F
+#define WM8991_LEFT_OPGA_VOLUME 0x20
+#define WM8991_RIGHT_OPGA_VOLUME 0x21
+#define WM8991_SPEAKER_VOLUME 0x22
+#define WM8991_CLASSD1 0x23
+#define WM8991_CLASSD3 0x25
+#define WM8991_INPUT_MIXER1 0x27
+#define WM8991_INPUT_MIXER2 0x28
+#define WM8991_INPUT_MIXER3 0x29
+#define WM8991_INPUT_MIXER4 0x2A
+#define WM8991_INPUT_MIXER5 0x2B
+#define WM8991_INPUT_MIXER6 0x2C
+#define WM8991_OUTPUT_MIXER1 0x2D
+#define WM8991_OUTPUT_MIXER2 0x2E
+#define WM8991_OUTPUT_MIXER3 0x2F
+#define WM8991_OUTPUT_MIXER4 0x30
+#define WM8991_OUTPUT_MIXER5 0x31
+#define WM8991_OUTPUT_MIXER6 0x32
+#define WM8991_OUT3_4_MIXER 0x33
+#define WM8991_LINE_MIXER1 0x34
+#define WM8991_LINE_MIXER2 0x35
+#define WM8991_SPEAKER_MIXER 0x36
+#define WM8991_ADDITIONAL_CONTROL 0x37
+#define WM8991_ANTIPOP1 0x38
+#define WM8991_ANTIPOP2 0x39
+#define WM8991_MICBIAS 0x3A
+#define WM8991_PLL1 0x3C
+#define WM8991_PLL2 0x3D
+#define WM8991_PLL3 0x3E
+#define WM8991_INTDRIVBITS 0x3F
+
+#define WM8991_REGISTER_COUNT 60
+#define WM8991_MAX_REGISTER 0x3F
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Reset
+ */
+#define WM8991_SW_RESET_CHIP_ID_MASK 0xFFFF /* SW_RESET_CHIP_ID - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8991_SPK_ENA 0x1000 /* SPK_ENA */
+#define WM8991_SPK_ENA_BIT 12
+#define WM8991_OUT3_ENA 0x0800 /* OUT3_ENA */
+#define WM8991_OUT3_ENA_BIT 11
+#define WM8991_OUT4_ENA 0x0400 /* OUT4_ENA */
+#define WM8991_OUT4_ENA_BIT 10
+#define WM8991_LOUT_ENA 0x0200 /* LOUT_ENA */
+#define WM8991_LOUT_ENA_BIT 9
+#define WM8991_ROUT_ENA 0x0100 /* ROUT_ENA */
+#define WM8991_ROUT_ENA_BIT 8
+#define WM8991_MICBIAS_ENA 0x0010 /* MICBIAS_ENA */
+#define WM8991_MICBIAS_ENA_BIT 4
+#define WM8991_VMID_MODE_MASK 0x0006 /* VMID_MODE - [2:1] */
+#define WM8991_VREF_ENA 0x0001 /* VREF_ENA */
+#define WM8991_VREF_ENA_BIT 0
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8991_PLL_ENA 0x8000 /* PLL_ENA */
+#define WM8991_PLL_ENA_BIT 15
+#define WM8991_TSHUT_ENA 0x4000 /* TSHUT_ENA */
+#define WM8991_TSHUT_ENA_BIT 14
+#define WM8991_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */
+#define WM8991_TSHUT_OPDIS_BIT 13
+#define WM8991_OPCLK_ENA 0x0800 /* OPCLK_ENA */
+#define WM8991_OPCLK_ENA_BIT 11
+#define WM8991_AINL_ENA 0x0200 /* AINL_ENA */
+#define WM8991_AINL_ENA_BIT 9
+#define WM8991_AINR_ENA 0x0100 /* AINR_ENA */
+#define WM8991_AINR_ENA_BIT 8
+#define WM8991_LIN34_ENA 0x0080 /* LIN34_ENA */
+#define WM8991_LIN34_ENA_BIT 7
+#define WM8991_LIN12_ENA 0x0040 /* LIN12_ENA */
+#define WM8991_LIN12_ENA_BIT 6
+#define WM8991_RIN34_ENA 0x0020 /* RIN34_ENA */
+#define WM8991_RIN34_ENA_BIT 5
+#define WM8991_RIN12_ENA 0x0010 /* RIN12_ENA */
+#define WM8991_RIN12_ENA_BIT 4
+#define WM8991_ADCL_ENA 0x0002 /* ADCL_ENA */
+#define WM8991_ADCL_ENA_BIT 1
+#define WM8991_ADCR_ENA 0x0001 /* ADCR_ENA */
+#define WM8991_ADCR_ENA_BIT 0
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8991_LON_ENA 0x2000 /* LON_ENA */
+#define WM8991_LON_ENA_BIT 13
+#define WM8991_LOP_ENA 0x1000 /* LOP_ENA */
+#define WM8991_LOP_ENA_BIT 12
+#define WM8991_RON_ENA 0x0800 /* RON_ENA */
+#define WM8991_RON_ENA_BIT 11
+#define WM8991_ROP_ENA 0x0400 /* ROP_ENA */
+#define WM8991_ROP_ENA_BIT 10
+#define WM8991_LOPGA_ENA 0x0080 /* LOPGA_ENA */
+#define WM8991_LOPGA_ENA_BIT 7
+#define WM8991_ROPGA_ENA 0x0040 /* ROPGA_ENA */
+#define WM8991_ROPGA_ENA_BIT 6
+#define WM8991_LOMIX_ENA 0x0020 /* LOMIX_ENA */
+#define WM8991_LOMIX_ENA_BIT 5
+#define WM8991_ROMIX_ENA 0x0010 /* ROMIX_ENA */
+#define WM8991_ROMIX_ENA_BIT 4
+#define WM8991_DACL_ENA 0x0002 /* DACL_ENA */
+#define WM8991_DACL_ENA_BIT 1
+#define WM8991_DACR_ENA 0x0001 /* DACR_ENA */
+#define WM8991_DACR_ENA_BIT 0
+
+/*
+ * R4 (0x04) - Audio Interface (1)
+ */
+#define WM8991_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */
+#define WM8991_AIFADCR_SRC 0x4000 /* AIFADCR_SRC */
+#define WM8991_AIFADC_TDM 0x2000 /* AIFADC_TDM */
+#define WM8991_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */
+#define WM8991_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */
+#define WM8991_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */
+#define WM8991_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */
+#define WM8991_AIF_WL_16BITS (0 << 5)
+#define WM8991_AIF_WL_20BITS (1 << 5)
+#define WM8991_AIF_WL_24BITS (2 << 5)
+#define WM8991_AIF_WL_32BITS (3 << 5)
+#define WM8991_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */
+#define WM8991_AIF_TMF_RIGHTJ (0 << 3)
+#define WM8991_AIF_TMF_LEFTJ (1 << 3)
+#define WM8991_AIF_TMF_I2S (2 << 3)
+#define WM8991_AIF_TMF_DSP (3 << 3)
+
+/*
+ * R5 (0x05) - Audio Interface (2)
+ */
+#define WM8991_DACL_SRC 0x8000 /* DACL_SRC */
+#define WM8991_DACR_SRC 0x4000 /* DACR_SRC */
+#define WM8991_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */
+#define WM8991_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8991_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */
+#define WM8991_DAC_COMP 0x0010 /* DAC_COMP */
+#define WM8991_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */
+#define WM8991_ADC_COMP 0x0004 /* ADC_COMP */
+#define WM8991_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */
+#define WM8991_LOOPBACK 0x0001 /* LOOPBACK */
+
+/*
+ * R6 (0x06) - Clocking (1)
+ */
+#define WM8991_TOCLK_RATE 0x8000 /* TOCLK_RATE */
+#define WM8991_TOCLK_ENA 0x4000 /* TOCLK_ENA */
+#define WM8991_OPCLKDIV_MASK 0x1E00 /* OPCLKDIV - [12:9] */
+#define WM8991_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */
+#define WM8991_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */
+#define WM8991_BCLK_DIV_1 (0x0 << 1)
+#define WM8991_BCLK_DIV_1_5 (0x1 << 1)
+#define WM8991_BCLK_DIV_2 (0x2 << 1)
+#define WM8991_BCLK_DIV_3 (0x3 << 1)
+#define WM8991_BCLK_DIV_4 (0x4 << 1)
+#define WM8991_BCLK_DIV_5_5 (0x5 << 1)
+#define WM8991_BCLK_DIV_6 (0x6 << 1)
+#define WM8991_BCLK_DIV_8 (0x7 << 1)
+#define WM8991_BCLK_DIV_11 (0x8 << 1)
+#define WM8991_BCLK_DIV_12 (0x9 << 1)
+#define WM8991_BCLK_DIV_16 (0xA << 1)
+#define WM8991_BCLK_DIV_22 (0xB << 1)
+#define WM8991_BCLK_DIV_24 (0xC << 1)
+#define WM8991_BCLK_DIV_32 (0xD << 1)
+#define WM8991_BCLK_DIV_44 (0xE << 1)
+#define WM8991_BCLK_DIV_48 (0xF << 1)
+
+/*
+ * R7 (0x07) - Clocking (2)
+ */
+#define WM8991_MCLK_SRC 0x8000 /* MCLK_SRC */
+#define WM8991_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */
+#define WM8991_CLK_FORCE 0x2000 /* CLK_FORCE */
+#define WM8991_MCLK_DIV_MASK 0x1800 /* MCLK_DIV - [12:11] */
+#define WM8991_MCLK_DIV_1 (0 << 11)
+#define WM8991_MCLK_DIV_2 (2 << 11)
+#define WM8991_MCLK_INV 0x0400 /* MCLK_INV */
+#define WM8991_ADC_CLKDIV_MASK 0x00E0 /* ADC_CLKDIV - [7:5] */
+#define WM8991_ADC_CLKDIV_1 (0 << 5)
+#define WM8991_ADC_CLKDIV_1_5 (1 << 5)
+#define WM8991_ADC_CLKDIV_2 (2 << 5)
+#define WM8991_ADC_CLKDIV_3 (3 << 5)
+#define WM8991_ADC_CLKDIV_4 (4 << 5)
+#define WM8991_ADC_CLKDIV_5_5 (5 << 5)
+#define WM8991_ADC_CLKDIV_6 (6 << 5)
+#define WM8991_DAC_CLKDIV_MASK 0x001C /* DAC_CLKDIV - [4:2] */
+#define WM8991_DAC_CLKDIV_1 (0 << 2)
+#define WM8991_DAC_CLKDIV_1_5 (1 << 2)
+#define WM8991_DAC_CLKDIV_2 (2 << 2)
+#define WM8991_DAC_CLKDIV_3 (3 << 2)
+#define WM8991_DAC_CLKDIV_4 (4 << 2)
+#define WM8991_DAC_CLKDIV_5_5 (5 << 2)
+#define WM8991_DAC_CLKDIV_6 (6 << 2)
+
+/*
+ * R8 (0x08) - Audio Interface (3)
+ */
+#define WM8991_AIF_MSTR1 0x8000 /* AIF_MSTR1 */
+#define WM8991_AIF_MSTR2 0x4000 /* AIF_MSTR2 */
+#define WM8991_AIF_SEL 0x2000 /* AIF_SEL */
+#define WM8991_ADCLRC_DIR 0x0800 /* ADCLRC_DIR */
+#define WM8991_ADCLRC_RATE_MASK 0x07FF /* ADCLRC_RATE - [10:0] */
+
+/*
+ * R9 (0x09) - Audio Interface (4)
+ */
+#define WM8991_ALRCGPIO1 0x8000 /* ALRCGPIO1 */
+#define WM8991_ALRCBGPIO6 0x4000 /* ALRCBGPIO6 */
+#define WM8991_AIF_TRIS 0x2000 /* AIF_TRIS */
+#define WM8991_DACLRC_DIR 0x0800 /* DACLRC_DIR */
+#define WM8991_DACLRC_RATE_MASK 0x07FF /* DACLRC_RATE - [10:0] */
+
+/*
+ * R10 (0x0A) - DAC CTRL
+ */
+#define WM8991_AIF_LRCLKRATE 0x0400 /* AIF_LRCLKRATE */
+#define WM8991_DAC_MONO 0x0200 /* DAC_MONO */
+#define WM8991_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */
+#define WM8991_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */
+#define WM8991_DAC_MUTEMODE 0x0040 /* DAC_MUTEMODE */
+#define WM8991_DEEMP_MASK 0x0030 /* DEEMP - [5:4] */
+#define WM8991_DAC_MUTE 0x0004 /* DAC_MUTE */
+#define WM8991_DACL_DATINV 0x0002 /* DACL_DATINV */
+#define WM8991_DACR_DATINV 0x0001 /* DACR_DATINV */
+
+/*
+ * R11 (0x0B) - Left DAC Digital Volume
+ */
+#define WM8991_DAC_VU 0x0100 /* DAC_VU */
+#define WM8991_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */
+#define WM8991_DACL_VOL_SHIFT 0
+/*
+ * R12 (0x0C) - Right DAC Digital Volume
+ */
+#define WM8991_DAC_VU 0x0100 /* DAC_VU */
+#define WM8991_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */
+#define WM8991_DACR_VOL_SHIFT 0
+/*
+ * R13 (0x0D) - Digital Side Tone
+ */
+#define WM8991_ADCL_DAC_SVOL_MASK 0x0F /* ADCL_DAC_SVOL - [12:9] */
+#define WM8991_ADCL_DAC_SVOL_SHIFT 9
+#define WM8991_ADCR_DAC_SVOL_MASK 0x0F /* ADCR_DAC_SVOL - [8:5] */
+#define WM8991_ADCR_DAC_SVOL_SHIFT 5
+#define WM8991_ADC_TO_DACL_MASK 0x03 /* ADC_TO_DACL - [3:2] */
+#define WM8991_ADC_TO_DACL_SHIFT 2
+#define WM8991_ADC_TO_DACR_MASK 0x03 /* ADC_TO_DACR - [1:0] */
+#define WM8991_ADC_TO_DACR_SHIFT 0
+
+/*
+ * R14 (0x0E) - ADC CTRL
+ */
+#define WM8991_ADC_HPF_ENA 0x0100 /* ADC_HPF_ENA */
+#define WM8991_ADC_HPF_ENA_BIT 8
+#define WM8991_ADC_HPF_CUT_MASK 0x03 /* ADC_HPF_CUT - [6:5] */
+#define WM8991_ADC_HPF_CUT_SHIFT 5
+#define WM8991_ADCL_DATINV 0x0002 /* ADCL_DATINV */
+#define WM8991_ADCL_DATINV_BIT 1
+#define WM8991_ADCR_DATINV 0x0001 /* ADCR_DATINV */
+#define WM8991_ADCR_DATINV_BIT 0
+
+/*
+ * R15 (0x0F) - Left ADC Digital Volume
+ */
+#define WM8991_ADC_VU 0x0100 /* ADC_VU */
+#define WM8991_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */
+#define WM8991_ADCL_VOL_SHIFT 0
+
+/*
+ * R16 (0x10) - Right ADC Digital Volume
+ */
+#define WM8991_ADC_VU 0x0100 /* ADC_VU */
+#define WM8991_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */
+#define WM8991_ADCR_VOL_SHIFT 0
+
+/*
+ * R18 (0x12) - GPIO CTRL 1
+ */
+#define WM8991_IRQ 0x1000 /* IRQ */
+#define WM8991_TEMPOK 0x0800 /* TEMPOK */
+#define WM8991_MICSHRT 0x0400 /* MICSHRT */
+#define WM8991_MICDET 0x0200 /* MICDET */
+#define WM8991_PLL_LCK 0x0100 /* PLL_LCK */
+#define WM8991_GPI8_STATUS 0x0080 /* GPI8_STATUS */
+#define WM8991_GPI7_STATUS 0x0040 /* GPI7_STATUS */
+#define WM8991_GPIO6_STATUS 0x0020 /* GPIO6_STATUS */
+#define WM8991_GPIO5_STATUS 0x0010 /* GPIO5_STATUS */
+#define WM8991_GPIO4_STATUS 0x0008 /* GPIO4_STATUS */
+#define WM8991_GPIO3_STATUS 0x0004 /* GPIO3_STATUS */
+#define WM8991_GPIO2_STATUS 0x0002 /* GPIO2_STATUS */
+#define WM8991_GPIO1_STATUS 0x0001 /* GPIO1_STATUS */
+
+/*
+ * R19 (0x13) - GPIO1 & GPIO2
+ */
+#define WM8991_GPIO2_DEB_ENA 0x8000 /* GPIO2_DEB_ENA */
+#define WM8991_GPIO2_IRQ_ENA 0x4000 /* GPIO2_IRQ_ENA */
+#define WM8991_GPIO2_PU 0x2000 /* GPIO2_PU */
+#define WM8991_GPIO2_PD 0x1000 /* GPIO2_PD */
+#define WM8991_GPIO2_SEL_MASK 0x0F00 /* GPIO2_SEL - [11:8] */
+#define WM8991_GPIO1_DEB_ENA 0x0080 /* GPIO1_DEB_ENA */
+#define WM8991_GPIO1_IRQ_ENA 0x0040 /* GPIO1_IRQ_ENA */
+#define WM8991_GPIO1_PU 0x0020 /* GPIO1_PU */
+#define WM8991_GPIO1_PD 0x0010 /* GPIO1_PD */
+#define WM8991_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
+
+/*
+ * R20 (0x14) - GPIO3 & GPIO4
+ */
+#define WM8991_GPIO4_DEB_ENA 0x8000 /* GPIO4_DEB_ENA */
+#define WM8991_GPIO4_IRQ_ENA 0x4000 /* GPIO4_IRQ_ENA */
+#define WM8991_GPIO4_PU 0x2000 /* GPIO4_PU */
+#define WM8991_GPIO4_PD 0x1000 /* GPIO4_PD */
+#define WM8991_GPIO4_SEL_MASK 0x0F00 /* GPIO4_SEL - [11:8] */
+#define WM8991_GPIO3_DEB_ENA 0x0080 /* GPIO3_DEB_ENA */
+#define WM8991_GPIO3_IRQ_ENA 0x0040 /* GPIO3_IRQ_ENA */
+#define WM8991_GPIO3_PU 0x0020 /* GPIO3_PU */
+#define WM8991_GPIO3_PD 0x0010 /* GPIO3_PD */
+#define WM8991_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */
+
+/*
+ * R21 (0x15) - GPIO5 & GPIO6
+ */
+#define WM8991_GPIO6_DEB_ENA 0x8000 /* GPIO6_DEB_ENA */
+#define WM8991_GPIO6_IRQ_ENA 0x4000 /* GPIO6_IRQ_ENA */
+#define WM8991_GPIO6_PU 0x2000 /* GPIO6_PU */
+#define WM8991_GPIO6_PD 0x1000 /* GPIO6_PD */
+#define WM8991_GPIO6_SEL_MASK 0x0F00 /* GPIO6_SEL - [11:8] */
+#define WM8991_GPIO5_DEB_ENA 0x0080 /* GPIO5_DEB_ENA */
+#define WM8991_GPIO5_IRQ_ENA 0x0040 /* GPIO5_IRQ_ENA */
+#define WM8991_GPIO5_PU 0x0020 /* GPIO5_PU */
+#define WM8991_GPIO5_PD 0x0010 /* GPIO5_PD */
+#define WM8991_GPIO5_SEL_MASK 0x000F /* GPIO5_SEL - [3:0] */
+
+/*
+ * R22 (0x16) - GPIOCTRL 2
+ */
+#define WM8991_RD_3W_ENA 0x8000 /* RD_3W_ENA */
+#define WM8991_MODE_3W4W 0x4000 /* MODE_3W4W */
+#define WM8991_TEMPOK_IRQ_ENA 0x0800 /* TEMPOK_IRQ_ENA */
+#define WM8991_MICSHRT_IRQ_ENA 0x0400 /* MICSHRT_IRQ_ENA */
+#define WM8991_MICDET_IRQ_ENA 0x0200 /* MICDET_IRQ_ENA */
+#define WM8991_PLL_LCK_IRQ_ENA 0x0100 /* PLL_LCK_IRQ_ENA */
+#define WM8991_GPI8_DEB_ENA 0x0080 /* GPI8_DEB_ENA */
+#define WM8991_GPI8_IRQ_ENA 0x0040 /* GPI8_IRQ_ENA */
+#define WM8991_GPI8_ENA 0x0010 /* GPI8_ENA */
+#define WM8991_GPI7_DEB_ENA 0x0008 /* GPI7_DEB_ENA */
+#define WM8991_GPI7_IRQ_ENA 0x0004 /* GPI7_IRQ_ENA */
+#define WM8991_GPI7_ENA 0x0001 /* GPI7_ENA */
+
+/*
+ * R23 (0x17) - GPIO_POL
+ */
+#define WM8991_IRQ_INV 0x1000 /* IRQ_INV */
+#define WM8991_TEMPOK_POL 0x0800 /* TEMPOK_POL */
+#define WM8991_MICSHRT_POL 0x0400 /* MICSHRT_POL */
+#define WM8991_MICDET_POL 0x0200 /* MICDET_POL */
+#define WM8991_PLL_LCK_POL 0x0100 /* PLL_LCK_POL */
+#define WM8991_GPI8_POL 0x0080 /* GPI8_POL */
+#define WM8991_GPI7_POL 0x0040 /* GPI7_POL */
+#define WM8991_GPIO6_POL 0x0020 /* GPIO6_POL */
+#define WM8991_GPIO5_POL 0x0010 /* GPIO5_POL */
+#define WM8991_GPIO4_POL 0x0008 /* GPIO4_POL */
+#define WM8991_GPIO3_POL 0x0004 /* GPIO3_POL */
+#define WM8991_GPIO2_POL 0x0002 /* GPIO2_POL */
+#define WM8991_GPIO1_POL 0x0001 /* GPIO1_POL */
+
+/*
+ * R24 (0x18) - Left Line Input 1&2 Volume
+ */
+#define WM8991_IPVU 0x0100 /* IPVU */
+#define WM8991_LI12MUTE 0x0080 /* LI12MUTE */
+#define WM8991_LI12MUTE_BIT 7
+#define WM8991_LI12ZC 0x0040 /* LI12ZC */
+#define WM8991_LI12ZC_BIT 6
+#define WM8991_LIN12VOL_MASK 0x001F /* LIN12VOL - [4:0] */
+#define WM8991_LIN12VOL_SHIFT 0
+/*
+ * R25 (0x19) - Left Line Input 3&4 Volume
+ */
+#define WM8991_IPVU 0x0100 /* IPVU */
+#define WM8991_LI34MUTE 0x0080 /* LI34MUTE */
+#define WM8991_LI34MUTE_BIT 7
+#define WM8991_LI34ZC 0x0040 /* LI34ZC */
+#define WM8991_LI34ZC_BIT 6
+#define WM8991_LIN34VOL_MASK 0x001F /* LIN34VOL - [4:0] */
+#define WM8991_LIN34VOL_SHIFT 0
+
+/*
+ * R26 (0x1A) - Right Line Input 1&2 Volume
+ */
+#define WM8991_IPVU 0x0100 /* IPVU */
+#define WM8991_RI12MUTE 0x0080 /* RI12MUTE */
+#define WM8991_RI12MUTE_BIT 7
+#define WM8991_RI12ZC 0x0040 /* RI12ZC */
+#define WM8991_RI12ZC_BIT 6
+#define WM8991_RIN12VOL_MASK 0x001F /* RIN12VOL - [4:0] */
+#define WM8991_RIN12VOL_SHIFT 0
+
+/*
+ * R27 (0x1B) - Right Line Input 3&4 Volume
+ */
+#define WM8991_IPVU 0x0100 /* IPVU */
+#define WM8991_RI34MUTE 0x0080 /* RI34MUTE */
+#define WM8991_RI34MUTE_BIT 7
+#define WM8991_RI34ZC 0x0040 /* RI34ZC */
+#define WM8991_RI34ZC_BIT 6
+#define WM8991_RIN34VOL_MASK 0x001F /* RIN34VOL - [4:0] */
+#define WM8991_RIN34VOL_SHIFT 0
+
+/*
+ * R28 (0x1C) - Left Output Volume
+ */
+#define WM8991_OPVU 0x0100 /* OPVU */
+#define WM8991_LOZC 0x0080 /* LOZC */
+#define WM8991_LOZC_BIT 7
+#define WM8991_LOUTVOL_MASK 0x007F /* LOUTVOL - [6:0] */
+#define WM8991_LOUTVOL_SHIFT 0
+/*
+ * R29 (0x1D) - Right Output Volume
+ */
+#define WM8991_OPVU 0x0100 /* OPVU */
+#define WM8991_ROZC 0x0080 /* ROZC */
+#define WM8991_ROZC_BIT 7
+#define WM8991_ROUTVOL_MASK 0x007F /* ROUTVOL - [6:0] */
+#define WM8991_ROUTVOL_SHIFT 0
+/*
+ * R30 (0x1E) - Line Outputs Volume
+ */
+#define WM8991_LONMUTE 0x0040 /* LONMUTE */
+#define WM8991_LONMUTE_BIT 6
+#define WM8991_LOPMUTE 0x0020 /* LOPMUTE */
+#define WM8991_LOPMUTE_BIT 5
+#define WM8991_LOATTN 0x0010 /* LOATTN */
+#define WM8991_LOATTN_BIT 4
+#define WM8991_RONMUTE 0x0004 /* RONMUTE */
+#define WM8991_RONMUTE_BIT 2
+#define WM8991_ROPMUTE 0x0002 /* ROPMUTE */
+#define WM8991_ROPMUTE_BIT 1
+#define WM8991_ROATTN 0x0001 /* ROATTN */
+#define WM8991_ROATTN_BIT 0
+
+/*
+ * R31 (0x1F) - Out3/4 Volume
+ */
+#define WM8991_OUT3MUTE 0x0020 /* OUT3MUTE */
+#define WM8991_OUT3MUTE_BIT 5
+#define WM8991_OUT3ATTN 0x0010 /* OUT3ATTN */
+#define WM8991_OUT3ATTN_BIT 4
+#define WM8991_OUT4MUTE 0x0002 /* OUT4MUTE */
+#define WM8991_OUT4MUTE_BIT 1
+#define WM8991_OUT4ATTN 0x0001 /* OUT4ATTN */
+#define WM8991_OUT4ATTN_BIT 0
+
+/*
+ * R32 (0x20) - Left OPGA Volume
+ */
+#define WM8991_OPVU 0x0100 /* OPVU */
+#define WM8991_LOPGAZC 0x0080 /* LOPGAZC */
+#define WM8991_LOPGAZC_BIT 7
+#define WM8991_LOPGAVOL_MASK 0x007F /* LOPGAVOL - [6:0] */
+#define WM8991_LOPGAVOL_SHIFT 0
+
+/*
+ * R33 (0x21) - Right OPGA Volume
+ */
+#define WM8991_OPVU 0x0100 /* OPVU */
+#define WM8991_ROPGAZC 0x0080 /* ROPGAZC */
+#define WM8991_ROPGAZC_BIT 7
+#define WM8991_ROPGAVOL_MASK 0x007F /* ROPGAVOL - [6:0] */
+#define WM8991_ROPGAVOL_SHIFT 0
+/*
+ * R34 (0x22) - Speaker Volume
+ */
+#define WM8991_SPKVOL_MASK 0x0003 /* SPKVOL - [1:0] */
+#define WM8991_SPKVOL_SHIFT 0
+
+/*
+ * R35 (0x23) - ClassD1
+ */
+#define WM8991_CDMODE 0x0100 /* CDMODE */
+#define WM8991_CDMODE_BIT 8
+
+/*
+ * R37 (0x25) - ClassD3
+ */
+#define WM8991_DCGAIN_MASK 0x0007 /* DCGAIN - [5:3] */
+#define WM8991_DCGAIN_SHIFT 3
+#define WM8991_ACGAIN_MASK 0x0007 /* ACGAIN - [2:0] */
+#define WM8991_ACGAIN_SHIFT 0
+/*
+ * R39 (0x27) - Input Mixer1
+ */
+#define WM8991_AINLMODE_MASK 0x000C /* AINLMODE - [3:2] */
+#define WM8991_AINLMODE_SHIFT 2
+#define WM8991_AINRMODE_MASK 0x0003 /* AINRMODE - [1:0] */
+#define WM8991_AINRMODE_SHIFT 0
+
+/*
+ * R40 (0x28) - Input Mixer2
+ */
+#define WM8991_LMP4 0x0080 /* LMP4 */
+#define WM8991_LMP4_BIT 7 /* LMP4 */
+#define WM8991_LMN3 0x0040 /* LMN3 */
+#define WM8991_LMN3_BIT 6 /* LMN3 */
+#define WM8991_LMP2 0x0020 /* LMP2 */
+#define WM8991_LMP2_BIT 5 /* LMP2 */
+#define WM8991_LMN1 0x0010 /* LMN1 */
+#define WM8991_LMN1_BIT 4 /* LMN1 */
+#define WM8991_RMP4 0x0008 /* RMP4 */
+#define WM8991_RMP4_BIT 3 /* RMP4 */
+#define WM8991_RMN3 0x0004 /* RMN3 */
+#define WM8991_RMN3_BIT 2 /* RMN3 */
+#define WM8991_RMP2 0x0002 /* RMP2 */
+#define WM8991_RMP2_BIT 1 /* RMP2 */
+#define WM8991_RMN1 0x0001 /* RMN1 */
+#define WM8991_RMN1_BIT 0 /* RMN1 */
+
+/*
+ * R41 (0x29) - Input Mixer3
+ */
+#define WM8991_L34MNB 0x0100 /* L34MNB */
+#define WM8991_L34MNB_BIT 8
+#define WM8991_L34MNBST 0x0080 /* L34MNBST */
+#define WM8991_L34MNBST_BIT 7
+#define WM8991_L12MNB 0x0020 /* L12MNB */
+#define WM8991_L12MNB_BIT 5
+#define WM8991_L12MNBST 0x0010 /* L12MNBST */
+#define WM8991_L12MNBST_BIT 4
+#define WM8991_LDBVOL_MASK 0x0007 /* LDBVOL - [2:0] */
+#define WM8991_LDBVOL_SHIFT 0
+
+/*
+ * R42 (0x2A) - Input Mixer4
+ */
+#define WM8991_R34MNB 0x0100 /* R34MNB */
+#define WM8991_R34MNB_BIT 8
+#define WM8991_R34MNBST 0x0080 /* R34MNBST */
+#define WM8991_R34MNBST_BIT 7
+#define WM8991_R12MNB 0x0020 /* R12MNB */
+#define WM8991_R12MNB_BIT 5
+#define WM8991_R12MNBST 0x0010 /* R12MNBST */
+#define WM8991_R12MNBST_BIT 4
+#define WM8991_RDBVOL_MASK 0x0007 /* RDBVOL - [2:0] */
+#define WM8991_RDBVOL_SHIFT 0
+
+/*
+ * R43 (0x2B) - Input Mixer5
+ */
+#define WM8991_LI2BVOL_MASK 0x07 /* LI2BVOL - [8:6] */
+#define WM8991_LI2BVOL_SHIFT 6
+#define WM8991_LR4BVOL_MASK 0x07 /* LR4BVOL - [5:3] */
+#define WM8991_LR4BVOL_SHIFT 3
+#define WM8991_LL4BVOL_MASK 0x07 /* LL4BVOL - [2:0] */
+#define WM8991_LL4BVOL_SHIFT 0
+
+/*
+ * R44 (0x2C) - Input Mixer6
+ */
+#define WM8991_RI2BVOL_MASK 0x07 /* RI2BVOL - [8:6] */
+#define WM8991_RI2BVOL_SHIFT 6
+#define WM8991_RL4BVOL_MASK 0x07 /* RL4BVOL - [5:3] */
+#define WM8991_RL4BVOL_SHIFT 3
+#define WM8991_RR4BVOL_MASK 0x07 /* RR4BVOL - [2:0] */
+#define WM8991_RR4BVOL_SHIFT 0
+
+/*
+ * R45 (0x2D) - Output Mixer1
+ */
+#define WM8991_LRBLO 0x0080 /* LRBLO */
+#define WM8991_LRBLO_BIT 7
+#define WM8991_LLBLO 0x0040 /* LLBLO */
+#define WM8991_LLBLO_BIT 6
+#define WM8991_LRI3LO 0x0020 /* LRI3LO */
+#define WM8991_LRI3LO_BIT 5
+#define WM8991_LLI3LO 0x0010 /* LLI3LO */
+#define WM8991_LLI3LO_BIT 4
+#define WM8991_LR12LO 0x0008 /* LR12LO */
+#define WM8991_LR12LO_BIT 3
+#define WM8991_LL12LO 0x0004 /* LL12LO */
+#define WM8991_LL12LO_BIT 2
+#define WM8991_LDLO 0x0001 /* LDLO */
+#define WM8991_LDLO_BIT 0
+
+/*
+ * R46 (0x2E) - Output Mixer2
+ */
+#define WM8991_RLBRO 0x0080 /* RLBRO */
+#define WM8991_RLBRO_BIT 7
+#define WM8991_RRBRO 0x0040 /* RRBRO */
+#define WM8991_RRBRO_BIT 6
+#define WM8991_RLI3RO 0x0020 /* RLI3RO */
+#define WM8991_RLI3RO_BIT 5
+#define WM8991_RRI3RO 0x0010 /* RRI3RO */
+#define WM8991_RRI3RO_BIT 4
+#define WM8991_RL12RO 0x0008 /* RL12RO */
+#define WM8991_RL12RO_BIT 3
+#define WM8991_RR12RO 0x0004 /* RR12RO */
+#define WM8991_RR12RO_BIT 2
+#define WM8991_RDRO 0x0001 /* RDRO */
+#define WM8991_RDRO_BIT 0
+
+/*
+ * R47 (0x2F) - Output Mixer3
+ */
+#define WM8991_LLI3LOVOL_MASK 0x07 /* LLI3LOVOL - [8:6] */
+#define WM8991_LLI3LOVOL_SHIFT 6
+#define WM8991_LR12LOVOL_MASK 0x07 /* LR12LOVOL - [5:3] */
+#define WM8991_LR12LOVOL_SHIFT 3
+#define WM8991_LL12LOVOL_MASK 0x07 /* LL12LOVOL - [2:0] */
+#define WM8991_LL12LOVOL_SHIFT 0
+
+/*
+ * R48 (0x30) - Output Mixer4
+ */
+#define WM8991_RRI3ROVOL_MASK 0x07 /* RRI3ROVOL - [8:6] */
+#define WM8991_RRI3ROVOL_SHIFT 6
+#define WM8991_RL12ROVOL_MASK 0x07 /* RL12ROVOL - [5:3] */
+#define WM8991_RL12ROVOL_SHIFT 3
+#define WM8991_RR12ROVOL_MASK 0x07 /* RR12ROVOL - [2:0] */
+#define WM8991_RR12ROVOL_SHIFT 0
+
+/*
+ * R49 (0x31) - Output Mixer5
+ */
+#define WM8991_LRI3LOVOL_MASK 0x07 /* LRI3LOVOL - [8:6] */
+#define WM8991_LRI3LOVOL_SHIFT 6
+#define WM8991_LRBLOVOL_MASK 0x07 /* LRBLOVOL - [5:3] */
+#define WM8991_LRBLOVOL_SHIFT 3
+#define WM8991_LLBLOVOL_MASK 0x07 /* LLBLOVOL - [2:0] */
+#define WM8991_LLBLOVOL_SHIFT 0
+
+/*
+ * R50 (0x32) - Output Mixer6
+ */
+#define WM8991_RLI3ROVOL_MASK 0x07 /* RLI3ROVOL - [8:6] */
+#define WM8991_RLI3ROVOL_SHIFT 6
+#define WM8991_RLBROVOL_MASK 0x07 /* RLBROVOL - [5:3] */
+#define WM8991_RLBROVOL_SHIFT 3
+#define WM8991_RRBROVOL_MASK 0x07 /* RRBROVOL - [2:0] */
+#define WM8991_RRBROVOL_SHIFT 0
+
+/*
+ * R51 (0x33) - Out3/4 Mixer
+ */
+#define WM8991_VSEL_MASK 0x0180 /* VSEL - [8:7] */
+#define WM8991_LI4O3 0x0020 /* LI4O3 */
+#define WM8991_LI4O3_BIT 5
+#define WM8991_LPGAO3 0x0010 /* LPGAO3 */
+#define WM8991_LPGAO3_BIT 4
+#define WM8991_RI4O4 0x0002 /* RI4O4 */
+#define WM8991_RI4O4_BIT 1
+#define WM8991_RPGAO4 0x0001 /* RPGAO4 */
+#define WM8991_RPGAO4_BIT 0
+/*
+ * R52 (0x34) - Line Mixer1
+ */
+#define WM8991_LLOPGALON 0x0040 /* LLOPGALON */
+#define WM8991_LLOPGALON_BIT 6
+#define WM8991_LROPGALON 0x0020 /* LROPGALON */
+#define WM8991_LROPGALON_BIT 5
+#define WM8991_LOPLON 0x0010 /* LOPLON */
+#define WM8991_LOPLON_BIT 4
+#define WM8991_LR12LOP 0x0004 /* LR12LOP */
+#define WM8991_LR12LOP_BIT 2
+#define WM8991_LL12LOP 0x0002 /* LL12LOP */
+#define WM8991_LL12LOP_BIT 1
+#define WM8991_LLOPGALOP 0x0001 /* LLOPGALOP */
+#define WM8991_LLOPGALOP_BIT 0
+/*
+ * R53 (0x35) - Line Mixer2
+ */
+#define WM8991_RROPGARON 0x0040 /* RROPGARON */
+#define WM8991_RROPGARON_BIT 6
+#define WM8991_RLOPGARON 0x0020 /* RLOPGARON */
+#define WM8991_RLOPGARON_BIT 5
+#define WM8991_ROPRON 0x0010 /* ROPRON */
+#define WM8991_ROPRON_BIT 4
+#define WM8991_RL12ROP 0x0004 /* RL12ROP */
+#define WM8991_RL12ROP_BIT 2
+#define WM8991_RR12ROP 0x0002 /* RR12ROP */
+#define WM8991_RR12ROP_BIT 1
+#define WM8991_RROPGAROP 0x0001 /* RROPGAROP */
+#define WM8991_RROPGAROP_BIT 0
+
+/*
+ * R54 (0x36) - Speaker Mixer
+ */
+#define WM8991_LB2SPK 0x0080 /* LB2SPK */
+#define WM8991_LB2SPK_BIT 7
+#define WM8991_RB2SPK 0x0040 /* RB2SPK */
+#define WM8991_RB2SPK_BIT 6
+#define WM8991_LI2SPK 0x0020 /* LI2SPK */
+#define WM8991_LI2SPK_BIT 5
+#define WM8991_RI2SPK 0x0010 /* RI2SPK */
+#define WM8991_RI2SPK_BIT 4
+#define WM8991_LOPGASPK 0x0008 /* LOPGASPK */
+#define WM8991_LOPGASPK_BIT 3
+#define WM8991_ROPGASPK 0x0004 /* ROPGASPK */
+#define WM8991_ROPGASPK_BIT 2
+#define WM8991_LDSPK 0x0002 /* LDSPK */
+#define WM8991_LDSPK_BIT 1
+#define WM8991_RDSPK 0x0001 /* RDSPK */
+#define WM8991_RDSPK_BIT 0
+
+/*
+ * R55 (0x37) - Additional Control
+ */
+#define WM8991_VROI 0x0001 /* VROI */
+
+/*
+ * R56 (0x38) - AntiPOP1
+ */
+#define WM8991_DIS_LLINE 0x0020 /* DIS_LLINE */
+#define WM8991_DIS_RLINE 0x0010 /* DIS_RLINE */
+#define WM8991_DIS_OUT3 0x0008 /* DIS_OUT3 */
+#define WM8991_DIS_OUT4 0x0004 /* DIS_OUT4 */
+#define WM8991_DIS_LOUT 0x0002 /* DIS_LOUT */
+#define WM8991_DIS_ROUT 0x0001 /* DIS_ROUT */
+
+/*
+ * R57 (0x39) - AntiPOP2
+ */
+#define WM8991_SOFTST 0x0040 /* SOFTST */
+#define WM8991_BUFIOEN 0x0008 /* BUFIOEN */
+#define WM8991_BUFDCOPEN 0x0004 /* BUFDCOPEN */
+#define WM8991_POBCTRL 0x0002 /* POBCTRL */
+#define WM8991_VMIDTOG 0x0001 /* VMIDTOG */
+
+/*
+ * R58 (0x3A) - MICBIAS
+ */
+#define WM8991_MCDSCTH_MASK 0x00C0 /* MCDSCTH - [7:6] */
+#define WM8991_MCDTHR_MASK 0x0038 /* MCDTHR - [5:3] */
+#define WM8991_MCD 0x0004 /* MCD */
+#define WM8991_MBSEL 0x0001 /* MBSEL */
+
+/*
+ * R60 (0x3C) - PLL1
+ */
+#define WM8991_SDM 0x0080 /* SDM */
+#define WM8991_PRESCALE 0x0040 /* PRESCALE */
+#define WM8991_PLLN_MASK 0x000F /* PLLN - [3:0] */
+
+/*
+ * R61 (0x3D) - PLL2
+ */
+#define WM8991_PLLK1_MASK 0x00FF /* PLLK1 - [7:0] */
+
+/*
+ * R62 (0x3E) - PLL3
+ */
+#define WM8991_PLLK2_MASK 0x00FF /* PLLK2 - [7:0] */
+
+/*
+ * R63 (0x3F) - Internal Driver Bits
+ */
+#define WM8991_INMIXL_PWR_BIT 0
+#define WM8991_AINLMUX_PWR_BIT 1
+#define WM8991_INMIXR_PWR_BIT 2
+#define WM8991_AINRMUX_PWR_BIT 3
+
+#define WM8991_MCLK_DIV 0
+#define WM8991_DACCLK_DIV 1
+#define WM8991_ADCCLK_DIV 2
+#define WM8991_BCLK_DIV 3
+
+#define SOC_WM899X_OUTPGA_SINGLE_R_TLV(xname, reg, shift, max, invert,\
+ tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_get_volsw, .put = wm899x_outpga_put_volsw_vu, \
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) }
+
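+/*
+ * Usage sketch for SOC_WM899X_OUTPGA_SINGLE_R_TLV (illustrative only, not
+ * part of the original header): the macro is meant to populate an entry in
+ * the codec driver's snd_kcontrol_new table, pairing a TLV dB scale with
+ * wm899x_outpga_put_volsw_vu() so the volume-update (VU) latch is written
+ * along with the gain.  The control name, register index, maximum and TLV
+ * scale below are assumptions made for the example; <sound/tlv.h> provides
+ * DECLARE_TLV_DB_SCALE().
+ */
+#if 0	/* example, not built */
+static const DECLARE_TLV_DB_SCALE(example_out_pga_tlv, -7300, 100, 0);
+
+static const struct snd_kcontrol_new wm8991_example_controls[] = {
+	SOC_WM899X_OUTPGA_SINGLE_R_TLV("Example LOUT Volume",
+				       0x1C, WM8991_LOUTVOL_SHIFT,
+				       0x7F, 0, example_out_pga_tlv),
+};
+#endif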
+#endif /* _WM8991_H */
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 18c0d9ce7c32..379fa22c5b6c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -242,7 +242,7 @@ struct wm8993_priv {
int fll_src;
};
-static int wm8993_volatile(unsigned int reg)
+static int wm8993_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8993_SOFTWARE_RESET:
diff --git a/sound/soc/codecs/wm8994-tables.c b/sound/soc/codecs/wm8994-tables.c
index 68e9b024dd48..61dfd91c6c72 100644
--- a/sound/soc/codecs/wm8994-tables.c
+++ b/sound/soc/codecs/wm8994-tables.c
@@ -62,8 +62,8 @@ const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = {
{ 0x00FF, 0x00FF }, /* R58 - MICBIAS */
{ 0x000F, 0x000F }, /* R59 - LDO 1 */
{ 0x0007, 0x0007 }, /* R60 - LDO 2 */
- { 0x0000, 0x0000 }, /* R61 */
- { 0x0000, 0x0000 }, /* R62 */
+ { 0xFFFF, 0xFFFF }, /* R61 */
+ { 0xFFFF, 0xFFFF }, /* R62 */
{ 0x0000, 0x0000 }, /* R63 */
{ 0x0000, 0x0000 }, /* R64 */
{ 0x0000, 0x0000 }, /* R65 */
@@ -209,9 +209,9 @@ const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = {
{ 0x0000, 0x0000 }, /* R205 */
{ 0x0000, 0x0000 }, /* R206 */
{ 0x0000, 0x0000 }, /* R207 */
- { 0x0000, 0x0000 }, /* R208 */
- { 0x0000, 0x0000 }, /* R209 */
- { 0x0000, 0x0000 }, /* R210 */
+ { 0xFFFF, 0xFFFF }, /* R208 */
+ { 0xFFFF, 0xFFFF }, /* R209 */
+ { 0xFFFF, 0xFFFF }, /* R210 */
{ 0x0000, 0x0000 }, /* R211 */
{ 0x0000, 0x0000 }, /* R212 */
{ 0x0000, 0x0000 }, /* R213 */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 37b8aa8a680f..125bfb6eb245 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -102,14 +102,19 @@ struct wm8994_priv {
wm8958_micdet_cb jack_cb;
void *jack_cb_data;
- bool jack_is_mic;
- bool jack_is_video;
+ int micdet_irq;
int revision;
struct wm8994_pdata *pdata;
+
+ unsigned int aif1clk_enable:1;
+ unsigned int aif2clk_enable:1;
+
+ unsigned int aif1clk_disable:1;
+ unsigned int aif2clk_disable:1;
};
-static int wm8994_readable(unsigned int reg)
+static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM8994_GPIO_1:
@@ -136,7 +141,7 @@ static int wm8994_readable(unsigned int reg)
return wm8994_access_masks[reg].readable != 0;
}
-static int wm8994_volatile(unsigned int reg)
+static int wm8994_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
if (reg >= WM8994_CACHE_SIZE)
return 1;
@@ -164,7 +169,7 @@ static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg,
BUG_ON(reg > WM8994_MAX_REGISTER);
- if (!wm8994_volatile(reg)) {
+ if (!wm8994_volatile(codec, reg)) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret != 0)
dev_err(codec->dev, "Cache write to %x failed: %d\n",
@@ -182,7 +187,7 @@ static unsigned int wm8994_read(struct snd_soc_codec *codec,
BUG_ON(reg > WM8994_MAX_REGISTER);
- if (!wm8994_volatile(reg) && wm8994_readable(reg) &&
+ if (!wm8994_volatile(codec, reg) && wm8994_readable(codec, reg) &&
reg < codec->driver->reg_cache_size) {
ret = snd_soc_cache_read(codec, reg, &val);
if (ret >= 0)
@@ -523,7 +528,7 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8994_priv *wm8994 =snd_soc_codec_get_drvdata(codec);
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
@@ -1004,6 +1009,117 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
}
}
+static int late_enable_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (wm8994->aif1clk_enable) {
+ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+ WM8994_AIF1CLK_ENA_MASK,
+ WM8994_AIF1CLK_ENA);
+ wm8994->aif1clk_enable = 0;
+ }
+ if (wm8994->aif2clk_enable) {
+ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+ WM8994_AIF2CLK_ENA_MASK,
+ WM8994_AIF2CLK_ENA);
+ wm8994->aif2clk_enable = 0;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int late_disable_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ if (wm8994->aif1clk_disable) {
+ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+ WM8994_AIF1CLK_ENA_MASK, 0);
+ wm8994->aif1clk_disable = 0;
+ }
+ if (wm8994->aif2clk_disable) {
+ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+ WM8994_AIF2CLK_ENA_MASK, 0);
+ wm8994->aif2clk_disable = 0;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wm8994->aif1clk_enable = 1;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wm8994->aif1clk_disable = 1;
+ break;
+ }
+
+ return 0;
+}
+
+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wm8994->aif2clk_enable = 1;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wm8994->aif2clk_disable = 1;
+ break;
+ }
+
+ return 0;
+}
+
+static int adc_mux_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ late_enable_ev(w, kcontrol, event);
+ return 0;
+}
+
+static int micbias_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ late_enable_ev(w, kcontrol, event);
+ return 0;
+}
+
+static int dac_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ unsigned int mask = 1 << w->shift;
+
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+ mask, mask);
+ return 0;
+}
+
static const char *hp_mux_text[] = {
"Mixer",
"DAC",
@@ -1272,11 +1388,68 @@ static const struct soc_enum aif2dacr_src_enum =
static const struct snd_kcontrol_new aif2dacr_src_mux =
SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
+static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+
+SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
+};
+
+static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
+SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
+ dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
+ dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
+ dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
+ dac_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
+SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
+SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
+SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
+SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
+SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
+SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+};
+
static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("DMIC1DAT"),
SND_SOC_DAPM_INPUT("DMIC2DAT"),
SND_SOC_DAPM_INPUT("Clock"),
+SND_SOC_DAPM_MICBIAS("MICBIAS", WM8994_MICBIAS, 2, 0),
+SND_SOC_DAPM_SUPPLY_S("MICBIAS Supply", 1, SND_SOC_NOPM, 0, 0, micbias_ev,
+ SND_SOC_DAPM_PRE_PMU),
+
SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
@@ -1284,9 +1457,6 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
-
SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
0, WM8994_POWER_MANAGEMENT_4, 9, 0),
SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
@@ -1369,14 +1539,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
-
-SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
-SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
-SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
-SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
-
SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
@@ -1516,14 +1678,12 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" },
/* DAC1 inputs */
- { "DAC1L", NULL, "DAC1L Mixer" },
{ "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" },
{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
- { "DAC1R", NULL, "DAC1R Mixer" },
{ "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" },
{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1532,7 +1692,6 @@ static const struct snd_soc_dapm_route intercon[] = {
/* DAC2/AIF2 outputs */
{ "AIF2ADCL", NULL, "AIF2DAC2L Mixer" },
- { "DAC2L", NULL, "AIF2DAC2L Mixer" },
{ "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" },
{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
@@ -1540,7 +1699,6 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" },
{ "AIF2ADCR", NULL, "AIF2DAC2R Mixer" },
- { "DAC2R", NULL, "AIF2DAC2R Mixer" },
{ "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" },
{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1584,11 +1742,31 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "Right Headphone Mux", "DAC", "DAC1R" },
};
+static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = {
+ { "DAC1L", NULL, "Late DAC1L Enable PGA" },
+ { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" },
+ { "DAC1R", NULL, "Late DAC1R Enable PGA" },
+ { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" },
+ { "DAC2L", NULL, "Late DAC2L Enable PGA" },
+ { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" },
+ { "DAC2R", NULL, "Late DAC2R Enable PGA" },
+ { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" }
+};
+
+static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = {
+ { "DAC1L", NULL, "DAC1L Mixer" },
+ { "DAC1R", NULL, "DAC1R Mixer" },
+ { "DAC2L", NULL, "AIF2DAC2L Mixer" },
+ { "DAC2R", NULL, "AIF2DAC2R Mixer" },
+};
+
static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
{ "AIF1DACDAT", NULL, "AIF2DACDAT" },
{ "AIF2DACDAT", NULL, "AIF1DACDAT" },
{ "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
{ "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
+ { "MICBIAS", NULL, "CLK_SYS" },
+ { "MICBIAS", NULL, "MICBIAS Supply" },
};
static const struct snd_soc_dapm_route wm8994_intercon[] = {
@@ -2514,6 +2692,22 @@ static int wm8994_resume(struct snd_soc_codec *codec)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int i, ret;
+ unsigned int val, mask;
+
+ if (wm8994->revision < 4) {
+ /* force a HW read */
+ val = wm8994_reg_read(codec->control_data,
+ WM8994_POWER_MANAGEMENT_5);
+
+ /* modify the cache only */
+ codec->cache_only = 1;
+ mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
+ WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
+ val &= mask;
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+ mask, val);
+ codec->cache_only = 0;
+ }
/* Restore the registers */
ret = snd_soc_cache_sync(codec);
@@ -2701,6 +2895,13 @@ static void wm8994_handle_pdata(struct wm8994_priv *wm8994)
else
snd_soc_add_controls(wm8994->codec, wm8994_eq_controls,
ARRAY_SIZE(wm8994_eq_controls));
+
+ for (i = 0; i < ARRAY_SIZE(pdata->micbias); i++) {
+ if (pdata->micbias[i]) {
+ snd_soc_write(codec, WM8958_MICBIAS1 + i,
+ pdata->micbias[i] & 0xffff);
+ }
+ }
}
/**
@@ -2811,47 +3012,18 @@ static void wm8958_default_micdet(u16 status, void *data)
int report = 0;
/* If nothing present then clear our statuses */
- if (!(status & WM8958_MICD_STS)) {
- wm8994->jack_is_video = false;
- wm8994->jack_is_mic = false;
+ if (!(status & WM8958_MICD_STS))
goto done;
- }
-
- /* Assume anything over 475 ohms is a microphone and remember
- * that we've seen one (since buttons override it) */
- if (status & 0x600)
- wm8994->jack_is_mic = true;
- if (wm8994->jack_is_mic)
- report |= SND_JACK_MICROPHONE;
- /* Video has an impedence of approximately 75 ohms; assume
- * this isn't used as a button and remember it since buttons
- * override it. */
- if (status & 0x40)
- wm8994->jack_is_video = true;
- if (wm8994->jack_is_video)
- report |= SND_JACK_VIDEOOUT;
+ report = SND_JACK_MICROPHONE;
/* Everything else is buttons; just assign slots */
- if (status & 0x4)
+ if (status & 0x1c0)
report |= SND_JACK_BTN_0;
- if (status & 0x8)
- report |= SND_JACK_BTN_1;
- if (status & 0x10)
- report |= SND_JACK_BTN_2;
- if (status & 0x20)
- report |= SND_JACK_BTN_3;
- if (status & 0x80)
- report |= SND_JACK_BTN_4;
- if (status & 0x100)
- report |= SND_JACK_BTN_5;
done:
- snd_soc_jack_report(wm8994->micdet[0].jack,
- SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
- SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
- SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
- report);
+ snd_soc_jack_report(wm8994->micdet[0].jack, report,
+ SND_JACK_BTN_0 | SND_JACK_MICROPHONE);
}
/**
@@ -2950,13 +3122,19 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
wm8994->pdata = dev_get_platdata(codec->dev->parent);
wm8994->codec = codec;
+ if (wm8994->pdata && wm8994->pdata->micdet_irq)
+ wm8994->micdet_irq = wm8994->pdata->micdet_irq;
+ else if (wm8994->pdata && wm8994->pdata->irq_base)
+ wm8994->micdet_irq = wm8994->pdata->irq_base +
+ WM8994_IRQ_MIC1_DET;
+
pm_runtime_enable(codec->dev);
pm_runtime_resume(codec->dev);
/* Read our current status back from the chip - we don't want to
* reset as this may interfere with the GPIO or LDO operation. */
for (i = 0; i < WM8994_CACHE_SIZE; i++) {
- if (!wm8994_readable(i) || wm8994_volatile(i))
+ if (!wm8994_readable(codec, i) || wm8994_volatile(codec, i))
continue;
ret = wm8994_reg_read(codec->control_data, i);
@@ -2998,14 +3176,17 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
switch (control->type) {
case WM8994:
- ret = wm8994_request_irq(codec->control_data,
- WM8994_IRQ_MIC1_DET,
- wm8994_mic_irq, "Mic 1 detect",
- wm8994);
- if (ret != 0)
- dev_warn(codec->dev,
- "Failed to request Mic1 detect IRQ: %d\n",
- ret);
+ if (wm8994->micdet_irq) {
+ ret = request_threaded_irq(wm8994->micdet_irq, NULL,
+ wm8994_mic_irq,
+ IRQF_TRIGGER_RISING,
+ "Mic1 detect",
+ wm8994);
+ if (ret != 0)
+ dev_warn(codec->dev,
+ "Failed to request Mic1 detect IRQ: %d\n",
+ ret);
+ }
ret = wm8994_request_irq(codec->control_data,
WM8994_IRQ_MIC1_SHRT,
@@ -3036,15 +3217,17 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
break;
case WM8958:
- ret = wm8994_request_irq(codec->control_data,
- WM8994_IRQ_MIC1_DET,
- wm8958_mic_irq, "Mic detect",
- wm8994);
- if (ret != 0)
- dev_warn(codec->dev,
- "Failed to request Mic detect IRQ: %d\n",
- ret);
- break;
+ if (wm8994->micdet_irq) {
+ ret = request_threaded_irq(wm8994->micdet_irq, NULL,
+ wm8958_mic_irq,
+ IRQF_TRIGGER_RISING,
+ "Mic detect",
+ wm8994);
+ if (ret != 0)
+ dev_warn(codec->dev,
+ "Failed to request Mic detect IRQ: %d\n",
+ ret);
+ }
}
/* Remember if AIFnLRCLK is configured as a GPIO. This should be
@@ -3125,6 +3308,21 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
case WM8994:
snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
ARRAY_SIZE(wm8994_specific_dapm_widgets));
+ if (wm8994->revision < 4) {
+ snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
+ ARRAY_SIZE(wm8994_lateclk_revd_widgets));
+ snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
+ ARRAY_SIZE(wm8994_adc_revd_widgets));
+ snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
+ ARRAY_SIZE(wm8994_dac_revd_widgets));
+ } else {
+ snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+ ARRAY_SIZE(wm8994_lateclk_widgets));
+ snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+ ARRAY_SIZE(wm8994_adc_widgets));
+ snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+ ARRAY_SIZE(wm8994_dac_widgets));
+ }
break;
case WM8958:
snd_soc_add_controls(codec, wm8958_snd_controls,
@@ -3143,10 +3341,15 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
snd_soc_dapm_add_routes(dapm, wm8994_intercon,
ARRAY_SIZE(wm8994_intercon));
- if (wm8994->revision < 4)
+ if (wm8994->revision < 4) {
snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
ARRAY_SIZE(wm8994_revd_intercon));
-
+ snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
+ ARRAY_SIZE(wm8994_lateclk_revd_intercon));
+ } else {
+ snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+ ARRAY_SIZE(wm8994_lateclk_intercon));
+ }
break;
case WM8958:
snd_soc_dapm_add_routes(dapm, wm8958_intercon,
@@ -3160,7 +3363,8 @@ err_irq:
wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994);
wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
- wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
+ if (wm8994->micdet_irq)
+ free_irq(wm8994->micdet_irq, wm8994);
err:
kfree(wm8994);
return ret;
@@ -3177,8 +3381,8 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
switch (control->type) {
case WM8994:
- wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
- wm8994);
+ if (wm8994->micdet_irq)
+ free_irq(wm8994->micdet_irq, wm8994);
wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
wm8994);
wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
@@ -3188,8 +3392,8 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
break;
case WM8958:
- wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
- wm8994);
+ if (wm8994->micdet_irq)
+ free_irq(wm8994->micdet_irq, wm8994);
break;
}
kfree(wm8994->retune_mobile_texts);
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 608c84c5aa8e..67eaaecbb42e 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -19,6 +19,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -30,6 +31,18 @@
#include "wm8995.h"
+#define WM8995_NUM_SUPPLIES 8
+static const char *wm8995_supply_names[WM8995_NUM_SUPPLIES] = {
+ "DCVDD",
+ "DBVDD1",
+ "DBVDD2",
+ "DBVDD3",
+ "AVDD1",
+ "AVDD2",
+ "CPVDD",
+ "MICVDD"
+};
+
static const u16 wm8995_reg_defs[WM8995_MAX_REGISTER + 1] = {
[0] = 0x8995, [5] = 0x0100, [16] = 0x000b, [17] = 0x000b,
[24] = 0x02c0, [25] = 0x02c0, [26] = 0x02c0, [27] = 0x02c0,
@@ -126,8 +139,37 @@ struct wm8995_priv {
int mclk[2];
int aifclk[2];
struct fll_config fll[2], fll_suspend[2];
+ struct regulator_bulk_data supplies[WM8995_NUM_SUPPLIES];
+ struct notifier_block disable_nb[WM8995_NUM_SUPPLIES];
+ struct snd_soc_codec *codec;
};
+/*
+ * We can't use the same notifier block for more than one supply and
+ * there's no way I can see to get from a callback to the caller
+ * except container_of().
+ */
+#define WM8995_REGULATOR_EVENT(n) \
+static int wm8995_regulator_event_##n(struct notifier_block *nb, \
+ unsigned long event, void *data) \
+{ \
+ struct wm8995_priv *wm8995 = container_of(nb, struct wm8995_priv, \
+ disable_nb[n]); \
+ if (event & REGULATOR_EVENT_DISABLE) { \
+ wm8995->codec->cache_sync = 1; \
+ } \
+ return 0; \
+}
+
+WM8995_REGULATOR_EVENT(0)
+WM8995_REGULATOR_EVENT(1)
+WM8995_REGULATOR_EVENT(2)
+WM8995_REGULATOR_EVENT(3)
+WM8995_REGULATOR_EVENT(4)
+WM8995_REGULATOR_EVENT(5)
+WM8995_REGULATOR_EVENT(6)
+WM8995_REGULATOR_EVENT(7)
+
static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0);
static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0);
@@ -909,7 +951,7 @@ static const struct snd_soc_dapm_route wm8995_intercon[] = {
{ "SPK2R", NULL, "SPK2R Driver" }
};
-static int wm8995_volatile(unsigned int reg)
+static int wm8995_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
/* out of bounds registers are generally considered
* volatile to support register banks that are partially
@@ -1483,6 +1525,11 @@ static int wm8995_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8995->supplies),
+ wm8995->supplies);
+ if (ret)
+ return ret;
+
ret = snd_soc_cache_sync(codec);
if (ret) {
dev_err(codec->dev,
@@ -1492,12 +1539,13 @@ static int wm8995_set_bias_level(struct snd_soc_codec *codec,
snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
WM8995_BG_ENA_MASK, WM8995_BG_ENA);
-
}
break;
case SND_SOC_BIAS_OFF:
snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
WM8995_BG_ENA_MASK, 0);
+ regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies),
+ wm8995->supplies);
break;
}
@@ -1536,10 +1584,12 @@ static int wm8995_remove(struct snd_soc_codec *codec)
static int wm8995_probe(struct snd_soc_codec *codec)
{
struct wm8995_priv *wm8995;
+ int i;
int ret;
codec->dapm.idle_bias_off = 1;
wm8995 = snd_soc_codec_get_drvdata(codec);
+ wm8995->codec = codec;
ret = snd_soc_codec_set_cache_io(codec, 16, 16, wm8995->control_type);
if (ret < 0) {
@@ -1547,21 +1597,58 @@ static int wm8995_probe(struct snd_soc_codec *codec)
return ret;
}
+ for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++)
+ wm8995->supplies[i].supply = wm8995_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8995->supplies),
+ wm8995->supplies);
+ if (ret) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ wm8995->disable_nb[0].notifier_call = wm8995_regulator_event_0;
+ wm8995->disable_nb[1].notifier_call = wm8995_regulator_event_1;
+ wm8995->disable_nb[2].notifier_call = wm8995_regulator_event_2;
+ wm8995->disable_nb[3].notifier_call = wm8995_regulator_event_3;
+ wm8995->disable_nb[4].notifier_call = wm8995_regulator_event_4;
+ wm8995->disable_nb[5].notifier_call = wm8995_regulator_event_5;
+ wm8995->disable_nb[6].notifier_call = wm8995_regulator_event_6;
+ wm8995->disable_nb[7].notifier_call = wm8995_regulator_event_7;
+
+ /* This should really be moved into the regulator core */
+ for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) {
+ ret = regulator_register_notifier(wm8995->supplies[i].consumer,
+ &wm8995->disable_nb[i]);
+ if (ret) {
+ dev_err(codec->dev,
+ "Failed to register regulator notifier: %d\n",
+ ret);
+ }
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8995->supplies),
+ wm8995->supplies);
+ if (ret) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_reg_get;
+ }
+
ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET);
if (ret < 0) {
dev_err(codec->dev, "Failed to read device ID: %d\n", ret);
- return ret;
+ goto err_reg_enable;
}
if (ret != 0x8995) {
dev_err(codec->dev, "Invalid device ID: %#x\n", ret);
- return -EINVAL;
+ goto err_reg_enable;
}
ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
- return ret;
+ goto err_reg_enable;
}
wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1596,6 +1683,12 @@ static int wm8995_probe(struct snd_soc_codec *codec)
ARRAY_SIZE(wm8995_intercon));
return 0;
+
+err_reg_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
+err_reg_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
+ return ret;
}
#define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 43825b2102a5..7883f3ed797b 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -15,6 +15,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
@@ -166,10 +167,10 @@ struct wm9081_priv {
int fll_fref;
int fll_fout;
int tdm_width;
- struct wm9081_retune_mobile_config *retune;
+ struct wm9081_pdata pdata;
};
-static int wm9081_volatile_register(unsigned int reg)
+static int wm9081_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM9081_SOFTWARE_RESET:
@@ -1081,21 +1082,22 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
aif4 |= wm9081->bclk / wm9081->fs;
/* Apply a ReTune Mobile configuration if it's in use */
- if (wm9081->retune) {
- struct wm9081_retune_mobile_config *retune = wm9081->retune;
+ if (wm9081->pdata.num_retune_configs) {
+ struct wm9081_pdata *pdata = &wm9081->pdata;
struct wm9081_retune_mobile_setting *s;
int eq1;
best = 0;
- best_val = abs(retune->configs[0].rate - wm9081->fs);
- for (i = 0; i < retune->num_configs; i++) {
- cur_val = abs(retune->configs[i].rate - wm9081->fs);
+ best_val = abs(pdata->retune_configs[0].rate - wm9081->fs);
+ for (i = 0; i < pdata->num_retune_configs; i++) {
+ cur_val = abs(pdata->retune_configs[i].rate -
+ wm9081->fs);
if (cur_val < best_val) {
best_val = cur_val;
best = i;
}
}
- s = &retune->configs[best];
+ s = &pdata->retune_configs[best];
dev_dbg(codec->dev, "ReTune Mobile %s tuned for %dHz\n",
s->name, s->rate);
@@ -1254,6 +1256,14 @@ static int wm9081_probe(struct snd_soc_codec *codec)
return ret;
}
+ reg = 0;
+ if (wm9081->pdata.irq_high)
+ reg |= WM9081_IRQ_POL;
+ if (!wm9081->pdata.irq_cmos)
+ reg |= WM9081_IRQ_OP_CTRL;
+ snd_soc_update_bits(codec, WM9081_INTERRUPT_CONTROL,
+ WM9081_IRQ_POL | WM9081_IRQ_OP_CTRL, reg);
+
wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Enable zero cross by default */
@@ -1265,7 +1275,7 @@ static int wm9081_probe(struct snd_soc_codec *codec)
snd_soc_add_controls(codec, wm9081_snd_controls,
ARRAY_SIZE(wm9081_snd_controls));
- if (!wm9081->retune) {
+ if (!wm9081->pdata.num_retune_configs) {
dev_dbg(codec->dev,
"No ReTune Mobile data, using normal EQ\n");
snd_soc_add_controls(codec, wm9081_eq_controls,
@@ -1341,6 +1351,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
wm9081->control_type = SND_SOC_I2C;
wm9081->control_data = i2c;
+ if (dev_get_platdata(&i2c->dev))
+ memcpy(&wm9081->pdata, dev_get_platdata(&i2c->dev),
+ sizeof(wm9081->pdata));
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm9081, &wm9081_dai, 1);
if (ret < 0)
@@ -1363,7 +1377,7 @@ MODULE_DEVICE_TABLE(i2c, wm9081_i2c_id);
static struct i2c_driver wm9081_i2c_driver = {
.driver = {
- .name = "wm9081-codec",
+ .name = "wm9081",
.owner = THIS_MODULE,
},
.probe = wm9081_i2c_probe,
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index a788c4297046..4de12203e611 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -144,7 +144,7 @@ struct wm9090_priv {
void *control_data;
};
-static int wm9090_volatile(unsigned int reg)
+static int wm9090_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
switch (reg) {
case WM9090_SOFTWARE_RESET:
@@ -518,7 +518,7 @@ static int wm9090_set_bias_level(struct snd_soc_codec *codec,
for (i = 1; i < codec->driver->reg_cache_size; i++) {
if (reg_cache[i] == wm9090_reg_defaults[i])
continue;
- if (wm9090_volatile(i))
+ if (wm9090_volatile(codec, i))
continue;
ret = snd_soc_write(codec, i, reg_cache[i]);
@@ -551,7 +551,6 @@ static int wm9090_set_bias_level(struct snd_soc_codec *codec,
static int wm9090_probe(struct snd_soc_codec *codec)
{
struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec);
- u16 *reg_cache = codec->reg_cache;
int ret;
codec->control_data = wm9090->control_data;
@@ -576,22 +575,30 @@ static int wm9090_probe(struct snd_soc_codec *codec)
/* Configure some defaults; they will be written out when we
* bring the bias up.
*/
- reg_cache[WM9090_IN1_LINE_INPUT_A_VOLUME] |= WM9090_IN1_VU
- | WM9090_IN1A_ZC;
- reg_cache[WM9090_IN1_LINE_INPUT_B_VOLUME] |= WM9090_IN1_VU
- | WM9090_IN1B_ZC;
- reg_cache[WM9090_IN2_LINE_INPUT_A_VOLUME] |= WM9090_IN2_VU
- | WM9090_IN2A_ZC;
- reg_cache[WM9090_IN2_LINE_INPUT_B_VOLUME] |= WM9090_IN2_VU
- | WM9090_IN2B_ZC;
- reg_cache[WM9090_SPEAKER_VOLUME_LEFT] |=
- WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC;
- reg_cache[WM9090_LEFT_OUTPUT_VOLUME] |=
- WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC;
- reg_cache[WM9090_RIGHT_OUTPUT_VOLUME] |=
- WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC;
-
- reg_cache[WM9090_CLOCKING_1] |= WM9090_TOCLK_ENA;
+ snd_soc_update_bits(codec, WM9090_IN1_LINE_INPUT_A_VOLUME,
+ WM9090_IN1_VU | WM9090_IN1A_ZC,
+ WM9090_IN1_VU | WM9090_IN1A_ZC);
+ snd_soc_update_bits(codec, WM9090_IN1_LINE_INPUT_B_VOLUME,
+ WM9090_IN1_VU | WM9090_IN1B_ZC,
+ WM9090_IN1_VU | WM9090_IN1B_ZC);
+ snd_soc_update_bits(codec, WM9090_IN2_LINE_INPUT_A_VOLUME,
+ WM9090_IN2_VU | WM9090_IN2A_ZC,
+ WM9090_IN2_VU | WM9090_IN2A_ZC);
+ snd_soc_update_bits(codec, WM9090_IN2_LINE_INPUT_B_VOLUME,
+ WM9090_IN2_VU | WM9090_IN2B_ZC,
+ WM9090_IN2_VU | WM9090_IN2B_ZC);
+ snd_soc_update_bits(codec, WM9090_SPEAKER_VOLUME_LEFT,
+ WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC,
+ WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC);
+ snd_soc_update_bits(codec, WM9090_LEFT_OUTPUT_VOLUME,
+ WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC,
+ WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC);
+ snd_soc_update_bits(codec, WM9090_RIGHT_OUTPUT_VOLUME,
+ WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC,
+ WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC);
+
+ snd_soc_update_bits(codec, WM9090_CLOCKING_1,
+ WM9090_TOCLK_ENA, WM9090_TOCLK_ENA);
wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 613df5db0b32..7b6b3c18e299 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -82,7 +82,8 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op)
} while (reg & op && count < 400);
if (reg & op)
- dev_err(codec->dev, "Timed out waiting for DC Servo\n");
+ dev_err(codec->dev, "Timed out waiting for DC Servo %x\n",
+ op);
}
/*
@@ -674,6 +675,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
};
static const struct snd_soc_dapm_route analogue_routes[] = {
+ { "MICBIAS1", NULL, "CLK_SYS" },
+ { "MICBIAS2", NULL, "CLK_SYS" },
+
{ "IN1L PGA", "IN1LP Switch", "IN1LP" },
{ "IN1L PGA", "IN1LN Switch", "IN1LN" },
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 9e0e565e6ed9..d0d60b8a54d4 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -658,7 +658,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
return -ENODEV;
}
- ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1,
+ ioarea = request_mem_region(mem->start, resource_size(mem),
pdev->name);
if (!ioarea) {
dev_err(&pdev->dev, "McBSP region already claimed\n");
@@ -694,20 +694,25 @@ static int davinci_i2s_probe(struct platform_device *pdev)
}
clk_enable(dev->clk);
- dev->base = (void __iomem *)IO_ADDRESS(mem->start);
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (!dev->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release_clk;
+ }
dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
- (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG);
+ (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
- (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG);
+ (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG);
/* first TX, then RX */
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
ret = -ENXIO;
- goto err_free_mem;
+ goto err_iounmap;
}
dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start;
@@ -715,7 +720,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
ret = -ENXIO;
- goto err_free_mem;
+ goto err_iounmap;
}
dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start;
dev->dev = &pdev->dev;
@@ -724,14 +729,19 @@ static int davinci_i2s_probe(struct platform_device *pdev)
ret = snd_soc_register_dai(&pdev->dev, &davinci_i2s_dai);
if (ret != 0)
- goto err_free_mem;
+ goto err_iounmap;
return 0;
+err_iounmap:
+ iounmap(dev->base);
+err_release_clk:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
err_free_mem:
kfree(dev);
err_release_region:
- release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ release_mem_region(mem->start, resource_size(mem));
return ret;
}
@@ -747,7 +757,7 @@ static int davinci_i2s_remove(struct platform_device *pdev)
dev->clk = NULL;
kfree(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ release_mem_region(mem->start, resource_size(mem));
return 0;
}
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index fb55d2c5d704..a5af834c8ef5 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -868,7 +868,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
}
ioarea = request_mem_region(mem->start,
- (mem->end - mem->start) + 1, pdev->name);
+ resource_size(mem), pdev->name);
if (!ioarea) {
dev_err(&pdev->dev, "Audio region already claimed\n");
ret = -EBUSY;
@@ -885,7 +885,13 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
clk_enable(dev->clk);
dev->clk_active = 1;
- dev->base = (void __iomem *)IO_ADDRESS(mem->start);
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (!dev->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release_clk;
+ }
+
dev->op_mode = pdata->op_mode;
dev->tdm_slots = pdata->tdm_slots;
dev->num_serializer = pdata->num_serializer;
@@ -899,14 +905,14 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
dma_data->asp_chan_q = pdata->asp_chan_q;
dma_data->ram_chan_q = pdata->ram_chan_q;
dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
- io_v2p(dev->base));
+ mem->start);
/* first TX, then RX */
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
ret = -ENODEV;
- goto err_release_region;
+ goto err_iounmap;
}
dma_data->channel = res->start;
@@ -915,13 +921,13 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
dma_data->asp_chan_q = pdata->asp_chan_q;
dma_data->ram_chan_q = pdata->ram_chan_q;
dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
- io_v2p(dev->base));
+ mem->start);
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
ret = -ENODEV;
- goto err_release_region;
+ goto err_iounmap;
}
dma_data->channel = res->start;
@@ -929,11 +935,16 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
ret = snd_soc_register_dai(&pdev->dev, &davinci_mcasp_dai[pdata->op_mode]);
if (ret != 0)
- goto err_release_region;
+ goto err_iounmap;
return 0;
+err_iounmap:
+ iounmap(dev->base);
+err_release_clk:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
err_release_region:
- release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ release_mem_region(mem->start, resource_size(mem));
err_release_data:
kfree(dev);
@@ -951,7 +962,7 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
dev->clk = NULL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ release_mem_region(mem->start, resource_size(mem));
kfree(dev);
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index 9d2afccc3a2d..13e05a302a92 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -205,7 +205,7 @@ static struct snd_soc_dai_driver davinci_vcif_dai = {
static int davinci_vcif_probe(struct platform_device *pdev)
{
- struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
+ struct davinci_vc *davinci_vc = mfd_get_data(pdev);
struct davinci_vcif_dev *davinci_vcif_dev;
int ret;
diff --git a/sound/soc/ep93xx/Kconfig b/sound/soc/ep93xx/Kconfig
index 57429041189c..91a28de94109 100644
--- a/sound/soc/ep93xx/Kconfig
+++ b/sound/soc/ep93xx/Kconfig
@@ -30,3 +30,12 @@ config SND_EP93XX_SOC_SIMONE
help
Say Y or M here if you want to add support for AC97 audio on the
Simplemachines Sim.One board.
+
+config SND_EP93XX_SOC_EDB93XX
+ tristate "SoC Audio support for Cirrus Logic EDB93xx boards"
+ depends on SND_EP93XX_SOC && (MACH_EDB9301 || MACH_EDB9302 || MACH_EDB9302A || MACH_EDB9307A || MACH_EDB9315A)
+ select SND_EP93XX_SOC_I2S
+ select SND_SOC_CS4271
+ help
+ Say Y or M here if you want to add support for I2S audio on the
+ Cirrus Logic EDB93xx boards.
diff --git a/sound/soc/ep93xx/Makefile b/sound/soc/ep93xx/Makefile
index 8e7977fb6b7d..5514146cbdf0 100644
--- a/sound/soc/ep93xx/Makefile
+++ b/sound/soc/ep93xx/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_SND_EP93XX_SOC_AC97) += snd-soc-ep93xx-ac97.o
# EP93XX Machine Support
snd-soc-snappercl15-objs := snappercl15.o
snd-soc-simone-objs := simone.o
+snd-soc-edb93xx-objs := edb93xx.o
obj-$(CONFIG_SND_EP93XX_SOC_SNAPPERCL15) += snd-soc-snappercl15.o
obj-$(CONFIG_SND_EP93XX_SOC_SIMONE) += snd-soc-simone.o
+obj-$(CONFIG_SND_EP93XX_SOC_EDB93XX) += snd-soc-edb93xx.o
diff --git a/sound/soc/ep93xx/edb93xx.c b/sound/soc/ep93xx/edb93xx.c
new file mode 100644
index 000000000000..b270085227f3
--- /dev/null
+++ b/sound/soc/ep93xx/edb93xx.c
@@ -0,0 +1,142 @@
+/*
+ * SoC audio for EDB93xx
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver supports the CS4271 codec as either master or slave, working
+ * in control port mode, connected via either SPI or I2C.
+ * The accepted data formats are I2S and left-justified.
+ * DAPM support is not implemented.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include "ep93xx-pcm.h"
+
+#define edb93xx_has_audio() (machine_is_edb9301() || \
+ machine_is_edb9302() || \
+ machine_is_edb9302a() || \
+ machine_is_edb9307a() || \
+ machine_is_edb9315a())
+
+static int edb93xx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int err;
+ unsigned int rate = params_rate(params);
+ /*
+ * We set LRCLK equal to `rate' and SCLK = LRCLK * 64,
+ * because our sample size is 32 bits * 2 channels.
+ * The I2S standard permits us to transmit more bits than
+ * the codec uses.
+ * MCLK = SCLK * 4 is the recommended value, but we have
+ * to fall back to a ratio of 2 for higher sample rates.
+ */
+ unsigned int mclk_rate = rate * 64 * ((rate <= 48000) ? 4 : 2);
+
+ err = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_IF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err)
+ return err;
+
+ err = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_IF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err)
+ return err;
+
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate,
+ SND_SOC_CLOCK_IN);
+ if (err)
+ return err;
+
+ return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate,
+ SND_SOC_CLOCK_OUT);
+}
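+
+/*
+ * Worked example of the MCLK derivation above (an illustrative sketch, not
+ * part of the original driver): with 64 SCLK cycles per LRCLK frame,
+ *   rate = 48000 Hz: SCLK = 48000 * 64 = 3.072 MHz, MCLK = 4 * SCLK = 12.288 MHz
+ *   rate = 96000 Hz: SCLK = 96000 * 64 = 6.144 MHz, MCLK = 2 * SCLK = 12.288 MHz
+ * so dropping to a ratio of 2 keeps MCLK in the same range at high rates.
+ */
+static inline unsigned int edb93xx_example_mclk(unsigned int rate)
+{
+	return rate * 64 * ((rate <= 48000) ? 4 : 2);
+}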
+
+static struct snd_soc_ops edb93xx_ops = {
+ .hw_params = edb93xx_hw_params,
+};
+
+static struct snd_soc_dai_link edb93xx_dai = {
+ .name = "CS4271",
+ .stream_name = "CS4271 HiFi",
+ .platform_name = "ep93xx-pcm-audio",
+ .cpu_dai_name = "ep93xx-i2s",
+ .codec_name = "spi0.0",
+ .codec_dai_name = "cs4271-hifi",
+ .ops = &edb93xx_ops,
+};
+
+static struct snd_soc_card snd_soc_edb93xx = {
+ .name = "EDB93XX",
+ .dai_link = &edb93xx_dai,
+ .num_links = 1,
+};
+
+static struct platform_device *edb93xx_snd_device;
+
+static int __init edb93xx_init(void)
+{
+ int ret;
+
+ if (!edb93xx_has_audio())
+ return -ENODEV;
+
+ ret = ep93xx_i2s_acquire(EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_I2SCLKDIV_ORIDE |
+ EP93XX_SYSCON_I2SCLKDIV_SPOL);
+ if (ret)
+ return ret;
+
+ edb93xx_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!edb93xx_snd_device) {
+ ret = -ENOMEM;
+ goto free_i2s;
+ }
+
+ platform_set_drvdata(edb93xx_snd_device, &snd_soc_edb93xx);
+ ret = platform_device_add(edb93xx_snd_device);
+ if (ret)
+ goto device_put;
+
+ return 0;
+
+device_put:
+ platform_device_put(edb93xx_snd_device);
+free_i2s:
+ ep93xx_i2s_release();
+ return ret;
+}
+module_init(edb93xx_init);
+
+static void __exit edb93xx_exit(void)
+{
+ platform_device_unregister(edb93xx_snd_device);
+ ep93xx_i2s_release();
+}
+module_exit(edb93xx_exit);
+
+MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
+MODULE_DESCRIPTION("ALSA SoC EDB93xx");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/ep93xx/ep93xx-ac97.c b/sound/soc/ep93xx/ep93xx-ac97.c
index 68a0bae1208a..104e95cda0ad 100644
--- a/sound/soc/ep93xx/ep93xx-ac97.c
+++ b/sound/soc/ep93xx/ep93xx-ac97.c
@@ -253,7 +253,6 @@ static int ep93xx_ac97_trigger(struct snd_pcm_substream *substream,
struct ep93xx_ac97_info *info = snd_soc_dai_get_drvdata(dai);
unsigned v = 0;
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 4cf98c03af22..15dac0f20cd8 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -896,8 +896,7 @@ static struct snd_pcm_ops fsl_dma_ops = {
.pointer = fsl_dma_pointer,
};
-static int __devinit fsl_soc_dma_probe(struct platform_device *pdev,
- const struct of_device_id *match)
+static int __devinit fsl_soc_dma_probe(struct platform_device *pdev)
{
struct dma_object *dma;
struct device_node *np = pdev->dev.of_node;
@@ -979,7 +978,7 @@ static const struct of_device_id fsl_soc_dma_ids[] = {
};
MODULE_DEVICE_TABLE(of, fsl_soc_dma_ids);
-static struct of_platform_driver fsl_soc_dma_driver = {
+static struct platform_driver fsl_soc_dma_driver = {
.driver = {
.name = "fsl-pcm-audio",
.owner = THIS_MODULE,
@@ -993,12 +992,12 @@ static int __init fsl_soc_dma_init(void)
{
pr_info("Freescale Elo DMA ASoC PCM Driver\n");
- return of_register_platform_driver(&fsl_soc_dma_driver);
+ return platform_driver_register(&fsl_soc_dma_driver);
}
static void __exit fsl_soc_dma_exit(void)
{
- of_unregister_platform_driver(&fsl_soc_dma_driver);
+ platform_driver_unregister(&fsl_soc_dma_driver);
}
module_init(fsl_soc_dma_init);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 4cc167a7aeb8..313e0ccedd5b 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -624,8 +624,7 @@ static void make_lowercase(char *s)
}
}
-static int __devinit fsl_ssi_probe(struct platform_device *pdev,
- const struct of_device_id *match)
+static int __devinit fsl_ssi_probe(struct platform_device *pdev)
{
struct fsl_ssi_private *ssi_private;
int ret = 0;
@@ -774,7 +773,7 @@ static const struct of_device_id fsl_ssi_ids[] = {
};
MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
-static struct of_platform_driver fsl_ssi_driver = {
+static struct platform_driver fsl_ssi_driver = {
.driver = {
.name = "fsl-ssi-dai",
.owner = THIS_MODULE,
@@ -788,12 +787,12 @@ static int __init fsl_ssi_init(void)
{
printk(KERN_INFO "Freescale Synchronous Serial Interface (SSI) ASoC Driver\n");
- return of_register_platform_driver(&fsl_ssi_driver);
+ return platform_driver_register(&fsl_ssi_driver);
}
static void __exit fsl_ssi_exit(void)
{
- of_unregister_platform_driver(&fsl_ssi_driver);
+ platform_driver_unregister(&fsl_ssi_driver);
}
module_init(fsl_ssi_init);
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index f92dca07cd35..fff695ccdd3e 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -368,8 +368,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {
.pcm_free = &psc_dma_free,
};
-static int mpc5200_hpcd_probe(struct of_device *op,
- const struct of_device_id *match)
+static int mpc5200_hpcd_probe(struct of_device *op)
{
phys_addr_t fifo;
struct psc_dma *psc_dma;
@@ -511,32 +510,31 @@ static int mpc5200_hpcd_remove(struct of_device *op)
}
static struct of_device_id mpc5200_hpcd_match[] = {
- {
- .compatible = "fsl,mpc5200-pcm",
- },
+ { .compatible = "fsl,mpc5200-pcm", },
{}
};
MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);
-static struct of_platform_driver mpc5200_hpcd_of_driver = {
- .owner = THIS_MODULE,
- .name = "mpc5200-pcm-audio",
- .match_table = mpc5200_hpcd_match,
+static struct platform_driver mpc5200_hpcd_of_driver = {
.probe = mpc5200_hpcd_probe,
.remove = mpc5200_hpcd_remove,
+ .dev = {
+ .owner = THIS_MODULE,
+ .name = "mpc5200-pcm-audio",
+ .of_match_table = mpc5200_hpcd_match,
+ }
};
static int __init mpc5200_hpcd_init(void)
{
- return of_register_platform_driver(&mpc5200_hpcd_of_driver);
+ return platform_driver_register(&mpc5200_hpcd_of_driver);
}
+module_init(mpc5200_hpcd_init);
static void __exit mpc5200_hpcd_exit(void)
{
- of_unregister_platform_driver(&mpc5200_hpcd_of_driver);
+ platform_driver_unregister(&mpc5200_hpcd_of_driver);
}
-
-module_init(mpc5200_hpcd_init);
module_exit(mpc5200_hpcd_exit);
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 40acc8e2b1ca..ad36b095bb79 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -272,8 +272,7 @@ static struct snd_soc_dai_driver psc_ac97_dai[] = {
* - Probe/remove operations
* - OF device match table
*/
-static int __devinit psc_ac97_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit psc_ac97_of_probe(struct platform_device *op)
{
int rc;
struct snd_ac97 ac97;
@@ -316,7 +315,7 @@ static struct of_device_id psc_ac97_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, psc_ac97_match);
-static struct of_platform_driver psc_ac97_driver = {
+static struct platform_driver psc_ac97_driver = {
.probe = psc_ac97_of_probe,
.remove = __devexit_p(psc_ac97_of_remove),
.driver = {
@@ -332,13 +331,13 @@ static struct of_platform_driver psc_ac97_driver = {
*/
static int __init psc_ac97_init(void)
{
- return of_register_platform_driver(&psc_ac97_driver);
+ return platform_driver_register(&psc_ac97_driver);
}
module_init(psc_ac97_init);
static void __exit psc_ac97_exit(void)
{
- of_unregister_platform_driver(&psc_ac97_driver);
+ platform_driver_unregister(&psc_ac97_driver);
}
module_exit(psc_ac97_exit);
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 9018fa5bf0db..87cf2a5c2b2c 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -150,8 +150,7 @@ static struct snd_soc_dai_driver psc_i2s_dai[] = {{
* - Probe/remove operations
* - OF device match table
*/
-static int __devinit psc_i2s_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit psc_i2s_of_probe(struct platform_device *op)
{
int rc;
struct psc_dma *psc_dma;
@@ -213,7 +212,7 @@ static struct of_device_id psc_i2s_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, psc_i2s_match);
-static struct of_platform_driver psc_i2s_driver = {
+static struct platform_driver psc_i2s_driver = {
.probe = psc_i2s_of_probe,
.remove = __devexit_p(psc_i2s_of_remove),
.driver = {
@@ -229,13 +228,13 @@ static struct of_platform_driver psc_i2s_driver = {
*/
static int __init psc_i2s_init(void)
{
- return of_register_platform_driver(&psc_i2s_driver);
+ return platform_driver_register(&psc_i2s_driver);
}
module_init(psc_i2s_init);
static void __exit psc_i2s_exit(void)
{
- of_unregister_platform_driver(&psc_i2s_driver);
+ platform_driver_unregister(&psc_i2s_driver);
}
module_exit(psc_i2s_exit);
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 7d7847a1e66b..c16c6b2eff95 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -53,9 +53,8 @@ struct mpc8610_hpcd_data {
*
* Here we program the DMACR and PMUXCR registers.
*/
-static int mpc8610_hpcd_machine_probe(struct platform_device *sound_device)
+static int mpc8610_hpcd_machine_probe(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(sound_device);
struct mpc8610_hpcd_data *machine_data =
container_of(card, struct mpc8610_hpcd_data, card);
struct ccsr_guts_86xx __iomem *guts;
@@ -138,9 +137,8 @@ static int mpc8610_hpcd_startup(struct snd_pcm_substream *substream)
* This function is called to remove the sound device for one SSI. We
* de-program the DMACR and PMUXCR register.
*/
-static int mpc8610_hpcd_machine_remove(struct platform_device *sound_device)
+static int mpc8610_hpcd_machine_remove(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(sound_device);
struct mpc8610_hpcd_data *machine_data =
container_of(card, struct mpc8610_hpcd_data, card);
struct ccsr_guts_86xx __iomem *guts;
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 026b756961e0..66e0b68af147 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -85,9 +85,8 @@ struct machine_data {
*
* Here we program the DMACR and PMUXCR registers.
*/
-static int p1022_ds_machine_probe(struct platform_device *sound_device)
+static int p1022_ds_machine_probe(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(sound_device);
struct machine_data *mdata =
container_of(card, struct machine_data, card);
struct ccsr_guts_85xx __iomem *guts;
@@ -160,9 +159,8 @@ static int p1022_ds_startup(struct snd_pcm_substream *substream)
* This function is called to remove the sound device for one SSI. We
* de-program the DMACR and PMUXCR register.
*/
-static int p1022_ds_machine_remove(struct platform_device *sound_device)
+static int p1022_ds_machine_remove(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(sound_device);
struct machine_data *mdata =
container_of(card, struct machine_data, card);
struct ccsr_guts_85xx __iomem *guts;
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index 642270a635ea..9eeb8f0d67e9 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -44,7 +44,8 @@ config SND_SOC_EUKREA_TLV320
tristate "Eukrea TLV320"
depends on MACH_EUKREA_MBIMX27_BASEBOARD \
|| MACH_EUKREA_MBIMXSD25_BASEBOARD \
- || MACH_EUKREA_MBIMXSD35_BASEBOARD
+ || MACH_EUKREA_MBIMXSD35_BASEBOARD \
+ || MACH_EUKREA_MBIMXSD51_BASEBOARD
select SND_SOC_TLV320AIC23
select SND_MXC_SOC_SSI
select SND_MXC_SOC_FIQ
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index e20c9e1457c0..75fb4b83548b 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
.name = "tlv320aic23",
.stream_name = "TLV320AIC23",
.codec_dai_name = "tlv320aic23-hifi",
- .platform_name = "imx-pcm-audio.0",
+ .platform_name = "imx-fiq-pcm-audio.0",
.codec_name = "tlv320aic23-codec.0-001a",
.cpu_dai_name = "imx-ssi.0",
.ops = &eukrea_tlv320_snd_ops,
@@ -98,7 +98,8 @@ static int __init eukrea_tlv320_init(void)
int ret;
if (!machine_is_eukrea_cpuimx27() && !machine_is_eukrea_cpuimx25sd()
- && !machine_is_eukrea_cpuimx35sd())
+ && !machine_is_eukrea_cpuimx35sd()
+ && !machine_is_eukrea_cpuimx51sd())
/* return happy. We might run on a totally different machine */
return 0;
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 30894ea7f333..bc92ec620004 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -108,7 +108,7 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
break;
case SND_SOC_DAIFMT_DSP_B:
/* data on rising edge of bclk, frame high with data */
- strcr |= SSI_STCR_TFSL;
+ strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0;
break;
case SND_SOC_DAIFMT_DSP_A:
/* data on rising edge of bclk, frame high 1clk before data */
@@ -656,6 +656,9 @@ static int imx_ssi_probe(struct platform_device *pdev)
ssi->dma_params_rx.dma_addr = res->start + SSI_SRX0;
ssi->dma_params_tx.dma_addr = res->start + SSI_STX0;
+ ssi->dma_params_tx.burstsize = 4;
+ ssi->dma_params_rx.burstsize = 4;
+
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx0");
if (res)
ssi->dma_params_tx.dma = res->start;
diff --git a/sound/soc/mid-x86/Kconfig b/sound/soc/mid-x86/Kconfig
new file mode 100644
index 000000000000..29350428f1c2
--- /dev/null
+++ b/sound/soc/mid-x86/Kconfig
@@ -0,0 +1,14 @@
+config SND_MFLD_MACHINE
+ tristate "SOC Machine Audio driver for Intel Medfield MID platform"
+ depends on INTEL_SCU_IPC
+ depends on SND_INTEL_SST
+ select SND_SOC_SN95031
+ select SND_SST_PLATFORM
+ help
+ This adds support for the ASoC machine driver for the Intel(R) MID Medfield
+ platform, used as the ALSA device in the audio subsystem on Intel(R) MID devices.
+ Say Y if you have such a device.
+ If unsure, select "N".
+
+config SND_SST_PLATFORM
+ tristate
diff --git a/sound/soc/mid-x86/Makefile b/sound/soc/mid-x86/Makefile
new file mode 100644
index 000000000000..639883339465
--- /dev/null
+++ b/sound/soc/mid-x86/Makefile
@@ -0,0 +1,5 @@
+snd-soc-sst-platform-objs := sst_platform.o
+snd-soc-mfld-machine-objs := mfld_machine.o
+
+obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
+obj-$(CONFIG_SND_MFLD_MACHINE) += snd-soc-mfld-machine.o
diff --git a/sound/soc/mid-x86/mfld_machine.c b/sound/soc/mid-x86/mfld_machine.c
new file mode 100644
index 000000000000..429aa1be2cff
--- /dev/null
+++ b/sound/soc/mid-x86/mfld_machine.c
@@ -0,0 +1,452 @@
+/*
+ * mfld_machine.c - ASoC machine driver for Intel Medfield MID platform
+ *
+ * Copyright (C) 2010 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "../codecs/sn95031.h"
+
+#define MID_MONO 1
+#define MID_STEREO 2
+#define MID_MAX_CAP 5
+#define MFLD_JACK_INSERT 0x04
+
+enum soc_mic_bias_zones {
+ MFLD_MV_START = 0,
+ /* mic bias voltage range for Headphones */
+ MFLD_MV_HP = 400,
+ /* mic bias voltage range for American Headset */
+ MFLD_MV_AM_HS = 650,
+ /* mic bias voltage range for Headset */
+ MFLD_MV_HS = 2000,
+ MFLD_MV_UNDEFINED,
+};
+
+static unsigned int hs_switch;
+static unsigned int lo_dac;
+
+struct mfld_mc_private {
+ struct platform_device *socdev;
+ void __iomem *int_base;
+ struct snd_soc_codec *codec;
+ u8 interrupt_status;
+};
+
+struct snd_soc_jack mfld_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin mfld_jack_pins[] = {
+ {
+ .pin = "Headphones",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "AMIC1",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+/* jack detection voltage zones */
+static struct snd_soc_jack_zone mfld_zones[] = {
+ {MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
+ {MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
+};
+
+/* sound card controls */
+static const char *headset_switch_text[] = {"Earpiece", "Headset"};
+
+static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
+
+static const struct soc_enum headset_enum =
+ SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
+
+static const struct soc_enum lo_enum =
+ SOC_ENUM_SINGLE_EXT(4, lo_text);
+
+static int headset_get_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = hs_switch;
+ return 0;
+}
+
+static int headset_set_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+ if (ucontrol->value.integer.value[0] == hs_switch)
+ return 0;
+
+ if (ucontrol->value.integer.value[0]) {
+ pr_debug("hs_set HS path\n");
+ snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
+ snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
+ } else {
+ pr_debug("hs_set EP path\n");
+ snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
+ snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
+ }
+ snd_soc_dapm_sync(&codec->dapm);
+ hs_switch = ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
+static void lo_enable_out_pins(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTL");
+ snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTR");
+ snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTL");
+ snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTR");
+ snd_soc_dapm_enable_pin(&codec->dapm, "VIB1OUT");
+ snd_soc_dapm_enable_pin(&codec->dapm, "VIB2OUT");
+ if (hs_switch) {
+ snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
+ snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
+ } else {
+ snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
+ snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
+ }
+}
+
+static int lo_get_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = lo_dac;
+ return 0;
+}
+
+static int lo_set_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+ if (ucontrol->value.integer.value[0] == lo_dac)
+ return 0;
+
+ /* we don't want to rely on the last state of lineout, so just enable all
+ * pins and then disable the pins that are not required
+ */
+ lo_enable_out_pins(codec);
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ pr_debug("set vibra path\n");
+ snd_soc_dapm_disable_pin(&codec->dapm, "VIB1OUT");
+ snd_soc_dapm_disable_pin(&codec->dapm, "VIB2OUT");
+ snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0);
+ break;
+
+ case 1:
+ pr_debug("set hs path\n");
+ snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
+ snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
+ snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x22);
+ break;
+
+ case 2:
+ pr_debug("set spkr path\n");
+ snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTL");
+ snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTR");
+ snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x44);
+ break;
+
+ case 3:
+ pr_debug("set null path\n");
+ snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTL");
+ snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTR");
+ snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x66);
+ break;
+ }
+ snd_soc_dapm_sync(&codec->dapm);
+ lo_dac = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static const struct snd_kcontrol_new mfld_snd_controls[] = {
+ SOC_ENUM_EXT("Playback Switch", headset_enum,
+ headset_get_switch, headset_set_switch),
+ SOC_ENUM_EXT("Lineout Mux", lo_enum,
+ lo_get_switch, lo_set_switch),
+};
+
+static const struct snd_soc_dapm_widget mfld_widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route mfld_map[] = {
+ {"Headphones", NULL, "HPOUTR"},
+ {"Headphones", NULL, "HPOUTL"},
+ {"Mic", NULL, "AMIC1"},
+};
+
+static void mfld_jack_check(unsigned int intr_status)
+{
+ struct mfld_jack_data jack_data;
+
+ jack_data.mfld_jack = &mfld_jack;
+ jack_data.intr_id = intr_status;
+
+ sn95031_jack_detection(&jack_data);
+ /* TODO: add american headset detection post gpiolib support */
+}
+
+static int mfld_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_codec *codec = runtime->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ int ret_val;
+
+ /* Add jack sense widgets */
+ snd_soc_dapm_new_controls(dapm, mfld_widgets, ARRAY_SIZE(mfld_widgets));
+
+ /* Set up the map */
+ snd_soc_dapm_add_routes(dapm, mfld_map, ARRAY_SIZE(mfld_map));
+
+ /* always connected */
+ snd_soc_dapm_enable_pin(dapm, "Headphones");
+ snd_soc_dapm_enable_pin(dapm, "Mic");
+ snd_soc_dapm_sync(dapm);
+
+ ret_val = snd_soc_add_controls(codec, mfld_snd_controls,
+ ARRAY_SIZE(mfld_snd_controls));
+ if (ret_val) {
+ pr_err("soc_add_controls failed %d", ret_val);
+ return ret_val;
+ }
+ /* default is earpiece pin, userspace sets it explicitly */
+ snd_soc_dapm_disable_pin(dapm, "Headphones");
+ /* default is lineout NC, userspace sets it explicitly */
+ snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
+ snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
+ lo_dac = 3;
+ hs_switch = 0;
+ /* we don't use line-in here, so set it to NC */
+ snd_soc_dapm_disable_pin(dapm, "LINEINL");
+ snd_soc_dapm_disable_pin(dapm, "LINEINR");
+ snd_soc_dapm_sync(dapm);
+
+ /* Headset and button jack detection */
+ ret_val = snd_soc_jack_new(codec, "Intel(R) MID Audio Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1, &mfld_jack);
+ if (ret_val) {
+ pr_err("jack creation failed\n");
+ return ret_val;
+ }
+
+ ret_val = snd_soc_jack_add_pins(&mfld_jack,
+ ARRAY_SIZE(mfld_jack_pins), mfld_jack_pins);
+ if (ret_val) {
+ pr_err("adding jack pins failed\n");
+ return ret_val;
+ }
+ ret_val = snd_soc_jack_add_zones(&mfld_jack,
+ ARRAY_SIZE(mfld_zones), mfld_zones);
+ if (ret_val) {
+ pr_err("adding jack zones failed\n");
+ return ret_val;
+ }
+
+ /* we want to check if anything is inserted at boot,
+ * so send a fake event to the codec; it will read the ADC
+ * to find out whether anything is there or not */
+ mfld_jack_check(MFLD_JACK_INSERT);
+ return ret_val;
+}
+
+struct snd_soc_dai_link mfld_msic_dailink[] = {
+ {
+ .name = "Medfield Headset",
+ .stream_name = "Headset",
+ .cpu_dai_name = "Headset-cpu-dai",
+ .codec_dai_name = "SN95031 Headset",
+ .codec_name = "sn95031",
+ .platform_name = "sst-platform",
+ .init = mfld_init,
+ },
+ {
+ .name = "Medfield Speaker",
+ .stream_name = "Speaker",
+ .cpu_dai_name = "Speaker-cpu-dai",
+ .codec_dai_name = "SN95031 Speaker",
+ .codec_name = "sn95031",
+ .platform_name = "sst-platform",
+ .init = NULL,
+ },
+ {
+ .name = "Medfield Vibra",
+ .stream_name = "Vibra1",
+ .cpu_dai_name = "Vibra1-cpu-dai",
+ .codec_dai_name = "SN95031 Vibra1",
+ .codec_name = "sn95031",
+ .platform_name = "sst-platform",
+ .init = NULL,
+ },
+ {
+ .name = "Medfield Haptics",
+ .stream_name = "Vibra2",
+ .cpu_dai_name = "Vibra2-cpu-dai",
+ .codec_dai_name = "SN95031 Vibra2",
+ .codec_name = "sn95031",
+ .platform_name = "sst-platform",
+ .init = NULL,
+ },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_mfld = {
+ .name = "medfield_audio",
+ .dai_link = mfld_msic_dailink,
+ .num_links = ARRAY_SIZE(mfld_msic_dailink),
+};
+
+static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
+{
+ struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
+
+ memcpy_fromio(&mc_private->interrupt_status,
+ ((void *)(mc_private->int_base)),
+ sizeof(u8));
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
+{
+ struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
+
+ if (mfld_jack.codec == NULL)
+ return IRQ_HANDLED;
+ mfld_jack_check(mc_drv_ctx->interrupt_status);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit snd_mfld_mc_probe(struct platform_device *pdev)
+{
+ int ret_val = 0, irq;
+ struct mfld_mc_private *mc_drv_ctx;
+ struct resource *irq_mem;
+
+ pr_debug("snd_mfld_mc_probe called\n");
+
+ /* retrieve the IRQ number */
+ irq = platform_get_irq(pdev, 0);
+
+ /* audio interrupt base is the SRAM location where
+ * interrupts are stored by the system firmware */
+ mc_drv_ctx = kzalloc(sizeof(*mc_drv_ctx), GFP_ATOMIC);
+ if (!mc_drv_ctx) {
+ pr_err("allocation failed\n");
+ return -ENOMEM;
+ }
+
+ irq_mem = platform_get_resource_byname(
+ pdev, IORESOURCE_MEM, "IRQ_BASE");
+ if (!irq_mem) {
+ pr_err("no mem resource given\n");
+ ret_val = -ENODEV;
+ goto unalloc;
+ }
+ mc_drv_ctx->int_base = ioremap_nocache(irq_mem->start,
+ resource_size(irq_mem));
+ if (!mc_drv_ctx->int_base) {
+ pr_err("Mapping of cache failed\n");
+ ret_val = -ENOMEM;
+ goto unalloc;
+ }
+ /* register for interrupt */
+ ret_val = request_threaded_irq(irq, snd_mfld_jack_intr_handler,
+ snd_mfld_jack_detection,
+ IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
+ if (ret_val) {
+ pr_err("cannot register IRQ\n");
+ goto unalloc;
+ }
+ /* register the soc card */
+ snd_soc_card_mfld.dev = &pdev->dev;
+ ret_val = snd_soc_register_card(&snd_soc_card_mfld);
+ if (ret_val) {
+ pr_debug("snd_soc_register_card failed %d\n", ret_val);
+ goto freeirq;
+ }
+ platform_set_drvdata(pdev, mc_drv_ctx);
+ pr_debug("successfully exited probe\n");
+ return ret_val;
+
+freeirq:
+ free_irq(irq, mc_drv_ctx);
+unalloc:
+ kfree(mc_drv_ctx);
+ return ret_val;
+}
+
+static int __devexit snd_mfld_mc_remove(struct platform_device *pdev)
+{
+ struct mfld_mc_private *mc_drv_ctx = platform_get_drvdata(pdev);
+
+ pr_debug("snd_mfld_mc_remove called\n");
+ free_irq(platform_get_irq(pdev, 0), mc_drv_ctx);
+ snd_soc_unregister_card(&snd_soc_card_mfld);
+ kfree(mc_drv_ctx);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver snd_mfld_mc_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "msic_audio",
+ },
+ .probe = snd_mfld_mc_probe,
+ .remove = __devexit_p(snd_mfld_mc_remove),
+};
+
+static int __init snd_mfld_driver_init(void)
+{
+ pr_debug("snd_mfld_driver_init called\n");
+ return platform_driver_register(&snd_mfld_mc_driver);
+}
+module_init(snd_mfld_driver_init);
+
+static void __exit snd_mfld_driver_exit(void)
+{
+ pr_debug("snd_mfld_driver_exit called\n");
+ platform_driver_unregister(&snd_mfld_mc_driver);
+}
+module_exit(snd_mfld_driver_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msic-audio");
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c
new file mode 100644
index 000000000000..ee2c22475a76
--- /dev/null
+++ b/sound/soc/mid-x86/sst_platform.c
@@ -0,0 +1,474 @@
+/*
+ * sst_platform.c - Intel MID Platform driver
+ *
+ * Copyright (C) 2010 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../../drivers/staging/intel_sst/intel_sst_ioctl.h"
+#include "../../../drivers/staging/intel_sst/intel_sst.h"
+#include "sst_platform.h"
+
+static struct snd_pcm_hardware sst_platform_pcm_hw = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_DOUBLE |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_MMAP|
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
+ SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
+ SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
+ .rates = (SNDRV_PCM_RATE_8000|
+ SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000),
+ .rate_min = SST_MIN_RATE,
+ .rate_max = SST_MAX_RATE,
+ .channels_min = SST_MIN_CHANNEL,
+ .channels_max = SST_MAX_CHANNEL,
+ .buffer_bytes_max = SST_MAX_BUFFER,
+ .period_bytes_min = SST_MIN_PERIOD_BYTES,
+ .period_bytes_max = SST_MAX_PERIOD_BYTES,
+ .periods_min = SST_MIN_PERIODS,
+ .periods_max = SST_MAX_PERIODS,
+ .fifo_size = SST_FIFO_SIZE,
+};
+
+/* MFLD - MSIC */
+struct snd_soc_dai_driver sst_platform_dai[] = {
+{
+ .name = "Headset-cpu-dai",
+ .id = 0,
+ .playback = {
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 5,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+},
+{
+ .name = "Speaker-cpu-dai",
+ .id = 1,
+ .playback = {
+ .channels_min = SST_MONO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+},
+{
+ .name = "Vibra1-cpu-dai",
+ .id = 2,
+ .playback = {
+ .channels_min = SST_MONO,
+ .channels_max = SST_MONO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+},
+{
+ .name = "Vibra2-cpu-dai",
+ .id = 3,
+ .playback = {
+ .channels_min = SST_MONO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+},
+};
+
+/* helper functions */
+static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
+ int state)
+{
+ spin_lock(&stream->status_lock);
+ stream->stream_status = state;
+ spin_unlock(&stream->status_lock);
+}
+
+static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
+{
+ int state;
+
+ spin_lock(&stream->status_lock);
+ state = stream->stream_status;
+ spin_unlock(&stream->status_lock);
+ return state;
+}
+
+static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
+ struct snd_sst_stream_params *param)
+{
+
+ param->uc.pcm_params.codec = SST_CODEC_TYPE_PCM;
+ param->uc.pcm_params.num_chan = (u8) substream->runtime->channels;
+ param->uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
+ param->uc.pcm_params.reserved = 0;
+ param->uc.pcm_params.sfreq = substream->runtime->rate;
+ param->uc.pcm_params.ring_buffer_size =
+ snd_pcm_lib_buffer_bytes(substream);
+ param->uc.pcm_params.period_count = substream->runtime->period_size;
+ param->uc.pcm_params.ring_buffer_addr =
+ virt_to_phys(substream->dma_buffer.area);
+ pr_debug("period_cnt = %d\n", param->uc.pcm_params.period_count);
+ pr_debug("sfreq= %d, wd_sz = %d\n",
+ param->uc.pcm_params.sfreq, param->uc.pcm_params.pcm_wd_sz);
+}
+
+static int sst_platform_alloc_stream(struct snd_pcm_substream *substream)
+{
+ struct sst_runtime_stream *stream =
+ substream->runtime->private_data;
+ struct snd_sst_stream_params param = {{{0,},},};
+ struct snd_sst_params str_params = {0};
+ int ret_val;
+
+ /* set the codec params and pass them on to the SST driver */
+ sst_fill_pcm_params(substream, &param);
+ substream->runtime->dma_area = substream->dma_buffer.area;
+ str_params.sparams = param;
+ str_params.codec = param.uc.pcm_params.codec;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ str_params.ops = STREAM_OPS_PLAYBACK;
+ str_params.device_type = substream->pcm->device + 1;
+ pr_debug("Playbck stream,Device %d\n",
+ substream->pcm->device);
+ } else {
+ str_params.ops = STREAM_OPS_CAPTURE;
+ str_params.device_type = SND_SST_DEVICE_CAPTURE;
+ pr_debug("Capture stream,Device %d\n",
+ substream->pcm->device);
+ }
+ ret_val = stream->sstdrv_ops->pcm_control->open(&str_params);
+ pr_debug("SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
+ if (ret_val < 0)
+ return ret_val;
+
+ stream->stream_info.str_id = ret_val;
+ pr_debug("str id : %d\n", stream->stream_info.str_id);
+ return ret_val;
+}
+
+static void sst_period_elapsed(void *mad_substream)
+{
+ struct snd_pcm_substream *substream = mad_substream;
+ struct sst_runtime_stream *stream;
+ int status;
+
+ if (!substream || !substream->runtime)
+ return;
+ stream = substream->runtime->private_data;
+ if (!stream)
+ return;
+ status = sst_get_stream_status(stream);
+ if (status != SST_PLATFORM_RUNNING)
+ return;
+ snd_pcm_period_elapsed(substream);
+}
+
+static int sst_platform_init_stream(struct snd_pcm_substream *substream)
+{
+ struct sst_runtime_stream *stream =
+ substream->runtime->private_data;
+ int ret_val;
+
+ pr_debug("setting buffer ptr param\n");
+ sst_set_stream_status(stream, SST_PLATFORM_INIT);
+ stream->stream_info.period_elapsed = sst_period_elapsed;
+ stream->stream_info.mad_substream = substream;
+ stream->stream_info.buffer_ptr = 0;
+ stream->stream_info.sfreq = substream->runtime->rate;
+ ret_val = stream->sstdrv_ops->pcm_control->device_control(
+ SST_SND_STREAM_INIT, &stream->stream_info);
+ if (ret_val)
+ pr_err("control_set ret error %d\n", ret_val);
+ return ret_val;
+
+}
+/* end -- helper functions */
+
+static int sst_platform_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime;
+ struct sst_runtime_stream *stream;
+ int ret_val = 0;
+
+ pr_debug("sst_platform_open called\n");
+ runtime = substream->runtime;
+ runtime->hw = sst_platform_pcm_hw;
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+ spin_lock_init(&stream->status_lock);
+ stream->stream_info.str_id = 0;
+ sst_set_stream_status(stream, SST_PLATFORM_INIT);
+ stream->stream_info.mad_substream = substream;
+ /* allocate memory for SST API set */
+ stream->sstdrv_ops = kzalloc(sizeof(*stream->sstdrv_ops),
+ GFP_KERNEL);
+ if (!stream->sstdrv_ops) {
+ pr_err("sst: mem allocation for ops fail\n");
+ kfree(stream);
+ return -ENOMEM;
+ }
+ stream->sstdrv_ops->vendor_id = MSIC_VENDOR_ID;
+ /* register with the SST driver to get access to the SST APIs */
+ ret_val = register_sst_card(stream->sstdrv_ops);
+ if (ret_val) {
+ pr_err("sst: sst card registration failed\n");
+ return ret_val;
+ }
+ runtime->private_data = stream;
+ return snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+}
+
+static int sst_platform_close(struct snd_pcm_substream *substream)
+{
+ struct sst_runtime_stream *stream;
+ int ret_val = 0, str_id;
+
+ pr_debug("sst_platform_close called\n");
+ stream = substream->runtime->private_data;
+ str_id = stream->stream_info.str_id;
+ if (str_id)
+ ret_val = stream->sstdrv_ops->pcm_control->close(str_id);
+ kfree(stream->sstdrv_ops);
+ kfree(stream);
+ return ret_val;
+}
+
+static int sst_platform_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct sst_runtime_stream *stream;
+ int ret_val = 0, str_id;
+
+ pr_debug("sst_platform_pcm_prepare called\n");
+ stream = substream->runtime->private_data;
+ str_id = stream->stream_info.str_id;
+ if (stream->stream_info.str_id) {
+ ret_val = stream->sstdrv_ops->pcm_control->device_control(
+ SST_SND_DROP, &str_id);
+ return ret_val;
+ }
+
+ ret_val = sst_platform_alloc_stream(substream);
+ if (ret_val < 0)
+ return ret_val;
+ snprintf(substream->pcm->id, sizeof(substream->pcm->id),
+ "%d", stream->stream_info.str_id);
+
+ ret_val = sst_platform_init_stream(substream);
+ if (ret_val)
+ return ret_val;
+ substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
+ return ret_val;
+}
+
+static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ int ret_val = 0, str_id;
+ struct sst_runtime_stream *stream;
+ int str_cmd, status;
+
+ pr_debug("sst_platform_pcm_trigger called\n");
+ stream = substream->runtime->private_data;
+ str_id = stream->stream_info.str_id;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ pr_debug("sst: Trigger Start\n");
+ str_cmd = SST_SND_START;
+ status = SST_PLATFORM_RUNNING;
+ stream->stream_info.mad_substream = substream;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ pr_debug("sst: in stop\n");
+ str_cmd = SST_SND_DROP;
+ status = SST_PLATFORM_DROPPED;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ pr_debug("sst: in pause\n");
+ str_cmd = SST_SND_PAUSE;
+ status = SST_PLATFORM_PAUSED;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ pr_debug("sst: in pause release\n");
+ str_cmd = SST_SND_RESUME;
+ status = SST_PLATFORM_RUNNING;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret_val = stream->sstdrv_ops->pcm_control->device_control(str_cmd,
+ &str_id);
+ if (!ret_val)
+ sst_set_stream_status(stream, status);
+
+ return ret_val;
+}
+
+
+static snd_pcm_uframes_t sst_platform_pcm_pointer
+ (struct snd_pcm_substream *substream)
+{
+ struct sst_runtime_stream *stream;
+ int ret_val, status;
+ struct pcm_stream_info *str_info;
+
+ stream = substream->runtime->private_data;
+ status = sst_get_stream_status(stream);
+ if (status == SST_PLATFORM_INIT)
+ return 0;
+ str_info = &stream->stream_info;
+ ret_val = stream->sstdrv_ops->pcm_control->device_control(
+ SST_SND_BUFFER_POINTER, str_info);
+ if (ret_val) {
+ pr_err("sst: error code = %d\n", ret_val);
+ return ret_val;
+ }
+ return stream->stream_info.buffer_ptr;
+}
+
+static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+
+ return 0;
+}
+
+static struct snd_pcm_ops sst_platform_ops = {
+ .open = sst_platform_open,
+ .close = sst_platform_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .prepare = sst_platform_pcm_prepare,
+ .trigger = sst_platform_pcm_trigger,
+ .pointer = sst_platform_pcm_pointer,
+ .hw_params = sst_platform_pcm_hw_params,
+};
+
+static void sst_pcm_free(struct snd_pcm *pcm)
+{
+ pr_debug("sst_pcm_free called\n");
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+int sst_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
+ struct snd_pcm *pcm)
+{
+ int retval = 0;
+
+ pr_debug("sst_pcm_new called\n");
+ if (dai->driver->playback.channels_min ||
+ dai->driver->capture.channels_min) {
+ retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ SST_MIN_BUFFER, SST_MAX_BUFFER);
+ if (retval) {
+ pr_err("dma buffer allocationf fail\n");
+ return retval;
+ }
+ }
+ return retval;
+}
+struct snd_soc_platform_driver sst_soc_platform_drv = {
+ .ops = &sst_platform_ops,
+ .pcm_new = sst_pcm_new,
+ .pcm_free = sst_pcm_free,
+};
+
+static int sst_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_debug("sst_platform_probe called\n");
+ ret = snd_soc_register_platform(&pdev->dev, &sst_soc_platform_drv);
+ if (ret) {
+ pr_err("registering soc platform failed\n");
+ return ret;
+ }
+
+ ret = snd_soc_register_dais(&pdev->dev,
+ sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
+ if (ret) {
+ pr_err("registering cpu dais failed\n");
+ snd_soc_unregister_platform(&pdev->dev);
+ }
+ return ret;
+}
+
+static int sst_platform_remove(struct platform_device *pdev)
+{
+
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(sst_platform_dai));
+ snd_soc_unregister_platform(&pdev->dev);
+ pr_debug("sst_platform_remove sucess\n");
+ return 0;
+}
+
+static struct platform_driver sst_platform_driver = {
+ .driver = {
+ .name = "sst-platform",
+ .owner = THIS_MODULE,
+ },
+ .probe = sst_platform_probe,
+ .remove = sst_platform_remove,
+};
+
+static int __init sst_soc_platform_init(void)
+{
+ pr_debug("sst_soc_platform_init called\n");
+ return platform_driver_register(&sst_platform_driver);
+}
+module_init(sst_soc_platform_init);
+
+static void __exit sst_soc_platform_exit(void)
+{
+ platform_driver_unregister(&sst_platform_driver);
+ pr_debug("sst_soc_platform_exit sucess\n");
+}
+module_exit(sst_soc_platform_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sst-platform");
diff --git a/sound/soc/mid-x86/sst_platform.h b/sound/soc/mid-x86/sst_platform.h
new file mode 100644
index 000000000000..df370286694f
--- /dev/null
+++ b/sound/soc/mid-x86/sst_platform.h
@@ -0,0 +1,63 @@
+/*
+ * sst_platform.h - Intel MID Platform driver header file
+ *
+ * Copyright (C) 2010 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __SST_PLATFORMDRV_H__
+#define __SST_PLATFORMDRV_H__
+
+#define SST_MONO 1
+#define SST_STEREO 2
+#define SST_MAX_CAP 5
+
+#define SST_MIN_RATE 8000
+#define SST_MAX_RATE 48000
+#define SST_MIN_CHANNEL 1
+#define SST_MAX_CHANNEL 5
+#define SST_MAX_BUFFER (800*1024)
+#define SST_MIN_BUFFER (800*1024)
+#define SST_MIN_PERIOD_BYTES 32
+#define SST_MAX_PERIOD_BYTES SST_MAX_BUFFER
+#define SST_MIN_PERIODS 2
+#define SST_MAX_PERIODS (1024*2)
+#define SST_FIFO_SIZE 0
+#define SST_CARD_NAMES "intel_mid_card"
+#define MSIC_VENDOR_ID 3
+
+struct sst_runtime_stream {
+ int stream_status;
+ struct pcm_stream_info stream_info;
+ struct intel_sst_card_ops *sstdrv_ops;
+ spinlock_t status_lock;
+};
+
+enum sst_drv_status {
+ SST_PLATFORM_INIT = 1,
+ SST_PLATFORM_STARTED,
+ SST_PLATFORM_RUNNING,
+ SST_PLATFORM_PAUSED,
+ SST_PLATFORM_DROPPED,
+};
+
+#endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index a088db6d5091..b5922984eac6 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -24,6 +24,7 @@ config SND_OMAP_SOC_RX51
select OMAP_MCBSP
select SND_OMAP_SOC_MCBSP
select SND_SOC_TLV320AIC3X
+ select SND_SOC_TPA6130A2
help
Say Y if you want to add support for SoC audio on Nokia RX-51
hardware. This is also known as Nokia N900 product.
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index d203f4da18a0..2175f09e57b6 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -69,110 +69,6 @@ static struct omap_mcbsp_data mcbsp_data[NUM_LINKS];
*/
static struct omap_pcm_dma_data omap_mcbsp_dai_dma_params[NUM_LINKS][2];
-#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
-static const int omap1_dma_reqs[][2] = {
- { OMAP_DMA_MCBSP1_TX, OMAP_DMA_MCBSP1_RX },
- { OMAP_DMA_MCBSP2_TX, OMAP_DMA_MCBSP2_RX },
- { OMAP_DMA_MCBSP3_TX, OMAP_DMA_MCBSP3_RX },
-};
-static const unsigned long omap1_mcbsp_port[][2] = {
- { OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
- OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1 },
- { OMAP1510_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1,
- OMAP1510_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1 },
- { OMAP1510_MCBSP3_BASE + OMAP_MCBSP_REG_DXR1,
- OMAP1510_MCBSP3_BASE + OMAP_MCBSP_REG_DRR1 },
-};
-#else
-static const int omap1_dma_reqs[][2] = {};
-static const unsigned long omap1_mcbsp_port[][2] = {};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-static const int omap24xx_dma_reqs[][2] = {
- { OMAP24XX_DMA_MCBSP1_TX, OMAP24XX_DMA_MCBSP1_RX },
- { OMAP24XX_DMA_MCBSP2_TX, OMAP24XX_DMA_MCBSP2_RX },
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
- { OMAP24XX_DMA_MCBSP3_TX, OMAP24XX_DMA_MCBSP3_RX },
- { OMAP24XX_DMA_MCBSP4_TX, OMAP24XX_DMA_MCBSP4_RX },
- { OMAP24XX_DMA_MCBSP5_TX, OMAP24XX_DMA_MCBSP5_RX },
-#endif
-};
-#else
-static const int omap24xx_dma_reqs[][2] = {};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP4)
-static const int omap44xx_dma_reqs[][2] = {
- { OMAP44XX_DMA_MCBSP1_TX, OMAP44XX_DMA_MCBSP1_RX },
- { OMAP44XX_DMA_MCBSP2_TX, OMAP44XX_DMA_MCBSP2_RX },
- { OMAP44XX_DMA_MCBSP3_TX, OMAP44XX_DMA_MCBSP3_RX },
- { OMAP44XX_DMA_MCBSP4_TX, OMAP44XX_DMA_MCBSP4_RX },
-};
-#else
-static const int omap44xx_dma_reqs[][2] = {};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2420)
-static const unsigned long omap2420_mcbsp_port[][2] = {
- { OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
- OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1 },
- { OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1,
- OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1 },
-};
-#else
-static const unsigned long omap2420_mcbsp_port[][2] = {};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2430)
-static const unsigned long omap2430_mcbsp_port[][2] = {
- { OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR,
- OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR,
- OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP2430_MCBSP3_BASE + OMAP_MCBSP_REG_DXR,
- OMAP2430_MCBSP3_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP2430_MCBSP4_BASE + OMAP_MCBSP_REG_DXR,
- OMAP2430_MCBSP4_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP2430_MCBSP5_BASE + OMAP_MCBSP_REG_DXR,
- OMAP2430_MCBSP5_BASE + OMAP_MCBSP_REG_DRR },
-};
-#else
-static const unsigned long omap2430_mcbsp_port[][2] = {};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP3)
-static const unsigned long omap34xx_mcbsp_port[][2] = {
- { OMAP34XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR,
- OMAP34XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP34XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR,
- OMAP34XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP34XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR,
- OMAP34XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP34XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR,
- OMAP34XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP34XX_MCBSP5_BASE + OMAP_MCBSP_REG_DXR,
- OMAP34XX_MCBSP5_BASE + OMAP_MCBSP_REG_DRR },
-};
-#else
-static const unsigned long omap34xx_mcbsp_port[][2] = {};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP4)
-static const unsigned long omap44xx_mcbsp_port[][2] = {
- { OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR,
- OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR,
- OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR,
- OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR },
- { OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR,
- OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR },
-};
-#else
-static const unsigned long omap44xx_mcbsp_port[][2] = {};
-#endif
-
static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -346,24 +242,10 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
unsigned int format, div, framesize, master;
dma_data = &omap_mcbsp_dai_dma_params[cpu_dai->id][substream->stream];
- if (cpu_class_is_omap1()) {
- dma = omap1_dma_reqs[bus_id][substream->stream];
- port = omap1_mcbsp_port[bus_id][substream->stream];
- } else if (cpu_is_omap2420()) {
- dma = omap24xx_dma_reqs[bus_id][substream->stream];
- port = omap2420_mcbsp_port[bus_id][substream->stream];
- } else if (cpu_is_omap2430()) {
- dma = omap24xx_dma_reqs[bus_id][substream->stream];
- port = omap2430_mcbsp_port[bus_id][substream->stream];
- } else if (cpu_is_omap343x()) {
- dma = omap24xx_dma_reqs[bus_id][substream->stream];
- port = omap34xx_mcbsp_port[bus_id][substream->stream];
- } else if (cpu_is_omap44xx()) {
- dma = omap44xx_dma_reqs[bus_id][substream->stream];
- port = omap44xx_mcbsp_port[bus_id][substream->stream];
- } else {
- return -ENODEV;
- }
+
+ dma = omap_mcbsp_dma_ch_params(bus_id, substream->stream);
+ port = omap_mcbsp_dma_reg_params(bus_id, substream->stream);
+
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
dma_data->data_type = OMAP_DMA_DATA_TYPE_S16;
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index 110c106611d3..37dc7211ed3f 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -43,7 +43,7 @@ enum omap_mcbsp_div {
OMAP_MCBSP_CLKGDV, /* Sample rate generator divider */
};
-#if defined(CONFIG_ARCH_OMAP2420)
+#if defined(CONFIG_SOC_OMAP2420)
#define NUM_LINKS 2
#endif
#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
@@ -54,7 +54,7 @@ enum omap_mcbsp_div {
#undef NUM_LINKS
#define NUM_LINKS 4
#endif
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_OMAP2430)
#undef NUM_LINKS
#define NUM_LINKS 5
#endif
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 09fb0df8d416..2d7d4115cb71 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -31,6 +31,7 @@
#include <sound/pcm.h>
#include <sound/soc.h>
#include <plat/mcbsp.h>
+#include "../codecs/tpa6130a2.h"
#include <asm/mach-types.h>
@@ -47,7 +48,8 @@
enum {
RX51_JACK_DISABLED,
- RX51_JACK_TVOUT, /* tv-out */
+ RX51_JACK_TVOUT, /* tv-out with stereo output */
+ RX51_JACK_HP, /* headphone: stereo output, no mic */
};
static int rx51_spk_func;
@@ -57,6 +59,15 @@ static int rx51_jack_func;
static void rx51_ext_control(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = &codec->dapm;
+ int hp = 0, tvout = 0;
+
+ switch (rx51_jack_func) {
+ case RX51_JACK_TVOUT:
+ tvout = 1;
+ case RX51_JACK_HP:
+ hp = 1;
+ break;
+ }
if (rx51_spk_func)
snd_soc_dapm_enable_pin(dapm, "Ext Spk");
@@ -66,9 +77,12 @@ static void rx51_ext_control(struct snd_soc_codec *codec)
snd_soc_dapm_enable_pin(dapm, "DMic");
else
snd_soc_dapm_disable_pin(dapm, "DMic");
+ if (hp)
+ snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+ else
+ snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
- gpio_set_value(RX51_TVOUT_SEL_GPIO,
- rx51_jack_func == RX51_JACK_TVOUT);
+ gpio_set_value(RX51_TVOUT_SEL_GPIO, tvout);
snd_soc_dapm_sync(dapm);
}
@@ -153,6 +167,19 @@ static int rx51_spk_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rx51_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = w->dapm->codec;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ tpa6130a2_stereo_enable(codec, 1);
+ else
+ tpa6130a2_stereo_enable(codec, 0);
+
+ return 0;
+}
+
static int rx51_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -212,19 +239,31 @@ static struct snd_soc_jack_gpio rx51_av_jack_gpios[] = {
static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event),
SND_SOC_DAPM_MIC("DMic", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", rx51_hp_event),
+};
+
+static const struct snd_soc_dapm_widget aic34_dapm_widgetsb[] = {
+ SND_SOC_DAPM_SPK("Earphone", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
{"Ext Spk", NULL, "HPLOUT"},
{"Ext Spk", NULL, "HPROUT"},
+ {"Headphone Jack", NULL, "LLOUT"},
+ {"Headphone Jack", NULL, "RLOUT"},
{"DMic Rate 64", NULL, "Mic Bias 2V"},
{"Mic Bias 2V", NULL, "DMic"},
};
+static const struct snd_soc_dapm_route audio_mapb[] = {
+ {"b LINE2R", NULL, "MONO_LOUT"},
+ {"Earphone", NULL, "b HPLOUT"},
+};
+
static const char *spk_function[] = {"Off", "On"};
static const char *input_function[] = {"ADC", "Digital Mic"};
-static const char *jack_function[] = {"Off", "TV-OUT"};
+static const char *jack_function[] = {"Off", "TV-OUT", "Headphone"};
static const struct soc_enum rx51_enum[] = {
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
@@ -241,6 +280,10 @@ static const struct snd_kcontrol_new aic34_rx51_controls[] = {
rx51_get_jack, rx51_set_jack),
};
+static const struct snd_kcontrol_new aic34_rx51_controlsb[] = {
+ SOC_DAPM_PIN_SWITCH("Earphone"),
+};
+
static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
@@ -265,6 +308,11 @@ static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
/* Set up RX-51 specific audio path audio_map */
snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+ err = tpa6130a2_add_controls(codec);
+ if (err < 0)
+ return err;
+ snd_soc_limit_volume(codec, "TPA6130A2 Headphone Playback Volume", 42);
+
snd_soc_dapm_sync(dapm);
/* AV jack detection */
@@ -279,6 +327,24 @@ static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
return err;
}
+static int rx51_aic34b_init(struct snd_soc_dapm_context *dapm)
+{
+ int err;
+
+ err = snd_soc_add_controls(dapm->codec, aic34_rx51_controlsb,
+ ARRAY_SIZE(aic34_rx51_controlsb));
+ if (err < 0)
+ return err;
+
+ err = snd_soc_dapm_new_controls(dapm, aic34_dapm_widgetsb,
+ ARRAY_SIZE(aic34_dapm_widgetsb));
+ if (err < 0)
+ return 0;
+
+ return snd_soc_dapm_add_routes(dapm, audio_mapb,
+ ARRAY_SIZE(audio_mapb));
+}
+
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link rx51_dai[] = {
{
@@ -293,11 +359,30 @@ static struct snd_soc_dai_link rx51_dai[] = {
},
};
+struct snd_soc_aux_dev rx51_aux_dev[] = {
+ {
+ .name = "TLV320AIC34b",
+ .codec_name = "tlv320aic3x-codec.2-0019",
+ .init = rx51_aic34b_init,
+ },
+};
+
+static struct snd_soc_codec_conf rx51_codec_conf[] = {
+ {
+ .dev_name = "tlv320aic3x-codec.2-0019",
+ .name_prefix = "b",
+ },
+};
+
/* Audio card */
static struct snd_soc_card rx51_sound_card = {
.name = "RX-51",
.dai_link = rx51_dai,
.num_links = ARRAY_SIZE(rx51_dai),
+ .aux_dev = rx51_aux_dev,
+ .num_aux_devs = ARRAY_SIZE(rx51_aux_dev),
+ .codec_conf = rx51_codec_conf,
+ .num_configs = ARRAY_SIZE(rx51_codec_conf),
};
static struct platform_device *rx51_snd_device;
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 28333e7d9c50..dc65650a6fa1 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9705-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 01bf31675c55..51897fcd911b 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9705-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index c6a37c6ef23b..053ed208e59f 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9712-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index fc22e6eefc98..b13a4252812d 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9712-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 0d70fc8c12bd..38ca6759907e 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9713-hifi",
.codec_name = "wm9713-codec",
.init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9713-aux",
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 857db96d4a4f..504e4004f004 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
{
.name = "AC97 HiFi",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.codec_name = "wm9712-codec",
.platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9712-aux",
.codec_name = "wm9712-codec",
.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 0fd60f423036..2afabaf59491 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
.hw_params = raumfeld_cs4270_hw_params,
};
-static int raumfeld_line_suspend(struct platform_device *pdev, pm_message_t state)
+static int raumfeld_line_suspend(struct snd_soc_card *card)
{
raumfeld_enable_audio(false);
return 0;
}
-static int raumfeld_line_resume(struct platform_device *pdev)
+static int raumfeld_line_resume(struct snd_soc_card *card)
{
raumfeld_enable_audio(true);
return 0;
@@ -229,19 +229,19 @@ static struct snd_soc_dai_link raumfeld_dai[] = {
{
.name = "ak4104",
.stream_name = "Playback",
- .cpu_dai_name = "pxa-ssp-dai.1",
- .codec_dai_name = "ak4104-hifi",
- .platform_name = "pxa-pcm-audio",
+ .cpu_dai_name = "pxa-ssp-dai.1",
+ .codec_dai_name = "ak4104-hifi",
+ .platform_name = "pxa-pcm-audio",
.ops = &raumfeld_ak4104_ops,
- .codec_name = "ak4104-codec.0",
+ .codec_name = "ak4104-codec.0",
},
{
.name = "CS4270",
.stream_name = "CS4270",
- .cpu_dai_name = "pxa-ssp-dai.0",
- .platform_name = "pxa-pcm-audio",
- .codec_dai_name = "cs4270-hifi",
- .codec_name = "cs4270-codec.0-0048",
+ .cpu_dai_name = "pxa-ssp-dai.0",
+ .platform_name = "pxa-pcm-audio",
+ .codec_dai_name = "cs4270-hifi",
+ .codec_name = "cs4270-codec.0-0048",
.ops = &raumfeld_cs4270_ops,
},};
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index f75804ef0897..9a2351366957 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9712-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -237,7 +237,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
},
};
-static int tosa_probe(struct platform_device *dev)
+static int tosa_probe(struct snd_soc_card *card)
{
int ret;
@@ -251,7 +251,7 @@ static int tosa_probe(struct platform_device *dev)
return ret;
}
-static int tosa_remove(struct platform_device *dev)
+static int tosa_remove(struct snd_soc_card *card)
{
gpio_free(TOSA_GPIO_L_MUTE);
return 0;
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b222a7d72027..ac577263b3e3 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.stream_name = "AC97 HiFi",
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_name = "wm9713-hifi",
.init = zylonite_wm9713_init,
},
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.stream_name = "AC97 Aux",
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_name = "wm9713-aux",
},
{
@@ -189,7 +189,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
},
};
-static int zylonite_probe(struct platform_device *pdev)
+static int zylonite_probe(struct snd_soc_card *card)
{
int ret;
@@ -216,7 +216,7 @@ static int zylonite_probe(struct platform_device *pdev)
return 0;
}
-static int zylonite_remove(struct platform_device *pdev)
+static int zylonite_remove(struct snd_soc_card *card)
{
if (clk_pout) {
clk_disable(pout);
@@ -226,8 +226,7 @@ static int zylonite_remove(struct platform_device *pdev)
return 0;
}
-static int zylonite_suspend_post(struct platform_device *pdev,
- pm_message_t state)
+static int zylonite_suspend_post(struct snd_soc_card *card)
{
if (clk_pout)
clk_disable(pout);
@@ -235,7 +234,7 @@ static int zylonite_suspend_post(struct platform_device *pdev,
return 0;
}
-static int zylonite_resume_pre(struct platform_device *pdev)
+static int zylonite_resume_pre(struct snd_soc_card *card)
{
int ret = 0;
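
The PXA machine drivers above make two related changes: the AC'97 links move from the device-indexed DAI names "pxa-ac97.0"/"pxa-ac97.1" to the registered DAI names "pxa2xx-ac97"/"pxa2xx-ac97-aux", and the card-level probe/remove/suspend/resume callbacks now take a struct snd_soc_card pointer instead of a platform_device (plus pm_message_t). A minimal sketch of a machine driver written against the new conventions; the card name, callbacks and wm9712 wiring are illustrative, mirroring the boards above rather than copying any one file:

	#include <sound/soc.h>

	static int example_card_probe(struct snd_soc_card *card)
	{
		/* request GPIOs, enable regulators, etc. */
		return 0;
	}

	static int example_card_suspend_post(struct snd_soc_card *card)
	{
		/* gate audio clocks while suspended */
		return 0;
	}

	static struct snd_soc_dai_link example_dai[] = {
		{
			.name = "AC97",
			.stream_name = "AC97 HiFi",
			.cpu_dai_name = "pxa2xx-ac97",		/* was "pxa-ac97.0" */
			.codec_dai_name = "wm9712-hifi",
			.codec_name = "wm9712-codec",
			.platform_name = "pxa-pcm-audio",
		},
		{
			.name = "AC97 Aux",
			.stream_name = "AC97 Aux",
			.cpu_dai_name = "pxa2xx-ac97-aux",	/* was "pxa-ac97.1" */
			.codec_dai_name = "wm9712-aux",
			.codec_name = "wm9712-codec",
			.platform_name = "pxa-pcm-audio",
		},
	};

	static struct snd_soc_card example_card = {
		.name = "example-ac97",
		.dai_link = example_dai,
		.num_links = ARRAY_SIZE(example_dai),
		.probe = example_card_probe,
		.suspend_post = example_card_suspend_post,
	};
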
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index a6a6b5fa2f2f..d6713d5a90e7 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
config SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
- depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PV310
+ depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_EXYNOS4
select S3C64XX_DMA if ARCH_S3C64XX
select S3C2410_DMA if ARCH_S3C2410
help
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 4770a9550341..f97110e72e85 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -12,24 +12,24 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <sound/soc.h>
-#include <plat/regs-ac97.h>
#include <mach/dma.h>
+#include <plat/regs-ac97.h>
#include <plat/audio.h>
#include "dma.h"
-#include "ac97.h"
#define AC_CMD_ADDR(x) (x << 16)
#define AC_CMD_DATA(x) (x & 0xffff)
+#define S3C_AC97_DAI_PCM 0
+#define S3C_AC97_DAI_MIC 1
+
struct s3c_ac97_info {
struct clk *ac97_clk;
void __iomem *regs;
diff --git a/sound/soc/samsung/ac97.h b/sound/soc/samsung/ac97.h
deleted file mode 100644
index 0d0e1b511457..000000000000
--- a/sound/soc/samsung/ac97.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* sound/soc/samsung/ac97.h
- *
- * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * Evolved from s3c2443-ac97.h
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * Author: Jaswinder Singh <jassi.brar@samsung.com>
- * Credits: Graeme Gregory, Sean Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S3C_AC97_H_
-#define __S3C_AC97_H_
-
-#define S3C_AC97_DAI_PCM 0
-#define S3C_AC97_DAI_MIC 1
-
-#endif /* __S3C_AC97_H_ */
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 21240198c5d6..5cb3b880f0d5 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -14,17 +14,11 @@
* option) any later version.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <asm/dma.h>
#include <mach/hardware.h>
@@ -32,6 +26,9 @@
#include "dma.h"
+#define ST_RUNNING (1<<0)
+#define ST_OPENED (1<<1)
+
static const struct snd_pcm_hardware dma_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -313,7 +310,7 @@ dma_pointer(struct snd_pcm_substream *substream)
/* we seem to be getting the odd error from the pcm library due
* to out-of-bounds pointers. this is maybe due to the dma engine
* not having loaded the new values for the channel before being
- * callled... (todo - fix )
+ * called... (todo - fix )
*/
if (res >= snd_pcm_lib_buffer_bytes(substream)) {
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index f8cd2b4223af..c50659269a40 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -12,9 +12,6 @@
#ifndef _S3C_AUDIO_H
#define _S3C_AUDIO_H
-#define ST_RUNNING (1<<0)
-#define ST_OPENED (1<<1)
-
struct s3c_dma_params {
struct s3c2410_dma_client *client; /* stream identifier */
int channel; /* Channel ID */
@@ -22,9 +19,4 @@ struct s3c_dma_params {
int dma_size; /* Size of the DMA transfer */
};
-#define S3C24XX_DAI_I2S 0
-
-/* platform data */
-extern struct snd_ac97_bus_ops s3c24xx_ac97_ops;
-
#endif
diff --git a/sound/soc/samsung/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
index 34dd9ef1b9c0..f6b3a3ce5919 100644
--- a/sound/soc/samsung/goni_wm8994.c
+++ b/sound/soc/samsung/goni_wm8994.c
@@ -11,21 +11,13 @@
*
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <sound/soc.h>
#include <sound/jack.h>
+
#include <asm/mach-types.h>
#include <mach/gpio.h>
-#include <mach/regs-clock.h>
-#include <linux/mfd/wm8994/core.h>
-#include <linux/mfd/wm8994/registers.h>
#include "../codecs/wm8994.h"
-#include "dma.h"
-#include "i2s.h"
#define MACHINE_NAME 0
#define CPU_VOICE_DAI 1
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index c45f7ce14d61..241f55d00660 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -13,25 +13,16 @@
*
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
#include <linux/gpio.h>
#include <sound/soc.h>
-#include <sound/uda1380.h>
#include <sound/jack.h>
#include <plat/regs-iis.h>
-
#include <mach/h1940-latch.h>
-
#include <asm/mach-types.h>
-#include "dma.h"
#include "s3c24xx-i2s.h"
-#include "../codecs/uda1380.h"
static unsigned int rates[] = {
11025,
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d00ac3a7102c..ffa09b3b2caa 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -15,9 +15,8 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <plat/audio.h>
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 08802520e014..3b53ad54bc33 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -11,22 +11,11 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include "dma.h"
#include "s3c2412-i2s.h"
-
#include "../codecs/wm8750.h"
static const struct snd_soc_dapm_route audio_map[] = {
diff --git a/sound/soc/samsung/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
index a2bb34def740..bd91c19a6c08 100644
--- a/sound/soc/samsung/ln2440sbc_alc650.c
+++ b/sound/soc/samsung/ln2440sbc_alc650.c
@@ -16,15 +16,8 @@
*
*/
-#include <linux/module.h>
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
#include <sound/soc.h>
-#include "dma.h"
-#include "ac97.h"
-
static struct snd_soc_card ln2440sbc;
static struct snd_soc_dai_link ln2440sbc_dai[] = {
diff --git a/sound/soc/samsung/neo1973_gta02_wm8753.c b/sound/soc/samsung/neo1973_gta02_wm8753.c
index 0d0ae2b9eef6..95ebf812b146 100644
--- a/sound/soc/samsung/neo1973_gta02_wm8753.c
+++ b/sound/soc/samsung/neo1973_gta02_wm8753.c
@@ -13,25 +13,15 @@
* option) any later version.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
+
#include <sound/soc.h>
#include <asm/mach-types.h>
-
#include <plat/regs-iis.h>
-
-#include <mach/regs-clock.h>
-#include <asm/io.h>
#include <mach/gta02.h>
+
#include "../codecs/wm8753.h"
-#include "dma.h"
#include "s3c24xx-i2s.h"
static struct snd_soc_card neo1973_gta02;
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index d20815d5ab2e..d3cd6888a810 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -24,7 +24,6 @@
#include <sound/tlv.h>
#include <asm/mach-types.h>
-#include <asm/hardware/scoop.h>
#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <mach/hardware.h>
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 48d0b750406b..38aac7d57a59 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -11,20 +11,11 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
#include <linux/io.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <plat/audio.h>
#include <plat/dma.h>
@@ -32,6 +23,113 @@
#include "dma.h"
#include "pcm.h"
+/*Register Offsets */
+#define S3C_PCM_CTL 0x00
+#define S3C_PCM_CLKCTL 0x04
+#define S3C_PCM_TXFIFO 0x08
+#define S3C_PCM_RXFIFO 0x0C
+#define S3C_PCM_IRQCTL 0x10
+#define S3C_PCM_IRQSTAT 0x14
+#define S3C_PCM_FIFOSTAT 0x18
+#define S3C_PCM_CLRINT 0x20
+
+/* PCM_CTL Bit-Fields */
+#define S3C_PCM_CTL_TXDIPSTICK_MASK 0x3f
+#define S3C_PCM_CTL_TXDIPSTICK_SHIFT 13
+#define S3C_PCM_CTL_RXDIPSTICK_MASK 0x3f
+#define S3C_PCM_CTL_RXDIPSTICK_SHIFT 7
+#define S3C_PCM_CTL_TXDMA_EN (0x1 << 6)
+#define S3C_PCM_CTL_RXDMA_EN (0x1 << 5)
+#define S3C_PCM_CTL_TXMSB_AFTER_FSYNC (0x1 << 4)
+#define S3C_PCM_CTL_RXMSB_AFTER_FSYNC (0x1 << 3)
+#define S3C_PCM_CTL_TXFIFO_EN (0x1 << 2)
+#define S3C_PCM_CTL_RXFIFO_EN (0x1 << 1)
+#define S3C_PCM_CTL_ENABLE (0x1 << 0)
+
+/* PCM_CLKCTL Bit-Fields */
+#define S3C_PCM_CLKCTL_SERCLK_EN (0x1 << 19)
+#define S3C_PCM_CLKCTL_SERCLKSEL_PCLK (0x1 << 18)
+#define S3C_PCM_CLKCTL_SCLKDIV_MASK 0x1ff
+#define S3C_PCM_CLKCTL_SYNCDIV_MASK 0x1ff
+#define S3C_PCM_CLKCTL_SCLKDIV_SHIFT 9
+#define S3C_PCM_CLKCTL_SYNCDIV_SHIFT 0
+
+/* PCM_TXFIFO Bit-Fields */
+#define S3C_PCM_TXFIFO_DVALID (0x1 << 16)
+#define S3C_PCM_TXFIFO_DATA_MSK (0xffff << 0)
+
+/* PCM_RXFIFO Bit-Fields */
+#define S3C_PCM_RXFIFO_DVALID (0x1 << 16)
+#define S3C_PCM_RXFIFO_DATA_MSK (0xffff << 0)
+
+/* PCM_IRQCTL Bit-Fields */
+#define S3C_PCM_IRQCTL_IRQEN (0x1 << 14)
+#define S3C_PCM_IRQCTL_WRDEN (0x1 << 12)
+#define S3C_PCM_IRQCTL_TXEMPTYEN (0x1 << 11)
+#define S3C_PCM_IRQCTL_TXALMSTEMPTYEN (0x1 << 10)
+#define S3C_PCM_IRQCTL_TXFULLEN (0x1 << 9)
+#define S3C_PCM_IRQCTL_TXALMSTFULLEN (0x1 << 8)
+#define S3C_PCM_IRQCTL_TXSTARVEN (0x1 << 7)
+#define S3C_PCM_IRQCTL_TXERROVRFLEN (0x1 << 6)
+#define S3C_PCM_IRQCTL_RXEMPTEN (0x1 << 5)
+#define S3C_PCM_IRQCTL_RXALMSTEMPTEN (0x1 << 4)
+#define S3C_PCM_IRQCTL_RXFULLEN (0x1 << 3)
+#define S3C_PCM_IRQCTL_RXALMSTFULLEN (0x1 << 2)
+#define S3C_PCM_IRQCTL_RXSTARVEN (0x1 << 1)
+#define S3C_PCM_IRQCTL_RXERROVRFLEN (0x1 << 0)
+
+/* PCM_IRQSTAT Bit-Fields */
+#define S3C_PCM_IRQSTAT_IRQPND (0x1 << 13)
+#define S3C_PCM_IRQSTAT_WRD_XFER (0x1 << 12)
+#define S3C_PCM_IRQSTAT_TXEMPTY (0x1 << 11)
+#define S3C_PCM_IRQSTAT_TXALMSTEMPTY (0x1 << 10)
+#define S3C_PCM_IRQSTAT_TXFULL (0x1 << 9)
+#define S3C_PCM_IRQSTAT_TXALMSTFULL (0x1 << 8)
+#define S3C_PCM_IRQSTAT_TXSTARV (0x1 << 7)
+#define S3C_PCM_IRQSTAT_TXERROVRFL (0x1 << 6)
+#define S3C_PCM_IRQSTAT_RXEMPT (0x1 << 5)
+#define S3C_PCM_IRQSTAT_RXALMSTEMPT (0x1 << 4)
+#define S3C_PCM_IRQSTAT_RXFULL (0x1 << 3)
+#define S3C_PCM_IRQSTAT_RXALMSTFULL (0x1 << 2)
+#define S3C_PCM_IRQSTAT_RXSTARV (0x1 << 1)
+#define S3C_PCM_IRQSTAT_RXERROVRFL (0x1 << 0)
+
+/* PCM_FIFOSTAT Bit-Fields */
+#define S3C_PCM_FIFOSTAT_TXCNT_MSK (0x3f << 14)
+#define S3C_PCM_FIFOSTAT_TXFIFOEMPTY (0x1 << 13)
+#define S3C_PCM_FIFOSTAT_TXFIFOALMSTEMPTY (0x1 << 12)
+#define S3C_PCM_FIFOSTAT_TXFIFOFULL (0x1 << 11)
+#define S3C_PCM_FIFOSTAT_TXFIFOALMSTFULL (0x1 << 10)
+#define S3C_PCM_FIFOSTAT_RXCNT_MSK (0x3f << 4)
+#define S3C_PCM_FIFOSTAT_RXFIFOEMPTY (0x1 << 3)
+#define S3C_PCM_FIFOSTAT_RXFIFOALMSTEMPTY (0x1 << 2)
+#define S3C_PCM_FIFOSTAT_RXFIFOFULL (0x1 << 1)
+#define S3C_PCM_FIFOSTAT_RXFIFOALMSTFULL (0x1 << 0)
+
+/**
+ * struct s3c_pcm_info - S3C PCM Controller information
+ * @dev: The parent device passed to use from the probe.
+ * @regs: The pointer to the device register block.
+ * @dma_playback: DMA information for playback channel.
+ * @dma_capture: DMA information for capture channel.
+ */
+struct s3c_pcm_info {
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *regs;
+
+ unsigned int sclk_per_fs;
+
+ /* Whether to keep PCMSCLK enabled even when idle(no active xfer) */
+ unsigned int idleclk;
+
+ struct clk *pclk;
+ struct clk *cclk;
+
+ struct s3c_dma_params *dma_playback;
+ struct s3c_dma_params *dma_capture;
+};
+
static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
.name = "PCM Stereo out"
};
diff --git a/sound/soc/samsung/pcm.h b/sound/soc/samsung/pcm.h
index 03393dcf852d..726baf814613 100644
--- a/sound/soc/samsung/pcm.h
+++ b/sound/soc/samsung/pcm.h
@@ -9,116 +9,9 @@
#ifndef __S3C_PCM_H
#define __S3C_PCM_H __FILE__
-/*Register Offsets */
-#define S3C_PCM_CTL (0x00)
-#define S3C_PCM_CLKCTL (0x04)
-#define S3C_PCM_TXFIFO (0x08)
-#define S3C_PCM_RXFIFO (0x0C)
-#define S3C_PCM_IRQCTL (0x10)
-#define S3C_PCM_IRQSTAT (0x14)
-#define S3C_PCM_FIFOSTAT (0x18)
-#define S3C_PCM_CLRINT (0x20)
-
-/* PCM_CTL Bit-Fields */
-#define S3C_PCM_CTL_TXDIPSTICK_MASK (0x3f)
-#define S3C_PCM_CTL_TXDIPSTICK_SHIFT (13)
-#define S3C_PCM_CTL_RXDIPSTICK_MASK (0x3f)
-#define S3C_PCM_CTL_RXDIPSTICK_SHIFT (7)
-#define S3C_PCM_CTL_TXDMA_EN (0x1<<6)
-#define S3C_PCM_CTL_RXDMA_EN (0x1<<5)
-#define S3C_PCM_CTL_TXMSB_AFTER_FSYNC (0x1<<4)
-#define S3C_PCM_CTL_RXMSB_AFTER_FSYNC (0x1<<3)
-#define S3C_PCM_CTL_TXFIFO_EN (0x1<<2)
-#define S3C_PCM_CTL_RXFIFO_EN (0x1<<1)
-#define S3C_PCM_CTL_ENABLE (0x1<<0)
-
-/* PCM_CLKCTL Bit-Fields */
-#define S3C_PCM_CLKCTL_SERCLK_EN (0x1<<19)
-#define S3C_PCM_CLKCTL_SERCLKSEL_PCLK (0x1<<18)
-#define S3C_PCM_CLKCTL_SCLKDIV_MASK (0x1ff)
-#define S3C_PCM_CLKCTL_SYNCDIV_MASK (0x1ff)
-#define S3C_PCM_CLKCTL_SCLKDIV_SHIFT (9)
-#define S3C_PCM_CLKCTL_SYNCDIV_SHIFT (0)
-
-/* PCM_TXFIFO Bit-Fields */
-#define S3C_PCM_TXFIFO_DVALID (0x1<<16)
-#define S3C_PCM_TXFIFO_DATA_MSK (0xffff<<0)
-
-/* PCM_RXFIFO Bit-Fields */
-#define S3C_PCM_RXFIFO_DVALID (0x1<<16)
-#define S3C_PCM_RXFIFO_DATA_MSK (0xffff<<0)
-
-/* PCM_IRQCTL Bit-Fields */
-#define S3C_PCM_IRQCTL_IRQEN (0x1<<14)
-#define S3C_PCM_IRQCTL_WRDEN (0x1<<12)
-#define S3C_PCM_IRQCTL_TXEMPTYEN (0x1<<11)
-#define S3C_PCM_IRQCTL_TXALMSTEMPTYEN (0x1<<10)
-#define S3C_PCM_IRQCTL_TXFULLEN (0x1<<9)
-#define S3C_PCM_IRQCTL_TXALMSTFULLEN (0x1<<8)
-#define S3C_PCM_IRQCTL_TXSTARVEN (0x1<<7)
-#define S3C_PCM_IRQCTL_TXERROVRFLEN (0x1<<6)
-#define S3C_PCM_IRQCTL_RXEMPTEN (0x1<<5)
-#define S3C_PCM_IRQCTL_RXALMSTEMPTEN (0x1<<4)
-#define S3C_PCM_IRQCTL_RXFULLEN (0x1<<3)
-#define S3C_PCM_IRQCTL_RXALMSTFULLEN (0x1<<2)
-#define S3C_PCM_IRQCTL_RXSTARVEN (0x1<<1)
-#define S3C_PCM_IRQCTL_RXERROVRFLEN (0x1<<0)
-
-/* PCM_IRQSTAT Bit-Fields */
-#define S3C_PCM_IRQSTAT_IRQPND (0x1<<13)
-#define S3C_PCM_IRQSTAT_WRD_XFER (0x1<<12)
-#define S3C_PCM_IRQSTAT_TXEMPTY (0x1<<11)
-#define S3C_PCM_IRQSTAT_TXALMSTEMPTY (0x1<<10)
-#define S3C_PCM_IRQSTAT_TXFULL (0x1<<9)
-#define S3C_PCM_IRQSTAT_TXALMSTFULL (0x1<<8)
-#define S3C_PCM_IRQSTAT_TXSTARV (0x1<<7)
-#define S3C_PCM_IRQSTAT_TXERROVRFL (0x1<<6)
-#define S3C_PCM_IRQSTAT_RXEMPT (0x1<<5)
-#define S3C_PCM_IRQSTAT_RXALMSTEMPT (0x1<<4)
-#define S3C_PCM_IRQSTAT_RXFULL (0x1<<3)
-#define S3C_PCM_IRQSTAT_RXALMSTFULL (0x1<<2)
-#define S3C_PCM_IRQSTAT_RXSTARV (0x1<<1)
-#define S3C_PCM_IRQSTAT_RXERROVRFL (0x1<<0)
-
-/* PCM_FIFOSTAT Bit-Fields */
-#define S3C_PCM_FIFOSTAT_TXCNT_MSK (0x3f<<14)
-#define S3C_PCM_FIFOSTAT_TXFIFOEMPTY (0x1<<13)
-#define S3C_PCM_FIFOSTAT_TXFIFOALMSTEMPTY (0x1<<12)
-#define S3C_PCM_FIFOSTAT_TXFIFOFULL (0x1<<11)
-#define S3C_PCM_FIFOSTAT_TXFIFOALMSTFULL (0x1<<10)
-#define S3C_PCM_FIFOSTAT_RXCNT_MSK (0x3f<<4)
-#define S3C_PCM_FIFOSTAT_RXFIFOEMPTY (0x1<<3)
-#define S3C_PCM_FIFOSTAT_RXFIFOALMSTEMPTY (0x1<<2)
-#define S3C_PCM_FIFOSTAT_RXFIFOFULL (0x1<<1)
-#define S3C_PCM_FIFOSTAT_RXFIFOALMSTFULL (0x1<<0)
-
#define S3C_PCM_CLKSRC_PCLK 0
#define S3C_PCM_CLKSRC_MUX 1
#define S3C_PCM_SCLK_PER_FS 0
-/**
- * struct s3c_pcm_info - S3C PCM Controller information
- * @dev: The parent device passed to use from the probe.
- * @regs: The pointer to the device register block.
- * @dma_playback: DMA information for playback channel.
- * @dma_capture: DMA information for capture channel.
- */
-struct s3c_pcm_info {
- spinlock_t lock;
- struct device *dev;
- void __iomem *regs;
-
- unsigned int sclk_per_fs;
-
- /* Whether to keep PCMSCLK enabled even when idle(no active xfer) */
- unsigned int idleclk;
-
- struct clk *pclk;
- struct clk *cclk;
-
- struct s3c_dma_params *dma_playback;
- struct s3c_dma_params *dma_capture;
-};
-
#endif /* __S3C_PCM_H */
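
The Samsung header cleanups (ac97.h removed, dma.h and pcm.h trimmed) move register offsets, bit-field masks and the driver-private struct s3c_pcm_info out of the shared headers and into the C files that use them, so the headers only expose what machine drivers actually need. Reconstructed from the hunks above (not a verbatim copy), the public part of pcm.h that survives is roughly:

	/* sound/soc/samsung/pcm.h - interface left after the move */
	#ifndef __S3C_PCM_H
	#define __S3C_PCM_H __FILE__

	#define S3C_PCM_CLKSRC_PCLK	0
	#define S3C_PCM_CLKSRC_MUX	1

	#define S3C_PCM_SCLK_PER_FS	0

	#endif /* __S3C_PCM_H */
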
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index f40027445dda..1e574a5d440d 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -17,26 +17,15 @@
*
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
#include <linux/gpio.h>
-#include <linux/clk.h>
#include <sound/soc.h>
-#include <sound/uda1380.h>
#include <sound/jack.h>
#include <plat/regs-iis.h>
-
-#include <mach/regs-clock.h>
-
#include <asm/mach-types.h>
-#include "dma.h"
#include "s3c24xx-i2s.h"
-#include "../codecs/uda1380.h"
static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd);
static int rx1950_startup(struct snd_pcm_substream *substream);
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index 094f36e41e83..52074a2b0696 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -20,9 +20,8 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <mach/dma.h>
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 7ea837867124..841ab14c1100 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -16,21 +16,13 @@
* option) any later version.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/clk.h>
-#include <linux/kernel.h>
#include <linux/io.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
#include <sound/soc.h>
-#include <mach/hardware.h>
+#include <sound/pcm_params.h>
#include <mach/regs-gpio.h>
#include <mach/dma.h>
@@ -39,8 +31,6 @@
#include "regs-i2s-v2.h"
#include "s3c2412-i2s.h"
-#define S3C2412_I2S_DEBUG 0
-
static struct s3c2410_dma_client s3c2412_dma_client_out = {
.name = "I2S PCM Stereo out"
};
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 13e41ed8e22b..63d8849d80bd 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -14,28 +14,16 @@
* option) any later version.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
-#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/gpio.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
-#include <mach/hardware.h>
#include <mach/regs-gpio.h>
-#include <mach/regs-clock.h>
-
-#include <asm/dma.h>
#include <mach/dma.h>
-
#include <plat/regs-iis.h>
#include "dma.h"
diff --git a/sound/soc/samsung/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
index a434032d1832..349566f0686b 100644
--- a/sound/soc/samsung/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -7,20 +7,13 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/clk.h>
-#include <linux/i2c.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
#include <sound/soc.h>
#include <plat/audio-simtec.h>
-#include "dma.h"
#include "s3c24xx-i2s.h"
#include "s3c24xx_simtec.h"
diff --git a/sound/soc/samsung/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
index 08fcaaa66907..ce6aef604179 100644
--- a/sound/soc/samsung/s3c24xx_simtec_hermes.c
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -7,18 +7,8 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
#include <sound/soc.h>
-#include <plat/audio-simtec.h>
-
-#include "dma.h"
-#include "s3c24xx-i2s.h"
#include "s3c24xx_simtec.h"
static const struct snd_soc_dapm_widget dapm_widgets[] = {
diff --git a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
index 116e3e670167..a7ef7db54687 100644
--- a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -7,22 +7,10 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
#include <sound/soc.h>
-#include <plat/audio-simtec.h>
-
-#include "dma.h"
-#include "s3c24xx-i2s.h"
#include "s3c24xx_simtec.h"
-#include "../codecs/tlv320aic23.h"
-
/* supported machines:
*
* Machine Connections AMP
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 2c09e93dd566..3cb700751078 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -11,22 +11,15 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/clk.h>
-#include <linux/mutex.h>
#include <linux/gpio.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
+
#include <sound/soc.h>
#include <sound/s3c24xx_uda134x.h>
-#include <sound/uda134x.h>
#include <plat/regs-iis.h>
-#include "dma.h"
#include "s3c24xx-i2s.h"
-#include "../codecs/uda134x.h"
-
/* #define ENFORCE_RATES 1 */
/*
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
index 61e2b5219d42..0a2c4f223038 100644
--- a/sound/soc/samsung/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -13,20 +13,14 @@
*
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <asm/mach-types.h>
-#include "dma.h"
#include "i2s.h"
-
#include "../codecs/wm8750.h"
/*
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 3be7e7e92d6e..3a0dbfc793f0 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -12,15 +12,8 @@
*
*/
-#include <linux/module.h>
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
#include <sound/soc.h>
-#include "dma.h"
-#include "ac97.h"
-
static struct snd_soc_card smdk2443;
static struct snd_soc_dai_link smdk2443_dai[] = {
diff --git a/sound/soc/samsung/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c
index b5c3fad01bb8..e8ac961c6ba1 100644
--- a/sound/soc/samsung/smdk_spdif.c
+++ b/sound/soc/samsung/smdk_spdif.c
@@ -10,15 +10,10 @@
*
*/
-#include <linux/module.h>
-#include <linux/device.h>
#include <linux/clk.h>
-#include <plat/devs.h>
-
#include <sound/soc.h>
-#include "dma.h"
#include "spdif.h"
/* Audio clock settings are belonged to board specific part. Every
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index b2cff1a44aed..8aacf23d6f3a 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -10,17 +10,12 @@
* option) any later version.
*/
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <asm/mach-types.h>
#include "../codecs/wm8580.h"
-#include "dma.h"
#include "i2s.h"
/*
diff --git a/sound/soc/samsung/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
index ae5fed6f772f..fffe3c1dd1bd 100644
--- a/sound/soc/samsung/smdk_wm9713.c
+++ b/sound/soc/samsung/smdk_wm9713.c
@@ -11,13 +11,8 @@
*
*/
-#include <linux/module.h>
-#include <linux/device.h>
#include <sound/soc.h>
-#include "dma.h"
-#include "ac97.h"
-
static struct snd_soc_card smdk;
/*
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index f0816404ea3e..28c491dacf7a 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -13,9 +13,8 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <plat/audio.h>
#include <mach/dma.h>
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
index a14820ac9665..d6f4703b3c07 100644
--- a/sound/soc/sh/fsi-ak4642.c
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -18,18 +18,26 @@ struct fsi_ak4642_data {
const char *cpu_dai;
const char *codec;
const char *platform;
+ int id;
};
static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *codec = rtd->codec_dai;
+ struct snd_soc_dai *cpu = rtd->cpu_dai;
int ret;
- ret = snd_soc_dai_set_fmt(dai, SND_SOC_DAIFMT_CBM_CFM);
+ ret = snd_soc_dai_set_fmt(codec, SND_SOC_DAIFMT_LEFT_J |
+ SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_sysclk(dai, 0, 11289600, 0);
+ ret = snd_soc_dai_set_sysclk(codec, 0, 11289600, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_fmt(cpu, SND_SOC_DAIFMT_LEFT_J |
+ SND_SOC_DAIFMT_CBS_CFS);
return ret;
}
@@ -60,7 +68,7 @@ static int fsi_ak4642_probe(struct platform_device *pdev)
pdata = (struct fsi_ak4642_data *)id_entry->driver_data;
- fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_A);
+ fsi_snd_device = platform_device_alloc("soc-audio", pdata->id);
if (!fsi_snd_device)
goto out;
@@ -93,6 +101,7 @@ static struct fsi_ak4642_data fsi_a_ak4642 = {
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi.0",
+ .id = FSI_PORT_A,
};
static struct fsi_ak4642_data fsi_b_ak4642 = {
@@ -101,6 +110,7 @@ static struct fsi_ak4642_data fsi_b_ak4642 = {
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi.0",
+ .id = FSI_PORT_B,
};
static struct fsi_ak4642_data fsi_a_ak4643 = {
@@ -109,6 +119,7 @@ static struct fsi_ak4642_data fsi_a_ak4643 = {
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi.0",
+ .id = FSI_PORT_A,
};
static struct fsi_ak4642_data fsi_b_ak4643 = {
@@ -117,6 +128,7 @@ static struct fsi_ak4642_data fsi_b_ak4643 = {
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi.0",
+ .id = FSI_PORT_B,
};
static struct fsi_ak4642_data fsi2_a_ak4642 = {
@@ -125,6 +137,7 @@ static struct fsi_ak4642_data fsi2_a_ak4642 = {
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi2",
+ .id = FSI_PORT_A,
};
static struct fsi_ak4642_data fsi2_b_ak4642 = {
@@ -133,6 +146,7 @@ static struct fsi_ak4642_data fsi2_b_ak4642 = {
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi2",
+ .id = FSI_PORT_B,
};
static struct fsi_ak4642_data fsi2_a_ak4643 = {
@@ -141,6 +155,7 @@ static struct fsi_ak4642_data fsi2_a_ak4643 = {
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi2",
+ .id = FSI_PORT_A,
};
static struct fsi_ak4642_data fsi2_b_ak4643 = {
@@ -149,6 +164,7 @@ static struct fsi_ak4642_data fsi2_b_ak4643 = {
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi2",
+ .id = FSI_PORT_B,
};
static struct platform_device_id fsi_id_table[] = {
diff --git a/sound/soc/sh/fsi-da7210.c b/sound/soc/sh/fsi-da7210.c
index e8df9da92f71..dbafd7ac5590 100644
--- a/sound/soc/sh/fsi-da7210.c
+++ b/sound/soc/sh/fsi-da7210.c
@@ -15,11 +15,20 @@
static int fsi_da7210_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *codec = rtd->codec_dai;
+ struct snd_soc_dai *cpu = rtd->cpu_dai;
+ int ret;
- return snd_soc_dai_set_fmt(dai,
+ ret = snd_soc_dai_set_fmt(codec,
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_fmt(cpu, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_CBS_CFS);
+
+ return ret;
}
static struct snd_soc_dai_link fsi_da7210_dai = {
diff --git a/sound/soc/sh/fsi-hdmi.c b/sound/soc/sh/fsi-hdmi.c
index a52dd8ec71d3..9719985eb82d 100644
--- a/sound/soc/sh/fsi-hdmi.c
+++ b/sound/soc/sh/fsi-hdmi.c
@@ -12,31 +12,59 @@
#include <linux/platform_device.h>
#include <sound/sh_fsi.h>
+struct fsi_hdmi_data {
+ const char *cpu_dai;
+ const char *card;
+ int id;
+};
+
+static int fsi_hdmi_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *cpu = rtd->cpu_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_fmt(cpu, SND_SOC_DAIFMT_CBM_CFM);
+
+ return ret;
+}
+
static struct snd_soc_dai_link fsi_dai_link = {
.name = "HDMI",
.stream_name = "HDMI",
- .cpu_dai_name = "fsib-dai", /* fsi B */
.codec_dai_name = "sh_mobile_hdmi-hifi",
.platform_name = "sh_fsi2",
.codec_name = "sh-mobile-hdmi",
+ .init = fsi_hdmi_dai_init,
};
static struct snd_soc_card fsi_soc_card = {
- .name = "FSI (SH MOBILE HDMI)",
.dai_link = &fsi_dai_link,
.num_links = 1,
};
static struct platform_device *fsi_snd_device;
-static int __init fsi_hdmi_init(void)
+static int fsi_hdmi_probe(struct platform_device *pdev)
{
int ret = -ENOMEM;
+ const struct platform_device_id *id_entry;
+ struct fsi_hdmi_data *pdata;
+
+ id_entry = pdev->id_entry;
+ if (!id_entry) {
+ dev_err(&pdev->dev, "unknown fsi hdmi\n");
+ return -ENODEV;
+ }
- fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_B);
+ pdata = (struct fsi_hdmi_data *)id_entry->driver_data;
+
+ fsi_snd_device = platform_device_alloc("soc-audio", pdata->id);
if (!fsi_snd_device)
goto out;
+ fsi_dai_link.cpu_dai_name = pdata->cpu_dai;
+ fsi_soc_card.name = pdata->card;
+
platform_set_drvdata(fsi_snd_device, &fsi_soc_card);
ret = platform_device_add(fsi_snd_device);
@@ -47,9 +75,48 @@ out:
return ret;
}
-static void __exit fsi_hdmi_exit(void)
+static int fsi_hdmi_remove(struct platform_device *pdev)
{
platform_device_unregister(fsi_snd_device);
+ return 0;
+}
+
+static struct fsi_hdmi_data fsi2_a_hdmi = {
+ .cpu_dai = "fsia-dai",
+ .card = "FSI2A (SH MOBILE HDMI)",
+ .id = FSI_PORT_A,
+};
+
+static struct fsi_hdmi_data fsi2_b_hdmi = {
+ .cpu_dai = "fsib-dai",
+ .card = "FSI2B (SH MOBILE HDMI)",
+ .id = FSI_PORT_B,
+};
+
+static struct platform_device_id fsi_id_table[] = {
+ /* FSI 2 */
+ { "sh_fsi2_a_hdmi", (kernel_ulong_t)&fsi2_a_hdmi },
+ { "sh_fsi2_b_hdmi", (kernel_ulong_t)&fsi2_b_hdmi },
+ {},
+};
+
+static struct platform_driver fsi_hdmi = {
+ .driver = {
+ .name = "fsi-hdmi-audio",
+ },
+ .probe = fsi_hdmi_probe,
+ .remove = fsi_hdmi_remove,
+ .id_table = fsi_id_table,
+};
+
+static int __init fsi_hdmi_init(void)
+{
+ return platform_driver_register(&fsi_hdmi);
+}
+
+static void __exit fsi_hdmi_exit(void)
+{
+ platform_driver_unregister(&fsi_hdmi);
}
module_init(fsi_hdmi_init);
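
fsi-hdmi.c stops hard-coding FSI port B in an __init function and becomes a platform driver whose id_table ("sh_fsi2_a_hdmi"/"sh_fsi2_b_hdmi") selects the port, CPU DAI and card name. Under this scheme a board wanting HDMI audio on FSI2 port A would register a device whose name matches an id_table entry; a minimal sketch, assuming the device is added from board setup code (the board-side registration is an assumption, not part of this diff):

	#include <linux/platform_device.h>

	/* board code: bind the FSI2 port A HDMI card via fsi_id_table */
	static struct platform_device board_fsi_hdmi_device = {
		.name	= "sh_fsi2_a_hdmi",
		.id	= -1,
	};

	static int __init board_audio_init(void)
	{
		return platform_device_register(&board_fsi_hdmi_device);
	}
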
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 2b06402801ef..0c9997e2d8c0 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -78,6 +78,8 @@
/* CKG1 */
#define ACKMD_MASK 0x00007000
#define BPFMD_MASK 0x00000700
+#define DIMD (1 << 4)
+#define DOMD (1 << 0)
/* A/B MST_CTLR */
#define BP (1 << 4) /* Fix the signal of Biphase output */
@@ -111,6 +113,8 @@
#define FSI_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+typedef int (*set_rate_func)(struct device *dev, int is_porta, int rate, int enable);
+
/*
* FSI driver use below type name for variable
*
@@ -128,7 +132,6 @@ struct fsi_stream {
struct snd_pcm_substream *substream;
int fifo_max_num;
- int chan_num;
int buff_offset;
int buff_len;
@@ -143,6 +146,7 @@ struct fsi_priv {
void __iomem *base;
struct fsi_master *master;
+ int chan_num;
struct fsi_stream playback;
struct fsi_stream capture;
@@ -252,9 +256,8 @@ static struct snd_soc_dai *fsi_get_dai(struct snd_pcm_substream *substream)
return rtd->cpu_dai;
}
-static struct fsi_priv *fsi_get_priv(struct snd_pcm_substream *substream)
+static struct fsi_priv *fsi_get_priv_frm_dai(struct snd_soc_dai *dai)
{
- struct snd_soc_dai *dai = fsi_get_dai(substream);
struct fsi_master *master = snd_soc_dai_get_drvdata(dai);
if (dai->id == 0)
@@ -263,11 +266,27 @@ static struct fsi_priv *fsi_get_priv(struct snd_pcm_substream *substream)
return &master->fsib;
}
+static struct fsi_priv *fsi_get_priv(struct snd_pcm_substream *substream)
+{
+ return fsi_get_priv_frm_dai(fsi_get_dai(substream));
+}
+
+static set_rate_func fsi_get_info_set_rate(struct fsi_master *master)
+{
+ if (!master->info)
+ return NULL;
+
+ return master->info->set_rate;
+}
+
static u32 fsi_get_info_flags(struct fsi_priv *fsi)
{
int is_porta = fsi_is_port_a(fsi);
struct fsi_master *master = fsi_get_master(fsi);
+ if (!master->info)
+ return 0;
+
return is_porta ? master->info->porta_flags :
master->info->portb_flags;
}
@@ -288,21 +307,6 @@ static inline struct fsi_stream *fsi_get_stream(struct fsi_priv *fsi,
return is_play ? &fsi->playback : &fsi->capture;
}
-static int fsi_is_master_mode(struct fsi_priv *fsi, int is_play)
-{
- u32 mode;
- u32 flags = fsi_get_info_flags(fsi);
-
- mode = is_play ? SH_FSI_OUT_SLAVE_MODE : SH_FSI_IN_SLAVE_MODE;
-
- /* return
- * 1 : master mode
- * 0 : slave mode
- */
-
- return (mode & flags) != mode;
-}
-
static u32 fsi_get_port_shift(struct fsi_priv *fsi, int is_play)
{
int is_porta = fsi_is_port_a(fsi);
@@ -357,7 +361,6 @@ static void fsi_stream_pop(struct fsi_priv *fsi, int is_play)
static int fsi_get_fifo_data_num(struct fsi_priv *fsi, int is_play)
{
u32 status;
- struct fsi_stream *io = fsi_get_stream(fsi, is_play);
int data_num;
status = is_play ?
@@ -365,7 +368,7 @@ static int fsi_get_fifo_data_num(struct fsi_priv *fsi, int is_play)
fsi_reg_read(fsi, DIFF_ST);
data_num = 0x1ff & (status >> 8);
- data_num *= io->chan_num;
+ data_num *= fsi->chan_num;
return data_num;
}
@@ -387,7 +390,7 @@ static int fsi_get_frame_width(struct fsi_priv *fsi, int is_play)
struct snd_pcm_substream *substream = io->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
- return frames_to_bytes(runtime, 1) / io->chan_num;
+ return frames_to_bytes(runtime, 1) / fsi->chan_num;
}
static void fsi_count_fifo_err(struct fsi_priv *fsi)
@@ -580,10 +583,10 @@ static void fsi_fifo_init(struct fsi_priv *fsi,
* 7 channels: 32 ( 32 x 7 = 224)
* 8 channels: 32 ( 32 x 8 = 256)
*/
- for (i = 1; i < io->chan_num; i <<= 1)
+ for (i = 1; i < fsi->chan_num; i <<= 1)
io->fifo_max_num >>= 1;
dev_dbg(dai->dev, "%d channel %d store\n",
- io->chan_num, io->fifo_max_num);
+ fsi->chan_num, io->fifo_max_num);
/*
* set interrupt generation factor
@@ -659,7 +662,7 @@ static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int stream)
* data_num_max : number of FSI fifo free space
* data_num : number of ALSA residue data
*/
- data_num_max = io->fifo_max_num * io->chan_num;
+ data_num_max = io->fifo_max_num * fsi->chan_num;
data_num_max -= fsi_get_fifo_data_num(fsi, is_play);
data_num = data_residue_num;
@@ -754,25 +757,12 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsi_priv *fsi = fsi_get_priv(substream);
- struct fsi_master *master = fsi_get_master(fsi);
- struct fsi_stream *io;
u32 flags = fsi_get_info_flags(fsi);
- u32 fmt;
u32 data;
int is_play = fsi_is_play(substream);
- int is_master;
-
- io = fsi_get_stream(fsi, is_play);
pm_runtime_get_sync(dai->dev);
- /* CKG1 */
- data = is_play ? (1 << 0) : (1 << 4);
- is_master = fsi_is_master_mode(fsi, is_play);
- if (is_master)
- fsi_reg_mask_set(fsi, CKG1, data, data);
- else
- fsi_reg_mask_set(fsi, CKG1, data, 0);
/* clock inversion (CKG2) */
data = 0;
@@ -787,54 +777,6 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
fsi_reg_write(fsi, CKG2, data);
- /* do fmt, di fmt */
- data = 0;
- fmt = is_play ? SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags);
- switch (fmt) {
- case SH_FSI_FMT_MONO:
- data = CR_MONO;
- io->chan_num = 1;
- break;
- case SH_FSI_FMT_MONO_DELAY:
- data = CR_MONO_D;
- io->chan_num = 1;
- break;
- case SH_FSI_FMT_PCM:
- data = CR_PCM;
- io->chan_num = 2;
- break;
- case SH_FSI_FMT_I2S:
- data = CR_I2S;
- io->chan_num = 2;
- break;
- case SH_FSI_FMT_TDM:
- io->chan_num = is_play ?
- SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
- data = CR_TDM | (io->chan_num - 1);
- break;
- case SH_FSI_FMT_TDM_DELAY:
- io->chan_num = is_play ?
- SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
- data = CR_TDM_D | (io->chan_num - 1);
- break;
- case SH_FSI_FMT_SPDIF:
- if (master->core->ver < 2) {
- dev_err(dai->dev, "This FSI can not use SPDIF\n");
- return -EINVAL;
- }
- data = CR_BWS_16 | CR_DTMD_SPDIF_PCM | CR_PCM;
- io->chan_num = 2;
- fsi_spdif_clk_ctrl(fsi, 1);
- fsi_reg_mask_set(fsi, OUT_SEL, DMMD, DMMD);
- break;
- default:
- dev_err(dai->dev, "unknown format.\n");
- return -EINVAL;
- }
- is_play ?
- fsi_reg_write(fsi, DO_FMT, data) :
- fsi_reg_write(fsi, DI_FMT, data);
-
/* irq clear */
fsi_irq_disable(fsi, is_play);
fsi_irq_clear_status(fsi);
@@ -851,12 +793,12 @@ static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
struct fsi_priv *fsi = fsi_get_priv(substream);
int is_play = fsi_is_play(substream);
struct fsi_master *master = fsi_get_master(fsi);
- int (*set_rate)(struct device *dev, int is_porta, int rate, int enable);
+ set_rate_func set_rate;
fsi_irq_disable(fsi, is_play);
fsi_clk_ctrl(fsi, 0);
- set_rate = master->info->set_rate;
+ set_rate = fsi_get_info_set_rate(master);
if (set_rate && fsi->rate)
set_rate(dai->dev, fsi_is_port_a(fsi), fsi->rate, 0);
fsi->rate = 0;
@@ -889,18 +831,100 @@ static int fsi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
return ret;
}
+static int fsi_set_fmt_dai(struct fsi_priv *fsi, unsigned int fmt)
+{
+ u32 data = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ data = CR_I2S;
+ fsi->chan_num = 2;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ data = CR_PCM;
+ fsi->chan_num = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsi_reg_write(fsi, DO_FMT, data);
+ fsi_reg_write(fsi, DI_FMT, data);
+
+ return 0;
+}
+
+static int fsi_set_fmt_spdif(struct fsi_priv *fsi)
+{
+ struct fsi_master *master = fsi_get_master(fsi);
+ u32 data = 0;
+
+ if (master->core->ver < 2)
+ return -EINVAL;
+
+ data = CR_BWS_16 | CR_DTMD_SPDIF_PCM | CR_PCM;
+ fsi->chan_num = 2;
+ fsi_spdif_clk_ctrl(fsi, 1);
+ fsi_reg_mask_set(fsi, OUT_SEL, DMMD, DMMD);
+
+ fsi_reg_write(fsi, DO_FMT, data);
+ fsi_reg_write(fsi, DI_FMT, data);
+
+ return 0;
+}
+
+static int fsi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct fsi_priv *fsi = fsi_get_priv_frm_dai(dai);
+ u32 flags = fsi_get_info_flags(fsi);
+ u32 data = 0;
+ int ret;
+
+ pm_runtime_get_sync(dai->dev);
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ data = DIMD | DOMD;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ ret = -EINVAL;
+ goto set_fmt_exit;
+ }
+ fsi_reg_mask_set(fsi, CKG1, (DIMD | DOMD), data);
+
+ /* set format */
+ switch (flags & SH_FSI_FMT_MASK) {
+ case SH_FSI_FMT_DAI:
+ ret = fsi_set_fmt_dai(fsi, fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ break;
+ case SH_FSI_FMT_SPDIF:
+ ret = fsi_set_fmt_spdif(fsi);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+set_fmt_exit:
+ pm_runtime_put_sync(dai->dev);
+
+ return ret;
+}
+
static int fsi_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct fsi_priv *fsi = fsi_get_priv(substream);
struct fsi_master *master = fsi_get_master(fsi);
- int (*set_rate)(struct device *dev, int is_porta, int rate, int enable);
+ set_rate_func set_rate;
int fsi_ver = master->core->ver;
long rate = params_rate(params);
int ret;
- set_rate = master->info->set_rate;
+ set_rate = fsi_get_info_set_rate(master);
if (!set_rate)
return 0;
@@ -975,6 +999,7 @@ static struct snd_soc_dai_ops fsi_dai_ops = {
.startup = fsi_dai_startup,
.shutdown = fsi_dai_shutdown,
.trigger = fsi_dai_trigger,
+ .set_fmt = fsi_dai_set_fmt,
.hw_params = fsi_dai_hw_params,
};
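
With fsi_dai_set_fmt wired into fsi_dai_ops, channel format and master/slave selection move from the SH_FSI_* platform flags into the standard ASoC set_fmt path: the machine driver pairs a clock-master format (CBM_CFM) on the codec DAI with a clock-slave format (CBS_CFS) on the FSI DAI, as the fsi-ak4642 and fsi-da7210 init callbacks above now do, and fsi_dai_set_fmt translates the master mask into the new DIMD/DOMD bits in CKG1. A minimal sketch of such an init callback (names are illustrative):

	#include <sound/soc.h>

	static int example_fsi_dai_init(struct snd_soc_pcm_runtime *rtd)
	{
		int ret;

		/* codec generates the bit and frame clocks ... */
		ret = snd_soc_dai_set_fmt(rtd->codec_dai,
					  SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM);
		if (ret < 0)
			return ret;

		/* ... so the FSI port runs as clock slave (DIMD/DOMD cleared) */
		return snd_soc_dai_set_fmt(rtd->cpu_dai,
					   SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
	}
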
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 8c2a21a978ac..5d76da43b14c 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -18,6 +18,8 @@
#include <linux/bitmap.h>
#include <linux/rbtree.h>
+#include <trace/events/asoc.h>
+
static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
unsigned int reg)
{
@@ -25,7 +27,8 @@ static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
unsigned int val;
if (reg >= codec->driver->reg_cache_size ||
- snd_soc_codec_volatile_register(codec, reg)) {
+ snd_soc_codec_volatile_register(codec, reg) ||
+ codec->cache_bypass) {
if (codec->cache_only)
return -1;
@@ -49,7 +52,8 @@ static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
data[1] = value & 0x00ff;
if (!snd_soc_codec_volatile_register(codec, reg) &&
- reg < codec->driver->reg_cache_size) {
+ reg < codec->driver->reg_cache_size &&
+ !codec->cache_bypass) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret < 0)
return -1;
@@ -106,7 +110,8 @@ static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
unsigned int val;
if (reg >= codec->driver->reg_cache_size ||
- snd_soc_codec_volatile_register(codec, reg)) {
+ snd_soc_codec_volatile_register(codec, reg) ||
+ codec->cache_bypass) {
if (codec->cache_only)
return -1;
@@ -130,7 +135,8 @@ static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
data[1] = value & 0x00ff;
if (!snd_soc_codec_volatile_register(codec, reg) &&
- reg < codec->driver->reg_cache_size) {
+ reg < codec->driver->reg_cache_size &&
+ !codec->cache_bypass) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret < 0)
return -1;
@@ -191,7 +197,8 @@ static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
data[1] = value & 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) &&
- reg < codec->driver->reg_cache_size) {
+ reg < codec->driver->reg_cache_size &&
+ !codec->cache_bypass) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret < 0)
return -1;
@@ -216,7 +223,8 @@ static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
reg &= 0xff;
if (reg >= codec->driver->reg_cache_size ||
- snd_soc_codec_volatile_register(codec, reg)) {
+ snd_soc_codec_volatile_register(codec, reg) ||
+ codec->cache_bypass) {
if (codec->cache_only)
return -1;
@@ -271,7 +279,8 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
data[2] = value & 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) &&
- reg < codec->driver->reg_cache_size) {
+ reg < codec->driver->reg_cache_size &&
+ !codec->cache_bypass) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret < 0)
return -1;
@@ -295,7 +304,8 @@ static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
unsigned int val;
if (reg >= codec->driver->reg_cache_size ||
- snd_soc_codec_volatile_register(codec, reg)) {
+ snd_soc_codec_volatile_register(codec, reg) ||
+ codec->cache_bypass) {
if (codec->cache_only)
return -1;
@@ -450,7 +460,8 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
reg &= 0xff;
if (reg >= codec->driver->reg_cache_size ||
- snd_soc_codec_volatile_register(codec, reg)) {
+ snd_soc_codec_volatile_register(codec, reg) ||
+ codec->cache_bypass) {
if (codec->cache_only)
return -1;
@@ -476,7 +487,8 @@ static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
reg &= 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) &&
- reg < codec->driver->reg_cache_size) {
+ reg < codec->driver->reg_cache_size &&
+ !codec->cache_bypass) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret < 0)
return -1;
@@ -568,7 +580,8 @@ static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
unsigned int val;
if (reg >= codec->driver->reg_cache_size ||
- snd_soc_codec_volatile_register(codec, reg)) {
+ snd_soc_codec_volatile_register(codec, reg) ||
+ codec->cache_bypass) {
if (codec->cache_only)
return -1;
@@ -595,7 +608,8 @@ static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
data[3] = value & 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) &&
- reg < codec->driver->reg_cache_size) {
+ reg < codec->driver->reg_cache_size &&
+ !codec->cache_bypass) {
ret = snd_soc_cache_write(codec, reg, value);
if (ret < 0)
return -1;
@@ -761,6 +775,49 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
+static bool snd_soc_set_cache_val(void *base, unsigned int idx,
+ unsigned int val, unsigned int word_size)
+{
+ switch (word_size) {
+ case 1: {
+ u8 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ case 2: {
+ u16 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ default:
+ BUG();
+ }
+ return false;
+}
+
+static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
+ unsigned int word_size)
+{
+ switch (word_size) {
+ case 1: {
+ const u8 *cache = base;
+ return cache[idx];
+ }
+ case 2: {
+ const u16 *cache = base;
+ return cache[idx];
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return -1;
+}
+
struct snd_soc_rbtree_node {
struct rb_node node;
unsigned int reg;
@@ -835,7 +892,9 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
ret = snd_soc_cache_read(codec, rbnode->reg, &val);
if (ret)
return ret;
+ codec->cache_bypass = 1;
ret = snd_soc_write(codec, rbnode->reg, val);
+ codec->cache_bypass = 0;
if (ret)
return ret;
dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
@@ -924,7 +983,12 @@ static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
{
+ struct snd_soc_rbtree_node *rbtree_node;
struct snd_soc_rbtree_ctx *rbtree_ctx;
+ unsigned int val;
+ unsigned int word_size;
+ int i;
+ int ret;
codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
if (!codec->reg_cache)
@@ -936,53 +1000,25 @@ static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
if (!codec->reg_def_copy)
return 0;
-/*
- * populate the rbtree with the initialized registers. All other
- * registers will be inserted into the tree when they are first written.
- *
- * The reasoning behind this, is that we need to step through and
- * dereference the cache in u8/u16 increments without sacrificing
- * portability. This could also be done using memcpy() but that would
- * be slightly more cryptic.
- */
-#define snd_soc_rbtree_populate(cache) \
-({ \
- int ret, i; \
- struct snd_soc_rbtree_node *rbtree_node; \
- \
- ret = 0; \
- cache = codec->reg_def_copy; \
- for (i = 0; i < codec->driver->reg_cache_size; ++i) { \
- if (!cache[i]) \
- continue; \
- rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL); \
- if (!rbtree_node) { \
- ret = -ENOMEM; \
- snd_soc_cache_exit(codec); \
- break; \
- } \
- rbtree_node->reg = i; \
- rbtree_node->value = cache[i]; \
- rbtree_node->defval = cache[i]; \
- snd_soc_rbtree_insert(&rbtree_ctx->root, \
- rbtree_node); \
- } \
- ret; \
-})
-
- switch (codec->driver->reg_word_size) {
- case 1: {
- const u8 *cache;
-
- return snd_soc_rbtree_populate(cache);
- }
- case 2: {
- const u16 *cache;
-
- return snd_soc_rbtree_populate(cache);
- }
- default:
- BUG();
+ /*
+ * populate the rbtree with the initialized registers. All other
+ * registers will be inserted when they are first modified.
+ */
+ word_size = codec->driver->reg_word_size;
+ for (i = 0; i < codec->driver->reg_cache_size; ++i) {
+ val = snd_soc_get_cache_val(codec->reg_def_copy, i, word_size);
+ if (!val)
+ continue;
+ rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL);
+ if (!rbtree_node) {
+ ret = -ENOMEM;
+ snd_soc_cache_exit(codec);
+ break;
+ }
+ rbtree_node->reg = i;
+ rbtree_node->value = val;
+ rbtree_node->defval = val;
+ snd_soc_rbtree_insert(&rbtree_ctx->root, rbtree_node);
}
return 0;
@@ -1080,34 +1116,28 @@ static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
unsigned int reg)
{
const struct snd_soc_codec_driver *codec_drv;
- size_t reg_size;
codec_drv = codec->driver;
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
return (reg * codec_drv->reg_word_size) /
- DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+ DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}
static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
unsigned int reg)
{
const struct snd_soc_codec_driver *codec_drv;
- size_t reg_size;
codec_drv = codec->driver;
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
- return reg % (DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()) /
+ return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
codec_drv->reg_word_size);
}
static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
{
const struct snd_soc_codec_driver *codec_drv;
- size_t reg_size;
codec_drv = codec->driver;
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
- return DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+ return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}
static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
@@ -1122,7 +1152,9 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
ret = snd_soc_cache_read(codec, i, &val);
if (ret)
return ret;
+ codec->cache_bypass = 1;
ret = snd_soc_write(codec, i, val);
+ codec->cache_bypass = 0;
if (ret)
return ret;
dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
@@ -1165,29 +1197,10 @@ static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
}
/* write the new value to the cache */
- switch (codec->driver->reg_word_size) {
- case 1: {
- u8 *cache;
- cache = lzo_block->dst;
- if (cache[blkpos] == value) {
- kfree(lzo_block->dst);
- goto out;
- }
- cache[blkpos] = value;
- }
- break;
- case 2: {
- u16 *cache;
- cache = lzo_block->dst;
- if (cache[blkpos] == value) {
- kfree(lzo_block->dst);
- goto out;
- }
- cache[blkpos] = value;
- }
- break;
- default:
- BUG();
+ if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
+ codec->driver->reg_word_size)) {
+ kfree(lzo_block->dst);
+ goto out;
}
/* prepare the source to be the decompressed block */
@@ -1241,25 +1254,10 @@ static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
/* decompress the block */
ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
- if (ret >= 0) {
+ if (ret >= 0)
/* fetch the value from the cache */
- switch (codec->driver->reg_word_size) {
- case 1: {
- u8 *cache;
- cache = lzo_block->dst;
- *value = cache[blkpos];
- }
- break;
- case 2: {
- u16 *cache;
- cache = lzo_block->dst;
- *value = cache[blkpos];
- }
- break;
- default:
- BUG();
- }
- }
+ *value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
+ codec->driver->reg_word_size);
kfree(lzo_block->dst);
/* restore the pointer and length of the compressed block */
@@ -1301,7 +1299,7 @@ static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
{
struct snd_soc_lzo_ctx **lzo_blocks;
- size_t reg_size, bmp_size;
+ size_t bmp_size;
const struct snd_soc_codec_driver *codec_drv;
int ret, tofree, i, blksize, blkcount;
const char *p, *end;
@@ -1309,7 +1307,6 @@ static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
ret = 0;
codec_drv = codec->driver;
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
/*
* If we have not been given a default register cache
@@ -1321,8 +1318,7 @@ static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
tofree = 1;
if (!codec->reg_def_copy) {
- codec->reg_def_copy = kzalloc(reg_size,
- GFP_KERNEL);
+ codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
if (!codec->reg_def_copy)
return -ENOMEM;
}
@@ -1370,7 +1366,7 @@ static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
blksize = snd_soc_lzo_get_blksize(codec);
p = codec->reg_def_copy;
- end = codec->reg_def_copy + reg_size;
+ end = codec->reg_def_copy + codec->reg_size;
/* compress the register map and fill the lzo blocks */
for (i = 0; i < blkcount; ++i, p += blksize) {
lzo_blocks[i]->src = p;
@@ -1414,28 +1410,10 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
ret = snd_soc_cache_read(codec, i, &val);
if (ret)
return ret;
- if (codec_drv->reg_cache_default) {
- switch (codec_drv->reg_word_size) {
- case 1: {
- const u8 *cache;
-
- cache = codec_drv->reg_cache_default;
- if (cache[i] == val)
- continue;
- }
- break;
- case 2: {
- const u16 *cache;
-
- cache = codec_drv->reg_cache_default;
- if (cache[i] == val)
- continue;
- }
- break;
- default:
- BUG();
- }
- }
+ if (codec->reg_def_copy)
+ if (snd_soc_get_cache_val(codec->reg_def_copy,
+ i, codec_drv->reg_word_size) == val)
+ continue;
ret = snd_soc_write(codec, i, val);
if (ret)
return ret;
@@ -1448,50 +1426,16 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
- switch (codec->driver->reg_word_size) {
- case 1: {
- u8 *cache;
-
- cache = codec->reg_cache;
- cache[reg] = value;
- }
- break;
- case 2: {
- u16 *cache;
-
- cache = codec->reg_cache;
- cache[reg] = value;
- }
- break;
- default:
- BUG();
- }
-
+ snd_soc_set_cache_val(codec->reg_cache, reg, value,
+ codec->driver->reg_word_size);
return 0;
}
static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
unsigned int reg, unsigned int *value)
{
- switch (codec->driver->reg_word_size) {
- case 1: {
- u8 *cache;
-
- cache = codec->reg_cache;
- *value = cache[reg];
- }
- break;
- case 2: {
- u16 *cache;
-
- cache = codec->reg_cache;
- *value = cache[reg];
- }
- break;
- default:
- BUG();
- }
-
+ *value = snd_soc_get_cache_val(codec->reg_cache, reg,
+ codec->driver->reg_word_size);
return 0;
}
@@ -1507,24 +1451,14 @@ static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
{
const struct snd_soc_codec_driver *codec_drv;
- size_t reg_size;
codec_drv = codec->driver;
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
- /*
- * for flat compression, we don't need to keep a copy of the
- * original defaults register cache as it will definitely not
- * be marked as __devinitconst
- */
- kfree(codec->reg_def_copy);
- codec->reg_def_copy = NULL;
-
- if (codec_drv->reg_cache_default)
- codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
- reg_size, GFP_KERNEL);
+ if (codec->reg_def_copy)
+ codec->reg_cache = kmemdup(codec->reg_def_copy,
+ codec->reg_size, GFP_KERNEL);
else
- codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
+ codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
if (!codec->reg_cache)
return -ENOMEM;
@@ -1669,21 +1603,77 @@ EXPORT_SYMBOL_GPL(snd_soc_cache_write);
int snd_soc_cache_sync(struct snd_soc_codec *codec)
{
int ret;
+ const char *name;
if (!codec->cache_sync) {
return 0;
}
- if (codec->cache_ops && codec->cache_ops->sync) {
- if (codec->cache_ops->name)
- dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
- codec->cache_ops->name, codec->name);
- ret = codec->cache_ops->sync(codec);
- if (!ret)
- codec->cache_sync = 0;
- return ret;
- }
+ if (!codec->cache_ops || !codec->cache_ops->sync)
+ return -EINVAL;
- return -EINVAL;
+ if (codec->cache_ops->name)
+ name = codec->cache_ops->name;
+ else
+ name = "unknown";
+
+ if (codec->cache_ops->name)
+ dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
+ codec->cache_ops->name, codec->name);
+ trace_snd_soc_cache_sync(codec, name, "start");
+ ret = codec->cache_ops->sync(codec);
+ if (!ret)
+ codec->cache_sync = 0;
+ trace_snd_soc_cache_sync(codec, name, "end");
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
+
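+/* Binary search of the reg_access_default table (assumed to be sorted by
+ * register address); returns the table index for reg, or -1 if not found. */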
+static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ const struct snd_soc_codec_driver *codec_drv;
+ unsigned int min, max, index;
+
+ codec_drv = codec->driver;
+ min = 0;
+ max = codec_drv->reg_access_size - 1;
+ do {
+ index = (min + max) / 2;
+ if (codec_drv->reg_access_default[index].reg == reg)
+ return index;
+ if (codec_drv->reg_access_default[index].reg < reg)
+ min = index + 1;
+ else
+ max = index;
+ } while (min <= max);
+ return -1;
+}
+
+int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ int index;
+
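+	/* registers beyond the cached range are reported as volatile */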
+ if (reg >= codec->driver->reg_cache_size)
+ return 1;
+ index = snd_soc_get_reg_access_index(codec, reg);
+ if (index < 0)
+ return 0;
+ return codec->driver->reg_access_default[index].vol;
+}
+EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
+
+int snd_soc_default_readable_register(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ int index;
+
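+	/* registers beyond the cached range are reported as readable */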
+ if (reg >= codec->driver->reg_cache_size)
+ return 1;
+ index = snd_soc_get_reg_access_index(codec, reg);
+ if (index < 0)
+ return 0;
+ return codec->driver->reg_access_default[index].read;
+}
+EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c3f6f1e72790..64befac3f9c3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -48,7 +48,8 @@ static DEFINE_MUTEX(pcm_mutex);
static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq);
#ifdef CONFIG_DEBUG_FS
-static struct dentry *debugfs_root;
+struct dentry *snd_soc_debugfs_root;
+EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);
#endif
static DEFINE_MUTEX(client_mutex);
@@ -57,8 +58,6 @@ static LIST_HEAD(dai_list);
static LIST_HEAD(platform_list);
static LIST_HEAD(codec_list);
-static int snd_soc_register_card(struct snd_soc_card *card);
-static int snd_soc_unregister_card(struct snd_soc_card *card);
static int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
/*
@@ -70,10 +69,73 @@ static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
+/* returns the minimum number of bytes needed to represent
+ * a given value */
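+/* e.g. min_bytes_needed(0xff) == 1, min_bytes_needed(0x100) == 2 */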
+static int min_bytes_needed(unsigned long val)
+{
+ int c = 0;
+ int i;
+
+ for (i = (sizeof val * 8) - 1; i >= 0; --i, ++c)
+ if (val & (1UL << i))
+ break;
+ c = (sizeof val * 8) - c;
+ if (!c || (c % 8))
+ c = (c + 8) / 8;
+ else
+ c /= 8;
+ return c;
+}
+
+/* fill buf which is 'len' bytes with a formatted
+ * string of the form 'reg: value\n' */
+static int format_register_str(struct snd_soc_codec *codec,
+ unsigned int reg, char *buf, size_t len)
+{
+ int wordsize = codec->driver->reg_word_size * 2;
+ int regsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
+ int ret;
+ char tmpbuf[len + 1];
+ char regbuf[regsize + 1];
+
+ /* since tmpbuf is allocated on the stack, warn the callers if they
+ * try to abuse this function */
+ WARN_ON(len > 63);
+
+ /* +2 for ': ' and +1 for '\n' */
+ if (wordsize + regsize + 2 + 1 != len)
+ return -EINVAL;
+
+ ret = snd_soc_read(codec , reg);
+ if (ret < 0) {
+ memset(regbuf, 'X', regsize);
+ regbuf[regsize] = '\0';
+ } else {
+ snprintf(regbuf, regsize + 1, "%.*x", regsize, ret);
+ }
+
+ /* prepare the buffer */
+ snprintf(tmpbuf, len + 1, "%.*x: %s\n", wordsize, reg, regbuf);
+ /* copy it back to the caller without the '\0' */
+ memcpy(buf, tmpbuf, len);
+
+ return 0;
+}
+
/* codec register dump */
-static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
+static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf,
+ size_t count, loff_t pos)
{
- int ret, i, step = 1, count = 0;
+ int i, step = 1;
+ int wordsize, regsize;
+ int len;
+ size_t total = 0;
+ loff_t p = 0;
+
+ wordsize = codec->driver->reg_word_size * 2;
+ regsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
+
+ len = wordsize + regsize + 2 + 1;
if (!codec->driver->reg_cache_size)
return 0;
@@ -81,55 +143,37 @@ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
if (codec->driver->reg_cache_step)
step = codec->driver->reg_cache_step;
- count += sprintf(buf, "%s registers\n", codec->name);
for (i = 0; i < codec->driver->reg_cache_size; i += step) {
- if (codec->driver->readable_register && !codec->driver->readable_register(i))
+ if (codec->readable_register && !codec->readable_register(codec, i))
continue;
-
- count += sprintf(buf + count, "%2x: ", i);
- if (count >= PAGE_SIZE - 1)
- break;
-
if (codec->driver->display_register) {
count += codec->driver->display_register(codec, buf + count,
PAGE_SIZE - count, i);
} else {
- /* If the read fails it's almost certainly due to
- * the register being volatile and the device being
- * powered off.
- */
- ret = snd_soc_read(codec, i);
- if (ret >= 0)
- count += snprintf(buf + count,
- PAGE_SIZE - count,
- "%4x", ret);
- else
- count += snprintf(buf + count,
- PAGE_SIZE - count,
- "<no data: %d>", ret);
+ /* debugfs entries larger than PAGE_SIZE are only supported
+ * for the default case */
+ if (p >= pos) {
+ if (total + len >= count - 1)
+ break;
+ format_register_str(codec, i, buf + total, len);
+ total += len;
+ }
+ p += len;
}
-
- if (count >= PAGE_SIZE - 1)
- break;
-
- count += snprintf(buf + count, PAGE_SIZE - count, "\n");
- if (count >= PAGE_SIZE - 1)
- break;
}
- /* Truncate count; min() would cause a warning */
- if (count >= PAGE_SIZE)
- count = PAGE_SIZE - 1;
+ total = min(total, count - 1);
- return count;
+ return total;
}
+
static ssize_t codec_reg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_soc_pcm_runtime *rtd =
container_of(dev, struct snd_soc_pcm_runtime, dev);
- return soc_codec_reg_show(rtd->codec, buf);
+ return soc_codec_reg_show(rtd->codec, buf, PAGE_SIZE, 0);
}
static DEVICE_ATTR(codec_reg, 0444, codec_reg_show, NULL);
@@ -168,16 +212,28 @@ static int codec_reg_open_file(struct inode *inode, struct file *file)
}
static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
ssize_t ret;
struct snd_soc_codec *codec = file->private_data;
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *buf;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = soc_codec_reg_show(codec, buf);
- if (ret >= 0)
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ ret = soc_codec_reg_show(codec, buf, count, *ppos);
+ if (ret >= 0) {
+ if (copy_to_user(user_buf, buf, ret)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+ *ppos += ret;
+ }
+
kfree(buf);
return ret;
}
@@ -209,6 +265,10 @@ static ssize_t codec_reg_write_file(struct file *file,
start++;
if (strict_strtoul(start, 16, &value))
return -EINVAL;
+
+ /* Userspace has been fiddling around behind the kernel's back */
+ add_taint(TAINT_USER);
+
snd_soc_write(codec, reg, value);
return buf_size;
}
@@ -232,6 +292,11 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
return;
}
+ debugfs_create_bool("cache_sync", 0444, codec->debugfs_codec_root,
+ &codec->cache_sync);
+ debugfs_create_bool("cache_only", 0444, codec->debugfs_codec_root,
+ &codec->cache_only);
+
codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
codec->debugfs_codec_root,
codec, &codec_reg_fops);
@@ -356,7 +421,7 @@ static const struct file_operations platform_list_fops = {
static void soc_init_card_debugfs(struct snd_soc_card *card)
{
card->debugfs_card_root = debugfs_create_dir(card->name,
- debugfs_root);
+ snd_soc_debugfs_root);
if (!card->debugfs_card_root) {
dev_warn(card->dev,
"ASoC: Failed to create codec debugfs directory\n");
@@ -962,12 +1027,11 @@ static struct snd_pcm_ops soc_pcm_ops = {
.pointer = soc_pcm_pointer,
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend */
-static int soc_suspend(struct device *dev)
+int snd_soc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = dev_get_drvdata(dev);
struct snd_soc_codec *codec;
int i;
@@ -1008,7 +1072,7 @@ static int soc_suspend(struct device *dev)
}
if (card->suspend_pre)
- card->suspend_pre(pdev, PMSG_SUSPEND);
+ card->suspend_pre(card);
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
@@ -1075,10 +1139,11 @@ static int soc_suspend(struct device *dev)
}
if (card->suspend_post)
- card->suspend_post(pdev, PMSG_SUSPEND);
+ card->suspend_post(card);
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_suspend);
/* deferred resume work, so resume can complete before we finished
* setting our codec back up, which can be very slow on I2C
@@ -1087,7 +1152,6 @@ static void soc_resume_deferred(struct work_struct *work)
{
struct snd_soc_card *card =
container_of(work, struct snd_soc_card, deferred_resume_work);
- struct platform_device *pdev = to_platform_device(card->dev);
struct snd_soc_codec *codec;
int i;
@@ -1101,7 +1165,7 @@ static void soc_resume_deferred(struct work_struct *work)
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2);
if (card->resume_pre)
- card->resume_pre(pdev);
+ card->resume_pre(card);
/* resume AC97 DAIs */
for (i = 0; i < card->num_rtd; i++) {
@@ -1176,7 +1240,7 @@ static void soc_resume_deferred(struct work_struct *work)
}
if (card->resume_post)
- card->resume_post(pdev);
+ card->resume_post(card);
dev_dbg(card->dev, "resume work completed\n");
@@ -1185,10 +1249,9 @@ static void soc_resume_deferred(struct work_struct *work)
}
/* powers up audio subsystem after a suspend */
-static int soc_resume(struct device *dev)
+int snd_soc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = dev_get_drvdata(dev);
int i;
/* AC97 devices might have other drivers hanging off them so
@@ -1210,9 +1273,10 @@ static int soc_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_resume);
#else
-#define soc_suspend NULL
-#define soc_resume NULL
+#define snd_soc_suspend NULL
+#define snd_soc_resume NULL
#endif
static struct snd_soc_dai_ops null_dai_ops = {
@@ -1405,26 +1469,31 @@ static int soc_probe_codec(struct snd_soc_card *card,
codec->dapm.card = card;
soc_set_name_prefix(card, codec);
+ if (!try_module_get(codec->dev->driver->owner))
+ return -ENODEV;
+
if (codec->driver->probe) {
ret = codec->driver->probe(codec);
if (ret < 0) {
dev_err(codec->dev,
"asoc: failed to probe CODEC %s: %d\n",
codec->name, ret);
- return ret;
+ goto err_probe;
}
}
soc_init_codec_debugfs(codec);
/* mark codec as probed and add to card codec list */
- if (!try_module_get(codec->dev->driver->owner))
- return -ENODEV;
-
codec->probed = 1;
list_add(&codec->card_list, &card->codec_dev_list);
list_add(&codec->dapm.list, &card->dapm_list);
+ return 0;
+
+err_probe:
+ module_put(codec->dev->driver->owner);
+
return ret;
}
@@ -1468,7 +1537,6 @@ static int soc_post_component_init(struct snd_soc_card *card,
/* Make sure all DAPM widgets are instantiated */
snd_soc_dapm_new_widgets(&codec->dapm);
- snd_soc_dapm_sync(&codec->dapm);
/* register the rtd device */
rtd->codec = codec;
@@ -1543,19 +1611,19 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num)
/* probe the platform */
if (!platform->probed) {
+ if (!try_module_get(platform->dev->driver->owner))
+ return -ENODEV;
+
if (platform->driver->probe) {
ret = platform->driver->probe(platform);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to probe platform %s\n",
platform->name);
+ module_put(platform->dev->driver->owner);
return ret;
}
}
/* mark platform as probed and add to card platform list */
-
- if (!try_module_get(platform->dev->driver->owner))
- return -ENODEV;
-
platform->probed = 1;
list_add(&platform->card_list, &card->platform_dev_list);
}
@@ -1713,7 +1781,6 @@ static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
static void snd_soc_instantiate_card(struct snd_soc_card *card)
{
- struct platform_device *pdev = to_platform_device(card->dev);
struct snd_soc_codec *codec;
struct snd_soc_codec_conf *codec_conf;
enum snd_soc_compress_type compress_type;
@@ -1740,6 +1807,8 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
list_for_each_entry(codec, &codec_list, list) {
if (codec->cache_init)
continue;
+ /* by default we don't override the compress_type */
+ compress_type = 0;
/* check to see if we need to override the compress_type */
for (i = 0; i < card->num_configs; ++i) {
codec_conf = &card->codec_conf[i];
@@ -1750,18 +1819,6 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
break;
}
}
- if (i == card->num_configs) {
- /* no need to override the compress_type so
- * go ahead and do the standard thing */
- ret = snd_soc_init_codec_cache(codec, 0);
- if (ret < 0) {
- mutex_unlock(&card->mutex);
- return;
- }
- continue;
- }
- /* override the compress_type with the one supplied in
- * the machine driver */
ret = snd_soc_init_codec_cache(codec, compress_type);
if (ret < 0) {
mutex_unlock(&card->mutex);
@@ -1780,14 +1837,14 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
}
card->snd_card->dev = card->dev;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* deferred resume work */
INIT_WORK(&card->deferred_resume_work, soc_resume_deferred);
#endif
/* initialise the sound card only once */
if (card->probe) {
- ret = card->probe(pdev);
+ ret = card->probe(card);
if (ret < 0)
goto card_probe_error;
}
@@ -1848,7 +1905,7 @@ probe_dai_err:
card_probe_error:
if (card->remove)
- card->remove(pdev);
+ card->remove(card);
snd_card_free(card->snd_card);
@@ -1872,16 +1929,15 @@ static int soc_probe(struct platform_device *pdev)
struct snd_soc_card *card = platform_get_drvdata(pdev);
int ret = 0;
+ /*
+ * No card means the machine driver should be registering the card
+ * itself; we should not get here in that case, so return an error.
+ */
+ if (!card)
+ return -EINVAL;
+
/* Bodge while we unpick instantiation */
card->dev = &pdev->dev;
- INIT_LIST_HEAD(&card->dai_dev_list);
- INIT_LIST_HEAD(&card->codec_dev_list);
- INIT_LIST_HEAD(&card->platform_dev_list);
- INIT_LIST_HEAD(&card->widgets);
- INIT_LIST_HEAD(&card->paths);
- INIT_LIST_HEAD(&card->dapm_list);
-
- soc_init_card_debugfs(card);
ret = snd_soc_register_card(card);
if (ret != 0) {
@@ -1892,45 +1948,48 @@ static int soc_probe(struct platform_device *pdev)
return 0;
}
-/* removes a socdev */
-static int soc_remove(struct platform_device *pdev)
+static int soc_cleanup_card_resources(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
int i;
- if (card->instantiated) {
+ /* make sure any delayed work runs */
+ for (i = 0; i < card->num_rtd; i++) {
+ struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
+ flush_delayed_work_sync(&rtd->delayed_work);
+ }
+
+ /* remove auxiliary devices */
+ for (i = 0; i < card->num_aux_devs; i++)
+ soc_remove_aux_dev(card, i);
- /* make sure any delayed work runs */
- for (i = 0; i < card->num_rtd; i++) {
- struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
- flush_delayed_work_sync(&rtd->delayed_work);
- }
+ /* remove and free each DAI */
+ for (i = 0; i < card->num_rtd; i++)
+ soc_remove_dai_link(card, i);
- /* remove auxiliary devices */
- for (i = 0; i < card->num_aux_devs; i++)
- soc_remove_aux_dev(card, i);
+ soc_cleanup_card_debugfs(card);
- /* remove and free each DAI */
- for (i = 0; i < card->num_rtd; i++)
- soc_remove_dai_link(card, i);
+ /* remove the card */
+ if (card->remove)
+ card->remove(card);
- soc_cleanup_card_debugfs(card);
+ kfree(card->rtd);
+ snd_card_free(card->snd_card);
+ return 0;
- /* remove the card */
- if (card->remove)
- card->remove(pdev);
+}
+
+/* removes a socdev */
+static int soc_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
- kfree(card->rtd);
- snd_card_free(card->snd_card);
- }
snd_soc_unregister_card(card);
return 0;
}
-static int soc_poweroff(struct device *dev)
+int snd_soc_poweroff(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = dev_get_drvdata(dev);
int i;
if (!card->instantiated)
@@ -1947,11 +2006,12 @@ static int soc_poweroff(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_poweroff);
-static const struct dev_pm_ops soc_pm_ops = {
- .suspend = soc_suspend,
- .resume = soc_resume,
- .poweroff = soc_poweroff,
+const struct dev_pm_ops snd_soc_pm_ops = {
+ .suspend = snd_soc_suspend,
+ .resume = snd_soc_resume,
+ .poweroff = snd_soc_poweroff,
};
/* ASoC platform driver */
@@ -1959,7 +2019,7 @@ static struct platform_driver soc_driver = {
.driver = {
.name = "soc-audio",
.owner = THIS_MODULE,
- .pm = &soc_pm_ops,
+ .pm = &snd_soc_pm_ops,
},
.probe = soc_probe,
.remove = soc_remove,
@@ -2029,10 +2089,11 @@ static int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
*
 * Boolean function indicating if a CODEC register is volatile.
*/
-int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg)
+int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
+ unsigned int reg)
{
- if (codec->driver->volatile_register)
- return codec->driver->volatile_register(reg);
+ if (codec->volatile_register)
+ return codec->volatile_register(codec, reg);
else
return 0;
}
@@ -2129,19 +2190,27 @@ EXPORT_SYMBOL_GPL(snd_soc_write);
*
* Writes new register value.
*
- * Returns 1 for change else 0.
+ * Returns 1 for change, 0 for no change, or negative error code.
*/
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
unsigned int mask, unsigned int value)
{
int change;
unsigned int old, new;
+ int ret;
- old = snd_soc_read(codec, reg);
+ ret = snd_soc_read(codec, reg);
+ if (ret < 0)
+ return ret;
+
+ old = ret;
new = (old & ~mask) | value;
change = old != new;
- if (change)
- snd_soc_write(codec, reg, new);
+ if (change) {
+ ret = snd_soc_write(codec, reg, new);
+ if (ret < 0)
+ return ret;
+ }
return change;
}
@@ -3101,17 +3170,18 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
*
* @card: Card to register
*
- * Note that currently this is an internal only function: it will be
- * exposed to machine drivers after further backporting of ASoC v2
- * registration APIs.
*/
-static int snd_soc_register_card(struct snd_soc_card *card)
+int snd_soc_register_card(struct snd_soc_card *card)
{
int i;
if (!card->name || !card->dev)
return -EINVAL;
+ snd_soc_initialize_card_lists(card);
+
+ soc_init_card_debugfs(card);
+
card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) *
(card->num_links + card->num_aux_devs),
GFP_KERNEL);
@@ -3135,18 +3205,18 @@ static int snd_soc_register_card(struct snd_soc_card *card)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_register_card);
/**
* snd_soc_unregister_card - Unregister a card with the ASoC core
*
* @card: Card to unregister
*
- * Note that currently this is an internal only function: it will be
- * exposed to machine drivers after further backporting of ASoC v2
- * registration APIs.
*/
-static int snd_soc_unregister_card(struct snd_soc_card *card)
+int snd_soc_unregister_card(struct snd_soc_card *card)
{
+ if (card->instantiated)
+ soc_cleanup_card_resources(card);
mutex_lock(&client_mutex);
list_del(&card->list);
mutex_unlock(&client_mutex);
@@ -3154,6 +3224,7 @@ static int snd_soc_unregister_card(struct snd_soc_card *card)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_unregister_card);
/*
* Simplify DAI link configuration by removing ".-1" from device names
@@ -3483,9 +3554,12 @@ int snd_soc_register_codec(struct device *dev,
codec->write = codec_drv->write;
codec->read = codec_drv->read;
+ codec->volatile_register = codec_drv->volatile_register;
+ codec->readable_register = codec_drv->readable_register;
codec->dapm.bias_level = SND_SOC_BIAS_OFF;
codec->dapm.dev = dev;
codec->dapm.codec = codec;
+ codec->dapm.seq_notifier = codec_drv->seq_notifier;
codec->dev = dev;
codec->driver = codec_drv;
codec->num_dai = num_dai;
@@ -3494,20 +3568,30 @@ int snd_soc_register_codec(struct device *dev,
/* allocate CODEC register cache */
if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+ codec->reg_size = reg_size;
/* it is necessary to make a copy of the default register cache
* because in the case of using a compression type that requires
* the default register cache to be marked as __devinitconst the
* kernel might have freed the array by the time we initialize
* the cache.
*/
- codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
- reg_size, GFP_KERNEL);
- if (!codec->reg_def_copy) {
- ret = -ENOMEM;
- goto fail;
+ if (codec_drv->reg_cache_default) {
+ codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
+ reg_size, GFP_KERNEL);
+ if (!codec->reg_def_copy) {
+ ret = -ENOMEM;
+ goto fail;
+ }
}
}
+ if (codec_drv->reg_access_size && codec_drv->reg_access_default) {
+ if (!codec->volatile_register)
+ codec->volatile_register = snd_soc_default_volatile_register;
+ if (!codec->readable_register)
+ codec->readable_register = snd_soc_default_readable_register;
+ }
+
for (i = 0; i < num_dai; i++) {
fixup_codec_formats(&dai_drv[i].playback);
fixup_codec_formats(&dai_drv[i].capture);
@@ -3574,22 +3658,22 @@ EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);
static int __init snd_soc_init(void)
{
#ifdef CONFIG_DEBUG_FS
- debugfs_root = debugfs_create_dir("asoc", NULL);
- if (IS_ERR(debugfs_root) || !debugfs_root) {
+ snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
+ if (IS_ERR(snd_soc_debugfs_root) || !snd_soc_debugfs_root) {
printk(KERN_WARNING
"ASoC: Failed to create debugfs directory\n");
- debugfs_root = NULL;
+ snd_soc_debugfs_root = NULL;
}
- if (!debugfs_create_file("codecs", 0444, debugfs_root, NULL,
+ if (!debugfs_create_file("codecs", 0444, snd_soc_debugfs_root, NULL,
&codec_list_fops))
pr_warn("ASoC: Failed to create CODEC list debugfs file\n");
- if (!debugfs_create_file("dais", 0444, debugfs_root, NULL,
+ if (!debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL,
&dai_list_fops))
pr_warn("ASoC: Failed to create DAI list debugfs file\n");
- if (!debugfs_create_file("platforms", 0444, debugfs_root, NULL,
+ if (!debugfs_create_file("platforms", 0444, snd_soc_debugfs_root, NULL,
&platform_list_fops))
pr_warn("ASoC: Failed to create platform list debugfs file\n");
#endif
@@ -3601,7 +3685,7 @@ module_init(snd_soc_init);
static void __exit snd_soc_exit(void)
{
#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(debugfs_root);
+ debugfs_remove_recursive(snd_soc_debugfs_root);
#endif
platform_driver_unregister(&soc_driver);
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 8194f150bab7..b8e6ab739def 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/async.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/bitops.h>
@@ -125,17 +126,17 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
/**
* snd_soc_dapm_set_bias_level - set the bias level for the system
- * @card: audio device
+ * @dapm: DAPM context
* @level: level to configure
*
* Configure the bias (power) levels for the SoC audio device.
*
* Returns 0 for success else error.
*/
-static int snd_soc_dapm_set_bias_level(struct snd_soc_card *card,
- struct snd_soc_dapm_context *dapm,
+static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
enum snd_soc_bias_level level)
{
+ struct snd_soc_card *card = dapm->card;
int ret = 0;
switch (level) {
@@ -712,7 +713,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
!path->connected(path->source, path->sink))
continue;
- if (path->sink && path->sink->power_check &&
+ if (!path->sink)
+ continue;
+
+ if (path->sink->force) {
+ power = 1;
+ break;
+ }
+
+ if (path->sink->power_check &&
path->sink->power_check(path->sink)) {
power = 1;
break;
@@ -726,10 +735,23 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
struct snd_soc_dapm_widget *b,
- int sort[])
+ bool power_up)
{
+ int *sort;
+
+ if (power_up)
+ sort = dapm_up_seq;
+ else
+ sort = dapm_down_seq;
+
if (sort[a->id] != sort[b->id])
return sort[a->id] - sort[b->id];
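+	/* within a sort class, order by subseq: ascending when powering up,
+	 * descending when powering down */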
+ if (a->subseq != b->subseq) {
+ if (power_up)
+ return a->subseq - b->subseq;
+ else
+ return b->subseq - a->subseq;
+ }
if (a->reg != b->reg)
return a->reg - b->reg;
if (a->dapm != b->dapm)
@@ -741,12 +763,12 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
/* Insert a widget in order into a DAPM power sequence. */
static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
struct list_head *list,
- int sort[])
+ bool power_up)
{
struct snd_soc_dapm_widget *w;
list_for_each_entry(w, list, power_list)
- if (dapm_seq_compare(new_widget, w, sort) < 0) {
+ if (dapm_seq_compare(new_widget, w, power_up) < 0) {
list_add_tail(&new_widget->power_list, &w->power_list);
return;
}
@@ -857,26 +879,42 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
* handled.
*/
static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
- struct list_head *list, int event, int sort[])
+ struct list_head *list, int event, bool power_up)
{
struct snd_soc_dapm_widget *w, *n;
LIST_HEAD(pending);
int cur_sort = -1;
+ int cur_subseq = -1;
int cur_reg = SND_SOC_NOPM;
struct snd_soc_dapm_context *cur_dapm = NULL;
- int ret;
+ int ret, i;
+ int *sort;
+
+ if (power_up)
+ sort = dapm_up_seq;
+ else
+ sort = dapm_down_seq;
list_for_each_entry_safe(w, n, list, power_list) {
ret = 0;
/* Do we need to apply any queued changes? */
if (sort[w->id] != cur_sort || w->reg != cur_reg ||
- w->dapm != cur_dapm) {
+ w->dapm != cur_dapm || w->subseq != cur_subseq) {
if (!list_empty(&pending))
dapm_seq_run_coalesced(cur_dapm, &pending);
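+			/* let the driver know the widgets for this step have
+			 * been handled */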
+ if (cur_dapm && cur_dapm->seq_notifier) {
+ for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
+ if (sort[i] == cur_sort)
+ cur_dapm->seq_notifier(cur_dapm,
+ i,
+ cur_subseq);
+ }
+
INIT_LIST_HEAD(&pending);
cur_sort = -1;
+ cur_subseq = -1;
cur_reg = SND_SOC_NOPM;
cur_dapm = NULL;
}
@@ -921,6 +959,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
default:
/* Queue it up for application */
cur_sort = sort[w->id];
+ cur_subseq = w->subseq;
cur_reg = w->reg;
cur_dapm = w->dapm;
list_move(&w->power_list, &pending);
@@ -934,6 +973,13 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
if (!list_empty(&pending))
dapm_seq_run_coalesced(dapm, &pending);
+
+ if (cur_dapm && cur_dapm->seq_notifier) {
+ for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
+ if (sort[i] == cur_sort)
+ cur_dapm->seq_notifier(cur_dapm,
+ i, cur_subseq);
+ }
}
static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
@@ -969,7 +1015,62 @@ static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
}
}
+/* Async callback run prior to DAPM sequences - brings to _PREPARE if
+ * they're changing state.
+ */
+static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
+{
+ struct snd_soc_dapm_context *d = data;
+ int ret;
+
+ if (d->dev_power && d->bias_level == SND_SOC_BIAS_OFF) {
+ ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY);
+ if (ret != 0)
+ dev_err(d->dev,
+ "Failed to turn on bias: %d\n", ret);
+ }
+
+ /* If we're changing to all on or all off then prepare */
+ if ((d->dev_power && d->bias_level == SND_SOC_BIAS_STANDBY) ||
+ (!d->dev_power && d->bias_level == SND_SOC_BIAS_ON)) {
+ ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
+ if (ret != 0)
+ dev_err(d->dev,
+ "Failed to prepare bias: %d\n", ret);
+ }
+}
+
+/* Async callback run after DAPM sequences - brings the contexts to
+ * their final state.
+ */
+static void dapm_post_sequence_async(void *data, async_cookie_t cookie)
+{
+ struct snd_soc_dapm_context *d = data;
+ int ret;
+ /* If we just powered the last thing off drop to standby bias */
+ if (d->bias_level == SND_SOC_BIAS_PREPARE && !d->dev_power) {
+ ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY);
+ if (ret != 0)
+ dev_err(d->dev, "Failed to apply standby bias: %d\n",
+ ret);
+ }
+
+ /* If we're in standby and can support bias off then do that */
+ if (d->bias_level == SND_SOC_BIAS_STANDBY && d->idle_bias_off) {
+ ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_OFF);
+ if (ret != 0)
+ dev_err(d->dev, "Failed to turn off bias: %d\n", ret);
+ }
+
+ /* If we just powered up then move to active bias */
+ if (d->bias_level == SND_SOC_BIAS_PREPARE && d->dev_power) {
+ ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_ON);
+ if (ret != 0)
+ dev_err(d->dev, "Failed to apply active bias: %d\n",
+ ret);
+ }
+}
/*
* Scan each dapm widget for complete audio path.
@@ -987,7 +1088,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
struct snd_soc_dapm_context *d;
LIST_HEAD(up_list);
LIST_HEAD(down_list);
- int ret = 0;
+ LIST_HEAD(async_domain);
int power;
trace_snd_soc_dapm_start(card);
@@ -1002,10 +1103,10 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
list_for_each_entry(w, &card->widgets, list) {
switch (w->id) {
case snd_soc_dapm_pre:
- dapm_seq_insert(w, &down_list, dapm_down_seq);
+ dapm_seq_insert(w, &down_list, false);
break;
case snd_soc_dapm_post:
- dapm_seq_insert(w, &up_list, dapm_up_seq);
+ dapm_seq_insert(w, &up_list, true);
break;
default:
@@ -1025,9 +1126,9 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
trace_snd_soc_dapm_widget_power(w, power);
if (power)
- dapm_seq_insert(w, &up_list, dapm_up_seq);
+ dapm_seq_insert(w, &up_list, true);
else
- dapm_seq_insert(w, &down_list, dapm_down_seq);
+ dapm_seq_insert(w, &down_list, false);
w->power = power;
break;
@@ -1065,65 +1166,25 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
}
}
- list_for_each_entry(d, &dapm->card->dapm_list, list) {
- if (d->dev_power && d->bias_level == SND_SOC_BIAS_OFF) {
- ret = snd_soc_dapm_set_bias_level(card, d,
- SND_SOC_BIAS_STANDBY);
- if (ret != 0)
- dev_err(d->dev,
- "Failed to turn on bias: %d\n", ret);
- }
-
- /* If we're changing to all on or all off then prepare */
- if ((d->dev_power && d->bias_level == SND_SOC_BIAS_STANDBY) ||
- (!d->dev_power && d->bias_level == SND_SOC_BIAS_ON)) {
- ret = snd_soc_dapm_set_bias_level(card, d,
- SND_SOC_BIAS_PREPARE);
- if (ret != 0)
- dev_err(d->dev,
- "Failed to prepare bias: %d\n", ret);
- }
- }
+ /* Run all the bias changes in parallel */
+ list_for_each_entry(d, &dapm->card->dapm_list, list)
+ async_schedule_domain(dapm_pre_sequence_async, d,
+ &async_domain);
+ async_synchronize_full_domain(&async_domain);
/* Power down widgets first; try to avoid amplifying pops. */
- dapm_seq_run(dapm, &down_list, event, dapm_down_seq);
+ dapm_seq_run(dapm, &down_list, event, false);
dapm_widget_update(dapm);
/* Now power up. */
- dapm_seq_run(dapm, &up_list, event, dapm_up_seq);
-
- list_for_each_entry(d, &dapm->card->dapm_list, list) {
- /* If we just powered the last thing off drop to standby bias */
- if (d->bias_level == SND_SOC_BIAS_PREPARE && !d->dev_power) {
- ret = snd_soc_dapm_set_bias_level(card, d,
- SND_SOC_BIAS_STANDBY);
- if (ret != 0)
- dev_err(d->dev,
- "Failed to apply standby bias: %d\n",
- ret);
- }
-
- /* If we're in standby and can support bias off then do that */
- if (d->bias_level == SND_SOC_BIAS_STANDBY &&
- d->idle_bias_off) {
- ret = snd_soc_dapm_set_bias_level(card, d,
- SND_SOC_BIAS_OFF);
- if (ret != 0)
- dev_err(d->dev,
- "Failed to turn off bias: %d\n", ret);
- }
+ dapm_seq_run(dapm, &up_list, event, true);
- /* If we just powered up then move to active bias */
- if (d->bias_level == SND_SOC_BIAS_PREPARE && d->dev_power) {
- ret = snd_soc_dapm_set_bias_level(card, d,
- SND_SOC_BIAS_ON);
- if (ret != 0)
- dev_err(d->dev,
- "Failed to apply active bias: %d\n",
- ret);
- }
- }
+ /* Run all the bias changes in parallel */
+ list_for_each_entry(d, &dapm->card->dapm_list, list)
+ async_schedule_domain(dapm_post_sequence_async, d,
+ &async_domain);
+ async_synchronize_full_domain(&async_domain);
pop_dbg(dapm->dev, card->pop_time,
"DAPM sequencing finished, waiting %dms\n", card->pop_time);
@@ -1181,7 +1242,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (p->connect)
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- " in %s %s\n",
+ " in \"%s\" \"%s\"\n",
p->name ? p->name : "static",
p->source->name);
}
@@ -1191,7 +1252,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (p->connect)
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- " out %s %s\n",
+ " out \"%s\" \"%s\"\n",
p->name ? p->name : "static",
p->sink->name);
}
@@ -1627,6 +1688,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);
int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
{
struct snd_soc_dapm_widget *w;
+ unsigned int val;
list_for_each_entry(w, &dapm->card->widgets, list)
{
@@ -1675,6 +1737,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
case snd_soc_dapm_post:
break;
}
+
+ /* Read the initial power state from the device */
+ if (w->reg >= 0) {
+ val = snd_soc_read(w->codec, w->reg);
+ val &= 1 << w->shift;
+ if (w->invert)
+ val = !val;
+
+ if (val)
+ w->power = 1;
+ }
+
w->new = 1;
}
@@ -2205,7 +2279,6 @@ int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
mutex_unlock(&codec->mutex);
return 0;
}
-EXPORT_SYMBOL_GPL(snd_soc_dapm_stream_event);
/**
* snd_soc_dapm_enable_pin - enable pin.
@@ -2372,7 +2445,7 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
if (w->dapm != dapm)
continue;
if (w->power) {
- dapm_seq_insert(w, &down_list, dapm_down_seq);
+ dapm_seq_insert(w, &down_list, false);
w->power = 0;
powerdown = 1;
}
@@ -2382,9 +2455,9 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
* standby.
*/
if (powerdown) {
- snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_PREPARE);
- dapm_seq_run(dapm, &down_list, 0, dapm_down_seq);
- snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_STANDBY);
+ snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_PREPARE);
+ dapm_seq_run(dapm, &down_list, 0, false);
+ snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_STANDBY);
}
}
@@ -2397,7 +2470,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
list_for_each_entry(codec, &card->codec_dev_list, list) {
soc_dapm_shutdown_codec(&codec->dapm);
- snd_soc_dapm_set_bias_level(card, &codec->dapm, SND_SOC_BIAS_OFF);
+ snd_soc_dapm_set_bias_level(&codec->dapm, SND_SOC_BIAS_OFF);
}
}
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index ac5a5bc7375a..fcab80b36a37 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -37,6 +37,7 @@ int snd_soc_jack_new(struct snd_soc_codec *codec, const char *id, int type,
{
jack->codec = codec;
INIT_LIST_HEAD(&jack->pins);
+ INIT_LIST_HEAD(&jack->jack_zones);
BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);
return snd_jack_new(codec->card->snd_card, id, type, &jack->jack);
@@ -100,7 +101,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
}
/* Report before the DAPM sync to help users updating micbias status */
- blocking_notifier_call_chain(&jack->notifier, status, NULL);
+ blocking_notifier_call_chain(&jack->notifier, status, jack);
snd_soc_dapm_sync(dapm);
@@ -112,6 +113,51 @@ out:
EXPORT_SYMBOL_GPL(snd_soc_jack_report);
/**
+ * snd_soc_jack_add_zones - Associate voltage zones with jack
+ *
+ * @jack: ASoC jack
+ * @count: Number of zones
+ * @zones: Array of zones
+ *
+ * After this function has been called the zones specified in the
+ * array will be associated with the jack.
+ */
+int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_zone *zones)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ INIT_LIST_HEAD(&zones[i].list);
+ list_add(&(zones[i].list), &jack->jack_zones);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_jack_add_zones);
+
+/**
+ * snd_soc_jack_get_type - Based on the mic bias value, this function returns
+ * the type of jack from the zones declared in the jack type
+ *
+ * @jack: ASoC jack
+ * @micbias_voltage: mic bias voltage at ADC channel when jack is plugged in
+ *
+ * Based on the mic bias value passed, this function helps identify
+ * the type of jack from the already declared jack zones
+ */
+int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage)
+{
+ struct snd_soc_jack_zone *zone;
+
+ list_for_each_entry(zone, &jack->jack_zones, list) {
+ if (micbias_voltage >= zone->min_mv &&
+ micbias_voltage < zone->max_mv)
+ return zone->jack_type;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_jack_get_type);
+
+/**
* snd_soc_jack_add_pins - Associate DAPM pins with an ASoC jack
*
* @jack: ASoC jack
@@ -194,7 +240,7 @@ static void snd_soc_jack_gpio_detect(struct snd_soc_jack_gpio *gpio)
int enable;
int report;
- enable = gpio_get_value(gpio->gpio);
+ enable = gpio_get_value_cansleep(gpio->gpio);
if (gpio->invert)
enable = !enable;
@@ -284,6 +330,14 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
if (ret)
goto err;
+ if (gpios[i].wake) {
+ ret = set_irq_wake(gpio_to_irq(gpios[i].gpio), 1);
+ if (ret != 0)
+ printk(KERN_ERR
+ "Failed to mark GPIO %d as wake source: %d\n",
+ gpios[i].gpio, ret);
+ }
+
#ifdef CONFIG_GPIO_SYSFS
/* Expose GPIO value over sysfs for diagnostic purposes */
gpio_export(gpios[i].gpio, false);
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 1d07b931f3d8..3f45e6a439bf 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -28,26 +28,9 @@ int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params)
{
int sample_size;
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- case SNDRV_PCM_FORMAT_S16_BE:
- sample_size = 16;
- break;
- case SNDRV_PCM_FORMAT_S20_3LE:
- case SNDRV_PCM_FORMAT_S20_3BE:
- sample_size = 20;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- case SNDRV_PCM_FORMAT_S24_BE:
- sample_size = 24;
- break;
- case SNDRV_PCM_FORMAT_S32_LE:
- case SNDRV_PCM_FORMAT_S32_BE:
- sample_size = 32;
- break;
- default:
- return -ENOTSUPP;
- }
+ sample_size = snd_pcm_format_width(params_format(params));
+ if (sample_size < 0)
+ return sample_size;
return snd_soc_calc_frame_size(sample_size, params_channels(params),
1);
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
new file mode 100644
index 000000000000..66b504f06c23
--- /dev/null
+++ b/sound/soc/tegra/Kconfig
@@ -0,0 +1,26 @@
+config SND_TEGRA_SOC
+ tristate "SoC Audio for the Tegra System-on-Chip"
+ depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
+ default m
+ help
+ Say Y or M here if you want support for SoC audio on Tegra.
+
+config SND_TEGRA_SOC_I2S
+ tristate
+ depends on SND_TEGRA_SOC
+ default m
+ help
+ Say Y or M if you want to add support for codecs attached to the
+ Tegra I2S interface. You will also need to select the individual
+ machine drivers you want to support below.
+
+config SND_TEGRA_SOC_HARMONY
+ tristate "SoC Audio support for Tegra Harmony reference board"
+ depends on SND_TEGRA_SOC && MACH_HARMONY && I2C
+ default m
+ select SND_TEGRA_SOC_I2S
+ select SND_SOC_WM8903
+ help
+ Say Y or M here if you want to add support for SoC audio on the
+ Tegra Harmony reference board.
+
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
new file mode 100644
index 000000000000..fd183d3ab4f1
--- /dev/null
+++ b/sound/soc/tegra/Makefile
@@ -0,0 +1,15 @@
+# Tegra platform Support
+snd-soc-tegra-das-objs := tegra_das.o
+snd-soc-tegra-pcm-objs := tegra_pcm.o
+snd-soc-tegra-i2s-objs := tegra_i2s.o
+snd-soc-tegra-utils-objs += tegra_asoc_utils.o
+
+obj-$(CONFIG_SND_TEGRA_SOC) += snd-soc-tegra-utils.o
+obj-$(CONFIG_SND_TEGRA_SOC) += snd-soc-tegra-das.o
+obj-$(CONFIG_SND_TEGRA_SOC) += snd-soc-tegra-pcm.o
+obj-$(CONFIG_SND_TEGRA_SOC_I2S) += snd-soc-tegra-i2s.o
+
+# Tegra machine Support
+snd-soc-tegra-harmony-objs := harmony.o
+
+obj-$(CONFIG_SND_TEGRA_SOC_HARMONY) += snd-soc-tegra-harmony.o
diff --git a/sound/soc/tegra/harmony.c b/sound/soc/tegra/harmony.c
new file mode 100644
index 000000000000..8585957477eb
--- /dev/null
+++ b/sound/soc/tegra/harmony.c
@@ -0,0 +1,393 @@
+/*
+ * harmony.c - Harmony machine ASoC driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010-2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <asm/mach-types.h>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+#include <mach/harmony_audio.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/wm8903.h"
+
+#include "tegra_das.h"
+#include "tegra_i2s.h"
+#include "tegra_pcm.h"
+#include "tegra_asoc_utils.h"
+
+#define DRV_NAME "tegra-snd-harmony"
+
+#define GPIO_SPKR_EN BIT(0)
+#define GPIO_INT_MIC_EN BIT(1)
+#define GPIO_EXT_MIC_EN BIT(2)
+
+struct tegra_harmony {
+ struct tegra_asoc_utils_data util_data;
+ struct harmony_audio_platform_data *pdata;
+ int gpio_requested;
+};
+
+static int harmony_asoc_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_harmony *harmony = snd_soc_card_get_drvdata(card);
+ int srate, mclk, mclk_change;
+ int err;
+
+ srate = params_rate(params);
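+	/* MCLK is 128*fs for the highest sample rates, 256*fs otherwise */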
+ switch (srate) {
+ case 64000:
+ case 88200:
+ case 96000:
+ mclk = 128 * srate;
+ break;
+ default:
+ mclk = 256 * srate;
+ break;
+ }
+ /* FIXME: Codec only requires >= 3MHz if OSR==0 */
+ while (mclk < 6000000)
+ mclk *= 2;
+
+ err = tegra_asoc_utils_set_rate(&harmony->util_data, srate, mclk,
+ &mclk_change);
+ if (err < 0) {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+ if (mclk_change) {
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops harmony_asoc_ops = {
+ .hw_params = harmony_asoc_hw_params,
+};
+
+static struct snd_soc_jack harmony_hp_jack;
+
+static struct snd_soc_jack_pin harmony_hp_jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+
+static struct snd_soc_jack_gpio harmony_hp_jack_gpios[] = {
+ {
+ .name = "headphone detect",
+ .report = SND_JACK_HEADPHONE,
+ .debounce_time = 150,
+ .invert = 1,
+ }
+};
+
+static struct snd_soc_jack harmony_mic_jack;
+
+static struct snd_soc_jack_pin harmony_mic_jack_pins[] = {
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int harmony_event_int_spk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_harmony *harmony = snd_soc_card_get_drvdata(card);
+ struct harmony_audio_platform_data *pdata = harmony->pdata;
+
+ gpio_set_value_cansleep(pdata->gpio_spkr_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget harmony_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Int Spk", harmony_event_int_spk),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route harmony_audio_map[] = {
+ {"Headphone Jack", NULL, "HPOUTR"},
+ {"Headphone Jack", NULL, "HPOUTL"},
+ {"Int Spk", NULL, "ROP"},
+ {"Int Spk", NULL, "RON"},
+ {"Int Spk", NULL, "LOP"},
+ {"Int Spk", NULL, "LON"},
+ {"Mic Bias", NULL, "Mic Jack"},
+ {"IN1L", NULL, "Mic Bias"},
+};
+
+static const struct snd_kcontrol_new harmony_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Int Spk"),
+};
+
+static int harmony_asoc_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_harmony *harmony = snd_soc_card_get_drvdata(card);
+ struct harmony_audio_platform_data *pdata = harmony->pdata;
+ int ret;
+
+ ret = gpio_request(pdata->gpio_spkr_en, "spkr_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get spkr_en gpio\n");
+ return ret;
+ }
+ harmony->gpio_requested |= GPIO_SPKR_EN;
+
+ gpio_direction_output(pdata->gpio_spkr_en, 0);
+
+ ret = gpio_request(pdata->gpio_int_mic_en, "int_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get int_mic_en gpio\n");
+ return ret;
+ }
+ harmony->gpio_requested |= GPIO_INT_MIC_EN;
+
+ /* Disable int mic; enable signal is active-high */
+ gpio_direction_output(pdata->gpio_int_mic_en, 0);
+
+ ret = gpio_request(pdata->gpio_ext_mic_en, "ext_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get ext_mic_en gpio\n");
+ return ret;
+ }
+ harmony->gpio_requested |= GPIO_EXT_MIC_EN;
+
+ /* Enable ext mic; enable signal is active-low */
+ gpio_direction_output(pdata->gpio_ext_mic_en, 0);
+
+ ret = snd_soc_add_controls(codec, harmony_controls,
+ ARRAY_SIZE(harmony_controls));
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_new_controls(dapm, harmony_dapm_widgets,
+ ARRAY_SIZE(harmony_dapm_widgets));
+
+ snd_soc_dapm_add_routes(dapm, harmony_audio_map,
+ ARRAY_SIZE(harmony_audio_map));
+
+ harmony_hp_jack_gpios[0].gpio = pdata->gpio_hp_det;
+ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+ &harmony_hp_jack);
+ snd_soc_jack_add_pins(&harmony_hp_jack,
+ ARRAY_SIZE(harmony_hp_jack_pins),
+ harmony_hp_jack_pins);
+ snd_soc_jack_add_gpios(&harmony_hp_jack,
+ ARRAY_SIZE(harmony_hp_jack_gpios),
+ harmony_hp_jack_gpios);
+
+ snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
+ &harmony_mic_jack);
+ snd_soc_jack_add_pins(&harmony_mic_jack,
+ ARRAY_SIZE(harmony_mic_jack_pins),
+ harmony_mic_jack_pins);
+ wm8903_mic_detect(codec, &harmony_mic_jack, SND_JACK_MICROPHONE, 0);
+
+ snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
+
+ snd_soc_dapm_nc_pin(dapm, "IN3L");
+ snd_soc_dapm_nc_pin(dapm, "IN3R");
+ snd_soc_dapm_nc_pin(dapm, "LINEOUTL");
+ snd_soc_dapm_nc_pin(dapm, "LINEOUTR");
+
+ snd_soc_dapm_sync(dapm);
+
+ return 0;
+}
+
+static struct snd_soc_dai_link harmony_wm8903_dai = {
+ .name = "WM8903",
+ .stream_name = "WM8903 PCM",
+ .codec_name = "wm8903.0-001a",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra-i2s.0",
+ .codec_dai_name = "wm8903-hifi",
+ .init = harmony_asoc_init,
+ .ops = &harmony_asoc_ops,
+};
+
+static struct snd_soc_card snd_soc_harmony = {
+ .name = "tegra-harmony",
+ .dai_link = &harmony_wm8903_dai,
+ .num_links = 1,
+};
+
+static __devinit int tegra_snd_harmony_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_harmony;
+ struct tegra_harmony *harmony;
+ struct harmony_audio_platform_data *pdata;
+ int ret;
+
+ if (!machine_is_harmony()) {
+ dev_err(&pdev->dev, "Not running on Tegra Harmony!\n");
+ return -ENODEV;
+ }
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ harmony = kzalloc(sizeof(struct tegra_harmony), GFP_KERNEL);
+ if (!harmony) {
+ dev_err(&pdev->dev, "Can't allocate tegra_harmony\n");
+ return -ENOMEM;
+ }
+
+ harmony->pdata = pdata;
+
+ ret = tegra_asoc_utils_init(&harmony->util_data, &pdev->dev);
+ if (ret)
+ goto err_free_harmony;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, harmony);
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
+ goto err_clear_drvdata;
+ }
+
+ return 0;
+
+err_clear_drvdata:
+ snd_soc_card_set_drvdata(card, NULL);
+ platform_set_drvdata(pdev, NULL);
+ card->dev = NULL;
+ tegra_asoc_utils_fini(&harmony->util_data);
+err_free_harmony:
+ kfree(harmony);
+ return ret;
+}
+
+static int __devexit tegra_snd_harmony_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tegra_harmony *harmony = snd_soc_card_get_drvdata(card);
+ struct harmony_audio_platform_data *pdata = harmony->pdata;
+
+ snd_soc_unregister_card(card);
+
+ snd_soc_card_set_drvdata(card, NULL);
+ platform_set_drvdata(pdev, NULL);
+ card->dev = NULL;
+
+ tegra_asoc_utils_fini(&harmony->util_data);
+
+ if (harmony->gpio_requested & GPIO_EXT_MIC_EN)
+ gpio_free(pdata->gpio_ext_mic_en);
+ if (harmony->gpio_requested & GPIO_INT_MIC_EN)
+ gpio_free(pdata->gpio_int_mic_en);
+ if (harmony->gpio_requested & GPIO_SPKR_EN)
+ gpio_free(pdata->gpio_spkr_en);
+
+ kfree(harmony);
+
+ return 0;
+}
+
+static struct platform_driver tegra_snd_harmony_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_snd_harmony_probe,
+ .remove = __devexit_p(tegra_snd_harmony_remove),
+};
+
+static int __init snd_tegra_harmony_init(void)
+{
+ return platform_driver_register(&tegra_snd_harmony_driver);
+}
+module_init(snd_tegra_harmony_init);
+
+static void __exit snd_tegra_harmony_exit(void)
+{
+ platform_driver_unregister(&tegra_snd_harmony_driver);
+}
+module_exit(snd_tegra_harmony_exit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Harmony machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_asoc_utils.c b/sound/soc/tegra/tegra_asoc_utils.c
new file mode 100644
index 000000000000..52f0a3f9ce40
--- /dev/null
+++ b/sound/soc/tegra/tegra_asoc_utils.c
@@ -0,0 +1,155 @@
+/*
+ * tegra_asoc_utils.c - Tegra ASoC utility code
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#include "tegra_asoc_utils.h"
+
+int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
+ int mclk, int *mclk_change)
+{
+ int new_baseclock;
+ int err;
+
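+	/* 56448000 Hz divides evenly by the 44.1 kHz family of rates,
+	 * 73728000 Hz by the 8/48 kHz family */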
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ new_baseclock = 56448000;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ new_baseclock = 73728000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *mclk_change = ((new_baseclock != data->set_baseclock) ||
+ (mclk != data->set_mclk));
+ if (!*mclk_change)
+ return 0;
+
+ data->set_baseclock = 0;
+ data->set_mclk = 0;
+
+ clk_disable(data->clk_cdev1);
+ clk_disable(data->clk_pll_a_out0);
+ clk_disable(data->clk_pll_a);
+
+ err = clk_set_rate(data->clk_pll_a, new_baseclock);
+ if (err) {
+ dev_err(data->dev, "Can't set pll_a rate: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(data->clk_pll_a_out0, mclk);
+ if (err) {
+ dev_err(data->dev, "Can't set pll_a_out0 rate: %d\n", err);
+ return err;
+ }
+
+ /* Don't set cdev1 rate; it's locked to pll_a_out0 */
+
+ err = clk_enable(data->clk_pll_a);
+ if (err) {
+ dev_err(data->dev, "Can't enable pll_a: %d\n", err);
+ return err;
+ }
+
+ err = clk_enable(data->clk_pll_a_out0);
+ if (err) {
+ dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err);
+ return err;
+ }
+
+ err = clk_enable(data->clk_cdev1);
+ if (err) {
+ dev_err(data->dev, "Can't enable cdev1: %d\n", err);
+ return err;
+ }
+
+ data->set_baseclock = new_baseclock;
+ data->set_mclk = mclk;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_utils_set_rate);
+
+int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
+ struct device *dev)
+{
+ int ret;
+
+ data->dev = dev;
+
+ data->clk_pll_a = clk_get_sys(NULL, "pll_a");
+ if (IS_ERR(data->clk_pll_a)) {
+ dev_err(data->dev, "Can't retrieve clk pll_a\n");
+ ret = PTR_ERR(data->clk_pll_a);
+ goto err;
+ }
+
+ data->clk_pll_a_out0 = clk_get_sys(NULL, "pll_a_out0");
+ if (IS_ERR(data->clk_pll_a_out0)) {
+ dev_err(data->dev, "Can't retrieve clk pll_a_out0\n");
+ ret = PTR_ERR(data->clk_pll_a_out0);
+ goto err_put_pll_a;
+ }
+
+ data->clk_cdev1 = clk_get_sys(NULL, "cdev1");
+ if (IS_ERR(data->clk_cdev1)) {
+ dev_err(data->dev, "Can't retrieve clk cdev1\n");
+ ret = PTR_ERR(data->clk_cdev1);
+ goto err_put_pll_a_out0;
+ }
+
+ return 0;
+
+err_put_pll_a_out0:
+ clk_put(data->clk_pll_a_out0);
+err_put_pll_a:
+ clk_put(data->clk_pll_a);
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_utils_init);
+
+void tegra_asoc_utils_fini(struct tegra_asoc_utils_data *data)
+{
+ clk_put(data->clk_cdev1);
+ clk_put(data->clk_pll_a_out0);
+ clk_put(data->clk_pll_a);
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_utils_fini);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra ASoC utility code");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/tegra/tegra_asoc_utils.h b/sound/soc/tegra/tegra_asoc_utils.h
new file mode 100644
index 000000000000..bbba7afdfc2c
--- /dev/null
+++ b/sound/soc/tegra/tegra_asoc_utils.h
@@ -0,0 +1,45 @@
+/*
+ * tegra_asoc_utils.h - Definitions for Tegra ASoC utility code
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA_ASOC_UTILS_H__
+#define __TEGRA_ASOC_UTILS_H__
+
+struct clk;
+struct device;
+
+struct tegra_asoc_utils_data {
+ struct device *dev;
+ struct clk *clk_pll_a;
+ struct clk *clk_pll_a_out0;
+ struct clk *clk_cdev1;
+ int set_baseclock;
+ int set_mclk;
+};
+
+int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
+ int mclk, int *mclk_change);
+int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
+ struct device *dev);
+void tegra_asoc_utils_fini(struct tegra_asoc_utils_data *data);
+
+#endif
+
diff --git a/sound/soc/tegra/tegra_das.c b/sound/soc/tegra/tegra_das.c
new file mode 100644
index 000000000000..9f24ef73f2cb
--- /dev/null
+++ b/sound/soc/tegra/tegra_das.c
@@ -0,0 +1,265 @@
+/*
+ * tegra_das.c - Tegra DAS driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <sound/soc.h>
+#include "tegra_das.h"
+
+#define DRV_NAME "tegra-das"
+
+static struct tegra_das *das;
+
+static inline void tegra_das_write(u32 reg, u32 val)
+{
+ __raw_writel(val, das->regs + reg);
+}
+
+static inline u32 tegra_das_read(u32 reg)
+{
+ return __raw_readl(das->regs + reg);
+}
+
+int tegra_das_connect_dap_to_dac(int dap, int dac)
+{
+ u32 addr;
+ u32 reg;
+
+ if (!das)
+ return -ENODEV;
+
+ addr = TEGRA_DAS_DAP_CTRL_SEL +
+ (dap * TEGRA_DAS_DAP_CTRL_SEL_STRIDE);
+ reg = dac << TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P;
+
+ tegra_das_write(addr, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_das_connect_dap_to_dac);
+
+int tegra_das_connect_dap_to_dap(int dap, int otherdap, int master,
+ int sdata1rx, int sdata2rx)
+{
+ u32 addr;
+ u32 reg;
+
+ if (!das)
+ return -ENODEV;
+
+ addr = TEGRA_DAS_DAP_CTRL_SEL +
+ (dap * TEGRA_DAS_DAP_CTRL_SEL_STRIDE);
+ reg = otherdap << TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P |
+ !!sdata2rx << TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P |
+ !!sdata1rx << TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P |
+ !!master << TEGRA_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P;
+
+ tegra_das_write(addr, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_das_connect_dap_to_dap);
+
+int tegra_das_connect_dac_to_dap(int dac, int dap)
+{
+ u32 addr;
+ u32 reg;
+
+ if (!das)
+ return -ENODEV;
+
+ addr = TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL +
+ (dac * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
+ reg = dap << TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P |
+ dap << TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P |
+ dap << TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P;
+
+ tegra_das_write(addr, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_das_connect_dac_to_dap);
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_das_show(struct seq_file *s, void *unused)
+{
+ int i;
+ u32 addr;
+ u32 reg;
+
+ for (i = 0; i < TEGRA_DAS_DAP_CTRL_SEL_COUNT; i++) {
+ addr = TEGRA_DAS_DAP_CTRL_SEL +
+ (i * TEGRA_DAS_DAP_CTRL_SEL_STRIDE);
+ reg = tegra_das_read(addr);
+ seq_printf(s, "TEGRA_DAS_DAP_CTRL_SEL[%d] = %08x\n", i, reg);
+ }
+
+ for (i = 0; i < TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT; i++) {
+ addr = TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL +
+ (i * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
+ reg = tegra_das_read(addr);
+ seq_printf(s, "TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL[%d] = %08x\n",
+ i, reg);
+ }
+
+ return 0;
+}
+
+static int tegra_das_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_das_show, inode->i_private);
+}
+
+static const struct file_operations tegra_das_debug_fops = {
+ .open = tegra_das_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra_das_debug_add(struct tegra_das *das)
+{
+ das->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
+ snd_soc_debugfs_root, das,
+ &tegra_das_debug_fops);
+}
+
+static void tegra_das_debug_remove(struct tegra_das *das)
+{
+ if (das->debug)
+ debugfs_remove(das->debug);
+}
+#else
+static inline void tegra_das_debug_add(struct tegra_das *das)
+{
+}
+
+static inline void tegra_das_debug_remove(struct tegra_das *das)
+{
+}
+#endif
+
+static int __devinit tegra_das_probe(struct platform_device *pdev)
+{
+ struct resource *res, *region;
+ int ret = 0;
+
+ if (das)
+ return -ENODEV;
+
+ das = kzalloc(sizeof(struct tegra_das), GFP_KERNEL);
+ if (!das) {
+ dev_err(&pdev->dev, "Can't allocate tegra_das\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ das->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ region = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ das->regs = ioremap(res->start, resource_size(res));
+ if (!das->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ tegra_das_debug_add(das);
+
+ platform_set_drvdata(pdev, das);
+
+ return 0;
+
+err_release:
+ release_mem_region(res->start, resource_size(res));
+err_free:
+ kfree(das);
+ das = NULL;
+exit:
+ return ret;
+}
+
+static int __devexit tegra_das_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ if (!das)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, NULL);
+
+ tegra_das_debug_remove(das);
+
+ iounmap(das->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(das);
+ das = NULL;
+
+ return 0;
+}
+
+static struct platform_driver tegra_das_driver = {
+ .probe = tegra_das_probe,
+ .remove = __devexit_p(tegra_das_remove),
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init tegra_das_modinit(void)
+{
+ return platform_driver_register(&tegra_das_driver);
+}
+module_init(tegra_das_modinit);
+
+static void __exit tegra_das_modexit(void)
+{
+ platform_driver_unregister(&tegra_das_driver);
+}
+module_exit(tegra_das_modexit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra DAS driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_das.h b/sound/soc/tegra/tegra_das.h
new file mode 100644
index 000000000000..2c96c7b3c459
--- /dev/null
+++ b/sound/soc/tegra/tegra_das.h
@@ -0,0 +1,135 @@
+/*
+ * tegra_das.h - Definitions for Tegra DAS driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA_DAS_H__
+#define __TEGRA_DAS_H__
+
+/* Register TEGRA_DAS_DAP_CTRL_SEL */
+#define TEGRA_DAS_DAP_CTRL_SEL 0x00
+#define TEGRA_DAS_DAP_CTRL_SEL_COUNT 5
+#define TEGRA_DAS_DAP_CTRL_SEL_STRIDE 4
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P 31
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_MS_SEL_S 1
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P 30
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_S 1
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P 29
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_S 1
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P 0
+#define TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_S 5
+
+/* Values for field TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL */
+#define TEGRA_DAS_DAP_SEL_DAC1 0
+#define TEGRA_DAS_DAP_SEL_DAC2 1
+#define TEGRA_DAS_DAP_SEL_DAC3 2
+#define TEGRA_DAS_DAP_SEL_DAP1 16
+#define TEGRA_DAS_DAP_SEL_DAP2 17
+#define TEGRA_DAS_DAP_SEL_DAP3 18
+#define TEGRA_DAS_DAP_SEL_DAP4 19
+#define TEGRA_DAS_DAP_SEL_DAP5 20
+
+/* Register TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL */
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL 0x40
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT 3
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE 4
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P 28
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_S 4
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P 24
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_S 4
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P 0
+#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_S 4
+
+/*
+ * Values for:
+ * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL
+ * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL
+ * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL
+ */
+#define TEGRA_DAS_DAC_SEL_DAP1 0
+#define TEGRA_DAS_DAC_SEL_DAP2 1
+#define TEGRA_DAS_DAC_SEL_DAP3 2
+#define TEGRA_DAS_DAC_SEL_DAP4 3
+#define TEGRA_DAS_DAC_SEL_DAP5 4
+
+/*
+ * Names/IDs of the DACs/DAPs.
+ */
+
+#define TEGRA_DAS_DAP_ID_1 0
+#define TEGRA_DAS_DAP_ID_2 1
+#define TEGRA_DAS_DAP_ID_3 2
+#define TEGRA_DAS_DAP_ID_4 3
+#define TEGRA_DAS_DAP_ID_5 4
+
+#define TEGRA_DAS_DAC_ID_1 0
+#define TEGRA_DAS_DAC_ID_2 1
+#define TEGRA_DAS_DAC_ID_3 2
+
+struct tegra_das {
+ struct device *dev;
+ void __iomem *regs;
+ struct dentry *debug;
+};
+
+/*
+ * Terminology:
+ * DAS: Digital audio switch (HW module controlled by this driver)
+ * DAP: Digital audio port (port/pins on Tegra device)
+ * DAC: Digital audio controller (e.g. I2S or AC97 controller elsewhere)
+ *
+ * The Tegra DAS is a mux/cross-bar which can connect each DAP to a specific
+ * DAC, or another DAP. When DAPs are connected, one must be the master and
+ * one the slave. Each DAC allows selection of a specific DAP for input, to
+ * cater for the case where N DAPs are connected to 1 DAC for broadcast
+ * output.
+ *
+ * This driver is dumb; no attempt is made to ensure that a valid routing
+ * configuration is programmed.
+ */
+
+/*
+ * Connect a DAP to a DAC
+ * dap_id: DAP to connect: TEGRA_DAS_DAP_ID_*
+ * dac_sel: DAC to connect to: TEGRA_DAS_DAP_SEL_DAC*
+ */
+extern int tegra_das_connect_dap_to_dac(int dap_id, int dac_sel);
+
+/*
+ * Connect a DAP to another DAP
+ * dap_id: DAP to connect: TEGRA_DAS_DAP_ID_*
+ * other_dap_sel: DAP to connect to: TEGRA_DAS_DAP_SEL_DAP*
+ * master: Is this DAP the master (1) or slave (0)
+ * sdata1rx: Is this DAP's SDATA1 pin RX (1) or TX (0)
+ * sdata2rx: Is this DAP's SDATA2 pin RX (1) or TX (0)
+ */
+extern int tegra_das_connect_dap_to_dap(int dap_id, int other_dap_sel,
+ int master, int sdata1rx,
+ int sdata2rx);
+
+/*
+ * Connect a DAC's input to a DAP
+ * (DAC outputs are selected by the DAP)
+ * dac_id: DAC ID to connect: TEGRA_DAS_DAC_ID_*
+ * dap_sel: DAP to receive input from: TEGRA_DAS_DAC_SEL_DAP*
+ */
+extern int tegra_das_connect_dac_to_dap(int dac_id, int dap_sel);
+
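+/*
+ * Illustrative routing sketch (not part of this patch): to wire DAP1 and
+ * DAC1 to each other in both directions, as the Tegra I2S driver does for
+ * its hard-coded 1:1 mapping, one would call:
+ *
+ *   tegra_das_connect_dap_to_dac(TEGRA_DAS_DAP_ID_1, TEGRA_DAS_DAP_SEL_DAC1);
+ *   tegra_das_connect_dac_to_dap(TEGRA_DAS_DAC_ID_1, TEGRA_DAS_DAC_SEL_DAP1);
+ *
+ * No validation is performed; callers must ensure the routing is sane for
+ * their board.
+ */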
+#endif
diff --git a/sound/soc/tegra/tegra_i2s.c b/sound/soc/tegra/tegra_i2s.c
new file mode 100644
index 000000000000..4f5e2c90b020
--- /dev/null
+++ b/sound/soc/tegra/tegra_i2s.c
@@ -0,0 +1,503 @@
+/*
+ * tegra_i2s.c - Tegra I2S driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra_das.h"
+#include "tegra_i2s.h"
+
+#define DRV_NAME "tegra-i2s"
+
+static inline void tegra_i2s_write(struct tegra_i2s *i2s, u32 reg, u32 val)
+{
+ __raw_writel(val, i2s->regs + reg);
+}
+
+static inline u32 tegra_i2s_read(struct tegra_i2s *i2s, u32 reg)
+{
+ return __raw_readl(i2s->regs + reg);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_i2s_show(struct seq_file *s, void *unused)
+{
+#define REG(r) { r, #r }
+ static const struct {
+ int offset;
+ const char *name;
+ } regs[] = {
+ REG(TEGRA_I2S_CTRL),
+ REG(TEGRA_I2S_STATUS),
+ REG(TEGRA_I2S_TIMING),
+ REG(TEGRA_I2S_FIFO_SCR),
+ REG(TEGRA_I2S_PCM_CTRL),
+ REG(TEGRA_I2S_NW_CTRL),
+ REG(TEGRA_I2S_TDM_CTRL),
+ REG(TEGRA_I2S_TDM_TX_RX_CTRL),
+ };
+#undef REG
+
+ struct tegra_i2s *i2s = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ u32 val = tegra_i2s_read(i2s, regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+
+ return 0;
+}
+
+static int tegra_i2s_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_i2s_show, inode->i_private);
+}
+
+static const struct file_operations tegra_i2s_debug_fops = {
+ .open = tegra_i2s_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id)
+{
+ char name[] = DRV_NAME ".0";
+
+ snprintf(name, sizeof(name), DRV_NAME".%1d", id);
+ i2s->debug = debugfs_create_file(name, S_IRUGO, snd_soc_debugfs_root,
+ i2s, &tegra_i2s_debug_fops);
+}
+
+static void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
+{
+ if (i2s->debug)
+ debugfs_remove(i2s->debug);
+}
+#else
+static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id)
+{
+}
+
+static inline void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
+{
+}
+#endif
+
+static int tegra_i2s_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_MASTER_ENABLE;
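+ /*
+ * CBS_CFS means the codec is bit/frame clock slave, so the Tegra
+ * controller must drive the clocks (MASTER_ENABLE set); with CBM_CFM
+ * the codec is clock master and the bit stays clear.
+ */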
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_MASTER_ENABLE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~(TEGRA_I2S_CTRL_BIT_FORMAT_MASK |
+ TEGRA_I2S_CTRL_LRCK_MASK);
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_DSP;
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_DSP;
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_R_LOW;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_I2S;
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_RJM;
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_LJM;
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct device *dev = substream->pcm->card->dev;
+ struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ u32 reg;
+ int ret, sample_size, srate, i2sclock, bitcnt;
+
+ i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_BIT_SIZE_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_16;
+ sample_size = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_24;
+ sample_size = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_32;
+ sample_size = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+
+ /* Final "* 2" required by Tegra hardware */
+ i2sclock = srate * params_channels(params) * sample_size * 2;
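+ /*
+ * Worked example (illustrative): 44.1kHz stereo S16_LE gives
+ * i2sclock = 44100 * 2 * 16 * 2 = 2822400Hz, so below
+ * bitcnt = (2822400 / (2 * 44100)) - 1 = 31 with no remainder,
+ * and NON_SYM mode is not needed.
+ */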
+
+ ret = clk_set_rate(i2s->clk_i2s, i2sclock);
+ if (ret) {
+ dev_err(dev, "Can't set I2S clock rate: %d\n", ret);
+ return ret;
+ }
+
+ bitcnt = (i2sclock / (2 * srate)) - 1;
+ if (bitcnt < 0 || bitcnt > TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US)
+ return -EINVAL;
+ reg = bitcnt << TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
+
+ if (i2sclock % (2 * srate))
+ reg |= TEGRA_I2S_TIMING_NON_SYM_ENABLE;
+
+ tegra_i2s_write(i2s, TEGRA_I2S_TIMING, reg);
+
+ tegra_i2s_write(i2s, TEGRA_I2S_FIFO_SCR,
+ TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
+ TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
+
+ return 0;
+}
+
+static void tegra_i2s_start_playback(struct tegra_i2s *i2s)
+{
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_FIFO1_ENABLE;
+ tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static void tegra_i2s_stop_playback(struct tegra_i2s *i2s)
+{
+ i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_FIFO1_ENABLE;
+ tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static void tegra_i2s_start_capture(struct tegra_i2s *i2s)
+{
+ i2s->reg_ctrl |= TEGRA_I2S_CTRL_FIFO2_ENABLE;
+ tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static void tegra_i2s_stop_capture(struct tegra_i2s *i2s)
+{
+ i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_FIFO2_ENABLE;
+ tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static int tegra_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (!i2s->clk_refs)
+ clk_enable(i2s->clk_i2s);
+ i2s->clk_refs++;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra_i2s_start_playback(i2s);
+ else
+ tegra_i2s_start_capture(i2s);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra_i2s_stop_playback(i2s);
+ else
+ tegra_i2s_stop_capture(i2s);
+ i2s->clk_refs--;
+ if (!i2s->clk_refs)
+ clk_disable(i2s->clk_i2s);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_i2s_probe(struct snd_soc_dai *dai)
+{
+ struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ dai->capture_dma_data = &i2s->capture_dma_data;
+ dai->playback_dma_data = &i2s->playback_dma_data;
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops tegra_i2s_dai_ops = {
+ .set_fmt = tegra_i2s_set_fmt,
+ .hw_params = tegra_i2s_hw_params,
+ .trigger = tegra_i2s_trigger,
+};
+
+struct snd_soc_dai_driver tegra_i2s_dai[] = {
+ {
+ .name = DRV_NAME ".0",
+ .probe = tegra_i2s_probe,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &tegra_i2s_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = DRV_NAME ".1",
+ .probe = tegra_i2s_probe,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &tegra_i2s_dai_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static __devinit int tegra_i2s_platform_probe(struct platform_device *pdev)
+{
+ struct tegra_i2s *i2s;
+ char clk_name[12]; /* tegra-i2s.0 */
+ struct resource *mem, *memregion, *dmareq;
+ int ret;
+
+ if ((pdev->id < 0) ||
+ (pdev->id >= ARRAY_SIZE(tegra_i2s_dai))) {
+ dev_err(&pdev->dev, "ID %d out of range\n", pdev->id);
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: Until a codec driver exists for the tegra DAS, hard-code a
+ * 1:1 mapping between audio controllers and audio ports.
+ */
+ ret = tegra_das_connect_dap_to_dac(TEGRA_DAS_DAP_ID_1 + pdev->id,
+ TEGRA_DAS_DAP_SEL_DAC1 + pdev->id);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set up DAP connection\n");
+ return ret;
+ }
+ ret = tegra_das_connect_dac_to_dap(TEGRA_DAS_DAC_ID_1 + pdev->id,
+ TEGRA_DAS_DAC_SEL_DAP1 + pdev->id);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set up DAC connection\n");
+ return ret;
+ }
+
+ i2s = kzalloc(sizeof(struct tegra_i2s), GFP_KERNEL);
+ if (!i2s) {
+ dev_err(&pdev->dev, "Can't allocate tegra_i2s\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ dev_set_drvdata(&pdev->dev, i2s);
+
+ snprintf(clk_name, sizeof(clk_name), DRV_NAME ".%d", pdev->id);
+ i2s->clk_i2s = clk_get_sys(clk_name, NULL);
+ if (IS_ERR(i2s->clk_i2s)) {
+ dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
+ ret = PTR_ERR(i2s->clk_i2s);
+ goto err_free;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_clk_put;
+ }
+
+ dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!dmareq) {
+ dev_err(&pdev->dev, "No DMA resource\n");
+ ret = -ENODEV;
+ goto err_clk_put;
+ }
+
+ memregion = request_mem_region(mem->start, resource_size(mem),
+ DRV_NAME);
+ if (!memregion) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_clk_put;
+ }
+
+ i2s->regs = ioremap(mem->start, resource_size(mem));
+ if (!i2s->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ i2s->capture_dma_data.addr = mem->start + TEGRA_I2S_FIFO2;
+ i2s->capture_dma_data.wrap = 4;
+ i2s->capture_dma_data.width = 32;
+ i2s->capture_dma_data.req_sel = dmareq->start;
+
+ i2s->playback_dma_data.addr = mem->start + TEGRA_I2S_FIFO1;
+ i2s->playback_dma_data.wrap = 4;
+ i2s->playback_dma_data.width = 32;
+ i2s->playback_dma_data.req_sel = dmareq->start;
+
+ i2s->reg_ctrl = TEGRA_I2S_CTRL_FIFO_FORMAT_PACKED;
+
+ ret = snd_soc_register_dai(&pdev->dev, &tegra_i2s_dai[pdev->id]);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
+ goto err_unmap;
+ }
+
+ tegra_i2s_debug_add(i2s, pdev->id);
+
+ return 0;
+
+err_unmap:
+ iounmap(i2s->regs);
+err_release:
+ release_mem_region(mem->start, resource_size(mem));
+err_clk_put:
+ clk_put(i2s->clk_i2s);
+err_free:
+ kfree(i2s);
+exit:
+ return ret;
+}
+
+static int __devexit tegra_i2s_platform_remove(struct platform_device *pdev)
+{
+ struct tegra_i2s *i2s = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+
+ snd_soc_unregister_dai(&pdev->dev);
+
+ tegra_i2s_debug_remove(i2s);
+
+ iounmap(i2s->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(i2s->clk_i2s);
+
+ kfree(i2s);
+
+ return 0;
+}
+
+static struct platform_driver tegra_i2s_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_i2s_platform_probe,
+ .remove = __devexit_p(tegra_i2s_platform_remove),
+};
+
+static int __init snd_tegra_i2s_init(void)
+{
+ return platform_driver_register(&tegra_i2s_driver);
+}
+module_init(snd_tegra_i2s_init);
+
+static void __exit snd_tegra_i2s_exit(void)
+{
+ platform_driver_unregister(&tegra_i2s_driver);
+}
+module_exit(snd_tegra_i2s_exit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra I2S ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_i2s.h b/sound/soc/tegra/tegra_i2s.h
new file mode 100644
index 000000000000..2b38a096f46c
--- /dev/null
+++ b/sound/soc/tegra/tegra_i2s.h
@@ -0,0 +1,165 @@
+/*
+ * tegra_i2s.h - Definitions for Tegra I2S driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA_I2S_H__
+#define __TEGRA_I2S_H__
+
+#include "tegra_pcm.h"
+
+/* Register offsets from TEGRA_I2S1_BASE and TEGRA_I2S2_BASE */
+
+#define TEGRA_I2S_CTRL 0x00
+#define TEGRA_I2S_STATUS 0x04
+#define TEGRA_I2S_TIMING 0x08
+#define TEGRA_I2S_FIFO_SCR 0x0c
+#define TEGRA_I2S_PCM_CTRL 0x10
+#define TEGRA_I2S_NW_CTRL 0x14
+#define TEGRA_I2S_TDM_CTRL 0x20
+#define TEGRA_I2S_TDM_TX_RX_CTRL 0x24
+#define TEGRA_I2S_FIFO1 0x40
+#define TEGRA_I2S_FIFO2 0x80
+
+/* Fields in TEGRA_I2S_CTRL */
+
+#define TEGRA_I2S_CTRL_FIFO2_TX_ENABLE (1 << 30)
+#define TEGRA_I2S_CTRL_FIFO1_ENABLE (1 << 29)
+#define TEGRA_I2S_CTRL_FIFO2_ENABLE (1 << 28)
+#define TEGRA_I2S_CTRL_FIFO1_RX_ENABLE (1 << 27)
+#define TEGRA_I2S_CTRL_FIFO_LPBK_ENABLE (1 << 26)
+#define TEGRA_I2S_CTRL_MASTER_ENABLE (1 << 25)
+
+#define TEGRA_I2S_LRCK_LEFT_LOW 0
+#define TEGRA_I2S_LRCK_RIGHT_LOW 1
+
+#define TEGRA_I2S_CTRL_LRCK_SHIFT 24
+#define TEGRA_I2S_CTRL_LRCK_MASK (1 << TEGRA_I2S_CTRL_LRCK_SHIFT)
+#define TEGRA_I2S_CTRL_LRCK_L_LOW (TEGRA_I2S_LRCK_LEFT_LOW << TEGRA_I2S_CTRL_LRCK_SHIFT)
+#define TEGRA_I2S_CTRL_LRCK_R_LOW (TEGRA_I2S_LRCK_RIGHT_LOW << TEGRA_I2S_CTRL_LRCK_SHIFT)
+
+#define TEGRA_I2S_BIT_FORMAT_I2S 0
+#define TEGRA_I2S_BIT_FORMAT_RJM 1
+#define TEGRA_I2S_BIT_FORMAT_LJM 2
+#define TEGRA_I2S_BIT_FORMAT_DSP 3
+
+#define TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT 10
+#define TEGRA_I2S_CTRL_BIT_FORMAT_MASK (3 << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_FORMAT_I2S (TEGRA_I2S_BIT_FORMAT_I2S << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_FORMAT_RJM (TEGRA_I2S_BIT_FORMAT_RJM << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_FORMAT_LJM (TEGRA_I2S_BIT_FORMAT_LJM << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_FORMAT_DSP (TEGRA_I2S_BIT_FORMAT_DSP << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
+
+#define TEGRA_I2S_BIT_SIZE_16 0
+#define TEGRA_I2S_BIT_SIZE_20 1
+#define TEGRA_I2S_BIT_SIZE_24 2
+#define TEGRA_I2S_BIT_SIZE_32 3
+
+#define TEGRA_I2S_CTRL_BIT_SIZE_SHIFT 8
+#define TEGRA_I2S_CTRL_BIT_SIZE_MASK (3 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_SIZE_16 (TEGRA_I2S_BIT_SIZE_16 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_SIZE_20 (TEGRA_I2S_BIT_SIZE_20 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_SIZE_24 (TEGRA_I2S_BIT_SIZE_24 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA_I2S_CTRL_BIT_SIZE_32 (TEGRA_I2S_BIT_SIZE_32 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
+
+#define TEGRA_I2S_FIFO_16_LSB 0
+#define TEGRA_I2S_FIFO_20_LSB 1
+#define TEGRA_I2S_FIFO_24_LSB 2
+#define TEGRA_I2S_FIFO_32 3
+#define TEGRA_I2S_FIFO_PACKED 7
+
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT 4
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_MASK (7 << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_16_LSB (TEGRA_I2S_FIFO_16_LSB << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_20_LSB (TEGRA_I2S_FIFO_20_LSB << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_24_LSB (TEGRA_I2S_FIFO_24_LSB << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_32 (TEGRA_I2S_FIFO_32 << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA_I2S_CTRL_FIFO_FORMAT_PACKED (TEGRA_I2S_FIFO_PACKED << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
+
+#define TEGRA_I2S_CTRL_IE_FIFO1_ERR (1 << 3)
+#define TEGRA_I2S_CTRL_IE_FIFO2_ERR (1 << 2)
+#define TEGRA_I2S_CTRL_QE_FIFO1 (1 << 1)
+#define TEGRA_I2S_CTRL_QE_FIFO2 (1 << 0)
+
+/* Fields in TEGRA_I2S_STATUS */
+
+#define TEGRA_I2S_STATUS_FIFO1_RDY (1 << 31)
+#define TEGRA_I2S_STATUS_FIFO2_RDY (1 << 30)
+#define TEGRA_I2S_STATUS_FIFO1_BSY (1 << 29)
+#define TEGRA_I2S_STATUS_FIFO2_BSY (1 << 28)
+#define TEGRA_I2S_STATUS_FIFO1_ERR (1 << 3)
+#define TEGRA_I2S_STATUS_FIFO2_ERR (1 << 2)
+#define TEGRA_I2S_STATUS_QS_FIFO1 (1 << 1)
+#define TEGRA_I2S_STATUS_QS_FIFO2 (1 << 0)
+
+/* Fields in TEGRA_I2S_TIMING */
+
+#define TEGRA_I2S_TIMING_NON_SYM_ENABLE (1 << 12)
+#define TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT 0
+#define TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US 0x7fff
+#define TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK (TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US << TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT)
+
+/* Fields in TEGRA_I2S_FIFO_SCR */
+
+#define TEGRA_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT 24
+#define TEGRA_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT 16
+#define TEGRA_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK 0x3f
+
+#define TEGRA_I2S_FIFO_SCR_FIFO2_CLR (1 << 12)
+#define TEGRA_I2S_FIFO_SCR_FIFO1_CLR (1 << 8)
+
+#define TEGRA_I2S_FIFO_ATN_LVL_ONE_SLOT 0
+#define TEGRA_I2S_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define TEGRA_I2S_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define TEGRA_I2S_FIFO_ATN_LVL_TWELVE_SLOTS 3
+
+#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT 4
+#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK (3 << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_ONE_SLOT (TEGRA_I2S_FIFO_ATN_LVL_ONE_SLOT << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_FOUR_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_EIGHT_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_EIGHT_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_TWELVE_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_TWELVE_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+
+#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT 0
+#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK (3 << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_ONE_SLOT (TEGRA_I2S_FIFO_ATN_LVL_ONE_SLOT << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_FOUR_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_EIGHT_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_EIGHT_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_TWELVE_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_TWELVE_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+
+struct tegra_i2s {
+ struct clk *clk_i2s;
+ int clk_refs;
+ struct tegra_pcm_dma_params capture_dma_data;
+ struct tegra_pcm_dma_params playback_dma_data;
+ void __iomem *regs;
+ struct dentry *debug;
+ u32 reg_ctrl;
+};
+
+#endif
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
new file mode 100644
index 000000000000..3c271f953582
--- /dev/null
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -0,0 +1,404 @@
+/*
+ * tegra_pcm.c - Tegra PCM driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ * Vijay Mali <vmali@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra_pcm.h"
+
+#define DRV_NAME "tegra-pcm-audio"
+
+static const struct snd_pcm_hardware tegra_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .period_bytes_min = 1024,
+ .period_bytes_max = PAGE_SIZE,
+ .periods_min = 2,
+ .periods_max = 8,
+ .buffer_bytes_max = PAGE_SIZE * 8,
+ .fifo_size = 4,
+};
+
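+/*
+ * Two period-sized DMA requests are kept in flight and alternated
+ * (ping-pong) via dma_req_idx; each completion requeues the finished
+ * request at the next buffer position, wrapping dma_pos back to zero at
+ * the end of the ring buffer.
+ */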
+static void tegra_pcm_queue_dma(struct tegra_runtime_data *prtd)
+{
+ struct snd_pcm_substream *substream = prtd->substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ struct tegra_dma_req *dma_req;
+ unsigned long addr;
+
+ dma_req = &prtd->dma_req[prtd->dma_req_idx];
+ prtd->dma_req_idx = 1 - prtd->dma_req_idx;
+
+ addr = buf->addr + prtd->dma_pos;
+ prtd->dma_pos += dma_req->size;
+ if (prtd->dma_pos >= prtd->dma_pos_end)
+ prtd->dma_pos = 0;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_req->source_addr = addr;
+ else
+ dma_req->dest_addr = addr;
+
+ tegra_dma_enqueue_req(prtd->dma_chan, dma_req);
+}
+
+static void dma_complete_callback(struct tegra_dma_req *req)
+{
+ struct tegra_runtime_data *prtd = (struct tegra_runtime_data *)req->dev;
+ struct snd_pcm_substream *substream = prtd->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ spin_lock(&prtd->lock);
+
+ if (!prtd->running) {
+ spin_unlock(&prtd->lock);
+ return;
+ }
+
+ if (++prtd->period_index >= runtime->periods)
+ prtd->period_index = 0;
+
+ tegra_pcm_queue_dma(prtd);
+
+ spin_unlock(&prtd->lock);
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct tegra_pcm_dma_params *dmap)
+{
+ req->complete = dma_complete_callback;
+ req->to_memory = false;
+ req->dest_addr = dmap->addr;
+ req->dest_wrap = dmap->wrap;
+ req->source_bus_width = 32;
+ req->source_wrap = 0;
+ req->dest_bus_width = dmap->width;
+ req->req_sel = dmap->req_sel;
+}
+
+static void setup_dma_rx_request(struct tegra_dma_req *req,
+ struct tegra_pcm_dma_params *dmap)
+{
+ req->complete = dma_complete_callback;
+ req->to_memory = true;
+ req->source_addr = dmap->addr;
+ req->dest_wrap = 0;
+ req->source_bus_width = dmap->width;
+ req->source_wrap = dmap->wrap;
+ req->dest_bus_width = 32;
+ req->req_sel = dmap->req_sel;
+}
+
+static int tegra_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct tegra_runtime_data *prtd;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_pcm_dma_params *dmap;
+ int ret = 0;
+
+ prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL);
+ if (prtd == NULL)
+ return -ENOMEM;
+
+ runtime->private_data = prtd;
+ prtd->substream = substream;
+
+ spin_lock_init(&prtd->lock);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ setup_dma_tx_request(&prtd->dma_req[0], dmap);
+ setup_dma_tx_request(&prtd->dma_req[1], dmap);
+ } else {
+ dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ setup_dma_rx_request(&prtd->dma_req[0], dmap);
+ setup_dma_rx_request(&prtd->dma_req[1], dmap);
+ }
+
+ prtd->dma_req[0].dev = prtd;
+ prtd->dma_req[1].dev = prtd;
+
+ prtd->dma_chan = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
+ if (prtd->dma_chan == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Set HW params now that initialization is complete */
+ snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);
+
+ /* Ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ if (prtd->dma_chan)
+ tegra_dma_free_channel(prtd->dma_chan);
+
+ kfree(prtd);
+
+ return ret;
+}
+
+static int tegra_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct tegra_runtime_data *prtd = runtime->private_data;
+
+ tegra_dma_free_channel(prtd->dma_chan);
+
+ kfree(prtd);
+
+ return 0;
+}
+
+static int tegra_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct tegra_runtime_data *prtd = runtime->private_data;
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ prtd->dma_req[0].size = params_period_bytes(params);
+ prtd->dma_req[1].size = prtd->dma_req[0].size;
+
+ return 0;
+}
+
+static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_set_runtime_buffer(substream, NULL);
+
+ return 0;
+}
+
+static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct tegra_runtime_data *prtd = runtime->private_data;
+ unsigned long flags;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ prtd->dma_pos = 0;
+ prtd->dma_pos_end = frames_to_bytes(runtime, runtime->periods * runtime->period_size);
+ prtd->period_index = 0;
+ prtd->dma_req_idx = 0;
+ /* Fall-through */
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ spin_lock_irqsave(&prtd->lock, flags);
+ prtd->running = 1;
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ tegra_pcm_queue_dma(prtd);
+ tegra_pcm_queue_dma(prtd);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ spin_lock_irqsave(&prtd->lock, flags);
+ prtd->running = 0;
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[0]);
+ tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[1]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
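+/*
+ * The stream position is tracked with period granularity only:
+ * period_index is advanced from the DMA completion path, so the reported
+ * pointer moves in whole-period steps.
+ */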
+static snd_pcm_uframes_t tegra_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct tegra_runtime_data *prtd = runtime->private_data;
+
+ return prtd->period_index * runtime->period_size;
+}
+
+
+static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+ runtime->dma_area,
+ runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops tegra_pcm_ops = {
+ .open = tegra_pcm_open,
+ .close = tegra_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = tegra_pcm_hw_params,
+ .hw_free = tegra_pcm_hw_free,
+ .trigger = tegra_pcm_trigger,
+ .pointer = tegra_pcm_pointer,
+ .mmap = tegra_pcm_mmap,
+};
+
+static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = tegra_pcm_hardware.buffer_bytes_max;
+
+ buf->area = dma_alloc_writecombine(pcm->card->dev, size,
+ &buf->addr, GFP_KERNEL);
+ if (!buf->area)
+ return -ENOMEM;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+ buf->bytes = size;
+
+ return 0;
+}
+
+static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+
+ if (!buf->area)
+ return;
+
+ dma_free_writecombine(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+}
+
+static u64 tegra_dma_mask = DMA_BIT_MASK(32);
+
+static int tegra_pcm_new(struct snd_card *card,
+ struct snd_soc_dai *dai, struct snd_pcm *pcm)
+{
+ int ret = 0;
+
+ if (!card->dev->dma_mask)
+ card->dev->dma_mask = &tegra_dma_mask;
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = 0xffffffff;
+
+ if (dai->driver->playback.channels_min) {
+ ret = tegra_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ goto err;
+ }
+
+ if (dai->driver->capture.channels_min) {
+ ret = tegra_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret)
+ goto err_free_play;
+ }
+
+ return 0;
+
+err_free_play:
+ tegra_pcm_deallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK);
+err:
+ return ret;
+}
+
+static void tegra_pcm_free(struct snd_pcm *pcm)
+{
+ tegra_pcm_deallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE);
+ tegra_pcm_deallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK);
+}
+
+struct snd_soc_platform_driver tegra_pcm_platform = {
+ .ops = &tegra_pcm_ops,
+ .pcm_new = tegra_pcm_new,
+ .pcm_free = tegra_pcm_free,
+};
+
+static int __devinit tegra_pcm_platform_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_platform(&pdev->dev, &tegra_pcm_platform);
+}
+
+static int __devexit tegra_pcm_platform_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_platform(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver tegra_pcm_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_pcm_platform_probe,
+ .remove = __devexit_p(tegra_pcm_platform_remove),
+};
+
+static int __init snd_tegra_pcm_init(void)
+{
+ return platform_driver_register(&tegra_pcm_driver);
+}
+module_init(snd_tegra_pcm_init);
+
+static void __exit snd_tegra_pcm_exit(void)
+{
+ platform_driver_unregister(&tegra_pcm_driver);
+}
+module_exit(snd_tegra_pcm_exit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra PCM ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_pcm.h b/sound/soc/tegra/tegra_pcm.h
new file mode 100644
index 000000000000..dbb90339fe0d
--- /dev/null
+++ b/sound/soc/tegra/tegra_pcm.h
@@ -0,0 +1,55 @@
+/*
+ * tegra_pcm.h - Definitions for Tegra PCM driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA_PCM_H__
+#define __TEGRA_PCM_H__
+
+#include <mach/dma.h>
+
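+/*
+ * DMA parameters handed from the DAI driver to the PCM driver; e.g. the
+ * I2S driver fills addr with the physical FIFO address, wrap and width
+ * with the FIFO access pattern (4-word wrap, 32-bit accesses), and
+ * req_sel with the DMA request selector taken from its platform DMA
+ * resource.
+ */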
+struct tegra_pcm_dma_params {
+ unsigned long addr;
+ unsigned long wrap;
+ unsigned long width;
+ unsigned long req_sel;
+};
+
+struct tegra_runtime_data {
+ struct snd_pcm_substream *substream;
+ spinlock_t lock;
+ int running;
+ int dma_pos;
+ int dma_pos_end;
+ int period_index;
+ int dma_req_idx;
+ struct tegra_dma_req dma_req[2];
+ struct tegra_dma_channel *dma_chan;
+};
+
+#endif
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index f8bcfc30f800..ad7d4d7d9237 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -1002,7 +1002,7 @@ static int __devinit snd_amd7930_create(struct snd_card *card,
return 0;
}
-static int __devinit amd7930_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit amd7930_sbus_probe(struct platform_device *op)
{
struct resource *rp = &op->resource[0];
static int dev_num;
@@ -1064,7 +1064,7 @@ static const struct of_device_id amd7930_match[] = {
{},
};
-static struct of_platform_driver amd7930_sbus_driver = {
+static struct platform_driver amd7930_sbus_driver = {
.driver = {
.name = "audio",
.owner = THIS_MODULE,
@@ -1075,7 +1075,7 @@ static struct of_platform_driver amd7930_sbus_driver = {
static int __init amd7930_init(void)
{
- return of_register_platform_driver(&amd7930_sbus_driver);
+ return platform_driver_register(&amd7930_sbus_driver);
}
static void __exit amd7930_exit(void)
@@ -1092,7 +1092,7 @@ static void __exit amd7930_exit(void)
amd7930_list = NULL;
- of_unregister_platform_driver(&amd7930_sbus_driver);
+ platform_driver_unregister(&amd7930_sbus_driver);
}
module_init(amd7930_init);
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index c276086c3b57..0e618f82808c 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1856,7 +1856,7 @@ static int __devinit snd_cs4231_sbus_create(struct snd_card *card,
return 0;
}
-static int __devinit cs4231_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit cs4231_sbus_probe(struct platform_device *op)
{
struct resource *rp = &op->resource[0];
struct snd_card *card;
@@ -2048,7 +2048,7 @@ static int __devinit snd_cs4231_ebus_create(struct snd_card *card,
return 0;
}
-static int __devinit cs4231_ebus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit cs4231_ebus_probe(struct platform_device *op)
{
struct snd_card *card;
int err;
@@ -2072,16 +2072,16 @@ static int __devinit cs4231_ebus_probe(struct platform_device *op, const struct
}
#endif
-static int __devinit cs4231_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit cs4231_probe(struct platform_device *op)
{
#ifdef EBUS_SUPPORT
if (!strcmp(op->dev.of_node->parent->name, "ebus"))
- return cs4231_ebus_probe(op, match);
+ return cs4231_ebus_probe(op);
#endif
#ifdef SBUS_SUPPORT
if (!strcmp(op->dev.of_node->parent->name, "sbus") ||
!strcmp(op->dev.of_node->parent->name, "sbi"))
- return cs4231_sbus_probe(op, match);
+ return cs4231_sbus_probe(op);
#endif
return -ENODEV;
}
@@ -2108,7 +2108,7 @@ static const struct of_device_id cs4231_match[] = {
MODULE_DEVICE_TABLE(of, cs4231_match);
-static struct of_platform_driver cs4231_driver = {
+static struct platform_driver cs4231_driver = {
.driver = {
.name = "audio",
.owner = THIS_MODULE,
@@ -2120,12 +2120,12 @@ static struct of_platform_driver cs4231_driver = {
static int __init cs4231_init(void)
{
- return of_register_platform_driver(&cs4231_driver);
+ return platform_driver_register(&cs4231_driver);
}
static void __exit cs4231_exit(void)
{
- of_unregister_platform_driver(&cs4231_driver);
+ platform_driver_unregister(&cs4231_driver);
}
module_init(cs4231_init);
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 39cd5d69d051..73f9cbacc077 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2592,7 +2592,7 @@ static void snd_dbri_free(struct snd_dbri *dbri)
(void *)dbri->dma, dbri->dma_dvma);
}
-static int __devinit dbri_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit dbri_probe(struct platform_device *op)
{
struct snd_dbri *dbri;
struct resource *rp;
@@ -2686,7 +2686,7 @@ static const struct of_device_id dbri_match[] = {
MODULE_DEVICE_TABLE(of, dbri_match);
-static struct of_platform_driver dbri_sbus_driver = {
+static struct platform_driver dbri_sbus_driver = {
.driver = {
.name = "dbri",
.owner = THIS_MODULE,
@@ -2699,12 +2699,12 @@ static struct of_platform_driver dbri_sbus_driver = {
/* Probe for the dbri chip and then attach the driver. */
static int __init dbri_init(void)
{
- return of_register_platform_driver(&dbri_sbus_driver);
+ return platform_driver_register(&dbri_sbus_driver);
}
static void __exit dbri_exit(void)
{
- of_unregister_platform_driver(&dbri_sbus_driver);
+ platform_driver_unregister(&dbri_sbus_driver);
}
module_init(dbri_init);
diff --git a/sound/usb/6fire/Makefile b/sound/usb/6fire/Makefile
new file mode 100644
index 000000000000..dfce6ec53513
--- /dev/null
+++ b/sound/usb/6fire/Makefile
@@ -0,0 +1,3 @@
+snd-usb-6fire-objs += chip.o comm.o midi.o control.o firmware.o pcm.o
+obj-$(CONFIG_SND_USB_6FIRE) += snd-usb-6fire.o
+
diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
new file mode 100644
index 000000000000..c7dca7b0b9fe
--- /dev/null
+++ b/sound/usb/6fire/chip.c
@@ -0,0 +1,232 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Main routines and module definitions.
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "firmware.h"
+#include "pcm.h"
+#include "control.h"
+#include "comm.h"
+#include "midi.h"
+
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <sound/initval.h>
+
+MODULE_AUTHOR("Torsten Schenk <torsten.schenk@zoho.com>");
+MODULE_DESCRIPTION("TerraTec DMX 6Fire USB audio driver, version 0.3.0");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("{{TerraTec, DMX 6Fire USB}}");
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable card */
+static struct sfire_chip *chips[SNDRV_CARDS] = SNDRV_DEFAULT_PTR;
+static struct usb_device *devices[SNDRV_CARDS] = SNDRV_DEFAULT_PTR;
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for the 6fire sound device");
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string for the 6fire sound device.");
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable the 6fire sound device.");
+
+static DEFINE_MUTEX(register_mutex);
+
+static void usb6fire_chip_abort(struct sfire_chip *chip)
+{
+ if (chip) {
+ if (chip->pcm)
+ usb6fire_pcm_abort(chip);
+ if (chip->midi)
+ usb6fire_midi_abort(chip);
+ if (chip->comm)
+ usb6fire_comm_abort(chip);
+ if (chip->control)
+ usb6fire_control_abort(chip);
+ if (chip->card) {
+ snd_card_disconnect(chip->card);
+ snd_card_free_when_closed(chip->card);
+ chip->card = NULL;
+ }
+ }
+}
+
+static void usb6fire_chip_destroy(struct sfire_chip *chip)
+{
+ if (chip) {
+ if (chip->pcm)
+ usb6fire_pcm_destroy(chip);
+ if (chip->midi)
+ usb6fire_midi_destroy(chip);
+ if (chip->comm)
+ usb6fire_comm_destroy(chip);
+ if (chip->control)
+ usb6fire_control_destroy(chip);
+ if (chip->card)
+ snd_card_free(chip->card);
+ }
+}
+
+static int __devinit usb6fire_chip_probe(struct usb_interface *intf,
+ const struct usb_device_id *usb_id)
+{
+ int ret;
+ int i;
+ struct sfire_chip *chip = NULL;
+ struct usb_device *device = interface_to_usbdev(intf);
+ int regidx = -1; /* index in module parameter array */
+ struct snd_card *card = NULL;
+
+ /* check whether we already serve this card and return if so */
+ mutex_lock(&register_mutex);
+ for (i = 0; i < SNDRV_CARDS; i++) {
+ if (devices[i] == device) {
+ if (chips[i])
+ chips[i]->intf_count++;
+ usb_set_intfdata(intf, chips[i]);
+ mutex_unlock(&register_mutex);
+ return 0;
+ } else if (regidx < 0)
+ regidx = i;
+ }
+ if (regidx < 0) {
+ mutex_unlock(&register_mutex);
+ snd_printk(KERN_ERR PREFIX "too many cards registered.\n");
+ return -ENODEV;
+ }
+ devices[regidx] = device;
+ mutex_unlock(&register_mutex);
+
+ /* check if firmware is present on the device; upload it if not */
+ ret = usb6fire_fw_init(intf);
+ if (ret < 0)
+ return ret;
+ else if (ret == FW_NOT_READY) /* firmware update performed */
+ return 0;
+
+ /* if we are here, the card can be registered with ALSA. */
+ if (usb_set_interface(device, 0, 0) != 0) {
+ snd_printk(KERN_ERR PREFIX "can't set first interface.\n");
+ return -EIO;
+ }
+ ret = snd_card_create(index[regidx], id[regidx], THIS_MODULE,
+ sizeof(struct sfire_chip), &card);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "cannot create alsa card.\n");
+ return ret;
+ }
+ strcpy(card->driver, "6FireUSB");
+ strcpy(card->shortname, "TerraTec DMX6FireUSB");
+ sprintf(card->longname, "%s at %d:%d", card->shortname,
+ device->bus->busnum, device->devnum);
+ snd_card_set_dev(card, &intf->dev);
+
+ chip = card->private_data;
+ chips[regidx] = chip;
+ chip->dev = device;
+ chip->regidx = regidx;
+ chip->intf_count = 1;
+ chip->card = card;
+
+ ret = usb6fire_comm_init(chip);
+ if (ret < 0) {
+ usb6fire_chip_destroy(chip);
+ return ret;
+ }
+
+ ret = usb6fire_midi_init(chip);
+ if (ret < 0) {
+ usb6fire_chip_destroy(chip);
+ return ret;
+ }
+
+ ret = usb6fire_pcm_init(chip);
+ if (ret < 0) {
+ usb6fire_chip_destroy(chip);
+ return ret;
+ }
+
+ ret = usb6fire_control_init(chip);
+ if (ret < 0) {
+ usb6fire_chip_destroy(chip);
+ return ret;
+ }
+
+ ret = snd_card_register(card);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "cannot register card.\n");
+ usb6fire_chip_destroy(chip);
+ return ret;
+ }
+ usb_set_intfdata(intf, chip);
+ return 0;
+}
+
+static void usb6fire_chip_disconnect(struct usb_interface *intf)
+{
+ struct sfire_chip *chip;
+ struct snd_card *card;
+
+ chip = usb_get_intfdata(intf);
+ if (chip) { /* if !chip, fw upload has been performed */
+ card = chip->card;
+ chip->intf_count--;
+ if (!chip->intf_count) {
+ mutex_lock(&register_mutex);
+ devices[chip->regidx] = NULL;
+ chips[chip->regidx] = NULL;
+ mutex_unlock(&register_mutex);
+
+ chip->shutdown = true;
+ usb6fire_chip_abort(chip);
+ usb6fire_chip_destroy(chip);
+ }
+ }
+}
+
+static struct usb_device_id device_table[] = {
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x0ccd,
+ .idProduct = 0x0080
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, device_table);
+
+static struct usb_driver driver = {
+ .name = "snd-usb-6fire",
+ .probe = usb6fire_chip_probe,
+ .disconnect = usb6fire_chip_disconnect,
+ .id_table = device_table,
+};
+
+static int __init usb6fire_chip_init(void)
+{
+ return usb_register(&driver);
+}
+
+static void __exit usb6fire_chip_cleanup(void)
+{
+ usb_deregister(&driver);
+}
+
+module_init(usb6fire_chip_init);
+module_exit(usb6fire_chip_cleanup);
diff --git a/sound/usb/6fire/chip.h b/sound/usb/6fire/chip.h
new file mode 100644
index 000000000000..d11e5cb520f0
--- /dev/null
+++ b/sound/usb/6fire/chip.h
@@ -0,0 +1,32 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef USB6FIRE_CHIP_H
+#define USB6FIRE_CHIP_H
+
+#include "common.h"
+
+struct sfire_chip {
+ struct usb_device *dev;
+ struct snd_card *card;
+ int intf_count; /* number of registered interfaces */
+ int regidx; /* index in module parameter arrays */
+ bool shutdown;
+
+ struct midi_runtime *midi;
+ struct pcm_runtime *pcm;
+ struct control_runtime *control;
+ struct comm_runtime *comm;
+};
+#endif /* USB6FIRE_CHIP_H */
+
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
new file mode 100644
index 000000000000..c994daa57af2
--- /dev/null
+++ b/sound/usb/6fire/comm.c
@@ -0,0 +1,176 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Device communications
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "comm.h"
+#include "chip.h"
+#include "midi.h"
+
+enum {
+ COMM_EP = 1,
+ COMM_FPGA_EP = 2
+};
+
+static void usb6fire_comm_init_urb(struct comm_runtime *rt, struct urb *urb,
+ u8 *buffer, void *context, void(*handler)(struct urb *urb))
+{
+ usb_init_urb(urb);
+ urb->transfer_buffer = buffer;
+ urb->pipe = usb_sndintpipe(rt->chip->dev, COMM_EP);
+ urb->complete = handler;
+ urb->context = context;
+ urb->interval = 1;
+ urb->dev = rt->chip->dev;
+}
+
+static void usb6fire_comm_receiver_handler(struct urb *urb)
+{
+ struct comm_runtime *rt = urb->context;
+ struct midi_runtime *midi_rt = rt->chip->midi;
+
+ if (!urb->status) {
+ if (rt->receiver_buffer[0] == 0x10) /* midi in event */
+ if (midi_rt)
+ midi_rt->in_received(midi_rt,
+ rt->receiver_buffer + 2,
+ rt->receiver_buffer[1]);
+ }
+
+ if (!rt->chip->shutdown) {
+ urb->status = 0;
+ urb->actual_length = 0;
+ if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+ snd_printk(KERN_WARNING PREFIX
+ "comm data receiver aborted.\n");
+ }
+}
+
+static void usb6fire_comm_init_buffer(u8 *buffer, u8 id, u8 request,
+ u8 reg, u8 vl, u8 vh)
+{
+ buffer[0] = 0x01;
+ buffer[2] = request;
+ buffer[3] = id;
+ switch (request) {
+ case 0x02:
+ buffer[1] = 0x05; /* length (starting at buffer[2]) */
+ buffer[4] = reg;
+ buffer[5] = vl;
+ buffer[6] = vh;
+ break;
+
+ case 0x12:
+ buffer[1] = 0x0b; /* length (starting at buffer[2]) */
+ buffer[4] = 0x00;
+ buffer[5] = 0x18;
+ buffer[6] = 0x05;
+ buffer[7] = 0x00;
+ buffer[8] = 0x01;
+ buffer[9] = 0x00;
+ buffer[10] = 0x9e;
+ buffer[11] = reg;
+ buffer[12] = vl;
+ break;
+
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ buffer[1] = 0x04;
+ buffer[4] = reg;
+ buffer[5] = vl;
+ break;
+ }
+}
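+
+/*
+ * Resulting message layout (noted here for illustration, derived from the
+ * function above): byte 0 is a fixed 0x01 start byte, byte 1 is the length
+ * of everything from byte 2 onwards, byte 2 is the request type, byte 3 the
+ * id, followed by the request-specific payload. For example, a write8 of
+ * request 0x22, reg 0x02, value 0x01 goes out over the interrupt endpoint as
+ * the six bytes 01 04 22 00 02 01 (buffer[1] + 2 bytes in total).
+ */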
+
+static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
+{
+ int ret;
+ int actual_len;
+
+ ret = usb_interrupt_msg(dev, usb_sndintpipe(dev, COMM_EP),
+ buffer, buffer[1] + 2, &actual_len, HZ);
+ if (ret < 0)
+ return ret;
+ else if (actual_len != buffer[1] + 2)
+ return -EIO;
+ return 0;
+}
+
+static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 value)
+{
+ u8 buffer[13]; /* 13: maximum length of message */
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
+ return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+}
+
+static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 vl, u8 vh)
+{
+ u8 buffer[13]; /* 13: maximum length of message */
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
+ return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+}
+
+int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+{
+ struct comm_runtime *rt = kzalloc(sizeof(struct comm_runtime),
+ GFP_KERNEL);
+ struct urb *urb;
+ int ret;
+
+ if (!rt)
+ return -ENOMEM;
+
+ urb = &rt->receiver;
+ rt->serial = 1;
+ rt->chip = chip;
+ usb_init_urb(urb);
+ rt->init_urb = usb6fire_comm_init_urb;
+ rt->write8 = usb6fire_comm_write8;
+ rt->write16 = usb6fire_comm_write16;
+
+ /* submit an urb that receives communication data from device */
+ urb->transfer_buffer = rt->receiver_buffer;
+ urb->transfer_buffer_length = COMM_RECEIVER_BUFSIZE;
+ urb->pipe = usb_rcvintpipe(chip->dev, COMM_EP);
+ urb->dev = chip->dev;
+ urb->complete = usb6fire_comm_receiver_handler;
+ urb->context = rt;
+ urb->interval = 1;
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
+ return ret;
+ }
+ chip->comm = rt;
+ return 0;
+}
+
+void usb6fire_comm_abort(struct sfire_chip *chip)
+{
+ struct comm_runtime *rt = chip->comm;
+
+ if (rt)
+ usb_poison_urb(&rt->receiver);
+}
+
+void usb6fire_comm_destroy(struct sfire_chip *chip)
+{
+ kfree(chip->comm);
+ chip->comm = NULL;
+}
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
new file mode 100644
index 000000000000..edc5dc84b888
--- /dev/null
+++ b/sound/usb/6fire/comm.h
@@ -0,0 +1,44 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef USB6FIRE_COMM_H
+#define USB6FIRE_COMM_H
+
+#include "common.h"
+
+enum /* settings for comm */
+{
+ COMM_RECEIVER_BUFSIZE = 64,
+};
+
+struct comm_runtime {
+ struct sfire_chip *chip;
+
+ struct urb receiver;
+ u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+
+ u8 serial; /* urb serial */
+
+ void (*init_urb)(struct comm_runtime *rt, struct urb *urb, u8 *buffer,
+ void *context, void(*handler)(struct urb *urb));
+ /* writes control data to the device */
+ int (*write8)(struct comm_runtime *rt, u8 request, u8 reg, u8 value);
+ int (*write16)(struct comm_runtime *rt, u8 request, u8 reg,
+ u8 vl, u8 vh);
+};
+
+int __devinit usb6fire_comm_init(struct sfire_chip *chip);
+void usb6fire_comm_abort(struct sfire_chip *chip);
+void usb6fire_comm_destroy(struct sfire_chip *chip);
+#endif /* USB6FIRE_COMM_H */
+
diff --git a/sound/usb/6fire/common.h b/sound/usb/6fire/common.h
new file mode 100644
index 000000000000..7dbeb4a37831
--- /dev/null
+++ b/sound/usb/6fire/common.h
@@ -0,0 +1,30 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef USB6FIRE_COMMON_H
+#define USB6FIRE_COMMON_H
+
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <sound/core.h>
+
+#define PREFIX "6fire: "
+
+struct sfire_chip;
+struct midi_runtime;
+struct pcm_runtime;
+struct control_runtime;
+struct comm_runtime;
+#endif /* USB6FIRE_COMMON_H */
+
diff --git a/sound/usb/6fire/control.c b/sound/usb/6fire/control.c
new file mode 100644
index 000000000000..248463511186
--- /dev/null
+++ b/sound/usb/6fire/control.c
@@ -0,0 +1,275 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Mixer control
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <sound/control.h>
+
+#include "control.h"
+#include "comm.h"
+#include "chip.h"
+
+static char *opt_coax_texts[2] = { "Optical", "Coax" };
+static char *line_phono_texts[2] = { "Line", "Phono" };
+
+/*
+ * calculated as value[i] = 128 * cbrt(i / 128);
+ * this is done because linear values cause rapid degradation
+ * of volume in the uppermost region.
+ */
+static const u8 log_volume_table[128] = {
+ 0x00, 0x19, 0x20, 0x24, 0x28, 0x2b, 0x2e, 0x30, 0x32, 0x34,
+ 0x36, 0x38, 0x3a, 0x3b, 0x3d, 0x3e, 0x40, 0x41, 0x42, 0x43,
+ 0x44, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e,
+ 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x53, 0x54, 0x55, 0x56,
+ 0x56, 0x57, 0x58, 0x58, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c,
+ 0x5d, 0x5e, 0x5e, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62,
+ 0x63, 0x63, 0x64, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68,
+ 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6c, 0x6c,
+ 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71,
+ 0x71, 0x72, 0x72, 0x73, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75,
+ 0x75, 0x76, 0x76, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79,
+ 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c,
+ 0x7d, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f };
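+
+/*
+ * Illustrative sketch only (not used by the driver): the table above can be
+ * regenerated with integer math as below; the helper name is made up here.
+ */
+static void __maybe_unused usb6fire_fill_log_volume_table(u8 *table)
+{
+ unsigned int i;
+ unsigned int v;
+
+ for (i = 0; i < 128; i++) {
+ /* largest v with v^3 <= i * 128 * 128, i.e. 128 * cbrt(i / 128) */
+ for (v = 0; (v + 1) * (v + 1) * (v + 1) <= i * 128 * 128; v++)
+ ;
+ table[i] = v;
+ }
+}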
+
+/*
+ * data that needs to be sent to device. sets up card internal stuff.
+ * values dumped from windows driver and filtered by trial'n'error.
+ */
+static const struct {
+ u8 type;
+ u8 reg;
+ u8 value;
+}
+init_data[] = {
+ { 0x22, 0x00, 0x00 }, { 0x20, 0x00, 0x08 }, { 0x22, 0x01, 0x01 },
+ { 0x20, 0x01, 0x08 }, { 0x22, 0x02, 0x00 }, { 0x20, 0x02, 0x08 },
+ { 0x22, 0x03, 0x00 }, { 0x20, 0x03, 0x08 }, { 0x22, 0x04, 0x00 },
+ { 0x20, 0x04, 0x08 }, { 0x22, 0x05, 0x01 }, { 0x20, 0x05, 0x08 },
+ { 0x22, 0x04, 0x01 }, { 0x12, 0x04, 0x00 }, { 0x12, 0x05, 0x00 },
+ { 0x12, 0x0d, 0x78 }, { 0x12, 0x21, 0x82 }, { 0x12, 0x22, 0x80 },
+ { 0x12, 0x23, 0x00 }, { 0x12, 0x06, 0x02 }, { 0x12, 0x03, 0x00 },
+ { 0x12, 0x02, 0x00 }, { 0x22, 0x03, 0x01 },
+ { 0 } /* TERMINATING ENTRY */
+};
+
+static void usb6fire_control_master_vol_update(struct control_runtime *rt)
+{
+ struct comm_runtime *comm_rt = rt->chip->comm;
+ if (comm_rt) {
+ /* set volume */
+ comm_rt->write8(comm_rt, 0x12, 0x0f, 0x7f -
+ log_volume_table[rt->master_vol]);
+ /* unmute */
+ comm_rt->write8(comm_rt, 0x12, 0x0e, 0x00);
+ }
+}
+
+static void usb6fire_control_line_phono_update(struct control_runtime *rt)
+{
+ struct comm_runtime *comm_rt = rt->chip->comm;
+ if (comm_rt) {
+ comm_rt->write8(comm_rt, 0x22, 0x02, rt->line_phono_switch);
+ comm_rt->write8(comm_rt, 0x21, 0x02, rt->line_phono_switch);
+ }
+}
+
+static void usb6fire_control_opt_coax_update(struct control_runtime *rt)
+{
+ struct comm_runtime *comm_rt = rt->chip->comm;
+ if (comm_rt) {
+ comm_rt->write8(comm_rt, 0x22, 0x00, rt->opt_coax_switch);
+ comm_rt->write8(comm_rt, 0x21, 0x00, rt->opt_coax_switch);
+ }
+}
+
+static int usb6fire_control_master_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 127;
+ return 0;
+}
+
+static int usb6fire_control_master_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ if (rt->master_vol != ucontrol->value.integer.value[0]) {
+ rt->master_vol = ucontrol->value.integer.value[0];
+ usb6fire_control_master_vol_update(rt);
+ changed = 1;
+ }
+ return changed;
+}
+
+static int usb6fire_control_master_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ ucontrol->value.integer.value[0] = rt->master_vol;
+ return 0;
+}
+
+static int usb6fire_control_line_phono_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 2;
+ if (uinfo->value.enumerated.item > 1)
+ uinfo->value.enumerated.item = 1;
+ strcpy(uinfo->value.enumerated.name,
+ line_phono_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+static int usb6fire_control_line_phono_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ if (rt->line_phono_switch != ucontrol->value.enumerated.item[0]) {
+ rt->line_phono_switch = ucontrol->value.enumerated.item[0];
+ usb6fire_control_line_phono_update(rt);
+ changed = 1;
+ }
+ return changed;
+}
+
+static int usb6fire_control_line_phono_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ ucontrol->value.enumerated.item[0] = rt->line_phono_switch;
+ return 0;
+}
+
+static int usb6fire_control_opt_coax_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 2;
+ if (uinfo->value.enumerated.item > 1)
+ uinfo->value.enumerated.item = 1;
+ strcpy(uinfo->value.enumerated.name,
+ opt_coax_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+static int usb6fire_control_opt_coax_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+
+ if (rt->opt_coax_switch != ucontrol->value.enumerated.item[0]) {
+ rt->opt_coax_switch = ucontrol->value.enumerated.item[0];
+ usb6fire_control_opt_coax_update(rt);
+ changed = 1;
+ }
+ return changed;
+}
+
+static int usb6fire_control_opt_coax_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ ucontrol->value.enumerated.item[0] = rt->opt_coax_switch;
+ return 0;
+}
+
+static struct snd_kcontrol_new elements[] __devinitdata = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = usb6fire_control_master_vol_info,
+ .get = usb6fire_control_master_vol_get,
+ .put = usb6fire_control_master_vol_put
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Line/Phono Capture Route",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = usb6fire_control_line_phono_info,
+ .get = usb6fire_control_line_phono_get,
+ .put = usb6fire_control_line_phono_put
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Opt/Coax Capture Route",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = usb6fire_control_opt_coax_info,
+ .get = usb6fire_control_opt_coax_get,
+ .put = usb6fire_control_opt_coax_put
+ },
+ {}
+};
+
+int __devinit usb6fire_control_init(struct sfire_chip *chip)
+{
+ int i;
+ int ret;
+ struct control_runtime *rt = kzalloc(sizeof(struct control_runtime),
+ GFP_KERNEL);
+ struct comm_runtime *comm_rt = chip->comm;
+
+ if (!rt)
+ return -ENOMEM;
+
+ rt->chip = chip;
+
+ i = 0;
+ while (init_data[i].type) {
+ comm_rt->write8(comm_rt, init_data[i].type, init_data[i].reg,
+ init_data[i].value);
+ i++;
+ }
+
+ usb6fire_control_opt_coax_update(rt);
+ usb6fire_control_line_phono_update(rt);
+ usb6fire_control_master_vol_update(rt);
+
+ i = 0;
+ while (elements[i].name) {
+ ret = snd_ctl_add(chip->card, snd_ctl_new1(&elements[i], rt));
+ if (ret < 0) {
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot add control.\n");
+ return ret;
+ }
+ i++;
+ }
+
+ chip->control = rt;
+ return 0;
+}
+
+void usb6fire_control_abort(struct sfire_chip *chip)
+{}
+
+void usb6fire_control_destroy(struct sfire_chip *chip)
+{
+ kfree(chip->control);
+ chip->control = NULL;
+}
diff --git a/sound/usb/6fire/control.h b/sound/usb/6fire/control.h
new file mode 100644
index 000000000000..b534c777ab02
--- /dev/null
+++ b/sound/usb/6fire/control.h
@@ -0,0 +1,37 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef USB6FIRE_CONTROL_H
+#define USB6FIRE_CONTROL_H
+
+#include "common.h"
+
+enum {
+ CONTROL_MAX_ELEMENTS = 32
+};
+
+struct control_runtime {
+ struct sfire_chip *chip;
+
+ struct snd_kcontrol *element[CONTROL_MAX_ELEMENTS];
+ bool opt_coax_switch;
+ bool line_phono_switch;
+ u8 master_vol;
+};
+
+int __devinit usb6fire_control_init(struct sfire_chip *chip);
+void usb6fire_control_abort(struct sfire_chip *chip);
+void usb6fire_control_destroy(struct sfire_chip *chip);
+#endif /* USB6FIRE_CONTROL_H */
+
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
new file mode 100644
index 000000000000..9081a54a9c6c
--- /dev/null
+++ b/sound/usb/6fire/firmware.c
@@ -0,0 +1,426 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Firmware loader
+ *
+ * Firmware loading is currently not working for all devices. To use such a
+ * device under Linux, it is also possible to let the Windows driver upload
+ * the firmware: boot into Windows once, then reboot into Linux.
+ * As long as the device stays connected to the power supply, no firmware
+ * reload needs to be performed.
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/firmware.h>
+
+#include "firmware.h"
+#include "chip.h"
+
+MODULE_FIRMWARE("6fire/dmx6firel2.ihx");
+MODULE_FIRMWARE("6fire/dmx6fireap.ihx");
+MODULE_FIRMWARE("6fire/dmx6firecf.bin");
+
+enum {
+ FPGA_BUFSIZE = 512, FPGA_EP = 2
+};
+
+static const u8 BIT_REVERSE_TABLE[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50,
+ 0xd0, 0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8,
+ 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04,
+ 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4,
+ 0x34, 0xb4, 0x74, 0xf4, 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c,
+ 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82,
+ 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32,
+ 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, 0x06, 0x86, 0x46,
+ 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6,
+ 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e,
+ 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
+ 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71,
+ 0xf1, 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99,
+ 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25,
+ 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d,
+ 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3,
+ 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, 0x0b,
+ 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb,
+ 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67,
+ 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, 0x0f, 0x8f,
+ 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f,
+ 0xbf, 0x7f, 0xff };
+
+/*
+ * wMaxPacketSize of pcm endpoints.
+ * keep synced with rates_in_packet_size and rates_out_packet_size in pcm.c
+ * fpp: frames per isopacket
+ *
+ * CAUTION: keep sizeof <= buffer[] in usb6fire_fw_init
+ */
+static const u8 ep_w_max_packet_size[] = {
+ 0xe4, 0x00, 0xe4, 0x00, /* alt 1: 228 EP2 and EP6 (7 fpp) */
+ 0xa4, 0x01, 0xa4, 0x01, /* alt 2: 420 EP2 and EP6 (13 fpp)*/
+ 0x94, 0x01, 0x5c, 0x02 /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */
+};
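+
+/*
+ * Size check, for illustration: at alt 3 these decode to 4 header bytes +
+ * 25 frames * 4 channels * 4 bytes = 404 (0x0194) for EP2 and 4 + 25 * 6 * 4
+ * = 604 (0x025c) for EP6; the alt 1/2 sizes (228, 420) likewise fit
+ * 4 + fpp * 8 * 4, i.e. eight 4-byte channel slots per frame (presumably
+ * analog plus digital channels).
+ */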
+
+struct ihex_record {
+ u16 address;
+ u8 len;
+ u8 data[256];
+ char error; /* true if an error occurred parsing this record */
+
+ u8 max_len; /* maximum record length in whole ihex */
+
+ /* private */
+ const char *txt_data;
+ unsigned int txt_length;
+ unsigned int txt_offset; /* current position in txt_data */
+};
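+
+/*
+ * For illustration, a typical intel hex data record as parsed below:
+ * ":0300300002337A1E" - 0x03 data bytes, load address 0x0030, record type
+ * 0x00 (data), payload 02 33 7a, checksum 0x1e; all byte values of a record,
+ * including the checksum, sum up to 0 modulo 256, which is what the crc
+ * accumulator below verifies.
+ */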
+
+static u8 usb6fire_fw_ihex_nibble(const u8 n)
+{
+ if (n >= '0' && n <= '9')
+ return n - '0';
+ else if (n >= 'A' && n <= 'F')
+ return n - ('A' - 10);
+ else if (n >= 'a' && n <= 'f')
+ return n - ('a' - 10);
+ return 0;
+}
+
+static u8 usb6fire_fw_ihex_hex(const u8 *data, u8 *crc)
+{
+ u8 val = (usb6fire_fw_ihex_nibble(data[0]) << 4) |
+ usb6fire_fw_ihex_nibble(data[1]);
+ *crc += val;
+ return val;
+}
+
+/*
+ * returns true if a data record is available, false otherwise.
+ * if an error occurred, false will be returned and record->error will be true.
+ */
+static bool usb6fire_fw_ihex_next_record(struct ihex_record *record)
+{
+ u8 crc = 0;
+ u8 type;
+ int i;
+
+ record->error = false;
+
+ /* find begin of record (marked by a colon) */
+ while (record->txt_offset < record->txt_length
+ && record->txt_data[record->txt_offset] != ':')
+ record->txt_offset++;
+ if (record->txt_offset == record->txt_length)
+ return false;
+
+ /* number of characters needed for len, addr and type entries */
+ record->txt_offset++;
+ if (record->txt_offset + 8 > record->txt_length) {
+ record->error = true;
+ return false;
+ }
+
+ record->len = usb6fire_fw_ihex_hex(record->txt_data +
+ record->txt_offset, &crc);
+ record->txt_offset += 2;
+ record->address = usb6fire_fw_ihex_hex(record->txt_data +
+ record->txt_offset, &crc) << 8;
+ record->txt_offset += 2;
+ record->address |= usb6fire_fw_ihex_hex(record->txt_data +
+ record->txt_offset, &crc);
+ record->txt_offset += 2;
+ type = usb6fire_fw_ihex_hex(record->txt_data +
+ record->txt_offset, &crc);
+ record->txt_offset += 2;
+
+ /* number of characters needed for data and crc entries */
+ if (record->txt_offset + 2 * (record->len + 1) > record->txt_length) {
+ record->error = true;
+ return false;
+ }
+ for (i = 0; i < record->len; i++) {
+ record->data[i] = usb6fire_fw_ihex_hex(record->txt_data
+ + record->txt_offset, &crc);
+ record->txt_offset += 2;
+ }
+ usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc);
+ if (crc) {
+ record->error = true;
+ return false;
+ }
+
+ if (type == 1 || !record->len) /* eof */
+ return false;
+ else if (type == 0)
+ return true;
+ else {
+ record->error = true;
+ return false;
+ }
+}
+
+static int usb6fire_fw_ihex_init(const struct firmware *fw,
+ struct ihex_record *record)
+{
+ record->txt_data = fw->data;
+ record->txt_length = fw->size;
+ record->txt_offset = 0;
+ record->max_len = 0;
+ /* read all records; when the loop ends, record->error indicates
+ * whether the ihex data is valid. */
+ while (usb6fire_fw_ihex_next_record(record))
+ record->max_len = max(record->len, record->max_len);
+ if (record->error)
+ return -EINVAL;
+ record->txt_offset = 0;
+ return 0;
+}
+
+static int usb6fire_fw_ezusb_write(struct usb_device *device,
+ int type, int value, char *data, int len)
+{
+ int ret;
+
+ ret = usb_control_msg(device, usb_sndctrlpipe(device, 0), type,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, 0, data, len, HZ);
+ if (ret < 0)
+ return ret;
+ else if (ret != len)
+ return -EIO;
+ return 0;
+}
+
+static int usb6fire_fw_ezusb_read(struct usb_device *device,
+ int type, int value, char *data, int len)
+{
+ int ret = usb_control_msg(device, usb_rcvctrlpipe(device, 0), type,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value,
+ 0, data, len, HZ);
+ if (ret < 0)
+ return ret;
+ else if (ret != len)
+ return -EIO;
+ return 0;
+}
+
+static int usb6fire_fw_fpga_write(struct usb_device *device,
+ char *data, int len)
+{
+ int actual_len;
+ int ret;
+
+ ret = usb_bulk_msg(device, usb_sndbulkpipe(device, FPGA_EP), data, len,
+ &actual_len, HZ);
+ if (ret < 0)
+ return ret;
+ else if (actual_len != len)
+ return -EIO;
+ return 0;
+}
+
+static int usb6fire_fw_ezusb_upload(
+ struct usb_interface *intf, const char *fwname,
+ unsigned int postaddr, u8 *postdata, unsigned int postlen)
+{
+ int ret;
+ u8 data;
+ struct usb_device *device = interface_to_usbdev(intf);
+ const struct firmware *fw = NULL;
+ struct ihex_record *rec = kmalloc(sizeof(struct ihex_record),
+ GFP_KERNEL);
+
+ if (!rec)
+ return -ENOMEM;
+
+ ret = request_firmware(&fw, fwname, &device->dev);
+ if (ret < 0) {
+ kfree(rec);
+ snd_printk(KERN_ERR PREFIX "error requesting ezusb "
+ "firmware %s.\n", fwname);
+ return ret;
+ }
+ ret = usb6fire_fw_ihex_init(fw, rec);
+ if (ret < 0) {
+ kfree(rec);
+ release_firmware(fw);
+ snd_printk(KERN_ERR PREFIX "error validating ezusb "
+ "firmware %s.\n", fwname);
+ return ret;
+ }
+ /* upload firmware image */
+ data = 0x01; /* stop ezusb cpu */
+ ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
+ if (ret < 0) {
+ kfree(rec);
+ release_firmware(fw);
+ snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
+ "firmware %s: begin message.\n", fwname);
+ return ret;
+ }
+
+ while (usb6fire_fw_ihex_next_record(rec)) { /* write firmware */
+ ret = usb6fire_fw_ezusb_write(device, 0xa0, rec->address,
+ rec->data, rec->len);
+ if (ret < 0) {
+ kfree(rec);
+ release_firmware(fw);
+ snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
+ "firmware %s: data urb.\n", fwname);
+ return ret;
+ }
+ }
+
+ release_firmware(fw);
+ kfree(rec);
+ if (postdata) { /* write data after firmware has been uploaded */
+ ret = usb6fire_fw_ezusb_write(device, 0xa0, postaddr,
+ postdata, postlen);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
+ "firmware %s: post urb.\n", fwname);
+ return ret;
+ }
+ }
+
+ data = 0x00; /* resume ezusb cpu */
+ ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
+ "firmware %s: end message.\n", fwname);
+ return ret;
+ }
+ return 0;
+}
+
+static int usb6fire_fw_fpga_upload(
+ struct usb_interface *intf, const char *fwname)
+{
+ int ret;
+ int i;
+ struct usb_device *device = interface_to_usbdev(intf);
+ u8 *buffer = kmalloc(FPGA_BUFSIZE, GFP_KERNEL);
+ const char *c;
+ const char *end;
+ const struct firmware *fw;
+
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = request_firmware(&fw, fwname, &device->dev);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "unable to get fpga firmware %s.\n",
+ fwname);
+ kfree(buffer);
+ return -EIO;
+ }
+
+ c = fw->data;
+ end = fw->data + fw->size;
+
+ ret = usb6fire_fw_ezusb_write(device, 8, 0, NULL, 0);
+ if (ret < 0) {
+ kfree(buffer);
+ release_firmware(fw);
+ snd_printk(KERN_ERR PREFIX "unable to upload fpga firmware: "
+ "begin urb.\n");
+ return ret;
+ }
+
+ while (c != end) {
+ for (i = 0; c != end && i < FPGA_BUFSIZE; i++, c++)
+ buffer[i] = BIT_REVERSE_TABLE[(u8) *c];
+
+ ret = usb6fire_fw_fpga_write(device, buffer, i);
+ if (ret < 0) {
+ release_firmware(fw);
+ kfree(buffer);
+ snd_printk(KERN_ERR PREFIX "unable to upload fpga "
+ "firmware: fw urb.\n");
+ return ret;
+ }
+ }
+ release_firmware(fw);
+ kfree(buffer);
+
+ ret = usb6fire_fw_ezusb_write(device, 9, 0, NULL, 0);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "unable to upload fpga firmware: "
+ "end urb.\n");
+ return ret;
+ }
+ return 0;
+}
+
+int __devinit usb6fire_fw_init(struct usb_interface *intf)
+{
+ int i;
+ int ret;
+ struct usb_device *device = interface_to_usbdev(intf);
+ /* buffer: 8 bytes received from the device and
+ * sizeof(ep_w_max_packet_size) bytes for a non-const copy */
+ u8 buffer[12];
+
+ ret = usb6fire_fw_ezusb_read(device, 1, 0, buffer, 8);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "unable to receive device "
+ "firmware state.\n");
+ return ret;
+ }
+ if (buffer[0] != 0xeb || buffer[1] != 0xaa || buffer[2] != 0x55
+ || buffer[4] != 0x03 || buffer[5] != 0x01 || buffer[7]
+ != 0x00) {
+ snd_printk(KERN_ERR PREFIX "unknown device firmware state "
+ "received from device: ");
+ for (i = 0; i < 8; i++)
+ snd_printk("%02x ", buffer[i]);
+ snd_printk("\n");
+ return -EIO;
+ }
+ /* do we need fpga loader ezusb firmware? */
+ if (buffer[3] == 0x01 && buffer[6] == 0x19) {
+ ret = usb6fire_fw_ezusb_upload(intf,
+ "6fire/dmx6firel2.ihx", 0, NULL, 0);
+ if (ret < 0)
+ return ret;
+ return FW_NOT_READY;
+ }
+ /* do we need fpga firmware and application ezusb firmware? */
+ else if (buffer[3] == 0x02 && buffer[6] == 0x0b) {
+ ret = usb6fire_fw_fpga_upload(intf, "6fire/dmx6firecf.bin");
+ if (ret < 0)
+ return ret;
+ memcpy(buffer, ep_w_max_packet_size,
+ sizeof(ep_w_max_packet_size));
+ ret = usb6fire_fw_ezusb_upload(intf, "6fire/dmx6fireap.ihx",
+ 0x0003, buffer, sizeof(ep_w_max_packet_size));
+ if (ret < 0)
+ return ret;
+ return FW_NOT_READY;
+ }
+ /* all fw loaded? */
+ else if (buffer[3] == 0x03 && buffer[6] == 0x0b)
+ return 0;
+ /* unknown data? */
+ else {
+ snd_printk(KERN_ERR PREFIX "unknown device firmware state "
+ "received from device: ");
+ for (i = 0; i < 8; i++)
+ snd_printk("%02x ", buffer[i]);
+ snd_printk("\n");
+ return -EIO;
+ }
+ return 0;
+}
+
diff --git a/sound/usb/6fire/firmware.h b/sound/usb/6fire/firmware.h
new file mode 100644
index 000000000000..008569895381
--- /dev/null
+++ b/sound/usb/6fire/firmware.h
@@ -0,0 +1,27 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk
+ * Created: Jan 01, 2011
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef USB6FIRE_FIRMWARE_H
+#define USB6FIRE_FIRMWARE_H
+
+#include "common.h"
+
+enum /* firmware state of device */
+{
+ FW_READY = 0,
+ FW_NOT_READY = 1
+};
+
+int __devinit usb6fire_fw_init(struct usb_interface *intf);
+#endif /* USB6FIRE_FIRMWARE_H */
+
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
new file mode 100644
index 000000000000..13f4509dce2b
--- /dev/null
+++ b/sound/usb/6fire/midi.c
@@ -0,0 +1,203 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Rawmidi driver
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <sound/rawmidi.h>
+
+#include "midi.h"
+#include "chip.h"
+#include "comm.h"
+
+static void usb6fire_midi_out_handler(struct urb *urb)
+{
+ struct midi_runtime *rt = urb->context;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rt->out_lock, flags);
+
+ if (rt->out) {
+ ret = snd_rawmidi_transmit(rt->out, rt->out_buffer + 4,
+ MIDI_BUFSIZE - 4);
+ if (ret > 0) { /* more data available, send next packet */
+ rt->out_buffer[1] = ret + 2;
+ rt->out_buffer[3] = rt->out_serial++;
+ urb->transfer_buffer_length = ret + 4;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0)
+ snd_printk(KERN_ERR PREFIX "midi out urb "
+ "submit failed: %d\n", ret);
+ } else /* no more data to transmit */
+ rt->out = NULL;
+ }
+ spin_unlock_irqrestore(&rt->out_lock, flags);
+}
+
+static void usb6fire_midi_in_received(
+ struct midi_runtime *rt, u8 *data, int length)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rt->in_lock, flags);
+ if (rt->in)
+ snd_rawmidi_receive(rt->in, data, length);
+ spin_unlock_irqrestore(&rt->in_lock, flags);
+}
+
+static int usb6fire_midi_out_open(struct snd_rawmidi_substream *alsa_sub)
+{
+ return 0;
+}
+
+static int usb6fire_midi_out_close(struct snd_rawmidi_substream *alsa_sub)
+{
+ return 0;
+}
+
+static void usb6fire_midi_out_trigger(
+ struct snd_rawmidi_substream *alsa_sub, int up)
+{
+ struct midi_runtime *rt = alsa_sub->rmidi->private_data;
+ struct urb *urb = &rt->out_urb;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rt->out_lock, flags);
+ if (up) { /* start transfer */
+ if (rt->out) { /* we are already transmitting so just return */
+ spin_unlock_irqrestore(&rt->out_lock, flags);
+ return;
+ }
+
+ ret = snd_rawmidi_transmit(alsa_sub, rt->out_buffer + 4,
+ MIDI_BUFSIZE - 4);
+ if (ret > 0) {
+ rt->out_buffer[1] = ret + 2;
+ rt->out_buffer[3] = rt->out_serial++;
+ urb->transfer_buffer_length = ret + 4;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0)
+ snd_printk(KERN_ERR PREFIX "midi out urb "
+ "submit failed: %d\n", ret);
+ else
+ rt->out = alsa_sub;
+ }
+ } else if (rt->out == alsa_sub)
+ rt->out = NULL;
+ spin_unlock_irqrestore(&rt->out_lock, flags);
+}
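+
+/*
+ * Outgoing packet layout, for reference (derived from the trigger/handler
+ * above and from usb6fire_midi_init below): 0x80 ('send midi' command),
+ * <number of midi bytes + 2>, 0x00, a running serial number, followed by up
+ * to MIDI_BUFSIZE - 4 raw midi bytes; the urb transfer length is the midi
+ * byte count plus 4.
+ */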
+
+static void usb6fire_midi_out_drain(struct snd_rawmidi_substream *alsa_sub)
+{
+ struct midi_runtime *rt = alsa_sub->rmidi->private_data;
+ int retry = 0;
+
+ while (rt->out && retry++ < 100)
+ msleep(10);
+}
+
+static int usb6fire_midi_in_open(struct snd_rawmidi_substream *alsa_sub)
+{
+ return 0;
+}
+
+static int usb6fire_midi_in_close(struct snd_rawmidi_substream *alsa_sub)
+{
+ return 0;
+}
+
+static void usb6fire_midi_in_trigger(
+ struct snd_rawmidi_substream *alsa_sub, int up)
+{
+ struct midi_runtime *rt = alsa_sub->rmidi->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rt->in_lock, flags);
+ if (up)
+ rt->in = alsa_sub;
+ else
+ rt->in = NULL;
+ spin_unlock_irqrestore(&rt->in_lock, flags);
+}
+
+static struct snd_rawmidi_ops out_ops = {
+ .open = usb6fire_midi_out_open,
+ .close = usb6fire_midi_out_close,
+ .trigger = usb6fire_midi_out_trigger,
+ .drain = usb6fire_midi_out_drain
+};
+
+static struct snd_rawmidi_ops in_ops = {
+ .open = usb6fire_midi_in_open,
+ .close = usb6fire_midi_in_close,
+ .trigger = usb6fire_midi_in_trigger
+};
+
+int __devinit usb6fire_midi_init(struct sfire_chip *chip)
+{
+ int ret;
+ struct midi_runtime *rt = kzalloc(sizeof(struct midi_runtime),
+ GFP_KERNEL);
+ struct comm_runtime *comm_rt = chip->comm;
+
+ if (!rt)
+ return -ENOMEM;
+
+ rt->chip = chip;
+ rt->in_received = usb6fire_midi_in_received;
+ rt->out_buffer[0] = 0x80; /* 'send midi' command */
+ rt->out_buffer[1] = 0x00; /* size of data */
+ rt->out_buffer[2] = 0x00; /* always 0 */
+ spin_lock_init(&rt->in_lock);
+ spin_lock_init(&rt->out_lock);
+
+ comm_rt->init_urb(comm_rt, &rt->out_urb, rt->out_buffer, rt,
+ usb6fire_midi_out_handler);
+
+ ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
+ if (ret < 0) {
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
+ return ret;
+ }
+ rt->instance->private_data = rt;
+ strcpy(rt->instance->name, "DMX6FireUSB MIDI");
+ rt->instance->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
+ SNDRV_RAWMIDI_INFO_INPUT |
+ SNDRV_RAWMIDI_INFO_DUPLEX;
+ snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_OUTPUT,
+ &out_ops);
+ snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_INPUT,
+ &in_ops);
+
+ chip->midi = rt;
+ return 0;
+}
+
+void usb6fire_midi_abort(struct sfire_chip *chip)
+{
+ struct midi_runtime *rt = chip->midi;
+
+ if (rt)
+ usb_poison_urb(&rt->out_urb);
+}
+
+void usb6fire_midi_destroy(struct sfire_chip *chip)
+{
+ kfree(chip->midi);
+ chip->midi = NULL;
+}
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
new file mode 100644
index 000000000000..97a7bf669135
--- /dev/null
+++ b/sound/usb/6fire/midi.h
@@ -0,0 +1,46 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef USB6FIRE_MIDI_H
+#define USB6FIRE_MIDI_H
+
+#include "common.h"
+
+enum {
+ MIDI_BUFSIZE = 64
+};
+
+struct midi_runtime {
+ struct sfire_chip *chip;
+ struct snd_rawmidi *instance;
+
+ struct snd_rawmidi_substream *in;
+ char in_active;
+
+ spinlock_t in_lock;
+ spinlock_t out_lock;
+ struct snd_rawmidi_substream *out;
+ struct urb out_urb;
+ u8 out_serial; /* serial number of out packet */
+ u8 out_buffer[MIDI_BUFSIZE];
+ int buffer_offset;
+
+ void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
+};
+
+int __devinit usb6fire_midi_init(struct sfire_chip *chip);
+void usb6fire_midi_abort(struct sfire_chip *chip);
+void usb6fire_midi_destroy(struct sfire_chip *chip);
+#endif /* USB6FIRE_MIDI_H */
+
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
new file mode 100644
index 000000000000..ba62c7468ba8
--- /dev/null
+++ b/sound/usb/6fire/pcm.c
@@ -0,0 +1,688 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * PCM driver
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "pcm.h"
+#include "chip.h"
+#include "comm.h"
+
+enum {
+ OUT_N_CHANNELS = 6, IN_N_CHANNELS = 4
+};
+
+/* keep next two synced with
+ * ep_w_max_packet_size[] in firmware.c and PCM_MAX_PACKET_SIZE */
+static const int rates_in_packet_size[] = { 228, 228, 420, 420, 404, 404 };
+static const int rates_out_packet_size[] = { 228, 228, 420, 420, 604, 604 };
+static const int rates[] = { 44100, 48000, 88200, 96000, 176400, 192000 };
+static const int rates_altsetting[] = { 1, 1, 2, 2, 3, 3 };
+static const int rates_alsaid[] = {
+ SNDRV_PCM_RATE_44100, SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_88200, SNDRV_PCM_RATE_96000,
+ SNDRV_PCM_RATE_176400, SNDRV_PCM_RATE_192000 };
+
+/* values to write to soundcard register for all samplerates */
+static const u16 rates_6fire_vl[] = {0x00, 0x01, 0x00, 0x01, 0x00, 0x01};
+static const u16 rates_6fire_vh[] = {0x11, 0x11, 0x10, 0x10, 0x00, 0x00};
+
+enum { /* settings for pcm */
+ OUT_EP = 6, IN_EP = 2, MAX_BUFSIZE = 128 * 1024
+};
+
+enum { /* pcm streaming states */
+ STREAM_DISABLED, /* no pcm streaming */
+ STREAM_STARTING, /* pcm streaming requested, waiting to become ready */
+ STREAM_RUNNING, /* pcm streaming running */
+ STREAM_STOPPING
+};
+
+enum { /* pcm sample rates (also index into the rates_xxx[] arrays) */
+ RATE_44KHZ,
+ RATE_48KHZ,
+ RATE_88KHZ,
+ RATE_96KHZ,
+ RATE_176KHZ,
+ RATE_192KHZ
+};
+
+static const struct snd_pcm_hardware pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH,
+
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+
+ .rate_min = 44100,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 0, /* set in pcm_open, depending on capture/playback */
+ .buffer_bytes_max = MAX_BUFSIZE,
+ .period_bytes_min = PCM_N_PACKETS_PER_URB * (PCM_MAX_PACKET_SIZE - 4),
+ .period_bytes_max = MAX_BUFSIZE,
+ .periods_min = 2,
+ .periods_max = 1024
+};
+
+static int usb6fire_pcm_set_rate(struct pcm_runtime *rt)
+{
+ int ret;
+ struct usb_device *device = rt->chip->dev;
+ struct comm_runtime *comm_rt = rt->chip->comm;
+
+ if (rt->rate >= ARRAY_SIZE(rates))
+ return -EINVAL;
+ /* disable streaming */
+ ret = comm_rt->write16(comm_rt, 0x02, 0x00, 0x00, 0x00);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "error stopping streaming while "
+ "setting samplerate %d.\n", rates[rt->rate]);
+ return ret;
+ }
+
+ ret = usb_set_interface(device, 1, rates_altsetting[rt->rate]);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "error setting interface "
+ "altsetting %d for samplerate %d.\n",
+ rates_altsetting[rt->rate], rates[rt->rate]);
+ return ret;
+ }
+
+ /* set soundcard clock */
+ ret = comm_rt->write16(comm_rt, 0x02, 0x01, rates_6fire_vl[rt->rate],
+ rates_6fire_vh[rt->rate]);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "error setting samplerate %d.\n",
+ rates[rt->rate]);
+ return ret;
+ }
+
+ /* enable analog inputs and outputs
+ * (one bit per stereo-channel) */
+ ret = comm_rt->write16(comm_rt, 0x02, 0x02,
+ (1 << (OUT_N_CHANNELS / 2)) - 1,
+ (1 << (IN_N_CHANNELS / 2)) - 1);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "error initializing analog channels "
+ "while setting samplerate %d.\n",
+ rates[rt->rate]);
+ return ret;
+ }
+ /* disable digital inputs and outputs */
+ ret = comm_rt->write16(comm_rt, 0x02, 0x03, 0x00, 0x00);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "error initializing digital "
+ "channels while setting samplerate %d.\n",
+ rates[rt->rate]);
+ return ret;
+ }
+
+ ret = comm_rt->write16(comm_rt, 0x02, 0x00, 0x00, 0x01);
+ if (ret < 0) {
+ snd_printk(KERN_ERR PREFIX "error starting streaming while "
+ "setting samplerate %d.\n", rates[rt->rate]);
+ return ret;
+ }
+
+ rt->in_n_analog = IN_N_CHANNELS;
+ rt->out_n_analog = OUT_N_CHANNELS;
+ rt->in_packet_size = rates_in_packet_size[rt->rate];
+ rt->out_packet_size = rates_out_packet_size[rt->rate];
+ return 0;
+}
+
+static struct pcm_substream *usb6fire_pcm_get_substream(
+ struct snd_pcm_substream *alsa_sub)
+{
+ struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
+
+ if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return &rt->playback;
+ else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE)
+ return &rt->capture;
+ snd_printk(KERN_ERR PREFIX "error getting pcm substream slot.\n");
+ return NULL;
+}
+
+/* call with stream_mutex locked */
+static void usb6fire_pcm_stream_stop(struct pcm_runtime *rt)
+{
+ int i;
+
+ if (rt->stream_state != STREAM_DISABLED) {
+ for (i = 0; i < PCM_N_URBS; i++) {
+ usb_kill_urb(&rt->in_urbs[i].instance);
+ usb_kill_urb(&rt->out_urbs[i].instance);
+ }
+ rt->stream_state = STREAM_DISABLED;
+ }
+}
+
+/* call with stream_mutex locked */
+static int usb6fire_pcm_stream_start(struct pcm_runtime *rt)
+{
+ int ret;
+ int i;
+ int k;
+ struct usb_iso_packet_descriptor *packet;
+
+ if (rt->stream_state == STREAM_DISABLED) {
+ /* submit our in urbs */
+ rt->stream_wait_cond = false;
+ rt->stream_state = STREAM_STARTING;
+ for (i = 0; i < PCM_N_URBS; i++) {
+ for (k = 0; k < PCM_N_PACKETS_PER_URB; k++) {
+ packet = &rt->in_urbs[i].packets[k];
+ packet->offset = k * rt->in_packet_size;
+ packet->length = rt->in_packet_size;
+ packet->actual_length = 0;
+ packet->status = 0;
+ }
+ ret = usb_submit_urb(&rt->in_urbs[i].instance,
+ GFP_ATOMIC);
+ if (ret) {
+ usb6fire_pcm_stream_stop(rt);
+ return ret;
+ }
+ }
+
+ /* wait for first out urb to return (sent in in urb handler) */
+ wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond,
+ HZ);
+ if (rt->stream_wait_cond)
+ rt->stream_state = STREAM_RUNNING;
+ else {
+ usb6fire_pcm_stream_stop(rt);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/* call with substream locked */
+static void usb6fire_pcm_capture(struct pcm_substream *sub, struct pcm_urb *urb)
+{
+ int i;
+ int frame;
+ int frame_count;
+ unsigned int total_length = 0;
+ struct pcm_runtime *rt = snd_pcm_substream_chip(sub->instance);
+ struct snd_pcm_runtime *alsa_rt = sub->instance->runtime;
+ u32 *src = (u32 *) urb->buffer;
+ u32 *dest = (u32 *) (alsa_rt->dma_area + sub->dma_off
+ * (alsa_rt->frame_bits >> 3));
+ u32 *dest_end = (u32 *) (alsa_rt->dma_area + alsa_rt->buffer_size
+ * (alsa_rt->frame_bits >> 3));
+ int bytes_per_frame = alsa_rt->channels << 2;
+
+ for (i = 0; i < PCM_N_PACKETS_PER_URB; i++) {
+ /* at least 4 header bytes for valid packet.
+ * after that: 32 bits per sample for analog channels */
+ if (urb->packets[i].actual_length > 4)
+ frame_count = (urb->packets[i].actual_length - 4)
+ / (rt->in_n_analog << 2);
+ else
+ frame_count = 0;
+
+ src = (u32 *) (urb->buffer + total_length);
+ src++; /* skip leading 4 bytes of every packet */
+ total_length += urb->packets[i].length;
+ for (frame = 0; frame < frame_count; frame++) {
+ memcpy(dest, src, bytes_per_frame);
+ dest += alsa_rt->channels;
+ src += rt->in_n_analog;
+ sub->dma_off++;
+ sub->period_off++;
+ if (dest == dest_end) {
+ sub->dma_off = 0;
+ dest = (u32 *) alsa_rt->dma_area;
+ }
+ }
+ }
+}
+
+/* call with substream locked */
+static void usb6fire_pcm_playback(struct pcm_substream *sub,
+ struct pcm_urb *urb)
+{
+ int i;
+ int frame;
+ int frame_count;
+ struct pcm_runtime *rt = snd_pcm_substream_chip(sub->instance);
+ struct snd_pcm_runtime *alsa_rt = sub->instance->runtime;
+ u32 *src = (u32 *) (alsa_rt->dma_area + sub->dma_off
+ * (alsa_rt->frame_bits >> 3));
+ u32 *src_end = (u32 *) (alsa_rt->dma_area + alsa_rt->buffer_size
+ * (alsa_rt->frame_bits >> 3));
+ u32 *dest = (u32 *) urb->buffer;
+ int bytes_per_frame = alsa_rt->channels << 2;
+
+ for (i = 0; i < PCM_N_PACKETS_PER_URB; i++) {
+ /* at least 4 header bytes for valid packet.
+ * after that: 32 bits per sample for analog channels */
+ if (urb->packets[i].length > 4)
+ frame_count = (urb->packets[i].length - 4)
+ / (rt->out_n_analog << 2);
+ else
+ frame_count = 0;
+ dest++; /* skip leading 4 bytes of every frame */
+ for (frame = 0; frame < frame_count; frame++) {
+ memcpy(dest, src, bytes_per_frame);
+ src += alsa_rt->channels;
+ dest += rt->out_n_analog;
+ sub->dma_off++;
+ sub->period_off++;
+ if (src == src_end) {
+ src = (u32 *) alsa_rt->dma_area;
+ sub->dma_off = 0;
+ }
+ }
+ }
+}
+
+static void usb6fire_pcm_in_urb_handler(struct urb *usb_urb)
+{
+ struct pcm_urb *in_urb = usb_urb->context;
+ struct pcm_urb *out_urb = in_urb->peer;
+ struct pcm_runtime *rt = in_urb->chip->pcm;
+ struct pcm_substream *sub;
+ unsigned long flags;
+ int total_length = 0;
+ int frame_count;
+ int frame;
+ int channel;
+ int i;
+ u8 *dest;
+
+ if (usb_urb->status || rt->panic || rt->stream_state == STREAM_STOPPING)
+ return;
+ for (i = 0; i < PCM_N_PACKETS_PER_URB; i++)
+ if (in_urb->packets[i].status) {
+ rt->panic = true;
+ return;
+ }
+
+ if (rt->stream_state == STREAM_DISABLED) {
+ snd_printk(KERN_ERR PREFIX "internal error: "
+ "stream disabled in in-urb handler.\n");
+ return;
+ }
+
+ /* receive our capture data */
+ sub = &rt->capture;
+ spin_lock_irqsave(&sub->lock, flags);
+ if (sub->active) {
+ usb6fire_pcm_capture(sub, in_urb);
+ if (sub->period_off >= sub->instance->runtime->period_size) {
+ sub->period_off %= sub->instance->runtime->period_size;
+ spin_unlock_irqrestore(&sub->lock, flags);
+ snd_pcm_period_elapsed(sub->instance);
+ } else
+ spin_unlock_irqrestore(&sub->lock, flags);
+ } else
+ spin_unlock_irqrestore(&sub->lock, flags);
+
+ /* setup out urb structure */
+ for (i = 0; i < PCM_N_PACKETS_PER_URB; i++) {
+ out_urb->packets[i].offset = total_length;
+ out_urb->packets[i].length = (in_urb->packets[i].actual_length
+ - 4) / (rt->in_n_analog << 2)
+ * (rt->out_n_analog << 2) + 4;
+ out_urb->packets[i].status = 0;
+ total_length += out_urb->packets[i].length;
+ }
+ memset(out_urb->buffer, 0, total_length);
+
+ /* now send our playback data (via the peer out urb) */
+ sub = &rt->playback;
+ spin_lock_irqsave(&sub->lock, flags);
+ if (sub->active) {
+ usb6fire_pcm_playback(sub, out_urb);
+ if (sub->period_off >= sub->instance->runtime->period_size) {
+ sub->period_off %= sub->instance->runtime->period_size;
+ spin_unlock_irqrestore(&sub->lock, flags);
+ snd_pcm_period_elapsed(sub->instance);
+ } else
+ spin_unlock_irqrestore(&sub->lock, flags);
+ } else
+ spin_unlock_irqrestore(&sub->lock, flags);
+
+ /* setup the 4th byte of each sample (0x40 for analog channels) */
+ dest = out_urb->buffer;
+ for (i = 0; i < PCM_N_PACKETS_PER_URB; i++)
+ if (out_urb->packets[i].length >= 4) {
+ frame_count = (out_urb->packets[i].length - 4)
+ / (rt->out_n_analog << 2);
+ *(dest++) = 0xaa;
+ *(dest++) = 0xaa;
+ *(dest++) = frame_count;
+ *(dest++) = 0x00;
+ for (frame = 0; frame < frame_count; frame++)
+ for (channel = 0;
+ channel < rt->out_n_analog;
+ channel++) {
+ dest += 3; /* skip sample data */
+ *(dest++) = 0x40;
+ }
+ }
+ usb_submit_urb(&out_urb->instance, GFP_ATOMIC);
+ usb_submit_urb(&in_urb->instance, GFP_ATOMIC);
+}
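+
+/*
+ * Resulting outgoing iso packet layout, noted for illustration: a 4-byte
+ * header 0xaa 0xaa <frame_count> 0x00 followed by <frame_count> frames of
+ * out_n_analog samples; each sample carries its 24-bit S24_LE payload in the
+ * lower three bytes and 0x40 in the fourth byte to mark an analog channel.
+ */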
+
+static void usb6fire_pcm_out_urb_handler(struct urb *usb_urb)
+{
+ struct pcm_urb *urb = usb_urb->context;
+ struct pcm_runtime *rt = urb->chip->pcm;
+
+ if (rt->stream_state == STREAM_STARTING) {
+ rt->stream_wait_cond = true;
+ wake_up(&rt->stream_wait_queue);
+ }
+}
+
+static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
+{
+ struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
+ struct pcm_substream *sub = NULL;
+ struct snd_pcm_runtime *alsa_rt = alsa_sub->runtime;
+
+ if (rt->panic)
+ return -EPIPE;
+
+ mutex_lock(&rt->stream_mutex);
+ alsa_rt->hw = pcm_hw;
+
+ if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (rt->rate < ARRAY_SIZE(rates))
+ alsa_rt->hw.rates = rates_alsaid[rt->rate];
+ alsa_rt->hw.channels_max = OUT_N_CHANNELS;
+ sub = &rt->playback;
+ } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (rt->rate < ARRAY_SIZE(rates))
+ alsa_rt->hw.rates = rates_alsaid[rt->rate];
+ alsa_rt->hw.channels_max = IN_N_CHANNELS;
+ sub = &rt->capture;
+ }
+
+ if (!sub) {
+ mutex_unlock(&rt->stream_mutex);
+ snd_printk(KERN_ERR PREFIX "invalid stream type.\n");
+ return -EINVAL;
+ }
+
+ sub->instance = alsa_sub;
+ sub->active = false;
+ mutex_unlock(&rt->stream_mutex);
+ return 0;
+}
+
+static int usb6fire_pcm_close(struct snd_pcm_substream *alsa_sub)
+{
+ struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
+ struct pcm_substream *sub = usb6fire_pcm_get_substream(alsa_sub);
+ unsigned long flags;
+
+ if (rt->panic)
+ return 0;
+
+ mutex_lock(&rt->stream_mutex);
+ if (sub) {
+ /* deactivate substream */
+ spin_lock_irqsave(&sub->lock, flags);
+ sub->instance = NULL;
+ sub->active = false;
+ spin_unlock_irqrestore(&sub->lock, flags);
+
+ /* all substreams closed? if so, stop streaming */
+ if (!rt->playback.instance && !rt->capture.instance) {
+ usb6fire_pcm_stream_stop(rt);
+ rt->rate = -1;
+ }
+ }
+ mutex_unlock(&rt->stream_mutex);
+ return 0;
+}
+
+static int usb6fire_pcm_hw_params(struct snd_pcm_substream *alsa_sub,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(alsa_sub,
+ params_buffer_bytes(hw_params));
+}
+
+static int usb6fire_pcm_hw_free(struct snd_pcm_substream *alsa_sub)
+{
+ return snd_pcm_lib_free_pages(alsa_sub);
+}
+
+static int usb6fire_pcm_prepare(struct snd_pcm_substream *alsa_sub)
+{
+ struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
+ struct pcm_substream *sub = usb6fire_pcm_get_substream(alsa_sub);
+ struct snd_pcm_runtime *alsa_rt = alsa_sub->runtime;
+ int i;
+ int ret;
+
+ if (rt->panic)
+ return -EPIPE;
+ if (!sub)
+ return -ENODEV;
+
+ mutex_lock(&rt->stream_mutex);
+ sub->dma_off = 0;
+ sub->period_off = 0;
+
+ if (rt->stream_state == STREAM_DISABLED) {
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (alsa_rt->rate == rates[i]) {
+ rt->rate = i;
+ break;
+ }
+ if (i == ARRAY_SIZE(rates)) {
+ mutex_unlock(&rt->stream_mutex);
+ snd_printk("invalid rate %d in prepare.\n",
+ alsa_rt->rate);
+ return -EINVAL;
+ }
+
+ ret = usb6fire_pcm_set_rate(rt);
+ if (ret) {
+ mutex_unlock(&rt->stream_mutex);
+ return ret;
+ }
+ ret = usb6fire_pcm_stream_start(rt);
+ if (ret) {
+ mutex_unlock(&rt->stream_mutex);
+ snd_printk(KERN_ERR PREFIX
+ "could not start pcm stream.\n");
+ return ret;
+ }
+ }
+ mutex_unlock(&rt->stream_mutex);
+ return 0;
+}
+
+static int usb6fire_pcm_trigger(struct snd_pcm_substream *alsa_sub, int cmd)
+{
+ struct pcm_substream *sub = usb6fire_pcm_get_substream(alsa_sub);
+ struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
+ unsigned long flags;
+
+ if (rt->panic)
+ return -EPIPE;
+ if (!sub)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ spin_lock_irqsave(&sub->lock, flags);
+ sub->active = true;
+ spin_unlock_irqrestore(&sub->lock, flags);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ spin_lock_irqsave(&sub->lock, flags);
+ sub->active = false;
+ spin_unlock_irqrestore(&sub->lock, flags);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static snd_pcm_uframes_t usb6fire_pcm_pointer(
+ struct snd_pcm_substream *alsa_sub)
+{
+ struct pcm_substream *sub = usb6fire_pcm_get_substream(alsa_sub);
+ struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
+ unsigned long flags;
+ snd_pcm_uframes_t ret;
+
+ if (rt->panic || !sub)
+ return SNDRV_PCM_POS_XRUN;
+
+ spin_lock_irqsave(&sub->lock, flags);
+ ret = sub->dma_off;
+ spin_unlock_irqrestore(&sub->lock, flags);
+ return ret;
+}
+
+static struct snd_pcm_ops pcm_ops = {
+ .open = usb6fire_pcm_open,
+ .close = usb6fire_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = usb6fire_pcm_hw_params,
+ .hw_free = usb6fire_pcm_hw_free,
+ .prepare = usb6fire_pcm_prepare,
+ .trigger = usb6fire_pcm_trigger,
+ .pointer = usb6fire_pcm_pointer,
+};
+
+static void __devinit usb6fire_pcm_init_urb(struct pcm_urb *urb,
+ struct sfire_chip *chip, bool in, int ep,
+ void (*handler)(struct urb *))
+{
+ urb->chip = chip;
+ usb_init_urb(&urb->instance);
+ urb->instance.transfer_buffer = urb->buffer;
+ urb->instance.transfer_buffer_length =
+ PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE;
+ urb->instance.dev = chip->dev;
+ urb->instance.pipe = in ? usb_rcvisocpipe(chip->dev, ep)
+ : usb_sndisocpipe(chip->dev, ep);
+ urb->instance.interval = 1;
+ urb->instance.transfer_flags = URB_ISO_ASAP;
+ urb->instance.complete = handler;
+ urb->instance.context = urb;
+ urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
+}
+
+int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+{
+ int i;
+ int ret;
+ struct snd_pcm *pcm;
+ struct pcm_runtime *rt =
+ kzalloc(sizeof(struct pcm_runtime), GFP_KERNEL);
+
+ if (!rt)
+ return -ENOMEM;
+
+ rt->chip = chip;
+ rt->stream_state = STREAM_DISABLED;
+ rt->rate = -1;
+ init_waitqueue_head(&rt->stream_wait_queue);
+ mutex_init(&rt->stream_mutex);
+
+ spin_lock_init(&rt->playback.lock);
+ spin_lock_init(&rt->capture.lock);
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ usb6fire_pcm_init_urb(&rt->in_urbs[i], chip, true, IN_EP,
+ usb6fire_pcm_in_urb_handler);
+ usb6fire_pcm_init_urb(&rt->out_urbs[i], chip, false, OUT_EP,
+ usb6fire_pcm_out_urb_handler);
+
+ rt->in_urbs[i].peer = &rt->out_urbs[i];
+ rt->out_urbs[i].peer = &rt->in_urbs[i];
+ }
+
+ ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
+ if (ret < 0) {
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
+ return ret;
+ }
+
+ pcm->private_data = rt;
+ strcpy(pcm->name, "DMX 6Fire USB");
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
+
+ ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ MAX_BUFSIZE, MAX_BUFSIZE);
+ if (ret) {
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX
+ "error preallocating pcm buffers.\n");
+ return ret;
+ }
+ rt->instance = pcm;
+
+ chip->pcm = rt;
+ return 0;
+}
+
+void usb6fire_pcm_abort(struct sfire_chip *chip)
+{
+ struct pcm_runtime *rt = chip->pcm;
+ int i;
+
+ if (rt) {
+ rt->panic = true;
+
+ if (rt->playback.instance)
+ snd_pcm_stop(rt->playback.instance,
+ SNDRV_PCM_STATE_XRUN);
+ if (rt->capture.instance)
+ snd_pcm_stop(rt->capture.instance,
+ SNDRV_PCM_STATE_XRUN);
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ usb_poison_urb(&rt->in_urbs[i].instance);
+ usb_poison_urb(&rt->out_urbs[i].instance);
+ }
+
+ }
+}
+
+void usb6fire_pcm_destroy(struct sfire_chip *chip)
+{
+ kfree(chip->pcm);
+ chip->pcm = NULL;
+}
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
new file mode 100644
index 000000000000..2bee81374002
--- /dev/null
+++ b/sound/usb/6fire/pcm.h
@@ -0,0 +1,76 @@
+/*
+ * Linux driver for TerraTec DMX 6Fire USB
+ *
+ * Author: Torsten Schenk <torsten.schenk@zoho.com>
+ * Created: Jan 01, 2011
+ * Version: 0.3.0
+ * Copyright: (C) Torsten Schenk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef USB6FIRE_PCM_H
+#define USB6FIRE_PCM_H
+
+#include <sound/pcm.h>
+#include <linux/mutex.h>
+
+#include "common.h"
+
+enum /* settings for pcm */
+{
+ /* maximum of ep_w_max_packet_size[] (see firmware.c) */
+ PCM_N_URBS = 16, PCM_N_PACKETS_PER_URB = 8, PCM_MAX_PACKET_SIZE = 604
+};
+
+struct pcm_urb {
+ struct sfire_chip *chip;
+
+ /* BEGIN DO NOT SEPARATE */
+ struct urb instance;
+ struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
+ /* END DO NOT SEPARATE */
+ u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+
+ struct pcm_urb *peer;
+};
+
+struct pcm_substream {
+ spinlock_t lock;
+ struct snd_pcm_substream *instance;
+
+ bool active;
+
+ snd_pcm_uframes_t dma_off; /* current position in alsa dma_area */
+ snd_pcm_uframes_t period_off; /* current position in current period */
+};
+
+struct pcm_runtime {
+ struct sfire_chip *chip;
+ struct snd_pcm *instance;
+
+ struct pcm_substream playback;
+ struct pcm_substream capture;
+ bool panic; /* if set driver won't do anymore pcm on device */
+
+ struct pcm_urb in_urbs[PCM_N_URBS];
+ struct pcm_urb out_urbs[PCM_N_URBS];
+ int in_packet_size;
+ int out_packet_size;
+ int in_n_analog; /* number of analog channels soundcard sends */
+ int out_n_analog; /* number of analog channels soundcard receives */
+
+ struct mutex stream_mutex;
+ u8 stream_state; /* one of STREAM_XXX (pcm.c) */
+ u8 rate; /* one of RATE_XXX (see pcm.c) */
+ wait_queue_head_t stream_wait_queue;
+ bool stream_wait_cond;
+};
+
+int __devinit usb6fire_pcm_init(struct sfire_chip *chip);
+void usb6fire_pcm_abort(struct sfire_chip *chip);
+void usb6fire_pcm_destroy(struct sfire_chip *chip);
+#endif /* USB6FIRE_PCM_H */
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index 112984f4080f..97724d8fa9f6 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -62,6 +62,7 @@ config SND_USB_CAIAQ
* Native Instruments Audio 2 DJ
* Native Instruments Audio 4 DJ
* Native Instruments Audio 8 DJ
+ * Native Instruments Traktor Audio 2
* Native Instruments Guitar Rig Session I/O
* Native Instruments Guitar Rig mobile
* Native Instruments Traktor Kontrol X1
@@ -97,5 +98,21 @@ config SND_USB_US122L
To compile this driver as a module, choose M here: the module
will be called snd-usb-us122l.
+config SND_USB_6FIRE
+ tristate "TerraTec DMX 6Fire USB"
+ depends on EXPERIMENTAL
+ select FW_LOADER
+ select SND_RAWMIDI
+ select SND_PCM
+ help
+ Say Y here to include support for the TerraTec DMX 6Fire USB interface.
+
+ You will need firmware files in order to be able to use the device
+ after it has been cold-started. This driver does not yet support
+ firmware loading for all devices. If you own such a device, you can
+ boot Windows and let the Windows driver upload the firmware. As long
+ as you do not unplug the device from power, it should remain usable.
+
endif # SND_USB
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 1e362bf8834f..cf9ed66445fa 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -23,4 +23,4 @@ obj-$(CONFIG_SND_USB_UA101) += snd-usbmidi-lib.o
obj-$(CONFIG_SND_USB_USX2Y) += snd-usbmidi-lib.o
obj-$(CONFIG_SND_USB_US122L) += snd-usbmidi-lib.o
-obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/
+obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 66eabafb1c24..d0d493ca28ae 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -805,6 +805,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO2DJ):
case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ):
case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ):
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORAUDIO2):
dev->samplerates |= SNDRV_PCM_RATE_88200;
break;
}
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 6480c3283c05..45bc4a2dc6f0 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -46,6 +46,7 @@ MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
"{Native Instruments, Audio 2 DJ},"
"{Native Instruments, Audio 4 DJ},"
"{Native Instruments, Audio 8 DJ},"
+ "{Native Instruments, Traktor Audio 2},"
"{Native Instruments, Session I/O},"
"{Native Instruments, GuitarRig mobile}"
"{Native Instruments, Traktor Kontrol X1}"
@@ -140,6 +141,11 @@ static struct usb_device_id snd_usb_id_table[] = {
.idVendor = USB_VID_NATIVEINSTRUMENTS,
.idProduct = USB_PID_TRAKTORKONTROLS4
},
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = USB_VID_NATIVEINSTRUMENTS,
+ .idProduct = USB_PID_TRAKTORAUDIO2
+ },
{ /* terminator */ }
};
diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h
index e3d8a3efb35b..b2b310194ffa 100644
--- a/sound/usb/caiaq/device.h
+++ b/sound/usb/caiaq/device.h
@@ -17,6 +17,7 @@
#define USB_PID_GUITARRIGMOBILE 0x0d8d
#define USB_PID_TRAKTORKONTROLX1 0x2305
#define USB_PID_TRAKTORKONTROLS4 0xbaff
+#define USB_PID_TRAKTORAUDIO2 0x041d
#define EP1_BUFSIZE 64
#define EP4_BUFSIZE 512
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 800f7cb4f251..c0f8270bc199 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
return -ENOMEM;
}
+ mutex_init(&chip->shutdown_mutex);
chip->index = idx;
chip->dev = dev;
chip->card = card;
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
chip = ptr;
card = chip->card;
mutex_lock(&register_mutex);
+ mutex_lock(&chip->shutdown_mutex);
chip->shutdown = 1;
chip->num_interfaces--;
if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
snd_usb_mixer_disconnect(p);
}
usb_chip[chip->index] = NULL;
+ mutex_unlock(&chip->shutdown_mutex);
mutex_unlock(&register_mutex);
snd_card_free_when_closed(card);
} else {
+ mutex_unlock(&chip->shutdown_mutex);
mutex_unlock(&register_mutex);
}
}
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 782f741cd00a..73dcc8256bc0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -346,6 +346,141 @@ static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+/* Native Instruments device quirks */
+
+#define _MAKE_NI_CONTROL(bRequest,wIndex) ((bRequest) << 16 | (wIndex))
+
+static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ struct usb_device *dev = mixer->chip->dev;
+ u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
+ u16 wIndex = kcontrol->private_value & 0xffff;
+ u8 tmp;
+
+ int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0, cpu_to_le16(wIndex),
+ &tmp, sizeof(tmp), 1000);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR
+ "unable to issue vendor read request (ret = %d)", ret);
+ return ret;
+ }
+
+ ucontrol->value.integer.value[0] = tmp;
+
+ return 0;
+}
+
+static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ struct usb_device *dev = mixer->chip->dev;
+ u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
+ u16 wIndex = kcontrol->private_value & 0xffff;
+ u16 wValue = ucontrol->value.integer.value[0];
+
+ int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ cpu_to_le16(wValue), cpu_to_le16(wIndex),
+ NULL, 0, 1000);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR
+ "unable to issue vendor write request (ret = %d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_kcontrol_new snd_nativeinstruments_ta6_mixers[] = {
+ {
+ .name = "Direct Thru Channel A",
+ .private_value = _MAKE_NI_CONTROL(0x01, 0x03),
+ },
+ {
+ .name = "Direct Thru Channel B",
+ .private_value = _MAKE_NI_CONTROL(0x01, 0x05),
+ },
+ {
+ .name = "Phono Input Channel A",
+ .private_value = _MAKE_NI_CONTROL(0x02, 0x03),
+ },
+ {
+ .name = "Phono Input Channel B",
+ .private_value = _MAKE_NI_CONTROL(0x02, 0x05),
+ },
+};
+
+static struct snd_kcontrol_new snd_nativeinstruments_ta10_mixers[] = {
+ {
+ .name = "Direct Thru Channel A",
+ .private_value = _MAKE_NI_CONTROL(0x01, 0x03),
+ },
+ {
+ .name = "Direct Thru Channel B",
+ .private_value = _MAKE_NI_CONTROL(0x01, 0x05),
+ },
+ {
+ .name = "Direct Thru Channel C",
+ .private_value = _MAKE_NI_CONTROL(0x01, 0x07),
+ },
+ {
+ .name = "Direct Thru Channel D",
+ .private_value = _MAKE_NI_CONTROL(0x01, 0x09),
+ },
+ {
+ .name = "Phono Input Channel A",
+ .private_value = _MAKE_NI_CONTROL(0x02, 0x03),
+ },
+ {
+ .name = "Phono Input Channel B",
+ .private_value = _MAKE_NI_CONTROL(0x02, 0x05),
+ },
+ {
+ .name = "Phono Input Channel C",
+ .private_value = _MAKE_NI_CONTROL(0x02, 0x07),
+ },
+ {
+ .name = "Phono Input Channel D",
+ .private_value = _MAKE_NI_CONTROL(0x02, 0x09),
+ },
+};
+
+static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
+ const struct snd_kcontrol_new *kc,
+ unsigned int count)
+{
+ int i, err = 0;
+ struct snd_kcontrol_new template = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .get = snd_nativeinstruments_control_get,
+ .put = snd_nativeinstruments_control_put,
+ .info = snd_ctl_boolean_mono_info,
+ };
+
+ for (i = 0; i < count; i++) {
+ struct snd_kcontrol *c;
+
+ template.name = kc[i].name;
+ template.private_value = kc[i].private_value;
+
+ c = snd_ctl_new1(&template, mixer);
+ err = snd_ctl_add(mixer->chip->card, c);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
unsigned char samplerate_id)
{
@@ -367,31 +502,44 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
- int err;
+ int err = 0;
struct snd_info_entry *entry;
if ((err = snd_usb_soundblaster_remote_init(mixer)) < 0)
return err;
- if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
- if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
- return err;
+ switch (mixer->chip->usb_id) {
+ case USB_ID(0x041e, 0x3020):
+ case USB_ID(0x041e, 0x3040):
+ case USB_ID(0x041e, 0x3042):
+ case USB_ID(0x041e, 0x3048):
+ err = snd_audigy2nx_controls_create(mixer);
+ if (err < 0)
+ break;
if (!snd_card_proc_new(mixer->chip->card, "audigy2nx", &entry))
snd_info_set_text_ops(entry, mixer,
snd_audigy2nx_proc_read);
- }
+ break;
- if (mixer->chip->usb_id == USB_ID(0x0b05, 0x1739) ||
- mixer->chip->usb_id == USB_ID(0x0b05, 0x1743)) {
+ case USB_ID(0x0b05, 0x1739):
+ case USB_ID(0x0b05, 0x1743):
err = snd_xonar_u1_controls_create(mixer);
- if (err < 0)
- return err;
+ break;
+
+ case USB_ID(0x17cc, 0x1011): /* Traktor Audio 6 */
+ err = snd_nativeinstruments_create_mixer(mixer,
+ snd_nativeinstruments_ta6_mixers,
+ ARRAY_SIZE(snd_nativeinstruments_ta6_mixers));
+ break;
+
+ case USB_ID(0x17cc, 0x1021): /* Traktor Audio 10 */
+ err = snd_nativeinstruments_create_mixer(mixer,
+ snd_nativeinstruments_ta10_mixers,
+ ARRAY_SIZE(snd_nativeinstruments_ta10_mixers));
+ break;
}
- return 0;
+ return err;
}
void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
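
As a small worked example of the packing macro above (a fragment for illustration, not patch content): for the "Direct Thru Channel A" control, _MAKE_NI_CONTROL(0x01, 0x03) packs the vendor request and index into one private_value, which the get/put callbacks unpack again:

	u32 pv = _MAKE_NI_CONTROL(0x01, 0x03);	/* (0x01 << 16) | 0x03 == 0x00010003 */
	u8  bRequest = (pv >> 16) & 0xff;	/* 0x01 */
	u16 wIndex   = pv & 0xffff;		/* 0x0003 */
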
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4132522ac90f..e3f680526cb5 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
}
if (changed) {
+ mutex_lock(&subs->stream->chip->shutdown_mutex);
/* format changed */
snd_usb_release_substream_urbs(subs, 0);
/* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
params_rate(hw_params),
snd_pcm_format_physical_width(params_format(hw_params)) *
params_channels(hw_params));
+ mutex_unlock(&subs->stream->chip->shutdown_mutex);
}
return ret;
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
subs->cur_audiofmt = NULL;
subs->cur_rate = 0;
subs->period_bytes = 0;
- if (!subs->stream->chip->shutdown)
- snd_usb_release_substream_urbs(subs, 0);
+ mutex_lock(&subs->stream->chip->shutdown_mutex);
+ snd_usb_release_substream_urbs(subs, 0);
+ mutex_unlock(&subs->stream->chip->shutdown_mutex);
return snd_pcm_lib_free_vmalloc_buffer(substream);
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 921a86fd9884..c0dcfca9b5b5 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2290,6 +2290,20 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+/* Native Instruments MK2 series */
+{
+ /* Traktor Audio 6 */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x17cc,
+ .idProduct = 0x1010,
+},
+{
+ /* Traktor Audio 10 */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x17cc,
+ .idProduct = 0x1020,
+},
+
/* Miditech devices */
{
USB_DEVICE(0x4752, 0x0011),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index e314cdb85003..355759bad581 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -425,6 +425,34 @@ static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
}
/*
+ * Some sound cards from Native Instruments are in fact compliant with the
+ * USB audio standard version 2 and other approved USB standards, even
+ * though they come up as a vendor-specific device when first connected.
+ *
+ * However, they can be told to come up with a new set of descriptors
+ * upon their next enumeration, and the interfaces announced by the new
+ * descriptors will then be handled by the kernel's class drivers. As the
+ * product ID will also change, no further checks are required.
+ */
+
+static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
+{
+ int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ cpu_to_le16(1), 0, NULL, 0, 1000);
+
+ if (ret < 0)
+ return ret;
+
+ usb_reset_device(dev);
+
+ /* return -EAGAIN, so the creation of an audio interface for this
+ * temporary device is aborted. The device will reconnect with a
+ * new product ID */
+ return -EAGAIN;
+}
+
+/*
* Setup quirks
*/
#define AUDIOPHILE_SET 0x01 /* if set, parse device_setup */
@@ -489,27 +517,33 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
u32 id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
- /* SB Extigy needs special boot-up sequence */
- /* if more models come, this will go to the quirk list. */
- if (id == USB_ID(0x041e, 0x3000))
+ switch (id) {
+ case USB_ID(0x041e, 0x3000):
+ /* SB Extigy needs special boot-up sequence */
+ /* if more models come, this will go to the quirk list. */
return snd_usb_extigy_boot_quirk(dev, intf);
- /* SB Audigy 2 NX needs its own boot-up magic, too */
- if (id == USB_ID(0x041e, 0x3020))
+ case USB_ID(0x041e, 0x3020):
+ /* SB Audigy 2 NX needs its own boot-up magic, too */
return snd_usb_audigy2nx_boot_quirk(dev);
- /* C-Media CM106 / Turtle Beach Audio Advantage Roadie */
- if (id == USB_ID(0x10f5, 0x0200))
+ case USB_ID(0x10f5, 0x0200):
+ /* C-Media CM106 / Turtle Beach Audio Advantage Roadie */
return snd_usb_cm106_boot_quirk(dev);
- /* C-Media CM6206 / CM106-Like Sound Device */
- if (id == USB_ID(0x0d8c, 0x0102))
+ case USB_ID(0x0d8c, 0x0102):
+ /* C-Media CM6206 / CM106-Like Sound Device */
return snd_usb_cm6206_boot_quirk(dev);
- /* Access Music VirusTI Desktop */
- if (id == USB_ID(0x133e, 0x0815))
+ case USB_ID(0x133e, 0x0815):
+ /* Access Music VirusTI Desktop */
return snd_usb_accessmusic_boot_quirk(dev);
+ case USB_ID(0x17cc, 0x1010): /* Traktor Audio 6 */
+ case USB_ID(0x17cc, 0x1020): /* Traktor Audio 10 */
+ return snd_usb_nativeinstruments_boot_quirk(dev);
+ }
+
return 0;
}
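
To make the two-stage handling above easier to follow, here is a condensed, purely illustrative routing of the four product IDs involved. This switch does not exist as such in the patch; the first pair is matched by the quirks-table.h entries and the boot quirk, the second pair appears only after re-enumeration and is matched by the mixer quirk added in mixer_quirks.c.

	switch (id) {
	case USB_ID(0x17cc, 0x1010):	/* Traktor Audio 6, vendor mode */
	case USB_ID(0x17cc, 0x1020):	/* Traktor Audio 10, vendor mode */
		/* the boot quirk switches the device to class-compliant mode,
		 * usb_reset_device() forces re-enumeration, and -EAGAIN aborts
		 * this temporary probe */
		return snd_usb_nativeinstruments_boot_quirk(dev);
	case USB_ID(0x17cc, 0x1011):	/* Traktor Audio 6, class-compliant */
	case USB_ID(0x17cc, 0x1021):	/* Traktor Audio 10, class-compliant */
		/* handled by the generic class-driver code; only the extra
		 * mixer controls from mixer_quirks.c are added */
		break;
	}
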
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index db3eb21627ee..6e66fffe87f5 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -36,6 +36,7 @@ struct snd_usb_audio {
struct snd_card *card;
u32 usb_id;
int shutdown;
+ struct mutex shutdown_mutex;
unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
int num_interfaces;
int num_suspended_intf;
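
Taken together, the card.c, pcm.c and usbaudio.h hunks above establish one locking rule: URB release in the PCM callbacks is serialized against device teardown. A condensed sketch of the two sides (illustrative fragment only, using the names from the hunks above):

	/* disconnect side (card.c) */
	mutex_lock(&chip->shutdown_mutex);
	chip->shutdown = 1;
	/* ... disconnect streams and mixers ... */
	mutex_unlock(&chip->shutdown_mutex);

	/* PCM side (pcm.c, hw_params/hw_free) */
	mutex_lock(&subs->stream->chip->shutdown_mutex);
	snd_usb_release_substream_urbs(subs, 0);
	mutex_unlock(&subs->stream->chip->shutdown_mutex);
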
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index cb43289e447f..416684be0ad3 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -1,4 +1,3 @@
-PERF-BUILD-OPTIONS
PERF-CFLAGS
PERF-GUI-VARS
PERF-VERSION-FILE
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index bd498d496952..4626a398836a 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -178,8 +178,8 @@ install-pdf: pdf
$(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir)
$(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir)
-install-html: html
- '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
+#install-html: html
+# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
$(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE
@@ -288,15 +288,16 @@ $(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \
mv $@+ $@
-install-webdoc : html
- '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
+# UNIMPLEMENTED
+#install-webdoc : html
+# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
-quick-install: quick-install-man
+# quick-install: quick-install-man
-quick-install-man:
- '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir)
+# quick-install-man:
+# '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir)
-quick-install-html:
- '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir)
+#quick-install-html:
+# '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir)
.PHONY: .FORCE-PERF-VERSION-FILE
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 399751befeed..7a527f7e9da9 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -8,7 +8,7 @@ perf-list - List all symbolic event types
SYNOPSIS
--------
[verse]
-'perf list'
+'perf list' [hw|sw|cache|tracepoint|event_glob]
DESCRIPTION
-----------
@@ -63,7 +63,26 @@ details. Some of them are referenced in the SEE ALSO section below.
OPTIONS
-------
-None
+
+Without options, all known events will be listed.
+
+To limit the list, use:
+
+. 'hw' or 'hardware' to list hardware events such as cache-misses, etc.
+
+. 'sw' or 'software' to list software events such as context switches, etc.
+
+. 'cache' or 'hwcache' to list hardware cache events such as L1-dcache-loads, etc.
+
+. 'tracepoint' to list all tracepoint events, alternatively use
+ 'subsys_glob:event_glob' to filter by tracepoint subsystems such as sched,
+ block, etc.
+
+. If none of the above is matched, it will apply the supplied glob to all
+ events, printing the ones that match.
+
+One or more types can be used at the same time, listing the events for the
+types specified.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 921de259ea10..4a26a2f3a6a3 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -24,8 +24,8 @@ and statistics with this 'perf lock' command.
'perf lock report' reports statistical data.
-OPTIONS
--------
+COMMON OPTIONS
+--------------
-i::
--input=<file>::
@@ -39,6 +39,14 @@ OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
+REPORT OPTIONS
+--------------
+
+-k::
+--key=<value>::
+ Sorting key. Possible values: acquired (default), contended,
+ wait_total, wait_max, wait_min.
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 86b797a35aa6..02bafce4b341 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -16,7 +16,7 @@ or
or
'perf probe' --list
or
-'perf probe' [options] --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+'perf probe' [options] --line='LINE'
or
'perf probe' [options] --vars='PROBEPOINT'
@@ -73,6 +73,17 @@ OPTIONS
(Only for --vars) Show external defined variables in addition to local
variables.
+-F::
+--funcs::
+ Show available functions in the given module or in the kernel.
+
+--filter=FILTER::
+ (Only for --vars and --funcs) Set a filter. FILTER is a combination of glob
+ patterns; see FILTER PATTERN for details.
+ Default FILTER is "!__k???tab_* & !__crc_*" for --vars, and "!_*"
+ for --funcs.
+ If several filters are specified, only the last filter is used.
+
-f::
--force::
Forcibly add events with existing name.
@@ -117,13 +128,14 @@ LINE SYNTAX
-----------
Line range is described by following syntax.
- "FUNC[:RLN[+NUM|-RLN2]]|SRC[:ALN[+NUM|-ALN2]]"
+ "FUNC[@SRC][:RLN[+NUM|-RLN2]]|SRC[:ALN[+NUM|-ALN2]]"
FUNC specifies the function name of showing lines. 'RLN' is the start line
number from function entry line, and 'RLN2' is the end line number. As same as
probe syntax, 'SRC' means the source file path, 'ALN' is start line number,
and 'ALN2' is end line number in the file. It is also possible to specify how
-many lines to show by using 'NUM'.
+many lines to show by using 'NUM'. Moreover, the 'FUNC@SRC' combination is
+useful for finding a specific function when several functions share the same name.
So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function.
LAZY MATCHING
@@ -135,6 +147,14 @@ e.g.
This provides some sort of flexibility and robustness to probe point definitions against minor code changes. For example, actual 10th line of schedule() can be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist in the function.)
+FILTER PATTERN
+--------------
+ The filter pattern is one or more glob-matching patterns used to filter variables.
+ In addition, you can use "!" to specify a filter-out rule. You can also combine several rules with "&" or "|", and group them into a single rule by using "(" and ")".
+
+e.g.
+ With --filter "foo* | bar*", perf probe -V shows variables which start with "foo" or "bar".
+ With --filter "!foo* & *bar", perf probe -V shows variables which don't start with "foo" and end with "bar", like "fizzbar". But "foobar" is filtered out.
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e032716c839b..5a520f825295 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
Do not update the buildid cache. This saves some overhead in situations
where the information in the perf.data file (which includes buildids)
is sufficient.
+-G name,...::
+--cgroup name,...::
+Monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., the first
+cgroup to the first event, the second cgroup to the second event, and so on. It is
+possible to provide an empty cgroup (monitor all the time) using, e.g., -G foo,,bar.
+Cgroups must have corresponding events, i.e., they always refer to events defined
+earlier on the command line.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index b6da7affbbee..918cc38ee6d1 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -83,6 +83,17 @@ This option is only valid in system-wide mode.
print counts using a CSV-style output to make it easy to import directly into
spreadsheets. Columns are separated by the string specified in SEP.
+-G name::
+--cgroup name::
+Monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., the first
+cgroup to the first event, the second cgroup to the second event, and so on. It is
+possible to provide an empty cgroup (monitor all the time) using, e.g., -G foo,,bar.
+Cgroups must have corresponding events, i.e., they always refer to events defined
+earlier on the command line.
+
EXAMPLES
--------
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 7141c42e1469..9b8421805c5c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -3,7 +3,7 @@ ifeq ("$(origin O)", "command line")
endif
# The default target of this Makefile is...
-all::
+all:
ifneq ($(OUTPUT),)
# check that the output directory actually exists
@@ -11,152 +11,12 @@ OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
-# Define V=1 to have a more verbose compile.
-# Define V=2 to have an even more verbose compile.
-#
-# Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf()
-# or vsnprintf() return -1 instead of number of characters which would
-# have been written to the final string if enough space had been available.
-#
-# Define FREAD_READS_DIRECTORIES if your are on a system which succeeds
-# when attempting to read from an fopen'ed directory.
-#
-# Define NO_OPENSSL environment variable if you do not have OpenSSL.
-# This also implies MOZILLA_SHA1.
-#
-# Define CURLDIR=/foo/bar if your curl header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define EXPATDIR=/foo/bar if your expat header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent.
-#
-# Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks
-# d_type in struct dirent (latest Cygwin -- will be fixed soonish).
-#
-# Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.)
-# do not support the 'size specifiers' introduced by C99, namely ll, hh,
-# j, z, t. (representing long long int, char, intmax_t, size_t, ptrdiff_t).
-# some C compilers supported these specifiers prior to C99 as an extension.
-#
-# Define NO_STRCASESTR if you don't have strcasestr.
-#
-# Define NO_MEMMEM if you don't have memmem.
-#
-# Define NO_STRTOUMAX if you don't have strtoumax in the C library.
-# If your compiler also does not support long long or does not have
-# strtoull, define NO_STRTOULL.
-#
-# Define NO_SETENV if you don't have setenv in the C library.
-#
-# Define NO_UNSETENV if you don't have unsetenv in the C library.
-#
-# Define NO_MKDTEMP if you don't have mkdtemp in the C library.
-#
-# Define NO_SYS_SELECT_H if you don't have sys/select.h.
-#
-# Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link.
-# Enable it on Windows. By default, symrefs are still used.
-#
-# Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability
-# tests. These tests take up a significant amount of the total test time
-# but are not needed unless you plan to talk to SVN repos.
-#
-# Define NO_FINK if you are building on Darwin/Mac OS X, have Fink
-# installed in /sw, but don't want PERF to link against any libraries
-# installed there. If defined you may specify your own (or Fink's)
-# include directories and library directories by defining CFLAGS
-# and LDFLAGS appropriately.
-#
-# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X,
-# have DarwinPorts installed in /opt/local, but don't want PERF to
-# link against any libraries installed there. If defined you may
-# specify your own (or DarwinPort's) include directories and
-# library directories by defining CFLAGS and LDFLAGS appropriately.
-#
-# Define PPC_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine optimized for PowerPC.
-#
-# Define ARM_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine optimized for ARM.
-#
-# Define MOZILLA_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast
-# on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default
-# choice) has very fast version optimized for i586.
-#
-# Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin).
-#
-# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin).
-#
-# Define NEEDS_SOCKET if linking with libc is not enough (SunOS,
-# Patrick Mauritz).
-#
-# Define NO_MMAP if you want to avoid mmap.
-#
-# Define NO_PTHREADS if you do not have or do not want to use Pthreads.
-#
-# Define NO_PREAD if you have a problem with pread() system call (e.g.
-# cygwin.dll before v1.5.22).
-#
-# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is
-# generally faster on your platform than accessing the working directory.
-#
-# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support
-# the executable mode bit, but doesn't really do so.
-#
-# Define NO_IPV6 if you lack IPv6 support and getaddrinfo().
-#
-# Define NO_SOCKADDR_STORAGE if your platform does not have struct
-# sockaddr_storage.
-#
-# Define NO_ICONV if your libc does not properly support iconv.
-#
-# Define OLD_ICONV if your library has an old iconv(), where the second
-# (input buffer pointer) parameter is declared with type (const char **).
-#
-# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound.
-#
-# Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib"
-# that tells runtime paths to dynamic libraries;
-# "-Wl,-rpath=/path/lib" is used instead.
-#
-# Define USE_NSEC below if you want perf to care about sub-second file mtimes
-# and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and
-# it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely
-# randomly break unless your underlying filesystem supports those sub-second
-# times (my ext3 doesn't).
-#
-# Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of
-# "st_ctim"
-#
-# Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec"
-# available. This automatically turns USE_NSEC off.
-#
-# Define USE_STDEV below if you want perf to care about the underlying device
-# change being considered an inode change from the update-index perspective.
-#
-# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks
-# field that counts the on-disk footprint in 512-byte blocks.
+# Define V to have a more verbose compile.
#
# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
#
# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
#
-# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
-# MakeMaker (e.g. using ActiveState under Cygwin).
-#
-# Define NO_PERL if you do not want Perl scripts or libraries at all.
-#
-# Define INTERNAL_QSORT to use Git's implementation of qsort(), which
-# is a simplified version of the merge sort used in glibc. This is
-# recommended if Git triggers O(n^2) behavior in your platform's qsort().
-#
-# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call
-# your external grep (e.g., if your system lacks grep, if its grep is
-# broken, or spawning external process is slower than built-in grep perf has).
-#
# Define LDFLAGS=-static to build a static binary.
#
# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
@@ -167,12 +27,7 @@ $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
-include $(OUTPUT)PERF-VERSION-FILE
-uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
-uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
-uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not')
-uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
-uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
-uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/arm.*/arm/ -e s/sa110/arm/ \
@@ -191,8 +46,6 @@ ifeq ($(ARCH),x86_64)
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
endif
-# CFLAGS and LDFLAGS are for the users to override from the command line.
-
#
# Include saner warnings here, which can catch bugs:
#
@@ -270,22 +123,13 @@ CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
RM = rm -f
MKDIR = mkdir
-TAR = tar
FIND = find
INSTALL = install
-RPMBUILD = rpmbuild
-PTHREAD_LIBS = -lpthread
# sparse is architecture-neutral, which means that we need to tell it
# explicitly what architecture to check for. Fix this up for yours..
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
-ifeq ($(V), 2)
- QUIET_STDERR = ">/dev/null"
-else
- QUIET_STDERR = ">/dev/null 2>&1"
-endif
-
-include feature-tests.mak
ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
@@ -310,49 +154,37 @@ BASIC_LDFLAGS =
# Guard against environment variables
BUILTIN_OBJS =
-BUILT_INS =
-COMPAT_CFLAGS =
-COMPAT_OBJS =
LIB_H =
LIB_OBJS =
-SCRIPT_PERL =
+PYRF_OBJS =
SCRIPT_SH =
-TEST_PROGRAMS =
SCRIPT_SH += perf-archive.sh
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
+$(OUTPUT)python/perf.so: $(PYRF_OBJS)
+ $(QUIET_GEN)python util/setup.py --quiet build_ext --build-lib='$(OUTPUT)python' \
+ --build-temp='$(OUTPUT)python/temp'
#
# No Perl scripts right now:
#
-# SCRIPT_PERL += perf-add--interactive.perl
-
-SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \
- $(patsubst %.perl,%,$(SCRIPT_PERL))
-
-# Empty...
-EXTRA_PROGRAMS =
-
-# ... and all the rest that could be moved out of bindir to perfexecdir
-PROGRAMS += $(EXTRA_PROGRAMS)
+SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
#
# Single 'perf' binary right now:
#
PROGRAMS += $(OUTPUT)perf
-# List built-in command $C whose implementation cmd_$C() is not in
-# builtin-$C.o but is linked in as part of some other command.
-#
+LANG_BINDINGS =
# what 'all' will build and 'install' will install, in perfexecdir
ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
# what 'all' will build but not install in perfexecdir
-OTHER_PROGRAMS = $(OUTPUT)perf$X
+OTHER_PROGRAMS = $(OUTPUT)perf
# Set paths to tools early so that they can be used for version tests.
ifndef SHELL_PATH
@@ -395,6 +227,7 @@ LIB_H += util/include/dwarf-regs.h
LIB_H += util/include/asm/dwarf2.h
LIB_H += util/include/asm/cpufeature.h
LIB_H += perf.h
+LIB_H += util/annotate.h
LIB_H += util/cache.h
LIB_H += util/callchain.h
LIB_H += util/build-id.h
@@ -402,6 +235,7 @@ LIB_H += util/debug.h
LIB_H += util/debugfs.h
LIB_H += util/event.h
LIB_H += util/evsel.h
+LIB_H += util/evlist.h
LIB_H += util/exec_cmd.h
LIB_H += util/types.h
LIB_H += util/levenshtein.h
@@ -416,6 +250,7 @@ LIB_H += util/help.h
LIB_H += util/session.h
LIB_H += util/strbuf.h
LIB_H += util/strlist.h
+LIB_H += util/strfilter.h
LIB_H += util/svghelper.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
@@ -425,21 +260,26 @@ LIB_H += util/values.h
LIB_H += util/sort.h
LIB_H += util/hist.h
LIB_H += util/thread.h
+LIB_H += util/thread_map.h
LIB_H += util/trace-event.h
LIB_H += util/probe-finder.h
LIB_H += util/probe-event.h
LIB_H += util/pstack.h
LIB_H += util/cpumap.h
+LIB_H += util/top.h
LIB_H += $(ARCH_INCLUDE)
+LIB_H += util/cgroup.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
+LIB_OBJS += $(OUTPUT)util/annotate.o
LIB_OBJS += $(OUTPUT)util/build-id.o
LIB_OBJS += $(OUTPUT)util/config.o
LIB_OBJS += $(OUTPUT)util/ctype.o
LIB_OBJS += $(OUTPUT)util/debugfs.o
LIB_OBJS += $(OUTPUT)util/environment.o
LIB_OBJS += $(OUTPUT)util/event.o
+LIB_OBJS += $(OUTPUT)util/evlist.o
LIB_OBJS += $(OUTPUT)util/evsel.o
LIB_OBJS += $(OUTPUT)util/exec_cmd.o
LIB_OBJS += $(OUTPUT)util/help.o
@@ -455,6 +295,8 @@ LIB_OBJS += $(OUTPUT)util/quote.o
LIB_OBJS += $(OUTPUT)util/strbuf.o
LIB_OBJS += $(OUTPUT)util/string.o
LIB_OBJS += $(OUTPUT)util/strlist.o
+LIB_OBJS += $(OUTPUT)util/strfilter.o
+LIB_OBJS += $(OUTPUT)util/top.o
LIB_OBJS += $(OUTPUT)util/usage.o
LIB_OBJS += $(OUTPUT)util/wrapper.o
LIB_OBJS += $(OUTPUT)util/sigchain.o
@@ -469,6 +311,7 @@ LIB_OBJS += $(OUTPUT)util/map.o
LIB_OBJS += $(OUTPUT)util/pstack.o
LIB_OBJS += $(OUTPUT)util/session.o
LIB_OBJS += $(OUTPUT)util/thread.o
+LIB_OBJS += $(OUTPUT)util/thread_map.o
LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
LIB_OBJS += $(OUTPUT)util/trace-event-read.o
LIB_OBJS += $(OUTPUT)util/trace-event-info.o
@@ -480,6 +323,7 @@ LIB_OBJS += $(OUTPUT)util/probe-event.o
LIB_OBJS += $(OUTPUT)util/util.o
LIB_OBJS += $(OUTPUT)util/xyarray.o
LIB_OBJS += $(OUTPUT)util/cpumap.o
+LIB_OBJS += $(OUTPUT)util/cgroup.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
@@ -514,6 +358,20 @@ BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
PERFLIBS = $(LIB_FILE)
+# Files needed for the python binding, perf.so
+# pyrf is just an internal name needed for all those wrappers.
+# This has to be in sync with what is in the 'sources' variable in
+# tools/perf/util/setup.py
+
+PYRF_OBJS += $(OUTPUT)util/cpumap.o
+PYRF_OBJS += $(OUTPUT)util/ctype.o
+PYRF_OBJS += $(OUTPUT)util/evlist.o
+PYRF_OBJS += $(OUTPUT)util/evsel.o
+PYRF_OBJS += $(OUTPUT)util/python.o
+PYRF_OBJS += $(OUTPUT)util/thread_map.o
+PYRF_OBJS += $(OUTPUT)util/util.o
+PYRF_OBJS += $(OUTPUT)util/xyarray.o
+
#
# Platform specific tweaks
#
@@ -535,22 +393,6 @@ endif # NO_DWARF
-include arch/$(ARCH)/Makefile
-ifeq ($(uname_S),Darwin)
- ifndef NO_FINK
- ifeq ($(shell test -d /sw/lib && echo y),y)
- BASIC_CFLAGS += -I/sw/include
- BASIC_LDFLAGS += -L/sw/lib
- endif
- endif
- ifndef NO_DARWIN_PORTS
- ifeq ($(shell test -d /opt/local/lib && echo y),y)
- BASIC_CFLAGS += -I/opt/local/include
- BASIC_LDFLAGS += -L/opt/local/lib
- endif
- endif
- PTHREAD_LIBS =
-endif
-
ifneq ($(OUTPUT),)
BASIC_CFLAGS += -I$(OUTPUT)
endif
@@ -595,6 +437,7 @@ else
LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o
LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o
+ LIB_OBJS += $(OUTPUT)util/ui/browsers/top.o
LIB_OBJS += $(OUTPUT)util/ui/helpline.o
LIB_OBJS += $(OUTPUT)util/ui/progress.o
LIB_OBJS += $(OUTPUT)util/ui/util.o
@@ -604,6 +447,7 @@ else
LIB_H += util/ui/libslang.h
LIB_H += util/ui/progress.h
LIB_H += util/ui/util.h
+ LIB_H += util/ui/ui.h
endif
endif
@@ -635,12 +479,14 @@ else
PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
+ msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings)
BASIC_CFLAGS += -DNO_LIBPYTHON
else
ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
EXTLIBS += $(PYTHON_EMBED_LIBADD)
LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
+ LANG_BINDINGS += $(OUTPUT)python/perf.so
endif
endif
@@ -690,201 +536,13 @@ else
endif
endif
-ifndef CC_LD_DYNPATH
- ifdef NO_R_TO_GCC_LINKER
- # Some gcc does not accept and pass -R to the linker to specify
- # the runtime dynamic library path.
- CC_LD_DYNPATH = -Wl,-rpath,
- else
- CC_LD_DYNPATH = -R
- endif
-endif
-
-ifdef NEEDS_SOCKET
- EXTLIBS += -lsocket
-endif
-ifdef NEEDS_NSL
- EXTLIBS += -lnsl
-endif
-ifdef NO_D_TYPE_IN_DIRENT
- BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT
-endif
-ifdef NO_D_INO_IN_DIRENT
- BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT
-endif
-ifdef NO_ST_BLOCKS_IN_STRUCT_STAT
- BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT
-endif
-ifdef USE_NSEC
- BASIC_CFLAGS += -DUSE_NSEC
-endif
-ifdef USE_ST_TIMESPEC
- BASIC_CFLAGS += -DUSE_ST_TIMESPEC
-endif
-ifdef NO_NSEC
- BASIC_CFLAGS += -DNO_NSEC
-endif
-ifdef NO_C99_FORMAT
- BASIC_CFLAGS += -DNO_C99_FORMAT
-endif
-ifdef SNPRINTF_RETURNS_BOGUS
- COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS
- COMPAT_OBJS += $(OUTPUT)compat/snprintf.o
-endif
-ifdef FREAD_READS_DIRECTORIES
- COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES
- COMPAT_OBJS += $(OUTPUT)compat/fopen.o
-endif
-ifdef NO_SYMLINK_HEAD
- BASIC_CFLAGS += -DNO_SYMLINK_HEAD
-endif
-ifdef NO_STRCASESTR
- COMPAT_CFLAGS += -DNO_STRCASESTR
- COMPAT_OBJS += $(OUTPUT)compat/strcasestr.o
-endif
-ifdef NO_STRTOUMAX
- COMPAT_CFLAGS += -DNO_STRTOUMAX
- COMPAT_OBJS += $(OUTPUT)compat/strtoumax.o
-endif
-ifdef NO_STRTOULL
- COMPAT_CFLAGS += -DNO_STRTOULL
-endif
-ifdef NO_SETENV
- COMPAT_CFLAGS += -DNO_SETENV
- COMPAT_OBJS += $(OUTPUT)compat/setenv.o
-endif
-ifdef NO_MKDTEMP
- COMPAT_CFLAGS += -DNO_MKDTEMP
- COMPAT_OBJS += $(OUTPUT)compat/mkdtemp.o
-endif
-ifdef NO_UNSETENV
- COMPAT_CFLAGS += -DNO_UNSETENV
- COMPAT_OBJS += $(OUTPUT)compat/unsetenv.o
-endif
-ifdef NO_SYS_SELECT_H
- BASIC_CFLAGS += -DNO_SYS_SELECT_H
-endif
-ifdef NO_MMAP
- COMPAT_CFLAGS += -DNO_MMAP
- COMPAT_OBJS += $(OUTPUT)compat/mmap.o
-else
- ifdef USE_WIN32_MMAP
- COMPAT_CFLAGS += -DUSE_WIN32_MMAP
- COMPAT_OBJS += $(OUTPUT)compat/win32mmap.o
- endif
-endif
-ifdef NO_PREAD
- COMPAT_CFLAGS += -DNO_PREAD
- COMPAT_OBJS += $(OUTPUT)compat/pread.o
-endif
-ifdef NO_FAST_WORKING_DIRECTORY
- BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY
-endif
-ifdef NO_TRUSTABLE_FILEMODE
- BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE
-endif
-ifdef NO_IPV6
- BASIC_CFLAGS += -DNO_IPV6
-endif
-ifdef NO_UINTMAX_T
- BASIC_CFLAGS += -Duintmax_t=uint32_t
-endif
-ifdef NO_SOCKADDR_STORAGE
-ifdef NO_IPV6
- BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in
-else
- BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6
-endif
-endif
-ifdef NO_INET_NTOP
- LIB_OBJS += $(OUTPUT)compat/inet_ntop.o
-endif
-ifdef NO_INET_PTON
- LIB_OBJS += $(OUTPUT)compat/inet_pton.o
-endif
-
-ifdef NO_ICONV
- BASIC_CFLAGS += -DNO_ICONV
-endif
-
-ifdef OLD_ICONV
- BASIC_CFLAGS += -DOLD_ICONV
-endif
-
-ifdef NO_DEFLATE_BOUND
- BASIC_CFLAGS += -DNO_DEFLATE_BOUND
-endif
-
-ifdef PPC_SHA1
- SHA1_HEADER = "ppc/sha1.h"
- LIB_OBJS += $(OUTPUT)ppc/sha1.o ppc/sha1ppc.o
-else
-ifdef ARM_SHA1
- SHA1_HEADER = "arm/sha1.h"
- LIB_OBJS += $(OUTPUT)arm/sha1.o $(OUTPUT)arm/sha1_arm.o
-else
-ifdef MOZILLA_SHA1
- SHA1_HEADER = "mozilla-sha1/sha1.h"
- LIB_OBJS += $(OUTPUT)mozilla-sha1/sha1.o
-else
- SHA1_HEADER = <openssl/sha.h>
- EXTLIBS += $(LIB_4_CRYPTO)
-endif
-endif
-endif
-ifdef NO_PERL_MAKEMAKER
- export NO_PERL_MAKEMAKER
-endif
-ifdef NO_HSTRERROR
- COMPAT_CFLAGS += -DNO_HSTRERROR
- COMPAT_OBJS += $(OUTPUT)compat/hstrerror.o
-endif
-ifdef NO_MEMMEM
- COMPAT_CFLAGS += -DNO_MEMMEM
- COMPAT_OBJS += $(OUTPUT)compat/memmem.o
-endif
-ifdef INTERNAL_QSORT
- COMPAT_CFLAGS += -DINTERNAL_QSORT
- COMPAT_OBJS += $(OUTPUT)compat/qsort.o
-endif
-ifdef RUNTIME_PREFIX
- COMPAT_CFLAGS += -DRUNTIME_PREFIX
-endif
-
-ifdef DIR_HAS_BSD_GROUP_SEMANTICS
- COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS
-endif
-ifdef NO_EXTERNAL_GREP
- BASIC_CFLAGS += -DNO_EXTERNAL_GREP
-endif
-
-ifeq ($(PERL_PATH),)
-NO_PERL=NoThanks
-endif
-
-QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1 =
-
-ifneq ($(findstring $(MAKEFLAGS),w),w)
-PRINT_DIR = --no-print-directory
-else # "make -w"
-NO_SUBDIR = :
-endif
-
ifneq ($(findstring $(MAKEFLAGS),s),s)
ifndef V
QUIET_CC = @echo ' ' CC $@;
QUIET_AR = @echo ' ' AR $@;
QUIET_LINK = @echo ' ' LINK $@;
QUIET_MKDIR = @echo ' ' MKDIR $@;
- QUIET_BUILT_IN = @echo ' ' BUILTIN $@;
QUIET_GEN = @echo ' ' GEN $@;
- QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
- $(MAKE) $(PRINT_DIR) -C $$subdir
- export V
- export QUIET_GEN
- export QUIET_BUILT_IN
endif
endif
@@ -894,7 +552,6 @@ endif
# Shell quote (do not use $(call) to accommodate ancient setups);
-SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER))
ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
@@ -908,46 +565,36 @@ htmldir_SQ = $(subst ','\'',$(htmldir))
prefix_SQ = $(subst ','\'',$(prefix))
SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
-PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
-BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \
- $(COMPAT_CFLAGS)
-LIB_OBJS += $(COMPAT_OBJS)
-
ALL_CFLAGS += $(BASIC_CFLAGS)
ALL_CFLAGS += $(ARCH_CFLAGS)
ALL_LDFLAGS += $(BASIC_LDFLAGS)
-export TAR INSTALL DESTDIR SHELL_PATH
+export INSTALL SHELL_PATH
### Build rules
SHELL = $(SHELL_PATH)
-all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
-ifneq (,$X)
- $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
-endif
-
-all::
+all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
please_set_SHELL_PATH_to_a_more_modern_shell:
@$$(:)
shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
-strip: $(PROGRAMS) $(OUTPUT)perf$X
- $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf$X
+strip: $(PROGRAMS) $(OUTPUT)perf
+ $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \
'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
$(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
-$(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
+$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) $(OUTPUT)perf.o \
$(BUILTIN_OBJS) $(LIBS) -o $@
@@ -963,39 +610,17 @@ $(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPU
'-DPERF_MAN_PATH="$(mandir_SQ)"' \
'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
-$(BUILT_INS): $(OUTPUT)perf$X
- $(QUIET_BUILT_IN)$(RM) $@ && \
- ln perf$X $@ 2>/dev/null || \
- ln -s perf$X $@ 2>/dev/null || \
- cp perf$X $@
-
$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
$(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
-$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
- $(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \
- sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
- -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
- -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \
- -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
- -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
- $@.sh > $(OUTPUT)$@+ && \
- chmod +x $(OUTPUT)$@+ && \
- mv $(OUTPUT)$@+ $(OUTPUT)$@
-
-configure: configure.ac
- $(QUIET_GEN)$(RM) $@ $<+ && \
- sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
- $< > $<+ && \
- autoconf -o $@ $<+ && \
- $(RM) $<+
+$(SCRIPTS) : % : %.sh
+ $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
# These can record PERF_VERSION
$(OUTPUT)perf.o perf.spec \
- $(patsubst %.sh,%,$(SCRIPT_SH)) \
- $(patsubst %.perl,%,$(SCRIPT_PERL)) \
+ $(SCRIPTS) \
: $(OUTPUT)PERF-VERSION-FILE
$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
@@ -1012,9 +637,6 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
'-DPREFIX="$(prefix_SQ)"' \
$<
-$(OUTPUT)builtin-init-db.o: builtin-init-db.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
-
$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
@@ -1024,6 +646,9 @@ $(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)util/ui/browsers/top.o: util/ui/browsers/top.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
$(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
@@ -1045,12 +670,11 @@ $(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/tra
$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
-$(OUTPUT)perf-%$X: %.o $(PERFLIBS)
+$(OUTPUT)perf-%: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
-$(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
-builtin-revert.o wt-status.o: wt-status.h
+$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
# we depend the various files onto their directories.
@@ -1063,6 +687,36 @@ $(sort $(dir $(DIRECTORY_DEPS))):
$(LIB_FILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
+help:
+ @echo 'Perf make targets:'
+ @echo ' doc - make *all* documentation (see below)'
+ @echo ' man - make manpage documentation (access with man <foo>)'
+ @echo ' html - make html documentation'
+ @echo ' info - make GNU info documentation (access with info <foo>)'
+ @echo ' pdf - make pdf documentation'
+ @echo ' TAGS - use etags to make tag information for source browsing'
+ @echo ' tags - use ctags to make tag information for source browsing'
+ @echo ' cscope - use cscope to make interactive browsing database'
+ @echo ''
+ @echo 'Perf install targets:'
+ @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed'
+ @echo ' HINT: use "make prefix=<path> <install target>" to install to a particular'
+ @echo ' path like make prefix=/usr/local install install-doc'
+ @echo ' install - install compiled binaries'
+ @echo ' install-doc - install *all* documentation'
+ @echo ' install-man - install manpage documentation'
+ @echo ' install-html - install html documentation'
+ @echo ' install-info - install GNU info documentation'
+ @echo ' install-pdf - install pdf documentation'
+ @echo ''
+ @echo ' quick-install-doc - alias for quick-install-man'
+ @echo ' quick-install-man - install the documentation quickly'
+ @echo ' quick-install-html - install the html documentation quickly'
+ @echo ''
+ @echo 'Perf maintainer targets:'
+ @echo ' distclean - alias to clean'
+ @echo ' clean - clean all binary objects and build output'
+
doc:
$(MAKE) -C Documentation all
@@ -1101,30 +755,12 @@ $(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
fi
-# We need to apply sq twice, once to protect from the shell
-# that runs $(OUTPUT)PERF-BUILD-OPTIONS, and then again to protect it
-# and the first level quoting from the shell that runs "echo".
-$(OUTPUT)PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
- @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@
- @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@
- @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@
- @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@
-
### Testing rules
-#
-# None right now:
-#
-# TEST_PROGRAMS += test-something$X
-
-all:: $(TEST_PROGRAMS)
-
# GNU make supports exporting all variables by "export" without parameters.
# However, the environment gets quite big, and some programs have problems
# with that.
-export NO_SVN_TESTS
-
check: $(OUTPUT)common-cmds.h
if sparse; \
then \
@@ -1133,33 +769,21 @@ check: $(OUTPUT)common-cmds.h
sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \
done; \
else \
- echo 2>&1 "Did you mean 'make test'?"; \
exit 1; \
fi
-remove-dashes:
- ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS)
-
### Installation rules
-ifneq ($(filter /%,$(firstword $(template_dir))),)
-template_instdir = $(template_dir)
-else
-template_instdir = $(prefix)/$(template_dir)
-endif
-export template_instdir
-
ifneq ($(filter /%,$(firstword $(perfexecdir))),)
perfexec_instdir = $(perfexecdir)
else
perfexec_instdir = $(prefix)/$(perfexecdir)
endif
perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
-export perfexec_instdir
install: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
- $(INSTALL) $(OUTPUT)perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
@@ -1172,14 +796,6 @@ install: all
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
-ifdef BUILT_INS
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
- $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-ifneq (,$X)
- $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) $(OUTPUT)perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
-endif
-endif
-
install-doc:
$(MAKE) -C Documentation install
@@ -1204,104 +820,17 @@ quick-install-man:
quick-install-html:
$(MAKE) -C Documentation quick-install-html
-
-### Maintainer's dist rules
-#
-# None right now
-#
-#
-# perf.spec: perf.spec.in
-# sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+
-# mv $@+ $@
-#
-# PERF_TARNAME=perf-$(PERF_VERSION)
-# dist: perf.spec perf-archive$(X) configure
-# ./perf-archive --format=tar \
-# --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar
-# @mkdir -p $(PERF_TARNAME)
-# @cp perf.spec configure $(PERF_TARNAME)
-# @echo $(PERF_VERSION) > $(PERF_TARNAME)/version
-# $(TAR) rf $(PERF_TARNAME).tar \
-# $(PERF_TARNAME)/perf.spec \
-# $(PERF_TARNAME)/configure \
-# $(PERF_TARNAME)/version
-# @$(RM) -r $(PERF_TARNAME)
-# gzip -f -9 $(PERF_TARNAME).tar
-#
-# htmldocs = perf-htmldocs-$(PERF_VERSION)
-# manpages = perf-manpages-$(PERF_VERSION)
-# dist-doc:
-# $(RM) -r .doc-tmp-dir
-# mkdir .doc-tmp-dir
-# $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc
-# cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar .
-# gzip -n -9 -f $(htmldocs).tar
-# :
-# $(RM) -r .doc-tmp-dir
-# mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7
-# $(MAKE) -C Documentation DESTDIR=./ \
-# man1dir=../.doc-tmp-dir/man1 \
-# man5dir=../.doc-tmp-dir/man5 \
-# man7dir=../.doc-tmp-dir/man7 \
-# install
-# cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar .
-# gzip -n -9 -f $(manpages).tar
-# $(RM) -r .doc-tmp-dir
-#
-# rpm: dist
-# $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz
-
### Cleaning rules
-distclean: clean
-# $(RM) configure
-
clean:
- $(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE)
- $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X
- $(RM) $(TEST_PROGRAMS)
+ $(RM) $(OUTPUT){*.o,*/*.o,*/*/*.o,*/*/*/*.o,$(LIB_FILE),perf-archive}
+ $(RM) $(ALL_PROGRAMS) perf
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
- $(RM) -r autom4te.cache
- $(RM) config.log config.mak.autogen config.mak.append config.status config.cache
- $(RM) -r $(PERF_TARNAME) .doc-tmp-dir
- $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz
- $(RM) $(htmldocs).tar.gz $(manpages).tar.gz
$(MAKE) -C Documentation/ clean
- $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-BUILD-OPTIONS
+ $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
+ @python util/setup.py clean --build-lib='$(OUTPUT)python' \
+ --build-temp='$(OUTPUT)python/temp'
.PHONY: all install clean strip
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
-.PHONY: .FORCE-PERF-BUILD-OPTIONS
-
-### Make sure built-ins do not have dups and listed in perf.c
-#
-check-builtins::
- ./check-builtins.sh
-
-### Test suite coverage testing
-#
-# None right now
-#
-# .PHONY: coverage coverage-clean coverage-build coverage-report
-#
-# coverage:
-# $(MAKE) coverage-build
-# $(MAKE) coverage-report
-#
-# coverage-clean:
-# rm -f *.gcda *.gcno
-#
-# COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs
-# COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov
-#
-# coverage-build: coverage-clean
-# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all
-# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \
-# -j1 test
-#
-# coverage-report:
-# gcov -b *.c */*.c
-# grep '^function.*called 0 ' *.c.gcov */*.c.gcov \
-# | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \
-# | tee coverage-untested-functions
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index d9ab3ce446ac..0c7454f8b8a9 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -55,7 +55,7 @@ int bench_sched_pipe(int argc, const char **argv,
* discarding returned value of read(), write()
* causes error in building environment for perf
*/
- int ret, wait_stat;
+ int __used ret, wait_stat;
pid_t pid, retpid;
argc = parse_options(argc, argv, options,
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 8879463807e4..427182953fd7 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -9,6 +9,7 @@
#include "util/util.h"
+#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
@@ -18,6 +19,7 @@
#include "perf.h"
#include "util/debug.h"
+#include "util/annotate.h"
#include "util/event.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -55,15 +57,29 @@ static int hists__add_entry(struct hists *self, struct addr_location *al)
if (he == NULL)
return -ENOMEM;
- return hist_entry__inc_addr_samples(he, al->addr);
+ if (he->ms.sym != NULL) {
+ /*
+ * All aggregated on the first sym_hist.
+ */
+ struct annotation *notes = symbol__annotation(he->ms.sym);
+ if (notes->src == NULL &&
+ symbol__alloc_hist(he->ms.sym, 1) < 0)
+ return -ENOMEM;
+
+ return hist_entry__inc_addr_samples(he, 0, al->addr);
+ }
+
+ return 0;
}
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct addr_location al;
- if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, session, &al, sample,
+ symbol__annotate_init) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -78,245 +94,10 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
return 0;
}
-static int objdump_line__print(struct objdump_line *self,
- struct list_head *head,
- struct hist_entry *he, u64 len)
-{
- struct symbol *sym = he->ms.sym;
- static const char *prev_line;
- static const char *prev_color;
-
- if (self->offset != -1) {
- const char *path = NULL;
- unsigned int hits = 0;
- double percent = 0.0;
- const char *color;
- struct sym_priv *priv = symbol__priv(sym);
- struct sym_ext *sym_ext = priv->ext;
- struct sym_hist *h = priv->hist;
- s64 offset = self->offset;
- struct objdump_line *next = objdump__get_next_ip_line(head, self);
-
- while (offset < (s64)len &&
- (next == NULL || offset < next->offset)) {
- if (sym_ext) {
- if (path == NULL)
- path = sym_ext[offset].path;
- percent += sym_ext[offset].percent;
- } else
- hits += h->ip[offset];
-
- ++offset;
- }
-
- if (sym_ext == NULL && h->sum)
- percent = 100.0 * hits / h->sum;
-
- color = get_percent_color(percent);
-
- /*
- * Also color the filename and line if needed, with
- * the same color than the percentage. Don't print it
- * twice for close colored ip with the same filename:line
- */
- if (path) {
- if (!prev_line || strcmp(prev_line, path)
- || color != prev_color) {
- color_fprintf(stdout, color, " %s", path);
- prev_line = path;
- prev_color = color;
- }
- }
-
- color_fprintf(stdout, color, " %7.2f", percent);
- printf(" : ");
- color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", self->line);
- } else {
- if (!*self->line)
- printf(" :\n");
- else
- printf(" : %s\n", self->line);
- }
-
- return 0;
-}
-
-static struct rb_root root_sym_ext;
-
-static void insert_source_line(struct sym_ext *sym_ext)
-{
- struct sym_ext *iter;
- struct rb_node **p = &root_sym_ext.rb_node;
- struct rb_node *parent = NULL;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct sym_ext, node);
-
- if (sym_ext->percent > iter->percent)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&sym_ext->node, parent, p);
- rb_insert_color(&sym_ext->node, &root_sym_ext);
-}
-
-static void free_source_line(struct hist_entry *he, int len)
-{
- struct sym_priv *priv = symbol__priv(he->ms.sym);
- struct sym_ext *sym_ext = priv->ext;
- int i;
-
- if (!sym_ext)
- return;
-
- for (i = 0; i < len; i++)
- free(sym_ext[i].path);
- free(sym_ext);
-
- priv->ext = NULL;
- root_sym_ext = RB_ROOT;
-}
-
-/* Get the filename:line for the colored entries */
-static void
-get_source_line(struct hist_entry *he, int len, const char *filename)
-{
- struct symbol *sym = he->ms.sym;
- u64 start;
- int i;
- char cmd[PATH_MAX * 2];
- struct sym_ext *sym_ext;
- struct sym_priv *priv = symbol__priv(sym);
- struct sym_hist *h = priv->hist;
-
- if (!h->sum)
- return;
-
- sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext));
- if (!priv->ext)
- return;
-
- start = he->ms.map->unmap_ip(he->ms.map, sym->start);
-
- for (i = 0; i < len; i++) {
- char *path = NULL;
- size_t line_len;
- u64 offset;
- FILE *fp;
-
- sym_ext[i].percent = 100.0 * h->ip[i] / h->sum;
- if (sym_ext[i].percent <= 0.5)
- continue;
-
- offset = start + i;
- sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
- fp = popen(cmd, "r");
- if (!fp)
- continue;
-
- if (getline(&path, &line_len, fp) < 0 || !line_len)
- goto next;
-
- sym_ext[i].path = malloc(sizeof(char) * line_len + 1);
- if (!sym_ext[i].path)
- goto next;
-
- strcpy(sym_ext[i].path, path);
- insert_source_line(&sym_ext[i]);
-
- next:
- pclose(fp);
- }
-}
-
-static void print_summary(const char *filename)
+static int hist_entry__tty_annotate(struct hist_entry *he, int evidx)
{
- struct sym_ext *sym_ext;
- struct rb_node *node;
-
- printf("\nSorted summary for file %s\n", filename);
- printf("----------------------------------------------\n\n");
-
- if (RB_EMPTY_ROOT(&root_sym_ext)) {
- printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
- return;
- }
-
- node = rb_first(&root_sym_ext);
- while (node) {
- double percent;
- const char *color;
- char *path;
-
- sym_ext = rb_entry(node, struct sym_ext, node);
- percent = sym_ext->percent;
- color = get_percent_color(percent);
- path = sym_ext->path;
-
- color_fprintf(stdout, color, " %7.2f %s", percent, path);
- node = rb_next(node);
- }
-}
-
-static void hist_entry__print_hits(struct hist_entry *self)
-{
- struct symbol *sym = self->ms.sym;
- struct sym_priv *priv = symbol__priv(sym);
- struct sym_hist *h = priv->hist;
- u64 len = sym->end - sym->start, offset;
-
- for (offset = 0; offset < len; ++offset)
- if (h->ip[offset] != 0)
- printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
- sym->start + offset, h->ip[offset]);
- printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
-}
-
-static int hist_entry__tty_annotate(struct hist_entry *he)
-{
- struct map *map = he->ms.map;
- struct dso *dso = map->dso;
- struct symbol *sym = he->ms.sym;
- const char *filename = dso->long_name, *d_filename;
- u64 len;
- LIST_HEAD(head);
- struct objdump_line *pos, *n;
-
- if (hist_entry__annotate(he, &head, 0) < 0)
- return -1;
-
- if (full_paths)
- d_filename = filename;
- else
- d_filename = basename(filename);
-
- len = sym->end - sym->start;
-
- if (print_line) {
- get_source_line(he, len, filename);
- print_summary(filename);
- }
-
- printf("\n\n------------------------------------------------\n");
- printf(" Percent | Source code & Disassembly of %s\n", d_filename);
- printf("------------------------------------------------\n");
-
- if (verbose)
- hist_entry__print_hits(he);
-
- list_for_each_entry_safe(pos, n, &head, node) {
- objdump_line__print(pos, &head, he, len);
- list_del(&pos->node);
- objdump_line__free(pos);
- }
-
- if (print_line)
- free_source_line(he, len);
-
- return 0;
+ return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
+ print_line, full_paths, 0, 0);
}
static void hists__find_annotations(struct hists *self)
@@ -326,13 +107,13 @@ static void hists__find_annotations(struct hists *self)
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
- struct sym_priv *priv;
+ struct annotation *notes;
if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
goto find_next;
- priv = symbol__priv(he->ms.sym);
- if (priv->hist == NULL) {
+ notes = symbol__annotation(he->ms.sym);
+ if (notes->src == NULL) {
find_next:
if (key == KEY_LEFT)
nd = rb_prev(nd);
@@ -342,7 +123,8 @@ find_next:
}
if (use_browser > 0) {
- key = hist_entry__tui_annotate(he);
+ /* For now all is aggregated on the first */
+ key = hist_entry__tui_annotate(he, 0);
switch (key) {
case KEY_RIGHT:
next = rb_next(nd);
@@ -357,24 +139,25 @@ find_next:
if (next != NULL)
nd = next;
} else {
- hist_entry__tty_annotate(he);
+ /* For now all is aggregated on the first */
+ hist_entry__tty_annotate(he, 0);
nd = rb_next(nd);
/*
* Since we have a hist_entry per IP for the same
- * symbol, free he->ms.sym->hist to signal we already
+ * symbol, free he->ms.sym->src to signal we already
* processed this symbol.
*/
- free(priv->hist);
- priv->hist = NULL;
+ free(notes->src);
+ notes->src = NULL;
}
}
}
static struct perf_event_ops event_ops = {
.sample = process_sample_event,
- .mmap = event__process_mmap,
- .comm = event__process_comm,
- .fork = event__process_task,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .fork = perf_event__process_task,
.ordered_samples = true,
.ordering_requires_timestamps = true,
};
@@ -451,9 +234,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
else if (use_tui)
use_browser = 1;
- setup_browser();
+ setup_browser(true);
- symbol_conf.priv_size = sizeof(struct sym_priv);
+ symbol_conf.priv_size = sizeof(struct annotation);
symbol_conf.try_vmlinux_path = true;
if (symbol__init() < 0)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 3153e492dbcc..6b7d91160ecb 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -30,13 +30,13 @@ static int hists__add_entry(struct hists *self,
return -ENOMEM;
}
-static int diff__process_sample_event(event_t *event,
- struct sample_data *sample,
+static int diff__process_sample_event(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct addr_location al;
- if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -56,11 +56,11 @@ static int diff__process_sample_event(event_t *event,
static struct perf_event_ops event_ops = {
.sample = diff__process_sample_event,
- .mmap = event__process_mmap,
- .comm = event__process_comm,
- .exit = event__process_task,
- .fork = event__process_task,
- .lost = event__process_lost,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
.ordered_samples = true,
.ordering_requires_timestamps = true,
};
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 0c78ffa7bf67..e29f04ed3396 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -16,8 +16,8 @@
static char const *input_name = "-";
static bool inject_build_ids;
-static int event__repipe_synth(event_t *event,
- struct perf_session *session __used)
+static int perf_event__repipe_synth(union perf_event *event,
+ struct perf_session *session __used)
{
uint32_t size;
void *buf = event;
@@ -36,41 +36,44 @@ static int event__repipe_synth(event_t *event,
return 0;
}
-static int event__repipe(event_t *event, struct sample_data *sample __used,
- struct perf_session *session)
+static int perf_event__repipe(union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_session *session)
{
- return event__repipe_synth(event, session);
+ return perf_event__repipe_synth(event, session);
}
-static int event__repipe_mmap(event_t *self, struct sample_data *sample,
- struct perf_session *session)
+static int perf_event__repipe_mmap(union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_session *session)
{
int err;
- err = event__process_mmap(self, sample, session);
- event__repipe(self, sample, session);
+ err = perf_event__process_mmap(event, sample, session);
+ perf_event__repipe(event, sample, session);
return err;
}
-static int event__repipe_task(event_t *self, struct sample_data *sample,
- struct perf_session *session)
+static int perf_event__repipe_task(union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_session *session)
{
int err;
- err = event__process_task(self, sample, session);
- event__repipe(self, sample, session);
+ err = perf_event__process_task(event, sample, session);
+ perf_event__repipe(event, sample, session);
return err;
}
-static int event__repipe_tracing_data(event_t *self,
- struct perf_session *session)
+static int perf_event__repipe_tracing_data(union perf_event *event,
+ struct perf_session *session)
{
int err;
- event__repipe_synth(self, session);
- err = event__process_tracing_data(self, session);
+ perf_event__repipe_synth(event, session);
+ err = perf_event__process_tracing_data(event, session);
return err;
}
@@ -109,8 +112,8 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
if (self->kernel)
misc = PERF_RECORD_MISC_KERNEL;
- err = event__synthesize_build_id(self, misc, event__repipe,
- machine, session);
+ err = perf_event__synthesize_build_id(self, misc, perf_event__repipe,
+ machine, session);
if (err) {
pr_err("Can't synthesize build_id event for %s\n", self->long_name);
return -1;
@@ -119,8 +122,9 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
return 0;
}
-static int event__inject_buildid(event_t *event, struct sample_data *sample,
- struct perf_session *session)
+static int perf_event__inject_buildid(union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_session *session)
{
struct addr_location al;
struct thread *thread;
@@ -155,24 +159,24 @@ static int event__inject_buildid(event_t *event, struct sample_data *sample,
}
repipe:
- event__repipe(event, sample, session);
+ perf_event__repipe(event, sample, session);
return 0;
}
struct perf_event_ops inject_ops = {
- .sample = event__repipe,
- .mmap = event__repipe,
- .comm = event__repipe,
- .fork = event__repipe,
- .exit = event__repipe,
- .lost = event__repipe,
- .read = event__repipe,
- .throttle = event__repipe,
- .unthrottle = event__repipe,
- .attr = event__repipe_synth,
- .event_type = event__repipe_synth,
- .tracing_data = event__repipe_synth,
- .build_id = event__repipe_synth,
+ .sample = perf_event__repipe,
+ .mmap = perf_event__repipe,
+ .comm = perf_event__repipe,
+ .fork = perf_event__repipe,
+ .exit = perf_event__repipe,
+ .lost = perf_event__repipe,
+ .read = perf_event__repipe,
+ .throttle = perf_event__repipe,
+ .unthrottle = perf_event__repipe,
+ .attr = perf_event__repipe_synth,
+ .event_type = perf_event__repipe_synth,
+ .tracing_data = perf_event__repipe_synth,
+ .build_id = perf_event__repipe_synth,
};
extern volatile int session_done;
@@ -190,10 +194,10 @@ static int __cmd_inject(void)
signal(SIGINT, sig_handler);
if (inject_build_ids) {
- inject_ops.sample = event__inject_buildid;
- inject_ops.mmap = event__repipe_mmap;
- inject_ops.fork = event__repipe_task;
- inject_ops.tracing_data = event__repipe_tracing_data;
+ inject_ops.sample = perf_event__inject_buildid;
+ inject_ops.mmap = perf_event__repipe_mmap;
+ inject_ops.fork = perf_event__repipe_task;
+ inject_ops.tracing_data = perf_event__repipe_tracing_data;
}
session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index d97256d65980..7f618f4e7b79 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -275,9 +275,8 @@ static void process_free_event(void *data,
s_alloc->alloc_cpu = -1;
}
-static void
-process_raw_event(event_t *raw_event __used, void *data,
- int cpu, u64 timestamp, struct thread *thread)
+static void process_raw_event(union perf_event *raw_event __used, void *data,
+ int cpu, u64 timestamp, struct thread *thread)
{
struct event *event;
int type;
@@ -304,7 +303,8 @@ process_raw_event(event_t *raw_event __used, void *data,
}
}
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct thread *thread = perf_session__findnew(session, event->ip.pid);
@@ -325,7 +325,7 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
static struct perf_event_ops event_ops = {
.sample = process_sample_event,
- .comm = event__process_comm,
+ .comm = perf_event__process_comm,
.ordered_samples = true,
};
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index d88c6961274c..6313b6eb3ebb 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*/
#include "builtin.h"
@@ -13,9 +14,47 @@
#include "util/parse-events.h"
#include "util/cache.h"
-int cmd_list(int argc __used, const char **argv __used, const char *prefix __used)
+int cmd_list(int argc, const char **argv, const char *prefix __used)
{
setup_pager();
- print_events();
+
+ if (argc == 1)
+ print_events(NULL);
+ else {
+ int i;
+
+ for (i = 1; i < argc; ++i) {
+ if (i > 1)
+ putchar('\n');
+ if (strncmp(argv[i], "tracepoint", 10) == 0)
+ print_tracepoint_events(NULL, NULL);
+ else if (strcmp(argv[i], "hw") == 0 ||
+ strcmp(argv[i], "hardware") == 0)
+ print_events_type(PERF_TYPE_HARDWARE);
+ else if (strcmp(argv[i], "sw") == 0 ||
+ strcmp(argv[i], "software") == 0)
+ print_events_type(PERF_TYPE_SOFTWARE);
+ else if (strcmp(argv[i], "cache") == 0 ||
+ strcmp(argv[i], "hwcache") == 0)
+ print_hwcache_events(NULL);
+ else {
+ char *sep = strchr(argv[i], ':'), *s;
+ int sep_idx;
+
+ if (sep == NULL) {
+ print_events(argv[i]);
+ continue;
+ }
+ sep_idx = sep - argv[i];
+ s = strdup(argv[i]);
+ if (s == NULL)
+ return -1;
+
+ s[sep_idx] = '\0';
+ print_tracepoint_events(s, s + sep_idx + 1);
+ free(s);
+ }
+ }
+ }
return 0;
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 2b36defc5d73..2e93f99b1480 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -834,14 +834,14 @@ static void dump_info(void)
die("Unknown type of information\n");
}
-static int process_sample_event(event_t *self, struct sample_data *sample,
+static int process_sample_event(union perf_event *event, struct perf_sample *sample,
struct perf_session *s)
{
struct thread *thread = perf_session__findnew(s, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
- self->header.type);
+ event->header.type);
return -1;
}
@@ -852,7 +852,7 @@ static int process_sample_event(event_t *self, struct sample_data *sample,
static struct perf_event_ops eops = {
.sample = process_sample_event,
- .comm = event__process_comm,
+ .comm = perf_event__process_comm,
.ordered_samples = true,
};
@@ -893,7 +893,7 @@ static const char * const report_usage[] = {
static const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
- "key for sorting"),
+ "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
/* TODO: type */
OPT_END()
};
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index add163c9f0e7..2c0e64d0b4aa 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -36,6 +36,7 @@
#include "builtin.h"
#include "util/util.h"
#include "util/strlist.h"
+#include "util/strfilter.h"
#include "util/symbol.h"
#include "util/debug.h"
#include "util/debugfs.h"
@@ -43,6 +44,8 @@
#include "util/probe-finder.h"
#include "util/probe-event.h"
+#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
+#define DEFAULT_FUNC_FILTER "!_*"
#define MAX_PATH_LEN 256
/* Session management structure */
@@ -52,6 +55,7 @@ static struct {
bool show_lines;
bool show_vars;
bool show_ext_vars;
+ bool show_funcs;
bool mod_events;
int nevents;
struct perf_probe_event events[MAX_PROBES];
@@ -59,6 +63,7 @@ static struct {
struct line_range line_range;
const char *target_module;
int max_probe_points;
+ struct strfilter *filter;
} params;
/* Parse an event definition. Note that any error must die. */
@@ -157,6 +162,27 @@ static int opt_show_vars(const struct option *opt __used,
}
#endif
+static int opt_set_filter(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ const char *err;
+
+ if (str) {
+ pr_debug2("Set filter: %s\n", str);
+ if (params.filter)
+ strfilter__delete(params.filter);
+ params.filter = strfilter__new(str, &err);
+ if (!params.filter) {
+ pr_err("Filter parse error at %td.\n", err - str + 1);
+ pr_err("Source: \"%s\"\n", str);
+ pr_err(" %*c\n", (int)(err - str + 1), '^');
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static const char * const probe_usage[] = {
"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
@@ -221,6 +247,13 @@ static const struct option options[] = {
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
"Set how many probe points can be found for a probe."),
+ OPT_BOOLEAN('F', "funcs", &params.show_funcs,
+ "Show potential probe-able functions."),
+ OPT_CALLBACK('\0', "filter", NULL,
+ "[!]FILTER", "Set a filter (with --vars/funcs only)\n"
+ "\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n"
+ "\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)",
+ opt_set_filter),
OPT_END()
};
@@ -246,7 +279,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
params.max_probe_points = MAX_PROBES;
if ((!params.nevents && !params.dellist && !params.list_events &&
- !params.show_lines))
+ !params.show_lines && !params.show_funcs))
usage_with_options(probe_usage, options);
/*
@@ -267,12 +300,41 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
pr_err(" Error: Don't use --list with --vars.\n");
usage_with_options(probe_usage, options);
}
+ if (params.show_funcs) {
+ pr_err(" Error: Don't use --list with --funcs.\n");
+ usage_with_options(probe_usage, options);
+ }
ret = show_perf_probe_events();
if (ret < 0)
pr_err(" Error: Failed to show event list. (%d)\n",
ret);
return ret;
}
+ if (params.show_funcs) {
+ if (params.nevents != 0 || params.dellist) {
+ pr_err(" Error: Don't use --funcs with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
+ if (params.show_lines) {
+ pr_err(" Error: Don't use --funcs with --line.\n");
+ usage_with_options(probe_usage, options);
+ }
+ if (params.show_vars) {
+ pr_err(" Error: Don't use --funcs with --vars.\n");
+ usage_with_options(probe_usage, options);
+ }
+ if (!params.filter)
+ params.filter = strfilter__new(DEFAULT_FUNC_FILTER,
+ NULL);
+ ret = show_available_funcs(params.target_module,
+ params.filter);
+ strfilter__delete(params.filter);
+ if (ret < 0)
+ pr_err(" Error: Failed to show functions."
+ " (%d)\n", ret);
+ return ret;
+ }
#ifdef DWARF_SUPPORT
if (params.show_lines) {
@@ -297,10 +359,16 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
" --add/--del.\n");
usage_with_options(probe_usage, options);
}
+ if (!params.filter)
+ params.filter = strfilter__new(DEFAULT_VAR_FILTER,
+ NULL);
+
ret = show_available_vars(params.events, params.nevents,
params.max_probe_points,
params.target_module,
+ params.filter,
params.show_ext_vars);
+ strfilter__delete(params.filter);
if (ret < 0)
pr_err(" Error: Failed to show vars. (%d)\n", ret);
return ret;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 60cac6f92e8b..db4cd1e7b51a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -18,17 +18,20 @@
#include "util/header.h"
#include "util/event.h"
+#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"
+#include "util/thread_map.h"
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->id, x, y)
enum write_mode_t {
WRITE_FORCE,
@@ -39,14 +42,13 @@ static u64 user_interval = ULLONG_MAX;
static u64 default_interval = 0;
static u64 sample_type;
-static struct cpu_map *cpus;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static unsigned int user_freq = UINT_MAX;
static int freq = 1000;
static int output;
static int pipe_output = 0;
-static const char *output_name = "perf.data";
+static const char *output_name = NULL;
static int group = 0;
static int realtime_prio = 0;
static bool nodelay = false;
@@ -55,7 +57,6 @@ static bool sample_id_all_avail = true;
static bool system_wide = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
-static struct thread_map *threads;
static pid_t child_pid = -1;
static bool no_inherit = false;
static enum write_mode_t write_mode = WRITE_FORCE;
@@ -66,51 +67,17 @@ static bool sample_address = false;
static bool sample_time = false;
static bool no_buildid = false;
static bool no_buildid_cache = false;
+static struct perf_evlist *evsel_list;
static long samples = 0;
static u64 bytes_written = 0;
-static struct pollfd *event_array;
-
-static int nr_poll = 0;
-static int nr_cpu = 0;
-
static int file_new = 1;
static off_t post_processing_offset;
static struct perf_session *session;
static const char *cpu_list;
-struct mmap_data {
- void *base;
- unsigned int mask;
- unsigned int prev;
-};
-
-static struct mmap_data mmap_array[MAX_NR_CPUS];
-
-static unsigned long mmap_read_head(struct mmap_data *md)
-{
- struct perf_event_mmap_page *pc = md->base;
- long head;
-
- head = pc->data_head;
- rmb();
-
- return head;
-}
-
-static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
-{
- struct perf_event_mmap_page *pc = md->base;
-
- /*
- * ensure all reads are done before we write the tail out.
- */
- /* mb(); */
- pc->data_tail = tail;
-}
-
static void advance_output(size_t size)
{
bytes_written += size;
@@ -131,42 +98,26 @@ static void write_output(void *buf, size_t size)
}
}
-static int process_synthesized_event(event_t *event,
- struct sample_data *sample __used,
+static int process_synthesized_event(union perf_event *event,
+ struct perf_sample *sample __used,
struct perf_session *self __used)
{
write_output(event, event->header.size);
return 0;
}
-static void mmap_read(struct mmap_data *md)
+static void mmap_read(struct perf_mmap *md)
{
- unsigned int head = mmap_read_head(md);
+ unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
- int diff;
- /*
- * If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and mess up the samples under us.
- *
- * If we somehow ended up ahead of the head, we got messed up.
- *
- * In either case, truncate and restart at head.
- */
- diff = head - old;
- if (diff < 0) {
- fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
- /*
- * head points to a known good entry, start there.
- */
- old = head;
- }
+ if (old == head)
+ return;
- if (old != head)
- samples++;
+ samples++;
size = head - old;
@@ -185,7 +136,7 @@ static void mmap_read(struct mmap_data *md)
write_output(buf, size);
md->prev = old;
- mmap_write_tail(md, old);
+ perf_mmap__write_tail(md, old);
}
static volatile int done = 0;
@@ -209,8 +160,6 @@ static void sig_atexit(void)
kill(getpid(), signr);
}
-static int group_fd;
-
static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
{
struct perf_header_attr *h_attr;
@@ -234,28 +183,47 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
char *filter = evsel->filter;
struct perf_event_attr *attr = &evsel->attr;
struct perf_header_attr *h_attr;
- int track = !evsel->idx; /* only the first counter needs these */
+ struct perf_sample_id *sid;
int thread_index;
int ret;
- struct {
- u64 count;
- u64 time_enabled;
- u64 time_running;
- u64 id;
- } read_data;
- /*
- * Check if parse_single_tracepoint_event has already asked for
- * PERF_SAMPLE_TIME.
- *
- * XXX this is kludgy but short term fix for problems introduced by
- * eac23d1c that broke 'perf script' by having different sample_types
- * when using multiple tracepoint events when we use a perf binary
- * that tries to use sample_id_all on an older kernel.
- *
- * We need to move counter creation to perf_session, support
- * different sample_types, etc.
- */
- bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
+
+ for (thread_index = 0; thread_index < evsel_list->threads->nr; thread_index++) {
+ h_attr = get_header_attr(attr, evsel->idx);
+ if (h_attr == NULL)
+ die("nomem\n");
+
+ if (!file_new) {
+ if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
+ }
+
+ sid = SID(evsel, cpu, thread_index);
+ if (perf_header_attr__add_id(h_attr, sid->id) < 0) {
+ pr_warning("Not enough memory to add id\n");
+ exit(-1);
+ }
+
+ if (filter != NULL) {
+ ret = ioctl(FD(evsel, cpu, thread_index),
+ PERF_EVENT_IOC_SET_FILTER, filter);
+ if (ret) {
+ error("failed to set filter with %d (%s)\n", errno,
+ strerror(errno));
+ exit(-1);
+ }
+ }
+ }
+
+ if (!sample_type)
+ sample_type = attr->sample_type;
+}
+
+static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int track = !evsel->idx; /* only the first counter needs these */
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING |
@@ -263,7 +231,7 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
- if (nr_counters > 1)
+ if (evlist->nr_entries > 1)
attr->sample_type |= PERF_SAMPLE_ID;
/*
@@ -315,19 +283,40 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
attr->mmap = track;
attr->comm = track;
- attr->inherit = !no_inherit;
+
if (target_pid == -1 && target_tid == -1 && !system_wide) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
-retry_sample_id:
- attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+}
- for (thread_index = 0; thread_index < threads->nr; thread_index++) {
-try_again:
- FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, threads->map[thread_index], cpu, group_fd, 0);
+static void open_counters(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos;
+ int cpu;
- if (FD(evsel, nr_cpu, thread_index) < 0) {
+ list_for_each_entry(pos, &evlist->entries, node) {
+ struct perf_event_attr *attr = &pos->attr;
+ /*
+ * Check if parse_single_tracepoint_event has already asked for
+ * PERF_SAMPLE_TIME.
+ *
+ * XXX this is kludgy but short term fix for problems introduced by
+ * eac23d1c that broke 'perf script' by having different sample_types
+ * when using multiple tracepoint events when we use a perf binary
+ * that tries to use sample_id_all on an older kernel.
+ *
+ * We need to move counter creation to perf_session, support
+ * different sample_types, etc.
+ */
+ bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
+
+ config_attr(pos, evlist);
+retry_sample_id:
+ attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+try_again:
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
+ !no_inherit) < 0) {
int err = errno;
if (err == EPERM || err == EACCES)
@@ -364,7 +353,7 @@ try_again:
}
printf("\n");
error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
- FD(evsel, nr_cpu, thread_index), strerror(err));
+ err, strerror(err));
#if defined(__i386__) || defined(__x86_64__)
if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
@@ -375,90 +364,16 @@ try_again:
#endif
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
- exit(-1);
- }
-
- h_attr = get_header_attr(attr, evsel->idx);
- if (h_attr == NULL)
- die("nomem\n");
-
- if (!file_new) {
- if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
- fprintf(stderr, "incompatible append\n");
- exit(-1);
- }
- }
-
- if (read(FD(evsel, nr_cpu, thread_index), &read_data, sizeof(read_data)) == -1) {
- perror("Unable to read perf file descriptor");
- exit(-1);
- }
-
- if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
- pr_warning("Not enough memory to add id\n");
- exit(-1);
- }
-
- assert(FD(evsel, nr_cpu, thread_index) >= 0);
- fcntl(FD(evsel, nr_cpu, thread_index), F_SETFL, O_NONBLOCK);
-
- /*
- * First counter acts as the group leader:
- */
- if (group && group_fd == -1)
- group_fd = FD(evsel, nr_cpu, thread_index);
-
- if (evsel->idx || thread_index) {
- struct perf_evsel *first;
- first = list_entry(evsel_list.next, struct perf_evsel, node);
- ret = ioctl(FD(evsel, nr_cpu, thread_index),
- PERF_EVENT_IOC_SET_OUTPUT,
- FD(first, nr_cpu, 0));
- if (ret) {
- error("failed to set output: %d (%s)\n", errno,
- strerror(errno));
- exit(-1);
- }
- } else {
- mmap_array[nr_cpu].prev = 0;
- mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
- mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
- PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, nr_cpu, thread_index), 0);
- if (mmap_array[nr_cpu].base == MAP_FAILED) {
- error("failed to mmap with %d (%s)\n", errno, strerror(errno));
- exit(-1);
- }
-
- event_array[nr_poll].fd = FD(evsel, nr_cpu, thread_index);
- event_array[nr_poll].events = POLLIN;
- nr_poll++;
- }
-
- if (filter != NULL) {
- ret = ioctl(FD(evsel, nr_cpu, thread_index),
- PERF_EVENT_IOC_SET_FILTER, filter);
- if (ret) {
- error("failed to set filter with %d (%s)\n", errno,
- strerror(errno));
- exit(-1);
- }
}
}
- if (!sample_type)
- sample_type = attr->sample_type;
-}
-
-static void open_counters(int cpu)
-{
- struct perf_evsel *pos;
-
- group_fd = -1;
+ if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
+ die("failed to mmap with %d (%s)\n", errno, strerror(errno));
- list_for_each_entry(pos, &evsel_list, node)
- create_counter(pos, cpu);
-
- nr_cpu++;
+ for (cpu = 0; cpu < evsel_list->cpus->nr; ++cpu) {
+ list_for_each_entry(pos, &evlist->entries, node)
+ create_counter(pos, cpu);
+ }
}
static int process_buildids(void)
@@ -481,14 +396,14 @@ static void atexit_header(void)
if (!no_buildid)
process_buildids();
- perf_header__write(&session->header, output, true);
+ perf_header__write(&session->header, evsel_list, output, true);
perf_session__delete(session);
- perf_evsel_list__delete();
+ perf_evlist__delete(evsel_list);
symbol__exit();
}
}
-static void event__synthesize_guest_os(struct machine *machine, void *data)
+static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
int err;
struct perf_session *psession = data;
@@ -504,8 +419,8 @@ static void event__synthesize_guest_os(struct machine *machine, void *data)
*method is used to avoid symbol missing when the first addr is
*in module instead of in guest kernel.
*/
- err = event__synthesize_modules(process_synthesized_event,
- psession, machine);
+ err = perf_event__synthesize_modules(process_synthesized_event,
+ psession, machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -514,11 +429,12 @@ static void event__synthesize_guest_os(struct machine *machine, void *data)
* We use _stext for guest kernel because guest kernel's /proc/kallsyms
* have no _text sometimes.
*/
- err = event__synthesize_kernel_mmap(process_synthesized_event,
- psession, machine, "_text");
+ err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+ psession, machine, "_text");
if (err < 0)
- err = event__synthesize_kernel_mmap(process_synthesized_event,
- psession, machine, "_stext");
+ err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+ psession, machine,
+ "_stext");
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -533,9 +449,9 @@ static void mmap_read_all(void)
{
int i;
- for (i = 0; i < nr_cpu; i++) {
- if (mmap_array[i].base)
- mmap_read(&mmap_array[i]);
+ for (i = 0; i < evsel_list->cpus->nr; i++) {
+ if (evsel_list->mmap[i].base)
+ mmap_read(&evsel_list->mmap[i]);
}
if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
@@ -566,18 +482,26 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- if (!strcmp(output_name, "-"))
- pipe_output = 1;
- else if (!stat(output_name, &st) && st.st_size) {
- if (write_mode == WRITE_FORCE) {
- char oldname[PATH_MAX];
- snprintf(oldname, sizeof(oldname), "%s.old",
- output_name);
- unlink(oldname);
- rename(output_name, oldname);
+ if (!output_name) {
+ if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
+ pipe_output = 1;
+ else
+ output_name = "perf.data";
+ }
+ if (output_name) {
+ if (!strcmp(output_name, "-"))
+ pipe_output = 1;
+ else if (!stat(output_name, &st) && st.st_size) {
+ if (write_mode == WRITE_FORCE) {
+ char oldname[PATH_MAX];
+ snprintf(oldname, sizeof(oldname), "%s.old",
+ output_name);
+ unlink(oldname);
+ rename(output_name, oldname);
+ }
+ } else if (write_mode == WRITE_APPEND) {
+ write_mode = WRITE_FORCE;
}
- } else if (write_mode == WRITE_APPEND) {
- write_mode = WRITE_FORCE;
}
flags = O_CREAT|O_RDWR;
@@ -611,14 +535,9 @@ static int __cmd_record(int argc, const char **argv)
goto out_delete_session;
}
- if (have_tracepoints(&evsel_list))
+ if (have_tracepoints(&evsel_list->entries))
perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
- /*
- * perf_session__delete(session) will be called at atexit_header()
- */
- atexit(atexit_header);
-
if (forks) {
child_pid = fork();
if (child_pid < 0) {
@@ -659,7 +578,7 @@ static int __cmd_record(int argc, const char **argv)
}
if (!system_wide && target_tid == -1 && target_pid == -1)
- threads->map[0] = child_pid;
+ evsel_list->threads->map[0] = child_pid;
close(child_ready_pipe[1]);
close(go_pipe[0]);
@@ -673,21 +592,22 @@ static int __cmd_record(int argc, const char **argv)
close(child_ready_pipe[0]);
}
- if (!system_wide && no_inherit && !cpu_list) {
- open_counters(-1);
- } else {
- for (i = 0; i < cpus->nr; i++)
- open_counters(cpus->map[i]);
- }
+ open_counters(evsel_list);
perf_session__set_sample_type(session, sample_type);
+ /*
+ * perf_session__delete(session) will be called at atexit_header()
+ */
+ atexit(atexit_header);
+
if (pipe_output) {
err = perf_header__write_pipe(output);
if (err < 0)
return err;
} else if (file_new) {
- err = perf_header__write(&session->header, output, false);
+ err = perf_header__write(&session->header, evsel_list,
+ output, false);
if (err < 0)
return err;
}
@@ -697,22 +617,22 @@ static int __cmd_record(int argc, const char **argv)
perf_session__set_sample_id_all(session, sample_id_all_avail);
if (pipe_output) {
- err = event__synthesize_attrs(&session->header,
- process_synthesized_event,
- session);
+ err = perf_event__synthesize_attrs(&session->header,
+ process_synthesized_event,
+ session);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
return err;
}
- err = event__synthesize_event_types(process_synthesized_event,
- session);
+ err = perf_event__synthesize_event_types(process_synthesized_event,
+ session);
if (err < 0) {
pr_err("Couldn't synthesize event_types.\n");
return err;
}
- if (have_tracepoints(&evsel_list)) {
+ if (have_tracepoints(&evsel_list->entries)) {
/*
* FIXME err <= 0 here actually means that
* there were no tracepoints so its not really
@@ -721,9 +641,9 @@ static int __cmd_record(int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
- err = event__synthesize_tracing_data(output, &evsel_list,
- process_synthesized_event,
- session);
+ err = perf_event__synthesize_tracing_data(output, evsel_list,
+ process_synthesized_event,
+ session);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
return err;
@@ -738,31 +658,34 @@ static int __cmd_record(int argc, const char **argv)
return -1;
}
- err = event__synthesize_kernel_mmap(process_synthesized_event,
- session, machine, "_text");
+ err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+ session, machine, "_text");
if (err < 0)
- err = event__synthesize_kernel_mmap(process_synthesized_event,
- session, machine, "_stext");
+ err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+ session, machine, "_stext");
if (err < 0)
pr_err("Couldn't record kernel reference relocation symbol\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/kallsyms permission or run as root.\n");
- err = event__synthesize_modules(process_synthesized_event,
- session, machine);
+ err = perf_event__synthesize_modules(process_synthesized_event,
+ session, machine);
if (err < 0)
pr_err("Couldn't record kernel module information.\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/modules permission or run as root.\n");
if (perf_guest)
- perf_session__process_machines(session, event__synthesize_guest_os);
+ perf_session__process_machines(session,
+ perf_event__synthesize_guest_os);
if (!system_wide)
- event__synthesize_thread_map(threads, process_synthesized_event,
- session);
+ perf_event__synthesize_thread_map(evsel_list->threads,
+ process_synthesized_event,
+ session);
else
- event__synthesize_threads(process_synthesized_event, session);
+ perf_event__synthesize_threads(process_synthesized_event,
+ session);
if (realtime_prio) {
struct sched_param param;
@@ -789,17 +712,17 @@ static int __cmd_record(int argc, const char **argv)
if (hits == samples) {
if (done)
break;
- err = poll(event_array, nr_poll, -1);
+ err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
waking++;
}
if (done) {
- for (i = 0; i < nr_cpu; i++) {
+ for (i = 0; i < evsel_list->cpus->nr; i++) {
struct perf_evsel *pos;
- list_for_each_entry(pos, &evsel_list, node) {
+ list_for_each_entry(pos, &evsel_list->entries, node) {
for (thread = 0;
- thread < threads->nr;
+ thread < evsel_list->threads->nr;
thread++)
ioctl(FD(pos, i, thread),
PERF_EVENT_IOC_DISABLE);
@@ -838,10 +761,10 @@ static const char * const record_usage[] = {
static bool force, append_file;
const struct option record_options[] = {
- OPT_CALLBACK('e', "event", NULL, "event",
+ OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events),
- OPT_CALLBACK(0, "filter", NULL, "filter",
+ OPT_CALLBACK(0, "filter", &evsel_list, "filter",
"event filter", parse_filter),
OPT_INTEGER('p', "pid", &target_pid,
"record events on existing process id"),
@@ -884,6 +807,9 @@ const struct option record_options[] = {
"do not update the buildid cache"),
OPT_BOOLEAN('B', "no-buildid", &no_buildid,
"do not collect buildids in perf.data"),
+ OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+ "monitor event in cgroup name only",
+ parse_cgroups),
OPT_END()
};
@@ -892,6 +818,10 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
int err = -ENOMEM;
struct perf_evsel *pos;
+ evsel_list = perf_evlist__new(NULL, NULL);
+ if (evsel_list == NULL)
+ return -ENOMEM;
+
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc && target_pid == -1 && target_tid == -1 &&
@@ -908,12 +838,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
write_mode = WRITE_FORCE;
}
+ if (nr_cgroups && !system_wide) {
+ fprintf(stderr, "cgroup monitoring only available in"
+ " system-wide mode\n");
+ usage_with_options(record_usage, record_options);
+ }
+
symbol__init();
if (no_buildid_cache || no_buildid)
disable_buildid_cache();
- if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0) {
+ if (evsel_list->nr_entries == 0 &&
+ perf_evlist__add_default(evsel_list) < 0) {
pr_err("Not enough memory for event selector list\n");
goto out_symbol_exit;
}
@@ -921,27 +858,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
if (target_pid != -1)
target_tid = target_pid;
- threads = thread_map__new(target_pid, target_tid);
- if (threads == NULL) {
- pr_err("Problems finding threads of monitor\n");
+ if (perf_evlist__create_maps(evsel_list, target_pid,
+ target_tid, cpu_list) < 0)
usage_with_options(record_usage, record_options);
- }
-
- cpus = cpu_map__new(cpu_list);
- if (cpus == NULL) {
- perror("failed to parse CPUs map");
- return -1;
- }
- list_for_each_entry(pos, &evsel_list, node) {
- if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+ list_for_each_entry(pos, &evsel_list->entries, node) {
+ if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
+ evsel_list->threads->nr) < 0)
goto out_free_fd;
if (perf_header__push_event(pos->attr.config, event_name(pos)))
goto out_free_fd;
}
- event_array = malloc((sizeof(struct pollfd) * MAX_NR_CPUS *
- MAX_COUNTERS * threads->nr));
- if (!event_array)
+
+ if (perf_evlist__alloc_pollfd(evsel_list) < 0)
goto out_free_fd;
if (user_interval != ULLONG_MAX)
@@ -959,16 +888,12 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
err = -EINVAL;
- goto out_free_event_array;
+ goto out_free_fd;
}
err = __cmd_record(argc, argv);
-
-out_free_event_array:
- free(event_array);
out_free_fd:
- thread_map__delete(threads);
- threads = NULL;
+ perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
symbol__exit();
return err;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index c27e31f289e6..dddcc7ea2bec 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -9,6 +9,7 @@
#include "util/util.h"
+#include "util/annotate.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
@@ -43,6 +44,7 @@ static const char default_pretty_printing_style[] = "normal";
static const char *pretty_printing_style = default_pretty_printing_style;
static char callchain_default_opt[] = "fractal,0.5";
+static symbol_filter_t annotate_init;
static struct hists *perf_session__hists_findnew(struct perf_session *self,
u64 event_stream, u32 type,
@@ -77,86 +79,96 @@ static struct hists *perf_session__hists_findnew(struct perf_session *self,
return new;
}
-static int perf_session__add_hist_entry(struct perf_session *self,
+static int perf_session__add_hist_entry(struct perf_session *session,
struct addr_location *al,
- struct sample_data *data)
+ struct perf_sample *sample)
{
- struct map_symbol *syms = NULL;
struct symbol *parent = NULL;
- int err = -ENOMEM;
+ int err = 0;
struct hist_entry *he;
struct hists *hists;
struct perf_event_attr *attr;
- if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) {
- syms = perf_session__resolve_callchain(self, al->thread,
- data->callchain, &parent);
- if (syms == NULL)
- return -ENOMEM;
+ if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
+ err = perf_session__resolve_callchain(session, al->thread,
+ sample->callchain, &parent);
+ if (err)
+ return err;
}
- attr = perf_header__find_attr(data->id, &self->header);
+ attr = perf_header__find_attr(sample->id, &session->header);
if (attr)
- hists = perf_session__hists_findnew(self, data->id, attr->type, attr->config);
+ hists = perf_session__hists_findnew(session, sample->id, attr->type, attr->config);
else
- hists = perf_session__hists_findnew(self, data->id, 0, 0);
+ hists = perf_session__hists_findnew(session, sample->id, 0, 0);
if (hists == NULL)
- goto out_free_syms;
- he = __hists__add_entry(hists, al, parent, data->period);
+ return -ENOMEM;
+
+ he = __hists__add_entry(hists, al, parent, sample->period);
if (he == NULL)
- goto out_free_syms;
- err = 0;
+ return -ENOMEM;
+
if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain, data->callchain, syms,
- data->period);
+ err = callchain_append(he->callchain, &session->callchain_cursor,
+ sample->period);
if (err)
- goto out_free_syms;
+ return err;
}
/*
* Only in the newt browser we are doing integrated annotation,
* so we don't allocated the extra space needed because the stdio
* code will not use it.
*/
- if (use_browser > 0)
- err = hist_entry__inc_addr_samples(he, al->addr);
-out_free_syms:
- free(syms);
+ if (al->sym != NULL && use_browser > 0) {
+ /*
+ * All aggregated on the first sym_hist.
+ */
+ struct annotation *notes = symbol__annotation(he->ms.sym);
+ if (notes->src == NULL &&
+ symbol__alloc_hist(he->ms.sym, 1) < 0)
+ err = -ENOMEM;
+ else
+ err = hist_entry__inc_addr_samples(he, 0, al->addr);
+ }
+
return err;
}
static int add_event_total(struct perf_session *session,
- struct sample_data *data,
+ struct perf_sample *sample,
struct perf_event_attr *attr)
{
struct hists *hists;
if (attr)
- hists = perf_session__hists_findnew(session, data->id,
+ hists = perf_session__hists_findnew(session, sample->id,
attr->type, attr->config);
else
- hists = perf_session__hists_findnew(session, data->id, 0, 0);
+ hists = perf_session__hists_findnew(session, sample->id, 0, 0);
if (!hists)
return -ENOMEM;
- hists->stats.total_period += data->period;
+ hists->stats.total_period += sample->period;
/*
* FIXME: add_event_total should be moved from here to
* perf_session__process_event so that the proper hist is passed to
* the event_op methods.
*/
hists__inc_nr_events(hists, PERF_RECORD_SAMPLE);
- session->hists.stats.total_period += data->period;
+ session->hists.stats.total_period += sample->period;
return 0;
}
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct addr_location al;
struct perf_event_attr *attr;
- if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, session, &al, sample,
+ annotate_init) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -180,7 +192,8 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
return 0;
}
-static int process_read_event(event_t *event, struct sample_data *sample __used,
+static int process_read_event(union perf_event *event,
+ struct perf_sample *sample __used,
struct perf_session *session __used)
{
struct perf_event_attr *attr;
@@ -222,7 +235,7 @@ static int perf_session__setup_sample_type(struct perf_session *self)
} else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
- if (register_callchain_param(&callchain_param) < 0) {
+ if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain"
" params\n");
return -EINVAL;
@@ -233,17 +246,17 @@ static int perf_session__setup_sample_type(struct perf_session *self)
}
static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .mmap = event__process_mmap,
- .comm = event__process_comm,
- .exit = event__process_task,
- .fork = event__process_task,
- .lost = event__process_lost,
- .read = process_read_event,
- .attr = event__process_attr,
- .event_type = event__process_event_type,
- .tracing_data = event__process_tracing_data,
- .build_id = event__process_build_id,
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
+ .read = process_read_event,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
.ordered_samples = true,
.ordering_requires_timestamps = true,
};
@@ -337,6 +350,12 @@ static int __cmd_report(void)
perf_session__fprintf_dsos(session, stdout);
next = rb_first(&session->hists_tree);
+
+ if (next == NULL) {
+ ui__warning("The %s file has no samples!\n", input_name);
+ goto out_delete;
+ }
+
while (next) {
struct hists *hists;
@@ -347,7 +366,7 @@ static int __cmd_report(void)
}
if (use_browser > 0)
- hists__tui_browse_tree(&session->hists_tree, help);
+ hists__tui_browse_tree(&session->hists_tree, help, 0);
else
hists__tty_browse_tree(&session->hists_tree, help);
@@ -424,7 +443,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
if (tok2)
callchain_param.print_limit = strtod(tok2, &endptr);
setup:
- if (register_callchain_param(&callchain_param) < 0) {
+ if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain params\n");
return -1;
}
@@ -498,7 +517,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
use_browser = 1;
if (strcmp(input_name, "-") != 0)
- setup_browser();
+ setup_browser(true);
else
use_browser = 0;
/*
@@ -507,7 +526,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
* implementation.
*/
if (use_browser > 0) {
- symbol_conf.priv_size = sizeof(struct sym_priv);
+ symbol_conf.priv_size = sizeof(struct annotation);
+ annotate_init = symbol__annotate_init;
/*
* For searching by name on the "Browse map details".
* providing it only in verbose mode not to bloat too
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 29acb894e035..a32f411faeac 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -369,11 +369,6 @@ static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
int ret = 0;
- u64 now;
- long long delta;
-
- now = get_nsecs();
- delta = start_time + atom->timestamp - now;
switch (atom->type) {
case SCHED_EVENT_RUN:
@@ -562,7 +557,7 @@ static void wait_for_tasks(void)
static void run_one_test(void)
{
- u64 T0, T1, delta, avg_delta, fluct, std_dev;
+ u64 T0, T1, delta, avg_delta, fluct;
T0 = get_nsecs();
wait_for_tasks();
@@ -578,7 +573,6 @@ static void run_one_test(void)
else
fluct = delta - avg_delta;
sum_fluct += fluct;
- std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
if (!run_avg)
run_avg = delta;
run_avg = (run_avg*9 + delta)/10;
@@ -799,7 +793,7 @@ replay_switch_event(struct trace_switch_event *switch_event,
u64 timestamp,
struct thread *thread __used)
{
- struct task_desc *prev, *next;
+ struct task_desc *prev, __used *next;
u64 timestamp0;
s64 delta;
@@ -1404,7 +1398,7 @@ map_switch_event(struct trace_switch_event *switch_event,
u64 timestamp,
struct thread *thread __used)
{
- struct thread *sched_out, *sched_in;
+ struct thread *sched_out __used, *sched_in;
int new_shortname;
u64 timestamp0;
s64 delta;
@@ -1580,9 +1574,9 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
event, cpu, timestamp, thread);
}
-static void
-process_raw_event(event_t *raw_event __used, struct perf_session *session,
- void *data, int cpu, u64 timestamp, struct thread *thread)
+static void process_raw_event(union perf_event *raw_event __used,
+ struct perf_session *session, void *data, int cpu,
+ u64 timestamp, struct thread *thread)
{
struct event *event;
int type;
@@ -1607,7 +1601,8 @@ process_raw_event(event_t *raw_event __used, struct perf_session *session,
process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
}
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct thread *thread;
@@ -1635,9 +1630,9 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
static struct perf_event_ops event_ops = {
.sample = process_sample_event,
- .comm = event__process_comm,
- .lost = event__process_lost,
- .fork = event__process_task,
+ .comm = perf_event__process_comm,
+ .lost = perf_event__process_lost,
+ .fork = perf_event__process_task,
.ordered_samples = true,
};
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b766c2a9ac97..5f40df635dcb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -63,7 +63,8 @@ static int cleanup_scripting(void)
static char const *input_name = "perf.data";
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct thread *thread = perf_session__findnew(session, event->ip.pid);
@@ -100,14 +101,14 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
}
static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .comm = event__process_comm,
- .attr = event__process_attr,
- .event_type = event__process_event_type,
- .tracing_data = event__process_tracing_data,
- .build_id = event__process_build_id,
- .ordering_requires_timestamps = true,
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
.ordered_samples = true,
+ .ordering_requires_timestamps = true,
};
extern volatile int session_done;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a482a191a0ca..21c025222496 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -43,11 +43,13 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
+#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
+#include "util/thread_map.h"
#include <sys/prctl.h>
#include <math.h>
@@ -71,8 +73,9 @@ static struct perf_event_attr default_attrs[] = {
};
+struct perf_evlist *evsel_list;
+
static bool system_wide = false;
-static struct cpu_map *cpus;
static int run_idx = 0;
static int run_count = 1;
@@ -81,7 +84,6 @@ static bool scale = true;
static bool no_aggr = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
-static struct thread_map *threads;
static pid_t child_pid = -1;
static bool null_run = false;
static bool big_num = true;
@@ -166,7 +168,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
PERF_FORMAT_TOTAL_TIME_RUNNING;
if (system_wide)
- return perf_evsel__open_per_cpu(evsel, cpus);
+ return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
attr->inherit = !no_inherit;
if (target_pid == -1 && target_tid == -1) {
@@ -174,7 +176,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->enable_on_exec = 1;
}
- return perf_evsel__open_per_thread(evsel, threads);
+ return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
}
/*
@@ -199,7 +201,8 @@ static int read_counter_aggr(struct perf_evsel *counter)
u64 *count = counter->counts->aggr.values;
int i;
- if (__perf_evsel__read(counter, cpus->nr, threads->nr, scale) < 0)
+ if (__perf_evsel__read(counter, evsel_list->cpus->nr,
+ evsel_list->threads->nr, scale) < 0)
return -1;
for (i = 0; i < 3; i++)
@@ -232,7 +235,7 @@ static int read_counter(struct perf_evsel *counter)
u64 *count;
int cpu;
- for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
return -1;
@@ -297,7 +300,7 @@ static int run_perf_stat(int argc __used, const char **argv)
}
if (target_tid == -1 && target_pid == -1 && !system_wide)
- threads->map[0] = child_pid;
+ evsel_list->threads->map[0] = child_pid;
/*
* Wait for the child to be ready to exec.
@@ -309,7 +312,7 @@ static int run_perf_stat(int argc __used, const char **argv)
close(child_ready_pipe[0]);
}
- list_for_each_entry(counter, &evsel_list, node) {
+ list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter) < 0) {
if (errno == -EPERM || errno == -EACCES) {
error("You may not have permission to collect %sstats.\n"
@@ -347,14 +350,15 @@ static int run_perf_stat(int argc __used, const char **argv)
update_stats(&walltime_nsecs_stats, t1 - t0);
if (no_aggr) {
- list_for_each_entry(counter, &evsel_list, node) {
+ list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter(counter);
- perf_evsel__close_fd(counter, cpus->nr, 1);
+ perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
}
} else {
- list_for_each_entry(counter, &evsel_list, node) {
+ list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter_aggr(counter);
- perf_evsel__close_fd(counter, cpus->nr, threads->nr);
+ perf_evsel__close_fd(counter, evsel_list->cpus->nr,
+ evsel_list->threads->nr);
}
}
@@ -382,10 +386,13 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
- cpus->map[cpu], csv_sep);
+ evsel_list->cpus->map[cpu], csv_sep);
fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));
+ if (evsel->cgrp)
+ fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
+
if (csv_output)
return;
@@ -410,12 +417,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
- cpus->map[cpu], csv_sep);
+ evsel_list->cpus->map[cpu], csv_sep);
else
cpu = 0;
fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));
+ if (evsel->cgrp)
+ fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
+
if (csv_output)
return;
@@ -456,9 +466,17 @@ static void print_counter_aggr(struct perf_evsel *counter)
int scaled = counter->counts->scaled;
if (scaled == -1) {
- fprintf(stderr, "%*s%s%-24s\n",
+ fprintf(stderr, "%*s%s%*s",
csv_output ? 0 : 18,
- "<not counted>", csv_sep, event_name(counter));
+ "<not counted>",
+ csv_sep,
+ csv_output ? 0 : -24,
+ event_name(counter));
+
+ if (counter->cgrp)
+ fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
+
+ fputc('\n', stderr);
return;
}
@@ -483,7 +501,6 @@ static void print_counter_aggr(struct perf_evsel *counter)
fprintf(stderr, " (scaled from %.2f%%)",
100 * avg_running / avg_enabled);
}
-
fprintf(stderr, "\n");
}
@@ -496,19 +513,23 @@ static void print_counter(struct perf_evsel *counter)
u64 ena, run, val;
int cpu;
- for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
val = counter->counts->cpu[cpu].val;
ena = counter->counts->cpu[cpu].ena;
run = counter->counts->cpu[cpu].run;
if (run == 0 || ena == 0) {
- fprintf(stderr, "CPU%*d%s%*s%s%-24s",
+ fprintf(stderr, "CPU%*d%s%*s%s%*s",
csv_output ? 0 : -4,
- cpus->map[cpu], csv_sep,
+ evsel_list->cpus->map[cpu], csv_sep,
csv_output ? 0 : 18,
"<not counted>", csv_sep,
+ csv_output ? 0 : -24,
event_name(counter));
- fprintf(stderr, "\n");
+ if (counter->cgrp)
+ fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
+
+ fputc('\n', stderr);
continue;
}
@@ -525,7 +546,7 @@ static void print_counter(struct perf_evsel *counter)
100.0 * run / ena);
}
}
- fprintf(stderr, "\n");
+ fputc('\n', stderr);
}
}
@@ -555,10 +576,10 @@ static void print_stat(int argc, const char **argv)
}
if (no_aggr) {
- list_for_each_entry(counter, &evsel_list, node)
+ list_for_each_entry(counter, &evsel_list->entries, node)
print_counter(counter);
} else {
- list_for_each_entry(counter, &evsel_list, node)
+ list_for_each_entry(counter, &evsel_list->entries, node)
print_counter_aggr(counter);
}
@@ -610,7 +631,7 @@ static int stat__set_big_num(const struct option *opt __used,
}
static const struct option options[] = {
- OPT_CALLBACK('e', "event", NULL, "event",
+ OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events),
OPT_BOOLEAN('i', "no-inherit", &no_inherit,
@@ -638,6 +659,9 @@ static const struct option options[] = {
"disable CPU count aggregation"),
OPT_STRING('x', "field-separator", &csv_sep, "separator",
"print counts with custom separator"),
+ OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+ "monitor event in cgroup name only",
+ parse_cgroups),
OPT_END()
};
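
The -G/--cgroup callback above reuses the same evsel_list, letting each event be pinned to a control group. Assuming a cgroup named "mygroup" exists under the perf_event subsystem mount, an invocation along the lines of "perf stat -e cycles -a -G mygroup sleep 1" would count cycles only for tasks in that group, and only in system-wide mode, as the check added further down enforces.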
@@ -648,6 +672,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
setlocale(LC_ALL, "");
+ evsel_list = perf_evlist__new(NULL, NULL);
+ if (evsel_list == NULL)
+ return -ENOMEM;
+
argc = parse_options(argc, argv, options, stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@@ -674,49 +702,50 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
if (run_count <= 0)
usage_with_options(stat_usage, options);
- /* no_aggr is for system-wide only */
- if (no_aggr && !system_wide)
+ /* no_aggr, cgroup are for system-wide only */
+ if ((no_aggr || nr_cgroups) && !system_wide) {
+ fprintf(stderr, "both cgroup and no-aggregation "
+ "modes only available in system-wide mode\n");
+
usage_with_options(stat_usage, options);
+ }
/* Set attrs and nr_counters if no event is selected and !null_run */
- if (!null_run && !nr_counters) {
+ if (!null_run && !evsel_list->nr_entries) {
size_t c;
- nr_counters = ARRAY_SIZE(default_attrs);
-
for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
- pos = perf_evsel__new(&default_attrs[c],
- nr_counters);
+ pos = perf_evsel__new(&default_attrs[c], c);
if (pos == NULL)
goto out;
- list_add(&pos->node, &evsel_list);
+ perf_evlist__add(evsel_list, pos);
}
}
if (target_pid != -1)
target_tid = target_pid;
- threads = thread_map__new(target_pid, target_tid);
- if (threads == NULL) {
+ evsel_list->threads = thread_map__new(target_pid, target_tid);
+ if (evsel_list->threads == NULL) {
pr_err("Problems finding threads of monitor\n");
usage_with_options(stat_usage, options);
}
if (system_wide)
- cpus = cpu_map__new(cpu_list);
+ evsel_list->cpus = cpu_map__new(cpu_list);
else
- cpus = cpu_map__dummy_new();
+ evsel_list->cpus = cpu_map__dummy_new();
- if (cpus == NULL) {
+ if (evsel_list->cpus == NULL) {
perror("failed to parse CPUs map");
usage_with_options(stat_usage, options);
return -1;
}
- list_for_each_entry(pos, &evsel_list, node) {
+ list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
- perf_evsel__alloc_counts(pos, cpus->nr) < 0 ||
- perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+ perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
+ perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
goto out_free_fd;
}
@@ -741,11 +770,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
if (status != -1)
print_stat(argc, argv);
out_free_fd:
- list_for_each_entry(pos, &evsel_list, node)
+ list_for_each_entry(pos, &evsel_list->entries, node)
perf_evsel__free_stat_priv(pos);
- perf_evsel_list__delete();
+ perf_evlist__delete_maps(evsel_list);
out:
- thread_map__delete(threads);
- threads = NULL;
+ perf_evlist__delete(evsel_list);
return status;
}
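
Taken together, the builtin-stat changes above replace the file-scope cpu_map/thread_map globals with maps owned by the evlist. A condensed sketch of the resulting setup and teardown, using only helpers visible in this series (error handling trimmed; attr, pid and cpu_list stand in for the real option variables):

	static int stat_evlist_sketch(struct perf_event_attr *attr, pid_t pid,
				      const char *cpu_list, bool system_wide)
	{
		struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
		struct perf_evsel *evsel;
		int err = -ENOMEM;

		if (evlist == NULL)
			return err;

		evsel = perf_evsel__new(attr, 0);
		if (evsel == NULL)
			goto out_delete;
		perf_evlist__add(evlist, evsel);

		/* the maps now live in the evlist instead of file-scope globals */
		evlist->threads = thread_map__new(pid, pid);
		evlist->cpus = system_wide ? cpu_map__new(cpu_list) :
					     cpu_map__dummy_new();

		if (system_wide)
			err = perf_evsel__open_per_cpu(evsel, evlist->cpus, false, false);
		else
			err = perf_evsel__open_per_thread(evsel, evlist->threads, false, false);

		/* ... enable, read and print the counters here ... */

		perf_evlist__delete_maps(evlist);
	out_delete:
		perf_evlist__delete(evlist);
		return err;
	}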
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 5dcdba653d70..1b2106c58f66 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -7,10 +7,11 @@
#include "util/cache.h"
#include "util/debug.h"
+#include "util/evlist.h"
#include "util/parse-options.h"
-#include "util/session.h"
+#include "util/parse-events.h"
#include "util/symbol.h"
-#include "util/thread.h"
+#include "util/thread_map.h"
static long page_size;
@@ -238,14 +239,14 @@ out:
#include "util/evsel.h"
#include <sys/types.h>
-static int trace_event__id(const char *event_name)
+static int trace_event__id(const char *evname)
{
char *filename;
int err = -1, fd;
if (asprintf(&filename,
"/sys/kernel/debug/tracing/events/syscalls/%s/id",
- event_name) < 0)
+ evname) < 0)
return -1;
fd = open(filename, O_RDONLY);
@@ -289,7 +290,7 @@ static int test__open_syscall_event(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open_per_thread(evsel, threads) < 0) {
+ if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -347,9 +348,9 @@ static int test__open_syscall_event_on_all_cpus(void)
}
cpus = cpu_map__new(NULL);
- if (threads == NULL) {
- pr_debug("thread_map__new\n");
- return -1;
+ if (cpus == NULL) {
+ pr_debug("cpu_map__new\n");
+ goto out_thread_map_delete;
}
@@ -364,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open(evsel, cpus, threads) < 0) {
+ if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -408,6 +409,8 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_close_fd;
}
+ err = 0;
+
for (cpu = 0; cpu < cpus->nr; ++cpu) {
unsigned int expected;
@@ -416,18 +419,18 @@ static int test__open_syscall_event_on_all_cpus(void)
if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
pr_debug("perf_evsel__open_read_on_cpu\n");
- goto out_close_fd;
+ err = -1;
+ break;
}
expected = nr_open_calls + cpu;
if (evsel->counts->cpu[cpu].val != expected) {
pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
- goto out_close_fd;
+ err = -1;
}
}
- err = 0;
out_close_fd:
perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
@@ -437,6 +440,159 @@ out_thread_map_delete:
return err;
}
+/*
+ * This test generates a random number of calls to a few getpid-like syscalls,
+ * then sets up an mmap ring buffer for a group of events created to monitor
+ * those syscalls.
+ *
+ * It then receives the events via mmap and uses the PERF_SAMPLE_ID generated
+ * sample.id field to map each event back to its respective perf_evsel instance.
+ *
+ * Then it checks if the number of syscalls reported as perf events by
+ * the kernel corresponds to the number of syscalls made.
+ */
+static int test__basic_mmap(void)
+{
+ int err = -1;
+ union perf_event *event;
+ struct thread_map *threads;
+ struct cpu_map *cpus;
+ struct perf_evlist *evlist;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .read_format = PERF_FORMAT_ID,
+ .sample_type = PERF_SAMPLE_ID,
+ .watermark = 0,
+ };
+ cpu_set_t cpu_set;
+ const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
+ "getpgid", };
+ pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
+ (void*)getpgid };
+#define nsyscalls ARRAY_SIZE(syscall_names)
+ int ids[nsyscalls];
+ unsigned int nr_events[nsyscalls],
+ expected_nr_events[nsyscalls], i, j;
+ struct perf_evsel *evsels[nsyscalls], *evsel;
+
+ for (i = 0; i < nsyscalls; ++i) {
+ char name[64];
+
+ snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
+ ids[i] = trace_event__id(name);
+ if (ids[i] < 0) {
+ pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
+ return -1;
+ }
+ nr_events[i] = 0;
+ expected_nr_events[i] = random() % 257;
+ }
+
+ threads = thread_map__new(-1, getpid());
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return -1;
+ }
+
+ cpus = cpu_map__new(NULL);
+ if (cpus == NULL) {
+ pr_debug("cpu_map__new\n");
+ goto out_free_threads;
+ }
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(cpus->map[0], &cpu_set);
+ sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+ if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+ pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+ cpus->map[0], strerror(errno));
+ goto out_free_cpus;
+ }
+
+ evlist = perf_evlist__new(cpus, threads);
+ if (evlist == NULL) {
+ pr_debug("perf_evlist__new\n");
+ goto out_free_cpus;
+ }
+
+ /* anonymous union fields, can't be initialized above */
+ attr.wakeup_events = 1;
+ attr.sample_period = 1;
+
+ for (i = 0; i < nsyscalls; ++i) {
+ attr.config = ids[i];
+ evsels[i] = perf_evsel__new(&attr, i);
+ if (evsels[i] == NULL) {
+ pr_debug("perf_evsel__new\n");
+ goto out_free_evlist;
+ }
+
+ perf_evlist__add(evlist, evsels[i]);
+
+ if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+ pr_debug("failed to open counter: %s, "
+ "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+ strerror(errno));
+ goto out_close_fd;
+ }
+ }
+
+ if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ pr_debug("failed to mmap events: %d (%s)\n", errno,
+ strerror(errno));
+ goto out_close_fd;
+ }
+
+ for (i = 0; i < nsyscalls; ++i)
+ for (j = 0; j < expected_nr_events[i]; ++j) {
+ int foo = syscalls[i]();
+ ++foo;
+ }
+
+ while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
+ struct perf_sample sample;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ pr_debug("unexpected %s event\n",
+ perf_event__name(event->header.type));
+ goto out_munmap;
+ }
+
+ perf_event__parse_sample(event, attr.sample_type, false, &sample);
+ evsel = perf_evlist__id2evsel(evlist, sample.id);
+ if (evsel == NULL) {
+ pr_debug("event with id %" PRIu64
+ " doesn't map to an evsel\n", sample.id);
+ goto out_munmap;
+ }
+ nr_events[evsel->idx]++;
+ }
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
+ pr_debug("expected %d %s events, got %d\n",
+ expected_nr_events[evsel->idx],
+ event_name(evsel), nr_events[evsel->idx]);
+ goto out_munmap;
+ }
+ }
+
+ err = 0;
+out_munmap:
+ perf_evlist__munmap(evlist);
+out_close_fd:
+ for (i = 0; i < nsyscalls; ++i)
+ perf_evsel__close_fd(evsels[i], 1, threads->nr);
+out_free_evlist:
+ perf_evlist__delete(evlist);
+out_free_cpus:
+ cpu_map__delete(cpus);
+out_free_threads:
+ thread_map__delete(threads);
+ return err;
+#undef nsyscalls
+}
+
static struct test {
const char *desc;
int (*func)(void);
@@ -454,6 +610,10 @@ static struct test {
.func = test__open_syscall_event_on_all_cpus,
},
{
+ .desc = "read samples using the mmap interface",
+ .func = test__basic_mmap,
+ },
+ {
.func = NULL,
},
};
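
The new test also doubles as a reference for the mmap read path this series introduces: open the evsels, perf_evlist__mmap() the whole list, then pull events per CPU and resolve the owning evsel from sample.id. A trimmed sketch of that consumption loop, modeled on test__basic_mmap above (the helper name is made up for illustration, and buffer-wrap corner cases are left to the evlist code):

	static int read_all_on_cpu(struct perf_evlist *evlist, int cpu, u64 sample_type)
	{
		union perf_event *event;

		while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL) {
			struct perf_sample sample;
			struct perf_evsel *evsel;

			if (event->header.type != PERF_RECORD_SAMPLE)
				continue;	/* only samples carry the id we want */

			perf_event__parse_sample(event, sample_type, false, &sample);
			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL)
				return -1;	/* id not owned by any evsel in the list */

			/* account the sample to evsel here, e.g. a per-evsel counter */
		}
		return 0;
	}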
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 746cf03cb05d..67c0459dc325 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
c->start_time = start;
if (p->start_time == 0 || p->start_time > start)
p->start_time = start;
-
- if (cpu > numcpus)
- numcpus = cpu;
}
#define MAX_CPUS 4096
@@ -276,21 +273,24 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int process_comm_event(event_t *event, struct sample_data *sample __used,
+static int process_comm_event(union perf_event *event,
+ struct perf_sample *sample __used,
struct perf_session *session __used)
{
pid_set_comm(event->comm.tid, event->comm.comm);
return 0;
}
-static int process_fork_event(event_t *event, struct sample_data *sample __used,
+static int process_fork_event(union perf_event *event,
+ struct perf_sample *sample __used,
struct perf_session *session __used)
{
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int process_exit_event(event_t *event, struct sample_data *sample __used,
+static int process_exit_event(union perf_event *event,
+ struct perf_sample *sample __used,
struct perf_session *session __used)
{
pid_exit(event->fork.pid, event->fork.time);
@@ -486,8 +486,8 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
-static int process_sample_event(event_t *event __used,
- struct sample_data *sample,
+static int process_sample_event(union perf_event *event __used,
+ struct perf_sample *sample,
struct perf_session *session)
{
struct trace_entry *te;
@@ -511,6 +511,9 @@ static int process_sample_event(event_t *event __used,
if (!event_str)
return 0;
+ if (sample->cpu > numcpus)
+ numcpus = sample->cpu;
+
if (strcmp(event_str, "power:cpu_idle") == 0) {
struct power_processor_entry *ppe = (void *)te;
if (ppe->state == (u32)PWR_EVENT_EXIT)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 5a29d9cd9486..f88a2630e1fc 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -20,11 +20,16 @@
#include "perf.h"
+#include "util/annotate.h"
+#include "util/cache.h"
#include "util/color.h"
+#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
+#include "util/thread_map.h"
+#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
@@ -45,7 +50,6 @@
#include <errno.h>
#include <time.h>
#include <sched.h>
-#include <pthread.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
@@ -60,85 +64,41 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+static struct perf_top top = {
+ .count_filter = 5,
+ .delay_secs = 2,
+ .display_weighted = -1,
+ .target_pid = -1,
+ .target_tid = -1,
+ .active_symbols = LIST_HEAD_INIT(top.active_symbols),
+ .active_symbols_lock = PTHREAD_MUTEX_INITIALIZER,
+ .freq = 1000, /* 1 KHz */
+};
+
static bool system_wide = false;
-static int default_interval = 0;
+static bool use_tui, use_stdio;
-static int count_filter = 5;
-static int print_entries;
+static int default_interval = 0;
-static int target_pid = -1;
-static int target_tid = -1;
-static struct thread_map *threads;
static bool inherit = false;
-static struct cpu_map *cpus;
static int realtime_prio = 0;
static bool group = false;
static unsigned int page_size;
-static unsigned int mmap_pages = 16;
-static int freq = 1000; /* 1 KHz */
+static unsigned int mmap_pages = 128;
-static int delay_secs = 2;
-static bool zero = false;
static bool dump_symtab = false;
-static bool hide_kernel_symbols = false;
-static bool hide_user_symbols = false;
static struct winsize winsize;
-/*
- * Source
- */
-
-struct source_line {
- u64 eip;
- unsigned long count[MAX_COUNTERS];
- char *line;
- struct source_line *next;
-};
-
static const char *sym_filter = NULL;
-struct sym_entry *sym_filter_entry = NULL;
struct sym_entry *sym_filter_entry_sched = NULL;
static int sym_pcnt_filter = 5;
-static int sym_counter = 0;
-static struct perf_evsel *sym_evsel = NULL;
-static int display_weighted = -1;
-static const char *cpu_list;
-
-/*
- * Symbols
- */
-
-struct sym_entry_source {
- struct source_line *source;
- struct source_line *lines;
- struct source_line **lines_tail;
- pthread_mutex_t lock;
-};
-
-struct sym_entry {
- struct rb_node rb_node;
- struct list_head node;
- unsigned long snap_count;
- double weight;
- int skip;
- u16 name_len;
- u8 origin;
- struct map *map;
- struct sym_entry_source *src;
- unsigned long count[0];
-};
/*
* Source functions
*/
-static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
-{
- return ((void *)self) + symbol_conf.priv_size;
-}
-
void get_term_dimensions(struct winsize *ws)
{
char *s = getenv("LINES");
@@ -163,10 +123,10 @@ void get_term_dimensions(struct winsize *ws)
static void update_print_entries(struct winsize *ws)
{
- print_entries = ws->ws_row;
+ top.print_entries = ws->ws_row;
- if (print_entries > 9)
- print_entries -= 9;
+ if (top.print_entries > 9)
+ top.print_entries -= 9;
}
static void sig_winch_handler(int sig __used)
@@ -178,12 +138,9 @@ static void sig_winch_handler(int sig __used)
static int parse_source(struct sym_entry *syme)
{
struct symbol *sym;
- struct sym_entry_source *source;
+ struct annotation *notes;
struct map *map;
- FILE *file;
- char command[PATH_MAX*2];
- const char *path;
- u64 len;
+ int err = -1;
if (!syme)
return -1;
@@ -194,411 +151,137 @@ static int parse_source(struct sym_entry *syme)
/*
* We can't annotate with just /proc/kallsyms
*/
- if (map->dso->origin == DSO__ORIG_KERNEL)
+ if (map->dso->origin == DSO__ORIG_KERNEL) {
+ pr_err("Can't annotate %s: No vmlinux file was found in the "
+ "path\n", sym->name);
+ sleep(1);
return -1;
-
- if (syme->src == NULL) {
- syme->src = zalloc(sizeof(*source));
- if (syme->src == NULL)
- return -1;
- pthread_mutex_init(&syme->src->lock, NULL);
}
- source = syme->src;
-
- if (source->lines) {
- pthread_mutex_lock(&source->lock);
+ notes = symbol__annotation(sym);
+ if (notes->src != NULL) {
+ pthread_mutex_lock(&notes->lock);
goto out_assign;
}
- path = map->dso->long_name;
-
- len = sym->end - sym->start;
-
- sprintf(command,
- "objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
- BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
- BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
-
- file = popen(command, "r");
- if (!file)
- return -1;
- pthread_mutex_lock(&source->lock);
- source->lines_tail = &source->lines;
- while (!feof(file)) {
- struct source_line *src;
- size_t dummy = 0;
- char *c, *sep;
+ pthread_mutex_lock(&notes->lock);
- src = malloc(sizeof(struct source_line));
- assert(src != NULL);
- memset(src, 0, sizeof(struct source_line));
-
- if (getline(&src->line, &dummy, file) < 0)
- break;
- if (!src->line)
- break;
-
- c = strchr(src->line, '\n');
- if (c)
- *c = 0;
-
- src->next = NULL;
- *source->lines_tail = src;
- source->lines_tail = &src->next;
-
- src->eip = strtoull(src->line, &sep, 16);
- if (*sep == ':')
- src->eip = map__objdump_2ip(map, src->eip);
- else /* this line has no ip info (e.g. source line) */
- src->eip = 0;
+ if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+ pthread_mutex_unlock(&notes->lock);
+ pr_err("Not enough memory for annotating '%s' symbol!\n",
+ sym->name);
+ sleep(1);
+ return err;
}
- pclose(file);
+
+ err = symbol__annotate(sym, syme->map, 0);
+ if (err == 0) {
out_assign:
- sym_filter_entry = syme;
- pthread_mutex_unlock(&source->lock);
- return 0;
+ top.sym_filter_entry = syme;
+ }
+
+ pthread_mutex_unlock(&notes->lock);
+ return err;
}
static void __zero_source_counters(struct sym_entry *syme)
{
- int i;
- struct source_line *line;
-
- line = syme->src->lines;
- while (line) {
- for (i = 0; i < nr_counters; i++)
- line->count[i] = 0;
- line = line->next;
- }
+ struct symbol *sym = sym_entry__symbol(syme);
+ symbol__annotate_zero_histograms(sym);
}
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
- struct source_line *line;
-
- if (syme != sym_filter_entry)
- return;
+ struct annotation *notes;
+ struct symbol *sym;
- if (pthread_mutex_trylock(&syme->src->lock))
+ if (syme != top.sym_filter_entry)
return;
- if (syme->src == NULL || syme->src->source == NULL)
- goto out_unlock;
-
- for (line = syme->src->lines; line; line = line->next) {
- /* skip lines without IP info */
- if (line->eip == 0)
- continue;
- if (line->eip == ip) {
- line->count[counter]++;
- break;
- }
- if (line->eip > ip)
- break;
- }
-out_unlock:
- pthread_mutex_unlock(&syme->src->lock);
-}
-
-#define PATTERN_LEN (BITS_PER_LONG / 4 + 2)
-
-static void lookup_sym_source(struct sym_entry *syme)
-{
- struct symbol *symbol = sym_entry__symbol(syme);
- struct source_line *line;
- char pattern[PATTERN_LEN + 1];
-
- sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
- map__rip_2objdump(syme->map, symbol->start));
-
- pthread_mutex_lock(&syme->src->lock);
- for (line = syme->src->lines; line; line = line->next) {
- if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
- syme->src->source = line;
- break;
- }
- }
- pthread_mutex_unlock(&syme->src->lock);
-}
+ sym = sym_entry__symbol(syme);
+ notes = symbol__annotation(sym);
-static void show_lines(struct source_line *queue, int count, int total)
-{
- int i;
- struct source_line *line;
+ if (pthread_mutex_trylock(&notes->lock))
+ return;
- line = queue;
- for (i = 0; i < count; i++) {
- float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;
+ ip = syme->map->map_ip(syme->map, ip);
+ symbol__inc_addr_samples(sym, syme->map, counter, ip);
- printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
- line = line->next;
- }
+ pthread_mutex_unlock(&notes->lock);
}
-#define TRACE_COUNT 3
-
static void show_details(struct sym_entry *syme)
{
+ struct annotation *notes;
struct symbol *symbol;
- struct source_line *line;
- struct source_line *line_queue = NULL;
- int displayed = 0;
- int line_queue_count = 0, total = 0, more = 0;
+ int more;
if (!syme)
return;
- if (!syme->src->source)
- lookup_sym_source(syme);
-
- if (!syme->src->source)
- return;
-
symbol = sym_entry__symbol(syme);
- printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
+ notes = symbol__annotation(symbol);
- pthread_mutex_lock(&syme->src->lock);
- line = syme->src->source;
- while (line) {
- total += line->count[sym_counter];
- line = line->next;
- }
-
- line = syme->src->source;
- while (line) {
- float pcnt = 0.0;
-
- if (!line_queue_count)
- line_queue = line;
- line_queue_count++;
-
- if (line->count[sym_counter])
- pcnt = 100.0 * line->count[sym_counter] / (float)total;
- if (pcnt >= (float)sym_pcnt_filter) {
- if (displayed <= print_entries)
- show_lines(line_queue, line_queue_count, total);
- else more++;
- displayed += line_queue_count;
- line_queue_count = 0;
- line_queue = NULL;
- } else if (line_queue_count > TRACE_COUNT) {
- line_queue = line_queue->next;
- line_queue_count--;
- }
-
- line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
- line = line->next;
- }
- pthread_mutex_unlock(&syme->src->lock);
- if (more)
- printf("%d lines not displayed, maybe increase display entries [e]\n", more);
-}
+ pthread_mutex_lock(&notes->lock);
-/*
- * Symbols will be added here in event__process_sample and will get out
- * after decayed.
- */
-static LIST_HEAD(active_symbols);
-static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Ordering weight: count-1 * count-2 * ... / count-n
- */
-static double sym_weight(const struct sym_entry *sym)
-{
- double weight = sym->snap_count;
- int counter;
-
- if (!display_weighted)
- return weight;
-
- for (counter = 1; counter < nr_counters-1; counter++)
- weight *= sym->count[counter];
+ if (notes->src == NULL)
+ goto out_unlock;
- weight /= (sym->count[counter] + 1);
+ printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
+ printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
- return weight;
+ more = symbol__annotate_printf(symbol, syme->map, top.sym_evsel->idx,
+ 0, sym_pcnt_filter, top.print_entries, 4);
+ if (top.zero)
+ symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
+ else
+ symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
+ if (more != 0)
+ printf("%d lines not displayed, maybe increase display entries [e]\n", more);
+out_unlock:
+ pthread_mutex_unlock(&notes->lock);
}
-static long samples;
-static long kernel_samples, us_samples;
-static long exact_samples;
-static long guest_us_samples, guest_kernel_samples;
static const char CONSOLE_CLEAR[] = "";
static void __list_insert_active_sym(struct sym_entry *syme)
{
- list_add(&syme->node, &active_symbols);
+ list_add(&syme->node, &top.active_symbols);
}
-static void list_remove_active_sym(struct sym_entry *syme)
+static void print_sym_table(struct perf_session *session)
{
- pthread_mutex_lock(&active_symbols_lock);
- list_del_init(&syme->node);
- pthread_mutex_unlock(&active_symbols_lock);
-}
-
-static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
-{
- struct rb_node **p = &tree->rb_node;
- struct rb_node *parent = NULL;
- struct sym_entry *iter;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct sym_entry, rb_node);
-
- if (se->weight > iter->weight)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&se->rb_node, parent, p);
- rb_insert_color(&se->rb_node, tree);
-}
-
-static void print_sym_table(void)
-{
- int printed = 0, j;
- struct perf_evsel *counter;
- int snap = !display_weighted ? sym_counter : 0;
- float samples_per_sec = samples/delay_secs;
- float ksamples_per_sec = kernel_samples/delay_secs;
- float us_samples_per_sec = (us_samples)/delay_secs;
- float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
- float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
- float esamples_percent = (100.0*exact_samples)/samples;
- float sum_ksamples = 0.0;
- struct sym_entry *syme, *n;
- struct rb_root tmp = RB_ROOT;
+ char bf[160];
+ int printed = 0;
struct rb_node *nd;
- int sym_width = 0, dso_width = 0, dso_short_width = 0;
+ struct sym_entry *syme;
+ struct rb_root tmp = RB_ROOT;
const int win_width = winsize.ws_col - 1;
-
- samples = us_samples = kernel_samples = exact_samples = 0;
- guest_kernel_samples = guest_us_samples = 0;
-
- /* Sort the active symbols */
- pthread_mutex_lock(&active_symbols_lock);
- syme = list_entry(active_symbols.next, struct sym_entry, node);
- pthread_mutex_unlock(&active_symbols_lock);
-
- list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
- syme->snap_count = syme->count[snap];
- if (syme->snap_count != 0) {
-
- if ((hide_user_symbols &&
- syme->origin == PERF_RECORD_MISC_USER) ||
- (hide_kernel_symbols &&
- syme->origin == PERF_RECORD_MISC_KERNEL)) {
- list_remove_active_sym(syme);
- continue;
- }
- syme->weight = sym_weight(syme);
- rb_insert_active_sym(&tmp, syme);
- sum_ksamples += syme->snap_count;
-
- for (j = 0; j < nr_counters; j++)
- syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
- } else
- list_remove_active_sym(syme);
- }
+ int sym_width, dso_width, dso_short_width;
+ float sum_ksamples = perf_top__decay_samples(&top, &tmp);
puts(CONSOLE_CLEAR);
- printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
- if (!perf_guest) {
- printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
- " exact: %4.1f%% [",
- samples_per_sec,
- 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
- samples_per_sec)),
- esamples_percent);
- } else {
- printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
- " guest kernel:%4.1f%% guest us:%4.1f%%"
- " exact: %4.1f%% [",
- samples_per_sec,
- 100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
- samples_per_sec)),
- 100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
- samples_per_sec)),
- 100.0 - (100.0 * ((samples_per_sec -
- guest_kernel_samples_per_sec) /
- samples_per_sec)),
- 100.0 - (100.0 * ((samples_per_sec -
- guest_us_samples_per_sec) /
- samples_per_sec)),
- esamples_percent);
- }
-
- if (nr_counters == 1 || !display_weighted) {
- struct perf_evsel *first;
- first = list_entry(evsel_list.next, struct perf_evsel, node);
- printf("%" PRIu64, (uint64_t)first->attr.sample_period);
- if (freq)
- printf("Hz ");
- else
- printf(" ");
- }
+ perf_top__header_snprintf(&top, bf, sizeof(bf));
+ printf("%s\n", bf);
- if (!display_weighted)
- printf("%s", event_name(sym_evsel));
- else list_for_each_entry(counter, &evsel_list, node) {
- if (counter->idx)
- printf("/");
-
- printf("%s", event_name(counter));
- }
-
- printf( "], ");
-
- if (target_pid != -1)
- printf(" (target_pid: %d", target_pid);
- else if (target_tid != -1)
- printf(" (target_tid: %d", target_tid);
- else
- printf(" (all");
-
- if (cpu_list)
- printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list);
- else {
- if (target_tid != -1)
- printf(")\n");
- else
- printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : "");
- }
+ perf_top__reset_sample_counters(&top);
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
- if (sym_filter_entry) {
- show_details(sym_filter_entry);
- return;
+ if (session->hists.stats.total_lost != 0) {
+ color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
+ printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
+ session->hists.stats.total_lost);
}
- /*
- * Find the longest symbol name that will be displayed
- */
- for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
- syme = rb_entry(nd, struct sym_entry, rb_node);
- if (++printed > print_entries ||
- (int)syme->snap_count < count_filter)
- continue;
-
- if (syme->map->dso->long_name_len > dso_width)
- dso_width = syme->map->dso->long_name_len;
-
- if (syme->map->dso->short_name_len > dso_short_width)
- dso_short_width = syme->map->dso->short_name_len;
-
- if (syme->name_len > sym_width)
- sym_width = syme->name_len;
+ if (top.sym_filter_entry) {
+ show_details(top.sym_filter_entry);
+ return;
}
- printed = 0;
+ perf_top__find_widths(&top, &tmp, &dso_width, &dso_short_width,
+ &sym_width);
if (sym_width + dso_width > winsize.ws_col - 29) {
dso_width = dso_short_width;
@@ -606,7 +289,7 @@ static void print_sym_table(void)
sym_width = winsize.ws_col - dso_width - 29;
}
putchar('\n');
- if (nr_counters == 1)
+ if (top.evlist->nr_entries == 1)
printf(" samples pcnt");
else
printf(" weight samples pcnt");
@@ -615,7 +298,7 @@ static void print_sym_table(void)
printf(" RIP ");
printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
printf(" %s _______ _____",
- nr_counters == 1 ? " " : "______");
+ top.evlist->nr_entries == 1 ? " " : "______");
if (verbose)
printf(" ________________");
printf(" %-*.*s", sym_width, sym_width, graph_line);
@@ -628,13 +311,14 @@ static void print_sym_table(void)
syme = rb_entry(nd, struct sym_entry, rb_node);
sym = sym_entry__symbol(syme);
- if (++printed > print_entries || (int)syme->snap_count < count_filter)
+ if (++printed > top.print_entries ||
+ (int)syme->snap_count < top.count_filter)
continue;
pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
sum_ksamples));
- if (nr_counters == 1 || !display_weighted)
+ if (top.evlist->nr_entries == 1 || !top.display_weighted)
printf("%20.2f ", syme->weight);
else
printf("%9.1f %10ld ", syme->weight, syme->snap_count);
@@ -693,10 +377,8 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
/* zero counters of active symbol */
if (syme) {
- pthread_mutex_lock(&syme->src->lock);
__zero_source_counters(syme);
*target = NULL;
- pthread_mutex_unlock(&syme->src->lock);
}
fprintf(stdout, "\n%s: ", msg);
@@ -707,11 +389,11 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
if (p)
*p = 0;
- pthread_mutex_lock(&active_symbols_lock);
- syme = list_entry(active_symbols.next, struct sym_entry, node);
- pthread_mutex_unlock(&active_symbols_lock);
+ pthread_mutex_lock(&top.active_symbols_lock);
+ syme = list_entry(top.active_symbols.next, struct sym_entry, node);
+ pthread_mutex_unlock(&top.active_symbols_lock);
- list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
+ list_for_each_entry_safe_from(syme, n, &top.active_symbols, node) {
struct symbol *sym = sym_entry__symbol(syme);
if (!strcmp(buf, sym->name)) {
@@ -735,34 +417,34 @@ static void print_mapped_keys(void)
{
char *name = NULL;
- if (sym_filter_entry) {
- struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+ if (top.sym_filter_entry) {
+ struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
name = sym->name;
}
fprintf(stdout, "\nMapped keys:\n");
- fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", delay_secs);
- fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries);
+ fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top.delay_secs);
+ fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top.print_entries);
- if (nr_counters > 1)
- fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_evsel));
+ if (top.evlist->nr_entries > 1)
+ fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top.sym_evsel));
- fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
+ fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top.count_filter);
fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
- if (nr_counters > 1)
- fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
+ if (top.evlist->nr_entries > 1)
+ fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", top.display_weighted ? 1 : 0);
fprintf(stdout,
"\t[K] hide kernel_symbols symbols. \t(%s)\n",
- hide_kernel_symbols ? "yes" : "no");
+ top.hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
- hide_user_symbols ? "yes" : "no");
- fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0);
+ top.hide_user_symbols ? "yes" : "no");
+ fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top.zero ? 1 : 0);
fprintf(stdout, "\t[qQ] quit.\n");
}
@@ -783,7 +465,7 @@ static int key_mapped(int c)
return 1;
case 'E':
case 'w':
- return nr_counters > 1 ? 1 : 0;
+ return top.evlist->nr_entries > 1 ? 1 : 0;
default:
break;
}
@@ -818,47 +500,47 @@ static void handle_keypress(struct perf_session *session, int c)
switch (c) {
case 'd':
- prompt_integer(&delay_secs, "Enter display delay");
- if (delay_secs < 1)
- delay_secs = 1;
+ prompt_integer(&top.delay_secs, "Enter display delay");
+ if (top.delay_secs < 1)
+ top.delay_secs = 1;
break;
case 'e':
- prompt_integer(&print_entries, "Enter display entries (lines)");
- if (print_entries == 0) {
+ prompt_integer(&top.print_entries, "Enter display entries (lines)");
+ if (top.print_entries == 0) {
sig_winch_handler(SIGWINCH);
signal(SIGWINCH, sig_winch_handler);
} else
signal(SIGWINCH, SIG_DFL);
break;
case 'E':
- if (nr_counters > 1) {
+ if (top.evlist->nr_entries > 1) {
fprintf(stderr, "\nAvailable events:");
- list_for_each_entry(sym_evsel, &evsel_list, node)
- fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel));
+ list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
+ fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));
- prompt_integer(&sym_counter, "Enter details event counter");
+ prompt_integer(&top.sym_counter, "Enter details event counter");
- if (sym_counter >= nr_counters) {
- sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
- sym_counter = 0;
- fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel));
+ if (top.sym_counter >= top.evlist->nr_entries) {
+ top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+ top.sym_counter = 0;
+ fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
sleep(1);
break;
}
- list_for_each_entry(sym_evsel, &evsel_list, node)
- if (sym_evsel->idx == sym_counter)
+ list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
+ if (top.sym_evsel->idx == top.sym_counter)
break;
- } else sym_counter = 0;
+ } else top.sym_counter = 0;
break;
case 'f':
- prompt_integer(&count_filter, "Enter display event count filter");
+ prompt_integer(&top.count_filter, "Enter display event count filter");
break;
case 'F':
prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
break;
case 'K':
- hide_kernel_symbols = !hide_kernel_symbols;
+ top.hide_kernel_symbols = !top.hide_kernel_symbols;
break;
case 'q':
case 'Q':
@@ -867,34 +549,40 @@ static void handle_keypress(struct perf_session *session, int c)
perf_session__fprintf_dsos(session, stderr);
exit(0);
case 's':
- prompt_symbol(&sym_filter_entry, "Enter details symbol");
+ prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
break;
case 'S':
- if (!sym_filter_entry)
+ if (!top.sym_filter_entry)
break;
else {
- struct sym_entry *syme = sym_filter_entry;
+ struct sym_entry *syme = top.sym_filter_entry;
- pthread_mutex_lock(&syme->src->lock);
- sym_filter_entry = NULL;
+ top.sym_filter_entry = NULL;
__zero_source_counters(syme);
- pthread_mutex_unlock(&syme->src->lock);
}
break;
case 'U':
- hide_user_symbols = !hide_user_symbols;
+ top.hide_user_symbols = !top.hide_user_symbols;
break;
case 'w':
- display_weighted = ~display_weighted;
+ top.display_weighted = ~top.display_weighted;
break;
case 'z':
- zero = !zero;
+ top.zero = !top.zero;
break;
default:
break;
}
}
+static void *display_thread_tui(void *arg __used)
+{
+ perf_top__tui_browser(&top);
+ exit_browser(0);
+ exit(0);
+ return NULL;
+}
+
static void *display_thread(void *arg __used)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
@@ -909,13 +597,13 @@ static void *display_thread(void *arg __used)
tc.c_cc[VTIME] = 0;
repeat:
- delay_msecs = delay_secs * 1000;
+ delay_msecs = top.delay_secs * 1000;
tcsetattr(0, TCSANOW, &tc);
/* trash return*/
getc(stdin);
do {
- print_sym_table();
+ print_sym_table(session);
} while (!poll(&stdin_poll, 1, delay_msecs) == 1);
c = getc(stdin);
@@ -930,6 +618,7 @@ repeat:
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
"default_idle",
+ "native_safe_halt",
"cpu_idle",
"enter_idle",
"exit_idle",
@@ -965,9 +654,9 @@ static int symbol_filter(struct map *map, struct symbol *sym)
syme = symbol__priv(sym);
syme->map = map;
- syme->src = NULL;
+ symbol__annotate_init(map, sym);
- if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
+ if (!top.sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
/* schedule initial sym_filter_entry setup */
sym_filter_entry_sched = syme;
sym_filter = NULL;
@@ -980,44 +669,40 @@ static int symbol_filter(struct map *map, struct symbol *sym)
}
}
- if (!syme->skip)
- syme->name_len = strlen(sym->name);
-
return 0;
}
-static void event__process_sample(const event_t *self,
- struct sample_data *sample,
- struct perf_session *session,
- struct perf_evsel *evsel)
+static void perf_event__process_sample(const union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_session *session)
{
- u64 ip = self->ip.ip;
+ u64 ip = event->ip.ip;
struct sym_entry *syme;
struct addr_location al;
struct machine *machine;
- u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- ++samples;
+ ++top.samples;
switch (origin) {
case PERF_RECORD_MISC_USER:
- ++us_samples;
- if (hide_user_symbols)
+ ++top.us_samples;
+ if (top.hide_user_symbols)
return;
machine = perf_session__find_host_machine(session);
break;
case PERF_RECORD_MISC_KERNEL:
- ++kernel_samples;
- if (hide_kernel_symbols)
+ ++top.kernel_samples;
+ if (top.hide_kernel_symbols)
return;
machine = perf_session__find_host_machine(session);
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
- ++guest_kernel_samples;
- machine = perf_session__find_machine(session, self->ip.pid);
+ ++top.guest_kernel_samples;
+ machine = perf_session__find_machine(session, event->ip.pid);
break;
case PERF_RECORD_MISC_GUEST_USER:
- ++guest_us_samples;
+ ++top.guest_us_samples;
/*
* TODO: we don't process guest user from host side
* except simple counting.
@@ -1029,15 +714,15 @@ static void event__process_sample(const event_t *self,
if (!machine && perf_guest) {
pr_err("Can't find guest [%d]'s kernel information\n",
- self->ip.pid);
+ event->ip.pid);
return;
}
- if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
- exact_samples++;
+ if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
+ top.exact_samples++;
- if (event__preprocess_sample(self, session, &al, sample,
- symbol_filter) < 0 ||
+ if (perf_event__preprocess_sample(event, session, &al, sample,
+ symbol_filter) < 0 ||
al.filtered)
return;
@@ -1065,13 +750,13 @@ static void event__process_sample(const event_t *self,
/* let's see, whether we need to install initial sym_filter_entry */
if (sym_filter_entry_sched) {
- sym_filter_entry = sym_filter_entry_sched;
+ top.sym_filter_entry = sym_filter_entry_sched;
sym_filter_entry_sched = NULL;
- if (parse_source(sym_filter_entry) < 0) {
- struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+ if (parse_source(top.sym_filter_entry) < 0) {
+ struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
pr_err("Can't annotate %s", sym->name);
- if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+ if (top.sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
pr_err(": No vmlinux file was found in the path:\n");
machine__fprintf_vmlinux_path(machine, stderr);
} else
@@ -1082,166 +767,67 @@ static void event__process_sample(const event_t *self,
syme = symbol__priv(al.sym);
if (!syme->skip) {
- syme->count[evsel->idx]++;
+ struct perf_evsel *evsel;
+
syme->origin = origin;
+ evsel = perf_evlist__id2evsel(top.evlist, sample->id);
+ assert(evsel != NULL);
+ syme->count[evsel->idx]++;
record_precise_ip(syme, evsel->idx, ip);
- pthread_mutex_lock(&active_symbols_lock);
+ pthread_mutex_lock(&top.active_symbols_lock);
if (list_empty(&syme->node) || !syme->node.next)
__list_insert_active_sym(syme);
- pthread_mutex_unlock(&active_symbols_lock);
+ pthread_mutex_unlock(&top.active_symbols_lock);
}
}
-struct mmap_data {
- void *base;
- int mask;
- unsigned int prev;
-};
-
-static int perf_evsel__alloc_mmap_per_thread(struct perf_evsel *evsel,
- int ncpus, int nthreads)
-{
- evsel->priv = xyarray__new(ncpus, nthreads, sizeof(struct mmap_data));
- return evsel->priv != NULL ? 0 : -ENOMEM;
-}
-
-static void perf_evsel__free_mmap(struct perf_evsel *evsel)
-{
- xyarray__delete(evsel->priv);
- evsel->priv = NULL;
-}
-
-static unsigned int mmap_read_head(struct mmap_data *md)
-{
- struct perf_event_mmap_page *pc = md->base;
- int head;
-
- head = pc->data_head;
- rmb();
-
- return head;
-}
-
-static void perf_session__mmap_read_counter(struct perf_session *self,
- struct perf_evsel *evsel,
- int cpu, int thread_idx)
+static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
{
- struct xyarray *mmap_array = evsel->priv;
- struct mmap_data *md = xyarray__entry(mmap_array, cpu, thread_idx);
- unsigned int head = mmap_read_head(md);
- unsigned int old = md->prev;
- unsigned char *data = md->base + page_size;
- struct sample_data sample;
- int diff;
-
- /*
- * If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and mess up the samples under us.
- *
- * If we somehow ended up ahead of the head, we got messed up.
- *
- * In either case, truncate and restart at head.
- */
- diff = head - old;
- if (diff > md->mask / 2 || diff < 0) {
- fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
- /*
- * head points to a known good entry, start there.
- */
- old = head;
- }
-
- for (; old != head;) {
- event_t *event = (event_t *)&data[old & md->mask];
-
- event_t event_copy;
-
- size_t size = event->header.size;
+ struct perf_sample sample;
+ union perf_event *event;
- /*
- * Event straddles the mmap boundary -- header should always
- * be inside due to u64 alignment of output.
- */
- if ((old & md->mask) + size != ((old + size) & md->mask)) {
- unsigned int offset = old;
- unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = &event_copy;
-
- do {
- cpy = min(md->mask + 1 - (offset & md->mask), len);
- memcpy(dst, &data[offset & md->mask], cpy);
- offset += cpy;
- dst += cpy;
- len -= cpy;
- } while (len);
-
- event = &event_copy;
- }
+ while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
+ perf_session__parse_sample(self, event, &sample);
- event__parse_sample(event, self, &sample);
if (event->header.type == PERF_RECORD_SAMPLE)
- event__process_sample(event, &sample, self, evsel);
+ perf_event__process_sample(event, &sample, self);
else
- event__process(event, &sample, self);
- old += size;
+ perf_event__process(event, &sample, self);
}
-
- md->prev = old;
}
-static struct pollfd *event_array;
-
static void perf_session__mmap_read(struct perf_session *self)
{
- struct perf_evsel *counter;
- int i, thread_index;
-
- for (i = 0; i < cpus->nr; i++) {
- list_for_each_entry(counter, &evsel_list, node) {
- for (thread_index = 0;
- thread_index < threads->nr;
- thread_index++) {
- perf_session__mmap_read_counter(self,
- counter, i, thread_index);
- }
- }
- }
-}
+ int i;
-int nr_poll;
-int group_fd;
+ for (i = 0; i < top.evlist->cpus->nr; i++)
+ perf_session__mmap_read_cpu(self, i);
+}
-static void start_counter(int i, struct perf_evsel *evsel)
+static void start_counters(struct perf_evlist *evlist)
{
- struct xyarray *mmap_array = evsel->priv;
- struct mmap_data *mm;
- struct perf_event_attr *attr;
- int cpu = -1;
- int thread_index;
-
- if (target_tid == -1)
- cpu = cpus->map[i];
+ struct perf_evsel *counter;
- attr = &evsel->attr;
+ list_for_each_entry(counter, &evlist->entries, node) {
+ struct perf_event_attr *attr = &counter->attr;
- attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+ attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
- if (freq) {
- attr->sample_type |= PERF_SAMPLE_PERIOD;
- attr->freq = 1;
- attr->sample_freq = freq;
- }
+ if (top.freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = top.freq;
+ }
- attr->inherit = (cpu < 0) && inherit;
- attr->mmap = 1;
+ if (evlist->nr_entries > 1) {
+ attr->sample_type |= PERF_SAMPLE_ID;
+ attr->read_format |= PERF_FORMAT_ID;
+ }
- for (thread_index = 0; thread_index < threads->nr; thread_index++) {
+ attr->mmap = 1;
try_again:
- FD(evsel, i, thread_index) = sys_perf_event_open(attr,
- threads->map[thread_index], cpu, group_fd, 0);
-
- if (FD(evsel, i, thread_index) < 0) {
+ if (perf_evsel__open(counter, top.evlist->cpus,
+ top.evlist->threads, group, inherit) < 0) {
int err = errno;
if (err == EPERM || err == EACCES)
@@ -1253,8 +839,8 @@ try_again:
* based cpu-clock-tick sw counter, which
* is always available even if no PMU support:
*/
- if (attr->type == PERF_TYPE_HARDWARE
- && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+ if (attr->type == PERF_TYPE_HARDWARE &&
+ attr->config == PERF_COUNT_HW_CPU_CYCLES) {
if (verbose)
warning(" ... trying to fall back to cpu-clock-ticks\n");
@@ -1264,39 +850,23 @@ try_again:
goto try_again;
}
printf("\n");
- error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
- FD(evsel, i, thread_index), strerror(err));
+ error("sys_perf_event_open() syscall returned with %d "
+ "(%s). /bin/dmesg may provide additional information.\n",
+ err, strerror(err));
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
exit(-1);
}
- assert(FD(evsel, i, thread_index) >= 0);
- fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
-
- /*
- * First counter acts as the group leader:
- */
- if (group && group_fd == -1)
- group_fd = FD(evsel, i, thread_index);
-
- event_array[nr_poll].fd = FD(evsel, i, thread_index);
- event_array[nr_poll].events = POLLIN;
- nr_poll++;
-
- mm = xyarray__entry(mmap_array, i, thread_index);
- mm->prev = 0;
- mm->mask = mmap_pages*page_size - 1;
- mm->base = mmap(NULL, (mmap_pages+1)*page_size,
- PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
- if (mm->base == MAP_FAILED)
- die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
+
+ if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
+ die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
static int __cmd_top(void)
{
pthread_t thread;
- struct perf_evsel *counter;
- int i, ret;
+ struct perf_evsel *first;
+ int ret __used;
/*
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this
* mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
@@ -1305,23 +875,23 @@ static int __cmd_top(void)
if (session == NULL)
return -ENOMEM;
- if (target_tid != -1)
- event__synthesize_thread_map(threads, event__process, session);
+ if (top.target_tid != -1)
+ perf_event__synthesize_thread_map(top.evlist->threads,
+ perf_event__process, session);
else
- event__synthesize_threads(event__process, session);
+ perf_event__synthesize_threads(perf_event__process, session);
- for (i = 0; i < cpus->nr; i++) {
- group_fd = -1;
- list_for_each_entry(counter, &evsel_list, node)
- start_counter(i, counter);
- }
+ start_counters(top.evlist);
+ first = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+ perf_session__set_sample_type(session, first->attr.sample_type);
/* Wait for a minimal set of events before starting the snapshot */
- poll(&event_array[0], nr_poll, 100);
+ poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
perf_session__mmap_read(session);
- if (pthread_create(&thread, NULL, display_thread, session)) {
+ if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
+ display_thread), session)) {
printf("Could not create display thread.\n");
exit(-1);
}
@@ -1337,12 +907,12 @@ static int __cmd_top(void)
}
while (1) {
- int hits = samples;
+ u64 hits = top.samples;
perf_session__mmap_read(session);
- if (hits == samples)
- ret = poll(event_array, nr_poll, 100);
+ if (hits == top.samples)
+ ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
}
return 0;
@@ -1354,31 +924,31 @@ static const char * const top_usage[] = {
};
static const struct option options[] = {
- OPT_CALLBACK('e', "event", NULL, "event",
+ OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events),
OPT_INTEGER('c', "count", &default_interval,
"event period to sample"),
- OPT_INTEGER('p', "pid", &target_pid,
+ OPT_INTEGER('p', "pid", &top.target_pid,
"profile events on existing process id"),
- OPT_INTEGER('t', "tid", &target_tid,
+ OPT_INTEGER('t', "tid", &top.target_tid,
"profile events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
- OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
- OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
+ OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
OPT_INTEGER('r', "realtime", &realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
- OPT_INTEGER('d', "delay", &delay_secs,
+ OPT_INTEGER('d', "delay", &top.delay_secs,
"number of seconds to delay between refreshes"),
OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
"dump the symbol table used for profiling"),
- OPT_INTEGER('f', "count-filter", &count_filter,
+ OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
@@ -1386,14 +956,16 @@ static const struct option options[] = {
"child tasks inherit counters"),
OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
"symbol to annotate"),
- OPT_BOOLEAN('z', "zero", &zero,
+ OPT_BOOLEAN('z', "zero", &top.zero,
"zero history across updates"),
- OPT_INTEGER('F', "freq", &freq,
+ OPT_INTEGER('F', "freq", &top.freq,
"profile at this frequency"),
- OPT_INTEGER('E', "entries", &print_entries,
+ OPT_INTEGER('E', "entries", &top.print_entries,
"display this many functions"),
- OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
+ OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
"hide user symbols"),
+ OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
@@ -1404,64 +976,68 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
struct perf_evsel *pos;
int status = -ENOMEM;
+ top.evlist = perf_evlist__new(NULL, NULL);
+ if (top.evlist == NULL)
+ return -ENOMEM;
+
page_size = sysconf(_SC_PAGE_SIZE);
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
usage_with_options(top_usage, options);
- if (target_pid != -1)
- target_tid = target_pid;
+ /*
+ * XXX For now start disabled, only using TUI if explicitly asked for.
+ * Change that when handle_keys equivalent gets written, live annotation
+ * done, etc.
+ */
+ use_browser = 0;
- threads = thread_map__new(target_pid, target_tid);
- if (threads == NULL) {
- pr_err("Problems finding threads of monitor\n");
- usage_with_options(top_usage, options);
- }
+ if (use_stdio)
+ use_browser = 0;
+ else if (use_tui)
+ use_browser = 1;
- event_array = malloc((sizeof(struct pollfd) *
- MAX_NR_CPUS * MAX_COUNTERS * threads->nr));
- if (!event_array)
- return -ENOMEM;
+ setup_browser(false);
/* CPU and PID are mutually exclusive */
- if (target_tid > 0 && cpu_list) {
+ if (top.target_tid > 0 && top.cpu_list) {
printf("WARNING: PID switch overriding CPU\n");
sleep(1);
- cpu_list = NULL;
+ top.cpu_list = NULL;
}
- if (!nr_counters && perf_evsel_list__create_default() < 0) {
+ if (top.target_pid != -1)
+ top.target_tid = top.target_pid;
+
+ if (perf_evlist__create_maps(top.evlist, top.target_pid,
+ top.target_tid, top.cpu_list) < 0)
+ usage_with_options(top_usage, options);
+
+ if (!top.evlist->nr_entries &&
+ perf_evlist__add_default(top.evlist) < 0) {
pr_err("Not enough memory for event selector list\n");
return -ENOMEM;
}
- if (delay_secs < 1)
- delay_secs = 1;
+ if (top.delay_secs < 1)
+ top.delay_secs = 1;
/*
* User specified count overrides default frequency.
*/
if (default_interval)
- freq = 0;
- else if (freq) {
- default_interval = freq;
+ top.freq = 0;
+ else if (top.freq) {
+ default_interval = top.freq;
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
- if (target_tid != -1)
- cpus = cpu_map__dummy_new();
- else
- cpus = cpu_map__new(cpu_list);
-
- if (cpus == NULL)
- usage_with_options(top_usage, options);
-
- list_for_each_entry(pos, &evsel_list, node) {
- if (perf_evsel__alloc_mmap_per_thread(pos, cpus->nr, threads->nr) < 0 ||
- perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+ list_for_each_entry(pos, &top.evlist->entries, node) {
+ if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
+ top.evlist->threads->nr) < 0)
goto out_free_fd;
/*
* Fill in the ones not specifically initialized via -c:
@@ -1472,26 +1048,28 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
pos->attr.sample_period = default_interval;
}
- sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
+ if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
+ perf_evlist__alloc_mmap(top.evlist) < 0)
+ goto out_free_fd;
+
+ top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
- symbol_conf.priv_size = (sizeof(struct sym_entry) +
- (nr_counters + 1) * sizeof(unsigned long));
+ symbol_conf.priv_size = (sizeof(struct sym_entry) + sizeof(struct annotation) +
+ (top.evlist->nr_entries + 1) * sizeof(unsigned long));
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init() < 0)
return -1;
get_term_dimensions(&winsize);
- if (print_entries == 0) {
+ if (top.print_entries == 0) {
update_print_entries(&winsize);
signal(SIGWINCH, sig_winch_handler);
}
status = __cmd_top();
out_free_fd:
- list_for_each_entry(pos, &evsel_list, node)
- perf_evsel__free_mmap(pos);
- perf_evsel_list__delete();
+ perf_evlist__delete(top.evlist);
return status;
}
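
The rewritten cmd_top() above now hangs everything off a single perf_evlist. For reference, a minimal sketch of that evlist lifecycle, using only the calls visible in this hunk; it is illustrative rather than code from the patch, and assumes it is built inside tools/perf so the util/ headers resolve.

#include <errno.h>
#include <sys/types.h>
#include "util/evlist.h"

static int evlist_lifecycle_sketch(pid_t pid, pid_t tid, const char *cpu_list)
{
        struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
        int err = -ENOMEM;

        if (evlist == NULL)
                return -ENOMEM;

        /* Build the cpu and thread maps from the target options. */
        if (perf_evlist__create_maps(evlist, pid, tid, cpu_list) < 0)
                goto out_delete;

        /* Fall back to a default event when none was parsed from -e. */
        if (!evlist->nr_entries && perf_evlist__add_default(evlist) < 0)
                goto out_delete;

        /* pollfd and mmap bookkeeping shared by the whole evlist. */
        if (perf_evlist__alloc_pollfd(evlist) < 0 ||
            perf_evlist__alloc_mmap(evlist) < 0)
                goto out_delete;

        err = 0;
        /* ... open, mmap and poll the events here ... */
out_delete:
        perf_evlist__delete(evlist);
        return err;
}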
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 95aaf565c704..a5fc660c1f12 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -94,6 +94,32 @@ void get_term_dimensions(struct winsize *ws);
#include "util/types.h"
#include <stdbool.h>
+struct perf_mmap {
+ void *base;
+ int mask;
+ unsigned int prev;
+};
+
+static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+{
+ struct perf_event_mmap_page *pc = mm->base;
+ int head = pc->data_head;
+ rmb();
+ return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md,
+ unsigned long tail)
+{
+ struct perf_event_mmap_page *pc = md->base;
+
+ /*
+ * ensure all reads are done before we write the tail out.
+ */
+ /* mb(); */
+ pc->data_tail = tail;
+}
+
/*
* prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
* counters in the current task.
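
The two inline helpers above implement the tool side of the kernel/user ring-buffer handshake: read data_head (with the rmb()), consume records, then publish data_tail. A consumer-side sketch of how they are typically used together, not taken from the patch; it assumes md->base points at the perf_event_mmap_page and that the data area starts one page after it, which is how the mmap is set up elsewhere in the tools.

#include "perf.h"        /* struct perf_mmap and the helpers above */

static void perf_mmap__consume_sketch(struct perf_mmap *md, unsigned int page_size)
{
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = (unsigned char *)md->base + page_size;

        while (old != head) {
                /* md->mask wraps the offset inside the data area; records
                 * straddling the end need extra copying, glossed over here. */
                struct perf_event_header *event =
                        (struct perf_event_header *)&data[old & md->mask];

                /* ... decode *event according to event->type ... */
                old += event->size;
        }

        md->prev = old;
        perf_mmap__write_tail(md, old);  /* hand the space back to the kernel */
}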
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
new file mode 100755
index 000000000000..df638c438a9f
--- /dev/null
+++ b/tools/perf/python/twatch.py
@@ -0,0 +1,41 @@
+#! /usr/bin/python
+# -*- python -*-
+# -*- coding: utf-8 -*-
+# twatch - Experimental use of the perf python interface
+# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
+#
+# This application is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2.
+#
+# This application is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+import perf
+
+def main():
+ cpus = perf.cpu_map()
+ threads = perf.thread_map()
+ evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
+ wakeup_events = 1, sample_period = 1,
+ sample_id_all = 1,
+ sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
+ evsel.open(cpus = cpus, threads = threads)
+ evlist = perf.evlist(cpus, threads)
+ evlist.add(evsel)
+ evlist.mmap()
+ while True:
+ evlist.poll(timeout = -1)
+ for cpu in cpus:
+ event = evlist.read_on_cpu(cpu)
+ if not event:
+ continue
+ print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
+ event.sample_pid,
+ event.sample_tid),
+ print event
+
+if __name__ == '__main__':
+ main()
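
A note that is not part of the patch: twatch.py talks to the new python binding, so running it presumably requires the perf module built from tools/perf (python/perf.so) to be importable, e.g. via PYTHONPATH, plus enough perf_event privileges to open the task/comm tracking events it requests.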
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
new file mode 100644
index 000000000000..0d0830c98cd7
--- /dev/null
+++ b/tools/perf/util/annotate.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-annotate.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "util.h"
+#include "build-id.h"
+#include "color.h"
+#include "cache.h"
+#include "symbol.h"
+#include "debug.h"
+#include "annotate.h"
+#include <pthread.h>
+
+int symbol__annotate_init(struct map *map __used, struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ pthread_mutex_init(&notes->lock, NULL);
+ return 0;
+}
+
+int symbol__alloc_hist(struct symbol *sym, int nevents)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
+ (sym->end - sym->start) * sizeof(u64));
+
+ notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist);
+ if (notes->src == NULL)
+ return -1;
+ notes->src->sizeof_sym_hist = sizeof_sym_hist;
+ notes->src->nr_histograms = nevents;
+ INIT_LIST_HEAD(&notes->src->source);
+ return 0;
+}
+
+void symbol__annotate_zero_histograms(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ pthread_mutex_lock(&notes->lock);
+ if (notes->src != NULL)
+ memset(notes->src->histograms, 0,
+ notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+ pthread_mutex_unlock(&notes->lock);
+}
+
+int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr)
+{
+ unsigned offset;
+ struct annotation *notes;
+ struct sym_hist *h;
+
+ notes = symbol__annotation(sym);
+ if (notes->src == NULL)
+ return -ENOMEM;
+
+ pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
+
+ if (addr >= sym->end)
+ return 0;
+
+ offset = addr - sym->start;
+ h = annotation__histogram(notes, evidx);
+ h->sum++;
+ h->addr[offset]++;
+
+ pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
+ ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
+ addr, addr - sym->start, evidx, h->addr[offset]);
+ return 0;
+}
+
+static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
+{
+ struct objdump_line *self = malloc(sizeof(*self) + privsize);
+
+ if (self != NULL) {
+ self->offset = offset;
+ self->line = line;
+ }
+
+ return self;
+}
+
+void objdump_line__free(struct objdump_line *self)
+{
+ free(self->line);
+ free(self);
+}
+
+static void objdump__add_line(struct list_head *head, struct objdump_line *line)
+{
+ list_add_tail(&line->node, head);
+}
+
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos)
+{
+ list_for_each_entry_continue(pos, head, node)
+ if (pos->offset >= 0)
+ return pos;
+
+ return NULL;
+}
+
+static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
+ int evidx, u64 len, int min_pcnt,
+ int printed, int max_lines,
+ struct objdump_line *queue)
+{
+ static const char *prev_line;
+ static const char *prev_color;
+
+ if (oline->offset != -1) {
+ const char *path = NULL;
+ unsigned int hits = 0;
+ double percent = 0.0;
+ const char *color;
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ s64 offset = oline->offset;
+ struct objdump_line *next;
+
+ next = objdump__get_next_ip_line(&notes->src->source, oline);
+
+ while (offset < (s64)len &&
+ (next == NULL || offset < next->offset)) {
+ if (src_line) {
+ if (path == NULL)
+ path = src_line[offset].path;
+ percent += src_line[offset].percent;
+ } else
+ hits += h->addr[offset];
+
+ ++offset;
+ }
+
+ if (src_line == NULL && h->sum)
+ percent = 100.0 * hits / h->sum;
+
+ if (percent < min_pcnt)
+ return -1;
+
+ if (max_lines && printed >= max_lines)
+ return 1;
+
+ if (queue != NULL) {
+ list_for_each_entry_from(queue, &notes->src->source, node) {
+ if (queue == oline)
+ break;
+ objdump_line__print(queue, sym, evidx, len,
+ 0, 0, 1, NULL);
+ }
+ }
+
+ color = get_percent_color(percent);
+
+ /*
+ * Also color the filename and line if needed, with
+ * the same color as the percentage. Don't print it
+ * twice for closely colored addresses with the same filename:line
+ */
+ if (path) {
+ if (!prev_line || strcmp(prev_line, path)
+ || color != prev_color) {
+ color_fprintf(stdout, color, " %s", path);
+ prev_line = path;
+ prev_color = color;
+ }
+ }
+
+ color_fprintf(stdout, color, " %7.2f", percent);
+ printf(" : ");
+ color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line);
+ } else if (max_lines && printed >= max_lines)
+ return 1;
+ else {
+ if (queue)
+ return -1;
+
+ if (!*oline->line)
+ printf(" :\n");
+ else
+ printf(" : %s\n", oline->line);
+ }
+
+ return 0;
+}
+
+static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
+ FILE *file, size_t privsize)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct objdump_line *objdump_line;
+ char *line = NULL, *tmp, *tmp2, *c;
+ size_t line_len;
+ s64 line_ip, offset = -1;
+
+ if (getline(&line, &line_len, file) < 0)
+ return -1;
+
+ if (!line)
+ return -1;
+
+ while (line_len != 0 && isspace(line[line_len - 1]))
+ line[--line_len] = '\0';
+
+ c = strchr(line, '\n');
+ if (c)
+ *c = 0;
+
+ line_ip = -1;
+
+ /*
+ * Strip leading spaces:
+ */
+ tmp = line;
+ while (*tmp) {
+ if (*tmp != ' ')
+ break;
+ tmp++;
+ }
+
+ if (*tmp) {
+ /*
+ * Parse hex addresses followed by ':'
+ */
+ line_ip = strtoull(tmp, &tmp2, 16);
+ if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
+ line_ip = -1;
+ }
+
+ if (line_ip != -1) {
+ u64 start = map__rip_2objdump(map, sym->start),
+ end = map__rip_2objdump(map, sym->end);
+
+ offset = line_ip - start;
+ if (offset < 0 || (u64)line_ip > end)
+ offset = -1;
+ }
+
+ objdump_line = objdump_line__new(offset, line, privsize);
+ if (objdump_line == NULL) {
+ free(line);
+ return -1;
+ }
+ objdump__add_line(&notes->src->source, objdump_line);
+
+ return 0;
+}
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
+{
+ struct dso *dso = map->dso;
+ char *filename = dso__build_id_filename(dso, NULL, 0);
+ bool free_filename = true;
+ char command[PATH_MAX * 2];
+ FILE *file;
+ int err = 0;
+ char symfs_filename[PATH_MAX];
+
+ if (filename) {
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ }
+
+ if (filename == NULL) {
+ if (dso->has_build_id) {
+ pr_err("Can't annotate %s: not enough memory\n",
+ sym->name);
+ return -ENOMEM;
+ }
+ goto fallback;
+ } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
+ strstr(command, "[kernel.kallsyms]") ||
+ access(symfs_filename, R_OK)) {
+ free(filename);
+fallback:
+ /*
+ * If we don't have build-ids or the build-id file isn't in the
+ * cache, or is just a kallsyms file, well, let's hope that this
+ * DSO is the same as when 'perf record' ran.
+ */
+ filename = dso->long_name;
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ free_filename = false;
+ }
+
+ if (dso->origin == DSO__ORIG_KERNEL) {
+ char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+ char *build_id_msg = NULL;
+
+ if (dso->annotate_warned)
+ goto out_free_filename;
+
+ if (dso->has_build_id) {
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id), bf + 15);
+ build_id_msg = bf;
+ }
+ err = -ENOENT;
+ dso->annotate_warned = 1;
+ pr_err("Can't annotate %s: No vmlinux file%s was found in the "
+ "path.\nPlease use 'perf buildid-cache -av vmlinux' or "
+ "--vmlinux vmlinux.\n",
+ sym->name, build_id_msg ?: "");
+ goto out_free_filename;
+ }
+
+ pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
+ filename, sym->name, map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
+
+ pr_debug("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso->long_name, sym, sym->name);
+
+ snprintf(command, sizeof(command),
+ "objdump --start-address=0x%016" PRIx64
+ " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end),
+ symfs_filename, filename);
+
+ pr_debug("Executing: %s\n", command);
+
+ file = popen(command, "r");
+ if (!file)
+ goto out_free_filename;
+
+ while (!feof(file))
+ if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
+ break;
+
+ pclose(file);
+out_free_filename:
+ if (free_filename)
+ free(filename);
+ return err;
+}
+
+static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+{
+ struct source_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct source_line, node);
+
+ if (src_line->percent > iter->percent)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&src_line->node, parent, p);
+ rb_insert_color(&src_line->node, root);
+}
+
+static void symbol__free_source_line(struct symbol *sym, int len)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ int i;
+
+ for (i = 0; i < len; i++)
+ free(src_line[i].path);
+
+ free(src_line);
+ notes->src->lines = NULL;
+}
+
+/* Get the filename:line for the colored entries */
+static int symbol__get_source_line(struct symbol *sym, struct map *map,
+ int evidx, struct rb_root *root, int len,
+ const char *filename)
+{
+ u64 start;
+ int i;
+ char cmd[PATH_MAX * 2];
+ struct source_line *src_line;
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+
+ if (!h->sum)
+ return 0;
+
+ src_line = notes->src->lines = calloc(len, sizeof(struct source_line));
+ if (!notes->src->lines)
+ return -1;
+
+ start = map->unmap_ip(map, sym->start);
+
+ for (i = 0; i < len; i++) {
+ char *path = NULL;
+ size_t line_len;
+ u64 offset;
+ FILE *fp;
+
+ src_line[i].percent = 100.0 * h->addr[i] / h->sum;
+ if (src_line[i].percent <= 0.5)
+ continue;
+
+ offset = start + i;
+ sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
+ fp = popen(cmd, "r");
+ if (!fp)
+ continue;
+
+ if (getline(&path, &line_len, fp) < 0 || !line_len)
+ goto next;
+
+ src_line[i].path = malloc(sizeof(char) * line_len + 1);
+ if (!src_line[i].path)
+ goto next;
+
+ strcpy(src_line[i].path, path);
+ insert_source_line(root, &src_line[i]);
+
+ next:
+ pclose(fp);
+ }
+
+ return 0;
+}
+
+static void print_summary(struct rb_root *root, const char *filename)
+{
+ struct source_line *src_line;
+ struct rb_node *node;
+
+ printf("\nSorted summary for file %s\n", filename);
+ printf("----------------------------------------------\n\n");
+
+ if (RB_EMPTY_ROOT(root)) {
+ printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+ return;
+ }
+
+ node = rb_first(root);
+ while (node) {
+ double percent;
+ const char *color;
+ char *path;
+
+ src_line = rb_entry(node, struct source_line, node);
+ percent = src_line->percent;
+ color = get_percent_color(percent);
+ path = src_line->path;
+
+ color_fprintf(stdout, color, " %7.2f %s", percent, path);
+ node = rb_next(node);
+ }
+}
+
+static void symbol__annotate_hits(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ u64 len = sym->end - sym->start, offset;
+
+ for (offset = 0; offset < len; ++offset)
+ if (h->addr[offset] != 0)
+ printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
+ sym->start + offset, h->addr[offset]);
+ printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
+int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
+ bool full_paths, int min_pcnt, int max_lines,
+ int context)
+{
+ struct dso *dso = map->dso;
+ const char *filename = dso->long_name, *d_filename;
+ struct annotation *notes = symbol__annotation(sym);
+ struct objdump_line *pos, *queue = NULL;
+ int printed = 2, queue_len = 0;
+ int more = 0;
+ u64 len;
+
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
+
+ len = sym->end - sym->start;
+
+ printf(" Percent | Source code & Disassembly of %s\n", d_filename);
+ printf("------------------------------------------------\n");
+
+ if (verbose)
+ symbol__annotate_hits(sym, evidx);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ if (context && queue == NULL) {
+ queue = pos;
+ queue_len = 0;
+ }
+
+ switch (objdump_line__print(pos, sym, evidx, len, min_pcnt,
+ printed, max_lines, queue)) {
+ case 0:
+ ++printed;
+ if (context) {
+ printed += queue_len;
+ queue = NULL;
+ queue_len = 0;
+ }
+ break;
+ case 1:
+ /* filtered by max_lines */
+ ++more;
+ break;
+ case -1:
+ default:
+ /*
+ * Filtered by min_pcnt or non-IP lines when
+ * context != 0
+ */
+ if (!context)
+ break;
+ if (queue_len == context)
+ queue = list_entry(queue->node.next, typeof(*queue), node);
+ else
+ ++queue_len;
+ break;
+ }
+ }
+
+ return more;
+}
+
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+
+ memset(h, 0, notes->src->sizeof_sym_hist);
+}
+
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ struct objdump_line *pos;
+ int len = sym->end - sym->start;
+
+ h->sum = 0;
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ if (pos->offset != -1 && pos->offset < len) {
+ h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
+ h->sum += h->addr[pos->offset];
+ }
+ }
+}
+
+void objdump_line_list__purge(struct list_head *head)
+{
+ struct objdump_line *pos, *n;
+
+ list_for_each_entry_safe(pos, n, head, node) {
+ list_del(&pos->node);
+ objdump_line__free(pos);
+ }
+}
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
+ bool print_lines, bool full_paths, int min_pcnt,
+ int max_lines)
+{
+ struct dso *dso = map->dso;
+ const char *filename = dso->long_name;
+ struct rb_root source_line = RB_ROOT;
+ u64 len;
+
+ if (symbol__annotate(sym, map, 0) < 0)
+ return -1;
+
+ len = sym->end - sym->start;
+
+ if (print_lines) {
+ symbol__get_source_line(sym, map, evidx, &source_line,
+ len, filename);
+ print_summary(&source_line, filename);
+ }
+
+ symbol__annotate_printf(sym, map, evidx, full_paths,
+ min_pcnt, max_lines, 0);
+ if (print_lines)
+ symbol__free_source_line(sym, len);
+
+ objdump_line_list__purge(&symbol__annotation(sym)->src->source);
+
+ return 0;
+}
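
symbol__parse_objdump_line() above only treats a line as code when it begins with a hex address immediately followed by ':'; the symbol header and the interleaved source from -S both end up with offset == -1. A made-up fragment of the objdump -dS output it expects, purely for illustration:

        ffffffff81020c40 <some_function>:            <- no ':' right after the address, offset stays -1
        ffffffff81020c44:       55      push   %rbp  <- parsed, offset = line_ip - map__rip_2objdump(map, sym->start)
                return 0;                            <- source text, offset stays -1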
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
new file mode 100644
index 000000000000..c2c286896801
--- /dev/null
+++ b/tools/perf/util/annotate.h
@@ -0,0 +1,103 @@
+#ifndef __PERF_ANNOTATE_H
+#define __PERF_ANNOTATE_H
+
+#include <stdbool.h>
+#include "types.h"
+#include "symbol.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct objdump_line {
+ struct list_head node;
+ s64 offset;
+ char *line;
+};
+
+void objdump_line__free(struct objdump_line *self);
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos);
+
+struct sym_hist {
+ u64 sum;
+ u64 addr[0];
+};
+
+struct source_line {
+ struct rb_node node;
+ double percent;
+ char *path;
+};
+
+/** struct annotated_source - symbols with hits have this attached, as in struct sannotation
+ *
+ * @histogram: Array of addr hit histograms per event being monitored
+ * @lines: If 'print_lines' is specified, per source code line percentages
+ * @source: source parsed from objdump -dS
+ *
+ * lines is allocated, percentages calculated and all sorted by percentage
+ * when the annotation is about to be presented, so the percentages are for
+ * one of the entries in the histogram array, i.e. for the event/counter being
+ * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
+ * returns.
+ */
+struct annotated_source {
+ struct list_head source;
+ struct source_line *lines;
+ int nr_histograms;
+ int sizeof_sym_hist;
+ struct sym_hist histograms[0];
+};
+
+struct annotation {
+ pthread_mutex_t lock;
+ struct annotated_source *src;
+};
+
+struct sannotation {
+ struct annotation annotation;
+ struct symbol symbol;
+};
+
+static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
+{
+ return (((void *)&notes->src->histograms) +
+ (notes->src->sizeof_sym_hist * idx));
+}
+
+static inline struct annotation *symbol__annotation(struct symbol *sym)
+{
+ struct sannotation *a = container_of(sym, struct sannotation, symbol);
+ return &a->annotation;
+}
+
+int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr);
+int symbol__alloc_hist(struct symbol *sym, int nevents);
+void symbol__annotate_zero_histograms(struct symbol *sym);
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+int symbol__annotate_init(struct map *map __used, struct symbol *sym);
+int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
+ bool full_paths, int min_pcnt, int max_lines,
+ int context);
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+void objdump_line_list__purge(struct list_head *head);
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
+ bool print_lines, bool full_paths, int min_pcnt,
+ int max_lines);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int symbol__tui_annotate(struct symbol *sym __used,
+ struct map *map __used,
+ int evidx __used, int refresh __used)
+{
+ return 0;
+}
+#else
+int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
+ int refresh);
+#endif
+
+#endif /* __PERF_ANNOTATE_H */
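
Pulling the new API together, the intended flow appears to be: size symbol_conf.priv_size so every symbol carries a struct annotation (as builtin-top.c does above), allocate per-event histograms, feed in sample addresses, then render. A sketch under those assumptions, with the symbol/map lookup and error handling elided:

#include "util/annotate.h"

/* 'sym' and 'map' are assumed to come from the usual sample resolution
 * (thread__find_addr_map() plus a symbol lookup), and symbol_conf.priv_size
 * must already account for struct annotation, as in builtin-top.c. */
static void annotate_sketch(struct symbol *sym, struct map *map,
                            int nevents, int evidx, u64 map_addr)
{
        if (symbol__alloc_hist(sym, nevents) < 0)
                return;

        /* Once per sample whose resolved address falls inside 'sym'. */
        symbol__inc_addr_samples(sym, map, evidx, map_addr);

        /* Runs objdump, parses it and prints the annotated listing;
         * the trailing 0, 0 means no percentage or line-count filtering. */
        symbol__tty_annotate(sym, map, evidx, false, false, 0, 0);
}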
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index deffb8c96071..31f934af9861 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -14,8 +14,8 @@
#include <linux/kernel.h>
#include "debug.h"
-static int build_id__mark_dso_hit(event_t *event,
- struct sample_data *sample __used,
+static int build_id__mark_dso_hit(union perf_event *event,
+ struct perf_sample *sample __used,
struct perf_session *session)
{
struct addr_location al;
@@ -37,13 +37,14 @@ static int build_id__mark_dso_hit(event_t *event,
return 0;
}
-static int event__exit_del_thread(event_t *self, struct sample_data *sample __used,
- struct perf_session *session)
+static int perf_event__exit_del_thread(union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_session *session)
{
- struct thread *thread = perf_session__findnew(session, self->fork.tid);
+ struct thread *thread = perf_session__findnew(session, event->fork.tid);
- dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
- self->fork.ppid, self->fork.ptid);
+ dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
if (thread) {
rb_erase(&thread->rb_node, &session->threads);
@@ -56,9 +57,9 @@ static int event__exit_del_thread(event_t *self, struct sample_data *sample __us
struct perf_event_ops build_id__mark_dso_hit_ops = {
.sample = build_id__mark_dso_hit,
- .mmap = event__process_mmap,
- .fork = event__process_task,
- .exit = event__exit_del_thread,
+ .mmap = perf_event__process_mmap,
+ .fork = perf_event__process_task,
+ .exit = perf_event__exit_del_thread,
};
char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
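
Since the renamed callbacks all share one signature, a new consumer only needs to fill a struct perf_event_ops, just like build_id__mark_dso_hit_ops above. A do-nothing sketch; the handler name is made up, the perf_event__process_* helpers are the exported ones used elsewhere in this patch, and the usual util/event.h and util/session.h includes are assumed.

static int sketch_sample(union perf_event *event __used,
                         struct perf_sample *sample __used,
                         struct perf_session *session __used)
{
        return 0;       /* a real tool would resolve and account the sample here */
}

static struct perf_event_ops sketch_ops = {
        .sample = sketch_sample,
        .comm   = perf_event__process_comm,
        .mmap   = perf_event__process_mmap,
        .fork   = perf_event__process_task,
        .exit   = perf_event__process_task,
};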
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index a7729797fd96..fc5e5a09d5b9 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -34,13 +34,14 @@ extern int pager_use_color;
extern int use_browser;
#ifdef NO_NEWT_SUPPORT
-static inline void setup_browser(void)
+static inline void setup_browser(bool fallback_to_pager)
{
- setup_pager();
+ if (fallback_to_pager)
+ setup_pager();
}
static inline void exit_browser(bool wait_for_ok __used) {}
#else
-void setup_browser(void);
+void setup_browser(bool fallback_to_pager);
void exit_browser(bool wait_for_ok);
#endif
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index e12d539417b2..9f7106a8d9a4 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
*
* Handle the callchains from the stream in an ad-hoc radix tree and then
* sort them in an rbtree.
@@ -18,7 +18,8 @@
#include "util.h"
#include "callchain.h"
-bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
+bool ip_callchain__valid(struct ip_callchain *chain,
+ const union perf_event *event)
{
unsigned int chain_size = event->header.size;
chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
@@ -26,10 +27,10 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
}
#define chain_for_each_child(child, parent) \
- list_for_each_entry(child, &parent->children, brothers)
+ list_for_each_entry(child, &parent->children, siblings)
#define chain_for_each_child_safe(child, next, parent) \
- list_for_each_entry_safe(child, next, &parent->children, brothers)
+ list_for_each_entry_safe(child, next, &parent->children, siblings)
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
@@ -38,14 +39,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct callchain_node *rnode;
- u64 chain_cumul = cumul_hits(chain);
+ u64 chain_cumul = callchain_cumul_hits(chain);
while (*p) {
u64 rnode_cumul;
parent = *p;
rnode = rb_entry(parent, struct callchain_node, rb_node);
- rnode_cumul = cumul_hits(rnode);
+ rnode_cumul = callchain_cumul_hits(rnode);
switch (mode) {
case CHAIN_FLAT:
@@ -104,7 +105,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
chain_for_each_child(child, node) {
__sort_chain_graph_abs(child, min_hit);
- if (cumul_hits(child) >= min_hit)
+ if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
CHAIN_GRAPH_ABS);
}
@@ -129,7 +130,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
chain_for_each_child(child, node) {
__sort_chain_graph_rel(child, min_percent);
- if (cumul_hits(child) >= min_hit)
+ if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
CHAIN_GRAPH_REL);
}
@@ -143,7 +144,7 @@ sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
-int register_callchain_param(struct callchain_param *param)
+int callchain_register_param(struct callchain_param *param)
{
switch (param->mode) {
case CHAIN_GRAPH_ABS:
@@ -189,32 +190,27 @@ create_child(struct callchain_node *parent, bool inherit_children)
chain_for_each_child(next, new)
next->parent = new;
}
- list_add_tail(&new->brothers, &parent->children);
+ list_add_tail(&new->siblings, &parent->children);
return new;
}
-struct resolved_ip {
- u64 ip;
- struct map_symbol ms;
-};
-
-struct resolved_chain {
- u64 nr;
- struct resolved_ip ips[0];
-};
-
-
/*
* Fill the node with callchain values
*/
static void
-fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
+fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
- unsigned int i;
+ struct callchain_cursor_node *cursor_node;
+
+ node->val_nr = cursor->nr - cursor->pos;
+ if (!node->val_nr)
+ pr_warning("Warning: empty node in callchain tree\n");
- for (i = start; i < chain->nr; i++) {
+ cursor_node = callchain_cursor_current(cursor);
+
+ while (cursor_node) {
struct callchain_list *call;
call = zalloc(sizeof(*call));
@@ -222,23 +218,25 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
perror("not enough memory for the code path tree");
return;
}
- call->ip = chain->ips[i].ip;
- call->ms = chain->ips[i].ms;
+ call->ip = cursor_node->ip;
+ call->ms.sym = cursor_node->sym;
+ call->ms.map = cursor_node->map;
list_add_tail(&call->list, &node->val);
+
+ callchain_cursor_advance(cursor);
+ cursor_node = callchain_cursor_current(cursor);
}
- node->val_nr = chain->nr - start;
- if (!node->val_nr)
- pr_warning("Warning: empty node in callchain tree\n");
}
static void
-add_child(struct callchain_node *parent, struct resolved_chain *chain,
- int start, u64 period)
+add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ u64 period)
{
struct callchain_node *new;
new = create_child(parent, false);
- fill_node(new, chain, start);
+ fill_node(new, cursor);
new->children_hit = 0;
new->hit = period;
@@ -250,9 +248,10 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
* Then create another child to host the given callchain of new branch
*/
static void
-split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
- struct callchain_list *to_split, int idx_parents, int idx_local,
- u64 period)
+split_add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ struct callchain_list *to_split,
+ u64 idx_parents, u64 idx_local, u64 period)
{
struct callchain_node *new;
struct list_head *old_tail;
@@ -272,14 +271,14 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
/* split the hits */
new->hit = parent->hit;
new->children_hit = parent->children_hit;
- parent->children_hit = cumul_hits(new);
+ parent->children_hit = callchain_cumul_hits(new);
new->val_nr = parent->val_nr - idx_local;
parent->val_nr = idx_local;
/* create a new child for the new branch if any */
- if (idx_total < chain->nr) {
+ if (idx_total < cursor->nr) {
parent->hit = 0;
- add_child(parent, chain, idx_total, period);
+ add_child(parent, cursor, period);
parent->children_hit += period;
} else {
parent->hit = period;
@@ -287,36 +286,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
}
static int
-append_chain(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period);
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period);
static void
-append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period)
+append_chain_children(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
{
struct callchain_node *rnode;
 /* lookup in children */
chain_for_each_child(rnode, root) {
- unsigned int ret = append_chain(rnode, chain, start, period);
+ unsigned int ret = append_chain(rnode, cursor, period);
if (!ret)
goto inc_children_hit;
}
/* nothing in children, add to the current node */
- add_child(root, chain, start, period);
+ add_child(root, cursor, period);
inc_children_hit:
root->children_hit += period;
}
static int
-append_chain(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period)
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
{
+ struct callchain_cursor_node *curr_snap = cursor->curr;
struct callchain_list *cnode;
- unsigned int i = start;
+ u64 start = cursor->pos;
bool found = false;
+ u64 matches;
/*
* Lookup in the current node
@@ -324,141 +328,134 @@ append_chain(struct callchain_node *root, struct resolved_chain *chain,
* anywhere inside a function.
*/
list_for_each_entry(cnode, &root->val, list) {
+ struct callchain_cursor_node *node;
struct symbol *sym;
- if (i == chain->nr)
+ node = callchain_cursor_current(cursor);
+ if (!node)
break;
- sym = chain->ips[i].ms.sym;
+ sym = node->sym;
if (cnode->ms.sym && sym) {
if (cnode->ms.sym->start != sym->start)
break;
- } else if (cnode->ip != chain->ips[i].ip)
+ } else if (cnode->ip != node->ip)
break;
if (!found)
found = true;
- i++;
+
+ callchain_cursor_advance(cursor);
}
 /* no match, rely on the parent */
- if (!found)
+ if (!found) {
+ cursor->curr = curr_snap;
+ cursor->pos = start;
return -1;
+ }
+
+ matches = cursor->pos - start;
/* we match only a part of the node. Split it and add the new chain */
- if (i - start < root->val_nr) {
- split_add_child(root, chain, cnode, start, i - start, period);
+ if (matches < root->val_nr) {
+ split_add_child(root, cursor, cnode, start, matches, period);
return 0;
}
/* we match 100% of the path, increment the hit */
- if (i - start == root->val_nr && i == chain->nr) {
+ if (matches == root->val_nr && cursor->pos == cursor->nr) {
root->hit += period;
return 0;
}
/* We match the node and still have a part remaining */
- append_chain_children(root, chain, i, period);
+ append_chain_children(root, cursor, period);
return 0;
}
-static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
- struct map_symbol *syms)
-{
- int i, j = 0;
-
- for (i = 0; i < (int)old->nr; i++) {
- if (old->ips[i] >= PERF_CONTEXT_MAX)
- continue;
-
- new->ips[j].ip = old->ips[i];
- new->ips[j].ms = syms[i];
- j++;
- }
-
- new->nr = j;
-}
-
-
-int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
- struct map_symbol *syms, u64 period)
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period)
{
- struct resolved_chain *filtered;
-
- if (!chain->nr)
+ if (!cursor->nr)
return 0;
- filtered = zalloc(sizeof(*filtered) +
- chain->nr * sizeof(struct resolved_ip));
- if (!filtered)
- return -ENOMEM;
-
- filter_context(chain, filtered, syms);
-
- if (!filtered->nr)
- goto end;
+ callchain_cursor_commit(cursor);
- append_chain_children(&root->node, filtered, 0, period);
+ append_chain_children(&root->node, cursor, period);
- if (filtered->nr > root->max_depth)
- root->max_depth = filtered->nr;
-end:
- free(filtered);
+ if (cursor->nr > root->max_depth)
+ root->max_depth = cursor->nr;
return 0;
}
static int
-merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
- struct resolved_chain *chain)
+merge_chain_branch(struct callchain_cursor *cursor,
+ struct callchain_node *dst, struct callchain_node *src)
{
+ struct callchain_cursor_node **old_last = cursor->last;
struct callchain_node *child, *next_child;
struct callchain_list *list, *next_list;
- int old_pos = chain->nr;
+ int old_pos = cursor->nr;
int err = 0;
list_for_each_entry_safe(list, next_list, &src->val, list) {
- chain->ips[chain->nr].ip = list->ip;
- chain->ips[chain->nr].ms = list->ms;
- chain->nr++;
+ callchain_cursor_append(cursor, list->ip,
+ list->ms.map, list->ms.sym);
list_del(&list->list);
free(list);
}
- if (src->hit)
- append_chain_children(dst, chain, 0, src->hit);
+ if (src->hit) {
+ callchain_cursor_commit(cursor);
+ append_chain_children(dst, cursor, src->hit);
+ }
chain_for_each_child_safe(child, next_child, src) {
- err = merge_chain_branch(dst, child, chain);
+ err = merge_chain_branch(cursor, dst, child);
if (err)
break;
- list_del(&child->brothers);
+ list_del(&child->siblings);
free(child);
}
- chain->nr = old_pos;
+ cursor->nr = old_pos;
+ cursor->last = old_last;
return err;
}
-int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src)
+{
+ return merge_chain_branch(cursor, &dst->node, &src->node);
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor,
+ u64 ip, struct map *map, struct symbol *sym)
{
- struct resolved_chain *chain;
- int err;
+ struct callchain_cursor_node *node = *cursor->last;
- chain = malloc(sizeof(*chain) +
- src->max_depth * sizeof(struct resolved_ip));
- if (!chain)
- return -ENOMEM;
+ if (!node) {
+ node = calloc(sizeof(*node), 1);
+ if (!node)
+ return -ENOMEM;
- chain->nr = 0;
+ *cursor->last = node;
+ }
- err = merge_chain_branch(&dst->node, &src->node, chain);
+ node->ip = ip;
+ node->map = map;
+ node->sym = sym;
- free(chain);
+ cursor->nr++;
- return err;
+ cursor->last = &node->next;
+
+ return 0;
}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index c15fb8c24ad2..1a79df9f739f 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -16,7 +16,7 @@ enum chain_mode {
struct callchain_node {
struct callchain_node *parent;
- struct list_head brothers;
+ struct list_head siblings;
struct list_head children;
struct list_head val;
struct rb_node rb_node; /* to sort nodes in an rbtree */
@@ -49,9 +49,30 @@ struct callchain_list {
struct list_head list;
};
+/*
+ * A callchain cursor is a singly linked list that
+ * lets one feed a callchain progressively.
+ * It keeps persistently allocated entries to minimize
+ * allocations.
+ */
+struct callchain_cursor_node {
+ u64 ip;
+ struct map *map;
+ struct symbol *sym;
+ struct callchain_cursor_node *next;
+};
+
+struct callchain_cursor {
+ u64 nr;
+ struct callchain_cursor_node *first;
+ struct callchain_cursor_node **last;
+ u64 pos;
+ struct callchain_cursor_node *curr;
+};
+
static inline void callchain_init(struct callchain_root *root)
{
- INIT_LIST_HEAD(&root->node.brothers);
+ INIT_LIST_HEAD(&root->node.siblings);
INIT_LIST_HEAD(&root->node.children);
INIT_LIST_HEAD(&root->node.val);
@@ -61,15 +82,54 @@ static inline void callchain_init(struct callchain_root *root)
root->max_depth = 0;
}
-static inline u64 cumul_hits(struct callchain_node *node)
+static inline u64 callchain_cumul_hits(struct callchain_node *node)
{
return node->hit + node->children_hit;
}
-int register_callchain_param(struct callchain_param *param);
-int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
- struct map_symbol *syms, u64 period);
-int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
+int callchain_register_param(struct callchain_param *param);
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period);
+
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src);
+
+bool ip_callchain__valid(struct ip_callchain *chain,
+ const union perf_event *event);
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+ struct map *map, struct symbol *sym);
-bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
+/* Close a cursor writing session. Initialize for the reader */
+static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->first;
+ cursor->pos = 0;
+}
+
+/* Cursor reading iteration helpers */
+static inline struct callchain_cursor_node *
+callchain_cursor_current(struct callchain_cursor *cursor)
+{
+ if (cursor->pos == cursor->nr)
+ return NULL;
+
+ return cursor->curr;
+}
+
+static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->curr->next;
+ cursor->pos++;
+}
#endif /* __PERF_CALLCHAIN_H */
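
A sketch of a full cursor cycle with the helpers above: reset, append one node per resolved frame, commit, then either walk it read-only or fold it into the radix tree. The static cursor and the caller are illustrative, not part of the patch.

#include "util/callchain.h"

static struct callchain_cursor cursor;  /* zeroed; the reset below sets up 'last' */

static void cursor_cycle_sketch(struct callchain_root *root,
                                u64 *ips, int nr_ips, u64 period)
{
        struct callchain_cursor_node *node;
        int i;

        /* Writer side: map/sym are left unresolved (NULL) in this sketch. */
        callchain_cursor_reset(&cursor);
        for (i = 0; i < nr_ips; i++)
                callchain_cursor_append(&cursor, ips[i], NULL, NULL);

        /* Reader side: commit, then iterate the nodes in insertion order. */
        callchain_cursor_commit(&cursor);
        node = callchain_cursor_current(&cursor);
        while (node) {
                /* inspect node->ip / node->map / node->sym here */
                callchain_cursor_advance(&cursor);
                node = callchain_cursor_current(&cursor);
        }

        /* callchain_append() re-commits the cursor itself and folds the
         * frames into the tree rooted at 'root'. */
        callchain_append(root, &cursor, period);
}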
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
new file mode 100644
index 000000000000..9fea75535221
--- /dev/null
+++ b/tools/perf/util/cgroup.c
@@ -0,0 +1,178 @@
+#include "util.h"
+#include "../perf.h"
+#include "parse-options.h"
+#include "evsel.h"
+#include "cgroup.h"
+#include "debugfs.h" /* MAX_PATH, STR() */
+#include "evlist.h"
+
+int nr_cgroups;
+
+static int
+cgroupfs_find_mountpoint(char *buf, size_t maxlen)
+{
+ FILE *fp;
+ char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
+ char *token, *saved_ptr;
+ int found = 0;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return -1;
+
+ /*
+ * in order to handle split hierarchy, we need to scan /proc/mounts
+ * and inspect every cgroupfs mount point to find one that has
+ * perf_event subsystem
+ */
+ while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %"
+ STR(MAX_PATH)"s %*d %*d\n",
+ mountpoint, type, tokens) == 3) {
+
+ if (!strcmp(type, "cgroup")) {
+
+ token = strtok_r(tokens, ",", &saved_ptr);
+
+ while (token != NULL) {
+ if (!strcmp(token, "perf_event")) {
+ found = 1;
+ break;
+ }
+ token = strtok_r(NULL, ",", &saved_ptr);
+ }
+ }
+ if (found)
+ break;
+ }
+ fclose(fp);
+ if (!found)
+ return -1;
+
+ if (strlen(mountpoint) < maxlen) {
+ strcpy(buf, mountpoint);
+ return 0;
+ }
+ return -1;
+}
+
+static int open_cgroup(char *name)
+{
+ char path[MAX_PATH+1];
+ char mnt[MAX_PATH+1];
+ int fd;
+
+
+ if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1))
+ return -1;
+
+ snprintf(path, MAX_PATH, "%s/%s", mnt, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ fprintf(stderr, "no access to cgroup %s\n", path);
+
+ return fd;
+}
+
+static int add_cgroup(struct perf_evlist *evlist, char *str)
+{
+ struct perf_evsel *counter;
+ struct cgroup_sel *cgrp = NULL;
+ int n;
+ /*
+ * check if cgrp is already defined, if so we reuse it
+ */
+ list_for_each_entry(counter, &evlist->entries, node) {
+ cgrp = counter->cgrp;
+ if (!cgrp)
+ continue;
+ if (!strcmp(cgrp->name, str))
+ break;
+
+ cgrp = NULL;
+ }
+
+ if (!cgrp) {
+ cgrp = zalloc(sizeof(*cgrp));
+ if (!cgrp)
+ return -1;
+
+ cgrp->name = str;
+
+ cgrp->fd = open_cgroup(str);
+ if (cgrp->fd == -1) {
+ free(cgrp);
+ return -1;
+ }
+ }
+
+ /*
+ * find corresponding event
+ * if adding cgroup N, then we need to find event N
+ */
+ n = 0;
+ list_for_each_entry(counter, &evlist->entries, node) {
+ if (n == nr_cgroups)
+ goto found;
+ n++;
+ }
+ if (cgrp->refcnt == 0)
+ free(cgrp);
+
+ return -1;
+found:
+ cgrp->refcnt++;
+ counter->cgrp = cgrp;
+ return 0;
+}
+
+void close_cgroup(struct cgroup_sel *cgrp)
+{
+ if (!cgrp)
+ return;
+
+ /* XXX: not reentrant */
+ if (--cgrp->refcnt == 0) {
+ close(cgrp->fd);
+ free(cgrp->name);
+ free(cgrp);
+ }
+}
+
+int parse_cgroups(const struct option *opt __used, const char *str,
+ int unset __used)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ const char *p, *e, *eos = str + strlen(str);
+ char *s;
+ int ret;
+
+ if (list_empty(&evlist->entries)) {
+ fprintf(stderr, "must define events before cgroups\n");
+ return -1;
+ }
+
+ for (;;) {
+ p = strchr(str, ',');
+ e = p ? p : eos;
+
+ /* allow empty cgroups, i.e., skip */
+ if (e - str) {
+ /* termination added */
+ s = strndup(str, e - str);
+ if (!s)
+ return -1;
+ ret = add_cgroup(evlist, s);
+ if (ret) {
+ free(s);
+ return -1;
+ }
+ }
+ /* nr_cgroups is increased even for empty cgroups */
+ nr_cgroups++;
+ if (!p)
+ break;
+ str = p+1;
+ }
+ return 0;
+}
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
new file mode 100644
index 000000000000..89acd6debdc5
--- /dev/null
+++ b/tools/perf/util/cgroup.h
@@ -0,0 +1,17 @@
+#ifndef __CGROUP_H__
+#define __CGROUP_H__
+
+struct option;
+
+struct cgroup_sel {
+ char *name;
+ int fd;
+ int refcnt;
+};
+
+
+extern int nr_cgroups; /* number of explicit cgroups defined */
+extern void close_cgroup(struct cgroup_sel *cgrp);
+extern int parse_cgroups(const struct option *opt, const char *str, int unset);
+
+#endif /* __CGROUP_H__ */
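
parse_cgroups() follows the parse-options callback convention, so the expected wiring is an OPT_CALLBACK entry whose value points at the tool's perf_evlist pointer. The option letter, name and help text below are illustrative, not taken from this patch.

#include "util/parse-options.h"
#include "util/evlist.h"
#include "util/cgroup.h"

static struct perf_evlist *evsel_list;  /* assumed to be the tool's event list */

static const struct option sketch_options[] = {
        /* parse_cgroups() requires the events (-e) to be added before the
         * cgroups, and pairs the Nth cgroup with the Nth event. */
        OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                     "monitor events only in these cgroups", parse_cgroups),
        OPT_END()
};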
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 3ccaa1043383..6893eec693ab 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -177,3 +177,8 @@ struct cpu_map *cpu_map__dummy_new(void)
return cpus;
}
+
+void cpu_map__delete(struct cpu_map *map)
+{
+ free(map);
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index f7a4f42f6307..072c0a374794 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -8,6 +8,6 @@ struct cpu_map {
struct cpu_map *cpu_map__new(const char *cpu_list);
struct cpu_map *cpu_map__dummy_new(void);
-void *cpu_map__delete(struct cpu_map *map);
+void cpu_map__delete(struct cpu_map *map);
#endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 01bbe8ecec3f..d4536a9e0d8c 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -57,7 +57,7 @@ void ui__warning(const char *format, ...)
}
#endif
-void trace_event(event_t *event)
+void trace_event(union perf_event *event)
{
unsigned char *raw_event = (void *)event;
const char *color = PERF_COLOR_BLUE;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index ca35fd66b5df..93516cf4682c 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -9,7 +9,7 @@ extern int verbose;
extern bool quiet, dump_trace;
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
-void trace_event(event_t *event);
+void trace_event(union perf_event *event);
struct ui_progress;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 50d0a931497a..fbf5754c8866 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -6,8 +6,9 @@
#include "string.h"
#include "strlist.h"
#include "thread.h"
+#include "thread_map.h"
-static const char *event__name[] = {
+static const char *perf_event__names[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
[PERF_RECORD_LOST] = "LOST",
@@ -25,16 +26,16 @@ static const char *event__name[] = {
[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
};
-const char *event__get_event_name(unsigned int id)
+const char *perf_event__name(unsigned int id)
{
- if (id >= ARRAY_SIZE(event__name))
+ if (id >= ARRAY_SIZE(perf_event__names))
return "INVALID";
- if (!event__name[id])
+ if (!perf_event__names[id])
return "UNKNOWN";
- return event__name[id];
+ return perf_event__names[id];
}
-static struct sample_data synth_sample = {
+static struct perf_sample synth_sample = {
.pid = -1,
.tid = -1,
.time = -1,
@@ -43,9 +44,9 @@ static struct sample_data synth_sample = {
.period = 1,
};
-static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
- event__handler_t process,
- struct perf_session *session)
+static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
+ int full, perf_event__handler_t process,
+ struct perf_session *session)
{
char filename[PATH_MAX];
char bf[BUFSIZ];
@@ -126,9 +127,10 @@ out:
return tgid;
}
-static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
- event__handler_t process,
- struct perf_session *session)
+static int perf_event__synthesize_mmap_events(union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct perf_session *session)
{
char filename[PATH_MAX];
FILE *fp;
@@ -199,14 +201,14 @@ static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
return 0;
}
-int event__synthesize_modules(event__handler_t process,
- struct perf_session *session,
- struct machine *machine)
+int perf_event__synthesize_modules(perf_event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine)
{
struct rb_node *nd;
struct map_groups *kmaps = &machine->kmaps;
- event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
-
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ session->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -251,23 +253,24 @@ int event__synthesize_modules(event__handler_t process,
return 0;
}
-static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
- pid_t pid, event__handler_t process,
+static int __event__synthesize_thread(union perf_event *comm_event,
+ union perf_event *mmap_event,
+ pid_t pid, perf_event__handler_t process,
struct perf_session *session)
{
- pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
+ pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
session);
if (tgid == -1)
return -1;
- return event__synthesize_mmap_events(mmap_event, pid, tgid,
+ return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
process, session);
}
-int event__synthesize_thread_map(struct thread_map *threads,
- event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_thread_map(struct thread_map *threads,
+ perf_event__handler_t process,
+ struct perf_session *session)
{
- event_t *comm_event, *mmap_event;
+ union perf_event *comm_event, *mmap_event;
int err = -1, thread;
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
@@ -294,12 +297,12 @@ out:
return err;
}
-int event__synthesize_threads(event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_threads(perf_event__handler_t process,
+ struct perf_session *session)
{
DIR *proc;
struct dirent dirent, *next;
- event_t *comm_event, *mmap_event;
+ union perf_event *comm_event, *mmap_event;
int err = -1;
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
@@ -357,10 +360,10 @@ static int find_symbol_cb(void *arg, const char *name, char type,
return 1;
}
-int event__synthesize_kernel_mmap(event__handler_t process,
- struct perf_session *session,
- struct machine *machine,
- const char *symbol_name)
+int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine,
+ const char *symbol_name)
{
size_t size;
const char *filename, *mmap_name;
@@ -374,8 +377,8 @@ int event__synthesize_kernel_mmap(event__handler_t process,
* kernels.
*/
struct process_symbol_args args = { .name = symbol_name, };
- event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
-
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ session->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -448,14 +451,15 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm,
return 0;
}
-int event__process_comm(event_t *self, struct sample_data *sample __used,
- struct perf_session *session)
+int perf_event__process_comm(union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_session *session)
{
- struct thread *thread = perf_session__findnew(session, self->comm.tid);
+ struct thread *thread = perf_session__findnew(session, event->comm.tid);
- dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
+ dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
- if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
+ if (thread == NULL || thread__set_comm_adjust(thread, event->comm.comm,
&session->hists)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
@@ -464,19 +468,21 @@ int event__process_comm(event_t *self, struct sample_data *sample __used,
return 0;
}
-int event__process_lost(event_t *self, struct sample_data *sample __used,
- struct perf_session *session)
+int perf_event__process_lost(union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_session *session)
{
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
- self->lost.id, self->lost.lost);
- session->hists.stats.total_lost += self->lost.lost;
+ event->lost.id, event->lost.lost);
+ session->hists.stats.total_lost += event->lost.lost;
return 0;
}
-static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
+static void perf_event__set_kernel_mmap_len(union perf_event *event,
+ struct map **maps)
{
- maps[MAP__FUNCTION]->start = self->mmap.start;
- maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
+ maps[MAP__FUNCTION]->start = event->mmap.start;
+ maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len;
/*
* Be a bit paranoid here, some perf.data file came with
* a zero sized synthesized MMAP event for the kernel.
@@ -485,8 +491,8 @@ static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
maps[MAP__FUNCTION]->end = ~0ULL;
}
-static int event__process_kernel_mmap(event_t *self,
- struct perf_session *session)
+static int perf_event__process_kernel_mmap(union perf_event *event,
+ struct perf_session *session)
{
struct map *map;
char kmmap_prefix[PATH_MAX];
@@ -494,9 +500,9 @@ static int event__process_kernel_mmap(event_t *self,
enum dso_kernel_type kernel_type;
bool is_kernel_mmap;
- machine = perf_session__findnew_machine(session, self->mmap.pid);
+ machine = perf_session__findnew_machine(session, event->mmap.pid);
if (!machine) {
- pr_err("Can't find id %d's machine\n", self->mmap.pid);
+ pr_err("Can't find id %d's machine\n", event->mmap.pid);
goto out_problem;
}
@@ -506,17 +512,17 @@ static int event__process_kernel_mmap(event_t *self,
else
kernel_type = DSO_TYPE_GUEST_KERNEL;
- is_kernel_mmap = memcmp(self->mmap.filename,
+ is_kernel_mmap = memcmp(event->mmap.filename,
kmmap_prefix,
strlen(kmmap_prefix)) == 0;
- if (self->mmap.filename[0] == '/' ||
- (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
+ if (event->mmap.filename[0] == '/' ||
+ (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
char short_module_name[1024];
char *name, *dot;
- if (self->mmap.filename[0] == '/') {
- name = strrchr(self->mmap.filename, '/');
+ if (event->mmap.filename[0] == '/') {
+ name = strrchr(event->mmap.filename, '/');
if (name == NULL)
goto out_problem;
@@ -528,10 +534,10 @@ static int event__process_kernel_mmap(event_t *self,
"[%.*s]", (int)(dot - name), name);
strxfrchar(short_module_name, '-', '_');
} else
- strcpy(short_module_name, self->mmap.filename);
+ strcpy(short_module_name, event->mmap.filename);
- map = machine__new_module(machine, self->mmap.start,
- self->mmap.filename);
+ map = machine__new_module(machine, event->mmap.start,
+ event->mmap.filename);
if (map == NULL)
goto out_problem;
@@ -541,9 +547,9 @@ static int event__process_kernel_mmap(event_t *self,
map->dso->short_name = name;
map->dso->sname_alloc = 1;
- map->end = map->start + self->mmap.len;
+ map->end = map->start + event->mmap.len;
} else if (is_kernel_mmap) {
- const char *symbol_name = (self->mmap.filename +
+ const char *symbol_name = (event->mmap.filename +
strlen(kmmap_prefix));
/*
* Should be there already, from the build-id table in
@@ -558,10 +564,10 @@ static int event__process_kernel_mmap(event_t *self,
if (__machine__create_kernel_maps(machine, kernel) < 0)
goto out_problem;
- event_set_kernel_mmap_len(machine->vmlinux_maps, self);
+ perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
symbol_name,
- self->mmap.pgoff);
+ event->mmap.pgoff);
if (machine__is_default_guest(machine)) {
/*
* preload dso of guest kernel and modules
@@ -575,22 +581,23 @@ out_problem:
return -1;
}
-int event__process_mmap(event_t *self, struct sample_data *sample __used,
- struct perf_session *session)
+int perf_event__process_mmap(union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_session *session)
{
struct machine *machine;
struct thread *thread;
struct map *map;
- u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
int ret = 0;
dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
- self->mmap.pid, self->mmap.tid, self->mmap.start,
- self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+ event->mmap.pid, event->mmap.tid, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff, event->mmap.filename);
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
cpumode == PERF_RECORD_MISC_KERNEL) {
- ret = event__process_kernel_mmap(self, session);
+ ret = perf_event__process_kernel_mmap(event, session);
if (ret < 0)
goto out_problem;
return 0;
@@ -599,12 +606,12 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used,
machine = perf_session__find_host_machine(session);
if (machine == NULL)
goto out_problem;
- thread = perf_session__findnew(session, self->mmap.pid);
+ thread = perf_session__findnew(session, event->mmap.pid);
if (thread == NULL)
goto out_problem;
- map = map__new(&machine->user_dsos, self->mmap.start,
- self->mmap.len, self->mmap.pgoff,
- self->mmap.pid, self->mmap.filename,
+ map = map__new(&machine->user_dsos, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff,
+ event->mmap.pid, event->mmap.filename,
MAP__FUNCTION);
if (map == NULL)
goto out_problem;
@@ -617,16 +624,17 @@ out_problem:
return 0;
}
-int event__process_task(event_t *self, struct sample_data *sample __used,
- struct perf_session *session)
+int perf_event__process_task(union perf_event *event,
+ struct perf_sample *sample __used,
+ struct perf_session *session)
{
- struct thread *thread = perf_session__findnew(session, self->fork.tid);
- struct thread *parent = perf_session__findnew(session, self->fork.ptid);
+ struct thread *thread = perf_session__findnew(session, event->fork.tid);
+ struct thread *parent = perf_session__findnew(session, event->fork.ptid);
- dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
- self->fork.ppid, self->fork.ptid);
+ dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
- if (self->header.type == PERF_RECORD_EXIT) {
+ if (event->header.type == PERF_RECORD_EXIT) {
perf_session__remove_thread(session, thread);
return 0;
}
@@ -640,20 +648,22 @@ int event__process_task(event_t *self, struct sample_data *sample __used,
return 0;
}
-int event__process(event_t *event, struct sample_data *sample,
- struct perf_session *session)
+int perf_event__process(union perf_event *event, struct perf_sample *sample,
+ struct perf_session *session)
{
switch (event->header.type) {
case PERF_RECORD_COMM:
- event__process_comm(event, sample, session);
+ perf_event__process_comm(event, sample, session);
break;
case PERF_RECORD_MMAP:
- event__process_mmap(event, sample, session);
+ perf_event__process_mmap(event, sample, session);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
- event__process_task(event, sample, session);
+ perf_event__process_task(event, sample, session);
break;
+ case PERF_RECORD_LOST:
+ perf_event__process_lost(event, sample, session);
default:
break;
}
@@ -762,12 +772,14 @@ static void dso__calc_col_width(struct dso *self, struct hists *hists)
self->slen_calculated = 1;
}
-int event__preprocess_sample(const event_t *self, struct perf_session *session,
- struct addr_location *al, struct sample_data *data,
- symbol_filter_t filter)
+int perf_event__preprocess_sample(const union perf_event *event,
+ struct perf_session *session,
+ struct addr_location *al,
+ struct perf_sample *sample,
+ symbol_filter_t filter)
{
- u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = perf_session__findnew(session, self->ip.pid);
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = perf_session__findnew(session, event->ip.pid);
if (thread == NULL)
return -1;
@@ -789,12 +801,12 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
machine__create_kernel_maps(&session->host_machine);
thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- self->ip.pid, self->ip.ip, al);
+ event->ip.pid, event->ip.ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
al->sym = NULL;
- al->cpu = data->cpu;
+ al->cpu = sample->cpu;
if (al->map) {
if (symbol_conf.dso_list &&
@@ -834,128 +846,3 @@ out_filtered:
al->filtered = true;
return 0;
}
-
-static int event__parse_id_sample(const event_t *event,
- struct perf_session *session,
- struct sample_data *sample)
-{
- const u64 *array;
- u64 type;
-
- sample->cpu = sample->pid = sample->tid = -1;
- sample->stream_id = sample->id = sample->time = -1ULL;
-
- if (!session->sample_id_all)
- return 0;
-
- array = event->sample.array;
- array += ((event->header.size -
- sizeof(event->header)) / sizeof(u64)) - 1;
- type = session->sample_type;
-
- if (type & PERF_SAMPLE_CPU) {
- u32 *p = (u32 *)array;
- sample->cpu = *p;
- array--;
- }
-
- if (type & PERF_SAMPLE_STREAM_ID) {
- sample->stream_id = *array;
- array--;
- }
-
- if (type & PERF_SAMPLE_ID) {
- sample->id = *array;
- array--;
- }
-
- if (type & PERF_SAMPLE_TIME) {
- sample->time = *array;
- array--;
- }
-
- if (type & PERF_SAMPLE_TID) {
- u32 *p = (u32 *)array;
- sample->pid = p[0];
- sample->tid = p[1];
- }
-
- return 0;
-}
-
-int event__parse_sample(const event_t *event, struct perf_session *session,
- struct sample_data *data)
-{
- const u64 *array;
- u64 type;
-
- if (event->header.type != PERF_RECORD_SAMPLE)
- return event__parse_id_sample(event, session, data);
-
- array = event->sample.array;
- type = session->sample_type;
-
- if (type & PERF_SAMPLE_IP) {
- data->ip = event->ip.ip;
- array++;
- }
-
- if (type & PERF_SAMPLE_TID) {
- u32 *p = (u32 *)array;
- data->pid = p[0];
- data->tid = p[1];
- array++;
- }
-
- if (type & PERF_SAMPLE_TIME) {
- data->time = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_ADDR) {
- data->addr = *array;
- array++;
- }
-
- data->id = -1ULL;
- if (type & PERF_SAMPLE_ID) {
- data->id = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_STREAM_ID) {
- data->stream_id = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_CPU) {
- u32 *p = (u32 *)array;
- data->cpu = *p;
- array++;
- } else
- data->cpu = -1;
-
- if (type & PERF_SAMPLE_PERIOD) {
- data->period = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_READ) {
- pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
- return -1;
- }
-
- if (type & PERF_SAMPLE_CALLCHAIN) {
- data->callchain = (struct ip_callchain *)array;
- array += 1 + data->callchain->nr;
- }
-
- if (type & PERF_SAMPLE_RAW) {
- u32 *p = (u32 *)array;
- data->raw_size = *p;
- p++;
- data->raw_data = p;
- }
-
- return 0;
-}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index cc7b52f9b492..9c35170fb379 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -61,7 +61,7 @@ struct sample_event {
u64 array[];
};
-struct sample_data {
+struct perf_sample {
u64 ip;
u32 pid, tid;
u64 time;
@@ -117,7 +117,7 @@ struct tracing_data_event {
u32 size;
};
-typedef union event_union {
+union perf_event {
struct perf_event_header header;
struct ip_event ip;
struct mmap_event mmap;
@@ -130,50 +130,54 @@ typedef union event_union {
struct event_type_event event_type;
struct tracing_data_event tracing_data;
struct build_id_event build_id;
-} event_t;
+};
-void event__print_totals(void);
+void perf_event__print_totals(void);
struct perf_session;
struct thread_map;
-typedef int (*event__handler_synth_t)(event_t *event,
+typedef int (*perf_event__handler_synth_t)(union perf_event *event,
+ struct perf_session *session);
+typedef int (*perf_event__handler_t)(union perf_event *event,
+ struct perf_sample *sample,
struct perf_session *session);
-typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
- struct perf_session *session);
-
-int event__synthesize_thread_map(struct thread_map *threads,
- event__handler_t process,
- struct perf_session *session);
-int event__synthesize_threads(event__handler_t process,
- struct perf_session *session);
-int event__synthesize_kernel_mmap(event__handler_t process,
- struct perf_session *session,
- struct machine *machine,
- const char *symbol_name);
-
-int event__synthesize_modules(event__handler_t process,
- struct perf_session *session,
- struct machine *machine);
-
-int event__process_comm(event_t *self, struct sample_data *sample,
- struct perf_session *session);
-int event__process_lost(event_t *self, struct sample_data *sample,
- struct perf_session *session);
-int event__process_mmap(event_t *self, struct sample_data *sample,
- struct perf_session *session);
-int event__process_task(event_t *self, struct sample_data *sample,
+
+int perf_event__synthesize_thread_map(struct thread_map *threads,
+ perf_event__handler_t process,
+ struct perf_session *session);
+int perf_event__synthesize_threads(perf_event__handler_t process,
+ struct perf_session *session);
+int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine,
+ const char *symbol_name);
+
+int perf_event__synthesize_modules(perf_event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine);
+
+int perf_event__process_comm(union perf_event *event, struct perf_sample *sample,
+ struct perf_session *session);
+int perf_event__process_lost(union perf_event *event, struct perf_sample *sample,
+ struct perf_session *session);
+int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample,
+ struct perf_session *session);
+int perf_event__process_task(union perf_event *event, struct perf_sample *sample,
+ struct perf_session *session);
+int perf_event__process(union perf_event *event, struct perf_sample *sample,
struct perf_session *session);
-int event__process(event_t *event, struct sample_data *sample,
- struct perf_session *session);
struct addr_location;
-int event__preprocess_sample(const event_t *self, struct perf_session *session,
- struct addr_location *al, struct sample_data *data,
- symbol_filter_t filter);
-int event__parse_sample(const event_t *event, struct perf_session *session,
- struct sample_data *sample);
+int perf_event__preprocess_sample(const union perf_event *self,
+ struct perf_session *session,
+ struct addr_location *al,
+ struct perf_sample *sample,
+ symbol_filter_t filter);
+
+const char *perf_event__name(unsigned int id);
-const char *event__get_event_name(unsigned int id);
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+ bool sample_id_all, struct perf_sample *sample);
#endif /* __PERF_RECORD_H */
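Note: perf_event__parse_sample() now takes the sample_type and sample_id_all flags explicitly instead of digging them out of the perf_session. A minimal caller sketch under the new signature (assuming the session still carries both fields, as the old helper did; illustrative only, not code from this patch):

	/* Hedged sketch: resolve one record into a struct perf_sample. */
	static int resolve_sample(union perf_event *event,
				  struct perf_session *session,
				  struct perf_sample *sample)
	{
		/* non-PERF_RECORD_SAMPLE records only carry the id/tid/time
		 * trailer when sample_id_all is set */
		return perf_event__parse_sample(event, session->sample_type,
						session->sample_id_all, sample);
	}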
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
new file mode 100644
index 000000000000..95b21fece2ce
--- /dev/null
+++ b/tools/perf/util/evlist.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "util.h"
+
+#include <sys/mman.h>
+
+#include <linux/bitops.h>
+#include <linux/hash.h>
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->id, x, y)
+
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+ INIT_LIST_HEAD(&evlist->entries);
+ perf_evlist__set_maps(evlist, cpus, threads);
+}
+
+struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+ if (evlist != NULL)
+ perf_evlist__init(evlist, cpus, threads);
+
+ return evlist;
+}
+
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &evlist->entries, node) {
+ list_del_init(&pos->node);
+ perf_evsel__delete(pos);
+ }
+
+ evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+ free(evlist->mmap);
+ free(evlist->pollfd);
+ evlist->mmap = NULL;
+ evlist->pollfd = NULL;
+}
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+ perf_evlist__purge(evlist);
+ perf_evlist__exit(evlist);
+ free(evlist);
+}
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+{
+ list_add_tail(&entry->node, &evlist->entries);
+ ++evlist->nr_entries;
+}
+
+int perf_evlist__add_default(struct perf_evlist *evlist)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
+
+ if (evsel == NULL)
+ return -ENOMEM;
+
+ perf_evlist__add(evlist, evsel);
+ return 0;
+}
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+ int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
+ evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
+ return evlist->pollfd != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ evlist->pollfd[evlist->nr_fds].fd = fd;
+ evlist->pollfd[evlist->nr_fds].events = POLLIN;
+ evlist->nr_fds++;
+}
+
+static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
+{
+ struct perf_sample_id *sid;
+ u64 read_data[4] = { 0, };
+ int hash, id_idx = 1; /* The first entry is the counter value */
+
+ if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+ read(fd, &read_data, sizeof(read_data)) == -1)
+ return -1;
+
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ ++id_idx;
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ ++id_idx;
+
+ sid = SID(evsel, cpu, thread);
+ sid->id = read_data[id_idx];
+ sid->evsel = evsel;
+ hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&sid->node, &evlist->heads[hash]);
+ return 0;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct hlist_head *head;
+ struct hlist_node *pos;
+ struct perf_sample_id *sid;
+ int hash;
+
+ if (evlist->nr_entries == 1)
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, pos, head, node)
+ if (sid->id == id)
+ return sid->evsel;
+ return NULL;
+}
+
+union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
+{
+ /* XXX Move this to perf.c, making it generally available */
+ unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+ struct perf_mmap *md = &evlist->mmap[cpu];
+ unsigned int head = perf_mmap__read_head(md);
+ unsigned int old = md->prev;
+ unsigned char *data = md->base + page_size;
+ union perf_event *event = NULL;
+
+ if (evlist->overwrite) {
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the head, we got messed up.
+ *
+ * In either case, truncate and restart at head.
+ */
+ int diff = head - old;
+ if (diff > md->mask / 2 || diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+ /*
+ * head points to a known good entry, start there.
+ */
+ old = head;
+ }
+ }
+
+ if (old != head) {
+ size_t size;
+
+ event = (union perf_event *)&data[old & md->mask];
+ size = event->header.size;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((old & md->mask) + size != ((old + size) & md->mask)) {
+ unsigned int offset = old;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = &evlist->event_copy;
+
+ do {
+ cpy = min(md->mask + 1 - (offset & md->mask), len);
+ memcpy(dst, &data[offset & md->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = &evlist->event_copy;
+ }
+
+ old += size;
+ }
+
+ md->prev = old;
+
+ if (!evlist->overwrite)
+ perf_mmap__write_tail(md, old);
+
+ return event;
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ if (evlist->mmap[cpu].base != NULL) {
+ munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+ evlist->mmap[cpu].base = NULL;
+ }
+ }
+}
+
+int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+{
+ evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
+ return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
+ int mask, int fd)
+{
+ evlist->mmap[cpu].prev = 0;
+ evlist->mmap[cpu].mask = mask;
+ evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
+ MAP_SHARED, fd, 0);
+ if (evlist->mmap[cpu].base == MAP_FAILED)
+ return -1;
+
+ perf_evlist__add_pollfd(evlist, fd);
+ return 0;
+}
+
+/** perf_evlist__mmap - Create per cpu maps to receive events
+ *
+ * @evlist - list of events
+ * @pages - map length in pages
+ * @overwrite - overwrite older events?
+ *
+ * If overwrite is false, the user needs to signal event consumption using:
+ *
+ * struct perf_mmap *m = &evlist->mmap[cpu];
+ * unsigned int head = perf_mmap__read_head(m);
+ *
+ * perf_mmap__write_tail(m, head)
+ *
+ * Using perf_evlist__read_on_cpu does this automatically.
+ */
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
+{
+ unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+ int mask = pages * page_size - 1, cpu;
+ struct perf_evsel *first_evsel, *evsel;
+ const struct cpu_map *cpus = evlist->cpus;
+ const struct thread_map *threads = evlist->threads;
+ int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+
+ if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+ return -ENOMEM;
+
+ if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ return -ENOMEM;
+
+ evlist->overwrite = overwrite;
+ evlist->mmap_len = (pages + 1) * page_size;
+ first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->id == NULL &&
+ perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
+ return -ENOMEM;
+
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (thread = 0; thread < threads->nr; thread++) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (evsel->idx || thread) {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
+ FD(first_evsel, cpu, 0)) != 0)
+ goto out_unmap;
+ } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+ goto out_unmap;
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
+ goto out_unmap;
+ }
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ if (evlist->mmap[cpu].base != NULL) {
+ munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+ evlist->mmap[cpu].base = NULL;
+ }
+ }
+ return -1;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
+ pid_t target_tid, const char *cpu_list)
+{
+ evlist->threads = thread_map__new(target_pid, target_tid);
+
+ if (evlist->threads == NULL)
+ return -1;
+
+ if (target_tid != -1)
+ evlist->cpus = cpu_map__dummy_new();
+ else
+ evlist->cpus = cpu_map__new(cpu_list);
+
+ if (evlist->cpus == NULL)
+ goto out_delete_threads;
+
+ return 0;
+
+out_delete_threads:
+ thread_map__delete(evlist->threads);
+ return -1;
+}
+
+void perf_evlist__delete_maps(struct perf_evlist *evlist)
+{
+ cpu_map__delete(evlist->cpus);
+ thread_map__delete(evlist->threads);
+ evlist->cpus = NULL;
+ evlist->threads = NULL;
+}
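Note: the perf_evlist__mmap() kernel-doc above spells out the consumption protocol; a rough usage sketch of the evlist API added by this file (error paths trimmed; 'cpus', 'threads' and the record handler are assumed to exist in the caller -- this is not a sequence lifted from any builtin):

	/* Hedged sketch: default cycles event, mmap'ed and drained per cpu. */
	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
	struct perf_evsel *evsel;
	union perf_event *event;
	int cpu;

	if (evlist == NULL || perf_evlist__add_default(evlist) < 0)
		return -1;

	list_for_each_entry(evsel, &evlist->entries, node)
		if (perf_evsel__open(evsel, cpus, threads, false, false) < 0)
			return -1;

	if (perf_evlist__mmap(evlist, 128, false) < 0)	/* 128 pages, no overwrite */
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++)
		while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL) {
			/* hand 'event' to the tool's handler here; with
			 * overwrite == false, read_on_cpu() already advanced
			 * the tail for us */
		}

	perf_evlist__munmap(evlist);
	perf_evlist__delete(evlist);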
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
new file mode 100644
index 000000000000..c9884056097c
--- /dev/null
+++ b/tools/perf/util/evlist.h
@@ -0,0 +1,64 @@
+#ifndef __PERF_EVLIST_H
+#define __PERF_EVLIST_H 1
+
+#include <linux/list.h>
+#include "../perf.h"
+#include "event.h"
+
+struct pollfd;
+struct thread_map;
+struct cpu_map;
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+struct perf_evlist {
+ struct list_head entries;
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ int nr_entries;
+ int nr_fds;
+ int mmap_len;
+ bool overwrite;
+ union perf_event event_copy;
+ struct perf_mmap *mmap;
+ struct pollfd *pollfd;
+ struct thread_map *threads;
+ struct cpu_map *cpus;
+};
+
+struct perf_evsel;
+
+struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evlist__exit(struct perf_evlist *evlist);
+void perf_evlist__delete(struct perf_evlist *evlist);
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
+int perf_evlist__add_default(struct perf_evlist *evlist);
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+
+union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
+
+int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+void perf_evlist__munmap(struct perf_evlist *evlist);
+
+static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ evlist->cpus = cpus;
+ evlist->threads = threads;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
+ pid_t target_tid, const char *cpu_list);
+void perf_evlist__delete_maps(struct perf_evlist *evlist);
+
+#endif /* __PERF_EVLIST_H */
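Note: perf_evlist__create_maps() centralizes the "pid vs. tid vs. cpu list" target selection. A short sketch of the expected pairing with perf_evlist__delete_maps() ('target_pid'/'target_tid' default to -1 for "not given", 'cpu_list' may be NULL; the variable names are illustrative):

	/* Hedged sketch: derive cpu/thread maps from command-line state. */
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -1;
	if (perf_evlist__create_maps(evlist, target_pid, target_tid, cpu_list) < 0) {
		perf_evlist__delete(evlist);
		return -1;
	}
	/* ... evlist->cpus and evlist->threads are now usable ... */
	perf_evlist__delete_maps(evlist);
	perf_evlist__delete(evlist);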
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d8575d31ee6c..8083d5126fca 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1,20 +1,34 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
#include "evsel.h"
-#include "../perf.h"
+#include "evlist.h"
#include "util.h"
#include "cpumap.h"
-#include "thread.h"
+#include "thread_map.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx)
+{
+ evsel->idx = idx;
+ evsel->attr = *attr;
+ INIT_LIST_HEAD(&evsel->node);
+}
+
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
- if (evsel != NULL) {
- evsel->idx = idx;
- evsel->attr = *attr;
- INIT_LIST_HEAD(&evsel->node);
- }
+ if (evsel != NULL)
+ perf_evsel__init(evsel, attr, idx);
return evsel;
}
@@ -25,6 +39,12 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+ return evsel->id != NULL ? 0 : -ENOMEM;
+}
+
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
evsel->counts = zalloc((sizeof(*evsel->counts) +
@@ -38,6 +58,12 @@ void perf_evsel__free_fd(struct perf_evsel *evsel)
evsel->fd = NULL;
}
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->id);
+ evsel->id = NULL;
+}
+
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
@@ -49,10 +75,18 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
}
}
-void perf_evsel__delete(struct perf_evsel *evsel)
+void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
xyarray__delete(evsel->fd);
+ xyarray__delete(evsel->id);
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+ perf_evsel__exit(evsel);
+ close_cgroup(evsel->cgrp);
+ free(evsel->name);
free(evsel);
}
@@ -128,21 +162,51 @@ int __perf_evsel__read(struct perf_evsel *evsel,
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads)
+ struct thread_map *threads, bool group, bool inherit)
{
int cpu, thread;
+ unsigned long flags = 0;
+ int pid = -1;
if (evsel->fd == NULL &&
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
return -1;
+ if (evsel->cgrp) {
+ flags = PERF_FLAG_PID_CGROUP;
+ pid = evsel->cgrp->fd;
+ }
+
for (cpu = 0; cpu < cpus->nr; cpu++) {
+ int group_fd = -1;
+ /*
+ * Don't allow mmap() of inherited per-task counters. This
+ * would create a performance issue due to all children writing
+ * to the same buffer.
+ *
+ * FIXME:
+ * Proper fix is not to pass 'inherit' to perf_evsel__open*,
+ * but a 'flags' parameter, with 'group' folded there as well,
+ * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
+ * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
+ * set. Let's go for the minimal fix first, though.
+ */
+ evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
+
for (thread = 0; thread < threads->nr; thread++) {
+
+ if (!evsel->cgrp)
+ pid = threads->map[thread];
+
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
- threads->map[thread],
- cpus->map[cpu], -1, 0);
+ pid,
+ cpus->map[cpu],
+ group_fd, flags);
if (FD(evsel, cpu, thread) < 0)
goto out_close;
+
+ if (group && group_fd == -1)
+ group_fd = FD(evsel, cpu, thread);
}
}
@@ -175,10 +239,9 @@ static struct {
.threads = { -1, },
};
-int perf_evsel__open(struct perf_evsel *evsel,
- struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads, bool group, bool inherit)
{
-
if (cpus == NULL) {
/* Work around old compiler warnings about strict aliasing */
cpus = &empty_cpu_map.map;
@@ -187,15 +250,135 @@ int perf_evsel__open(struct perf_evsel *evsel,
if (threads == NULL)
threads = &empty_thread_map.map;
- return __perf_evsel__open(evsel, cpus, threads);
+ return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus, bool group, bool inherit)
+{
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+}
+
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads, bool group, bool inherit)
+{
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
+ struct perf_sample *sample)
{
- return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+ const u64 *array = event->sample.array;
+
+ array += ((event->header.size -
+ sizeof(event->header)) / sizeof(u64)) - 1;
+
+ if (type & PERF_SAMPLE_CPU) {
+ u32 *p = (u32 *)array;
+ sample->cpu = *p;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ sample->stream_id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ sample->id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ sample->time = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u32 *p = (u32 *)array;
+ sample->pid = p[0];
+ sample->tid = p[1];
+ }
+
+ return 0;
}
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+ bool sample_id_all, struct perf_sample *data)
{
- return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
+ const u64 *array;
+
+ data->cpu = data->pid = data->tid = -1;
+ data->stream_id = data->id = data->time = -1ULL;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ if (!sample_id_all)
+ return 0;
+ return perf_event__parse_id_sample(event, type, data);
+ }
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = event->ip.ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u32 *p = (u32 *)array;
+ data->pid = p[0];
+ data->tid = p[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u32 *p = (u32 *)array;
+ data->cpu = *p;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n");
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ data->callchain = (struct ip_callchain *)array;
+ array += 1 + data->callchain->nr;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u32 *p = (u32 *)array;
+ data->raw_size = *p;
+ p++;
+ data->raw_data = p;
+ }
+
+ return 0;
}
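Note: the 'group' and 'inherit' flags added above feed straight into sys_perf_event_open(): the first fd opened on each cpu becomes the group leader when 'group' is set, and 'inherit' is masked off for per-task (cpu == -1) entries as the FIXME explains. A hedged sketch of the simplest case, one event on every online cpu with neither flag (cpu_map__new(NULL) is assumed to mean "all online cpus"):

	/* Hedged sketch: cycles on every cpu, all threads, no group/inherit. */
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct cpu_map *cpus = cpu_map__new(NULL);
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (cpus == NULL || evsel == NULL)
		return -1;
	/* group = false: every fd is its own leader; inherit = false */
	if (perf_evsel__open_per_cpu(evsel, cpus, false, false) < 0)
		return -1;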
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b2d755fe88a5..f6fc8f651a25 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -6,6 +6,7 @@
#include "../../../include/linux/perf_event.h"
#include "types.h"
#include "xyarray.h"
+#include "cgroup.h"
struct perf_counts_values {
union {
@@ -24,31 +25,60 @@ struct perf_counts {
struct perf_counts_values cpu[];
};
+struct perf_evsel;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+ struct hlist_node node;
+ u64 id;
+ struct perf_evsel *evsel;
+};
+
+/** struct perf_evsel - event selector
+ *
+ * @name - Can be set to retain the original event name passed by the user,
+ * so that when showing results in tools such as 'perf stat', we
+ * show the name used, not some alias.
+ */
struct perf_evsel {
struct list_head node;
struct perf_event_attr attr;
char *filter;
struct xyarray *fd;
+ struct xyarray *id;
struct perf_counts *counts;
int idx;
+ char *name;
void *priv;
+ struct cgroup_sel *cgrp;
};
struct cpu_map;
struct thread_map;
+struct perf_evlist;
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx);
+void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus);
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads);
-int perf_evsel__open(struct perf_evsel *evsel,
- struct cpu_map *cpus, struct thread_map *threads);
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus, bool group, bool inherit);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads, bool group, bool inherit);
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads, bool group, bool inherit);
#define perf_evsel__match(evsel, t, c) \
(evsel->attr.type == PERF_TYPE_##t && \
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index 67eeff571568..7adf4ad15d8f 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -11,31 +11,12 @@ static const char *argv0_path;
const char *system_path(const char *path)
{
-#ifdef RUNTIME_PREFIX
- static const char *prefix;
-#else
static const char *prefix = PREFIX;
-#endif
struct strbuf d = STRBUF_INIT;
if (is_absolute_path(path))
return path;
-#ifdef RUNTIME_PREFIX
- assert(argv0_path);
- assert(is_absolute_path(argv0_path));
-
- if (!prefix &&
- !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) &&
- !(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
- !(prefix = strip_path_suffix(argv0_path, "perf"))) {
- prefix = PREFIX;
- fprintf(stderr, "RUNTIME_PREFIX requested, "
- "but prefix computation failed. "
- "Using static fallback '%s'.\n", prefix);
- }
-#endif
-
strbuf_addf(&d, "%s/%s", prefix, path);
path = strbuf_detach(&d, NULL);
return path;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f6a929e74981..72c124dc5781 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/kernel.h>
+#include "evlist.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
@@ -428,7 +429,8 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi
return ret;
}
-static int perf_header__adds_write(struct perf_header *self, int fd)
+static int perf_header__adds_write(struct perf_header *self,
+ struct perf_evlist *evlist, int fd)
{
int nr_sections;
struct perf_session *session;
@@ -463,7 +465,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
/* Write trace info */
trace_sec->offset = lseek(fd, 0, SEEK_CUR);
- read_tracing_data(fd, &evsel_list);
+ read_tracing_data(fd, &evlist->entries);
trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
}
@@ -513,7 +515,8 @@ int perf_header__write_pipe(int fd)
return 0;
}
-int perf_header__write(struct perf_header *self, int fd, bool at_exit)
+int perf_header__write(struct perf_header *self, struct perf_evlist *evlist,
+ int fd, bool at_exit)
{
struct perf_file_header f_header;
struct perf_file_attr f_attr;
@@ -566,7 +569,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
self->data_offset = lseek(fd, 0, SEEK_CUR);
if (at_exit) {
- err = perf_header__adds_write(self, fd);
+ err = perf_header__adds_write(self, evlist, fd);
if (err < 0)
return err;
}
@@ -997,11 +1000,11 @@ perf_header__find_attr(u64 id, struct perf_header *header)
return NULL;
}
-int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process,
+ struct perf_session *session)
{
- event_t *ev;
+ union perf_event *ev;
size_t size;
int err;
@@ -1028,8 +1031,9 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
return err;
}
-int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_attrs(struct perf_header *self,
+ perf_event__handler_t process,
+ struct perf_session *session)
{
struct perf_header_attr *attr;
int i, err = 0;
@@ -1037,8 +1041,8 @@ int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
for (i = 0; i < self->attrs; i++) {
attr = self->attr[i];
- err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
- process, session);
+ err = perf_event__synthesize_attr(&attr->attr, attr->ids,
+ attr->id, process, session);
if (err) {
pr_debug("failed to create perf header attribute\n");
return err;
@@ -1048,21 +1052,22 @@ int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
return err;
}
-int event__process_attr(event_t *self, struct perf_session *session)
+int perf_event__process_attr(union perf_event *event,
+ struct perf_session *session)
{
struct perf_header_attr *attr;
unsigned int i, ids, n_ids;
- attr = perf_header_attr__new(&self->attr.attr);
+ attr = perf_header_attr__new(&event->attr.attr);
if (attr == NULL)
return -ENOMEM;
- ids = self->header.size;
- ids -= (void *)&self->attr.id - (void *)self;
+ ids = event->header.size;
+ ids -= (void *)&event->attr.id - (void *)event;
n_ids = ids / sizeof(u64);
for (i = 0; i < n_ids; i++) {
- if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
+ if (perf_header_attr__add_id(attr, event->attr.id[i]) < 0) {
perf_header_attr__delete(attr);
return -ENOMEM;
}
@@ -1078,11 +1083,11 @@ int event__process_attr(event_t *self, struct perf_session *session)
return 0;
}
-int event__synthesize_event_type(u64 event_id, char *name,
- event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_event_type(u64 event_id, char *name,
+ perf_event__handler_t process,
+ struct perf_session *session)
{
- event_t ev;
+ union perf_event ev;
size_t size = 0;
int err = 0;
@@ -1103,8 +1108,8 @@ int event__synthesize_event_type(u64 event_id, char *name,
return err;
}
-int event__synthesize_event_types(event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_event_types(perf_event__handler_t process,
+ struct perf_session *session)
{
struct perf_trace_event_type *type;
int i, err = 0;
@@ -1112,8 +1117,9 @@ int event__synthesize_event_types(event__handler_t process,
for (i = 0; i < event_count; i++) {
type = &events[i];
- err = event__synthesize_event_type(type->event_id, type->name,
- process, session);
+ err = perf_event__synthesize_event_type(type->event_id,
+ type->name, process,
+ session);
if (err) {
pr_debug("failed to create perf header event type\n");
return err;
@@ -1123,28 +1129,28 @@ int event__synthesize_event_types(event__handler_t process,
return err;
}
-int event__process_event_type(event_t *self,
- struct perf_session *session __unused)
+int perf_event__process_event_type(union perf_event *event,
+ struct perf_session *session __unused)
{
- if (perf_header__push_event(self->event_type.event_type.event_id,
- self->event_type.event_type.name) < 0)
+ if (perf_header__push_event(event->event_type.event_type.event_id,
+ event->event_type.event_type.name) < 0)
return -ENOMEM;
return 0;
}
-int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
- event__handler_t process,
+int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process,
struct perf_session *session __unused)
{
- event_t ev;
+ union perf_event ev;
ssize_t size = 0, aligned_size = 0, padding;
- int err = 0;
+ int err __used = 0;
memset(&ev, 0, sizeof(ev));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
- size = read_tracing_data_size(fd, pattrs);
+ size = read_tracing_data_size(fd, &evlist->entries);
if (size <= 0)
return size;
aligned_size = ALIGN(size, sizeof(u64));
@@ -1154,16 +1160,16 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
process(&ev, NULL, session);
- err = read_tracing_data(fd, pattrs);
+ err = read_tracing_data(fd, &evlist->entries);
write_padded(fd, NULL, 0, padding);
return aligned_size;
}
-int event__process_tracing_data(event_t *self,
- struct perf_session *session)
+int perf_event__process_tracing_data(union perf_event *event,
+ struct perf_session *session)
{
- ssize_t size_read, padding, size = self->tracing_data.size;
+ ssize_t size_read, padding, size = event->tracing_data.size;
off_t offset = lseek(session->fd, 0, SEEK_CUR);
char buf[BUFSIZ];
@@ -1189,12 +1195,12 @@ int event__process_tracing_data(event_t *self,
return size_read + padding;
}
-int event__synthesize_build_id(struct dso *pos, u16 misc,
- event__handler_t process,
- struct machine *machine,
- struct perf_session *session)
+int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct perf_session *session)
{
- event_t ev;
+ union perf_event ev;
size_t len;
int err = 0;
@@ -1217,11 +1223,11 @@ int event__synthesize_build_id(struct dso *pos, u16 misc,
return err;
}
-int event__process_build_id(event_t *self,
- struct perf_session *session)
+int perf_event__process_build_id(union perf_event *event,
+ struct perf_session *session)
{
- __event_process_build_id(&self->build_id,
- self->build_id.filename,
+ __event_process_build_id(&event->build_id,
+ event->build_id.filename,
session);
return 0;
}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 33f16be7b72f..f042cebcec1e 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -65,8 +65,11 @@ struct perf_header {
int perf_header__init(struct perf_header *self);
void perf_header__exit(struct perf_header *self);
+struct perf_evlist;
+
int perf_header__read(struct perf_session *session, int fd);
-int perf_header__write(struct perf_header *self, int fd, bool at_exit);
+int perf_header__write(struct perf_header *self, struct perf_evlist *evlist,
+ int fd, bool at_exit);
int perf_header__write_pipe(int fd);
int perf_header__add_attr(struct perf_header *self,
@@ -97,32 +100,32 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
-int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- event__handler_t process,
- struct perf_session *session);
-int event__synthesize_attrs(struct perf_header *self,
- event__handler_t process,
- struct perf_session *session);
-int event__process_attr(event_t *self, struct perf_session *session);
-
-int event__synthesize_event_type(u64 event_id, char *name,
- event__handler_t process,
+int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process,
+ struct perf_session *session);
+int perf_event__synthesize_attrs(struct perf_header *self,
+ perf_event__handler_t process,
struct perf_session *session);
-int event__synthesize_event_types(event__handler_t process,
- struct perf_session *session);
-int event__process_event_type(event_t *self,
- struct perf_session *session);
-
-int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
- event__handler_t process,
+int perf_event__process_attr(union perf_event *event, struct perf_session *session);
+
+int perf_event__synthesize_event_type(u64 event_id, char *name,
+ perf_event__handler_t process,
+ struct perf_session *session);
+int perf_event__synthesize_event_types(perf_event__handler_t process,
+ struct perf_session *session);
+int perf_event__process_event_type(union perf_event *event,
struct perf_session *session);
-int event__process_tracing_data(event_t *self,
- struct perf_session *session);
-
-int event__synthesize_build_id(struct dso *pos, u16 misc,
- event__handler_t process,
- struct machine *machine,
- struct perf_session *session);
-int event__process_build_id(event_t *self, struct perf_session *session);
+int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process,
+ struct perf_session *session);
+int perf_event__process_tracing_data(union perf_event *event,
+ struct perf_session *session);
+
+int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct perf_session *session);
+int perf_event__process_build_id(union perf_event *event,
+ struct perf_session *session);
#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 32f4f1f2f6e4..da2899e8c6f8 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,3 +1,4 @@
+#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
@@ -211,7 +212,9 @@ void hist_entry__free(struct hist_entry *he)
* collapse the histogram
*/
-static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
+static bool hists__collapse_insert_entry(struct hists *self,
+ struct rb_root *root,
+ struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -226,8 +229,11 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
if (!cmp) {
iter->period += he->period;
- if (symbol_conf.use_callchain)
- callchain_merge(iter->callchain, he->callchain);
+ if (symbol_conf.use_callchain) {
+ callchain_cursor_reset(&self->callchain_cursor);
+ callchain_merge(&self->callchain_cursor, iter->callchain,
+ he->callchain);
+ }
hist_entry__free(he);
return false;
}
@@ -262,7 +268,7 @@ void hists__collapse_resort(struct hists *self)
next = rb_next(&n->rb_node);
rb_erase(&n->rb_node, &self->entries);
- if (collapse__insert_entry(&tmp, n))
+ if (hists__collapse_insert_entry(self, &tmp, n))
hists__inc_nr_entries(self, n);
}
@@ -425,7 +431,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
u64 cumul;
child = rb_entry(node, struct callchain_node, rb_node);
- cumul = cumul_hits(child);
+ cumul = callchain_cumul_hits(child);
remaining -= cumul;
/*
@@ -585,6 +591,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
{
struct sort_entry *se;
u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+ u64 nr_events;
const char *sep = symbol_conf.field_sep;
int ret;
@@ -593,6 +600,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
if (pair_hists) {
period = self->pair ? self->pair->period : 0;
+ nr_events = self->pair ? self->pair->nr_events : 0;
total = pair_hists->stats.total_period;
period_sys = self->pair ? self->pair->period_sys : 0;
period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +608,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
period_guest_us = self->pair ? self->pair->period_guest_us : 0;
} else {
period = self->period;
+ nr_events = self->nr_events;
total = session_total;
period_sys = self->period_sys;
period_us = self->period_us;
@@ -640,9 +649,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
if (symbol_conf.show_nr_samples) {
if (sep)
- ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+ ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
else
- ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
+ ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
}
if (pair_hists) {
@@ -944,225 +953,14 @@ void hists__filter_by_thread(struct hists *self, const struct thread *thread)
}
}
-static int symbol__alloc_hist(struct symbol *self)
-{
- struct sym_priv *priv = symbol__priv(self);
- const int size = (sizeof(*priv->hist) +
- (self->end - self->start) * sizeof(u64));
-
- priv->hist = zalloc(size);
- return priv->hist == NULL ? -1 : 0;
-}
-
-int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
-{
- unsigned int sym_size, offset;
- struct symbol *sym = self->ms.sym;
- struct sym_priv *priv;
- struct sym_hist *h;
-
- if (!sym || !self->ms.map)
- return 0;
-
- priv = symbol__priv(sym);
- if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
- return -ENOMEM;
-
- sym_size = sym->end - sym->start;
- offset = ip - sym->start;
-
- pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
-
- if (offset >= sym_size)
- return 0;
-
- h = priv->hist;
- h->sum++;
- h->ip[offset]++;
-
- pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
- "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
- ip, ip - self->ms.sym->start, h->ip[offset]);
- return 0;
-}
-
-static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
-{
- struct objdump_line *self = malloc(sizeof(*self) + privsize);
-
- if (self != NULL) {
- self->offset = offset;
- self->line = line;
- }
-
- return self;
-}
-
-void objdump_line__free(struct objdump_line *self)
-{
- free(self->line);
- free(self);
-}
-
-static void objdump__add_line(struct list_head *head, struct objdump_line *line)
-{
- list_add_tail(&line->node, head);
-}
-
-struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
- struct objdump_line *pos)
-{
- list_for_each_entry_continue(pos, head, node)
- if (pos->offset >= 0)
- return pos;
-
- return NULL;
-}
-
-static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
- struct list_head *head, size_t privsize)
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
- struct symbol *sym = self->ms.sym;
- struct objdump_line *objdump_line;
- char *line = NULL, *tmp, *tmp2, *c;
- size_t line_len;
- s64 line_ip, offset = -1;
-
- if (getline(&line, &line_len, file) < 0)
- return -1;
-
- if (!line)
- return -1;
-
- while (line_len != 0 && isspace(line[line_len - 1]))
- line[--line_len] = '\0';
-
- c = strchr(line, '\n');
- if (c)
- *c = 0;
-
- line_ip = -1;
-
- /*
- * Strip leading spaces:
- */
- tmp = line;
- while (*tmp) {
- if (*tmp != ' ')
- break;
- tmp++;
- }
-
- if (*tmp) {
- /*
- * Parse hexa addresses followed by ':'
- */
- line_ip = strtoull(tmp, &tmp2, 16);
- if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
- line_ip = -1;
- }
-
- if (line_ip != -1) {
- u64 start = map__rip_2objdump(self->ms.map, sym->start),
- end = map__rip_2objdump(self->ms.map, sym->end);
-
- offset = line_ip - start;
- if (offset < 0 || (u64)line_ip > end)
- offset = -1;
- }
-
- objdump_line = objdump_line__new(offset, line, privsize);
- if (objdump_line == NULL) {
- free(line);
- return -1;
- }
- objdump__add_line(head, objdump_line);
-
- return 0;
+ return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
-int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
- size_t privsize)
+int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
- struct symbol *sym = self->ms.sym;
- struct map *map = self->ms.map;
- struct dso *dso = map->dso;
- char *filename = dso__build_id_filename(dso, NULL, 0);
- bool free_filename = true;
- char command[PATH_MAX * 2];
- FILE *file;
- int err = 0;
- u64 len;
- char symfs_filename[PATH_MAX];
-
- if (filename) {
- snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
- symbol_conf.symfs, filename);
- }
-
- if (filename == NULL) {
- if (dso->has_build_id) {
- pr_err("Can't annotate %s: not enough memory\n",
- sym->name);
- return -ENOMEM;
- }
- goto fallback;
- } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
- strstr(command, "[kernel.kallsyms]") ||
- access(symfs_filename, R_OK)) {
- free(filename);
-fallback:
- /*
- * If we don't have build-ids or the build-id file isn't in the
- * cache, or is just a kallsyms file, well, lets hope that this
- * DSO is the same as when 'perf record' ran.
- */
- filename = dso->long_name;
- snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
- symbol_conf.symfs, filename);
- free_filename = false;
- }
-
- if (dso->origin == DSO__ORIG_KERNEL) {
- if (dso->annotate_warned)
- goto out_free_filename;
- err = -ENOENT;
- dso->annotate_warned = 1;
- pr_err("Can't annotate %s: No vmlinux file was found in the "
- "path\n", sym->name);
- goto out_free_filename;
- }
-
- pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
- filename, sym->name, map->unmap_ip(map, sym->start),
- map->unmap_ip(map, sym->end));
-
- len = sym->end - sym->start;
-
- pr_debug("annotating [%p] %30s : [%p] %30s\n",
- dso, dso->long_name, sym, sym->name);
-
- snprintf(command, sizeof(command),
- "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
- map__rip_2objdump(map, sym->start),
- map__rip_2objdump(map, sym->end),
- symfs_filename, filename);
-
- pr_debug("Executing: %s\n", command);
-
- file = popen(command, "r");
- if (!file)
- goto out_free_filename;
-
- while (!feof(file))
- if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
- break;
-
- pclose(file);
-out_free_filename:
- if (free_filename)
- free(filename);
- return err;
+ return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
void hists__inc_nr_events(struct hists *self, u32 type)
@@ -1177,7 +975,7 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
size_t ret = 0;
for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
- const char *name = event__get_event_name(i);
+ const char *name = perf_event__name(i);
if (!strcmp(name, "UNKNOWN"))
continue;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index ee789856a8c9..37c79089de09 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -9,33 +9,6 @@ extern struct callchain_param callchain_param;
struct hist_entry;
struct addr_location;
struct symbol;
-struct rb_root;
-
-struct objdump_line {
- struct list_head node;
- s64 offset;
- char *line;
-};
-
-void objdump_line__free(struct objdump_line *self);
-struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
- struct objdump_line *pos);
-
-struct sym_hist {
- u64 sum;
- u64 ip[0];
-};
-
-struct sym_ext {
- struct rb_node node;
- double percent;
- char *path;
-};
-
-struct sym_priv {
- struct sym_hist *hist;
- struct sym_ext *ext;
-};
/*
* The kernel collects the number of events it couldn't send in a stretch and
@@ -77,6 +50,8 @@ struct hists {
u64 event_stream;
u32 type;
u16 col_len[HISTC_NR_COLS];
+ /* Best would be to reuse the session callchain cursor */
+ struct callchain_cursor callchain_cursor;
};
struct hist_entry *__hists__add_entry(struct hists *self,
@@ -102,9 +77,8 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
size_t hists__fprintf(struct hists *self, struct hists *pair,
bool show_displacement, FILE *fp);
-int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
-int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
- size_t privsize);
+int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
+int hist_entry__annotate(struct hist_entry *self, size_t privsize);
void hists__filter_by_dso(struct hists *self, const struct dso *dso);
void hists__filter_by_thread(struct hists *self, const struct thread *thread);
@@ -116,18 +90,20 @@ bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
#ifdef NO_NEWT_SUPPORT
static inline int hists__browse(struct hists *self __used,
const char *helpline __used,
- const char *ev_name __used)
+ const char *ev_name __used, int evidx __used)
{
return 0;
}
static inline int hists__tui_browse_tree(struct rb_root *self __used,
- const char *help __used)
+ const char *help __used,
+ int evidx __used)
{
return 0;
}
-static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
+static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
+ int evidx __used)
{
return 0;
}
@@ -136,13 +112,13 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
#else
#include <newt.h>
int hists__browse(struct hists *self, const char *helpline,
- const char *ev_name);
-int hist_entry__tui_annotate(struct hist_entry *self);
+ const char *ev_name, int evidx);
+int hist_entry__tui_annotate(struct hist_entry *self, int evidx);
#define KEY_LEFT NEWT_KEY_LEFT
#define KEY_RIGHT NEWT_KEY_RIGHT
-int hists__tui_browse_tree(struct rb_root *self, const char *help);
+int hists__tui_browse_tree(struct rb_root *self, const char *help, int evidx);
#endif
unsigned int hists__sort_list_width(struct hists *self);
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
index f5ca26e53fbb..356c7e467b83 100644
--- a/tools/perf/util/include/linux/list.h
+++ b/tools/perf/util/include/linux/list.h
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
#include "../../../../include/linux/list.h"
#ifndef PERF_LIST_H
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 135f69baf966..54a7e2634d58 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1,6 +1,7 @@
#include "../../../include/linux/hw_breakpoint.h"
#include "util.h"
#include "../perf.h"
+#include "evlist.h"
#include "evsel.h"
#include "parse-options.h"
#include "parse-events.h"
@@ -11,10 +12,6 @@
#include "header.h"
#include "debugfs.h"
-int nr_counters;
-
-LIST_HEAD(evsel_list);
-
struct event_symbol {
u8 type;
u64 config;
@@ -271,6 +268,9 @@ const char *event_name(struct perf_evsel *evsel)
u64 config = evsel->attr.config;
int type = evsel->attr.type;
+ if (evsel->name)
+ return evsel->name;
+
return __event_name(type, config);
}
@@ -449,8 +449,8 @@ parse_single_tracepoint_event(char *sys_name,
/* sys + ':' + event + ':' + flags*/
#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
static enum event_result
-parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
- char *flags)
+parse_multiple_tracepoint_event(const struct option *opt, char *sys_name,
+ const char *evt_exp, char *flags)
{
char evt_path[MAXPATHLEN];
struct dirent *evt_ent;
@@ -483,15 +483,16 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
if (len < 0)
return EVT_FAILED;
- if (parse_events(NULL, event_opt, 0))
+ if (parse_events(opt, event_opt, 0))
return EVT_FAILED;
}
return EVT_HANDLED_ALL;
}
-static enum event_result parse_tracepoint_event(const char **strp,
- struct perf_event_attr *attr)
+static enum event_result
+parse_tracepoint_event(const struct option *opt, const char **strp,
+ struct perf_event_attr *attr)
{
const char *evt_name;
char *flags = NULL, *comma_loc;
@@ -530,7 +531,7 @@ static enum event_result parse_tracepoint_event(const char **strp,
return EVT_FAILED;
if (strpbrk(evt_name, "*?")) {
*strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
- return parse_multiple_tracepoint_event(sys_name, evt_name,
+ return parse_multiple_tracepoint_event(opt, sys_name, evt_name,
flags);
} else {
return parse_single_tracepoint_event(sys_name, evt_name,
@@ -740,11 +741,12 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
* Symbolic names are (almost) exactly matched.
*/
static enum event_result
-parse_event_symbols(const char **str, struct perf_event_attr *attr)
+parse_event_symbols(const struct option *opt, const char **str,
+ struct perf_event_attr *attr)
{
enum event_result ret;
- ret = parse_tracepoint_event(str, attr);
+ ret = parse_tracepoint_event(opt, str, attr);
if (ret != EVT_FAILED)
goto modifier;
@@ -778,14 +780,17 @@ modifier:
return ret;
}
-int parse_events(const struct option *opt __used, const char *str, int unset __used)
+int parse_events(const struct option *opt, const char *str, int unset __used)
{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
struct perf_event_attr attr;
enum event_result ret;
+ const char *ostr;
for (;;) {
+ ostr = str;
memset(&attr, 0, sizeof(attr));
- ret = parse_event_symbols(&str, &attr);
+ ret = parse_event_symbols(opt, &str, &attr);
if (ret == EVT_FAILED)
return -1;
@@ -794,12 +799,15 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
if (ret != EVT_HANDLED_ALL) {
struct perf_evsel *evsel;
- evsel = perf_evsel__new(&attr,
- nr_counters);
+ evsel = perf_evsel__new(&attr, evlist->nr_entries);
if (evsel == NULL)
return -1;
- list_add_tail(&evsel->node, &evsel_list);
- ++nr_counters;
+ perf_evlist__add(evlist, evsel);
+
+ evsel->name = calloc(str - ostr + 1, 1);
+ if (!evsel->name)
+ return -1;
+ strncpy(evsel->name, ostr, str - ostr);
}
if (*str == 0)
@@ -813,13 +821,14 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
return 0;
}
-int parse_filter(const struct option *opt __used, const char *str,
+int parse_filter(const struct option *opt, const char *str,
int unset __used)
{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
struct perf_evsel *last = NULL;
- if (!list_empty(&evsel_list))
- last = list_entry(evsel_list.prev, struct perf_evsel, node);
+ if (evlist->nr_entries > 0)
+ last = list_entry(evlist->entries.prev, struct perf_evsel, node);
if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
fprintf(stderr,
@@ -849,7 +858,7 @@ static const char * const event_type_descriptors[] = {
* Print the events from <debugfs_mount_point>/tracing/events
*/
-static void print_tracepoint_events(void)
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
{
DIR *sys_dir, *evt_dir;
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
@@ -864,6 +873,9 @@ static void print_tracepoint_events(void)
return;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ if (subsys_glob != NULL &&
+ !strglobmatch(sys_dirent.d_name, subsys_glob))
+ continue;
snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
sys_dirent.d_name);
@@ -872,6 +884,10 @@ static void print_tracepoint_events(void)
continue;
for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ if (event_glob != NULL &&
+ !strglobmatch(evt_dirent.d_name, event_glob))
+ continue;
+
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent.d_name, evt_dirent.d_name);
printf(" %-42s [%s]\n", evt_path,
@@ -923,13 +939,61 @@ int is_valid_tracepoint(const char *event_string)
return 0;
}
+void print_events_type(u8 type)
+{
+ struct event_symbol *syms = event_symbols;
+ unsigned int i;
+ char name[64];
+
+ for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
+ if (type != syms->type)
+ continue;
+
+ if (strlen(syms->alias))
+ snprintf(name, sizeof(name), "%s OR %s",
+ syms->symbol, syms->alias);
+ else
+ snprintf(name, sizeof(name), "%s", syms->symbol);
+
+ printf(" %-42s [%s]\n", name,
+ event_type_descriptors[type]);
+ }
+}
+
+int print_hwcache_events(const char *event_glob)
+{
+ unsigned int type, op, i, printed = 0;
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ char *name = event_cache_name(type, op, i);
+
+ if (event_glob != NULL &&
+ !strglobmatch(name, event_glob))
+ continue;
+
+ printf(" %-42s [%s]\n", name,
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ ++printed;
+ }
+ }
+ }
+
+ return printed;
+}
+
/*
* Print the help text for the event symbols:
*/
-void print_events(void)
+void print_events(const char *event_glob)
{
struct event_symbol *syms = event_symbols;
- unsigned int i, type, op, prev_type = -1;
+ unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
char name[40];
printf("\n");
@@ -938,8 +1002,16 @@ void print_events(void)
for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
type = syms->type;
- if (type != prev_type)
+ if (type != prev_type && printed) {
printf("\n");
+ printed = 0;
+ ntypes_printed++;
+ }
+
+ if (event_glob != NULL &&
+ !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
+ continue;
if (strlen(syms->alias))
sprintf(name, "%s OR %s", syms->symbol, syms->alias);
@@ -949,22 +1021,17 @@ void print_events(void)
event_type_descriptors[type]);
prev_type = type;
+ ++printed;
}
- printf("\n");
- for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
- for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
- /* skip invalid cache type */
- if (!is_cache_op_valid(type, op))
- continue;
-
- for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
- printf(" %-42s [%s]\n",
- event_cache_name(type, op, i),
- event_type_descriptors[PERF_TYPE_HW_CACHE]);
- }
- }
+ if (ntypes_printed) {
+ printed = 0;
+ printf("\n");
}
+ print_hwcache_events(event_glob);
+
+ if (event_glob != NULL)
+ return;
printf("\n");
printf(" %-42s [%s]\n",
@@ -977,37 +1044,7 @@ void print_events(void)
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
printf("\n");
- print_tracepoint_events();
+ print_tracepoint_events(NULL, NULL);
exit(129);
}
-
-int perf_evsel_list__create_default(void)
-{
- struct perf_evsel *evsel;
- struct perf_event_attr attr;
-
- memset(&attr, 0, sizeof(attr));
- attr.type = PERF_TYPE_HARDWARE;
- attr.config = PERF_COUNT_HW_CPU_CYCLES;
-
- evsel = perf_evsel__new(&attr, 0);
-
- if (evsel == NULL)
- return -ENOMEM;
-
- list_add(&evsel->node, &evsel_list);
- ++nr_counters;
- return 0;
-}
-
-void perf_evsel_list__delete(void)
-{
- struct perf_evsel *pos, *n;
-
- list_for_each_entry_safe(pos, n, &evsel_list, node) {
- list_del_init(&pos->node);
- perf_evsel__delete(pos);
- }
- nr_counters = 0;
-}
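
The listing helpers added above take shell-style globs, so callers can narrow the output instead of filtering it afterwards. A minimal usage sketch follows; the glob strings are invented for illustration, while the function signatures are the ones declared in this patch:

	/* List only the L1 cache events, then the tracepoints of the
	 * "sched" subsystem; a NULL glob means "no filtering" for that
	 * argument. */
	print_hwcache_events("L1-*");
	print_tracepoint_events("sched", NULL);
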
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 458e3ecf17af..212f88e07a9c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -9,11 +9,6 @@
struct list_head;
struct perf_evsel;
-extern struct list_head evsel_list;
-
-int perf_evsel_list__create_default(void);
-void perf_evsel_list__delete(void);
-
struct option;
struct tracepoint_path {
@@ -25,8 +20,6 @@ struct tracepoint_path {
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
extern bool have_tracepoints(struct list_head *evlist);
-extern int nr_counters;
-
const char *event_name(struct perf_evsel *event);
extern const char *__event_name(int type, u64 config);
@@ -35,7 +28,10 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
#define EVENTS_HELP_MAX (128*1024)
-extern void print_events(void);
+void print_events(const char *event_glob);
+void print_events_type(u8 type);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
+int print_hwcache_events(const char *event_glob);
extern int is_valid_tracepoint(const char *event_string);
extern char debugfs_path[];
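
With the evsel_list and nr_counters globals removed, parse_events() and parse_filter() reach the event list through opt->value. A hedged sketch of the wiring a builtin is expected to provide (the option text and variable names here are illustrative, not taken from this patch):

	/* The builtin owns the evlist and passes its address as the option
	 * value; parse_events() dereferences opt->value to find it. */
	static struct perf_evlist *evsel_list;

	static const struct option options[] = {
		OPT_CALLBACK('e', "event", &evsel_list, "event",
			     "event selector. use 'perf list' to list available events",
			     parse_events),
		OPT_END()
	};
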
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 6e29d9c9dccc..5ddee66020a7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -31,6 +31,7 @@
#include <string.h>
#include <stdarg.h>
#include <limits.h>
+#include <elf.h>
#undef _GNU_SOURCE
#include "util.h"
@@ -111,7 +112,25 @@ static struct symbol *__find_kernel_function_by_name(const char *name,
NULL);
}
-const char *kernel_get_module_path(const char *module)
+static struct map *kernel_get_module_map(const char *module)
+{
+ struct rb_node *nd;
+ struct map_groups *grp = &machine.kmaps;
+
+ if (!module)
+ module = "kernel";
+
+ for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ if (strncmp(pos->dso->short_name + 1, module,
+ pos->dso->short_name_len - 2) == 0) {
+ return pos;
+ }
+ }
+ return NULL;
+}
+
+static struct dso *kernel_get_module_dso(const char *module)
{
struct dso *dso;
struct map *map;
@@ -141,7 +160,13 @@ const char *kernel_get_module_path(const char *module)
}
}
found:
- return dso->long_name;
+ return dso;
+}
+
+const char *kernel_get_module_path(const char *module)
+{
+ struct dso *dso = kernel_get_module_dso(module);
+ return (dso) ? dso->long_name : NULL;
}
#ifdef DWARF_SUPPORT
@@ -384,7 +409,7 @@ int show_line_range(struct line_range *lr, const char *module)
setup_pager();
if (lr->function)
- fprintf(stdout, "<%s:%d>\n", lr->function,
+ fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path,
lr->start - lr->offset);
else
fprintf(stdout, "<%s:%d>\n", lr->path, lr->start);
@@ -426,12 +451,14 @@ end:
}
static int show_available_vars_at(int fd, struct perf_probe_event *pev,
- int max_vls, bool externs)
+ int max_vls, struct strfilter *_filter,
+ bool externs)
{
char *buf;
- int ret, i;
+ int ret, i, nvars;
struct str_node *node;
struct variable_list *vls = NULL, *vl;
+ const char *var;
buf = synthesize_perf_probe_point(&pev->point);
if (!buf)
@@ -439,36 +466,45 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev,
pr_debug("Searching variables at %s\n", buf);
ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
- if (ret > 0) {
- /* Some variables were found */
- fprintf(stdout, "Available variables at %s\n", buf);
- for (i = 0; i < ret; i++) {
- vl = &vls[i];
- /*
- * A probe point might be converted to
- * several trace points.
- */
- fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
- vl->point.offset);
- free(vl->point.symbol);
- if (vl->vars) {
- strlist__for_each(node, vl->vars)
+ if (ret <= 0) {
+ pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+ goto end;
+ }
+ /* Some variables are found */
+ fprintf(stdout, "Available variables at %s\n", buf);
+ for (i = 0; i < ret; i++) {
+ vl = &vls[i];
+ /*
+ * A probe point might be converted to
+ * several trace points.
+ */
+ fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+ vl->point.offset);
+ free(vl->point.symbol);
+ nvars = 0;
+ if (vl->vars) {
+ strlist__for_each(node, vl->vars) {
+ var = strchr(node->s, '\t') + 1;
+ if (strfilter__compare(_filter, var)) {
fprintf(stdout, "\t\t%s\n", node->s);
- strlist__delete(vl->vars);
- } else
- fprintf(stdout, "(No variables)\n");
+ nvars++;
+ }
+ }
+ strlist__delete(vl->vars);
}
- free(vls);
- } else
- pr_err("Failed to find variables at %s (%d)\n", buf, ret);
-
+ if (nvars == 0)
+ fprintf(stdout, "\t\t(No matched variables)\n");
+ }
+ free(vls);
+end:
free(buf);
return ret;
}
/* Show available variables on given probe point */
int show_available_vars(struct perf_probe_event *pevs, int npevs,
- int max_vls, const char *module, bool externs)
+ int max_vls, const char *module,
+ struct strfilter *_filter, bool externs)
{
int i, fd, ret = 0;
@@ -485,7 +521,8 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
setup_pager();
for (i = 0; i < npevs && ret >= 0; i++)
- ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
+ ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter,
+ externs);
close(fd);
return ret;
@@ -531,7 +568,9 @@ int show_line_range(struct line_range *lr __unused, const char *module __unused)
int show_available_vars(struct perf_probe_event *pevs __unused,
int npevs __unused, int max_vls __unused,
- const char *module __unused, bool externs __unused)
+ const char *module __unused,
+ struct strfilter *filter __unused,
+ bool externs __unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
@@ -556,11 +595,11 @@ static int parse_line_num(char **ptr, int *val, const char *what)
* The line range syntax is described by:
*
* SRC[:SLN[+NUM|-ELN]]
- * FNC[:SLN[+NUM|-ELN]]
+ * FNC[@SRC][:SLN[+NUM|-ELN]]
*/
int parse_line_range_desc(const char *arg, struct line_range *lr)
{
- char *range, *name = strdup(arg);
+ char *range, *file, *name = strdup(arg);
int err;
if (!name)
@@ -610,7 +649,16 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
}
}
- if (strchr(name, '.'))
+ file = strchr(name, '@');
+ if (file) {
+ *file = '\0';
+ lr->file = strdup(++file);
+ if (lr->file == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+ lr->function = name;
+ } else if (strchr(name, '.'))
lr->file = name;
else
lr->function = name;
@@ -1784,9 +1832,12 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
}
/* Loop 2: add all events */
- for (i = 0; i < npevs && ret >= 0; i++)
+ for (i = 0; i < npevs; i++) {
ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
pkgs[i].ntevs, force_add);
+ if (ret < 0)
+ break;
+ }
end:
/* Loop 3: cleanup and free trace events */
for (i = 0; i < npevs; i++) {
@@ -1912,4 +1963,46 @@ int del_perf_probe_events(struct strlist *dellist)
return ret;
}
+/* TODO: don't use a global variable for filter ... */
+static struct strfilter *available_func_filter;
+
+/*
+ * If a symbol corresponds to a function with global binding and
+ * matches filter return 0. For all others return 1.
+ */
+static int filter_available_functions(struct map *map __unused,
+ struct symbol *sym)
+{
+ if (sym->binding == STB_GLOBAL &&
+ strfilter__compare(available_func_filter, sym->name))
+ return 0;
+ return 1;
+}
+
+int show_available_funcs(const char *module, struct strfilter *_filter)
+{
+ struct map *map;
+ int ret;
+
+ setup_pager();
+
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+ map = kernel_get_module_map(module);
+ if (!map) {
+ pr_err("Failed to find %s map.\n", (module) ? : "kernel");
+ return -EINVAL;
+ }
+ available_func_filter = _filter;
+ if (map__load(map, filter_available_functions)) {
+ pr_err("Failed to load map.\n");
+ return -EINVAL;
+ }
+ if (!dso__sorted_by_name(map->dso, map->type))
+ dso__sort_by_name(map->dso, map->type);
+
+ dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+ return 0;
+}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 5accbedfea37..3434fc9d79d5 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -3,6 +3,7 @@
#include <stdbool.h>
#include "strlist.h"
+#include "strfilter.h"
extern bool probe_event_dry_run;
@@ -126,7 +127,8 @@ extern int show_perf_probe_events(void);
extern int show_line_range(struct line_range *lr, const char *module);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
- bool externs);
+ struct strfilter *filter, bool externs);
+extern int show_available_funcs(const char *module, struct strfilter *filter);
/* Maximum index number of event-name postfix */
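
A brief, hedged example of driving the new probe-event entry point; the filter is assumed to have been built elsewhere from user input (for instance from a --filter argument), since its construction is outside this patch:

	/* Print the kernel's global functions that pass the name filter,
	 * as a --funcs style listing would.  NULL selects the kernel map
	 * rather than a module. */
	extern struct strfilter *funcs_filter;	/* assumed, built elsewhere */

	show_available_funcs(NULL, funcs_filter);
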
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index ab83b6ac5d65..17f9c4a66ddd 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -33,6 +33,7 @@
#include <ctype.h>
#include <dwarf-regs.h>
+#include <linux/bitops.h>
#include "event.h"
#include "debug.h"
#include "util.h"
@@ -280,6 +281,19 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
return name ? (strcmp(tname, name) == 0) : false;
}
+/* Get callsite line number of inline-function instance */
+static int die_get_call_lineno(Dwarf_Die *in_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
+ return -ENOENT;
+
+ dwarf_formudata(&attr, &ret);
+ return (int)ret;
+}
+
/* Get type die */
static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
{
@@ -320,13 +334,23 @@ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
return vr_die;
}
-static bool die_is_signed_type(Dwarf_Die *tp_die)
+static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Word *result)
{
Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formudata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+static bool die_is_signed_type(Dwarf_Die *tp_die)
+{
Dwarf_Word ret;
- if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
- dwarf_formudata(&attr, &ret) != 0)
+ if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
return false;
return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
@@ -335,11 +359,29 @@ static bool die_is_signed_type(Dwarf_Die *tp_die)
static int die_get_byte_size(Dwarf_Die *tp_die)
{
- Dwarf_Attribute attr;
Dwarf_Word ret;
- if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
- dwarf_formudata(&attr, &ret) != 0)
+ if (die_get_attr_udata(tp_die, DW_AT_byte_size, &ret))
+ return 0;
+
+ return (int)ret;
+}
+
+static int die_get_bit_size(Dwarf_Die *tp_die)
+{
+ Dwarf_Word ret;
+
+ if (die_get_attr_udata(tp_die, DW_AT_bit_size, &ret))
+ return 0;
+
+ return (int)ret;
+}
+
+static int die_get_bit_offset(Dwarf_Die *tp_die)
+{
+ Dwarf_Word ret;
+
+ if (die_get_attr_udata(tp_die, DW_AT_bit_offset, &ret))
return 0;
return (int)ret;
@@ -458,6 +500,151 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
}
+/* Walker on lines (Note: line number will not be sorted) */
+typedef int (* line_walk_handler_t) (const char *fname, int lineno,
+ Dwarf_Addr addr, void *data);
+
+struct __line_walk_param {
+ const char *fname;
+ line_walk_handler_t handler;
+ void *data;
+ int retval;
+};
+
+static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+ Dwarf_Addr addr;
+ int lineno;
+
+ if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
+ lineno = die_get_call_lineno(in_die);
+ if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ lw->retval = lw->handler(lw->fname, lineno, addr,
+ lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_FOUND;
+ }
+ }
+ return DIE_FIND_CB_SIBLING;
+}
+
+/* Walk on lines of blocks included in given DIE */
+static int __die_walk_funclines(Dwarf_Die *sp_die,
+ line_walk_handler_t handler, void *data)
+{
+ struct __line_walk_param lw = {
+ .handler = handler,
+ .data = data,
+ .retval = 0,
+ };
+ Dwarf_Die die_mem;
+ Dwarf_Addr addr;
+ int lineno;
+
+ /* Handle function declaration line */
+ lw.fname = dwarf_decl_file(sp_die);
+ if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
+ dwarf_entrypc(sp_die, &addr) == 0) {
+ lw.retval = handler(lw.fname, lineno, addr, data);
+ if (lw.retval != 0)
+ goto done;
+ }
+ die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
+done:
+ return lw.retval;
+}
+
+static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+
+ lw->retval = __die_walk_funclines(sp_die, lw->handler, lw->data);
+ if (lw->retval != 0)
+ return DWARF_CB_ABORT;
+
+ return DWARF_CB_OK;
+}
+
+/*
+ * Walk on lines inside given PDIE. If the PDIE is subprogram, walk only on
+ * the lines inside the subprogram, otherwise PDIE must be a CU DIE.
+ */
+static int die_walk_lines(Dwarf_Die *pdie, line_walk_handler_t handler,
+ void *data)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno, ret = 0;
+ Dwarf_Die die_mem, *cu_die;
+ size_t nlines, i;
+
+ /* Get the CU die */
+ if (dwarf_tag(pdie) == DW_TAG_subprogram)
+ cu_die = dwarf_diecu(pdie, &die_mem, NULL, NULL);
+ else
+ cu_die = pdie;
+ if (!cu_die) {
+ pr_debug2("Failed to get CU from subprogram\n");
+ return -EINVAL;
+ }
+
+ /* Get lines list in the CU */
+ if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
+ pr_debug2("Failed to get source lines on this CU.\n");
+ return -ENOENT;
+ }
+ pr_debug2("Get %zd lines from this CU\n", nlines);
+
+ /* Walk on the lines on lines list */
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ if (line == NULL ||
+ dwarf_lineno(line, &lineno) != 0 ||
+ dwarf_lineaddr(line, &addr) != 0) {
+ pr_debug2("Failed to get line info. "
+ "Possible error in debuginfo.\n");
+ continue;
+ }
+ /* Filter lines based on address */
+ if (pdie != cu_die)
+ /*
+ * Address filtering
+ * The line is included in given function, and
+ * no inline block includes it.
+ */
+ if (!dwarf_haspc(pdie, addr) ||
+ die_find_inlinefunc(pdie, addr, &die_mem))
+ continue;
+ /* Get source line */
+ fname = dwarf_linesrc(line, NULL, NULL);
+
+ ret = handler(fname, lineno, addr, data);
+ if (ret != 0)
+ return ret;
+ }
+
+ /*
+ * Dwarf lines doesn't include function declarations and inlined
+ * subroutines. We have to check functions list or given function.
+ */
+ if (pdie != cu_die)
+ ret = __die_walk_funclines(pdie, handler, data);
+ else {
+ struct __line_walk_param param = {
+ .handler = handler,
+ .data = data,
+ .retval = 0,
+ };
+ dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0);
+ ret = param.retval;
+ }
+
+ return ret;
+}
+
struct __find_variable_param {
const char *name;
Dwarf_Addr addr;
@@ -669,6 +856,8 @@ static_var:
return 0;
}
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
+
static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg *tvar,
const char *cast)
@@ -685,6 +874,14 @@ static int convert_variable_type(Dwarf_Die *vr_die,
return (tvar->type == NULL) ? -ENOMEM : 0;
}
+ if (die_get_bit_size(vr_die) != 0) {
+ /* This is a bitfield */
+ ret = snprintf(buf, 16, "b%d@%d/%zd", die_get_bit_size(vr_die),
+ die_get_bit_offset(vr_die),
+ BYTES_TO_BITS(die_get_byte_size(vr_die)));
+ goto formatted;
+ }
+
if (die_get_real_type(vr_die, &type) == NULL) {
pr_warning("Failed to get a type information of %s.\n",
dwarf_diename(vr_die));
@@ -729,29 +926,31 @@ static int convert_variable_type(Dwarf_Die *vr_die,
return (tvar->type == NULL) ? -ENOMEM : 0;
}
- ret = die_get_byte_size(&type) * 8;
- if (ret) {
- /* Check the bitwidth */
- if (ret > MAX_BASIC_TYPE_BITS) {
- pr_info("%s exceeds max-bitwidth."
- " Cut down to %d bits.\n",
- dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
- ret = MAX_BASIC_TYPE_BITS;
- }
+ ret = BYTES_TO_BITS(die_get_byte_size(&type));
+ if (!ret)
+ /* No size ... try to use default type */
+ return 0;
- ret = snprintf(buf, 16, "%c%d",
- die_is_signed_type(&type) ? 's' : 'u', ret);
- if (ret < 0 || ret >= 16) {
- if (ret >= 16)
- ret = -E2BIG;
- pr_warning("Failed to convert variable type: %s\n",
- strerror(-ret));
- return ret;
- }
- tvar->type = strdup(buf);
- if (tvar->type == NULL)
- return -ENOMEM;
+ /* Check the bitwidth */
+ if (ret > MAX_BASIC_TYPE_BITS) {
+ pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n",
+ dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
+ ret = MAX_BASIC_TYPE_BITS;
+ }
+ ret = snprintf(buf, 16, "%c%d",
+ die_is_signed_type(&type) ? 's' : 'u', ret);
+
+formatted:
+ if (ret < 0 || ret >= 16) {
+ if (ret >= 16)
+ ret = -E2BIG;
+ pr_warning("Failed to convert variable type: %s\n",
+ strerror(-ret));
+ return ret;
}
+ tvar->type = strdup(buf);
+ if (tvar->type == NULL)
+ return -ENOMEM;
return 0;
}
@@ -1050,157 +1249,102 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
return ret;
}
-/* Find probe point from its line number */
-static int find_probe_point_by_line(struct probe_finder *pf)
+static int probe_point_line_walker(const char *fname, int lineno,
+ Dwarf_Addr addr, void *data)
{
- Dwarf_Lines *lines;
- Dwarf_Line *line;
- size_t nlines, i;
- Dwarf_Addr addr;
- int lineno;
- int ret = 0;
-
- if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
- pr_warning("No source lines found.\n");
- return -ENOENT;
- }
+ struct probe_finder *pf = data;
+ int ret;
- for (i = 0; i < nlines && ret == 0; i++) {
- line = dwarf_onesrcline(lines, i);
- if (dwarf_lineno(line, &lineno) != 0 ||
- lineno != pf->lno)
- continue;
+ if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
+ return 0;
- /* TODO: Get fileno from line, but how? */
- if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
- continue;
+ pf->addr = addr;
+ ret = call_probe_finder(NULL, pf);
- if (dwarf_lineaddr(line, &addr) != 0) {
- pr_warning("Failed to get the address of the line.\n");
- return -ENOENT;
- }
- pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
- (int)i, lineno, (uintmax_t)addr);
- pf->addr = addr;
+ /* Continue if no error, because the line will be in inline function */
+ return ret < 0 ? ret : 0;
+}
- ret = call_probe_finder(NULL, pf);
- /* Continuing, because target line might be inlined. */
- }
- return ret;
+/* Find probe point from its line number */
+static int find_probe_point_by_line(struct probe_finder *pf)
+{
+ return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf);
}
/* Find lines which match lazy pattern */
static int find_lazy_match_lines(struct list_head *head,
const char *fname, const char *pat)
{
- char *fbuf, *p1, *p2;
- int fd, line, nlines = -1;
- struct stat st;
-
- fd = open(fname, O_RDONLY);
- if (fd < 0) {
- pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
+ FILE *fp;
+ char *line = NULL;
+ size_t line_len;
+ ssize_t len;
+ int count = 0, linenum = 1;
+
+ fp = fopen(fname, "r");
+ if (!fp) {
+ pr_warning("Failed to open %s: %s\n", fname, strerror(errno));
return -errno;
}
- if (fstat(fd, &st) < 0) {
- pr_warning("Failed to get the size of %s: %s\n",
- fname, strerror(errno));
- nlines = -errno;
- goto out_close;
- }
-
- nlines = -ENOMEM;
- fbuf = malloc(st.st_size + 2);
- if (fbuf == NULL)
- goto out_close;
- if (read(fd, fbuf, st.st_size) < 0) {
- pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
- nlines = -errno;
- goto out_free_fbuf;
- }
- fbuf[st.st_size] = '\n'; /* Dummy line */
- fbuf[st.st_size + 1] = '\0';
- p1 = fbuf;
- line = 1;
- nlines = 0;
- while ((p2 = strchr(p1, '\n')) != NULL) {
- *p2 = '\0';
- if (strlazymatch(p1, pat)) {
- line_list__add_line(head, line);
- nlines++;
+ while ((len = getline(&line, &line_len, fp)) > 0) {
+
+ if (line[len - 1] == '\n')
+ line[len - 1] = '\0';
+
+ if (strlazymatch(line, pat)) {
+ line_list__add_line(head, linenum);
+ count++;
}
- line++;
- p1 = p2 + 1;
+ linenum++;
}
-out_free_fbuf:
- free(fbuf);
-out_close:
- close(fd);
- return nlines;
+
+ if (ferror(fp))
+ count = -errno;
+ free(line);
+ fclose(fp);
+
+ if (count == 0)
+ pr_debug("No matched lines found in %s.\n", fname);
+ return count;
+}
+
+static int probe_point_lazy_walker(const char *fname, int lineno,
+ Dwarf_Addr addr, void *data)
+{
+ struct probe_finder *pf = data;
+ int ret;
+
+ if (!line_list__has_line(&pf->lcache, lineno) ||
+ strtailcmp(fname, pf->fname) != 0)
+ return 0;
+
+ pr_debug("Probe line found: line:%d addr:0x%llx\n",
+ lineno, (unsigned long long)addr);
+ pf->addr = addr;
+ ret = call_probe_finder(NULL, pf);
+
+ /*
+ * Continue if no error, because the lazy pattern will match
+ * to other lines
+ */
+ return ret < 0 ?: 0;
}
/* Find probe points from lazy pattern */
static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
{
- Dwarf_Lines *lines;
- Dwarf_Line *line;
- size_t nlines, i;
- Dwarf_Addr addr;
- Dwarf_Die die_mem;
- int lineno;
int ret = 0;
if (list_empty(&pf->lcache)) {
/* Matching lazy line pattern */
ret = find_lazy_match_lines(&pf->lcache, pf->fname,
pf->pev->point.lazy_line);
- if (ret == 0) {
- pr_debug("No matched lines found in %s.\n", pf->fname);
- return 0;
- } else if (ret < 0)
+ if (ret <= 0)
return ret;
}
- if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
- pr_warning("No source lines found.\n");
- return -ENOENT;
- }
-
- for (i = 0; i < nlines && ret >= 0; i++) {
- line = dwarf_onesrcline(lines, i);
-
- if (dwarf_lineno(line, &lineno) != 0 ||
- !line_list__has_line(&pf->lcache, lineno))
- continue;
-
- /* TODO: Get fileno from line, but how? */
- if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
- continue;
-
- if (dwarf_lineaddr(line, &addr) != 0) {
- pr_debug("Failed to get the address of line %d.\n",
- lineno);
- continue;
- }
- if (sp_die) {
- /* Address filtering 1: does sp_die include addr? */
- if (!dwarf_haspc(sp_die, addr))
- continue;
- /* Address filtering 2: No child include addr? */
- if (die_find_inlinefunc(sp_die, addr, &die_mem))
- continue;
- }
-
- pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
- (int)i, lineno, (unsigned long long)addr);
- pf->addr = addr;
-
- ret = call_probe_finder(sp_die, pf);
- /* Continuing, because target line might be inlined. */
- }
- /* TODO: deallocate lines, but how? */
- return ret;
+ return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
}
/* Callback parameter with return value */
@@ -1318,8 +1462,7 @@ static int find_probes(int fd, struct probe_finder *pf)
off = 0;
line_list__init(&pf->lcache);
/* Loop on CUs (Compilation Unit) */
- while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
- ret >= 0) {
+ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
/* Get the DIE(Debugging Information Entry) of this CU */
diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
if (!diep)
@@ -1340,6 +1483,8 @@ static int find_probes(int fd, struct probe_finder *pf)
pf->lno = pp->line;
ret = find_probe_point_by_line(pf);
}
+ if (ret < 0)
+ break;
}
off = noff;
}
@@ -1644,91 +1789,28 @@ static int line_range_add_line(const char *src, unsigned int lineno,
return line_list__add_line(&lr->line_list, lineno);
}
-/* Search function declaration lines */
-static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
+static int line_range_walk_cb(const char *fname, int lineno,
+ Dwarf_Addr addr __used,
+ void *data)
{
- struct dwarf_callback_param *param = data;
- struct line_finder *lf = param->data;
- const char *src;
- int lineno;
+ struct line_finder *lf = data;
- src = dwarf_decl_file(sp_die);
- if (src && strtailcmp(src, lf->fname) != 0)
- return DWARF_CB_OK;
-
- if (dwarf_decl_line(sp_die, &lineno) != 0 ||
+ if ((strtailcmp(fname, lf->fname) != 0) ||
(lf->lno_s > lineno || lf->lno_e < lineno))
- return DWARF_CB_OK;
+ return 0;
- param->retval = line_range_add_line(src, lineno, lf->lr);
- if (param->retval < 0)
- return DWARF_CB_ABORT;
- return DWARF_CB_OK;
-}
+ if (line_range_add_line(fname, lineno, lf->lr) < 0)
+ return -EINVAL;
-static int find_line_range_func_decl_lines(struct line_finder *lf)
-{
- struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
- dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, &param, 0);
- return param.retval;
+ return 0;
}
/* Find line range from its line number */
static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
{
- Dwarf_Lines *lines;
- Dwarf_Line *line;
- size_t nlines, i;
- Dwarf_Addr addr;
- int lineno, ret = 0;
- const char *src;
- Dwarf_Die die_mem;
-
- line_list__init(&lf->lr->line_list);
- if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
- pr_warning("No source lines found.\n");
- return -ENOENT;
- }
-
- /* Search probable lines on lines list */
- for (i = 0; i < nlines; i++) {
- line = dwarf_onesrcline(lines, i);
- if (dwarf_lineno(line, &lineno) != 0 ||
- (lf->lno_s > lineno || lf->lno_e < lineno))
- continue;
-
- if (sp_die) {
- /* Address filtering 1: does sp_die include addr? */
- if (dwarf_lineaddr(line, &addr) != 0 ||
- !dwarf_haspc(sp_die, addr))
- continue;
-
- /* Address filtering 2: No child include addr? */
- if (die_find_inlinefunc(sp_die, addr, &die_mem))
- continue;
- }
-
- /* TODO: Get fileno from line, but how? */
- src = dwarf_linesrc(line, NULL, NULL);
- if (strtailcmp(src, lf->fname) != 0)
- continue;
-
- ret = line_range_add_line(src, lineno, lf->lr);
- if (ret < 0)
- return ret;
- }
+ int ret;
- /*
- * Dwarf lines doesn't include function declarations. We have to
- * check functions list or given function.
- */
- if (sp_die) {
- src = dwarf_decl_file(sp_die);
- if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
- (lf->lno_s <= lineno && lf->lno_e >= lineno))
- ret = line_range_add_line(src, lineno, lf->lr);
- } else
- ret = find_line_range_func_decl_lines(lf);
+ ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf);
/* Update status */
if (ret >= 0)
@@ -1758,9 +1840,6 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
struct line_finder *lf = param->data;
struct line_range *lr = lf->lr;
- pr_debug("find (%llx) %s\n",
- (unsigned long long)dwarf_dieoffset(sp_die),
- dwarf_diename(sp_die));
if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
die_compare_name(sp_die, lr->function)) {
lf->fname = dwarf_decl_file(sp_die);
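
One concrete illustration of the bitfield handling added to convert_variable_type(): the argument type is now emitted in the kprobe-tracer form b<bit-size>@<bit-offset>/<container-bits>. The numbers below are invented for the example:

	/* A 3-bit field at DW_AT_bit_offset 5 inside a 4-byte container:
	 * BYTES_TO_BITS(4) is 32, so the generated type string is "b3@5/32". */
	char buf[16];
	snprintf(buf, sizeof(buf), "b%d@%d/%zd", 3, 5, (size_t)32);
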
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
new file mode 100644
index 000000000000..5317ef229041
--- /dev/null
+++ b/tools/perf/util/python.c
@@ -0,0 +1,891 @@
+#include <Python.h>
+#include <structmember.h>
+#include <inttypes.h>
+#include <poll.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "event.h"
+#include "cpumap.h"
+#include "thread_map.h"
+
+struct throttle_event {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+};
+
+PyMODINIT_FUNC initperf(void);
+
+#define member_def(type, member, ptype, help) \
+ { #member, ptype, \
+ offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
+ 0, help }
+
+#define sample_member_def(name, member, ptype, help) \
+ { #name, ptype, \
+ offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
+ 0, help }
+
+struct pyrf_event {
+ PyObject_HEAD
+ struct perf_sample sample;
+ union perf_event event;
+};
+
+#define sample_members \
+ sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \
+ sample_member_def(sample_pid, pid, T_INT, "event pid"), \
+ sample_member_def(sample_tid, tid, T_INT, "event tid"), \
+ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
+ sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
+ sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
+ sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
+ sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
+ sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
+
+static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
+
+static PyMemberDef pyrf_mmap_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(mmap_event, pid, T_UINT, "event pid"),
+ member_def(mmap_event, tid, T_UINT, "event tid"),
+ member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
+ member_def(mmap_event, len, T_ULONGLONG, "map length"),
+ member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"),
+ member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", "
+ "length: %#" PRIx64 ", offset: %#" PRIx64 ", "
+ "filename: %s }",
+ pevent->event.mmap.pid, pevent->event.mmap.tid,
+ pevent->event.mmap.start, pevent->event.mmap.len,
+ pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_mmap_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.mmap_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_mmap_event__doc,
+ .tp_members = pyrf_mmap_event__members,
+ .tp_repr = (reprfunc)pyrf_mmap_event__repr,
+};
+
+static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
+
+static PyMemberDef pyrf_task_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(fork_event, pid, T_UINT, "event pid"),
+ member_def(fork_event, ppid, T_UINT, "event ppid"),
+ member_def(fork_event, tid, T_UINT, "event tid"),
+ member_def(fork_event, ptid, T_UINT, "event ptid"),
+ member_def(fork_event, time, T_ULONGLONG, "timestamp"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
+ "ptid: %u, time: %" PRIu64 "}",
+ pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
+ pevent->event.fork.pid,
+ pevent->event.fork.ppid,
+ pevent->event.fork.tid,
+ pevent->event.fork.ptid,
+ pevent->event.fork.time);
+}
+
+static PyTypeObject pyrf_task_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.task_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_task_event__doc,
+ .tp_members = pyrf_task_event__members,
+ .tp_repr = (reprfunc)pyrf_task_event__repr,
+};
+
+static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
+
+static PyMemberDef pyrf_comm_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(comm_event, pid, T_UINT, "event pid"),
+ member_def(comm_event, tid, T_UINT, "event tid"),
+ member_def(comm_event, comm, T_STRING_INPLACE, "process name"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
+ pevent->event.comm.pid,
+ pevent->event.comm.tid,
+ pevent->event.comm.comm);
+}
+
+static PyTypeObject pyrf_comm_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.comm_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_comm_event__doc,
+ .tp_members = pyrf_comm_event__members,
+ .tp_repr = (reprfunc)pyrf_comm_event__repr,
+};
+
+static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
+
+static PyMemberDef pyrf_throttle_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(throttle_event, time, T_ULONGLONG, "timestamp"),
+ member_def(throttle_event, id, T_ULONGLONG, "event id"),
+ member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
+{
+ struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1);
+
+ return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
+ ", stream_id: %" PRIu64 " }",
+ pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
+ te->time, te->id, te->stream_id);
+}
+
+static PyTypeObject pyrf_throttle_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.throttle_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_throttle_event__doc,
+ .tp_members = pyrf_throttle_event__members,
+ .tp_repr = (reprfunc)pyrf_throttle_event__repr,
+};
+
+static int pyrf_event__setup_types(void)
+{
+ int err;
+ pyrf_mmap_event__type.tp_new =
+ pyrf_task_event__type.tp_new =
+ pyrf_comm_event__type.tp_new =
+ pyrf_throttle_event__type.tp_new = PyType_GenericNew;
+ err = PyType_Ready(&pyrf_mmap_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_task_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_comm_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_throttle_event__type);
+ if (err < 0)
+ goto out;
+out:
+ return err;
+}
+
+static PyTypeObject *pyrf_event__type[] = {
+ [PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
+ [PERF_RECORD_LOST] = &pyrf_mmap_event__type,
+ [PERF_RECORD_COMM] = &pyrf_comm_event__type,
+ [PERF_RECORD_EXIT] = &pyrf_task_event__type,
+ [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
+ [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
+ [PERF_RECORD_FORK] = &pyrf_task_event__type,
+ [PERF_RECORD_READ] = &pyrf_mmap_event__type,
+ [PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type,
+};
+
+static PyObject *pyrf_event__new(union perf_event *event)
+{
+ struct pyrf_event *pevent;
+ PyTypeObject *ptype;
+
+ if (event->header.type < PERF_RECORD_MMAP ||
+ event->header.type > PERF_RECORD_SAMPLE)
+ return NULL;
+
+ ptype = pyrf_event__type[event->header.type];
+ pevent = PyObject_New(struct pyrf_event, ptype);
+ if (pevent != NULL)
+ memcpy(&pevent->event, event, event->header.size);
+ return (PyObject *)pevent;
+}
+
+struct pyrf_cpu_map {
+ PyObject_HEAD
+
+ struct cpu_map *cpus;
+};
+
+static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
+ PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "cpustr", NULL, NULL, };
+ char *cpustr = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
+ kwlist, &cpustr))
+ return -1;
+
+ pcpus->cpus = cpu_map__new(cpustr);
+ if (pcpus->cpus == NULL)
+ return -1;
+ return 0;
+}
+
+static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
+{
+ cpu_map__delete(pcpus->cpus);
+ pcpus->ob_type->tp_free((PyObject*)pcpus);
+}
+
+static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
+{
+ struct pyrf_cpu_map *pcpus = (void *)obj;
+
+ return pcpus->cpus->nr;
+}
+
+static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_cpu_map *pcpus = (void *)obj;
+
+ if (i >= pcpus->cpus->nr)
+ return NULL;
+
+ return Py_BuildValue("i", pcpus->cpus->map[i]);
+}
+
+static PySequenceMethods pyrf_cpu_map__sequence_methods = {
+ .sq_length = pyrf_cpu_map__length,
+ .sq_item = pyrf_cpu_map__item,
+};
+
+static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
+
+static PyTypeObject pyrf_cpu_map__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.cpu_map",
+ .tp_basicsize = sizeof(struct pyrf_cpu_map),
+ .tp_dealloc = (destructor)pyrf_cpu_map__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_cpu_map__doc,
+ .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
+ .tp_init = (initproc)pyrf_cpu_map__init,
+};
+
+static int pyrf_cpu_map__setup_types(void)
+{
+ pyrf_cpu_map__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_cpu_map__type);
+}
+
+struct pyrf_thread_map {
+ PyObject_HEAD
+
+ struct thread_map *threads;
+};
+
+static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
+ PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "pid", "tid", NULL, NULL, };
+ int pid = -1, tid = -1;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii",
+ kwlist, &pid, &tid))
+ return -1;
+
+ pthreads->threads = thread_map__new(pid, tid);
+ if (pthreads->threads == NULL)
+ return -1;
+ return 0;
+}
+
+static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
+{
+ thread_map__delete(pthreads->threads);
+ pthreads->ob_type->tp_free((PyObject*)pthreads);
+}
+
+static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
+{
+ struct pyrf_thread_map *pthreads = (void *)obj;
+
+ return pthreads->threads->nr;
+}
+
+static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_thread_map *pthreads = (void *)obj;
+
+ if (i >= pthreads->threads->nr)
+ return NULL;
+
+ return Py_BuildValue("i", pthreads->threads->map[i]);
+}
+
+static PySequenceMethods pyrf_thread_map__sequence_methods = {
+ .sq_length = pyrf_thread_map__length,
+ .sq_item = pyrf_thread_map__item,
+};
+
+static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
+
+static PyTypeObject pyrf_thread_map__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.thread_map",
+ .tp_basicsize = sizeof(struct pyrf_thread_map),
+ .tp_dealloc = (destructor)pyrf_thread_map__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_thread_map__doc,
+ .tp_as_sequence = &pyrf_thread_map__sequence_methods,
+ .tp_init = (initproc)pyrf_thread_map__init,
+};
+
+static int pyrf_thread_map__setup_types(void)
+{
+ pyrf_thread_map__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_thread_map__type);
+}
+
+struct pyrf_evsel {
+ PyObject_HEAD
+
+ struct perf_evsel evsel;
+};
+
+static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
+ };
+ static char *kwlist[] = {
+ "type",
+ "config",
+ "sample_freq",
+ "sample_period",
+ "sample_type",
+ "read_format",
+ "disabled",
+ "inherit",
+ "pinned",
+ "exclusive",
+ "exclude_user",
+ "exclude_kernel",
+ "exclude_hv",
+ "exclude_idle",
+ "mmap",
+ "comm",
+ "freq",
+ "inherit_stat",
+ "enable_on_exec",
+ "task",
+ "watermark",
+ "precise_ip",
+ "mmap_data",
+ "sample_id_all",
+ "wakeup_events",
+ "bp_type",
+ "bp_addr",
+ "bp_len", NULL, NULL, };
+ u64 sample_period = 0;
+ u32 disabled = 0,
+ inherit = 0,
+ pinned = 0,
+ exclusive = 0,
+ exclude_user = 0,
+ exclude_kernel = 0,
+ exclude_hv = 0,
+ exclude_idle = 0,
+ mmap = 0,
+ comm = 0,
+ freq = 1,
+ inherit_stat = 0,
+ enable_on_exec = 0,
+ task = 0,
+ watermark = 0,
+ precise_ip = 0,
+ mmap_data = 0,
+ sample_id_all = 1;
+ int idx = 0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+ "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
+ &attr.type, &attr.config, &attr.sample_freq,
+ &sample_period, &attr.sample_type,
+ &attr.read_format, &disabled, &inherit,
+ &pinned, &exclusive, &exclude_user,
+ &exclude_kernel, &exclude_hv, &exclude_idle,
+ &mmap, &comm, &freq, &inherit_stat,
+ &enable_on_exec, &task, &watermark,
+ &precise_ip, &mmap_data, &sample_id_all,
+ &attr.wakeup_events, &attr.bp_type,
+ &attr.bp_addr, &attr.bp_len, &idx))
+ return -1;
+
+ /* union... */
+ if (sample_period != 0) {
+ if (attr.sample_freq != 0)
+ return -1; /* FIXME: throw right exception */
+ attr.sample_period = sample_period;
+ }
+
+ /* Bitfields */
+ attr.disabled = disabled;
+ attr.inherit = inherit;
+ attr.pinned = pinned;
+ attr.exclusive = exclusive;
+ attr.exclude_user = exclude_user;
+ attr.exclude_kernel = exclude_kernel;
+ attr.exclude_hv = exclude_hv;
+ attr.exclude_idle = exclude_idle;
+ attr.mmap = mmap;
+ attr.comm = comm;
+ attr.freq = freq;
+ attr.inherit_stat = inherit_stat;
+ attr.enable_on_exec = enable_on_exec;
+ attr.task = task;
+ attr.watermark = watermark;
+ attr.precise_ip = precise_ip;
+ attr.mmap_data = mmap_data;
+ attr.sample_id_all = sample_id_all;
+
+ perf_evsel__init(&pevsel->evsel, &attr, idx);
+ return 0;
+}
+
+static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
+{
+ perf_evsel__exit(&pevsel->evsel);
+ pevsel->ob_type->tp_free((PyObject*)pevsel);
+}
+
+static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evsel *evsel = &pevsel->evsel;
+ struct cpu_map *cpus = NULL;
+ struct thread_map *threads = NULL;
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ int group = 0, overwrite = 0;
+ static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
+ &pcpus, &pthreads, &group, &overwrite))
+ return NULL;
+
+ if (pthreads != NULL)
+ threads = ((struct pyrf_thread_map *)pthreads)->threads;
+
+ if (pcpus != NULL)
+ cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+
+ if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef pyrf_evsel__methods[] = {
+ {
+ .ml_name = "open",
+ .ml_meth = (PyCFunction)pyrf_evsel__open,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
+ },
+ { .ml_name = NULL, }
+};
+
+static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evsel__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.evsel",
+ .tp_basicsize = sizeof(struct pyrf_evsel),
+ .tp_dealloc = (destructor)pyrf_evsel__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_evsel__doc,
+ .tp_methods = pyrf_evsel__methods,
+ .tp_init = (initproc)pyrf_evsel__init,
+};
+
+static int pyrf_evsel__setup_types(void)
+{
+ pyrf_evsel__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_evsel__type);
+}
+
+struct pyrf_evlist {
+ PyObject_HEAD
+
+ struct perf_evlist evlist;
+};
+
+static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs __used)
+{
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ struct cpu_map *cpus;
+ struct thread_map *threads;
+
+ if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
+ return -1;
+
+ threads = ((struct pyrf_thread_map *)pthreads)->threads;
+ cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+ perf_evlist__init(&pevlist->evlist, cpus, threads);
+ return 0;
+}
+
+static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
+{
+ perf_evlist__exit(&pevlist->evlist);
+ pevlist->ob_type->tp_free((PyObject*)pevlist);
+}
+
+static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ static char *kwlist[] = {"pages", "overwrite",
+ NULL, NULL};
+ int pages = 128, overwrite = false;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
+ &pages, &overwrite))
+ return NULL;
+
+ if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ static char *kwlist[] = {"timeout", NULL, NULL};
+ int timeout = -1, n;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
+ return NULL;
+
+ n = poll(evlist->pollfd, evlist->nr_fds, timeout);
+ if (n < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ return Py_BuildValue("i", n);
+}
+
+static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
+ PyObject *args __used, PyObject *kwargs __used)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ PyObject *list = PyList_New(0);
+ int i;
+
+ for (i = 0; i < evlist->nr_fds; ++i) {
+ PyObject *file;
+ FILE *fp = fdopen(evlist->pollfd[i].fd, "r");
+
+ if (fp == NULL)
+ goto free_list;
+
+ file = PyFile_FromFile(fp, "perf", "r", NULL);
+ if (file == NULL)
+ goto free_list;
+
+ if (PyList_Append(list, file) != 0) {
+ Py_DECREF(file);
+ goto free_list;
+ }
+
+ Py_DECREF(file);
+ }
+
+ return list;
+free_list:
+ return PyErr_NoMemory();
+}
+
+
+static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs __used)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ PyObject *pevsel;
+ struct perf_evsel *evsel;
+
+ if (!PyArg_ParseTuple(args, "O", &pevsel))
+ return NULL;
+
+ Py_INCREF(pevsel);
+ evsel = &((struct pyrf_evsel *)pevsel)->evsel;
+ evsel->idx = evlist->nr_entries;
+ perf_evlist__add(evlist, evsel);
+
+ return Py_BuildValue("i", evlist->nr_entries);
+}
+
+static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ union perf_event *event;
+ int sample_id_all = 1, cpu;
+ static char *kwlist[] = {"sample_id_all", NULL, NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
+ &cpu, &sample_id_all))
+ return NULL;
+
+ event = perf_evlist__read_on_cpu(evlist, cpu);
+ if (event != NULL) {
+ struct perf_evsel *first;
+ PyObject *pyevent = pyrf_event__new(event);
+ struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
+
+ if (pyevent == NULL)
+ return PyErr_NoMemory();
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ perf_event__parse_sample(event, first->attr.sample_type, sample_id_all,
+ &pevent->sample);
+ return pyevent;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef pyrf_evlist__methods[] = {
+ {
+ .ml_name = "mmap",
+ .ml_meth = (PyCFunction)pyrf_evlist__mmap,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("mmap the file descriptor table.")
+ },
+ {
+ .ml_name = "poll",
+ .ml_meth = (PyCFunction)pyrf_evlist__poll,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("poll the file descriptor table.")
+ },
+ {
+ .ml_name = "get_pollfd",
+ .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("get the poll file descriptor table.")
+ },
+ {
+ .ml_name = "add",
+ .ml_meth = (PyCFunction)pyrf_evlist__add,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("adds an event selector to the list.")
+ },
+ {
+ .ml_name = "read_on_cpu",
+ .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("reads an event.")
+ },
+ { .ml_name = NULL, }
+};
+
+static Py_ssize_t pyrf_evlist__length(PyObject *obj)
+{
+ struct pyrf_evlist *pevlist = (void *)obj;
+
+ return pevlist->evlist.nr_entries;
+}
+
+static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_evlist *pevlist = (void *)obj;
+ struct perf_evsel *pos;
+
+ if (i >= pevlist->evlist.nr_entries)
+ return NULL;
+
+ list_for_each_entry(pos, &pevlist->evlist.entries, node)
+ if (i-- == 0)
+ break;
+
+ return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
+}
+
+static PySequenceMethods pyrf_evlist__sequence_methods = {
+ .sq_length = pyrf_evlist__length,
+ .sq_item = pyrf_evlist__item,
+};
+
+static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evlist__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.evlist",
+ .tp_basicsize = sizeof(struct pyrf_evlist),
+ .tp_dealloc = (destructor)pyrf_evlist__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_as_sequence = &pyrf_evlist__sequence_methods,
+ .tp_doc = pyrf_evlist__doc,
+ .tp_methods = pyrf_evlist__methods,
+ .tp_init = (initproc)pyrf_evlist__init,
+};
+
+static int pyrf_evlist__setup_types(void)
+{
+ pyrf_evlist__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_evlist__type);
+}
+
+static struct {
+ const char *name;
+ int value;
+} perf__constants[] = {
+ { "TYPE_HARDWARE", PERF_TYPE_HARDWARE },
+ { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE },
+ { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT },
+ { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE },
+ { "TYPE_RAW", PERF_TYPE_RAW },
+ { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT },
+
+ { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES },
+ { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS },
+ { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES },
+ { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES },
+ { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES },
+ { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES },
+ { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D },
+ { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I },
+ { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL },
+ { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB },
+ { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB },
+ { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU },
+ { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ },
+ { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE },
+ { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH },
+ { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
+ { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS },
+
+ { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK },
+ { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK },
+ { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS },
+ { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES },
+ { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS },
+ { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN },
+ { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ },
+ { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
+ { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
+
+ { "SAMPLE_IP", PERF_SAMPLE_IP },
+ { "SAMPLE_TID", PERF_SAMPLE_TID },
+ { "SAMPLE_TIME", PERF_SAMPLE_TIME },
+ { "SAMPLE_ADDR", PERF_SAMPLE_ADDR },
+ { "SAMPLE_READ", PERF_SAMPLE_READ },
+ { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN },
+ { "SAMPLE_ID", PERF_SAMPLE_ID },
+ { "SAMPLE_CPU", PERF_SAMPLE_CPU },
+ { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD },
+ { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID },
+ { "SAMPLE_RAW", PERF_SAMPLE_RAW },
+
+ { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED },
+ { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING },
+ { "FORMAT_ID", PERF_FORMAT_ID },
+ { "FORMAT_GROUP", PERF_FORMAT_GROUP },
+
+ { "RECORD_MMAP", PERF_RECORD_MMAP },
+ { "RECORD_LOST", PERF_RECORD_LOST },
+ { "RECORD_COMM", PERF_RECORD_COMM },
+ { "RECORD_EXIT", PERF_RECORD_EXIT },
+ { "RECORD_THROTTLE", PERF_RECORD_THROTTLE },
+ { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE },
+ { "RECORD_FORK", PERF_RECORD_FORK },
+ { "RECORD_READ", PERF_RECORD_READ },
+ { "RECORD_SAMPLE", PERF_RECORD_SAMPLE },
+ { .name = NULL, },
+};
+
+static PyMethodDef perf__methods[] = {
+ { .ml_name = NULL, }
+};
+
+PyMODINIT_FUNC initperf(void)
+{
+ PyObject *obj;
+ int i;
+ PyObject *dict, *module = Py_InitModule("perf", perf__methods);
+
+ if (module == NULL ||
+ pyrf_event__setup_types() < 0 ||
+ pyrf_evlist__setup_types() < 0 ||
+ pyrf_evsel__setup_types() < 0 ||
+ pyrf_thread_map__setup_types() < 0 ||
+ pyrf_cpu_map__setup_types() < 0)
+ return;
+
+ Py_INCREF(&pyrf_evlist__type);
+ PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
+
+ Py_INCREF(&pyrf_evsel__type);
+ PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
+
+ Py_INCREF(&pyrf_thread_map__type);
+ PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
+
+ Py_INCREF(&pyrf_cpu_map__type);
+ PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
+
+ dict = PyModule_GetDict(module);
+ if (dict == NULL)
+ goto error;
+
+ for (i = 0; perf__constants[i].name != NULL; i++) {
+ obj = PyInt_FromLong(perf__constants[i].value);
+ if (obj == NULL)
+ goto error;
+ PyDict_SetItemString(dict, perf__constants[i].name, obj);
+ Py_DECREF(obj);
+ }
+
+error:
+ if (PyErr_Occurred())
+ PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
+}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c6d99334bdfa..2040b8538527 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -248,8 +248,7 @@ static void python_process_event(int cpu, void *data,
context = PyCObject_FromVoidPtr(scripting_context, NULL);
PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
- PyTuple_SetItem(t, n++,
- PyCObject_FromVoidPtr(scripting_context, NULL));
+ PyTuple_SetItem(t, n++, context);
if (handler) {
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 105f00bfd555..a3a871f7bda3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -67,7 +67,7 @@ out_close:
static void perf_session__id_header_size(struct perf_session *session)
{
- struct sample_data *data;
+ struct perf_sample *data;
u64 sample_type = session->sample_type;
u16 size = 0;
@@ -165,7 +165,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
} else if (mode == O_WRONLY) {
/*
* In O_RDONLY mode this will be performed when reading the
- * kernel MMAP event, in event__process_mmap().
+ * kernel MMAP event, in perf_event__process_mmap().
*/
if (perf_session__create_kernel_maps(self) < 0)
goto out_delete;
@@ -242,17 +242,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
return 0;
}
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
+int perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
- struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
+ int err;
- if (!syms)
- return NULL;
+ callchain_cursor_reset(&self->callchain_cursor);
for (i = 0; i < chain->nr; i++) {
u64 ip = chain->ips[i];
@@ -281,30 +280,33 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
*parent = al.sym;
if (!symbol_conf.use_callchain)
break;
- syms[i].map = al.map;
- syms[i].sym = al.sym;
}
+
+ err = callchain_cursor_append(&self->callchain_cursor,
+ ip, al.map, al.sym);
+ if (err)
+ return err;
}
- return syms;
+ return 0;
}
-static int process_event_synth_stub(event_t *event __used,
+static int process_event_synth_stub(union perf_event *event __used,
struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_stub(event_t *event __used,
- struct sample_data *sample __used,
+static int process_event_stub(union perf_event *event __used,
+ struct perf_sample *sample __used,
struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round_stub(event_t *event __used,
+static int process_finished_round_stub(union perf_event *event __used,
struct perf_session *session __used,
struct perf_event_ops *ops __used)
{
@@ -312,7 +314,7 @@ static int process_finished_round_stub(event_t *event __used,
return 0;
}
-static int process_finished_round(event_t *event,
+static int process_finished_round(union perf_event *event,
struct perf_session *session,
struct perf_event_ops *ops);
@@ -329,7 +331,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
if (handler->exit == NULL)
handler->exit = process_event_stub;
if (handler->lost == NULL)
- handler->lost = event__process_lost;
+ handler->lost = perf_event__process_lost;
if (handler->read == NULL)
handler->read = process_event_stub;
if (handler->throttle == NULL)
@@ -363,98 +365,98 @@ void mem_bswap_64(void *src, int byte_size)
}
}
-static void event__all64_swap(event_t *self)
+static void perf_event__all64_swap(union perf_event *event)
{
- struct perf_event_header *hdr = &self->header;
- mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
+ struct perf_event_header *hdr = &event->header;
+ mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
-static void event__comm_swap(event_t *self)
+static void perf_event__comm_swap(union perf_event *event)
{
- self->comm.pid = bswap_32(self->comm.pid);
- self->comm.tid = bswap_32(self->comm.tid);
+ event->comm.pid = bswap_32(event->comm.pid);
+ event->comm.tid = bswap_32(event->comm.tid);
}
-static void event__mmap_swap(event_t *self)
+static void perf_event__mmap_swap(union perf_event *event)
{
- self->mmap.pid = bswap_32(self->mmap.pid);
- self->mmap.tid = bswap_32(self->mmap.tid);
- self->mmap.start = bswap_64(self->mmap.start);
- self->mmap.len = bswap_64(self->mmap.len);
- self->mmap.pgoff = bswap_64(self->mmap.pgoff);
+ event->mmap.pid = bswap_32(event->mmap.pid);
+ event->mmap.tid = bswap_32(event->mmap.tid);
+ event->mmap.start = bswap_64(event->mmap.start);
+ event->mmap.len = bswap_64(event->mmap.len);
+ event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}
-static void event__task_swap(event_t *self)
+static void perf_event__task_swap(union perf_event *event)
{
- self->fork.pid = bswap_32(self->fork.pid);
- self->fork.tid = bswap_32(self->fork.tid);
- self->fork.ppid = bswap_32(self->fork.ppid);
- self->fork.ptid = bswap_32(self->fork.ptid);
- self->fork.time = bswap_64(self->fork.time);
+ event->fork.pid = bswap_32(event->fork.pid);
+ event->fork.tid = bswap_32(event->fork.tid);
+ event->fork.ppid = bswap_32(event->fork.ppid);
+ event->fork.ptid = bswap_32(event->fork.ptid);
+ event->fork.time = bswap_64(event->fork.time);
}
-static void event__read_swap(event_t *self)
+static void perf_event__read_swap(union perf_event *event)
{
- self->read.pid = bswap_32(self->read.pid);
- self->read.tid = bswap_32(self->read.tid);
- self->read.value = bswap_64(self->read.value);
- self->read.time_enabled = bswap_64(self->read.time_enabled);
- self->read.time_running = bswap_64(self->read.time_running);
- self->read.id = bswap_64(self->read.id);
+ event->read.pid = bswap_32(event->read.pid);
+ event->read.tid = bswap_32(event->read.tid);
+ event->read.value = bswap_64(event->read.value);
+ event->read.time_enabled = bswap_64(event->read.time_enabled);
+ event->read.time_running = bswap_64(event->read.time_running);
+ event->read.id = bswap_64(event->read.id);
}
-static void event__attr_swap(event_t *self)
+static void perf_event__attr_swap(union perf_event *event)
{
size_t size;
- self->attr.attr.type = bswap_32(self->attr.attr.type);
- self->attr.attr.size = bswap_32(self->attr.attr.size);
- self->attr.attr.config = bswap_64(self->attr.attr.config);
- self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
- self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
- self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
- self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
- self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
- self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
- self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
-
- size = self->header.size;
- size -= (void *)&self->attr.id - (void *)self;
- mem_bswap_64(self->attr.id, size);
+ event->attr.attr.type = bswap_32(event->attr.attr.type);
+ event->attr.attr.size = bswap_32(event->attr.attr.size);
+ event->attr.attr.config = bswap_64(event->attr.attr.config);
+ event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
+ event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type);
+ event->attr.attr.read_format = bswap_64(event->attr.attr.read_format);
+ event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
+ event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type);
+ event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr);
+ event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len);
+
+ size = event->header.size;
+ size -= (void *)&event->attr.id - (void *)event;
+ mem_bswap_64(event->attr.id, size);
}
-static void event__event_type_swap(event_t *self)
+static void perf_event__event_type_swap(union perf_event *event)
{
- self->event_type.event_type.event_id =
- bswap_64(self->event_type.event_type.event_id);
+ event->event_type.event_type.event_id =
+ bswap_64(event->event_type.event_type.event_id);
}
-static void event__tracing_data_swap(event_t *self)
+static void perf_event__tracing_data_swap(union perf_event *event)
{
- self->tracing_data.size = bswap_32(self->tracing_data.size);
+ event->tracing_data.size = bswap_32(event->tracing_data.size);
}
-typedef void (*event__swap_op)(event_t *self);
-
-static event__swap_op event__swap_ops[] = {
- [PERF_RECORD_MMAP] = event__mmap_swap,
- [PERF_RECORD_COMM] = event__comm_swap,
- [PERF_RECORD_FORK] = event__task_swap,
- [PERF_RECORD_EXIT] = event__task_swap,
- [PERF_RECORD_LOST] = event__all64_swap,
- [PERF_RECORD_READ] = event__read_swap,
- [PERF_RECORD_SAMPLE] = event__all64_swap,
- [PERF_RECORD_HEADER_ATTR] = event__attr_swap,
- [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
- [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
- [PERF_RECORD_HEADER_BUILD_ID] = NULL,
- [PERF_RECORD_HEADER_MAX] = NULL,
+typedef void (*perf_event__swap_op)(union perf_event *event);
+
+static perf_event__swap_op perf_event__swap_ops[] = {
+ [PERF_RECORD_MMAP] = perf_event__mmap_swap,
+ [PERF_RECORD_COMM] = perf_event__comm_swap,
+ [PERF_RECORD_FORK] = perf_event__task_swap,
+ [PERF_RECORD_EXIT] = perf_event__task_swap,
+ [PERF_RECORD_LOST] = perf_event__all64_swap,
+ [PERF_RECORD_READ] = perf_event__read_swap,
+ [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
+ [PERF_RECORD_HEADER_ATTR] = perf_event__attr_swap,
+ [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
+ [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
+ [PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_MAX] = NULL,
};
struct sample_queue {
u64 timestamp;
u64 file_offset;
- event_t *event;
+ union perf_event *event;
struct list_head list;
};
@@ -472,8 +474,8 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
}
static int perf_session_deliver_event(struct perf_session *session,
- event_t *event,
- struct sample_data *sample,
+ union perf_event *event,
+ struct perf_sample *sample,
struct perf_event_ops *ops,
u64 file_offset);
@@ -483,7 +485,7 @@ static void flush_sample_queue(struct perf_session *s,
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
struct sample_queue *tmp, *iter;
- struct sample_data sample;
+ struct perf_sample sample;
u64 limit = os->next_flush;
u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
@@ -494,7 +496,7 @@ static void flush_sample_queue(struct perf_session *s,
if (iter->timestamp > limit)
break;
- event__parse_sample(iter->event, s, &sample);
+ perf_session__parse_sample(s, iter->event, &sample);
perf_session_deliver_event(s, iter->event, &sample, ops,
iter->file_offset);
@@ -550,7 +552,7 @@ static void flush_sample_queue(struct perf_session *s,
* Flush every events below timestamp 7
* etc...
*/
-static int process_finished_round(event_t *event __used,
+static int process_finished_round(union perf_event *event __used,
struct perf_session *session,
struct perf_event_ops *ops)
{
@@ -607,12 +609,12 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
-static int perf_session_queue_event(struct perf_session *s, event_t *event,
- struct sample_data *data, u64 file_offset)
+static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *sc = &os->sample_cache;
- u64 timestamp = data->time;
+ u64 timestamp = sample->time;
struct sample_queue *new;
if (!timestamp || timestamp == ~0ULL)
@@ -648,7 +650,7 @@ static int perf_session_queue_event(struct perf_session *s, event_t *event,
return 0;
}
-static void callchain__printf(struct sample_data *sample)
+static void callchain__printf(struct perf_sample *sample)
{
unsigned int i;
@@ -660,8 +662,8 @@ static void callchain__printf(struct sample_data *sample)
}
static void perf_session__print_tstamp(struct perf_session *session,
- event_t *event,
- struct sample_data *sample)
+ union perf_event *event,
+ struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE &&
!session->sample_id_all) {
@@ -676,8 +678,8 @@ static void perf_session__print_tstamp(struct perf_session *session,
printf("%" PRIu64 " ", sample->time);
}
-static void dump_event(struct perf_session *session, event_t *event,
- u64 file_offset, struct sample_data *sample)
+static void dump_event(struct perf_session *session, union perf_event *event,
+ u64 file_offset, struct perf_sample *sample)
{
if (!dump_trace)
return;
@@ -691,11 +693,11 @@ static void dump_event(struct perf_session *session, event_t *event,
perf_session__print_tstamp(session, event, sample);
printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
- event->header.size, event__get_event_name(event->header.type));
+ event->header.size, perf_event__name(event->header.type));
}
-static void dump_sample(struct perf_session *session, event_t *event,
- struct sample_data *sample)
+static void dump_sample(struct perf_session *session, union perf_event *event,
+ struct perf_sample *sample)
{
if (!dump_trace)
return;
@@ -709,8 +711,8 @@ static void dump_sample(struct perf_session *session, event_t *event,
}
static int perf_session_deliver_event(struct perf_session *session,
- event_t *event,
- struct sample_data *sample,
+ union perf_event *event,
+ struct perf_sample *sample,
struct perf_event_ops *ops,
u64 file_offset)
{
@@ -743,7 +745,7 @@ static int perf_session_deliver_event(struct perf_session *session,
}
static int perf_session__preprocess_sample(struct perf_session *session,
- event_t *event, struct sample_data *sample)
+ union perf_event *event, struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE ||
!(session->sample_type & PERF_SAMPLE_CALLCHAIN))
@@ -758,7 +760,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
return 0;
}
-static int perf_session__process_user_event(struct perf_session *session, event_t *event,
+static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
struct perf_event_ops *ops, u64 file_offset)
{
dump_event(session, event, file_offset, NULL);
@@ -783,15 +785,16 @@ static int perf_session__process_user_event(struct perf_session *session, event_
}
static int perf_session__process_event(struct perf_session *session,
- event_t *event,
+ union perf_event *event,
struct perf_event_ops *ops,
u64 file_offset)
{
- struct sample_data sample;
+ struct perf_sample sample;
int ret;
- if (session->header.needs_swap && event__swap_ops[event->header.type])
- event__swap_ops[event->header.type](event);
+ if (session->header.needs_swap &&
+ perf_event__swap_ops[event->header.type])
+ perf_event__swap_ops[event->header.type](event);
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
@@ -804,7 +807,7 @@ static int perf_session__process_event(struct perf_session *session,
/*
* For all kernel events we get the sample data
*/
- event__parse_sample(event, session, &sample);
+ perf_session__parse_sample(session, event, &sample);
/* Preprocess sample records - precheck callchains */
if (perf_session__preprocess_sample(session, event, &sample))
@@ -843,7 +846,7 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
static void perf_session__warn_about_errors(const struct perf_session *session,
const struct perf_event_ops *ops)
{
- if (ops->lost == event__process_lost &&
+ if (ops->lost == perf_event__process_lost &&
session->hists.stats.total_lost != 0) {
ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
"!\n\nCheck IO/CPU overload!\n\n",
@@ -875,7 +878,7 @@ volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
struct perf_event_ops *ops)
{
- event_t event;
+ union perf_event event;
uint32_t size;
int skip = 0;
u64 head;
@@ -956,7 +959,7 @@ int __perf_session__process_events(struct perf_session *session,
struct ui_progress *progress;
size_t page_size, mmap_size;
char *buf, *mmaps[8];
- event_t *event;
+ union perf_event *event;
uint32_t size;
perf_event_ops__fill_defaults(ops);
@@ -1001,7 +1004,7 @@ remap:
file_pos = file_offset + head;
more:
- event = (event_t *)(buf + head);
+ event = (union perf_event *)(buf + head);
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index decd83f274fd..977b3a1b14aa 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -51,15 +51,17 @@ struct perf_session {
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
- char filename[0];
+ struct callchain_cursor callchain_cursor;
+ char filename[0];
};
struct perf_event_ops;
-typedef int (*event_op)(event_t *self, struct sample_data *sample,
+typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
struct perf_session *session);
-typedef int (*event_synth_op)(event_t *self, struct perf_session *session);
-typedef int (*event_op2)(event_t *self, struct perf_session *session,
+typedef int (*event_synth_op)(union perf_event *self,
+ struct perf_session *session);
+typedef int (*event_op2)(union perf_event *self, struct perf_session *session,
struct perf_event_ops *ops);
struct perf_event_ops {
@@ -94,10 +96,10 @@ int __perf_session__process_events(struct perf_session *self,
int perf_session__process_events(struct perf_session *self,
struct perf_event_ops *event_ops);
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent);
+int perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
bool perf_session__has_traces(struct perf_session *self, const char *msg);
@@ -154,4 +156,13 @@ size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
{
return hists__fprintf_nr_events(&self->hists, fp);
}
+
+static inline int perf_session__parse_sample(struct perf_session *session,
+ const union perf_event *event,
+ struct perf_sample *sample)
+{
+ return perf_event__parse_sample(event, session->sample_type,
+ session->sample_id_all, sample);
+}
+
#endif /* __PERF_SESSION_H */
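
Illustrative sketch (not from the patch) of how a caller might use the new perf_session__parse_sample() helper added above; the handler name and the assumption that a negative return signals a parse failure are hypothetical:

static int handle_event(struct perf_session *session, union perf_event *event)
{
	struct perf_sample sample;

	/* fills 'sample' according to session->sample_type / sample_id_all */
	if (perf_session__parse_sample(session, event, &sample) < 0)
		return -1;

	/* e.g. sample.time is now valid when PERF_SAMPLE_TIME was requested */
	return 0;
}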
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
new file mode 100644
index 000000000000..e24ffadb20b2
--- /dev/null
+++ b/tools/perf/util/setup.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python2
+
+from distutils.core import setup, Extension
+
+perf = Extension('perf',
+ sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
+ 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
+ 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
+ include_dirs = ['util/include'],
+ extra_compile_args = ['-fno-strict-aliasing', '-Wno-write-strings'])
+
+setup(name='perf',
+ version='0.1',
+ description='Interface with the Linux profiling infrastructure',
+ author='Arnaldo Carvalho de Melo',
+ author_email='acme@redhat.com',
+ license='GPLv2',
+ url='http://perf.wiki.kernel.org',
+ ext_modules=[perf])
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
new file mode 100644
index 000000000000..4064b7d708b8
--- /dev/null
+++ b/tools/perf/util/strfilter.c
@@ -0,0 +1,200 @@
+#include <ctype.h>
+#include "util.h"
+#include "string.h"
+#include "strfilter.h"
+
+/* Operators */
+static const char *OP_and = "&"; /* Logical AND */
+static const char *OP_or = "|"; /* Logical OR */
+static const char *OP_not = "!"; /* Logical NOT */
+
+#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
+#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')')
+
+static void strfilter_node__delete(struct strfilter_node *self)
+{
+ if (self) {
+ if (self->p && !is_operator(*self->p))
+ free((char *)self->p);
+ strfilter_node__delete(self->l);
+ strfilter_node__delete(self->r);
+ free(self);
+ }
+}
+
+void strfilter__delete(struct strfilter *self)
+{
+ if (self) {
+ strfilter_node__delete(self->root);
+ free(self);
+ }
+}
+
+static const char *get_token(const char *s, const char **e)
+{
+ const char *p;
+
+ while (isspace(*s)) /* Skip spaces */
+ s++;
+
+ if (*s == '\0') {
+ p = s;
+ goto end;
+ }
+
+ p = s + 1;
+ if (!is_separator(*s)) {
+ /* End search */
+retry:
+ while (*p && !is_separator(*p) && !isspace(*p))
+ p++;
+ /* Escape and special case: '!' is also used in glob pattern */
+ if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
+ p++;
+ goto retry;
+ }
+ }
+end:
+ *e = p;
+ return s;
+}
+
+static struct strfilter_node *strfilter_node__alloc(const char *op,
+ struct strfilter_node *l,
+ struct strfilter_node *r)
+{
+ struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node));
+
+ if (ret) {
+ ret->p = op;
+ ret->l = l;
+ ret->r = r;
+ }
+
+ return ret;
+}
+
+static struct strfilter_node *strfilter_node__new(const char *s,
+ const char **ep)
+{
+ struct strfilter_node root, *cur, *last_op;
+ const char *e;
+
+ if (!s)
+ return NULL;
+
+ memset(&root, 0, sizeof(root));
+ last_op = cur = &root;
+
+ s = get_token(s, &e);
+ while (*s != '\0' && *s != ')') {
+ switch (*s) {
+ case '&': /* Exchg last OP->r with AND */
+ if (!cur->r || !last_op->r)
+ goto error;
+ cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
+ if (!cur)
+ goto nomem;
+ last_op->r = cur;
+ last_op = cur;
+ break;
+ case '|': /* Exchg the root with OR */
+ if (!cur->r || !root.r)
+ goto error;
+ cur = strfilter_node__alloc(OP_or, root.r, NULL);
+ if (!cur)
+ goto nomem;
+ root.r = cur;
+ last_op = cur;
+ break;
+ case '!': /* Add NOT as a leaf node */
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
+ if (!cur->r)
+ goto nomem;
+ cur = cur->r;
+ break;
+ case '(': /* Recursively parse inside the parentheses */
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__new(s + 1, &s);
+ if (!s)
+ goto nomem;
+ if (!cur->r || *s != ')')
+ goto error;
+ e = s + 1;
+ break;
+ default:
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__alloc(NULL, NULL, NULL);
+ if (!cur->r)
+ goto nomem;
+ cur->r->p = strndup(s, e - s);
+ if (!cur->r->p)
+ goto nomem;
+ }
+ s = get_token(e, &e);
+ }
+ if (!cur->r)
+ goto error;
+ *ep = s;
+ return root.r;
+nomem:
+ s = NULL;
+error:
+ *ep = s;
+ strfilter_node__delete(root.r);
+ return NULL;
+}
+
+/*
+ * Parse a filter rule and return a new strfilter.
+ * Return NULL on failure; in that case *err == NULL means a memory
+ * allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err)
+{
+ struct strfilter *ret = zalloc(sizeof(struct strfilter));
+ const char *ep = NULL;
+
+ if (ret)
+ ret->root = strfilter_node__new(rules, &ep);
+
+ if (!ret || !ret->root || *ep != '\0') {
+ if (err)
+ *err = ep;
+ strfilter__delete(ret);
+ ret = NULL;
+ }
+
+ return ret;
+}
+
+static bool strfilter_node__compare(struct strfilter_node *self,
+ const char *str)
+{
+ if (!self || !self->p)
+ return false;
+
+ switch (*self->p) {
+ case '|': /* OR */
+ return strfilter_node__compare(self->l, str) ||
+ strfilter_node__compare(self->r, str);
+ case '&': /* AND */
+ return strfilter_node__compare(self->l, str) &&
+ strfilter_node__compare(self->r, str);
+ case '!': /* NOT */
+ return !strfilter_node__compare(self->r, str);
+ default:
+ return strglobmatch(str, self->p);
+ }
+}
+
+/* Return true if STR matches the filter rules */
+bool strfilter__compare(struct strfilter *self, const char *str)
+{
+ if (!self)
+ return false;
+ return strfilter_node__compare(self->root, str);
+}
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h
new file mode 100644
index 000000000000..00f58a7506de
--- /dev/null
+++ b/tools/perf/util/strfilter.h
@@ -0,0 +1,48 @@
+#ifndef __PERF_STRFILTER_H
+#define __PERF_STRFILTER_H
+/* General purpose glob matching filter */
+
+#include <linux/list.h>
+#include <stdbool.h>
+
+/* A node of string filter */
+struct strfilter_node {
+ struct strfilter_node *l; /* Tree left branch (for &,|) */
+ struct strfilter_node *r; /* Tree right branch (for !,&,|) */
+ const char *p; /* Operator or rule */
+};
+
+/* String filter */
+struct strfilter {
+ struct strfilter_node *root;
+};
+
+/**
+ * strfilter__new - Create a new string filter
+ * @rules: Filter rule, which is a combination of glob expressions.
+ * @err: Pointer which points to an error detected in @rules
+ *
+ * Parse @rules and return a new strfilter. Return NULL if an error is
+ * detected. In that case, *@err will indicate where the error was detected,
+ * and *@err is NULL if a memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err);
+
+/**
+ * strfilter__compare - compare given string and a string filter
+ * @self: String filter
+ * @str: target string
+ *
+ * Compare @str against @self. Return true if @str matches the filter rules.
+ */
+bool strfilter__compare(struct strfilter *self, const char *str);
+
+/**
+ * strfilter__delete - delete a string filter
+ * @self: String filter to delete
+ *
+ * Delete @self.
+ */
+void strfilter__delete(struct strfilter *self);
+
+#endif
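
Illustrative sketch (not from the patch) of driving the new strfilter API with a glob-based rule; the rule string and the calling function are hypothetical:

#include <stdio.h>
#include "strfilter.h"

static int filter_example(void)
{
	const char *err = NULL;
	/* keep names starting with sys_ or do_, but drop *_slow variants */
	struct strfilter *filter = strfilter__new("(sys_* | do_*) & !*_slow", &err);

	if (filter == NULL) {
		/* err == NULL means allocation failure, otherwise it points
		 * into the rule string where parsing stopped */
		fprintf(stderr, "bad rule near: %s\n", err ? err : "(nomem)");
		return -1;
	}

	if (strfilter__compare(filter, "sys_write"))
		printf("sys_write matches\n");

	strfilter__delete(filter);
	return 0;
}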
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index fb737fe9be91..96c866045d60 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -456,9 +456,9 @@ void svg_legenda(void)
return;
svg_legenda_box(0, "Running", "sample");
- svg_legenda_box(100, "Idle","rect.c1");
- svg_legenda_box(200, "Deeper Idle", "rect.c3");
- svg_legenda_box(350, "Deepest Idle", "rect.c6");
+ svg_legenda_box(100, "Idle","c1");
+ svg_legenda_box(200, "Deeper Idle", "c3");
+ svg_legenda_box(350, "Deepest Idle", "c6");
svg_legenda_box(550, "Sleeping", "process2");
svg_legenda_box(650, "Waiting for cpu", "waiting");
svg_legenda_box(800, "Blocked on IO", "blocked");
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7821d0e6866f..3e193f8e3061 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1525,8 +1525,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
symbol_conf.symfs, self->long_name);
break;
case DSO__ORIG_GUEST_KMODULE:
- if (map->groups && map->groups->machine)
- root_dir = map->groups->machine->root_dir;
+ if (map->groups && machine)
+ root_dir = machine->root_dir;
else
root_dir = "";
snprintf(name, size, "%s%s%s", symbol_conf.symfs,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 00f4eade2e3e..d5d3b22250f3 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -7,61 +7,6 @@
#include "util.h"
#include "debug.h"
-/* Skip "." and ".." directories */
-static int filter(const struct dirent *dir)
-{
- if (dir->d_name[0] == '.')
- return 0;
- else
- return 1;
-}
-
-struct thread_map *thread_map__new_by_pid(pid_t pid)
-{
- struct thread_map *threads;
- char name[256];
- int items;
- struct dirent **namelist = NULL;
- int i;
-
- sprintf(name, "/proc/%d/task", pid);
- items = scandir(name, &namelist, filter, NULL);
- if (items <= 0)
- return NULL;
-
- threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
- if (threads != NULL) {
- for (i = 0; i < items; i++)
- threads->map[i] = atoi(namelist[i]->d_name);
- threads->nr = items;
- }
-
- for (i=0; i<items; i++)
- free(namelist[i]);
- free(namelist);
-
- return threads;
-}
-
-struct thread_map *thread_map__new_by_tid(pid_t tid)
-{
- struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
-
- if (threads != NULL) {
- threads->map[0] = tid;
- threads->nr = 1;
- }
-
- return threads;
-}
-
-struct thread_map *thread_map__new(pid_t pid, pid_t tid)
-{
- if (pid != -1)
- return thread_map__new_by_pid(pid);
- return thread_map__new_by_tid(tid);
-}
-
static struct thread *thread__new(pid_t pid)
{
struct thread *self = zalloc(sizeof(*self));
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index d7574101054a..e5f2401c1b5e 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,24 +18,10 @@ struct thread {
int comm_len;
};
-struct thread_map {
- int nr;
- int map[];
-};
-
struct perf_session;
void thread__delete(struct thread *self);
-struct thread_map *thread_map__new_by_pid(pid_t pid);
-struct thread_map *thread_map__new_by_tid(pid_t tid);
-struct thread_map *thread_map__new(pid_t pid, pid_t tid);
-
-static inline void thread_map__delete(struct thread_map *threads)
-{
- free(threads);
-}
-
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
new file mode 100644
index 000000000000..a5df131b77c3
--- /dev/null
+++ b/tools/perf/util/thread_map.c
@@ -0,0 +1,64 @@
+#include <dirent.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "thread_map.h"
+
+/* Skip "." and ".." directories */
+static int filter(const struct dirent *dir)
+{
+ if (dir->d_name[0] == '.')
+ return 0;
+ else
+ return 1;
+}
+
+struct thread_map *thread_map__new_by_pid(pid_t pid)
+{
+ struct thread_map *threads;
+ char name[256];
+ int items;
+ struct dirent **namelist = NULL;
+ int i;
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, filter, NULL);
+ if (items <= 0)
+ return NULL;
+
+ threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+ if (threads != NULL) {
+ for (i = 0; i < items; i++)
+ threads->map[i] = atoi(namelist[i]->d_name);
+ threads->nr = items;
+ }
+
+ for (i=0; i<items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ return threads;
+}
+
+struct thread_map *thread_map__new_by_tid(pid_t tid)
+{
+ struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+
+ if (threads != NULL) {
+ threads->map[0] = tid;
+ threads->nr = 1;
+ }
+
+ return threads;
+}
+
+struct thread_map *thread_map__new(pid_t pid, pid_t tid)
+{
+ if (pid != -1)
+ return thread_map__new_by_pid(pid);
+ return thread_map__new_by_tid(tid);
+}
+
+void thread_map__delete(struct thread_map *threads)
+{
+ free(threads);
+}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
new file mode 100644
index 000000000000..3cb907311409
--- /dev/null
+++ b/tools/perf/util/thread_map.h
@@ -0,0 +1,15 @@
+#ifndef __PERF_THREAD_MAP_H
+#define __PERF_THREAD_MAP_H
+
+#include <sys/types.h>
+
+struct thread_map {
+ int nr;
+ int map[];
+};
+
+struct thread_map *thread_map__new_by_pid(pid_t pid);
+struct thread_map *thread_map__new_by_tid(pid_t tid);
+struct thread_map *thread_map__new(pid_t pid, pid_t tid);
+void thread_map__delete(struct thread_map *threads);
+#endif /* __PERF_THREAD_MAP_H */
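
Illustrative sketch (not from the patch) of the thread_map API in its new home; the printing helper is hypothetical:

#include <stdio.h>
#include "thread_map.h"

static void print_threads(pid_t pid, pid_t tid)
{
	/* pid != -1: enumerate /proc/<pid>/task; otherwise a single-tid map */
	struct thread_map *threads = thread_map__new(pid, tid);
	int i;

	if (threads == NULL)
		return;

	for (i = 0; i < threads->nr; i++)
		printf("thread %d\n", threads->map[i]);

	thread_map__delete(threads);
}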
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
new file mode 100644
index 000000000000..70a9c13f4ad5
--- /dev/null
+++ b/tools/perf/util/top.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Refactored from builtin-top.c, see that file for further copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "cpumap.h"
+#include "event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "symbol.h"
+#include "top.h"
+#include <inttypes.h>
+
+/*
+ * Ordering weight: count-1 * count-2 * ... / count-n
+ */
+static double sym_weight(const struct sym_entry *sym, struct perf_top *top)
+{
+ double weight = sym->snap_count;
+ int counter;
+
+ if (!top->display_weighted)
+ return weight;
+
+ for (counter = 1; counter < top->evlist->nr_entries - 1; counter++)
+ weight *= sym->count[counter];
+
+ weight /= (sym->count[counter] + 1);
+
+ return weight;
+}
+
+static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme)
+{
+ pthread_mutex_lock(&top->active_symbols_lock);
+ list_del_init(&syme->node);
+ pthread_mutex_unlock(&top->active_symbols_lock);
+}
+
+static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
+{
+ struct rb_node **p = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ struct sym_entry *iter;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct sym_entry, rb_node);
+
+ if (se->weight > iter->weight)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&se->rb_node, parent, p);
+ rb_insert_color(&se->rb_node, tree);
+}
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
+{
+ struct perf_evsel *counter;
+ float samples_per_sec = top->samples / top->delay_secs;
+ float ksamples_per_sec = top->kernel_samples / top->delay_secs;
+ float esamples_percent = (100.0 * top->exact_samples) / top->samples;
+ size_t ret = 0;
+
+ if (!perf_guest) {
+ ret = snprintf(bf, size,
+ " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
+ " exact: %4.1f%% [", samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ } else {
+ float us_samples_per_sec = top->us_samples / top->delay_secs;
+ float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
+ float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
+
+ ret = snprintf(bf, size,
+ " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
+ " guest kernel:%4.1f%% guest us:%4.1f%%"
+ " exact: %4.1f%% [", samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_kernel_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_us_samples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ }
+
+ if (top->evlist->nr_entries == 1 || !top->display_weighted) {
+ struct perf_evsel *first;
+ first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ ret += snprintf(bf + ret, size - ret, "%" PRIu64 "%s ",
+ (uint64_t)first->attr.sample_period,
+ top->freq ? "Hz" : "");
+ }
+
+ if (!top->display_weighted) {
+ ret += snprintf(bf + ret, size - ret, "%s",
+ event_name(top->sym_evsel));
+ } else list_for_each_entry(counter, &top->evlist->entries, node) {
+ ret += snprintf(bf + ret, size - ret, "%s%s",
+ counter->idx ? "/" : "", event_name(counter));
+ }
+
+ ret += snprintf(bf + ret, size - ret, "], ");
+
+ if (top->target_pid != -1)
+ ret += snprintf(bf + ret, size - ret, " (target_pid: %d",
+ top->target_pid);
+ else if (top->target_tid != -1)
+ ret += snprintf(bf + ret, size - ret, " (target_tid: %d",
+ top->target_tid);
+ else
+ ret += snprintf(bf + ret, size - ret, " (all");
+
+ if (top->cpu_list)
+ ret += snprintf(bf + ret, size - ret, ", CPU%s: %s)",
+ top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
+ else {
+ if (top->target_tid != -1)
+ ret += snprintf(bf + ret, size - ret, ")");
+ else
+ ret += snprintf(bf + ret, size - ret, ", %d CPU%s)",
+ top->evlist->cpus->nr,
+ top->evlist->cpus->nr > 1 ? "s" : "");
+ }
+
+ return ret;
+}
+
+void perf_top__reset_sample_counters(struct perf_top *top)
+{
+ top->samples = top->us_samples = top->kernel_samples =
+ top->exact_samples = top->guest_kernel_samples =
+ top->guest_us_samples = 0;
+}
+
+float perf_top__decay_samples(struct perf_top *top, struct rb_root *root)
+{
+ struct sym_entry *syme, *n;
+ float sum_ksamples = 0.0;
+ int snap = !top->display_weighted ? top->sym_counter : 0, j;
+
+ /* Sort the active symbols */
+ pthread_mutex_lock(&top->active_symbols_lock);
+ syme = list_entry(top->active_symbols.next, struct sym_entry, node);
+ pthread_mutex_unlock(&top->active_symbols_lock);
+
+ top->rb_entries = 0;
+ list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) {
+ syme->snap_count = syme->count[snap];
+ if (syme->snap_count != 0) {
+
+ if ((top->hide_user_symbols &&
+ syme->origin == PERF_RECORD_MISC_USER) ||
+ (top->hide_kernel_symbols &&
+ syme->origin == PERF_RECORD_MISC_KERNEL)) {
+ perf_top__remove_active_sym(top, syme);
+ continue;
+ }
+ syme->weight = sym_weight(syme, top);
+
+ if ((int)syme->snap_count >= top->count_filter) {
+ rb_insert_active_sym(root, syme);
+ ++top->rb_entries;
+ }
+ sum_ksamples += syme->snap_count;
+
+ for (j = 0; j < top->evlist->nr_entries; j++)
+ syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8;
+ } else
+ perf_top__remove_active_sym(top, syme);
+ }
+
+ return sum_ksamples;
+}
+
+/*
+ * Find the longest symbol name that will be displayed
+ */
+void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
+ int *dso_width, int *dso_short_width, int *sym_width)
+{
+ struct rb_node *nd;
+ int printed = 0;
+
+ *sym_width = *dso_width = *dso_short_width = 0;
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
+ struct symbol *sym = sym_entry__symbol(syme);
+
+ if (++printed > top->print_entries ||
+ (int)syme->snap_count < top->count_filter)
+ continue;
+
+ if (syme->map->dso->long_name_len > *dso_width)
+ *dso_width = syme->map->dso->long_name_len;
+
+ if (syme->map->dso->short_name_len > *dso_short_width)
+ *dso_short_width = syme->map->dso->short_name_len;
+
+ if (sym->namelen > *sym_width)
+ *sym_width = sym->namelen;
+ }
+}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
new file mode 100644
index 000000000000..e8d28e2ecdba
--- /dev/null
+++ b/tools/perf/util/top.h
@@ -0,0 +1,65 @@
+#ifndef __PERF_TOP_H
+#define __PERF_TOP_H 1
+
+#include "types.h"
+#include "../perf.h"
+#include <stddef.h>
+#include <pthread.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct perf_evlist;
+struct perf_evsel;
+
+struct sym_entry {
+ struct rb_node rb_node;
+ struct list_head node;
+ unsigned long snap_count;
+ double weight;
+ int skip;
+ u8 origin;
+ struct map *map;
+ unsigned long count[0];
+};
+
+static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
+{
+ return ((void *)self) + symbol_conf.priv_size;
+}
+
+struct perf_top {
+ struct perf_evlist *evlist;
+ /*
+ * Symbols will be added here in perf_event__process_sample and will be
+ * removed after they have decayed.
+ */
+ struct list_head active_symbols;
+ pthread_mutex_t active_symbols_lock;
+ u64 samples;
+ u64 kernel_samples, us_samples;
+ u64 exact_samples;
+ u64 guest_us_samples, guest_kernel_samples;
+ int print_entries, count_filter, delay_secs;
+ int display_weighted, freq, rb_entries, sym_counter;
+ pid_t target_pid, target_tid;
+ bool hide_kernel_symbols, hide_user_symbols, zero;
+ const char *cpu_list;
+ struct sym_entry *sym_filter_entry;
+ struct perf_evsel *sym_evsel;
+};
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
+void perf_top__reset_sample_counters(struct perf_top *top);
+float perf_top__decay_samples(struct perf_top *top, struct rb_root *root);
+void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
+ int *dso_width, int *dso_short_width, int *sym_width);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int perf_top__tui_browser(struct perf_top *top __used)
+{
+ return 0;
+}
+#else
+int perf_top__tui_browser(struct perf_top *top);
+#endif
+#endif /* __PERF_TOP_H */
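
Illustrative sketch (not from the patch) of the refresh pattern the new perf_top helpers support, assuming 'top' was already populated by the caller (as builtin-top does); the wrapper function is hypothetical:

static void refresh_header(struct perf_top *top)
{
	char bf[160];

	perf_top__header_snprintf(top, bf, sizeof(bf));
	puts(bf);
	/* start a fresh sampling interval */
	perf_top__reset_sample_counters(top);
}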
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 73a02223c629..d8e622dd738a 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -153,7 +153,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
char *next = NULL;
char *addr_str;
char ch;
- int ret;
+ int ret __used;
int i;
line = strtok_r(file, "\n", &next);
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
index 8bc010edca25..60d6c815e1db 100644
--- a/tools/perf/util/ui/browser.c
+++ b/tools/perf/util/ui/browser.c
@@ -1,4 +1,5 @@
#include "libslang.h"
+#include "ui.h"
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -178,6 +179,7 @@ int ui_browser__show(struct ui_browser *self, const char *title,
if (self->sb == NULL)
return -1;
+ pthread_mutex_lock(&ui__lock);
SLsmg_gotorc(0, 0);
ui_browser__set_color(self, NEWT_COLORSET_ROOT);
slsmg_write_nstring(title, self->width);
@@ -188,25 +190,30 @@ int ui_browser__show(struct ui_browser *self, const char *title,
va_start(ap, helpline);
ui_helpline__vpush(helpline, ap);
va_end(ap);
+ pthread_mutex_unlock(&ui__lock);
return 0;
}
void ui_browser__hide(struct ui_browser *self)
{
+ pthread_mutex_lock(&ui__lock);
newtFormDestroy(self->form);
self->form = NULL;
ui_helpline__pop();
+ pthread_mutex_unlock(&ui__lock);
}
int ui_browser__refresh(struct ui_browser *self)
{
int row;
+ pthread_mutex_lock(&ui__lock);
newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
row = self->refresh(self);
ui_browser__set_color(self, HE_COLORSET_NORMAL);
SLsmg_fill_region(self->y + row, self->x,
self->height - row, self->width, ' ');
+ pthread_mutex_unlock(&ui__lock);
return 0;
}
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 82b78f99251b..8c17a8730e4a 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -1,9 +1,12 @@
#include "../browser.h"
#include "../helpline.h"
#include "../libslang.h"
+#include "../../annotate.h"
#include "../../hist.h"
#include "../../sort.h"
#include "../../symbol.h"
+#include "../../annotate.h"
+#include <pthread.h>
static void ui__error_window(const char *fmt, ...)
{
@@ -42,8 +45,6 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
ui_browser__set_percent_color(self, olrb->percent, current_entry);
slsmg_printf(" %7.2f ", olrb->percent);
- if (!current_entry)
- ui_browser__set_color(self, HE_COLORSET_CODE);
} else {
ui_browser__set_percent_color(self, 0, current_entry);
slsmg_write_nstring(" ", 9);
@@ -55,35 +56,40 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
slsmg_write_nstring(" ", width - 18);
else
slsmg_write_nstring(ol->line, width - 18);
+
+ if (!current_entry)
+ ui_browser__set_color(self, HE_COLORSET_CODE);
}
static double objdump_line__calc_percent(struct objdump_line *self,
- struct list_head *head,
- struct symbol *sym)
+ struct symbol *sym, int evidx)
{
double percent = 0.0;
if (self->offset != -1) {
int len = sym->end - sym->start;
unsigned int hits = 0;
- struct sym_priv *priv = symbol__priv(sym);
- struct sym_ext *sym_ext = priv->ext;
- struct sym_hist *h = priv->hist;
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ struct sym_hist *h = annotation__histogram(notes, evidx);
s64 offset = self->offset;
- struct objdump_line *next = objdump__get_next_ip_line(head, self);
-
+ struct objdump_line *next;
+ next = objdump__get_next_ip_line(&notes->src->source, self);
while (offset < (s64)len &&
(next == NULL || offset < next->offset)) {
- if (sym_ext) {
- percent += sym_ext[offset].percent;
+ if (src_line) {
+ percent += src_line[offset].percent;
} else
- hits += h->ip[offset];
+ hits += h->addr[offset];
++offset;
}
-
- if (sym_ext == NULL && h->sum)
+ /*
+ * If the percentage wasn't already calculated in
+ * symbol__get_source_line, do it now:
+ */
+ if (src_line == NULL && h->sum)
percent = 100.0 * hits / h->sum;
}
@@ -133,103 +139,161 @@ static void annotate_browser__set_top(struct annotate_browser *self,
self->curr_hot = nd;
}
-static int annotate_browser__run(struct annotate_browser *self)
+static void annotate_browser__calc_percent(struct annotate_browser *browser,
+ int evidx)
{
- struct rb_node *nd;
- struct hist_entry *he = self->b.priv;
- int key;
+ struct symbol *sym = browser->b.priv;
+ struct annotation *notes = symbol__annotation(sym);
+ struct objdump_line *pos;
- if (ui_browser__show(&self->b, he->ms.sym->name,
- "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
- return -1;
+ browser->entries = RB_ROOT;
+
+ pthread_mutex_lock(&notes->lock);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ struct objdump_line_rb_node *rbpos = objdump_line__rb(pos);
+ rbpos->percent = objdump_line__calc_percent(pos, sym, evidx);
+ if (rbpos->percent < 0.01) {
+ RB_CLEAR_NODE(&rbpos->rb_node);
+ continue;
+ }
+ objdump__insert_line(&browser->entries, rbpos);
+ }
+ pthread_mutex_unlock(&notes->lock);
+
+ browser->curr_hot = rb_last(&browser->entries);
+}
+
+static int annotate_browser__run(struct annotate_browser *self, int evidx,
+ int refresh)
+{
+ struct rb_node *nd = NULL;
+ struct symbol *sym = self->b.priv;
/*
- * To allow builtin-annotate to cycle thru multiple symbols by
+ * RIGHT To allow builtin-annotate to cycle thru multiple symbols by
* examining the exit key for this function.
*/
- ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
+ int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB,
+ NEWT_KEY_RIGHT, 0 };
+ int key;
+
+ if (ui_browser__show(&self->b, sym->name,
+ "<-, -> or ESC: exit, TAB/shift+TAB: "
+ "cycle hottest lines, H: Hottest") < 0)
+ return -1;
+
+ ui_browser__add_exit_keys(&self->b, exit_keys);
+ annotate_browser__calc_percent(self, evidx);
+
+ if (self->curr_hot)
+ annotate_browser__set_top(self, self->curr_hot);
nd = self->curr_hot;
- if (nd) {
- int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
- ui_browser__add_exit_keys(&self->b, tabs);
- }
+
+ if (refresh != 0)
+ newtFormSetTimer(self->b.form, refresh);
while (1) {
key = ui_browser__run(&self->b);
+ if (refresh != 0) {
+ annotate_browser__calc_percent(self, evidx);
+ /*
+ * Current line focus got out of the list of most active
+ * lines, NULL it so that if TAB|UNTAB is pressed, we
+ * move to curr_hot (current hottest line).
+ */
+ if (nd != NULL && RB_EMPTY_NODE(nd))
+ nd = NULL;
+ }
+
switch (key) {
+ case -1:
+ /*
+ * FIXME we need to check if it was
+ * es.reason == NEWT_EXIT_TIMER
+ */
+ if (refresh != 0)
+ symbol__annotate_decay_histogram(sym, evidx);
+ continue;
case NEWT_KEY_TAB:
- nd = rb_prev(nd);
- if (nd == NULL)
- nd = rb_last(&self->entries);
- annotate_browser__set_top(self, nd);
+ if (nd != NULL) {
+ nd = rb_prev(nd);
+ if (nd == NULL)
+ nd = rb_last(&self->entries);
+ } else
+ nd = self->curr_hot;
break;
case NEWT_KEY_UNTAB:
- nd = rb_next(nd);
- if (nd == NULL)
- nd = rb_first(&self->entries);
- annotate_browser__set_top(self, nd);
+ if (nd != NULL)
+ nd = rb_next(nd);
+ if (nd == NULL)
+ nd = rb_first(&self->entries);
+ else
+ nd = self->curr_hot;
+ break;
+ case 'H':
+ nd = self->curr_hot;
break;
default:
goto out;
}
+
+ if (nd != NULL)
+ annotate_browser__set_top(self, nd);
}
out:
ui_browser__hide(&self->b);
return key;
}
-int hist_entry__tui_annotate(struct hist_entry *self)
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx)
+{
+ return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0);
+}
+
+int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
+ int refresh)
{
struct objdump_line *pos, *n;
- struct objdump_line_rb_node *rbpos;
- LIST_HEAD(head);
+ struct annotation *notes = symbol__annotation(sym);
struct annotate_browser browser = {
.b = {
- .entries = &head,
+ .entries = &notes->src->source,
.refresh = ui_browser__list_head_refresh,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
- .priv = self,
+ .priv = sym,
},
};
int ret;
- if (self->ms.sym == NULL)
+ if (sym == NULL)
return -1;
- if (self->ms.map->dso->annotate_warned)
+ if (map->dso->annotate_warned)
return -1;
- if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) {
+ if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
ui__error_window(ui_helpline__last_msg);
return -1;
}
ui_helpline__push("Press <- or ESC to exit");
- list_for_each_entry(pos, &head, node) {
+ list_for_each_entry(pos, &notes->src->source, node) {
+ struct objdump_line_rb_node *rbpos;
size_t line_len = strlen(pos->line);
+
if (browser.b.width < line_len)
browser.b.width = line_len;
rbpos = objdump_line__rb(pos);
rbpos->idx = browser.b.nr_entries++;
- rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym);
- if (rbpos->percent < 0.01)
- continue;
- objdump__insert_line(&browser.entries, rbpos);
}
- /*
- * Position the browser at the hottest line.
- */
- browser.curr_hot = rb_last(&browser.entries);
- if (browser.curr_hot)
- annotate_browser__set_top(&browser, browser.curr_hot);
-
browser.b.width += 18; /* Percentage */
- ret = annotate_browser__run(&browser);
- list_for_each_entry_safe(pos, n, &head, node) {
+ ret = annotate_browser__run(&browser, evidx, refresh);
+ list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
objdump_line__free(pos);
}
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 60c463c16028..497b3c4076a3 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -377,7 +377,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
- u64 cumul = cumul_hits(child);
+ u64 cumul = callchain_cumul_hits(child);
struct callchain_list *chain;
char folded_sign = ' ';
int first = true;
@@ -797,7 +797,8 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
return printed;
}
-int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
+int hists__browse(struct hists *self, const char *helpline,
+ const char *ev_name, int evidx)
{
struct hist_browser *browser = hist_browser__new(self);
struct pstack *fstack;
@@ -923,19 +924,11 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
if (choice == annotate) {
struct hist_entry *he;
do_annotate:
- if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
- browser->selection->map->dso->annotate_warned = 1;
- ui_helpline__puts("No vmlinux file found, can't "
- "annotate with just a "
- "kallsyms file");
- continue;
- }
-
he = hist_browser__selected_entry(browser);
if (he == NULL)
continue;
- hist_entry__tui_annotate(he);
+ hist_entry__tui_annotate(he, evidx);
} else if (choice == browse_map)
map__browse(browser->selection->map);
else if (choice == zoom_dso) {
@@ -984,7 +977,7 @@ out:
return key;
}
-int hists__tui_browse_tree(struct rb_root *self, const char *help)
+int hists__tui_browse_tree(struct rb_root *self, const char *help, int evidx)
{
struct rb_node *first = rb_first(self), *nd = first, *next;
int key = 0;
@@ -993,7 +986,7 @@ int hists__tui_browse_tree(struct rb_root *self, const char *help)
struct hists *hists = rb_entry(nd, struct hists, rb_node);
const char *ev_name = __event_name(hists->type, hists->config);
- key = hists__browse(hists, help, ev_name);
+ key = hists__browse(hists, help, ev_name, evidx);
switch (key) {
case NEWT_KEY_TAB:
next = rb_next(nd);
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
index e5158369106e..8462bffe20bc 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/util/ui/browsers/map.c
@@ -41,7 +41,7 @@ static int ui_entry__read(const char *title, char *bf, size_t size, int width)
out_free_form:
newtPopWindow();
newtFormDestroy(form);
- return 0;
+ return err;
}
struct map_browser {
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c
new file mode 100644
index 000000000000..2f47224426b6
--- /dev/null
+++ b/tools/perf/util/ui/browsers/top.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "../browser.h"
+#include "../../annotate.h"
+#include "../helpline.h"
+#include "../libslang.h"
+#include "../../evlist.h"
+#include "../../hist.h"
+#include "../../sort.h"
+#include "../../symbol.h"
+#include "../../top.h"
+
+struct perf_top_browser {
+ struct ui_browser b;
+ struct rb_root root;
+ struct sym_entry *selection;
+ float sum_ksamples;
+ int dso_width;
+ int dso_short_width;
+ int sym_width;
+};
+
+static void perf_top_browser__write(struct ui_browser *browser, void *entry, int row)
+{
+ struct perf_top_browser *top_browser = container_of(browser, struct perf_top_browser, b);
+ struct sym_entry *syme = rb_entry(entry, struct sym_entry, rb_node);
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+ struct symbol *symbol = sym_entry__symbol(syme);
+ struct perf_top *top = browser->priv;
+ int width = browser->width;
+ double pcnt;
+
+ pcnt = 100.0 - (100.0 * ((top_browser->sum_ksamples - syme->snap_count) /
+ top_browser->sum_ksamples));
+ ui_browser__set_percent_color(browser, pcnt, current_entry);
+
+ if (top->evlist->nr_entries == 1 || !top->display_weighted) {
+ slsmg_printf("%20.2f ", syme->weight);
+ width -= 24;
+ } else {
+ slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count);
+ width -= 23;
+ }
+
+ slsmg_printf("%4.1f%%", pcnt);
+ width -= 7;
+
+ if (verbose) {
+ slsmg_printf(" %016" PRIx64, symbol->start);
+ width -= 17;
+ }
+
+ slsmg_printf(" %-*.*s ", top_browser->sym_width, top_browser->sym_width,
+ symbol->name);
+ width -= top_browser->sym_width;
+ slsmg_write_nstring(width >= syme->map->dso->long_name_len ?
+ syme->map->dso->long_name :
+ syme->map->dso->short_name, width);
+
+ if (current_entry)
+ top_browser->selection = syme;
+}
+
+static void perf_top_browser__update_rb_tree(struct perf_top_browser *browser)
+{
+ struct perf_top *top = browser->b.priv;
+ u64 top_idx = browser->b.top_idx;
+
+ browser->root = RB_ROOT;
+ browser->b.top = NULL;
+ browser->sum_ksamples = perf_top__decay_samples(top, &browser->root);
+ perf_top__find_widths(top, &browser->root, &browser->dso_width,
+ &browser->dso_short_width,
+ &browser->sym_width);
+ if (browser->sym_width + browser->dso_width > browser->b.width - 29) {
+ browser->dso_width = browser->dso_short_width;
+ if (browser->sym_width + browser->dso_width > browser->b.width - 29)
+ browser->sym_width = browser->b.width - browser->dso_width - 29;
+ }
+
+ /*
+ * Adjust the ui_browser indexes since the entries in the browser->root
+ * rb_tree may have changed, then seek from the start so that we get a
+ * possible new top of the screen.
+ */
+ browser->b.nr_entries = top->rb_entries;
+
+ if (top_idx >= browser->b.nr_entries) {
+ if (browser->b.height >= browser->b.nr_entries)
+ top_idx = browser->b.nr_entries - browser->b.height;
+ else
+ top_idx = 0;
+ }
+
+ if (browser->b.index >= top_idx + browser->b.height)
+ browser->b.index = top_idx + browser->b.index - browser->b.top_idx;
+
+ if (browser->b.index >= browser->b.nr_entries)
+ browser->b.index = browser->b.nr_entries - 1;
+
+ browser->b.top_idx = top_idx;
+ browser->b.seek(&browser->b, top_idx, SEEK_SET);
+}
+
+static void perf_top_browser__annotate(struct perf_top_browser *browser)
+{
+ struct sym_entry *syme = browser->selection;
+ struct symbol *sym = sym_entry__symbol(syme);
+ struct annotation *notes = symbol__annotation(sym);
+ struct perf_top *top = browser->b.priv;
+
+ if (notes->src != NULL)
+ goto do_annotation;
+
+ pthread_mutex_lock(&notes->lock);
+
+ top->sym_filter_entry = NULL;
+
+ if (symbol__alloc_hist(sym, top->evlist->nr_entries) < 0) {
+ pr_err("Not enough memory for annotating '%s' symbol!\n",
+ sym->name);
+ pthread_mutex_unlock(&notes->lock);
+ return;
+ }
+
+ top->sym_filter_entry = syme;
+
+ pthread_mutex_unlock(&notes->lock);
+do_annotation:
+ symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000);
+}
+
+static int perf_top_browser__run(struct perf_top_browser *browser)
+{
+ int key;
+ char title[160];
+ struct perf_top *top = browser->b.priv;
+ int delay_msecs = top->delay_secs * 1000;
+ int exit_keys[] = { 'a', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, };
+
+ perf_top_browser__update_rb_tree(browser);
+ perf_top__header_snprintf(top, title, sizeof(title));
+ perf_top__reset_sample_counters(top);
+
+ if (ui_browser__show(&browser->b, title,
+ "ESC: exit, ENTER|->|a: Live Annotate") < 0)
+ return -1;
+
+ newtFormSetTimer(browser->b.form, delay_msecs);
+ ui_browser__add_exit_keys(&browser->b, exit_keys);
+
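+	/* key == -1 means ui_browser__run() returned without a keypress (typically the refresh timer); see the FIXME below. */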
+ while (1) {
+ key = ui_browser__run(&browser->b);
+
+ switch (key) {
+ case -1:
+ /* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */
+ perf_top_browser__update_rb_tree(browser);
+ perf_top__header_snprintf(top, title, sizeof(title));
+ perf_top__reset_sample_counters(top);
+ ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT);
+ SLsmg_gotorc(0, 0);
+ slsmg_write_nstring(title, browser->b.width);
+ break;
+ case 'a':
+ case NEWT_KEY_RIGHT:
+ case NEWT_KEY_ENTER:
+ if (browser->selection)
+ perf_top_browser__annotate(browser);
+ break;
+ default:
+ goto out;
+ }
+ }
+out:
+ ui_browser__hide(&browser->b);
+ return key;
+}
+
+int perf_top__tui_browser(struct perf_top *top)
+{
+ struct perf_top_browser browser = {
+ .b = {
+ .entries = &browser.root,
+ .refresh = ui_browser__rb_tree_refresh,
+ .seek = ui_browser__rb_tree_seek,
+ .write = perf_top_browser__write,
+ .priv = top,
+ },
+ };
+
+ ui_helpline__push("Press <- or ESC to exit");
+ return perf_top_browser__run(&browser);
+}
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
index 8d79daa4458a..f36d2ff509ed 100644
--- a/tools/perf/util/ui/helpline.c
+++ b/tools/perf/util/ui/helpline.c
@@ -5,6 +5,7 @@
#include "../debug.h"
#include "helpline.h"
+#include "ui.h"
void ui_helpline__pop(void)
{
@@ -55,7 +56,8 @@ int ui_helpline__show_help(const char *format, va_list ap)
int ret;
static int backlog;
- ret = vsnprintf(ui_helpline__last_msg + backlog,
+ pthread_mutex_lock(&ui__lock);
+ ret = vsnprintf(ui_helpline__last_msg + backlog,
sizeof(ui_helpline__last_msg) - backlog, format, ap);
backlog += ret;
@@ -64,6 +66,7 @@ int ui_helpline__show_help(const char *format, va_list ap)
newtRefresh();
backlog = 0;
}
+ pthread_mutex_unlock(&ui__lock);
return ret;
}
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/util/ui/libslang.h
index 5623da8e8080..2b63e1c9b181 100644
--- a/tools/perf/util/ui/libslang.h
+++ b/tools/perf/util/ui/libslang.h
@@ -13,11 +13,11 @@
#if SLANG_VERSION < 20104
#define slsmg_printf(msg, args...) \
- SLsmg_printf((char *)msg, ##args)
+ SLsmg_printf((char *)(msg), ##args)
#define slsmg_write_nstring(msg, len) \
- SLsmg_write_nstring((char *)msg, len)
+ SLsmg_write_nstring((char *)(msg), len)
#define sltt_set_color(obj, name, fg, bg) \
- SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
+ SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg))
#else
#define slsmg_printf SLsmg_printf
#define slsmg_write_nstring SLsmg_write_nstring
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c
index 662085032eb7..ee46d671db59 100644
--- a/tools/perf/util/ui/setup.c
+++ b/tools/perf/util/ui/setup.c
@@ -6,6 +6,9 @@
#include "../debug.h"
#include "browser.h"
#include "helpline.h"
+#include "ui.h"
+
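+/* Serializes newt/slang screen updates issued from different threads. */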
+pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
static void newt_suspend(void *d __used)
{
@@ -14,11 +17,12 @@ static void newt_suspend(void *d __used)
newtResume();
}
-void setup_browser(void)
+void setup_browser(bool fallback_to_pager)
{
if (!isatty(1) || !use_browser || dump_trace) {
use_browser = 0;
- setup_pager();
+ if (fallback_to_pager)
+ setup_pager();
return;
}
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/util/ui/ui.h
new file mode 100644
index 000000000000..d264e059c829
--- /dev/null
+++ b/tools/perf/util/ui/ui.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_UI_H_
+#define _PERF_UI_H_ 1
+
+#include <pthread.h>
+
+extern pthread_mutex_t ui__lock;
+
+#endif /* _PERF_UI_H_ */
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index e833f26f3bfc..fc784284ac8b 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -70,9 +70,7 @@
#include <sys/poll.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
-#ifndef NO_SYS_SELECT_H
#include <sys/select.h>
-#endif
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
@@ -83,10 +81,6 @@
#include "types.h"
#include <sys/ttydefaults.h>
-#ifndef NO_ICONV
-#include <iconv.h>
-#endif
-
extern const char *graph_line;
extern const char *graph_dotted_line;
extern char buildid_dir[];
@@ -236,26 +230,6 @@ static inline int sane_case(int x, int high)
return x;
}
-#ifndef DIR_HAS_BSD_GROUP_SEMANTICS
-# define FORCE_DIR_SET_GID S_ISGID
-#else
-# define FORCE_DIR_SET_GID 0
-#endif
-
-#ifdef NO_NSEC
-#undef USE_NSEC
-#define ST_CTIME_NSEC(st) 0
-#define ST_MTIME_NSEC(st) 0
-#else
-#ifdef USE_ST_TIMESPEC
-#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec))
-#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec))
-#else
-#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec))
-#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec))
-#endif
-#endif
-
int mkdir_p(char *path, mode_t mode);
int copyfile(const char *from, const char *to);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index e1c62eeb88f5..ba7c63af6f3b 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
#
-# Copywrite 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
# Licensed under the terms of the GNU GPL License version 2
#
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f29abeb6a912..1fa0d292119a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -69,7 +69,7 @@ MODULE_LICENSE("GPL");
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
@@ -137,6 +137,14 @@ void vcpu_load(struct kvm_vcpu *vcpu)
int cpu;
mutex_lock(&vcpu->mutex);
+ if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ /* The thread running this VCPU changed. */
+ struct pid *oldpid = vcpu->pid;
+ struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+ rcu_assign_pointer(vcpu->pid, newpid);
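+		/* Wait for readers still using the old pid before dropping our reference to it. */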
+ synchronize_rcu();
+ put_pid(oldpid);
+ }
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
@@ -165,13 +173,16 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
- raw_spin_lock(&kvm->requests_lock);
- me = smp_processor_id();
+ me = get_cpu();
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (kvm_make_check_request(req, vcpu))
- continue;
+ kvm_make_request(req, vcpu);
cpu = vcpu->cpu;
- if (cpus != NULL && cpu != -1 && cpu != me)
+
+ /* Set ->requests bit before we read ->mode */
+ smp_mb();
+
+ if (cpus != NULL && cpu != -1 && cpu != me &&
+ kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
cpumask_set_cpu(cpu, cpus);
}
if (unlikely(cpus == NULL))
@@ -180,7 +191,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
- raw_spin_unlock(&kvm->requests_lock);
+ put_cpu();
free_cpumask_var(cpus);
return called;
}
@@ -209,6 +220,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->cpu = -1;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
+ vcpu->pid = NULL;
init_waitqueue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
@@ -233,6 +245,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
+ put_pid(vcpu->pid);
kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
@@ -463,15 +476,14 @@ static struct kvm *kvm_create_vm(void)
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
- raw_spin_lock_init(&kvm->requests_lock);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return kvm;
@@ -544,9 +556,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++)
kvm_io_bus_destroy(kvm->buses[i]);
@@ -588,6 +600,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
return 0;
}
+#ifndef CONFIG_S390
/*
* Allocation size is twice as large as the actual dirty bitmap size.
* This makes it possible to do double buffering: see x86's
@@ -608,6 +621,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
memslot->dirty_bitmap_head = memslot->dirty_bitmap;
return 0;
}
+#endif /* !CONFIG_S390 */
/*
* Allocate some memory and give it an address in the guest physical address
@@ -621,7 +635,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int r, flush_shadow = 0;
+ int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
@@ -741,8 +755,6 @@ skip_lpage:
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
- if (old.npages)
- flush_shadow = 1;
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
@@ -813,9 +825,6 @@ skip_lpage:
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
- if (flush_shadow)
- kvm_arch_flush_shadow(kvm);
-
return 0;
out_free:
@@ -1029,6 +1038,15 @@ static pfn_t get_fault_pfn(void)
return fault_pfn;
}
+static inline int check_user_page_hwpoison(unsigned long addr)
+{
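+	/* Fault in a single page; __get_user_pages() returns -EHWPOISON if the page is hardware poisoned. */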
+ int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+
+ rc = __get_user_pages(current, current->mm, addr, 1,
+ flags, NULL, NULL, NULL);
+ return rc == -EHWPOISON;
+}
+
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
bool *async, bool write_fault, bool *writable)
{
@@ -1076,7 +1094,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
return get_fault_pfn();
down_read(&current->mm->mmap_sem);
- if (is_hwpoison_address(addr)) {
+ if (check_user_page_hwpoison(addr)) {
up_read(&current->mm->mmap_sem);
get_page(hwpoison_page);
return page_to_pfn(hwpoison_page);
@@ -1466,18 +1484,55 @@ void kvm_resched(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_resched);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
- ktime_t expires;
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
- /* Sleep for 100 us, and hope lock-holder got scheduled */
- expires = ktime_add_ns(ktime_get(), 100000UL);
- schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+ struct kvm *kvm = me->kvm;
+ struct kvm_vcpu *vcpu;
+ int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+ int yielded = 0;
+ int pass;
+ int i;
- finish_wait(&vcpu->wq, &wait);
+ /*
+ * We boost the priority of a VCPU that is runnable but not
+ * currently running, because it got preempted by something
+ * else and called schedule in __vcpu_run. Hopefully that
+ * VCPU is holding the lock that we need and will release it.
+ * We approximate round-robin by starting at the last boosted VCPU.
+ */
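+	/* Pass 0 scans from last_boosted_vcpu to the last VCPU; pass 1 wraps around from VCPU 0. */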
+ for (pass = 0; pass < 2 && !yielded; pass++) {
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct task_struct *task = NULL;
+ struct pid *pid;
+ if (!pass && i < last_boosted_vcpu) {
+ i = last_boosted_vcpu;
+ continue;
+ } else if (pass && i > last_boosted_vcpu)
+ break;
+ if (vcpu == me)
+ continue;
+ if (waitqueue_active(&vcpu->wq))
+ continue;
+ rcu_read_lock();
+ pid = rcu_dereference(vcpu->pid);
+ if (pid)
+ task = get_pid_task(pid, PIDTYPE_PID);
+ rcu_read_unlock();
+ if (!task)
+ continue;
+ if (task->flags & PF_VCPU) {
+ put_task_struct(task);
+ continue;
+ }
+ if (yield_to(task, 1)) {
+ put_task_struct(task);
+ kvm->last_boosted_vcpu = i;
+ yielded = 1;
+ break;
+ }
+ put_task_struct(task);
+ }
+ }
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
@@ -2122,9 +2177,9 @@ static void hardware_enable_nolock(void *junk)
static void hardware_enable(void *junk)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_enable_nolock(junk);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_nolock(void *junk)
@@ -2139,9 +2194,9 @@ static void hardware_disable_nolock(void *junk)
static void hardware_disable(void *junk)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_disable_nolock(junk);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_all_nolock(void)
@@ -2155,16 +2210,16 @@ static void hardware_disable_all_nolock(void)
static void hardware_disable_all(void)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_disable_all_nolock();
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static int hardware_enable_all(void)
{
int r = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
kvm_usage_count++;
if (kvm_usage_count == 1) {
@@ -2177,7 +2232,7 @@ static int hardware_enable_all(void)
}
}
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return r;
}
@@ -2339,10 +2394,10 @@ static int vm_stat_get(void *_offset, u64 *val)
struct kvm *kvm;
*val = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
*val += *(u32 *)((void *)kvm + offset);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return 0;
}
@@ -2356,12 +2411,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
int i;
*val = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
*val += *(u32 *)((void *)vcpu + offset);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return 0;
}
@@ -2402,7 +2457,7 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
static int kvm_resume(struct sys_device *dev)
{
if (kvm_usage_count) {
- WARN_ON(spin_is_locked(&kvm_lock));
+ WARN_ON(raw_spin_is_locked(&kvm_lock));
hardware_enable_nolock(NULL);
}
return 0;